From 6d0895f6530b51a8bf3ced622adb5eeda65794a1 Mon Sep 17 00:00:00 2001 From: Archit Kulkarni Date: Wed, 19 Apr 2023 10:56:17 -0700 Subject: [PATCH 001/424] Add CLI warning for arguments Signed-off-by: Archit Kulkarni --- .../cluster/running-applications/job-submission/cli.rst | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/doc/source/cluster/running-applications/job-submission/cli.rst b/doc/source/cluster/running-applications/job-submission/cli.rst index 708ca12c8d12..94a950ebf39d 100644 --- a/doc/source/cluster/running-applications/job-submission/cli.rst +++ b/doc/source/cluster/running-applications/job-submission/cli.rst @@ -16,6 +16,12 @@ This section contains commands for :ref:`Ray Job Submission `. ``ray job submit --working_dir="." -- python script.py`` instead of ``ray job submit --working_dir="." -- "python script.py"``. Otherwise you may encounter the error ``/bin/sh: 1: python script.py: not found``. +.. warning:: + + The entrypoint command must be provided last, and any arguments to `ray job submit` must be provided before the entrypoint command. + For example, use ``ray job submit --working_dir="." -- python script.py`` instead of ``ray job submit -- python script.py --working_dir="."``. + This is to support the use of ``--`` to separate arguments to `ray job submit` from arguments to the entrypoint command. + .. _ray-job-status-doc: .. click:: ray.dashboard.modules.job.cli:status From 37790056f139ea0bc7d55af8fe6b14d46fefe3a6 Mon Sep 17 00:00:00 2001 From: Cuong Nguyen <128072568+can-anyscale@users.noreply.github.com> Date: Wed, 19 Apr 2023 01:44:52 -0700 Subject: [PATCH 002/424] [CI][Bisect][Easy/Urgent] Fix bisect (#34559) Fix a couple of issues: - Correct git command to get the list of revs including both boundaries - Correct the boundary of the remaining list after each bisect Previous code has issues with the boundaries. Added a test case that failed in previous code but pass in this new code. 
Signed-off-by: Cuong Nguyen --- release/ray_release/scripts/ray_bisect.py | 13 ++++---- release/ray_release/tests/test_bisect.py | 40 +++++++++++++---------- 2 files changed, 29 insertions(+), 24 deletions(-) diff --git a/release/ray_release/scripts/ray_bisect.py b/release/ray_release/scripts/ray_bisect.py index 58844d291531..03f1d28a5414 100644 --- a/release/ray_release/scripts/ray_bisect.py +++ b/release/ray_release/scripts/ray_bisect.py @@ -26,7 +26,7 @@ def main(test_name: str, passing_commit: str, failing_commit: str) -> None: def _bisect(test_name: str, commit_list: List[str]) -> str: test = _get_test(test_name) - while len(commit_list) > 1: + while len(commit_list) > 2: logger.info( f"Bisecting between {len(commit_list)} commits: " f"{commit_list[0]} to {commit_list[-1]}" @@ -35,9 +35,9 @@ def _bisect(test_name: str, commit_list: List[str]) -> str: middle_commit = commit_list[middle_commit_idx] is_passing = _run_test(test, middle_commit) if is_passing: - commit_list = commit_list[middle_commit_idx + 1 :] + commit_list = commit_list[middle_commit_idx:] else: - commit_list = commit_list[:middle_commit_idx] + commit_list = commit_list[: middle_commit_idx + 1] return commit_list[-1] @@ -86,17 +86,16 @@ def _get_test(test_name: str) -> Test: def _get_commit_lists(passing_commit: str, failing_commit: str) -> List[str]: - commit_lists = ( + # This command obtains all commits between inclusively + return ( subprocess.check_output( - f"git rev-list --ancestry-path {passing_commit}..{failing_commit}", + f"git rev-list --reverse ^{passing_commit}~ {failing_commit}", shell=True, ) .decode("utf-8") .strip() .split("\n") ) - commit_lists.reverse() - return commit_lists if __name__ == "__main__": diff --git a/release/ray_release/tests/test_bisect.py b/release/ray_release/tests/test_bisect.py index e6a8a3b22dd4..b6d0d0b6d8e5 100644 --- a/release/ray_release/tests/test_bisect.py +++ b/release/ray_release/tests/test_bisect.py @@ -3,23 +3,29 @@ def test_bisect(): - 
commit_to_test_result = { - "c0": True, - "c1": True, - "c2": True, - "c3": False, - "c4": False, + test_cases = { + "c3": { + "c0": True, + "c1": True, + "c3": False, + "c4": False, + }, + "c1": { + "c0": True, + "c1": False, + }, } - def _mock_run_test(test_name: str, commit: str) -> bool: - return commit_to_test_result[commit] + for output, input in test_cases.items(): - with mock.patch( - "ray_release.scripts.ray_bisect._run_test", - side_effect=_mock_run_test, - ), mock.patch( - "ray_release.scripts.ray_bisect._get_test", - return_value={}, - ): - blamed_commit = _bisect("test", list(commit_to_test_result.keys())) - assert blamed_commit == "c3" + def _mock_run_test(test_name: str, commit: str) -> bool: + return input[commit] + + with mock.patch( + "ray_release.scripts.ray_bisect._run_test", + side_effect=_mock_run_test, + ), mock.patch( + "ray_release.scripts.ray_bisect._get_test", + return_value={}, + ): + assert _bisect("test", list(input.keys())) == output From 011a0eb714fbe58ea462d2af357462597c102c64 Mon Sep 17 00:00:00 2001 From: Kai Fricke Date: Wed, 19 Apr 2023 13:13:33 +0100 Subject: [PATCH 003/424] [ci/release] GCE test variants for ml_user tests (#34465) Signed-off-by: Kai Fricke --- ...{compute_tpl.yaml => compute_tpl_aws.yaml} | 0 .../horovod/compute_tpl_gce.yaml | 24 +++++ ...{compute_tpl.yaml => compute_tpl_aws.yaml} | 0 .../ray-lightning/compute_tpl_gce.yaml | 24 +++++ .../ray-lightning/ray_lightning_user_test.py | 12 +-- ...{compute_tpl.yaml => compute_tpl_aws.yaml} | 0 .../ml_user_tests/train/compute_tpl_gce.yaml | 17 ++++ ...{compute_tpl.yaml => compute_tpl_aws.yaml} | 0 .../tune_rllib/compute_tpl_gce.yaml | 31 ++++++ ...ng.yaml => tpl_gpu_small_scaling_aws.yaml} | 0 .../xgboost/tpl_gpu_small_scaling_gce.yaml | 18 ++++ .../xgboost/train_gpu_connect.py | 12 +-- release/release_tests.yaml | 99 +++++++++++++++++-- 13 files changed, 206 insertions(+), 31 deletions(-) rename release/ml_user_tests/horovod/{compute_tpl.yaml => compute_tpl_aws.yaml} 
(100%) create mode 100644 release/ml_user_tests/horovod/compute_tpl_gce.yaml rename release/ml_user_tests/ray-lightning/{compute_tpl.yaml => compute_tpl_aws.yaml} (100%) create mode 100644 release/ml_user_tests/ray-lightning/compute_tpl_gce.yaml rename release/ml_user_tests/train/{compute_tpl.yaml => compute_tpl_aws.yaml} (100%) create mode 100644 release/ml_user_tests/train/compute_tpl_gce.yaml rename release/ml_user_tests/tune_rllib/{compute_tpl.yaml => compute_tpl_aws.yaml} (100%) create mode 100644 release/ml_user_tests/tune_rllib/compute_tpl_gce.yaml rename release/ml_user_tests/xgboost/{tpl_gpu_small_scaling.yaml => tpl_gpu_small_scaling_aws.yaml} (100%) create mode 100644 release/ml_user_tests/xgboost/tpl_gpu_small_scaling_gce.yaml diff --git a/release/ml_user_tests/horovod/compute_tpl.yaml b/release/ml_user_tests/horovod/compute_tpl_aws.yaml similarity index 100% rename from release/ml_user_tests/horovod/compute_tpl.yaml rename to release/ml_user_tests/horovod/compute_tpl_aws.yaml diff --git a/release/ml_user_tests/horovod/compute_tpl_gce.yaml b/release/ml_user_tests/horovod/compute_tpl_gce.yaml new file mode 100644 index 000000000000..2cad8d220fba --- /dev/null +++ b/release/ml_user_tests/horovod/compute_tpl_gce.yaml @@ -0,0 +1,24 @@ +cloud_id: {{env["ANYSCALE_CLOUD_ID"]}} +region: us-west1 +allowed_azs: + - us-west1-b + +max_workers: 3 + +head_node_type: + name: head_node + instance_type: n1-standard-4 + +worker_node_types: + - name: worker_node + instance_type: n1-standard-32-nvidia-tesla-t4-2 + max_workers: 3 + min_workers: 3 + use_spot: false + +#aws: +# TagSpecifications: +# - ResourceType: "instance" +# Tags: +# - Key: ttl-hours +# Value: '24' diff --git a/release/ml_user_tests/ray-lightning/compute_tpl.yaml b/release/ml_user_tests/ray-lightning/compute_tpl_aws.yaml similarity index 100% rename from release/ml_user_tests/ray-lightning/compute_tpl.yaml rename to release/ml_user_tests/ray-lightning/compute_tpl_aws.yaml diff --git 
a/release/ml_user_tests/ray-lightning/compute_tpl_gce.yaml b/release/ml_user_tests/ray-lightning/compute_tpl_gce.yaml new file mode 100644 index 000000000000..ffe5cfde17dc --- /dev/null +++ b/release/ml_user_tests/ray-lightning/compute_tpl_gce.yaml @@ -0,0 +1,24 @@ +cloud_id: {{env["ANYSCALE_CLOUD_ID"]}} +region: us-west1 +allowed_azs: + - us-west1-b + +max_workers: 2 + +head_node_type: + name: head_node + instance_type: n1-standard-32-nvidia-tesla-t4-2 + +worker_node_types: + - name: worker_node + instance_type: n1-standard-32-nvidia-tesla-t4-2 + min_workers: 2 + max_workers: 2 + use_spot: false + +#aws: +# TagSpecifications: +# - ResourceType: "instance" +# Tags: +# - Key: ttl-hours +# Value: '24' diff --git a/release/ml_user_tests/ray-lightning/ray_lightning_user_test.py b/release/ml_user_tests/ray-lightning/ray_lightning_user_test.py index 5357f9b5ba63..ee78efc9746e 100644 --- a/release/ml_user_tests/ray-lightning/ray_lightning_user_test.py +++ b/release/ml_user_tests/ray-lightning/ray_lightning_user_test.py @@ -8,23 +8,13 @@ if __name__ == "__main__": start = time.time() - addr = os.environ.get("RAY_ADDRESS") - job_name = os.environ.get("RAY_JOB_NAME", "ray_lightning_user_test") - - # Manually set NCCL_SOCKET_IFNAME to "ens3" so NCCL training works on - # anyscale_default_cloud. - # See https://github.com/pytorch/pytorch/issues/68893 for more details. # Passing in runtime_env to ray.init() will also set it for all the # workers. 
runtime_env = { - "env_vars": {"NCCL_SOCKET_IFNAME": "ens3"}, "working_dir": os.path.dirname(__file__), } - if addr.startswith("anyscale://"): - ray.init(address=addr, job_name=job_name, runtime_env=runtime_env) - else: - ray.init(address="auto", runtime_env=runtime_env) + ray.init(address="auto", runtime_env=runtime_env) main(num_workers=6, use_gpu=True, max_steps=50) diff --git a/release/ml_user_tests/train/compute_tpl.yaml b/release/ml_user_tests/train/compute_tpl_aws.yaml similarity index 100% rename from release/ml_user_tests/train/compute_tpl.yaml rename to release/ml_user_tests/train/compute_tpl_aws.yaml diff --git a/release/ml_user_tests/train/compute_tpl_gce.yaml b/release/ml_user_tests/train/compute_tpl_gce.yaml new file mode 100644 index 000000000000..57049d10efd4 --- /dev/null +++ b/release/ml_user_tests/train/compute_tpl_gce.yaml @@ -0,0 +1,17 @@ +cloud_id: {{env["ANYSCALE_CLOUD_ID"]}} +region: us-west1 +allowed_azs: + - us-west1-b + +max_workers: 2 + +head_node_type: + name: head_node + instance_type: n1-standard-32-nvidia-tesla-t4-2 + +worker_node_types: + - name: worker_node + instance_type: n1-standard-32-nvidia-tesla-t4-2 + min_workers: 2 + max_workers: 2 + use_spot: false diff --git a/release/ml_user_tests/tune_rllib/compute_tpl.yaml b/release/ml_user_tests/tune_rllib/compute_tpl_aws.yaml similarity index 100% rename from release/ml_user_tests/tune_rllib/compute_tpl.yaml rename to release/ml_user_tests/tune_rllib/compute_tpl_aws.yaml diff --git a/release/ml_user_tests/tune_rllib/compute_tpl_gce.yaml b/release/ml_user_tests/tune_rllib/compute_tpl_gce.yaml new file mode 100644 index 000000000000..dbab3926c68d --- /dev/null +++ b/release/ml_user_tests/tune_rllib/compute_tpl_gce.yaml @@ -0,0 +1,31 @@ +cloud_id: {{env["ANYSCALE_CLOUD_ID"]}} +region: us-west1 +allowed_azs: + - us-west1-b + +max_workers: 8 + +head_node_type: + name: head_node + instance_type: n1-standard-4 + +# We should be good with 2 GPUs and 50 CPUs. 
+worker_node_types: + - name: worker_node_cpu + instance_type: n1-standard-4 # 4 CPU + min_workers: 6 + max_workers: 6 + use_spot: false + - name: worker_node_gpu + instance_type: n1-standard-16-nvidia-tesla-t4-1 # 1 GPU and 16 CPU + min_workers: 2 + max_workers: 2 + use_spot: false + +gcp_advanced_configurations_json: + instance_properties: + disks: + - boot: true + auto_delete: true + initialize_params: + disk_size_gb: 500 diff --git a/release/ml_user_tests/xgboost/tpl_gpu_small_scaling.yaml b/release/ml_user_tests/xgboost/tpl_gpu_small_scaling_aws.yaml similarity index 100% rename from release/ml_user_tests/xgboost/tpl_gpu_small_scaling.yaml rename to release/ml_user_tests/xgboost/tpl_gpu_small_scaling_aws.yaml diff --git a/release/ml_user_tests/xgboost/tpl_gpu_small_scaling_gce.yaml b/release/ml_user_tests/xgboost/tpl_gpu_small_scaling_gce.yaml new file mode 100644 index 000000000000..a08bbb742f72 --- /dev/null +++ b/release/ml_user_tests/xgboost/tpl_gpu_small_scaling_gce.yaml @@ -0,0 +1,18 @@ +cloud_id: {{env["ANYSCALE_CLOUD_ID"]}} +region: us-west1 +allowed_azs: + - us-west1-b + +max_workers: 4 + +head_node_type: + name: head_node + instance_type: n1-standard-4 + +worker_node_types: + - name: worker_node + instance_type: n1-standard-16-nvidia-tesla-t4-1 + min_workers: 4 + max_workers: 4 + use_spot: false + diff --git a/release/ml_user_tests/xgboost/train_gpu_connect.py b/release/ml_user_tests/xgboost/train_gpu_connect.py index 4a2877a97b77..b3b6188636da 100644 --- a/release/ml_user_tests/xgboost/train_gpu_connect.py +++ b/release/ml_user_tests/xgboost/train_gpu_connect.py @@ -15,26 +15,16 @@ if __name__ == "__main__": os.environ["RXGB_PLACEMENT_GROUP_TIMEOUT_S"] = "1200" - addr = os.environ.get("RAY_ADDRESS") - job_name = os.environ.get("RAY_JOB_NAME", "train_gpu_connect") - - # Manually set NCCL_SOCKET_IFNAME to "ens3" so NCCL training works on - # anyscale_default_cloud. - # See https://github.com/pytorch/pytorch/issues/68893 for more details. 
# Passing in runtime_env to ray.init() will also set it for all the # workers. runtime_env = { "env_vars": { "RXGB_PLACEMENT_GROUP_TIMEOUT_S": "1200", - "NCCL_SOCKET_IFNAME": "ens3", }, "working_dir": os.path.dirname(__file__), } - if addr.startswith("anyscale://"): - ray.init(address=addr, job_name=job_name, runtime_env=runtime_env) - else: - ray.init(address="auto", runtime_env=runtime_env) + ray.init(address="auto", runtime_env=runtime_env) from xgboost_ray import RayParams from release_test_util import train_ray, get_parquet_files diff --git a/release/release_tests.yaml b/release/release_tests.yaml index 27a5b9c137c6..7897533cba41 100644 --- a/release/release_tests.yaml +++ b/release/release_tests.yaml @@ -1284,7 +1284,7 @@ cluster: cluster_env: horovod/app_config.yaml - cluster_compute: horovod/compute_tpl.yaml + cluster_compute: horovod/compute_tpl_aws.yaml driver_setup: horovod/driver_setup_latest.sh run: @@ -1293,6 +1293,15 @@ wait_for_nodes: num_nodes: 4 + variations: + - __suffix__: aws + - __suffix__: gce + env: gce + frequency: manual + cluster: + cluster_env: horovod/app_config.yaml + cluster_compute: horovod/compute_tpl_gce.yaml + alert: default - name: ml_user_horovod_user_test_master @@ -1304,7 +1313,7 @@ cluster: cluster_env: horovod/app_config_master.yaml - cluster_compute: horovod/compute_tpl.yaml + cluster_compute: horovod/compute_tpl_aws.yaml driver_setup: horovod/driver_setup_master.sh run: @@ -1313,6 +1322,15 @@ wait_for_nodes: num_nodes: 4 + variations: + - __suffix__: aws + - __suffix__: gce + env: gce + frequency: manual + cluster: + cluster_env: horovod/app_config_master.yaml + cluster_compute: horovod/compute_tpl_gce.yaml + alert: default - name: ml_user_train_tensorflow_mnist_test @@ -1324,7 +1342,7 @@ cluster: cluster_env: train/app_config.yaml - cluster_compute: train/compute_tpl.yaml + cluster_compute: train/compute_tpl_aws.yaml driver_setup: train/driver_setup.sh run: @@ -1333,6 +1351,15 @@ wait_for_nodes: num_nodes: 3 + variations: 
+ - __suffix__: aws + - __suffix__: gce + env: gce + frequency: manual + cluster: + cluster_env: train/app_config.yaml + cluster_compute: train/compute_tpl_gce.yaml + alert: default - name: ml_user_train_torch_linear_test @@ -1344,7 +1371,7 @@ cluster: cluster_env: train/app_config.yaml - cluster_compute: train/compute_tpl.yaml + cluster_compute: train/compute_tpl_aws.yaml driver_setup: train/driver_setup.sh run: @@ -1353,6 +1380,15 @@ wait_for_nodes: num_nodes: 3 + variations: + - __suffix__: aws + - __suffix__: gce + env: gce + frequency: manual + cluster: + cluster_env: train/app_config.yaml + cluster_compute: train/compute_tpl_gce.yaml + alert: default - name: ml_user_xgboost_gpu_connect_latest @@ -1364,7 +1400,7 @@ cluster: cluster_env: xgboost/app_config_gpu.yaml - cluster_compute: xgboost/tpl_gpu_small_scaling.yaml + cluster_compute: xgboost/tpl_gpu_small_scaling_aws.yaml run: timeout: 1200 @@ -1372,6 +1408,15 @@ wait_for_nodes: num_nodes: 5 + variations: + - __suffix__: aws + - __suffix__: gce + env: gce + frequency: manual + cluster: + cluster_env: xgboost/app_config_gpu.yaml + cluster_compute: xgboost/tpl_gpu_small_scaling_gce.yaml + alert: default - name: ml_user_xgboost_gpu_connect_master @@ -1383,7 +1428,7 @@ cluster: cluster_env: xgboost/app_config_gpu_master.yaml - cluster_compute: xgboost/tpl_gpu_small_scaling.yaml + cluster_compute: xgboost/tpl_gpu_small_scaling_aws.yaml run: timeout: 1200 @@ -1391,6 +1436,15 @@ wait_for_nodes: num_nodes: 5 + variations: + - __suffix__: aws + - __suffix__: gce + env: gce + frequency: manual + cluster: + cluster_env: xgboost/app_config_gpu_master.yaml + cluster_compute: xgboost/tpl_gpu_small_scaling_gce.yaml + alert: default - name: ml_user_ray_lightning_user_test_latest @@ -1402,7 +1456,7 @@ cluster: cluster_env: ray-lightning/app_config.yaml - cluster_compute: ray-lightning/compute_tpl.yaml + cluster_compute: ray-lightning/compute_tpl_aws.yaml driver_setup: ray-lightning/driver_setup.sh run: @@ -1411,6 +1465,15 @@ 
wait_for_nodes: num_nodes: 3 + variations: + - __suffix__: aws + - __suffix__: gce + env: gce + frequency: manual + cluster: + cluster_env: ray-lightning/app_config.yaml + cluster_compute: ray-lightning/compute_tpl_gce.yaml + alert: default - name: ml_user_ray_lightning_user_test_master @@ -1422,7 +1485,7 @@ cluster: cluster_env: ray-lightning/app_config_master.yaml - cluster_compute: ray-lightning/compute_tpl.yaml + cluster_compute: ray-lightning/compute_tpl_aws.yaml driver_setup: ray-lightning/driver_setup.sh run: @@ -1431,6 +1494,15 @@ wait_for_nodes: num_nodes: 3 + variations: + - __suffix__: aws + - __suffix__: gce + env: gce + frequency: manual + cluster: + cluster_env: ray-lightning/app_config_master.yaml + cluster_compute: ray-lightning/compute_tpl_gce.yaml + alert: default - name: ml_user_tune_rllib_connect_test @@ -1442,7 +1514,7 @@ cluster: cluster_env: ../rllib_tests/app_config.yaml - cluster_compute: tune_rllib/compute_tpl.yaml + cluster_compute: tune_rllib/compute_tpl_aws.yaml driver_setup: tune_rllib/driver_setup.sh run: @@ -1451,6 +1523,15 @@ wait_for_nodes: num_nodes: 9 + variations: + - __suffix__: aws + - __suffix__: gce + env: gce + frequency: manual + cluster: + cluster_env: ../rllib_tests/app_config.yaml + cluster_compute: tune_rllib/compute_tpl_gce.yaml + alert: default ####################### From eba6747799d4771c4ebb6383079046276de7444c Mon Sep 17 00:00:00 2001 From: Chen Shen Date: Wed, 19 Apr 2023 05:30:43 -0700 Subject: [PATCH 004/424] [Core][easy] disable test not suppose to work with ray client #34556 this env doesn't work with ray client. 
--- python/ray/tests/test_basic_5.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/python/ray/tests/test_basic_5.py b/python/ray/tests/test_basic_5.py index 20aee63e2983..0d88f03ce0b5 100644 --- a/python/ray/tests/test_basic_5.py +++ b/python/ray/tests/test_basic_5.py @@ -15,6 +15,7 @@ from ray._private.test_utils import ( run_string_as_driver, wait_for_pid_to_exit, + client_test_enabled, ) logger = logging.getLogger(__name__) @@ -362,6 +363,7 @@ def verify_imports(latch): ray.get(futures) +@pytest.mark.skipif(client_test_enabled(), reason="only server mode") def test_gcs_port_env(): try: with unittest.mock.patch.dict(os.environ): From c182b35928e7aa02ea2aa2f04b10d0f553f80ac0 Mon Sep 17 00:00:00 2001 From: Kai Fricke Date: Wed, 19 Apr 2023 15:38:51 +0100 Subject: [PATCH 005/424] [ci/release] GCE test variants for air_benchmark and air_examples (#34466) Signed-off-by: Kai Fricke --- ...mpute.yaml => dreambooth_compute_aws.yaml} | 0 ...e.yaml => gptj_deepspeed_compute_aws.yaml} | 0 .../gptj_deepspeed_compute_gce.yaml | 22 +++ ...pute_cpu_1.yaml => compute_cpu_1_aws.yaml} | 0 .../air_benchmarks/compute_cpu_1_gce.yaml | 12 ++ ...pute_cpu_4.yaml => compute_cpu_4_aws.yaml} | 0 ..._4_g4_12xl.yaml => compute_cpu_4_gce.yaml} | 8 +- ...pute_cpu_8.yaml => compute_cpu_8_aws.yaml} | 0 ..._8_g4_12xl.yaml => compute_cpu_8_gce.yaml} | 8 +- ...es.yaml => compute_data_20_nodes_aws.yaml} | 0 .../compute_data_20_nodes_gce.yaml | 17 ++ .../compute_gce_gpu_4_g4_12xl.yaml | 17 -- ...pute_gpu_1.yaml => compute_gpu_1_aws.yaml} | 0 ...8xl.yaml => compute_gpu_1_cpu_16_aws.yaml} | 2 +- ...8xl.yaml => compute_gpu_1_cpu_16_gce.yaml} | 0 ..._gce_gpu_1.yaml => compute_gpu_1_gce.yaml} | 2 +- ..._gpu_2x2.yaml => compute_gpu_2x2_aws.yaml} | 0 .../air_benchmarks/compute_gpu_2x2_gce.yaml | 17 ++ .../air_benchmarks/compute_gpu_4x4.yaml | 15 -- ...e_gpu_16.yaml => compute_gpu_4x4_aws.yaml} | 7 +- ...e_gpu_16.yaml => compute_gpu_4x4_gce.yaml} | 6 +- ...pute_tpl.yaml => compute_xgboost_aws.yaml} | 0 
.../air_benchmarks/compute_xgboost_gce.yaml | 28 +++ release/release_tests.yaml | 166 +++++++++++++++--- 24 files changed, 252 insertions(+), 75 deletions(-) rename release/air_examples/dreambooth/{dreambooth_compute.yaml => dreambooth_compute_aws.yaml} (100%) rename release/air_examples/gptj_deepspeed_finetuning/{gptj_deepspeed_compute.yaml => gptj_deepspeed_compute_aws.yaml} (100%) create mode 100644 release/air_examples/gptj_deepspeed_finetuning/gptj_deepspeed_compute_gce.yaml rename release/air_tests/air_benchmarks/{compute_cpu_1.yaml => compute_cpu_1_aws.yaml} (100%) create mode 100644 release/air_tests/air_benchmarks/compute_cpu_1_gce.yaml rename release/air_tests/air_benchmarks/{compute_cpu_4.yaml => compute_cpu_4_aws.yaml} (100%) rename release/air_tests/air_benchmarks/{compute_gpu_4_g4_12xl.yaml => compute_cpu_4_gce.yaml} (63%) rename release/air_tests/air_benchmarks/{compute_cpu_8.yaml => compute_cpu_8_aws.yaml} (100%) rename release/air_tests/air_benchmarks/{compute_gpu_8_g4_12xl.yaml => compute_cpu_8_gce.yaml} (63%) rename release/air_tests/air_benchmarks/{data_20_nodes.yaml => compute_data_20_nodes_aws.yaml} (100%) create mode 100644 release/air_tests/air_benchmarks/compute_data_20_nodes_gce.yaml delete mode 100644 release/air_tests/air_benchmarks/compute_gce_gpu_4_g4_12xl.yaml rename release/air_tests/air_benchmarks/{compute_gpu_1.yaml => compute_gpu_1_aws.yaml} (100%) rename release/air_tests/air_benchmarks/{compute_gpu_1_g4_8xl.yaml => compute_gpu_1_cpu_16_aws.yaml} (80%) rename release/air_tests/air_benchmarks/{compute_gce_gpu_1_g4_8xl.yaml => compute_gpu_1_cpu_16_gce.yaml} (100%) rename release/air_tests/air_benchmarks/{compute_gce_gpu_1.yaml => compute_gpu_1_gce.yaml} (82%) rename release/air_tests/air_benchmarks/{compute_gpu_2x2.yaml => compute_gpu_2x2_aws.yaml} (100%) create mode 100644 release/air_tests/air_benchmarks/compute_gpu_2x2_gce.yaml delete mode 100644 release/air_tests/air_benchmarks/compute_gpu_4x4.yaml rename 
release/air_tests/air_benchmarks/{compute_gpu_16.yaml => compute_gpu_4x4_aws.yaml} (77%) rename release/air_tests/air_benchmarks/{compute_gce_gpu_16.yaml => compute_gpu_4x4_gce.yaml} (72%) rename release/air_tests/air_benchmarks/{xgboost_compute_tpl.yaml => compute_xgboost_aws.yaml} (100%) create mode 100644 release/air_tests/air_benchmarks/compute_xgboost_gce.yaml diff --git a/release/air_examples/dreambooth/dreambooth_compute.yaml b/release/air_examples/dreambooth/dreambooth_compute_aws.yaml similarity index 100% rename from release/air_examples/dreambooth/dreambooth_compute.yaml rename to release/air_examples/dreambooth/dreambooth_compute_aws.yaml diff --git a/release/air_examples/gptj_deepspeed_finetuning/gptj_deepspeed_compute.yaml b/release/air_examples/gptj_deepspeed_finetuning/gptj_deepspeed_compute_aws.yaml similarity index 100% rename from release/air_examples/gptj_deepspeed_finetuning/gptj_deepspeed_compute.yaml rename to release/air_examples/gptj_deepspeed_finetuning/gptj_deepspeed_compute_aws.yaml diff --git a/release/air_examples/gptj_deepspeed_finetuning/gptj_deepspeed_compute_gce.yaml b/release/air_examples/gptj_deepspeed_finetuning/gptj_deepspeed_compute_gce.yaml new file mode 100644 index 000000000000..036b337e92e8 --- /dev/null +++ b/release/air_examples/gptj_deepspeed_finetuning/gptj_deepspeed_compute_gce.yaml @@ -0,0 +1,22 @@ +cloud_id: {{env["ANYSCALE_CLOUD_ID"]}} +region: us-west1 +allowed_azs: + - us-west1-b + +head_node_type: + name: head_node + instance_type: n1-standard-16-nvidia-tesla-t4-1 + +worker_node_types: + - name: worker_node + instance_type: n1-standard-16-nvidia-tesla-t4-1 + min_workers: 15 + max_workers: 15 + use_spot: false + +#aws: +# TagSpecifications: +# - ResourceType: "instance" +# Tags: +# - Key: ttl-hours +# Value: '24' diff --git a/release/air_tests/air_benchmarks/compute_cpu_1.yaml b/release/air_tests/air_benchmarks/compute_cpu_1_aws.yaml similarity index 100% rename from 
release/air_tests/air_benchmarks/compute_cpu_1.yaml rename to release/air_tests/air_benchmarks/compute_cpu_1_aws.yaml diff --git a/release/air_tests/air_benchmarks/compute_cpu_1_gce.yaml b/release/air_tests/air_benchmarks/compute_cpu_1_gce.yaml new file mode 100644 index 000000000000..90de98eb18e6 --- /dev/null +++ b/release/air_tests/air_benchmarks/compute_cpu_1_gce.yaml @@ -0,0 +1,12 @@ +cloud_id: {{env["ANYSCALE_CLOUD_ID"]}} +region: us-west1 +allowed_azs: + - us-west1-b + +max_workers: 0 + +head_node_type: + name: head_node + instance_type: n1-standard-8 + +worker_node_types: [] diff --git a/release/air_tests/air_benchmarks/compute_cpu_4.yaml b/release/air_tests/air_benchmarks/compute_cpu_4_aws.yaml similarity index 100% rename from release/air_tests/air_benchmarks/compute_cpu_4.yaml rename to release/air_tests/air_benchmarks/compute_cpu_4_aws.yaml diff --git a/release/air_tests/air_benchmarks/compute_gpu_4_g4_12xl.yaml b/release/air_tests/air_benchmarks/compute_cpu_4_gce.yaml similarity index 63% rename from release/air_tests/air_benchmarks/compute_gpu_4_g4_12xl.yaml rename to release/air_tests/air_benchmarks/compute_cpu_4_gce.yaml index 0bb94dc6c3dc..03f5772f88be 100644 --- a/release/air_tests/air_benchmarks/compute_gpu_4_g4_12xl.yaml +++ b/release/air_tests/air_benchmarks/compute_cpu_4_gce.yaml @@ -1,15 +1,17 @@ cloud_id: {{env["ANYSCALE_CLOUD_ID"]}} -region: us-west-2 +region: us-west1 +allowed_azs: + - us-west1-b max_workers: 3 head_node_type: name: head_node - instance_type: g4dn.12xlarge + instance_type: n1-standard-8 worker_node_types: - name: worker_node - instance_type: g4dn.12xlarge + instance_type: n1-standard-8 max_workers: 3 min_workers: 3 use_spot: false diff --git a/release/air_tests/air_benchmarks/compute_cpu_8.yaml b/release/air_tests/air_benchmarks/compute_cpu_8_aws.yaml similarity index 100% rename from release/air_tests/air_benchmarks/compute_cpu_8.yaml rename to release/air_tests/air_benchmarks/compute_cpu_8_aws.yaml diff --git 
a/release/air_tests/air_benchmarks/compute_gpu_8_g4_12xl.yaml b/release/air_tests/air_benchmarks/compute_cpu_8_gce.yaml similarity index 63% rename from release/air_tests/air_benchmarks/compute_gpu_8_g4_12xl.yaml rename to release/air_tests/air_benchmarks/compute_cpu_8_gce.yaml index 630fe5690d39..b15168fcb260 100644 --- a/release/air_tests/air_benchmarks/compute_gpu_8_g4_12xl.yaml +++ b/release/air_tests/air_benchmarks/compute_cpu_8_gce.yaml @@ -1,15 +1,17 @@ cloud_id: {{env["ANYSCALE_CLOUD_ID"]}} -region: us-west-2 +region: us-west1 +allowed_azs: + - us-west1-b max_workers: 7 head_node_type: name: head_node - instance_type: g4dn.12xlarge + instance_type: n1-standard-8 worker_node_types: - name: worker_node - instance_type: g4dn.12xlarge + instance_type: n1-standard-8 max_workers: 7 min_workers: 7 use_spot: false diff --git a/release/air_tests/air_benchmarks/data_20_nodes.yaml b/release/air_tests/air_benchmarks/compute_data_20_nodes_aws.yaml similarity index 100% rename from release/air_tests/air_benchmarks/data_20_nodes.yaml rename to release/air_tests/air_benchmarks/compute_data_20_nodes_aws.yaml diff --git a/release/air_tests/air_benchmarks/compute_data_20_nodes_gce.yaml b/release/air_tests/air_benchmarks/compute_data_20_nodes_gce.yaml new file mode 100644 index 000000000000..1248701435d0 --- /dev/null +++ b/release/air_tests/air_benchmarks/compute_data_20_nodes_gce.yaml @@ -0,0 +1,17 @@ +cloud_id: {{env["ANYSCALE_CLOUD_ID"]}} +region: us-west1 +allowed_azs: + - us-west1-b + +max_workers: 19 + +head_node_type: + name: head_node + instance_type: n1-standard-16 + +worker_node_types: + - name: worker_node + instance_type: n1-standard-16 + max_workers: 19 + min_workers: 19 + use_spot: false diff --git a/release/air_tests/air_benchmarks/compute_gce_gpu_4_g4_12xl.yaml b/release/air_tests/air_benchmarks/compute_gce_gpu_4_g4_12xl.yaml deleted file mode 100644 index 4182417f7ce7..000000000000 --- a/release/air_tests/air_benchmarks/compute_gce_gpu_4_g4_12xl.yaml +++ 
/dev/null @@ -1,17 +0,0 @@ -cloud_id: {{env["ANYSCALE_CLOUD_ID"]}} -region: us-west1 -allowed_azs: - - us-west1-b - -max_workers: 3 - -head_node_type: - name: head_node - instance_type: n1-standard-64-nvidia-tesla-t4-4 # g4dn.12xlarge - -worker_node_types: - - name: worker_node - instance_type: n1-standard-64-nvidia-tesla-t4-4 # g4dn.12xlarge - max_workers: 3 - min_workers: 3 - use_spot: false diff --git a/release/air_tests/air_benchmarks/compute_gpu_1.yaml b/release/air_tests/air_benchmarks/compute_gpu_1_aws.yaml similarity index 100% rename from release/air_tests/air_benchmarks/compute_gpu_1.yaml rename to release/air_tests/air_benchmarks/compute_gpu_1_aws.yaml diff --git a/release/air_tests/air_benchmarks/compute_gpu_1_g4_8xl.yaml b/release/air_tests/air_benchmarks/compute_gpu_1_cpu_16_aws.yaml similarity index 80% rename from release/air_tests/air_benchmarks/compute_gpu_1_g4_8xl.yaml rename to release/air_tests/air_benchmarks/compute_gpu_1_cpu_16_aws.yaml index b4de6db623ed..e38dc1a84d88 100644 --- a/release/air_tests/air_benchmarks/compute_gpu_1_g4_8xl.yaml +++ b/release/air_tests/air_benchmarks/compute_gpu_1_cpu_16_aws.yaml @@ -5,6 +5,6 @@ max_workers: 0 head_node_type: name: head_node - instance_type: g4dn.8xlarge + instance_type: g4dn.4xlarge worker_node_types: [] diff --git a/release/air_tests/air_benchmarks/compute_gce_gpu_1_g4_8xl.yaml b/release/air_tests/air_benchmarks/compute_gpu_1_cpu_16_gce.yaml similarity index 100% rename from release/air_tests/air_benchmarks/compute_gce_gpu_1_g4_8xl.yaml rename to release/air_tests/air_benchmarks/compute_gpu_1_cpu_16_gce.yaml diff --git a/release/air_tests/air_benchmarks/compute_gce_gpu_1.yaml b/release/air_tests/air_benchmarks/compute_gpu_1_gce.yaml similarity index 82% rename from release/air_tests/air_benchmarks/compute_gce_gpu_1.yaml rename to release/air_tests/air_benchmarks/compute_gpu_1_gce.yaml index a4e3746e2cbf..4776275bbc19 100644 --- a/release/air_tests/air_benchmarks/compute_gce_gpu_1.yaml +++ 
b/release/air_tests/air_benchmarks/compute_gpu_1_gce.yaml @@ -7,7 +7,7 @@ max_workers: 0 head_node_type: name: head_node - instance_type: n1-standard-32-nvidia-tesla-t4-2 # aws g3.8xlarge + instance_type: n1-standard-32-nvidia-tesla-t4-2 worker_node_types: [] diff --git a/release/air_tests/air_benchmarks/compute_gpu_2x2.yaml b/release/air_tests/air_benchmarks/compute_gpu_2x2_aws.yaml similarity index 100% rename from release/air_tests/air_benchmarks/compute_gpu_2x2.yaml rename to release/air_tests/air_benchmarks/compute_gpu_2x2_aws.yaml diff --git a/release/air_tests/air_benchmarks/compute_gpu_2x2_gce.yaml b/release/air_tests/air_benchmarks/compute_gpu_2x2_gce.yaml new file mode 100644 index 000000000000..3bf0b4eca9d0 --- /dev/null +++ b/release/air_tests/air_benchmarks/compute_gpu_2x2_gce.yaml @@ -0,0 +1,17 @@ +cloud_id: {{env["ANYSCALE_CLOUD_ID"]}} +region: us-west1 +allowed_azs: + - us-west1-b + +max_workers: 1 + +head_node_type: + name: head_node + instance_type: n1-standard-32-nvidia-tesla-t4-2 + +worker_node_types: + - name: worker_node + instance_type: n1-standard-32-nvidia-tesla-t4-2 + max_workers: 1 + min_workers: 1 + use_spot: false diff --git a/release/air_tests/air_benchmarks/compute_gpu_4x4.yaml b/release/air_tests/air_benchmarks/compute_gpu_4x4.yaml deleted file mode 100644 index 0bb94dc6c3dc..000000000000 --- a/release/air_tests/air_benchmarks/compute_gpu_4x4.yaml +++ /dev/null @@ -1,15 +0,0 @@ -cloud_id: {{env["ANYSCALE_CLOUD_ID"]}} -region: us-west-2 - -max_workers: 3 - -head_node_type: - name: head_node - instance_type: g4dn.12xlarge - -worker_node_types: - - name: worker_node - instance_type: g4dn.12xlarge - max_workers: 3 - min_workers: 3 - use_spot: false diff --git a/release/air_tests/air_benchmarks/compute_gpu_16.yaml b/release/air_tests/air_benchmarks/compute_gpu_4x4_aws.yaml similarity index 77% rename from release/air_tests/air_benchmarks/compute_gpu_16.yaml rename to release/air_tests/air_benchmarks/compute_gpu_4x4_aws.yaml index 
e19da50cc89d..ee7d1436e7cf 100644 --- a/release/air_tests/air_benchmarks/compute_gpu_16.yaml +++ b/release/air_tests/air_benchmarks/compute_gpu_4x4_aws.yaml @@ -5,11 +5,11 @@ max_workers: 3 head_node_type: name: head_node - instance_type: g3.16xlarge + instance_type: g4dn.12xlarge worker_node_types: - name: worker_node - instance_type: g3.16xlarge + instance_type: g4dn.12xlarge max_workers: 3 min_workers: 3 use_spot: false @@ -22,5 +22,4 @@ aws: VolumeSize: 800 Iops: 5000 Throughput: 1000 - VolumeSize: 1000 - VolumeType: gp3 + VolumeType: gp3 \ No newline at end of file diff --git a/release/air_tests/air_benchmarks/compute_gce_gpu_16.yaml b/release/air_tests/air_benchmarks/compute_gpu_4x4_gce.yaml similarity index 72% rename from release/air_tests/air_benchmarks/compute_gce_gpu_16.yaml rename to release/air_tests/air_benchmarks/compute_gpu_4x4_gce.yaml index 8f94d0691021..5702b44d240e 100644 --- a/release/air_tests/air_benchmarks/compute_gce_gpu_16.yaml +++ b/release/air_tests/air_benchmarks/compute_gpu_4x4_gce.yaml @@ -1,17 +1,17 @@ cloud_id: {{env["ANYSCALE_CLOUD_ID"]}} region: us-west1 -allowed_azs: +allowed_azs: - us-west1-b max_workers: 3 head_node_type: name: head_node - instance_type: n1-standard-64-nvidia-tesla-t4-4 # aws g3.16xlarge + instance_type: n1-standard-64-nvidia-tesla-t4-4 worker_node_types: - name: worker_node - instance_type: n1-standard-64-nvidia-tesla-t4-4 # aws g3.16xlarge + instance_type: n1-standard-64-nvidia-tesla-t4-4 max_workers: 3 min_workers: 3 use_spot: false diff --git a/release/air_tests/air_benchmarks/xgboost_compute_tpl.yaml b/release/air_tests/air_benchmarks/compute_xgboost_aws.yaml similarity index 100% rename from release/air_tests/air_benchmarks/xgboost_compute_tpl.yaml rename to release/air_tests/air_benchmarks/compute_xgboost_aws.yaml diff --git a/release/air_tests/air_benchmarks/compute_xgboost_gce.yaml b/release/air_tests/air_benchmarks/compute_xgboost_gce.yaml new file mode 100644 index 000000000000..13159b6cc420 --- 
/dev/null +++ b/release/air_tests/air_benchmarks/compute_xgboost_gce.yaml @@ -0,0 +1,28 @@ +cloud_id: {{env["ANYSCALE_CLOUD_ID"]}} +region: us-west1 +allowed_azs: + - us-west1-b + +max_workers: 10 + +head_node_type: + name: head_node + instance_type: n1-standard-8 + resources: + cpu: 0 + + +worker_node_types: + - name: worker_node + instance_type: n1-standard-16 + max_workers: 10 + min_workers: 10 + use_spot: false + +gcp_advanced_configurations_json: + instance_properties: + disks: + - boot: true + auto_delete: true + initialize_params: + disk_size_gb: 1000 diff --git a/release/release_tests.yaml b/release/release_tests.yaml index 7897533cba41..342f43a456bc 100644 --- a/release/release_tests.yaml +++ b/release/release_tests.yaml @@ -190,7 +190,7 @@ cluster: cluster_env: app_config.yaml - cluster_compute: data_20_nodes.yaml + cluster_compute: compute_data_20_nodes_aws.yaml run: timeout: 3600 @@ -199,6 +199,14 @@ wait_for_nodes: num_nodes: 20 + variations: + - __suffix__: aws + - __suffix__: gce + env: gce + frequency: manual + cluster: + cluster_env: app_config.yaml + cluster_compute: compute_data_20_nodes_gce.yaml alert: default @@ -212,7 +220,7 @@ cluster: cluster_env: xgboost_app_config.yaml - cluster_compute: xgboost_compute_tpl.yaml + cluster_compute: compute_xgboost_aws.yaml run: timeout: 36000 @@ -221,6 +229,14 @@ wait_for_nodes: num_nodes: 11 + variations: + - __suffix__: aws + - __suffix__: gce + env: gce + frequency: manual + cluster: + cluster_env: xgboost_app_config.yaml + cluster_compute: compute_xgboost_gce.yaml smoke_test: frequency: manual @@ -240,7 +256,7 @@ cluster: cluster_env: app_config.yaml - cluster_compute: compute_cpu_4.yaml + cluster_compute: compute_cpu_4_aws.yaml run: timeout: 3600 @@ -249,6 +265,15 @@ wait_for_nodes: num_nodes: 4 + variations: + - __suffix__: aws + - __suffix__: gce + env: gce + frequency: manual + cluster: + cluster_env: app_config.yaml + cluster_compute: compute_cpu_4_gce.yaml + alert: default - name: 
air_benchmark_torch_mnist_gpu_4x4 @@ -260,7 +285,7 @@ cluster: cluster_env: app_config.yaml - cluster_compute: compute_gpu_4x4.yaml + cluster_compute: compute_gpu_4x4_aws.yaml run: timeout: 4800 @@ -273,7 +298,7 @@ frequency: nightly cluster: - cluster_compute: compute_gpu_2x2.yaml + cluster_compute: compute_gpu_2x2_aws.yaml run: timeout: 3600 @@ -282,6 +307,17 @@ wait_for_nodes: num_nodes: 2 + variations: + - __suffix__: aws + - __suffix__: gce + env: gce + frequency: manual + cluster: + cluster_env: app_config.yaml + cluster_compute: compute_gpu_4x4_gce.yaml + smoke_test: + frequency: manual + alert: default @@ -294,12 +330,20 @@ cluster: cluster_env: app_config.yaml - cluster_compute: compute_cpu_1.yaml + cluster_compute: compute_cpu_1_aws.yaml run: timeout: 3600 script: python workloads/torch_benchmark.py run --num-runs 3 --num-epochs 20 --num-workers 4 --cpus-per-worker 2 + variations: + - __suffix__: aws + - __suffix__: gce + env: gce + frequency: manual + cluster: + cluster_env: app_config.yaml + cluster_compute: compute_cpu_1_gce.yaml alert: default @@ -313,13 +357,12 @@ cluster: cluster_env: app_config.yaml - cluster_compute: compute_gpu_1_g4_8xl.yaml + cluster_compute: compute_gpu_1_cpu_16_aws.yaml run: timeout: 3600 script: python workloads/gpu_batch_prediction.py --data-size-gb 20 - alert: default variations: @@ -329,7 +372,7 @@ frequency: manual cluster: cluster_env: app_config.yaml - cluster_compute: compute_gce_gpu_1_g4_8xl.yaml + cluster_compute: compute_gpu_1_cpu_16_gce.yaml - name: air_benchmark_torch_batch_prediction_gpu_4x4_100gb @@ -343,7 +386,7 @@ cluster: cluster_env: app_config.yaml - cluster_compute: compute_gpu_4_g4_12xl.yaml + cluster_compute: compute_gpu_4x4_aws.yaml run: timeout: 10800 @@ -361,7 +404,7 @@ frequency: manual cluster: cluster_env: app_config.yaml - cluster_compute: compute_gce_gpu_4_g4_12xl.yaml + cluster_compute: compute_gpu_4x4_gce.yaml - name: air_benchmark_torch_mnist_cpu_4x4 group: AIR tests @@ -372,7 +415,7 @@ 
cluster: cluster_env: app_config.yaml - cluster_compute: compute_cpu_4.yaml + cluster_compute: compute_cpu_4_aws.yaml run: timeout: 5400 @@ -381,10 +424,17 @@ wait_for_nodes: num_nodes: 4 + variations: + - __suffix__: aws + - __suffix__: gce + env: gce + frequency: manual + cluster: + cluster_env: app_config.yaml + cluster_compute: compute_cpu_4_gce.yaml alert: default - - name: air_benchmark_tune_torch_mnist group: AIR tests working_dir: air_tests/air_benchmarks @@ -394,7 +444,7 @@ cluster: cluster_env: app_config.yaml - cluster_compute: compute_cpu_8.yaml + cluster_compute: compute_cpu_8_aws.yaml run: timeout: 3600 @@ -403,6 +453,14 @@ wait_for_nodes: num_nodes: 8 + variations: + - __suffix__: aws + - __suffix__: gce + env: gce + frequency: manual + cluster: + cluster_env: app_config.yaml + cluster_compute: compute_cpu_8_gce.yaml alert: default @@ -415,7 +473,7 @@ cluster: cluster_env: app_config.yaml - cluster_compute: compute_gpu_4_g4_12xl.yaml + cluster_compute: compute_gpu_4x4_aws.yaml run: timeout: 3600 @@ -424,6 +482,14 @@ wait_for_nodes: num_nodes: 4 + variations: + - __suffix__: aws + - __suffix__: gce + env: gce + frequency: manual + cluster: + cluster_env: app_config.yaml + cluster_compute: compute_gpu_4x4_gce.yaml alert: default @@ -437,7 +503,7 @@ cluster: cluster_env: app_config.yaml - cluster_compute: compute_cpu_4.yaml + cluster_compute: compute_cpu_4_aws.yaml run: timeout: 5400 @@ -446,6 +512,14 @@ wait_for_nodes: num_nodes: 4 + variations: + - __suffix__: aws + - __suffix__: gce + env: gce + frequency: manual + cluster: + cluster_env: app_config.yaml + cluster_compute: compute_cpu_4_gce.yaml alert: default @@ -459,12 +533,20 @@ cluster: cluster_env: app_config.yaml - cluster_compute: compute_cpu_1.yaml + cluster_compute: compute_cpu_1_aws.yaml run: timeout: 5400 script: python workloads/tensorflow_benchmark.py run --num-runs 3 --num-epochs 20 --num-workers 4 --cpus-per-worker 2 + variations: + - __suffix__: aws + - __suffix__: gce + env: gce + 
frequency: manual + cluster: + cluster_env: app_config.yaml + cluster_compute: compute_cpu_1_gce.yaml alert: default @@ -480,7 +562,7 @@ cluster: cluster_env: app_config.yaml - cluster_compute: compute_cpu_4.yaml + cluster_compute: compute_cpu_4_aws.yaml run: timeout: 5400 @@ -489,6 +571,14 @@ wait_for_nodes: num_nodes: 4 + variations: + - __suffix__: aws + - __suffix__: gce + env: gce + frequency: manual + cluster: + cluster_env: app_config.yaml + cluster_compute: compute_cpu_4_gce.yaml alert: default @@ -504,7 +594,7 @@ cluster: cluster_env: app_config.yaml - cluster_compute: compute_gpu_4x4.yaml + cluster_compute: compute_gpu_4x4_aws.yaml run: timeout: 5400 @@ -518,7 +608,7 @@ frequency: nightly cluster: - cluster_compute: compute_gpu_2x2.yaml + cluster_compute: compute_gpu_2x2_aws.yaml run: script: python workloads/tensorflow_benchmark.py run --num-runs 3 --num-epochs 60 --num-workers 4 --cpus-per-worker 4 --batch-size 512 --use-gpu @@ -526,8 +616,18 @@ wait_for_nodes: num_nodes: 2 - alert: default + variations: + - __suffix__: aws + - __suffix__: gce + env: gce + frequency: manual + cluster: + cluster_env: app_config.yaml + cluster_compute: compute_gpu_4x4_gce.yaml + smoke_test: + frequency: manual + alert: default - name: air_benchmark_pytorch_training_e2e_gpu_1x1_20gb group: AIR tests @@ -538,13 +638,12 @@ cluster: cluster_env: app_config.yaml - cluster_compute: compute_gpu_1.yaml + cluster_compute: compute_gpu_1_aws.yaml run: timeout: 3600 script: python workloads/pytorch_training_e2e.py --data-size-gb 20 - alert: default variations: @@ -554,7 +653,7 @@ frequency: manual cluster: cluster_env: app_config.yaml - cluster_compute: compute_gce_gpu_1.yaml + cluster_compute: compute_gpu_1_gce.yaml - name: air_benchmark_pytorch_training_e2e_gpu_4x4_100gb @@ -568,7 +667,7 @@ cluster: cluster_env: app_config.yaml - cluster_compute: compute_gpu_16.yaml + cluster_compute: compute_gpu_4x4_aws.yaml run: timeout: 10800 @@ -586,7 +685,7 @@ frequency: manual cluster: 
cluster_env: app_config.yaml - cluster_compute: compute_gce_gpu_16.yaml + cluster_compute: compute_gpu_4x4_gce.yaml # Test tiny, medium, and huge input files. - name: ray-data-bulk-ingest-file-size-benchmark @@ -686,13 +785,15 @@ team: ml cluster: cluster_env: dreambooth_env.yaml - cluster_compute: dreambooth_compute.yaml + cluster_compute: dreambooth_compute_aws.yaml run: timeout: 1800 script: bash dreambooth_run.sh artifact_path: /tmp/artifacts/example_out.jpg + # variations: A10G not available on GCE, yet. + - name: air_example_gptj_deepspeed_fine_tuning group: AIR examples @@ -704,12 +805,21 @@ team: ml cluster: cluster_env: gptj_deepspeed_env.yaml - cluster_compute: gptj_deepspeed_compute.yaml + cluster_compute: gptj_deepspeed_compute_aws.yaml run: timeout: 3600 script: python test_myst_doc.py --path gptj_deepspeed_fine_tuning.ipynb + variations: + - __suffix__: aws + - __suffix__: gce + env: gce + frequency: manual + cluster: + cluster_env: gptj_deepspeed_env.yaml + cluster_compute: gptj_deepspeed_compute_gce.yaml + ##################################### # Workspace templates release tests # From e59701568b815813b225e7fc8f976f84927fc2cf Mon Sep 17 00:00:00 2001 From: Nathan Azrak <42650258+nathan-az@users.noreply.github.com> Date: Thu, 20 Apr 2023 01:54:18 +1000 Subject: [PATCH 006/424] Log databricks proxy (#34088) This PR adds standard logging of the Databricks proxy URL for the dashboard when a ray cluster starts. Currently the HTML link does not render until cell completion so it is difficult to access the dashboard while a ray workload is running. 
Signed-off-by: Nathan Azrak Co-authored-by: Nathan Azrak --- python/ray/util/spark/databricks_hook.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/python/ray/util/spark/databricks_hook.py b/python/ray/util/spark/databricks_hook.py index 404ff5a7211e..86d4b8a8f8a2 100644 --- a/python/ray/util/spark/databricks_hook.py +++ b/python/ray/util/spark/databricks_hook.py @@ -44,13 +44,16 @@ def display_databricks_driver_proxy_url(spark_context, port, title): orgId = commandContextTags.apply("orgId") clusterId = commandContextTags.apply("clusterId") - template = "/driver-proxy/o/{orgId}/{clusterId}/{port}/" - proxy_url = template.format(orgId=orgId, clusterId=clusterId, port=port) + proxy_link = f"/driver-proxy/o/{orgId}/{clusterId}/{port}/" + proxy_url = f"https://dbc-dp-{orgId}.cloud.databricks.com{proxy_link}" + + print("To monitor and debug Ray from Databricks, view the dashboard at ") + print(f" {proxy_url}") displayHTML( f""" From c0b0bb6a30941043677077bf18e975c519011596 Mon Sep 17 00:00:00 2001 From: Chen Shen Date: Wed, 19 Apr 2023 10:03:32 -0700 Subject: [PATCH 007/424] [core] add core team to protobuf owner #34566 update the right ownership for relative folders --- .github/CODEOWNERS | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 2b995c957d37..52bf373935c9 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -15,9 +15,9 @@ # ==== Ray core ==== # API compatibility -/src/ray/protobuf/common.proto @wuisawesome @ericl @ameerhajali @robertnishihara @pcmoritz @raulchen @iycheng @scv119 -/src/ray/protobuf/gcs.proto @wuisawesome @ericl @ameerhajali @robertnishihara @pcmoritz @raulchen @iycheng @scv119 -/src/ray/protobuf/gcs_service.proto @wuisawesome @ericl @ameerhajali @robertnishihara @pcmoritz @raulchen @iycheng @scv119 +/src/ray/protobuf/common.proto @wuisawesome @ericl @ameerhajali @robertnishihara @pcmoritz @raulchen @ray-project/ray-core 
+/src/ray/protobuf/gcs.proto @wuisawesome @ericl @ameerhajali @robertnishihara @pcmoritz @raulchen @ray-project/ray-core +/src/ray/protobuf/gcs_service.proto @wuisawesome @ericl @ameerhajali @robertnishihara @pcmoritz @raulchen @ray-project/ray-core /dashboard/modules/snapshot @wuisawesome @ijrsvt @edoakes @alanwguo @architkulkarni /python/ray/autoscaler/_private/monitor.py @wuisawesome @DmitriGekhtman From 7c3f3f100f8dd0e69aa8725a3a4ce0d2e1dd597f Mon Sep 17 00:00:00 2001 From: Chen Shen Date: Wed, 19 Apr 2023 10:04:15 -0700 Subject: [PATCH 008/424] [Core][pubsub] handle failures when publish failed. (#33115) Why are these changes needed? #32046 indicating that the pubsub might lose data, especially when the subscriber is under load. After examine the protocol it seems one bug is that the publisher fails to handle publish failures. i.e. when we push message in mailbox, we will delete the message being sent regardless of RPC failures. This PR tries to address the problem by adding monotonically increasing sequence_id to each message, and only delete messages when the subscriber acknowledged a message has been received. The sequence_id sequences is also generated per publisher, regardless of channels. This means if there exists multiple channels for the same publisher, each channel might not see contiguous sequences. This also assumes the invariant that a subscriber object will only subscribe to one publisher. We also relies on the pubsub protocol that at most one going push request will be inflight. This also handles the case gcs failover. We do so by track the publisher_id between both publisher and subscriber. When gcs failover, the publisher_id will be different, thus both the publisher and subscriber will forget the information about previous state. 
--- python/ray/_private/gcs_pubsub.py | 33 ++- python/ray/tests/test_gcs_fault_tolerance.py | 44 ++++ src/mock/ray/pubsub/publisher.h | 4 +- src/ray/core_worker/core_worker.cc | 5 +- src/ray/core_worker/reference_count.cc | 4 +- .../core_worker/test/reference_count_test.cc | 5 +- src/ray/gcs/gcs_client/gcs_client.cc | 3 + src/ray/gcs/gcs_server/gcs_server.cc | 3 +- src/ray/gcs/gcs_server/pubsub_handler.cc | 4 + src/ray/gcs/pubsub/gcs_pub_sub.cc | 10 +- src/ray/protobuf/gcs_service.proto | 9 + src/ray/protobuf/pubsub.proto | 11 + src/ray/pubsub/mock_pubsub.h | 2 +- src/ray/pubsub/publisher.cc | 62 ++++-- src/ray/pubsub/publisher.h | 46 ++++- src/ray/pubsub/subscriber.cc | 40 +++- src/ray/pubsub/subscriber.h | 6 + src/ray/pubsub/test/integration_test.cc | 7 +- src/ray/pubsub/test/publisher_test.cc | 154 ++++++++++---- src/ray/pubsub/test/subscriber_test.cc | 189 ++++++++++++++---- 20 files changed, 519 insertions(+), 122 deletions(-) diff --git a/python/ray/_private/gcs_pubsub.py b/python/ray/_private/gcs_pubsub.py index def67746dd42..c1d39e728b15 100644 --- a/python/ray/_private/gcs_pubsub.py +++ b/python/ray/_private/gcs_pubsub.py @@ -75,6 +75,8 @@ def __init__(self, worker_id: bytes = None): # SubscriberID / UniqueID, which is 28 (kUniqueIDSize) random bytes. self._subscriber_id = bytes(bytearray(random.getrandbits(8) for _ in range(28))) self._last_batch_size = 0 + self._max_processed_sequence_id = 0 + self._publisher_id = b"" # Batch size of the result from last poll. Used to indicate whether the # subscriber can keep up. 
@@ -91,7 +93,9 @@ def _subscribe_request(self, channel): def _poll_request(self): return gcs_service_pb2.GcsSubscriberPollRequest( - subscriber_id=self._subscriber_id + subscriber_id=self._subscriber_id, + max_processed_sequence_id=self._max_processed_sequence_id, + publisher_id=self._publisher_id, ) def _unsubscribe_request(self, channels): @@ -272,7 +276,21 @@ def _poll_locked(self, timeout=None) -> None: if fut.done(): self._last_batch_size = len(fut.result().pub_messages) + if fut.result().publisher_id != self._publisher_id: + if self._publisher_id != "": + logger.debug( + f"replied publisher_id {fut.result().publisher_id} " + f"different from {self._publisher_id}, this should " + "only happens during gcs failover." + ) + self._publisher_id = fut.result().publisher_id + self._max_processed_sequence_id = 0 + for msg in fut.result().pub_messages: + if msg.sequence_id <= self._max_processed_sequence_id: + logger.warn(f"Ignoring out of order message {msg}") + continue + self._max_processed_sequence_id = msg.sequence_id if msg.channel_type != self._channel: logger.warn(f"Ignoring message from unsubscribed channel {msg}") continue @@ -538,7 +556,20 @@ async def _poll(self, timeout=None) -> None: break try: self._last_batch_size = len(poll.result().pub_messages) + if poll.result().publisher_id != self._publisher_id: + if self._publisher_id != "": + logger.debug( + f"replied publisher_id {poll.result().publisher_id}" + f"different from {self._publisher_id}, this should " + "only happens during gcs failover." 
+ ) + self._publisher_id = poll.result().publisher_id + self._max_processed_sequence_id = 0 for msg in poll.result().pub_messages: + if msg.sequence_id <= self._max_processed_sequence_id: + logger.warn(f"Ignoring out of order message {msg}") + continue + self._max_processed_sequence_id = msg.sequence_id self._queue.append(msg) except grpc.RpcError as e: if self._should_terminate_polling(e): diff --git a/python/ray/tests/test_gcs_fault_tolerance.py b/python/ray/tests/test_gcs_fault_tolerance.py index 3f70356db8f6..fedd531d6cb8 100644 --- a/python/ray/tests/test_gcs_fault_tolerance.py +++ b/python/ray/tests/test_gcs_fault_tolerance.py @@ -17,6 +17,11 @@ wait_for_pid_to_exit, run_string_as_driver, ) +from ray._private.gcs_pubsub import ( + GcsPublisher, + GcsErrorSubscriber, +) +from ray.core.generated.gcs_pb2 import ErrorTableData import psutil @@ -649,6 +654,45 @@ def pid(self): ray.get_actor("A") +@pytest.mark.parametrize( + "ray_start_regular_with_external_redis", + [ + generate_system_config_map( + gcs_failover_worker_reconnect_timeout=20, + gcs_rpc_server_reconnect_timeout_s=60, + gcs_server_request_timeout_seconds=10, + ) + ], + indirect=True, +) +@pytest.mark.skip( + reason="python publisher and subscriber doesn't handle gcs server failover" +) +def test_publish_and_subscribe_error_info(ray_start_regular_with_external_redis): + address_info = ray_start_regular_with_external_redis + gcs_server_addr = address_info["gcs_address"] + + subscriber = GcsErrorSubscriber(address=gcs_server_addr) + subscriber.subscribe() + + publisher = GcsPublisher(address=gcs_server_addr) + err1 = ErrorTableData(error_message="test error message 1") + err2 = ErrorTableData(error_message="test error message 2") + print("sending error message 1") + publisher.publish_error(b"aaa_id", err1) + + ray._private.worker._global_node.kill_gcs_server() + ray._private.worker._global_node.start_gcs_server() + + print("sending error message 2") + publisher.publish_error(b"bbb_id", err2) + 
print("done") + + assert subscriber.poll() == (b"bbb_id", err2) + + subscriber.close() + + @pytest.fixture def redis_replicas(monkeypatch): monkeypatch.setenv("TEST_EXTERNAL_REDIS_REPLICAS", "3") diff --git a/src/mock/ray/pubsub/publisher.h b/src/mock/ray/pubsub/publisher.h index 77fd2fd68802..e3f5a4447999 100644 --- a/src/mock/ray/pubsub/publisher.h +++ b/src/mock/ray/pubsub/publisher.h @@ -59,7 +59,7 @@ class MockPublisherInterface : public PublisherInterface { const SubscriberID &subscriber_id, const std::optional &key_id), (override)); - MOCK_METHOD(void, Publish, (const rpc::PubMessage &pub_message), (override)); + MOCK_METHOD(void, Publish, (rpc::PubMessage pub_message), (override)); MOCK_METHOD(void, PublishFailure, (const rpc::ChannelType channel_type, const std::string &key_id), @@ -86,7 +86,7 @@ class MockPublisher : public Publisher { const SubscriberID &subscriber_id, const std::optional &key_id), (override)); - MOCK_METHOD(void, Publish, (const rpc::PubMessage &pub_message), (override)); + MOCK_METHOD(void, Publish, (rpc::PubMessage pub_message), (override)); MOCK_METHOD(void, PublishFailure, (const rpc::ChannelType channel_type, const std::string &key_id), diff --git a/src/ray/core_worker/core_worker.cc b/src/ray/core_worker/core_worker.cc index 2b001719a72b..b84d91909dad 100644 --- a/src/ray/core_worker/core_worker.cc +++ b/src/ray/core_worker/core_worker.cc @@ -242,7 +242,8 @@ CoreWorker::CoreWorker(const CoreWorkerOptions &options, const WorkerID &worker_ /*periodical_runner=*/&periodical_runner_, /*get_time_ms=*/[]() { return absl::GetCurrentTimeNanos() / 1e6; }, /*subscriber_timeout_ms=*/RayConfig::instance().subscriber_timeout_ms(), - /*publish_batch_size_=*/RayConfig::instance().publish_batch_size()); + /*publish_batch_size_=*/RayConfig::instance().publish_batch_size(), + GetWorkerID()); object_info_subscriber_ = std::make_unique( /*subscriber_id=*/GetWorkerID(), /*channels=*/ @@ -3121,7 +3122,7 @@ void 
CoreWorker::ProcessSubscribeForObjectEviction( pub_message.mutable_worker_object_eviction_message()->set_object_id( object_id.Binary()); - object_info_publisher_->Publish(pub_message); + object_info_publisher_->Publish(std::move(pub_message)); }; const auto object_id = ObjectID::FromBinary(message.object_id()); diff --git a/src/ray/core_worker/reference_count.cc b/src/ray/core_worker/reference_count.cc index 3c285cd97b4c..970c9c990c65 100644 --- a/src/ray/core_worker/reference_count.cc +++ b/src/ray/core_worker/reference_count.cc @@ -1169,7 +1169,7 @@ void ReferenceCounter::HandleRefRemoved(const ObjectID &object_id) { RAY_LOG(DEBUG) << "Publishing WaitForRefRemoved message for " << object_id << ", message has " << worker_ref_removed_message->borrowed_refs().size() << " borrowed references."; - object_info_publisher_->Publish(pub_message); + object_info_publisher_->Publish(std::move(pub_message)); } void ReferenceCounter::SetRefRemovedCallback( @@ -1459,7 +1459,7 @@ void ReferenceCounter::PushToLocationSubscribers(ReferenceTable::iterator it) { auto object_locations_msg = pub_message.mutable_worker_object_locations_message(); FillObjectInformationInternal(it, object_locations_msg); - object_info_publisher_->Publish(pub_message); + object_info_publisher_->Publish(std::move(pub_message)); } Status ReferenceCounter::FillObjectInformation( diff --git a/src/ray/core_worker/test/reference_count_test.cc b/src/ray/core_worker/test/reference_count_test.cc index de5de4146411..51b5d51523ac 100644 --- a/src/ray/core_worker/test/reference_count_test.cc +++ b/src/ray/core_worker/test/reference_count_test.cc @@ -136,7 +136,8 @@ class MockDistributedSubscriber : public pubsub::SubscriberInterface { subscriber_id, /*get_time_ms=*/[]() { return 1.0; }, /*subscriber_timeout_ms=*/1000, - /*publish_batch_size=*/1000)), + /*publish_batch_size=*/1000, + UniqueID::FromRandom())), client_factory_(client_factory) {} ~MockDistributedSubscriber() = default; @@ -249,7 +250,7 @@ class 
MockDistributedPublisher : public pubsub::PublisherInterface { void PublishFailure(const rpc::ChannelType channel_type, const std::string &key_id_binary) {} - void Publish(const rpc::PubMessage &pub_message) { + void Publish(rpc::PubMessage pub_message) { if (pub_message.channel_type() == rpc::ChannelType::WORKER_OBJECT_LOCATIONS_CHANNEL) { // TODO(swang): Test object locations pubsub too. return; diff --git a/src/ray/gcs/gcs_client/gcs_client.cc b/src/ray/gcs/gcs_client/gcs_client.cc index 40fe55c23c18..79b8be674404 100644 --- a/src/ray/gcs/gcs_client/gcs_client.cc +++ b/src/ray/gcs/gcs_client/gcs_client.cc @@ -49,11 +49,14 @@ void GcsSubscriberClient::PubsubLongPolling( const rpc::ClientCallback &callback) { rpc::GcsSubscriberPollRequest req; req.set_subscriber_id(request.subscriber_id()); + req.set_max_processed_sequence_id(request.max_processed_sequence_id()); + req.set_publisher_id(request.publisher_id()); rpc_client_->GcsSubscriberPoll( req, [callback](const Status &status, const rpc::GcsSubscriberPollReply &poll_reply) { rpc::PubsubLongPollingReply reply; *reply.mutable_pub_messages() = poll_reply.pub_messages(); + *reply.mutable_publisher_id() = poll_reply.publisher_id(); callback(status, reply); }); } diff --git a/src/ray/gcs/gcs_server/gcs_server.cc b/src/ray/gcs/gcs_server/gcs_server.cc index 55b4bb61f301..1edb966d4b42 100644 --- a/src/ray/gcs/gcs_server/gcs_server.cc +++ b/src/ray/gcs/gcs_server/gcs_server.cc @@ -99,7 +99,8 @@ GcsServer::GcsServer(const ray::gcs::GcsServerConfig &config, /*periodical_runner=*/&pubsub_periodical_runner_, /*get_time_ms=*/[]() { return absl::GetCurrentTimeNanos() / 1e6; }, /*subscriber_timeout_ms=*/RayConfig::instance().subscriber_timeout_ms(), - /*publish_batch_size_=*/RayConfig::instance().publish_batch_size()); + /*publish_batch_size_=*/RayConfig::instance().publish_batch_size(), + /*publisher_id=*/NodeID::FromRandom()); gcs_publisher_ = std::make_shared(std::move(inner_publisher)); } diff --git 
a/src/ray/gcs/gcs_server/pubsub_handler.cc b/src/ray/gcs/gcs_server/pubsub_handler.cc index a089b8ca765a..cf34b4f1e8a6 100644 --- a/src/ray/gcs/gcs_server/pubsub_handler.cc +++ b/src/ray/gcs/gcs_server/pubsub_handler.cc @@ -40,6 +40,7 @@ void InternalPubSubHandler::HandleGcsPublish(rpc::GcsPublishRequest request, nullptr); return; } + RAY_LOG(DEBUG) << "received publish request: " << request.DebugString(); for (const auto &msg : request.pub_messages()) { gcs_publisher_->GetPublisher()->Publish(msg); } @@ -63,6 +64,8 @@ void InternalPubSubHandler::HandleGcsSubscriberPoll( } rpc::PubsubLongPollingRequest pubsub_req; pubsub_req.set_subscriber_id(request.subscriber_id()); + pubsub_req.set_publisher_id(request.publisher_id()); + pubsub_req.set_max_processed_sequence_id(request.max_processed_sequence_id()); auto pubsub_reply = std::make_shared(); auto pubsub_reply_ptr = pubsub_reply.get(); gcs_publisher_->GetPublisher()->ConnectToSubscriber( @@ -74,6 +77,7 @@ void InternalPubSubHandler::HandleGcsSubscriberPoll( std::function success_cb, std::function failure_cb) { reply->mutable_pub_messages()->Swap(pubsub_reply->mutable_pub_messages()); + reply->set_publisher_id(std::move(*pubsub_reply->mutable_publisher_id())); reply_cb(std::move(status), std::move(success_cb), std::move(failure_cb)); }); } diff --git a/src/ray/gcs/pubsub/gcs_pub_sub.cc b/src/ray/gcs/pubsub/gcs_pub_sub.cc index c7ac4294dc8e..32c0e9f41367 100644 --- a/src/ray/gcs/pubsub/gcs_pub_sub.cc +++ b/src/ray/gcs/pubsub/gcs_pub_sub.cc @@ -26,7 +26,7 @@ Status GcsPublisher::PublishActor(const ActorID &id, msg.set_channel_type(rpc::ChannelType::GCS_ACTOR_CHANNEL); msg.set_key_id(id.Binary()); *msg.mutable_actor_message() = message; - publisher_->Publish(msg); + publisher_->Publish(std::move(msg)); if (done != nullptr) { done(Status::OK()); } @@ -40,7 +40,7 @@ Status GcsPublisher::PublishJob(const JobID &id, msg.set_channel_type(rpc::ChannelType::GCS_JOB_CHANNEL); msg.set_key_id(id.Binary()); 
*msg.mutable_job_message() = message; - publisher_->Publish(msg); + publisher_->Publish(std::move(msg)); if (done != nullptr) { done(Status::OK()); } @@ -54,7 +54,7 @@ Status GcsPublisher::PublishNodeInfo(const NodeID &id, msg.set_channel_type(rpc::ChannelType::GCS_NODE_INFO_CHANNEL); msg.set_key_id(id.Binary()); *msg.mutable_node_info_message() = message; - publisher_->Publish(msg); + publisher_->Publish(std::move(msg)); if (done != nullptr) { done(Status::OK()); } @@ -68,7 +68,7 @@ Status GcsPublisher::PublishWorkerFailure(const WorkerID &id, msg.set_channel_type(rpc::ChannelType::GCS_WORKER_DELTA_CHANNEL); msg.set_key_id(id.Binary()); *msg.mutable_worker_delta_message() = message; - publisher_->Publish(msg); + publisher_->Publish(std::move(msg)); if (done != nullptr) { done(Status::OK()); } @@ -82,7 +82,7 @@ Status GcsPublisher::PublishError(const std::string &id, msg.set_channel_type(rpc::ChannelType::RAY_ERROR_INFO_CHANNEL); msg.set_key_id(id); *msg.mutable_error_info_message() = message; - publisher_->Publish(msg); + publisher_->Publish(std::move(msg)); if (done != nullptr) { done(Status::OK()); } diff --git a/src/ray/protobuf/gcs_service.proto b/src/ray/protobuf/gcs_service.proto index 9c63d76c4130..38280e48d3f6 100644 --- a/src/ray/protobuf/gcs_service.proto +++ b/src/ray/protobuf/gcs_service.proto @@ -538,11 +538,20 @@ message GcsPublishReply { message GcsSubscriberPollRequest { /// The id of the subscriber. bytes subscriber_id = 1; + /// The max squence_id that has been processed by the subscriber. The Publisher + /// will drop queued messages with smaller sequence_id for this subscriber. + int64 max_processed_sequence_id = 2; + /// The expected publisher_id. The publisher will ignore the + /// max_processed_sequence_id if the publisher_id doesn't match. + /// This usuall happens when gcs failover. + bytes publisher_id = 3; } message GcsSubscriberPollReply { /// The messages that are published. 
repeated PubMessage pub_messages = 1; + /// The publisher's id. + bytes publisher_id = 2; // Not populated. GcsStatus status = 100; } diff --git a/src/ray/protobuf/pubsub.proto b/src/ray/protobuf/pubsub.proto index eff40b652fea..aba5f588d4b1 100644 --- a/src/ray/protobuf/pubsub.proto +++ b/src/ray/protobuf/pubsub.proto @@ -77,6 +77,8 @@ message PubMessage { // The message that indicates the given key id is not available anymore. FailureMessage failure_message = 6; } + /// A monotonically increasing sequence_id generated by the publisher. + int64 sequence_id = 16; } message WorkerObjectEvictionMessage { @@ -202,11 +204,20 @@ message WorkerObjectLocationsSubMessage { message PubsubLongPollingRequest { /// The id of the subscriber. bytes subscriber_id = 1; + /// The max squence_id that has been processed by the subscriber. The Publisher + /// will drop queued messages with smaller sequence_id for this subscriber. + int64 max_processed_sequence_id = 2; + /// The expected publisher_id. The publisher will ignore the + /// max_processed_sequence_id if the publisher_id doesn't match. + /// This usuall happens when gcs failover. + bytes publisher_id = 3; } message PubsubLongPollingReply { /// The messages that are published. repeated PubMessage pub_messages = 1; + /// The publisher_id. 
+ bytes publisher_id = 2; } message PubsubCommandBatchRequest { diff --git a/src/ray/pubsub/mock_pubsub.h b/src/ray/pubsub/mock_pubsub.h index 83dec35f72a3..5cb085a83444 100644 --- a/src/ray/pubsub/mock_pubsub.h +++ b/src/ray/pubsub/mock_pubsub.h @@ -67,7 +67,7 @@ class MockPublisher : public pubsub::PublisherInterface { const pubsub::SubscriberID &subscriber_id, const std::optional &key_id)); - MOCK_METHOD1(Publish, void(const rpc::PubMessage &pub_message)); + MOCK_METHOD1(Publish, void(rpc::PubMessage pub_message)); MOCK_METHOD3(UnregisterSubscription, bool(const rpc::ChannelType channel_type, diff --git a/src/ray/pubsub/publisher.cc b/src/ray/pubsub/publisher.cc index 40d6c4412815..fec34b5e32fd 100644 --- a/src/ray/pubsub/publisher.cc +++ b/src/ray/pubsub/publisher.cc @@ -22,24 +22,22 @@ namespace pubsub { namespace pub_internal { -bool BasicEntityState::Publish(const rpc::PubMessage &pub_message) { +bool BasicEntityState::Publish(std::shared_ptr msg) { if (subscribers_.empty()) { return false; } - const auto msg = std::make_shared(pub_message); for (auto &[id, subscriber] : subscribers_) { subscriber->QueueMessage(msg); } return true; } -bool CappedEntityState::Publish(const rpc::PubMessage &pub_message) { +bool CappedEntityState::Publish(std::shared_ptr msg) { if (subscribers_.empty()) { return false; } - const int64_t message_size = pub_message.ByteSizeLong(); - + const int64_t message_size = msg->ByteSizeLong(); while (!pending_messages_.empty()) { // NOTE: if atomic ref counting becomes too expensive, it should be possible // to implement inflight message tracking across subscribers with non-atomic @@ -77,7 +75,6 @@ bool CappedEntityState::Publish(const rpc::PubMessage &pub_message) { message_sizes_.pop(); } - const auto msg = std::make_shared(pub_message); pending_messages_.push(msg); total_size_ += message_size; message_sizes_.push(message_size); @@ -104,10 +101,10 @@ const absl::flat_hash_map &EntityState::Subscri 
SubscriptionIndex::SubscriptionIndex(rpc::ChannelType channel_type) : channel_type_(channel_type), subscribers_to_all_(CreateEntityState()) {} -bool SubscriptionIndex::Publish(const rpc::PubMessage &pub_message) { +bool SubscriptionIndex::Publish(std::shared_ptr pub_message) { const bool publish_to_all = subscribers_to_all_->Publish(pub_message); bool publish_to_entity = false; - auto it = entities_.find(pub_message.key_id()); + auto it = entities_.find(pub_message->key_id()); if (it != entities_.end()) { publish_to_entity = it->second->Publish(pub_message); } @@ -246,6 +243,22 @@ std::unique_ptr SubscriptionIndex::CreateEntityState() { void SubscriberState::ConnectToSubscriber(const rpc::PubsubLongPollingRequest &request, rpc::PubsubLongPollingReply *reply, rpc::SendReplyCallback send_reply_callback) { + auto max_processed_sequence_id = request.max_processed_sequence_id(); + if (request.publisher_id().empty() || + publisher_id_ != PublisherID::FromBinary(request.publisher_id())) { + // in case the publisher_id mismatches, we should ignore the + // max_processed_sequence_id. + max_processed_sequence_id = 0; + } + + // clean up messages that have already been processed. + while (!mailbox_.empty() && + mailbox_.front()->sequence_id() <= max_processed_sequence_id) { + RAY_LOG(DEBUG) << "removing " << max_processed_sequence_id << " : " + << mailbox_.front()->sequence_id(); + mailbox_.pop_front(); + } + if (long_polling_connection_) { // Because of the new long polling request, flush the current polling request with an // empty reply. 
@@ -262,7 +275,8 @@ void SubscriberState::ConnectToSubscriber(const rpc::PubsubLongPollingRequest &r void SubscriberState::QueueMessage(const std::shared_ptr &pub_message, bool try_publish) { - mailbox_.push(pub_message); + RAY_LOG(DEBUG) << "enqueue: " << pub_message->sequence_id(); + mailbox_.push_back(pub_message); if (try_publish) { PublishIfPossible(); } @@ -278,28 +292,35 @@ bool SubscriberState::PublishIfPossible(bool force_noop) { // No message should have been added to the reply. RAY_CHECK(long_polling_connection_->reply->pub_messages().empty()); + *long_polling_connection_->reply->mutable_publisher_id() = publisher_id_.Binary(); if (!force_noop) { - for (int i = 0; i < publish_batch_size_ && !mailbox_.empty(); ++i) { - const rpc::PubMessage &msg = *mailbox_.front(); + for (auto it = mailbox_.begin(); it != mailbox_.end(); it++) { + if (long_polling_connection_->reply->pub_messages().size() >= publish_batch_size_) { + break; + } + const rpc::PubMessage &msg = **it; // Avoid sending empty message to the subscriber. The message might have been // cleared because the subscribed entity's buffer was full. if (msg.inner_message_case() != rpc::PubMessage::INNER_MESSAGE_NOT_SET) { *long_polling_connection_->reply->add_pub_messages() = msg; } - mailbox_.pop(); } } + + RAY_LOG(DEBUG) << "sending reply back" + << long_polling_connection_->reply->DebugString(); long_polling_connection_->send_reply_callback(Status::OK(), nullptr, nullptr); // Clean up & update metadata. long_polling_connection_.reset(); + // Clean up & update metadata. last_connection_update_time_ms_ = get_time_ms_(); return true; } bool SubscriberState::CheckNoLeaks() const { // If all message in the mailbox has been replied, consider there is no leak. 
- return !long_polling_connection_ && mailbox_.empty(); + return mailbox_.empty(); } bool SubscriberState::ConnectionExists() const { @@ -319,7 +340,8 @@ void Publisher::ConnectToSubscriber(const rpc::PubsubLongPollingRequest &request RAY_CHECK(send_reply_callback != nullptr); const auto subscriber_id = SubscriberID::FromBinary(request.subscriber_id()); - RAY_LOG(DEBUG) << "Long polling connection initiated by " << subscriber_id.Hex(); + RAY_LOG(DEBUG) << "Long polling connection initiated by " << subscriber_id.Hex() + << ", publisher_id " << publisher_id_.Hex(); absl::MutexLock lock(&mutex_); auto it = subscribers_.find(subscriber_id); if (it == subscribers_.end()) { @@ -329,7 +351,8 @@ void Publisher::ConnectToSubscriber(const rpc::PubsubLongPollingRequest &request std::make_unique(subscriber_id, get_time_ms_, subscriber_timeout_ms_, - publish_batch_size_)) + publish_batch_size_, + publisher_id_)) .first; } auto &subscriber = it->second; @@ -350,7 +373,8 @@ bool Publisher::RegisterSubscription(const rpc::ChannelType channel_type, std::make_unique(subscriber_id, get_time_ms_, subscriber_timeout_ms_, - publish_batch_size_)) + publish_batch_size_, + publisher_id_)) .first; } pub_internal::SubscriberState *subscriber = it->second.get(); @@ -359,13 +383,15 @@ bool Publisher::RegisterSubscription(const rpc::ChannelType channel_type, return subscription_index_it->second.AddEntry(key_id.value_or(""), subscriber); } -void Publisher::Publish(const rpc::PubMessage &pub_message) { +void Publisher::Publish(rpc::PubMessage pub_message) { + RAY_CHECK_EQ(pub_message.sequence_id(), 0) << "sequence_id should not be set;"; const auto channel_type = pub_message.channel_type(); absl::MutexLock lock(&mutex_); auto &subscription_index = subscription_index_map_.at(channel_type); // TODO(sang): Currently messages are lost if publish happens // before there's any subscriber for the object. 
- subscription_index.Publish(pub_message); + pub_message.set_sequence_id(++next_sequence_id_); + subscription_index.Publish(std::make_shared(std::move(pub_message))); cum_pub_message_cnt_[channel_type]++; } diff --git a/src/ray/pubsub/publisher.h b/src/ray/pubsub/publisher.h index 20ccdc29ebdb..f14b0c4e3775 100644 --- a/src/ray/pubsub/publisher.h +++ b/src/ray/pubsub/publisher.h @@ -16,6 +16,7 @@ #include +#include #include #include #include @@ -35,6 +36,7 @@ namespace ray { namespace pubsub { using SubscriberID = UniqueID; +using PublisherID = UniqueID; namespace pub_internal { @@ -47,7 +49,7 @@ class EntityState { /// Publishes the message to subscribers of the entity. /// Returns true if there are subscribers, returns false otherwise. - virtual bool Publish(const rpc::PubMessage &pub_message) = 0; + virtual bool Publish(std::shared_ptr pub_message) = 0; /// Manages the set of subscribers of this entity. bool AddSubscriber(SubscriberState *subscriber); @@ -77,14 +79,14 @@ class EntityState { /// Publishes the message to all subscribers, without size cap on buffered messages. class BasicEntityState : public EntityState { public: - bool Publish(const rpc::PubMessage &pub_message) override; + bool Publish(std::shared_ptr pub_message) override; }; /// Publishes the message to all subscribers, and enforce a total size cap on buffered /// messages. class CappedEntityState : public EntityState { public: - bool Publish(const rpc::PubMessage &pub_message) override; + bool Publish(std::shared_ptr pub_message) override; private: // Tracks inflight messages. The messages have shared ownership by @@ -110,7 +112,7 @@ class SubscriptionIndex { /// Publishes the message to relevant subscribers. /// Returns true if there are subscribers listening on the entity key of the message, /// returns false otherwise. - bool Publish(const rpc::PubMessage &pub_message); + bool Publish(std::shared_ptr pub_message); /// Adds a new subscriber and the key it subscribes to. 
/// When `key_id` is empty, the subscriber subscribes to all keys. @@ -172,12 +174,14 @@ class SubscriberState { SubscriberState(SubscriberID subscriber_id, std::function get_time_ms, uint64_t connection_timeout_ms, - const int publish_batch_size) + const int publish_batch_size, + PublisherID publisher_id) : subscriber_id_(subscriber_id), get_time_ms_(std::move(get_time_ms)), connection_timeout_ms_(connection_timeout_ms), publish_batch_size_(publish_batch_size), - last_connection_update_time_ms_(get_time_ms_()) {} + last_connection_update_time_ms_(get_time_ms_()), + publisher_id_(publisher_id) {} ~SubscriberState() { // Force a push to close the long-polling. @@ -229,7 +233,7 @@ class SubscriberState { /// Inflight long polling reply callback, for replying to the subscriber. std::unique_ptr long_polling_connection_; /// Queued messages to publish. - std::queue> mailbox_; + std::deque> mailbox_; /// Callback to get the current time. const std::function get_time_ms_; /// The time in which the connection is considered as timed out. @@ -238,6 +242,7 @@ class SubscriberState { const int publish_batch_size_; /// The last time long polling was connected in milliseconds. double last_connection_update_time_ms_; + PublisherID publisher_id_; }; } // namespace pub_internal @@ -263,7 +268,7 @@ class PublisherInterface { /// /// \param pub_message The message to publish. /// Required to contain channel_type and key_id fields. - virtual void Publish(const rpc::PubMessage &pub_message) = 0; + virtual void Publish(rpc::PubMessage pub_message) = 0; /// Publish to the subscriber that the given key id is not available anymore. /// It will invoke the failure callback on the subscriber side. 
@@ -315,11 +320,13 @@ class Publisher : public PublisherInterface { PeriodicalRunner *const periodical_runner, std::function get_time_ms, const uint64_t subscriber_timeout_ms, - const int publish_batch_size) + const int publish_batch_size, + PublisherID publisher_id = NodeID::FromRandom()) : periodical_runner_(periodical_runner), get_time_ms_(std::move(get_time_ms)), subscriber_timeout_ms_(subscriber_timeout_ms), - publish_batch_size_(publish_batch_size) { + publish_batch_size_(publish_batch_size), + publisher_id_(publisher_id) { // Insert index map for each channel. for (auto type : channels) { subscription_index_map_.emplace(type, type); @@ -354,7 +361,7 @@ class Publisher : public PublisherInterface { /// /// \param pub_message The message to publish. /// Required to contain channel_type and key_id fields. - void Publish(const rpc::PubMessage &pub_message) override; + void Publish(rpc::PubMessage pub_message) override; /// Publish to the subscriber that the given key id is not available anymore. /// It will invoke the failure callback on the subscriber side. @@ -461,6 +468,23 @@ class Publisher : public PublisherInterface { int publish_batch_size_; absl::flat_hash_map cum_pub_message_cnt_ GUARDED_BY(mutex_); + + /// The monotonically increasing sequence_id for this publisher. + /// The publisher will add this sequence_id to every message to be published. + /// The sequence_id is used for handling failures: the publisher will not delete + /// a message from the sending queue until the subscriber has acknowledge + /// it has processed beyond the message's sequence_id. + /// + /// Note: + /// - a valide sequence_id starts from 1. + /// - the subscriber doesn't expect the sequences it receives are contiguous. + /// this is due the fact a subscriber can only subscribe a subset + /// of a channel. + int64_t next_sequence_id_ GUARDED_BY(mutex_) = 0; + + /// A unique identifier identifies the publisher_id. + /// TODO(scv119) add docs about the semantics. 
+ const PublisherID publisher_id_; }; } // namespace pubsub diff --git a/src/ray/pubsub/subscriber.cc b/src/ray/pubsub/subscriber.cc index f36fbabf52c9..b5546fe5d17d 100644 --- a/src/ray/pubsub/subscriber.cc +++ b/src/ray/pubsub/subscriber.cc @@ -17,6 +17,9 @@ namespace ray { namespace pubsub { +namespace { +const PublisherID kDefaultPublisherID{}; +} /////////////////////////////////////////////////////////////////////////////// /// SubscriberChannel @@ -349,7 +352,9 @@ void Subscriber::MakeLongPollingPubsubConnection(const rpc::Address &publisher_a auto subscriber_client = get_client_(publisher_address); rpc::PubsubLongPollingRequest long_polling_request; long_polling_request.set_subscriber_id(subscriber_id_.Binary()); - + auto &processed_state = processed_sequences_[publisher_id]; + long_polling_request.set_publisher_id(processed_state.first.Binary()); + long_polling_request.set_max_processed_sequence_id(processed_state.second); subscriber_client->PubsubLongPolling( long_polling_request, [this, publisher_address](Status status, const rpc::PubsubLongPollingReply &reply) { @@ -362,7 +367,7 @@ void Subscriber::HandleLongPollingResponse(const rpc::Address &publisher_address const Status &status, const rpc::PubsubLongPollingReply &reply) { const auto publisher_id = PublisherID::FromBinary(publisher_address.worker_id()); - RAY_LOG(DEBUG) << "Long polling request has replied from " << publisher_id; + RAY_LOG(DEBUG) << "Long polling request has been replied from " << publisher_id; RAY_CHECK(publishers_connected_.count(publisher_id)); if (!status.ok()) { @@ -377,10 +382,38 @@ void Subscriber::HandleLongPollingResponse(const rpc::Address &publisher_address // Empty the command queue because we cannot send commands anymore. 
commands_.erase(publisher_id); } else { + RAY_CHECK(!reply.publisher_id().empty()) << "publisher_id is empty."; + auto reply_publisher_id = PublisherID::FromBinary(reply.publisher_id()); + if (reply_publisher_id != processed_sequences_[publisher_id].first) { + if (processed_sequences_[publisher_id].first != kDefaultPublisherID) { + RAY_LOG(INFO) << "Received publisher_id " << reply_publisher_id.Hex() + << " is different from last seen publisher_id " + << processed_sequences_[publisher_id].first + << ", this can only happen when gcs failsover."; + } + // reset publisher_id and processed_sequence + // if the publisher_id changes. + processed_sequences_[publisher_id].first = reply_publisher_id; + processed_sequences_[publisher_id].second = 0; + } + for (int i = 0; i < reply.pub_messages_size(); i++) { const auto &msg = reply.pub_messages(i); const auto channel_type = msg.channel_type(); const auto &key_id = msg.key_id(); + RAY_CHECK_GT(msg.sequence_id(), 0) + << "message's sequence_id is invalid " << msg.sequence_id(); + + if (msg.sequence_id() <= processed_sequences_[publisher_id].second) { + RAY_LOG_EVERY_MS(WARNING, 10000) + << "Received message out of order, publisher_id: " + << processed_sequences_[publisher_id].first + << ", received message sequence_id " + << processed_sequences_[publisher_id].second + << ", received message sequence_id " << msg.sequence_id(); + continue; + } + processed_sequences_[publisher_id].second = msg.sequence_id(); // If the published message is a failure message, the publisher indicates // this key id is failed. Invoke the failure callback. 
At this time, we should not // unsubscribe the publisher because there are other entries that subscribe from the @@ -399,6 +432,7 @@ void Subscriber::HandleLongPollingResponse(const rpc::Address &publisher_address if (SubscriptionExists(publisher_id)) { MakeLongPollingPubsubConnection(publisher_address); } else { + processed_sequences_.erase(publisher_id); publishers_connected_.erase(publisher_id); } } @@ -478,7 +512,7 @@ bool Subscriber::CheckNoLeaks() const { } } return !leaks && publishers_connected_.empty() && command_batch_sent_.empty() && - commands_.empty(); + commands_.empty() && processed_sequences_.empty(); } std::string Subscriber::DebugString() const { diff --git a/src/ray/pubsub/subscriber.h b/src/ray/pubsub/subscriber.h index a76e9cff40cb..8c73716d8844 100644 --- a/src/ray/pubsub/subscriber.h +++ b/src/ray/pubsub/subscriber.h @@ -391,6 +391,7 @@ class Subscriber : public SubscriberInterface { /// FRIEND_TEST(IntegrationTest, SubscribersToOneIDAndAllIDs); + FRIEND_TEST(IntegrationTest, GcsFailsOver); FRIEND_TEST(SubscriberTest, TestBasicSubscription); FRIEND_TEST(SubscriberTest, TestSingleLongPollingWithMultipleSubscriptions); FRIEND_TEST(SubscriberTest, TestMultiLongPollingWithTheSameSubscription); @@ -491,6 +492,11 @@ class Subscriber : public SubscriberInterface { /// Mapping of channel type to channels. absl::flat_hash_map> channels_ GUARDED_BY(mutex_); + + /// Keeps track of last processed by publisher. + /// Note the publisher_id only change if gcs failover. + absl::flat_hash_map> processed_sequences_ + GUARDED_BY(mutex_); }; } // namespace pubsub diff --git a/src/ray/pubsub/test/integration_test.cc b/src/ray/pubsub/test/integration_test.cc index ffca2acd0e05..bb6574e3a2f0 100644 --- a/src/ray/pubsub/test/integration_test.cc +++ b/src/ray/pubsub/test/integration_test.cc @@ -50,8 +50,6 @@ class SubscriberServiceImpl final : public rpc::SubscriberService::CallbackServi std::function failure_cb) { // Long polling should always succeed. 
RAY_CHECK_OK(status); - RAY_CHECK(success_cb == nullptr); - RAY_CHECK(failure_cb == nullptr); reactor->Finish(grpc::Status::OK); }); return reactor; @@ -149,8 +147,10 @@ class IntegrationTest : public ::testing::Test { } ~IntegrationTest() { + RAY_LOG(INFO) << "Shutting down server."; // Stop callback runners. io_service_.Stop(); + RAY_LOG(INFO) << "Shutting down server1."; // Assume no new subscriber is connected after the unregisteration above. Otherwise // shutdown would hang below. server_->Shutdown(); @@ -179,6 +179,8 @@ class IntegrationTest : public ::testing::Test { server_ = builder.BuildAndStart(); } + void RestartServer() { SetupServer(); } + std::unique_ptr CreateSubscriber() { return std::make_unique( UniqueID::FromRandom(), @@ -300,6 +302,5 @@ TEST_F(IntegrationTest, SubscribersToOneIDAndAllIDs) { absl::SleepFor(absl::Seconds(1)); } } - } // namespace pubsub } // namespace ray diff --git a/src/ray/pubsub/test/publisher_test.cc b/src/ray/pubsub/test/publisher_test.cc index ccc41c10f7d3..604d0d352c45 100644 --- a/src/ray/pubsub/test/publisher_test.cc +++ b/src/ray/pubsub/test/publisher_test.cc @@ -23,6 +23,9 @@ namespace ray { namespace pubsub { +namespace { +const NodeID kDefaultPublisherId = NodeID::FromRandom(); +} using namespace pub_internal; @@ -44,19 +47,28 @@ class PublisherTest : public ::testing::Test { /*periodic_runner=*/periodic_runner_.get(), /*get_time_ms=*/[this]() { return current_time_; }, /*subscriber_timeout_ms=*/subscriber_timeout_ms_, - /*batch_size*/ 100); + /*batch_size*/ 100, + kDefaultPublisherId); current_time_ = 0; request_.set_subscriber_id(subscriber_id_.Binary()); + request_.set_publisher_id(kDefaultPublisherId.Binary()); } void TearDown() {} - const rpc::PubMessage GeneratePubMessage(const ObjectID &object_id) { + void ResetSequenceId() { sequence_id_ = 0; } + + int64_t GetNextSequenceId() { return ++sequence_id_; } + + const rpc::PubMessage GeneratePubMessage(const ObjectID &object_id, + int64_t sequence_id = 0) { 
rpc::PubMessage pub_message; auto *object_eviction_msg = pub_message.mutable_worker_object_eviction_message(); object_eviction_msg->set_object_id(object_id.Binary()); pub_message.set_key_id(object_id.Binary()); pub_message.set_channel_type(rpc::ChannelType::WORKER_OBJECT_EVICTION); + RAY_LOG(INFO) << "message sequence_id is" << sequence_id; + pub_message.set_sequence_id(sequence_id); return pub_message; } @@ -81,7 +93,8 @@ class PublisherTest : public ::testing::Test { NodeID::FromRandom(), /*get_time_ms=*/[]() { return 1.0; }, /*subscriber_timeout_ms=*/1000, - /*publish_batch_size=*/1000)); + /*publish_batch_size=*/1000, + kDefaultPublisherId)); return subscribers_.back().get(); } @@ -107,6 +120,7 @@ class PublisherTest : public ::testing::Test { const SubscriberID subscriber_id_ = SubscriberID::FromRandom(); rpc::PubsubLongPollingRequest request_; std::vector> subscribers_; + int64_t sequence_id_ = 0; }; TEST_F(PublisherTest, TestSubscriptionIndexSingeNodeSingleObject) { @@ -327,7 +341,11 @@ TEST_F(PublisherTest, TestSubscriber) { }; auto subscriber = std::make_shared( - subscriber_id_, [this]() { return current_time_; }, subscriber_timeout_ms_, 10); + subscriber_id_, + [this]() { return current_time_; }, + subscriber_timeout_ms_, + 10, + kDefaultPublisherId); // If there's no connection, it will return false. ASSERT_FALSE(subscriber->PublishIfPossible()); // Try connecting. @@ -342,8 +360,9 @@ TEST_F(PublisherTest, TestSubscriber) { absl::flat_hash_set published_objects; // Make sure publishing one object works as expected. 
auto oid = ObjectID::FromRandom(); - subscriber->QueueMessage(std::make_shared(GeneratePubMessage(oid)), - /*try_publish=*/false); + subscriber->QueueMessage( + std::make_shared(GeneratePubMessage(oid, GetNextSequenceId())), + /*try_publish=*/false); published_objects.emplace(oid); ASSERT_TRUE(subscriber->PublishIfPossible()); ASSERT_TRUE(object_ids_published.contains(oid)); @@ -353,8 +372,9 @@ TEST_F(PublisherTest, TestSubscriber) { // Add 3 oids and see if it works properly. for (int i = 0; i < 3; i++) { oid = ObjectID::FromRandom(); - subscriber->QueueMessage(std::make_shared(GeneratePubMessage(oid)), - /*try_publish=*/false); + subscriber->QueueMessage( + std::make_shared(GeneratePubMessage(oid, GetNextSequenceId())), + /*try_publish=*/false); published_objects.emplace(oid); } // Since there's no connection, objects won't be published. @@ -363,29 +383,51 @@ TEST_F(PublisherTest, TestSubscriber) { for (auto oid : published_objects) { ASSERT_TRUE(object_ids_published.contains(oid)); } + + // Queue is not cleaned up if max_processed_sequence_id hasn't + // been set properly. + request_.set_max_processed_sequence_id(1); + subscriber->ConnectToSubscriber(request_, &reply, send_reply_callback); + ASSERT_FALSE(subscriber->CheckNoLeaks()); + + // If we set wrong publisher_id, the queue won't be cleaned up. + request_.set_publisher_id(NodeID::FromRandom().Binary()); + request_.set_max_processed_sequence_id(sequence_id_); + subscriber->ConnectToSubscriber(request_, &reply, send_reply_callback); + ASSERT_FALSE(subscriber->CheckNoLeaks()); + + // By sending back max_processed_sequence_id, the subscriber's sending queue + // is cleaned up. 
+ request_.set_max_processed_sequence_id(sequence_id_); + request_.set_publisher_id(kDefaultPublisherId.Binary()); + subscriber->ConnectToSubscriber(request_, &reply, send_reply_callback); ASSERT_TRUE(subscriber->CheckNoLeaks()); } TEST_F(PublisherTest, TestSubscriberBatchSize) { absl::flat_hash_set object_ids_published; - send_reply_callback = [this, &object_ids_published](Status status, - std::function success, - std::function failure) { - for (int i = 0; i < reply.pub_messages_size(); i++) { - const auto &msg = reply.pub_messages(i); - const auto oid = - ObjectID::FromBinary(msg.worker_object_eviction_message().object_id()); - object_ids_published.emplace(oid); - } - reply = rpc::PubsubLongPollingReply(); - }; + int64_t max_processed_seuquence_id = 0; + send_reply_callback = + [this, &object_ids_published, &max_processed_seuquence_id]( + Status status, std::function success, std::function failure) { + for (int i = 0; i < reply.pub_messages_size(); i++) { + const auto &msg = reply.pub_messages(i); + const auto oid = + ObjectID::FromBinary(msg.worker_object_eviction_message().object_id()); + object_ids_published.emplace(oid); + max_processed_seuquence_id = + std::max(msg.sequence_id(), max_processed_seuquence_id); + } + reply = rpc::PubsubLongPollingReply(); + }; auto max_publish_size = 5; auto subscriber = std::make_shared( subscriber_id_, [this]() { return current_time_; }, subscriber_timeout_ms_, - max_publish_size); + max_publish_size, + kDefaultPublisherId); subscriber->ConnectToSubscriber(request_, &reply, send_reply_callback); absl::flat_hash_set published_objects; @@ -393,8 +435,9 @@ TEST_F(PublisherTest, TestSubscriberBatchSize) { for (int i = 0; i < 10; i++) { auto oid = ObjectID::FromRandom(); oids.push_back(oid); - subscriber->QueueMessage(std::make_shared(GeneratePubMessage(oid)), - /*try_publish=*/false); + subscriber->QueueMessage( + std::make_shared(GeneratePubMessage(oid, GetNextSequenceId())), + /*try_publish=*/false); 
published_objects.emplace(oid); } @@ -409,6 +452,8 @@ TEST_F(PublisherTest, TestSubscriberBatchSize) { } // Remaining messages are published upon polling. + ASSERT_EQ(max_processed_seuquence_id, max_publish_size); + request_.set_max_processed_sequence_id(max_processed_seuquence_id); subscriber->ConnectToSubscriber(request_, &reply, send_reply_callback); for (int i = 0; i < 10; i++) { ASSERT_TRUE(object_ids_published.contains(oids[i])); @@ -426,7 +471,11 @@ TEST_F(PublisherTest, TestSubscriberActiveTimeout) { std::function failure) { reply_cnt++; }; auto subscriber = std::make_shared( - subscriber_id_, [this]() { return current_time_; }, subscriber_timeout_ms_, 10); + subscriber_id_, + [this]() { return current_time_; }, + subscriber_timeout_ms_, + 10, + kDefaultPublisherId); subscriber->ConnectToSubscriber(request_, &reply, send_reply_callback); @@ -449,6 +498,7 @@ TEST_F(PublisherTest, TestSubscriberActiveTimeout) { ASSERT_EQ(reply_cnt, 1); // New connection is established. + reply = rpc::PubsubLongPollingReply(); subscriber->ConnectToSubscriber(request_, &reply, send_reply_callback); ASSERT_TRUE(subscriber->IsActive()); ASSERT_TRUE(subscriber->ConnectionExists()); @@ -460,7 +510,8 @@ TEST_F(PublisherTest, TestSubscriberActiveTimeout) { // A message is published, so the connection is refreshed. auto oid = ObjectID::FromRandom(); - subscriber->QueueMessage(std::make_shared(GeneratePubMessage(oid))); + subscriber->QueueMessage( + std::make_shared(GeneratePubMessage(oid, GetNextSequenceId()))); ASSERT_TRUE(subscriber->IsActive()); ASSERT_FALSE(subscriber->ConnectionExists()); ASSERT_EQ(reply_cnt, 2); @@ -471,6 +522,13 @@ TEST_F(PublisherTest, TestSubscriberActiveTimeout) { ASSERT_TRUE(subscriber->IsActive()); ASSERT_FALSE(subscriber->ConnectionExists()); + // There is one message to be GCed. + ASSERT_FALSE(subscriber->CheckNoLeaks()); + + // Notify that message 1 is safe to be GCed. 
+ request_.set_max_processed_sequence_id(1); + reply = rpc::PubsubLongPollingReply(); + subscriber->ConnectToSubscriber(request_, &reply, send_reply_callback); ASSERT_TRUE(subscriber->CheckNoLeaks()); } @@ -485,7 +543,11 @@ TEST_F(PublisherTest, TestSubscriberDisconnected) { std::function failure) { reply_cnt++; }; auto subscriber = std::make_shared( - subscriber_id_, [this]() { return current_time_; }, subscriber_timeout_ms_, 10); + subscriber_id_, + [this]() { return current_time_; }, + subscriber_timeout_ms_, + 10, + kDefaultPublisherId); // Suppose the new connection is removed. subscriber->ConnectToSubscriber(request_, &reply, send_reply_callback); @@ -543,7 +605,11 @@ TEST_F(PublisherTest, TestSubscriberTimeoutComplicated) { std::function failure) { reply_cnt++; }; auto subscriber = std::make_shared( - subscriber_id_, [this]() { return current_time_; }, subscriber_timeout_ms_, 10); + subscriber_id_, + [this]() { return current_time_; }, + subscriber_timeout_ms_, + 10, + kDefaultPublisherId); // Suppose the new connection is removed. subscriber->ConnectToSubscriber(request_, &reply, send_reply_callback); @@ -596,7 +662,7 @@ TEST_F(PublisherTest, TestBasicSingleSubscriber) { publisher_->ConnectToSubscriber(request_, &reply, send_reply_callback); publisher_->RegisterSubscription( rpc::ChannelType::WORKER_OBJECT_EVICTION, subscriber_id_, oid.Binary()); - publisher_->Publish(GeneratePubMessage(oid)); + publisher_->Publish(GeneratePubMessage(oid, 0)); ASSERT_EQ(batched_ids[0], oid); } @@ -742,14 +808,17 @@ TEST_F(PublisherTest, TestMultiSubscribers) { TEST_F(PublisherTest, TestBatch) { // Test if published objects are batched properly. 
std::vector batched_ids; - send_reply_callback = [this, &batched_ids](Status status, - std::function success, - std::function failure) { + int64_t max_processed_sequence_id = 0; + send_reply_callback = [this, &batched_ids, &max_processed_sequence_id]( + Status status, + std::function success, + std::function failure) { for (int i = 0; i < reply.pub_messages_size(); i++) { const auto &msg = reply.pub_messages(i); const auto oid = ObjectID::FromBinary(msg.worker_object_eviction_message().object_id()); batched_ids.push_back(oid); + max_processed_sequence_id = std::max(max_processed_sequence_id, msg.sequence_id()); } reply = rpc::PubsubLongPollingReply(); }; @@ -766,6 +835,7 @@ TEST_F(PublisherTest, TestBatch) { ASSERT_EQ(batched_ids.size(), 0); // Now connection is initiated, and all oids are published. + request_.set_max_processed_sequence_id(max_processed_sequence_id); publisher_->ConnectToSubscriber(request_, &reply, send_reply_callback); for (int i = 0; i < num_oids; i++) { const auto oid_test = oids[i]; @@ -783,7 +853,10 @@ TEST_F(PublisherTest, TestBatch) { rpc::ChannelType::WORKER_OBJECT_EVICTION, subscriber_id_, oid.Binary()); publisher_->Publish(GeneratePubMessage(oid)); } + request_.set_max_processed_sequence_id(max_processed_sequence_id); publisher_->ConnectToSubscriber(request_, &reply, send_reply_callback); + ASSERT_EQ(num_oids, oids.size()); + ASSERT_EQ(num_oids, batched_ids.size()); for (int i = 0; i < num_oids; i++) { const auto oid_test = oids[i]; const auto published_oid = batched_ids[i]; @@ -1032,18 +1105,21 @@ TEST_F(PublisherTest, TestMaxBufferSizePerEntity) { rpc::PubMessage pub_message; pub_message.set_key_id(job_id.Binary()); pub_message.set_channel_type(rpc::ChannelType::RAY_ERROR_INFO_CHANNEL); + pub_message.set_sequence_id(GetNextSequenceId()); pub_message.mutable_error_info_message()->set_error_message(std::string(4000, 'a')); // Buffer is available. 
- EXPECT_TRUE(subscription_index.Publish(pub_message)); + EXPECT_TRUE(subscription_index.Publish(std::make_shared(pub_message))); // Buffer is still available. pub_message.mutable_error_info_message()->set_error_message(std::string(4000, 'b')); - EXPECT_TRUE(subscription_index.Publish(pub_message)); + pub_message.set_sequence_id(GetNextSequenceId()); + EXPECT_TRUE(subscription_index.Publish(std::make_shared(pub_message))); // Buffer is full. pub_message.mutable_error_info_message()->set_error_message(std::string(4000, 'c')); - EXPECT_TRUE(subscription_index.Publish(pub_message)); + pub_message.set_sequence_id(GetNextSequenceId()); + EXPECT_TRUE(subscription_index.Publish(std::make_shared(pub_message))); // Subscriber receives the last two messages. 1st message is dropped. auto reply = FlushSubscriber(subscriber); @@ -1055,7 +1131,8 @@ TEST_F(PublisherTest, TestMaxBufferSizePerEntity) { // A message larger than the buffer limit can still be published. pub_message.mutable_error_info_message()->set_error_message(std::string(14000, 'd')); - EXPECT_TRUE(subscription_index.Publish(pub_message)); + pub_message.set_sequence_id(GetNextSequenceId()); + EXPECT_TRUE(subscription_index.Publish(std::make_shared(pub_message))); reply = FlushSubscriber(subscriber); ASSERT_EQ(reply.pub_messages().size(), 1); EXPECT_EQ(reply.pub_messages(0).error_info_message().error_message(), @@ -1074,19 +1151,22 @@ TEST_F(PublisherTest, TestMaxBufferSizeAllEntities) { pub_message.set_key_id("aaa"); pub_message.set_channel_type(rpc::ChannelType::RAY_ERROR_INFO_CHANNEL); pub_message.mutable_error_info_message()->set_error_message(std::string(4000, 'a')); + pub_message.set_sequence_id(GetNextSequenceId()); // Buffer is available. - EXPECT_TRUE(subscription_index.Publish(pub_message)); + EXPECT_TRUE(subscription_index.Publish(std::make_shared(pub_message))); // Buffer is still available. 
pub_message.set_key_id("bbb"); pub_message.mutable_error_info_message()->set_error_message(std::string(4000, 'b')); - EXPECT_TRUE(subscription_index.Publish(pub_message)); + pub_message.set_sequence_id(GetNextSequenceId()); + EXPECT_TRUE(subscription_index.Publish(std::make_shared(pub_message))); // Buffer is full. pub_message.set_key_id("ccc"); pub_message.mutable_error_info_message()->set_error_message(std::string(4000, 'c')); - EXPECT_TRUE(subscription_index.Publish(pub_message)); + pub_message.set_sequence_id(GetNextSequenceId()); + EXPECT_TRUE(subscription_index.Publish(std::make_shared(pub_message))); auto reply = FlushSubscriber(subscriber); ASSERT_EQ(reply.pub_messages().size(), 2); diff --git a/src/ray/pubsub/test/subscriber_test.cc b/src/ray/pubsub/test/subscriber_test.cc index 0dcddf18f55a..2ed946e06eca 100644 --- a/src/ray/pubsub/test/subscriber_test.cc +++ b/src/ray/pubsub/test/subscriber_test.cc @@ -27,6 +27,8 @@ class MockWorkerClient : public pubsub::SubscriberClientInterface { void PubsubLongPolling( const rpc::PubsubLongPollingRequest &request, const rpc::ClientCallback &callback) override { + max_processed_sequence_id_ = request.max_processed_sequence_id(); + publisher_id_ = request.publisher_id(); long_polling_callbacks.push_back(callback); } @@ -52,20 +54,34 @@ class MockWorkerClient : public pubsub::SubscriberClientInterface { return r; } + void ResetSequenceId(int64_t start_sequence_id) { + sequence_id_ = start_sequence_id - 1; + } + + int64_t GetNextSequenceId() { return ++sequence_id_; } + int64_t GetReportedMaxProcessedSequenceId() { return max_processed_sequence_id_; } + bool ReplyLongPolling(rpc::ChannelType channel_type, std::vector &object_ids, - Status status = Status::OK()) { + std::vector sequence_ids, + Status status = Status::OK(), + std::string publisher_id = "") { if (long_polling_callbacks.empty()) { return false; } auto callback = long_polling_callbacks.front(); auto reply = rpc::PubsubLongPollingReply(); - for (const auto 
&object_id : object_ids) { + for (size_t i = 0; i < object_ids.size(); i++) { + const auto &object_id = object_ids.at(i); auto *new_pub_message = reply.add_pub_messages(); new_pub_message->set_key_id(object_id.Binary()); new_pub_message->set_channel_type(channel_type); + int64_t sequence_id = + sequence_ids.empty() ? GetNextSequenceId() : sequence_ids.at(i); + new_pub_message->set_sequence_id(sequence_id); } + reply.set_publisher_id(publisher_id.empty() ? publisher_id_ : publisher_id); callback(status, reply); long_polling_callbacks.pop_front(); return true; @@ -79,12 +95,14 @@ class MockWorkerClient : public pubsub::SubscriberClientInterface { auto callback = long_polling_callbacks.front(); auto reply = rpc::PubsubLongPollingReply(); + reply.set_publisher_id(publisher_id_); for (const auto &object_id : object_ids) { auto new_pub_message = reply.add_pub_messages(); new_pub_message->set_key_id(object_id.Binary()); new_pub_message->set_channel_type(channel_type); new_pub_message->mutable_failure_message(); + new_pub_message->set_sequence_id(GetNextSequenceId()); } callback(Status::OK(), reply); long_polling_callbacks.pop_front(); @@ -98,6 +116,9 @@ class MockWorkerClient : public pubsub::SubscriberClientInterface { std::deque> long_polling_callbacks; std::deque> command_batch_callbacks; std::queue requests_; + int64_t sequence_id_ = 0; + int64_t max_processed_sequence_id_ = 0; + std::string publisher_id_ = pubsub::PublisherID::FromRandom().Binary(); }; namespace pubsub { @@ -149,9 +170,12 @@ class SubscriberTest : public ::testing::Test { } bool ReplyLongPolling(rpc::ChannelType channel_type, - std::vector &object_ids, - Status status = Status::OK()) { - auto success = owner_client->ReplyLongPolling(channel_type, object_ids, status); + std::vector object_ids, + std::vector sequence_ids = {}, + Status status = Status::OK(), + std::string publiser_id = "") { + auto success = owner_client->ReplyLongPolling( + channel_type, object_ids, sequence_ids, status, publiser_id); 
// Need to call this to invoke callback when the reply comes. // The io service basically executes the queued handler in a blocking manner, and // reset should be called in order to run the poll_one again. @@ -160,6 +184,10 @@ class SubscriberTest : public ::testing::Test { return success; } + void ResetSequenceId(int64_t start_sequence_id = 1) { + owner_client->ResetSequenceId(start_sequence_id); + } + bool FailureMessagePublished(rpc::ChannelType channel_type, std::vector &object_ids) { auto published = owner_client->FailureMessagePublished(channel_type, object_ids); @@ -177,14 +205,14 @@ class SubscriberTest : public ::testing::Test { std::function(const rpc::Address &)> client_pool; std::shared_ptr subscriber_; - std::unordered_set object_subscribed_; + std::unordered_map object_subscribed_; std::unordered_set object_failed_to_subscribe_; rpc::ChannelType channel; }; TEST_F(SubscriberTest, TestBasicSubscription) { auto subscription_callback = [this](const rpc::PubMessage &msg) { - object_subscribed_.emplace(ObjectID::FromBinary(msg.key_id())); + object_subscribed_[ObjectID::FromBinary(msg.key_id())]++; }; auto failure_callback = EMPTY_FAILURE_CALLBACK; @@ -205,18 +233,111 @@ TEST_F(SubscriberTest, TestBasicSubscription) { std::vector objects_batched; objects_batched.push_back(object_id); ASSERT_TRUE(ReplyLongPolling(channel, objects_batched)); + // Make sure the long polling batch works as expected. + for (const auto &object_id : objects_batched) { + ASSERT_TRUE(object_subscribed_[object_id] == 1); + } + + // Publish the objects again, and subscriber should receive it. 
+ ASSERT_TRUE(ReplyLongPolling(channel, objects_batched)); + for (const auto &object_id : objects_batched) { + ASSERT_TRUE(object_subscribed_[object_id] == 2); + } + ASSERT_TRUE(subscriber_->Unsubscribe(channel, owner_addr, object_id.Binary())); ASSERT_TRUE(owner_client->ReplyCommandBatch()); ASSERT_FALSE(subscriber_->IsSubscribed(channel, owner_addr, object_id.Binary())); + // Here, once the long polling request is replied, the metadata is cleaned up. + ASSERT_TRUE(ReplyLongPolling(channel, objects_batched)); + ASSERT_TRUE(subscriber_->CheckNoLeaks()); +} + +TEST_F(SubscriberTest, TestIgnoreOutofOrderMessage) { + auto subscription_callback = [this](const rpc::PubMessage &msg) { + object_subscribed_[ObjectID::FromBinary(msg.key_id())]++; + }; + auto failure_callback = EMPTY_FAILURE_CALLBACK; + + const auto owner_addr = GenerateOwnerAddress(); + const auto object_id = ObjectID::FromRandom(); + const auto object_id1 = ObjectID::FromRandom(); + subscriber_->SubscribeChannel(std::make_unique(), + channel, + owner_addr, + /*subscribe_done_callback=*/nullptr, + subscription_callback, + failure_callback); + ASSERT_TRUE(owner_client->ReplyCommandBatch()); + + std::vector objects_batched; + objects_batched.push_back(object_id); + objects_batched.push_back(object_id1); // Make sure the long polling batch works as expected. + ASSERT_TRUE(ReplyLongPolling(channel, objects_batched)); + ASSERT_EQ(2, owner_client->GetReportedMaxProcessedSequenceId()); + for (const auto &object_id : objects_batched) { - ASSERT_TRUE(object_subscribed_.count(object_id) > 0); + ASSERT_TRUE(object_subscribed_[object_id] == 1); } - // Here, once the long polling request is replied, the metadata is cleaned up. + // By resetting the sequence_id, the message now come out of order, + // and the subscriber should ignore out of order message. 
+ ASSERT_TRUE(ReplyLongPolling(channel, objects_batched, {1, 2})); + ASSERT_EQ(2, owner_client->GetReportedMaxProcessedSequenceId()); + + // Make sure the long polling batch works as expected. + for (const auto &object_id : objects_batched) { + ASSERT_TRUE(object_subscribed_[object_id] == 1); + } + + // message arrives out of order (sequence_id 4 comes before 3), + // we will ignore message with sequence id 3. + ASSERT_TRUE(ReplyLongPolling(channel, objects_batched, {4, 3})); + ASSERT_TRUE(object_subscribed_[object_id] == 2); + ASSERT_TRUE(object_subscribed_[object_id1] == 1); + ASSERT_EQ(4, owner_client->GetReportedMaxProcessedSequenceId()); +} + +TEST_F(SubscriberTest, TestPublisherFailsOver) { + auto subscription_callback = [this](const rpc::PubMessage &msg) { + object_subscribed_[ObjectID::FromBinary(msg.key_id())]++; + }; + auto failure_callback = EMPTY_FAILURE_CALLBACK; + + const auto owner_addr = GenerateOwnerAddress(); + const auto object_id = ObjectID::FromRandom(); + const auto object_id1 = ObjectID::FromRandom(); + subscriber_->SubscribeChannel(std::make_unique(), + channel, + owner_addr, + /*subscribe_done_callback=*/nullptr, + subscription_callback, + failure_callback); + ASSERT_TRUE(owner_client->ReplyCommandBatch()); + + std::vector objects_batched; + objects_batched.push_back(object_id); + objects_batched.push_back(object_id1); + // Make sure the long polling batch works as expected. ASSERT_TRUE(ReplyLongPolling(channel, objects_batched)); - ASSERT_TRUE(subscriber_->CheckNoLeaks()); + ASSERT_EQ(2, owner_client->GetReportedMaxProcessedSequenceId()); + + for (const auto &object_id : objects_batched) { + ASSERT_TRUE(object_subscribed_[object_id] == 1); + } + + // By resetting the sequence_id, the message now come out of order, + // and the subscriber should ignore out of order message. 
+ ASSERT_TRUE(ReplyLongPolling(channel, objects_batched, {1, 2})); + ASSERT_EQ(2, owner_client->GetReportedMaxProcessedSequenceId()); + + auto new_publisher_id = NodeID::FromRandom().Binary(); + // if the publisher_id changes, we should reset both publisher_id and sequence_id. + ASSERT_TRUE(ReplyLongPolling( + channel, std::vector({object_id}), {1}, Status::OK(), new_publisher_id)); + ASSERT_EQ(1, owner_client->GetReportedMaxProcessedSequenceId()); + ASSERT_EQ(new_publisher_id, owner_client->publisher_id_); } TEST_F(SubscriberTest, TestSingleLongPollingWithMultipleSubscriptions) { @@ -225,7 +346,7 @@ TEST_F(SubscriberTest, TestSingleLongPollingWithMultipleSubscriptions) { /// auto subscription_callback = [this](const rpc::PubMessage &msg) { - object_subscribed_.emplace(ObjectID::FromBinary(msg.key_id())); + object_subscribed_[ObjectID::FromBinary(msg.key_id())]++; }; auto failure_callback = EMPTY_FAILURE_CALLBACK; @@ -253,8 +374,8 @@ TEST_F(SubscriberTest, TestSingleLongPollingWithMultipleSubscriptions) { // Make sure the long polling batch works as expected. 
for (const auto &object_id : objects_batched) { - // RAY_LOG(ERROR) << "haha " << object_subscribed_.count(object_id); - ASSERT_TRUE(object_subscribed_.count(object_id) > 0); + // RAY_LOG(ERROR) << "haha " << object_subscribed_[object_id]; + ASSERT_TRUE(object_subscribed_[object_id] > 0); } } @@ -264,7 +385,7 @@ TEST_F(SubscriberTest, TestMultiLongPollingWithTheSameSubscription) { /// auto subscription_callback = [this](const rpc::PubMessage &msg) { - object_subscribed_.emplace(ObjectID::FromBinary(msg.key_id())); + object_subscribed_[ObjectID::FromBinary(msg.key_id())]++; }; auto failure_callback = EMPTY_FAILURE_CALLBACK; @@ -285,7 +406,7 @@ TEST_F(SubscriberTest, TestMultiLongPollingWithTheSameSubscription) { std::vector objects_batched; objects_batched.push_back(object_id); ASSERT_TRUE(ReplyLongPolling(channel, objects_batched)); - ASSERT_TRUE(object_subscribed_.count(object_id) > 0); + ASSERT_TRUE(object_subscribed_[object_id] > 0); objects_batched.clear(); object_subscribed_.clear(); @@ -293,7 +414,7 @@ TEST_F(SubscriberTest, TestMultiLongPollingWithTheSameSubscription) { ASSERT_EQ(owner_client->GetNumberOfInFlightLongPollingRequests(), 1); objects_batched.push_back(object_id); ASSERT_TRUE(ReplyLongPolling(channel, objects_batched)); - ASSERT_TRUE(object_subscribed_.count(object_id) > 0); + ASSERT_TRUE(object_subscribed_[object_id] > 0); } TEST_F(SubscriberTest, TestCallbackNotInvokedForNonSubscribedObject) { @@ -302,7 +423,7 @@ TEST_F(SubscriberTest, TestCallbackNotInvokedForNonSubscribedObject) { /// auto subscription_callback = [this](const rpc::PubMessage &msg) { - object_subscribed_.emplace(ObjectID::FromBinary(msg.key_id())); + object_subscribed_[ObjectID::FromBinary(msg.key_id())]++; }; auto failure_callback = EMPTY_FAILURE_CALLBACK; @@ -322,7 +443,7 @@ TEST_F(SubscriberTest, TestCallbackNotInvokedForNonSubscribedObject) { std::vector objects_batched; objects_batched.push_back(object_id_not_subscribed); ASSERT_TRUE(ReplyLongPolling(channel, 
objects_batched)); - ASSERT_EQ(object_subscribed_.count(object_id), 0); + ASSERT_EQ(object_subscribed_[object_id], 0); } TEST_F(SubscriberTest, TestSubscribeChannelEntities) { @@ -331,7 +452,7 @@ TEST_F(SubscriberTest, TestSubscribeChannelEntities) { /// auto subscription_callback = [this](const rpc::PubMessage &msg) { - object_subscribed_.emplace(ObjectID::FromBinary(msg.key_id())); + object_subscribed_[ObjectID::FromBinary(msg.key_id())]++; }; auto failure_callback = EMPTY_FAILURE_CALLBACK; @@ -352,7 +473,7 @@ TEST_F(SubscriberTest, TestSubscribeChannelEntities) { } ASSERT_TRUE(ReplyLongPolling(channel, objects_batched)); for (int i = 0; i < 5; ++i) { - ASSERT_EQ(object_subscribed_.count(objects_batched[i]), 1); + ASSERT_EQ(object_subscribed_[objects_batched[i]], 1); } objects_batched.clear(); object_subscribed_.clear(); @@ -366,7 +487,7 @@ TEST_F(SubscriberTest, TestSubscribeChannelEntities) { } ASSERT_TRUE(ReplyLongPolling(channel, objects_batched)); for (int i = 0; i < 10; ++i) { - ASSERT_EQ(object_subscribed_.count(objects_batched[i]), 1); + ASSERT_EQ(object_subscribed_[objects_batched[i]], 1); } // Unsubscribe from the channel. @@ -379,7 +500,7 @@ TEST_F(SubscriberTest, TestIgnoreBatchAfterUnsubscription) { /// auto subscription_callback = [this](const rpc::PubMessage &msg) { - object_subscribed_.emplace(ObjectID::FromBinary(msg.key_id())); + object_subscribed_[ObjectID::FromBinary(msg.key_id())]++; }; auto failure_callback = EMPTY_FAILURE_CALLBACK; @@ -400,7 +521,7 @@ TEST_F(SubscriberTest, TestIgnoreBatchAfterUnsubscription) { ASSERT_TRUE(ReplyLongPolling(channel, objects_batched)); // Make sure the batched object won't invoke the callback since it is already // unsubscribed before long polling is replied. - ASSERT_EQ(object_subscribed_.count(object_id), 0); + ASSERT_EQ(object_subscribed_[object_id], 0); // Make sure the long polling is not invoked since there's no more subscribed object to // this owner. 
ASSERT_EQ(owner_client->GetNumberOfInFlightLongPollingRequests(), 0); @@ -413,7 +534,7 @@ TEST_F(SubscriberTest, TestIgnoreBatchAfterUnsubscribeFromAll) { /// auto subscription_callback = [this](const rpc::PubMessage &msg) { - object_subscribed_.emplace(ObjectID::FromBinary(msg.key_id())); + object_subscribed_[ObjectID::FromBinary(msg.key_id())]++; }; auto failure_callback = EMPTY_FAILURE_CALLBACK; @@ -434,7 +555,7 @@ TEST_F(SubscriberTest, TestIgnoreBatchAfterUnsubscribeFromAll) { ASSERT_TRUE(ReplyLongPolling(channel, objects_batched)); // Make sure the returned object won't invoke the callback since the channel is already // unsubscribed before long polling is replied. - ASSERT_EQ(object_subscribed_.count(object_id), 0); + ASSERT_EQ(object_subscribed_[object_id], 0); // After the previous reply, no new long polling is invoked since the channel has been // unsubscribed. ASSERT_EQ(owner_client->GetNumberOfInFlightLongPollingRequests(), 0); @@ -443,7 +564,7 @@ TEST_F(SubscriberTest, TestIgnoreBatchAfterUnsubscribeFromAll) { TEST_F(SubscriberTest, TestLongPollingFailure) { auto subscription_callback = [this](const rpc::PubMessage &msg) { - object_subscribed_.emplace(ObjectID::FromBinary(msg.key_id())); + object_subscribed_[ObjectID::FromBinary(msg.key_id())]++; }; const auto owner_addr = GenerateOwnerAddress(); @@ -462,9 +583,9 @@ TEST_F(SubscriberTest, TestLongPollingFailure) { // Long polling failed. std::vector objects_batched; - ASSERT_TRUE(ReplyLongPolling(channel, objects_batched, Status::NotFound(""))); + ASSERT_TRUE(ReplyLongPolling(channel, objects_batched, {}, Status::NotFound(""))); // Callback is not invoked. - ASSERT_EQ(object_subscribed_.count(object_id), 0); + ASSERT_EQ(object_subscribed_[object_id], 0); // Failure callback is invoked. 
ASSERT_EQ(object_failed_to_subscribe_.count(object_id), 1); // Since the long polling is failed due to the publisher failure, we shouldn't have any @@ -480,7 +601,7 @@ TEST_F(SubscriberTest, TestUnsubscribeInSubscriptionCallback) { const auto object_id = ObjectID::FromBinary(msg.key_id()); subscriber_->Unsubscribe(channel, owner_addr, object_id.Binary()); ASSERT_TRUE(owner_client->ReplyCommandBatch()); - object_subscribed_.emplace(object_id); + object_subscribed_[object_id]++; }; auto failure_callback = [](const std::string &key_id, const Status &) { // This shouldn't be invoked in this test. @@ -761,7 +882,7 @@ TEST_F(SubscriberTest, TestCommandsCleanedUponPublishFailure) { std::vector objects_batched; // The publisher failed. In this case, the queue should be cleaned up. - ASSERT_TRUE(ReplyLongPolling(channel, objects_batched, Status::Invalid(""))); + ASSERT_TRUE(ReplyLongPolling(channel, objects_batched, {}, Status::Invalid(""))); // The reply from the first batch. ASSERT_TRUE(owner_client->ReplyCommandBatch()); // We shouldn't have the second batch request because the publisher is already dead and @@ -778,7 +899,7 @@ TEST_F(SubscriberTest, TestFailureMessagePublished) { /// is properly called in this scenario. /// auto subscription_callback = [this](const rpc::PubMessage &msg) { - object_subscribed_.emplace(ObjectID::FromBinary(msg.key_id())); + object_subscribed_[ObjectID::FromBinary(msg.key_id())]++; }; const auto owner_addr = GenerateOwnerAddress(); @@ -809,7 +930,7 @@ TEST_F(SubscriberTest, TestFailureMessagePublished) { objects_batched.push_back(object_id); ASSERT_TRUE(FailureMessagePublished(channel, objects_batched)); // Callback is not invoked. - ASSERT_EQ(object_subscribed_.count(object_id), 0); + ASSERT_EQ(object_subscribed_[object_id], 0); // Failure callback is invoked. ASSERT_EQ(object_failed_to_subscribe_.count(object_id), 1); // Since object2 is still subscribed, we should have the long polling requests. 
@@ -819,14 +940,14 @@ TEST_F(SubscriberTest, TestFailureMessagePublished) { objects_batched.clear(); objects_batched.push_back(object_id2); ASSERT_TRUE(FailureMessagePublished(channel, objects_batched)); - ASSERT_EQ(object_subscribed_.count(object_id2), 0); + ASSERT_EQ(object_subscribed_[object_id2], 0); ASSERT_EQ(object_failed_to_subscribe_.count(object_id2), 1); ASSERT_EQ(owner_client->GetNumberOfInFlightLongPollingRequests(), 0); } TEST_F(SubscriberTest, TestIsSubscribed) { auto subscription_callback = [this](const rpc::PubMessage &msg) { - object_subscribed_.emplace(ObjectID::FromBinary(msg.key_id())); + object_subscribed_[ObjectID::FromBinary(msg.key_id())]++; }; auto failure_callback = EMPTY_FAILURE_CALLBACK; const auto owner_addr = GenerateOwnerAddress(); From 1b6a892509b03f8699eeba0c7ce4a1c077b0a5e8 Mon Sep 17 00:00:00 2001 From: Jun Gong Date: Wed, 19 Apr 2023 10:08:18 -0700 Subject: [PATCH 009/424] [AIR] Add util to create a torch ddp process group for a list of workers. (#34202) Signed-off-by: Jun Gong --- python/ray/air/BUILD | 7 + python/ray/air/tests/test_util_torch_dist.py | 71 +++++++ python/ray/air/util/torch_dist.py | 186 +++++++++++++++++++ 3 files changed, 264 insertions(+) create mode 100644 python/ray/air/tests/test_util_torch_dist.py create mode 100644 python/ray/air/util/torch_dist.py diff --git a/python/ray/air/BUILD b/python/ray/air/BUILD index cc2886d267de..b3c22b8caa6e 100644 --- a/python/ray/air/BUILD +++ b/python/ray/air/BUILD @@ -173,6 +173,13 @@ py_test( deps = [":ml_lib"] ) +py_test( + name = "test_util_torch_dist", + size = "small", + srcs = ["tests/test_util_torch_dist.py"], + tags = ["team:ml", "gpu", "exclusive"], + deps = [":ml_lib"] +) # -------------------------------------------------------------------- # Tests from the python/ray/air/tests/execution directory. 
diff --git a/python/ray/air/tests/test_util_torch_dist.py b/python/ray/air/tests/test_util_torch_dist.py new file mode 100644 index 000000000000..fea665fa6594 --- /dev/null +++ b/python/ray/air/tests/test_util_torch_dist.py @@ -0,0 +1,71 @@ +import numpy as np +import pytest +import torch +import torch.distributed as dist + +import ray +from ray.air.util.torch_dist import ( + init_torch_dist_process_group, + shutdown_torch_dist_process_group, + TorchDistributedWorker, +) + + +def test_torch_process_group_gloo(): + @ray.remote + class TestWorker(TorchDistributedWorker): + def run(self): + tensor = torch.tensor([1.0]) + dist.all_reduce(tensor) + return tensor.numpy() + + workers = [TestWorker.remote() for _ in range(5)] + + init_torch_dist_process_group(workers, backend="gloo", init_method="env") + + reduced = ray.get([w.run.remote() for w in workers]) + + # One tensor from each worker. + assert len(reduced) == 5 + for r in reduced: + assert len(r) == 1 + assert r.dtype == np.float32 + # All-reduce. Each tensor contributed 1.0. 5 tensors in total. + assert r[0] == 5.0 + + shutdown_torch_dist_process_group(workers) + + +def test_torch_process_group_nccl(): + @ray.remote(num_gpus=2) + class TestWorker(TorchDistributedWorker): + def __init__(self): + super().__init__() + self.dev = f"cuda:{ray.get_gpu_ids()[0]}" + + def run(self): + tensor = torch.tensor([1.0]).to(self.dev) + dist.all_reduce(tensor) + return tensor.cpu().numpy() + + workers = [TestWorker.remote() for _ in range(2)] + + init_torch_dist_process_group(workers, backend="nccl", init_method="env") + + reduced = ray.get([w.run.remote() for w in workers]) + + # One tensor from each worker (2 workers total). + assert len(reduced) == 2 + for r in reduced: + assert len(r) == 1 + assert r.dtype == np.float32 + # All-reduce. Each tensor contributed 1.0. 5 tensors in total. 
+ assert r[0] == 2.0 + + shutdown_torch_dist_process_group(workers) + + +if __name__ == "__main__": + import sys + + sys.exit(pytest.main(["-v", "-x", __file__])) diff --git a/python/ray/air/util/torch_dist.py b/python/ray/air/util/torch_dist.py new file mode 100644 index 000000000000..83bb48e66142 --- /dev/null +++ b/python/ray/air/util/torch_dist.py @@ -0,0 +1,186 @@ +"""This file is modeled after ray/python/ray/train/torch/config.py + +The logics are duplicated right now to allow maximum flexibility for +setting up PyTorch DDP process groups outside the context of Ray Train. +Eventually, these use cases should be consolidated. +""" + +from abc import ABC +from collections import defaultdict +from datetime import timedelta +import os +import torch +import torch.distributed as dist +from typing import Callable, List, T + +import ray +from ray.actor import ActorHandle +from ray.train._internal.utils import get_address_and_port +from ray.train.constants import DEFAULT_NCCL_SOCKET_IFNAME +from ray.train.torch.train_loop_utils import get_device + + +class TorchDistributedWorker(ABC): + """Defines the interfaces required by the init_torch_dist_process_group(). + + This is modeled after RayTrainerWorker, which allows arbitrary functions + to be executed on a remote DDP worker. + """ + + def execute(self, func: Callable[..., T], *args, **kwargs) -> T: + """Executes the input function and returns the output. + + Args: + func: The function to execute. + args, kwargs: The arguments to pass into func. 
+ """ + return func(*args, **kwargs) + + +def _init_torch_distributed( + init_method: str, + backend: str, + rank: int, + world_size: int, + local_rank: int, + local_world_size: int, + master_addr: str, + master_port: str, + gpu_ids: List[int], +): + """Initialize torch distributed backend""" + if init_method == "env": + os.environ["MASTER_ADDR"] = str(master_addr) + os.environ["MASTER_PORT"] = str(master_port) + url = "env://" + elif init_method == "tcp": + url = f"tcp://{master_addr}:{master_port}" + else: + raise ValueError( + f"The provided init_method (" + f"{init_method}) is not supported. Must " + f"be either 'env' or 'tcp'." + ) + + if backend == "nccl": + # Same as in Ray Train + os.environ["NCCL_ASYNC_ERROR_HANDLING"] = "1" + # All workers on a same node should share the same set of + # visible GPUs. Otherwise they can't talk among themselves. + os.environ["CUDA_VISIBLE_DEVICES"] = ",".join(str(gid) for gid in gpu_ids) + if "NCCL_SOCKET_IFNAME" not in os.environ: + os.environ["NCCL_SOCKET_IFNAME"] = DEFAULT_NCCL_SOCKET_IFNAME + + dist.init_process_group( + backend=backend, + init_method=url, + rank=rank, + world_size=world_size, + timeout=timedelta(seconds=1800), + ) + + os.environ["RANK"] = str(rank) + os.environ["LOCAL_RANK"] = str(local_rank) + os.environ["WORLD_SIZE"] = str(world_size) + os.environ["LOCAL_WORLD_SIZE"] = str(local_world_size) + + +def _get_node_and_gpu_ids(): + """Returns the node_id and gpu_ids for this worker.""" + node_id = ray.get_runtime_context().get_node_id() + gpu_ids = ray.get_gpu_ids() + return node_id, gpu_ids + + +def init_torch_dist_process_group( + workers: List[ActorHandle], + backend: str = "gloo", + init_method: str = "env", +) -> List[int]: + """Initialize a torch distributed process group. + + Note: this util assumes that the order of the workers passed in + are their global ranks. + + Args: + workers: A list of TorchDistributedWorker actors. 
+ backend: The torch distributed backend to use, + possible choices are "gloo" or "nccl". + init_method: The initialization method to use, + possible choices are "env" or "tcp". + + Returns: + Local ranks on their respective nodes for the list of workers. + """ + if not dist.is_available(): + raise RuntimeError("Distributed torch is not available.") + + # Build a map from node_id to workers on that node. + node_and_gpu_ids = ray.get( + [w.execute.remote(_get_node_and_gpu_ids) for w in workers] + ) + # All the workers on a specific node. + node_to_workers = defaultdict(list) + # All the gpu ids visible to all the workers on a specific node. + node_to_gpu_ids = defaultdict(set) + for i, (node_id, gpu_ids) in enumerate(node_and_gpu_ids): + node_to_workers[node_id].append(i) + # Force list. + if not isinstance(gpu_ids, list): + gpu_ids = [gpu_ids] + # It is possible for a worker to have access to multiple GPUs. + for gpu_id in gpu_ids: + node_to_gpu_ids[node_id].add(gpu_id) + + # Assume the first worker is the master. + master_addr, master_port = ray.get(workers[0].execute.remote(get_address_and_port)) + + setup_futures = [] + world_size = len(workers) + local_ranks = [] + for rank, worker in enumerate(workers): + node_id = node_and_gpu_ids[rank][0] + local_rank = node_to_workers[node_id].index(rank) + local_world_size = len(node_to_workers[node_id]) + setup_futures.append( + worker.execute.remote( + _init_torch_distributed, + init_method=init_method, + backend=backend, + rank=rank, + world_size=world_size, + local_rank=local_rank, + local_world_size=local_world_size, + master_addr=master_addr, + master_port=master_port, + # list(set) will sort the gpu ids, so VISIBLE_CUDA_DEVICES + # is always sorted. + gpu_ids=list(node_to_gpu_ids[node_id]), + ) + ) + local_ranks.append(local_rank) + + # Wait for all workers to join the process group. 
+ ray.get(setup_futures) + + return local_ranks + + +def _shutdown_torch_distributed(): + """Shutdown torch distributed backend""" + dist.destroy_process_group() + + if not torch.cuda.is_available(): + return + + # Clean up cuda memory. + devices = get_device() + if not isinstance(devices, list): + devices = [devices] + for device in devices: + with torch.cuda.device(device): + torch.cuda.empty_cache() + + +def shutdown_torch_dist_process_group(workers: List[ActorHandle]): + ray.get([w.execute.remote(_shutdown_torch_distributed) for w in workers]) From 6b1090aa31f63d2b8d5aa99ce4f2605b7a3d4fbe Mon Sep 17 00:00:00 2001 From: Cuong Nguyen <128072568+can-anyscale@users.noreply.github.com> Date: Wed, 19 Apr 2023 10:42:57 -0700 Subject: [PATCH 010/424] [CI][Core] Set some GCE smoke tests to run on manual frequency (#34516) I noticed some GCE smoke versions are run on nightly. Let's move them to run on manual instead, since we don't want to spend the cost on run them on an automatic cadence yet. 
--------- Signed-off-by: Cuong Nguyen --- release/ray_release/config.py | 2 +- release/ray_release/tests/test_config.py | 2 +- release/release_tests.yaml | 7 ++++++- 3 files changed, 8 insertions(+), 3 deletions(-) diff --git a/release/ray_release/config.py b/release/ray_release/config.py index 47266b02f970..8697f992d7d0 100644 --- a/release/ray_release/config.py +++ b/release/ray_release/config.py @@ -98,7 +98,7 @@ def parse_test_definition(test_definitions: List[TestDefinition]) -> List[Test]: ) test = copy.deepcopy(test_definition) test["name"] = f'{test["name"]}.{variation.pop("__suffix__")}' - test.update(variation) + test = deep_update(test, variation) tests.append(test) return tests diff --git a/release/ray_release/tests/test_config.py b/release/ray_release/tests/test_config.py index 80915758b572..a544c68f3c56 100644 --- a/release/ray_release/tests/test_config.py +++ b/release/ray_release/tests/test_config.py @@ -65,7 +65,6 @@ def test_parse_test_definition(): - __suffix__: aws - __suffix__: gce cluster: - cluster_env: env_gce.yaml cluster_compute: compute_gce.yaml """ ) @@ -79,6 +78,7 @@ def test_parse_test_definition(): assert not validate_test(gce_test, schema) assert aws_test["name"] == "sample_test.aws" assert gce_test["cluster"]["cluster_compute"] == "compute_gce.yaml" + assert gce_test["cluster"]["cluster_env"] == "env.yaml" invalid_test_definition = test_definitions[0] # Intentionally make the test definition invalid by create an empty 'variations' # field. 
Check that the parser throws exception at runtime diff --git a/release/release_tests.yaml b/release/release_tests.yaml index 342f43a456bc..29f6843ce331 100644 --- a/release/release_tests.yaml +++ b/release/release_tests.yaml @@ -1670,7 +1670,6 @@ env: gce frequency: manual cluster: - cluster_env: app_config.yaml cluster_compute: tpl_gce_4x8.yaml alert: tune_tests @@ -4201,6 +4200,8 @@ cluster: cluster_env: stress_tests/state_api_app_config.yaml cluster_compute: stress_tests/stress_tests_compute_large_gce.yaml + smoke_test: + frequency: manual - name: shuffle_20gb_with_state_api @@ -4264,6 +4265,8 @@ cluster: cluster_env: stress_tests/stress_tests_app_config.yaml cluster_compute: stress_tests/stress_tests_compute_gce.yaml + smoke_test: + frequency: manual - name: stress_test_dead_actors group: core-daily-test @@ -4303,6 +4306,8 @@ cluster: cluster_env: stress_tests/stress_tests_app_config.yaml cluster_compute: stress_tests/stress_tests_compute_gce.yaml + smoke_test: + frequency: manual # The full test is not stable, so run the smoke test only. # See https://github.com/ray-project/ray/issues/23244. From ccb6a980680a1605e537eab4fb3b47fce47dedd3 Mon Sep 17 00:00:00 2001 From: Cuong Nguyen <128072568+can-anyscale@users.noreply.github.com> Date: Wed, 19 Apr 2023 11:12:34 -0700 Subject: [PATCH 011/424] [CI] Fix some chaos test configurations (#34571) Some GCE chaos test configurations are using aws configs. Change them to the equivalence GCE. Also use the more powerful n2 instead of e2 machine. 
Signed-off-by: Cuong Nguyen --- .../dask_on_ray_stress_compute_gce.yaml | 15 ++++++++------- .../dataset/pipelined_ingestion_compute_gce.yaml | 13 +++++++------ ...s_large_scale_compute_small_instances_gce.yaml | 4 ++-- 3 files changed, 17 insertions(+), 15 deletions(-) diff --git a/release/nightly_tests/dask_on_ray/dask_on_ray_stress_compute_gce.yaml b/release/nightly_tests/dask_on_ray/dask_on_ray_stress_compute_gce.yaml index 7c0c9098a4b7..2302c7030951 100644 --- a/release/nightly_tests/dask_on_ray/dask_on_ray_stress_compute_gce.yaml +++ b/release/nightly_tests/dask_on_ray/dask_on_ray_stress_compute_gce.yaml @@ -1,14 +1,15 @@ cloud_id: {{env["ANYSCALE_CLOUD_ID"]}} region: us-west-1 allowed_azs: -- us-west1-c + - us-west1-c -aws: - BlockDeviceMappings: - - DeviceName: /dev/sda1 - Ebs: - DeleteOnTermination: true - VolumeSize: 500 +gcp_advanced_configurations_json: + instance_properties: + disks: + - boot: true + auto_delete: true + initialize_params: + disk_size_gb: 500 head_node_type: name: head_node diff --git a/release/nightly_tests/dataset/pipelined_ingestion_compute_gce.yaml b/release/nightly_tests/dataset/pipelined_ingestion_compute_gce.yaml index dc4aea7d096a..4c9c2a497ccd 100644 --- a/release/nightly_tests/dataset/pipelined_ingestion_compute_gce.yaml +++ b/release/nightly_tests/dataset/pipelined_ingestion_compute_gce.yaml @@ -5,12 +5,13 @@ allowed_azs: max_workers: 999 -aws: - BlockDeviceMappings: - - DeviceName: /dev/sda1 - Ebs: - DeleteOnTermination: true - VolumeSize: 500 +gcp_advanced_configurations_json: + instance_properties: + disks: + - boot: true + auto_delete: true + initialize_params: + disk_size_gb: 500 head_node_type: name: head_node diff --git a/release/nightly_tests/shuffle/datasets_large_scale_compute_small_instances_gce.yaml b/release/nightly_tests/shuffle/datasets_large_scale_compute_small_instances_gce.yaml index 7408ca8f065a..f9c1742fcbda 100644 --- a/release/nightly_tests/shuffle/datasets_large_scale_compute_small_instances_gce.yaml 
+++ b/release/nightly_tests/shuffle/datasets_large_scale_compute_small_instances_gce.yaml @@ -13,11 +13,11 @@ gcp_advanced_configurations_json: head_node_type: name: head_node - instance_type: e2-standard-16 # aws m5.4xlarge + instance_type: n2-standard-16 # aws m5.4xlarge worker_node_types: - name: worker_node - instance_type: e2-standard-16 # aws m5.4xlarge + instance_type: n2-standard-16 # aws m5.4xlarge min_workers: 19 max_workers: 19 use_spot: false From 3764c7247aac592a97721914a42ca57ad1143d3a Mon Sep 17 00:00:00 2001 From: xwjiang2010 <87673679+xwjiang2010@users.noreply.github.com> Date: Wed, 19 Apr 2023 11:17:54 -0700 Subject: [PATCH 012/424] [release] Make sure that test code matches the installed wheel. (#30156) Signed-off-by: xwjiang2010 --- release/ray_release/scripts/build_pipeline.py | 4 ++++ release/ray_release/tests/test_wheels.py | 11 +++++++++++ release/ray_release/wheels.py | 9 +++++++++ release/run_release_test.sh | 19 ++++++++++++++++++- 4 files changed, 42 insertions(+), 1 deletion(-) diff --git a/release/ray_release/scripts/build_pipeline.py b/release/ray_release/scripts/build_pipeline.py index c184e7f3b58e..28a4fe8ce0d2 100644 --- a/release/ray_release/scripts/build_pipeline.py +++ b/release/ray_release/scripts/build_pipeline.py @@ -22,6 +22,7 @@ find_and_wait_for_ray_wheels_url, find_ray_wheels_url, get_buildkite_repo_branch, + parse_commit_from_wheel_url, ) PIPELINE_ARTIFACT_PATH = "/tmp/pipeline_artifacts" @@ -185,6 +186,9 @@ def main(test_collection_file: Optional[str] = None, no_clone_repo: bool = False else: this_ray_wheels_url = ray_wheels_url + ray_commit = parse_commit_from_wheel_url(this_ray_wheels_url) + if ray_commit: + env.update({"RAY_COMMIT_OF_WHEEL": ray_commit}) step = get_step( test, report=report, diff --git a/release/ray_release/tests/test_wheels.py b/release/ray_release/tests/test_wheels.py index 75dc068952a5..fb62f8ba2093 100644 --- a/release/ray_release/tests/test_wheels.py +++ 
b/release/ray_release/tests/test_wheels.py @@ -19,6 +19,7 @@ is_wheels_url_matching_ray_verison, get_wheels_filename, maybe_rewrite_wheels_url, + parse_commit_from_wheel_url, ) @@ -252,5 +253,15 @@ def test_url_exist(): assert not url_exists("invalid://somewhere") +def test_parse_commit_from_wheel_url(): + url = ( + "https://s3-us-west-2.amazonaws.com/ray-wheels/master/" + "0e0c15065507f01e8bfe78e49b0d0de063f81164/" + "ray-3.0.0.dev0-cp37-cp37m-manylinux2014_x86_64.whl" + ) + expected_commit = "0e0c15065507f01e8bfe78e49b0d0de063f81164" + assert parse_commit_from_wheel_url(url) == expected_commit + + if __name__ == "__main__": sys.exit(pytest.main(["-v", __file__])) diff --git a/release/ray_release/wheels.py b/release/ray_release/wheels.py index 0ce5344a7852..92aaa02a155a 100644 --- a/release/ray_release/wheels.py +++ b/release/ray_release/wheels.py @@ -437,3 +437,12 @@ def install_matching_ray_locally(ray_wheels: Optional[str]): for module_name in RELOAD_MODULES: if module_name in sys.modules: importlib.reload(sys.modules[module_name]) + + +def parse_commit_from_wheel_url(url: str) -> str: + # url is expected to be in the format of + # https://s3-us-west-2.amazonaws.com/ray-wheels/master/0e0c15065507f01e8bfe78e49b0d0de063f81164/ray-3.0.0.dev0-cp37-cp37m-manylinux2014_x86_64.whl # noqa + regex = r"/([0-9a-f]{40})/" + match = re.search(regex, url) + if match: + return match.group(1) diff --git a/release/run_release_test.sh b/release/run_release_test.sh index 367d2fb8ceb9..52b157a80c8f 100755 --- a/release/run_release_test.sh +++ b/release/run_release_test.sh @@ -69,8 +69,25 @@ fi if [ -z "${NO_CLONE}" ]; then TMPDIR=$(mktemp -d -t release-XXXXXXXXXX) echo "Cloning test repo ${RAY_TEST_REPO} branch ${RAY_TEST_BRANCH}" - git clone --depth 1 -b "${RAY_TEST_BRANCH}" "${RAY_TEST_REPO}" "${TMPDIR}" + git clone -b "${RAY_TEST_BRANCH}" "${RAY_TEST_REPO}" "${TMPDIR}" pushd "${TMPDIR}/release" || true + HEAD_COMMIT=$(git rev-parse HEAD) + echo "The cloned test repo has head 
commit of ${HEAD_COMMIT}" + + # We only do this if RAY_TEST_REPO and RAY_TEST_BRANCH are pointing to ray master. + # Theoretically, release manager may also run into this issue when manually triggering + # release test runs. But cherry-picks are rare and thus it's less likely to run into + # this racing condition, ignoring for now. + if [ "${RAY_TEST_REPO}" == "https://github.com/ray-project/ray.git" ] && \ + [[ "${PARSED_RAY_WHEELS}" == *"master"* ]] && \ + [ "${RAY_TEST_BRANCH-}" == "master" ] && [ -n "${RAY_COMMIT_OF_WHEEL-}" ] && \ + [ "${HEAD_COMMIT}" != "${RAY_COMMIT_OF_WHEEL}" ]; then + echo "The checked out test code doesn't match with the installed wheel. \ +This is likely due to a racing condition when a PR is landed between \ +a wheel is installed and test code is checked out." + echo "Hard resetting from ${HEAD_COMMIT} to ${RAY_COMMIT_OF_WHEEL}." + git reset --hard "${RAY_COMMIT_OF_WHEEL}" + fi fi if [ -z "${NO_INSTALL}" ]; then From abd210f0da01e2742953971bbb43075327f822d0 Mon Sep 17 00:00:00 2001 From: xwjiang2010 <87673679+xwjiang2010@users.noreply.github.com> Date: Wed, 19 Apr 2023 11:21:39 -0700 Subject: [PATCH 013/424] [air-output] minor fix to print configuration on start. 
(#34575) Signed-off-by: xwjiang2010 --- python/ray/tune/experimental/output.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/python/ray/tune/experimental/output.py b/python/ray/tune/experimental/output.py index 726e4696b01c..37d2ba5418c8 100644 --- a/python/ray/tune/experimental/output.py +++ b/python/ray/tune/experimental/output.py @@ -736,7 +736,8 @@ def on_trial_start(self, iteration: int, trials: List[Trial], trial: Trial, **in ] ) ) - self._print_config(trial) + if has_config: + self._print_config(trial) class TuneResultProgressCallback(AirResultProgressCallback): From 4de3c415dac6b22e36061da38467b1f7c88e22ac Mon Sep 17 00:00:00 2001 From: Cade Daniel Date: Wed, 19 Apr 2023 11:26:21 -0700 Subject: [PATCH 014/424] [Core] Deflake test_advanced_9 (#34410) Looks like gcs server proc doesn't go back to original num_fds; it goes lower. output from my machine: >> 222 # before starting worker procs (A pid=28851) HELLO ['WORLD', 'WORLD', 'WORLD', 'WORLD', 'WORLD', 'WORLD', 'WORLD', 'WORLD', 'WORLD', 'WORLD'] >> 250 # with worker procs >> 217 >> 216 >> 213 >> 212 >> 207 >> 206 # after work procs die. >> 206 >> 208 # Not sure why it goes up again >> 208 # Remains at 208, times out This PR deflakes the test, but I don't know enough about gcs server to say if this is a good fix or not. 
--- python/ray/tests/test_advanced_9.py | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/python/ray/tests/test_advanced_9.py b/python/ray/tests/test_advanced_9.py index 369370bac091..b61e5aac9216 100644 --- a/python/ray/tests/test_advanced_9.py +++ b/python/ray/tests/test_advanced_9.py @@ -280,7 +280,7 @@ def get_gcs_num_of_connections(): time.sleep(10) - curr_fds = get_gcs_num_of_connections() + fds_without_workers = get_gcs_num_of_connections() @ray.remote class A: @@ -289,27 +289,27 @@ def ready(self): return "WORLD" num_of_actors = 10 - a = [A.remote() for _ in range(num_of_actors)] - print(ray.get([t.ready.remote() for t in a])) + actors = [A.remote() for _ in range(num_of_actors)] + print(ray.get([t.ready.remote() for t in actors])) - # Kill the actor - del a + # Kill the actors + del actors - # TODO(clarng):remove this once prestart works with actors. - # ray_start_cluster defaults to one cpu, which prestarts one worker. - FD_PER_WORKER = 2 # Make sure the # of fds opened by the GCS dropped. - wait_for_condition(lambda: get_gcs_num_of_connections() + FD_PER_WORKER == curr_fds) + # This assumes worker processes are not created after the actor worker + # processes die. + wait_for_condition(lambda: get_gcs_num_of_connections() <= fds_without_workers) + num_fds_after_workers_die = get_gcs_num_of_connections() n = cluster.add_node(wait=True) # Make sure the # of fds opened by the GCS increased. - wait_for_condition(lambda: get_gcs_num_of_connections() + FD_PER_WORKER > curr_fds) + wait_for_condition(lambda: get_gcs_num_of_connections() > num_fds_after_workers_die) cluster.remove_node(n) # Make sure the # of fds opened by the GCS dropped. 
- wait_for_condition(lambda: get_gcs_num_of_connections() + FD_PER_WORKER == curr_fds) + wait_for_condition(lambda: get_gcs_num_of_connections() <= fds_without_workers) @pytest.mark.parametrize( From 9d9673c9fe58b9ca59323b4b1e0d2916969f6102 Mon Sep 17 00:00:00 2001 From: Eric Liang Date: Wed, 19 Apr 2023 13:34:05 -0700 Subject: [PATCH 015/424] [data] Standardize on Arrow types for schema() in strict mode Signed-off-by: Eric Liang --- python/ray/data/datastream.py | 38 +++++++++++++++++++++-- python/ray/data/tests/test_strict_mode.py | 20 ++++++++++-- 2 files changed, 53 insertions(+), 5 deletions(-) diff --git a/python/ray/data/datastream.py b/python/ray/data/datastream.py index 82c0dcc0be28..ac39aac9113d 100644 --- a/python/ray/data/datastream.py +++ b/python/ray/data/datastream.py @@ -4666,8 +4666,11 @@ class MaterializedDatastream(Datastream, Generic[T]): @PublicAPI(stability="beta") class Schema: """Datastream schema. + Attributes: names: List of column names of this Datastream. + types: List of Arrow types of the Datastream. Note that the "object" type is + not Arrow compatible and hence will be returned as `object`. base_schema: The underlying Arrow or Pandas schema. """ @@ -4679,10 +4682,39 @@ def names(self) -> List[str]: """Lists the columns of this Datastream.""" return self.base_schema.names + @property + def types(self) -> List[Union[Literal[object], "pyarrow.DataType"]]: + """Lists the types of this Datastream in Arrow format + + For non-Arrow compatible types, we return "object". + """ + import pyarrow as pa + from ray.data.extensions import TensorDtype, ArrowTensorType + + if isinstance(self.base_schema, pa.lib.Schema): + return list(self.base_schema.types) + + arrow_types = [] + for dtype in self.base_schema.types: + if isinstance(dtype, TensorDtype): + # Manually convert our Pandas tensor extension type to Arrow. 
+ arrow_types.append( + ArrowTensorType( + shape=dtype._shape, dtype=pa.from_numpy_dtype(dtype._dtype) + ) + ) + else: + try: + arrow_types.append(pa.from_numpy_dtype(dtype)) + except pa.ArrowNotImplementedError: + arrow_types.append(object) + except Exception: + logger.exception(f"Error converting dtype {dtype} to Arrow.") + arrow_types.append(None) + return arrow_types + def __str__(self): - # TODO(ekl) we should canonicalize Pandas vs Pyarrow dtypes, which will be - # possible one we support Python objects in Arrow via an extension type. - return f"Schema({dict(zip(self.base_schema.names, self.base_schema.types))})" + return f"Schema({dict(zip(self.names, self.types))})" def __repr__(self): return str(self) diff --git a/python/ray/data/tests/test_strict_mode.py b/python/ray/data/tests/test_strict_mode.py index a1d7c92d7c50..268bd0fed3d6 100644 --- a/python/ray/data/tests/test_strict_mode.py +++ b/python/ray/data/tests/test_strict_mode.py @@ -143,6 +143,23 @@ def test_strict_schema(ray_start_regular_shared): ds = ray.data.from_items([{"x": 2}]) schema = ds.schema() assert isinstance(schema.base_schema, pyarrow.lib.Schema) + assert str(schema) == "Schema({'x': DataType(int64)})" + + ds = ray.data.from_items([{"x": 2, "y": [1, 2]}]) + schema = ds.schema() + assert isinstance(schema.base_schema, pyarrow.lib.Schema) + assert ( + str(schema) + == "Schema({'x': DataType(int64), 'y': ListType(list)})" + ) + + ds = ray.data.from_items([{"x": 2, "y": object(), "z": [1, 2]}]) + schema = ds.schema() + assert isinstance(schema.base_schema, PandasBlockSchema) + assert str(schema) == ( + "Schema({'x': DataType(int64), 'y': " + ", 'z': })" + ) ds = ray.data.from_numpy(np.ones((100, 10))) schema = ds.schema() @@ -150,8 +167,7 @@ def test_strict_schema(ray_start_regular_shared): assert str(schema) == "Schema({'data': numpy.ndarray(shape=(10,), dtype=double)})" schema = ds.map_batches(lambda x: x, batch_format="pandas").schema() - # TODO(ekl) fix this to return ndarray - assert 
str(schema) == "Schema({'data': TensorDtype(shape=(10,), dtype=float64)})" + assert str(schema) == "Schema({'data': numpy.ndarray(shape=(10,), dtype=double)})" assert isinstance(schema.base_schema, PandasBlockSchema) From faa8072e8b469217b44e3a2b786fa9a111b841ad Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E8=8D=A3=E5=BB=BA=E6=B0=91?= <1179320652@qq.com> Date: Thu, 20 Apr 2023 04:36:09 +0800 Subject: [PATCH 016/424] [ray-data] Add alias parameters to the aggregate function, and add quantile fn (#34358) --- python/ray/data/aggregate.py | 147 +++++++++++++++++++++-- python/ray/data/tests/test_all_to_all.py | 67 ++++++++++- 2 files changed, 200 insertions(+), 14 deletions(-) diff --git a/python/ray/data/aggregate.py b/python/ray/data/aggregate.py index c9da21735c96..3d0b28f0efb9 100644 --- a/python/ray/data/aggregate.py +++ b/python/ray/data/aggregate.py @@ -113,8 +113,17 @@ def __init__(self): class Sum(_AggregateOnKeyBase): """Defines sum aggregation.""" - def __init__(self, on: Optional[KeyFn] = None, ignore_nulls: bool = True): + def __init__( + self, + on: Optional[KeyFn] = None, + ignore_nulls: bool = True, + alias_name: Optional[KeyFn] = None, + ): self._set_key_fn(on) + if alias_name: + self._rs_name = alias_name + else: + self._rs_name = f"sum({str(on)})" null_merge = _null_wrap_merge(ignore_nulls, lambda a1, a2: a1 + a2) @@ -127,7 +136,7 @@ def __init__(self, on: Optional[KeyFn] = None, ignore_nulls: bool = True): null_merge, ), finalize=_null_wrap_finalize(lambda a: a), - name=(f"sum({str(on)})"), + name=(self._rs_name), ) @@ -135,8 +144,17 @@ def __init__(self, on: Optional[KeyFn] = None, ignore_nulls: bool = True): class Min(_AggregateOnKeyBase): """Defines min aggregation.""" - def __init__(self, on: Optional[KeyFn] = None, ignore_nulls: bool = True): + def __init__( + self, + on: Optional[KeyFn] = None, + ignore_nulls: bool = True, + alias_name: Optional[KeyFn] = None, + ): self._set_key_fn(on) + if alias_name: + self._rs_name = alias_name + else: + 
self._rs_name = f"min({str(on)})" null_merge = _null_wrap_merge(ignore_nulls, min) @@ -149,7 +167,7 @@ def __init__(self, on: Optional[KeyFn] = None, ignore_nulls: bool = True): null_merge, ), finalize=_null_wrap_finalize(lambda a: a), - name=(f"min({str(on)})"), + name=(self._rs_name), ) @@ -157,8 +175,17 @@ def __init__(self, on: Optional[KeyFn] = None, ignore_nulls: bool = True): class Max(_AggregateOnKeyBase): """Defines max aggregation.""" - def __init__(self, on: Optional[KeyFn] = None, ignore_nulls: bool = True): + def __init__( + self, + on: Optional[KeyFn] = None, + ignore_nulls: bool = True, + alias_name: Optional[KeyFn] = None, + ): self._set_key_fn(on) + if alias_name: + self._rs_name = alias_name + else: + self._rs_name = f"max({str(on)})" null_merge = _null_wrap_merge(ignore_nulls, max) @@ -171,7 +198,7 @@ def __init__(self, on: Optional[KeyFn] = None, ignore_nulls: bool = True): null_merge, ), finalize=_null_wrap_finalize(lambda a: a), - name=(f"max({str(on)})"), + name=(self._rs_name), ) @@ -179,8 +206,17 @@ def __init__(self, on: Optional[KeyFn] = None, ignore_nulls: bool = True): class Mean(_AggregateOnKeyBase): """Defines mean aggregation.""" - def __init__(self, on: Optional[KeyFn] = None, ignore_nulls: bool = True): + def __init__( + self, + on: Optional[KeyFn] = None, + ignore_nulls: bool = True, + alias_name: Optional[KeyFn] = None, + ): self._set_key_fn(on) + if alias_name: + self._rs_name = alias_name + else: + self._rs_name = f"mean({str(on)})" null_merge = _null_wrap_merge( ignore_nulls, lambda a1, a2: [a1[0] + a2[0], a1[1] + a2[1]] @@ -207,7 +243,7 @@ def vectorized_mean(block: Block[T]) -> AggType: null_merge, ), finalize=_null_wrap_finalize(lambda a: a[0] / a[1]), - name=(f"mean({str(on)})"), + name=(self._rs_name), ) @@ -229,8 +265,13 @@ def __init__( on: Optional[KeyFn] = None, ddof: int = 1, ignore_nulls: bool = True, + alias_name: Optional[KeyFn] = None, ): self._set_key_fn(on) + if alias_name: + self._rs_name = alias_name + else: 
+ self._rs_name = f"std({str(on)})" def merge(a: List[float], b: List[float]): # Merges two accumulations into one. @@ -282,7 +323,7 @@ def finalize(a: List[float]): null_merge, ), finalize=_null_wrap_finalize(finalize), - name=(f"std({str(on)})"), + name=(self._rs_name), ) @@ -290,9 +331,18 @@ def finalize(a: List[float]): class AbsMax(_AggregateOnKeyBase): """Defines absolute max aggregation.""" - def __init__(self, on: Optional[KeyFn] = None, ignore_nulls: bool = True): + def __init__( + self, + on: Optional[KeyFn] = None, + ignore_nulls: bool = True, + alias_name: Optional[KeyFn] = None, + ): self._set_key_fn(on) on_fn = _to_on_fn(on) + if alias_name: + self._rs_name = alias_name + else: + self._rs_name = f"abs_max({str(on)})" super().__init__( init=_null_wrap_init(lambda k: 0), @@ -301,7 +351,7 @@ def __init__(self, on: Optional[KeyFn] = None, ignore_nulls: bool = True): ignore_nulls, on_fn, lambda a, r: max(a, abs(r)) ), finalize=_null_wrap_finalize(lambda a: a), - name=(f"abs_max({str(on)})"), + name=(self._rs_name), ) @@ -312,3 +362,78 @@ def _to_on_fn(on: Optional[KeyFn]): return lambda r: r[on] else: return on + + +@PublicAPI +class Quantile(_AggregateOnKeyBase): + """Defines Quantile aggregation.""" + + def __init__( + self, + on: Optional[KeyFn] = None, + q: float = 0.5, + ignore_nulls: bool = True, + alias_name: Optional[KeyFn] = None, + ): + self._set_key_fn(on) + self._q = q + if alias_name: + self._rs_name = alias_name + else: + self._rs_name = f"quantile({str(on)})" + + def merge(a: List[int], b: List[int]): + if isinstance(a, List) and isinstance(b, List): + a.extend(b) + return a + if isinstance(a, List) and (not isinstance(b, List)): + if b is not None and b != "": + a.append(b) + return a + if isinstance(b, List) and (not isinstance(a, List)): + if a is not None and a != "": + b.append(a) + return b + + ls = [] + if a is not None and a != "": + ls.append(a) + if b is not None and b != "": + ls.append(b) + return ls + + null_merge = 
_null_wrap_merge(ignore_nulls, merge) + + def block_row_ls(block: Block[T]) -> AggType: + block_acc = BlockAccessor.for_block(block) + ls = [] + for row in block_acc.iter_rows(): + ls.append(row.get(on)) + return ls + + import math + + def percentile(input_values, key=lambda x: x): + if not input_values: + return None + input_values = sorted(input_values) + k = (len(input_values) - 1) * self._q + f = math.floor(k) + c = math.ceil(k) + if f == c: + return key(input_values[int(k)]) + d0 = key(input_values[int(f)]) * (c - k) + d1 = key(input_values[int(c)]) * (k - f) + return round(d0 + d1, 5) + + super().__init__( + init=_null_wrap_init(lambda k: [0]), + merge=null_merge, + accumulate_block=_null_wrap_accumulate_block( + ignore_nulls, + block_row_ls, + null_merge, + ), + finalize=_null_wrap_finalize(percentile), + name=(self._rs_name), + ) diff --git a/python/ray/data/tests/test_all_to_all.py b/python/ray/data/tests/test_all_to_all.py index c98021da8b13..c848adb8eece 100644 --- a/python/ray/data/tests/test_all_to_all.py +++ b/python/ray/data/tests/test_all_to_all.py @@ -9,7 +9,7 @@ import pytest import ray -from ray.data.aggregate import AggregateFn, Count, Max, Mean, Min, Std, Sum +from ray.data.aggregate import AggregateFn, Count, Max, Mean, Min, Std, Sum, Quantile from ray.data.context import DataContext from ray.data.tests.conftest import * # noqa from ray.tests.conftest import * # noqa @@ -818,13 +818,14 @@ def test_groupby_arrow_multi_agg(ray_start_regular_shared, num_parts): Max("B"), Mean("B"), Std("B"), + Quantile("B"), ) ) assert agg_ds.count() == 3 agg_df = agg_ds.to_pandas() expected_grouped = df.groupby("A")["B"] np.testing.assert_array_equal(agg_df["count()"].to_numpy(), [34, 33, 33]) - for agg in ["sum", "min", "max", "mean", "std"]: + for agg in ["sum", "min", "max", "mean", "quantile", "std"]: result = agg_df[f"{agg}(B)"].to_numpy() expected = getattr(expected_grouped, agg)().to_numpy() if agg == "std": @@ -843,9 +844,10 @@ def 
test_groupby_arrow_multi_agg(ray_start_regular_shared, num_parts): Max("A"), Mean("A"), Std("A"), + Quantile("A"), ) ) - for agg in ["sum", "min", "max", "mean", "std"]: + for agg in ["sum", "min", "max", "mean", "quantile", "std"]: result = result_row[f"{agg}(A)"] expected = getattr(df["A"], agg)() if agg == "std": @@ -854,6 +856,65 @@ def test_groupby_arrow_multi_agg(ray_start_regular_shared, num_parts): assert result == expected +@pytest.mark.parametrize("num_parts", [1, 30]) +def test_groupby_arrow_multi_agg_alias(ray_start_regular_shared, num_parts): + seed = int(time.time()) + print(f"Seeding RNG for test_groupby_arrow_multi_agg with: {seed}") + random.seed(seed) + xs = list(range(100)) + random.shuffle(xs) + df = pd.DataFrame({"A": [x % 3 for x in xs], "B": xs}) + agg_ds = ( + ray.data.from_pandas(df) + .repartition(num_parts) + .groupby("A") + .aggregate( + Sum("B", alias_name="sum_b"), + Min("B", alias_name="min_b"), + Max("B", alias_name="max_b"), + Mean("B", alias_name="mean_b"), + Std("B", alias_name="std_b"), + Quantile("B", alias_name="quantile_b"), + ) + ) + + agg_df = agg_ds.to_pandas() + expected_grouped = df.groupby("A")["B"] + for agg in ["sum", "min", "max", "mean", "quantile", "std"]: + result = agg_df[f"{agg}_b"].to_numpy() + print(agg) + print(result) + expected = getattr(expected_grouped, agg)().to_numpy() + print(expected) + if agg == "std": + np.testing.assert_array_almost_equal(result, expected) + else: + np.testing.assert_array_equal(result, expected) + # Test built-in global std aggregation + df = pd.DataFrame({"A": xs}) + result_row = ( + ray.data.from_pandas(df) + .repartition(num_parts) + .aggregate( + Sum("A", alias_name="sum_b"), + Min("A", alias_name="min_b"), + Max("A", alias_name="max_b"), + Mean("A", alias_name="mean_b"), + Std("A", alias_name="std_b"), + Quantile("A", alias_name="quantile_b"), + ) + ) + for agg in ["sum", "min", "max", "mean", "quantile", "std"]: + result = result_row[f"{agg}_b"] + print(result) + expected = 
getattr(df["A"], agg)() + print(expected) + if agg == "std": + assert math.isclose(result, expected) + else: + assert result == expected + + def test_groupby_simple(ray_start_regular_shared): seed = int(time.time()) print(f"Seeding RNG for test_groupby_simple with: {seed}") From abf319de92dc7f9a75713ea97e240a72c9b66615 Mon Sep 17 00:00:00 2001 From: xwjiang2010 <87673679+xwjiang2010@users.noreply.github.com> Date: Wed, 19 Apr 2023 13:53:41 -0700 Subject: [PATCH 017/424] Revert "[data] Add usage tag for which block formats are used (#34384)" (#34569) This reverts commit ffeedbf63efea61f626b9e4f2895179534840025. [release test passing](https://buildkite.com/ray-project/release-tests-pr/builds/35579) Signed-off-by: xwjiang2010 --- .../data/_internal/execution/legacy_compat.py | 2 +- .../_internal/{usage.py => logical/util.py} | 31 +++++-------------- python/ray/data/block.py | 14 --------- .../data/tests/test_execution_optimizer.py | 8 ++--- python/ray/data/tests/test_util.py | 11 ------- src/ray/protobuf/usage.proto | 3 -- 6 files changed, 13 insertions(+), 56 deletions(-) rename python/ray/data/_internal/{usage.py => logical/util.py} (70%) diff --git a/python/ray/data/_internal/execution/legacy_compat.py b/python/ray/data/_internal/execution/legacy_compat.py index f3f28c01f8eb..a343c5384a3b 100644 --- a/python/ray/data/_internal/execution/legacy_compat.py +++ b/python/ray/data/_internal/execution/legacy_compat.py @@ -8,7 +8,7 @@ import ray from ray.data._internal.logical.optimizers import get_execution_plan -from ray.data._internal.usage import record_operators_usage +from ray.data._internal.logical.util import record_operators_usage from ray.data.context import DataContext from ray.types import ObjectRef from ray.data.block import Block, BlockMetadata, List diff --git a/python/ray/data/_internal/usage.py b/python/ray/data/_internal/logical/util.py similarity index 70% rename from python/ray/data/_internal/usage.py rename to python/ray/data/_internal/logical/util.py 
index a3b8af4f756a..cc7e0dc40cdb 100644 --- a/python/ray/data/_internal/usage.py +++ b/python/ray/data/_internal/logical/util.py @@ -1,18 +1,15 @@ -from typing import Dict, TYPE_CHECKING +from typing import Dict import json import threading from ray._private.usage.usage_lib import TagKey, record_extra_usage_tag +from ray.data._internal.logical.interfaces import LogicalOperator +from ray.data._internal.logical.operators.read_operator import Read +from ray.data._internal.logical.operators.write_operator import Write -if TYPE_CHECKING: - from ray.data._internal.logical.interfaces import LogicalOperator - -# Guards the below dicts. -_recording_lock = threading.Lock() # The dictionary for the operator name and count. _recorded_operators = dict() -# The dictionary for the block format name and count. -_recorded_block_formats = dict() +_recorded_operators_lock = threading.Lock() # The white list of operator names allowed to be recorded. _op_name_white_list = [ @@ -62,21 +59,12 @@ ] -def record_block_format_usage(block_format: str): - with _recording_lock: - _recorded_block_formats.setdefault(block_format, 0) - _recorded_block_formats[block_format] += 1 - formats_json_str = json.dumps(_recorded_block_formats) - - record_extra_usage_tag(TagKey.DATA_BLOCK_FORMATS, formats_json_str) - - -def record_operators_usage(op: "LogicalOperator"): +def record_operators_usage(op: LogicalOperator): """Record logical operator usage with Ray telemetry.""" ops_dict = dict() _collect_operators_to_dict(op, ops_dict) ops_json_str = "" - with _recording_lock: + with _recorded_operators_lock: for op, count in ops_dict.items(): _recorded_operators.setdefault(op, 0) _recorded_operators[op] += count @@ -85,11 +73,8 @@ def record_operators_usage(op: "LogicalOperator"): record_extra_usage_tag(TagKey.DATA_LOGICAL_OPS, ops_json_str) -def _collect_operators_to_dict(op: "LogicalOperator", ops_dict: Dict[str, int]): +def _collect_operators_to_dict(op: LogicalOperator, ops_dict: Dict[str, int]): """Collect 
the logical operator name and count into `ops_dict`.""" - from ray.data._internal.logical.operators.read_operator import Read - from ray.data._internal.logical.operators.write_operator import Write - for child in op.input_dependencies: _collect_operators_to_dict(child, ops_dict) diff --git a/python/ray/data/block.py b/python/ray/data/block.py index d8fec85f4a2f..6ee03722a951 100644 --- a/python/ray/data/block.py +++ b/python/ray/data/block.py @@ -22,7 +22,6 @@ import ray from ray import ObjectRefGenerator from ray.data._internal.util import _check_pyarrow_version, _truncated_repr -from ray.data._internal.usage import record_block_format_usage from ray.types import ObjectRef from ray.util.annotations import DeveloperAPI, PublicAPI @@ -433,31 +432,18 @@ def for_block(block: Block) -> "BlockAccessor[T]": if isinstance(block, pyarrow.Table): from ray.data._internal.arrow_block import ArrowBlockAccessor - record_block_format_usage("arrow") return ArrowBlockAccessor(block) elif isinstance(block, pandas.DataFrame): from ray.data._internal.pandas_block import PandasBlockAccessor - record_block_format_usage("pandas") return PandasBlockAccessor(block) elif isinstance(block, bytes): from ray.data._internal.arrow_block import ArrowBlockAccessor - record_block_format_usage("arrow") return ArrowBlockAccessor.from_bytes(block) elif isinstance(block, list): from ray.data._internal.simple_block import SimpleBlockAccessor - ctx = ray.data.DatasetContext.get_current() - if ctx.strict_mode: - raise StrictModeError( - f"Error validating {_truncated_repr(block)}: " - "Standalone Python objects are not " - "allowed in strict mode. To use Python objects in a datastream, " - "wrap them in a dict of numpy arrays, e.g., " - "return `{'item': np.array(batch)}` instead of just `batch`." 
- ) - record_block_format_usage("simple") return SimpleBlockAccessor(block) else: raise TypeError("Not a block type: {} ({})".format(block, type(block))) diff --git a/python/ray/data/tests/test_execution_optimizer.py b/python/ray/data/tests/test_execution_optimizer.py index b00a45569409..7ac1fb4e4fd5 100644 --- a/python/ray/data/tests/test_execution_optimizer.py +++ b/python/ray/data/tests/test_execution_optimizer.py @@ -43,9 +43,9 @@ FlatMap, ) from ray.data._internal.logical.operators.n_ary_operator import Zip -from ray.data._internal.usage import ( +from ray.data._internal.logical.util import ( _recorded_operators, - _recording_lock, + _recorded_operators_lock, _op_name_white_list, ) from ray.data._internal.planner.planner import Planner @@ -62,10 +62,10 @@ def _check_usage_record(op_names: List[str], clear_after_check: Optional[bool] = (so that subsequent checks do not use existing records of operator usage).""" for op_name in op_names: assert op_name in _op_name_white_list - with _recording_lock: + with _recorded_operators_lock: assert _recorded_operators.get(op_name, 0) > 0, _recorded_operators if clear_after_check: - with _recording_lock: + with _recorded_operators_lock: _recorded_operators.clear() diff --git a/python/ray/data/tests/test_util.py b/python/ray/data/tests/test_util.py index 0f3651f3d896..d64f3da9c715 100644 --- a/python/ray/data/tests/test_util.py +++ b/python/ray/data/tests/test_util.py @@ -3,7 +3,6 @@ import numpy as np from ray.data._internal.util import _check_pyarrow_version, _split_list -from ray.data._internal.usage import _recorded_block_formats from ray.data._internal.memory_tracing import ( trace_allocation, trace_deallocation, @@ -88,16 +87,6 @@ def test_list_splits(): assert _split_list(["foo", 1, [0], None], 3) == [["foo", 1], [[0]], [None]] -def test_block_format_usage(): - assert not _recorded_block_formats - ray.data.range(10).show() - assert set(_recorded_block_formats.keys()) == {"simple"} - ray.data.range_table(10).show() - 
assert set(_recorded_block_formats.keys()) == {"simple", "arrow"} - ray.data.range_table(10).map_batches(lambda x: x).show() - assert set(_recorded_block_formats.keys()) == {"simple", "arrow", "pandas"} - - if __name__ == "__main__": import sys diff --git a/src/ray/protobuf/usage.proto b/src/ray/protobuf/usage.proto index 3ad666484452..0f5197cf1529 100644 --- a/src/ray/protobuf/usage.proto +++ b/src/ray/protobuf/usage.proto @@ -119,9 +119,6 @@ enum TagKey { // Logical operators, stored in JSON format with operator name and count. // Example: {"MapBatches": 2, "Filter": 1} DATA_LOGICAL_OPS = 400; - // Block formats: simple, pandas, or arrow. - // Example: {"pandas": 2, "numpy": 1} - DATA_BLOCK_FORMATS = 401; // AIR // Name of AIR trainer, or "Custom" if user-defined. From 7fd44e66c93fb7a2b7e1efc95b0ee4ce64fc7cd9 Mon Sep 17 00:00:00 2001 From: Eric Liang Date: Wed, 19 Apr 2023 14:43:50 -0700 Subject: [PATCH 018/424] Disallow format query in strict mode (#34564) Signed-off-by: Eric Liang --- python/ray/data/datastream.py | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/python/ray/data/datastream.py b/python/ray/data/datastream.py index ac39aac9113d..0c033e1d1111 100644 --- a/python/ray/data/datastream.py +++ b/python/ray/data/datastream.py @@ -99,6 +99,7 @@ FlatMapUDF, KeyFn, RowUDF, + StrictModeError, T, U, _validate_key_fn, @@ -4381,6 +4382,13 @@ def default_batch_format(self) -> Type: Call this function to iterate over batches of data. """ # noqa: E501 + + context = DataContext.get_current() + if context.strict_mode: + raise StrictModeError( + "default_batch_format() is not allowed in strict mode" + ) + import pandas as pd import pyarrow as pa @@ -4410,6 +4418,9 @@ def dataset_format(self) -> BlockFormat: the schema for the first block. 
""" context = DataContext.get_current() + if context.strict_mode: + raise StrictModeError("dataset_format() is not allowed in strict mode") + if context.use_streaming_executor: raise DeprecationWarning( "`dataset_format` is deprecated for streaming execution. To use " From 53a95ab824b7ebb7aafbb5a937a5646ac4e7f570 Mon Sep 17 00:00:00 2001 From: Eric Liang Date: Wed, 19 Apr 2023 16:01:52 -0700 Subject: [PATCH 019/424] [data] Log a warning if the batch size is misconfigured in a way that would grossly reduce parallelism for actor pool. (#34594) --- .../operators/actor_pool_map_operator.py | 27 +++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/python/ray/data/_internal/execution/operators/actor_pool_map_operator.py b/python/ray/data/_internal/execution/operators/actor_pool_map_operator.py index 1156bc09bf72..fa727c363578 100644 --- a/python/ray/data/_internal/execution/operators/actor_pool_map_operator.py +++ b/python/ray/data/_internal/execution/operators/actor_pool_map_operator.py @@ -75,6 +75,7 @@ def __init__( ) self._init_fn = init_fn self._ray_remote_args = self._apply_default_remote_args(self._ray_remote_args) + self._min_rows_per_bundle = min_rows_per_bundle # Create autoscaling policy from compute strategy. self._autoscaling_policy = autoscaling_policy @@ -249,6 +250,32 @@ def shutdown(self): self._actor_pool.kill_all_actors() super().shutdown() + # Warn if the user specified a batch or block size that prevents full + # parallelization across the actor pool. We only know this information after + # execution has completed. + total_rows = sum([m.num_rows for m in self._output_metadata]) + min_workers = self._autoscaling_policy.min_workers + max_desired_batch_size = total_rows // min_workers + if ( + self._min_rows_per_bundle is not None + and self._min_rows_per_bundle > max_desired_batch_size + ): + # The user specified a batch size, but it was probably too large. 
+ logger.get_logger().warning( + "To ensure full parallelization across an actor pool of size " + f"{min_workers}, the specified batch size " + f"should be at most {max_desired_batch_size}. Your configured batch " + f"size for this operator was {self._min_rows_per_bundle}." + ) + elif len(self._output_metadata) < min_workers: + # The user created a stream that has too few blocks to begin with. + logger.get_logger().warning( + "To ensure full parallelization across an actor pool of size " + f"{min_workers}, the Datastream should consist of at least " + f"{min_workers} distinct blocks. Consider increasing " + "the parallelism when creating the Datastream." + ) + def get_work_refs(self) -> List[ray.ObjectRef]: # Work references that we wish the executor to wait on includes both task # futures AND worker ready futures. From f6c7a27334ccf2b86f528264132b925bc16b8dfa Mon Sep 17 00:00:00 2001 From: Alan Guo Date: Wed, 19 Apr 2023 17:23:36 -0600 Subject: [PATCH 020/424] [Dashboard] Make loading screen not block out the entire page. (#34515) Previously, if a dashboard page was loading, it would grey out the whole screen and buttons would not be press-able. Now, we don't block out the whole page. Also don't show loading bar if data is already loaded from in-memory cache. 
--- dashboard/client/src/components/Loading.tsx | 9 +++------ dashboard/client/src/pages/actor/ActorDetail.tsx | 4 ++-- dashboard/client/src/pages/actor/hook/useActorDetail.ts | 3 ++- dashboard/client/src/pages/job/JobDetail.tsx | 4 ++-- dashboard/client/src/pages/job/JobDetailInfoPage.tsx | 4 ++-- dashboard/client/src/pages/job/hook/useJobDetail.ts | 3 ++- dashboard/client/src/pages/job/hook/useJobList.ts | 3 ++- dashboard/client/src/pages/job/index.tsx | 3 ++- .../client/src/pages/node/ClusterDetailInfoPage.tsx | 4 ++-- dashboard/client/src/pages/node/NodeDetail.tsx | 3 ++- dashboard/client/src/pages/node/hook/useClusterDetail.ts | 3 ++- dashboard/client/src/pages/node/hook/useNodeDetail.ts | 3 ++- dashboard/client/src/pages/node/hook/useNodeList.ts | 3 ++- dashboard/client/src/pages/node/index.tsx | 3 ++- 14 files changed, 29 insertions(+), 23 deletions(-) diff --git a/dashboard/client/src/components/Loading.tsx b/dashboard/client/src/components/Loading.tsx index 6c1cb1e8f0ea..edca6bb2063e 100644 --- a/dashboard/client/src/components/Loading.tsx +++ b/dashboard/client/src/components/Loading.tsx @@ -1,10 +1,7 @@ -import { Backdrop, CircularProgress } from "@material-ui/core"; +import { CircularProgress } from "@material-ui/core"; import React from "react"; -const Loading = ({ loading }: { loading: boolean }) => ( - - - -); +const Loading = ({ loading }: { loading: boolean }) => + loading ? 
: null; export default Loading; diff --git a/dashboard/client/src/pages/actor/ActorDetail.tsx b/dashboard/client/src/pages/actor/ActorDetail.tsx index 3951cb0ffa54..30a407c14709 100644 --- a/dashboard/client/src/pages/actor/ActorDetail.tsx +++ b/dashboard/client/src/pages/actor/ActorDetail.tsx @@ -37,12 +37,12 @@ const useStyle = makeStyles((theme) => ({ const ActorDetailPage = () => { const classes = useStyle(); const { ipLogMap } = useContext(GlobalContext); - const { params, actorDetail, msg } = useActorDetail(); + const { params, actorDetail, msg, isLoading } = useActorDetail(); if (!actorDetail) { return (
- +
diff --git a/dashboard/client/src/pages/actor/hook/useActorDetail.ts b/dashboard/client/src/pages/actor/hook/useActorDetail.ts index 271f903e076c..86aad3aaa959 100644 --- a/dashboard/client/src/pages/actor/hook/useActorDetail.ts +++ b/dashboard/client/src/pages/actor/hook/useActorDetail.ts @@ -10,7 +10,7 @@ export const useActorDetail = () => { const [msg, setMsg] = useState("Loading the actor infos..."); const { namespaceMap } = useContext(GlobalContext); - const { data: actorDetail } = useSWR( + const { data: actorDetail, isLoading } = useSWR( ["useActorDetail", params.id], async ([_, actorId]) => { const actor_resp = await getActor(actorId); @@ -35,6 +35,7 @@ export const useActorDetail = () => { params, actorDetail, msg, + isLoading, namespaceMap, }; }; diff --git a/dashboard/client/src/pages/job/JobDetail.tsx b/dashboard/client/src/pages/job/JobDetail.tsx index 888ae5eeb48d..e3721403dbc7 100644 --- a/dashboard/client/src/pages/job/JobDetail.tsx +++ b/dashboard/client/src/pages/job/JobDetail.tsx @@ -31,7 +31,7 @@ const useStyle = makeStyles((theme) => ({ export const JobDetailChartsPage = () => { const classes = useStyle(); - const { job, msg, params } = useJobDetail(); + const { job, msg, isLoading, params } = useJobDetail(); const jobId = params.id; const [taskListFilter, setTaskListFilter] = useState(); @@ -99,7 +99,7 @@ export const JobDetailChartsPage = () => { if (!job) { return (
- +
diff --git a/dashboard/client/src/pages/job/JobDetailInfoPage.tsx b/dashboard/client/src/pages/job/JobDetailInfoPage.tsx index 24bb85b23e17..b8345e8d44e4 100644 --- a/dashboard/client/src/pages/job/JobDetailInfoPage.tsx +++ b/dashboard/client/src/pages/job/JobDetailInfoPage.tsx @@ -20,7 +20,7 @@ export const JobDetailInfoPage = () => { // TODO(aguo): Add more content to this page! const classes = useStyle(); - const { job, msg, params } = useJobDetail(); + const { job, msg, isLoading, params } = useJobDetail(); if (!job) { return ( @@ -32,7 +32,7 @@ export const JobDetailInfoPage = () => { path: undefined, }} /> - +
diff --git a/dashboard/client/src/pages/job/hook/useJobDetail.ts b/dashboard/client/src/pages/job/hook/useJobDetail.ts index 7dda767a297e..d473eb3cba31 100644 --- a/dashboard/client/src/pages/job/hook/useJobDetail.ts +++ b/dashboard/client/src/pages/job/hook/useJobDetail.ts @@ -10,7 +10,7 @@ export const useJobDetail = () => { const [msg, setMsg] = useState("Loading the job detail"); const [refreshing, setRefresh] = useState(true); const { ipLogMap } = useContext(GlobalContext); - const { data: job } = useSWR( + const { data: job, isLoading } = useSWR( "useJobDetail", async () => { try { @@ -26,6 +26,7 @@ export const useJobDetail = () => { return { job, + isLoading, msg, params, ipLogMap, diff --git a/dashboard/client/src/pages/job/hook/useJobList.ts b/dashboard/client/src/pages/job/hook/useJobList.ts index ba4beedb6b6c..8ed6079be5a4 100644 --- a/dashboard/client/src/pages/job/hook/useJobList.ts +++ b/dashboard/client/src/pages/job/hook/useJobList.ts @@ -30,7 +30,7 @@ export const useJobList = () => { }; refreshRef.current = isRefreshing; - const { data } = useSWR( + const { data, isLoading } = useSWR( "useJobList", async () => { const rsp = await getJobList(); @@ -52,6 +52,7 @@ export const useJobList = () => { filter.every((f) => node[f.key] && (node[f.key] ?? "").includes(f.val)), ), msg, + isLoading, isRefreshing, onSwitchChange, changeFilter, diff --git a/dashboard/client/src/pages/job/index.tsx b/dashboard/client/src/pages/job/index.tsx index 4fe82836ab34..1ff5eb6346f6 100644 --- a/dashboard/client/src/pages/job/index.tsx +++ b/dashboard/client/src/pages/job/index.tsx @@ -68,6 +68,7 @@ const JobList = () => { const classes = useStyles(); const { msg, + isLoading, isRefreshing, onSwitchChange, jobList, @@ -78,7 +79,7 @@ const JobList = () => { return (
- + Auto Refresh: { // TODO(aguo): Add more content to this page! const classes = useStyle(); - const { clusterDetail, msg } = useClusterDetail(); + const { clusterDetail, msg, isLoading } = useClusterDetail(); if (!clusterDetail) { return ( @@ -30,7 +30,7 @@ export const ClusterDetailInfoPage = () => { path: undefined, }} /> - +
diff --git a/dashboard/client/src/pages/node/NodeDetail.tsx b/dashboard/client/src/pages/node/NodeDetail.tsx index 6c3399301777..9a6dd2aee7f0 100644 --- a/dashboard/client/src/pages/node/NodeDetail.tsx +++ b/dashboard/client/src/pages/node/NodeDetail.tsx @@ -43,6 +43,7 @@ const NodeDetailPage = () => { selectedTab, nodeDetail, msg, + isLoading, isRefreshing, onRefreshChange, raylet, @@ -59,7 +60,7 @@ const NodeDetailPage = () => { path: `/cluster/nodes/${params.id}`, }} /> - + { const [msg, setMsg] = useState("Loading the job detail"); const [refreshing, setRefresh] = useState(true); - const { data: clusterDetail } = useSWR( + const { data: clusterDetail, isLoading } = useSWR( "useClusterDetail", async () => { try { @@ -23,5 +23,6 @@ export const useClusterDetail = () => { return { clusterDetail, msg, + isLoading, }; }; diff --git a/dashboard/client/src/pages/node/hook/useNodeDetail.ts b/dashboard/client/src/pages/node/hook/useNodeDetail.ts index 3dd00e01818b..5d06fba25446 100644 --- a/dashboard/client/src/pages/node/hook/useNodeDetail.ts +++ b/dashboard/client/src/pages/node/hook/useNodeDetail.ts @@ -15,7 +15,7 @@ export const useNodeDetail = () => { setRefresh(event.target.checked); }; - const { data: nodeDetail } = useSWR( + const { data: nodeDetail, isLoading } = useSWR( ["useNodeDetail", params.id], async ([_, nodeId]) => { const { data } = await getNodeDetail(nodeId); @@ -47,6 +47,7 @@ export const useNodeDetail = () => { selectedTab, nodeDetail, msg, + isLoading, isRefreshing, onRefreshChange, raylet, diff --git a/dashboard/client/src/pages/node/hook/useNodeList.ts b/dashboard/client/src/pages/node/hook/useNodeList.ts index 3409138ad712..ce9c764cf1a5 100644 --- a/dashboard/client/src/pages/node/hook/useNodeList.ts +++ b/dashboard/client/src/pages/node/hook/useNodeList.ts @@ -26,7 +26,7 @@ export const useNodeList = () => { const onSwitchChange = (event: React.ChangeEvent) => { setRefresh(event.target.checked); }; - const { data } = useSWR( + const { data, 
isLoading } = useSWR( "useNodeList", async () => { const { data } = await getNodeList(); @@ -62,6 +62,7 @@ export const useNodeList = () => { filter.every((f) => node[f.key] && node[f.key].includes(f.val)), ), msg, + isLoading, isRefreshing, onSwitchChange, changeFilter, diff --git a/dashboard/client/src/pages/node/index.tsx b/dashboard/client/src/pages/node/index.tsx index 9f2f4bf8fc8e..3753af2d2213 100644 --- a/dashboard/client/src/pages/node/index.tsx +++ b/dashboard/client/src/pages/node/index.tsx @@ -158,6 +158,7 @@ const Nodes = () => { const classes = useStyles(); const { msg, + isLoading, isRefreshing, onSwitchChange, nodeList, @@ -172,7 +173,7 @@ const Nodes = () => { return (
- + Auto Refresh: Date: Wed, 19 Apr 2023 17:01:20 -0700 Subject: [PATCH 021/424] [data] [docs] Datastream docs rename [5/n] (#34512) Part 5 of #34235 --- README.rst | 4 +- doc/BUILD | 2 +- doc/source/_static/js/custom.js | 2 +- doc/source/_static/js/top-navigation.js | 2 +- doc/source/_toc.yml | 7 +- doc/source/data/api/api.rst | 11 +- .../{dataset_context.rst => data_context.rst} | 2 +- ...dataset_iterator.rst => data_iterator.rst} | 0 doc/source/data/api/data_representations.rst | 6 +- doc/source/data/api/dataset.rst | 165 --------- doc/source/data/api/dataset_pipeline.rst | 99 ------ doc/source/data/api/datastream.rst | 165 +++++++++ doc/source/data/api/from_other_data_libs.rst | 64 ++-- .../{grouped_dataset.rst => grouped_data.rst} | 20 +- doc/source/data/api/input_output.rst | 30 +- doc/source/data/api/random_access_dataset.rst | 4 +- ...datasets.rst => consuming-datastreams.rst} | 70 ++-- ...-datasets.rst => creating-datastreams.rst} | 314 +++++++++--------- doc/source/data/custom-datasource.rst | 26 +- ...taset-internals.rst => data-internals.rst} | 94 +++--- ...or-support.rst => data-tensor-support.rst} | 52 +-- doc/source/data/data.rst | 200 +++++++++++ doc/source/data/dataset.rst | 210 ------------ ...g_datasets.py => consuming_datastreams.py} | 25 +- ...ng_datasets.py => creating_datastreams.py} | 92 ++--- ...ed.py => creating_datastreams_untested.py} | 22 +- doc/source/data/doc_code/key_concepts.py | 8 +- doc/source/data/doc_code/quick_start.py | 16 +- ...ving_datasets.py => saving_datastreams.py} | 10 +- doc/source/data/doc_code/tensor.py | 76 ++--- ...atasets.py => transforming_datastreams.py} | 118 ++++--- .../data/examples/advanced-pipelines.rst | 112 ------- doc/source/data/examples/batch_training.ipynb | 68 ++-- doc/source/data/examples/index.rst | 26 +- .../examples/nyc_taxi_basic_processing.ipynb | 66 ++-- doc/source/data/examples/ocr_example.ipynb | 28 +- doc/source/data/examples/random-access.rst | 10 +- doc/source/data/faq.rst | 181 
+++++----- doc/source/data/getting-started.rst | 83 ++--- doc/source/data/glossary.rst | 85 +++-- doc/source/data/images/dataset-arch.svg | 1 - doc/source/data/images/dataset-compute-1.png | Bin 38712 -> 0 bytes doc/source/data/images/dataset-loading-1.png | Bin 62699 -> 0 bytes doc/source/data/images/dataset.svg | 1 - doc/source/data/images/datastream-arch.svg | 1 + .../data/images/datastream-loading-1.png | Bin 0 -> 126100 bytes .../{dataset-map.svg => datastream-map.svg} | 0 .../{dataset-read.svg => datastream-read.svg} | 0 ...set-shuffle.svg => datastream-shuffle.svg} | 0 doc/source/data/images/datastream.svg | 1 + doc/source/data/images/stream-example.png | Bin 0 -> 106561 bytes doc/source/data/integrations.rst | 2 +- doc/source/data/key-concepts.rst | 85 +++-- doc/source/data/mars-on-ray.rst | 6 +- doc/source/data/performance-tips.rst | 110 +++--- doc/source/data/pipelining-compute.rst | 26 +- ...asets.rst => transforming-datastreams.rst} | 250 +++++++------- doc/source/data/user-guide.rst | 18 +- doc/source/index.md | 4 +- doc/source/ray-air/api/dataset-ingest.rst | 6 +- doc/source/ray-air/check-ingest.rst | 8 +- doc/source/ray-air/computer-vision.rst | 8 +- .../examples/analyze_tuning_results.ipynb | 4 +- ...ert_existing_pytorch_code_to_ray_air.ipynb | 6 +- .../convert_existing_tf_code_to_ray_air.ipynb | 8 +- .../ray-air/examples/feast_example.ipynb | 2 +- .../examples/gptj_batch_prediction.ipynb | 2 +- .../examples/gptj_deepspeed_fine_tuning.ipynb | 2 +- .../huggingface_text_classification.ipynb | 2 +- .../pytorch_resnet_batch_prediction.ipynb | 4 +- .../examples/pytorch_tabular_starter.py | 2 +- .../stablediffusion_batch_prediction.ipynb | 2 +- .../ray-air/examples/tf_tabular_starter.py | 4 +- .../examples/tfx_tabular_train_to_serve.ipynb | 4 +- .../ray-air/examples/torch_detection.ipynb | 8 +- .../examples/torch_image_example.ipynb | 2 +- .../examples/torch_incremental_learning.ipynb | 32 +- .../ray-air/examples/upload_to_comet_ml.ipynb | 2 +- 
.../ray-air/examples/upload_to_wandb.ipynb | 2 +- doc/source/ray-air/key-concepts.rst | 10 +- doc/source/ray-air/predictors.rst | 6 +- doc/source/ray-air/preprocessors.rst | 6 +- doc/source/ray-air/trainers.rst | 2 +- doc/source/ray-air/tuner.rst | 2 +- .../datasets_train/datasets_train.py | 12 +- .../ray-core/examples/batch_prediction.ipynb | 2 +- doc/source/ray-core/patterns/pipelining.rst | 2 +- doc/source/ray-overview/getting-started.md | 22 +- doc/source/ray-overview/index.md | 4 +- doc/source/ray-overview/learn-more.md | 2 +- doc/source/ray-overview/use-cases.rst | 4 +- doc/source/ray-references/glossary.rst | 6 +- doc/source/rllib/rllib-offline.rst | 16 +- doc/source/serve/tutorials/serve-ml-models.md | 2 +- .../templates/01_batch_inference/README.md | 8 +- .../01_batch_inference/batch_inference.ipynb | 10 +- doc/source/train/dl_guide.rst | 10 +- .../lightning/lightning_cola_advanced.ipynb | 12 +- .../lightning/lightning_mnist_example.ipynb | 2 +- doc/source/train/getting-started.rst | 10 +- doc/source/train/key-concepts.rst | 4 +- doc/source/train/train.rst | 2 +- .../tune/tutorials/tune-fault-tolerance.rst | 4 +- .../tutorials/tune_get_data_in_and_out.md | 2 +- python/ray/air/examples/dreambooth/dataset.py | 4 +- python/ray/air/tests/test_api.py | 2 +- .../_internal/delegating_block_builder.py | 2 +- .../_internal/planner/plan_from_numpy_op.py | 2 +- python/ray/data/_internal/table_block.py | 2 +- python/ray/data/block.py | 13 +- .../ray/data/datasource/file_meta_provider.py | 2 +- .../ray/data/datasource/numpy_datasource.py | 2 +- python/ray/data/datastream.py | 16 +- .../{grouped_dataset.py => grouped_data.py} | 0 python/ray/data/read_api.py | 14 +- .../data/tests/test_execution_optimizer.py | 4 +- python/ray/data/tests/test_huggingface.py | 2 +- python/ray/data/tests/test_strict_mode.py | 2 +- .../ray/train/_internal/backend_executor.py | 2 +- python/ray/train/_internal/dataset_spec.py | 22 +- python/ray/train/_internal/session.py | 4 +- 
python/ray/train/base_trainer.py | 23 +- python/ray/train/batch_predictor.py | 36 +- python/ray/train/data_parallel_trainer.py | 4 +- .../pytorch/torch_regression_example.py | 4 +- .../tf/tensorflow_autoencoder_example.py | 2 +- .../tf/tensorflow_regression_example.py | 4 +- python/ray/train/gbdt_trainer.py | 4 +- python/ray/train/horovod/horovod_trainer.py | 4 +- .../train/huggingface/_huggingface_utils.py | 6 +- .../accelerate/accelerate_trainer.py | 4 +- .../train/huggingface/huggingface_trainer.py | 4 +- python/ray/train/lightgbm/lightgbm_trainer.py | 2 +- .../ray/train/lightning/lightning_trainer.py | 13 +- python/ray/train/mosaic/mosaic_trainer.py | 2 +- python/ray/train/rl/rl_trainer.py | 2 +- python/ray/train/session.py | 2 +- python/ray/train/sklearn/sklearn_trainer.py | 2 +- .../train/tensorflow/tensorflow_trainer.py | 4 +- .../ray/train/tensorflow/train_loop_utils.py | 2 +- python/ray/train/tests/test_base_trainer.py | 2 +- .../ray/train/tests/test_xgboost_trainer.py | 2 +- python/ray/train/torch/torch_trainer.py | 4 +- python/ray/train/xgboost/xgboost_trainer.py | 2 +- python/ray/tune/execution/experiment_state.py | 2 +- .../impl/out_of_band_serialize_dataset.py | 8 +- python/ray/tune/tests/test_trial_runner_3.py | 2 +- python/ray/tune/tuner.py | 2 +- python/ray/util/actor_group.py | 2 +- .../dataset/operator_fusion_benchmark.py | 4 +- rllib/offline/dataset_reader.py | 2 +- rllib/offline/feature_importance.py | 2 +- rllib/offline/tests/test_dataset_reader.py | 6 +- 153 files changed, 1663 insertions(+), 1948 deletions(-) rename doc/source/data/api/{dataset_context.rst => data_context.rst} (90%) rename doc/source/data/api/{dataset_iterator.rst => data_iterator.rst} (100%) delete mode 100644 doc/source/data/api/dataset.rst delete mode 100644 doc/source/data/api/dataset_pipeline.rst create mode 100644 doc/source/data/api/datastream.rst rename doc/source/data/api/{grouped_dataset.rst => grouped_data.rst} (57%) rename doc/source/data/{consuming-datasets.rst 
=> consuming-datastreams.rst} (54%) rename doc/source/data/{creating-datasets.rst => creating-datastreams.rst} (70%) rename doc/source/data/{dataset-internals.rst => data-internals.rst} (75%) rename doc/source/data/{dataset-tensor-support.rst => data-tensor-support.rst} (77%) create mode 100644 doc/source/data/data.rst delete mode 100644 doc/source/data/dataset.rst rename doc/source/data/doc_code/{consuming_datasets.py => consuming_datastreams.py} (75%) rename doc/source/data/doc_code/{creating_datasets.py => creating_datastreams.py} (82%) rename doc/source/data/doc_code/{creating_datasets_untested.py => creating_datastreams_untested.py} (79%) rename doc/source/data/doc_code/{saving_datasets.py => saving_datastreams.py} (93%) rename doc/source/data/doc_code/{transforming_datasets.py => transforming_datastreams.py} (89%) delete mode 100644 doc/source/data/examples/advanced-pipelines.rst delete mode 100644 doc/source/data/images/dataset-arch.svg delete mode 100644 doc/source/data/images/dataset-compute-1.png delete mode 100644 doc/source/data/images/dataset-loading-1.png delete mode 100644 doc/source/data/images/dataset.svg create mode 100644 doc/source/data/images/datastream-arch.svg create mode 100644 doc/source/data/images/datastream-loading-1.png rename doc/source/data/images/{dataset-map.svg => datastream-map.svg} (100%) rename doc/source/data/images/{dataset-read.svg => datastream-read.svg} (100%) rename doc/source/data/images/{dataset-shuffle.svg => datastream-shuffle.svg} (100%) create mode 100644 doc/source/data/images/datastream.svg create mode 100644 doc/source/data/images/stream-example.png rename doc/source/data/{transforming-datasets.rst => transforming-datastreams.rst} (67%) rename python/ray/data/{grouped_dataset.py => grouped_data.py} (100%) diff --git a/README.rst b/README.rst index c2a389f80617..3e2eaab5109f 100644 --- a/README.rst +++ b/README.rst @@ -23,7 +23,7 @@ Ray is a unified framework for scaling AI and Python applications. 
Ray consists Learn more about `Ray AIR`_ and its libraries: -- `Datasets`_: Distributed Data Preprocessing +- `Data`_: Distributed ML Preprocessing - `Train`_: Distributed Training - `Tune`_: Scalable Hyperparameter Tuning - `RLlib`_: Scalable Reinforcement Learning @@ -44,7 +44,7 @@ Install Ray with: ``pip install ray``. For nightly wheels, see the `Installation page `__. .. _`Serve`: https://docs.ray.io/en/latest/serve/index.html -.. _`Datasets`: https://docs.ray.io/en/latest/data/dataset.html +.. _`Data`: https://docs.ray.io/en/latest/data/data.html .. _`Workflow`: https://docs.ray.io/en/latest/workflows/concepts.html .. _`Train`: https://docs.ray.io/en/latest/train/train.html .. _`Tune`: https://docs.ray.io/en/latest/tune/index.html diff --git a/doc/BUILD b/doc/BUILD index af4044126013..30a81aa1474e 100644 --- a/doc/BUILD +++ b/doc/BUILD @@ -223,7 +223,7 @@ py_test_run_all_subdirectory( include = ["source/data/doc_code/*.py"], exclude = [ "source/ray-air/doc_code/predictors.py", - "source/data/doc_code/creating_datasets_untested.py" + "source/data/doc_code/creating_datastreams_untested.py" ], extra_srcs = [], tags = ["exclusive", "team:core"], diff --git a/doc/source/_static/js/custom.js b/doc/source/_static/js/custom.js index ff4f883b65a3..53369e1c855d 100644 --- a/doc/source/_static/js/custom.js +++ b/doc/source/_static/js/custom.js @@ -40,7 +40,7 @@ document.addEventListener("DOMContentLoaded", function() { "Ray Clusters", "Deploying on Kubernetes", "Deploying on VMs", "Applications Guide", "Ray Cluster Management API", "Ray AI Runtime (AIR)", "Ray AIR API", - "Ray Data", "Ray Datasets API", "Integrations", + "Ray Data", "Ray Data API", "Integrations", "Ray Train", "Ray Train API", "Ray Tune", "Ray Tune Examples", "Ray Tune API", "Ray Serve", "Ray Serve API", diff --git a/doc/source/_static/js/top-navigation.js b/doc/source/_static/js/top-navigation.js index 0a0a5adf5e57..bbb7a60ece8b 100644 --- a/doc/source/_static/js/top-navigation.js +++ 
b/doc/source/_static/js/top-navigation.js @@ -77,7 +77,7 @@ librariesMenu.innerHTML = "Libraries" + downCaret + "" librariesList = document.createElement("ul") librariesList.innerHTML += "
  • Ray CoreScale general Python applications
  • " librariesList.innerHTML += "
  • Ray AIRScale AI applications
  • " -librariesList.innerHTML += "
  • Ray DatasetsScale data ingest and preprocessing
  • " +librariesList.innerHTML += "
  • Ray DataScale data ingest and preprocessing
  • " librariesList.innerHTML += "
  • Ray TrainScale machine learning training
  • " librariesList.innerHTML += "
  • Ray TuneScale hyperparameter tuning
  • " librariesList.innerHTML += "
  • Ray ServeScale model serving
  • " diff --git a/doc/source/_toc.yml b/doc/source/_toc.yml index 0c87cc8cf76e..c9afcf13b8da 100644 --- a/doc/source/_toc.yml +++ b/doc/source/_toc.yml @@ -84,7 +84,7 @@ parts: - file: ray-air/api/api - file: ray-air/benchmarks - - file: data/dataset + - file: data/data title: Ray Data sections: - file: data/getting-started @@ -95,10 +95,9 @@ parts: - file: data/examples/nyc_taxi_basic_processing title: Processing the NYC taxi dataset - file: data/examples/batch_training - title: Batch Training with Ray Datasets + title: Batch Training with Ray Data - file: data/examples/ocr_example - title: Scaling OCR with Ray Datasets - - file: data/examples/advanced-pipelines + title: Scaling OCR with Ray Data - file: data/examples/random-access - file: data/faq - file: data/api/api diff --git a/doc/source/data/api/api.rst b/doc/source/data/api/api.rst index 3d0c571222e4..b1cd7d4ddf33 100644 --- a/doc/source/data/api/api.rst +++ b/doc/source/data/api/api.rst @@ -1,18 +1,17 @@ .. _data-api: -Ray Datasets API +Ray Data API ================ .. toctree:: :maxdepth: 2 input_output.rst - dataset.rst - dataset_iterator.rst - dataset_pipeline.rst + datastream.rst + data_iterator.rst execution_options.rst - grouped_dataset.rst - dataset_context.rst + grouped_data.rst + data_context.rst data_representations.rst random_access_dataset.rst utility.rst diff --git a/doc/source/data/api/dataset_context.rst b/doc/source/data/api/data_context.rst similarity index 90% rename from doc/source/data/api/dataset_context.rst rename to doc/source/data/api/data_context.rst index 8d1ae2c112e4..2a006b4a2619 100644 --- a/doc/source/data/api/dataset_context.rst +++ b/doc/source/data/api/data_context.rst @@ -1,4 +1,4 @@ -.. _dataset-context-api: +.. 
_data-context-api: DataContext API =============== diff --git a/doc/source/data/api/dataset_iterator.rst b/doc/source/data/api/data_iterator.rst similarity index 100% rename from doc/source/data/api/dataset_iterator.rst rename to doc/source/data/api/data_iterator.rst diff --git a/doc/source/data/api/data_representations.rst b/doc/source/data/api/data_representations.rst index 6d731af919bf..7e7ce12de71e 100644 --- a/doc/source/data/api/data_representations.rst +++ b/doc/source/data/api/data_representations.rst @@ -1,7 +1,7 @@ .. _data-representations: -Data Representations -==================== +Data Representations (internal) +=============================== .. currentmodule:: ray.data @@ -34,7 +34,7 @@ Row API row.TableRow -.. _dataset-tensor-extension-api: +.. _datastream-tensor-extension-api: Tensor Column Extension API --------------------------- diff --git a/doc/source/data/api/dataset.rst b/doc/source/data/api/dataset.rst deleted file mode 100644 index af6b0dbcebe8..000000000000 --- a/doc/source/data/api/dataset.rst +++ /dev/null @@ -1,165 +0,0 @@ -.. _dataset-api: - -Dataset API -=========== - -.. currentmodule:: ray.data - -Constructor ------------ - -.. autosummary:: - :toctree: doc/ - - Dataset - -Basic Transformations ---------------------- - -.. autosummary:: - :toctree: doc/ - - Dataset.map - Dataset.map_batches - Dataset.flat_map - Dataset.filter - Dataset.add_column - Dataset.drop_columns - Dataset.select_columns - Dataset.random_sample - Dataset.limit - -Sorting, Shuffling, Repartitioning ----------------------------------- - -.. autosummary:: - :toctree: doc/ - - Dataset.sort - Dataset.random_shuffle - Dataset.randomize_block_order - Dataset.repartition - -Splitting and Merging Datasets ------------------------------- - -.. 
autosummary:: - :toctree: doc/ - - Dataset.split - Dataset.split_at_indices - Dataset.split_proportionately - Dataset.streaming_split - Dataset.train_test_split - Dataset.union - Dataset.zip - -Grouped and Global Aggregations -------------------------------- - -.. autosummary:: - :toctree: doc/ - - Dataset.groupby - Dataset.aggregate - Dataset.sum - Dataset.min - Dataset.max - Dataset.mean - Dataset.std - -Converting to Pipeline ----------------------- - -.. autosummary:: - :toctree: doc/ - - Dataset.repeat - Dataset.window - -Consuming Datasets ------------------- - -.. autosummary:: - :toctree: doc/ - - Dataset.show - Dataset.take - Dataset.take_batch - Dataset.take_all - Dataset.iterator - Dataset.iter_rows - Dataset.iter_batches - Dataset.iter_torch_batches - Dataset.iter_tf_batches - -I/O and Conversion ------------------- - -.. autosummary:: - :toctree: doc/ - - Dataset.write_parquet - Dataset.write_json - Dataset.write_csv - Dataset.write_numpy - Dataset.write_tfrecords - Dataset.write_webdataset - Dataset.write_mongo - Dataset.write_datasource - Dataset.to_torch - Dataset.to_tf - Dataset.to_dask - Dataset.to_mars - Dataset.to_modin - Dataset.to_spark - Dataset.to_pandas - Dataset.to_pandas_refs - Dataset.to_numpy_refs - Dataset.to_arrow_refs - Dataset.to_random_access_dataset - -Inspecting Metadata -------------------- - -.. autosummary:: - :toctree: doc/ - - Dataset.count - Dataset.schema - Dataset.default_batch_format - Dataset.num_blocks - Dataset.size_bytes - Dataset.input_files - Dataset.stats - Dataset.get_internal_block_refs - -Execution ---------- - -.. autosummary:: - :toctree: doc/ - - Dataset.materialize - -Serialization -------------- - -.. autosummary:: - :toctree: doc/ - - Dataset.has_serializable_lineage - Dataset.serialize_lineage - Dataset.deserialize_lineage - -Internals ---------- - -.. 
autosummary:: - :toctree: doc/ - - Dataset.__init__ - Dataset.dataset_format - Dataset.fully_executed - Dataset.is_fully_executed - Dataset.lazy diff --git a/doc/source/data/api/dataset_pipeline.rst b/doc/source/data/api/dataset_pipeline.rst deleted file mode 100644 index 70919f562253..000000000000 --- a/doc/source/data/api/dataset_pipeline.rst +++ /dev/null @@ -1,99 +0,0 @@ -.. _dataset-pipeline-api: - -DatasetPipeline API -=================== - -.. currentmodule:: ray.data - -Constructor ------------ - -.. autosummary:: - :toctree: doc/ - - DatasetPipeline - -Basic Transformations ---------------------- - -.. autosummary:: - :toctree: doc/ - - DatasetPipeline.map - DatasetPipeline.map_batches - DatasetPipeline.flat_map - DatasetPipeline.foreach_window - DatasetPipeline.filter - DatasetPipeline.add_column - DatasetPipeline.drop_columns - DatasetPipeline.select_columns - -Sorting, Shuffling, Repartitioning ----------------------------------- - -.. autosummary:: - :toctree: doc/ - - DatasetPipeline.sort_each_window - DatasetPipeline.random_shuffle_each_window - DatasetPipeline.randomize_block_order_each_window - DatasetPipeline.repartition_each_window - -Splitting DatasetPipelines --------------------------- - -.. autosummary:: - :toctree: doc/ - - DatasetPipeline.split - DatasetPipeline.split_at_indices - -Creating DatasetPipelines -------------------------- - -.. autosummary:: - :toctree: doc/ - - DatasetPipeline.repeat - DatasetPipeline.rewindow - DatasetPipeline.from_iterable - -Consuming DatasetPipelines --------------------------- - -.. autosummary:: - :toctree: doc/ - - DatasetPipeline.show - DatasetPipeline.show_windows - DatasetPipeline.take - DatasetPipeline.take_all - DatasetPipeline.iterator - DatasetPipeline.iter_rows - DatasetPipeline.iter_batches - DatasetPipeline.iter_torch_batches - DatasetPipeline.iter_tf_batches - -I/O and Conversion ------------------- - -.. 
autosummary:: - :toctree: doc/ - - DatasetPipeline.write_json - DatasetPipeline.write_csv - DatasetPipeline.write_parquet - DatasetPipeline.write_datasource - DatasetPipeline.to_tf - DatasetPipeline.to_torch - -Inspecting Metadata -------------------- - -.. autosummary:: - :toctree: doc/ - - DatasetPipeline.schema - DatasetPipeline.count - DatasetPipeline.stats - DatasetPipeline.sum diff --git a/doc/source/data/api/datastream.rst b/doc/source/data/api/datastream.rst new file mode 100644 index 000000000000..1963ce142f2b --- /dev/null +++ b/doc/source/data/api/datastream.rst @@ -0,0 +1,165 @@ +.. _datastream-api: + +Datastream API +============== + +.. currentmodule:: ray.data + +Constructor +----------- + +.. autosummary:: + :toctree: doc/ + + Datastream + +Basic Transformations +--------------------- + +.. autosummary:: + :toctree: doc/ + + Datastream.map + Datastream.map_batches + Datastream.flat_map + Datastream.filter + Datastream.add_column + Datastream.drop_columns + Datastream.select_columns + Datastream.random_sample + Datastream.limit + +Sorting, Shuffling, Repartitioning +---------------------------------- + +.. autosummary:: + :toctree: doc/ + + Datastream.sort + Datastream.random_shuffle + Datastream.randomize_block_order + Datastream.repartition + +Splitting and Merging Datastreams +--------------------------------- + +.. autosummary:: + :toctree: doc/ + + Datastream.split + Datastream.split_at_indices + Datastream.split_proportionately + Datastream.streaming_split + Datastream.train_test_split + Datastream.union + Datastream.zip + +Grouped and Global Aggregations +------------------------------- + +.. autosummary:: + :toctree: doc/ + + Datastream.groupby + Datastream.aggregate + Datastream.sum + Datastream.min + Datastream.max + Datastream.mean + Datastream.std + +Converting to Pipeline +---------------------- + +.. autosummary:: + :toctree: doc/ + + Datastream.repeat + Datastream.window + +Consuming Datastreams +--------------------- + +.. 
autosummary:: + :toctree: doc/ + + Datastream.show + Datastream.take + Datastream.take_batch + Datastream.take_all + Datastream.iterator + Datastream.iter_rows + Datastream.iter_batches + Datastream.iter_torch_batches + Datastream.iter_tf_batches + +I/O and Conversion +------------------ + +.. autosummary:: + :toctree: doc/ + + Datastream.write_parquet + Datastream.write_json + Datastream.write_csv + Datastream.write_numpy + Datastream.write_tfrecords + Datastream.write_webdataset + Datastream.write_mongo + Datastream.write_datasource + Datastream.to_torch + Datastream.to_tf + Datastream.to_dask + Datastream.to_mars + Datastream.to_modin + Datastream.to_spark + Datastream.to_pandas + Datastream.to_pandas_refs + Datastream.to_numpy_refs + Datastream.to_arrow_refs + Datastream.to_random_access_dataset + +Inspecting Metadata +------------------- + +.. autosummary:: + :toctree: doc/ + + Datastream.count + Datastream.schema + Datastream.default_batch_format + Datastream.num_blocks + Datastream.size_bytes + Datastream.input_files + Datastream.stats + Datastream.get_internal_block_refs + +Execution +--------- + +.. autosummary:: + :toctree: doc/ + + Datastream.materialize + +Serialization +------------- + +.. autosummary:: + :toctree: doc/ + + Datastream.has_serializable_lineage + Datastream.serialize_lineage + Datastream.deserialize_lineage + +Internals +--------- + +.. autosummary:: + :toctree: doc/ + + Datastream.__init__ + Datastream.dataset_format + Datastream.fully_executed + Datastream.is_fully_executed + Datastream.lazy diff --git a/doc/source/data/api/from_other_data_libs.rst b/doc/source/data/api/from_other_data_libs.rst index 5fb1d540074b..054119c874b3 100644 --- a/doc/source/data/api/from_other_data_libs.rst +++ b/doc/source/data/api/from_other_data_libs.rst @@ -3,16 +3,16 @@ API Guide for Users from Other Data Libraries ============================================= -Ray Datasets is a data loading and preprocessing library for ML. 
It shares certain +Ray Data is a data loading and preprocessing library for ML. It shares certain similarities with other ETL data processing libraries, but also has its own focus. In this API guide, we will provide API mappings for users who come from those data -libraries, so you can quickly map what you may already know to Ray Datasets APIs. +libraries, so you can quickly map what you may already know to Ray Data APIs. .. note:: - This is meant to map APIs that perform comparable but not necessarily identical operations. Please check the API reference for exact semantics and usage. - - This list may not be exhaustive: Ray Datasets is not a traditional ETL data processing library, so not all data processing APIs can map to Datasets. + - This list may not be exhaustive: Ray Data is not a traditional ETL data processing library, so not all data processing APIs can map to Datastreams. In addition, we try to focus on common APIs or APIs that are less obvious to see a connection. .. _api-guide-for-pandas-users: @@ -20,69 +20,69 @@ libraries, so you can quickly map what you may already know to Ray Datasets APIs For Pandas Users ---------------- -.. list-table:: Pandas DataFrame vs. Ray Datasets APIs +.. list-table:: Pandas DataFrame vs. 
Ray Data APIs :header-rows: 1 * - Pandas DataFrame API - - Ray Datasets API + - Ray Data API * - df.head() - - :meth:`ds.show() `, :meth:`ds.take() `, or :meth:`ds.take_batch() ` + - :meth:`ds.show() `, :meth:`ds.take() `, or :meth:`ds.take_batch() ` * - df.dtypes - - :meth:`ds.schema() ` + - :meth:`ds.schema() ` * - len(df) or df.shape[0] - - :meth:`ds.count() ` + - :meth:`ds.count() ` * - df.truncate() - - :meth:`ds.limit() ` + - :meth:`ds.limit() ` * - df.iterrows() - - :meth:`ds.iter_rows() ` + - :meth:`ds.iter_rows() ` * - df.drop() - - :meth:`ds.drop_columns() ` + - :meth:`ds.drop_columns() ` * - df.transform() - - :meth:`ds.map_batches() ` or :meth:`ds.map() ` + - :meth:`ds.map_batches() ` or :meth:`ds.map() ` * - df.groupby() - - :meth:`ds.groupby() ` + - :meth:`ds.groupby() ` * - df.groupby().apply() - - :meth:`ds.groupby().map_groups() ` + - :meth:`ds.groupby().map_groups() ` * - df.sample() - - :meth:`ds.random_sample() ` + - :meth:`ds.random_sample() ` * - df.sort_values() - - :meth:`ds.sort() ` + - :meth:`ds.sort() ` * - df.append() - - :meth:`ds.union() ` + - :meth:`ds.union() ` * - df.aggregate() - - :meth:`ds.aggregate() ` + - :meth:`ds.aggregate() ` * - df.min() - - :meth:`ds.min() ` + - :meth:`ds.min() ` * - df.max() - - :meth:`ds.max() ` + - :meth:`ds.max() ` * - df.sum() - - :meth:`ds.sum() ` + - :meth:`ds.sum() ` * - df.mean() - - :meth:`ds.mean() ` + - :meth:`ds.mean() ` * - df.std() - - :meth:`ds.std() ` + - :meth:`ds.std() ` .. _api-guide-for-pyarrow-users: For PyArrow Users ----------------- -.. list-table:: PyArrow Table vs. Ray Datasets APIs +.. list-table:: PyArrow Table vs. 
Ray Data APIs :header-rows: 1 * - PyArrow Table API - - Ray Datasets API + - Ray Data API * - pa.Table.schema - - :meth:`ds.schema() ` + - :meth:`ds.schema() ` * - pa.Table.num_rows - - :meth:`ds.count() ` + - :meth:`ds.count() ` * - pa.Table.filter() - - :meth:`ds.filter() ` + - :meth:`ds.filter() ` * - pa.Table.drop() - - :meth:`ds.drop_columns() ` + - :meth:`ds.drop_columns() ` * - pa.Table.add_column() - - :meth:`ds.add_column() ` + - :meth:`ds.add_column() ` * - pa.Table.groupby() - - :meth:`ds.groupby() ` + - :meth:`ds.groupby() ` * - pa.Table.sort_by() - - :meth:`ds.sort() ` + - :meth:`ds.sort() ` diff --git a/doc/source/data/api/grouped_dataset.rst b/doc/source/data/api/grouped_data.rst similarity index 57% rename from doc/source/data/api/grouped_dataset.rst rename to doc/source/data/api/grouped_data.rst index afcfb498d7f6..2b2e74721477 100644 --- a/doc/source/data/api/grouped_dataset.rst +++ b/doc/source/data/api/grouped_data.rst @@ -5,7 +5,7 @@ GroupedData API .. currentmodule:: ray.data -GroupedData objects are returned by groupby call: Dataset.groupby(). +GroupedData objects are returned by groupby call: Datastream.groupby(). Constructor ----------- @@ -13,7 +13,7 @@ Constructor .. autosummary:: :toctree: doc/ - grouped_dataset.GroupedData + grouped_data.GroupedData Computations / Descriptive Stats -------------------------------- @@ -21,12 +21,12 @@ Computations / Descriptive Stats .. autosummary:: :toctree: doc/ - grouped_dataset.GroupedData.count - grouped_dataset.GroupedData.sum - grouped_dataset.GroupedData.min - grouped_dataset.GroupedData.max - grouped_dataset.GroupedData.mean - grouped_dataset.GroupedData.std + grouped_data.GroupedData.count + grouped_data.GroupedData.sum + grouped_data.GroupedData.min + grouped_data.GroupedData.max + grouped_data.GroupedData.mean + grouped_data.GroupedData.std Function Application -------------------- @@ -34,8 +34,8 @@ Function Application .. 
autosummary:: :toctree: doc/ - grouped_dataset.GroupedData.aggregate - grouped_dataset.GroupedData.map_groups + grouped_data.GroupedData.aggregate + grouped_data.GroupedData.map_groups Aggregate Function ------------------ diff --git a/doc/source/data/api/input_output.rst b/doc/source/data/api/input_output.rst index 281d486911bb..2ebc39c22506 100644 --- a/doc/source/data/api/input_output.rst +++ b/doc/source/data/api/input_output.rst @@ -31,7 +31,7 @@ Parquet read_parquet read_parquet_bulk - Dataset.write_parquet + Datastream.write_parquet CSV --- @@ -40,7 +40,7 @@ CSV :toctree: doc/ read_csv - Dataset.write_csv + Datastream.write_csv JSON ---- @@ -49,7 +49,7 @@ JSON :toctree: doc/ read_json - Dataset.write_json + Datastream.write_json Text ---- @@ -82,7 +82,7 @@ TFRecords :toctree: doc/ read_tfrecords - Dataset.write_tfrecords + Datastream.write_tfrecords Pandas @@ -93,8 +93,8 @@ Pandas from_pandas from_pandas_refs - Dataset.to_pandas - Dataset.to_pandas_refs + Datastream.to_pandas + Datastream.to_pandas_refs NumPy ----- @@ -105,8 +105,8 @@ NumPy read_numpy from_numpy from_numpy_refs - Dataset.write_numpy - Dataset.to_numpy_refs + Datastream.write_numpy + Datastream.to_numpy_refs Arrow ----- @@ -116,7 +116,7 @@ Arrow from_arrow from_arrow_refs - Dataset.to_arrow_refs + Datastream.to_arrow_refs MongoDB ------- @@ -125,7 +125,7 @@ MongoDB :toctree: doc/ read_mongo - Dataset.write_mongo + Datastream.write_mongo SQL Databases ------------- @@ -142,7 +142,7 @@ Dask :toctree: doc/ from_dask - Dataset.to_dask + Datastream.to_dask Spark ----- @@ -151,7 +151,7 @@ Spark :toctree: doc/ from_spark - Dataset.to_spark + Datastream.to_spark Modin ----- @@ -160,7 +160,7 @@ Modin :toctree: doc/ from_modin - Dataset.to_modin + Datastream.to_modin Mars ---- @@ -169,7 +169,7 @@ Mars :toctree: doc/ from_mars - Dataset.to_mars + Datastream.to_mars Torch ----- @@ -212,7 +212,7 @@ Datasource API :toctree: doc/ read_datasource - Dataset.write_datasource + Datastream.write_datasource 
Datasource ReadTask datasource.Reader diff --git a/doc/source/data/api/random_access_dataset.rst b/doc/source/data/api/random_access_dataset.rst index e3a171af18d2..6bfbdba1585c 100644 --- a/doc/source/data/api/random_access_dataset.rst +++ b/doc/source/data/api/random_access_dataset.rst @@ -1,7 +1,7 @@ .. _random-access-dataset-api: -(Experimental) RandomAccessDataset API -====================================== +RandomAccessDataset (experimental) +================================== .. currentmodule:: ray.data diff --git a/doc/source/data/consuming-datasets.rst b/doc/source/data/consuming-datastreams.rst similarity index 54% rename from doc/source/data/consuming-datasets.rst rename to doc/source/data/consuming-datastreams.rst index bd67e078eb4b..be61d82f016f 100644 --- a/doc/source/data/consuming-datasets.rst +++ b/doc/source/data/consuming-datastreams.rst @@ -1,10 +1,10 @@ -.. _consuming_datasets: +.. _consuming_datastreams: -================== -Consuming Datasets -================== +===================== +Consuming Datastreams +===================== -The data underlying a ``Dataset`` can be consumed in several ways: +The data underlying a ``Datastream`` can be consumed in several ways: * Retrieving a limited prefix of rows. * Iterating over rows and batches. @@ -13,46 +13,46 @@ The data underlying a ``Dataset`` can be consumed in several ways: Retrieving a limited set of rows ================================ -A limited set of rows can be retrieved from a ``Dataset`` via the -:meth:`ds.take() ` or :meth:`ds.take_batch() ` -APIs, and :meth:`ds.show() `, for printing a limited set of rows. These +A limited set of rows can be retrieved from a ``Datastream`` via the +:meth:`ds.take() ` or :meth:`ds.take_batch() ` +APIs, and :meth:`ds.show() `, for printing a limited set of rows. These methods are convenient for quickly inspecting a subset (prefix) of rows. 
They have the benefit that, if used right after reading, they will only trigger more files to be read if needed to retrieve rows from that file; if inspecting a small prefix of rows, often only the first file will need to be read. -.. literalinclude:: ./doc_code/consuming_datasets.py +.. literalinclude:: ./doc_code/consuming_datastreams.py :language: python :start-after: __take_begin__ :end-before: __take_end__ -Iterating over Datasets -======================= +Iterating over Datastreams +========================== -Datasets can be consumed a row at a time using the -:meth:`ds.iter_rows() ` API +Datastreams can be consumed a row at a time using the +:meth:`ds.iter_rows() ` API -.. literalinclude:: ./doc_code/consuming_datasets.py +.. literalinclude:: ./doc_code/consuming_datastreams.py :language: python :start-after: __iter_rows_begin__ :end-before: __iter_rows_end__ or a batch at a time using the -:meth:`ds.iter_batches() ` API, where you can specify +:meth:`ds.iter_batches() ` API, where you can specify batch size as well as the desired batch format. By default, the batch format is ``"default"``. For tabular data, the default format is a Pandas DataFrame; for Python objects, it's a list. -.. literalinclude:: ./doc_code/consuming_datasets.py +.. literalinclude:: ./doc_code/consuming_datastreams.py :language: python :start-after: __iter_batches_begin__ :end-before: __iter_batches_end__ -Datasets can be passed to Ray tasks or actors and accessed by these iteration methods. -This does not incur a copy, since the blocks of the Dataset are passed by reference as Ray objects: +Datastreams can be passed to Ray tasks or actors and accessed by these iteration methods. +This does not incur a copy, since the blocks of the Datastream are passed by reference as Ray objects: -.. literalinclude:: ./doc_code/consuming_datasets.py +.. 
literalinclude:: ./doc_code/consuming_datastreams.py :language: python :start-after: __remote_iterators_begin__ :end-before: __remote_iterators_end__ @@ -61,62 +61,60 @@ This does not incur a copy, since the blocks of the Dataset are passed by refere Splitting Into and Consuming Shards =================================== -Datasets can be split up into disjoint sub-datasets, or shards. -Locality-aware splitting is supported if you pass in a list of actor handles to the -:meth:`ds.split() ` function along with the number of desired splits. +Datastreams can be split up into disjoint iterators, or shards. This is a common pattern useful for loading and sharding data between distributed training actors: .. note:: - If using :ref:`Ray Train ` for distributed training, you do not need to split the dataset; Ray - Train will automatically do locality-aware splitting into per-trainer shards for you! + If using :ref:`Ray Train ` for distributed training, you do not need to split the datastream; Ray + Train will automatically do locality-aware splitting into per-trainer shards for you. -.. literalinclude:: ./doc_code/consuming_datasets.py +.. literalinclude:: ./doc_code/consuming_datastreams.py :language: python :start-after: __split_begin__ :end-before: __split_end__ -.. _saving_datasets: +.. _saving_datastreams: -Saving Datasets -=============== +Saving Datastreams +================== -Datasets can be written to local or remote storage in the desired data format. +Datastreams can be written to local or remote storage in the desired data format. The supported formats include Parquet, CSV, JSON, NumPy. To control the number -of output files, you may use :meth:`ds.repartition() ` -to repartition the Dataset before writing out. +of output files, you may use :meth:`ds.repartition() ` +to repartition the Datastream before writing out. .. tabbed:: Parquet - .. literalinclude:: ./doc_code/saving_datasets.py + .. 
literalinclude:: ./doc_code/saving_datastreams.py :language: python :start-after: __write_parquet_begin__ :end-before: __write_parquet_end__ .. tabbed:: CSV - .. literalinclude:: ./doc_code/saving_datasets.py + .. literalinclude:: ./doc_code/saving_datastreams.py :language: python :start-after: __write_csv_begin__ :end-before: __write_csv_end__ .. tabbed:: JSON - .. literalinclude:: ./doc_code/saving_datasets.py + .. literalinclude:: ./doc_code/saving_datastreams.py :language: python :start-after: __write_json_begin__ :end-before: __write_json_end__ .. tabbed:: NumPy - .. literalinclude:: ./doc_code/saving_datasets.py + .. literalinclude:: ./doc_code/saving_datastreams.py :language: python :start-after: __write_numpy_begin__ :end-before: __write_numpy_end__ .. tabbed:: TFRecords - .. literalinclude:: ./doc_code/saving_datasets.py + .. literalinclude:: ./doc_code/saving_datastreams.py :language: python :start-after: __write_tfrecords_begin__ :end-before: __write_tfrecords_end__ diff --git a/doc/source/data/creating-datasets.rst b/doc/source/data/creating-datastreams.rst similarity index 70% rename from doc/source/data/creating-datasets.rst rename to doc/source/data/creating-datastreams.rst index ff17277fd400..8c180dd3eded 100644 --- a/doc/source/data/creating-datasets.rst +++ b/doc/source/data/creating-datastreams.rst @@ -1,22 +1,22 @@ -.. _creating_datasets: +.. _creating_datastreams: -================= -Creating Datasets -================= +==================== +Creating Datastreams +==================== -Ray :class:`Datasets ` can be created from: +:class:`Datastreams ` can be created from: * generated synthetic data, * local and distributed in-memory data, and * local and external storage systems (local disk, cloud storage, HDFS, etc.). -This guide surveys the many ways to create a ``Dataset``. If none of these meet your +This guide surveys the many ways to create a ``Datastream``. 
If none of these meet your needs, please reach out to us on `Discourse `__ or open a feature request on the `Ray GitHub repo `__, and check out -our :ref:`guide for implementing a custom Datasets datasource ` +our :ref:`guide for implementing a custom datasource ` if you're interested in rolling your own integration! -.. _dataset_generate_data: +.. _datastream_generate_data: ------------------------- Generating Synthetic Data @@ -24,40 +24,40 @@ Generating Synthetic Data .. tabbed:: Int Range - Create a ``Dataset`` from a range of integers. + Create a ``Datastream`` from a range of integers. - .. literalinclude:: ./doc_code/creating_datasets.py + .. literalinclude:: ./doc_code/creating_datastreams.py :language: python :start-after: __gen_synth_int_range_begin__ :end-before: __gen_synth_int_range_end__ .. tabbed:: Tabular Range - Create an Arrow (tabular) ``Dataset`` from a range of integers, + Create an Arrow (tabular) ``Datastream`` from a range of integers, with a single column containing this integer range. - .. literalinclude:: ./doc_code/creating_datasets.py + .. literalinclude:: ./doc_code/creating_datastreams.py :language: python :start-after: __gen_synth_tabular_range_begin__ :end-before: __gen_synth_tabular_range_end__ .. tabbed:: Tensor Range - Create a tensor dataset from a range of integers, packing this integer range into + Create a tensor datastream from a range of integers, packing this integer range into tensors of the provided shape. - .. literalinclude:: ./doc_code/creating_datasets.py + .. literalinclude:: ./doc_code/creating_datastreams.py :language: python :start-after: __gen_synth_tensor_range_begin__ :end-before: __gen_synth_tensor_range_end__ -.. _dataset_reading_from_storage: +.. 
_datastream_reading_from_storage: -------------------------- Reading Files From Storage -------------------------- -Using the ``ray.data.read_*()`` APIs, Datasets can be created from files on local disk +Using the ``ray.data.read_*()`` APIs, Datastreams can be created from files on local disk or remote storage system such as S3, GCS, Azure Blob Storage, or HDFS. Any filesystem `supported by pyarrow `__ can be used to specify file locations, and many common file formats are supported: @@ -67,29 +67,29 @@ Each of these APIs take a path or list of paths to files or directories. Any dir provided will be walked in order to obtain concrete file paths, at which point all files will be read in parallel. -.. _dataset_supported_file_formats: +.. _datastream_supported_file_formats: Supported File Formats ====================== .. tabbed:: Parquet - Read Parquet files into a tabular ``Dataset``. The Parquet data will be read into + Read Parquet files into a tabular ``Datastream``. The Parquet data will be read into `Arrow Table `__ blocks. Although this simple example demonstrates reading a single file, note that - Datasets can also read directories of Parquet files. We also support reading partitioned + Datastreams can also read directories of Parquet files. We also support reading partitioned Parquet datasets with partition column values pulled from the file paths. - .. literalinclude:: ./doc_code/creating_datasets.py + .. literalinclude:: ./doc_code/creating_datastreams.py :language: python :start-after: __read_parquet_begin__ :end-before: __read_parquet_end__ - Datasets' Parquet reader also supports projection and filter pushdown, allowing column + Datastreams' Parquet reader also supports projection and filter pushdown, allowing column selection and row filtering to be pushed down to the file scan. For column selection, unselected columns will never be read from the file. - .. literalinclude:: ./doc_code/creating_datasets.py + .. 
literalinclude:: ./doc_code/creating_datastreams.py :language: python :start-after: __read_parquet_pushdown_begin__ :end-before: __read_parquet_pushdown_end__ @@ -98,13 +98,13 @@ Supported File Formats .. tabbed:: CSV - Read CSV files into a tabular ``Dataset``. The CSV data will be read into + Read CSV files into a tabular ``Datastream``. The CSV data will be read into `Arrow Table `__ blocks. Although this simple example demonstrates reading a single file, note that - Datasets can also read directories of CSV files, with one tabular block created + Datastreams can also read directories of CSV files, with one tabular block created per file. - .. literalinclude:: ./doc_code/creating_datasets.py + .. literalinclude:: ./doc_code/creating_datastreams.py :language: python :start-after: __read_csv_begin__ :end-before: __read_csv_end__ @@ -113,15 +113,15 @@ Supported File Formats .. tabbed:: JSON - Read JSON files into a tabular ``Dataset``. The JSON data will be read into + Read JSON files into a tabular ``Datastream``. The JSON data will be read into `Arrow Table `__ blocks. Although this simple example demonstrates reading a single file, note that - Datasets can also read directories of JSON files, with one tabular block created + Datastreams can also read directories of JSON files, with one tabular block created per file. Currently, only newline-delimited JSON (NDJSON) is supported. - .. literalinclude:: ./doc_code/creating_datasets.py + .. literalinclude:: ./doc_code/creating_datastreams.py :language: python :start-after: __read_json_begin__ :end-before: __read_json_end__ @@ -130,18 +130,18 @@ Supported File Formats .. tabbed:: NumPy - Read NumPy files into a tensor ``Dataset``. The NumPy ndarray data will be read into + Read NumPy files into a tensor ``Datastream``. The NumPy ndarray data will be read into single-column `Arrow Table `__ blocks using our :class:`tensor extension type `, treating the outermost ndarray dimension as the row dimension. 
See our - :ref:`tensor data guide ` for more information on working - with tensors in Datasets. Although this simple example demonstrates reading a single - file, note that Datasets can also read directories of NumPy files, with one tensor + :ref:`tensor data guide ` for more information on working + with tensors in Datastreams. Although this simple example demonstrates reading a single + file, note that Datastreams can also read directories of NumPy files, with one tensor block created per file. - .. literalinclude:: ./doc_code/creating_datasets.py + .. literalinclude:: ./doc_code/creating_datastreams.py :language: python :start-after: __read_numpy_begin__ :end-before: __read_numpy_end__ @@ -150,11 +150,11 @@ Supported File Formats .. tabbed:: Text - Read text files into a ``Dataset``. Each line in each text file will be treated as a - row in the dataset, resulting in a list-of-strings block being created for each text + Read text files into a ``Datastream``. Each line in each text file will be treated as a + row in the datastream, resulting in a list-of-strings block being created for each text file. - .. literalinclude:: ./doc_code/creating_datasets.py + .. literalinclude:: ./doc_code/creating_datastreams.py :language: python :start-after: __read_text_begin__ :end-before: __read_text_end__ @@ -163,31 +163,31 @@ Supported File Formats .. tabbed:: Images - Call :func:`~ray.data.read_images` to read images into a :class:`~ray.data.Dataset`. + Call :func:`~ray.data.read_images` to read images into a :class:`~ray.data.Datastream`. This function stores image data in single-column `Arrow Table `__ blocks using the :class:`tensor extension type `. - For more information on working with tensors in Datasets, read the - :ref:`tensor data guide `. + For more information on working with tensors in Datastreams, read the + :ref:`tensor data guide `. - .. literalinclude:: ./doc_code/creating_datasets.py + .. 
literalinclude:: ./doc_code/creating_datastreams.py :language: python :start-after: __read_images_begin__ :end-before: __read_images_end__ .. tabbed:: Binary - Read binary files into a ``Dataset``. Each binary file will be treated as a single row + Read binary files into a ``Datastream``. Each binary file will be treated as a single row of opaque bytes. These bytes can be decoded into tensor, tabular, text, or any other - kind of data using :meth:`~ray.data.Dataset.map_batches` to apply a per-row decoding - :ref:`user-defined function `. + kind of data using :meth:`~ray.data.Datastream.map_batches` to apply a per-row decoding + :ref:`user-defined function `. - Although this simple example demonstrates reading a single file, note that Datasets + Although this simple example demonstrates reading a single file, note that Datastreams can also read directories of binary files, with one bytes block created per file. - .. literalinclude:: ./doc_code/creating_datasets.py + .. literalinclude:: ./doc_code/creating_datastreams.py :language: python :start-after: __read_binary_begin__ :end-before: __read_binary_end__ @@ -197,18 +197,18 @@ Supported File Formats .. tabbed:: TFRecords Call :func:`~ray.data.read_tfrecords` to read TFRecord files into a tabular - :class:`~ray.data.Dataset`. + :class:`~ray.data.Datastream`. .. warning:: Only `tf.train.Example `_ records are supported. - .. literalinclude:: ./doc_code/creating_datasets.py + .. literalinclude:: ./doc_code/creating_datastreams.py :language: python :start-after: __read_tfrecords_begin__ :end-before: __read_tfrecords_end__ -.. _dataset_reading_remote_storage: +.. _datastream_reading_remote_storage: Reading from Remote Storage @@ -238,7 +238,7 @@ are supported for each of these storage systems. configuration such as S3 credentials being pulled from the machine's environment (e.g. the ``AWS_ACCESS_KEY_ID`` and ``AWS_SECRET_ACCESS_KEY`` environment variables). - .. literalinclude:: ./doc_code/creating_datasets.py + .. 
literalinclude:: ./doc_code/creating_datastreams.py :language: python :start-after: __read_parquet_s3_begin__ :end-before: __read_parquet_s3_end__ @@ -248,7 +248,7 @@ are supported for each of these storage systems. `S3FileSystem `__ instance to :func:`read_parquet() `. - .. literalinclude:: ./doc_code/creating_datasets_untested.py + .. literalinclude:: ./doc_code/creating_datastreams_untested.py :language: python :start-after: __read_parquet_s3_with_fs_begin__ :end-before: __read_parquet_s3_with_fs_end__ @@ -263,7 +263,7 @@ are supported for each of these storage systems. This example is not runnable as-is; you'll need to point it at your HDFS cluster/data. - .. literalinclude:: ./doc_code/creating_datasets_untested.py + .. literalinclude:: ./doc_code/creating_datastreams_untested.py :language: python :start-after: __read_parquet_hdfs_begin__ :end-before: __read_parquet_hdfs_end__ @@ -273,7 +273,7 @@ are supported for each of these storage systems. `__ instance to :func:`read_parquet() `. - .. literalinclude:: ./doc_code/creating_datasets_untested.py + .. literalinclude:: ./doc_code/creating_datastreams_untested.py :language: python :start-after: __read_parquet_hdfs_with_fs_begin__ :end-before: __read_parquet_hdfs_with_fs_end__ @@ -288,7 +288,7 @@ are supported for each of these storage systems. This example is not runnable as-is; you'll need to point it at your GCS bucket and configure your GCP project and credentials. - .. literalinclude:: ./doc_code/creating_datasets_untested.py + .. literalinclude:: ./doc_code/creating_datastreams_untested.py :language: python :start-after: __read_parquet_gcs_begin__ :end-before: __read_parquet_gcs_end__ @@ -297,7 +297,7 @@ are supported for each of these storage systems. To verify that your GCP project and credentials are set up, validate that the GCS `filesystem` has permissions to read the input `path`. - .. literalinclude:: ./doc_code/creating_datasets_untested.py + .. 
literalinclude:: ./doc_code/creating_datastreams_untested.py :language: python :start-after: __validate_parquet_gcs_begin__ :end-before: __validate_parquet_gcs_end__ @@ -310,7 +310,7 @@ are supported for each of these storage systems. `adlfs AzureBlobFileSystem `__, where the appropriate account name and account key can be specified. - .. literalinclude:: ./doc_code/creating_datasets_untested.py + .. literalinclude:: ./doc_code/creating_datastreams_untested.py :language: python :start-after: __read_parquet_az_begin__ :end-before: __read_parquet_az_end__ @@ -318,7 +318,7 @@ are supported for each of these storage systems. Reading from Local Storage ========================== -In Ray Datasets, users often read from remote storage systems as described above. In +In Ray Data, users often read from remote storage systems as described above. In some use cases, users may want to read from local storage. There are three ways to read from a local filesystem: @@ -342,180 +342,180 @@ from a local filesystem: Reading Compressed Files ======================== -Ray Datasets supports reading compressed files using the ``arrow_open_stream_args`` arg. +Ray Data supports reading compressed files using the ``arrow_open_stream_args`` arg. `Codecs supported by Arrow `__ -(bz2, brotli, gzip, lz4 or zstd) are compatible with Ray Datasets. +(bz2, brotli, gzip, lz4 or zstd) are compatible with Ray Data. For example: -.. literalinclude:: ./doc_code/creating_datasets.py +.. literalinclude:: ./doc_code/creating_datastreams.py :language: python :start-after: __read_compressed_begin__ :end-before: __read_compressed_end__ -.. _dataset_from_in_memory_data: +.. _datastream_from_in_memory_data: ------------------- From In-Memory Data ------------------- -Datasets can be constructed from existing in-memory data. In addition to being able to -construct a ``Dataset`` from plain Python objects, Datasets also interoperates with popular +Datastreams can be constructed from existing in-memory data. 
In addition to being able to +construct a ``Datastream`` from plain Python objects, Datastreams also interoperates with popular single-node libraries (`Pandas `__, `NumPy `__, `Arrow `__) as well as distributed frameworks (:ref:`Dask `, :ref:`Spark `, :ref:`Modin `, :ref:`Mars `). -.. _dataset_from_in_memory_data_single_node: +.. _datastream_from_in_memory_data_single_node: From Single-Node Data Libraries =============================== -In this section, we demonstrate creating a ``Dataset`` from single-node in-memory data. +In this section, we demonstrate creating a ``Datastream`` from single-node in-memory data. .. tabbed:: Pandas - Create a ``Dataset`` from a Pandas DataFrame. This constructs a ``Dataset`` + Create a ``Datastream`` from a Pandas DataFrame. This constructs a ``Datastream`` backed by a single Pandas DataFrame block. - .. literalinclude:: ./doc_code/creating_datasets.py + .. literalinclude:: ./doc_code/creating_datastreams.py :language: python :start-after: __from_pandas_begin__ :end-before: __from_pandas_end__ - We can also build a ``Dataset`` from more than one Pandas DataFrame, where each said - DataFrame will become a block in the ``Dataset``. + We can also build a ``Datastream`` from more than one Pandas DataFrame, where each said + DataFrame will become a block in the ``Datastream``. - .. literalinclude:: ./doc_code/creating_datasets.py + .. literalinclude:: ./doc_code/creating_datastreams.py :language: python :start-after: __from_pandas_mult_begin__ :end-before: __from_pandas_mult_end__ .. tabbed:: NumPy - Create a ``Dataset`` from a NumPy ndarray. This constructs a ``Dataset`` + Create a ``Datastream`` from a NumPy ndarray. This constructs a ``Datastream`` backed by a single-column Arrow table block; the outer dimension of the ndarray will be treated as the row dimension, and the column will have name ``"__value__"``. - .. literalinclude:: ./doc_code/creating_datasets.py + .. 
literalinclude:: ./doc_code/creating_datastreams.py :language: python :start-after: __from_numpy_begin__ :end-before: __from_numpy_end__ - We can also build a ``Dataset`` from more than one NumPy ndarray, where each said - ndarray will become a single-column Arrow table block in the ``Dataset``. + We can also build a ``Datastream`` from more than one NumPy ndarray, where each said + ndarray will become a single-column Arrow table block in the ``Datastream``. - .. literalinclude:: ./doc_code/creating_datasets.py + .. literalinclude:: ./doc_code/creating_datastreams.py :language: python :start-after: __from_numpy_mult_begin__ :end-before: __from_numpy_mult_end__ .. tabbed:: Arrow - Create a ``Dataset`` from an + Create a ``Datastream`` from an `Arrow Table `__. - This constructs a ``Dataset`` backed by a single Arrow ``Table`` block. + This constructs a ``Datastream`` backed by a single Arrow ``Table`` block. - .. literalinclude:: ./doc_code/creating_datasets.py + .. literalinclude:: ./doc_code/creating_datastreams.py :language: python :start-after: __from_arrow_begin__ :end-before: __from_arrow_end__ - We can also build a ``Dataset`` from more than one Arrow Table, where each said - ``Table`` will become a block in the ``Dataset``. + We can also build a ``Datastream`` from more than one Arrow Table, where each said + ``Table`` will become a block in the ``Datastream``. - .. literalinclude:: ./doc_code/creating_datasets.py + .. literalinclude:: ./doc_code/creating_datastreams.py :language: python :start-after: __from_arrow_mult_begin__ :end-before: __from_arrow_mult_end__ .. tabbed:: Python Objects - Create a ``Dataset`` from a list of Python objects; since each object in this - particular list is a dictionary, Datasets will treat this list as a list of tabular - records, and will construct an Arrow ``Dataset``. 
+ Create a ``Datastream`` from a list of Python objects; since each object in this + particular list is a dictionary, Datastreams will treat this list as a list of tabular + records, and will construct an Arrow ``Datastream``. - .. literalinclude:: ./doc_code/creating_datasets.py + .. literalinclude:: ./doc_code/creating_datastreams.py :language: python :start-after: __from_items_begin__ :end-before: __from_items_end__ -.. _dataset_from_in_memory_data_distributed: +.. _datastream_from_in_memory_data_distributed: From Distributed Data Processing Frameworks =========================================== -In addition to working with single-node in-memory data, Datasets can be constructed from +In addition to working with single-node in-memory data, Datastreams can be constructed from distributed (multi-node) in-memory data, interoperating with popular distributed data processing frameworks such as :ref:`Dask `, :ref:`Spark `, :ref:`Modin `, and :ref:`Mars `. These conversions work by running Ray tasks converting each Dask/Spark/Modin/Mars -data partition to a block format supported by Datasets (copying data if needed), and using the -futures representing the return value of those conversion tasks as the ``Dataset`` block +data partition to a block format supported by Datastreams (copying data if needed), and using the +futures representing the return value of those conversion tasks as the ``Datastream`` block futures. .. note:: - These data processing frameworks must be running on Ray in order for these Datasets + These data processing frameworks must be running on Ray in order for these Datastreams integrations to work. See how these frameworks can be run on Ray in our :ref:`data processing integrations docs `. .. tabbed:: Dask - Create a ``Dataset`` from a + Create a ``MaterializedDatastream`` from a `Dask DataFrame `__. 
This constructs a - ``Dataset`` backed by the distributed Pandas DataFrame partitions that underly the + ``Datastream`` backed by the distributed Pandas DataFrame partitions that underly the Dask DataFrame. - This conversion has near-zero overhead, since Datasets simply reinterprets existing - Dask-in-Ray partition objects as Dataset blocks. + This conversion has near-zero overhead, since Datastreams simply reinterprets existing + Dask-in-Ray partition objects as Datastream blocks. - .. literalinclude:: ./doc_code/creating_datasets.py + .. literalinclude:: ./doc_code/creating_datastreams.py :language: python :start-after: __from_dask_begin__ :end-before: __from_dask_end__ .. tabbed:: Spark - Create a ``Dataset`` from a `Spark DataFrame + Create a ``MaterializedDatastream`` from a `Spark DataFrame `__. - This constructs a ``Dataset`` backed by the distributed Spark DataFrame partitions + This constructs a ``Datastream`` backed by the distributed Spark DataFrame partitions that underly the Spark DataFrame. When this conversion happens, Spark-on-Ray (RayDP) will save the Spark DataFrame partitions to Ray's object store in the Arrow format, - which Datasets will then interpret as its blocks. + which Datastreams will then interpret as its blocks. - .. literalinclude:: ./doc_code/creating_datasets_untested.py + .. literalinclude:: ./doc_code/creating_datastreams_untested.py :language: python :start-after: __from_spark_begin__ :end-before: __from_spark_end__ .. tabbed:: Modin - Create a ``Dataset`` from a Modin DataFrame. This constructs a ``Dataset`` + Create a ``MaterializedDatastream`` from a Modin DataFrame. This constructs a ``Datastream`` backed by the distributed Pandas DataFrame partitions that underly the Modin DataFrame. - This conversion has near-zero overhead, since Datasets simply reinterprets existing - Modin partition objects as Dataset blocks. 
+ This conversion has near-zero overhead, since Datastreams simply reinterprets existing + Modin partition objects as Datastream blocks. - .. literalinclude:: ./doc_code/creating_datasets.py + .. literalinclude:: ./doc_code/creating_datastreams.py :language: python :start-after: __from_modin_begin__ :end-before: __from_modin_end__ .. tabbed:: Mars - Create a ``Dataset`` from a Mars DataFrame. This constructs a ``Dataset`` + Create a ``MaterializedDatastream`` from a Mars DataFrame. This constructs a ``Datastream`` backed by the distributed Pandas DataFrame partitions that underly the Mars DataFrame. - This conversion has near-zero overhead, since Datasets simply reinterprets existing - Mars partition objects as Dataset blocks. + This conversion has near-zero overhead, since Datastreams simply reinterprets existing + Mars partition objects as Datastream blocks. - .. literalinclude:: ./doc_code/creating_datasets_untested.py + .. literalinclude:: ./doc_code/creating_datastreams_untested.py :language: python :start-after: __from_mars_begin__ :end-before: __from_mars_end__ -.. _dataset_from_torch_tf: +.. _datastream_from_torch_tf: ------------------------- From Torch and TensorFlow @@ -523,12 +523,12 @@ From Torch and TensorFlow .. tabbed:: PyTorch - If you already have a Torch dataset available, you can create a Ray Dataset using + If you already have a Torch dataset available, you can create a Datastream using :class:`~ray.data.from_torch`. .. warning:: :class:`~ray.data.from_torch` doesn't support parallel - reads. You should only use this datasource for small datasets like MNIST or + reads. You should only use this datasource for small datastreams like MNIST or CIFAR. .. 
code-block:: python @@ -536,45 +536,45 @@ From Torch and TensorFlow import ray import torchvision - dataset = torchvision.datasets.MNIST("data", download=True) - dataset = ray.data.from_torch(dataset) - dataset.take(1) + torch_ds = torchvision.datasets.MNIST("data", download=True) + datastream = ray.data.from_torch(torch_ds) + datastream.take(1) # (, 5) .. tabbed:: TensorFlow - If you already have a TensorFlow dataset available, you can create a Ray Dataset + If you already have a TensorFlow dataset available, you can create a Datastream using :class:`~ray.data.from_tf`. .. warning:: :class:`~ray.data.from_tf` doesn't support parallel reads. You - should only use this function with small datasets like MNIST or CIFAR. + should only use this function with small datastreams like MNIST or CIFAR. .. code-block:: python import ray import tensorflow_datasets as tfds - dataset, _ = tfds.load("cifar10", split=["train", "test"]) - dataset = ray.data.from_tf(dataset) + tf_ds, _ = tfds.load("cifar10", split=["train", "test"]) + datastream = ray.data.from_tf(tf_ds) - dataset - # -> Dataset(num_blocks=200, num_rows=50000, schema={id: binary, image: numpy.ndarray(shape=(32, 32, 3), dtype=uint8), label: int64}) + datastream + # -> MaterializedDatastream(num_blocks=200, num_rows=50000, schema={id: binary, image: numpy.ndarray(shape=(32, 32, 3), dtype=uint8), label: int64}) -.. _dataset_from_huggingface: +.. _datastream_from_huggingface: ------------------------------- From 🤗 (Hugging Face) Datasets ------------------------------- -You can convert 🤗 Datasets into Ray Datasets by using +You can convert 🤗 Datasets into Ray Data by using :py:class:`~ray.data.from_huggingface`. This function accesses the underlying Arrow table and -converts it into a Ray Dataset directly. +converts it into a Datastream directly. .. warning:: :py:class:`~ray.data.from_huggingface` doesn't support parallel reads. 
This will not usually be an issue with in-memory 🤗 Datasets, - but may fail with large memory-mapped 🤗 Datasets. 🤗 ``IterableDataset`` + but may fail with large memory-mapped 🤗 Datasets. 🤗 ``IterableDataset`` objects are not supported. .. code-block:: python @@ -582,24 +582,24 @@ converts it into a Ray Dataset directly. import ray.data from datasets import load_dataset - hf_datasets = load_dataset("wikitext", "wikitext-2-raw-v1") - ray_datasets = ray.data.from_huggingface(hf_datasets) - ray_datasets["train"].take(2) + hf_ds = load_dataset("wikitext", "wikitext-2-raw-v1") + ray_ds = ray.data.from_huggingface(hf_ds) + ray_ds["train"].take(2) # [{'text': ''}, {'text': ' = Valkyria Chronicles III = \n'}] -.. _dataset_mongo_db: +.. _datastream_mongo_db: ------------ From MongoDB ------------ -A Dataset can also be created from `MongoDB `__ with +A Datastream can also be created from `MongoDB `__ with :py:class:`~ray.data.read_mongo`. This interacts with MongoDB similar to external filesystems, except here you will need to specify the MongoDB source by its `uri `__, `database and collection `__, and specify a `pipeline `__ to run against -the collection. The execution results are then used to create a Dataset. +the collection. The execution results are then used to create a Datastream. .. note:: @@ -634,7 +634,7 @@ the collection. The execution results are then used to create a Dataset. collection="my_collection", ) -.. _datasets_sql_databases: +..
_datastreams_sql_databases: -------------------------- Reading From SQL Databases @@ -671,13 +671,13 @@ Call :func:`~ray.data.read_sql` to read data from a database that provides a ) # Get all movies - dataset = ray.data.read_sql("SELECT * FROM movie", create_connection) + datastream = ray.data.read_sql("SELECT * FROM movie", create_connection) # Get movies after the year 1980 - dataset = ray.data.read_sql( + datastream = ray.data.read_sql( "SELECT title, score FROM movie WHERE year >= 1980", create_connection ) # Get the number of movies per year - dataset = ray.data.read_sql( + datastream = ray.data.read_sql( "SELECT year, COUNT(*) FROM movie GROUP BY year", create_connection ) @@ -708,13 +708,13 @@ Call :func:`~ray.data.read_sql` to read data from a database that provides a ) # Get all movies - dataset = ray.data.read_sql("SELECT * FROM movie", create_connection) + datastream = ray.data.read_sql("SELECT * FROM movie", create_connection) # Get movies after the year 1980 - dataset = ray.data.read_sql( + datastream = ray.data.read_sql( "SELECT title, score FROM movie WHERE year >= 1980", create_connection ) # Get the number of movies per year - dataset = ray.data.read_sql( + datastream = ray.data.read_sql( "SELECT year, COUNT(*) FROM movie GROUP BY year", create_connection ) @@ -744,13 +744,13 @@ Call :func:`~ray.data.read_sql` to read data from a database that provides a ) # Get all movies - dataset = ray.data.read_sql("SELECT * FROM movie", create_connection) + datastream = ray.data.read_sql("SELECT * FROM movie", create_connection) # Get movies after the year 1980 - dataset = ray.data.read_sql( + datastream = ray.data.read_sql( "SELECT title, score FROM movie WHERE year >= 1980", create_connection ) # Get the number of movies per year - dataset = ray.data.read_sql( + datastream = ray.data.read_sql( "SELECT year, COUNT(*) FROM movie GROUP BY year", create_connection ) @@ -781,13 +781,13 @@ Call :func:`~ray.data.read_sql` to read data from a database that provides 
a # Get all movies - dataset = ray.data.read_sql("SELECT * FROM movie", create_connection) + datastream = ray.data.read_sql("SELECT * FROM movie", create_connection) # Get movies after the year 1980 - dataset = ray.data.read_sql( + datastream = ray.data.read_sql( "SELECT title, score FROM movie WHERE year >= 1980", create_connection ) # Get the number of movies per year - dataset = ray.data.read_sql( + datastream = ray.data.read_sql( "SELECT year, COUNT(*) FROM movie GROUP BY year", create_connection ) @@ -815,24 +815,24 @@ Call :func:`~ray.data.read_sql` to read data from a database that provides a return dbapi.Connection(client) # Get all movies - dataset = ray.data.read_sql("SELECT * FROM movie", create_connection) + datastream = ray.data.read_sql("SELECT * FROM movie", create_connection) # Get movies after the year 1980 - dataset = ray.data.read_sql( + datastream = ray.data.read_sql( "SELECT title, score FROM movie WHERE year >= 1980", create_connection ) # Get the number of movies per year - dataset = ray.data.read_sql( + datastream = ray.data.read_sql( "SELECT year, COUNT(*) FROM movie GROUP BY year", create_connection ) -.. _datasets_custom_datasource: +.. _data_custom_datasource: ------------------ Custom Datasources ------------------ -Datasets can read and write in parallel to :ref:`custom datasources ` defined in Python. +Datastreams can read and write in parallel to :ref:`custom datasources ` defined in Python. Once you have implemented `YourCustomDataSource`, you can use it like any other source in Ray Data: .. code-block:: python @@ -843,7 +843,7 @@ Once you have implemented `YourCustomDataSource`, you can use it like any other # Write to a custom datasource. ds.write_datasource(YourCustomDatasource(), **write_args) -For more details, check out :ref:`guide for implementing a custom Datasets datasource `. +For more details, check out :ref:`guide for implementing a custom datasource `. 
-------------------------- Performance Considerations @@ -852,20 +852,16 @@ Performance Considerations Read Parallelism ================ -Datasets automatically selects the read ``parallelism`` according to the following procedure: +Datastreams automatically selects the read ``parallelism`` according to the following procedure: 1. The number of available CPUs is estimated. If in a placement group, the number of CPUs in the cluster is scaled by the size of the placement group compared to the cluster size. If not in a placement group, this is the number of CPUs in the cluster. 2. The parallelism is set to the estimated number of CPUs multiplied by 2. If the parallelism is less than 8, it is set to 8. 3. The in-memory data size is estimated. If the parallelism would create in-memory blocks that are larger on average than the target block size (512MiB), the parallelism is increased until the blocks are < 512MiB in size. 4. The parallelism is truncated to ``min(num_files, parallelism)``. -To perform the read, ``parallelism`` parallel read tasks will be -launched, each reading one or more files and each creating a single block of data. -When reading from remote datasources, these parallel read tasks will be spread across -the nodes in your Ray cluster, creating the distributed collection of blocks that makes -up a distributed Ray Dataset. +The ``parallelism`` determines the number of blocks the base data will be split into for parallel reads. Datastream will decide internally how many read tasks to run concurrently to best utilize the cluster, ranging from ``1...parallelism`` tasks. In other words, the higher the parallelism, the smaller the data blocks in the Datastream and hence the more opportunity for parallel execution. -.. image:: images/dataset-read.svg +.. 
image:: images/datastream-read.svg :width: 650px :align: center @@ -873,15 +869,15 @@ This default parallelism can be overridden via the ``parallelism`` argument; see :ref:`performance guide ` for tips on how to tune this read parallelism. -.. _dataset_deferred_reading: +.. _datastream_deferred_reading: Deferred Read Task Execution ============================ -Datasets created via the ``ray.data.read_*()`` APIs are lazy: no read tasks are +Datastreams created via the ``ray.data.read_*()`` APIs are lazy: no read tasks are executed until a downstream consumption operation triggers execution. Metadata -inspection functions like :meth:`ds.schema() ` and -:meth:`ds.show() ` will trigger execution of only one or some +inspection functions like :meth:`ds.schema() ` and +:meth:`ds.show() ` will trigger execution of only one or some tasks, instead of all tasks. This allows metadata to be inspected right away. Execution of all read tasks can be triggered manually using the -:meth:`ds.materialize() ` API. +:meth:`ds.materialize() ` API. diff --git a/doc/source/data/custom-datasource.rst b/doc/source/data/custom-datasource.rst index d08d1e105bf1..010f08e19385 100644 --- a/doc/source/data/custom-datasource.rst +++ b/doc/source/data/custom-datasource.rst @@ -7,18 +7,18 @@ Custom Datasources .. note:: This MongoDatasource guide below is for education only. For production use of MongoDB - in Ray Datasets, see :ref:`Creating Dataset from MongoDB `. + in Ray Data, see :ref:`Creating Datastream from MongoDB `. -Ray Datasets supports multiple ways to :ref:`create a dataset `, +Ray Data supports multiple ways to :ref:`create a datastream `, allowing you to easily ingest data of common formats from popular sources. However, if the datasource you want to read from is not in the built-in list, don't worry, you can implement a custom one for your use case. In this guide, we will walk you through how to build your own custom datasource, using `MongoDB `__ as an example. 
-By the end of the guide, you will have a ``MongoDatasource`` that you can use to create dataset as follows: +By the end of the guide, you will have a ``MongoDatasource`` that you can use to create datastream as follows: .. code-block:: python - # Read from custom MongoDB datasource to create a dataset. + # Read from custom MongoDB datasource to create a datastream. ds = ray.data.read_datasource( MongoDatasource(), uri=MY_URI, @@ -27,7 +27,7 @@ By the end of the guide, you will have a ``MongoDatasource`` that you can use to pipelines=MY_PIPELINES ) - # Write the dataset to custom MongoDB datasource. + # Write the datastream to custom MongoDB datasource. ds.write_datasource( MongoDatasource(), uri=MY_URI, database=MY_DATABASE, collection=MY_COLLECTION ) @@ -38,7 +38,7 @@ By the end of the guide, you will have a ``MongoDatasource`` that you can use to a MongoDB instance, which hosts `Databases and Collections `__. A collection is analogous to a table in SQL databases. MongoDB also has a `pipeline `__ concept, which expresses document processing in a series of stages (e.g. match documents with a predicate, sort results, and then select a few fields). - The execution results of the pipelines are used to create dataset. + The execution results of the pipelines are used to create datastream. A custom datasource is an implementation of :class:`~ray.data.Datasource`. In the example here, let's call it ``MongoDatasource``. At a high level, it will have two @@ -50,7 +50,7 @@ core parts to build out: Here are the key design choices we will make in this guide: - **MongoDB connector**: We use `PyMongo `__ to connect to MongoDB. -- **MongoDB to Arrow conversion**: We use `PyMongoArrow `__ to convert MongoDB execution results into Arrow tables, which Datasets supports as a data format. +- **MongoDB to Arrow conversion**: We use `PyMongoArrow `__ to convert MongoDB execution results into Arrow tables, which Datastreams supports as a data format. 
- **Parallel execution**: We ask the user to provide a list of MongoDB pipelines, with each corresponding to a partition of the MongoDB collection, which will be executed in parallel with :class:`~ray.data.ReadTask`. For example, suppose you have a MongoDB collection with 4 documents, which have a ``partition_field`` with values 0, 1, 2, 3. @@ -94,7 +94,7 @@ MongoDB. This ``Reader`` creates a list of :class:`~ray.data.ReadTask` for the g list of MongoDB pipelines. Each :class:`~ray.data.ReadTask` returns a list of blocks when called, and each :class:`~ray.data.ReadTask` is executed in remote workers to parallelize the execution. -You can find documentation about Ray Datasets :ref:`block concept here ` and :ref:`block APIs here `. +You can find documentation about Ray Data :ref:`block concept here ` and :ref:`block APIs here `. First, let's handle a single MongoDB pipeline, which is the unit of execution in :class:`~ray.data.ReadTask`. We need to connect to MongoDB, execute the pipeline against it, @@ -119,7 +119,7 @@ The :class:`~ray.data.block.BlockMetadata` contains metadata like number of rows that we know about the block prior to actually executing the read task; the no-arg read function is just a wrapper of ``_read_single_partition``. A list of :class:`~ray.data.ReadTask` objects are returned by ``get_read_tasks``, and these -tasks are executed on remote workers. You can find more details about `Dataset read execution here `__. +tasks are executed on remote workers. You can find more details about `Datastream read execution here `__. .. literalinclude:: ./doc_code/custom_datasource.py :language: python @@ -169,12 +169,12 @@ a ``MongoDatasource``. :start-after: __mongo_datasource_start__ :end-before: __mongo_datasource_end__ -Now you can create a Ray Dataset from and write back to MongoDB, just like +Now you can create a Datastream from and write back to MongoDB, just like any other datasource! .. 
code-block:: python - # Read from MongoDB datasource and create a dataset. + # Read from MongoDB datasource and create a datastream. # The args are passed to MongoDatasource.create_reader(). ds = ray.data.read_datasource( MongoDatasource(), @@ -184,10 +184,10 @@ any other datasource! pipelines=my_pipelines, # See the example definition of ``my_pipelines`` above ) - # Data preprocessing with Dataset APIs here + # Data preprocessing with Datastream APIs here # ... - # Write the dataset back to MongoDB datasource. + # Write the datastream back to MongoDB datasource. # The args are passed to MongoDatasource.do_write(). ds.write_datasource( MongoDatasource(), diff --git a/doc/source/data/dataset-internals.rst b/doc/source/data/data-internals.rst similarity index 75% rename from doc/source/data/dataset-internals.rst rename to doc/source/data/data-internals.rst index e3b196f99dec..c901c867de65 100644 --- a/doc/source/data/dataset-internals.rst +++ b/doc/source/data/data-internals.rst @@ -1,4 +1,4 @@ -.. _datasets_scheduling: +.. _datastreams_scheduling: ============================================ Scheduling, Execution, and Memory Management @@ -7,24 +7,24 @@ Scheduling, Execution, and Memory Management Scheduling ========== -Datasets uses Ray core for execution, and hence is subject to the same scheduling considerations as normal Ray tasks and actors. Datasets uses the following custom scheduling settings by default for improved performance: +Ray Data uses Ray core for execution, and hence is subject to the same scheduling considerations as normal Ray tasks and actors. Ray Data uses the following custom scheduling settings by default for improved performance: * The ``SPREAD`` scheduling strategy is used to ensure data blocks are evenly balanced across the cluster. * Retries of application-level exceptions are enabled to handle transient errors from remote datasources. -* Dataset tasks ignore placement groups by default, see :ref:`Datasets and Placement Groups `. 
+* Datastream tasks ignore placement groups by default, see :ref:`Ray Data and Placement Groups `. -.. _datasets_tune: +.. _datastreams_tune: -Datasets and Tune +Ray Data and Tune ~~~~~~~~~~~~~~~~~ -When using Datasets in conjunction with :ref:`Ray Tune `, it is important to ensure there are enough free CPUs for Datasets to run on. By default, Tune will try to fully utilize cluster CPUs. This can prevent Datasets from scheduling tasks, reducing performance or causing workloads to hang. +When using Ray Data in conjunction with :ref:`Ray Tune `, it is important to ensure there are enough free CPUs for Ray Data to run on. By default, Tune will try to fully utilize cluster CPUs. This can prevent Ray Data from scheduling tasks, reducing performance or causing workloads to hang. -As an example, the following shows two ways to use Datasets together with Tune: +As an example, the following shows two ways to use Ray Data together with Tune: .. tabbed:: Limiting Tune Concurrency - By limiting the number of concurrent Tune trials, we ensure CPU resources are always available for Datasets execution. + By limiting the number of concurrent Tune trials, we ensure CPU resources are always available for Ray Data execution. This can be done using the ``max_concurrent_trials`` Tune option. .. literalinclude:: ./doc_code/key_concepts.py @@ -36,7 +36,7 @@ As an example, the following shows two ways to use Datasets together with Tune: Alternatively, we can tell Tune to set aside CPU resources for other libraries. This can be done by setting ``_max_cpu_fraction_per_node=0.8``, which reserves - 20% of node CPUs for Dataset execution. + 20% of node CPUs for Datastream execution. .. literalinclude:: ./doc_code/key_concepts.py :language: python @@ -48,36 +48,36 @@ As an example, the following shows two ways to use Datasets together with Tune: This option is experimental and not currently recommended for use with autoscaling clusters (scale-up will not trigger properly). -.. _datasets_pg: +.. 
_datastreams_pg: -Datasets and Placement Groups +Ray Data and Placement Groups ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -By default, Datasets configures its tasks and actors to use the cluster-default scheduling strategy ("DEFAULT"). You can inspect this configuration variable here: +By default, Ray Data configures its tasks and actors to use the cluster-default scheduling strategy ("DEFAULT"). You can inspect this configuration variable here: :class:`ray.data.DataContext.get_current().scheduling_strategy `. This scheduling strategy will schedule these tasks and actors outside any present -placement group. If you want to force Datasets to schedule tasks within the current placement group (i.e., to use current placement group resources specifically for Datasets), you can set ``ray.data.DataContext.get_current().scheduling_strategy = None``. +placement group. If you want to force Ray Data to schedule tasks within the current placement group (i.e., to use current placement group resources specifically for Ray Data), you can set ``ray.data.DataContext.get_current().scheduling_strategy = None``. -This should be considered for advanced use cases to improve performance predictability only. We generally recommend letting Datasets run outside placement groups as documented in the :ref:`Datasets and Other Libraries ` section. +This should be considered for advanced use cases to improve performance predictability only. We generally recommend letting Ray Data run outside placement groups as documented in the :ref:`Ray Data and Other Libraries ` section. -.. _datasets_execution: +.. _datastream_execution: Execution ========= -The Datasets execution by default is: +Ray Data execution by default is: -- **Lazy**: This means that transformations on Dataset are not executed until a - consumption operation (e.g. :meth:`ds.iter_batches() `) - or :meth:`Dataset.materialize() ` is called. This creates - opportunities for optimizing the execution plan (e.g. :ref:`stage fusion `). 
-- **Pipelined**: This means that Dataset transformations will be executed in a +- **Lazy**: This means that transformations on Datastream are not executed until a + consumption operation (e.g. :meth:`ds.iter_batches() `) + or :meth:`Datastream.materialize() ` is called. This creates + opportunities for optimizing the execution plan (e.g. :ref:`stage fusion `). +- **Streaming**: This means that Datastream transformations will be executed in a streaming way, incrementally on the base data, instead of on all of the data at once, and overlapping the execution of operations. This can be used for streaming data loading into ML training to overlap the data preprocessing and model training, - or to execute batch transformations on large datasets without needing to load the - entire dataset into cluster memory. + or to execute batch transformations on large datastreams without needing to load the + entire datastream into cluster memory. -.. _datasets_lazy_execution: +.. _datastreams_lazy_execution: Lazy Execution ~~~~~~~~~~~~~~ @@ -85,26 +85,26 @@ Lazy Execution Lazy execution offers opportunities for improved performance and memory stability due to stage fusion optimizations and aggressive garbage collection of intermediate results. -Dataset creation and transformation APIs are lazy, with execution only triggered via "sink" -APIs, such as consuming (:meth:`ds.iter_batches() `), -writing (:meth:`ds.write_parquet() `), or manually triggering via -:meth:`ds.materialize() `. There are a few +Datastream creation and transformation APIs are lazy, with execution only triggered via "sink" +APIs, such as consuming (:meth:`ds.iter_batches() `), +writing (:meth:`ds.write_parquet() `), or manually triggering via +:meth:`ds.materialize() `. 
There are a few exceptions to this rule, where transformations such as :meth:`ds.union() -` and -:meth:`ds.limit() ` trigger execution; we plan to make these +` and +:meth:`ds.limit() ` trigger execution; we plan to make these operations lazy in the future. -Check the API docs for Datasets methods to see if they +Check the API docs for Ray Data methods to see if they trigger execution. Those that do trigger execution will have a ``Note`` indicating as much. -.. _datasets_streaming_execution: +.. _streaming_execution: Streaming Execution ~~~~~~~~~~~~~~~~~~~ The following code is a hello world example which invokes the execution with -:meth:`ds.iter_batches() ` consumption. We will also enable verbose progress reporting, which shows per-operator progress in addition to overall progress. +:meth:`ds.iter_batches() ` consumption. We will also enable verbose progress reporting, which shows per-operator progress in addition to overall progress. .. code-block:: @@ -206,18 +206,18 @@ Locality with Output (ML ingest use case) ctx.execution_options.locality_with_output = True -Setting this to True tells Datasets to prefer placing operator tasks onto the consumer node in the cluster, rather than spreading them evenly across the cluster. This can be useful if you know you'll be consuming the output data directly on the consumer node (i.e., for ML training ingest). However, this may incur a performance penalty for other use cases. +Setting this to True tells Ray Data to prefer placing operator tasks onto the consumer node in the cluster, rather than spreading them evenly across the cluster. This can be useful if you know you'll be consuming the output data directly on the consumer node (i.e., for ML training ingest). However, this may incur a performance penalty for other use cases. Scalability ----------- We expect the data streaming backend to scale to tens of thousands of files / blocks and up to hundreds of terabytes of data. 
Please report if you experience performance degradation at these scales, we would be very interested to investigate! -.. _datasets_stage_fusion: +.. _datastreams_stage_fusion: Stage Fusion Optimization ~~~~~~~~~~~~~~~~~~~~~~~~~ -In order to reduce memory usage and task overheads, Datasets will automatically fuse together +In order to reduce memory usage and task overheads, Ray Data will automatically fuse together lazy operations that are compatible: * Same compute pattern: embarrassingly parallel map vs. all-to-all shuffle @@ -226,10 +226,10 @@ lazy operations that are compatible: Read stages and subsequent map-like transformations will usually be fused together. All-to-all transformations such as -:meth:`ds.random_shuffle() ` can be fused with earlier +:meth:`ds.random_shuffle() ` can be fused with earlier map-like stages, but not later stages. -You can tell if stage fusion is enabled by checking the :ref:`Dataset stats ` and looking for fused stages (e.g., ``read->map_batches``). +You can tell if stage fusion is enabled by checking the :ref:`Datastream stats ` and looking for fused stages (e.g., ``read->map_batches``). .. code-block:: @@ -241,34 +241,34 @@ You can tell if stage fusion is enabled by checking the :ref:`Dataset stats ` with batch size small enough such that the output batch can comfortably fit into memory. +Large block size can lead to potential out-of-memory situations. To avoid these issues, make sure no single item in your Ray Data is too large, and always call :meth:`ds.map_batches() ` with batch size small enough such that the output batch can comfortably fit into memory. Object Store Memory ~~~~~~~~~~~~~~~~~~~ -Datasets uses the Ray object store to store data blocks, which means it inherits the memory management features of the Ray object store. This section discusses the relevant features: +Ray Data uses the Ray object store to store data blocks, which means it inherits the memory management features of the Ray object store. 
This section discusses the relevant features: -* Object Spilling: Since Datasets uses the Ray object store to store data blocks, any blocks that can't fit into object store memory are automatically spilled to disk. The objects are automatically reloaded when needed by downstream compute tasks: +* Object Spilling: Since Ray Data uses the Ray object store to store data blocks, any blocks that can't fit into object store memory are automatically spilled to disk. The objects are automatically reloaded when needed by downstream compute tasks: * Locality Scheduling: Ray will preferentially schedule compute tasks on nodes that already have a local copy of the object, reducing the need to transfer objects between nodes in the cluster. -* Reference Counting: Dataset blocks are kept alive by object store reference counting as long as there is any Dataset that references them. To free memory, delete any Python references to the Dataset object. +* Reference Counting: Datastream blocks are kept alive by object store reference counting as long as there is any Datastream that references them. To free memory, delete any Python references to the Datastream object. Block Data Formats ~~~~~~~~~~~~~~~~~~ -In order to optimize conversion costs, Datasets can hold tabular data in-memory +In order to optimize conversion costs, Ray Data can hold tabular data in-memory as either `Arrow Tables `__ or `Pandas DataFrames `__. -Different ways of creating Datasets leads to a different starting internal format: +Different ways of creating Ray Data leads to a different starting internal format: * Reading tabular files (Parquet, CSV, JSON) creates Arrow blocks initially. * Converting from Pandas, Dask, Modin, and Mars creates Pandas blocks initially. @@ -276,5 +276,5 @@ Different ways of creating Datasets leads to a different starting internal forma * Reading TFRecord file creates Arrow blocks. * Reading MongoDB creates Arrow blocks. -However, this internal format is not exposed to the user. 
Datasets converts between formats +However, this internal format is not exposed to the user. Ray Data converts between formats as needed internally depending on the specified ``batch_format`` of transformations. diff --git a/doc/source/data/dataset-tensor-support.rst b/doc/source/data/data-tensor-support.rst similarity index 77% rename from doc/source/data/dataset-tensor-support.rst rename to doc/source/data/data-tensor-support.rst index bed848b8fd02..08049fc63951 100644 --- a/doc/source/data/dataset-tensor-support.rst +++ b/doc/source/data/data-tensor-support.rst @@ -1,24 +1,24 @@ -.. _datasets_tensor_support: +.. _data_tensor_support: ML Tensor Support ================= -Tensor (multi-dimensional array) data is ubiquitous in ML workloads. However, popular data formats such as Pandas, Parquet, and Arrow don't natively support tensor data types. To bridge this gap, Datasets provides a unified tensor data type that can be used to represent, transform, and store tensor data: +Tensor (multi-dimensional array) data is ubiquitous in ML workloads. However, popular data formats such as Pandas, Parquet, and Arrow don't natively support tensor data types. To bridge this gap, Ray Data provides a unified tensor data type that can be used to represent, transform, and store tensor data: -* For Pandas, Datasets will transparently convert ``List[np.ndarray]`` columns to and from the :class:`TensorDtype ` extension type. -* For Parquet, Datasets has an Arrow extension :class:`ArrowTensorType ` that allows tensors to be loaded from and stored in the Parquet format. -* In addition, single-column tensor datasets can be created from NumPy (.npy) files. +* For Pandas, Ray Data will transparently convert ``List[np.ndarray]`` columns to and from the :class:`TensorDtype ` extension type. +* For Parquet, Ray Data has an Arrow extension :class:`ArrowTensorType ` that allows tensors to be loaded from and stored in the Parquet format. 
+* In addition, single-column tensor datastreams can be created from NumPy (.npy) files. -Datasets automatically converts between the extension types/arrays above. This means you can think of a ``Tensor`` as a first-class data type in Datasets. +Ray Data automatically converts between the extension types/arrays above. This means you can think of a ``Tensor`` as a first-class data type in Ray Data. -Creating Tensor Datasets ------------------------- +Creating Tensor Datastreams +--------------------------- -This section shows how to create single and multi-column tensor datasets. +This section shows how to create single and multi-column tensor datastreams. .. tabbed:: Synthetic Data - Create a synthetic tensor dataset from a range of integers. + Create a synthetic tensor datastream from a range of integers. **Single-column only**: @@ -29,8 +29,8 @@ This section shows how to create single and multi-column tensor datasets. .. tabbed:: Pandas UDF - Create tensor datasets by returning ``List[np.ndarray]`` columns from a Pandas - :ref:`user-defined function `. + Create tensor datastreams by returning ``List[np.ndarray]`` columns from a Pandas + :ref:`user-defined function `. **Single-column**: @@ -59,13 +59,13 @@ This section shows how to create single and multi-column tensor datasets. .. tabbed:: Parquet - There are two ways to construct a Parquet tensor dataset: (1) loading a - previously-saved tensor dataset, or (2) casting non-tensor Parquet columns to tensor + There are two ways to construct a Parquet tensor datastream: (1) loading a + previously-saved tensor datastream, or (2) casting non-tensor Parquet columns to tensor type. When casting data, a tensor schema or deserialization - :ref:`user-defined function ` must be provided. The + :ref:`user-defined function ` must be provided. The following are examples for each method. - **Previously-saved tensor datasets**: + **Previously-saved tensor datastreams**: .. 
literalinclude:: ./doc_code/tensor.py :language: python @@ -85,7 +85,7 @@ This section shows how to create single and multi-column tensor datasets. **Cast from data stored in custom formats**: For tensors stored in other formats (e.g., pickled), you can specify a deserializer - :ref:`user-defined function ` that returns + :ref:`user-defined function ` that returns :class:`~ray.data.extensions.tensor_extension.TensorArray` columns: .. literalinclude:: ./doc_code/tensor.py @@ -106,13 +106,13 @@ This section shows how to create single and multi-column tensor datasets. .. note:: - By convention, single-column tensor datasets are represented with a single ``__value__`` column. - This kind of dataset will be converted automatically to/from NumPy ndarray format in all transformation and consumption APIs. + By convention, single-column tensor datastreams are represented with a single ``__value__`` column. + This kind of datastream will be converted automatically to/from NumPy ndarray format in all transformation and consumption APIs. Transforming / Consuming Tensor Data ------------------------------------ -Like any other Dataset, Datasets with tensor columns can be consumed / transformed in batches via the :meth:`ds.iter_batches(batch_format=\) ` and :meth:`ds.map_batches(fn, batch_format=\) ` APIs. This section shows the available batch formats and their behavior: +Like any other Datastream, Datastreams with tensor columns can be consumed / transformed in batches via the :meth:`ds.iter_batches(batch_format=\) ` and :meth:`ds.map_batches(fn, batch_format=\) ` APIs. This section shows the available batch formats and their behavior: .. 
tabbed:: "default" @@ -178,12 +178,12 @@ Like any other Dataset, Datasets with tensor columns can be consumed / transform :start-after: __consume_numpy_2_begin__ :end-before: __consume_numpy_2_end__ -Saving Tensor Datasets ----------------------- +Saving Tensor Datastreams +------------------------- -Because tensor datasets rely on Datasets-specific extension types, they can only be +Because tensor datastreams rely on Datastreams-specific extension types, they can only be saved in formats that preserve Arrow metadata (currently only Parquet). In addition, -single-column tensor datasets can be saved in NumPy format. +single-column tensor datastreams can be saved in NumPy format. .. tabbed:: Parquet @@ -209,7 +209,7 @@ Ragged Tensor Support `N-grams `__), computer vision (images of differing resolution, `ssd300_vgg16 detection outputs `__), -and audio ML (differing durations). Datasets has basic support for ragged tensors, +and audio ML (differing durations). Datastreams has basic support for ragged tensors, namely tensors that are a collection (batch) of variably-shaped subtensors, e.g. a batch of images of differing sizes or a batch of sentences of differing lengths. @@ -245,6 +245,6 @@ below. Limitations ----------- -The following are current limitations of tensor datasets. +The following are current limitations of tensor datastreams. * Arbitrarily `nested/ragged tensors `__ are not supported. Only tensors with all uniform dimensions (i.e. a fully well-defined shape) and tensors representing a collection of variable-shaped tensor elements (e.g. a collection of images with different shapes) are supported; arbitrary raggedness and nested ragged tensors is not supported. diff --git a/doc/source/data/data.rst b/doc/source/data/data.rst new file mode 100644 index 000000000000..2d009240e588 --- /dev/null +++ b/doc/source/data/data.rst @@ -0,0 +1,200 @@ +.. include:: /_includes/data/announcement.rst + +.. 
_data:
+
+======================================
+Ray Data: Distributed ML Preprocessing
+======================================
+
+.. _data-intro:
+
+Ray Data is the standard way to load and exchange data in Ray libraries and applications.
+It provides streaming distributed transformations such as maps
+(:meth:`map_batches `),
+global and grouped aggregations (:class:`GroupedData `), and
+shuffling operations (:meth:`random_shuffle `,
+:meth:`sort `,
+:meth:`repartition `),
+and is compatible with a variety of file formats, data sources, and distributed frameworks.
+
+Here's an overview of the integrations with other processing frameworks, file formats, and supported operations,
+as well as a glimpse at the Ray Data API.
+
+Check the :ref:`Input/Output reference ` to see if your favorite format
+is already supported.
+
+.. image:: images/datastream.svg
+
+..
+  https://docs.google.com/drawings/d/16AwJeBNR46_TsrkOmMbGaBK7u-OPsf_V8fHjU-d2PPQ/edit
+
+-------------------------
+Streaming Batch Inference
+-------------------------
+
+Ray Data simplifies general purpose parallel GPU and CPU compute in Ray through its
+powerful :ref:`Datastream ` primitive. Datastreams enable workloads such as
+:ref:`GPU batch inference ` to run efficiently on large datasets. Ray Data manages
+the pipelined processing of data in the cluster, maximizing resource utilization
+by keeping the working data within Ray object store memory.
+
+.. image:: images/stream-example.png
+   :width: 650px
+   :align: center
+
+..
+  https://docs.google.com/presentation/d/1l03C1-4jsujvEFZUM4JVNy8Ju8jnY5Lc_3q7MBWi2PQ/edit#slide=id.g230eb261ad2_0_0
+
+As part of the Ray ecosystem, Ray Data can leverage the full functionality of Ray's distributed scheduler,
+e.g., using actors for optimizing setup time and GPU scheduling, and supports data throughputs of
+100GiB/s or more for common inference workloads.
+
+To learn more about the features Ray Data supports, read the
+:ref:`Data User Guide `.
+
+---------------------------------------
+Streaming Preprocessing for ML Training
+---------------------------------------
+
+Use Ray Data to load and preprocess data for distributed :ref:`ML training pipelines ` in a streaming fashion.
+Ray Data is intended to serve as a last-mile bridge from storage or ETL pipeline outputs to distributed
+applications and libraries in Ray. Don't use it as a replacement for more general data
+processing systems.
+
+.. image:: images/datastream-loading-1.png
+   :width: 650px
+   :align: center
+
+..
+  https://docs.google.com/presentation/d/1l03C1-4jsujvEFZUM4JVNy8Ju8jnY5Lc_3q7MBWi2PQ/edit
+
+----------------------
+Where to Go from Here?
+----------------------
+
+As a new user of Ray Data, you may want to start with our :ref:`Getting Started Guide `.
+If you've run your first examples already, you might want to dive into Ray Data's
+:ref:`key concepts ` or our :ref:`User Guide ` instead.
+Advanced users can refer directly to the Ray Data :ref:`API reference ` for their projects.
+
+.. panels::
+    :container: text-center
+    :column: col-lg-6 px-2 py-2
+    :card:
+
+    **Getting Started**
+    ^^^
+
+    Start with our quick start tutorials for working with Data.
+    These concrete examples will give you an idea of how to use Ray Data.
+
+    +++
+    .. link-button:: data_getting_started
+        :type: ref
+        :text: Get Started with Ray Data
+        :classes: btn-outline-info btn-block
+    ---
+
+    **Key Concepts**
+    ^^^
+
+    Understand the key concepts behind Ray Data.
+    Learn what :ref:`Datastreams ` are and how they are executed in Ray
+    Data.
+
+    +++
+    .. link-button:: data_key_concepts
+        :type: ref
+        :text: Learn Key Concepts
+        :classes: btn-outline-info btn-block
+    ---
+
+    **User Guides**
+    ^^^
+
+    Learn how to :ref:`create datastreams `, :ref:`save
+    datastreams `, :ref:`transform datastreams `,
+    :ref:`access and exchange datastreams `, or
+    :ref:`work with tensor data `.
+
+    +++
+    .. 
link-button:: data_user_guide + :type: ref + :text: Start Using Ray Data + :classes: btn-outline-info btn-block + --- + + **Examples** + ^^^ + + Find both simple and scaling-out examples of using Ray Data for data + processing and ML ingest. + + +++ + .. link-button:: data-recipes + :type: ref + :text: Ray Data Examples + :classes: btn-outline-info btn-block + --- + + **Ray Data FAQ** + ^^^ + + Find answers to commonly asked questions in our detailed FAQ. + + +++ + .. link-button:: data_faq + :type: ref + :text: Ray Data FAQ + :classes: btn-outline-info btn-block + --- + + **API** + ^^^ + + Get more in-depth information about the Ray Data API. + + +++ + .. link-button:: data-api + :type: ref + :text: Read the API Reference + :classes: btn-outline-info btn-block + --- + + **Other Data Processing Solutions** + ^^^ + + For running ETL pipelines, check out :ref:`Spark-on-Ray `. For scaling + up your data science workloads, check out :ref:`Dask-on-Ray `, + :ref:`Modin `, and :ref:`Mars-on-Ray `. + + +++ + .. link-button:: integrations + :type: ref + :text: Check Out Other Data Processing Options + :classes: btn-outline-info btn-block + +------------------------ +Datasource Compatibility +------------------------ + +Ray Data supports reading and writing many file formats. +To view supported formats, read the :ref:`Input/Output reference `. + +If your use case isn't supported, reach out on `Discourse `__ or open a feature +request on the `Ray GitHub repo `__, and check out +our :ref:`guide for implementing a custom datasource ` +if you're interested in rolling your own integration! + +---------- +Contribute +---------- + +Contributions to Ray Data are :ref:`welcome `! +There are many potential improvements, including: + +- Supporting more data sources and transforms. +- Integration with more ecosystem libraries. +- Performance optimizations. + +.. 
include:: /_includes/data/announcement_bottom.rst diff --git a/doc/source/data/dataset.rst b/doc/source/data/dataset.rst deleted file mode 100644 index d55e77b4a8ae..000000000000 --- a/doc/source/data/dataset.rst +++ /dev/null @@ -1,210 +0,0 @@ -.. include:: /_includes/data/announcement.rst - -.. _datasets: - -============================================ -Ray Datasets: Distributed Data Preprocessing -============================================ - -.. _datasets-intro: - -Ray Datasets are the standard way to load and exchange data in Ray libraries and applications. -They provide basic distributed data transformations such as maps -(:meth:`map_batches `), -global and grouped aggregations (:class:`GroupedData `), and -shuffling operations (:meth:`random_shuffle `, -:meth:`sort `, -:meth:`repartition `), -and are compatible with a variety of file formats, data sources, and distributed frameworks. - -Here's an overview of the integrations with other processing frameworks, file formats, and supported operations, -as well as a glimpse at the Ray Datasets API. - -Check the :ref:`Input/Output reference ` to see if your favorite format -is already supported. - -.. image:: images/dataset.svg - -.. - https://docs.google.com/drawings/d/16AwJeBNR46_TsrkOmMbGaBK7u-OPsf_V8fHjU-d2PPQ/edit - - ----------------------------------------------- -Data Loading and Preprocessing for ML Training ----------------------------------------------- - -Use Ray Datasets to load and preprocess data for distributed :ref:`ML training pipelines `. -Compared to other loading solutions, Datasets are more flexible (e.g., can express higher-quality per-epoch global shuffles) and provides `higher overall performance `__. - -Use Datasets as a last-mile bridge from storage or ETL pipeline outputs to distributed -applications and libraries in Ray. Don't use it as a replacement for more general data -processing systems. - -.. image:: images/dataset-loading-1.png - :width: 650px - :align: center - -.. 
- https://docs.google.com/presentation/d/1l03C1-4jsujvEFZUM4JVNy8Ju8jnY5Lc_3q7MBWi2PQ/edit - -To learn more about the features Datasets supports, read the -:ref:`Datasets User Guide `. - ------------------------------ -Datasets for Parallel Compute ------------------------------ - -Datasets also simplify general purpose parallel GPU and CPU compute in Ray; for -instance, for :ref:`GPU batch inference `. -They provide a higher-level API for Ray tasks and actors for such embarrassingly parallel compute, -internally handling operations like batching, pipelining, and memory management. - -.. image:: images/dataset-compute-1.png - :width: 500px - :align: center - -As part of the Ray ecosystem, Ray Datasets can leverage the full functionality of Ray's distributed scheduler, -e.g., using actors for optimizing setup time and GPU scheduling. - ----------------------- -Where to Go from Here? ----------------------- - -As new user of Ray Datasets, you may want to start with our :ref:`Getting Started guide`. -If you've run your first examples already, you might want to dive into Ray Datasets' -:ref:`key concepts ` or our :ref:`User Guide ` instead. -Advanced users can refer directly to the Ray Datasets :ref:`API reference ` for their projects. - -.. panels:: - :container: text-center - :column: col-lg-6 px-2 py-2 - :card: - - **Getting Started** - ^^^ - - Start with our quick start tutorials for working with Datasets. - These concrete examples will give you an idea of how to use Ray Datasets. - - +++ - .. link-button:: datasets_getting_started - :type: ref - :text: Get Started with Ray Datasets - :classes: btn-outline-info btn-block - --- - - **Key Concepts** - ^^^ - - Understand the key concepts behind Ray Datasets. - Learn what :ref:`Datasets ` are and how they are executed in Ray - Datasets. - - +++ - .. 
link-button:: data_key_concepts - :type: ref - :text: Learn Key Concepts - :classes: btn-outline-info btn-block - --- - - **User Guides** - ^^^ - - Learn how to :ref:`create datasets `, :ref:`save - datasets `, :ref:`transform datasets `, - :ref:`access and exchange datasets `, :ref:`pipeline - transformations `, or - :ref:`work with tensor data `. - - +++ - .. link-button:: data_user_guide - :type: ref - :text: Start Using Ray Datasets - :classes: btn-outline-info btn-block - --- - - **Examples** - ^^^ - - Find both simple and scaling-out examples of using Ray Datasets for data - processing and ML ingest. - - +++ - .. link-button:: datasets-recipes - :type: ref - :text: Ray Datasets Examples - :classes: btn-outline-info btn-block - --- - - **Ray Datasets FAQ** - ^^^ - - Find answers to commonly asked questions in our detailed FAQ. - - +++ - .. link-button:: datasets_faq - :type: ref - :text: Ray Datasets FAQ - :classes: btn-outline-info btn-block - --- - - **API** - ^^^ - - Get more in-depth information about the Ray Datasets API. - - +++ - .. link-button:: data-api - :type: ref - :text: Read the API Reference - :classes: btn-outline-info btn-block - --- - - **Other Data Processing Solutions** - ^^^ - - For running ETL pipelines, check out :ref:`Spark-on-Ray `. For scaling - up your data science workloads, check out :ref:`Dask-on-Ray `, - :ref:`Modin `, and :ref:`Mars-on-Ray `. - - +++ - .. link-button:: integrations - :type: ref - :text: Check Out Other Data Processing Options - :classes: btn-outline-info btn-block - ------------------------- -Datasource Compatibility ------------------------- - -Ray Datasets supports reading and writing many file formats. -To view supported formats, read the :ref:`Input/Output reference `. 
- -If your use case isn't supported, reach out on `Discourse `__ or open a feature -request on the `Ray GitHub repo `__, and check out -our :ref:`guide for implementing a custom Datasets datasource ` -if you're interested in rolling your own integration! - -.. _data-talks: - ----------- -Learn More ----------- - -- [slides] `Talk given at PyData 2021 `_ -- [blog] `Data Ingest in a Third Generation ML Architecture `_ -- [blog] `Building an end-to-end ML pipeline using Mars and XGBoost on Ray `_ -- [blog] `Ray Datasets for large-scale machine learning ingest and scoring `_ - ----------- -Contribute ----------- - -Contributions to Ray Datasets are :ref:`welcome `! -There are many potential improvements, including: - -- Supporting more data sources and transforms. -- Integration with more ecosystem libraries. -- Performance optimizations. - -.. include:: /_includes/data/announcement_bottom.rst diff --git a/doc/source/data/doc_code/consuming_datasets.py b/doc/source/data/doc_code/consuming_datastreams.py similarity index 75% rename from doc/source/data/doc_code/consuming_datasets.py rename to doc/source/data/doc_code/consuming_datastreams.py index 1d946d5b5458..731fe4753b7b 100644 --- a/doc/source/data/doc_code/consuming_datasets.py +++ b/doc/source/data/doc_code/consuming_datastreams.py @@ -38,7 +38,7 @@ ds = ray.data.range(10000) num_rows = 0 -# Consume all rows in the Dataset. +# Consume all rows in the Datastream. for row in ds.iter_rows(): assert isinstance(row, int) num_rows += 1 @@ -56,7 +56,7 @@ ds = ray.data.range(10000) num_batches = 0 -# Consume all batches in the Dataset. +# Consume all batches in the Datastream. for batch in ds.iter_batches(batch_size=2): assert isinstance(batch, list) num_batches += 1 @@ -68,7 +68,7 @@ cum_sum = 0 for batch in ds.iter_batches(batch_size=2, batch_format="pandas"): assert isinstance(batch, pd.DataFrame) - # Simple integer Dataset is converted to a single-column Pandas DataFrame. 
+ # Simple integer Datastream is converted to a single-column Pandas DataFrame. cum_sum += batch["value"] print(cum_sum) # -> 49995000 @@ -81,7 +81,7 @@ import ray @ray.remote -def consume(data: ray.data.Dataset[int]) -> int: +def consume(data: ray.data.Datastream[int]) -> int: num_batches = 0 # Consume data in 2-record batches. for batch in data.iter_batches(batch_size=2): @@ -103,20 +103,23 @@ class Worker: def __init__(self, rank: int): pass - def train(self, shard: ray.data.Dataset[int]) -> int: + def train(self, shard: ray.data.DataIterator) -> int: + total = 0 for batch in shard.iter_torch_batches(batch_size=256): - pass - return shard.count() + total += len(batch) + return total workers = [Worker.remote(i) for i in range(4)] # -> [Actor(Worker, ...), Actor(Worker, ...), ...] ds = ray.data.range(10000) -# -> Dataset(num_blocks=200, num_rows=10000, schema=) +# -> Datastream(num_blocks=200, num_rows=10000, schema=) -shards = ds.split(n=4, locality_hints=workers) -# -> [Dataset(num_blocks=13, num_rows=2500, schema=), -# Dataset(num_blocks=13, num_rows=2500, schema=), ...] +shards = ds.streaming_split(n=4, equal=True) +# -> [, +# , +# , +# ] ray.get([w.train.remote(s) for w, s in zip(workers, shards)]) # -> [2500, 2500, 2500, 2500] diff --git a/doc/source/data/doc_code/creating_datasets.py b/doc/source/data/doc_code/creating_datastreams.py similarity index 82% rename from doc/source/data/doc_code/creating_datasets.py rename to doc/source/data/doc_code/creating_datastreams.py index 05e79cdc0194..3b74578ed7dc 100644 --- a/doc/source/data/doc_code/creating_datasets.py +++ b/doc/source/data/doc_code/creating_datastreams.py @@ -1,9 +1,9 @@ # flake8: noqa # fmt: off -# __creating_datasets_import_begin__ +# __creating_datastreams_import_begin__ import ray -# __creating_datasets_import_end__ +# __creating_datastreams_import_end__ # fmt: on # For tfrecords @@ -11,9 +11,9 @@ # fmt: off # __gen_synth_int_range_begin__ -# Create a Dataset of Python objects. 
+# Create a Datastream of Python objects. ds = ray.data.range(10000) -# -> Dataset(num_blocks=200, num_rows=10000, schema=) +# -> Datastream(num_blocks=200, num_rows=10000, schema=) ds.take(5) # -> [0, 1, 2, 3, 4] @@ -22,9 +22,9 @@ # fmt: off # __gen_synth_tabular_range_begin__ -# Create a Dataset of Arrow records. +# Create a Datastream of Arrow records. ds = ray.data.range_table(10000) -# -> Dataset(num_blocks=200, num_rows=10000, schema={value: int64}) +# -> Datastream(num_blocks=200, num_rows=10000, schema={value: int64}) ds.take(5) # -> [{'value': 0}, {'value': 1}, {'value': 2}, {'value': 3}, {'value': 4}] @@ -33,9 +33,9 @@ # fmt: off # __gen_synth_tensor_range_begin__ -# Create a Dataset of tensors. +# Create a Datastream of tensors. ds = ray.data.range_tensor(100 * 64 * 64, shape=(64, 64)) -# -> Dataset( +# -> Datastream( # num_blocks=200, # num_rows=409600, # schema={__value__: numpy.ndarray(shape=(64, 64), dtype=int64)} @@ -61,9 +61,9 @@ # fmt: off # __from_items_begin__ -# Create a Dataset of tabular (Arrow) records. +# Create a Datastream of tabular (Arrow) records. ds = ray.data.from_items([{"col1": i, "col2": str(i)} for i in range(10000)]) -# -> Dataset(num_blocks=200, num_rows=10000, schema={col1: int64, col2: string}) +# -> MaterializedDatastream(num_blocks=200, num_rows=10000, schema={col1: int64, col2: string}) ds.show(3) # -> {'col1': 0, 'col2': '0'} @@ -76,10 +76,10 @@ # __from_pandas_begin__ import pandas as pd -# Create a tabular Dataset from a Pandas DataFrame. +# Create a tabular Datastream from a Pandas DataFrame. 
df = pd.DataFrame({"col1": list(range(10000)), "col2": list(map(str, range(10000)))}) ds = ray.data.from_pandas(df) -# -> Dataset(num_blocks=1, num_rows=10000, schema={col1: int64, col2: object}) +# -> MaterializedDatastream(num_blocks=1, num_rows=10000, schema={col1: int64, col2: object}) ds.show(3) # -> {'col1': 0, 'col2': '0'} @@ -100,9 +100,9 @@ pd.DataFrame({"col1": list(chunk), "col2": list(map(str, chunk))}) for chunk in chunks ] -# Create a tabular Dataset from multiple Pandas DataFrames. +# Create a tabular Datastream from multiple Pandas DataFrames. ds = ray.data.from_pandas(dfs) -# -> Dataset(num_blocks=10, num_rows=10000, schema={col1: int64, col2: object}) +# -> MaterializedDatastream(num_blocks=10, num_rows=10000, schema={col1: int64, col2: object}) ds.show(3) # -> {'col1': 0, 'col2': '0'} @@ -115,11 +115,11 @@ # __from_numpy_begin__ import numpy as np -# Create a tensor Dataset from a 3D NumPy ndarray. +# Create a tensor Datastream from a 3D NumPy ndarray. arr = np.ones((3, 4, 4)) # The outer dimension is treated as the row dimension. ds = ray.data.from_numpy(arr) -# -> Dataset( +# -> MaterializedDatastream( # num_blocks=1, # num_rows=3, # schema={__value__: numpy.ndarray(shape=(4, 4), dtype=double)} @@ -140,7 +140,7 @@ # fmt: off # __read_images_begin__ ds = ray.data.read_images("example://image-datasets/simple") -# -> Dataset(num_blocks=3, num_rows=3, +# -> Datastream(num_blocks=3, num_rows=3, # schema={image: numpy.ndarray(shape=(32, 32, 3), dtype=uint8)}) ds.take(1) @@ -158,11 +158,11 @@ # __from_numpy_mult_begin__ import numpy as np -# Create a tensor Dataset from multiple 3D NumPy ndarray. +# Create a tensor Datastream from multiple 3D NumPy ndarray. arrs = [np.random.rand(2, 4, 4) for _ in range(4)] # The outer dimension is treated as the row dimension. 
ds = ray.data.from_numpy(arrs) -# -> Dataset( +# -> MaterializedDatastream( # num_blocks=4, # num_rows=8, # schema={__value__: numpy.ndarray(shape=(4, 4), dtype=double)} @@ -184,10 +184,10 @@ # __from_arrow_begin__ import pyarrow as pa -# Create a tabular Dataset from an Arrow Table. +# Create a tabular Datastream from an Arrow Table. t = pa.table({"col1": list(range(10000)), "col2": list(map(str, range(10000)))}) ds = ray.data.from_arrow(t) -# -> Dataset(num_blocks=1, num_rows=10000, schema={col1: int64, col2: string}) +# -> MaterializedDatastream(num_blocks=1, num_rows=10000, schema={col1: int64, col2: string}) ds.show(3) # -> {'col1': 0, 'col2': '0'} @@ -208,9 +208,9 @@ pa.table({"col1": list(chunk), "col2": list(map(str, chunk))}) for chunk in chunks ] -# Create a tabular Dataset from multiple Arrow Tables. +# Create a tabular Datastream from multiple Arrow Tables. ds = ray.data.from_arrow(ts) -# -> Dataset(num_blocks=10, num_rows=10000, schema={col1: int64, col2: string}) +# -> MaterializedDatastream(num_blocks=10, num_rows=10000, schema={col1: int64, col2: string}) ds.show(3) # -> {'col1': 0, 'col2': '0'} @@ -226,9 +226,9 @@ df = pd.DataFrame({"col1": list(range(10000)), "col2": list(map(str, range(10000)))}) ddf = dd.from_pandas(df, npartitions=4) -# Create a tabular Dataset from a Dask DataFrame. +# Create a tabular Datastream from a Dask DataFrame. ds = ray.data.from_dask(ddf) -# -> Dataset(num_blocks=10, num_rows=10000, schema={col1: int64, col2: object}) +# -> MaterializedDatastream(num_blocks=10, num_rows=10000, schema={col1: int64, col2: object}) ds.show(3) # -> {'col1': 0, 'col2': '0'} @@ -243,9 +243,9 @@ df = pd.DataFrame({"col1": list(range(10000)), "col2": list(map(str, range(10000)))}) mdf = md.DataFrame(df) -# Create a tabular Dataset from a Modin DataFrame. +# Create a tabular Datastream from a Modin DataFrame. 
ds = ray.data.from_modin(mdf) -# -> Dataset(num_blocks=8, num_rows=10000, schema={col1: int64, col2: object}) +# -> MaterializedDatastream(num_blocks=8, num_rows=10000, schema={col1: int64, col2: object}) ds.show(3) # -> {'col1': 0, 'col2': '0'} @@ -256,9 +256,9 @@ # fmt: off # __read_parquet_begin__ -# Create a tabular Dataset by reading a Parquet file. +# Create a tabular Datastream by reading a Parquet file. ds = ray.data.read_parquet("example://iris.parquet") -# -> Dataset( +# -> Datastream( # num_blocks=1, # num_rows=150, # schema={ @@ -292,14 +292,14 @@ # __read_parquet_pushdown_begin__ import pyarrow as pa -# Create a tabular Dataset by reading a Parquet file, pushing column selection and row +# Create a tabular Datastream by reading a Parquet file, pushing column selection and row # filtering down to the file scan. ds = ray.data.read_parquet( "example://iris.parquet", columns=["sepal.length", "variety"], filter=pa.dataset.field("sepal.length") > 5.0, ).materialize() # Force a full read of the file. -# -> Dataset(num_blocks=1, num_rows=118, schema={sepal.length: double, variety: string}) +# -> Datastream(num_blocks=1, num_rows=118, schema={sepal.length: double, variety: string}) ds.show(2) # -> {'sepal.length': 5.1, 'variety': 'Setosa'} @@ -309,9 +309,9 @@ # fmt: off # __read_csv_begin__ -# Create a tabular Dataset by reading a CSV file. +# Create a tabular Datastream by reading a CSV file. ds = ray.data.read_csv("example://iris.csv") -# -> Dataset( +# -> Datastream( # num_blocks=1, # num_rows=150, # schema={ @@ -343,9 +343,9 @@ # fmt: off # __read_json_begin__ -# Create a tabular Dataset by reading a JSON file. +# Create a tabular Datastream by reading a JSON file. ds = ray.data.read_json("example://iris.json") -# -> Dataset( +# -> Datastream( # num_blocks=1, # num_rows=150, # schema={ @@ -377,9 +377,9 @@ # fmt: off # __read_numpy_begin__ -# Create a tensor Dataset by reading a NumPy file. +# Create a tensor Datastream by reading a NumPy file. 
ds = ray.data.read_numpy("example://mnist_subset.npy") -# -> Dataset( +# -> Datastream( # num_blocks=1, # num_rows=3, # schema={__value__: numpy.ndarray(shape=(28, 28), dtype=uint8)} @@ -392,9 +392,9 @@ # fmt: off # __read_text_begin__ -# Create a tabular Dataset by reading a text file. +# Create a tabular Datastream by reading a text file. ds = ray.data.read_text("example://sms_spam_collection_subset.txt") -# -> Dataset(num_blocks=1, num_rows=10, schema=) +# -> Datastream(num_blocks=1, num_rows=10, schema=) ds.show(3) # -> ham Go until jurong point, crazy.. Available only in bugis n great world la e @@ -411,12 +411,12 @@ from io import BytesIO import PIL.Image -# Create a tabular Dataset by reading a binary file. +# Create a tabular Datastream by reading a binary file. ds = ray.data.read_binary_files("example://mnist_subset_partitioned/0/1.png") -# -> Dataset(num_blocks=1, num_rows=1, schema=) +# -> Datastream(num_blocks=1, num_rows=1, schema=) ds = ds.map(lambda bytes_: np.asarray(PIL.Image.open(BytesIO(bytes_)).convert("L"))) -# -> Dataset( +# -> Datastream( # num_blocks=1, # num_rows=1, # schema={__value__: numpy.ndarray(shape=(28, 28), dtype=uint8)} @@ -434,9 +434,9 @@ # fmt: off # __read_parquet_s3_begin__ -# Create a tabular Dataset by reading a Parquet file from S3. +# Create a tabular Datastream by reading a Parquet file from S3. ds = ray.data.read_parquet("s3://anonymous@air-example-data/ursa-labs-taxi-data/by_year/2019/01/data.parquet") -# -> Dataset( +# -> Datastream( # num_blocks=1, # num_rows=7667792, # schema={ @@ -487,9 +487,9 @@ # fmt: off # __read_tfrecords_begin__ -# Create a tabular Dataset by reading a TFRecord file. +# Create a tabular Datastream by reading a TFRecord file. 
ds = ray.data.read_tfrecords("example://iris.tfrecords") -# Dataset( +# Datastream( # num_blocks=1, # num_rows=150, # schema={ diff --git a/doc/source/data/doc_code/creating_datasets_untested.py b/doc/source/data/doc_code/creating_datastreams_untested.py similarity index 79% rename from doc/source/data/doc_code/creating_datasets_untested.py rename to doc/source/data/doc_code/creating_datastreams_untested.py index fbc7923b3b98..87a30fe57d57 100644 --- a/doc/source/data/doc_code/creating_datasets_untested.py +++ b/doc/source/data/doc_code/creating_datastreams_untested.py @@ -7,14 +7,14 @@ # __from_spark_begin__ import raydp -spark = raydp.init_spark(app_name="Spark -> Datasets Example", +spark = raydp.init_spark(app_name="Spark -> Datastreams Example", num_executors=2, executor_cores=2, executor_memory="500MB") df = spark.createDataFrame([(i, str(i)) for i in range(10000)], ["col1", "col2"]) -# Create a tabular Dataset from a Spark DataFrame. -ds = ray.data.from_dask(df) -# -> Dataset(num_blocks=10, num_rows=10000, schema={col1: int64, col2: string}) +# Create a tabular Datastream from a Spark DataFrame. +ds = ray.data.from_spark(df) +# -> MaterializedDatastream(num_blocks=10, num_rows=10000, schema={col1: int64, col2: string}) ds.show(3) # -> {'col1': 0, 'col2': '0'} @@ -27,7 +27,7 @@ # __read_parquet_s3_with_fs_begin__ import pyarrow as pa -# Create a tabular Dataset by reading a Parquet file from a private S3 bucket. +# Create a tabular Datastream by reading a Parquet file from a private S3 bucket. # NOTE: This example is not runnable as-is; add in a path to your private bucket and the # required S3 credentials! ds = ray.data.read_parquet( @@ -43,7 +43,7 @@ # fmt: off # __read_parquet_hdfs_begin__ -# Create a tabular Dataset by reading a Parquet file from HDFS using HDFS connection +# Create a tabular Datastream by reading a Parquet file from HDFS using HDFS connection # automatically constructed based on the URI. 
# NOTE: This example is not runnable as-is; you'll need to point it at your HDFS # cluster/data. @@ -58,7 +58,7 @@ # __read_parquet_hdfs_with_fs_begin__ import pyarrow as pa -# Create a tabular Dataset by reading a Parquet file from HDFS, manually specifying a +# Create a tabular Datastream by reading a Parquet file from HDFS, manually specifying a # configured HDFS connection via a Pyarrow HDFSFileSystem instance. # NOTE: This example is not runnable as-is; you'll need to point it at your HDFS # cluster/data. @@ -75,7 +75,7 @@ # __read_parquet_gcs_begin__ import gcsfs -# Create a tabular Dataset by reading a Parquet file from GCS, passing the configured +# Create a tabular Datastream by reading a Parquet file from GCS, passing the configured # GCSFileSystem. # NOTE: This example is not runnable as-is; you need to point it at your GCS bucket # and configure your GCP project and credentials. @@ -99,7 +99,7 @@ # __read_parquet_az_begin__ import adlfs -# Create a tabular Dataset by reading a Parquet file from Azure Blob Storage, passing +# Create a tabular Datastream by reading a Parquet file from Azure Blob Storage, passing # the configured AzureBlobFileSystem. path = ( "az://nyctlc/yellow/puYear=2009/puMonth=1/" @@ -123,9 +123,9 @@ df = pd.DataFrame({"col1": list(range(10000)), "col2": list(map(str, range(10000)))}) mdf = md.DataFrame(df, num_partitions=8) -# Create a tabular Dataset from a Mars DataFrame. +# Create a tabular Datastream from a Mars DataFrame. 
ds = ray.data.from_mars(mdf) -# -> Dataset(num_blocks=8, num_rows=10000, schema={col1: int64, col2: object}) +# -> MaterializedDatastream(num_blocks=8, num_rows=10000, schema={col1: int64, col2: object}) ds.show(3) # -> {'col1': 0, 'col2': '0'} diff --git a/doc/source/data/doc_code/key_concepts.py b/doc/source/data/doc_code/key_concepts.py index b0935a743ed3..4769bf47601f 100644 --- a/doc/source/data/doc_code/key_concepts.py +++ b/doc/source/data/doc_code/key_concepts.py @@ -5,7 +5,7 @@ import ray from ray import tune -# This Dataset workload will use spare cluster resources for execution. +# This workload will use spare cluster resources for execution. def objective(*args): ray.data.range(10).show() @@ -13,7 +13,7 @@ def objective(*args): ray.init(num_cpus=4) # By setting `max_concurrent_trials=3`, this ensures the cluster will always -# have a sparse CPU for Datasets. Try setting `max_concurrent_trials=4` here, +# have a sparse CPU for Datastream. Try setting `max_concurrent_trials=4` here, # and notice that the experiment will appear to hang. tuner = tune.Tuner( tune.with_resources(objective, {"cpu": 1}), @@ -33,7 +33,7 @@ def objective(*args): import ray from ray import tune -# This Dataset workload will use reserved cluster resources for execution. +# This workload will use reserved cluster resources for execution. def objective(*args): ray.data.range(10).show() @@ -41,7 +41,7 @@ def objective(*args): ray.init(num_cpus=4) # This runs smoothly since _max_cpu_fraction_per_node is set to 0.8, effectively -# reserving 1 CPU for Datasets task execution. +# reserving 1 CPU for Datastream task execution. 
tuner = tune.Tuner( tune.with_resources(objective, tune.PlacementGroupFactory( [{"CPU": 1}], diff --git a/doc/source/data/doc_code/quick_start.py b/doc/source/data/doc_code/quick_start.py index fd660be1e605..33a457d6152f 100644 --- a/doc/source/data/doc_code/quick_start.py +++ b/doc/source/data/doc_code/quick_start.py @@ -4,9 +4,9 @@ # __create_from_python_begin__ import ray -# Create a Dataset of Python objects. +# Create a Datastream of Python objects. ds = ray.data.range(10000) -# -> Dataset(num_blocks=200, num_rows=10000, schema=) +# -> Datastream(num_blocks=200, num_rows=10000, schema=) ds.take(5) # -> [0, 1, 2, 3, 4] @@ -14,7 +14,7 @@ ds.schema() # -# Create a Dataset from Python objects, which are held as Arrow records. +# Create a Datastream from Python objects, which are held as Arrow records. ds = ray.data.from_items([ {"sepal.length": 5.1, "sepal.width": 3.5, "petal.length": 1.4, "petal.width": 0.2, "variety": "Setosa"}, @@ -23,7 +23,7 @@ {"sepal.length": 4.7, "sepal.width": 3.2, "petal.length": 1.3, "petal.width": 0.2, "variety": "Setosa"}, ]) -# Dataset(num_blocks=3, num_rows=3, +# Datastream(num_blocks=3, num_rows=3, # schema={sepal.length: float64, sepal.width: float64, # petal.length: float64, petal.width: float64, variety: object}) @@ -48,13 +48,13 @@ # __create_from_files_begin__ # Create from CSV. ds = ray.data.read_csv("s3://anonymous@air-example-data/iris.csv") -# Dataset(num_blocks=1, num_rows=150, +# Datastream(num_blocks=1, num_rows=150, # schema={sepal length (cm): double, sepal width (cm): double, # petal length (cm): double, petal width (cm): double, target: int64}) # Create from Parquet. ds = ray.data.read_parquet("s3://anonymous@air-example-data/iris.parquet") -# Dataset(num_blocks=1, num_rows=150, +# Datastream(num_blocks=1, num_rows=150, # schema={sepal.length: double, sepal.width: double, # petal.length: double, petal.width: double, variety: string}) @@ -67,7 +67,7 @@ # Create 10 blocks for parallelism. 
ds = ds.repartition(10) -# Dataset(num_blocks=10, num_rows=150, +# Datastream(num_blocks=10, num_rows=150, # schema={sepal.length: float64, sepal.width: float64, # petal.length: float64, petal.width: float64, variety: object}) @@ -76,7 +76,7 @@ def transform_batch(df: pandas.DataFrame) -> pandas.DataFrame: return df[(df["sepal.length"] < 5.5) & (df["petal.length"] > 3.5)] transformed_ds = ds.map_batches(transform_batch) -# Dataset(num_blocks=10, num_rows=3, +# Datastream(num_blocks=10, num_rows=3, # schema={sepal.length: float64, sepal.width: float64, # petal.length: float64, petal.width: float64, variety: object}) diff --git a/doc/source/data/doc_code/saving_datasets.py b/doc/source/data/doc_code/saving_datastreams.py similarity index 93% rename from doc/source/data/doc_code/saving_datasets.py rename to doc/source/data/doc_code/saving_datastreams.py index ece4e6240d75..80a5b30d4553 100644 --- a/doc/source/data/doc_code/saving_datasets.py +++ b/doc/source/data/doc_code/saving_datastreams.py @@ -10,7 +10,7 @@ import ray ds = ray.data.range(1000) -# -> Dataset(num_blocks=200, num_rows=1000, schema=) +# -> Datastream(num_blocks=200, num_rows=1000, schema=) ds.take(5) # -> [0, 1, 2, 3, 4] @@ -31,7 +31,7 @@ import ray ds = ray.data.range(1000) -# -> Dataset(num_blocks=200, num_rows=1000, schema=) +# -> Datastream(num_blocks=200, num_rows=1000, schema=) ds.take(5) # -> [0, 1, 2, 3, 4] @@ -52,7 +52,7 @@ import ray ds = ray.data.range(1000) -# -> Dataset(num_blocks=200, num_rows=1000, schema=) +# -> Datastream(num_blocks=200, num_rows=1000, schema=) ds.take(5) # -> [0, 1, 2, 3, 4] @@ -74,7 +74,7 @@ import numpy as np ds = ray.data.from_numpy(np.arange(1000)) -# -> Dataset( +# -> Datastream( # num_blocks=1, # num_rows=1000, # schema={value: }, @@ -105,7 +105,7 @@ {"some_int": 2, "some_float": 2.0, "some_bytestring": b"def"}, ] ) -# -> Dataset( +# -> Datastream( # num_blocks=2, # num_rows=2, # schema={some_int: int64, some_float: double, some_bytestring: binary} diff --git 
a/doc/source/data/doc_code/tensor.py b/doc/source/data/doc_code/tensor.py index e2a515e91c30..6d1a1d2606fc 100644 --- a/doc/source/data/doc_code/tensor.py +++ b/doc/source/data/doc_code/tensor.py @@ -6,9 +6,9 @@ # __create_range_begin__ import ray -# Create a Dataset of tensors. +# Create a Datastream of tensors. ds = ray.data.range_tensor(10000, shape=(64, 64)) -# -> Dataset(num_blocks=200, num_rows=10000, +# -> Datastream(num_blocks=200, num_rows=10000, # schema={__value__: numpy.ndarray(shape=(64, 64), dtype=int64)}) ds.take(2) @@ -34,7 +34,7 @@ import pandas as pd import numpy as np -# Start with a tabular base dataset. +# Start with a tabular base datastream. ds = ray.data.range_table(1000) # Create a single TensorArray column. @@ -53,7 +53,7 @@ def single_col_udf(batch: pd.DataFrame) -> pd.DataFrame: ds.map_batches(single_col_udf) ds.materialize() -# -> Dataset(num_blocks=17, num_rows=1000, +# -> Datastream(num_blocks=17, num_rows=1000, # schema={__value__: numpy.ndarray(shape=(128, 128, 3), dtype=int64)}) # __create_pandas_end__ @@ -75,7 +75,7 @@ def multi_col_udf(batch: pd.DataFrame) -> pd.DataFrame: ds.map_batches(multi_col_udf) ds.materialize() -# -> Dataset(num_blocks=17, num_rows=1000, +# -> Datastream(num_blocks=17, num_rows=1000, # schema={image: numpy.ndarray(shape=(128, 128, 3), dtype=int64), # embed: numpy.ndarray(shape=(256,), dtype=uint8)}) # __create_pandas_2_end__ @@ -85,12 +85,12 @@ def multi_col_udf(batch: pd.DataFrame) -> pd.DataFrame: # From in-memory numpy data. ray.data.from_numpy(np.zeros((1000, 128, 128, 3), dtype=np.int64)) -# -> Dataset(num_blocks=1, num_rows=1000, +# -> Datastream(num_blocks=1, num_rows=1000, # schema={__value__: numpy.ndarray(shape=(128, 128, 3), dtype=int64)}) # From saved numpy files. 
ray.data.read_numpy("example://mnist_subset.npy") -# -> Dataset(num_blocks=1, num_rows=3, +# -> Datastream(num_blocks=1, num_rows=3, # schema={__value__: numpy.ndarray(shape=(28, 28), dtype=uint8)}) # __create_numpy_end__ @@ -99,7 +99,7 @@ def multi_col_udf(batch: pd.DataFrame) -> pd.DataFrame: # Reading previously saved Tensor data works out of the box. ds = ray.data.read_parquet("example://parquet_images_mini") -# -> Dataset(num_blocks=3, num_rows=3, +# -> Datastream(num_blocks=3, num_rows=3, # schema={image: numpy.ndarray(shape=(128, 128, 3), dtype=uint8), # label: string}) @@ -140,12 +140,12 @@ def multi_col_udf(batch: pd.DataFrame) -> pd.DataFrame: "one": [1, 2, 3], "two": [tensor.tobytes() for tensor in arr]}) -# Write the dataset to Parquet. The tensor column will be written as an +# Write the datastream to Parquet. The tensor column will be written as an # array of opaque byte blobs. ds = ray.data.from_pandas([df]) ds.write_parquet(path) -# Read the Parquet files into a new Dataset, with the serialized tensors +# Read the Parquet files into a new Datastream, with the serialized tensors # automatically cast to our tensor column extension type. ds = ray.data.read_parquet( path, tensor_column_schema={"two": (np.int_, (2, 2, 2))}) @@ -172,7 +172,7 @@ def multi_col_udf(batch: pd.DataFrame) -> pd.DataFrame: "one": [1, 2, 3], "two": [pickle.dumps(tensor) for tensor in arr]}) -# Write the dataset to Parquet. The tensor column will be written as an +# Write the datastream to Parquet. The tensor column will be written as an # array of opaque byte blobs. ds = ray.data.from_pandas([df]) ds.write_parquet(path) @@ -184,7 +184,7 @@ def cast_udf(block: pa.Table) -> pa.Table: block["two"] = TensorArray([pickle.loads(a) for a in block["two"]]) return pa.Table.from_pandas(block) -# Read the Parquet files into a new Dataset, applying the casting UDF +# Read the Parquet files into a new Datastream, applying the casting UDF # on-the-fly within the underlying read tasks. 
ds = ray.data.read_parquet(path, _block_udf=cast_udf) @@ -197,7 +197,7 @@ def cast_udf(block: pa.Table) -> pa.Table: # __create_images_begin__ ds = ray.data.read_images("example://image-datasets/simple") -# -> Dataset(num_blocks=3, num_rows=3, +# -> Datastream(num_blocks=3, num_rows=3, # schema={__value__: numpy.ndarray(shape=(32, 32, 3), dtype=uint8)}) ds.take(1) @@ -214,9 +214,9 @@ def cast_udf(block: pa.Table) -> pa.Table: # __consume_native_begin__ import ray -# Read a single-column example dataset. +# Read a single-column example datastream. ds = ray.data.read_numpy("example://mnist_subset.npy") -# -> Dataset(num_blocks=1, num_rows=3, +# -> Datastream(num_blocks=1, num_rows=3, # schema={__value__: numpy.ndarray(shape=(28, 28), dtype=uint8)}) def add_one(batch: np.ndarray) -> np.ndarray: @@ -245,9 +245,9 @@ def add_one(batch: np.ndarray) -> np.ndarray: # __consume_native_2_begin__ import ray -# Read a multi-column example dataset. +# Read a multi-column example datastream. ds = ray.data.read_parquet("example://parquet_images_mini") -# -> Dataset(num_blocks=3, num_rows=3, +# -> Datastream(num_blocks=3, num_rows=3, # schema={image: numpy.ndarray(shape=(128, 128, 3), dtype=uint8), # label: string}) @@ -269,9 +269,9 @@ def add_one(batch: pd.DataFrame) -> pd.DataFrame: # __consume_pandas_begin__ import ray -# Read a single-column example dataset. +# Read a single-column example datastream. ds = ray.data.read_numpy("example://mnist_subset.npy") -# -> Dataset(num_blocks=1, num_rows=3, +# -> Datastream(num_blocks=1, num_rows=3, # schema={__value__: numpy.ndarray(shape=(28, 28), dtype=uint8)}) def add_one(batch: pd.DataFrame) -> pd.DataFrame: @@ -292,9 +292,9 @@ def add_one(batch: pd.DataFrame) -> pd.DataFrame: # __consume_pandas_2_begin__ import ray -# Read a multi-column example dataset. +# Read a multi-column example datastream. 
ds = ray.data.read_parquet("example://parquet_images_mini") -# -> Dataset(num_blocks=3, num_rows=3, +# -> Datastream(num_blocks=3, num_rows=3, # schema={image: numpy.ndarray(shape=(128, 128, 3), dtype=uint8), # label: string}) @@ -319,9 +319,9 @@ def add_one(batch: pd.DataFrame) -> pd.DataFrame: import pyarrow -# Read a single-column example dataset. +# Read a single-column example datastream. ds = ray.data.read_numpy("example://mnist_subset.npy") -# -> Dataset(num_blocks=1, num_rows=3, +# -> Datastream(num_blocks=1, num_rows=3, # schema={__value__: numpy.ndarray(shape=(28, 28), dtype=uint8)}) def add_one(batch: pyarrow.Table) -> pyarrow.Table: @@ -351,9 +351,9 @@ def add_one(batch: pyarrow.Table) -> pyarrow.Table: # __consume_pyarrow_end__ # __consume_pyarrow_2_begin__ -# Read a multi-column example dataset. +# Read a multi-column example datastream. ds = ray.data.read_parquet("example://parquet_images_mini") -# -> Dataset(num_blocks=3, num_rows=3, +# -> Datastream(num_blocks=3, num_rows=3, # schema={image: numpy.ndarray(shape=(128, 128, 3), dtype=uint8), label: object}) def add_one(batch: pyarrow.Table) -> pyarrow.Table: @@ -387,9 +387,9 @@ def add_one(batch: pyarrow.Table) -> pyarrow.Table: # __consume_numpy_begin__ import ray -# Read a single-column example dataset. +# Read a single-column example datastream. ds = ray.data.read_numpy("example://mnist_subset.npy") -# -> Dataset(num_blocks=1, num_rows=3, +# -> Datastream(num_blocks=1, num_rows=3, # schema={__value__: numpy.ndarray(shape=(28, 28), dtype=uint8)}) def add_one(batch: np.ndarray) -> np.ndarray: @@ -417,9 +417,9 @@ def add_one(batch: np.ndarray) -> np.ndarray: # __consume_numpy_end__ # __consume_numpy_2_begin__ -# Read a multi-column example dataset. +# Read a multi-column example datastream. 
ds = ray.data.read_parquet("example://parquet_images_mini") -# -> Dataset(num_blocks=3, num_rows=3, +# -> Datastream(num_blocks=3, num_rows=3, # schema={image: numpy.ndarray(shape=(128, 128, 3), dtype=uint8), label: object}) def add_one(batch: Dict[str, Any]) -> Dict[str, Any]: @@ -453,12 +453,12 @@ def add_one(batch: Dict[str, Any]) -> Dict[str, Any]: shutil.rmtree("/tmp/some_path") # __write_1_begin__ -# Read a multi-column example dataset. +# Read a multi-column example datastream. ds = ray.data.read_parquet("example://parquet_images_mini") -# -> Dataset(num_blocks=3, num_rows=3, +# -> Datastream(num_blocks=3, num_rows=3, # schema={image: numpy.ndarray(shape=(128, 128, 3), dtype=uint8), label: object}) -# You can write the dataset to Parquet. +# You can write the datastream to Parquet. ds.write_parquet("/tmp/some_path") # And you can read it back. @@ -472,12 +472,12 @@ def add_one(batch: Dict[str, Any]) -> Dict[str, Any]: shutil.rmtree("/tmp/some_path") # __write_2_begin__ -# Read a single-column example dataset. +# Read a single-column example datastream. ds = ray.data.read_numpy("example://mnist_subset.npy") -# -> Dataset(num_blocks=1, num_rows=3, +# -> Datastream(num_blocks=1, num_rows=3, # schema={__value__: numpy.ndarray(shape=(28, 28), dtype=uint8)}) -# You can write the dataset to Parquet. +# You can write the datastream to Parquet. ds.write_numpy("/tmp/some_path") # And you can read it back. @@ -488,11 +488,11 @@ def add_one(batch: Dict[str, Any]) -> Dict[str, Any]: # fmt: off # __create_variable_shaped_tensors_begin___ -# Create a Dataset of variable-shaped tensors. +# Create a Datastream of variable-shaped tensors. 
ragged_array = np.array([np.ones((2, 2)), np.ones((3, 3))], dtype=object) df = pd.DataFrame({"feature": ragged_array, "label": [1, 1]}) ds = ray.data.from_pandas([df, df]) -# -> Dataset(num_blocks=2, num_rows=4, +# -> Datastream(num_blocks=2, num_rows=4, # schema={feature: numpy.ndarray(shape=(None, None), dtype=float64), # label: int64}) @@ -508,7 +508,7 @@ def add_one(batch: Dict[str, Any]) -> Dict[str, Any]: # fmt: off # __tf_variable_shaped_tensors_begin___ -# Convert Ray Dataset to a TensorFlow Dataset. +# Convert Datastream to a TensorFlow Dataset. tf_ds = ds.to_tf( batch_size=2, feature_columns="feature", diff --git a/doc/source/data/doc_code/transforming_datasets.py b/doc/source/data/doc_code/transforming_datastreams.py similarity index 89% rename from doc/source/data/doc_code/transforming_datasets.py rename to doc/source/data/doc_code/transforming_datastreams.py index 648735f0de20..f4345f9dc87e 100644 --- a/doc/source/data/doc_code/transforming_datasets.py +++ b/doc/source/data/doc_code/transforming_datastreams.py @@ -1,15 +1,15 @@ # flake8: noqa # fmt: off -# __dataset_transformation_begin__ +# __datastream_transformation_begin__ import ray import pandas -# Create a dataset from file with Iris data. +# Create a datastream from file with Iris data. # Tip: "example://" is a convenient protocol to access the # python/ray/data/examples/data directory. ds = ray.data.read_csv("example://iris.csv") -# Dataset(num_blocks=1, num_rows=150, +# Datastream(num_blocks=1, num_rows=150, # schema={sepal.length: float64, sepal.width: float64, # petal.length: float64, petal.width: float64, variety: object}) ds.show(3) @@ -20,10 +20,10 @@ # -> {'sepal.length': 4.7, 'sepal.width': 3.2, # 'petal.length': 1.3, 'petal.width': 0.2, 'variety': 'Setosa'} -# Repartition the dataset to 5 blocks. +# Repartition the datastream to 5 blocks. 
ds = ds.repartition(5) # -> Repartition -# +- Dataset(num_blocks=1, num_rows=150, +# +- Datastream(num_blocks=1, num_rows=150, # schema={sepal.length: float64, sepal.width: float64, # petal.length: float64, petal.width: float64, variety: object}) @@ -31,7 +31,7 @@ def transform_batch(df: pandas.DataFrame) -> pandas.DataFrame: return df[(df["sepal.length"] < 5.5) & (df["petal.length"] > 3.5)] -# Map processing the dataset. +# Map processing the datastream. ds.map_batches(transform_batch).show() # -> {'sepal.length': 5.2, 'sepal.width': 2.7, # 'petal.length': 3.9, 'petal.width': 1.4, 'variety': 'Versicolor'} @@ -40,16 +40,12 @@ def transform_batch(df: pandas.DataFrame) -> pandas.DataFrame: # -> {'sepal.length': 4.9, 'sepal.width': 2.5, # 'petal.length': 4.5, 'petal.width': 1.7, 'variety': 'Virginica'} -# Split the dataset into 2 datasets -ds.split(2) -# -> [Dataset(num_blocks=3, num_rows=90, -# schema={sepal.length: double, sepal.width: double, -# petal.length: double, petal.width: double, variety: string}), -# Dataset(num_blocks=2, num_rows=60, -# schema={sepal.length: double, sepal.width: double, -# petal.length: double, petal.width: double, variety: string})] +# Split the datastream into 2 disjoint iterators. +ds.streaming_split(2) +# -> [, +# ] -# Sort the dataset by sepal.length. +# Sort the datastream by sepal.length. ds = ds.sort("sepal.length") ds.show(3) # -> {'sepal.length': 4.3, 'sepal.width': 3.0, @@ -59,7 +55,7 @@ def transform_batch(df: pandas.DataFrame) -> pandas.DataFrame: # -> {'sepal.length': 4.4, 'sepal.width': 3.0, # 'petal.length': 1.3, 'petal.width': 0.2, 'variety': 'Setosa'} -# Shuffle the dataset. +# Shuffle the datastream. 
ds = ds.random_shuffle() ds.show(3) # -> {'sepal.length': 6.7, 'sepal.width': 3.1, @@ -74,7 +70,7 @@ def transform_batch(df: pandas.DataFrame) -> pandas.DataFrame: # -> {'variety': 'Setosa', 'count()': 50} # -> {'variety': 'Versicolor', 'count()': 50} # -> {'variety': 'Virginica', 'count()': 50} -# __dataset_transformation_end__ +# __datastream_transformation_end__ # fmt: on # fmt: off @@ -82,7 +78,7 @@ def transform_batch(df: pandas.DataFrame) -> pandas.DataFrame: import ray import pandas as pd -# Load dataset. +# Load datastream. ds = ray.data.read_csv("example://iris.csv") print(ds.default_batch_format()) # @@ -112,7 +108,7 @@ def pandas_transform(df_batch: pd.DataFrame) -> pd.DataFrame: import ray import numpy as np -# Load dataset. +# Load datastream. ds = ray.data.range_tensor(1000, shape=(2, 2)) print(ds.default_batch_format()) # @@ -136,7 +132,7 @@ def tensor_transform(arr: np.ndarray) -> np.ndarray: # __writing_default_udfs_list_begin__ import ray -# Load dataset. +# Load datastream. ds = ray.data.range(1000) print(ds.default_batch_format()) # @@ -159,7 +155,7 @@ def list_transform(list) -> list: import ray import pandas as pd -# Load dataset. +# Load datastream. ds = ray.data.read_csv("example://iris.csv") # UDF as a function on Pandas DataFrame batches. @@ -186,7 +182,7 @@ def pandas_transform(df: pd.DataFrame) -> pd.DataFrame: import pyarrow as pa import pyarrow.compute as pac -# Load dataset. +# Load datastream. ds = ray.data.read_csv("example://iris.csv") # UDF as a function on Arrow Table batches. @@ -211,7 +207,7 @@ def pyarrow_transform(batch: pa.Table) -> pa.Table: import ray import numpy as np -# Load dataset. +# Load datastream. ds = ray.data.read_numpy("example://mnist_subset.npy") # UDF as a function on NumPy ndarray batches. 
@@ -227,7 +223,7 @@ def normalize(arr: np.ndarray) -> np.ndarray: ds = ds.map_batches(normalize, batch_format="numpy") # -> MapBatches(normalize) -# +- Dataset(num_blocks=1, +# +- Datastream(num_blocks=1, # num_rows=3, # schema={__value__: numpy.ndarray(shape=(28, 28), dtype=uint8)} # ) @@ -238,7 +234,7 @@ def normalize(arr: np.ndarray) -> np.ndarray: # __writing_callable_classes_udfs_begin__ import ray -# Load dataset. +# Load datastream. ds = ray.data.read_csv("example://iris.csv") # UDF as a function on Pandas DataFrame batches. @@ -266,7 +262,7 @@ def __call__(self, df: pd.DataFrame) -> pd.DataFrame: import ray from typing import Iterator -# Load dataset. +# Load datastream. ds = ray.data.read_csv("example://iris.csv") # UDF to repeat the dataframe 100 times, in chunks of 20. @@ -286,9 +282,9 @@ def repeat_dataframe(df: pd.DataFrame) -> Iterator[pd.DataFrame]: import pandas as pd from typing import List -# Load dataset. +# Load datastream. ds = ray.data.from_items(["test", "string", "teststring"]) -# -> Dataset(num_blocks=1, num_rows=3, schema=) +# -> Datastream(num_blocks=1, num_rows=3, schema=) # Convert to Pandas. def convert_to_pandas(text: List[str]) -> pd.DataFrame: @@ -296,14 +292,14 @@ def convert_to_pandas(text: List[str]) -> pd.DataFrame: ds = ds.map_batches(convert_to_pandas) # -> MapBatches(convert_to_pandas) -# +- Dataset(num_blocks=3, num_rows=3, schema=) +# +- Datastream(num_blocks=3, num_rows=3, schema=) ds.show(2) # -> {'text': 'test'} # -> {'text': 'string'} print(ds) -# -> Dataset(num_blocks=3, num_rows=3, schema={text: string}) +# -> Datastream(num_blocks=3, num_rows=3, schema={text: string}) # __writing_pandas_out_udfs_end__ # fmt: on @@ -313,9 +309,9 @@ def convert_to_pandas(text: List[str]) -> pd.DataFrame: import pyarrow as pa from typing import List -# Load dataset. +# Load datastream. 
ds = ray.data.from_items(["test", "string", "teststring"]) -# -> Dataset(num_blocks=1, num_rows=3, schema=) +# -> Datastream(num_blocks=1, num_rows=3, schema=) # Convert to Arrow. def convert_to_arrow(text: List[str]) -> pa.Table: @@ -323,14 +319,14 @@ def convert_to_arrow(text: List[str]) -> pa.Table: ds = ds.map_batches(convert_to_arrow) # -> MapBatches(convert_to_arrow) -# +- Dataset(num_blocks=1, num_rows=3, schema=) +# +- Datastream(num_blocks=1, num_rows=3, schema=) ds.show(2) # -> {'text': 'test'} # -> {'text': 'string'} print(ds) -# -> Dataset(num_blocks=3, num_rows=3, schema={text: string}) +# -> Datastream(num_blocks=3, num_rows=3, schema={text: string}) # __writing_arrow_out_udfs_end__ # fmt: on @@ -341,9 +337,9 @@ def convert_to_arrow(text: List[str]) -> pa.Table: import numpy as np from typing import Dict -# Load dataset. +# Load datastream. ds = ray.data.read_csv("example://iris.csv") -# -> Dataset( +# -> Datastream( # num_blocks=1, # num_rows=150, # schema={ @@ -361,7 +357,7 @@ def convert_to_numpy(df: pd.DataFrame) -> np.ndarray: ds = ds.map_batches(convert_to_numpy) # -> MapBatches(convert_to_numpy) -# +- Dataset( +# +- Datastream( # num_blocks=1, # num_rows=150, # schema={ @@ -386,9 +382,9 @@ def convert_to_numpy(df: pd.DataFrame) -> np.ndarray: import numpy as np from typing import Dict -# Load dataset. +# Load datastream. ds = ray.data.read_csv("example://iris.csv") -# -> Dataset( +# -> Datastream( # num_blocks=1, # num_rows=150, # schema={ @@ -410,7 +406,7 @@ def convert_to_numpy(df: pd.DataFrame) -> Dict[str, np.ndarray]: ds = ds.map_batches(convert_to_numpy) # -> MapBatches(convert_to_numpy) -# +- Dataset( +# +- Datastream( # num_blocks=1, # num_rows=150, # schema={ @@ -434,9 +430,9 @@ def convert_to_numpy(df: pd.DataFrame) -> Dict[str, np.ndarray]: import pandas as pd from typing import List -# Load dataset. +# Load datastream. 
ds = ray.data.read_csv("example://iris.csv") -# -> Dataset( +# -> Datastream( # num_blocks=1, # num_rows=150, # schema={ @@ -454,7 +450,7 @@ def convert_to_list(df: pd.DataFrame) -> List[dict]: ds = ds.map_batches(convert_to_list) # -> MapBatches(convert_to_list) -# +- Dataset( +# +- Datastream( # num_blocks=1, # num_rows=150, # schema={ @@ -480,9 +476,9 @@ def convert_to_list(df: pd.DataFrame) -> List[dict]: import pandas as pd from typing import Dict -# Load dataset. +# Load datastream. ds = ray.data.range(10) -# -> Dataset(num_blocks=10, num_rows=10, schema=) +# -> Datastream(num_blocks=10, num_rows=10, schema=) # Convert row to dict. def row_to_dict(row: int) -> Dict[str, int]: @@ -490,7 +486,7 @@ def row_to_dict(row: int) -> Dict[str, int]: ds = ds.map(row_to_dict) # -> Map -# +- Dataset(num_blocks=10, num_rows=10, schema=) +# +- Datastream(num_blocks=10, num_rows=10, schema=) ds.show(2) # -> {'foo': 0} @@ -505,9 +501,9 @@ def row_to_dict(row: int) -> Dict[str, int]: import pandas as pd from typing import Dict -# Load dataset. +# Load datastream. ds = ray.data.read_csv("example://iris.csv") -# -> Dataset( +# -> Datastream( # num_blocks=1, # num_rows=150, # schema={ @@ -527,7 +523,7 @@ def map_row(row: TableRow) -> TableRow: ds = ds.map(map_row) # -> Map -# +- Dataset( +# +- Datastream( # num_blocks=1, # num_rows=150, # schema={ @@ -553,9 +549,9 @@ def map_row(row: TableRow) -> TableRow: import numpy as np from typing import Dict -# Load dataset. +# Load datastream. ds = ray.data.range(10) -# -> Dataset(num_blocks=10, num_rows=10, schema=) +# -> Datastream(num_blocks=10, num_rows=10, schema=) # Convert row to NumPy ndarray. 
def row_to_numpy(row: int) -> np.ndarray: @@ -563,7 +559,7 @@ def row_to_numpy(row: int) -> np.ndarray: ds = ds.map(row_to_numpy) # -> Map -# +- Dataset(num_blocks=10, num_rows=10, schema=) +# +- Datastream(num_blocks=10, num_rows=10, schema=) ds.show(2) # -> [[0 0] @@ -579,9 +575,9 @@ def row_to_numpy(row: int) -> np.ndarray: from ray.data.row import TableRow from typing import List -# Load dataset. +# Load datastream. ds = ray.data.read_csv("example://iris.csv") -# -> Dataset( +# -> Datastream( # num_blocks=1, # num_rows=150, # schema={ @@ -599,7 +595,7 @@ def map_row(row: TableRow) -> tuple: ds = ds.map(map_row) # -> Map -# +- Dataset( +# +- Datastream( # num_blocks=1, # num_rows=150, # schema={ @@ -624,7 +620,7 @@ def map_row(row: TableRow) -> tuple: import ray import pandas as pd -# Load dataset. +# Load datastream. ds = ray.data.read_csv("example://iris.csv") # UDF as a function on Pandas DataFrame batches. @@ -640,7 +636,7 @@ def pandas_transform(df: pd.DataFrame) -> pd.DataFrame: # Have each batch that pandas_transform receives contain 10 rows. ds = ds.map_batches(pandas_transform, batch_size=10) # -> MapBatches(pandas_transform) -# +- Dataset( +# +- Datastream( # num_blocks=1, # num_rows=150, # schema={ @@ -661,7 +657,7 @@ def pandas_transform(df: pd.DataFrame) -> pd.DataFrame: # fmt: on # fmt: off -# __dataset_compute_strategy_begin__ +# __datastream_compute_strategy_begin__ import ray import pandas import numpy @@ -689,8 +685,8 @@ def __call__(self, batch: pandas.DataFrame) -> pandas.DataFrame: # Batch inference processing with Ray tasks (the default compute strategy). predicted = ds.map_batches(predict_iris) -# Batch inference processing with Ray actors. Autoscale the actors between 3 and 10. +# Batch inference processing with Ray actors (pool of size 5). 
predicted = ds.map_batches( - IrisInferModel, compute=ActorPoolStrategy(min_size=3, max_size=10), batch_size=10) -# __dataset_compute_strategy_end__ + IrisInferModel, compute=ActorPoolStrategy(size=5), batch_size=10) +# __datastream_compute_strategy_end__ # fmt: on diff --git a/doc/source/data/examples/advanced-pipelines.rst b/doc/source/data/examples/advanced-pipelines.rst deleted file mode 100644 index 2b41a66d18e6..000000000000 --- a/doc/source/data/examples/advanced-pipelines.rst +++ /dev/null @@ -1,112 +0,0 @@ -.. _data_pipeline_usage: - --------------------------- -Advanced Pipeline Examples --------------------------- - -This page covers more advanced examples for dataset pipelines. - -.. _dataset-pipeline-per-epoch-shuffle: - -Pre-repeat vs post-repeat transforms -==================================== - -Transformations prior to the call to ``.repeat()`` will be cached. However, note that the initial read will not be cached unless there is a subsequent transformation or ``.materialize()`` call. Transformations made to the DatasetPipeline after the repeat will always be executed once for each repetition of the Dataset. - -For example, in the following pipeline, the ``map(func)`` transformation only occurs once. However, the random shuffle is applied to each repetition in the pipeline. However, if we omitted the map transformation, then the pipeline would re-read from the base data on each repetition. - -.. note:: - Global per-epoch shuffling is an expensive operation that will slow down your ML - ingest pipeline, prevents you from using a fully-streaming ML ingest pipeline, and - can cause large increases in memory utilization and spilling to disk; only use - global per-epoch shuffling if your model benefits from it! If your model doesn't - benefit from global per-epoch shuffling and/or you run into performance or stability - issues, you should try out windowed or local per-epoch shuffling. - -**Code**: - -.. 
code-block:: python - - # Create a pipeline that loops over its source dataset indefinitely. - pipe: DatasetPipeline = ray.data \ - .read_datasource(...) \ - .map(func) \ - .repeat() \ - .random_shuffle_each_window() - - @ray.remote(num_gpus=1) - def train_func(pipe: DatasetPipeline): - model = MyModel() - for batch in pipe.iter_torch_batches(): - model.fit(batch) - - # Read from the pipeline in a remote training function. - ray.get(train_func.remote(pipe)) - - -**Pipeline**: - -.. image:: ../images/dataset-repeat-1.svg - -.. important:: - - Result caching only applies if there are *transformation* stages prior to the pipelining operation. If you ``repeat()`` or ``window()`` a Dataset right after the read call (e.g., ``ray.data.read_parquet(...).repeat()``), then the read will still be re-executed on each repetition. This optimization saves memory, at the cost of repeated reads from the datasource. To force result caching in all cases, use ``.materialize().repeat()``. - -Changing Pipeline Structure -=========================== - -Sometimes, you may want to change the structure of an existing pipeline. For example, after generating a pipeline with ``ds.window(k)``, you may want to repeat that windowed pipeline ``n`` times. This can be done with ``ds.window(k).repeat(n)``. As another example, suppose you have a repeating pipeline generated with ``ds.repeat(n)``. The windowing of that pipeline can be changed with ``ds.repeat(n).rewindow(k)``. Note the subtle difference in the two examples: the former is repeating a windowed pipeline that has a base window size of ``k``, while the latter is re-windowing a pipeline of initial window size of ``ds.num_blocks()``. The latter may produce windows that span multiple copies of the same original data if ``preserve_epoch=False`` is set: - -.. code-block:: python - - # Window followed by repeat. 
- ray.data.from_items([0, 1, 2, 3, 4]) \ - .window(blocks_per_window=2) \ - .repeat(2) \ - .show_windows() - # -> - # ------ Epoch 0 ------ - # === Window 0 === - # 0 - # 1 - # === Window 1 === - # 2 - # 3 - # === Window 2 === - # 4 - # ------ Epoch 1 ------ - # === Window 3 === - # 0 - # 1 - # === Window 4 === - # 2 - # 3 - # === Window 5 === - # 4 - - # Repeat followed by window. Since preserve_epoch=True, at epoch boundaries - # windows may be smaller than the target size. If it was set to False, all - # windows except the last would be the target size. - ray.data.from_items([0, 1, 2, 3, 4]) \ - .repeat(2) \ - .rewindow(blocks_per_window=2, preserve_epoch=True) \ - .show_windows() - # -> - # ------ Epoch 0 ------ - # === Window 0 === - # 0 - # 1 - # === Window 1 === - # 2 - # 3 - # === Window 2 === - # 4 - # ------ Epoch 1 ------ - # === Window 3 === - # 0 - # 1 - # === Window 4 === - # 2 - # 3 - # === Window 5 === - # 4 diff --git a/doc/source/data/examples/batch_training.ipynb b/doc/source/data/examples/batch_training.ipynb index c873dbd716e4..67c34134bac7 100644 --- a/doc/source/data/examples/batch_training.ipynb +++ b/doc/source/data/examples/batch_training.ipynb @@ -6,7 +6,7 @@ "source": [ "(mmt-datasets)=\n", "\n", - "# Batch Training with Ray Datasets" + "# Batch Training with Ray Data" ] }, { @@ -15,7 +15,7 @@ "source": [ "**Batch training** and tuning are common tasks in simple machine learning use-cases such as time series forecasting. They require fitting of simple models on data batches corresponding to different locations, products, etc. 
Batch training can take less time to process all the data at once, but only if those batches can run in parallel!\n", "\n", - "This notebook showcases how to conduct batch training regression algorithms from [XGBoost](https://docs.ray.io/en/latest/tune/examples/tune-xgboost.html) and [Scikit-learn](https://docs.ray.io/en/latest/ray-more-libs/joblib.html) with **[Ray Datasets](https://docs.ray.io/en/latest/data/dataset.html)**. **XGBoost** is a popular open-source library used for regression and classification. **Scikit-learn** is a popular open-source library with a vast assortment of well-known ML algorithms.\n", + "This notebook showcases how to conduct batch training regression algorithms from [XGBoost](https://docs.ray.io/en/latest/tune/examples/tune-xgboost.html) and [Scikit-learn](https://docs.ray.io/en/latest/ray-more-libs/joblib.html) with **[Ray Data](https://docs.ray.io/en/latest/data/data.html)**. **XGBoost** is a popular open-source library used for regression and classification. **Scikit-learn** is a popular open-source library with a vast assortment of well-known ML algorithms.\n", "\n", "```{tip}\n", "The workload showcased in this notebook can be expressed using different Ray components, such as Ray Data, Ray Tune and Ray Core.\n", @@ -37,11 +37,11 @@ "# Contents\n", "\n", "In this this tutorial, you will learn about:\n", - " 1. [Creating a Ray Dataset](#create_ds)\n", - " 2. [Filtering a Ray Dataset on Read](#filter_ds)\n", - " 3. [Inspecting a Ray Dataset](#inspect_ds)\n", - " 4. [Transforming a Ray Dataset in parallel](#transform_ds)\n", - " 5. [Batch training with Ray Datasets in parallel](#batch_train_ds)\n", + " 1. [Creating a Datastream](#create_ds)\n", + " 2. [Filtering a Datastream on Read](#filter_ds)\n", + " 3. [Inspecting a Datastream](#inspect_ds)\n", + " 4. [Transforming a Datastream in parallel](#transform_ds)\n", + " 5. [Batch training with Ray Data in parallel](#batch_train_ds)\n", " 6. 
[Load a saved model and perform batch prediction](#load_model)\n", "\n", "# Walkthrough\n", @@ -80,7 +80,7 @@ "import pyarrow.dataset as pds\n", "\n", "print(f\"pyarrow: {pyarrow.__version__}\")\n", - "from ray.data import Dataset" + "from ray.data import Datastream" ] }, { @@ -201,7 +201,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Creating Ray Dataset " + "## Creating a Datastream " ] }, { @@ -209,10 +209,10 @@ "metadata": {}, "source": [ "```{tip}\n", - "Ray Datasets uses PyArrow dataset and table for reading or writing large parquet files. Its native multithreaded C++ adpater is faster than pandas `read_parquet`, even using `engine='pyarrow'`. For more details see [Ray Datasets User Guide](https://docs.ray.io/en/latest/data/user-guide.html).\n", + "Ray Data uses PyArrow dataset and table for reading or writing large parquet files. Its native multithreaded C++ adpater is faster than pandas `read_parquet`, even using `engine='pyarrow'`. For more details see [Ray Data User Guide](https://docs.ray.io/en/latest/data/user-guide.html).\n", "```\n", "\n", - "[Ray Datasets](datasets) are the standard way to load and exchange data in Ray libraries and applications. We will use the [Ray Dataset APIs](dataset-api) to read the data and quickly inspect it.\n", + "[Ray Data](data) is the standard way to load and exchange data in Ray libraries and applications. We will use the [Ray Data APIs](data-api) to read the data and quickly inspect it.\n", "\n", "First, we will define some global variables we will use throughout the notebook, such as the list of S3 links to the files making up the dataset and the possible location IDs." ] @@ -289,7 +289,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "### Filtering a Ray Dataset on Read \n", + "### Filtering a Datastream on Read \n", "\n", "Normally there is some last-mile data processing required before training. 
Let's just assume we know the data processing steps are:\n", "- Drop negative trip distances, 0 fares, 0 passengers.\n", @@ -300,10 +300,10 @@ "Instead of blindly reading all the data, it would be better if we only read the data we needed. This is similar concept to SQL `SELECT only rows, columns you need` vs `SELECT *`.\n", "\n", "```{tip}\n", - "Best practice is to filter as much as you can directly in the Ray Dataset `read_parquet()`.\n", + "Best practice is to filter as much as you can directly in the Datastream `read_parquet()`.\n", "```\n", "\n", - "Note that Ray Datasets' Parquet reader supports projection (column selection) and row filter pushdown, where we can push the above column selection and the row-based filter to the Parquet read. If we specify column selection at Parquet read time, the unselected columns won't even be read from disk. This can save a lot of memory, especially with big datasets, and allow us to avoid OOM issues.\n", + "Note that Ray Data' Parquet reader supports projection (column selection) and row filter pushdown, where we can push the above column selection and the row-based filter to the Parquet read. If we specify column selection at Parquet read time, the unselected columns won't even be read from disk. This can save a lot of memory, especially with big datasets, and allow us to avoid OOM issues.\n", "\n", "The row-based filter is specified via [Arrow's dataset field expressions](https://arrow.apache.org/docs/6.0/python/generated/pyarrow.dataset.Expression.html#pyarrow.dataset.Expression). 
\n" ] @@ -314,7 +314,7 @@ "metadata": {}, "outputs": [], "source": [ - "def pushdown_read_data(files_list: list, sample_ids: list) -> Dataset:\n", + "def pushdown_read_data(files_list: list, sample_ids: list) -> Datastream:\n", " start = time.time()\n", "\n", " filter_expr = (\n", @@ -368,11 +368,11 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "### Inspecting a Ray Dataset \n", + "### Inspecting a Datastream \n", "\n", - "Let's get some basic statistics about our newly created Ray Dataset.\n", + "Let's get some basic statistics about our newly created Datastream.\n", "\n", - "As our Ray Dataset is backed by Parquet, we can obtain the number of rows from the metadata without triggering a full data read.\n" + "As our Datastream is backed by Parquet, we can obtain the number of rows from the metadata without triggering a full data read.\n" ] }, { @@ -396,7 +396,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Similarly, we can obtain the Dataset size (in bytes) from the metadata.\n" + "Similarly, we can obtain the Datastream size (in bytes) from the metadata.\n" ] }, { @@ -456,12 +456,12 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "### Transforming a Ray Dataset in parallel using custom functions \n", + "### Transforming a Datastream in parallel using custom functions \n", "\n", - "Ray Datasets allows you to specify custom data transform functions. These [user defined functions (UDFs)](transforming_datasets) can be called using `Dataset.map_batches(my_function)`. The transformation will be conducted in parallel for each data batch.\n", + "Ray Data allows you to specify custom data transform functions. These [user defined functions (UDFs)](transforming_datastreams) can be called using `Datastream.map_batches(my_function)`. The transformation will be conducted in parallel for each data batch.\n", "\n", "```{tip}\n", - "You may need to call `Dataset.repartition(n)` first to split the Dataset into more blocks internally. 
By default, each block corresponds to one file. The upper bound of parallelism is the number of blocks.\n", + "You may need to call `Datastream.repartition(n)` first to split the Datastream into more blocks internally. By default, each block corresponds to one file. The upper bound of parallelism is the number of blocks.\n", "```\n", "\n", "You can specify the data format you are using in the `batch_format` parameter. The dataset will be divided into batches and those batches converted into the specified format. Available data formats you can specify in the `batch_format` paramater include `\"pandas\", \"pyarrow\", \"numpy\"`. Tabular data will be passed into your UDF by default as a pandas DataFrame. Tensor data will be passed into your UDF as a numpy array.\n", @@ -475,7 +475,7 @@ "metadata": {}, "outputs": [], "source": [ - "# A pandas DataFrame UDF for transforming the Dataset in parallel.\n", + "# A pandas DataFrame UDF for transforming the Datastream in parallel.\n", "def transform_df(input_df: pd.DataFrame) -> pd.DataFrame:\n", " df = input_df.copy()\n", "\n", @@ -554,7 +554,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Batch training with Ray Datasets " + "## Batch training with Ray Data " ] }, { @@ -685,7 +685,7 @@ "source": [ "The `train_and_evaluate` function contains the logic for train-test splitting and fitting of a model using the `fit_and_score_sklearn` function.\n", "\n", - "As an input, this function takes in a pandas DataFrame. When we call `Dataset.map_batches` or `Dataset.groupby().map_groups()`, the Dataset will be batched into multiple pandas DataFrames and this function will run for each batch in parallel. We will return the model and its error. Those results will be collected back into a Ray Dataset." + "As an input, this function takes in a pandas DataFrame. 
When we call `Datastream.map_batches` or `Datastream.groupby().map_groups()`, the Datastream will be batched into multiple pandas DataFrames and this function will run for each batch in parallel. We will return the model and its error. Those results will be collected back into a Datastream." ] }, { @@ -730,10 +730,10 @@ "metadata": {}, "source": [ "Recall how we wrote a data transform `transform_batch` UDF? It was called with pattern:\n", - "- `Dataset.map_batches(transform_batch, batch_format=\"pandas\")`\n", + "- `Datastream.map_batches(transform_batch, batch_format=\"pandas\")`\n", "\n", - "Similarly, we can write a custom groupy-aggregate function `agg_func` which will run for each [Ray Dataset *group-by*](datasets-groupbys) group in parallel. The usage pattern is:\n", - "- `Dataset.groupby(column).map_groups(agg_func, batch_format=\"pandas\")`.\n", + "Similarly, we can write a custom groupy-aggregate function `agg_func` which will run for each [Datastream *group-by*](data-groupbys) group in parallel. The usage pattern is:\n", + "- `Datastream.groupby(column).map_groups(agg_func, batch_format=\"pandas\")`.\n", "\n", "In the cell below, we define our custom `agg_func`." ] @@ -745,7 +745,7 @@ "outputs": [], "source": [ "# A Pandas DataFrame aggregation function for processing\n", - "# grouped batches of Ray Dataset data.\n", + "# grouped batches of Datastream data.\n", "def agg_func(df: pd.DataFrame) -> pd.DataFrame:\n", " location_id = df[\"dropoff_location_id\"][0]\n", "\n", @@ -772,9 +772,9 @@ "source": [ "### Run batch training using `map_groups`\n", "\n", - "The main \"driver code\" reads each Parquet file (where each file corresponds to one month of NYC taxi data) into a Ray Dataset `ds`. \n", + "The main \"driver code\" reads each Parquet file (where each file corresponds to one month of NYC taxi data) into a Datastream `ds`. 
\n", "\n", - "Then we use Ray Dataset *group-by* to map each group into a batch of data and run `agg_func` on each grouping in parallel by calling `ds.groupby(\"dropoff_location_id\").map_groups(agg_func, batch_format=\"pandas\")`." + "Then we use Datastream *group-by* to map each group into a batch of data and run `agg_func` on each grouping in parallel by calling `ds.groupby(\"dropoff_location_id\").map_groups(agg_func, batch_format=\"pandas\")`." ] }, { @@ -813,12 +813,12 @@ "\n", "start = time.time()\n", "\n", - "# Read data into Ray Dataset\n", + "# Read data into Datastream\n", "# ds = pushdown_read_data(s3_files, sample_locations)\\\n", "# .repartition(14)\\\n", "# .ds.map_batches(transform_df, batch_format=\"pandas\")\n", "\n", - "# Use Ray Dataset groupby.map_groups() to process each group in parallel and return a Ray Dataset.\n", + "# Use Datastream groupby.map_groups() to process each group in parallel and return a Datastream.\n", "results = ds.groupby(\"dropoff_location_id\").map_groups(agg_func, batch_format=\"pandas\")\n", "\n", "total_time_taken = time.time() - start\n", @@ -841,7 +841,7 @@ { "data": { "text/plain": [ - "Dataset(num_blocks=6, num_rows=6, schema={location_id: int32, model: object, error: float64})" + "Datastream(num_blocks=6, num_rows=6, schema={location_id: int32, model: object, error: float64})" ] }, "execution_count": 20, diff --git a/doc/source/data/examples/index.rst b/doc/source/data/examples/index.rst index 3bcec2037a62..62750e4885eb 100644 --- a/doc/source/data/examples/index.rst +++ b/doc/source/data/examples/index.rst @@ -1,20 +1,20 @@ -.. _datasets-examples-ref: +.. _data-examples-ref: ======== Examples ======== -.. tip:: Check out the Datasets :ref:`User Guide ` to learn more about - Datasets' features in-depth. +.. tip:: Check out the Datastreams :ref:`User Guide ` to learn more about + Datastream features in-depth. -.. _datasets-recipes: +.. 
_data-recipes: Simple Data Processing Examples ------------------------------- -Ray Datasets is a data processing engine that supports multiple data +Ray Data is a data processing engine that supports multiple data modalities and types. Here you will find a few end-to-end examples of some basic data -processing with Ray Datasets on tabular data, text (coming soon!), and imagery (coming +processing with Ray Data on tabular data, text (coming soon!), and imagery (coming soon!). .. panels:: @@ -36,7 +36,7 @@ soon!). +++ .. link-button:: batch_training :type: ref - :text: Batch Training with Ray Datasets + :text: Batch Training with Ray Data :classes: btn-link btn-block stretched-link --- :img-top: /images/ocr.jpg @@ -44,7 +44,7 @@ soon!). +++ .. link-button:: ocr_example :type: ref - :text: Scaling OCR with Ray Datasets + :text: Scaling OCR with Ray Data :classes: btn-link btn-block stretched-link @@ -57,15 +57,7 @@ Other Examples :img-top-cls: pt-5 w-75 d-block mx-auto --- - :img-top: /images/dataset-repeat-2.svg - - +++ - .. link-button:: advanced-pipelines - :type: ref - :text: Advanced Pipeline Examples - :classes: btn-link btn-block stretched-link - --- - :img-top: ../images/dataset-arch.svg + :img-top: ../images/datastream-arch.svg +++ .. link-button:: random-access diff --git a/doc/source/data/examples/nyc_taxi_basic_processing.ipynb b/doc/source/data/examples/nyc_taxi_basic_processing.ipynb index 15b2983aaeae..d31f303b2311 100644 --- a/doc/source/data/examples/nyc_taxi_basic_processing.ipynb +++ b/doc/source/data/examples/nyc_taxi_basic_processing.ipynb @@ -25,20 +25,20 @@ "id": "af627a74", "metadata": {}, "source": [ - "# Processing NYC taxi data using Ray Datasets\n", + "# Processing NYC taxi data using Ray Data\n", "\n", - "The [NYC Taxi dataset](https://www1.nyc.gov/site/tlc/about/tlc-trip-record-data.page) is a popular tabular dataset. 
In this example, we demonstrate some basic data processing on this dataset using Ray Datasets.\n", + "The [NYC Taxi dataset](https://www1.nyc.gov/site/tlc/about/tlc-trip-record-data.page) is a popular tabular dataset. In this example, we demonstrate some basic data processing on this dataset using Ray Data.\n", "\n", "## Overview\n", "\n", "This tutorial will cover:\n", " - Reading Parquet data\n", - " - Inspecting the metadata and first few rows of a large Ray {class}`Dataset `\n", + " - Inspecting the metadata and first few rows of a large Ray {class}`Datastream `\n", " - Calculating some common global and grouped statistics on the dataset\n", " - Dropping columns and rows\n", " - Adding a derived column\n", - " - Shuffling the dataset\n", - " - Sharding the dataset and feeding it to parallel consumers (trainers)\n", + " - Shuffling the data\n", + " - Sharding the data and feeding it to parallel consumers (trainers)\n", " - Applying batch (offline) inference to the data\n", "\n", "## Walkthrough\n", @@ -68,9 +68,7 @@ "source": [ "### Reading and Inspecting the Data\n", "\n", - "Next, we read a few of the files from the dataset. This read is lazy, where reading and all future transformations are delayed until a downstream operation triggers execution (e.g. consuming the data with {meth}`ds.take() `)\n", - "\n", - "We could process the entire Dataset in a streaming fashion using pipelining or all of it in parallel using a multi-node Ray cluster, but we save that for our large-scale examples." + "Next, we read a few of the files from the dataset. This read is lazy, where reading and all future transformations are delayed until a downstream operation triggers execution (e.g. consuming the data with {meth}`ds.take() `)\n" ] }, { @@ -87,7 +85,7 @@ "name": "stderr", "output_type": "stream", "text": [ - "⚠️ The number of blocks in this dataset (2) limits its parallelism to 2 concurrent tasks. This is much less than the number of available CPU slots in the cluster. 
Use `.repartition(n)` to increase the number of dataset blocks.\n" + "⚠️ The number of blocks in this datastream (2) limits its parallelism to 2 concurrent tasks. This is much less than the number of available CPU slots in the cluster. Use `.repartition(n)` to increase the number of datastream blocks.\n" ] } ], @@ -104,7 +102,7 @@ "id": "a4d4769c", "metadata": {}, "source": [ - "We can easily inspect the schema of this dataset. For Parquet files, we don't even have to read the actual data to get the schema; we can read it from the lightweight Parquet metadata!" + "We can easily inspect the schema of this datastream. For Parquet files, we don't even have to read the actual data to get the schema; we can read it from the lightweight Parquet metadata!" ] }, { @@ -182,7 +180,7 @@ "id": "87fd9a17", "metadata": {}, "source": [ - "We can get a nice, cheap summary of the ``Dataset`` by leveraging it's informative repr:" + "We can get a nice, cheap summary of the ``Datastream`` by leveraging it's informative repr:" ] }, { @@ -194,7 +192,7 @@ { "data": { "text/plain": [ - "Dataset(num_blocks=2, num_rows=2749936, schema={vendor_id: string, pickup_at: timestamp[us], dropoff_at: timestamp[us], passenger_count: int8, trip_distance: float, pickup_longitude: float, pickup_latitude: float, rate_code_id: null, store_and_fwd_flag: string, dropoff_longitude: float, dropoff_latitude: float, payment_type: string, fare_amount: float, extra: float, mta_tax: float, tip_amount: float, tolls_amount: float, total_amount: float})" + "Datastream(num_blocks=2, num_rows=2749936, schema={vendor_id: string, pickup_at: timestamp[us], dropoff_at: timestamp[us], passenger_count: int8, trip_distance: float, pickup_longitude: float, pickup_latitude: float, rate_code_id: null, store_and_fwd_flag: string, dropoff_longitude: float, dropoff_latitude: float, payment_type: string, fare_amount: float, extra: float, mta_tax: float, tip_amount: float, tolls_amount: float, total_amount: float})" ] }, 
"execution_count": 6, @@ -203,7 +201,7 @@ } ], "source": [ - "# Display some metadata about the dataset.\n", + "# Display some metadata about the datastream.\n", "ds" ] }, @@ -258,7 +256,7 @@ "id": "a3fb551b", "metadata": {}, "source": [ - "To get a better sense of the data size, we can calculate the size in bytes of the full dataset. Note that for Parquet files, this size-in-bytes will be pulled from the Parquet metadata (not triggering a data read), and therefore might be significantly different than the in-memory size!" + "To get a better sense of the data size, we can calculate the size in bytes of the full datastream. Note that for Parquet files, this size-in-bytes will be pulled from the Parquet metadata (not triggering a data read), and therefore might be significantly different than the in-memory size!" ] }, { @@ -289,7 +287,7 @@ "id": "cb4515bf", "metadata": {}, "source": [ - "In order to get the in-memory size, we can trigger full reading of the dataset and inspect the size in bytes." + "In order to get the in-memory size, we can trigger full reading of the datastream and inspect the size in bytes." ] }, { @@ -332,7 +330,7 @@ "For the NYC taxi dataset, instead of reading individual per-month Parquet files, we can read the entire 2009 directory.\n", "\n", "```{warning}\n", - "This could be a lot of data (downsampled with 0.01 ratio leads to ~50.2 MB on disk, ~147 MB in memory), so be careful triggering full reads on a limited-memory machine! This is one place where Datasets' lazy reading comes in handy: Datasets will not execute any read tasks eagerly and will execute the minimum number of file reads to satisfy downstream operations, which allows us to inspect a subset of the data without having to read the entire dataset.\n", + "This could be a lot of data (downsampled with 0.01 ratio leads to ~50.2 MB on disk, ~147 MB in memory), so be careful triggering full reads on a limited-memory machine! 
This is one place where Datastream's lazy reading comes in handy: Datastream will not execute any read tasks eagerly and will execute the minimum number of file reads to satisfy downstream operations, which allows us to inspect a subset of the data without having to read the entire dataset.\n", "```" ] }, @@ -358,7 +356,7 @@ "id": "6616a15d", "metadata": {}, "source": [ - "The metadata that Datasets prints in its repr is guaranteed to not trigger reads of all files; data such as the row count and the schema is pulled directly from the Parquet metadata." + "The metadata that Datastream prints in its repr is guaranteed to not trigger reads of all files; data such as the row count and the schema is pulled directly from the Parquet metadata." ] }, { @@ -387,7 +385,7 @@ "id": "e61dd6d7", "metadata": {}, "source": [ - "That's a lot of rows! Since we're not going to use this full-year dataset, let's now delete this dataset to free up some memory in our Ray cluster." + "That's a lot of rows! Since we're not going to use this full-year data, let's now delete this datastream to free up some memory in our Ray cluster." ] }, { @@ -593,7 +591,7 @@ "id": "0ade2a72", "metadata": {}, "source": [ - "See {ref}`Transforming Datasets ` for more information on how we can process our data with Ray Datasets." + "See {ref}`Transforming Datastreams ` for more information on how we can process our data with Ray Data." ] }, { @@ -603,10 +601,10 @@ "source": [ "#### Advanced Aside - Projection and Filter Pushdown\n", "\n", - "Note that Ray Datasets' Parquet reader supports projection (column selection) and row filter pushdown, where we can push the above column selection and the row-based filter to the Parquet read. 
If we specify column selection at Parquet read time, the unselected columns won't even be read from disk!\n", + "Note that Ray Data' Parquet reader supports projection (column selection) and row filter pushdown, where we can push the above column selection and the row-based filter to the Parquet read. If we specify column selection at Parquet read time, the unselected columns won't even be read from disk!\n", "\n", "The row-based filter is specified via\n", - "[Arrow's dataset field expressions](https://arrow.apache.org/docs/6.0/python/generated/pyarrow.dataset.Expression.html#pyarrow.dataset.Expression). See the {ref}`feature guide for reading Parquet data ` for more information." + "[Arrow's dataset field expressions](https://arrow.apache.org/docs/6.0/python/generated/pyarrow.dataset.Expression.html#pyarrow.dataset.Expression). See the {ref}`feature guide for reading Parquet data ` for more information." ] }, { @@ -621,14 +619,14 @@ "name": "stderr", "output_type": "stream", "text": [ - "⚠️ The number of blocks in this dataset (2) limits its parallelism to 2 concurrent tasks. This is much less than the number of available CPU slots in the cluster. Use `.repartition(n)` to increase the number of dataset blocks.\n", + "⚠️ The number of blocks in this datastream (2) limits its parallelism to 2 concurrent tasks. This is much less than the number of available CPU slots in the cluster. Use `.repartition(n)` to increase the number of datastream blocks.\n", "Read progress: 100%|██████████| 2/2 [00:00<00:00, 9.19it/s]\n" ] }, { "data": { "text/plain": [ - "Dataset(num_blocks=2, num_rows=2749842, schema={passenger_count: int8, trip_distance: float})" + "Datastream(num_blocks=2, num_rows=2749842, schema={passenger_count: int8, trip_distance: float})" ] }, "execution_count": 19, @@ -665,7 +663,7 @@ "metadata": {}, "outputs": [], "source": [ - "# Delete the pushdown dataset. Deleting the Dataset object\n", + "# Delete the pushdown datastream. 
Deleting the Datastream object\n", "# will release the underlying memory in the cluster.\n", "del pushdown_ds" ] @@ -679,7 +677,7 @@ "\n", "Now that we've learned more about our data and we have cleaned up our dataset a bit, we now look at how we can feed this dataset into some dummy model trainers.\n", "\n", - "First, let's do a full global random shuffle of the dataset to decorrelate these samples." + "First, let's do a full global random shuffle of the datastream to decorrelate these samples." ] }, { @@ -708,7 +706,7 @@ "id": "ff05b6ea", "metadata": {}, "source": [ - "We define a dummy ``Trainer`` actor, where each trainer will consume a dataset shard in batches and simulate model training.\n", + "We define a dummy ``Trainer`` actor, where each trainer will consume a datastream shard in batches and simulate model training.\n", "\n", ":::{note}\n", "In a real training workflow, we would feed ``ds`` to {ref}`Ray Train `, which would do this sharding and creation of training actors for us, under the hood.\n" @@ -740,7 +738,7 @@ " def __init__(self, rank: int):\n", " pass\n", "\n", - " def train(self, shard: ray.data.Dataset) -> int:\n", + " def train(self, shard: ray.data.Datastream) -> int:\n", " for batch in shard.iter_batches(batch_size=256):\n", " pass\n", " return shard.count()\n", @@ -754,7 +752,7 @@ "id": "9a1afb70", "metadata": {}, "source": [ - "Next, we split the dataset into ``len(trainers)`` shards, ensuring that the shards are of equal size." + "Next, we split the datastream into ``len(trainers)`` shards, ensuring that the shards are of equal size." 
] }, { @@ -766,10 +764,10 @@ { "data": { "text/plain": [ - "[Dataset(num_blocks=1, num_rows=687460, schema={vendor_id: object, pickup_at: datetime64[ns], dropoff_at: datetime64[ns], passenger_count: int8, trip_distance: float32, pickup_longitude: float32, pickup_latitude: float32, rate_code_id: object, dropoff_longitude: float32, dropoff_latitude: float32, payment_type: object, fare_amount: float32, extra: float32, tip_amount: float32, tolls_amount: float32, total_amount: float32}),\n", - " Dataset(num_blocks=1, num_rows=687460, schema={vendor_id: object, pickup_at: datetime64[ns], dropoff_at: datetime64[ns], passenger_count: int8, trip_distance: float32, pickup_longitude: float32, pickup_latitude: float32, rate_code_id: object, dropoff_longitude: float32, dropoff_latitude: float32, payment_type: object, fare_amount: float32, extra: float32, tip_amount: float32, tolls_amount: float32, total_amount: float32}),\n", - " Dataset(num_blocks=2, num_rows=687460, schema={vendor_id: object, pickup_at: datetime64[ns], dropoff_at: datetime64[ns], passenger_count: int8, trip_distance: float32, pickup_longitude: float32, pickup_latitude: float32, rate_code_id: object, dropoff_longitude: float32, dropoff_latitude: float32, payment_type: object, fare_amount: float32, extra: float32, tip_amount: float32, tolls_amount: float32, total_amount: float32}),\n", - " Dataset(num_blocks=1, num_rows=687460, schema={vendor_id: object, pickup_at: datetime64[ns], dropoff_at: datetime64[ns], passenger_count: int8, trip_distance: float32, pickup_longitude: float32, pickup_latitude: float32, rate_code_id: object, dropoff_longitude: float32, dropoff_latitude: float32, payment_type: object, fare_amount: float32, extra: float32, tip_amount: float32, tolls_amount: float32, total_amount: float32})]" + "[Datastream(num_blocks=1, num_rows=687460, schema={vendor_id: object, pickup_at: datetime64[ns], dropoff_at: datetime64[ns], passenger_count: int8, trip_distance: float32, pickup_longitude: float32, 
pickup_latitude: float32, rate_code_id: object, dropoff_longitude: float32, dropoff_latitude: float32, payment_type: object, fare_amount: float32, extra: float32, tip_amount: float32, tolls_amount: float32, total_amount: float32}),\n", + " Datastream(num_blocks=1, num_rows=687460, schema={vendor_id: object, pickup_at: datetime64[ns], dropoff_at: datetime64[ns], passenger_count: int8, trip_distance: float32, pickup_longitude: float32, pickup_latitude: float32, rate_code_id: object, dropoff_longitude: float32, dropoff_latitude: float32, payment_type: object, fare_amount: float32, extra: float32, tip_amount: float32, tolls_amount: float32, total_amount: float32}),\n", + " Datastream(num_blocks=2, num_rows=687460, schema={vendor_id: object, pickup_at: datetime64[ns], dropoff_at: datetime64[ns], passenger_count: int8, trip_distance: float32, pickup_longitude: float32, pickup_latitude: float32, rate_code_id: object, dropoff_longitude: float32, dropoff_latitude: float32, payment_type: object, fare_amount: float32, extra: float32, tip_amount: float32, tolls_amount: float32, total_amount: float32}),\n", + " Datastream(num_blocks=1, num_rows=687460, schema={vendor_id: object, pickup_at: datetime64[ns], dropoff_at: datetime64[ns], passenger_count: int8, trip_distance: float32, pickup_longitude: float32, pickup_latitude: float32, rate_code_id: object, dropoff_longitude: float32, dropoff_latitude: float32, payment_type: object, fare_amount: float32, extra: float32, tip_amount: float32, tolls_amount: float32, total_amount: float32})]" ] }, "execution_count": 24, @@ -832,7 +830,7 @@ "```{tip}\n", "Refer to the blog on [Model Batch Inference in Ray](https://www.anyscale.com/blog/model-batch-inference-in-ray-actors-actorpool-and-datasets) for an overview of batch inference strategies in Ray and additional examples.\n", "```\n", - "After we've trained a model, we may want to perform batch (offline) inference on such a tabular dataset. 
With Ray Datasets, this is as easy as a {meth}`ds.map_batches() ` call!\n", + "After we've trained a model, we may want to perform batch (offline) inference on such a tabular dataset. With Ray Data, this is as easy as a {meth}`ds.map_batches() ` call!\n", "\n", "First, we define a callable class that will cache the loading of the model in its constructor." ] @@ -865,7 +863,7 @@ "id": "0c1ba955", "metadata": {}, "source": [ - "``BatchInferModel``'s constructor will only be called once per actor worker when using the actor pool compute strategy in {meth}`ds.map_batches() `." + "``BatchInferModel``'s constructor will only be called once per actor worker when using the actor pool compute strategy in {meth}`ds.map_batches() `." ] }, { diff --git a/doc/source/data/examples/ocr_example.ipynb b/doc/source/data/examples/ocr_example.ipynb index c7bd6ebea7fd..637fd8cf80ec 100644 --- a/doc/source/data/examples/ocr_example.ipynb +++ b/doc/source/data/examples/ocr_example.ipynb @@ -25,18 +25,18 @@ "id": "6945c179", "metadata": {}, "source": [ - "# Scaling OCR using Ray Datasets\n", + "# Scaling OCR using Ray Data\n", "\n", - "In this example, we will show you how to run optical character recognition (OCR) on a set of documents and analyze the resulting text with the natural language processing library [spaCy](https://spacy.io/). Running OCR on a large dataset is very computationally expensive, so using Ray for distributed processing can really speed up the analysis. Ray Datasets makes it easy to compose the different steps of the pipeline, namely the OCR and the natural language processing. Ray Datasets' actor support also allows us to be more efficient by sharing the spaCy NLP context between several datapoints.\n", + "In this example, we will show you how to run optical character recognition (OCR) on a set of documents and analyze the resulting text with the natural language processing library [spaCy](https://spacy.io/). 
Running OCR on a large dataset is very computationally expensive, so using Ray for distributed processing can really speed up the analysis. Ray Data makes it easy to compose the different steps of the pipeline, namely the OCR and the natural language processing. Ray Data's actor support also allows us to be more efficient by sharing the spaCy NLP context between several datapoints.\n", "\n", "To make it more interesting, we will run the analysis on the [LightShot](https://www.kaggle.com/datasets/datasnaek/lightshot) dataset. It is a large publicly available OCR dataset with a wide variety of different documents, all of them screenshots of various forms. It is easy to replace that dataset with your own data and adapt the example to your own use cases!\n", "\n", "## Overview\n", "\n", "This tutorial will cover:\n", - " - Creating a Ray Dataset that represents the images in the dataset\n", - " - Running the computationally expensive OCR process on each image in the dataset in parallel\n", - " - Filtering the dataset by keeping only images that contain text\n", + " - Creating a Datastream that represents the images in the dataset\n", + " - Running the computationally expensive OCR process on each image in the datastream in parallel\n", + " - Filtering the datastream by keeping only images that contain text\n", " - Performing various NLP operations on the text\n", "\n", "## Walkthrough\n", @@ -111,7 +111,7 @@ "source": [ "### Running the OCR software on the data\n", "\n", - "We can now use the {meth}`ray.data.read_binary_files ` function to read all the images from S3. We set the `include_paths=True` option to create a dataset of the S3 paths and image contents. We then run the {meth}`ds.map ` function on this dataset to execute the actual OCR process on each file and convert the screen shots into text. 
This will create a tabular dataset with columns `path` and `text`, see also [](transform_datasets_row_output_types).\n", + "We can now use the {meth}`ray.data.read_binary_files ` function to read all the images from S3. We set the `include_paths=True` option to create a datastream of the S3 paths and image contents. We then run the {meth}`ds.map ` function on this datastream to execute the actual OCR process on each file and convert the screen shots into text. This will create a tabular datastream with columns `path` and `text`, see also [](transform_datastreams_row_output_types).\n", "\n", "````{note}\n", "If you want to load the data from a private bucket, you have to run\n", @@ -138,7 +138,7 @@ "name": "stderr", "output_type": "stream", "text": [ - "2022-07-04 14:35:53,683\tWARNING read_api.py:256 -- The number of blocks in this dataset (3) limits its parallelism to 3 concurrent tasks. This is much less than the number of available CPU slots in the cluster. Use `.repartition(n)` to increase the number of dataset blocks.\n", + "2022-07-04 14:35:53,683\tWARNING read_api.py:256 -- The number of blocks in this datastream (3) limits its parallelism to 3 concurrent tasks. This is much less than the number of available CPU slots in the cluster. Use `.repartition(n)` to increase the number of datastream blocks.\n", "Read->Map: 100%|██████████| 3/3 [00:07<00:00, 2.34s/it]\n" ] } @@ -167,7 +167,7 @@ "id": "e22e7cd7", "metadata": {}, "source": [ - "Let us have a look at some of the data points with the {meth}`take ` function." + "Let us have a look at some of the data points with the {meth}`take ` function." 
] }, { @@ -204,10 +204,10 @@ "### Saving and loading the result of the OCR run\n", "\n", "````{note}\n", - "Saving the dataset is optional, you can also continue with the in-memory data without persisting it to storage.\n", + "Saving the datastream is optional, you can also continue with the in-memory data without persisting it to storage.\n", "````\n", "\n", - "We can save the result of running tesseract on the dataset on disk so we can read it out later if we want to re-run the NLP analysis without needing to re-run the OCR (which is very expensive on the whole dataset). This can be done with the {meth}`write_parquet ` function:" + "We can save the result of running tesseract on the datastream on disk so we can read it out later if we want to re-run the NLP analysis without needing to re-run the OCR (which is very expensive on the whole datastream). This can be done with the {meth}`write_parquet ` function:" ] }, { @@ -234,7 +234,7 @@ "id": "7a387f42", "metadata": {}, "source": [ - "You can later reload the dataset with the {meth}`read_parquet ` function:" + "You can later reload the data with the {meth}`read_parquet ` function:" ] }, { @@ -247,7 +247,7 @@ "name": "stderr", "output_type": "stream", "text": [ - "2022-07-04 14:36:13,515\tWARNING read_api.py:256 -- The number of blocks in this dataset (6) limits its parallelism to 6 concurrent tasks. This is much less than the number of available CPU slots in the cluster. Use `.repartition(n)` to increase the number of dataset blocks.\n" + "2022-07-04 14:36:13,515\tWARNING read_api.py:256 -- The number of blocks in this datastream (6) limits its parallelism to 6 concurrent tasks. This is much less than the number of available CPU slots in the cluster. 
Use `.repartition(n)` to increase the number of datastream blocks.\n" ] } ], @@ -330,7 +330,7 @@ "source": [ "It gives both the language and a confidence score for that language.\n", "\n", - "In order to run the code on the dataset, we should use Ray Datasets' built in support for actors since the `nlp` object is not serializable and we want to avoid having to recreate it for each individual sentence. We also batch the computation with the {meth}`map_batches ` function to ensure spaCy can use more efficient vectorized operations where available:" + "In order to run the code on the dataset, we should use Ray Data's built in support for actors since the `nlp` object is not serializable and we want to avoid having to recreate it for each individual sentence. We also batch the computation with the {meth}`map_batches ` function to ensure spaCy can use more efficient vectorized operations where available:" ] }, { @@ -350,7 +350,7 @@ { "data": { "text/plain": [ - "Dataset(num_blocks=6, num_rows=6, schema={path: object, text: object, language: object, score: float64})" + "Datastream(num_blocks=6, num_rows=6, schema={path: object, text: object, language: object, score: float64})" ] }, "execution_count": 10, diff --git a/doc/source/data/examples/random-access.rst b/doc/source/data/examples/random-access.rst index 908cd8948bae..bb0d5536002d 100644 --- a/doc/source/data/examples/random-access.rst +++ b/doc/source/data/examples/random-access.rst @@ -4,7 +4,7 @@ Random Data Access (Experimental) --------------------------------- -Any Arrow-format dataset can be enabled for random access by calling ``dataset.to_random_access_dataset(key="col_name")``. This partitions the dataset across the cluster by the given sort key, providing efficient random access to records via binary search. A number of worker actors are created, each of which has zero-copy access to the underlying sorted data blocks of the Dataset. 
+Any Arrow-format datastream can be enabled for random access by calling ``ds.to_random_access_dataset(key="col_name")``. This partitions the data across the cluster by the given sort key, providing efficient random access to records via binary search. A number of worker actors are created, each of which has zero-copy access to the underlying sorted data blocks of the Datastream. .. code-block:: python @@ -13,7 +13,7 @@ Any Arrow-format dataset can be enabled for random access by calling ``dataset.t ds = ds.add_column("embedding", lambda b: b["value"] ** 2) # -> schema={value: int64, embedding: int64} - # Enable random access on the dataset. This launches a number of actors + # Enable random access on the datastream. This launches a number of actors # spread across the cluster that serve random access queries to the data. rmap = ds.to_random_access_dataset(key="value", num_workers=4) @@ -29,12 +29,12 @@ Any Arrow-format dataset can be enabled for random access by calling ``dataset.t rmap.multiget([4, 2]) # -> [{"value": 4, "embedding": 16}, {"value": 2, "embedding": 4}] -Similar to Dataset, a RandomAccessDataset can be passed to and used from any Ray actor or task. +Similar to Datastream, a RandomAccessDataset can be passed to and used from any Ray actor or task. Architecture ------------ -RandomAccessDataset spreads its workers evenly across the cluster. Each worker fetches and pins in shared memory all blocks of the sorted source dataset found on its node. In addition, it is ensured that each block is assigned to at least one worker. A central index of block to key-range assignments is computed, which is used to serve lookups. +RandomAccessDataset spreads its workers evenly across the cluster. Each worker fetches and pins in shared memory all blocks of the sorted source data found on its node. In addition, it is ensured that each block is assigned to at least one worker. 
A central index of block to key-range assignments is computed, which is used to serve lookups. Lookups occur as follows: @@ -68,4 +68,4 @@ It is important to note that the client (Ray worker process) can also be a bottl Fault Tolerance --------------- -Currently, RandomAccessDataset is not fault-tolerant. Losing any of the worker actors invalidates the dataset, and it must be re-created from the source dataset. +Currently, RandomAccessDataset is not fault-tolerant. Losing any of the worker actors invalidates the dataset, and it must be re-created from the source data. diff --git a/doc/source/data/faq.rst b/doc/source/data/faq.rst index ad628f0cbadc..f68f1ff5796d 100644 --- a/doc/source/data/faq.rst +++ b/doc/source/data/faq.rst @@ -1,13 +1,13 @@ -.. _datasets_faq: +.. _data_faq: === FAQ === -These are some Frequently Asked Questions that we've seen pop up for Ray Datasets. +These are some Frequently Asked Questions that we've seen pop up for Ray Data. .. note:: - For a general conceptual overview of Ray Datasets, see our + For a general conceptual overview of Ray Data, see our :ref:`Key Concepts docs `. If you still have questions after reading this FAQ, please reach out on @@ -18,10 +18,10 @@ If you still have questions after reading this FAQ, please reach out on :depth: 2 -What problems does Ray Datasets solve? +What problems does Ray Data solve? ====================================== -Ray Datasets aims to solve the problems of slow, resource-inefficient, unscalable data +Ray Data aims to solve the problems of slow, resource-inefficient, unscalable data loading and preprocessing pipelines for two core uses cases: 1. **Model training:** resulting in poor training throughput and low GPU utilization as @@ -29,40 +29,40 @@ loading and preprocessing pipelines for two core uses cases: 2. **Batch inference:** resulting in poor batch inference throughput and low GPU utilization. 
-In order to solve these problems without sacrificing usability, Ray Datasets simplifies +In order to solve these problems without sacrificing usability, Ray Data simplifies parallel and pipelined data processing on Ray, providing a higher-level API while internally handling data batching, task parallelism and pipelining, and memory management. -Who is using Ray Datasets? -========================== +Who is using Ray Data? +====================== -To give an idea of Datasets use cases, we list a few notable users running Datasets +To give an idea of Ray Data use cases, we list a few notable users running Ray Data integrations in production below: -* Predibase is using Ray Datasets for ML ingest and batch inference in their OSS +* Predibase is using Ray Data for ML ingest and batch inference in their OSS declarative ML framework, `Ludwig `__, and internally in their `AutoML product `__. -* Amazon is using Ray Datasets for large-scale I/O in their scalable data catalog, +* Amazon is using Ray Data for large-scale I/O in their scalable data catalog, `DeltaCAT `__. -* Shopify is using Ray Datasets for ML ingest and batch inference in their ML platform, +* Shopify is using Ray Data for ML ingest and batch inference in their ML platform, `Merlin `__. -* Ray Datasets is used as the data processing engine for the +* Ray Data is used as the data processing engine for the `Ray-based Apache Beam runner `__. -* Ray Datasets is used as the preprocessing and batch inference engine for +* Ray Data is used as the preprocessing and batch inference engine for :ref:`Ray AIR `. -If you're using Ray Datasets, please let us know about your experience on the +If you're using Ray Data, please let us know about your experience on the `Slack `__ or `Discourse `__; we'd love to hear from you! -What should I use Ray Datasets for? -=================================== +What should I use Ray Data for? 
+=============================== -Ray Datasets is the standard way to load, process, and exchange data in Ray libraries +Ray Data is the standard way to load, process, and exchange data in Ray libraries and applications, with a particular emphasis on ease-of-use, performance, and -scalability in both data size and cluster size. Within that, Datasets is designed for +scalability in both data size and cluster size. Within that, Datastreams is designed for two core uses cases: * **ML (training) ingest:** Loading, preprocessing, and ingesting data into one or more @@ -70,43 +70,43 @@ two core uses cases: * **Batch inference:** Loading, preprocessing, and performing parallel batch inference on data. -We have designed the Datasets APIs, data model, execution model, and +We have designed the Datastream APIs, data model, execution model, and integrations with these use cases in mind, and have captured these use cases in large-scale nightly tests to ensure that we're hitting our scalability, performance, and efficiency marks for these use cases. -What should I not use Ray Datasets for? -======================================= +What should I not use Ray Data for? +=================================== -Ray Datasets is not meant to be used for generic ETL pipelines (like Spark) or +Ray Data is not meant to be used for generic ETL pipelines (like Spark) or scalable data science (like Dask, Modin, or Mars). However, each of these frameworks -are :ref:`runnable on Ray `, and Datasets integrates tightly with +are :ref:`runnable on Ray `, and Datastreams integrates tightly with these frameworks, allowing for efficient exchange of distributed data partitions often with zero-copy. Check out the -:ref:`dataset creation feature guide ` to learn +:ref:`datastream creation feature guide ` to learn more about these integrations. 
-Datasets is specifically targeting +Datastreams is specifically targeting the ML ingest and batch inference use cases, with focus on data loading and last-mile preprocessing for ML pipelines. -For data loading for training, how does Ray Datasets compare to other solutions? +For data loading for training, how does Ray Data compare to other solutions? ================================================================================ There are several ML framework-specific and general solutions for loading data into -model trainers. Below, we summarize some advantages Datasets offers over these more +model trainers. Below, we summarize some advantages Datastreams offers over these more specific ingest frameworks. Torch datasets (and data loaders) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -* **Framework-agnostic:** Datasets is framework-agnostic and portable between different +* **Framework-agnostic:** Datastreams is framework-agnostic and portable between different distributed training frameworks, while `Torch datasets `__ are specific to Torch. * **No built-in IO layer:** Torch datasets do not have an I/O layer for common file formats or in-memory exchange with other frameworks; users need to bring in other libraries and roll this integration themselves. -* **Generic distributed data processing:** Datasets is more general: it can handle +* **Generic distributed data processing:** Datastreams is more general: it can handle generic distributed operations, including global per-epoch shuffling, which would otherwise have to be implemented by stitching together two separate systems. Torch datasets would require such stitching for anything more involved @@ -114,22 +114,22 @@ Torch datasets (and data loaders) shards. See our `blog post `__ on why this shared infrastructure is important for 3rd generation ML architectures. 
-* **Lower overhead:** Datasets is lower overhead: it supports zero-copy exchange between +* **Lower overhead:** Datastreams is lower overhead: it supports zero-copy exchange between processes, in contrast to the multi-processing-based pipelines of Torch datasets. TensorFlow datasets ~~~~~~~~~~~~~~~~~~~ -* **Framework-agnostic:** Datasets is framework-agnostic and portable between different +* **Framework-agnostic:** Datastreams is framework-agnostic and portable between different distributed training frameworks, while `TensorFlow datasets `__ is specific to TensorFlow. -* **Unified single-node and distributed:** Datasets unifies single and multi-node training under +* **Unified single-node and distributed:** Datastreams unifies single and multi-node training under the same abstraction. TensorFlow datasets presents `separate concepts `__ for distributed data loading and prevents code from being seamlessly scaled to larger clusters. -* **Generic distributed data processing:** Datasets is more general: it can handle +* **Generic distributed data processing:** Datastreams is more general: it can handle generic distributed operations, including global per-epoch shuffling, which would otherwise have to be implemented by stitching together two separate systems. TensorFlow datasets would require such stitching for anything more involved @@ -137,15 +137,15 @@ TensorFlow datasets shards; only file interleaving is supported. See our `blog post `__ on why this shared infrastructure is important for 3rd generation ML architectures. -* **Lower overhead:** Datasets is lower overhead: it supports zero-copy exchange between +* **Lower overhead:** Datastreams is lower overhead: it supports zero-copy exchange between processes, in contrast to the multi-processing-based pipelines of TensorFlow datasets. Petastorm ~~~~~~~~~ * **Supported data types:** `Petastorm `__ only supports Parquet data, while - Ray Datasets supports many file formats. 
-* **Lower overhead:** Datasets is lower overhead: it supports zero-copy exchange between + Ray Data supports many file formats. +* **Lower overhead:** Datastreams is lower overhead: it supports zero-copy exchange between processes, in contrast to the multi-processing-based pipelines used by Petastorm. * **No data processing:** Petastorm does not expose any data processing APIs. @@ -153,107 +153,82 @@ NVTabular ~~~~~~~~~ * **Supported data types:** `NVTabular `__ only supports tabular - (Parquet, CSV, Avro) data, while Ray Datasets supports many other file formats. -* **Lower overhead:** Datasets is lower overhead: it supports zero-copy exchange between + (Parquet, CSV, Avro) data, while Ray Data supports many other file formats. +* **Lower overhead:** Datastreams is lower overhead: it supports zero-copy exchange between processes, in contrast to the multi-processing-based pipelines used by Petastorm. -* **Heterogeneous compute:** NVTabular doesn't support mixing heterogeneous resources in dataset transforms (e.g. - both CPU and GPU transformations), while Ray Datasets supports this. +* **Heterogeneous compute:** NVTabular doesn't support mixing heterogeneous resources in datastream transforms (e.g. + both CPU and GPU transformations), while Ray Data supports this. * **ML-specific ops:** NVTabular has a bunch of great ML-specific preprocessing - operations; this is currently WIP for Ray Datasets: + operations; this is currently WIP for Ray Data: :ref:`Ray AIR preprocessors `. -.. _datasets_streaming_faq: +.. _streaming_faq: -For batch (offline) inference, why should I use Ray Datasets instead of an actor pool? +For batch (offline) inference, why should I use Ray Data instead of an actor pool? 
====================================================================================== -Ray Datasets provides its own autoscaling actor pool via the actor compute strategy for -:meth:`ds.map_batches() `, allowing you to perform CPU- or +Ray Data provides its own autoscaling actor pool via the actor compute strategy for +:meth:`ds.map_batches() `, allowing you to perform CPU- or GPU-based batch inference on this actor pool. Using this instead of the `Ray actor pool `__ has a few advantages: -* Ray Datasets actor pool is autoscaling and supports easy-to-configure task dependency +* Ray Data actor pool is autoscaling and supports easy-to-configure task dependency prefetching, pipelining data transfer with compute. -* Ray Datasets takes care of orchestrating the tasks, batching the data, and managing +* Ray Data takes care of orchestrating the tasks, batching the data, and managing the memory. -* With Ray Datasets pipelining, you can - precisely configure pipelining of preprocessing with batch inference, allowing you to - easily tweak parallelism vs. pipelining to maximize your GPU utilization. -* Ray Datasets provides a broad and performant I/O layer, which you would otherwise have +* Ray Data provides a broad and performant I/O layer, which you would otherwise have to roll yourself. -How fast is Ray Datasets? +How fast is Ray Data? ========================= We're still working on open benchmarks, but we've done some benchmarking on synthetic data and have helped several users port from solutions using Petastorm, Torch multi-processing data loader, and TensorFlow datasets that have seen a big training throughput improvement (4-8x) and model accuracy improvement (due to global per-epoch -shuffling) using Ray Datasets. +shuffling) using Ray Data. -Please see our -`recent blog post on Ray Datasets `__ +Please see this +`blog post on Ray Data `__ for more information on this benchmarking. 
+The new streaming backend for Ray Data (Datastream) supports throughputs of up to +hundreds of gigabytes per second in a large cluster. + Does all of my data need to fit into memory? ============================================ No, with Ray's support for :ref:`spilling objects to disk `, you only need to be able to fit your data into memory OR disk. However, keeping your data in distributed memory may speed up your workload, which can be done on arbitrarily large -datasets by windowing them, creating pipelines. +datastreams by windowing them, creating pipelines. -How much data can Ray Datasets handle? -====================================== +How much data can Ray Data handle? +================================== -Ray Datasets has been tested at multi-petabyte scale for I/O and multi-terabyte scale for +Ray Data has been tested at multi-petabyte scale for I/O and multi-terabyte scale for shuffling, and we're continuously working on improving this scalability. If you have a -very large dataset that you'd like to process and you're running into scalability +very large datastream that you'd like to process and you're running into scalability issues, please reach out to us on our `Discourse `__. -How do I get my data into Ray Datasets? -======================================= +How do I get my data into Ray Data? +=================================== -Ray Datasets supports creating a ``Dataset`` from local and distributed in-memory data +Ray Data supports creating a ``Datastream`` from local and distributed in-memory data via integrations with common data libraries, as well as from local and remote storage systems via our support for many common file formats and storage backends. -Check out our :ref:`feature guide for creating datasets ` for +Check out our :ref:`feature guide for creating datastreams ` for details. -How do I do streaming/online data loading and processing? 
-========================================================= - -Streaming data loading and data processing can be accomplished by using -dataset pipelines. By windowing a dataset, you can -stream data transformations across subsets of the data, even windowing down to the -reading of each file. - -When should I use pipelining? -============================= - -Pipelining is useful in a few scenarios: - -* You have two chained operations using different resources (e.g. CPU and GPU) that you - want to saturate; this is the case for both ML ingest (CPU-based preprocessing and - GPU-based training) and batch inference (CPU-based preprocessing and GPU-based batch - inference). -* You want to do streaming data loading and processing in order to keep the size of the - working set small; see previous FAQ on - :ref:`how to do streaming data loading and processing `. -* You want to decrease the time-to-first-batch (latency) for a certain operation at the - end of your workload. This is the case for training and inference since this prevents - GPUs from being idle (which is costly), and can be advantageous for some other - latency-sensitive consumers of datasets. - When should I use global per-epoch shuffling? ============================================= Background ~~~~~~~~~~ -When training a machine learning model, shuffling your training dataset is important in +When training a machine learning model, shuffling your training datastream is important in general in order to ensure that your model isn't overfitting on some unintended pattern in your data, e.g. sorting on the label column, or time-correlated samples. Per-epoch shuffling in particular can improve your model's precision gain per epoch by reducing @@ -263,9 +238,9 @@ learned weights in the wrong direction, shuffling before the next epoch lets you out of such a gradient rut. 
In the distributed data-parallel training case, the current status quo solution is typically to have a per-shard in-memory shuffle buffer that you fill up and pop random batches from, without mixing data across shards between epochs. -Ray Datasets also offers fully global random shuffling via -:meth:`ds.random_shuffle() `, and doing so on an -epoch-repeated dataset pipeline to provide global per-epoch shuffling is as simple as +Ray Data also offers fully global random shuffling via +:meth:`ds.random_shuffle() `, and doing so on an +epoch-repeated datastream pipeline to provide global per-epoch shuffling is as simple as ``ray.data.read().repeat().random_shuffle_each_window()``. But when should you opt for global per-epoch shuffling instead of local shuffle buffer shuffling? @@ -279,7 +254,7 @@ gradient-descent-based model trainers benefiting from improved (global) shuffle and we've found that this is particular pronounced for tabular data/models in practice. However, the more global your shuffle is, the expensive the shuffling operation, and this compounds when doing distributed data-parallel training on a multi-node cluster due -to data transfer costs, and this cost can be prohibitive when using very large datasets. +to data transfer costs, and this cost can be prohibitive when using very large datastreams. The best route for determining the best tradeoff between preprocessing time + cost and per-epoch shuffle quality is to measure the precision gain per training step for your @@ -296,28 +271,28 @@ loading + shuffling throughput is higher than your training throughput, your GPU be saturated, so we like to recommend users with shuffle-sensitive models to push their shuffle quality higher until this threshold is hit. -What is Arrow and how does Ray Datasets use it? +What is Arrow and how does Ray Data use it? 
=============================================== `Apache Arrow `__ is a columnar memory format and a -single-node data processing and I/O library that Ray Datasets leverages extensively. You -can think of Ray Datasets as orchestrating distributed processing of Arrow data. +single-node data processing and I/O library that Ray Data leverages extensively. You +can think of Ray Data as orchestrating distributed processing of Arrow data. -See our :ref:`key concepts ` for more information on how Ray Datasets +See our :ref:`key concepts ` for more information on how Ray Data uses Arrow. -How much performance tuning does Ray Datasets require? +How much performance tuning does Ray Data require? ====================================================== -Ray Datasets doesn't perform query optimization, so some manual performance +Ray Data doesn't perform query optimization, so some manual performance tuning may be necessary depending on your use case and data scale. Please see our :ref:`performance tuning guide ` for more information. -How can I contribute to Ray Datasets? +How can I contribute to Ray Data? ===================================== We're always happy to accept external contributions! If you have a question, a feature -request, or want to contibute to Ray Datasets or tell us about your use case, please +request, or want to contibute to Ray Data or tell us about your use case, please reach out to us on `Discourse `__; if you have a you're confident that you've found a bug, please open an issue on the `Ray GitHub repo `__. Please see our diff --git a/doc/source/data/getting-started.rst b/doc/source/data/getting-started.rst index 028ae66cf160..a8cacbeee520 100644 --- a/doc/source/data/getting-started.rst +++ b/doc/source/data/getting-started.rst @@ -1,11 +1,17 @@ -.. _datasets_getting_started: +.. _data_getting_started: Getting Started =============== -A Ray :class:`Dataset ` is a distributed data collection. 
It holds -references to distributed data *blocks*, and exposes APIs for loading and processing -data. +A :class:`Datastream ` is a distributed data transformation +pipeline. It provides APIs for loading external data into the Ray object store in *blocks*, +and exposes APIs for streaming processing of these data blocks in the cluster. + +.. tip:: + + Ray Data is for processing of *finite* datasets for ML training and + batch inference. This is in contrast to frameworks such as Apache Flink that + process infinite data streams. Install Ray Data ---------------- @@ -19,10 +25,10 @@ To install Ray Data, run: To learn more about installing Ray and its libraries, read :ref:`Installing Ray `. -Create a dataset ----------------- +Create a datastream +------------------- -Create datasets from on-disk files, Python objects, and cloud storage services like S3. +Create datastreams from on-disk files, Python objects, and cloud storage services like S3. Ray reads from any `filesystem supported by Arrow `__. @@ -30,23 +36,23 @@ Ray reads from any `filesystem supported by Arrow import ray - dataset = ray.data.read_csv("s3://anonymous@air-example-data/iris.csv") + datastream = ray.data.read_csv("s3://anonymous@air-example-data/iris.csv") - dataset.show(limit=1) + datastream.show(limit=1) .. testoutput:: {'sepal length (cm)': 5.1, 'sepal width (cm)': 3.5, 'petal length (cm)': 1.4, 'petal width (cm)': 0.2, 'target': 0} -To learn more about creating datasets, read -:ref:`Creating datasets `. +To learn more about creating datastreams, read +:ref:`Creating datastreams `. -Transform the dataset ---------------------- +Transform the datastream +------------------------ -Apply :ref:`user-defined functions ` (UDFs) to -transform datasets. Ray executes transformations in parallel for performance at scale. +Apply :ref:`user-defined functions ` (UDFs) to +transform datastreams. Ray executes transformations in parallel for performance. .. testcode:: @@ -56,8 +62,8 @@ transform datasets. 
Ray executes transformations in parallel for performance at def transform_batch(df: pd.DataFrame) -> pd.DataFrame: return df[(df["sepal length (cm)"] < 5.5) & (df["petal length (cm)"] > 3.5)] - transformed_dataset = dataset.map_batches(transform_batch) - print(transformed_dataset) + transformed_ds = datastream.map_batches(transform_batch) + print(transformed_ds) .. testoutput:: @@ -75,20 +81,20 @@ transform datasets. Ray executes transformations in parallel for performance at ) -To learn more about transforming datasets, read -:ref:`Transforming datasets `. +To learn more about transforming datastreams, read +:ref:`Transforming datastreams `. -Consume the dataset -------------------- +Consume the datastream +---------------------- -Pass datasets to Ray tasks or actors, and access records with methods like -:meth:`~ray.data.Dataset.iter_batches`. +Pass datastreams to Ray tasks or actors, and access records with methods like +:meth:`~ray.data.Datastream.iter_batches`. .. tabbed:: Local .. testcode:: - batches = transformed_dataset.iter_batches(batch_size=8) + batches = transformed_ds.iter_batches(batch_size=8) print(next(iter(batches))) .. testoutput:: @@ -106,13 +112,13 @@ Pass datasets to Ray tasks or actors, and access records with methods like .. testcode:: @ray.remote - def consume(dataset: ray.data.Dataset) -> int: + def consume(ds: ray.data.Datastream) -> int: num_batches = 0 - for batch in dataset.iter_batches(batch_size=8): + for batch in ds.iter_batches(batch_size=8): num_batches += 1 return num_batches - ray.get(consume.remote(transformed_dataset)) + ray.get(consume.remote(transformed_ds)) .. 
tabbed:: Actors @@ -121,30 +127,29 @@ Pass datasets to Ray tasks or actors, and access records with methods like @ray.remote class Worker: - def train(self, shard) -> int: - for batch in shard.iter_batches(batch_size=8): + def train(self, data_iterator): + for batch in data_iterator.iter_batches(batch_size=8): pass - return shard.count() workers = [Worker.remote() for _ in range(4)] - shards = transformed_dataset.split(n=4, locality_hints=workers) + shards = transformed_ds.streaming_split(n=4, equal=True) ray.get([w.train.remote(s) for w, s in zip(workers, shards)]) -To learn more about consuming datasets, read -:ref:`Consuming datasets `. +To learn more about consuming datastreams, read +:ref:`Consuming datastreams `. -Save the dataset ----------------- +Save the datastream +------------------- -Call methods like :meth:`~ray.data.Dataset.write_parquet` to save datasets to local +Call methods like :meth:`~ray.data.Datastream.write_parquet` to save datastream contents to local or remote filesystems. .. testcode:: import os - transformed_dataset.write_parquet("iris") + transformed_ds.write_parquet("iris") print(os.listdir("iris")) @@ -154,9 +159,9 @@ or remote filesystems. ['..._000000.parquet'] -To learn more about saving datasets, read :ref:`Saving datasets `. +To learn more about saving datastream contents, read :ref:`Saving datastreams `. Next Steps ---------- -* To check how your application is doing, you can use the :ref:`Ray dashboard`. \ No newline at end of file +* To check how your application is doing, you can use the :ref:`Ray dashboard`. diff --git a/doc/source/data/glossary.rst b/doc/source/data/glossary.rst index 31caf719191f..6547032b3490 100644 --- a/doc/source/data/glossary.rst +++ b/doc/source/data/glossary.rst @@ -1,7 +1,7 @@ -.. _datasets_glossary: +.. _datastreams_glossary: ===================== -Ray Datasets Glossary +Ray Data Glossary ===================== .. 
glossary:: @@ -10,20 +10,20 @@ Ray Datasets Glossary The way batches of data are represented. Set ``batch_format`` in methods like - :meth:`Dataset.iter_batches() ` and - :meth:`Dataset.map_batches() ` to specify the + :meth:`Datastream.iter_batches() ` and + :meth:`Datastream.map_batches() ` to specify the batch type. .. doctest:: >>> import ray - >>> # Dataset is executed by streaming executor by default, which doesn't + >>> # Datastream is executed by streaming executor by default, which doesn't >>> # preserve the order, so we explicitly set it here. >>> ray.data.context.DataContext.get_current().execution_options.preserve_order = True - >>> dataset = ray.data.range_table(10) - >>> next(iter(dataset.iter_batches(batch_format="numpy", batch_size=5))) + >>> datastream = ray.data.range_table(10) + >>> next(iter(datastream.iter_batches(batch_format="numpy", batch_size=5))) {'value': array([0, 1, 2, 3, 4])} - >>> next(iter(dataset.iter_batches(batch_format="pandas", batch_size=5))) + >>> next(iter(datastream.iter_batches(batch_format="pandas", batch_size=5))) value 0 0 1 1 @@ -32,14 +32,14 @@ Ray Datasets Glossary 4 4 To learn more about batch formats, read - :ref:`UDF Input Batch Formats `. + :ref:`UDF Input Batch Formats `. Block - A processing unit of data. A :class:`~ray.data.Dataset` consists of a + A processing unit of data. A :class:`~ray.data.Datastream` consists of a collection of blocks. - Under the hood, :term:`Datasets ` partition :term:`records ` - into a set of distributed data blocks. This allows Datasets to perform operations + Under the hood, :term:`Ray Data ` partition :term:`records ` + into a set of distributed data blocks. This allows it to perform operations in parallel. Unlike a batch, which is a user-facing object, a block is an internal abstraction. @@ -50,24 +50,23 @@ Ray Datasets Glossary Blocks are represented as `Arrow tables `_, `pandas DataFrames `_, - and Python lists. 
To determine the block format, call - :meth:`Dataset.dataset_format() `. + and Python lists. - Datasets (library) + Ray Data (library) A library for distributed data processing. - Datasets isn’t intended as a replacement for more general data processing systems. + Ray Data isn’t intended as a replacement for more general data processing systems. Its utility is as the last-mile bridge from ETL pipeline outputs to distributed ML applications and libraries in Ray. - To learn more about Ray Datasets, read :ref:`Key Concepts `. + To learn more about Ray Data, read :ref:`Key Concepts `. - Dataset (object) - A class that represents a distributed collection of data. + Datastream (object) + A class that produces a sequence of distributed data blocks. - :class:`~ray.data.Dataset` exposes methods to read, transform, and consume data at scale. + :class:`~ray.data.Datastream` exposes methods to read, transform, and consume data at scale. - To learn more about Datasets and the operations they support, read the :ref:`Datasets API Reference `. + To learn more about Datastreams and the operations they support, read the :ref:`Datastreams API Reference `. Datasource A :class:`~ray.data.Datasource` specifies how to read and write from @@ -84,24 +83,24 @@ Ray Datasets Glossary Record A single data item. - If your dataset is :term:`tabular `, then records are :class:`TableRows `. - If your dataset is :term:`simple `, then records are arbitrary Python objects. - If your dataset is :term:`tensor `, then records are `NumPy ndarrays `_. + If your datastream is :term:`tabular `, then records are :class:`TableRows `. + If your datastream is :term:`simple `, then records are arbitrary Python objects. + If your datastream is :term:`tensor `, then records are `NumPy ndarrays `_. Schema - The data type of a dataset. + The data type of a datastream. - If your dataset is :term:`tabular `, then the schema describes - the column names and data types. 
If your dataset is :term:`simple `, - then the schema describes the Python object type. If your dataset is - :term:`tensor `, then the schema describes the per-element + If your datastream is :term:`tabular `, then the schema describes + the column names and data types. If your datastream is :term:`simple `, + then the schema describes the Python object type. If your datastream is + :term:`tensor `, then the schema describes the per-element tensor shape and data type. - To determine a dataset's schema, call - :meth:`Dataset.schema() `. + To determine a datastream's schema, call + :meth:`Datastream.schema() `. - Simple Dataset - A Dataset that represents a collection of arbitrary Python objects. + Simple Datastream + A Datastream that represents a collection of arbitrary Python objects. .. doctest:: @@ -109,10 +108,10 @@ Ray Datasets Glossary >>> ray.data.from_items(["spam", "ham", "eggs"]) MaterializedDatastream(num_blocks=3, num_rows=3, schema=) - Tensor Dataset - A Dataset that represents a collection of ndarrays. + Tensor Datastream + A Datastream that represents a collection of ndarrays. - :term:`Tabular datasets ` that contain tensor columns aren’t tensor datasets. + :term:`Tabular datastreams ` that contain tensor columns aren’t tensor datastreams. .. doctest:: @@ -125,8 +124,8 @@ Ray Datasets Glossary schema={__value__: numpy.ndarray(shape=(32, 32, 3), dtype=double)} ) - Tabular Dataset - A Dataset that represents columnar data. + Tabular Datastream + A Datastream that represents columnar data. .. doctest:: @@ -145,10 +144,10 @@ Ray Datasets Glossary ) User-defined function (UDF) - A callable that transforms batches or :term:`records ` of data. UDFs let you arbitrarily transform datasets. + A callable that transforms batches or :term:`records ` of data. UDFs let you arbitrarily transform datastreams. - Call :meth:`Dataset.map_batches() `, - :meth:`Dataset.map() `, or - :meth:`Dataset.flat_map() ` to apply UDFs. 
+ Call :meth:`Datastream.map_batches() `, + :meth:`Datastream.map() `, or + :meth:`Datastream.flat_map() ` to apply UDFs. - To learn more about UDFs, read :ref:`Writing User-Defined Functions `. + To learn more about UDFs, read :ref:`Writing User-Defined Functions `. diff --git a/doc/source/data/images/dataset-arch.svg b/doc/source/data/images/dataset-arch.svg deleted file mode 100644 index a56515610e8e..000000000000 --- a/doc/source/data/images/dataset-arch.svg +++ /dev/null @@ -1 +0,0 @@ - \ No newline at end of file diff --git a/doc/source/data/images/dataset-compute-1.png b/doc/source/data/images/dataset-compute-1.png deleted file mode 100644 index 2f5629b5db2f5b40cb7e28ba1dbac50771179c72..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 38712 zcmbq)WmuG3+x9)f&<)a!2ue!}(jd|yrF4okN=gmgpn!lN-AD@3F|zhOOKAgE@t#z%d&huOm8fpsoIMg^01mP<_meYbD1Pp>;Tv!<38?Onk zKj43eS2Bv)Sm5J>Wf>0sP30=D@A|~a+SSACr4{tt(aFJz>y^bzD=WuWHcqa)Xl)V@ z#0V+MNo#w4*qZb3G+6T$x;p2yMetod+dTJ=MLVnXAK_<~C- zHm7GNw%^|6DgNk*IQW|^P@FEh?@k*hQfp-pe`n^0w`S0@Sv2VW@Pl0jA{Z?m38OR# z%g(Udej^r%$l4s66t;hB(j zl;`53gEzz8rxT27b+!*w&og)H2+ls}oyhO~ZFN=~yBv?P9w?=MIQ8Ir$KyPStFiw-$7Hk4cVxx36t{H%|CmA2zN35Iz0zeB7UGGps}p6n>`>kgcJ zLQ!9v%|fw7AX_FN5C?XguCwo?{wrzK^^95Md(uo;W{i}6XJ2i0@*Sc{*vfIhbN)GQ zlbtc~1=3)vaMMj6b+v@%W=_Liv`2Py;J2m4=M=r$w_`U&>NZvz!HDi~LtRqd3Gu@NZdbG`4ac#Jr{agN z8hzo7%?-W+Pv+pI1??uwgxK{X=`*ekvCID%s}bXg+rTh)?2H2Kl#Aoopc~ymb=2}P zaj)Bl8P~9CFn0O>uI9~9-%2(;N&eaI{P$1uv!(!zY0s8tzajdwQo2tf_n)Cfdt;3- zd>fgkkcpE_&3f3NqYy`EQ-onXj`h^nEaIPC-qR7{xHz6~^VqonU;hWzY^xS>hBbFK z8|9c3n(;-1x{e4%sy#YM^R>huhxk9AQ8`mhITbF!&KLIvcuiGAxjfNO!`ynoc=E0v z><@Aq28W@4J&5(4l)mh%CWgUnKg8{Q?;W;Rv|Q#j(XY)=31!q*8}}qdz1{Q&sFQ{k zLnQxswyI+WGV(TsZ_&%t%4N#Eg^Cd8VNp_v#(9DAe!-GCnDHD%`|vd-^7>EZ8&>&V zlR0i{ot1!pW@3}Gd5rD3j?Sg4;e6$ay)bJT(#a>$&u=>i^EM20`x)wC^!-|3pWb~4s`lSBot zC>k*9`v~Y=z9}U2&l3|DzrIKH2=4b@G47?aUG8D@SYLTz!)ocC>@P3$Cf9$ub$XD7 zz<~Z8fMR?Qvf6>#Z!geaWi>sS$n<>F6BkOzSuSVgh5ik3UvD1ylWKo+HN97-xsiwO 
z3&)?^JWMDGNB-p`vQcP}R2Z8R{`j9K3IwaF!r7H`+bYs|%m!(_#@F5_cOVPJ;Oj>U zi-c9U&_CAHti51@4VH-}4HNNJ^bnXOZlcACAodVC0(CK}3+VsE)||eQuf?WH#a`NL z_j7h2QRJ(13+;|K|Jszq?jU%H&@NXrjyMFO%ixt^PrD}l=jp#X8_GvpMkTh0q^1HU zTQww-L;ks`sOTt6;56`ek*fbRlT?^6o{j~_f5W?v?MbkL!;ek^*pF$Pf4Aw~-Pf0I zH8)>p9xw)*_>ad~@ylyB!RmYk$p35idbjfqWI4)_e6e!Hsqq0m@$`?6%K>3NRlO$X zl@PEX{9gBTTIiEsa`aPjKsj7EsJa0fac>WU(*w--hG?=T)VP3Py9iTynf z;6LLcNqK@*d1=!uBJJyDoE-G;qo9EzV)HhJ7o&efw?D`rWx4hd=6>nCU-zwO?JEwmc>=QR${Yf;AKkrq2)idagw+946a$;++5W`;~n3WVTwT%?+=E(N9+V`c9 zAW$q3Z|>}T2`{Q&7bVgXlzUWcdt^h5ka{b3ch-p%KdKy!Scm3w-FuGIvvYzf9x&PFw1EJalPfKf8re1a=N1W6lPaLZ}}dF&UvIg1ID; zzyv|x_Uw%`@-CM1j8&q(l}##vQ`TP6+wi*!2-q7dMxx1)ZQR=r1Gq6*G^HYRWZoM| z%S{ktZH%xE{UnXyfxVGu%&}Yl^N1*VrOrzYX4w#fiQzEED36rx;P0X9!DD? zZfT;x+vL3m|M6Oowv9-}Kiu1lIr3AQ{6JB><^~_^HCBd{83v44HH-yk#0}QVgW-}L z6~RiZ>X-v**IRNrXfRF(sF`?n)L@OcNliF6+$jkkbCCYC@r3;P(%#@|(qobdRXlmYLfA;)8AM#;g|tn^UJ8$GDU_Y|;)EoFAZEB} zJUVBlRgd`Qha%^@kcP7~vSefZSe_9FPHRoo{wB7L5W~4DK^g*sxg$iJ@!4HqurA97 zjt(m+$em&cV1x(~qD?8M{aAqW;=#i|X_`~G9GTNN8d}T0Q-Ajz16{dfX- zOiyNHOvZ0JP~`krLy*o)hvCtk_|e?WQP!$YELl}Qg0;fJlhnwJB?-gj1ia8YMb%6q zQI2HybAKQbI2kENS-V1N%F(jJ2{aJ}$s9#@-@0UT1h)E>WySKyh1{i_bSV~t>rjkn z6r-$2i6hs;Rf1#u17AK17(D+OS7DTcGNbY`N+^{@t}Wo^PElMU)5u{~vBbk^U_Gn^ zMgt*5k`awQMr3$orzJML;N5*-4)h>oRWc>K^aEFJfj}Yb>f2XQgy-qI(-BDd`6?R% z`i6k{(K)n$wF(DM(PXk9lG6S_iHI&UVsk}wjy7FSaqIR7Av}GQ?;f9jRt{-CR)35K zFG%88-<}#B?FBERL=X{^@nOK^x+A1f(+|eshi5rVs18vo{*S%9h?v-T?vRc` zzLYd4*4=ma%L2ihy20R>*$u%h1JwMtBBUYB`=&&^Lfo$MVb>=DRBo!@yD^FNp|+n34!$5m$AINJsg)rGU&+r-Sq>{h&@Fu|2<*h1 zfq$;o(C;l)2K!k~K2#dDo*)k+8@dqe<*6rvOo^oA-v2WW>6_odVAk0q|JV0mT_Qr<&U^!3fB)W(94bD>&^(l z+=#6@z(N%{gLkp3;lckn&B`Eq>w?YO;;jDHP+g^^fE7mbdKnqpBKZ&yn1H7BjcIW7 z%!x+y%TO%U^8Z<=H7Yq ze99CEIf4I-6{=!HvGaj}FoWqvv!knV1R)KZ_cCII2_Yvs?QB`pfUQf{*azo-@8n0o zCRnvasBWx`b7SRul#`=BNCOC9NAGY9b+OySdl(sUo#+rlwb<+HW11#hriv{Of4+$I z!C3}Lrd0|7X)FsZ#ZI_GsGS5DLgs>*n5;=du6Ku28bXPy5@?H(K}cbudqt$GI%A_7 z__C1mFBN`hvDiVva4R}`H;|Y(hJ;S4LcMd$pant}oRKGrA=R8H%|0XDN2rQYV>7PS 
zC0@6?qU&#>#%7vDW_0y?DRN}SmnZgmTKyo@kuoPJjYd|nnTS^L-&6@P$wEkA5`0Hy zbQs+{F9hbKO?$IoJQ%PLvyM;*0)dE{7W^ltLx3s}Ie&$PxCv}zLCAgK?EkqL6Q=HT zumlH}rRK4|CL&U_C1xyjTio#Q5gDWAE?UObWHKk>ZDsYrg78QXAFvpSR*h4Nrd7uo zrAPwUYHv1ZpPd#u%ne6wG=Qz{HCSM)0BO`I)!8W=-JB82W)nBNhs%7KohC^r)l4&Z z-&lZ|9YT{vks7qDAFda<-gVZu3`ehxqQrtXw__y@@-An%xvOwx-tyui)_ewxJJ|`- zC!_jBfF7xBDz=V?5eTU>26~NI+S|U&YJGOE{$_~OSa9PR3aPtz+7DDK?6N~Gw|_p!Bas4u^bCw>9j{L78SG?#$K-2^8~ zp>ie&It^rRZtoz3-54;D>ObasM~N7)1lI^qBJz5P7!__DgcSTN7h=bV<@{YNap%ChK7mwhv#pzrD`gadmt5u=fsq0hhUiKP;wq#pNngLr1Geb1TPB?@|aOJhmkm(y#nG_8O!w=5na z7^LEo=De1=E)wRQ7K9ZXwHu`!og|pLKSBkPo3_L0j6&;F zq_gEq|KUll%0*lnWs5k;qfCeOBh?R4WB*UzTf#?!z&=8QGNylah75i26akuzo|7vSr-Q;(-o%f|zn!oJlz+JzbhsX8(xjzFK>egza zN!t}FTz#?@k_FmA};5b$5^!?UQGX8(2+#?l@TySm#Awn; zVaoPTRMxZAL$SrbMh);PVftk*aSscQmpdqt^xvfm`c}wii*!U^+{L2b+=-Ym0<~;XeE(v*-Na*Q z@%xAf7jaCNn{ru^v(g@hK=Y~Jbv<`~bgCzobEDwHSY2fnP$J&cLKMvEs~ zT<|1;l)bX^uc1E1cXK+Ir<@D80>uZVo85RJ6VspQ+H#4l^2kLM=)Ud%Hy#WCbR0tj(n)A#{($ntBbKo#sOK*Dez*YG+0+BcA zKt4cP>gtw*7>+xBR)piW5AF*51;*2z#KoE(54J*9XJsS|#H<7mv;IK8)X)j4wVhRk zzY8)7t9=aTncL|3^ypwjoi!&gaaIsj@}7Vg|32 zQ8DL$Adv~t2Rm}r@~Eyqm(RWN;;e}8zEaYkx9Q3G>nU%fh`9QR#_e;p68X!RyqBJ#mL*fE9L?6&f|i?w*eK^PSP<`sQ%63&$tqjPCS^z(OE(9iA#C zx4XjLs-NUlWfQ|0ZB*flR&-&`>g?=ip^D@qqlG``ETk`Xj2JH3x1J|%dY8K>pAKWP zKPh-(#J&_!A#W#M<0`jzE&k2y)IUPCE2iH;U$is$$4e@q>hSQA++6kU_G+f)nrenM z(NO>PX$`sxOd`C^`f+hDZV4)c3hhiB;@j2b8J)yz)sNUXNEFSWO@o6Uz#0jqepTsCP>=y<86rin6a(}0j0_;8~v4? 
z>fI`Z-6B_SL0S>gIYRups9}v$RZ}F?U+{gU>h;HD6;Fnz!5ecTc~mt*Hx% zIImG-2Isw!Zj$dRy~k{=s<{`yVShTs*5q;_WBcS)p-$JrwIbE;kI^O@?*(zJC-3Tu zPX$uUD@*PgV!`(sW|S(KHn=aw`KUZfldAjz(60~WJaosXH@t@?zkavAIKosmyHE9; zB?4~SZ#ACOU+-6pA9s;W=s(Nf+z5~0f(_NkXQ_W0v}VSD^+yR{eOYOslK?pt^#Lg;jeY1MU?|Vw=MaeFpaWH_rUlLv7jTi zpd(GWiEZbylh>YW=;euLqirmDW>X7dGS6-OQ-r?$^NtQtQpbjGp-$HYG}|r3(ubw@ zaIS`19H+VVhl}LMD_ujWl%J>U&$^pPJ1i$gf*iKbp2TRnrij|abkD#t(@YP}Kl&D4 z*hg?H86N>F!xjW#B^947+L9##B`mq6h7G;;r5>tPAi~-RUGrP9W!o{@ef)k1zu&&z zu591@^h*QLyf``UhnjcOaz00n9$)+DA)lS&_(UkH&ZM(X+3XP0jkAbtb!S#h5wO#*Mvk)3$o53#FAZph0Mnd{9DJGZF}>dJ#Z?m{ zx{PSx(s-r@%ix6ix6GUHnlGk|-y1CZT-MW}ffvNjk2o&l;*a`8yH! zxZt{CSsS9qnK(>e$YJ+{1RwA^J-b->!tiasbM1BIrsZL5F*A0(M++LmGlw|wwH;PW zcUs(@&^M%zz3nHOg7YZJ<8Jmj5N!NUA5&SINWi$OqNXdqMC!SB??w)0Ix00p9j)l{ z_%(Io8pWwCj|n$8Z^yq)N2QX%(&7Vy=LV%O78~eKgG3Q>JUf!U)pjEr(JSH}UfMpJ zqugL`pDq=0m{;unu|1v4HM)1uXz_W@nWSdf23qhhU~}ibjG!^BcMIWaG=w{!>$qQd zYqkB(p5|g^SN<8XVp)v&W+>Q}co7?I$TJGbR`=K04=?o{1-l}mL@Bz@tB}pFkda|V z>8FQO62ix{0sm5(u3~#zp-@3KL^*oyJ#Cc3tGiP?pQ&u`&^8hEc=(NlT=g5JD%msB zUp%HN(2-LoyE+u5sHG!7F??M}z660v`TPXa(S0e@s&;xgDHe;Mw#&fbG4tMg17$qv z--H>P?X{iz+)TnIW}TS}x8@ksU6iNmzs7=U!s5EkA%rgf_%y;?ZeqXX`RmGqKSDe1 z;io@Xg{kfSFrYN9R8z@ea9;n^Xzx^7Punz#LT->fMl~Yy<)tL8em&ygz zAbJd#;y@(QK#!8g(*MfVgcCrdl_krggY*MMnw5D>uXFA}^}hM|*GDouNE?s9fEahw z>g9effI8|3{#sCULA*|>eMIn515^75=bz@3ZC*2WDLtJ$C+249*gE;0P7<$vsZ=k; zKDnE-+-AHUGXagjni~rl4hlG?D|ufnzKJ{eP?o#a3H+r^{Ol?Mxn%vr&imYgf*S3h zPv@m3YAnIj64|bj$jAtWEs)BD0|z_Aphg;1z>1j|EY>cHT&DO*@h-QjEOA=i=Q26n zMzMOR*L;TdE0^96yf}uLtq62Tszz|4V&QO+!=niX2~{IMA8-K=ak+ly^l|MO9!yGv z1V5z{y9`Ztpf;<6Y$223Aq{9*p}jxleuj19)ON+Lomw0jKYquQt*uz}M(v*T!Xd^V z>_5YXPkt(7mOtL8l@EELg@Cn=m%~8u917Y(xt7jkM$?mqk*O;a&u`;aCreO%=@U7)*N8-Mzx2(oE%`pH#t z+4s*qXCQ+q85HdeUS3JdB7K_I?D5HH4_C}2B8*@HbRshnXFPmthD4B|S(S&IaraiqW=9E^Jz@jXev(x`tL^;2423Ik`55>vxEMSe%73FK0ll+G?BLj$6vC^ zmlw!|QxpUd;I7ktgXps2t8+p1uYOi8E&B%p+QPq_l|^df*gX_ev{;vA^bkU?YIbCD zTzg}qjEHY`{_L|_jFh{qPl$Nyl5QFh(BQ-|Nnzl@F@$dyd#$`be%upe_wvBGPBwUq 
zotoWJc=B$&%X_ke!ZeMYL@gYvJ0uB?edF~pCd)~96xp|1WYdp)K9%N($De$_t&{B< z1UcF=lS=k73(adE*T*Xa)d+6)M#sr$)HM_Lgr{crQQ+w5Vf3>bMb{(vC zi`zN+D&T)u}Et9Wz@{a*(D{W>b2+VQ z6b8*;B};m>gz1PcyZGky5>PYzJGN}l4#I*-fxg3$(jf z;ge|q#<~G8@VGaLOvgmgTah?U8uG`G%E`xrOjlxLtQO7$WaA=ycC zJ1#ywUY-jj&3rpl2GlB0>aO)1nEZ*4zp5)M@#;R%Eo>zVwo@k0DDlkv;C}FQ# zt@q99!cRMTNIF|rHawa|JcSbX1;KVHx)c0-o=UWfa*>eQq~bArAo9&|io`by%$SM* z#>EIO<8Zk11TE+HWWLcT17hdpJlcgesEgq(N-^Z9eQ)NDB- zn>45O?Kzeow7Cx5L6?kViMBaRd<)#(Fmz;j`iwVYn$L58eC6tV?CUE7ljPT0O}3WL z)~X+QZe{HY6Cp0#qbPRiac^23=etzH1~`MDf2D}-3Pe%~Dz(#@gyW1#5$&zQB$ANSSp{82#Ll1uC z^;*lL{^lv2gGtk_;zFsb23kK#^i|-g1r$4Lt?6Hdp zan|&NdL5w)7^>j7PjNk-^B7Le$;vMAtc&TB!yMT*16w*8fH>?s9+G$o_pwrh2ZKMZ z>VNu6RX>dv>ehVPT4jE~R_;L+y7Va(anU`f$G#`RX~uldxijvunJG>S#Qbm~|xHM{#lcEaK3B${J-dyY}kEDTbimmusE2{+7PUJ(PZpwahB2(=T&wXwSe78l+)-j#KT}`g!pO-G2Jeg_M z=j)`U5$yxb2fz~mhSxzce!rVb`fZFtL1xRh=OQjME&`@l1mqKGdvogAi{>7Pn*?J{ zx<4r`X>U!;!5s%Pwx!rlk9)UlsDL1BnU3-;loff&9qcvsbL=~a;+sj&Mt_ZJEGGdD zXUma*ntLRlw$9sMqZfDguk+N8)4$JVO*@7q^dh3>Z0*-OSI+PT?6T^7fFD+<%lc~w z*FZPb*bi@=X)gHHR+xk5i6_yC(RTFbNN0UHpX*@`=M_UNw|9KD{kgJMEpCjarvyH} z2{dfnq5JFB!&z+0Ii~g!2Bk;PH$}bMQkaVtF~~rHAHW#HJPA`L#}=0|xa5ZAU!3k^ zd+yz~5}BcwX3Wb4actITVUD&dNj5+wq5nI6f9XBgUkah5pSUz*k|q8FOOOLntCzJr(YW zo@*mIL<^GSPn^8f(5viw_TuQY^+|#tq##I%*mj7neGMU%gerNda&7$6W2eSS@eNSX zE6W(=+hai&(sYuZ<(O$F=lH|E4o;E;pDEMaCiVJmnpu*x@XJD3&y{zriDE+4h3xwG z#r(96JJ-BNzdwI?&s|?`T<0rqAADYnJ{q4?K#Km+pX(W}`NXO=vZqo;`FNm14k^b1 zQ!-JaWMS(%JbOJH!NrcWLEkLVy5}zAU4A=oExyFJiH>=4pgOD8@R4^#9CUM}hF#OLH@V4-r;#P7KogudF!>gXVcEe~%Kxy&euwbu5E5^ zwEdm2y;%Jy=((jl;kk04b2^lnv`i{okJlMW@~czMo_Mb~4RO(*iS2p3Nif^yw@2Rl zoxPmDx4s>lY&hInBhKsMU4Ls5_pTI;2! 
zF!0OV7Xj#?h0m^xJuqSy+C5l4^1aj$T--+U(|-3D00(4_VZp6__+si^Jn>@Z+CKYh z>C-invF~1S-hCChhw1iVGurgPPZHdjJMhB3$Cm1mX|63w^0Jmj*yKg$=cqDJGRlr2 zYmLc3xv+PB;=6~LD;uxFCXn_y1>uQy<~s~X2xg*57f@=KDBTIZEmf!2U3IW8_9xEa zcu~cGlA*pMG?X1ZkjNpjd08K7^RJK;x;g@_QnxP+ZOYFi03?aHGeyrc{XHqXg>I>g zycJ3gv)eAJe`pHf)IBy+4Ihw_c_>8b0Wc#>Xv75!aE9Fbcj4%^BnO@`UaK zp!@VHF3b3SX?9GEeWcw{-(eDlN{e)v4>Y&<&b)m zxtY4OV7o^l!4mcI1n+~+0hH?%6lO&Hm2)i?E-)xYIM zvEYKet+d6ws-ok(i9>TNfVrQ$hDpVjcvr!m7{Yq}$Fx1jW*dJ=%Vd?wJMtYPq!txz zTZp~ZB=GQ3%>^AMCm@o*X?n)VFI4t9ci>)DCk=N!BNSe%U+m28oMN>lPR8B{IJ3w& z2(^7{f60x&P_Q>pN=`nEO@Jv7Vr*RC{@m~U-ilV7;od3Z;`|YTz3taln%_Wf!1VA` zf2tnOLMcJ-{gaNE{<3=ODzf-I3Rfc2B=mta^WmApJWK|ky>|P|E!>hl425|@7e1U~ z>WQ+bVk1y;y+zV31^kmLHpsY?RqXSP^1eAeWFoUIqjA#H5&mstYE<} z@yJFTbTaOnIOG}WGmgoeItyo_8!R(R{2@QCmujjdhv=kcl_AL57}b3qh&A45rJek> ze!47Ys*r4a9~}a1cB@a_%!tDhC|dlk6bR{Ow%VO)J@9j<%D-fRJ_Ad8>CMeT@Bv@t&2#g##kX6-+^!g;kb_F%mTn$AvexusiBoM;X;9=OFEp z|#}mL=?)1w-5X3(8`f-cji4s*2lQj0E-3`vO!A0~yqpZHxfCprrA8kORlp z!6{(Fi941Xmb* z+D`yEcz#)LUO>fdP~(jX6y-PmK9M%qci!>yQ#AX30+$cYgo2LnbNHeiSepk-* zwV;6Zy`mH;Y19KS4S_z@Q${IOeE8qLpA(xlEbf)Uef{t`tPOabca2HV2U2rPJDE9l z^tVx<*Ake#>-U%8yh1g!kk4^#y#BR6bQ-sC^ixLf0!I=s z_`ftqYZyiG<;h2WbWTDcr5ZsHImQPmSGql8ZgP=)m3Ba`qpVk;m@7A-Zn2S=BX5t|J+Jigw-ra~x##b8 z0%QP)KMHktAI|&oVVOt;&5^W(XZ|Pc$T@Xv>JEOUG(I>E@HCT2R+Tfdy3kU>bp9!0 zn|UqE)zil1$osrBL@I=-LErS~mwCXI3p;uJ0n6eMnwWZUWszFPx7RP$DewcBP?-pVpjn!i88D_?;(3Ldmyf)5`+9Xri>dOu7ySuydtuR=bD$(SAQR$zZ`O0G4w3dWm@zN?mfYxR zTxE$xZxv&?0SP`W0^+dl<{2SVME&XUG`R+vmRO5@BE~9?cGM=rxz^E}-L}^FWD`

    zZTq-aA;+zFG>(G*EC>#S==n$}PZRW^JZCE5A{81YuJkAbmpy28lO- zELYV{xQCTuU?3umEKuk~p2R7{`KsB~#9sqooqRIw27%;dSqKy*rLWINNcxns9oNYp zvHF~G3)`>a>0LgZe))v~7Y3Pj+beS-K<|Kvx+k$s*9#9dYr}j-wDR=@-TbD_7MT{& z8z($iPk1?6MnBsjfK%>11tS)|VbufM8BGtpE~r6%27D2gJ&awM<)%~PR4!^<0K}O| z3wL&ZZU^X%x&Zv)uC16u!2rr%beH29NES!|okcKnqZt;^8IeKI zVcyk^d<@Xbyop~Br$ZXEmiTZ=cr=p{rVkUDgYV5~CZve6Hb)t=J_X+h2$&udz;y?$ zh?L5){umP%Hhou;xW z3Mw0^(vCJo^2(z58(h6Kg^ZB#wGZ`iIsvuuCO!*S8}zWMyMK9+MF40KLLrOj8;uDo z?P%4>OQOo+8xfFZHo!LJu=O8ANlVohB=yxhhWkLd(e=j<>>&FM_$ThP;Fh3SFDJQ~jNJ$9PZ0OjfZXB-4(X1u115Bnb8kdV;W+OIvZ~{xrMSTYgTN7oe~Bq~ zehP3;&+{w>u5IGOBReFND0YXlT-fYV#v=3LAf5PwV-&ZCcg6ndM9yo{54yzkZ5S|A znC?uKc`a25#$#-^(}JY_dmei*(@itU9pg<1NGg=;Uu*izuZi8ZX-&M!xdh~{=c2XV z*8ZFworoV(bkK8ID5o2Zp}so=eQbHT>3aQ)J`^II2I4x9bcM>EEaDdJVd%(Vwr9PK z#Drz9?**XWLOf=^At>cwHjencC3W8x0XEF*|IAh4y6MDckMP?-jLls<_)C8OMr^$y z0gHK8Qg%aTT5{retW#OhXWrT$nkjpZb77zmrtZvVSDU5yW(nkA9&J7?grL(4wf_5L zmPDd?=>CdbCo!F*SC#Pb>D;pPtjA}5cln={;ip%bdW(Qp`^}zfL;%!udy@gs&J0oJ z)s%393*Z{!9FHjjgMp0FoCzYrXU1rDLuI-&7%$Ak9pQ3SMfpT2qJkLib$&m_0_CLJ zyH94p0bsY=KS=yhf@DqVBSQRFHtC|+61_ntS~5$zHmG{bOh-X(O!G;J>Fb7Qf{M^` z35Rj_j$y}>r3kM1X>}ztQV2|TIlw@vvT`U`gA#DSvPgTM{a7tPf^-9Z)A;)@b*kZ{ zyLzbvu7@S-hqg9FS{h67#Qzk%J2iHqL|AqjEU>YDGVP?tB2!e-nNEjsR55}$tv<;# zX{od~2oa8+YpKms62?dXCnEU@gP%URWmr8O$?^QX%a#vi%jLt281fz5Vtv4esTtvP zZr{>0c@d~cPK;-l0zH60U0o~+pnt9eO(XVY^SDUkObp*!Ns>8_!d5su6pKD;Gg^A7 z@7sGL7JM5qdr5aPPLAwAC$g#LMO?}IYnQX0z6rf!6eHW;N z0lROvX0Z2J)loYM&j|gs)O*>E&&s7D?h?_p#`I%a!GDRejTfD4Iui zwNANWdvfhi-Q`#r^Q(0GuC=r{c;B0^<}6^%-uCOPr^CB@OJ{VgOqt1j00P-sQLWD5 z*KESTYRtIYu&{H&O8z^Y{89iLzR0Uw--n0Nr;W1ncy!0cF&A#=C#dIH!6cFJWM$B2A6gQ1G zWT`2Y|0JN3)8hL5ND%_IaBhkIo3w=QBQ-e7a~Zr`b7Jsh5GUWjA3 zZWaF35D1mNw`(tHARkC2nKKEOsp(q>am!s8bN{b|B%Er;*?-OASIU!*UG)B|+3v1~ z#ropIfrRBX7Oz910b#3RfEczvF~8n8tvUiV^K9+6P_Q3CE(KuELfe|Mch31+ARBlU zBYu49KfkvK6gHqXuXhN+zd=TS6OA&AyVTzaUaLI^B<^p&Urv(Rk)IEiLn^O!Iebq$ zcgCAc7+QKF#B_VtPPW_`K!XkAz3^T7^oSq8-=>ab*msU?&?_7(LeZWFdRpoEml91s 
z9w?4+M+d}DFkcu}lwvSX){<6TBpT2rxOqs5{{0pv{a&K~`x7$z=C85eKjVNV{-y*0 z0LY3EgDAgB6ss^yszJFcOjyKuf)n?Gm<$~M2qb01f^vE(>QqDfnU~kkzNFwYJQkoz zY+{*p4Z)w{{3J#4c8YQFt>n7Ri>QpIe-2^XZhP9F1fg4J$s=QlZ2MK~KO!U13!WHo zFMmv%P5CL5aoPv8=4Dyg&)T{4ruP77R(Xa?gry z$MCiVV73Xb8gU6eBK@z%UM4pE9Ci^y7`BZ^-oa<}IcgJoqQ*BzKy1=569b|)%PY{k zy=ONc(nhDz`@TQ2>s__|Wu4ri!7hf5!FX3dyA(agcVdg27<3Sh&m22zZ^JHDy62-Q zsI`57^5(Ko25PP^>irWL0OH(OOYp%SL)&I|#_G736^ozkBEyMf#N0tRWT-iMiokz+JAf6It_&xXZ zSmR`cWj1_9*sVR)x?1}b;rv|=psIV>K$WrTK$xM(pXr+U>taLKN9YBe9`M4-flxr0 z?0LkwE%~f{>BLRdk2gRV3eH3;B?lh2Mi3*Sr}hHm654HQvFhFOAssT_mVv-jEtho) z%K$+bj<)^;@uaJY%Uatb)J-S2a++S5?0(!yGorbg2oiIa?I>v<4AgUlGvM&V6PH6n zANw&7P5iGsAFz{f_ZWyWPrEUPbH|1L)}qRR?tVfF0{Os+KCkFSzttW0KHgBfLWRzp z*9qBQlyhDt2iHoF(da$+W;}p48fBBfZ0hk%@l1Hf#lq3SrxAc1Ab%Nd>1N-)^0QI| z_zCCq7_FRM5*K_`UATRDYSI01C^+nB7=0|fv;O3Uu;@q@PLDI%>?&XlTWYugOXk=a zA@Vy{aT{cOt15y^+d)@>zt2^dIiB5?k_}UkmwgA0t6&qWD%7Jyvtq4dkc7roy#v9Q zQU-C6*QZ9m=`Jm|TiAy@9;Ibp{dOa)We~*hdv}NA>euqGB6dNHv@&t_*Ui*h~R2bItncJtQZsj#0l?Ygh!N47Zm44^3o8`TOVXhrv4}AV|CL4ZhJ= zrXC3OJ$(CWNuMLs@Hiw8@>$N{>-^;5Ex7fNxT_%+ONowoKuI9$>8$CaWQFeH$yx4_ zJH)3==U;&{Q1TuA6{tBX;pLDiwMq8$lE^_$j~JuHOZJmebb2n5Cuc9}F;_EZRXu9o zJnxaT$=<)OaXV}LUUsWiz*AykyiE<<7y8L~J&li`QA@sOg76QzSzqs%jtcP8CS62- z>jl9>X(~5cSxhEHt6*_QFn593G>d0Uwcp|N5YsTU?Rq7DJYh4=WLGAFi=Mo>`6Gj* z|7RZeHz`kZ7a4YWf|zkZwU^mE8sdMtP(FQ-P!~ou!3Vqu)US(WXnvsB+7F4zcfK>Q z+zEuMO)SlyJ~9k%@3or|?|+|B!t>YyF?Nxn;<%eTbrTQ}>XeyoQ+=#qLMav)ntAcbl7ZCs4K4YO2>F-4zD{Bv8uEwN9C-r9s25^^ z8=Rgz_cmWvN}iU#Q5II^F^T$1LlPd|_ax2}+-0#6nmJF(J#L_SN#A0^mP=|7ZSa<@ zfD8L<3~9M~>s*|kwAgHfn_kLrNVq#R|3Wfj>+=trPgAg9b^L%DFKs#gm405a2?D!-;5-S+w) zjn#^_sk3QKI1KDSz_$b$vE?#V{VPpVTAz3JNI(;dMR#cwq(`#&C!Lrq%K>6<_l))W8DfUwfzCH)@f-S%*rEYNZs*%>+Xymbr2deBR%3_!(f;ikH!XX zyY<&rr7L|ZaJnKJ3+M!$Po7~K9>1mY`6r>w;9_@$E)!yyQFna$H-zJ;>uKuqoz z#^c^DYHF?eS63qfaB>yd3)kfd(homR)T=)g?2Z3M_kwfoW&Ysa>CF-miC4uh|5poO z2)2vgWdYmqv^6=NAa1C+E@HN^qkiGy?4xh!5tXjn!^ZDHzSu{U2&k??fZKpCS+(mV zW2e3L_UP|bHl7d=Gu;Xiht&lHmrqAm$Oz#+p4W0%YEi*Bh4A|`F8e*-rHB@>OHw2U 
zgT-{eYd-1m4wL&1j+@Mj-a;~B6}hIGOZ#M-VreY8%B79{4FadxSq9$gQnHcZ7Np^^ zy;&1wesNyLV0&MP%}86)lPPLNM!RqDFRbg^XBc2@pV&eywe;)C>`JE~i`^uwZ+TH~ zC>C~a2xDLvQ*E3-PgUULlT@-FBzF}3J+5Oed#v-`-HFeBz&;A;OpJab?`a>Cz8epv z74v#*mXvoc>oH-$e{+z@3g;MN!%eFx7oPu-Fcr~diHUrU93(ofQG_ZN@L)8h@D7+n^U)avw!bwT|d6^=D8EX~@#KGdojWPyku5_WSDbUCS#ddXON_zSWW$~w5hc=1B(Bfo zA39g#63%YP0BvjTnv96EP_X!5`v(P#FCZbBi=1wXj(k|!v8vOPb!xL-chuCovn)fA zmZbFo%Iab=pOfJLhf7ctTB9>2o(sg%P-$d1UsE*5whjuHHdKoIFT@)!S>`qRdn1mb z^P09cO4bPUcrMkD;tETxil;Hpk9;`)ayt zGe4*BF?>}m&V7S4EBB-1!Aa!^7Tol$eBuvFL=fCvFwFUoRsrkw*HC8c6wO=!yW_GO zk~B7FC(5)gb8X6wFOicYXzf0+pd55eby%9cABL}a9Ojl4@~Z7^CoQGBuo)MzZEj9! z{YI>Z3n?noVm9aUxZTEK6YNxZsH!Wt=skS6NWIMCLpE#p>9MnnEI8%@NMHMm#E z8$w06j@bUTntXM1YgwQDL6yCGwR3x_Y-T96~ZiawtTF+IoYkcEw)28SZ5HXHv1UC5T3N+k{M|!!j-kgB} ze|lr@_zHNw*D_9bqiZL?nl!B5-`E)Auosw=NC^U9?%YULI6eH+Mu z_w_@g6xmA%f?jLo=N+WKm5y@2r`%F=uchgA7Rm;-#A(N{s54Lp-be{Pv3!1CfV7zl zbn-8yyA0C<`LB4MjYbNy0CTm?XjweQ>wAB;usT=9ywECoJv0i(wl2o4)`sOc5Il2L0DSrX!z#`a8MBfHdXGb4vr`kBh8R ztd%L~DV9vjbfYU`X?+!iKf55t-y`3NR#|Qt8wgGXnp}(~OoaS2nR^DWW?tNu_1k(8 zOM$a}@PDX#%dn`vFKqbC(A}K^BGL^?Dyg)zbc&R8$Dnkh2uO#L(kVHlib|(+DAFY% z`RwuM|9YNp@8_4x56;Y)IcLXO>t6SYO=o5#sJ!2Ge@+^!#b`(QEQc@6*TiPSi~r?g zLbR6&OvH#=4L*l{A;}lM_cBO?xU--N$;vMfthVHc9d0*4-Kipt}dR`7z}2VC*?x z#foTZGota(5KZ3k=q^fKL#PVtjPxy^i}KFyPrywxZehDCyZ|r|DjVP;M!oK&6mT-} zfSkoQjgt8j999!(`}pU^BOKdaAB$jtJR3bXYQxus^~r2MiU--sF;UP(wWvV!gO?H#o)##GhCy1@)Z5VFeX-n9_tct+8;bSWx><4g@%0AfzMx`| zO$kwpnBf-1GP+GZ#sY|pS!KO(4&_h1Zkc;98r4&J+z~w6L}Z|u8f2gitZM0pzNL+M z8G-h>2u#3Fg7@zxZ=Hqp1*$#k2yY0D*_nS!b_p1p+r5-x+%<${H)RjnhgK#Z!6$ld zJi$`sd9`1XVLSyAR^z@nK1~pKgBlv|>bK}bFKQ(icr*5V=Kdev z6W3}OzSw)>A$d>}-r@hAsdDFwMu>Vigpr>Jhn|fvB=!=pC*O&?6%=?$JAdckda7mj zmuQ8M)8e482W6Euse*2)L4Gs0A#aV%mlPx8RW|j^ovGXEH5qElY#EMQbE`46QQBE* zTU&)wuDd2XFKv$IOL&tw$>|J+zI;8wl83*z|C~+vdC#GQ(pn@lKQ%tGb9Qb1-1HLT zP`~oqs0nlY?U$~TBEC&t?HD=D+*oGqcf~697R(Jk^n_QMWJF|88M)5%iW(;`S_)0F zSnl+Es?oebAwD|NqUgQl+Ty4ffOg$WFwk^9TQ!8ce<4!C5z43htW~q4{RnSU$iEbW 
zfE#QBshUy~u`MHLFG|JH=NFekle;f(Mb@*ME(H0)QIi}I~9xXa-3^jjg~2XZAUB_ z&N#Yd4|?Xf++2^0@uXd&2)s>R#k@2P{RE25#2PkoR`9;Kd9q4<#e|_gC>>66xj{U( zxA8N3lZ;p)1`oR16^HnGSMF9HWbP8Mt0$3t8Du!wVN*-$4IkbUoGpap=}fJfl!{eB z$Eig})s;zm()}U*=%Lw){CSd|!E9FuVhrqHsO_U`4lhr*{Sx(|>w9VF0eatSPLntz z$j7f-yHWkv-1nIER_Vtu|9W)j+D}_rdXDyfpaXV#;f;^n`GU58+E2Vb__ytxPNxtn zDyhYsPO)(DN(Jwq&*(p{?wCkuyLIVf^Gap%)Abg`ou(fwTo-d`;;Y3 zM@I|w6Qk$EP%_1*X1Kp;NBBbfY1@Te((0i22xOsj7kByl> zTt*U=^HTiLa$loT6S2_JvHQO9X#YTwO1hD|Xj4?6GdD(OAZ&NnZRI7NYMLpDb8-eg zeCJ#TW>5EG?@T0?C&lv%&5robZl!a7Z^nS2HnQ=RxHPx+q-UbIr~9|?hGrR`e<{kF zo4D@9>OSS1LLD3YmD24rO-dX{n83$5OX%IkAllO28|Z4!zN|vv(M!HO zzf2fdI)OY@Cg}bAO6S7D_OGKuwhgJ88Tl!v{EFedDJ%w`4Y?xcyJ6qh@@wQLFrZS* zR^Tklv<+kStK&m{UcXeKg>xezQKo~!*-fw9frQ=5a>L2Zg_3HX(BR-Yn)pZXz@L3DAQ%_oeIT%%~J2sy%X;l7y-2j$43kZR6d%u{fcb)L=-A~v0vZt)*;P5 z05~0wxY8wl6RgT>^F6vlRSKBe!A8qT*UO>v>R!Q8MXqMZe1?PxT;?z`nW?Ib5p?E( zC)yWU9QJ{}^=PyV z^O-+#vrvx=fvwm6SZ4nF&V-33QJ5!I>y+8rM~ZREuk=rl#c)ty2|XZR+%K`S!&_Qh zhbFf67e7t62Zroc9;@f%!iC}CDv<#q|5vIdnmZhK)5&bK|ylGIP`svGd`QZ{EG#X^!MEdo473N zAPJ^)+d0z_fB6i9QkBt0Hr`M+L#4N3C8)Kk^GzMJFt8$hu{TUPTQe2CZbeWbBCkw9>-nA`_9$VYQ8&uhA^mQ15~AMF3<6LXeLTFz&_*1r}nz4526tm6hCB+HknfT@U7v1UPb zh&ey67eYMpoP%>oGl@tBZ%Y#eU2`fCkr1$9LSse?E#vVRuUPAgt}blp=4ak9`PK!* zV*n#U1-Vp+g!*62iWi?NOVO1=i@^&Mnv=wYbNZ(57voi8r6lv8WD@eDH^UK!+7J*= zty`$BPH$9_&{az`sUgkpH#zekzOi~63;jwy@IaJMuw2|bx(MjJs#HrW2mOZ1?)CkY zGi_}Za4&?2hlead2dU%_chL|=sZ-R@@6Px8iWk4gl58H!Ol@~ww+@Gg>9PlG2-5X$ z2@ceu?{8l_jYz4dpmS$E?R)-NF_up^(TZ}B2u@Ib&r#;*M>-?pxe8-TQQW5d@6SJw z#T9elYG6@79zMcjqL@{G5{b%Ad=`oNf+H?>#U3V7>GB_$U5$i}e%qhizTT}3OQWxk zT?hKze62dWNbvRa&5Na-<)#^x!<&-lmx9nzNn`cLx58#!HHMVNnl#y0wxW^PCA9Pd zd&%|n=TcK%Q)1kD#ngm~k3sqb3%buDA>Kg!AFYM#?0TG9Ef1*bN1|AP{r??xJ{scl z4}9pXYh_X0_6`B0R)zQfh~TEEztx{MbG zM~(?#2CV%IFp+D2HOG#07P6f1VRe3N-3wHHcx$*WC$IsXYpFkaC~RNL!+;~3}I8?MbJ({5-7U_3u$MGhDa6rptbPw&n;ROS|5DS`b&@s4e=YhZ;VZZ z4t>_En&&B$_>98!x(Lq$WUy%yGi@CM{bQzYp;xPgY~woA(t5i)^R+&*$@`r&WCvtZ zHj7E5=%RTTt=E?{=+xD}%d!aVGZQ}Fw%>cxYhPS+k+DqN-L?JVg>OJnWUa+udEEs$ 
z;y}vj@6ZheG7W-vr6$*iE@G%V740F&#S?L_6GBA31QG%E3N`dr!?ES5PDm`EA&%mS zC$vf{Bi%{mh>cQw#vSRE;z7=A?xP1TPe3sD+*c@H7F=yCLQGO+47gH@EUWv-s!DN^;HZl}hu zG)e%nFo(zWq{A;hXdTIPU8~Tz95>Ce=-)UcP$1a)@(XG|awWOuhiu2s(|(3QAW;M< z03^{m+ziTvN)bfIg3u@O%4e=iM?4kTyUOxyqL1K^wJemV?3x`4gg@xiPN+5L^5j9b zZqIT_VMUHq5J5uaj_mJrTcu4jAO8rtNgiymmNzW^?QiPVQ*v4LI*4;UJHo3b0^mJ2VyU8UQ2N-f5h8uO_LTge?yo!Fn z8AGSm1WV5GW{Dh18^D6nIhWh!7+Y(Gf%b@0tRo?Vib*H7P8PrMya8h5=T|Po3+ge~ zDKO`X5!z~Ti%`d> zV$iDmY)!nkopG?gVM_%Ru!mYgu>)>?#naQ)Le*DMFl1&FoIS;XqhG-M=hScE1sXU$ zG0-i^s!t#n-+EsIbVxxIO;{J{-y-GX`+~~?!-ge4$+8IxQZ$qg5updhe615G09S%# z)oO`*0y2>6;rXqEN6n=_yo+ks4w~Yy+D?0Jiej%74JLqf+Aa4grgf>gfk@YeZAq@&_wtRubI>&U?VjX;p2UuEb{U;6GcNM<5ypxN#rw1;_n3<<=3so;2BR>g zaH6bW?^HV70t%q|Cgz-V&{dfnkiV-j-y6d{d~{kre2YsJZ5rU)RhRla2ASvbqbm!c zCNGv5!ni}qTPeD-@xc~dj*~VkB$f!8pX()oGVO&+VsVTtZx;=r>TBEET(<8I7D;lUu#}NBE%VI=NUHi^HFu>M1p(y z4G=PXYDJz*^@*cVMcjiu(dBMuA1$A=ft>=F9$6pfz&g>!(#;=clH;M@B7V<&0Nr6m zc$&pJw6hg@R)=x>w*~w0*jnr7;$9J)9V;>yFHL)@&(=*v5AdYn%aLhXHxEFI-?{N2 zNFD2!7-}Elqd~OzD-T~+T<6#=o)Br30^=8DBS@RIvpFTALrg{WF|$3!q!6sc9Y zwKWatpAz<7T>-Gq{#t3Eno2`#$)lD4m@C<&5Q@Y1O^)F-PR6qtL2;FmoJ{mhJ{8XDTsb1l6c*;XI)US?52Md@JJX?z7A zi@(-Vy;0sjDN!yJ9Fmc3u+40F!@#wE|qg7P%I zXHu_+h#LnQIUuV7UURF65=E{?6$7v^Pbn~Cd6O-KqWC%Btw9t#1ACiOUOh2tSb-19 z9xQ;q8jZ%*n_ded5ODKEQ=_epV2#kNrED7+9V9FOeG*S-Ux+tcAuy5K{z(2AFaf>ddQ1qm@WEG> zIu0l6^#c|8K_Q9ZYm+T|(_CF^(ULKdSRFc{-}{*9D~Q?d$=-{P92OYGH~s!=-sf1+xX8%OUF-}NuUL@r%F~v*m|!J zgVzl(r2)*U?MR|S_OK3(!gztjB>!mF{2SP*`e9lmnkjsF%%uY%e7UZiva!D)^QF}- zu>HcJz`p;W1Ah}hK><&Fd4H-Zw+2;AGb0@nHPiVl=Gjb$gmPfQa>nR)sv_yFEecf9Fy}?939?J);ml|_t+iyJPlSm(D z>#`@+0^4DEoxRQ@bg%K`ZdG;R+QZo>>`3}U!IsdN!Z*O$L@u<>Mvs2G$bOsm5i<92 z`Oj~iZ5fFlxp3*$BMtdnKt6W~T-TsBp$gbFu#~utEEQ(dj1_#?s8(40=-+Z=mAhqR zenn;ox?C7TgF&l3ANkbjgR<>$mo=W%Els4k2y69TWw$m@zr&+KhjDzKq@usLEGs|V z;qm$^Zq|Zi#xv2Tv8?}~b8+@VFVo>i@(yrnS*YQ^VgKnkKpti6hFvPcVjZAH1Cm9g!!cn8aikuHc4#JAd`kVdp^{f&PGC0h`%{sc4!(q0{Y zru>>=4e%GcDu=da8dqmAHg;H7!MwUCX7yz;k;Ady8HGoJaQ7YPbfHmXOawxZaq;Q7 
ze7qSYA?5ethAiJ-$VUy~S46YWTzk1oe-)rI0=R1?{?w%uRqQTTq0n>#2~_ zm&)0+b|xvgms#*}&2R7KY&OsRerCdPtX!#siMq%KQ4lfj4)WEasORXrE7$? zg3*`<3=m)^&T&N0UCE^t_Zk8095Xo}Wu?QI!1*2`@Vy+fxl`@sS&J zJjEbid(iY7PS%gda-RB(V!z~_=*7}_gLM1j{3{EHzfL_9Z?OSu!~e1gY_J)LSs#C; zh46YN87kzybK%2xmZSIhL>W}2?{0h+VeoNNJ9uh(eHk#gAOy&fhUu_LQE#Spd#HhN z-@<`NyNJV591B7V1M>kQh%LGyhDqU05VHzA`@;F@_gy zkl+6~>~$}$qb)2?vkAVLR10&w`>Pk=5Id<06Uv_tn`|pEfbs?*I13=duTi6xz&=76Uu~krOb#GXMbGz|3MVP*h5lRzKC@vrGNXzWs3m*(VVNv-syTef%P%+;D>%54Asf9-4sa+a5;iv4zA1RW z-`G*+d)n`S*&V%Cj2UZ_68o30y}o8quUDh!kB9|FrsP4Ry92|*Zh+-Raap8R*oiEF zMjN7m=#L4$KoGXRIzf0{#fOKOhO%I%p6GvH`DqZxlOk|jL(iRQ*>$N=*JC2=cX7tY z6r+VqOD+ zm&X&DZ(LF~rT5J4u@AMA2ex@4M)LH#w{qU2?>g4(Xbr(`TNMD_Kn(#JVztS|8kYrz zh%HyT=oW>3kxR%Qefi=p;F}CLla*^@C7;}ssaEX-PHWtZI{(jf7sYpZGm!Ze8*7kD zh6xoEI<^<~L{8PJqei}OP+A+ih3AUZ?}sBE%8LLDx8#zq$?&~9TNyd$^nW%IZILDxfPS!U=58MnJ*oN9{KS@)U%?ifaG^o@@4KK} ze|nsu7r^45YcZKluhLGyAYvvsce;I7sgtwh@ja}Idjxu~BK<4AbYFZ`F?J#bNCE8D1S70S>v#KGzIlG#TmVIL%O;@*?3WtPUR$e7UZ$!Fby9jR zU9P4Rwi4?~s*r}v@O|t2NEt@AgJSlJMOWg=lRh{CdGfgI_#hR!dtG4I_$u#^5&sLm$~0z-FZZ_`K8&z+B*c$PQA&Jy=B{pQi{?>9`3<+J8;?)kq%boYj)w~<2-2%l5I(`q=a6N{ zS0wBOZO-bV-Hr-Ez4qQA$(Oc`=f!cGI6TN^-)mRMjX$~T}oH_h`BSzdFu1`(i; zz;x`V#G?m4q9$)_>&#HA0xk~u{PWpUc$s$#ncjf3&s{_+z0{H%B> zoc}A^g_%B)5Y*(wN{B{t=c0&G`&wsD2m;iV#LhYU+?juT;Zg#KK6?Qdx;a@6ks4&) z{XtY_V+O0;+p5?`8-hZW;B0O+_pZ~e%2Yo-)RC>!-l`h69>fWas8ci(kLQWH2sVxi zk|-i2zeEGfO_WSg^=X{^GzXpP?8HW6Mq2r~DQ2eQk~3^X!qI!y$Y1 zu(jh|=_M6vemmN#^+Y0-!Z0N^Y`Du81<_fzB!X>ciU@VAs>*?cxsQQcS^cuz0??2S z$?i5I4GeUs!#inE8eFEt@_BdyY@&9v8F1oQpVbp>4P=U`>yv8+tH-s?uRF8J2ZNB8 zwzMc8diQE?-0${Z_K=Q@!(rt|H;hgTgKqcJ2<|b zz!BOVzmKAaYnh41s&)`m0CFR`OM=y|PrM`)DU@B8lz|<;_}q7RMD6AR;PTqUo&Djf zRfo}$MH)RAf}Rl4(d}tznf383aIi*)u(2TC%F^=|RUM3=s&P@w{d0Yzb6sUV-8;4< zVOk0r8a<&618LdU#$SdQbG}8if`c8lag-46{=y1L0#N#;#R>`IFm1W6d@FYb8sxS) zG~|yD%eS@y=5+mRuv+aZ`zm&197$}M-Q<4VGiAmc#N?mf+d2i9u=g@U?r7(#ssf+9 zy_;Qs)iE)(3`P6CipA;1Pyo#s3juDX-hZK#;YsiU6k;=76wrKGGeEsh)=#QY55j6I 
zK>u19N8w!?f3tx{Jo12Cz8T|P(9lEFHY6CLH|e1*kCoSP6-~M_;Vpm&F~Qap=|`M+ z-OP^v-Mb*tGj}`~%(dPB=NZ)B%2wGyypX&0E^{IFwtk35^C-EUL-I-7lGV#2U zKPM{}!tnOjElv-bdKUk|n`*zB6$?d*v4`38l9v1<#g2H@01pA82p~k&%-Aeh7CC5| z^a9Jwaa>CJE&Q3Y`^q;b=T$>_)pCAwH(zUs;g6eu^FDRSi5Nu-R|fG3%L!o?TGW=a+xm0?EoNmgwl+Vt#)M>j9w6!X zVujs!6hS?KBv$YP3+XHKN^BWQ;>xr;f{aJ&Glb=%&qvF+o|6Z9Y_QUMi3IIYs=o1b(V6^S>CT}PLj)O zIjz7LR8Q(NwW zHq6h;|0^d`4cj6}Ac>f!?jXvC0JNHIx_!SoZf}2%90MI8s~N-9B^LM|w(#MO%=$wd z;M84h#^0Z_<4!Q!ZusR8+{>8JPfC;1CV9(3^A+GZo{6n(7UJ7s=RA{mw}RS~pKzg8 zup1vGs{x~6+#!pc)Qdy~`YKA*HJF0*641>dVy{jEH0O@7@F&u4EN{c{_5u&j0_N!Z z&6PLxHHM5NB%Ai{J4}j#sA9lMcz|SUDM9w3oH4NFE`y%F%^%wuzr1KDF2OBpXmQ;R zaL`Baml$_$r~X)cQx9q=mtuOJ%_d^qwCA||379;mx)xo>;{E02;=RSP$)8J42E{>G zKAY%np>m(e)`bAHL;z!2PX8z*2v`>y-@27Oz6$eBweS|-ZebW zwNi(lk6nP=G%y}70rl>jgIV@K{)0(Z9j@H0rW@4ExEum@Y7Yqba=(whP}2Y*3#J|3 zzUE8QF%gVBd)od38^E94H~gA-3UqO6E$gFQWHB~Jqf!_|5H(0p>EvYrJf6b%sNZgU z?*&2T28mI^sS8N8Nicgc$8HH!-5s(x*SK9Jz530<8*gXMTckju4Bb}4Cx0K1(r>HA zk76xoXYMb>w7qokp_OP1o(rnHIx1QMYl#N@hpl8M7*h80cVYxgwg)6LIDhyQ{==Qe zZe3F=ov-&Oy0yJGt|3UL*wa^6^7mu9cJH#bvb}|uPuT%yAm7G{p78*Xn^#?pfOGmN zJw(;*U`MbNCBks}^+%H8Z=RTP1L=>kdPUKXTHfM8-)A|m_S=K#{zs;^Spe5v^7m^? z`Z6MLYJuy!=!zZWs6&nnVz9(UVH>VTgglZ zf~Ag}XT9Fe!za#bkL8U8zTh(lC2PM2rglGg7e8S58<@y~8u?+K`t&CxVE3ssCT#8% z1Ew!xM~Xq8WfQlUJ!6gOQHWXFR5a79?W9_*APu^)aOtkC~8LIDHj~_ee2T4HeSj&gQciJ_eljx|i>=Dq$L?uj>_EGB;dd+Btg=H~(&H`! 
zx(kf|a@kF14qYornYKy(snVQR2e@yggGZNIQME5(en|hzL^$K)y*vws`@P z(`6bz5V7aTgcs>`fYJ43IF2`2fHSNM0acxcG7(9x$+pP_6B_YPhkno+t-sngRT>#L z1eo37*vCQ>J(f#rd0;n6xGsF}Tz{s@PvgQK4+N&t^hjt7ho&eW`gF|O*%MiO6p$&@ zr+)OfUvnJM*w6yt-pry(m7r|!;BfLZNb`t!4+2Rjwup3^5&D#m$j|&eUP)3nB{KTb zC!oB83*A63t6`1F1AGc0Wv=SUQ;JQ0f((*K(Z=Y_pGL03!eoGXFShsbSv_x=14QjD zi=@KcIs1>#KPTZ0%XCbqctlm$*}g#7)bgZBbVf#7(g37+g}{v93I(Cr_j9QMcS`JR zyl{ZqYi?`P6py$^NOU4IP2B?`RufxC9xd$kpq8ffdu*7lqD~6OCxIC3aAD#4h$;~3 z0t0H{=@Dz!=>$BVDh=7roDf|XRSIt<;Nb6}hzy`HOh)l7X{=$X1N>GX7|=)g`BP&Q zD31ckgp^OPKW@B0`4a%=&QTuZ{{}p1YuxRZ)ppT1AczQpXH{<$I7o+3Keq)ja18T5X7Q^8$n zkb(44N=f@?6yS;Ac3uRppv4Pc33@2fpl$GlSR)ECgmL2*bAOo!LGIWZM9Ghd?A^3{ z;I3r^f!3@auH);Zu5&FRVoExAz{*lzQ#O0PBM47Ma=4?={}Ks=0bm;&xLIe0k%@^4 zGntkfI20k@pzjE;Rt$nhnBdlGYS6!^AWQyNk@skb!?o=J$o$AuObH@>r<{Tw%ac@X z*^YsC%jOxxPh7(S?5eE%XYgz+{l?vQQCa59Xz7t6{!#|Lw7|t!$A>D6%sasj5We%m z&J{D)E#C2Q>?tvzSEhJy-(%7kC4ledgA+qRu9CwP7PJ-v-KVJXtBg>9q8^Ri!B_;n zfwKa||1g2*MpygOge z5R(mdc6J!tm_!xDdM>)2*BN`PqUb{yP$pbfUJDQbBsRQ;r2J2lpJz}B0YPF1v}>6+ z;2u*31#G;a+(?ePeoZ+Rlx?Cjngutplc(E(A@YPEO$AiKWvLSi zKy^aqzuwWqXRmJy(N{1(o_*u)W{Xa%^5H-J8-FYZ1hOeBGOapw zYrA2xa{iDsYNF(vQPaQ{fP@enHZ{~_P!pP+D+UdOpJdT^7eYnP3WcC1`w9&GK^j{a zk{x_UxaO~|tKl$Lt&Vm0se}QkV?uII4Z{9T{~N$t{&5rvxc7G%K%igCE1D4EwT2P} z(A88f8N?@lDh0L#GoReL4X!{%(Sf{x+#C)G1uQ706yNR#IYL^fA9R5+8f|E^&?5Vr zhVq{<_F_VD46r(|?`ij`A?t?N976Ei|2K{5??!mTSMc8&{y#qr!g7p{vI(nN^uoF^ z>YC71IT%jGw35)t4M652$RCt{``-=z8S(H0pc<}yX6KHs&v&U41J^AR(aERC4aiYf zqLzWf{eM@c#%2(LNP__ZN;3SwazqNIa^s{K41f{M017!skQ1F>8NM~)0#$8egTY3l z#6J0hwHESJkcP>~8l-?FD}dk{<&AQqO>OMO{UYyMP#_8#nBVF2loj?7-De_)rb6CxxaQyFa}8wQ;-L6&FDG%Dc<`m0~8fqZ|Y zG!*cc1mAVc!-Gm=U7El>lbw`cjr!esWlZ{dTpZsvgIR`(h?%o2|0dA69L(;W9x&&J zpnm7yfPtXQ1WQBI8&Y&{^qj^Qi68`%P~St2Z?DeUz9ko-sg_61){jr`u3`LL7puQR z{l^==__9K^NF?uf_2pnxd!$l>6-KX4aLJ?o_ez?0m@Eg!VwMSNL$9Jzi}G|sN}~+E zp|&MHgtQ?Dg(V+;fhZqSWZGd{&S)_`zd=wC6CU6N zC$WKm1NfYiHDyq=n+`uu-~}3DnI3~up|9D>)KO+x&Nb*d!j6Q*hVve@B}xKvn>3~9 
z(I5r+BsCD%15r_`miY4|2a+U*&&3$Ek%BJ)F<$eA(pY>N37IF3HOgNxNeOmzJaYDo zSYr&;IJiZ3b`|;@no=djb;3o*2Fb=QEB93v#DH|i87943A-yvOe!KpGjKOQLKTqG!7^B+Ns z2!C?|+OGQtQU3IsH!#^-piQLY65isv)t#47ku@($VR73*;v`+nsh%8?6pMT7$^-}n?U9+Q9V={RIZGayefj_9=n8?Lnhcv1X8SkXvDa&K z7LU&6UdVe1<|Bw^|GV2%!Eh{~*cQLC3HiT&l z8#f}#F#lUbEw8(Ngg*91Lx8=aRf2n@8*ui;VNdzu1Dyx|cgeADU<~kZIuo%VBovGO z%cDZATO;Qvw?eB|>{+r_8XbLP(f?Lk`3XHTW)l&vceyPG+Q^|%!y&{do})NtFmGD4Mq{`+|_cfk7M!=yTnXN9N=?$a-8ZEr%AVN6P=_qgfn-(}@ z4)rJOmgNc&buj+9==jOXO|J^~$ z?+rOg!9m>V7YUf-9r_-z3bB>^(!9;f1W~pdvbP##=rPCYi%+*>Xx++b3*%vZsW@q- z0y_Q*|J`!T-`AE#;SR22pUaDdb%F)yv9{~saAEXczlS*a=I8QqSinxxNrC^sfcI}B zK=PL}+SbI41(Sb!SMcKij`-$~=0bj6y8@Cb6_1}zI1;XDuB-Xd0l{K(Rq7+B9nara z!Y;BS8Z3)k(gy#s8s%xPF%`Nyg7ep2v}|)!t=R1z|M-p}rJ*9e0?kj2v}_N&!Ty>t zb$EGovOVwA__bo`3`2@32B6Sl@3&VWs-7)@clx|}cmGHS?6UZs&%-49vcgvc)NN4j zgSSpkNcya1TejhCGXGw7qFxe?iEuBpU|$7>-n`qlMt_zclqCA}w4u<8CRO!yqQlD@ z)eDTKVr&qSc$Udy`1^S#p0$7nS_%2iyU@59sscfj=?~=Z>9k-o zu2e1g#A;7Fo5c(KH(~qKC(bQvjq_#O2W>Hn3rgq-uDcBqnv+!lyB!Xs7`2^~GF29X;No#8Y zNZ$EMC6KHK1eVmiL09Fb@NZoJlAJj|wy>eoIFoAi^@P_kcU}VVv1!yrvaiIkk?&*L zt_k|JbeB)8f1^q6$-kE@sF!mg&EY@%sX99ns8+dpQw=0GIRF2=^giPT1F)j`c_)o00Y1L&%sa>DGEe*7 z?~f2~p)>~pdOZ3+E89;>j{^pNc^tDQHRZxFR`ik98fi%mY9cfZx%2IlLvM~+uNs93 z@7~n1Ud|n*aP`XY zK2d_wlv8PNNc%^zw$t%79^yMfh!MxpJm%+L(;_1|V)#+d3UeR5k5-XU`P)av3^Fhy z>#-^ev~%q%f_>c3rI#$X{7y}1Uhw;IFhUy!zSHveJ2#%_Aig~5Pcyx-WOTjue3B_R z$4LpECfYZ(?KRu(GPo0ObuZISXLuy9YXysqlsXN=$LYk(MiC4$gW2NU{X zh1;_5rI3(dlL*~4mpAg2TGAv})Di`I{{e<7$Lp%9hl)qyx7R-<88N!BGx&mv-);Qm z@~i<=da(S((w7wH6DPB*=xrtVlo8c^_m*(cM$8BBgK~Hud1RW5Vdu*dLAPcZNeCek zv=Ta6*0CM^MzGyC@U+&w&b0Rx)0w9G-7(J&r4E}Hb|AHCpYc(MZBe-G6YQ4wh`rxRLGRPb7Txu!Eh7K03v(jh zjat9|PSZg|Xa5{T-v(LfM494)xgqYX=|d~+B|J!@{AlUO33f^*sT+ayYp+;5NWIrc z;zcnB>HZM~yDCE=>+QRe$O0yx{hbPC(m>q%_SRuh>7$(UQ;f0-Ns)YJHu6VJA>{|b ztM8*b+ShuonosXwgb-`)PHOIATWk&!Ga3;DoIPT#$eO%dLpl;K{@ySvIlmY@-_W^O z#StswCnnL#As8%!v9Qoq_mSUpuB-PaivWd{7Y7(bS64yH5c9N^=>jD(SbF;&^F6gD 
zt|xS)(#~_8*%9&nm&o~7`*b{CN%(7Pt_emci^7>MM&qc&ZW2PIB@U1FUwoYPk+|3N zay|X7@a%ap-Yy#s^b&mFx}eSYo)$UT>J>~ZR=D!m&nla?i zt8!8aRIpuf9qiFis4>imDKs+R-E@3w;uSKx+Z7Jv>(wnO@rj6WQeo)2wm&BBLA-9^ z**2GH=F8lxaWVGRV4LPf(G5bES;yB@=Ta(diM_e%qgy*t7Msp+DSN zawLkVe0*M0IS~Dl@zJO}wE8TJVRL+n*^$W1eSm?`mH6EmGLC*am_Q6v_CpFKpgpzv z4BBo*nmm|r%#jdiCuy^N#&3aUw~kMfs(=Bap5`^7o1|ZR2}2PKAD6}4)-fJ-na&=~ zsp4pgn9o$75?kGW6B&(uL-Cn#gM$y(y)3u_Jm~B;UPTrauUDD_tT3JgBv#sTm&FgB zWtlFgH}3`eKLNGO zWd;GZB_#TT!@)a_?29vEZ}{8pUtgWS0IAYsgJuuN;bRgy^mhyHlOtyML>H@rtC#kJ z>zAf?r}t7TjVwf9ER`ET^J4t9OLB_iU59TqghTh{cObo8HA!5M0+8n7E31IXo5gso zoAQ1#aGvev(x(vCCvr^zg+UT`Y|XV^(@(ze&sA3_Q&AD`T}0jny$ny++5!}>m0+Nj zD5om_@?aX5J^7~dWuV0ykjxQP8QNhR3^8?xyUx8(U3Te9@Z&$KnmN3sa=O}!xtfLn z$YzeOB%0=Anu)L4k4WZc8JY48VwE57X+V4XHx*}3lCQd}k7bFyd6l}VZtJaKr&Jm< zFg_*bnSMwGG+@+!mgg|lI{+tQ>?E88W~L!1eBS&Aw<06Q53L26(o{XRqduz=@Wb&Y zFZvMVC`sKV;=a;xzmZ;DQe^2R`aOkbP7>)pp*%jbh%h<2 zlcJ;FpC(pv-KioN=M<%HYf#S`sVkA$Ha{N~PK+3nX4O_a366RI+@8OPMC+$|;@*Bt zT-x2c^?kuk1<^9COT9}%Xth3PHHNs^k2fbpx^#JrqpnJ)lPZVh3m|SqP_S_oJbt$= zN{Q1zja3{b+jMdvA@;`gT6>2PEeqIchhvOtTQlN(=qvrLr3nR0odlI<-FK$_hxMJZ z#~St1Woj0l0pmiC4?u|>l6(#4fxq?4T-2Igjb|yU9G^Y9%^^1Xjhbuudd{MgY2o*>yqx{Lgi;tV zy}+r*N>xvF9rP@$8<*L;X-1!~G21>S2i1n4kn#}|YF%jg#dw6-cxES;VC~0MLKW#( zAM^+`f_`NV2gsgX`010D&-!f6DxMNHk^vCk~!KGA_TMBF@ zVV$Y*+#1{2tf_H<3H^Ao*ISn_>rM8b(?Yg_-qAT`2K-LVUH_3=38edrRNPYm;-Lx0 zSXC=N{67}gx-ajA>hQJuyDu-lY_m~6NCCT8z(!|Ro5r8do<`Xio){T9fj+)>Eo+oY5lNxAmO zeRs~|$uO5wRqTj?aLrxi_Bl_Hd)zW4UGIfe398aaJBoxpK0Ljs*?GYQzP3UK)h8R;6loop_6V_W}=ANGZqkRc)>N`@l=h=^#&2mrovwF%$( zyF0GIf3#l3Yg+=iabe=I@D1&W8(ro0Qh{5Ajf6)!d%ihtcdzr%tFWqs=luq2tAe!5wOKZvCB z?{^;$4Osn#e5-)vb7U-;CgatbpJ#Fs3`9HC4kwOxS<~i0qbiL2Q8`w zJh3+Q1R$h_r*CXM62dE+1NieQFaG{s&yTm7fVt!2k5!--wFuhX$?sd7D}0J-MGjqGhl<6Bi;Kk(~Uym3VcJGJyMjJ?xUv*LUAA6paOFui<6`bGhCx^{lz zZGNyfh?-N08=sGU;Da;4;k#tI+^>}d1R!a>hDSf}p)qEZMo0x7UggE1UM%Cdz9xvT zJ?z3ypY`Cc@AM)MuznPuw9^EZUThP*46f7(htQ@Hv6|2#31zOg<#}{kSc4rH1w^-co=Vdc(cV_m*JimGV&wq?1 
zPnV{JJzWZ0%ao>ta=+T{yi!xsMrZ!rWJ+53W>}lH#dWaFRydmeOsn6HXw$aLK%~Hu z7EbN^0K177IG^L;c3bspkL$&cqH1W2YyFo|)vrCS51Z2Wu=Yad`)1pn3-{V8OGW<7 z<5q46>+|mu8G%m6u)sC%MzrJS%u2krD)aLOvJ6~2*kW8u?$(qZd?T#gttoY!&7n41uWgBH{a2YC8{>z&x?Rcv06>3WCIA3nK$tz=(#OrF zb~(J&Ui)!W7hhm0@ULCorjN1o`sY!{H@2I$#T^UavcZP#nCsQb4V^2Oz(_f0uo>z@(9`tb+;r>aO`-{rKDQj5qC(n6z$lRO>dMyj}k8 z`Cbhf)c2(dm({qn@tc@p4woVvmELR2y#93|OI5>M+PWvRNbhV8wK=KtBN~NT>Kkcl`I;zS{O>qcNg1&Tnf7k0NNuTaW=+hnloZPGR0lEJU zB?`S#{ncJQ@>XVR$*$Iv9(XM*mznM~aJoK{6!1#-UYVP13jqM=3F2n}z<^*#x^tda zrbB&tG;HgoPdXQarA3CWt970w;H1betyt(+b)|EW9#2Ype@|r{IM`uMP4}(-vvW8Q zu&aPqdiW-PujUPO3}JNI<)Al;14h>F4JT!T4c#%%D_@qOf+>eE=dz0|T{}7ZJ&wer zBki`1hO-9%m8GVZFYq~juO#Wx@x8kq6Gpgn&wS_atCS8kx(fgvy3wDLFa`jCo+1PQ z01OzzhnTu@qI1#r$#;p2Ht$6f=5Mcz@9I@3NwH1q}+wOfHsier%c_pUCS6ZsAvQ&5qSxl+==z({(#uUli?M>HC?!SS60002FLI?l= zz$t3lm(<3uGZTQ4B11EBG<6>U0H8nd!~y_-gWr$X`hI^>MSeq(n62iPgtqKRIBrF^ zsLmsI|85EZ0058^ga7~lIA}SL(z6YblO8v&(xutG+-n{H0N`|EIRF3vy;K}DRDX@P z-?rZa0077}UR?kH&05~Y}8LB8TbhzDCG$y5NDd;nF z{!mlXLvGEiwwMC|007Q7hHcvpBaQ$70002M{|z$%000000LT-B000000086(LI3~& z0002;1R($b00000d4doC00000fILA60000006?A~1ONa4001CQ5CQ-I0001xCkO!m f00000$P<47oIwm;eFY(e00000NkvXXu0mjf{6bTb diff --git a/doc/source/data/images/dataset-loading-1.png b/doc/source/data/images/dataset-loading-1.png deleted file mode 100644 index 9cfcd8c6bd2d0a8f3ec02d35af3e285be6cae4eb..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 62699 zcmaI61yEf<(Yg*Vr>E!Xp6+KlSYB2P5e^p)002Y@abZOOfUE!jh+x<^;3tPsF|FW##D?OE zG63LC0RX=K0B{d}62hOAU6+nmT|DvoR{4LATeY<5%y`sn zQ`~?mtIVMdVF@6q$vy;-@wf@Pdk@75x}ODLii^MT*FoslgFI~;lBu8dXw#`5J&WX4 z!5T_1lDKZwecl4u*w{GJAMml?q~i!uzwKc{c>QR_G){tm|9lCiIi>X(Uey`vm%RG%u@=_4PcA;q#a{p6{6aa&-*eNKV}i-&uSI;TOLGJ{R?KnouJ{ zk80k<46AXwT1qjFL~@6SheGa)#+@J`8{`>~8`rRP+o+43GDUi@ ztYRGjzy7cv*j{(BdK1~P_NxR!K8?%Cp#RS#2>sO-m@1_7U;D2GlyW=fm=J(~q1blM zS-GGa6}8n@TYXtjM-nH$RCoL5RZqE5>Zu~?=( zYKuox%tv}CschE=ZypMI*xgI@d63G<4scvu!%@b%n%q;R0SkWRAoiJ>O|di?EvCSg 
zu1JVq5B~_y1UnGtGv-h+UJ6|bWxm|7Ggftq9(e4fr~vH7a?j`q+bB-NibN5MQP;)T zWFli`r6A_Wo{!obJ9vRCf*L~ML8;e{{+c46x8EIk>4Bxm@=yxrL^nk8WLil*+~!nl zXWG2Q{R^m`@vQ#I1h3ij0kM`3U`%1v$kW+oIg5D|2JGFc7o!2TN>sCd7- zVY;`=z&YtXh;@R0vipkIM+C`8b|Rih`y|C?VxPOV%l1x(M^1vTd9O>~$i}-@TJ~va zF)^&B_tKxdm;@n#O-cTPe4?_!y`-2ncFM%_@WLid35S_|Dclr0unC(=;}0XAPh*Qo z?qF!&K@UF!GT0v+FpI870z`((D3&L8pKOS{H_|eO?_b>`P?Dv#!}IrV@MDV4I`Xw6 zA6mp(M8IoF%=wNAx}II{xIH3aM0!@ILu5hdL3sDp%l@dsW!5TA#BI{Qq%6j(R_{dki}m zSG!n2^f>OM1Y(rV@J!$x7-ggZE2$>*`G(yp)EosffRVaA%4Yps8}ig7+M1oN+1UOW zjO?>4|D3O~%3^@*p=hg%@ADKlN@-5kNsy8M66gu*>iAg|Un&NX9+ zGtK3`PGPtCfoR3s&2j@75RyQPcJLhS8B!s zW(@&2#sTVR73n^P9pP-F7%#4uD1L{4M%$?MT7r@4p;<$mK7)euqI!&id!zSR=)?1K zxTTNGpn1ObKebkl%T}u^KqYj^6=|-d1N~1-1%ooQBQlJI7Ca~&NAOP9vORHA#rd&p zGV$&5Fo3UX8Z~FpotydF)rUBh<5@%kVEjBUpyr;SBfqJdWrDDSm)a#o0yoQQHOpP( z>HKi_r+n}GPIHGnQ}i3}rWy_{H-T9U0CQ0zZPgTi>T9eA6&t`&R{j|Fu8qjz(5%pk zW9`rynhHpyr=~0;=5MRy{_dZfz2zjP!Q}x*7NJa3n_|u2p?DT}(Cl2hE3a*-A|0F} z?C?cWR^l|y+xT9A+`W|IoPN~*SOR~;nQ(Cvv7WL;u>fPNN^?Q%3$$Ct=F)Yn)7sCI zSv<^LUAIG|zpju5>33K`@QCFbY(A}qZZhV7H2XpZt9WB=Q+e6sl4*5l#&Jdc7DuFk z@uhIl2*UQqp`OdT&&Axlv+0u_OaUC_tR%=g(^wi%viZS{p#4;)?&C&>>&6T$T=4j z&p}dIT@>8ayncroh76xa39e>kFwU2G1`1kOwi=v-UdEdTf1YoO5pH1ilj_q}SqHU+qwDw+SpSSqa086P z(>DC5XXb?RD$_$gZBJ?)R0zcQi0!|hryu0fyk#RBTw3QIj0)1H%Eq*+Kdv;PP3Nz3 zpTQT1>i;`B94>V3ECGsq8PZbruMmeCmTT)b&NtdV6Ic;%StrxMp9U9p zo5f=ccnV#Un2~4sJr9rh6Yh320n;qQ@v3zAd@Ykt>ZX|E)@CI(2UcNc{G2|`5^>c^ zmw;LG^%TrYu&z*aVtI;(m%??gv+fZ$x30})oucIW%KZfxyk8dQIv~CxWo*IKP*|ia z?OHR8(6lr0Lys(o2}n*!IjjT}c*XraSGvH-z`}k-Jh?+Ntw-&0ci1~8dKK`;)SZ>F z>{rTNN%L8L<82TK# zQ2U^f!i77<8;rEuGaKBP937XwDakFh9HC6`JD~M^IW(W4#eMDP?a8~v*ylgpq? 
z0NbbvHs5Oc9Pp7xoni<9>1sf%MrNWMd&TaZJ;kf;<9TkHl24;yb#|VfD5vRdfd|rkjt*|h0}LenNlO!g z4?cgU<%*CVSF=47`ge@Y!BDP2MI9w~as?*W{DRF)PUFpWJuQuKYlBpGmec`%&UmTw z!cVSFo-?|PLh>~LfQ51-v_KyaD@N*_whe7)8eK+R=kV%YsbSB(Q66xhpg(ROb$A?= zI>U5j?n`%HvI!qgT{Spsf68>p7?d2M|Cy4Ms1p|(<8jrh;>Ry5BIKs3)HL-id3BlK zi>Z$L#!#~%KywtCo)X9#vF(ZujXb`IFs+%}VWM~_9el?B``^xe&dwrXgiDJ{RAvRA znEfHgGmkl?n|fqw3O=PQ6O(n!H$qZhT23;i`9#>miu=)KM=6gqi}99@3?xf$a-o8` zei^A+e31lsSt9xh#XlsDwBJrtbced_hIq3tyWNnQ?fp8#5N7 zAFj;TC9QdX?`3t|*OUzXO?Bgb3c4xR5w=OQaly6@^Q8tb!=$tNxPH4|p0fLMK=-Am zZGO$if@4{!-TLXYZo{ndMpPmSou0YA^ro;ex$kmwsJdqzlQDrqD?Z6AhsiQ$C$4zW zyV;?>6LG+UH}8oZ`_D|f^R z<|xPVbdHDp5EbuwSZ|A!N^zbG*+smz2)=llr%&J>MMen% zzj%f5w}GXp0r_tn5eFEnm>IU3)Z)HPzso@TZ|8l&%?E*NHd1M;o437)nH3g1c9jO} zL-)DQE@dQUWF>Up;&vVVVdhV99ng{$06<%Ls~K1#+}!VR;SH#7C^H; z*w;NOfalS)7tLRm>T8x4%+c}#rfB3gkex)7gKjQXqzm^4G)@@sWft| zaYSV_p#^&({aY#JPJ64<4J#QlyHas(D~-2EAHQ#g2wN_q{~8fEu3lyeL!%FJ6xwU? z)>ET|0q1C0wLuzz-pIR~yLM!{Y&Z9(Ejx8VTk4p;pnZa`y~>TrL!7G$*aAYDtI0?U zyyv#yL~tNm=6pHt?eusC#n#0~ceoW7qU9;CRNVf0AtL;8o=W|3O~$qt1eZB7;*j91 zWB9w~g7DDHr6TS#?)J-on+)$iv+K{B>+6UEfOCbd z<7!I*CnN8qy+~;M{8ejI4s~SfMXKq#m;F;Y8ztC{mjFSvVtsHOSG^pAK1&jRBG$rQ zorwod>OUXA*07EsSgmajUrc)#>@S^Eh{nSL5w>p<_g8pzTv++eD?!+)?@Rf0Tz+`? zh@8}LhQ}sLh=My-xMXTM$zt0Gl%R$H@o%ph> zh0~FKEFIn3O9@dlPvhk=?2=x{jjQ|B=H8!kb#=DF+Pl5=l*%^=)x+!ZQicF=zyisd zt1bR;a&UKzTg_PW%!yalQu6taAzRR9gA37W^TYn! 
zt#t-g+NVUu{gbxKM~Tp3+0$UgdY9JI!HVZg$e>w$`P>%wB;;yOS-EvLml^2FJgc6B zYQ(oVXcT)gHK`}Vd!?=(1C~JdPS364$^m=x$X=HfeBNlnXs)NeBA{KB>xUMs;_3qb_8#_5S+MV2dUBXzuIGt6z zwTt%E`6<%US}fGSXcAMyjats2XLS2Ry7yiqk)|bU=_LwT;>oQ zK~3!l{v1Pk7otAjYhzllo#~v}gof3ZC>^?f71RopnjAf`Kb@@^a?{{NP^ZwqDe9XH zmBuv1P*rF-N_i5cE>51wgTxeLSjgM=p+6Wc`K#~EEP>ZYVI;@pYA>g<6?~g!DVy5D zRO1TXwly|Xq>`3-i5+JfO8=GSE=p zvW!`)r$ML8BJ*;NFAj};%Rek}ff}n(nc++grfHM<8&E2l_|5w5uLvQ$BMnPnfiB|R zB3cVkn`eK#61*uGaV_m-UsNyvkcXn(*miJ}l7|mr<M}?XTT9<$ zipzT~?e9IWI>DuSTJfarLk~N+nm8V&Oz&~mKwzQcdSmV5zOi@Cesnz3D!1}GU=oDV z6XpS9>7pSM@qXN6!ra;K>Tls($h&~hX<Cd4VRGI%jlhDyl0{BF zPye%}`GMgtRRH#+d^b z_0aV-*H88G(`F6eVl)s3dS?`7IQ1upO1sVe(BiN2fCpjkD*jyyxadc4I52qC^_9D8 za{Nm?y;6Ew&1$ci#Oreg?SE;U-e|UI#>z$N#jaoODl+($$?g9P7aWPHIEt;aOkcQR|ts6Dq}HYqk1P| zEMuCE1y+maIDtwGA(iv6(w`uKN-hlk?oglHu{DowC?&y2=B)r#Uar??@HG9+>2+rf zU_#(tg-a+QB_VEUZo9euhn_O-Ni29>O~|m>)DZ%K1uQH@HN<6o28yEh7{jUW6WHTd z<(u7iW@h!gUP;Vc2K?BF854q{A@d8aX7-RbiUi_SLgSvJ( zbNSV>2y1voU}hTn7!$&d${waAY4fu+$fEv%pT_=6j*5o!TP>>VIA+m^v2y^JXp$f0 z(<0uPvo22B=w9;@f*CC$IDuJnA($di?f~vkFvU!r+u;%q-g}vh1Xb>e+aII&1P7v( zuiPLDoI!Er`NXzgd=nSBzn;aXbWMNiMPxNMuw@gmqs4UKMy0x@hLk4BN=P5!bTg4|lMS32tE95nX$yW3lb2duNqe{w$}fZZxaz;xHp zgwOi8m)m$jwaq~Awli2T?d(%w;xn2gZqBM0rdi~})P5;;o;&&!ww18i*(Wn&LuG{d!_>$cZ z2JnZOhNjHBU)5t-oWtQkAU%awx=S9mi*5S%9Y!w=Uc5}vxOXKJD&8SfoXLiBD#vJ z@p5Y+jr2HJfyE0PurY+xq>8Lh~6BmL)!rxL9#jf$wgPL}rM6Z}y*(*<6+3bxOLr^RE7%*&>B{5x2dG_5SXjFU+c`O&XTRHcB#hA~yP=P?mS!F#hzF#kW& z7qIuMR?mmc{EE{AbIx|9DLzM~oTeZvmoJHSvz={EU2FdQ_w>sMGo{X;aE&J z;d4iQ>It;IkcGDm-Qu>vilH^EI&W5GFb62J`Y8g)P%(*V3!`|V59;Gf)}(}ZwAH52 zpXjj`2{q>R`PAZBN(wEgg8oJ>lcc+z1iyGU{GjE9LC)QgpBCqv5fDFzWJpZ*rsHe$ zn8TeNAONnu^|@#PRa^$?zCG16yS)BX^;B!!Bn}c z=s;(-hNGB15KnJ*j`&g{tYXC%$X`yb;8ZJ@gtIJJv%!=*YGY&O@_vWoUqmgimF7}Q z7lJL%7z3BaUDZjys-{kTJlEOxFp1Vy*eNK;7rk@jSg0zmZtvJjU(|1S6=Sh%h_KO2 zN5jWJF`=cv+;veB*F0%p@6hbJ!MR0G`}cf5(LNMt;Lw}g3CP-Pt zqw<9>M6_6QC`0nNxHFq;Y9ScqiO*n9La;?AtI;w)&KBXJB`dprQkxOk}fxO@tNH?le(C1IR#{x;w^QQ}KVH8$W?lLMTTGeGz 
z8$;d+SoJFCHSa>KIY@g0WTC(B5ckvvn51r>37+7Yy#JtbeS`8H4zXF69AAT!qIOQp zI-i%Uo?OQIRZtP-`f1HMZG;-X9-qOsGIdg$Qsy&L^@mKH8w!)jlK}}CDf{AS{=tGI za!WglqJb3Lj{orZGBOTuzA*@uJD-xWF9Ro**hFQ1ZJeh3PEq8kC)A8YQ#pG<*TsHW z6TVgkfDcK_7>cBD5p37NElvc%R0w*q5CoOj8?IJ7B1%7n@Zf;XRW^>0r7KZQ~ zCnTrzlN~pE00lRoXTs;@W}4%3|JXm(<^y#`#JQzTxmaZ}7x2KS*I&U(2}QUTReI3c zd9-H-_4m9D49bDa{!RyHX!pcql7`ALGT0ODP^5jj*yu^4&4&?3lJf^jUeY6Qg__1p zmF@iTT`()Yie2ef>Ct@Pkb44C{1~6{BVWsEJa5~~>ccQe{;r-b1~QS9Doddd;zg+` z#)L|ZeteKO)9&Vgovr5NVGyI0Y&FdI7VJOwj*Q3(e^Z21S>c@Q5N`z|hOBU3YRyca zxYmYEasEf{0~AT*mQ0~F?*?Fk&HIq%kTZIC6R$Gi-w0T9uW^1Sy%LslT)e)6 z6`f!atDrM*XINB$=nW-zgyp$br6kNq6M;GQDFczqto2%Fy;Od2R=cARtPaH|pZA$V`05!|u2cB`JzdBwHDcH>tYqJT z!zn~rTJwE-pDv9=s`K@#K?=}GQkA2>3r@l(9x8qqs6O?y)9}>Py(A)v9i2Zkh^t$Z zz)>#luL!O*A+ubfa!0kFom_^Z^2ah`h~bGFQfjnNSYu0D_tc~D@=t|`%6`gz&=rx& z3|YN4#?MaC6v!QeR%}+xYVnbMgAvscP%Ui z1W?tpp;*XpK&2_GXaYCNdILi`t5uz$+`;;Q@AS79ZHW#i_|x_<2CDqsayf%1Zz%Zb zjVf}&CEf5RZDUlwWNhyIYe%y8W-jD2mpI)}z#?I3+N{IevK-lNuwhacvKyQL%b2Bt z@ZUQu7A*(qt7+hlCNQS@G{p)hx_sh32p^hM6uZsO)U1v{YKj`*_W~II+p#Smjt01e#>^`* z>nB?4)_nMV>&|n{a1ik(js*Y2r}mw>S3TKsbmm6^yp+~Lkj(a^5zIx5TqaxoUN(25 zYJHZ+;HsN=^;V#c1HxHZ?_?lK`vgMIzcM3CCN#ucLUeo;%Y``q8$gsI)lv{v2qHg1 zis`xXw^fs0iTzTt{BTn|Mtc}*3^UEV!dr#gJeSE$A3ad|nBix`&G!Ro6}%yZsW^9R$_QnvVv^tnnQ&K_+DcVL21)KuJfbHQyb# z>N__4K7L@N`z)39pw7y-Xs6GGu=H}AHkV<@MOVOy4sogxwpZ_rB2fIUrjn$8XBs3`ss|gEc%o|#X z@+L{4c>jI9*)%Qf=G;@-yIqRV(_5tr?lJ(EW*m#gzw!T+E4Wgi(B@AE^yRM71>zX~ zD@T(;(LUTh!qwhW11&fK)UUa&tlyKyYc)9{Ld%%EOCIS=ift11jy8LX-dJL;mdujo z-@!ETv#3M~AICa7e-Z=>El2tg$PSzc@D%y4E`gH2h7kG9 z2~WOgA*|h+EQxmrtGy$zp_Z!7Cs$Xp>t`($>r54UumGc3{;t1*;9t2{f2EDL2v%IMRV(jCAIHRmVhr zEGlpZU3j^2Q>E1jQllPhdYnKU{=@aYrPQiTW6ba4Dt5(v76zoL$#&ASW=>+P*we4A z8xEkc$AJ8_Ff+Vz4+0yV$T{uYRatP!9@$){=7Lx5_Vw+igW_4ZF_#R#XO~N*D9%Ye z)5u*?0ci$bN@1VBf zbA`|e1uNBR_%#W~1)ANUCk`rNp!mvG2KS%YlpnFqd@e{}d$CaAdq-Rc-Y~QDC;#t^ zl6(JrnBqMLM9HU4^^Z#holGN*_80sMw>V}Slb`|DZt&=}DuIzOC*% 
zEx}q|n>A#3?nt?%%pyy_D$z$greAo@Qut2f1E~ufKuu3Fqbiwn=p2j4%gA^6B5Or|-vmyxCcW-ye;OWZ}oLm}Un`kY*upGa~BztZGa|9icQuj6q$OFFg+)- zG+G7|+lIlwOB3d-32l8O#BpvTye!&6P|&bD!6FB`lX1K3ngodx(IK;J5rBb!&0K1Z z?Q7;&gIzKwR2gwy#5|HGju^aQ;xJ+f$EG1jZ(4{D+hMmW1Mxq)xl=oJ01xv-An;yWARBvSbzi&c@$aZa4WDE~Wu zK;jm|Chdlwg1dyrtX`uHO*nlH{gxTgz<&eoS&qD2c{jj9K zlwoVO0{{WMnar1THs;XHu;q$Z*^G#8)#t#St$GI4o(dN_D>LkJu!VfRy^y$2nI5^M zM7YF4GG(V%ZQip#;!V^f*RLcx0co1Gfxm7B?@h>N_^IPgi46pO;q-B@ST?{F6L|zf z!eI9e?BB!pB)d23BwDTqaAohn!VwJI8~SHO ziKW^}>weE@aea1eLHqte0AP}MTQ9Z%h-rvppLOd zV1%hB0$2E5$X5-IthxNax3WyGlK`>0ilxRvG@5wIR0dDpY6byR0463!S0$O4k(?v; z3m`B1#lJKu14zY58aT-{ zNuZ4DdhS(5`oN)aSFl;$92;OHeX!8F0BK~NP!7|TzBE0Q$`)chsrzr||z z8i0hlmK(ZV@W1|?Z2Y-3i~YVA67y3d9X?L?^!OHawYV&O5L2+JN5V2!VMEOnJ_?xF za)QM=R_uMl?9ipPprNB9I(F4~&so|6si%w0JaWgP%&=mD)S&jKFv}%8Xp3Cixo>0$ zWhlXQJ$%3b2+@UO)13LPXWvg(q(8}H#fBP}gwCwGGgYK0);0X&3#B^O9>iefQ_7J- z>(hr)SArSe-2FGYhvy~S$S=R&u6Ly{PcoawHq9ogS0s^mSmFC_g&)E%_uxeex42LZ zq4T@tS=%S{Isg;L)4FRY-cG8G9zjPYDXO|AGIqaF0@|C{a}FUN!#L?zOv|enX_zJ6 z6oIA*)v@*){Q89X-15AJj210O01~)Wd%ElWmahW6jn>&$^>2eyGHq5jLumde`B*~A zr;vJiHd!0wi^Bq}&WGdHh;42B_W7TrM-zZKc2B;|ueU8OjfZWMdeTJTc2le}_%)f~ z)TkCX)h9~kxFB#-Eb2iNq|>K9N^=E5|CBUl)vt`^_CciUruJeI>)yJ&{Xv@a%$X8Q zIJ?{5x5E%sSr80DVEYv8P>HOOXp?zzFqIu(alQq_=o84~iBw*T`X6ra+hv!Z_vuMr zAuo@v*zoN(8Ml)Y20);jeqV1~bUQ=Ud;}H8zD!!Um`6ZmEf(ORo6A14%r7uz#uc8o znV0W$9T89?>98Y5Olo4Ofihg`;hZFh`P&D)`q0%;#hEOAEZm)~@6`YFTDxtxt2t^; zHYo{qgbKF}OCh(PWxWffB>+k(Hfzb8uNNAXDPf2yC0t$WEEN+9@oE~jB}46nx;g6WEOVbg!%6LH1z)%{i&h zJF)qvPZ*H-+_Yx%#mPWk@t`iQj>D6A5(`bUC%?XO1~n!JSCuHLVa~(i5fgA%o**H~ z^G6o{G@F1^n;%3bCLQGzQSiiP)$f@v-XhIrVBa?)ff7NK!JtWyfUJs(nU6MF5MU5u zk{IwMf*lGqNBHCIhE9kg{MKkZEPIct_vhyd22?{zWFMmB>X}Nn`Uh1z-*XuyMm>BMDXKq$$%Rh^_o{pM3#hxdc?;Ge_ zWcvZ>@mQd>lJ`$TdHXg|+y2wc`VxfnIGUG!58g+<^Tc|&$EFh&AIn_yRXNVtUTKA2 zLm%dtd7!|jFR68$q^lcR$Z2PFf#|~@oZW9z)QfAs+*&@?@sivb7`O#lYm5Bc_~IeG z_i2`*tU~sDpcis9f&h`wMq1fPanZc!w??sNf@G!&OCwr(eJsHzNeH;LAMYp-0jilO 
z?9v(-ixUfZt`qd`)%e~SV4um8UZ`up%%Y%**uN-^5VI%#tN5pJ5jw$G28v5oNmx1N zH!RlXA6ngba|nJD;%&T?@Xh^C2K_^fo*1U8sfNIeskkW+5|%! z?95LOfmxrk5B{8Ab**uOQ45o4)^UYJUvY+nM$9il>4z<&Q_hUb~z$@82vEu*}F;P8N&2Ew*Qt zQ6&9_i6V{jAyx{di7y`YyeJ?(>jxA$Iq^FoD%^bK?-pZG2}Gn#!g0n+dJ%+(w<{?o z)tV`Yv7x@m#ov&*1T9YsAzIxCz{%GcN+X5t;LD<&^^E2BSbR^<`c+aVUcaHGH}0Ww zat`NBL&5C1kLVlC>bvGmimh@D4Mt)lpt<^+IDF*>bNG<)+a}t@F-WtE!_}R>GH>3T ziM8CFQVRPgy5SX)A`YRrmj03Swp}m5X4h3|2lMg;kMei+fsx7Q-I3L}pZr0EYKY7% zVgGYwOfZYV%<-;|C@@=p%QWz=67BS~gLx?;;pBUL#yG^g_gsy-t+gVy32D#SV?Ac8 zoM{D|$07XRN@?x($DfvmQsry!Ss%nwCfna{cMNyyWU?j23DhSlHo1?6MV!w%A9rofz- zQjA6TA~_;pGevLyVX|0sX-B+eQQozyv`&g+X?sXTh-KQ->-oKiwmp?#LB7aclJ3VQ zgI3o48&tSeCr9_S7en#fslb^oKg(m%gN= z93EktJ$Um5vn~5P2sd%j?(v~x`_P-GI3)LS7@q-OML0cB^uY_riM8}xfX;Z7)I_G1nW3{AALU2wI`zRx&QN?;Py&Q zu1ASKXeMCcvgdhu+>sXo8yD#jj=Pvqr~{xZezAzX|3dKp-4>8TZFjCKT)68|CEA0 z3kk+laUO^(w52oC(agl1xJ^9?=C3Z61xW0nVWp8o?5Q+rM9!ro1z0H&@5BA0YzM!n zs=Qh2O_pvAPNdgw(bA7j;SlzNP}MZeAPXXxyT_)6f?E4+=?N#`8~ylD=6c@GrdCHt zVEj8S;upNOmMAlF!~LH^N#khg(P&?JPs5Z@Ij7$MjV3G5z*zemzgV*l90KPNf!i4s zXGf>$Pi|=ZwIT=I&xgWWp)6z97{altnQ8R-Ev_ZO{s!m*&M^}$hOR?KoIRtEw@cIM zl&Q)ThFrP|0j)O1$m9{D!;1ScNh~J}< zGmFTj{Bwz)p0ZistG<7g+ozy2XH31cTVqw~1OnM6x(b>oz#xCQ^OBKO*r9-979?K6 zm@GY~0QUz<#0OnV+H^fWHKtnWAuQQ9wZ0$276|Lq>h{=gx&!nYk zk|~BbQCoMtvrn)7E?+~pEG$Au!?g&oV1(o=LH0(@(T^cDCFwi;8jfeHg7tfB3Fp%Y zk+~ix3pYRaC%Vg##~Kwu8*V~P9hy*{yY8z9{`Lc`w-*6N$4?q~9Vm=4vUb0vC{lfI z8R;A$SIIF0%yV)!gkvpYC0e6-#8}hUn3hp9V{dD>?lxHvYjQR=*t{X_*rO?x9C~>% z;D9MM**F{#Lt=1YB`p1fUl?JnOE}i^rTzO2Uqa&vbzl6;H&d<)knm(W$K7$K99CJB z(%5C5uFiMco(Ptfz^&8gc zU{_q7@VUHit_714(>A{|_Y7*&t7kcK08TN$5$oj^a;U2?I+m>0(lhp$2UzV~pS$aD z3mw+q^}bS&dpuyX)^Hg;FZx6jdRoRhP7{?Nn~*4*03nRNv^lMm9=sWxPH8}-Zn4Gr z_=`bRrX(SjZeN=SHTKvuRL*8!+`kN_ybiMQNtJ$Ct{0SMNG*;*=tC@_5Ph(L7G9g4 z&uT_Pi%WK*<~6RlgV&q<&(hf$2e~k2^I)} z$ZTUgXP+~kIf&~4VH;sqstvakGFr|E@fh`?N)h9=L3?4Sh&S?1_EVP2I%nnkwf&A` zg4*;W2RBRr_C^=JPaz4t>lV$*E#cA)iTPZB zYhP%$IMn8~uYTHwW`o`g@GBBq*S-2F;cjl2BH2Fn?P})+*Uln+c3XTf^rNZL1X&C? 
z#I1f|K+WyY+=8jR|5!5Iy=cUM%?3s{@8h={L)C=~MgNGb^Qj1V>lVWCq5^v5EcElI zieylI(%@H%ovN#@WlS}yU+?jNVvV}Y*6lW%WO{CiL0kAEBw^h(ocj%J5VG_rrlzO6 zO3C$lS+-(IynQHrUA(>X=7qt6YZFQ8P@ zXB+l~wT6}FvEPQ6PqAWbB(eFyZ)@GA7yj#%wpD-FB(n*ad6Dh>!{}$RaCd^j*?z{s zg+y#lp@>D+ZxZiW`$A`PN-eJY4a=l+`}0yxuc+7u{D3HYV*GL+w1dQ}W1DfY`yFI! zs)wGeB|TY@-H(9UxxE2Ls~!Ywxd%qF>NTg;l7YA^nD7S3#y?+J4G!7LL2saOm%1Xd zRgk{yW)%Ej=&V7@c7@Ped(V9Ia)m#^_U>)N=1!j}-NLBa8$I9l4ezN$fm(6~E3GMn z(xoWaKj#!XT9KpgHz$bXE*)iii(SxiH=A$98{f408~TV%A*j`$Ewdz&_Zu$Sqp3!o z-7A{UH87otc|}Ayv4LGUFp{Dhv}B<6u`^g)GpW%Lz*3sC{>{%ynxV6zz$7FzK(MCfTg03So(6$U3y6H5Sd!X>&}F(QNR| zoWI=Bu}ho%Jf#WQ4-3JgQCcgKEW6MUEY_2U#pj*cHA5I=k7vbf>^}EeD;L*Pjb0** zX7!5-W}VIPS0VIOnR*PVEGIG6jh!}EoYq3Ap%iq=xg{5p7-5m-kQ~%I!lou~ra(sG zsZf;sMv#yKxyTe;UOP6ECc23Fi8#>UfJ>NL*8aOiVa!ex!MJ=n}!Db2GMxppkii#9&Q=V{Gn{ClNmAc;bh_W;PF^4e%$u6$f%Eo`B$QHtto;# zWWm>g1~a>Oe6g+shiPY#Gv|J6K<6-10RV<2q(Be!Blr6k>7K~}pi>EMPuLN^cPo071TOGL z?lA#;A6z-p&G4a3V{eFV^bu=8~HYfSHr7j9S{N#9zQ`w>F=zPlkKOt~gUCM1CQ zeJuP)%E|nFA|+rZVDG*s-XJ(n!+`Q*TZu(|; zNSuLOtmWu=?FEvirrN~-)SE)+cSUbLWW^m@*xHoJ>~d;FuYaN}QF882yYqGlp3m4z z2@6$~)I&6eDbXoSOSR)T_Boi! 
z%-WCV`8O>4uPa)8`&_4aaeVLpVb0>1D9SX+ZAOKoMOk8z5{DsdAK1|{}ZVyf4#sK0u3!_EIF z@0t4`|5lSh+Uq@@dDnHt*F%nH)^b<;wD0x2!JzpIAF>!Po*lVH7-A-Lt4F$8%23+FPmR*=BF@Q;zVzFzzy(V57iD>&e;Q1=6?Br7ibTaRAQDJmA#RJ#GQi8k5`5s2fDPg2Us9LkhPGx6-w z0nS!}xskfm!=_#iE<*^67w%qAgRGanmzH?QZjZ)C#ePUaDSZpeSFH z(mrFqwk}Y{CSCe6DW3L!vG!I$bw%5@_QE~5ySoJl?jAh22Z!LkaQEOOxH};bg1fuB zySuyHnSK8H_j$OF_bpT?DAt%`jNW_uTANo(TdMCQmaZ)xAAb*7I}RSTjEo(Y`huDX z_4x@`iB+t0TfLl|zPdHti&Q(?O6vSJRIFu)s^%{$dO*m1PRmTUPUNGQih;eKQS*zS z1vkv95dK-9{!K-nvJIbH^q}NvOCGW>f?B^wU;CW5h3#R>h?uU*%r4du5&CtKsyVNp zA_=3j;JT>uF8`njEjmro0Jo5i98ErL5nEk7^$XgB<;c`pJksDF1X{3!i?PO|i?X^h zA)LpH5oTGpwdPhIisfoR(j)HEZ2xbS?64H0P|KBDG;7a1Eo1Y5nXUQlYX{PDjVjX` zT{i?mFF?*fXWMUPtC_xs{}3|6RVRMFWo=l7qD3K(W|BOs}8YQ-MBN zTQRJ%A`Yv^5vPCQKs*o?5thr-?d%aBBLX5r(?4w=n8>6?4Q9jA=Gn5YJ!_2ILr zw>tGq1`aD;M)1xIk!NB{W-_Vs*O-CMBtsqOd>o^iKjzoafACR!DV+bi2i8fDOiewy z?d!=zIWsx^RIlCIR$?WoB3C5%nVyCQ4nFr>y;#jn=U2;50SmB2{EQJ znT&7I=z$TkqU!r|>IE8}%s(=JVmX%!GL`bB#uGI7IEm*nnxfx8F^`q>L6ltSfJWr6 ze=}GOIfpiDLX!nwCHQ`rMs}a%REbC{A0r#L20@E*FXv5u`i1cWK}J_jL!!##BpRm* zA}6VuP7g;ZC~(QCf|V0Fa2EW^u+jj*ygZig{+`hL4?u`+7V-Wg1zatoBgtDxM4wy^ zYy%u8PNEi4Yl{kuX;O#d1okX z`7H}JDVtMKMB?Ma%Nq3EOIGxykP7nC$b$WOVI_|kg)H~(D#W&w^-_-&iIpNORN*ie#OdsO7#+O$S<90qUKA6qwr0d=UtbE#&A962Y6L9#<$L;e zcv8{BJ!j)m7mdBz(RW!0bx@p9>`yQxJq~paJo%4eG7whg1<_w(psA)4a15N!UK)}Ptoz!rdw#Ky$ge+#2pNr`B3kxATZ2JFnqCct0W+Y%!&>Q3 zm;>#fR&3`DvlTRp=+8+@YM7YuL-u=9Raz(n8p*XEvUqoA=WlQJoQz|mL1_OpF3{KE z?(dbLjyFSKumU1;SEoS9Kkt%WU9~PW3^swh!81rfI9CBRoJD87j?11~^50%NCS|6)k z=v#cbL@-nFI_6k6cQ`J^gOU|VLm);T7;GAqp}6`%`i*mNl5w zQkzcEL#mLMUw~{9?66%yT*8;$YjhBSjY>s$F}f@-Z-FK;#k(xQ7m0!Udy>;yWTIMwDPBW z{qkmaOiO-655y1du6b9G$98`?6fOMbtdkeStCh&}dIk}{;$h$&Ga)M*V2z}6m1zV* zfoLSRJPZH4R&tP}USA&RogI1zJBcC&PqhaBt3{ZGPd-goBI7Lxi-7j6x`! 
zc`#?Yj@pYl7$Dg$^@Pztd(UKnO&Wz`8-x@3wex$Wai8WNa~J~dJWDPrIJ?DwJD-6@ z7Iu}2M{nH2aO;)ugy)Xo1~WJvcXA6!ylp3K4m0 zp(t#Mk3aZh8)rduzTOw~<=#kSS{Kf0?V&PAZr)IGq>@h2dCqJ6J0%#F=67%@~1&%9>mK(;7P1`d6iyHU*@_2K3x*oB{}Pox&i6+AOgJ5)9|lpz4xK4z zv%nK_QN7!;6M%Li}kto-?)K!}+gANtS0xJKty zMAdq;m9CLTIw$^b{}qPu2#pi?Ke_#;1lP5vt~ajE+7DO3-sTp=W=+K5w^Xfi6G?-a z-iT@Z?p|VZLN*0Mx8Hl@oK@apd!%~9+|MKm@LRqkxQqYU8a>9GzBC6i-^9DKNZoEz zFk}>o9tmS!P}IV#F%Zv1PIV({7k83`uJ^KjUD2xC!Ql~P^wKpU-5yuH8Gz!5WDLE9 z;0pNGnjm(SL}MweX!H_u!2n!mbbnY>Ay?k)7-NMevbr3>dnixB^n?N;@n`ifvMl+>!jTf*lA6$Cs`?>GR-_oxRWz)G^%n<;Je1y$93n%S}(3c zN$Mgmmio1}3BH>AkNH-5H)!sDO9CA~8HiB+>lDQBY`cxe1wg4w2qGVs6jV9z!u8jZ z2!;?O?gJY)IIXzljmXd~7At6sFG7 z4OY=Og=>Ym$MN5KMvj2?1XN!6*~8e-pmyBpx+gStyu=PEJ!>VqzLbT2uzW?Z_N*^K zs@frjm;C*$3J-nWaD15z%Y26VH#~(fVnQk5F8uDo*p>q6t4f z?qI?B<;lKotxFfof5sTFMPg1?NNT?TKTpKYoyKO%Gss|nP-9$wai{eMfV9dq8aN7A zT^^H`^q-9S8zft)ymf@WQvh=4!w%j~w|f3IZVORff1g+j_Xq6sxHY&Nq_E4wwGyq<>IVR_Xg)PGw8G}mNjn*LlDMyL*zzJd z|0CqZ>OyA>Vk@BMfBC>Ei5+6QRw^k@nfFf)>{O73(ST7Tlkn4a1?A!JPACDGEr znDF5OZJ2e^zNO5j>u06}yTda=jo?2FXo29cDs{y1;S;9wKefRMx<>-kSsf!c&d-w{ zkqx3{AuD+WGl}eF%oXaSOnI5$*<+?5Md1oF$Sgr>-A5=&-zSixR?KP5Ojr=1ESIBd zxwo3;C;vdC4of$4ZTI0Zr9Lc$Wkh|JM*%EfHcL=Vy%K!lCOIu(_!`@Vrh)iKeM$td zB4D8PR#Peu>0~nzkD%2`z_NrNK#vxl*bx@$2};Rqx?g= z4FVKdFoqcK+D15pm}}-dR{S=K!3-t3rp4Dlu`i=P-NGEgrQuCfk6FrJ(KK2jLvMY4 zidc+&+ks@4{>ts#c7YF7vN{4o@k+{turGkrK!qlmZ?S&0y5ABckS0sT?z(dSWcUSO zKVhx(0J=E18(XPk!g1>mFk*I7neygoV%K=)PvvSzt8K#vbQdVfT^btB2yo4|*AHTU z0mpL}6+S4mIF^i(C`)8bn9A9BmicKLERVR@wkM5Yl3A&8;&`_xlPg^eU!3`X%k;E}~op~4?6>V%; z)mXD*bO-?j*rKE;-x*xTLG%71rNgr=! 
z?^>`#ClAuSC}S&8SJ;-F_Kkln4|#O=`F#p8SkFwM*?o3za!LpRB6j}^g}03gX_GERF|-nX48ZL>;~uN(!E6n|QUr@{m2 z?w!c_&Aw)teGJ)ztPOw=w})*(fhyu08-E?9uM0>k+}7`lKdBeX6%QCTbwU74pF3=H zpqkOQx`*!O)%R-&Y1`(XTN~5?AT*u=UXSL-8hlC^^JyL&wWDy{=scBF=PwI>WpA{)U&HU-n^ET2LbubRmtL{G-_BUQ@2X~M(v+a0{FC0c^nlTE2LC3S0-bUT9d6Rx5wQ4!Y|L{ zC`{=Y%%R3EN|GnIl}|2E?#y+*H;C!OL%T)rwCOXus%9WBhNN?WWdyRtF~4&~%|uNl zH;39&@Ih%zrSE`B8YS2Bjs4p%j`9rVC_d&D0GWwx!TdA0lAoRgcRw}A3>4SM=bl*z;4{l}+Y@-5`sgzrb?w(~ejoIb}I^D7k7T^Q4IR}2117JcD> z`=p&4=3K`{aFT%JX)+~B7qo*FwxP=xmqLjk> z&>c)$V&-%CEbw{~=ZuvQ2loDum6^6=$n{}2_>Ou=Z~jo#Eav&RB&Lw>Ly+J7ZEN~u zs?w0~h;2PA`yLVvoJT0*e2Jr)_hEqo9dwznq75_E?cV6F-UHm1t@|;SNC%OKpr#me z!!7649UGMm&&Y__xqEN6=lQ-kyAIq>-H~l7Wf#6o38G~8ms$j+vEze9P4lMW?#gYW!H ze`S`qGeuP8UMlYW`zqNjcwd2SkRnx|Zj>bn1Q32)US0321XE)dsmIIpje0+`wpZ?p z>mE=ffs8LvaG4@R4czV7qJ%n1f`8}t0XfGDNH`Fqc^YdrQ$Kf~Av?d-ON*Dm6^(i1 zdgG|N25yyh-?$<3UKasv`}jUq2rt5axwNeLdGBE1diAL0MjA?2-}mTE*qRffc-|$J zf)`yfUbHglSANQ8F=x_HWkTl>RvNCZ?A%(sgEoGvmw0Rk>I?5d8mWNcM=+AJ zZT@Ltw`ScJ6{)+Mtl3mhU}N!(-cuvctHy5*xgMLXwIy7SlU0$(pzJK#)%uaWDo3P9 z1)nL0O2t*uwvAJL`c2|9-XLA?CecoG8^&0{HlfMTK(*W|)!4tKbA3U1oSR@Ky5kpx7%$nPf4JtU31)Dw zy#l42-@(p4)s17mb;z-P2uFRV`ByM6h$g-~BXc2opX_2MVw{?PL{B08uC|z8_w}id za#G!SghY3|rspyZt~hmA7K2WCzJj=va1lKNJ>=}S+8);DdHLjZek|U_a$s|jv<;(d zoD&9)+Y|o6wBZ2x2dZttxi`jsk{Zn?4uL3XJzAB_0s~2<$l#v?6JxEA#Us0GGt5o(`qfxh8>ckZ#b$c1)YKMc6wxb!`4GCRb%`J59& zXoa&+knlfoSa6|#z8u+;I4D1#fY36#npdK=WsZHlfH{Y0z=~}pOi4PdN^e~EqX2@e z8-5e{n`iryXofb%T_uEsX*S1lNt#Wk;2rADSNLp=M>O3HE^e_3Z$Vsi)m=F2hTWtd zDgSn=uoitkFaX|RutGrJiGkRPx5_LazRuUP z??M#lhmL&&e33P55`;wp{x9i9gTW+{-LNO`9@$*M2w}4K`48QH_WX}5S3_QfrI)j^ zh_CEL4}<>T0=$Wl;N2^9=TeV&M)(w*3K(wWFaopG8{Mq^yfkjC0kZHts34V$Xof7U z{vR?^t|-v-Li%Sf$BZ8sKY{k%K!Wm-4Yh>hJSZ7?YafrLv zpBz`XIzK3mb_fduz}mHd6)Xyi}6SQJe;uCZ^7=-WK;NalAfo( zCEPYEjRQIv^q6HQD$|;H9fi(ciCSLNow#dQ`K^dH)*!y{e^7dNaIA=&&${GzPu8ILvL&g<~)%YA@-4|ATWOFFzD+b zrGt{J&3qT2`SjgyG97y4S!2+wMAd7!7K#V{dJF2}eX+nWfw z29!#cfJ!3JPodC+SV;d||BDDr*|eB@p${N!O_N<+G6B$m%Ed>4!hvQ4))8Rv2Kuts 
z&4uncpI+$2@G)6bv2SNwMDfT&QkP_{5fc!mjt!2rp+>S|znigPX;p6uIz1uv0whYf zB|qQLn493P>B9B1J@Q=EeuwlPMTaMFq*6KjJ5uf8TiTv8o`u?9Vq>b$m8Xk*UJu9C zbl=wfo>o>>b6qUfRcqnC+rRw6w79)T>1?+@Q02R=R@F#37IY+3)J}b@a)*j-UEW7& ztI&TQlq+YoT^YE#5N=ympCrKUy~GNEkRSd5{$R(5n_bnzgl;&X#oqlQ%=qlRn2<;% zKZZf--I58ZkqS&NARM&1K+st|=f2*JKHuu_)`dq0{V0d=WAK|9u|SQ3T{U{8lqn57 zr5}8CDt2P=+Fwfei-Cll8mpHArpxNsnqJ{`-zS1+;MrM~ejXG&dr}l+)jnYHBZ&Pk zf4>{*Ruj+elvoO54f0PmQiSk7G=%jJMR{;1c}oq=kA?6r#u|$NaJk>8)LH76L~G5c zZ;1n1X(^P()B6_%n5-k8XR`Lz`8tlx4Z{**b9YQqF1hsPxzv97>Sz0`s5%_PF5IaT zJy3&~-k>t5b71cTzVBOXw8oMzeHaX|&I+@H^G9%}&AZra-&l+QzkUu*??`u8B*lLJOo*U^B2p(Fo8i0RN19Jv zKJ|F$SIOxjh{zV)H%NEQyLSsk;=ODl^Omnbu^|tsnPU95|JHvs6b#}SSI`&Sc=h&;4C2- zUW7Hu5^s_yUlh>)6}R}FwKl<#>M_LA@{^e7rdUXqK)7QM4zr&5aG)I@G?d*{@ebu5 z12vJ?@T8rhs!~HBP1+mm1wpnFK8hN6bn(MvdjIRV_&&LR&Ve1;hnbGPak9Ax2PlBB z)D((7u{OH$#}wfK0pc$h9~f}&`gs`;gJ>Z|hV({!@La7YYhYq^Vb5#}`PQDn#&2QU zN+x&i9SCg2WLi-zs1W`*U2xf_xVNj6A7&ps%QaBvIn zw}xH`fFZN|w>vN|^zQYe&~#<)R>msX=@!YDv?&cJorh+Zp1J9)bsv*7ZGP2h<9rJ| z_~@+(x30hIrJqA7u>CQrOPmZr&nWVxGP?7D0UMd)tf28Bdu6-AN)v2Er!+OfxhBklG`W*W&O8c}3NDiq#6;xCbCRJ3n zmM;Ql;9)Yk2M)BgF(vOcD#~U&C(?9#(9FXLlpgCJ0(wZRFWyG35<8NKCFa9MJ^ zABVtn)d4pM%~pI?4&a?0lps4M`{TR>CHRZ{@Tc36#U-a{l3sJi>?pk~bGhFD)W8+{cq-pEa6Av%k-osl;;F&@K0j!L z4=CBJa(oftEqtaAs7QEvI0>;*B=NV)DWkshEwHRudJM|3cnIxHh)Ms{#SRe~S|`YJ!Dz}NewD#-wtjlM8^KFG zMzMF=xcVAnck_8<^^lh&sj>Qv>`EWgA+K6w=jv zv1Rk1ly^j3t{re_D7EJVh{5Qo!|{zo%k43*!ou58cVqXjdHh9!J(_ zVhL`Ik(p2w;j$h$I@|&0ANzY#eT@q7DilOS8*&UajnUE*nk-QMob-ox;c$uz9rn;C z0KsCQZsY5e!|#N<^*Tm(=pqaV#P|+p2HH_JXBD=P9@N6=@+&%dUpB$CiP9Zo>To3D$<#DRAfIe2TiJ=7o)8o{$8a`0tniAtzWlk&8mx^LAE6 zEV|7vn69`iumXX|KD@|HA6wj@7^6em|XnQf&=0b45`< z$_CCbip7vH^Gk#t!vzCt?r}Aqk_$&3hT0N#tH;5|JO-{?A6M}L%1>NO|IXU00=C|l zt-A}qPt|lI-{nXM@29<1nIV28I0E#CWow{KQF^;$1xyD=x;Rj}!hcnXpw9|D)OLm= zZX1l+o<$GN;Sm>@%YpopCgXdQA{*OnYLhVzFKHy_Y1{M<{gOAvvo;Rk5!(MB9)YTI{$VfX7nV2G=4AEK`xolcQM^j)I^L&-0LJJn%RUyR9GGdNZ zp3c^-;4Rvfj%Y+E?Z1IU36$bhZfoWYr+rdOT94$i>-Gz&+735=D6bi;eSF(|0hPy 
zgaRd2uhQYJcA%|p97nvnPl>k4b@^WWQrzk5WMzCC^x5ErywV9ywj;$7EHiRvAiiPm z_sXxMh@61Y=$p)kmA_D@&vDMg64K*u-4VvyP+E^PRJ1@eOT2l?G)An=>lfp(jlaS< zJKI1G%O)$^v2I*O#P`4Z&umy~t%0E^19~x%?R`DZpbuGJK$c6;%-@@8;pc&0ewa`9 zh-aJ`L9wbut0;sd-g{ZhzTXUBJk3LZOSNJ!f+Dfok9QoC?~;>dHTGsW&d=G{Kj)na znOUXIvFt&f^*wRg#(PJ=ZxaO&uOhy+SRzC@$wv$&1rqdt zD(Mr9E)t01M^vaOR2@P^xBTXjGc_q`#7azK;1kLuR(0g_k|=vXB0Srit{;-n3}Rt_ zhj!wpeZ>>t0|!0YN7 zNtw?)!#Tz3LXIFnFY~E#5%aPC8F*Vlv=NIya>(;wtDT|5w3KJ?867ZalEnJ_A==%(KvQp0ySdInn@T)I~<+O95mQFW+zg zU%UVX?DWL9lUwo8tfJ0S@kFUy8qSn=UipbAie8-v4I3cqcYC&Y!t4nG<|9m%1zSPCiHl|>QRXp{6wWV55EaTFr}J3T zc+y_QL5{*55-7t)4MiFtKyH4hWu(*X!0|vN2{@N})<`!m49BlskWuV^_q33bpNpgY zF7boYCG9#&^mlkFWdIBsv3a*d&+h%m`j|5*2mp7+u+D`MgN8>?*-IscEc%7lK>X#I zY3f)zas(l5kYEoSv|UmWsUwfI%i%Vtn)CcvP^=Vy{FiR)roX_{I>$xY;q%YN*VHlW zr2P)(8f!5Giny~>Y8%eK!6$U&Ka%21^d^68-F__At8C>^5hmFxZ{ArLKrOAp zOE?X9ZCmXfMCNDx_YFbp3i$Ofu26_l(WLD=K9)bGZFd2un)Pz6{cG_xBR5~-$x4P8 zAg5fv*4+`k-rD+AE47<9ozytcrq=+29+H-|c+>rxsDt^m38QirUQJvYL{tZe|6m$| zQ8l-ejbvVE*LU&>WQ}CB2r}bxj&)>Fu4OgEvEI2VgqrGpy){nI`IhYz9smeU&8w?WlSEH@OuMuQJ zOr$I3djy=~y~W(mH{l5n!*|8ciC3z@r~M|v;5B{>)~7!1>ATsiWw_h_1#AR8k^F6W z{OX~)h<89pf~0~6n%%7AS^f}N?0Ed)^K?y7A;juS%q6~l^)0r0}bq6pt;E>k}in z?s82qJU9l6YWSR(V2^Hv8I8S62a~iVibF!13W$5L=H`9BAB@Q*elW&VxAOK*7~&fn zk5{J=ZdxuRFkwWDC0gQ2vQ;bKdwBRZ4h#g94DWi?_$Un{E8EaX)!ZHN3ONO&^4Od^ zGak6jy|L$PcGd%5bUo|1ZKY1rChb<(@(C*Y4pvs2kkrl<%KMd9fdpCnWr*I$d0{_?uOES6lWvwb6R{;HmDqO?9U5ZX z8mE}lpMV43b3A!2bWA!(<8uUE??b>*#HyG>sfm5A9%(=e|Hr~n2YqwlhtJ?g9kb77 zOtFY9G~ug$yx;>I33-V_U*TALl?gvZ1kBJwIYFTQ@ZXp5O=vj`2U>7Sj1UMS`xa_|oUMy?VyWoG} zDq#^0f0(BS@)t@MjSV6%l9g`Dx#o`M=TBO}IKPRaIY;s?sOgqMllK7mL&h*o>l3e5 zP#U+x0V>q^oWR;aT8@+gQ87{x-e$DHvIiH^`Wc`{TQ!ZKDoaGb-bqn7nD8t#`W#UtW0Wnf zD#1mAk`ijG2mbjAp0R&2IgWJV68^j+RY1&rJ z9%4o#o|=E&%<&@jJL&qTYcIMHBB+`q6>|~D{arO`XkbD_g^>RDf&bQf>`s5nCADSq zHZsxmIGKaN0ogHX&c`Sj+8SyZ^XdJ{veY=&HKz=IIb?ocex?NU%81>&VcJxAiU>NF z?f8Sla~Pk^p31@eY}q}J7i$x?#j*8|>&D#9#G>1&qq*F7ROWh*4^GPogIAYQw<5Kk 
z@9LkjC;{Of?BkBtQOhyHXpsu+1Q77;t3mNVv!B=MboyQ35YR>wt0lzHR1S*rCu!&4 zUiBwGu}U_)e}4Y%4TPKFe_UH5eH<@RzBjEvicevcJqBg~)rlBE@$5T+x^u7|C&z%mxKWXcR;9|KMErM{vFw90V>vKg4Q)P`K#ga(h)_GX%sX^ zE?#Kj+p{S>2u)=41%|GLW7h106TFIZ!4F3+&@DMpMOsWYDkguw=ZNy^WAhkrYj<6K z_-deRV#G1)p&m~qxAmIfe-+q=k0^P`NfQU!Ew ze|bE;OXl5f@-BcJ`4*Ksd09^@Ak7Ww{ivG)xOg&@eV_YaBD+95frHu+jiuY#qeh!k z87;D8!$m`v>w!&fqYR0L-aUzjdk_zlq1_|;{M`c7x-aXr{9Bo|rHYChgRRNSQ+g5D z-hc@1%y>eB&T5AW<&e_INo$WWxE1kn-c<{ft-E>9sus}Sw^}v8$6@%?*Uq;c09J`O z)U9u7O2%N5+etgzLpa=e7h23cu^ClD->OY0YU z3H=`W;WwKC(;!;APx`U$`0yAmt3_owjTLcpi6$aq>F&qDdXPsH z^>FfSMuMK)f{0w2j_0$i_i2-8mW(UYYwDdfRF4$z2QhD*hzB(cBJc-Da)ENan~%Pl z5T(??BwIAU(JQ$ehY)SWt9&Usmg|R*>a=l>0ypy7tf7uW`ML>?u}L6f$XI39bW7j% zYSCl64(;BuhJOPmE4QFhMjQ)diS_)qWly2p#_|=wv<+`|I(hEO!<*)D zk9&1`$-^zBb4LqW1n%+b8v{(L+1~d{=678$N_@LFK$zxOc>(gb=FVb+o951(v>U`- zkJ`Cm0mb)q_On$CR%{or;nrI>Q6-}JakS6dMgUpR*R=F>(j5v?2}sW9M4zd#TK5@S z-kP@0vVWXWzXSB{BdaUjXNv5mB|G+bz-1oo=Q`tlAV)iF*u2=7v%qN|Xtf`)fURm4 zlhdT|ypn$kD*yP{nQ3RZ;GoM3MB?{mP(gHXzs+i|l|VMwr`qaNV5wW3bG0g(vB!@K z&l${?F=tZvy^1S>u4z*MHU~27`R5|2=IIttnW_hWCGW|gR=Vk5WEV{uVGN?4Ii)Jh z5Spo^;KTkGR5s`U?h-aTg?M zg_GLP9GfVU1s5&3zXAz39L3)0MR^QxceNJwC$I)5zy>qG99JbV_fESuJ+MS25iqjc z;b%)`t+-S_zbb5|p3e9*+P+)Io7qdV77L)p2XzEEK@*{FqY2CUXZ3TvNIV&bD0d*0 z;5bX>)gcYGyZVp|6$kem&KA93)!rU;@b7zQYh(R;6y{BYlndXsu-9SexJF&BfxO1s zHH^q9!`8j?`zb$t+!DLlJ55Fqj#3iDNu>s`_8`SirRgzJm6=8cTE<72p~K&jz@sAK z#c!1gos}yGoDrs;6*FF0rPAxb97~>PKke`%NWB;^?c=_GzCT0_oObRl<(-&wdJPqqIMN7HX~~ zn97tv9)n#<=dR~`r9|Z_ZBOGL@{a|YquASAe>o3VEdbALyRNuW8}|j<9FV}W)s4nY z65;FdLwqAbp9X|D)dL>;7g9L*3ySWS`X{2uiWK$>zSiwbs<`vj&A+LW_(s6Ok^q6f z$26<5O>0rCY6RvgDUDQdz+fOFC+Z6_bnq9&8L;TA+TaAVff_Zh6ntUp9#qWyEkPQY zHbq|P__}AzJhZBOwCuMT)4#WR;AS!oeH|6;?j#d$cWl8>7J8oIB_15m)zwt2_Flh$KjnMjW|JiO< zIk35~{g+Kt($u^vZ;auQz4-9Yr(|r}-TI8os$GY`2cP#DMa)$(4of!X$_H0zPh!** zfsjuLB?i=zqw?8PY6JZu(C%;OY)Zi|Z)2p&D4+g8704L87TGGEmsK-7i%pC`IihEz z9oQlCpbIhy>Mau2Z;A0OLr=abT5FT6&-K2}-WCCtZownrF2!CfEEq5#kn>+z2~oAq 
z2_h*n^MTe_dXZ|p&FjXZf2A4ufa9?A@huR5(fpNT_&!T@OFr*&cIw$SOwT5s?IB{| zCE@E5(_Pv~L-WU@oL(|70tGy{A6%2T3w)z21F6pNe%2Nw% zJMrV&&fI(K1EJ})M85Rks1^gQjlp_*XBvJPuW(5tbLeRA|2+A@%9B{tw5~n-%2D=P zlA|X#NHie?5=Ut-0cbK#BUvkB*L<`G3DguRSO#j(t*+M@XTIN}ekEvtU^!?T?3o@$R8PxJj zkzdP13|cZVR4lePPIp|B(Y10b{nFcqB=V7F^xY*4%BP*yR4RnX#&Y~;;hEuWg|m67 zwXjG6!B|3*CGQ6?mF#^q+f!bJ0*F8mBr><*>=b5xbt1I=06dV$NT)P(L=V9rFW2J1 z<>h5uILB=?S9jctZ>*89@z-fhc}w|HQO|zi0GU8Rf1Ft!bE|s>zH?mo1V}1OkCC`n zW4Qc5!A%nF#%iD+#{lG!UNXSTk|`q9KU&@ZXB-P7x1eT|djuE1Y>IFvRwc6H-|MzN zmww|ek&yI*Xu#Ro_ASlk=fHRG!_^>*)U>0Ucimh4;bh>@`$VkZ@^zf~kkrjP7J~GiXn}n;0Wzui4o<(Zi<9=I+6wNBRfdI**ggEgi8#QE2vuX zLim=y9QeK(>Luaet7G17KYEU4>%0|<4se1EiY6&)^XS~JR+c&e91@LenOgJHF=}ip z-FHbtsJJUFD66|=e8vm-{@OWS2>{A{?8ilO!^nYYmg}&HZgNKQkW&Af}90STjo!LAt zm!tXdk4l3$5GQ>2NK(;IvqL}=0kAvK(s@6amtGKx9zH_Hwh@9oq#xW#?{{dBfQBQt zm`~^R>8Hu0nddBefkCUMPF^i8byhdGy{nl=g9c7WYCLzszZD3@&;Hn3hg?dW<5AQ%eIEbt zA?<30EGH+$j);XT267`8%0Xp)Yv5Dn1y*oS=d`n)OauVRk%|1&t+3_ry)dj4BH4p_ zJ+HgSi*PDRe;+$#FXA$!zRa2lN^=dRmEc3)!cub zel~JSKfTg=YM?9_{uZd_g%;tsD6V~bcmFkPw(09q-|Q7;g7U4!^I_5*QuzH(zt!g3 z!(hRfvhm+=n##5_5ojQx3Z~63dD&!}LH2Vrx!=8qu{`mXnK zmzCz8B0|pD!G6@X;wypnoA=lBfb7-dPjWjz=LSNK6t5kcA+oA@hYBN=zKBlGe5BXo z0P5a>|H+>IpBFz!{Zj}>*tT)VFCP~=;sYWXWyV#vcpK&^)pGW^dMr*P{}(&G8W(_0 zHf{FJk{bYjG@JjZH1rlnK^`J!o~pkZ zJ+obnY|yFIIWDQKtBsla_N%tLPDgXqOT)UBA)L?@K~Xu#1`Q*DO>FM<_0rqR_c53} z52Nq;EeN-f_kPVeG2J=a>kR*$-?F_$zS*S`?1Vba@*C!7pG`<)gkQyqPwhkR@H?+x zh2WPKaB0@W?95V9CiC1$vw`>N=DQ$v{12<^g0#r^OM6^X)Q?F}uhCy)4s5FS)(ifs zPEy|_Mkq2H`YL}cO_$}jAW81Vz_qRv{&Lc8P(H^NjHc4&c}e1^r8M2~)0+{~txC-6 zSoU$P#29aynO&jLrX4J%iJ6V#?IjnZ(=9Nlk=dwAaH}i|uTqjy_}rV=l;&o~>>m(( zU6@7~JB$jvrb8F2H8pj1={?;?Gna$ZpiN%4%+yl)IT6DbB1}>v`_@#r*)80tTz~W2jAwezH5<&r&pC?deGAEtAuS%E!Csns71( z#9L8GG+|MUgP&Wh+hUnZ5@Akli+Nc3E=V4db?pB)!$<5A0p#w5kY z>f0*=c}fo^^5-w*&rj#~Z^#rdb!gF}bYynKsGYc(`Olmk=%6L$c!Ly{?ECm78jsxi z(6&{bXy>o0aST7OIqrON(K$Lvy~vv%vrvojJ(Dlb(GzE%`{8&CPfyH&wL7mcD=}v# z5J&x7@G7-av)4`PVByKf-R$lM$H!OSt9B({<1j6wEe$fiu(b(-wo|ji6y}{~ 
z`hP2A#(ik@z58sX_SX9ww;PHsbX*8?N+>tr6wBZ26}{6^5Ee-%I4l>46{t0$I$2zx z$ouJ@sS3|v>SWHN6p22*avH(n0%iq_&fc_%Xmq9!y1+|_I!MIJ>hPKH*8Yu%tgE!e@3T#z=APh|FuBhb!H08!sJC+NfdNwLZ@&bjXh;>!~)$3k(chp(G8xW9(kDOW)_m z7r1Zb`QPg2&tluLNY-1|5&g|}!WU?xkm%|hf!AwaI%H|_Cbg#Qg7&0x1zJ?1xnZ3r zf-&cC?aPi2A1CV0o%dnhJ=QtjcH@5*jZueN4ROIGH0PWrKHy0TGE1lJ*Et%!4q1Vy zCWkgfT&9p+5|^h41EbALfjb3Psg;~VRJz%*8FQ^ zp`&L=t_Gn5?qay3r_+TmMIqalitLh1-iQqZ?jQ93&*hTfa%T=$M9Tbf>@nIZh%0ez z#STeCzE-E!^Z#6mc~E}KxBGv> zR&-P=3CaGIxQ(H;$~!f4_;Qsvztx3jg8r97@=}QF7yn)I>{g0Av0{M<1FeN<0~1^` zjXSOiflA=Y6}xSuEpWUy+xbJ{%iyIls{f56X5!PT9QNW-cLN#aH66`!vXYJ$b;|GT@2gxziA>M#GR zidI%3_vIZ4M8!UE?tt3=>=MM1m~!Aa(bBGjKMMZQq7Yj}ROl*NqIHE>)pSehv0m-8 z$82%d|J+eM8iNkc?bFlN*3vQ5S7+pXuI;aCYb)iOw3*jL8lefT(`g zT<`a5CHHc}?cvMzYtL>YA_9+;{!qF|KB0XqNO9YQ`d>6UO+P*x!f!&IKnTXdGZ&(e z--g+v%I$`3h?pkaL~Po#W;KnehmX7=J94<-c{^$I4<< zSAh*&`^^3#qSQZ&sf8KR4LdQKZOH;bt4>SZMF(SsWO&Z!SllLXAAPkG7{JvR&E2iI6SWP*$#7xNC@mWCchqjMs>C9j@)NVg>lJQ`zLgA zo&TbU#c(q*wo;9bS0!GOnAmSAi?!n2B|aIFs{gV9s4q!f&Zp+AYeUQq>Gd1#v<*6^!XC9Lsd!E^kfCf z4qkNgS%kU`HnUe%2k4cb$Fy~irCdN4tTgV%O{!oRNIu>2Zs?k8Jutu;7B~LlW0(J{ z#1wWy^+9SRk)61R2rSF7&qUqNZ(kDchR_b2KJ|=GT~+w~{2Nc&yV;pS7tjdZ%VLR# zEWPFzgkLE?iTdzRHxU|K{QI=QzVZRU>(ISrO9Uc6#6yNzXB+Oeqs))5ODvTP0Iw)aBg z>FFV<_ZxNNNwYCnWDyx{)Q_J-$@oR;cv`)T%7#)4pL|I~_@2pwLRWe{EYeS%A#{FE zmCt~|wZ>nn0N=tWZ{L)`zg|!(WTX+JwAuA6$1?n8z-t%1M9qEX z8)8?kGAaS|_~nY=#L9kbysv*4N$b$A(C^I4pFaFuz+zYFU8j4%GUY2&uR zrJ8HC-$~$U$i} zSdX*@ypFpcvKcHyxJ!w4q0hBUGhCK43+|z5Q-6|(Oj)Mo)trNNf+n;>A9iz{e9bu& zTOm^RHXVsBu=PpT1Q@E)eF=*^Uz~OH=(ZPaWlC9(rMrUBK@v%+T@ou{OZ@Kg#JH`+ zII}Hr5&JyWC_Mo5VA`P45b6K-k$2_CoA`1AzJXdPs(Fp^+KWaGZ8)S|(F?mTEp(jH z@!=+v=*I^JANn&!?J&OO346cNm(>*0Y5)0I%)ih(Ygf7&%e2P2_)g}2ox{(` zLY#f4O3cr^`pdae_SFvP@TU)}y*Sg!-{I3M-j9RvI~XU(q3M3xPGHTIHd_ZeHafj1 zB%izQZ^Og<*ZW??s$0J-#|9N>?=wjFmy()^MkCSAVTQ#=e~vIR?qTdVXYEV%4SRxL z^dDmUqMEagXFdT&`u^FV`!gc8D~X6GHsrbYMJyvxJnkuPxJvh@L}9@N#oYblh5IB- z6|we85W$CxY7Hf8H)jXW8&nwLk{P#L!|(qbF{JyqUa8g5yEjaXtnN5=M900L&tH;I 
z4Yp0yHPbCYG9GBb!V4rZ#MhL^*N@WFi}MOe2n0?Iiy3y~zhmh_3$bxWj!G1JWDhD# z$_`HayWXElOTqP}4!deQetfxUmC-pBntMhLJXgf~g-a+ywMyTfdymLedi-ISp5)UQ zU&p8FcjIbDjl*vmBUMPKMAoT!Eo8}q-8P!dM$&6W5rpj6)_B3aP?0>+dzr0P#C@yM z`NP5nX&MITR@tB%V_JQtL;fb zgJhnp%x`#_n<)~CM3dprx(S!NJSsNJ#cO|++uGZI+n|+rLI|eev;5f+s%@^bvsYbL zx*oO8RgaI*D^j7bDO|aYvYP4f{6U+QoQXmfhD-0?3dl_#nEd-N>39xx?{XIb?#}Ln z){lZcdiJ_1p8JD`=V_J17nKskl1CX(SjM`nfp)&zhBsW(jZmSNWXWenu1F5~&!^T! z6(IckQCO`sw`vy@R#e2*!Rp&bq*7koq+ihfe%IvrN~Q zD9Jw9a{3q1=kV-&B$+kAQ9TZ{GRB8|##=f@TiV6~f>#o$(0o|EZvLaoi~@Qe@4EfG zH>qKNzU_MA`v3iqTG#EL<3-mpc!d^|Q;7J^7-g%;+#!idFxhQ&`eMQ?9@hMTo{R$< z_ed&5MkYlDf1LZt+_ynTkL&3RTK&{fUs3zw)PeDWWTM{cZ?uN`wcGCB!>?Sul=toh z+11>ThN`gA*y%=ozWdJ_AOFBX4}!HB480r3jzvPh$|E5}HGFf(14GD7_}>(P=>PjN z!ZxBH5*rCS1AjrQ(-A@z{!*~=uRFs##Ec2I2Y>nh@?yHaq5-QzS4#Elmz&{%Vr**0 z@8)YTNYgnJlbB$qUrP%xY+N>q@iS4m$4X|}=y99tIfJw_^hJ)FH*em1ZFJQ`V0tK? z_g-XVBxxE6mN6FIAmmkqQ{!s#Sb3J|ekb=+HbzxdwNS_IzYkF6q_nrUS6KR{ZgBs; zDkr7F(y7dj>?f+{qSJ~{@hbfM{A6iSJh}{I4>F^XcHG3o#3dyqq-jwSmX?;fT3Tky zMi+;ChFm)zR@@ciq`Zxf!c9$0vB~*a{C9cIOP758{h8qYKTS-?T)EQO*KoaHvtaJo z$p`s%vVPki;njT&1=oy17+miageOptX1Dbccp+XIIzru7y&=8etEmA{}4R&*l|=d+ULVpBWUa zDNxuV_|>dWIc}Y!V0D07Ul_u` zp4O`#%Y9fZ_T@`vg(W0h!(-tS4Heb%xHzPO_52S65z*DFS8dN^zeic<<9G=B!hQMj zWfTgt+?R>#QT8l~#>GT~C(qJo-OE~%dmS;X>FewJA(oATfwqFk@4b_l+?e7q?eXK zh|%w4!et{O!^6X)qm5bveE$l5D2l=np#tP*W@ds%UK^WN)EhT>?KGSfMHfW)9RA*H z^jvH7`XTMNZKJTXU~D^>7o)>7Rq^0fv0kZzqa#@w3l@If*r6B$(c_y4Z>9D&(6+1dGZ9;BLdgtxpGCydwsITegyVuZf(te`Fpc_XJ_aA zI;S7~*&krh)Ya5T;j*iR2L{5TW209tUp{ZBA*f&eDwEnsED~XSg2j7BajHxM}Z9?ObRdH{r5#d5evCDJ~7cYdya&JGva;iK&OcXH#vT?iOkXI~OHE1f`#If`ubJ^_!Hv&xS&20;6hnkvNJ05Q@-|D8O_IVZ`#=^pKE1G_GVZm~Fu7D0YkMj(a zmEE5+&d$!)AW-i;5vHf1@q}V<^+Ct>%3%IUxeaJTe)=ym4>XI1l@t}3uLT8sdlw<+ zzgr^j>g&6oz<+;hYb#GJ`PF`h3&n7zM~G>zRZU%8#(O>B`{>m| zo$)+%ppL{OB=<@UhwGeYVBNk}MMc6R|BTDezl@rPRD!Lab8BdHo@xs|9<6u)ne=Y+ z?gjJ?Gp%L85fyRh+qZ9JyuQ1*y7n`T);Bi?!0QA5?6S!ElxJ2MXwLpU=~oBE`TOEx ztz{SOR4BB+`?U_q*MeK28fA)vLlc49Dmwn$-M!SC(Pr6|kd?(gwE(Yib$7>W)iV;7 
z%Bdro0pia4=X9>>6Siy9*|#WQFBGiFfJ+ib3F{m~@7i1LpZmQz=g~RU_|$%9_1loM zNQ+v2qHdv;SEf+H7YO|R{aaF6+N8lP6iJA$zCJA~Dk=qQVt6kFVG%^GUL_(UlXG7r zMa@HSLV8Okjq?JWS^XG(frFi$z1q;x!^5L3=r5s&0juTvr1f{C%(QKJ*@iyS=Uw;( zT#v?Spx#9s>FMci4Tl{uOkQ4Iqx+1gq3aL=tJff>Wn`L|B%D4jxY1uYt4ooM(YdA1 zr>CXW_crAGM4hwqa`Q#37XQC^f!|EcChH@_%^m*UoIzTU>;yc+dbRzC2G{f1l4V5Z zIkJB%=Ih&OQR($#oI#p{>@?veZ<_NG5)ue~6EVz6c&?CNRVgd)o&p@ZGgVPp$?ijR z10m)@h;oV^P04%x{~qy4Zf8fF56oy`5=_CQIAyD`? zjhKIb`Tqyddz~q=q45$D1u|go=Zqi^kDH4N)cYS;A}Z+8*2@7c?YXLnmV)8c!@~f84hn2ea$9fDvlZwR0S?hjlOFE#y;0!m zxBGoeVkY3Ot8Wx7Km3D%k@4Wk?w^H3Q!7i$6@jZ_Vvf+BAgG~on|Rx=tgHwK2zYKi zx`0;p@~TZsV@aHlEE^*0YH(lDcqy4#7uU7m=i@`lAe;jY+c_|nF~;gkiNP@EGBh)N z{n0fiJ%L2%nh%j%)BZonSfsb7g6zl;gZ0!jG-QmT1p^#uqPEXea)mw475{?Tte}^1 zE!YIF3wL%KL4-gQU=CCh1!`;utv)IEA+#7D$w_ zyJ@s6HdZs&zq+>eXm2LizLn*vvMA&vpqDS*A9PJjj(*h5q@<+K($S4{v)R-s&}26?$?0zh82=ShiezADJ%PIFF)1iW0i8VfKH)1 zTbUm8_dqW2pt@&ufUwW%zjN2%>bGK9FJVjcD;{@ue2M)z`SooGfRL}p+uPgW;fNTe zDx3Z+=r!Wb(}(=);rLm#RB^H+K|#T59?MA7J7}8zd+X)pz#(cz`7(Nc4j0p*as)Em zL{H$zKg{I)n5e_wAY+xSudaR}5E~T*B?pDoH#FplSDDgiw{TUZz&KViq6` zm6`6&&I=KWmo8n>Ej7{8Rb}PCz<^z=7&e8^P@?1>hv)jFl)il6 zfydN>mRsvOMJ+qR3#xo!6POKn9ND01Mso5M$_SHYpUk?+?(P_*c7;>Y;$8NqI+IN* zxkc60`LAAn?(FQp-wGY_<1FtQZe{L|N|)SrZC~p1R0-!*f#dn-nr8cuV4jKictcJ~ zz`8B_(@;Axg0QZoJnDXajnlV(e|t{?PZS@o`->$$A)$3Y_BF~*r{5)voCU~!>655rVeyj70yiMlX>S~d8;nuHTx1KN;P9RDEc8b0Pr`bqHM>Ocr zhdfLH0oiWc7Sv)QYNV|#?7pbxvrcj9)F}WA47jnj;I`=)Ejv3Nyb$1z&Aq*RlC!7Y zPByp~M@C%x{q;0j88Wi>rPN^7O%nY}qN1W49H%Lbn!LzpA!^U0$p88M`^|lTt#W?b zuVjeH$y@z)iq<wA90Lu#OL-@RSgs zCeGJL<9Xis)i_tX!DSZx;K75Gi$X#hfMq>ATM{T;*aNrwZvk9){8Ga3H_xL?AMoC` z`>*Zo=AggQ%s-)ElI@e6lbNMm^>g2oxY=UrX}{#ZwXovaDt5sp5Hgd06EA`xBo( zt0|x87n0rC*xanBuEth{eKC30(Sb%-+SvhmV?wwnpwV9)^7@60FA)3r$i6kNAfS7o zKh8Vf$Ea!z0!2%P?NGayu#!?zfRd=tW^JO3=~C`ooKUdE0@vZC@B-c#9AKu4UnGQu zfl21L9uwNq%F?<1@|YO1*bi0J)j3bI5)F79jH0#y? 
zU=z!4ZE4xR78eyoqWGGRb`1)v6@6ZFGqZp(A8j!_RJkh$NA|mSiZ~24@mbkTR~Hw0 zQ5z&b4$$3crsM5FO#X=tf zE)A0;*mg*%@nmhJOoa4#ax#%vTD7{gA|AE_wu=-GjY9Gc0F;Y2GUNllUOLkU1V6;a z`S|E>w1%jIot@v&;XdHW%bc9^OG^|C!teU~UH8_f%AekT&UNEO-mqQry_@(%0prKs zP~tN8-)dg_t9V)%htc5~PP+QQu*Pl&>S8+&uU^T+GlC|~+yVmf!AG*qAY!yAa{_q< z<_0*Ss;({=;0|60#HkG_0$>@SNH(eCQcpRv^MOF*`S$mhG8ocqMn8R81xO8GIe6pi zBbD68nq|Jf7PU-3>zElHrg=@~vPX1}oHh15O^1|}luKP7$Ph~TWCTU%>)Jylm2!5jFg8@w3$yO>gs5&RFuc@ht{@CpMYq@X$h%c0#->@a+IRiFH$V#Z#I80ntgLaD$kXK2oT<4U6gO7XCWE4690o_s+ zFV*9vjCkeneK0>(nG-M=(4@HDnV>&p;)tnZ13p@Zljy8o`9Gx7>dMLpGBV?8n+rOu z8PcA_wG_p4baV=qx8VDDAs|Wai=Q8zpQ zLC9}s)zaRcM3YB{M>kd65rG1{Xi2XGgm7Yl3a#{nnP>%Go6V$lFZaNPKI;e6(=<8% z1WeLGSK^<819Ar8Gk$pk9fYp)py&xsikW1JYl-z_sc^Fotv(%G@ znt-S>t+G{d(C4q)DCAR0@)ko?e;i-Me?7&ufVlMdD7MI@Nl7 z^hc%1&E4Idcgk6}e}m%M;faaN8V?<9 zZOXtzB~slQCM$X+p7_;8s&ON9QoB`^|ITU+@Hb$E%kt;LmVBUPcyPu*$HuQ}`5u1q z!qbzDR1{^+LSW-{OR_)~$-Whfwv5ckVB<5Yv>-w$a#O41YU40CGN!ygPP#?0IDS4p zofRd z(dDl4!s4RSc=ZbOi=f{dvumto8XDv%Z12(_7p;T%1Rg&JVJ8}+#zkO88fnt;Nl6HU zz}(T%Q79GrP#((vrU;3MWM`1Ze)zSq5grpW%+!B(@6BCp?XMZHUR`WN8p^+EVvISb zH}c~fBRze=USMv1{#u|ZP#rnB7HHU2R^8(j55m;AKovlGG?O6^kkkqQ1&C1g0BPg{ zd<+c6cA%!3wrL#@{myyqS9Cp3dfkHN8^tu$v_F?-HngH>s}sh9UiOHRbm$ zfZjoQyrG;xG$qp_P6wrPsObuh-|Kt17u$tHZ_j%1ka{MF9`> zXWazw@}O!=D~|@mM83!A@_{N`x+auh7m z)lO4@19f;pNyy?~zC4_Zy=KxHkX#!vA_mxmE$H`+Os$tv?p7dHYz)=W;?J$Qd3%pI zw!QARIXe8+3lE`q>sDfX{ICm0W1=j*&xvm@xw1AD@eq_JPo9Lt0^ABZ;G>_Oo8R6; zB6495kN*7pwb-2=uh?_WGhXE;6)^5}Ie$=RQ<9TI!@@ul;<$E= zjfcKy{0LS+K|w)`F{Z5SXEcLIqxa9F;d`d<-@VhxU<*ZO_S@K?$bI|%URh<4WZAyu zy{&ivYPxmRSow#dpt_CMK=9ws#=J80{ zY?eX#ho|*jpKSR3+vm3cP;mULOY6aE@#-~~7KOe*)jF|EH7@@JiEt!4R^PzD*N5;$ z*gaTX0|NtCOwjEd$0{$O6a^B!zmMMGq#PR^{hTh>1T_^Z)J)(W%YGU2SGNWX1~U_U z{1-6Ay%K%bKf8|F#b)2ur<%&ueRS&0-$y#P?&IE@@BEwr@$Lv#wC`$NQ9E)gIU&L7 z!2=Nx5aG3J3+@o@&@l)KJP6GwqoDn*g{6#O!2NXmhG%R+G5iqCkX>gvemT=mD_aA0!JtjE#wD0(5TBK7lF--mfhI zl6&p%_8`RY%FTBlKIA|6@C3wUAVsq-lEk5pfflFB?yFbkNW^|V&lXT_KO!KIC}Z(4 
z4tQg=!x-hksP45BrecZBd9NV0uyM&E)?hdl>J%ANS*6MPYF|XYaRWj7#fzNOfvzrI zY3cEfj>X<<$Jk5#{rwOXa4)ZK+kRVI1Y8BJPg(inVs|QL7K9wg+=cb^*Qwgxzr(?F zxlXnW-~2kw!Y-m+Wkc`x-nM2uDT&@D{*I2$_Sd(kQ@%n418o``2ShQNQ4IW+)B*hN ziiu6lCL|`TeqLO0n0q}Z}xXt?~D_&@NdK$=T zK>;^VdP?5bs8^vc3)%niqcUxjCqQJMR&V&z zW=@fUlDu z!8Wt|Bc{PO{;q>!Wi~o8G^CRPm=J0N-05d1FF(d>%qxCFLu3osxB-E#sQ51HW)wU= zP{NXao1o*r-t>CkK%xiS2n(Rae`~KxC_1x^Tc-i4xN&QM6aWHg>4Q|~)<+DOI>!k; z6B8#(OByyd3QbvYr^$l4ZvdCg&CLaj8+8l~RS+v9Wf@K_4*bg|E|$U!FY&PKkE} z%a_e?hy3?zIWJ#^1|g70@?dLeVS!*t)YnV$^E*OmfC3H%4Jn>k(&Zv2CqW>90+gm# zytB3S*lGrgw6J2y9C-Pb1d`3Maws|Mm}rv)_q383eI zM{oSehiP;tP<{H6_<4Bj07mAZrIMdLtAvEtM|WKUaUbyld^=P0M-b#Z!s1JG01E!s z=USkwfmX4%GI*Z#bh|}ADgL??G}@s#7)ZT>M@a#en1&ak3gt zqvPYd|9!Z6xqNT=?|>sHo3r*H*T}sjaxn*6u+?YtG13i^V|j_pls8GzH4D@h?ZW8j zXin!zwa14)&)zH4L6e-f3FS`g0!*MfBfM>`7t7E%yLCSyBZFcmk0v=exzx0c)$axn zh=TDUOIzF9=NTTlMD89~9IXigH6s=r92hXWh%tsN3AsRNBPeZCO%CkPk?A~{#Y%v9 z`OLj15+!tY_!9#2Z6+RW4KJ+mk#IXc3RKqG^v$|{ z5xipTT$X9$)2A2YIsiMr8Bza99InB1Lshj4l+^YP-XAwdn!KG;f|vKMBOaLq0{XI> zxw5>ZQBKE#)-AAWo(1Clre^ZfCmsn22^pDGahg8f%V1nC8*aa)L(PxZ*bjfND=J)s zPKZHVyv{)hMyH5~hMTzGeb1mx82?h4-favuok&BW5x~IAPWpj6Ue{5S^9+;)sno*$H(Wfl$9ds$_}Hc z5@lAq0!0TV9+{b$jU)gUR`#tN90+S)ao(e>q=+tUJz>@fq}9&848N(_<+J)^X%P{f zP*dQ3^ZRYgl9H#Cx0Ftt%rRY%u}Ce@t1fJ3)LR#qLY);>2sv7 z{ucA`^FvGhd${iolPKoq!hk!j9UqFw%TL2YcXxMx_;48(?b@}O?rz6+EK#4Ms%Du< zi;=Ohh_LYQfFNjqpmI4nI8YOZ0xW_%gV>HkDF_P-fBN(ZqL)i8$z^-F9||>il3+mr z`gU!Ygl2xtcT4divc9Pa(#a11E70uV&2MMJ@UC871Ofy#3FKceo&CW)2xH5GH36D> znVOi2l2TYiL{&uv_y{eZ{szHkXsH1>1R^`AknrJVq+(y^=c#FFxsV78;bbjthv{Zt zurHvD+SW<2#!C2ZS%8iN?CH^UGi&SfGYHaRHH7camc8#keq5p?O6{(#tXv%_n*rEN zw4<=$2#JmNSyWO|g7$Ax?=lOT#hKn%7+M*H;s~G`$_9DZ-2A*~z;73uhbNh!(l=IB zxr54K7V@8Q4HXd9`{9}1bUEHDSHPGgp`fs?(C4GI2bUQ(1D)CT;lomtVC!#CgfpoQ zK>WvW0ILRf0SYg?0Mz|2KNins%a!Z%!Aiy`Q85y}hzIoVkepuVRdscBd3hXu z_WH^y=y}?O^+xL;$ACj^1-?09n>^U)Y2RR!o|tHDYrC?v zbSvbt1vfY_<`tt5X%I2nJz>ZJSY8;4nHU?Z7;{+8`?jzEdV7ae6$>-79?bK2tPH$( 
zL4E7i2Z;aik-OZ~tg=3FNMYN7oJdbOn!@2#(4m?d8bYzyKG3mHAiP3ez)rxe=aYny zG3n{+>+9&en{@#o;=b_}2ny)HFhT@Eevw}B&&h`DKdQ~1YYBi)*Ap_+C%U@UrqmNm zX3FUuo@4DCpl5*8CL%5_aOKKX4--;b=t59TUcY|r(S9&LZ^5lc&MbKXugn1xiy%w9 zJ<$T9mVT90OlDO`-IuHW8uB?@AsUBL!wwiu#nMkb8@4lxfw^nW}+L+g6*5A zbeyP#l}=4g25mE3;~1wE0s=tm;Z_%Tmr%_>`0Jm&_4MukGoHts4l`5#Z-aNUNt9&M zK$XGwhT?HOpZsb)#K5gvx9DhT$45tfe0@FPCl!%5c5`uZD(7lLbxOLu?l}eMxM;uN z@87>L$Of(>_~06dm9Z+oB%gYF!!|pCn44Ky={-08k7kaIkGFpZH~I>U-#|6SA^|ZZ zWf$KdfUeuOBjV#}(vWAw&bl=OK;;Ca2!e8CY-|~qCD7;{F2~Z#m6es+ckl8Bc8!mZ z_x5U*>dn-|@wJo_A{i*p-`W>FQdM`Kw*% zpSr5>Z4FRIWK2xpNE5EG7Sv+9IzKUv?3;>;LZnq-RQVKYans`-*zyKpg%(S zgFH13bg6mpfn345)=kLVJNFzZ{hQ^_r&_GKVdk!O-ApwiMcRMY0iF`{wyvT4JCX#^ z2?{w3I1z>xmO_O{MF4No2iE~*GH&#EWWWt!0%Rr;QhAw_9n1I(ZqDD|A4UU*`WR3! za>9Jo{2k1$k)~a+PzHDnLrld4w|h}riHt-EF9vE1U>X=cJicRScrfAIb^)EIkp@$X zNc2a7@H*Y1iZFn=GAE~XXcDl5*AA9B(BX>gV>R}PnIgaK9zA+Q=aD~9kAL?Wa;R~5@H zfQyMc0}PYOSQ0EWg{3_jK>9h}6-_R)k26Iqg<^`P%JqunfS;NM|BcVc=moo^)=>s( z4!jPD27%vmW#AI}BXo2dnIw?!00AP=Fxv$D@Y3^6AXt8L9b^qgK#d_*A=7^>E-E3U zz{0bz;4G}66Q#tPxF&PO-b{LY1stWo$u>K2ZW@hGdkfG50$iwcgY(lZr z;-hV00g>I#6rA5b$AL{L{C2o{1Y0f6hI2F0w6F6 z9%3pFH}`Ka@LyS`U%$Aqw+9&;1S1Rx6zG!>0hh4REHeKykoOXB1>9O`VloJoMuZuf z0U#!OVC0l$IP_**10cbPvH+$9!O`2*HPPtFiy{bT1U;g`-5TZ`>gx9XBY`|qzzpfK zva^X$Y0{pILS}F7_oC1{P_!A;G-NRCSm@;-nX7YBHZ(Q{fg{33cc1T1_{LMuY z2w!$gOgGfsX}EnzD4H0XuP1d;Odagy)1eZGd$(-sCNavH&me*OAJfn? 
zF#H7=s_Te>X3-b{a9G-VT@A4UFqdEc7#vuEL{$U`R8R{itE@EvNWQml`Q75TQ&~~b z|2NNuw;1L{Bl-6n9UY;aG2pHU2;eS>`qkFd=<>uj{=h-u1t?C0Vr69olS-^pwwmd6 zwG+M0jFaTms~1dGO&Ez#w6wJBPZ71RCY&VM+0hq~NVK%a^2K-+d(T#mp}6H5raVW8 zoR^4&b4bJ)kjzN&%Tw%LklC1|XV0ENNUqY5Gl@IA^55SXG4*_K7T6Lu13+8^(W}Fi zp+c&R-Y`+9rZ7R;`2K=}-sUxax&IL{n=uY$%u*Aq{Q*9mZt@mz)D zZPlF2Qf@i$SxzaC0XI(h>=rM9Wt@K>$o#pMsVpwd^b);d)D6I2L2RjxmQ3Q#IZ-ChYGtY1fU^`-rWOyO1x3>s2w?4D2gGVgghG5-o zxH?ZY!XymvqZ?*Rb933lb__Toa&j)LkH5cyU#>P${v975NxV?o@sSMb-Q=V!5045b zB^2T7#*1NWK_7)agEnE4`*R>yHTcgD2^#Qp;EV zmmancb#z!kQzUe%L`WByWBo=8B0Vu3N>H#22DRX@K!k~v9UYknX_ehDPYYW#6hJ6k zv$swycV9O%`wDU!be8ih&Y@`3yCzVqP*`A*Kc|`quiB8JLI>?H&?#MJ#03O_<7pbS ze<^z~N&t)w5Cb;e4k&NOUu0bTweNJi2IC2FuWfQvs6_}GK56|kL=C#m7kmye0g$NWOzLoGHll%&^Zx-Cz{;bO! z@XonN0-Yhn=j7(zu#9hN`crr}A3Ch!oN9{={Qw`ZV;C(Z#j7O=os9ehdbuggs*HPI zKtl~fJ?{i|0E4@Vh|QfH;~G1fZK5wxFzizZ9PB$F3h2>V@)Hvia0mjPQe0U00O|?| zP)ae3XH{z*#=yAS|FtwryajD1LNNav*2zDsNJ6y-_eViUv)w?2$uLvveR1%~%%8_eMHGpv&e7-F-6G>964 zuL_;?tFd3ktzaP+H?T*;M@madE-J~wFf0a>sa+Uwux+dR;SY>RjgEIdzl`aGy}@)6 zL~_s^1P&Wv-cjF`1mI=JwuIBLK^(+hy^Y6dbJ-xlX!#$FZUn`w@^zh4g zyCn=8f+P*IIEIFdbadW3tGYh-c;egd1OVQEkpq|u&eJYj4ni^Rf|^+T0`w8UpZ4iL ziaK6A40^5>lRXO!Tfwivi2UrCmiRCmnEat%{W&!`$x|wGtKd!%4GhrZ0U7>q8zi~7>wL7(u^K`FpF(Tj~4>+^f<<0vi@m*1TK^L0X-KS zxJlqMZGDNMA3x&@QyIXb^@@jKp8faNf}@9w6&E;^{zVzRT_VVi%g;avSew4_R%&j2 zU89$wJ`ymBFln}S;VX0<&`YT8+&nyR62cc$lqtsQ?ht=*eZY`kx8Ynbub`myQ53Gh z$jQ%7+PI?V>Lr_3Wpre~=S>BZF!bl_;QZP}x1)Ny$=AY@A*oiWtmC@+OPAZIy z?N$D01WN{`ikD$cXq*|c@2;f79Z#wlwio+$jRVMFxY}MS9DPV^$r$xME4IN*_G@GarpBfp?fuZ}?%hGa zQD6YCLk9&`=)r8j?Q60ECZ5*CSOyVXtrwGgKz&362z{Lpm^S)l{>{}Vuya7^%Yg6d z@cdoL&j>nLxeOPACs0ckwuC`CT~C%9o>#72%UCvYF(rEa5rmxdVi)5tQApU&WvSDf zS6@;X33vlSgivmTUI3ldfKLz3%fOKOVow^0W<0den}`do@P`5Z1RJmgso#cS8vVA3uCpsbb?Eu7&2+zDX?ft(xvsnLe)(#yg| z=i8i=Q;9GHaOhpEh{(*&T&^#Jx06Gz~EFr@*T_c9@27rez~ zF8H}bjd$fU%CxvIsseQ&tlpyE`uaKmw#g>%GNtVPKlR`?@OI;_U(_ld2EIZta7+8k zPyp1SfPXolx`@4gle&PuWKa~ub`UfG8KuHUq<$yQ#J%Ra#&BJA3Jz$2uQpZpDm>&A 
z_Pu#arP`qVr2t?Qn{yxjS^MxFs3P?+y`-HP{yGw@{9v2-^8{|EAgZQn)iO6MqYrw{ z$B!Rjkm&o?LKmolfaSs70;obTh{2RwH9N(jBUkX`(TDu9cS35mwpUQ|fUc?;F84Jt zuRVEE`5nM#-@SVmW?8{r0jL-UtR19xapIndEO zm?JK!CIE=9a~K;KAD5Pu4S?yZjDRiZ3>bR-wCSL?v2WkKJEPGJmI*rMCd{_Ou)2*6 zH)+XkBjD=sTva-lqa|Y!_xLfs12iu-GV(ETw2C#Umxl+I4+cu`&j6XexlK{Sh>Zi< z?eFLJg#PL&o@N-2u7yE4pz_xO4_+YA_a_C7>a}=4!%iRo>umufGju?BLEGULNi#u` z1jT9PlZE9KfaVsz_?bu5vcVi z1H-+bVt&jtQKv+!)0XmtXpg*od-8QX2-zWjmf+YJ96JLI8#%r?DC`Id6P(s7DRBVc ziF~%&&o7swZL6eo4wXZgxzijp;jaGt=%voigom5k<6UPMtg?p5Gn8Vf9^?$QUYX<< z#av+aeXosWfLq2Wi3A<~Cd?-uus-DxI9c%I#`E`)52AI>3RuII!^uY!7NqI8xH!nZ zw{ISoy@0c|l$7RTy_>Y{y}a_LtZsJI%D@5G7K~8bFbNORWRp^J}X2-xJ__^e`2frCkVf zOQh0o=Qg=CUjBYhXL>!v5B(8fE3ce|vgRKPP=Y;HwDofV=Kr6*t^}Iu^<9@_OeFIV zN=RXwBPp9qQIbmLjbw4=KWJ>OjeRn!Z3GC;2`R={>jJ8$$^4MyUu!Mw#co=iF0y80%F?a_1P%3m$_3Zi; zk{zS%*8MAvDz>z&>R%6Z3#yB{V-b9xx%yItu`OVql;cW$3z4$O0+{8>lHPv_hIZ#F zLKjLZyDmqLH#<-}iWH808e7^JA+9^M$w&icXJr2ZBy_nD9sLZw;Ct=H;V+=sTm);u%$TiE$J|B32)!NJ@66t2E(xEGU_S? z$K?5}-GGK3f-oauEpv;5=bElEB{u{j-s%4w2FXmTdba~%J4tw1+z5<^w#T%@n~GsS z1ZIu))!l|m^{uGu5kvw+o`?OoD=aCwg{8waQq#`I&ChS(&LI`rzTYTrqjqJxk>*36 zy^6uSMTUm%)Scl=udA!etlN7wUY-afsHs^Wzs(8lW^~3BaUg+p7BfEJejE~b0p^3Q%rF1!`VmuwSv+%`bqxGEJ+z|!| z3gC26E?YJHHOG5Ohr?HLd#2&)MSU|2kBns1ZCDZ8n1)UM_F#oqI~~)x5Hn$kILFGy z2F(6*uzj!L-hLAXZU$x(x3N`LD=DFhE|zO{scGyfT$M|uLKc3H zf5>z%($m?cU{RET}Y&Tx(Hb;csZ0L8c-M#FU!{2ADaM z1CfPk@i-@kRc!M%9UTLTQj%%LLex|O1a=^IS)tfWe!I%p3%8&*?%1)z&25<2Kkp1d zNWi4zhEpqcDtyiAfQya+>(YDY%$`ZNl0h|qzc7Xf877AKFhy-rNXZT< zF_T@pctq7?)zu%gsp>-K@ANL88W-f{dc5V(p+iEkx!-(|So6G3uJa?CB8_$Mj;5w& zl<@!{F-&VcJt==}n)%Y8_3^|HkPnbTeu3N69(x&M^~>m`l)b$@uwwB+?#37>6Y+jv z36srp;XskR2u@l=(Gs*c`?tzSz9{k<9EPN%K@{;!L-wB`n-mlh&2ypb!uj!+7j$l* zbVAQxJqWY7JFK8`|h#1jweqt+xCPd|JEX2^e1S0JQQ3lgr0VHzyGfGUja{4 z;eBYQG2oy>H6*yE+4{MdOvab$6_##eH#n#%18h8O{JJ>DN?p0xiv;j#lB)qrd*dCg zv&06EC_Vihyc`3ve`GqGnLa+FD)@-gQ7slv7|_09!l zWdgVQ*OI@V(f9r211f6U)5;Pc=6Dg+As+TfZBvaJD2XV?-6Ka6&^)nQ(|28;XjAccfyBN@e*GF&As{4D=5-*@~D7pa;$#-eU} 
z;5ej0whPZ7Az1t=?b~|KQ7D6u3IWfMYnrSeB0yT#&w}3wy2?m{nMIkri)&z?T?pXl*F9i5U%->VGS^q;!mrXJ!fI`;Xp#o(6aR-A2{R|e~CA1|V#Rr;F zFO$Bm$7DrKJG~d)+MCy*_E@UoOM{js>75!>s^~)4%F1{8T|f z-wG@A;ATZP;fz%WiK|gl;KNpQ?c790E^9QS)e-4&F8Te?#2kb18{zt9@@C>3z%HV4 z_8T@M6rk$#%XjW<_6;PtH^a<<;U7m^QViCjrGT%z{szMZMjp);t^U_IeYlw^nKp`W z^JaM^Na(P5fnrjx?L=Ulm?HwYTTy`P_CckM5O@rRZOAdv(I!-?##!O@9q_g_{Jjoz zt-gxNrQ1J1c07E$^r-57c{!o0EE0O-ZSrnTD0nKNKgaS=znH#6LK};?t{cOIIOm3> zk$8p&en4r{CQ%5FQ$bHhzSg0Y>A$ox!4nT}`sY(aOc;CE`WJt-unAg7Zb-%Q$>uPN z$y8)>j+ReDA7J+PVswEEydB++=K!?(*`9(fg*xPaW zQ!86x`a>nXC*-@Sl)_-6nU&wak1}P=bIEai5(=}0_93G0Jp55e#i{6#Gr}Ua#Quke zt@=1IiwMhD=(zJcddzh3@de+=R?_uXPIKwl2!_MUG5m|>fyrAOdk%xO1CNT)4t!)* z7TmJDBJ@p4N>NrluU{*J4*|{3_aSh4K=NR;X9{Bwhub1enUNz5fBsvb3+#-p{wQ_E&te*zJU)EtVFupAz0XMvc^y245)?tkAZ=KH?@52 zP)rBc;w}hh*;l+|VR6}N(Xch&x=BHMVPzZXYNrb=x zD*aFl*(<`y>C9J!j;^~*5ysyF$knSDFsG8EvUf_0Y^P}yo2Mfeb>LA7()12fJafOa z8ayAu>|1{B7wu;;V?zk~z5HHi8c`-QxOt9>$f>AcNY@b%+_gw@Kmg;&sSjyH2>@P?{O-=m8p8e1v0 z#Vw7ls(P$2TItB1cTT`;kTb4eF?-j6^%CT-XYZ*weX`WHg%oF=OAM2OVh8RSa8qs7 zhGg@>0!tzdygy3b5-ntu=O)az zf+NG=@jGM91~HdX+gd6l`x}%LB3K)4M>aJwVwI7_7nhZ3;I>jx;T5YeF*fEWfByWr z3khV{`wt!JMpptAvlDm9`bY%f4zuHUUbzZ}-0X?Q(Hc%%gcAeo!xT0gw<2s0Y| zUVuX|T#!X|s6753E<-x<6+AiYxnrSJ%|#G{o)K7Hz%G9qRz&OU8y zbOej0ufMMMCAtS#6~ygU&QA6B5i>bh;>9srXa=!5ax+*7=c+fbLgBjRCwsg~TPeI% z>pDDWG@;mC96CsB`3>ou`~+`8-jG0*Fv6Q1qh5Z5@kr=* z9&hjwwKtp9jVq-jBvxH>3l_n!5@eqCzfuI&wK~3w11APU!tnv4dU~mMIZC>{%hM{z zC2sM^6I_2qKhNo@kp@w49Y#^%$U+eUQz~9|vhKvkivu&~m=m#lCs1gJ3}EdR893K% z-p2Yp+Y;=O8FJk=_82#7IHEK8m5)JAB2T&m(yzd#8r&kfruI`Upf~2eer>lG4&O(P zaXLAYmS`%=H!1~(4g|dF<3yPBamQpf+gX>vubVJBC8|tmhYXVQrE(z{ZCF6Pt`YeQ zl=)m!a>QLPC|bDALEfMw)CPm@i4_I{{0e*uz?N8H6ZYa$Tbp$hxs|zX5;HOjd5KuL z=Uz{5?{Pe&%Hz(s7=nTQksdX*!=wEXP!)Gu65}~qz z8=UBTXP0oQzME)ssA_9>_hI&y4v&XtZ9!?SVx=mNdACL#Audt*Vku&zw1ftq*<@SOq-Z<}Q>FP*oZ@+{xyhD7(0FfOdUx zWe}dZ0b6#OikWYOOOj~^R1f&nk!Aon$0@|d&Q5USY5lN*V&*hs8wh2Hd7Lu1uonRN z5om0MD+5GyG~We#MMm6z9ih%!J$C9i1!u$@DA% 
zj_Ck#3z^=nn`1Zoy+-^#cw7=rRS9=+1V`~(S>#b|!yDFLbYni=p2bfQcK?Rk2RT>L4xKMy)?LI z<@iBtd>o}r-GjN-$X3+X??l)RqeVS~XJBA~-iwKF{NtN4U;k5lGB7Zz@C{&*;d^fB z`i)Bmz9E*xz1~s8C57)Ftlh%5X`*%C*6s5At*7`?}Pcvl5fT!z->0Lmfv6#N4d>8(CrO zue99Z1V0L|X!J%rK-iMNChEj#A6gUnT4J?HrE!(AW&?o`=DYCpmS>-Z)JtaHI+`?( zN`T!6p=|^!c@tJ%c;U~%-1X19?L0E>*k^#wp^IPG^**1M+2eg}qdwsW7E*Sw{i39G zo#*97elw`!$O^-RsB^4`lwS!rfjDC#F?EI%{1!hAmsfVHXGX(TB$)WnRFJn-S`#t$ z=_O5Qc^6Ng=Ore>p)rV0NN_?kk9!W6c_>79v32jEBy-5%EdcCu3vC(gDb04eKHU8g ztQAfpzHGzm3r#?!cRz390> zbW_JKuLW+(|CI4dS+DdMtZFv{vT0}ZrRdQYFI*UVSGRHb#pjX=g5dl_*QO z2Sl~!Fv~u`@x>jpmTbOzH%hzH6-Q!%<{!S{0BsQXTf;_Y?{%$7WXfQS1N*mt+9q$2 zvP`hLEbAwJCd^z)e=NO!{1;H5W{Ce-=_g68d4Q1^%X*(bztHG9rk_BcC?|%5`R<nCxQ8sA3E8mL1|zWVUI}Iv~M4PxT43-Oe>qk zQOq+t3__JN<8U*o@0cPpf{9e5Q(>ofuUuo*pbIi1O#NdnYF5JoMnrvhE>~X11sj8VVY6>Bf+B{2ncVJeH6N+kJ|A1X2dm^ zZ;CZ4Q6};Pqjo9?{00=9DKV98E51VylS8MP@$DYc&2$fOnovAOX-u&+soz2nHxuZl z(=Qmnr~0e&GNIT`aB!Gd*!%kCD-XhV2B8E$ez&EirnWY`K+_-;G6UkSTxrMuBl;`c zfMColQ%ITEwahGG?|6`Eh4r$SyV`cmT%k)zK$m z1c5&S^#PdWlDM`qruBjn|4=7qLQ_{ixmBX}Qp}%T&50)^1hc5M_0YrwVWjQt3czoLC`@&P4s@D^QyJ5b^72M7M(955-Gy0K4IUCxy#Sp0Z<^cU{y~Cg+K<6 zD(GGX@nPriXSmqW5a;FP89Mjk>WsigEG44f;3s=V@Sp~c-bB2k59^$rmF3d(2}>fb zYod7qsB6>y+el=;kj^Cin5NabKj$}$YG9q9Pc-4G0YuK%)-ZFkwzb`gp2 zaGtEXd7?@=_x5dCNJuoD-lksxyv*D-hcObZ1r$_3f27b>(1SGPlT3kKagux!Yj@{b zqcO%B%5U7*>6g=6$&yW(?ZUWioVUA0MHYWTltEyRhVokihrvCekHwO7NH$3zWJL=C zdvp%_FWwkM$8h0Y_Bb(z6?+vxsHkW9j`DwEStZ6>2LNS=WR@nFgg^iE-{z)wFRd{9 zb-FJiaID-A)|A- \ No newline at end of file diff --git a/doc/source/data/images/datastream-arch.svg b/doc/source/data/images/datastream-arch.svg new file mode 100644 index 000000000000..757f93d1777c --- /dev/null +++ b/doc/source/data/images/datastream-arch.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/doc/source/data/images/datastream-loading-1.png b/doc/source/data/images/datastream-loading-1.png new file mode 100644 index 0000000000000000000000000000000000000000..9673789bc3fee7a7e18d8eab191c9315dc330857 GIT binary patch 
literal 126100 zcma%iWn5I>^S=esAR-;2NOw2V5=$?g(jZHBsdP)jQc_DwcXy|BcXv1ZF6!s|;Q#2) zgXOh%?>YCJnK|#7dC%McIT>+OBz&YNPoAJkd=gQ3^5n(NlPAyKA;1Gy;`5}R0smgu z3P~s-0Do=>2L8ZzJUdZUJ4H()J4Zbm!zacNOAA9fTYVctLx`=3rQN~vM*b&H-ae5K z5ma(Y+?|_yVzHO^c?ZpAXH0}(4Em9Mis^P?*`bPTbP0)onFeAAA&`3i{a9|v z!I^vV-#6fTTb#!*>fhfA=|6msdACePE};*6eE0x*9l`p+e0_-RIow+N4UDSwr)-pW zBHLukkTkOq8aVz=!xHyp@b{+@(8MXD&c4s4*gc3d_;AACCn@;!HF9&qSx&NZ*=-+| z$&7GMCCEEhSz{pj1o5vg2E%yktn3QzdZErJ{UY+(UtTv29lR!Qj_=e|K`l{66O}jK z*NO5*xijv(J++t;gO{q5ofTjpToRoJZjr|-F(TGtGuqdQAV=}yx5lh@0KpZ95uhx{ zflv{a7emIB65A5(`xZ<dWQMxY9shdi^&jGaK?o88pys_OOmEx^=XV#jHo56a>wcH8VRm7ETn2yyKAt zaZUQ#9vcghFp+s+Z`JzB zy-_Mr%v=5|$+cVExnoo!b&Xk_)dD4Awd3Iji%N+&?=1cSA&>YA9D(NXq<9MaUwTOg zXbnw4nh`8AX#2EoeUjGC6)TOj8^>Ilv@fVUc2jDS^5a(`qZfXZBAILl>6p)1bxqWI z62FWvPNXb|F0d&*e_El(vg3V{-PqZBUFj6=+A-fuLE|Y-SLvM^&r;~A`Ve!y`1LKK z%$&>zmQL|rPd|%E%E0rMGrr2!30KdKpxJJTscuN$oKd>XYpF_O?Z%O*RnwhU+Pg_L zM2aj|25THq%wRi1F;F|{#d+h;m2!m0%~1=wb(N~pOT-FDimKuTS_7{NsJ`(MvA5k` z+QI>3!A?@NFvDL8voT7RRrZt`@9WRck>^~@n|sj3LK(=n zuja{Dfh1}w?ooWHBuP3F(8+JmX?-60lsffWhpX zkYoFT!8xvC7@A8hFJ!hgWLB{<{pVr#wq;x|eP4c#fr4WH(o#GdTWrz>j+CdX4XB!~ zVd@4M;^weun|ID{zP3X#KI(A8G_%)f+z@Ipw+=i>&Z>vSoZ8>5VeTQgV1bl79TtOZ zKC6AX*#S_LLp#+?EliE`~$ zd_pm6VrK->%9&lfp0%L8^M-CiPO7XQ`X-8Vv)xLL338|4Q1;sOlgSVlNIXEcN}fwM zzR$7)#pdaM=HSESwYjXphla;rOTDKWFLQ*#U{xvwJ{51OB}?~|B@;CmY<-a6$q z8s=1WTx#yNzb;7{AWz}H<8yE9RO5StZ6jsS&sz~5?|fI^%8*6cZ#piiQmtMOhh+>e z-akz0Id#u*?Gbhv{8|Py9M?;i=ya#NL;5(r^{3~y1L6B#;DAY0G8vDKQf}HhJ}~Us zR~`*=R@c@azA|6S`^a4jbK!BTxpRg0IOvPK{SMApjgknN8tiZFsC27+xO&vE)}rJY zd2SkD#RW6VK}iC?iP;bb5j4EI^oxsJ#6Wit1;s3lzQP~HHj`UHY|${pi_iwb6uA_6 z#htYs&765MS>P$Yez%4jZfu!(++&JnW~FCe2P+;z^cXqqdDK`2&5n>@5Ir-7@zJJ( zqqF&}-3teV0R+=Wd@$QWYU+D(`Loe|A4)xK7??GtbVty@cZqos49#7OsZy`^Nuy*j z&)s9c`n-W|H#;6L02Q%r`mDFsMjXSl*9m|qSYC}fAF%RCmQVoe+#E0A68c{nkB>(8 zfC`i#5y^6jAxq7trKB>lg;a_xIR+cDbsnWxd?^RFOMTEDH8?=xOm^l*jO>TCZuJjT zFYD)6%(w5_bxIVbpBPPydK_#yJ=76XWlbneF^IC91)*`d;5i;x~?0BPHV1 
z`C-sem=KGZq+tBHcb$5DyO75lDMvYDRvLddc)_qlaFblfYgg)qJ+l1Qj}Fu+$lJO} z&(jY$@WZ%-%cAswX=!=f(6kUgb*i5~KNH}c-^_@ZdULIi4TtfHEq$d7_%y)Tjr)Qx z*@1Wox76=B5t;+y@>jBu!C7TnyDsGo(sRVdRmVDPdHm$+q_;;?dk|)X8WvYHhdFkS z)mK@@2Reh6CuMgSoqYp@4kNh4!G3s{DO1SXCRKg~1Qh3JTQB+2Y|FYs#J)HZui(bV zQ#AYC)!UMsRLtn+-63pvKPg@t4~SUo+Y>SE^6*SxL^U3d!U@IoZMEg|G#(pm)(x|& z@ysGQ>)8C!f8NbDcBU@UJ!NsMAC|OF&ezCS3gd>n7C86ky1CkMu}H2US&EB~X9JTE zV~~?>#k+D8P=O|jjb5NjK!+{{H56N|LGfy3cu%*Q$uM|4L43T$uxZGhW+$)oGe9se z0t8t4Y+9ExCZ-6SQnnkVK}1DU9#)c|@XKSrrB4=CHKpbB5qu*4x&HiVy3$^~q-E+|9EKV*W^6VH>g8$r@Mc zwZKR&g`Ho^9({H+0-<^}ON}bUW<_2ssj=C!PLfPu8Ov76k?cpB8q3~(*}U4bbtiU> zgqpd6PVrJuf9VlDBo?j|2o&T5hbK9^>)WwTHL~~;XEFJC5n(<00g$ijV17=w9ME0Xv{w?96H#u% z;;Q)6POQIM^Gnvn&dVH*>8C#lm2yVef7cQ%v!W_G>`l^ls$Uqle#cHPW1c#xNs?kGhhSBwugRDsC%FI{nt>}c_=iY zFNf3hLA@1zpKINN%`Vc+;l2%u^e$z6aZ|H!#TpbX%ey`Zo5CF&ab_0%X6B&BOnAL! zP7NB_s!MRr>r!9C58c<@W``N+ZDGeGH#0EJUn61sEvEx88scmg;9t3)e3t31mJUioxQ%>LaXC05*h*JKKOSv z1LS!JTx74muSnM7ra5(YP>Ud6sir`0NZYUvh%|9aO8c2ZO_ND8oBZI9j_uj)vepq` zyV8LA4*;v4P*F)>mQJ~Kt*kqKDJNDSAso7>89fgAq68^YQm2sqE>T$X!=$9eD1v-s z{>e6CeZils!voU7sk@2ZF+zX&9Nr@=bs_!0aTv=hI6hN1T9wM;@IvLm$K1A(p{Bb~ zWc@GJUefH`XrZ~*@>=jMo8BRfoJ#a3wJ8Kr z63KWZYlXK5PM4`cIKcqw!fU6bE@bgfQdr^)YN}l@NB#>#3K!L6YdQ}7_s-w%lX&!V z+@vre`DW(^8ggPKGDq(cu6+qf1Xpd6BR}dxQxcjR$!2TNPiUZ+#+k9HhI?`p;z>0A)av7m`o_}w<$VzmVhJtFei_UD_bIHxhF7Ne5^)%qx)SH2Kr zcco7DsDvYy*BwuR1kPNklbty~ibD<@9id37zs9f@EAnT?%2=Vqj<)b164w{SVK_CD zF7OPemv%djSV_zi=9Sg_7yMj#RyiK5(U!h{b}A4ag(YCKM=`TM)LDU=xnzG}&zGhnNhk9f3^ROUU}HP2=E;8+ixWIS~YM90qdX?*G5;E*Cb zA|eA(b_RWFhc9!doS{bI&nyRJl!$mve8fa^>1_G0+xXR3It4WE25((fQUJ@*w0r#& zap#YX&Bw8VuFe;2Y3Im|*Gz-aOls+7tv*GG;LP#<*`A|16xJruMu}@Py0DR5DL!bS zSR^&8ehHV7e;b1y>uJA~PGW$Y|Ck83nFC1Du&~xV%;8pjL9&_Kp$U04Q}*b(LG(2N zf7it;DnG>LRi`KfR+65}n6ETJ+jjGGilszDjQl!-Oai!LWaR$^4ydNkp)+n?v)L8L zuzO1f?u59S?vjNi^}TaZ4%t7c`K|x6)!k<>$PzBzr__~-D$h%{R^5;D)FtXQk{x~8 zpKc74_5bW+0^`H0g?-Ns&-x3`!@FUv?_KFpIgGJ7b(=86KQ}1G)rhiHHvMWx&soDk 
z#7ya5zi_L#dx_uLifkVH&$^u8fP)-yXc_iE0|a5=E}rO$EY%9N-EJQ7`SG=zm7E5%BAxsMVO3|0erSJ+g43^Kx35Ln!EkB3d2sl6|0lt?F6 zqU`GsU9)qa!nfpql&T#HGvhRZ2M=@N@9J%_Lv!`F&UNbPFvtIFwJzh=`1|{^tA}Gi z4t5~pdYdGgv4lyN^|XY4!-XR{%I3(SY3bC|y*?q0E(HHYauPt|Z4RHCR;*n;pkR$U zV?|%1_GeGas9V<-Wy!v<%|1h%`&`Au|8jE72v#w$5|T^+!HHw`o0u*&XC-hgAoHWr zH8}b?!32j!=lvU#+|bkn6R5A@q`@9-Xi~J^HUp{5XWoUiqIz+R>nD_fl3Y5eKj`!l zEN{ZFs7#>|&jQneH*zqA`~w)#HeKkHL`4@=5m-NRbrEp)k ze{Z8OU#nJjMF)+^V{tXVUd~#*7On_muYNe4v0V_KK%pJ!7(yv=i22+gR4$jESh%Q) zX9tq;8Nlp(btfNdN$e?MISTi2f)!FwKlrBKDX+EWEfQ{xyQZfImrgtL7cB66uMbA2 zbGW#>)kiG9S)R*4&VVbcD;2k@QySXVt8O8WD{st4IdNurd)M!ipEdJadnN-k&?>sKOO-_F#FM5WRB9jm z-mx)c$XuQTMg7)vJ^4k&eu-^ad*ki4XR3N)r&50qBpG8F^Y~rtpurWX#|66F9PSO=Kq%FtDc4DQ)X&SmmXZlKHFliWi9fT1q9 zBMYPYrvSWVvd_5ZQ+pOnC@;xO-G)J!%Spy2@D*!6%1c@N(!{O3ahy=g~xM?j$2#c zX^t-m?a@ri!mRFwF9TT(EAOLxj^A;pK+<=Z6YsQ`j(rJP9Zl`FU&vbx_rDUV6UF)& zm9r>L?_Ss{XzBOz%re_xqneH3-%fSfOPxdRtF9JILqY*NC!}#4?#C38HrS;#M^?E! zOY&@D-i@zBO#hL7&i`;%>S<{EFwFXe#96q;3(dV!mP$p4OHZ`%=&)<*t^fmDP&GGq*&ChojuWm7Bpj zdZLR4-aBVWiIJ(!Bi>G{bAB2Dvjqcm1ABE+dLkgYHq51sQJ%~fSiN7Gh+<)bjLv(m z(6mba<j0*_AFt zUU@P>qRI6)EwT&#;FK8YX&ha+h z+aNbB*is{or}&p}rL6H}^O^4mZzsD2b}!B5bzT8?65!4NGwav(nlF;V2;G=1LkZG^ zeJWQ?k4R~C>L>R71@YQ15>5=xLS(e_a)W&=mrHQI5!>+JFJeK(v}k>oLZ#57Vl?lu zh_;CWEvrGjhJ=wxe8mETt@CL!HGL%YAjfX^R{6hKKOne6>$z*iWw%gWJ6%5c$#==z zQMp%&cl1QqA1fh^;mFiQlBIHND_D{Y5wjwuKk+#ls`}i}=H?uppCqnW9={j1>_6~R zv~~fi9W#hUxziROcHp8h5-`UgmWk=6E<)C8!H-Y^(>!B(+?ay0pRJ6{X#Tr!(tT*dJ=eY0W z(K&Emi4Qy7)Px#xuO>c2UK1V1DPYvmc3ZPDsVweqO>(_VMgKU^U0Uq4*H#4CSwswT zIFR)!wP|P>%OvP*sbK{j38(aXiSKean4III%sus3lo93^m1ndI3QkIm%PW#oTfRO< zEpb2tjY#}{F+p;4ckakLCeJmfya+NwPo#IH&R(V{ByeO|zwt zQziI+y3t!o5#StsOeEvV+FfzMK-=YowD4u@x0M!n7IuM=YXC&-7tyDPYy)mRq_31 zHLR{Umu#&D>^RJ!vGYvpiR2X$SC-ZCUKqC@?_{+sv+2ZV`*@pur$X)tlH!TkrJ$Fd zMwJcyLqv(w@FPWWGy{?3OcODd3VqQO-%>iY-4pg&`*HSPi!Gfx=VGrHfsFCridl&1 z?VHN;HZXn59*^dxNAMTMa@NW2i9Ua0A0;kk`zr`z;`zC|qQLe);5m8^!1V%&yVQ)u z5-yr(mJpL-{iYEjsRtQIF@lV1T+z%1m1?CZb1u(zlDv{#M35Q`Jh|${ua~4OP_%BD+b;{Q(r+ 
z>vZ}gL0A0I=Blizp4n}wm-eR6-@9X1$HQ5(?zdyCD2h=>b!LO>g0Af1KoktP#s;@T zyaJ9iWoRd3J_rB9?>x?J!ow^}kHL}F{-dIiZhqc{iX%7MExW26LG#!J_o-G^I-7W2 z>5VnT&Q6>0MB5&8+Y725^sH%N%cjUX@!PP$2ZH?!j+F1;)A$_;zp;W4(trH&>a>Hm zPy#QFPv2lsoLNP*j`RQ0_G%{ccJsKeY2Rj!{_eJqV2x**D@LVZ$aPGmYUojYtx=-9 zA99~f^UoKB5uQ6ZS)H7F&gUE)bEF%vxBVAJ@X4 zyDpTEO??3KRmsSv~tR0rouE=?)qvpT?=L)^9SyMbt?y z7a(3VEG`K)ouuvm1svUa!PxsvKj$5kIo;x#V8b}9(hs@q__4oWI(I1q+>0^1emM8S}@?nffcs0-Y>o3FutC8?19JdLT1#vMtmdcEz z=g5Me`KI%i|9*DW*Uk6Z2Lne+Ff}5NDMbqwkt7Z_8dSN;{V#TwnXs@Pnn`8i6&bPX znW9!xgH!gG0*>r?LM_VdJaYYD8s)n{;qR2XjN7t)U(IsG2D)v{_t5fA2s-z(7?T$u z_8v*!W+;M%Y~r7J+U{_S_8^Z`uWMTR<5YQD10MNh(+OW)(}%$KD05A$v^_r5<;Tx> z&MW|oSPcU(_*9(FCh}@Dnd=J)`VO;0j7_Q}6^CoFPvi@|kLk8u9=49ZBdrLD>6K%g zE$`e5=oj9mPyzA@8haC1jhQlk^Ij152N@z@w-EP)JSmQDt75$*f-00*e(gmWhTcgv zW%HeUI?Z-=;DPPPnk6!5Gkrt*Y{k+E!Abh%6*3(?Y1GtsJiWilc{pLM>2n8oF3%&J z2*=fY`;$U+qn3q1k-XTLyEZ@1v>^XMM>XVb_qFemE+uD2;G*U z^uK@;(@UuXKiN|ITPhrx+~HsQSt*Qhv%33^tM+S-2F@HUXuI)ql>R(Y9z%%Mb5!7jA5JwubSr5TxtIe8;Jx0>zlPvQckLmfRo zY=J6dJNmbfNPs5@)nfM;;2|tK@0#WUav4@ycI?7bYU<~evK<5AgIL{M&a|yU@>sO3 zpPeBgte_l6v@q0kHaoBKD^#;gQRx-GcDC)L_xN+^rVLVWaWwrW&0}F|$r_?bwB!-? 
zHh{x&Am9No>9*;;%x5(98(Of64C;;&I5Up!s8^%WD77Tmf_p?AZzAz@6MMPDwv{ww ziFB;*S#EB9aX9Y3bc1(8V&|#f9Un^nXa2_EoT)CvNQ~H#Qm#H--oc5WlW?kR0MAs6 z>^;18@+zwuY>SB(3$`)#F^w@w=P4| zS%B{@P|`21Y58mze|3ymzRXKgZm$Fu21ju)TPhh3W$d9tQx9LScQM*ogksLip=Y6wj}WF(>0bWE*$2bZ-D7pwA>p#vFBOl{(u8w& z&4~j0F>9=Pp`gGDq`3B9=Jbk-XzRDVt1<{_Scr{Ob6<5b&FT3m7dCehfLe*PMzXQi zFE2Y5Z0xtV+cizynC-=$q-0P5hLER`HB~sWRlbQb4In|CE-y~^=NgO|+6c-1|Ez+ph76QNn=7V?EBe}UrIk)|mDvWj*6E5HSalJ-L)=Zji3 zt%(IrC%N5tQSI&Ken!zK_NWx;*dWk;jA*<3CVTxw53z$ZvA~o?#S#4G&B$ed$B$20 zo{3iO+$XsF={fi(Yoh$sSLQ%;_1&RS0t0>o?9@tW(B^ceDJpMzk-*9=zRTvInoVo1 zzs9#Nc~UqlMHlSW!s%2{P1UdcT;SIF6VJL!y1rf#$@L3)#*>Oc+?oIjBkde_AngDS z>~uQ3TItuc>^2GMj;;n}jQ`SATO@~owcp%$3=}j?NY(*(HG)hkpED~$4JUuVfQ|Q- zU0n}v0GI-VB=^~aUP-Pm05-o$vuz%t`#xwqi>+ z0WWZhIY#f5$kRLdi%DMFz_hP6w_2_2L2% zJ*Sq^#}T^{2P5V(8=tG=XSVI44%-QCC#zD|={JLX_hdz``}M7?v_N7NZi|$?O=``n z&A3!lF3zO!>?e#X)A8F+VjZm%i+-8M6_yB30jC;^8FN$3bM_AC;3{w}{ zTd4@|`RvY04aZ{XjcDLza2P7`xKfB@YLAV@+3ZXjqNU~qNm5vj-G(N?{H^agj(vcGa+ zx2M~Uw}$UmuHG#!;{$TZv5Q*i}fZHr%_kSi=mtONp07kUF0?kOfF2Yj`ik zvx2htRi11{ERy%E)I*NZw?kR# zi7V^VL;y9ESjecNuB$}EXm#UOd&QP;69zb&Cy~y3&wi7Ug2yC&a1!U<>aT1pUfy`{ zYmjYV!3NbL$X2-DWyaCSUQij%CL91s=5;!8jGkTs+LiRu?$t3C`CfQ~y-;00mT2>D zx-YGQPp>^<#}5%^ii%HTT^ZcP{+9m`I!;1P>(Gi%b6+q2G6rf;V}=k*w@;-LK&JSL zf(fXy;*~j+y6XhuV{1(PZE|hBP|i?o7u@^;0$g9jNEBOM9o<-fv{GpJ zUa%!1|H*OkQR6kO135E52E{w?+BDKiv0^pqs1~n7v4I7kzuq>vPq4&1l^p0D+RP2$ z5j;%7azlg%H@KUrdNL?xY-_P85CO*(oYWjh2uQ1#Sqa%T>f}u_F@OBI+?R8F-sziU z4fBJ@@=jz7xJXfn29HxNfN1+e#O5<-uk|e3mpJUVxf5P=ME#WUubeAwcVkw@R;)aF z9|rUato0@SSHGYJz5d>nwNs9u4}icW7eIVz>bQpiY!3j2dW-iM1$^`FA4wGrES8We zwt{8Luu!!|vS^&p>iBQ^weXYPuLrJxMQ_rHFDe>71463@9L3592_Y=e4CQN~A#e{k zdv&8r6%Vk4CRSeWqX2Cno6D)`**PsKq4T3bo}e@cD!bXU9xP#9+gif5j$p(+dlZud zrbAeTHbOtMOiW(ygmr$Ol;5Y&cvABy+7BHZBTT%gLqT{pSsC zm$6&coDzv`r(%2#+cP8cem5an_cjOUw|9OIB1|e`6RgW*sRs_Gb~~pjt};N4ATd(h z6EZ*v1`o3(ZG>6lUIEJTCkVF6M}a|X47cD<);^f6iAwjA`FD`+15DfC{sZDjky;AL zgiP(i>fpj1&y2Xnh`Rz7T(6C4c7EL`6P8fj93e}Pg%Pfc`+3a_(3q4aau!|j;cqHG 
z@9bBvMaw8buHu!OT;CP4U!GEW=>fQuD6i&VgqVH#!;?~~D;ptO= zw^}L+pvUfLN+{09PnZBZ^e~ZfkKF#{6`W7FKnBMy^RX`jN-X-y69&8PEFYX>c2W6k zENXy1tGYOy!P(!LL|b$7v1vF~zu`%KKe(K3?kP;z3D9$_^#$|%!1&|-$ih!}g&sS8 z0{LIeHsOH?xUuSphLGmwcbFEHQcm>xRE|KB_BhGe<$AXY@06=I%&bi!n8c;(Tac+L zV|*hdCUYC9UeGToozA6|-WrqT*q3+qE;;?26pzQNfS^sHZ-{7!n1yG1v5x4XFXkFJ6#tdd$Cj#bc`*Qs9|~~%#}TRG$fCU^I!hK8DM$_ zePmc@V)TGyNS=@>u@4bpMU?3{o)=)eidCJ<+v&V9EGeWai|m55r(s;YE$GIeSS$X& z&VM<{acE(!cTQj!m)OQ|AA3#u4H&3}`&T9v;L$3@R8_dW1}L?KS-1{iWsQ1xbjO3z znj*mu0%*`j=&_tkJva|oiF=EZU5UOWvo094f&iAEQmqTcsR9l!Ojt+D^HbJ>K>??L zErKav;F%QzcY6og2s>t(aMV84r5l_)FF|9f8g+pzu^2W5>+e`* z+t<8;i*6yTxjxPc82cP@=99O}UIFYFh!6_o~N9G7FRjcYR$Isol+C`{tWOr|_x4^STxkz0x00M(p;wu4MCt6tmc=6p2CPDBIz zf7RK8DYkM*C-K@BASW?Jo&w&~j(JeKTy@1OkK~p2Q=T&r{Qg%)q^(m8mYFYjEBDYLX<~-!V!Hu(y1 zfL5#1$zw7*?eivirki1nY>g36$TFEeQtVEQGzPL)ba9eW=fk*tpm^cZW*wZC9p@`C z-p! zllhBu>pOizN}Sx;`OB~6(;dhs?P{=oP1EEZno5Ae1ROU4#Zwr!S_p^!229U`|(UN+k~Gs;D?ND_~zk zcYtRlmY!=FqKjaKtyCPoKZsWGj{aLmJ1FA}IDC)8KnQeP`x>S3IhIfZZCIhvdYKi0 zY4=twk>$3dfyf)`KlA{?EjQa>4@I6(X5wxXY(cpA0f@up>BcW(b?yoQ^ynH`BLAhG z#27lvnxs(BCutF~)62mGN-(MfRL*=1pAqb*%B{TKw%E>!0;G|efwEm4%q*0+DZs>Di z&{&t@F+ioL=KNBmq3d(|(#i;ME+3ZsepLLRQlJ~vc40y%ZCf)iA%{T#*{AD%C2=M* z4TjaZos$1^4W_L>!w5pZTB8$!##YND{H4hC4OMe27P6Jh3 zHZqW!>77-4RxrU}3VU@@!4?qe0vxF{p?KsB&E|9!1=Msp`?1hYYQFL5w{eh2wh7c> z*)7&%bA-Tt3$<3{h?5a$hqFl)V$!&m@RiQZ4b(TPs<4YJp9acZvVM!yf!y;H{cWs$ z@qAcp;P(fsKpL=AMKNU{i#o?S{`{abtXfG6rk;)2Fw~gXfhJ_}?7;69oFzrx7_mQc%~W&G)xX805@h)b4hC=V zl@|ApkVs7g>(hiJlgqpVC`#VRd&u;Al#cppAb5YjW8fKaK@0~c3bxActD0Eoo!7zh z`+`U3cpj4CDm_vJd%7usGUt}Vb!`U{pRh|D;c%tT&22bkk_W$$D%Yuoefw~$^LE`4$hvT@S43YWYbdoa(*cQs3U)zyN_uAsFk3GJ==Cz0 z%iiAK4iA4v^+b`j!F44C*7!{ggrjM@{*UrXw~TK?-vckffIJblu|xr=7Bln z{uv)YF|*(}JT_%HNRD`OhJ?|5KcW6hueb5gNdj-+B%kIhW*R49EQe%cZ&cMEJq2og zoB_k+$Oc%p9{}h!Zm9b-&i~MY3ealwjZvqQQ7;Q6|MNy^WbY(8^^$VUmXE zTFBqgo5a~p4CJ7mpqV$nJl?{f%An)9^*?b>3wijEg$&y#h0fGGc5MI+KmL_UxG&3m z?SUac1zJe&O%FpOk($e=KR^DxL+W3jB)6pt-4z#9h<|IWpq 
zZ$NM5|2z0Djo|I$Nb7KR4fIQ+2bKzT^e$B{pg9c&(XuQ3Gie)-q|2Xvqva%r(S;Sg zJ09Y(gJB1PN1gy^#dv&Pq(4S9op+8L# zYw*)e5pyH7?M}2cpyWXRd(%NU3=#03c^y)g;{RD3N`D&@A&}Inphi_v4oNB)61HXb za0W|((?031#-s2hgj9Px?Iv@9ZE1(f$K?9T~%DfE3d)q`_` zg?Ydr`REs-?zJ9f!4dvVJI5K$Db>60qQ8WsX?bkzQJO*t&B>oK#S{D=J&_dui9sSn5D{+u6*7+f~&l~z1H@-+eiD={K^rVf5~$E z{@VpMzYAc*gMYep>qfr37oMk%s0?b`U%O+D%Rti07~bD7xi~(BSG#`yl7sQc?X>_S zf>0#Z8^xD1FJ_*A;e@4z+2GbJu1(b+btb?g@k}r=0)`=iZWmZqw|s&*=d zRxju)R{1ec!zNz3Y>^`8Gq!#?vZf(BPvaDfqO_A#)lyiLGvde z+C73AOWEl|L}pIf$w_+2d#&DyET5~#7q@?tX5M1V5Dwa3*#5(qwhylEOjCf@r0 zvk}04vz!tdx*7Z(A7WVC5Lu!dc|%VfPU#G~g>FrkC^V7crro zk^?@{uZDqv)E->}Ma$?(Ro>+XT zhhhItMc~PI72lLp_C6p)o6!@c#BDfg5K-ko@}ujH`T{KzQP7WHrG+Nf8@tpHp#Chz z`8pjt+qibQY>=A!#O^tzQfkEpPPkXH(0Xjvv9O^b|5fq@Q9cKiNOyxfdaZRDhGF^{_x28{1{+{#7Y2%qR>dm)Tz+>KZf~n3)vEc6c4%6kL z@5)L-Q5Ew?jsR8D80Vc9)Z9$=^&jvKNIN1TBCD!9Fbn)Sd+40K%M`-E8pEo`2(4~C zcr`MCqKc10EL|&Ap{ULSe?X>X^+&3*3#ruO{9H=Ub~L=C(QouLTEUl>6^0(=ZF@H> zxF{RO`^@ZBk{ESy6An+tUtp~;>z7l(OSNLPEQf}s7s)(65pE}ww08##nI~GA-o=to z_*@3zV~Qg3PTbDRp>5>6qM+8e z;%zbU=CkHFO9E@0}|Z;t|IkV_^|9;eYvNLp>pGiK~y=6X?>N|O z%;vg9%T716N`XTlUF-6$tWw3*0R}TOs9J+T6s5&ePR%%L?{8}z_0|Q0GCyj)NPLKH-vk0e zAp9I;&StbBuBu5)R^-bu&~zn1*?CjX{3yqCnj%K-Czi=fDC2~4+5uNg-KdNXQd4GlPT4=j9rU_Hg1hd*T0|YEOsaW7YP~%UC)h`o{ZkecCE)Tli_9pVxnifk`xjWMOv(cL3Q>y2v*l>;jKp+}oElX=yyF?$<=&@j9joP$GJl3# z&MaN{u@-rQJXM>Ycu96&_ZKS1#?mp-r^;Qz?)rS&K&Pyc4&LzVpGU(Zh8vf^5PtTXGvE5Zd*Gfy#k;jLJ?vTkmNEc$sqUAd^q% zum7R9f!IEy#f6Ex8Fq{)hxm6b>GiF+4s%spNCF(P>Bn?b#~Ald_Wa*}GsPh#Z7|ku zs%mr0eCIv{J4o#m6|Fz7l(asWI4Rqp9n>p-8iGBmaXhoB9mv+LvC2r0-!%CoD#-Ut z`P-OSG=MGP-#R=P_Fl;zP5A(a&7b!d8LFywUU3n#KalKj^+`YlAW%OS~75uD1Wu7TBey z$GAOY;HvYpnUiH@Jlfb|OPM9>!(mfci0!xupi8U9MngRI+~;~Oxb!kadw?cJ!BSQ> zeO89pL|v&u;%j@PL}=b z=(nTV@Zs;XG`-_rIh~P(dD|}vXP>c;(j3lrGFC1NlYBjkX*^X}$lJ3{JOP`|y@(jS z@OZ96gbn9GvnQBBKeyjpZ%Cp}eitL>@z(p{L1LREYt5HrdD!G8H@04Z{?him2a%X( zi>>$W#grMMhZWxbIXlTkIjoT^-WM)_&?s}Vl7gzEXq#!(yG`MritP(NLqmqdztOKq)qp)ywU+ 
z8EurnHyM0nDJO7nl|6TIQGLd4ed*@DIzD4{mi(a25sC-bAY@_P?`QYK6zM{R&^Aww z;Xr3fUDsXpa$K#7e*#_ghIFNaYlqxzJx1*n*X1$>eDA%nd;OcCZERJG7`z+~1cwj# zEUE+;a7?MF#YV;{lhlvec{K&61~GL&%ID}cP!lPCk<}4G%_tJe^!x=p@Jh}%lI%n0 zL@!pB+tZqr9wHLmOtr?E-m7i|coM8PpW|@7IR>}~LM_j(k!rZvE^4+;0tZySBMbRzVcG~#9bZ73d z+0mh~a?|TuD`ll;_R_H;2<2|x;SPh<+2m73Yf-h?E3V-0PdZ)_^`Z(_;uM#u>lgig zS9EnkG6NsALapePg_{$s7%0iZ9jeX$=<9aBlOd|9=8S;e^}w}S^*0KtocVMeMPK9< ztIzGF4vLLUw?;!~K$k~Zp|S_ph&dWGbN@%wTgOH9eQm?iNOwqs2ndoALrO>s2qGN= zNJxWpOAMV-f`ou{cMjc+ba!_QFvNTCd;gwy{+!R8bJpIgu63=o&rnYtiPsujp0j(-7f< z@QVCm8ZwC_vna~vilQ4AcePZ*HoDHGNMPR0D66%xyl zl+|f74M}`uBr6uV;iGqOt@dci)gltsMoHL=RKg{hTJ2}665J9GOs)3rZ8P_RAAbI6 z=P`!nq;>RIqU*bh@53VPP1_~kak{HVL$Fm}8s1pBB3V{>U1sA>CS71RbW2D4Y+zqh zz;mb=Iur3FXUmBdLA{jm@`z0GKChJD~qzwY6cZ<0H1h+7_E9tGa2GvyM14 zc=4epb1VW}4flK&Tcu)}mvKh^t+wR%wa_LnU#SF&2O=885?F0LpWs#-9@lKKkq%K zU2iGl2;84b(9RgA#U1~3;9{wgzD?_GuCjpHFm6+CZ9ri>0RTho+&x{U8R+31SCBp; zW4rpCi3qvMXWN5*$>@duWqr_QJ>3V%2EE6liccr=Q&Jl{{pUlwCCY2?-W%(j($oMo ziazHFGb*aczPk?a`6GEJJO)yGl8RJm1g% zVw9mI|0=XBesx`KomzPjqR~b6IHQa$q<@6w)MZ9P#7>X)$?!fSjrok`dzA-%{2)Q# zY@F>K;(s>irer|^6ssm$@5)DgPDu}71yUv)C{wVQj%z&Tf&29N62j{hZ}%CEm=8E@ zvvM?_M4FsxR1I|)b$;HA+B9PyA`9w4FKZ@GMYd{i#Cm_>o>Ha|8#6dTy{@u!Qs%$t z=+%L6kr<&tN+Ym7ODFygQOnhhJaAb>We?-PYIo`^-|i$C83pHj&E%Z7uiMuJv>=Nb zt#}u&VTeZ~)f5Uz`-9NRh_KldEH{O6u8wS2@j7Fex=7D~{)|%5N455GrG0d_E8Fee z|5Z&4nTbJFja)ber>di@Y%vU_nkSQHfioI4PVS-?U!B@6Yn_5fP%R^ChbfezZnhyK z>kC_WAvSAhvTP;ry}BG!wh~an6jon?_0JT3Qypy$4!C@c>VY{7%MItSWZBf+z8_8X zka{7><$}N`E5eT?PErW7|IB;F*&_{Y{%aeQg`L>5u6}Oe&OM<+b`sx8j=d7gW`RF7 z{UYA45ky>r2S02J?1lI*dSB1(+1PYp7eR+p%=jVT;NGzfCSgeq-g{iVbg>v00f)pm z6FmtEqYu8kBi_2rKZtt`M+)`R($lSvC+XU z#$8Z~B)1v7$taod7dn3!X)>^IF}A5(*5eA=#h0lz@tj=Y*?YxVYouu*;I32iQ+vJc zTaTTK8~pN@zkWsYvVH>G&cS{W65pt$$FCd6k>(#ZAu6DId@bK`?77p?#dbbnX*&Y% z;cz(#R4d3M1QlRpLW@9>E{W|8Y$N8*)5&_xeTEhR+F)M?U9YY6f5sVOe0aB0C_ov9 z#A8T{linOo@0d^}_MQ#reu^I-G*@&fb{sG92n7bKA=M}T%7!-81}uqf2fvTGwtc6S zMO#R9v#|L|S0EaO7K@`+@Ujt*0Y0C~k-=*Ub>|eMSJcS~qZ%x&fh=MuHH)Rx`xkd} 
zYrPuv){5yU1~)FwYh&K-%7tS=%Ev;2eOK?e2CxS#A6lrlZY2e3`W>NL9;}TNQ`V`u z5otd5RkIZtPNR6~h3w*4A=gpx?4IBYlqoQL75EK4_IG z^Ga3Bi|Fq2fR5usJ=p45&}-41`L8yUu*639X*A6m19EfNYH2uNXbx3^b{=hF>Izu0i-s~vd;W3%KSUzTw2hU#4{@3T$4`Y&TITj(6@GxmJ9;}gDwt*A>B)VV(mq5fjY*bFbT5aHqHB4s50bOrnYPk zqHl!`7r7f&zCAvbYZmTk_3=qG<1d{1Nd>?p$q|W*jlHIAUKKybIB$uAVmF`16{JR; zbMKUs>t~G+<$2>1WYDrs*dWz9?Ngw>YG_h^5Tw*ldQI6UVyEEd=ZKP)rr=E zZk+e;Kj4Vo+w%oErcycv=*Y{*Vu?1xZi@SZ5!p`(7gxy8^E7d|7|K5hypVBRF+c@;vt+%A3 zr);Ibs+S&$;;;J~^1%l937aY4I9{Ms=e+nqY=`SOY^xUW`!H&xk1L7Hy~cQQz|OvV zbjQku0*9^2JyL&<#@rH|hr@RPvwrgl15ai?idaSk9`1PMrlT3*$Ly=2biLOy;6g2G z7SuQpnGBY!Ke1X)5AK~%{b#^EbE=kUIs?=7m7Y|_Bb=JlOUH~My$C!iLFZ~MX!BF0 z5r5NcG6w%f<9Y@a%xmka2H$nPc-+4BeRM3tP{0-oc{}8(T>rHX*H3^8V-k^wL9%*- zb-vY>usEK^w;Eyx5{bCi^5A$Gm$7Y_SRi4ns)2M zzS?r76aQB&c5@oFez?hjWe=k;yg-%kv?DTl zhdVOkkPgJlalH6Cd7@6BwEV|P0+hxG*QZ75zaoVK-+9&_&hm!-GGW|bjg38IK!jnL zpSaG{^>lubT_)yE9<+a*h~7LBSJ$Ka9s;H@60y+UH+zocdjEIW23~fZdF(T>W&A04 zMPWPLvuInyED&8O;$|T0$KR0N^*}zOjJJb!7OBZ?JfARyk{0~`_jsf}?PVZlGerrm zOd;wvtk+y>vy$FbO+i?X0Q)wR|Cqdb#%%EO=6%0?eCrXL^M54>&}Mme_%d(*!qAzV zWSS~7FdlUwm4CrtsYwa%n%ZXjmDgq(z*>nJWBFmTNPk%~+>Urd{M2g!K9qD?IF(^1 zr`BQ_;AWHvzHf0u`&jB=)4z{5CnP!mi(v4tnek-P(Gk8!Qwv#N+`6xetIfLaNoL-O z{|fH)%gQwcP#ds$4B2=$U}9X`%*7-@_?$%#g;~1wJAg535-9jr3j*B@iEuwmwH6M& z!gVJ3fXqx)-__Sx(^yEv0F@O?`RV^XDk+36FCk;%SdCPhxm$(xhT3TaWcXQ1ikiU% z)Lb?2)gz=xg$V~kO8joH=i`6os0}LN)_h@FEi6UH29A*bAhmJA+V${TQ%P#0ORh3N zskAGnKAr31FViNzA(fk151jwOulSJKJmP&_1Hln@TkQWDDN@cEaaDDd4*oFFdmjDt z2W|tAdsZQg{fn=k{yFz(kaw7bX6w&qS+Ab}EB-73gkeCf!2wFdw-{a27YU4svz{jdE0PYrngTRU(K4*I`#fb?~sr=EQa`Upfm7) zN#N$uJc$N~j`d$BRDmRtZf%T$+G(>tQyT((j^%-;y@-)4NOVfpfp#3`?7#KEMwRbq#GksP z-Cvz5iF-T|gLElp)VAnYl04l@l`^25zue$vgcFIh4H@1{|8FyA(dIiEGO#oi-6-K( z%N<_V(3PWRw8PDNEr2HOwI9wEDir^f2J(7GDJN0Tfzg@X zK~eZNra9uy_jw{%>F&$pvK*4jF+94ejVk?P{gWDg3}o7qx~xZexoS|h{T zA5)HS2_lNgGPZcL4o?)iYxDR(Poc^hB<1(&Pgw8i324L_wVgYFSn-hU>enL?&kZ}# zR}Pz}zE@AI|1kGp3k1I6)4BLaUH1)2%7~_TMEUEbadFDoZGN=DqftomShTFyfz$@N 
z-Mg&Ax^F_Ok9Rh4H^e=LplmYj@U$(djbQlQ@T88%OoSKy@Z%eF3}Dy4>#uhesQ`gP zpx=G;XUF30G#=422)Pewk^9VH&Fo}~r{|+r35UTe9OJKD;=gH^j=zY+I7SP~6efNQ zK^j(uWNU@k`utrzQZ1kc6tI(%Y@CyP`jD%KLD%HPZt5;20!Xyuah6mcLG5f23A&*$ zWMkKYz{S2-6#^FkP0X&%GG|Peb>+LyGwpUzh#o$QbK~|^Kd(0*T+)VJZ?&_RJoMv@ zN`Mo^_~6*VmmllC2Cg~p8n>^x0QCh^K6k@W*e73xE-(&&Gk!nGU`PIxc%&HDGTs@w zO#Ru|<-1RL_FE8{%XDkH%=TU+??W4%I~~yebTBVo>ePcLZqVGh)NQeC?17o)t@pvO z_m^daF}*H_rgR}c6G-ntEHJc=Iq zKO$^-BR5Y)9_a$ckQU^Bg}Cs5sr$*_5gJB*A6x28y6Z98QG#8XjuX;B@VLihz4e)! zKz$~pm#(%NvMBb*kL|r%O;0FES6KYRCLxQeoZbQN$8YXj2*=6z4k;Nkj@sht0>lh+ zU-6kZf*PgYDTnP~3rAy5HGSZ7suW zUtS=y01az?T~yZeHJdxGXi4GV{It)wLbIk>5WO6{lcND(+vsH!uIz9d9YR!#o-|N( zU=;^_PFn!}PH?qhYq)%io5@{pDDdQg?Pux>xjwQFV`81vrgX|MkM|C(*~wXBL*`ei z-s@qE-mCHSS^w_XyC#j>c+4Au^iTNBo3x>Jf&gMNck*-0$7kKkL3wow3QujJ9>>vC ze~RT+y<3^Ou5^i|`Q!I}2$f?UHq~(_x4Y2>K8>A&=CVWu7-~|HLDKW5eTM-)55G_` zStHvVWVx!yM1}wrzz*gOYq=4S-z(ZxM8^I0uh<=%EpK`e*HvKpwilyiVw?TuOa>xRjdF`wevS zDyl>qrn}1Hx7VA~Vd1zzMK!7oS|!*9^k)*^VoA>TU2blE(VU)LQ#5hi;{e2t$JY9R zjiwnQI%d*1vZ6Qk+&LbRJ*)*+tD?|_5s*nIn@Y1Wr162{{@9q(bcYw+sEYCGd@{^G zeEN_1pZ%h$#48Vw$Cfy5V<)UkNQ^kvl(FR=3~BslyO8B6YUi$*lPRsI-5_NhPy=RY zOf_scRmpRIQ3mu2J|mPc_rz`F@u7nFXCq+Jd>{A=XhzDUbmKXjr|&*)GA9_NfQgp* z``BmN)0)dN50)#DgOY-$=!6aIK?_VSkqx4qFO{G1yXH{-FD5yFmQ2 z<2#vA>)Y0+O73cSLD|kCWdU`@5sDd4&Up&-dU2wG(5ZzCy~!c4H{bCbHFf@YkMn-R zd}aqcy(92&Rr>ikQirxsPRbU%`7M47)7*U2tUqEW66Y=7mL(L8p;R32a90noJ@ltQ zd-F&Oy+W(ojH25=fQ4@O&hCI~7vx>n6?;pedPb&iUt&~QYfD9gMLG{t%K#(`UO~-&h2$u3L1b#W|7?V>a+xA9-cQ5*ZHBkYlR%7Wcg_DW;Xm7Js_$aJ>GbSsgkL( z21xmd))B<`?{yWypc76`LE)Sbzy5^#6{x~osSlHyaw$0{C(J@#fIVjnn>l=yv^aDD{5=0kp zGpUVJT~v#naV_i}cA1T2y4Razad^{I;v1u^hfK0R%N)pEE^%+;W~u&ow?G-MT*X}X ze-Up{z%(_X%3SO*G!uQ7II`!!M%d{;Ws6`ro*sFg_1tN9qJMG8mp4J;lSv-8d@)cO zVG0Qjhhgs>1yqDwf&+pOPs#^#`pT5U(N2pC6m(^e&mB?@%zb}sAF=Yo!@9N(T=dO z^P7Y)YT?TEgZRF4jvhzlI&aU{WxuD^;39+uuqaDt&K;}wAykZYln}Mqde=)4wHjPd zR)+0*b9?~m2OwfL!Q>#)J*Tx$ea5mN-o)__m+lf$&Lu?gunR%{VJ=2U@?D7du*=@h`q$ot~0Y6URl7ryH9*; zxJz)u2?(IgRbE7Y@w0a=l_`XTSr{={HcA~Ar!aJe~ 
zW9x%^zi~EYAfRcXnL-0Gjmqd|dTaC074n{ESjnCbQ9B=7U6eIqxR5-V0}j+$}D{f3p>m&WdLhfTxIZi7`dBiFQLLx zWp=S|_AmU;t%kksLD|-FQ*Y!ZVx5rAI(>O&m~%OkzRTgLXqw5fj1Qodn?{aqSRKI@ z_k~>S_6^o!QHfAnX8>%~A6D-JBBDM5lcJ8^*B^sjIK4vwFgCKL-R+Sm_;#)Z@~b+? z>0h38D7#9TLsv`d9K3sdRrvV)ud;uC)O)E&-ozV=GWK&QuGJ2@0w~a3TC=b9-CWH# z5{4<;o+AAyt;UZT_CkLu@y2v;<>vixuLu;d55cgj`XuO?GjJRe$Z!lMK+N3yC(^wm=j(kwGE6YJ znh;8^H`RoWAB9i2&Ru;?XJ{~`8asd8-)J*bW6D={0qEp0sQQ~jR;k7r_4Sw0+5J!y`v!&F+V>piG)rM(VfGYs+m{w4s>*OEIAk0=fV|F zZUD~JulW9m-+wD|@ES!xj4HN~VEN7s;xWMTYYE{dc`j4Ck6W`Ec`Di@^78Yr_WsG| zzn?-M;N(Tu>B|6w=&C-buf0GaQHqfHVI=tzG&KjpECg9x-EA+gh~?IjoTZXh+?VNc zHn~^4%Q3Hm#@b)nL|vdb(MS860Sh;>a)s*-UdaBd8n@;MSm~92G^X0jf%8Gi#VWaY z@xlJ>JT%*efloQ;wTjT>F*s3j?t8ptQ`sAFpk^?%`ZN_o5$-h83N|*-BUR@f|NJfu z*+e8v?!+(klGs(#$0PB!#qQNfEv5omt^pP9?Idw;UNpC?>E{q#f6@mJ4(pCXu?`fJ zG6Y5Bp5G9?W`RzC{ZN|wO)!lfc>=p_;tNs&OdLKQnDiNDeo>Y}hT(c{pG&LCLQflmZ!&DPDrIj91Gqs6cw78bE&iLj`I_ah>XjwX?39SGO za9ik?3@a@wlcHM6=5Mx-BIHPy6`T9EKk^dah&7F`PIx?bAF*$GN}O$*?8D+Yn%Sxs zP*Nfw2*thBijp>qz%})0-Ds8jv!6>=tR#GHkE5@N_Mn%*Vu;G#8VE%Jlf(R%_o4IS zm05uAuimN|s);pEBDYiwqYmSS=54(k3NJG2n{RP{8SL;~RbI27E0e(*`Q?zWaCJae zxWpTVk_}|E>NyU;YQ_p@X2<5JrFKPH*JcpyFbmep&dczOota0}x2R^4+-h`v zmL_iGMRfdJ$t-bOjPzIewy6W))!e%DJN}cv$7i}n5T#BecqKI`x&oLq`ND=|n;ym@ z9c0_Iy!GHv!>U8S=^BjK(N`Y5d~Fm`J518+0L8v~pO?TECrRhWyPzenWo71AdvRa`Agc&pNg1ARpzj;p2_0FH0T1@2QubvhRB)HH8_am`os^o_ln z-gB07ye4}4{tc0g`e7_7>EHN4H{U~tyU43~{mkViy)_w% zvwnQ-$y1OI`SmOcaMw$ZnME-~k%P49?(cHSrFrxbV!7y$E^nDeGMZA=TZw5T6i&UJYXK#;c0x+)~gX_&_sdit}}RHQ_* ziZtBq*guRGH{$qi6(U6+ z;r@(Q)dj;Vii#wk@A)$*v$WM5S42-y43aF(A69XA$m zPB7TWgr!@wXg?=XAA@x$eDbqWW8?j4xP)Z033-85KZ=53Mj~iR8#Ec)b#PM8UdE~U zoUBOuHHzfuGIhgL|85QroE(i0Rl*X$$$#a89NGvj(_fEc&FMV~uoaoT%+JCjm)=KU zl97D<-e-{0moN+H0l1Lz9I@x(QfIl(a(^l(GLmHB7RX2uh{gK+69kYHpJBU?LzDdp zyP2a9CrfgLf}*${02oFB`BwFr$Wsz)b`;PDUIQhLz;p3Jw+fHMUhJWiPI)Xr6W}}h zZ}h({Iw`m?X4F#0!r0vQnCct6f0G1&z$^>}Ux@Zt>?grlLmg!MHN@_W)(WZ$=%;#z(38;39y5_Oqjb_ezlgGv4a@98PTf4i7^)ILCT>c9%A1 
zE2nQ!QCSl>446+?OA`gSUJyWnoJUSOSg8_HZu|>s%)nTmFcMtYNv^vz!gNzZ(UCgj zdA9viM1ay0BH(^!jbsUE%=@sIz&8C2Cl94c@GY@j{`!*q@QBUDbsPg)wjK$8=LtLRdTc4Il1ZgzH7SEPe4(R8#PT6;4WZ|<|s!QCzzvuYk2}98Nt`kHrsTxbE`_bXz}O3GX7}Va7TF1TwI2HhR0kN{V@x!+)<5&ivM-9$<)U(^#TSBRO4b%7)39 zN!fhPEs2)|^bCzSY0#`NgkiJlb-m%!lX_#l zFhj~E5R3+t4!$x^#2toxNvDD5=;}lN*b&$T{Ccgpl8e-gUcP+wY%yRTMwmbWvNw0V z|1@}E*BY1~%pA~FoqA-2i=>DPQN%*|E1$3_J?z5aboXQO830G`)>u)3z587hMRaX=Iw;j?*yZ5DvI$70bIt-bbUic~1Y}y( zZr#r{@^&a5h@YjVSw8y6p`$NE&ZCXHb44juzw`ynyS}dT9Bi2M;&~F;#e7)GVye%% zJp@!-q5g|m~$Cp_e&7r zWx$w&G)MJl+_%OJ&1 zs)+l{$Su&+0qnY=iy_T2KinOw(Z}Of@b{5NZ(SU>wcOsV5V5pl$8B_a=Y_`I8!`tr zaz+AvS>T{jin!I%_v{aI&wDGP1@#p>ndfs+99nG8JMhDQVYK>6c)JWG&sll#Y>M%=k)*&p*mA%D)Nz>tl zuiemV?>N(G0Oi@HkKFWkA(jK@;y88BAK3>`2J*+E0iUn;7gAtZZvMO4tN^@TJ2ijx zneppz?0`>U%8wfwtXldYd>&Oy0IwAX{t$c2=!h!$X722$6fXTT*?0#t2W?)5UvuAM zT(2>)tSQaGf^XGq+Cvwbt`d$t)jIR8)v$|O(2(VlCNau}F(A{8)9`nkfVbT&T9m?m z3nUc*7*%Yo9kahX#nl0_4-gM?$Z51m=q2k{;F}^tZ)an)TUCZ)O(S(MS>C8m_Nnzd zXdc1Jkn)H^KlMMs-^P8rmI}UZSQ&*^<6{YX`~87W(A_gDVgmeGrpO2jFH;?km;N4{ zj3;eQRNuHeCO4DnO?zcQS$JEWQ(V(n^%c5?IfSvBZ`PhoH74M0g?-~0&>^(mv+uq) zZ9Tk9@{^DzdU>JR;mMN`NA9qFJdY4Fv||!9dl_ACswlE#HD$tp{yUYe*S;PF&22=Z zXETJ(+KgzgMS`8VZiHin+mP?O$i63f(mDtHq_7Qahn!s^)e|wn6K4{RxB21wxA>-s z-xCx;v`iHr+RJ39Kb5iftUdT#EwN3N9EhI7i>rHh(uI~4&K%e)wqEBKyo_h-t0{`L znhl+_TVpYQYxs7w-1zONo8sAzL^9bWL4@x%FU6Vc=Ofo^=#*m_Enmxhmg60H8FtR- zw!J!NhP$5*d8|h=s|Zy$i62JOjfSOGPV0LdI$bU9&o4j1IlJjy7j#SZmJe;b@=hz)z-CyQNNBl; zfA*;tJMHgO`!SC%YWmgVK3yMK>g@p|!pYB!KZUrM(yh14cki1KE(xTpxJj#NPz4}_ ze|l?2KZ0^vrTCWYx++3LWbaGVD8EVl$=R+b6Gx6tz(iMkYFwf!juIN>tkeEpgR0ex zSbnzgPfSt`2k&cB4frzvDSJL%QY%wM=96$N*Q8_ErYXDbdww_LS04=$;8dIvmM>pa zm4URh)&Bf3uN-J&P$Qr7K4ZeAs=-Dfq8=nY^}Yy+(>M36gVQ!AiLW-g3bB=v8cwD@`0 zYD5wm5h%Q(dP(O(=q|Aoe);Aoz5BV}{x!=?^DDgQ8)6Z@U`Q8zm0HxOqhcb@p8-&} zxz<5nbyB>IFK=1tlySq~taS1OA%w8vXp#7ULGtiz^Z}g2FNU~C>mPxsZzMZ!$r2@m zy=y)AIz*I>IsVvyv3PtL_o@OJzEGl?7O2?kzCoi-76qSQVUVviDdg^rEYimaGx1ZXB$usf* zCz-rFTwS=o{Po4_^4GAz;cnaSeLs85yBLO6?oa3 
zATaAydxgip3h{4m{JP!oVG4}OST3{DS}HmBeyatOBKOF-hp1b^4fXdeFswo#`*T#s zUwHN2YrzwuisDaCBu!gnuizr3g?76?st4tPKjvi$BgJd+el_sGoKuK)D!tb-&TPW` zJr^DKc2Btljcl6ksfsd&^$=5>jz0e?cO$VeDj z1XWs~%~vh+nl5Fi{VqY!JlA z`6vp7&AIHX#ee}UCn3T$>4Pt9S z?s+7uJRazs$s!Ba9|)p$mbA0R723uShD#>~UfwaRg3ZDSbW`NHw=)v^)xX%)Pn@lM zX!~H;#`8jvOaJov;+_lQlAo@#!TPagp;xO?B^=A%?#89Ra^8Rk;Y=6SOsG{NR_h_0 zMzz3&ArLe0z!tV+fAI3e`O3n1VOvec7)Nox{F?e3AvkAu=OXYlw8vy|eh*gM3d2%! ziekc*p7{&!|IVK0WR2t~hkr68{`Z&W_{zGp2{?L!IqvC$q{zX7 zI<+&!V)!=z0+p<21tY$(SjI}jA1l|l1kE)Ma5($fGWP_@kJ+Fs3;5YAkL~11Ouqo? z0Om4RB&Q>3A*K;X;RM~0Fuj=RAHIPS5qJ`_GLQaGca1~O-af%+NMLN9D|AZvXX0lM zBGCkvt?o&EOyITheIK@Caa-b_jIw@UpNw)%~ z8*Mr|(be10IT{(Tak!$iw=9=~nCTU1W)p?m`Z8;|+lPu_-)-YR>EFv`Y!?0~&|17R zLzopn8gd-fa<{#IOWo--cr+q$`RwI>?~QGHH%NAgL2LW{obwm43TMS0L&8nHq`oNe zL-c13Ya`%$D$#H3qffoG120qz!qH?Ik~IIx^_3FI;U(nnulKfIh;odOx;_{Mo<|@a zb}~5LE@BDyX!B>pX(5i_M=%i~>2dxUR=J>x*=B~|gtyU!caz6;{peFw;&n(}F4JWF zyB037f-c-vi3gX+Dg_hC;dHfGPQ?Eh`<>7bpY_23M_ydXJWxKndsn*#LtMOr=58jY z3fp2rgM;5}2Ojx;yL|L)$9dIv_hZNciB|1-tWh{lC5_Pc?+p9sTFQM5MP?^qsA3I{ z)3i)MZ09>wq1hI)>~u=>4xzyjTrE*F7=9K6Kd})l3}dMem6BhE;In-kZ9hjTe~)}( zg20to@}sm!uVtDR=a!;(7dm@YR!F0c+nABPae*gxUY7vToCLUNf54oFA);YJiGRAxWn%$zUWukqwB8 z5?7L}Of`013Vm^A{oVGEzI3z>#xI*)ZMCK%M8(jih zd4O=8*1)l(x*nRB(~8LiPyOkVxcCSN!O*BaM1fP{uT>(NZ#%+x5lgwuGpK}o@Iw8) z4L`oOLy=77e$W8m3Id26+4^Mbry}X}SIfT&a6}1AsF=A(z^}y0tF`0!A4~n_w%NVl zSSu(=vu2z!-Yz{9{z+1m>;7xBv#mp3NN(^b@Tyzyt;-)8aeST|frE#lSR}KS=M=fi zIw_l4gDnPKNAT)4BdeapbzWKpF%*20(|FwU7kfy)Nk4{%67uB_~k#0@$%4R zx#TqCE%4pjtu`N&$BSi#^AYQE&OhE{jw{n#9xd6BstfvY0c^M1BbXb%p%-(wVdS0T zVDEkJo@I1(OvHJCk|(Jh&S$}d+60Uy*Zg*!0pi=_MJ1!I(UCXXd{wE{FTN=LaQLIB zwDFv0^GCS=1Db>~GirfrVSvOK>0UVlDH)5&f*BQj-|Gz&gDR8hFD5ASm>F4n58t)j zL@wU8!KdsU%jogF6>rz`@J^6ltq5>5qJ_ALwughPB9`G$XU=QDsAv0T5;4)8u8Y&` z&rhqx3qilPXq>Ln#9m}4^lcnluGVk-hddIB*7F;VB4eCTx7nI)TKCSF<+M9p5%|!{ z{1}6|C7|MTnoJl!U0>0SVEuAdEL2C^CnTLCTElh_3RLMF?`~+pnKNy!Sm@uP{_yI5 zvjF4g>=MWo$Zp#lwARMd_ehav%Y 
zq`Cttbt*QRKEM2uEsnb5%J!=~j$x@qRTfQ>DW~+QuNH@zn^*F?af*{);lGsW-u?|u zFf!kHDKFdY_oZp?BYJ%2e8?_-)cu5Lb!^M&y0O>8X*|vV1qjvk)-UOsav86)2G*v! zF?83K6fKAew@2}>r=ZvBb>NT3lS5Pr>uTp2-o1S;Tb$7kTXDzauYmu$_?|OQ%3i#M z<<>xVgj^$haNB

    mTrp3Hq_@l|^(C-2^QU=OC^r`l3*G9h0>q@x14Ma_NAuRJ%; zt4zeGBeg~p6^_9~B}O*uJO@53tSFjr*IVPvU@(Nn6o0ih`O zYpFltTC|!^mVAW=BBC43INusUtKpq%bOz}=&9)<&Qt1&U( zW+;|^KttR=RlJRGv2>MNJk1kDQ$ zWlM|~lvD4v{?Z)Z1wn@1@G^vJ^5^kQ)!g^Spn%vb3(EGj+G7oc>m4a$pAE0grsj<@ zuwSBggwx8N-Vu!`aK*-5c?llU4|Oved0~7ElPD@^XkjUgR&wW*^+O4Zp@f6oT4Qp1 zb7;huI)rZ_!Fr!?8{0P)k7n^;ceCsKJx6oFl?mL=&dk+uZ6!)F?}t@H+(Wg=B0>RYy3O&g(m94K$Rs1al?BY@ zQ!7$Q1s;@h#40R1aFXVS&{lZ{Gr$85!xEOLf;FuLr{*K8O^5F*>2QaF2E!MqwWbdQ zm2?%|?D#UsJPyduZ`KGI)4ZA$3b$v(HqQrpK7m{q5e+8&b)X(rC`Lbw1|Ir+Mb;ZG zX0Wzqu;}|2YaB)^IX|9j*}Sj1H;l*$<-Gk)Zd&J&LalMTg?cq z0u$vOf-b>yNA#lFCRC{HuT3`)1TH96R5#iyM+H1;h5mSHc~I0}-}~jJiCaB8MK?V_ zVEnYkHTah)qc*&#rC>AgNUs1Ajv^Lti$A#IKWZQ|2#Vb2`0&$CAbn?QzX)PCs%Fyvl8I_1bV@-{c3P zinA*bnW>L+*q2#Jd*b)^7?NIEZkS6C#vAQXH9pX^+Y>YNWS+P1g!7@`u%Te3y^mpP z)IZG~T|dbcheZU}CMclWWh`xu<|Rbmrmc`?ntZX$tm1h;m1M09GN$Ewtv<8sUhl^E z92Jb4GJvgx=kLv{@$cNtOOpvrVO}e0|t%J}Y{O0!^AngN|WsN#KCiV>RX~mc)5i z{U9?cRLB5!vyyDC-tYN0F`J@Y4q+0Ra(EjXpX>sVOZ<7?h~OgUNxy894Xsa%cS^xS zTO>AFbrZOHG`za!=03|9j&t?g0rMl<0i%kUv(le4d>MGi_Z3d`t8k;Bpz6_9 zQ0O0gM@;5gwqO_jxk?HEUIv)$2=%}}Y&1x4tJDGxNGL-ed4TaG`9SXNVCi;2P#AV& zk7vuBNsO~{ULqS>u9xQsbIi5{ZBInWX(n#vIzm^k1DJ7r{HrLN8s%Cf-9X^w)g-ku z_eGhv8xAl%AnJ5Rp7L~`_!W-oS7Yf9?SGK zz=wa8JKGHv1jnwsP@XUCQBS%>uG1Z$z>M3I;k%d1ZhqG1Tf{>xrGD9fVVN}a_np1$ zl3G;h)q4@s{e7}~d-2D#*a72@kr4R4bnH9mm=Q4_%PG+VUWo^kD zSoeIKMT(juQm+Yjc{lFSR{-aio zvJZgS&5VaSEt2Y=7jxJBX`6}FFWx&T3YWB)v%hap7}fhbKGaS|^y+dZ0%Ah7!rO%Ju^EgNEX$)C9~l|H!VV)b{; z?PT2NEKPmNz8bBa#++vv1f4=rJ(%AjJB`VpZ^@QKFQ!qA|2AQ#w~05a*zdjAW|JI` zteMrBD8Ho7JMg>pN@S9q!j&|mzNHuy#BEkLxwKZaJfS)&(fwe48-tqm|Iu`gQIa)5 z8lIlUv~AnAZQHhc+QzhP+qP}nwrz9k+dcccZ&l|xl^GfD8&AYVCS?$i=m92-eR=eH zQB4Bh3d@CV3OH{_V#e24nO~o`l~$i))n-kllj20>GVlhU;Pf#-F&gNrgN%1|%E^*i zKmL$ENIIH5^`veAW_{QEtsJ8W8{^_;T`ol8G^zHHD5OF%sfrXG{ZtkR>nl%7$xp}B zh$uz&bZQDl2DSqUs^@(vnODABEm55(8KP@yUXm=jQ;_9G+!ic@>?OuBT^1~x}`5Z?g(c}?JhtQzS z)R;^yw^A2kP*h2C>^}>N%)!24P^^z!;eM8T$IgUpmR?Q4-!xFq<^n36!H-azR^hl#DZI+>bLBWTiLPvnlPreH0uwo-+DLoS7_TBUfW# 
z=8TrA!0;UQqL&Y)KJquI0$k3EicL?=1)8;hphi3uWJ}oTRzCx9O3Ot6=BZy;`B4fk z)$AG6*T0vYp{2;V@Q@~X3g|>vZ5)4~^2!XHmoVU~g?^7{EJkvnO@YI=*@0_W!l@t^ zIU8V7%XfUhC|hMJjp6d-S*F4^-5A^AY8=B*I4E9!V;pGd2zsU*Ebb|^Ljdfg1J`kp zx57{R4KJ~LOPUP=AQfl&uV;wNxM1j)bW(Lq%{uO5iqwTZK=9kRvFKl@8)*d%pdBAu7&?4`C_h@# z8N__hn4UoDZB+SmLn98WmYk-v|>+*8(ZfF3VAh!BHxT zj1!y|rWn#E4~dQwK%_p+uS&qAZS;z+8O+p(QMU{!oK^&PD3*c<5%a{xxnN=y*XxR} zam*LqdK2vpTM9+0V34Yc^|8H+ln-NsjeK-7&mZ~r5aemso=fHe#34s4pc5>hh$}zh zmdmPVaxjjl?4k^Jc7-6_L#Tgr)UMpynv}QmUO`M3BG1T(^^E#S#1r!sH}k}7t%LC; z$I)S4B8ku-^veb|(G30*&|S%5j#d1QO@haeZgKLJE~PP*NN9-kxIAjPIWfDQ7}-|) zX|U75fT}l`P|ZXV;v|=jjyfKe(~D)Bq(Qd;Py0o%XDIuSLjKyA5!|k%>J10`ek!OWobKvN-_I zHzEerX=An)P!D@mj{)Q~L?kiexyDUfJuE~|K9pV;N3c{=nI$p9+>LC+kkC*U9CSI? z4+#g}yw1Lpc@|G~RxVlP2OgT=WWRHK8+$KXYsMm$Z`aoXPj=1}=m022Sv;>6T&`|? zC9}h_X0+POBgIIt*t19=Frf|8>t4?GwGGcM2O|zF=dOAg*C(2dKc`{lEEemDSNE?Hs)M##_IZ4Ttf};Q3=R-~OoZZ&{7L z&uR*%o6*mOBAPZ0NY?5%Dm$!{)WLmq)4F~Ba{&LPK%(^YtN8oZ{rHc6<1|0Bf7uS4 zfC`_fx3mC?awVQnr+>WG%NOc#Z%x^Xp8W5If~|h!V(y5<7lQ8`LbRW=E+iKL35obS z0m18mo|^K)ae-Q1#Kb1Jq^?!5AF9-MKzQHkU^PVr-#lFC5jK<(F|! 
z1Hh0ZG$HQrFi{ni770OAQu;e$#y1M%HwKlznY0w%ZiD#H;jV_h!j=W`Z&{I!}ESMI1RoWA^S z1KN9qDvy2K|CcY9%M9E!kUAzuX!e84@oZ*|@GJnqbPrF88)U9LlE+=~FhpmG6>kKY zAkJPsJ`=&WY$K(Vx~4bCeP6>cR~mno;_XVB=kSZ^8|1p=2e>lFtHtad44y^}C$XNi z&7cYCuSZvJwx%eH12XW4w1ypc7mnzegsr;u2eww2n@Qea+f!#= z)3#an(&1bg?S~aa63!m9g{}I9Ml_9BXS@1-RudsqRjkW?LRcT6aQA_B!Kg#YkZ8@6 z5E9RDIEPC^kIuw}6hjSdB1aFxg0$1m))|;*IgdwMQH#*kiD$H}DSJ2e!@FUa)KsWo ztP)jmA{9kkPC!8A1wom}Jq~E0D81^nxkOngMy#{xUtACbp&lcR9J)Dm+Jp>0!6GsD zLZP;89C+v0Lk0=m{=K~he=)28VD3x`d_wecw6_Zf-rJlhh@hss6BJJG`9*zT0GX{amI?mlq?NXC&9R*=)z%u1 z1gc-9K-ft3$kf+z`;O7oqHf1Euf?o5gqVTL$OZ7SUTK7J(o$mA`9=zF5bdHV6};z!y^YvOg)2J2F*V|9x`b~``DOGaBb z$TeJ=VV+a@k8Oq&B%5F?&~!wn1>3eP?!oTI19*m#fSvd}*p2T$-Nl}AOSWToClv!Q z5#e|)ZPBi>gC#FBZp%dE1bt>CBX&k_DYSb+d$^?Mvc1GG%*)hN2o4 z4-_O7k_7*LbYJ4x)Y~o$*j^gY|E5&~zC7OCx?Vkk|DyqSMtj;%T3WhJMs8ka>|Rse z1IR(>{{`U@NzG8LF+9^p^7vES1m#9%zG0c?1U!oz&Nr0PQeP?hHVjLcXNe^1sVbH- zRt=7?8fpIaxa+9>hK+wov|jt5N_A6Qa!Vl7|1Mb}y(s1l^WN|2D)tb^EQuS+>A$~t z_rFu_LsfoUiTlH>;`IGTQkJ&c&sawK>X+%vNnWcIl30o^|ME6Q;Rf%&G&e-0wLc3v z{!1-!KpJM9>JuCX43TJrQB0$oY;oI^i7)!DzPOT1skOz#W)IBuubJ5abz<~JF=ejn zt3={g_?0=!yYY2@7(#rdJ~s3MBIwT9!AMl@qc&sG-jOeV7~{=q-h5 zTEM0vTEv1i^}+_e^n&O9xH4Z+P;k8aQBDfyI^(uI2G}sxx~PYZjLDHhH2FYlyfP5e zWGNMJqHex$&1wj@vk|lvP)2Tsq`h%SyT@cN1mOuiaKDO7z8<8Ol`tf6GQJferc2e} zOVN=lm=07as%v=QYIp)K>;B1%ujD$OL{+!~l}8`C{HdQ#>W@se(fh=m*HLCUTRliU z_sJpy2c%o7F+%f$=yDSMxsheqv-%Oo8k=ptvj&xTeV1?k@lZ&3YBggr_0>MgOQB64 zxjo_9pS1P&!SS92aA$YPW^uXTOJACV-V=QUEnqu>98QS?nX``U49rM5NEITaQWQt@ zC;B`DkF;tHCa}j^yft(~+`e`FV@rfqU4FWzV_71=;6bwIbJE-%WkcvBx3I4ec_rh) zN7W;BNuejll$q)@^^357C|hkfD>K{ywf9YnKZZL9f4sxH3)0kOtAjot1%nPMNK`oq z&iu5kNO>h}B^-r(XNv_O`X8_e>}XTicS0G!V53~v>6eWn4~29KraeOCy`fS2_aigPWq6;m%4-;yXjp z9wCfjX>zij?A5E`(_+F zZ%<^=8oS2psu^~9e~b{9e4$}!9`IQ&!Gi>=BAUnL%<<;wj zwRA>M_XKLc;!^kIe^5peRdJ|$;m(sf+|R@nsV$yZeIi`G8l7_?#|Rwl;^*#Lrq+oq zn%!uo`l7}=8z)qDAW9&i`uJ;mpmw5bctXXKDn`X6vE|vTLS#McEOrk$k9NrO6lR+z zeQEM~s+4fwP5?|ke6w%a2?DWb!UABDBs7D3giWj;*tT1pE#n(B`Xg`o>>xJ~%a0^} 
zs_211BTdrC-k_U(*-s4ZPuxJ2QyQbs-=>EIGTS=tP#T^e!$k$zH6#T^7$m3d5wks@ zGd)?gK+15Gcd5>_o?C$|Sx^>O7MY7}Oh22wRJ8Jhmd|SVPDACJDI4VP-yv#PX?{Yk3|F3nRFK0ZxyxVb}Q_5+)4=G@c4`zOR#DajoP zPYl)zgL6XdW5ykU_&@n4fSSFq;^Hft)OLrC>?-gS4rT2Mn)V@fN`r6KQw{fJd|6EXF8SAOd8ps>EdqMa#Cx8M714379SS7 zE=4JVQBcmDBw4q*2*$!z<1ElgDJH>6q&;m!x@~5pI0G9~2A$>zL={Q@VKSC1xYBIY z6L%lXSZ8ey?J(LHCzPpmGQRkq!;Me$2^YQp69}Kft3q*eY!dDqejb}^vQG~`A4LLI zj$osJIIvJS5h)wVpqpEDh;vKpeW%)UThh*OyWj(% zPuJ&L*V8k|tUQ;T60!`>ZkM?CwOED#HmMb9wEDJ|Dv)7m9$*Kk0!&UzCbNR8Qsl)8 zhn+;ZiW77iPvwYY+x=9n>C%+oU}vFC??U%}Nzb^pB_?(R(g`M^`s;UkeIk`H^a>zQ zvK|*Z_f-frcqG$Y?%MO2rbk3XwWPrbrcp)`uXxfzu712v@Y-1o0aU%-N3e)!ZUw&P zp_FE+dk>hyH$K6AOkosr8kqIarq70+;^S9V`KOhq-Fh5deYHf_!>_CP2`iiSTcX3q zo!%fk?eF1X?CX9&4?oYT6w)orKADBhAKlj>fwtQv@Az*>hi5$fGnOm0n!G&Fef$e@ z3Fp0#S{ForLWetz!%@B!O9v$VeoDhCM4ke;3l96VVw`Djxfm`j5mcYwM?B@@SnlW) zSIZ0?adH~+{^!&nB!x^h=Y1Pl5yO-QXK0D&%r3c{Sgq`ZIL22nhgp0=$K4f!MEqs2 zNn5Gox?kRBBB?(NjVVtr5qyH<9}FKpDiTw11@c=lkGkDHdhHc|uRB`X%<`Cd>C^3g zsg6+C5Lp;?3@#2!>Q-R~j4@$e*M=pv80-~gd>o1prLNIYrjc^Yvj=s)ip_4I7Y<<> z-=UOR_yvWH(0_FMXMS!}Am$hK1XK5fcRr+Wz2BkMKNrV0`cxk7k{)-L=q4yAjPPAu za6bta_C$}hXOKzEJbmy>x}i$`1E67M&Fe;GIE*{d79_kkmC2#rd8lT{1H6FU&r~kW zwF2&DQ#ZIVL9*-t1R#WN#X|;+aGsSD<+&Sh9j%K7%bF0Y77sQ>|D8GXcWcII5>q#N z|0m1O4TFkY#5u-S)h_OPg)_dIaJx}d+sp4gZR+|h$&B-G>0^H(i-Uo^<^f(0fAH9=i|~7>yRRH{IY@REAW0 z^d|5o8Zepbm?RcbH<)g>naA;bksWViJ$90w`7Kh)yFc@z=U~$31VJv>)dgy%YESRqIyDZ_>CY3H*y zWc5%=HmnhgP3c*IO-O@ep0fKcKM;%C?J}gp#d2Bo$E%hCcUobuhA!}2)sE?~M zm)|aYpFRFU)Y~ggxFx_yU(uVsHw5Shg4Mr+-LLix*2@eUt)Iw9dtoCj)Bovi z_r7fUSjAlF4-D4dgw8Nh-G(WEGo<{(0JsNTv^QDEy_I}xT=#0+Sw7K~I^>dyzMN_7 zZdjD3fKT6CYJH@Ei9~Vx6J3n@5e;ivA4#Grj`Lcf=Sf8!NH#7}#^9D@@Rt=V9c~ri5ZEMiTKQd9~9n6NxLkB)}A7w42 zE?h874VpE^-k%x=#52*X{XYv3Gu6n!y$<*T(Qi*Hi~>-ckruUa>qjqI79!MjpahbU zwWwI73kE72l5^Zl^Sin;R5&>o47x)%RddqhGY8pg=o%Vj3sPJ_4Nht*)+2G@QbRLL z>>Qfuh!;+#(ao!U$cdu>_h^!(k(q4_rVZf6#}KRO4V5YO4p64fGg1+Mb`n@j zVIM3bhrnNEP=_ucL>Ri;eaEhjoM6P$B|7Jx3UG9?ANE&%OX 
zSU56s0MjN0(x-W}9Y9Eo?1uV*GyZH&O+E0%R=dk*Dknut#Wg}$$c2U6(@ribrEcXc z)X$Xi%G)}}ZI9!l8}N5;oXlW1JRx{OLErl@m#) z9j`*hSL}(!amVhHmrR)B3Yq_SdJ6L>w{VQJ&@8|QNcY18N=Le{Rz3;iS@9p>Q+77> z#-W_DaX-ruosYIOk4f8-)d4Jz4>YUr$on(g3njWQ7dx@d#hc3-N;vo2L#Fee@d5E3 zF131A+BVU&bJ8{{Z4pQ{QwAluQZ-$7)tQnzr|uRyLfy*l1g;GaJbjo&qmjKiK$#`x zDH}}skufGocC;=DG2tlD5I+)yF?pT$gf-Flp$bsc-8*2ixyHL;P#F*P67U;Edv7zy zyK=0_FZ%Li6rz2hw zd{Oh<3M6t*n21z)Zej0NVrZE~G~EOKL5 z09dvJ0`#=-A+r4-h%L$mCtf%4DpudotgbNam%7CDnpx~Qf%HfId&ssC0N32a*#z*^ zGD?~u{{>;*$yGot^GjOL;?%%(Vl<1sB_3cSJjtQ~dy4mUV*D;4EBnf`w3Ca&(2KWzC6xxGn`>%Zz37qZZ248rHbDaaN zcF4@I%qq@N38CyGkO<Onq+hcR6K!L6n!bAFD%0i#4OI zumv7&K#JMVW(qCG*!1*Bd|F>yU9HM|jY+GoH0mA*c;c?#_|v0?6vB|aC+jURuT6jI z6L9APVTbqtzgvmVFWxK8iZpB}q;Kjh-o|aOPvf53hXo8v9GV8Ya0jRPKsx^P%s$NP z@T=-Bv@LQMj1TbW%^oghOqFb~``im?);u}O!i{gz1?bJb3{p^>+*h0gwOYY(~6a?ZOxQ{0v;E&B;-xuQ;4Qf1|?Z^io(ej!uZYk37RZr-}knVUqihNh$oS# z4&(o3$Dq@g9vvfcE&Rbq#*A-$XF^Fgw+Zr&uX;Izl3xqMVi*L`i`~ATohJd!Is*@& zgYl%@@h%#|*&fJLHjwrK^ryf4(PJVJIZ$8*Mpx*or+ADO{+8@A!c=m+rC@R!aThj1 z9R!{KScK%k3TY1vTmz2D|7Om-ihu%pRS~XBFn|EB!gu{^!BVoe6w(N&)cq*)W1}Ch zI8M`)6t1}HVgjmYBtBqcobqh9-~i4qiP|2+;TY?tCnT#bKqb@l&8kRZ^3W)?zmT#j z-Aw;YMCA-%>w5D`06{z4ZH(6O3CR9r+49}RCS&MY`KBNssxA6~qCDMJ@a0m}TiG4> z-0$?QQ>gT-ybLTV?J`Ezb=n;es=Y7(lwU`oBm{R_+S;=Eb$gPNZ5D}duu1>M)H z;onRBgVu-oI(DK|L~?faJJsRTk5Ps(G-UheCb8GOaY)Dxi$1k5i}X`>-@fx3Wd5ZN zq1V^zmO_rV74#1n*QeYLVabeY&lb6&7NTQEja6SPq64hs zAI!uW0C>@a;jzV!D{JEFK#J>-;JS-K)=tgEJ)iVnB1W=9DY9XB0x9;{iVyl4tLv`NiTW(iL=5$ zq)rups7;fCV#FWFel1^Ci7?_FId`ce#&3FlSnQuf>(dbM@OF#k1+zyLa-e7J9GKdxqypOWo&4cIy-bzsSA zH)j_wnb8DyiZ%3M2nOz$Zsv*|3|D<+i3oUU(F-qh0lYWtI23VVRYT<~PR z$ctTeOg`3BJuEoZmM;WEP(+s(ywS*#5)n!U_3eblF!%!Z$%}pXNK4K6ct;V7atk5y zezB`uKm8$T7N|5ST>=hx`BU-nf-uhshebx;;VZF(e*o7%STNcj(SEZ=r~mu|SyDv`2f%=psyt@C82FgUpg;6?=w-nVc~-E+Kg|I`p@6m9=M!R zv*|0j(Yb{{kX9pcM{LZgc|N}ye$*H~dh#JE%)UhvWVzVnbKZvp5bJOf6I}=_E*>K! 
z<+JCI)s28W-~6@jCq~9&9=T7_rbjLaEbt%rW|6nel@sjozEeg4h351V<-E3w1^PD- zWYWBraP8M*u#!$IbduQs|KtV9;wuvyo{;5{NFLZC9A0g%o`f5uNQ^+4MJ3!E%Q;-~ zz2y&ri!Q$;TYef@3u4@>!4b_oWJ^QqU7e0d&XRsa1jxa#LQ<)M0aE5}r7M3>W&|j1B#$W zntlV6lu}Wqi`9m4v&zD1n7~UfElTtyK1tiAe*EyYdKOo(HfX64KZsgCX^GRQ3hJOf;$6Hrm#NE zfM^)BFgWt-&oKVOjL->K->(}~R(Zv%TRQ_6Umtjoy|NQkixx!6VIkBUp=n>JWjQ!y zc^nz9sSnB8D4uYR*?CYIX8*M(40LyWPd?pBK7hi%aEYg)V0z$?<_^++2daNhPp&q_ zAwi5#O8xi=0>gPFnS3R}q~8mY6n_=rtu<^YtkX%hrk3UnbH9CQRBm*EXL?AZiQzp! zi_Lf&=u%^z?wh9)zrZeLk4p2vS=7mVW8+bJ->=~4q;Eqtj?8o=F##Yyc9i2?9t|e{)ev5gE%n%bu{-SHF#v#WWw^e7dOL)T=p6YV_oH9p z$^i|&6_!m{1U3S$_PZ-+7Un|%JtZIIID^u@9`cGdjnvL&ZCdNWURol?(cdZbcMR%W z!I`fS7dH((K#qKVXEPQUwg!l>UMd4>9uw_3aR4F^X!K+BeM9n4hidjPnIQH#t}tk9 z9r*s)UPX5qwu0>VItLH_l<^kO5JP8CE1S6Z`?d;Rora5taoMOFrG6Dw{RZ^WQHtx% zoD3IWljUetDmYB(k~sZXf$MakI6TqF$LD za$rji7qyYpMvpoMa)}Qx16U$J%e_1VlW7HyA>T+t?J8v3Y9xEJwy_<}-mG{yUfatT zWYd|-%8h8DgkMv1>*|)`($O^846p;lr~`cGzg*4G{K(eDkr$k0M}7X%jv4D!zd}hOBb#{f@+1U*dj-V;86uOj5P3;HmPVg%F$*Ub#hOwc{Gl{lf0Qi*yBvdu!lpsh3K=L>U73k{W*Z^K@& zuB2)|83C3M#WZ=E=5~Pry(Ld!CfPxMq(PIU!&eOi?}|8mmdWwOV*F_@eN`HLP@OZP z^ZK*qIfE}%|1o~!YPWteq2`?S+&yAQAN>nlA0ieX0yvuPVB~VeT5%8(5z*3vN?LZ)^~Y z8PiFhRQP`Hpj<_u;4f?VT*cCw+}|)Ypq!fS-O>Cu8RwB@9g&!uFj{Sfb9j|Fr#LFl zD`+La)L0q4^c+T<~}6ngd}S-g%v9k!LXfPaaBhxxqz}oLcQ*RP69`O7!Cad` zpi|#fo4foBs-dlOx)KzgeVs?}-FFspr=sXG4}Rml5`7%t>jBp)PwVmnp>TIn5qId6fR(`T#kfs(L#^=UD$6^-t zpy5aY;a8J|wyE|uIIJ(G%>fuTu(U*Z%<9|AFQ|ENo~Qn0w^fp?iE-7Hye5T@KoxV@ zJS(xelO7nV6MSpi$`sPmA=f#vAGJfHpR&6Xhw)j*-onuA52b>_jh72>U+_IRLEF{~ z_uSbAzJLAnhWjUMl5h*F*aw@QqDLklp?PKqE(qftFmX&@LXZ1gqX9ipr@oWuuV}$A z45f$#qY>v9WsNP`!(|KAnzdY{7K18Ao2jC5J?7BwEkD2W>4R_o^j-)}_4sFdq7Z=W z-~G&2-k{2~-HwYm&HrotCthG{u}=!oHeA}8ui}U;@ps6<1ldqwD>1ZK;iZ+iGE=nY zgO9)V*MT0((TJ|^3;O!$y8YuDpIFYvLC$Du%uv3#`?E#uJ#AQDh{Tq?;JnMN!UP+WD10N(hqw>rry7#4C`jg5!&4=_< ztLRhpKGSuut+ue@D&d^OpDtCf(*22=v?(7y%{;juE2jf&!;=+*Pc!MzAg2yvHyoCad;;KNUoQ=f zs$~d5OTWL9?s5ZE!<<2d0nw^32;Iq=l`I~|phkc9o-mt=R^VKKwz37X-hJVRaOmGf 
zge^)4(z|q*g=Chi>ssBfE`CT$_!Rx$iigh1N7Bk?{xTeo>i0fNZqksj?7=oOgJfD! z6wVgbUSpt@#CzyBEV`JB?s-5^ra~+_h%suow$<)8V6H=>5(YyO0JjAaqrAF6f@9`Q zc}?W4*PYIgY=Nx8Ea`C}FcU_&PPS}ij;LuHcU4m1GlB} zbOBM&uRONa#v4^1*?X1ss#6qS7CoL^Z!c6X`!gpwtovi;?4>LdLU*N39x%=Qqk!N;)mFCLZn91rE#)IQ&HeboET2dWvFei_OAD{h43f50J{%Le$pU?T8l@+iKQZ z*y7idxIeUwF_fK!W=1BCrv zWcj^ChpbR7YK6pU|I@Se0$a=|v;e_U#gAaE*h9mH?AVm3s(<`9r5wVM3d=E%onfO0 z(qFwsi`0Be2bDszZ_0{cADJ_ba4K~k+Vz*zI&v4IkKVt$dDeT`z3BC^(e^2LM22?} zr2AZ?RL==`MAzR{1hDcqUyg%!I~~iArt>`#I%3LfMHOm zG@jLRuatsCM#x+yHEj@8Hd_6$*<)vlNRqkpOc(?-gmWP-zp5DY@M)W z?Dv8nDDUeH@EYsU{?>qNWdh4w(9#IHR963T3eQEzukRo0$?rH*X||Hea`dNl--y}= z)6Ie<7(<#UeP`6Cem>DvUQQV446748ii0|*{U4#6pX{>)EoNbrMZrH*rkgd+)N0vp zL$0$5Q*IAaJso=^t_v_7D~nt|$aix+=f!=j)pD?+6{ki&bnlZb2+3yA9JaQVIZpb= z&X4yLjXM#y6QV%rAg8qk)gAg40oYPPrCDNK_dEERgq%^iOm^ZzRe4Q*-?y%FX5TU| znHumdb6M$YbeJq}LdOF%-gpHMPM)1suyCG~bOXLA=jfofaj4zvQHuL`8_Q zwEO5Psk`Cn^!V{UdxmP)-03J&1j;zI8b%*U{&XA}mEwICzik)xEfv0>6Te^mAY6Uk zh_=_h>CTqrk@<$|zQV`d08faJ?v49q8YQhfIZ3RfivE0j%4L(^}hqu6dQ$+4*G=yur*`LNuB`O=gyL9`<3L>KW< zqQZb$8|pvW6%x~0BIT%g^o<#@m;{faTB{%uLHsbWq1?0;tsWR+Y<4|}mxa#%O z^&lN$*0^hX_1?XEF}jwFY8^k`S9BWOp1o$JhIOs!{;(@!?KWMkIco$TZd*jlL{2vUVz!d^7am#`#IiLpcDT1^NX6K>iNscowo)dFVkrp zj__8RSy5&sc-=e4O$k7PsfcS!%@{4Rg|BlTHo9C0`aAv$!OO?)PDO7NFYwcU)nXkX z6F)rNjbAAwbuo6vb`-cUG+aqKaRF)8s1SwZda(2v^_GT~roaPYyZhO}lLUz<1xmHx zf{4@vJi^5|U2d1#6@~7iUu%%9Y`{_Km2!RFFr+2?@?6x|_5C3FM-9bv|NGL_FwMeo z7PizTD~PP)`|w2(LgTSCL_uRmA86Ry1#UyP>TVy*h~~g~kkyNpRm04C+68tBtI@LV z>S@AJ`pm*=Ny3J-|Mc#$2ePwm{!bH8)6M%o3DCBGGz4M$4M zL(goiH+BNotiCXzl*2_N$5PB%3Q4Ouz1BY6yuN0C7Tk2O6?oW4vxfN z5BxOUr&jfux>XkoGSup#(hyYdw#2pKiLoN?rRWGMmW^-%p&)Pyd`TMS;94tT))Py2 z>#{Vvw#SpnK#RPizs2cB_16oL)L{1no*rD*HGz-s;iakcn&GLsQ5sR$rdYf;(-!A> zQ|1)?sOd-1GV3&+I#JgCw)jDIRMce>?c#)sK&F0 z8<3tKT~ck@7u)wR%q-o5BN)Eg0{0BtkG7qy%G;Ua%t`)kfy0Yy8FA^i;b5yBv{FPP z?Xd${6oNN~EdQoJ5j1Z^CAUEsMB%i+g8-%G>NP`1Q2~LgBMX~u3<%KIaQHD0ZJt^G|rN2GFrP7E4aVG@eU>Q>T(^u;m+@~jz|+Z%{j5P_YOE`A z0O;x(l0z9n{py{$i$u>+v(S)Ktl&Z3tv;4P;1 
z!`t=9Z-Rfa*p8~^71g7M?+~rIhMJWMr3Lsg*Ue23xsBDXQiy2{;!BWT66A$7DY`57 z!YNU+Lex}Rf{6o{&S2oUKseb#Ydfn$bdCv9_5jU2Vy>KxLQ@^CeFN7Ag5%d8B^#+& z)t<8!-Z~)&1iQVd6GU9x@8R)pn(Zce2l*d-D_buz$u-Ot6Fiq7-4}o#8iSELaCEfYs-IYOoTn!kF_{9W>+v2S8B9iF2?lTkC6VwM z6kyXTL#Ugy4QVv2J*k^v;t1#mm3?^)?esFlg4YkX5|>6vC1qva_QihVn znhyT@0N#E}VO*KHs~OIY0uq*XhN2@gr*e47)#(dWyOr39vwJdd&2A%2Tx2q?Jw>dV zP>h%bsZ1GFE^8UTNO11f=r5qCFzjN)T_GwvZ4d!2@LY^WAHF_J{l17Nh)MYT#o6w& zPE7p)oYqe}z{%>l>U~v)Z8$Yx%faA({_#u7-v)wOLhXt)7LhYg)Iuf`6R&s*$%bIE zJ=z4>q8Vf+2_%kCB5Isef<-coOkPTGI^i{X4KDS{OKrcypZF-b^8$P0QVP6)T_Vw9 zqYkv0L|joO{ru^fT1sDQSN-<{B5@e0tjrE;LLAdl;7#|MU}#fro)%tD#c=+Ia|}{L zOe+^G;AfUhQ_#N zDfHXUG}QCu=V>$_VP%5Jk)@Vs;)et!YUllb7GRVEf+#5V>&qq^+oHaOE+17q- zF@qk3!KzvLg z`Poj9%*={l=ft>MKEtA#w2`{K^obEdq6O7sRFmRhMX5I%+#7X?+0G}HWjQ%U+~%=* z_j2o)Nbux#f#EvD7NAn}V#c`JZi`h)VWos|f#&#}Jm7ejM5Uq`3PBUp$&U%Jbq4|gPN-tdr)NM1mMhj2lNtmMJFL4E)6#@qn(r1u_*l3vZU1QcjGWtlN-I>j_uYSmz<@c zQu(b$IXr>${NzmTP63$ZBWhezu}bL!c+OCa#JCfCA%#hJ-87u?#(Y?cA$sxAExc^W z?BHVBwl6+#y#a z*LLt`59|_ExfGca<=7clfe6&kR>ku$sL>T?T3Di)&ryncTljP#F5h2kNIJGbBq8Eg z38S^+7fHdxM;H4YdS5OMyTGy+2IB;ZOFMAq(J_@y*VOV};PZbJT7?|9p>uRMZQwdK z`lby+N?ts@Txv$dW1Uk5+H1kGe5{a_} z))glvKqu0GO~mivM%?sl9q$_oZYNx>9nUHyvM1jKK41gBHG_Rgnr>?|SP=4*O6wBa z1)W+8`lZksul|fi9E=LpT@F7eH$LSNm4qwA=God2AV5zkQjhHODNcy9Y1Az!%3%^j zwH`@2M6rWAfRLD4kSxpquPSNGv*e9rv;SiKl>op&Q@qrb ziAkxiU@JrbICg)F07@PD5b524!ej=7jaU+%nACfzi8&xB^b6RgImPJt3AE8jqP#CT z-M`>;qDa|GWNuFV{=9@=%2E=L5gAeXj8AXJni{XC{JyWwzbr<^7AXfcB4N^);DDTA z^v0%0H(txHD1C*j>A(YuL`KxEnA+}!m!f=kIA^Sv1t)h;-|7OGiA6XW_qsF7kM9&D zD~Tb8*i3U18oS#C=Mq%{CH};XVMKrE&v{3}@4o!Jy2EZ7>#ooV81t_I&Z)A$xKQAR zk=F&IaqI64p388K3^JFFM!~6y5n$7b7g`IamC&g?h?F_r4Ab0J`kdKQa&9sDWjm%# zBYXWx0)^ZD5S(=a=qGjV`hlamEZQl4M*{*=O9p~aW?++uFoBHkc;lVYi48MB)N=}e zO0Z=Wg$&t_C%EiaXKI6IYKzrgQ2)}brTKRQUhvBaWGK_nhs5C7%l||YHj3*piCU6l z>{%cKe|b_>i4+a-T?&;e#CrI{X8qssU{R_3@!62iTPC3*6?M_5r?{?D)ME?y%rv&2eA$c3 zr1dWo$;i=!XZ770W}shpXFgrcF4$RVb`#k&JrYAWZ;9+0X~I(Yo5Xc203LhMWv 
zd7{=KqzyKi0Hm|ZnRPG~J~47IJkbpPvlmilcgMM!nbe<|XTabxsnyWSqfIhciE@a9 z#C*Ef`vQ*X>A{!8N~$C2*lB5&qff6%*KKQi3yQoR>$|oAUOjj2nZGe26lAA#T7yl2 zef3k(1b_U3k`hy*!8#rC0~BLlh;#TZA6{)^G?_#!gleZ$mA-d_z54R9M0VZ|e4M-nux4Gev$6wfnc99F=4kQ$64w{{tH) zX&&fIT48od2fdJimwUPMEp-A2>ZneZp`0iMWIYj_4<+(kP-i@{X_K)8Lcv)4`jUb+ z5SUvquB~4F#Vo=XXXXC!C3iSY*9c@oOW9T!*U*{)s=up1v+)eqR5}eKQzGm)3o}qN z%Fb*diWCN~?eO8gQCqu$J3AuSneW-%YkGsuDf|lqY}#C%eKDGGqiHDISOukgUUehd zl03y0fjVymLRRF!#3{s;N?DY&0gW9Q^x$o%;zYg9!fdO3f$YB`pR~N*>adBtQ4=B< z9G4PXW`aB2Iq_9yVNn#=%nh0fLicS?1B01^OmodaMQum}6|UZ*J1I$;ku1+`oY&w) z;`_Bq1dCCT_LUc4nJ1zW<8+~qY6@j=8zCK>z+Os=T5Vp6JunPr$ME?sP0_4`Pk!_h7 zF|p0t^~9%u!(ol%-#WB;PO{zciuor5?_@0OgsI+EDi=}d9g}S?VMiqA2bLNy;Ba}; zp2>JLc29R-(&1W?943r$tUO;UKfMKIy7+$ilEcml{})Z4y%J>7uc+QTL~5xjvSHI! zh(aS|XV0Fn)w>8%`F&jC-s8;QC_O0-)o4p&(+gw}f_^k4NtrSQRU9r1yIBngPg>M} zwkQ`jASp;T-l;`DtUV&)9kyR9VbrL@+hF?F&daxt)fA_vhcEFQ|Jw8o`pIz6anK*v zuB%oyB11hTjs>MUHiZ8VP1hJ5X&0oEOfs=;O>EmXC-%gg*tTukwv7opwryM6-|pGJ z_g9~O>sCFey5$4}NFIDqkc<&}&GF-B8UZu-<4pVp`#QIe?TooMg^1i^S8)1Pi49sd zX(;Q>PlGE>sYMy4c@|BN9SL;O>?oF?Rhgein;(%TPXXJMdJ->V3Eg40qVxX);gjZMFf0=PLay>xqqL4 zcV&fu6Qa&>Szm%oydA4KDniK&QCl0P7S3ghQfUsU;D|y5-SS;A17spJ59YG{%5@JX zE=6@m?GKx-v-`Jgpg=0NI}xPn$n-A#lg?yLVr zwq1bV7hWEu9s@Z^OL0E5*gf##H}%C7`s0u7vi=mBN-eVWat9)9Rgu?UVxyvEqdKDj z+@IqKv;2;m65*!p`J61MyK4a~jZ8GHg2K|!kfZnr(VcG7hdfW&VW}|?=by7!CVE+C z6TCZ3>^VH;C{8579ebKK(X=0JtsF8L%qz_en^_b>&aBmp!}dT>e1%~A{H`~)Nq}D7tb~w>39tPfMFTQ{g68G_2X=vc z_P&G0EJhnV!7Juf-kdKCU)ue0p=n?mL7%^uBpAmB>`(D?jz z{)^6hYVhnAxU=M1$mAL`l!&CpCePa@Ihunvpc7G=r?yvqF5n4lfdOle^wj#6N#NjQ z_0JbA=GuLo+k(y?MD7otp&mP7TO?C`@!a?6$J*bcp?BbLd{cA^=m{9C^nG(ulrh5J zhJPnow*=6!QS6W4iGa>kl53jDlnfv(D{}F^KoeODKp>H1N^T(snV>oE z>kIOK>HBTxB0FQ-!brf})f649cKH+xT2X+}Pj0^-LsRWIhI-`)oHSO?kVmnZnf@Wi zG)hrZ5xi-hIlw+;TYkWSUW7v%EhRltMYScy%dMusL zH03#!xjoSwLbTK*Eax!%i#e)!wRcP7S^R#jnR&hQQO{XL9V863tTlShj%AlR_*8^P zd9u35)r2{RcrfrMRbDG4SHtGvxdLWdwM0{a0)rA{NEY5%Jr|>oCuDjNBM^M(%UJw$ z^{+~ddj|KEzz1Rrp(-vG)W)t16rag=rFMe=FW_1DoQ{qpCy%H1_7NN+B69;m%}v?+ 
zgdBl0At{a?R2|;XlfK}^S0~3x>sbQG#7R0~;mjw5YOD=@zUeB!7o6!1E{N@6vNA-q zO$I4>GfdYPairJdGkFQ9#uvd^LPtA838leks8YPC5+&)?T)qop(dYa zY1ZF>%+njP@~sVW;^-T||4D&lx@;c;=NdG}K6IroPVixKbJZiE8=}CEC!;vNsxhz! zdEM;3Z#*`6*R9TxdB@7)k9$@RtVKQ!ygO4Wps+L}t+wxZj#G@_6U-4X%kf$1;avRa z{vi%qM%I0M;kpE&@_CmU%)a4cr5CKj{;Dss%1= z=sUjs=e5RQyKJzQSHG?LaxMQTl+ejMn0U(Iym&DM?IDG>=P&1IzI~{c%x!g%7?0|C zKfh~4JVE3P{u_-Fij>(=)&^7VxHV@Z*s?S6C>#%7BEIIp@kE} z1E|?2imar+iXeiSZm4(AW-)spQ+)jP}fkID&c}eiXKJWY&4zymySk z{jzbw!4^hCk|!uKdn;95dA9;NDMP3Z?p$vTg|NlGBZX>>dMds8;@XC3BcqHKvc*!H z5o+?-Uw)bUS!!OxVwisY7bnB&(xZ;IF&oJgcY?)N7wR+(#g@2JIS>ny~P3}FnX3E4iRA~qGU%E;$ia?!+p?Yd)C5eiA@C&d+FqESt~(B zQQnGnodXLjn@0IL4Q-a=My_zH2Ylz$!txgPDrB7|MTc2p#d%?3h&wW9$=bUzor9w2(^4n>h&Qz}QRr#&rTVy{vxjR?= zf5Ja<=vs_@W9XLFZ6q?X-l+usVcTLOa%i?*DRFE+mc2V_aC;mR02LkS*=mb2pCH>Q z=YOm-NKV7hq9%w1U6?JNaX}|*QYW;vi8`%_pe>L=l-I&&qsX5Mg6Ak5E_iE^>Ast4c^+`&y|FF#bsj&tWu7}FI6}A0q7I5} zq<^nFwzhg({RGa?Zf3QAaVz0hNWe4odLWR0!+iKhAHEt6tzh<33FGABCZ43HvZs{d zn@{18%s$%H`U1H`8&0bG!l%O#uo9Lq8lnUw*r5gm0KsrA0b-ighDU5i7R_qoRDVnD z_Xevv%P!*Lh?a8Yh$}>A2z~{;7z-fkL5sIxvv>#hcNQ90EzfS%fn3@^4sxi6_HHMz zk>WH6W)$NA?NAyK)LzX5RmHjrmI5 zt+x+0Su`8?mycCAxkIwNZf9D=%@rH2Z4+1~U*k39?_3Uu@ zp?`ni*!g*#5@z0vY;0eM1Uqrr((Q6#wLY>rXFmKJM#3W9hW0w*b2yV1w06Z*_urw@ z%;pi3Jp0J2bQNKyr@Q6~{974=7pC_ur0%0p12R3asQSOv-_T_%7wR3RgjUN*5pw!- zCujS9SN3hVmnTi0(Luq2IghAs3g2MZ}Z|wiLbw81uFTVZCwN;B;Qf3=# zb-z}}8Uf!>19t!^RK9PVx5MwZwROx;+sglqL9l^6ElK1f#rg0gC^no4?q?HR8HNHd?$idaIx&bL4MLNh(>LQAXkaCqX@@4OHE0b|bS zLe%U-BX^J|u-pFfi@*Gz2t}{H-@|%DO!-Q~u)e7a8VJ2l?SLf^_R>Uz_mNOI$`^a~ znUa%C1gU(;ZR5XoByao*iq<2yWacxmTFj@HA$T93`allM;|eIXvD!fJ&Eq+!MRty(Vm ztlx@$yYg;}D13t8II8Po7TXWq%md9m=VVP3d|IJPN{_pDK%4QL&@vHtk~s38TnRXS z_*X*_OAC{cHV?ETJ&~F2W}&a&Xuo{*f6oZG_nCV~3T zO2#|df$@CxnF0!55Sr3{DqjL8NbO2~C{zs_t0sB7a)f+j$^hV!EyS(!y0ux}#_c~zC{EYjm0zsHsLhuH$uLG3m>=Ke`Ea7rR;3&H&G)h357(D62IsupQ1WS%?Yc z#LvgMt9MU~MS-?xDG~N%qf|3=ER=0OkJJw#>m*_tI-%}r`pm5AV&G$VFf+)kV8f=i zvKIR+>Q+>0>k;xzLMOxFdfT?mg-{%P!olGM$VCeo2}E<}5opCxsayKp>qp9LKvj-l 
zJ=EGu!jIkfu>Cb{SsvWWiHRiYw^~^Iu1dJF-rb~6ZZ7u$lYXosWzi)4(|W%nq&I?{ zYq-&r=dm!gE|32#f23)i`>A>8z6x@A0AC?OD1{FBPiyJThgH>d&+FIMAP)JT3+oZT z7B%MF%=mo)W=4RL)!8l=W&;l@U^4iD9tot{g45_aX$qYD_)6naz~%?$HM4y!01O+U zyf|KNM4IN-%O?)*f7Zr?Xs<|8ALOyf7OzbZ6;CKWf?ON$|8x?bij?3v;O$54r z(?~HWF6ZRIyL*~ry3csS;QxNWH0{R}*h*UJP7q0)K{8;V^{P4n>L z<2OV*yQJq!sczUiA@}X`Vq5m}4+uU|Q84mCvadpERDpA|E z%DRLQR%x)Zi;w}%w-5a^`?r;TOdLhvn_y&_TSa$``3;avPls2&9y>|-&maTmfs6bi z*3f-Zq)$GmKsJ(h3_>C>_eg{q5_QQFb;1|b^P&#C1;U;xT;_&o_4_m-f6rI#l5|_P zz1e1iwnW0dMwKXh2`fg4t+5>JZlH%^az(7)yOf01xEgzlet3sYzH)urMO)IGkq091 zhp&9054~(N5m>2BH$jL$E_ot(JOS}B-zS(T{NG4GBd+@mNB%d{n4Jjcu&rhj`}?^s zxCdf^!Z-3{Z=(>knhR-*_cT(i9`w;RYp!+;c4_=^v~XkB52gpWhl|iT`5+bM`3Wv}aOj9>5fps0GDAtI$0OcUo^VbQ{tRB2fg- zq&?fQEc4x^V2AEp>yUG@Gc{Y0%~Mb=@!;oRhr}jFw1lqxoYsq%@~ z`9%WHlC25Iy(Za^<`Dt3xP=Dl#4fypfs50 zRxJlCwQCjnnnEm9ZbztClxPo&pv}ywOszO57*&e7Jo@8;xzv%!N))WZP^W;jTwZom zW3Fh)mcA06bV|Q_zGMfsV1;svd$TVz-Esh^1;;x=2o-6T+3|AZWJX&+WN_ROOI5Tp z+UXrxL{S^Jq#%t%q+1G;+3|unw+b|uym}QUZ!vbWi-ahU&_YA#-vtkHA;@zQp_Qv? 
z4t(Gn09SR^;}ew6B{?KW>oIH8Qv%ni3f?}Y`vowhV>P|b6>^P0!YLiPT)!V}RbpRr z@-Tzy#H6x)=`>r~PLig?P}mJZb>}Lz2VD9Fa6@ zAbO;<-c914jN*>=++@cD-9d!V5#0bumey1LEtR+noE9$5t2vmcMd2E&*1D281d=JB zA=Mu-jJR#ZqAj;%CIU$j#c@Wut_!r6fQE9Cy9Mi-S)kungIGW6ByDMz^BKQANiMUb z;VzH+&j?aghi{oECQOE4lJ0LW{x!`^bsqK0rmXxHfOX z_tJ>N$bFVnBVM0?>e6E^~Py4UVBnKq9%|pB-qyyv#&^ChO%Juo&@EUW1kz z2=h&U7rT3(CmZY_4}EPjp4G9k{@Ko83R1JcEVvFMZV4cB+VFqDazh`Q&Xu@$;V$6x zznaAL<;+g|e7p#pqj$m)Yy)$&!zEiAV+qsUW*qN(lQQ=*_L`wxMoJ7;YEofdJ{nj@ z;vnYm!}{#H#cgsB?`Hq*fTpsS2{AwCF`B-konSu-@;y_N^264h;FrMleiv^)$H4-G`N1P_%i*rFT zK$kz?!X?pq7qp8;Bme?jUcXwt#qX|%B@*L)ClX3s*0c*N;vW9alD3YlI;emsJ=2iiI=gTvA zkrj&+b-UUQQhD31UR)unww-`vqyO&(XiNKh%RN4%+FzcI)z-qyFCT4ms8z;>-psPB z$KIR3-0SdpJVv=Rxu|?(nO;FUIQU~Cpr>HCkVl7L^Hgz6gh!f1Y!km=N9Il1H0G3N zB!no6t#L7kakdFePC@8ywVOcU;ttR^XF2XQg`@+t)_@ks6>?}#!Z6TXNw-{s=Q#T* zi$WuS;De=|ISd90YTcas zf~;Vrdy0CEl^)!(7&YoSU6_p94ZA+PByQ4yzu_7 zRGc<1g-el^WC?>=w@hP{44{3XW}mS{<(&4xJnlMASbV{$g)ZJqR_^77T5{Qo6bx5* zH<#oK6h8U!USpBB%!p3&F!dy6x-Fsd%b7>w{)i>b{k%ph z#uu+&dWL#KApc!(3^Z*6Dqihcqp-YSLULI~%TYx77GoUc5{56C4s%!-1kIrFtNX^K z3Zr-))=eACUQd!$PUd^oc*k>=yzCM$2?qvS5*Yn-FERPHFW{#M` z7unmOuVIqmu;EZmEh)k$LX(+Xc++;P(9$MM7MO5pSuh8n^a~K>YXNz8>KkyvH`jec zK-TO?bXLOax$d~Bd>R1l$oWn8P%glD?0hj1_Tw9b>17q3p;KGT5XvDi57p_C)M=xw z`x12a8R~)0VX%RW?>nRYD^MILnYGdV(n>IgjTS5jYYHrQZ`jqs%x?4WMyk8u4pM?a#xmNXO(;j5j+wa0-A=;bZb;Z}T=N=oV zaF7F=>|AN3d!{NW!1VYC{yKQT@|`jNX9MvILlsu62^`%-AH!5Np>hMqZpq>XlF$jW zqd1vtlv{%R+ARkpYp0%sLWz_oYl^zusSk={xr<?lw`{B;cN5s3lRO|}Fu6E^k=v(VxkAz*1TTBn1$JV^5J z&3$f9!DjGo(oS0cYd-L#jet#1%fq^A=3?G;k?}(}J3{HX7mtjDW$G4-?7er(F?;en zu)Tj_-oWe0E))Kre;ip_U9`f^*)Ma-ZHfbQsrmVDT-I>G!x zKJbV-t5yk?-;Cp~=7M5Rc3tD@3s@rY_;LoCh#51#ja3yL?F28IEGR7V_1-Ds2w8(> z?x+gS5-`BoPH58_G$+{3N_hZ=k+TdWz;ZyDi$3>IugzbPsdCcJ0cf<-Y2;M`UJ(5k z{2y!{&0dtS_$FB za~nXirw$2jou&8!{K5!S+VLm8w@s{K3A(A{a$Ss_yfq%X@e{P_56+9d-$kml@YgHt z*S;KFP=%l)lbl3NLV}{2{MP>-^m;DtOQGs^Nbc6zX}sek(^?Qs-(RxjZ$dyF!~4-O zPVAgOk-#FUT#{wD^&InedQ)Vbo+!AW2nP=)=HBfXeE*jBE>V~l6KH1n;=7-?MoFA{ 
zKb1|~>t>O;|^HTuWG#wEML;udr0@xzG%x{hN5cL974Ye8IX*Z6l6hUQv)@=RA|X4J*Mbih+vXDE(P)6S=J>si656PBT(S`A;H4^L zzlSRk>u;EY5MlvqVft~;sNM|eYaP(tq{&?QS*dj-KKuEyQ?;1_hVGyo?^Ps|W#M}; zR(ip@u3JqX5E#-D9W_aFd2C_2GMhD948D8jy*|4P=MCn9XCjZ?A1bCgSZsebG#SsD z3?NXWQ-G6t9o_k-e#Whf$B?PB}aW`~p9dc$#fmtszTP;fAj zhA+eJ_*XRj`e#T2O!n1RIAd#kz$XZu_)ZzYFZQ%YYNyY|9tfC;iTe|v?aXHifr4$Z ztD(u^Q}{6}GxhKh8OxJa=6L~2FO`Kl&HzIAU+GOhSZG7^&5Youfe_*9ZYUZv_+8WO z5-Lw1w~zox;p%p>G|1Ox`2};#KC&$9m$S`3R=o$s;cA0agIka&jejW0$QZ7kQEzRo zh)b(dE0zmt;aE@=Ie$VXHrvdJ`e8to%dIcFjV);9sR(7jsCsxOh(p<|)uRn`td)vc z&GM1o*Dfu40%j%4<6BPWM*~06QD9pHbq3x|B8(xaLLfx zU|{bkbN9n__MvEKspS2$aE}KMWnvl)U>nk36PRe~4%K*5En_MknH?BGC#L8Q{3v_5 zSKYK3YKa9-BC^kCXAjMdT6Pa>IN?UDy@OidifwM$0VR-6|J(oKU)VtTl$tA(F<{@3 zsTL10O^?0D4ybaE-AIKd#W|X&_a73`$)S}JHF$2Q5&s@xakOA7-s<_#WRx;wk`n3z zX80VN668!%vxpZZz zJs-nfgIl?-KZ*jUMys=4wBrJN#Qrr%%>XQzq*&9oh_<$*NhCNixtBlg#VOJGoWS`W zIN7IY_WjX+Q`U(QBTL~vj+ex>$@&#JuT*kkmIC_?Vrg#@zkufhkdN2scloq(WDG%TtEq{+cJX< zEoKVwSzlZc<$qJQwM0A(ACuRv)Bh%|COpv;Z%%CU=sHH9;X<>>%p~bU7pT=#6%p4k zhsUeX@-eB3Ao)&(kYV^GV_~H7;{qy8s*FxUc^XAxfLgQcyZ9ihpKyQD^Yc~*Im^uJ z`K~cGNN~tCy}@0iV4)Jnl$44)OYetlJ}%Dv*oXGk9orLk1@Dm*VC(pUd!Brurf;rO z5nrO_n}~EDBkq)tcvB+(8?Ayj0>{mw^Mh0`^bP%l2XxujkGALdU!!NRv2l%@3nk9* zb*tQfV*Tmn#6t3KL1K%ELwx}{0En@b?yuP56vNQv*Rq{n}oeHTxB6HAx)&2;}Xh=enbkZ17 ztnx}(ORf<^rS&0WeDb!Uxa^@>7)h>yS{Svk(Ar@)DF-kMW4#`x{muMX|3#1}?OsThFp!Ojb(X(l!s@5iQX$aYy`JF%dftT8Y-a`vphj=mB{tu{p8)~%zpT+QUvynk!W*%+#$ z5g&4~t=_TCyQcDAc;n5=fg*eF%!iE6bOw2I+5VMUp=T!5`WaL|ZsAA1W0uR2o)D{P zV%Td_2z0yM1n#pwfq>cN?tnS8&y@fa%Nz8C9g8B~pD@N}p3fcXpVy%mrW6jYoWAVZ zUX9)*(c7_^$H8;yhtED^x`!;-m8fN9nB}^`BUJ)N&#=e7%NC1n0LMp*fUp7bj5hQ+ zTQvKnyd)%1eptC=lt}~>+DojpDKL8)uu_2ka4{1SD#rXey6rTobGEqX27SN})RJ`S znS47722?{UUMVxo^3Rm_?Qe1lCEW!-vs8_gkWbmth|Jshb9Zf&FU{*?Cb$4xW@6Y?dfOmEN;H9N`Nj!i$eVAb?=nE9D~7#vy>SV>^$)}oU}49WY8xQO1ObT?dEW? 
zBvB!ekNJ0CDtg6Lr$l{$SUa2+TzuEqQ%BnUPGh^)iE*?vq%^T=JzdH5v$HMIx8s@l z%%xxjHjpBp7p^0WwKT5ynb!h2Md?Q=0MZvSfSLIj)%cE~QEdX@fdEWJIQqI_BeMpo z^#etGQ|}V+%}ad|@b-Iy;TzJ}d5#8gVi0gxohxm@&jM!5Qff<)5Tc%IQ?L?S5k=b7 z{MncX#Ru79Q<*8vgDJ9hGu|{;*ZL{mN{L$eZg7)CL7mw+HuwG|)6|``sz=vw!+sV0 zO;6fARuf>lj$yTqWcH+rypkqX(i5ojTBE-4`#JwDCpceSDX>&2nJLV};DAj*!}&}P zx`jQs=?g9W!^)1~lJ5eRCPW|>=QV-i`c41F_bRk1@SVFTfi=H7>Wn{v8bQz=30hc^ zg-lm9xW+rjsb?9|0ae%;D#BY*S;8{)ll_~Ta+`o9OQlFkaD&<)ajX)ML?mAry{TLs z?$;dAhMLeMVvQF7cI-sn;~W&Mz4ZjcSy`72U=V2nfZFxmLE>|_b492w4$1gnl&a9vC2Qr z%9>Ds1_HXN8wc00G4q%-v#{<6(w{o+fpoBqxAVLzhIk_4{w;R#U@{`&IPj^f3PG{} zjL7zFmn#L-#aUXQ_vpW8kfW5kU&E-SM28dMzk4Zz?{lY=`LCU~kX|ANPXSRnesA#) zM9-5~xVW`cHmUEwk2InjM#t8RZD}P(bIzS)QV5v*y}?Q*J3aIJE#uIkHVhzdeXmfvU)n8r`J^VEp(N-U+ut%y~u(lZ#)M*V}kgON_SHAG>!R_zWvQ^`v9@7fIzNfRlYL!WuHP(JY|~FT;b(&b!C5+34RcV%l?240 z?CAqGpZPy}_0BegXs#l*>*_(iQNwLuYIQaE(?2usiRHW(t5{5|P}ZCpAFS_DP|J1x zY&Zj5`xlop7%t}y!vZxwL}74uTjY^{AC@qIz2T#WL15+iJnxODqJWxoY%g?6evNN8 z*3cPDYQ64**x}O?;0V=xZPnu@M77a;-8ar6+8p}-av5Py))kb z5zG(K6T*M}@x9tjLRsJu>)FuM4c9z@m_vc7`pJKh-FDBq_wktD{xKHriT+5m_?fHo z5YKvDTmMYFlQlpd!is=&`|3!-&oE66I!&H)gHl)i)H1h_*VLN*M#~*22e-E%*P_;c zyjEPkrF=nkXvc12DTykAq`4xrWI?wTpdG^$-?{&KoJANZ*R{xY3NCs$RLg)M`R6+s zT@;$U3Cj_ZSq7T>FZk{t-w7^RlJmHzA$B2NwVMC^)S!;Sb3OfL(1?-qm=U+=#BZtM zpyUMgIr~aWb;b%g#CzUR_n3dbVCPB{O3E4uW&8tJv06CXm|o)wEK$?q8eO1q;fP*R z$=8M-emUY@$Tl#fyO&*h(yok`y_af=Z+_|gUwqH%vi>UHn{*mbm~m+JSoC(4NZ06B7iqO7g(p1h`4GfNkHeVveP(HSHW1OgK;y}nb+Rz zv0HsL_ufALf}4#Ao@j)w#b*Es*^039^#&&GU}qV1sr$YBuzG$Gg9WvpdNC3|C{kSJ zjO&?X)bmr7HA;{+MjahON+YW}sS2-bHVy?si&_4Qd2EH6i*NW7zK~Xs5Jj9j>2m*6 zL5Z<9GUG2fjmaeEy{@&f;+#pUSix^9$+faNu>EbnEwh&8Lek!isQ{Q%s(IO2RB_02 zMVtq4fEs_gaPaSiG0eDeIBH7whB6O8B7+eN%28w7*p={7F@6yf{Jih+W$&R9)+bB_ z7i6=QPuAy59=Q)LjCY;=pxHpR&xO>+qm+ghB->kudO)-ng3Y?*GmqZCw(sRnvtiI* z-ZQn0%>lGsfvmQ9FrQ^L(41ePiUiM36I=Ubf4;#Ny@#}uY*1@x7>O-UOcr*xA$0}O zI}F}R6+8E|4GZ-YTEg!StoMKZf>&KOTsS%II-}Qxrh}00%em;B z$kj9e<)n&K{cEX}Qb5{?P)xxK72E4}dcI0enErFMQ1_Z1NvTcs-3ZzWuvYLRl2Z;R 
zlruG%#h6`Lpjt>YKYBRGMW&lv&5o<5bqkc!PRhBw)#a@3c!2BpUmq)bTf)di8|Wj3 zPOdM-6#{sMUt>laD!}5TdXg^{-I%k|J156p^f0X~EpO`51e<5M{95p6lLk$=49{CA zG6sB-0vGGBEQ1_WI4>AvB22Snv`B+3&^(wKXVR3kN*ucug|^}+=^0M$vF?a*ypZ}n z&C_+7%BWP4WUAI-9^Yu?Eo2aNev$H0#t+sD{;FFDRARc3^<%g4SN{Fmk4=uf`5{yB z&3p=mLfbs%$^NfGsF)ffL~#M#RtZmZ7=?x3Fh8x>)Q?rRc(-@=WQ7}KrWIp=D1+Am z0Mjv%k%)@SKu87Ht~Ngw-8B12V?d{IOqb{VdoVvfZs?7CF%<#RmO2dz4qPFRD=!;L z1(s@FCdHp#q`By2qaIAF-(gCb&O%-ett`dK*3ttmY(A~oE!ma(yU*>K7~uimu*iyo z=73`Vv>kVIyi8jnWoxkeySHzx$j)~I7C>Ba9Hj1O^Qq_(XJ1c&Qu7J#@MxFcd_-YU z>+x>th%i1_7SAAJA`s=hnu>=8$+t_Y`NwMEFcM_QL=3@7?-JFy@ada$;meLl{re}^ zbMN38m$?r%HENC|@8|Z*(p=sJy0-NGU>S^0J9&`mC4A=I2%N*yx1e-kS^j+46=~W0 zHy=u^J2TCgmi0Wm@e{?^g1!ik6}E0K9E*Sm!beJO?X~*}Pq0VH>0;=(+g@w;MeOLA zroKFmwgDA_kSRp6by9wum%p6~S{@BE?|=PFwM=qJAD@aR2em*kp17B*)@J`Lw#4}U z569DttstlfeK$+}(cQs*p1V?U1O-E-K=}<|ywNR4)gPXpr#w=C-m~xsYE=^%1kiSF zCLP{M>ZiIKcG9vzZz`)QA_Jt@Huv*4k0F~<$%GoViYqjXuyM#&33sd-3{3q6(C5`Z zNM;N1%;<1ROJL59DX)?I-{H6jpqtdtgyovC{+d!BP2JmUWqw?c$bc0>T|2D7@F(#!J2=`Z#*||8SuKqm!xhu)gtv^B!nqmo}4>p80Z>S{?qO8hB#py^?4DHlHS{Jl=Wh z6~mYj`~w-kqinoOw-NeHbG9HL@q5IZ$k!0$l$JR4Ti_REe#{4|sy))8Gk6CA2`r(X z_`p zTnMVF@0Ca`2IjanI=@*g_cF_V^oE~#Paj|B_qD`FV;|i0ir08=Yv*$q(lvY#-;MZ2 zdK?Knu`iy1D&SBZN@X|S18)%U7mX(tgR}&CJ@mR4Z|&L@h6sdG$G$)A<~2s>eA7F; z&cwCrTi2f7_4+U2Ia~;cBKDpoTO2B(?JXwZ&NJSNY!v%KoU8Nz3Vh((Z%W&m!nt2~ z0N-5#SBFgBAQ#F-_VDd*sf@47V|S~goxNgtT(O?VzQXvg-dMnj>WzVp7S{~Bb=T>G z{dg!4BJ3oSKZ=i(r089$CNI-whUkVan%4g;@zRIRml4EJmp0G zJK8msD4NP8KYh!HhO{STDE^j3j`8s0ODRXS*R9CYH&*cY*V8!nH$_5_Q1^h8bG%Zs zw5YU5a$Ciy`+C&$x91Egz#AjVqX1j7ssGb$HbZn{IEG) z>AsLmWC}3Vx@$S!SC{}zks9xrnPX**-o1a5PdE;bo^|}E_bmYd`nFd8)@8LL$JTxq zaqhrk*#t!KU{RZG%1toe;d_< z?yxYlohDV;4o;5EXAVK@U|G~RO}C+YWm>8=o9bW1sr_K9$`Wx3Yr^zyhY{3~;Qv}? 
z{A3vt<&R~kvAa%b3ZCqSxD=R;3{bxGv&$y!J z_;D4gh%JJNHfpL=GJ*1$K>LZ0Xpju=0 zquNV6+c^W41I6dVFthTW>T-s&@T$+CZ&{zxu&JX^d5znSAwyfv{abo6y%L{&?G<%; zJ_^uh(7M~a5AlQuPFtmInnG8-5`U0?!R6wwjPx^tflvEjco4amq`lyZ7$6^12}|XB zH^@CKsbmMuLUUTvULwFhzc(ivg|4B|I$68HGZMc>rXI1CNEqCr1uQ zbH4VD;X5KS7N95CTG5~X+AtmR^lt1-InpUZE@QTa_XEk!RLsp0#CH`3SE_+4t!wix z^vJ*VU+=soHiJg2Xryuqnksf&KJN8Xt5*1e$fc_-^u=DMs|BgqVE`%cRGE=yv>fY( ztEH%pe*&Syd2dVvX4=Z!|Q`vz3*$nKe~W@^sdxIe=#goBtI(z5Db|}&l?^upU5th?K(Ho zM4E2TnTT_$tjyj$Q#-q(G?E`_R$G$07G<*p~3|9Z#UO{b6O0pj$JXxRxABBnqZ#|)%PGx8@zTp4DE^5Y&6Gmq#L?PrD<_lkBNh42 z2yzg{F)(dzN`Z5qn^187&Dr@8Rd4>Mb)49`0-CgwE(ukiD1M}ebAWu!Y|jMMntr?Z z=kdpi+==6rA_A(0tUyByxLDI;wz3CJovURT7-J!HkQ07Y(HOu@+2xO88iB$*emi_vDI)}5JXe6FED8O*zGMJT~#K->`9`_p{Y-?mZY9ueSr@= zVL_YwJKx3C<>Mnd?Vf|*Jwu*kvQ`U2jV@Rc**q9MAAe%0`D6U!5-eG3{`TtNY7eY- zywmYRt&z0L(c;-X)hgwX$9nx^?J{Uz9=I_L({@Ow%_Or>a8_lu%b`fHxXio>j|P79 zz*@HYu;HhRn7Wh-6neI(M)`Udx=ZZtKeBU|sj|)n4%&=ZSdyH)PJPbv?~U-Tj`d>& zuokE)$U1%=*cxZw*ANoyTld@)d^5retTi<2&#QSp`k>#c+f)ztz_%%Fi|p}DeWScS zC=7VYa{d4nu^Yr|djBOb<;-yyS(iY!2K&pi(spzOHZO$D7&z`*Y$sC=Fxy1832EG< z>9INh$t`{l279twc|X^oO!1Fa6X(YvU1^+9pA1zyOqC0wv$be2DV_(pMoomxqnGXj z1w&yOB(k7jv|@ZMi-0i{G?XefO&aUE?C{Z&zPUu_jz9Hea{fVMLx>-O$q}YxbPhG` zxhzwplVpKmN=}q_xaX2Jwc{3lEIKekhl*(crAVb|Uy?i@2M9 z3(&D8*hCU5Mf~O)Io!cQ*EJ(v30>@_1g>(!LF@8sxQ%d z!{9ZmAir?f1{n9$Yjd~d7k*?f3lqr$|{``xpuv;g~*>ESngvE=QC zsHkNyi~1jB8bq;`P3u?RjywXQs~4fCQ8nbb&zf`keaAh*CNp(9EgE3Hyj(d<59YEB z1e1ms&foR3^UaT95;mffx=P(nMDnsan|FI$#EHn?i=08@s%QA+JfJ*v$I)x;V8c|5 zM2!FkaX|=2DIU=bKe6B?%=2$%u{($&vC5N#3Uz{uV5#N}yOyBE_7Szbzgu!PdrTgHn0hp5 z7_C?+90_BtI)C+7pC*I@4Q%OO`F*2HmIptgrjx!+(Th!*D3lJ_$?!Q=^m!w)OA<9= zm0Qksh_^uGi}+Rx!Su%SVLMw!LFC z;u)l*puR87^glaTb;+hQeES4j4U0|fly_~68ngN~$wn5jg2N05Z>$_0zusThrrzvY zB0~A0{gpUfzl(7)hvB3=u8m8SeF)C4>^z0{dr@6@5L|`pxO%D>ja!*qP;#$XI}>c9 zD`zq0JfYm?aFUc)JmCU6woe@|=z{$BhD9KHzKp{Apz528Uc3Ls%z)UF$TF1b=*nWB zu|24>4)TTgzIh}sj4wI2DBo0ctp1+RPUT+Eer+~m*!>3_GiEun#%;el_N4evJTJwz 
zz*S=)x&CBgelk(rc2vZq^Hr2_fS;iLqNxnKX1JbBn*EmS9e&e_RjE)`d*_E<=#(P?3e>0mCN$zD z($KDu|9E3QKO$0tLDR`rY9Eogg+z0X6okxbhAlEhA_fEB=YF7=$Gg%o_WeML|A80* zG!jOC2vUhPn+vk0n$Zco(@+q=_mzq$4LUxLxso~R#m6rqA-kdWJ z%|Z<~ra0peh}wEU1D$XBRVNSAgAO|g?}rq}^5gxyKSB9S^obUvfM@$_Mgybs(72cD zizdaAOdGg28~$POdLqSn0Pe=@YEANE6nc4zA)cf#Eg^MHMcjMy0x>Rs;$o`)7Kyz?X^4n8nEP&FZGUFIgJn9j` zDU~h%_N-m)BAG%37W%lBqUZMa2QMjK$T6ZAoD(*XdFktzH?G|q)8yR14OJZYcjl)mH`8wKd%y0>L%72ZtcR zJrLa8-QC?KxD#ySAp{BT?yzxp+qed|;C2`1eE-9JNEH>RC3AL<9zD9(?2TSbcftFj zKIv6wUw+g40Yy6NUy}jrbtan+#=|>VulCwDO_`VH&b~LEHG?e>V3g%M9KjVnd^bQ@ zEJuesQ3FallTA?V6P^$qMf2z~o6jOAH4_T9O#br}FDbzhjrHC>R^s!a?xir^G*2@6 zv2yhjo=6PKcj0lsar6#jr7FutG2oE#MgsatX;!!FKJ8$cFG@H#bEs4~sp;t-sXh&e z7WC!_RMX&<&;7WYsYwxRL@0Qlr>mZki!m@}N^H7|L$Lqk4|Ff6h@pQ;>y{-88J;4n z`_mM7ZyKzqSQigI08l-;7}uYy{Ql0Mj(QM{tSxghs_p{V;61Ff*ZXKvj4nJ|0@ zsC}u+9k!_Mg!;ThPS)hcMq@QCn-T96=IIJNuiCbjjin7YnxWtHT1ViwuYngDMWgm! zemn)OW%q_9;Ohcc8@+G|Unp3_hZ2ILkc72BlRPq|KhV>-3&eSTEaKo2u2S}p9rH;% zTDeXO zcr0#Sf@xr46^1erJ<3QydZ2L8y=T9mto81~HBdL1kw79MMk)rm7UC$u^ux8qqrtTK zHG^AI`E2Xj>ztYv&W97;0VNmac~V9Ac4KB{L=4Re9j0-Z4g}vdt`1mOSRdp{VtQvw zN<;U)%L`Z8FOWuXdst<=A|)c$_6$^#3xdReFpz%zRcFstsxvmS?LvPaW&T?vZ@1xQ zsaLTpHHY3ASk0mV(-}TgR2`s)TXe&PhF@zzD&&gGc#Ebs?XoyTS&`0CJ|?`)m(sY* zAW7QZZ|q`8Bv6KprM}R_o5~Du4@RqZCqr%WV ziNHTOb$X-dglNYu1+m8n-kKF@pHm;!Fie&U_nTtQ?_Y_AO%za+;Fz0Te;CLJC+HxN{~ z@YB{Qi+(FC#bf44y&Q?>Yjb)TQq;3~uLQr_b5l>t9M#PP{>HF95Q!$RZpl}6m6M8; zahgbD*$?;R%&8qT-}4YwFy;JLmA&)cIb8)4BNUx;S-AZKzNYIe39gM-!A7&9mC^V& z`lps*#B%IPBBqZ^+=O=Fr=FRonw>`Zjqvc1b{7|Qo$?aj3bkjERXGYIoor=|%F$>P z%4m*wBbhnT&vky&w>fkF!m|sDlmo6=wK#_$1GrYH(e{{f3?d> zJ{d(k;`XYAMJw)u@~j5d%rWxkFazr0OtgT)=KF|*4cm$`k5Ebo64xoh!h%)>w9)7I5{4MI>CZI};oJNB5(03z*swh=lFT$oTtWBwfv zL5;%B5k&`Ha}8x62srQT6P~iTrnDtitvS!?Fj4%2o$~8fYkn~<@i(A2Bz@noc$(;t z?CT!T`>i{WR&h21WJUw@H>LdAn#+5?T>=K;SS2;Yy|VH(#7 z(fi$MxpyZ4E%ShaeddV!y3T-hBmjPP*%?gOrVDW~{luCxS18w#LIxKUVT&-*OV{gt zoz5MMx=lQD8Yicg-8eR^&Q?0jXUY7&KY|7x(xKQ>2Uc~6rJJ~aS#!$|trf(^g zk!e>0i{X=_<$`Dw{^bUv>aJVDOcUPkSA?Zh)Tc{b}$ 
zbuW1XQ^|77@5(CJ)YmX)8L3u>AHWP`^UQnd%%9AL{yG(&6Qv?fRAHzh)D?`(SsvUT z)S|#nUrRK8^AK@^al7*X`v2KzWBJv9{1wPSe-c@J3jKZ*4I=fM+{&}@;mQNNE~Smp z1ykuK-Z3W;5h^!75uogt4?v_+=NX3>qe6`HHH!hjqI`PV=6u!*E4~K~zSbAi+UokA zi`N}nkSyrq5_}X*XY|wa#qe9e>j*Ybm9yv3sl~DwROeL99RPT;FOR zAy%eu^R4UBP{85gCSnypcRVx{r8!}qG`qXTl>zWn>QkFX0eHQmzS|n?WJtRqGlApv zY#M1|0tLv=dkc)Qh1)>5@>7$e#{t;V%UNfoijDTWa=a6+Cm(~7U)W`wM7u#WP2Qzp z3Hk=;zp$U4;TZj$s>htiexWavLPtugnO@Sk<4 zcU%5ByOeGjT7pTbM+`V^qQNF)98j2OxSvX&2eF>pOyJ&cvGqNYNW>|?eN)QHinxo# zl~I+MthTBs8cCpl6DJ%F{q*QfjaP)Mf-Hkpm({K?Z)Y~^_MJW;O!gUtAqVbpXXSi% zFo9xVyH}nSr9PYVcYURanzUWx4v$<0OoV5HETO`z6=UO)G*`~KE{Mk?$?gWnS;6cG zML3#e>!c_hTRmOJUe`e@b{Znxj<<{LTin=E5_9Z45Aij_?OWXw?xUMtH0{G*t9Xoz z>VEtxM1vf9L`1)~q0Kv-;fncbK(lEyKiiOLNdcIn_%0K}T|BQB)0<$QP#>S7$LZ@B zbEU++{W+s~Fa>4HAWFUT*k25zNY!wp(EU59dz~_zn*k!=>olDE{o!vV$4sx43bY(`oxrRe_w?d5#S<#gD z+W+v^!$#a2+M7O%5?dhc(DB84G`0YXM)=T|3@oWIDh+nB>bJl_b+w9%4Hf-n>@2Ve z$V0`U$@1jvCPqI;&Y!y9wVRCrT-vg7Q+ejzgZwL2mt*`U5)C47d94zyd4jl3Ul(W^ zeYd8JU1kIU!70HejJQO-ga z34Ivf%uJ%#qOwG97|^RL=-C(wSA)k@;x$3i6^s?HGe7ykw=j0k_}G2XUQ`n-=N zY9S8Sb5~N-AoZ6CW3f+@`k%L=JuxjTRlg6KOVuqNbPAcb4D_4P`t44B`rh1>5BSw@@_9t zzh+LBW@)Q#_}oVb#B?QXmpVgYMtw#uw(I6?d0T4^~oO*M)v zmK>V-_6UUH1G0<{Qxk|l2e0%dWomTgMh{|ISCT((K{piQ)9A>Su%eVYvXV>(n|wG_gm-icz{cD=0|m{SP$P5 zvRv5&|W28?Xt4o6C0x<$D z5*?mB(LSv*8673fk!8>h(GQ->xO(h-kQ@{@oBjt`b0h3RoDUVFR5~TyW0#?XG6-LG zh0SLGA$;s>V{Ym3)z)&PqnUtFaYH$GmQH<}OS1@z>TG75w@$=6F%7L5ohhBMI%vJH z)LPc}=RAR#axKBDVc(d*Ubt+viOM!SKw1ZAAqG5J#{P_E05?~4VOU@Q+@SqHbN#&8 zyF9LH{@ly&3-;L5)b}4zdy`eG%92bAj52NmPZF>8iQ7o(1E?8*EC%@sU%^u-Y7f3fRy4(4>8hf)N<{ zHK!>>vuF-ocL=ronnOPFQ95L5@h289XI-mxBH`>;86{+#I_cSyhf<_@-~veR27G#-U>&+zxi zvdWf`&liMqip9V?(HQq#Z}uvxGm>WqvF3lGR2TaooO}mDi#8HrUoZ_KXXGu(;S~8A zs2nB=CIIUufK^?C3k+dOeZFj_CIraCu?Gq5f&CcXquqc`e^eD05eBU47`TY(#rXt| z*GvJexeK_A4hDzkGqvS3%2v~d@U^9&`|FeA;#ovM%&4;Gaj6Hdl(d^D^;7fIrE@h# zc6o}W%qud|$&##>I|ljIzvO#o$IW z3vn#2^}t8E#yIhp>H5gh4TxNiR7yRVKI@oC1<&%Sn 
zE!bJcvceoZHR?i?pk>MB??3AuhCkeJ$jf>C{4)!9-zL7~AY;ovpI}BS5drNE-a9?`$gE^oPtnl4i{$%fIuwufE9D^lMw1hrZ(qsA$j!t&~JT zLZ%S?Wc07iJICZ#TKq92vki$G>EE4A!u$UQ+6b`8Y>T0|a_d{{7+M)6PSR-QLx900 ze^zWEYU#)J1*ehpM{PkTd=4DVz&7@ft?Tm5n$U`UQt_~sCyw}2?&-E^@Z2UukHa)Z zU1m4S!-H|L^6j03xQMv-Uv(o{R0;l)%1Tq*dJxwv@-T;H5ut3FO&R{SPSg$OPz_ z81iZ7dSK$1TbiGQ@D+Xv#h_B!$hpDvC4XFDa#ECL&ZQ`EMgPs5D@G{Pi;?mHC_RK| z>8~=O8Fj#@!BZf8@Nv_zoci{&_d|+t)?9jqf_)|zJW;4~kkr#%tdkZ~3_1)d9hE*y z6XdG;-JoiS5hxpX8IJm}%Rxqp2Idn`-TD;9w7R9c&+MilR!6M{e9jm_u!<$wlpL4z zC&DYNXk?sINuns@uS+)WT|}O)kq{@IJFezjxKx4U)Eu5iQd?G=gi<|1>}Fdw87Bod zVj#GQGx$O+&Rt;dE9>(Hp~pb%TDsD$kD^zLwt)*j3GMroLBnKo^S+DVTOHlBI4k!b ziiM~OgB*N4%l*N{8D&*ALPFNL^RBAV8L@GWR@9qtq_&tr-!67BvKQ%R+;uvEFu*al zHHO>dtTdyyX70XRz2XjZ;C(pdY%m|-f_y?=HIu8)k9g;nBwV4Y2uO!8;&#P@8N?Fp z2MvWYhz~7)dx$ZJ+ZsV^(d97$+nq$^9-c2ZcUCRk7A-^s>z7fHnt-i znJ3VlFU=e>kH(pepX(1kSv{17P5Fa|Vu}clkboxGJIGfd27R~GuAUY%qw+q01OhoY z!1=m!zXyY%0JTNf5|0$QsU>YKU%id+bU((SZkO6^$1y(lciEDc*;zTB_~Xsm_Hre8 zT@av}$&)fNuN+0T^tR~Poxh>N1#MMh#Y?04tlmA6=J5ZhOq`;H$%i4j*V$*XDaL$0%bfy+C;ju068D2`6=GNxvPY&TU40r6AZsR{} zD8g-*=rF+r8Vf~5sN#Ics0ANjHS>ySmMZ7J(kDA5t7R)yTzIWnh=TB3IXcWcC+fVz z(mt$~Irje8P1!*vqZ2bLPA9P*g0fY7cM@UAI`tBfnsLSkV&I>>S%ph*oOaZkGP1dJq+35y&4SEiH65k}UXRD@9=e zxb-GeIxYq}_ZE*uzJ1BXuYR0e8UXY~0ThMk7NwxgSw2&t!RYRQ=mE1CSHXFebByM@ zOJn8WL?^^kz-jf9%8k`#!w}X|lYn1or1~VO{m(g~Y9zY!w6A;_6%nTM3`zt_kYV9u z;pCoNjPmEZlswsRsQPj45iBaB-f~vTXoe=C57^3uV?{dVv*kb0AI!3$4>;82K1aqW zO4CY)C%8sdn?{nfUchs)pPPNyuFO+-{;h~V0X(fos5&}=>#l^t1`EqYv+P*^sAp8!kLz{{{N~RV8Q0B+#`!X3X5DiG2 zoXL&z7~jaIht}2f=DUfL_y*R4Jq^YYH;;+fML@4fxZ?;WykKQ-i; zkbNd)VxBdTEO+!`+MF(Ph|;Uj0zTI$MH%_pxEU()9_SO%_M(DBhYJJeg+~6Co_gh% z3f*nlown^MhQ`>2hN3|e1!`ql+Kn;Sa6r=sdFP8T!qLW=pyg#=yf#AxPsfXJ!@!6OevecBgrf8Bu!7QxjM_)mSJbo@oVCPpI~jG^X1oXfJWjy9RG8ge*iiU?^8eA zJO3ls^aTA_S{b(gf|8R3d=RFDvB^8)Un%0Hj}z>Rgr?b=Rg8{SAf;OxFLAxMHt3y2 z%s(tFYS|y9bJ1aYaOB_8-*czz!jgwDQFZ;=#Sjox7gr4L*kwDEe!Fn12vlg-q#ClO4Lm>x`h1eIvx-`g z0P!y8hn2JS)NRg~e;c%$vHNr#;z4bqFMu^=z~Ot5PF_iL(f)zADOO`cK=2<)7 
z1bP|z3KB#w`6-_`><>_fjw1Xw0a4+IX#xDVG}%iye(^XNU3*dhek1k z?Ly{;p3aw40x^V2C7?#dIo?SlLM~H?Q6HC4*Y452_uLeK-9x|_i4?XdbV`VcZ293} zd}L7n^dIrOg_4T)Pq(8km$n^)hr(PfEjNeS5BH<0|NJ@jz8WiQl>c#+b^F}zAloX7 zrt>t@X^_7Z8@b)BylQR4d~|wh+y&?18^pW`{NUNQ_fR+DIJFhNFs+;847Z;8l}Auh zoJbTQnh*mGB*K}2QQ_h%w%y@P^nK!Q8h(QmPyhUAA1%qKqV26Q=n!LOZpLZori2B# ziFflHa%P%Vk>RNnLP}eNnUF2!B-*%cUVRavTqsSZMtQX7@!Ideb5HS_MBwIBrImN8 zV7T32JsTM@SZs(Nto2c0vA=sdLMZpIgZu^K2L_6Ms+EjyCj4#)hL}b)7*Zc>pNlWb zJ6$XbIntCouF+VSUPw%7V2^SQSW?3)F{v<2`Vg4A>t|X6X6Q%%VXX)?$tdGAe%T5e zD8CP^@M!pK^axeg!qQ+id13qjVH8>w!b7oc;_wO9wpCd0_IWs)g|NJyZZnP4M;x&Q zJDS(o8Zj?6ZM{f${YHE6^|X!o}7 z2F9N~_|~KIvt8L5ec7^TEOMNIELvUN8hsR;9Rp4gxF4Jhr0to8*8woaMVvuHN(sa6gCh8w8h zma_b7e7a8B7N4_ri|{MtW5{O`)QZn*?K`Y&#ww$z`UW-kwOV$fD+Z`{e{6EX>7k5Z z;|>Z0 zp$=zI&a7Sceikxxy;90a5&tV&RXYc(Xfxn@Gx>W)Ce0vvuxSzvE|YyFD*JN$3qz`nQ{}*Ynu(`TKK;@5 zf(T8=h~Zj^yAcKaj_wr3lbY9iJACFzyR%EMRab-VOoD7x6!PN zc5ugXE#Ip;SD0Aaz}fTXp5>^#d$bcc(>JI_h}5Va<6?x5lc;0(@xb`-X%=%KLhMT1-A2<2wbPP_qBvye$nz=2@!Xd-wQPEp>@=l_%oXN z+c`iP1hB<4w|br@ex|=pQ#gBkItDdvuSXl-JsIUpke;5@i?NKBhmDq>el82?My-2% zUh{N;U6t_g6KH9Du30}}Z)M>ky>7m@JQJ*?k+BxM8{UV$xzT-QUb?V&bfntY9F7i% zBP@@2eXN8`JU2s)B177Ej;3!+w6oJHaVbEY!oTojU8+j$1x#$O!Dt_X{KoH+pPh(< zBgvuzUbB(eD1v`h?1q0{NfQ&@Y08q-yp@e@5!8s~#lT2NrQ-^7+B`GyzwHbfEehgY zN=mjzZ;qOBJi{<$R(6LI0E`7NrW@tsGPkySt{!dk)F2bFM+{ zUB7jg4QbP;Te`*|Z%n6w*I(>2=rK9{v8E4>zThS#U0nX_P-H{x0aDJd@kZOAH{^{V zYTUP;jqOb2I5d4_jSIC&pXEE>=ki|ho&D{C3hIc;$`6hG&W8lm`63Pr4s0~n6PLl! z7wmTXsg1`#bs8S57eLB%7SND)91GRv*1g+aO2-t362vPQ6IpkoCMrKTf^wv2E6@uK#UHg%XXA1zs?|TZLK8l=K}i0ZZZZp1n20?y_cXndKG) zjf@OB!uVp)H!|Kba6~2ujAu=UoePo@Fi~bq)7&$ZFcg%$vnDev#<{rA%h`RnB8(f{ zY;Nd2ju`XNP|-~XW(uV>;O1fvkUJ;p>uWaJ#kRnqyXtG>ii(QrODQZA`CR1)`)dnj z9}hEkaiTa^$mJ!S-vwfGB{6JL3L*%*<4A`W=e^WAmSg&3CEUg)fkIhsxPSDR$$=bL0jWHqH? 
z&c7M<=Lm;a(yEI&L#eU|VErorlzZhc6~~$#`q2cE5-`IIfepA8i%09YL=c%P;-dqi zFo}b`GkPDowbxoRdPp#F;L!=*$f1e7{cH9=7kj6bLTQ#X(95asVwKnyyA{31#0eb#q2O3P zELN&8XNvsezfvAYq6NfB$!1dr-SL7amwjM?lONWz+4;=U@b!q7D^hqNl}cG(f38=h z9z2Zx6kn~U%k(Fjj#tf-7nP6S{$di)BEGuPNzrjzDXA`?{;!#Dn-_c(jC4As+fl#NYdzhY*;O){pR{RjKUy#sTRQfdtYf!*Ck+yvmB>I4bqHxYx)8;h{1 z{k9zeRDCuooOCp~xS}E277XMPUCgpXd}3sJ)JJ=c4EtYCh}K72e%s8J2M9kc2?j6w z%@tu{VDQ*a%oaU<{wSDNP|3&4`3i?z*xdFjH`yrAjKEYcT7F_aX!GM3w>BHi!1nQ# zt^fCqsbp`iEZF7y*!k^mRZxqbW?`&Z8YN|~RBx$v${35BGaVrX%iX~kiD2gm9l()X z%u1{;jvVs6&ZMLKv8FD`-@B1J=hxVStldLAXaZ8oGQpe*Bcm-23q`H31657{NCL(i zmuVzqM6`WKYOMS8WiX~he3eAAFw{z$l2Q~7<>-gDo-fYlN96{o;Rg73chL$yz7IbY zYH{h`?zerI55BEz0erQ9RLnCMz<{>H4nG*?xS!fFR+>%jQDvEeswg{oC`UkGEl(_a z3c7pg44NbS7nz+`%CL6CS(u)g`a^+L9%sQV z00E5K3Db)2;y$?2&-x@nb2n3ThscrP4G+%@q%QjFiRl=8-Fi~f*o7+?Az zdu161A7*5TKc8DFN~)*XsFr6RYvd4R)}e_3G$8X%={W05i4w`%*uACa)nJU@gLt^S}1a z@L*~-ol!h{p8*7J5P9}nr#7J#wzs`FsaGpJ}WEIkn8ktlSSp&)?kwku`YIx zwt!P}d3-f*U1M8XUxRkKbC82tiUR%=iiwJvzPYmAy-18qCt5lN01$y?nN()KBa=e> zXK8r8YFgm4fUu#^Ltv-q41^CgsjVPEgF6a>2hQ96IpQ>L_uB98W66Bmgy#_^A)>8N zmfRp-3@>Rc!VrwFL3YGrqr5ZB)pBs#-;s>aaOdsjigfU2i+s-WgkoFX7#o0DrNHLv zJrT#`o>_{oLYZaJybr|ep{3;cYHX&%Lg!O;)Q90Va|E~1oKin?ktj7 zc__Z*9eoQxr2mE&XhmR36iv(C)QWyi(|J63V`4{5M!p|phmVXcyVeTY^3l+Z;tAHK znT+AZSOTppoIeD|_SD#uLU>mYl(Ks3o~Xy{i3enz%55g(7fq!YLWNImVo zA|5rq0iWoO*__6#xqd%!4WYX}BJ%^LZ5+gY4SA=%>7>{vZ-^n*g`E zFz4JrL!L-IuBac*VZ3LRclc*q!A;WkM+JT3f33ymHF2k#6x^@zzg0rUM8s5>Dl|p`8!I)>qw592FdiTq+s@1v@P3zY4q1B+lD$!lm7QD6YH@%(Y zBT(M}m?N8fIE8>$HB>wnixPSj#^NBi1 z_K%rp!FA?#bh16sa2&R&B{mCEb-ez6ao{I&q|^PaSA+PfRYt3uyH;4JO>_P+`ySLg zMX;n}S%hyzv{An+zhze9@JE0iQ2%f^zgQLMY}>AF!oU!3iNH>%ooFy@Grj!) 
zrG2~aWq!g>_piwHK9dflcSCQ!h4!NYIM*fT>t+zBg9Lh{YN+{r5ckKpn@;4O28vI8 zIPLlw>J#R9?=e$MfHRfS`V9|kh|*Vd2xg0H-2cy_gm}2%57%1vl!pjsTcC`Rk6rxh zBHE;X;i(fM=&g6vi$pb}?qS$6ZNhzd$c?vKj@cWK;!B5atZ&pAkb+Ud-3m0lsEiPN z-mh{;3Q}<%(F6>O-$yy`wEdOja#?n(NwZLlMX!ew?dFBoebBKu(nNyYr)r*m3-vU~ z3@_gJu8n=4_}Y&9;Vn7W>t!k9grkyCw*T5Z z$(xvM_}rC8>Zu7DbM()etqyZ@;!ab#1(?FZt8uNkUf>>fG~o9X;E*k>{<&AY?fO@& zX|QA0M=5uUbk06ie26RR-{%TqR^{UDMY0o;vc`|nH#*O!mYRh^%k`3cI05e9Z7`p} zQ8`|U!-s?`W8=XsxXOaNvBv3MaoNui@UC3KqBkF-jG9sPNR{PthRMJ!At4_T|J*?C zawV3f+0?Vhw~2i5kH?KxT{sT?MQa}N#k0q6-eO^FO3)K!f58}m8#aK3_8_AKg_Ou` zJi)*4H_cukJF(;|Jyxq6<|xZLqrCTt*stZnNwtj?~o2G}uKivGn?vf1L zH5ViP{2Ri%SaZ`jydD!r(DY?kIBb=UVE>AY5#c1XU%b<_`uZCVg#CTO8;g~s(Ca){ zGE0=*oA54IK|sy4#rIosb`LYEf0Ey06;mXa&FnSs517mLsQhna)el>N?>}DedrEl~ z%9LsoZT|@_-zD_zILGh+30gEaf32CYB7kL9wo)8)JGkCFdjF?AziONvS$8BqtNtEf z?}E=oBi<^l9Z`e#O5-R&E$jCwM7#X6yih_#1a9Ot9_1wby5ByAD0j#;Plkw`z9SsK zh&|;M`lBM7nqIEk&AStOv){?LruzVF?&`B(SS(*Kfg3M!6kvIz;E*1e{*-_ar+dL5 zSGRMEOJ#toqz_XmyGQ9y<{iu-^a#YA7#?r$Giw*u)(P}#Gn)Ia=Ff*2<0!+?hc?nE zxciMQ;t0OXZs++3l#WTV$%*c);4$j7x$yKkw53zCkOK*REHCk%sA=UE9s#ju6|xXa zig0GG@#EwPFKDFi&?cEpjasHwR)fii_|p8bcAZ!d;3o$by|)1{r8&0qZ%i}6#H8RO zUkm_lfH-xk!g07`<|LbHZ6CAw#642}zRf>AOd~IgxTpF?--R;gq6ll*bo$G3Hj#^s z;&8X-z2}1+0WAIiIXtpJCfePGO-|qScmJOt&^4~+8F0R0DJ>VPv9~uc%C_r6 z#@2~ZKdj^BIc4l|bUHU9z7UlHGdq?8grIrud%&|%YQ1^>7^UcrZj~&psVvZibopXK z*B>s+$?Nx^^b7cnslf-sV$iuK@fM|d(_+5vpYQI$BK0fx5`1< zvhfv84*o#}8AHm%OmRYVKCaK~$j}gW{NW`)2ADALUo&YY`9SK+#)grgQt+L_%cOtv zc74J`>tfIR!&{tu#2o-0ZEpdVOZ{_9x#B2^zk0Trc78T;s4D&dZ%YQvgg%Nbb{AV0 z9nn$}kh2YvP$Cc+FVON3Z1-99N6(mM5w-iLYI|fSXOkW6-L8b$z$IT2Q;RQXma2Ky zZ7Gg-KUNaUk)j|^H%N`w-x;_U#u3#YMJtn*b-0vTQ z9!IL0jL=2NLT$sVtz&JM1zjQdG$WGsSdhA?`S5SBG+3^j-Pg}N1{D-PZ?@wR_$Of}Cla|syQxa3ZD*&wPRGV14`GqbI!5UV%5 z$VQ^8h#}oZ#UUsEtVL%5$boFStVJHsPt0WV%L{B&P~yWlubY)bn|01!yQ6X3qTx;W z_yd#In>YMr9AqcnqcP>mqc>IMe-!=LP?EBAer6DZ`M0!Zv@IwAE4yUGEWlsG3ka{& zaG3sMKRK+9KOS$A?jwjXlK-Hap}Bc+$b2b>;9PxeTBYcyUa4wL=Ac` 
z==;U(JPdfrVs80AHzwBE|9p;7%R5Cr3}*pTWxnn--uTzb>_8GJb0LCvsJHt6$PhXP zAn^M4&Pry=%tSZId#34Q^s!WIC%PbE&4{G+PvKCjYXUkV@LVFzN=#}7zmqE_{~ozM z5_=1U`<68Ab0Sl8S5q*_F>c?gFqFldciRO|eK6IPB>nZ`SWYddlLOcU8m;;!80HSqeB0Pnl|^rQU%^M`(V4uW!qa{CPR)~*2qH1j#V zO#Zv$7RqmuGqQj;V9i*hWYrZAtmxs5mVx2Z@pa7HZBG=1^b#pOzYT!v`S`V4MQU|u z&Ok+`r4lVz>PFdb7Rz~A{BAw_2}-WBTiYTsqfy*BFgi}QWMbzW_KRR&xqikyN2c)n z$L*fuK^9kCz^rz#L6nm>8>d?v^zambDW?;y{Y*Y0DzOthuAb5|YHts+O7j%*sFfX_ zn*Z_NlGdOMIO8+QDi5KW5WuefjK#e{ot<|q79+|mn~Sz(mtN6xWk#VjBg=t0`lF!$ zJXJ)S^A^n0KO_}h*#lL|@k16qx|v!(wXKb{;MU8@<&Bs1llmU`{r|A&G>@v@e<&D5 z@5cytRY-_uCGw9{TqNiLZ=c z$S&{GU+U3HAtuP~)+~9_97+w{>vub!Ss%i(`c$dJkVfi`iOlUYA}>Wa#3SJCMcq?8 zqLKIehj^Qz7bvPvBBXQ2bW%xJ)%AXF53KwMHdy->ic&X|3tWDX?C(=|MPzBTucjhz15 zG5-Em85RHyxN^eNh*5N%)30@vPV>8WRd64uxIkNSE6~ z3|i&Y+I4HPv!3kL2BGvnoJ`tuGiNj=QJ6`QqI3M8M96G3T6AoS;@X zFptN?6~EIg3kI@h7L}G>N&ld!CzzxHEnPl0%9AtE{}w+SZ`QP!II;g5%T2Ad4iiLANU z-i)!95-JG2?Zq&4A-wc_={ZE4tD6ZR=4%j}D0^^Pu_Ugb@^Dx6lXcgOnzN@N?)1u8z5$Ik*_u047m zbv!7%nN4u0=aAWG!%&A=|C^1#i}0^J^1LJ7iAKV7?E) z4KDX~B~&lRP|7rg@V!VwJ@G=+Q)as7( z`B-^Ky_wrL*4Z2$!n=ry@nyYWX{<(@{=(2HwHU8kC3)qGn_Xh}YNCS<*BI9z{Bh;vi>6TZ!f8REf*ij-ezJer!RQrA+= zP|bA`)VsCn4%Jh%iVpi;Raro4Bg0XIldByljB|P5(4lLAzVT*gS?>`x;3h9%4a!N_ zTM>>JHEp`QF}&Fy}`xX^rt&7E&PKaOblVAid&x1)qRnsGf8MZ^Mob5M7 zgLwb9Kp$aYK~@*5L$}N*Nwjfy-q+MpA24aAa+f({Lwx%w1nYZe2VqAD%}6R$wbqI@ zuJuSNA^h6))eK3Sfd;51V#53rEeksT=fBg!y!zR0yf3t*eIBU+Ym2V_0q8 z&MG;`%Q+z()D5Hw5k$&(;S7y9cbRa4=CpjGvN5E@FY;%YPR8hB)L+)k?CXbnhM7z2 zSx@ZU(R=J*oeH5q+23Udu~-!c1I?)Ni5=~#*mnUXM^s*th~{rzw>XB8>ErkcI%nEMM6IFzn(@lo~72#|YsT8eaeCf<2%J>lcdasf*t+zV7wx(JXMo zM91f6I#FxzUr<4%Bg-YdDZZVrmdng&>~}t5L8)|iok;kamFC$Iz2Nc3n@+o(J=Xn) zIgLzhQo+m^x;ST{k)cj-EG=>uduO`d+mbnd_z~hbl@uV3E{@)h5J#Li=85H^7j@6H zs~hgyAi;E!;QpAu`p-;Mylu+YtX_Z&YFaT>O>5^boG}CZ@Ydqx%Ub8T4q$b%VE$hg z5W!G4;Hu;jv}Cy_TuXl{s+P5WZQ4zuJyKFq60IuF(sKTR$!^> z-u59^4t<&OD^_N2wNw^u`CI0bWJG{a2(%1Ty=%bUaqSSPrL+I|q{xW^<_P$jr7Lg(wa79jXD5fn;G|&Ai0-LNe#-<9`rI6#Vf}<#s;Z 
zf&b^4&hPC;t9ulp5kP9S9qHx{`{b--iekQYPv_ks5+sDa9Fkj5_3yUDSWz#khnR%o z!RBWL0{2e3tRB5v)c6NRDJO2da9+=(!=1Epox;o_jaKgJDD{;|Lt-sItC+-r=0@^A zSY!d{fc~OUg@4WjuHa5ET!0u4U$3ZBM()#KH4c+}2lnVeh{BtNEW3DRCA8I{RDm*) z_$$5`0t4i@Bk2l8+Bh!!{I3oL4a>M=O_~RsDXu@(5x$jr23c5;gX@44VaHJ}RlieC z@UN8*%J9i2lRmnrbk?|C60nOP;Y4dG1N8Pw*=7QvVYBp{)^FL ztPINT8Oq)fw5Y{y^GkoqE|(WQnwVW@NK_NWV6J=v$f)QJLu67KEvk_VY_{2Q6G*0iTnR|!X-B_1 z)r2@nezs&wCjK(-Od0;)=Il^ONk(j^H9ve2$bRJ!3^+H@_D`aXtU}I=N!qNxXe{27 za6Fln^3Zs@@~n*HpK!m7b`)f=VLr1Ei<)^EbDyMrkZ}o2SatI#2iZ53`W-jE-(2Ev zY>>wS8JS_mxQ$uv)cME4!1~_i196A;ebD^_G@##Yn>!unRT};4J^H|dHLm}fvza`e zvwV%2<$C-)Ia76LzrTc9Gqg_(JQ|f!7d?6BZUmzd3kM z?ETE-|7R19M4y{m$T5jxGqcqKOUOkEYD1;l50$Q%z}9IAEBvKkq4 zq|QLohFYGyhnq4gj~K#&hxMa@0nR0p;9IcSh=Tt5csnZ!<_KO}$H`+|<82m=AFCb1 z)%!EA4;W%(N;Qxp!qJ^NISa^FE~&)dYgb>|1ZMjEH+?$RS?ZsCP!W`fa_(8oI)9$- zNX5}X>R1$((rP?>C*qxD#dNs7xF2I?4k(P6w|=QpFqg&GENs62kyFotjec_Z!jU<;N7Nn&5123PFlxMzwsDlDYQ0Xi1d;bKmbx!1{>nx zlyX2Czvs0e)x^kyJnsRv=3Ly<5YUIz!rnsY`ZOOWw*(SdV^kc9we=yxrXnNm>=U<0 zVMcVfr9UyHD;!C#5rGK!yPAB;#jPuLText?SEOCOY#K z%64?eC)*Vez*pRDsUdC6aviV~pQ z-X7T%>uc4;tNp5Y;jRzCxBM~g9o!S#T8OQ2#RReTc94M_W)zpJ+a~GdA%WXFLs_>Q zfkI`mPt6#kc0qaFY1UO!`V>o4`9m!{(&=QE2h$DlK0@*0mEsVHo$L~jL8q+T)XQ#j z|C(`7Ix0JHl1(Yz32XH8Ju?}`dm^zYF;%(WCs~_Fb7lx%**mMYv*jv$$NSvv^l2HK z_R0Jc7F&uP=`GiQs!Jn_S;kHlZhW!nRc+ zupf`MfR@I_axRh+J5F$Pg8h%}h#5Cme`s#0E}>VnO`e02WU8a=e({5=MQRVw0RqTU zV9&#KWY||ONeF&Pvl0OKVtDTvI7n5F%2m$F(Llj4UxFroL%3K_FrkwYo?yVu)pj{% z?X|e|tD?Kc&bpUr;beU@X>YZj%u@-^HDC()lYDX(v{F;pB! 
zO>nwGwvaH&jDGTV2)@1zV6zX4Ocomy}xN=@g#%jzaQ z{WHoFdu5e$n-7d*>MJhFGO!`*^^;FKG|w{e*?6y+$JA~7mN`6d1fA;CLmM551#~}J zF$&%F&9{)tfH&22baywV*>rM`DQ~0waCTAmlSnr08F6ro+hK!vywZEs(*L2td)~nD zDC8q2X2j_fcTv<3(Yx{wKkC$qj4aDf{RW#o%7xRcV?e?lnzsnO_qi zCL2%I9K|@|D9JQ9+w~0&PTJ-$&9V6^JfePln`4lFE5>b4NOoUr$?od z|3c{9)?nGrz|S^+P8XMdY79LBM*o~*ywv{J^b*)s?g#eyz+^(TX=U&4y%dtxyjHmE zx*04Cmz$LH|OIPX{ODzZQ1ZY5Yop;wJmHJd0%+-d4YhpgjA3n6pA78G0(9VPT@o6-_jp_$z~kmFzS8?hiPpQC?VbYoo7p}`&Fl8gve~6G~9?L-ZG~nG9((} z)V-Sp8o#2~eyQIVq9b;F-z>V2?DGNF0aA~qv%8YP_};N9vlw>W)xiFf4{;j27Pj0) z=@(sPgH5y)$Q>*n!Pp%v2o#RJTPTqlbbP<$tZY53?0RK?UsuCRtObTOXbdK333ZFk zD+BhT(da1-{KN>HUV$@b--G00jhRMtcATq3eg6G#?qFGUl@(8JvW81urT@2}k7~M2 zexUFwxt2&z#L$%zX-hsA&y7KFeQxd1EGz@)Ze*%L?KEuS+vd3L${B^3j*PF z$!lx=C5@JCMklvEux>b)^_aOy^EA$&&smvX&H?%v%D2PK)(E6sqzc0Psxloc1bK`q zCw}BUqo6oh_NI3q7;~9n*|%XPOkrS1Ls2jGPpn0|GPO?M*F!F`h+~=mL(5>B?ZJ(z9&d|0 zP#oPru-OltofS9qUn^##%{tp4z-+C+2{1hOK{H%}hfge+{l~>QQJg#8`}ziOhk(^} z7yqs?tvKf;kr-DxdAjc{B{>%#a!V~j=#_}vt9M3_sN?iy5(* z-l8O)qeMVF)OFK9eL@H{&9wY3t&}8M;f;w+A%DY>I~r91Q>q$)uhXvZo75FTS#ak* z_9w}y%I(q0F*gIHn*){u|<<`2REA|Dx@`F(2{%*EjiZj4JB?8WsPI zDUJMp)iU3?z-!q}OJ^POw9TOEAxG^`X=>sTo)Frv`6zVclN->bno7s8LICtlpQjpQ zSnV^l!e6IbSv*mpr=3dY;W#vND#-*XDad;Z9zl9s1vF@~M$7Eh7`5 zPoBDZdTQ-LyW?Gf*EfF>*u7xsKoRuAeY8j}xz&AhAiGPn2aM!%U7fL=Wdu1-oPKz6mcbE>1fh z&%RXFCqjVD-<>?TRILyL3wU55NQif=l&qZ`AUqOFYeb&hdkMbm>H(cr^_J!0`2*bO z4F%urZ+#r!&xNDBYd)&r&S~O&6DzK%416sJHq{;QTtc{JHG` zzWzP6BH%l<&z-dS%c04Oh6OBj86c8MN1Z!nK|G+~yDP3){sMfs?{#5tiROCL%4> zDb}RU8gYMq3ch=MQt*`n{e*=-e`p}YoV>g~Qzim+n&r~;X%H6|)dHQ`6w}(l-@nO= zHRFOrL&4q}@=*^41_tU_5^it(U#&rPNu`jgUOR7~uW#<-lPUSxZOTN@S5{V*kuc$R zSD>$Bc{*qqW4n`|-<(>e(m`x&c1GDEs&y;3&iyHb{XW$l6HFcX`2Smnops3YXbbv3 zYskvUxs%6aWDo?c>6t@&EcbHTFr%vku&(m?j8u zu^;gv#8XbZAUv_T+7-Ca ztWvBgFwNJA76tdi0(b3tabe-xd1u;jDtUuTi&B><0}31vpb_H=2P^}6PF`N=<>jwK zP7}=3D`!pr9zvWxV!i6OiRrS|J=WLPr{JrQv>1Vm{h^>>_o!t}ckGW8XU`)t zK0dy2n@1D~4_DS05bC&#DmVxr*a~K6b-pgL)b>2J_qAJUGrsUU9|KL6$fzhp;4A;z 
zCJG2b;h>uopN_H7QH;&aO`vqCN^Pc~lfX4gclY<;+JLTA=)wx6)4*YpfRxxF|KP_e zgud+dwJR3)9Uiij*mH`BO$-kYD_kf6?T)7~#@q4c23<_beJB|MpVQLPe%SFWCH0U7 zVFQ)6)9-yi*Lby2%ii^U4(P9WAR$V813qXL`pDDJ(4efMl9zy)b9O7NS7S__p;+J9 zIl4KR3?%lz_`|=m(H{eVD`m{4E#Q)A-|Oy6hj)pHFOstX;kHLM<$!+J!5-&W#eR~aNwAX3DGpSeUstCSyqpggG0PATa2{ z5F}4yJ-2+^xhkvvyD!1$keWWMY3|oqBUQDv4Xv&69V<3gR*~9F-cu%Nv~}Ix<7%OI zTEOmYK1ZakJ&(LlBxxD%bnX|N=2j@y)X$Wu)&*Y;NV2Gx=grkQA_0B`i{z_$|0r4z za`)Qz_;;^6`D*BMFg-oJ7u*w`L8gpg-T=2sW@e@xH)(5)Qn|J!h+>cUC`c=T7qAb9 zkkAO&kV?J2upVD${%`%S&ba_-fYE>Mo^N>wux1Ev&dtxa1>f^h24CR1cAg^5*P7Y- z`Q7|R7-O(suERp=QO{6BT)Q?fHns{5emUqaUpjpkg$Is6{6LftFPdB*&l3td{Xs;h z&?j`#+27uih8Yj#jJogH#gVneL#49ywcCD z60*q`5)l`Ny22<-!;W!a?pE5YSq@zBc1SHmvS^YjL-7qKXE=m-_v}Az!=6cp>Q1gz zIok@hDRwuR4EEl6y&Bkgf7~G`AtaHl91SK5TH76A@7a2KyvM-Ad_S`NVgUdhoIcZoHEF zzh?nzB60O-G?+D7nCtEJ9HjId(yD9jKGe)$nw0a|UpAsLduLS5Fi9}Q-#Xyd98Wd1 zWx0$x+mWLs!bdJez38A@599xMLixnw^D!kd62Y&Bj@d)lc4yR2*mK-=9GiwFK%5Q< z=lJj6zH+Tnx6?l|4-dOygPX}>STY)Q=Dp$jEi28PSUAa8W84;=l!WzB&>O9-t&NeD^@nA$NOWgsCo;`1_k%fB3@j`f zVvIip&kqc2rk$}r5{QbbsxSh&@~h8nTMy=IH-EQz@`pU|*O+$Ykb?t!Q^i3X{6j8e zRq&dV&mf`*LwYE)*i7JHSd5x6e*F01xAk65UmPU1b~Q{atUtpU%>1EG!T|vRpDMuX zadKn&`Xq@HWrZ((NrBm-p<&s5e=PJ-G?-NE>4r8EC&iis%%a~NqabWv<-Y zjzEnN-bB6BV zS!!}@1xbL-q&<={f4=-KYzi@l}ex-=xMR>>VyP zq}A@X*3l0LKtUuDr{7eXTLcLi8SJ>pY8&bmt$wF8;{{^L&<*(e`$MG%0sR1nE%v~1 za&-N{alYnrX({sax!5y&!9)zuyE)Vt^%jT}{#&?P+uM-da76t&b67$)ukETO;BTSFcLmY8u+KDD$E5wV*}^OFxiogYAEVPoTq1VUDew^Fok zMhE82LKCm9e4tJUb|>9j<7t!||HpUibjMm+)b3r1 zGr5^;COK7nDuWSKPfsC1*DK(KyidZwOVg{kUj0=|3tkxpxSijBzB?f&Apwhqj(!TL zNfFor@SvgmbQWaQE>z@$M!+mNI5^(A@xKGp0O?*vL`1~O7D1C&;pCpxRz@s6)u}Z7 z=`~R25KMptZ%JVx4U0*;*DzZ<@M0M%oNoXr+(3*=7io;<2@!z>3$7S+rC)H_j+2k( z@?i_+v5~ZnjEqQygovJ z4uNdERC!roH$Z`c@b3qa88pnHP_mP4`!B8F0Rj9S)GjYAeE|~>ya(XT((>}@7~k4D zAhAPdgQ>5Sj$0o<9gm3s2iv;h0?ni7ge>2_)Jbu!zT|$ zKw%OQ5%Ff5Jv5GhS@*Yl7d>fWVNVaGiHXV0$@7yd1_p-t?b-H19Rxl;K7OI_2zCIv z1x1vw-d+i)Cj;^oy>=oeCx=YPY8VO3x>;VDpW_yGdw 
zoSlurXHZ?ADpq2A{kr#mi4nRAz@1are)xC5(V!U)8d*?fLDLq9$xK$GXkcwxc6L|e zsAb^!pBozRLBv57{BOZB8M64*I>kEML>Y9c825qhQDXY}eUHhzAFqr+Cj>sAivpl4 zG!(rAS3b;pKRx*01c}}}hOiiXMX1!RmITfV+(Q3}s!)MC@*6;J;FJG=9jS;hBr~n6 zOuIeK9ApQN!{dJsd<1VbTHJ*%WEBk1pteF_Rw1;7PD@0K)hH&aVEcH z^Em2yl$gnA;9$k1HB>MJ;H=cxYFY%fgJP8wP&gQMP0etiO3TwfKhijT7*;5Wt7r?q zfV2hy5d)YJh?bk$=R0;5!$uTrkqaC;1=2TpIB*+o#LqW_V*fyOZ1E$W;3g~dmS}5t zw_yZ&_%+dCuF3$4;n0);fM;W&-V&NY0Q|mBOXIQ-q(%ThmbRTNN8yj~d{_G%A08g= z-ZV^0Lz=wUU@MhM_kw@`0)p`6%a=E&gOsPgq_82Ag)&8=bRfS9Laj(H?1uxa*mLuP z{oi@PGr8x^=te)HV!!!ITueaQ(>|tlf>d5n0TqrGX*J~GRPLjICsOG1{n~#yUrprl z1&D|5ZwP^g?M6XmBZV!3Ks8r=378Hv8f+%=^P2FMK^6rF@aH7-SxA{4f3DVSLV+%J zuSQ`MC%2#g8w4mKIw6&qSm<{JYQTI21qIiAIv^ax|1PH0EWy%2t>Ol6^yQ@cN3*$* zLB|GcV(;KU3b+7(?0T>LDF|#wE}!dv@d4Z@lie(-;PKGbAPagjpH!vr;eA#Xc!bgo zEU3?WK^)Wpwgt&pgbo2S^1J^*fQAAHYAKBunPO1Eyb?0#Pyx)}Jl&rFmqh6@EodtK z4G2BdeZ^72zJBEdc<(eb*MDKYX~{q{>~Q?M4RBw9ce1pgZ`XbIm-*uI5=wvVM-U?< zp!Dm#JOZGAwx5!Ks_2zVr$3jKp~x2DrqXR}4W+dP-JaE*4Hhe9xq&DGXtWOCCET>u z_TU9g`vK5mBEVqb$BXF?T((#Nfq|3D%L#yAzse#ZC6(kSkDl1=2kv*em8RBb&f!Lb zN=gvnEzMmqHKi$hw^sz^obB!HZkK;&w`?U*K#)RnC1^zgiEqm+mw~h4CjP)z6Al}s z3?&D?3PqW|4YxBcY6R#w02|O?1$hH1PY$nO9v1K#AUrUtY+#@el&Vl8z=-SUczJto z08PIwWc0h+FTHgOb4dW$Lplq1WxdC?;=`e>Sj6CF@AXrjBDMuNlTH5cs793;F5eub&Gv!(zL;`t%ryv<*8URcqf#&G= z!cU(*!IY}z0V~LYY^tV)4YpP1w8+UA-)CS3R<@-r1Yg`hh0j$+xV7)%zG5;4m)L%qx&P;xQ28v~BT3Uqn!5qZg90m$?-@d^-gosgu+(4jS+ui^= ztkTz?fhzm0I3;v+pzb+eVW~%4+Q}ZDyWMK8+mlas+;zi>d;i=pqcJ7ffSOd7z>R~<#r)N2-eNdP_qrJTSq9GFri z4vMRQPW#<##6VdhE9RpENvjl79BTH z>CgUE6gm;qK1m(oKnq&X!uSK9D+OrIAcF$kZKH3091mAR7t|B|pj7&gjMCs%>DR*n zaLhh-niPqKdUO_)jRPKe11y0nvpJvJ_t+Tez)teN%ZG-Bdi$c# z(?SjO>1k*R%fU?!;6Pq#0EECQpbr5D3c$C$f~0(_+p%`WI}-)Tz_^?5wFDgC=h&EaT~e$~)pvbwr7SiXQ&S2s2XR%Ad+w`9oasDNn)VjJzrpX%ucN)1 zAd2^DVx?)IN#_5gf07dbEZ;$310d#GG58#a1K{cZ{%|2sDuC7g_cMlrG53G}#{K{P z!TtYV2kragFi_2b3IOAzm$31A#paC(7TOE=cl7x0;=S}deB=U*?S#QYCbHR(1-`=d zTdIB2E)=&I3kRuNa}-Z9c7GhhP0U#TIiS>iY98FS;7+R}WBG5w6+)d2 
zG0kKmUW0#A8_Q9BHlj@v$n(g7^|OtH1eAVn22!?(Z-3qFn9g(>zz>$Db-4hS&5lW& zg(iepdZT)R*;>QT6O7K4u#~@JjDFo&b6h3Evi}V|cw1dWm`Sq6tQ&5x#>%8p|E>aF z0^uR=AGv$qECDt@*3GX6Bj!H0&(0HcO>c!3BQw*c$qR+Lv3r)L!*O8x&sFUFv#al8 z?Y<&xh-^iRkp?o5RoA7D-jKnmyg@pEt{zLDYaIPDkO?xZxl|L-fR^}d8cl>X0PiDG zZFf1-_w}`<1P{u|d)`*nJKR?C=!&$@p-!wUMpYa{rFvTr1$_PZW-sYztCm%WSKD-> zp3moIJlbUSTda*-tOitk7JO~AX^J3zqIE@oe5?Oe!W2sIhhF3T<)f6@uGlz0De&&~ z{vP2IiUJ`>p_uD$9glyRo0!5gs3dYAc@Reh7h|-MsAu8cn^@8VCy)a9?G#n+W-;ja z`wo6jcw2@8tqivLs0jI+-KGxx-rJAHA}=Cv9p{OsLVCDV3y{ZbXwmn_f^b1~EM ziqgx-nkJ6KJ&9rHKCHRK+-S! zmzJmQIY8{Ahyk2bD%^;pie116+^f@`H1E2ezj5BU8KZ7~D`jj$@_g%k-Dhar0};j|oH@b~}n!7OwUJ z4$n&{f%Ysi>;FU-LJt%Vt|};M2hj%)VDoTw1~q%z-6`2=NKNEuub%F%z*gnsQJdHJ z*Zs!O`jwqt_%tIXtsTa=E;Q|4eaTZPTX6cO{F}8s)O}PCWP6&z4iz092}V@Sni5h! z4@#%z;Gu76+D_5VdID8s>H5|hn0=R_zMuX?Xjc;Tvk9@p=@J3+wDbh)+Hy zaWa6WZD}vQa?zP*gJ}yEU)EvexVr54IbX+(IOM8N3%~a4CEDO%B)Fe)aoNey;8FT7 zLufnM1uaaRGhWuYR)(FkOTaL7R#*S)GELN$V7tbRo9ux1S^r|LgH+(~|UjxK%lj@O%O4#ZDbM22GCy_C=VYcepjeQL-J6$4r3>|H>=~1 z;kEdwYzv6SEuZ`#hSBa?fA`rZ{cd~gqOLt2#`@_Oc+WDGrM=evdKT@A9c-vFJ6EEZ zT@^i=;~T0K3uck{=zK2ZZ+Zs_WF;MiMD{aj|9~_4dQuWzy*y0y(qmwl-nPpQn|75z zJKFmii}=Y9JM@LRTGOXXZJr9HO@$&;er$$tYrN5I0xg`zRCA6!*zXbElQ`iLFlcX$ zxvskPVt&1i{|95fWUZ(hQtYj(AAi+94-W(J>k?fzI45oqnB3%vuL;oZTu5gzf`dVN z7^>-b>e2pmczyx*dE=IzyX)t@;7ZTC+@rEl@>)C?b5d> zM_h!4W1N+uPTaWg3{v%sAgmedVpFjLQlgR8=oPD9ixHOx+H2oP_=$QK|81yruFEW; zSiiw)v2ij&(iSrgb&eh(6W3?xuUmp!Rz=64JrY~-B(BPpb*Lq(nOOPfbAz*|g$t*i z!YuXt0hj(pPLCxoq{|!k0-rX$=kmzalpRI^fvJlUgtq zrry4t7Oj!!ii_Q&m^%3Wq?gxXqP(b?km*~Ub03B=?IScrS|_bjR)MC2NMpY;qFn|L z^QV_#pG0U_>7f3E4rVqXGeT55YhXm$tohp_zFMYHbi9ccw_lrTd{bsZk4m$}>*h}n zGD=H@f8V0hlU4c%{{pU-F0BFOVlrb>&bTe3;xA=D(_N# zk(XGf*}!TgtQeezxPmmeZTkz>|BVHBqgWa+S%SB8kpU--f)ajb3J+z52PT04X-lXW^0WqFyJEIv3J1lWMn4jfgv`ZWgii z7i}}qU9R(m4rIs;ozIaQw`CMHRG*y6BZFT`@dA**#w-6Gozvo=-o|xbRoxwNU;kW0 z*s6N7enMtRC2t%4D8O}9@J@w$jygeLr{e9)?!(@KlLUAzXQ5ENAlczE%Gfm1C0hPTb_(((Jq8 zwe;SH7Db=(E^N2(xDFOSKlb5Kp}Ldxy)Z!-lmZ8t#A*zq=XJz_*%jM>+;zVuBpz46 
zUx%nFzAk$;DfOEsz8>rP*BwF&6-lvsA-YZxoBc@Kq?YwH!{IooRs% zy%8#jnP+JzVVtsQEmmQm`i(p-z^v7S`CP5uLY7UP=I3fel>%~^`wb>yeAwn1`WZ@P zFMFQCKesao{D13p8xgshZ~e)P<9-ysc48%87B>_Z4JyM`36@=0br2u1b++@b6>g@) zUz%TbHDdPr6?O;9>|g>bjYNx*SEl5sKqNXBIoE){q>ux-!Ps*VY@#ICzE67`gB3T` zOB&v7F07h)8~zbRGKA6?`C#$`%8pEgTUQ%=;Cy3|3wq_%HTrY!+?KNK#d?a8GVuY5 zFi2F%Y4d&U%lUItMSaBL_a(vg?s<0~%lw;gjf}Vq6M_s9vYxmuKh3pvpgNyq_2bmm zmWM8eh3^{5g``yG+0p!!8lk4F@HBDD)8r{o)5t#4=E@N+|2Q%0KxV+5B77oRq|ZnX)mfPAitc`**V{5XTLtnL4zC7`8?$pRv5J z^#V;~^ZidD_!YcAIxuQ0r60*u`3sUMv1enhun6>jbo*ZmA{`v(P?3i2%F8@c_g*NX ze>~CG3ciIkk3gI=$tZg`Kwr2Eq77ntTSl1}>LfjzSy44+&|hloMsfP-ToPuZ4516c zI=bvsXLTQT74R$P1BZm|TkBF1w736;bh(?_Hx9I@_Y)bHHDQ;_Ot||0g&s$q`!PI( zacJjZ^#A45Z!v(7^J2+2OT1z56dxBVy=wgx@GHtEl=vslx7MNf__2XEK4QAK!pG9f z^*%Rnkg^o2gGBufduFGDDfaaz_MDA2oUnUK-_BoUH0$1lx3!4&YjTvgZAu9TA^17l-?|!KZKSL&yZB}4And%3?8x>31sf*CYLC(geT0yC^am{ixtSor z(}dB6f-r&hXq~g7mwUmNFw0pg;TQ{(w{Y-yTZ3~J2dZSFGI)Iqd2V;F&YJK3?^%F8 zgJ#fp@l9y_jMUVTds>NY!O9*Uv|FcrxQD+<$R?(=|77#S4ZGD`!5a`UQ#Bo|BXJ-|7+jW*@maHmUKgs{}fN%e@=sca$LG(Kz0nnQ8?=2B0Fzlx?o zhGHcGVddpZEJ!NV+Xba1eP%&qIVBw2;})mBD~qNwNAPyu&`cS&4w)mLqLdUv*Rx`+ z$lDCU$-FkqQWM7Z8_rZ6Zey&tqT1Z3su5*&^z)Mynsip%RV0WRE%q8c6;*EJqj+iT zm*PFbCbHr!&Vl*gh4+?5Oq5YRlbJtlLrd)zoJsQwZj{(qi7;R8I}$~(78NanOY){r zMr_EDaq*6Qo%=orO?|b|SX>fGdORd-%GF%X^lgbdo>PRR#?%@yy(Utz?67tiiI()PxGuSZTrN%v=JKYd}v%vsaByS5A(W2Nav z(&%=$d9r#8iNxt!OMmpuc6@QA)-E%0JYzf699`Ap3=UG@=k?V~c&?ak;v6jw2V@yZ z``7OXnrO2b!sRq|IxA)zdC_nD$`oO1LG9qJwjylmW?3e7BARu?yM}G3b4gl*_mb7v zCfRif)@8F&SuA--8_l(lMo8j`9md?hrtau zjv9O+#PjZv>ywHQ$)F?4lWunB37MH|!MbAXF||E7BG%~@U)O2OkaQQ5H5G*;iSWB{ zgCLh9i&2ua`~d^SdSddzi=iUwO?^;}%#2CwqflXWE~C#RuVj}6OHy}L`G|tQ`1K;VOS^Uh%Q1KcJi!Bq9ND-)LPQFsz;&{ zpT204&zh#3Q0WuhjphqdoH~=vWkk~wc{?0T*A4Ls24c$7H8TQc@r*u_~E#MkI7AIG7&SSJN@*-<99)TGtOF)BEZ zg^F2H0jb*xBE(U(L?4M$cyDt454qj3LT+?AmXqo5j46$yI-zHdIEs2Cg0uWPn<1Os z+-FP*IJmD0+7$duOCoYI{Rg^`6smL@l_-Ce3^#_60P4v`k*)`=#Y4g))0RC9<<--6 
zR-}VHi8S0YzLRo%Ok`5c+BKu;;kEw1d8plj5Z2*VSwI1bW+7R8v^i^G?$*uVmuht`N|`aoVMq|`y5sJC^A!^hujDHl zb6oEyAtSSUqny&%C|!+Ge?{WcIW(m|gKXJRMN(=1^fT!BU!lI%Bizw5l(v;enOtLC zGUX~I@Jk0#lwj6Uwp?YO*U(G7#3Rwr_*Ovy><8b**c^?3dPEiySSisMG)HfklRN;2 zO;U}m(gTXcc)towH61GS<-r{EUy%{sBvB5FG~_vRdMjhwBw?Kw<|gY>l{*SG653C! z-e0Ejag+!(DxFbz%7WWjP)7nuCajT?bYLnW&^7kCG9wE{R$-lp&g zXjVVb8(+<8^hC34_-iyQM`xXLhDZ z`FU?HPOc)<1DwsQ;p902Ic9{rX6n#5ShR1dR7-wyT$Cwc1@OqMjI>0S2`8W5#YSR*uBM2_G|)CL9OAn&H?hfPItrNz<$nBJas|@Q z(OV;KrxBEtq3yy~o<_icDvlEDsOLhP`@uMT`Exuek&s*|@-fYXqmT*CKLKC7b^erT z8O=q!WdmOnBlU}gk}j?6%8CsiPi2{BzFjiGjeL4; zb4kX#yu^izML)}QH?^5N_APJXIAcZ9%A$)Zw?1D`z0iUl(g7nnU~%#9j+5JfI7TW0 zcsMKFs*{WFA2SZog!WX?cQ8V8aB!VFw1{3#pYW(XPlbD*il^}Ie=^d}u!E<^df%yi z;}9j_kNa~>}eu^6#+3SJ;u?|04wJ#m%N zG9^lpPu^(|-_Z==7p`#ceEP|MA$euNgVf}%e8nwzO6bDF`(d@qhzEBnTe~p(c32<3Ewhq@O$~b!>oJ<0yljRJlIL5Y6Rs8jar(K7RPWk2g5oc)0St~-taBQv2+uMeUBKl zcUtnzFC4%PRt$$@Ob2863%O0cXnDDaI!B`zcuA(_*ovJ6by{54>N*F*WhI%6KYMk~ zQB%VhKl9f6|12XMd@(j4RvhbV9;D7mLX>UmLHIt|QnfD?v}Xlb(qaCcWm(YDS#?xS zT{$7zh!bR)GvC5;_dz~BAfN3K387WB|CeTXY3+J#V@zM&n)|0vW zLLi7gJU6niMnG1lg&AYfF!lHCKU80*D^8eS`x-D}oeb0S`gO)|?b=kNk@u~X3m>h~ zj`qn@v^tKHt`-m|U|>kBbCpSye%86Xj<_i2I{5Z|IND=@&#KCk3WEo&tinfLjo`1J zO~@EeU%Sv|YdMoPj}MD>p3N9MZrSOHt+8U&f=141j|Do*tG5hKdC|e52wfZ|QVfewoVkI(orgM`M5Ql5^m~u&y6nqYR?n43<1MMvZj0WY^@?2lX z3fcjeH6xdiIn#T{LW*^CNn8n8@+k&6+2!XhS{}QW#sN5`oe9S=&?&p_9kKr%7h95o zJDZOZy;d%(Ziwj{dR1JkDMTvlGjnybjvWoV2xA$S%4AekF1eho7#RqUvXAyjZ;M4L zE@mCu!Nz7qxOWk+g@>9hhdzZP4Q}LQ`PZ!>PPbM_WD#CE9zKT0U(e;hqq?q_kv-Uq zjM`KT!=$OG83kT_6c@eom2hpG7(bAHp62C$+?0{tY_7ob?s4o};uBVfEi*fo+otsW zc6oopQl>%hUc<)kG*~-fPWugEMUFM(NE#nPl(UBR{oo6>3P{=!wNSNPcN;-1h5MJ@)@hs3dy$C-+|TfCXmng5!m)jbu0r z5lmcN9K9Ry9_E)->0mn+vxEVSulv{cuo&2L3YG6TlC&k!0)E}7vXWwVY>1G(Lo6(j zZ)N|0cD8q+o{2q?Zfy0oDq_dawrX?4W~O^Sc5Y&M;{emv%#KDIeEIuOZj(W^RrQB0 zX`b|y2RugqI5Ip&04hI8L1{C)Pd=uZ>HvPR!OpAW(^4DD2!uz=#T_;z3?xLnKU@tm z@b!ypX#M{jlM5{5bBCO*AP9~7cZR2ZcXRKFl9AR=P~lTx^AIy?4S!u!FcYR=-LKy@ zVAqJ9Bp*qlf>(F@PW@P{VoVgV#~?F 
zp=aW3nmC7;1ESaM>m+qH+-NE0{@vsQ$ydEZnEl=h`1igpBuqXw_Qaef^wwr^nl@^_ zNE8@&7&1A38~#cK>Sg|jud3@YNc_7tXjYrmpU!Ij=7Yw@T;fwIlJ>h9ER~$x?E=~; zw<3oC^oC_8y_YDq&hMi;al^4pE*ILnG1wo<3G!?7KdEYBaAcA~3@sx$hlsvj%=zPa zd_8KxqD7E@yKcr@ca@EHc%Q&bST?PYl90%enPbkxqx=we^cMM^xAw=J-oa#mFkxAm z6=F0Vt+Jys#|u@jAEcV9R}c2dy|H0a!?iyhPYfhHYA;s_192cRPFz>&jjytc~B?St8{>k>$P2 zvlE*{2t3U@iO^Mb3ERnwSI?Ytbl{IvZ~ip<7N4ibFkezl0QlBWL6XF6CH6uwajuh z4BRR&cb8G(u!-)6Mpo4#iPqJ+es}V;6c?r!U)oV&#ra2k$uOydvaA0Bg73;?d~*Cv z_dr*>a|uPQaMt@XV$wBJ1BRo7G`GN)F)V(c;u_A4iI;x+5es#E(OJ2_&N2yPo&9wf z`Wq_=Yx!+S7aP{^vGKdZ+}@a=0pUCPZmy3st)W)>%^^}?mVB<$ zLIro>8;W;l!P*gz6HT8hywWLm+d4dRu_e)FqnSZcBxFL*c+Xfc>syyTU zrKx^`CS_%abJnj@lzz|AJTx1EBk}D7S(U6r^G>qeJdI#cuZp*+geK?xzVrmYhL&`E zxGU%SKA_+9!F(!g=tm~VX2s|ScPX@7w;9Y?kYGrnb&z0A8I&MmL0xZ)_0zcjN}ZD> z+QlHeW=D<&H)Z*l_V>d>ZjfQlTkl)t)#vmzx(DJtzMoM(1c@;XJFcU_#ole){YG~B z>Es|b>6OMM(51eekCihnnAg7!u^7uOJD1EU`Jnxsm=|t%(74-3_ zL5R3klh%zoMOUt3fm0xFiS-1LYPDlUlk;#K<*>zMS^9eRZApswRgp-vGqX>lj7xz~ zdrO`5%1WxE%&hBZ`8Sz`s4gQP^EF6lri%wtxEXKJ&f0YerewCZU6^-p1^uzmOlr;z zb^hvouae(>OQ6Pi%ww6@AIj$VxRf~~^~}~^(%`jUs+~BOpKoW0{!LZ#EL;x;@OKPM zrrHW3Z5cZcENwxwH=Y_1C60FC*|gH}YJD+_3U#ZB9X(?LP?*7phR1DIBD=Y~Efv;gcqRX|8mt z@5Ot@QZ`~KU;p%1cDi(5rEhjK(e1%r?CRN_@~X=KeK$&9a@xjOkjN0LI5bQ|F=TsT;_!07#QTq(oHzoTSA5Y)7P_U2#Zq8I zU1&*m`>lQk$DU?^&P%fVv7%aH&1F-aSHFk$>uCS%F%Qhx3P#j*&(beF!s7C85~{H0 zYE1_c8OroWS%D|$&U$y0nHDYDF48e!lM@`}7Zq-JS8~YXD89EbJmS&mTPctiTGE~` zLX9oS%Q=x^KtCVB1X8r@T{Oo2WksUi=k}i>k-3cXn?m>Qqp|NUnDWrL=4-L@bYMrwRixH8SwPAf^iQnaX+N5fLvf^v=8Vs~Gs z`GEjMK_l|xPrVw086T@`+29WsCoqC23iYuS$t#8D%B60hnyf4`Il*I1v4Uf_yqV{+ zDAY6z@yHIRgKvcs{M8(3co3P|dUg*%F z@l()%F{ga1I8Vib#TN6wGZ0>}AKeppZFB7e)^WBSxxIOdjkys^gnXs7l6%`bs(Vak zj=0fY(-QOjQ7T0_KNj5R{@7NSVi&mRm3t}QcQHJ9q4@j~@gNfC(Pd-d1Bu(HpMtE$ zQ`{ArV6-^2)gq$Aw2(zoWvurqNw6~V4XYm^_NWTKK11YQ89`>>`|JLvfVLe!5}6E! z_Z(C?K1{Ua@CwAfEwM$exzjzAuiZQ03+)3lEg21@^UP4(3k6x%Gp?8#W0x#h?R&c? 
zllEiRdUq5x+WsH5-U6r%u4x+%?(XhTC=y(YyE_!8xO;JThoZ&ZwYa;qxDd%ya)E^y%y#OuYQ3Bm_V6R!^%fF#CD&6N7HU zn%NXb;CP6<9xh(_B4S9Dc@A`vn6Kb1CP@`p<-0&mj6VC)y|1fV02&!Jbj9p$gD`GUq6OjxNN1Us9o^HR9aQwjQ_6A!WOITYHE>p z`q~MHacIRiDbKdCa9bLwf@?+j?K^Kw#@-Ks*{mk21o<-FdB~CPCOYplLm}4>S&rTI z8JD)rVHTNN@0}1Ds|uq666Ey2vnt6Xi;ze*&6L99bR;)##(~V3F<9*!x4enDFi^~L zUeT;M?_|(EqSt*mrI4Mjslzn5D7J^)4mfO9CSuBbM+?pd^xvC@bsu4a;o zE-LR%OKPct(?==<>1$lZhv1w~>Ln2ndKM6Fs9jQUxlDhr$w+b4a_j+K;4BR3M@th zBzVnaF5}>+%dcdw#=;C7;IowAQ3XR_KiD6T5RDAPH6w=Sj4=X2?}75MH<(+DIdtwk zwjFJuIj!WNAQkyd0oK!ShDtnK-$i{e6J-ivw}ps=fmNbEOp-)mp4Q^RwlA(nW^oyS z*zv5@gi74NZo3L!zen$2FfcTll8;T)5YNggb9`P=hW4@`6qu1z-*SD<6@sc=%Kc9_ zy|_3P0ertZSAb)IywLI;b^$?LA724n|0uX$2o1W+k<=!o^Guczl6Tw-&Mpp2!~4|BhKA?73T%ClCp4gjuWS!jd8|90|;;P-nrxE zZR^_1%9bXD!R?UiXhD~x&I4p*AQ3JdWBN9tE;%r_tOWNT9^^y&tn7UEQb_g3aWEpe zha<}$)W5>15S&$_3Nc!xYDT^tsP{Wo}%Hj7s zUSt5~xI@dpTf0-c3;2o`*AXxcl(PNscU|XU(3o;?g?r}Ku|{B7lw+=WBqx`8_t)r* zvoHiDQc-d7LWJI@Nx3p!Qip@i;lQR<1)zeC6+iNn)SV*?gqa-8=?Q5va4Wcd6WozQ zzMJkQjux>+kH*I`QqnNt`G}A%$D1_!CL?i~jCW=MsjLC`g&gd}BtKNOhK`}YM0Wpk zu>|I1MM+)BpsuEcR5zBTkC(jf+JG*u`!xXB3JCHG-~O15H&TZ=2T|LAC94+AbP_>| zE?Z83JoN>h`ctvFwp=Mc8JJM`^A%24Z8u&}SaQ>WAk2+cfZSNF;27(^@2(Ba7!Uv* zyo`wT8J8V;RUV5P*o8K!|Nqfh+RBsv7yRU&e{6>|pv(Bs z0UROMosRq9?p73d9e*$))jQFw85Ra0e?;-v@cZ zE;;(uT6~*DcUvGSz3n&hDlHUs$Tt6c(1M8pv^v5sW2WZw#y&j`)gf((%OWJZT<`4; zbek*YeULHmr}6T)z{J>%;~ey`o7n25=Rh(5d3E{l$HRv@2PvY0rJa|{5PZ)ZoYtiW z`wAcBu5@BBU32n)9(3Hgrx4b^s_z=k7itnr4Hz`AAujL*UVUvTTTgx-f6UMo_dzM} zNilzVl=~Nx!#t)eusXan`%x>}VM!#vPrhS^e$VNAZ)TQMM|OmrX#9kV@UW_xFFi2# zDbptOMZ6kFCpbqS{+Crj^-5qpNa%aSek}w+Yu(L*v5Z7UB?_85g<23uQ16j?CLx% zU*7JBW|nPyL!XU!F~S791&b%a2=mm4QdxYg9+&KYO7oQMo2>CZ_VLd@LiW4PR~>F4 zcKwEc-MeLKA#V;>gC?!0vt3sAj|1NS3WVc3ZvrQZjHn2n_4lrjg9cC?&EFbx_4g9e z_eXD;@NHjX*9@)x^0%e-Cm4>L7YcMeM&C4@q!SuZzELCamS+t-_@UhU^ZMjE{>xZ&ydM-y zikhf9V+eN(#>~8x{tssn%Af6b`hzz%2#>SQ-V!tVwEq9}f?OPK#0KB)##|cBDHLaWIeE3@jIN1Q#ti_fAH|C#hSF7u) zA}?IP2!Bs~|GQPH8|DPQA4m5xykWr|2z=Y7t$8I+K}X=a(ZB575_PYAE->B1YbZ)b 
z>kC6}Vt$UT_bqchu3-r#eraJbi@QtvoeyTe=oz7+9y9S)z zRX}26AH+kVb^6Y_Rpmz~lv$6x1P~;3L)7G9CC4<58Bm7j=D=MV_+>Ne)Etq#>mI1pU^w%g!?#FuLp?GBuNo+A+^Y-_mN@BXx)k{#R{UQs0QGRT znyuw+W1b#F06F2uLI|FU%&*<)xHyz9PTY+`;g@mxM})1_!3{$Vt?!faKZ-HQ_S1F!nhAg;%A z33^p>AYHJ7%2$vl{%OYqRt6}@{@e;aOu_?m=!Ym#NcCs~3FCZP85fnyrgu zhSObukxwN8N{2%Ig9NU6PYa%1H9Sr2sKS#M{?QEweFdLTQBlM zf27k=2uqlUSX4YM81ieFUmHe=MRWfz#kRws^!H%SGLHeQixSni)G>uRh5eNJkP9v2} z6VXnGPKvQO4u+Pr8b$dXF}`g@aHr2A$sY%0t|P6LZIvtraix)Us^N92!L+#b+37jT zgz94&(_k{XY%$Iam9ijzs@gDkQ{x+T5 z6^Q##xvFx6hhNi|@G#_NFbCqtpd~|~(Mp~5q-S|UQ_3vF3borC>F=h)yyZm6e~I}U zX<#`j0KVGwEdmgalBXB&+yNUC2_!8^tdy#DPLR|`VRBADiwSuBAs)T801K~Y;2)e{ zqZ)ovhsJ!OYX%i&+zSl&F~L6>P!F4jG~9lx593vRo`W$8m*J1m#!4_ijXDHSUP#pW z4G3SKNWs9PrPI5AfRzN!q7_waBuK39m{FewZ2{QK0HHMU?HnGcWP=f_u^12VdT)?= z-YtTXuqL{!>VpnHjaQhbc=Xo2B$@FN1o$I&H|eJde|?z>6vjs~y7Gem@mKT~2>D@= z7)1w6tmh#NbyOLC{3sc};0tJT5ouMTOB6V47J^IT9PG&n`FKgDY1|A74u4r9F2L(V zCv@n7foU)#kvcN3x$CN%g1K0RVa&3$Y^a|)CvP0UG{4X~zp(Gyw5?i&z1_sy62j%6 zfgmL3{{nCDwRhaotv{9nS5U~HjT@7@pBX1mq`~JvN@#e>9OUn!#=2MQJ-!+LD=O4q z@!K|ig$*1F_1IO+q|q>+qz&l^>w_GsWIWP_+I2fWrN13wgb!8)J_z7gC=yb-Vb)W5 z*5`rVcN_&Qs4{n6zSqH3;{B1@Rflu zG=%7`8qdhr+f6R1ch8O2!Yr{t{+Lm|eNF`^a?wSm9!XS)SE-4fO+9~!&FcM~g{a_x zn)S0Be3U)=N#pZy{A6!lOBQafH#-!P6_>H4XK489g#q8mC~o;C{-;CFMvg>2Xn3et z2j?TZmf?-Ffm?CtByNUGm*F|=ROaG3R|Eij>pE2en&qazq!HO{2LH zRF>X(JmSG6&3s_8pN6SNnXYUeK#+Vv}lj$7COHjAW(PlG|I|B;7bRQD#7i)G442UR@>~0 zpe|H;qWcMsk!KbYg>Z{@%Chzb|~rwNk)S1WIRqHP(3y)Ym+C zr%{;2t?#co8lCr`CERgMLQ!?1!+OaMQ#`vVe6))uv%&W>D+(fGVgDkd<5iZ+a=HH7 z>+9ueib{V8+sKaLZlS>Y4^NC`0FzQ^$shw=CA2< z=n6>@s5Y`g^lI)(D$ijHLQn+!<}Aate^GwP?5L0$hG6gFSjprI(BjV8e)dkiFZP+W zCs~-1RK&5$UCyPAqPmg)V!u8^CEr{#af-2@U-`4{%(*%;02OcT#ir7L1xps0-}C_6Z{E$hMg8Jypo5-qJX1f%Qva0 znCI=ym;5t7P(&^_Or?mp2g^7W2hCsivO79NN{e_ZkHTlDM^}Wtv5{@wP0E;%89?BP zHoo8D^P)y=B|8>t%Nz{X$v=j;E#l=RqU?rFWgo)EVE*0SN3WJsg2@$IcPTa|O#-{+ zmy@?p% zkrg8GBcnoeeZC~(e?}>q3LwN%8WI)D-KuRC%La+z4Y16sym!@IdobPEN56~>00FbR 
zAzRL3eB>gHdcTKvsdos}7?<3Hn|%$${FBA6jjM^#7AX+Ey}?d4Ix^L*ZC8ja{2rtt4AP`g zr|yyLee=bKqZ4p^z&b+vVJ-^2fim@-w&T-bUdgy2;<++965T1cp1rc@mJaq%_&~a^ zJi|5{3<}}e-zT<&4ic#|Ra*bF?d5?!j~vn0H0XIfh_K+`tsmQ8D64PCjq)zNF(Xqr zoZVE^F$N0ZwwLE9IiB30^>v$`#YL_S^5280P9J*LCHl0S=HWKQxhL}=&g_Zr=FB>{ zP7ecKw8J_xp<@0#$o<~S=jVYZle?6!az|aMOBK7;EFoPPd(#i=jH_b-Pe}r{N4u2L zs5eznd>{+M4fELx2h0Wj>2J%DqAi3#`5nntK3`HqL^z{~?P7fHXnbwcL8^XUzF;Os z0zi-8mjGxupnr*8!xFa7N*b}+TVNb%9XuCOAcGyp#zA@ZuIZi3Nv>2^~*=|3rx z0@&t)%sSQZNAboly>_sB-3Oef>zc_w{vz$Fz!KOH>{M;0#l%F7XY>p;&6(pQhw6umME5J_lQV$JI%>I)~a1LFb``dw)+#L{1B?q zu#CrmLWZlmc(zskq8?MzZGB`tEA^!FCSDTFa?SC*stuJ5J8w)v)=E(1O{}P#tY1C3 zhD*Kcxc+-D$n1Aj{LQR(y+pp2X9f?GN`C2G7J>=8YujHdT{I?HUokxw-!GJRZ=JFx zQWI07`4TG?y863++3p@PeYfHUSDLlF5=)0tu{vc33J*N@;%JYo*Oc`2Zn6@y4G7-~ zC2;%)Ese-F?^m!Be>W5Fxe#%GUsy zzQCYv+{4VEd95dGaNna`LUDdq>I+DD)Q490fmENSN80S0?|DPA2TtJz=?(khQ`OrN zo(kxg{bIjA4dI1-s**7_=UU~XoPe+$sD#x9DjmYV(}^262WWgW5|=WmEE+W6B0W_F4S5M6%o$VId zi#8?pS;RQJrUd_-!nC}--#PBKiT30&FRnD*{VeS^JtR{$vB?cIZE}Q5TbwUXIqkouTH02$bM5 zYdOf!LP779y)+qWE$f#=X!{FO8o8;HM!59u)CEU4sev9FdT6xyUrT@^EDExOEpb#3 z6AW)xzvWdNhzBry7mpe`_qpye?^w-}8xo%M2S)amfxkVs zxD=>H=Yq&d4&Q1e!b8Dn{_c|nkbhfjkj=h78XXS)c7#CuofpAwnvE`wWHr4{e}>W%_N?7<32JpmQlJ>7?;>Nf0u0>%B8DXD^{LEEqpNZ7XvH zX7La5;PYLH(eows0qM^}Ekc)I-7%4_B$0eu;SOFDG>n9=*Xf(_)rjpi#pqd=#>X=I z`1TwzOPq!c{|HSTy z0Ay2Ug>ODm_NI%y6vd&OuZG z24#&Tl`=7o_R@^u*t;ZyspUUittM2$e6ta3(N8RvME_PNe5~$FxPdU0sPmjfoVluOnU&YIZ%HHRUZ(Aa}kRPH9yD6q6^oVxI*uhQpcnrCcgye{SmHa zTD_M8x6`W^wf|mi#`OahK^~kNyLmQR=E920a}btkLhMpMBRNDn(}BUF&et$??j%K9ka>;kL!w#x zr&&R;QY6oY7gEs(={ z))Y87&CL&2W;@C^rcsism2LEJ$nj!Y?6AX2n&ryDqRHqsTFyUBvOFSPt=zv( z!Jk;*PEYM@gN&;-5oNe5$AY9d88^+!vN7AD`)LO)#Ohh;CBOFKtCm&ES=P!hi}F+x z?IsoME{RcxzzOF3NTyk9xp-xU&RpM{`IEZ$6kihNgasytms8kt{OCOAv}3$ege$EE zy;XE$g=ZRPV(E=Zky}Uh8kJSB1xe!3Yr|w|@tbd*cp+A*-<4p7B_|n8M++6VnU{GQ zW1{Tq4D^KIgZjY$X!%hpy*+aId;7V90-pgCyUxI`zHWMhy~NrRHx8YBgh7*NWmPNF zCV^{r3lQu1YY1A74sJOo%vA;)5}-H0`SaIB42nX77&P1JE)@Dw^e+1-Xe2C%Rc2~} 
zYbwXqb|UK{^cUVab&2W=Q^t@{Ry-a!2q9eYY0y>43A!R9xsNo!$q_w141LJ~RdmWs z;VtP5zcNRnreqZkQpI+y<4B&kDPydRGOpdY5H@-B;7L3_=4UjnewrY+Xxb~z)Nk&H zOS2;9oGORz`Gj_YzEf^PBcBiE+xIlAfsZ#Ba|hFmxl-=)0badJ!>qs#O$vLK)7sqb z@VFgL{Tyj4;rb>CaR{r>!9~Rc2COe;zq@0b! zr!!`}7NWKN=T>0lt6~ClXayaOLx7~ZEU9;ZAC8NU?Ef!+=R6{h? zgt{QT9u`0b4%cz|UsWNknBqGzgVZ}XQ#n7mf6l4zSH#E~@DaMH3k@nwl$iVlOF@Q~ zi6QEclJVUlrQ*8<)`_LNrt1mA2aBn?xS9q(;4th~rfIqtz6b&UOpu|;FRD$J1kh8q}2P(X`AZY_UMkC^=El%JbgxRIn^zU3HQ5~yA+{b)Rwcbso9UB2)* zyXU&Bs??pVT)3G-$e1j$s~zl-pW@f`ClWXjSUh`SfBJl%9WDH4gJo4QlJme1$;3{M z*)(phPwm5>P|!c6gq@$sE-tiu1&DS`Gp5LKSX?ip+iY6vGO+%sV`MWEyts0Kr&$gx z?V8=~6Jy8R3nU7kFN%S_rvA`#D<6JG6SqIERlRo6lQ+fc-O<-y%K4h^L{upQ?I%z9}8Cpu>w2d6GpH%FFBIJf$B9{qeC2RPsn zQ1}ipm`v}9CCA=Y8GoZr1fY^F1=C7EQL=x%zh-O(_1Ya<)DA^=_1$@<@=}yNvDaUQdTZPYGQ20%a6x6ju z%xd-^+Hhp@{xk0#Vk}*9fMXSi_Cyok_8nlc_|v~K$IW$YC7Y{Ogi_V1E~V5glD50b z3wT3`WAp6JU%e=wNc&EsV%7M}*WP-r6R)zJUH9aNetK{0MH%#)D5VQ(*9uWCO;x)c z8UVd^jxieEdFjiEP`=rG0e|emGDv2g_!&x)A#TY<==Ck29#g8I#Jw+mz&+wb)SF0o zREsTE!&(Bg8jSzm?CZ4q6Sg|$;8X;aF6-@{L@{pHHWvwnfZN-9#Y5C?M0-&d7p>kP zfz}rSC1dFKOe=eA;@z!ejP+CB9U+sbztz*+aY#dbgjfJMdS2+l(!FSJ{4)mW9hL3T z=(<*3$*goScG>5j?FYmFY`@Hb$3x0l8ACZHnCrypD%M?eavlWbzaVx4&6)-s-nVzm z{xqtHO$DllS5D_yUY^sH^}fkF9pXNAU=znel${&fhDF5m4zZ&i5lJ-cA3{nC^9f!C zqUk$s-!E+V$iv}?5Zp)o!c^H1>QN+^{D#Co zeD@_`BzpLP^~UhTf%PR6?iLU??iKEQ;aF7Ol_z1O1c9|Q;z|tD5cuxF_#epd{!C)0 ztO*?r0b9!+zP|EdukfUv;hn#mtJZ&;nD)T0{RT+ZfPPAC5vDLcSO+D}U^og5=Hoe! 
z{*7B>S*Gm2j#vNWUg;u0{qv_6ft~T(&G6QCS2HpjPwdu%(_WP%0sb@@y|x#);1>=L zDJK&$!Whr6AdY1NE`lLmlI`YisDQX(x9jj!mC{yp5^_&72>}-!*w+G9#Ue62yjlha65u%KL1qvI|zoaYAy2B#lr9Sks<^x zDLCdqk6e&V3r`K>V%x*il7faon*0=Te+D`m!UKe*&Jz~`kKDq8YEbM3MvczriD|15 zSlVNN2bJw{Eq#3^MXNnesIxo7jb61V%{GSN&VGc60|<|>%^RHHh}x)A2HCXc5$IXi zjB-->=qJh+z%j>vLzr>TeGMepy&fJqXv0xqyc_`K6+=t2V^ofu;BB)&`W zU`X7e5hQJzZ#Y*Fh4z>nN|?Y) z)F?<7It+hEAQH{pXY>2nzjin$y@Je7Nh?%)oJ*kWK%@N0B5$o+P=Ht5qf_-JZM$hz zH@wFw^*j7trNp#W<5}gcE$M6~5W(HMYS6wFn1Sr__taFQwS2JsMp*86A?!0z*H#s* zR#yZq*)L~|=jfKbq4}Dx;6b94A%i%%$0A=Rmm7&}tD5lfqJUM0#ie{7HZssXtSD}# zN&tlFkb+c+dt<_5-S=~Id}l{T68(^8GIsCUD{uUfo=s*>6v&6P8|}{arFS9AJZ=_2Dk6kcpdNc0rGbb zKTq#X-fg1~6~G@{GzHgWGNdfQ@rgY@)Aa-uainM$JKAR1^d7c|2cE!397Q%bkg?JE zAt11*PR;y6aki&puX7_t3SCpKT(+4 zawR)gx@3U|a4VNUxY*PQZPl{xz=mw)?+r(=W4VnyV0mCSvd#9XxHgKzcc{Tnz?)UF ztLf2UQVdTG_pCJ^m(WPp(m{Ws9WPqF*)lcq!ye~mGYsY1y__6rG_9=5fo^7ZpRh~n z$|8Y{uCsBbc{a${UP9M1@G+F*<^T=Vri+yR#f8}L)r7dgok&cEx3&9L ztl1KPUrSiAW%@{4abD6MRP4fP%3!6rb+5=}3V$Z*&AAYDf12Zqxu-PV6|q*WBQ)O3 zzDYK6l{5$M#~&V=@vzxEGgPm$o$a{LHR!sB6x(eCS){_=5$D7b=OOl;~CrpNIRohiIT@!nQ zJ8FMt&26nSbH0)Gks(m==&W;owgJO{jfT0labWA%?Fu-(C4aTS9eQP$m4&R&EsS{2 z>UCy_evQ^(G`w?{sEh*tNJ{FBb0;U`bSmQZ4q_o_bi!-9VcxY!H6rh^9v43ZA$Ago z&VB7$!^PoA7(|!j)=&IQ-p*+Cdf>I(x`!q#7u^Fz8AA049M7@&r`64XhXdS@|C94a ztxFlm0WMI2iN7A}}YEesR@s4(ous4i$O1+H9uy&x=mg-a88vRNNym+^UFZ&`VodO~#P2oXM|!?i6#0LLzZ~*i z|JmQ{YGSWDO{I>hCjbk8(qV?{wflU%vQsuhJfBjrkry0p^gEbY@rhht0967G>}CX> z=monDLth_AMoSAK*8T*aKp}GJcN5ZB;%+NKE|=gv@)hLt&k#J;lxcg|g2U$*E6f4+ zfrMe|))0eJVT7U1u<>l_WV#ZFIHJ40MXu8p@DJ}KPek#!Mtu8^L#Hy|!93yoqAU}l z%sNV-Tnk+X^dP1}MfrUp2EPvLk&~xfcJCk&D7eq6R^?DqWBp;Z9V;Fk}MbA1`Kji{KcG zsH?aK4s72kkx9ZorXjmq%c1rm3pOQyE~A@lk;K9E%=^d(wG}{O$UQMChYs!wIEU4y zi{SiLc_^_W-p0JEee|+JosNJ%j3^gPi+v-xBA+AHM@7!vMQ_a(*D-{zdB{e4oWMRK0sO={V_ zC7O#q?9=D4qB%?vQkug1YGyCXI5`$Gn7b&j-9F7b9l0KFQ{Ewe2lg~wp*~rS~BWJK}!9e6oVM?$Mt$E)7I0*H*zrN!h^J z{!BamL6spB*0P?kZ6X(uEnY`ofk71(b8DBC^wS;ArI<3hEH^c2Bx6A19iUe>B@2qCWga6ospHqh~M12F%4i^^a2hRGX~u2W#SOD&~$BV!HmD 
z3;g6tQt1&<_!u3%X%b@TmaGOMX zGn#@+yx;zjQ*fV$5&@#ocDlCb0yAEnD{X5WBUzgf=D>}&r|#kY3u zI|Bm&?|Q+S)5iFKZ4dZh$W~Nj@D570>xv%J?VglsGNu|o)9`KAx@7$7fm5J z@pB;m;T`w;h13U^BGguhfL;4Lo9u%;Vq3{PktjR-p6uhq3Z@Dt@5@ACp4NaJFXrnyvUUGrK?1{tqPuK}8 zP>S8LB2K!%y4!dI`3=|pk6exM`0n!Wn{?pB>|fCsbuQw=n4Vjx_`qy^oCmm+5>d|8 zn*y704h}Ch$#INF?mFXju6z*k!N|9ezSJ8DKB#$`bVQ7JWhqO{p!D2smvdw#>Rsj$ z6*~X50BSm4FItqJ(6R@s7Xkgj)+skHLByE`<;)Y}$)b03kv*Z>1PpS4A{pTaMW16f@qq3l z@lW-3x5Pd_#V;F-z6>ybB$NAb0^G@ruvN7m8BE)=A7YHIFKNfsHOY-_i@; zk0mVK$jSZh{{}16@PSc=+Xc_lsz}Tj9P7I$x*S@?G8c(}0N%lt8PcX*_$dk{hf4O* zudvZ&E*-t~Z!KioEJI$8w%Z3Bl3D!IFNWaP419@$Ms*V^>GhN6BP?I7&I?6IF(ll! zFd*=ZgU9dF5n_LuMuH&TYpJiicRtR)9`HYQZ!!m*0DtMT4yF2&B2~Obx=y0*8;p?n zsP#oLlg0xpXgHxK$lqyTQ)~Y*d|iLW!H=ELjxE0R4ZHksq8z!E0`1je0GBHwxG2^=OzJ!?NKYd(8y<5n|6xj`cok^!|86j;6p_-s(JlJ) z=op5j71&v-J7Wm*Nr*PDD^8MUIW$=9+&yIke_JIO>Wf$C)hX zo==1$_b>H6gD5#)dD=&Ic@F%3_Z>O)dCP7rm^$^>d&;w*P(5})VDG?Z#fo(wJmtAf ze&y2sUkUsph}w}cm=b078Bn(r4Fg6Z*rsxxeT_m zzaa@<&i;qc^dC9)88rs>-kISGrtLF{NePxxzRO{9(l_^?SrLC23}2To-u5{o4|p46 zoWl+zB!2C^efTL0PY=IF*MI4|w$(57-59UI-W5$u;&tKKczQi724-s&tJ-6cfIysi znAP`f;I76bjXk*Uhz-WnzWi<-PcfO|Z0&umO@C-QZ*5Y2{T(4Ichu2;D+A#_n~Fih za~g;H5Wivfy{!By{Squ)4G{owcSy6R_>}iiFuw@3M}?5%%QV0D2d2ng=MMs=fRwMo zP>dnAynk-_yF}ed09iNG$%B@E6}cx}XjN*s0W3V7@T&*(EChE*4lbO45=D1`267N8 z#B2!B>lkvXUxcVHY{Uln>r1QK3i*~GCaZuK`ZXzOcD0}Dp8W{A;cp5DfFk(1#Spyn zGxQDJxrFvY;fcM3MU=b|gexpCMAX`=%U^kz(3u_hP93#$1M-$c{S!-rO$@lW=BPev z7DUq^ru4~;CBN-}gc9D?#QvQ|>haRdb^Cw=W?`t|^HMvMjn*Ctf|deFdLfSb!9}Kz95}_g?QFMpcz5E)jV`El@IgI8_?aSg87|-{mXw(WjZ>b5Kk`` zD`*VGN-&^%qpA z;cI%Q3II%uGcNl;5H@v}|F32gq7qyWvThB;!(Wox{aYgcx z14gx7;qpmRf!><{;VsVCL(vwHTt_4MJrMT{cESawY4*X@>kW$sZgZ3cNKs8q?al^AAXJWUYQ=HqC+C(Vj`nrd06`>z7-qD+_9*MO7p;8f1JX=H_-g5*?n!n#hD zEY-7-F4?*1g#8C}H6SQwZTSER*~?)HC;gesgUzW_oMC znMN2JVRWgd6K_yBc|?OsMY(6{BSEA{lwUHaFO~?2lSy@5gk{@=!|=T3@_t5DRiyCw z{(D@Sot)|xoQbL;4B<&6%hK#=;cl;*w2$GQL$i91oZsuVdJ=;fJ<}iu7j!f@^zNMA z>nKDG8FH^aG|%Z63r^AOS?hdwM6G#%n`3IglO_x&kTUJ?*v~W 
zznx1OuT%ZkWyYYv?3M?$O?B5*YYmC*I`RV0xqWhf63{p5vVK}n{yicyNUHU5fxdMm ztD6+)MLN{aa?sEY#2J3Y=&MR{p>p&jJYOM_giSmtUvNutJ9_gQ^v@3eh(La-!@EX@ zOJC{I*35ER=#~{oUbKSf zfn7AC0Yujjf}@kh$hp;smV^XF?6ANk5P14WN2d7o17l++>Oh~!qX+ag1;LIN zWekgpvi)c2eU80FkoffkG#RErE-+DdB(dDjsV&}Yw1YP{kb`fKo_}B0)*5!+jc4xkri-H)v z%NAVLC&3wjdehtjc1U+*I;;?7ueTdPB7NUI)i!C$_LqZ!Ag$pm-4y=pD-Vc~DJ4o@ zF!JgK!;b?4!+?NkU!r`>8pv}HScwMADMkOF3A`oaiy@^Od?S@7#ay*m^86`sokpE3 zbPfDNoW+c=fq)>Ylz9RPP!|3}`Dx7eG_W(%wZ*W^8W~(&d8wi_1!0G63+@HZUw3?) z%y;IGRF|wA&o_aBn16SAM3txV6obnXYavi9NPy~}N?VnzweiqBY7SLa${Gek zoodJ4H@4srS@V~d{WHjTw->Eyw? z`?{vA5T)LAl>bdB+5`|K$bC1WMHKh7cw2?tT<!hyNr|MfhzOZ`UNeoh^}uB?lXmj*>M0|yxzJSw^_)w55}gf1qrCyM<>4S0;k zC8&5PUu-qh{vlW`s4rY9>f$qQ^38LZ(L;&uS7zJjto9pKw)=4zm|0oIn+e9#^3`;3 zf@ya~P`(dgcL|)g-`9alOygow)J$ICg9-yore%WA+W4pZ;eqN5!R>I?Q$R^Ut0gQC zj__a;L-Ol9)hJ}yOTXjriv*-BsDK=iyYy zxYy7Aev94oE{8AHa6EE1LqS2)^Bl>;mc7mpzu85^?Zbgj3cr3xjQknH7Nt8l{c2{A z_O9QoY`p(UZSG*h^y>}X(WkPi9gOEk@s13lY-$m;>OXh_znCq%%-~RK=^WsJ_*UYR z@Skk+GrGcsmkt9j($R9daFl?%PQ-KJbQqr2yrUwFyR6Ec+~L2c@IX0^nuskW+b^3j zZxeJ#>9>;rN2GkL9)cB6{~L)_$K_rii(VZ9p91?xYD9tUmbIAy`SoAR>? z0)kJU8DTU2W0+UQA|4R^d3Q9T#Li+FmsG9_66YI~KKw+B(8K~#BYj%CPmQ`xWv~H3 zL3>@GIj<>r+IWtaW;?#_~HBFp2?VouL66)t{-972{Vz5>VDC^A^kp1xLZ9|$=xww zGGx1<&Bt2%kq^4q`V)n>m@@lVghBSmPOVi@trr9Lz^|}kcH<)J&B9+np-8{IWX;%c z!!l&a+p&YaSYS7Wo7Qw-Z?uw9GgIH9wOR>88|eZpcc{E}z*VkEd|+$TFm! 
zVI=5#Vt)FXlNVl*hbq|dkbn9Rx<#bod3@q9_m2?sRJU~5g;AcQ z$Zo*bgQby%BH}(FOaTdTd^#BwoHeNZ$lNvq1EE(HS4qMGiNanqVMCvzBZ}D~NUA6O zDI&L)Tde{b*I!YnSwa)uOkpF1Wbz_t}n7;l}=J*{?X{ zmwJ=m?&3AZK1bV14qZ>(r1OI8Q}@eBcn^m5gzw@HWvDNpp4Uykn5M*du?kKDSBC6kkwm@OJ_z84 z(S$T-Z{O9Tu`hJd-156T?%c4P)+ULJua63|XmKp|_mm{^oc+oiBOSr0_P%#aP zMiEZW7&e`#yQGe?oOl^?6yc2{DPWq&a+KDY9GC^Hh{d6k&legeKC*# z2j3fcnfbEA}s6PQ=)JVzEN-HUhkX8(Yp(riV9ZHYb=x%8QR8n9gBt{Gb zq)QryzyJr*9a8VSuCMR=AG|-E=f3Xehr6EVKIeQspX-QwJ_MTXc1)1bzEa!SDEQWv zs(xpR+Ep5q8>2h0vE;nXrt|#aB_v`&xqJ6_xAuX&H*Rb=IP!dQmDC{A#W%+PX4_0u z)o;Gy!ki#ArPeSBkFGQg)!UYmFvSdGiWkY&e~SSU$bgjt^JC!ZTFZU{QadPu*pQ3g z5!#o(sUA3xHxmrla6A?XTRqhJ2^7&HS-{#e-BJ%Ead=?D7dAb77uBptA+}bi!WL-K z=0}Su<-KsbaoQZdiVIJY)7bdabo%NtiZsn8_}?|U3g9)bPP6-1&XcK^KdW1&O$uJ- zTUwZsYknU3-r`nY`44TpIi=(Kq}mG2-tG=s*-wkg+|#_x)oA?cor@uGQETploefc& zE7?n?6h2yD2-A|WYDnEA=AdXA?<+nT?J2;Q&uo%{yo|U;*rpT~!E>hjCj-*K%GMh= z|6@HX2UkeWYFPL{NbD($H{Wf_5Se#3?(| z$?>$Y8(ej<#lRLX2MW1m(Mdt`*72K-!m+(>p5_sj&o5w~f6&M51m1QrXjCL3FR&uyeJpWjHsg#EAJ0mW0|irDns?1 z{zVp+H4R1niJu*rHjlu(-eob=fnbdKdf_`Pi3{t}VWc#1pH31#u`UizWO3-JeT)2C zkN{k^OTf#0NYU_s#nI@qC-%Gm#Gp?&A)>mFi+xgC6>B-f|MdmW)$AhU z*ixu_%W@{%+(2f!p2S-)v?tvQCHJZP(}{`t+Dnq}3TzZKiy3EHL4P#q`_PZ^y4Wz| zA)ZB!2UhRBqz99)O^hU-2ZECFQlOmnBW4}d+Brj{V$w~8+3U<3gv#?A+%GLnW#hmg z&aG0~+2=eee^HeDxqj}8CW>3Bsvne;I5N5TC4@?#I!(d>c))l&B*vF9D9&}f@q&M% z-?`8J&)tzExC?r{C3Y!BXK^zoeSai(&F(I}Ds2{3a;T%AK0CI*`5Vjqh4Ydyg#(p{ zxk&cP(!d*gOt&?NOI>L!POwLw?8DM_)(CGsMd;6Zn6;dNdM+PwTOO14`q5l#?R9$9yL2pjFf}t*J;+!{=$}zXKeAXW{-yY-R+o=s* z?TwZ}zmbjH9Y*X-Np^A)qZKHDxp7ld3|2h}3rT~24t6sX??$1UlA`%~#(@5>I4Nxf zc#;*z%T%@Ikw```cp2sI=J{Qu2kxj|)3SHp3 zfmjjdwLQ;hc`mV#6kzV7Z4kd%8hV2rP?^@*etMH6fq|56VGbSUD~QiW3a0KaEbx3` zc|5dOs!4i%Y~dtmHwbjLsZKh(15P#y#4F$;&S%8C;G=-mho%&~d#94;#DG3(-VF8} z)xVlFM0ugOpUQ(B*|7l?2&XoN8O$@1XPe*RWR@_)X*fmcthp5NlPTMYOrR~GhwS*n z+PouM^1RN{$qYoA7(i8k)X2}tiqIed5?#yHvl;ctQ6EBlORc{PyJNdM#7BEMXRdYS zIQD?iqlItMD3c><@U;U58!jo|*i{#3BRhlAk5emRAII1@a3r?y+}wspT%hDM;Erx% zuLx-{G-7-Z$i$CNGT6J@)L|^*rifA7d0pL*_!F(RA><9B5% 
zt-$mk5HXs44B}(iSPEu6u(*8C!@DvrHyb4WvA8C&^{J{DaTvCSrS%?mny9lB$ZO_u z40}Ti<|R@N{SeQgSq;d~e?6oSjP81SA=$1>;^fHvbu)YcDm)iu&Cz!S?%cTooL5;d zqaI=*G;&s=cLHa-U7CLx`a4#NPkYmY_Cm4J=<>o3FD=a+nYQ}4TtYdK0`!k`|IAPl zshsgdLI)eU{DhXg`vkt7u+WJAK<}HPak{K9ulF z1SLS*(vpOF7r%V_Nx1G}@bXr2zn5{HE~CdinAdcBbPOzJGd}$kAKQxcJyv@S08qYt zq@k)8_tcT#IvJT<{ea8R^h$zUP+T!0Wr7aE^i%CX|KCBox2A2sHCOcmi96psC+M|9bB zI;{}bBZvO_AuK<=thVLp{*GSIUP;~2B6l3fdm{f#>95j3H#WWT+gW!>1d+V=%YzvT zYk)QNsVzXJ`TOd_iV~ZEi=`d7J@v7OUWdRu*;|7@@}a9O;I|3xf`O6=CZkh$Qk6CZ zxv_w$;B~GPw+mO=Oi9GsYrF0|f}d)gJ`S#W6A%zCe%tNauMb17V$+}t@5y5t>S1|vj3Mp)rWn6ajWDL+CV@l2HrmnOW?k^1hi09s< z%gO4T-IfOsXDSFEJU`ksqE0I)t}lGR`|rf=rsJTD_t`dI1vp+`Uhfwos-MEXi4@F? z{5bx`4A>b(5N+7JZXVCSC#E6`7!^EB-RiOSv|>G1E*m!Lnt`4iy4sWN<9kH z*_P1feyw3ul@>xHEDlYkibieAy*~G9iq7WyfpIP#i8aZ4P54_r0>F9ij>ufMyN+=! zdX=wxIVoO+8ZdBh?D0V)5_Z{BC*#Q6S(HgM0;#@A**&KZ^^%J0g%5qmg+xZ~!(J+Bj{SbJZc!JpSnr&!#!>i+S#m83_r#m)IJa8fEbNe7s=rAJympWEHe zM`Poqzw(X1S+khEBl(AaHH90lme_3kTYu{%0YAO-qBL=2okM4iJyn3@>e`4ct6iQ}e1%hbpJ@`+SuKkM?e=ImqD zKxR8inzxgkcGONtSpLMSwQ_W-CXBhvh_R2DEhl17GgLh1LG~vOtOjR_J33L3W}khG z?UgyZPc-*|PpLZ$C-Y4}ZC9qBc@gshmb(Oc8jU%E*ukc1MPeW~wX$PLLxSP{>IRW? 
zJlSzI)czPvaaeWjc+1%fQo$cqvf4(s(@o`69@_PFe^7e1K4ik`+Vz_{AoT`ebxu!J zx2?ALXLP5IKRr12nXzUr;|nAgkMH}L|Kt!Ga684nI|629XhBR>nH$ySUg zzDkhKd?J%Lujd`z1*YY`T>yv4I#n`=`?iOSJgzmP!Hb`w8TM_vs$xWV{X{~S=0#s9 zZi9*Z-}V`AnjB!c(Oc$C#WsfOM>J#pE8y8$ zbh+-ojq4)Qyq1ZzU+$Ox(nJ@EDU;KsZO|sgU8qR;V`Owb8Z;CV2XKOCucoM|CS1em zeDy9qbyHvUZaJ`FyrEs$xNp3vtt64&{Ly2OA_i^bwHw{ytfG|y8_%jO4O)_O}%fdo5Rf9)U#L^Y+(1HzG#3EIrOV} zApQb9zUzU!-1u|{x$nyuFFLwW#&qM}laV$hObraCvDT7YQ~@i^rI!hCadxi)6-Mmq z;bluuW<~pQ{i-O7@%~I_^ssHVNW7m=shOAyTOFs)zO>A(j65vzb`T5|wz+b+=V0sX ziZ;8S1;IJ|6ziXD)V_^Hj&0n?+>58i)8^0S0!&@bF~2J2P2*u{Y?S{8MY5yPN?Th` zeX}<*Kdj}sHo0%J`?4trKg(Sy^*|oN4qA!}MiCQ8H;YpgL2A=Ruhr~G3nDOm3rYca z_*1_09Q#SZ^*5IIy69N4STCw?b{mFO%H`re^n6vGM>#x?Ba995dy0> ztcSK{OM5c(h!Eu+>h6as8e5(o{#Q>m~&lSw?=;tp>` ze(SrsI@)59?o)_g+DU&K*6$S(Mq*^w7A!zvBq)77g5OGRbp(>Bb!&4ZYfwv=!)TK+ zeH5Bl5T;^M08A6~XmBwqVG&q@`fw1TKa4Ol`hNg%Xphth-eB9l>zjM{Yk|fa+#NB0 z0k7~Fa<$q_$#UusKt55e|BkodpUZku|2Wx3e%{%7^Aj^y)#t%DC#SU1QLXhsuOB*1 zJqM4D{e-q$3iV$;s>z&mQ`nr!J8G0oTG`+<(vMjM`Z{6!y?`dkwDu000E@a4W!@?HB<-$s+O=e>VLfd) z45E+m_aJxJevD6uwZgUGHsH>JG;B+T&Hii3C^k6j3+24Q0b4>9KlX}p+3*_JTg8sS zg67(sB>^ti5NFPezVGTGikn`gqZfZnl*e!NVrB;5g5%c!fXWuFNePIx?0$-r;ZJby zKwqt>)})p8JIVxW&@&B+_bf%OY@EH%x%?INv|;e2(V(#PsgIO{ zk)_hD^PKGJ6i;Nj8JnOM*te{6-x@ZQS4+*yF^w+pzURDd} zF0;+w8C&GMW25meZ;DLX9br>fuEHpE&I4 z((p|b$l7Y&;D;Y+fZ*T35LbZ+2H>!ms>=2EfuK}f|7-I3JLUh{67v6EP%9@UG!a>} d|Bu~Kt$=s~lf*1{IPC9O9^Kd0K&rhC{vTp1@GSrU literal 0 HcmV?d00001 diff --git a/doc/source/data/images/dataset-map.svg b/doc/source/data/images/datastream-map.svg similarity index 100% rename from doc/source/data/images/dataset-map.svg rename to doc/source/data/images/datastream-map.svg diff --git a/doc/source/data/images/dataset-read.svg b/doc/source/data/images/datastream-read.svg similarity index 100% rename from doc/source/data/images/dataset-read.svg rename to doc/source/data/images/datastream-read.svg diff --git a/doc/source/data/images/dataset-shuffle.svg b/doc/source/data/images/datastream-shuffle.svg similarity 
index 100% rename from doc/source/data/images/dataset-shuffle.svg rename to doc/source/data/images/datastream-shuffle.svg diff --git a/doc/source/data/images/datastream.svg b/doc/source/data/images/datastream.svg new file mode 100644 index 000000000000..a607ea98213e --- /dev/null +++ b/doc/source/data/images/datastream.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/doc/source/data/images/stream-example.png b/doc/source/data/images/stream-example.png new file mode 100644 index 0000000000000000000000000000000000000000..c0ab99e0b5c30bf1b9f28600cce09a9c3dd75f13 GIT binary patch literal 106561 zcmd?QWm{aqvNnt)NN@n?(PJ4cMa|k+#LpYcXxLg6S6AO%Rb6#=n4F9#!WZl>U|?Vf;$lJyU|^rN!N4FOVW2>Fe!72A1ATpR{3)&k z1NwNu7=?mt<{IVswjIJxRO7=xMG*jgLYI2t+_8{0UV**aZ9cJYFNeFqa45>Rr> zJX?3yQhIE8x{m1v>tp<0vgQ9hoSKLzlxPP#ifkd(qJk_+x<0CdP24tt^|?^fW3{Nm z+9stydc1?7vT+)_LmsCATUAQ{f+#eUXoS|E9R_j?tgrhLsS$ixDH!smUAMY@2i|nT zojo&+1*hAx8B`>ozyJG5*q<*GZ}GXJM9xCD~S|MUxL;~SYc`+u7vq?Y>sUB_c0 z$6$f`#u$rCy~Z%n4}z^)7uACnL$pKfs_|>*OMmXOBOIo!Kd<<|8 z^#|B6R-9X;q{`w_C7pSquQTAzF%PGmFJ8z$=r` zTRm=P{Fj4E30)Dmo9hX?ZH5wq?3giGzmRwN*B)V!<#V1{xNVGlYvq zkZj`}P<4B5!NXUfsW^A@KO9nT!5RNQsTzJvYTK2+7RM?=1*xTo;R*>yKSBhUanHdn9#d5OvnbfFoP5Pq4xCn zT*?1)1r6_)-C5%4X(NHBI*0Vn~)q9Q~l`5cC9q#-rPMo^L$K8kj4` zq|j>v=gf_i_J(n@g4(EE6ZT2l6Fej4rR^%4L~Ey|oSKX8X>O_X~27fSS%# z%*zwcPF}y>T5PMf&S!gFkji+T+tzF&-`B?B@2JEA%{M%s z_%S@QAT~C3s|-Sf4+Ur<7TF%p@Gj25^*fO{J5Tq+C30VQd4G6wx4yeO(|lb&e}2)* z>YlGTH3o_AiSE|w+##ioo~Zm_m(S(*#~?d$ z^j}2ial8C}G&~#L6pp^U7`|R-C^W>4{2!J7!;%`&Uqt;KQE$zxb|lBZpMA4KKG%AJ z-pDZ4u~ah}*cR}g@ph0(#tmV%))hsqjrWtL#s2pA^fQ&$kQL+vk^jubk#CtL4w^m3 zfSenKLz@^j=L(X(bUn=|QR~G#c_L})40u6NRknjS-aZuCiv}9F;g${hlLs@|dTX2G zKg$*Rt7qdIL_79}WwPR$8j9OC)YlAUC>3S4t%JEohHxNplHJ?X`He?enRy!NzD-ix zjRo5NOZH?f(SPh!sHmzsy4JX@)KHRgZ;VO+U;{g91h-Ntx}>M|&Ke zsRb;)<@o8S^^^)0!`y?H2-ZI%22VTRU=18CRaSv_uJ2hN{#Ncuu8saBjzo)ZD${E< z$E={N^?tI(CJvqf{XfI+52&fD=^dYjVk1WvUz9f5tTL5$Vq;Q)iHIY~>@}dBzg{$C zv5vO|1C2WjCQVJ?pFu?uHRcK2yacxvh9uUic#5fBLg8M}IQ|2LC*Di3=i<)e`=W|q 
z?q0+?>FL?+k6h~a&r%CrHm4)yWLp5&K$GR=m`ekCKM~dfIi4gF;(;7{*}kz+`aWY& z&dnj6Wb;4zftdzY9rX3w%IVC%WahsiF9@yHQvW1#ID(50JT>gvP##Z7tE|ghDb?%w z8VWB*|Kp!pxNpiV3EwF*#jJs^pblRL6}WKEjlV%l#*=6fM2ynkY?@)OscL(CeJi)CXIbgQ9`mU*@Ralbmfa6d z{R=e7zm~6K86yymER7R&ZDUEa=*)8b>J)pze*^{mx2>bkX#-Hv1#>s|k7XJT1Uv8I zs27Cmv|ViAO+F~ioRqk!|HdywC)NT^;3F@o>@1jp8`{hlALt1K@&~UTuJ=5!?6Pb@ zkpD0hctn~lh~tPq#g2U?3vH2zVCDxhwt`Qm*BZ4EIHq6XKKrQ$47` z)Ojn2jU@7K_CrOLkK9tRre2;vUQ|N_Cj0_4H1IWAOP+2BKzBRx=Kz#{8a*PGEc(?X zdb-PpSCSlt?O`S+C>U^73RF_v>`p6(lF=?Z3^UiWEq4<7_X@Iie(SgQE7=lr?5I}i zpMSkD16wD>`RVMTl7)F+9EcmE5a z4CHRF&CZdE8}Wz)6+HPB5+8G`I)AOAH%Ns18x~A%GwE!%9GqmaK)+kk%=VopBkn!# z8fE#mOOR9-)xWvdw@EC=>&qfVSwzQ*UJ(SDO46kNqs&0FYS3CpZ}lkbJ6beiw_vkL z0mU5y;aun<1twS9iC@?EWb6sija@0|X7k!Sp|#a1_FW}QOId&5Wy|>mB%onqurUj+ z+buA^>+YTe14HBBA8{+4-2O*D2C?PkOz}C+2D{v7B&UeWzYyE}rkxy>b zjy_A4Zs)ATnfI%(u_*ZU%yQ32hm46Q=w?Pisj-orlVfYE05ClaPHrlSi;E|>EHQkd zr=+bN(5WD+T!`M8m8yUVhXFL%mG5+OhE1sYvk*g4v^mhN&A>fmH(t3bW&9-Z}K=D?O%?cWN+ z5DJyIt)Sl67}@QP2s6&oLFk#<#^l!#N6R2aeQ7_2q3)DD$|&J0 zKdLGUHk2mCs_ms<;Pubkd;w2Va-2%SkpxwMVVw;eJdmTnaLFt+9$W^5VYc(=y%7aL zVt=P4%Os>CjykhUVOUt$IpvEE6WC2-H;1t4TTeukg$FAnE!1lB%CbCW8yXoBF}56S z4okSYM=}|?Tb2kR_b92jL;SqFI5g5c7FAw4w=h@#YBdx~*xbUtU1o1!Hv5ry)ph3IHJ29pFp5iYO z4cXp3taMT{KyLOp#s&#IJUlkwSkDHRQmtSA=vLT=ooKU3x%E>E9s_Lxg5LDS=s+}> z-sem#h!}0B=a}-Iq|n%bnl|aR*TCy705+!qO!q^tP!QxOrr|(P-ani6J_4E5v2?)Q zvVL<+Y$Hc+t%aq9ICq9nZ<^7_Wio@|hQR?(jl%ICYNBGyMqKOHM|LEt4NbbfmJE>C zkPM|F?@qsKbR#2(y)MhYK+hp*ORe=M+4 zeyeuN_KP)_2w^rm8mZR`p*fr)FfH| zDM*H#+M<)>EeP2FjO%&8rjy$}b*r>Th%Wutp;>7ovUhozg6vKji3~nxU;kFAm-BfS z6<)mX>!M5v1M535MOPJ#TP3&%Lha+sln~bd&GkF7x4{oM-l-M#dF&OE0cvf5gGQ!B zOe{+=w%i?lBt%&l!hY|U z!-Wq*mTl@;Q(41jM_GwWi(D8@O>|zLjS{?hFL%o1vC(0vbnLK(C=izJDHXj9g|+Ii z&qj@6os z{kHWtFJp9+y27yh!c3mlm?gsxgR!GHFY95cpOD^3h~lBV`Q>wt?5t65qOxf0r2fU( zp5n)`Aj@($mHg=2bFTS$Or1O8v+RU2CoZ8MfR(H5{ec4>t2biP8|QI5TOwr6a-?jl z!?%(5eeg7zeFaIL_P5VZ%Iy_UFd*sCAG@NsAogn>tsJ|79Hr9Zkx9dSAV_3HW(A;dxv`uG<@V~E82qe?rn 
zxB%B3l1%LoN<5Y7KTSck5Fny>=0Y*Kn=2u&`}!ah*gH~p&;$5;AUn}+Xc{P7x-^e{N@ zL%vi=Xgo(?p#gUSmqSc#j$5hGMIvOLDj3YXCF+B=+(K_yDUEbq;u!2D;cUl)GQ&j3 z**;OnZoIZiXXmWujau`KWEaxtDD7zTfQKQYG*0oUtLr^(HRxF)Uy@^#$(Ryo; zu3BH^iKMU(B~N#no#T&H5Bx-Cm(<#D{-?me-L1U}CpA!H<2Q=2!_nwVPS^h8oCdB8 zkC{*`y)(BxoA?zS6`4``*v?}BPx~}>4uzJHL>UCY(uyi#J@}`gL+0QziRjl`G=TKo zc34sj6<##*@@GJV>&48(|B4;M} z;T(o%bDC#>>&K=?p&*I@zuV!x?UtVHU?Y-mrPGg}FbpNiiJSXZd`*mo6YxZNeMun2 z0$CRpBZ?Qeho6^r`|;PSo!F7ay~IxTxzUuRX}Y1Pa0(%hY|AX9B|@g?v^tSx0fk1% zd~Jy+#36t6fMitv_yb?J)9;O|ORqP`jVwr*5^u)Oj$xbnnjGnR9V5qN55_#2LP)mb z_J!@~y_)afXhT-L{#6|`S$+=>G#6ht^UCrvCA6`U(D#*&M@r_BMa?6dNF@%hlfar5P0W2yvg6mzio2~5#Uj`mog1P6 zt@@Y9-5S0$wdBw9OOXAG}`y)`v)SnjRyX+Cj+ZG797 zvJ-^dZvHddFbu!pU0t_RuXp`-~K@+;a?5@XC@N0{~fd-VPWm5ROc=Wow3qnra!{(xz6Bi&}3RRXY&V?i6 zwO64YT?9-C5u3S>=jscL!AYhakrxYIC+}`u6z<582{9!)wkndy%QFK75ll7Wi}B&* z=2&(_@BzkFmj$HcYnz&>JohRqd9L-uqQ|63-wHC+t9koyca~+E(N|U;{EJ^3pSU5| zEQ&=#>O4<%CUSUW_PEp5>zIPvmtQRhbwi_JwyEHXK-h80IM2B!WrmbVG)&hjE&}qQElRQZ9xlcBnL;P=fJh?;`e&>w{ylRvMYep>`lve zi+Ln;dNGQy5lu-G(mE4ntlHR9sX^3kK&enJ!DVia!H%*OLzchoy>>qRzO6^OwUqPd z(2{p*U2--jy(pBBihU7qkgbf4_P9-pDw%<=5w%^jn2feI;AIrt-(kh6HFdMZ3BI@z1D^0yjN@*Kkvj*?J!M&BTILU$>M+PSY&@8L=8?ji^*oMCmU~K;a7%@7VQ$# z{rNyJmLxN+47=37t(=DNdJLsVL`L7suiBW^U@pGakctcO@b~HWOY^zYse_ZL1Cw}% zr|&?bY@_wA@xi8XjBpffA4IH_D4r!f&P?u}&aM`;*E{TN-R%%}jp}y#gvilKarXNi z8x_@jQYxN=yyT@~y+EKuukrbMe>MpZe2me$E0O!B&X<>l;aM_!!gr!D)95Wej?CN( z#{O2Ggwp8z99z@+Lzrw`-o;+cw(;#4-b10%WkpKch4$7|IZrrSjk}icUM+DD?2h42YymP+H&}I^o(gtTdx41>&3b=KvkH?kRs`c z9{%ZhDaH|x=J7t!<>QmS-g`@R6n&R@Jw}DZG;tC6SwqXvqoSgIwRCB~erEDu_IvOI z#)TY1@%s}+n1)@bTX0IsaKCo)HxxV(2QNyHo)Df$aK%s&K^aI9<*2c_IJbizMV!R) zx29?)kN}A2Hy2`0+WR}m%}`c*I$!FU>zm2s-OYiltP9mb((^j$_UhH{47wKz)fSU6 zk_$0T^+Nt60uQuiq>IU1UIuR&cCL@sjD99j_t)dYM8x7v4&MWIVK&F-4K`Jf7cgCp zGA6I%dgpYvmicPClgb@3mG;fYG@1N%1KNIXUhpVf)s1keHUlkSFBLPav{`j0Enf;IliyA))|gc;vA@W*FgZTm3Y6pv!_+;b4=LUZKlLn?$9UfYM}I181y!))sI=z5c?M~>e8^9K%p@O z&VmpHW#W7!Qdnp&_sZzieFXR#n{G>FjG?)Ctl?&-?6lh=rnOnr!YdKR_nJa&2$>*H 
zGiEppsh-V21Psp~bQIsI-l=tQrA1en{fAS1{b_u027No%mjB4Iyi;WxSH&zVk`cee zL(CZLiVs_F6vgEkwy|>_hHw{7vBX_Fpqlf%-F)jHqwmjw{k1zRCT-B8sv>U7&eYnb zF)@fw&fT9g#Xs7`P?afZli~+SAB*W3Hj*}LU%Su+eh$yLJ4|Uv=!*)fe@RE^{q<8naUxz3_ zfg>5&1(E2^PD`d!bgBF;Xp6=_q1hVexl76BSm*XosGPj)SL!E7aAua_r)Yka!+Y{;KftW(Z8qsO(g;SNqcbIbGqw^EL`T=X_$q5PrR5I?qQB~InGT8xV=E3FaW6u zJ{D#pGg@r zx`70F1yT7|VU5t}9Mz(3xK=Qj*jKPDorjyAFs%E@kroR81!17P ztQaVnHG%t{83Ahwn}OS{xvBHl@PT#!d5gYL!BFi6K*h~6{7S7Bl~T^7lR%t?UX%3rkr}w~ zN$>mlEwRzKH`HddNSB)VRTj!mGhmLeFr(}6*Tz>jN6XEonTUddY7+Jex`bNJ+KB>1 zC2au>E&I{B;0z27^nBsZ&9c$`UD6B@DB1_;@w0Ullry>2pOf;TWOe!ci=nrd=a0w| ze&tDGh_`9*%a?+8Ooxu>1hg%(zX}Fi5!~zLlSKv{ljvBVqZec22?hGLS<*>r@!dn1 zVwifp4aTKq3rIJBa{+&?MzuFq{dx(8stAUc=lnVVGNexI#7!OrKXb_Ys2(-oeC89BbGVSPe;{^ZaM*8(1tnr&zeKSI4z z;x#VBj%~gf7Tj7t@)E0yy4KOuXg`M5Iw4xiK=aofB`uI-)}%I%Yc6}&`gy{2Ogfmg zVQ&stE?;TMwc(@riA(NmHr25@Xa0u^P)ke(Ehy?*hKR2hlR@Cv)sl>^|0q9ULwPU< z#L-MhqeMFBe8!eH=`wtu#B1M`ym@*KA349`*i;g?NU+oK5T*laNIr-fYrz z>%=K3zJYiK-b}d6lGnh>D?xhC;n4&K> z1v-R;`LTHpkPY{Gr+WZ3TU(-{##NDsFID|Igw@ZL7brtJ{H7LgGfUn(sP0LWWZaF3 zNoZ}p&|eEl?XG2JA(L=sO{MjIID?WKb~^^N?u8;&c}jNN8}R22YMywm)w=s< z`?xm}lqJj%E)#AZQ6>^%V7d4_Zlb(X@0e@2XXkF*oQb|?JOU1{6h$H$t(!af6rhBl z4E|cwNLlu!x1(^5(%F|>=7tU$$wOIF7a{r(!`Y=SM>yB;FKLf^5`}`NDGs(;#%n^7 zS%YTc?AhiEkTOA+Wq)yC-6C4G{anuBNs@BJzPqbAnrQOKcCn zd>rCSZGL@eYV$c)sO4hcM5=vq78V%g&o0a4Kq_(LNq{!)=#iO2Allh;8?^lCVx33h zAkU&fa3q@5+h#z)SLeA$AazJ%*;S;*L~Bakb!(1A_Y6tn>xk8l>npp3H|m))*ZQe@ zMY@};BWVu_PIWBvPcmV0B*7aBnu~rN67hBc8ZP#h;sqU*e_?$IKp$1^ksoUqJy5p8 zE}}!cY-IE2bU?TiDv{$cnq*E2z(%hpsRyMMou@*?n}NB+*8x#hlFV(2yJyjdb~qz& z5!1?$WRU96O@F4}+uCbJxYNwOTi=FQF;uhHFK%5Fi5N5grkU4g{#C=AV0dX8*tDq^ zq$~U2A6?&NWg*?IZVB_&giw*a=5do7u{r`6FRfJeS+HR4V_{dDm)FS~&?~5++l-E~ zs`m(BOTPCWKDL|Gk9@!v-ncLK?mT}#@j9~y6}#2w>*&gyPQ)TR`qvLah{@3KF!^Tzf_B$=o3uLq7!GLJ%fBI9EhEUXWJWyep}y+DoKr* zwk>Ts#hY^7tDA7BUOmvg4x@K2x6ir^VikAGTj<_lv~ z!O|`$)krYqTAg&YU)q<+-JBr~zWe-eGHCCdj3qO$m1pmuH@|aiHn;FHWhjF#*oU%EtV)69`ZU|OB^&4dnXeq6RhuFc%8 
z2HHCfF>3<`lOlX|065S3dYBQ%b6QtCsXytPfk5i=V0fQl8EV#YXjyt%yn)U>kmLKd ziD9JM#=VtbHxKYQQx0&wDGVr8`aq3rjS6w}kn@5|2-GuQ|<=u}ps)`6kzULog7yOnGGA!m4 zwqCXy5V8rUcnAlpgN+KqG8TicZFu`I98VZ2*(|4a)NYL6WAom>x zg}uO{;`nz|hT+Pv-!-%TShke_VlRlsVO5-vume)Z<}C)Dqcb1BV^-iteO#3J0W2ww z72a=^O(z8lYEqX$(XYzh8ILONj$Mx`H_^Vmn!a5t>C~U(mbZ+wMU8L6a%&P3-!k0a zChFQBFBQ#c@>KLKU3645L8~EWpn+uTBBWB{GxM9wJ$mMPd(zU}mp_d?3uVz7bs7B) z?)L4szi@otw{ZdK-X;qs1c#NbNR9*TobxMCn&@*XY&rdlaVd9n8{&5z#oT`xP}XP#-N0{9_~_0P(9pXDjv$1WO>RDP=Y61gPR-|x&e>i z5){pDx(8HypR$NVCZ-Cj2 z>d4Oi6g2gg1S;=4*4aOzOlP@7hizL+OQW^=GRoiNz$q62hvCuqRtbK$UNCzF^W#-AOIFx~@p!svM7;`~*l)f_xA0%D`XnP@ueg z+r%8K~8}0NUl)$Raw~Yc4;j%P7Kj zeyKJFW$2CCZtN41HBEGzFVG+~(d@t;@P#V11AOMB?VJr$+qm!=A5I|!6d~E|kC;KC zLwTdp@||LX-I}b;N=*}8WXqD(JnxNoe5x~HCVS5;k}(c{n+N2i*`Rq|kkM6~uv4VE zJ~8pKN_Hb4yno?)d%%Uz{{5iBh_m?-+1G?!eqw|B8Yyeq-UF9^cy(b*R3f%95PZI< zse|*fhSSbE4<)y~pe3MUm+9J+L7I6cT4%Lh*G}*KNoK+B(F51_{m9WELnH5N7BG`9 zP--hneLRf{$7+0YH)!~*jCqThEcJOi-@cqpkE&GcW|im0-yo&Dn1#1b15zv_5ur`Pf%{&t6bd zSM{Wd<~xJm+O_>$FB5$|1r!9MPUQ#+AfHS~@s9enF4J;p<^F-O)~Vb*LRzQRDfglW z3r^2PlxnY&-F|y0yL|Ibr`GC`br+UzKh&x_^(mrqQ%!!Qh$qscSu=-Jul~Ai$+d?X zvgAfz0i#ckMli^unajd5h4t`*655rUXWVuKXIj%889O((Y9+K&MQ|=}!4e@!ml=hZ zd}z{2;|0J{8#x_V1_u8XJv~XvO^U=V@p)m+jm9&$0dV`3qjmQpucvNx)(z6!ce8&Y z@gR=-La3wtSMlrWIUk%BpZZ9_6d0n{f*J~!T*6h5D`8VJ19U>NDUsXR7S2x)pQ3hQ zEcWtyC;s9(&47+|%ql(HJe|_ww24tymSD9~a|2^+jW5;e&KNJMw8&lIXCo~d8>U9x zee`iOls8xQSTZY$(_YSx7Eoq;Y6at`tJ3(po9u*07H_v{N1-(>{^=$&K ze{;2ig5p5nD7=s-azO0;ecXyeW365Uq32Zh;5ig=rntlF%}6}IT?9hTN0m!2!45Y;5pOQw+QmL&e}#liG>Cp42=gpm$A%T5u>7b`2?R@8#q@%Fy~@EAI&$i& zG9`s5Y3`kPY%t4U(>w2(BPGP!e6GWnHVfn$L}XPxLXU0sW(r#chjwizdttU=Bl@(t zJ>t6K=;Xk!t3LhaA49*cO#@1cyp&+z%4JH8H7wwh8TJ)3cJ6zZkQtbO$%vaPL9B_B z9gLtt1gv@s?`!YpEvJgDhV_hDq>TPS(CU`eMJiG#1Ps?OOfMJ~;ZCF@IW{2aUEg_9 zwOS2I8!v>;?yXyR>>Mc5Ci4Q85ZU&}c8V|gz+LHZipE-9Q;QGlha)%BMw5z!+>Ixs zZ=`!7n^c#1w9OIu`F+f?AlZ8 zX?67$l*X-h>BX)z>_dX_KCk0^5hW7(cnBfr=wH3`?yd3W!sOb>+0OQ!Qpw9H`$0F* 
zQ$xNhNDvuSX0K*`OHnhuG+h9W)KfsZ8`-!zF!wI0-n3Ai^H9%fDwGi?;b6fkObUVq z)QJ>-M;sR$=c+5^tLf+5Fi8swTf^_CN3^VXpOq}uyaC$;Uxn=4U=m)L*DoM<=L4)> z6;?HRj=uG0mdLz3-5=a11aYw53$rLGQ{oH>Ufuhx6^$;Jo%EzdVBLwcPnT%uhLvZ0 zz8vGSoKgcHM>jVvG&?1rv$-{K$)-Md@7m!}yUAi% z^KgX@J2ydRO#T*&qRKC_2K!*`GWao^RG~TFb5=H9)=Lg1{mDMnd*{ zFWsS_^V2~x59V}wEQcX)d@}22Z|Y90NCp0H|GMi%cMA63(tJ%Ty%}HAlp9*3ZkpLj zjm|~3=Ag&k*Jwy4U;kh-q+jD@S*d%{yCfp4Sx8W(OSI^~IMu5su?ciAysWQ8!Rp{C zT_NK2s6jW+WGd3q6@JCErQG+3Q{OeQL?Uxygj!o2MKIG5zKf$$+xgR#M|)Jv6?w!{=R&X9imAy@;~95;ybV!~ADKYn-B&Uh zOb~Du#(RLrQ8`{}Ca*h(FBE}X8FSiUoDExTgKKalq5AM9mEX`li&U`A@YzZP)S1eX zyiYnaC)Q(B=TJ+XPc6xLekb1^`^$?r%$t$F;{lyH_#3 zkx9t#{>41)a}-lq<8owjWJktRSVT+b|Ls~qcMRKlyD2#qS8!8fx`gkK+na$?Y_?0fmW}kzd^LI7&T9RGMV)gIZ z8Zzo72OcGjR2zq#HR(EXr_80+un6mKj0OuTv2DY{Br4GKqi5clg>0$HF25?Z>2lsr z5R}sRT$E<*tCpb1MeD@T=;cZ;?F8%==<8LZ9a(H1jc{>tni}$1LQI$y)REL0B8Ers zn`EkFaq;F%`kZp}-2LGsK>(p8=?;lV!;%P@rdx0J2r~q_T+N)2f+}ItZ*YOg{Dwpq zz^?byx19GN(*v$PKKu_Ia#Kl%5FKlhcup&X;$bLq+43@R)6BFlFs!)(%p}s}bb8MGy~fK2*i& zAZo%D`p9Tbb+t=2bpK_?~AF9kW=q{e!kEnH6SyP{ecfIUD()Xq;9vhw3?sGrR zYWS3qrD`@R^QW%uzJ&RSPA&Yyzfh0yH_ealry7Xr6o=x?ODn{(o;P4MqdJEAqR*_sMDWvSIG1%L6>$2~c5})0* zJBoHF8HS-u#+msA*GfZFxtJucCj1;)<6dGz~||@reSXkL&CvOcAVR&E2~X*1Plrwh(yKbWt^?tfJfx@ zf%b+KPV7J7fApvrb zvSt+weFD@%C?HwBU_@_7q%|KzTptl-&FK=u1R-0OP6M#X+N`DOkU@)gN}NjXBa%d=PLOG zM4c(F{kh1dY{x&+QqS5a<_X|8I3pdCWk@GiF;mgl7@d!#6|`siP@!IQMs@uxTxfC$ zW@*Yz!!x$7Bw#L@BMJB-G42{r2PFz-iD1nMKc_0Ev~gV~dD*xgBeyRs%MK`VdFFeE)K^6El#oJIm_t3p3MTwBvVkhG$ zaZ*uoQ8=4{fY9fIjwSd4>rG)T)59r0hL2CH?{v__dPA~@bJ4^FPA=Zp-ONuf=`k`4 zu?|m?oLqgZCfQqRtglZ97`J?><)lnX0&vcCAaDs3K9$A7p9#ev>^I{<#u3lxiQyQ{3A8?zMDU%nRc9 zCWDR>u^mxu)9$N#=)HD>ctzdA%kjE8evc3;W1Zb0`TuVl~1qKLCLA6TE5Oy zP2oaJ0Nr60f$%0%oWDsu4k9yBxAJrXPRPm7J~w*8M5AO>^~}Jz@#7hPcFx)p8&d(U z-squfv$9rXH1fim4^8g*Q&WCX>myn#^BGa0dwE=dg(m|}M4zX7ZK zBdk$@HlJ8*^K~y{evJ-Vf~pQY?P-iKne10sq)j(&mVHbmR#4p);y>>cB@d+X{}e&-dH>X%PL-=seNu;P~Zyw+nkUM-6{#FjWDoryt&#@w?pzEL!2MLHmY z=V-M4Xa$3U16Z?z4tI~N_+yk?&((2T7LdQa%<+wvCg@!gr^U3<+S^N$a-zj= 
zK8?r-cEN7BHs)4G3&1=$=%GL4yr~Vx>H1E4zW;^XC+vovy-OJ ze}Epd71;n>gNdYCD{)1IA!5IynvWe3m)YLW!)G#CeW!$#VTE~1>;{rJo`cHCs`M@| zp@?X9a-zps3f=9Gb1(;!xZ@X-Hmo+AP6^&Q#WiQE2X)~K_PY_gm4l4*%-sGI-gb;G zyK3-Zx7mbwq&?a_VRZ4UcQZHy%(?rI2w5!XlL^HcGR>!A+;*8%vt2)bt5+nI4lY}> zTGAL6)&4dn3VZM1gQ6sd)*PYaq>#ilx~XGoNwrHhO%`J6S%pw7{t3@GNgcg2)1XD$ zpQU$v;tXPEn#n)^Ipdm){g|L-Ootm9bzpy(2I2xEu%j}dpe-TyHSmI6Qs5~!g)2NK ze)9CU=c(7wXY=9P@V2ojd~J%@95@%|(F%xgo^4PDW=1(8Q!NH>CRm-s2Vg_jg*`+b zLoe5NM2E`%#>SHAS3_DGwrZJ}%3A9gM7)J{W{daTrnaZhuo7|CQ(SVYG-wqcNt5z7 zb>Ctf!<#?H4VM6e&-Cy`55vZO8y#cSQWq~LXWNU`>Sn3EvrTRJU!QNxn zg-T($gC}b%B|t8}cI!R?84u@9yxEqfepe&V$atU}f7o*#=?F+Jq@|G(8 zR{e5i=XRm_BTSj-)s^}-oFElIBBJInb$PGS_lcH<3 z#@WGJu?%O!X0$X6dD7e?6GD17Oj1#iCfo501!ojyD{La+JJGdFRp$2dnBO|vzfJnS zMO#xuNn$EI+_WVI1o~VyS*3QwvtYn2p;lXZoP#$GG{odSyJGhBrXn2RtHklZJ!K+`NJ=1q4pqE~2HzbpDPWHjTmCq$H zfS7?=EZ?ep2Kdif%1s0%)>B#XPYM4B76+l$zi~!|xc;Y3hHd#E^2ci539&0>Dp8Gc z|0*P-M4L0=NFFiKgzOLS=zie~+o4{zce``rH%-wHRy1S~kq!$-8Aa+3bWH4VU~A8k zu%x<|2O2N~@wTmI)m@n{B`Gf7UU2X}QRpG3ZMhX8MUR!62Yl3(=}%81_b*o;^S$Lk za;61=$$LkJGZ{wUTzyV={IZGo0IZ|Cxd-EArsWHAn{#@TuX@i&Py*JeRRJ?o+B$=( z=4H5MTaDW|E+7M)_z=%ZkDaY*>E$R!V#b{WqsZ&^q$IjGQ`JtKIJukBNFrbYZbVg3&nV7PM(u;gUl zw#>E~GoPSrM(zGaA_NUe)!Sr-p!aEGW1FU+dH3~%1Z@Jr73sW(YXq-Jie2e}@RZCO zK2lkHIYDr*ty$5)?b0ia`s^I20tc@TEQ18bX_qNRseM+ck5DmuPfF|$XzsLIt;XPa z4$Y-mGFVs(Q5MQ>%K<1IMr-54o-tNffd*H0S?^n{gO^eUi{u0sM>+~pX*jiVd%0<~ ztjBdWJ0$j@cR`HdBRvtqv-Rt!P^0Z|_du7G<_2O)FI8G{N*^@)LNHKi?Q&On7B1Ukj;jIo&%N znCh~1C#eX}FDM=t-x&Nj%Fd@w3s5gW^f%DKp6kkL8bFn-$xZYBG8!|o_ckirof&c! 
zTAqhWe~gjtOU#CJ+?y56{lJmc_3(1RJfzjx{kaA!-mC66LKW}ag=lb5iiXRTCLxmC zSg7v{XUUQ+pctyboYR7W&x6VM99yvtDxcvzO|Xw*K<|ooYIw=A;FrjGJ~KGGFw0VZ z>3#df`^Y?|-4*=GcSc0J*^W_x4K0e(oz=4G{dspVr{A#mQ6$mX*@fRTeoNLvb~P2SaaM;dcGrY>x_TsjQTFGWCFnqDAnN_qPPy*jplsL; z32|$l!9^kt{X(f6M=939_VkLT0E!4aY2g~h{9iIq2X@LfD7bmp2&t;6NX zo#Sif`tW_Y;H54DO1*xx(L*xPLym>Qf5PPF=a=Uv5)l_qjB0v?;Kod)O`)6lXUE)W81cE+qP}nw$rg~+qP{d z9c$A4{@y{=eDhUJ)&1{Q-Fu#MHr8Hy?ej=sNST{Tg$mCJA(E@&K>&t>31SER$oyff z3u3J6<1+yO1O-dJGk@%))moLA#$YnV2E%adkn`rSz29VYnO7+$(~Q;u$QVeCnPY1; zSaF?a!yCQ~h;u;gJZauB)Cfbc*{OUJyLVVlOgm$8MFT>~r8$3k%B)t4KR7ND=CcEs z^5#n6`FgAK{O4I`<^C+A*(-VUwj8&FoF2mUe0=C`nhkFFYMGzVGU1^<+EQ(&1m<;nCoW;`5rJ(Mi#~u)F<< za82`YD>Kc_&12(A6m#qyKfNN~V=v$RqNT0Qwag^GKI-*BOY$eTg(o3}Mf!`2bR*N_ z!{d13#r*MuI%T5ziqLpLx!5?b@9n&5=M1W)1qD2B?hfrFgzs%;p+`q zQ^(h|2|E3>)jeton?V`VpgaiOQ`5<~6Hj@NaZGw5_ZdmhCeSj}rB3 zE5O*D{k6v3bvHj*1~UUn{dD&9gpqwH#GdWbuigzRq1|Y|8140Xd0Wp^bg1e0^H%wK zOiBzaD-sb!Pi3u{zjKfn3w}| z%MlY6RmFkZ+2qq%M-k&FFT*y5lj|PJS=2`QG}Iy}M~h?=r!4vgWUVF`rkKD=Bb{FJ zDycZ-2>PrF%9E>oh%R1mSIG%uYH;ZG9$NQpfJZr0r<*iOUovGTzMdPLS1V|0(S7cj-8pj#!nTZSy0wv2-> z1W(@ViTm>-a@G=@P!aL{LSPo_4=Z(h27x=pp#2C}=yamo=L+k z#6K3$?Ct{02L}uaABYId*D{>?<}T+x`S6E=4^A`AZS5Xpva`r+se@xtt8B+yfopqY zgWKksO;%~BbQd+#1Hd9jJpYylM$S}EALZz@sj$4TrXe~)SV%nq{V>d2hE;7^6878a6O5-FR&C=B2E&Ok^8Y^sod4q{i@+URhWwVbBBdSim<` z2(LmsGXIFb6s!;2A9q7o5KbEI?}qTDqNq^o;F9Y8kkfmboab00C9CM6?Le)G1l`v+ zi%gJJH8=QvW--;Z(YFu0Epp3VM!fP$U!ZPLvJ&&1Vo`Z59Qg=Z*Prh!Dh^qijEt0ibk~FKY!F4-=yS}d4vcqGr?M_Csg8H$boOfO z1KMKKkEXt66bz@DltEUr_Mmv|9ASs3st(VIiS zB~rFEo=5!EWRzQoTe&@BY;pMZ1gvur{4pd@W#Pr!q28VPrJ9q|FpPugVuYbZzuRNT zwoyh`V2V$&OcsG`p{QG?4yPV0fIEyyEYtD*S+7LCAE6qpM)s<(vp4{R3+)2(?JI@| z>B4G`HHTNA4rR>Vp<;1s>eP?AJdnG)D5z(UdS@p}bsJhTW9CidAs~0h4j~DPtJ)Xa3Bdeuap`xTk6k7_ zovPDtKC#c7n%!;wIzq4X+s%H*pZEF4uddd#1vW}d8nFAk$33?K1aOMI{G(H>s7&$- ztjQq=^`U@%J7y!w^W^BQ2y0F)@uc!2@Brw8&lm;g49etg7vLQLPXG9ohsIy|QSu<` zY~FRqvAH)Yh$lQvS7{3pQOfzmviZV9Ks9pq4ZiH>H|Pim&DXliXTEXL(`6-07prN( z&V;^fCJ1>kL05H7Byk`nB_ZRlXZ3UG?o~qoK 
z4H;SYc#+{T5P4VfKY>0zzG2Vb&G^5f185`4xGKW0@b-01E$=gNHaj;)0*Q=F!2{79 z;i<93+Q(NbfULsI5_>wY`dftKtRr@J%IF+8xTjE!VOZzuox|%3{p%Q=okgm77G6|3 z)oHR~N)E$X!ve{PlezrFNW+gGrV7f@#w}5@@#*Mn#IY*QU0a{s4vyScM*V!(>5zrZt}@@t@1X~P zW^b8c74>+Usocua;0NCQ`%vGBh;4Cp$$wXUnA5nI&!G1ExIAbrRDMRfY1exG1Sgp9 zh{}AmxD0f+22#;5b$}o*y`GN*xe9JA50eNtGV(;VgYPw6T%Pq>vrQ5{7H<8~E7+Fq zepSWik>k*mk-b))V5KOgDcNHvGnLt%ULuP!{2Kz5gR4tp+V;J?sde)d96V1CJtadK z*aFNE;sC9xX}vgSYV@V0nF<1WK%}(b@plO%30Pm{va#=nPt&w0=XbNCQQ{ipH*qHJ zx8;KQ(-!~1dQ8WG$5VL3&Eg86%H-VK`@;)M;`8*af`C(A%S?tUEKF3@7jq*2++H`q zUf1loX9u)om^CR{c3ahmJ|8niK-!Ymb?wm~T3{Jv-aIV%Fg7?oC?OHVyo~X11?;bv zu2hy3@NI`ysCJrgH?I~8M%T9Q$V#Cyj`TIBo(kbrHHE`^ozm#U_+AenJ{ZZrb>U9b zY8$Wscym+dv)5mXlaXj-C4)vWDx(!wVHkG|gvm3MmS+K3?H|d*sHG|G9~VGL$Ni!$ zGR|`3Z=R!@Zk2i5ctEpdQy78R$9}TK9A0^9l)7`*ondD_w|#bUu;&i^$*s-U81~Zk z{jbeHbXZFN3FD~>McLF{e+p@KkN3Gg#_Dq~91l~?3K$HE3Tgn4S7W!j>gb@M|7;H3 z(zLO4kuOZC>~C@tYYGJ=*K*Y2eR zPyYiofxu;y@&LzF)h}QWpztKmi^!WwkU&L$#?3jyQMX~KsKi7Yi_DS7V6lG)lKH9y zhyKg5y3s`=Cx#+$DDwHt>8K%L7K)HuQbIdXc#~gP)R21jEgh8N}{G!dBWJFv$UL?0>-3ijVK!tX~{k;a?gicwA^*lu{FA6^Ue zELFMQJDokgcQ2`WF1y^6?g=lU{mc>X+Xp64{vk^^U|&80qi=R~d_$r^O_S56QSZ#M zxU4>_Y)&5r3&vnvrTO;4{2`DMw!F))t_%bP!*17+j_n$oFQd+HlVdIvW8SGIC_*V8 z3quJnLGlt^uc)>yxo#IAP;n?Z?J@{=f=odMWi99)K9E7d^)-GzQJ$%^yy$AAtc@p* z2-JeC9&n|ET}ymD*ztmj5O5_viGOQLf4D)E%kArh7$`&ee#cAu#3Fq3rCvcr$%u1( z2{gufLn68_D(=+?NXf0^t}3eW$xlQV!2)G`mb)%2(w}9t@u{ju);0F{4IBU5Pj}&u ztcEiG)oJqV(@oTI>qH6oHb=dTzlaMRO}D<$`m2#?=W7X5wmF z^2G$HILyM@7jy+Q=JP7|8gnI~(0-7Z%J`^ExJY+?EL^J|1KMYPMvK?x?$K zfQx*hd`?{xRH7SKrc-MDI$SVDGH%?#*BVG%Pit#xe>AK=cy9CtWBgW~==A+spn&|| z1_$wf&Kl^E8CAw}WwPA@kV#G+Br=aE7B-jr81Gw}6@fy*>fI5FuxUHY&0utmy~jHN zX=p)W?PjP!mf_~zI8SN^2NKCnotzS{vc?@hODCH#ef}bLv5703*ta>ju^cgHv6pzh zYRsxJw z(ag}K3U4aKE#LdbOuO@%;1Qd;NStS_bU^@1Xec3#vwdw58jLMW=5p_WNC@+}CbVz- zwe}biyew3e*>bI?DG^{9&9eFQrA2|P^U}uy2wBB!>wCBQJll=~Y$F)0p2p>oHMx_>3uvg*@9Y#Te&PhIm0!s-)* z#w@R$LSc-?Na#|%lB(vhpQyPd7jBf!nsAfCTD|a($Q;_ml6+o10CNOyZbZ$!rB{eIWZ7H@zbJLl9f*nWHD!1=Z!e_MmA7u>(li?;c1tvBr_ 
zftrbvC@$NwP76I9S$jq{zBzq0Wp(jk96!%1;~%03!3rpx%#98SV`u6KFr}BlIx-?v_B%Mq|ty41zihOg~*NiZgC zOeiK>;w9_hU)v5UkON&a>-q}jL5Z-n#x;LW`X9svrU0qf%MinB=$i?!O|yBn*-G>m zwFhDtH0jpC0A7uF8t#n?+E%jVsKB!S+%fF6bTh_SCHq0T(JQMlaBhQ{H#7UQ`tlFF zcP(dlih{^-k$6_IrP?{G<4PnWb=0OteCR!$!h;&N&4(K!sY#n{`hFWy(-CMQegQQ4Jvfc%TWE>+xjRNI=421)3En&w#%g!v{WGp zt1DZOR>ZNX$v@E4LZ=K#T))q*i6^l4U6!9PlMGQcL#KfTTHta!Mi+T*{1`ZvX(6Jl zbXr(wn6;Y>G!pe6PzctMrG>BErTpW33+OZ|C$78y^u2ES*n@5aKiv`@i5a&~`w`zJDvxNN}r^96l zWKOhB+dJOqcVHr|UsQbl&xI&LXED)@t^loa5g^NdWm)O*iw{+<9bAv(=^rC4mp5noZ{y_e=%Y|m2)Mo0Q=Ze+ zDMvNQuxL#jcW&p@evz`hC9uq9Wt5b%uPDwOi$vCJ-+Dra;3N7LGm_^B^}sg z>22(FmsVoesv6ZF6-`_5>LtJrTkr)$4`G;7r?%71iC#L~jtv}mfQzsIA{7~rDVSZyr zPH1h^Q&RkWg<%tS@h>Ae%Wc|GVe53Hpyv67t#P;a0|3r!WC?%gmKslhQVoXGBJNIH zFc-3i->mX)m^$KD<_!#Hx!=c-r9pD&!!EAG1NRP>w=HJ0JCcy4=xLjXNEJPJ0)?L~ zgI!6&vCwns5@TP5a-%ail@zDYof5)A{(aYlwSA(2HMX|-^FD>JuI08}x!pa{Pw1F3y#x1LY;&d696ulYlk%X8?n_MC=2La&SKsUp@Jy@TE z0U=0SzF(b*Ms%N~1pEIzH2=*Q0HaeaEs$YZIacPBGN#pdfDJ4+f(6H3_hlwUZ<)vqF+jV9NcE(#)P3T7*4)F){U_v654E z90-3~mO1WsRvIqP@+W?{E7?0+{03PipazzCL*qZ_-OWK1Q$(hJ)ju-C)~*wI_0_Pr z5l}hIuEY{sW00fN*`{36rwmSxq(~J%ig9-+~s5nZeVZ-LIO5li4ftQ#-xP!fa8aMhuBE5{!i1mDOk} zxU|b}>6M<^c{K&v$!WoH{3ZrQm}zx^S%Hg)&4kr?AY-O5jIdHn*DZ1z>XVd6&~l@9 zyArd#NXjMoNqaiK2vibf%AwYRjLZP0-+wm(q>9l+4=p|y3W9)r4JR!E$1S2Nn?gsk zU$@Rz(N7#hcmSU#Ga-hO>VeS-77)T;pNVwC7aNOdvTE#*!MRgUAzNiPdfCKS60p44 zGk$|{bOnlPBK1|X3T#BKCH)lFND|I^2uXZlN(vszeJwaO|1>utE?2$7|I(*9j0)a? 
zgNHvZaTK;Lvcs_eIbj}2wZeGKoMu+pBdlYCfV6>(^G%Z&P2Dc-Zx97z*q0J);8}3= zZ;Uz;R)sh48FDVnR;Jh?6PUBRprFHH{|DfUNalqq8(8qz*S5Wdc_v6&?+&B9 z{GuRgik(K^&>OKK<<2Zkvl%&uqE+vQ)p_AbAvAg=T6n(Z<3#n{E4-?zpce+iUS=VP zdi}-iyFCbS`r>xtvLubwUz-tyGXD6Ue<0;*XJCm|>e-4q6c$EXyBF5GH89y5RvienSqiQ{g zf8r1?DL?NaU4N)Fo|g_;?{dD5IV-bz9~bjfFFQT=U7EgcALWkA1n;vQ|0Kh{f4HWB z<~@fE>Q~UCf=*oIH{WP^*vMb`IQKDVp7(}%cjAfZwlfp<<5|3OY$Az*HY~aDM|$DT$W`|PiZ;*R5WXw zIYg>33JQ+vmSoR&`@}eK;fieuCKtp$O*#58A{>^99w@zZ>}w)Y*JUHV@#Gr2;`_En zBibzbeUW#4PJgR?CT|;?H3i0i^fI4+!_>?!>gl`U(xdFhM*oI&J)WmV<=H}S6CUk- zB3%O7J?*IRNIIv!hBk4%j=GPO)Gy)b_pr(2Yq}HbdRsIr0)*_7`db{4j8!t8gU&wj z%M(6_5oWd$z!Hz(n5+A}PP@MGw5zrfXP=ltLv|AFFf#*y`ZnW0b`F~qI?3CWO=0^c zn1@E>{66t@ID6_VBn*^@x zozBV9QR6keBUhFkZKZMlftE*s2=;JGU|Lpy+>OfI1=r86g>@)ol$fkIzqZIPqz(DY z&8_6OfvCC@4+TPi+{B4UEs8+OKFRnb;`hBB3!m=XZudY_PJm~kOIBJ!B?suga1I@+ z9yr-R-&bDv$ssp;Y|m0ZLS)WFHrf=E14nX;(;R%b`;(II4=|w&m+wcbzI6rf{Gk$< zlO}yjLi0jcMR>etVUDb%4Eguu@_pB?-;AMkBs12@Rh|``B4XV{WW#9|Am5cZ;Kxuk zgUCjJnCkBX*>XEW9qFT#c&wBIclPG{uJ*}y#Indx6N%S{W%S#d``(OMC>{j`$fyB! 
zrb^Uf@k#{(DBi=i`7(YCnWg2uQ9zK$%Z0@yhuXGi1)s`X@#|;EtMbF0;YQ0If#KAjA#S^Or?^%DMO<1%nMkaQAgLGT| z4_~}f(MhBA#c8%#Gk*92KC%Y~tq8TV>AR4G+AJ;$o6nt7nT<11Mef~45ZHU$ORE-U z&3Xa*o_sRR?XVI;P31f3o4q;R)1!W^ z{{T;BV)fsN;0#HX<&i{+upu9Qub&#-5f_{+;Pf)!y7@;1AHxu4tmoO@p+?*QyU!Wx zD^JD9>{IVa>CkEeT&d9$nl`X~tz@C98B=X&(alEbqVin`Vq?mXm50YCX=q>X5NYE$Og^lmkNDC{V)A2Wg$!z)w$#o2mINXMmZ{o;i z<3zly@Z^Y6)xYNCzSxMIN-ZOUr`$!vn{1%WFc84(fkN?KxqZUVEx+puNiAM{A0rZBxqZ<=R#-S0} zo+y-A>`(@08m;+IcR8t>N5llAcE9+-%2l`3CQ066X;jSF@z!vH)?iNprx-fwn zu~8vERHxUP8Lux=w3C~?v3)U2z>#1@_JiQqj#o$Gs`rUa_AFBlwI{eyE`MH*Lt^!5 zTPU-8OOZ96$}g4-!BGZc2bStrJrFZ&GVH9}Nvv16X9IZJaF#2>mop zfBRvE3lEQ=7!Ie{piA3mD+CH(WbEM!g_bvqhsbh@HJYyYg@MyDeb?B0xo3je!aAC8 zr@Q3oggA~FM%8SNiNDcx=5(Xu144Vd5P;43aOhF}EA?;vDZPg?nofeRb|1*~yrifi zqacO#%jg01wf9a4kIe^S_iGcI?FfiG%q^89rxPcmHY!5Mq=b}5Lcs{jN@u$_|283V zrPGi=&51by98ptbSQc_6D1#B|W;z&hAda&C{pAtUls6;$OJ^*Pwqf!-Kb|?QPfhKu3rPnm9jnkI-!W~S+r8Er-kyo(|P|0SpPX{$aT0; z>cl)%Zd*L_KP39%H|`Dsb?dsutHTBpmV#|GB09*nMw=wP|3{*`T}_aYFnM68NxhAl zVPDOd7mg{K2}6Q7)PJO_B1)F9;DX}`dV>VCMijkjvrA5< z0u=ZO(%9Qww)fz4XS5M8k1ub@gVT@RxHD544@+WNk*{vMht9+ahHEgwzz42pqV7U8 zCrVc0saN8;ACfOs@{Ds-AIcekC^%6`3B*MsnS@pSgI zJsU0q32pPfAI_{nGA_*Z(i*89O;Nrl?MWG!KMs-92UEI{nFK4t<2*InO-#n{po1H(S#@xn(F& zhen#KsHdWI&D9i6^)i)%lQ~8jDpC`)zy)W#Z(pt=b+;Vts>%!BM#`%25wna+!LFIye-}si5!caZn{pN<00*9b{#3rX%2bhmI zrXds<0g=8tu79J(kS%i#;<2NnvufCzIkVY*_sK^;n38}Lq;8{Le_xUTj;yRf-vUnG zD++xmw)Mu&Y19VdNwY}{S{h-$6#7uSD%vj-D&4AlTpf`=#*-_spN5;?bI7;%K<<8c zKl92FhCqIr3}pOTrKuTUf0PS@omuCf1NECnZM1eq;zS@kXf2ZMa1$%fCT?tf?E=!;YoKdZc=Ghlwq}^D8N5sl4&+LvWf={f5l&bXKF`_S^<~R)GNTKmPUQ2U2|jW! 
z6Xz~2@ocF-xycFQnU#2Fo1FvaYRsf)hKnB0G0iMjtCoAeDpq84ArSM9ym0g| z0S$`@J{;A{WStKijUwx2qC~lWHt3ovg~=?RYGiTM4&umAp+#GFiLKoX7Awvdvnj$M z6X#*d;~OI@&LO^X4cq4^TK&AU>iBwWQd^Px*Bxr)jM7@6J^6Fe?m-J9E=QhJ;^iev zR{V9U$W<4yA2ZQ+#i{^O{}@)R_7=#+QC->}I;;N^md#v;$q)S{vgp8{I_nvHKoR~4Y^Q6tG&uXCj%$4don}Blv zDB$viKtuileKLTUhii7DAWPW1z5PSNP8IL~OG(6jLR>!6{!&kD@$1)@oM?GtN;uD9 zJ*PY3tbHt{n9*juId_L?uj~!zW1}#_-n`8Epga2nE;xjqRt@k5a*eEeR8causaJ+q zV_e{?xNdGnk;Fr44U3oR(QoaFte}V-_OqTJMUWydhY<$T^=G@5&Q?pB%Wl7}m=TcD zcfzpbZ6pvB(*YFIGjL9(m|=C8;<{Maloqu7$w^TcIh}w10@h3Y-#4>CD$?p)VC`^^ ze%2MIb0a7AT~(~mp;ZEPay2X}$E&DHtlJV-+#-O&B-c5+#FzXa1FIK=N1NhrMrIEl zru4=WWc{)X34$Z%l9{~O&SOj+jAP+oOkwj^=+v#;Ptd{4#itE#s4hmJD_-70;AdD$ zkav&O=W;~CEW|YfJ?|rOV1|*S3?td=N3K%sm5uMWVDcudB#!i^qlCQr{W}OvQbH;B z?#Bf0he=0gGtAeGVOg3~_>(0AKA)>#M9F*84 zKcr>aMrDIS;Im>!&KBcJIJ2_vqw&sh>SVD#Blje-7?X5z!FjJl{sf#_Zm_-*QE7bA zn$22emYfD{NmniKo&=U^OEZF3e2qy?twWh9thT%Yc2k`Iez-+@H6&)q1QPVe{Nl(K ze%&R>WL1dCD?$(0Z$Tlh(l<~h&x=mH4V}2;JWU%8`_0Zyt(|M&x-M_yACIH!wL~t` zeuGxS?o{N23ZAV!?Dp%Mwg|*v{;%Tsk=;UICr>sX8q&Z>o>IN=Jf$%S#Xe?HGV(v zW9WVp6${HrU5#rBrYeuQdGG!;dz9K{UPf71LlYL+IuVG)IPaS0AAeTPcs7|@q0DK> z4ICU87Yiq$KLaiK^IOxUB#R2q9pt!TqK`it9Mc*62t&-+BNq&c%bFRh_KX-R?Gt;F z?rHvqnKFw?lkTk^;3>{-lZd)ApIO z(zwRSNe^0vnBt9Z;#xZwP%~O+$aX%qj?S+v;P~{US+AsexI(lgt^S*<#`m}Fd$+$Q z#FhT=AbXcAAh32%6FavUG)pXWdLDIA z#EuqKDE`IHBc5Xp4W6gAy(gkZogM$(547MI@6zurV9~=)%vF4o+Tbb0v5Vmr+sJ38 z)FO^F#t_xQvDuj4-W8GI;K4T-Z0?#4Ps5&(oG$;%xofguSPc%p9QNXm{izDkvk_Y) zv>q{>HmKeAH)TG9h^}c~xn(zkIC}$%#a^AJXoY^H4({is+L^gUdhB>+EnSYwRClRM zB`?Ns!oJOLGyCqRr&0P+aj-btF5-jyHV_lj7B<)Hfi|fk+gpl-*BtN@9tIM)WIXc? 
zE!B3~mrQ|Nd0VoBaH{&G3TJhc^>=Zoz+BqL)M}`^pwoZGE)H!I_2fD9r}P*W^h-^J#uQKysvu z379LsH854+D2YW}VP9OE7jZi>U>A{vZ-?0at#_L-`q{eIHk{&bOE6F@+2X|6DQ8wi zaJ@R85!72xCnD!BdFk`v;$hsgBI zlfeZ_y~;oxEJj4SVU-$;fvftEcz9*va?Jeb%`i%4Ao*8&(9eeS|E@iF2wWqpvUE{b zu5%WE@Jp!T=}<@T&=yOd<&cz#?t3YAnYrE8ocVP-W`QHeJm<>q&wm-Ph8VfYh5Bmg z8#Od9vSJW-vvie~E%H4rVV{tDt_v=kOX$OoI3FuP7B&j>dq>^v-2@*lwN**ht6$RXCEI9w02Y&jDK6*hczKQItzFvT;wkP z|K0j$SUc`{L0F9(Wr^x+XF)s`ZJF_w0;=ROuqhRNlpPAU1TPqQ{)}-A~iE>6B zSDDZSCFSr;OxTo${W(}?A3l(VVN!H!K7I0=I6M*e*nyD5-hjBhvXIfe6ztCf9|gVq z-yqt)^1axInU>m_C!gD!OmD4!tq0&;W@gptQt|tfI?Ck8#IQ{iPW*ULXwqCeR)Wey zgyV-(Q>`VZgxHqPyf}c&toXzO+mZ?=%JmtS3_me;F4psAb|ymr&O=cxCTCR3ogNt& zyAF?dY8h*8%jc!<@ieJBZWRf)k1uVf^j9OJ9{r-)rsOs{r@Mm!Ycaht;c^z~@V<33 z^Xo%SZ+if2nFu1ja%V?tXj~@?kWz=c&%eiiDM%bd3r)(o34Zf2_g99!mks-O_T=vg zxPbR7p=Uuh{{%)nE!!_{2f6e@FvXfOoL?_CX@lr%eS>Q?)wh*8EFJ(71~dl}P-ql% z!B5)T?XYArzyc-)-m5ryaEIF5$H6IVmi)xrghoPUl&oh!z-O^EcsfG^hP4U(-*kmZ z4_EI08pDPY;9|`_yeL?w3x|HhKe5mE3T<6Z80ODAE~s({S)}Nh1t1xUx<;AFMRv=s zniJ{xTF?|o<=W}izV5N{W=ov<(p;PB~Qt-(Va17DhPDQDRkB?xL z8LS#pOv^h3wXfij){wHjs1u+nQM3PT1kEkQ|467eur`LMVAnLOjD4ou_`-)h7mX4? 
zavwtac1vrLLB`DSn5;M4ujDR1AG&MT&3HgR+j54tTNvwma)`4p9^^=eG7-VCwYg0N z0>Yru2=6DXE^d0(Oq?5+O7O;40E*sPBzX=7th}PXq{|4e>m4%5tMkQK?EcM~d@4@& zz~&f8%q-ZD{aY3J|A<-n;I3f)HbGd?sR1^*8le5n`IFQBil=c$w8VBWoj|jCxKaa& z#9fL{>xz=?d%OKP0S{7F|Wk)PJeXH<)D)lp+y_&N<+fivaODdoIn^wBA z+`mSF_>T$oWr=;$>kclHQ|x2xS{2L7ylD=tjMciy69$h;`sxts#XISx&ji{Z4PI-@ zr|nV8JRcA4L4ojxoF7evfg59^^br3yHa(9TLn`vKI)vW0r2wfedu?bd=bCW*bOWQR ziQ{beuhsT#iu|8r1nq(V7Al}I(jl0ZyZr=7sY4Y8`B(d%;+aLVW$`NUJ7rhvnqi^@ z9+Jz-1Vo^ZwQsz1Mp^rS+Qhl3pt<;$+0JC0Gv#(wn4(BoMU#;|9jACul&8tZVyzP6S+J82gJsz z(>bK=F4g%b{goEYc+JnFXQ{MO&N(N!+I6m$oZK$E~tEL4wED zS^wXy2;_*N4A4JZ1#o!uNJ_0 z>f4`IhG3gwAdTBXYA?@E6Fa}Bqkf)L5k0vy9!TFcl@&~ST5MK655ocLsaBUxb4SxX z&MjLs#TZC>rA89FM|r7&Rz%lE^wBxr_Gz3(rSL~L_hAIhVVrh2wZR8+On--d#K2Wr zTHV93L-#TYixTqg$kt|?iTf)AaBXh#0|*{N@n6~AdZ=%I95oqqnVDvm4RmLiox_7u z`Z9>XBf=y?iSXGmS(<4kkMaIlaTWz>P>MD<@wF&6TTqHACJxJ;d_oI*)WrnTz{^%V zSA#-u?d9NgZBs{R=DWPKQloB-q~>Hli!YIczFJyKq!XDQHM-OU_u!tR_vgH-H1_my z|9nabI*$}GZxOMz8v+J%VH`YO&NFAOYG&^i4yXeuNzQ&D6>7!)a%?VGnEvIfV_i39 zus(LQ6kMR(kTpSLN|mgZU7EzN6cWzsT{3~HKLS;X-*+X5{7y8WMQd4Giyk!1Z7#Ox zvp_a~|9&7mV(#>vnzy5U+CUCL6`I5)KBazuTXmPc#;2{6uJ59&4|>i8y~VyYyY0-K zT1kT^X z_U4bS7(Znk9lYQ`V7{#TbU*yxnDY$|{+C)>ng3{M=Pi9+n74d9)+sMa0(%b4F4VJ! 
zs4Y<)-T!=JDbPaFB=LnV?vg}?7Q>Bi!6^*#GiK%U@#bnBN96#uKqB9yyst}hT_8yF9+XVd&;`+{ooWT}aNMDBz;a~PJIY0eM z`1imy(nkGbO}h@5UDQOaudXPeA`J9YF>8{p9*zG{F`ZN?JA6hxh3eD>Ceo1PYay{; zHhp(wzbia{+9=3ErOeL@%1IS;@JR6;zlO^af8@=0uQ-auKx#MN86sG?x<7pBTf|6R<<`DK2=4yF&)bLQ7zR7nyBn2aYF%aaBo;0-ibFAFYk{L-Ad zg1Ew<$;H>#YWsZ+^EAiymrWqR4}Fw37~@(sE6aW8>u5xdsC^=_1m;o0w4y9(vGl3T zw8y6mhAcJ)E(Vjec`m7SLaB91={-VeOOjjbn_w%;0JL!M@v$1k6dQ-1hv0l7^rG zW9q?vkz&UN34TOlgml`3^xvFQC|J2SD`-at6Z+OumhXU`Nzo(&f$<2^p%$p_Q5T!e zz6l~S`@i7$$L2&r#kF!BD=;9UWk^`Lv)1U{LH>Ks_bynt zG?Ig<4EJQzB=B%ye8T}aU|%sA)_L&nc?U3wiQjFsDgFel{Ti1ChP8kEFbng+>GTQ$ z5%(Za#3^3x61*H6ni5!jsAf;uO0+f-qn$K<%cH<0Ckjc)VHd}Ei3wOC!dj||Id)8h z|L2YEec6|*O*tJ}A-51dD|u<*>WT+AT{$kCqNhQLn&=C>_Sw0qfJ3m2Y!2150q@VSAwZ zx6c_?tDQK+r)oUWsQ(>APKf7@^t4OaUXc4IHgSa9xXjC13fopaL{9(f+|u4YnB%hf z)iW)6dM`Hob5Dc)U6L*B)Xe8!Mf*ntJ?bTn_*wqyua^;QoON%~>npCHB?2v*)(DLJ zJ=1;f@*J1*7%{mt(-*V$P&`%nzoXZ03yVvW>%A{gAc}^G$?W_1eqn@VWG<*Yb!~FD zU8}D8TpGn!fWMs|{F3TNH`L+sM5yOmf$_WlgjMDzC5{$TD!Mjg^Lk4RA)$ANTFII~ zooZ@mT>8MX+GH#B!E5OCIbrSWqTN#)Eb*U}1~og~PK1w`e zCDZY<-3Q^P^(0kP;%&Le2%~gYksE&u6!4zl3q3?h`@O!ien;2^Lddsk&ezZ%FOV-+ zhxKvcWdF&m0h(U6%+d=)5Ji1SlD`%n@I zaerwJ;iVp8;LZG+Ri9zypR-DJg(BV9aMmIfa~y1eoA_R_(_$>;C46V}VW z8BdU>9q>vC{Kbx`k~o+tQd3WGi_eP!V;R)SJmIJ=vQk_d3Mig#k%)UV0~*)tEzm?p zu*%VBfP*~%R{qEFuiMoHsE*~p_B>nkou2PyBz$ftr!VIs2gL`#uzGZ( z(B-&(D~ls8){`aNmB#s#kE-#6XLm)LU}V%rTb=sINg5PQ+5OLCIL~${Jqj5r3{D6Y%*c}@cf38=E_+FkU^2iYE{t~wNdw97B;#}b{BX>b-qHb0=eqvq? 
zKN>D5C`sY|;DzPFCop*9Q}rswzjv5o>1L@7exZp<-Uy|MdJv31!u`~$GiM&}n^pe?V0=aqCY zVr5Pmwso4PA?l~-9n%T$F=e}Ngh9Ic@TRf9n`h*ye{A|u(tB2h0~eBpW5W%n4M7au z-NS)KaPl-nqHnwk!w1fRj&iIJ9I6atvK1bE9l1fULys|oNY1q)Pq^%~saMdLju`M;DkFnBV9)=UL^*Ulk!<4S z2_B2|TDK;rqYlm7HxA!P%PlB*bC2cdSlcdZB7R$_*9lX@?cT#WV4S2Rbjd~+)c*^h zA^Vo6o2DlS*;Cjy3i37VJhaczX{EFjC+^ zWU0RncbEu@l~Vnv%1IcpYID?S!d#L00Vcpujh9j%&(9{Qi9k}pQsY8C8*Kd-?=`By z0(o2Tb|Ok$2+hiln{;99*CT5+ieM^bC6x!3aL+r$5P*B#5H3JJ$_q6E8YkmXF|5JO zmxnHE3Oqk;nF=mL&rsk=DU|D`8}jl`VV4i|^~>M9!>N-5qMviYg(rDa z5SOQjVQe@H`F%hSDlVGz&b?G#&VP#EU>X`%-6Dwh9n#z#S`N!x*g$wUi(`%@_o*jr z(5C(mFV5IJDxX#P6mve?>AEY;*dAc#3q|87#ELvW|BxQ*^2e&poS!EI+B|yj@hxkl z#jyV<0xT+MP*qz~+(VkS(qYFK$we9`?suQE^z|<-mfc)xz%ZF50q2$yH6sr=`IsNr zPfPS&>heQGLH0LJth~4$%6{NK3sC@-jcT%b{W|#miMi>~M&4x?NvSu1+~j80yvN~0 zhK!Pw5>Kj9p#eR0#}u}a(7O8{17*)b+(q>JPF^MNEpzAZEJCqjqv`Ie>u$P`0xWPSRHk2ZAzLE5Ol!mAC=__ZT1t zK){DcDsm0Z6gb<7;Bd=QU@TCDA%2Auk&0y%5(vMaEbbon$GFMy-0r1iG70FCJnG)x z((o?-*@2qYW%j0n;8~-iowGp*9iVR5Tq_7mfJj~h)D*-4}$L?#&s;z5V6A&N6hBT$%joWRH?;9 zL^_T5GDku{ABMv0_4ViXS*>rB_P2Ch)Vj#!Lyg@%x8LQV-4p_JfT}Sz+<^1v)-?6% zDFc>;9poaXi1 zjo#D(JPudCPmc@&zbpd~RA@>dCBN<)x0^gH7;+20va(2J`UIAjTY>nf4qTY<9&z#U zy9NiDIma-E4v!CWDqD5IxLsm3PA<^4<=H3UZSwH}2&DdhM03bfn-gMfw+2U7Leu4v z5GXApfSAVcyUle_rs16Z!4=;l%(Qi<5^KnHXI4EfoWycMUT?)yd|toZ{D);cgFkB5 z?iHRaL{-H#m&5s4{7zvP1-B=H`mAMM>%5c^&b!ASeZZR%o7TS>k{mB1lWe!luFl!; zAAq0~@-XxS^I1%c>MiyB4gUzZBrsHF3Xuvlbxvdk>u`^{f#CIn6H%8GJut4YPXC5< zgW$=@p5?J9M7kyna~1J!EZYMD5QDcYuFeU)Jbx)DDdo~pV!DX`bF;eQ@rl6Zgpr>F zHsvQ;$vjPQ;iNrIK(@rV>6|cv=unkvumc7dF3|SBI_KPIpMAjF76X_Bhk82*KJR)j zvsnXfMAr1%wis$`7AX>cN5(w1dMb({EtN=h;a6MKPGJ@0@*Tbx-{Uq?k+6Zi7Al~5 z;RP}y8)9pFlNmcsp71Ev+@*L1=p)9Tw->Nf<&A|vaWWvoWq<<<3kzwhHk|MMCX#>b z4@FKv0_KQRVeK6{RgCy%j=OU5gseWhj&0YqwHUEt3r(Frgi>p(h?iX*oQcw(=hg%> zZP7`~DF8c1OxOAV0V@N`rbYZ~dLaUG`ix~}vS0np787(^T%-I8HPjfj@*OdmMZMf? 
ziFK=v14S;+E;6SliX4%eStmI8pvU^>wKqNs>j1T!0S-fgZL3mOSPt-@P{2e0A@zij zf9<6q5|UBWuaXDD3ma%L(uX}@=NT=eLB>E*Qc|{f8aID3&^DF_Zs%(a{7|05THkZm zz=koVt=IBDL$(3f9BI{+>i@(0?A7WzOvh#5XG{UG|1rJ$kt#8a|2<_TV@|~0{uXVI zifvnYxRrP~U?K~V0d*Wa$gMzjTA93kp059xlKJ(GCnotaxW12?;XJFr*gc;B!>SWE z0UXJ8C*9WYX#n^r=FxWneGmO(v%y2;ovsHQ^l|(;raEI+E}Sk7Jb@y*7xpXr=rb}fn7`SBIND}->&1SKlsKyL%zYEjzd$VW zJ|c=WCmA`d!Q-Afwa*(V8OinWt)L4!eA6{2U+qU04C_?Qkyckze!vd@lZEaDp<9Jj zGon+eeH@bE(#F6*5`5wDypY#wOWxukt+&ga;Ug7vX^}wS{FO#UkgEc_k6Jv>gT0r$5an3s1>EHYpq5H61kfU6ci$w@U(TbsfQ zjIb-tk@(sAnoVzk&CB~2W{ZK^YII>%%0n4?I)9i_qE0RxQW##FTr?F`*i5-7C?!4W zi-&G7P>raC01TQeet~^OHJhh2#Wb5o4U$lH%nA3&? z0)g(|nT13e`J#5Ig<;!?Sa-hVTdu+)rJi2$zNx>f!Yww zCTNu7GQ0+rDCa_Yf>>eU{O~Tfj+CU|@rjhWR${EZsDr7eZEtIyIq3`&|B1D@_U};d zegU}Bl@_S1H!|~u(Qs05!wM4O!vpN|a%41hN7)_OW-+jEvK%oDL@djKV$jFJH5gFN z4bqdOT8$W`L*+xVTH6S#v)-zQ=*01ZRcC07L?y^=EbyLVnyn5sI%@gZD+j2n7uE931x6XPk(nMERCwp;57C~f;u z6?X0u5>IzA7HjKr6`YR>K<%TV-$KmEiIWfd3^2b8!wm~hYN=Y^o075jAdN=pukgDJ z$F8W_WJ1U5qt`t(FdFXsRk;Hx6c&E|;PpQwKs8HXOe>n&onRX`sx~&OU1SY=nK@Qu4 zInTNjTbSw+cEG$=du1~L;Mj|>h1#n>N%I)xq}S%NA?{Qi*7 ze+~0P>Dt>J>C@knpEYxXY{qZ`-JqfF_b;a|0@Au2n2Z3d>F*GuC4F>@!KLja-wq!Mdww#5HTdP1d zw)FB^W%buU?Km%DKWmekAP+M;xQ#Y@VLZ~l?!bcDR10TG*orm4@+F{1l3IQO5L%gZJ6c{^TYb+2;VBGFl?&5#(JR+{nR(Xbr@h;^e+|9aH?%UFOg1m`J|+-#`bEVcXRmpvyRA`M*Rw0|OKT;!@ula)ii|nX~)yIZPR&>O=FRwKo0c zE?sgj*a%d}FhUMMg205lX4RR$wl^>-g*D}DpDAxk=euVg{k#Hl#=R3wgpt@|!raNm zYiza(8s)Gek#D6>P{hSUkxfcdd!dWP>Ly(>?f8^y+ZQkuN7=b2Br?v}Ad3p`Kl%Gx z#pD$U!{;q-PR&G~ecRm~$-a?c=o`a+Bia`&Qm7)w!Hy^=LnK8f8;3q7bd_A%?RTbD zGNnkNx^#rO8EsfZJEqDLB5Og#{l!y>&a7HFHvAJsP|nA<;4A+Yd8aHZgHlph2|ysq zV#M!+4rKgg*D6><6dvc6Z`00xH02p%96O~h?#OfjT4ExW$%S1Fzq6y>YVgFoQj1IV zl%l25;LEM4MWNA&VG<1JR4f8gIkF^T-V|9=#vi3doX=8&Z9>nD^{OC1SQlxhwc z9XUAv+(gZ~KDxin>wXNHJ)XbreOV$IA7BRG3VJ%jb@j2cH2zN)(BZ zao)b=%SM2X$w~kK4f9(|;Z8!M_l)Df7j<|8?f)nOF*R-|SXREmsKa>uZVssDa~duK z1#_~lxDXS@jmEk(0d}2Dv)|GYp@#S=DxkXQWiXd0uuSX)-yDa%U%)LcG#)Zi@~KpH-X{jC&yfJhoaVf6<&0NJ 
zFjt6KIaM|wD-&Zm&}`Gr8%fJg|48b1jGs--kje%DrueIGC9M7aWF8N{%6^^kwHzp$ z3Gas6w{m~lJV)%sg@3a2ZR`J~zx|*{3FOzVzW=O-9?OoErJ&k#6g`>deG$DtE&x`7 zI2`RRE;$jx;CClMbHKcf%QyH9KpLl*d$@59HR|e*2d*=c7I5`ei z>~r3nrTIL-j*X20JaP!{Z2J#Mf-wOF@>5dU+DAjjq6%}#urXZA@c!K=(v9fznhF3oezy4cHZ6-|Xrfe~Cd7OD2$AKUM}fJ&3TyRrqtCMx z&vo3<<8%bx(Wx@elt2E_sS$9$OhH@po}M@U>lYsY=OaMdu-M4!CGvmN0-VtwRXzBk z_Us~BuFDe^kcGdk0&Vq!2FG-vM-Cq|n2R^AG) z`_%**_i^7-1*^gUv!)^fb?pz!+uc29M8(Dr(7rzC0#*Z;rN(_e#qfJAHj7=i}oCx(=595l!N6 zgk?jniMJnzdEn2Zw_;F*@k&6qm2BM{hz*LNYGZrndz_OB?x{J%FF>j+0q}iQ>wL8@ z|F;%Ew9{J7=N=Saq1m-HZllB#Zi~t=Cf8E68rT?&q1^E!))qzbeM5;^Ypv&F;2MdA z5Jf?|YiP*N6@V=qESP5Q=U#0(9p;RRRuYg9RswfYO5&*lu$>U8|7b8G?)DBj(L1x9 zyU+d#ea{ZR>FUm*tW}h>K8?3tVYVK-hkKqWYn4a?ZJ+Vdcp<5qc9F%X<5CY#`cQR7 z6`p8YRO^EcJlkpvit>K~3dgkNyou|d`*Za|I23MK;CgBT``{iI?x+r2s8^els>uFo zUQe*7@z=*MNBCIz%tD_By>wnuHTo6(0?^p;o~0upd=?X(N_4^ zKC!bhAJ_EAKVRf5AE8wXZm5aC<<~WH4(5-ig|yV`fDoA-^a7@$0E>2U+I&3p8cg2h z*r+Gp^K>Cm>R4Htg0nxnINz)uhcLf?k- z@S{F0qs}HxPQUN`W%3VKz!FG%$Ps?{wKO!uK*&?C`=btNu+9&3)fB0AguREq*gJ1u z>6vqJQE=Y}g!MjHVvv-MS{iIxpI5IdZ!}0+9D=e%yx-5d_Q(@ei3W6rP`bKDgA?ZO zqwzS;VF)D<_Ly>go4qsUKUj55PdHa-#cWTf~@4Ma+1)kN!-@0Wp8T?d(PIbOIa-qow#IQKP_S?(9jT&F-GFV3UtI7dXYHF+x zU0^`z3^s9o8&^d74R(G3CG%eJ?7vx(>;mq8yxLE^@fw`bj0sHMzn7+gjD?0~7}m?# zI6)jq4%36<{GI21FqnX*4G@yP=I^W%V;4B`LCCbG&0rDdzZ{W1j8Xu_gNd~qcGEfp z&?AN0JTBkN3-pocoh=_~S=4o+U4*eWVs45{>i_eB+)kg04|n)bZ>PQY^4#wl5Z z@Hvmd_F(}p{76u2@CQ9xDzB8rl#XMRr0KICC4Wp-P7+X5?hl7g&Mtq^F+}>n*AfK= zOU9&&B2;Gi-9E84Ej5D=_CL{h9|)QNGtDmN5%`n#9jAwp=NYydR6^|Fl;-F3$_KT6 z3SV&NGhjN1#4egYX?k26C++riMALLXx-{`Ffln;SC`ccghKsN2^*S%&?!|UT6O*MC zZ2D$L<5E=-2NGlY06}c;rDs+?u>mbKgb-OF0P>aE;R84F5YU=cFczE~35|&PtyHYz zMc573CgUd?NQf$EY&X0f4Kgvo6Sc0F+IP)QBM6pF`!A0zF8xT+JK4(G+;M8lm9d9} z+fwM_PiCzx2zlAJ!T?zWu053v{Y)G@RXJJGcV2sf5rJLXqI9svFRIRu$?*6IbVVH) zPK_X-y0*?DBt8LXy;gg$#|17=idmC_1nAjq37-ZMJ_5yPalT!+FSQ1tSy%h&Dfz_X z1@BRC+~_r!FWqzFb}LeX`~QRn>O0U*u-&6MxRYEpInllk(e*^NC3`)z>Xv1cM*#16 zgz}!x4Ilxv>u|mdG}F!LsYzLcY-6 
zGHo|K?8P65*nsz5mLg`S&{+x)n0%R9we3Bh$>?xyes+9vh*mM-U)3z@+0(_2QqNkc z1A&wx0E06__i{oQdU5bZw?kJFhp;1A_O{Yilw%erpr45yr>%(VxJ!0RY@~*2mtKmLgE=7bjGFCIbOFnU5v1on5D1PM zqW=+#K87;5?qS&IVJaaf{h6B{%@@Gmi-(Gfxb#J58~KJJynZb8Vucl8tGm#be_U-a zZLz&P!@qw#dxevM1)@D36W;#m;rEH<;V=3hpedkDav%YqM$3l&T(Xj`nKATko2y1I z{r#E;Gu8m}7kGn`DUnf67fDL3dD6EQ%FzNUr$Aq+w&}NWXe?Xxi}OoTBe|u#IJSoe zisB)QLaMTC08__2eT7gDn(9c|c(rtuOAN9)H+@#AM0FcLJx@O|KaB;|hJnRkK z|LK%FEo%CRFvZ4CG$HS+&et&9&y7PNlk!6 zs+hL10cSQ>2cDFFbm#Hd2N!1*)xn9JUQ55L(bMvLYB|5W?&AG~OT6|D_%aCt2lzeP zqKmkU-GA7V!aRkZ4v&9e=)WjusbNe)JIIXKQ0Xt`kBX1h_4E58gi69ZQiOh!I~Wgd z*s&gLcCeqn=6M~DC?_Wegy5~MJy}~{&egW4y9bZ0<1JB%ukZrit5VD4q zjSUv{|I!3${};7vAa6TVA6q|+K3_RrPpt|1MpSU>3EZ|>ss;_BZnNriko!~XzttMf zKs-?P8`AvMKK8*UEdxl~X;J^MAMyZRWz{Fp3_|sPH6H=aEeq|&9OXC~=Wf0o8I$p* z+&W3mSNL<~%S{mh8N}K!D3mW^7S3N_*Mzw^{+GdWlaaYkz7Z+#grIf_@mI}|z@(z0 zP>qZyCCo3XLBatzAap7NRz^TyC~c({quzBbu+uY3d39?Xe}?t${OiN}M;FVif)d@}AYpg{74I9i@4W?ov>3CM zihy0vX^A9(hos@N;q5cy?=x%1hTF$!@8UBVHEpt_G!SE7+QQ}}36Wy+H zmvb6GXpbp+z>Km#515re*z(!n9FIOo`w-*fXE#==b-r~A@_(P83oM@F!C4^vuH6U@ zmH#ySQDb}wS9`5RE3CeNcy&%lrv_h#!E2>slK(H2X&r<)R980qoZHIw14dpj8D^)FHlC7%n%cL!F905$0?;habf6MOn=BF0lk z$^iHxixF-MJXg;79M^JZk1<byZ56TVW}M-5CD>cMh_i843R-_u zptbP3lkGx4r$mT8MP?<{FGAbj%Iqvknc7>j&~`t1SiT3|krf^q z84L&2k375|8cs2_;`~-utk`UJhKx+f!5#2roVl%YMod)^Ms-v-k7N3oPe;k1jveto9%Wajk;QWoxHi8K?PlfPdp_s5l0tUf*p$Kks$>N`$u7zuh~ ztS`g08$0}f(b-(Qvx$h>72vM7phY=(`Q7>BrKD|ld;1+yMyik$7Xj#4d`&^p%!t=2 z-WExOOtVk_sT6%?_|CnJ^|$hrsbK#^B$M|yp3X(Sh!Wn3 zICI8mM?CJ0ziy_iTI$MtO{8fbZ$28nOiMGwiF2|nloYh&$D8E9=(NIzOBjo-Yn)oq z)o{iumY?Q=I(~-sMVtkFML&ILGD)^DH*1GCg_4B7?9Nk{mvkUH_DiTx_R0F~9KnR% zHRGt`@l0zoH6x{|NS5|ra#=^OUNPA+~+O~cYJ9ov;Cgj8#3o5B*T`zhk118!G)2=Yzi5Om&p;Si`Ik9vCCg7 zPDrnduAKOz}aF{dhf?mBV@FyZ~x^5S_CVyST-C7|r zoM?HHby+#{?d)%18)yT~)n>JW(tE4nc}eiW>j}Fm4HOvbgA<);tyojdEjKWR5} z)my>+btqx}?G~-g?hQs0WA^A(0Q-l24mD6l0U(0DgLjC?>u&hi{&(}>kq`K7I>ti^s zMdq7;b%xbSzNZP1()aNR626^Ho_dU?6rZiwKlHC1|Hw=6Yre4^LhsL?P4W*XWaV3K 
zJ-<1C1NHNUFGLH7Fn-iBdruKo8JUA4CZKXTWH_w~L+G3AyB+9O>=Fs=u{;taPx$g} zJ%I89lS0)g(Tisb%CzddB1URcI?h}!2GhgitaewRwg5#1cc{81N;h~F$HmQ0^kw%?_uZaB4H-TjpAMQ1Kb#_fvR>}sTI@3Yorl@$ zIk$xL8eT89*cP-p^aYm04EEUj?QXf|tUXg7Kd~_>4BkWtWEx-5?6TDPXux_Sy4~BH zQZ{TN>JeDTb19Sa3O@R$#ih;DVW?p+Sy9XLH~1$`re`%rx(M)nsm4NAe$#(AL?_T&F1$WS4KAv%htmdI;B*fO`J{5uwG=fB;ly4LaSQ>qICCewM>d zz)sP&;JR3KmcxhCK z{`2D`tpLz^Sgg689i4WhXlVhyvIQw`MC^oKQ`kIo_ML&;K9Sz7J3|)UGG~8iR~LFR z97F||y@`MJO<2=L)n$702Ujb31b$zSuN)=HPZ~{wrZCs02Gc~x(_z~0`eE%0tQU#91LyjXp+H86pjX1ZC`9I#n0F3vt8ceK7xiP|t!L-spD@sl?H2mN#JjwN}JHqNdP zV!#j-NBevE*KTvef{r6+5k<)Ukm4UIbpF!ZEC|Uw8>6}q6m3ldaTUAE>!K6kgO`>_ zUBbvJ~+zxd@q^_tOehWF^M;7h9PxM$${AF5w5a?H={v8e?2Z0sfo z@?MlF!-zN7ESxt)-YX^2<8EbtB|j^9OZ#H+JeV?y zI%z~FQpi2})aps8I9T_RPCv!rnw@?W#`5JlA@^|IH4g0igmsd#G{HFkj}DkZY#bKET#(?9i5tg2ybNgdia z{iUuxI_~(?R;O@$!QmhEE_hsz=NX)pwe&>O&^(0=lkmW-d>yQ{wpA_h`QGeJf8One z-+R~-+&}OZS|0Vm-@WTa-$CGiun)m+W-jsyijXF;HWr@wrcRbu7I##Eujg!KUa=z+ zw>xzUNjB_mQDjQcChKxa5^dBqq04iES5#)6Rac7HiuDJ#u(a4vVN1lbL9k{<$)Lq` z*2o^%&QD9~kk|ASDEDqj+2}%yUoBQ=*E+rIztTUrU)8GaAC{OF7G)lX#0-|+4dp0x zPFe}+ zUZNzug{&KfFdfX-1{PvrgTQkoc!_ag)G%cnE3lRPbXnTk;GZ9}0SY+FvmE;UjwJJv zj@-?CpFSrS8C^tpP^&6h=ox^O(R;JM-^Os+r{Y;?rWILodJqH>O!<~l-i?I}Y&@nV z3B7Ayul)p_fAD-%GH9ZF#r}%2a$@Pei)Zf%VZlLa^BjFV`UOO;Di)L|+@Do=_gU|8 zoOpHimj7;{8l{Cikg^hihswsUlj6X_KyB)bJigfc%Fj0i)~OvYwCD#9zf1&P+p^AI zHkM%-SxaFq^O2f#o6N&ch0k5V$ZZn3Ld;HLMAJC^Tl3R0ZaI*)eVjZ2U*yRiqA9Kl z#;43-V3B9yRKA^`k+R6S-19$iwcO6-R_y+wEKe!|nN!@}+Emt0L0SDG!r<3e{~Cn4b$hU%wlm+ zm7xQg>EBK^RYy|WuZk;R0{k!c9T+Qj%A!7<*tAJ&sFbom7JMqf86n*fZ&In;odHT#!ODwBS6J z&ZNUANnZ}2#uRD-M^AN|3Dv^%cg@r9++lw(JY!m|jEoIU4mWXCe`f`$bS~fK>}lBM zcu(;DTu3zJp$(f!6ZpCU7CAaY2Dv*^RPW}YdIUbM6!}#rra1auY?KDlX#<&$AP@D|LM_WKL%J6cd*Eyu77SIT#A z`_&U+%SqBfU+H7|#$L(FS;OwU&7Up8Q}4PMKPz>gh(Lz0g1NPRy3=9(L+ZbS@`8fh zJNos|p17_4vTX(T4hEcs*{%mE5q}1VWU%1wW>>`&V9w71+-ik zrp69WPq#<*VNVE+PqW3{$a(jtkbeIio7e5q&e+bW7rq#x)Lg1bn-Aq>U`(?=2d3DR zuAi5Xz0MP!Xxg`?#;0&j*42MS);;iga?XZz*0Hvr_B^%eC1WApE|U4y 
zA>&~~do7Di7q5q##3X2X@*UMy-R%4vs;N`u3G%go(eL*Hm0|amgoJLF>Sg@NcGFg; zhNR}>8L@6xOrjl_J|n>G!|Ml@QuAe9apID%C)zRaNI1!(qwcVQbXUL2lWUWiR=y#w zXcrZi5;E+mc;^;;)nm4h7ZEljJP{R5WKvN3`+ib=oY_8N&?J3)Kwss@XR-Ur)lp*TP4T#Bg@0soJh!*)dq_agK`X|1ISv7v)EPD9UD z98tt$BY%q*un6*e_Q=bKb|;6?JOBO7OBR&lBHwKe*4JW+yViOSCWk%I>iR0EKNGX& z-yBI;(6%Dyy0zUrd~g8&uBvl*N*g6SgfLV)+Z@+bW;Gij@1k8M198pL0W)i|oDc8&AU zCQ!L4{UmJkg>C9aW;lb{87V4T8h>VY7jQd0@jaBoj(IM2Br&%4Fp>r*EmkMR;f3GS z>0yRBhNA?iTAPZ^XWyt-U*2kSFT&pSS9|JmL>)MfS^n*o^di+>z1aDMgM(i9bIgx7 zK>tq1GpWs!l=Tzag!1^H?!5{-X4Os_YTI*KX{G)MgeO^I`rrB-o#4vPfiN4#Pz?{@&~!t{JRbc&kg?N*zUB5-CZ8GK{RfqT+7A@=FLsZvBU|FZ?_Z_4tEpKE6WZ z@34Q>I8>GX5-9n#^b`82ygW_p6ioO2G{3Mk>Z65V+&$c&{94DSTiy|u-l?LxY(}^K zTmus^ikdiw(oT3kqN62`)T08N4TAK(;%x^)8Y{e2M?T;Ub&?Ol&}p4ZT{p+P+>0;` z?2$`<9uQI_t*B({t zM?~y1Lg_wy7QHc(WDM7AFqL5dvO~1Og1g@VSwH!tuuR&4ZmG@R)5A4mJT#dLc-?+* ztgVMikVj`_V{-)aSF<*)+kOPzjNFF9f^cnU+=4=a!X`HU@qcRpy!!OkKZ10x z;wztr83#%%TTSE}Ebc2!!RT^6uD<5gNxO*mM=g3kocuBOH^|r8(2fVE(T>Ho0 zuN+`qH{`Q9OuBEBLB(LX z|4!3so*3^QJ}h27Nv3_#J*A1Lik=23Yiq*V4ntXkp$P`{n%r3o7u(Ko4ObAkjDF4? 
zz>e%se9J9wLWXEh#6({ix6kkX*;WO4`Cgx`Mp2!2>NiY3Di~2im*F_h+>Ebtl>>Td z;#-T3XaB0mtf=_I6dfH5OgE*E?KHEOH=p+^b2p`HqW|q`+VKjRk5bO{N4FRGU9N4p zcd|yEN~|E1v;rs8iQZ}wkwBj1z4$enp4O^f=JL_Y>-lqFEO%c*MltVk3T; zbG=M1&rKVxTh_THh}{DX>O`f@);=)OuejBhD&}m~<{iTKZ5ueWI))lAGw4$-kcJaU z$#o;-x^+Ky>KEHhe{Ll;3#2z5j_ilN=XPp{%)a2@ z86K~yf1>Lz&RB9!G@WlqWsbBx)YzVBxAG?mb9 zkLm@RlLTl8LOZ>B1B)`Ir=a9MiM;$STGmFaL>DroZw%qSk99!O0HP z7a0zMjKQ|pYU{rQEvkH;e=9$f<8ot667iLDc<+8<1U6HKgM$zI{r%WzT<5{vBUDd{ zOM547DF4Xe=}3XhON}f}6t0Ml2cCv~wTUU3%qWSSEKE}lZ*Q**iyQ$1LGk9?RtyvLJd#p*5a0$*!f;& z$mlL%OIZ;-9%Tj0Ae#;fnYyYL7Wp=pMvH)#G;!J^#ZAIxzO&=oAt#N|39|Z+ZJl3j zcquvQeoy6|feN4*s}ZrcMRmZTH_S8x7S17qz?lF!;oWgLaSRcBQ2^E&Zji9?|bnsi;KpPwZwRc36WR_fz49m zZDslj_M^$t2|;l!Pr~ni_5Q+CQ&lRn$y+goRHpqUzM405_gPRUCg>5s9ny8(Uag<5 zU^-N8m@B*#9nDS~YOB!^&JqnTMUOvE907o0|OwH@`;R_cXS!l#X zCJ&CuuQqP%05kqn)iJ)i**yuFH16O*%T#T)N=5Ur)cS8l72-CdB+h@*o7^FgNr4?J z8sJ0}q2rg#ujVifA5)45lGrlCBHC(f4)WFzy*QVy$1l2+R`NirPBHwdow)!Y*SR!_ z&XC%8sD@hd^W6K?3T)NnPH1>sP~DW+(%LObmVTaFfD*Aw0c`0`=_pC32UjLOHvPV^ zTS@apAtaW}P-)=uh{5gt?tH*uZ7ZkVyZh1RDLPEvC#4}*CC_M)Pj>3P-d#U#_%Uy1 z(kA{D8EgFr(P-=*r*gVaP^%rH+Yy0uQ0t9o=GqsV1iQQSqfJRu4T!dRS|IYo=$4 z7}8OzQO6tIdn{0ku+#jwwXzxf=m-THYs%x&SoLMtF`)&n2fye_`Djb6#nE5oK$Ab` zgToo&^{ev)jvLk;kq9H@phzBQ zK*H`1^YY+qS?krmm`vXQZ!e9^TI%qzP(ccDS_J;GpZPlK zk?MmiIGRu}W>;JnRp;Gdo>h<+Z$w{9NXzwAr4PTYo(0k-GrQc*)huTSLtPmQL?h-M z$nZ1b{goP7UM&$SSYB58a=__XGR?V{I=VHVmDBP=9y{k4Rt6tYn)&wl2BZSZQ<1CZ zRGj{YuDrq!A%>}W8^{@#*i)&GlSy>=ME506lHpfB;;lPmTm(7wd3NxIlVAH+y#mFZ zR^48GSd*hn*m4vr3K6kpgnS_ozMkg$Av<^Uk@w0~-E*fL#jH6U3p^e<98)|>SJ`pM z{cTupn5snX`=|%fTj@fnk5;H72n(E_vbGXzF#&Amn#l0zDx5s?6PiA?|F`U zt*!0E@8I(M)h=pu!`QOIDD#@`M2g@iDs8Aqm4is)pU!t*aq6%VS-{k{tiIege)n-2 zm(!;g_M&>q)U4F!8=CdbE>VNqJ<@~A2DNP{$tcX&)3bLs7gi-ff`#m=43 z%?i^Wd7`m8-M>Ku``ZnTk-t;o$%sVP(c5BBt=z_OdAQiWak_rlksOhcH#E`mo|)gf zxK{VzR<+8e7xe4GBJCLn_;Vb?Pk`-Q&NZAliDWf;U4youo;b56v18l_`KvY{a)65m z5s}`ae^4i>-VyoIeeR@`&THgDnt&8v>TzZ+-pJ?-hy6Gxpe-vsom|w;{$smEy6*UJ 
zE(vSc&!ZCl-UrF$Hm_F&0ZJT~^u?Q@9?OoiFH%s#X<3;lJmy7(e!k^WG|dpd;2bs{*)U*69p!Z&;9@;IBa#!Gvg0W6a^E+l2X;g=LjSw5G-tQ`y zSTRmzzjqm!VpEM?g=}67JSO-V72Nir(!HEu^KZ25#|xt4lEg%vnQIwO4Xw6Gl({YA zymAg?h=?3JPmwHLZXoK8Ta{uFpv+j68dE<|gjFY_sh-M2L zGZ>vSorXls#m7b!qwJg(U+Enf9VqEZR(IIpIZ9Nf^_>TAEqksf&4BMeG!wWu_+4BI z<~CgQ!~^AYd5Ns(4SW9L=3WPX50$6WW}0-aPJT+Z4+#yGXq_}pf=xbWZ*KLxH{hx{ z@L2t}OEH#yms$;dR8gCQHIY4kTi%%0^O7F=j$@(3%C}qyT{BB{4bQ9KPaQa^oT6XN z3)E@+EBn*ndl`G3rgSKx^#<0I&}QQp!L@ygUmzc!Rs3> zoB7fA3A2E?u?=q-qqR*?`u8=DSN{Dq5IH@W{X=R&q6BW_NT4XfhRzcUPHSVj^5aX0 zfEi4vu%}%WbCwMiDfvBO_}aeN?{y5JKgmDzyYrh@eRSQge4+jyQC}4mSJy-d0TSFD zIuL?81b26LcXxLJ1b26L2<{HS9Rf6N!QGu8bMyUcX4c}NAAozhPRXv?b*rFE59E}= zEf6Jeym_`$CYvpmNo)jgdd-n{-&3YBlC3$MKM-=N>Y^~yUgYQ7a1(+9MC4PHJ^G4GHSV|p9JK7#3doA$);1*P5m(Y9s z{>}SdzQs|RT{yEQI8@A%@-(zX3uUM3z+5%p)GK`69~F0{8brfDPt1P<53{U(bZQ}f zawuc-;)4lqJ(bHd+p=CHnb_+4m(5Vov2x{^1pefzXJLQG0r7fRQBAy|Ek42tYVuiX zoT=`wi4WL+4H)%EJ^74HJ>~*-SDA&(yCmjt@Hf^s$05u)oMf9A-hk(n^1N^(M|{pn z=jetzeGx?G^SxjSpAv>POd?n}Z#DLhRvKJL^_zHm*fe`TZ_#}3bBBw@FVk;iL@iF_ z?v*=-o{ln~sb}E>Huzv3w2{?UZN3h-i&2wJd^2Q0S}im;d-Lhf zZt^RnEfvAD6?i&ae$fR^x|<`sZT%)CQ&FbRYj5fUyU_MeGuh{5wwylqgnt_8CVqhnMh1&t{g7-f6wV;a4%j5Uq5Er z-T%@IR1+R^zj)<+`yyaIo31^2eoi zL-cGHZ{?BFI`k2#ECwDflBuf!SLhyr#23P3Mv~{QnQG!c#E0&e9^x8AKKqV-dFyd!hI2@nQ&Sc) z^_GwEoL;m1#TYWLk`$lo8vJ8|MJtm)arq})c$S=o3*lSu76h*S`gUJ=p>LtJ{bu*% zDGZH-R-5A3Vd18U-v^a)mJ=Cp(nj8OO7Th9(B^b~LS8gG-$tCONkMmgLPSbVHEGWG zN5osR)Y06>B43W!uH-rPD|z2_MeWwx&>pA8k)U$s(A#SdA0h8h0iNueIB%4iD2(lb zOvYx}wZeHrQgw}ny*;A3_tzHA+bw;M{kBxNT%T1?iD-hD1SV?zn;?pbt zq3+A2%{%f(tjOeTHa~giW)f-}_HR|kw@WzvK<;EGFqJlz?b~nLHj>i< zRj8D~5M7@F3BtUb`SE;rZRfA-__BX~uX^N%Z-)qqMe)Yx?-9@sE=qfq1aj6H%ot(c zubDqZ_liC9QWJD$ffZKZ_tmU2TFtpM%#FWxjq`PIL;gU`tEE2J$NPz;xckSd$Uw-{LU6Dq7r7-<6k9Kls?JcBSPV&a{;Li%2w zxgScM$jHCLoS=W8D_k|niS0gPRP>GEH}uYtU)Gi`I_vMgG<^MR zSxr^nn3S9Z*af?4nd?ux9lW)5UxB$ons4&?xt*HbhUD!8G+aU~Q^Y7V^hjHVZ9ySz zrcNPb+zuXC*+wWjwg&-|^|xZWtX|^Y<1eP)J^CH7hRg-S8^{wYNdnLIQwqj5TgN%y 
zL|$z;A=wxc4!&G(oH*>$bDaY_CKe=QSAD&)f)q{raJD|Io4RD$7qVw4^7XR+lrb9w zpq3F3TAsG&+z-#s$*qRgwh*|G%PjIKI}o81-nR@{R#Q>WYMMJ?qq(S+rsFmma;n$H zq2$vO+?;8O$&duT4)Z?@o7z&wz(cW`D^s05`6qLC@Z?FyWR*+rT#fPw2hWI1hQ}^d z=}#0q;to8-}alnyNV-;9+tv17l4Qonj&R`(i|0N2N z3LTwT$uLPLpB-NlbZ&qn*S`O%Uh#<@m==)uy)DYDfiT)0b>^y0CK93Vj!BKfEvqBy zDRpztBRxE((U>s&&RDsTtl~h0pJkNJC5>RDU1R7)_H6`G`H7Qn)en?76!2%TBK>4@ zq*z`)QFS?j$L=dPjy4zfNYlB<@RK8RQvfm^$_tor_xogk{mHr3C%e37$GFr zc@B;?gF?bR5NXh7HcBq3RxjaCYrBCpr^|K3DU7z$ky?I<(&jY^YtAI6+s82WnVl)!F*jkpU=$_3`s%3LGNR z5ZNm{k{L*Su3lpg!&DPvT4VC+HX`T9KyTnvB#Dl=HbQ9+m%!9~m! zyjeM(-#DjRh0K=+t^1A2o=4nQxB%kh24QA$h=P4;#u>r4Et^9E&JjFbe1J@ zpQyRT8+q65)^Og#^eJ3^yJ?3vKXAIl9CU0(7gjal6|Nyy=}zBZ{6)7PL`MBU_V#V@ zCNceFriFdZj1lSWeSkCE??rP~fV-D>>r*Ey{IO~@-I>l*V^jif{edsUkFQ4I@>+5h zV3*y9zuGYCbS%^{*9k%kwg7?iD=t2UTaFSA);08$x{QW+8gV4+3a<5L;|eh=D&ZsI z9t>tWUCR}+8&NmxGjhfqYF-E$|NVWG`TPsv##1vS{w#+VOMT7Uu|#(05@%T*=7Sa` zm%jfOM$BSO@eQDNh38s=!*kOBQ9098A@s2XGbaR2KgioN90?R+7jbfjviXQ+v+~3h z%J+gG{?IM&-V(m__|#v&c_o0?l4iO{{ludjT23E2{Z11)jU|<|U@*HmUko0~ZnHM} z4UzFADWba}uNE2p-0;`)q|&rqeX|)J$BYgZRGVoXx%&( zv9l_W|E7iFUKez&lP03F^1VL8#l-g!M{D-Qk&#^?NjtO`G3LrKr3nPV{RzqGn9-2< zWXJvJ(%Jkt!*v4xG$J+{q4y@W!Nv``HwVQB8im5y@VvyM{<~iceql^y;3hh#Gs6ID zq#r{AQ}5S7+sBN=hf6nE)QuH-E9ddD#lW?>Owq(WV&@^%83|EU1hCJq)y2Tey43zE zHR|i}!(X$vU#x!Lxg+i7oNInUJ{;lfyZG*PadX;gKv7fUNHlxHl`+4l7Ov=E9FbKU zv`4xDq(^k*_z_c=s|DXEG1(|hx}uS3F_9H*a_ltiWzbNkpZ$gSD9;MMyb{09q6*}- zM@Gy#S+xsnZ4Y{}drmih)8ONb?NTL=Vl98Uo?fhNIxd+z70K(!N@(9ALSj$y5iU_H zOco5p!=L3pc;9g8XSZQG>FQFzOffD>R@Q&X#%Cu(nx9|0Mk2oyLn9-{SjLHMXtDmN zO|o9wq722wm|TCcBj379IP$Y8Fbek}YqGfv{Vi)o#Qezd?c&_$M^=?ZRaIW+I`k4M zhx=9bM??(p{7tFE+cWWci5)h7Lo!~t^z$1w@3(-y(+bQ3^E!ih@(=w5q%zbz=xyj# z4NjMqr6`+_TEmd-m1DbQU??5x@uUUVCkUFYl5X;{Pi3P#7D&n#fPFb zoFB`iw9Hw6ABd#8;_6jxg5^Y27?)MR4GJ(Lr8hhWz(WuDM>Kp^5W^;f;s_CmY#e`e zJ@v8ov?gIET&p*pGVrDVCLZS9REnYuIq9LK{A4okAQcEVlJjkK-NaMI@NFCpI_j{l zg)nz-JT3W+DK#UH?+JEv&aG}lpU>#vOU9+d2fmsDFzjUN!_5iFr_(4kh3@3vouxkyQ4b*oNx2IMZO=} 
zk$1YF7@sbz<_7vm@aadYozbwfJ6}=q3_{wb_mjk;fTbb`6zED02f0WVkMOm=vedT?YR>)4V9v*n&{l>82P-aFP0TkHeQ}92=e^g+$15c2Q|0DV zLu8;s6O%%2X8$|_o{lU{;adv+bn76`yO)rB$ECOdi{bIR{6*>(YR=+JWqz|%K)psvW{bF+ zTdRd(JfiGT6m-S0Fvf29wl;e0?Thh*6}=QfzcV>Mu1T~L^x?2GefVfrElL19u`F^I z#c^s=PTsR}{WoA4waJ9ro<5A^x15~0GN-0!aZ*cWtt-62(eYU)=bY_?+$V3PCRO&w zQ~REsfh~Dh*znyw_V>G-4>~n46>MmHbUBYjd+0@&&|8+@GGwxPGKRG;h_>(iJTo8e zvw!_|jXYQ&y;{}pgcoq|n23XS=n{cEXYzPnpA!){Kk>U@YV=~}b7vr8S|++MOw&N? z%Ng%wzVl6G>$w}_ zSJ5iUu`$0OBCw&M1GIeedD#+2qtOiE+W{Tu{uQNE|Mi7c4;Y6268M_4k?nS5VK<`< z|BVfwFgpq5bH;LUVRt?hh8pD=p#zxje4Yd&qy&4VL!f%rpj36EA?;cCMyuSxMG3`= zKv){#j2y=8{+J?Tn}=lLBUc93Z};&!XD1H2 z+Th&x!c-nR|9hd-l^gojuLY z3te?ywqIbnTOD$Gx}WyHZVp@bJWa-(7v>fG*gRdnk$5l!x6I}>$W*D6HMZX7-h*@7 zBAeMjTn$+FTHZE9PsU}D4WHg7J{6PKrjMcc)kc)XdsOkeb$|MHtZn%_P|G&<{u}`h zc-Qn$z}6i2Je^1nvNg-(p6i8}AYWNoX*De#+L+ppkF>}>t7>#}EmW{}#IxX9f<5#_ zdab$O9|z|_rrP`2cn)H{4CnG0uUgx06JT-`t>*AvaUOI(p;rZBNYb$JxV&m6UM&ch zX7JMWKegSIxxL&jR?27$M0f_)K)AIOTr0g@UUhWV`Qr3;L&myf^838dL)hDF-Yszk zho7Mk^+f6zZu|MZFAulB&S#zIj8Xnk68lneZr91Hn}&zgH&KS#$$LSIP!GuNd*yWY z%=ub41IOP;%bpo^dAr92h3B9$QqJMdYEenYIeiEC<$RhZs z3`{?yZlDiR<){DpJ;F9J`@L2Z+_3_vE`NL2hvcqg@Y^C ztt#u^R;Kz9dg8hM_-b?yvmD--7{?w!tZURMqVRwwdM*82W_Mt+&Qy90@^4$_ z_*#=_a6Ggi?neO8%Ve-f%7|l~7dsB$EXYnZr{(QAzqL(;QmxooQPZ6?d~rAG%^E3f zZ%$xzbH%&O;%m>fE+6&q_rv7`76u>@U((1M#(a><3%lpUynC@wOQ5qzP8bks9J(pxSTO_F@jwXfy z_*-^c;T)e3446F@aI#Yqa@_KtZH&HIPf6NaO#QCDe$&$Ig9-IdM;7*`Z`XGdnvGJ& z5!u_~}J{DE}XvoNBN2}8w94y^iwwTWjY-h4j2vd{j0~Vs! 
z?A)sVr5bA!Q_#TFR=2~RU9HaW**+W*%z5#b(T>T~R@|H^F7;be?8Mhrgr|-PK z1ervwt!DFr%ye~A6hjKzr)+S1zP}rc2#7n*yNl3&1eM>-R&OZk5S695(5=Z37(s3r zM(%@lZc-9IR$`ao!bk^8DCQA8<~hD#Xi7%0_h{JOAxo>><7;xEfEr||uCG0Kw%X!| zCX4q^CHBaINr|{6=A(@Y%*o>#jlMQ#z->eerTNPox_*rE7s?mB!}S%7dPcrA`y~~w z#@R0K97X;W52g(aUYyClC@QPWFnAy3>9PCb7~k8<5iu_eY_A`e|v5SI-Q1cXsU8ZoT=fX(^-?F^KT9A`j7 zCIx|ex157V6u2%3uB47Mx}bR@_Y@WurT4Y_Ls~%n8ljc_-HXMUm`CgMC0n{B&oI%( zQ;5_zj^THIKRu2C`rLhZ~wtT{0(^C@URZ z>klSV*|+!j@G%eoo$0p(>IX%@f?7^BCz|e8BNX7))cF4<{4*a9k-H{Uja5oV2QPuY z?t-|^GA_sMjF3*&tj)1Zu?zJdKNGhIT7N}m`drjm6leUGKd-GWXBj~)uFpODx zN8Tyq7}~UC0T~Mp{Vc8+9Yb^&4nb7%pcV2*Y9tmmFKp3KO-$Y^Y&$gZ5%i4sR%k%P z!zjKofWbAb>L+t0Fv=i4)4QB;xqyJ5Y?sCpV1{i`R~1Hfb>b8<7FMP;}7YY*%)s(^8Ixqq{{ zkS@r5y%e*Q+G**SRx&kpx`sVu=Q6&BO`xzH61Q2vS zZ00E}@I1ilbwi?;q6g`Hgh!BO3<=$A$T3~LBm6lLxFLZnU}?&U7tC37U^+Lu5t6MX ztRuq0l#d1-`OwK2am2+%cffSU#pUY7wc>iX!hK}XkhQqh?uyVP7A;mGW^8WF=~t(0 zT%oA>-M&=JkoO9nrmv6*H@^}&FH>SOeAz+_bBmPCcItx(GffQWk=x9f9YE7Lz$Fbk zoTS9h_A$fPM~;lsiqxDCI&cTiViRa{+o-s)u(8h0Ylh4)UN!?_+hY166Aky3c!?<8 zW>$g^+7K51_$M4}cpO&<ax~w9MUt-L)%GkS~+y+69(#W z?9e$CtsW&}*?3-QQ=1}-DXLgVdB!a|2*LRLd#roBZ&yJL1Vt3DX|n4nXNHX17E)j7 zV}f^A*6%V7Aa?b2o%+JX?Iq0UNr<(Dr%~~i!nJ>3rjaGSA^kNcFa#mWVXRA?>+?OT zQEaZuFU}e^y)EE^Gp3n!me48fxWAsrwjuaFEkB04mt*YK(frlGL~2_uzEo%a%^OpzPO+W z5h6N~`;F;F(K7q*l;QJ~sY(Xq!<;Si250EZCTFT;HT{>T0ndck-2RQx_{&4o55li- zuvfzLIApMfMi>|ViX_Z-^vpDz`aaj}_uYTJ+0I`eo!W4RxL0twkcx8Jgd7d=_Q+DO zuVZoopeJ==BJC-W&%X@m`jtq_h*TB;hD1>Iv-c^^ce^m&hQIusFdx zI+ya!?4X6n{gZOUf}E^&N6dBs`ifD(c~&j{c$=2p2e8FF*NRKv>94ZFqRuP=IxZPz z&U-C}zL&M%$d9u21^ESGm6=?%j^vb-5SyX9CpMH69qr+2U&~I{U=c3+M15p=!EeUq zoG+82I)qI=?@#j_7OcB^45?fx#B0;1qP6;*c*~&O{ZxZ2^dBF7;~XeieOMEQZI=C7 z_@w(Z=2f z4avxr6lpFbot84aVr=lL|M*O{F{f6`HG$^)gomt4`Q-sTeCqnf#9cyp(X$0>wdm?V z#~&K?gD_%D8Zr$Zcfy%w5d#B1rb=n7K93l&viWDsEm%ehe!RS4xTG7J7bZ7Lm+nY;^^HH?7tm> zHnc$#ZB-=Yal=+qb3*ogH2?29&wQY)3F_#VEF7u!;BlslW>hxA)JI%Ma3*^PDWH?H z=lbnmpb6JrfeG)%V-u!_tC<#MlttzBVGMkKBPz-{CnPiEc5g4yCW+S+^{v~+=*0Tq 
z`eo0Kg0<8|WL449pDEG3r?#R@MR(mHk@aNi=(mr3ML957+bBE_T(AKe-%qKn&S6HY zIe1-lmwkPA;Na7ew%1||rW~ydZV{q+=V*8h-+_YSNmt>OiXN?QS3KSKZEq{$^rvDU zH`4To46hV&IrEdBo=X}uBQ(`-G+wdW~D>4bXXd;;LFR=RUOkpFN^w$R&hXQmrAl)04ij0(@=eRmA?H$3czkiab038d z8{byLRkTzd?bmeQ4wU$mjQ+KvqwR>Qy2F)Q*sZ*9`Fz6)0O7JMaH^`X^& z+A9}-hIeSrsjG}}2`usH=1b-MMVUlj{h9yQ1_@{+J1sd)q5wg~Bx_4Sk(HvG!4Wdln* zUrIyrWTlJG&R<(nlvh?7y2d2V?u76m8D}i3J|N;+OHp=V$xpy$&)>#)bfn}4J~gM0 z+I_Cf=?Pk@gOy67Gp%C0G_!$5H0 zw>1@I=kPv;Uv!3d$l(b-qS5c;rPMms@FnDha=b+>s^&-}NT>3-O`V2~jW%#gXf0q# z006=uC5NHONLCoUf-{M(_}d*i*Vj@b>!0LZjR*=Dpg$i~U+uvIS2rmi2-zTQR((o| zQ*ev+Q#+;=jI8!-tk^1ijb<_`I8#?mmIYXoL7CX!);}6h3+_KWnR=acb=Z$?TjUoT z9H2L4sROj?Ywf6$)dugc<$EPL6Snv?#`>>6h9i>3m;VJuYj287b@F<*D;ppS>ifL} z3=W{7D69=!E5N^PaiU~_h}jzO8flyFffc{Wrn<&QntGk0J#EC#!(;qi_AMYfX9Kc> zI(k0`xv~s2Ggj-9b2r;Ew{g=@cZztCf)uw5mP`DMI}{1&NhWH7slk3b9(1m;?UIu7N1*zfKRt&NQeCJ!Jo?qV z)oTkMp8|yh+oqS@sTy4Oaqlx`w6_Aw-ts{k@PE&>{V43tS+Hv)`_yb{Tss|0yPG3M zj+~2PzC=ko{rK@?rbNH6sx$1gx_qKtQ=|l0;fZ-tUDEMAvoSetQ(0NrL*DdxogyE( zcYrfDzX&;Meou#eh*?*EGCD_)2EGQO;Mpu;3?Qm%*B*2`4l8L(OAery)E1=F9PM#f zpy087BeSaRHEywNEBA^utUR0XaXW=Ho}LWIA?+^D-=v>1dC9p!Z^e%$1%*NvP|_Av zb^1rHr&aPvc@UAQPIK&;KXPgtBU4)lUA%)Rc(8cwzM zQhz4!UKFtJ{$u z{3ZGlns<0SBmT(@hZm(Ljsvg|$&R>*Ca}%+!El|y9#458TwmC~Xf?q=8AaFeeFC_I zhJ*FZ2FrsJj`mhbkhM-nj(*=_s2mWkBu294A4R8yt8ocVKu=;*gETBLfN|(_+_EOK zTWK~~!hIJsQdx7bw6_(jIYE`h3qmVQBa~@;#y5A}Gf8pZQc{&7Fd$$|W~4c1=xb`Z zkH%ND1g~kksT}^Y;xpP57Lzyi1x#;#65eII2(N^`8 zrQg3dWL>3kKF27Pw&k|GCw0I(+Sfe|vOBolBylaYx}5Kd$`SH~S1qI;Rp?riVE?>Q-%AQhdRUG3Onr!Tt%ahsPi3bhSq?guz)5*)9e671_r zV<5bXNrYM7{I?}_^siWWn%%>@F?-87z9mVhw^)AK#QKAs-SNXW|CcGlANCW>*dB;+ z%PV-HIfxjNf|6vpAj$pbXsi9{2qORiSg;!1)-fWN358vyBx)Pw$)YR;qw*di%BUL& z@p9^E2VSXt89ibZnZ9bo9K%=URJEwJ*(a2XA|OT>*u;ZB3HZrEzc5%gp^LolHC1vj z+tY&hQ=pG`7gH^RF{RY8jy1#c9s*=S_%|(a68=~Lp+;7X>)CfeY>ilIYTm$ z91R11>gQ8rI8wC1EUDuxLlkdrm$!dsl+W4+l5mNYAf<2jB`!Zf>tPzTc+rWUmPh}V zSJ;}2M_N1H;95Xe5=ZG`bi?BOq3@r6V_PWns7)-5<=1w54SVg_+;4E-FuIvqbi6?? 
z%nX_9Md=~lal($#3mBkZ4N&^ly!glY3Xbgi4oaKb3sm>xDdn9jQsq*u$gzg8o;{je zR1k((Oja#bfW@~43!3&mPF@3y5tnzt?rKn$Lp(sfaS>!%^gm36u{eKa?TY@WI@MYR znf}o`@8hA3ZI?Tyl=4@kS9r^RrRDMxj5zL33Aj}FFzM|b%g&_1#-r+&PVV~@9c+xU zEQ4r6SY%9w0jD%n1F#mQK^P{?}~(BEloLNWAOO)kWTX0 z^!olSh`*9O9RrF8X$0+mZjFfdl#VQ7WJpu8au*bDFq3s(eE9J-9AUfd@`&>ok?hp_ zJ$CnFYk>vTmzH*@uTp(q7x)_?7E7!tc&C}St6B>X{V(%Sn`>4CY&1yO78d01li+@c zaIJ88T=ekgUzzc~)k(_8>M*z#QS9SnhmB1IoX&Ya-PCLt-b}d$4a+412?Am!KqM2; ztabW$Ww74L$-YA7@IfjK694Q7CyHaad?_L$n8&@ z$y48+M?#UAk>JetbK>tYHv{iWNJiOaOud)4((3&vg|ls3JP z@2Pi;hu_xbSG6a-U}I2rhG{dtAvnlZot<(fOgvS1${<~DeVVTMGNLXcjWYyA*3j~X z@Q~eJvE%Vt>iA#_s((4?Z1VA6*YIZcev+Qhvnl}kHM%ro#-`BD{1+=(f=IgtdM1zZ z>QqshV>l}4N=di)JKp&q8^24QdQrg@cBhvhV>lIqrrgH7*%O{>kG$VQ^L`Lk@0s6J zY<`CNHQXP`Dk&X~6!}Y)g%e{aT$)Oqqy2&dNGzS%-=P+ocWTNiYZaN?Pu_)})i0c{ z5Alvq@Ru5mv%z)HTj2mw!reLC9s+u72j15GO#aJ4rr0l$@V%{5C94Xn3Tq(P-A^)? zIy|MbEKFczw8Q3h4W=%hklx=K^lu{HU&_JpLq*Rk)FnaU(jZ~n2U=w%Tj!fYl}BdY zKVMGBI0mo#&#aKMdB)oaWRp^{?@q;J4VDm_r*Z}V=r%|+%~U+ACb#h>8j~56a(rVd=l&kDxi+{iiv2^v z82=Pk;^fc7f^Le?+J?K|bP10BFh7S5;HAed-tJ-{Xx}&dND@)_hD>;&C6zCt9W09IAuzPZzkxQYCS!n78zcwt-_wbbp6QuwByFo?Ex zw@0F&uQEI_qh04UG)EoXu;#P?erJrDZco68SrrKh!pD7;xNuPV{NkJj2cy1&x{f-qQkzL3rm;R+4EZp zkpDHnlZ!FLrWi!9Q)z4C)n4)zAL#dvVcpU*22M){qE+%NgPFsz1+$=|^d)@u7gs__ z&)})N!txk~2cO_7F;R~w+`TKtJ3z$5QQFxbq5di>NsLYmY_4b1Az;@&+*b(O6EmdRz5d}&F^C>c*2x2`rkTFUOJB<8Y+S3uQfvq#9Z%FN!BOyYs zK09Wg%dO`XZu5P4oTDT9{i9;vBlA|I!~2D}m(y4Wq=#jBvFW+S*HjuW0F2%|4ZjNm zG{ne}QO?VH#Emq^%P1Pp4a=4z-S#Z_XkA$N3LFQ=upU<&D&uLor&| zoyn(D8|0#*-wVs5Ap;_6L&M4nieFr51=AdEbw5_`0Sd97s`rU_iQRuvdJz=}SJ_?` zvV_3BaJXNS;K-LUvpVulC6-En%vHiPpFiN-zoY~b`HtQ%vKd$8ECov%+WZ63S4IF4 zY2$T;^sn3&<8%Lc-|pMYH;kefR_e9N@`KA#{d`9!3R=a+rfzMGZ7!F|Cc@`k%2oaK zsLav|aJ(2<;uW^+ovX;SOcGZFz#kPgmxsTOj+$WNU|`{nlsw&BIk}|(NJWNnf3>9z z$OXLg*wTaBARaxv8r*uB0X=qfb4cRICzhb0p$o+bm1pL(AqS9;(l8z?7`b|Y_Ajz} z_X`oSpHq@;jZF$y!etm@OI|hJZcB$_Q*Kb^tH18R5G$ZB<_kTSyvTmH>fI|Xndf#G zb7qk!CkUlVmezKr`zr?u%i)1-9p$HlaMC){d(-cI!OG>8tKDM95`_sCW5lpKb 
z4g8#VO-iFNrsEP(=+V(Y31qBW?^lzLwE3L&khVu_YrW6;hkx0-6NYsS{0IM0lkHP> zoSAkRa$3VGu%j4A{oSDteIWse;y~@paG^82Ywfp&;ws`rHpK56odVYB$%YiJS!Id{ zhn?T;2m1X0Q-2jmG_!7NB-UqC{#CN1^6(?zpdW`xx3*~(cnQKH?Q!3jB#TOOzL=RM zrKNEBhqU7hU~WMhR?<{fn)o>}7qhaGiiy@aZt$>BCj;@$P>7P6;Y|CWs!%z!a3QCy z&T$|_4E@g;%|BP+N?)o2WVkVD#nZ-B=yP?S&>3yNxDKWlk(Z)z)8JcYhO7TZ8~O}L zvvh%{1%ajwYxDaXo|!UxLqduqND{rAN?piB<><*vw?3nCZV`;}M%k;7;8F0AvOdAzq)a7q#Srr(0j}4WhW%my zkXm`+-@|h@Ci5BU@~^zAFw3I5rn^k88(vBH-sL+{R#MTtZ*$lQnNDFpP9=oC1{TM# z{A2;EMk)fEuEmP6uzEIYu8V!SoKIesF{wI(?wr==@efKLDUR}@mg-eVL9*s;z+GNBZLbXxPaXnA22)mt5c1phssrn*yD2HF?bEXDkW!t9u4 zY$kL$)S)=##p*ghf&hs>O|LVueKc1Lh+rp1Zc`G-^pBpFF;lRhP`8N&7WSqA#}uw* zO5-Rbh?@!6#1oJ3OqyI7cCjSo~zpbcp$E2{Pk zq~JaG_9hUKOm|+MIINqLhS7r@`%Rb~tB6CbVK_~&_96cq>`gw4(uBbw4NF#LQ^U#^ z0UQPZDbtyqVVdvh0mq~4Q+S~0{EDBdG^^3%X;d9bAB9u^%N{VHUk904724QShp$3KyZZ=S6{~@XKjHu?sIXa9yYs*~cvx5L6 z4{l^IWE}Fy1HJwF$CSnifua1>8v#j}Nns>}De(^XF;h#Fsz3`0v>}9MaWSEUT*=S; z0MWUUIw~r-;N<^&$plTiqL9~j^ zOOImv>4<+sWFaK!GEe}%3hW4ND1h;{4mzYST=gO7nu&7yQHjTGz9m&zS;^r#Fw5#X zpMnF}Wo0$a+ArTR+H&J4ORWci2n2dbkg*EzG6j0!A813&Sv*8S3CS;H-!S}8P&U@u zlNE@F{;`;8YHF&2VuHZYI9Xbn^3;}@>1ZRCeme-g1Qx|m!0u>yO2S05pTMP&Wiav3 z<7_h8*qn)=$>Glb>lE1|V{^f6r>X|}arVkHSpDz-x~co=MR~%k0qhte>-ZfGRAnoZ zO^b|~8#K2|95L6O&(RZcck^)2IBtXKbK^raqlO zpM>GW&+?iR>sp0X2aGlyIU5V;y8MbT_;hN(1E(r0+Ha=wr-1Z23bH9cg8# zmUhJda1J_abTlE4cZ&654F(8vpv3d$(ghpz8``ieLo1UmxTVv^4y^J46@)%j-zGT_ zBu0#*+zzuS>{1Z0NNIfyGz&mYD&~J)BCS;`L#v#s zhLn~`4$#^hpkAHW{hBmeKmb@EJXKZiCWyqB+^$X&2!cSv&gFdfdQz&&^^88y+*hF; zKRTqR+TPt9FqC;G62cJ2HP#CBQVHE3As$;3INYVB(ajPZ|4e7GOu&VsfJLYN|1nfV ztlsj?hlP5EQWW)b@QleW(0qdT&tdFA{o!8@y^B;w$3Txs>h~OETVpGlkE_iPTad%R004yW z#3+=mX=6pbs{y*cI~LP4(nLv+C@MK&;pjkbgZUFV9TJ1N`S(^sD8&E_cdt%jL{T$C zyJ?p>+V)Aa&NUtlRzD`DFK7T7kphBK(06#J>OwaM(KF(W-2UEglo_23pNeC?z(V0` zt7039Jjq|}w5jtv-rE1A4Q^F$DNtFK0G-5R?duFrEn^gm>CA(WrJ|v&@uk`_(Hw!M|a&vCI7!I2Z-U_+Z{5fVoe2E zCqRumk!z@R>ocb8{ZUsE*7`60CHZ5H9}XyrK^W&~*jRn{i_OMeN- zQPHeIE2}O`TwzTeA&Fxf(J{?d<;D`*&C4>vx~$5^=Kskis^s1cxuhsABUKy-kDIz3 
z*naNr|J*>Cs+OjlSbp*4!rDT_klN(!W!Zaf*I(q==kDw7_Y^T#GcZDef_)+VmoJK< z!v?=p=GH)~;mYn0a%^qx4IS>+r-_r<>n0D9Y~hJbI~>)R$~IBGLoAXmqzX`X5nicg zVy;C8D0orEHqa8!(JTk9x;m616F#`o8R<9No6Zsu5no9dMUFg#!k8EjqaiHT_nz!J zmpHBJoE?fP8i2ilLOubQTLU;ELm@Yhk=#pn*LoW-{q$~pJy_=I6vf8t$#uhFrKxI1 zxN{NNg((YSg|keUGqW1QBO_t7rl>{6v7dul{|=u=zcnRrM$7#V@>68m}|v#(E^C3OnQK4itk z!^7(xvpS|b@j$JTEd=>H5#F6k9a}xkZ>%u++GXm*WBL{Utc!=z)YPe|CRZ&rz9|s( z;NYeKNy9eGuFOo$7kqG%u2!65Y;WUM#2G&hmX-t$79=be3(07*M>@pFDFqS8yT!=4 zG)kk%r4dCk^ns>{3TlU-1x665`qt?=crKkmj}NGSI1fXHShDHyoLkAOd7Cz2PA zg{*2Ymc3oOt-8r^Pm!=xBnj3q1xP21cYie|JmsP-Ln(uVqM+}|L9qu0t##yK) zW2Kflk`PLZ{v7zWU^X|KAq7z&)VF+fDV9)R$w?#Dj~}1;M=Nm9<=veQ1>$ z+0*x;;e#gU)8zjro#$#BdE>h$gMH^Ma;&c}!AAFfOVhzVEsS@QOJ8(pW^AdTePb($ zj78jWh)3M$$Zfvul4 zDy+S^v5<$$b-!akM{ZK$87@je818!nfBG>P><+T!xwqYTda!yRh>DGUrzUA60Ez3f z6HRBv){t4+{-mHJ(E?Ty%g}^$^WB@9C8Vc{*8*0y*^Hzs;6Gazq_8#_(^4TCj2Mgf zZEiKRDN-#C6DzXC#RP$=3ePKQ7KR=2l(31#W$XyS@Ou#~#gIuzF1vhZYoNQ`Y6d>4 z4WF*ivmGyl8}L;1|2?%B*z-{gjP}RW93viR;CjFAM>Xbs8U+ zgheox_TuXP{2AD^B!q^={
    WdzXM_FG;K>nw?AX%a;oX%B(0#@qyj}2W>mzBo=#7QoE(nKRjAbw)5`&hDbr3Mi`2rkk+G^>*V6En ziaLf=mzK#eUTPRvv}faBfJ#82*~q-1;#L^k^7aN9oCtg634Ha3iRJn5Vg0pU?)Qe& zMZb5pSU+Fxt(p7?vssII2*lfC$KUOg8Kzu#halb%uYYm?ti~c+SJ8UcqXG4yu(dhm zH9cL0jir{6FNZD?JKr&m9oOhOdA)46J-X1q79ZOGr zYA%jw5&Mw1hiCC5YaIB8Am8iy#(#Fx<9`jTSE zXu;P{l3@0x=%*W1>N%)&{g(U#wXLek5_tc#S_!x6<&nxVkJGavZGG4)Q7v;MS0p;A z;WW38UU$GLw{}T(B7!{Qf%~#Yj=66!8|b>Z)@$bNNKY%G6s@<&68CG}I9)^E`9D;> z1yGh-)HaNwgfvK(fYRNKw6uhDcf&(BNC*f#NVjx@bVy2fOLuo89slOM@B7XCGiOHU z91iz=@4fO`*IH{`l`0jO7WW$VDU1Yug=Y<+N%;Pq`$wF(l^U+q)}X<9dyupwCeG3B z5Z%%4d0E2r*?EG?^29Q*n3+8p+i0G;+aBUHFa|G#tu=nf=M<=?=4NXmKyWpwIJ|FB zbB$#A9=b`Y8OKw}27wH9!dBE&HP>)BkECB!ZT6vbjzH5ch%6&0V7$K_NwpsD9{i8= zz7o(Oz96_5N2Kb+s{yI@JGPiGadENNaA)le&Vkq0)Wk9&h-|E^?UQR`n{P~NzQD}P z%~jf-N(19|>JZaKArewCVLo|r`3b|e2?a<+e0D)QvJ{3qKeJUFN2IS2U!$&jg122$ zu!UP!o%I_i=5PZn%Yujx7mxC9tr=Iv6N(k;;UjuXpO~gcf6ii^W_G-M zJ%n?#SkgvV#~!9s4h6Ij=o`_&R#UY{|3;)wO0ie)-7X}(ckmw%xq~p%g`qKN|M@@n zB%P#@VQ4wV%QG>?#_=;-Yi^#ph*o-^KWHanG!H{CXlGsq6Ke^e5!*?^$gm98*CR73 z6;H?76qu7_!79Sj6iRPcuX>LP{r2OYXeV}+=vc7bEnI4HTwT&_u-VxXktGfP_I4&A zN?6rLkgTP}%XgmB>4ig+=mKFx4AP;_kaW*=D%4g0w+zRTr7Us0$O8tLFLgmPvSJ+i zuHh-w)j*N($YBy#7*uGyvw?F_c}-Q9<2S0qf181M^lH4r)uUsd+sgX_?fj~7-kej2 zbF&zb%(lSANq12N%Nr7~f^p*Qh|R6QhA^i0WoEa$1+&~PI&e9oluc*b-1DQ%6Ax-zukU%+VmXKDg9&v$4`zR=2(^y#~~CFWY;$jx{M zNx&JI*(q4bMS)_0u$3H!%jIRD1RNc)$+%Z7zH?r?RN!Y-Q7ytdHnu(nCMp*C@S)w^ zYY#VIaAXZEOW6cGC^|YQ)U_0jknv(H5@l{aX!jF7yhbP38K~vh&H1o*a%um1#lxKe zMshe6@7zX9OY1q}YlbokKrIK7Px&Ty*2sIWuaRxIq)*;_SLGd!omT%TBb_V{)NTR1 zRA-=8v&C!LIF5Xwena!#&i;QgR0l0tSk5Y6bf^rEk`WuxWX6;n(`Mcec$`^U74>v? 
ztLm2n=Dp#VK%lOLp|>yS?E!y;c<-kl{WwHfEG0^+xB(`ejLTV`kd8 z@3n*QZQCo{IZ=~3UvuyQv%XZY*&{qPk$I&5i zcel@#%Z6M~1$q&7y16PaXP~VO2{_3(ZJtronD6as!erdSo*17BQa${05co-Hog#Gl zb>zM`A`5n$>J$d%2bugqokYgO^L)n9NiSGjJKx-OS&bpE9HleRptMpI?D%pu28j%5 z>!gf91frjrn#c)q5UiYIm>NT8w?tRPm4YE_l6tF=S7ZdQB~fSyN`7f=?4SDA>mKv* zIinaiL+XWshtq-_rSlVg7_qS{gy_O*e=$xCCUtZN zo&95?{1#ltq|4^jM^|KgI9?6k(tMj&JJzmc%fdyVRiRk;NhaKQyWztiG!iekQ$!YR zdcX=FkLW!wq;_1fQzuXiE)z07uB%2XUVry@Rz*(D5!9gsa=`hw!~#FkxuI3=MKU!t z<>hq3-H~U&DcEAGZ6_p)J6Iu#f3e$WKiXfu;W?RqoYnj*Hdd0-HuM`$bT0DNex1+_ zjY#meu;#Xk&xwvN=onVjoqBURefAyfW1Q@wOBGv)h=@R0 zRU`wAY3Qt0Yo_K3W~E=mVI1C0hC36iNb_iyk-TI4EUulgX#*RtMkj&+uzF#W?6nS8 zp>AW=ip|2)6mRr2Jn`-dN0rT=l$LF`kVYLD#W+s_Pk5~?tn64QYoTTe0Sr(g3bWGN zmZkDAKE9tZ9bX4!fpwm#u*M%S9a7?rE=M;QJ2m!p2x%X;+!`X7Ux74NL=L2bB$&Do z?j0rn53|}5V;8T$du#MHM)3M$UHSS}X8BHUS>PajEE|syE|grglZYSN+n!_d6T&vD z_*qy^gD2;zywGdRG!O1Lc;-LQAdsM78M5Z)j!5JxLCmi(K)_M!ZF63~@M}t{W1*{x z2f`hY)bKWyKeqs^qb+s*yJ1#oeMnv{PXNTUD88GQh(j|~djZ(r&cFgo%V(_+=itZ@ z7q0*b-a=aHbL1Kl>C3#t=0!tadecw7@T0FYw@Ha{3nxnFGu%h@3?@EO4G~MPS3i%& zHyJzC|NiAto*!+WyQl%XZH?YyU`Y&?s24e~ji-p?0?#uJN=7;MwS*q3UGJA4hNP!g2c6|j8;_z_)q8@0ffAIqOT}VfrhU{M z#+S`AQ1Rtp=WEP}9k|KW9%L}J@9mT&Rp1n0Pyc)U>E2raTbiFUmm^ME5`7Knjf@ox z!W&Yp?LuZWN8AtRdu7WRyi73}fllDy5WfxMSR#%5NF=a>gl%^I1+F!-KBxK>SSxa; z>l+Cck98R_S&N&T>)RC8)^ZMNu!nVcLf7(T+SooW*)Pdi*|0c^C#W~taMa>(tL4`e z{_N!*s9)RJI1n+b)7`~0#M}VMHWNj_Z7rv2PENVSs))4>{qfy=>Q00=3^z|B2omwz zR>wfW0pqtg_WPc1%Ica!&)2evJz=!dH8GTgjbs{?jLvhUr9vfbA@?EI4!Es_)}3-c z#NUhl;{@$lYq~(XaWW9!Rq;&sF%{|mi_Ztrd(@FC-nXGM zoKMRY5W)lYuNzHy%n=kYaEof*TARv8g>ki4f^;DuH-!6c7 zWz476Q3rwSLDCz->BIY*^~_{1tjgjSz41WIdfhuCS_>P3#Y!_Cac-`?WIJkNQQ03Q z_g)~l%sM_aarEFVo)`yfux+!sF4qpM-PwjT3HGUJdNZk+Qfd`&R|8+)o^^JcfJ>rF zJ$jT=fsa8)Gc-01jAV5LLjvBh*kq1FI=xSlcmk>~j4MY>z)ggA%i*ASCC#;0foN9> zH#L#9pcJ|(i)OImhRVWFVNhK8GT&n$)d=BUPG1JcMEIZ1$@>)IIS{@ zgJy*Q^@ia$Q}3Q+`!Ak!AaZW7Ft=K-gGW2N?IFe5+U`+Ecb9l$Z`wOLA2~Iv0rs$G z6=z3;;ggB~}ix*v=lbrnMscq){%r 
zOTG3Y2Us8z8}To;-^LXORb2;Aiq1#CN3YArv;>Z!=I+jvBm4QA)iK1KwR~T*A9EHLSrU47LLd`=laH?l0F*(ESTJ2K z$$ZfpLHft9?s*iH3x~rCk_CaqPdeNWty6Y;Y?rGWhDnYl`AER(!4Pp+^3FHLLLoAr zjG3P=v`%knK+AVwXc)J4?a1BNP7`1Z^m=uH>!hjeQ^FCpj}+^vE_<|so*{Eq+Zw+O z@Eht8fE^+bjUhlDD@w+h4C_-~>X!Hh<-GrakMz0P@jUdXSw3dhbh zfyp}c?G+U-y}n2b^r9KIV9~4l!6lBJo%^Gr59fy)o><+1*MFF(V@>vJv%+M$XAi>` z5xJ0Fk(z8Vb4dk9CfphKfYZ3;W(&TAGOAkZKF58l2fVjc9R*ynjpRgsqJt+aEG)B` zjYy&Lzuba5X^8u*H+=3X(}tV@`sk9XlNJkQvk3pD+w?&1*m3J*0OF#}Q_h|Hl3ixh zHOn@=0n3_rMmE~^wjI+9s%KD$>3DM}!KDJY1-BcwrD>bovQz~bBRcm41^xXDGIou* zb2SuF4hUOvE1(Otv z2@!qlfe9nBIKQ+NR7Bp8EVZ~k+|eM#E2gFoNha~|H@kPee~)A6QQR=rITmic7QKyu zX@O56VQZVrxf4;ZFk$!@pU*kEFe=ej*qn10WME~7PsPykKSj&i*`#{FH;@i!Fv2g=aac(~ly7ND-xwtH9Po1=9Uk^65 zX0>fmO;7wCl{lRhtsh3{^xpbQ{OTcNZoi)zh|FjK^OUVlu2ZU%*(l){cLa_T-@F!B zTU?cxiwv{W@(BFN4=t6pHJ(%H9*nD<`$_UkqT!61>=YEbtqPNWFLNnES!hd(@*8_T z7TE5&u&TuAW^)S#XEhC6{3C!kUm3gHI%KUWpPZt#eU(!cZ)(BNdUFcd*D zcmW3&>}K->k)_1yJ7>s7Lam>)uo@-AmS=^Ol$Ori9(X*Sgb6`*n-daS4}EEfr4+l& z&=l32LSGMTMK`?D0C!qcR+nxt!Ayi83jOpg2vib01vTuG9JIviP_GAX z2_y)UZ!O0^pJ@f5yTzXCO`)f6Veoo}n_NE7{%+gUiDaRPQ5ak_uqQErNJj5+ z+{)3adoiJNQ9&_dJ}~vs{zMIY^NLofA@b4`w1Po@m=|FeFV}+WMpb+RS`UPVkrlWM zDg3)>YS801uk3t2ipvQMkj{Ry`&d2>p^ON~CgAh*L^fre_sDX)b35IcAe=ehhSlad zBd8+rk=Xt8-JEtLn|%8fi_}=jg1$RdO8D(q*?Hjt;`Tb+$gexFUyH)CA2(l2b^G-!X8{-okR&eEOpdlfE;w6%w>^;W3q=1$N4p9tL7Ox{}lbK;ge zrq;F0h)4nZ7pQ*}OARLTdH`;*uHY#|61*Xz{!?xFAu+X`)pST_@E|DOwR*mRML}yxWza;?VKgrT!pg($j>e zN3O^Frw5@_0Ux{({FV~mgV&Eo`K_jhmu85OBv0g7AnY#Y6q2&?#1-iCEP*KIgf^csX}N^W~P$#h81mKN^Lljmkm0b=j8 zk|K1U48B5HM{q7=ZEH2x9`p3w26qw6J95e>qvK}LFBbp2N3>Ir8EqN)Z-$7Q9zfC0 z%>tLNwI%MPVAQiEO~Vm$=lF1a=c@uY_>~&<=f@Q_;HsG96$2h5Dwl51ZZFdKuUr)r z=G$tF6g^6j@g9&4gTp?CwHB?=t7g{NSBS?HhkC#1oSWbVv6~EzP72v zO)|Smfx~Nwp3)fjuQmlHIhG0?m~JC!g@!j@ad9{%H!8mcGxjP`c^<*>eL*=ZMl0UG@|ktBHz=(c#J6jlF+w{M)mEt%-%Uul#}O zx=ElJ9?3lLAb|mWqw0^noKB}S+`sE-qOID#J3H0Zoh?zBg}ymYxUm=U3Ia~==_0)h z_T*yuKBnCw7MlnI_MLCu*k_NRV$x=L 
zG3)n{z7LAehm|j*;Ovg#m{0-`4!4gW3#|*3q=K-lBLGC8WXS7tqh>Y5h2#;1OG@IK7+8>(<)Lin_6XGTWC-47Y7!d9KKbsH=v( zcHBNYRJ@qe=eBhp)N?Clilw+BbAIxtQ;Bo4jT-HkT&*IrGHzkv>r`NQkw#BkI78x!1}+R7F$k-z<9^f~Qm z=C3jdvX;eOL)}Z6yIy`eHG}ziJrxNAiaPa^hGi*MW&?MB7=1nhe7SuWf0L=($uQk< z*0DJ`WE0C=JAZ_w>+C>*n#VOU7mdlZ!bwtDRprIU&tlc>Qk?-x4;xfMH=*NXv7F58 zBEgVPyLjKLYHKOC7cBfcU?J!BUwDUgW@l#56Np5;Wqf7 z-tzLHXbU*MwTeSI^11TqoLq=C^A%YwY#!9&jbu>x^@SIz(c7y1QA6?dZZYS|!JfRF zJ4V-Q9(v7YM0m-8eRQYulf3NGq=8wUPKj5mt6L>b@5ACCEa`f!EJ+7b;5V|Dn#{Jrh-cUusL6i>*Y)067kvgs-(Xi4F4L~!u8 zpOmmc4Yr#^df4~VVa<7oc5CeGf$irgHWpWH?PzmoFoRjY8f$oX>2WF=PKMpH&Ec0` zb8o1lS}1Zw)2y3xv6c0+SpL#2d&k3>dT52ax^L5_AT}`ye&qMqOgpgI>D4!uTt)r1 zL+<;#dmr~VM6A!WLGXlt5Rn>-jAjY+)$Q(e`4dwsXd(#PCeis?HFd@qj{s>@7}j3> z2it=fDX%}kwyX&59T9l~vKG)B`!+r^EbKGyaLyZq(`_*43kqvTymbc8gjJ(2Kvoq` zOT6FWRJV|^GbqfvM>l(OpAFKOZpSGx@B*1l1<~a@ITnh3RZcj|h;_#c@1`w-ciBIB zG1n(gcdbZqjZTA>_x#ZBkz#D(x(AvX8W`!AGRod;&!W1v6<+rApo5JJMQHEq3YE#1 zA6HKf0C93mFjJA#k#;?4z(X?t(eh6I+Rp9u&z3tfScI9<5f4~vkI+%Bue0UkmA!vw ze9;94&#upa8c|}y=T)bEhyx_IwJZ_|mk}MG73-L0^fjLja2a2hn;r$bTYP@bRsVOi zIZ3JH5s5_L-{{Vsv;6cCE?Fpd6ge*b-4cILMa6TuG_TG5fMLUc5-3hp?lvi*YUATy zUf`y^z)#8E;dXvlN7G3-OE5WIt{C^JA;k0cPN@a>&hPHE z-=pK|GCIyEh|gIy+wNZi$t+BAa~s?I>oW~atM6d!6yz+GfIJ=drk7%FX{tEC>u4d36R7*H;-mPqkq6XFfg`%Jw$ z7WyV%IX*Z6P27FfX&+@U4kZ=q2vj**J=f9AD$6RIbjf}14kX$G^18!}A$HK8!&^$a zEFfHk247v1;_sW=Z0!XhLt9%@N)%8P)xJo4EPw{lks2IcmSN!YA=S}58ZP#Aq3C$M z`LdZyBmjZ)OJd_MuVZ@(GZG08Uf9QOf>4K5C7i3b`0$;_mn!l`&yJg?^~TeWPRZT1gd@G=^Bc$G~=F&(XTcD*aiaDgK6j@~1oR zq{}JPrDf;9-s@P4irCB|#K--22mJ&v?-+aUm-H0a!-Sv&ydBBy1To_*gWuAcVf)V2 zPENnpg`{B10njoz;u1vMiiIj(<=fOCSa8HyUPgZLl#z5T#1k{4!ICPJCO4LJ^&4{d z@+%?|Vt7hqT-;e0odlR;L&Gg2r;7uerPHb6pvDDyrt${$sd0a+_9qhyY&07WB0f)e znAHLfdH)h?e8}|3*-inz=CxTN z1AoUyJthFvO35g+Id=cuoB4^HQ0cIIRUqzlr1VT`fR8Ksl}6gC*UOiU}Ynf_uNxdYEWY zMqr3FulkOeN9(x8G__>C$HW^n_Yj~628UaB^QcxdjHV{*q(uECzr*YvGys7KMpuv( z9z`GIrQgpOtG0J|{2$C#_HqDcV#P{Zp1 z((78d+dDfOj#BD=_;>H#`(y9BqR0L4?|#WiF*i3`0raSoa)jb;5Yffk{Z!MxUIViZSQq?MUSt=L9_ArJo(1e 
zhN)OMe#&UfrV4o{eXA^f)j8b3_a0~D4|NGP_v(bAp74Bx62Qwx0bUM%u=?VyX@966 zpJkEVmSMMv5)OR_lf8#_{UPCJ-mhmM3wCXk7htYYunAAuxS?}O}<7#TUqj-G083LRx9L@R08E@_Yq~Bd2{5WmS=R!YVQedB0JnFzt{hd1U z9fdCu-{}dr3fz-&Y=WGWFe(KF#q}Vv;n<#>SXcha)~*0FG#~2s1L36zId$Vt$>G_U zCdkRFd?xJZpO~UfZZKbc;&19h3QK+UfA~jRI)u@3I}4)0!%J>4>B*ek=bBfu!k_B8 z-QC7i>f__Gm8m;n98m%uV|_M0e<1yW>o8+Q~^|C)?+q@bXuHc}+zx zs-=TJ6ca;&zVpMrb zv93wk@PciN18EuMFu4?qRW({_1~xWbr)cs9pv+gv`|#Cv1cW5d*aj zE6TZO$BuT8_nVGud4uTP1!aUK0qYs9<^~JbG*OvNnBA-{N|aY)grjiY|8A~XX%7U zGkAiO>emV8XE?nCFzDNjREq5bsU?r6_u%-K#Dd{};9*N^8~~xJH%aPf|{u) zC#ycXGVA4@Ol1Xc3zF*-f>#G-B1XJl+Ij$*lwC1hcEtfL!eQf{sB!bW`hlWNc@7SjACMQ5djbCG zA@ad9ToRZ8;vf)~a>u{W$KToUUv&jQ$Ow2^Ae~wq9s0Pula7W0_0hS#y~D&OlrXlg zueMx<4Or?WeOg!G=H{(h4?12ixVcCA?h4+aK!wxV^T_V4w0VoXS9yX=QqN4K#|uK$ zL1n-sP*$ApJwlUrY;KBougaQfv1K`8R63}6h0TF-zrkgPL4?t9x&7oa=v!7>>2y(V zq09O0=dUr7SCGCPOr;Yv)bu}{6OzvE7?URAf8%i>*&fwK+K2iIA}R=(Uj^%Vtsu_h zD@S{Q03GJj`i;s!DOtDl$*k_@C8I!^jt6feiZO1ylFl@;-&9|cUpcjZdltf*$o9!a z<<-s!QK>d;)TUERr1&9|&2PIbGFDAVvXo$^mc(+8c3DP#F+3QtL?zklb3M%Yw<{j6y=Wvp*z^QeL(&uNF#zeQX&S~*6??nJ^ zCi6~;gX*LWqeB4=K3KF>KA_a`LnB($H!s$3H zFrbvC60eOFG+VC~R=s{yr* zSAfC+Y9@)_WciymGZBB2a-peh(I-FTKDCbNca9o?e?G3~2(RZzlqxEG!jZYWJzmNy zY)L8|@xirgWg^L)a&;ZI8Xe%TFFV<(THf5$dt?*3dtlUk+C+ z<9@b7C2_JVI#(P~G`Gx8g;yj`hoj~Fimbaoo~ST39lnEUB5Ssqm`Ij@6WW9@GE zPt9V3N}K%fFY{ELQQpQ3C5?Nnaw#x-hk~7JX(d2F64YD&%&S2!ys=w9E;9*G@>&y+ zc#~#xv5pG)bxQ?kW_Pzi)=c{}iqsy@If#fR5AM4!ydD5FD;i}XKXi&geZ1UPDXcOacejoEanpV}$CwjDi z4{hVYnkIng_<@$`ens!5E3#!+S?Kyh>-FmN$v+o05QFcsTvTp%$ja;WQtr>+L({ci6Exz_VI4~YKcqmuZ$abE}a`riKhpY`WpQV=> zj1t~-M?Tuj*@_)ClwvV5+MjVV@Fne;j#RS4TWz)QQHXY1Xgl|9@( zZ9dC@@A;sF(wed0q+CAJM%CSk#YMMZZJAEzT&@Km$i|eERt2Zu-!e3KTr^#``a;7} z;9(xTo8NAlD5s{&nMjc_H(VQUeoF(E)_6V>T|_pk?CHepek758YMe&?d4DOSZ)PSmcM6)F0cZvz_}Esr)%3g1l#ML#^WRJz-?h0oB>vkA36Ph@ zLMdLspwvV(-KKB>jO=W~^$|m|1{5f;@+M%Oob@l(XFg&Jc`mSSS894u5rw;Spdf2_ zF*wLR2dzfmm>TsSndY>3-Q3QliT0mN+ttYpB_N}tVfdJjq8E82HI zKJ36l_U6BiJQ1NLv7(^==Yt^#6uJJjC0o{{sT 
zD0=PGzh2%w9rUTh#ZK=t{RRhunws_z#zG4_2a`KaMLns9TL%GX3MzPmdM!9QUj2za zB#2~ziA0eHZm3Ty_Dc@~TGe$<<>r&1@`Z?$kgn6)GQCV;(#EP7I+tYrctT(+bM|$r}p}W{&|Bd#N=QgwAY1Dz+BY;gq zJYyY?Xi16rhX*r0tJ=B4Q6*G3Q#|bTESnBTf1%f=(fM&j3nD9M^A&2F@C$3*C}}09 zg7GNIu9`{$7t-O3%G#(8j#x7-E*Jzog|06|_!DiLqaugj+V7~t=?by|n`Fd(0Z>3F zqu^^DFIE7B#X@NU(bwoeZFjSGX&MyXjoQvscz)7SrU~0^D4BBRLrp6|+@8aboS6G{ ztGE9R<~@UFZN}SGTjd4w96NjUT|DLPkG?`wf-`bQUG#?0SyEbwvi4#>#w(6AmNs8r zww?-NuoxV884ylQ#cRc-(|$jb;RcfXC3o_D9)oe$E%D3@)TZ8DONOHC{!DM?x|3_P zkF)*G;jsTT2zhV;-XHslm~QI>I^XNos~$sr`w#(A(-Vj(>Wda#YrV#hzyfqoy_cGr z=lTO`2gW9P*=1#_AyHDDH3Oiva%^?AO%bF;-X5iMM@V5Dan>9@R1+GD?==_EFj3we zK&0RNicTa@u=HSk6jru^)mV+&Kkz<;F=#+{X)4Fo(&8z!fm+g9@__H0{)K~qpCHzM zzxnFh?$ItMU>3L^4+)*!U-^CyzDk=e4{O{%vIg-oP31_czPb62o4zsnlms#3-oLj? zPqf>=w9Q6Vk>_|lpV^yQstSudiEEF4*{jR1I%MC-6yp=H8km@fe|BT9+G3<+wT*x7 z=Mdq~uu^0E;eGiE>{QKRl%;ewc*AI0$uAjR1|nWYlTCXTw&0(?NR)7Yy>}jyJ~&`F zRtZX(`}PrS;Ldef9%L*QT*ylEOJz>sG{re9Z)VLRm6dfx7E16$$O8}~QPWrzjHl1P z&pzaPzY2!0T)!#h=GI(lqI3K^IVLyz?+vX?F-C-3?M9qh4YWlM365p+`Y5 z91L{%e-w>d(n=RmH>*l@M0j8-;r^)IAq%s8b>dppBWhLbge?Aix!wUCZ@CEKpe&o4 zx`SB*r zckhbTDa**q!qMX6dk%slrN~(h+Zv)!TW;VR=H}gQLFqET`ejCKW$XbP74`54+@IQQph%J?Hl_CM6*trrDL}A6Dn3(7nTCy&Gv9{^*jz*dgR2K|`|LF?3%8DH zXiVcc0-XTIz?P*7L^ou;AVAqjM^GXwf?0moU(#|51)792rp zVbmaj%&s9WtwL&BW$7H0jyc8l8Z$_?Pf2(krXIpphTsx=zj#R+N zl$BSK*xg_WV{Ky{f{6BxhcOl1msC5grk*!t`t~0vcLBP=GsXHmCVRHl1>=ZO9#nF$ za_a2-E{AQ0br{w_SI7wV649ilKSg$Eh!i<5%a=B5qdY+Hdj}&WCae-Gf&Is0!Q;NJVI_he+otahHDf9~%vOjxO158^Pa%%gkoj!|RS^ z!KN+5&~P$ta*5pn(+*V=@$hu_X86zk&sK1jU3TlA;2z&McemPci@fx$Z-mtxT%!}S z!fIxOi^MiguC-&6WAeUwWk54u{?(*P)mZ@Z{QmUKqL^{Hrs^z0uFl%#r+pWW?Er&U z?_t7~-WDlJiB~hFke((7#Pmg#`^YFhoQ@M?h@1;K|!DFMNtDNpCpl5cDM zgyuxVfPY}Pg^VWd5E44Z#r)B%?(49v0P!N+9G@p1-u?Z(nwgqwyAft+8V7FcM~x;j zVD#BUMFak-#*QwI$-AoYfexHH!J{K{Fj;_xSdseA>?8_R!q^q#u%FC zROQXC-TKi7w#>YS|Dl}8F1nj2hoUhpnsvO}J~I)J*%+|5 z#rW$ByFU94_K?-i;ku!2MHYVNbyZbe&*MZ}k3{y1iBWV^sO9}jQH0R&4=gqqwJzhQ z^x&(8t*xy$@iNiTGVJs(Tx*J*BEU5Xn%U-j$GQ=Pc{|y@Kpg~&D1P%gsPfQi*=SkM 
zXJkpucL>0u5eXv~v-jqq&+Ofe)@+d@=a@q=mOW2yZzm!5gy9w3xBbz@PDcPi5g($^9qS@ED1%bJ}^uL z^1?1vsoW4;M+j{&Z#IU>T-heWi{udk&#gtBU#Wh7D*CQ;KUCuR|Fx<(blPi zJYhiAIyKuG$MKzb{rq5PwXls{P=#+`x62NDZ6}O68_sF zCwd4eAa>*;4U2#_G01JNPk-21Xo#F)Ux(I4$PGJ?W(mb56X`b&1!aY=-jQ>Lf@%mQ zs~|oh;km{=#q+Zbq->|Yz%*%>(6=C=0`v0P;r*9|P*6_LYQ5w-@I0&oFyOSc`xc*r zm(3eX(5*?%DFrB?(P@0AWcZ}SV%g57LsMoU=>mGYpVnUdBe(!HTIJ|;(aC(ReNfoe zsJTIWu3#5!zDWj8BMTh=rRsHgy6}lc2y`a5tXxtQZvO&&RQtPog#-q#SW7jE8VQzsc+$T9Sp)(RW?X zgzV;cr10S*?OI#9hI-@FMdRYq|DMY8+W~aIMyc?m!6*gZ`SV0^y8Tuk8|M(oH>f6L zr2V+7(yk4v>=plhA1}JF|I-Q-?*a|P;lcobekX%Z-?s8mC_j<}I4(q>GVT|%$Ng8k zZ&j7-gVw-k^7BfTq;Dr@Nqy~c`=QdG+B)XdKub3GR`fO0U-7=R_#89y1a`PqCa_6} z0A6joRpEQQkP?E~{tmiT1r@*v?oUgYbvEB^Kbf8@FGo{Al|RZD5&Jo{%Q;Si)uv!4 zhm&h{g?<+=V62sah?=xWx=itZAl7CHSVmit9C~dDr*>@}EwJt8!-bbE5xLk?6&Lc~f zZ}`WgEP$iX%c?2VZCJ@G9MDL&zwLbi@Ou$%~$lyaExFEGXvUlr06ugr$sEbm{=+ z1(Z=8jJ3wulSR!~XD+y7wF862ZgR-(9@4NT^k47V2I|U*t5H3^LYVM3iy0lpk0n(U} zY7PPy+z|mGO?znL+({=_`O-fs=#eMv#OM#AP&SInSc6;Y=JK%vo2bpQ> zs9_p54sPa;kVlj6XO%Qe{qXvxpYfh4ZH$8A4LCv1r<<3#f`2k#cGy?ySN{&ss0N{+ zP3w=Sl}tVa!MpYCTBn+)agSqx3hi^oLpgz7xJw6a?YabkD$QOwVQjRc5bY|;_l6!M zGHwT5tyJ&ci|neep?*Jil-O><-e9(7UESPQxD_P~kxq;Nne20~=JUZKZfs<F z-rWt!F~0XI_V=@s<`zoHYO)3;JjnS4sEmC8iN~xG-LuLV2sfo2#6tC_!1+lGLpkdU zZFZRZqv0T~fVuUx1|VE$R|W(GhL2~U9iq}4^e%%+1Rb&HLN;)e@}#p0?uvp@kSg^i zk18jnq$t8KCUrRU7f#L@&JJXNQ~`wbRiFedvBI-m5RqlIqtdEIb$Io!_!Gtvi5u_i z_$*&%Z^*b*r%Jg)hy0`O<&nUze%9ruC#zEDDs==}^EZ?vpRuUlVH*i2C0oQK@>p_` z@Xm9~u}_ql*G||b_ZTT0-5gy`nT~`HW!mH?3F9I0q0Gc4n`CGC!{ucl(o+9`XBNlV zcY1h)Ng%@M9ZTuhzHz%|#fPik8I379inCj_uOUjXx zo3a{FWc_3OpU>UtTr)D>fk}Gp%7NKlBP>)EO7=p~3o`n(OS^G31DzoWGD03MLBUsR zyyC!vyvkjGD@pAR^z42eX)+c>rrS2rTi-sZE3a?@{(iaP}L!IQx&D_`8K z4-;6phU3+9?}m5RhJRn}>nNo=Y&jrM$>BF}9yO7+$xQMFA5Y5n_7jjfRDR6*fDa4AtOZYv=R8@%dIGmE16L&K@24;~4B%~ENEf@Y@^GTKBh7{%GwS6b)= zpRNU3iurqN*p3bQOFaKE7Ah1~%yN6Z3%+l%esg{fgNNY)>$-FYOKwEDzU-SZjQyV> z2szpq8nSPB~Kdnp`>q%UUFYHT*YB$@RESl*wSEu;4_1TlI zy(T2I8L3>E+&&*i-v)xaHo?*6a&B3ppJ$!u8(AFKA+$Aa>rlY-dic~lu%{fWRMVzq 
z%X#zL^92IF;P*n;5h`-t_>BE;d)fdv11G-&6g&A97@2qS26ZI*6l`y9}uwry90(f z-?uJXdN=3mY(toTYN7+}51FrUNKuJgqntYt|H`*{Ipw{AWI5aZ_Qs)%>CPw3S##k@ zYTSS{%j|*SpOY#-`9w?DO?Eit)2I#S^PIjTp*c?ExaBPhq(8!%9NpG@)^-Z?Z?Zmf ztSW4<|LOPQt60yHgqtiqMZ4{2r5@cZp+kl^`Y`P;3ku;j%HF zu8l-&u%&+fZ|&JsyE;l`YCe0fUjLrm*fvpyY-xEh_=t6OvI<&8v$yFacqX~ufy`Ob6ySWHZK_#7$97lR8b*1qh&n-f-9+0K7J4XE$NiYD6 zKp>PP-&Lb|Jm?(zEm3dgJFb*yqh%OoPV(fF9d&X;%5|f7Ut-Mvq~Xw!$FFfAWYqx% z7svWg7@S$ETZ5x$`gu_3C^dO*m7{O9`noSu6=i?+-_s&To&^PwX*G z^^(Tt@rPs6qc!Y#Z)mOf(tVpSwElfK`qUIzZ{hlUQee+j-aDpXAP;YlP+^!sH)GWn z0~tRh&XeB@UN!1x&qC(Gi@3s@5NMzFhgWw!-_h&FJ~ANxnBz2UDAQFcD>yU++I3-% zNx1A2=el&wfJ7qSUY6dvWb)4~e?T6SgGTt6^)?o$=4`D6MbM3Vcw};Kbt5jb`KNO5 zGX(vImrfiEy#p`ay?0^^(~(dAU%L+jK1nvc z>hsG{`HP{3na84Mr z-8m|ZgAwYZRqBeL1<&ilo_$e*1jibntE zaV{L-V8cnQ4gAu(>$Vq>iK*FEc%?<^8CAZlug%~S%GYTdj&XkIX(4+kt$Brv0BoF8 zWoHSoi|;97x=GVO71IL=Y1Ki+G&Ye7-#lk-Ow~Huvoo-89Wj%2iv{Oy4qTFDLduq@ zvilJ-POHyPd^>}6yYc_ie@S((@Nm2h8KEkvVRXq*S#@OE)sZf9GJmqIwVCU6joVISwPLIrN-cJk z5}2B`lMz6{X<-0s-PtiPum-LRqyi-s{@h&RGa*>}M2pdee|SaG6*#!sK5 zprwVAym9UPgh!>BfBzSev*iK)&!6_-^w{R~xs}7^r{ou3g$I34dOQ-wi-8sFs2y~A zb5@B+=nKY~j!<3SMgb?d?xp4q?SOaa9U0o%IL(GxuwNhgsbQBKd9U!_#fxYY{-Z*E zjdnt2oKfJ0OINaATe3Ga;PJFBjcIozQsnQQs=?!<<%Do`?U(4T-la%<=jTCD=ALv`8VR4I?Fz`U`vqXFj=zfYu6#Q*k zt$vUHr)#&#Mw=%~sEJ+yU*YrbSVt8m8gq2zejRcAe_Xv~R9#K8HA+H)yF0-lxH|-b z1PGAe?(XgyNN@-)8+X^Bd*kj9+}+(B?&5jh=bZ1{UyLxwTHV!Evu4fe)xECE{Dp}h z)^{jykF~aDJ8Jxy-%6I`ojSv>BpiK0?MY{ZJQ9(uL!(h;*c9wArB1yfbxk((N~rS zH;y}Zc&g5HI8gC)vHcXf$BmCi8>ZD1Hxvl|g_3G_cHnBQRov*%FA3XusFsXPQ+^B{ zsL5@;`WU}6QhB;YDG`Z01J1Rf7iI>bK3|)X$Umdb$y5Z?8Ry4d-3h}J2?Xxd;0}-& zmpN^&suGP1k_j^t0_+8tnCG>7tex6Fc)YuJepo+HOdo}eZ??MU$siUHCxN6bCE8$F zI*@<6By7>gw{CTM8(3AVpL&Q*qv3D{mj1i)3~f3fSG~GMlc#CG{#<)U;o^PG+W&G_ z46KaW3ejVzb@JY;rJUz)l3Z@QkR;5dmSZ*~F9Zi=OG=p7KG?CKvsML4_BuTEvHS}P z-de`WM~C%Kl7vD+KwiX=XWg)k2F4;Q%9G~xr&na(#WO(N9*4aPxJyxayn4SlEVuRP zV}TqNFb>Kd@c}>c!>87#kc*v>^HX`oA7!T!1{kQ#PAUCaHS)UY&NgeeX%QpMki z!*`)IYWH`T=)^K&Um0?G#P_xTQ!S#+9#*oG)@^ZlK{6`*eOu*CTD4^`I`#?&^4+{0 
zku@*Mhe9u|{7rWT!+FNpJR>45>vgvP*lBrb!>ksojDFF4BA;^8F|{F^ zeIoJ!Klf#nk&p(E|iOHMH29J9r{zsg8$HgC~`bnQRPWh9MeP;zJ3%=VXim(?a z{Jh6o_%)9teAZf!?@pSX{rzKnPhGrnZR>fMzUGo4S-|ReGDgo8m7~uotwd}zBk417 z{h;{bWXsIbIgn9o8q9wz8#~dzq9-GJ8o2oTs{Py zd>r{Z+A56By&@{+#6Z#e>)>BdI21;BK*%~bINEtkNPR*{m4j}HIlhE*0C^+!KH^pNcegk+jgGUv6hEw?$%%f8G6Ep;mGFuzcbvz)=O1Tzuvbm z#cKMDY>|E{o5HnB<$<9NFXIO)M?*GquY@xuro^y7eiP`^GacG8vnN*4w5ju%!I7!R zwCl;))W~VKz4iA@sTHRj#%7ig&Gv^{d}nmHQC3}g=UKj8Y{jAl(Px~e&NEZ9OFfYn z>rxgi!}<9Fgr>@LFtN&XB4P2F}2RvaFW%JCDsz@!UL1c`YEpAH3snn~LgB}T`U-fKAr z`%vwb&#cTPK3;G0T3)$!h<|ScRq9a~{OgNnWTPdWg_XX-tkOuwKXZ>#>SGeI z`sNo-hGQw@s`r8wAC6?~oW+S}iHP2{_BQqY-XP*Th3q@jhV#yw_8`(MUtxA#uSXJ} zlyy(XZToG0(=g#aLPU}=2R6^=>~*!qnk8Ub;{>gA4>*3ORXK1Zs}tJE_8&{O@-cz!YFN)y6uY3mxg;_<^g;D6nzvGi3Yg>K{3BQHmHv_lTxsv@mh49w<_ zJn6YVzqKh_=4+|Q2hYr=JdkfZ>>GJ`7Pv-uJ0Ch-tynilpLsj8wKPwFdjNmT25LX;-$u`{-`g03#Ijj z+ko^Lt-MVwZe0F`;0hAiZ0vQQZKzAcpQio)cN<5>XPbpfUbdDBiJp2_j~TpGOM_j% zgp)95h{Plf= zVPR!oJCy1AcuB^>0%+If4JES&QOP9eaa&aF_C!ek^y;`jf#pY9B=%F03}t}H~r5a z4xyJv$DSzC(I}rw3gBmjtGc$u&GC|vv2m5>jg1x)ZjMBR!FaYf@Rst{XODjwjA$@5 zWguoYs#=2`86Y4d6LPIO@wp#X^>nP!8u9=;?P0XHw*!-dEt!&5Q~MvNCKC&LeRkCC^G$11zImx`D@nmbe<(IR z@NC62A@7v^{e2;J%;T;Q%v_+E?fc?m)3}9`z@#m0X9rY*&P|;`sIl?!-R0Wg!otE2 zEG)n26!Z0JHA2F}C;wZPkOi7?!UHz?eB8K0!XdlC?0bhOR}f2+;iYgA!zXKR&-y6? 
zZf)7~L~z-ChZwko&+WXQCYjARbeYPzc%wJU(*zQqkYG@y791AVX;I5&Z*Uus4a< zky<8!p?!4pWS{x)&A{NGV7un`@90n}B~vw{p!N^iS^s^sASlpI^iqV~bVO`|L&x)o z6Ih>7fs1fpRI^+8NX=D*g~KKkjEoWTLU)1Rb0p3lFGewmiOXEoiB;6q_slEpcE`w| zsJ|w)0oJV4C~KzH!}?FL0{8oM%$E0;d%(3R&$qq8=fW=+!jv>LH@i?J4Gj&dBR?Ky zTA#&%wcHs=GhFNV$)ME$NM(GvO-=KdkfW7WQorC}xK}R&oKZwXVZms|lJmYo@{v_$8_p+gpjc+l@QvIleE?KqY9= zoJZHW)*19Va$(^NV4`V2w4&kR6}qZhI{+3REv+9IkOsUciOZV6-rinJOl zlNqPrfeR>xBEH@Gn$!~We1u;?gg;#=PO@^s<_xFO{E{s6EQlQiAm zzkhFcbE4qj;q}kfySuu(_q0_?u(7e-f7Dl3vjGisV2Od**tFc4{BPXahycqW1|5V# zY;d(dg@n%*xVdTc3ke^P8&zvPt}K|e8rpmxdwqeL8FaJ+P6RMQE{(5S@Mf_yhDwG7 z56L6ZSs4JO^wp!;Dnq;o31Gc}IRWqaf8HVa`VPz>R8o^+;6k*Mm5kSSAQR@hN-=ue z?-hhZMsA<=lFoqRR7y%qDFKINZn^z&dTNKmtn<~)tzp@?c6YJSF@3cxGZV@$1e3<& z2X|Qa|7go_}y|u?TZSCRntwHzQte}R6WAGJIf%5+X z5U?gJrXxhnG8WSS774U3sOoyZ|72v8YudQi<`3;${_USC5m2dM%!fV%o>~kgv6gBz z#sF_{OW}0tjZ8=cu{q8Bu57`ik^2pVF7W@wwX}$U&?V#HQ9G$lu>-&2rh5l>Au;Of zZ<(0NHK&38{y&bilr|KIDf(x3vPiL1r#VF?k?CyFag^0&kp~!&-obQPDFFYkFoA@} z^6A7OrPh8!V%hsf8*m#nh#PDDiVt|1iZePkb_0Id!os2f@B+aHNCW|=^vU_Z$Ri0L z51hU7iq8Wh;HLT6**^fglarHQGWBm9)vr(kaV7k?hU$^HuB@X&Vs37Zg^kT>In9v3 zp#Hb)T8`iCG?T`S-}hM{00#M-fs) zETnCgd8;>7tPJ2ry6>}lvy6FWQ&Y-oW_h|)1OjPzy*n%xcPekF zuErD)n@~uFklJ#kVuPciXqcE#YUWBct^bP?<1=4emB_?Ew+|0B4qMVH6*|N86_Abz zdEtmDi>bVXm>7&qPbZ1tzP=BPjL5pWx+I>5tR^F=MKZ+7%F5}hjNII~ejuBskX{nc zZ~$^`mLUIqGMNW^GCWP78E)cW_x@DL-F`{M7isBrz$)NvP9r0u-!U;Az(_+XDo!|k zef?h3UqXU}kB^XIft)db4DVoIbS4P2m=aBk_R@=U&?8Ce-BqdI>9(D0 zT7I`|F*i4t4uc3#695Kl^xEH+s#w-15&uW@1-kzTK(0k2j!8`&vh{t?e16;!-f(D4 zVm0jH^gIE~0%XLBxd-h!(@_!t(g4reO7}e0?~kFvL;7WTzO3u}R13tY>xy@;^NeyFT9K0$ie2)$!@{{u$L66Hq;PHDC1@Lfir%qfEp`rWdH@N? 
zbLy8p5P?NFoT~v}J^`@!vvaM%WRQv5cKNG@1_380Cxd2PIIrWj)80dTT%4N;1REDO z$DBZ=BtD45BN+JhoXOkv)#rn+3;7PR7pG5I9sQ!m{AWzM{m?|6T>p91Fs8hKKaj(& zDtuqf?|Qt*)P*`#_QOF2_fcnc;^Q= zwc|7P#ds`J zMBr{S_G>a*-o(lV5b3YH(Q<~V<@w$cDmyz{t;LHMn7p5#pPa8eF!5~u5p&zkaTqg?dZn)W8l0tR~ zIZT$#$iE2jB9Ms%^+zd?i4}NmMoW%yM7S=y!vYjBTRe>KNiZ}l3~Ys?Wn;qx02~p& zOE>^2Rc>bnKtALvwg&)tZ-xJlyjyLze**qKNY z!H#ZpcrOiZzF^`aEhEg4fJW!G2poU{;Oz%M-h!FV+MrQ0%?W@dA^{jDkd&PE&j1hQ zZ~lk7adO7X3qQq;=SW`shr2DA_pX3lD*y-GdO8Fsr4SHWi6gtO&4v5o;t3{&zszUu z;mpj#*L&bMF~i-kCkt-CWrrtMQ#LKPr74FLLXYP{x*hx{E7YF3KbAO4mppj`+0S=r zUqiBt;uX>d8FV0cvJBkk&j`9!P+d2Iwqsd*o{(%{HcL)V43qfWKeH0mUozpi0i{fqnwQL^U<=IU(?rk-s>#CQpcK_?^8qx z{9QA2F;V<=e(s>4s5sZ`$<4ju#ZIqUZnW7KP0Y6)O(7}3zC4o3+v)cP`W2LggwhFZ zAyu86h`<=U$CC&-$R9y>*@R5W`R7)f?uxL_f=MEsw#6!4R`CLb=%N9 zHPK1-)CQ81E-Y89KXRnMtL+=u`p3Ry9#lEnLeaS^2vOa(;Be*ls5PLO+qq%_f<^J4 zU~#zE8SOP<15goA%Qpd9b24@&EiDa&$7&!J6d4;GEdoFTPT0?_!9>TMG}pQZweIfj zbnj)kbO8Xdng7F`sAW?ov-kim8!R+xM#S~3iVoXZI5O|O<8msvDKCc0mc^nlks9lnyy_) z>)t7+pQ9XrO`{!`IsRAczPLut#pj6pAueoPT09>45MihV=EqxPvGBi1I9s1@KiO8e z#6B^@&fyXY3XFAC-DpS)G;>`0tj8O4Hs{~iw>g^h)JQtU87i1MB1L4HZ! 
zlCkNGI~4cbUBq#eX4OU*!2t70MoDVW6C_LZq; zuGUkezJ3KtyJe39Mxbbv7raseM=EM*Etad>Nz(xV07x9q?XP+Fdd@Uiz}*29c?Q6+ zy9tC?zzxlBch`)>_tAcRV`FcoLKk%K+V~ZtAnF>Xor)Cfw^9 zIj*e+0wzMPl;D<}JhYMF5hB>R2qx=}!tKuu=K-9Mra6)FdP0QRBO8R0%*w9rZEcqy zz4w&{b<9;y2f{Mn<4!)*oT(MxT>S_@LD)}|?BXEp;aFxLJzfuEeSJomrqe)moh2Q1J&~*a`YI)s`25 zkDCx>C)Da~hymm)$na?z?xm%pGk0|RUoK*Nyx8~(kO5iAt+O1!8gv3QvwvWKoQ4M0 z4=7nkyiP^_(O-})E_nX|K6-q(^Xn4>n@X^MkBEGHY^`r?RpS}P^mYSwBiQN@^0&Sp zDTfu4qvz0?195b3J?t$vJ54Q`mU82)bM;;)>vsZ1*-Hz#c70q~`QA@fkYrZtM=YLg z9CrF|?;<$S`4?v?QZ(ZZi%qsnmrdVt6+9LOBrdyLOtJ}^xR@9yZ*2~@F5G6uAE+H+ zGc7NElRXnal#jS!GvR@Xp*JG^`5Sx;U3Q#o9(fUEdTdmMXQ5t@k14d`(>BvFW&yQcM1k5;222JP@4jG4L0ZIw` z>L^aE*7F>IOaw6EEui+N=HZ;I)^QlmLfqK_sGPOpB5_-NgMo=zai6lds5R;^ z0aSSCL_|ZWypFGViGks8pXMKl=qaA>_R(n~X!vu4Y+`o0)2ZC|iS74U_C64Yg1v!K z=7KQSXBAoLfZv~^n`cC%!fl(|diwsrQ!7MrBl#cFq#3E+anw*MM|;ms<(8DZm_-C$ zTQzOFI8H8DcLp{*mdN;Nc@ zd+KX@orTWgUJDzF6_Y|8U(&SOi6HbX2d`!K2$AX0q2%zVTRcp3f0#=87EXd@0bRH( z*jJv>)Ob|40PwVx=j;0P4mKj>wH*KH1lqQck*0L3FnZJd2ULsYLHEJ z)gx&-rsb6d4lU&L)j^`dr>Dz#c<=!^ znUadie!;rl-Fq33L%-UwTli5jEa7NgI3PeHAY8%H8uOowr!)LB#KEw!7=Id2*t= zcv~>q;Kw6giPV_3l@-G)vjb$Q#~I%zTtF=*f`&!8yk7)r#EOUZt^ZL>8xG3BfGA=g ztL|)UYz$PMfMW2;8qRIS`v#qqbY))x88Aa&!!*`USYoWYn=NS0^kmcPdF&~GUf`Yh z9-W>WeV#x*djn3K9Muf?wVjZ=jm@#t%D|&Qk>8v#ohq9+^H$Gd=gb2Ox?R>LW<4z2 zqFCDd3uIjEl7$&Kw}#&jdLXQMxSE5-i=% zZP()G)x#h!C*<;_9a=$X(#HFS3Q;IcbhwXUwU$YbwUNZ4SoMYen&B5t$X*{4RJ%f_ z_>bvPpm)l4g>#?_4v2ljJ&ad%nAgNBo`v*FxHx8I3@;hs{J5<3-G@xY2%v^3I41L`sGV?R?X6+%?D~Vt_6km*18fAX2a-qN6B%!7*?FI%} zHX%3NDSF9>Ahl;QGM5hLg#G#>6DRxU@5vXzf z>1FZnuMT=&o=;w?fj-EqWIhm2kI!llSkNe{qJkq$_bP2Y-K@xac?pn_k$I#i{1bY8 zt*zq^R#XM+3l};+YTjrZyyYnf8@jQExa3|yf3!s=008_DvijLzX$bPeR*S>NhyXLs z*-9d|!au9!lXGWK$w*_7-||YT2z%8otj}qBMBGMMLxr4p|92W&66b3D22}as3#zPI znK(^7SC>aTC?4er?_97Srh*&+AfVW*l!4Uc4^y8vp8&ik%zk(ps&!YQhP1N z17K0NH==}JmB9Ud3qX?Xfw*71N(q;{aF)9#H>ZZgibie7g$gHK#B@l) z=Hzb&L-vZZZ$>XQ$xtH(lwJ+d=+MZwpmr-LEt{6v1LKT}Z8yuYRi~o6I81prk)!6J+P1wSiVF}k1Yn1^vj;V(XE{ap3 
z-%5e1Cs7fLS;&H{E(e_82G)6X&Cd%a3;5x;^(I4CxepX#q*Sk!t0sY<2Z!*>-KgL& z!E3jR%X|d;0Qu5jqbEX1NeK-LtLu;X1TaGia&pm~f!A()2%O;4gW+23FC#}=?BI}#1Mi&%q`z37iW9$73FYkTl6VB{X)!Hg@%Z1$qNyM zr1&%tQY;{@Ue?L}V`nDm1h(6BU{Hqvt~-s^X5#eG!%PFkk?~F%_;H$lip@GeD-#sa0@^Tu*pxJEa(%KAm61UTgi%8S58$Plu0^lV zHx|u1ITVUV)4I6A@~tMrrP3v+hhDKchz?sa4Y`9ADqNyPcu)ok)^2>bu?}Y^PbA5CSglyU zZpOax9d7)g|0~eE)cxXlAy=V>@7Lh3Dz zS`Mr*bg&->d%~D>eS~Cl@_wZ#_Z~qW{nNCV2e~>ohP&!qxm10jMI=&&{toi*=I+BV zv%eXgcfZj)ATXU}ZlL|>sOU8IjiO^kWbLO1YMp-5K(`67 zLyI;7t|sj8Ue^^o=iJs^y+&OTyH*@}m`kK``+yIJz=Ae5`e{`Ull61%-Vd~9|JUxu zB^=Ol>$KvMfqLEF5^VBb_S9J2Y@+vw<_MB7!J&?LdV^ubvver1xZCK0n@l?W+kN@r zLB+>4e$U<+nP9)9&-GON>Cc#%k%p4w^#{v=7Z`SW$ni@)?X2;Jq;K(dZ5m3o%#zY9 zmJ2w09}@P%i{Cb1*FZcaZLv7Nf(w=VBDSCSmX>#R*Eypl^pRlt*cQ)eew0rQWJ6OQ zuXytjW$JVt%vt(0)DGJyR~-eOJ#Ea{G9fV4-sBb z(Rn#vBskI5JYwRqs)|kCSWWYtb3e-9gq#D&oRhZFs0x>b`YO)9B4^+(RXF9cvB`z* zAr_a0bP~Av*D&`jYt6F4#Mqj|qdj*jZ;-)`urgMzE?lk(3nVs|-&Vko3QRPJwM zg=-?uKCp9!!(vI;xAmo-=PUYBU5r489^x}l(*@`OSjGEl( zmBc&S4;vAH3I?Tt4-c&_D__*uN0+y^^I97ToeXL-Tu%RH5b1GLAD;Oz5FF+)F$0bh z7W~&tzuV{`S(ZWLFzC*k&=v0+G;U7O@K_ec_a(_=BvU(%2dziThAx z)^PZqM$DG{b!~zz*T+Z`S!wK19DSP7tL6Cek7`~@1-F}D`c;@g5;w{ZK8mho8B4R~ zGa5jn6sxAE8h^P1qkE)`^RSd#d_Z6^F%d|mI!WUP5_cxP6pAOOhDYmz>@nd(pdFcY zyU(#ZpeN*awP=sGQ5R(Gqk|iUUcoo#ZaZvfB&KWexjv5;38;#nBW`u6a?eSv3@m;y zw`#17ROICp$3=k6u^BI8oI3z0ZxyubEtsVDugX4y&@? 
zAswt-tV#*cjD?>7e*iFpt{7-HG|*M3Wa7gSSy7VDdh)>PwXWuG^8bW}L*l?VEMr@n z8-ao%{rq|^!1!nHoug8(VL6ci^aR<_7GPKEA4w~i3-o59`_I6+o()`2~k*G z!QWOLWwg9o*|+@fC7?}oq1yw(15vDGhn;7hN7gGA|eD^|Y25wC^&xhsjM%>>o{}2C2QA zR8$*wv8lXJ`di+*e*#oW-3?R9M;MB=GOwn_)@arzP2bxNBGViw6ui=CynMw|0}eEf zF0rxSBS!=CB+mh|T`0#*d_zyNf%R)T+VSI`%nE-cnn23*KA2U8WHJfp0-K;H2fzQ^ zMW&{#I_^YbV>|HiP7=AlcP@Q$W|J`$$0(emeK>bcmkHmT_8mO@v1QKXl3Tt#^|H_x zNm6xPRxq{D8*g-ef1$8}7U?P<~$+h}3mm4<|Q8f4Ghp0o4~xNy(SgN7Wql zlS%&N2GCGYZAyyYQn)hK()RMFztAEs1w0Uom)Jm2G7hUoj#J6U9#i?A-OWM~|G_7= ztJu@E^5e1$k}196N&&vrl6!DAd%O}487*aa2z&@bs$g^*j4lmUgL*89;k2a zS*W@@SD!z%QQ|PAd7Yg0^`uXp_Ak@#!}&S}^V zrg>%NjCy~T?4H$PV^nr^lOqDCFMQ7NOR8ObUaNJ}bZw}R=IFHH)I0GJeGxCXxFbVD z3qDQMczufBzjrm&uUFraLsvk0M_d)YthfhDgMPMEelcOtkTVcNR0Tut7}3x5_qek4 z@hVAB;fUHa#N&tIuoyb=A0JnhmA;e)XGggxz^jG2#+*~D`Bg3?vKyw><4#RI?+Jx| zv_?Y{HR>Qr7?%H#YY!j)FngO%)a4wXIk@!r+Mgq0j5& z{Pz9ZxA2Yn80;oATs1oe_?j;%BGH(?fM&#YifZ*qnv(;Q=vdcJhNQ7e;O;Uusg*wg zpS$B{89IZ%eleO`oIkU77r0$w3CdH1EP%=nZT=b<8 zE3Of(!WIWIbCvPlRPoL;o@9eKvXqpl|xs2#z zahHpGDuZ-`LVbOG|BCc>e%U4wUCt#raJ&f`zBfJlHZ|M$;8y*XX-stuRynAvqY<;p zE4Kf@)~Na<{AVDIZ+mXGlf%b$W+xaK)7nbMjGExGB2CNQzg<5mJ@9EngKhG2f0?eh z>T?HlZs$V=7GjYozuu7LD}sn$$+l~!Z0 zl*e}mu@_W}y6kIvFR$tCIvnC1a+eW_crB*hos3egzpw4IzYCfAxPvOAY^29N+MV5j zQ>M&|JMV#9Jk{PxSrjtE>8hpF7iSc&Gx^Jdm|y99cRmHDlv!CbJwz#Ca5Th2WQ(U9 z;@O>EYhkXWSF?lFXjVHveoJ0|qcPL(_UHOZ)F&zH<8`S@NS3UwC4Rk99u|FGD7(^M zBjzi0@~Ly=yyb)aHeT#!T#oP%9{LktDi+T-e$QW|*Q#KXXcQz!xDi_7VAoNKWAiB;@VXRk1`pn3 zDiM1b79rHQ-L-(4RfwHvm8+cNQ_$D9QhqXhJ3w*g_sJT#okPN2-%9c$rxc;GK0Ti} z?z4#36VaFYR)UIMd`T*A#fS3eQ~YTOaj}MSN$LvYH~r=E1L6`Tx!S}gCL*N+lC<3` z9W9a##Ai>**o>mQ3aaU8*0uNXj%I1nms*Hn8kH@g)^}2BOIK>;Y#O(Bs!vi1*3BBZ z(#vzd9;gDxMp;1~n=`nR5FDSf+p#RBeeHr*b|MW(rXHm(fp*0j#sV4-YpDkb#h3S_ z_o3xppO8{3mCEjAe$7|WNNyd@W&ia}%*7C83c_ceeE6-Ag<_Il7^geqojE0{v|gd0 zG;ogROoU&HK^R^Owd=jo4_=Z6&F#`8ipgIKOTo z^7qQZK5^zkXHuJ@GWQ3Z_-C+@e4lZMt}yN+j^4Ax};Z|r3{+666o~n7P{N>T1)}s@r)^HT1c8i(0uxs3ztZkRFHqU-` zEZT9tURt&kvFPna?Bx6qk+PanA=q=g-YQ2WUj&Yd 
zjh$?|s=cw4`F^A*K(^={@^P=7#TY-aFh+Oqo96XqVQLw^#SuHZ(id$zjO$LMsnxiH z_uT!XG{v7}f>U^W-H)1|2jdY?Ti9}#yx^6hWmJmd%A9xVOv5y8yqYarD8)I0=lSqD zu%B&yAwU4&^%f}`T+DK~KKzF7`$dIaLNm_z+@Y4Eg(=X}L^5DT2YxM~WDAqw<7Omf zt;^@5o8HJF=l%EcYWTbG9GczHIXc0``~{>mMS;J+)UK-1SqzR4)f!qhX*5%w57@tW zmT3?W6jBQ{NauMe@JmeiHcw)jz%WVKUOwq$vH57-c*rEBoYgpgG#nLCR&)Bc<)u(z zB9kjUV5Q_ZZo%34ql4``<4ZphHXGCT=2MBIo2f|Dnc}T8=QoyZNLL-CF-k_Ck~B`& z`QoeCCAX|(RV{sa#vRK*%O#?Nn zF^*crEpL_;KYiG9PLOEkYxeOgRpIx+QGZzWM^qZvQ%Mh2u;|@NR#r^meaUwIc-AzJ zdepz5sa`>2^yz2duc8p)GaRIZV&@v@gCEsKwcUfJJNrk@Z?bR-aY}7(10%H#7!!rV z0Zf_8t@9wAuAQHE;YiU|tg<(^rk&bcdF7IzX4YVT|V6$nIwvBOCF1&<2D3>yenOE?A)JC!SgNBVkP?S)9gAq3c z5+ndTuY$~ejTFNYnuzf$m35hGD<4LOvh8;%XMgk6!&+I>670Of_g5w&T(})sS)$t& zmiPDjgZk_r?Y=jibKMPkHmkM>k9DNzu=hrK=t}++oWD&`E+HPB^llAeP{qGmZt(D| z4uud!wr+3n&)ZWW)qFMMp;yK(D{D@4_CK8OsrkYTTRutR+fEn}<@>Fy7?FeVf^Y<* z{MOEW=_b5>hAvO3NLpRNwfQpuMjp)9;iGM6uU=FpIC(3LTRi1V=D51(Ja9l;H1cim z^Fhxzec6)~Gtln{krlj<&N)451u!qL-}x`ua!{P^NU^ zd8#C+pw^UdWV%w%4P1tczm|e_D&+8Q{AywXU_<9mBXKY?kuC8DZ1rz=^}??R@w1Qmj6%WEDC=VZvrbthbz}3IW=Gp*8%LzPG>F;- zh}vL8>%|Txm(2DNhjxV0qI1bG?JrdboXVM5hGw6BzzBQ`fc=>CU}&USC-GcnmFlH~ z_JvB2eN=#5AqlwNvE|^w)R6 z8=;$h`Ri^H;JhKW*?vy?GQwZMbsF^od^)e|9X`Gc^L%!1h~!Kl;CNeLhR=f@Eg37y z6D=8`A7{HLC}7LU#1cg)D9YNSVC}NBJ z9ip4CmG|H^vo#c?NXE(ccPFE=Z%=p%9zTPKVOMOHejI-+)y|vfcl~u5h`Aujg@Dsu zFlSAE(@uK0vrMX3pIwSVnG_zhuwdZm*2FI1?-G-Y<@=aRk>^&^NsC>%APZLMtYgF8{@&Jd8=1fpopPoHD1L zSsTfoSnv&@F3pY2Yq2zzn39X|Zvth=*jpT!2-&T%e$5WVyJIVAf~cUt{V}ryiZe6% zs^8v(5yKo3Pzh>x@E^DOdgv1pwgg=pr z9b1XY>A2C2#ElA78qrI;^!}F1jfb12&x=du8T9u3OgFuI`|!(;&&!4jtzWKXpq+fZ zQq~#;44LcL?1dz%1;wiwrK$x*N{flMuQ}?~vdEGzA{0qF-zK}>+gn^NfNRhmjmO*R z-ZEOQtPHCWwwj@orAr5Xa0=%z_+?@a1KUwte!QK8u$BBd+N5+_!;qw#&#Y~E2(tvo zJil@JD}TV$hdPN_BVr2__gX*2bVf5Zl`E@iY-$x5_SMxT#&5Lf1aj7rXJAnl1=(S- z&SHO2Gza3qLzo)Li2RimuW>Q2lm*B*Rq;{{$KDRA;Ov%3F|Qg zbEq;zO^md{i=y{5O#4!MxMKOYWE;!ZaED-Yt_dr8$b#Xez`}4JE_Rf;I8rzaG=+g4 z+WGCY3P&DZQ*yUYNrsH?==$Pt^c}6_b`ByMYi0U==EauK{YaOK(}e{`U=R}GAbTH# 
z2-A1Kx+UxH+hc=n-;-`N$$@tG={W{1z)C&egkR;m7{JB3kTWwnq~e7}h%GljG3?pgH+1em`2% zTK6{o^z~r_-C93w&2V?x0H>i0Pi|W@MW%Q_6M}+B+E< z-kh4fV}Xto7Re2}%O6WVt#C$bYW_=zzdg^obPmQ3ly_o;OX2DlAmuTdLE%mAugxoC zef+je{yKI+s&59X;GtQ5*JnZB4AA$XQBh`*(p|KFBkJ1$6dyw2481f4+wS*j11u@wx)Y3dv(A! z+DZTTr|Ct<{)Iof4-#E!`1P}hiF?9xkUJi(`r~mm+syN(+0C2&2m4xCvsSKqIbxYu zpM4DP4Bl|vezw_!jy5V!&H&A~4_|SmjO|_x@R0rrnEgxNCCXthoQAF^C=*N>m+*~u zHPBgtGct@GuPU9J6HakH>nsO)9x0%;iCcM8d}X^m_|4x>TrO2B5g`7k#C@i?)b6vx z7M^;N^J0PA%Phn@MTZrnqu70Toizcs*46ZEeP9W1z#Ei8E(VL4ON<#JeI3 zzs6rrVPylJPi|UG^VEkOG#GW^!QiEvxsCnJLA^mW9o52y>{F@1_|*dCQ-(3E4k=Yl zQg%)pJVH8<8u>bBVlmOt(W9Tlf8vXg;EVT+=zQ3Ciw?S^$5uwXsA*5J`ZYX3W3l)+ zGR)%5{8$#T+4@n~VJ>F-YJX9X}T+y81LY?#Nmr4>) zqM?foDB^%;8eCOk=mCM~XcT?&X^|LH8DM{y@&VOi=UtgKy0xn|a&~i0JM_WE)3I2A z4FnselAUD2IJ5tF%9;H(QCC6RY(Tlw5Lrl^d+*76!e4Ybfn3yc!t!(_`5~K4(=O|Q`%yCNEmQ=&Hg`#AfSiU5h8#90YEhW zi!S^=mj*M^-&Sh01vh6_TDI%!v4M^3TGKN3sVFOPu0~?#8YBov+kMk!kTrt8vrkjd z$@x0AjMI|A0G$yR0nYa;TW+hS9+CWeWknfx>Yi%O2QA3F{OC*M@4M>rY43_258I>t zB$RHmh_`NE<&8xH5KboSskec?!vhCSyunew9`$51117g+_&^$bPb-`Xjm#e!`e{-v zrpUE5o;6Ozv=T$3&s>&07$QkK!!gMSGHY7Y%Y;-f%y_Z)m0z9;h?SO`c?WjABBI>c zky`)u=FzLCKCo#{Qcsa!8J!9dn?fW$Xh72SJ)#}3Cy7Va>2b|6H)9(42KuIun5c_P z!Lyl^f&V}y#Vb_82>;k|w|rq};&D{(d0ytbg(?DcCLbg-TbR%d#2#l@!= zAA*u*`gzAFq-)9$4Q3mfjhDHEaQS0m7`f>pv9oQwp$@r?X1sEh8N%{r?qczK)b-qL zTvz(3ay*Ra)a>QIR_}q=@LkQ@u-;Ic8O)piK3=JQT_Yjm8+m0uGqxS#T+qFNlfwkePS!}iLN>(-4rA$j( zSszW1U0b)hjZ(FirV*tsl?_XhMx+(hRhPIcE!kp4RjFHDDCb9uOH=GPyAZ#23 zEeGvlv5XtegMwTqeu4Jr=ru>SF02*^1Y6Q;!j4D zB^728dwh*Egoq6GdDl#E$h107a;Lhn7yJBx`j^;O0{duh*lxq4mWdeIHE6@!w;^i2V$Q_k`+9=XuRtNvoGxyX4souZprn_Xwl$LIYuNVUcVg4k}`K zR?OA86B<0<&~&!ha?-HUg_iW75}w?s;Ockex(geIJ-52YduDQUIM(g!FVsPZXb3y1 zHVM(86B~W6FO&j!!UB!MGw(R1TMWr{{#@e$QuApBVBq;}(D~O^#l5NZ{xMmdE%?M# zOzPVinH#O-Zu4mz_~b=rAOn~aTkXjMLb1+s zGi;*cQjq1h4=|Ew7$hvnU$mL@xQN%FBlhFg_^=(IpE`rW*#^+^~rrnE$ zq+0;%({;u%X1zTVc3>`Wnu((^Y9il)SU`ZN(`JddyXa zzjCCa6H|rRF7+jVyXJo(W6cvH=deD=lytCim#Nh_WH6wTHF+)$WzoyTEl#N4yTrEL 
z8`0mhMx&&bJ?~DK=^xgj{PTK3PZ}`9JVs~V?d^JR*<2R~yuX@MEasz~w2M^?MIO*FxQ^qYIHWxFJ6c!8{b(A_3Ffyw=w^#At|8bB zkqQ8O3iW)$XX*_oyKWpkxVAIa!M4+dEKtTOqC^T}O=A?5x8L~r4)fXYr!`Woq>T_Q zbX%)dYb|E+7s94L1qk)VpH3!9R!s*iw4p(4a;w^j*IesajgCD&s}7hli746s3l5Zg zaB?&MO6Y{wG-W`n47h^a&Ei;OOZ~Q`{c$cB)kpp9ZB+k4^MrhNPPiTu56wNqQ$&TZ|{nF{EAg0?lJC}=M_sCE9v^>6S-&;{x^(q+T z42>=y28znrg3#{Id-OUWsZxt&Ft!#G#DT4A><@U*aO~y5YcDmdwS91SduMKllaM$X zwUy)JPicCClaImp6H@wzHkAhLSLyF3g3m8FZO zFm1a3*iB2(_JeBm(v6$@l}?lcynN1OP(6;&OPV@6e20e$Seex2IJm#|>%>YVEW+rd zRdVU=Yhy0PCsC^wY8p9{l2_PJNBC&Kv2Fv=1XxkvS{Q@6udmP_)6WRGXbyXdH%IuP zA(jU1XFqmxjb8omEgP=&BDhPsB>_V2LeY|ayssgm$Qawr2JK_=PN#OTkvmbRN@v|$ z3%Yhc5E+Ks?q?0-tLH?`jV3qqUknEu*%KMZMVPf);J_a<$gddWzax``AN&&vVOG8Ct&U3~%gpnimZrQUYDO8`UUq>f*-T<- zRxyTd+49hTy-jEDwzJcNq+u_~guHI5Z4p}=+^yId|`+xe( z3kUZvP^)oF6EcimG*jSc2dhypKV-&#VYaunfC4Im^2G0Ge!g)Dfko6XeQXVJR@c@{ zm=B?^?FCNC$hsW&SmhJBafTSA!%K{~s(l>pnf$ z^#eE*7{wo`!b3a<9Uj9MN>r(m>pzT=zC-`MP=@mU$KwCPh>)=SV|1E&%7pb12^?)* KFIU>!jQcOnq=Fv+ literal 0 HcmV?d00001 diff --git a/doc/source/data/integrations.rst b/doc/source/data/integrations.rst index 636966d496b5..55b6161896a6 100644 --- a/doc/source/data/integrations.rst +++ b/doc/source/data/integrations.rst @@ -4,7 +4,7 @@ Integrations ============ -If you’re new to Ray Datasets, we recommend starting with the :ref:`Ray Datasets Quick Start `. +If you’re new to Ray Data, we recommend starting with the :ref:`Ray Data Quick Start `. This is a guide on how to run Dask, Spark, Mars or Modin on Ray. diff --git a/doc/source/data/key-concepts.rst b/doc/source/data/key-concepts.rst index da3a10b2e88a..7980f3ba4665 100644 --- a/doc/source/data/key-concepts.rst +++ b/doc/source/data/key-concepts.rst @@ -4,108 +4,103 @@ Key Concepts ============ -.. _dataset_concept: +.. _datastream_concept: --------- -Datasets --------- +---------- +Datastream +---------- -A :term:`Dataset ` contains a list of Ray object references to :term:`blocks `. 
+A :term:`Datastream ` operates over a sequence of Ray object references to :term:`blocks `. Each block holds a set of items in an `Arrow table `_, `pandas DataFrame `_, or Python list. -Having multiple blocks in a dataset allows for parallel transformation and ingest. +Having multiple blocks in a datastream allows for parallel transformation and ingest. -For ML use cases, Datasets also natively supports mixing :ref:`Tensors ` and tabular data. +For ML use cases, Datastream also natively supports mixing :ref:`Tensors ` and tabular data. -There are three types of datasets: +The following figure visualizes a datastream with three blocks, each holding 1000 rows. Note that certain blocks +may not be computed yet. Normally, callers iterate over datastream blocks in a streaming fashion, so that not all +blocks need to be materialized in the cluster memory at once. -* :term:`Simple datasets ` -- Datasets that represent a collection of Python objects -* :term:`Tabular datasets ` -- Datasets that represent columnar data -* :term:`Tensor datasets ` -- Datasets that represent a collection of ndarrays - -The following figure visualizes a tabular dataset with three blocks, each holding 1000 rows: - -.. image:: images/dataset-arch.svg +.. image:: images/datastream-arch.svg .. https://docs.google.com/drawings/d/1PmbDvHRfVthme9XD7EYM-LIHPXtHdOfjCbc1SCsM64k/edit -Since a Dataset is just a list of Ray object references, it can be freely passed between Ray tasks, -actors, and libraries like any other object reference. -This flexibility is a unique characteristic of Ray Datasets. - Reading Data ============ -Datasets uses Ray tasks to read data from remote storage in parallel. Each read task reads one or more files and produces an output block: +Datastream uses Ray tasks to read data from remote storage in parallel. Each read task reads one or more files and produces an output block: -.. image:: images/dataset-read.svg +.. image:: images/datastream-read.svg :align: center .. 
https://docs.google.com/drawings/d/15B4TB8b5xN15Q9S8-s0MjW6iIvo_PrH7JtV1fL123pU/edit -You can manually specify the number of read tasks, but the final parallelism is always capped by the number of files in the underlying dataset. +You can manually specify the number of read tasks, but the final parallelism is always capped by the number of files in the underlying datastream. -For an in-depth guide on creating datasets, read :ref:`Creating Datasets `. +For an in-depth guide on creating datastreams, read :ref:`Creating Datastreams `. Transforming Data ================= -Datasets uses either Ray tasks or Ray actors to transform data blocks. By default, Datasets uses tasks. +Datastream uses either Ray tasks or Ray actors to transform data blocks. By default, it uses tasks. To use Actors, pass an :class:`ActorPoolStrategy` to ``compute`` in methods like -:meth:`~ray.data.Dataset.map_batches`. :class:`ActorPoolStrategy` creates an autoscaling +:meth:`~ray.data.Datastream.map_batches`. :class:`ActorPoolStrategy` creates an autoscaling pool of Ray actors. This allows you to cache expensive state initialization (e.g., model loading for GPU-based tasks). -.. image:: images/dataset-map.svg +.. image:: images/datastream-map.svg :align: center .. https://docs.google.com/drawings/d/12STHGV0meGWfdWyBlJMUgw7a-JcFPu9BwSOn5BjRw9k/edit -For an in-depth guide on transforming datasets, read :ref:`Transforming Datasets `. +For an in-depth guide on transforming datastreams, read :ref:`Transforming Datastreams `. Shuffling Data ============== -Operations like :meth:`~ray.data.Dataset.sort` and :meth:`~ray.data.Dataset.groupby` -require blocks to be partitioned by value or *shuffled*. Datasets uses tasks to shuffle blocks in a map-reduce +Operations like :meth:`~ray.data.Datastream.sort` and :meth:`~ray.data.Datastream.groupby` +require blocks to be partitioned by value or *shuffled*. 
Datastream uses tasks to shuffle blocks in a map-reduce style: map tasks partition blocks by value and then reduce tasks merge co-partitioned blocks. -Call :meth:`~ray.data.Dataset.repartition` to change the number of blocks in a :class:`~ray.data.Dataset`. +Call :meth:`~ray.data.Datastream.repartition` to change the number of blocks in a :class:`~ray.data.Datastream`. Repartition has two modes: * ``shuffle=False`` - performs the minimal data movement needed to equalize block sizes * ``shuffle=True`` - performs a full distributed shuffle -.. image:: images/dataset-shuffle.svg +.. image:: images/datastream-shuffle.svg :align: center .. https://docs.google.com/drawings/d/132jhE3KXZsf29ho1yUdPrCHB9uheHBWHJhDQMXqIVPA/edit -Datasets can shuffle hundreds of terabytes of data. For an in-depth guide on shuffle performance, read :ref:`Performance Tips and Tuning `. +Datastream can shuffle multi-terabyte datasets, leveraging the Ray object store for disk spilling. For an in-depth guide on shuffle performance, read :ref:`Performance Tips and Tuning `. +Note that operations like shuffle materialize the entire Datastream prior to their execution (shuffle execution is not streamed through memory). -Execution mode -============== +Iteration and materialization +============================= -Most transformations are lazy. They don't execute until you consume a dataset or call -:meth:`Dataset.materialize() `. +Most transformations on a datastream are lazy. They don't execute until you iterate over the datastream or call +:meth:`Datastream.materialize() `. When a Datastream is materialized, its +type becomes a `MaterializedDatastream`, which indicates that all its blocks are materialized in Ray +object store memory. -The transformations are executed in a streaming way, incrementally on the data and -with operators processed in parallel, see :ref:`Streaming Execution `. 
+Datastream transformations are executed in a streaming way, incrementally on the data and +with operators processed in parallel, see :ref:`Streaming Execution `. -For an in-depth guide on Datasets execution, read :ref:`Execution `. +Datastreams and MaterializedDatastreams can be freely passed between Ray tasks, actors, and libraries without +incurring copies of the underlying block data (pass by reference semantics). Fault tolerance =============== -Datasets performs *lineage reconstruction* to recover data. If an application error or -system failure occurs, Datasets recreates lost blocks by re-executing tasks. - -Fault tolerance isn't supported in two cases: +Datastream performs *lineage reconstruction* to recover data. If an application error or +system failure occurs, Datastream recreates lost blocks by re-executing tasks. If ``compute=ActorPoolStrategy(size=n)`` is used, then Ray +will restart the actor used for computing the block prior to re-executing the task. -* If the original worker process that created the Dataset dies. This is because the creator stores the metadata for the :ref:`objects ` that comprise the Dataset. -* If you specify ``compute=ActorPoolStrategy(size=n)`` for transformations. This is because Datasets relies on :ref:`task-based fault tolerance `. +Fault tolerance is not supported if the original worker process that created the Datastream dies. +This is because the creator stores the metadata for the :ref:`objects ` that comprise the Datastream. diff --git a/doc/source/data/mars-on-ray.rst b/doc/source/data/mars-on-ray.rst index 252cea195f66..423f42580c6e 100644 --- a/doc/source/data/mars-on-ray.rst +++ b/doc/source/data/mars-on-ray.rst @@ -54,7 +54,7 @@ Or connecting to a Mars on Ray runtime which is already initialized: # perform computation -Interact with Ray Dataset: +Interact with Datastream: .. 
code-block:: python @@ -64,13 +64,13 @@ Interact with Ray Dataset: df = md.DataFrame( mt.random.rand(1000_0000, 4), columns=list('abcd')) - # Convert mars dataframe to ray dataset + # Convert mars dataframe to ray datastream import ray # ds = md.to_ray_dataset(df) ds = ray.data.from_mars(df) print(ds.schema(), ds.count()) ds.filter(lambda row: row["a"] > 0.5).show(5) - # Convert ray dataset to mars dataframe + # Convert ray datastream to mars dataframe # df2 = md.read_ray_dataset(ds) df2 = ds.to_mars() print(df2.head(5).execute()) diff --git a/doc/source/data/performance-tips.rst b/doc/source/data/performance-tips.rst index 4b3ac00fb54c..ede2c8e6db55 100644 --- a/doc/source/data/performance-tips.rst +++ b/doc/source/data/performance-tips.rst @@ -6,8 +6,8 @@ Performance Tips and Tuning Debugging Statistics ~~~~~~~~~~~~~~~~~~~~ -You can view debug stats for your Dataset and DatasetPipeline executions via :meth:`ds.stats() `. -These stats can be used to understand the performance of your Dataset workload and can help you debug problematic bottlenecks. Note that both execution and iterator statistics are available: +You can view debug stats for your Datastream executions via :meth:`ds.stats() `. +These stats can be used to understand the performance of your Datastream workload and can help you debug problematic bottlenecks. Note that both execution and iterator statistics are available: .. code-block:: python @@ -20,82 +20,60 @@ These stats can be used to understand the performance of your Dataset workload a ds = ray.data.range(10000) ds = ds.map(lambda x: str(x + 1)) + ds = ds.map(pause) - pipe = ds.repeat(5).map(pause).random_shuffle_each_window() + for x in ds.iter_batches(): + pass - @ray.remote - def consume(p, stats=False): - for x in p.iter_batches(): - pass - if stats: - print(p.stats()) - - a, b = pipe.split(2) - ray.get([consume.remote(a), consume.remote(b, True)]) + print(ds.stats()) .. 
code-block:: - == Pipeline Window 4 == - Stage 0 read: [execution cached] - Stage 1 map: [execution cached] - Stage 2 map: 200/200 blocks executed in 0.37s - * Remote wall time: 8.08ms min, 15.82ms max, 9.36ms mean, 1.87s total - * Remote cpu time: 688.79us min, 3.63ms max, 977.38us mean, 195.48ms total - * Output num rows: 50 min, 50 max, 50 mean, 10000 total - * Output size bytes: 456 min, 456 max, 456 mean, 91200 total - * Tasks per node: 200 min, 200 max, 200 mean; 1 nodes used - - Stage 3 random_shuffle_map: 200/200 blocks executed in 0.63s - * Remote wall time: 550.98us min, 5.2ms max, 900.66us mean, 180.13ms total - * Remote cpu time: 550.79us min, 1.13ms max, 870.82us mean, 174.16ms total - * Output num rows: 50 min, 50 max, 50 mean, 10000 total - * Output size bytes: 456 min, 456 max, 456 mean, 91200 total - * Tasks per node: 200 min, 200 max, 200 mean; 1 nodes used - - Stage 3 random_shuffle_reduce: 200/200 blocks executed in 0.63s - * Remote wall time: 152.37us min, 322.96us max, 218.32us mean, 43.66ms total - * Remote cpu time: 151.9us min, 321.53us max, 217.96us mean, 43.59ms total - * Output num rows: 32 min, 69 max, 50 mean, 10000 total - * Output size bytes: 312 min, 608 max, 456 mean, 91200 total - * Tasks per node: 200 min, 200 max, 200 mean; 1 nodes used - - Dataset iterator time breakdown: - * In ray.wait(): 1.15ms - * In ray.get(): 3.51ms - * In format_batch(): 6.83ms - * In user code: 441.53us - * Total time: 12.92ms - - ##### Overall Pipeline Time Breakdown ##### - * Time stalled waiting for next dataset: 3.48ms min, 758.48ms max, 486.78ms mean, 1.95s total - * Time in dataset iterator: 270.66ms - * Time in user code: 1.38ms - * Total time: 4.47s + Stage 1 ReadRange->Map->Map: 16/16 blocks executed in 0.37s + * Remote wall time: 101.55ms min, 331.39ms max, 135.24ms mean, 2.16s total + * Remote cpu time: 7.42ms min, 15.88ms max, 11.01ms mean, 176.15ms total + * Peak heap memory usage (MiB): 157.18 min, 157.73 max, 157 mean + * Output num rows: 
625 min, 625 max, 625 mean, 10000 total + * Output size bytes: 3658 min, 4392 max, 4321 mean, 69150 total + * Tasks per node: 16 min, 16 max, 16 mean; 1 nodes used + * Extra metrics: {'obj_store_mem_alloc': 3658, 'obj_store_mem_freed': 5000, 'obj_store_mem_peak': 40000} + + Datastream iterator time breakdown: + * Total time user code is blocked: 551.67ms + * Total time in user code: 144.97us + * Total time overall: 1.01s + * Num blocks local: 0 + * Num blocks remote: 0 + * Num blocks unknown location: 16 + * Batch iteration time breakdown (summed across prefetch threads): + * In ray.get(): 75.68us min, 220.26us max, 131.89us avg, 2.11ms total + * In batch creation: 326.58us min, 1.37ms max, 644.86us avg, 25.79ms total + * In batch formatting: 101.81us min, 898.73us max, 172.38us avg, 6.9ms total Batching Transforms ~~~~~~~~~~~~~~~~~~~ -Mapping individual records using :meth:`.map(fn) ` can be quite slow. -Instead, consider using :meth:`.map_batches(batch_fn, batch_format="pandas") ` and writing your ``batch_fn`` to +Mapping individual records using :meth:`.map(fn) ` can be quite slow. +Instead, consider using :meth:`.map_batches(batch_fn, batch_format="pandas") ` and writing your ``batch_fn`` to perform vectorized pandas operations. Parquet Column Pruning ~~~~~~~~~~~~~~~~~~~~~~ -Current Datasets will read all Parquet columns into memory. +Current Datastream will read all Parquet columns into memory. If you only need a subset of the columns, make sure to specify the list of columns explicitly when calling :meth:`ray.data.read_parquet() ` to avoid loading unnecessary data (projection pushdown). For example, use ``ray.data.read_parquet("example://iris.parquet", columns=["sepal.length", "variety"])`` to read -just two of the five columns of Iris dataset. +just two of the five columns of Iris datastream. 
Parquet Row Pruning ~~~~~~~~~~~~~~~~~~~ -Similarly, you can pass in a filter to :meth:`ray.data.read_parquet() ` (filter pushdown) +Similarly, you can pass in a filter to :meth:`ray.data.read_parquet() ` (filter pushdown) which will be applied at the file scan so only rows that match the filter predicate will be returned. -For example, use ``ray.data.read_parquet("example://iris.parquet", filter=pyarrow.dataset.field("sepal.length") > 5.0)`` +For example, use ``ray.data.read_parquet("example://iris.parquet", filter=pyarrow.dataset.field("sepal.length") > 5.0)`` (where ``pyarrow`` has to be imported) to read rows with sepal.length greater than 5.0. This can be used in conjunction with column pruning when appropriate to get the benefits of both. @@ -107,7 +85,7 @@ By default, Ray requests 1 CPU per read task, which means one read tasks per CPU For data sources that can benefit from higher degress of I/O parallelism, you can specify a lower ``num_cpus`` value for the read function via the ``ray_remote_args`` parameter. For example, use ``ray.data.read_parquet(path, ray_remote_args={"num_cpus": 0.25})`` to allow up to four read tasks per CPU. -By default, Datasets automatically selects the read parallelism based on the current cluster size and dataset size. +By default, Ray Data automatically selects the read parallelism based on the current cluster size and datastream size. However, the number of read tasks can also be increased manually via the ``parallelism`` parameter. For example, use ``ray.data.read_parquet(path, parallelism=1000)`` to force up to 1000 read tasks to be created. @@ -116,16 +94,16 @@ For example, use ``ray.data.read_parquet(path, parallelism=1000)`` to force up t Enabling Push-Based Shuffle ~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Some Dataset operations require a *shuffle* operation, meaning that data is shuffled from all of the input partitions to all of the output partitions. 
-These operations include :meth:`Dataset.random_shuffle `, -:meth:`Dataset.sort ` and :meth:`Dataset.groupby `. -Shuffle can be challenging to scale to large data sizes and clusters, especially when the total dataset size cannot fit into memory. +Some Datastream operations require a *shuffle* operation, meaning that data is shuffled from all of the input partitions to all of the output partitions. +These operations include :meth:`Datastream.random_shuffle `, +:meth:`Datastream.sort ` and :meth:`Datastream.groupby `. +Shuffle can be challenging to scale to large data sizes and clusters, especially when the total datastream size cannot fit into memory. -Datasets provides an alternative shuffle implementation known as push-based shuffle for improving large-scale performance. -We recommend trying this out if your dataset has more than 1000 blocks or is larger than 1 TB in size. +Datastreams provides an alternative shuffle implementation known as push-based shuffle for improving large-scale performance. +We recommend trying this out if your datastream has more than 1000 blocks or is larger than 1 TB in size. -To try this out locally or on a cluster, you can start with the `nightly release test `_ that Ray runs for :meth:`Dataset.random_shuffle ` and :meth:`Dataset.sort `. -To get an idea of the performance you can expect, here are some run time results for :meth:`Dataset.random_shuffle ` on 1-10TB of data on 20 machines (m5.4xlarge instances on AWS EC2, each with 16 vCPUs, 64GB RAM). +To try this out locally or on a cluster, you can start with the `nightly release test `_ that Ray runs for :meth:`Datastream.random_shuffle ` and :meth:`Datastream.sort `. +To get an idea of the performance you can expect, here are some run time results for :meth:`Datastream.random_shuffle ` on 1-10TB of data on 20 machines (m5.4xlarge instances on AWS EC2, each with 16 vCPUs, 64GB RAM). .. 
image:: https://docs.google.com/spreadsheets/d/e/2PACX-1vQvBWpdxHsW0-loasJsBpdarAixb7rjoo-lTgikghfCeKPQtjQDDo2fY51Yc1B6k_S4bnYEoChmFrH2/pubchart?oid=598567373&format=image :align: center @@ -134,10 +112,10 @@ To try out push-based shuffle, set the environment variable ``RAY_DATA_PUSH_BASE .. code-block:: bash - $ wget https://raw.githubusercontent.com/ray-project/ray/master/release/nightly_tests/dataset/sort.py + $ wget https://raw.githubusercontent.com/ray-project/ray/master/release/nightly_tests/dataset/sort.py $ RAY_DATA_PUSH_BASED_SHUFFLE=1 python sort.py --num-partitions=10 --partition-size=1e7 - # Dataset size: 10 partitions, 0.01GB partition size, 0.1GB total - # [dataset]: Run `pip install tqdm` to enable progress reporting. + # Datastream size: 10 partitions, 0.01GB partition size, 0.1GB total + # [datastream]: Run `pip install tqdm` to enable progress reporting. # 2022-05-04 17:30:28,806 INFO push_based_shuffle.py:118 -- Using experimental push-based shuffle. # Finished in 9.571171760559082 # ... diff --git a/doc/source/data/pipelining-compute.rst b/doc/source/data/pipelining-compute.rst index acab716c1611..7a17c431825b 100644 --- a/doc/source/data/pipelining-compute.rst +++ b/doc/source/data/pipelining-compute.rst @@ -1,15 +1,13 @@ .. _pipelining_datasets: -.. note:: +============================= +DatasetPipelines (deprecated) +============================= - The DatasetPipeline is expected to be deprecated in Ray 2.5. If your use case doesn't - need per-window shuffle, we recommend using just plain Datasets, which supports the - streaming execution by default in Ray 2.4. For more detail, see - :ref:`Streaming Execution `. +.. warning:: -================== -Pipelining Compute -================== + DatasetPipelines are deprecated now that Datastream provides pipelined execution + by default. For more detail, see :ref:`Streaming Execution `. 
Dataset pipelines allow Dataset transformations to be executed incrementally on *windows* of the base data, instead of on all of the data at once. This can be used for streaming data loading into ML training, or to execute batch transformations on large datasets without needing to load the entire dataset into cluster memory. @@ -18,7 +16,7 @@ Dataset pipelines can be read in a streaming fashion by one consumer, or split i Creating a DatasetPipeline ========================== -A `DatasetPipeline `__ can be constructed in two ways: either by pipelining the execution of an existing Dataset (via :meth:`~ray.data.Dataset.window`), or generating repeats of an existing Dataset (via :meth:`~ray.data.Dataset.repeat`). Similar to Datasets, you can freely pass DatasetPipelines between Ray tasks, actors, and libraries. Get started with this synthetic data example: +A `DatasetPipeline `__ can be constructed in two ways: either by pipelining the execution of an existing Dataset (via :meth:`~ray.data.Datastream.window`), or generating repeats of an existing Dataset (via :meth:`~ray.data.Datastream.repeat`). Similar to Datasets, you can freely pass DatasetPipelines between Ray tasks, actors, and libraries. Get started with this synthetic data example: .. code-block:: python @@ -60,7 +58,7 @@ A `DatasetPipeline `__ can be constructed print("Total num rows", num_rows) # -> Total num rows 1000000 -You can also create a DatasetPipeline from a custom iterator over dataset creators using :meth:`~ray.data.DatasetPipeline.from_iterable`. For example, this is how you would implement :meth:`~ray.data.Dataset.repeat` and :meth:`~ray.data.Dataset.window` using :meth:`~ray.data.DatasetPipeline.from_iterable`: +You can also create a DatasetPipeline from a custom iterator over dataset creators using :meth:`~ray.data.DatastreamPipeline.from_iterable`. 
For example, this is how you would implement :meth:`~ray.data.Datastream.repeat` and :meth:`~ray.data.Datastream.window` using :meth:`~ray.data.DatasetPipeline.from_iterable`: .. code-block:: python @@ -105,7 +103,7 @@ While most Dataset operations are per-row (e.g., map, filter), some operations a # 0 # 3 -You can also apply arbitrary transformations to each window using :meth:`DatasetPipeline.foreach_window() `: +You can also apply arbitrary transformations to each window using :meth:`DatasetPipeline.foreach_window() `: .. code-block:: python @@ -169,7 +167,7 @@ Ignoring the output, the above script has three separate stages: loading, prepro Enabling Pipelining ~~~~~~~~~~~~~~~~~~~ -We can optimize this by *pipelining* the execution of the dataset with the :meth:`~ray.data.Dataset.window` call, which returns a DatasetPipeline instead of a Dataset object. The pipeline supports similar transformations to the original Dataset: +We can optimize this by *pipelining* the execution of the dataset with the :meth:`~ray.data.Datastream.window` call, which returns a DatasetPipeline instead of a Dataset object. The pipeline supports similar transformations to the original Dataset: .. code-block:: python @@ -228,7 +226,7 @@ Dataset pipelines can also be used for streaming data loading into distributed t Splitting pipelines for distributed ingest ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Similar to how you can split a Dataset with :meth:`Dataset.split() `, you can also split a DatasetPipeline with the same method call :meth:`DatasetPipeline.split() `. This returns a number of DatasetPipeline shards that share a common parent pipeline. 
Each shard can be passed to a remote task or actor. **Code**: @@ -260,7 +258,7 @@ Handling Epochs ~~~~~~~~~~~~~~~ It's common in ML training to want to divide data ingest into epochs, or repetitions over the original source dataset. -DatasetPipeline provides a convenient :meth:`DatasetPipeline.iter_epochs() ` method that can be used to split up the pipeline into epoch-delimited pipeline segments. +DatasetPipeline provides a convenient :meth:`DatasetPipeline.iter_epochs() ` method that can be used to split up the pipeline into epoch-delimited pipeline segments. Epochs are defined by the last call to ``.repeat()`` in a pipeline, for example: .. code-block:: python diff --git a/doc/source/data/transforming-datasets.rst b/doc/source/data/transforming-datastreams.rst similarity index 67% rename from doc/source/data/transforming-datasets.rst rename to doc/source/data/transforming-datastreams.rst index ab646b904eb2..c40a3af89c91 100644 --- a/doc/source/data/transforming-datasets.rst +++ b/doc/source/data/transforming-datastreams.rst @@ -1,22 +1,16 @@ -.. _transforming_datasets: +.. _transforming_datastreams: -===================== -Transforming Datasets -===================== +======================== +Transforming Datastreams +======================== -Datasets transformations take in datasets and produce new datasets. For example, *map* +Datastreams transformations take in datastreams and produce new datastreams. For example, *map_batches* is a transformation that applies a -:ref:`user-defined function ` on each dataset record -and returns a new dataset as the result. Datasets transformations can be composed to +:ref:`user-defined function ` on each data record +and returns a new datastream as the result. Datastreams transformations can be composed to express a chain of computations. -.. tip:: - - If you're performing common ML transformations like normalization and label - encoding, create a :class:`~ray.data.preprocessor.Preprocessor` instead. 
To learn - more, read :ref:`Using Preprocessors `. - -.. _transform_datasets_transformations: +.. _transform_datastreams_transformations: --------------- Transformations @@ -25,81 +19,81 @@ Transformations There are two main types of transformations: * One-to-one: each input block will contribute to only one output - block, such as :meth:`ds.map_batches() `. + block, such as :meth:`ds.map_batches() `. * All-to-all: input blocks can contribute to multiple output blocks, - such as :meth:`ds.random_shuffle() `. + such as :meth:`ds.random_shuffle() `. -Here is a table listing some common transformations supported by Ray Datasets. +Here is a table listing some common transformations supported by Ray Data. -.. list-table:: Common Ray Datasets transformations. +.. list-table:: Common Ray Data transformations. :header-rows: 1 * - Transformation - Type - Description - * - :meth:`ds.map_batches() ` + * - :meth:`ds.map_batches() ` - One-to-one - - Apply a given function to batches of records of this dataset. - * - :meth:`ds.add_column() ` + - Apply a given function to batches of records of this datastream. + * - :meth:`ds.add_column() ` - One-to-one - Apply a given function to batches of records to create a new column. - * - :meth:`ds.drop_columns() ` + * - :meth:`ds.drop_columns() ` - One-to-one - - Drop the given columns from the dataset. - * - :meth:`ds.split() ` + - Drop the given columns from the datastream. + * - :meth:`ds.streaming_split() ` - One-to-one - - | Split the dataset into N disjoint pieces. - * - :meth:`ds.repartition(shuffle=False) ` + - | Split the datastream into N disjoint iterators. + * - :meth:`ds.repartition(shuffle=False) ` - One-to-one - - | Repartition the dataset into N blocks, without shuffling the data. - * - :meth:`ds.repartition(shuffle=True) ` + - | Repartition the datastream into N blocks, without shuffling the data. 
+ * - :meth:`ds.repartition(shuffle=True) ` - All-to-all - - | Repartition the dataset into N blocks, shuffling the data during repartition. - * - :meth:`ds.random_shuffle() ` + - | Repartition the datastream into N blocks, shuffling the data during repartition. + * - :meth:`ds.random_shuffle() ` - All-to-all - - | Randomly shuffle the elements of this dataset. - * - :meth:`ds.sort() ` + - | Randomly shuffle the elements of this datastream. + * - :meth:`ds.sort() ` - All-to-all - - | Sort the dataset by a sortkey. - * - :meth:`ds.groupby() ` + - | Sort the datastream by a sortkey. + * - :meth:`ds.groupby() ` - All-to-all - - | Group the dataset by a groupkey. + - | Group the datastream by a groupkey. .. tip:: - Datasets also provides the convenience transformation methods :meth:`ds.map() `, - :meth:`ds.flat_map() `, and :meth:`ds.filter() `, - which are not vectorized (slower than :meth:`ds.map_batches() `), but + Datastreams also provides the convenience transformation methods :meth:`ds.map() `, + :meth:`ds.flat_map() `, and :meth:`ds.filter() `, + which are not vectorized (slower than :meth:`ds.map_batches() `), but may be useful for development. The following is an example to make use of those transformation APIs for processing -the Iris dataset. +the Iris datastream. -.. literalinclude:: ./doc_code/transforming_datasets.py +.. literalinclude:: ./doc_code/transforming_datastreams.py :language: python - :start-after: __dataset_transformation_begin__ - :end-before: __dataset_transformation_end__ + :start-after: __datastream_transformation_begin__ + :end-before: __datastream_transformation_end__ -.. _transform_datasets_writing_udfs: +.. _transform_datastreams_writing_udfs: ------------------------------------- Writing User-defined Functions (UDFs) ------------------------------------- User-defined functions (UDFs) are routines that apply on one row (e.g. -:meth:`.map() `) or a batch of rows (e.g. -:meth:`.map_batches() `) of a dataset. 
UDFs let you +:meth:`.map() `) or a batch of rows (e.g. +:meth:`.map_batches() `) of a datastream. UDFs let you express your customized business logic in transformations. Here we will focus on -:meth:`.map_batches() ` as it's the primary mapping -API in Datasets. +:meth:`.map_batches() ` as it's the primary mapping +API in Datastreams. Here are the basics that you need to know about UDFs: -* A UDF can be either a function, a generator, or if using the :ref:`actor compute strategy `, a :ref:`callable class `. -* Select the UDF input :ref:`batch format ` using the ``batch_format`` argument. -* The UDF output type determines the Dataset schema of the transformation result. +* A UDF can be either a function, a generator, or if using the :ref:`actor compute strategy `, a :ref:`callable class `. +* Select the UDF input :ref:`batch format ` using the ``batch_format`` argument. +* The UDF output type determines the Datastream schema of the transformation result. -.. _transform_datasets_callable_classes: +.. _transform_datastreams_callable_classes: Types of UDFs ============= @@ -107,9 +101,9 @@ There are three types of UDFs that you can use with Ray Data: Function UDFs, Cal .. tabbed:: "Function UDFs" - The most basic UDFs are functions that take in a batch or row as input, and returns a batch or row as output. See :ref:`transform_datasets_batch_formats` for the supported batch formats. + The most basic UDFs are functions that take in a batch or row as input, and returns a batch or row as output. See :ref:`transform_datastreams_batch_formats` for the supported batch formats. - .. literalinclude:: ./doc_code/transforming_datasets.py + .. 
literalinclude:: ./doc_code/transforming_datastreams.py :language: python :start-after: __writing_default_udfs_tabular_begin__ :end-before: __writing_default_udfs_tabular_end__ @@ -127,7 +121,7 @@ There are three types of UDFs that you can use with Ray Data: Function UDFs, Cal These transformation APIs take the uninstantiated callable class as an argument, not an instance of the class. - .. literalinclude:: ./doc_code/transforming_datasets.py + .. literalinclude:: ./doc_code/transforming_datastreams.py :language: python :start-after: __writing_callable_classes_udfs_begin__ :end-before: __writing_callable_classes_udfs_end__ @@ -137,49 +131,49 @@ There are three types of UDFs that you can use with Ray Data: Function UDFs, Cal UDFs can also be written as Python generators, yielding multiple outputs for a batch or row instead of a single item. Generator UDFs are useful when returning large objects. Instead of returning a very large output batch, ``fn`` can instead yield the output batch in chunks to avoid excessive heap memory usage. .. warning:: - When applying a generator UDF on individual rows, make sure to use the :meth:`.flat_map() ` API and not the :meth:`.map() ` API. + When applying a generator UDF on individual rows, make sure to use the :meth:`.flat_map() ` API and not the :meth:`.map() ` API. - .. literalinclude:: ./doc_code/transforming_datasets.py + .. literalinclude:: ./doc_code/transforming_datastreams.py :language: python :start-after: __writing_generator_udfs_begin__ :end-before: __writing_generator_udfs_end__ -.. _transform_datasets_batch_formats: +.. _transform_datastreams_batch_formats: UDF Input Batch Format ====================== Choose the *batch format* of the data given to UDFs -by setting the ``batch_format`` option of :meth:`.map_batches() `. +by setting the ``batch_format`` option of :meth:`.map_batches() `. Here is an overview of the available batch formats: .. 
tabbed:: "default" - The "default" batch format presents data as follows for each Dataset type: + The "default" batch format presents data as follows for each Datastream type: - * **Tabular Datasets**: Each batch will be a + * **Tabular Datastreams**: Each batch will be a `pandas.DataFrame `__. - This may incur a conversion cost if the underlying Dataset block is not + This may incur a conversion cost if the underlying Datastream block is not zero-copy convertible from an Arrow table. - .. literalinclude:: ./doc_code/transforming_datasets.py + .. literalinclude:: ./doc_code/transforming_datastreams.py :language: python :start-after: __writing_default_udfs_tabular_begin__ :end-before: __writing_default_udfs_tabular_end__ - * **Tensor Datasets** (single-column): Each batch will be a single + * **Tensor Datastreams** (single-column): Each batch will be a single `numpy.ndarray `__ containing the single tensor column for this batch. - .. literalinclude:: ./doc_code/transforming_datasets.py + .. literalinclude:: ./doc_code/transforming_datastreams.py :language: python :start-after: __writing_default_udfs_tensor_begin__ :end-before: __writing_default_udfs_tensor_end__ - * **Simple Datasets**: Each batch will be a Python list. + * **Simple Datastreams**: Each batch will be a Python list. - .. literalinclude:: ./doc_code/transforming_datasets.py + .. literalinclude:: ./doc_code/transforming_datastreams.py :language: python :start-after: __writing_default_udfs_list_begin__ :end-before: __writing_default_udfs_list_end__ @@ -188,10 +182,10 @@ Here is an overview of the available batch formats: The ``"pandas"`` batch format presents batches in `pandas.DataFrame `__ - format. If converting a simple dataset to Pandas DataFrame batches, a single-column + format. If converting a simple datastream to Pandas DataFrame batches, a single-column dataframe with the column ``"__value__"`` will be created. - .. literalinclude:: ./doc_code/transforming_datasets.py + .. 
literalinclude:: ./doc_code/transforming_datastreams.py :language: python :start-after: __writing_pandas_udfs_begin__ :end-before: __writing_pandas_udfs_end__ @@ -200,10 +194,10 @@ Here is an overview of the available batch formats: The ``"pyarrow"`` batch format presents batches in `pyarrow.Table `__ - format. If converting a simple dataset to Arrow Table batches, a single-column table + format. If converting a simple datastream to Arrow Table batches, a single-column table with the column ``"__value__"`` will be created. - .. literalinclude:: ./doc_code/transforming_datasets.py + .. literalinclude:: ./doc_code/transforming_datastreams.py :language: python :start-after: __writing_arrow_udfs_begin__ :end-before: __writing_arrow_udfs_end__ @@ -214,23 +208,23 @@ Here is an overview of the available batch formats: `numpy.ndarray `__ format as follows: - * **Tabular Datasets**: Each batch will be a dictionary of NumPy + * **Tabular Datastreams**: Each batch will be a dictionary of NumPy ndarrays (``Dict[str, np.ndarray]``), with each key-value pair representing a column in the table. - * **Tensor Datasets** (single-column): Each batch will be a single + * **Tensor Datastreams** (single-column): Each batch will be a single `numpy.ndarray `__ containing the single tensor column for this batch. - * **Simple Datasets**: Each batch will be a single NumPy ndarray, where Datasets will + * **Simple Datastreams**: Each batch will be a single NumPy ndarray, where Datastreams will attempt to convert each list-batch to an ndarray. - .. literalinclude:: ./doc_code/transforming_datasets.py + .. 
literalinclude:: ./doc_code/transforming_datastreams.py :language: python :start-after: __writing_numpy_udfs_begin__ :end-before: __writing_numpy_udfs_end__ -Converting between the underlying Datasets data representations (Arrow, Pandas, and +Converting between the underlying Datastreams data representations (Arrow, Pandas, and Python lists) and the requested batch format (``"default"``, ``"pandas"``, ``"pyarrow"``, ``"numpy"``) may incur data copies; which conversions cause data copying is given in the below table: @@ -240,7 +234,7 @@ is given in the below table: :header-rows: 1 :stub-columns: 1 - * - Dataset Format x Batch Format + * - Datastream Format x Batch Format - ``"default"`` - ``"pandas"`` - ``"numpy"`` @@ -281,121 +275,121 @@ is given in the below table: .. tip:: - If the UDF for :meth:`ds.map_batches() ` does **not** + If the UDF for :meth:`ds.map_batches() ` does **not** mutate its input, we can prevent an unnecessary data batch copy by specifying ``zero_copy_batch=True``, which will provide the UDF with zero-copy, read-only - batches. See the :meth:`ds.map_batches() ` docstring for + batches. See the :meth:`ds.map_batches() ` docstring for more information. -.. _transform_datasets_batch_output_types: +.. _transform_datastreams_batch_output_types: Batch UDF Output Types ====================== The following output types are allowed for batch UDFs (e.g., -:meth:`ds.map_batches() `). The following describes +:meth:`ds.map_batches() `). The following describes how they are interpreted to create the transformation result: .. tabbed:: pd.DataFrame - Returning ``pd.DataFrame`` creates a Tabular dataset as the transformation result: + Returning ``pd.DataFrame`` creates a Tabular datastream as the transformation result: - .. literalinclude:: ./doc_code/transforming_datasets.py + .. literalinclude:: ./doc_code/transforming_datastreams.py :language: python :start-after: __writing_pandas_out_udfs_begin__ :end-before: __writing_pandas_out_udfs_end__ .. 
tabbed:: pa.Table - Returning ``pa.Table`` creates a Tabular dataset as the transformation result: + Returning ``pa.Table`` creates a Tabular datastream as the transformation result: - .. literalinclude:: ./doc_code/transforming_datasets.py + .. literalinclude:: ./doc_code/transforming_datastreams.py :language: python :start-after: __writing_arrow_out_udfs_begin__ :end-before: __writing_arrow_out_udfs_end__ .. tabbed:: np.ndarray - Returning ``np.ndarray`` creates a single-column Tensor dataset as the transformation result: + Returning ``np.ndarray`` creates a single-column Tensor datastream as the transformation result: - .. literalinclude:: ./doc_code/transforming_datasets.py + .. literalinclude:: ./doc_code/transforming_datastreams.py :language: python :start-after: __writing_numpy_out_udfs_begin__ :end-before: __writing_numpy_out_udfs_end__ .. tabbed:: Dict[str, np.ndarray] - Returning ``Dict[str, np.ndarray]`` creates a multi-column Tensor dataset as the transformation result. + Returning ``Dict[str, np.ndarray]`` creates a multi-column Tensor datastream as the transformation result. If a column tensor is 1-dimensional, then the native Arrow 1D list - type is used; if a column tensor has 2 or more dimensions, then the Dataset - :ref:`tensor extension type ` to embed these + type is used; if a column tensor has 2 or more dimensions, then the Datastream + :ref:`tensor extension type ` to embed these n-dimensional tensors in the Arrow table. - .. literalinclude:: ./doc_code/transforming_datasets.py + .. literalinclude:: ./doc_code/transforming_datastreams.py :language: python :start-after: __writing_numpy_dict_out_udfs_begin__ :end-before: __writing_numpy_dict_out_udfs_end__ .. tabbed:: list - Returning ``list`` creates a simple Python object dataset as the transformation result: + Returning ``list`` creates a simple Python object datastream as the transformation result: - .. literalinclude:: ./doc_code/transforming_datasets.py + .. 
literalinclude:: ./doc_code/transforming_datastreams.py :language: python :start-after: __writing_simple_out_udfs_begin__ :end-before: __writing_simple_out_udfs_end__ -.. _transform_datasets_row_output_types: +.. _transform_datastreams_row_output_types: Row UDF Output Types ==================== The following output types are allowed for per-row UDFs (e.g., -:meth:`ds.map() `): +:meth:`ds.map() `): .. tabbed:: dict - Returning a ``dict`` of Arrow-compatible data types creates a Tabular dataset + Returning a ``dict`` of Arrow-compatible data types creates a Tabular datastream as the transformation result. If any dict values are not Arrow-compatible, then - a simple Python object dataset will be created: + a simple Python object datastream will be created: - .. literalinclude:: ./doc_code/transforming_datasets.py + .. literalinclude:: ./doc_code/transforming_datastreams.py :language: python :start-after: __writing_dict_out_row_udfs_begin__ :end-before: __writing_dict_out_row_udfs_end__ .. tabbed:: np.ndarray - Returning ``np.ndarray`` creates a single-column Tensor dataset as the transformation result: + Returning ``np.ndarray`` creates a single-column Tensor datastream as the transformation result: - .. literalinclude:: ./doc_code/transforming_datasets.py + .. literalinclude:: ./doc_code/transforming_datastreams.py :language: python :start-after: __writing_numpy_out_row_udfs_begin__ :end-before: __writing_numpy_out_row_udfs_end__ .. tabbed:: object - Other return row types will create a simple Python object dataset as the transformation result: + Other return row types will create a simple Python object datastream as the transformation result: - .. literalinclude:: ./doc_code/transforming_datasets.py + .. literalinclude:: ./doc_code/transforming_datastreams.py :language: python :start-after: __writing_simple_out_row_udfs_begin__ :end-before: __writing_simple_out_row_udfs_end__ -.. _transform_datasets_configuring_batch_size: +.. 
_transform_datastreams_configuring_batch_size: ---------------------- Configuring Batch Size ---------------------- -:meth:`ds.map_batches() ` is the canonical parallel -transformation API for Datasets: it launches parallel tasks over the underlying Datasets +:meth:`ds.map_batches() ` is the canonical parallel +transformation API for Datastreams: it launches parallel tasks over the underlying Datastreams blocks and maps UDFs over data batches within those tasks, allowing the UDF to implement vectorized operations on batches. An important parameter to set is ``batch_size``, which controls the size of the batches provided to the UDF. -.. literalinclude:: ./doc_code/transforming_datasets.py +.. literalinclude:: ./doc_code/transforming_datastreams.py :language: python :start-after: __configuring_batch_size_begin__ :end-before: __configuring_batch_size_end__ @@ -407,19 +401,19 @@ lead to out-of-memory failures. If encountering OOMs, decreasing your ``batch_si help. .. note:: - The default ``batch_size`` of ``4096`` may be too large for datasets with large rows + The default ``batch_size`` of ``4096`` may be too large for datastreams with large rows (e.g. tables with many columns or a collection of large images). -If you specify a ``batch_size`` that's larger than your ``Dataset`` blocks, Datasets +If you specify a ``batch_size`` that's larger than your ``Datastream`` blocks, Datastreams will bundle multiple blocks together for a single task in order to better satisfy -``batch_size``. If ``batch_size`` is a lot larger than your ``Dataset`` blocks (e.g. if -your dataset was created with too large of a ``parallelism`` and/or the ``batch_size`` -is set to too large of a value for your dataset), the number of parallel tasks +``batch_size``. If ``batch_size`` is a lot larger than your ``Datastream`` blocks (e.g. 
if +your datastream was created with too large of a ``parallelism`` and/or the ``batch_size`` +is set to too large of a value for your datastream), the number of parallel tasks may be less than expected. -If your ``Dataset`` blocks are smaller than your ``batch_size`` and you want to increase -:meth:`ds.map_batches() ` parallelism, decrease your -``batch_size`` to prevent this block bundling. If you think that your ``Dataset`` blocks +If your ``Datastream`` blocks are smaller than your ``batch_size`` and you want to increase +:meth:`ds.map_batches() ` parallelism, decrease your +``batch_size`` to prevent this block bundling. If you think that your ``Datastream`` blocks are too small, try decreasing ``parallelism`` during the read to create larger blocks. .. note:: @@ -434,13 +428,13 @@ are too small, try decreasing ``parallelism`` during the read to create larger b in that task will the same size as the block, and will therefore be smaller than the default ``batch_size``. -.. _transform_datasets_compute_strategy: +.. _transform_datastreams_compute_strategy: ---------------- Compute Strategy ---------------- -Datasets transformations are executed by either :ref:`Ray tasks ` +Datastreams transformations are executed by either :ref:`Ray tasks ` or :ref:`Ray actors ` across a Ray cluster. By default, Ray tasks are used (with ``compute="tasks"``). For transformations that require expensive setup, it's preferrable to use Ray actors, which are stateful and allow setup to be reused @@ -450,12 +444,12 @@ For an autoscaling actor pool, use ``compute=ray.data.ActorPoolStrategy(min_size The following is an example of using the Ray tasks and actors compute strategy for batch inference: -.. literalinclude:: ./doc_code/transforming_datasets.py +.. 
literalinclude:: ./doc_code/transforming_datastreams.py :language: python - :start-after: __dataset_compute_strategy_begin__ - :end-before: __dataset_compute_strategy_end__ + :start-after: __datastream_compute_strategy_begin__ + :end-before: __datastream_compute_strategy_end__ -.. _datasets-groupbys: +.. _data-groupbys: -------------------------- Group-bys and aggregations @@ -467,16 +461,16 @@ aggregation has been computed. .. code-block:: python - ds: ray.data.Dataset = ray.data.from_items([ + ds: ray.data.Datastream = ray.data.from_items([ {"A": x % 3, "B": 2 * x, "C": 3 * x} for x in range(10)]) # Group by the A column and calculate the per-group mean for B and C columns. - agg_ds: ray.data.Dataset = ds.groupby("A").mean(["B", "C"]).materialize() + agg_ds: ray.data.Datastream = ds.groupby("A").mean(["B", "C"]).materialize() # -> Sort Sample: 100%|███████████████████████████████████████| 10/10 [00:01<00:00, 9.04it/s] # -> GroupBy Map: 100%|███████████████████████████████████████| 10/10 [00:00<00:00, 23.66it/s] # -> GroupBy Reduce: 100%|████████████████████████████████████| 10/10 [00:00<00:00, 937.21it/s] - # -> Dataset(num_blocks=10, num_rows=3, schema={}) + # -> Datastream(num_blocks=10, num_rows=3, schema={}) agg_ds.to_pandas() # -> # A mean(B) mean(C) @@ -503,7 +497,7 @@ aggregation has been computed. # -> GroupBy Reduce: 100%|████████████████████████████████████| 1/1 [00:00<00:00, 133.51it/s] # -> {'mean(A)': 0.9, 'std(A)': 0.8306623862918076, 'mean(B)': 9.0, 'std(B)': 5.744562646538029} -Combine aggreations with batch mapping to transform datasets using computed statistics. +Combine aggreations with batch mapping to transform datastreams using computed statistics. For example, you can efficiently standardize feature columns and impute missing values with calculated column means. @@ -521,7 +515,7 @@ with calculated column means. 
ds = ds.map_batches(impute_b, batch_format="pandas") # -> MapBatches(impute_b) - # +- Dataset(num_blocks=10, num_rows=10, schema={A: int64, B: int64, C: int64}) + # +- Datastream(num_blocks=10, num_rows=10, schema={A: int64, B: int64, C: int64}) # Standard scaling of all feature columns. stats = ds.aggregate(Mean("B"), Std("B"), Mean("C"), Std("C")) @@ -543,20 +537,20 @@ with calculated column means. ds = ds.map_batches(batch_standard_scaler, batch_format="pandas") ds.materialize() # -> Map Progress: 100%|██████████████████████████████████████| 10/10 [00:00<00:00, 144.79it/s] - # -> Dataset(num_blocks=10, num_rows=10, schema={A: int64, B: double, C: double}) + # -> Datastream(num_blocks=10, num_rows=10, schema={A: int64, B: double, C: double}) -------------- Shuffling data -------------- -Call :meth:`Dataset.random_shuffle() ` to +Call :meth:`Datastream.random_shuffle() ` to perform a global shuffle. .. doctest:: >>> import ray - >>> dataset = ray.data.range(10) - >>> dataset.random_shuffle().take_all() # doctest: +SKIP + >>> datastream = ray.data.range(10) + >>> datastream.random_shuffle().take_all() # doctest: +SKIP [7, 0, 9, 3, 5, 1, 4, 2, 8, 6] For better performance, perform a local shuffle. Read diff --git a/doc/source/data/user-guide.rst b/doc/source/data/user-guide.rst index 0d38e670ab3b..4888c02c3598 100644 --- a/doc/source/data/user-guide.rst +++ b/doc/source/data/user-guide.rst @@ -1,20 +1,20 @@ -.. _data_user_guide : +.. _data_user_guide: =========== User Guides =========== -If you’re new to Ray Datasets, we recommend starting with the :ref:`Ray Datasets Quick Start `. -This user guide will help you navigate the Ray Datasets project and show you how achieve several tasks. +If you’re new to Ray Data, we recommend starting with the :ref:`Ray Data Quick Start `. +This user guide will help you navigate the Ray Data project and show you how achieve several tasks. .. 
toctree:: :maxdepth: 2 - creating-datasets - transforming-datasets - consuming-datasets - dataset-tensor-support + creating-datastreams + transforming-datastreams + consuming-datastreams + data-tensor-support custom-datasource - pipelining-compute - dataset-internals + data-internals performance-tips + pipelining-compute diff --git a/doc/source/index.md b/doc/source/index.md index cb33f3860e65..dcb1edc23d9e 100644 --- a/doc/source/index.md +++ b/doc/source/index.md @@ -133,7 +133,7 @@ dataset_transformed = preprocessor.fit_transform(dataset=dataset)
    -``` \ No newline at end of file +``` diff --git a/doc/source/ray-air/api/dataset-ingest.rst b/doc/source/ray-air/api/dataset-ingest.rst index 22dfcdc53062..fe3b350db779 100644 --- a/doc/source/ray-air/api/dataset-ingest.rst +++ b/doc/source/ray-air/api/dataset-ingest.rst @@ -1,9 +1,9 @@ -Ray Dataset Ingest into AIR Trainers -===================================== +Ray Data Ingest into AIR Trainers +================================= .. seealso:: - See this :ref:`AIR Dataset ingest guide ` for usage examples. + See this :ref:`AIR Data ingest guide ` for usage examples. .. currentmodule:: ray diff --git a/doc/source/ray-air/check-ingest.rst b/doc/source/ray-air/check-ingest.rst index f6969103c62e..1716bd341080 100644 --- a/doc/source/ray-air/check-ingest.rst +++ b/doc/source/ray-air/check-ingest.rst @@ -3,7 +3,7 @@ Configuring Training Datasets ============================= -AIR builds its training data pipeline on :ref:`Ray Datasets `, which is a scalable, framework-agnostic data loading and preprocessing library. Datasets enables AIR to seamlessly load data for local and distributed training with Train. +AIR builds its training data pipeline on :ref:`Ray Data `, which is a scalable, framework-agnostic data loading and preprocessing library. Ray Data enables AIR to seamlessly load data for local and distributed training with Train. This page describes how to setup and configure these datasets in Train under different scenarios and scales. @@ -13,7 +13,7 @@ Overview .. _ingest_basics: The following figure illustrates a simple Ray AIR training job that (1) loads parquet data from S3, (2) applies a simple -:ref:`user-defined function ` to preprocess batches of data, and (3) runs an AIR Trainer with the given dataset and preprocessor. +:ref:`user-defined function ` to preprocess batches of data, and (3) runs an AIR Trainer with the given dataset and preprocessor. .. 
figure:: images/ingest.svg @@ -29,7 +29,7 @@ on the train dataset passed to the Trainer, followed by :py:meth:`prep.transform on remaining datasets. **Training**: Then, AIR passes the preprocessed dataset to Train workers (Ray actors) launched by the Trainer. Each worker calls :func:`~ray.air.session.get_dataset_shard` to get a handle to its assigned data shard. -This returns a :class:`~ray.data.DataIterator`, which can be used to loop over the data with :meth:`~ray.data.DataIterator.iter_batches`, :meth:`~ray.data.Dataset.iter_torch_batches`, or :meth:`~ray.data.Dataset.to_tf`. +This returns a :class:`~ray.data.DataIterator`, which can be used to loop over the data with :meth:`~ray.data.DataIterator.iter_batches`, :meth:`~ray.data.Datastream.iter_torch_batches`, or :meth:`~ray.data.Datastream.to_tf`. Each of these returns a batch iterator for one epoch (a full pass over the original dataset). Getting Started @@ -213,7 +213,7 @@ By default, only the `"train"` dataset is split. All the other Datasets are not However, you may want to split a large validation dataset example to also do data parallel validation. This example shows overriding the split config for the "valid" and "test" datasets. This means that -both the valid and test datasets here will be :py:meth:`.split() ` across the training workers. +both the valid and test datasets here will be :py:meth:`.split() ` across the training workers. .. literalinclude:: doc_code/air_ingest.py :language: python diff --git a/doc/source/ray-air/computer-vision.rst b/doc/source/ray-air/computer-vision.rst index ea35dcf4438c..2fef456f0ee1 100644 --- a/doc/source/ray-air/computer-vision.rst +++ b/doc/source/ray-air/computer-vision.rst @@ -36,7 +36,7 @@ Reading image data :end-before: __read_images1_stop__ :dedent: - Then, apply a :ref:`user-defined function ` to + Then, apply a :ref:`user-defined function ` to encode the class names as integer targets. .. 
literalinclude:: ./doc_code/computer_vision.py @@ -96,7 +96,7 @@ Reading image data :end-before: __read_tfrecords1_stop__ :dedent: - Then, apply a :ref:`user-defined function ` to + Then, apply a :ref:`user-defined function ` to decode the raw image bytes. .. literalinclude:: ./doc_code/computer_vision.py @@ -114,7 +114,7 @@ Reading image data :dedent: -For more information on creating datasets, see :ref:`Creating Datasets `. +For more information on creating datastreams, see :ref:`Creating Datastreams `. Transforming images @@ -153,7 +153,7 @@ standard way to preprocess data with Ray. For more information on transforming data, see :ref:`Using Preprocessors ` and -:ref:`Transforming Datasets `. +:ref:`Transforming Datastreams `. Training vision models ---------------------- diff --git a/doc/source/ray-air/examples/analyze_tuning_results.ipynb b/doc/source/ray-air/examples/analyze_tuning_results.ipynb index 038b9c77f9d3..9eab291552f1 100644 --- a/doc/source/ray-air/examples/analyze_tuning_results.ipynb +++ b/doc/source/ray-air/examples/analyze_tuning_results.ipynb @@ -21,7 +21,7 @@ "id": "41abda7b", "metadata": {}, "source": [ - "We'll use the [Covertype dataset](https://scikit-learn.org/stable/modules/generated/sklearn.datasets.fetch_covtype.html#sklearn-datasets-fetch-covtype) provided from sklearn to train a multiclass classification task using XGBoost.\n", + "We'll use the [Covertype dataset](https://scikit-learn.org/stable/modules/generated/sklearn.datasets.fetch_covtype.html#sklearn-datasets-fetch-covtype) provided from sklearn to train a multiclass classification task using XGBoost.\n", "\n", "In this dataset, we try to predict the forst cover type (e.g. \"lodgehole pine\") from cartographic variables, like the distance to the closest road, or the hillshade at different times of the day. 
The features are binary, discrete and continuous and thus well suited for a decision-tree based classification task.\n", "\n", @@ -82,7 +82,7 @@ "id": "a93b242c", "metadata": {}, "source": [ - "We'll define a utility function to create a Ray Datastream from the Sklearn dataset. We expect the target column to be in the dataframe, so we'll add it to the dataframe manually." + "We'll define a utility function to create a Datastream from the Sklearn dataset. We expect the target column to be in the dataframe, so we'll add it to the dataframe manually." ] }, { diff --git a/doc/source/ray-air/examples/convert_existing_pytorch_code_to_ray_air.ipynb b/doc/source/ray-air/examples/convert_existing_pytorch_code_to_ray_air.ipynb index 0919cb625690..4ead1c2fc71b 100644 --- a/doc/source/ray-air/examples/convert_existing_pytorch_code_to_ray_air.ipynb +++ b/doc/source/ray-air/examples/convert_existing_pytorch_code_to_ray_air.ipynb @@ -1079,7 +1079,7 @@ "id": "ad556eeb", "metadata": {}, "source": [ - "Batch predictors work with Ray Datastreams. Here we convert our test dataset into a Ray Datastream - note that this is not very efficient, and you can look at our {ref}`other tutorials ` to see more efficient ways to generate a Ray Datastream." + "Batch predictors work with Ray Data. Here we convert our test dataset into a Datastream - note that this is not very efficient, and you can look at our {ref}`other tutorials ` to see more efficient ways to generate a Datastream." ] }, { @@ -1125,7 +1125,7 @@ "id": "41094a55", "metadata": {}, "source": [ - "`results` is another Ray Datastream. We can use `results.show()` to see our prediction results:" + "`results` is another Datastream. 
We can use `results.show()` to see our prediction results:" ] }, { @@ -1295,7 +1295,7 @@ "- save and retrieve model checkpoints via Ray AIR\n", "- load a model for batch prediction\n", "\n", - "In our {ref}`other examples ` you can learn how to do more things with the Ray AIR API, such as **serving your model with Ray Serve** or **tune your hyperparameters with Ray Tune.** You can also learn how to **construct Ray Datasets** to leverage Ray AIR's **preprocessing** API.\n", + "In our {ref}`other examples ` you can learn how to do more things with the Ray AIR API, such as **serving your model with Ray Serve** or **tune your hyperparameters with Ray Tune.** You can also learn how to **construct Ray Data** to leverage Ray AIR's **preprocessing** API.\n", "\n", "We hope this tutorial gave you a good starting point to leverage Ray AIR. If you have any questions, suggestions, or run into any problems pelase reach out on [Discuss](https://discuss.ray.io/) or [GitHub](https://github.com/ray-project/ray)!" ] diff --git a/doc/source/ray-air/examples/convert_existing_tf_code_to_ray_air.ipynb b/doc/source/ray-air/examples/convert_existing_tf_code_to_ray_air.ipynb index cf31b16e20ff..4bc43970e9ac 100644 --- a/doc/source/ray-air/examples/convert_existing_tf_code_to_ray_air.ipynb +++ b/doc/source/ray-air/examples/convert_existing_tf_code_to_ray_air.ipynb @@ -705,7 +705,7 @@ "id": "fd72830b", "metadata": {}, "source": [ - "Batch predictors work with [Ray Datasets](datasets). Here, we create a {class}`Dataset ` of images from our test set." + "Batch predictors work with [Ray Data](data). Here, we create a {class}`Datastream ` of images from our test set." ] }, { @@ -723,7 +723,7 @@ "id": "6ab1b08a", "metadata": {}, "source": [ - "Let's run {meth}`BatchPredictor.predict ` on our Ray Dataset. This will distribute the prediction across a specified number of workers!" + "Let's run {meth}`BatchPredictor.predict ` on our Datastream. 
This will distribute the prediction across a specified number of workers!" ] }, { @@ -741,7 +741,7 @@ "id": "9ccadf89", "metadata": {}, "source": [ - "`predict_results` is also a Ray Dataset, and we can take a look at the predictions inside:" + "`predict_results` is also a Datastream, and we can take a look at the predictions inside:" ] }, { @@ -826,7 +826,7 @@ "- save and retrieve model checkpoints via Ray AIR\n", "- load a model for batch prediction\n", "\n", - "In our [other examples](air-examples-ref) you can learn how to do more things with the Ray AIR API, such as **serving your model with Ray Serve** or **tune your hyperparameters with Ray Tune**. You can also learn how to **construct Ray Datasets** to leverage Ray AIR’s **preprocessing** API.\n", + "In our [other examples](air-examples-ref) you can learn how to do more things with the Ray AIR API, such as **serving your model with Ray Serve** or **tune your hyperparameters with Ray Tune**. You can also learn how to **construct Ray Data** to leverage Ray AIR’s **preprocessing** API.\n", "\n", "See [this table](train-framework-catalog) for a full catalog of frameworks that AIR supports out of the box.\n", "\n", diff --git a/doc/source/ray-air/examples/feast_example.ipynb b/doc/source/ray-air/examples/feast_example.ipynb index 25462ab455ea..3631138f01ea 100644 --- a/doc/source/ray-air/examples/feast_example.ipynb +++ b/doc/source/ray-air/examples/feast_example.ipynb @@ -1062,7 +1062,7 @@ "source": [ "## Define Preprocessors\n", "\n", - "[Preprocessor](https://docs.ray.io/en/latest/ray-air/getting-started.html#preprocessors) does last mile processing on Ray Datastreams before feeding into training model." + "[Preprocessor](https://docs.ray.io/en/latest/ray-air/getting-started.html#preprocessors) does last mile processing on Ray Data before feeding into training model." 
] }, { diff --git a/doc/source/ray-air/examples/gptj_batch_prediction.ipynb b/doc/source/ray-air/examples/gptj_batch_prediction.ipynb index 64b3b27fd888..3ddc7342af02 100644 --- a/doc/source/ray-air/examples/gptj_batch_prediction.ipynb +++ b/doc/source/ray-air/examples/gptj_batch_prediction.ipynb @@ -95,7 +95,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Since we will be using a pretrained model from Hugging Face hub, the simplest way is to use {meth}`map_batches ` with a [callable class UDF](transform_datasets_callable_classes). This will allow us to save time by initializing a model just once and then feed it multiple batches of data." + "Since we will be using a pretrained model from Hugging Face hub, the simplest way is to use {meth}`map_batches ` with a [callable class UDF](transform_datastreams_callable_classes). This will allow us to save time by initializing a model just once and then feed it multiple batches of data." ] }, { diff --git a/doc/source/ray-air/examples/gptj_deepspeed_fine_tuning.ipynb b/doc/source/ray-air/examples/gptj_deepspeed_fine_tuning.ipynb index 3a7f92378434..adb429bcb89a 100644 --- a/doc/source/ray-air/examples/gptj_deepspeed_fine_tuning.ipynb +++ b/doc/source/ray-air/examples/gptj_deepspeed_fine_tuning.ipynb @@ -308,7 +308,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "We will use [Ray Data](datasets) for distributed preprocessing and data ingestion. We can easily convert the dataset obtained from Hugging Face Hub to Ray Data by using {meth}`ray.data.from_huggingface`." + "We will use [Ray Data](data) for distributed preprocessing and data ingestion. We can easily convert the dataset obtained from Hugging Face Hub to Ray Data by using {meth}`ray.data.from_huggingface`." 
] }, { diff --git a/doc/source/ray-air/examples/huggingface_text_classification.ipynb b/doc/source/ray-air/examples/huggingface_text_classification.ipynb index 59d25e4a5164..6031e7683c0a 100644 --- a/doc/source/ray-air/examples/huggingface_text_classification.ipynb +++ b/doc/source/ray-air/examples/huggingface_text_classification.ipynb @@ -433,7 +433,7 @@ "id": "256fOuzjhYbY" }, "source": [ - "For Ray AIR, instead of using 🤗 Dataset objects directly, we will convert them to [Ray Datastreams](https://docs.ray.io/en/latest/data/dataset.html). Both are backed by Arrow tables, so the conversion is straightforward. We will use the built-in `ray.data.from_huggingface` function." + "For Ray AIR, instead of using 🤗 Dataset objects directly, we will convert them to [Ray Data](https://docs.ray.io/en/latest/data/data.html). Both are backed by Arrow tables, so the conversion is straightforward. We will use the built-in `ray.data.from_huggingface` function." ] }, { diff --git a/doc/source/ray-air/examples/pytorch_resnet_batch_prediction.ipynb b/doc/source/ray-air/examples/pytorch_resnet_batch_prediction.ipynb index c30cfc3548d2..92c3152024c5 100644 --- a/doc/source/ray-air/examples/pytorch_resnet_batch_prediction.ipynb +++ b/doc/source/ray-air/examples/pytorch_resnet_batch_prediction.ipynb @@ -304,7 +304,7 @@ "source": [ "## Build a BatchPredictor\n", "\n", - "Now that we have our dataset loaded and preprocessed with [Ray Data](datasets), we're ready to construct our {class}`BatchPredictor `! A {class}`BatchPredictor ` takes a checkpoint and a predictor class (e.g., {class}`~ray.train.torch.TorchPredictor`, {class}`~ray.train.tensorflow.TensorflowPredictor`) and provides an interface to run batch prediction on Ray {class}`~ray.data.Datastream`s. It will distribute the inference workload across multiple workers when calling `predict()` and run prediction on multiple shards of data in parallel. 
You can find more details in [Using Predictors for Inference](air-predictors).\n", + "Now that we have our dataset loaded and preprocessed with [Ray Data](data), we're ready to construct our {class}`BatchPredictor `! A {class}`BatchPredictor ` takes a checkpoint and a predictor class (e.g., {class}`~ray.train.torch.TorchPredictor`, {class}`~ray.train.tensorflow.TensorflowPredictor`) and provides an interface to run batch prediction on Ray {class}`~ray.data.Datastream`s. It will distribute the inference workload across multiple workers when calling `predict()` and run prediction on multiple shards of data in parallel. You can find more details in [Using Predictors for Inference](air-predictors).\n", "\n", "For the demo, we'll directly load a pretrained ResNet model from `torchvision.models` and construct a {class}`~ray.train.torch.TorchCheckpoint` which includes the preprocessor. You can also load your own Ray AIR checkpoint from your previous Train/Tune experiments. You can find more details about checkpoint loading at the [AIR `Checkpoint` API reference](air-checkpoint-ref)." ] @@ -391,7 +391,7 @@ "source": [ "## Evaluating Prediction Accuracy\n", "\n", - "`BatchPredictor.predict()` will return a Ray Datastream with a column of model output with key `\"predictions\"`, and all columns specified in `keep_columns`.\n", + "`BatchPredictor.predict()` will return a Datastream with a column of model output with key `\"predictions\"`, and all columns specified in `keep_columns`.\n", "\n", "In this example, the output of the ResNet model is a 1000-dimensional tensor containing the logits of each class. 
We'll measure accuracy with Top-1 and Top-5 accuracy.\n", "(Top-N accuracy: The percentage of predictions where the true label falls in the top N predicted classes.)" diff --git a/doc/source/ray-air/examples/pytorch_tabular_starter.py b/doc/source/ray-air/examples/pytorch_tabular_starter.py index 1e0c4e9d780c..4df2d99631a7 100644 --- a/doc/source/ray-air/examples/pytorch_tabular_starter.py +++ b/doc/source/ray-air/examples/pytorch_tabular_starter.py @@ -54,7 +54,7 @@ def train_loop_per_worker(config): epochs = config["num_epochs"] num_features = config["num_features"] - # Get the Ray Datastream shard for this data parallel worker, + # Get the Datastream shard for this data parallel worker, # and convert it to a PyTorch Dataset. train_data = session.get_dataset_shard("train") # Create model. diff --git a/doc/source/ray-air/examples/stablediffusion_batch_prediction.ipynb b/doc/source/ray-air/examples/stablediffusion_batch_prediction.ipynb index efa12042a183..e22ef72a4f46 100644 --- a/doc/source/ray-air/examples/stablediffusion_batch_prediction.ipynb +++ b/doc/source/ray-air/examples/stablediffusion_batch_prediction.ipynb @@ -89,7 +89,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Since we will be using a pretrained model from Hugging Face hub, the simplest way is to use {meth}`map_batches ` with a [callable class UDF](transform_datasets_callable_classes). This will allow us to save time by initializing a model just once and then feed it multiple batches of data." + "Since we will be using a pretrained model from Hugging Face hub, the simplest way is to use {meth}`map_batches ` with a [callable class UDF](transform_datastreams_callable_classes). This will allow us to save time by initializing a model just once and then feed it multiple batches of data." 
] }, { diff --git a/doc/source/ray-air/examples/tf_tabular_starter.py b/doc/source/ray-air/examples/tf_tabular_starter.py index ed339626b9b7..aa83346a76c0 100644 --- a/doc/source/ray-air/examples/tf_tabular_starter.py +++ b/doc/source/ray-air/examples/tf_tabular_starter.py @@ -55,8 +55,8 @@ def train_loop_per_worker(config): epochs = config["num_epochs"] num_features = config["num_features"] - # Get the Ray Datastream shard for this data parallel worker, - # and convert it to a Tensorflow Datastream. + # Get the Datastream shard for this data parallel worker, + # and convert it to a Tensorflow Dataset. train_data = session.get_dataset_shard("train") strategy = tf.distribute.MultiWorkerMirroredStrategy() diff --git a/doc/source/ray-air/examples/tfx_tabular_train_to_serve.ipynb b/doc/source/ray-air/examples/tfx_tabular_train_to_serve.ipynb index 20ee25f6b8d4..1e7dab24f552 100644 --- a/doc/source/ray-air/examples/tfx_tabular_train_to_serve.ipynb +++ b/doc/source/ray-air/examples/tfx_tabular_train_to_serve.ipynb @@ -14,7 +14,7 @@ "In this example, we showcase how to achieve the same tasks as the Keras Tutorial using [Ray AIR](https://docs.ray.io/en/latest/ray-air/getting-started.html), covering\n", "every step from data ingestion to pushing a model to serving.\n", "\n", - "1. Read a CSV into [Ray Datastream](https://docs.ray.io/en/latest/data/dataset.html).\n", + "1. Read a CSV into [Datastream](https://docs.ray.io/en/latest/data/data.html).\n", "2. Process the dataset by chaining [Ray AIR preprocessors](https://docs.ray.io/en/latest/ray-air/getting-started.html#preprocessors).\n", "3. Train the model using the TensorflowTrainer from AIR.\n", "4. Serve the model using Ray Serve and the above preprocessors." 
@@ -445,7 +445,7 @@ " Returns:\n", " A tuple containing train dataset, test data and test label.\n", " \"\"\"\n", - " # There is a native offering in Ray Datastream for split as well.\n", + " # There is a native offering in Datastream for split as well.\n", " # However, supporting stratification is a TODO there. So use\n", " # scikit-learn equivalent here.\n", " train_data, test_data = train_test_split(\n", diff --git a/doc/source/ray-air/examples/torch_detection.ipynb b/doc/source/ray-air/examples/torch_detection.ipynb index 31a9f4748184..0b95b2a483cb 100644 --- a/doc/source/ray-air/examples/torch_detection.ipynb +++ b/doc/source/ray-air/examples/torch_detection.ipynb @@ -229,11 +229,11 @@ "\n", "```\n", "\n", - "[Ray Datasets](datasets) lets you read and preprocess data in parallel. Datasets doesn't\n", + "[Ray Data](data) lets you read and preprocess data in parallel. Ray Data doesn't\n", "have built-in support for VOC-style annotations, so you'll need to define a custom\n", "datasource.\n", "\n", - "A Datasource is an object that reads data of a particular type. For example, Datasets\n", + "A Datasource is an object that reads data of a particular type. For example, Ray Data\n", "implements a Datasource that reads CSV files. Your datasource will parse labels and\n", "bounding boxes from XML files. Later, you'll read the corresponding images.\n", "\n", @@ -749,7 +749,7 @@ "Stage 0: 100%|██████████| 1/1 [00:03<00:00, 3.96s/it]2023-03-01 13:07:29,436\tINFO bulk_executor.py:41 -- Executing DAG InputDataBuffer[Input] -> TaskPoolMapOperator[TorchVisionPreprocessor]\n", "(PipelineSplitExecutorCoordinator pid=191352) \n", "Stage 0: : 2it [00:08, 4.31s/it] 2023-03-01 13:07:33,990\tINFO bulk_executor.py:41 -- Executing DAG InputDataBuffer[Input] -> TaskPoolMapOperator[TorchVisionPreprocessor]\n", - "(RayTrainWorker pid=175612) 2023-03-01 13:07:34,394\tWARNING plan.py:527 -- Warning: The Ray cluster currently does not have any available CPUs. 
The Dataset job will hang unless more CPUs are freed up. A common reason is that cluster resources are used by Actors or Tune trials; see the following link for more details: https://docs.ray.io/en/master/data/dataset-internals.html#datasets-and-tune\n", + "(RayTrainWorker pid=175612) 2023-03-01 13:07:34,394\tWARNING plan.py:527 -- Warning: The Ray cluster currently does not have any available CPUs. The Dataset job will hang unless more CPUs are freed up. A common reason is that cluster resources are used by Actors or Tune trials; see the following link for more details: https://docs.ray.io/en/master/data/dataset-internals.html#data-and-tune\n", "(PipelineSplitExecutorCoordinator pid=191352) \n", "Stage 0: : 3it [00:13, 4.48s/it]2023-03-01 13:07:38,660\tINFO bulk_executor.py:41 -- Executing DAG InputDataBuffer[Input] -> TaskPoolMapOperator[TorchVisionPreprocessor]\n", "(RayTrainWorker pid=175612) /tmp/ipykernel_160001/3839218723.py:23: UserWarning: The given NumPy array is not writable, and PyTorch does not support non-writable tensors. This means writing to this tensor will result in undefined behavior. You may want to copy the array to protect its data or make it writable before converting it to a tensor. This type of warning will be suppressed for the rest of this program. (Triggered internally at ../torch/csrc/utils/tensor_numpy.cpp:199.)\n", @@ -800,7 +800,7 @@ "(RayTrainWorker pid=175612) 2023-03-01 13:07:41,980\tINFO distributed.py:1027 -- Reducer buckets have been rebuilt in this iteration.\n", "(PipelineSplitExecutorCoordinator pid=191352) \n", "Stage 0: : 4it [01:11, 25.77s/it]2023-03-01 13:08:37,068\tINFO bulk_executor.py:41 -- Executing DAG InputDataBuffer[Input] -> TaskPoolMapOperator[TorchVisionPreprocessor]\n", - "(RayTrainWorker pid=175614) 2023-03-01 13:08:37,464\tWARNING plan.py:527 -- Warning: The Ray cluster currently does not have any available CPUs. The Dataset job will hang unless more CPUs are freed up. 
A common reason is that cluster resources are used by Actors or Tune trials; see the following link for more details: https://docs.ray.io/en/master/data/dataset-internals.html#datasets-and-tune\n", + "(RayTrainWorker pid=175614) 2023-03-01 13:08:37,464\tWARNING plan.py:527 -- Warning: The Ray cluster currently does not have any available CPUs. The Dataset job will hang unless more CPUs are freed up. A common reason is that cluster resources are used by Actors or Tune trials; see the following link for more details: https://docs.ray.io/en/master/data/dataset-internals.html#data-and-tune\n", "2023-03-01 13:08:45,074\tINFO tune.py:825 -- Total run time: 125.51 seconds (125.36 seconds for the tuning loop).\n" ] } diff --git a/doc/source/ray-air/examples/torch_image_example.ipynb b/doc/source/ray-air/examples/torch_image_example.ipynb index d43dda4fcd7e..1e53e1e857d7 100644 --- a/doc/source/ray-air/examples/torch_image_example.ipynb +++ b/doc/source/ray-air/examples/torch_image_example.ipynb @@ -53,7 +53,7 @@ "\n", "We'll train our classifier on a popular image dataset called [CIFAR-10](https://www.cs.toronto.edu/~kriz/cifar.html).\n", "\n", - "First, let's load CIFAR-10 into a Ray Datastream." + "First, let's load CIFAR-10 into a Datastream." ] }, { diff --git a/doc/source/ray-air/examples/torch_incremental_learning.ipynb b/doc/source/ray-air/examples/torch_incremental_learning.ipynb index 038059889856..8589bd9f37ec 100644 --- a/doc/source/ray-air/examples/torch_incremental_learning.ipynb +++ b/doc/source/ray-air/examples/torch_incremental_learning.ipynb @@ -46,9 +46,9 @@ }, "source": [ "This example will cover the following:\n", - "1. Loading a PyTorch Dataset to Ray Datastreams\n", - "2. Create an `Iterator[ray.data.Datastreams]` abstraction to represent a stream of data to train on for incremental training.\n", - "3. Implement a custom Ray AIR preprocessor to preprocess the Datastream.\n", + "1. Loading a PyTorch Dataset to Ray Data\n", + "2. 
Create an `Iterator[ray.data.Datastream]` abstraction to represent a stream of data to train on for incremental training.\n", + "3. Implement a custom Ray AIR preprocessor to preprocess the dataset.\n", "4. Incrementally train a model using data parallel training.\n", "5. Use our trained model to perform batch prediction on test data.\n", "6. Incrementally deploying our trained model with Ray Serve and performing online prediction queries." @@ -242,19 +242,19 @@ "id": "3SVSrkqrDJuc" }, "source": [ - "## 3a: Load MNIST Dataset to a Ray Datastream\n", + "## 3a: Load MNIST Dataset to a Datastream\n", "\n", - "Let's first define a simple function that will return the original MNIST Dataset as a distributed Ray Datastream. Ray Datastreams are the standard way to load and exchange data in Ray libraries and applications, read more about them [here](https://docs.ray.io/en/latest/data/dataset.html)!\n", + "Let's first define a simple function that will return the original MNIST Dataset as a distributed Datastream. Ray Data is the standard way to load and exchange data in Ray libraries and applications, read more about it [here](https://docs.ray.io/en/latest/data/data.html)!\n", "\n", "The function in the below code snippet does the following:\n", "1. Downloads the MNIST Dataset from torchvision in-memory\n", - "2. Loads the in-memory Torch Dataset into a Ray Datastream\n", - "3. Converts the Ray Datastream into Numpy format. Instead of the Ray Datastream iterating over tuples, it will have 2 columns: \"image\" & \"label\". \n", - "This will allow us to apply built-in preprocessors to the Ray Datastream and allow Ray Datastreams to be used with Ray AIR Predictors.\n", + "2. Loads the in-memory Torch Dataset into a Datastream\n", + "3. Converts the Datastream into Numpy format. Instead of the Datastream iterating over tuples, it will have 2 columns: \"image\" & \"label\". 
\n", + "This will allow us to apply built-in preprocessors to the Datastream and allow Datastreams to be used with Ray AIR Predictors.\n", "\n", "For this example, since we are just working with MNIST dataset, which is small, we use the {py:class}`~ray.data.datasource.from_torch` which just loads the full MNIST dataset into memory.\n", "\n", - "For loading larger datasets in a parallel fashion, you should use [Ray Datastream's additional read APIs](https://docs.ray.io/en/master/data/dataset.html#supported-input-formats) to load data from parquet, csv, image files, and more!" + "For loading larger datasets in a parallel fashion, you should use [Datastream's additional read APIs](https://docs.ray.io/en/master/data/data.html#supported-input-formats) to load data from parquet, csv, image files, and more!" ] }, { @@ -308,7 +308,7 @@ "\n", "Now we can create our \"stream\" abstraction. This abstraction provides two\n", "methods (`generate_train_stream` and `generate_test_stream`) that each returns an Iterator\n", - "over Ray Datasets. Each item in this iterator contains a unique permutation of\n", + "over Ray Data. Each item in this iterator contains a unique permutation of\n", "MNIST, and is one task that we want to train on.\n", "\n", "In this example, \"the stream of tasks\" is contrived since all the data for all tasks exist already in an offline setting. For true online continual learning, you would want to implement a custom dataset iterator that reads from some stream datasource to produce new tasks. The only abstraction that's needed is `Iterator[ray.data.Datastream]`.\n", @@ -401,7 +401,7 @@ "source": [ "# Step 4: Define the logic for Training and Inference/Prediction\n", "\n", - "Now that we can get an Iterator over Ray Datasets, we can incrementally train our model in a data parallel fashion via Ray Train, while incrementally deploying our model via Ray Serve. 
Let's define some helper functions to allow us to do this!\n", + "Now that we can get an Iterator over Ray Data, we can incrementally train our model in a data parallel fashion via Ray Train, while incrementally deploying our model via Ray Serve. Let's define some helper functions to allow us to do this!\n", "\n", "If you are not familiar with data parallel training, it is a form of distributed training strategies, where we have multiple model replicas, and each replica trains on a different batch of data. After each batch, the gradients are synchronized across the replicas. This effecitively allows us to train on more data in a shorter amount of time." ] @@ -421,7 +421,7 @@ "\n", "This is just standard PyTorch training, with the difference being that we can leverage [Ray Train's utility functions](train-pytorch-integration) and [Ray AIR Sesssion](air-session-ref):\n", "- `ray.train.torch.prepare_model(...)`: This will prepare the model for distributed training by wrapping it in either PyTorch `DistributedDataParallel` or `FullyShardedDataParallel` and moving it to the correct accelerator device.\n", - "- `ray.air.session.get_dataset_shard(...)`: This will get the Ray Dataset shard for this particular Data Parallel worker.\n", + "- `ray.air.session.get_dataset_shard(...)`: This will get the Datastream shard for this particular Data Parallel worker.\n", "- `ray.air.session.report({}, checkpoint=...)`: This will tell Ray Train to persist the provided `Checkpoint` object.\n", "- `ray.air.session.get_checkpoint()`: Returns a checkpoint to resume from. This is useful for either fault tolerance purposes, or for our purposes, to continue training the same model on a new incoming dataset." 
] @@ -459,7 +459,7 @@ " optimizer = SGD(model.parameters(), lr=learning_rate, momentum=momentum)\n", " criterion = CrossEntropyLoss()\n", "\n", - " # Get the Ray Dataset shard for this data parallel worker, and convert it to a PyTorch Dataset.\n", + " # Get the Datastream shard for this data parallel worker, and convert it to a PyTorch Dataset.\n", " dataset_shard = session.get_dataset_shard(\"train\").iter_torch_batches(\n", " batch_size=batch_size,\n", " )\n", @@ -1393,7 +1393,7 @@ " # **************Batch Prediction**************************\n", "\n", " # We can do batch prediction on the test data for the tasks seen so far.\n", - " # TODO: Fix type signature in Ray Datasets\n", + " # TODO: Fix type signature in Ray Data\n", " # TODO: Fix dataset.union when used with empty list.\n", " if len(all_test_datasets_seen_so_far) > 0:\n", " full_test_dataset = test_dataset.union(*all_test_datasets_seen_so_far)\n", @@ -1480,7 +1480,7 @@ "id": "RNHsEVBHc0p2" }, "source": [ - "Let's first combine all of our datasets for each task into a single, unified Dataset" + "Let's first combine all of our datasets for each task into a single, unified dataset" ] }, { @@ -1525,7 +1525,7 @@ "id": "tJ6Oqdgvc5dn" }, "source": [ - "Then, we train a new model on the unified Dataset using the same configurations as before." + "Then, we train a new model on the unified dataset using the same configurations as before." 
] }, { diff --git a/doc/source/ray-air/examples/upload_to_comet_ml.ipynb b/doc/source/ray-air/examples/upload_to_comet_ml.ipynb index eaa4c8f39b4e..b2d2454abefd 100644 --- a/doc/source/ray-air/examples/upload_to_comet_ml.ipynb +++ b/doc/source/ray-air/examples/upload_to_comet_ml.ipynb @@ -57,7 +57,7 @@ "id": "29fcd93b", "metadata": {}, "source": [ - "We define a simple function that returns our training dataset as a Ray Datastream:" + "We define a simple function that returns our training dataset as a Datastream:" ] }, { diff --git a/doc/source/ray-air/examples/upload_to_wandb.ipynb b/doc/source/ray-air/examples/upload_to_wandb.ipynb index 8079de127b5c..7b62ff1168eb 100644 --- a/doc/source/ray-air/examples/upload_to_wandb.ipynb +++ b/doc/source/ray-air/examples/upload_to_wandb.ipynb @@ -63,7 +63,7 @@ "id": "2efa1564", "metadata": {}, "source": [ - "We define a simple function that returns our training dataset as a Ray Datastream:\n" + "We define a simple function that returns our training dataset as a Datastream:\n" ] }, { diff --git a/doc/source/ray-air/key-concepts.rst b/doc/source/ray-air/key-concepts.rst index 7058ca49455e..5f5165342901 100644 --- a/doc/source/ray-air/key-concepts.rst +++ b/doc/source/ray-air/key-concepts.rst @@ -9,16 +9,16 @@ Here, we cover the main concepts in AIR. :local: -Datasets --------- +Datastreams +----------- -:ref:`Ray Datasets ` are the standard way to load and exchange data in Ray AIR. In AIR, Datasets are used extensively for data loading, preprocessing, and batch inference. +:ref:`Ray Data ` is the standard way to load and exchange data in Ray AIR. It provides a `Datastream ` concept which is used extensively for data loading, preprocessing, and batch inference. Preprocessors ------------- -Preprocessors are primitives that can be used to transform input data into features. Preprocessors operate on :ref:`Datasets `, which makes them scalable and compatible with a variety of datasources and dataframe libraries. 
+Preprocessors are primitives that can be used to transform input data into features. Preprocessors operate on :ref:`Datastreams `, which makes them scalable and compatible with a variety of datasources and dataframe libraries. A Preprocessor is fitted during Training, and applied at runtime in both Training and Serving on data batches in the same way. AIR comes with a collection of built-in preprocessors, and you can also define your own with simple templates. @@ -33,7 +33,7 @@ See the documentation on :ref:`Preprocessors `. Trainers -------- -Trainers are wrapper classes around third-party training frameworks such as XGBoost and Pytorch. They are built to help integrate with core Ray actors (for distribution), Ray Tune, and Ray Datasets. +Trainers are wrapper classes around third-party training frameworks such as XGBoost and Pytorch. They are built to help integrate with core Ray actors (for distribution), Ray Tune, and Ray Data. See the documentation on :ref:`Trainers `. diff --git a/doc/source/ray-air/predictors.rst b/doc/source/ray-air/predictors.rst index 994eee76287b..e281e18554d3 100644 --- a/doc/source/ray-air/predictors.rst +++ b/doc/source/ray-air/predictors.rst @@ -404,7 +404,7 @@ Implement `_predict_numpy` or `_predict_pandas` batch of NumPy data. It accepts a ``np.ndarray`` or ``dict[str, np.ndarray]`` as input and returns a ``np.ndarray`` or ``dict[str, np.ndarray]`` as output. - The input type is determined by the type of :class:`~ray.data.Dataset` passed to + The input type is determined by the type of :class:`~ray.data.Datastream` passed to :meth:`BatchPredictor.predict `. If your dataset has columns, the input is a ``dict``; otherwise, the input is a ``np.ndarray``. @@ -445,7 +445,7 @@ Perform inference You can also use any of the out-of-the-box preprocessors instead of implementing your own: :ref:`air-preprocessor-ref`. 2. Create a :class:`~ray.train.batch_predictor.BatchPredictor` from your checkpoint. - 3. 
Read sample images into a :class:`~ray.data.Dataset`. + 3. Read sample images into a :class:`~ray.data.Datastream`. 4. Call :class:`~ray.train.batch_predictor.BatchPredictor.predict` to classify the images in the dataset. @@ -461,7 +461,7 @@ Perform inference 1. Create a :class:`~ray.train.batch_predictor.BatchPredictor` from your checkpoint. - 2. Read the Guerry dataset into a :class:`~ray.data.Dataset`. + 2. Read the Guerry dataset into a :class:`~ray.data.Datastream`. 3. Call :class:`~ray.train.batch_predictor.BatchPredictor.predict` to perform regression on the samples in the dataset. diff --git a/doc/source/ray-air/preprocessors.rst b/doc/source/ray-air/preprocessors.rst index 8b562e7a1525..bc9bcad8b37c 100644 --- a/doc/source/ray-air/preprocessors.rst +++ b/doc/source/ray-air/preprocessors.rst @@ -15,7 +15,7 @@ Ray AIR provides several common preprocessors out of the box and interfaces to d Overview -------- -The most common way of using a preprocessor is by passing it as an argument to the constructor of a :ref:`Trainer ` in conjunction with a :ref:`Ray Dataset `. +The most common way of using a preprocessor is by passing it as an argument to the constructor of a :ref:`Trainer ` in conjunction with a :ref:`Ray Data `. For example, the following code trains a model with a preprocessor that normalizes the data. .. literalinclude:: doc_code/preprocessors.py @@ -25,7 +25,7 @@ For example, the following code trains a model with a preprocessor that normaliz The ``Preprocessor`` class with four public methods that can we used separately from a trainer: -#. ``fit()``: Compute state information about a :class:`Dataset ` (e.g., the mean or standard deviation of a column) +#. ``fit()``: Compute state information about a :class:`Dataset ` (e.g., the mean or standard deviation of a column) and save it to the ``Preprocessor``. This information is used to perform ``transform()``, and the method is typically called on a training dataset. #. 
``transform()``: Apply a transformation to a ``Dataset``. @@ -183,7 +183,7 @@ Ray AIR provides a handful of preprocessors out of the box. .. autosummary:: :nosignatures: - ray.data.Dataset.train_test_split + ray.data.Datastream.train_test_split Which preprocessor should you use? ---------------------------------- diff --git a/doc/source/ray-air/trainers.rst b/doc/source/ray-air/trainers.rst index 3c5182b95977..ea08fd7352b2 100644 --- a/doc/source/ray-air/trainers.rst +++ b/doc/source/ray-air/trainers.rst @@ -60,7 +60,7 @@ You can provide multiple datasets to a trainer via the ``datasets`` parameter. If ``datasets`` includes a training dataset (denoted by the "train" key), then it will be split into multiple dataset shards, with each worker training on a single shard. All other datasets will not be split. You can access the data shard within a worker via :func:`~ray.air.session.get_dataset_shard()`, and use -:meth:`~ray.data.Dataset.to_tf` or `iter_torch_batches` to generate batches of Tensorflow or Pytorch tensors. +:meth:`~ray.data.Datastream.to_tf` or `iter_torch_batches` to generate batches of Tensorflow or Pytorch tensors. You can read more about :ref:`data ingest ` here. Read more about :ref:`Ray Train's Deep Learning Trainers `. diff --git a/doc/source/ray-air/tuner.rst b/doc/source/ray-air/tuner.rst index 03fb0f6deffd..3bba0b1d47a6 100644 --- a/doc/source/ray-air/tuner.rst +++ b/doc/source/ray-air/tuner.rst @@ -82,7 +82,7 @@ Read more about :ref:`Tune search spaces here `. You can use a Tuner to tune most arguments and configurations in Ray AIR, including but not limited to: -- Ray Datasets +- Ray Data - Preprocessors - Scaling configurations - and other hyperparameters. 
diff --git a/doc/source/ray-core/_examples/datasets_train/datasets_train.py b/doc/source/ray-core/_examples/datasets_train/datasets_train.py index 62e6fb79d73b..09874f87a909 100644 --- a/doc/source/ray-core/_examples/datasets_train/datasets_train.py +++ b/doc/source/ray-core/_examples/datasets_train/datasets_train.py @@ -120,7 +120,7 @@ def create_data_chunk(n, d, seed, include_label=False): # os.system("aws s3 sync ./inference s3://cuj-big-data/inference") -def read_dataset(path: str) -> ray.data.Dataset: +def read_dataset(path: str) -> ray.data.Datastream: print(f"reading data from {path}") return ray.data.read_parquet(path).random_shuffle() @@ -139,18 +139,18 @@ def __init__(self): self.standard_stats = None def preprocess_train_data( - self, ds: ray.data.Dataset - ) -> Tuple[ray.data.Dataset, ray.data.Dataset]: + self, ds: ray.data.Datastream + ) -> Tuple[ray.data.Datastream, ray.data.Datastream]: print("\n\nPreprocessing training dataset.\n") return self._preprocess(ds, False) - def preprocess_inference_data(self, df: ray.data.Dataset) -> ray.data.Dataset: + def preprocess_inference_data(self, df: ray.data.Datastream) -> ray.data.Datastream: print("\n\nPreprocessing inference dataset.\n") return self._preprocess(df, True)[0] def _preprocess( - self, ds: ray.data.Dataset, inferencing: bool - ) -> Tuple[ray.data.Dataset, ray.data.Dataset]: + self, ds: ray.data.Datastream, inferencing: bool + ) -> Tuple[ray.data.Datastream, ray.data.Datastream]: print("\nStep 1: Dropping nulls, creating new_col, updating feature_1\n") def batch_transformer(df: pd.DataFrame): diff --git a/doc/source/ray-core/examples/batch_prediction.ipynb b/doc/source/ray-core/examples/batch_prediction.ipynb index 382979127766..9cb708dd8212 100644 --- a/doc/source/ray-core/examples/batch_prediction.ipynb +++ b/doc/source/ray-core/examples/batch_prediction.ipynb @@ -213,7 +213,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Note that the ActorPool is fixed in size, unlike task-based 
approach where the number of parallel tasks can be dynamic (as long as it's not exceeding max_in_flight_tasks). To have autoscaling actor pool, you will need to use the {doc}`Ray Datasets batch prediction `." + "Note that the ActorPool is fixed in size, unlike task-based approach where the number of parallel tasks can be dynamic (as long as it's not exceeding max_in_flight_tasks). To have autoscaling actor pool, you will need to use the {doc}`Ray Data batch prediction `." ] }, { diff --git a/doc/source/ray-core/patterns/pipelining.rst b/doc/source/ray-core/patterns/pipelining.rst index cf9e63bdb2ce..e11e26be2455 100644 --- a/doc/source/ray-core/patterns/pipelining.rst +++ b/doc/source/ray-core/patterns/pipelining.rst @@ -7,7 +7,7 @@ you can use the `pipelining .. note:: Pipelining is an important technique to improve the performance and is heavily used by Ray libraries. - See :ref:`Ray Dataset pipelines ` as an example. + See :ref:`DatasetPipelines ` as an example. .. figure:: ../images/pipelining.svg diff --git a/doc/source/ray-overview/getting-started.md b/doc/source/ray-overview/getting-started.md index 61223c8f2563..3180513face8 100644 --- a/doc/source/ray-overview/getting-started.md +++ b/doc/source/ray-overview/getting-started.md @@ -63,7 +63,7 @@ pip install "ray[air]" `````{dropdown} Efficiently process your data into features. -Load data into a ``Dataset``. +Load data into a ``Datastream``. ```{literalinclude} ../ray-air/examples/xgboost_starter.py :language: python @@ -139,11 +139,11 @@ Use the trained model for batch prediction with a ``BatchPredictor``. Ray has a rich ecosystem of libraries and frameworks built on top of it. Simply click on the dropdowns below to see examples of our most popular libraries. -`````{dropdown} ray Data: Creating and Transforming Datasets +`````{dropdown} ray Data: Creating and Transforming Datastreams :animate: fade-in-slide-down -Ray Datasets are the standard way to load and exchange data in Ray libraries and applications. 
-Datasets provide basic distributed data transformations such as `map`, `filter`, and `repartition`. +Ray Data is the standard way to load and exchange data in Ray libraries and applications. +Ray Data provides basic distributed data transformations such as `map`, `filter`, and `repartition`. They are compatible with a variety of file formats, datasources, and distributed frameworks. ````{note} @@ -154,8 +154,8 @@ pip install "ray[data]" dask ``` ```` -Get started by creating Datasets from synthetic data using ``ray.data.range()`` and ``ray.data.from_items()``. -Datasets can hold either plain Python objects (schema is a Python type), or Arrow records (schema is Arrow). +Get started by creating a Datastream from synthetic data using ``ray.data.range()`` and ``ray.data.from_items()``. +A Datastream can hold either plain Python objects (schema is a Python type), or Arrow records (schema is Arrow). ```{literalinclude} ../data/doc_code/quick_start.py :language: python @@ -163,18 +163,18 @@ Datasets can hold either plain Python objects (schema is a Python type), or Arro :end-before: __create_from_python_end__ ``` -Datasets can be created from files on local disk or remote datasources such as S3. Any filesystem +Datastreams can be created from files on local disk or remote datasources such as S3. Any filesystem [supported by pyarrow](http://arrow.apache.org/docs/python/generated/pyarrow.fs.FileSystem.html) can be used to specify file locations. -You can also create a ``Dataset`` from existing data in the Ray object store or Ray-compatible distributed DataFrames: +You can also create a ``Datastream`` from existing data in the Ray object store or Ray-compatible distributed DataFrames: ```{literalinclude} ../data/doc_code/quick_start.py :language: python :start-after: __create_from_files_begin__ :end-before: __create_from_files_end__ ``` -Datasets can be transformed in parallel using ``.map()``. +Datastreams can be transformed in parallel using ``.map()``. 
Transformations are executed *eagerly* and block until the operation is finished. -Datasets also supports ``.filter()`` and ``.flat_map()``. +Datastreams also supports ``.filter()`` and ``.flat_map()``. ```{literalinclude} ../data/doc_code/quick_start.py :language: python @@ -182,7 +182,7 @@ Datasets also supports ``.filter()`` and ``.flat_map()``. :end-before: __data_transform_end__ ``` -```{link-button} ../data/dataset +```{link-button} ../data/data :type: ref :text: Learn more about Ray Data :classes: btn-outline-primary btn-block diff --git a/doc/source/ray-overview/index.md b/doc/source/ray-overview/index.md index 2839ad014e28..38e3e7baa7f2 100644 --- a/doc/source/ray-overview/index.md +++ b/doc/source/ray-overview/index.md @@ -52,7 +52,7 @@ Ray's unified compute framework comprises of three layers: **Scale machine learning workloads** ^^^ Build ML applications with a toolkit of libraries for distributed -[data processing](../data/dataset.rst), +[data processing](../data/data.rst), [model training](../train/train.rst), [tuning](tune/../index.rst), [reinforcement learning](../rllib/index.rst), @@ -97,7 +97,7 @@ or [Slurm](../cluster/vms/user-guides/community/slurm) clusters. ```` Each of [Ray AIR's](../ray-air/getting-started) five native libraries distributes a specific ML task: -- [Data](../data/dataset): Scalable, framework-agnostic data loading and transformation across training, tuning, and prediction. +- [Data](../data/data): Scalable, framework-agnostic data loading and transformation across training, tuning, and prediction. - [Train](../train/train): Distributed multi-node and multi-core model training with fault tolerance that integrates with popular training libraries. - [Tune](../tune/index): Scalable hyperparameter tuning to optimize model performance. - [Serve](../serve/index): Scalable and programmable serving to deploy models for online inference, with optional microbatching to improve performance. 
diff --git a/doc/source/ray-overview/learn-more.md b/doc/source/ray-overview/learn-more.md index 7861cb987814..6173a0eacbc7 100644 --- a/doc/source/ray-overview/learn-more.md +++ b/doc/source/ray-overview/learn-more.md @@ -25,7 +25,7 @@ Please raise an issue if any of the below links are broken, or if you'd like to ## Talks (Videos) -- [Unifying Large Scale Data Preprocessing and Machine Learning Pipelines with Ray Datasets \| PyData 2021](https://zoom.us/rec/share/0cjbk_YdCTbiTm7gNhzSeNxxTCCEy1pCDUkkjfBjtvOsKGA8XmDOx82jflHdQCUP.fsjQkj5PWSYplOTz?startTime=1635456658000) [(slides)](https://docs.google.com/presentation/d/19F_wxkpo1JAROPxULmJHYZd3sKryapkbMd0ib3ndMiU/edit?usp=sharing) +- [Unifying Large Scale Data Preprocessing and Machine Learning Pipelines with Ray Datasets \| PyData 2021](https://zoom.us/rec/share/0cjbk_YdCTbiTm7gNhzSeNxxTCCEy1pCDUkkjfBjtvOsKGA8XmDOx82jflHdQCUP.fsjQkj5PWSYplOTz?startTime=1635456658000) [(slides)](https://docs.google.com/presentation/d/19F_wxkpo1JAROPxULmJHYZd3sKryapkbMd0ib3ndMiU/edit?usp=sharing) - [Programming at any Scale with Ray \| SF Python Meetup Sept 2019](https://www.youtube.com/watch?v=LfpHyIXBhlE) - [Ray for Reinforcement Learning \| Data Council 2019](https://www.youtube.com/watch?v=Ayc0ca150HI) - [Scaling Interactive Pandas Workflows with Modin](https://www.youtube.com/watch?v=-HjLd_3ahCw) diff --git a/doc/source/ray-overview/use-cases.rst b/doc/source/ray-overview/use-cases.rst index 3d9d25e52085..a6ae09503c98 100644 --- a/doc/source/ray-overview/use-cases.rst +++ b/doc/source/ray-overview/use-cases.rst @@ -82,7 +82,7 @@ How do I do many model training on Ray? To train multiple independent models, use the Ray Tune (:ref:`Tutorial `) library. This is the recommended library for most cases. You can use Tune with your current data preprocessing pipeline if your data source fits into the memory of a single machine (node). -If you need to scale your data, or you want to plan for future scaling, use the :ref:`Ray Data ` library. 
+If you need to scale your data, or you want to plan for future scaling, use the :ref:`Ray Data ` library. Your data must be a :ref:`supported format `, to use Ray Data. Alternative solutions exist for less common cases: @@ -123,7 +123,7 @@ Learn more about many model training with the following resources. .. link-button:: /data/examples/batch_training :type: ref - :text: [Example] Batch Training with Ray Datasets + :text: [Example] Batch Training with Ray Data :classes: btn-link btn-block stretched-link batchTrainingDatasets --- :img-top: /images/tune.png diff --git a/doc/source/ray-references/glossary.rst b/doc/source/ray-references/glossary.rst index a75b366978e4..8366fd7b7b76 100644 --- a/doc/source/ray-references/glossary.rst +++ b/doc/source/ray-references/glossary.rst @@ -5,7 +5,7 @@ Ray Glossary On this page you find a list of important terminology used throughout the Ray documentation, sorted alphabetically. If you're interested in a glossary for -Ray Data specifically, please see the :ref:`Ray Datasets Glossary`. +Ray Data specifically, please see the :ref:`Ray Data Glossary`. .. glossary:: @@ -124,7 +124,7 @@ Ray Data specifically, please see the :ref:`Ray Datasets Glossary` for + :ref:`An interface used to preprocess a Datastream` for training and inference (prediction) with other AIR components. Preprocessors can be stateful, as they can be fitted on the training dataset before being used to transform the training and evaluation datasets. diff --git a/doc/source/rllib/rllib-offline.rst b/doc/source/rllib/rllib-offline.rst index 3917e321bac2..aeee5aae8568 100644 --- a/doc/source/rllib/rllib-offline.rst +++ b/doc/source/rllib/rllib-offline.rst @@ -220,17 +220,17 @@ Scaling I/O throughput Similar to scaling online training, you can scale offline I/O throughput by increasing the number of RLlib workers via the ``num_workers`` config. Each worker accesses offline storage independently in parallel, for linear scaling of I/O throughput. 
Within each read worker, files are chosen in random order for reads, but file contents are read sequentially. -Ray Dataset Integration --------------------------- +Ray Data Integration +-------------------- RLlib has experimental support for reading/writing training samples from/to large offline datasets using -`Ray Dataset `__. -We support JSON and Parquet files today. Other file formats supported by Dataset can also be easily added. +`Ray Data `__. +We support JSON and Parquet files today. Other file formats supported by Ray Data can also be easily added. Unlike JSON input, a single dataset can be automatically sharded and replayed by multiple rollout workers by simply specifying the desired num_workers config. -To load sample data using Dataset, specify input and input_config keys like the following: +To load sample data using Datastream, specify input and input_config keys like the following: .. code-block:: python @@ -243,14 +243,14 @@ To load sample data using Dataset, specify input and input_config keys like the "path": "/path/to/json_dir/", # Num of tasks reading dataset in parallel, default is num_workers. "parallelism": 3, - # Dataset allocates 0.5 CPU for each reader by default. + # Datastream allocates 0.5 CPU for each reader by default. # Adjust this value based on the size of your offline dataset. "num_cpus_per_read_task": 0.5, } ... } -To write sample data to JSON or Parquet files using Dataset, specify output and output_config keys like the following: +To write sample data to JSON or Parquet files using Datastream, specify output and output_config keys like the following: .. code-block:: python @@ -276,7 +276,7 @@ ensures that the ``infos`` dictionary, as returned by the RL environment, is inc .. note:: This setting is only relevant for the TensorFlow based agents, for PyTorch agents the ``infos`` data is always stored. 
-To write the ``infos`` data to JSON or Parquet files using Dataset, specify output and output_config keys like the following: +To write the ``infos`` data to JSON or Parquet files using Datastream, specify output and output_config keys like the following: .. code-block:: python diff --git a/doc/source/serve/tutorials/serve-ml-models.md b/doc/source/serve/tutorials/serve-ml-models.md index 5e63c4779c2d..f7cd000a7ee3 100644 --- a/doc/source/serve/tutorials/serve-ml-models.md +++ b/doc/source/serve/tutorials/serve-ml-models.md @@ -208,7 +208,7 @@ Open a new Python file called `tutorial_sklearn.py`. Let's import Ray Serve and **Train a Classifier** -We will train a classifier with the [iris dataset](https://scikit-learn.org/stable/auto_examples/datasets/plot_iris_dataset.html). +We will train a classifier with the [iris dataset](https://scikit-learn.org/stable/auto_examples/datasets/plot_iris_dataset.html). First, let's instantiate a `GradientBoostingClassifier` loaded from Scikit-Learn. diff --git a/doc/source/templates/01_batch_inference/README.md b/doc/source/templates/01_batch_inference/README.md index e87d6a5a92f4..af76ebcf9ec1 100644 --- a/doc/source/templates/01_batch_inference/README.md +++ b/doc/source/templates/01_batch_inference/README.md @@ -1,7 +1,7 @@ # Scaling Batch Inference with Ray Data This template is a quickstart to using [Ray -Data](https://docs.ray.io/en/latest/data/dataset.html) for batch +Data](https://docs.ray.io/en/latest/data/data.html) for batch inference. Ray Data is one of many libraries under the [Ray AI Runtime](https://docs.ray.io/en/latest/ray-air/getting-started.html). See [this blog @@ -16,12 +16,12 @@ to help you build your own application! At a high level, this template will: 1. [Load your dataset using Ray - Data.](https://docs.ray.io/en/latest/data/creating-datasets.html) + Data.](https://docs.ray.io/en/latest/data/creating-datastreams.html) 2. 
[Preprocess your dataset before feeding it to your - model.](https://docs.ray.io/en/latest/data/transforming-datasets.html) + model.](https://docs.ray.io/en/latest/data/transforming-datastreams.html) 3. [Initialize your model and perform inference on a shard of your dataset with a remote - actor.](https://docs.ray.io/en/latest/data/transforming-datasets.html#callable-class-udfs) + actor.](https://docs.ray.io/en/latest/data/transforming-datastreams.html#callable-class-udfs) 4. [Save your prediction results.](https://docs.ray.io/en/latest/data/api/input_output.html) diff --git a/doc/source/templates/01_batch_inference/batch_inference.ipynb b/doc/source/templates/01_batch_inference/batch_inference.ipynb index a4bf225d399b..052813e94b6a 100644 --- a/doc/source/templates/01_batch_inference/batch_inference.ipynb +++ b/doc/source/templates/01_batch_inference/batch_inference.ipynb @@ -8,14 +8,14 @@ "source": [ "# Scaling Batch Inference with Ray Data\n", "\n", - "This template is a quickstart to using [Ray Data](https://docs.ray.io/en/latest/data/dataset.html) for batch inference. Ray Data is one of many libraries under the [Ray AI Runtime](https://docs.ray.io/en/latest/ray-air/getting-started.html). See [this blog post](https://www.anyscale.com/blog/model-batch-inference-in-ray-actors-actorpool-and-datasets) for more information on why and how you should perform batch inference with Ray!\n", + "This template is a quickstart to using [Ray Data](https://docs.ray.io/en/latest/data/data.html) for batch inference. Ray Data is one of many libraries under the [Ray AI Runtime](https://docs.ray.io/en/latest/ray-air/getting-started.html). 
See [this blog post](https://www.anyscale.com/blog/model-batch-inference-in-ray-actors-actorpool-and-datasets) for more information on why and how you should perform batch inference with Ray!\n", "\n", "This template walks through GPU batch prediction on an image dataset using a PyTorch model, but the framework and data format are there just to help you build your own application!\n", "\n", "At a high level, this template will:\n", - "1. [Load your dataset using Ray Data.](https://docs.ray.io/en/latest/data/creating-datasets.html)\n", - "2. [Preprocess your dataset before feeding it to your model.](https://docs.ray.io/en/latest/data/transforming-datasets.html)\n", - "3. [Initialize your model and perform inference on a shard of your dataset with a remote actor.](https://docs.ray.io/en/latest/data/transforming-datasets.html#callable-class-udfs)\n", + "1. [Load your dataset using Ray Data.](https://docs.ray.io/en/latest/data/creating-datastreams.html)\n", + "2. [Preprocess your dataset before feeding it to your model.](https://docs.ray.io/en/latest/data/transforming-datastreams.html)\n", + "3. [Initialize your model and perform inference on a shard of your dataset with a remote actor.](https://docs.ray.io/en/latest/data/transforming-datastreams.html#callable-class-udfs)\n", "4. [Save your prediction results.](https://docs.ray.io/en/latest/data/api/input_output.html)\n", "\n", "> Slot in your code below wherever you see the ✂️ icon to build a many model training Ray application off of this template!" 
@@ -107,7 +107,7 @@ "metadata": {}, "outputs": [], "source": [ - "def load_ray_dataset() -> ray.data.Dataset:\n", + "def load_ray_dataset() -> ray.data.Datastream:\n", " from ray.data.datasource.partitioning import Partitioning\n", "\n", " s3_uri = \"s3://anonymous@air-example-data-2/imagenette2/val/\"\n", diff --git a/doc/source/train/dl_guide.rst b/doc/source/train/dl_guide.rst index d9af4f66ae79..045fa0f930a9 100644 --- a/doc/source/train/dl_guide.rst +++ b/doc/source/train/dl_guide.rst @@ -92,7 +92,7 @@ training. Then, use the ``prepare_data_loader`` function to automatically add a ``DistributedSampler`` to your ``DataLoader`` - and move the batches to the right device. This step is not necessary if you are passing in Ray Datasets to your Trainer + and move the batches to the right device. This step is not necessary if you are passing in Ray Data to your Trainer (see :ref:`train-datasets`): .. code-block:: diff @@ -408,13 +408,13 @@ of the :py:class:`~ray.air.result.Result` object returned by ``Trainer.fit()``. .. _train-datasets: -Distributed Data Ingest with Ray Datasets and Ray Train +Distributed Data Ingest with Ray Data and Ray Train ------------------------------------------------------- -:ref:`Ray Datasets ` are the recommended way to work with large datasets in Ray Train. Datasets provides automatic loading, sharding, and pipelined ingest (optional) of Data across multiple Train workers. -To get started, pass in one or more datasets under the ``datasets`` keyword argument for Trainer (e.g., ``Trainer(datasets={...})``). +:ref:`Ray Data ` is the recommended way to work with large datasets in Ray Train. Ray Data provides automatic loading, sharding, and streamed ingest of Data across multiple Train workers. +To get started, pass in one or more datastreams under the ``datasets`` keyword argument for Trainer (e.g., ``Trainer(datasets={...})``). 
-Here's a simple code overview of the Datasets integration: +Here's a simple code overview of the Ray Data integration: .. code-block:: python diff --git a/doc/source/train/examples/lightning/lightning_cola_advanced.ipynb b/doc/source/train/examples/lightning/lightning_cola_advanced.ipynb index 2c6b85e8dc40..a5af0f3e4f1c 100644 --- a/doc/source/train/examples/lightning/lightning_cola_advanced.ipynb +++ b/doc/source/train/examples/lightning/lightning_cola_advanced.ipynb @@ -11,7 +11,7 @@ "\n", ":::{note}\n", "\n", - "This is an advanced example for {class}`LightningTrainer `, which demonstrates how to use LightningTrainer with `Ray Dataset` and `Batch Predictor`. \n", + "This is an advanced example for {class}`LightningTrainer `, which demonstrates how to use LightningTrainer with `Datastream` and `Batch Predictor`. \n", "\n", "If you just want to quickly convert your existing PyTorch Lightning scripts into Ray AIR, you can refer to this starter example:\n", "{ref}`Train a Pytorch Lightning Image Classifier `.\n", @@ -20,7 +20,7 @@ "\n", "In this demo, we will introduce how to finetune a text classifier on [CoLA(The Corpus of Linguistic Acceptability)](https://nyu-mll.github.io/CoLA/) datasets with pretrained BERT. \n", "In particular, we will:\n", - "- Create Ray Datasets from the original CoLA dataset.\n", + "- Create Ray Data from the original CoLA dataset.\n", "- Define a preprocessor to tokenize the sentences.\n", "- Finetune a BERT model using LightningTrainer.\n", "- Construct a BatchPredictor with the checkpoint and preprocessor.\n", @@ -61,9 +61,9 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## 1. Pre-process CoLA Dataset\n", + "## 1. Pre-process CoLA Datastream\n", "\n", - "CoLA is a binary sentence classification task with 10.6K training examples. First, we download the dataset and metrics using the HuggingFace API, and create Ray Datasets for each split accordingly." 
+ "CoLA is a binary sentence classification task with 10.6K training examples. First, we download the dataset and metrics using the HuggingFace API, and create Ray Data for each split accordingly." ] }, { @@ -277,11 +277,11 @@ "To feed data into LightningTrainer, we need to configure the following arguments:\n", "\n", "- datasets: A dictionary of the input Ray datasets, with special keys \"train\" and \"val\".\n", - "- datasets_iter_config: The argument list of {meth}`iter_torch_batches() `. It defines the way we iterate dataset shards for each worker.\n", + "- datasets_iter_config: The argument list of {meth}`iter_torch_batches() `. It defines the way we iterate dataset shards for each worker.\n", "- preprocessor: The preprocessor that will be applied to the input dataset.\n", "\n", ":::{note}\n", - "Note that we are using Ray Dataset for data ingestion for faster preprocessing here, but you can also continue to use the native `PyTorch DataLoader` or `LightningDataModule`. See {ref}`this example `. \n", + "Note that we are using Datastream for data ingestion for faster preprocessing here, but you can also continue to use the native `PyTorch DataLoader` or `LightningDataModule`. See {ref}`this example `. \n", "\n", ":::\n", "\n", diff --git a/doc/source/train/examples/lightning/lightning_mnist_example.ipynb b/doc/source/train/examples/lightning/lightning_mnist_example.ipynb index 043dba3a08ac..4f2e534d03d5 100644 --- a/doc/source/train/examples/lightning/lightning_mnist_example.ipynb +++ b/doc/source/train/examples/lightning/lightning_mnist_example.ipynb @@ -658,7 +658,7 @@ "metadata": {}, "source": [ "## What's next?\n", - "- Use Ray Dataset for more efficient data preprocessing.\n", + "- Use Ray Data for more efficient data preprocessing.\n", "- Use {class}`BatchPredictor ` for large-scale distributed inference.\n", "- Find the best hyperparameter settings with Ray Tune." 
] diff --git a/doc/source/train/getting-started.rst b/doc/source/train/getting-started.rst index 113eb6ff607e..0e9eead57a00 100644 --- a/doc/source/train/getting-started.rst +++ b/doc/source/train/getting-started.rst @@ -10,7 +10,7 @@ Here are examples for some of the commonly used trainers: In this example we will train a model using distributed XGBoost. - First, we load the dataset from S3 using Ray Datasets and split it into a + First, we load the dataset from S3 using Ray Data and split it into a train and validation dataset. .. literalinclude:: doc_code/gbdt_user_guide.py @@ -29,7 +29,7 @@ Here are examples for some of the commonly used trainers: We then instantiate our XGBoostTrainer by passing in: - The aforementioned ``ScalingConfig``. - - The ``label_column`` refers to the column name containing the labels in the Ray Dataset + - The ``label_column`` refers to the column name containing the labels in the Datastream - The ``params`` are `XGBoost training parameters `__ .. literalinclude:: doc_code/gbdt_user_guide.py @@ -48,7 +48,7 @@ Here are examples for some of the commonly used trainers: In this example we will train a model using distributed LightGBM. - First, we load the dataset from S3 using Ray Datasets and split it into a + First, we load the dataset from S3 using Ray Data and split it into a train and validation dataset. .. literalinclude:: doc_code/gbdt_user_guide.py @@ -67,7 +67,7 @@ Here are examples for some of the commonly used trainers: We then instantiate our LightGBMTrainer by passing in: - The aforementioned ``ScalingConfig`` - - The ``label_column`` refers to the column name containing the labels in the Ray Dataset + - The ``label_column`` refers to the column name containing the labels in the Datastream - The ``params`` are core `LightGBM training parameters `__ .. 
literalinclude:: doc_code/gbdt_user_guide.py @@ -188,4 +188,4 @@ Here are examples for some of the commonly used trainers: Next Steps ---------- -* To check how your application is doing, you can use the :ref:`Ray dashboard`. \ No newline at end of file +* To check how your application is doing, you can use the :ref:`Ray dashboard`. diff --git a/doc/source/train/key-concepts.rst b/doc/source/train/key-concepts.rst index c477099d2104..f50996973ac2 100644 --- a/doc/source/train/key-concepts.rst +++ b/doc/source/train/key-concepts.rst @@ -20,7 +20,7 @@ Trainers Trainers are responsible for executing (distributed) training runs. The output of a Trainer run is a :ref:`Result ` that contains metrics from the training run and the latest saved :ref:`Checkpoint `. -Trainers can also be configured with :ref:`Datasets ` and :ref:`Preprocessors ` for scalable data ingest and preprocessing. +Trainers can also be configured with :ref:`Datastreams ` and :ref:`Preprocessors ` for scalable data ingest and preprocessing. Deep Learning, Tree-Based, and other Trainers @@ -113,7 +113,7 @@ Each Trainer has a respective Predictor implementation that is compatible with i A predictor can be passed into a :class:`BatchPredictor ` is used to scale up prediction over a Ray cluster. -It takes a Ray Dataset as input. +It takes a Datastream as input. .. dropdown:: Example: Batch prediction with :class:`XGBoostPredictor ` diff --git a/doc/source/train/train.rst b/doc/source/train/train.rst index c79ab6ea278a..75ed1f1211d8 100644 --- a/doc/source/train/train.rst +++ b/doc/source/train/train.rst @@ -37,7 +37,7 @@ There are three broad categories of Trainers that Train offers: **Batteries included**: Train is part of :ref:`Ray AIR ` and seamlessly operates in the Ray ecosystem. -* Use :ref:`Ray Datasets ` with Train to load and process datasets both small and large. +* Use :ref:`Ray Data ` with Train to load and process datasets both small and large. 
* Use :ref:`Ray Tune ` with Train to sweep parameter grids and leverage cutting edge hyperparameter search algorithms. * Leverage the :ref:`Ray cluster launcher ` to launch autoscaling or spot instance clusters on any cloud. diff --git a/doc/source/tune/tutorials/tune-fault-tolerance.rst b/doc/source/tune/tutorials/tune-fault-tolerance.rst index eff8d9f922a0..e2ba45831338 100644 --- a/doc/source/tune/tutorials/tune-fault-tolerance.rst +++ b/doc/source/tune/tutorials/tune-fault-tolerance.rst @@ -162,8 +162,8 @@ To restore, we just need to re-specify the ``param_space`` via :meth:`Tuner.rest .. note:: - If you're tuning over :ref:`Ray Datasets `, you'll also need to re-specify them in the ``param_space``. - Ray Datasets can contain object references, so the same problems described above apply. + If you're tuning over :ref:`Ray Data `, you'll also need to re-specify them in the ``param_space``. + Ray Data can contain object references, so the same problems described above apply. See below for an example: diff --git a/doc/source/tune/tutorials/tune_get_data_in_and_out.md b/doc/source/tune/tutorials/tune_get_data_in_and_out.md index 9ee48d06fccf..e9151071458c 100644 --- a/doc/source/tune/tutorials/tune_get_data_in_and_out.md +++ b/doc/source/tune/tutorials/tune_get_data_in_and_out.md @@ -71,7 +71,7 @@ For example, passing in a large pandas DataFrame or an unserializable model obje Instead, use strings or other identifiers as your values, and initialize/load the objects inside your Trainable directly depending on those. ```{note} -[Ray Datasets](datasets_getting_started) can be used as values in the search space directly. +[Datastreams](data_getting_started) can be used as values in the search space directly. ``` In our example, we want to tune the two model hyperparameters. We also want to set the number of epochs, so that we can easily tweak it later. For the hyperparameters, we will use the `tune.uniform` distribution. 
We will also modify the `training_function` to obtain those values from the `config` dictionary. diff --git a/python/ray/air/examples/dreambooth/dataset.py b/python/ray/air/examples/dreambooth/dataset.py index 30b485b31d7c..d837525194b3 100644 --- a/python/ray/air/examples/dreambooth/dataset.py +++ b/python/ray/air/examples/dreambooth/dataset.py @@ -8,8 +8,8 @@ def get_train_dataset(args, image_resolution=512): - """Build a Ray Dataset for fine-tuning DreamBooth model.""" - # Load images into Ray Dataset + """Build a Datastream for fine-tuning DreamBooth model.""" + # Load images into Datastream instance_dataset = read_images(args.instance_images_dir) class_dataset = read_images(args.class_images_dir) diff --git a/python/ray/air/tests/test_api.py b/python/ray/air/tests/test_api.py index 940e2e2f9435..5072e45a4851 100644 --- a/python/ray/air/tests/test_api.py +++ b/python/ray/air/tests/test_api.py @@ -13,7 +13,7 @@ def training_loop(self) -> None: pass -class DummyDataset(ray.data.Dataset): +class DummyDataset(ray.data.Datastream): def __init__(self): pass diff --git a/python/ray/data/_internal/delegating_block_builder.py b/python/ray/data/_internal/delegating_block_builder.py index e5fb44155459..1232cf922e0e 100644 --- a/python/ray/data/_internal/delegating_block_builder.py +++ b/python/ray/data/_internal/delegating_block_builder.py @@ -28,7 +28,7 @@ def add(self, item: Any) -> None: check.build() self._builder = ArrowBlockBuilder() except (TypeError, pyarrow.lib.ArrowInvalid): - ctx = ray.data.DatasetContext.get_current() + ctx = ray.data.DataContext.get_current() if ctx.strict_mode: # Can also handle nested Python objects, which Arrow cannot. 
self._builder = PandasBlockBuilder() diff --git a/python/ray/data/_internal/planner/plan_from_numpy_op.py b/python/ray/data/_internal/planner/plan_from_numpy_op.py index c8317b9f4031..3c792de3c753 100644 --- a/python/ray/data/_internal/planner/plan_from_numpy_op.py +++ b/python/ray/data/_internal/planner/plan_from_numpy_op.py @@ -22,7 +22,7 @@ def get_input_data() -> List[RefBundle]: ndarray_to_block_remote = cached_remote_fn(ndarray_to_block, num_returns=2) - ctx = ray.data.DatasetContext.get_current() + ctx = ray.data.DataContext.get_current() res = [ ndarray_to_block_remote.remote(arr_ref, ctx.strict_mode) for arr_ref in op._ndarrays diff --git a/python/ray/data/_internal/table_block.py b/python/ray/data/_internal/table_block.py index b003746861a4..809137f25fda 100644 --- a/python/ray/data/_internal/table_block.py +++ b/python/ray/data/_internal/table_block.py @@ -175,7 +175,7 @@ def to_block(self) -> Block: return self._table def is_tensor_wrapper(self) -> bool: - ctx = ray.data.DatasetContext.get_current() + ctx = ray.data.DataContext.get_current() if ctx.strict_mode: return False return _is_tensor_schema(self.column_names()) diff --git a/python/ray/data/block.py b/python/ray/data/block.py index 6ee03722a951..e213ad33887b 100644 --- a/python/ray/data/block.py +++ b/python/ray/data/block.py @@ -161,7 +161,7 @@ def __call__(self, __arg: T) -> Union[U, Iterator[U]]: def _apply_strict_mode_batch_format(given_batch_format: Optional[str]) -> str: - ctx = ray.data.DatasetContext.get_current() + ctx = ray.data.DataContext.get_current() if ctx.strict_mode: if given_batch_format == "default": given_batch_format = "numpy" @@ -396,7 +396,7 @@ def batch_to_block(batch: DataBatch) -> Block: if isinstance(batch, np.ndarray): from ray.data._internal.arrow_block import ArrowBlockAccessor - ctx = ray.data.DatasetContext.get_current() + ctx = ray.data.DataContext.get_current() if ctx.strict_mode: raise StrictModeError( f"Error validating {_truncated_repr(batch)}: " @@ -444,6 
+444,15 @@ def for_block(block: Block) -> "BlockAccessor[T]": elif isinstance(block, list): from ray.data._internal.simple_block import SimpleBlockAccessor + ctx = ray.data.DataContext.get_current() + if ctx.strict_mode: + raise StrictModeError( + f"Error validating {_truncated_repr(block)}: " + "Standalone Python objects are not " + "allowed in strict mode. To use Python objects in a datastream, " + "wrap them in a dict of numpy arrays, e.g., " + "return `{'item': np.array(batch)}` instead of just `batch`." + ) return SimpleBlockAccessor(block) else: raise TypeError("Not a block type: {} ({})".format(block, type(block))) diff --git a/python/ray/data/datasource/file_meta_provider.py b/python/ray/data/datasource/file_meta_provider.py index 1242d6ba5aab..a904a6ebebaa 100644 --- a/python/ray/data/datasource/file_meta_provider.py +++ b/python/ray/data/datasource/file_meta_provider.py @@ -362,7 +362,7 @@ def _handle_read_os_error(error: OSError, paths: Union[str, List[str]]) -> str: "You can also run AWS CLI command to get more detailed error message " "(e.g., aws s3 ls ). " "See https://awscli.amazonaws.com/v2/documentation/api/latest/reference/s3/index.html " # noqa - "and https://docs.ray.io/en/latest/data/creating-datasets.html#reading-from-remote-storage " # noqa + "and https://docs.ray.io/en/latest/data/creating-datastreams.html#reading-from-remote-storage " # noqa "for more information." 
) ) diff --git a/python/ray/data/datasource/numpy_datasource.py b/python/ray/data/datasource/numpy_datasource.py index d5691ce68fa8..ade44406cf89 100644 --- a/python/ray/data/datasource/numpy_datasource.py +++ b/python/ray/data/datasource/numpy_datasource.py @@ -40,7 +40,7 @@ def _read_file(self, f: "pyarrow.NativeFile", path: str, **reader_args): data = f.readall() buf.write(data) buf.seek(0) - ctx = ray.data.DatasetContext.get_current() + ctx = ray.data.DataContext.get_current() if ctx.strict_mode: return BlockAccessor.batch_to_block( {"data": np.load(buf, allow_pickle=True)} diff --git a/python/ray/data/datastream.py b/python/ray/data/datastream.py index 0c033e1d1111..2803c0b520ca 100644 --- a/python/ray/data/datastream.py +++ b/python/ray/data/datastream.py @@ -152,7 +152,7 @@ import torch.utils.data from ray.data.dataset_pipeline import DatasetPipeline - from ray.data.grouped_dataset import GroupedData + from ray.data.grouped_data import GroupedData from ray.data._internal.execution.interfaces import Executor, NodeIdStr from ray.data._internal.torch_iterable_dataset import TorchTensorBatchType from tensorflow_metadata.proto.v0 import schema_pb2 @@ -409,7 +409,7 @@ def map_batches( type with ``batch_format``. To learn more about writing functions for :meth:`~Datastream.map_batches`, read - :ref:`writing user-defined functions `. + :ref:`writing user-defined functions `. .. tip:: If you have a small number of big blocks, it may limit parallelism. You may @@ -473,7 +473,7 @@ def map_batches( Your ``fn`` can return a different type than the input type. To learn more about supported output types, read - :ref:`user-defined function output types `. + :ref:`user-defined function output types `. >>> from typing import List >>> def map_fn(batch: pd.DataFrame) -> List[int]: @@ -554,7 +554,7 @@ def map_batches( If ``fn`` mutates its input, this will need to be ``False`` in order to avoid "assignment destination is read-only" or "buffer source array is read-only" errors. 
Default is ``False``. See - :ref:`batch format docs ` for details + :ref:`batch format docs ` for details on which format conversion always require a copy. fn_args: Positional arguments to pass to ``fn`` after the first argument. These arguments are top-level arguments to the underlying Ray task. @@ -1716,7 +1716,7 @@ def groupby(self, key: Optional[KeyFn]) -> "GroupedData[T]": Returns: A lazy GroupedData that can be aggregated later. """ - from ray.data.grouped_dataset import GroupedData + from ray.data.grouped_data import GroupedData # Always allow None since groupby interprets that as grouping all # records into a single global group. @@ -4324,7 +4324,7 @@ def default_batch_format(self) -> Type: The default batch format describes what batches of data look like. To learn more about batch formats, read - :ref:`writing user-defined functions `. + :ref:`writing user-defined functions `. Examples: @@ -4343,7 +4343,7 @@ def default_batch_format(self) -> Type: If your datastream contains a single ``numpy.ndarray`` column named ``__value__`` (as created by :func:`ray.data.from_numpy`), then the default batch format is ``np.ndarray``. For more information on tensor - formats, read the :ref:`tensor support guide `. + formats, read the :ref:`tensor support guide `. >>> ds = ray.data.range_tensor(100) >>> ds # doctest: +SKIP @@ -4589,7 +4589,7 @@ def __iter__(self): raise TypeError( "`Datastream` objects aren't iterable. To iterate records, call " "`ds.iter_rows()` or `ds.iter_batches()`. For more information, read " - "https://docs.ray.io/en/latest/data/consuming-datasets.html." + "https://docs.ray.io/en/latest/data/consuming-datastreams.html." 
) def _block_num_rows(self) -> List[int]: diff --git a/python/ray/data/grouped_dataset.py b/python/ray/data/grouped_data.py similarity index 100% rename from python/ray/data/grouped_dataset.py rename to python/ray/data/grouped_data.py diff --git a/python/ray/data/read_api.py b/python/ray/data/read_api.py index b24009ed5a98..b608b1bb523f 100644 --- a/python/ray/data/read_api.py +++ b/python/ray/data/read_api.py @@ -131,7 +131,7 @@ def from_items( Returns: MaterializedDatastream holding the items. """ - ctx = ray.data.DatasetContext.get_current() + ctx = ray.data.DataContext.get_current() if ctx.strict_mode: output_arrow_format = True @@ -227,7 +227,7 @@ def range(n: int, *, parallelism: int = -1) -> Datastream[TableRow]: Returns: Datastream producing the integers. """ - ctx = ray.data.DatasetContext.get_current() + ctx = ray.data.DataContext.get_current() if ctx.strict_mode: return read_datasource( RangeDatasource(), @@ -264,7 +264,7 @@ def range_table(n: int, *, parallelism: int = -1) -> Datastream[TableRow]: Returns: Datastream producing the integers as Arrow records. """ - ctx = ray.data.DatasetContext.get_current() + ctx = ray.data.DataContext.get_current() if ctx.strict_mode: raise DeprecationWarning( "In strict mode, use range() instead of range_table()." @@ -317,7 +317,7 @@ def range_tensor( Returns: Datastream producing the integers as Arrow tensor records. """ - ctx = ray.data.DatasetContext.get_current() + ctx = ray.data.DataContext.get_current() return read_datasource( RangeDatasource(), parallelism=parallelism, @@ -726,7 +726,7 @@ def read_images( Returns: A :class:`~ray.data.Datastream` producing tensors that represent the images at the specified paths. For information on working with tensors, read the - :ref:`tensor data guide `. + :ref:`tensor data guide `. Raises: ValueError: if ``size`` contains non-positive numbers. @@ -1378,7 +1378,7 @@ def read_binary_files( Returns: Datastream producing records read from the specified paths. 
""" - ctx = ray.data.DatasetContext.get_current() + ctx = ray.data.DataContext.get_current() if ctx.strict_mode: output_arrow_format = True @@ -1430,7 +1430,7 @@ def read_sql( Examples: For examples of reading from larger databases like MySQL and PostgreSQL, see - :ref:`Reading from SQL Databases `. + :ref:`Reading from SQL Databases `. .. testcode:: diff --git a/python/ray/data/tests/test_execution_optimizer.py b/python/ray/data/tests/test_execution_optimizer.py index 7ac1fb4e4fd5..c0c2d83ab28f 100644 --- a/python/ray/data/tests/test_execution_optimizer.py +++ b/python/ray/data/tests/test_execution_optimizer.py @@ -1084,7 +1084,7 @@ def test_from_huggingface_e2e(ray_start_regular_shared, enable_optimizer): assert isinstance(ray_datasets, dict) for ds_key, ds in ray_datasets.items(): - assert isinstance(ds, ray.data.Dataset) + assert isinstance(ds, ray.data.Datastream) # `ds.take_all()` triggers execution with new backend, which is # needed for checking operator usage below. assert len(ds.take_all()) > 0 @@ -1099,7 +1099,7 @@ def test_from_huggingface_e2e(ray_start_regular_shared, enable_optimizer): _check_usage_record(["FromHuggingFace"]) ray_dataset = ray.data.from_huggingface(data["train"]) - assert isinstance(ray_dataset, ray.data.Dataset) + assert isinstance(ray_dataset, ray.data.Datastream) assert len(ray_dataset.take_all()) > 0 assert "FromArrowRefs" in ray_dataset.stats() assert ray_dataset._plan._logical_plan.dag.name == "FromHuggingFace" diff --git a/python/ray/data/tests/test_huggingface.py b/python/ray/data/tests/test_huggingface.py index 3e46c8dc10bc..31057ac633b9 100644 --- a/python/ray/data/tests/test_huggingface.py +++ b/python/ray/data/tests/test_huggingface.py @@ -17,7 +17,7 @@ def test_huggingface(ray_start_regular_shared): ) ray_dataset = ray.data.from_huggingface(data["train"]) - assert isinstance(ray_dataset, ray.data.Dataset) + assert isinstance(ray_dataset, ray.data.Datastream) assert 
ray.get(ray_dataset.to_arrow_refs())[0].equals(data["train"].data.table) diff --git a/python/ray/data/tests/test_strict_mode.py b/python/ray/data/tests/test_strict_mode.py index 268bd0fed3d6..0b8fc9a6cabf 100644 --- a/python/ray/data/tests/test_strict_mode.py +++ b/python/ray/data/tests/test_strict_mode.py @@ -8,7 +8,7 @@ from ray.tests.conftest import * # noqa # Force strict mode. -ctx = ray.data.DatasetContext.get_current() +ctx = ray.data.DataContext.get_current() ctx.strict_mode = True diff --git a/python/ray/train/_internal/backend_executor.py b/python/ray/train/_internal/backend_executor.py index 15c99aa9ed6e..749fc3dfe71b 100644 --- a/python/ray/train/_internal/backend_executor.py +++ b/python/ray/train/_internal/backend_executor.py @@ -334,7 +334,7 @@ def start_training( Args: train_func: The training function to run on each worker. - dataset_spec: A specification for the Ray Dataset to be + dataset_spec: A specification for the Datastream to be passed to the training workers, and the logic on how to shard the Ray Dataset. checkpoint: The checkpoint data that diff --git a/python/ray/train/_internal/dataset_spec.py b/python/ray/train/_internal/dataset_spec.py index 55113e31c651..1eedcb4301c7 100644 --- a/python/ray/train/_internal/dataset_spec.py +++ b/python/ray/train/_internal/dataset_spec.py @@ -4,7 +4,7 @@ from ray.actor import ActorHandle from ray.air.config import DatasetConfig -from ray.data import Dataset, DatasetPipeline +from ray.data import Datastream, DatasetPipeline from ray.data.preprocessor import Preprocessor from ray.data.preprocessors import Chain from ray.air._internal.util import _estimate_avail_object_store_memory @@ -12,14 +12,14 @@ if TYPE_CHECKING: from ray.data import DataIterator -RayDataset = Union["Dataset", "DatasetPipeline"] +RayDataset = Union["Datastream", "DatasetPipeline"] @dataclass class RayDatasetSpec: - """Configuration for Ray Datasets to pass to the training workers. 
+ """Configuration for Datastreams to pass to the training workers. - dataset_or_dict: An optional Ray Dataset (or DatasetPipeline) or a dictionary of + dataset_or_dict: An optional Datastream (or DatasetPipeline) or a dictionary of datasets to be sharded across all the training workers, which can be accessed from the training function via ``session.get_dataset_shard()``. Multiple Datasets can be passed in as a dictionary that maps each name key to a @@ -32,7 +32,7 @@ class RayDatasetSpec: training workers (to use as locality hints). The Callable is expected to return a list of RayDatasets or a list of dictionaries of RayDatasets, with the length of the list equal to the length of the list of actor handles. - If None is provided, the provided Ray Dataset(s) will be equally split. + If None is provided, the provided Datastream(s) will be equally split. """ @@ -91,7 +91,7 @@ def get_dataset_shards( ) if not len(splits) == len(training_worker_handles): raise RuntimeError( - "The list of Datasets returned by the " + "The list of Datastreams returned by the " f"`dataset_split_fn`: {len(splits)} does not match " f"the number of training workers: {len(training_worker_handles)}" ) @@ -109,14 +109,14 @@ def __init__(self, dataset_config: Dict[str, DatasetConfig]): with all defaults filled in. """ self.dataset_config = dataset_config - self.preprocessed_datasets: Optional[Dict[str, "Dataset"]] = None + self.preprocessed_datasets: Optional[Dict[str, "Datastream"]] = None self.preprocessor: Optional["Preprocessor"] = None def preprocess_datasets( self, prep: "Preprocessor", - datasets: Dict[str, "Dataset"], - ) -> Dict[str, "Dataset"]: + datasets: Dict[str, "Datastream"], + ) -> Dict[str, "Datastream"]: """Preprocess the given datasets. This will be called prior to `get_dataset_shards()`. 
@@ -215,7 +215,7 @@ def get_dataset_shards( dataset = dataset.randomize_block_order_each_window() elif config.per_epoch_preprocessor is not None: # Reapply the per epoch preprocessor on each epoch. - if isinstance(dataset, Dataset): + if isinstance(dataset, Datastream): dataset = dataset.repeat() dataset = config.per_epoch_preprocessor._transform_pipeline(dataset) @@ -223,7 +223,7 @@ def get_dataset_shards( # If global shuffle is requested, then we should try to overlap # this with other computation, so convert to a DatasetPipeline # if not already being used. - if isinstance(dataset, Dataset): + if isinstance(dataset, Datastream): dataset = dataset.repeat() dataset = dataset.random_shuffle_each_window() diff --git a/python/ray/train/_internal/session.py b/python/ray/train/_internal/session.py index 369261901f46..d1f1c003e0ec 100644 --- a/python/ray/train/_internal/session.py +++ b/python/ray/train/_internal/session.py @@ -15,7 +15,7 @@ from ray.air._internal.util import StartTraceback, RunnerThread from ray.air.checkpoint import Checkpoint from ray.air.constants import _RESULT_FETCH_TIMEOUT, _ERROR_FETCH_TIMEOUT -from ray.data import Dataset, DatasetPipeline +from ray.data import Datastream, DatasetPipeline from ray.train._internal.accelerator import Accelerator from ray.train.constants import ( DETAILED_AUTOFILLED_KEYS, @@ -73,7 +73,7 @@ def __init__( world_size: int, # TODO(xwjiang): Legacy Ray Train trainer clean up! trial_info: Optional[TrialInfo] = None, - dataset_shard: Optional[Union[Dataset, DatasetPipeline]] = None, + dataset_shard: Optional[Union[Datastream, DatasetPipeline]] = None, # TODO(xwjiang): Legacy Ray Train trainer clean up! 
checkpoint: Optional[Checkpoint] = None, # Deprecated diff --git a/python/ray/train/base_trainer.py b/python/ray/train/base_trainer.py index 595dd4646c6c..108cbb592872 100644 --- a/python/ray/train/base_trainer.py +++ b/python/ray/train/base_trainer.py @@ -27,16 +27,16 @@ from ray._private.dict import merge_dicts if TYPE_CHECKING: - from ray.data import Dataset + from ray.data import Datastream from ray.data.preprocessor import Preprocessor from ray.tune import Trainable _TRAINER_PKL = "trainer.pkl" -# A type representing either a ray.data.Dataset or a function that returns a -# ray.data.Dataset and accepts no arguments. -GenDataset = Union["Dataset", Callable[[], "Dataset"]] +# A type representing either a ray.data.Datastream or a function that returns a +# ray.data.Datastream and accepts no arguments. +GenDataset = Union["Datastream", Callable[[], "Datastream"]] logger = logging.getLogger(__name__) @@ -83,7 +83,7 @@ class BaseTrainer(abc.ABC): - ``trainer.setup()``: Any heavyweight Trainer setup should be specified here. - ``trainer.preprocess_datasets()``: The provided - ray.data.Dataset are preprocessed with the provided + ray.data.Datastream are preprocessed with the provided ray.data.Preprocessor. - ``trainer.train_loop()``: Executes the main training logic. - Calling ``trainer.fit()`` will return a ``ray.result.Result`` @@ -157,7 +157,7 @@ def training_loop(self): Args: scaling_config: Configuration for how to scale training. run_config: Configuration for the execution of the training run. - datasets: Any Ray Datasets to use for training. Use the key "train" + datasets: Any Datastreams to use for training. Use the key "train" to denote which dataset is the training dataset. If a ``preprocessor`` is provided and has not already been fit, it will be fit on the training dataset. 
All datasets will be transformed @@ -407,7 +407,7 @@ def _validate_attributes(self): if not isinstance(self.datasets, dict): raise ValueError( f"`datasets` should be a dict mapping from a string to " - f"`ray.data.Dataset` objects, " + f"`ray.data.Datastream` objects, " f"found {type(self.datasets)} with value `{self.datasets}`." ) else: @@ -415,17 +415,18 @@ def _validate_attributes(self): if isinstance(dataset, ray.data.DatasetPipeline): raise ValueError( f"The Dataset under '{key}' key is a " - f"`ray.data.DatasetPipeline`. Only `ray.data.Dataset` are " + f"`ray.data.DatasetPipeline`. Only `ray.data.Datastream` are " f"allowed to be passed in. Pipelined/streaming ingest can be " f"configured via the `dataset_config` arg. See " "https://docs.ray.io/en/latest/ray-air/check-ingest.html#enabling-streaming-ingest" # noqa: E501 "for an example." ) - elif not isinstance(dataset, ray.data.Dataset) and not callable( + elif not isinstance(dataset, ray.data.Datastream) and not callable( dataset ): raise ValueError( - f"The Dataset under '{key}' key is not a `ray.data.Dataset`. " + f"The Datastream under '{key}' key is not a " + "`ray.data.Datastream`. " f"Received {dataset} instead." ) @@ -622,7 +623,7 @@ def _save(self, experiment_path: Union[str, Path]): of parameters can be passed in again), that parameter will be loaded from the saved copy. - Ray Datasets should not be saved as part of the state. Instead, we save the + Datastreams should not be saved as part of the state. Instead, we save the keys and replace the dataset values with dummy functions that will raise an error if invoked. 
The error only serves as a guardrail for misuse (e.g., manually unpickling and constructing the Trainer again) diff --git a/python/ray/train/batch_predictor.py b/python/ray/train/batch_predictor.py index 745572d909e6..694103b906bf 100644 --- a/python/ray/train/batch_predictor.py +++ b/python/ray/train/batch_predictor.py @@ -8,7 +8,7 @@ from ray.air import Checkpoint from ray.air.data_batch_type import DataBatchType from ray.air.util.data_batch_conversion import BatchFormat -from ray.data import Dataset, DatasetPipeline, Preprocessor +from ray.data import Datastream, DatasetPipeline, Preprocessor from ray.data.context import DataContext from ray.train.predictor import Predictor from ray.util.annotations import PublicAPI @@ -21,7 +21,7 @@ class BatchPredictor: """Batch predictor class. Takes a predictor class and a checkpoint and provides an interface to run - batch scoring on Ray datasets. + batch scoring on Datastreams. This batch predictor wraps around a predictor class and executes it in a distributed way when calling ``predict()``. @@ -111,7 +111,7 @@ def set_preprocessor(self, preprocessor: Preprocessor) -> None: def predict( self, - data: Union[ray.data.Dataset, ray.data.DatasetPipeline], + data: Union[ray.data.Datastream, ray.data.DatasetPipeline], *, feature_columns: Optional[List[str]] = None, keep_columns: Optional[List[str]] = None, @@ -123,14 +123,14 @@ def predict( separate_gpu_stage: bool = True, ray_remote_args: Optional[Dict[str, Any]] = None, **predict_kwargs, - ) -> Union[ray.data.Dataset, ray.data.DatasetPipeline]: - """Run batch scoring on a Dataset. + ) -> Union[ray.data.Datastream, ray.data.DatasetPipeline]: + """Run batch scoring on a Datastream. .. note:: - In Ray 2.4, `BatchPredictor` is lazy by default. Use one of the Datasets consumption APIs, such as iterating through the output, to trigger the execution of prediction. + In Ray 2.4, `BatchPredictor` is lazy by default. 
Use one of the Datastream consumption APIs, such as iterating through the output, to trigger the execution of prediction. Args: - data: Ray dataset or pipeline to run batch prediction on. + data: Datastream or pipeline to run batch prediction on. feature_columns: List of columns in the preprocessed dataset to use for prediction. Columns not specified will be dropped from `data` before being passed to the predictor. @@ -157,7 +157,7 @@ def predict( ``predict()`` method. Returns: - Dataset containing scoring results. + Datastream containing scoring results. Examples: @@ -297,7 +297,7 @@ def _keep_columns_from_input_batch( return prediction_output_batch def __call__(self, input_batch: DataBatchType) -> DataBatchType: - # TODO: Delegate separate_gpu_stage flag to Datasets. + # TODO: Delegate separate_gpu_stage flag to Datastream. if self.override_prep: # Apply preprocessing before selecting feature columns. input_batch = self.override_prep.transform_batch(input_batch) @@ -330,18 +330,18 @@ def __call__(self, input_batch: DataBatchType) -> DataBatchType: preprocessor = self.get_preprocessor() override_prep = None if preprocessor: - # TODO: Delegate separate_gpu_stage flag to Datasets. + # TODO: Delegate separate_gpu_stage flag to Datastream. if not separate_gpu_stage and num_gpus_per_worker > 0: override_prep = preprocessor else: # In batch prediction, preprocessing is always done in a separate stage. # We should not in-line it with prediction, unless separate_gpu_stage is # False. - # Dataset optimizer will fuse preprocessing+prediction stage as + # Datastream optimizer will fuse preprocessing+prediction stage as # necessary. - if isinstance(data, Dataset): - # Dataset is lazy by default so this transform + if isinstance(data, Datastream): + # Datastream is lazy by default so this transform # will not trigger execution. 
data = preprocessor.transform(data) elif isinstance(data, DatasetPipeline): @@ -362,7 +362,7 @@ def __call__(self, input_batch: DataBatchType) -> DataBatchType: def predict_pipelined( self, - data: ray.data.Dataset, + data: ray.data.Datastream, *, blocks_per_window: Optional[int] = None, bytes_per_window: Optional[int] = None, @@ -383,11 +383,11 @@ def predict_pipelined( Unlike `predict()`, this generates a DatasetPipeline object and does not perform execution. Execution can be triggered by pulling from the pipeline. - This is a convenience wrapper around calling `.window()` on the Dataset prior + This is a convenience wrapper around calling `.window()` on the Datastream prior to passing it `BatchPredictor.predict()`. Args: - data: Ray dataset to run batch prediction on. + data: Datastream to run batch prediction on. blocks_per_window: The window size (parallelism) in blocks. Increasing window size increases pipeline throughput, but also increases the latency to initial output, since it decreases the @@ -468,7 +468,7 @@ def predict_pipelined( ) def _determine_preprocessor_batch_format( - self, ds: Union[ray.data.Dataset, ray.data.DatasetPipeline] + self, ds: Union[ray.data.Datastream, ray.data.DatasetPipeline] ) -> BatchFormat: """Determine batch format we use for the first preprocessor. @@ -477,7 +477,7 @@ def _determine_preprocessor_batch_format( transform type to avoid unnecessary data conversion. Args: - ds (Union[ray.data.Dataset, ray.data.DatasetPipeline]): Input + ds (Union[ray.data.Datastream, ray.data.DatasetPipeline]): Input dataset or dataset pipeline. Returns: diff --git a/python/ray/train/data_parallel_trainer.py b/python/ray/train/data_parallel_trainer.py index 4f0b95f17ae5..9d2beb3b19eb 100644 --- a/python/ray/train/data_parallel_trainer.py +++ b/python/ray/train/data_parallel_trainer.py @@ -99,7 +99,7 @@ def train_loop_per_worker(): # Returns dict of last saved checkpoint. session.get_checkpoint() - # Returns the Ray Dataset shard for the given key. 
+ # Returns the Datastream shard for the given key. session.get_dataset_shard("my_dataset") # Returns the total number of workers executing training. @@ -210,7 +210,7 @@ def __init__(self, train_loop_per_worker, my_backend_config: dataset_config: Configuration for dataset ingest. This is merged with the default dataset config for the given trainer (`cls._dataset_config`). run_config: Configuration for the execution of the training run. - datasets: Any Ray Datasets to use for training. Use + datasets: Any Datastreams to use for training. Use the key "train" to denote which dataset is the training dataset. If a ``preprocessor`` is provided and has not already been fit, it will be fit on the training dataset. All datasets will be transformed diff --git a/python/ray/train/examples/pytorch/torch_regression_example.py b/python/ray/train/examples/pytorch/torch_regression_example.py index f3bcedfc7b07..3354d3bd241b 100644 --- a/python/ray/train/examples/pytorch/torch_regression_example.py +++ b/python/ray/train/examples/pytorch/torch_regression_example.py @@ -12,13 +12,13 @@ import ray.train as train from ray.air import session from ray.air.result import Result -from ray.data import Dataset +from ray.data import Datastream from ray.train.batch_predictor import BatchPredictor from ray.train.torch import TorchPredictor, TorchTrainer from ray.air.config import ScalingConfig -def get_datasets(split: float = 0.7) -> Tuple[Dataset]: +def get_datasets(split: float = 0.7) -> Tuple[Datastream]: dataset = ray.data.read_csv("s3://anonymous@air-example-data/regression.csv") def combine_x(batch): diff --git a/python/ray/train/examples/tf/tensorflow_autoencoder_example.py b/python/ray/train/examples/tf/tensorflow_autoencoder_example.py index c0a91307af6f..80579d169585 100644 --- a/python/ray/train/examples/tf/tensorflow_autoencoder_example.py +++ b/python/ray/train/examples/tf/tensorflow_autoencoder_example.py @@ -137,7 +137,7 @@ def train_tensorflow_mnist( return results -def 
predict_tensorflow_mnist(result: Result) -> ray.data.Dataset: +def predict_tensorflow_mnist(result: Result) -> ray.data.Datastream: test_dataset = get_dataset(split_type="test") batch_predictor = BatchPredictor.from_checkpoint( result.checkpoint, TensorflowPredictor, model_definition=build_autoencoder_model diff --git a/python/ray/train/examples/tf/tensorflow_regression_example.py b/python/ray/train/examples/tf/tensorflow_regression_example.py index 5b130a9947b4..b7ecf7da0dad 100644 --- a/python/ray/train/examples/tf/tensorflow_regression_example.py +++ b/python/ray/train/examples/tf/tensorflow_regression_example.py @@ -8,7 +8,7 @@ from ray.air import session from ray.air.integrations.keras import ReportCheckpointCallback from ray.air.result import Result -from ray.data import Dataset +from ray.data import Datastream from ray.data.preprocessors import Concatenator from ray.train.batch_predictor import BatchPredictor from ray.train.tensorflow import ( @@ -75,7 +75,7 @@ def train_tensorflow_regression(num_workers: int = 2, use_gpu: bool = False) -> return results -def predict_regression(result: Result) -> Dataset: +def predict_regression(result: Result) -> Datastream: batch_predictor = BatchPredictor.from_checkpoint( result.checkpoint, TensorflowPredictor, model_definition=build_model ) diff --git a/python/ray/train/gbdt_trainer.py b/python/ray/train/gbdt_trainer.py index 48a19866e011..2e56488d13ca 100644 --- a/python/ray/train/gbdt_trainer.py +++ b/python/ray/train/gbdt_trainer.py @@ -108,7 +108,7 @@ class GBDTTrainer(BaseTrainer): Inherited by XGBoostTrainer and LightGBMTrainer. Args: - datasets: Ray Datasets to use for training and validation. Must include a + datasets: Datastreams to use for training and validation. Must include a "train" key denoting the training dataset. If a ``preprocessor`` is provided and has not already been fit, it will be fit on the training dataset. 
All datasets will be transformed by the ``preprocessor`` if @@ -170,7 +170,7 @@ def __init__( resume_from_checkpoint=resume_from_checkpoint, ) - # Ray Datasets should always use distributed loading. + # Datastreams should always use distributed loading. for dataset_name in self.datasets.keys(): dataset_params = self.dmatrix_params.get(dataset_name, {}) dataset_params["distributed"] = True diff --git a/python/ray/train/horovod/horovod_trainer.py b/python/ray/train/horovod/horovod_trainer.py index a4209cec65a2..ff74ee1648b9 100644 --- a/python/ray/train/horovod/horovod_trainer.py +++ b/python/ray/train/horovod/horovod_trainer.py @@ -57,7 +57,7 @@ def train_loop_per_worker(): # Returns dict of last saved checkpoint. session.get_checkpoint() - # Returns the Ray Dataset shard for the given key. + # Returns the Datastream shard for the given key. session.get_dataset_shard("my_dataset") # Returns the total number of workers executing training. @@ -162,7 +162,7 @@ def train_loop_per_worker(): scaling_config: Configuration for how to scale data parallel training. dataset_config: Configuration for dataset ingest. run_config: Configuration for the execution of the training run. - datasets: Any Ray Datasets to use for training. Use + datasets: Any Datastreams to use for training. Use the key "train" to denote which dataset is the training dataset. If a ``preprocessor`` is provided and has not already been fit, it will be fit on the training dataset. All datasets will be transformed diff --git a/python/ray/train/huggingface/_huggingface_utils.py b/python/ray/train/huggingface/_huggingface_utils.py index 7aaab6ad4e73..a90acd49e2d2 100644 --- a/python/ray/train/huggingface/_huggingface_utils.py +++ b/python/ray/train/huggingface/_huggingface_utils.py @@ -59,9 +59,9 @@ def get_train_dataloader(self): return trainer -# TODO(ml-team): Replace with a Ray Datasets-HuggingFace integration when available. 
+# TODO(ml-team): Replace with a Datastreams-HuggingFace integration when available. class RayDatasetHFIterable(datasets.iterable_dataset.ExamplesIterable): - """HF ExamplesIterable backed by a Ray Dataset.""" + """HF ExamplesIterable backed by a Datastream.""" def __init__(self, dataset: DataIterator) -> None: self.dataset = dataset @@ -76,7 +76,7 @@ def __iter__(self): def process_dataset_for_hf(dataset: DataIterator) -> "IterableDataset": - """Converts a Ray Dataset into a HF IterableDataset.""" + """Converts a Datastream into a HF IterableDataset.""" hf_iterable = RayDatasetHFIterable(dataset) iterable_dataset = datasets.iterable_dataset.IterableDataset( diff --git a/python/ray/train/huggingface/accelerate/accelerate_trainer.py b/python/ray/train/huggingface/accelerate/accelerate_trainer.py index f0d441d23171..726bc10569f2 100644 --- a/python/ray/train/huggingface/accelerate/accelerate_trainer.py +++ b/python/ray/train/huggingface/accelerate/accelerate_trainer.py @@ -71,7 +71,7 @@ def train_loop_per_worker(): # Get dict of last saved checkpoint. session.get_checkpoint() - # Session returns the Ray Dataset shard for the given key. + # Session returns the Datastream shard for the given key. session.get_dataset_shard("my_dataset") # Get the total number of workers executing training. @@ -252,7 +252,7 @@ def train_loop_per_worker(): scaling_config: Configuration for how to scale data parallel training. dataset_config: Configuration for dataset ingest. run_config: Configuration for the execution of the training run. - datasets: Any Ray Datasets to use for training. Use + datasets: Any Datastreams to use for training. Use the key "train" to denote which dataset is the training dataset. If a ``preprocessor`` is provided and has not already been fit, it will be fit on the training dataset. 
All datasets will be transformed diff --git a/python/ray/train/huggingface/huggingface_trainer.py b/python/ray/train/huggingface/huggingface_trainer.py index a29414786f32..a27454a55079 100644 --- a/python/ray/train/huggingface/huggingface_trainer.py +++ b/python/ray/train/huggingface/huggingface_trainer.py @@ -209,7 +209,7 @@ def trainer_init_per_worker(train_dataset, eval_dataset, **config): ``transformers.Trainer`` object and takes in the following arguments: train ``Torch.Dataset``, optional evaluation ``Torch.Dataset`` and config as kwargs. The Torch Datasets are automatically - created by converting the Ray Datasets internally before + created by converting the Datastreams internally before they are passed into the function. trainer_init_config: Configurations to pass into ``trainer_init_per_worker`` as kwargs. @@ -219,7 +219,7 @@ def trainer_init_per_worker(train_dataset, eval_dataset, **config): scaling_config: Configuration for how to scale data parallel training. dataset_config: Configuration for dataset ingest. run_config: Configuration for the execution of the training run. - datasets: Any Ray Datasets to use for training. Use + datasets: Any datasets to use for training. Use the key "train" to denote which dataset is the training dataset and key "evaluation" to denote the evaluation dataset. Can only contain a training dataset diff --git a/python/ray/train/lightgbm/lightgbm_trainer.py b/python/ray/train/lightgbm/lightgbm_trainer.py index 6ae4fe6f0f01..0a738c4696ef 100644 --- a/python/ray/train/lightgbm/lightgbm_trainer.py +++ b/python/ray/train/lightgbm/lightgbm_trainer.py @@ -50,7 +50,7 @@ class LightGBMTrainer(GBDTTrainer): result = trainer.fit() Args: - datasets: Ray Datasets to use for training and validation. Must include a + datasets: Datastreams to use for training and validation. Must include a "train" key denoting the training dataset. If a ``preprocessor`` is provided and has not already been fit, it will be fit on the training dataset. 
All datasets will be transformed by the ``preprocessor`` if diff --git a/python/ray/train/lightning/lightning_trainer.py b/python/ray/train/lightning/lightning_trainer.py index 3ec7b14db04c..14aea9d320da 100644 --- a/python/ray/train/lightning/lightning_trainer.py +++ b/python/ray/train/lightning/lightning_trainer.py @@ -226,7 +226,7 @@ class LightningTrainer(TorchTrainer): ``pytorch_lightning.LightningModule`` using the arguments provided in ``LightningConfigBuilder.module()``. - For data ingestion, the LightningTrainer will then either convert the Ray Dataset + For data ingestion, the LightningTrainer will then either convert the Datastream shards to a ``pytorch_lightning.LightningDataModule``, or directly use the datamodule or dataloaders if provided by users. @@ -348,18 +348,19 @@ def configure_optimizers(self): scaling_config: Configuration for how to scale data parallel training. dataset_config: Configuration for dataset ingest. run_config: Configuration for the execution of the training run. - datasets: A dictionary of Ray Datasets to use for training. + datasets: A dictionary of Datastreams to use for training. Use the key "train" to denote which dataset is the training dataset and (optionally) key "val" to denote the validation dataset. If a ``preprocessor`` is provided and has not already been fit, it will be fit on the training dataset. All datasets will be transformed by the ``preprocessor`` if one is provided. - datasets_iter_config: Configurations for iterating over input Ray datasets. + datasets_iter_config: Configurations for iterating over input Datastreams. This configuration is only valid when `datasets` argument is provided to the LightningTrainer. Otherwise, LightningTrainer will use datamodule or dataloaders specified in ``LightningConfig.trainer_init_config``. 
For valid arguments to pass, please refer to: - :py:meth:`Dataset.iter_torch_batches ` + :py:meth:`Dataset.iter_torch_batches + ` preprocessor: A ray.data.Preprocessor to preprocess the provided datasets. resume_from_checkpoint: A checkpoint to resume training from. @@ -488,13 +489,13 @@ def _lightning_train_loop_per_worker(config): if not (train_dataloaders or datamodule or train_ray_dataset): raise RuntimeError( "Please provide at least one of the following data inputs: " - "train_dataloaders, datamodule, or Ray Datasets with key 'train'." + "train_dataloaders, datamodule, or Datastreams with key 'train'." ) if train_ray_dataset: if datamodule: logger.warning( - "Using Ray datasets as primary input. The 'datamodule' defined in " + "Using Datastreams as primary input. The 'datamodule' defined in " "'LightningConfig.trainer_fit_params' is ignored!" ) diff --git a/python/ray/train/mosaic/mosaic_trainer.py b/python/ray/train/mosaic/mosaic_trainer.py index cef1789616b0..12e533ff1ccb 100644 --- a/python/ray/train/mosaic/mosaic_trainer.py +++ b/python/ray/train/mosaic/mosaic_trainer.py @@ -108,7 +108,7 @@ class MosaicTrainer(TorchTrainer): ``composer.Trainer`` object and takes in configuration dictionary (``config``) as an argument. This dictionary is based on ``trainer_init_config`` and is modified for Ray - Composer integration. - datasets: Any Ray Datasets to use for training. At the moment, we do not support + datasets: Any Datastreams to use for training. At the moment, we do not support passing datasets to the trainer and using the dataset shards in the trainer loop. Instead, configure and load the datasets inside ``trainer_init_per_worker`` function diff --git a/python/ray/train/rl/rl_trainer.py b/python/ray/train/rl/rl_trainer.py index c6feb79fedc4..bbf406a7b285 100644 --- a/python/ray/train/rl/rl_trainer.py +++ b/python/ray/train/rl/rl_trainer.py @@ -37,7 +37,7 @@ class RLTrainer(BaseTrainer): (e.g. ``"PPO"``) or a RLlib trainer class. 
scaling_config: Configuration for how to scale training. run_config: Configuration for the execution of the training run. - datasets: Any Ray Datasets to use for training. Use the key "train" + datasets: Any Datastreams to use for training. Use the key "train" to denote which dataset is the training dataset. If a ``preprocessor`` is provided and has not already been fit, it will be fit on the training dataset. All datasets will be transformed diff --git a/python/ray/train/session.py b/python/ray/train/session.py index ca46d3e2f79d..70801dbe653d 100644 --- a/python/ray/train/session.py +++ b/python/ray/train/session.py @@ -81,7 +81,7 @@ def get_dataset_shard( if shard is None: warnings.warn( "No dataset passed in. Returning None. Make sure to " - "pass in a Ray Dataset to Trainer.run to use this " + "pass in a Datastream to Trainer.run to use this " "function." ) elif isinstance(shard, dict): diff --git a/python/ray/train/sklearn/sklearn_trainer.py b/python/ray/train/sklearn/sklearn_trainer.py index e31205783a4a..d570c80feb69 100644 --- a/python/ray/train/sklearn/sklearn_trainer.py +++ b/python/ray/train/sklearn/sklearn_trainer.py @@ -84,7 +84,7 @@ class SklearnTrainer(BaseTrainer): Args: estimator: A scikit-learn compatible estimator to use. - datasets: Ray Datasets to use for training and validation. Must include a + datasets: Datastreams to use for training and validation. Must include a "train" key denoting the training dataset. If a ``preprocessor`` is provided and has not already been fit, it will be fit on the training dataset. All datasets will be transformed by the ``preprocessor`` if diff --git a/python/ray/train/tensorflow/tensorflow_trainer.py b/python/ray/train/tensorflow/tensorflow_trainer.py index 6b2ef8609df1..045b6f2e6100 100644 --- a/python/ray/train/tensorflow/tensorflow_trainer.py +++ b/python/ray/train/tensorflow/tensorflow_trainer.py @@ -65,7 +65,7 @@ def train_loop_per_worker(): # Returns dict of last saved checkpoint. 
session.get_checkpoint() - # Returns the Ray Dataset shard for the given key. + # Returns the Datastream shard for the given key. session.get_dataset_shard("my_dataset") # Returns the total number of workers executing training. @@ -154,7 +154,7 @@ def train_loop_per_worker(config): scaling_config: Configuration for how to scale data parallel training. dataset_config: Configuration for dataset ingest. run_config: Configuration for the execution of the training run. - datasets: Any Ray Datasets to use for training. Use + datasets: Any Datastreams to use for training. Use the key "train" to denote which dataset is the training dataset. If a ``preprocessor`` is provided and has not already been fit, it will be fit on the training dataset. All datasets will be transformed diff --git a/python/ray/train/tensorflow/train_loop_utils.py b/python/ray/train/tensorflow/train_loop_utils.py index d0a74a35e6de..f2657c75ccc4 100644 --- a/python/ray/train/tensorflow/train_loop_utils.py +++ b/python/ray/train/tensorflow/train_loop_utils.py @@ -8,7 +8,7 @@ def prepare_dataset_shard(tf_dataset_shard: tf.data.Dataset): """A utility function that overrides default config for Tensorflow Dataset. This should be used on a TensorFlow ``Dataset`` created by calling - ``iter_tf_batches()`` on a ``ray.data.Dataset`` returned by + ``iter_tf_batches()`` on a ``ray.data.Datastream`` returned by ``ray.air.session.get_dataset_shard()`` since the dataset has already been sharded across the workers. 
diff --git a/python/ray/train/tests/test_base_trainer.py b/python/ray/train/tests/test_base_trainer.py index ad9d7a4ab9a7..236050653954 100644 --- a/python/ray/train/tests/test_base_trainer.py +++ b/python/ray/train/tests/test_base_trainer.py @@ -118,7 +118,7 @@ def test_validate_datasets(ray_start_4_cpus): with pytest.raises(ValueError) as e: DummyTrainer(train_loop=None, datasets={"train": 1}) - assert "The Dataset under train key is not a `ray.data.Dataset`" + assert "The Dataset under train key is not a `ray.data.Datastream`" with pytest.raises(ValueError) as e: DummyTrainer( diff --git a/python/ray/train/tests/test_xgboost_trainer.py b/python/ray/train/tests/test_xgboost_trainer.py index b61af80619fe..b1eedbe1dcaf 100644 --- a/python/ray/train/tests/test_xgboost_trainer.py +++ b/python/ray/train/tests/test_xgboost_trainer.py @@ -246,7 +246,7 @@ def test_validation(ray_start_4_cpus): def test_distributed_data_loading(ray_start_4_cpus): - """Checks that XGBoostTrainer does distributed data loading for Ray Datasets.""" + """Checks that XGBoostTrainer does distributed data loading for Datastreams.""" class DummyXGBoostTrainer(XGBoostTrainer): def _train(self, params, dtrain, **kwargs): diff --git a/python/ray/train/torch/torch_trainer.py b/python/ray/train/torch/torch_trainer.py index 6c43776bbe9a..5b0951799fe7 100644 --- a/python/ray/train/torch/torch_trainer.py +++ b/python/ray/train/torch/torch_trainer.py @@ -56,7 +56,7 @@ def train_loop_per_worker(): # Get dict of last saved checkpoint. session.get_checkpoint() - # Session returns the Ray Dataset shard for the given key. + # Session returns the Datastream shard for the given key. session.get_dataset_shard("my_dataset") # Get the total number of workers executing training. @@ -247,7 +247,7 @@ def train_loop_per_worker(): scaling_config: Configuration for how to scale data parallel training. dataset_config: Configuration for dataset ingest. run_config: Configuration for the execution of the training run. 
- datasets: Any Ray Datasets to use for training. Use + datasets: Any Datastreams to use for training. Use the key "train" to denote which dataset is the training dataset. If a ``preprocessor`` is provided and has not already been fit, it will be fit on the training dataset. All datasets will be transformed diff --git a/python/ray/train/xgboost/xgboost_trainer.py b/python/ray/train/xgboost/xgboost_trainer.py index c0a19d9b096c..e6f1156b7624 100644 --- a/python/ray/train/xgboost/xgboost_trainer.py +++ b/python/ray/train/xgboost/xgboost_trainer.py @@ -46,7 +46,7 @@ class XGBoostTrainer(GBDTTrainer): result = trainer.fit() Args: - datasets: Ray Datasets to use for training and validation. Must include a + datasets: Datastreams to use for training and validation. Must include a "train" key denoting the training dataset. If a ``preprocessor`` is provided and has not already been fit, it will be fit on the training dataset. All datasets will be transformed by the ``preprocessor`` if diff --git a/python/ray/tune/execution/experiment_state.py b/python/ray/tune/execution/experiment_state.py index daf89075012f..ff9220c1b253 100644 --- a/python/ray/tune/execution/experiment_state.py +++ b/python/ray/tune/execution/experiment_state.py @@ -206,7 +206,7 @@ def checkpoint( # Checkpoint checkpoint_time_start = time.monotonic() - # NOTE: This context manager is for Ray Datasets captured in a trial config. + # NOTE: This context manager is for Datastreams captured in a trial config. # This is the case when *tuning over datasets*. # If the datasets have already been full executed, then serializing # block refs means that this checkpoint is not usable in a new Ray cluster. 
diff --git a/python/ray/tune/impl/out_of_band_serialize_dataset.py b/python/ray/tune/impl/out_of_band_serialize_dataset.py index 112cee4d8032..25749f3b4aa6 100644 --- a/python/ray/tune/impl/out_of_band_serialize_dataset.py +++ b/python/ray/tune/impl/out_of_band_serialize_dataset.py @@ -5,11 +5,11 @@ def _deserialize_and_fully_execute_if_needed(serialized_ds: bytes): - ds = ray.data.Dataset.deserialize_lineage(serialized_ds) + ds = ray.data.Datastream.deserialize_lineage(serialized_ds) return ds -def _reduce(ds: ray.data.Dataset): +def _reduce(ds: ray.data.Datastream): tb_list = traceback.format_list(traceback.extract_stack()) _already_in_out_of_band_serialization = False for tb in tb_list: @@ -27,7 +27,7 @@ def _reduce(ds: ray.data.Dataset): def out_of_band_serialize_dataset(): context = ray._private.worker.global_worker.get_serialization_context() try: - context._register_cloudpickle_reducer(ray.data.Dataset, _reduce) + context._register_cloudpickle_reducer(ray.data.Datastream, _reduce) yield finally: - context._unregister_cloudpickle_reducer(ray.data.Dataset) + context._unregister_cloudpickle_reducer(ray.data.Datastream) diff --git a/python/ray/tune/tests/test_trial_runner_3.py b/python/ray/tune/tests/test_trial_runner_3.py index fb5da5758474..3893104b5f62 100644 --- a/python/ray/tune/tests/test_trial_runner_3.py +++ b/python/ray/tune/tests/test_trial_runner_3.py @@ -1159,7 +1159,7 @@ def testPeriodicCloudCheckpointSyncTimeout(self): assert syncer.sync_up_counter == 2 def testExperimentCheckpointWithDatasets(self): - """Test trial runner checkpointing where trials contain Ray Datasets. + """Test trial runner checkpointing where trials contain Datastreams. When possible, a dataset plan should be saved (for read_* APIs). See `Dataset.serialize_lineage` for more information. 
diff --git a/python/ray/tune/tuner.py b/python/ray/tune/tuner.py index 88ccc109cf1e..42b1c63235a1 100644 --- a/python/ray/tune/tuner.py +++ b/python/ray/tune/tuner.py @@ -219,7 +219,7 @@ def restore( param_space: The same `param_space` that was passed to the original Tuner. This can be optionally re-specified due to the `param_space` potentially containing Ray object - references (tuning over Ray Datasets or tuning over + references (tuning over Datastreams or tuning over several `ray.put` object references). **Tune expects the `param_space` to be unmodified**, and the only part that will be used during restore are the updated object references. diff --git a/python/ray/util/actor_group.py b/python/ray/util/actor_group.py index 35484abfe62b..a7a625cb1808 100644 --- a/python/ray/util/actor_group.py +++ b/python/ray/util/actor_group.py @@ -56,7 +56,7 @@ def remote(self, *args, **kwargs): f"in https://docs.ray.io/en/{get_ray_doc_version()}/ray-more-libs/multiprocessing.html. " # noqa: E501 "For stateful/actor processing such as batch prediction, use " "Datasets.map_batches(compute=ActorPoolStrategy, ...), see details in " - f"https://docs.ray.io/en/{get_ray_doc_version()}/data/api/dataset.html#ray.data.Dataset.map_batches.", # noqa: E501 + f"https://docs.ray.io/en/{get_ray_doc_version()}/data/api/datastream.html#ray.data.Datastream.map_batches.", # noqa: E501 warning=True, ) class ActorGroup: diff --git a/release/nightly_tests/dataset/operator_fusion_benchmark.py b/release/nightly_tests/dataset/operator_fusion_benchmark.py index 6b2817f35741..bf24fcd99d2b 100644 --- a/release/nightly_tests/dataset/operator_fusion_benchmark.py +++ b/release/nightly_tests/dataset/operator_fusion_benchmark.py @@ -77,7 +77,7 @@ def make_ds( num_columns: int, ops_spec: List[Dict[str, Any]], target_max_block_size: int, -) -> ray.data.Dataset: +) -> ray.data.Datastream: ds = ray.data.read_datasource( BlockDatasource(), num_blocks_per_task=num_blocks_per_task, @@ -96,7 +96,7 @@ def make_ds( 
return ds -def execute_ds(ds: ray.data.Dataset): +def execute_ds(ds: ray.data.Datastream): ds = ds.fully_executed() diff --git a/rllib/offline/dataset_reader.py b/rllib/offline/dataset_reader.py index c6a11622aa1f..14d4c7aeb062 100644 --- a/rllib/offline/dataset_reader.py +++ b/rllib/offline/dataset_reader.py @@ -197,7 +197,7 @@ class DatasetReader(InputReader): """ @PublicAPI - def __init__(self, ds: ray.data.Dataset, ioctx: Optional[IOContext] = None): + def __init__(self, ds: ray.data.Datastream, ioctx: Optional[IOContext] = None): """Initializes a DatasetReader instance. Args: diff --git a/rllib/offline/feature_importance.py b/rllib/offline/feature_importance.py index 5ffac614eb71..76067449c776 100644 --- a/rllib/offline/feature_importance.py +++ b/rllib/offline/feature_importance.py @@ -70,7 +70,7 @@ def _compute_actions( @ray.remote def get_feature_importance_on_index( - dataset: ray.data.Dataset, + dataset: ray.data.Datastream, *, index: int, perturb_fn: Callable[[pd.DataFrame, int], None], diff --git a/rllib/offline/tests/test_dataset_reader.py b/rllib/offline/tests/test_dataset_reader.py index 9557f68cb4f1..3887908402fa 100644 --- a/rllib/offline/tests/test_dataset_reader.py +++ b/rllib/offline/tests/test_dataset_reader.py @@ -60,7 +60,7 @@ def test_dataset_shard_with_only_local(self): _, shards = get_dataset_and_shards(config, num_workers=0) assert len(shards) == 1 - assert isinstance(shards[0], ray.data.Dataset) + assert isinstance(shards[0], ray.data.Datastream) def test_dataset_shard_remote_workers_with_local_worker(self): """Tests whether the dataset_shard function works correctly for the remote @@ -76,7 +76,7 @@ def test_dataset_shard_remote_workers_with_local_worker(self): assert len(shards) == NUM_WORKERS + 1 assert shards[0] is None assert all( - isinstance(remote_shard, ray.data.Dataset) for remote_shard in shards[1:] + isinstance(remote_shard, ray.data.Datastream) for remote_shard in shards[1:] ) def 
test_dataset_shard_with_task_parallelization(self): @@ -100,7 +100,7 @@ def test_dataset_shard_with_task_parallelization(self): assert len(shards) == NUM_WORKERS + 1 assert shards[0] is None assert all( - isinstance(remote_shard, ray.data.Dataset) for remote_shard in shards[1:] + isinstance(remote_shard, ray.data.Datastream) for remote_shard in shards[1:] ) def test_dataset_shard_with_loader_fn(self): From c81921f3a94003d2f6ee396c20cc25cc7469787d Mon Sep 17 00:00:00 2001 From: angelinalg <122562471+angelinalg@users.noreply.github.com> Date: Wed, 19 Apr 2023 19:37:33 -0700 Subject: [PATCH 022/424] clarify M1 installation instructions (#34505) A few folks have been confused by the order of the installation instructions for M1, so adding some clarifying language. While I was at it, I made minor improvements to some language in nearby paragraphs. --- doc/source/ray-contribute/docs.ipynb | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/doc/source/ray-contribute/docs.ipynb b/doc/source/ray-contribute/docs.ipynb index 47fa7f49f730..d7636a73c082 100644 --- a/doc/source/ray-contribute/docs.ipynb +++ b/doc/source/ray-contribute/docs.ipynb @@ -28,28 +28,27 @@ "cd ray/doc\n", "```\n", "\n", - "**Note**: If you are on an Apple Silicon (M1) read the instructions below for installing the dependencies.\n", + "**Note**: If you are using Apple Silicon (M1), follow the instructions below before continuing.\n", "\n", - "Make sure you activate the Python environment you are using (e.g. venv, conda, etc.) and then to install the documentation dependencies, run the following command:\n", + "Activate the Python environment you are using (e.g., venv, conda, etc.). 
Install the documentation dependencies, with the following command:\n", "\n", "```shell\n", "pip install -r requirements-doc.txt\n", "```\n", "\n", - "Additionally, it's best if you install the dependencies for our linters with\n", + "Install the dependencies for our linters to ensure your changes comply with our style guide.\n", "\n", "```shell\n", "pip install -r ../python/requirements_linters.txt\n", "```\n", "\n", - "so that you can make sure your changes comply with our style guide.\n", - "Building the documentation is done by running the following command:\n", + "Build the documentation by running the following command:\n", "\n", "```shell\n", "make develop\n", "```\n", "\n", - "which will build the documentation into the `_build` directory.\n", + "Find the documentation build in the `_build` directory.\n", "After the build finishes, you can simply open the `_build/html/index.html` file in your browser.\n", "It's considered good practice to check the output of your build to make sure everything is working as expected.\n", "\n", From f53c229d541e92940ed7ce5ad2492ce75e0fc7b8 Mon Sep 17 00:00:00 2001 From: angelinalg <122562471+angelinalg@users.noreply.github.com> Date: Wed, 19 Apr 2023 21:04:47 -0700 Subject: [PATCH 023/424] Create LLM section and add examples (#34614) Surface LLM/Generative AI use cases. Signed-off-by: angelinalg <122562471+angelinalg@users.noreply.github.com> --- doc/source/ray-overview/use-cases.rst | 92 ++++++++++++++++++++++++--- 1 file changed, 84 insertions(+), 8 deletions(-) diff --git a/doc/source/ray-overview/use-cases.rst b/doc/source/ray-overview/use-cases.rst index a6ae09503c98..e66b27267aa5 100644 --- a/doc/source/ray-overview/use-cases.rst +++ b/doc/source/ray-overview/use-cases.rst @@ -5,6 +5,89 @@ Ray Use Cases This page indexes common Ray use cases for scaling ML. It contains highlighted references to blogs, examples, and tutorials also located elsewhere in the Ray documentation. +.. 
_ref-use-cases-llm: + +Large Language Models and Generative AI +--------------------------------------- + +The following highlights feature projects that use Ray to implement Large Language Models and Generative AI applications. + +.. panels:: + :container: container pb-3 + :column: col-md-3 px-1 py-1 + :img-top-cls: p-2 w-75 d-block mx-auto fixed-height-img + + --- + :img-top: /images/ray_logo.png + + .. link-button:: https://www.anyscale.com/blog/ray-common-production-challenges-for-generative-ai-infrastructure + :type: url + :text: [Blog] How Ray solves common production challenges for generative AI infrastructure + :classes: btn-link btn-block stretched-link webCrawler + + --- + :img-top: /images/ray_logo.png + + .. link-button:: https://www.anyscale.com/blog/training-175b-parameter-language-models-at-1000-gpu-scale-with-alpa-and-ray + :type: url + :text: [Blog] Training 175B Parameter Language Models at 1000 GPU scale with Alpa and Ray + :classes: btn-link btn-block stretched-link webCrawler + + --- + :img-top: /images/ray_logo.png + + .. link-button:: https://www.anyscale.com/blog/faster-stable-diffusion-fine-tuning-with-ray-air + :type: url + :text: [Blog] Faster stable diffusion fine-tuning with Ray AIR + :classes: btn-link btn-block stretched-link webCrawler + + --- + :img-top: /images/ray_logo.png + + .. link-button:: https://www.anyscale.com/blog/how-to-fine-tune-and-serve-llms-simply-quickly-and-cost-effectively-using + :type: url + :text: [Blog] How to fine tune and serve LLMs simply, quickly and cost effectively using Ray + DeepSpeed + HuggingFace + :classes: btn-link btn-block stretched-link webCrawler + + --- + :img-top: /images/ray_logo.png + + .. link-button:: https://www.businessinsider.com/openai-chatgpt-trained-on-anyscale-ray-generative-lifelike-ai-models-2022-12 + :type: url + :text: [Blog] How OpenAI Uses Ray to Train Tools like ChatGPT + :classes: btn-link btn-block stretched-link chatgpt + --- + :img-top: /images/ray_logo.png + + .. 
link-button:: /ray-air/examples/gptj_deepspeed_fine_tuning + :type: ref + :text: [Example] GPT-J-6B Fine-Tuning with Ray AIR and DeepSpeed + :classes: btn-link btn-block stretched-link antServing + + --- + :img-top: /images/ray_logo.png + + .. link-button:: /ray-air/examples/dreambooth_finetuning + :type: ref + :text: [Example] Fine-tuning DreamBooth with Ray AIR + :classes: btn-link btn-block stretched-link rayForward + + --- + :img-top: /images/ray_logo.png + + .. link-button:: /ray-air/examples/stablediffusion_batch_prediction + :type: ref + :text: [Example] Stable Diffusion Batch Prediction with Ray AIR + :classes: btn-link btn-block stretched-link rayForward + + --- + :img-top: /images/ray_logo.png + + .. link-button:: /ray-air/examples/gptj_serving + :type: ref + :text: [Example] GPT-J-6B Serving with Ray AIR + :classes: btn-link btn-block stretched-link webCrawler + .. _ref-use-cases-batch-infer: Batch Inference @@ -530,13 +613,6 @@ The following highlights feature projects leveraging Ray Core's distributed APIs --- :img-top: /images/ray_logo.png - .. link-button:: https://www.businessinsider.com/openai-chatgpt-trained-on-anyscale-ray-generative-lifelike-ai-models-2022-12 - :type: url - :text: [Blog] How OpenAI Uses Ray to Train Tools like ChatGPT - :classes: btn-link btn-block stretched-link chatgpt - --- - :img-top: /images/ray_logo.png - .. link-button:: https://www.anyscale.com/blog/building-highly-available-and-scalable-online-applications-on-ray-at-ant :type: url :text: [Blog] Highly Available and Scalable Online Applications on Ray at Ant Group @@ -564,4 +640,4 @@ The following highlights feature projects leveraging Ray Core's distributed APIs .. 
link-button:: /ray-core/examples/web-crawler :type: ref :text: [Example] Speed up your web crawler by parallelizing it with Ray - :classes: btn-link btn-block stretched-link webCrawler + :classes: btn-link btn-block stretched-link webCrawler \ No newline at end of file From b62151a9dc2d3e18a514f3996e19ba0be85fe20b Mon Sep 17 00:00:00 2001 From: Alan Guo Date: Thu, 20 Apr 2023 01:51:25 -0600 Subject: [PATCH 024/424] Add driver logs to Jobs page for submission jobs (#34514) Add driver logs to Jobs page for submission jobs Adds a refresh button to the log viewer to reload the logs. Refactors the log viewer from the logs page into its own component Updates the look and feel of the jobs page to match the new IA style. Adds User-provided metadata to the job detail page. (fixes [Core|Dashboard] Support custom tags for jobs. #34187 ) Updates the table icon Change "Tasks" to "Tasks/actor overview" Adds Node Count Card next to ray status cards --- .../CodeDialogButton/CodeDialogButton.tsx | 10 +- dashboard/client/src/common/Section.tsx | 46 +++ .../MetadataSection/MetadataSection.tsx | 16 +- dashboard/client/src/pages/job/JobDetail.tsx | 266 ++++++++---------- .../src/pages/job/JobDetailActorPage.tsx | 7 +- .../src/pages/job/JobDetailInfoPage.tsx | 186 +++++++----- .../job/JobDriverLogs.component.test.tsx | 47 ++++ .../client/src/pages/job/JobDriverLogs.tsx | 78 +++++ .../client/src/pages/job/TaskTimeline.tsx | 5 +- .../client/src/pages/job/hook/useJobDetail.ts | 6 +- dashboard/client/src/pages/log/LogViewer.tsx | 195 +++++++++++++ dashboard/client/src/pages/log/Logs.tsx | 183 +----------- .../client/src/pages/node/ClusterLayout.tsx | 9 +- dashboard/client/src/theme.ts | 6 +- ...rve_deployment_grafana_dashboard_base.json | 1 + 15 files changed, 639 insertions(+), 422 deletions(-) create mode 100644 dashboard/client/src/common/Section.tsx create mode 100644 dashboard/client/src/pages/job/JobDriverLogs.component.test.tsx create mode 100644 
dashboard/client/src/pages/job/JobDriverLogs.tsx create mode 100644 dashboard/client/src/pages/log/LogViewer.tsx diff --git a/dashboard/client/src/common/CodeDialogButton/CodeDialogButton.tsx b/dashboard/client/src/common/CodeDialogButton/CodeDialogButton.tsx index 711616fa1092..0aea9c9f4d61 100644 --- a/dashboard/client/src/common/CodeDialogButton/CodeDialogButton.tsx +++ b/dashboard/client/src/common/CodeDialogButton/CodeDialogButton.tsx @@ -5,9 +5,11 @@ import { makeStyles, Typography, } from "@material-ui/core"; +import classNames from "classnames"; import yaml from "js-yaml"; import React, { useState } from "react"; import DialogWithTitle from "../DialogWithTitle"; +import { ClassNameProps } from "../props"; const useStyles = makeStyles((theme) => createStyles({ @@ -15,7 +17,8 @@ const useStyles = makeStyles((theme) => whiteSpace: "pre", fontFamily: "SFMono-Regular,Consolas,Liberation Mono,Menlo,monospace", padding: theme.spacing(2), - // borderRadius: theme.spacing(1), + overflow: "scroll", + maxHeight: 600, }, }), ); @@ -92,13 +95,14 @@ const useCodeDialogButtonWithPreviewStyles = makeStyles((theme) => }), ); -type CodeDialogButtonWithPreviewProps = CodeDialogButtonProps; +type CodeDialogButtonWithPreviewProps = CodeDialogButtonProps & ClassNameProps; /** * Similar to CodeDialogButton but also shows a snippet of the expanded text next to the button. */ export const CodeDialogButtonWithPreview = ({ code, buttonText, + className, ...props }: CodeDialogButtonWithPreviewProps) => { const classes = useCodeDialogButtonWithPreviewStyles(); @@ -109,7 +113,7 @@ export const CodeDialogButtonWithPreview = ({ const buttonTextToPass = buttonText ?? "Expand"; return ( -
    +
    {codeText} + createStyles({ + contentContainer: { + padding: theme.spacing(2), + height: "100%", + }, + }), +); + +type SectionProps = { + title?: string; +} & ClassNameProps & + BoxProps; + +export const Section = ({ + title, + children, + className, + ...props +}: PropsWithChildren) => { + const classes = useStyles(); + + return ( + + {title && ( + + {title} + + )} + + {children} + + + ); +}; diff --git a/dashboard/client/src/components/MetadataSection/MetadataSection.tsx b/dashboard/client/src/components/MetadataSection/MetadataSection.tsx index eb2c8af78b50..91c73dc7723d 100644 --- a/dashboard/client/src/components/MetadataSection/MetadataSection.tsx +++ b/dashboard/client/src/components/MetadataSection/MetadataSection.tsx @@ -4,7 +4,6 @@ import { IconButton, Link, makeStyles, - Paper, Tooltip, Typography, } from "@material-ui/core"; @@ -12,6 +11,7 @@ import copy from "copy-to-clipboard"; import React, { useState } from "react"; import { RiFileCopyLine } from "react-icons/ri"; import { Link as RouterLink } from "react-router-dom"; +import { Section } from "../../common/Section"; import { HelpInfo } from "../Tooltip"; export type StringOnlyMetadataContent = { @@ -55,7 +55,6 @@ const useStyles = makeStyles((theme) => gridTemplateColumns: "repeat(3, minmax(0, 1fr))", rowGap: theme.spacing(1), columnGap: theme.spacing(4), - padding: theme.spacing(2), }, label: { color: theme.palette.text.secondary, @@ -193,15 +192,8 @@ export const MetadataSection = ({ metadataList: Metadata[]; }) => { return ( - - {header && ( - - {header} - - )} - - - - +
    + +
    ); }; diff --git a/dashboard/client/src/pages/job/JobDetail.tsx b/dashboard/client/src/pages/job/JobDetail.tsx index e3721403dbc7..3e630c5e1c6f 100644 --- a/dashboard/client/src/pages/job/JobDetail.tsx +++ b/dashboard/client/src/pages/job/JobDetail.tsx @@ -1,31 +1,41 @@ -import { Box, Grid, makeStyles, Typography } from "@material-ui/core"; +import { Box, makeStyles, Typography } from "@material-ui/core"; import React, { useContext, useRef, useState } from "react"; import { Link } from "react-router-dom"; import { GlobalContext } from "../../App"; import { CollapsibleSection } from "../../common/CollapsibleSection"; -import { DurationText } from "../../common/DurationText"; -import { formatDateFromTimeMs } from "../../common/formatUtils"; -import { - CpuProfilingLink, - CpuStackTraceLink, -} from "../../common/ProfilingLink"; +import { Section } from "../../common/Section"; import Loading from "../../components/Loading"; -import { MetadataSection } from "../../components/MetadataSection"; import { StatusChip } from "../../components/StatusChip"; import TitleCard from "../../components/TitleCard"; import { NestedJobProgressLink, UnifiedJob } from "../../type/job"; import ActorList from "../actor/ActorList"; +import { NodeCountCard } from "../overview/cards/NodeCountCard"; import PlacementGroupList from "../state/PlacementGroup"; import TaskList from "../state/task"; import { useRayStatus } from "./hook/useClusterStatus"; import { useJobDetail } from "./hook/useJobDetail"; +import { JobMetadataSection } from "./JobDetailInfoPage"; +import { JobDriverLogs } from "./JobDriverLogs"; import { JobProgressBar } from "./JobProgressBar"; import { TaskTimeline } from "./TaskTimeline"; const useStyle = makeStyles((theme) => ({ root: { padding: theme.spacing(2), + backgroundColor: "white", + }, + section: { + marginBottom: theme.spacing(4), + }, + autoscalerSection: { + flexWrap: "wrap", + [theme.breakpoints.up("md")]: { + flexWrap: "nowrap", + }, + }, + nodeCountCard: { + 
flex: "1 0 500px", }, })); @@ -71,23 +81,21 @@ export const JobDetailChartsPage = () => { return (
    - - {title} - + + {title} + {cluster_status_rows.map((i, key) => { // Format the output. // See format_info_string in util.py - if (i.startsWith("-----") || i.startsWith("=====")) { - // Separator - return
    ; + if (i.startsWith("-----") || i.startsWith("=====") || i === "") { + // Ignore separators + return null; } else if (i.endsWith(":")) { return (
    {i}
    ); - } else if (i === "") { - return
    ; } else { return
    {i}
    ; } @@ -145,174 +153,132 @@ export const JobDetailChartsPage = () => { return (
    - - , - }, - { - label: "Job ID", - content: job.job_id - ? { - value: job.job_id, - copyableValue: job.job_id, - } - : { value: "-" }, - }, - { - label: "Submission ID", - content: job.submission_id - ? { - value: job.submission_id, - copyableValue: job.submission_id, - } - : { - value: "-", - }, - }, - { - label: "Duration", - content: job.start_time ? ( - - ) : ( - - - ), - }, - { - label: "Started at", - content: { - value: job.start_time - ? formatDateFromTimeMs(job.start_time) - : "-", - }, - }, - { - label: "Ended at", - content: { - value: job.end_time ? formatDateFromTimeMs(job.end_time) : "-", - }, - }, - { - label: "Actions", - content: ( -
    - -
    - -
    - -
    - ), - }, - ]} - /> -
    - - - - - - - - - + + + +
    + +
    +
    + + {job.type === "SUBMISSION" && ( + +
    + +
    +
    + )} + + +
    + +
    +
    + + + + +
    {cluster_status?.data ? formatNodeStatus(cluster_status?.data.clusterStatus) : "No cluster status."} - - - - +
    +
    {cluster_status?.data ? formatResourcesStatus(cluster_status?.data.clusterStatus) : "No cluster status."} - - - - - { - setTaskTableExpanded(!taskTableExpanded); - }} - > +
    +
    +
    + + { + setTaskTableExpanded(!taskTableExpanded); + }} + className={classes.section} + > +
    - - - - { - setActorTableExpanded(!actorTableExpanded); - }} - > +
    +
    + + { + setActorTableExpanded(!actorTableExpanded); + }} + className={classes.section} + > +
    - - - - +
    +
    + + +
    - - +
    +
    ); }; diff --git a/dashboard/client/src/pages/job/JobDetailActorPage.tsx b/dashboard/client/src/pages/job/JobDetailActorPage.tsx index b31d653ca3ed..2a8ab3d9e1a6 100644 --- a/dashboard/client/src/pages/job/JobDetailActorPage.tsx +++ b/dashboard/client/src/pages/job/JobDetailActorPage.tsx @@ -1,7 +1,7 @@ import { makeStyles } from "@material-ui/core"; import React from "react"; -import TitleCard from "../../components/TitleCard"; +import { Section } from "../../common/Section"; import ActorList from "../actor/ActorList"; import { MainNavPageInfo } from "../layout/mainNavContext"; import { useJobDetail } from "./hook/useJobDetail"; @@ -9,6 +9,7 @@ import { useJobDetail } from "./hook/useJobDetail"; const useStyle = makeStyles((theme) => ({ root: { padding: theme.spacing(2), + backgroundColor: "white", }, })); @@ -31,9 +32,9 @@ export const JobDetailActorsPage = () => { return (
    - +
    - +
    ); }; diff --git a/dashboard/client/src/pages/job/JobDetailInfoPage.tsx b/dashboard/client/src/pages/job/JobDetailInfoPage.tsx index b8345e8d44e4..2d69d75d0d11 100644 --- a/dashboard/client/src/pages/job/JobDetailInfoPage.tsx +++ b/dashboard/client/src/pages/job/JobDetailInfoPage.tsx @@ -1,18 +1,26 @@ -import { makeStyles } from "@material-ui/core"; +import { createStyles, makeStyles, Typography } from "@material-ui/core"; import React from "react"; +import { CodeDialogButtonWithPreview } from "../../common/CodeDialogButton"; import { DurationText } from "../../common/DurationText"; import { formatDateFromTimeMs } from "../../common/formatUtils"; +import { + CpuProfilingLink, + CpuStackTraceLink, +} from "../../common/ProfilingLink"; import Loading from "../../components/Loading"; import { MetadataSection } from "../../components/MetadataSection"; import { StatusChip } from "../../components/StatusChip"; import TitleCard from "../../components/TitleCard"; +import { UnifiedJob } from "../../type/job"; import { MainNavPageInfo } from "../layout/mainNavContext"; import { useJobDetail } from "./hook/useJobDetail"; +import { JobLogsLink } from "./JobDetail"; const useStyle = makeStyles((theme) => ({ root: { padding: theme.spacing(2), + backgroundColor: "white", }, })); @@ -51,70 +59,120 @@ export const JobDetailInfoPage = () => { path: job.job_id ? `/jobs/${job.job_id}/info` : undefined, }} /> - - , - }, - { - label: "Job ID", - content: job.job_id - ? { - value: job.job_id, - copyableValue: job.job_id, - } - : { value: "-" }, - }, - { - label: "Submission ID", - content: job.submission_id - ? { - value: job.submission_id, - copyableValue: job.submission_id, - } - : { - value: "-", - }, - }, - { - label: "Duration", - content: job.start_time ? ( - - ) : ( - - - ), - }, - { - label: "Started at", - content: { - value: job.start_time - ? formatDateFromTimeMs(job.start_time) - : "-", + {job.job_id} + +
    + ); +}; + +const useJobMetadataSectionStyles = makeStyles((theme) => + createStyles({ + metadataButton: { + display: "inline-flex", + maxWidth: "100%", + }, + }), +); + +type JobMetadataSectionProps = { + job: UnifiedJob; +}; + +export const JobMetadataSection = ({ job }: JobMetadataSectionProps) => { + const classes = useJobMetadataSectionStyles(); + + return ( + , + }, + { + label: "Job ID", + content: job.job_id + ? { + value: job.job_id, + copyableValue: job.job_id, + } + : { value: "-" }, + }, + { + label: "Submission ID", + content: job.submission_id + ? { + value: job.submission_id, + copyableValue: job.submission_id, + } + : { + value: "-", }, - }, - { - label: "Ended at", - content: { - value: job.end_time ? formatDateFromTimeMs(job.end_time) : "-", + }, + { + label: "Duration", + content: job.start_time ? ( + + ) : ( + - + ), + }, + { + label: "Started at", + content: { + value: job.start_time ? formatDateFromTimeMs(job.start_time) : "-", + }, + }, + { + label: "Ended at", + content: { + value: job.end_time ? formatDateFromTimeMs(job.end_time) : "-", + }, + }, + ...(job.type === "SUBMISSION" + ? [ + { + label: "User-provided metadata", + content: + job.metadata && Object.keys(job.metadata).length ? ( + + ) : undefined, }, - }, - ]} - /> - -
    + ] + : []), + { + label: "Actions", + content: ( +
    + +
    + +
    + +
    + ), + }, + ]} + /> ); }; diff --git a/dashboard/client/src/pages/job/JobDriverLogs.component.test.tsx b/dashboard/client/src/pages/job/JobDriverLogs.component.test.tsx new file mode 100644 index 000000000000..1c526b1caccf --- /dev/null +++ b/dashboard/client/src/pages/job/JobDriverLogs.component.test.tsx @@ -0,0 +1,47 @@ +import { render, screen } from "@testing-library/react"; +import React from "react"; +import { get } from "../../service/requestHandlers"; +import { JobDriverLogs } from "./JobDriverLogs"; + +jest.mock("../../service/requestHandlers"); + +const mockedGet = jest.mocked(get); + +describe("JobDriverLogs", () => { + it("renders", async () => { + expect.assertions(6); + + mockedGet.mockResolvedValue({ + headers: { + "content-type": "text/plain", + }, + data: "log line\nthis is a line\nHi\n10\nfoo", + }); + + render( + , + ); + + await screen.findByText(/log line/); + expect(screen.getByText(/log line/)).toBeVisible(); + expect(screen.getByText(/this is a line/)).toBeVisible(); + expect(screen.getByText(/Hi/)).toBeVisible(); + expect(screen.getByText(/10/)).toBeVisible(); + expect(screen.getByText(/foo/)).toBeVisible(); + + expect(mockedGet).toBeCalledWith( + "log_proxy?url=http%3A%2F%2F127.0.0.1%3A52365%2Flogs%2Fjob-driver-raysubmit_12345.log", + ); + }); +}); diff --git a/dashboard/client/src/pages/job/JobDriverLogs.tsx b/dashboard/client/src/pages/job/JobDriverLogs.tsx new file mode 100644 index 000000000000..5d45b03bf1bb --- /dev/null +++ b/dashboard/client/src/pages/job/JobDriverLogs.tsx @@ -0,0 +1,78 @@ +import { Typography } from "@material-ui/core"; +import React, { useContext } from "react"; +import useSWR from "swr"; +import { GlobalContext } from "../../App"; +import { getLogDetail, getLogDownloadUrl } from "../../service/log"; +import { UnifiedJob } from "../../type/job"; +import { LogViewer } from "../log/LogViewer"; + +const useDriverLogs = ( + job: Pick< + UnifiedJob, + "driver_agent_http_address" | "driver_info" | "submission_id" + 
>, +) => { + const { ipLogMap } = useContext(GlobalContext); + const { driver_agent_http_address, driver_info, submission_id } = job; + const host = (() => { + if (driver_agent_http_address) { + return `${driver_agent_http_address}/logs/`; + } else if (driver_info && ipLogMap[driver_info.node_ip_address]) { + return `${ipLogMap[driver_info.node_ip_address]}/`; + } + })(); + const path = `job-driver-${submission_id}.log`; + + const url = host ? `${host}${path}` : undefined; + const downloadUrl = url ? getLogDownloadUrl(url) : undefined; + + const { + data: log, + isLoading, + mutate, + } = useSWR(url ? ["useDriverLogs", url] : null, async ([_, url]) => + getLogDetail(url) + .then((res) => { + if (res) { + return res; + } else { + return "(This file is empty.)"; + } + }) + .catch(() => { + return "(Failed to load)"; + }), + ); + + return { + log: isLoading ? "Loading..." : log, + downloadUrl, + refresh: mutate, + host, + path, + }; +}; + +type JobDriverLogsProps = { + job: Pick< + UnifiedJob, + "driver_agent_http_address" | "driver_info" | "submission_id" + >; +}; + +export const JobDriverLogs = ({ job }: JobDriverLogsProps) => { + const { downloadUrl, log, path, refresh } = useDriverLogs(job); + return typeof log === "string" ? ( + { + refresh(); + }} + /> + ) : ( + Failed to load + ); +}; diff --git a/dashboard/client/src/pages/job/TaskTimeline.tsx b/dashboard/client/src/pages/job/TaskTimeline.tsx index 50e7fd2b3357..a3129636cb3f 100644 --- a/dashboard/client/src/pages/job/TaskTimeline.tsx +++ b/dashboard/client/src/pages/job/TaskTimeline.tsx @@ -10,9 +10,6 @@ import { ClassNameProps } from "../../common/props"; import { downloadTaskTimelineHref } from "../../service/task"; const useStyle = makeStyles((theme) => ({ - root: { - padding: theme.spacing(2, 0, 0), - }, button: { marginTop: theme.spacing(2), }, @@ -26,7 +23,7 @@ export const TaskTimeline = ({ jobId }: TaskTimelineProps) => { const classes = useStyle(); return ( -
    +
    {/* TODO(aguo): Add link to external documentation about Timeline view. */} Timeline view shows how tasks are executed across different nodes and diff --git a/dashboard/client/src/pages/job/hook/useJobDetail.ts b/dashboard/client/src/pages/job/hook/useJobDetail.ts index d473eb3cba31..9c31ab4ad1a0 100644 --- a/dashboard/client/src/pages/job/hook/useJobDetail.ts +++ b/dashboard/client/src/pages/job/hook/useJobDetail.ts @@ -11,10 +11,10 @@ export const useJobDetail = () => { const [refreshing, setRefresh] = useState(true); const { ipLogMap } = useContext(GlobalContext); const { data: job, isLoading } = useSWR( - "useJobDetail", - async () => { + ["useJobDetail", params.id], + async ([_, jobId]) => { try { - const rsp = await getJobDetail(params.id); + const rsp = await getJobDetail(jobId); return rsp.data; } catch (e) { setMsg("Job Query Error Please Check JobId"); diff --git a/dashboard/client/src/pages/log/LogViewer.tsx b/dashboard/client/src/pages/log/LogViewer.tsx new file mode 100644 index 000000000000..cb155b9417c7 --- /dev/null +++ b/dashboard/client/src/pages/log/LogViewer.tsx @@ -0,0 +1,195 @@ +import { + Button, + createStyles, + InputAdornment, + LinearProgress, + makeStyles, + Switch, + TextField, +} from "@material-ui/core"; +import { SearchOutlined } from "@material-ui/icons"; +import React, { useState } from "react"; +import LogVirtualView from "../../components/LogView/LogVirtualView"; + +const useStyles = makeStyles((theme) => + createStyles({ + search: { + margin: theme.spacing(1), + }, + }), +); + +const useLogViewer = () => { + const [search, setSearch] = + useState<{ + keywords?: string; + lineNumber?: string; + fontSize?: number; + revert?: boolean; + }>(); + const [startTime, setStart] = useState(); + const [endTime, setEnd] = useState(); + + return { + search, + setSearch, + startTime, + setStart, + endTime, + setEnd, + }; +}; + +type LogViewerProps = { + path?: string; + log: string; + downloadUrl?: string; + onRefreshClick?: () => void; + 
height?: number; +}; + +export const LogViewer = ({ + path, + log, + downloadUrl, + onRefreshClick, + height = 600, +}: LogViewerProps) => { + const classes = useStyles(); + + const { search, setSearch, startTime, setStart, endTime, setEnd } = + useLogViewer(); + + return ( + + {log !== "Loading..." && ( +
    +
    + { + setSearch({ ...search, keywords: value }); + }, + type: "", + endAdornment: ( + + + + ), + }} + /> + { + setSearch({ ...search, lineNumber: value }); + }, + type: "", + endAdornment: ( + + + + ), + }} + /> + { + setSearch({ ...search, fontSize: Number(value) }); + }, + type: "", + }} + /> + { + setStart(val.target.value); + }} + InputLabelProps={{ + shrink: true, + }} + /> + { + setEnd(val.target.value); + }} + InputLabelProps={{ + shrink: true, + }} + /> +
    + Reverse:{" "} + setSearch({ ...search, revert: v })} + /> + {onRefreshClick && ( + + )} + + {downloadUrl && path && ( + + )} +
    +
    + +
    + )} + {log === "Loading..." && ( +
    +
    + +
    + )} +
    + ); +}; diff --git a/dashboard/client/src/pages/log/Logs.tsx b/dashboard/client/src/pages/log/Logs.tsx index a44253595a51..61eb93468840 100644 --- a/dashboard/client/src/pages/log/Logs.tsx +++ b/dashboard/client/src/pages/log/Logs.tsx @@ -1,22 +1,11 @@ -import { - Button, - InputAdornment, - LinearProgress, - List, - ListItem, - makeStyles, - Paper, - Switch, - TextField, -} from "@material-ui/core"; -import { SearchOutlined } from "@material-ui/icons"; -import React, { useEffect, useRef, useState } from "react"; +import { Button, List, ListItem, makeStyles, Paper } from "@material-ui/core"; +import React, { useEffect, useState } from "react"; import { Outlet, useLocation, useParams } from "react-router-dom"; -import LogVirtualView from "../../components/LogView/LogVirtualView"; import { SearchInput } from "../../components/SearchComponent"; import TitleCard from "../../components/TitleCard"; import { getLogDetail, getLogDownloadUrl } from "../../service/log"; import { MainNavPageInfo } from "../layout/mainNavContext"; +import { LogViewer } from "./LogViewer"; const useStyles = makeStyles((theme) => ({ root: { @@ -36,30 +25,16 @@ const useStyles = makeStyles((theme) => ({ }, })); -type LogsProps = { - theme?: "dark" | "light"; -}; - -const useLogs = ({ theme }: LogsProps) => { +const useLogs = () => { const { search: urlSearch } = useLocation(); const { host, path } = useParams(); const searchMap = new URLSearchParams(urlSearch); const urlFileName = searchMap.get("fileName"); - const el = useRef(null); const [origin, setOrigin] = useState(); - const [search, setSearch] = - useState<{ - keywords?: string; - lineNumber?: string; - fontSize?: number; - revert?: boolean; - }>(); const [fileName, setFileName] = useState(searchMap.get("fileName") || ""); const [log, setLogs] = useState(); const [downloadUrl, setDownloadUrl] = useState(); - const [startTime, setStart] = useState(); - const [endTime, setEnd] = useState(); useEffect(() => { setFileName(urlFileName || 
""); @@ -97,37 +72,14 @@ const useLogs = ({ theme }: LogsProps) => { downloadUrl, host, path, - el, - search, - setSearch, - theme, fileName, setFileName, - startTime, - setStart, - endTime, - setEnd, }; }; -const Logs = (props: LogsProps) => { +const Logs = () => { const classes = useStyles(); - const { - log, - origin, - downloadUrl, - path, - el, - search, - setSearch, - theme, - fileName, - setFileName, - startTime, - setStart, - endTime, - setEnd, - } = useLogs(props); + const { log, origin, downloadUrl, path, fileName, setFileName } = useLogs(); let href = "#/logs/"; if (origin) { @@ -142,7 +94,7 @@ const Logs = (props: LogsProps) => { } } return ( -
    +
    {!origin &&

    Select a node to view logs

    } @@ -191,125 +143,8 @@ const Logs = (props: LogsProps) => { ))} )} - {typeof log === "string" && log !== "Loading..." && ( -
    -
    - { - setSearch({ ...search, keywords: value }); - }, - type: "", - endAdornment: ( - - - - ), - }} - /> - { - setSearch({ ...search, lineNumber: value }); - }, - type: "", - endAdornment: ( - - - - ), - }} - /> - { - setSearch({ ...search, fontSize: Number(value) }); - }, - type: "", - }} - /> - { - setStart(val.target.value); - }} - InputLabelProps={{ - shrink: true, - }} - /> - { - setEnd(val.target.value); - }} - InputLabelProps={{ - shrink: true, - }} - /> -
    - Reverse:{" "} - setSearch({ ...search, revert: v })} - /> - - {downloadUrl && path && ( - - )} -
    -
    - -
    - )} - {log === "Loading..." && ( -
    -
    - -
    + {typeof log === "string" && ( + )}
    diff --git a/dashboard/client/src/pages/node/ClusterLayout.tsx b/dashboard/client/src/pages/node/ClusterLayout.tsx index c74f6d7a3909..16a02d05eb97 100644 --- a/dashboard/client/src/pages/node/ClusterLayout.tsx +++ b/dashboard/client/src/pages/node/ClusterLayout.tsx @@ -1,17 +1,12 @@ import React from "react"; -import { RiInformationLine, RiTableAltLine } from "react-icons/ri"; +import { RiInformationLine, RiTableLine } from "react-icons/ri"; import { SideTabLayout, SideTabRouteLink } from "../layout/SideTabLayout"; export const ClusterLayout = () => { return ( - + ); }; diff --git a/dashboard/client/src/theme.ts b/dashboard/client/src/theme.ts index 6259f9b65b22..a3ec06564f61 100644 --- a/dashboard/client/src/theme.ts +++ b/dashboard/client/src/theme.ts @@ -17,10 +17,12 @@ const basicTheme: ThemeOptions = { '"Segoe UI Symbol"', ].join(","), h1: { - fontSize: "2rem", + fontSize: "1.5rem", + fontWeight: 500, }, h2: { - fontSize: "1.5rem", + fontSize: "1.25rem", + fontWeight: 500, }, h3: { fontSize: "1rem", diff --git a/dashboard/modules/metrics/dashboards/serve_deployment_grafana_dashboard_base.json b/dashboard/modules/metrics/dashboards/serve_deployment_grafana_dashboard_base.json index c66c41e5d50e..af9c611e8867 100644 --- a/dashboard/modules/metrics/dashboards/serve_deployment_grafana_dashboard_base.json +++ b/dashboard/modules/metrics/dashboards/serve_deployment_grafana_dashboard_base.json @@ -131,6 +131,7 @@ } ] }, + "rayMeta": ["excludesSystemRoutes"], "time": { "from": "now-30m", "to": "now" From db3795925d976860d7c241479a07df6a3f0e2f43 Mon Sep 17 00:00:00 2001 From: Yunxuan Xiao Date: Thu, 20 Apr 2023 01:13:51 -0700 Subject: [PATCH 025/424] [air/Doc] Fix unused config building function in lightning MNIST example. The build_lightning_config_from_existing_code() is not called in the example, and there is a duplicated config building logic below. This PR use this function and remove the other one. 
Signed-off-by: woshiyyya --- .../lightning/lightning_mnist_example.ipynb | 14 +------------- 1 file changed, 1 insertion(+), 13 deletions(-) diff --git a/doc/source/train/examples/lightning/lightning_mnist_example.ipynb b/doc/source/train/examples/lightning/lightning_mnist_example.ipynb index 4f2e534d03d5..dc41c625d122 100644 --- a/doc/source/train/examples/lightning/lightning_mnist_example.ipynb +++ b/doc/source/train/examples/lightning/lightning_mnist_example.ipynb @@ -263,19 +263,7 @@ "metadata": {}, "outputs": [], "source": [ - "lightning_config = (\n", - " LightningConfigBuilder()\n", - " .module(MNISTClassifier, lr=1e-3, feature_dim=128)\n", - " .trainer(\n", - " max_epochs=10,\n", - " accelerator=\"cpu\",\n", - " log_every_n_steps=100,\n", - " logger=CSVLogger(\"logs\"),\n", - " )\n", - " .fit_params(datamodule=datamodule)\n", - " .checkpointing(monitor=\"val_accuracy\", mode=\"max\", save_top_k=3)\n", - " .build()\n", - ")" + "lightning_config = build_lightning_config_from_existing_code()" ] }, { From 53da33e945f2222f3e50272ea7198df406229e03 Mon Sep 17 00:00:00 2001 From: Ricky Xu Date: Thu, 20 Apr 2023 05:25:54 -0700 Subject: [PATCH 026/424] [core][state][nightly] Fix stress_test_state_api_scale (#34579) --- release/nightly_tests/stress_tests/test_state_api_scale.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/release/nightly_tests/stress_tests/test_state_api_scale.py b/release/nightly_tests/stress_tests/test_state_api_scale.py index 9972fae1f6d1..f6ef89ecb0fc 100644 --- a/release/nightly_tests/stress_tests/test_state_api_scale.py +++ b/release/nightly_tests/stress_tests/test_state_api_scale.py @@ -226,7 +226,7 @@ def _split(a, n): list_objects, filters=[ ("reference_type", "=", "LOCAL_REFERENCE"), - ("type", "=", "Worker"), + ("type", "=", "WORKER"), ], key_suffix=f"{num_objects}", limit=STATE_LIST_LIMIT, From fa731d8cc24af8a5044b5fc0db0aec515dee8b87 Mon Sep 17 00:00:00 2001 From: Cuong Nguyen 
<128072568+can-anyscale@users.noreply.github.com> Date: Thu, 20 Apr 2023 07:44:20 -0700 Subject: [PATCH 027/424] [ci/release] Increase concurrency limit for gpu gce (#34578) We now have 100 T4 machines, so increase the limit. At peak, the this limit means that we will use: 84 + 44 + 2*8 + 32 = 96 machines Signed-off-by: Cuong Nguyen --- release/ray_release/buildkite/concurrency.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/release/ray_release/buildkite/concurrency.py b/release/ray_release/buildkite/concurrency.py index e414ec344d60..185580505d65 100644 --- a/release/ray_release/buildkite/concurrency.py +++ b/release/ray_release/buildkite/concurrency.py @@ -31,10 +31,10 @@ ] gce_gpu_cpu_to_concurrent_groups = [ - Condition(min_gpu=8, max_gpu=-1, min_cpu=0, max_cpu=-1, group="gpu-gce", limit=1), - Condition(min_gpu=4, max_gpu=-1, min_cpu=0, max_cpu=-1, group="gpu-gce", limit=1), - Condition(min_gpu=2, max_gpu=-1, min_cpu=0, max_cpu=-1, group="gpu-gce", limit=3), - Condition(min_gpu=1, max_gpu=-1, min_cpu=0, max_cpu=-1, group="gpu-gce", limit=4), + Condition(min_gpu=8, max_gpu=-1, min_cpu=0, max_cpu=-1, group="gpu-gce", limit=4), + Condition(min_gpu=4, max_gpu=-1, min_cpu=0, max_cpu=-1, group="gpu-gce", limit=8), + Condition(min_gpu=2, max_gpu=-1, min_cpu=0, max_cpu=-1, group="gpu-gce", limit=16), + Condition(min_gpu=1, max_gpu=-1, min_cpu=0, max_cpu=-1, group="gpu-gce", limit=32), Condition( min_gpu=0, max_gpu=0, min_cpu=1025, max_cpu=-1, group="enormous-gce", limit=1 ), From 2531871bd76c5c5b4c351560903395dd5142c40a Mon Sep 17 00:00:00 2001 From: Edward Oakes Date: Thu, 20 Apr 2023 10:04:56 -0500 Subject: [PATCH 028/424] [serve][nit] Fix formatting & verbiage for `serve shutdown` (#34585) Fixes unnecessary spaces & cleans up wording. Before: ``` (ray) eoakes@Edwards-MacBook-Pro-2 serve % serve shutdown This will shutdown the Serve application at address "http://localhost:52365" and delete all deployments there. Do you want to continue? 
[y/N]: y 2023-04-19 12:46:12,078 SUCC scripts.py:584 -- Sent delete request successfully! ``` After: ``` (ray) eoakes@Edwards-MacBook-Pro-2 serve % serve shutdown This will shut down Serve on the cluster at address "http://localhost:52365" and delete all applications there. Do you want to continue? [y/N]: y 2023-04-19 12:45:52,050 SUCC scripts.py:583 -- Sent shutdown request; applications will be deleted asynchronously. ``` --- python/ray/serve/scripts.py | 18 ++++++++---------- python/ray/serve/tests/test_cli.py | 10 +++++----- 2 files changed, 13 insertions(+), 15 deletions(-) diff --git a/python/ray/serve/scripts.py b/python/ray/serve/scripts.py index c0f381fca175..dddf4356040b 100644 --- a/python/ray/serve/scripts.py +++ b/python/ray/serve/scripts.py @@ -204,13 +204,11 @@ def deploy(config_file_name: str, address: str): # Error deploying application raise - cli_logger.newline() cli_logger.success( - "\nSent deploy request successfully!\n " - "* Use `serve status` to check deployments' statuses.\n " - "* Use `serve config` to see the current config(s).\n" + "\nSent deploy request successfully.\n " + "* Use `serve status` to check applications' statuses.\n " + "* Use `serve config` to see the current application config(s).\n" ) - cli_logger.newline() @cli.command( @@ -572,17 +570,17 @@ def status(address: str, name: Optional[str]): def shutdown(address: str, yes: bool): if not yes: click.confirm( - f"\nThis will shutdown the Serve application at address " - f'"{address}" and delete all deployments there. Do you ' + f"This will shut down Serve on the cluster at address " + f'"{address}" and delete all applications there. Do you ' "want to continue?", abort=True, ) ServeSubmissionClient(address).delete_application() - cli_logger.newline() - cli_logger.success("\nSent delete request successfully!\n") - cli_logger.newline() + cli_logger.success( + "Sent shutdown request; applications will be deleted asynchronously." 
+ ) @cli.command( diff --git a/python/ray/serve/tests/test_cli.py b/python/ray/serve/tests/test_cli.py index 217e1e56512a..add6e13ef907 100644 --- a/python/ray/serve/tests/test_cli.py +++ b/python/ray/serve/tests/test_cli.py @@ -66,7 +66,7 @@ def test_deploy(ray_start_stop): os.path.dirname(__file__), "test_config_files", "arithmetic.yaml" ) - success_message_fragment = b"Sent deploy request successfully!" + success_message_fragment = b"Sent deploy request successfully." # Ensure the CLI is idempotent num_iterations = 2 @@ -136,7 +136,7 @@ def test_deploy_with_http_options(ray_start_stop): f2 = os.path.join( os.path.dirname(__file__), "test_config_files", "basic_graph.yaml" ) - success_message_fragment = b"Sent deploy request successfully!" + success_message_fragment = b"Sent deploy request successfully." with open(f1, "r") as config_file: config = yaml.safe_load(config_file) @@ -183,7 +183,7 @@ def test_deploy_multi_app(ray_start_stop): os.path.dirname(__file__), "test_config_files", "pizza_world.yaml" ) - success_message_fragment = b"Sent deploy request successfully!" + success_message_fragment = b"Sent deploy request successfully." # Ensure the CLI is idempotent num_iterations = 2 @@ -368,7 +368,7 @@ def test_config(ray_start_stop): config_file_name = os.path.join( os.path.dirname(__file__), "test_config_files", "basic_graph.yaml" ) - success_message_fragment = b"Sent deploy request successfully!" + success_message_fragment = b"Sent deploy request successfully." with open(config_file_name, "r") as config_file: config = yaml.safe_load(config_file) @@ -1030,7 +1030,7 @@ def test_idempotence_after_controller_death(ray_start_stop, use_command: bool): config_file_name = os.path.join( os.path.dirname(__file__), "test_config_files", "basic_graph.yaml" ) - success_message_fragment = b"Sent deploy request successfully!" + success_message_fragment = b"Sent deploy request successfully." 
deploy_response = subprocess.check_output(["serve", "deploy", config_file_name]) assert success_message_fragment in deploy_response From f6c559a1edf2db42ce6b7da88d41d6126efc0d71 Mon Sep 17 00:00:00 2001 From: Kai Fricke Date: Thu, 20 Apr 2023 16:22:43 +0100 Subject: [PATCH 029/424] [ci/release] GCE variants for remaining Tune tests (#34572) Signed-off-by: Kai Fricke --- python/ray/tune/utils/release_test_util.py | 5 +- ...te_config.yaml => compute_config_aws.yaml} | 0 .../frequent_pausing/compute_config_gce.yaml | 12 +++ release/release_tests.yaml | 77 +++++++++++++++++-- .../tune_tests/cloud_tests/tpl_gce_4x2.yaml | 17 ++++ .../fault_tolerance_tests/tpl_gce_16x1.yaml | 28 +++++++ .../scalability_tests/app_config.yaml | 2 + .../scalability_tests/app_config_data.yaml | 3 +- .../workloads/test_durable_trainable.py | 19 +++-- 9 files changed, 148 insertions(+), 15 deletions(-) rename release/air_tests/frequent_pausing/{compute_config.yaml => compute_config_aws.yaml} (100%) create mode 100644 release/air_tests/frequent_pausing/compute_config_gce.yaml create mode 100644 release/tune_tests/cloud_tests/tpl_gce_4x2.yaml create mode 100644 release/tune_tests/fault_tolerance_tests/tpl_gce_16x1.yaml diff --git a/python/ray/tune/utils/release_test_util.py b/python/ray/tune/utils/release_test_util.py index db7d848c12aa..5c6fdf943ee3 100644 --- a/python/ray/tune/utils/release_test_util.py +++ b/python/ray/tune/utils/release_test_util.py @@ -109,7 +109,10 @@ def timed_tune_run( durable = ( "storage_path" in tune_kwargs and tune_kwargs["storage_path"] - and tune_kwargs["storage_path"].startswith("s3://") + and ( + tune_kwargs["storage_path"].startswith("s3://") + or tune_kwargs["storage_path"].startswith("gs://") + ) ) sleep_time = 1.0 / results_per_second diff --git a/release/air_tests/frequent_pausing/compute_config.yaml b/release/air_tests/frequent_pausing/compute_config_aws.yaml similarity index 100% rename from release/air_tests/frequent_pausing/compute_config.yaml rename to 
release/air_tests/frequent_pausing/compute_config_aws.yaml diff --git a/release/air_tests/frequent_pausing/compute_config_gce.yaml b/release/air_tests/frequent_pausing/compute_config_gce.yaml new file mode 100644 index 000000000000..62833efbd455 --- /dev/null +++ b/release/air_tests/frequent_pausing/compute_config_gce.yaml @@ -0,0 +1,12 @@ +cloud_id: {{env["ANYSCALE_CLOUD_ID"]}} +region: us-west1 +allowed_azs: + - us-west1-b + +max_workers: 0 + +head_node_type: + name: head_node + instance_type: n1-standard-2 + +worker_node_types: [] diff --git a/release/release_tests.yaml b/release/release_tests.yaml index 29f6843ce331..b809651bfd72 100644 --- a/release/release_tests.yaml +++ b/release/release_tests.yaml @@ -134,13 +134,22 @@ cluster: cluster_env: frequent_pausing/app_config.yaml - cluster_compute: frequent_pausing/compute_config.yaml + cluster_compute: frequent_pausing/compute_config_aws.yaml run: timeout: 600 # 10min long_running: true script: python frequent_pausing/script.py + variations: + - __suffix__: aws + - __suffix__: gce + env: gce + frequency: manual + cluster: + cluster_env: frequent_pausing/app_config.yaml + cluster_compute: frequent_pausing/compute_config_gce.yaml + alert: default @@ -1734,7 +1743,7 @@ alert: tune_tests -- name: tune_cloud_aws_durable_upload_rllib_str +- name: tune_cloud_durable_upload_rllib_str group: Tune cloud tests working_dir: tune_tests/cloud_tests @@ -1756,10 +1765,25 @@ wait_for_nodes: num_nodes: 4 + variations: + - __suffix__: aws + - __suffix__: gce + env: gce + frequency: manual + cluster: + cluster_env: app_config_ml.yaml + cluster_compute: tpl_gce_4x2.yaml + run: + timeout: 600 + script: python workloads/run_cloud_test.py durable_upload --trainable rllib_str + --bucket gs://tune-cloud-tests/durable_upload_rllib_str + wait_for_nodes: + num_nodes: 4 + alert: tune_tests -- name: tune_cloud_aws_durable_upload_rllib_trainer +- name: tune_cloud_durable_upload_rllib_trainer group: Tune cloud tests working_dir: 
tune_tests/cloud_tests @@ -1780,6 +1804,20 @@ wait_for_nodes: num_nodes: 4 + variations: + - __suffix__: aws + - __suffix__: gce + env: gce + frequency: manual + cluster: + cluster_env: app_config_ml.yaml + cluster_compute: tpl_gce_4x2.yaml + run: + timeout: 600 + script: python workloads/run_cloud_test.py durable_upload --trainable rllib_str + --bucket gs://tune-cloud-tests/durable_upload_rllib_trainer + wait_for_nodes: + num_nodes: 4 alert: tune_tests @@ -1826,10 +1864,23 @@ run: timeout: 900 - script: python workloads/test_durable_trainable.py --bucket tune-cloud-tests + script: python workloads/test_durable_trainable.py --bucket s3://tune-cloud-tests/scalability_durable_trainable wait_for_nodes: num_nodes: 16 + variations: + - __suffix__: aws + - __suffix__: gce + env: gce + frequency: manual + run: + timeout: 900 + script: python workloads/test_durable_trainable.py --bucket gs://tune-cloud-tests/scalability_durable_trainable + wait_for_nodes: + num_nodes: 16 + cluster: + cluster_env: app_config.yaml + cluster_compute: tpl_gce_16x2.yaml alert: tune_tests @@ -2016,11 +2067,27 @@ run: timeout: 5400 - script: python workloads/test_tune_worker_fault_tolerance.py + script: python workloads/test_tune_worker_fault_tolerance.py --bucket s3://tune-cloud-tests/worker_fault_tolerance wait_for_nodes: num_nodes: 16 +# Disabled until we can kill nodes in GCE +# variations: +# - __suffix__: aws +# - __suffix__: gce +# env: gce +# frequency: manual +# run: +# timeout: 5400 +# script: python workloads/test_tune_worker_fault_tolerance.py --bucket gs://tune-cloud-tests/worker_fault_tolerance +# +# wait_for_nodes: +# num_nodes: 16 +# cluster: +# cluster_env: app_config.yaml +# cluster_compute: tpl_gce_16x1.yaml + ######################## # Golden Notebook tests ######################## diff --git a/release/tune_tests/cloud_tests/tpl_gce_4x2.yaml b/release/tune_tests/cloud_tests/tpl_gce_4x2.yaml new file mode 100644 index 000000000000..c9e14cff5f0d --- /dev/null +++ 
b/release/tune_tests/cloud_tests/tpl_gce_4x2.yaml @@ -0,0 +1,17 @@ +cloud_id: {{env["ANYSCALE_CLOUD_ID"]}} +region: us-west1 +allowed_azs: + - us-west1-b + +max_workers: 3 + +head_node_type: + name: head_node + instance_type: n1-standard-2 + +worker_node_types: + - name: worker_node + instance_type: n1-standard-2 + min_workers: 3 + max_workers: 3 + use_spot: false diff --git a/release/tune_tests/fault_tolerance_tests/tpl_gce_16x1.yaml b/release/tune_tests/fault_tolerance_tests/tpl_gce_16x1.yaml new file mode 100644 index 000000000000..dbccfa496b2d --- /dev/null +++ b/release/tune_tests/fault_tolerance_tests/tpl_gce_16x1.yaml @@ -0,0 +1,28 @@ +cloud_id: {{env["ANYSCALE_CLOUD_ID"]}} +region: us-west1 +allowed_azs: + - us-west1-b + +max_workers: 16 + +head_node_type: + name: head_node + instance_type: n1-standard-2 + resources: + custom_resources: + head: 1 + +worker_node_types: + - name: worker_node + instance_type: n1-standard-2 + min_workers: 16 + max_workers: 16 + use_spot: true + +## Required to allow nodes to terminate themselves. +#aws: +# TagSpecifications: +# - ResourceType: "instance" +# Tags: +# - Key: chaos-test-name +# Value: 'tune-chaos-test' \ No newline at end of file diff --git a/release/tune_tests/scalability_tests/app_config.yaml b/release/tune_tests/scalability_tests/app_config.yaml index e552178aa270..7832627aba4d 100755 --- a/release/tune_tests/scalability_tests/app_config.yaml +++ b/release/tune_tests/scalability_tests/app_config.yaml @@ -7,9 +7,11 @@ python: pip_packages: - pytest - awscli + - gcsfs<=2022.7.1 - pyarrow>=6.0.1,<7.0.0 conda_packages: [] post_build_cmds: - pip3 uninstall -y ray || true && pip3 install -U {{ env["RAY_WHEELS"] | default("ray") }} + - pip3 install ray[tune] # Installing Tune dependency so we can get protobuf version back. 
- {{ env["RAY_WHEELS_SANITY_CHECK"] | default("echo No Ray wheels sanity check") }} diff --git a/release/tune_tests/scalability_tests/app_config_data.yaml b/release/tune_tests/scalability_tests/app_config_data.yaml index 6a18a57ba889..788d7a952d5a 100755 --- a/release/tune_tests/scalability_tests/app_config_data.yaml +++ b/release/tune_tests/scalability_tests/app_config_data.yaml @@ -7,13 +7,14 @@ python: pip_packages: - pytest - awscli + - gcsfs<=2022.7.1 - xgboost_ray # this will install protobuf version beyond the upper bound of what Tune allows - pyarrow>=6.0.1,<7.0.0 conda_packages: [] post_build_cmds: - pip3 uninstall -y ray || true && pip3 install -U {{ env["RAY_WHEELS"] | default("ray") }} - - pip3 install ray[tune] # Needed for Ray Client to work. Installing Tune dependency so we can get protobuf version back. + - pip3 install ray[tune] # Installing Tune dependency so we can get protobuf version back. - {{ env["RAY_WHEELS_SANITY_CHECK"] | default("echo No Ray wheels sanity check") }} - sudo mkdir -p /data || true - sudo chown ray:1000 /data || true diff --git a/release/tune_tests/scalability_tests/workloads/test_durable_trainable.py b/release/tune_tests/scalability_tests/workloads/test_durable_trainable.py index 1b293a3fddd0..1a07f6edf651 100644 --- a/release/tune_tests/scalability_tests/workloads/test_durable_trainable.py +++ b/release/tune_tests/scalability_tests/workloads/test_durable_trainable.py @@ -36,15 +36,18 @@ def main(bucket): os.environ[var] = str(y) else: print("No AWS secrets file found. 
Loading from boto.") - from boto3 import Session + try: + from boto3 import Session - session = Session() - credentials = session.get_credentials() - current_credentials = credentials.get_frozen_credentials() + session = Session() + credentials = session.get_credentials() + current_credentials = credentials.get_frozen_credentials() - os.environ["AWS_ACCESS_KEY_ID"] = current_credentials.access_key - os.environ["AWS_SECRET_ACCESS_KEY"] = current_credentials.secret_key - os.environ["AWS_SESSION_TOKEN"] = current_credentials.token + os.environ["AWS_ACCESS_KEY_ID"] = current_credentials.access_key + os.environ["AWS_SECRET_ACCESS_KEY"] = current_credentials.secret_key + os.environ["AWS_SESSION_TOKEN"] = current_credentials.token + except Exception: + print("Cannot setup AWS credentials (is this running on GCE?)") if all( os.getenv(k, "") @@ -76,7 +79,7 @@ def main(bucket): checkpoint_size_b=int(10 * 1000**2), # 10 MB keep_checkpoints_num=2, resources_per_trial={"cpu": 2}, - storage_path=f"s3://{bucket}/durable/", + storage_path=bucket, ) From b91659adb5362c9b96d84ae35989eb8681e0eaa9 Mon Sep 17 00:00:00 2001 From: Yunxuan Xiao Date: Thu, 20 Apr 2023 10:07:41 -0700 Subject: [PATCH 030/424] [Doc] Fix AIR benchmark configuration link failure. (#34597) --- doc/source/ray-air/benchmarks.rst | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/doc/source/ray-air/benchmarks.rst b/doc/source/ray-air/benchmarks.rst index b5e30f905c21..46a4b0d0f0c4 100644 --- a/doc/source/ray-air/benchmarks.rst +++ b/doc/source/ray-air/benchmarks.rst @@ -259,13 +259,13 @@ overhead of a few seconds for both methods that is negligible for longer trainin .. _`XGBoost Cluster Configuration`: https://github.com/ray-project/ray/blob/a241e6a0f5a630d6ed5b84cce30c51963834d15b/release/air_tests/air_benchmarks/xgboost_compute_tpl.yaml#L6-L24 .. 
_`GPU image batch prediction script`: https://github.com/ray-project/ray/blob/cec82a1ced631525a4d115e4dc0c283fa4275a7f/release/air_tests/air_benchmarks/workloads/gpu_batch_prediction.py#L18-L49 .. _`GPU image training script`: https://github.com/ray-project/ray/blob/cec82a1ced631525a4d115e4dc0c283fa4275a7f/release/air_tests/air_benchmarks/workloads/pytorch_training_e2e.py#L95-L106 -.. _`GPU prediction small cluster configuration`: https://github.com/ray-project/ray/blob/master/release/air_tests/air_benchmarks/compute_gpu_1_g4_8xl.yaml#L6-L15 -.. _`GPU prediction large cluster configuration`: https://github.com/ray-project/ray/blob/master/release/air_tests/air_benchmarks/compute_gpu_4_g4_12xl.yaml#L6-L15 -.. _`GPU training small cluster configuration`: https://github.com/ray-project/ray/blob/master/release/air_tests/air_benchmarks/compute_gpu_1.yaml#L6-L24 -.. _`GPU training large cluster configuration`: https://github.com/ray-project/ray/blob/master/release/air_tests/air_benchmarks/compute_gpu_16.yaml#L5-L25 +.. _`GPU prediction small cluster configuration`: https://github.com/ray-project/ray/blob/master/release/air_tests/air_benchmarks/compute_gpu_1_cpu_16_aws.yaml#L6-L15 +.. _`GPU prediction large cluster configuration`: https://github.com/ray-project/ray/blob/master/release/air_tests/air_benchmarks/compute_gpu_4x4_aws.yaml#L6-L15 +.. _`GPU training small cluster configuration`: https://github.com/ray-project/ray/blob/master/release/air_tests/air_benchmarks/compute_gpu_1_aws.yaml#L6-L24 +.. _`GPU training large cluster configuration`: https://github.com/ray-project/ray/blob/master/release/air_tests/air_benchmarks/compute_gpu_4x4_aws.yaml#L5-L25 .. _`Pytorch comparison training script`: https://github.com/ray-project/ray/blob/master/release/air_tests/air_benchmarks/workloads/torch_benchmark.py -.. _`Pytorch comparison CPU cluster configuration`: https://github.com/ray-project/ray/blob/master/release/air_tests/air_benchmarks/compute_cpu_4.yaml -.. 
_`Pytorch comparison GPU cluster configuration`: https://github.com/ray-project/ray/blob/master/release/air_tests/air_benchmarks/compute_gpu_4x4.yaml +.. _`Pytorch comparison CPU cluster configuration`: https://github.com/ray-project/ray/blob/master/release/air_tests/air_benchmarks/compute_cpu_4_aws.yaml +.. _`Pytorch comparison GPU cluster configuration`: https://github.com/ray-project/ray/blob/master/release/air_tests/air_benchmarks/compute_gpu_4x4_aws.yaml .. _`Tensorflow comparison training script`: https://github.com/ray-project/ray/blob/master/release/air_tests/air_benchmarks/workloads/tensorflow_benchmark.py -.. _`Tensorflow comparison CPU cluster configuration`: https://github.com/ray-project/ray/blob/master/release/air_tests/air_benchmarks/compute_cpu_4.yaml -.. _`Tensorflow comparison GPU cluster configuration`: https://github.com/ray-project/ray/blob/master/release/air_tests/air_benchmarks/compute_gpu_4x4.yaml +.. _`Tensorflow comparison CPU cluster configuration`: https://github.com/ray-project/ray/blob/master/release/air_tests/air_benchmarks/compute_cpu_4_aws.yaml +.. _`Tensorflow comparison GPU cluster configuration`: https://github.com/ray-project/ray/blob/master/release/air_tests/air_benchmarks/compute_gpu_4x4_aws.yaml From 87b4e631d4797c323b0e7c65bc3219d5609b9022 Mon Sep 17 00:00:00 2001 From: xwjiang2010 <87673679+xwjiang2010@users.noreply.github.com> Date: Thu, 20 Apr 2023 10:27:12 -0700 Subject: [PATCH 031/424] [air-output] print out worker ip for distributed train workers. 
(#33807) Signed-off-by: xwjiang2010 --- python/ray/train/_internal/backend_executor.py | 7 +++++++ python/ray/train/_internal/worker_group.py | 12 ++++++++++-- python/ray/train/tests/test_backend.py | 6 +++++- python/ray/train/tests/test_worker_group.py | 6 +++++- 4 files changed, 27 insertions(+), 4 deletions(-) diff --git a/python/ray/train/_internal/backend_executor.py b/python/ray/train/_internal/backend_executor.py index 749fc3dfe71b..62bd77454ecb 100644 --- a/python/ray/train/_internal/backend_executor.py +++ b/python/ray/train/_internal/backend_executor.py @@ -120,6 +120,13 @@ def start( # TODO remove if self._trial_info and self._trial_info.driver_ip: self.worker_group._move_workers_with_ip_to_front(self._trial_info.driver_ip) + + worker_locs = [ + f"{w.metadata.pid} ({w.metadata.node_ip})" + for w in self.worker_group.workers + ] + logger.info(f"Starting distributed worker processes: {worker_locs}") + try: if initialization_hook: self._initialization_hook = initialization_hook diff --git a/python/ray/train/_internal/worker_group.py b/python/ray/train/_internal/worker_group.py index ac0aab8ebf2e..36dd2326f7f4 100644 --- a/python/ray/train/_internal/worker_group.py +++ b/python/ray/train/_internal/worker_group.py @@ -1,4 +1,5 @@ import logging +import os import socket from dataclasses import dataclass from typing import Callable, List, TypeVar, Optional, Dict, Type, Tuple, Union @@ -42,13 +43,15 @@ class WorkerMetadata: node_id: ID of the node this worker is on. node_ip: IP address of the node this worker is on. hostname: Hostname that this worker is on. - gpu_ids (List[int]): List of CUDA IDs available to this worker. + gpu_ids: List of CUDA IDs available to this worker. + pid: Process ID of this worker. 
""" node_id: str node_ip: str hostname: str gpu_ids: Optional[List[str]] + pid: int @dataclass @@ -83,9 +86,14 @@ def construct_metadata() -> WorkerMetadata: node_ip = ray.util.get_node_ip_address() hostname = socket.gethostname() gpu_ids = [str(gpu_id) for gpu_id in ray.get_gpu_ids()] + pid = os.getpid() return WorkerMetadata( - node_id=node_id, node_ip=node_ip, hostname=hostname, gpu_ids=gpu_ids + node_id=node_id, + node_ip=node_ip, + hostname=hostname, + gpu_ids=gpu_ids, + pid=pid, ) diff --git a/python/ray/train/tests/test_backend.py b/python/ray/train/tests/test_backend.py index 1667dc8678a5..7508a8d51eb5 100644 --- a/python/ray/train/tests/test_backend.py +++ b/python/ray/train/tests/test_backend.py @@ -71,7 +71,11 @@ def mock_add_workers(self, num_workers): original_add_workers(self, num_workers) for i, worker in enumerate(self.workers): metadata = WorkerMetadata( - node_id=0, node_ip=str(i % 2), hostname=0, gpu_ids=[0] + node_id=0, + node_ip=str(i % 2), + hostname=0, + gpu_ids=[0], + pid=0, ) worker.metadata = metadata diff --git a/python/ray/train/tests/test_worker_group.py b/python/ray/train/tests/test_worker_group.py index e40be2ff16a0..06770c6e6af0 100644 --- a/python/ray/train/tests/test_worker_group.py +++ b/python/ray/train/tests/test_worker_group.py @@ -89,7 +89,11 @@ def test_move_workers_with_ip_to_front(ray_start_2_cpus): Worker( actor=None, metadata=WorkerMetadata( - node_id="dummy", node_ip=f"10.1.10.{i}", hostname="dummy", gpu_ids=None + node_id="dummy", + node_ip=f"10.1.10.{i}", + hostname="dummy", + gpu_ids=None, + pid=0, ), ) for i in range(1, 17) From cd084079c629e344cafa8a1081f78f383999a074 Mon Sep 17 00:00:00 2001 From: Jiajun Yao Date: Thu, 20 Apr 2023 10:38:24 -0700 Subject: [PATCH 032/424] Fix download_wheels.sh wheel urls (#34616) Some mac wheel urls are invalid Signed-off-by: Jiajun Yao --- release/util/download_wheels.sh | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/release/util/download_wheels.sh 
b/release/util/download_wheels.sh index 40bf28320a57..a7113aeeccca 100755 --- a/release/util/download_wheels.sh +++ b/release/util/download_wheels.sh @@ -34,8 +34,8 @@ download_wheel "https://s3-us-west-2.amazonaws.com/ray-wheels/releases/$RAY_VERS download_wheel "https://s3-us-west-2.amazonaws.com/ray-wheels/releases/$RAY_VERSION/$RAY_HASH/ray-$RAY_VERSION-cp311-cp311-manylinux2014_aarch64.whl" # macOS. -echo "Downloading Ray core MacOS wheels (intel)" -download_wheel "https://s3-us-west-2.amazonaws.com/ray-wheels/releases/$RAY_VERSION/$RAY_HASH/ray-$RAY_VERSION-cp37-cp37m-macosx_10_15_intel.whl" +echo "Downloading Ray core MacOS wheels (x86_64)" +download_wheel "https://s3-us-west-2.amazonaws.com/ray-wheels/releases/$RAY_VERSION/$RAY_HASH/ray-$RAY_VERSION-cp37-cp37m-macosx_10_15_x86_64.whl" download_wheel "https://s3-us-west-2.amazonaws.com/ray-wheels/releases/$RAY_VERSION/$RAY_HASH/ray-$RAY_VERSION-cp38-cp38-macosx_10_15_x86_64.whl" download_wheel "https://s3-us-west-2.amazonaws.com/ray-wheels/releases/$RAY_VERSION/$RAY_HASH/ray-$RAY_VERSION-cp39-cp39-macosx_10_15_x86_64.whl" download_wheel "https://s3-us-west-2.amazonaws.com/ray-wheels/releases/$RAY_VERSION/$RAY_HASH/ray-$RAY_VERSION-cp310-cp310-macosx_10_15_universal2.whl" @@ -71,8 +71,8 @@ download_wheel "https://s3-us-west-2.amazonaws.com/ray-wheels/releases/$RAY_VERS download_wheel "https://s3-us-west-2.amazonaws.com/ray-wheels/releases/$RAY_VERSION/$RAY_HASH/ray_cpp-$RAY_VERSION-cp311-cp311-manylinux2014_aarch64.whl" # macOS CPP. 
-echo "Downloading Ray CPP MacOS wheels (intel)" -download_wheel "https://s3-us-west-2.amazonaws.com/ray-wheels/releases/$RAY_VERSION/$RAY_HASH/ray_cpp-$RAY_VERSION-cp37-cp37m-macosx_10_15_intel.whl" +echo "Downloading Ray CPP MacOS wheels (x86_64)" +download_wheel "https://s3-us-west-2.amazonaws.com/ray-wheels/releases/$RAY_VERSION/$RAY_HASH/ray_cpp-$RAY_VERSION-cp37-cp37m-macosx_10_15_x86_64.whl" download_wheel "https://s3-us-west-2.amazonaws.com/ray-wheels/releases/$RAY_VERSION/$RAY_HASH/ray_cpp-$RAY_VERSION-cp38-cp38-macosx_10_15_x86_64.whl" download_wheel "https://s3-us-west-2.amazonaws.com/ray-wheels/releases/$RAY_VERSION/$RAY_HASH/ray_cpp-$RAY_VERSION-cp39-cp39-macosx_10_15_x86_64.whl" download_wheel "https://s3-us-west-2.amazonaws.com/ray-wheels/releases/$RAY_VERSION/$RAY_HASH/ray_cpp-$RAY_VERSION-cp310-cp310-macosx_10_15_universal2.whl" From ab6527c5b6920bbb7afbe4fe52e20c3752035f62 Mon Sep 17 00:00:00 2001 From: Balaji Veeramani Date: Thu, 20 Apr 2023 10:53:50 -0700 Subject: [PATCH 033/424] [Data] Fix `iter_tensor_batches_benchmark_multi_node` GCE (#34598) The `iter_tensor_batches_benchmark_multi_node` GCE variant was failing because it used the wrong compute config. 
Signed-off-by: Balaji Veeramani --- .../multi_node_benchmark_compute_gce.yaml | 17 +++++++++++++++++ release/release_tests.yaml | 2 +- 2 files changed, 18 insertions(+), 1 deletion(-) create mode 100644 release/nightly_tests/dataset/multi_node_benchmark_compute_gce.yaml diff --git a/release/nightly_tests/dataset/multi_node_benchmark_compute_gce.yaml b/release/nightly_tests/dataset/multi_node_benchmark_compute_gce.yaml new file mode 100644 index 000000000000..12b52948af59 --- /dev/null +++ b/release/nightly_tests/dataset/multi_node_benchmark_compute_gce.yaml @@ -0,0 +1,17 @@ +cloud_id: {{env["ANYSCALE_CLOUD_ID"]}} +region: us-west1 +allowed_azs: +- us-west1-c + +max_workers: 0 + +head_node_type: + name: head_node + instance_type: n2-standard-16 # m5.4xlarge + +worker_node_types: + - name: worker_node + instance_type: n2-standard-16 # m5.4xlarge + max_workers: 3 + min_workers: 3 + use_spot: false diff --git a/release/release_tests.yaml b/release/release_tests.yaml index b809651bfd72..3a86d897510a 100644 --- a/release/release_tests.yaml +++ b/release/release_tests.yaml @@ -5169,7 +5169,7 @@ frequency: manual cluster: cluster_env: app_config.yaml - cluster_compute: single_node_benchmark_compute_gce.yaml + cluster_compute: multi_node_benchmark_compute_gce.yaml - name: iter_batches_benchmark_single_node group: data-tests From 76d2c9f893d9ae57079e5ba4348c4e12fd27c984 Mon Sep 17 00:00:00 2001 From: Justin Yu Date: Thu, 20 Apr 2023 13:23:55 -0700 Subject: [PATCH 034/424] [Doc][AIR] Improve visibility of Trainer restore and stateful callback restoration (#34350) --- .../examples/gptj_deepspeed_fine_tuning.ipynb | 3 +- doc/source/train/api/api.rst | 2 +- doc/source/train/config_guide.rst | 90 +++++++++---- doc/source/train/dl_guide.rst | 125 ++++++++++++++++-- doc/source/train/doc_code/dl_guide.py | 98 ++++++++++++++ doc/source/train/doc_code/key_concepts.py | 37 ++++-- doc/source/train/faq.rst | 103 --------------- doc/source/train/gbdt.rst | 26 ++++ 
doc/source/tune/tutorials/tune-storage.rst | 5 +- python/ray/tune/callback.py | 37 +++++- 10 files changed, 366 insertions(+), 160 deletions(-) create mode 100644 doc/source/train/doc_code/dl_guide.py diff --git a/doc/source/ray-air/examples/gptj_deepspeed_fine_tuning.ipynb b/doc/source/ray-air/examples/gptj_deepspeed_fine_tuning.ipynb index adb429bcb89a..1a70fa3a4f0e 100644 --- a/doc/source/ray-air/examples/gptj_deepspeed_fine_tuning.ipynb +++ b/doc/source/ray-air/examples/gptj_deepspeed_fine_tuning.ipynb @@ -559,6 +559,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -567,7 +568,7 @@ "We pass the preprocessors we have defined earlier as an argument, wrapped in a {class}`~ray.data.preprocessors.chain.Chain`. The preprocessor will be included with the returned {class}`~ray.air.checkpoint.Checkpoint`, meaning it will also be applied during inference.\n", "\n", "```{note}\n", - "If you want to upload checkpoints to cloud storage (eg. S3), use {class}`~ray.tune.syncer.SyncConfig` - see {ref}`train-config-sync` for an example. Using cloud storage is highly recommended, especially for production.\n", + "If you want to upload checkpoints to cloud storage (eg. S3), set {class}`air.RunConfig(storage_path) `. See {ref}`train-run-config` for an example. Using cloud storage is highly recommended, especially for production.\n", "```" ] }, diff --git a/doc/source/train/api/api.rst b/doc/source/train/api/api.rst index 2230170fc993..52e106c632f1 100644 --- a/doc/source/train/api/api.rst +++ b/doc/source/train/api/api.rst @@ -232,4 +232,4 @@ Restoration API for Built-in Trainers .. seealso:: - See :ref:`train-restore-faq` for more details on when and how trainer restore should be used. + See :ref:`train-restore-guide` for more details on when and how trainer restore should be used. 
diff --git a/doc/source/train/config_guide.rst b/doc/source/train/config_guide.rst index b2a010024808..3ccfd2fc0279 100644 --- a/doc/source/train/config_guide.rst +++ b/doc/source/train/config_guide.rst @@ -7,36 +7,51 @@ The following overviews how to configure scale-out, run options, and fault-toler For more details on how to configure data ingest, also refer to :ref:`air-ingest`. Scaling Configurations in Train (``ScalingConfig``) -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +--------------------------------------------------- The scaling configuration specifies distributed training properties like the number of workers or the resources per worker. The properties of the scaling configuration are :ref:`tunable `. -:class:`ScalingConfig API reference ` - .. literalinclude:: doc_code/key_concepts.py :language: python :start-after: __scaling_config_start__ :end-before: __scaling_config_end__ +.. seealso:: + + See the :class:`~ray.air.ScalingConfig` API reference. + +.. _train-run-config: Run Configuration in Train (``RunConfig``) -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +------------------------------------------ -The run configuration specifies distributed training properties like the number of workers or the -resources per worker. +``RunConfig`` is a configuration object used in Ray Train to define the experiment +spec that corresponds to a call to ``trainer.fit()``. -The properties of the run configuration are :ref:`not tunable `. +It includes settings such as the experiment name, storage path for results, +stopping conditions, custom callbacks, checkpoint configuration, verbosity level, +and logging options. + +Many of these settings are configured through other config objects and passed through +the ``RunConfig``. The following sub-sections contain descriptions of these configs. -:class:`RunConfig API reference ` +The properties of the run configuration are :ref:`not tunable `. .. 
literalinclude:: doc_code/key_concepts.py :language: python :start-after: __run_config_start__ :end-before: __run_config_end__ +.. seealso:: + + See the :class:`~ray.air.RunConfig` API reference. + + See :ref:`tune-storage-options` for storage configuration examples (related to ``storage_path``). + + Failure configurations in Train (``FailureConfig``) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -45,30 +60,15 @@ The failure configuration specifies how training failures should be dealt with. As part of the RunConfig, the properties of the failure configuration are :ref:`not tunable `. -:class:`FailureConfig API reference ` .. literalinclude:: doc_code/key_concepts.py :language: python :start-after: __failure_config_start__ :end-before: __failure_config_end__ -.. _train-config-sync: - -Sync configurations in Train (``SyncConfig``) -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +.. seealso:: -The sync configuration specifies how to synchronize checkpoints between the -Ray cluster and remote storage. - -As part of the RunConfig, the properties of the sync configuration -are :ref:`not tunable `. - -:class:`SyncConfig API reference ` - -.. literalinclude:: doc_code/key_concepts.py - :language: python - :start-after: __sync_config_start__ - :end-before: __sync_config_end__ + See the :class:`~ray.air.FailureConfig` API reference. Checkpoint configurations in Train (``CheckpointConfig``) @@ -80,10 +80,46 @@ and how many checkpoints to keep. As part of the RunConfig, the properties of the checkpoint configuration are :ref:`not tunable `. -:class:`CheckpointConfig API reference ` - .. literalinclude:: doc_code/key_concepts.py :language: python :start-after: __checkpoint_config_start__ :end-before: __checkpoint_config_end__ +Trainers of certain frameworks including :class:`~ray.train.xgboost.XGBoostTrainer`, +:class:`~ray.train.lightgbm.LightGBMTrainer`, and :class:`~ray.train.huggingface.HuggingFaceTrainer` +implement checkpointing out of the box. 
For these trainers, checkpointing can be +enabled by setting the checkpoint frequency within the :class:`~ray.air.CheckpointConfig`. + +.. literalinclude:: doc_code/key_concepts.py + :language: python + :start-after: __checkpoint_config_ckpt_freq_start__ + :end-before: __checkpoint_config_ckpt_freq_end__ + +.. warning:: + + ``checkpoint_frequency`` and other parameters do *not* work for trainers + that accept a custom training loop such as :class:`~ray.train.torch.TorchTrainer`, + since checkpointing is fully user-controlled. + +.. seealso:: + + See the :class:`~ray.air.CheckpointConfig` API reference. + + +Synchronization configurations in Train (``tune.SyncConfig``) +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The ``tune.SyncConfig`` specifies how synchronization of results +and checkpoints should happen in a distributed Ray cluster. + +As part of the RunConfig, the properties of the failure configuration +are :ref:`not tunable `. + +.. note:: + + This configuration is mostly relevant to running multiple Train runs with a + Ray Tune. See :ref:`tune-storage-options` for a guide on using the ``SyncConfig``. + +.. seealso:: + + See the :class:`~ray.tune.syncer.SyncConfig` API reference. diff --git a/doc/source/train/dl_guide.rst b/doc/source/train/dl_guide.rst index 045fa0f930a9..b97fe2bdc0e9 100644 --- a/doc/source/train/dl_guide.rst +++ b/doc/source/train/dl_guide.rst @@ -504,6 +504,8 @@ The following figure shows how these two sessions look like in a Data Parallel t .. https://docs.google.com/drawings/d/1g0pv8gqgG29aPEPTcd4BC0LaRNbW1sAkv3H6W1TCp0c/edit +.. _train-dl-saving-checkpoints: + Saving checkpoints ++++++++++++++++++ @@ -688,6 +690,8 @@ You may also config ``CheckpointConfig`` to keep the "N best" checkpoints persis # ('local_path', '/home/ubuntu/ray_results/TorchTrainer_2022-06-24_21-34-49/TorchTrainer_7988b_00000_0_2022-06-24_21-34-49/checkpoint_000002') +.. 
_train-dl-loading-checkpoints: + Loading checkpoints +++++++++++++++++++ @@ -945,25 +949,124 @@ metrics from multiple workers. .. _train-fault-tolerance: -Fault Tolerance & Elastic Training ----------------------------------- +Fault Tolerance +--------------- + +Automatically Recover from Train Worker Failures +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Ray Train has built-in fault tolerance to recover from worker failures (i.e. ``RayActorError``\s). When a failure is detected, the workers will be shut -down and new workers will be added in. The training function will be -restarted, but progress from the previous execution can be resumed through -checkpointing. +down and new workers will be added in. + +.. note:: Elastic Training is not yet supported. + +The training function will be restarted, but progress from the previous execution can +be resumed through checkpointing. -.. warning:: In order to retain progress when recovery, your training function - **must** implement logic for both saving *and* loading :ref:`checkpoints - `. +.. tip:: + In order to retain progress when recovery, your training function + **must** implement logic for both :ref:`saving ` + *and* :ref:`loading checkpoints `. Each instance of recovery from a worker failure is considered a retry. The number of retries is configurable through the ``max_failures`` attribute of the -``failure_config`` argument set in the ``run_config`` argument passed to the -``Trainer``. +:class:`~ray.air.FailureConfig` argument set in the :class:`~ray.air.RunConfig` +passed to the ``Trainer``: -.. note:: Elastic Training is not yet supported. +.. literalinclude:: doc_code/key_concepts.py + :language: python + :start-after: __failure_config_start__ + :end-before: __failure_config_end__ + +.. 
_train-restore-guide: + +Restore a Ray Train Experiment +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +At the experiment level, :ref:`Trainer restoration ` +allows you to resume a previously interrupted experiment from where it left off. + +A Train experiment may be interrupted due to one of the following reasons: + +- The experiment was manually interrupted (e.g., Ctrl+C, or pre-empted head node instance). +- The head node crashed (e.g., OOM or some other runtime error). +- The entire cluster went down (e.g., network error affecting all nodes). + +Trainer restoration is possible for all of Ray Train's built-in trainers, +but we use ``TorchTrainer`` in the examples for demonstration. +We also use ``Trainer`` to refer to methods that are shared across all +built-in trainers. + +Let's say your initial Train experiment is configured as follows. +The actual training loop is just for demonstration purposes: the important detail is that +:ref:`saving ` *and* :ref:`loading checkpoints ` +has been implemented. + +.. literalinclude:: doc_code/dl_guide.py + :language: python + :start-after: __ft_initial_run_start__ + :end-before: __ft_initial_run_end__ + +The results and checkpoints of the experiment are saved to the path configured by :class:`~ray.air.config.RunConfig`. +If the experiment has been interrupted due to one of the reasons listed above, use this path to resume: + +.. literalinclude:: doc_code/dl_guide.py + :language: python + :start-after: __ft_restored_run_start__ + :end-before: __ft_restored_run_end__ + +.. tip:: + + You can also restore from a remote path (e.g., from an experiment directory stored in a s3 bucket). + + .. literalinclude:: doc_code/dl_guide.py + :language: python + :dedent: + :start-after: __ft_restore_from_cloud_initial_start__ + :end-before: __ft_restore_from_cloud_initial_end__ + + .. 
literalinclude:: doc_code/dl_guide.py + :language: python + :dedent: + :start-after: __ft_restore_from_cloud_restored_start__ + :end-before: __ft_restore_from_cloud_restored_end__ + +.. note:: + + Different trainers may allow more parameters to be optionally re-specified on restore. + Only **datasets** are required to be re-specified on restore, if they were supplied originally. + + See :ref:`train-framework-specific-restore` for more details. + + +Auto-resume ++++++++++++ + +Adding the branching logic below will allow you to run the same script after the interrupt, +picking up training from where you left on the previous run. Notice that we use the +:meth:`Trainer.can_restore ` utility method +to determine the existence and validity of the given experiment directory. + +.. literalinclude:: doc_code/dl_guide.py + :language: python + :start-after: __ft_autoresume_start__ + :end-before: __ft_autoresume_end__ + +.. seealso:: + + See the :meth:`BaseTrainer.restore ` docstring + for a full example. + +.. note:: + + `Trainer.restore` is different from + :class:`Trainer(..., resume_from_checkpoint=...) `. + `resume_from_checkpoint` is meant to be used to start a *new* Train experiment, + which writes results to a new directory and starts over from iteration 0. + + `Trainer.restore` is used to continue an existing experiment, where + new results will continue to be appended to existing logs. .. Running on pre-emptible machines .. 
-------------------------------- diff --git a/doc/source/train/doc_code/dl_guide.py b/doc/source/train/doc_code/dl_guide.py new file mode 100644 index 000000000000..67f74dee5cb3 --- /dev/null +++ b/doc/source/train/doc_code/dl_guide.py @@ -0,0 +1,98 @@ +# flake8: noqa + +MOCK = True + +# __ft_initial_run_start__ +from typing import Dict, Optional + +import ray +from ray import air +from ray.air import session +from ray.train.torch import TorchCheckpoint, TorchTrainer + + +def get_datasets() -> Dict[str, ray.data.Dataset]: + return {"train": ray.data.from_items([{"x": i, "y": 2 * i} for i in range(10)])} + + +def train_loop_per_worker(config: dict): + from torchvision.models import resnet18 + + # Checkpoint loading + checkpoint: Optional[TorchCheckpoint] = session.get_checkpoint() + model = checkpoint.get_model() if checkpoint else resnet18() + ray.train.torch.prepare_model(model) + + train_ds = session.get_dataset_shard("train") + + for epoch in range(5): + # Do some training... + + # Checkpoint saving + session.report( + {"epoch": epoch}, + checkpoint=TorchCheckpoint.from_model(model), + ) + + +trainer = TorchTrainer( + train_loop_per_worker=train_loop_per_worker, + datasets=get_datasets(), + scaling_config=air.ScalingConfig(num_workers=2), + run_config=air.RunConfig( + storage_path="~/ray_results", + name="dl_trainer_restore", + ), +) +result = trainer.fit() +# __ft_initial_run_end__ + +# __ft_restored_run_start__ +from ray.train.torch import TorchTrainer + +restored_trainer = TorchTrainer.restore( + path="~/ray_results/dl_trainer_restore", + datasets=get_datasets(), +) +# __ft_restored_run_end__ + + +if not MOCK: + # __ft_restore_from_cloud_initial_start__ + original_trainer = TorchTrainer( + # ... 
+ run_config=air.RunConfig( + # Configure cloud storage + storage_path="s3://results-bucket", + name="dl_trainer_restore", + ), + ) + result = trainer.fit() + # __ft_restore_from_cloud_initial_end__ + + # __ft_restore_from_cloud_restored_start__ + restored_trainer = TorchTrainer.restore( + "s3://results-bucket/dl_trainer_restore", + datasets=get_datasets(), + ) + # __ft_restore_from_cloud_restored_end__ + + +# __ft_autoresume_start__ +if TorchTrainer.can_restore("~/ray_results/dl_restore_autoresume"): + trainer = TorchTrainer.restore( + "~/ray_results/dl_restore_autoresume", + datasets=get_datasets(), + ) + result = trainer.fit() +else: + trainer = TorchTrainer( + train_loop_per_worker=train_loop_per_worker, + datasets=get_datasets(), + scaling_config=air.ScalingConfig(num_workers=2), + run_config=air.RunConfig( + storage_path="~/ray_results", name="dl_restore_autoresume" + ), + ) +result = trainer.fit() +# __ft_autoresume_end__ diff --git a/doc/source/train/doc_code/key_concepts.py b/doc/source/train/doc_code/key_concepts.py index b7845f80d530..9d4704f36d7d 100644 --- a/doc/source/train/doc_code/key_concepts.py +++ b/doc/source/train/doc_code/key_concepts.py @@ -98,14 +98,20 @@ def train_fn(config): # __run_config_start__ from ray.air import RunConfig +from ray.air.integrations.wandb import WandbLoggerCallback run_config = RunConfig( # Name of the training run (directory name). name="my_train_run", - # Directory to store results in (will be storage_path/name). + # The experiment results will be saved to: storage_path/name storage_path="~/ray_results", + # storage_path="s3://my_bucket/tune_results", # Low training verbosity. 
verbose=1, + # Custom and built-in callbacks + callbacks=[WandbLoggerCallback()], + # Stopping criteria + stop={"training_iteration": 10}, ) # __run_config_end__ @@ -120,26 +126,37 @@ def train_fn(config): ) # __failure_config_end__ -# __sync_config_start__ -from ray.air import RunConfig -from ray.tune import SyncConfig +# __checkpoint_config_start__ +from ray.air import RunConfig, CheckpointConfig run_config = RunConfig( + checkpoint_config=CheckpointConfig( + # Only keep the 2 *best* checkpoints and delete the others. + num_to_keep=2, + # *Best* checkpoints are determined by these params: + checkpoint_score_attribute="mean_accuracy", + checkpoint_score_order="max", + ), # This will store checkpoints on S3. - storage_path="s3://remote-bucket/location" + storage_path="s3://remote-bucket/location", ) -# __sync_config_end__ +# __checkpoint_config_end__ -# __checkpoint_config_start__ +# __checkpoint_config_ckpt_freq_start__ from ray.air import RunConfig, CheckpointConfig run_config = RunConfig( checkpoint_config=CheckpointConfig( - # Only keep this many checkpoints. - num_to_keep=2 + # Checkpoint every iteration. + checkpoint_frequency=1, + # Only keep the latest checkpoint and delete the others. + num_to_keep=1, ) ) -# __checkpoint_config_end__ + +# from ray.train.xgboost import XGBoostTrainer +# trainer = XGBoostTrainer(..., run_config=run_config) +# __checkpoint_config_ckpt_freq_end__ # __results_start__ diff --git a/doc/source/train/faq.rst b/doc/source/train/faq.rst index 737cf16f81e0..320aa4610d20 100644 --- a/doc/source/train/faq.rst +++ b/doc/source/train/faq.rst @@ -27,109 +27,6 @@ you can initialize the ``Trainer`` with ``resources_per_worker`` specified in `` currently assume each worker is allocated exactly 1 GPU. The partial GPU and multi GPU use-cases can still be run with Ray Train today without these functions. -.. _train-restore-faq: - -How do I restore a Ray Train experiment? 
----------------------------------------- - -A Train experiment may be interrupted due to one of the following reasons: - -- The experiment was manually interrupted (e.g., Ctrl+C, or pre-empted head node instance). -- The head node crashed (e.g., OOM or some other runtime error). -- The entire cluster went down (e.g., network error affecting all nodes). - -In these cases, a Trainer :ref:`can be restored ` for the experiment to resume. - -Since this is applicable to all of Ray Train's built-in trainers, -we'll use `FrameworkTrainer` to refer to a generic trainer for the remainder of this answer. - -To restore an experiment, first find the experiment directory that your previous -run was saved to. If you saved locally, this will look like ``{storage_path}/{name}``, -where ``storage_path`` may be ``~/ray_results``, and ``name`` is something -like ``FrameworkTrainer_2023-xxx``. - -Note that these are the same parameters that you pass through :class:`~ray.air.RunConfig`. - -.. code-block:: python - - datasets = {"train": ray.data.from_items([{"x": i, "y": 2 * i} for i in range(10)])} - - restored_trainer = FrameworkTrainer.restore( - path="~/ray_results/FrameworkTrainer_2023-02-15_00-46-58", - datasets=datasets, - ) - -It's also possible to restore from a remote path (e.g., from an experiment directory -stored in a s3 bucket). - -.. code-block:: python - - datasets = {"train": ray.data.from_items([{"x": i, "y": 2 * i} for i in range(10)])} - - restored_trainer = FrameworkTrainer.restore( - path="s3://results-bucket/FrameworkTrainer_2023-02-15_00-46-58", - datasets=datasets, - ) - -.. note:: - - `FrameworkTrainer.restore` may allow more parameters to be re-specified depending - on which trainer you're using. See :ref:`train-framework-specific-restore` for more details. 
- - -Single Script for Automatic Restoration -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Adding the branching logic below will allow you to run the same script after the interrupt, -picking up training from where you left on the previous run. Notice that we use the -:meth:`FrameworkTrainer.can_restore ` utility method -to determine the existence/validity of the given experiment directory. - -.. code-block:: python - - # run_train_experiment.py - - # Load datasets, define a preprocessor, etc. - # datasets = { ... } - # preprocessor = ... - - experiment_name = "train_experiment" - experiment_dir = f"~/ray_results/{experiment_name}" - - if FrameworkTrainer.can_restore(experiment_dir): - trainer = FrameworkTrainer.restore( - experiment_dir, - datasets=datasets, - ) - else: - trainer = FrameworkTrainer( - datasets=datasets, - preprocessor=preprocessor, - scaling_config=air.ScalingConfig(num_workers=2, use_gpu=False), - run_config=air.RunConfig( - name=experiment_name, - storage_path="~/ray_results", - failure_config=air.FailureConfig(max_failures=3), - stop={"training_iteration": 10}, - ), - ) - -.. seealso:: - - See the :meth:`BaseTrainer.restore ` docstring - for a full example. - -.. note:: - - `FrameworkTrainer.restore` is different from - :class:`FrameworkTrainer(..., resume_from_checkpoint=...) `. - `resume_from_checkpoint` is meant to be used to start a *new* Train experiment, - which writes results to a new directory and starts over from iteration 0. - - `FrameworkTrainer.restore` is used to continue an existing experiment, where - new results will continue to be appended to existing logs. - - My multi-node PyTorch GPU training is hanging or giving me obscure NCCL errors. What do I do? 
--------------------------------------------------------------------------------------------- diff --git a/doc/source/train/gbdt.rst b/doc/source/train/gbdt.rst index a4f1aec6cb68..ce69c6fbaa65 100644 --- a/doc/source/train/gbdt.rst +++ b/doc/source/train/gbdt.rst @@ -33,8 +33,34 @@ training parameters are passed as the ``params`` dictionary. Ray-specific params are passed in through the trainer constructors. +Saving and Loading XGBoost and LightGBM Checkpoints +--------------------------------------------------- + +When a new tree is trained on every boosting round, +it's possible to save a checkpoint to snapshot the training progress so far. +:class:`~ray.train.xgboost.XGBoostTrainer` and :class:`~ray.train.lightgbm.LightGBMTrainer` +both implement checkpointing out of the box. + +The only required change is to configure :class:`~ray.air.CheckpointConfig` to set +the checkpointing frequency. For example, the following configuration will +save a checkpoint on every boosting round and will only keep the latest checkpoint: + +.. literalinclude:: doc_code/key_concepts.py + :language: python + :start-after: __checkpoint_config_ckpt_freq_start__ + :end-before: __checkpoint_config_ckpt_freq_end__ + +.. tip:: + + Once checkpointing is enabled, you can follow :ref:`this guide ` + to enable fault tolerance. + + See the :ref:`Trainer restore API reference ` for more details. + + How to scale out training? -------------------------- + The benefit of using Ray AIR is that you can seamlessly scale up your training by adjusting the :class:`ScalingConfig `. diff --git a/doc/source/tune/tutorials/tune-storage.rst b/doc/source/tune/tutorials/tune-storage.rst index ddeb0dd66586..dcb61f910cb3 100644 --- a/doc/source/tune/tutorials/tune-storage.rst +++ b/doc/source/tune/tutorials/tune-storage.rst @@ -137,7 +137,6 @@ then all experiment outputs can be saved in a shared cloud bucket. We can configure cloud storage by telling Ray Tune to **upload to a remote** ``storage_path``: .. 
code-block:: python - :emphasize-lines: 8, 9, 10, 11 from ray import tune from ray.air.config import RunConfig @@ -154,7 +153,7 @@ We can configure cloud storage by telling Ray Tune to **upload to a remote** ``s Ray AIR automatically configures a default syncer that uses pyarrow to perform syncing with the specified cloud ``storage_path``. You can also pass a custom :class:`Syncer ` object -to the :class:`tune.SyncConfig ` +to a :class:`tune.SyncConfig ` within the :class:`air.RunConfig ` if you want to implement custom logic for uploading/downloading from the cloud. See :ref:`tune-cloud-syncing` and :ref:`tune-cloud-syncing-command-line-example` for more details and examples of custom syncing. @@ -213,8 +212,6 @@ that implements saving and loading checkpoints. # We recommend cloud storage checkpointing as it survives the cluster when # instances are terminated and has better performance. storage_path="s3://my-checkpoints-bucket/path/", - # See above! we will sync our checkpoints to S3 directory - sync_config=sync_config, checkpoint_config=air.CheckpointConfig( # We'll keep the best five checkpoints at all times # (with the highest AUC scores, a metric reported by the trainable) diff --git a/python/ray/tune/callback.py b/python/ray/tune/callback.py index 20d57bac93a0..ef057bb30c4f 100644 --- a/python/ray/tune/callback.py +++ b/python/ray/tune/callback.py @@ -292,18 +292,49 @@ def get_state(self) -> Optional[Dict]: This method should be implemented by subclasses to return a dictionary representation of the object's current state. + This is called automatically by Tune to periodically checkpoint callback state. + Upon :ref:`Tune experiment restoration `, + callback state will be restored via :meth:`~ray.tune.Callback.set_state`. + + .. 
code-block:: python + + from typing import Dict, List, Optional + + from ray.tune import Callback + from ray.tune.experiment import Trial + + class MyCallback(Callback): + def __init__(self): + self._trial_ids = set() + + def on_trial_start( + self, iteration: int, trials: List["Trial"], trial: "Trial", **info + ): + self._trial_ids.add(trial.trial_id) + + def get_state(self) -> Optional[Dict]: + return {"trial_ids": self._trial_ids.copy()} + + def set_state(self, state: Dict) -> Optional[Dict]: + self._trial_ids = state["trial_ids"] + Returns: - state: State of the callback. Should be `None` if the callback does not - have any state to save (this is the default). + dict: State of the callback. Should be `None` if the callback does not + have any state to save (this is the default). """ return None def set_state(self, state: Dict): - """Get the state of the callback. + """Set the state of the callback. This method should be implemented by subclasses to restore the callback's state based on the given dict state. + This is used automatically by Tune to restore checkpoint callback state + on :ref:`Tune experiment restoration `. + + See :meth:`~ray.tune.Callback.get_state` for an example implementation. + Args: state: State of the callback. """ From 9877d72ccb9926baef82420641bf90b4c3a7d844 Mon Sep 17 00:00:00 2001 From: robin-anyscale <129903427+robin-anyscale@users.noreply.github.com> Date: Thu, 20 Apr 2023 13:31:30 -0700 Subject: [PATCH 035/424] [Serve] [Docs] Change incorrect Serve app name in Stable Diffusion tutorial (#34426) The ray serve command was not matching the correct object. 
--- doc/source/serve/doc_code/stable_diffusion.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/source/serve/doc_code/stable_diffusion.py b/doc/source/serve/doc_code/stable_diffusion.py index d60db3e75005..8b9a86cf0a49 100644 --- a/doc/source/serve/doc_code/stable_diffusion.py +++ b/doc/source/serve/doc_code/stable_diffusion.py @@ -59,7 +59,7 @@ def generate(self, prompt: str, img_size: int = 512): return image -my_first_deployment = APIIngress.bind(StableDiffusionV2.bind()) +entrypoint = APIIngress.bind(StableDiffusionV2.bind()) # __example_code_end__ @@ -88,7 +88,7 @@ def serve_session(deployment): } ) - with serve_session(my_first_deployment) as handle: + with serve_session(entrypoint) as handle: ray.get(handle.generate.remote("hi")) prompt = "a cute cat is dancing on the grass." From 4d4e3e87c81fd4bf4d4847fee90af62b3b0dacba Mon Sep 17 00:00:00 2001 From: Eric Liang Date: Thu, 20 Apr 2023 13:57:34 -0700 Subject: [PATCH 036/424] [data] [strict-mode] Require compute spec to be explicitly spelled out (#34610) --- doc/source/data/getting-started.rst | 7 +++-- python/ray/data/_internal/compute.py | 29 ++++++++++++++----- .../logical/operators/map_operator.py | 7 ++--- .../logical/operators/write_operator.py | 3 +- python/ray/data/_internal/plan.py | 5 ++-- python/ray/data/datastream.py | 2 +- python/ray/data/tests/test_strict_mode.py | 11 +++++++ 7 files changed, 45 insertions(+), 19 deletions(-) diff --git a/doc/source/data/getting-started.rst b/doc/source/data/getting-started.rst index a8cacbeee520..0359d20e8a6b 100644 --- a/doc/source/data/getting-started.rst +++ b/doc/source/data/getting-started.rst @@ -3,9 +3,10 @@ Getting Started =============== -A :class:`Datastream ` is a distributed data transformation -pipeline. It provides APIs for loading external data into the Ray object store in *blocks*, -and exposes APIs for streaming processing of these data blocks in the cluster. 
+Ray Data's main abstraction is a :class:`Datastream `, which +is a distributed data transformation pipeline. Datastream provides APIs for loading +external data into Ray in *blocks*, and it exposes APIs for streaming +processing of these data blocks in the cluster. .. tip:: diff --git a/python/ray/data/_internal/compute.py b/python/ray/data/_internal/compute.py index 6e74fbcf6c3b..63d4ce9e550d 100644 --- a/python/ray/data/_internal/compute.py +++ b/python/ray/data/_internal/compute.py @@ -18,6 +18,7 @@ BlockPartition, CallableClass, RowUDF, + StrictModeError, ) from ray.data.context import DEFAULT_SCHEDULING_STRATEGY, DataContext from ray.types import ObjectRef @@ -177,7 +178,7 @@ def _apply( ) def __eq__(self, other: Any) -> bool: - return isinstance(other, TaskPoolStrategy) + return isinstance(other, TaskPoolStrategy) or other == "tasks" @PublicAPI @@ -222,12 +223,18 @@ def __init__( computation and avoiding actor startup delays, but will also increase queueing delay. """ + ctx = DataContext.get_current() if legacy_min_size is not None or legacy_max_size is not None: - # TODO: make this an error in Ray 2.5. - logger.warning( - "DeprecationWarning: ActorPoolStrategy will require min_size and " - "max_size to be explicit kwargs in a future release" - ) + if ctx.strict_mode: + raise StrictModeError( + "In strict mode, ActorPoolStrategy requires min_size and " + "max_size to be explicit kwargs." 
+                 ) + else: + logger.warning( + "DeprecationWarning: ActorPoolStrategy will require min_size and " + "max_size to be explicit kwargs in a future release" + ) if legacy_min_size is not None: min_size = legacy_min_size if legacy_max_size is not None: @@ -495,7 +502,15 @@ def __eq__(self, other: Any) -> bool: def get_compute(compute_spec: Union[str, ComputeStrategy]) -> ComputeStrategy: - if not compute_spec or compute_spec == "tasks": + ctx = DataContext.get_current() + if ctx.strict_mode and not isinstance( + compute_spec, (TaskPoolStrategy, ActorPoolStrategy) + ): + raise StrictModeError( + "In strict mode, the compute spec must be either " + f"TaskPoolStrategy or ActorPoolStrategy, was: {compute_spec}." + ) + elif not compute_spec or compute_spec == "tasks": return TaskPoolStrategy() elif compute_spec == "actors": return ActorPoolStrategy() diff --git a/python/ray/data/_internal/logical/operators/map_operator.py b/python/ray/data/_internal/logical/operators/map_operator.py index 133e2ff59d1a..906b07a892d9 100644 --- a/python/ray/data/_internal/logical/operators/map_operator.py +++ b/python/ray/data/_internal/logical/operators/map_operator.py @@ -1,10 +1,7 @@ from typing import Any, Dict, Iterable, Optional, Union from ray.data._internal.logical.interfaces import LogicalOperator -from ray.data._internal.compute import ( - UDF, - ComputeStrategy, -) +from ray.data._internal.compute import UDF, ComputeStrategy, TaskPoolStrategy from ray.data.block import BatchUDF, RowUDF from ray.data.context import DEFAULT_BATCH_SIZE @@ -75,7 +72,7 @@ def __init__( self._fn_constructor_args = fn_constructor_args self._fn_constructor_kwargs = fn_constructor_kwargs self._target_block_size = target_block_size - self._compute = compute or "tasks" + self._compute = compute or TaskPoolStrategy() class MapBatches(AbstractUDFMap): diff --git a/python/ray/data/_internal/logical/operators/write_operator.py b/python/ray/data/_internal/logical/operators/write_operator.py index 
f85b513e37f1..83b997933eb1 100644 --- a/python/ray/data/_internal/logical/operators/write_operator.py +++ b/python/ray/data/_internal/logical/operators/write_operator.py @@ -2,6 +2,7 @@ from ray.data._internal.logical.interfaces import LogicalOperator from ray.data._internal.logical.operators.map_operator import AbstractMap +from ray.data._internal.compute import TaskPoolStrategy from ray.data.datasource.datasource import Datasource @@ -23,6 +24,6 @@ def __init__( self._datasource = datasource self._write_args = write_args # Always use task to write. - self._compute = "tasks" + self._compute = TaskPoolStrategy() # Take the input blocks unchanged while writing. self._target_block_size = float("inf") diff --git a/python/ray/data/_internal/plan.py b/python/ray/data/_internal/plan.py index d10be2363225..0ee077d392c8 100644 --- a/python/ray/data/_internal/plan.py +++ b/python/ray/data/_internal/plan.py @@ -24,6 +24,7 @@ from ray.data._internal.compute import ( UDF, ActorPoolStrategy, + TaskPoolStrategy, BlockTransform, CallableClass, ComputeStrategy, @@ -882,7 +883,7 @@ def __init__( ): super().__init__(name, None) self.block_fn = block_fn - self.compute = compute or "tasks" + self.compute = compute or TaskPoolStrategy() self.ray_remote_args = ray_remote_args or {} self.target_block_size = target_block_size self.fn = fn @@ -1192,7 +1193,7 @@ def block_fn( stage = OneToOneStage( name, block_fn, - "tasks", + TaskPoolStrategy(), remote_args, ) stats = DatastreamStats(stages={}, parent=None) diff --git a/python/ray/data/datastream.py b/python/ray/data/datastream.py index 2803c0b520ca..f01ca22c0d0a 100644 --- a/python/ray/data/datastream.py +++ b/python/ray/data/datastream.py @@ -2939,7 +2939,7 @@ def write_fn_wrapper(blocks: Iterator[Block], ctx, fn) -> Iterator[Block]: OneToOneStage( "Write", write_fn_wrapper, - "tasks", + TaskPoolStrategy(), ray_remote_args, fn=lambda x: x, ) diff --git a/python/ray/data/tests/test_strict_mode.py 
b/python/ray/data/tests/test_strict_mode.py index 0b8fc9a6cabf..5d7920e41b01 100644 --- a/python/ray/data/tests/test_strict_mode.py +++ b/python/ray/data/tests/test_strict_mode.py @@ -136,6 +136,17 @@ def test_strict_object_support(ray_start_regular_shared): ds.map_batches(lambda x: x, batch_format="numpy").materialize() +def test_strict_compute(ray_start_regular_shared): + with pytest.raises(StrictModeError): + ray.data.range(10).map(lambda x: x, compute="actors").show() + with pytest.raises(StrictModeError): + ray.data.range(10).map( + lambda x: x, compute=ray.data.ActorPoolStrategy(1, 1) + ).show() + with pytest.raises(StrictModeError): + ray.data.range(10).map(lambda x: x, compute="tasks").show() + + def test_strict_schema(ray_start_regular_shared): import pyarrow from ray.data._internal.pandas_block import PandasBlockSchema From 47b211a18fe0d5e5e2d4fac09af1c67c007df14c Mon Sep 17 00:00:00 2001 From: angelinalg <122562471+angelinalg@users.noreply.github.com> Date: Thu, 20 Apr 2023 14:21:47 -0700 Subject: [PATCH 037/424] [docs] intro and graphic for LLM (#34615) Follow up to #34614 Why are these changes needed? To match the other use cases, we need a more substantial intro paragraph and graphic. 
--------- Signed-off-by: angelinalg <122562471+angelinalg@users.noreply.github.com> Signed-off-by: Philipp Moritz Co-authored-by: Philipp Moritz --- doc/source/images/llm-stack.png | Bin 0 -> 144021 bytes doc/source/ray-overview/use-cases.rst | 17 +++++++++++------ 2 files changed, 11 insertions(+), 6 deletions(-) create mode 100644 doc/source/images/llm-stack.png diff --git a/doc/source/images/llm-stack.png b/doc/source/images/llm-stack.png new file mode 100644 index 0000000000000000000000000000000000000000..110c1339b81fd3e6c7c731d1fbaec4c1e8804151 GIT binary patch literal 144021 zcmZ_01ymf%wlIvlyX)ZY?l!nJ+s8Tg-uK@B-<(;i zS5;Tlu3g)Achz*XijoX60s#UT7#OlFP*M#H42lg53?c&#=Dj7SU`F!20C!W95eKWD zBszS*F}2W>o3L*bfhsb#E2NP45kd=Ke)y-TjEF9geo!m1pAkyCf7i=_i+;tQc_|2Rgm`%)` zOf8tb9i0CH0TcA*f3G@NxSNo9JJ>tA@p}tV{5^vIz5Z7-3kBKVL)`6zD0CE6$RwOx zEy%c;S(#ZWgb~Qd$OK)@E&0_XrT-27ekVj>?e6Z(&%)y6<;Cp9!R+K}#lptN$H&6T z&ce>l^ge>g&BxK*#GA>{jq+cU{AV6X3pX=Y8)tVLCr7fs<~1>O@^BZTp!f^X|NQ>7 zP780F|Hb6!_HVM@31s=JhlP!qmF0iteg_r&tCe5H#@oVPSJKAeojvb3gt@sn1pglY zUp@be@&7>T{4XRME6@K2^#64I|3Nj~ELo+P@+K@?pD3JYB7zYAAr2szW;n3d9SjXw{-0;ghYK>kMpszpU* zWA$Bf8WR6$g@u5Kraxi+dpX0{aJ11$42~%{YX7vt)PZU4Kd|m!>}iE~U!LoChgos@ ze_(M?GTt)GA4z|2B-17<1V~r6DCz0X;avo%)PZ5_W?96_e-faa zsWst0@E$8zKlpult=rr-zH!;bcZS}=3(Tt z5wc8QRpj!nv&lX}n;lR=PU=anI)4(Mjtsj4KqA0D&r*mWW1>HaBKNuRX2eWE`UZAd0 zo^ooTD2q>*e*m5C@cCyGw>4QL>yqK+PPVJVI(aofLi6ezV@)bdXh}A%_G!M|;Gbn6 z+C;ty8Hl>^wS(4AX;8jW#pp25E`ToR^5tjZmlQi3Ip2NLV#Y-*s@?#@**eR_RQh5l zX`IbD^?xAzI0kG@qQNOwh92iSt;A~EVkZ657p?qbIaA5L_QUZFmfiqrS%Ur&8{+$3 z71e+6Qo*a?U?nLH?c_QL{jCSH#B6mp&L^6cJsy;7*35P1=D8;kJabo4{$(C(#+Sj` z>ezo4wf#H@~2ZJ&_+oFi7kA^Tex*R`uS(?797Y2@pJTE z-ZVLbh!>j$w*<4kv1uV(2&jM&4Hk@^;DOD``Hxn1aV}aYO7fR?pi;dY!NAY!Y{OfK z9ss7hCCP51%H%(U7TNuCTzf-+!mBH>1}BY?1ZLbvu`;v`ZGE2I<*CNzkhLMP`^z@vtmQR zqs@7er0wYjbYw1X|FcoNhbCl*Hye!5f$%Nnd)Q|v$TH{4KFhi}{*hc*KN5N3h@sSJ z;9RDAa4x`+|WrA!poJrUcMA#!x^&SFDeBv(mg z;xJIuda*^4mnz&W`5xd)a|w|Zj2);eVxRM<{ZGnShada~6>X9ggSQS|ts46B9zF^a 
z?t^PD%9o|o1k<6CS~w_hJ)im9hC8Iw}7$AC9t zbhXR@A|=+^Y4MtvuVjc60F&akHLH^EQ;!&hALxN2&kt18GxE$s)UrstXts8V!z%2Q z*uHH=e^vx@M2I(A49aLvGBTVHWAI+eAZ| zZe>ft;F4^I*zaCZCCP6@k+lxvCAsp%XatHu$Tz3ABjCR`#Q-%D#8>@r^gYXay_K( zr+-Z4=F#r8lksGodk#rIiRyFFM?uf{q!hM?zV}}c@P3L$W6|KkxESN!b>aM9Cpry99#`c za*&NNBk?YuMM{YUyJntLHXoxo%Ih0yRIH?|d6ibw;|MG3FVa6N)MgC&=C@}fasZ39 z_%`fhp)-jQe3S@X-J@6{-noPy1VBPeqn;lneaMvxDuO|7ml|Oc^|~r3oPhWsA&T*g z$@KyjK7?DEmQxIBMn#2FqPoShXyu<=M4}JWs}umHN_-Qt1dPc0aVV%q=M4j7%qXLy z5u}PBZ-IvVJ2+`gT&7T}Rzva?&X{6m`t|Ja>89lK8G)qGCt=Z0x0*Y>7fG}}aT8LOM21%Orv!}NoLz0PDxt-MJ5v8$^ zllJmc!)$%2cJ|QE8VjW^h!<>s&Ml#=$0dMd{8bW7fsYu8?}_Z?^%M%M6kqFp{^XAR!r{Kv z+#c1x%#bMM%xYKb8RQ2#wA|lM$KM~xi-S61iMbR|PC`9dcDwTZODC9`KPH?vw!Z$X zDKZrM%NqwX$dGBL;HBfhOaPy^r}|I_os)*)BvHW0EM~+y3>$ZDC7 zyGf9=^Tl3O{8R9jtU7*V3dXd5SUOCGf^szlvHpk5?>WP*IcR3e99V&qJ7M*|KAi;z z#5ZwvZ$lsvq&=~4Z9d$NV+ykQ6&YA1ulGS`r=O-=jkT*?j{zd zQ1>b6^l-yJVqw!#x2>~aGiWCfG#()o_+d-+M1To+Eoj}JHAp_i;b63B{gv@Mw!F^ zzJZ}Zt}~wUYEt=JG@L!jwFWoDV%HQlydfYjo4@I8v$h$e!I&2`OznuV=#^5#>-s|( zr>2IM&82I$<<`VT|GMtU{iFt0s{A1F>IVWEV<6D@I|{~T9%JnwI^8Wp(iXmZMv2C0 zuCRYZte;y{F5*zEqcJ-E^Rlr|aiB<}u4k_t&x5zm$YBbN`NOsm5>ewZ*Z3^u(C_Hz z=(?+BDOO1N5o&%Vh1ex ziF@+)r|j3jK0P*C9n&5AY$;vYii0+p&Ba5+3v^Kkx=+1@BMX z5q=lHqyCfnIOm}ar)A@vVAZ>c8R3a_ks&C(FuA%`I%s`?Yg)5jrV8n~e@K8}2T5zT z;vLcmPxsxs`;cVa4=~%P2rZYGo=2QM>|RDU0{971;j8nUbPe%m>?+*tJ8p$pL@$`b&Fn-O->-G8%)gp1wsU z!z5nx=5jo$nWuPZcMP^0d|@_A-cqjofEGeK9&F{hJ~VRlH1CZ2d@0+*smJf*Ii(;O z{p{_$fN^>1<ZVB^l5_~sK3Y{iO zG$QWiFfB@w-a!p~(g2>($g$nAJI&zJ=&`z;>`*$ZS#4NkZZv$n1cUKX*xr5f^r*M^ zvpgrZh&5Kvr-9!a^~EH-x1I1;BbnsGj8gkGtF$-A8^%ENGpgRJ#}-tJ{S}t7+%O~c z{b$DJwSaaP#_S_e=H;lkxRg)MiP9JH#&86SzrS^HvW-+?5aQA}A-yZu3&^xQMyJS? 
z>*4HRqg1DsNN&{pGmdXEMIsLPqpS7wy>ta6$mBCx@W@foY#OWdyPg#S?~F0|oB0E|oxxO3DNF)@rlLbJUp5CNRR$_P96SvVma zll~WJ-9|?0ur)PBlsAGA0X)XLBiurw)=x&aVxpfknjEB-Z%u@Xj%n*jfz^oQsmiLHIfYVryOTk^EftC2Hb$XhvD+SDBZ(eNTC>r zR6LpF*%?reC`o=;c{9&LE1-pcrnN}95E55}Sm&_~W2k%aB-7+yP;oKQx0ljv1&2gj7xw$*aWPUJ* zb9-v&66U$ku_XcDZM)qUErRN=P3WjZi}5;^rY2GwBlcNeK~)`j8?b`uiC{9{dveH) zULFiaid)(j#s%;lL}0>DRuH)=qC&A@mee1zti;8O2L_~bJbS#*j?W-rhqWK|>+~2g z!>2%kI0(lI4uF^n_AE@n=i00Ip>`&2W|%3kV>`cQKC?6Ok4UJO1XbW0Gw#fJsj1d} zf!n$bLIT>ilcT@-wAQ>Q?(ca+&B%z=Dz4WjX<3A-|9U79Rg(V8K+BB|r2_HpB_+vz zYhdvQ0+4?x%wYtOPHs=}RXqq0tU?U_RS;lV}IKI zs6)=r1Hz_6w~Kr#L>=|N9miklvZ$8~iyRT&CC0|5+hZj(DhK(bI6WZ5=0mhW)O?6o z7yRah3dHQ#tu?4Ip)~W{Ji}Np6FG;5ZC#nEUgSnO+n|v_B^EcJm(N1LE^|MuQ+tXa zRG9gNmy_hh?X(Q$O6mbcE=Ke*jW+ACt3@NM6y0T;i$W=q+_m3O|2Q?3`GeLdc#WXX zRkq)lXJR6B3KGY!et|>_Vb}Y_Tz(fKOA9*Oo=Dz6E^#~j? zUxRFaR67h)2uChabfC~zMtddkd8`d}3^kZMVGHVwJE?eEAmoI60PRRV7 zX(-7fT!t0Wb9J?*%1 zPs)>6V>AC8lPSS;j(@>QFz}^zQafOrY>(UR z-$>D+VA&KL*YTmuj=(KR%@j^P&9%GFAoZVg<)e>tK*`NDD{$i;Z0-NPgkuLYKu<3+ zpBvl?m1Bysn@M9ehJZ^Kt0b1_k*&)if9Ld3jYf<_g1MlFL;%72o%Z5r+M% z2gT@qI4Xx#U{=Hylikm1i_t;B)+v0M^j>${le<)l{m8s}KB8~aMI-``?{>&x+kd;g zh+y$HCdk(hBn51lm{^wQKxT;JY3v1eBWQ2+q}i}DwVZ$dxG$48TTUB;QkVgsW!4|% zvDMk)eMtcmeSoYqz}ukmX?My9tbS%V49pvC5=u%^a#-p|-ou_0ag>`VndQ~2ECF;N zJvyF_jFdEf2O>5-BlPBO{As`O@s{4(cM0^U)NAweGuPKu{7T&emS&Yrd%0t2x=attk=n2ZYPT)8L$0Zr|lkk237)7#HZ z5!g!#$Dhs!A9h3U#$BYHeRfW35}Y_Sgj~Q`>B*#7sGDm-R_NyKFY=SJb&~TKT6>-*S09f8_GsHUG3s%;ZmV z__$z9JL^=T4DBdyae9PJmNHe!m&S3~K}vkqrM*&^M*YuZA32E3Nnec;JYdNh=0wHj0*%%uTlt)T&?l7jkBvZeD&v^u+91r^|g{>4sP8* zyZ2j{(3SDQE%h7yPut;UMBK4U%E-NX*|!u6xOyL=P&E;2gtQu+5kd9a5OZ7XNZkk6 zvyPuGjvK+DyWtBFV@QI>hzFDDHvrz*q^4$%#M=vtxkee5l8OGhlncG?Gv%z?2lEGy zSN&GdgIac~#h7+txwTn>;3Za6{i98MMRVvf0z(FK612$pV7nD1aQ*j+7;=Y|)2EGL zUK5Aes@7KJ!@YyH1fR*=M`+-}jd%A~FCoTWvQ51P8ycR*cEvW~X^uH+LoI1g;F&1l zw%dI#)vj%6KHIb=(?V{ZW)o_PR?-}qeHPkG=^xny~+xU0r9TnFLGMMRy?G;i- z45WG!Y|^1POKGC`OF}g?0oGs~f@FZxZ^4jYHvaj=I2YY=s1#+CO6`~8Rh83Xy3i$f 
zSIH~N=8r8nT!MU)-<2aTcv(Eb^Y(a=iVx?u3P80)_(n?!n)2FXV^hDSvQ7-oROd4c z1Lr`C_p0(eg%3-w31dcq)vo%5nkyeHe)y4=Qn1I93NWV#KdRtNJS#2fpy&8~x@EvSsMY{VVBW8F7b0cG(r%3&98);8)IOG8lXEYUcXk_UdW;sWX5`?CmY;ntI zsxD%wFY?Ms;c+eJ%E|Nq^(c*ch3t~6#VpZngsn1oyMv%?q{lILA*6ur?WvBPhD0UB zWW0c;|M}rhBNG4L=;OAy^35FLrq-tgc6)1Y{s5Y$SI48t2wC<@@ic;~(WvG!vCv0q z5gY)?R$$Oh9TR?qGeU%mM+M92xSMRi!O!y1vS;H!}H?zDvRfRq^Tt?RUW-g7T)ZQ(EJiRL+i zCRHHehd?rcIh5q8YZTH|78zE*j{5^ri`%KOIa_ZX8q0XzUKtcpPD}d3ydnSRfafkybk*CnSJuTXobK%cO2^!) z9d%Me-tEIs`7^fzi5K6;lL+p<{Ja&5-n)9%PiIUL(p{lmbp=mQjtSrL4;Oy&!NdMM zl!aJNtG850Yc?c}Js??@A~6@?EJqPdIjr!hFB_ut-&@Lv%yv@C2GGWHyn?3Q^9>D> z!+>TsA6X~XTxwKQf!uQr$Ok~U= z6FHPG1^&7)f_KU7+c_`efs^BhSA&~Y#LLEqR8b+8v!-Zc3>l2c3DmJk1zOdL=MC<6 zm#)+&W=V0Kt?XQ4bJ)YuXdeCzIz@ch^HA0=<{)y%wCOl%v)O($i2JF zq{-rbo(?>_rvp>v@=2zLcf_k@3on1aG~NY3&BhPAsvWMTLqoFF;f0_`BQ+4x0ua>h z*c=9=@pEM``YQ`UyLv@14DmPOuuKAea9aQ{D2}p4K*mv|(mSf(Y=TegaidVuMVLei zk}b3XYK;=FdIuW6oyr!cZ0{|OsDFauB%vLFx1 zzcwmGAd`Z7m>4o}qFVR0#;;BIR=X<-(<#Hgl|sZ>L)G{WD3Ri7b(BGooBq_UJT3B6+;)_qb%iE*yRJo@W6fqpFe!SV+B3yB%x- z4=4S17NkFpZPrCMV3dXCF_#(L;&c^#_uwIgnT|9Py=gp-e35g2Lw;=pH(n7lWprTh zGuUunp9Cz!E0bSF3j=14XK3ceuG-Gqq3Ti~M_EMjtw+UAtFBZCyT9(N&7#F?AmT8a zkEa1A9fOyql8tbTBkoyzTzYocLE}%B7r!boK+RI7vw3`xtuoh81-#y zvA%*5-G(heeh?Fl=c4&vKXi^-+k}13P1Y-uu((O@>|3(ZE=pglI#dsK3UrGqCHeVC zI@nx>G_D?#L2Ch=uea*6;8CF4_vcZ6)oUM2v@cNhHA28aq64azG zcO?$F!yc8ALX$9pWIaD!ddRC_4|C%`kZHw-(e}`5Wj>>Py%(KtK%9+RHkO|8 z$U3T?Ia3*{Vw-5oOoRPM4dR}R^>`9(sn*I$zfl^`Pym^GOV!^Sp`wnGK@w;BbP@fV zRi3&25tDh+(0AV0$+{v6$w-Ayg73lU&WpxzZ8m$ggAq z%})I?2=&pohB(AAGhLb;)oklfCVEfN$|hrv>+~-h8gl zXc`0vt_VgTQH|8Es@f=yFPOXNL=JVy={=6~(wqPZGfJhd$7f7FXX_d!rqN?!FRjs^ z_U7`4$nX+TtW3vPm@Ls9Q=V+~MZY$Uj~1kvbzq5xTntd=RhzFfF!#QU6y%6SL$!^& z-b2fq?n*t_%$V9Neo8ugR(Wwn8U7f)F(pNeuyllmNk_$?m4^AvE)7po`Vz9p7`~Z& zK+ta6fm6$R9-_TiLzW4XpJgAZ`qz-)NpFXT z;eHTJ^zPLTrUI}zMH2jeK0h$Ncqml}N4Wk8C*yN;&*nB#VdSew;(uk+poxT%x6Z(* z`ccclVtzqg(eGeHqvX}1S3h%BFr2AE&h(=mJ&2hRf)b3trIX}1vbWz@BDSUJFvqDI 
zGIVN!JzyND+w}|g;r^wzLo%jRG`a4a;BEUQ|9XP+;qjv~k+!3kpNx;qFx)-oC(~SP zew0m_9M5LCDd7YOj#D}$sDsf(9wQz|-2*rM!0Q**o4JJP^7L$pJ$fzi6+FtXE9>@_ zd+G(}R7@)kX-2ruQDQq1@e-Kp_|UVPW@s361}I%`zjjPw@<3Z7cQ+tO5<=F$w$#!; z8(iZlVxILBdY#LIv838 zw#5=lS8fxY6QB23!Tr|s3j*5&r&p{YrU&|e?p~GWIE9W#pf=>)nYX==)OK=r)VAV_ z3b4x_z;YSUrhWMtR=x|gnXlvOQV*~!ly_(g{G27mG3`nwkxwXy)#9mCLOgq$eme_S zZWEdhl8h%>b-H-@9eVk`ZS|eifETdlQUfFSgaMZ}YICNEc>neBbtUWSorMCnw|hgR zqlD+N{hmh$$u~MN2OmqRz6)j~++_^G3Wh$Zj6Sp zrp)jz*(OqV^ID*SNlEJ*?Jm}PhHJR)BkKDux7n!zk0Ordb4 z)KSQwlBSlilGpEQb~Fokbxv6_+iu8Glga{ot#hBMoVohDx+z!8RFy@LqTjsG$u0ps}>-% zguSHgaMP_hvh_Xn6LPMxLFn&XAsf1k)j~yM^xr;pEWW{?4D|dUAI{=96q}}KLFhvn zYWn)Yf=@v$Rt_F6zK<$q_*1h?FK1Cvl~Ff|9!{s&^5W0=5@xxC*D2Y&?4NOvlWQs# z&o>!Hs&!x=4o#=VN-H^x`!|5xFE<&2m(C)IXuME8e2AV05(%;fyi#UE)L;6l6(IP@ zUIS#+Dv9#OskUFGfcnW_muO(2MwF~mbNj2971?>!Tb*AMk~jAv+rVWKu{cY-JDiNr zBxKl({p4KV-uB*JtL51B){MFVr%1FYMabm!0N{p4 z1%HKo`YwN?hrHW6v8mB5|npZ)^f(sK!*_k ze@U0=$B4PP_h2YJHtg#yR6cwg>Wa$|H#mBp=5ha?NJ&cFIVUB`!F7WB@^SbiI{QiotIx zaGUwhaUHDYybUaJnZcjz);U#6B?h!jU}Rw5GrTq^Be9ghZkiq8#~-{U4WpS^T-R2z zYAK{md6jo%yV7yNNfL&xnLKH9+J@5c{-jDy*A)po>xbB4SkwB~KcX?E5hDVm3?Z43 z3Ak{iB`yI!S_rOt&5<~Ba(e43-0FQ}lhl2CJHlV3tlN@doIrt2_A_dON{E6y20Gl%k*jQx-{b6oki1uE8?^2w3B?H{089pH(g=_ zWvmm^{8l=O*GP~U3%>%f+Cd%?ayr<~#T{R!@kM(x6am_g;Y~Nu*|b;Ns;8mm)v4

    xveY0bA`kefmNGfvIVQKDGftwsc9!kGKCexlMPq`n&}gFxDT;k0E7ipRurr= z;6>bHn%TT)BTm&r zlr?a4?c5L(Pkv{G%vyn{=!`hE&blsB-K~ApVj@jlSD?}9$#cWMkB%x9U9F97*NF($ zAhlh$xZ8LUh3+t}Pp7(s9tv8pc`VOevZazCg&A~zSq)4J0}OWgKe6=;!4d)xa&>`A zYTwJag%_#h(~@y6Y=SEhSAI@tbE--AXytIoXrPzT?_^;{syLQHw?N0FCc|9x7$VeD z`donDM$C->+}$s3WF%*eeFCOM=@7Y=t9*HRpf_*OZykPRs?OG43k~w^PZ!mUeKyJn zG4BOKBX0hPOA#waGgUEqLSo$n1S5c$qaKn=Dd*|)lG6Re6iHA(^n_Bf4vef06`VgX zsq~XDu(#|9%IaHuyI9|xB~^Z2td=$H_8DQ#06&~Q?W)c+umb8E9sZ~+8ThWB{=xAW z@bgPa{l_LH#dOtv!?eRWs}dsXs^J9U7S>NLxcw(vx|!>Y_vqPx0Hu09HmlO}kCb~O zG*2d*TcK|Kaq24Tn3HvrkcTetGtdCI0nDg;&%E>WnG%`)H|Xe3fL1t<*UA*(baRo_ z4_I$CaXF=0OdmMm4nFVw-iU2tvt3j{!<;NFcv8F8%KDZQoh7j0ucRHYi}9@h5!+ba zQR*Pa_82GFiBx#xh|fb_twN?)i=1LiUXS{ zl3xjjb*CJKeEWL=>p4o|H2JZ#VkTkPTX==KP!V%Bl>zKE;?5^ew{ODf0jO)|Waw&n zyNkdG)VS1J?Jtgyt%Q(*+HsIWhqt7HO2uaH@9NDl6>@IRmQ55Aew`$p%dewm!;IJE zph9hth^%@AMT5-NI2Ht3C+Z?YI0F!tIRI?5$MaR3)To+5_xh|ful*vlbPf3*N29fXo>fKoltw zAvB$B^Q-6M?ug}mzKJBRoGj=V7L^d518%B|U=hXI@EQwlk~oT4(ujp~YO&pq2ddNep#R5AQFE!dPopJ0QCkn0lemyhs4vKVhKE(jrhDro z25bx~D`Az0$V{~FCX}IxC{vlCkI4 z--gjhqSMTP@q;c65Q2Bv$bzSsuzlH#=7Jz~pOu<81wl`ubz-bB|@X_ryWpa#Gvldjuyv(6vC&u$U zn}^KC5i9Ss)Ih~BA-WH^>g638ck#7pdhHyM6snC!CgmU@BDK0&^}71%hEE=ujpk+v zN-J`LW>Bn=sRD)Eu%10 zKilJ9pL{rI{eT&%<*<_NT)na;IpLhaUH@b=za@)+Lyz zcH(J(eQmS-b!cgW|G5ThVDodpj&v5iz?dd>7sFX=O~rE%_~xEGuEoSTLy?dcVW{HN zo-5WqLcsJT8|8_P8bRoR0~vGF5XyaL#Y3)6mM7nf<&=+0U1eKQ0wWw10! 
zk(v^>2~E#ghxttRE4#LzWS?4S?dC_9ro6*f9c;rS?^xyHS7(D&*qUi~ z{iVB$CifK=jg_&869yo)A3=*Fjl(6p*0z#)2%+h*c>}%4oun!D9mtK;fRB0f{_7b#OL23>Z=-_z&BQ#3*+- zXAHuSKhLIs)s?EHefAEhVw$pR67e#JZ3N*+eCfroU4eqxzvR_vnA~g5GOJfX6Tq^S zp9zGzGg6H17-wAoIjxkUHqa_W+1bg!*d-_2n27he*6p!jNQ3ktj*`ecLfOD0<7gieB9h$oC zZuXQ#v<9}?am1(O2C(k>_@TLQl9w<>ddiFU@m>qS`p z1W{HW4j7_8D8Dx#{$}xxDHc+TbaBtG&&zl0=wiN0BmruZGhUI8j|3SxbimpgnXEOs zCfHvKH#DP4i(gL;R}o)_w@Y0?bjyPkg=fzghDhGEw?+GEEo?(0ZqVjFzU8oeb6^~f zT{bpM@ARWuGxH1PMc*^9S7U3A#r$rt7tec{-^a$SJ}<{JaL{j@QK!*%wnSSQ|82e< zBO!j+BFvUhWLpuRg%$EL#1hHDu!tCAGYudSnZU`xJ)UL3h|IE&Ds&sw@iiKV633!}u!H0?n3AlTmxTz0dws>>* zVK;bU(Nv2}_v~rd=(Q{NTTdp-dJ%Iu2I-O?WeNO2)C^5wsli}&YuXxja5+^-7EP`K~FX3}E+xsDhy@NLnL4a(p%Ci|lCi_JRv=GF@u zv=I$W0x1XIjJ90a=bz)p6roidg5!2Fy)+S2NQ8n!@zxxE%Dg2VHvPUSxi-KneTV39 zo8mpcpoPz>UzF0hF7XUXX(`%sg?+@W>2W-d8n9r4(+oRp__6dp4QqbUBFSXRPEew* zRw3#FGL9IL{SZ^od0PEybF@hW!9zatN?c1*=(%HY&s7UhULUu~(ml9tHNH2L2NLOo zy7NPAKn~TWv-U-IcRx85dE{Sb?^_~SohjWLd?P};&ZW&PjVHc=8U7k;*;+-^X`Zo0 z6zg~2MdNZ-0F4p4T%wEwniQ3XFhtBU6`U;v{(2yCo>y{0;5&gX<(*npo0O6+LuU|N&2wDeOszCC5sfsNG6qd5nt2dt!b8dUR3D{yoXw<*-B>OpE=zKd zQD|~145~LiI~40c2@RH++XIUSzi_d-Z;25>0Q&c!g9nuU)DE{pbG!RFg@{Q|H}Wfb z?0ry~tTb#dVct=uo7y)GU7xL1E3~eL@Y9yLjTHg+RD7cVazdVSF@jYdTRZ?86F;v@ z<|Pm=DOjQ)!>`S=@ry;dx^cyG-L=&V4D#OO6KQ(FMo!d~vkFvSjEUA#;r5gS!_Ws` z5k67hTi@aPi{Y!r8AmEUr6UvRPEdFZFu%=lKm>*O%4mz&|Lzvqm~2{1LS_V33C75K zi?6lb4R`!=Ad6E1o}wqk#NJ`mM1`oTEGKsjW0Q!bPgj}@sUPQ01?C!wB7g&BG{3xHaQ4oo50 zV2Sup{(#-+fl7(=h3bdesBP;~MqO5qUTdkLddfByMT(9qST?DCwZ^zxk`z`HWh0Xz z@dJ19qecd4_uG6Xc^wiPYvjvDCXb?53rjnSPg>zlD{+a23x$F^xMeODqCq^UCm0jD*8p}x6NugznDAg3>uJ}{TnmGh z1-TWz{mRHHn{eFF3+;nWhnq01&~AmZ@&VG5ibow)9r6z>nd+73bQ~49U&B8fy{K4~ z$HgyG@2s4wRFsqn&0)ANKG$f9UA67RHKCo-5*I_G(;?!D(hW5#+ssX#EA=hF2{n|k zfMK?tZ_i-n)&I#Jp!;-3=StjjT8SdG!Q8-RQ4EtLMXoHnwiDpqd}+Z zNXH3YJhX|xQhNq0QMPWetUB*n?}vd9W6ZM_IG4Mygtv1vHDbjjscT_lh^ z8-{HK<~A0P4Q)$kJZTjghs8}M%;{eU)zk;%QipjsjzEtCOUX4^D^jL~bkRhE@`m9W z6dJ8JjFRz9yrZsq&iCJBi+cuT5GU^>t72PZ|1~w>1n^f9F<9XzkPPE*(Gb2@V{_jW 
z40Wfaiii$wI4$^7ktG!Yv5Fhu2w(c`fwEw5;FT1E3<7+vZn29Xe%2YMqI8E=Y}EG< z(3f^^k&@EK(gD)IXckK-6sRS7`J6J8O#y3QkdUQjYhbjI-R`MoktJ-aHOHvn%mEO_ zU87Iz-YpYygs0uHc=tCc$Oge%5~J&>*dZMZGtt3)WO4sR^ z8JSRM%Jh-eRl&s4y^5G0@V0X28}+o@II1mQnOsLX%+Uk;*>d1`B!T|cm>%<7YDImt zrS3ldSv9qGz1)OO;XUYzV2`_d=|kWV4hl0a^hC>3u7eWgL8Kc$)!p6M7aVZrGBC6b ze>J| z`owbL2yG+%daH?ORo`uGpA`+ecr$Rr)w@QkvnC^}D{MtgXOT`ZYZ@h38LcWwTbry0 zm%JSKv>4vH8WTw zl3^`rWJ0CeN|RMe?`%ER7@F7}UN~9kDg~)ezVoz3n$KTk zapiKzjnpH$nlskr1agPKHx@FQ#2(UV2976sFVu{cbJ!IUAN`<&`FVQgxCcOF@wvhu zy*@K_vR;TpRPRB+OBQ4bi^ni(<$XBV^4)^h;W7K38W}Llz`0mVqPIivLokX&aw(>E zqhE{ZIfV3=B_cLD@VSu74kP&)c-)aAxmU=@R;!gn=)}WFEZgALMlEr=vwqs!QOqe+ zu=Ozfq@ZEY-kbh4l~CJ-fKl$bW)lJD6jG{QMN%E&c9`g01ozyzOd6wQwmpDHaLTOF zzfOjIR#|Ys3>R1g&If@BLr|`^E3T8QuoHczkcE=oDot#TMLvWeYkGzw;pjh zBX)=1>BblKKFyfyS_Vy)mRsM9jF~iv>@l>Lai)ef>^}@qRA_=?BpvK-Gb`?48H`ho zC8uHF7f5#yOxG-@JUCZ z^4fZ)WRPd3(VAt(TMrr%c6+5dQ9%%5}{iJkv61)T(nW z`4aWrcv-xbzaQMV7YT#tJY#XN;e4Ov$wMNp)_H&9A`20yAz@CKJo)5!nXay{ghkvqdb@0ET0YK!jJs!+a`9CS@R6(<%&P4% zU3s*K&T98UNe>SBL{xv@-D>Uiu;YO`I1`-ge=S18+Ci+P4iSNpN)wc%D&l32pqN zq!}5;QoiA#2H($vJoEMY($Oyh<7wZLesD$y>)()`IblGAMD|i?hDYP_oI!NkoIC1x z3TZa5=M$;de!iI9yOTGJT8?d)oPD-v*d@8Ix#B~4{}v`ur((T_C`|E!iB&IqZ_uf} z_qm-iy!S9TG9`bhb7wk;4CW#AuX*Q-2mo=z8ewr@fQ%A?m!ccFAIs7^AyhOV=I}Qn zHF2zJSs{gN65w~}7i`jMJ3dHo3qw2WJ0U$mgZvPLUzQVZ(SZD6`@Kn)DtLSiQK3MRmwsF#u965@g+!42 zguxc$O5l*!DfM1-Jm`@=cx6t5)u`PAZuWXd$nLMG6Wzpv0d4}`$YtpU=}cUD%4C+5$46( z*`Mnc$ElJV8XJ&z0znUp;N_u0CLk1|>Yy_Dz|uUnAsUgr&Yd0RXq04dcu9~qy(RHF zIwPbWw*p~I2gg~;Uj}NiT}G+wSDXo6mDZeYD-=4k+-0PfW%$$Lfo>%knMuE)z$Wbk z3yrrk-U$;sVwC3gh_5p%o^^2?bO`v6x!8B6<{$?s0Yeda8n6_2AmQK;;Kl7s_9~paUdoOzlo1n$`Zxv5 z3p3E8M=V{EiTRm>J;>9D+o(N3vcNLIYR-U8Uf1zH!|pRmVSD;`k|e25Ro}lNRY%64 z@OhL@A;cQC=kRbnPH4J3a|8N7{MpxFOEQ!YNzojx$oIr~b(Ay7kE#FjC<2S|XVPt) zv&oeYtKuF5IF=@>b*wf-QEEca3b<_h;PZad8kjBZl^b>&(uef4z3ds_{=>)JSJT3ysn8|z$* z&U|W>?X9jOhY5U%WjWFVH#Jf0PJ1rSK3ABwbM-%oZ@ulX-0ePi2VF$@WN>)M@TI$4 z(R(mo!J=%LGaW>a-^T}m^Hw}GU!vjiE$3@DBOOP_nDrZPi67ByHBtmYHWz=uzqakM 
z#9q5Ga6d$@2tjf=3;Lu#co%QxNOdCZUl%s?U)mV6bHZ;I0ta1oe~u+>GwsPyZ`0=K zwIIA0*Y{@ogCFf;LW*&agPOQ^kr7|iVB3}#zp}@BV=BGahX~GqL04bT4Mngfx9*X%!ey|&<$jVLbqM{e| zQ+MGMSp7pUyyS5Hui}_fg2GgOX~oihfN`X{n2?fp40`aOp=rYsqk($5{jue`kjJC7 zJ0D8T5CEZ6v_L2^4Y+r=gkupLa|sM748Dod^4o0GycmETX_(l=7wT0DIwzQRMERh1 znetu0DGNKC!QlbcY_(N}Le55!0Xvh+g?m@=tYn%vwv(RczT9oO;GDaio@GAKuq*gD zMMU|V&bX=}l~RiqWP<~tPBe%`%P+{ZcuGjhzg~}~uMfLYbir5&V9H;Ry%k0l%_(%H zUcP$pyhB7UU%>z9RVOz!_u7rRnNDFU95nQ{*PiPg3AEf(EEh&~cYMTFfcn0j*-x$f zy}pgbC7>;Ug2{xH8H#H1K&!l9I(8SRP;3(rV)NeLUh2?`=8Z-n&p-qm#Sb}bU2EP3q3hO8d)-HAJ5!Do>Le?DuKZFI|Dwi)kh zopj6{Ouxl&0Dgzi%DG9d@wud}bdInln{}f4Y^T(vUG&P}VF#uONd&)TNbO!q@XdKW zWh&}n#A^H1Q@hi38z#MOi(B_w_~JGMAND%ErL}~xTdI_otyD1tX}nUx2EO4C&KGYF zOQcrZJR;lS%h42q%+ESOHx;`&W>mGH*1PM7LPjSg2}b=hB4p3s?miRvkitf;yM$uW zr7+C*GpahqP~6c%FZ(Sgc}R(M-E~6RnIZZ%l(^w9M&rH3;)^zC>DX%v+C4gi>8>?ou@EeT#V<^I*dL288R&8ON{O~J2*cs=gwlV9oPn7j zyA|4JxWQooh$n$e2PlcgPh5YA0hk4#?ymLeH+ako>nm5T^$J>sI<)gBm*-^{D+uZ; z+i!N=On#In9%GH!_s4S2U3UfL+S6`X`@*)lHsKG;BJ?e#8rF(dh~;j*X(Mjj*H2Hd z#&Xe;VBWTr#(;Z<@Kq^-U@=C?`XbREXZ$i;SFurWluVgPz z49^~iY+DXT=bZ<#!MMfwGQ{h2^n%Az{*Zl1HYJwR*&-thX#Y!MXoCDI3>ue{NCc`R zi`%x8E@&o`!=Byo>VrJH1&@*A2>1tV|2Oo6uSrzfyd^fjGM&9^effT(W1MqU>k@61 zCSie!Oe2=7)d0<)y4C~A8h=#bN(#4HIi8+>CQP!4*6lEw(5QMt z-0)g=T6*I@c?u{$_4d3%7N7?3GqM3NCkCbZ$DI^aj0>o}j|n@QZOgGU0K?TL9Agk&B@ zvD0-s0E17Y>&-YE+k#Vgo!x>g%Dn|(KVRE#(skRtG%Ytutt4 zoH6dlz4&sQeR~=B=vwO}@&HFg31{;UO_h7(d>}Wmj`symQO)p{Y zT_;w1{f)ktvot&5FbtZAEU8MH&bqp}a!tN3DIcoYqn;|2f+A*k8(`J}JYZEHQJBHq zUSWhsM}@O@ZxW6w7J6RK+aOOKBjjM{7Q^+IPEE&$B%F7SZj&uMOPa|v&;4dpQ;RB{ zq4=>6yw7WG&{hfjl#@K8LWen>RT^Yfflm18%|9t?6xW}W#tm)v9_{qj5j0VM~8EY1%agj zl8%bSpoSlrz0ttzF(jb3P{sqXy=rP9wQAypLHdOLh~HSM{Oy}1gv4^&d|=SXM2;ja z6dqq{cS8-+Vg-YtFm=%;N{UnBNrWlTOeHS4;cP7CN@?d2H)48tx%r-7V`tGi9rnLQ zQnMWKDFP&jtNn4Jy#`Vu5XJ6;36n=h1%`~tiS6M+ATS9S4Z>YKFI_!TDe|a29 zqLM5cNy4?xaNhTpkrnr$k2HWpAZv8A(|!c0nJ~?!Dnle7v$ky&(x`72Nv3;V#(A}$ 
zXsQLYn8=aJASRtAD|?=driYA_C67+H?06r6SsPI*B@sJk3U5RgI%)f>j3Qq1>@yI4`$b&YaaYM{nP;hwEshvFP?*`69^L3M5#xZKdkGuU41m^S3>=bu1fOQPZOr0cnDu#0c;ADqU=Ju1{qY-${Ha(v{Zm}$A z84@x4{V}uyofO*j&x!7M{K?TTy_<|AU=hOtM~_V*hA-flPZu9S6Qtq9()VgrMa?On z2?62Dx5xw_-$uRL@sr>;aFYrwhCCN>1MBa6E0cB6m#%WakR$r}Xkr)n^I6PSfYO2H zL~nuY69_cfY!lgZJ>^|4*KAIvKbE^09ZyP%Xe87WGGefY#z9#6AfEx!*d%%Opk3fI4Zx}`f&IuMx&uK7UAeql-vLKe_`S=#; zHD0_;f8FkCMED9N^?agf7LgPHs^v#$@VFL~#cR$?|NiP7Oq}C+c}bbo>ddx@HaPL) zb+ zm-TLBuVnM&_bb-of{8n37Ai(;o0kjGH)RVKR=IP z8;oT<`V_0rF3H;t-Sl^+K4{%8SR3Qx7={Gfn|zCZULWRZ6YU?F-`i&s9fp3xXAy?U z5#XG}VL0$MpDHaQL^*=V6u#sLI`@I!|2SU^c3ae8o-_fEb$50Bxw+w@u;{;N_qqLt z#bzDS!W`gRG=EUPsOzy3ba3&kM)h`*;wWvK4wDE9NpmrX8L2Z?>fM zxerdruta$ke1Zf1R%?m)IdN;sL_ydA4z%gv%MkpO!x@w?Q0jZ8zsK#jjYlS}maYxd zSC#nAgxkP)6no3)gWuJH)F{E(3>A}rqR;uL#JmUcVz@+_avh4i5*(ZC&6+Jn18ZTd z$GVwXgVUQX3_fk5E^c&|L!{6V2$E#|#pj9-AqEqhQ_jeAHsLBhElw)u(20n_1uaF| zF$@bEJXe}frxb!2k*EMPYwOs~Zb2TZTUJO=5C})qI+o?f;@10>D1d^Koo0i%sh7Fc zdIz*80PhJVY8D9U4kgm`wG3qlCZI>kX1jL=wck3`nV^;VTl`+XzbqE~N*XSIiKu$x+BydN+}_*vx_~| zmB$wz3-+?7iJk`T?cy9J`>e+l<_CU-CfpW$6c zTsRvLlJO4t5w}YL5|N6^4`#K&uoZ{PVJJ@4axC{A|5yH4>a$2RIm6P$mq3)vr(J`w z#10djRr}4)sx5`5xYjWCtGN{jT$Yi!s3rnO5I!uV*?OUl~L&|h|KV^b3;L-bw zzHT!_2~%qu4A+bIO{=g=elGX#~%eIm)+3oBID7|C1_P?eGz2GlIJ4*mwuj_#^wa3gd zUTX}@zjGhiJtpZF6cGr{=t^WzXceZmy>t%}pj{>wgu-LtKzkA-01!_l`(ywOECXz}pLa;q8Xs3d&i zc2{0Vh$tqi(7xiu^pm&jF@x%rB-A)nQ$fLlW+y$6o(DC+FJ`*lfJOk!@By#<27Ly) zw6VFl{N^CY5xS-U?cx*Aa6^*C(rKR=@mb*l{JGNuVeHtHHIM*^b6tkV_l+X-ABf#Lgb3N>yv|sag{e$TsVpExBUMD=KrY}sG zp(^))KYZ^efV#f#K<^$}gG;)8iAO6{%O(1@zFwiH|HVb$KWbgu7aLhvUd~D&F8)Hp zqV;2w&GvG&759v`^^EQ6zQfh2wQBIa#3Lw%kvNO^{I6_4BwAT|X$ZIx1&gs&v*YRXzx(ixyZgEBvLp z>IB~}y5nh-rpx(sfW8>f0@s*RdXHOKY;UcSZ!$Cm1<_o?8jV)g4|&AzY;(u$UD03o z-su6dJ~PdH`#gHMtZc ze9J3+N(L4I*dM(f(^B+e-eZ}-uV&J}`Og}a2D|_VvlR?*A7;|a3@z4P<5zvzv^%z%cCrZKwu`e z>XEt`yiC#x35Dw%h-SYKek56=W&2ug%2SHrUjdsXlRh``AnL^t^Pd=elvQk+Hb9+uXw zHh;V!eOJ+zUjj}&wRw;%w`dykwpz3!nxP~tB(3ub+GhJ`V8Y8%9KPKW$Ie(Ex`muw 
z15$dyR1;Fwc4z=36uVoe%_GWPt8iWr`jK#=y~0)v2)?XjAQnD%OaQEfGyQrbyhVx4 zOwM8u((t7-o87_LQ9(?NyW`#Mlx^NwEzrsUeQ zS-b|Qz5K5W)P@aiPU8xDOOnO$=?#Ldfo;PIgNT!bsp?l;S zO2}IIO8~ZEy%}oyEr!?FG}AL~J^UdxoqSK64`XlOklXcW-(^UCuz4OXGX!Tf&;m++ z>wt&60LBsT2OHgu%2fmt5Y@S9Hs746esCJ-6+_pJ0GStFI>bf8=eLtAiU@8_;kz9j}FbNYlMEybf{(abMrmK`n=Q=Lu7=Sw)?-$j&wd zU}<`lrq~ zuYc8TU+#crT+OSgcFf3!Dr5{9df>el_pF=tOoWAz1>dw;R;BIUYi-92qu6zIr7+5{Ijw@==YIBt*W5BxRJ;Yh8Hnq0 zVOfylwjslQE=4}321%&LIYKNgkx$T^E2^m5me5<-JOCN_I5vO%j#4PNDg0N6yFh}- zg)8SiS8WIvbwzGn$g-+=DRs$U&n@K>k6wT>h5y>Y#T;s2g_K9DK{+7V%!JhkNcJdD z5oYqzs6mgR$pPafisGf9P@dG_lUqPF-=WY6BI1Q*t7LJ7jUG2-#XnnN|$A#&u z?mxJ=1&{02*!^_9q~Ip-fgNdz%PVs=(|?9g>fUr>Im&9Hfd)3bZKm|qBG*nN3;GOa zGM#7e-9KScSeQaOtCJ43d?@Nkm?GK4f zGt1TZe)vi$?xtBlAMBrL;E^vIl z@J030R*Ap9N8DT{r3aXT3|cMqvjO!!1`YI|&2G9qxwl?_)4#Cb+`{HF{?6onC8k<- z*~TX1&K+Q?{A$Y!{Cu~?G(E^)AR3hiIMruf{}bkmlWbw21Z90y=IZ`tVBn~cnPH)6 zAqSwSpr9gKYpeP?R9{x8S8kf90ur{Pnte;{It6=am6gV*d*aqM75&wYGxd~q7Xb_C zIl(3>90Iaj$Rlry0M%p#v#x8LYs$$gV>?WeL`?`*GVH1qd91&)VW`pu8x&ub~%sIUh#sa(qChYc8wK=%<~ll{VEo6)Z9@vv!YXTS$1J6D!XbZFP62tBcy7 z>>^7?XS9fXc_^e3v}FY*)B6~(_36`Ho0&nt4Z7>?-T9LRik<5?j~kx&TU4*%%#~A> z8ha;i$|qFhri3|X;AuEMJoF~ zQli{kb~i6nNY@)rvFqnnSK%t1OTu>4TQ6`FX=}wjzAYZ)T$+e8_+5kh>Rh*f9E{fF z^=4MsDkMrAnSpjHu~wi)q9rA8F=1(-PFIwEQzJoXrJJa-;>o}V4_q$Fpn?3k-p zhkl@RvQEk&#DLy4c~e(eZ+v<%>;$w12m*gaflhJ{`I6GTr)Y&;9_f6GyosYP&M!& zC|u@aRrywDat}1!6zjrikmrkq=V?*YL(x_b^!wWeueC@n8Duqb8^+34%f)hsfbJ!? 
za;&+65@gy$-%Y<+pgd$zx+$X~v)nGbjm+f>Exd?t;f=i2cU5NMQd-u&2c82Z98lhY zS1gs5{;WF@Xk)IDQ?(r{Cao6}d;%gf!|E=TZQGy+(5iWIe~JC~Rq73N_ccra)@+gE zqM1f(q$zOAKxFEe%v(loY8^uJIWyN#+^fPaw-J;Nr%EBC7rV(}RD#SxaY~;;ikAw< zjVgFc3OgA6Y;LwMNJf~lOWe<>VEIO z<@pigVK--?(HoqcpBfsVDvQB8{f%B<>xj8B3@A+`kH4ER>y+1N^4x|m1sV5c9p$n5 zNb9IA-p9!FP_N%lHuq!l}$=qNbvf(L`1Cz2u^?FUVd^D7Rg`yQ_1j##h#qVVEG^f0z!Ys$# zhpNzA~}YM6nEsW;o> z*Y?y*t&wSRYo%BCy5YA18P;x5Ol^mvgVm%Z0c)OD}Urt8uQow(+Wx#%g zdX?hOAVkfQvyhq`IK+p5YA&Z#J-k7|SrWT@P;B31-p;KKSTxv``ik@uKBxbYy4RGL zH}mJ-HRz+zVdm>azkgNUB0IA(n1e&)i_*iy3{t1nY$=FQF7+LsB(UQL}vIv znLYO>G2c{w8{AaRDWb_C5qJpHnNTSz&f+&j_4rt2|Llyks%XlM*lNyR_$m%snM8f0^I z%4Gzp&*&Xg7||Pt8PvuL)vXFwS3=xPpa*-Ub`_~9-f^W^2vBmT{67mGjD-1b`5oXe z>mKkzVNn?67D%UuO}z%?XGVkf;((5t73~_BiLpz7O7UcNMyXk;zJme|@Ck(GFl$Q; z#h{0-z1eA&jK@W@Q}SO2QE=k{ljG8Wr&~J`@dnb+H8vOv78vn>V3^Z;Z(9J_#BNDg z*o6l`e5%_GWPy}yD5=!3QZ}c74VQ+fDbkJu49@Lw8ER6-&1itS4#kQj{ z`1Y`6^w{j`JGN~@-30zYa`@fOFI!3qosjO=ibZYx-41^3bo|fPn&vO~P7ujBYs;K2 zCjlocvnGNs2HUUhtoMPgLO;a%*er}5^oM7VWw?4BYELh=c;OqFWqBD_vc0Y9t^HzP zz@Xfsx!;XWTP=O<))qLuKR7#T1$_0U(L*X3lAmWG3IB)&4=5f zLr)!K+qx^3&B7gqncgoy1kw$MEmdL;;#F2bzqlyzC4V@6HHGA_b$lM)=zxf3ZXgs! 
z;ynt<1!5Z}bbL(3v1t4tDkEDKm@v!BQ%b#q&{`ZjpYkkUmt@*ZtC>`_@s9^$*N~hx zPx8__QGt?Zl?B0h))7$Ls}SGFc)ayaT6g&b<{fuKHQ0noQD47azg#Y{py#MkVc5{H+pu^$964FD8`w57JxoO~xI2KYF3i}o7A_+Jq7Z8Sk2tgNz z^9s^>D*>37EB!tQp@6NPK3zjcjhUIQdQRUYy2xaoy%Wzpj0wkb7<|57G9l~{PwVp& zW*}Isfb}3aev_&1O+0Ty6xcDOeCE&c3Ss_T2A#n0jdirmOJHZMog(}Y@`$(KP?8Gq z3D!iJ{ytatr>y2XY{|)jgZ%wMt#G*3o892u!iK5mbFQioh7mc6st;3y$MGW2XX7b3 zP4JuU>kH+y%iD;-{BI9fdaai{Qz3AjV8x8rs5@$^Ihw~vf6t-_{wqWTgrbNuGQ@#Q z`UnPcsokVJkd#Qp*w^xi$q0dI669fi>f*HHlJrax+ah?@yw$K>jUaW_VB3N^Y9M>q zKO0#&W*Kw$5k(>DuhZV8m*h~=oyZ!$I}}s z7k@a(5p>{5_L22ynRc)Jr0;*4RH@Jcb2+aVZCo#R%{i~&$!6o^Y}{EcVESvP=kIIn zQR{$21S){-#}PB*5gjo83YR0cU$vhGCPS=+u;38_pnw27_Y_wG><9tDuc2;oXu%L> zar7C2zT9V#EB4GVkB1)T?w2Wi@F3DKF0|_bfm>oEg?Kv%5^hY8%hUdhaJN~WDCj=4 zkc6Y=1exoAoZJEhNP6z6&#&)6C(zBs~|9lbY17m;iQvn4*?A~ZK z&JWCr%iaaSl6mU+%=UYltL{?15E@L+R`sCM!LjRho<$@NhR`L4@+6W4Vzu8 zf``MZ4m3d7nCL$RL4U=4rJu=vfY^}m{nyTz0|ug=P z-(T?j*Mvw7bf*%wZJa=ku$7D}KSD;4n=2RC-8tDBqIo0?5)6sJ>IMWxNBiL}Bi`Ly z!$gTv^lq9XUZV@&_IyA#S?J-VmR;m5B>WOsYj4X9|U1OfVs8-gOMHw z5fTmE>cRx_LR>q@Gwlw+WFO#?n)fN^7W60(BCk~K@GnOY5=}vZF>ONM|7VQ<$6qCA z|BA0+@`>X5{(_(+s3OD^`>y`gLF7uhZAEJ`t^esJzx{>3OOY4>=1;=|L@dPvdb)iJ zwkBfbJE0-#+xlAcWb=jfgAyh1-#Yr=`sdFg;BSJ}LQEvw3^r;gvW$RWIMT>>*XmMW z+*0Q&S;-bb{H@&oywS=WNKQj7CDD=r5ilaMRHsJhZa_sqPG29;G7Z5dW5%IoN6h(4 ziV{5{=YLYDza3sUcLj1P-e^9=Dk2zxn5@O4A8sC76BM5ZsF7rRr-z!Dd{SYiM!4)> z)9!!v`&YYbK>$D@5*6ElnstVnwfNun+K28|j_&}Ac49!QnyOw=Ws&{EFMbmZBDqD# zLIHJs@2}DQ!eG?2-#jqq(Hp}M0*5b`h(|kA>3`jCSFwbLTTr3-!!Dzad-gp&!^XXqs_fuQ+363Dk>di1uK_yl>r}Kh^vK3- zbBtt(uJDNyx*Jr3WcA9O1tU|#N!CiErx`~pA1d#g=T!a&tA0D7a4s#v(8xqxj%o4o zD~md7wP*c#hqhG|iDd*7!=tcux+uROvgPnq)OGJKW=WaRz;KBu?jJ;`zbAwLXq1`& zZQFTWpPo5ow9wD4utii0y$mSd;zH#^?-8QBt}$-bI^3L{<0qUix?kx*0S+*DlB+c} z#vz$lrJYS_{~<_n1wrC-!>-14DOo+$i}wYk4f^t;DjYLPp|mxV5>A2;0wE_u2zFO} zXU=|@mnA2mR|E1CROmKgL_|O)&=*i~{IL8-(tq3*3}VWI2sCTy?jEQ;vTb^8AgsYfN^FaYgs7hW&NBfYscJ^5Zhp|R$ zT>X-Z^`RA(n9^&p?}~yjyfk@Zx=F^n+tW@9BUYnhBX!A;`U^je3t7V9)qt3pcCN$2 
zezl0QOo+4nhO$x;K#d=q5z1q{LGyb~Qlzs*v%mF7B}M>00)}~|pOzUdE1F#qvd}-% zXSEu%iJR*-Rb`oYzRt9uK@y}&yF-<&eHetX;b_!Tcbp!@ao=0+^kQxUR>*Y(vtKJ@ zga%eZ7}wA{C>h=O+YeF65USh4V3>Srw|A*BHinC0aoL~B`kU!DKaFp;|Jtf`}ThY-Pc_CXmc>2DbPe80#Jiin9DM4Xk<= z5H3Uc+oMb2fRw=gxdXx9f;3k0MKB}Er2sXtp2m_Yw@^0_9TK{RtRY8b{$ye1%hW{bh{6i%91tUW;9y}cWfKR2W zAS8XIs>?-vpUmL@1YZ7L(ogo*JMPKz^S^%cgP8jQgk&)H<4nQePtt57eM+-^(4MXJ zw}E~K6+g%c>Zc-3lwBxm{t@Cg=1ti_iib_x-Rj8F{y`CU-#!&_N7b6B_6KRq3;~~* z{;mbk{3)4PvwIc!=Mv+$G5_80_YnD1ibs<6*YZC|#;o-z8S`4(VnW0pl+ykerEMRf zB~|%@WdGm9e~0z|EffDgWmbZGT3!!PmF)e4h5kfJ>EF?^j|7owe6XiZ5NoQwRBU{p z0q}wH8%j>_{Q;El^NCMoB=LrXmJ~1??GK%ttll-_jKeb=sA+f><7clvlitoUX_Bbc zRsZ&4Jx55AV&m0sd(@Vg==#>X#B6Mw+H`-1V0AD=kWd&RK5})?lATp=xw&Aovn{&0 zy}UkJBd8VEUz|!IA?dX4{NV|c)lee2A_O3iiFG;FVow~{5E@l_{qJb`9>Wjt4gD1Z z%Uec+;y2>V*#d2wEfQam0|i7ZMDLJEzo}g4jZ6`M7ivQ{Q~kS>%mfd zAGKyZ+3Jj*KwRXt_lLE?*%;vULcP3O1-DrRBm+BiorQLz_oLXiagAi-^NiqCR%@aP z%T;m9RqC1it*l<@xvdy7D+|&~XXO8amQO~G09axi&QwP*rv|M*@QRxDXtZn)dArKb zu)UEgLQ4}ZjiutdY4t&m9*40 z9@739Lio~fxdOpR38opVixI2SHKH}JOBB=TxQ6D7uhLTrng87WdrBby>W2stXrZr` zEkB=P7&)jKskK*BjL1^N<+6|pi%U$l?4%p2W{04?KSQ+~N47BinRMcp&;C22{T*HCQ2=WGaXopd_3RGk1{xF#B){ z`$2m$+v1p5fBF^~1?yNpQu#dY=TI07)r*Tv4nDP#PebkC{LiUv{_Rut4D^HLAQ~H0mbF-1c!eJbJ7!(=D z;}yQEq85mfz?7N0CnxGJfK6~01FwdqcY1O=4`88f8+-K!ypk&J?&85{YIpNNf`QIv1VkaNX<58#dXuK|oOndyQ!)KSM~SxiTetM7f+9cwP6)oyiY zNWrNqB}Jw&=v5d^fGMEe&zvFp?{Pr?H4f-l$yN~pc%*OTO3g<@zIhvdRG_EGm@Gy`|8e(5rgk zh^6MSo`htLzG4xy4*h76@*Iop;UCm=`9=GEt~HsW71Tehb)hA7^!)4R0#Jy)oh5tI zfWz??g~|Jg{Gksluup0_GIGehWGknIP?|drOA7t-Cq3lw06Dk`AtqXO?Euljc+=3G zW6j8r1_YT3zaInei#`%klAijwYtvYCiKff#IW>7+u<_?tQC~o*eO6Y{P*H)q#R@@d zS$EbBl2_|%NH00(Dij2}!~V42!A$bWK2gT4ArOF;(-1hRgF^a{>Z&0|Eox{$z^$q2 zs2SZ@C-ieM{DC5>BaNU(rMg*RS5UtY)cd)J>@b^YjrpB`jPU_gAjn4Vi!{}r6|bs! 
zGzAjWADR5DiJ~FroNy?>5)el`3_{+Vl>yC=@y;a7R6?o2vbNGC}rftelDD_3|9!rXxr~ak^29 zg?R@A!9oCo>}wf+`LGrL>wouq-#H`zW(3dCq?AFTnImr8hJ7^T*E21=tjfK(dyp7 zU^_PY%|4}qj+rjfJqh;S$Xj&~DL1DM+?Uyuwhz z4}8fs&oVh5vh}L@LM3})vq#HH#dnWc?ef+yD)}b#`FTtCdx-Qv`}>);NR?kUBB&4b zKSwA3KU?{KYMZkxrHJ=Scw_)}Y#EHKZ)bUxv2PA146~r(!HvI{ zUlCLW8D8siOP=NLUw2k^5%g0PbX3l>TRut$E0yTKO46Q+po!O&7*uA)UL|x526lMM51&_4!^%tgBTt|3B#~f8QiTM$#LK{fX{VAgY%lgnWnG zACngg*lMW>UXn7Y{!kBaKgymi2^eww+VVjwA669ipy!>_ba9nj@Lksvq2$Vi!vLc= z+o3&;;8HVK_Z{Y(k?Zw-%gpk0-J1#jxQo4B)h+fFHQ>gE?nvWdSFNb)i&n!*`=3LX zb}3!mU30Dhhc+G_qm`wOaxJ7kDq%2WZ6>txF9l2SY0ZLh+46!8vq5;0_g7-)+y5Tl z?;acp!~rf>hJi^=rj>E!2hs%bp~~VIJm(2>aYjZQ+B}>H1 zN3<+e0I29X3!bV z$x}wa84h^QXxk_-M@3JQ388M<*lix@|6L5#tfUdW&nyX>jvQdLk2q?$-u&5 zG8}I*vL*?;tByRZAD{>^qKIWXO?xiluI^BV9siG{|cP=>N{um)s&TES@L^TQ)y%2AxpWxmn>eN>yp#dFy z)W--HX3XbYpJfnt!O_y=4@*#nUc(K2u|9Um>lpn*Uf%6*Vy~Q?nb$2lj&Zoai|>E^ z%6=IA+#^&{ns0D@9WnN$tKm`l^(d1;srPTFm=WgurcCTGN{e6kX@}3Jlv;bMp7g*5 zpSc>7@B#fRNYUb>!Ki8{eHJ@`Z-fRB6)+k?`wF3*x~&#GT@O2R@yAX>LrIva=uhJ-FvK36*!(2{dM zH-RyfEY@In)#RQsqE=IZa&}s>-WC;gA`nBVT7HkuC{tHg*PZENGBvWmM1wM87br&R zxi={h`1I~(grZ>Y`lgX`US3X}iSg|X`cl?z%lkhYdr~HQ$>coI=j%V@Y3vG>3?e92 z25@aY2BZU*nl~|{z^>VEYQC9|oEqwqPYs5Ho& z_i}@_+tnQPz8oW{RwQuf>NrklQz2r^lGT<0Sj zi2h{zdi$HIMSHTOyZ#;_o$ww1ntfa8gJ-P(DR7M#a^7U&0#Hn%3(Wbm%l>iy@LOeQ zZfC$4#*e80>m0?vR~~+uk@D-4-3ZK{JOt#k>sYc-RgHukINgxTmOn7YyGn?RLA76b zA?t;x!ZYfKbXkcGp;@Uf+;Ou;f6m|hii>xcG4sz3I-RoHT>h3?wr9^c)brR*ge z{`M`1SfeEHcxLOM%1zU)>C}(T(lm*Dn z$ALtCk3zSVinwajWPKa4AY6K3(dGTP3JCo-2Z@q{mU-$r2W0vop<)}O9b_b9MXXj7 zX0O-uRe#f@v<`(Du_~dEkXLdN4H1Wi(#Cn&GRv@|BVyv1PjSMHA85QmDWrJ!MS+ly{f?R@)F`yQ z5fkaO4LYl9Yy|4zM*4M<&PX47Yc_cILSf)R3g|iD^XJ}tbS!P|fB1Q~* zhRw=jFVJm9v!f)}l>{Ukb463UE|^3D+-~Qj{sj6I5eHlGSXP@dY4mSei44!_tLX-p zg-U@!;hy%jm4lyl7BvPLP-a`4G4T8xqv1e8$^QNNrJCYwCz1Uy{CKG>4N!spm;<8e)iV>r^vtRzdDFb^)s~xfIHLRnp-NVPC|gMcGIeX(+}^b zG)FYE{;K)Sk;VNp#FxdyAGwTGdhU*b6I{pCT428cS|xv^ACEi{@YCBR{Moo*>uvI; z$?FcVlqavQ%|e}3Q`6lz>;i+JG66SFHWpX@TCWoBms0Afc@~P$l$mfa)b{Emd(roz 
z#(kO9q~0^!-Y^qnW$uc;1Gp_$ty~;!yK3_Yu$>afw(7U63R1(NKbK*OY%i&WEWnlcamw3f85TLGoSdR<)BZXMM!_R~e;NN1O5L%YKB z)4!#Wg6ce!k_WFBg4{0ra@NRegsM~>#g`^OHy;$S+>ZmNm=$vPNts>GL(R-aOmf}! z|5KeWu9$mgzs!q|Ri;2-_c|jg^@2EoyN&N@fK7Kt>Uh5x z^!`|rNJ@)u|4pix6fCJWi{j{l7g(q3sTQ!CxO_KTv*NJLvt7aUpz!N3Ap(qYqyXdo z`xTs$rGX77YPuRf&1RKj0W7PPH7^_2ICk_s`y7Ilbe97m;8D)~8z>~H#6k{r*jk|~ zTe;aa%4*WRh^Myb@4^xs7wRZ*h`ZgU7j!uO%fib+Ah<_^Mcj~CBn>VP#Y>fO8mKf^ z+JxuL3Eri=ZS1Mb%WP|mt=&8o>H798l|_teNv|p4oXS9FZalka7bv&X=Kq&-z|`BD zFVLc`K9Ix;aUW&852#0cX1Y0mhIrAL9-J#9x@!JZ?mnw6GVS=Qe<6iMymR_{AEbAk z*}AWufo0DANf$|VhUmhB>bq_3KqzAz_Q*B}y}rykoErbYj1Twfzhaon{W-t5`~fTT zmWU&P@ULaUeU~UR3@leE?2}rBK6lnj(iLKg?|rp~8}ln1ts%4{u0+3`71x!&kjhEh zl!V*l8tLoIx<@Fvv>5(dO*gIP50YM#ync7tWN$0!{;kddp^^%+s|ljI+1fo47-`F3 zyo<1flx;W$V^8TXk%Tf5Bg)#Lrvp;CX?0>&Vs~A386o^o zPi}^k3DY11K#Ykekl(mE;*WaWX`({x6zuL+{G$a+L)H_E{x_=0?{s^r;R-f>{^*ka zY8zSYA-0d(PYY^{Pw>QDrg2l4`JHE$&{ecrpaIvW3)0$@_LoU?0O_pEAndmK0=SWl zUh^3sW876#^!e2L_rJ>DtNj)Ird7T(Yi|G+Qw^Vv0zH?UyUaFo&H0Q!*#8Pd1lw$i z4E$m`_+?X3W{NCBM+YUIjij0WMsMg7k>@BEgiS9ou3uEH zM>pRr3_+H*F};@{adlqgA1dh$?ST7@yR>LgVAd{)LDejkF^lrPgy(rt)2Bd+yflwf ziE8-*8C!S5H|*rYR|3m(4?f4n3!JtUw2}Fd3Md@DiNUMyX%(lu8Vi6YmSWU+;8rOx zSmk3TjqX*sF29z12il-FbVI4%RQj(I04@oPDe4sh+p?t`YrY&qtn#|?s&95J>({sd zBR|4U5aGTO@>{}NTZE$I$*926aYaF-IyA<{*7gZdJiA##yjw|tFU-aa6)SGY>8aR= zkxteJ220`OL34)u#QkWY^9~7Zp0D#qO)CoDS#LOe)C~q`7u!dU?*v)R8y&)p2P~Gu z(0yLRQ4CPaYj7C_~1a}2x9%vo3e7^ceeE>%bVjT zuqrROv{f^w%lyXr+Ms7(&SjwNbB90cd3+NEwUyeOzDs!;ESAH#rM^_w6^k~?2Aw?o zW`P2PPyp?s&fUs56J66Un-tBMiL9pNPqKtuuh!q??S7O>vdfhY82b{~(p1@cqAr#? 
zs+E)D;%T)K9!yvF*HLeuYo&xVh#zT2LMIf?$&z96^CrND*zNOyz~r*v%6skMLjFE6 zkU;anSEXjq9K5?a{*5Lq_MAwRsr{)T_K31=6-axB9s~`SVAHwJS+N13c6oTdga%v3 zn$I^A{bacS9TPpQ`&Z+J4N<39UlaxOv3Lb9vR>@j4+FH;y)Uz2?M%i6J7>+lAnvGk_C+AxEgv`CMP{W3 z--is5%`?kl*I(_BzdhVvd&jK^^ia|b;hFSAg@|pE;GSkHa>WanDK69^Gp9vHH z5!r=~?#5i5DSqbIo9Kg-iyJpF{|#hY4E1thWTqd%u0vR;cS4D2#b9^vnJj*yMZM=+ z7zBepr4$x=;gqt_>tJU6X>Q!tpcsHTaTTLKH&1EYy~;j!`2S!R#@VM zJfG7ZbzieFm>X_9tKH(4h|~9PLNumFC8GBG=?B(WwkbAtV-Eoz7X`FvAaQv^$j=kQ zRWcXMU!?87I`N1RuRZWWL`1tYNsl-b4@NrBt_YDf_om{pOlug#)D8Kc9lB_EWd8EI z(P705pX!&H+dwfVuv=8LfioUHY%*pwwLh0`7cV45C@9Nn67ENKP{I_u4&j;YXt?Tt ztW;4g2LpWE3H8ejA4&PH0L1Ih0`$82?9%Ka*@F1?l!({Wi1#1TB3q1vD;oMvxrx-4 zLgFpzi( zpDuXsAL2Wh%2pJG9#4q4jJ01~Fo@J|-HI5J~UN^IAh}6JecaJ$Q)u9^>V9G3*31dZPbfy)_r}FN(qF(dAE!2<=SGbG z$eu8&$u3mpsvRtNSj-yo5ve~W9Ksrw9RXHKRe?!$=7bu38KoHlC9hyee>PeF9p8k| zJk9(JpHl0dl>>HZF~`5Bpw<<73UB+MfnK7t)~p>-bx`6Hiw5tXLFc=w7ag?-etMS| zL_vXWhIz?$c7I%E35J>H#Ew$F5&V`E{Ad(Fh8j?ujcA^ zxf!Eo@4}$lxgKtyh14)zk_SPtZ&il}xA06`i*hZSO!~^d@}m$LZxkR~TF!0OJt<~% z&R*d~zp@2bL>+)_+W-t~#)~gUNI@U;yS7O>Lk{bGKCAJf&E5OrYp_4UOfyl5>|AYr zhfhlvD+j$+N8s5O(NAOL<|0@<@zW6PW$KTvWFf=qji`%MMKwLSYrc6GQ-s>5Tk(Lr zB_D)n4Ew^L$jsoe`L^F~$A6SB`fgCIeHV$`>~tTV)!%Us70&w{5X_E4VuE|V~BR7+8mv@-di?eR$gSUghC&}BhHB0^yIPVFs3A_pS!%uM+1w8i#$J=L-p=Wt0%l@&s)gsNP`KwHC&{^J^tBQI)8^5P$as)n4;@||qu$#P0IyFZBj zZm)eIRINAHM>=NlwL)ix?O$A)`<{`jyR7Y5Qz;DDb=!wsuIY%8VsHc>kOw@bnkN}p zJV}*PzhtHu@fM-hDHfH|l@Z#>mq9LHp0z>1lTi_T-k-LIZbt4fw~XX@-E)~O>Kyl2hY!Yg=p5>l=2SXpEV^R8wxx!NRGxssxLTQeT;T!LL~)5B116^iYzMC_q= z7qESDl(6rY)ZN_vhL(xe?p%9Am}mU{8TjDyje_AS6&+irI8Ql=?dCA$<-ZZRU1mnG zvc#z$17T8H-yPwnOcINr7|PXH&B_+kYMArU<46`gfkuXhzvvdj!}6f{8r5%Bw8~eY zcYS{N+rsBdE>zpj=MPLiTED&tpt(jB!4YM%)@PXX9-Zwctn5) z8AK+r8BK4`YK9^lm6~~M;ewce7us_9k@bl*I1SN10G!8+yB;NPL|5O%Z zhOe77%ikUReF940GqwlHq5sODFO`&I=bT$M>_J$ZU()`Qk`?G38LEf(;m4|w{j<2zp52WfXa@Lk^8(mAe+ z-z|XO4LXMGCF+sD!Cx`Uf3CfO|K-H_x~kh_GQy+{+<<92?$Xb(0l6I)&KhT?a%}tF-qL3 z{E!597yy>6vssYS$)AsBOh*!ycCC-BwApQaQ=3`#XACqz>N&e>D0IplDW48##eGv2 
zB5>eiE;wH$NLYC^ksn^?nfwuJ8M9rHVHE4%ZQa6N#WYiDW{qmPDw6Rhn%hiLfH#&MK0BJN-MdB;)rmL`hlhre# zWie+xE!RD_&N<|Vu_a@c@VQ!>4(4e_Ib|jrs}5a$iaYPa;g9N|88&(O3gyQF`YesD zQ|IP?)b`yzu3`)_0_HOwIMsON5|D}uG;VRK^N@G(c%?fZ9vEfcu;H}nm*X>fSDGbb z^Ross1CBS9wJJ=BTe(Bd$46vO?;9#4g#=#E)-jQq+Bc_>jJ?X}cie4n=tP2_XAFp? zpg6-NJVrCJYNF-Jopq=WmokHNNC!AIFT2_5_H9!Mv+cg(G`2+OASZ_FdtF0*B=TPV zp0PY#3chbbmioQ_aclrzs3&7&@Xto(uM$lOLWei0Ii5?%eWWW8WDPzbd2W^$^hBE< z{~J2^-}8BYHWDc#oC4gf&TO5S;g9AVxVF#2O2#%CIV_WGlKC4H!s@fub96-=&-t8A zkk#&sSN{k+9k^KWqN7dKmJ#}D3t;N?7k*+z+=cQgJnC9^u?1&6ScyJS24{PCtBpJ~ z{pBNd%33G4`T@Tqbk9(-3>ae`boWA>R?Fud5EGNUCMU=uh9eylAs_lrsw%|2o$LTJ zL=>2AF zA*PU)07)@8{T@k@of_JEA0!B1eqy5L&ip8Vs;Z3}l_qIjcRs&%aYd!W;md>hJ@`+O z8wFVjEV=Ixf67(ukJ51R1b`?KTJrC#MC3kXL5_JNGh7%=1H;YS=i*(rnx17pJ4FG> z_gvXM;)UIgdzPvfz%>J|0mdKJne7fY0jU=~WncnAiSOv*(+GwX!1*xAMf0bYQIv-< znYfZ~TXI0lgD_aLk)T)79_{NR9#S29P4GlInx64sRhhhp&lw00&!wq{c#5_xhL%H@#{^)_;Vp8Fi?L+80~!>G|brw%CZgnp+jsY1`_z z+Rj4w3rXoEDx^N?k1Co4UkpN#dAz``MkC)JGesIE+XG>oymZ9=@aa+`8*$U%YyeTo z_kptJv7u6iC^}F#p^>C}2c(hq%lG^}L#jNh0h5INWrw$8z91 z=$pvEJsU`SzT)NnT74}?t_nGd1Dk4cA+dF2REUdQV<~Q+KOE0^NXzky&MgNc=>zTubS`g#uj?{5CpPW2OMQv*>MuUuJX-ccDiilZE7nFS zrB0F%V`!jBYgk4-zj-+^GjXj{w!dHkrbS(j{B)Z6VO(qP>*W-ql@@-{;x(@=iRzkC8$N=Teo0>t8 z>J_23wW(-n6^(!QuI8?O)tuMr1HSm%TrJ)G0 zb?V|ORtM)Ju)=O~D=AEL^twB%1qd9t5w6Jo(=R5j&gU~x-gn;u0900dCVZ%qg0}V~ zP_}_!(zLmZkF*Ogg-xM}p=G&%Nh zerTIbhmIws1R#jJ=II1AsyZ>vjZt2{ayuk6e7NkNoc#xM*KEZ-g2909p`~Dx@&aLH zo_=fksN&^82bxsop_i|Ty=*mVbS_yrz@?p*YRL&^i?Pn>kv389HQ@gAFqlq2lz{6J zeVU$cnJ&8lZ>3N1zgTXo&U7m0`N4SKnphfG1ZGTJvP#&yXQP)vJuT;)D9p{bV6L2d z234zhL@VUHpZ-u#G!eWM1gyGZ$<}+w$099H@ps1Dm(=Avu-0EoElDGhp`gTX;oM;E zeo>U_q1KTpS09th?sh{ZVR=2rxx6GKj1F^NoHA| zNaBNukK<@1E()S=)FT14zqE`@htF1~Qh2n}R;@ly(}Oii`7C#f!gZin(aA5`LEmAc zGUfR69eaBJ4m<|Gy;lkgrDs9vOOEEchhEIlabn1Npy(Apm`|%^&WfvGmqPeJ_eEkR z=fa~NhCv4Ryrh|lJYllELwJVcUsTiGFP$;=;H78Bv-^nsv3x=acjf*}WPcP1%}Be|sjiD=q)v<~?^w3YAr=a2Es;KT45 zfC%H;^cQ_7Nk0mHJAAgfQ~kfz)hxR2i4||&im{UGp689`2oHKnEHLOEi{%^N;Y`lX 
z37uko)@{2h{ua!)O~av$xWBC1Na=k;nZEeT+}B&k(H;k;I# zEYaJvpDaN$B`O*}0AR`@`&pF)KuevUQ7RWhq)1A*g}jG=T0>&|@i!~!`=LjsCx1tG zBi1@hRYQ4J)9&|awt5j-idO>C53J3{L~M4Fxms&q{eZGg?$%@WMW4FDW_?qv3Gk4e zJy!Bk>))bW5ZC8B)W9lGY0Egp29@M`f#U9Y>2VETPFd4YF>PzP^((6loicb>=ymh! z0C$xyej4obfy2?<|M(i^th&#>Ce=nJgk!%gJ z?8iLc`Mcyb;3LSsV;jlppkYG<9_yMa4UTaSV2L~Afs z&JKJssYGR{u1ik%4pMjB3raMRz4Er#H6yc9eKKh43oN3eYFw9UB;FD@vsvvYE@zpa!Uu}%L>w1C8!fsq;oz=VZkA}Y_# zG`ptTU5N#!)htE4m)m8Jpo2d!NeClavELc0+vKjvlAhIqftc2>HPa%SmiKrQL_Ghf_x3kz*sHmU z`<3@@cknc?xP{N71X1iL#X+Ie2pW7AH=;y=6`G0gTE*_b%I4YdE<8Mh{>>$L2}0-U z^ChkL7OJ*0eP_>AcUHb2^=xnnoMWX=D{p>-0XI*8vNGL=O-RE0`H_p?TO`V;Y8qPV z4xq#UiPj^=%yh>K1T;h6zA|+4M4Z_AUM0PnVay_=6eUSfY&`!nQ2HRJUHFvG%k;%z z#6V_(+qBQ< z=IU?ACHFu_wq{?(-z64&%b&z~7-E0Cegy1anz2tUYudn_Y1wd_W&yS@?&X0FL+3n> zq#>1Pt{;f&ZbS=a5NXvC9u)D<6%I&YWt2Z+*U1_Q=uAEqOuSJZ7T3m%_qrP6JYUB^#cFjalZO#GzNZ$t}u1A^g&4>d<5P2dOV64yw^|5C9X(e^~ar z>eEUbJfL~Y!(ZO|4d#{vej*11E0;%PHwQNsRzz0~=iz%iBR0MioGdz6jLb9gqterw zxgB!t5ck=!o`v4XjPeB(bXK#g;RtzJEdW2Y?-?RxqyZ^(-zias7oESmG`bYc8bzEl zX4cSB&W#j4)rkwmW7EIe41YB^c?iQ&Hy#h)Sg~N3Y+sdnUX0!Cvo&E4M~g`bWTCfr z!nRx%q$Gk#hXaDzS%n=v2(AT++9jO=AuM&#ZWJSbZ|utv2lv5G6orgZe`tM2I<)4u zEw8#&EgC2Vi8RU6=viY!j|eXrEIDFlGN27V?%U#pZG~RXk*8YoUqFY#DeZKCz{aTB zPjZ73OW%s&O4~Je@(Wm1)g{`8mZPOtZCsf!E_45mxj!((NM%1&_!GQ~g%#PW3)SCk zr%Ger>pTtZ%h~6@0^!&ardEGka!N&unt~410Yq+LDk*4FJuXut6%05? 
zo>_)%uV6;`kN3Jj2ZK6-Wa3LVkLKSOs4nckh~sPsI*^dN>RjJyAwwpGlA7(0w!{i*Z{i&iEX>jn#+ot1cMH>LF2o2 z^ZB%SvmBYE5<~)LV{26E6AveI>2S*t{pHpzX2?x!(1#p0X#dsWanDfbWHjV^Rctpl zr0rBl0M&B2ar-HduSM!~MfNOx*WHHD1Vcq2-WVO42}-cYQzSaJ86Hg5)E>|j*r&ZV zytH5Zm~W&K$|vc+Sm+?Ol{(?t+Nf z_dYIdvbyo6B21dx;sL@#+9Xj*EY~9~*e-zv%-j59#E6Mw3H)+6k{jk$uK;7*u3lAY zA-P*k`Vi7lA~%X7K(v>;nTTEN58iI{y$PafP=IhR)j?slL&&!LjIfKgUhUsxt6W>Z zU|ONA-njFKr^f>7?I#5{;r+N}u_0dVc$Bh}vaE6>n`w&Uma8Yjlkfk%J(7dGx#vNj zt?R_zC)EgO4de^pEb92TtaHI~S4X#xaR?JP121bB4uU{PZ3)ixKJgMi@NJrwQ zn5Upxg78INL-};`lJ!_Iva;9BKOW>*CzgU4@okGE!QBBPUZ(YU>=D)+y%^hm-Nws) z7E^%eJz%EEWOCO3&een) z^pk_=-c$S<_B6P0!^c8U57fyscyrEfOpE*iCHdktyO?!LDz4S3paIl>GsdQ2zh>zu z5Y-SIPiN@%&Li2wg-2SLVkoT8k@Bja;#TuR9qn`#@*TO;E&M|9eyIsbtj;ek zv^sBODaCpZ_(1&OQsKGJm}kl8izx`%e{?{n9Y9x=Fv5?6GAulVT+oXjexf zUAJc4-gfLe$Z009azH_pcdxP>_rlwV0Iu8SzJ*@1(>V}!(ApWyQpK|f!h}gw3i7Ez zG_Xcf+l?fPn`gzf(rU`!MGt_7FHQze1wMWAqe7K?Bt&q^kq<`6Wo_*f469^P=}qpt zu{h+X`IVXl5>)B6`cRKoRY!1Yq<*$#xcpw2w0TNYL<&6`-gjj@#cut~S;a?_&s+uo z_)vj&Tk5>&*5kmAj2m)rWWj|N1v@1(ytR&DD3h^rZX0DnjW)--Q1IhB98H)JaxMD3 za@DHzc&+WN{p)v4HBZ*|QV;JfC&!sSeyOJix#@T=QWx;+JkWgy`)C3i@$K%@sM~Us zA}MKzrqn5XG`MKM9M5M9K?T^rObn(9_gwL=a#(q_$0Yf{vB!34Ij;%|-d*c(OXa0w zxIaI-T19B~w$bhwq8mLd>T%%MPoksG0^B!BTp4dyMng9qtFrD+T1IWX(0am4SzP$0 z&Bp3c0fq2I4%UcrGP|kOf8)ljrxCs-o9A#(Cl$=1G}jSvy~{JAO&?jp!*NENrzr?u zG8^yN-rb%*6t$N1-51TLX!qh4sUz~BnuS0y>qQTglkw9FFXn@}#6gCp)JB(q-9|d0 z!|s{kFyoQHGP_K`B1huzAGNmxnFRcmNa0I)F7iATT)eVOatn?F`$YNzt5)>y;53Q- z46^RQjz8b~zjc7RFZORp5b0vX=<3iAEa7DAnb*4y3x_95t7)Uv5Qaxu|XvOiDkho^DH< zh|SaYtF0;Af-fy76Lj!$0h`rfx{*e=b|7+kxz1Y_7W*&ed0y1jOh6>Sd7~pX@}V+q z)Oe=2w{dREXz0=X^niW*<=&K?Y7$<$S#K@Pqq6YUlWlgW`|+tbm-Y^8noa4mnm|HX zuEgv-Fa=&U+Wu|7kqfY#cqru{KoeoDlw!E*m80G za%c=5Fq!L8hP;`N7hn87)z$#Ao7(8KI}`qUbei{1fa1y~+h{p`RfAvkgooLe|5h15 zxZPg6eDh->=7qne55{)okUYuT|5NQBkCyHEbFh6}Y4fo@ z*Xq;8U%0o~$4kyc8s>zKwfstQiz7)lGbLu5pCOpx(Pka48Ld2h^8G)5LT7Y*AsZDv z3(6g3Jh_K2{Nsb8NCxN*@%vo%FejEN(~}u7bB#gJ^D*TxQS*efZrh# 
zGCJ~tkD8m}JqH(=rUJG+t^0PBDu+aq*WBti*v02Czx4@=F!^1pO7eae?DX|1#dXOM z?vz~pUYln@7cdQ*c{9rUQm>$?JaFzIg51W~pRn|nsHl(0YD< z)g8f--z`P_KFH_gG53fO3{f?5K&% z>|%z!<&RD}l9~#+|355%DjnO7m{BtCnpdjdzEWTQAdve?Ezv0-KSf4Y%9kcSLLzUw zHV?%n?*SBvh??*9L+J1fE`*>e$!Sj{7X`k#PW0`St$78$OTD?i^llvT4bG8z|zWr(%R9nSAz%+mj zPc%IoO!ZxkM2$kgA3wua%`vh{!NGU3xT^v zy_^R-a?)a{Q#N&#vphOIxIWmzs!>blhq}H4F9#l7MwP*3y;r^L*+-$#jJ>xxfx;4p z2=$4+{7jt>L__xkrL7nH^rggW;T76rc1v3XBWb;-4oWy5u`*}@@g;ckQ>m!?IOfC4 z)^{w+b6;tj!HP`4L!VLV97quG-uF!&HhAN=8NO?qe$u|cUOz-5%(zB`;N(Uk%WHai^w(*k zLGX6J(7}kH``6w3pWTu}=I=z}6b)7ioo6Z4FGmNhHc#lYaX1Lf+-@-6&X%q0K*XOO zV{g?ORh><7MP2}fYc|iHljqjZ>`jkn#Ns}Y3Zf__WDMjID<`O~I>T%+PvXOw~~})JT6Ioewr(m9X$P%0>F<-9jCD^C-3= z)23?Qy0D3|I$B9Z;lUWmK!q5r%3O86`>i8z>=(FE;r+|V%NnrESX zi!R4QpWotNcKl{;T#P9jZk)S{a>_QfFxM1OB~lIkBOY436HiOCMn?OfYQ z8=-Vsl~hCT!mHtS-*_z_OPyFM0~g1)u2yJOJjw%?^@#}XN8W4uDi3ozFDADAZf9div_HF{=Aua*r8eLa|RH2cRjfDJ}J62m_ff+ zRc8c$vl5WsphqZ;%o!bi-z+$!$_9DA!yWF$t9HDDO7Gm%NEPF6%t53NY&*@A0z}yT z{TtJsE?d@9AU!R1SPvRfQQopN*9V4K*CSzc!S02fV@FY5<7;6qJ9GJpCFtk&2Dy%|a~d>!{*y^!`~KVrJH!E_3W;y{-lgM-D2D7o1_2yoP3WTC8i=e9~IR$`juxgx0lQc*!jg9|v(s$7cQK2^*$N1aWW$^VQKieQWkm$eJtRb zkD@}=((WD225QKzWWR+z7avY)2z^;V%llL7yHjg(m{R+D(y1Nn7l#sLYvsVE&A>nQ7rstoMeZP_iK;(pb&0=&r6~Tk zwvwqa0x2B~{BMdHp2Iq;omN|~q=>0urXu-m;tWBX5A3t;bym3!&@beyUUGCH?O0ao zJQV9khU4adv$;@cRiT2_;EG^kuF-c0Su=02U7l%jafz0@4)^1(U`x+Dh@1Tx(&XT^ z+2UA{OMo5jPG%KPf@Vmz7r4c}^hx_mAe+F58h=v?F-ZPt6tCxR#!F52Tv>sRVR;Kf zBl{>Z@$$j$3>p~^FtueNGK-yho6;7#Ju`g}MawTu z512`~!eiIf^s_Q-FV}+*3(Xt(qjg*+G7=vbwMUy*)R7@W-cCn^GxR!qNJd^;`I;m< zkttWr$w`3Z0F>mn0GguF%G2TQM8}Og>okuOef$^5t+d;Zt3Jwlk0mV!-|!Z^R*MOf zerwyV68bMj+jmuq(JpRakMA1Qq4`4e?XU#;$t6GnC+3H2_rXGvA~Tn+0+c;Qk|F{sc`!YF7p`t+tD?e>!$Txy1#+0tXa__eE{B`V zhC{sT(HTj?+sTq%w#`@V%D<1E>Mo_Te6XAcKUnB8fB$cc(kVq=BKTq|;NhO=xPm7NKC>zjC z&UlhALWA5Odu(|6FdV?D`^s^9*#q-&`OK+ZObf}a(w?37?r-O|<;Yt~M@fZ4Dt1=4 zmOG0?ej@Qu98eng>fr+GK%3s=7y?=?3A2;H*@{K9Kxj21OihU`C`Hn*{V)<}^y9!i zZTckMDUm2{AOJdVRDMIHZaDxNUg%Mcx;l*Nd-}Am*tDYkD$x?v7DTS98sG&?_q*fp 
z{TbV=S$JM~OKGeGXm2U>Nhn1iA@E>D6D zyv;JEDZ-$d2Q6V@)Lh9TwR7}T3V)^T^FBp%F={H$8Pq{>I9bl(I;)-zl5+2C7FmmY z-P|9~Nv#h5@w=U|CYF9c?Iy+D?@aME^6$o9_r@pAVHWJ44Qy712nabHSN)82X*7!x zhCC`flDMC%#Ea*=E*~_fBXR6de(Gx#Jzk%iSg@(1_jSYx+uSIjqp>x|pjlp~DpKM8 z)z}A`AmP9DM9V3O;`VFsvw6^ zeS0{Nl9E6k*EN2uXVcFv1U(CL5d?qX1WF+e%Mmec#Y{$BWhLDe__xY_eq*|kTQ%>{ z5Z`z2>N>fV-aaXp{15S^STET0b5nfZ@vE0y=+7roIu|MG9KXvye$T~@jeE|c+>~EX z&`5rSs-=h%Zar(l8&i`|R3?fSf&S@YD6U-856};O5YD$gGw7Xda?0ZQV2hqZ`}@!_ z&?19X`@72D2&huKAJVI}?Wo)Qi3{Zm7NRGO2XjP4{XaRGci`DHZ3y4!QBV%&t61}< zj``7(=BrQnQr>^{94R?>eB*AK6sSQ~rOexHyZLe+AG)ngxrQKlWrF5C8XBJwn$i@X ze%65Z#v(7ZYP+HsCG@tsqs2|@a}!hxv_rXs)pJ(t2x@chZyW8;#xrJ`|5?rL(QLE~ zw*?(^a7Zie&AV`My*}1YZ0eBkkgCIuF|dr_z-y}yxCZ9P|EZ+;j$5V3d7SsrO86~Y z1&A8@$N9P4OrN}=NVf=lEXJbX^K!NE31N!BfYsZFqu!4rvCWqvtWix&=Or)=n|6^_ zHMOkhwdX11mqgT%jt)qbqWD(Z$>oRz%-_Xf;X>g3Ngucy=Z($7R4NS#_^UO}p=Yzk z9isfGD~FlpoKqL2RFvuG?-`Ooc@u%YsFF2eeu*&~RsNEHKv_o0w&kwX7PD2qGXc7j zdSU&krfsQ@2D6!bs(9+Z_oqux7drQc?r`!uFS`PnIFo1y;Qxocw+xPJS+YhgW@aV} zEM{i3EwtETu$Y;d*7E_Cvnep|RxpU^sz5e(9ei1tqrItE(K~`2)Rpwgk zi270M59iWr0C=I{6(x&-f}BIYFC`I1a3s`|Sr_O*colM&84~H-p+8#nG`M}_E_lzd zFyi2-S^M!A=1%;V?S(pR2!Kf9FDf|Wnd z@z~U**24X|MxJdG9BX1gkc%<7Fqtf#9X<-1_388ekJx2C!q$f%TrIs4nY^Ew{(j3p zn_v^Ovf#&)ZEX^#`Eumhn4FHA4_*bTtCI>pl+UpL1uAWjm)Rd?9$5MCUEd4#oUjASWbgg@!R zy7tHwoS|!*D>NV~l@b&NYg#f)FMq#R_kCYim+^+KqfOKFPVX(tk)w;yeQTf7z7V(> zz0M6{0mKXETqYimh2g_< zmQ5V{fzi}5cVHB8dMz)x%-k2dH&T(D)?>Ht4Fhs38Eiku4=NujWE(<)7pk5v1DkQQ z_xOdS@ICEFHDSE*xLuJ|+6@nsDx4pzj@m6evX$#ALXLx_Z%9}Wh$jeo;yGdrx1^8P zOX34M5SyYV65GNE`OZO`1b?g=WH~QB$GAS^Hfl(^TsU&nd))))H!3z>n(gp9E<7Wi zrnr4wiH+(yy8@NZ@{ugNZ|}(mmf!=r27ckFR)P?ZCt55Ocr7i)IB9=OjW{ zcL-uh^X8pWi`65m0|B?9RuLwdfF^QTBy6821P$o21!yD!7%nB)UK*-3cn8}-)vF*n z6z@%p%yjp#dpRAM7epByh@;*$mc++=v*=`Q{ac|e9an7|A0wgmpRJn!!^PFvjrKqx zC|oj$KvXN9JHGxiJI&O00^r?;oOd#Bi|h$f;{M=dSA-Bf{wPUIyu*1VYks;qOI zABqqY*{Rqi}kTGhXgU2qM|qD$N4LAXbM{SVeJ zhu<)VF$k{}By~zH!{znZv!?HDi~uo?ZEOK0G9Hm<&J^0#@r_a~{Eah3@mxteY(F8h 
z)!taXD&tflMijHoijbqGf?!aH3HZC+##?}CuOziQau^`M2;`p7`N`77Rk%+E4)YZD zu#kkKMd~tBuF@$&Blw=qp8^RX=o{VC9=eN7;3B*O8~_kyG;qI95k!1V8;Ot%@>j8XHICXT8ZC~BO%kpJk0fmX8~poqSOu@(I61E*ua9_zr=Jhs%| zjOxDF{qyUM$mKJWP;;-bT>6pd;H{+kY!|%xhUJ|RVHra|ycm3sVqtz^Q#DL4-ZWWG zQDzrv=osEGWzX|IL-Z520p&Qk{?Q@Jy`JXKlK6l#KFR=i_)- z!ZhXpTPN+7?Pj{6WHor8W2BWn=CsPF3b=oxWY@$VOUK}k)^!g4YEK`C8?{LvSqiwJ z>*-T@9<~kt%<(pF)%@h&!@L>5&&tZmfFn~JH!hB)IzPzn%4$O1S}FM6#~4I?&6`r? zYUp*|UgJEj(P%FGmXK1*_+s`X6t5rg9B_>h*9G>eVt9BD=DcH5$dc}fIH1bHCwa;= zXC4K#%b#ufA`Tqy&G+pg4?f$aRgj1;{_N}C%jCDvfp<4gPT`TnoHdk}JpNOOV)qa!KOKmYuZLp<9?m~i<)UE0 zx^)}Y)MZom3E~1>t$IQ#pX90U@@NH!GfvIHfXlcS~^572LN!gAsXcKfAGcOP6TdL!KS;$(&T73%i8)04C^}R%v~< z;unT86Nedi6l`U;2f-~~><12oZ0{41M%eBE=ku8bIgwe`;4$Rn(4<>2-LE%NeI|*< zW+wq$(EQJ*$hhfE_h20kA|c2j?c&$+6%SJBDS(#x0)%f&GoF5T?m7T*G(kvZ)oYP#Mfrm);HvJ|`=YcQnS;639Rz z(5M)D#;8Zb)pg$(E^2)^Taglho)c{Ch0d7h#r2M+Llw(*Tj6ZYlhuwgA-WFNJ3JRNo~3!){1AL`N?!Z{a7V2*k2PSBIUk|jVC$odWSdbanz8)qncqJ%==+6NhI>g4df~WJ6zCmJEKiA10sEF{la1;j~B<~PA0h&lD!Id>>DP^(b@2!_kjgoMnaH$W%w)` z>6&p>?T!G51qxD+-ltgZQo>VCEu47sSx@`v-@K7z?F2JiB^PLXzM`D>B2jIJzXgz zYwM*_cbakAj9VyErc@YwQ8bnRxTlm@bP?CXXP#H?hhY>fA@h}XZvN_wZkoDYfxi>U z6Gb8o*JEEPpbFLr>M*RaWJUh!d!3#)UZ-**hY-y@xaOmrIlm|5elNv(KR0q1z6GWw zr+6nCORnsUr%BOTD=!&zwuZS|4GAc{BXl3yC6NEPLog4tRYXloj&|9b$TDi*VEec% z4%3Hd)6PNpWOO|pXfya|>ZVEIbA|Vv*{6d*NmB5uN8Hq2+rg*Vm+e-bjU9g8T$c3$ z?{@=oTJSkp(w4>_PRV(Ny8vny%N}!r*{OqC@=7QfXb@6c{qu@^z-Q_AS0`yr0pOMz zeKoYMmSsQg$$kT0zdi883hFRQpUPiB;gXVx(k8giVnYM5Jgv$-y5E?o#@{_OHu{T+ zEjCsIVR}O>#vF^Tx2iSOxTd8EguK9v9N%*rQkwx+7s6d1+k`XS4I;}xp9nsYK5OL1 zx#h=cy?jJ-b!J8mYHyjkxgV}A@n~m9Z$KMI;KOwG&2uHABc1Nr&0NENiG$wkvIOkf&KRBuf5*Yfb2R|+tT5_FPmT?cP>3)jXFd}gkIR9 zz98(F?8U(Iffg)0{3_d=dT4!eH{Dhdg&j(GRk;AR>97k|A(IK2#?+NyBrj};S~Nyw z+GBrO|C_3HC_0J3Y|trJ%k`EI?wQ%cC;+B$>eGs}Jc=FXfg|tNx#ydk7VDoU5~r}l zS`nR1ersc?oJYSDqtc&=J@Dx)St?#H=NI?x)*7?h-{);zibku*CBdLmCF8j-hXWMG z_;{h>U2T2>>%L2mDWUr>c}6}R!GO5ORHr`9OOx)H3408cuSjqwFT8lKaJeD_BFhl`B{{1aNYQfk)dOxTt~IB(#4PC5p#~onLM%Kwm@GA}dm4ridMhh0P~Y3>dJ#nq 
z^)p9kSAS5@St@yyMC#z2tf4#SK3{W<${z69UrStTSZh7~cJ@5YXRZ1Ev2~a^m(af- zkxAS2vEJ4A{@PpeC0`Qh*Q@8v8tlTm>=TjT6tg28ZDhqe@(l&pCJ(VQ^@_ndfVb!z zy0doZ98Kp&AthDvcq2tFVKC~E)?mp3h|HSHrpNS(NTNzuo>}z ziIARmN?so{t;~F`v-n(A!0-y_BKT+wOLSC}OpGf~vF+^6!>ZEj&LvK>LU~Hik%#Id zB9R0Gd}-dLgiJ0>JzbkPhr3+cxwxbMDOyjgAgua;R`H2L&X7Y1rHF_9*C4Zw&FmAm zN7rniKGaT~10$<9>j@~@c;iEmF-As{uGb9F#K!@L;6W8kc+M1`{5Bpk{Ur^3X1B}4 zg{2(2N*D8kSm-1|y^Qc@^AH?5YF}?K=pZZxva#lq<@D8$gOzzQgZenyc2Sx3cTm>$ z`{lFW7I)M|g~f|hXR%2Ud=}M{3aHsx&WNrM1IeVCUT-A2zEFxSJstb6loTG_N6T9I z)}o?E+;+DOM}|Gd3E?p}JXTX@9%T$xzgC?vZhf^iKbsRjefpsZY*gFV%MWkAJ2dgQ zJ;lM)yRatB4vb$=TK!7z6NC4f%GQ3xZKs*99QuX6-k^fWF-sf9W;I5@l?&3b075HG zZPk%FFo)6%U&aj_<9XV!%_}$C5(L%d5~A&K2sQL9y5m*3+Z^U}5L?hG|wbkrgKPeX%kGn1`C&=prU{H48q!yn_BDI^o7Awpg{AQAbiV!Aqel*tI%OmmP zIKn6@!Q^M@!4V8$M|*kjIp-D;Lg70pHY~$i=KGMyL&qMtySJfM_<`dBM%E-7kCofh zxrL2fR?B-;)>SGg&0d_AlY`ZxXB`E{EX7zv({5snN3P+JGvOL#9YE4?o#eq(_3phn zEETX<&mUzNHd@)qk3(I|=NYm=W_Br`v~-F| zXFA&rkMB}*;_RWRwO5@~PJ3j8 zB=~*+g--vPTvSd|-?3ZwW2hsX8uE=b$a|lZQjl!IdG!~H>ddu2#PQ@dO+tqnQ*e^|I3n7}dviH2lh#>*oy;cDvo% zzL|*zMs1H}CZSi~#8KUhA70q1TJbrzlO>uiCSsjZ*h6p^Z@22~P6(`CcR5%Y zFIOO0d8dI@cIWO67a~EH(I68YHCk9XAPlqHbvxSWB$PJKn?#Wck;}}4Mv#>f6{AzU zE&vEV^I&A^QhC~7bdbOl6~v#e56AnGVf*J1>0Pf+Is21DYu_&t#D39$rv89QY?t_? zsFm(tQrY={x#{E+LQ2!?yzC@jR&nnn8ktC!#B;mktJFeXlqJvcY6`9L=1~r}o(T=D zYDe5s>-clXUfb)Ms0xnZcMX6Oh!sCb^ese5&p<3H^O!UN_i3ON^=THx9emU6 zOmRw+0l*gEO(Kkmi6SYG?ev0nmU1~+=C<3vfAS)tmVk!dnrZa*slkRB&K**;k|FUR z;ju@$vf9JCeI84HWg#0VFXUUnn=QjWnM z^K{ng(`WYqi2v(M%lpmDb^mpvCWoS92GEKN5LqHD8JBvOe(PiIO^gxT0OUIR6bT=J zptfLnuk)!0aI8(Ou}fs1&KJ!RosahhK1JwTe;%*wMr~LNc}vGHK)V(O=6pJ%xL-Y^ zTEHw_yWBvf|5yN3yZ>OGEN~}Ivy_F4AY&gPR@re=w(5Q^cA!ZfLcP~&btLq@I1?j? 
z`uNoSdp3*dM>A2K8j6s+9`nw95i^9qK@|myaSt0h*(KhlKl<}e-JJ4#WkE3ey~P}A za(_Fp$ivH!GiO6H^L+R7_erhAb-St4{-R)E1wjhOMo?^4J&3r4BD7%$-Qlg?oi13j zr-dZ<6AO-iL}8NdzOd%Z3HYddejSm#`4q@+bc(a>SzRe5^|WD{g` zgid6n<(30Vmh#O7(tTW54%P3|FWL0B*=h_{(-tXkdJ=pN*n-vM{mmjW9h4J~ixTHU zR&Ve!YMkxv+!q2{Dch&a58pyFHrf!GWGkwaa=N4Gw-6SS;ex=mRF-ocaQL9#=uI6? zGqyK%xOt4ZKgl`1^gyg1%Kg?q_kI?e{6L1F!ycr5=S=)CMPUyWhYtk?lVCf7W1Mra z8#K);mE0=2D4dPOC`03X^^~#kO!UKTze*-U*v_8wB4Nf3xdLgNz$G*8m~A>-&AS6} zln&y0*Bf5bIpF*Qe6$vgK*G4)hxIGywXy8G)06|coS%P9$vm-% z$r^>3fLmex{;9!iMA=CHF38aRK|Gm}^)MqIcuj|;$8I^h z$ejyvN)m!-AGx&bVplJ_C=@QwoBXiRkKN`Y@thBNig-oRl1)I=j3Nm?YeWg)Pw#{d z6jFY+pFxLTn*jPg-nzFQ=SneeSnePB8QrHSez*fuB{>ud2|GQ@h+w*AM7xZ~H?Sbp zYCD_0)(^jhWRFI6*pu~>W3$EoUJ)D;uBGg$G5N`@j|~j!kr2pd)T=BqXm$M^n1M;}yPe3|n_z;=`JQgGXWm>-|XYWC%5B zH;j2qy=|AA8oAfGUtXtYVtUMMz4(w{5h>tVVk4xR6hyqEy?D$MRvmZyDGA2SqHdRs z7Lc+WN}Fd0Pud$V-h}Zz7Z@tqK^M>0n4Zv^egPngOaztgv&n+JslSU76vOrUO z9F{s`+HO4*hJ8Fpre*i(pwMiz+Rr=_?0hw@%d}gg9BO3Wwhvd?TUmY!xaTJl^glou zmM&X=MdpSTdYrgzuzol+NeHjHc3hLzryOLYSofGQ^U}6GOjJiSNO6oN0C!`S?;t1? 
zAvjwDTHAsnp6TCEc3i)P-4045_)%vEyNqK}qZ$H#g`LfDtwwjWD(RI4$G7E8w_T95 zxts;cR|rwPovblTnIk?{YAbbK3wRyYb1lc9ngTL8X3UD#Tz4s*pS=RC)*+As;lzn9 zjHOYSb4hI;Yb5S&C{W1QpV&}|p|N|o8I{6id|v@hjcZWdmoy7&s$GG169g4Cbfg=6 zH=z`hI8cy71sM@jN&7%)0KZXX$3qzbTW#lcUWRx_++Ao_A;#j(_p8EW{fK>3r3{Ob za3!z9J2!y4$s~h@>oLp`Y1=hRxzb!^bppnSl!R$C+0lw5eEM-Kg;o}254TRDwb_F)wGt876p9n#RcR#r&yc1v zNG(rKtN;j?n)cI>?t8antu}zL!cFZXP)4GMRyFJ$S{bDKG&BAtQ27!M$HS#^yiuCKjsy_(t1CyQJ7cAXqW!Uj!ux!&T( zcD;Z7IvEdWQPJ#dfV}^v?)vhnXbEL9VIweef)o^zXHEc@_%6qHX6jXCzlfHmVu5?K}S&D21~Z-VS4SjzW$G1NX>5{+9Zx=vSWujP@B78Rl2nY|eh@ zOoy6p!02T?0grR|Ts^Vph+7^PMwwJ9@}ueX?w)Vy(HPu!cAH0Ru6kHLoAn32k9nh& z{^wyfQynIH^_RM>Z#CDMa4HMLtCwYG;3mHqHas7hPSOuO`DOi#$us5HP9aUYleOHe zlz+c0kk?VyUv>xsoA2VTb0roLa=U=7CSg5|W=#W*vvj}IzdcV$dz{VytlQ5wLwl}* zxsxLwJ=p|;hF7yYUxQhpWT+pan{mDLe{%v`Up23VczC|2I12d9?KPiqexfND*#0e+ zEdqr1*|&t@-#hT57NddM$DuR>t!i~5lRrG~reIc+lC$yrTb?H$74v)u&E>CFN{SmV z4F_}H-^d=Ub8I~>W|(+#1Cz7gn6o?i$bj3;ldtgr%mmpptjqdlWgiTbywrgQrNINdE|DkYQ1EsS! zL8k2)RmN#6{nb)vBxlJ<$M5Uk$;tP4HGD1E=|hIwmF;#fd95W!0^w*(4{G1GKGIw`IS2}Gd?ZGcv&nzHv&=k*^lN8Wldcy5bk85qHYwI|a$P`*9V9Yvt=4G!zxC}35$N{dXlxZiH?F?o@L z`F`sM8+!83nbI5T%4Ukd*xofFGZc{_ZhTP+c!p228@l=8KGK%QjVq)3d^K>%@1DWW z0+4cqC2seRL+?v2h8;cG-GEVrcm3}33WErcs+v2{U`}wJndf;<4!Amuv(^!~MAZ~T_rSquv zJHL?x*z>k9ELOEU7S&YXZLCQMZ#n!tR=o_K({Mx^ zraSz36vkwc4zV@fv1;&H#ck&!A+g&D+}W$wWh!+`N6cF;IABOk|2!t*2iNAS`$-tJ zNYjnoqG4S%J(#`Vv%ho(3J1JJ$*vfKEWGc4m(`@DTg3_7gTzFmUhWRJ={2&CTkk7D zMh+6W0u*F}8=0i#CjRfnD~X7Z`9Ny%MPCaBCiT8Cp@l+Rh#rhw$xs3^SWoSgNch6U!PauRS zrIviRmXdXHnPmn@_W z%BOd6VgM{>jKF)1!_OyBv_~)3K%?9PmOE9DyVt$YWW7@woMQ1_XV$xGL+`XQB6Uts zfza;*O|-SC8`+z#pC;vM=c_B>y!BSB`i$slG_DR-M8@iorYfCa0}w_CcxQ=5_ zFTG`KKjq|~$A9)muZUEv-B;$`wuTo{-Muwx6NF-6YQM~~6MbRxjGF|3y@?kJRxs8? 
zo;SJ>i@|w(!k937-FF2Y{$2^b%Dk-i66qSJ#~69*c&A7wS)~)Cp#-|n68yi8Y2sA;ikZ92iiOKIB+zakO>+axmfO0{(Pb7T1yd+3Lp6hZ-jE@ zli$v9C2f*51glYNECVD(ARY|0r_S_7CUidEW|#fsdK1Uyb?5yc__{eG_N;tfH8**2 z6F17H_3Hxp!izV47rj$)g|Gr~qO9d(XIq9=-6;|q3PW<|SjTA4>sKKQ9fOoJBnOm} z0?ErMSNWOQl-RSyyS?$$-Aw$K9sfx(Da0yJ-pkA*^>EBImm4X%L} z3^HbxJee^KWkfj!nF>dtj3>ju1IY?cHhFGZ(`P9Y&q{-W=-(W37wRh8tFsejk6St)obiA?^us7VEUXL6GB0QUh$l~-bH5*VOM?k&h>M~D6nG9c{QZpf9OA>?5k5N>-5c7M) zfCWU_MIz^e$YD1G3%@Y4Ox{D(BXRFfeFNA)xDXP;*#_6$)0mB}(@@`L6l=@d(!@|5 z_VqWvnDAld>Rk3_>Jix)d`WoIm3)KK=%2IJl)Ub2xGybdn_}5;%t4W>4jW5t zM$)FbRbMHRh1hM73u{rqK_}~tZth&vf zP|I7M`(yVD+1{cm7okuN383`_=%@Rwi zs3Fl$ucX<{nRpu!q+Y8WSLG}+Tkv77ah z_q6eRqt4^S&3eS8A=7OV-p+fhE2U6R)>B`BVt4$CUbkg7C~}`=HsjZ1Co8=xag7~# zjsyaB4$*jhCz@y?fb2K~++>ZZb0#4$-n(IVsn!@0s_+Eo5a!cO%e9{MNJkX#QO*YQ zN>KlmQM`4R#Zq&&#lruMPcdEZdwZWVm(LsFO)3}}(AgF6^lSW*8(w0`+18EAU616m zN$p$TXI1G*hTn7pS#yFcHZv(8(;fohRxisAWgNHI_(1Zb0EiJ2z1w z1tzP@x|zwn^*Zt^_@D|s!fOLzoe4_H6C@-tW>@J>!%+i;-%T@M^f z8xTbYB7jvVqK7ysGun_Y&DG7#@*)hOmUk$KcA8!&`^(<3Ksr`pyA~Prm2q=w6ZpVO zYKqW%Q)S-EDjeDy9CP{@HyZB)Fl|I z%c;8lt3cc~OS9L!cBNj(A@xqoF;)o&B$u3XlFAbuP1{H2N>o3DnCs{;<<2R>?N-Os z$~WHMGT9j89CW+`ot-kdBadidA_t$w(rPLlB>vppQHKK1#7Q+U4|li(PITOcc;P zV8cs;@!59r$J7=&?DWomN!2vna*JjIvag-Lah{sUkl<&gK@qC6#gH+*ESQ-E?iH;A zk-?lb=P3I^>Ais?j{`Ur4RtELWL>b_QV1@s62qC6+O_FqU5jX~5KJ*$b}UIidmK%> z8(>o12}*=!=j;`}Q%zeTpQasaMzzF3a>5TR1gRXP72PIQQ_b(-Co6};>9o~BPzRsC zF(@mUh{HJ7q|;T;oP8MdlSd*V_WaUF&>#H+_5adg);K)D<}Q z-OFht(a~r5BHa-#xv1TGbS_QOcs!%W6dX9I|eY4v2TA!^+NOo!R(s|TcLStOb zZu5GxB6waMr6WzDC8}`)a#87?uY@my$NUaABu}(6JT9Xh!ICA89nqe0j!~!p9KW?xeYy#kllYG&>uc!PmIUIwbpVa=m z=t}o_X@NCO%W%o8)j$__?lXT`kduZwCq-=6qlujsxyWo5gmhv`-;%Cf{);H5_;(%xb7W>x;mre@0Q*r4q2pn zLuB?xNHw>zjCF1K$QJ=<3P>ntD3792ne{AjpNFhwOS6@^Kk)j7(Ya-iNMB+6W^zqKq3DPCWGZ)*DGvm4AXmUMF znZ?gaE;KgfHH5N~=o`K=BsN@sm@Gr%rBOr6Tx~-|_XoPqraJv8iJAZY!dV&c8@p;? 
zme)z01Ct}vm>A6&*V}Vkrn99FLD0n>rn;-o!&V1CvabAE9?z7C|1s3zZ5Tsd=$;(s z%+n#=Z9%4#R>cWzv2g6m7DAF3!Zt_5csUu4^m})gKxEoPf>CD} zfUeqYa|5N?p3p_<_Y<1^yioZ27(3651In316G0H*SBq|yMtOLpwxM%VPy}X ze`Sen6^WVf4u(Rf5v6BE?yGs}OjpxExJ8*+Eq1pluRih|6`WjUitmi&T%Str=oh!lXu+-g^ zgS@)8PS)-Z6HQ~|;C_8Ak!}h5ukbQb$PH{0t$n;Mr{Nc!U(od|4@ZEK6(kU0={Q9- zBi4`S-xhoh7~$FY3=`c)A#2Va4KR+Iyk zhL*N2G7^!F5kAmEUQVw1W3fKnU(FDfB^B<(etdjnrW>xWx0qiH-<7u7d~usFqlO}q zP2;S6Lc%{1(8i$s4V?rwK3F(5TiipQln~;CWzN~1U)B7d9~%1)*~$% z2gC{PH!kXHv3eP*(bG9w=V-B1ns|FsFP&uXEu&SLVRqghh0JQlT$KLHI?<>$LX&TPm|UgJ^~*d zrmK1S#G*=4<5w!9lCJq>*z|q6S|X2faBy+m!9jKeSLM6W$e+l}&1t}J4PV%p7NDLN zbB*%mzEo&bi_Pur87`Jc_|u)N7(F&HA|MMU{(R zyK;`I&s0@tZJ8^&aV}MY)5*I-=4)x&Q9|w?`g)cSyly0=mYpQ_3w*g-JGaGZ6E+;n z7AY~0L9();%;I^`kEI_eiI}5v|)b`p-ko%G5mqWU zR~Xjq-(ZPrBX##xA`rc{I^MxjyHK=3)d`AL=nvYmI1R~K8)11vvXqplX(@FlSI=2B zRvfKdE{cfl8Y4H{a34L~VD6Rcb?K2*t_~w2Ifr0a8W44b`6qp+3Hh*ppK=T=m=vST}U{NK2F0_>MJU*>R0=>h3Jf;C9*|^E&2%dnrxX(2LIE zPZcKwlbJz47>8#r0gQaA$u8Vo%yzgc%c)u%-$pshGI-rYac~&h=lYErkWhTj;_|&C zYETv+e2_>9+a8Cg5}$;$MbE%9Fs*37fYrJETmCuUKC$4}-?NW1{DX=>J$v=H1IM_3 zDab4?xCHEMY^JVdDyq(p9ZcYKJyZkZ@1Zo&?L4m;*W*>j;g+5ea(%W>A1$59g4HlU zTHJsxCrAw#t-oZuvg}S19Sy<^K~qNuao5EK|Bu*EVFpT60yVu$I4D(h)blsd#QSNeP7aC1aNNJ71EkitZ}>8gxsE64(jq9$!}RgZ+-;v*=tkdc$$5+h4Xiji~DJ*V4~{31`ZsO znIR@5BI2o}T9t&6^yp(R5+o$vr!QbGsG8tP^-|O&2ZhCUA{gj}?q+uLd1xIU68^XT zr<6ar1s1%&>&-XV`JW=JtED>x72YW-4gae*bAr*k+S&_or7i8qBu%k+m^p&vOgC>d z|3_&^X~DB5{VN^4@4cm*YLL?{6$n>+oV_7?Ps zI4Ou65|VkD{=a(izx(s=p9-~qWvSO5i4~yzL*n0yOsFsjT^m-f?)(2#@t;OH#~5^z z_RDxKi1v@C)>YKU9k&0#qxG--62ce-sT1>dMd$lIefz|359WIX0+P z=Bi%B?0*!E2I@*Qut?5`|5)`vqcBSks?}`SlY-elCH`F|M+HTs{l5YJZyo%91N?tA z=>OjgCKc?t8*ky^>Rg162rn-lDyynmthu@%$o~4b7T5n2mWeDMp{ZHKt*u{@XwWn2 z=s&2}L7-EV_-~7|I1hJmOB90RZmt;FKiIEMKTG<+_X&*Yks-k#Jhox7KB^}|g6`^c z5Ra$9R2vPNzrTSICk_#Fm~6>js{s>TTkT)7+*}AOlHOp8v?+AFek%`>4urcJyzHad z0`afexJU)J(XJB-70!$LgZ*nv{oVtxU8$zt9Mk60m-O`}!w6WGd1LIQStz%=g*L5c zlP`Wo!nWUFxa7^`6v;pGU;ex>t*Vt3Bxy1GFM)pLLB2jw;!L;Y1*%RY|J`07F<7a< 
z4i2U!@teIqhsbCcq^Z%#B>P%Ry<8pRgM}8Cb?x{jS1YPv|C6YP)rxz1ZCh7OPS{kF zb=&Cz!;%JjXd@&n6T1FJk31YpLwksr%YRx{kXcm-jfHVRd3;1Q-f+O$yV)+MAb^AL@i42FDA;BVZ+8~ z2AIMc*Ta1>1Q2FOFD}cud_@{Dcf+=bTPwP=TeQkke=v4>K#C7sc&ZoR|4-F(z{n}iQ>lgI%au=?lMbu5DcoBoakwfi$DO0?8yfo5aPKn_h%-ZeJSRA; z(fc0sqNA*-O>6iEo42xvu|FfjX>u)%kOEU*J)7NZ6I2Q_3Hl?@(b5^w9pgPM9+0wArxQ)QkA) zAF?8ey5-A(xDLtKi_2x?7Ep?I^7k3V(GhG}Z@XcPY0(f;~CIqruMGrOZc#9Ufc$I+6e%>4%)LIpEt%P)c?_bdyc zlGMlg%2Fy3x>+hygHW&ahyPyOgH<3DK2Lf1Z9XM@3$h{2=F>=jbQ-w9RT$c7x@pOR zm;Y4jP~q~we7o{uDlEl6jDWbXC}RDeygF1r4H1wBrSVOcHvbQu5eo8<%pv|c9)FkX z{Xu`L&r4JO{D*d-g1ne-_N4{_>n8I7)v0E$W{Ibc14~zsKUAg<1H&yryt- zMY2EWwlK(7Qy}h+@rKI3SgoX9%$~ftMeFXKhr4{6?jinVw z>GR({Z*e!|mWAz7X`3lae?N9|ran}OQz^fVk-7&Q<}dBBY_?Hj*=%DLRZFisp~bCj z4D)lOk{$A@M6?S8oX0lnL-AAz5pO>%Y%C3Hxe6l_lkeM9vJ9N6$KaYR){b6ip~1eL z@|%|&xw>J6E4=^h@sbMbrK20iG8A9sj8Whf0hRVfR z?_1a0j_4p20)Qf-mL+{&VsF<#_^H2agy2{>t1}NA(Td^0Po&k#HT3b0=gC@e`I9f_ z>)fOxB7pWi{yqNSnoW8g?XSypoNuixN!iIZZf-nxQ))u$<{BC*D-Bnm-Yb>_Kl%Td zdZ*|(z%6XJapT5r)Y!Jw#-*N4o4J{r+53I> ze%=Qf>Q%Z%sV7U-DHK%2nma|~1z}oIV}Bd}Qyr=t0k-0Qlx&8bEi^ll&+E1aqRQ0# zdmzrsutqDtWarXQa>52(aC3>s|CDVgC@_ss_*s&KW1*xJSJ@-O%f-djfJ^X}-kZ#h zG<`B}ugGAwei&%G9o8?=znN$uA-}6KXPhlZpCQkb{3Y0W4}P9oGpgVDpm3a&@3yL6 z!M$Uf?C`;kSfL- zyYL!OL1CiC%JGE-MSLQ{5$ct;p%ZHr0B~l-o?9ggG^2p83c_$00o)>3$f-*%^SQWx zyQ}PH%h1i?%tY;uf)EbV%fGUrx`ExT>T-M)YODfX$lzcYPU~Gbknz*?pFzhxK?;uk zc=Ut{wRE@nN(%)U`Cqkpy~o)O*E|^l&aFTJFU+jBs0-@|QfQu|hI$4D0fB9x-8dh0 zEQ6fR`7bk|{F8V5n)hg<88j}9r9x)g!>Tp39Og6ddJ7I@dX*;)UrzIAIRok-^7@CS z-i()3JL+uNmY|q6k)7}HB#Qkd3lhd;BnBI`=PN# zhI<$h>n1eUR$2Hp7B==0WC=ewlWk+H3H=P5PKJRp7F0$;O$6Dv}X+lZ$Y>p9moA)J#+rL^U2DHGaV|ix_a6_AYt5*V} zm;oYGG@oNSmaz?bJ1svCAK%JnD3ZBOr%mc;qg{vB{WQVF()!Ey&F86W_Q0|l+_@|) z19+3p*D*1-%|_;MRgu)v1_V5GBzv^MOJL$PWRhWO08fqg`2Bn4G8YA`Q-EWJP21w zP^-whG1{;suWUa#XXxF@awq7vJH#P(o|wNJTnL@zmD|_CMj2k$!G{GM{jl^QbX3rzU+?-B z9l;FT_+#x)Vl-$ca5jWNDt=`pnmEL^>hFNYPC@z3z5c16kkDcG%K|=cz04O_A@bd9 zk1`lxXoMxiC)jp}6-y}hI&dW##M+Z@c#aANM{&WG!hQE`XdL)W*Z!5f`->Bj?DOmK 
z8bOa_@#jGH4cFxk*=^J4EtgVvS&*l8WO$Ks zT*}-z#guq@@7rVB52C9rSe%?oGsxeev(rPPqxJH7Z7aYSezPciu64sY+@Q`fc>y^T zF8}6KE_(e25|94bS@xCB@HCx{CFo#)lqlk%ZhJy_W|IClkwqrgoXkLt?Eh!bR;3H5 z{G=|UsVL+3vQ{9IY4lI!n zFOIcG^_%S#X5S*)vZkBW7z)*Jiql&>9DAQc(yl@NJb#$irAs# zWe+dyi}mhnX0|YPCQg{@UjupXpQJ9dV)WZ=>Np3A5-aNtoN|QiGT6N^s=o9}BjJ-d zxO*mJQZMs&I6odq`(xw_aIJQDKRIT*`)x(ffEc9w??j1X-=Ar0fh_7>BIJB8y&{~x zs|z(xAFEg{i=$wjt{_xL(tGr6lku6n15cQzzZi!UKQ?^?zx=cTJU#u9Mz?ech%$SZ zXuAclZ+Uztc%*YSR25nj+}U28fipTpPiFzPPFeg6mFLsEInx#gV&f*57?d9E=@sZlT8y6%(pg;77=o-&`hK|mW-igXp={p3)y?N;v3vA+ z)9LWu1ma-t!@0*0oHr0 zx#(!Y8(4)uUBz>8Q!DgqM~d>JkGV=dAJR_irXoE75It@=Ky}rs$+wKLdSs6<+ps4V zWexu2lQfQ#sigPXiZP^<`ipn8(W7IC!Fqn@gNa5h%MHxeS|FFz@>kcs=dE>Nvwu5U z`zIO0-q=uG(jN@YNSp@;FwF@J;7m@lvhj@f>hgRPOO><9`5lbyRoC7|t?EisQ`4Q* zE?bC^rDm%cVQ-tMvCJlx1j_}x?%22fUQa~b)NKrm3X?&i3ayC!^N z1F5O%{s3ZT=mZV>9^_mD=`2@;BX)G>3wC#fsxDR?kZI{j^)=eBsJo>yIlw{;w)hg) z@AYgA^=iTS3VjQ<_e^Wg$?g?oMDf20EX+53ivxo}0Q!#{RgE{@F`cx0R1F7q+R!;>a0GzT+y_%x(@wAG@nx5JkwX>IqW zCe4{?(4m-Qe~*{=`?h+}EmkS>khyC3QElK?$5E4PKV8wXw`WI-ZpZKgI-UthE!IB`*R6vG-oMjR$o$s0q2CY9X zn@mZ2#~ABf9;GcquGwu?XOU@D(6U&T75yWx>(EJ^@i@8Q%>IPJi@r%lv3S^6m0BI) z+WbYC&{!uxe&9~AznXwbCbM`a{*XA}DwsArnS$C%0 zbhR(Bw~n|nY}e7Y7%zdi=17(8nT70o99*)Z)sm`FxK_qe-DQsDODlj=5}_d)_U%CbLJn`|g|kibbi5jp+)vU1TKbT&yI!@}zvm8dC& z(fKhU&-bkO;JfGw7UaB*dSK{~C-VNNTsRAA+NjC)eVoYjJqQx3P&ueV3x_KZe`!W%l=5{b~NAD`FaJeN!DvKM?MEMM}62uMu`+; z`#e%!QLR4=TVw#JGCdg^QhD2g1JbKo$Mk_mTJDQ1gW1MlWbZzBwDz0F37&`D01xx` z1J1KX10jN|sbr4@dXPmZzo~ssDGhMyQ3XNv&wnWre>mItWW!#!xY^iPPvugAefjPQ zUhKw_t5C19S6Xd#`1UFgPfZhR;h}A(C_e)uy&vhA`7#Ynx)8qVi-Cz8UVphw=1MJp z=2-46`}+wQDA>?Jp{{{#%{3pGC|~VX#J-@@u<*aqF|`}IuR9d-{r1^-vq5TD^^)N2 z=V9-9&O^-k^_<36WZwm4`)|P~FhsrU& z^ug)SrJQ>CfXlW9ViSS&r_KCLJ+Ofb@IMp>2(-mWXb?WBy&Es=;%$IZ zQ(R@&@h_dG1=79k$g5ezt2$}jmRbCEpRWFxewm9N4Tk!KPNQJrJ@e)`zL_n}1gMCo zn}elki5tDILh>k-@5~c|xK|uVnwn&) zT9%B#x;V!u2h=jkJA zF}PTGp{?SWKq>ohsdBFonDi4J0Fwnd> zB&NP&-VVhfL~|I4#v97H-;YnpQ&Iq^cW&#mnb)+1S4cRqWmd2XKL4g3p5t4Tw#~Zo 
zGClvEgN*0^u#js`?OFY}q;$T}Q+Ma9O@V+D`0Tz1XG4S_5M$+1DtRdqfVuh9a8qcQCsKxf)Q;*XkYY>TVWx~t?*vQ3zrXbGyIMJ-4gsSd8 zsqz=zyg#XGaO6%7oN%z3w#PWL|EiNk#Jbq|ZWTX+5AUw4y3)I`4_hoc*j-8T(lQV* zHS1TjD5s%s)Ajl&=Dl?uL3HpAV z{Q>j7)Up`(>_h9?qj(RmKVHfJW~wqxw!xz>-^<=qW0UQxKisKbk@e~WNJjXmmKChO zW-fkwNxxSp{Wb$%lI??nub?x2$)iok`(M1(8Z)3W4=1{Zr0|@okKz6z>@olXLh=m+ zdQTh7K#jDtVvD)G%BSBd-(0oP zXNmq?GSipbcLt1W(yWNuTt1tz10LrJwX4^_c`3gCzNgK8iCvt(<+2Vy(p};~hAmi( z+_Y~JY&~|UU<(av>-WJjwW;)PxnX(2%Ai1$ITnH{TxK5(Y6Pe+UFL$9Q8jzG-6!Yf zFJj;2)AMAbn}>~&B3W;oE3FbFlMFv~n5HY<)u{Cra2o&~!#LQ=2ys=>?-vC5b)P># z7NHEwGs*q*p3YRG{MwN2gWU`FYX{b~!tv&ef>hm!&uw-6iA$^1rlsGA!|#^rBS`uNv+W-A zVr%itq*TTMea#X_o4%M7UP-Q++5}jeFAa45n6wBXdHyFd+5C&n+hd5y8a3*}EhBx* zVS$Az%i4Cume@&k?tRzv=m36)RJVDc=!Fus?QCwL^49HWy|3}iN`;b7uIRJN(axH_ zVKi`j^CGlB6?|&HTe1-1f?%vR&6wn8dGu{!JtJCZ@I<7-EHUV+2!W!~$6#4%v72m> z2d?n>egwU&$hV!FSFSXOZPZLqz6nl!rjZ1PO`$4of$RNPM4Aa695?b6g8Tp%4K>+_ zIAitRb}bW~A|GPN)xKZtM=a$9yw4CsCR|dc=^KU<-fKLa&0Ag$-qwSM$DmSHmFeiG zf3ThQ*Tkjqz2qRfEZ5fG7+u3r`pbr`M$c{Unp&q${eT5>jQxFmQhECr1xu|!HEn2` zqyJNT#h%>F+p(y+2fscLa}5`UAX25yvhrO0fjYawc?alD-5(q=e!oVgT9}P}0^caW zz~Dx*HWK+Ta)jJ#NgRZlYE%jd1qeU3P<=;1V4j|t(NoQunEZ_(cj`^tdcrAdDFx%+ zC`w?wvhO@HB+6z)S!4Xf`ht9xuFV!S!Mk)fV+PIBQ%~+U9PsTB1YmMLrGYl|wnmWq zX>IxjAxA99k#_mkWasFw?b)Q~*8=c^yAxW4pOtLCCr*C)?tOuH&@Q+7l(BfTlp%OV z{Ggwg`c3YS-EZNOCnNj5V(F#(?Z43%@5Egn6Lwwi()1D|#Uwj262zX+&v1-4%@SAvX%s*~BuFILS(RhWeY1zf=MLt_opkCE)kx=h_w_+# z{7+uw)N$5ATPb~Y4CjQmrXg7P&!9>Yi-tt3soa%E{sQw#rT zo#3-5H)$-i6R=nQrZZ_!nj1MtNqzFQEJSZMJqU>fBCc5vHji#%)|)~v+EMajO(USv zBXaSkKE;!ojdsW)vSnr>h$cO+JogN+41HPNW7wiHJ6mn`;-G%FZX>Cd9+^W9NYb|& zHz>7fVirw86ayH-v25ojHPD4fs1BK|Hv)6_$ETcfzVd+aTZph`e=5|p)J1#rT&qq- z(!jDB;Iw_TszEH8A3vznoSE;JWBK25^{NIQ%}QUQ!02BI#od%$J+Pm_vkggN{#RtM zOXOR(EHvMjnI43!`7Y}C8aR))y(Bd8bgh|?TfpYyS)pNzHV(}_S>88d2lk}5pGLp? 
zXq)vtzk-vSwNuj2ndQnhY6AWf_R)5iCSvz+n#yKFxhxCL^oCnWbIUYF(=e`7Cs zQW2kxReSj0&rTY6UtpZfn^?#UIDd`7KZKY>%t2AF)Hcg!&Lz6;)}!8Vs%Z5!U(j>w z?D#z_o!J^Ew_#b;C#3(OlWE%Ekc^+%C@0A>2()LPm(jQT-hf596rq4x3%<0=o0mzKMnLlm7LL6D*`=ro z`2%hb=MV)+nX?T0Iya5KQrDl=#|XLff;cSOPOj`)FF+@m-76S|t1vS}X+pDr$?lH9 z4p67ZmZ-j5|d)Nbl<#^|ow-!5LL9ltJ!j{ZQ>l7*8&K8NZ3gQbPM zU#|&bN0bn!rxweA2t)Eb3L(2UnYrCJ)wi+P>*Mi0l0G?Y zW6lb8Shgv_Y&bMKX(Ax06}3OV7b}*yu>+yh8p>~p9E6O+Gg$TnTrM)rm%0>fB7N-s z$Y#9LJ%0m*A+Fj@BLn{^Y|$3yg(sE_IfEjltS%JmAwLeU=2~U4(W2{MMkdLnqJIu6 z7f$O@0SKbpo$dh<5)s(#oOjLJm!XP)5T z6w!WW>SS}9c+sfnHknEL*v^P*>|v;2T#m+bentKE$Bf4IOhMESOx_J8*V+a`dpCR{ zPh_&9>Ni%jzo%6w%ef2w77<1QHGW`}otSl$dXGbVZUIU5F#weh&vLU8d$kUMl?olt z8YdN915^#V-~@3O2l`0J7n6zC>BH&*r5jF379&?tu}Q~Hl+2KkIcw>&=cwI13%Jo1 z>XcXCTTtQv(z0vHULDAlzcZ;CYS1ye-JUnm-jC%wLW=&8xxq1qUAW%l6IW)ns#Y*& zqk->h^zYxnKehfbd0f;xPUm>iK_|#;G{{J)3J*5qHXJQVB$3tZxVo}lUG!f)m?)MM zb-5$^2tW6E6?}uGiN0c^?ZYpU75DVCo7AfVY>;ZTSU1{ec8F>>UTk~NzGk7qTfR&~ z+$>}REvxk++=y%(nQDH(oJ#|rk-NK}DcHU(EYg0wax9EtFS`$2N?;k>d}ILV-CX{n zkP&d$$60-*#GOcd+=lE8u2e_X&@qQ&G0|N3a$;X$@|tNEp#<6;xrYAU)N=aJ6Ob#` zwWtg=BH_S3G6>3OQOji?HF&$S2pPrzcO){R`0of*U}J%Wo@zcz$oxJ#>P0^dWck-_ z(kM$iWqd9Y2Y62OMPiS}6Oobe*sTs7YPjF;<5S27zpLpskT4AqKQYE>Z(IMvaTR;( zYa!+NSvVz7I!H;K2H%CPws%1wCK=MwV5O$Y3N!Yi4qCjNVd)V;47) zm(a(d-cq|`hZYuudn=eymeY0S;MV{z<{+CMT=!Nm!bp7#GAlyvcuk)7MU;}HRz4Jq zMIH;;ZS_UB2yR0MEX>T44L4rQP6d+1=$%(?7{Rdhm&0SnJfp0Q_PM+3Id`E?xN4DJ z9nY(s_!Cw6r!ocg^7v@{#ZO>cTbAuY0>GzJ$!w3d$$;#*p;mTX2!CJdh{xFz-p80U zS_13|Hc$no$yG=+!#P|4c?2JkY3+gc&mw{|rhr9B6m zQ2Kj&aBfv0*h&ghH4+O4hif*vQ6v3|j*tnL-7PM*+*aq3s$6tdJ{P`j!Yh(<_9Gu% zBBl$vR_fMw_CS=o;@iUx|3Wv`9yewVg^cwEd4~s?@PTY|LrF=nav!B!R9$yjc`K;% zdE2?>iDB63u6F{)w1!no2X)EBLxJ75os_W}oB!BMipHnA za_)JauKe)j<6Nps+AYW-v@17X_j|jd8{gL?4`VP)G-oQ+Y6`&Peu04| z`|~%gVZhAuH^brUL^_{Tuj0Vwf*>>Hl-Bv?wlq0(fk$}^gEe1J=E3LWKG@`fap-?ot(Xo zyHc>Yi6$1Htw!>eoG(me14MoII;Hsp>s8r;7U4Sgk|HHzyB?uCHl`(==EEDn)JP-P zqG7q87Uf(nCsyWf)9MG$p|o-!o25uibZ6Nm7zF5%ORjb2vhK;KhO@GU3!k9&a4mD0 
zS+JgoA9Yhj0lN7(vG=oQKCVkUY6Z~6uTgQ@1~-ZSu6J}A{~SB1Fr;zyy%Y?GBv0&d z`YR=21=b0FLj(?0tN(uT?ZW%g(F#(cP#@!D^>lALf>9;mUGNPyZL_V_?kEq&0d6Vz z=sx;of53gOmWYvzt;iP;u2PQH7+s?G8O-Dvf5nZh;i`wnYEuBb+fdB2U3xSCE&PPD zRp@%Gvo#FAX&R}=afvf%*!w3&>Wn`RXC1J4$^;AEJY9}c&&6Ih%kO%hM-s2()y`JO zx)XeJ9C?^mu!3zWjv4Spg^`S%IVm=?q)aN#m2JkIavwNzUxT8{8H3``b|rjWjriKW z+(#&yd}P=qS?M+1cF`e^3MkhR!FyeGc;U2;s<5_|9WHx&L!U;)ww24&ZMpdi!zSjdmk%&Q`@eoIx_o@`3tU*E^i?A)e~inSBZ0D4k2roF(}r=%Tgh5Z zE}rBOQi9t+rYr7TvDb04g3(_^{G#NAo{QF^8Qc0YjR9A$cGvRvzc7f{KW%pLJavx0puT6s6re#hAEmF*ilT~rW?C#bd$-1OT>IVoE>>kNe>U+=j)f?ZzLU+K9M zhRdSmHF-LTnaSWR?}I}bDyYp)wdFt@xFh1#H-s@&-=7xxrRebvQGb8lJ93<5yRNNwHXUM9#+oHuySV#HE&{=oYDoH4ze=kuIyJQkw9?|5E@1wrK0uX2sNKG)&@hEr%m>h!r>Exy zWUbvST9L}Dmz;nth}#(!{?h#5AV=VPdod?mjza{mGp2dMgl|_Ix447tnjzops;8i0 zg#H#~)H03SMqD#sD5(oRI*AsY)N7Y4InFkcwgGlePAqgM2uF`yRJfXz^(}KZo1;40 z0dtk8!BTqlnkH@~6rnrbx~V>V=XB6_(JU*fQe&7=Epc;=P0?F$Ck0hs!)U# ze0w3hpPm_QVI$;a^zpA_f9&X`%R9WW>xXEMmheCRo1rv`ZXDMj?xM3c+NIY1P#aBJ z#wiW#!ZqH{=;|!JW~xxwHe@N!yOLaXJw^f>$nu(zdoi$^#JI}0wzQuAFsd6#wFL6I zwG3;J7%c^dnmJ#fEcXM=@f)MWEIxTI<|@;-3lU{QMkE6>FcM9-E`xzV zI0(-O5Q6)(%%W1^m&SSbg&yVoAJTvhd8mj^fMFKy*9RhY@ey*TW;ZYTDHPBAB8Dv~ zU;HG5(5;(}o+Fz*ys_QW)%pdb{q$~HQf_F<{H43G3MjLyQapqrd3>s#%Z>%r8o5iS zg^7B)yp_hIMJ%X~YxLi5ejyyN@{fUfjag`WBQgheO}(Ll^_XP`?!N-+%-_oPw!<{> z)V8Qr5bPkVgOvP|0B%8oG@yhpAa z+ehid`^F|Nse{anQh@U!#T4F~P1(gbRA<7+X_q&*!}CUko%+F|e17;tSZ5+;$q{g4WeI47@AsV#!AB2X%sVwd30(4ZdTb|T&Z(2{XEsxaA|~R9r;Ey_iw$$haG0Ix`82xn*Gi2CFl?jZ{l1o1HNBfr;*(3j(cmZSmEuUH zLe$sy)VXD3OEC`$3h<`qp^!+9H^Sax)om~4Oh?Ut)VUA8AL1zmX;8WD#w zb|cNcw}GumqLo*)QZ;6+y#naqILsaRJ2@LhYayQ<{DN=HXsUdM-*{}wNAf2|p-zX$ z(vz3&6viy9HMWI}$5RmND^gv0n1zPhH)DSfK200Tfp}SYmNsLjcKwzj?u@nxLGK-{ zE$g<)vobyR^*lwmJN#MAK{JnAU6Nn~ELaTSyIu{g26l;7SK0?3!BA5<7`V^36xM_08@=n0Ip z_&;whNgMxhcS+$wj#^iQr3x)#+W3L9$=&Zhvfnxvy0;~#LiJkLmI-J^`s6-DW=nt%F6qCF0HIFT)l)%tyA{fd_UCv_7MU_KhBE?(ph^fBy5Ro| zIX_AK&>ie4U{iS8^RHOtA5ONz_^?g9AfgdOzQ-j#k7kY@ce726~3t13VtNLR1`&3G`qrwlkE;))tvC2Fi%AA$r8uC-Nv6%-w+3kwTm 
z!LqvCJs;kCMFomZ6yMENFwxW~I#52#&UYtxlJdr?KV%kX6UW)ExJmrV9|@#~Ik2OF zbKx6*S0+(pOEzuMX#MMjJgZGbrCdu}L9B;Ktoh2**%prJ>XPlC|EF#iU>3$P+2mV} zE&|n{U1#~uKP9vv@j}C?2w(Gk4v988Z-fZ>-I|7F{*z<98cW|Kr}kZI9`7y|eQ5+; z73RpEBeSwh-`k#b}IvGD*$*jKr<;4h8R_-;&4UFm^03 z+jsA{aZF}qSrW>aG02mz7k@-f;DzLm*#s|=I!`LlI@$yI^oaNrRYDBH#n`E6QOSA7 zklNUWtzkH&LvTZD^wdL(VP+z($Vf&~N{&yp$Ki~712}4}P}PT0V z;Fb}A#l)!ht=pbLIr7-h@2m`~hVxG^$-n*m0;N|D#6T?m>hH!Xn8N&-?Ma9iyvW-r zefh?4gL-vNVng?WTrj!n%~Vk-)j0F4p&VnG<3tMR6xvpV&c&A%(!czEAj+wG>mIkZ z^M;Vk-WP&(0scOvuH$xos7Ut=wXpx@Kij$i_8LdU*RED$Alf~emPnQ6u$i9f?na{t zC)8Vblu3^hGJuO<=$WHjma<0tU+MK7GIMy(+-NcZosybU8C|el^($iENU?q&rDWLE z!Rbb1dAL02&{Y{Gy;Y@by9sjD7cvNPhQ`bGt}#<&;Kq4R*T&VsZ1iSDcdU`!Rz5r8 zP^hdiy{WdnL-YFJq}Tp`_=Trb_xN92hc{|n<$uJP-wUK}d7IUJqKT8B_1xmN1Cmp~ z?`LGcz8VJ48vUXm{{ce;FB`m9A>RJ&lbuh^wXse$UMOY~?3KRsr$#h55?yR&zce_Q zRJx zj`MiKwxATHxm0bLS>8P|J1c;DN@gfN07@?hAv@^hVR=ChU~tQ$@$IOPh+!_8n6C&w z(FJOxqWQ2fk{^IrRkBig5~!V{ixKF|6m@SoJe421DfkT;X3G;17LZx5&;cujLhJgwc~F6LX*5rA^o$Tb}kZq2Uq!LHoqGrn0xi( zQ@S9W#PIiXHb|=rGJt=@?X;rWQ7gJ(2>-R2eZ+F= z`Pl`*R6rkB*S+&WrlUFWzc{=J*PIV=dsfRFC{9weMC=Sp)dlw*KDhKR2s<^{9L7Vn>;=~n(0wG7i`itCH&77pM);e z8j(>{)_`0rU5O%WU`9DPzN5{3M7eqNuJy)lDaLS<4`jQZsmvyG`q zo%ot|AoEp1KEcSR=cGbZ|J}E`+KldwR<}7PXxjB3s1Fz9CY>*62pP*Qz-D^uUbX+7 z0afS@H%COKKWH>H z3A5pi8@YWL4OoNYs=7~`vx4roFQwK(!4VVCCTix@IRw5}J>DhA=4A{|fO~ojg`wxn zx#G^oSqZD2cAoq3V-97F!W(I_m+9FQMZ~!m?WgbZnf#1t!{&X$6V<@ zwmMv++66djr3*Q=&?JT+3TgcyGMygtyLUmS!x}>W{yF0Ub#<$iLcsocd2?18a46nt zBU%AxW>EY5RHc$jpO|H~vz1=CSv!g7KDgylVojN&54?Gx-;fOQyRh>-T<4P|{HBp* zc2D&5zhQH;?wf-%1*|F#L;(6+ZE*p%lp*8TGY5iKRX9_K(Lk-mM}B!;wT=B;N)E0c6ve?{WR80-^?02D=W%` zW<_y|A zO1oY^;i$Rkr!4yNy589__~uzHvY!P%e7Cu&wc=0Hk^bG#wsnu)M>#)Am?Srb_7C@R zq~|u!*xj$U0LxvhmL~tbI~2a{UB<3v{O_Vb|JvMvE_U{6JgSk@Oz_;}1(|7Hz<@}2 z6aRbcO{5)t^Wkg+i9%l=qOIxY;Q|v`;jxJ9bHPvJHt!#05a#?WvBL^^`#Jcg<&a|w zz|HbjpDQvR=!6NFE^y-PZ_Spc=r$HW@(Tnkhl56S+so(?o5kK`T>kY;lYs>u#cG@u zq;i*B_H7~IvY+sXM#+y#UKhusI_{*nt2SDZhQ7ZD5vc-vO(RS*oyjH6)&XLq0juPj 
z2N!fuGrSkP7|$Eejf&9iCcInArMB~BQK=LQuE}DmMv7QgNIK;Dq2bz|$Uz^V=c4oq zh*bWIhAiHdXSlJt4WMgtM;h?F!+=&Jz?c>1_&c_`cv?@?cs!R>2T8f1QGlaXQJ$;v zNrn9&(2AASl88M>Hk6SIZy`+9-66f~(E?pP5?;g6tSB zZqQWG#0jYDURaS}RIf9-wao}nx-j(PcUGGplr*mn<0`v%$ss*;#!)RzY&_D)xwIzsyBa2kNAv)1qwb)Xm1Il$+`m6#sz$=bs)}8i=yk4R zBBe;2%NfVD^_V!&(#e@5OcK;eG$qzwFP#&HrDF;mef52w9jCgFQq{Sc=>FCH55~_x zcbM*XX$5xb#vN}_9M~Px4ZHGfMkP>xr$UjQJfxy{Exy`DUWivc`(@#yb8P=w=;0}S z=~w>4N7emGKRnrqc8tDHW_XG3dvd{MM)7egp}(}0YWHY+6})((SAW+vs_aNp_bz9i zvKo>dFHb63OGBEKR30N)lKc7cxT)pi*+S|y6FsZTJvH5w&5zOKR|db=`v~X|=uv;T z@1h4~Kh@is+pA!G`sZ9FVyw06M<$!cI5aC0<+nfeF&ysq z(Qio6GdqakIxSS04g2_FwQLG41zhU{6Meo?^~BPeujn&@NQ#lYX?l za6Y{q;5ndiLrIK30d~)V=us8L6`su7k$gAxossWe(?vz@Uq|hWBDl8Cb#+U-XpfiO zj{@1Z1WwZxvK||287Cotw6Y&WPgaU2YJX5UAdd!%jhL&z_s?Iuz4b4v+%cwJN)9rC zg}4GLzTPm)Q@Z0ly^I{8?wqWV)kjHQfq5n`C1ot*v*WTmE7nWtYVc-0gciRPf-uX? zu5(`@>@z*Bx7%6ld(#86oo%svmaIF&U5hIwb|HRy`-bgWWT3=DsY+K|`^_+}4nLA! z?BFy=1;^8OmS8&}3H=c)Ao5Wz|lH#dqsYKf$3Y+}3> zv4Y#Vtyiz+W2%7)=$K%?3wkyTlnkbK2u`NVA?`hf$+`?HXCW`v$o=_Nmd2_{~;=cC69 zv&GF&(~Yb(tDsE20w~>aX;Kw`bLOy6m{}TAy5&|Y4CVfJlWKkV=0Se0SF^Bg&%L5V zMjd(Iv=>xZJ$Kx$2R%MePkTo&UZPq+m=}Drq_~t6dE<`TQWQ6*a~!;|L}h#HnpYeD z{jFj`B@Ne!OW}xeq5WTLj$ILGM4!80iyySK%Coona0QumrvX~)tUq^jM2$^MavQiw zPt+^BBVVjv>f1=BwFR%n5S8mxX~ud@pgp6)ii>0TuXh!f+5pM5hf^~PJ*-vw>8=mqf}h%gT#HsehquKXi+?8ukxM#K~UqIRWOVrt|2 zs9You5+9-Q&Q_=>e7-RSLX$_enf1h8?anRd zNH>-Z2`g*K?>t?;j2)R{H2O>(=$cE5Gtg0Ue{-d?PZUp|W=%be7O*>sdc?y_3+2JqBeLxD1iLKik z;Sr#E8ECnc2#QFeh{s zK1E0?@ZVorrs7b!xEgr-u!r`rueV{A8>8mZmt`A2=BR{~eGq<@F_~|%v>k`_G)WmZ z{CV$rhD5vg(PBi!^UxKCu7|}2`?ScU!`FyUU#>iNTn9YCMpAbvnvtox@E|w0yk-pV z=xjJ)x1s!u@l5o&-5+(rLF^tmyK|%(FQctC|>uo^8<4lg1v;wx}*dSzPHp9v15U2uEU76Dek- zFpy27j6rJGaqE@RlYnEVJ$5sPL@-HOR?-HYk&thAfRSp5k`D&lfr5ufk0}?$qn8%2 zCT>1qG;Yri36Y-d6Rdzt_JsJIFFC+sg1r{BYGZ%%dbi>KkG*${&TMJ=hrB}rs}#Jt z@MDZ`?6L$uA*V!!F@lA?@xAPjpBa&kU0Gv(lE)_M9jTh4%Lsg}Voim+Q11ILChKvS zwvTFU3{e{v%s%6OBY9zF;JglpKCgv#Ars8^aSd5}!waI|I(AKQ^;Lr!)3!zPg?M{e 
z3igCVLwHF9N2WfPDt1(*a;*?ADvs5mZp~FYKm=`ow5Yt3@!D&F2a&pV!+z zA*l+W#ODu5h{LL)?=>DjBPl1iQSa+J-`xcb2`y{5OAx}a4|~2wpZc2}#t(grdQQfJ z6VhhT=ckhsuzp)e6TKRKU?>!{^0%Z42#I1yQ_4)r35@pCw8TjztQLd&YGUWH;Z&1G zh!p|@0r1|Fc~aB7&xT9}AySs%msPk#?ChXK`LH2XQu(21HDPcOrr;xB=R-+}n?;E9 zOZRRcITOl@Z#X{X1UT#!PeZzjhRqk9&0W0Bvfclqt1zFW4*~ zSe@)>;V;yS(V|F(3xXXhszhXl!o{;B48-`*#De=w_igFt;|TO>!Cs?-3g>TN`_<0Q zR}wlMv&1)NaZ0EZ8ZPCwsyTJDum%|u&0a6M3(jmR&3PT`Z!ubrOQ!K%z&|~IhnGt+ zRmCyQ(%0oYw;ErtC<0CB2E~tc#^oZutLN=z_Hj>p<~OTYq)@2#j7FJ6rBY#h=v?9} z@np_!AV#06PRydtYU*juGV6H@YXxuEXPb0CId!MeXA}M=Ol%ySf|+Q(qP;E+I@H|R z-PLA2?P}V_c}T5ZnfgQxYgAR?RCk=XZerT!fB!*ymvOIqu2Za_jFfsRByL#-l@?CXC=NGZ)_!35gA>}f4tGqQb#8mlrPy}rmDu+ z-#=h?`q@GSWQk#kmEo0PwRL{!4u@!5sn>~GH_?8&la-E}2n9j5T;oAmPIbqS^w~0% z!zOO>%^>+T*g<7Ks@o>NfOzOLUE+{dbtrqv;T%Rf1SuVzRRjjm3%V5wlraI%S zmHV6JMWe7m04aA1o9jadM8R}O-qYnB`9 z;oX8s$S|0kks_>QWaGtxQnk(I+~D?Txu?=6bx9{5l~q-S`F{1~9)@SIbaB=KQ=jby zs06*M``k*6uKDigyOeC^B|>krbFzEX*Z&CAPe|9T{n$5b`jEvZ8c(An47u;eG;OJE`75U2)Sk|N_eP5HLMq==W z9EO)QbM%MAeH}{82C5U$V-yz|DI_#Gq1Pn`p-^ zK)~<+^&Euh)EO%{KtIWZ=5`)gx7WK48muiFtl27Me76AXN%D;5PQyd)WHEQ+Agq{% z{yF8_y7nety3uI4gYnCWTaFFe!5+p&+c!JPu##- zl1kjPk62)#oZj5_e+=J>j(_QZA#1)z2|Ez4EX$Ni(= z^DppSl0R3MWJ~h88MiU3EM@%>Kf3-5j4QpLgtSYgLM*fyzR6(PXsimeuClx+Jb0L3#BjeWC%vNv(qd0dz~7Ip;PDNlG@1U zAdZ@MHA@9QG&e|w)tzS`aT9>dmh063lD=kfEb*PRL?1e;mf(_U8S7YYDj>)*eD!aX z;%Cyhs4aV=Mn!ik*>)|R>HM{Os{be5ORpdkqxnRja_CX`0gC-Sil0u&^%@tdXgxaEV2!X=stw}F z183sGQq?*>9|~82L8z+}O>>`IlYhIUzyCf(@M}MD?$tDOj++Of_{0W9k9=6| zIYjj&Xl9$F_vvdE{H*Oq;nQctUd!cf{5nWG5+Ko6pP)%9?A9aV>Yp2F02@DX(DzdaK968w%8 z|Me!(t#O20q}_2_m!ne5bwWpSshr1y%Cvfq5!NssblR#FXI)vb6jOU*q)|*@*yhKB zQ?>gn=su7Fvk2wp#_;>1|7>D^qN@Lxqb&!NY?vK~*}n|0JuQd`ed zY{7_3m@&!K)lGuB(}_fm_k&nlcoJztDrSnK?ztogi7uomcjR*vb_U<)Fe)U<|3x5` zWS}GrmE@l91tEc`iPkNfv|85PTKNUjV)pXku5KO!RU}_KyW((xm#|Qf2>d#U`HO@% z>Ws$z>ovA@f6QLhK1GkOsqr%CE-1$bP>J?1#^zh!|6UM4zhw*n7p`sjE7$?j9)&59 z{DX|Y={f2EB3YMy5b6f>KLq}p(gc2M1pK5T*0P3}|22lc=_E~Xk%$4qopyj81Mxq- 
z{{D#Ht0=+ZK=L~M8S!#?D!DiezKzV)!+mV&4@I)M(I$x@|33gY|LaHafjmos8dpb| zZyp&7M1OTZ$wM8&EvJui`H+af2hZWnidn|*P*eZ(Azv5#FT=96LYAfNy<#5Q0o@mN zt^HPA4t^~XhPXv3AghlPgcLv(Y0+_YefnFm{PW}0JRsk42O)3oVnN=u+uX0+@&_RU zNrR~vqxr+)5-o26#un8Z82*(^o?`sCOmHQXjQ-HAenEoDM^>J4-*qdhb=6*@wWdBAB14l40p0GQl8@QXY8&Q&`A>4KLV#oS8KE)AgYudGFxxBRrti zlT{dUCj9@>Y`>R&i;ASyO~;=m7%_l~-r5W&l^hS#t$1+uM6P#6;oKb0)KfRvCME$l z1Yu912^%09e>2$M4ZjM1-{>279GQ4Nw!zSr6o^|OfqXTI8^P;h#gKGmnse$F7AjxT zGA0wLM$4c0^yd14zYUN7^)6c*5V5hcdeq%l%n2e=tSbih1=v{Vq0XDrLT-6NH)VJA zzVb#@D2d1;YJC#mG;u7Y>xAM(P?Fx|06H=^*qr^}Sf9UF_>TrZ9r9&hf-sG~4&4R{ zv4t}khC$FY%UhAqI{%eW@qB0eME>CHcu8UCDSIZt?niWd1LVD0TWich`TC=nz+l3u8Zd&?Ch~PU4#VdMd`s1c2kHB-&9A zCMWTbEt&i{s2CQ45BttG9w8`@Eo-Y8+|WWbDD&5s{MJaf90<@J4wj+;3LPje6%Hvh!8`6W(Blp0Kx|IGOB~)z&Vy-0z zB=QOC6VdGQcUM_O+9P6sK#@RZqeS1JrGPG=D(M??_mNQ0>F15gPQWxR<^H|Yt@yw! zovIBFnXmOng^gthR959^ou{}s4+X9v}oba3HoGkqu9Kzqv0QN-^(88yG$ik=<2uxty z>Q^?||zyA3zmGtLy0R2G!U%&rLn*T?a|Hm)? zWhnmtE=R5C!0_r*&0(WQcoRIFEoDufD zXYaTLNfH6JLY19<5`Hl|Fx(6yu3e*DKd`W9iKNs(#W1D)prz2RENVM7SIPe<_fb@k z4eha=vn-OaG40gU)*T13HhXEBz`wMv5(wv4c;9P)$`t~6ZrI~fIag`JtLgdvq;_!9}4dFi!s$kEzg}lQAl-T%I@_I#=q#uSa4trqTYL;rKGa> zNLK{LtFrnN(R@p;RDu_2GBIEb(k~T<drg|Gq#~aB<&)aDv^^i_CN%W! 
zU*Hn2hrIHt{2oGAivN8^*F3;1oTB1LKfOo}pSsQJ{r_#)9KcG(r(DS-K2=f4pe6C7`L!u) zn{;2H_11D!;t#|lRZR`MU&!T^Ps3BFY_Z$Aq(Z}fitR>pwinW*{2vn@ush)c-Y2Qy zD&34H9lv7aRfs?Q295yqg`Q*vR~gMRGO5V}xzM#V#jz6KblML(va>aj=D+>t-)l(6 z2MmiiS0opkNPkR=9qZ-z?!)M&9LBArZ^@NO(6BU=>YHU{{)wQ)aw9Eq50en>FXsKP zImVLua&~?q#YQYXWtuDSl;5s*P*lnO zukMG6?M)(+&5vzqtiUDkuGM)oGHM(oBd8LilgOrIjLs5E<{^(SM5v@USM~+@ACu#s zX1nbol59)-HGrR7TU&5AQivep{8?H^0Y>{O9I2?HX1F3h!wi29HUXWcy$8fM`5AivMi?vGn38KHP>VGswN5G&zd4zp>=0?w; zGRegrl^R1P6*1H-|2kBX-dO&ne{^bM=-=E7fICNl_KR{3stgJDGkTO};-BA)`HBi~ z6uQ1}K>hnru?TbBeTfLQkqX2kG1z4F2m7)}pvx2&0s+@#T{Vf(vr5GN(szH620$-s zF(CEG{=SX;XcF=Qc|?5ThiM`i8b*0}c~Zo2G@ZbKTvCy9Et=%7|I+s-lPEk*}IFW z68&EvbAl-97Y_8{|La;mB>}<(Pj}*Xv$CFa)@qk8rM<=1~B z3&~a_P<*bueL`4VAStMc$-k;S<;36=SblpGW2tsyp(`UgS*9%Hx9O-t6yS0f- z^uibotujODmEGM&Q@g_E5za}g!e2z8n=pSe{rdDTB+dOJu(IF`)HS%FFm`=6_f;Vt z{@trHGlyH5jh~yx5(|3K;tSm=ZCA>O;9MM$2?Dvn`!pEf3VTRPb>Tm0^>^D)!uOoy zC(*cjz}LZZO;FNclAp~H6kC)oidV=o?2(0nA$dOLR&up&<);RclUK>FIB zMiK_q6)1a!Kt8+vbizWnocYrqvBiWerDI_sl|$Mo*q`-#tGD_t(%PrCvNRzJj7KyB zg|VMVRa(k1RKoklQ z9WjP2(vL$h)8r9+zV8$Rpk1oJMpYj&;zB&DeAngUrIQBWOC>jOoG&Ma!3w!o|XNo_dXRDZ5kFDa6)bG!stDRQzA!b$!& zC>zYEg)%6Li)CUlrjM_UYZ~Jk$AWTKz=`M?GUQ_R>1oaahg{{vUFGjzMPZ=NK3o&z2=W3rovP&zC!udb1_bEi^fDvR0eNZ!gHx?>%il zw4aVb#^gJ^L7@aaO8&$4zFdg8NW|#X$>^LyE=rdPcL(x&7YSqW;nMObjmQNLrYlON zER6|q#l5owS^ezCQ6D=)@DbQ0l+5;dSTz1%)s!G(Vqr}84KYS9$x_lYs}mC%udi>6 zwpWROYrSIM(W;6>>voVZF!q{36-vl60#GT(#IAP}ZT7ePyzaYJ8ZT&sBhXXQosJ`F zYHGZmlperm))Na2IRFQMl{yVr4RbzYZ4M?;LH%(95JNLFI^u~`v_TPQ3Hn14qN~wt zqHWNqB%Qhf54evdu#QtrgHJjU`OuLp)G1@hQ6B>#IO6Xnljj=LrJiUvmr8 z83B2`#pA|k@-0(c(8s5_DJfAAX}B`e-p-mP8I9kIPc_#^eqy@kfm+-hCFEJ!pDu8$ zhD_Yx`zsNK(OGCe%#XqIPEF$)R~6pmQAj26hiCJ6>bXeKOcU-Z1aJWR-ZCzHUT zfmb~DajoU%I3-mj0NYt!#OCT#B^&%Yu_v}`h*XITclwi*@i9wosYcBV&8;u~pknSq zqTpAR(N=Cqk=Ma=e5qR#rS&&{9sG*q59*qp+stO$wwG>*LbVBHYZpA_pt~&gn-EGS zw;k0R9FOOrlp^J>XXzx2m3wNXGBVm~8~O)#n zZGY8{dViA`lL02W9uQW4(9^Yr$TVDY@|z-)Ey=dp41%qRJyT*LNwiNTeiRI&;CmP-2y>RweR?g!4Qv 
zy9?oBoI95cSS^POYY-(Fl*$)uD7wzi;Tz5>XFd+&gs#ufb4GtAj1(z0JKbng5Kp0|?|Oz!1RN993-&o#3+8GLiOWMSBbk>qVtEj_YQ+zVe3QW0 z0RbEYAvBzQGn^#HA6>8+9&4rKlR%yv)Q%9Zq)^3sZE;T8R?L$}J&FN#w-p^#niAB4 zP~^C}t~Fr@=o;N9AAm-S&o|FPMloCyjo;2k!E~E$O~K)CL?T(M+3v5cGt}ZcoVFT#h{6 zgI83#Q}wY2uLF{%_@DaUmIYzdeb1F8bK8g;0+-Xxpxs0kQ%8)J81gi%vHXzT6k1vG z9orxnn&oW}Um2^Qd05O}8kV$ZoY+F&C|}0+3-wL_2IdkF%>y;un_niko8clk==d!8ORinf0BK1I#SjqJD!YCUl0DOtAF)=s@_5{V-Ch_ZyPW#Ze>hZ?KY_Yl^ zF`Oc&NXs}eJ z9d<2dDu-jM4{a+whq`2B&z@V-AHG0P&~V-55X|MhTCWc}*OSEXqkdilgR3Jdo|h%#a9+~F!om};96qC`YJ3x~KN}`_@w;x)E!R86v_H0xnF%^q zR-tDuj)`&~y1Q@wfNFBoSsd26IBAJy!~VIa_)cy6;n=76vf%iago5>22|A&!s4+}j z&EO`z8G^lk=`9ieqKTEQj!JG3P3RHU)L^BA_j*Fu4XJ!^v`g9tJ}}gwi};G#y){GM z85;<=Db%+GvXhZXvEjW@Z#i9aecH=q^!>RBoo8##i;YHy#v78PSm0)d5u4TP^&mbm zmryP%q9vx~w%sJR@Kdl@YE6pd>!TKj-D&9!E>q>4z$DaUe~h<5V4%oX(YxoUB8eD{ zJTSYV7BV5d*ED*l#}@+zJd|Kyx6(Iyb`qSd`+WIgOK>;WOXln6mmE=3)z-3)G31f` zh?va*m*q9kM%bgA=eUufp>#|w+{@$oNPQ~{FV5 zUwAQze1i8Hqi|mkH&~j)-wR;VNj@wUEkUiF9=dn)W*+#&hkVufoi(uwHMVN%$QcUi z%Jw6SaYlf%PPMGTMf= zVG&dYyXjNkz5822<{EvomDMuuu3ZziM%E;Hh3hVnfrhs?pUKNgb+r|j2k=h-LCPL{ zH)7O3)_OLtJVB7Vx*6x>eqk)JOc%RrO8pl0D8yXSVbs#<|fAex|jD# zG%Urs>m}VcCf&`~-U?wBcm$A@XTYm0Cld3`kK+iC&`spIw@5U=%a{}^#%Ob+tKMN1 zjl+Gf_H?~T!t?Dd7l)EOC;}xez3D_eE?<}w^GvK|z1BG@lm8w*NuN|)ya|hC>R~YO zt6GOU#|}d&=~t??hR#B#RXB_vtxIso43A1Fp{ApV66tC7g7#vobl|KNO>MNAv4wu>^oYT(ukDjpKEWsyB*GdFKN)O7;Y)OIE)@K4@E zTdwvl7^Z8eALny>ZS7>(V^nMY@k=m&d3U#NQ@pDEZt^OtZuvf61z=U_UvKRSMNW*( zlH)Dt8_lQAM8)ssk!Le4uhB@pd_zT{e%B%js|A`D@NBzAE#lV9N}K^6B>Lm}p+omo z=(qU~zi*M)F}v?eE>YNy*MpRRynH+>^LDl?q$V?&ejvs5C?OR(z+fd+o3&w&&t-R^ z7H1<=8hlMz=3dQ&4AspipfC4F;Bob>!C1HTB2z2!^^#&@!A-i-Ur)SF4;jH`*9c0a zvl0U+9wv_z*?@K*IcH zok9@>Jm!_*q1XFUPG9HjWossuq<*mlp}oo<@kQwBVv2WOalZG(2rQ4zsje$+my5g4w&Ki{AP+}RlHN;20}YJvr1 zktQD`AN48C_(ZiHU=^)5FR-ptuGlfHxq4#0$O0VFK(%?L`YQcf-kp#EcoS$X9VNqC zz(HGFSlRXVIat`RhmS+0R2<{@fQ~wRo@>tb~U ztyEl_?hnv6#@rti9jsMJP&qy%GEuIuSzSi@3mY$1w7lQ#!f{rcVC#6u#8)#s#LOJq8%}<_wzwBXVlpbe 
zG7wzRt4pglpCsw9p?>U1tGH>Qfd2}G{&c5>)%Wg}C<()aRt*p50b|;fYppCXG zO)&Eu{GzxCwq}#K=(y;M^a_>y_A90)GAU#fIkZ5Rl5yerumzKkpkn;ny?t_DQITO< z{r#qz7Y7WfW=-|kVESS<`x{%CT!pi8$B*SQ>>*z+YgXRd`=G~qT=DUjm)l1AdEXMg zY3z8^CyggiYqW0+sbiAm3;bjlU{a?pA42kF_NXwaSG^NoeF3Fm9#!x# zZBx`%t8b#1?qBd3306t)0mYg0s?iiVb!)==A_3+uRnJr=ozDl$NcU<)O{dkTxt~1D zmAHk(MCh$gJD!$3(rFP0rcej+j8o~ z-dtkcIbWog2+3l__stiJE_xv*T}FHo!-lQXxXn7{=x9fLl~9Q>hn8n#I5!;|);wRb z=F6qHI*_P#ozvcUVfXuGz88cAQhNP^u+ECjEkWHifpuinv7Y za(4(Q0hodBlQ2A`KE7~4HkN2JJQ3UoRJ-o6LFn}YKj>UqTd-xa(DwcM?9r|s$U638 zGUuRm>Aj1U22?|9Q~DF{EdtX{qR7gw)%*h*ph{}q7lUcRg2CB?xwM9o{cH`J#tRFt zG^Q(yIlgal5e1v?+SZ8}?KPk3-nODrjJ9(eByq|aH>Z4wgp)9$J+eNk&)o0AjhGx( z9%O&f_#+J`7eBnBHQOMT8mvZZ?Gct_Z#Zp1!C+ySj8Nm)3 zTC56c%QyO#{3PG4b9iYn7~zIF0_RKpB8=B_;QTX2?fd@cU*p+l58_IbOB~h*m2mA- z^F8RJn5jT7g=AtLj@skOl|-oJiYW#V_v3)dBzuUM`<4%l`(?!rPnQWZV_O=W62Ox{ zu!3I2Z;U#xdY{fE^ND800cqDL1V|-9UHEf79~Z$KAwe^(ruS7CfwR15LezvLZo8Eo z9wxu8p6WOlkA)s&57O-2e`ciwiwIB{iAgsZKmr^8&W9Td?{jA5xMSB)3 z2K9ajs0L8?)~yC-QJAsa9&7h%usN2O2`GWI0%13$-jW1RkalfCH31kQ<%Tr8Z#E<1 zB~^2Nw}Ff~VgZR5>S*pr*5I^O;KUa{O@DtK9_(~>yw`%yqW z=4o~|HRr~~t1BD#k#<*vmS=%7O01q~#gg+wUKiLT>mGNk$;NOQZMRAWrrb|!5o83% z#jq!a@Wxw!WKlE^8#WEowa{atYP3w+_*ed8_WD)@In8~p!w%Q6Qy*0+eF4t~%I;BK z4puTl63L9r%MiJ)3JxByhmRAtluQ>7I=Y0jK;t=>&MK5I(P^G@CypOkJVu!F4&@ec z;k@vRNxx2y5`MKTNufG_2HVq^8>nmO()OugyRgn!}oU2-D1@2 zp%7jO#oNbcZhPfMUUWXU3)K*wJm{QZa(>Q zzw3GUCDbJr2JE7@g1){gPE#rqrSnMFdocs8f{eprY0i5MNpS|(>`vu zlL7kN*TXoopY622PcgPu2yCaQ8E?V88OVga1%_i)nv15K^_qJlB`@R8!zu|M6t7Y^ zx3!zMGSwPC<04eCCzA}YjZFBEvrJKq_kH9D3gqYNRQ{spnRpQ|?ZI0ln~lr$`#uN^wt^IT_pw=YBZKMk8W2QQ(@p_AqB5=FrITv=Z z{=n;Y64Zx;o&;GDNh=Z-x4zw^ke?NcKkP}x8)|#3)7Yt4Wh4RaVlcCD{j=1Bl#+Ga zacjl-I9y?}Q^*vz zLno2s&kz_~>zpwa3q&UN`|T<@}K><+MN;yARe$I&|ulIjxk_Wu)>DHU{t(HbKOW9oA}Sx01>Dz?u{U_on8lmFv0rmq zlHnW2qG(A>o2IDH;NSgKRcIcCMV4CHRJG*7wMP2<(2WTSuwm>l>V=%XiElY)wd)3V zlW2Z>?Y(Qo;c-{(#(W8?bCrDn$y&$bung^dT?!!^2v;&T&Y|bMOEp_fkNSnyy4Ymu z(1r5FPR?`t-44WB5IYJrj<-R{E*F}u$*~pmOnW_t4#9TL>k~W{WYCC611BV7%=2xJ 
z4i-^@qF!Ud)~qZCC|iGkZQtYTfJ-c{IoElXXxjZc7^QC|16IZ$JiwGX{>Avo&()`w zl?v0XROp8wiiy=$bh8Zem3rDr&~)9D z+Y9}sVoVmtP(Z$MWnFjp4Hn&4#cFl%WNAIw<3Zvd_fVZL-fUK4S|P!FFJP5A&&KND zRsngU$UM&1FjoS&!#;N$*-#CW9W^F7-mcM(Y)%a;tuZw+#WO9EpIuZ->*%_1*+e!x ztOC`_tyETBFSVHZiJz{iEAx6{jug4J&iyDF9)_h!kbIAAeZ*BD$VmOPyb3!!0} zPURm6rjqS92V^iFlKn%+P?52%vdN!c*@0Aeo7QGc#(Q~NMsp^Xu_tJjvpq2_dN+R` zL#crJS!jpVy-3!>sAE%O&j=U*M}*$^UmVs*eJfv3GtoCxRMI(87{d?PoYcvHI)w1* z;?d?^3oE_nEcIOs(#l1d<0wq2^@dBtsad*85!buKm0IP$LvRtIlmmL{Y$6trK4z31 z4mjDhd?Olh*W0~gR1JwFdz*M1%{>7@;9l@9?0oZwQw)PfE%fDkRWU$DOoW-#M95mq zfN#)(B};|OS%csBSX-Lw?}p82N!BMNo;~8A>Um>O!^-_$@#|4744<AOa850U3#@Rxd|V1)#pRaR6LwNGpKpAJg)C^ z?=}UxH!WmKm9*{1yq}r=yxT({QqhDi|MCz_xg-7BucPC<15VIido{ctkzKRcdHk3# z6@m4L`%}7eoI<#e=A6GWJk|;6d2Yb;g?^~^yd2w7YE)-7nr)8pezpLl5{YOhEL9YS zi6al`HeCDm`ud9K1BPbt=y3n)MA84QbnyX5YN7j~g~{lu2<%Sz(1ZS|Kdaq2VQy4D zKR>*pz?AwpdaqK`A_zJ zFMgn3_g(j8X?6RlqS;103kt}^xQZxd<@LmE*?g;+1O@yj+WEafI||BDfPx=y#RcXSGbINk#&PF(&AH`ca_I0d@@$r&PyB>`-$updl3@c(cSiWeIr^BQ~ zDzC^Rr9xJYbJLah93rB!YVxDO1r11ENehQ>RqA1WoggnXpMsE*Qa{i86WNa-aVXIr z6#hX$1fjsH6d>~c{*KTT>m9w$ULOu7o0rDUCJ}17KR*_YHaZ`y-?S&Y4zfp&@f56p zV23JJgxUI1qM;Yo*!QA{iHLrD#5MdJr3Z2ZEOla?FVjI2Togtev|TjRcTFufL7$ve=XR-4NEeFHbu%Pb=n zN`~R8go>Q*7XWY71A+t&PpxFR5PT&f`_y{`7-wBgwk!Sr=u_m!o3QM)Aa15YU#W28oUD*ONQ(RDXByX3kdNd&8)|~ zoOJptxI!L0d6yYc5XC^B3~XF80>zs=_;OE{ouXt|f1)4?t=Tqw{6vDZm-h zJhcthCM032v9d>%jMGLrA@7B^ZtU%`ON3J7WcE8|rrNo)_i|KK#xj?b=l-BCdEPSt zbp%A&hblp>3Ym(%z2GVfVQ$q|-mJcuEg>1B4HPm3=9w!SWwR;tEFBP=+ZTX@a`aR& z43(mNQy$xMIf=QSvK7I{8NuCbpdxQD!18`hk{gI*O~kYLG?F_5<27cwdkEt1renJL z{X~JeqS%(3({NAKY`Lw2Qr_pLMCy${Urza%Hd{@}SVyaKA~?oly;J5Evt=`41>Ol~ z-ZfBvZZcf0h#(R1(#V+HWBY99QVui&oDlO4E+mR{A-u1-K*0{Bsj{b! 
z6#F=IN(+4e<|j!$$^Iv5!qLa`9I%ikF?Y3UfnMKs1pUYKJH0l5rDw#p?_2poX?H$AJZ7o zew!MvDgbT&JcqgB5!z3V(OZM7ormNmL5`i?cp!Fo>m4ucYDk+x2W zW0ujjkJ!p_Wk2ynKxCit7(s!Uh5umKAXzjO;kM^Yn%T`M?dz(&sbMhNNY)(ZEF_7F z)+#Uzch725+1+BOIF;-=W{M zfz#1#d>-2nbB%_CV-|gp*XC-v3>ta9T(n7>v*yo9EYww#IhAsu@OqCIz?;+kqBL8b z5Ly);P}LCJR#Mee?fE1Z%gu*X%#I3|QWM+6uMA$|vd#xtY!rPU9n^-608G{KvZmnC zmh&eBGPIVxB4T5mshcV%Nd0`Jq$V|Ap@ax{sb^)eN)>Fs3 z7Hl`xO!1_<*;e*wS)vg{^u<@bmz)8ydMtk^sUm_=#0g_bT_z?xw8BKdNu*p8Cvf&>vV=>n}|DzgcCcA|& ze{3z8P@Y4pwI1~C(b-17`JGd8Nc-^sA)lC}q$pLOoE(1d-rO)h%g6t;5$xZY~%hKBEfLgaG9!#!rN1zTv2WDY&=4CoJA(g(aO7Ng_R8*W6b3n2j_4x`q#Rrl|J znxHHd8|gaW)W<6PDtPM&-EiLN4_RUJxg(91T5i+kf_vH`+C3v9x5F_#gAK0)~?`-ziMpT#1ju>x+2u_sgF6otH_;?}GINm4Ks3SbJ)=R z*Gv_3Pgqq3G)s(<9}<)r?rZdRhvj?Bm>VSCz`Z!mki&w3SSt$iqCuIiBn)Q?j4V=j z$U;*K#NM#ZHcR@2$o|Cd`1Oku?3(7r#}d;y)QL%ybx&L6Mmf}RJ8=Yukfo{s0u`6$4F zpCcOg>$7x6qSI0S_iJ>b0@X5~!k$E7xBEhe%$1_oBB3K@W^W!S(hwkKjk?G*!Za?<=!MkoZ$vK$0j**r= zz^u|>PX~a<_p(3o9j+AzIkr8^A8baguS}%o&4h|b*UF!@-|a7sDeN2nXslr9;~^a= z%i=9oHjknpz&uww$z3hUR_W0rK7z@i{aK_AiHW7w7Jl;uwxb-8d`&@K90}(mYtA(F zavDN+njfyN`>#_gyT)p&v?p3$7{RthAP#k{RPGj zZ_-d`D!2Xq@QgcuE-b#`kyZttg#&TP%x`;JZF03Wx65 z+xRdyTA<)Nga;u8q!RxRc~pcS3Er@uujASuHIA|LCcRhEtPD$GqrxCl6I$UN8!Mq9 zSIYzF5-MkIYcP3jJ82pPG>sLG$jz3376)fxvjU7VLyB^ay5N;kv|*fH@wcgS|3CKL zvOBJ1$r=_jgT>6u%*@QPz+$$TSr%IsGc$w5%(9r7EM{gbNl*3d?mKg5`VYMCnlH7E zRw+;A$&8a35xHY;_0~GoT6XpFZ3X^{5qEt`3DUHyem-#o*e*ST%z zJ~wpfJ`f3!OsG7Zk{QGz{q}B`zv6#@7;EDax@++X8nM~cyUx3TQR441jGP-kA z4Ush~p&=6r3r}LkhXz}*_DXfcRfM#v;Lk&Bua)Yp>8Nem2OvYy1Z*T6Y~IZa-=<)Q z8fR`rXJHHVJ+6ovY&v@7vp5Nto0l4tA$fs%6BBifzalX?F5kvCyfr2DL7SD1#?ltv zK7Mz}Dh%|X39epif{n<~BDB@yBd_Uc!{GlO7kxd2Rf4;JzSp9g_J? 
z*(F8i<`nsT*0yIr{*10{-?l?ETDk@DT&7`AIeudZ)t|ucOn0yl&2UgH1T?q>4MdCd zfZ83@RI^0o?QiGtM@nAw@&VfyJMy#WXH>_i0yEdIOzkR)@~Y{+&JUnmVhLsTrwGY-LP3kj!m{Q$lQ$11XtQ1h&SR;UZ}Z#7iA*eeP&r;o#nn*yrJ% zL`rdJX0k@gJ7mbwepn}iz129jn=Z`t7LSZN4{x^D-?s8$GrwsBOZmJKFT3#uBlO<< zW|G^_dlxA5Sy5%RSj1wznkiw+vRu8j3Z#|C;ke*P<2Op2FH_Yijv^xIy-Glh5xKv; z@~CzLY@~Ly%#Sf);Riw!c}rGXdRL{H9uagxtzw}|c5T@nD!?EwS6MLevxOtwm1UBBx$b_ytNXz`7In)m@J+Cu4nVh*)|Sy?oso&>9g@Gy zmvhX^+ipELR}DD-zB;o;C2;yB=kmC1t`@9~gDzuEtn$QL@eR`SJ>LzZ=yGKI z#kyv=t!q)x9@eSy<}3$ymVIdCeJg=vGfGM5XV<}tTERgnCCFD0Kod`R z+1&89iD82ERmj!3)*LFR89cip+W6+cb%8uVN>6hX+m&^n^+xOJO4AvXBYU{gYXg*Im3JAf{%EGC&W3`g&4hzB{ zZH`8Ok*sm&Wu;HHpmPD_K^ejY)t{*fZWs(*5=qpj@)n3yEIFm(VaH*(V*7}c!z~+9 z5O8V&!D9!7r0fs=Sn}>17lg{7t2c#UDLdUo4Gs^w{A2{-RLzOPJl!QCg}AY?iWECj zX{A`3VJ+_jatZiBw;bcH8;4uvV5tRy=tPj-0WxZfd&jW0d+*oyz12}rRl@TIzdcu< z=6B`z@c>w`q=?W)stlz>l?EpY;K|9*-tR)~#PcTApxzT2X?x|)r`{2B70di5 z$S`B`Po#m$c0~LKt{y8xvT>Y)VzVMoJFWHPNF*Bl-xFiMa;+l-Mpzpy=eF!G9Dy>B zoGmvxXV5=yrV1M^Zc z@U`=nZ*#nNv^Q{*z*|9FxX_ssWXbY(J%c%3sp%d9ZOkrgE(uj=nC%6B9T%ubSXd|L zuo{(t8z?_zV6Ml_VYHoL4r<iK79xfZysPg&VkXMhSJtn;%UZ*;Rt(dTQ32(wnkWIqqy8-oLYD<*lP}lA!>H_4E zPTuk;IX^C5hVU0&^Yub*bPQwg??w!ZMjjH`Iv(&3&Dtp;zS82)Vju*H{a7teihmuf zunLg;4ErM>;MW6Q-eYSz9zrP8!pDU-NVL(?mS-wS#fKiv8karueRW{J_S+l`jn_qx z5EnKP=ivzs%a5OLYIP8qk;PP)a7ULaU*ntj1PON|oU?uOg8e+J1WB&t{qLV)LWrrx z5?|LS#&$en{|reSQ2rH?kR=7&P9{^lneyN}m6P^)qlk-FD#dm!fE8gIr}qtLf9iie z2(E&SuSCb*D}(yLNa-6fr`c+cq{Wa^(E%h7Iru_jhS-E`(jOR z?b39yTEtVRbGca0G!Gid+T9n%cp|8$nUY<6bU}t;hG*g%5gKul>w5`l4iNZX*2ZP3 zzxJ~aCFR7pcScnMKn-w7FJdIRjc53TBZWcojb2@@ax_#q?pN`gRzSgr@)93L~Fhw;R6;@J}u(A-ro@OUYuL}M%E4rYQxdnBdk`yF?FHRR9(&G7$ysOn~pWA_@5x~itJ>zt_UwuY3o z_vXG4i>{m@6axDZl;e-$dQoy;&N(t_)}4`m!eI-y@a$#oA3gfeC&SCaK%|3nfF8ZD z4-|vI(Rf_HT5h*!mV4S|H5SMHa! 
zDOI6s?5wUY9qC!2bk9D~u2oNC_56Cc(nMAAyx=}`hBG&QyiX@S>Yc*}0n0OCFEoX9 zKwN&D``GC{o3lmwOEx{6L5pN`o+Wr`9f7TcA%k<2i%l@a-@$GHOA22e8&92{gpTvv zNpi%2iB727X0Bqfbib1HHCk`?$@6n`MTQje-#*QnSq5oEJ;A)ajC<_{tD-;!xe>6K>tukthv(1 z#3TM53$C0ae1$3%q-^3L5Udw61^E zt1@lq>8Pz&ai$-!n5=PB;Xrvk=h8SWBP;?FEMjDx!=xKn^b=aqtNx>$ySO;;bi~%H zTvj9KBNV**9FI#z1>x#ZB$p*u>kz+c3KP=Xuu}F@Al$`Ljvuv_`x)2*07XfI=dnaM zat^!pgl@5v;e(W4Z+G#$&}wX~*81G5GZUx9aRy!a2SOvm5{h3v`qzY>^aG ztN0t3M=apE9kH$11X8F3mB*#0+xL}Ir_;$Z`st*;r2HyH-!TdB^xHP!u;MsJF2iOu zP0DAa4|39p&R-+o{S=vMP~q?_dTZlARvsBoT_B_EBAv! zV7l{d)G6PP*hBqlmF;O?*-j%048zm0_ip_*p56zi-P(ScUT4$m!$q7> zGQHcurJFBN@EAkV>tjtl#E_&$s+{GKCWppMA^IJsnbV@8UCM9dkcTx(GM>oEX8(JO z0-5^c$(x1i!|MX6qB<75@%NX5qTia`?1d+oT8&AXRW*ql)73BMhdFz{N{Hf0cZbF= zSAb6H$oV4z|LN(@nH~~XqVK}VLY9S%cuE#uQ-tK!u$8Kz>SkZ&jSsoLbPJlOpsxYD zonG^zc+|+GPUXU)xMdcg@do*{LT`45RUDeGptP^O4zuSYWXqT?7=@2mo*4|`40(Xq5ds3+F84YYycq%Zl~e^-7C`H$17I}Yj0j5_q<=I>VRuDXc)JGHu_^XTVX1#Il zx(1bWE^}Oau#*yzATctMzl5Mc1FV0B{(CXwD(R(fxBQ?&mXWQq@7p)i*B6dKHx4Sy zEM8YtNx{&_0m7$zvecFyX!ZcTc}o#E;W5Xvs`vSx-Q2b)Fqx(3CU05T$gxYfaE*wB_dB@^M|9XRnKkE93=j9WVX!hX!PlKC z**->vJeB9VO&*}ZiLKp(4>z}M1h}89eSTZ|-BaB@&p$fZ66Q~hOFn(B4fB{RCpZ=h zu2dXZVBxTDCnO1EIPARj&m^NN$aiv4xNjp8bdgSG0Q}N9oM494fNuZy>SqVmn)UA0 z{(h^QjU;fIe3w!N%u%~CqqCe~@%Ajn1^S8BO(UR+ZiMFUCZ`878mT$XQ$RjPAOt$> zBo&jWicBb|)8?6_Ix#QzJ-#zzKA(*;ml^hH1m%@Pt;Z8jeT_`{ws^u^J)WhZP90R| zlG7yf;i3nH7?XkvF^2tg2-DwmF=;2uPryVWo5;#?$#K#7nlt!^4M==#`%sCHVh=R? 
z?w7!W4X7ySt&B(N`{R6#q)1lQNe_XYrcL&8?P73Kp9j$XnfLwznISeG4FuZg*60vF zOxcS^Zj^?J-AR*oiq-?7GHHhI!WRVT0Ursm`@=k$J~%=Kt^r~_A-iS z)Bo+bZq<&U-v7k9rCLvM5Q3dc=60d{vtjjD_pFiPXbCMwLq@#C>f|crlqJ}ZYL65_ zr31*fuburXS*|wkiB@WxLwodOe1|e^fsexUq#5g;CQ9Nd4&r>JdX2onEOt{7$pH(W zI&l&<)$LGEbr!+LJ~6~b)I^UpF`hf) zLp+)E_7Rc{6gjM6of~4ae_~jqP34GIu{+#zQyZIgR{&<}6&8Iz-slpjy0WVbkvkPk zOpHxW65YS7)=%u&%{R2Nhf#*Yp{4y*x!tn{p=U9-!Pps)sD+G+qsD}skX52)N&(Qs z{~jVB05E0`4k(}j=pJlx$V> zRIC^+CK0)*-^kzvqyhnIsXrrFnh8b#YDpof=xWIk#!f9`n-^I3uVLj68ywpo zZq4iNl+h7E!SIGM?g=_?_` zvF?wxx~-ZR41u=H#9mJc(!l$rlC5dhBnn_lFH|oPkGYwvkt1UngL%ccpU;R1`ufLL zet0+H*b3cG3#i&}!<7X^Ga0>G?e6j435Hn40W`$nJ>84S%Btw zV}ne15&C!%8$|F$nlx)BfiTVeP6YGFd$!r=i(4heA)NdL$VE=V-~F7Kn-cnO6V=&|(nZLfa>T_K0E<1Rlj)bP9DTi!N^A=kxNLpl}jNic%v zorn&66`BHV`0;XMd*b&f{$>JUyYlU;J^22-#cmBvMtxFT*n_4Gebe>NYCVn3Iy8$9 z$KvB{g{#t?j3Y8K(-6y4{n?)Kz~pz8RlC6&@Dz2#?gQ_LBQR=1t$EYC+lR$95})5g z7)b>E5P0%u+Q)Y(wyZZBoVD=4jlNng%H3%QL$wSWp$o3Y9dCImhPev z;e}an4DWw(+dfkjo$Rt)Ywt}-5bUqe;*qBlOus9hFW1-Nb-gqKh^2W?w5?o~c|P>3 zA17xMIOJTg3wpB`+G)zHmGwbkqLsETZaTP?>&Et8L7En=KS-?Q9WXDso-!nCVq0)4 z1c6YZU6Q)eR^lq8HR?}<7k6>Nf~~_ATX*Jg+^80nCrz$&THdGS{OPoYd;=-&ljlvA z4i(RZ%MZA#Af$feLWt+HhL&;WepeT61(t7r5xFhR-JawXxe@OFa;%SuDw&&==ZsP{ zCL6*1oZ*jVx}#aiToluc+oN(=@#9+&TYd)lwg-*RC1~$(&r7e9Qm3#LxKTJGv9-V< zDWuqfZx48ktc3gnZ89B0j=ayWWdaM$W?r*bi!sfWpH4XA(uyNyc6>6m@e~t#bdcW$ zGv6tC5lUQN8Fv!W$*EAXwjXm24m?Ctb=q5EAR;jsXdd~wmtjTwMf`$9n!EiEYI;UB z%15VJYJe!7_xm9y9FNaJm2NSKA1J&}N89}l%kJww7a_5BJ5MZ5xAz1;&?g^Y?w8vX zK3nKdhOIRX`KoEzuazZ8ivwl0CZ$XKZvs~qHD0_B)6>syuVdhpF8udoxHW7qEBzoK z(%m=Lr8GYp(P;Oe_aJ;wOM4DMC;^-PY@aVqHr2XbtwV1C8DG1l2)*4(9oLW6J)=2Y zj5V8VJ#jc+G+NI_D>54eAD6$~bia3+j()BYQEtQQD+g!x=K-Vo88#jBR%i5k^JYga zW|-mX&;*v5UGJbLj3{NMrpt`7f3k1VNo-9Ut=OSp(_<-kdST~4EZqC!{4I}zeKYt? 
z!1n~B;j=NqDY%1Ar7rSEdu>Yn!Pplk7d-rxYJA9tL+hp7uLU8_ z9@9|%Ai59oVSl#M%jlGpE~Z!4E}q)ewp^0WWFE{c2}+t=dK@%Kxn8N(s|vj~%BOno z`XY#}82h{3ZBzt?7I`PVf+yI!<+GY4;|C3>!hlLmmmPa0dqvajFDmGf0`N%F5(X@;9)$OAsbD^N+-Mc{e zs}6y!MFq#cW?(3P=jFCkk#mgTLAazW4$q!U>*FHcmEY&cQ#X|2uO1sSZa+5I{WtLW zr36$Sr7dKD%6B1Kx7*HMK(UTPsDD*s3g;0ED3+s@|A>*S0H|#%>*MtJ^O5$>`_2 z*g}0HdQ&0c#O9E!d@U&|n*UR&UPw}EM``>?Pm95BaeVq2d_p`(nvg}4%fP6`uTc%V zM#lsQC#hWMzAHwZ7UeDgfUN$8XTM5o!n#cn_b$7h>Rw=`(ZZgs)z_xavb1i<2(0Pz z2L5CGb4AcdEN_S22e53Fh*Ct;dbu!-pATS`#$#8GD$f1To@|$8^D+OjfIOse&TGu^ zF7Qg?={7Ki_KEPIQNur_4=8IjF=62Mre6?uGq-O4GssW()8xktAcqMZ$2pDt9=?F#_I%um66~9#-%Pg(6Texxz2|e1 zd0(sD?I6NL1huNTq;cKvMhhZK*kJP(3s%$N{^+K2zR}pYT~ls$q4^Hul^q}=5+a}J z{#T*FJEHtNw270SI%jW9 zf7V-d3P|N}^XaU9xn>28_xo=Oc>iad$4MEBkb7MAK2!hv8Hc(T69RhGhSZLS$*`MY zU-w~Mm@$_|e~1Rf4ScRqOaIc=0_z2BNB7HaBvwtkKw#30>C;#RMHOC z7m3uJ4ofo*uq2usKP&bhnS8em+u$XA3j4oi(8WW{yooJUNOIsF?wCM=z<$WO>`4935#=yN}yt64ITIa+1pX?Z-NP(NW;9>b1%g zVpoY=%Gf=NJ=phb&_di1BUG7lEW|ayki9AIIyLfa) zHA;`1B6-y^q5T=$KqZH&1@`V~h~%USxx;t~P*C?C`P@RBW5U+)2o>$*n{P;~rB>y@J3;nh=$-%i z6$&&oy<0%QB@xxH)Q}`*s{LGW^sUNr%4oAmcj!;@p1?lQ0MRHw+w3tJ^r^+7s;=^u zeP8h2HN$lsmftV-=iM#239)@RaiNhx`(m>G6~K3>GB2xg9BJ*Uv92D6N>Rlpz~>cj zi1Oa+X=2YkL7tFUFHKdYt1>hpO8QKQKCr9$vrM&GaT+LrD|w0q-L zIiju~%24OXk2kJ*$?2Ivpo#8K`4{`QuOZ?+L(9<>k=^fQxSpCamt{b*kkLk%HJkmF zI>%1>&vn=qWzco_JyyT z<^507!{tWDLk+%5u9p}5YOl_Clx9X(OuJU1Tl_p&c&lm<`z}pP3>-L{Y?s~MwPs@} zDThBr+qb(5SV4?ZIO-x5rO)|+y0E?k=*>l^Zav4lY)A>2^adP$e(#e_)b?xmL!B@6 z@74IOAy0#!F9%HBo9MM$2^F#V(qA;%MeW`jg;tq5{dj5C8XjBHqz!=w2A0YD@}4to zObrNG;|G+-Z%zx11g0x(=d30dOw2N+7VJWIero;<#kMdVF2)@PL{Bz>U&`D%UkEF% zu0@7zpL~B_44q}8dyACEneMCofLD6VyLb#k$pdd-JYdi@$`0G1h~!yCEH3^XTCuB> z)oe)tK}aO~ZBU_ZVbKNwI?lfHqih_o%|#Z#K4s{Yw#eqe0dN{Od~c5O^hH~(nEKv{ z^vVTI_$T1+cr2@O3}ps5?Of<&vpi-P3J%vR~dqo68x= znPe9$#$}IF<*d_LT1m|FwxuuTxIT^j`gW9{EfX8hJD(S zVtzG@(c|>%pXBy5d$&5Cqm^eYL(TjZK$-$+xK(n>Oh%Z^%OzPfj4Y)}&P%Cboad&3 z=Erb?ahp)FWn!|Dhe6i!6Y69E+-FbaslA2JkLl)fej#`&Fd3P6iK0&oyNcP2>?ex* 
zWr4n=IY3e+mCGa@0Ge?~9-MhL{Yu$dY+~W8q1bouSkP>kQriDoq}LkhaP*Ad@o{L| zle1vaHdD)_s-q1E{;=Mq!6NOwde1T|FsCcV6>)oS|E#8i_Q&;|d z2#yd`D2uo_=JH;lm@a&UqBG{2nj4)EGUhSymRvo5_q?!hPtPIhZ!Q4SdNh2<42KU8 zDG#YE2&+J=rnYm}y$1akpLZmYErqM9Jee#DKUQbPKKlazj= zS~9~*N(OdsBD7({R`qfQ+=`)Aqy9J#gv%PQjU zg}(FNOR}w2G2R>azo~R{1Pv{ddDHm=k2GoVsdX!Z9aV9$F#-hl&o+LpR_4$m%o;1< z3k9CK=4J^uiX=sG+$~Deb65ayt@mG&@ zjSk%VPb*ap1W_t>#N_1t+8d{Xd7s}-2*&st+)xCq6_XS~b-HlKlNZJA;?QLq(v0^; z9HW9QE=-#U4KAf6c0FNsipPGm-3dLAQo@j61qL3Hf|+-rR-cC$m&OK=h5An{mLZok zIm7>hAF_@#_qGLqW}Nw^>Z!@VL_Xv)=|jz^F<2AU9S%OCjV6;baylFFZiGp%0p#wm z7RkR>E%;((aIp@{-0;!?!)QD4kh+;@L;Eww_hGI2Tq89*0gihd2PoUu3Ia?_-b(gB zlYIIJ$pSP_ep-L-AuiQF{7pWlgYp+N_pcn#7Aff$HP+5>FW2byeA;NX51GWYF!g}I zQCBp?`>9DYfT{L3Vpz32$+i!8J6+FAwkCBdN6Doe)KQfm#5Z}3FkL~vOtnpf zzl+;O{8oeuft@~&PYROJSq?dRKHYJpfTc?xDVy|G!Xv^X2N+ibgUk*{7dc;UQaJdy z$~u-ytJ~jCMPh`nG5s&# zKEb`Dv%W{ekaN3g*aCSX3p3fmvMp8=%z-@}rUG-Ut-Tq6P9SF?>7CCd;h+=O94%SZ zrvHz@!?6A1{`>c83g6M2Vgr7Xv!r8P{3=%_(-s>S3ot_4BixAGiZdV;0!}L&HYf+h zvpWzf@^p(lnY`&VB;ay%T-b~F7xC7g{dS-bCJCjB(euCkHa98PPm-lf0||j4_l4An zdr}{{Kd-cQ+q9g8X+|~@TBHLQ<$V}OZ(>UWBk67dQ;{~sdJt(hMjjV8e@h^CUx1G%7RUcNE2Khm zGg&PzGozoHbH{K(r4eby&R08m2=vK&eklouV4m~3$0b)Wab+>6R&tZZKXHd-VF`gg z{CTeZH?OCY6wtX^zaI7Agg{Foj%sO5?3NeGq`!U=^s>mu?;mW814SbwuEWB`25+W_ zBrm;0urv~GakN_yR*^txng0Uh`lKF!u%0hJo%Nux=F~nAY?!v1gLKEcOU<{ z$VEZVRfA&E^I0gigiwfz`5Y=8Jj#5BjEhyWbu~xR(^D;cW^X_#Y+!{!|87CA&@m16 z=|9N^|IAJS2f^Cd;~2L>KL0DM|8uQ=%Q%2bMiXB)-o)Vw790(|st~m=*`*AHSWD|F z1q5+u^q=TH$z$n&rnLvB^+2VCbelwl2N8kXz@SJ~B!f23KEEywi8bO95GNp<{ZCyW zgi#LP1|45HAjSue8rhj_vZkHIK1jQNYxS^@I8DiOZKiO@D~xwW_SB{KpLaY1Pxh zSVKxBsUTStD0Op&3>Y1WHxl8fpU$Wxf_(Or)$6PpQdUR*nZ=aR0gSee+l5po2sODL z1|aFB)=c6bj7=hlhI&R9mamp4m=8z(_j%5L<5~qyMo36cU@3-49_pu^!v)5}7!>ey z?Ct4+@JN|B%hW{X|4kwaToEylz#L)ABneUAxX@8?ulv4_v=e5|nHN*k97T7spj6UpWm zTfF~RMgClfJ-!LEMzOe^DWUmGv!rVx-joX=*cov2Ow=g236JDli%Aqm(d!e4py-zbEm!sTFVEg{DZn8ul9?Cvr1_Wq;AIXI$) zM<^xzI^Eqf^+|WJ#2}Rr?PPMg@{4G)naP)oZ7BUj{<@uX_xR&FK2g3N4Uh2a|F$F` zBcup11%-m9T|^4QD|!*+xO8+8 
zy2~@k7W_zG4j;LeZcA(s0h0?8Y0@af0bZ;#>A>{jrC=ht zv*ys~ps$Q-V^p`^8n^f;#%&6S2mORQjcnJpN$>xd?LYohZhE3#O11OO1cp@$f5hrW}|?$_!12vDeH_rdUHG=kC$MRY!;&yh>B)QU~d) z37;9yb(8edw@&^yN&0^?WKXhOCs<5_LQ(kbt!wg?pL8mV?u7h*FoL;Cqk#vRCpl!m~9>`}fM~fPN)|u3yVSSzy)n z>)*Dgi~@TE5PNmy+BMh<4A{x-D@@!kQ$a)za-p~MMD0kKky zj5`^iEEf4pHX&S>Gi3MoG7}giA}sMqCBo5(`ynwA1}P!FcYE1n00*)F+?L{RQwT^1 zU}8rU1;2)-gKI3LhW+t9GzpA?{t1w(l<7_qnt;Hmt{_3dPg53c_sa71-y$g(_+S-o zl4A`EQblr|JF{IPumwpf4!Tpx|2A1|h^(>P{3oFiaJC)tP`HI&h_GZYSCIaGvj09m zfnZ|5ofOQuY+3mqz6R<9Qx8zDFXeRprVx%lKdJp~E)nbk0?70Q_OtIu+uv zkFl!JuSMSoCvI8^#lgbivSkdOtXRsCvcr0fN+4tHpUyFH4TSm^M}86mB&y70v_KM7 zN^1GZ(=TK-1V)1YUWTswFTFfsV=!Rv6T%$~Y4qm}Pb8rtH$Cf4xtTKCLXeVQo0#Imt3_^o@Kqt}vHf|9hMC{Pv9Q_qsQ|v!@!vE|D4s0o!s0%O_>~|ss z=Dv^!l#=zO;wd;;GyudMmMRR=UWT}o5~7notOt3}jZ0@_F?a?1eKP@F90sx?+A391 z-SgB$)R?l*a>~TKQ3>i_9sLmXSPj@O{x{7$F|blgxREKU8j>@9tN&&ixU+W&$-MKV zl0$Ff=9}yIAFr^=my7cBa^;xe9Lk4ZZ^n_Qb+Sq}5y>X6VuJ0hQszy!TzxCIGU`NJ zw*U8B{MQ9_D#Uzu(Muy>|5BT5ni_MP&X)->NmAq6ZKK#T+6@G`)~X^@lMz}HfrjP0 zKhMVdG-|3)WgC9rnDmusGGP%jSRd5r%5BNneIJeu!;%u;tvz+-&$C{DV*&C>pmMn4g1(r`b2< zGAtb8j0eh*AVfzC9pi(QTZM2(TsM}nW(sK((P}+iD(z9yXnOR&os0j@g5UE?hM0d| zOLqxA3FfIsua06voci8P33!LvOUpWZ-Zc86?@zJ|b%FWWa(@GU{xc9|R0PBAC@@tq zq=EOxo^dRz+ZB#q;!C#|jRN}96pS2Ti%oEZbe%so_|Ih_JO9(K4Zy}Pr?7UQQ zTgsK2`m7JCF}c{e@nx;4j67zUA;poX$>_tMQOE!I+ye*2)SWNKCP#rD7956*&RJx* zECM#S%v@Hxwo;`ZP>Lk$fcwWM7v3gDAv^=VW&m=iMI0&1jfyM3Cv#) zC{yKZ$UYk@=1242O>r5@K58!MApJwT*nno((&4@-EeUK=8W_N~04mfTLb3Y$KP@6a zGud8-g8j#Au@R53|q% z|3?!t5LpzF!+t!Nll!<^m0GL|jKA_eTjX1p{s;QmaGgu5`f&}+4p94flWVXJGu7j31ioyOJAk50L(|lk zaOY$|<#0NbCQK#$e1gvs78R}RxY~0@^WABAf`%9-Qxam*DAy43?@DF_`SN0EtNqw4 zpLtdH^a`CTVZ~~?$4&FO3jjIY2Hy-T2-563X72gA;20mDNnbRY7}rjOPalU#hjsFL z+}CMri}No-?+Fp!%EvOsrb@@wD=IFMR+SAZ++yo%4iblY6v`h2FqIf$B|Bczejmu8X6T9UJQ#8@fk0cP)AD*F$X9+|hn6lra3 z>d9LG|FNv)d;~h{(Z!nS^Ci4MvK0YL0CT|*BA3jo;_toqt&RA#OJC+8QJ9Ix9s;g0A~~!KfSTZ|Y}>6ie_>Sp9M!3@0t z2B<+)D3)@uwNCY#KUdS5(=BvS9knF%3dY#Q#_EZ{*rj-&7NcGumGF(^vc#uv-1yJe 
zg%J%5D+@)83cUn9X5=E_)b#gtjH%(cIZsxDgt6N0_?D0YbcSFa+7{McOR$v)+L@Mk z-$KPG&pVFzSf83RULM1y%qOu$DTT1&L6zFHii6!Er37%G%0srlyAiST;0A96 z+}n8h&+ez^XW0=|Gx9oB3TLnK^2B9TW8-{4y@&=V6#lFbv1H{vb@)lqDn*GIcHKo43eg((2$S$)2>rPV>v1nf3UJ

    z@NZy6qNWryntEFKt`zf){uZl1Sir-1(%b7ptL0qbHsB^M?y15u>*QKZ>(%|OUM-qP zAU>}qT}EqpERoJ)-nZWPpcag9m?DsUrR8f@htRRY^ZDWBJ~lgLom_$e5mSA!FR5Mi zIWdqwhW?_vO~-s@hyf%Wk4MHU$7baZa3=fLMNvZUuc-}oOEc>w4*=fUDxLP~O2c+u zU-t`2O0T9G6G@YsSChK&L}Z97{+j1o@H0%{(uPLQAZc3NTNzxeuL%w|+(ZfSRXKh$sQrs5gUiSCti08Hi|Iy!E z{qq#aWfB#1g%cbEg|L*GTKe8UARr+46kkuQ??zMe5kAzKLHG>(P`;QEDCF9ESPi2HSmcGE-VKiem{kLOPXE;DLOm)Vpr z-Q*I4vS@&aJ61afzP%d`Mo!HQzjL!^PCUeU7+Dw+F4cGINEYuG3l!-$vVH?U z2}Y|KR;fM_U!+wa%3Y&E!yPr5K(f`pT08QUN+Vh{Ha<0$*ycl*qWSTdHbL-RwC%nP zv~wyew{NXQ+rR*Jxq7WNPW!MiZ(QoXB9xHq9tn~GX*s!M%&_9o(S9`}USPR2u_M|R zIzsP&>uj#LcNw8$Q6f_HWy^Bv-sJjoOJ#&)xCZ%x`_(;cJO~Y)nW&9i5~)U!-E#`X zKsWU$OuY3k-C+R<&XLU_^O5!6kGW^22}7onVg5dODsfAN==5)Mum%-$)$t-q0#p-B zo$Ko@b4&j|)1PAgI}NjWHlu^2>0zV~w0U z46f?j)ct%Z8im~0`Qtj1U3J&ux!#WkVoBwj+jV;q6rcXeEgPO)$qwp?D<28XO54cb=dMj_g6d{-M^| zZH3|Z&+TMVs}TJXEe{jPkC(g=@W%do|J0ew{Dzo|mgDaqx^ zr~K=v4uqbuZeJmxi$?-ZE@e8=1oEm-Va}tzv^I>}&HmKJN~<-=wH4q&e%`iaGDKVT zNTxZsqj|nB@w}OOsJdBD`o4Abvm-5v!+KVpG!kQkc!0abKc~{56^)Xlk(GeFlxN5d zxtJOVUyO6wujiU^xKz#dj?md-N%i#MvuU&4Xff!@e66@X$X@IH;q6^{39GGd*k&1+ z$?Z12nsi*U_=q?^^;zFj1Bxw3p3T9c8s>~V+*8H%eO=&^Qeo?S)e+ZZ=xEc>baE8Ov4Wg zwqE--?v}j7-Dxa^^&>8?o@N!(uDMgmYkGs}a3syvvTJV-L*)k}w3^{A1ShuMz2Kyy z2;r!rytKIg?ll6roCC}_$@Uk`9IUtdF`1l3?F#z?)s^a$Hv^b3=JlJoSQ#~^PPtaf zya`7ZM~6wgm{*WV$6j%coXNqG@U(T(j;amtmy#fbAzzeO(8dM(itFs^vN!%yd^O~* zfgUU+(hZtWB_#Qd_-%IWOUOO{WRec5Si(>l>@+JH^F18ctzr}WN9YELE#a}N(?S!9X2xXrv< z2~lbgm&(S7x||=W->~tTvRX)+zi8zq{4?3nLJW(cOcEM1KqWPHBX&7P6tO93zM{;6Q_Q@};p}Kh%KzveumeigWrzCqETTV@X=YH&|@PA<7Kuo1{QP z2?7-d|DdG9=nZfma8~Mh^>5(e_uM&!dF6&ecXfS8dn_zA#O1B~?W(_RWB$GMo$m_A zmlW>XS-vOJ6aJoh*9o4K?+|@J+2rB`9t!4mFn-vU;#*K@3|>~QYg<>!{I_S#)>MhhaUIHoq-+YIZ=ul~WoV6W{()xVm{wK`fsmW{VwDcVt$(Ub@bpj&;jdtU z#SZVtPjn=Rd5G?qfNt?VJw>ux4>a?mMz6<-rKo8Iokg2v>8~Q>2tO|B9p7|eb_7`; zMQr7x_=Qf)%G7G&O&pTeAc1RWEE|j1fbQ;7ZRY*T>ycK@!ggS5^ zRa^Ugs;L0PLw>Gv2&~lY63=%)CIYw5syyN2p{E!9+HtPOp<)0$+0(|jKypTO<&cUBkWG|?cs;=SsX#$oJju7_YsFNFYuBSF*p?O z?R~#7Yf1J2)WX>G=fj&?AvEzT4vIpXCRG=yNM1XMu? 
zQucVJB>QGkRn7-DkJbRcl=e;E`#}OPFXDi5Ktjcd zl~&g6e3fR?hi5`OTy>5Y^X$l5%s(u{(5+SPclEbYoHDq?`JpAgE0w~jFhQFb{(Jrx z{oW$fu&c@r9yg5W$X2F{0PsC)4(n8J$B{n6ZTMrUdJh$XrJL66f;q9LHVmVSXP`_> zpZgLeItdjgCzM|m{#1n1+@?e4X3bZ30#a>_K3}wxYaU02w9P+r%Z%b+kc#0(P%sf; zd+k}^`+R)A=9I@4dhQ)Z`Sk0_EHh}&wu zg}QNLPT$FrYHV+yDg$v9X%#KPO zRq;;%nyWzS0DiV3LALF>hZP~$zAG~IS9_?OvvG*CXVx{h%6BG~iid~YAL*P4pzgn+ zg>X!IjlSJZ17jYO+?GLk!c4wa$3+O2eN`&d?hnq*M2#|84Ns?GW;Ps0B6SOmGy^4q zs0yVYIS1pMC6ieK{qnN9%~m$SsXF^p7pa1H!1%j2V3lZSd6}5Gz3r1)BaDf71ri(Vtw>!=)t%36O%?l?Qh1s#Tefy7 z3CS1hX>~OsLgxlPJWD08$5907H+u zpaLz#PtDw)pq=D{fb_i7*Uw6E;D(~|j^?KTiqz5^;dps*=LFoV(S3ovYluCf{m_k~_gj4ZMb!5O z4aL$%DKRe?CYvXs7=4kfmnYFqEW+>lw$-m`&Sf{rg^!7Bt0Z1K2V)4@&y-rDcKgG6 z>e${C?X8SgVn<3)FwWZGD<|dC8}Oo?f6^aLKbPZ$lqn4s-G?Pw55HCGhO2l@qGTgg zB?Kz|*0K-_q&-HH&+p4fbH7iuo705bQ|j(})~Avfp4nYY4iWT;He~o8Dcqvw8hzwdhxZ@sF_U>8xYyUv(%*^z<8iSNE(c49b=T^!%vqI%}89 za^KkKuqyt7F%uEbjwK3JKA8)hio$gSE6asW+6`7C6nNp|L~4M)fPm_9qX%$B z{z_7TF<4I**{ja*9Azp9m6n?*liS4D_r7Lz`Ky8!pgGt5iya_rtoBgFU+H-L^axP% zBlFs_-ECB-t5a)(FlGE6lF!)9`ZJ^1*z7|_+kzpkEX(CEZ=z6>B4k;rvrs=K&Ec_r zrTzGYqVc=o<(8Kn57SxlhXSa+XYSw+^VyBMWCne2eK>Kcqgi2;@_0TNF`nCm!7E+T zZ(4N|eC#M_D8J2Ev1VE^7$i2( zr`W6Mmvl>y)x`$sbicEcG{?DHlD!%cVsr;pm~zXg@!ah{A9_g47nN>26+CS^J0Io? 
z%v?5=yH@pdZO8;(JUm|B-qTclqgqI#AUFyZ(4|8`w5M|j=ba3Sg7$#mXl;XI`{F6i zkAe%dQsBLQX&i@l443UZTH8CM?&RF*B?!H!Q6Hrrg43`HNT^(kuAD`=MvSV!GS_-> zccs0+4}*&Q`=-#)Jg|zvlVtu}IjbEl^D7?tm*0y?nm^zx)T``;i5B_nqP>vrgKj5p zmE9`Tc2~L3fx1WarbUiPKUrTFT&?|k-_A0g!jTP5?qxmEEDq)|E%L9z9HRoaXpG zP(*$Qc4$(S;<;w6{eYxuUWdtuyvfSQ|JuCkW)&fG*9`PFn&^%M+Wc*2oR+=KPx;bM zsnNYEB+>my1QSWAK907qnVc`Smnrh})-C4f5)v>J{P+5FonqDh$z)c`!%th$qxjM_ zLX;YZYUJYbw=3>2m450=v)^ZU#55KRcbsx`M$W}N7%~!qX7Ie^8NowsG})tfsLcDD z6|R~%`?o8MKGB}+M*sGbYV4KY=vVlF2ROz|uG5Z5|E-HWaSaB(qk8=w=i^1r{_cjA zJQ&p1hI?i8xu9Iq^)k?HtrglYD4%xIQX`T1o%@#-1jhU%PU7(+bdr~{^bHky&RA;Bfkv%hHDTzRNWP1xa3KPkiAGc=jJg&lZkRL>6lVjQyA>q z+o7k6ipze}r6XPAOrweCkeEq4#eR2r>}ft?^i2@tvcSy$G|6!wa&?2^cprq(CikSm zBK-OD8nN5^0lnNO6@!8=VOqDJKH1b4v!oL>z_U=33NBS?(utFLA(|0`CInnIS{e#% z3x*u0gMwlJQ1={70F2jjGhTwXq0tHoSgz?9f*^W5%kd_$MOr{<<-JR4niM9U%xRNqGJ4OzVt2kpHM6LfYvzy@}1E8oW`dp-Q)rX{0wMxUw=Vb+?Ic>e(uH!^}&drKKXQABtAxjOmcUG5QAF}FVvO#g||Yp6t- z-7;&-`^=f^^^&6huV-puFg_BlU(bh!%5S1K6~;5Asgz`j9@>ypP+;l`EWAmyMUrYs zvCmDnWl6It-7$oBAYoNQuvD!8FL>2_0`a!%7J^>Lfrsw9BpPl|1a1dk-WoFZwnTzu=5dUatYQ@6q$iq)pXnB zAMPVzZd2A8v_gP8A2+p_MPuZFv46HPKD8WX1>W>gqNh| zXD~IF)dF|A-Be)jYgC*FM|T`Elw;B{*WTdw-J;tC^_{K{kiT6;nSL}C+wIds*S!gH za5$rZ)(d$0@Y+&B;rWk6nb~b7FL?J~iNw8d;<>$u>_2E!7fF5e6O@g&ROWyHdFa38 zj*8#h{f*i3M4B7i))}l~rxFb^3yk-MXyCssF#^PuzSHXNe#yE60+S_1n=K>=mg~RT z29r~9bcS5#lV?c4s{Be2oBV6h_cN>)_STl4Tc5SY9%alSGaXqdy4GJyBO*rBl#uRm z3VIqCs&G|p4Z`~p9i4QMUXnhV1d^OLZ;x+eQ4MiH5rXt}j~Zqpc8CAgJ)hNf z^NYB+ln(Ay36)>+E8fh8>W#uyZ9-dV)E6jS!bczxcPSnDq$OyGW5@sDlPW(FS*?7`M75dH|DH! 
zsC>&ur%;S~Tx>SsJdAJ2X{thUG7EkKE6rR8>!Y^}3`Z47Pl8sn&rd6c#)adz_Il9~ zSy_tq_PpLmYv?`LBp1RS-ys0}l@(d#UEd+RLaDnqgRuOsG?~(}&R6AAuV=m&Si32O zGEL>75sq~z^qiyVN33L({5>(Mp0|d1(&{T@WE?uSt&eze!jY|sgdd6F=Tg8$G#m2D z8=fS;*VQMeososd`{@QrV91;m0=<1+^Az-;&(Bo)d&>nsRP0MeCPP?`7Tcc6e%s(0 z97di$M4ShFSH13TnnFTGO{%D|0p$2ycm4VNuouQ>1}#jgg_FfTs+xm#S7@zz@q$lV zvFHLxI_T(xrSM<90ueqTz@{ZVnTK}6LP^MaxL&|G?!2F7!TA$Q-VJTiW>~9zSzDJz zM5;!j#tbBL)To6;!qa)@4I>F7CO*Sk3nV#XrNG=dJIii5d4K-dZ02QVZf4FSkTH2F zwCt?trs{HA)1)V&**(AE{pevFxn-F0nxp&y9)vMfqfO*#o8>zNS8YA6h6){njVq`I zY+i9gf4i4T&Uw!9CS@1XlDZLsIUtO~tvV0Q@~w{{5*n*}*;>aMdYiYJA@6**PSM#2 zyE@=Ted?NN-+mj;X{oN=C{QgrP(_i+&WuIF6xmiQ*(#CU`5JEiT~8B5_}1XaX>@^y z+~5LJ!=cIkfCIGsc+&E9Jo?nd$q+dgE=qS{Z_3&yO;O zJG7h1XA|inN`7Xc8HVvOdR?pxhDN`J#LYQ??X{bo6cL8HhDFV>cI~!h9c^%YWx3@f z+<43j0hc+N=L$ey8!GY`9+d^;H{51IfMr|0{7IlztYPEP0U>mEfwpU(?gxtb~$8i_`a&Lz75;;ycpc~9-l{oZKKQN zKeSg{8H~I1NlOU!WTh_?kf**fIJI%}+#)Zr}}el2wVn{W8uv4G0YaK&jh zjWZup01*k3biFn4(JNb~IZz0On0We2V@`>S{Cls)U6IIO4)FnVHkY@*L2kO7O$JD{ z0BEMgvftnTm?^8p?avxZ#WiC@^&HQYAC?o@20`!A-w$qJs0EU2`2=dlRJXpFp-v$L zt9M;q*_$V+;)!TU>$pzFOkL%F00x#xprJGNp(V5TpUwoD41fV)ve}_fXXFEpBNOFB z4f3`nPSSN$qoshM! zop*ungxQP1$!|&9)5}J%fTEkPO4^KHEsk)aC_s5dBf#2df#>}3zEd^=Y8c#S5g2Hc~vcG~>_m00-G*csu*6h|H*$EjO+h$|#;HfQ)m zd(;KTan2LGkYMbw^NaH|aW1s|!1~75UfWx($v0v>vQ#&`X08s#hh3l)`{Zk*g>(iUR%ouRPo ze4XHoc2NHB7b_93NeHM1mNgfz_x4Nlf*mNe=v+KL-iivpJ7j`d+}dTsLLFmT9u`_? z0=~Hojpj(_3(dE@*X~0WFR!Vsp9Q^JFzNb!1W7_#7P`HM*KZVEbJbqSjzoqePdA+P zJP5%blgW_ut-A%K#$8%ALZGy>4hI=tCw)w$!Zp5#yjUW_ zwQTwtlCJ4XdBowF1w1}QP4{^hEP>swU>~46E#6-2;nMT2TZeJX=pG>0E0+ncVzYjq ztFn2#)C@4OSNK_&2o7z5ohuI+IByv|=CpdLC4C1k<{f+OOj%w*>Uw`9D-_>fjk^WE zHtXH?aQAWSAhfF4e^4~M^M|q*K^dYBUtn^yiv=DuwFGy6Xh03(R7g|VImW3VHCEAq zl8wl02T=>U)*VX$cDY{n=*U3c11-61z5BU9bKWKY!+PMRpy_g)xxR-$XDUO9pXjgA ztyhswM^5vp_6rVhj5WRKXtmd=&3YY%UNVaq=1ecrl-F6NX2Z7GWf9X4eAG0x0>=OHT}}Q?HQcOwJLamXVU8}8)304rF=p;$S6j!lQF~M&(y}H$-j5tj@+sT!t8*);)n=?X-OqoMYng8IYqc>U{KYlHviIXHc(Fa#0D;&WF+7#? 
z=%9Z+*J&dP_&`iDyl?TJ7xZWlW1S>!b=Jm_i&W z)?TTLPd)=3&9_5Qsw}CS2L~yz?>cQpe6x2xw$C?YTaL>Jpc!uuL{TiHk?TC(3v7~t zLBRdoqmwyR%mrqJCRL%q%{Y#{NA}wDGUsBaDieLS%#t=dxFDmZdp=DK0YHqdhHS7f zU_8x>DAq=Ji9oPb$3GT{JAU#V*6zk}X8Y?<X-NZI%Cq-!n~ai}%pmHi4I-}%zY z`}MECP&Rh37#(3)FbtaCDJ+P2T5#8`^Y%%0n-PZX!eZvvQA3bhS~aEmy450#IX&*yB*dPjJdgaZKlg(m7;7=%bdiGIIQGx|L@p{3|AlMa$Q>nm+^nu?A2o2_ac? zB@+w`Z)L9&-PXNLrYZ^+`Z3UJ8Iw&BnK5w(%h`d=8yjHKH-iy0(d)xgS#;j%j5NKm z#CrZ>k!Ve*_v1ePwthi|V|Qtk!fsSf8D!%xD{hL_YpERxxQb9{SnCt!@7k+L@$3U`fRH zS=E`{yN$?bGV|Q;^XLyrCO0V8Nrb#8+f+cCpvV}L_jcN6L+p?I1>&DdMPWW8#UciW z;!}4ElKGAO5-poKLHP#YW5JBBSAk;r7WhF=G*4+~Nf8i**Gs(d9?n?=T_8p1C#YOz zy_1_dm+MH`uIam?4a@nWiRk)O+4TKu3p}HIY44UnAn=v7bw?|M&5uSe*GrN|*~a&v zKgk;I4kSA~l_l#Fe+xEJoU>L;{pt01W>w4Gz)$6!UmD)1d=c8OUP_ z_Sddz?z%4x87l~KSv%~2snZkeeizHzKvV?|E>lzNCEKyd&_TbKvagu0#PKO7W+iUi zY#|Mi15IZ>qaKZ(UB_u#OOqvNLsy+MFEbxEUBr%8m(8yG6?t4bTf7sVi0db^)a_Q} z>=4(uhPd%%=Jf#sxKfI?R77e!-T$IrBJR{nN~a~dYdd5(ypHvS`}Imf*(`|f;i3Qs zm!T32WD%Wm?O{NhWof-p* z(zH@v3L+@IJRR@d>-$9$96s#%%J>q7qhX&jl><^H5{s{a#!4HBQ+ewq$fwv{s=j(q zXSa26J{&%gto_k+#1b@2sP;#Lr~0B45`&jJArv8k<1;=xYe2f-xrZmu%i=XiJV=*P zUHaCQ7;4c;aFFDtf@CrAP3W3ILdB#?R)opq@xt~3i&j%02a|`JRJJ(tbZ_GXZBixM zwX~)`H;|u+zEPARWsABhj=Wf<)q_U9@@ZvdWu$(7s7UCI1XzjBszIQ7u8HqV}P z5C9K+QHDPx+A>aORLbga)?^cHV|X?XG$)LK?sv^LuWzmrpy4V*Gp5A#zk1D9OC$BR zX!nDzzani$KRg?ptu$AE(3g|3aT#abLRt3@!eA;I$Zcco;l4IKzj&4DCV1N5*t3Y! z>!O0k^VKac3CXy^LaZ5R6m0FPoH!|zj|#d?2IyJ?gkL4><1-FKZ->vj>vE#>k!6t! 
zx2=3fkV{N7Eo@&>Mb`P@B~Z~j^T&_eyrGF7W>yEst@?ACy~y(V=r1-LHG%b%OW!jx47uz-_PV05&7{!US=d1qk5f&`zj3U$LnH1M`uy6Q z%dJp~LEt$otj=+wMMf`yikv@vu}3$JT>{ViL@hh2+6ni+yM&FyT~_w(1uK89NPXTR z(_A@dwE8#P zi#749dz^m5PZx~y_VG=Olx@<)cJY-;$p9P*nj)PMx0T<+YyaEK6Xw?X`a?2P6;<=&uxu z&gvr&fq!(<>o4G`IspLEEX%6qM6x)Q^C&45B z^u0PVSH0k8*I0LHMjcq7-|XV+^q36yM}!n>DCp5HDC>aOq$cThTjJR}TJ5Zk<)JEX zyvPVjlaFaec5(uf3a8xAGSSJk%ZCOf&c4WE0@^DWQ8|H<1K!UzEB&q}VPPj{u^wtl zjaYyYu8X8P?&BqA08&OIa&E1Lss41wLbtYijuXHz73n4JmT-~7$xWd6I%rX_!fTC{ z!)r8ht?lkO@wfGUdrnr-x3xZP3plbx5&>67;Cy}(0V?#}MmUdAdh=PG8(E6hWDmKj z*}XVS=Lp`4q8X{-&3#5qo1mlVC4f=KlVXN^QgIx|FjIe)@K?hJ;mN&tcNm5tusd!t z%rlya*35(N{NBoBm;ruN4H3h&Dp^4K%Nx(MAlf)Oj$!2pUu|}05P?29YLNH)WlIR5J zW;B5ihjxtxR_tv&-#N)dL(`jo6)zi0C5%S<40B0AkIoK3?sD+9y1(md^Q-hn|-V0_1>Dggik5G?B&Rwe*%4M$x>Y^auJ2o z34Km~ao>ZsZ1kndmu9h+#9=PJXxL3O-J8GmlYRm%nW-zog^KWUvCo{%+4;S53QkT= zN^%Ev^zC15I1~^*+pCa~4s63x~-W;lQ-5%k#dn#B3L9rsv{<_|a2AzjVsc}!P zhuAZ?f0le|IeI!P8RC{Y+ni2d9+1_Gy(6KRlzzfC{Z^iz(|zOx{EyE#o`24WZGDj8^+vPtm|H%G-SU3RYqZ(? 
z^NGtERr2R!5#B7t-AR{m>kfVEB6U_Q0m-QN)~KSeJC9C|rh95}Eh9C8e%ieRn;4*` z>p>@2HAJ7(ZY+R?Gr`^PSjyW!mI9Z8s32`pH`w-bXp!vcjyr}&o5vZk!8z{;BRCE@ zs>y1xRRbZ~4b$;;Wlh26@oCL2yWbflzw>s?9(5HN7`{BC zcd(*JWSgxM-qpsK`!~IZsXi}L&80AXMV=-X`-L5@K9R+gf9oz?*k&!+XiPKM%!>83 z*t45l(@AT~O+XisEzx7MK|jXb$-ME2UDbKr#w0pWTzFkmp)oiENmpbe!zJE_$1ruT z@El4u$|XGQGNx*zAOA-u`NMowWA_L^rXjRR8)S=SSFfu@Y<T(%f@^i4J5>Tl9TD#Ww|j`N?w+RC(6SCbjya zxq&Dg%1+&n$0&ERb8$)a8g2JEJRqiXpJSdPOFkqvvwp*`WOnC0*TpfRUQ>TBcTYyc z&KkgWU32wdTC&6hK)Z_X%XR2G%Kg?as#vixv2?AJb8jRbrC37S9DNgUQ4DKAA#+BqN2YOkwVeg0mV%Cd>R0@FHDT!tLqTWc`Z z-BO;lFhpI~q{VgAqRM&IBJQoP7q3!JYppcsM4vH&S zH#!vt%H(N=nrnYO)VtSCsSK!SxJrGxu4l#CprYM*yBm+&9JcJb+3|_jTHI7Frf|Yo zi6#?R#XSj4Y(6zGB>YJFoNS{-m3x5Bjn1H9PO&@t)D+&tVcZ@?12oq=s0M|CP!~;61sJ@tJids0SE)FakfT13qd3&bsiWfQUlxA`@>*4G zqhNRPH&|##bnAORR|~KoW`LcFHTr)l$dPAM8NR!JW>d=T%mSt^sX^8tL zChOyL46KK1B5f2n_$EG7%yy{h+KC(5#RlM!t4c?hjm5p4&1Q2@F0%RJ$ldhwsWhb1tMz{U!!_{vb#+b!Svx9h07a{UL2D&SW+pCtn3oqZod|_>{o9k{gaSoD_Zn@^k{70~r{#TWm7sKhWw} za+V8?`T}LGCe9p1Q(n1QoBr1ju3hK|JBO_vMB&i&6T``{i;d4N4mf<<_ zvc;%k5I+^cFvdORJkkhUsh?i*fj1Dh;<YWR@&KW&~RNHZ>4?*eS2fvs@Uo_*N zVL?t#1Tu2Z4h|8Y`=%=G9y}L6w;m9A)6WcE86-v&S5Si@{eB{ojVnzh)lqw&%VjV) z^(=)o_xeEXt>M>m%Ov-$&3)H%-&0|=Q;o+-fFGzdz9(NcV&2Dc>c>&qFlqe(sn?92 z$C-2q1je_z>m$J>dU0|TWOpgbMROsNJ3zidS)t20{?y#Lrf~jL%r*XVHxWxNfZyg3XaZZ+JJXSH;&Mij`tiAN>8M)|@b${S3nA?0Q_vx#o$6Q;Yoi_l^JEOPq5 zia}+6pk$>{g8LCK4;XaqGl+7Y-uZYpTIqEfQAY>5uneQz7+P-@w2PL#*OHHOTC?R6 zXKX;vl9Aik#T!8GhILT-P7=&OcJB4!;cZE*WBcyi?e%(px@2@V6u{O|G|JIqOYeaf>c z)UqB7s3ThU__U6}VIq|xYuJSqoyKW$=)pQCCi(O|p8rIEb0*Vu>T&zHroZTKdDPV4 zmQ9+#;vb&_J~qof_m1N}Cu_ob$gbeEJB)IN@nNsC#IzrE69r-T(LOK2cBw}3Hc^u* zR^Vt}$==jDTK`s2_=?>74=EB)c7jvrs{vVA?RMAxw%55tbk20&ddJR?_BOH-wmwy- z5fg;sLXM<6>UslAwdi6ua(+0am_)&nd}` zW?SAjCmyj=8A71YY>2xE*Os*m`>u+t@&~``@i8|$j?C|C1(8uHN)5M8aPWB~K=3+P z=-nhDY6Ynz8)9ZCDbM+-{!gTDX$u&3Q%Yq1^v|SZRLR#Eq{pt+j~IJ+`>)4~#*2aKq1h!)UMVFd8wI z&J(yI-0sF1N9yDYxHk#Z`k@!$G0M3zIGKV8QlGmz)+s@5F^pOyczxsdOKw72fBjg3 zhY4Bvhq;y@K6jXuy3SDCB 
z-@aFz#Z!oWX;-EizV}^Exiz1Y?qzosiMT8S{ViW@sN^&sp$|e_{NCP(z&-E1;km)S zUB#nVr4i&|@4h$ER?CHTUHAJ|4^J&0VA}LgIw^uqs7edg~;2RZ0=nNZ*CI(JFkgw<6dfJ5#I=p?QIuaSvB zk?pm=To(lJZaF?L`p?gobp5k=6U2Bk9jUPs@S0E5=hFISxrXxM?Pn%QpCio z{xwUz)aSM?oy_9u_gN5e-ds13cCL#;1A4J@$}x*!O8dt6kp-l_`o{;5DGEG86Cp08 znVg%d)+uImppg;{S-9^nxSd>GhZ$jZEyIC@>_(98`sQ9wAtpodlT?L~vsq$Wjk!!& z>fNL0=WC4dsnNP9*7m+O3}G~$z%e}G7L1_TBv!OeU&5%cMJ$LvC&|gJkmmc>RvR|F ze(P4Jm)KTm01(-K77pk)-M)pbVX}r?5u!PGjzcs3Tf?X5$2Q-9r(x}6)HLBqB%6jg zz7C4Y&r8p^WS@H?NPNdhBToOd&(k@Xr@~-q|Ci3NEF`zaO~1#{?l0aH$hxzhZq{bv z(>()i2QCA*{a_pHomdE^3SP3FVymOG2$7((Mj?~pj;_;f!fV(PP14tc9ESw?Fz}W& zESAtu1APIP!!~=7RW!kN5VsOFXE?)om0hQxpm)~1RAnv>e2(RCw!5Q_+3214-oI>a z<=LX6BlX8ESqUsZ;(<5CARhic&(FLwcy_6_%4#nboa*nk*F3Scs&G-?p*5v1`qK5R z4Y&1lc=mvkCNLlIhz!f>{;1jL*zT%zA%+ZkHS@X&<*xwxqr|F-GRzaBW5#f~M_4zSu zMtz)cj+W}-WIY0Ge2P?(X=qN>AwH~-l^|Sg%B?j8Ef?SpiTWaX9VZjjl0cqbdp<{= zyf)=0gSyY3K9C})UBBJJ3E#E12&rOQ8;X3lTrMpWdOZfYZuZzV9FY2rq}3{ls|5+z zb)zpq*v4zOnq=Rva!q!|&wb*A%)M^cTgGq+y2;Ws`CLIl7@}gUD3%oG_F|}nfWO^- z>pK$<;H`~*D9S3i{Jre3oO)jb;={9=Kj4RqH4Kp_jRBg6MLn-=P4(cTgdp~hsmIRm zFc{SwH1Gltcostpq3e8IpG!8Pck_{(oH|4I_wlhQ2j$|$zrqMUk71$#4sWLF?Hr>U z<7*0bH18MS=N>YTPt~02{7kr{J?Go}{UX9-?yPgG{$s2F^K8E|G@!xLCBpkDzUpzJ z&lY>zO*%LRXaUKiAYBp;vbho-G-q3CAb#G50lS8|?6SDMAuQ>ID%;b77~1`n*!xgv z^dL*|=Zdg6#LHL@T@B|f`{N@lnc3I8!=D@ZriY<_i6YreJ0RUoSci9`(Q5nd@g;9u zd$ileQH;aK#SGP05N!xmI_Vc(%k{&>(rJD}sHh=syF#|#4wl#D5nbk0xG5SNprjl{_?~VyuH{5XFS=@V}Mygc2 zeoTG{2H2#~V_c0ril%(JW}#}eF+y`1Ag8jKvYp!{IS%pT41Cte0pLO!Vl?y@;|yNw zmDoDqszi(;tPsW`%_gy0IYVJvaEx6BDDQ$mJNnEFirRI@R+`}-SfU8<1N3t&C3^#C z<-tBCuNDfY+oVnX3LPsBbQpCT0;i~Odfz;7by*)Wh8=O>fgg*FSb9_D#}eD^)myh_ zE38X&UERcFN{lY*EP&oENazwlUs|fV?pCJZHc!@?UM}?lZQu?(glEyf2^BSN0aRfk zxAcKwZ>SYXXtp0Dnc{PA0{8l|eV*I4JhD1mrAxnEo@j`09r(l(S!cT5v1u$iyQ#4V zB3JBj@l-b6n3XmhfEWdFDAQPg zwXY)NMnec%Cp1iA!Yabl<)hbom)kY%P=qjF6^KQK7$OL9sOeRf?}+Sc(S*T*_`_lB z!aFXOBsyuG7pZis-GG#n&!uvPSyzo&l|@)d0k0Umhtkb0ohK2cY;EJWY{E(h5@GIO9SY-x 
zs;qd+uVj8dg0X|xb1Y6(lYdDIt$nTOd1W0~UtZKqyLfUs@SX2&vIT9pu9@<59F~JC zhh2qEF%nT^Dz6hx5zY@xepS}8)?Cu*d(P<*zD4^yjw0M0ocHiqr5=KVZSMNpoMwMD zSSxCzaM;&M{L3#5rv4R*AbO=3MSbsZa)ahNU1D{5?qvds%{m_NNt3B>lVKU^&89Nb zHS1x1iF@`W?YdUb{9dNbY{`s&E*He#Z6v@Y^a-UxlsT$wpM_0#c*lXg)!K2S_AEzRM*fZdUs4W4A*O3j(l`hs1LFwbyNM}KvS3M}gBOg@ zJPbIZ!mHlv z62(xg0r|i;F1ss}Ny$~A*`}v|O3IY51xvB@jHi{OQlF?^1ETfgUA_`U91mDAEkf)+ z$-(tN1A&(>n?#X?)|sHu5o%Z?-9Yy*AJ2e#_6xF*EdJs?hfhD8rc;IC=0>?aX59^~+roN%TKKGhcSJJCJXN+q^^+20iCTBq2WI87JElI&mmxa)kZM_l~_gB*MV`v}DR*rJg1 zMl7~Roa%kyKB9)SD&PUMtd+Obq*C(BS{2l)binBX~{Pvn^l*pSz&RD279MoH_Zk2gEes*C5MiGz&>4v zNBfIR1AIX+ct7!m#}FLlZtn{2sW#5H-e^E{&A64ro1=>^xAy3qO=dS9nCns;pl`*f z&+5q!uI_$DJr8cmcA4Y3OY*b_@24Axwa=lebHMHH*5(z%j- z`N!vR;hYnJ>E@P9#sj@tZBn6N!P!I+sgmj>rze^(36P`6UPH8IHFl>cpFvq zm3a7Ns}xp8_a&&B@!X%A{-42P{|qDk*9bta+U);$DrmS|Op57c5M4Cw*qHEXN~=j< zTe(Z|r~W@REJ^EXp#Pqg z|5;f`7L9aGg#kC)rC}e%suC1Sm zv=E`h`tM2pAC-r3%+#Lz8ONU`nhI)@GV2G4^z0nlGCxErLT!RECSFY8l2#45X#Z1Z zm3)wxHB}9WuDCUY2zckmt96>h_nOrvex0$Bq%Sn{bIqMhYqqhbe!U&6$o`MAsDc-8 zIRi5vXq0Q*;0!XevZvdQb+ghOcQlA+xB8x4r8)f}2m8My^M7<2GvM76+5=9K+e$iM zK}szl+XnMvK>`g;GQo*)!xLAP^!jJ*I@{m!X*ud;f4(uL1*GHr&!_*ZzAOjL^=rlW zEWuklkeV#|_e%AVTH!$M;@(VFy{;=H9J{jRvVog%hwd)Gw!t<;`d(X_k-Sr;n4ZG4@Q~#}&x{2gVZQtL93`* zkF1;I#Oc{pU@7e6>ZL6<==n-ojAhj?UZMacpncLU~{(~KTpaz@Y4TWXd$ zQ(h9E^AhpE#J3iCNl9RSI{tr+&i|zoAbCJCh-~mffqQUw+apcfev)}j)$0ARASzHj zxNu63vI_ABuJ>YphN1WGsS#&|8E;J zbz|GIIf$RzK8)FfS~?3Zv_zvw2G#cCGn$Qjwhy0eki4eK)|xDb)l#i%%v_b88d^n0 znwiT*2mHrR|5++ozUCqZJ;XdpHsj?{s8QX)txF}>ur%5jgwK%fJ_vP~EZ?1SR;uB~ zY**++Z2z(IKN{`Bg}Xbb)Oofs%VyE&8gvd1cpzkxaCKIplQ=%xfkU+#gqdn6PU? 
zhrn|Q{Xce{n;9+Lx?jXou~2jJ6Bl#AU2>F-CQBlL_L;rK_F#mOZS?gPA z&VOuQNcT@9Sa`Z9b^4FZ*^t)k;D2Y<{I5Iy|409OK>pu1Oug?gxGzsMG}GI%A0R(z MNd<}OF9w1C2XqmOP5=M^ literal 0 HcmV?d00001 diff --git a/doc/source/ray-overview/use-cases.rst b/doc/source/ray-overview/use-cases.rst index e66b27267aa5..5a9c22c40952 100644 --- a/doc/source/ray-overview/use-cases.rst +++ b/doc/source/ray-overview/use-cases.rst @@ -7,10 +7,14 @@ This page indexes common Ray use cases for scaling ML. It contains highlighted r .. _ref-use-cases-llm: -Large Language Models and Generative AI ---------------------------------------- +LLMs and Gen AI +--------------- + +Large language models (LLMs) and generative AI are rapidly changing industries, and demand compute at an astonishing pace. Ray provides a distributed compute framework for scaling these models, allowing developers to train and deploy models faster and more efficiently. With specialized libraries for data streaming, training, fine-tuning, hyperparameter tuning, and serving, Ray simplifies the process of developing and deploying large-scale AI models. -The following highlights feature projects that use Ray to implement Large Language Models and Generative AI applications. +.. figure:: /images/llm-stack.png + +Learn more about how Ray scales LLMs and generative AI with the following resources. .. 
panels:: :container: container pb-3 @@ -48,7 +52,7 @@ The following highlights feature projects that use Ray to implement Large Langua :type: url :text: [Blog] How to fine tune and serve LLMs simply, quickly and cost effectively using Ray + DeepSpeed + HuggingFace :classes: btn-link btn-block stretched-link webCrawler - + --- :img-top: /images/ray_logo.png @@ -56,6 +60,7 @@ The following highlights feature projects that use Ray to implement Large Langua :type: url :text: [Blog] How OpenAI Uses Ray to Train Tools like ChatGPT :classes: btn-link btn-block stretched-link chatgpt + --- :img-top: /images/ray_logo.png @@ -87,7 +92,7 @@ The following highlights feature projects that use Ray to implement Large Langua :type: ref :text: [Example] GPT-J-6B Serving with Ray AIR :classes: btn-link btn-block stretched-link webCrawler - + .. _ref-use-cases-batch-infer: Batch Inference @@ -640,4 +645,4 @@ The following highlights feature projects leveraging Ray Core's distributed APIs .. link-button:: /ray-core/examples/web-crawler :type: ref :text: [Example] Speed up your web crawler by parallelizing it with Ray - :classes: btn-link btn-block stretched-link webCrawler \ No newline at end of file + :classes: btn-link btn-block stretched-link webCrawler From de5eb1b1b978227d331dc834195b807aa3c2376e Mon Sep 17 00:00:00 2001 From: JYX Date: Fri, 21 Apr 2023 06:09:35 +0800 Subject: [PATCH 038/424] Fix typo in node.py (#34630) Fix typo in docstring. Signed-off-by: JYX --- python/ray/_private/node.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/ray/_private/node.py b/python/ray/_private/node.py index ebcb3c6c1a79..e5ec183cc165 100644 --- a/python/ray/_private/node.py +++ b/python/ray/_private/node.py @@ -485,7 +485,7 @@ def raylet_ip_address(self): @property def address(self): """Get the address for bootstrapping, e.g. 
the address to pass to - `ray start` or `ray.int()` to start worker nodes, that has been + `ray start` or `ray.init()` to start worker nodes, that has been converted to ip:port format. """ return self._gcs_address From 5e9bd289c0f492721948aadcb5913dd0dde142cb Mon Sep 17 00:00:00 2001 From: Cuong Nguyen <128072568+can-anyscale@users.noreply.github.com> Date: Thu, 20 Apr 2023 16:10:19 -0700 Subject: [PATCH 039/424] [CI][Green-Ray][3] Extract error logs from ray logs (#34193) Currently there are a lot of test run instances where we fail to acquire logs (especially for infra-failure issues). This PR will fall back to query ray logs for error patterns if we fail to query the application logs. --- .../job_manager/anyscale_job_manager.py | 60 ++++++++++++++++++- .../ray_release/scripts/run_release_test.py | 1 - .../tests/test_anyscale_job_manager.py | 15 +++++ release/ray_release/util.py | 4 ++ 4 files changed, 76 insertions(+), 4 deletions(-) create mode 100644 release/ray_release/tests/test_anyscale_job_manager.py diff --git a/release/ray_release/job_manager/anyscale_job_manager.py b/release/ray_release/job_manager/anyscale_job_manager.py index 61561a596d47..1b605b412e84 100644 --- a/release/ray_release/job_manager/anyscale_job_manager.py +++ b/release/ray_release/job_manager/anyscale_job_manager.py @@ -1,6 +1,9 @@ import io import os import time +import subprocess +import tempfile +from collections import deque from contextlib import redirect_stdout, redirect_stderr, contextmanager from typing import Any, Dict, Optional, Tuple @@ -10,7 +13,6 @@ HaJobStates, ) from anyscale.controllers.job_controller import JobController, terminal_state - from ray_release.anyscale_util import LAST_LOGS_LENGTH, get_cluster_name from ray_release.cluster_manager.cluster_manager import ClusterManager from ray_release.exception import ( @@ -22,6 +24,7 @@ from ray_release.signal_handling import register_handler, unregister_handler from ray_release.util import ( ANYSCALE_HOST, + 
ERROR_LOG_PATTERNS, exponential_backoff_retry, anyscale_job_url, format_link, @@ -105,7 +108,7 @@ def last_job_result(self): def last_job_result(self, value): cluster_id = value.state.cluster_id # Set this only once. - if self._last_job_result is None and cluster_id: + if self.cluster_manager.cluster_id is None and cluster_id: self.cluster_manager.cluster_id = value.state.cluster_id self.cluster_manager.cluster_name = get_cluster_name( value.state.cluster_id, self.sdk @@ -259,6 +262,55 @@ def run_and_wait( ) return self._wait_job(timeout) + def _get_ray_error_logs(self) -> Optional[str]: + """ + Obtain any ray logs that contain keywords that indicate a crash, such as + ERROR or Traceback + """ + tmpdir = tempfile.mktemp() + try: + subprocess.check_output( + [ + "anyscale", + "logs", + "cluster", + "--id", + self.cluster_manager.cluster_id, + "--head-only", + "--download", + "--download-dir", + tmpdir, + ] + ) + except Exception as e: + logger.log(f"Failed to download logs from anyscale {e}") + return None + return AnyscaleJobManager._find_ray_error_logs(tmpdir) + + @staticmethod + def _find_ray_error_logs(tmpdir: str) -> Optional[str]: + # Ignored some ray files that do not crash ray despite having exceptions + ignored_ray_files = [ + "monitor.log", + "event_AUTOSCALER.log", + "event_JOBS.log", + ] + error_output = None + matched_pattern_count = 0 + for root, _, files in os.walk(tmpdir): + for file in files: + if file in ignored_ray_files: + continue + with open(os.path.join(root, file)) as lines: + output = "".join(deque(lines, maxlen=3 * LAST_LOGS_LENGTH)) + # favor error logs that match with the most number of error patterns + if ( + len([error for error in ERROR_LOG_PATTERNS if error in output]) + > matched_pattern_count + ): + error_output = output + return error_output + def get_last_logs(self): if not self.job_id: raise RuntimeError( @@ -280,7 +332,9 @@ def _get_logs(): ) print("", flush=True) output = buf.getvalue().strip() - assert "### Starting ###" in 
output, "No logs fetched" + if "### Starting ###" not in output: + output = self._get_ray_error_logs() + assert output, "No logs fetched" return "\n".join(output.splitlines()[-LAST_LOGS_LENGTH * 3 :]) ret = exponential_backoff_retry( diff --git a/release/ray_release/scripts/run_release_test.py b/release/ray_release/scripts/run_release_test.py index b259e5d3bfc9..449dee26557d 100644 --- a/release/ray_release/scripts/run_release_test.py +++ b/release/ray_release/scripts/run_release_test.py @@ -164,7 +164,6 @@ def main( except ReleaseTestError as e: logger.exception(e) return_code = e.exit_code.value - logger.info( f"Release test pipeline for test {test['name']} completed. " f"Returning with exit code = {return_code}" diff --git a/release/ray_release/tests/test_anyscale_job_manager.py b/release/ray_release/tests/test_anyscale_job_manager.py new file mode 100644 index 000000000000..528bd453be3e --- /dev/null +++ b/release/ray_release/tests/test_anyscale_job_manager.py @@ -0,0 +1,15 @@ +import tempfile +import os +from ray_release.util import ERROR_LOG_PATTERNS +from ray_release.job_manager.anyscale_job_manager import AnyscaleJobManager + + +def test_get_ray_error_logs(): + with tempfile.TemporaryDirectory() as tmpdir: + with open(os.path.join(tmpdir, "log01"), "w") as f: + f.writelines(ERROR_LOG_PATTERNS[:1]) + with open(os.path.join(tmpdir, "log02"), "w") as f: + f.writelines(ERROR_LOG_PATTERNS + ["haha"]) + assert AnyscaleJobManager._find_ray_error_logs(tmpdir) == "".join( + ERROR_LOG_PATTERNS + ["haha"] + ) diff --git a/release/ray_release/util.py b/release/ray_release/util.py index acfa27bda9b3..2a73f40418e7 100644 --- a/release/ray_release/util.py +++ b/release/ray_release/util.py @@ -28,6 +28,10 @@ def __str__(self): S3_CLOUD_STORAGE = "s3" GS_CLOUD_STORAGE = "gs" GS_BUCKET = "anyscale-oss-dev-bucket" +ERROR_LOG_PATTERNS = [ + "ERROR", + "Traceback (most recent call last)", +] def deep_update(d, u) -> Dict: From 77a914ca271c959b6486cd54d0ff746f809e331b Mon Sep 17 
00:00:00 2001 From: Eric Liang Date: Thu, 20 Apr 2023 16:59:39 -0700 Subject: [PATCH 040/424] [Data] [strict-mode] Remove internal TableRow abstractions and instead use Dict[str, Any] as the row format Signed-off-by: Eric Liang --- python/ray/data/_internal/table_block.py | 11 ++++++++--- python/ray/data/datastream.py | 12 ++++++------ python/ray/data/iterator.py | 10 +++++----- python/ray/data/tests/test_strict_mode.py | 11 +++++++++++ 4 files changed, 30 insertions(+), 14 deletions(-) diff --git a/python/ray/data/_internal/table_block.py b/python/ray/data/_internal/table_block.py index 809137f25fda..0971ecea6e1a 100644 --- a/python/ray/data/_internal/table_block.py +++ b/python/ray/data/_internal/table_block.py @@ -1,5 +1,5 @@ import collections -from typing import Dict, Iterator, List, Union, Any, TypeVar, TYPE_CHECKING +from typing import Dict, Iterator, List, Union, Any, TypeVar, Mapping, TYPE_CHECKING import numpy as np @@ -180,7 +180,8 @@ def is_tensor_wrapper(self) -> bool: return False return _is_tensor_schema(self.column_names()) - def iter_rows(self) -> Iterator[Union[TableRow, np.ndarray]]: + def iter_rows(self) -> Iterator[Union[Mapping, np.ndarray]]: + ctx = ray.data.DataContext.get_current() outer = self class Iter: @@ -193,7 +194,11 @@ def __iter__(self): def __next__(self): self._cur += 1 if self._cur < outer.num_rows(): - return outer._get_row(self._cur) + row = outer._get_row(self._cur) + if ctx.strict_mode and isinstance(row, TableRow): + return row.as_pydict() + else: + return row raise StopIteration return Iter() diff --git a/python/ray/data/datastream.py b/python/ray/data/datastream.py index f01ca22c0d0a..3240b29e3f1a 100644 --- a/python/ray/data/datastream.py +++ b/python/ray/data/datastream.py @@ -17,6 +17,7 @@ Optional, Tuple, Union, + Mapping, ) from uuid import uuid4 @@ -128,7 +129,6 @@ _wrap_arrow_serialization_workaround, ) from ray.data.random_access_dataset import RandomAccessDataset -from ray.data.row import TableRow from ray.types 
import ObjectRef from ray.util.annotations import DeveloperAPI, PublicAPI, Deprecated from ray.util.scheduling_strategies import NodeAffinitySchedulingStrategy @@ -3027,12 +3027,12 @@ def iterator(self) -> DataIterator: return DataIteratorImpl(self) @ConsumptionAPI - def iter_rows(self, *, prefetch_blocks: int = 0) -> Iterator[Union[T, TableRow]]: + def iter_rows(self, *, prefetch_blocks: int = 0) -> Iterator[Union[T, Mapping]]: """Return a local row iterator over the datastream. - If the datastream is a tabular datastream (Arrow/Pandas blocks), dict-like - mappings :py:class:`~ray.data.row.TableRow` are yielded for each row by the - iterator. If the datastream is not tabular, the raw row is yielded. + If the datastream is a tabular datastream (Arrow/Pandas blocks), dicts + are yielded for each row by the iterator. If the datastream is not tabular, + the raw row is yielded. Examples: >>> import ray @@ -4488,7 +4488,7 @@ def _build_multicolumn_aggs( on = [on] return [agg_cls(on_, *args, ignore_nulls=ignore_nulls, **kwargs) for on_ in on] - def _aggregate_result(self, result: Union[Tuple, TableRow]) -> U: + def _aggregate_result(self, result: Union[Tuple, Mapping]) -> U: if result is not None and len(result) == 1: if isinstance(result, tuple): return result[0] diff --git a/python/ray/data/iterator.py b/python/ray/data/iterator.py index f7841673cf03..4957512da212 100644 --- a/python/ray/data/iterator.py +++ b/python/ray/data/iterator.py @@ -11,12 +11,12 @@ Tuple, Union, Iterator, + Mapping, ) from ray.types import ObjectRef from ray.data.block import BlockAccessor, Block, BlockMetadata, DataBatch, T from ray.data.context import DataContext -from ray.data.row import TableRow from ray.util.annotations import PublicAPI from ray.data._internal.block_batching import batch_block_refs from ray.data._internal.block_batching.iter_batches import iter_batches @@ -200,12 +200,12 @@ def drop_metadata(block_iterator): if stats: stats.iter_total_s.add(time.perf_counter() - 
time_start) - def iter_rows(self, *, prefetch_blocks: int = 0) -> Iterator[Union[T, TableRow]]: + def iter_rows(self, *, prefetch_blocks: int = 0) -> Iterator[Union[T, Mapping]]: """Return a local row iterator over the datastream. - If the datastream is a tabular datastream (Arrow/Pandas blocks), dict-like - mappings :py:class:`~ray.data.row.TableRow` are yielded for each row by the - iterator. If the datastream is not tabular, the raw row is yielded. + If the datastream is a tabular datastream (Arrow/Pandas blocks), dicts + are yielded for each row by the iterator. If the datastream is not tabular, + the raw row is yielded. Examples: >>> import ray diff --git a/python/ray/data/tests/test_strict_mode.py b/python/ray/data/tests/test_strict_mode.py index 5d7920e41b01..100097c91f6b 100644 --- a/python/ray/data/tests/test_strict_mode.py +++ b/python/ray/data/tests/test_strict_mode.py @@ -182,6 +182,17 @@ def test_strict_schema(ray_start_regular_shared): assert isinstance(schema.base_schema, PandasBlockSchema) +def test_use_raw_dicts(ray_start_regular_shared): + assert type(ray.data.range(10).take(1)[0]) is dict + assert type(ray.data.from_items([1]).take(1)[0]) is dict + + def checker(x): + assert type(x) is dict + return x + + ray.data.range(10).map(checker).show() + + if __name__ == "__main__": import sys From 00979ddb433574803a44a97e3325038f322a354d Mon Sep 17 00:00:00 2001 From: matthewdeng Date: Thu, 20 Apr 2023 17:18:44 -0700 Subject: [PATCH 041/424] [train] Add AccelerateTrainer as valid AIR_TRAINER (#34639) Signed-off-by: Matthew Deng --- python/ray/air/_internal/usage.py | 1 + 1 file changed, 1 insertion(+) diff --git a/python/ray/air/_internal/usage.py b/python/ray/air/_internal/usage.py index 8d6068260eb9..fb9492dfd46e 100644 --- a/python/ray/air/_internal/usage.py +++ b/python/ray/air/_internal/usage.py @@ -8,6 +8,7 @@ from ray.tune.search import BasicVariantGenerator, Searcher AIR_TRAINERS = { + "AccelerateTrainer", "HorovodTrainer", "HuggingFaceTrainer", 
"LightGBMTrainer", From 3f6ce85a5a4e3cd06e0b39bb7b7bba74497d1cc5 Mon Sep 17 00:00:00 2001 From: Eric Liang Date: Thu, 20 Apr 2023 17:34:40 -0700 Subject: [PATCH 042/424] [data] Configure progress bars via DataContext --- python/ray/data/_internal/progress_bar.py | 20 +++++++++++--------- python/ray/data/context.py | 9 +++++++++ python/ray/data/dataset_pipeline.py | 3 +-- 3 files changed, 21 insertions(+), 11 deletions(-) diff --git a/python/ray/data/_internal/progress_bar.py b/python/ray/data/_internal/progress_bar.py index 5cc9c6562544..c6e376de9b07 100644 --- a/python/ray/data/_internal/progress_bar.py +++ b/python/ray/data/_internal/progress_bar.py @@ -1,8 +1,7 @@ import threading -from typing import Any, List +from typing import Any, List, Optional import ray -from ray._private.ray_constants import env_integer from ray.experimental import tqdm_ray from ray.types import ObjectRef from ray.util.annotations import PublicAPI @@ -15,9 +14,6 @@ tqdm = None needs_warning = True -# Whether progress bars are enabled in this thread. -_enabled = not bool(env_integer("RAY_DATA_DISABLE_PROGRESS_BARS", 0)) - # Used a signal to cancel execution. _canceled_threads = set() _canceled_threads_lock = threading.Lock() @@ -35,9 +31,11 @@ def set_progress_bars(enabled: bool) -> bool: Returns: Whether progress bars were previously enabled. 
""" - global _enabled - old_value = _enabled - _enabled = enabled + from ray.data import DataContext + + ctx = DataContext.get_current() + old_value = ctx.enable_progress_bars + ctx.enable_progress_bars = enabled return old_value @@ -45,9 +43,13 @@ class ProgressBar: """Thin wrapper around tqdm to handle soft imports.""" def __init__( - self, name: str, total: int, position: int = 0, enabled: bool = _enabled + self, name: str, total: int, position: int = 0, enabled: Optional[bool] = None ): self._desc = name + if enabled is None: + from ray.data import DataContext + + enabled = DataContext.get_current().enable_progress_bars if not enabled: self._bar = None elif tqdm: diff --git a/python/ray/data/context.py b/python/ray/data/context.py index 651a10de7c3c..15b92dd0d5ab 100644 --- a/python/ray/data/context.py +++ b/python/ray/data/context.py @@ -2,6 +2,7 @@ import threading from typing import Optional, TYPE_CHECKING +from ray._private.ray_constants import env_integer from ray.util.annotations import DeveloperAPI from ray.util.scheduling_strategies import SchedulingStrategyT @@ -120,6 +121,11 @@ # Default batch size for batch transformations. DEFAULT_BATCH_SIZE = 4096 +# Whether to enable progress bars. 
+DEFAULT_ENABLE_PROGRESS_BARS = not bool( + env_integer("RAY_DATA_DISABLE_PROGRESS_BARS", 0) +) + @DeveloperAPI class DataContext: @@ -158,6 +164,7 @@ def __init__( use_ray_tqdm: bool, use_legacy_iter_batches: bool, strict_mode: bool, + enable_progress_bars: bool, ): """Private constructor (use get_current() instead).""" self.block_splitting_enabled = block_splitting_enabled @@ -190,6 +197,7 @@ def __init__( self.use_ray_tqdm = use_ray_tqdm self.use_legacy_iter_batches = use_legacy_iter_batches self.strict_mode = strict_mode + self.enable_progress_bars = enable_progress_bars @staticmethod def get_current() -> "DataContext": @@ -238,6 +246,7 @@ def get_current() -> "DataContext": use_ray_tqdm=DEFAULT_USE_RAY_TQDM, use_legacy_iter_batches=DEFAULT_USE_LEGACY_ITER_BATCHES, strict_mode=DEFAULT_STRICT_MODE, + enable_progress_bars=DEFAULT_ENABLE_PROGRESS_BARS, ) return _default_context diff --git a/python/ray/data/dataset_pipeline.py b/python/ray/data/dataset_pipeline.py index 1d3e2bb54151..88f69634cb2f 100644 --- a/python/ray/data/dataset_pipeline.py +++ b/python/ray/data/dataset_pipeline.py @@ -20,7 +20,6 @@ import ray from ray.air.util.data_batch_conversion import BlockFormat -from ray.data._internal import progress_bar from ray.data._internal.block_batching import batch_block_refs from ray.data._internal.block_list import BlockList from ray.data._internal.compute import ComputeStrategy @@ -95,7 +94,7 @@ def __init__( base_iterable: Iterable[Callable[[], Datastream[T]]], stages: List[Callable[[Datastream[Any]], Datastream[Any]]] = None, length: Optional[int] = None, - progress_bars: bool = progress_bar._enabled, + progress_bars: bool = DataContext.get_current().enable_progress_bars, _executed: List[bool] = None, ): """Construct a DatasetPipeline (internal API). 
From ca237a8f06c84d30ed627b8ba6c434e7049d9b8a Mon Sep 17 00:00:00 2001 From: Chen Shen Date: Thu, 20 Apr 2023 18:48:34 -0700 Subject: [PATCH 043/424] [CI] disable flaky test test_run_on_all_workers (#34647) --- python/ray/tests/test_basic_5.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/ray/tests/test_basic_5.py b/python/ray/tests/test_basic_5.py index 0d88f03ce0b5..ffdeb6cf20b5 100644 --- a/python/ray/tests/test_basic_5.py +++ b/python/ray/tests/test_basic_5.py @@ -145,7 +145,7 @@ def pid(self): assert "Traceback" not in log -@pytest.mark.skipif(sys.platform == "win32", reason="Flaky on windows") +@pytest.mark.skip("flaky test") def test_run_on_all_workers(call_ray_start, tmp_path): # This test is to ensure run_function_on_all_workers are executed # on all workers. From 622c8dc63c82380501b4e658aa879caf25ed0452 Mon Sep 17 00:00:00 2001 From: Chen Shen Date: Thu, 20 Apr 2023 18:49:37 -0700 Subject: [PATCH 044/424] Revert "[core]Turn on light weight resource broadcasting. (#32625)" (#34636) This reverts commit 1bfbc46dcfd09cf85115684e0675b800c7f86368. --- src/ray/common/ray_config_def.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/ray/common/ray_config_def.h b/src/ray/common/ray_config_def.h index 674c69ed4eeb..0188646292cc 100644 --- a/src/ray/common/ray_config_def.h +++ b/src/ray/common/ray_config_def.h @@ -441,7 +441,7 @@ RAY_CONFIG(uint64_t, gcs_grpc_max_request_queued_max_bytes, 1024UL * 1024 * 1024 RAY_CONFIG(int32_t, gcs_client_check_connection_status_interval_milliseconds, 1000) /// Feature flag to use the ray syncer for resource synchronization -RAY_CONFIG(bool, use_ray_syncer, true) +RAY_CONFIG(bool, use_ray_syncer, false) /// Due to the protocol drawback, raylet needs to refresh the message if /// no message is received for a while. 
/// Refer to https://tinyurl.com/n6kvsp87 for more details From e3575cc98542fe63857e3d1d7982db640cf38bab Mon Sep 17 00:00:00 2001 From: angelinalg <122562471+angelinalg@users.noreply.github.com> Date: Thu, 20 Apr 2023 20:36:11 -0700 Subject: [PATCH 045/424] [docs] replace tune.report with session.report (#34435) --- python/ray/tune/tests/tutorial.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/python/ray/tune/tests/tutorial.py b/python/ray/tune/tests/tutorial.py index ceb48366334f..7016ff18be92 100644 --- a/python/ray/tune/tests/tutorial.py +++ b/python/ray/tune/tests/tutorial.py @@ -12,6 +12,7 @@ import torch.nn.functional as F from ray import air, tune +from ray.air import session from ray.tune.schedulers import ASHAScheduler # __tutorial_imports_end__ # fmt: on @@ -105,7 +106,7 @@ def train_mnist(config): acc = test(model, test_loader) # Send the current training result back to Tune - tune.report(mean_accuracy=acc) + session.report(mean_accuracy=acc) if i % 5 == 0: # This saves the model to the trial directory From b6b84d4dea18926593b0569e97581833b606551a Mon Sep 17 00:00:00 2001 From: Chen Shen Date: Thu, 20 Apr 2023 21:41:15 -0700 Subject: [PATCH 046/424] [Ci] fix pip version to deflake minimal install 3.10 see if the test failure is caused by pip version upgrade --- ci/env/install-dependencies.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ci/env/install-dependencies.sh b/ci/env/install-dependencies.sh index 6bf7e4959450..ec15821091de 100755 --- a/ci/env/install-dependencies.sh +++ b/ci/env/install-dependencies.sh @@ -233,7 +233,7 @@ install_upgrade_pip() { fi if "${python}" -m pip --version || "${python}" -m ensurepip; then # Configure pip if present - "${python}" -m pip install --upgrade pip + "${python}" -m pip install --upgrade "pip!=23.1" # If we're in a CI environment, do some configuration if [ "${CI-}" = true ]; then From b4c20c2fef029e9484873b94dde110bf84543d0d Mon Sep 17 00:00:00 2001 From: Chen Shen 
Date: Thu, 20 Apr 2023 23:36:23 -0700 Subject: [PATCH 047/424] [CI] fix virtualenv version to deflake linux://python/ray/tests:test_runtime_env_complicated (#34650) Looks the virtualenv has been upgraded between the success and failed test. --- python/requirements.txt | 2 +- python/setup.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/python/requirements.txt b/python/requirements.txt index fb0ea7d1698d..e8227a403996 100644 --- a/python/requirements.txt +++ b/python/requirements.txt @@ -18,7 +18,7 @@ pyyaml aiosignal frozenlist requests -virtualenv>=20.0.24 +virtualenv>=20.0.24, < 20.21.1 # Python version-specific requirements dataclasses; python_version < '3.7' diff --git a/python/setup.py b/python/setup.py index 6cb8a41be1d5..0aa569b5ef63 100644 --- a/python/setup.py +++ b/python/setup.py @@ -330,7 +330,7 @@ def get_packages(self): # Light weight requirement, can be replaced with "typing" once # we deprecate Python 3.7 (this will take a while). "typing_extensions; python_version < '3.8'", - "virtualenv>=20.0.24", # For pip runtime env. + "virtualenv >=20.0.24, < 20.21.1", # For pip runtime env. ] From cb8572751c9bcf63a14df36fc018b9322aad4b8e Mon Sep 17 00:00:00 2001 From: SangBin Cho Date: Fri, 21 Apr 2023 16:17:27 +0900 Subject: [PATCH 048/424] [Syncer] Remove spammy logs. (#34654) --- src/ray/common/ray_syncer/ray_syncer.cc | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/ray/common/ray_syncer/ray_syncer.cc b/src/ray/common/ray_syncer/ray_syncer.cc index 3f56ffc94550..1ebbb1793e23 100644 --- a/src/ray/common/ray_syncer/ray_syncer.cc +++ b/src/ray/common/ray_syncer/ray_syncer.cc @@ -208,8 +208,9 @@ void RaySyncer::Connect(const std::string &node_id, [this, channel](const std::string &node_id, bool restart) { sync_reactors_.erase(node_id); if (restart) { - RAY_LOG(INFO) << "Connection is broken. Reconnect to node: " - << NodeID::FromBinary(node_id); + RAY_LOG_EVERY_MS(INFO, 10 * 1000) + << "Connection is broken. 
Reconnect to node: " + << NodeID::FromBinary(node_id); Connect(node_id, channel); } }, From ea408a3e9e8d6c0ea7ebcdb9e6afeedaa6914c04 Mon Sep 17 00:00:00 2001 From: Kai Fricke Date: Fri, 21 Apr 2023 13:06:45 +0100 Subject: [PATCH 049/424] [ci/release] GCE variants for Alpa, Golden notebooks, Lightning, Horovod, Workspace templates (#34565) Signed-off-by: Kai Fricke --- .../configs/compute/cpu/gcp_large.yaml | 6 + .../configs/compute/cpu/gcp_small.yaml | 3 + .../configs/compute/gpu/gcp_large.yaml | 6 + .../configs/compute/gpu/gcp_small.yaml | 4 + ...p3_16xlarge.yaml => gpu_1x8_v100_aws.yaml} | 0 release/alpa_tests/gpu_1x8_v100_gce.yaml | 20 ++++ ...g4dn_12xlarge.yaml => gpu_2x4_t4_aws.yaml} | 0 release/alpa_tests/gpu_2x4_t4_gce.yaml | 25 +++++ release/alpa_tests/run_inference_opt_30b.sh | 34 +++++- release/alpa_tests/run_train_opt_2_7b.sh | 41 ++++++- .../{gpu_tpl.yaml => gpu_tpl_aws.yaml} | 0 .../golden_notebook_tests/gpu_tpl_gce.yaml | 17 +++ ...{compute_tpl.yaml => compute_tpl_aws.yaml} | 0 release/lightning_tests/compute_tpl_gce.yaml | 24 ++++ release/ray_release/buildkite/concurrency.py | 1 + release/ray_release/config.py | 20 ++++ release/ray_release/template.py | 7 +- release/release_tests.yaml | 106 ++++++++++++++++-- ...{compute_tpl.yaml => compute_tpl_aws.yaml} | 0 .../train_tests/horovod/compute_tpl_gce.yaml | 25 +++++ 20 files changed, 325 insertions(+), 14 deletions(-) rename release/alpa_tests/{1_p3_16xlarge.yaml => gpu_1x8_v100_aws.yaml} (100%) create mode 100644 release/alpa_tests/gpu_1x8_v100_gce.yaml rename release/alpa_tests/{2_g4dn_12xlarge.yaml => gpu_2x4_t4_aws.yaml} (100%) create mode 100644 release/alpa_tests/gpu_2x4_t4_gce.yaml rename release/golden_notebook_tests/{gpu_tpl.yaml => gpu_tpl_aws.yaml} (100%) create mode 100644 release/golden_notebook_tests/gpu_tpl_gce.yaml rename release/lightning_tests/{compute_tpl.yaml => compute_tpl_aws.yaml} (100%) create mode 100644 release/lightning_tests/compute_tpl_gce.yaml rename 
release/train_tests/horovod/{compute_tpl.yaml => compute_tpl_aws.yaml} (100%) create mode 100644 release/train_tests/horovod/compute_tpl_gce.yaml diff --git a/doc/source/templates/configs/compute/cpu/gcp_large.yaml b/doc/source/templates/configs/compute/cpu/gcp_large.yaml index c31a8a1dadb2..918b5ccd349f 100644 --- a/doc/source/templates/configs/compute/cpu/gcp_large.yaml +++ b/doc/source/templates/configs/compute/cpu/gcp_large.yaml @@ -1,3 +1,9 @@ +cloud_id: {{ env["ANYSCALE_CLOUD_ID"] }} + +region: us-west1 +allowed_azs: + - us-west1-b + # 8 n2-standard-8 nodes --> 64 CPUs head_node_type: name: head_node_type diff --git a/doc/source/templates/configs/compute/cpu/gcp_small.yaml b/doc/source/templates/configs/compute/cpu/gcp_small.yaml index 4ea97f92161b..b5ad8b29c217 100644 --- a/doc/source/templates/configs/compute/cpu/gcp_small.yaml +++ b/doc/source/templates/configs/compute/cpu/gcp_small.yaml @@ -1,5 +1,8 @@ cloud_id: {{ env["ANYSCALE_CLOUD_ID"] }} + region: us-west1 +allowed_azs: + - us-west1-b # 1 n1-standard-8 node --> 8 CPUs head_node_type: diff --git a/doc/source/templates/configs/compute/gpu/gcp_large.yaml b/doc/source/templates/configs/compute/gpu/gcp_large.yaml index 0de76bd875a2..146d633d57b3 100644 --- a/doc/source/templates/configs/compute/gpu/gcp_large.yaml +++ b/doc/source/templates/configs/compute/gpu/gcp_large.yaml @@ -1,3 +1,9 @@ +cloud_id: {{ env["ANYSCALE_CLOUD_ID"] }} + +region: us-west1 +allowed_azs: + - us-west1-b + # 4 n1-standard-16-nvidia-tesla-t4-1 nodes --> 64 CPUs, 4 GPUs head_node_type: name: head_node_type diff --git a/doc/source/templates/configs/compute/gpu/gcp_small.yaml b/doc/source/templates/configs/compute/gpu/gcp_small.yaml index 9fddd463b62b..b57d71bfe75e 100644 --- a/doc/source/templates/configs/compute/gpu/gcp_small.yaml +++ b/doc/source/templates/configs/compute/gpu/gcp_small.yaml @@ -1,5 +1,9 @@ cloud_id: {{ env["ANYSCALE_CLOUD_ID"] }} + region: us-west1 +allowed_azs: + - us-west1-b + # 1 n1-standard-16-nvidia-tesla-t4-1 
node --> 16 CPUs, 1 GPU head_node_type: diff --git a/release/alpa_tests/1_p3_16xlarge.yaml b/release/alpa_tests/gpu_1x8_v100_aws.yaml similarity index 100% rename from release/alpa_tests/1_p3_16xlarge.yaml rename to release/alpa_tests/gpu_1x8_v100_aws.yaml diff --git a/release/alpa_tests/gpu_1x8_v100_gce.yaml b/release/alpa_tests/gpu_1x8_v100_gce.yaml new file mode 100644 index 000000000000..4d5a6a692a70 --- /dev/null +++ b/release/alpa_tests/gpu_1x8_v100_gce.yaml @@ -0,0 +1,20 @@ +cloud_id: {{env["ANYSCALE_CLOUD_ID"]}} +region: us-west1 +allowed_azs: + - us-west1-b + +max_workers: 1 + +head_node_type: + name: head_node + instance_type: n1-highmem-64-nvidia-tesla-v100-8 + +worker_node_types: [] + +gcp_advanced_configurations_json: + instance_properties: + disks: + - boot: true + auto_delete: true + initialize_params: + disk_size_gb: 500 diff --git a/release/alpa_tests/2_g4dn_12xlarge.yaml b/release/alpa_tests/gpu_2x4_t4_aws.yaml similarity index 100% rename from release/alpa_tests/2_g4dn_12xlarge.yaml rename to release/alpa_tests/gpu_2x4_t4_aws.yaml diff --git a/release/alpa_tests/gpu_2x4_t4_gce.yaml b/release/alpa_tests/gpu_2x4_t4_gce.yaml new file mode 100644 index 000000000000..c831b68a400f --- /dev/null +++ b/release/alpa_tests/gpu_2x4_t4_gce.yaml @@ -0,0 +1,25 @@ +cloud_id: {{env["ANYSCALE_CLOUD_ID"]}} +region: us-west1 +allowed_azs: + - us-west1-b + +max_workers: 2 + +head_node_type: + name: head_node + instance_type: n1-standard-64-nvidia-tesla-t4-4 + +worker_node_types: + - name: worker_node + instance_type: n1-standard-64-nvidia-tesla-t4-4 + min_workers: 1 + max_workers: 1 + use_spot: false + +gcp_advanced_configurations_json: + instance_properties: + disks: + - boot: true + auto_delete: true + initialize_params: + disk_size_gb: 500 diff --git a/release/alpa_tests/run_inference_opt_30b.sh b/release/alpa_tests/run_inference_opt_30b.sh index 24d5cc261450..2430fcf78da0 100755 --- a/release/alpa_tests/run_inference_opt_30b.sh +++ 
b/release/alpa_tests/run_inference_opt_30b.sh @@ -3,9 +3,35 @@ # Integration test for Alpa and Ray. # Exit if any of the test commands fail. -set -x -e pipeline +set -x -e -o pipefail + +# Parse command line args +STORAGE_PROVIDER="aws" + +while [[ $# -gt 0 ]] +do + key="$1" + case $key in + --storage) + STORAGE_PROVIDER="$2" + shift + shift + ;; + *) # Unknown option + echo "Unknown option: $1" + exit 1 + ;; + esac +done + +if [ "$STORAGE_PROVIDER" != "aws" ] && [ "$STORAGE_PROVIDER" != "gcs" ]; then + echo "Invalid storage provider: $STORAGE_PROVIDER" + exit 1 +fi S3_MODEL_DIR=s3://air-example-data-2/alpa/opt/models/models--facebook--opt-30b/ +GS_MODEL_DIR=gs://air-example-data/alpa/opt/models/models--facebook--opt-30b/ + LOCAL_MODEL_DIR=/tmp/opt-30b/ mkdir -p $LOCAL_MODEL_DIR @@ -13,7 +39,11 @@ mkdir -p $LOCAL_MODEL_DIR # Download weights and tokenizer. Excluding the original # FLAX weights. We only need the alpa converted np weights # for this test. -aws s3 sync $S3_MODEL_DIR $LOCAL_MODEL_DIR --exclude="*.msgpack" +if [ "$STORAGE_PROVIDER" = "aws" ]; then + aws s3 sync $S3_MODEL_DIR $LOCAL_MODEL_DIR --exclude="*.msgpack" +else + gsutil rsync -r -x ".*\.msgpack" $GS_MODEL_DIR $LOCAL_MODEL_DIR +fi # Run training. python inference_opt_30b.py --model_dir $LOCAL_MODEL_DIR diff --git a/release/alpa_tests/run_train_opt_2_7b.sh b/release/alpa_tests/run_train_opt_2_7b.sh index 96885235bad0..90e43a94d2dc 100755 --- a/release/alpa_tests/run_train_opt_2_7b.sh +++ b/release/alpa_tests/run_train_opt_2_7b.sh @@ -3,19 +3,54 @@ # Integration test for Alpa and Ray. # Exit if any of the test commands fail. 
-set -x -e pipeline +set -x -e -o pipefail -TRAIN_FILE=https://air-example-data-2.s3.us-west-2.amazonaws.com/alpa/alllines.txt +# Parse command line args +STORAGE_PROVIDER="aws" + +while [[ $# -gt 0 ]] +do + key="$1" + case $key in + --storage) + STORAGE_PROVIDER="$2" + shift + shift + ;; + *) # Unknown option + echo "Unknown option: $1" + exit 1 + ;; + esac +done + +if [ "$STORAGE_PROVIDER" != "aws" ] && [ "$STORAGE_PROVIDER" != "gcs" ]; then + echo "Invalid storage provider: $STORAGE_PROVIDER" + exit 1 +fi + +S3_TRAIN_FILE="https://air-example-data-2.s3.us-west-2.amazonaws.com/alpa/alllines.txt" S3_MODEL_DIR=s3://air-example-data-2/alpa/opt/models/models--facebook--opt-2.7b/ + +GS_TRAIN_FILE="https://storage.googleapis.com/air-example-data/alpa/alllines.txt" +GS_MODEL_DIR=gs://air-example-data/alpa/opt/models/models--facebook--opt-2.7b/ + LOCAL_MODEL_DIR=/tmp/opt-2.7b/ OUTPUT_DIR=/tmp/alpa_outputs/ mkdir -p $LOCAL_MODEL_DIR mkdir -p $OUTPUT_DIR + # Download weights and tokenizer. # We only need the FLAX weights to run this test. -aws s3 sync $S3_MODEL_DIR $LOCAL_MODEL_DIR --exclude="*.bin,*.h5" +if [ "$STORAGE_PROVIDER" = "aws" ]; then + aws s3 sync $S3_MODEL_DIR $LOCAL_MODEL_DIR --exclude="*.bin,*.h5" + TRAIN_FILE=$S3_TRAIN_FILE +else + gsutil rsync -r -x ".*\.bin|.*\.h5" $GS_MODEL_DIR $LOCAL_MODEL_DIR + TRAIN_FILE=$GS_TRAIN_FILE +fi # Run training. # 2 instances, 4 GPUs each. 
So set the pipeline parallelism to 2, diff --git a/release/golden_notebook_tests/gpu_tpl.yaml b/release/golden_notebook_tests/gpu_tpl_aws.yaml similarity index 100% rename from release/golden_notebook_tests/gpu_tpl.yaml rename to release/golden_notebook_tests/gpu_tpl_aws.yaml diff --git a/release/golden_notebook_tests/gpu_tpl_gce.yaml b/release/golden_notebook_tests/gpu_tpl_gce.yaml new file mode 100644 index 000000000000..ff65fb25f5fd --- /dev/null +++ b/release/golden_notebook_tests/gpu_tpl_gce.yaml @@ -0,0 +1,17 @@ +cloud_id: {{env["ANYSCALE_CLOUD_ID"]}} +region: us-west1 +allowed_azs: + - us-west1-b + +max_workers: 2 + +head_node_type: + name: head_node + instance_type: n1-standard-4 + +worker_node_types: + - name: worker_node + instance_type: n1-standard-32-nvidia-tesla-t4-2 + min_workers: 2 + max_workers: 2 + use_spot: true diff --git a/release/lightning_tests/compute_tpl.yaml b/release/lightning_tests/compute_tpl_aws.yaml similarity index 100% rename from release/lightning_tests/compute_tpl.yaml rename to release/lightning_tests/compute_tpl_aws.yaml diff --git a/release/lightning_tests/compute_tpl_gce.yaml b/release/lightning_tests/compute_tpl_gce.yaml new file mode 100644 index 000000000000..22697c374da9 --- /dev/null +++ b/release/lightning_tests/compute_tpl_gce.yaml @@ -0,0 +1,24 @@ +# 3 x g4dn.4xlarge = 48 cpus + 3 gpus total + +cloud_id: {{env["ANYSCALE_CLOUD_ID"]}} +region: us-west1 +allowed_azs: + - us-west1-b + +head_node_type: + name: head_node + instance_type: n1-standard-16-nvidia-tesla-t4-1 + +worker_node_types: + - name: worker_node + instance_type: n1-standard-16-nvidia-tesla-t4-1 + min_workers: 2 + max_workers: 2 + use_spot: false + +#aws: +# TagSpecifications: +# - ResourceType: "instance" +# Tags: +# - Key: ttl-hours +# Value: '24' diff --git a/release/ray_release/buildkite/concurrency.py b/release/ray_release/buildkite/concurrency.py index 185580505d65..5f7e21b446b4 100644 --- a/release/ray_release/buildkite/concurrency.py +++ 
b/release/ray_release/buildkite/concurrency.py @@ -61,6 +61,7 @@ "n1-standard-16-nvidia-tesla-t4-1": (16, 1), "n1-standard-64-nvidia-tesla-t4-4": (64, 4), "n1-standard-32-nvidia-tesla-t4-2": (32, 2), + "n1-highmem-64-nvidia-tesla-v100-8": {64, 8}, "n1-highmem-96-nvidia-tesla-v100-8": {96, 8}, } diff --git a/release/ray_release/config.py b/release/ray_release/config.py index 8697f992d7d0..02a0422d148a 100644 --- a/release/ray_release/config.py +++ b/release/ray_release/config.py @@ -135,6 +135,13 @@ def validate_release_test_collection( ) num_errors += 1 + error = validate_test_cluster_env(test) + if error: + logger.error( + f"Failed to validate test {test.get('name', '(unnamed)')}: {error}" + ) + num_errors += 1 + if num_errors > 0: raise ReleaseTestConfigError( f"Release test configuration error: Found {num_errors} test " @@ -180,6 +187,19 @@ def validate_cluster_compute(cluster_compute: Dict[str, Any]) -> Optional[str]: return None +def validate_test_cluster_env(test: Test) -> Optional[str]: + from ray_release.template import get_cluster_env_path + + cluster_env_path = get_cluster_env_path(test) + + if not os.path.exists(cluster_env_path): + raise ReleaseTestConfigError( + f"Cannot load yaml template from {cluster_env_path}: Path not found." 
+ ) + + return None + + def validate_aws_config(aws_config: Dict[str, Any]) -> Optional[str]: for block_device_mapping in aws_config.get("BlockDeviceMappings", []): ebs = block_device_mapping.get("Ebs") diff --git a/release/ray_release/template.py b/release/ray_release/template.py index edb81d444607..8444d4970d7f 100644 --- a/release/ray_release/template.py +++ b/release/ray_release/template.py @@ -98,11 +98,16 @@ def render_yaml_template(template: str, env: Optional[Dict] = None): ) from e -def load_test_cluster_env(test: "Test", ray_wheels_url: str) -> Optional[Dict]: +def get_cluster_env_path(test: "Test") -> str: cluster_env_file = test["cluster"]["cluster_env"] cluster_env_path = os.path.join( RELEASE_PACKAGE_DIR, test.get("working_dir", ""), cluster_env_file ) + return cluster_env_path + + +def load_test_cluster_env(test: "Test", ray_wheels_url: str) -> Optional[Dict]: + cluster_env_path = get_cluster_env_path(test) env = populate_cluster_env_variables(test, ray_wheels_url=ray_wheels_url) diff --git a/release/release_tests.yaml b/release/release_tests.yaml index 3a86d897510a..22cf106ae73c 100644 --- a/release/release_tests.yaml +++ b/release/release_tests.yaml @@ -849,6 +849,15 @@ script: jupyter nbconvert --TagRemovePreprocessor.remove_input_tags='large' --to script --output _test batch_inference.ipynb && ipython _test.py + variations: + - __suffix__: aws + - __suffix__: gce + env: gce + frequency: manual + cluster: + cluster_env: ../configs/release_test_cluster_env.yaml + cluster_compute: ../configs/compute/gpu/gcp_small.yaml + - name: workspace_template_small_02_many_model_training group: Workspace templates @@ -866,6 +875,14 @@ && jupyter nbconvert --TagRemovePreprocessor.remove_input_tags='large' --to script --output _test many_model_training.ipynb && ipython _test.py + variations: + - __suffix__: aws + - __suffix__: gce + env: gce + frequency: manual + cluster: + cluster_env: ../configs/release_test_cluster_env.yaml + cluster_compute: 
../configs/compute/cpu/gcp_small.yaml - name: workspace_template_small_03_serving_stable_diffusion group: Workspace templates @@ -883,6 +900,14 @@ && jupyter nbconvert --TagRemovePreprocessor.remove_input_tags='large' --to script --output _test serving_stable_diffusion.ipynb && ipython _test.py + variations: + - __suffix__: aws + - __suffix__: gce + env: gce + frequency: manual + cluster: + cluster_env: ../configs/release_test_cluster_env.yaml + cluster_compute: ../configs/compute/gpu/gcp_small.yaml ####################### # XGBoost release tests @@ -1362,7 +1387,7 @@ cluster: cluster_env: app_config.yaml - cluster_compute: compute_tpl.yaml + cluster_compute: compute_tpl_aws.yaml run: timeout: 1200 @@ -1370,6 +1395,15 @@ wait_for_nodes: num_nodes: 3 + variations: + - __suffix__: aws + - __suffix__: gce + env: gce + frequency: manual + cluster: + cluster_env: app_config.yaml + cluster_compute: compute_tpl_gce.yaml + alert: default - name: lightning_gpu_tune_3x16_3x1 @@ -1381,7 +1415,7 @@ cluster: cluster_env: app_config.yaml - cluster_compute: compute_tpl.yaml + cluster_compute: compute_tpl_aws.yaml run: timeout: 1200 @@ -1389,6 +1423,15 @@ wait_for_nodes: num_nodes: 3 + variations: + - __suffix__: aws + - __suffix__: gce + env: gce + frequency: manual + cluster: + cluster_env: app_config.yaml + cluster_compute: compute_tpl_gce.yaml + alert: default ####################### @@ -2100,7 +2143,7 @@ cluster: cluster_env: torch_tune_serve_app_config.yaml - cluster_compute: gpu_tpl.yaml + cluster_compute: gpu_tpl_aws.yaml run: timeout: 600 @@ -2108,6 +2151,15 @@ wait_for_nodes: num_nodes: 2 + variations: + - __suffix__: aws + - __suffix__: gce + env: gce + frequency: manual + cluster: + cluster_env: torch_tune_serve_app_config.yaml + cluster_compute: gpu_tpl_gce.yaml + alert: default @@ -3211,7 +3263,7 @@ cluster: cluster_env: app_config.yaml - cluster_compute: compute_tpl.yaml + cluster_compute: compute_tpl_aws.yaml run: timeout: 3000 @@ -3220,6 +3272,14 @@ 
wait_for_nodes: num_nodes: 2 + variations: + - __suffix__: aws + - __suffix__: gce + env: gce + frequency: manual + cluster: + cluster_env: app_config.yaml + cluster_compute: compute_tpl_gce.yaml alert: default @@ -3236,15 +3296,30 @@ cluster: cluster_env: app_config.yaml - cluster_compute: 2_g4dn_12xlarge.yaml + cluster_compute: gpu_2x4_t4_aws.yaml run: timeout: 3600 - script: bash run_train_opt_2_7b.sh + script: bash run_train_opt_2_7b.sh --storage aws wait_for_nodes: num_nodes: 2 + variations: + - __suffix__: aws + - __suffix__: gce + env: gce + frequency: manual + cluster: + cluster_env: app_config.yaml + cluster_compute: gpu_2x4_t4_gce.yaml + run: + timeout: 3600 + script: bash run_train_opt_2_7b.sh --storage gcs + + wait_for_nodes: + num_nodes: 2 + alert: default - name: alpa_opt_30b_inference @@ -3256,15 +3331,30 @@ cluster: cluster_env: app_config.yaml - cluster_compute: 1_p3_16xlarge.yaml + cluster_compute: gpu_1x8_v100_aws.yaml run: timeout: 3600 - script: bash run_inference_opt_30b.sh + script: bash run_inference_opt_30b.sh --storage aws wait_for_nodes: num_nodes: 1 + variations: + - __suffix__: aws + - __suffix__: gce + env: gce + frequency: manual + cluster: + cluster_env: app_config.yaml + cluster_compute: gpu_1x8_v100_gce.yaml + run: + timeout: 3600 + script: bash run_inference_opt_30b.sh --storage gcs + + wait_for_nodes: + num_nodes: 1 + alert: default ######################## diff --git a/release/train_tests/horovod/compute_tpl.yaml b/release/train_tests/horovod/compute_tpl_aws.yaml similarity index 100% rename from release/train_tests/horovod/compute_tpl.yaml rename to release/train_tests/horovod/compute_tpl_aws.yaml diff --git a/release/train_tests/horovod/compute_tpl_gce.yaml b/release/train_tests/horovod/compute_tpl_gce.yaml new file mode 100644 index 000000000000..31730aac6e79 --- /dev/null +++ b/release/train_tests/horovod/compute_tpl_gce.yaml @@ -0,0 +1,25 @@ +cloud_id: {{env["ANYSCALE_CLOUD_ID"]}} +region: us-west1 +allowed_azs: + - 
us-west1-b + +max_workers: 1 + +head_node_type: + name: head_node + # 4 cpus, 16G mem, $0.224/hr on demand + instance_type: n1-standard-4 + +worker_node_types: + - name: worker_node + instance_type: n1-standard-4 + max_workers: 1 + min_workers: 1 + use_spot: false + +#aws: +# TagSpecifications: +# - ResourceType: "instance" +# Tags: +# - Key: ttl-hours +# Value: '24' From a2f765f82a1c235afa48658bcb08eef596cf411e Mon Sep 17 00:00:00 2001 From: Kai Fricke Date: Fri, 21 Apr 2023 13:07:03 +0100 Subject: [PATCH 050/424] [docs][tune] Fix Tune tutorial (#34660) One line fix for bug introduced in #34435 Signed-off-by: Kai Fricke --- python/ray/tune/tests/tutorial.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/ray/tune/tests/tutorial.py b/python/ray/tune/tests/tutorial.py index 7016ff18be92..d3b217ca7f8f 100644 --- a/python/ray/tune/tests/tutorial.py +++ b/python/ray/tune/tests/tutorial.py @@ -106,7 +106,7 @@ def train_mnist(config): acc = test(model, test_loader) # Send the current training result back to Tune - session.report(mean_accuracy=acc) + session.report({"mean_accuracy": acc}) if i % 5 == 0: # This saves the model to the trial directory From 563bf914f7a43d1d152c0301afa20bf4a8a6897d Mon Sep 17 00:00:00 2001 From: Daniel Yeh <46629671+Dan-Yeh@users.noreply.github.com> Date: Fri, 21 Apr 2023 15:57:25 +0200 Subject: [PATCH 051/424] [Autoscaler][gcp] parallel terminate nodes (#34455) Why are these changes needed? ray down takes a lot of time when using GCPNodeProvider as stated in #26239 because GCPNodeProvider uses the serial implementation of terminate_nodes from parent class NodeProvider and also uses a coarse lock in its terminate_node which prevents executing it in a concurrent fashion (not really sure coz I'm new to this). 
add a ThreadPoolExecutor in GCPNodeProvider.terminate_nodes for parallel execution of terminate_node; use fine-grained locks which assign one RLock per node_id; add unit tests. Why not go with the suggestions (batch APIs and a non-blocking version of terminate_node) mentioned in #26239? As a novice, I think both solutions would break the Liskov Substitution Principle, and also those who already use terminate_node(s) would need to add await.
+ ) + else: + raise http_error from None + return result + @_retry def terminate_node(self, node_id: str): with self.lock: - resource = self._get_resource_depending_on_node_name(node_id) - try: - result = resource.delete_instance( - node_id=node_id, - ) - except googleapiclient.errors.HttpError as http_error: - if http_error.resp.status == 404: - logger.warning( - f"Tried to delete the node with id {node_id} " - "but it was already gone." - ) - else: - raise http_error from None - return result + self._thread_unsafe_terminate_node(node_id) + + def terminate_nodes(self, node_ids: List[str]): + if not node_ids: + return None + + with self.lock, concurrent.futures.ThreadPoolExecutor() as executor: + result = executor.map(self._thread_unsafe_terminate_node, node_ids) + + return list(result) @_retry def _get_node(self, node_id: str) -> GCPNode: diff --git a/python/ray/tests/gcp/test_gcp_node_provider.py b/python/ray/tests/gcp/test_gcp_node_provider.py index da27cc3be02e..7c551455a29f 100644 --- a/python/ray/tests/gcp/test_gcp_node_provider.py +++ b/python/ray/tests/gcp/test_gcp_node_provider.py @@ -35,6 +35,33 @@ def __init__(self, provider_config: dict, cluster_name: str): assert create_node_return_value == expected_return_value +def test_terminate_nodes(): + mock_node_config = {"machineType": "n2-standard-8"} + node_type = GCPNodeType.COMPUTE.value + id1, id2 = f"instance-id1-{node_type}", f"instance-id2-{node_type}" + terminate_node_ids = [id1, id2] + mock_resource = MagicMock() + mock_resource.create_instances.return_value = [ + ({"dict": 1}, id1), + ({"dict": 2}, id2), + ] + mock_resource.delete_instance.return_value = "test" + expected_terminate_nodes_result_len = 2 + + def __init__(self, provider_config: dict, cluster_name: str): + self.lock = RLock() + self.cached_nodes: Dict[str, GCPNode] = {} + self.resources: Dict[GCPNodeType, GCPResource] = {} + self.resources[GCPNodeType.COMPUTE] = mock_resource + + with patch.object(GCPNodeProvider, "__init__", __init__): 
+ node_provider = GCPNodeProvider({}, "") + node_provider.create_node(mock_node_config, {}, 1) + create_results = node_provider.terminate_nodes(terminate_node_ids) + + assert len(create_results) == expected_terminate_nodes_result_len + + @pytest.mark.parametrize( "test_case", [ From 61747b079164b46942e73a0c6271ed6c7ae15c06 Mon Sep 17 00:00:00 2001 From: Justin Yu Date: Fri, 21 Apr 2023 07:55:58 -0700 Subject: [PATCH 052/424] [Tune] Enable `tune.ExperimentAnalysis` to pull experiment checkpoint files from the cloud if needed (#34461) For post-experiment analysis of a Tune run that uploaded results and checkpoints to S3, the node where analysis is being done may not contain the experiment directory. In this case, the experiment checkpoint + other files (json + csv result files and the param space) should be pulled to a temp directory in the local filesys. While this adds functionality to `ExperimentAnalysis`, it also provides the functionality to: 1. `ResultGrid(ExperimentAnalysis("s3://..."))`, which is what we do in the `tuner.fit()` 2. `Tuner.restore("s3://...").get_results()` Point 2 was the error that flagged this issue in the first place. This PR also cleans up some confusing trial metadata loading code in `ExperimentAnalysis`. 
Signed-off-by: Justin Yu --- .../ray/air/_internal/checkpoint_manager.py | 39 ++- python/ray/air/_internal/remote_storage.py | 35 ++- python/ray/air/_internal/uri_utils.py | 28 ++ python/ray/tune/BUILD | 6 +- .../ray/tune/analysis/experiment_analysis.py | 275 ++++++++++++++---- python/ray/tune/execution/experiment_state.py | 23 +- .../ray/tune/execution/ray_trial_executor.py | 8 - python/ray/tune/execution/trial_runner.py | 11 +- python/ray/tune/execution/tune_controller.py | 7 - python/ray/tune/impl/tuner_internal.py | 2 +- python/ray/tune/result_grid.py | 23 +- python/ray/tune/tests/conftest.py | 28 ++ .../tune/tests/test_experiment_analysis.py | 68 +++-- python/ray/tune/tests/test_result_grid.py | 2 +- python/ray/tune/tests/test_syncer.py | 22 -- python/ray/tune/tests/utils/experiment.py | 26 +- python/ray/tune/trainable/trainable.py | 4 +- python/ray/tune/trainable/util.py | 16 +- python/ray/tune/tune.py | 3 +- 19 files changed, 447 insertions(+), 179 deletions(-) diff --git a/python/ray/air/_internal/checkpoint_manager.py b/python/ray/air/_internal/checkpoint_manager.py index a1c89b02596c..096326f57bce 100644 --- a/python/ray/air/_internal/checkpoint_manager.py +++ b/python/ray/air/_internal/checkpoint_manager.py @@ -55,11 +55,6 @@ class _TrackedCheckpoint: into `"evaluation/episode_reward_mean"`. node_ip: IP of the node where the checkpoint was generated. Defaults to the current node. - local_dir_to_remote_uri_fn: Function that takes in this checkpoint's local - directory path and returns the corresponding remote URI in the cloud. - This should only be specified if the data was synced to cloud. - Only applied during conversion to AIR checkpoint and only - if ``dir_or_data`` is or resolves to a directory path. 
""" def __init__( @@ -69,16 +64,12 @@ def __init__( checkpoint_id: Optional[int] = None, metrics: Optional[Dict] = None, node_ip: Optional[str] = None, - local_to_remote_path_fn: Optional[Callable[[str], str]] = None, ): from ray.tune.result import NODE_IP self.dir_or_data = dir_or_data self.id = checkpoint_id self.storage_mode = storage_mode - # This is a function because dir_or_data may be an object ref - # and we need to wait until its resolved first. - self.local_to_remote_path_fn = local_to_remote_path_fn self.metrics = flatten_dict(metrics) if metrics else {} self.node_ip = node_ip or self.metrics.get(NODE_IP, None) @@ -142,7 +133,31 @@ def delete( except Exception as e: logger.warning(f"Checkpoint deletion failed: {e}") - def to_air_checkpoint(self) -> Optional[Checkpoint]: + def to_air_checkpoint( + self, local_to_remote_path_fn: Optional[Callable[[str], str]] = None + ) -> Optional[Checkpoint]: + """Converter from a `_TrackedCheckpoint` to a `ray.air.Checkpoint`. + + This method Resolves the checkpoint data if it is an object reference. + + This method handles multiple types of checkpoint data: + - If the data is a string (local checkpoint path), this returns a + directory-backed checkpoint. + - If a `local_to_remote_path_fn` is provided, this converts + local path to a remote URI, then returns a URI-backed checkpoint. + - If the data is bytes or a dictionary, it returns an in-memory + bytes/dict-backed checkpoint. + + Args: + local_to_remote_path_fn: Function that takes in this checkpoint's local + directory path and returns the corresponding remote URI in the cloud. + This should only be specified if the data was synced to cloud. + Only applied during conversion to AIR checkpoint and only + if ``dir_or_data`` is or resolves to a directory path. + + Returns: + Checkpoint: The AIR checkpoint backed by the resolved data. 
+ """ from ray.tune.trainable.util import TrainableUtil checkpoint_data = self.dir_or_data @@ -158,9 +173,9 @@ def to_air_checkpoint(self) -> Optional[Checkpoint]: if isinstance(checkpoint_data, str): # Prefer cloud checkpoints - if self.local_to_remote_path_fn: + if local_to_remote_path_fn: checkpoint = Checkpoint.from_uri( - self.local_to_remote_path_fn(checkpoint_data) + local_to_remote_path_fn(checkpoint_data) ) else: try: diff --git a/python/ray/air/_internal/remote_storage.py b/python/ray/air/_internal/remote_storage.py index 5b14c919cc44..d76f0012ac39 100644 --- a/python/ray/air/_internal/remote_storage.py +++ b/python/ray/air/_internal/remote_storage.py @@ -102,25 +102,24 @@ def is_non_local_path_uri(uri: str) -> bool: _cached_fs = {} -def _is_local_path(uri: str) -> bool: - """Check if the path points to the local filesystem.""" - if len(uri) >= 1 and uri[0] == "/": - return True - +def is_local_path(path: str) -> bool: + """Check if a given path is a local path or a remote URI.""" if sys.platform == "win32": - return _is_local_windows_path(uri) - return False + return _is_local_windows_path(path) + scheme = urllib.parse.urlparse(path).scheme + return scheme in ("", "file") -def _is_local_windows_path(uri: str) -> bool: + +def _is_local_windows_path(path: str) -> bool: """Determines if path is a Windows file-system location.""" - if len(uri) >= 1 and uri[0] == "\\": + if len(path) >= 1 and path[0] == "\\": return True if ( - len(uri) >= 3 - and uri[1] == ":" - and (uri[2] == "/" or uri[2] == "\\") - and uri[0].isalpha() + len(path) >= 3 + and path[1] == ":" + and (path[2] == "/" or path[2] == "\\") + and path[0].isalpha() ): return True return False @@ -132,8 +131,9 @@ def get_fs_and_path( if not pyarrow: return None, None - if _is_local_path(uri): - # Append protocol such that the downstream operations work + scheme = urllib.parse.urlparse(uri).scheme + if is_local_path(uri) and not scheme: + # Append local filesys scheme such that the downstream 
operations work # properly on Linux and Windows. uri = "file://" + pathlib.Path(uri).as_posix() @@ -284,10 +284,13 @@ def download_from_uri(uri: str, local_path: str, filelock: bool = True): f"Hint: {fs_hint(uri)}" ) - _local_path = Path(local_path) + _local_path = Path(local_path).resolve() exists_before = _local_path.exists() if is_directory(uri): _local_path.mkdir(parents=True, exist_ok=True) + else: + _local_path.parent.mkdir(parents=True, exist_ok=True) + try: if filelock: with TempFileLock(f"{os.path.normpath(local_path)}.lock"): diff --git a/python/ray/air/_internal/uri_utils.py b/python/ray/air/_internal/uri_utils.py index c6222198b137..fd836e794ad4 100644 --- a/python/ray/air/_internal/uri_utils.py +++ b/python/ray/air/_internal/uri_utils.py @@ -42,6 +42,14 @@ def parent(self) -> "URI": assert self._path.parent != ".", f"{str(self)} has no valid parent URI" return URI(self._get_str_representation(self._parsed, self._path.parent)) + @property + def scheme(self) -> str: + return self._parsed.scheme + + @property + def path(self) -> str: + return str(self._path) + def __truediv__(self, path_to_append): assert isinstance(path_to_append, str) return URI( @@ -59,3 +67,23 @@ def __repr__(self): def __str__(self): return self._get_str_representation(self._parsed, self._path) + + +def _join_path_or_uri(base_path: str, path_to_join: str) -> str: + """Joins paths to form either a URI (w/ possible URL params) or a local path. 
+ + Example: + + >>> local_path = "/a/b" + >>> uri = "s3://bucket/a?scheme=http" + >>> path_to_join = "c/d" + >>> _join_path_or_uri(local_path, path_to_join) + '/a/b/c/d' + >>> _join_path_or_uri(uri, path_to_join) + 's3://bucket/a/c/d?scheme=http' + + """ + from ray.air._internal.remote_storage import is_local_path + + base_path_or_uri = Path(base_path) if is_local_path(base_path) else URI(base_path) + return str(base_path_or_uri / path_to_join) diff --git a/python/ray/tune/BUILD b/python/ray/tune/BUILD index a24da02b83f1..dc2bc678e646 100644 --- a/python/ray/tune/BUILD +++ b/python/ray/tune/BUILD @@ -121,7 +121,7 @@ py_test( name = "test_experiment_analysis", size = "medium", srcs = ["tests/test_experiment_analysis.py"], - deps = [":tune_lib"], + deps = [":tune_lib", ":conftest"], tags = ["team:ml", "exclusive"], ) @@ -236,7 +236,7 @@ py_test( name = "test_result_grid", size = "medium", srcs = ["tests/test_result_grid.py"], - deps = [":tune_lib"], + deps = [":tune_lib", ":conftest"], tags = ["team:ml", "exclusive"], ) @@ -308,7 +308,7 @@ py_test( name = "test_syncer", size = "medium", srcs = ["tests/test_syncer.py"], - deps = [":tune_lib"], + deps = [":tune_lib", ":conftest"], tags = ["team:ml", "exclusive"], ) diff --git a/python/ray/tune/analysis/experiment_analysis.py b/python/ray/tune/analysis/experiment_analysis.py index cb432d134e58..f3a680ed8374 100644 --- a/python/ray/tune/analysis/experiment_analysis.py +++ b/python/ray/tune/analysis/experiment_analysis.py @@ -1,11 +1,19 @@ import json import logging import os +import tempfile import traceback +from typing import Any, Dict, List, Optional, Tuple, Union from numbers import Number from pathlib import Path -from typing import Any, Dict, List, Optional, Tuple, Union +from ray.air._internal.remote_storage import ( + download_from_uri, + is_directory, + is_local_path, + list_at_uri, +) +from ray.air._internal.uri_utils import _join_path_or_uri, URI from ray.air.checkpoint import Checkpoint from ray.tune.syncer 
import SyncConfig from ray.tune.utils import flatten_dict @@ -80,12 +88,29 @@ def __init__( # Deprecate: Raise in 2.6, remove in 2.7 sync_config: Optional[SyncConfig] = None, ): + self._local_experiment_path: str = None + self._remote_experiment_path: Optional[str] = None + + # If the user passes in a remote checkpoint path, + # Set the remote experiment path to this path, and set + # the local experiment path to a temp directory. + if not is_local_path(experiment_checkpoint_path): + self._remote_experiment_path = experiment_checkpoint_path + + # Create a temp directory to store downloaded checkpoint files if + # they are pulled from a remote `experiment_checkpoint_path`. + self._local_experiment_path = tempfile.TemporaryDirectory( + prefix="experiment_analysis_" + ).name + os.makedirs(self._local_experiment_path, exist_ok=True) + # Load the experiment checkpoints and their parent paths. # This is important for when experiment folders have been # relocated (e.g. from a ray cluster to local disk or GCS/S3)- self._experiment_states = [] self._checkpoints_and_paths: List[Tuple[dict, os.PathLike]] = [] self._load_checkpoints(experiment_checkpoint_path) + assert self._checkpoints_and_paths self.trials = trials @@ -102,7 +127,18 @@ def __init__( # If only a mode was passed, use anonymous metric self.default_metric = DEFAULT_METRIC - self._local_experiment_path = self._checkpoints_and_paths[0][1] + # TODO(ml-team): Remove in 2.7 along with sync_config parameter + if sync_config and sync_config.upload_dir: + remote_storage_path = sync_config.upload_dir + + if not self._local_experiment_path: + self._local_experiment_path = str(self._checkpoints_and_paths[0][1]) + + if not self._remote_experiment_path and remote_storage_path: + self._remote_experiment_path = str( + URI(remote_storage_path) / Path(self._local_experiment_path).name + ) + if not pd: logger.warning( "pandas not installed. 
Run `pip install pandas` for " @@ -111,18 +147,13 @@ def __init__( else: self.fetch_trial_dataframes() - if sync_config and sync_config.upload_dir: - remote_storage_path = sync_config.upload_dir - - self._remote_storage_path = remote_storage_path - @property def _local_path(self) -> str: - return str(self._local_experiment_path) + return self._local_experiment_path @property - def _remote_path(self) -> Optional[str]: - return self._parse_cloud_path(self._local_path) + def _remote_path(self) -> str: + return self._remote_experiment_path @property def experiment_path(self) -> str: @@ -136,24 +167,36 @@ def experiment_path(self) -> str: """ return self._remote_path or self._local_path - def _parse_cloud_path(self, local_path: str): - """Convert local path into cloud storage path""" - if not self._remote_storage_path: + def _convert_local_to_cloud_path(self, local_path: str): + """Convert local path into cloud storage path. + + Example: + local_path = "/a/b/c.json" + self._remote_experiment_path = "s3://bucket?param=abcd" + self._local_experiment_path = "/a/b" + + -> "s3://bucket/c?param=abcd" + """ + if not self._remote_experiment_path: return None - return local_path.replace( - str(Path(self._local_experiment_path).parent), self._remote_storage_path - ) + rel_path = str(Path(local_path).relative_to(self._local_experiment_path)) + return str(URI(self._remote_experiment_path) / rel_path) def _load_checkpoints(self, experiment_checkpoint_path: str) -> List[str]: - experiment_checkpoint_path = Path(experiment_checkpoint_path).expanduser() # Get the latest checkpoints from the checkpoint_path. - latest_checkpoint = self._get_latest_checkpoint(experiment_checkpoint_path) + latest_checkpoints = self._get_latest_checkpoint(experiment_checkpoint_path) + if not latest_checkpoints: + raise ValueError( + f"`{experiment_checkpoint_path}` must either be a path to an " + "experiment checkpoint file, or a directory containing an experiment " + "checkpoint file." 
+ ) # Collect all checkpoints and their directory paths. # These are used to infer the `local_dir` from the checkpoints # in case the experiment folder had been moved from its original # location (e.g. from a ray cluster to a GCS/S3 bucket or to local disk). - self._load_checkpoints_from_latest(latest_checkpoint) + self._load_checkpoints_from_latest(latest_checkpoints) def _load_checkpoints_from_latest(self, latest_checkpoint: List[str]) -> None: # Collect all checkpoints and their directory paths. @@ -169,46 +212,118 @@ def _load_checkpoints_from_latest(self, latest_checkpoint: List[str]) -> None: (cp, Path(path).parent) for cp in experiment_state["checkpoints"] ] - def _get_latest_checkpoint(self, experiment_checkpoint_path: Path) -> List[str]: - # Case 1: Dir specified, find latest checkpoint. - if experiment_checkpoint_path.is_dir(): - latest_checkpoint = _find_newest_experiment_checkpoint( - str(experiment_checkpoint_path) - ) + def _maybe_download_experiment_checkpoint( + self, experiment_checkpoint_path: str + ) -> Optional[str]: + """Downloads the experiment checkpoint from a remote path if needed. + + Args: + experiment_checkpoint_path: The local or remote path to the experiment + checkpoint file. + + Returns: + str: The local copy of the experiment checkpoint. + If a local path is passed in, this method will return that immediately. + If a remote path is passed in, this will try to download that file. + Will return None if the download failed. 
+ """ + if is_local_path(experiment_checkpoint_path): + return os.path.expanduser(experiment_checkpoint_path) + + assert self._local_path and self._remote_path + + experiment_path = Path(URI(self._remote_path).path) + # s3://bucket/exp_dir/nested/experiment_state.json + # -> bucket/exp_dir/nested/experiment_state.json + checkpoint_path = Path(URI(experiment_checkpoint_path).path) + + assert experiment_path in checkpoint_path.parents + # -> nested/experiment_state.json + relative_path = checkpoint_path.relative_to(experiment_path) + + # Download to: + # -> {self._local_path}/nested/experiment_state.json + local_path = os.path.join(self._local_path, relative_path) + try: + download_from_uri(experiment_checkpoint_path, local_path) + except FileNotFoundError: + return None + + return local_path + + def _get_latest_checkpoint_from_dir( + self, experiment_checkpoint_path: str, top_level: bool = True + ) -> List[str]: + """Gets the latest experiment checkpoints from a given directory. + + Args: + experiment_checkpoint_path: A local or remote path to a directory + containing at least one experiment checkpoint file. + top_level: True if this is the first directory level. False if + we are searching in a subdirectory. (Max recursion depth of 1.) + + Returns: + list: A list of local paths pointing to the latest experiment checkpoint + file for each experiment found within the given directory. + """ + latest_checkpoint = _find_newest_experiment_checkpoint( + experiment_checkpoint_path + ) + + latest_checkpoints = [] + if latest_checkpoint: + assert not is_directory( + latest_checkpoint + ), "This should point to an actual experiment checkpoint file." + latest_checkpoints.extend(self._get_latest_checkpoint(latest_checkpoint)) + + if not latest_checkpoint and top_level: # If no checkpoint in this folder the sub-directory is searched. # In this case also multiple experiment folders could exist in # the same root. 
In this case the length of `latest_checkpoint` # will be greater than 1. - if not latest_checkpoint: - latest_checkpoint = [] - for fname in experiment_checkpoint_path.iterdir(): - fname = experiment_checkpoint_path.joinpath(fname) - latest_checkpoint_subdir = _find_newest_experiment_checkpoint( - str(fname) + for subdir in list_at_uri(experiment_checkpoint_path): + full_path = _join_path_or_uri(experiment_checkpoint_path, subdir) + if is_directory(full_path): + latest_checkpoints.extend( + self._get_latest_checkpoint_from_dir(full_path, top_level=False) ) - if latest_checkpoint_subdir: - latest_checkpoint.append(latest_checkpoint_subdir) - if not latest_checkpoint: - # This avoid nested experiment directories of the form - # `experiment_name1/experiment_name2/experiment_state.json`. - experiment_checkpoint_path = str(experiment_checkpoint_path) - raise ValueError( - f"The directory `{experiment_checkpoint_path}` does not " - "contain a Ray Tune experiment checkpoint." - ) - elif not experiment_checkpoint_path.is_file(): - # Case 2: File specified, but does not exist. - experiment_checkpoint_path = str(experiment_checkpoint_path) + + return latest_checkpoints + + def _get_latest_checkpoint(self, experiment_checkpoint_path: str) -> List[str]: + """Gets the latest experiment checkpoints corresponding to a given path. + + Acceptable path inputs (either local or remote): + - A path to an experiment checkpoint file. + - A path to an experiment directory, which contains an experiment checkpoint + file at the directory's top-level. + - A path to a directory that contains multiple experiment directories, + where each subdirectory contains an experiment checkpoint file. + + Returns: + list: A list of local paths pointing to the latest experiment checkpoint + file for each experiment corresponding to the given path. 
+ """ + if is_directory(experiment_checkpoint_path): + return self._get_latest_checkpoint_from_dir(experiment_checkpoint_path) + + local_experiment_checkpoint_path = self._maybe_download_experiment_checkpoint( + experiment_checkpoint_path + ) + + if ( + not local_experiment_checkpoint_path + or not Path(local_experiment_checkpoint_path).exists() + ): raise ValueError( f"The file `{experiment_checkpoint_path}` does not " f"exist and cannot be loaded for experiment analysis." ) - else: - # Case 3: File specified, use as latest checkpoint. - latest_checkpoint = str(experiment_checkpoint_path) - if not isinstance(latest_checkpoint, list): - latest_checkpoint = [latest_checkpoint] - return latest_checkpoint + + assert Path(local_experiment_checkpoint_path).is_file() + + return [local_experiment_checkpoint_path] @property def best_trial(self) -> Trial: @@ -508,7 +623,7 @@ def get_best_checkpoint( best_path_metrics = sorted(checkpoint_paths, key=lambda x: a * x[1]) best_path, best_metric = best_path_metrics[0] - cloud_path = self._parse_cloud_path(best_path) + cloud_path = self._convert_local_to_cloud_path(best_path) if cloud_path: # Prefer cloud path over local path for downsteam processing @@ -545,8 +660,15 @@ def get_all_configs(self, prefix: bool = False) -> Dict[str, Dict]: their trial dir. """ fail_count = 0 + failed_paths = [] for path in self._get_trial_paths(): try: + param_file = os.path.join(path, EXPR_PARAM_FILE) + if not os.path.exists(param_file) and self._remote_path: + download_from_uri( + self._convert_local_to_cloud_path(param_file), param_file + ) + with open(os.path.join(path, EXPR_PARAM_FILE)) as f: config = json.load(f) if prefix: @@ -554,10 +676,20 @@ def get_all_configs(self, prefix: bool = False) -> Dict[str, Dict]: else: self._configs[path] = config except Exception: + logger.debug( + f"Exception occurred when loading trial configs. 
" + f"See traceback:\n{traceback.format_exc()}" + ) fail_count += 1 + failed_paths.append(path) if fail_count: - logger.warning("Couldn't read config from {} paths".format(fail_count)) + failed_paths_str = "\n".join([f"- {path}" for path in failed_paths]) + logger.warning( + f"Failed to read the config for {fail_count} trials:\n" + f"{failed_paths_str}" + ) + return self._configs def get_best_trial( @@ -742,23 +874,44 @@ def fetch_trial_dataframes(self) -> Dict[str, DataFrame]: A dictionary containing "trial dir" to Dataframe. """ fail_count = 0 + failed_paths = [] force_dtype = {"trial_id": str} # Never convert trial_id to float. for path in self._get_trial_paths(): try: if self._file_type == "json": - with open(os.path.join(path, EXPR_RESULT_FILE), "r") as f: + json_file = os.path.join(path, EXPR_RESULT_FILE) + if not os.path.exists(json_file) and self._remote_path: + download_from_uri( + self._convert_local_to_cloud_path(json_file), json_file + ) + + with open(json_file, "r") as f: json_list = [json.loads(line) for line in f if line] df = pd.json_normalize(json_list, sep="/") elif self._file_type == "csv": - df = pd.read_csv( - os.path.join(path, EXPR_PROGRESS_FILE), dtype=force_dtype - ) + csv_file = os.path.join(path, EXPR_PROGRESS_FILE) + if not os.path.exists(csv_file) and self._remote_path: + download_from_uri( + self._convert_local_to_cloud_path(csv_file), csv_file + ) + + df = pd.read_csv(csv_file, dtype=force_dtype) self.trial_dataframes[path] = df except Exception: + logger.debug( + f"Exception occurred when loading trial results. 
See traceback:\n" + f"{traceback.format_exc()}" + ) fail_count += 1 + failed_paths.append(path) if fail_count: - logger.debug("Couldn't read results from {} paths".format(fail_count)) + failed_paths_str = "\n".join([f"- {path}" for path in failed_paths]) + logger.warning( + f"Failed to read the results for {fail_count} trials:\n" + f"{failed_paths_str}" + ) + return self.trial_dataframes def stats(self) -> Dict: @@ -811,9 +964,11 @@ def _get_trial_paths(self) -> List[str]: _trial_paths = [str(t.local_path) for t in self.trials] else: logger.info( - "No `self.trials`. Drawing logdirs from checkpoint " - "file. This may result in some information that is " - "out of sync, as checkpointing is periodic." + "No trial data passed in during `ExperimentAnalysis` initialization -- " + "you are most likely loading the experiment after it has completed.\n" + "Loading trial data from the experiment checkpoint file. " + "This may result in loading some stale information, " + "since checkpointing is periodic." 
) self.trials = [] for trial_json_state, path in self._checkpoints_and_paths: diff --git a/python/ray/tune/execution/experiment_state.py b/python/ray/tune/execution/experiment_state.py index ff9220c1b253..74fbbb990cc1 100644 --- a/python/ray/tune/execution/experiment_state.py +++ b/python/ray/tune/execution/experiment_state.py @@ -8,11 +8,13 @@ import time import warnings -from ray.tune.impl.out_of_band_serialize_dataset import out_of_band_serialize_dataset -from ray.tune import TuneError +from ray.air._internal.remote_storage import list_at_uri +from ray.air._internal.uri_utils import _join_path_or_uri -from ray.tune.syncer import SyncConfig, get_node_to_storage_syncer +from ray.tune import TuneError from ray.tune.experiment import Trial +from ray.tune.impl.out_of_band_serialize_dataset import out_of_band_serialize_dataset +from ray.tune.syncer import SyncConfig, get_node_to_storage_syncer logger = logging.getLogger(__name__) @@ -69,14 +71,23 @@ def _experiment_checkpoint_exists(experiment_dir: str) -> bool: def _find_newest_experiment_checkpoint(experiment_dir: str) -> Optional[str]: - """Returns file name of most recently modified checkpoint.""" + """Returns file name of most recently created experiment checkpoint. + + Args: + experiment_dir: Local or remote path to the experiment directory + containing at least one experiment checkpoint file. + + Returns: + str: The local or remote path to the latest experiment checkpoint file + based on timestamp. None if no experiment checkpoints were found. 
+ """ def construct(file: str) -> str: - return os.path.join(experiment_dir, file) + return _join_path_or_uri(experiment_dir, file) candidate_paths = [ construct(file) - for file in os.listdir(experiment_dir) + for file in list_at_uri(experiment_dir) if file.startswith("experiment_state") and file.endswith(".json") ] if not candidate_paths: diff --git a/python/ray/tune/execution/ray_trial_executor.py b/python/ray/tune/execution/ray_trial_executor.py index 0a31e063f808..c9eeed663e87 100644 --- a/python/ray/tune/execution/ray_trial_executor.py +++ b/python/ray/tune/execution/ray_trial_executor.py @@ -7,7 +7,6 @@ import traceback from collections import deque from enum import Enum -from functools import partial from typing import Callable, Dict, Iterable, Optional, Set, Union import ray @@ -946,13 +945,6 @@ def save( dir_or_data=value, storage_mode=storage, metrics=result, - local_to_remote_path_fn=partial( - TrainableUtil.get_remote_storage_path, - logdir=trial.local_path, - remote_checkpoint_dir=trial.remote_path, - ) - if trial.uses_cloud_checkpointing - else None, ) trial.saving_to = checkpoint self._futures[value] = (_ExecutorEventType.SAVING_RESULT, trial) diff --git a/python/ray/tune/execution/trial_runner.py b/python/ray/tune/execution/trial_runner.py index 185ec391b3c0..c3cc20abe0e5 100644 --- a/python/ray/tune/execution/trial_runner.py +++ b/python/ray/tune/execution/trial_runner.py @@ -315,10 +315,15 @@ def experiment_state_file_name(self) -> str: @property def experiment_state_path(self) -> str: + """Returns the local experiment checkpoint path.""" return os.path.join( self._local_experiment_path, self.experiment_state_file_name ) + @property + def experiment_path(self) -> str: + return self._remote_experiment_path or self._local_experiment_path + def _create_checkpoint_manager(self): return _ExperimentCheckpointManager( local_checkpoint_dir=self._local_experiment_path, @@ -328,12 +333,6 @@ def _create_checkpoint_manager(self): 
sync_every_n_trial_checkpoints=self._trial_checkpoint_config.num_to_keep, ) - @property - def _remote_checkpoint_dir(self): - if self._sync_config.upload_dir and self._experiment_dir_name: - return str(URI(self._sync_config.upload_dir) / self._experiment_dir_name) - return None - @classmethod def checkpoint_exists(cls, directory: str) -> bool: if not os.path.exists(directory): diff --git a/python/ray/tune/execution/tune_controller.py b/python/ray/tune/execution/tune_controller.py index 8ad4aebcc288..f7f7051a616b 100644 --- a/python/ray/tune/execution/tune_controller.py +++ b/python/ray/tune/execution/tune_controller.py @@ -971,13 +971,6 @@ def _schedule_trial_save( dir_or_data=future, storage_mode=storage, metrics=result, - local_to_remote_path_fn=partial( - TrainableUtil.get_remote_storage_path, - logdir=trial.logdir, - remote_checkpoint_dir=trial.remote_checkpoint_dir, - ) - if trial.uses_cloud_checkpointing - else None, ) trial.saving_to = checkpoint diff --git a/python/ray/tune/impl/tuner_internal.py b/python/ray/tune/impl/tuner_internal.py index 4730c22dc103..13e64c0d8ada 100644 --- a/python/ray/tune/impl/tuner_internal.py +++ b/python/ray/tune/impl/tuner_internal.py @@ -363,7 +363,7 @@ def _restore_from_path_or_uri( try: self._experiment_analysis = ExperimentAnalysis( - self._experiment_checkpoint_dir, + experiment_checkpoint_path=path_or_uri, default_metric=self._tune_config.metric, default_mode=self._tune_config.mode, ) diff --git a/python/ray/tune/result_grid.py b/python/ray/tune/result_grid.py index 098035627450..f1e495ef7c45 100644 --- a/python/ray/tune/result_grid.py +++ b/python/ray/tune/result_grid.py @@ -1,7 +1,7 @@ +from functools import partial import os -from typing import Optional, Union - import pandas as pd +from typing import Optional, Union from ray.air.result import Result from ray.cloudpickle import cloudpickle @@ -9,6 +9,7 @@ from ray.tune.analysis import ExperimentAnalysis from ray.tune.error import TuneError from ray.tune.experiment 
import Trial +from ray.tune.trainable.util import TrainableUtil from ray.util import PublicAPI @@ -243,9 +244,23 @@ def _populate_exception(trial: Trial) -> Optional[Union[TuneError, RayTaskError] return None def _trial_to_result(self, trial: Trial) -> Result: - checkpoint = trial.checkpoint.to_air_checkpoint() + local_to_remote_path_fn = ( + partial( + TrainableUtil.get_remote_storage_path, + local_path_prefix=trial.local_path, + remote_path_prefix=trial.remote_path, + ) + if trial.uses_cloud_checkpointing + else None + ) + checkpoint = trial.checkpoint.to_air_checkpoint( + local_to_remote_path_fn, + ) best_checkpoints = [ - (checkpoint.to_air_checkpoint(), checkpoint.metrics) + ( + checkpoint.to_air_checkpoint(local_to_remote_path_fn), + checkpoint.metrics, + ) for checkpoint in trial.get_trial_checkpoints() ] diff --git a/python/ray/tune/tests/conftest.py b/python/ray/tune/tests/conftest.py index ad1b6d49c3bd..fd6fd0563759 100644 --- a/python/ray/tune/tests/conftest.py +++ b/python/ray/tune/tests/conftest.py @@ -1,3 +1,31 @@ # Trigger pytest hook to automatically zip test cluster logs to archive dir on failure from ray.tests.conftest import pytest_runtest_makereport # noqa from ray.tests.conftest import propagate_logs # noqa + + +import logging +import boto3 +import pytest + +from ray.air._internal.uri_utils import URI +from ray._private.test_utils import simulate_storage + + +@pytest.fixture +def mock_s3_bucket_uri(): + port = 5002 + region = "us-west-2" + with simulate_storage("s3", port=port, region=region) as s3_uri: + s3 = boto3.client( + "s3", region_name=region, endpoint_url=f"http://localhost:{port}" + ) + # Bucket name will be autogenerated/unique per test + bucket_name = URI(s3_uri).name + s3.create_bucket( + Bucket=bucket_name, + CreateBucketConfiguration={"LocationConstraint": region}, + ) + # Disable server HTTP request logging + logging.getLogger("werkzeug").setLevel(logging.WARNING) + yield s3_uri + 
logging.getLogger("werkzeug").setLevel(logging.INFO) diff --git a/python/ray/tune/tests/test_experiment_analysis.py b/python/ray/tune/tests/test_experiment_analysis.py index dc17d26e5b03..0d966fff3e73 100644 --- a/python/ray/tune/tests/test_experiment_analysis.py +++ b/python/ray/tune/tests/test_experiment_analysis.py @@ -9,6 +9,7 @@ import ray from ray import tune +from ray.air._internal.remote_storage import upload_to_uri from ray.tune import ExperimentAnalysis import ray.tune.registry from ray.tune.tests.utils.experiment import create_test_experiment_checkpoint @@ -355,6 +356,21 @@ def train(config): self.assertEqual(var, 1) +def run_test_exp(path: str) -> ExperimentAnalysis: + with create_test_experiment_checkpoint(path) as creator: + for i in range(10): + trial = creator.create_trial(f"trial_{i}", config={"id": i, "hparam": 1}) + creator.trial_result( + trial, + { + "training_iteration": 1, + "episode_reward_mean": 10 + int(90 * random.random()), + }, + ) + + return ExperimentAnalysis(path, trials=creator.get_trials()) + + class ExperimentAnalysisStubSuite(unittest.TestCase): def setUp(self): self.test_dir = tempfile.mkdtemp() @@ -362,27 +378,12 @@ def setUp(self): self.num_samples = 2 self.metric = "episode_reward_mean" self.test_path = os.path.join(self.test_dir, self.test_name) - self.run_test_exp() def tearDown(self): shutil.rmtree(self.test_dir, ignore_errors=True) - def run_test_exp(self): - with create_test_experiment_checkpoint(self.test_path) as creator: - for i in range(10): - trial = creator.create_trial(f"trial_{i}", config={}) - creator.trial_result( - trial, - { - "training_iteration": 1, - "episode_reward_mean": 10 + int(90 * random.random()), - }, - ) - - return ExperimentAnalysis(self.test_dir, trials=creator.get_trials()) - def testPickling(self): - analysis = self.run_test_exp() + analysis = run_test_exp(self.test_path) pickle_path = os.path.join(self.test_dir, "analysis.pickle") with open(pickle_path, "wb") as f: pickle.dump(analysis, f) @@ 
-394,8 +395,8 @@ def testPickling(self): self.assertTrue(analysis.get_best_trial(metric=self.metric, mode="max")) - def testFromPath(self): - self.run_test_exp() + def testFromLocalPath(self): + run_test_exp(self.test_path) analysis = ExperimentAnalysis(self.test_path) self.assertTrue(analysis.get_best_trial(metric=self.metric, mode="max")) @@ -433,6 +434,37 @@ def testEmptyCheckpoint(self): assert len(ea.trials) == 10 +def test_create_from_remote_path(tmp_path, mock_s3_bucket_uri): + run_test_exp(str(tmp_path)) + upload_to_uri(str(tmp_path), mock_s3_bucket_uri) + + local_analysis = ExperimentAnalysis(str(tmp_path)) + remote_analysis = ExperimentAnalysis(mock_s3_bucket_uri) + + metric = "episode_reward_mean" + mode = "max" + + # Tracked metric data is the same + assert ( + local_analysis.get_best_trial(metric=metric, mode=mode).trial_id + == remote_analysis.get_best_trial(metric=metric, mode=mode).trial_id + ) + + # Trial result dataframes are the same + assert all( + local_df.equals(remote_df) + for local_df, remote_df in zip( + local_analysis.trial_dataframes.values(), + remote_analysis.trial_dataframes.values(), + ) + ) + + # Trial configs are the same + assert list(local_analysis.get_all_configs().values()) == list( + remote_analysis.get_all_configs().values() + ) + + if __name__ == "__main__": import pytest import sys diff --git a/python/ray/tune/tests/test_result_grid.py b/python/ray/tune/tests/test_result_grid.py index 020a9ca127a0..c32fec617c2f 100644 --- a/python/ray/tune/tests/test_result_grid.py +++ b/python/ray/tune/tests/test_result_grid.py @@ -354,7 +354,7 @@ def test_num_errors_terminated(tmpdir): trials[i].status = Trial.TERMINATED create_tune_experiment_checkpoint(trials, local_checkpoint_dir=str(tmpdir)) - result_grid = ResultGrid(tune.ExperimentAnalysis(tmpdir)) + result_grid = ResultGrid(tune.ExperimentAnalysis(str(tmpdir))) assert len(result_grid.errors) == 3 assert result_grid.num_errors == 3 assert result_grid.num_terminated == 2 diff --git 
a/python/ray/tune/tests/test_syncer.py b/python/ray/tune/tests/test_syncer.py index 853b512e211b..ee522c069bd7 100644 --- a/python/ray/tune/tests/test_syncer.py +++ b/python/ray/tune/tests/test_syncer.py @@ -8,7 +8,6 @@ from typing import List, Optional from unittest.mock import patch -import boto3 import pytest from freezegun import freeze_time @@ -21,7 +20,6 @@ from ray.tune.syncer import _DefaultSyncer, Syncer, SyncConfig from ray.tune.utils.file_transfer import _pack_dir, _unpack_dir from ray.air._internal.remote_storage import upload_to_uri, download_from_uri -from ray._private.test_utils import simulate_storage @pytest.fixture @@ -74,26 +72,6 @@ def temp_data_dirs(): shutil.rmtree(tmp_target) -@pytest.fixture -def mock_s3_bucket_uri(): - port = 5002 - region = "us-west-2" - with simulate_storage("s3", port=port, region=region) as s3_uri: - s3 = boto3.client( - "s3", region_name=region, endpoint_url=f"http://localhost:{port}" - ) - # Bucket name will be autogenerated/unique per test - bucket_name = URI(s3_uri).name - s3.create_bucket( - Bucket=bucket_name, - CreateBucketConfiguration={"LocationConstraint": region}, - ) - # Disable server HTTP request logging - logging.getLogger("werkzeug").setLevel(logging.WARNING) - yield s3_uri - logging.getLogger("werkzeug").setLevel(logging.INFO) - - def assert_file(exists: bool, root: str, path: str): full_path = os.path.join(root, path) diff --git a/python/ray/tune/tests/utils/experiment.py b/python/ray/tune/tests/utils/experiment.py index 754dd6cd2274..8237beb9cb78 100644 --- a/python/ray/tune/tests/utils/experiment.py +++ b/python/ray/tune/tests/utils/experiment.py @@ -1,14 +1,15 @@ import os import tempfile from contextlib import contextmanager -from functools import partial from pathlib import Path from typing import Any, Dict, Optional, Type from ray.air._internal.checkpoint_manager import _TrackedCheckpoint, CheckpointStorage +from ray.tune import SyncConfig +from ray.tune.callback import CallbackList from 
ray.tune.execution.trial_runner import TrialRunner, _TuneControllerBase from ray.tune.experiment import Trial -from ray.tune.trainable import TrainableUtil +from ray.tune.utils.callback import _create_default_callbacks class _ExperimentCheckpointCreator: @@ -30,6 +31,11 @@ def _get_trial_checkpoints(self): experiment_path=experiment_path, experiment_dir_name=experiment_name ) + # Also, create any default logger callback artifacts. + self.callbacks = CallbackList( + _create_default_callbacks([], sync_config=SyncConfig(syncer=None)) + ) + def save_checkpoint(self): self.runner.save_to_dir() @@ -44,6 +50,12 @@ def get_trial_checkpoints(self): def trial_result(self, trial: Trial, result: Dict): trial.update_last_result(result) trial.invalidate_json_state() + self.callbacks.on_trial_result( + iteration=-1, # Dummy value + trials=self.get_trials(), + trial=trial, + result=result, + ) def trial_checkpoint( self, @@ -56,11 +68,6 @@ def trial_checkpoint( dir_or_data=checkpoint_data, storage_mode=checkpoint_storage, metrics=trial.last_result, - local_to_remote_path_fn=partial( - TrainableUtil.get_remote_storage_path, - logdir=trial.local_path, - remote_checkpoint_dir=trial.remote_path, - ), ) trial.on_checkpoint(checkpoint) trial.invalidate_json_state() @@ -83,6 +90,11 @@ def create_trial( ) trial.init_local_path() self.runner.add_trial(trial) + self.callbacks.on_trial_start( + iteration=-1, # Dummy value + trials=self.get_trials(), + trial=trial, + ) return trial diff --git a/python/ray/tune/trainable/trainable.py b/python/ray/tune/trainable/trainable.py index f13e22ba6afe..bd51ac89fffe 100644 --- a/python/ray/tune/trainable/trainable.py +++ b/python/ray/tune/trainable/trainable.py @@ -219,7 +219,9 @@ def _remote_storage_path(self, local_path): """Converts a `local_path` to be based off of `self.remote_checkpoint_dir`.""" return TrainableUtil.get_remote_storage_path( - local_path, self.logdir, self.remote_checkpoint_dir + local_path=local_path, + 
local_path_prefix=self.logdir, + remote_path_prefix=self.remote_checkpoint_dir, ) @classmethod diff --git a/python/ray/tune/trainable/util.py b/python/ray/tune/trainable/util.py index 6366338bd821..cac898afb89c 100644 --- a/python/ray/tune/trainable/util.py +++ b/python/ray/tune/trainable/util.py @@ -201,14 +201,20 @@ def get_checkpoints_paths(logdir): @staticmethod def get_remote_storage_path( - local_path: str, logdir: str, remote_checkpoint_dir: str + local_path: str, local_path_prefix: str, remote_path_prefix: str ) -> str: """Converts a ``local_path`` to be based off of - ``remote_checkpoint_dir`` instead of ``logdir``. + ``remote_path_prefix`` instead of ``local_path_prefix``. - ``logdir`` is assumed to be a prefix of ``local_path``.""" - rel_local_path = os.path.relpath(local_path, logdir) - uri = URI(remote_checkpoint_dir) + ``local_path_prefix`` is assumed to be a prefix of ``local_path``. + + Example: + + >>> TrainableUtil.get_remote_storage_path("/a/b/c", "/a", "s3://bucket/") + 's3://bucket/b/c' + """ + rel_local_path = os.path.relpath(local_path, local_path_prefix) + uri = URI(remote_path_prefix) return str(uri / rel_local_path) diff --git a/python/ray/tune/tune.py b/python/ray/tune/tune.py index 48c7da2a2aa8..3f3a8d335b2c 100644 --- a/python/ray/tune/tune.py +++ b/python/ray/tune/tune.py @@ -4,7 +4,6 @@ import datetime import logging import os -from pathlib import Path import signal import sys import threading @@ -1015,7 +1014,7 @@ class and registered trainables. 
if experiment_interrupted_event.is_set(): restore_entrypoint = error_message_map["restore_entrypoint"].format( - path=Path(experiment_checkpoint).parent, + path=runner.experiment_path, ) logger.warning( "Experiment has been interrupted, but the most recent state was saved.\n" From a59cef6e4ae5825f216a4e825722ad2abf8addcc Mon Sep 17 00:00:00 2001 From: angelinalg <122562471+angelinalg@users.noreply.github.com> Date: Fri, 21 Apr 2023 09:48:58 -0700 Subject: [PATCH 053/424] [docs] [serve] removed line numbers and fixed file name summary_model.py (#34617) Copy and paste button was including line numbers in 3 code examples, which is a bad user experience. Fixed error with filename. The command line instructions said `python model.py` but it should be `python summary_model.py`. This addresses two issues in GH issue 34481, but not all of them. ## Checks - [ ] I've signed off every commit(by using the -s flag, i.e., `git commit -s`) in this PR. - [ ] I've run `scripts/format.sh` to lint the changes in this PR. - [ ] I've included any doc changes needed for https://docs.ray.io/en/master/. - [ ] I've added any new APIs to the API Reference. For example, if I added a method in Tune, I've added it in `doc/source/tune/api/` under the corresponding `.rst` file. - [ ] I've made sure the tests are passing. Note that there might be a few flaky tests, see the recent failures at https://flakey-tests.ray.io/ - Testing Strategy - [ ] Unit tests - [ ] Release tests - [ ] This PR is not tested :( --- doc/source/serve/getting_started.md | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/doc/source/serve/getting_started.md b/doc/source/serve/getting_started.md index b639a3916744..409d28f2f23e 100644 --- a/doc/source/serve/getting_started.md +++ b/doc/source/serve/getting_started.md @@ -37,18 +37,16 @@ First, let's take a look at our text-translation model. 
Here's its code: :start-after: __start_translation_model__ :end-before: __end_translation_model__ :language: python -:linenos: true ``` The Python file, called `model.py`, uses the `Translator` class to translate English text to French. -- The `self.model` variable on line 8 inside `Translator`'s `__init__` method +- The `self.model` variable inside `Translator`'s `__init__` method stores a function that uses the [t5-small](https://huggingface.co/t5-small) model to translate text. - When `self.model` is called on English text, it returns translated French text inside a dictionary formatted as `[{"translation_text": "..."}]`. -- The `Translator`'s `translate` method extracts the translated text on - line 15 by indexing into the dictionary. +- The `Translator`'s `translate` method extracts the translated text by indexing into the dictionary. You can copy-paste this script and run it locally. It translates `"Hello world!"` into `"Bonjour Monde!"`. @@ -133,7 +131,6 @@ Here's the full Ray Serve script that we built: :start-after: __deployment_full_start__ :end-before: __deployment_full_end__ :language: python -:linenos: true ``` We can run our script with the `serve run` CLI command. This command takes in an import path @@ -201,7 +198,7 @@ For example, let's deploy a machine learning pipeline with two steps: You can copy-paste this script and run it locally. It summarizes the snippet from _A Tale of Two Cities_ to `it was the best of times, it was worst of times .` ```console -$ python model.py +$ python summary_model.py it was the best of times, it was worst of times . ``` @@ -212,10 +209,9 @@ Here's a Ray Serve deployment graph that chains the two models together. The gra :start-after: __start_graph__ :end-before: __end_graph__ :language: python -:linenos: true ``` -This script contains our `Summarizer` class converted to a deployment and our `Translator` class with some modifications. 
In this script, the `Summarizer` class contains the `__call__` method since requests are sent to it first. It also takes in the `Translator` as one of its constructor arguments, so it can forward summarized texts to the `Translator` deployment. The `__call__` method also contains some new code on lines 44 and 45: +This script contains our `Summarizer` class converted to a deployment and our `Translator` class with some modifications. In this script, the `Summarizer` class contains the `__call__` method since requests are sent to it first. It also takes in the `Translator` as one of its constructor arguments, so it can forward summarized texts to the `Translator` deployment. The `__call__` method also contains some new code: ```python translation_ref = await self.translator.translate.remote(summary) From a8c262dd3dd1711b7e2c1e1e591f6b45d5fcce2c Mon Sep 17 00:00:00 2001 From: Cuong Nguyen <128072568+can-anyscale@users.noreply.github.com> Date: Fri, 21 Apr 2023 09:59:49 -0700 Subject: [PATCH 054/424] [CI][Green-Ray][4] Compute and store unique crash pattern from logs (#34200) This PR computes and aggregate unique crash patterns from logs, then store them in Databricks. Later on, this will help us build a dashboard for heat map of errors from aggregated logs, help us prioritize the most impactful errors to fix. 
Signed-off-by: Cuong Nguyen --- release/ray_release/anyscale_util.py | 2 +- .../job_manager/anyscale_job_manager.py | 5 +- release/ray_release/log_aggregator.py | 100 ++++++++++++++++++ release/ray_release/reporter/db.py | 4 + .../ray_release/tests/test_log_aggregator.py | 68 ++++++++++++ 5 files changed, 177 insertions(+), 2 deletions(-) create mode 100644 release/ray_release/log_aggregator.py create mode 100644 release/ray_release/tests/test_log_aggregator.py diff --git a/release/ray_release/anyscale_util.py b/release/ray_release/anyscale_util.py index 6552dae281da..6ef84cbea060 100644 --- a/release/ray_release/anyscale_util.py +++ b/release/ray_release/anyscale_util.py @@ -7,7 +7,7 @@ from anyscale.sdk.anyscale_client.sdk import AnyscaleSDK -LAST_LOGS_LENGTH = 10 +LAST_LOGS_LENGTH = 30 def find_cloud_by_name( diff --git a/release/ray_release/job_manager/anyscale_job_manager.py b/release/ray_release/job_manager/anyscale_job_manager.py index 1b605b412e84..5b831e4e7d3b 100644 --- a/release/ray_release/job_manager/anyscale_job_manager.py +++ b/release/ray_release/job_manager/anyscale_job_manager.py @@ -332,7 +332,10 @@ def _get_logs(): ) print("", flush=True) output = buf.getvalue().strip() - if "### Starting ###" not in output: + # Many of Ray components have their separated logs (e.g. dashboard, + # gcs_server, etc.), so the interesting errors are not always in the + # job logs. If the job has no logs, check other ray logs for error patterns. 
+        if not output:
             output = self._get_ray_error_logs()
         assert output, "No logs fetched"
         return "\n".join(output.splitlines()[-LAST_LOGS_LENGTH * 3 :])
diff --git a/release/ray_release/log_aggregator.py b/release/ray_release/log_aggregator.py
new file mode 100644
index 000000000000..4617b63bfb8b
--- /dev/null
+++ b/release/ray_release/log_aggregator.py
@@ -0,0 +1,100 @@
+import re
+from typing import List
+
+TRACEBACK_PATTERN = "Traceback (most recent call last)"
+
+
+class LogAggregator:
+    def __init__(self, log: str):
+        self.log = log
+
+    def compute_crash_pattern(self) -> str:
+        stack_trace = LogAggregator._compute_stack_trace(self.log.splitlines())
+        # truncate short enough to store in databases, but long enough to keep the
+        # pattern unique
+        return LogAggregator._compute_signature(stack_trace)[:4000]
+
+    @staticmethod
+    def _compute_signature(stack_trace: List[str]) -> str:
+        """
+        Compute the signature pattern from a stack trace by removing factors such
+        as date, time, temp directory, line numbers, etc. This helps to aggregate
+        similar logs into the same bug patterns
+        """
+        massaged_trace = []
+        for line in stack_trace:
+            line = re.sub(r"\d", "", line.strip())
+            if line == "Traceback (most recent call last):":
+                continue
+            file_line = re.search(r'File "(.*)", (.*)', line)
+            if file_line:
+                # append the file's base name and caller information; the result string
+                # is not something meaningful to humans, we just need something that
+                # uniquely represents the stack trace
+                line = f'{file_line.group(1).split("/")[-1]}{file_line.group(2)}'
+            massaged_trace.append(line)
+        return "".join(massaged_trace)
+
+    @staticmethod
+    def _compute_stack_trace(logs: List[str]) -> List[str]:
+        """
+        Extract stack trace pattern from the logs. Stack trace pattern often matches
+        the following:
+        ERROR ...
+        Traceback (most recent call last):
+          File "...", line ..., in ...
+          ...
+ Exception: exception error + """ + error_stacktrace = [] + stacktrace = [] + i = 0 + while i < len(logs): + stack = [] + trace = error_stacktrace + # Search for lines that are either + # ... ERROR ... + # or + # ... ERROR ... + # Traceback (most recent call last): + if "ERROR" in logs[i]: + stack.append(logs[i]) + next = i + 1 + if i + 1 < len(logs) and TRACEBACK_PATTERN in logs[i + 1]: + stack.append(logs[i + 1]) + next = i + 2 + # Or if the line with ERROR does not exist, just search for the line with + # Traceback (most recent call last): + elif TRACEBACK_PATTERN in logs[i]: + stack.append(logs[i]) + trace = stacktrace + next = i + 1 + # Or else, skip this line and continue + else: + i = i + 1 + continue + # If the line that contains ERROR, Traceback, etc. is found, scan the logs + # until the line no longer has indentation. This is because stack trace + # is always indented, and stops when the line is no longer indented + while next < len(logs): + if logs[next].startswith((" ", "\t")): + stack.append(logs[next]) + next = next + 1 + else: + break + # Finished capturing the entire stack trace + if next < len(logs): + stack.append(logs[next]) + if stack: + trace.append(stack) + i = next + 1 + + # Favor stack trace that contains the ERROR keyword + if error_stacktrace: + return error_stacktrace[-1] + + # Otherwise any stack trace is fine + if stacktrace: + return stacktrace[-1] + + return [] diff --git a/release/ray_release/reporter/db.py b/release/ray_release/reporter/db.py index e9295140f921..ec816739a306 100644 --- a/release/ray_release/reporter/db.py +++ b/release/ray_release/reporter/db.py @@ -7,6 +7,7 @@ from ray_release.result import Result from ray_release.config import Test from ray_release.logger import logger +from ray_release.log_aggregator import LogAggregator class DBReporter(Reporter): @@ -40,6 +41,9 @@ def report_result(self, test: Test, result: Result): "return_code": result.return_code, "smoke_test": result.smoke_test, "extra_tags": 
result.extra_tags or {}, + "crash_pattern": LogAggregator( + result.last_logs or "" + ).compute_crash_pattern(), } logger.debug(f"Result json: {json.dumps(result_json)}") diff --git a/release/ray_release/tests/test_log_aggregator.py b/release/ray_release/tests/test_log_aggregator.py new file mode 100644 index 000000000000..6cc92e74f3b9 --- /dev/null +++ b/release/ray_release/tests/test_log_aggregator.py @@ -0,0 +1,68 @@ +from ray_release.log_aggregator import LogAggregator + + +def test_compute_stack_pattern(): + assert ( + LogAggregator( + "\n".join( + [ + "haha", + "Traceback (most recent call last):", + ' File "/tmp/something", line 584', + "Exception: yaya45", + "hehe", + ] + ) + ).compute_crash_pattern() + == "somethingline Exception: yaya" + ) + + +def test_compute_signature(): + assert ( + LogAggregator._compute_signature( + [ + "Traceback (most recent call last):", + ' File "/tmp/something", line 584', + "Exception: yaya45", + ] + ) + == "somethingline Exception: yaya" + ) + + +def test_compute_stack_trace(): + trace = [ + "Traceback (most recent call last):", + ' File "/tmp/something", line 584, in run_release_test', + " raise pipeline_exception", + "ray_release.exception.JobNoLogsError: Could not obtain logs for the job.", + ] + error_trace = [ + "[2023-01-01] ERROR: something is wrong", + "Traceback (most recent call last):", + ' File "/tmp/something", line 584, in run_release_test', + " raise pipeline_exception", + "ray_release.exception.JobStartupTimeout: Cluster did not start.", + ] + error_trace_short = [ + "[2023-01-01] ERROR: something is wrong" + ' File "/tmp/something", line 584, in run_release_test', + " raise pipeline_exception", + "ray_release.exception.JobStartupTimeout: Cluster did not start.", + ] + assert LogAggregator._compute_stack_trace(["haha"] + trace + ["hehe"]) == trace + assert ( + LogAggregator._compute_stack_trace(["haha"] + error_trace + ["hehe"]) + == error_trace + ) + assert ( + LogAggregator._compute_stack_trace(["haha"] + 
error_trace_short + ["hehe"]) + == error_trace_short + ) + assert ( + LogAggregator._compute_stack_trace( + ["haha"] + trace + ["w00t"] + error_trace + ["hehe"] + ) + == error_trace + ) From 970fd8112ef923a7e00a906327914efa5959bca5 Mon Sep 17 00:00:00 2001 From: Edward Oakes Date: Fri, 21 Apr 2023 13:28:41 -0500 Subject: [PATCH 055/424] [serve] Add support for application builders & arguments (#34584) First cut at an implementation for https://github.com/ray-project/ray/issues/34542. There should be no changes in behavior for existing applications. Will update documentation & examples in a separate PR, would like to get it merged to get feedback from others on the API. --- python/ray/serve/_private/api.py | 60 +++++++- python/ray/serve/controller.py | 6 +- python/ray/serve/schema.py | 6 +- python/ray/serve/scripts.py | 53 +++++-- python/ray/serve/tests/test_api.py | 139 +++++++++++++++++- python/ray/serve/tests/test_cli.py | 116 ++++++++++++++- .../test_config_files/apps_with_args.yaml | 17 +++ .../tests/test_config_files/arg_builders.py | 25 ++++ 8 files changed, 403 insertions(+), 19 deletions(-) create mode 100644 python/ray/serve/tests/test_config_files/apps_with_args.yaml create mode 100644 python/ray/serve/tests/test_config_files/arg_builders.py diff --git a/python/ray/serve/_private/api.py b/python/ray/serve/_private/api.py index 29e4d69a22ef..f71af2afee77 100644 --- a/python/ray/serve/_private/api.py +++ b/python/ray/serve/_private/api.py @@ -1,10 +1,15 @@ -from typing import Dict, Optional, Tuple, Union +import inspect import logging import os +from types import FunctionType +from typing import Any, Dict, Optional, Tuple, Union + +from pydantic.main import ModelMetaclass import ray from ray._private.usage import usage_lib from ray.serve.deployment import Deployment +from ray.serve.deployment_graph import ClassNode, FunctionNode from ray.serve.exceptions import RayServeException from ray.serve.config import HTTPOptions from ray.serve._private.constants 
import ( @@ -329,3 +334,56 @@ def serve_start( f'namespace "{SERVE_NAMESPACE}".' ) return client + + +def call_app_builder_with_args_if_necessary( + builder: Union[ClassNode, FunctionNode, FunctionType], + args: Dict[str, Any], +) -> Union[ClassNode, FunctionNode]: + """Builds a Serve application from an application builder function. + + If a pre-built application (ClassNode or FunctionNode) is passed, this is a no-op. + + Else, we validate the signature of the builder, convert the args dictionary to + the user-annotated Pydantic model if provided, and call the builder function. + + The output of the function is returned (must be a ClassNode or FunctionNode). + """ + if isinstance(builder, (ClassNode, FunctionNode)): + if len(args) > 0: + raise ValueError( + "Arguments can only be passed to an application builder function, " + "not an already built application." + ) + return builder + elif not isinstance(builder, FunctionType): + raise TypeError( + "Expected a built Serve application or an application builder function " + f"but got: {type(builder)}." + ) + + # Check that the builder only takes a single argument. + # TODO(edoakes): we may want to loosen this to allow optional kwargs in the future. + signature = inspect.signature(builder) + if len(signature.parameters) != 1: + raise TypeError( + "Application builder functions should take exactly one parameter, " + "a dictionary containing the passed arguments." + ) + + # If the sole argument to the builder is a pydantic model, convert the args dict to + # that model. This will perform standard pydantic validation (e.g., raise an + # exception if required fields are missing). 
+ param = signature.parameters[list(signature.parameters.keys())[0]] + if issubclass(type(param.annotation), ModelMetaclass): + args = param.annotation.parse_obj(args) + + app = builder(args) + if not isinstance(app, (ClassNode, FunctionNode)): + raise TypeError( + "Application builder functions must return a `ClassNode` or " + "`FunctionNode` returned from `Deployment.bind()`, " + f"but got: {type(app)}." + ) + + return app diff --git a/python/ray/serve/controller.py b/python/ray/serve/controller.py index be60f3e2190b..9f0499ce41b8 100644 --- a/python/ray/serve/controller.py +++ b/python/ray/serve/controller.py @@ -558,6 +558,7 @@ def deploy_apps( updated_versions, app_config_dict.get("route_prefix", DEFAULT.VALUE), app_config.name, + app_config.args, ) self.application_state_manager.create_application_state( @@ -881,6 +882,7 @@ def deploy_serve_application( deployment_versions: Dict, route_prefix: str, name: str, + args: Dict, ): """Deploy Serve application from a user-provided config. @@ -899,9 +901,11 @@ def deploy_serve_application( try: from ray import serve from ray.serve.api import build + from ray.serve._private.api import call_app_builder_with_args_if_necessary # Import and build the application. - app = build(import_attr(import_path), name) + app = call_app_builder_with_args_if_necessary(import_attr(import_path), args) + app = build(app, name) # Override options for each deployment. 
for options in deployment_override_options: diff --git a/python/ray/serve/schema.py b/python/ray/serve/schema.py index 76ead035e00e..4f1c7e1a7c2e 100644 --- a/python/ray/serve/schema.py +++ b/python/ray/serve/schema.py @@ -357,7 +357,11 @@ class ServeApplicationSchema(BaseModel, extra=Extra.forbid): ) deployments: List[DeploymentSchema] = Field( default=[], - description=("Deployment options that override options specified in the code."), + description="Deployment options that override options specified in the code.", + ) + args: Dict = Field( + default={}, + description="Arguments that will be passed to the application builder.", ) @validator("runtime_env") diff --git a/python/ray/serve/scripts.py b/python/ray/serve/scripts.py index dddf4356040b..d97bf7a22112 100644 --- a/python/ray/serve/scripts.py +++ b/python/ray/serve/scripts.py @@ -3,7 +3,7 @@ import pathlib import sys import time -from typing import Optional, Union, Tuple +from typing import Dict, Optional, Union, Tuple import click import yaml @@ -106,6 +106,21 @@ def process_dict_for_yaml_dump(data): return data +def convert_args_to_dict(args: Tuple[str]) -> Dict[str, str]: + args_dict = dict() + for arg in args: + split = arg.split("=") + if len(split) != 2: + raise click.ClickException( + f"Invalid application argument '{arg}', " + "must be of the form '='." + ) + + args_dict[split[0]] = split[1] + + return args_dict + + @click.group(help="CLI for managing Serve instances on a Ray cluster.") def cli(): pass @@ -214,16 +229,20 @@ def deploy(config_file_name: str, address: str): @cli.command( short_help="Run Serve application(s).", help=( - "Runs the Serve application from the specified import path (e.g. my_script:" - "my_bound_deployment) or application(s) from a YAML config.\n\n" - "If using a YAML config, existing deployments with no code changes in an " - "application will not be redeployed.\n\n" - "Any import path must lead to a FunctionNode or ClassNode object. 
" - "By default, this will block and periodically log status. If you " - "Ctrl-C the command, it will tear down the app." + "Runs an application from the specified import path (e.g., my_script:" + "app) or application(s) from a YAML config.\n\n" + "If passing an import path, it must point to a bound Serve application or " + "a function that returns one. If a function is used, arguments can be " + "passed to it in 'key=val' format after the import path, for example:\n\n" + "serve run my_script:app model_path='/path/to/model.pkl' num_replicas=5\n\n" + "If passing a YAML config, existing applications with no code changes will not " + "be updated.\n\n" + "By default, this will block and stream logs to the console. If you " + "Ctrl-C the command, it will shut down Serve on the cluster." ), ) @click.argument("config_or_import_path") +@click.argument("arguments", nargs=-1, required=False) @click.option( "--runtime-env", type=str, @@ -301,6 +320,7 @@ def deploy(config_file_name: str, address: str): ) def run( config_or_import_path: str, + arguments: Tuple[str], runtime_env: str, runtime_env_json: str, working_dir: str, @@ -312,7 +332,7 @@ def run( gradio: bool, ): sys.path.insert(0, app_dir) - + args_dict = convert_args_to_dict(arguments) final_runtime_env = parse_runtime_env_args( runtime_env=runtime_env, runtime_env_json=runtime_env_json, @@ -320,9 +340,14 @@ def run( ) if pathlib.Path(config_or_import_path).is_file(): + if len(args_dict) > 0: + cli_logger.warning( + "Application arguments are ignored when running a config file." 
+ ) + is_config = True config_path = config_or_import_path - cli_logger.print(f'Deploying from config file: "{config_path}".') + cli_logger.print(f"Running config file: '{config_path}'.") with open(config_path, "r") as config_file: config_dict = yaml.safe_load(config_file) @@ -375,8 +400,10 @@ def run( if port is None: port = DEFAULT_HTTP_PORT import_path = config_or_import_path - cli_logger.print(f'Deploying from import path: "{import_path}".') - node = import_attr(import_path) + cli_logger.print(f"Running import path: '{import_path}'.") + app = _private_api.call_app_builder_with_args_if_necessary( + import_attr(import_path), args_dict + ) # Setting the runtime_env here will set defaults for the deployments. ray.init(address=address, namespace=SERVE_NAMESPACE, runtime_env=final_runtime_env) @@ -392,7 +419,7 @@ def run( if gradio: handle = serve.get_deployment("DAGDriver").get_handle() else: - handle = serve.run(node, host=host, port=port) + handle = serve.run(app, host=host, port=port) cli_logger.success("Deployed Serve app successfully.") if gradio: diff --git a/python/ray/serve/tests/test_api.py b/python/ray/serve/tests/test_api.py index e070e2c901a0..a205512018b8 100644 --- a/python/ray/serve/tests/test_api.py +++ b/python/ray/serve/tests/test_api.py @@ -1,18 +1,21 @@ import asyncio import os -from ray.serve.deployment_graph import RayServeDAGHandle +from typing import Optional +from fastapi import FastAPI import requests +from pydantic import BaseModel, ValidationError import pytest import starlette.responses -from fastapi import FastAPI import ray from ray import serve from ray._private.test_utils import SignalActor, wait_for_condition from ray.serve.application import Application +from ray.serve.deployment_graph import ClassNode, FunctionNode, RayServeDAGHandle from ray.serve.drivers import DAGDriver from ray.serve.exceptions import RayServeException +from ray.serve._private.api import call_app_builder_with_args_if_necessary @serve.deployment() @@ -708,6 
+711,138 @@ def f(): f.options(autoscaling_config={"min_replicas": "1"}) +class TestAppBuilder: + @serve.deployment + class A: + pass + + @serve.deployment + def f(): + pass + + class TypedArgs(BaseModel): + message: str + num_replicas: Optional[int] + + def test_prebuilt_app(self): + a = self.A.bind() + assert call_app_builder_with_args_if_necessary(a, {}) == a + + f = self.f.bind() + assert call_app_builder_with_args_if_necessary(f, {}) == f + + with pytest.raises( + ValueError, + match="Arguments can only be passed to an application builder function", + ): + call_app_builder_with_args_if_necessary(f, {"key": "val"}) + + def test_invalid_builder(self): + class ThisShouldBeAFunction: + pass + + with pytest.raises( + TypeError, + match=( + "Expected a built Serve application " + "or an application builder function" + ), + ): + call_app_builder_with_args_if_necessary(ThisShouldBeAFunction, {}) + + def test_invalid_signature(self): + def builder_with_two_args(args1, args2): + return self.f.bind() + + with pytest.raises( + TypeError, + match="Application builder functions should take exactly one parameter", + ): + call_app_builder_with_args_if_necessary(builder_with_two_args, {}) + + def test_builder_returns_bad_type(self): + def return_none(args): + self.f.bind() + + with pytest.raises( + TypeError, + match="Application builder functions must return a", + ): + call_app_builder_with_args_if_necessary(return_none, {}) + + def return_unbound_deployment(args): + return self.f + + with pytest.raises( + TypeError, + match="Application builder functions must return a", + ): + call_app_builder_with_args_if_necessary(return_unbound_deployment, {}) + + def test_basic_no_args(self): + def build_function(args): + return self.A.bind() + + assert isinstance( + call_app_builder_with_args_if_necessary(build_function, {}), ClassNode + ) + + def build_class(args): + return self.f.bind() + + assert isinstance( + call_app_builder_with_args_if_necessary(build_class, {}), FunctionNode + ) 
+ + def test_args_dict(self): + args_dict = {"message": "hiya", "num_replicas": "3"} + + def build(args): + assert len(args) == 2 + assert args["message"] == "hiya" + assert args["num_replicas"] == "3" + return self.A.options(num_replicas=int(args["num_replicas"])).bind( + args["message"] + ) + + app = call_app_builder_with_args_if_necessary(build, args_dict) + assert isinstance(app, ClassNode) + + def test_args_typed(self): + args_dict = {"message": "hiya", "num_replicas": "3"} + + def build(args: self.TypedArgs): + assert isinstance(args, self.TypedArgs) + assert args.message == "hiya" + assert args.num_replicas == 3 + return self.A.options(num_replicas=args.num_replicas).bind(args.message) + + app = call_app_builder_with_args_if_necessary(build, args_dict) + assert isinstance(app, ClassNode) + + # Sanity check that pydantic validation works. + + # 1) Check that validation permits a missing optional field. + def check_missing_optional(args: self.TypedArgs): + assert args.message == "hiya" + assert args.num_replicas is None + return self.A.bind() + + app = call_app_builder_with_args_if_necessary( + check_missing_optional, {"message": "hiya"} + ) + assert isinstance(app, ClassNode) + + # 2) Check that validation rejects a missing required field. + def check_missing_required(args: self.TypedArgs): + assert False, "Shouldn't get here because validation failed." 
+ + with pytest.raises(ValidationError, match="field required"): + call_app_builder_with_args_if_necessary( + check_missing_required, {"num_replicas": "10"} + ) + + if __name__ == "__main__": import sys diff --git a/python/ray/serve/tests/test_cli.py b/python/ray/serve/tests/test_cli.py index add6e13ef907..5ec96d7d9d2b 100644 --- a/python/ray/serve/tests/test_cli.py +++ b/python/ray/serve/tests/test_cli.py @@ -7,6 +7,8 @@ from tempfile import NamedTemporaryFile from typing import List +import click +from pydantic import BaseModel import pytest import requests import yaml @@ -20,7 +22,7 @@ from ray.serve.deployment_graph import RayServeDAGHandle from ray.tests.conftest import tmp_working_dir # noqa: F401, E501 from ray.dashboard.modules.serve.sdk import ServeSubmissionClient -from ray.serve.scripts import remove_ansi_escape_sequences +from ray.serve.scripts import convert_args_to_dict, remove_ansi_escape_sequences CONNECTION_ERROR_MSG = "connection error" @@ -47,6 +49,20 @@ def assert_deployments_live(names: List[str]): assert all_deployments_live, f'"{nonliving_deployment}" deployment is not live.' 
+def test_convert_args_to_dict(): + assert convert_args_to_dict(tuple()) == {} + + with pytest.raises( + click.ClickException, match="Invalid application argument 'bad_arg'" + ): + convert_args_to_dict(("bad_arg",)) + + assert convert_args_to_dict(("key1=val1", "key2=val2")) == { + "key1": "val1", + "key2": "val2", + } + + def test_start_shutdown(ray_start_stop): subprocess.check_output(["serve", "start"]) subprocess.check_output(["serve", "shutdown", "-y"]) @@ -355,6 +371,38 @@ def test_deploy_single_with_name(ray_start_stop): assert MULTI_APP_MIGRATION_MESSAGE in e.value.output.decode("utf-8") +@pytest.mark.skipif(sys.platform == "win32", reason="File path incorrect on Windows.") +def test_deploy_multi_app_builder_with_args(ray_start_stop): + """Deploys a config file containing multiple applications that take arguments.""" + # Create absolute file names to YAML config file. + apps_with_args = os.path.join( + os.path.dirname(__file__), "test_config_files", "apps_with_args.yaml" + ) + + subprocess.check_output(["serve", "deploy", apps_with_args]) + + wait_for_condition( + lambda: requests.post("http://localhost:8000/untyped_default").text + == "DEFAULT", + timeout=10, + ) + + wait_for_condition( + lambda: requests.post("http://localhost:8000/untyped_hello").text == "hello", + timeout=10, + ) + + wait_for_condition( + lambda: requests.post("http://localhost:8000/typed_default").text == "DEFAULT", + timeout=10, + ) + + wait_for_condition( + lambda: requests.post("http://localhost:8000/typed_hello").text == "hello", + timeout=10, + ) + + @pytest.mark.skipif(sys.platform == "win32", reason="File path incorrect on Windows.") def test_config(ray_start_stop): """Deploys config and checks that `serve config` returns correct response.""" @@ -753,6 +801,72 @@ def test_run_deployment_node(ray_start_stop): assert ping_endpoint("Macaw") == CONNECTION_ERROR_MSG +@serve.deployment +class Echo: + def __init__(self, message: str): + print("Echo message:", message) + self._message = 
message + + def __call__(self, *args): + return self._message + + +def build_echo_app(args): + return Echo.bind(args.get("message", "DEFAULT")) + + +class TypedArgs(BaseModel): + message: str = "DEFAULT" + + +def build_echo_app_typed(args: TypedArgs): + return Echo.bind(args.message) + + +@pytest.mark.skipif(sys.platform == "win32", reason="File path incorrect on Windows.") +@pytest.mark.parametrize( + "import_path", + [ + "ray.serve.tests.test_cli.build_echo_app", + "ray.serve.tests.test_cli.build_echo_app_typed", + ], +) +def test_run_builder_with_args(ray_start_stop, import_path: str): + """Test `serve run` with args passed into a builder function. + + Tests both the untyped and typed args cases. + """ + # First deploy without any arguments, should get default response. + p = subprocess.Popen( + [ + "serve", + "run", + "--address=auto", + import_path, + ] + ) + wait_for_condition(lambda: ping_endpoint("") == "DEFAULT", timeout=10) + p.send_signal(signal.SIGINT) + p.wait() + assert ping_endpoint("") == CONNECTION_ERROR_MSG + + # Now deploy passing a message as an argument, should get passed message. 
+ p = subprocess.Popen( + [ + "serve", + "run", + "--address=auto", + import_path, + "message=hello world", + ] + ) + wait_for_condition(lambda: ping_endpoint("") == "hello world", timeout=10) + + p.send_signal(signal.SIGINT) + p.wait() + assert ping_endpoint("") == CONNECTION_ERROR_MSG + + @serve.deployment class MetalDetector: def __call__(self, *args): diff --git a/python/ray/serve/tests/test_config_files/apps_with_args.yaml b/python/ray/serve/tests/test_config_files/apps_with_args.yaml new file mode 100644 index 000000000000..f2b95e6e071a --- /dev/null +++ b/python/ray/serve/tests/test_config_files/apps_with_args.yaml @@ -0,0 +1,17 @@ +applications: + - name: untyped_default + route_prefix: /untyped_default + import_path: ray.serve.tests.test_config_files.arg_builders.build_echo_app + - name: untyped_hello + route_prefix: /untyped_hello + import_path: ray.serve.tests.test_config_files.arg_builders.build_echo_app + args: + message: hello + - name: typed_default + route_prefix: /typed_default + import_path: ray.serve.tests.test_config_files.arg_builders.build_echo_app_typed + - name: typed_hello + route_prefix: /typed_hello + import_path: ray.serve.tests.test_config_files.arg_builders.build_echo_app_typed + args: + message: hello diff --git a/python/ray/serve/tests/test_config_files/arg_builders.py b/python/ray/serve/tests/test_config_files/arg_builders.py new file mode 100644 index 000000000000..78bb5cf2b424 --- /dev/null +++ b/python/ray/serve/tests/test_config_files/arg_builders.py @@ -0,0 +1,25 @@ +from pydantic import BaseModel + +from ray import serve + + +class TypedArgs(BaseModel): + message: str = "DEFAULT" + + +@serve.deployment(ray_actor_options={"num_cpus": 0}) +class Echo: + def __init__(self, message: str): + print("Echo message:", message) + self._message = message + + def __call__(self, *args): + return self._message + + +def build_echo_app(args): + return Echo.bind(args.get("message", "DEFAULT")) + + +def build_echo_app_typed(args: TypedArgs): + 
return Echo.bind(args.message) From 2e97806d67de7b4cf073ed1fa42ebc98d7446b22 Mon Sep 17 00:00:00 2001 From: Max Pumperla Date: Fri, 21 Apr 2023 20:46:32 +0200 Subject: [PATCH 056/424] [docs] add click events for code blocks (#34623) --- doc/source/_static/js/custom.js | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/doc/source/_static/js/custom.js b/doc/source/_static/js/custom.js index 53369e1c855d..727459e9fea8 100644 --- a/doc/source/_static/js/custom.js +++ b/doc/source/_static/js/custom.js @@ -102,3 +102,22 @@ window.onload = function() { localStorage.removeItem("scroll"); } }; + +// When the document is fully loaded +document.addEventListener("DOMContentLoaded", function() { + // find all the code blocks' copy buttons + let codeButtons = document.querySelectorAll(".copybtn"); + for (let i = 0; i < codeButtons.length; i++) { + const button = codeButtons[i]; + // and add a click event listener to each one for Google Analytics. + button.addEventListener("click", function() { + gtag("event", "code_copy_click", { + "send_to": "UA-110413294-1", + "event_category": "ray_docs_copy_code", + "event_label": "URL: " + document.URL + + " Button: " + button.getAttribute("data-clipboard-target"), + "value": 1, + }); + }); + } +}); From 526151b16450a7c816ad0673888d7e17d6072745 Mon Sep 17 00:00:00 2001 From: Scott Lee Date: Fri, 21 Apr 2023 11:50:01 -0700 Subject: [PATCH 057/424] [Datasets] Support non-shuffle repartitioning in `Repartition` `LogicalOperator` (#34547) This is a followup for https://github.com/ray-project/ray/pull/32102, to support non-shuffle repartition in logical operator, as _internal/fast_repartition.py. 
Signed-off-by: Scott Lee --- .../split_repartition_task_scheduler.py | 136 ++++++++++++++++++ .../ray/data/_internal/planner/repartition.py | 22 ++- .../ray/data/tests/preprocessors/__init__.py | 0 .../data/tests/test_execution_optimizer.py | 76 +++++++--- 4 files changed, 211 insertions(+), 23 deletions(-) create mode 100644 python/ray/data/_internal/planner/exchange/split_repartition_task_scheduler.py create mode 100644 python/ray/data/tests/preprocessors/__init__.py diff --git a/python/ray/data/_internal/planner/exchange/split_repartition_task_scheduler.py b/python/ray/data/_internal/planner/exchange/split_repartition_task_scheduler.py new file mode 100644 index 000000000000..f0a2075e740d --- /dev/null +++ b/python/ray/data/_internal/planner/exchange/split_repartition_task_scheduler.py @@ -0,0 +1,136 @@ +from typing import Any, Dict, List, Optional, Tuple + +import ray +from ray.data._internal.execution.interfaces import RefBundle +from ray.data._internal.planner.exchange.interfaces import ExchangeTaskScheduler +from ray.data._internal.progress_bar import ProgressBar +from ray.data._internal.remote_fn import cached_remote_fn +from ray.data._internal.split import _split_at_indices +from ray.data._internal.stats import StatsDict +from ray.data.block import Block, BlockAccessor, BlockMetadata +from ray.types import ObjectRef + + +class SplitRepartitionTaskScheduler(ExchangeTaskScheduler): + """ + The split (non-shuffle) repartition scheduler. + + First, we calculate global splits needed to produce `output_num_blocks` blocks. + After the split blocks are generated accordingly, reduce tasks are scheduled + to combine split blocks together. 
+ """ + + def execute( + self, + refs: List[RefBundle], + output_num_blocks: int, + map_ray_remote_args: Optional[Dict[str, Any]] = None, + reduce_ray_remote_args: Optional[Dict[str, Any]] = None, + ) -> Tuple[List[RefBundle], StatsDict]: + input_num_rows = 0 + input_owned_by_consumer = True + for ref_bundle in refs: + block_num_rows = ref_bundle.num_rows() + if block_num_rows is None: + raise ValueError( + "Cannot split partition on blocks with unknown number of rows." + ) + input_num_rows += block_num_rows + if not ref_bundle.owns_blocks: + input_owned_by_consumer = False + + # Compute the (output_num_blocks-1) indices needed for + # an equal split of the input blocks. + indices = [] + cur_idx = 0 + for _ in range(output_num_blocks - 1): + cur_idx += input_num_rows / output_num_blocks + indices.append(int(cur_idx)) + assert len(indices) < output_num_blocks, (indices, output_num_blocks) + + if map_ray_remote_args is None: + map_ray_remote_args = {} + if reduce_ray_remote_args is None: + reduce_ray_remote_args = {} + if "scheduling_strategy" not in reduce_ray_remote_args: + reduce_ray_remote_args = reduce_ray_remote_args.copy() + reduce_ray_remote_args["scheduling_strategy"] = "SPREAD" + + blocks_with_metadata: List[Tuple[ObjectRef[Block], BlockMetadata]] = [] + for ref_bundle in refs: + blocks_with_metadata.extend(ref_bundle.blocks) + if indices: + split_return = _split_at_indices( + blocks_with_metadata, indices, input_owned_by_consumer + ) + split_block_refs, split_metadata = [], [] + for b, m in zip(*split_return): + split_block_refs.append(b) + split_metadata.extend(m) + else: + split_block_refs, split_metadata = [], [] + for b, m in blocks_with_metadata: + split_block_refs.append([b]) + split_metadata.append(m) + + reduce_bar = ProgressBar("Split Repartition", total=output_num_blocks) + reduce_task = cached_remote_fn(self._exchange_spec.reduce) + reduce_return = [ + reduce_task.options(**reduce_ray_remote_args, num_returns=2).remote( + 
*self._exchange_spec._reduce_args, + *split_block_refs[j], + ) + for j in range(output_num_blocks) + # Only process splits which contain blocks. + if len(split_block_refs[j]) > 0 + ] + + reduce_block_refs, reduce_metadata = zip(*reduce_return) + reduce_metadata = reduce_bar.fetch_until_complete(list(reduce_metadata)) + reduce_block_refs, reduce_metadata = list(reduce_block_refs), list( + reduce_metadata + ) + reduce_bar.close() + + # Handle empty blocks. + if len(reduce_block_refs) < output_num_blocks: + from ray.data._internal.arrow_block import ArrowBlockBuilder + from ray.data._internal.pandas_block import PandasBlockBuilder + from ray.data._internal.simple_block import SimpleBlockBuilder + + import pyarrow as pa + from ray.data._internal.pandas_block import PandasBlockSchema + + num_empty_blocks = output_num_blocks - len(reduce_block_refs) + first_block_schema = reduce_metadata[0].schema + if first_block_schema is None: + raise ValueError( + "Cannot split partition on blocks with unknown block format." + ) + elif isinstance(first_block_schema, type): + builder = SimpleBlockBuilder() + elif isinstance(first_block_schema, pa.Schema): + builder = ArrowBlockBuilder() + elif isinstance(first_block_schema, PandasBlockSchema): + builder = PandasBlockBuilder() + empty_block = builder.build() + empty_meta = BlockAccessor.for_block(empty_block).get_metadata( + input_files=None, exec_stats=None + ) # No stats for empty block. 
+ empty_block_refs, empty_metadata = zip( + *[(ray.put(empty_block), empty_meta) for _ in range(num_empty_blocks)] + ) + reduce_block_refs.extend(empty_block_refs) + reduce_metadata.extend(empty_metadata) + + output = [] + for block, meta in zip(reduce_block_refs, reduce_metadata): + output.append( + RefBundle([(block, meta)], owns_blocks=input_owned_by_consumer) + ) + stats = { + "split": split_metadata, + "reduce": reduce_metadata, + } + + return (output, stats) diff --git a/python/ray/data/_internal/planner/repartition.py b/python/ray/data/_internal/planner/repartition.py index 8f79b8bddc5f..544b0f83b06c 100644 --- a/python/ray/data/_internal/planner/repartition.py +++ b/python/ray/data/_internal/planner/repartition.py @@ -5,6 +5,10 @@ RefBundle, TaskContext, ) + +from ray.data._internal.planner.exchange.split_repartition_task_scheduler import ( + SplitRepartitionTaskScheduler, +) from ray.data._internal.planner.exchange.push_based_shuffle_task_scheduler import ( PushBasedShuffleTaskScheduler, ) @@ -20,11 +24,9 @@ def generate_repartition_fn( num_outputs: int, shuffle: bool, ) -> AllToAllTransformFn: - """Generate function to randomly shuffle each records of blocks.""" - # TODO: support non-shuffle repartition as _internal/fast_repartition.py. - assert shuffle, "Execution optimizer does not support non-shuffle repartition yet." 
+ """Generate function to partition each records of blocks.""" - def fn( + def shuffle_repartition_fn( refs: List[RefBundle], ctx: TaskContext, ) -> Tuple[List[RefBundle], StatsDict]: @@ -37,4 +39,14 @@ def fn( return scheduler.execute(refs, num_outputs) - return fn + def split_repartition_fn( + refs: List[RefBundle], + ctx: TaskContext, + ) -> Tuple[List[RefBundle], StatsDict]: + shuffle_spec = ShuffleTaskSpec(random_shuffle=False) + scheduler = SplitRepartitionTaskScheduler(shuffle_spec) + return scheduler.execute(refs, num_outputs) + + if shuffle: + return shuffle_repartition_fn + return split_repartition_fn diff --git a/python/ray/data/tests/preprocessors/__init__.py b/python/ray/data/tests/preprocessors/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/python/ray/data/tests/test_execution_optimizer.py b/python/ray/data/tests/test_execution_optimizer.py index c0c2d83ab28f..068a7849d953 100644 --- a/python/ray/data/tests/test_execution_optimizer.py +++ b/python/ray/data/tests/test_execution_optimizer.py @@ -49,6 +49,7 @@ _op_name_white_list, ) from ray.data._internal.planner.planner import Planner +from ray.data._internal.stats import DatastreamStats from ray.data.aggregate import Count from ray.data.datasource.parquet_datasource import ParquetDatasource @@ -259,10 +260,14 @@ def test_random_shuffle_e2e( _check_usage_record(["ReadRange", "RandomShuffle"]) -def test_repartition_operator(ray_start_regular_shared, enable_optimizer): +@pytest.mark.parametrize( + "shuffle", + [True, False], +) +def test_repartition_operator(ray_start_regular_shared, enable_optimizer, shuffle): planner = Planner() read_op = Read(ParquetDatasource()) - op = Repartition(read_op, num_outputs=5, shuffle=True) + op = Repartition(read_op, num_outputs=5, shuffle=shuffle) plan = LogicalPlan(op) physical_op = planner.plan(plan).dag @@ -271,25 +276,60 @@ def test_repartition_operator(ray_start_regular_shared, enable_optimizer): assert 
len(physical_op.input_dependencies) == 1 assert isinstance(physical_op.input_dependencies[0], MapOperator) - # Check error is thrown for non-shuffle repartition. - op = Repartition(read_op, num_outputs=5, shuffle=False) - plan = LogicalPlan(op) - with pytest.raises(AssertionError): - planner.plan(plan) - +@pytest.mark.parametrize( + "shuffle", + [True, False], +) def test_repartition_e2e( - ray_start_regular_shared, enable_optimizer, use_push_based_shuffle + ray_start_regular_shared, enable_optimizer, use_push_based_shuffle, shuffle ): - ds = ray.data.range(10000, parallelism=10) - ds1 = ds.repartition(20, shuffle=True) - assert ds1._block_num_rows() == [500] * 20, ds - - # Check error is thrown for non-shuffle repartition. - with pytest.raises(AssertionError): - ds.repartition(20, shuffle=False).take_all() - - _check_usage_record(["ReadRange", "Repartition"]) + def _check_repartition_usage_and_stats(ds): + _check_usage_record(["ReadRange", "Repartition"]) + + blocks = ray.get(ds.get_internal_block_refs()) + assert all(isinstance(block, list) for block in blocks), blocks + + ds_stats: DatastreamStats = ds._plan.stats() + assert ds_stats.base_name == "Repartition" + if shuffle: + assert "RepartitionMap" in ds_stats.stages + else: + assert "RepartitionSplit" in ds_stats.stages + assert "RepartitionReduce" in ds_stats.stages + + ds = ray.data.range(10000, parallelism=10).repartition(20, shuffle=shuffle) + assert ds.num_blocks() == 20, ds.num_blocks() + assert ds.sum() == sum(range(10000)) + assert ds._block_num_rows() == [500] * 20, ds._block_num_rows() + _check_repartition_usage_and_stats(ds) + + # Test num_output_blocks > num_rows to trigger empty block handling. 
+ ds = ray.data.range(20, parallelism=10).repartition(40, shuffle=shuffle) + assert ds.num_blocks() == 40, ds.num_blocks() + assert ds.sum() == sum(range(20)) + if shuffle: + assert ds._block_num_rows() == [10] * 2 + [0] * (40 - 2), ds._block_num_rows() + else: + assert ds._block_num_rows() == [1] * 20 + [0] * 20, ds._block_num_rows() + _check_repartition_usage_and_stats(ds) + + # Test case where number of rows does not divide equally into num_output_blocks. + ds = ray.data.range(22).repartition(4, shuffle=shuffle) + assert ds.num_blocks() == 4, ds.num_blocks() + assert ds.sum() == sum(range(22)) + if shuffle: + assert ds._block_num_rows() == [6, 6, 6, 4], ds._block_num_rows() + else: + assert ds._block_num_rows() == [5, 6, 5, 6], ds._block_num_rows() + _check_repartition_usage_and_stats(ds) + + # Test case where we do not split on repartitioning. + ds = ray.data.range(10, parallelism=1).repartition(1, shuffle=shuffle) + assert ds.num_blocks() == 1, ds.num_blocks() + assert ds.sum() == sum(range(10)) + assert ds._block_num_rows() == [10], ds._block_num_rows() + _check_repartition_usage_and_stats(ds) def test_read_map_batches_operator_fusion(ray_start_regular_shared, enable_optimizer): From 39e7f352e45fd80fda1c129c0ff3a8038ba5b16c Mon Sep 17 00:00:00 2001 From: Jiajun Yao Date: Fri, 21 Apr 2023 12:40:41 -0700 Subject: [PATCH 058/424] [docs] Fix broken links (#34665) --- doc/source/data/examples/batch_training.ipynb | 2 +- .../huggingface_text_classification.ipynb | 2 +- .../examples/tfx_tabular_train_to_serve.ipynb | 2 +- .../examples/torch_incremental_learning.ipynb | 4 ++-- doc/source/rllib/rllib-offline.rst | 2 +- doc/source/serve/tutorials/serve-ml-models.md | 16 ++++++++-------- 6 files changed, 14 insertions(+), 14 deletions(-) diff --git a/doc/source/data/examples/batch_training.ipynb b/doc/source/data/examples/batch_training.ipynb index 67c34134bac7..2f9742e6c26c 100644 --- a/doc/source/data/examples/batch_training.ipynb +++ 
b/doc/source/data/examples/batch_training.ipynb @@ -15,7 +15,7 @@ "source": [ "**Batch training** and tuning are common tasks in simple machine learning use-cases such as time series forecasting. They require fitting of simple models on data batches corresponding to different locations, products, etc. Batch training can take less time to process all the data at once, but only if those batches can run in parallel!\n", "\n", - "This notebook showcases how to conduct batch training regression algorithms from [XGBoost](https://docs.ray.io/en/latest/tune/examples/tune-xgboost.html) and [Scikit-learn](https://docs.ray.io/en/latest/ray-more-libs/joblib.html) with **[Ray Data](https://docs.ray.io/en/latest/data/data.html)**. **XGBoost** is a popular open-source library used for regression and classification. **Scikit-learn** is a popular open-source library with a vast assortment of well-known ML algorithms.\n", + "This notebook showcases how to conduct batch training regression algorithms from [XGBoost](https://docs.ray.io/en/latest/tune/examples/tune-xgboost.html) and [Scikit-learn](https://docs.ray.io/en/latest/ray-more-libs/joblib.html) with **[Ray Data](data)**. **XGBoost** is a popular open-source library used for regression and classification. 
**Scikit-learn** is a popular open-source library with a vast assortment of well-known ML algorithms.\n", "\n", "```{tip}\n", "The workload showcased in this notebook can be expressed using different Ray components, such as Ray Data, Ray Tune and Ray Core.\n", diff --git a/doc/source/ray-air/examples/huggingface_text_classification.ipynb b/doc/source/ray-air/examples/huggingface_text_classification.ipynb index 6031e7683c0a..ad52466f0d5d 100644 --- a/doc/source/ray-air/examples/huggingface_text_classification.ipynb +++ b/doc/source/ray-air/examples/huggingface_text_classification.ipynb @@ -433,7 +433,7 @@ "id": "256fOuzjhYbY" }, "source": [ - "For Ray AIR, instead of using 🤗 Dataset objects directly, we will convert them to [Ray Data](https://docs.ray.io/en/latest/data/data.html). Both are backed by Arrow tables, so the conversion is straightforward. We will use the built-in `ray.data.from_huggingface` function." + "For Ray AIR, instead of using 🤗 Dataset objects directly, we will convert them to [Ray Data](data). Both are backed by Arrow tables, so the conversion is straightforward. We will use the built-in `ray.data.from_huggingface` function." ] }, { diff --git a/doc/source/ray-air/examples/tfx_tabular_train_to_serve.ipynb b/doc/source/ray-air/examples/tfx_tabular_train_to_serve.ipynb index 1e7dab24f552..9acf35498ab8 100644 --- a/doc/source/ray-air/examples/tfx_tabular_train_to_serve.ipynb +++ b/doc/source/ray-air/examples/tfx_tabular_train_to_serve.ipynb @@ -14,7 +14,7 @@ "In this example, we showcase how to achieve the same tasks as the Keras Tutorial using [Ray AIR](https://docs.ray.io/en/latest/ray-air/getting-started.html), covering\n", "every step from data ingestion to pushing a model to serving.\n", "\n", - "1. Read a CSV into [Datastream](https://docs.ray.io/en/latest/data/data.html).\n", + "1. Read a CSV into [Datastream](datastream_concept).\n", "2. 
Process the dataset by chaining [Ray AIR preprocessors](https://docs.ray.io/en/latest/ray-air/getting-started.html#preprocessors).\n", "3. Train the model using the TensorflowTrainer from AIR.\n", "4. Serve the model using Ray Serve and the above preprocessors." diff --git a/doc/source/ray-air/examples/torch_incremental_learning.ipynb b/doc/source/ray-air/examples/torch_incremental_learning.ipynb index 8589bd9f37ec..40c38c2ef7c8 100644 --- a/doc/source/ray-air/examples/torch_incremental_learning.ipynb +++ b/doc/source/ray-air/examples/torch_incremental_learning.ipynb @@ -244,7 +244,7 @@ "source": [ "## 3a: Load MNIST Dataset to a Datastream\n", "\n", - "Let's first define a simple function that will return the original MNIST Dataset as a distributed Datastream. Ray Data is the standard way to load and exchange data in Ray libraries and applications, read more about them [here](https://docs.ray.io/en/latest/data/data.html)!\n", + "Let's first define a simple function that will return the original MNIST Dataset as a distributed Datastream. Ray Data is the standard way to load and exchange data in Ray libraries and applications, read more about the library [here](data)!\n", "\n", "The function in the below code snippet does the following:\n", "1. Downloads the MNIST Dataset from torchvision in-memory\n", @@ -254,7 +254,7 @@ "\n", "For this example, since we are just working with MNIST dataset, which is small, we use the {py:class}`~ray.data.datasource.from_torch` which just loads the full MNIST dataset into memory.\n", "\n", - "For loading larger datasets in a parallel fashion, you should use [Datastream's additional read APIs](https://docs.ray.io/en/master/data/data.html#supported-input-formats) to load data from parquet, csv, image files, and more!" + "For loading larger datasets in a parallel fashion, you should use [Datastream's additional read APIs](input-output) to load data from parquet, csv, image files, and more!" 
] }, { diff --git a/doc/source/rllib/rllib-offline.rst b/doc/source/rllib/rllib-offline.rst index aeee5aae8568..e15036780686 100644 --- a/doc/source/rllib/rllib-offline.rst +++ b/doc/source/rllib/rllib-offline.rst @@ -224,7 +224,7 @@ Ray Data Integration -------------------- RLlib has experimental support for reading/writing training samples from/to large offline datasets using -`Ray Data `__. +:ref:`Ray Data `. We support JSON and Parquet files today. Other file formats supported by Ray Data can also be easily added. Unlike JSON input, a single dataset can be automatically sharded and replayed by multiple rollout workers diff --git a/doc/source/serve/tutorials/serve-ml-models.md b/doc/source/serve/tutorials/serve-ml-models.md index f7cd000a7ee3..5ff6b1c2abf8 100644 --- a/doc/source/serve/tutorials/serve-ml-models.md +++ b/doc/source/serve/tutorials/serve-ml-models.md @@ -45,7 +45,7 @@ Next, we define a class `TFMnistModel` that will accept HTTP requests and run th :end-before: __doc_define_servable_end__ ``` -:::{note} +:::{note} When `TFMnistModel` is deployed and instantiated, it will load the Tensorflow model from your file system so that it can be ready to run inference on the model and serve requests later. ::: @@ -56,7 +56,7 @@ Now that we've defined our Serve deployment, let's prepare it so that it can be :end-before: __doc_deploy_end__ ``` -:::{note} +:::{note} `TFMnistModel.bind(TRAINED_MODEL_PATH)` binds the argument `TRAINED_MODEL_PATH` to our deployment and returns a `DeploymentNode` object (wrapping an `TFMnistModel` deployment object) that can then be used to connect with other `DeploymentNodes` to form a more complex [deployment graph](serve-model-composition-deployment-graph). ::: @@ -72,7 +72,7 @@ If you see the following error: ```console TypeError: Descriptors cannot not be created directly. If this call came from a _pb2.py file, your generated code is out of date and must be regenerated with protoc >= 3.19.0. 
- If you cannot immediately regenerate your protos, some other possible workarounds are: + If you cannot immediately regenerate your protos, some other possible workarounds are: 1. Downgrade the protobuf package to 3.20.x or lower. 2. Set PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=python (but this will use pure-Python parsing and will be much slower). ``` @@ -144,7 +144,7 @@ We define a class `ImageModel` that parses the input data, transforms the images :end-before: __doc_define_servable_end__ ``` -:::{note} +:::{note} When `ImageModel` is deployed and instantiated, it will load the resnet18 model from `torchvision` so that it can be ready to run inference on the model and serve requests later. ::: @@ -155,7 +155,7 @@ Now that we've defined our Serve deployment, let's prepare it so that it can be :end-before: __doc_deploy_end__ ``` -:::{note} +:::{note} `ImageModel.bind()` returns a `DeploymentNode` object (wrapping an `ImageModel` deployment object) that can then be used to connect with other `DeploymentNodes` to form a more complex [deployment graph](serve-model-composition-deployment-graph). ::: @@ -208,7 +208,7 @@ Open a new Python file called `tutorial_sklearn.py`. Let's import Ray Serve and **Train a Classifier** -We will train a classifier with the [iris dataset](https://scikit-learn.org/stable/auto_examples/datasets/plot_iris_data.html). +We will train a classifier with the [iris dataset](https://scikit-learn.org/stable/auto_examples/datasets/plot_iris_dataset.html). First, let's instantiate a `GradientBoostingClassifier` loaded from Scikit-Learn. @@ -243,7 +243,7 @@ We define a class `BoostingModel` that runs inference on the `GradientBoosingCla :end-before: __doc_define_servable_end__ ``` -:::{note} +:::{note} When `BoostingModel` is deployed and instantiated, it will load the classifier model that we trained from your file system so that it can be ready to run inference on the model and serve requests later. 
::: @@ -254,7 +254,7 @@ Now that we've defined our Serve deployment, let's prepare it so that it can be :end-before: __doc_deploy_end__ ``` -:::{note} +:::{note} `BoostingModel.bind(MODEL_PATH, LABEL_PATH)` binds the arguments `MODEL_PATH` and `LABEL_PATH` to our deployment and returns a `DeploymentNode` object (wrapping an `BoostingModel` deployment object) that can then be used to connect with other `DeploymentNodes` to form a more complex [deployment graph](serve-model-composition-deployment-graph). ::: From 741522b274b942b2395877bc97e766323c716a3c Mon Sep 17 00:00:00 2001 From: Max Pumperla Date: Fri, 21 Apr 2023 23:41:50 +0200 Subject: [PATCH 059/424] [docs] wrap autogenerated API nav items (#34047) --- doc/source/_static/css/custom.css | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/doc/source/_static/css/custom.css b/doc/source/_static/css/custom.css index d2e920761432..7586bdfae7ce 100644 --- a/doc/source/_static/css/custom.css +++ b/doc/source/_static/css/custom.css @@ -109,6 +109,12 @@ div.navbar-brand-box { display: flex; flex-direction: column; } + +.bd-sidebar li { + position: relative; + word-wrap: break-word; +} + nav.bd-links { overflow-y: auto; flex: 1; From 41604f01e6e580f05d5877fc7276793e3c56898f Mon Sep 17 00:00:00 2001 From: Max Pumperla Date: Fri, 21 Apr 2023 23:44:24 +0200 Subject: [PATCH 060/424] [docs] sphinx design 1/n (#34625) --- ci/ci.sh | 3 +- doc/requirements-doc.txt | 1 + doc/source/conf.py | 3 +- doc/source/data/examples/index.rst | 98 ++++--- doc/source/ray-air/check-ingest.rst | 202 +++++++------- doc/source/ray-air/computer-vision.rst | 367 +++++++++++++------------ doc/source/ray-air/getting-started.rst | 127 +++++---- doc/source/ray-air/predictors.rst | 31 ++- doc/source/ray-air/trainers.rst | 24 +- doc/source/ray-air/tuner.rst | 22 +- 10 files changed, 465 insertions(+), 413 deletions(-) diff --git a/ci/ci.sh b/ci/ci.sh index 47ae6224b830..98071a8bfeaa 100755 --- a/ci/ci.sh +++ b/ci/ci.sh @@ -308,7 +308,8 @@ 
build_sphinx_docs() { if [ "${OSTYPE}" = msys ]; then echo "WARNING: Documentation not built on Windows due to currently-unresolved issues" else - FAST=True make html + # TODO: revert to "make html" once "sphinx_panels" plugin is fully removed. + FAST=True make develop pip install datasets==2.0.0 RAY_MOCK_MODULES=0 RAY_DEDUP_LOGS=0 make doctest fi diff --git a/doc/requirements-doc.txt b/doc/requirements-doc.txt index 4172bc287d96..3aa5e69bf7bb 100644 --- a/doc/requirements-doc.txt +++ b/doc/requirements-doc.txt @@ -65,6 +65,7 @@ sphinxcontrib-redoc==1.6.0 sphinx-tabs==3.4.0 sphinx-remove-toctrees==0.0.3 autodoc_pydantic==1.6.1 +sphinx_design==0.4.1 # MyST myst-parser==0.15.2 diff --git a/doc/source/conf.py b/doc/source/conf.py index 28c50db32e28..1dbaeea85714 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -38,7 +38,6 @@ extensions = [ "callouts", # custom extension from _ext folder - "sphinx_panels", "sphinx.ext.autodoc", "sphinx.ext.viewcode", "sphinx.ext.napoleon", @@ -58,6 +57,8 @@ "sphinxcontrib.redoc", "sphinx_tabs.tabs", "sphinx_remove_toctrees", + "sphinx_panels", + "sphinx_design", ] # Prune deep toc-trees on demand for smaller html and faster builds. diff --git a/doc/source/data/examples/index.rst b/doc/source/data/examples/index.rst index 62750e4885eb..0f815bd224f7 100644 --- a/doc/source/data/examples/index.rst +++ b/doc/source/data/examples/index.rst @@ -17,50 +17,68 @@ modalities and types. Here you will find a few end-to-end examples of some basic processing with Ray Data on tabular data, text (coming soon!), and imagery (coming soon!). -.. panels:: - :container: container pb-4 - :column: col-md-4 px-2 py-2 - :img-top-cls: pt-5 w-75 d-block mx-auto - - --- - :img-top: /images/taxi.png - - +++ - .. link-button:: nyc_taxi_basic_processing - :type: ref - :text: Processing the NYC taxi dataset - :classes: btn-link btn-block stretched-link - --- - :img-top: /images/taxi.png - - +++ - .. 
link-button:: batch_training - :type: ref - :text: Batch Training with Ray Data - :classes: btn-link btn-block stretched-link - --- - :img-top: /images/ocr.jpg - - +++ - .. link-button:: ocr_example - :type: ref - :text: Scaling OCR with Ray Data - :classes: btn-link btn-block stretched-link +.. grid:: 3 + :gutter: 2 + :class-container: container pb-4 + + .. grid-item-card:: + :img-top: /images/taxi.png + :class-img-top: pt-5 w-75 d-block mx-auto + + +++ + .. button-ref:: nyc_taxi_basic_processing + :ref-type: doc + :color: primary + :outline: + :expand: + + Processing the NYC taxi dataset + + .. grid-item-card:: + :img-top: /images/taxi.png + :class-img-top: pt-5 w-75 d-block mx-auto + + +++ + .. button-ref:: batch_training + :ref-type: doc + :color: primary + :outline: + :expand: + + Batch Training with Ray Data + + .. grid-item-card:: + :img-top: /images/ocr.jpg + :class-img-top: pt-5 w-75 d-block mx-auto + + +++ + .. button-ref:: ocr_example + :ref-type: doc + :color: primary + :outline: + :expand: + + Scaling OCR with Ray Data + Other Examples -------------- -.. panels:: - :container: container pb-4 - :column: col-md-4 px-2 py-2 - :img-top-cls: pt-5 w-75 d-block mx-auto - --- - :img-top: ../images/datastream-arch.svg +.. grid:: 3 + :gutter: 2 + :class-container: container pb-4 + + .. grid-item-card:: + :img-top: ../images/datastream-arch.svg + :class-img-top: pt-5 w-75 d-block mx-auto + + +++ + .. button-ref:: random-access + :ref-type: doc + :color: primary + :outline: + :expand: - +++ - .. 
link-button:: random-access - :type: ref - :text: Random Data Access (Experimental) - :classes: btn-link btn-block stretched-link + Random Data Access (Experimental) diff --git a/doc/source/ray-air/check-ingest.rst b/doc/source/ray-air/check-ingest.rst index 1716bd341080..6984c2fad7f0 100644 --- a/doc/source/ray-air/check-ingest.rst +++ b/doc/source/ray-air/check-ingest.rst @@ -74,57 +74,59 @@ Here are some examples of configuring Dataset ingest options and what they do: Enabling Streaming Ingest ~~~~~~~~~~~~~~~~~~~~~~~~~ -.. tabbed:: Bulk Ingest +.. tab-set:: - By default, AIR loads all datasets into the Ray object store at the start of training. - This provides the best performance if the cluster can fit the datasets - entirely in memory, or if the preprocessing step is expensive to run more than once. + .. tab-item:: Bulk Ingest - .. literalinclude:: doc_code/air_ingest.py - :language: python - :start-after: __config_4__ - :end-before: __config_4_end__ + By default, AIR loads all datasets into the Ray object store at the start of training. + This provides the best performance if the cluster can fit the datasets + entirely in memory, or if the preprocessing step is expensive to run more than once. - You should use bulk ingest when: + .. literalinclude:: doc_code/air_ingest.py + :language: python + :start-after: __config_4__ + :end-before: __config_4_end__ - * you have enough memory to fit data blocks in cluster object store; or - * your preprocessing transform is expensive to recompute on each epoch + You should use bulk ingest when: -.. tabbed:: Streaming Ingest (experimental) + * you have enough memory to fit data blocks in cluster object store; or + * your preprocessing transform is expensive to recompute on each epoch - In streaming ingest mode, instead of loading the entire dataset into the - Ray object store at once, AIR will load a fraction of the dataset at a - time. 
This can be desirable when the dataset is very large, and caching it - all at once would cause expensive disk spilling. The downside is that the - dataset will have to be preprocessed on each epoch, which may be more - expensive. Preprocessing is overlapped with training computation, but - overall training throughput may still decrease if preprocessing is more - expensive than the training computation (forward pass, backward pass, - gradient sync). + .. tab-item:: Streaming Ingest (experimental) - To enable this mode, use the :py:meth:`max_object_store_memory_fraction - ` argument. This argument defaults to -1, - meaning that bulk ingest should be used and the entire dataset should be - computed and cached before training starts. + In streaming ingest mode, instead of loading the entire dataset into the + Ray object store at once, AIR will load a fraction of the dataset at a + time. This can be desirable when the dataset is very large, and caching it + all at once would cause expensive disk spilling. The downside is that the + dataset will have to be preprocessed on each epoch, which may be more + expensive. Preprocessing is overlapped with training computation, but + overall training throughput may still decrease if preprocessing is more + expensive than the training computation (forward pass, backward pass, + gradient sync). - Use a float value 0 or greater to indicate the "window" size, i.e. the - maximum fraction of object store memory that should be used at once. A - reasonable value is 0.2, meaning 20% of available object store memory. - Larger window sizes can improve performance by increasing parallelism. A - window size of 1 or greater will likely result in spilling. + To enable this mode, use the :py:meth:`max_object_store_memory_fraction + ` argument. This argument defaults to -1, + meaning that bulk ingest should be used and the entire dataset should be + computed and cached before training starts. - .. 
literalinclude:: doc_code/air_ingest.py - :language: python - :start-after: __config_5__ - :end-before: __config_5_end__ + Use a float value 0 or greater to indicate the "window" size, i.e. the + maximum fraction of object store memory that should be used at once. A + reasonable value is 0.2, meaning 20% of available object store memory. + Larger window sizes can improve performance by increasing parallelism. A + window size of 1 or greater will likely result in spilling. - Use streaming ingest when: + .. literalinclude:: doc_code/air_ingest.py + :language: python + :start-after: __config_5__ + :end-before: __config_5_end__ - * you have large datasets that don't fit into memory; and - * re-executing the preprocessing step on each epoch is faster than caching the preprocessed dataset on disk and reloading from disk on each epoch + Use streaming ingest when: - Note that this feature is experimental and the actual object store memory - usage may vary. Please file a `GitHub issue `_ if you run into problems. + * you have large datasets that don't fit into memory; and + * re-executing the preprocessing step on each epoch is faster than caching the preprocessed dataset on disk and reloading from disk on each epoch + + Note that this feature is experimental and the actual object store memory + usage may vary. Please file a `GitHub issue `_ if you run into problems. .. _air-shuffle: @@ -138,50 +140,52 @@ By default, AIR shuffles the assignment of data blocks (files) to dataset shards To randomize data records within a file, perform a local or global shuffle. -.. tabbed:: Local Shuffling +.. tab-set:: + + .. tab-item:: Local Shuffling - Local shuffling is the recommended approach for randomizing data order. To use local shuffle, - simply specify a non-zero ``local_shuffle_buffer_size`` as an argument to :meth:`~ray.data.DataIterator.iter_batches`. - The iterator will then use a local buffer of the given size to randomize record order. 
The - larger the buffer size, the more randomization will be applied, but it will also use more - memory. + Local shuffling is the recommended approach for randomizing data order. To use local shuffle, + simply specify a non-zero ``local_shuffle_buffer_size`` as an argument to :meth:`~ray.data.DataIterator.iter_batches`. + The iterator will then use a local buffer of the given size to randomize record order. The + larger the buffer size, the more randomization will be applied, but it will also use more + memory. - See :meth:`~ray.data.DataIterator.iter_batches` for more details. + See :meth:`~ray.data.DataIterator.iter_batches` for more details. - .. literalinclude:: doc_code/air_ingest.py - :language: python - :start-after: __local_shuffling_start__ - :end-before: __local_shuffling_end__ + .. literalinclude:: doc_code/air_ingest.py + :language: python + :start-after: __local_shuffling_start__ + :end-before: __local_shuffling_end__ - You should use local shuffling when: + You should use local shuffling when: - * a small in-memory buffer provides enough randomization; or - * you want the highest possible ingest performance; or - * your model is not overly sensitive to shuffle quality + * a small in-memory buffer provides enough randomization; or + * you want the highest possible ingest performance; or + * your model is not overly sensitive to shuffle quality -.. tabbed:: Global Shuffling (slower) + .. tab-item:: Global Shuffling (slower) - Global shuffling provides more uniformly random (decorrelated) samples and is carried - out via a distributed map-reduce operation. This higher quality shuffle can often lead - to more precision gain per training step, but it is also an expensive distributed - operation and will decrease the ingest throughput. 
The shuffle step is overlapped with - training computation, so as long as the shuffled ingest throughput matches - or exceeds the model training (forward pass, backward pass, gradient sync) - throughput, this higher-quality shuffle shouldn't slow down the overall - training. + Global shuffling provides more uniformly random (decorrelated) samples and is carried + out via a distributed map-reduce operation. This higher quality shuffle can often lead + to more precision gain per training step, but it is also an expensive distributed + operation and will decrease the ingest throughput. The shuffle step is overlapped with + training computation, so as long as the shuffled ingest throughput matches + or exceeds the model training (forward pass, backward pass, gradient sync) + throughput, this higher-quality shuffle shouldn't slow down the overall + training. - If global shuffling *is* causing the ingest throughput to become the training - bottleneck, local shuffling may be a better option. + If global shuffling *is* causing the ingest throughput to become the training + bottleneck, local shuffling may be a better option. - .. literalinclude:: doc_code/air_ingest.py - :language: python - :start-after: __global_shuffling_start__ - :end-before: __global_shuffling_end__ + .. literalinclude:: doc_code/air_ingest.py + :language: python + :start-after: __global_shuffling_start__ + :end-before: __global_shuffling_end__ - You should use global shuffling when: + You should use global shuffling when: - * you suspect high-quality shuffles may significantly improve model quality; and - * absolute ingest performance is less of a concern + * you suspect high-quality shuffles may significantly improve model quality; and + * absolute ingest performance is less of a concern .. _air-per-epoch-preprocessing: @@ -240,43 +244,45 @@ Dataset Resources Datasets uses Ray tasks to execute data processing operations. 
These tasks use CPU resources in the cluster during execution, which may compete with resources needed for Training. -.. tabbed:: Unreserved CPUs +.. tab-set:: + + .. tab-item:: Unreserved CPUs - By default, Dataset tasks use cluster CPU resources for execution. This can sometimes - conflict with Trainer resource requests. For example, if Trainers allocate all CPU resources - in the cluster, then no Datasets tasks can run. + By default, Dataset tasks use cluster CPU resources for execution. This can sometimes + conflict with Trainer resource requests. For example, if Trainers allocate all CPU resources + in the cluster, then no Datasets tasks can run. - .. literalinclude:: ./doc_code/air_ingest.py - :language: python - :start-after: __resource_allocation_1_begin__ - :end-before: __resource_allocation_1_end__ + .. literalinclude:: ./doc_code/air_ingest.py + :language: python + :start-after: __resource_allocation_1_begin__ + :end-before: __resource_allocation_1_end__ - Unreserved CPUs work well when: + Unreserved CPUs work well when: - * you are running only one Trainer and the cluster has enough CPUs; or - * your Trainers are configured to use GPUs and not CPUs + * you are running only one Trainer and the cluster has enough CPUs; or + * your Trainers are configured to use GPUs and not CPUs -.. tabbed:: Using Reserved CPUs (experimental) + .. tab-item:: Using Reserved CPUs (experimental) - The ``_max_cpu_fraction_per_node`` option can be used to exclude CPUs from placement - group scheduling. In the below example, setting this parameter to ``0.8`` enables Tune - trials to run smoothly without risk of deadlock by reserving 20% of node CPUs for - Dataset execution. + The ``_max_cpu_fraction_per_node`` option can be used to exclude CPUs from placement + group scheduling. In the below example, setting this parameter to ``0.8`` enables Tune + trials to run smoothly without risk of deadlock by reserving 20% of node CPUs for + Dataset execution. - .. 
literalinclude:: ./doc_code/air_ingest.py - :language: python - :start-after: __resource_allocation_2_begin__ - :end-before: __resource_allocation_2_end__ + .. literalinclude:: ./doc_code/air_ingest.py + :language: python + :start-after: __resource_allocation_2_begin__ + :end-before: __resource_allocation_2_end__ - You should use reserved CPUs when: + You should use reserved CPUs when: - * you are running multiple concurrent CPU Trainers using Tune; or - * you want to ensure predictable Datasets performance + * you are running multiple concurrent CPU Trainers using Tune; or + * you want to ensure predictable Datasets performance - .. warning:: + .. warning:: - ``_max_cpu_fraction_per_node`` is experimental and not currently recommended for use with - autoscaling clusters (scale-up will not trigger properly). + ``_max_cpu_fraction_per_node`` is experimental and not currently recommended for use with + autoscaling clusters (scale-up will not trigger properly). Debugging Ingest with the ``DummyTrainer`` ------------------------------------------ diff --git a/doc/source/ray-air/computer-vision.rst b/doc/source/ray-air/computer-vision.rst index 2fef456f0ee1..de89fd60b596 100644 --- a/doc/source/ray-air/computer-vision.rst +++ b/doc/source/ray-air/computer-vision.rst @@ -14,104 +14,106 @@ This guide explains how to perform common computer vision tasks like: Reading image data ------------------ -.. tabbed:: Raw images +.. tab-set:: - Datasets like ImageNet store files like this: + .. tab-item:: Raw images - .. code-block:: + Datasets like ImageNet store files like this: - root/dog/xxx.png - root/dog/xxy.png - root/dog/[...]/xxz.png + .. code-block:: - root/cat/123.png - root/cat/nsdf3.png - root/cat/[...]/asd932_.png + root/dog/xxx.png + root/dog/xxy.png + root/dog/[...]/xxz.png - To load images stored in this layout, read the raw images and include the - class names. + root/cat/123.png + root/cat/nsdf3.png + root/cat/[...]/asd932_.png - .. 
literalinclude:: ./doc_code/computer_vision.py - :start-after: __read_images1_start__ - :end-before: __read_images1_stop__ - :dedent: + To load images stored in this layout, read the raw images and include the + class names. - Then, apply a :ref:`user-defined function ` to - encode the class names as integer targets. + .. literalinclude:: ./doc_code/computer_vision.py + :start-after: __read_images1_start__ + :end-before: __read_images1_stop__ + :dedent: - .. literalinclude:: ./doc_code/computer_vision.py - :start-after: __read_images2_start__ - :end-before: __read_images2_stop__ - :dedent: + Then, apply a :ref:`user-defined function ` to + encode the class names as integer targets. - .. tip:: + .. literalinclude:: ./doc_code/computer_vision.py + :start-after: __read_images2_start__ + :end-before: __read_images2_stop__ + :dedent: - You can also use :class:`~ray.data.preprocessors.LabelEncoder` to encode labels. + .. tip:: -.. tabbed:: NumPy + You can also use :class:`~ray.data.preprocessors.LabelEncoder` to encode labels. - To load NumPy arrays into a :class:`~ray.data.Datastream`, separately read the image and label arrays. + .. tab-item:: NumPy - .. literalinclude:: ./doc_code/computer_vision.py - :start-after: __read_numpy1_start__ - :end-before: __read_numpy1_stop__ - :dedent: + To load NumPy arrays into a :class:`~ray.data.Datastream`, separately read the image and label arrays. - Then, combine the datasets and rename the columns. + .. literalinclude:: ./doc_code/computer_vision.py + :start-after: __read_numpy1_start__ + :end-before: __read_numpy1_stop__ + :dedent: - .. literalinclude:: ./doc_code/computer_vision.py - :start-after: __read_numpy2_start__ - :end-before: __read_numpy2_stop__ - :dedent: + Then, combine the datasets and rename the columns. -.. tabbed:: TFRecords + .. 
literalinclude:: ./doc_code/computer_vision.py + :start-after: __read_numpy2_start__ + :end-before: __read_numpy2_stop__ + :dedent: - Image datasets often contain ``tf.train.Example`` messages that look like this: + .. tab-item:: TFRecords - .. code-block:: + Image datasets often contain ``tf.train.Example`` messages that look like this: - features { - feature { - key: "image" - value { - bytes_list { - value: ... # Raw image bytes + .. code-block:: + + features { + feature { + key: "image" + value { + bytes_list { + value: ... # Raw image bytes + } } } - } - feature { - key: "label" - value { - int64_list { - value: 3 + feature { + key: "label" + value { + int64_list { + value: 3 + } } } } - } - To load examples stored in this format, read the TFRecords into a :class:`~ray.data.Datastream`. + To load examples stored in this format, read the TFRecords into a :class:`~ray.data.Datastream`. - .. literalinclude:: ./doc_code/computer_vision.py - :start-after: __read_tfrecords1_start__ - :end-before: __read_tfrecords1_stop__ - :dedent: + .. literalinclude:: ./doc_code/computer_vision.py + :start-after: __read_tfrecords1_start__ + :end-before: __read_tfrecords1_stop__ + :dedent: - Then, apply a :ref:`user-defined function ` to - decode the raw image bytes. + Then, apply a :ref:`user-defined function ` to + decode the raw image bytes. - .. literalinclude:: ./doc_code/computer_vision.py - :start-after: __read_tfrecords2_start__ - :end-before: __read_tfrecords2_stop__ - :dedent: + .. literalinclude:: ./doc_code/computer_vision.py + :start-after: __read_tfrecords2_start__ + :end-before: __read_tfrecords2_stop__ + :dedent: -.. tabbed:: Parquet + .. tab-item:: Parquet - To load image data stored in Parquet files, call :func:`ray.data.read_parquet`. + To load image data stored in Parquet files, call :func:`ray.data.read_parquet`. - .. literalinclude:: ./doc_code/computer_vision.py - :start-after: __read_parquet_start__ - :end-before: __read_parquet_stop__ - :dedent: + .. 
literalinclude:: ./doc_code/computer_vision.py + :start-after: __read_parquet_start__ + :end-before: __read_parquet_stop__ + :dedent: For more information on creating datastreams, see :ref:`Creating Datastreams `. @@ -123,33 +125,35 @@ Transforming images To transform images, create a :class:`~ray.data.preprocessor.Preprocessor`. They're the standard way to preprocess data with Ray. -.. tabbed:: Torch +.. tab-set:: + + .. tab-item:: Torch - To apply TorchVision transforms, create a :class:`~ray.data.preprocessors.TorchVisionPreprocessor`. + To apply TorchVision transforms, create a :class:`~ray.data.preprocessors.TorchVisionPreprocessor`. - Create two :class:`TorchVisionPreprocessors ` - -- one to normalize images, and another to augment images. Later, you'll pass the preprocessors to :class:`Trainers `, - :class:`Predictors `, and - :class:`PredictorDeployments `. + Create two :class:`TorchVisionPreprocessors ` + -- one to normalize images, and another to augment images. Later, you'll pass the preprocessors to :class:`Trainers `, + :class:`Predictors `, and + :class:`PredictorDeployments `. - .. literalinclude:: ./doc_code/computer_vision.py - :start-after: __torch_preprocessors_start__ - :end-before: __torch_preprocessors_stop__ - :dedent: + .. literalinclude:: ./doc_code/computer_vision.py + :start-after: __torch_preprocessors_start__ + :end-before: __torch_preprocessors_stop__ + :dedent: -.. tabbed:: TensorFlow + .. tab-item:: TensorFlow - To apply TorchVision transforms, create a :class:`~ray.data.preprocessors.BatchMapper`. + To apply TorchVision transforms, create a :class:`~ray.data.preprocessors.BatchMapper`. - Create two :class:`~ray.data.preprocessors.BatchMapper` -- one to normalize images, and another to - augment images. Later, you'll pass the preprocessors to :class:`Trainers `, - :class:`Predictors `, and - :class:`PredictorDeployments `. 
+ Create two :class:`~ray.data.preprocessors.BatchMapper` -- one to normalize images, and another to + augment images. Later, you'll pass the preprocessors to :class:`Trainers `, + :class:`Predictors `, and + :class:`PredictorDeployments `. - .. literalinclude:: ./doc_code/computer_vision.py - :start-after: __tensorflow_preprocessors_start__ - :end-before: __tensorflow_preprocessors_stop__ - :dedent: + .. literalinclude:: ./doc_code/computer_vision.py + :start-after: __tensorflow_preprocessors_start__ + :end-before: __tensorflow_preprocessors_stop__ + :dedent: For more information on transforming data, see :ref:`Using Preprocessors ` and @@ -160,44 +164,46 @@ Training vision models :class:`Trainers ` let you train models in parallel. -.. tabbed:: Torch +.. tab-set:: - To train a vision model, define the training loop per worker. + .. tab-item:: Torch - .. literalinclude:: ./doc_code/computer_vision.py - :start-after: __torch_training_loop_start__ - :end-before: __torch_training_loop_stop__ - :dedent: + To train a vision model, define the training loop per worker. - Then, create a :class:`~ray.train.torch.TorchTrainer` and call - :meth:`~ray.train.torch.TorchTrainer.fit`. + .. literalinclude:: ./doc_code/computer_vision.py + :start-after: __torch_training_loop_start__ + :end-before: __torch_training_loop_stop__ + :dedent: - .. literalinclude:: ./doc_code/computer_vision.py - :start-after: __torch_trainer_start__ - :end-before: __torch_trainer_stop__ - :dedent: + Then, create a :class:`~ray.train.torch.TorchTrainer` and call + :meth:`~ray.train.torch.TorchTrainer.fit`. - For more in-depth examples, read :doc:`/ray-air/examples/torch_image_example` and - :ref:`Using Trainers `. + .. literalinclude:: ./doc_code/computer_vision.py + :start-after: __torch_trainer_start__ + :end-before: __torch_trainer_stop__ + :dedent: -.. tabbed:: TensorFlow + For more in-depth examples, read :doc:`/ray-air/examples/torch_image_example` and + :ref:`Using Trainers `. 
- To train a vision model, define the training loop per worker. + .. tab-item:: TensorFlow - .. literalinclude:: ./doc_code/computer_vision.py - :start-after: __tensorflow_training_loop_start__ - :end-before: __tensorflow_training_loop_stop__ - :dedent: + To train a vision model, define the training loop per worker. - Then, create a :class:`~ray.train.tensorflow.TensorflowTrainer` and call - :meth:`~ray.train.tensorflow.TensorflowTrainer.fit`. + .. literalinclude:: ./doc_code/computer_vision.py + :start-after: __tensorflow_training_loop_start__ + :end-before: __tensorflow_training_loop_stop__ + :dedent: - .. literalinclude:: ./doc_code/computer_vision.py - :start-after: __tensorflow_trainer_start__ - :end-before: __tensorflow_trainer_stop__ - :dedent: + Then, create a :class:`~ray.train.tensorflow.TensorflowTrainer` and call + :meth:`~ray.train.tensorflow.TensorflowTrainer.fit`. - For more information, read :ref:`Using Trainers `. + .. literalinclude:: ./doc_code/computer_vision.py + :start-after: __tensorflow_trainer_start__ + :end-before: __tensorflow_trainer_stop__ + :dedent: + + For more information, read :ref:`Using Trainers `. Creating checkpoints -------------------- @@ -210,27 +216,29 @@ If you're going from training to prediction, don't create a new checkpoint. :class:`~ray.air.result.Result` object. Use :attr:`Result.checkpoint ` instead. -.. tabbed:: Torch +.. tab-set:: + + .. tab-item:: Torch - To create a :class:`~ray.train.torch.TorchCheckpoint`, pass a Torch model and - the :class:`~ray.data.preprocessor.Preprocessor` you created in `Transforming images`_ - to :meth:`TorchCheckpoint.from_model() `. + To create a :class:`~ray.train.torch.TorchCheckpoint`, pass a Torch model and + the :class:`~ray.data.preprocessor.Preprocessor` you created in `Transforming images`_ + to :meth:`TorchCheckpoint.from_model() `. - .. 
literalinclude:: ./doc_code/computer_vision.py - :start-after: __torch_checkpoint_start__ - :end-before: __torch_checkpoint_stop__ - :dedent: + .. literalinclude:: ./doc_code/computer_vision.py + :start-after: __torch_checkpoint_start__ + :end-before: __torch_checkpoint_stop__ + :dedent: -.. tabbed:: TensorFlow + .. tab-item:: TensorFlow - To create a :class:`~ray.train.tensorflow.TensorflowCheckpoint`, pass a TensorFlow model and - the :class:`~ray.data.preprocessor.Preprocessor` you created in `Transforming images`_ - to :meth:`TensorflowCheckpoint.from_model() `. + To create a :class:`~ray.train.tensorflow.TensorflowCheckpoint`, pass a TensorFlow model and + the :class:`~ray.data.preprocessor.Preprocessor` you created in `Transforming images`_ + to :meth:`TensorflowCheckpoint.from_model() `. - .. literalinclude:: ./doc_code/computer_vision.py - :start-after: __tensorflow_checkpoint_start__ - :end-before: __tensorflow_checkpoint_stop__ - :dedent: + .. literalinclude:: ./doc_code/computer_vision.py + :start-after: __tensorflow_checkpoint_start__ + :end-before: __tensorflow_checkpoint_stop__ + :dedent: Batch predicting images @@ -239,32 +247,34 @@ Batch predicting images :class:`~ray.train.batch_predictor.BatchPredictor` lets you perform inference on large image datasets. -.. tabbed:: Torch +.. tab-set:: + + .. tab-item:: Torch - To create a :class:`~ray.train.batch_predictor.BatchPredictor`, call - :meth:`BatchPredictor.from_checkpoint ` and pass the checkpoint - you created in `Creating checkpoints`_. + To create a :class:`~ray.train.batch_predictor.BatchPredictor`, call + :meth:`BatchPredictor.from_checkpoint ` and pass the checkpoint + you created in `Creating checkpoints`_. - .. literalinclude:: ./doc_code/computer_vision.py - :start-after: __torch_batch_predictor_start__ - :end-before: __torch_batch_predictor_stop__ - :dedent: + .. 
literalinclude:: ./doc_code/computer_vision.py + :start-after: __torch_batch_predictor_start__ + :end-before: __torch_batch_predictor_stop__ + :dedent: - For more in-depth examples, read :doc:`/ray-air/examples/pytorch_resnet_batch_prediction` - and :ref:`Using Predictors for Inference `. + For more in-depth examples, read :doc:`/ray-air/examples/pytorch_resnet_batch_prediction` + and :ref:`Using Predictors for Inference `. -.. tabbed:: TensorFlow + .. tab-item:: TensorFlow - To create a :class:`~ray.train.batch_predictor.BatchPredictor`, call - :meth:`BatchPredictor.from_checkpoint ` and pass the checkpoint - you created in `Creating checkpoints`_. + To create a :class:`~ray.train.batch_predictor.BatchPredictor`, call + :meth:`BatchPredictor.from_checkpoint ` and pass the checkpoint + you created in `Creating checkpoints`_. - .. literalinclude:: ./doc_code/computer_vision.py - :start-after: __tensorflow_batch_predictor_start__ - :end-before: __tensorflow_batch_predictor_stop__ - :dedent: + .. literalinclude:: ./doc_code/computer_vision.py + :start-after: __tensorflow_batch_predictor_start__ + :end-before: __tensorflow_batch_predictor_stop__ + :dedent: - For more information, read :ref:`Using Predictors for Inference `. + For more information, read :ref:`Using Predictors for Inference `. Serving vision models --------------------- @@ -286,44 +296,45 @@ To NumPy ndarrays like this: array([[1., 2.], [3., 4.]]) +.. tab-set:: -.. tabbed:: Torch + .. tab-item:: Torch - To deploy a Torch model to an endpoint, pass the checkpoint you created in `Creating checkpoints`_ - to :meth:`PredictorDeployment.bind ` and specify - :func:`~ray.serve.http_adapters.json_to_ndarray` as the HTTP adapter. + To deploy a Torch model to an endpoint, pass the checkpoint you created in `Creating checkpoints`_ + to :meth:`PredictorDeployment.bind ` and specify + :func:`~ray.serve.http_adapters.json_to_ndarray` as the HTTP adapter. - .. 
literalinclude:: ./doc_code/computer_vision.py - :start-after: __torch_serve_start__ - :end-before: __torch_serve_stop__ - :dedent: + .. literalinclude:: ./doc_code/computer_vision.py + :start-after: __torch_serve_start__ + :end-before: __torch_serve_stop__ + :dedent: - Then, make a request to classify an image. + Then, make a request to classify an image. - .. literalinclude:: ./doc_code/computer_vision.py - :start-after: __torch_online_predict_start__ - :end-before: __torch_online_predict_stop__ - :dedent: + .. literalinclude:: ./doc_code/computer_vision.py + :start-after: __torch_online_predict_start__ + :end-before: __torch_online_predict_stop__ + :dedent: - For more in-depth examples, read :doc:`/ray-air/examples/torch_image_example` - and :doc:`/ray-air/examples/serving_guide`. + For more in-depth examples, read :doc:`/ray-air/examples/torch_image_example` + and :doc:`/ray-air/examples/serving_guide`. -.. tabbed:: TensorFlow + .. tab-item:: TensorFlow - To deploy a TensorFlow model to an endpoint, pass the checkpoint you created in `Creating checkpoints`_ - to :meth:`PredictorDeployment.bind ` and specify - :func:`~ray.serve.http_adapters.json_to_multi_ndarray` as the HTTP adapter. + To deploy a TensorFlow model to an endpoint, pass the checkpoint you created in `Creating checkpoints`_ + to :meth:`PredictorDeployment.bind ` and specify + :func:`~ray.serve.http_adapters.json_to_multi_ndarray` as the HTTP adapter. - .. literalinclude:: ./doc_code/computer_vision.py - :start-after: __tensorflow_serve_start__ - :end-before: __tensorflow_serve_stop__ - :dedent: + .. literalinclude:: ./doc_code/computer_vision.py + :start-after: __tensorflow_serve_start__ + :end-before: __tensorflow_serve_stop__ + :dedent: - Then, make a request to classify an image. + Then, make a request to classify an image. - .. literalinclude:: ./doc_code/computer_vision.py - :start-after: __tensorflow_online_predict_start__ - :end-before: __tensorflow_online_predict_stop__ - :dedent: + .. 
literalinclude:: ./doc_code/computer_vision.py + :start-after: __tensorflow_online_predict_start__ + :end-before: __tensorflow_online_predict_stop__ + :dedent: - For more information, read :doc:`/ray-air/examples/serving_guide`. + For more information, read :doc:`/ray-air/examples/serving_guide`. diff --git a/doc/source/ray-air/getting-started.rst b/doc/source/ray-air/getting-started.rst index aa998f69dc71..abb5fc29068a 100644 --- a/doc/source/ray-air/getting-started.rst +++ b/doc/source/ray-air/getting-started.rst @@ -84,78 +84,84 @@ First, let's start by loading a dataset from storage: Then, we define a ``Preprocessor`` pipeline for our task: -.. tabbed:: XGBoost +.. tab-set:: - .. literalinclude:: examples/xgboost_starter.py - :language: python - :start-after: __air_xgb_preprocess_start__ - :end-before: __air_xgb_preprocess_end__ + .. tab-item:: XGBoost -.. tabbed:: Pytorch + .. literalinclude:: examples/xgboost_starter.py + :language: python + :start-after: __air_xgb_preprocess_start__ + :end-before: __air_xgb_preprocess_end__ - .. literalinclude:: examples/pytorch_tabular_starter.py - :language: python - :start-after: __air_pytorch_preprocess_start__ - :end-before: __air_pytorch_preprocess_end__ + .. tab-item:: Pytorch -.. tabbed:: Tensorflow + .. literalinclude:: examples/pytorch_tabular_starter.py + :language: python + :start-after: __air_pytorch_preprocess_start__ + :end-before: __air_pytorch_preprocess_end__ - .. literalinclude:: examples/tf_tabular_starter.py - :language: python - :start-after: __air_tf_preprocess_start__ - :end-before: __air_tf_preprocess_end__ + .. tab-item:: Tensorflow + + .. literalinclude:: examples/tf_tabular_starter.py + :language: python + :start-after: __air_tf_preprocess_start__ + :end-before: __air_tf_preprocess_end__ Training ~~~~~~~~ Train a model with a ``Trainer`` with common ML frameworks: -.. tabbed:: XGBoost +.. tab-set:: + + .. tab-item:: XGBoost - .. 
literalinclude:: examples/xgboost_starter.py - :language: python - :start-after: __air_xgb_train_start__ - :end-before: __air_xgb_train_end__ + .. literalinclude:: examples/xgboost_starter.py + :language: python + :start-after: __air_xgb_train_start__ + :end-before: __air_xgb_train_end__ -.. tabbed:: Pytorch + .. tab-item:: Pytorch - .. literalinclude:: examples/pytorch_tabular_starter.py - :language: python - :start-after: __air_pytorch_train_start__ - :end-before: __air_pytorch_train_end__ + .. literalinclude:: examples/pytorch_tabular_starter.py + :language: python + :start-after: __air_pytorch_train_start__ + :end-before: __air_pytorch_train_end__ -.. tabbed:: Tensorflow + .. tab-item:: Tensorflow - .. literalinclude:: examples/tf_tabular_starter.py - :language: python - :start-after: __air_tf_train_start__ - :end-before: __air_tf_train_end__ + .. literalinclude:: examples/tf_tabular_starter.py + :language: python + :start-after: __air_tf_train_start__ + :end-before: __air_tf_train_end__ Hyperparameter Tuning ~~~~~~~~~~~~~~~~~~~~~ You can specify a hyperparameter space to search over for each trainer: -.. tabbed:: XGBoost +.. tab-set:: + + .. tab-item:: XGBoost - .. literalinclude:: examples/xgboost_starter.py - :language: python - :start-after: __air_xgb_tuner_start__ - :end-before: __air_xgb_tuner_end__ + .. literalinclude:: examples/xgboost_starter.py + :language: python + :start-after: __air_xgb_tuner_start__ + :end-before: __air_xgb_tuner_end__ -.. tabbed:: Pytorch + .. tab-item:: Pytorch - .. literalinclude:: examples/pytorch_tabular_starter.py - :language: python - :start-after: __air_pytorch_tuner_start__ - :end-before: __air_pytorch_tuner_end__ + .. literalinclude:: examples/pytorch_tabular_starter.py + :language: python + :start-after: __air_pytorch_tuner_start__ + :end-before: __air_pytorch_tuner_end__ -.. tabbed:: Tensorflow + .. tab-item:: Tensorflow - .. 
literalinclude:: examples/tf_tabular_starter.py - :language: python - :start-after: __air_tf_tuner_start__ - :end-before: __air_tf_tuner_end__ + .. literalinclude:: examples/tf_tabular_starter.py + :language: python + :start-after: __air_tf_tuner_start__ + :end-before: __air_tf_tuner_end__ Then use the ``Tuner`` to run the search: @@ -169,27 +175,28 @@ Batch Inference Use the trained model for scalable batch prediction with a ``BatchPredictor``. -.. tabbed:: XGBoost +.. tab-set:: - .. literalinclude:: examples/xgboost_starter.py - :language: python - :start-after: __air_xgb_batchpred_start__ - :end-before: __air_xgb_batchpred_end__ + .. tab-item:: XGBoost -.. tabbed:: Pytorch + .. literalinclude:: examples/xgboost_starter.py + :language: python + :start-after: __air_xgb_batchpred_start__ + :end-before: __air_xgb_batchpred_end__ - .. literalinclude:: examples/pytorch_tabular_starter.py - :language: python - :start-after: __air_pytorch_batchpred_start__ - :end-before: __air_pytorch_batchpred_end__ + .. tab-item:: Pytorch -.. tabbed:: Tensorflow + .. literalinclude:: examples/pytorch_tabular_starter.py + :language: python + :start-after: __air_pytorch_batchpred_start__ + :end-before: __air_pytorch_batchpred_end__ - .. literalinclude:: examples/tf_tabular_starter.py - :language: python - :start-after: __air_tf_batchpred_start__ - :end-before: __air_tf_batchpred_end__ + .. tab-item:: Tensorflow + .. literalinclude:: examples/tf_tabular_starter.py + :language: python + :start-after: __air_tf_batchpred_start__ + :end-before: __air_tf_batchpred_end__ Project Status -------------- diff --git a/doc/source/ray-air/predictors.rst b/doc/source/ray-air/predictors.rst index e281e18554d3..16c1545d45d8 100644 --- a/doc/source/ray-air/predictors.rst +++ b/doc/source/ray-air/predictors.rst @@ -146,34 +146,37 @@ Below, we provide examples of using common frameworks to do batch inference for Tabular ~~~~~~~ -.. tabbed:: XGBoost +.. tab-set:: - .. 
literalinclude:: examples/xgboost_batch_prediction.py - :language: python + .. tab-item:: XGBoost -.. tabbed:: Pytorch + .. literalinclude:: examples/xgboost_batch_prediction.py + :language: python - .. literalinclude:: examples/pytorch_tabular_batch_prediction.py - :language: python + .. tab-item:: Pytorch -.. tabbed:: Tensorflow + .. literalinclude:: examples/pytorch_tabular_batch_prediction.py + :language: python - .. literalinclude:: examples/tf_tabular_batch_prediction.py - :language: python + .. tab-item:: Tensorflow + .. literalinclude:: examples/tf_tabular_batch_prediction.py + :language: python Image ~~~~~ -.. tabbed:: Pytorch +.. tab-set:: - .. literalinclude:: examples/torch_image_batch_pretrained.py - :language: python + .. tab-item:: Pytorch + + .. literalinclude:: examples/torch_image_batch_pretrained.py + :language: python -.. tabbed:: Tensorflow + .. tab-item:: Tensorflow - Coming soon! + Coming soon! Text ~~~~ diff --git a/doc/source/ray-air/trainers.rst b/doc/source/ray-air/trainers.rst index ea08fd7352b2..9022ed097ad4 100644 --- a/doc/source/ray-air/trainers.rst +++ b/doc/source/ray-air/trainers.rst @@ -67,22 +67,24 @@ Read more about :ref:`Ray Train's Deep Learning Trainers `. .. dropdown:: Code examples - .. tabbed:: Torch + .. tab-set:: - .. literalinclude:: doc_code/torch_trainer.py - :language: python + .. tab-item:: Torch - .. tabbed:: Tensorflow + .. literalinclude:: doc_code/torch_trainer.py + :language: python - .. literalinclude:: doc_code/tf_starter.py - :language: python - :start-after: __air_tf_train_start__ - :end-before: __air_tf_train_end__ + .. tab-item:: Tensorflow - .. tabbed:: Horovod + .. literalinclude:: doc_code/tf_starter.py + :language: python + :start-after: __air_tf_train_start__ + :end-before: __air_tf_train_end__ - .. literalinclude:: doc_code/hvd_trainer.py - :language: python + .. tab-item:: Horovod + + .. literalinclude:: doc_code/hvd_trainer.py + :language: python How to report metrics and checkpoints? 
diff --git a/doc/source/ray-air/tuner.rst b/doc/source/ray-air/tuner.rst index 3bba0b1d47a6..43f14c470c0f 100644 --- a/doc/source/ray-air/tuner.rst +++ b/doc/source/ray-air/tuner.rst @@ -63,19 +63,21 @@ Depending on the model and dataset, you may want to tune: The following shows some example code on how to specify the ``param_space``. -.. tabbed:: XGBoost +.. tab-set:: - .. literalinclude:: doc_code/tuner.py - :language: python - :start-after: __xgboost_start__ - :end-before: __xgboost_end__ + .. tab-item:: XGBoost -.. tabbed:: Pytorch + .. literalinclude:: doc_code/tuner.py + :language: python + :start-after: __xgboost_start__ + :end-before: __xgboost_end__ - .. literalinclude:: doc_code/tuner.py - :language: python - :start-after: __torch_start__ - :end-before: __torch_end__ + .. tab-item:: Pytorch + + .. literalinclude:: doc_code/tuner.py + :language: python + :start-after: __torch_start__ + :end-before: __torch_end__ Read more about :ref:`Tune search spaces here `. From 86594fbdb8f0c9a5d2635d72ad03a2533fae8766 Mon Sep 17 00:00:00 2001 From: Cuong Nguyen <128072568+can-anyscale@users.noreply.github.com> Date: Fri, 21 Apr 2023 16:55:34 -0700 Subject: [PATCH 061/424] [CI][Bisect][4] Add pre-sanity check to avoid infra or external change root causes (#34553) Why are these changes needed? Many time tests can fail due to a non-code-change issue (external or infra issues). Before running a bisect, run a pre-sanity check to make sure that the provided passing and failing revision is valid. Otherwise, terminate bisect early and let the users know that the test is flaky. 
--- release/ray_release/scripts/ray_bisect.py | 134 ++++++++++++++++------ release/ray_release/tests/test_bisect.py | 29 +++-- 2 files changed, 119 insertions(+), 44 deletions(-) diff --git a/release/ray_release/scripts/ray_bisect.py b/release/ray_release/scripts/ray_bisect.py index 03f1d28a5414..5ac1640d94b6 100644 --- a/release/ray_release/scripts/ray_bisect.py +++ b/release/ray_release/scripts/ray_bisect.py @@ -3,7 +3,7 @@ import os import json import time -from typing import List +from typing import Dict, List, Optional, Set from ray_release.logger import logger from ray_release.buildkite.step import get_step from ray_release.config import ( @@ -18,33 +18,82 @@ @click.argument("test_name", required=True, type=str) @click.argument("passing_commit", required=True, type=str) @click.argument("failing_commit", required=True, type=str) -def main(test_name: str, passing_commit: str, failing_commit: str) -> None: +@click.option( + "--concurrency", + default=3, + type=int, + help=( + "Maximum number of concurrent test jobs to run. 
Higher number uses more " + "capacity, but reduce the bisect duration" + ), +) +def main( + test_name: str, + passing_commit: str, + failing_commit: str, + concurrency: Optional[int] = 1, +) -> None: + if concurrency <= 0: + raise ValueError( + f"Concurrency input need to be a positive number, received: {concurrency}" + ) + test = _get_test(test_name) + pre_sanity_check = _sanity_check(test, passing_commit, failing_commit) + if not pre_sanity_check: + logger.info( + "Failed pre-saniy check, the test might be flaky or fail due to" + " an external (not a code change) factors" + ) + return commit_lists = _get_commit_lists(passing_commit, failing_commit) - blamed_commit = _bisect(test_name, commit_lists) + blamed_commit = _bisect(test, commit_lists, concurrency) logger.info(f"Blamed commit found for test {test_name}: {blamed_commit}") -def _bisect(test_name: str, commit_list: List[str]) -> str: - test = _get_test(test_name) +def _bisect(test: Test, commit_list: List[str], concurrency: int) -> str: while len(commit_list) > 2: logger.info( f"Bisecting between {len(commit_list)} commits: " - f"{commit_list[0]} to {commit_list[-1]}" + f"{commit_list[0]} to {commit_list[-1]} with concurrency {concurrency}" ) - middle_commit_idx = len(commit_list) // 2 - middle_commit = commit_list[middle_commit_idx] - is_passing = _run_test(test, middle_commit) - if is_passing: - commit_list = commit_list[middle_commit_idx:] - else: - commit_list = commit_list[: middle_commit_idx + 1] + idx_to_commit = {} + for i in range(concurrency): + idx = len(commit_list) * (i + 1) // (concurrency + 1) + idx_to_commit[idx] = commit_list[idx] + outcomes = _run_test(test, set(idx_to_commit.values())) + passing_idx = 0 + failing_idx = len(commit_list) - 1 + for idx, commit in idx_to_commit.items(): + is_passing = outcomes[commit] == "passed" + if is_passing and idx > passing_idx: + passing_idx = idx + if not is_passing and idx < failing_idx: + failing_idx = idx + commit_list = commit_list[passing_idx : 
failing_idx + 1] return commit_list[-1] -def _run_test(test: Test, commit: str) -> bool: - logger.info(f'Running test {test["name"]} on commit {commit}') - _trigger_test_run(test, commit) - return _obtain_test_result(commit) +def _sanity_check(test: Test, passing_revision: str, failing_revision: str) -> bool: + """ + Sanity check that the test indeed passes on the passing revision, and fails on the + failing revision + """ + logger.info( + f"Sanity check passing revision: {passing_revision}" + f" and failing revision: {failing_revision}" + ) + outcomes = _run_test(test, [passing_revision, failing_revision]) + return ( + outcomes[passing_revision] == "passed" + and outcomes[failing_revision] != "passed" + ) + + +def _run_test(test: Test, commits: Set[str]) -> Dict[str, str]: + logger.info(f'Running test {test["name"]} on commits {commits}') + for commit in commits: + _trigger_test_run(test, commit) + return _obtain_test_result(commits) def _trigger_test_run(test: Test, commit: str) -> None: @@ -52,30 +101,51 @@ def _trigger_test_run(test: Test, commit: str) -> None: commit, timeout=DEFAULT_WHEEL_WAIT_TIMEOUT, ) - step = get_step(test, ray_wheels=ray_wheels_url) - step["label"] = f'{test["name"]}:{commit[:6]}' + step = get_step( + test, + ray_wheels=ray_wheels_url, + env={ + "RAY_COMMIT_OF_WHEEL": commit, + }, + ) + step["label"] = f'{test["name"]}:{commit[:7]}' step["key"] = commit - pipeline = json.dumps({"steps": [step]}) + pipeline = subprocess.Popen( + ["echo", json.dumps({"steps": [step]})], stdout=subprocess.PIPE + ) subprocess.check_output( - f'echo "{pipeline}" | buildkite-agent pipeline upload', - shell=True, + ["buildkite-agent", "pipeline", "upload"], stdin=pipeline.stdout ) + pipeline.stdout.close() -def _obtain_test_result(buildkite_step_key: str) -> bool: - outcome = None - wait = 30 +def _obtain_test_result(buildkite_step_keys: List[str]) -> Dict[str, str]: + outcomes = {} + wait = 5 total_wait = 0 - while outcome not in ["passed", "hard_failed", 
"soft_failed"]: + while True: logger.info(f"... waiting for test result ...({total_wait} seconds)") - outcome = subprocess.check_output( - f'buildkite-agent step get "outcome" --step "{buildkite_step_key}"', - shell=True, - ).decode("utf-8") + for key in buildkite_step_keys: + if key in outcomes: + continue + outcome = subprocess.check_output( + [ + "buildkite-agent", + "step", + "get", + "outcome", + "--step", + key, + ] + ).decode("utf-8") + if outcome: + outcomes[key] = outcome + if len(outcomes) == len(buildkite_step_keys): + break time.sleep(wait) total_wait = total_wait + wait - logger.info(f"Final test outcome: {outcome}") - return outcome == "passed" + logger.info(f"Final test outcomes: {outcomes}") + return outcomes def _get_test(test_name: str) -> Test: diff --git a/release/ray_release/tests/test_bisect.py b/release/ray_release/tests/test_bisect.py index b6d0d0b6d8e5..5d363e807630 100644 --- a/release/ray_release/tests/test_bisect.py +++ b/release/ray_release/tests/test_bisect.py @@ -1,31 +1,36 @@ from unittest import mock +from typing import List, Dict from ray_release.scripts.ray_bisect import _bisect +from ray_release.config import Test def test_bisect(): test_cases = { "c3": { - "c0": True, - "c1": True, - "c3": False, - "c4": False, + "c0": "passed", + "c1": "passed", + "c3": "hard_failed", + "c4": "soft_failed", }, "c1": { - "c0": True, - "c1": False, + "c0": "passed", + "c1": "hard_failed", + "c2": "hard_failed", + }, + "cc1": { + "cc0": "passed", + "cc1": "hard_failed", }, } for output, input in test_cases.items(): - def _mock_run_test(test_name: str, commit: str) -> bool: - return input[commit] + def _mock_run_test(test: Test, commit: List[str]) -> Dict[str, str]: + return input with mock.patch( "ray_release.scripts.ray_bisect._run_test", side_effect=_mock_run_test, - ), mock.patch( - "ray_release.scripts.ray_bisect._get_test", - return_value={}, ): - assert _bisect("test", list(input.keys())) == output + for concurreny in range(1, 4): + assert 
_bisect({}, list(input.keys()), concurreny) == output From 34e78db914bbb628a273bde928125fd50e281744 Mon Sep 17 00:00:00 2001 From: Cuong Nguyen <128072568+can-anyscale@users.noreply.github.com> Date: Fri, 21 Apr 2023 22:30:32 -0700 Subject: [PATCH 062/424] [CI][HotFix] Revert 34499 #34688 Signed-off-by: Cuong Nguyen --- release/ray_release/job_manager/anyscale_job_manager.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/release/ray_release/job_manager/anyscale_job_manager.py b/release/ray_release/job_manager/anyscale_job_manager.py index 5b831e4e7d3b..0d02dbb8a784 100644 --- a/release/ray_release/job_manager/anyscale_job_manager.py +++ b/release/ray_release/job_manager/anyscale_job_manager.py @@ -335,7 +335,7 @@ def _get_logs(): # Many of Ray components have their separated logs (e.g. dashboard, # gcs_server, etc.), so the interesting errors are not always in the # job logs. If the job has no logs, check other ray logs for error patterns. - if not output: + if "### Starting ###" not in output: output = self._get_ray_error_logs() assert output, "No logs fetched" return "\n".join(output.splitlines()[-LAST_LOGS_LENGTH * 3 :]) From e01864cc393613b7319f8bfff6447556d61c49b5 Mon Sep 17 00:00:00 2001 From: Chen Shen Date: Sat, 22 Apr 2023 01:28:06 -0700 Subject: [PATCH 063/424] [autoscaler v2] Interface between autoscaler and gcs (#34680) Why are these changes needed? This PR introduce the interface between GCS and Autoscaler. Specifically it introduces 2 APIs GetClusterResourceState: Autoscaler will query this interface to get cluster resource usage, which includes nodes (state and resource ulitization), as well as pending requests, which include ResourceRequest, GangResourceRequest, as well as ClusterResourceConstraint. For NodeState, it includes NodeStatus, which can transit from ALIVE -> DEAD, or ALIVE -> DRAIN_PENDING -> DRAINING -> DRAINED -> DEAD, or ALIVE -> DRAIN_PENDING -> DRAIN_FAILED. 
it also includes the instance_id that the autoscaler is aware of; this allows the autoscaler to do reconciliation if available.
For ResourceRequest, it comes with a PlacementConstraint, which only supports AntiAffinityConstraint today, with the semantics that the resource request can't be allocated on a node with the same label/value specified in the AntiAffinityConstraint.
There is also GangResourceRequest, which has gang scheduling semantics where the requests in the gang should all be fulfilled atomically.
ReportAutoscalingState: Autoscaler will also report its own state back to the cluster using this API, where it includes all instances (including those pending launch), as well as infeasible requests. Instance state could transition from QUEUED -> REQUESTED -> BOOTSTRAPPING -> ALIVE -> TERMINATING -> DEAD. Two special states are TO_BE_PREEMPTED and TO_BE_DRAINED, where one is forced preemption and the other is collaborative draining (can be reversed). It also reports back requests that are infeasible, associated with a specific request version.
--- src/ray/protobuf/BUILD | 15 ++ .../protobuf/experimental/autoscaler.proto | 221 ++++++++++++++++++ 2 files changed, 236 insertions(+) create mode 100644 src/ray/protobuf/experimental/autoscaler.proto diff --git a/src/ray/protobuf/BUILD b/src/ray/protobuf/BUILD index 22054c994261..3ab65258b760 100644 --- a/src/ray/protobuf/BUILD +++ b/src/ray/protobuf/BUILD @@ -347,3 +347,18 @@ cc_proto_library( name = "usage_cc_proto", deps = [":usage_proto"], ) + +proto_library( + name = "autoscaler_proto", + srcs = ["experimental/autoscaler.proto"], +) + +python_grpc_compile( + name = "autoscaler_py_proto", + deps = [":autoscaler_proto"], +) + +cc_proto_library( + name = "autoscaler_cc_proto", + deps = [":autoscaler_proto"], +) diff --git a/src/ray/protobuf/experimental/autoscaler.proto b/src/ray/protobuf/experimental/autoscaler.proto new file mode 100644 index 000000000000..93bbf98f72df --- /dev/null +++ b/src/ray/protobuf/experimental/autoscaler.proto @@ -0,0 +1,221 @@ +// Copyright 2023 The Ray Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; +option cc_enable_arenas = true; + +package ray.rpc; + +// ============= Cluster Resources ==================== +// +// Following fields represents the Cluster Resources autoscaler interested +// in. + +// Represents an anti-affinity constraint. A bundle with this constraint +// can't be allocated to a node that has a label with the same name and +// value. 
This is used to implement placement group anti-affinity. +// +// For placement group, the label_name is "_PG" (reserved), +// and the label_value is the placement group id. +message AntiAffinityConstraint { + string label_name = 1; + string label_value = 2; + // If true, the label will be created on the node + // where the request with this constraint is scheduled. + bool create_label_on_schedule = 3; +} + +message PlacementConstraint { + AntiAffinityConstraint anti_affinity = 1; +} + +message ResourceRequest { + // resource requirements for the request. + map resources_bundle = 1; + // placement constraint for the request. multiple constraints + // form AND semantics. + repeated PlacementConstraint placement_constraints = 2; +} + +message ResourceRequestByCount { + ResourceRequest request = 1; + int64 count = 2; +} + +// All bundles in the same resource request require gang +// allocation semantics: they should be allocated all or nothing. +message GangResourceRequest { + // a map from bundles to the number of bundles requested. + repeated ResourceRequest requests = 1; +} + +// Cluster resource constraint represents minimial cluster size requirement, +// this is issued through ray.autoscaler.sdk.request_resources. +message ClusterResourceConstraint { + // If not empty, the cluster should have the capacity (total resource) to + // fit the min_resources. + map min_resources = 1; + // If not emtpy, the cluster should have the capacity (total resource) to fit + // the min_bundles. + repeated ResourceRequest min_bundles = 2; + // Id of the job who issued this constraint. + string job_id = 3; +} + +message NodeState { + enum NodeStatus { + // Node is alive. + ALIVE = 0; + // Node is dead. + DEAD = 1; + // Node is being drained. + DRAIN_PENDING = 2; + // Node is being drained. + DRAIN_FAILED = 3; + // Node is being drained. + DRAINING = 4; + // Node is already drained, and ready to be removed. + DRAINED = 5; + } + // The node id internal to Ray. 
+ string node_id = 11; + + // The instance id that the node is running on. + // This is passed in when the node is registered. + string instance_id = 12; + + // The available resources on the node. + // Reserved resource names: CPU, GPU, MEMORY, OBJECT_STORE_MEMORY + map available_resources = 13; + + // The corresponding total resources on the node. + map total_resources = 14; + + // Dynamic labels associated with the node. + // Reserved dynamic label names: _PG + map dynamic_labels = 15; + + // A monotonic increasing version of the node resource state. + int64 node_state_version = 16; + + // The status of the node. + NodeStatus status = 17; +} + +// ============= Autoscaling State Service API ======================= +// +// Autoscaler periodically calls to +// two snapshot APIs, GetClusterResourceState +// and ReportAutoscalingState. +// The GetClusterResourceState will return a snapshot +// of Ray state that Autoscaler interested, along with +// the cluster_resource_state_version (version). +// +// Separately, autoscaler will constantly making decisions +// based on the latest Ray state, and also change its +// state based on the information from node provider. +// Autoscaler will periodically report its state to GCS +// through ReportAutoscalingState API. + +message GetClusterResourceStateRequest { + // The last seen cluster resource state version. The default value is reserved for if a + // previous scheduling state has never been seen. + int64 last_seen_cluster_resource_state_version = 1; +} + +message GetClusterResourceStateReply { + // an monotonically increasing version of the cluster resources. + int64 cluster_resource_state_version = 1; + // last seen autoscaler state. + int64 last_seen_autoscaler_state_version = 2; + // Current cluster resources. + repeated NodeState node_states = 3; + // Resource requests pending scheduling. + repeated ResourceRequestByCount pending_resource_requests = 4; + // Gang resource requests pending scheduling. 
+ repeated GangResourceRequest pending_gang_resource_requests = 5; + // Cluster resource constraints. + // There could be multiple constraints issued by different + // jobs. Autoscaler to make sure all constraints are satisfied. + repeated ClusterResourceConstraint cluster_resource_constraints = 6; +} + +message Instance { + enum InstanceStatus { + // The unspecified state - most likey it is queued. + INSTANCE_STATUS_UNSPECIFIED = 0; + // Instance is starting. The first state update received from the + // instance. + STARTING = 1; + // The instance is running - one of two states of a healthy instance. + RUNNING = 2; + // The instance is idle - one of two states of a healthy instance. + IDLE = 3; + // The instance is stopping - usually follows from the RUNNING, IDLE, + // PREEMPT_REQUEST or DRAIN_REQUEST state. + STOPPING = 4; + // The instance is stopped - follows from the STOPPING state. + STOPPED = 5; + // The instance is in a bad state - but it is still able to send updates. + FAILING = 6; + // The subscribe service moves instances to this state if they + // have been idle for too long. This allows the cluster manager to + // make a final decision on whether or not to commence a drain + // sequence for this instance. + DRAIN_CONFIRMATION_PENDING = 7; + // The instance should be drained, Ray should start draining process + // but could reject if failed to drain. + DRAIN_REQUEST = 8; + // The instance will be reempted by the instance manager, regardless + // of whether it is drainable or not. + PREEMPT_REQUEST = 9; + } + // an unique id for the instance that's generated by the + // instance manager. This may be optional if + // the instance hasn't be started yet. + string instance_id = 11; + // the status of the instance. + InstanceStatus status = 12; + // the node id of the instance. + string node_type = 13; + // The corresponding total resources on the node. + map total_resources = 14; + // timestamp of the last state changed. 
+ int64 timestamp_since_last_state_change = 15; +} + +message ReportAutoscalingStateRequest { + int64 last_seen_cluster_resource_state_version = 1; + // A monotonically increasing version identifies + // the state of autoscaler. + // Note: for the same cluster resource state, the + // autoscaler state might be different, since + // the autoscaler's state could also be updated by + // node provider. + int64 autoscaler_state_version = 2; + repeated Instance instances = 3; + // infeasible resource requests. + repeated ResourceRequest infeasible_resource_requests = 4; + repeated ClusterResourceConstraint infeasible_gange_resource_requests = 5; + repeated ClusterResourceConstraint infeasible_cluster_resource_constraints = 6; +} + +message ReportAutoscalingStateReply {} + +service AutoscalerStateService { + rpc GetClusterResourceState(GetClusterResourceStateRequest) + returns (GetClusterResourceStateReply); + rpc ReportAutoscalingState(ReportAutoscalingStateRequest) + returns (ReportAutoscalingStateReply); +} \ No newline at end of file From 860e37e500e74172605c12e644acd381c19c1aa8 Mon Sep 17 00:00:00 2001 From: SangBin Cho Date: Sun, 23 Apr 2023 08:00:22 +0900 Subject: [PATCH 064/424] GCS client test failure flakiness (#34656) Why are these changes needed? Right now the theory is as follow. pubsub io service is created and run inside the GcsServer. That means if pubsub io service is accessed after GCSServer GC'ed, it will segfault. Right now, upon teardown, when we call rpc::DrainAndResetExecutor, this will recreate the Executor thread pool. Upon teardown, If DrainAndResetExecutor -> GcsServer's internal pubsub posts new SendReply to the newly created threadpool -> GcsServer.reset -> pubsub io service GC'ed -> SendReply invoked from the newly created thread pool, it will segfault. 
NOTE: the segfault is from pubsub service if you see the failure #2 0x7f92034d9129 in ray::rpc::ServerCallImpl::HandleRequestImpl()::'lambda'(ray::Status, std::__1::function, std::__1::function)::operator()(ray::Status, std::__1::function, std::__1::function) const::'lambda'()::operator()() const /proc/self/cwd/bazel-out/k8-opt/bin/_virtual_includes/grpc_common_lib/ray/rpc/server_call.h:212:48 As a fix, I only drain the thread pool. And then reset it after all operations are fully cleaned up (only from tests). I think there's no need to reset for regular proc termination like raylet, gcs, core workers. Related issue number Closes #34344 Signed-off-by: SangBin Cho --- src/ray/core_worker/core_worker.cc | 2 +- src/ray/gcs/gcs_client/test/gcs_client_test.cc | 3 ++- src/ray/gcs/gcs_server/gcs_server_main.cc | 2 +- src/ray/gcs/gcs_server/test/gcs_server_rpc_test.cc | 3 ++- src/ray/raylet/node_manager.cc | 2 +- src/ray/rpc/server_call.cc | 5 +++-- src/ray/rpc/server_call.h | 11 ++++++++--- 7 files changed, 18 insertions(+), 10 deletions(-) diff --git a/src/ray/core_worker/core_worker.cc b/src/ray/core_worker/core_worker.cc index b84d91909dad..329fda454b12 100644 --- a/src/ray/core_worker/core_worker.cc +++ b/src/ray/core_worker/core_worker.cc @@ -783,7 +783,7 @@ void CoreWorker::Exit( exit_type, detail = std::move(detail), creation_task_exception_pb_bytes]() { - rpc::DrainAndResetServerCallExecutor(); + rpc::DrainServerCallExecutor(); Disconnect(exit_type, detail, creation_task_exception_pb_bytes); KillChildProcs(); Shutdown(); diff --git a/src/ray/gcs/gcs_client/test/gcs_client_test.cc b/src/ray/gcs/gcs_client/test/gcs_client_test.cc index 6039392ca032..d3baeeb964d0 100644 --- a/src/ray/gcs/gcs_client/test/gcs_client_test.cc +++ b/src/ray/gcs/gcs_client/test/gcs_client_test.cc @@ -105,13 +105,14 @@ class GcsClientTest : public ::testing::TestWithParam { gcs_client_.reset(); server_io_service_->stop(); - rpc::DrainAndResetServerCallExecutor(); + 
rpc::DrainServerCallExecutor(); server_io_service_thread_->join(); gcs_server_->Stop(); gcs_server_.reset(); if (!no_redis_) { TestSetupUtil::FlushAllRedisServers(); } + rpc::ResetServerCallExecutor(); } void RestartGcsServer() { diff --git a/src/ray/gcs/gcs_server/gcs_server_main.cc b/src/ray/gcs/gcs_server/gcs_server_main.cc index 682830597bc3..151a56efdc64 100644 --- a/src/ray/gcs/gcs_server/gcs_server_main.cc +++ b/src/ray/gcs/gcs_server/gcs_server_main.cc @@ -107,7 +107,7 @@ int main(int argc, char *argv[]) { int signal_number) { RAY_LOG(INFO) << "GCS server received SIGTERM, shutting down..."; main_service.stop(); - ray::rpc::DrainAndResetServerCallExecutor(); + ray::rpc::DrainServerCallExecutor(); gcs_server.Stop(); ray::stats::Shutdown(); }; diff --git a/src/ray/gcs/gcs_server/test/gcs_server_rpc_test.cc b/src/ray/gcs/gcs_server/test/gcs_server_rpc_test.cc index cabad9872701..cf5078762e1f 100644 --- a/src/ray/gcs/gcs_server/test/gcs_server_rpc_test.cc +++ b/src/ray/gcs/gcs_server/test/gcs_server_rpc_test.cc @@ -59,11 +59,12 @@ class GcsServerTest : public ::testing::Test { void TearDown() override { io_service_.stop(); - rpc::DrainAndResetServerCallExecutor(); + rpc::DrainServerCallExecutor(); gcs_server_->Stop(); thread_io_service_->join(); gcs_server_.reset(); ray::gcs::RedisCallbackManager::instance().Clear(); + rpc::ResetServerCallExecutor(); } bool AddJob(const rpc::AddJobRequest &request) { diff --git a/src/ray/raylet/node_manager.cc b/src/ray/raylet/node_manager.cc index ca0b4e015cd7..4b0593ea8515 100644 --- a/src/ray/raylet/node_manager.cc +++ b/src/ray/raylet/node_manager.cc @@ -2028,7 +2028,7 @@ void NodeManager::HandleShutdownRaylet(rpc::ShutdownRayletRequest request, return; } auto shutdown_after_reply = []() { - rpc::DrainAndResetServerCallExecutor(); + rpc::DrainServerCallExecutor(); // Note that the callback is posted to the io service after the shutdown GRPC request // is replied. 
Otherwise, the RPC might not be replied to GCS before it shutsdown // itself. Implementation note: When raylet is shutdown by ray stop, the CLI sends a diff --git a/src/ray/rpc/server_call.cc b/src/ray/rpc/server_call.cc index b28317598e05..2f432999b29e 100644 --- a/src/ray/rpc/server_call.cc +++ b/src/ray/rpc/server_call.cc @@ -30,8 +30,9 @@ std::unique_ptr &_GetServerCallExecutor() { boost::asio::thread_pool &GetServerCallExecutor() { return *_GetServerCallExecutor(); } -void DrainAndResetServerCallExecutor() { - GetServerCallExecutor().join(); +void DrainServerCallExecutor() { GetServerCallExecutor().join(); } + +void ResetServerCallExecutor() { _GetServerCallExecutor() = std::make_unique( ::RayConfig::instance().num_server_call_thread()); } diff --git a/src/ray/rpc/server_call.h b/src/ray/rpc/server_call.h index 31d078ff78f0..8242c6b69fe8 100644 --- a/src/ray/rpc/server_call.h +++ b/src/ray/rpc/server_call.h @@ -32,9 +32,14 @@ namespace rpc { /// This pool is shared across gRPC servers. boost::asio::thread_pool &GetServerCallExecutor(); -/// For testing -/// Drain the executor and reset it. -void DrainAndResetServerCallExecutor(); +/// Drain the executor. +void DrainServerCallExecutor(); + +/// Reset the server call executor. +/// Testing only. After you drain the executor +/// you need to regenerate the executor +/// because they are global. +void ResetServerCallExecutor(); /// Represents the callback function to be called when a `ServiceHandler` finishes /// handling a request. From 6defc2b4c22f9defb02adc7137d34594be94ebfa Mon Sep 17 00:00:00 2001 From: Larry <554538252@qq.com> Date: Sun, 23 Apr 2023 14:56:15 +0800 Subject: [PATCH 065/424] [Runtime Env] Support expansion of certain PATH variables in env vars (#34394) If the user customizes environment variables such as PATH \ LD_LIBARAY_PATH in the runtime env, the original PATH \ LD_LIBARAY_PATH environment variables on the ray cluster will be directly overwritten in the end. 
In C++ Worker, because we set the path of libray_api.so to LD_LIBRARY_PATH. If the user also sets LD_LIBRARY_PATH in the runtime env, the C++ default worker cannot find libray_api.so example: ``` 1. libray_api.so in /python/site-packages/ray/cpp/lib/libray_api.so. C++ worker origin environ LD_LIBARAY_PATH is "/python/site-packages/ray/cpp/lib/". 2. Then user set runtime_env={ "env_vars": { "LD_LIBARAY_PATH": "${LD_LIBARAY_PATH}:/home/admin/my_lib", } } 3. Finally, LD_LIBARAY_PATH -> "/python/site-packages/ray/cpp/lib:/home/admin/my_lib" ``` --- python/ray/_private/runtime_env/context.py | 3 +- python/ray/_private/utils.py | 22 ++++++++++++++ python/ray/tests/test_environ.py | 30 +++++++++++++++++++ python/ray/tests/test_runtime_env_env_vars.py | 19 ++++++++++++ 4 files changed, 73 insertions(+), 1 deletion(-) diff --git a/python/ray/_private/runtime_env/context.py b/python/ray/_private/runtime_env/context.py index 582063ad0896..4c980e4e4ae4 100644 --- a/python/ray/_private/runtime_env/context.py +++ b/python/ray/_private/runtime_env/context.py @@ -8,6 +8,7 @@ from ray.util.annotations import DeveloperAPI from ray.core.generated.common_pb2 import Language from ray._private.services import get_ray_jars_dir +from ray._private.utils import update_envs logger = logging.getLogger(__name__) @@ -44,7 +45,7 @@ def deserialize(json_string): return RuntimeEnvContext(**json.loads(json_string)) def exec_worker(self, passthrough_args: List[str], language: Language): - os.environ.update(self.env_vars) + update_envs(self.env_vars) if language == Language.PYTHON and sys.platform == "win32": executable = self.py_executable diff --git a/python/ray/_private/utils.py b/python/ray/_private/utils.py index d35be4353bd7..6174890cd8ea 100644 --- a/python/ray/_private/utils.py +++ b/python/ray/_private/utils.py @@ -1893,3 +1893,25 @@ def try_import_each_module(module_names_to_import: List[str]) -> None: importlib.import_module(module_to_preload) except ImportError: logger.exception(f'Failed 
to preload the module "{module_to_preload}"') + + +def update_envs(env_vars: Dict[str, str]): + """ + When updating the environment variable, if there is ${X}, + it will be replaced with the current environment variable. + """ + if not env_vars: + return + + replaceable_keys = [ + "PATH", + "LD_LIBRARY_PATH", + "DYLD_LIBRARY_PATH", + "LD_PRELOAD", + ] + + for key, value in env_vars.items(): + if key in replaceable_keys: + os.environ[key] = value.replace("${" + key + "}", os.environ.get(key, "")) + else: + os.environ[key] = value diff --git a/python/ray/tests/test_environ.py b/python/ray/tests/test_environ.py index ec407799feb5..e074a761f0a5 100644 --- a/python/ray/tests/test_environ.py +++ b/python/ray/tests/test_environ.py @@ -1,6 +1,8 @@ import os import pytest +import unittest import ray +from ray._private.utils import update_envs @pytest.mark.skipif("sys.platform != 'linux'") @@ -31,6 +33,34 @@ def get_os_environ(self): assert len(actor_os_environ) > 0 +def test_update_envs(): + with unittest.mock.patch.dict(os.environ): + env_vars = { + "PATH": "/test/lib/path:${PATH}", + "LD_LIBRARY_PATH": "/test/path1:${LD_LIBRARY_PATH}:./test/path2", + "DYLD_LIBRARY_PATH": "${DYLD_LIBRARY_PATH}:/test/path", + "LD_PRELOAD": "", + } + old_path = os.environ["PATH"] + os.environ["LD_LIBRARY_PATH"] = "./" + os.environ["DYLD_LIBRARY_PATH"] = "/lib64" + os.environ["LD_PRELOAD"] = "/lib:/usr/local/lib" + update_envs(env_vars) + assert os.environ["PATH"] == "/test/lib/path:" + old_path + assert os.environ["LD_LIBRARY_PATH"] == "/test/path1:./:./test/path2" + assert os.environ["DYLD_LIBRARY_PATH"] == "/lib64:/test/path" + assert os.environ["LD_PRELOAD"] == env_vars["LD_PRELOAD"] + + # Test the empty string scenario + os.environ["LD_LIBRARY_PATH"] = "" + del os.environ["DYLD_LIBRARY_PATH"] + del os.environ["LD_PRELOAD"] + update_envs(env_vars) + assert os.environ["LD_LIBRARY_PATH"] == "/test/path1::./test/path2" + assert os.environ["DYLD_LIBRARY_PATH"] == ":/test/path" + assert 
os.environ["LD_PRELOAD"] == env_vars["LD_PRELOAD"] + + if __name__ == "__main__": import pytest import sys diff --git a/python/ray/tests/test_runtime_env_env_vars.py b/python/ray/tests/test_runtime_env_env_vars.py index 69daa429341b..43dd6ff56f57 100644 --- a/python/ray/tests/test_runtime_env_env_vars.py +++ b/python/ray/tests/test_runtime_env_env_vars.py @@ -300,6 +300,25 @@ def get_options(val): assert pid7 == pid1 +def test_appendable_environ(ray_start_regular): + @ray.remote + def get_env(key): + return os.environ.get(key) + + custom_env = os.path.pathsep + "/usr/local/bin" + remote_env = ray.get( + get_env.options( + runtime_env={ + "env_vars": { + "PATH": "${PATH}" + custom_env, + } + } + ).remote("PATH") + ) + assert remote_env.endswith(custom_env) + assert len(remote_env) > len(custom_env) + + if __name__ == "__main__": import pytest From 8ee15509948999c73cf3e5a70447baf721f1bbd8 Mon Sep 17 00:00:00 2001 From: Philipp Moritz Date: Sun, 23 Apr 2023 20:54:17 -0700 Subject: [PATCH 066/424] Rewrite deprecated runtime_context methods (#34702) I saw these deprecation warnings while debugging something. 
--- python/ray/util/client/server/server.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/python/ray/util/client/server/server.py b/python/ray/util/client/server/server.py index c07db1bc9a1c..e5f97251e5a0 100644 --- a/python/ray/util/client/server/server.py +++ b/python/ray/util/client/server/server.py @@ -262,8 +262,8 @@ def ClusterInfo(self, request, context=None) -> ray_client_pb2.ClusterInfoRespon ctx = ray_client_pb2.ClusterInfoResponse.RuntimeContext() with disable_client_hook(): rtc = ray.get_runtime_context() - ctx.job_id = rtc.job_id.binary() - ctx.node_id = rtc.node_id.binary() + ctx.job_id = ray._private.utils.hex_to_binary(rtc.get_job_id()) + ctx.node_id = ray._private.utils.hex_to_binary(rtc.get_node_id()) ctx.namespace = rtc.namespace ctx.capture_client_tasks = ( rtc.should_capture_child_tasks_in_placement_group From 1b6884508b1c743d461fce3b9f2ae9d912382729 Mon Sep 17 00:00:00 2001 From: Ricky Xu Date: Mon, 24 Apr 2023 04:53:31 -0700 Subject: [PATCH 067/424] Open [ci] Ping pip version <23.1 for python 3.10 dependency Signed-off-by: rickyyx --- ci/env/install-dependencies.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ci/env/install-dependencies.sh b/ci/env/install-dependencies.sh index ec15821091de..fa6cd0def65e 100755 --- a/ci/env/install-dependencies.sh +++ b/ci/env/install-dependencies.sh @@ -233,7 +233,7 @@ install_upgrade_pip() { fi if "${python}" -m pip --version || "${python}" -m ensurepip; then # Configure pip if present - "${python}" -m pip install --upgrade "pip!=23.1" + "${python}" -m pip install --upgrade "pip<23.1" # If we're in a CI environment, do some configuration if [ "${CI-}" = true ]; then From db990902f801448117e0fccb127041a04dcdaa6e Mon Sep 17 00:00:00 2001 From: Cuong Nguyen <128072568+can-anyscale@users.noreply.github.com> Date: Mon, 24 Apr 2023 04:54:26 -0700 Subject: [PATCH 068/424] [CI] Fix bisect index and boundary (#34686) Make sure that the bisect indexes are not at the 
boundary. This prevents situations where we bisect the same revision again (which cause buildkite to create a duplicated key, as well as wasting resources). I have this check in some PR before but lost during rebase :( Signed-off-by: Cuong Nguyen --- release/ray_release/scripts/ray_bisect.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/release/ray_release/scripts/ray_bisect.py b/release/ray_release/scripts/ray_bisect.py index 5ac1640d94b6..c8691cd4ad47 100644 --- a/release/ray_release/scripts/ray_bisect.py +++ b/release/ray_release/scripts/ray_bisect.py @@ -59,6 +59,9 @@ def _bisect(test: Test, commit_list: List[str], concurrency: int) -> str: idx_to_commit = {} for i in range(concurrency): idx = len(commit_list) * (i + 1) // (concurrency + 1) + # make sure that idx is not at the boundary; this avoids rerun bisect + # on the previously run revision + idx = min(max(idx, 1), len(commit_list) - 2) idx_to_commit[idx] = commit_list[idx] outcomes = _run_test(test, set(idx_to_commit.values())) passing_idx = 0 From 89b5331eae3fd45eceebb25831a9f3c0b7976ee7 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 24 Apr 2023 13:17:36 +0100 Subject: [PATCH 069/424] Bump torch from 1.11.0 to 1.13.1 in /release/ml_user_tests/ray-lightning (#31520) * Bump torch from 1.11.0 to 1.13.1 in /release/ml_user_tests/ray-lightning Signed-off-by: dependabot[bot] Signed-off-by: Kai Fricke Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Kai Fricke --- release/ml_user_tests/ray-lightning/driver_requirements.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) mode change 100755 => 100644 release/ml_user_tests/ray-lightning/driver_requirements.txt diff --git a/release/ml_user_tests/ray-lightning/driver_requirements.txt b/release/ml_user_tests/ray-lightning/driver_requirements.txt old mode 100755 new mode 100644 index e7ab66c99970..082b6e3ef034 --- 
a/release/ml_user_tests/ray-lightning/driver_requirements.txt +++ b/release/ml_user_tests/ray-lightning/driver_requirements.txt @@ -1,3 +1,3 @@ -torch==1.11.0 -torchvision==0.12.0 +torch==1.13.1 +torchvision==0.14.1 pytorch-lightning \ No newline at end of file From b6687f0298c7248398ef367cd89dc1c262f473fe Mon Sep 17 00:00:00 2001 From: Scott Lee Date: Mon, 24 Apr 2023 10:46:47 -0700 Subject: [PATCH 070/424] [Dataset] De-flake `test_stats.test_dataset__repr__` (#34659) The test_dataset__repr__ test in test_stats.py is flakey. This PR fixes this issue, so the test passes consistently. --------- Signed-off-by: Scott Lee --- python/ray/data/tests/test_stats.py | 23 ++++++++++------------- 1 file changed, 10 insertions(+), 13 deletions(-) diff --git a/python/ray/data/tests/test_stats.py b/python/ray/data/tests/test_stats.py index 8f4e58218596..58a72d57ddea 100644 --- a/python/ray/data/tests/test_stats.py +++ b/python/ray/data/tests/test_stats.py @@ -262,18 +262,12 @@ def test_dataset_stats_basic(ray_start_regular_shared, enable_auto_log_stats): def test_dataset__repr__(ray_start_regular_shared): - context = DataContext.get_current() - context.optimize_fuse_stages = True - - n = 4 - ds = ray.data.range(n).materialize() + n = 100 + ds = ray.data.range(n) assert len(ds.take_all()) == n - ds2 = ds.map_batches(lambda x: x).materialize() - assert len(ds2.take_all()) == n - ss = ds._plan.stats().to_summary() - ss2 = ds2._plan.stats().to_summary() + ds = ds.materialize() - assert canonicalize(repr(ss)) == ( + assert canonicalize(repr(ds._plan.stats().to_summary())) == ( "DatastreamStatsSummary(\n" " datastream_uuid=U,\n" " base_name=None,\n" @@ -298,7 +292,7 @@ def test_dataset__repr__(ray_start_regular_shared): " get_time=T,\n" " iter_blocks_local=None,\n" " iter_blocks_remote=None,\n" - " iter_unknown_location=N,\n" + " iter_unknown_location=None,\n" " next_time=T,\n" " format_time=T,\n" " user_time=T,\n" @@ -307,7 +301,10 @@ def 
test_dataset__repr__(ray_start_regular_shared): " parents=[],\n" ")" ) - assert canonicalize(repr(ss2)) == ( + + ds2 = ds.map_batches(lambda x: x).materialize() + assert len(ds2.take_all()) == n + assert canonicalize(repr(ds2._plan.stats().to_summary())) == ( "DatastreamStatsSummary(\n" " datastream_uuid=U,\n" " base_name=MapBatches(),\n" @@ -367,7 +364,7 @@ def test_dataset__repr__(ray_start_regular_shared): " get_time=T,\n" " iter_blocks_local=None,\n" " iter_blocks_remote=None,\n" - " iter_unknown_location=N,\n" + " iter_unknown_location=None,\n" " next_time=T,\n" " format_time=T,\n" " user_time=T,\n" From a7731457cb902e8cb1bd4ef96510e99e843c2404 Mon Sep 17 00:00:00 2001 From: Max Pumperla Date: Mon, 24 Apr 2023 21:49:34 +0200 Subject: [PATCH 071/424] [docs] sphinx design migration 2/N (#34707) --- .../job-submission/ray-client.rst | 50 +- .../monitoring-and-observability.rst | 66 +- doc/source/cluster/vms/getting-started.rst | 92 +-- .../references/ray-cluster-configuration.rst | 586 ++++++++++-------- doc/source/data/consuming-datastreams.rst | 52 +- doc/source/data/data-internals.rst | 38 +- doc/source/data/data-tensor-support.rst | 234 +++---- doc/source/data/getting-started.rst | 62 +- doc/source/data/transforming-datastreams.rst | 282 +++++---- doc/source/ray-contribute/development.rst | 68 +- .../monitoring-debugging/profiling.rst | 24 +- doc/source/ray-observability/ray-tracing.rst | 34 +- .../ray-observability/state/state-api.rst | 409 ++++++------ 13 files changed, 1065 insertions(+), 932 deletions(-) diff --git a/doc/source/cluster/running-applications/job-submission/ray-client.rst b/doc/source/cluster/running-applications/job-submission/ray-client.rst index 593ad80b198e..29de3f118b69 100644 --- a/doc/source/cluster/running-applications/job-submission/ray-client.rst +++ b/doc/source/cluster/running-applications/job-submission/ray-client.rst @@ -82,30 +82,32 @@ Ensure that the Ray Client port on the head node is reachable from your local ma This means 
opening that port up by configuring security groups or other access controls (on `EC2 `_) or proxying from your local machine to the cluster (on `K8s `_). -.. tabbed:: AWS - - With the Ray cluster launcher, you can configure the security group - to allow inbound access by defining :ref:`cluster-configuration-security-group` - in your `cluster.yaml`. - - .. code-block:: yaml - - # An unique identifier for the head node and workers of this cluster. - cluster_name: minimal_security_group - - # Cloud-provider specific configuration. - provider: - type: aws - region: us-west-2 - security_group: - GroupName: ray_client_security_group - IpPermissions: - - FromPort: 10001 - ToPort: 10001 - IpProtocol: TCP - IpRanges: - # This will enable inbound access from ALL IPv4 addresses. - - CidrIp: 0.0.0.0/0 +.. tab-set:: + + .. tab-item:: AWS + + With the Ray cluster launcher, you can configure the security group + to allow inbound access by defining :ref:`cluster-configuration-security-group` + in your `cluster.yaml`. + + .. code-block:: yaml + + # An unique identifier for the head node and workers of this cluster. + cluster_name: minimal_security_group + + # Cloud-provider specific configuration. + provider: + type: aws + region: us-west-2 + security_group: + GroupName: ray_client_security_group + IpPermissions: + - FromPort: 10001 + ToPort: 10001 + IpProtocol: TCP + IpRanges: + # This will enable inbound access from ALL IPv4 addresses. + - CidrIp: 0.0.0.0/0 Step 3: Run Ray code ~~~~~~~~~~~~~~~~~~~~ diff --git a/doc/source/cluster/running-applications/monitoring-and-observability.rst b/doc/source/cluster/running-applications/monitoring-and-observability.rst index 8d41eafa2ed6..df3bcd6cddd9 100644 --- a/doc/source/cluster/running-applications/monitoring-and-observability.rst +++ b/doc/source/cluster/running-applications/monitoring-and-observability.rst @@ -21,32 +21,34 @@ including the running jobs, actors, workers, nodes, etc. 
By default, the :ref:`cluster launcher ` and :ref:`KubeRay operator ` will launch the dashboard, but will not publicly expose the port. -.. tabbed:: If using the VM cluster launcher +.. tab-set:: - You can securely port-forward local traffic to the dashboard via the ``ray - dashboard`` command. + .. tab-item:: If using the VM cluster launcher - .. code-block:: shell + You can securely port-forward local traffic to the dashboard via the ``ray + dashboard`` command. - $ ray dashboard [-p ] + .. code-block:: shell - The dashboard will now be visible at ``http://localhost:8265``. + $ ray dashboard [-p ] -.. tabbed:: If using Kubernetes + The dashboard will now be visible at ``http://localhost:8265``. - The KubeRay operator makes the dashboard available via a Service targeting - the Ray head pod, named ``-head-svc``. You can access the - dashboard from within the Kubernetes cluster at ``http://-head-svc:8265``. + .. tab-item:: If using Kubernetes - You can also view the dashboard from outside the Kubernetes cluster by - using port-forwarding: + The KubeRay operator makes the dashboard available via a Service targeting + the Ray head pod, named ``-head-svc``. You can access the + dashboard from within the Kubernetes cluster at ``http://-head-svc:8265``. - .. code-block:: shell + You can also view the dashboard from outside the Kubernetes cluster by + using port-forwarding: - $ kubectl port-forward service/raycluster-autoscaler-head-svc 8265:8265 + .. code-block:: shell - For more information about configuring network access to a Ray cluster on - Kubernetes, see the :ref:`networking notes `. + $ kubectl port-forward service/raycluster-autoscaler-head-svc 8265:8265 + + For more information about configuring network access to a Ray cluster on + Kubernetes, see the :ref:`networking notes `. Using Ray Cluster CLI tools @@ -63,29 +65,31 @@ These CLI commands can be run on any node in a Ray Cluster. 
Examples for executing these commands from a machine outside the Ray Cluster are provided below. -.. tabbed:: If using the VM cluster launcher +.. tab-set:: + + .. tab-item:: If using the VM cluster launcher - Execute a command on the cluster using ``ray exec``: + Execute a command on the cluster using ``ray exec``: - .. code-block:: shell + .. code-block:: shell - $ ray exec "ray status" + $ ray exec "ray status" -.. tabbed:: If using Kubernetes + .. tab-item:: If using Kubernetes - Execute a command on the cluster using ``kubectl exec`` and the configured - RayCluster name. We will use the Service targeting the Ray head pod to - execute a CLI command on the cluster. + Execute a command on the cluster using ``kubectl exec`` and the configured + RayCluster name. We will use the Service targeting the Ray head pod to + execute a CLI command on the cluster. - .. code-block:: shell + .. code-block:: shell - # First, find the name of the Ray head service. - $ kubectl get pod | grep -head - # NAME READY STATUS RESTARTS AGE - # -head-xxxxx 2/2 Running 0 XXs + # First, find the name of the Ray head service. + $ kubectl get pod | grep -head + # NAME READY STATUS RESTARTS AGE + # -head-xxxxx 2/2 Running 0 XXs - # Then, use the name of the Ray head service to run `ray status`. - $ kubectl exec -head-xxxxx -- ray status + # Then, use the name of the Ray head service to run `ray status`. + $ kubectl exec -head-xxxxx -- ray status .. _multi-node-metrics: diff --git a/doc/source/cluster/vms/getting-started.rst b/doc/source/cluster/vms/getting-started.rst index 97e553f1f62b..b323c29bd92c 100644 --- a/doc/source/cluster/vms/getting-started.rst +++ b/doc/source/cluster/vms/getting-started.rst @@ -31,37 +31,41 @@ Setup Before we start, you will need to install some Python dependencies as follows: -.. tabbed:: AWS +.. tab-set:: - .. code-block:: shell + .. tab-item:: AWS - $ pip install -U "ray[default]" boto3 + .. code-block:: shell -.. 
tabbed:: Azure + $ pip install -U "ray[default]" boto3 - .. code-block:: shell + .. tab-item:: Azure - $ pip install -U "ray[default]" azure-cli azure-core + .. code-block:: shell -.. tabbed:: GCP + $ pip install -U "ray[default]" azure-cli azure-core - .. code-block:: shell + .. tab-item:: GCP - $ pip install -U "ray[default]" google-api-python-client + .. code-block:: shell + + $ pip install -U "ray[default]" google-api-python-client Next, if you're not set up to use your cloud provider from the command line, you'll have to configure your credentials: -.. tabbed:: AWS +.. tab-set:: + + .. tab-item:: AWS - Configure your credentials in ``~/.aws/credentials`` as described in `the AWS docs `_. + Configure your credentials in ``~/.aws/credentials`` as described in `the AWS docs `_. -.. tabbed:: Azure + .. tab-item:: Azure - Log in using ``az login``, then configure your credentials with ``az account set -s ``. + Log in using ``az login``, then configure your credentials with ``az account set -s ``. -.. tabbed:: GCP + .. tab-item:: GCP - Set the ``GOOGLE_APPLICATION_CREDENTIALS`` environment variable as described in `the GCP docs `_. + Set the ``GOOGLE_APPLICATION_CREDENTIALS`` environment variable as described in `the GCP docs `_. Create a (basic) Python application ----------------------------------- @@ -154,45 +158,47 @@ To start a Ray Cluster, first we need to define the cluster configuration. The c A minimal sample cluster configuration file looks as follows: -.. tabbed:: AWS +.. tab-set:: + + .. tab-item:: AWS - .. literalinclude:: ../../../../python/ray/autoscaler/aws/example-minimal.yaml - :language: yaml + .. literalinclude:: ../../../../python/ray/autoscaler/aws/example-minimal.yaml + :language: yaml -.. tabbed:: Azure + .. tab-item:: Azure - .. code-block:: yaml + .. code-block:: yaml - # An unique identifier for the head node and workers of this cluster. - cluster_name: minimal + # An unique identifier for the head node and workers of this cluster. 
+ cluster_name: minimal - # Cloud-provider specific configuration. - provider: - type: azure - location: westus2 - resource_group: ray-cluster + # Cloud-provider specific configuration. + provider: + type: azure + location: westus2 + resource_group: ray-cluster - # How Ray will authenticate with newly launched nodes. - auth: - ssh_user: ubuntu - # you must specify paths to matching private and public key pair files - # use `ssh-keygen -t rsa -b 4096` to generate a new ssh key pair - ssh_private_key: ~/.ssh/id_rsa - # changes to this should match what is specified in file_mounts - ssh_public_key: ~/.ssh/id_rsa.pub + # How Ray will authenticate with newly launched nodes. + auth: + ssh_user: ubuntu + # you must specify paths to matching private and public key pair files + # use `ssh-keygen -t rsa -b 4096` to generate a new ssh key pair + ssh_private_key: ~/.ssh/id_rsa + # changes to this should match what is specified in file_mounts + ssh_public_key: ~/.ssh/id_rsa.pub -.. tabbed:: GCP + .. tab-item:: GCP - .. code-block:: yaml + .. code-block:: yaml - # A unique identifier for the head node and workers of this cluster. - cluster_name: minimal + # A unique identifier for the head node and workers of this cluster. + cluster_name: minimal - # Cloud-provider specific configuration. - provider: - type: gcp - region: us-west1 + # Cloud-provider specific configuration. + provider: + type: gcp + region: us-west1 Save this configuration file as ``config.yaml``. You can specify a lot more details in the configuration file: instance types to use, minimum and maximum number of workers to start, autoscaling strategy, files to sync, and more. For a full reference on the available configuration properties, please refer to the :ref:`cluster YAML configuration options reference `. 
diff --git a/doc/source/cluster/vms/references/ray-cluster-configuration.rst b/doc/source/cluster/vms/references/ray-cluster-configuration.rst index b12bb7a76047..b9e6f2f974b6 100644 --- a/doc/source/cluster/vms/references/ray-cluster-configuration.rst +++ b/doc/source/cluster/vms/references/ray-cluster-configuration.rst @@ -72,76 +72,82 @@ Docker Auth ~~~~ -.. tabbed:: AWS +.. tab-set:: - .. parsed-literal:: + .. tab-item:: AWS - :ref:`ssh_user `: str - :ref:`ssh_private_key `: str + .. parsed-literal:: -.. tabbed:: Azure + :ref:`ssh_user `: str + :ref:`ssh_private_key `: str - .. parsed-literal:: + .. tab-item:: Azure - :ref:`ssh_user `: str - :ref:`ssh_private_key `: str - :ref:`ssh_public_key `: str + .. parsed-literal:: -.. tabbed:: GCP + :ref:`ssh_user `: str + :ref:`ssh_private_key `: str + :ref:`ssh_public_key `: str - .. parsed-literal:: + .. tab-item:: GCP - :ref:`ssh_user `: str - :ref:`ssh_private_key `: str + .. parsed-literal:: + + :ref:`ssh_user `: str + :ref:`ssh_private_key `: str .. _cluster-configuration-provider-type: Provider ~~~~~~~~ -.. tabbed:: AWS +.. tab-set:: - .. parsed-literal:: + .. tab-item:: AWS - :ref:`type `: str - :ref:`region `: str - :ref:`availability_zone `: str - :ref:`cache_stopped_nodes `: bool - :ref:`security_group `: - :ref:`Security Group ` + .. parsed-literal:: -.. tabbed:: Azure + :ref:`type `: str + :ref:`region `: str + :ref:`availability_zone `: str + :ref:`cache_stopped_nodes `: bool + :ref:`security_group `: + :ref:`Security Group ` - .. parsed-literal:: + .. tab-item:: Azure - :ref:`type `: str - :ref:`location `: str - :ref:`resource_group `: str - :ref:`subscription_id `: str - :ref:`cache_stopped_nodes `: bool + .. parsed-literal:: -.. tabbed:: GCP + :ref:`type `: str + :ref:`location `: str + :ref:`resource_group `: str + :ref:`subscription_id `: str + :ref:`cache_stopped_nodes `: bool - .. parsed-literal:: + .. 
tab-item:: GCP - :ref:`type `: str - :ref:`region `: str - :ref:`availability_zone `: str - :ref:`project_id `: str - :ref:`cache_stopped_nodes `: bool + .. parsed-literal:: + + :ref:`type `: str + :ref:`region `: str + :ref:`availability_zone `: str + :ref:`project_id `: str + :ref:`cache_stopped_nodes `: bool .. _cluster-configuration-security-group-type: Security Group ~~~~~~~~~~~~~~ -.. tabbed:: AWS +.. tab-set:: - .. parsed-literal:: + .. tab-item:: AWS - :ref:`GroupName `: str - :ref:`IpPermissions `: - - `IpPermission `_ + .. parsed-literal:: + + :ref:`GroupName `: str + :ref:`IpPermissions `: + - `IpPermission `_ .. _cluster-configuration-node-types-type: @@ -181,17 +187,19 @@ Cloud-specific configuration for nodes of a given node type. Modifying the ``node_config`` and updating with :ref:`ray up ` will cause the autoscaler to scale down all existing nodes of the node type; nodes with the newly applied ``node_config`` will then be created according to cluster configuration and Ray resource demands. -.. tabbed:: AWS +.. tab-set:: - A YAML object which conforms to the EC2 ``create_instances`` API in `the AWS docs `_. + .. tab-item:: AWS -.. tabbed:: Azure + A YAML object which conforms to the EC2 ``create_instances`` API in `the AWS docs `_. - A YAML object as defined in `the deployment template `_ whose resources are defined in `the Azure docs `_. + .. tab-item:: Azure -.. tabbed:: GCP + A YAML object as defined in `the deployment template `_ whose resources are defined in `the Azure docs `_. + + .. tab-item:: GCP - A YAML object as defined in `the GCP docs `_. + A YAML object as defined in `the GCP docs `_. .. _cluster-configuration-node-docker-type: @@ -347,26 +355,28 @@ Each node type is identified by a user-specified key. * **Type:** :ref:`Node types ` * **Default:** -.. tabbed:: AWS - - .. 
code-block:: yaml - - available_node_types: - ray.head.default: - node_config: - InstanceType: m5.large - BlockDeviceMappings: - - DeviceName: /dev/sda1 - Ebs: - VolumeSize: 140 - resources: {"CPU": 2} - ray.worker.default: - node_config: - InstanceType: m5.large - InstanceMarketOptions: - MarketType: spot - resources: {"CPU": 2} - min_workers: 0 +.. tab-set:: + + .. tab-item:: AWS + + .. code-block:: yaml + + available_node_types: + ray.head.default: + node_config: + InstanceType: m5.large + BlockDeviceMappings: + - DeviceName: /dev/sda1 + Ebs: + VolumeSize: 140 + resources: {"CPU": 2} + ray.worker.default: + node_config: + InstanceType: m5.large + InstanceMarketOptions: + MarketType: spot + resources: {"CPU": 2} + min_workers: 0 .. _cluster-configuration-head-node-type: @@ -462,14 +472,16 @@ A list of commands to run to set up nodes. These commands will always run on the * **Type:** List of String * **Default:** -.. tabbed:: AWS +.. tab-set:: - .. code-block:: yaml + .. tab-item:: AWS - # Default setup_commands: - setup_commands: - - echo 'export PATH="$HOME/anaconda3/envs/tensorflow_p36/bin:$PATH"' >> ~/.bashrc - - pip install -U https://s3-us-west-2.amazonaws.com/ray-wheels/latest/ray-3.0.0.dev0-cp37-cp37m-manylinux2014_x86_64.whl + .. code-block:: yaml + + # Default setup_commands: + setup_commands: + - echo 'export PATH="$HOME/anaconda3/envs/tensorflow_p36/bin:$PATH"' >> ~/.bashrc + - pip install -U https://s3-us-west-2.amazonaws.com/ray-wheels/latest/ray-3.0.0.dev0-cp37-cp37m-manylinux2014_x86_64.whl - Setup commands should ideally be *idempotent* (i.e., can be run multiple times without changing the result); this allows Ray to safely update nodes after they have been created. You can usually make commands idempotent with small modifications, e.g. ``git clone foo`` can be rewritten as ``test -e foo || git clone foo`` which checks if the repo is already cloned first. @@ -522,13 +534,15 @@ Commands to start ray on the head node. You don't need to change this. 
* **Type:** List of String * **Default:** -.. tabbed:: AWS +.. tab-set:: - .. code-block:: yaml + .. tab-item:: AWS + + .. code-block:: yaml - head_start_ray_commands: - - ray stop - - ulimit -n 65536; ray start --head --port=6379 --object-manager-port=8076 --autoscaling-config=~/ray_bootstrap_config.yaml + head_start_ray_commands: + - ray stop + - ulimit -n 65536; ray start --head --port=6379 --object-manager-port=8076 --autoscaling-config=~/ray_bootstrap_config.yaml .. _cluster-configuration-worker-start-ray-commands: @@ -542,13 +556,15 @@ Command to start ray on worker nodes. You don't need to change this. * **Type:** List of String * **Default:** -.. tabbed:: AWS +.. tab-set:: - .. code-block:: yaml + .. tab-item:: AWS - worker_start_ray_commands: - - ray stop - - ulimit -n 65536; ray start --address=$RAY_HEAD_IP:6379 --object-manager-port=8076 + .. code-block:: yaml + + worker_start_ray_commands: + - ray stop + - ulimit -n 65536; ray start --address=$RAY_HEAD_IP:6379 --object-manager-port=8076 .. _cluster-configuration-image: @@ -691,225 +707,243 @@ The user that Ray will authenticate with when launching new nodes. ``auth.ssh_private_key`` ~~~~~~~~~~~~~~~~~~~~~~~~ -.. tabbed:: AWS +.. tab-set:: - The path to an existing private key for Ray to use. If not configured, Ray will create a new private keypair (default behavior). If configured, the key must be added to the project-wide metadata and ``KeyName`` has to be defined in the :ref:`node configuration `. + .. tab-item:: AWS - * **Required:** No - * **Importance:** Low - * **Type:** String + The path to an existing private key for Ray to use. If not configured, Ray will create a new private keypair (default behavior). If configured, the key must be added to the project-wide metadata and ``KeyName`` has to be defined in the :ref:`node configuration `. -.. tabbed:: Azure + * **Required:** No + * **Importance:** Low + * **Type:** String - The path to an existing private key for Ray to use. + .. 
tab-item:: Azure - * **Required:** Yes - * **Importance:** High - * **Type:** String + The path to an existing private key for Ray to use. - You may use ``ssh-keygen -t rsa -b 4096`` to generate a new ssh keypair. + * **Required:** Yes + * **Importance:** High + * **Type:** String -.. tabbed:: GCP + You may use ``ssh-keygen -t rsa -b 4096`` to generate a new ssh keypair. - The path to an existing private key for Ray to use. If not configured, Ray will create a new private keypair (default behavior). If configured, the key must be added to the project-wide metadata and ``KeyName`` has to be defined in the :ref:`node configuration `. + .. tab-item:: GCP - * **Required:** No - * **Importance:** Low - * **Type:** String + The path to an existing private key for Ray to use. If not configured, Ray will create a new private keypair (default behavior). If configured, the key must be added to the project-wide metadata and ``KeyName`` has to be defined in the :ref:`node configuration `. + + * **Required:** No + * **Importance:** Low + * **Type:** String .. _cluster-configuration-ssh-public-key: ``auth.ssh_public_key`` ~~~~~~~~~~~~~~~~~~~~~~~ -.. tabbed:: AWS +.. tab-set:: - Not available. + .. tab-item:: AWS -.. tabbed:: Azure + Not available. - The path to an existing public key for Ray to use. + .. tab-item:: Azure - * **Required:** Yes - * **Importance:** High - * **Type:** String + The path to an existing public key for Ray to use. -.. tabbed:: GCP + * **Required:** Yes + * **Importance:** High + * **Type:** String + + .. tab-item:: GCP - Not available. + Not available. .. _cluster-configuration-type: ``provider.type`` ~~~~~~~~~~~~~~~~~ -.. tabbed:: AWS +.. tab-set:: - The cloud service provider. For AWS, this must be set to ``aws``. + .. tab-item:: AWS - * **Required:** Yes - * **Importance:** High - * **Type:** String + The cloud service provider. For AWS, this must be set to ``aws``. -.. 
tabbed:: Azure + * **Required:** Yes + * **Importance:** High + * **Type:** String - The cloud service provider. For Azure, this must be set to ``azure``. + .. tab-item:: Azure - * **Required:** Yes - * **Importance:** High - * **Type:** String + The cloud service provider. For Azure, this must be set to ``azure``. -.. tabbed:: GCP + * **Required:** Yes + * **Importance:** High + * **Type:** String - The cloud service provider. For GCP, this must be set to ``gcp``. + .. tab-item:: GCP - * **Required:** Yes - * **Importance:** High - * **Type:** String + The cloud service provider. For GCP, this must be set to ``gcp``. + + * **Required:** Yes + * **Importance:** High + * **Type:** String .. _cluster-configuration-region: ``provider.region`` ~~~~~~~~~~~~~~~~~~~ -.. tabbed:: AWS +.. tab-set:: - The region to use for deployment of the Ray cluster. + .. tab-item:: AWS - * **Required:** Yes - * **Importance:** High - * **Type:** String - * **Default:** us-west-2 + The region to use for deployment of the Ray cluster. -.. tabbed:: Azure + * **Required:** Yes + * **Importance:** High + * **Type:** String + * **Default:** us-west-2 - Not available. + .. tab-item:: Azure -.. tabbed:: GCP + Not available. - The region to use for deployment of the Ray cluster. + .. tab-item:: GCP - * **Required:** Yes - * **Importance:** High - * **Type:** String - * **Default:** us-west1 + The region to use for deployment of the Ray cluster. + + * **Required:** Yes + * **Importance:** High + * **Type:** String + * **Default:** us-west1 .. _cluster-configuration-availability-zone: ``provider.availability_zone`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -.. tabbed:: AWS +.. tab-set:: - A string specifying a comma-separated list of availability zone(s) that nodes may be launched in. - Nodes will be launched in the first listed availability zone and will be tried in the following availability - zones if launching fails. + .. 
tab-item:: AWS - * **Required:** No - * **Importance:** Low - * **Type:** String - * **Default:** us-west-2a,us-west-2b + A string specifying a comma-separated list of availability zone(s) that nodes may be launched in. + Nodes will be launched in the first listed availability zone and will be tried in the following availability + zones if launching fails. -.. tabbed:: Azure + * **Required:** No + * **Importance:** Low + * **Type:** String + * **Default:** us-west-2a,us-west-2b - Not available. + .. tab-item:: Azure -.. tabbed:: GCP + Not available. - A string specifying a comma-separated list of availability zone(s) that nodes may be launched in. + .. tab-item:: GCP - * **Required:** No - * **Importance:** Low - * **Type:** String - * **Default:** us-west1-a + A string specifying a comma-separated list of availability zone(s) that nodes may be launched in. + + * **Required:** No + * **Importance:** Low + * **Type:** String + * **Default:** us-west1-a .. _cluster-configuration-location: ``provider.location`` ~~~~~~~~~~~~~~~~~~~~~ -.. tabbed:: AWS +.. tab-set:: - Not available. + .. tab-item:: AWS -.. tabbed:: Azure + Not available. - The location to use for deployment of the Ray cluster. + .. tab-item:: Azure - * **Required:** Yes - * **Importance:** High - * **Type:** String - * **Default:** westus2 + The location to use for deployment of the Ray cluster. -.. tabbed:: GCP + * **Required:** Yes + * **Importance:** High + * **Type:** String + * **Default:** westus2 - Not available. + .. tab-item:: GCP + + Not available. .. _cluster-configuration-resource-group: ``provider.resource_group`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~ -.. tabbed:: AWS +.. tab-set:: - Not available. + .. tab-item:: AWS -.. tabbed:: Azure + Not available. - The resource group to use for deployment of the Ray cluster. + .. tab-item:: Azure - * **Required:** Yes - * **Importance:** High - * **Type:** String - * **Default:** ray-cluster + The resource group to use for deployment of the Ray cluster. -.. 
tabbed:: GCP + * **Required:** Yes + * **Importance:** High + * **Type:** String + * **Default:** ray-cluster - Not available. + .. tab-item:: GCP + + Not available. .. _cluster-configuration-subscription-id: ``provider.subscription_id`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -.. tabbed:: AWS +.. tab-set:: - Not available. + .. tab-item:: AWS -.. tabbed:: Azure + Not available. - The subscription ID to use for deployment of the Ray cluster. If not specified, Ray will use the default from the Azure CLI. + .. tab-item:: Azure - * **Required:** No - * **Importance:** High - * **Type:** String - * **Default:** ``""`` + The subscription ID to use for deployment of the Ray cluster. If not specified, Ray will use the default from the Azure CLI. -.. tabbed:: GCP + * **Required:** No + * **Importance:** High + * **Type:** String + * **Default:** ``""`` - Not available. + .. tab-item:: GCP + + Not available. .. _cluster-configuration-project-id: ``provider.project_id`` ~~~~~~~~~~~~~~~~~~~~~~~ -.. tabbed:: AWS +.. tab-set:: - Not available. + .. tab-item:: AWS -.. tabbed:: Azure + Not available. - Not available. + .. tab-item:: Azure -.. tabbed:: GCP + Not available. - The globally unique project ID to use for deployment of the Ray cluster. + .. tab-item:: GCP - * **Required:** Yes - * **Importance:** Low - * **Type:** String - * **Default:** ``null`` + The globally unique project ID to use for deployment of the Ray cluster. + + * **Required:** Yes + * **Importance:** Low + * **Type:** String + * **Default:** ``null`` .. _cluster-configuration-cache-stopped-nodes: @@ -929,21 +963,23 @@ If enabled, nodes will be *stopped* when the cluster scales down. If disabled, n ``provider.security_group`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~ -.. tabbed:: AWS +.. tab-set:: - A security group that can be used to specify custom inbound rules. + .. 
tab-item:: AWS - * **Required:** No - * **Importance:** Medium - * **Type:** :ref:`Security Group ` + A security group that can be used to specify custom inbound rules. -.. tabbed:: Azure + * **Required:** No + * **Importance:** Medium + * **Type:** :ref:`Security Group ` - Not available. + .. tab-item:: Azure -.. tabbed:: GCP + Not available. + + .. tab-item:: GCP - Not available. + Not available. .. _cluster-configuration-group-name: @@ -1041,29 +1077,31 @@ A list of commands to run to set up worker nodes of this type. These commands wi ``available_node_types..node_type.resources.CPU`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -.. tabbed:: AWS +.. tab-set:: - The number of CPUs made available by this node. If not configured, Autoscaler can automatically detect them only for AWS/Kubernetes cloud providers. + .. tab-item:: AWS - * **Required:** Yes (except for AWS/K8s) - * **Importance:** High - * **Type:** Integer + The number of CPUs made available by this node. If not configured, Autoscaler can automatically detect them only for AWS/Kubernetes cloud providers. -.. tabbed:: Azure + * **Required:** Yes (except for AWS/K8s) + * **Importance:** High + * **Type:** Integer - The number of CPUs made available by this node. + .. tab-item:: Azure - * **Required:** Yes - * **Importance:** High - * **Type:** Integer + The number of CPUs made available by this node. -.. tabbed:: GCP + * **Required:** Yes + * **Importance:** High + * **Type:** Integer - The number of CPUs made available by this node. + .. tab-item:: GCP - * **Required:** No - * **Importance:** High - * **Type:** Integer + The number of CPUs made available by this node. + + * **Required:** No + * **Importance:** High + * **Type:** Integer .. _cluster-configuration-gpu: @@ -1071,29 +1109,31 @@ A list of commands to run to set up worker nodes of this type. 
These commands wi ``available_node_types..node_type.resources.GPU`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -.. tabbed:: AWS +.. tab-set:: - The number of GPUs made available by this node. If not configured, Autoscaler can automatically detect them only for AWS/Kubernetes cloud providers. + .. tab-item:: AWS - * **Required:** No - * **Importance:** Low - * **Type:** Integer + The number of GPUs made available by this node. If not configured, Autoscaler can automatically detect them only for AWS/Kubernetes cloud providers. -.. tabbed:: Azure + * **Required:** No + * **Importance:** Low + * **Type:** Integer - The number of GPUs made available by this node. + .. tab-item:: Azure - * **Required:** No - * **Importance:** High - * **Type:** Integer + The number of GPUs made available by this node. -.. tabbed:: GCP + * **Required:** No + * **Importance:** High + * **Type:** Integer - The number of GPUs made available by this node. + .. tab-item:: GCP - * **Required:** No - * **Importance:** High - * **Type:** Integer + The number of GPUs made available by this node. + + * **Required:** No + * **Importance:** High + * **Type:** Integer .. _cluster-configuration-memory: @@ -1102,7 +1142,9 @@ A list of commands to run to set up worker nodes of this type. These commands wi .. tabbed:: AWS - The memory in bytes allocated for python worker heap memory on the node. If not configured, Autoscaler will automatically detect the amount of RAM on the node for AWS/Kubernetes and allocate 70% of it for the heap. + The memory in bytes allocated for python worker heap memory on the node. + If not configured, Autoscaler will automatically detect the amount of RAM on + the node for AWS/Kubernetes and allocate 70% of it for the heap. * **Required:** No * **Importance:** Low @@ -1129,29 +1171,31 @@ A list of commands to run to set up worker nodes of this type. 
These commands wi ``available_node_types..node_type.resources.object-store-memory`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -.. tabbed:: AWS +.. tab-set:: - The memory in bytes allocated for the object store on the node. If not configured, Autoscaler will automatically detect the amount of RAM on the node for AWS/Kubernetes and allocate 30% of it for the object store. + .. tab-item:: AWS - * **Required:** No - * **Importance:** Low - * **Type:** Integer + The memory in bytes allocated for the object store on the node. If not configured, Autoscaler will automatically detect the amount of RAM on the node for AWS/Kubernetes and allocate 30% of it for the object store. -.. tabbed:: Azure + * **Required:** No + * **Importance:** Low + * **Type:** Integer - The memory in bytes allocated for the object store on the node. + .. tab-item:: Azure - * **Required:** No - * **Importance:** High - * **Type:** Integer + The memory in bytes allocated for the object store on the node. -.. tabbed:: GCP + * **Required:** No + * **Importance:** High + * **Type:** Integer - The memory in bytes allocated for the object store on the node. + .. tab-item:: GCP - * **Required:** No - * **Importance:** High - * **Type:** Integer + The memory in bytes allocated for the object store on the node. + + * **Required:** No + * **Importance:** High + * **Type:** Integer .. _cluster-configuration-node-docker: @@ -1171,38 +1215,42 @@ Examples Minimal configuration ~~~~~~~~~~~~~~~~~~~~~ -.. tabbed:: AWS +.. tab-set:: - .. literalinclude:: ../../../../../python/ray/autoscaler/aws/example-minimal.yaml - :language: yaml + .. tab-item:: AWS -.. tabbed:: Azure + .. literalinclude:: ../../../../../python/ray/autoscaler/aws/example-minimal.yaml + :language: yaml - .. literalinclude:: ../../../../../python/ray/autoscaler/azure/example-minimal.yaml - :language: yaml + .. tab-item:: Azure -.. tabbed:: GCP + .. 
literalinclude:: ../../../../../python/ray/autoscaler/azure/example-minimal.yaml + :language: yaml + + .. tab-item:: GCP - .. literalinclude:: ../../../../../python/ray/autoscaler/gcp/example-minimal.yaml - :language: yaml + .. literalinclude:: ../../../../../python/ray/autoscaler/gcp/example-minimal.yaml + :language: yaml Full configuration ~~~~~~~~~~~~~~~~~~ -.. tabbed:: AWS +.. tab-set:: - .. literalinclude:: ../../../../../python/ray/autoscaler/aws/example-full.yaml - :language: yaml + .. tab-item:: AWS -.. tabbed:: Azure + .. literalinclude:: ../../../../../python/ray/autoscaler/aws/example-full.yaml + :language: yaml - .. literalinclude:: ../../../../../python/ray/autoscaler/azure/example-full.yaml - :language: yaml + .. tab-item:: Azure -.. tabbed:: GCP + .. literalinclude:: ../../../../../python/ray/autoscaler/azure/example-full.yaml + :language: yaml + + .. tab-item:: GCP - .. literalinclude:: ../../../../../python/ray/autoscaler/gcp/example-full.yaml - :language: yaml + .. literalinclude:: ../../../../../python/ray/autoscaler/gcp/example-full.yaml + :language: yaml TPU Configuration ~~~~~~~~~~~~~~~~~ @@ -1211,7 +1259,9 @@ It is possible to use `TPU VMs `_. -.. tabbed:: GCP +.. tab-set:: + + .. tab-item:: GCP - .. literalinclude:: ../../../../../python/ray/autoscaler/gcp/tpu.yaml - :language: yaml + .. literalinclude:: ../../../../../python/ray/autoscaler/gcp/tpu.yaml + :language: yaml diff --git a/doc/source/data/consuming-datastreams.rst b/doc/source/data/consuming-datastreams.rst index be61d82f016f..9921d7941ccf 100644 --- a/doc/source/data/consuming-datastreams.rst +++ b/doc/source/data/consuming-datastreams.rst @@ -84,37 +84,39 @@ The supported formats include Parquet, CSV, JSON, NumPy. To control the number of output files, you may use :meth:`ds.repartition() ` to repartition the Datastream before writing out. -.. tabbed:: Parquet +.. tab-set:: - .. 
literalinclude:: ./doc_code/saving_datastreams.py - :language: python - :start-after: __write_parquet_begin__ - :end-before: __write_parquet_end__ + .. tab-item:: Parquet -.. tabbed:: CSV + .. literalinclude:: ./doc_code/saving_datastreams.py + :language: python + :start-after: __write_parquet_begin__ + :end-before: __write_parquet_end__ - .. literalinclude:: ./doc_code/saving_datastreams.py - :language: python - :start-after: __write_csv_begin__ - :end-before: __write_csv_end__ + .. tab-item:: CSV -.. tabbed:: JSON + .. literalinclude:: ./doc_code/saving_datastreams.py + :language: python + :start-after: __write_csv_begin__ + :end-before: __write_csv_end__ - .. literalinclude:: ./doc_code/saving_datastreams.py - :language: python - :start-after: __write_json_begin__ - :end-before: __write_json_end__ + .. tab-item:: JSON -.. tabbed:: NumPy + .. literalinclude:: ./doc_code/saving_datastreams.py + :language: python + :start-after: __write_json_begin__ + :end-before: __write_json_end__ - .. literalinclude:: ./doc_code/saving_datastreams.py - :language: python - :start-after: __write_numpy_begin__ - :end-before: __write_numpy_end__ + .. tab-item:: NumPy -.. tabbed:: TFRecords + .. literalinclude:: ./doc_code/saving_datastreams.py + :language: python + :start-after: __write_numpy_begin__ + :end-before: __write_numpy_end__ - .. literalinclude:: ./doc_code/saving_datastreams.py - :language: python - :start-after: __write_tfrecords_begin__ - :end-before: __write_tfrecords_end__ + .. tab-item:: TFRecords + + .. 
literalinclude:: ./doc_code/saving_datastreams.py + :language: python + :start-after: __write_tfrecords_begin__ + :end-before: __write_tfrecords_end__ diff --git a/doc/source/data/data-internals.rst b/doc/source/data/data-internals.rst index c901c867de65..7fd74a9f4d43 100644 --- a/doc/source/data/data-internals.rst +++ b/doc/source/data/data-internals.rst @@ -22,31 +22,33 @@ When using Ray Data in conjunction with :ref:`Ray Tune `, it is impor As an example, the following shows two ways to use Ray Data together with Tune: -.. tabbed:: Limiting Tune Concurrency +.. tab-set:: - By limiting the number of concurrent Tune trials, we ensure CPU resources are always available for Ray Data execution. - This can be done using the ``max_concurrent_trials`` Tune option. + .. tab-item:: Limiting Tune Concurrency - .. literalinclude:: ./doc_code/key_concepts.py - :language: python - :start-after: __resource_allocation_1_begin__ - :end-before: __resource_allocation_1_end__ + By limiting the number of concurrent Tune trials, we ensure CPU resources are always available for Ray Data execution. + This can be done using the ``max_concurrent_trials`` Tune option. -.. tabbed:: Reserving CPUs (Experimental) + .. literalinclude:: ./doc_code/key_concepts.py + :language: python + :start-after: __resource_allocation_1_begin__ + :end-before: __resource_allocation_1_end__ - Alternatively, we can tell Tune to set aside CPU resources for other libraries. - This can be done by setting ``_max_cpu_fraction_per_node=0.8``, which reserves - 20% of node CPUs for Datastream execution. + .. tab-item:: Reserving CPUs (Experimental) - .. literalinclude:: ./doc_code/key_concepts.py - :language: python - :start-after: __resource_allocation_2_begin__ - :end-before: __resource_allocation_2_end__ + Alternatively, we can tell Tune to set aside CPU resources for other libraries. + This can be done by setting ``_max_cpu_fraction_per_node=0.8``, which reserves + 20% of node CPUs for Datastream execution. - .. 
warning:: + .. literalinclude:: ./doc_code/key_concepts.py + :language: python + :start-after: __resource_allocation_2_begin__ + :end-before: __resource_allocation_2_end__ - This option is experimental and not currently recommended for use with - autoscaling clusters (scale-up will not trigger properly). + .. warning:: + + This option is experimental and not currently recommended for use with + autoscaling clusters (scale-up will not trigger properly). .. _datastreams_pg: diff --git a/doc/source/data/data-tensor-support.rst b/doc/source/data/data-tensor-support.rst index 08049fc63951..282b0c931fca 100644 --- a/doc/source/data/data-tensor-support.rst +++ b/doc/source/data/data-tensor-support.rst @@ -16,93 +16,95 @@ Creating Tensor Datastreams This section shows how to create single and multi-column tensor datastreams. -.. tabbed:: Synthetic Data +.. tab-set:: - Create a synthetic tensor datastream from a range of integers. + .. tab-item:: Synthetic Data - **Single-column only**: + Create a synthetic tensor datastream from a range of integers. - .. literalinclude:: ./doc_code/tensor.py - :language: python - :start-after: __create_range_begin__ - :end-before: __create_range_end__ + **Single-column only**: -.. tabbed:: Pandas UDF + .. literalinclude:: ./doc_code/tensor.py + :language: python + :start-after: __create_range_begin__ + :end-before: __create_range_end__ - Create tensor datastreams by returning ``List[np.ndarray]`` columns from a Pandas - :ref:`user-defined function `. + .. tab-item:: Pandas UDF - **Single-column**: + Create tensor datastreams by returning ``List[np.ndarray]`` columns from a Pandas + :ref:`user-defined function `. - .. literalinclude:: ./doc_code/tensor.py - :language: python - :start-after: __create_pandas_begin__ - :end-before: __create_pandas_end__ + **Single-column**: - **Multi-column**: + .. literalinclude:: ./doc_code/tensor.py + :language: python + :start-after: __create_pandas_begin__ + :end-before: __create_pandas_end__ - .. 
literalinclude:: ./doc_code/tensor.py - :language: python - :start-after: __create_pandas_2_begin__ - :end-before: __create_pandas_2_end__ + **Multi-column**: -.. tabbed:: NumPy + .. literalinclude:: ./doc_code/tensor.py + :language: python + :start-after: __create_pandas_2_begin__ + :end-before: __create_pandas_2_end__ - Create from in-memory NumPy data or from previously saved NumPy (.npy) files. + .. tab-item:: NumPy - **Single-column only**: + Create from in-memory NumPy data or from previously saved NumPy (.npy) files. - .. literalinclude:: ./doc_code/tensor.py - :language: python - :start-after: __create_numpy_begin__ - :end-before: __create_numpy_end__ + **Single-column only**: -.. tabbed:: Parquet + .. literalinclude:: ./doc_code/tensor.py + :language: python + :start-after: __create_numpy_begin__ + :end-before: __create_numpy_end__ - There are two ways to construct a Parquet tensor datastream: (1) loading a - previously-saved tensor datastream, or (2) casting non-tensor Parquet columns to tensor - type. When casting data, a tensor schema or deserialization - :ref:`user-defined function ` must be provided. The - following are examples for each method. + .. tab-item:: Parquet - **Previously-saved tensor datastreams**: + There are two ways to construct a Parquet tensor datastream: (1) loading a + previously-saved tensor datastream, or (2) casting non-tensor Parquet columns to tensor + type. When casting data, a tensor schema or deserialization + :ref:`user-defined function ` must be provided. The + following are examples for each method. - .. literalinclude:: ./doc_code/tensor.py - :language: python - :start-after: __create_parquet_1_begin__ - :end-before: __create_parquet_1_end__ + **Previously-saved tensor datastreams**: - **Cast from data stored in C-contiguous format**: + .. 
literalinclude:: ./doc_code/tensor.py + :language: python + :start-after: __create_parquet_1_begin__ + :end-before: __create_parquet_1_end__ - For tensors stored as raw NumPy ndarray bytes in C-contiguous order (e.g., via - `ndarray.tobytes() `__), all you need to specify is the tensor column schema. The following is an end-to-end example: + **Cast from data stored in C-contiguous format**: - .. literalinclude:: ./doc_code/tensor.py - :language: python - :start-after: __create_parquet_2_begin__ - :end-before: __create_parquet_2_end__ + For tensors stored as raw NumPy ndarray bytes in C-contiguous order (e.g., via + `ndarray.tobytes() `__), all you need to specify is the tensor column schema. The following is an end-to-end example: - **Cast from data stored in custom formats**: + .. literalinclude:: ./doc_code/tensor.py + :language: python + :start-after: __create_parquet_2_begin__ + :end-before: __create_parquet_2_end__ - For tensors stored in other formats (e.g., pickled), you can specify a deserializer - :ref:`user-defined function ` that returns - :class:`~ray.data.extensions.tensor_extension.TensorArray` columns: + **Cast from data stored in custom formats**: - .. literalinclude:: ./doc_code/tensor.py - :language: python - :start-after: __create_parquet_3_begin__ - :end-before: __create_parquet_3_end__ + For tensors stored in other formats (e.g., pickled), you can specify a deserializer + :ref:`user-defined function ` that returns + :class:`~ray.data.extensions.tensor_extension.TensorArray` columns: -.. tabbed:: Images + .. literalinclude:: ./doc_code/tensor.py + :language: python + :start-after: __create_parquet_3_begin__ + :end-before: __create_parquet_3_end__ - Load image data stored as individual files using :func:`~ray.data.read_images`: + .. tab-item:: Images - **Image and label columns**: + Load image data stored as individual files using :func:`~ray.data.read_images`: - .. 
literalinclude:: ./doc_code/tensor.py - :language: python - :start-after: __create_images_begin__ - :end-before: __create_images_end__ + **Image and label columns**: + + .. literalinclude:: ./doc_code/tensor.py + :language: python + :start-after: __create_images_begin__ + :end-before: __create_images_end__ .. note:: @@ -114,69 +116,71 @@ Transforming / Consuming Tensor Data Like any other Datastream, Datastreams with tensor columns can be consumed / transformed in batches via the :meth:`ds.iter_batches(batch_format=\) ` and :meth:`ds.map_batches(fn, batch_format=\) ` APIs. This section shows the available batch formats and their behavior: -.. tabbed:: "default" +.. tab-set:: + + .. tab-item:: "default" - **Single-column**: + **Single-column**: - .. literalinclude:: ./doc_code/tensor.py - :language: python - :start-after: __consume_native_begin__ - :end-before: __consume_native_end__ + .. literalinclude:: ./doc_code/tensor.py + :language: python + :start-after: __consume_native_begin__ + :end-before: __consume_native_end__ - **Multi-column**: + **Multi-column**: - .. literalinclude:: ./doc_code/tensor.py - :language: python - :start-after: __consume_native_2_begin__ - :end-before: __consume_native_2_end__ + .. literalinclude:: ./doc_code/tensor.py + :language: python + :start-after: __consume_native_2_begin__ + :end-before: __consume_native_2_end__ -.. tabbed:: "pandas" + .. tab-item:: "pandas" - **Single-column**: + **Single-column**: - .. literalinclude:: ./doc_code/tensor.py - :language: python - :start-after: __consume_pandas_begin__ - :end-before: __consume_pandas_end__ + .. literalinclude:: ./doc_code/tensor.py + :language: python + :start-after: __consume_pandas_begin__ + :end-before: __consume_pandas_end__ - **Multi-column**: + **Multi-column**: - .. literalinclude:: ./doc_code/tensor.py - :language: python - :start-after: __consume_pandas_2_begin__ - :end-before: __consume_pandas_2_end__ + .. 
literalinclude:: ./doc_code/tensor.py + :language: python + :start-after: __consume_pandas_2_begin__ + :end-before: __consume_pandas_2_end__ -.. tabbed:: "pyarrow" + .. tab-item:: "pyarrow" - **Single-column**: + **Single-column**: - .. literalinclude:: ./doc_code/tensor.py - :language: python - :start-after: __consume_pyarrow_begin__ - :end-before: __consume_pyarrow_end__ + .. literalinclude:: ./doc_code/tensor.py + :language: python + :start-after: __consume_pyarrow_begin__ + :end-before: __consume_pyarrow_end__ - **Multi-column**: + **Multi-column**: - .. literalinclude:: ./doc_code/tensor.py - :language: python - :start-after: __consume_pyarrow_2_begin__ - :end-before: __consume_pyarrow_2_end__ + .. literalinclude:: ./doc_code/tensor.py + :language: python + :start-after: __consume_pyarrow_2_begin__ + :end-before: __consume_pyarrow_2_end__ -.. tabbed:: "numpy" + .. tab-item:: "numpy" - **Single-column**: + **Single-column**: - .. literalinclude:: ./doc_code/tensor.py - :language: python - :start-after: __consume_numpy_begin__ - :end-before: __consume_numpy_end__ + .. literalinclude:: ./doc_code/tensor.py + :language: python + :start-after: __consume_numpy_begin__ + :end-before: __consume_numpy_end__ - **Multi-column**: + **Multi-column**: - .. literalinclude:: ./doc_code/tensor.py - :language: python - :start-after: __consume_numpy_2_begin__ - :end-before: __consume_numpy_2_end__ + .. literalinclude:: ./doc_code/tensor.py + :language: python + :start-after: __consume_numpy_2_begin__ + :end-before: __consume_numpy_2_end__ Saving Tensor Datastreams ------------------------- @@ -185,19 +189,21 @@ Because tensor datastreams rely on Datastreams-specific extension types, they ca saved in formats that preserve Arrow metadata (currently only Parquet). In addition, single-column tensor datastreams can be saved in NumPy format. -.. tabbed:: Parquet +.. tab-set:: + + .. tab-item:: Parquet - .. 
literalinclude:: ./doc_code/tensor.py - :language: python - :start-after: __write_1_begin_ - :end-before: __write_1_end__ + .. literalinclude:: ./doc_code/tensor.py + :language: python + :start-after: __write_1_begin_ + :end-before: __write_1_end__ -.. tabbed:: NumPy + .. tab-item:: NumPy - .. literalinclude:: ./doc_code/tensor.py - :language: python - :start-after: __write_2_begin_ - :end-before: __write_2_end__ + .. literalinclude:: ./doc_code/tensor.py + :language: python + :start-after: __write_2_begin_ + :end-before: __write_2_end__ .. _ragged_tensor_support: diff --git a/doc/source/data/getting-started.rst b/doc/source/data/getting-started.rst index 0359d20e8a6b..b769148b3adf 100644 --- a/doc/source/data/getting-started.rst +++ b/doc/source/data/getting-started.rst @@ -91,50 +91,52 @@ Consume the datastream Pass datastreams to Ray tasks or actors, and access records with methods like :meth:`~ray.data.Datastream.iter_batches`. -.. tabbed:: Local +.. tab-set:: - .. testcode:: + .. tab-item:: Local - batches = transformed_ds.iter_batches(batch_size=8) - print(next(iter(batches))) + .. testcode:: - .. testoutput:: - :options: +NORMALIZE_WHITESPACE + batches = transformed_ds.iter_batches(batch_size=8) + print(next(iter(batches))) - sepal length (cm) ... target - 0 5.2 ... 1 - 1 5.4 ... 1 - 2 4.9 ... 2 + .. testoutput:: + :options: +NORMALIZE_WHITESPACE - [3 rows x 5 columns] + sepal length (cm) ... target + 0 5.2 ... 1 + 1 5.4 ... 1 + 2 4.9 ... 2 -.. tabbed:: Tasks + [3 rows x 5 columns] - .. testcode:: + .. tab-item:: Tasks - @ray.remote - def consume(ds: ray.data.Datastream) -> int: - num_batches = 0 - for batch in ds.iter_batches(batch_size=8): - num_batches += 1 - return num_batches + .. testcode:: - ray.get(consume.remote(transformed_ds)) + @ray.remote + def consume(ds: ray.data.Datastream) -> int: + num_batches = 0 + for batch in ds.iter_batches(batch_size=8): + num_batches += 1 + return num_batches -.. 
tabbed:: Actors + ray.get(consume.remote(transformed_ds)) - .. testcode:: + .. tab-item:: Actors - @ray.remote - class Worker: + .. testcode:: - def train(self, data_iterator): - for batch in data_iterator.iter_batches(batch_size=8): - pass + @ray.remote + class Worker: - workers = [Worker.remote() for _ in range(4)] - shards = transformed_ds.streaming_split(n=4, equal=True) - ray.get([w.train.remote(s) for w, s in zip(workers, shards)]) + def train(self, data_iterator): + for batch in data_iterator.iter_batches(batch_size=8): + pass + + workers = [Worker.remote() for _ in range(4)] + shards = transformed_ds.streaming_split(n=4, equal=True) + ray.get([w.train.remote(s) for w, s in zip(workers, shards)]) To learn more about consuming datastreams, read diff --git a/doc/source/data/transforming-datastreams.rst b/doc/source/data/transforming-datastreams.rst index c40a3af89c91..226ffcb7f60d 100644 --- a/doc/source/data/transforming-datastreams.rst +++ b/doc/source/data/transforming-datastreams.rst @@ -99,44 +99,46 @@ Types of UDFs ============= There are three types of UDFs that you can use with Ray Data: Function UDFs, Callable Class UDFs, and Generator UDFs. -.. tabbed:: "Function UDFs" +.. tab-set:: - The most basic UDFs are functions that take in a batch or row as input, and returns a batch or row as output. See :ref:`transform_datastreams_batch_formats` for the supported batch formats. + .. tab-item:: "Function UDFs" - .. literalinclude:: ./doc_code/transforming_datastreams.py - :language: python - :start-after: __writing_default_udfs_tabular_begin__ - :end-before: __writing_default_udfs_tabular_end__ + The most basic UDFs are functions that take in a batch or row as input, and returns a batch or row as output. See :ref:`transform_datastreams_batch_formats` for the supported batch formats. -.. tabbed:: "Callable Class UDFs" + .. 
literalinclude:: ./doc_code/transforming_datastreams.py + :language: python + :start-after: __writing_default_udfs_tabular_begin__ + :end-before: __writing_default_udfs_tabular_end__ - With the actor compute strategy, you can use per-row and per-batch UDFs - *callable classes*, i.e., classes that implement the ``__call__`` magic method. You - can use the constructor of the class for stateful setup, and it is only invoked once - per worker actor. + .. tab-item:: "Callable Class UDFs" - Callable classes are useful if you need to load expensive state (such as a model) for the UDF. By using an actor class, you only need to load the state once in the beginning, rather than for each batch. + With the actor compute strategy, you can use per-row and per-batch UDFs + *callable classes*, i.e., classes that implement the ``__call__`` magic method. You + can use the constructor of the class for stateful setup, and it is only invoked once + per worker actor. - .. note:: - These transformation APIs take the uninstantiated callable class as an argument, - not an instance of the class. + Callable classes are useful if you need to load expensive state (such as a model) for the UDF. By using an actor class, you only need to load the state once in the beginning, rather than for each batch. - .. literalinclude:: ./doc_code/transforming_datastreams.py - :language: python - :start-after: __writing_callable_classes_udfs_begin__ - :end-before: __writing_callable_classes_udfs_end__ + .. note:: + These transformation APIs take the uninstantiated callable class as an argument, + not an instance of the class. -.. tabbed:: "Generator UDFs" + .. literalinclude:: ./doc_code/transforming_datastreams.py + :language: python + :start-after: __writing_callable_classes_udfs_begin__ + :end-before: __writing_callable_classes_udfs_end__ - UDFs can also be written as Python generators, yielding multiple outputs for a batch or row instead of a single item. 
Generator UDFs are useful when returning large objects. Instead of returning a very large output batch, ``fn`` can instead yield the output batch in chunks to avoid excessive heap memory usage. + .. tab-item:: "Generator UDFs" - .. warning:: - When applying a generator UDF on individual rows, make sure to use the :meth:`.flat_map() ` API and not the :meth:`.map() ` API. + UDFs can also be written as Python generators, yielding multiple outputs for a batch or row instead of a single item. Generator UDFs are useful when returning large objects. Instead of returning a very large output batch, ``fn`` can instead yield the output batch in chunks to avoid excessive heap memory usage. - .. literalinclude:: ./doc_code/transforming_datastreams.py - :language: python - :start-after: __writing_generator_udfs_begin__ - :end-before: __writing_generator_udfs_end__ + .. warning:: + When applying a generator UDF on individual rows, make sure to use the :meth:`.flat_map() ` API and not the :meth:`.map() ` API. + + .. literalinclude:: ./doc_code/transforming_datastreams.py + :language: python + :start-after: __writing_generator_udfs_begin__ + :end-before: __writing_generator_udfs_end__ .. _transform_datastreams_batch_formats: @@ -148,81 +150,83 @@ Choose the *batch format* of the data given to UDFs by setting the ``batch_format`` option of :meth:`.map_batches() `. Here is an overview of the available batch formats: -.. tabbed:: "default" +.. tab-set:: + + .. tab-item:: "default" - The "default" batch format presents data as follows for each Datastream type: + The "default" batch format presents data as follows for each Datastream type: - * **Tabular Datastreams**: Each batch will be a - `pandas.DataFrame `__. - This may incur a conversion cost if the underlying Datastream block is not - zero-copy convertible from an Arrow table. + * **Tabular Datastreams**: Each batch will be a + `pandas.DataFrame `__. 
+ This may incur a conversion cost if the underlying Datastream block is not + zero-copy convertible from an Arrow table. - .. literalinclude:: ./doc_code/transforming_datastreams.py - :language: python - :start-after: __writing_default_udfs_tabular_begin__ - :end-before: __writing_default_udfs_tabular_end__ + .. literalinclude:: ./doc_code/transforming_datastreams.py + :language: python + :start-after: __writing_default_udfs_tabular_begin__ + :end-before: __writing_default_udfs_tabular_end__ - * **Tensor Datastreams** (single-column): Each batch will be a single - `numpy.ndarray `__ - containing the single tensor column for this batch. + * **Tensor Datastreams** (single-column): Each batch will be a single + `numpy.ndarray `__ + containing the single tensor column for this batch. - .. literalinclude:: ./doc_code/transforming_datastreams.py - :language: python - :start-after: __writing_default_udfs_tensor_begin__ - :end-before: __writing_default_udfs_tensor_end__ + .. literalinclude:: ./doc_code/transforming_datastreams.py + :language: python + :start-after: __writing_default_udfs_tensor_begin__ + :end-before: __writing_default_udfs_tensor_end__ - * **Simple Datastreams**: Each batch will be a Python list. + * **Simple Datastreams**: Each batch will be a Python list. - .. literalinclude:: ./doc_code/transforming_datastreams.py - :language: python - :start-after: __writing_default_udfs_list_begin__ - :end-before: __writing_default_udfs_list_end__ + .. literalinclude:: ./doc_code/transforming_datastreams.py + :language: python + :start-after: __writing_default_udfs_list_begin__ + :end-before: __writing_default_udfs_list_end__ -.. tabbed:: "pandas" + .. tab-item:: "pandas" - The ``"pandas"`` batch format presents batches in - `pandas.DataFrame `__ - format. If converting a simple datastream to Pandas DataFrame batches, a single-column - dataframe with the column ``"__value__"`` will be created. 
+ The ``"pandas"`` batch format presents batches in + `pandas.DataFrame `__ + format. If converting a simple datastream to Pandas DataFrame batches, a single-column + dataframe with the column ``"__value__"`` will be created. - .. literalinclude:: ./doc_code/transforming_datastreams.py - :language: python - :start-after: __writing_pandas_udfs_begin__ - :end-before: __writing_pandas_udfs_end__ + .. literalinclude:: ./doc_code/transforming_datastreams.py + :language: python + :start-after: __writing_pandas_udfs_begin__ + :end-before: __writing_pandas_udfs_end__ -.. tabbed:: "pyarrow" + .. tab-item:: "pyarrow" - The ``"pyarrow"`` batch format presents batches in - `pyarrow.Table `__ - format. If converting a simple datastream to Arrow Table batches, a single-column table - with the column ``"__value__"`` will be created. + The ``"pyarrow"`` batch format presents batches in + `pyarrow.Table `__ + format. If converting a simple datastream to Arrow Table batches, a single-column table + with the column ``"__value__"`` will be created. - .. literalinclude:: ./doc_code/transforming_datastreams.py - :language: python - :start-after: __writing_arrow_udfs_begin__ - :end-before: __writing_arrow_udfs_end__ + .. literalinclude:: ./doc_code/transforming_datastreams.py + :language: python + :start-after: __writing_arrow_udfs_begin__ + :end-before: __writing_arrow_udfs_end__ -.. tabbed:: "numpy" + .. tab-item:: "numpy" - The ``"numpy"`` batch format presents batches in - `numpy.ndarray `__ - format as follows: + The ``"numpy"`` batch format presents batches in + `numpy.ndarray `__ + format as follows: - * **Tabular Datastreams**: Each batch will be a dictionary of NumPy - ndarrays (``Dict[str, np.ndarray]``), with each key-value pair representing a column - in the table. + * **Tabular Datastreams**: Each batch will be a dictionary of NumPy + ndarrays (``Dict[str, np.ndarray]``), with each key-value pair representing a column + in the table. 
- * **Tensor Datastreams** (single-column): Each batch will be a single - `numpy.ndarray `__ - containing the single tensor column for this batch. + * **Tensor Datastreams** (single-column): Each batch will be a single + `numpy.ndarray `__ + containing the single tensor column for this batch. - * **Simple Datastreams**: Each batch will be a single NumPy ndarray, where Datastreams will - attempt to convert each list-batch to an ndarray. + * **Simple Datastreams**: Each batch will be a single NumPy ndarray, where Datastreams will + attempt to convert each list-batch to an ndarray. - .. literalinclude:: ./doc_code/transforming_datastreams.py - :language: python - :start-after: __writing_numpy_udfs_begin__ - :end-before: __writing_numpy_udfs_end__ + .. literalinclude:: ./doc_code/transforming_datastreams.py + :language: python + :start-after: __writing_numpy_udfs_begin__ + :end-before: __writing_numpy_udfs_end__ Converting between the underlying Datastreams data representations (Arrow, Pandas, and Python lists) and the requested batch format (``"default"``, ``"pandas"``, @@ -290,55 +294,57 @@ The following output types are allowed for batch UDFs (e.g., :meth:`ds.map_batches() `). The following describes how they are interpreted to create the transformation result: -.. tabbed:: pd.DataFrame +.. tab-set:: - Returning ``pd.DataFrame`` creates a Tabular datastream as the transformation result: + .. tab-item:: pd.DataFrame - .. literalinclude:: ./doc_code/transforming_datastreams.py - :language: python - :start-after: __writing_pandas_out_udfs_begin__ - :end-before: __writing_pandas_out_udfs_end__ + Returning ``pd.DataFrame`` creates a Tabular datastream as the transformation result: -.. tabbed:: pa.Table + .. literalinclude:: ./doc_code/transforming_datastreams.py + :language: python + :start-after: __writing_pandas_out_udfs_begin__ + :end-before: __writing_pandas_out_udfs_end__ - Returning ``pa.Table`` creates a Tabular datastream as the transformation result: + .. 
tab-item:: pa.Table - .. literalinclude:: ./doc_code/transforming_datastreams.py - :language: python - :start-after: __writing_arrow_out_udfs_begin__ - :end-before: __writing_arrow_out_udfs_end__ + Returning ``pa.Table`` creates a Tabular datastream as the transformation result: -.. tabbed:: np.ndarray + .. literalinclude:: ./doc_code/transforming_datastreams.py + :language: python + :start-after: __writing_arrow_out_udfs_begin__ + :end-before: __writing_arrow_out_udfs_end__ - Returning ``np.ndarray`` creates a single-column Tensor datastream as the transformation result: + .. tab-item:: np.ndarray - .. literalinclude:: ./doc_code/transforming_datastreams.py - :language: python - :start-after: __writing_numpy_out_udfs_begin__ - :end-before: __writing_numpy_out_udfs_end__ + Returning ``np.ndarray`` creates a single-column Tensor datastream as the transformation result: -.. tabbed:: Dict[str, np.ndarray] + .. literalinclude:: ./doc_code/transforming_datastreams.py + :language: python + :start-after: __writing_numpy_out_udfs_begin__ + :end-before: __writing_numpy_out_udfs_end__ - Returning ``Dict[str, np.ndarray]`` creates a multi-column Tensor datastream as the transformation result. + .. tab-item:: Dict[str, np.ndarray] - If a column tensor is 1-dimensional, then the native Arrow 1D list - type is used; if a column tensor has 2 or more dimensions, then the Datastream - :ref:`tensor extension type ` to embed these - n-dimensional tensors in the Arrow table. + Returning ``Dict[str, np.ndarray]`` creates a multi-column Tensor datastream as the transformation result. - .. 
literalinclude:: ./doc_code/transforming_datastreams.py - :language: python - :start-after: __writing_numpy_dict_out_udfs_begin__ - :end-before: __writing_numpy_dict_out_udfs_end__ + If a column tensor is 1-dimensional, then the native Arrow 1D list + type is used; if a column tensor has 2 or more dimensions, then the Datastream + :ref:`tensor extension type ` to embed these + n-dimensional tensors in the Arrow table. -.. tabbed:: list + .. literalinclude:: ./doc_code/transforming_datastreams.py + :language: python + :start-after: __writing_numpy_dict_out_udfs_begin__ + :end-before: __writing_numpy_dict_out_udfs_end__ - Returning ``list`` creates a simple Python object datastream as the transformation result: + .. tab-item:: list - .. literalinclude:: ./doc_code/transforming_datastreams.py - :language: python - :start-after: __writing_simple_out_udfs_begin__ - :end-before: __writing_simple_out_udfs_end__ + Returning ``list`` creates a simple Python object datastream as the transformation result: + + .. literalinclude:: ./doc_code/transforming_datastreams.py + :language: python + :start-after: __writing_simple_out_udfs_begin__ + :end-before: __writing_simple_out_udfs_end__ .. _transform_datastreams_row_output_types: @@ -348,34 +354,36 @@ Row UDF Output Types The following output types are allowed for per-row UDFs (e.g., :meth:`ds.map() `): -.. tabbed:: dict +.. tab-set:: + + .. tab-item:: dict - Returning a ``dict`` of Arrow-compatible data types creates a Tabular datastream - as the transformation result. If any dict values are not Arrow-compatible, then - a simple Python object datastream will be created: + Returning a ``dict`` of Arrow-compatible data types creates a Tabular datastream + as the transformation result. If any dict values are not Arrow-compatible, then + a simple Python object datastream will be created: - .. 
literalinclude:: ./doc_code/transforming_datastreams.py - :language: python - :start-after: __writing_dict_out_row_udfs_begin__ - :end-before: __writing_dict_out_row_udfs_end__ + .. literalinclude:: ./doc_code/transforming_datastreams.py + :language: python + :start-after: __writing_dict_out_row_udfs_begin__ + :end-before: __writing_dict_out_row_udfs_end__ -.. tabbed:: np.ndarray + .. tab-item:: np.ndarray - Returning ``np.ndarray`` creates a single-column Tensor datastream as the transformation result: + Returning ``np.ndarray`` creates a single-column Tensor datastream as the transformation result: - .. literalinclude:: ./doc_code/transforming_datastreams.py - :language: python - :start-after: __writing_numpy_out_row_udfs_begin__ - :end-before: __writing_numpy_out_row_udfs_end__ + .. literalinclude:: ./doc_code/transforming_datastreams.py + :language: python + :start-after: __writing_numpy_out_row_udfs_begin__ + :end-before: __writing_numpy_out_row_udfs_end__ -.. tabbed:: object + .. tab-item:: object - Other return row types will create a simple Python object datastream as the transformation result: + Other return row types will create a simple Python object datastream as the transformation result: - .. literalinclude:: ./doc_code/transforming_datastreams.py - :language: python - :start-after: __writing_simple_out_row_udfs_begin__ - :end-before: __writing_simple_out_row_udfs_end__ + .. literalinclude:: ./doc_code/transforming_datastreams.py + :language: python + :start-after: __writing_simple_out_row_udfs_begin__ + :end-before: __writing_simple_out_row_udfs_end__ .. 
_transform_datastreams_configuring_batch_size: diff --git a/doc/source/ray-contribute/development.rst b/doc/source/ray-contribute/development.rst index 8291c63d6ee7..4e4b7179aa4e 100644 --- a/doc/source/ray-contribute/development.rst +++ b/doc/source/ray-contribute/development.rst @@ -19,7 +19,7 @@ Clone the repository To build Ray locally you will need to have the Git repository, so first, fork it on GitHub. Then you can clone it to your machine: -.. tabbed:: Git SSH +.. tab-item:: Git SSH To clone the repository using Git with SSH (the default) run: @@ -27,7 +27,7 @@ To build Ray locally you will need to have the Git repository, so first, fork it git clone git@github.com:[your username]/ray.git -.. tabbed:: Git HTTPS +.. tab-item:: Git HTTPS To clone the repository using Git with HTTPS run: @@ -43,21 +43,23 @@ Then you can enter into the Ray git repository directory: Next make sure you connect your repository to the upstream (main project) Ray repository. This will allow you to push your code to your repository when proposing changes (in pull requests) while also pulling updates from the main project. -.. tabbed:: Git SSH +.. tab-set:: - To connect your repository using SSH (the default) run the command: + .. tab-item:: Git SSH - .. code-block:: shell + To connect your repository using SSH (the default) run the command: - git remote add upstream git@github.com:ray-project/ray.git + .. code-block:: shell -.. tabbed:: Git HTTPS + git remote add upstream git@github.com:ray-project/ray.git - To connect your repository using HTTPS run the command: + .. tab-item:: Git HTTPS - .. code-block:: shell + To connect your repository using HTTPS run the command: + + .. 
code-block:: shell - git remote add upstream https://github.com/ray-project/ray.git + git remote add upstream https://github.com/ray-project/ray.git Every time you want to update your local version you can pull the changes from the main repository: @@ -73,46 +75,48 @@ Prepare the Python environment You probably want some type of Python virtual environment. For example, you can use Anaconda's ``conda``. -.. tabbed:: conda +.. tab-set:: - Set up a ``conda`` environment named ``ray``: + .. tab-item:: conda - .. code-block:: shell + Set up a ``conda`` environment named ``ray``: - conda create -c conda-forge python=3.9 -n ray + .. code-block:: shell + conda create -c conda-forge python=3.9 -n ray - Activate your virtual environment to tell the shell/terminal to use this particular Python: - .. code-block:: shell - - conda activate ray - - You need to activate the virtual environment every time you start a new shell/terminal to work on Ray. + Activate your virtual environment to tell the shell/terminal to use this particular Python: -.. tabbed:: venv + .. code-block:: shell - Use Python's integrated ``venv`` module to create a virtual environment called ``venv`` in the current directory: + conda activate ray - .. code-block:: shell + You need to activate the virtual environment every time you start a new shell/terminal to work on Ray. - python -m venv venv + .. tab-item:: venv - This contains a directory with all the packages used by the local Python of your project. You only need to do this step once. + Use Python's integrated ``venv`` module to create a virtual environment called ``venv`` in the current directory: - Activate your virtual environment to tell the shell/terminal to use this particular Python: + .. code-block:: shell - .. code-block:: shell + python -m venv venv - source venv/bin/activate + This contains a directory with all the packages used by the local Python of your project. You only need to do this step once. 
- You need to activate the virtual environment every time you start a new shell/terminal to work on Ray. + Activate your virtual environment to tell the shell/terminal to use this particular Python: - Creating a new virtual environment can come with older versions of ``pip`` and ``wheel``. To avoid problems when you install packages, use the module ``pip`` to install the latest version of ``pip`` (itself) and ``wheel``: + .. code-block:: shell - .. code-block:: shell + source venv/bin/activate + + You need to activate the virtual environment every time you start a new shell/terminal to work on Ray. + + Creating a new virtual environment can come with older versions of ``pip`` and ``wheel``. To avoid problems when you install packages, use the module ``pip`` to install the latest version of ``pip`` (itself) and ``wheel``: + + .. code-block:: shell - python -m pip install --upgrade pip wheel + python -m pip install --upgrade pip wheel .. _python-develop: diff --git a/doc/source/ray-observability/monitoring-debugging/profiling.rst b/doc/source/ray-observability/monitoring-debugging/profiling.rst index c2ff41687fe5..756005eaec21 100644 --- a/doc/source/ray-observability/monitoring-debugging/profiling.rst +++ b/doc/source/ray-observability/monitoring-debugging/profiling.rst @@ -21,21 +21,23 @@ First, install ``memray``. But in this example, we will write them to `/tmp/ray/session_latest/logs` because Ray dashboard allows you to download files inside the log folder. This will allow you to download profiling files from other nodes. -.. tabbed:: Actors +.. tab-set:: - .. literalinclude:: ../doc_code/memray_profiling.py - :language: python - :start-after: __memray_profiling_start__ - :end-before: __memray_profiling_end__ + .. tab-item:: Actors -.. tabbed:: Tasks + .. 
literalinclude:: ../doc_code/memray_profiling.py + :language: python + :start-after: __memray_profiling_start__ + :end-before: __memray_profiling_end__ - Note that tasks have a shorter lifetime, so there could be lots of memory profiling files. + .. tab-item:: Tasks - .. literalinclude:: ../doc_code/memray_profiling.py - :language: python - :start-after: __memray_profiling_task_start__ - :end-before: __memray_profiling_task_end__ + Note that tasks have a shorter lifetime, so there could be lots of memory profiling files. + + .. literalinclude:: ../doc_code/memray_profiling.py + :language: python + :start-after: __memray_profiling_task_start__ + :end-before: __memray_profiling_task_end__ Once the task or actor runs, go to the :ref:`Logs View ` of the dashboard. Find and click the log file name. diff --git a/doc/source/ray-observability/ray-tracing.rst b/doc/source/ray-observability/ray-tracing.rst index afc1c45ff8b0..7e0de9e7e490 100644 --- a/doc/source/ray-observability/ray-tracing.rst +++ b/doc/source/ray-observability/ray-tracing.rst @@ -54,29 +54,31 @@ Below is an example tracing startup hook that sets up the default tracing provid For open-source users who want to experiment with tracing, Ray has a default tracing startup hook that exports spans to the folder ``/tmp/spans``. To run using this default hook, you can run the following code sample to set up tracing and trace a simple Ray task. -.. tabbed:: ray start +.. tab-set:: - .. code-block:: shell + .. tab-item:: ray start - $ ray start --head --tracing-startup-hook=ray.util.tracing.setup_local_tmp_tracing:setup_tracing - $ python - >>> ray.init() - >>> @ray.remote - def my_function(): - return 1 + .. code-block:: shell - obj_ref = my_function.remote() + $ ray start --head --tracing-startup-hook=ray.util.tracing.setup_local_tmp_tracing:setup_tracing + $ python + >>> ray.init() + >>> @ray.remote + def my_function(): + return 1 -.. tabbed:: ray.init() + obj_ref = my_function.remote() - .. 
code-block:: python + .. tab-item:: ray.init() - >>> ray.init(_tracing_startup_hook="ray.util.tracing.setup_local_tmp_tracing:setup_tracing") - >>> @ray.remote - def my_function(): - return 1 + .. code-block:: python - obj_ref = my_function.remote() + >>> ray.init(_tracing_startup_hook="ray.util.tracing.setup_local_tmp_tracing:setup_tracing") + >>> @ray.remote + def my_function(): + return 1 + + obj_ref = my_function.remote() If you want to provide your own custom tracing startup hook, provide your startup hook in the format of ``module:attribute`` where the attribute is the ``setup_tracing`` function to be run. diff --git a/doc/source/ray-observability/state/state-api.rst b/doc/source/ray-observability/state/state-api.rst index 78778ef3b753..72096718a8f2 100644 --- a/doc/source/ray-observability/state/state-api.rst +++ b/doc/source/ray-observability/state/state-api.rst @@ -43,18 +43,20 @@ Run any workload. In this example, you will use the following script that runs 2 Now, let's see the summarized states of tasks. If it doesn't return the output immediately, retry the command. -.. tabbed:: CLI +.. tabs:: - .. code-block:: bash + .. group-tab:: CLI - ray summary tasks + .. code-block:: bash -.. tabbed:: Python SDK + ray summary tasks - .. code-block:: python + .. group-tab:: Python SDK - from ray.experimental.state.api import summarize_tasks - print(summarize_tasks()) + .. code-block:: python + + from ray.experimental.state.api import summarize_tasks + print(summarize_tasks()) .. code-block:: text @@ -74,18 +76,20 @@ Now, let's see the summarized states of tasks. If it doesn't return the output i Let's list all actors. -.. tabbed:: CLI +.. tabs:: + + .. group-tab:: CLI - .. code-block:: bash + .. code-block:: bash - ray list actors + ray list actors -.. tabbed:: Python SDK + .. group-tab:: Python SDK - .. code-block:: python + .. 
code-block:: python - from ray.experimental.state.api import list_actors - print(list_actors()) + from ray.experimental.state.api import list_actors + print(list_actors()) .. code-block:: text @@ -102,20 +106,22 @@ Let's list all actors. You can get the state of a single task using the get API. -.. tabbed:: CLI +.. tabs:: + + .. group-tab:: CLI - .. code-block:: bash + .. code-block:: bash - # In this case, 31405554844820381c2f0f8501000000 - ray get actors + # In this case, 31405554844820381c2f0f8501000000 + ray get actors -.. tabbed:: Python SDK + .. group-tab:: Python SDK - .. code-block:: python + .. code-block:: python - from ray.experimental.state.api import get_actor - # In this case, 31405554844820381c2f0f8501000000 - print(get_actor(id=)) + from ray.experimental.state.api import get_actor + # In this case, 31405554844820381c2f0f8501000000 + print(get_actor(id=)) .. code-block:: text @@ -133,23 +139,25 @@ You can get the state of a single task using the get API. You can also access logs through ``ray logs`` API. -.. tabbed:: CLI +.. tabs:: - .. code-block:: bash + .. group-tab:: CLI - ray list actors - # In this case, ACTOR_ID is 31405554844820381c2f0f8501000000 - ray logs actor --id + .. code-block:: bash -.. tabbed:: Python SDK + ray list actors + # In this case, ACTOR_ID is 31405554844820381c2f0f8501000000 + ray logs actor --id - .. code-block:: python + .. group-tab:: Python SDK - from ray.experimental.state.api import get_log + .. code-block:: python - # In this case, ACTOR_ID is 31405554844820381c2f0f8501000000 - for line in get_log(actor_id=): - print(line) + from ray.experimental.state.api import get_log + + # In this case, ACTOR_ID is 31405554844820381c2f0f8501000000 + for line in get_log(actor_id=): + print(line) .. code-block:: text @@ -180,34 +188,38 @@ you can use ``list`` or ``get`` APIs to get more details for an individual abnor E.g., Summarize all actors ~~~~~~~~~~~~~~~~~~~~~~~~~~~ -.. tabbed:: CLI +.. tabs:: + + .. group-tab:: CLI - .. 
code-block:: bash + .. code-block:: bash - ray summary actors + ray summary actors -.. tabbed:: Python SDK + .. group-tab:: Python SDK - .. code-block:: python + .. code-block:: python - from ray.experimental.state.api import summarize_actors - print(summarize_actors()) + from ray.experimental.state.api import summarize_actors + print(summarize_actors()) E.g., Summarize all tasks ~~~~~~~~~~~~~~~~~~~~~~~~~ -.. tabbed:: CLI +.. tabs:: + + .. group-tab:: CLI - .. code-block:: bash + .. code-block:: bash - ray summary tasks + ray summary tasks -.. tabbed:: Python SDK + .. group-tab:: Python SDK - .. code-block:: python + .. code-block:: python - from ray.experimental.state.api import summarize_tasks - print(summarize_tasks()) + from ray.experimental.state.api import summarize_tasks + print(summarize_tasks()) E.g., Summarize all objects ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -218,19 +230,20 @@ E.g., Summarize all objects To get callsite info, set env variable `RAY_record_ref_creation_sites=1` when starting the ray cluster RAY_record_ref_creation_sites=1 ray start --head +.. tabs:: -.. tabbed:: CLI + .. group-tab:: CLI - .. code-block:: bash + .. code-block:: bash - ray summary objects + ray summary objects -.. tabbed:: Python SDK + .. group-tab:: Python SDK - .. code-block:: python + .. code-block:: python - from ray.experimental.state.api import summarize_objects - print(summarize_objects()) + from ray.experimental.state.api import summarize_objects + print(summarize_objects()) List ---- @@ -249,34 +262,38 @@ Get a list of resources, possible resources include: E.g., List all nodes ~~~~~~~~~~~~~~~~~~~~~ -.. tabbed:: CLI +.. tabs:: - .. code-block:: bash + .. group-tab:: CLI - ray list nodes + .. code-block:: bash -.. tabbed:: Python SDK + ray list nodes - .. code-block:: python + .. group-tab:: Python SDK - from ray.experimental.state.api import list_nodes() - list_nodes() + .. 
code-block:: python + + from ray.experimental.state.api import list_nodes() + list_nodes() E.g., List all placement groups ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -.. tabbed:: CLI +.. tabs:: + + .. group-tab:: CLI - .. code-block:: bash + .. code-block:: bash - ray list placement-groups + ray list placement-groups -.. tabbed:: Python SDK + .. group-tab:: Python SDK - .. code-block:: python + .. code-block:: python - from ray.experimental.state.api import list_placement_groups - list_placement_groups() + from ray.experimental.state.api import list_placement_groups + list_placement_groups() E.g., List local referenced objects created by a process @@ -284,100 +301,112 @@ E.g., List local referenced objects created by a process .. tip:: You can list resources with one or multiple filters: using `--filter` or `-f` -.. tabbed:: CLI +.. tabs:: + + .. group-tab:: CLI - .. code-block:: bash + .. code-block:: bash - ray list objects -f pid= -f reference_type=LOCAL_REFERENCE + ray list objects -f pid= -f reference_type=LOCAL_REFERENCE -.. tabbed:: Python SDK + .. group-tab:: Python SDK - .. code-block:: python + .. code-block:: python - from ray.experimental.state.api import list_objects - list_objects(filters=[("pid", "=", ), ("reference_type", "=", "LOCAL_REFERENCE")]) + from ray.experimental.state.api import list_objects + list_objects(filters=[("pid", "=", ), ("reference_type", "=", "LOCAL_REFERENCE")]) E.g., List alive actors ~~~~~~~~~~~~~~~~~~~~~~~~~~~ -.. tabbed:: CLI +.. tabs:: - .. code-block:: bash + .. group-tab:: CLI - ray list actors -f state=ALIVE + .. code-block:: bash -.. tabbed:: Python SDK + ray list actors -f state=ALIVE - .. code-block:: python + .. group-tab:: Python SDK - from ray.experimental.state.api import list_actors - list_actors(filters=[("state", "=", "ALIVE")]) + .. code-block:: python + + from ray.experimental.state.api import list_actors + list_actors(filters=[("state", "=", "ALIVE")]) E.g., List running tasks ~~~~~~~~~~~~~~~~~~~~~~~~~~~ -.. 
tabbed:: CLI +.. tabs:: + + .. group-tab:: CLI - .. code-block:: bash + .. code-block:: bash - ray list tasks -f state=RUNNING + ray list tasks -f state=RUNNING -.. tabbed:: Python SDK + .. group-tab:: Python SDK - .. code-block:: python + .. code-block:: python - from ray.experimental.state.api import list_tasks - list_tasks(filters=[("state", "=", "RUNNING")]) + from ray.experimental.state.api import list_tasks + list_tasks(filters=[("state", "=", "RUNNING")]) E.g., List non-running tasks ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -.. tabbed:: CLI +.. tabs:: - .. code-block:: bash + .. group-tab:: CLI - ray list tasks -f state!=RUNNING + .. code-block:: bash -.. tabbed:: Python SDK + ray list tasks -f state!=RUNNING - .. code-block:: python + .. group-tab:: Python SDK - from ray.experimental.state.api import list_tasks - list_tasks(filters=[("state", "!=", "RUNNING")]) + .. code-block:: python + + from ray.experimental.state.api import list_tasks + list_tasks(filters=[("state", "!=", "RUNNING")]) E.g., List running tasks that have a name func ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -.. tabbed:: CLI +.. tabs:: + + .. group-tab:: CLI - .. code-block:: bash + .. code-block:: bash - ray list tasks -f state=RUNNING -f name="task_running_300_seconds()" + ray list tasks -f state=RUNNING -f name="task_running_300_seconds()" -.. tabbed:: Python SDK + .. group-tab:: Python SDK - .. code-block:: python + .. code-block:: python - from ray.experimental.state.api import list_tasks - list_tasks(filters=[("state", "=", "RUNNING"), ("name", "=", "task_running_300_seconds()")]) + from ray.experimental.state.api import list_tasks + list_tasks(filters=[("state", "=", "RUNNING"), ("name", "=", "task_running_300_seconds()")]) E.g., List tasks with more details ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. tip:: When ``--detail`` is specified, the API can query more data sources to obtain state information in details. -.. tabbed:: CLI +.. tabs:: + + .. group-tab:: CLI - .. 
code-block:: bash + .. code-block:: bash - ray list tasks --detail + ray list tasks --detail -.. tabbed:: Python SDK + .. group-tab:: Python SDK - .. code-block:: python + .. code-block:: python - from ray.experimental.state.api import list_tasks - list_tasks(detail=True) + from ray.experimental.state.api import list_tasks + list_tasks(detail=True) Get --- @@ -385,34 +414,38 @@ Get E.g., Get a task info ~~~~~~~~~~~~~~~~~~~~~~~ -.. tabbed:: CLI +.. tabs:: - .. code-block:: bash + .. group-tab:: CLI - ray get tasks + .. code-block:: bash -.. tabbed:: Python SDK + ray get tasks - .. code-block:: python + .. group-tab:: Python SDK - from ray.experimental.state.api import get_task - get_task(id=) + .. code-block:: python + + from ray.experimental.state.api import get_task + get_task(id=) E.g., Get a node info ~~~~~~~~~~~~~~~~~~~~~~ -.. tabbed:: CLI +.. tabs:: + + .. group-tab:: CLI - .. code-block:: bash + .. code-block:: bash - ray get nodes + ray get nodes -.. tabbed:: Python SDK + .. group-tab:: Python SDK - .. code-block:: python + .. code-block:: python - from ray.experimental.state.api import get_node - get_node(id=) + from ray.experimental.state.api import get_node + get_node(id=) Logs ---- @@ -425,110 +458,120 @@ By default, the API prints log from a head node. E.g., Get all retrievable log file names from a head node in a cluster ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -.. tabbed:: CLI +.. tabs:: - .. code-block:: bash + .. group-tab:: CLI - ray logs cluster + .. code-block:: bash -.. tabbed:: Python SDK + ray logs cluster - .. code-block:: python + .. group-tab:: Python SDK - # You could get the node id / node ip from `ray list nodes` - from ray.experimental.state.api import list_logs - # `ray logs` by default print logs from a head node. - # So in order to list the same logs, you should provide the head node id. - # You could get the node id / node ip from `ray list nodes` - list_logs(node_id=) + .. 
code-block:: python + + # You could get the node id / node ip from `ray list nodes` + from ray.experimental.state.api import list_logs + # `ray logs` by default print logs from a head node. + # So in order to list the same logs, you should provide the head node id. + # You could get the node id / node ip from `ray list nodes` + list_logs(node_id=) E.g., Get a particular log file from a node ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -.. tabbed:: CLI +.. tabs:: + + .. group-tab:: CLI - .. code-block:: bash + .. code-block:: bash - # You could get the node id / node ip from `ray list nodes` - ray logs cluster gcs_server.out --node-id - # `ray logs cluster` is alias to `ray logs` when querying with globs. - ray logs gcs_server.out --node-id + # You could get the node id / node ip from `ray list nodes` + ray logs cluster gcs_server.out --node-id + # `ray logs cluster` is alias to `ray logs` when querying with globs. + ray logs gcs_server.out --node-id -.. tabbed:: Python SDK + .. group-tab:: Python SDK - .. code-block:: python + .. code-block:: python - from ray.experimental.state.api import get_log + from ray.experimental.state.api import get_log - # Node IP could be retrieved from list_nodes() or ray.nodes() - for line in get_log(filename="gcs_server.out", node_id=): - print(line) + # Node IP could be retrieved from list_nodes() or ray.nodes() + for line in get_log(filename="gcs_server.out", node_id=): + print(line) E.g., Stream a log file from a node ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -.. tabbed:: CLI +.. tabs:: + + .. group-tab:: CLI - .. code-block:: bash + .. code-block:: bash - # You could get the node id / node ip from `ray list nodes` - ray logs raylet.out --node-ip --follow - # Or, - ray logs cluster raylet.out --node-ip --follow + # You could get the node id / node ip from `ray list nodes` + ray logs raylet.out --node-ip --follow + # Or, + ray logs cluster raylet.out --node-ip --follow -.. tabbed:: Python SDK + .. group-tab:: Python SDK - .. 
code-block:: python + .. code-block:: python - from ray.experimental.state.api import get_log + from ray.experimental.state.api import get_log - # Node IP could be retrieved from list_nodes() or ray.nodes() - # The loop will block with `follow=True` - for line in get_log(filename="raylet.out", node_ip=, follow=True): - print(line) + # Node IP could be retrieved from list_nodes() or ray.nodes() + # The loop will block with `follow=True` + for line in get_log(filename="raylet.out", node_ip=, follow=True): + print(line) E.g., Stream log from an actor with actor id ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -.. tabbed:: CLI +.. tabs:: - .. code-block:: bash + .. group-tab:: CLI - ray logs actor --id= --follow + .. code-block:: bash -.. tabbed:: Python SDK + ray logs actor --id= --follow - .. code-block:: python + .. group-tab:: Python SDK - from ray.experimental.state.api import get_log + .. code-block:: python - # You could get the actor's ID from the output of `ray list actors`. - # The loop will block with `follow=True` - for line in get_log(actor_id=, follow=True): - print(line) + from ray.experimental.state.api import get_log + + # You could get the actor's ID from the output of `ray list actors`. + # The loop will block with `follow=True` + for line in get_log(actor_id=, follow=True): + print(line) E.g., Stream log from a pid ~~~~~~~~~~~~~~~~~~~~~~~~~~~ -.. tabbed:: CLI +.. tabs:: + + .. group-tab:: CLI - .. code-block:: bash + .. code-block:: bash - ray logs worker --pid= --follow + ray logs worker --pid= --follow -.. tabbed:: Python SDK + .. group-tab:: Python SDK - .. code-block:: python + .. 
code-block:: python - from ray.experimental.state.api import get_log + from ray.experimental.state.api import get_log - # Node IP could be retrieved from list_nodes() or ray.nodes() - # You could get the pid of the worker running the actor easily when output - # of worker being directed to the driver (default) - # The loop will block with `follow=True` - for line in get_log(pid=, node_ip=, follow=True): - print(line) + # Node IP could be retrieved from list_nodes() or ray.nodes() + # You could get the pid of the worker running the actor easily when output + # of worker being directed to the driver (default) + # The loop will block with `follow=True` + for line in get_log(pid=, node_ip=, follow=True): + print(line) Failure Semantics ----------------- From e5504f91278edcf6cf2048e6c4f762536bc8f24a Mon Sep 17 00:00:00 2001 From: angelinalg <122562471+angelinalg@users.noreply.github.com> Date: Mon, 24 Apr 2023 12:58:12 -0700 Subject: [PATCH 072/424] [docs] fix link to data/data.html (#34718) Link was broken unintentionally. --- doc/source/index.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/index.md b/doc/source/index.md index dcb1edc23d9e..cee36c217b45 100644 --- a/doc/source/index.md +++ b/doc/source/index.md @@ -133,7 +133,7 @@ dataset_transformed = preprocessor.fit_transform(dataset=dataset)

    From 8a709c9aad29f46ca3e0667261ef4ffeeddd2944 Mon Sep 17 00:00:00 2001 From: Artur Niederfahrenhorst Date: Mon, 24 Apr 2023 13:22:22 -0700 Subject: [PATCH 073/424] [RLlib] Unify TensorSpecs to a single framework-agnostic class. (#34493) --- rllib/algorithms/ppo/ppo_base_rl_module.py | 50 +++- rllib/algorithms/ppo/tf/ppo_tf_rl_module.py | 38 +-- .../ppo/torch/ppo_torch_rl_module.py | 43 ---- rllib/core/models/specs/checker.py | 9 +- rllib/core/models/specs/specs_base.py | 152 +++++++----- rllib/core/models/specs/specs_jax.py | 31 --- rllib/core/models/specs/specs_np.py | 24 -- rllib/core/models/specs/specs_tf.py | 30 --- rllib/core/models/specs/specs_torch.py | 29 --- .../models/specs/tests/test_check_specs.py | 22 +- .../core/models/specs/tests/test_spec_dict.py | 16 +- .../models/specs/tests/test_tensor_spec.py | 229 ++++++++++++------ rllib/core/models/tf/encoder.py | 75 ++++-- rllib/core/models/tf/heads.py | 15 +- rllib/core/models/torch/encoder.py | 61 +++-- rllib/core/models/torch/heads.py | 15 +- rllib/utils/__init__.py | 10 +- rllib/utils/typing.py | 8 +- 18 files changed, 451 insertions(+), 406 deletions(-) delete mode 100644 rllib/core/models/specs/specs_jax.py delete mode 100644 rllib/core/models/specs/specs_np.py delete mode 100644 rllib/core/models/specs/specs_tf.py delete mode 100644 rllib/core/models/specs/specs_torch.py diff --git a/rllib/algorithms/ppo/ppo_base_rl_module.py b/rllib/algorithms/ppo/ppo_base_rl_module.py index 2f48a61c6dee..39a740fd9c22 100644 --- a/rllib/algorithms/ppo/ppo_base_rl_module.py +++ b/rllib/algorithms/ppo/ppo_base_rl_module.py @@ -4,9 +4,15 @@ import abc -from ray.rllib.core.rl_module.rl_module import RLModule, RLModuleConfig -from ray.rllib.utils.annotations import ExperimentalAPI from ray.rllib.core.models.base import ActorCriticEncoder +from ray.rllib.core.models.specs.specs_base import TensorSpec +from ray.rllib.core.models.specs.specs_dict import SpecDict +from ray.rllib.core.rl_module.rl_module import RLModule 
+from ray.rllib.core.rl_module.rl_module import RLModuleConfig +from ray.rllib.models.distributions import Distribution +from ray.rllib.policy.sample_batch import SampleBatch +from ray.rllib.utils.annotations import ExperimentalAPI +from ray.rllib.utils.annotations import override @ExperimentalAPI @@ -27,3 +33,43 @@ def setup(self): # __sphinx_doc_end__ assert isinstance(self.encoder, ActorCriticEncoder) + + @override(RLModule) + def input_specs_inference(self) -> SpecDict: + return self.input_specs_exploration() + + @override(RLModule) + def output_specs_inference(self) -> SpecDict: + return SpecDict({SampleBatch.ACTION_DIST: Distribution}) + + @override(RLModule) + def input_specs_exploration(self): + return [] + + @override(RLModule) + def output_specs_exploration(self) -> SpecDict: + return [ + SampleBatch.VF_PREDS, + SampleBatch.ACTION_DIST, + SampleBatch.ACTION_DIST_INPUTS, + ] + + @override(RLModule) + def input_specs_train(self) -> SpecDict: + specs = self.input_specs_exploration() + specs.append(SampleBatch.ACTIONS) + if SampleBatch.OBS in specs: + specs.append(SampleBatch.NEXT_OBS) + return specs + + @override(RLModule) + def output_specs_train(self) -> SpecDict: + spec = SpecDict( + { + SampleBatch.ACTION_DIST: Distribution, + SampleBatch.ACTION_LOGP: TensorSpec("b", framework=self.framework), + SampleBatch.VF_PREDS: TensorSpec("b", framework=self.framework), + "entropy": TensorSpec("b", framework=self.framework), + } + ) + return spec diff --git a/rllib/algorithms/ppo/tf/ppo_tf_rl_module.py b/rllib/algorithms/ppo/tf/ppo_tf_rl_module.py index e84ee2bec90b..503866de47ff 100644 --- a/rllib/algorithms/ppo/tf/ppo_tf_rl_module.py +++ b/rllib/algorithms/ppo/tf/ppo_tf_rl_module.py @@ -1,12 +1,10 @@ -from typing import Mapping, Any, List +from typing import Mapping, Any from ray.rllib.algorithms.ppo.ppo_base_rl_module import PPORLModuleBase from ray.rllib.core.models.base import ACTOR, CRITIC from ray.rllib.core.models.tf.encoder import ENCODER_OUT -from 
ray.rllib.models.distributions import Distribution from ray.rllib.core.rl_module.rl_module import RLModule from ray.rllib.core.rl_module.tf.tf_rl_module import TfRLModule -from ray.rllib.core.models.specs.specs_dict import SpecDict from ray.rllib.policy.sample_batch import SampleBatch from ray.rllib.utils.annotations import override from ray.rllib.utils.framework import try_import_tf @@ -30,40 +28,6 @@ def __init__(self, *args, **kwargs): # else: # return NestedDict({}) - @override(RLModule) - def input_specs_train(self) -> List[str]: - return [SampleBatch.OBS, SampleBatch.ACTIONS, SampleBatch.ACTION_LOGP] - - @override(RLModule) - def output_specs_train(self) -> List[str]: - return [ - SampleBatch.ACTION_DIST_INPUTS, - SampleBatch.ACTION_DIST, - SampleBatch.ACTION_LOGP, - SampleBatch.VF_PREDS, - "entropy", - ] - - @override(RLModule) - def input_specs_exploration(self): - return [] - - @override(RLModule) - def output_specs_exploration(self) -> List[str]: - return [ - SampleBatch.ACTION_DIST, - SampleBatch.VF_PREDS, - SampleBatch.ACTION_DIST_INPUTS, - ] - - @override(RLModule) - def input_specs_inference(self) -> SpecDict: - return self.input_specs_exploration() - - @override(RLModule) - def output_specs_inference(self) -> SpecDict: - return SpecDict({SampleBatch.ACTION_DIST: Distribution}) - @override(RLModule) def _forward_inference(self, batch: NestedDict) -> Mapping[str, Any]: output = {} diff --git a/rllib/algorithms/ppo/torch/ppo_torch_rl_module.py b/rllib/algorithms/ppo/torch/ppo_torch_rl_module.py index 0cef8d2a404a..48459a580cb3 100644 --- a/rllib/algorithms/ppo/torch/ppo_torch_rl_module.py +++ b/rllib/algorithms/ppo/torch/ppo_torch_rl_module.py @@ -5,9 +5,6 @@ from ray.rllib.core.models.base import ACTOR, CRITIC, ENCODER_OUT, STATE_IN from ray.rllib.core.rl_module.rl_module import RLModule from ray.rllib.core.rl_module.torch import TorchRLModule -from ray.rllib.core.models.specs.specs_dict import SpecDict -from ray.rllib.core.models.specs.specs_torch 
import TorchTensorSpec -from ray.rllib.models.distributions import Distribution from ray.rllib.policy.sample_batch import SampleBatch from ray.rllib.utils.annotations import override from ray.rllib.utils.framework import try_import_torch @@ -36,14 +33,6 @@ def __init__(self, *args, **kwargs): TorchRLModule.__init__(self, *args, **kwargs) PPORLModuleBase.__init__(self, *args, **kwargs) - @override(RLModule) - def input_specs_inference(self) -> SpecDict: - return self.input_specs_exploration() - - @override(RLModule) - def output_specs_inference(self) -> SpecDict: - return SpecDict({SampleBatch.ACTION_DIST: Distribution}) - @override(RLModule) def _forward_inference(self, batch: NestedDict) -> Mapping[str, Any]: output = {} @@ -69,18 +58,6 @@ def _forward_inference(self, batch: NestedDict) -> Mapping[str, Any]: return output - @override(RLModule) - def input_specs_exploration(self): - return [] - - @override(RLModule) - def output_specs_exploration(self) -> SpecDict: - return [ - SampleBatch.VF_PREDS, - SampleBatch.ACTION_DIST, - SampleBatch.ACTION_DIST_INPUTS, - ] - @override(RLModule) def _forward_exploration(self, batch: NestedDict) -> Mapping[str, Any]: """PPO forward pass during exploration. 
@@ -118,26 +95,6 @@ def _forward_exploration(self, batch: NestedDict) -> Mapping[str, Any]: ) return output - @override(RLModule) - def input_specs_train(self) -> SpecDict: - specs = self.input_specs_exploration() - specs.append(SampleBatch.ACTIONS) - if SampleBatch.OBS in specs: - specs.append(SampleBatch.NEXT_OBS) - return specs - - @override(RLModule) - def output_specs_train(self) -> SpecDict: - spec = SpecDict( - { - SampleBatch.ACTION_DIST: Distribution, - SampleBatch.ACTION_LOGP: TorchTensorSpec("b", dtype=torch.float32), - SampleBatch.VF_PREDS: TorchTensorSpec("b", dtype=torch.float32), - "entropy": TorchTensorSpec("b", dtype=torch.float32), - } - ) - return spec - def _forward_train(self, batch: NestedDict) -> Mapping[str, Any]: output = {} diff --git a/rllib/core/models/specs/checker.py b/rllib/core/models/specs/checker.py index 67a43e6f9e5f..50bc5102969c 100644 --- a/rllib/core/models/specs/checker.py +++ b/rllib/core/models/specs/checker.py @@ -50,10 +50,11 @@ def convert_to_canonical_format(spec: SpecType) -> Union[Spec, SpecDict]: # {"foo": TypeSpec(int), "bar": SpecDict({"baz": TypeSpec(str)})} # ) - spec = {"foo": int, "bar": {"baz": TorchTensorSpec("b,h")}} + spec = {"foo": int, "bar": {"baz": TensorSpec("b,h", framework="torch")}} output = convert_to_canonical_format(spec) # output = SpecDict( - # {"foo": TypeSpec(int), "bar": SpecDict({"baz": TorchTensorSpec("b,h")})} + # {"foo": TypeSpec(int), "bar": SpecDict({"baz": TensorSpec("b,h", + framework="torch")})} # ) @@ -68,9 +69,9 @@ def convert_to_canonical_format(spec: SpecType) -> Union[Spec, SpecDict]: output = convert_to_canonical_format(spec) # output = None - spec = TorchTensorSpec("b,h") + spec = TensorSpec("b,h", framework="torch") output = convert_to_canonical_format(spec) - # output = TorchTensorSpec("b,h") + # output = TensorSpec("b,h", framework="torch") Args: spec: The spec to convert to canonical format. 
diff --git a/rllib/core/models/specs/specs_base.py b/rllib/core/models/specs/specs_base.py index 8e22ee7d0cfb..90b4c53baad3 100644 --- a/rllib/core/models/specs/specs_base.py +++ b/rllib/core/models/specs/specs_base.py @@ -1,10 +1,17 @@ import abc from copy import deepcopy +import numpy as np from typing import Any, Optional, Dict, List, Tuple, Union, Type +from ray.rllib.utils import try_import_jax, try_import_tf, try_import_torch +from ray.rllib.utils.annotations import OverrideToImplementCustomLogic from ray.rllib.utils.annotations import DeveloperAPI, override from ray.rllib.utils.typing import TensorType +torch, _ = try_import_torch() +_, tf, _ = try_import_tf() +jax, _ = try_import_jax() + _INVALID_INPUT_DUP_DIM = "Duplicate dimension names in shape ({})" _INVALID_INPUT_UNKNOWN_DIM = "Unknown dimension name {} in shape ({})" _INVALID_INPUT_POSITIVE = "Dimension {} in ({}) must be positive, got {}" @@ -71,6 +78,8 @@ class TensorSpec(Spec): dtype: The dtype of the tensor. If None, the dtype is not checked during validation. Also during Sampling the dtype is set the default dtype of the backend. + framework: The framework of the tensor. If None, the framework is not + checked during validation. shape_vals: An optional dictionary mapping some dimension names to their values. For example, if shape is "B, C" and shape_vals is {"C": 3}, then the shape of the tensor is (B, 3). B is to be determined during @@ -87,14 +96,8 @@ class TensorSpec(Spec): validate: Checks if the shape and dtype of the tensor matches the specification. fill: creates a tensor with the specified value that is an - example of a tensor that matches the specification. - - Abstract Methods: - get_type: Returns the type of the tensor, e.g. tf.Tensor or torch.Tensor. - get_shape: Returns the shape of the tensor depending on the backend. - get_dtype: Returns the dtype of the tensor depending on the backend. 
- _full: Creates a tensor with the specified value that - has values of fill_value, shape of shape, and dtype of self.dtype. + example of a tensor that matches the specification. This can only be + called if `framework` is specified. """ def __init__( @@ -102,17 +105,75 @@ def __init__( shape: str, *, dtype: Optional[Any] = None, + framework: Optional[str] = None, **shape_vals: int, ) -> None: self._expected_shape = self._parse_expected_shape(shape, shape_vals) self._full_shape = self._get_full_shape() self._dtype = dtype + self._framework = framework + + if framework not in ("tf2", "torch", "np", "jax", None): + raise ValueError(f"Unknown framework {self._framework}") + + self._type = self._get_expected_type() + + @OverrideToImplementCustomLogic + def _get_expected_type(self) -> Type: + """Returns the expected type of the checked tensor.""" + if self._framework == "torch": + return torch.Tensor + elif self._framework == "tf2": + return tf.Tensor + elif self._framework == "np": + return np.ndarray + elif self._framework == "jax": + jax, _ = try_import_jax() + return jax.numpy.ndarray + elif self._framework is None: + # Don't restrict the type of the tensor if no framework is specified. + return object + + @OverrideToImplementCustomLogic + def get_shape(self, tensor: TensorType) -> Tuple[int]: + """Returns the shape of a tensor. + + Args: + tensor: The tensor whose shape is to be returned. + Returns: + A `tuple` specifying the shape of the tensor. + """ + if self._framework == "tf2": + # tf2 returns `Dimension` objects instead of `int` objects. + return tuple(int(i) for i in tensor.shape) + return tuple(tensor.shape) + + @OverrideToImplementCustomLogic + def get_dtype(self, tensor: TensorType) -> Any: + """Returns the expected data type of the checked tensor. + + Args: + tensor: The tensor whose data type is to be returned. + Returns: + The data type of the tensor. 
+ """ + return tensor.dtype + + @property + def dtype(self) -> Any: + """Returns the expected data type of the checked tensor.""" + return self._dtype @property def shape(self) -> Tuple[Union[int, str]]: """Returns a `tuple` specifying the abstract tensor shape (int and str).""" return self._expected_shape + @property + def type(self) -> Type: + """Returns the expected type of the checked tensor.""" + return self._type + @property def full_shape(self) -> Tuple[int]: """Returns a `tuple` specifying the concrete tensor shape (only ints).""" @@ -154,11 +215,6 @@ def append(self, spec: "TensorSpec") -> "TensorSpec": copy_._full_shape = self._get_full_shape() return copy_ - @property - def dtype(self) -> Any: - """Returns a dtype specifying the tensor dtype.""" - return self._dtype - @override(Spec) def validate(self, tensor: TensorType) -> None: """Checks if the shape and dtype of the tensor matches the specification. @@ -170,9 +226,8 @@ def validate(self, tensor: TensorType) -> None: ValueError: If the shape or dtype of the tensor does not match the """ - expected_type = self.get_type() - if not isinstance(tensor, expected_type): - raise ValueError(_INVALID_TYPE.format(expected_type, type(tensor).__name__)) + if not isinstance(tensor, self.type): + raise ValueError(_INVALID_TYPE.format(self.type, type(tensor).__name__)) shape = self.get_shape(tensor) if len(shape) != len(self._expected_shape): @@ -182,42 +237,10 @@ def validate(self, tensor: TensorType) -> None: if isinstance(expected_d, int) and expected_d != actual_d: raise ValueError(_INVALID_SHAPE.format(self._expected_shape, shape)) - dtype = self.get_dtype(tensor) + dtype = tensor.dtype if self.dtype and dtype != self.dtype: raise ValueError(_INVALID_TYPE.format(self.dtype, tensor.dtype)) - @classmethod - @abc.abstractmethod - def get_type(cls) -> Union[Type, Tuple[Type]]: - """Returns the type of a tensor e.g. torch.Tensor or tf.Tensor. - - Returns: - The type of a tensor. 
If the backend supports multiple tensor types, then a - tuple of types is returned. - """ - - @abc.abstractmethod - def get_shape(self, tensor: TensorType) -> Tuple[int]: - """Returns the shape of a tensor. - - Args: - tensor: The tensor whose shape is to be returned. - - Returns: - A `tuple` specifying the shape of the tensor. - """ - - @abc.abstractmethod - def get_dtype(self, tensor: TensorType) -> Any: - """Returns the data type of a tensor. - - Args: - tensor: The tensor whose data type is to be returned. - - Returns: - The data type of the tensor. - """ - @DeveloperAPI def fill(self, fill_value: Union[float, int] = 0) -> TensorType: """Creates a tensor filled with `fill_value` that matches the specs. @@ -227,23 +250,30 @@ def fill(self, fill_value: Union[float, int] = 0) -> TensorType: Returns: A tensor with the specified value that matches the specs. + + Raises: + ValueError: If `framework` is not specified. """ - return self._full(self.full_shape, fill_value) - @abc.abstractmethod - def _full(self, shape: Tuple[int], fill_value: Union[float, int] = 0) -> TensorType: - """Creates a tensor with the given shape filled with `fill_value`. + if self._framework == "torch": + return torch.full(self.full_shape, fill_value, dtype=self.dtype) - The tensor dtype is inferred from `fill_value`. This is equivalent to - np.full(shape, val). + elif self._framework == "tf2": + if self.dtype: + return tf.ones(self.full_shape, dtype=self.dtype) * fill_value + return tf.fill(self.full_shape, fill_value) - Args: - shape: The shape of the tensor to be sampled. - fill_value: The value to fill the tensor with. + elif self._framework == "np": + return np.full(self.full_shape, fill_value, dtype=self.dtype) - Returns: - A tensor with the specified value that matches the specs. 
- """ + elif self._framework == "jax": + return jax.numpy.full(self.full_shape, fill_value, dtype=self.dtype) + + elif self._framework is None: + raise ValueError( + "Cannot fill tensor without providing `framework` to TensorSpec. " + "This TensorSpec was instantiated without `framework`." + ) def _get_full_shape(self) -> Tuple[int]: """Converts the expected shape to a shape by replacing the unknown dimension diff --git a/rllib/core/models/specs/specs_jax.py b/rllib/core/models/specs/specs_jax.py deleted file mode 100644 index 50fb24d1c275..000000000000 --- a/rllib/core/models/specs/specs_jax.py +++ /dev/null @@ -1,31 +0,0 @@ -from typing import Tuple, Any, Union, Type - -from ray.rllib.utils.annotations import DeveloperAPI, override -from ray.rllib.utils.framework import try_import_jax -from ray.rllib.core.models.specs.specs_base import TensorSpec - -jax, _ = try_import_jax() -jnp = None -if jax is not None: - jnp = jax.numpy - - -@DeveloperAPI -class JAXTensorSpec(TensorSpec): - @override(TensorSpec) - def get_type(cls) -> Type: - return jnp.ndarray - - @override(TensorSpec) - def get_shape(self, tensor: jnp.ndarray) -> Tuple[int]: - return tuple(tensor.shape) - - @override(TensorSpec) - def get_dtype(self, tensor: jnp.ndarray) -> Any: - return tensor.dtype - - @override(TensorSpec) - def _full( - self, shape: Tuple[int], fill_value: Union[float, int] = 0 - ) -> jnp.ndarray: - return jnp.full(shape, fill_value, dtype=self.dtype) diff --git a/rllib/core/models/specs/specs_np.py b/rllib/core/models/specs/specs_np.py deleted file mode 100644 index 7f4c6fb901b2..000000000000 --- a/rllib/core/models/specs/specs_np.py +++ /dev/null @@ -1,24 +0,0 @@ -from typing import Tuple, Any, Union, Type -import numpy as np - -from ray.rllib.utils.annotations import DeveloperAPI, override -from ray.rllib.core.models.specs.specs_base import TensorSpec - - -@DeveloperAPI -class NPTensorSpec(TensorSpec): - @override(TensorSpec) - def get_type(cls) -> Type: - return np.ndarray - - 
@override(TensorSpec) - def get_shape(self, tensor: np.ndarray) -> Tuple[int]: - return tuple(tensor.shape) - - @override(TensorSpec) - def get_dtype(self, tensor: np.ndarray) -> Any: - return tensor.dtype - - @override(TensorSpec) - def _full(self, shape: Tuple[int], fill_value: Union[float, int] = 0) -> np.ndarray: - return np.full(shape, fill_value, dtype=self.dtype) diff --git a/rllib/core/models/specs/specs_tf.py b/rllib/core/models/specs/specs_tf.py deleted file mode 100644 index 17efbf7fbc44..000000000000 --- a/rllib/core/models/specs/specs_tf.py +++ /dev/null @@ -1,30 +0,0 @@ -from typing import Tuple, Any, Union, Type - -from ray.rllib.utils.annotations import DeveloperAPI, override -from ray.rllib.utils.framework import try_import_tf -from ray.rllib.core.models.specs.specs_base import TensorSpec - -_, tf, tfv = try_import_tf() - - -@DeveloperAPI -class TfTensorSpec(TensorSpec): - @override(TensorSpec) - def get_type(cls) -> Type: - return tf.Tensor - - @override(TensorSpec) - def get_shape(self, tensor: "tf.Tensor") -> Tuple[int]: - return tuple(tensor.shape) - - @override(TensorSpec) - def get_dtype(self, tensor: "tf.Tensor") -> Any: - return tensor.dtype - - @override(TensorSpec) - def _full( - self, shape: Tuple[int], fill_value: Union[float, int] = 0 - ) -> "tf.Tensor": - if self.dtype: - return tf.ones(shape, dtype=self.dtype) * fill_value - return tf.fill(shape, fill_value) diff --git a/rllib/core/models/specs/specs_torch.py b/rllib/core/models/specs/specs_torch.py deleted file mode 100644 index 85ea1ae3750c..000000000000 --- a/rllib/core/models/specs/specs_torch.py +++ /dev/null @@ -1,29 +0,0 @@ -from typing import Tuple, Any, Union, Type - -from ray.rllib.utils.annotations import DeveloperAPI, override -from ray.rllib.utils.framework import try_import_torch -from ray.rllib.core.models.specs.specs_base import TensorSpec - - -torch, _ = try_import_torch() - - -@DeveloperAPI -class TorchTensorSpec(TensorSpec): - @override(TensorSpec) - def 
get_type(cls) -> Type: - return torch.Tensor - - @override(TensorSpec) - def get_shape(self, tensor: torch.Tensor) -> Tuple[int]: - return tuple(tensor.shape) - - @override(TensorSpec) - def get_dtype(self, tensor: torch.Tensor) -> Any: - return tensor.dtype - - @override(TensorSpec) - def _full( - self, shape: Tuple[int], fill_value: Union[float, int] = 0 - ) -> torch.Tensor: - return torch.full(shape, fill_value, dtype=self.dtype) diff --git a/rllib/core/models/specs/tests/test_check_specs.py b/rllib/core/models/specs/tests/test_check_specs.py index ef05d2a5f9dd..1122ec05079d 100644 --- a/rllib/core/models/specs/tests/test_check_specs.py +++ b/rllib/core/models/specs/tests/test_check_specs.py @@ -7,7 +7,6 @@ from ray.rllib.core.models.specs.specs_base import TensorSpec, TypeSpec from ray.rllib.core.models.specs.specs_dict import SpecDict -from ray.rllib.core.models.specs.specs_torch import TorchTensorSpec from ray.rllib.utils.annotations import override from ray.rllib.utils.nested_dict import NestedDict from ray.rllib.core.models.specs.checker import ( @@ -239,7 +238,7 @@ def test_tensor_specs(self): class ClassWithTensorSpec: @property def input_spec1(self) -> TensorSpec: - return TorchTensorSpec("b, h", h=4) + return TensorSpec("b, h", h=4, framework="torch") @check_input_specs("input_spec1", cache=False) def forward(self, input_data) -> Any: @@ -290,14 +289,17 @@ def test_convert_to_canonical_format(self): # Case: input is a Nested Mapping returned = convert_to_canonical_format( - {"foo": {"bar": TorchTensorSpec("b")}, "jar": {"tar": int, "car": None}} + { + "foo": {"bar": TensorSpec("b", framework="torch")}, + "jar": {"tar": int, "car": None}, + } ) self.assertIsInstance(returned, SpecDict) self.assertDictEqual( returned.asdict(), SpecDict( { - "foo": {"bar": TorchTensorSpec("b")}, + "foo": {"bar": TensorSpec("b", framework="torch")}, "jar": {"tar": TypeSpec(int), "car": None}, } ).asdict(), @@ -305,13 +307,21 @@ def test_convert_to_canonical_format(self): # 
Case: input is a SpecDict already returned = convert_to_canonical_format( - SpecDict({"foo": {"bar": TorchTensorSpec("b")}, "jar": {"tar": int}}) + SpecDict( + { + "foo": {"bar": TensorSpec("b", framework="torch")}, + "jar": {"tar": int}, + } + ) ) self.assertIsInstance(returned, SpecDict) self.assertDictEqual( returned.asdict(), SpecDict( - {"foo": {"bar": TorchTensorSpec("b")}, "jar": {"tar": TypeSpec(int)}} + { + "foo": {"bar": TensorSpec("b", framework="torch")}, + "jar": {"tar": TypeSpec(int)}, + } ).asdict(), ) diff --git a/rllib/core/models/specs/tests/test_spec_dict.py b/rllib/core/models/specs/tests/test_spec_dict.py index 941bab465e60..0838f515d15b 100644 --- a/rllib/core/models/specs/tests/test_spec_dict.py +++ b/rllib/core/models/specs/tests/test_spec_dict.py @@ -1,7 +1,7 @@ import unittest import numpy as np -from ray.rllib.core.models.specs.specs_np import NPTensorSpec +from ray.rllib.core.models.specs.specs_base import TensorSpec from ray.rllib.core.models.specs.specs_dict import SpecDict from ray.rllib.core.models.specs.checker import ( check_input_specs, @@ -24,8 +24,8 @@ def test_basic_validation(self): h1, h2 = 3, 4 spec_1 = SpecDict( { - "out_tensor_1": NPTensorSpec("b, h", h=h1), - "out_tensor_2": NPTensorSpec("b, h", h=h2), + "out_tensor_1": TensorSpec("b, h", h=h1, framework="np"), + "out_tensor_2": TensorSpec("b, h", h=h2, framework="np"), "out_class_1": TypeClass1, } ) @@ -76,12 +76,12 @@ def test_basic_validation(self): spec_2 = SpecDict( { "encoder": { - "input": NPTensorSpec("b, h", h=h1), - "output": NPTensorSpec("b, h", h=h2), + "input": TensorSpec("b, h", h=h1, framework="np"), + "output": TensorSpec("b, h", h=h2, framework="np"), }, "decoder": { - "input": NPTensorSpec("b, h", h=h2), - "output": NPTensorSpec("b, h", h=h1), + "input": TensorSpec("b, h", h=h2, framework="np"), + "output": TensorSpec("b, h", h=h1, framework="np"), }, } ) @@ -160,7 +160,7 @@ def dict_key_spec_with_none_leaves(self): @property def 
spec_with_type_and_tensor_leaves(self): - return {"a": TypeClass1, "b": NPTensorSpec("b, h", h=3)} + return {"a": TypeClass1, "b": TensorSpec("b, h", h=3, framework="np")} @check_input_specs("nested_key_spec") def forward_nested_key(self, input_dict): diff --git a/rllib/core/models/specs/tests/test_tensor_spec.py b/rllib/core/models/specs/tests/test_tensor_spec.py index 7e961d5e2045..11d005489852 100644 --- a/rllib/core/models/specs/tests/test_tensor_spec.py +++ b/rllib/core/models/specs/tests/test_tensor_spec.py @@ -1,26 +1,31 @@ import itertools import unittest -import torch import numpy as np -import tensorflow as tf +from ray.rllib.utils import try_import_jax, try_import_tf, try_import_torch from ray.rllib.utils.test_utils import check -from ray.rllib.core.models.specs.specs_np import NPTensorSpec -from ray.rllib.core.models.specs.specs_tf import TfTensorSpec -from ray.rllib.core.models.specs.specs_torch import TorchTensorSpec +from ray.rllib.core.models.specs.specs_base import TensorSpec -# TODO: add jax tests +_, tf, _ = try_import_tf() +torch, _ = try_import_torch() +jax, _ = try_import_jax() +jnp = jax.numpy -SPEC_CLASSES = {"torch": TorchTensorSpec, "np": NPTensorSpec, "tf": TfTensorSpec} +# This makes it so that does not convert 64-bit floats to 32-bit +jax.config.update("jax_enable_x64", True) + +FRAMEWORKS_TO_TEST = {"torch", "np", "tf2", "jax"} DOUBLE_TYPE = { "torch": torch.float64, "np": np.float64, - "tf": tf.float64, + "tf2": tf.float64, + "jax": jnp.float64, } FLOAT_TYPE = { "torch": torch.float32, "np": np.float32, - "tf": tf.float32, + "tf2": tf.float32, + "jax": jnp.float32, } @@ -31,82 +36,95 @@ def setUpClass(cls) -> None: def test_fill(self): - for fw in SPEC_CLASSES.keys(): - spec_class = SPEC_CLASSES[fw] + for fw in FRAMEWORKS_TO_TEST: double_type = DOUBLE_TYPE[fw] # if un-specified dims should be 1, dtype is not important - x = spec_class("b,h").fill(float(2.0)) + x = TensorSpec("b,h", framework=fw).fill(float(2.0)) # check the shape 
self.assertEqual(x.shape, (1, 1)) # check the value check(x, np.array([[2.0]])) - x = spec_class("b,h", b=2, h=3).fill(2.0) + x = TensorSpec("b,h", b=2, h=3, framework=fw).fill(2.0) self.assertEqual(x.shape, (2, 3)) - x = spec_class("b,h1,h2,h3", h1=2, h2=3, h3=3, dtype=double_type).fill(2) + x = TensorSpec( + "b,h1,h2,h3", h1=2, h2=3, h3=3, framework=fw, dtype=double_type + ).fill(2) self.assertEqual(x.shape, (1, 2, 3, 3)) self.assertEqual(x.dtype, double_type) - # def test_validation(self): - - # b, h = 2, 3 - - # for fw in SPEC_CLASSES.keys(): - # spec_class = SPEC_CLASSES[fw] - # double_type = DOUBLE_TYPE[fw] - # float_type = FLOAT_TYPE[fw] - - # tensor_2d = spec_class("b,h", b=b, h=h, dtype=double_type).fill() - - # matching_specs = [ - # spec_class("b,h"), - # spec_class("b,h", h=h), - # spec_class("b,h", h=h, b=b), - # spec_class("b,h", b=b, dtype=double_type), - # ] - - # # check if get_shape returns a tuple of ints - # shape = matching_specs[0].get_shape(tensor_2d) - # self.assertIsInstance(shape, tuple) - # self.assertTrue(all(isinstance(x, int) for x in shape)) - - # # check matching - # for spec in matching_specs: - # spec.validate(tensor_2d) - - # non_matching_specs = [ - # spec_class("b"), - # spec_class("b,h1,h2"), - # spec_class("b,h", h=h + 1), - # ] - # if fw != "jax": - # non_matching_specs.append(spec_class("b,h", dtype=float_type)) - - # for spec in non_matching_specs: - # self.assertRaises(ValueError, lambda: spec.validate(tensor_2d)) - - # # non unique dimensions - # self.assertRaises(ValueError, lambda: spec_class("b,b")) - # # unknown dimensions - # self.assertRaises(ValueError, lambda: spec_class("b,h", b=1, h=2, c=3)) - # self.assertRaises(ValueError, lambda: spec_class("b1", b2=1)) - # # zero dimensions - # self.assertRaises(ValueError, lambda: spec_class("b,h", b=1, h=0)) - # # non-integer dimension - # self.assertRaises(ValueError, lambda: spec_class("b,h", b=1, h="h")) + def test_validation(self): + + b, h = 2, 3 + + for fw in 
FRAMEWORKS_TO_TEST: + double_type = DOUBLE_TYPE[fw] + float_type = FLOAT_TYPE[fw] + + tensor_2d = TensorSpec( + "b,h", b=b, h=h, framework=fw, dtype=double_type + ).fill() + + matching_specs = [ + TensorSpec("b,h", framework=fw), + TensorSpec("b,h", h=h, framework=fw), + TensorSpec("b,h", h=h, b=b, framework=fw), + TensorSpec("b,h", b=b, framework=fw, dtype=double_type), + ] + + # check if get_shape returns a tuple of ints + shape = matching_specs[0].get_shape(tensor_2d) + self.assertIsInstance(shape, tuple) + print(fw) + print(shape) + self.assertTrue(all(isinstance(x, int) for x in shape)) + + # check matching + for spec in matching_specs: + spec.validate(tensor_2d) + + non_matching_specs = [ + TensorSpec("b", framework=fw), + TensorSpec("b,h1,h2", framework=fw), + TensorSpec("b,h", h=h + 1, framework=fw), + ] + if fw != "jax": + non_matching_specs.append( + TensorSpec("b,h", framework=fw, dtype=float_type) + ) + + for spec in non_matching_specs: + self.assertRaises(ValueError, lambda: spec.validate(tensor_2d)) + + # non unique dimensions + self.assertRaises(ValueError, lambda: TensorSpec("b,b", framework=fw)) + # unknown dimensions + self.assertRaises( + ValueError, lambda: TensorSpec("b,h", b=1, h=2, c=3, framework=fw) + ) + self.assertRaises(ValueError, lambda: TensorSpec("b1", b2=1, framework=fw)) + # zero dimensions + self.assertRaises( + ValueError, lambda: TensorSpec("b,h", b=1, h=0, framework=fw) + ) + # non-integer dimension + self.assertRaises( + ValueError, lambda: TensorSpec("b,h", b=1, h="h", framework=fw) + ) def test_equal(self): - for fw in SPEC_CLASSES.keys(): - spec_class = SPEC_CLASSES[fw] - spec_eq_1 = spec_class("b,h", b=2, h=3) - spec_eq_2 = spec_class("b, h", b=2, h=3) - spec_eq_3 = spec_class(" b, h", b=2, h=3) - spec_neq_1 = spec_class("b, h", h=3, b=3) - spec_neq_2 = spec_class("b, h", h=3, b=3, dtype=DOUBLE_TYPE[fw]) + for fw in FRAMEWORKS_TO_TEST: + spec_eq_1 = TensorSpec("b,h", b=2, h=3, framework=fw) + spec_eq_2 = TensorSpec("b, h", 
b=2, h=3, framework=fw) + spec_eq_3 = TensorSpec(" b, h", b=2, h=3, framework=fw) + spec_neq_1 = TensorSpec("b, h", h=3, b=3, framework=fw) + spec_neq_2 = TensorSpec( + "b, h", h=3, b=3, framework=fw, dtype=DOUBLE_TYPE[fw] + ) self.assertTrue(spec_eq_1 == spec_eq_2) self.assertTrue(spec_eq_2 == spec_eq_3) @@ -114,13 +132,13 @@ def test_equal(self): self.assertTrue(spec_eq_1 != spec_neq_2) def test_type_validation(self): - - fw_keys = SPEC_CLASSES.keys() # check all combinations of spec fws with tensor fws - for spec_fw, tensor_fw in itertools.product(fw_keys, fw_keys): + for spec_fw, tensor_fw in itertools.product( + FRAMEWORKS_TO_TEST, FRAMEWORKS_TO_TEST + ): - spec = SPEC_CLASSES[spec_fw]("b, h", b=2, h=3) - tensor = SPEC_CLASSES[tensor_fw]("b, h", b=2, h=3).fill(0) + spec = TensorSpec("b, h", b=2, h=3, framework=spec_fw) + tensor = TensorSpec("b, h", b=2, h=3, framework=tensor_fw).fill(0) print("spec:", type(spec), ", tensor: ", type(tensor)) @@ -129,6 +147,71 @@ def test_type_validation(self): else: self.assertRaises(ValueError, lambda: spec.validate(tensor)) + def test_no_framework_arg(self): + """ + Test that a TensorSpec without a framework can be created and used except + for filling. + """ + spec = TensorSpec("b, h", b=2, h=3) + self.assertRaises(ValueError, lambda: spec.fill(0)) + + for fw in FRAMEWORKS_TO_TEST: + tensor = TensorSpec("b, h", b=2, h=3, framework=fw).fill(0) + spec.validate(tensor) + + def test_validate_framework(self): + """ + Test that a TensorSpec with a framework raises an error + when being used with a tensor from a different framework. 
+ """ + for spec_fw, tensor_fw in itertools.product( + FRAMEWORKS_TO_TEST, FRAMEWORKS_TO_TEST + ): + spec = TensorSpec("b, h", b=2, h=3, framework=spec_fw) + tensor = TensorSpec("b, h", b=2, h=3, framework=tensor_fw).fill(0) + if spec_fw == tensor_fw: + spec.validate(tensor) + else: + self.assertRaises(ValueError, lambda: spec.validate(tensor)) + + def test_validate_dtype(self): + """ + Test that a TensorSpec with a dtype raises an error + when being used with a tensor from a different dtype but works otherwise. + """ + + all_types = [DOUBLE_TYPE, FLOAT_TYPE] + + for spec_types, tensor_types in itertools.product(all_types, all_types): + for spec_fw, tensor_fw in itertools.product( + FRAMEWORKS_TO_TEST, FRAMEWORKS_TO_TEST + ): + + # Pick the correct types for the frameworks + spec_type = spec_types[spec_fw] + tensor_type = tensor_types[tensor_fw] + + print( + "\nTesting.." "\nspec_fw: ", + spec_fw, + "\ntensor_fw: ", + tensor_fw, + "\nspec_type: ", + spec_type, + "\ntensor_type: ", + tensor_type, + ) + + spec = TensorSpec("b, h", b=2, h=3, dtype=spec_type) + tensor = TensorSpec( + "b, h", b=2, h=3, framework=tensor_fw, dtype=tensor_type + ).fill(0) + + if spec_type != tensor_type: + self.assertRaises(ValueError, lambda: spec.validate(tensor)) + else: + spec.validate(tensor) + if __name__ == "__main__": import pytest diff --git a/rllib/core/models/tf/encoder.py b/rllib/core/models/tf/encoder.py index 8f739934dcb0..f83b51e1b49e 100644 --- a/rllib/core/models/tf/encoder.py +++ b/rllib/core/models/tf/encoder.py @@ -20,7 +20,7 @@ from ray.rllib.core.models.tf.primitives import TfMLP, TfCNN from ray.rllib.core.models.specs.specs_base import Spec from ray.rllib.core.models.specs.specs_dict import SpecDict -from ray.rllib.core.models.specs.specs_tf import TfTensorSpec +from ray.rllib.core.models.specs.specs_base import TensorSpec from ray.rllib.models.utils import get_activation_fn from ray.rllib.policy.sample_batch import SampleBatch from ray.rllib.utils.annotations import 
override @@ -75,11 +75,12 @@ def __init__(self, config: CNNEncoderConfig) -> None: def get_input_specs(self) -> Optional[Spec]: return SpecDict( { - SampleBatch.OBS: TfTensorSpec( + SampleBatch.OBS: TensorSpec( "b, w, h, c", w=self.config.input_dims[0], h=self.config.input_dims[1], c=self.config.input_dims[2], + framework="tf2", ), STATE_IN: None, SampleBatch.SEQ_LENS: None, @@ -90,7 +91,9 @@ def get_input_specs(self) -> Optional[Spec]: def get_output_specs(self) -> Optional[Spec]: return SpecDict( { - ENCODER_OUT: TfTensorSpec("b, d", d=self.config.output_dims[0]), + ENCODER_OUT: TensorSpec( + "b, d", d=self.config.output_dims[0], framework="tf2" + ), STATE_OUT: None, } ) @@ -125,7 +128,9 @@ def __init__(self, config: MLPEncoderConfig) -> None: def get_input_specs(self) -> Optional[Spec]: return SpecDict( { - SampleBatch.OBS: TfTensorSpec("b, d", d=self.config.input_dims[0]), + SampleBatch.OBS: TensorSpec( + "b, d", d=self.config.input_dims[0], framework="tf2" + ), # STATE_IN: None, # SampleBatch.SEQ_LENS: None, } @@ -135,7 +140,9 @@ def get_input_specs(self) -> Optional[Spec]: def get_output_specs(self) -> Optional[Spec]: return SpecDict( { - ENCODER_OUT: TfTensorSpec("b, d", d=self.config.output_dims[0]), + ENCODER_OUT: TensorSpec( + "b, d", d=self.config.output_dims[0], framework="tf2" + ), STATE_OUT: None, } ) @@ -180,10 +187,15 @@ def get_input_specs(self) -> Optional[Spec]: return SpecDict( { # b, t for batch major; t, b for time major. 
- SampleBatch.OBS: TfTensorSpec("b, t, d", d=self.config.input_dims[0]), + SampleBatch.OBS: TensorSpec( + "b, t, d", d=self.config.input_dims[0], framework="tf2" + ), STATE_IN: { - "h": TfTensorSpec( - "b, l, h", h=self.config.hidden_dim, l=self.config.num_layers + "h": TensorSpec( + "b, l, h", + h=self.config.hidden_dim, + l=self.config.num_layers, + framework="tf2", ), }, } @@ -193,10 +205,15 @@ def get_input_specs(self) -> Optional[Spec]: def get_output_specs(self) -> Optional[Spec]: return SpecDict( { - ENCODER_OUT: TfTensorSpec("b, t, d", d=self.config.output_dims[0]), + ENCODER_OUT: TensorSpec( + "b, t, d", d=self.config.output_dims[0], framework="tf2" + ), STATE_OUT: { - "h": TfTensorSpec( - "b, l, h", h=self.config.hidden_dim, l=self.config.num_layers + "h": TensorSpec( + "b, l, h", + h=self.config.hidden_dim, + l=self.config.num_layers, + framework="tf2", ), }, } @@ -262,13 +279,21 @@ def get_input_specs(self) -> Optional[Spec]: return SpecDict( { # b, t for batch major; t, b for time major. 
- SampleBatch.OBS: TfTensorSpec("b, t, d", d=self.config.input_dims[0]), + SampleBatch.OBS: TensorSpec( + "b, t, d", d=self.config.input_dims[0], framework="tf2" + ), STATE_IN: { - "h": TfTensorSpec( - "b, l, h", h=self.config.hidden_dim, l=self.config.num_layers + "h": TensorSpec( + "b, l, h", + h=self.config.hidden_dim, + l=self.config.num_layers, + framework="tf2", ), - "c": TfTensorSpec( - "b, l, h", h=self.config.hidden_dim, l=self.config.num_layers + "c": TensorSpec( + "b, l, h", + h=self.config.hidden_dim, + l=self.config.num_layers, + framework="tf2", ), }, } @@ -278,13 +303,21 @@ def get_input_specs(self) -> Optional[Spec]: def get_output_specs(self) -> Optional[Spec]: return SpecDict( { - ENCODER_OUT: TfTensorSpec("b, t, d", d=self.config.output_dims[0]), + ENCODER_OUT: TensorSpec( + "b, t, d", d=self.config.output_dims[0], framework="tf2" + ), STATE_OUT: { - "h": TfTensorSpec( - "b, l, h", h=self.config.hidden_dim, l=self.config.num_layers + "h": TensorSpec( + "b, l, h", + h=self.config.hidden_dim, + l=self.config.num_layers, + framework="tf2", ), - "c": TfTensorSpec( - "b, l, h", h=self.config.hidden_dim, l=self.config.num_layers + "c": TensorSpec( + "b, l, h", + h=self.config.hidden_dim, + l=self.config.num_layers, + framework="tf2", ), }, } diff --git a/rllib/core/models/tf/heads.py b/rllib/core/models/tf/heads.py index e4fd7fe289a6..2b9c78824b10 100644 --- a/rllib/core/models/tf/heads.py +++ b/rllib/core/models/tf/heads.py @@ -9,7 +9,7 @@ MLPHeadConfig, ) from ray.rllib.core.models.specs.specs_base import Spec -from ray.rllib.core.models.specs.specs_tf import TfTensorSpec +from ray.rllib.core.models.specs.specs_base import TensorSpec from ray.rllib.core.models.tf.base import TfModel from ray.rllib.core.models.tf.primitives import TfCNNTranspose, TfMLP from ray.rllib.utils import try_import_tf @@ -34,11 +34,11 @@ def __init__(self, config: MLPHeadConfig) -> None: @override(Model) def get_input_specs(self) -> Optional[Spec]: - return TfTensorSpec("b, 
d", d=self.config.input_dims[0]) + return TensorSpec("b, d", d=self.config.input_dims[0], framework="tf2") @override(Model) def get_output_specs(self) -> Optional[Spec]: - return TfTensorSpec("b, d", d=self.config.output_dims[0]) + return TensorSpec("b, d", d=self.config.output_dims[0], framework="tf2") @override(Model) def _forward(self, inputs: tf.Tensor, **kwargs) -> tf.Tensor: @@ -73,11 +73,11 @@ def __init__(self, config: FreeLogStdMLPHeadConfig) -> None: @override(Model) def get_input_specs(self) -> Optional[Spec]: - return TfTensorSpec("b, d", d=self.config.input_dims[0]) + return TensorSpec("b, d", d=self.config.input_dims[0], framework="tf2") @override(Model) def get_output_specs(self) -> Optional[Spec]: - return TfTensorSpec("b, d", d=self.config.output_dims[0]) + return TensorSpec("b, d", d=self.config.output_dims[0], framework="tf2") @override(Model) def _forward(self, inputs: tf.Tensor, **kwargs) -> tf.Tensor: @@ -112,15 +112,16 @@ def __init__(self, config: CNNTransposeHeadConfig) -> None: @override(Model) def get_input_specs(self) -> Optional[Spec]: - return TfTensorSpec("b, d", d=self.config.input_dims[0]) + return TensorSpec("b, d", d=self.config.input_dims[0], framework="tf2") @override(Model) def get_output_specs(self) -> Optional[Spec]: - return TfTensorSpec( + return TensorSpec( "b, w, h, c", w=self.config.output_dims[0], h=self.config.output_dims[1], c=self.config.output_dims[2], + framework="tf2", ) @override(Model) diff --git a/rllib/core/models/torch/encoder.py b/rllib/core/models/torch/encoder.py index 8fbd1c70c03b..8f6b930524f3 100644 --- a/rllib/core/models/torch/encoder.py +++ b/rllib/core/models/torch/encoder.py @@ -20,7 +20,7 @@ from ray.rllib.core.models.torch.primitives import TorchMLP, TorchCNN from ray.rllib.core.models.specs.specs_base import Spec from ray.rllib.core.models.specs.specs_dict import SpecDict -from ray.rllib.core.models.specs.specs_torch import TorchTensorSpec +from ray.rllib.core.models.specs.specs_base import 
TensorSpec from ray.rllib.models.utils import get_activation_fn from ray.rllib.policy.sample_batch import SampleBatch from ray.rllib.utils.annotations import override @@ -60,7 +60,9 @@ def __init__(self, config: MLPEncoderConfig) -> None: def get_input_specs(self) -> Optional[Spec]: return SpecDict( { - SampleBatch.OBS: TorchTensorSpec("b, d", d=self.config.input_dims[0]), + SampleBatch.OBS: TensorSpec( + "b, d", d=self.config.input_dims[0], framework="torch" + ), STATE_IN: None, SampleBatch.SEQ_LENS: None, } @@ -70,7 +72,9 @@ def get_input_specs(self) -> Optional[Spec]: def get_output_specs(self) -> Optional[Spec]: return SpecDict( { - ENCODER_OUT: TorchTensorSpec("b, d", d=self.config.output_dims[0]), + ENCODER_OUT: TensorSpec( + "b, d", d=self.config.output_dims[0], framework="torch" + ), STATE_OUT: None, } ) @@ -125,11 +129,12 @@ def __init__(self, config: CNNEncoderConfig) -> None: def get_input_specs(self) -> Optional[Spec]: return SpecDict( { - SampleBatch.OBS: TorchTensorSpec( + SampleBatch.OBS: TensorSpec( "b, w, h, c", w=self.config.input_dims[0], h=self.config.input_dims[1], c=self.config.input_dims[2], + framework="torch", ), STATE_IN: None, SampleBatch.SEQ_LENS: None, @@ -140,7 +145,9 @@ def get_input_specs(self) -> Optional[Spec]: def get_output_specs(self) -> Optional[Spec]: return SpecDict( { - ENCODER_OUT: TorchTensorSpec("b, d", d=self.config.output_dims[0]), + ENCODER_OUT: TensorSpec( + "b, d", d=self.config.output_dims[0], framework="torch" + ), STATE_OUT: None, } ) @@ -181,12 +188,17 @@ def get_input_specs(self) -> Optional[Spec]: return SpecDict( { # b, t for batch major; t, b for time major. 
- SampleBatch.OBS: TorchTensorSpec( - "b, t, d", d=self.config.input_dims[0] + SampleBatch.OBS: TensorSpec( + "b, t, d", + d=self.config.input_dims[0], + framework="torch", ), STATE_IN: { - "h": TorchTensorSpec( - "b, l, h", h=self.config.hidden_dim, l=self.config.num_layers + "h": TensorSpec( + "b, l, h", + h=self.config.hidden_dim, + l=self.config.num_layers, + framework="torch", ), }, } @@ -196,10 +208,15 @@ def get_input_specs(self) -> Optional[Spec]: def get_output_specs(self) -> Optional[Spec]: return SpecDict( { - ENCODER_OUT: TorchTensorSpec("b, t, d", d=self.config.output_dims[0]), + ENCODER_OUT: TensorSpec( + "b, t, d", d=self.config.output_dims[0], framework="torch" + ), STATE_OUT: { - "h": TorchTensorSpec( - "b, l, h", h=self.config.hidden_dim, l=self.config.num_layers + "h": TensorSpec( + "b, l, h", + h=self.config.hidden_dim, + l=self.config.num_layers, + framework="torch", ), }, } @@ -257,19 +274,21 @@ def get_input_specs(self) -> Optional[Spec]: return SpecDict( { # b, t for batch major; t, b for time major. 
- SampleBatch.OBS: TorchTensorSpec( - "b, t, d", d=self.config.input_dims[0] + SampleBatch.OBS: TensorSpec( + "b, t, d", d=self.config.input_dims[0], framework="torch" ), STATE_IN: { - "h": TorchTensorSpec( + "h": TensorSpec( "b, l, h", h=self.config.hidden_dim, l=self.config.num_layers, + framework="torch", ), - "c": TorchTensorSpec( + "c": TensorSpec( "b, l, h", h=self.config.hidden_dim, l=self.config.num_layers, + framework="torch", ), }, } @@ -279,17 +298,21 @@ def get_input_specs(self) -> Optional[Spec]: def get_output_specs(self) -> Optional[Spec]: return SpecDict( { - ENCODER_OUT: TorchTensorSpec("b, t, d", d=self.config.output_dims[0]), + ENCODER_OUT: TensorSpec( + "b, t, d", d=self.config.output_dims[0], framework="torch" + ), STATE_OUT: { - "h": TorchTensorSpec( + "h": TensorSpec( "b, l, h", h=self.config.hidden_dim, l=self.config.num_layers, + framework="torch", ), - "c": TorchTensorSpec( + "c": TensorSpec( "b, l, h", h=self.config.hidden_dim, l=self.config.num_layers, + framework="torch", ), }, } diff --git a/rllib/core/models/torch/heads.py b/rllib/core/models/torch/heads.py index f9de4040f8c0..81fbad04dedc 100644 --- a/rllib/core/models/torch/heads.py +++ b/rllib/core/models/torch/heads.py @@ -9,7 +9,7 @@ MLPHeadConfig, ) from ray.rllib.core.models.specs.specs_base import Spec -from ray.rllib.core.models.specs.specs_torch import TorchTensorSpec +from ray.rllib.core.models.specs.specs_base import TensorSpec from ray.rllib.core.models.torch.base import TorchModel from ray.rllib.core.models.torch.primitives import TorchCNNTranspose, TorchMLP from ray.rllib.utils.annotations import override @@ -34,11 +34,11 @@ def __init__(self, config: MLPHeadConfig) -> None: @override(Model) def get_input_specs(self) -> Optional[Spec]: - return TorchTensorSpec("b, d", d=self.config.input_dims[0]) + return TensorSpec("b, d", d=self.config.input_dims[0], framework="torch") @override(Model) def get_output_specs(self) -> Optional[Spec]: - return TorchTensorSpec("b, d", 
d=self.config.output_dims[0]) + return TensorSpec("b, d", d=self.config.output_dims[0], framework="torch") @override(Model) def _forward(self, inputs: torch.Tensor, **kwargs) -> torch.Tensor: @@ -70,11 +70,11 @@ def __init__(self, config: FreeLogStdMLPHeadConfig) -> None: @override(Model) def get_input_specs(self) -> Optional[Spec]: - return TorchTensorSpec("b, d", d=self.config.input_dims[0]) + return TensorSpec("b, d", d=self.config.input_dims[0], framework="torch") @override(Model) def get_output_specs(self) -> Optional[Spec]: - return TorchTensorSpec("b, d", d=self.config.output_dims[0]) + return TensorSpec("b, d", d=self.config.output_dims[0], framework="torch") @override(Model) def _forward(self, inputs: torch.Tensor, **kwargs) -> torch.Tensor: @@ -110,15 +110,16 @@ def __init__(self, config: CNNTransposeHeadConfig) -> None: @override(Model) def get_input_specs(self) -> Optional[Spec]: - return TorchTensorSpec("b, d", d=self.config.input_dims[0]) + return TensorSpec("b, d", d=self.config.input_dims[0], framework="torch") @override(Model) def get_output_specs(self) -> Optional[Spec]: - return TorchTensorSpec( + return TensorSpec( "b, w, h, c", w=self.config.output_dims[0], h=self.config.output_dims[1], c=self.config.output_dims[2], + framework="torch", ) @override(Model) diff --git a/rllib/utils/__init__.py b/rllib/utils/__init__.py index 14ecece9a2ab..402f747e1cc4 100644 --- a/rllib/utils/__init__.py +++ b/rllib/utils/__init__.py @@ -2,10 +2,15 @@ from functools import partial from ray.rllib.utils.annotations import override, PublicAPI, DeveloperAPI -from ray.rllib.utils.framework import try_import_tf, try_import_tfp, try_import_torch from ray.rllib.utils.deprecation import deprecation_warning -from ray.rllib.utils.filter_manager import FilterManager from ray.rllib.utils.filter import Filter +from ray.rllib.utils.filter_manager import FilterManager +from ray.rllib.utils.framework import ( + try_import_jax, + try_import_tf, + try_import_tfp, + 
try_import_torch, +) from ray.rllib.utils.numpy import ( sigmoid, softmax, @@ -120,6 +125,7 @@ def __exit__(self, *args): "relu", "sigmoid", "softmax", + "try_import_jax", "try_import_tf", "try_import_tfp", "try_import_torch", diff --git a/rllib/utils/typing.py b/rllib/utils/typing.py index af6a31b1b8da..1e490174ffbe 100644 --- a/rllib/utils/typing.py +++ b/rllib/utils/typing.py @@ -23,14 +23,18 @@ from ray.rllib.policy.policy import PolicySpec from ray.rllib.policy.sample_batch import MultiAgentBatch, SampleBatch from ray.rllib.policy.view_requirement import ViewRequirement - from ray.rllib.utils import try_import_tf, try_import_torch + from ray.rllib.utils import try_import_jax, try_import_tf, try_import_torch _, tf, _ = try_import_tf() torch, _ = try_import_torch() + jax, _ = try_import_jax() + jnp = None + if jax is not None: + jnp = jax.numpy # Represents a generic tensor type. # This could be an np.ndarray, tf.Tensor, or a torch.Tensor. -TensorType = Union[np.array, "tf.Tensor", "torch.Tensor"] +TensorType = Union[np.array, "jnp.ndarray", "tf.Tensor", "torch.Tensor"] # Either a plain tensor, or a dict or tuple of tensors (or StructTensors). 
TensorStructType = Union[TensorType, dict, tuple] From 2cafa00a4f91fcf091a37ebe20614e5d35779092 Mon Sep 17 00:00:00 2001 From: Antoni Baum Date: Mon, 24 Apr 2023 13:23:31 -0700 Subject: [PATCH 074/424] [AIR] Fixed UUID for URI checkpoints (#34503) * [AIR] Fixed UUID for URI checkpoints Signed-off-by: Antoni Baum * Add test Signed-off-by: Antoni Baum * Tweak Signed-off-by: Antoni Baum * Fix test Signed-off-by: Antoni Baum * More explaination Signed-off-by: Antoni Baum --------- Signed-off-by: Antoni Baum --- python/ray/air/checkpoint.py | 14 ++++- python/ray/air/tests/test_checkpoints.py | 79 ++++++++++++++++++++++++ 2 files changed, 92 insertions(+), 1 deletion(-) diff --git a/python/ray/air/checkpoint.py b/python/ray/air/checkpoint.py index 60fefca9505e..9bcc0860fb02 100644 --- a/python/ray/air/checkpoint.py +++ b/python/ray/air/checkpoint.py @@ -41,6 +41,8 @@ _BYTES_DATA_KEY = "bytes_data" _METADATA_KEY = "_metadata" _CHECKPOINT_DIR_PREFIX = "checkpoint_tmp_" +# The namespace is a constant UUID to prevent conflicts, as defined in RFC-4122 +_CHECKPOINT_UUID_URI_NAMESPACE = uuid.UUID("627fe696-f135-436f-bc4b-bda0306e0181") logger = logging.getLogger(__name__) @@ -213,7 +215,17 @@ def __init__( self._override_preprocessor: Optional["Preprocessor"] = None self._override_preprocessor_set = False - self._uuid = uuid.uuid4() + # When using a cloud URI, we make sure that the uuid is constant. + # This ensures we do not download the data multiple times on one node. + # Note that this is not a caching mechanism - instead, this + # only ensures that if there are several processes downloading + # from the same URI, only one process does the actual work + # while the rest waits (FileLock). This also means data will not be duplicated. 
+ self._uuid = ( + uuid.uuid4() + if not self._uri + else uuid.uuid5(_CHECKPOINT_UUID_URI_NAMESPACE, self._uri) + ) def __repr__(self): parameter, argument = self.get_internal_representation() diff --git a/python/ray/air/tests/test_checkpoints.py b/python/ray/air/tests/test_checkpoints.py index 0b6aced414b6..8338fee31ca8 100644 --- a/python/ray/air/tests/test_checkpoints.py +++ b/python/ray/air/tests/test_checkpoints.py @@ -1,19 +1,24 @@ +import logging import os import pickle import re import shutil import tempfile import unittest +from contextlib import contextmanager from pathlib import Path from typing import Any import pytest +import boto3 import ray from ray.air._internal.remote_storage import _ensure_directory, delete_at_uri +from ray.air._internal.uri_utils import URI from ray.air.checkpoint import _DICT_CHECKPOINT_ADDITIONAL_FILE_KEY, Checkpoint from ray.air.constants import MAX_REPR_LENGTH, PREPROCESSOR_KEY from ray.data import Preprocessor +from ray._private.test_utils import simulate_storage class DummyPreprocessor(Preprocessor): @@ -817,6 +822,80 @@ def testCheckpointUri(self): self.assertEqual(checkpoint.uri, "memory://some/location") +class URITestCheckpoint(Checkpoint): + def _to_directory(self, path: str, move_instead_of_copy: bool = False) -> None: + super()._to_directory(path, move_instead_of_copy) + # Drop a marker file with the current pid. + # Only one file should be created, as only one task should + # download the data, with the rest waiting. 
+ with open(Path(path, f"_pid_marker_{os.getpid()}"), "w"): + pass + + +@contextmanager +def mock_s3_bucket_uri(): + port = 5002 + region = "us-west-2" + with simulate_storage("s3", port=port, region=region) as s3_uri: + s3 = boto3.client( + "s3", region_name=region, endpoint_url=f"http://localhost:{port}" + ) + # Bucket name will be autogenerated/unique per test + bucket_name = URI(s3_uri).name + s3.create_bucket( + Bucket=bucket_name, + CreateBucketConfiguration={"LocationConstraint": region}, + ) + # Disable server HTTP request logging + logging.getLogger("werkzeug").setLevel(logging.WARNING) + yield URI(s3_uri) + logging.getLogger("werkzeug").setLevel(logging.INFO) + + +@ray.remote +def download_uri_checkpoint(checkpoint: URITestCheckpoint): + with checkpoint.as_directory() as dir: + dir = Path(dir) + all_pid_marker_files = list(dir.glob("_pid_marker_*")) + # There should be only one file, as only one task should + # download. + assert len(all_pid_marker_files) == 1 + assert (dir / "mock.file").exists() + + +class TestCheckpointURIConstantUUID(unittest.TestCase): + def setUp(self) -> None: + ray.shutdown() + ray.init(num_cpus=4) + + def tearDown(self) -> None: + ray.shutdown() + + def testCheckpointURIConstantUUID(self): + """Test that multiple workers using the same URI checkpoint + share the local directory, and that only one worker downloads + the data.""" + with mock_s3_bucket_uri() as base_uri, tempfile.TemporaryDirectory() as tmpdir: + checkpoint_dir = Path(tmpdir, "checkpoint") + os.makedirs(checkpoint_dir) + with open(checkpoint_dir / "mock.file", "w"): + pass + checkpoint_uri = str(base_uri / "model") + uri = Checkpoint.from_directory(checkpoint_dir).to_uri(checkpoint_uri) + + # Check that two separate checkpoints have the same uuid + checkpoint = URITestCheckpoint.from_uri(uri) + checkpoint2 = URITestCheckpoint.from_uri(uri) + assert checkpoint._uuid == checkpoint2._uuid + + # Create a separate checkpoint for each task + tasks = [ + 
download_uri_checkpoint.remote(URITestCheckpoint.from_uri(uri)) + for _ in range(4) + ] + ray.get(tasks) + + if __name__ == "__main__": import sys From 6560f94efa7c67b432044c109728baf325bb50a7 Mon Sep 17 00:00:00 2001 From: Archit Kulkarni Date: Mon, 24 Apr 2023 13:34:54 -0700 Subject: [PATCH 075/424] [Doc] [Serve] Fix typo in serve dev workflow (#34713) Change the --working_dir command line option to --working-dir. Closes #34691 --- doc/source/serve/dev-workflow.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/serve/dev-workflow.md b/doc/source/serve/dev-workflow.md index d32626d049a7..8a40215feeb9 100644 --- a/doc/source/serve/dev-workflow.md +++ b/doc/source/serve/dev-workflow.md @@ -88,7 +88,7 @@ When making the transition from your local machine to a remote cluster, you'll n Let's see a simple example that just packages the code. Run the following command on your local machine, with your remote cluster head node IP address substituted for `` in the command: ```bash -serve run --address=ray://:10001 --working_dir="./project/src" local_dev:graph +serve run --address=ray://:10001 --working-dir="./project/src" local_dev:graph ``` This will connect to the remote cluster via Ray Client, upload the `working_dir` directory, and run your serve application. Here, the local directory specified by `working_dir` must contain `local_dev.py` so that it can be uploaded to the cluster and imported by Ray Serve. 
From b2a752836903cd579a9a29f5328dab6b6a0076de Mon Sep 17 00:00:00 2001 From: Jiajun Yao Date: Mon, 24 Apr 2023 13:59:17 -0700 Subject: [PATCH 076/424] Update 2.4.0 release logs (#34700) Signed-off-by: Jiajun Yao --- .../2.4.0/benchmarks/many_actors.json | 60 +- .../2.4.0/benchmarks/many_nodes.json | 72 +-- .../2.4.0/benchmarks/many_pgs.json | 60 +- .../2.4.0/benchmarks/many_tasks.json | 72 +-- .../release_logs/2.4.0/microbenchmark.json | 564 +++++++++--------- .../2.4.0/scalability/object_store.json | 49 +- .../2.4.0/scalability/single_node.json | 76 +-- .../stress_tests/stress_test_dead_actors.json | 10 +- .../stress_tests/stress_test_many_tasks.json | 36 +- .../stress_test_placement_group.json | 8 +- 10 files changed, 490 insertions(+), 517 deletions(-) diff --git a/release/release_logs/2.4.0/benchmarks/many_actors.json b/release/release_logs/2.4.0/benchmarks/many_actors.json index 80bbbeb5746c..bb15979086f1 100644 --- a/release/release_logs/2.4.0/benchmarks/many_actors.json +++ b/release/release_logs/2.4.0/benchmarks/many_actors.json @@ -1,32 +1,32 @@ { - "_dashboard_memory_usage_mb": 513.560576, - "_dashboard_test_success": true, - "_peak_memory": 3.91, - "_peak_process_memory": "PID\tMEM\tCOMMAND\n165\t2.02GiB\t/home/ray/anaconda3/lib/python3.7/site-packages/ray/core/src/ray/gcs/gcs_server --log_dir=/tmp/ray/s\n2388\t0.85GiB\tpython distributed/test_many_actors.py\n353\t0.37GiB\t/home/ray/anaconda3/bin/python /home/ray/anaconda3/lib/python3.7/site-packages/ray/dashboard/dashboa\n41\t0.09GiB\t/home/ray/anaconda3/bin/python /home/ray/anaconda3/bin/anyscale session web_terminal_server --deploy\n670\t0.09GiB\t/home/ray/anaconda3/bin/python -u /home/ray/anaconda3/lib/python3.7/site-packages/ray/dashboard/agen\n38\t0.07GiB\t/home/ray/anaconda3/bin/python /home/ray/anaconda3/bin/jupyter-lab --ServerApp.token=agh0_CkgwRgIhAP\n2610\t0.07GiB\tray::DashboardTester.run\n2523\t0.07GiB\tray::MemoryMonitorActor.run\n280\t0.04GiB\t/home/ray/anaconda3/bin/python -m 
ray.util.client.server --address=172.31.113.246:9031 --host=0.0.0.\n553\t0.04GiB\t/home/ray/anaconda3/bin/python -u /home/ray/anaconda3/lib/python3.7/site-packages/ray/_private/log_m", - "actors_per_second": 772.644103201044, - "num_actors": 10000, - "perf_metrics": [ - { - "perf_metric_name": "actors_per_second", - "perf_metric_type": "THROUGHPUT", - "perf_metric_value": 772.644103201044 - }, - { - "perf_metric_name": "dashboard_p50_latency_ms", - "perf_metric_type": "LATENCY", - "perf_metric_value": 34.714 - }, - { - "perf_metric_name": "dashboard_p95_latency_ms", - "perf_metric_type": "LATENCY", - "perf_metric_value": 2419.503 - }, - { - "perf_metric_name": "dashboard_p99_latency_ms", - "perf_metric_type": "LATENCY", - "perf_metric_value": 3842.061 - } - ], - "success": "1", - "time": 12.942569494247437 + "_dashboard_memory_usage_mb": 574.578688, + "_dashboard_test_success": true, + "_peak_memory": 3.84, + "_peak_process_memory": "PID\tMEM\tCOMMAND\n165\t2.01GiB\t/home/ray/anaconda3/lib/python3.7/site-packages/ray/core/src/ray/gcs/gcs_server --log_dir=/tmp/ray/s\n2859\t0.83GiB\tpython distributed/test_many_actors.py\n338\t0.33GiB\t/home/ray/anaconda3/bin/python /home/ray/anaconda3/lib/python3.7/site-packages/ray/dashboard/dashboa\n41\t0.09GiB\t/home/ray/anaconda3/bin/python /home/ray/anaconda3/bin/anyscale session web_terminal_server --deploy\n639\t0.09GiB\t/home/ray/anaconda3/bin/python -u /home/ray/anaconda3/lib/python3.7/site-packages/ray/dashboard/agen\n38\t0.07GiB\t/home/ray/anaconda3/bin/python /home/ray/anaconda3/bin/jupyter-lab --ServerApp.token= --allow-root -\n3082\t0.07GiB\tray::DashboardTester.run\n2996\t0.07GiB\tray::MemoryMonitorActor.run\n265\t0.04GiB\t/home/ray/anaconda3/bin/python -m ray.util.client.server --address=172.31.97.64:9031 --host=0.0.0.0\n538\t0.04GiB\t/home/ray/anaconda3/bin/python -u /home/ray/anaconda3/lib/python3.7/site-packages/ray/_private/log_m", + "actors_per_second": 737.6387503180771, + "num_actors": 10000, + "perf_metrics": 
[ + { + "perf_metric_name": "actors_per_second", + "perf_metric_type": "THROUGHPUT", + "perf_metric_value": 737.6387503180771 + }, + { + "perf_metric_name": "dashboard_p50_latency_ms", + "perf_metric_type": "LATENCY", + "perf_metric_value": 28.971 + }, + { + "perf_metric_name": "dashboard_p95_latency_ms", + "perf_metric_type": "LATENCY", + "perf_metric_value": 1899.861 + }, + { + "perf_metric_name": "dashboard_p99_latency_ms", + "perf_metric_type": "LATENCY", + "perf_metric_value": 2901.064 + } + ], + "success": "1", + "time": 13.556771516799927 } diff --git a/release/release_logs/2.4.0/benchmarks/many_nodes.json b/release/release_logs/2.4.0/benchmarks/many_nodes.json index 8abafc45366a..844e5bbf70da 100644 --- a/release/release_logs/2.4.0/benchmarks/many_nodes.json +++ b/release/release_logs/2.4.0/benchmarks/many_nodes.json @@ -1,38 +1,38 @@ { - "_dashboard_memory_usage_mb": 186.179584, - "_dashboard_test_success": true, - "_peak_memory": 4.23, - "_peak_process_memory": "PID\tMEM\tCOMMAND\n277\t0.59GiB\t/home/ray/anaconda3/lib/python3.7/site-packages/ray/core/src/ray/gcs/gcs_server --log_dir=/tmp/ray/s\n1900\t0.22GiB\tpython distributed/test_many_tasks.py --num-tasks=1000\n477\t0.16GiB\t/home/ray/anaconda3/bin/python /home/ray/anaconda3/lib/python3.7/site-packages/ray/dashboard/dashboa\n811\t0.09GiB\t/home/ray/anaconda3/bin/python -u /home/ray/anaconda3/lib/python3.7/site-packages/ray/dashboard/agen\n56\t0.09GiB\t/home/ray/anaconda3/bin/python /home/ray/anaconda3/bin/anyscale session web_terminal_server --deploy\n2221\t0.08GiB\tray::StateAPIGeneratorActor.start\n1486\t0.08GiB\tray::JobSupervisor\n46\t0.07GiB\t/home/ray/anaconda3/bin/python /home/ray/anaconda3/bin/jupyter-lab --allow-root --ip=127.0.0.1 --no-\n2047\t0.07GiB\tray::MemoryMonitorActor.run\n2144\t0.07GiB\tray::DashboardTester.run", - "num_tasks": 1000, - "perf_metrics": [ - { - "perf_metric_name": "tasks_per_second", - "perf_metric_type": "THROUGHPUT", - "perf_metric_value": 216.16404352694366 - }, - { 
- "perf_metric_name": "used_cpus_by_deadline", - "perf_metric_type": "THROUGHPUT", - "perf_metric_value": 250.0 - }, - { - "perf_metric_name": "dashboard_p50_latency_ms", - "perf_metric_type": "LATENCY", - "perf_metric_value": 3.915 - }, - { - "perf_metric_name": "dashboard_p95_latency_ms", - "perf_metric_type": "LATENCY", - "perf_metric_value": 56.099 - }, - { - "perf_metric_name": "dashboard_p99_latency_ms", - "perf_metric_type": "LATENCY", - "perf_metric_value": 130.237 - } - ], - "success": "1", - "tasks_per_second": 216.16404352694366, - "time": 304.62611627578735, - "used_cpus": 250.0 + "_dashboard_memory_usage_mb": 187.82208, + "_dashboard_test_success": true, + "_peak_memory": 4.05, + "_peak_process_memory": "PID\tMEM\tCOMMAND\n277\t0.77GiB\t/home/ray/anaconda3/lib/python3.7/site-packages/ray/core/src/ray/gcs/gcs_server --log_dir=/tmp/ray/s\n1605\t0.23GiB\tpython distributed/test_many_tasks.py --num-tasks=1000\n450\t0.16GiB\t/home/ray/anaconda3/bin/python /home/ray/anaconda3/lib/python3.7/site-packages/ray/dashboard/dashboa\n783\t0.09GiB\t/home/ray/anaconda3/bin/python -u /home/ray/anaconda3/lib/python3.7/site-packages/ray/dashboard/agen\n61\t0.09GiB\t/home/ray/anaconda3/bin/python /home/ray/anaconda3/bin/anyscale session web_terminal_server --deploy\n1925\t0.08GiB\tray::StateAPIGeneratorActor.start\n1201\t0.08GiB\tray::JobSupervisor\n52\t0.07GiB\t/home/ray/anaconda3/bin/python /home/ray/anaconda3/bin/jupyter-lab --allow-root --ip=127.0.0.1 --no-\n1753\t0.07GiB\tray::MemoryMonitorActor.run\n1863\t0.07GiB\tray::DashboardTester.run", + "num_tasks": 1000, + "perf_metrics": [ + { + "perf_metric_name": "tasks_per_second", + "perf_metric_type": "THROUGHPUT", + "perf_metric_value": 228.0567062081301 + }, + { + "perf_metric_name": "used_cpus_by_deadline", + "perf_metric_type": "THROUGHPUT", + "perf_metric_value": 250.0 + }, + { + "perf_metric_name": "dashboard_p50_latency_ms", + "perf_metric_type": "LATENCY", + "perf_metric_value": 4.072 + }, + { + 
"perf_metric_name": "dashboard_p95_latency_ms", + "perf_metric_type": "LATENCY", + "perf_metric_value": 34.235 + }, + { + "perf_metric_name": "dashboard_p99_latency_ms", + "perf_metric_type": "LATENCY", + "perf_metric_value": 134.023 + } + ], + "success": "1", + "tasks_per_second": 228.0567062081301, + "time": 304.38487434387207, + "used_cpus": 250.0 } diff --git a/release/release_logs/2.4.0/benchmarks/many_pgs.json b/release/release_logs/2.4.0/benchmarks/many_pgs.json index 0c8b02ccdc3a..f6653ea5e65c 100644 --- a/release/release_logs/2.4.0/benchmarks/many_pgs.json +++ b/release/release_logs/2.4.0/benchmarks/many_pgs.json @@ -1,32 +1,32 @@ { - "_dashboard_memory_usage_mb": 166.48192, - "_dashboard_test_success": true, - "_peak_memory": 4.85, - "_peak_process_memory": "PID\tMEM\tCOMMAND\n256\t1.02GiB\t/home/ray/anaconda3/lib/python3.7/site-packages/ray/core/src/ray/gcs/gcs_server --log_dir=/tmp/ray/s\n1705\t0.4GiB\tpython distributed/test_many_pgs.py\n471\t0.15GiB\t/home/ray/anaconda3/bin/python /home/ray/anaconda3/lib/python3.7/site-packages/ray/dashboard/dashboa\n803\t0.11GiB\t/home/ray/anaconda3/bin/python -u /home/ray/anaconda3/lib/python3.7/site-packages/ray/dashboard/agen\n61\t0.09GiB\t/home/ray/anaconda3/bin/python /home/ray/anaconda3/bin/anyscale session web_terminal_server --deploy\n608\t0.08GiB\t/home/ray/anaconda3/lib/python3.7/site-packages/ray/core/src/ray/raylet/raylet --raylet_socket_name=\n1300\t0.07GiB\tray::JobSupervisor\n48\t0.07GiB\t/home/ray/anaconda3/bin/python /home/ray/anaconda3/bin/jupyter-lab --allow-root --ip=127.0.0.1 --no-\n1963\t0.07GiB\tray::DashboardTester.run\n1852\t0.07GiB\tray::MemoryMonitorActor.run", - "num_pgs": 1000, - "perf_metrics": [ - { - "perf_metric_name": "pgs_per_second", - "perf_metric_type": "THROUGHPUT", - "perf_metric_value": 17.929446347235622 - }, - { - "perf_metric_name": "dashboard_p50_latency_ms", - "perf_metric_type": "LATENCY", - "perf_metric_value": 3.307 - }, - { - "perf_metric_name": 
"dashboard_p95_latency_ms", - "perf_metric_type": "LATENCY", - "perf_metric_value": 128.925 - }, - { - "perf_metric_name": "dashboard_p99_latency_ms", - "perf_metric_type": "LATENCY", - "perf_metric_value": 197.038 - } - ], - "pgs_per_second": 17.929446347235622, - "success": "1", - "time": 55.77417063713074 + "_dashboard_memory_usage_mb": 181.796864, + "_dashboard_test_success": true, + "_peak_memory": 4.48, + "_peak_process_memory": "PID\tMEM\tCOMMAND\n277\t1.15GiB\t/home/ray/anaconda3/lib/python3.7/site-packages/ray/core/src/ray/gcs/gcs_server --log_dir=/tmp/ray/s\n1724\t0.35GiB\tpython distributed/test_many_pgs.py\n468\t0.12GiB\t/home/ray/anaconda3/bin/python /home/ray/anaconda3/lib/python3.7/site-packages/ray/dashboard/dashboa\n774\t0.11GiB\t/home/ray/anaconda3/bin/python -u /home/ray/anaconda3/lib/python3.7/site-packages/ray/dashboard/agen\n595\t0.09GiB\t/home/ray/anaconda3/lib/python3.7/site-packages/ray/core/src/ray/raylet/raylet --raylet_socket_name=\n69\t0.09GiB\t/home/ray/anaconda3/bin/python /home/ray/anaconda3/bin/anyscale session web_terminal_server --deploy\n1318\t0.07GiB\tray::JobSupervisor\n52\t0.07GiB\t/home/ray/anaconda3/bin/python /home/ray/anaconda3/bin/jupyter-lab --allow-root --ip=127.0.0.1 --no-\n1869\t0.07GiB\tray::MemoryMonitorActor.run\n1965\t0.06GiB\tray::DashboardTester.run", + "num_pgs": 1000, + "perf_metrics": [ + { + "perf_metric_name": "pgs_per_second", + "perf_metric_type": "THROUGHPUT", + "perf_metric_value": 17.321330540558634 + }, + { + "perf_metric_name": "dashboard_p50_latency_ms", + "perf_metric_type": "LATENCY", + "perf_metric_value": 3.244 + }, + { + "perf_metric_name": "dashboard_p95_latency_ms", + "perf_metric_type": "LATENCY", + "perf_metric_value": 134.793 + }, + { + "perf_metric_name": "dashboard_p99_latency_ms", + "perf_metric_type": "LATENCY", + "perf_metric_value": 171.365 + } + ], + "pgs_per_second": 17.321330540558634, + "success": "1", + "time": 57.732285499572754 } diff --git 
a/release/release_logs/2.4.0/benchmarks/many_tasks.json b/release/release_logs/2.4.0/benchmarks/many_tasks.json index 01ca873ccb04..e5e7c8f8a7f7 100644 --- a/release/release_logs/2.4.0/benchmarks/many_tasks.json +++ b/release/release_logs/2.4.0/benchmarks/many_tasks.json @@ -1,38 +1,38 @@ { - "_dashboard_memory_usage_mb": 659.90656, - "_dashboard_test_success": true, - "_peak_memory": 4.44, - "_peak_process_memory": "PID\tMEM\tCOMMAND\n165\t1.99GiB\t/home/ray/anaconda3/lib/python3.7/site-packages/ray/core/src/ray/gcs/gcs_server --log_dir=/tmp/ray/s\n2232\t0.87GiB\tpython distributed/test_many_tasks.py --num-tasks=10000\n353\t0.74GiB\t/home/ray/anaconda3/bin/python /home/ray/anaconda3/lib/python3.7/site-packages/ray/dashboard/dashboa\n2457\t0.1GiB\tray::DashboardTester.run\n41\t0.09GiB\t/home/ray/anaconda3/bin/python /home/ray/anaconda3/bin/anyscale session web_terminal_server --deploy\n670\t0.09GiB\t/home/ray/anaconda3/bin/python -u /home/ray/anaconda3/lib/python3.7/site-packages/ray/dashboard/agen\n38\t0.07GiB\t/home/ray/anaconda3/bin/python /home/ray/anaconda3/bin/jupyter-lab --ServerApp.token=agh0_CkYwRAIgH_\n2520\t0.07GiB\tray::StateAPIGeneratorActor.start\n2370\t0.07GiB\tray::MemoryMonitorActor.run\n280\t0.04GiB\t/home/ray/anaconda3/bin/python -m ray.util.client.server --address=172.31.96.232:9031 --host=0.0.0.0", - "num_tasks": 10000, - "perf_metrics": [ - { - "perf_metric_name": "tasks_per_second", - "perf_metric_type": "THROUGHPUT", - "perf_metric_value": 324.3236630929032 - }, - { - "perf_metric_name": "used_cpus_by_deadline", - "perf_metric_type": "THROUGHPUT", - "perf_metric_value": 2500.0 - }, - { - "perf_metric_name": "dashboard_p50_latency_ms", - "perf_metric_type": "LATENCY", - "perf_metric_value": 5.019 - }, - { - "perf_metric_name": "dashboard_p95_latency_ms", - "perf_metric_type": "LATENCY", - "perf_metric_value": 2788.74 - }, - { - "perf_metric_name": "dashboard_p99_latency_ms", - "perf_metric_type": "LATENCY", - "perf_metric_value": 3431.297 - } 
- ], - "success": "1", - "tasks_per_second": 324.3236630929032, - "time": 330.83339619636536, - "used_cpus": 2500.0 + "_dashboard_memory_usage_mb": 611.98336, + "_dashboard_test_success": true, + "_peak_memory": 4.61, + "_peak_process_memory": "PID\tMEM\tCOMMAND\n165\t2.19GiB\t/home/ray/anaconda3/lib/python3.7/site-packages/ray/core/src/ray/gcs/gcs_server --log_dir=/tmp/ray/s\n2582\t0.86GiB\tpython distributed/test_many_tasks.py --num-tasks=10000\n338\t0.71GiB\t/home/ray/anaconda3/bin/python /home/ray/anaconda3/lib/python3.7/site-packages/ray/dashboard/dashboa\n2807\t0.1GiB\tray::DashboardTester.run\n43\t0.09GiB\t/home/ray/anaconda3/bin/python /home/ray/anaconda3/bin/anyscale session web_terminal_server --deploy\n639\t0.09GiB\t/home/ray/anaconda3/bin/python -u /home/ray/anaconda3/lib/python3.7/site-packages/ray/dashboard/agen\n40\t0.07GiB\t/home/ray/anaconda3/bin/python /home/ray/anaconda3/bin/jupyter-lab --ServerApp.token= --allow-root -\n2870\t0.07GiB\tray::StateAPIGeneratorActor.start\n2720\t0.07GiB\tray::MemoryMonitorActor.run\n265\t0.04GiB\t/home/ray/anaconda3/bin/python -m ray.util.client.server --address=172.31.106.119:9031 --host=0.0.0.", + "num_tasks": 10000, + "perf_metrics": [ + { + "perf_metric_name": "tasks_per_second", + "perf_metric_type": "THROUGHPUT", + "perf_metric_value": 296.54971037133174 + }, + { + "perf_metric_name": "used_cpus_by_deadline", + "perf_metric_type": "THROUGHPUT", + "perf_metric_value": 2500.0 + }, + { + "perf_metric_name": "dashboard_p50_latency_ms", + "perf_metric_type": "LATENCY", + "perf_metric_value": 5.024 + }, + { + "perf_metric_name": "dashboard_p95_latency_ms", + "perf_metric_type": "LATENCY", + "perf_metric_value": 2713.875 + }, + { + "perf_metric_name": "dashboard_p99_latency_ms", + "perf_metric_type": "LATENCY", + "perf_metric_value": 3772.233 + } + ], + "success": "1", + "tasks_per_second": 296.54971037133174, + "time": 333.7211592197418, + "used_cpus": 2500.0 } diff --git 
a/release/release_logs/2.4.0/microbenchmark.json b/release/release_logs/2.4.0/microbenchmark.json index e0494e2c93f9..af88513bbd59 100644 --- a/release/release_logs/2.4.0/microbenchmark.json +++ b/release/release_logs/2.4.0/microbenchmark.json @@ -1,283 +1,283 @@ { - "1_1_actor_calls_async": [ - 7875.2205662523575, - 91.32036915829057 - ], - "1_1_actor_calls_concurrent": [ - 4898.403930689569, - 18.787428785414974 - ], - "1_1_actor_calls_sync": [ - 2490.1228801310986, - 61.66895883060133 - ], - "1_1_async_actor_calls_async": [ - 2893.7637668160814, - 58.54240700476942 - ], - "1_1_async_actor_calls_sync": [ - 1663.656683863718, - 82.11634772966543 - ], - "1_1_async_actor_calls_with_args_async": [ - 2053.88520928116, - 77.88191802930348 - ], - "1_n_actor_calls_async": [ - 10918.570247859934, - 252.77023513295532 - ], - "1_n_async_actor_calls_async": [ - 10007.72780551488, - 63.167900610588546 - ], - "client__1_1_actor_calls_async": [ - 1053.9221163152763, - 16.386239593374267 - ], - "client__1_1_actor_calls_concurrent": [ - 1067.680489513954, - 12.709001083081098 - ], - "client__1_1_actor_calls_sync": [ - 587.2290114221607, - 11.254483031373043 - ], - "client__get_calls": [ - 1169.0846386325316, - 30.090440134694333 - ], - "client__put_calls": [ - 953.6497274525543, - 27.62987622516938 - ], - "client__put_gigabytes": [ - 0.04453569846336401, - 0.0005889062724214797 - ], - "client__tasks_and_get_batch": [ - 0.9962426891274011, - 0.012695780047090623 - ], - "client__tasks_and_put_batch": [ - 11636.76543061424, - 344.5895993880181 - ], - "multi_client_put_calls_Plasma_Store": [ - 12782.464998678728, - 303.0850304711337 - ], - "multi_client_put_gigabytes": [ - 25.409834920338362, - 0.9620047718388134 - ], - "multi_client_tasks_async": [ - 29499.600819285548, - 1838.097234923537 - ], - "n_n_actor_calls_async": [ - 31558.549225320676, - 675.1014999204177 - ], - "n_n_actor_calls_with_arg_async": [ - 3114.287612859598, - 43.68969356632565 - ], - 
"n_n_async_actor_calls_async": [ - 25348.416455631697, - 927.5572889075144 - ], - "perf_metrics": [ - { - "perf_metric_name": "single_client_get_calls_Plasma_Store", - "perf_metric_type": "THROUGHPUT", - "perf_metric_value": 6717.39789740067 - }, - { - "perf_metric_name": "single_client_put_calls_Plasma_Store", - "perf_metric_type": "THROUGHPUT", - "perf_metric_value": 6141.116222223478 - }, - { - "perf_metric_name": "multi_client_put_calls_Plasma_Store", - "perf_metric_type": "THROUGHPUT", - "perf_metric_value": 12782.464998678728 - }, - { - "perf_metric_name": "single_client_put_gigabytes", - "perf_metric_type": "THROUGHPUT", - "perf_metric_value": 19.77763371659089 - }, - { - "perf_metric_name": "single_client_tasks_and_get_batch", - "perf_metric_type": "THROUGHPUT", - "perf_metric_value": 10.903109137328919 - }, - { - "perf_metric_name": "multi_client_put_gigabytes", - "perf_metric_type": "THROUGHPUT", - "perf_metric_value": 25.409834920338362 - }, - { - "perf_metric_name": "single_client_get_object_containing_10k_refs", - "perf_metric_type": "THROUGHPUT", - "perf_metric_value": 12.601815520396993 - }, - { - "perf_metric_name": "single_client_wait_1k_refs", - "perf_metric_type": "THROUGHPUT", - "perf_metric_value": 6.25672193789162 - }, - { - "perf_metric_name": "single_client_tasks_sync", - "perf_metric_type": "THROUGHPUT", - "perf_metric_value": 1315.6532263025485 - }, - { - "perf_metric_name": "single_client_tasks_async", - "perf_metric_type": "THROUGHPUT", - "perf_metric_value": 10565.441516164923 - }, - { - "perf_metric_name": "multi_client_tasks_async", - "perf_metric_type": "THROUGHPUT", - "perf_metric_value": 29499.600819285548 - }, - { - "perf_metric_name": "1_1_actor_calls_sync", - "perf_metric_type": "THROUGHPUT", - "perf_metric_value": 2490.1228801310986 - }, - { - "perf_metric_name": "1_1_actor_calls_async", - "perf_metric_type": "THROUGHPUT", - "perf_metric_value": 7875.2205662523575 - }, - { - "perf_metric_name": "1_1_actor_calls_concurrent", - 
"perf_metric_type": "THROUGHPUT", - "perf_metric_value": 4898.403930689569 - }, - { - "perf_metric_name": "1_n_actor_calls_async", - "perf_metric_type": "THROUGHPUT", - "perf_metric_value": 10918.570247859934 - }, - { - "perf_metric_name": "n_n_actor_calls_async", - "perf_metric_type": "THROUGHPUT", - "perf_metric_value": 31558.549225320676 - }, - { - "perf_metric_name": "n_n_actor_calls_with_arg_async", - "perf_metric_type": "THROUGHPUT", - "perf_metric_value": 3114.287612859598 - }, - { - "perf_metric_name": "1_1_async_actor_calls_sync", - "perf_metric_type": "THROUGHPUT", - "perf_metric_value": 1663.656683863718 - }, - { - "perf_metric_name": "1_1_async_actor_calls_async", - "perf_metric_type": "THROUGHPUT", - "perf_metric_value": 2893.7637668160814 - }, - { - "perf_metric_name": "1_1_async_actor_calls_with_args_async", - "perf_metric_type": "THROUGHPUT", - "perf_metric_value": 2053.88520928116 - }, - { - "perf_metric_name": "1_n_async_actor_calls_async", - "perf_metric_type": "THROUGHPUT", - "perf_metric_value": 10007.72780551488 - }, - { - "perf_metric_name": "n_n_async_actor_calls_async", - "perf_metric_type": "THROUGHPUT", - "perf_metric_value": 25348.416455631697 - }, - { - "perf_metric_name": "placement_group_create/removal", - "perf_metric_type": "THROUGHPUT", - "perf_metric_value": 1010.4619236411705 - }, - { - "perf_metric_name": "client__get_calls", - "perf_metric_type": "THROUGHPUT", - "perf_metric_value": 1169.0846386325316 - }, - { - "perf_metric_name": "client__put_calls", - "perf_metric_type": "THROUGHPUT", - "perf_metric_value": 953.6497274525543 - }, - { - "perf_metric_name": "client__put_gigabytes", - "perf_metric_type": "THROUGHPUT", - "perf_metric_value": 0.04453569846336401 - }, - { - "perf_metric_name": "client__tasks_and_put_batch", - "perf_metric_type": "THROUGHPUT", - "perf_metric_value": 11636.76543061424 - }, - { - "perf_metric_name": "client__1_1_actor_calls_sync", - "perf_metric_type": "THROUGHPUT", - "perf_metric_value": 
587.2290114221607 - }, - { - "perf_metric_name": "client__1_1_actor_calls_async", - "perf_metric_type": "THROUGHPUT", - "perf_metric_value": 1053.9221163152763 - }, - { - "perf_metric_name": "client__1_1_actor_calls_concurrent", - "perf_metric_type": "THROUGHPUT", - "perf_metric_value": 1067.680489513954 - }, - { - "perf_metric_name": "client__tasks_and_get_batch", - "perf_metric_type": "THROUGHPUT", - "perf_metric_value": 0.9962426891274011 - } - ], - "placement_group_create/removal": [ - 1010.4619236411705, - 10.079449349718745 - ], - "single_client_get_calls_Plasma_Store": [ - 6717.39789740067, - 383.3989162609101 - ], - "single_client_get_object_containing_10k_refs": [ - 12.601815520396993, - 0.02598969871083366 - ], - "single_client_put_calls_Plasma_Store": [ - 6141.116222223478, - 55.35476909877019 - ], - "single_client_put_gigabytes": [ - 19.77763371659089, - 4.567428080163564 - ], - "single_client_tasks_and_get_batch": [ - 10.903109137328919, - 0.578088867627798 - ], - "single_client_tasks_async": [ - 10565.441516164923, - 517.415416188027 - ], - "single_client_tasks_sync": [ - 1315.6532263025485, - 28.172086151746203 - ], - "single_client_wait_1k_refs": [ - 6.25672193789162, - 0.1697384586263945 - ] -} \ No newline at end of file + "1_1_actor_calls_async": [ + 8774.565215109124, + 99.25145099727403 + ], + "1_1_actor_calls_concurrent": [ + 5556.408295085073, + 310.93719458837893 + ], + "1_1_actor_calls_sync": [ + 2627.6907826949946, + 17.95559542036301 + ], + "1_1_async_actor_calls_async": [ + 3053.0727243465535, + 44.040906553833615 + ], + "1_1_async_actor_calls_sync": [ + 1749.332379578524, + 36.34387252208381 + ], + "1_1_async_actor_calls_with_args_async": [ + 2438.8474219503923, + 88.20268908351537 + ], + "1_n_actor_calls_async": [ + 11443.027888783963, + 87.49946447311738 + ], + "1_n_async_actor_calls_async": [ + 10589.09103935827, + 106.34051316384893 + ], + "client__1_1_actor_calls_async": [ + 1084.4466197839831, + 33.13091505679245 + ], + 
"client__1_1_actor_calls_concurrent": [ + 1106.1285553207586, + 25.829782511660305 + ], + "client__1_1_actor_calls_sync": [ + 569.89256256793, + 24.8044284674939 + ], + "client__get_calls": [ + 1150.072552279968, + 36.79667266684934 + ], + "client__put_calls": [ + 904.8795868787086, + 13.073088078601502 + ], + "client__put_gigabytes": [ + 0.045687216636994404, + 0.00042081305098886794 + ], + "client__tasks_and_get_batch": [ + 0.9467558674435517, + 0.053000735219486415 + ], + "client__tasks_and_put_batch": [ + 12964.98005883289, + 319.3993930878376 + ], + "multi_client_put_calls_Plasma_Store": [ + 13685.622603708454, + 107.89607650269706 + ], + "multi_client_put_gigabytes": [ + 31.944310601566972, + 0.8025680665642678 + ], + "multi_client_tasks_async": [ + 34377.35783189367, + 2098.212516049616 + ], + "n_n_actor_calls_async": [ + 34184.700321977085, + 833.4165939251417 + ], + "n_n_actor_calls_with_arg_async": [ + 3086.29057625603, + 35.752775132663736 + ], + "n_n_async_actor_calls_async": [ + 27000.281282494278, + 1192.3434495510094 + ], + "perf_metrics": [ + { + "perf_metric_name": "single_client_get_calls_Plasma_Store", + "perf_metric_type": "THROUGHPUT", + "perf_metric_value": 6220.115319914286 + }, + { + "perf_metric_name": "single_client_put_calls_Plasma_Store", + "perf_metric_type": "THROUGHPUT", + "perf_metric_value": 6427.78729348863 + }, + { + "perf_metric_name": "multi_client_put_calls_Plasma_Store", + "perf_metric_type": "THROUGHPUT", + "perf_metric_value": 13685.622603708454 + }, + { + "perf_metric_name": "single_client_put_gigabytes", + "perf_metric_type": "THROUGHPUT", + "perf_metric_value": 20.114238761619227 + }, + { + "perf_metric_name": "single_client_tasks_and_get_batch", + "perf_metric_type": "THROUGHPUT", + "perf_metric_value": 10.621044232599615 + }, + { + "perf_metric_name": "multi_client_put_gigabytes", + "perf_metric_type": "THROUGHPUT", + "perf_metric_value": 31.944310601566972 + }, + { + "perf_metric_name": 
"single_client_get_object_containing_10k_refs", + "perf_metric_type": "THROUGHPUT", + "perf_metric_value": 13.4303118492593 + }, + { + "perf_metric_name": "single_client_wait_1k_refs", + "perf_metric_type": "THROUGHPUT", + "perf_metric_value": 5.669743175103769 + }, + { + "perf_metric_name": "single_client_tasks_sync", + "perf_metric_type": "THROUGHPUT", + "perf_metric_value": 1402.5799311893395 + }, + { + "perf_metric_name": "single_client_tasks_async", + "perf_metric_type": "THROUGHPUT", + "perf_metric_value": 11589.713176381527 + }, + { + "perf_metric_name": "multi_client_tasks_async", + "perf_metric_type": "THROUGHPUT", + "perf_metric_value": 34377.35783189367 + }, + { + "perf_metric_name": "1_1_actor_calls_sync", + "perf_metric_type": "THROUGHPUT", + "perf_metric_value": 2627.6907826949946 + }, + { + "perf_metric_name": "1_1_actor_calls_async", + "perf_metric_type": "THROUGHPUT", + "perf_metric_value": 8774.565215109124 + }, + { + "perf_metric_name": "1_1_actor_calls_concurrent", + "perf_metric_type": "THROUGHPUT", + "perf_metric_value": 5556.408295085073 + }, + { + "perf_metric_name": "1_n_actor_calls_async", + "perf_metric_type": "THROUGHPUT", + "perf_metric_value": 11443.027888783963 + }, + { + "perf_metric_name": "n_n_actor_calls_async", + "perf_metric_type": "THROUGHPUT", + "perf_metric_value": 34184.700321977085 + }, + { + "perf_metric_name": "n_n_actor_calls_with_arg_async", + "perf_metric_type": "THROUGHPUT", + "perf_metric_value": 3086.29057625603 + }, + { + "perf_metric_name": "1_1_async_actor_calls_sync", + "perf_metric_type": "THROUGHPUT", + "perf_metric_value": 1749.332379578524 + }, + { + "perf_metric_name": "1_1_async_actor_calls_async", + "perf_metric_type": "THROUGHPUT", + "perf_metric_value": 3053.0727243465535 + }, + { + "perf_metric_name": "1_1_async_actor_calls_with_args_async", + "perf_metric_type": "THROUGHPUT", + "perf_metric_value": 2438.8474219503923 + }, + { + "perf_metric_name": "1_n_async_actor_calls_async", + "perf_metric_type": 
"THROUGHPUT", + "perf_metric_value": 10589.09103935827 + }, + { + "perf_metric_name": "n_n_async_actor_calls_async", + "perf_metric_type": "THROUGHPUT", + "perf_metric_value": 27000.281282494278 + }, + { + "perf_metric_name": "placement_group_create/removal", + "perf_metric_type": "THROUGHPUT", + "perf_metric_value": 1111.4458914419295 + }, + { + "perf_metric_name": "client__get_calls", + "perf_metric_type": "THROUGHPUT", + "perf_metric_value": 1150.072552279968 + }, + { + "perf_metric_name": "client__put_calls", + "perf_metric_type": "THROUGHPUT", + "perf_metric_value": 904.8795868787086 + }, + { + "perf_metric_name": "client__put_gigabytes", + "perf_metric_type": "THROUGHPUT", + "perf_metric_value": 0.045687216636994404 + }, + { + "perf_metric_name": "client__tasks_and_put_batch", + "perf_metric_type": "THROUGHPUT", + "perf_metric_value": 12964.98005883289 + }, + { + "perf_metric_name": "client__1_1_actor_calls_sync", + "perf_metric_type": "THROUGHPUT", + "perf_metric_value": 569.89256256793 + }, + { + "perf_metric_name": "client__1_1_actor_calls_async", + "perf_metric_type": "THROUGHPUT", + "perf_metric_value": 1084.4466197839831 + }, + { + "perf_metric_name": "client__1_1_actor_calls_concurrent", + "perf_metric_type": "THROUGHPUT", + "perf_metric_value": 1106.1285553207586 + }, + { + "perf_metric_name": "client__tasks_and_get_batch", + "perf_metric_type": "THROUGHPUT", + "perf_metric_value": 0.9467558674435517 + } + ], + "placement_group_create/removal": [ + 1111.4458914419295, + 36.12535104191624 + ], + "single_client_get_calls_Plasma_Store": [ + 6220.115319914286, + 224.5052300173086 + ], + "single_client_get_object_containing_10k_refs": [ + 13.4303118492593, + 0.17122469816034125 + ], + "single_client_put_calls_Plasma_Store": [ + 6427.78729348863, + 84.21488331092435 + ], + "single_client_put_gigabytes": [ + 20.114238761619227, + 6.003066360606937 + ], + "single_client_tasks_and_get_batch": [ + 10.621044232599615, + 0.7018065234293067 + ], + 
"single_client_tasks_async": [ + 11589.713176381527, + 69.968115451784 + ], + "single_client_tasks_sync": [ + 1402.5799311893395, + 44.22066668152743 + ], + "single_client_wait_1k_refs": [ + 5.669743175103769, + 0.16404015686449241 + ] +} diff --git a/release/release_logs/2.4.0/scalability/object_store.json b/release/release_logs/2.4.0/scalability/object_store.json index 7a9f43ff8c59..726d2fdf756e 100644 --- a/release/release_logs/2.4.0/scalability/object_store.json +++ b/release/release_logs/2.4.0/scalability/object_store.json @@ -1,40 +1,13 @@ { - "args_time": 16.78179498900002, - "get_time": 24.349769197, - "large_object_size": 107374182400, - "large_object_time": 34.58217635799997, - "num_args": 10000, - "num_get_args": 10000, - "num_queued": 1000000, - "num_returns": 3000, - "perf_metrics": [ - { - "perf_metric_name": "10000_args_time", - "perf_metric_type": "LATENCY", - "perf_metric_value": 16.78179498900002 - }, - { - "perf_metric_name": "3000_returns_time", - "perf_metric_type": "LATENCY", - "perf_metric_value": 6.026168543000011 - }, - { - "perf_metric_name": "10000_get_time", - "perf_metric_type": "LATENCY", - "perf_metric_value": 24.349769197 - }, - { - "perf_metric_name": "1000000_queued_time", - "perf_metric_type": "LATENCY", - "perf_metric_value": 239.18845452300002 - }, - { - "perf_metric_name": "107374182400_large_object_time", - "perf_metric_type": "LATENCY", - "perf_metric_value": 34.58217635799997 - } - ], - "queued_time": 239.18845452300002, - "returns_time": 6.026168543000011, - "success": "1" + "broadcast_time": 89.42807069100002, + "num_nodes": 50, + "object_size": 1073741824, + "perf_metrics": [ + { + "perf_metric_name": "time_to_broadcast_1073741824_bytes_to_50_nodes", + "perf_metric_type": "LATENCY", + "perf_metric_value": 89.42807069100002 + } + ], + "success": "1" } diff --git a/release/release_logs/2.4.0/scalability/single_node.json b/release/release_logs/2.4.0/scalability/single_node.json index 7a9f43ff8c59..2183a69fd2ae 100644 --- 
a/release/release_logs/2.4.0/scalability/single_node.json +++ b/release/release_logs/2.4.0/scalability/single_node.json @@ -1,40 +1,40 @@ { - "args_time": 16.78179498900002, - "get_time": 24.349769197, - "large_object_size": 107374182400, - "large_object_time": 34.58217635799997, - "num_args": 10000, - "num_get_args": 10000, - "num_queued": 1000000, - "num_returns": 3000, - "perf_metrics": [ - { - "perf_metric_name": "10000_args_time", - "perf_metric_type": "LATENCY", - "perf_metric_value": 16.78179498900002 - }, - { - "perf_metric_name": "3000_returns_time", - "perf_metric_type": "LATENCY", - "perf_metric_value": 6.026168543000011 - }, - { - "perf_metric_name": "10000_get_time", - "perf_metric_type": "LATENCY", - "perf_metric_value": 24.349769197 - }, - { - "perf_metric_name": "1000000_queued_time", - "perf_metric_type": "LATENCY", - "perf_metric_value": 239.18845452300002 - }, - { - "perf_metric_name": "107374182400_large_object_time", - "perf_metric_type": "LATENCY", - "perf_metric_value": 34.58217635799997 - } - ], - "queued_time": 239.18845452300002, - "returns_time": 6.026168543000011, - "success": "1" + "args_time": 17.054044035999993, + "get_time": 24.36676771400002, + "large_object_size": 107374182400, + "large_object_time": 34.06999374499998, + "num_args": 10000, + "num_get_args": 10000, + "num_queued": 1000000, + "num_returns": 3000, + "perf_metrics": [ + { + "perf_metric_name": "10000_args_time", + "perf_metric_type": "LATENCY", + "perf_metric_value": 17.054044035999993 + }, + { + "perf_metric_name": "3000_returns_time", + "perf_metric_type": "LATENCY", + "perf_metric_value": 6.002825282000003 + }, + { + "perf_metric_name": "10000_get_time", + "perf_metric_type": "LATENCY", + "perf_metric_value": 24.36676771400002 + }, + { + "perf_metric_name": "1000000_queued_time", + "perf_metric_type": "LATENCY", + "perf_metric_value": 175.74473816900002 + }, + { + "perf_metric_name": "107374182400_large_object_time", + "perf_metric_type": "LATENCY", + 
"perf_metric_value": 34.06999374499998 + } + ], + "queued_time": 175.74473816900002, + "returns_time": 6.002825282000003, + "success": "1" } diff --git a/release/release_logs/2.4.0/stress_tests/stress_test_dead_actors.json b/release/release_logs/2.4.0/stress_tests/stress_test_dead_actors.json index 4674b3febdbd..8e7263ecac1e 100644 --- a/release/release_logs/2.4.0/stress_tests/stress_test_dead_actors.json +++ b/release/release_logs/2.4.0/stress_tests/stress_test_dead_actors.json @@ -1,14 +1,14 @@ { - "avg_iteration_time": 2.4128899502754213, - "max_iteration_time": 11.154391050338745, - "min_iteration_time": 0.2948293685913086, + "avg_iteration_time": 2.0598055481910706, + "max_iteration_time": 15.883565187454224, + "min_iteration_time": 0.14446020126342773, "perf_metrics": [ { "perf_metric_name": "avg_iteration_time", "perf_metric_type": "LATENCY", - "perf_metric_value": 2.4128899502754213 + "perf_metric_value": 2.0598055481910706 } ], "success": 1, - "total_time": 241.28934574127197 + "total_time": 205.98083114624023 } diff --git a/release/release_logs/2.4.0/stress_tests/stress_test_many_tasks.json b/release/release_logs/2.4.0/stress_tests/stress_test_many_tasks.json index cfe605e556d4..f863c0b276fc 100644 --- a/release/release_logs/2.4.0/stress_tests/stress_test_many_tasks.json +++ b/release/release_logs/2.4.0/stress_tests/stress_test_many_tasks.json @@ -3,45 +3,45 @@ { "perf_metric_name": "stage_0_time", "perf_metric_type": "LATENCY", - "perf_metric_value": 14.71593689918518 + "perf_metric_value": 12.432663917541504 }, { "perf_metric_name": "stage_1_avg_iteration_time", "perf_metric_type": "LATENCY", - "perf_metric_value": 23.45582284927368 + "perf_metric_value": 22.303217387199403 }, { "perf_metric_name": "stage_2_avg_iteration_time", "perf_metric_type": "LATENCY", - "perf_metric_value": 59.12007422447205 + "perf_metric_value": 58.13721342086792 }, { "perf_metric_name": "stage_3_creation_time", "perf_metric_type": "LATENCY", - "perf_metric_value": 
5.639009952545166 + "perf_metric_value": 4.9506330490112305 }, { "perf_metric_name": "stage_3_time", "perf_metric_type": "LATENCY", - "perf_metric_value": 2746.616822242737 + "perf_metric_value": 2541.1457979679108 }, { "perf_metric_name": "stage_4_spread", "perf_metric_type": "LATENCY", - "perf_metric_value": 0.8324665945841853 + "perf_metric_value": 0.653147785580004 } ], - "stage_0_time": 14.71593689918518, - "stage_1_avg_iteration_time": 23.45582284927368, - "stage_1_max_iteration_time": 24.144246339797974, - "stage_1_min_iteration_time": 22.638681411743164, - "stage_1_time": 234.5583221912384, - "stage_2_avg_iteration_time": 59.12007422447205, - "stage_2_max_iteration_time": 60.01493453979492, - "stage_2_min_iteration_time": 57.3223192691803, - "stage_2_time": 295.60199069976807, - "stage_3_creation_time": 5.639009952545166, - "stage_3_time": 2746.616822242737, - "stage_4_spread": 0.8324665945841853, + "stage_0_time": 12.432663917541504, + "stage_1_avg_iteration_time": 22.303217387199403, + "stage_1_max_iteration_time": 23.565119743347168, + "stage_1_min_iteration_time": 21.196225881576538, + "stage_1_time": 223.03227972984314, + "stage_2_avg_iteration_time": 58.13721342086792, + "stage_2_max_iteration_time": 58.608288526535034, + "stage_2_min_iteration_time": 57.48827838897705, + "stage_2_time": 290.6870460510254, + "stage_3_creation_time": 4.9506330490112305, + "stage_3_time": 2541.1457979679108, + "stage_4_spread": 0.653147785580004, "success": 1 } diff --git a/release/release_logs/2.4.0/stress_tests/stress_test_placement_group.json b/release/release_logs/2.4.0/stress_tests/stress_test_placement_group.json index bc4bc4ae8d49..751120113766 100644 --- a/release/release_logs/2.4.0/stress_tests/stress_test_placement_group.json +++ b/release/release_logs/2.4.0/stress_tests/stress_test_placement_group.json @@ -1,16 +1,16 @@ { - "avg_pg_create_time_ms": 0.9266749219217597, - "avg_pg_remove_time_ms": 0.9992335435431319, + "avg_pg_create_time_ms": 
0.8416926516519441, + "avg_pg_remove_time_ms": 0.8426233363353036, "perf_metrics": [ { "perf_metric_name": "avg_pg_create_time_ms", "perf_metric_type": "LATENCY", - "perf_metric_value": 0.9266749219217597 + "perf_metric_value": 0.8416926516519441 }, { "perf_metric_name": "avg_pg_remove_time_ms", "perf_metric_type": "LATENCY", - "perf_metric_value": 0.9992335435431319 + "perf_metric_value": 0.8426233363353036 } ], "success": 1 From ac0031b6726b6738579695afb6abcf49a07417bb Mon Sep 17 00:00:00 2001 From: Edward Oakes Date: Mon, 24 Apr 2023 16:51:20 -0500 Subject: [PATCH 077/424] [serve] Replace `ClassNode` and `FunctionNode` with `Application` in top-level Serve APIs (#34627) A bit of renaming to streamline the concepts in the API: - Instead of bind() returning ClassNode or FunctionNode (which are very in the weeds), it returns an Application object. - This required renaming the output of serve.build, which I've renamed to BuiltApplication (checks out conceptually - serve.build(app) -> built_app. - From now on, we should never make mention of "nodes" or "graphs" in user-facing docs/docstrings except for where we are explicitly talking about the call graph API (with InputNode: ...). I will update the docs accordingly. - We should also type annotate our documentation examples where possible to make the concepts very clear. 
--- python/ray/serve/BUILD | 4 +- python/ray/serve/__init__.py | 6 ++ python/ray/serve/_private/api.py | 20 ++--- .../serve/_private/deployment_graph_build.py | 9 ++ python/ray/serve/api.py | 87 +++++++------------ .../{application.py => built_application.py} | 2 +- python/ray/serve/deployment.py | 48 ++++++++-- python/ray/serve/scripts.py | 20 ++--- python/ray/serve/tests/test_api.py | 19 ++-- ...plication.py => test_built_application.py} | 32 +++---- .../ray/serve/tests/test_deployment_graph.py | 28 +++--- 11 files changed, 153 insertions(+), 122 deletions(-) rename python/ray/serve/{application.py => built_application.py} (99%) rename python/ray/serve/tests/{test_application.py => test_built_application.py} (89%) diff --git a/python/ray/serve/BUILD b/python/ray/serve/BUILD index deb0959afd6a..6ac79f95de11 100644 --- a/python/ray/serve/BUILD +++ b/python/ray/serve/BUILD @@ -26,7 +26,7 @@ py_test( ) py_test( - name = "test_application", + name = "test_built_application", size = "medium", srcs = serve_tests_srcs, tags = ["exclusive", "team:serve"], @@ -573,4 +573,4 @@ py_test( srcs = serve_tests_srcs, tags = ["exclusive", "team:serve"], deps = [":serve_lib"], -) \ No newline at end of file +) diff --git a/python/ray/serve/__init__.py b/python/ray/serve/__init__.py index 23c0eac007c4..12d3c360092d 100644 --- a/python/ray/serve/__init__.py +++ b/python/ray/serve/__init__.py @@ -11,6 +11,9 @@ shutdown, start, delete, + Application, + BuiltApplication, + Deployment, ) from ray.serve.air_integrations import PredictorDeployment from ray.serve.batching import batch @@ -40,4 +43,7 @@ "run", "PredictorDeployment", "delete", + "Application", + "BuiltApplication", + "Deployment", ] diff --git a/python/ray/serve/_private/api.py b/python/ray/serve/_private/api.py index f71af2afee77..d43ccc7fd344 100644 --- a/python/ray/serve/_private/api.py +++ b/python/ray/serve/_private/api.py @@ -8,8 +8,7 @@ import ray from ray._private.usage import usage_lib -from ray.serve.deployment 
import Deployment -from ray.serve.deployment_graph import ClassNode, FunctionNode +from ray.serve.deployment import Application, Deployment from ray.serve.exceptions import RayServeException from ray.serve.config import HTTPOptions from ray.serve._private.constants import ( @@ -337,19 +336,19 @@ def serve_start( def call_app_builder_with_args_if_necessary( - builder: Union[ClassNode, FunctionNode, FunctionType], + builder: Union[Application, FunctionType], args: Dict[str, Any], -) -> Union[ClassNode, FunctionNode]: +) -> Application: """Builds a Serve application from an application builder function. - If a pre-built application (ClassNode or FunctionNode) is passed, this is a no-op. + If a pre-built application is passed, this is a no-op. Else, we validate the signature of the builder, convert the args dictionary to the user-annotated Pydantic model if provided, and call the builder function. - The output of the function is returned (must be a ClassNode or FunctionNode). + The output of the function is returned (must be an Application). """ - if isinstance(builder, (ClassNode, FunctionNode)): + if isinstance(builder, Application): if len(args) > 0: raise ValueError( "Arguments can only be passed to an application builder function, " @@ -379,11 +378,10 @@ def call_app_builder_with_args_if_necessary( args = param.annotation.parse_obj(args) app = builder(args) - if not isinstance(app, (ClassNode, FunctionNode)): + if not isinstance(app, Application): raise TypeError( - "Application builder functions must return a `ClassNode` or " - "`FunctionNode` returned from `Deployment.bind()`, " - f"but got: {type(app)}." + "Application builder functions must return an `Application` returned " + f"`from `Deployment.bind()`, but got: {type(app)}." 
) return app diff --git a/python/ray/serve/_private/deployment_graph_build.py b/python/ray/serve/_private/deployment_graph_build.py index 6ce515936c15..c75542d0e92a 100644 --- a/python/ray/serve/_private/deployment_graph_build.py +++ b/python/ray/serve/_private/deployment_graph_build.py @@ -93,6 +93,15 @@ def build(ray_dag_root_node: DAGNode, name: str = None) -> List[Deployment]: ) deployments = extract_deployments_from_serve_dag(serve_root_dag) + # If the ingress deployment is a function and it is bound to other deployments, + # reject. + if isinstance(serve_root_dag, DeploymentFunctionNode) and len(deployments) != 1: + raise ValueError( + "The ingress deployment to your application cannot be a function if there " + "are multiple deployments. If you want to compose them, use a class. If " + "you're using the DAG API, the function should be bound to a DAGDriver." + ) + # After Ray DAG is transformed to Serve DAG with deployments and their init # args filled, generate a minimal weight executor serve dag for perf serve_executor_root_dag = serve_root_dag.apply_recursive( diff --git a/python/ray/serve/api.py b/python/ray/serve/api.py index 8bb34bf9fc57..82568a02f2fa 100644 --- a/python/ray/serve/api.py +++ b/python/ray/serve/api.py @@ -13,7 +13,7 @@ from ray.dag import DAGNode from ray.util.annotations import Deprecated, PublicAPI -from ray.serve.application import Application +from ray.serve.built_application import BuiltApplication from ray.serve._private.client import ServeControllerClient from ray.serve.config import AutoscalingConfig, DeploymentConfig, HTTPOptions from ray.serve._private.constants import ( @@ -28,8 +28,7 @@ get_internal_replica_context, _set_global_client, ) -from ray.serve.deployment import Deployment -from ray.serve.deployment_graph import ClassNode, FunctionNode +from ray.serve.deployment import Application, Deployment from ray.serve._private.deployment_graph_build import build as pipeline_build from ray.serve._private.deployment_graph_build 
import ( get_and_validate_ingress_deployment, @@ -458,24 +457,21 @@ def list_deployments() -> Dict[str, Deployment]: @PublicAPI(stability="beta") def run( - target: Union[ClassNode, FunctionNode], + target: Union[Application, BuiltApplication], _blocking: bool = True, host: str = DEFAULT_HTTP_HOST, port: int = DEFAULT_HTTP_PORT, name: str = SERVE_DEFAULT_APP_NAME, route_prefix: str = DEFAULT.VALUE, ) -> Optional[RayServeHandle]: - """Run a Serve application and return a ServeHandle to the ingress. + """Run a Serve application and return a ServeHandle to the ingress deployment. - Either a ClassNode, FunctionNode, or a pre-built application - can be passed in. If a node is passed in, all of the deployments it depends - on will be deployed. If there is an ingress, its handle will be returned. + The application is returned by `Deployment.bind()` or `serve.build`. Args: - target (Union[ClassNode, FunctionNode, Application]): - A user-built Serve Application or a ClassNode that acts as the - root node of DAG. By default ClassNode is the Driver - deployment unless user provides a customized one. + target (Union[Application, BuiltApplication]): + A Serve application returned from `Deployment.bind()` or a built application + returned from `serve.build()`. host: Host for HTTP servers to listen on. Defaults to "127.0.0.1". To expose Serve publicly, you probably want to set this to "0.0.0.0". 
@@ -499,35 +495,20 @@ def run( record_extra_usage_tag(TagKey.SERVE_API_VERSION, "v2") if isinstance(target, Application): + deployments = pipeline_build(target._get_internal_dag_node(), name) + ingress = get_and_validate_ingress_deployment(deployments) + elif isinstance(target, BuiltApplication): deployments = list(target.deployments.values()) ingress = target.ingress - # Each DAG should always provide a valid Driver ClassNode - elif isinstance(target, ClassNode): - deployments = pipeline_build(target, name) - ingress = get_and_validate_ingress_deployment(deployments) - # Special case where user is doing single function serve.run(func.bind()) - elif isinstance(target, FunctionNode): - deployments = pipeline_build(target, name) - ingress = get_and_validate_ingress_deployment(deployments) - if len(deployments) != 1: - raise ValueError( - "We only support single function node in serve.run, ex: " - "serve.run(func.bind()). For more than one nodes in your DAG, " - "Please provide a driver class and bind it as entrypoint to " - "your Serve DAG." - ) - elif isinstance(target, DAGNode): - raise ValueError( - "Invalid DAGNode type as entry to serve.run(), " - f"type: {type(target)}, accepted: ClassNode, " - "FunctionNode please provide a driver class and bind it " - "as entrypoint to your Serve DAG." - ) else: - raise TypeError( - "Expected a ClassNode, FunctionNode, or Application as target. " - f"Got unexpected type {type(target)} instead." + msg = ( + "`serve.run` expects an `Application` returned by `Deployment.bind()` " + "or a static `BuiltApplication` returned by `serve.build`." ) + if isinstance(target, DAGNode): + msg += " If you are using the DAG API, you must bind the DAG node to a " + "deployment like: `app = Deployment.bind(my_dag_output)`. " + raise TypeError(msg) # when name provided, keep all existing applications # otherwise, delete all of them. 
@@ -567,30 +548,24 @@ def run( @PublicAPI(stability="alpha") -def build( - target: Union[ClassNode, FunctionNode], name: str = SERVE_DEFAULT_APP_NAME -) -> Application: - """Builds a Serve application into a static application. - - Takes in a ClassNode or FunctionNode and converts it to a - Serve application consisting of one or more deployments. This is intended - to be used for production scenarios and deployed via the Serve REST API or - CLI, so there are some restrictions placed on the deployments: - 1) All of the deployments must be importable. That is, they cannot be - defined in __main__ or inline defined. The deployments will be - imported in production using the same import path they were here. - 2) All arguments bound to the deployment must be JSON-serializable. +def build(target: Application, name: str = SERVE_DEFAULT_APP_NAME) -> BuiltApplication: + """Builds a Serve application into a static, built application. - The returned Application object can be exported to a dictionary or YAML - config. + Resolves the provided Application object into a list of deployments. + This can be converted to a Serve config file that can be deployed via + the Serve REST API or CLI. + + All of the deployments must be importable. That is, they cannot be + defined in __main__ or inline defined. The deployments will be + imported in the config file using the same import path they were here. Args: - target (Union[ClassNode, FunctionNode]): A ClassNode or FunctionNode - that acts as the top level node of the DAG. + target: The Serve application to run consisting of one or more + deployments. name: The name of the Serve application. Returns: - The static built Serve application + The static built Serve application. """ if in_interactive_shell(): raise RuntimeError( @@ -601,7 +576,7 @@ def build( # TODO(edoakes): this should accept host and port, but we don't # currently support them in the REST API. 
- return Application(pipeline_build(target, name)) + return BuiltApplication(pipeline_build(target._get_internal_dag_node(), name)) @PublicAPI(stability="alpha") diff --git a/python/ray/serve/application.py b/python/ray/serve/built_application.py similarity index 99% rename from python/ray/serve/application.py rename to python/ray/serve/built_application.py index 942cfdec1f94..195ad35019b2 100644 --- a/python/ray/serve/application.py +++ b/python/ray/serve/built_application.py @@ -23,7 +23,7 @@ def __setitem__(self, *args): @DeveloperAPI -class Application: +class BuiltApplication: """A static, pre-built Serve application. An application consists of a number of Serve deployments that can send diff --git a/python/ray/serve/deployment.py b/python/ray/serve/deployment.py index b3a900ddd55d..460066982844 100644 --- a/python/ray/serve/deployment.py +++ b/python/ray/serve/deployment.py @@ -12,6 +12,7 @@ from ray._private.usage.usage_lib import TagKey, record_extra_usage_tag from ray.serve.context import get_global_client +from ray.dag.dag_node import DAGNodeBase from ray.dag.class_node import ClassNode from ray.dag.function_node import FunctionNode from ray.serve.config import ( @@ -31,6 +32,39 @@ logger = logging.getLogger(SERVE_LOGGER_NAME) +@PublicAPI +class Application(DAGNodeBase): + """Returned from `Deployment.bind()`. + + Can be passed into another `Deployment.bind()` to compose multiple deployments in a + single application, passed to `serve.run`, or deployed via a Serve config file. 
+ """ + + def __init__( + self, *, _internal_dag_node: Optional[Union[ClassNode, FunctionNode]] = None + ): + """This class should not be constructed directly.""" + if _internal_dag_node is None: + raise RuntimeError("This class should not be constructed directly.") + + self._internal_dag_node = _internal_dag_node + + def _get_internal_dag_node(self) -> Union[ClassNode, FunctionNode]: + if self._internal_dag_node is None: + raise RuntimeError("Application object should not be constructed directly.") + + return self._internal_dag_node + + @classmethod + def _from_internal_dag_node(cls, dag_node: Union[ClassNode, FunctionNode]): + return cls(_internal_dag_node=dag_node) + + # Proxy all method calls to the underlying DAG node. This allows this class to be + # passed in place of the ClassNode or FunctionNode in the DAG building code. + def __getattr__(self, name: str) -> Any: + return getattr(self._get_internal_dag_node(), name) + + @PublicAPI class Deployment: def __init__( @@ -184,11 +218,11 @@ def __call__(self): ) @PublicAPI(stability="beta") - def bind(self, *args, **kwargs) -> Union[ClassNode, FunctionNode]: - """Bind the provided arguments and return a class or function node. + def bind(self, *args, **kwargs) -> Application: + """Bind the arguments to the deployment and return an Application. - The returned bound deployment can be deployed or bound to other - deployments to create a deployment graph. + The returned Application can be deployed using `serve.run` (or via + config file) or bound to another deployment for composition. 
""" copied_self = copy(self) @@ -196,7 +230,7 @@ def bind(self, *args, **kwargs) -> Union[ClassNode, FunctionNode]: schema_shell = deployment_to_schema(copied_self) if inspect.isfunction(self._func_or_class): - return FunctionNode( + dag_node = FunctionNode( self._func_or_class, args, # Used to bind and resolve DAG only, can take user input kwargs, # Used to bind and resolve DAG only, can take user input @@ -207,7 +241,7 @@ def bind(self, *args, **kwargs) -> Union[ClassNode, FunctionNode]: }, ) else: - return ClassNode( + dag_node = ClassNode( self._func_or_class, args, kwargs, @@ -218,6 +252,8 @@ def bind(self, *args, **kwargs) -> Union[ClassNode, FunctionNode]: }, ) + return Application._from_internal_dag_node(dag_node) + @guarded_deprecation_warning(instructions=MIGRATION_MESSAGE) @Deprecated(message=MIGRATION_MESSAGE) def deploy(self, *init_args, _blocking=True, **init_kwargs): diff --git a/python/ray/serve/scripts.py b/python/ray/serve/scripts.py index d97bf7a22112..949ed8059c90 100644 --- a/python/ray/serve/scripts.py +++ b/python/ray/serve/scripts.py @@ -3,7 +3,7 @@ import pathlib import sys import time -from typing import Dict, Optional, Union, Tuple +from typing import Dict, Optional, Tuple import click import yaml @@ -26,8 +26,7 @@ SERVE_DEFAULT_APP_NAME, ) from ray.serve._private.common import ServeDeployMode -from ray.serve.deployment import deployment_to_schema -from ray.serve.deployment_graph import ClassNode, FunctionNode +from ray.serve.deployment import Application, deployment_to_schema from ray.serve._private import api as _private_api from ray.serve.schema import ( ServeApplicationSchema, @@ -231,7 +230,7 @@ def deploy(config_file_name: str, address: str): help=( "Runs an application from the specified import path (e.g., my_script:" "app) or application(s) from a YAML config.\n\n" - "If passing an import path, it must point to a bound Serve application or " + "If passing an import path, it must point to a Serve Application or " "a function that 
returns one. If a function is used, arguments can be " "passed to it in 'key=val' format after the import path, for example:\n\n" "serve run my_script:app model_path='/path/to/model.pkl' num_replicas=5\n\n" @@ -613,9 +612,9 @@ def shutdown(address: str, yes: bool): @cli.command( short_help="Writes a Serve Deployment Graph's config file.", help=( - "Imports the ClassNode(s) or FunctionNode(s) at IMPORT_PATH(S) and generates a " + "Imports the Application at IMPORT_PATH(S) and generates a " "structured config for it. If the flag --multi-app is set, accepts multiple " - "ClassNode/FunctionNodes and generates a multi-application config. Config " + "Applications and generates a multi-application config. Config " "outputted from this command can be used by `serve deploy` or the REST API. " ), ) @@ -659,14 +658,13 @@ def build( sys.path.insert(0, app_dir) def build_app_config(import_path: str, name: str = None): - node: Union[ClassNode, FunctionNode] = import_attr(import_path) - if not isinstance(node, (ClassNode, FunctionNode)): + app: Application = import_attr(import_path) + if not isinstance(app, Application): raise TypeError( - f"Expected '{import_path}' to be ClassNode or " - f"FunctionNode, but got {type(node)}." + f"Expected '{import_path}' to be an Application but got {type(app)}." 
) - app = build_app(node) + app = build_app(app) schema = ServeApplicationSchema( import_path=import_path, runtime_env={}, diff --git a/python/ray/serve/tests/test_api.py b/python/ray/serve/tests/test_api.py index a205512018b8..5bc5e3da2d5b 100644 --- a/python/ray/serve/tests/test_api.py +++ b/python/ray/serve/tests/test_api.py @@ -11,8 +11,9 @@ import ray from ray import serve from ray._private.test_utils import SignalActor, wait_for_condition -from ray.serve.application import Application -from ray.serve.deployment_graph import ClassNode, FunctionNode, RayServeDAGHandle +from ray.serve.built_application import BuiltApplication +from ray.serve.deployment import Application +from ray.serve.deployment_graph import RayServeDAGHandle from ray.serve.drivers import DAGDriver from ray.serve.exceptions import RayServeException from ray.serve._private.api import call_app_builder_with_args_if_necessary @@ -389,13 +390,13 @@ def test_run_get_ingress_app(serve_instance): def g(): return "got g" - app = Application([g]) + app = BuiltApplication([g]) ingress_handle = serve.run(app) assert ray.get(ingress_handle.remote()) == "got g" serve_instance.delete_deployments(["g"]) - no_ingress_app = Application([g.options(route_prefix=None)]) + no_ingress_app = BuiltApplication([g.options(route_prefix=None)]) ingress_handle = serve.run(no_ingress_app) assert ingress_handle is None @@ -784,14 +785,14 @@ def build_function(args): return self.A.bind() assert isinstance( - call_app_builder_with_args_if_necessary(build_function, {}), ClassNode + call_app_builder_with_args_if_necessary(build_function, {}), Application ) def build_class(args): return self.f.bind() assert isinstance( - call_app_builder_with_args_if_necessary(build_class, {}), FunctionNode + call_app_builder_with_args_if_necessary(build_class, {}), Application ) def test_args_dict(self): @@ -806,7 +807,7 @@ def build(args): ) app = call_app_builder_with_args_if_necessary(build, args_dict) - assert isinstance(app, ClassNode) + 
assert isinstance(app, Application) def test_args_typed(self): args_dict = {"message": "hiya", "num_replicas": "3"} @@ -818,7 +819,7 @@ def build(args: self.TypedArgs): return self.A.options(num_replicas=args.num_replicas).bind(args.message) app = call_app_builder_with_args_if_necessary(build, args_dict) - assert isinstance(app, ClassNode) + assert isinstance(app, Application) # Sanity check that pydantic validation works. @@ -831,7 +832,7 @@ def check_missing_optional(args: self.TypedArgs): app = call_app_builder_with_args_if_necessary( check_missing_optional, {"message": "hiya"} ) - assert isinstance(app, ClassNode) + assert isinstance(app, Application) # 2) Check that validation rejects a missing required field. def check_missing_required(args: self.TypedArgs): diff --git a/python/ray/serve/tests/test_application.py b/python/ray/serve/tests/test_built_application.py similarity index 89% rename from python/ray/serve/tests/test_application.py rename to python/ray/serve/tests/test_built_application.py index 4919b32c0bd6..9269f77d57d2 100644 --- a/python/ray/serve/tests/test_application.py +++ b/python/ray/serve/tests/test_built_application.py @@ -3,11 +3,11 @@ import ray from ray import serve -from ray.serve.application import Application +from ray.serve.built_application import BuiltApplication from ray._private.test_utils import wait_for_condition -class TestApplicationConstruction: +class TestBuiltApplicationConstruction: @serve.deployment def f(*args): return "got f" @@ -18,7 +18,7 @@ def __call__(self, *args): return "got C" def test_valid_deployments(self): - app = Application([self.f, self.C]) + app = BuiltApplication([self.f, self.C]) assert len(app.deployments) == 2 app_deployment_names = {d.name for d in app.deployments.values()} @@ -27,14 +27,14 @@ def test_valid_deployments(self): def test_repeated_deployment_names(self): with pytest.raises(ValueError): - Application([self.f, self.C.options(name="f")]) + BuiltApplication([self.f, 
self.C.options(name="f")]) with pytest.raises(ValueError): - Application([self.C, self.f.options(name="C")]) + BuiltApplication([self.C, self.f.options(name="C")]) def test_non_deployments(self): with pytest.raises(TypeError): - Application([self.f, 5, "hello"]) + BuiltApplication([self.f, 5, "hello"]) class TestServeRun: @@ -68,7 +68,7 @@ def deploy_and_check_responses( for i in range(len(deployments)): serve.run( - Application([deployments[i]]), + BuiltApplication([deployments[i]]), name=f"app{i}", _blocking=blocking, ) @@ -102,7 +102,7 @@ def test_basic_run(self, serve_instance): self.deploy_and_check_responses(deployments, responses) def test_non_blocking_run(self, serve_instance): - """Checks Application's deploy() behavior when blocking=False.""" + """Checks BuiltApplication's deploy() behavior when blocking=False.""" deployments = [self.f, self.g, self.C, self.D] responses = ["f reached", "g reached", "C reached", "D reached"] @@ -144,14 +144,14 @@ async def request_echo(self, echo: str): MutualHandles.options(name=deployment_name, init_args=(handle_name,)) ) - serve.run(Application(deployments), _blocking=True) + serve.run(BuiltApplication(deployments), _blocking=True) for deployment in deployments: assert (ray.get(deployment.get_handle().remote("hello"))) == "hello" def test_decorated_deployments(self, serve_instance): """ - Checks Application's deploy behavior when deployments have options set + Checks BuiltApplication's deploy behavior when deployments have options set in their @serve.deployment decorator. 
""" @@ -170,18 +170,18 @@ async def __call__(self): self.deploy_and_check_responses(deployments, responses) def test_empty_list(self, serve_instance): - """Checks Application's deploy behavior when deployment group is empty.""" + """Checks BuiltApplication's deploy behavior when deployment group is empty.""" self.deploy_and_check_responses([], []) def test_invalid_input(self, serve_instance): """ - Checks Application's deploy behavior when deployment group contains + Checks BuiltApplication's deploy behavior when deployment group contains non-Deployment objects. """ with pytest.raises(TypeError): - Application([self.f, self.C, "not a Deployment object"]).deploy( + BuiltApplication([self.f, self.C, "not a Deployment object"]).deploy( blocking=True ) @@ -242,11 +242,11 @@ def test_different_pymodules(self, serve_instance): def test_import_path_deployment_decorated(self, serve_instance): func = serve.deployment(name="decorated_func", route_prefix="/decorated_func")( - "ray.serve.tests.test_application.decorated_func" + "ray.serve.tests.test_built_application.decorated_func" ) clss = serve.deployment(name="decorated_clss", route_prefix="/decorated_clss")( - "ray.serve.tests.test_application.DecoratedClass" + "ray.serve.tests.test_built_application.DecoratedClass" ) deployments = [func, clss] @@ -273,7 +273,7 @@ def __call__(self, req=None): def test_immutable_deployment_list(serve_instance): - app = Application([DecoratedClass, decorated_func]) + app = BuiltApplication([DecoratedClass, decorated_func]) assert len(app.deployments.values()) == 2 for name in app.deployments.keys(): diff --git a/python/ray/serve/tests/test_deployment_graph.py b/python/ray/serve/tests/test_deployment_graph.py index 7451f7372517..e9fa7db68acb 100644 --- a/python/ray/serve/tests/test_deployment_graph.py +++ b/python/ray/serve/tests/test_deployment_graph.py @@ -5,27 +5,29 @@ import numpy as np import requests +import starlette.requests import ray from ray import serve -from 
ray.serve.application import Application from ray.serve.api import build as build_app -from ray.serve.deployment_graph import RayServeDAGHandle -from ray.serve._private.deployment_graph_build import build as pipeline_build -from ray.serve.deployment_graph import ClassNode, InputNode +from ray.serve.built_application import BuiltApplication +from ray.serve.deployment import Application +from ray.serve.deployment_graph import InputNode, RayServeDAGHandle from ray.serve.drivers import DAGDriver -import starlette.requests +from ray.serve._private.deployment_graph_build import build as pipeline_build RayHandleLike = TypeVar("RayHandleLike") NESTED_HANDLE_KEY = "nested_handle" -def maybe_build(node: ClassNode, use_build: bool) -> Union[Application, ClassNode]: +def maybe_build( + app: Application, use_build: bool +) -> Union[Application, BuiltApplication]: if use_build: - return build_app(node) + return build_app(app) else: - return node + return app @serve.deployment @@ -166,8 +168,14 @@ def func_3(input): output_2 = func_2.bind(dag_input) output_3 = func_3.bind(output_2) ray_dag = combine.bind(output_1, output_2, kwargs_output=output_3) - with pytest.raises(ValueError, match="Please provide a driver class"): - _ = serve.run(ray_dag) + with pytest.raises( + ValueError, + match=( + "The ingress deployment to your application cannot be a " + "function if there are multiple deployment" + ), + ): + serve.run(ray_dag) serve_dag = DAGDriver.bind(ray_dag, http_adapter=json_resolver) From 98339847349eb079a73ddd08426e8485b6447747 Mon Sep 17 00:00:00 2001 From: Edward Oakes Date: Mon, 24 Apr 2023 16:51:33 -0500 Subject: [PATCH 078/424] [serve] Update quickstart & getting started for explicit `Application` concept (#34673) --- .../getting_started/model_deployment.py | 6 +- .../getting_started/model_deployment_full.py | 6 +- .../doc_code/getting_started/model_graph.py | 9 ++- doc/source/serve/doc_code/quickstart.py | 10 ++- .../serve/doc_code/quickstart_composed.py | 47 
++++++++++++ doc/source/serve/doc_code/quickstart_graph.py | 35 --------- doc/source/serve/getting_started.md | 73 +++++++++---------- doc/source/serve/index.md | 13 ++-- doc/source/serve/migration.md | 2 +- 9 files changed, 105 insertions(+), 96 deletions(-) create mode 100644 doc/source/serve/doc_code/quickstart_composed.py delete mode 100644 doc/source/serve/doc_code/quickstart_graph.py diff --git a/doc/source/serve/doc_code/getting_started/model_deployment.py b/doc/source/serve/doc_code/getting_started/model_deployment.py index 571de9dda515..482d12ffe4ad 100644 --- a/doc/source/serve/doc_code/getting_started/model_deployment.py +++ b/doc/source/serve/doc_code/getting_started/model_deployment.py @@ -35,11 +35,11 @@ async def __call__(self, http_request: Request) -> str: # __model_end__ # __model_deploy_start__ -translator = Translator.bind() +translator_app = Translator.bind() # __model_deploy_end__ -translator = Translator.options(ray_actor_options={}).bind() -serve.run(translator) +translator_app = Translator.options(ray_actor_options={}).bind() +serve.run(translator_app) # __client_function_start__ # File name: model_client.py diff --git a/doc/source/serve/doc_code/getting_started/model_deployment_full.py b/doc/source/serve/doc_code/getting_started/model_deployment_full.py index f6166779749e..16a4f65d5ade 100644 --- a/doc/source/serve/doc_code/getting_started/model_deployment_full.py +++ b/doc/source/serve/doc_code/getting_started/model_deployment_full.py @@ -30,11 +30,11 @@ async def __call__(self, http_request: Request) -> str: return self.translate(english_text) -translator = Translator.bind() +translator_app = Translator.bind() # __deployment_full_end__ -translator = Translator.options(ray_actor_options={}).bind() -serve.run(translator) +translator_app = Translator.options(ray_actor_options={}).bind() +serve.run(translator_app) import requests diff --git a/doc/source/serve/doc_code/getting_started/model_graph.py 
b/doc/source/serve/doc_code/getting_started/model_graph.py index b20a91ee3a69..7db64b9ddc83 100644 --- a/doc/source/serve/doc_code/getting_started/model_graph.py +++ b/doc/source/serve/doc_code/getting_started/model_graph.py @@ -6,6 +6,7 @@ import ray from ray import serve +from ray.serve.handle import RayServeHandle from transformers import pipeline @@ -28,7 +29,7 @@ def translate(self, text: str) -> str: @serve.deployment class Summarizer: - def __init__(self, translator): + def __init__(self, translator: RayServeHandle): # Load model self.model = pipeline("summarization", model="t5-small") self.translator = translator @@ -52,13 +53,13 @@ async def __call__(self, http_request: Request) -> str: return translation -deployment_graph = Summarizer.bind(Translator.bind()) +app = Summarizer.bind(Translator.bind()) # __end_graph__ -serve.run(deployment_graph) +serve.run(app) # __start_client__ -# File name: graph_client.py +# File name: composed_client.py import requests english_text = ( diff --git a/doc/source/serve/doc_code/quickstart.py b/doc/source/serve/doc_code/quickstart.py index adaf042e3b76..13aaddfca819 100644 --- a/doc/source/serve/doc_code/quickstart.py +++ b/doc/source/serve/doc_code/quickstart.py @@ -5,7 +5,7 @@ from ray import serve -# 1: Define a Ray Serve deployment. +# 1: Define a Ray Serve application. @serve.deployment(route_prefix="/") class MyModelDeployment: def __init__(self, msg: str): @@ -16,9 +16,11 @@ def __call__(self, request: Request) -> Dict: return {"result": self._msg} -# 2: Deploy the model. -serve.run(MyModelDeployment.bind(msg="Hello world!")) +app = MyModelDeployment.bind(msg="Hello world!") -# 3: Query the deployment and print the result. +# 2: Deploy the application locally. +serve.run(app) + +# 3: Query the application and print the result. 
print(requests.get("http://localhost:8000/").json()) # {'result': 'Hello world!'} diff --git a/doc/source/serve/doc_code/quickstart_composed.py b/doc/source/serve/doc_code/quickstart_composed.py new file mode 100644 index 000000000000..f88250d8116c --- /dev/null +++ b/doc/source/serve/doc_code/quickstart_composed.py @@ -0,0 +1,47 @@ +import requests +import starlette +from typing import Dict +from ray import serve +from ray.serve.handle import RayServeHandle + + +# 1. Define the models in our composition graph and an ingress that calls them. +@serve.deployment +class Adder: + def __init__(self, increment: int): + self.increment = increment + + def add(self, inp: int): + return self.increment + inp + + +@serve.deployment +class Combiner: + def average(self, *inputs) -> float: + return sum(inputs) / len(inputs) + + +@serve.deployment +class Ingress: + def __init__( + self, adder1: RayServeHandle, adder2: RayServeHandle, combiner: RayServeHandle + ): + self._adder1, self._adder2, self._combiner = adder1, adder2, combiner + + async def __call__(self, request: starlette.requests.Request) -> Dict[str, float]: + input_json = await request.json() + + adder1_result = await self._adder1.add.remote(input_json["val"]) + adder2_result = await self._adder2.add.remote(input_json["val"]) + final_result = await self._combiner.average.remote(adder1_result, adder2_result) + + return {"result": await final_result} + + +# 2. Build the application consisting of the models and ingress. +app = Ingress.bind(Adder.bind(increment=1), Adder.bind(increment=2), Combiner.bind()) +serve.run(app) + +# 3: Query the application and print the result. 
+print(requests.post("http://localhost:8000/", json={"val": 100.0}).json()) +# {"result": 101.5} diff --git a/doc/source/serve/doc_code/quickstart_graph.py b/doc/source/serve/doc_code/quickstart_graph.py deleted file mode 100644 index a98e6aaf8d4a..000000000000 --- a/doc/source/serve/doc_code/quickstart_graph.py +++ /dev/null @@ -1,35 +0,0 @@ -import requests -from ray import serve -from ray.serve.drivers import DAGDriver -from ray.serve.dag import InputNode -from ray.serve.http_adapters import json_request - - -# 1. Define the models in our composition graph -@serve.deployment -class Adder: - def __init__(self, increment: int): - self.increment = increment - - def predict(self, inp: int): - return self.increment + inp - - -@serve.deployment -def combine_average(*input_values) -> float: - return {"result": sum(input_values) / len(input_values)} - - -# 2: Define the model composition graph and call it. -with InputNode() as input_node: - adder_1 = Adder.bind(increment=1) - adder_2 = Adder.bind(increment=2) - dag = combine_average.bind( - adder_1.predict.bind(input_node), adder_2.predict.bind(input_node) - ) - -serve.run(DAGDriver.bind(dag, http_adapter=json_request)) - -# 3: Query the deployment and print the result. -print(requests.post("http://localhost:8000/", json=100).json()) -# {"result": 101.5} diff --git a/doc/source/serve/getting_started.md b/doc/source/serve/getting_started.md index 409d28f2f23e..a900ffd1d39f 100644 --- a/doc/source/serve/getting_started.md +++ b/doc/source/serve/getting_started.md @@ -2,10 +2,10 @@ # Getting Started -This tutorial will walk you through the process of deploying models with Ray Serve. It will show you how to +This tutorial will walk you through the process of writing and testing a Ray Serve application. 
It will show you how to -* expose your models over HTTP using deployments -* test your deployments over HTTP +* convert a machine learning model to a Ray Serve deployment +* test a Ray Serve application locally over HTTP * compose multiple-model machine learning models together into a single application We'll use two models in this tutorial: @@ -29,7 +29,7 @@ pip install "ray[serve]" transformers requests torch ``` -## Model Example: Before Ray Serve +## Text Translation Model (before Ray Serve) First, let's take a look at our text-translation model. Here's its code: @@ -64,14 +64,14 @@ PyTorch, and Tensorflow for more info and examples: - {ref}`serve-ml-models-tutorial` -(converting-to-ray-serve-deployment)= -## Converting to a Ray Serve Deployment +(converting-to-ray-serve-application)= +## Converting to a Ray Serve Application In this section, we'll deploy the text translation model using Ray Serve, so it can be scaled up and queried over HTTP. We'll start by converting -`Translator` into a Ray Serve deployment that runs locally on your computer. +`Translator` into a Ray Serve deployment. -First, we open a new Python file and import `ray` and `ray serve`: +First, we open a new Python file and import `ray` and `ray.serve`: ```{literalinclude} ../serve/doc_code/getting_started/model_deployment.py :start-after: __import_start__ @@ -91,8 +91,7 @@ The `Translator` class has two modifications: 1. It has a decorator, `@serve.deployment`. 2. It has a new method, `__call__`. -The decorator converts `Translator` from a Python class into a Ray Serve -`Deployment` object. +The decorator converts `Translator` from a Python class into a Ray Serve `Deployment` object. Each deployment stores a single Python function or class that you write and uses it to serve requests. You can scale and configure each of your deployments independently using @@ -112,11 +111,11 @@ class Translator: ... ``` -Deployments receive Starlette HTTP `request` objects [^f1]. 
If your deployment stores a Python function, the function is called on this `request` object. If your deployment stores a class, the class's `__call__` method is called on this `request` object. The return value is sent back in the HTTP response body. +Deployments receive Starlette HTTP `request` objects [^f1]. By default, the deployment class's `__call__` method is called on this `request` object. The return value is sent back in the HTTP response body. This is why `Translator` needs a new `__call__` method. The method processes the incoming HTTP request by reading its JSON data and forwarding it to the `translate` method. The translated text is returned and sent back through the HTTP response. You can also use Ray Serve's FastAPI integration to avoid working with raw HTTP requests. Check out {ref}`serve-fastapi-http` for more info about FastAPI with Serve. -Next, we need to `bind` our `Translator` deployment to arguments that Ray Serve can pass into its constructor. This will let Ray Serve initialize a `Translator` object that can serve requests. Since `Translator`'s constructor doesn't take in any arguments, we can call the deployment's `bind` method without passing anything in: +Next, we need to `bind` our `Translator` deployment to arguments that will be passed into its constructor. This defines a Ray Serve application that we can run locally or deploy to production (you'll see later that applications can consist of multiple deployments). Since `Translator`'s constructor doesn't take in any arguments, we can call the deployment's `bind` method without passing anything in: ```{literalinclude} ../serve/doc_code/getting_started/model_deployment.py :start-after: __model_deploy_start__ @@ -124,8 +123,11 @@ Next, we need to `bind` our `Translator` deployment to arguments that Ray Serve :language: python ``` -With that, we can run our model on Ray Serve! -Here's the full Ray Serve script that we built: +With that, we are ready to test the application locally. 
+ +## Running a Ray Serve Application + +Here's the full Ray Serve script that we built above: ```{literalinclude} ../serve/doc_code/getting_started/model_deployment_full.py :start-after: __deployment_full_start__ @@ -133,27 +135,21 @@ Here's the full Ray Serve script that we built: :language: python ``` -We can run our script with the `serve run` CLI command. This command takes in an import path -to our deployment formatted as `module:bound_deployment`. Make sure to run the command from a directory containing a local copy of this script, so it can find the bound deployment: +To test locally, we run the script with the `serve run` CLI command. This command takes in an import path +to our deployment formatted as `module:application`. Make sure to run the command from a directory containing a local copy of this script saved as `serve_quickstart.py`, so it can import the application: ```console -$ serve run serve_deployment:translator +$ serve run serve_quickstart:translator_app ``` -This command will start running `Translator` and then block. It can be killed with `ctrl-C` in the terminal. +This command will run the `translator_app` application and then block, streaming logs to the console. It can be killed with `Ctrl-C`, which will tear down the application. -## Testing Ray Serve Deployments - -We can now test our model over HTTP. It can be reached at the following URL: +We can now test our model over HTTP. It can be reached at the following URL by default: ``` http://127.0.0.1:8000/ ``` -Since the cluster is deployed locally in this tutorial, the `127.0.0.1:8000` -refers to a localhost with port 8000 (the default port where you can reach -Serve deployments). - We'll send a POST request with JSON data containing our English text. `Translator`'s `__call__` method will unpack this text and forward it to the `translate` method. 
Here's a client script that requests a translation for "Hello world!": @@ -167,7 +163,7 @@ We'll send a POST request with JSON data containing our English text. To test our deployment, first make sure `Translator` is running: ``` -$ serve run serve_deployment:translator +$ serve run serve_deployment:translator_app ``` While `Translator` is running, we can open a separate terminal window and run the client script. This will get a response over HTTP: @@ -178,9 +174,10 @@ $ python model_client.py Bonjour monde! ``` -## Composing Machine Learning Models with Deployment Graphs +## Composing Multiple Models -Ray Serve's Deployment Graph API allows us to compose multiple machine learning models together into a single Ray Serve application. We can use parameters like `num_replicas`, `num_cpus`, and `num_gpus` to independently configure and scale each deployment in the graph. +Ray Serve allows you to compose multiple deployments into a single Ray Serve application. This makes it easy to combine multiple machine learning models along with business logic to serve a single request. +We can use parameters like `autoscaling_config`, `num_replicas`, `num_cpus`, and `num_gpus` to independently configure and scale each deployment in the application. For example, let's deploy a machine learning pipeline with two steps: @@ -203,7 +200,7 @@ $ python summary_model.py it was the best of times, it was worst of times . ``` -Here's a Ray Serve deployment graph that chains the two models together. The graph takes English text, summarizes it, and then translates it: +Here's an application that chains the two models together. 
The graph takes English text, summarizes it, and then translates it: ```{literalinclude} ../serve/doc_code/getting_started/model_graph.py :start-after: __start_graph__ @@ -218,18 +215,18 @@ translation_ref = await self.translator.translate.remote(summary) translation = await translation_ref ``` -`self.translator.translate.remote(summary)` issues an asynchronous call to the `Translator`'s `translate` method. Essentially, this line tells Ray to schedule a request to the `Translator` deployment's `translate` method, which can be fulfilled asynchronously. The line immediately returns a reference to the method's output. The next line `await translation_ref` waits for `translate` to execute and returns the value of that execution. +`self.translator.translate.remote(summary)` issues an asynchronous call to the `Translator`'s `translate` method. The line immediately returns a reference to the method's output, then the next line `await translation_ref` waits for `translate` to execute and returns the value of that execution. -We compose our graph in line 52: +We define the full application as follows: ```python deployment_graph = Summarizer.bind(Translator.bind()) ``` -Here, we bind `Translator` to its (empty) constructor arguments, and then we pass in the bound `Translator` as the constructor argument for the `Summarizer`. We can run this deployment graph using the `serve run` CLI command. Make sure to run this command from a directory containing a local copy of the `graph.py` code: +Here, we bind `Translator` to its (empty) constructor arguments, and then we pass in the bound `Translator` as the constructor argument for the `Summarizer`. We can run this deployment graph using the `serve run` CLI command. 
Make sure to run this command from a directory containing a local copy of the `serve_quickstart_composed.py` code: ```console -$ serve run graph:deployment_graph +$ serve run serve_quickstart_composed:app ``` We can use this client script to make requests to the graph: @@ -240,15 +237,15 @@ We can use this client script to make requests to the graph: :language: python ``` -While the graph is running, we can open a separate terminal window and run the client script: +While the application is running, we can open a separate terminal window and query it: ```console -$ python graph_client.py +$ python composed_client.py c'était le meilleur des temps, c'était le pire des temps . ``` -Deployment graphs are useful since they let you deploy each part of your machine learning pipeline, such as inference and business logic steps, in separate deployments. Each of these deployments can be individually configured and scaled, ensuring you get maximal performance from your resources. See the guide on [model composition](serve-model-composition) to learn more. +Composed Ray Serve applications let you deploy each part of your machine learning pipeline, such as inference and business logic steps, in separate deployments. Each of these deployments can be individually configured and scaled, ensuring you get maximal performance from your resources. See the guide on [model composition](serve-model-composition) to learn more. ## Next Steps @@ -260,6 +257,4 @@ Deployment graphs are useful since they let you deploy each part of your machine ```{rubric} Footnotes ``` -[^f1]: [Starlette](https://www.starlette.io/) is a web server framework - used by Ray Serve. Its [Request](https://www.starlette.io/requests/) class - provides a nice interface for incoming HTTP requests. +[^f1]: [Starlette](https://www.starlette.io/) is a web server framework used by Ray Serve. 
diff --git a/doc/source/serve/index.md b/doc/source/serve/index.md index 7d5fe2fa3011..4650ed3abb6b 100644 --- a/doc/source/serve/index.md +++ b/doc/source/serve/index.md @@ -32,8 +32,7 @@ Install Ray Serve and its dependencies: ```bash pip install "ray[serve]" ``` - -In this quick-start example we will define a simple "hello world" deployment, deploy it behind HTTP locally, and query it. +Define a simple "hello world" application, run it locally, and query it over HTTP. ```{literalinclude} doc_code/quickstart.py :language: python @@ -45,16 +44,16 @@ For more examples, select from the tabs. :::{tabbed} Model composition -In this example, we demonstrate how you can use Serve's model composition API to express a complex computation graph and deploy it as a Serve application. +Use Serve's model composition API to combine multiple deployments into a single application. -```{literalinclude} doc_code/quickstart_graph.py +```{literalinclude} doc_code/quickstart_composed.py :language: python ``` ::: :::{tabbed} FastAPI integration -In this example we will use Serve's [FastAPI](https://fastapi.tiangolo.com/) integration to make use of more advanced HTTP functionality. +Use Serve's [FastAPI](https://fastapi.tiangolo.com/) integration to elegantly handle HTTP parsing and validation. ```{literalinclude} doc_code/fastapi_example.py :language: python @@ -65,7 +64,7 @@ In this example we will use Serve's [FastAPI](https://fastapi.tiangolo.com/) int To run this example, install the following: ``pip install transformers`` -In this example we will serve a pre-trained [Hugging Face Transformers](https://huggingface.co/docs/transformers/index) model using Ray Serve. +Serve a pre-trained [Hugging Face Transformers](https://huggingface.co/docs/transformers/index) model using Ray Serve. The model we'll use is a sentiment analysis model: it will take a text string as input and return if the text was "POSITIVE" or "NEGATIVE." 
```{literalinclude} doc_code/transformers_example.py @@ -218,7 +217,7 @@ or head over to the {doc}`tutorials/index` to get started building your Ray Serv **Getting Started** ^^^ - Start with our quick start tutorials for :ref:`deploying a single model locally ` and how to :ref:`convert an existing model into a Ray Serve deployment ` . + Start with our quick start tutorials for :ref:`deploying a single model locally ` and how to :ref:`convert an existing model into a Ray Serve deployment ` . +++ .. link-button:: getting-started diff --git a/doc/source/serve/migration.md b/doc/source/serve/migration.md index f19bac4cae7c..1d0501695914 100644 --- a/doc/source/serve/migration.md +++ b/doc/source/serve/migration.md @@ -6,7 +6,7 @@ This section covers what to consider or change in your application when migratin ## What has been changed? -In Ray Serve 2.0, we released a [new deployment API](converting-to-ray-serve-deployment). The 1.x deployment API can still be used, but it will be deprecated in the future version. +In Ray Serve 2.0, we released a [new deployment API](converting-to-ray-serve-application). The 1.x deployment API can still be used, but it will be deprecated in the future version. ## Migrating the 1.x Deployment From a958505862f4fd14c152309ea24434508178266a Mon Sep 17 00:00:00 2001 From: Balaji Veeramani Date: Mon, 24 Apr 2023 15:00:45 -0700 Subject: [PATCH 079/424] [Data] Add GCE variant for shuffle_data_loader (#34632) This PR configures BuildKite to run the shuffle_data_loader Data release tests on GCE. This test was previously excluded in #34105. 
--------- Signed-off-by: Balaji Veeramani --- .../dataset/dataset_shuffle_data_loader.py | 27 ++++++++++++++----- .../dataset/shuffle_app_config.yaml | 1 - release/release_tests.yaml | 24 ++++++++--------- 3 files changed, 32 insertions(+), 20 deletions(-) diff --git a/release/nightly_tests/dataset/dataset_shuffle_data_loader.py b/release/nightly_tests/dataset/dataset_shuffle_data_loader.py index 5c2aba492c7e..d2c1c1f4d062 100644 --- a/release/nightly_tests/dataset/dataset_shuffle_data_loader.py +++ b/release/nightly_tests/dataset/dataset_shuffle_data_loader.py @@ -5,13 +5,20 @@ import ray +from pyarrow import fs import numpy as np import torch -PATH = [ - f"s3://shuffling-data-loader-benchmarks/data/input_data_{i}.parquet.snappy" - for i in range(0, 25) -] +PATHS = { + "aws": [ + f"s3://shuffling-data-loader-benchmarks/data/input_data_{i}.parquet.snappy" + for i in range(0, 25) + ], + "gcp": [ + f"gcs://shuffling-data-loader-benchmarks/data/input_data_{i}.parquet.snappy" + for i in range(0, 25) + ], +} def create_parser(): @@ -26,6 +33,7 @@ def create_parser(): ) parser.add_argument("--num-workers", type=int, default=4) parser.add_argument("--repeat-times", type=int, default=16) + parser.add_argument("--cloud", type=str, choices=["aws", "gcp"]) return parser @@ -83,9 +91,14 @@ def create_torch_iterator(split, batch_size, rank=None): return torch_iterator -def create_dataset(filenames, repeat_times): +def create_dataset(filenames, repeat_times, cloud): + if cloud == "gcp": + filesystem = fs.GcsFileSystem() + else: + filesystem = None + pipeline = ( - ray.data.read_parquet(list(filenames)) + ray.data.read_parquet(list(filenames), filesystem=filesystem) .repeat(times=repeat_times) .random_shuffle_each_window() ) @@ -100,7 +113,7 @@ def create_dataset(filenames, repeat_times): start = time.time() - pipeline = create_dataset(PATH, args.repeat_times) + pipeline = create_dataset(PATHS[args.cloud], args.repeat_times, args.cloud) splits = pipeline.split(args.num_workers) 
@ray.remote(num_gpus=1) diff --git a/release/nightly_tests/dataset/shuffle_app_config.yaml b/release/nightly_tests/dataset/shuffle_app_config.yaml index 50791b5bf15b..c0728acd33e8 100644 --- a/release/nightly_tests/dataset/shuffle_app_config.yaml +++ b/release/nightly_tests/dataset/shuffle_app_config.yaml @@ -3,7 +3,6 @@ base_image: {{ env["RAY_IMAGE_ML_NIGHTLY_GPU"] | default("anyscale/ray-ml:nightl python: pip_packages: - boto3 - - pyarrow<7.0.0 conda_packages: [] post_build_cmds: diff --git a/release/release_tests.yaml b/release/release_tests.yaml index 22cf106ae73c..949713a6ccde 100644 --- a/release/release_tests.yaml +++ b/release/release_tests.yaml @@ -3371,14 +3371,14 @@ cluster: cluster_env: app_config.yaml cluster_compute: multi_node_checkpointing_compute_config.yaml - + run: timeout: 3600 script: pytest checkpointing_tests/test_learner_group_checkpointing.py wait_for_nodes: num_nodes: 3 - + alert: default @@ -4982,17 +4982,17 @@ run: timeout: 1800 - script: python dataset_shuffle_data_loader.py + script: python dataset_shuffle_data_loader.py --cloud aws - # TODO: Port s3://shuffling-data-loader-benchmarks/ to GCS. - # variations: - # - __suffix__: aws - # - __suffix__: gce - # env: gce - # frequency: manual - # cluster: - # cluster_env: shuffle_app_config.yaml - # cluster_compute: shuffle_compute_gce.yaml + variations: + - __suffix__: aws + - __suffix__: gce + env: gce + frequency: manual + cluster: + cluster_compute: shuffle_compute_gce.yaml + run: + script: python dataset_shuffle_data_loader.py --cloud gcp - name: parquet_metadata_resolution group: data-tests From ccfe4b74d987041e69528235eef68789c44ad5f1 Mon Sep 17 00:00:00 2001 From: Chao Wang <125417081+chaowanggg@users.noreply.github.com> Date: Mon, 24 Apr 2023 18:22:30 -0700 Subject: [PATCH 080/424] [UI] [Nodes] Disable actions for dead nodes (#34387) We are going to remove logs for dead nodes and remove Stack Trace and CPU Flame Graph for dead workers. 
Otherwise, users will click the link and jump to a page without information they need. --- .../src/pages/node/NodeRow.component.test.tsx | 169 ++++++++---------- dashboard/client/src/pages/node/NodeRow.tsx | 9 +- 2 files changed, 87 insertions(+), 91 deletions(-) diff --git a/dashboard/client/src/pages/node/NodeRow.component.test.tsx b/dashboard/client/src/pages/node/NodeRow.component.test.tsx index f5f27ce7029a..2b3aa9d87ca5 100644 --- a/dashboard/client/src/pages/node/NodeRow.component.test.tsx +++ b/dashboard/client/src/pages/node/NodeRow.component.test.tsx @@ -1,50 +1,72 @@ import { render, screen } from "@testing-library/react"; +import { noop } from "lodash"; import React from "react"; import { MemoryRouter } from "react-router-dom"; import { NodeDetail } from "../../type/node"; import { CoreWorkerStats, Worker } from "../../type/worker"; import { NodeRow, WorkerRow } from "./NodeRow"; +const NODE: NodeDetail = { + hostname: "test-hostname", + ip: "192.168.0.1", + cpu: 15, + mem: [100, 95, 5], + state: "ALIVE", + disk: { + "/": { + used: 20000000, + total: 200000000, + free: 180000000, + percent: 10, + }, + "/tmp": { + used: 0, + total: 200, + free: 200, + percent: 0, + }, + }, + networkSpeed: [5, 10], + raylet: { + state: "ALIVE", + nodeId: "1234567890ab", + isHeadNode: true, + numWorkers: 0, + pid: 2345, + startTime: 100, + terminateTime: -1, + brpcPort: 3456, + nodeManagerPort: 5890, + objectStoreAvailableMemory: 40, + objectStoreUsedMemory: 10, + }, + logUrl: "http://192.16.0.1/logs", +} as NodeDetail; + +const WORKER: Worker = { + cmdline: ["echo hi"], + pid: 3456, + cpuPercent: 14, + memoryInfo: { + rss: 75, + vms: 0, + pageins: 0, + pfaults: 0, + }, + coreWorkerStats: [ + { + workerId: "worker-12345", + } as CoreWorkerStats, + ], +} as Worker; + +const DEAD_NODE = { ...NODE, state: "DEAD" }; + describe("NodeRow", () => { it("renders", async () => { - const node: NodeDetail = { - hostname: "test-hostname", - ip: "192.168.0.1", - cpu: 15, - mem: [100, 
95, 5], - disk: { - "/": { - used: 20000000, - total: 200000000, - free: 180000000, - percent: 10, - }, - "/tmp": { - used: 0, - total: 200, - free: 200, - percent: 0, - }, - }, - networkSpeed: [5, 10], - raylet: { - state: "ALIVE", - nodeId: "1234567890ab", - isHeadNode: true, - numWorkers: 0, - pid: 2345, - startTime: 100, - terminateTime: -1, - brpcPort: 3456, - nodeManagerPort: 5890, - objectStoreAvailableMemory: 40, - objectStoreUsedMemory: 10, - }, - logUrl: "http://192.16.0.1/logs", - } as NodeDetail; render( { /* purposefully empty */ @@ -77,64 +99,31 @@ describe("NodeRow", () => { expect(screen.getByText(/5.0000B\/s/)).toBeVisible(); expect(screen.getByText(/10.0000B\/s/)).toBeVisible(); }); -}); -describe("WorkerRow", () => { - it("renders", async () => { - const node: NodeDetail = { - hostname: "test-hostname", - ip: "192.168.0.1", - cpu: 15, - mem: [100, 95, 5], - disk: { - "/": { - used: 20000000, - total: 200000000, - free: 180000000, - percent: 10, - }, - "/tmp": { - used: 0, - total: 200, - free: 200, - percent: 0, - }, - }, - networkSpeed: [5, 10], - raylet: { - state: "ALIVE", - nodeId: "1234567890ab", - isHeadNode: true, - numWorkers: 0, - pid: 2345, - startTime: 100, - terminateTime: -1, - brpcPort: 3456, - nodeManagerPort: 5890, - objectStoreAvailableMemory: 40, - objectStoreUsedMemory: 10, + it("Disable actions for Dead node", async () => { + render( + , + { + wrapper: ({ children }) => ( + + + {children} +
    +
    + ), }, - logUrl: "http://192.16.0.1/logs", - } as NodeDetail; + ); + await screen.findByText("test-hostname"); + // Could not access logs for Dead nodes(the log is hidden) + expect(screen.queryByLabelText(/Log/)).not.toBeInTheDocument(); - const worker: Worker = { - cmdline: ["echo hi"], - pid: 3456, - cpuPercent: 14, - memoryInfo: { - rss: 75, - vms: 0, - pageins: 0, - pfaults: 0, - }, - coreWorkerStats: [ - { - workerId: "worker-12345", - } as CoreWorkerStats, - ], - } as Worker; + expect(screen.getByText(/3456/)).toBeVisible(); + }); +}); - render(, { +describe("WorkerRow", () => { + it("renders", async () => { + render(, { wrapper: ({ children }) => ( diff --git a/dashboard/client/src/pages/node/NodeRow.tsx b/dashboard/client/src/pages/node/NodeRow.tsx index a8d9d4a1b811..5456afef45df 100644 --- a/dashboard/client/src/pages/node/NodeRow.tsx +++ b/dashboard/client/src/pages/node/NodeRow.tsx @@ -60,6 +60,11 @@ export const NodeRow = ({ const objectStoreTotalMemory = raylet.objectStoreAvailableMemory + raylet.objectStoreUsedMemory; + /** + * Why do we use raylet.state instead of node.state in the following code? 
+ * Because in ray, raylet == node + */ + return ( @@ -94,7 +99,9 @@ export const NodeRow = ({ - Log + {raylet.state !== "DEAD" && ( + Log + )} From 6823748df5ba9072116bca72fe4eacc115a5a774 Mon Sep 17 00:00:00 2001 From: Ricky Xu Date: Mon, 24 Apr 2023 19:16:41 -0700 Subject: [PATCH 081/424] [core][serve][ci] Move runtime env tests to be owned by core team (#34728) --- python/ray/tests/BUILD | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/python/ray/tests/BUILD b/python/ray/tests/BUILD index 26623a66b6c0..34c239ceb9c7 100644 --- a/python/ray/tests/BUILD +++ b/python/ray/tests/BUILD @@ -355,7 +355,7 @@ py_test_module_list( "test_runtime_env_working_dir_remote_uri.py" ], size = "large", - tags = ["exclusive", "large_size_python_tests_shard_2", "team:serve"], + tags = ["exclusive", "large_size_python_tests_shard_2", "team:core"], deps = ["//:ray_lib", ":conftest"], data = ["pip_install_test-0.5-py3-none-any.whl"], ) @@ -369,7 +369,7 @@ py_test_module_list( "test_runtime_env_conda_and_pip_5.py", ], size = "large", - tags = ["exclusive", "post_wheel_build", "team:serve"], + tags = ["exclusive", "post_wheel_build", "team:core"], deps = ["//:ray_lib", ":conftest"], ) @@ -377,7 +377,7 @@ py_test( name = "test_runtime_env_complicated", size = "large", srcs = ["test_runtime_env_complicated.py"], - tags = ["exclusive", "post_wheel_build", "team:serve"], + tags = ["exclusive", "post_wheel_build", "team:core"], deps = ["//:ray_lib", ":conftest"], data = ["//python/ray/experimental/packaging/example_pkg"], ) @@ -386,7 +386,7 @@ py_test( name = "test_actor_group", size = "medium", srcs = ["test_actor_group.py"], - tags = ["exclusive", "medium_size_python_tests_a_to_j", "team:serve"], + tags = ["exclusive", "medium_size_python_tests_a_to_j", "team:core"], deps = ["//:ray_lib", ":conftest"] ) @@ -452,7 +452,7 @@ py_test( "test_runtime_env_validation_1_schema.json", "test_runtime_env_validation_2_schema.json", ], - tags = ["exclusive", 
"small_size_python_tests", "team:serve"], + tags = ["exclusive", "small_size_python_tests", "team:core"], deps = ["//:ray_lib", ":conftest"], ) @@ -460,7 +460,7 @@ py_test( name = "test_runtime_env_ray_minimal", size = "medium", srcs = ["test_runtime_env_ray_minimal.py"], - tags = ["exclusive", "medium_size_python_tests_k_to_z", "team:serve"], + tags = ["exclusive", "medium_size_python_tests_k_to_z", "team:core"], deps = ["//:ray_lib", ":conftest"], ) From 7e15012bf23ee4f7fc5de2d28f0961bef5e3024e Mon Sep 17 00:00:00 2001 From: Amog Kamsetty Date: Mon, 24 Apr 2023 19:23:53 -0700 Subject: [PATCH 082/424] [Data] Require `batch_size` for GPU `map_batches` in strict mode (#34588) In strict mode, require a batch_size for map_batches when requesting GPUs. This makes batching more explicit to users. Reduce the CPU default batch size in strict mode from 4096 to 1024. --------- Signed-off-by: amogkam --- python/ray/data/block.py | 29 ++++++++++++++++++++-- python/ray/data/context.py | 3 +++ python/ray/data/datastream.py | 10 +++++--- python/ray/data/tests/conftest.py | 8 ++++++ python/ray/data/tests/test_strict_mode.py | 30 +++++++++++++---------- 5 files changed, 61 insertions(+), 19 deletions(-) diff --git a/python/ray/data/block.py b/python/ray/data/block.py index e213ad33887b..e47c4a8f241d 100644 --- a/python/ray/data/block.py +++ b/python/ray/data/block.py @@ -33,9 +33,9 @@ resource = None if sys.version_info >= (3, 8): - from typing import Protocol + from typing import Literal, Protocol else: - from typing_extensions import Protocol + from typing_extensions import Literal, Protocol if TYPE_CHECKING: import pandas @@ -173,6 +173,31 @@ def _apply_strict_mode_batch_format(given_batch_format: Optional[str]) -> str: return given_batch_format +def _apply_strict_mode_batch_size( + given_batch_size: Optional[Union[int, Literal["default"]]], use_gpu: bool +) -> Optional[int]: + ctx = ray.data.DatasetContext.get_current() + if ctx.strict_mode: + if use_gpu and (not 
given_batch_size or given_batch_size == "default"): + raise StrictModeError( + "`batch_size` must be provided to `map_batches` when requesting GPUs. " + "The optimal batch size depends on the model, data, and GPU used. " + "It is recommended to use the largest batch size that doesn't result " + "in your GPU device running out of memory. You can view the GPU memory " + "usage via the Ray dashboard." + ) + elif given_batch_size == "default": + return ray.data.context.STRICT_MODE_DEFAULT_BATCH_SIZE + else: + return given_batch_size + + else: + if given_batch_size == "default": + return ray.data.context.DEFAULT_BATCH_SIZE + else: + return given_batch_size + + @DeveloperAPI class BlockExecStats: """Execution stats for this block. diff --git a/python/ray/data/context.py b/python/ray/data/context.py index 15b92dd0d5ab..50384d9e7633 100644 --- a/python/ray/data/context.py +++ b/python/ray/data/context.py @@ -121,6 +121,9 @@ # Default batch size for batch transformations. DEFAULT_BATCH_SIZE = 4096 +# Default batch size for batch transformations in strict mode. +STRICT_MODE_DEFAULT_BATCH_SIZE = 1024 + # Whether to enable progress bars. 
DEFAULT_ENABLE_PROGRESS_BARS = not bool( env_integer("RAY_DATA_DISABLE_PROGRESS_BARS", 0) diff --git a/python/ray/data/datastream.py b/python/ray/data/datastream.py index 3240b29e3f1a..7332c38231ef 100644 --- a/python/ray/data/datastream.py +++ b/python/ray/data/datastream.py @@ -91,6 +91,7 @@ from ray.data.block import ( VALID_BATCH_FORMATS, _apply_strict_mode_batch_format, + _apply_strict_mode_batch_size, BatchUDF, Block, BlockAccessor, @@ -110,7 +111,6 @@ WARN_PREFIX, OK_PREFIX, ESTIMATED_SAFE_MEMORY_FRACTION, - DEFAULT_BATCH_SIZE, ) from ray.data.datasource import ( BlockWritePathProvider, @@ -597,14 +597,16 @@ def map_batches( logger.warning("The 'native' batch format has been renamed 'default'.") target_block_size = None - if batch_size == "default": - batch_size = DEFAULT_BATCH_SIZE - elif batch_size is not None: + if batch_size is not None and batch_size != "default": if batch_size < 1: raise ValueError("Batch size cannot be negative or 0") # Enable blocks bundling when batch_size is specified by caller. 
target_block_size = batch_size + batch_size = _apply_strict_mode_batch_size( + batch_size, use_gpu="num_gpus" in ray_remote_args + ) + if batch_format not in VALID_BATCH_FORMATS: raise ValueError( f"The batch format must be one of {VALID_BATCH_FORMATS}, got: " diff --git a/python/ray/data/tests/conftest.py b/python/ray/data/tests/conftest.py index d5bb43a57b6b..9ddc28d0d731 100644 --- a/python/ray/data/tests/conftest.py +++ b/python/ray/data/tests/conftest.py @@ -36,6 +36,14 @@ def ray_start_10_cpus_shared(request): yield res +@pytest.fixture(scope="module") +def enable_strict_mode(): + ctx = ray.data.DataContext.get_current() + ctx.strict_mode = True + yield + ctx.strict_mode = False + + @pytest.fixture(scope="function") def aws_credentials(): import os diff --git a/python/ray/data/tests/test_strict_mode.py b/python/ray/data/tests/test_strict_mode.py index 100097c91f6b..19eb1853f5eb 100644 --- a/python/ray/data/tests/test_strict_mode.py +++ b/python/ray/data/tests/test_strict_mode.py @@ -7,12 +7,8 @@ from ray.data.tests.conftest import * # noqa from ray.tests.conftest import * # noqa -# Force strict mode. 
-ctx = ray.data.DataContext.get_current() -ctx.strict_mode = True - -def test_strict_read_schemas(ray_start_regular_shared): +def test_strict_read_schemas(ray_start_regular_shared, enable_strict_mode): ds = ray.data.range(1) assert ds.take()[0] == {"id": 0} @@ -47,7 +43,7 @@ def test_strict_read_schemas(ray_start_regular_shared): assert "text" in ds.take()[0] -def test_strict_map_output(ray_start_regular_shared): +def test_strict_map_output(ray_start_regular_shared, enable_strict_mode): ds = ray.data.range(1) with pytest.raises(StrictModeError): @@ -84,7 +80,7 @@ def test_strict_map_output(ray_start_regular_shared): ds.map(lambda x: UserDict({"x": object()})).materialize() -def test_strict_default_batch_format(ray_start_regular_shared): +def test_strict_default_batch_format(ray_start_regular_shared, enable_strict_mode): ds = ray.data.range(1) @ray.remote @@ -111,7 +107,7 @@ def f(x): assert isinstance(batch["id"], np.ndarray), batch -def test_strict_tensor_support(ray_start_regular_shared): +def test_strict_tensor_support(ray_start_regular_shared, enable_strict_mode): ds = ray.data.from_items([np.ones(10), np.ones(10)]) assert np.array_equal(ds.take()[0]["item"], np.ones(10)) @@ -122,7 +118,7 @@ def test_strict_tensor_support(ray_start_regular_shared): assert np.array_equal(ds.take()[0]["item"], 4 * np.ones(10)) -def test_strict_value_repr(ray_start_regular_shared): +def test_strict_value_repr(ray_start_regular_shared, enable_strict_mode): ds = ray.data.from_items([{"__value__": np.ones(10)}]) ds = ds.map_batches(lambda x: {"__value__": x["__value__"] * 2}) @@ -131,12 +127,12 @@ def test_strict_value_repr(ray_start_regular_shared): assert np.array_equal(ds.take_batch()["x"][0], 4 * np.ones(10)) -def test_strict_object_support(ray_start_regular_shared): +def test_strict_object_support(ray_start_regular_shared, enable_strict_mode): ds = ray.data.from_items([{"x": 2}, {"x": object()}]) ds.map_batches(lambda x: x, batch_format="numpy").materialize() -def 
test_strict_compute(ray_start_regular_shared): +def test_strict_compute(ray_start_regular_shared, enable_strict_mode): with pytest.raises(StrictModeError): ray.data.range(10).map(lambda x: x, compute="actors").show() with pytest.raises(StrictModeError): @@ -147,7 +143,7 @@ def test_strict_compute(ray_start_regular_shared): ray.data.range(10).map(lambda x: x, compute="tasks").show() -def test_strict_schema(ray_start_regular_shared): +def test_strict_schema(ray_start_regular_shared, enable_strict_mode): import pyarrow from ray.data._internal.pandas_block import PandasBlockSchema @@ -182,7 +178,7 @@ def test_strict_schema(ray_start_regular_shared): assert isinstance(schema.base_schema, PandasBlockSchema) -def test_use_raw_dicts(ray_start_regular_shared): +def test_use_raw_dicts(ray_start_regular_shared, enable_strict_mode): assert type(ray.data.range(10).take(1)[0]) is dict assert type(ray.data.from_items([1]).take(1)[0]) is dict @@ -193,6 +189,14 @@ def checker(x): ray.data.range(10).map(checker).show() +def test_strict_require_batch_size_for_gpu(enable_strict_mode): + ray.shutdown() + ray.init(num_cpus=4, num_gpus=1) + ds = ray.data.range(1) + with pytest.raises(StrictModeError): + ds.map_batches(lambda x: x, num_gpus=1) + + if __name__ == "__main__": import sys From f68533d9d4503f2d39cddf296b0a713d7c942ccf Mon Sep 17 00:00:00 2001 From: Kai Fricke Date: Tue, 25 Apr 2023 09:29:36 +0100 Subject: [PATCH 083/424] [docker] Add script to generate commands to fix docker images (#34362) We sometimes have to manually fix docker images, e.g. by updating a public key, or by updating a dependency. This PR adds a script that generates the commands needed to fix docker images manually. I've added the command to the build-docker-images.py script, so we can use the original tagging function (see #33804). 
Signed-off-by: Kai Fricke --- ci/build/build-docker-images.py | 83 ++++++++++++++++++++++++++++++--- 1 file changed, 77 insertions(+), 6 deletions(-) diff --git a/ci/build/build-docker-images.py b/ci/build/build-docker-images.py index b12fb87745a3..ead5f67c39bf 100644 --- a/ci/build/build-docker-images.py +++ b/ci/build/build-docker-images.py @@ -21,11 +21,15 @@ PYTHON_WHL_VERSION = "cp3" ADDITIONAL_PLATFORMS = ["aarch64"] +DOCKER_HUB_REPO = "rayproject" + DOCKER_HUB_DESCRIPTION = { "base-deps": ( - "Internal Image, refer to " "https://hub.docker.com/r/rayproject/ray" + f"Internal Image, refer to https://hub.docker.com/r/{DOCKER_HUB_REPO}/ray" + ), + "ray-deps": ( + f"Internal Image, refer to https://hub.docker.com/r/{DOCKER_HUB_REPO}/ray" ), - "ray-deps": ("Internal Image, refer to " "https://hub.docker.com/r/rayproject/ray"), "ray": "Official Docker Images for Ray, the distributed computing API.", "ray-ml": "Developer ready Docker Image for Ray.", "ray-worker-container": "Internal Image for CI test", @@ -233,7 +237,7 @@ def _build_docker_image( # can be found. 
build_args["FIND_LINKS_PATH"] = ".whl" - tagged_name = f"rayproject/{image_name}:nightly-{py_version}-{device_tag}" + tagged_name = f"{DOCKER_HUB_REPO}/{image_name}:nightly-{py_version}-{device_tag}" tagged_name = _with_suffix(tagged_name, suffix=suffix) @@ -355,7 +359,7 @@ def build_or_pull_base_images( suffix: Optional[str] = None, ) -> bool: """Returns images to tag and build.""" - repositories = ["rayproject/base-deps", "rayproject/ray-deps"] + repositories = [f"{DOCKER_HUB_REPO}/base-deps", f"{DOCKER_HUB_REPO}/ray-deps"] tags = [ f"nightly-{py_version}-{image_type}" for py_version, image_type in itertools.product(py_versions, image_types) @@ -672,7 +676,7 @@ def push_readmes(merge_build: bool): "PUSHRM_DEBUG": 1, "PUSHRM_SHORT": tag_line, } - cmd_string = f"rayproject/{image}" + cmd_string = f"{DOCKER_HUB_REPO}/{image}" print( DOCKER_CLIENT.containers.run( @@ -879,5 +883,72 @@ def main( # push_readmes(build_type is MERGE) +def fix_docker_images( + image: str = "ray-ml", + version: str = "nightly", + repo: str = DOCKER_HUB_REPO, +): + """Print commands to manually update docker images post-release. + + This function prints commands that can be run to add new layers to + fix docker images post-release, e.g. when dependencies have to be fixed + or public keys expired. + + The commands can be copied/pasted and executed in a shell. + + Example: + FIX_IMAGE=ray-ml FIX_VERSION=2.3.0 python build-docker-images.py + + """ + tags = create_image_tags( + image_name=image, + py_versions=list(PY_MATRIX.keys()), + image_types=list(BASE_IMAGES.keys()), + specific_tag=None, # Set to `latest` for latest image fixes + version=version, + suffix=None, + ) + print(dict(tags)) + + # Pull images we want to rebuild + for base_tag in tags: + base_image = f"{repo}/{image}:{base_tag}" + + print(f"docker pull {base_image}") + + # Re-tag these base images as e.g. pinned/ray-ml:tag + # This is so we can re-run the build command safely. 
+ pinned_base_image = {} + for base_tag in tags: + base_image = f"{repo}/{image}:{base_tag}" + pinned_image = f"pinned/{image}:{base_tag}" + + pinned_base_image[base_image] = pinned_image + + print(f"docker tag {base_image} {pinned_image}") + + # Create commands to build the new layer for the base images. + for base_tag in tags: + base_image = f"{repo}/{image}:{base_tag}" + pinned_image = pinned_base_image[base_image] + + print(f"docker build --build-arg BASE_IMAGE={pinned_image} -t {base_image} .") + for subtag in tags[base_tag]: + if subtag == base_tag: + continue + + # This will overwrite the rayproject/ray-ml:tag image + # - but we still have the pinned/ image if we want to re-run! + target_image = f"{repo}/{image}:{subtag}" + print(f"docker tag {base_image} {target_image}") + + # Lastly, push new layers + print(f"docker push --all-tags {repo}/{image}") + + if __name__ == "__main__": - main() + fix_image = os.environ.get("FIX_IMAGE") + if not fix_image: + main() + else: + fix_docker_images(fix_image, os.environ.get("FIX_VERSION")) From 55d5009135eafc764072a2a90eb2a058a7d03cc6 Mon Sep 17 00:00:00 2001 From: Artur Niederfahrenhorst Date: Tue, 25 Apr 2023 12:15:50 +0200 Subject: [PATCH 084/424] [RLlib] Disable RL Modules for policy server examples. 
(#34483) --- rllib/algorithms/algorithm_config.py | 73 ++++++++++++------- .../algorithms/tests/test_algorithm_config.py | 5 +- rllib/examples/serving/cartpole_server.py | 4 + rllib/examples/serving/unity3d_server.py | 5 ++ rllib/policy/tests/test_policy.py | 10 ++- rllib/tests/test_rllib_train_and_evaluate.py | 18 ++++- .../exploration/tests/test_explorations.py | 3 + 7 files changed, 85 insertions(+), 33 deletions(-) diff --git a/rllib/algorithms/algorithm_config.py b/rllib/algorithms/algorithm_config.py index 249781cda12e..b2b3e8467453 100644 --- a/rllib/algorithms/algorithm_config.py +++ b/rllib/algorithms/algorithm_config.py @@ -419,8 +419,9 @@ def __init__(self, algo_class=None): # `self.rl_module()` self.rl_module_spec = None self._enable_rl_module_api = False - # Whether to error out if exploration config is set when using RLModules. - self._validate_exploration_conf_and_rl_modules = True + # Helper to keep track of the original exploration config when dis-/enabling + # rl modules. + self.__prior_exploration_config = None # `self.experimental()` self._tf_policy_handles_more_than_one_loss = False @@ -568,6 +569,13 @@ def update_from_dict( """ eval_call = {} + # We deal with this special key before all others because it may influence + # stuff like "exploration_config". + # Namely, we want to re-instantiate the exploration config this config had + # inside `self.rl_module()` before potentially overwriting it in the following. + if "_enable_rl_module_api" in config_dict: + self.rl_module(_enable_rl_module_api=config_dict["_enable_rl_module_api"]) + # Modify our properties one by one. for key, value in config_dict.items(): key = self._translate_special_keys(key, warn_deprecated=False) @@ -577,8 +585,11 @@ def update_from_dict( if key == TRIAL_INFO: continue + if key == "_enable_rl_module_api": + # We've dealt with this above. + continue # Set our multi-agent settings. 
- if key == "multiagent": + elif key == "multiagent": kwargs = { k: value[k] for k in [ @@ -863,13 +874,13 @@ def validate(self) -> None: self.enable_connectors = True # Explore parameter cannot be False with RLModule API enabled. - # The reason is that the explore is not just a parameter that will get passed + # The reason is that `explore` is not just a parameter that will get passed # down to the policy.compute_actions() anymore. It is a phase in which RLModule. - # forward_exploration() will get called during smapling. If user needs to + # forward_exploration() will get called during sampling. If user needs to # really disable the stochasticity during this phase, they need to override the # RLModule.forward_exploration() method or setup model parameters such that it - # will disable the stocalisticity of this method (e.g. by setting the std to 0 - # or setting temprature to 0 for the Categorical distribution). + # will disable the stochasticity of this method (e.g. by setting the std to 0 + # or setting temperature to 0 for the Categorical distribution). if self._enable_rl_module_api and not self.explore: raise ValueError( @@ -1002,25 +1013,16 @@ def validate(self) -> None: self.rl_module_spec = default_rl_module_spec if self.exploration_config: - if self._validate_exploration_conf_and_rl_modules: - # This is not compatible with RLModules, which have a method - # `forward_exploration` to specify custom exploration behavior. - raise ValueError( - "When RLModule API are enabled, exploration_config can not be " - "set. If you want to implement custom exploration behaviour, " - "please modify the `forward_exploration` method of the " - "RLModule at hand. On configs that have a default exploration " - "config, this must be done with " - "`config.exploration_config={}`." - ) - else: - # RLModules don't support exploration_configs anymore. - # AlgorithmConfig has a default exploration config. 
- logger.warning( - "When RLModule API are enabled, exploration_config " - "will be ignored. Disable RLModule API make use of an " - "exploration_config." - ) + # This is not compatible with RLModules, which have a method + # `forward_exploration` to specify custom exploration behavior. + raise ValueError( + "When RLModule API are enabled, exploration_config can not be " + "set. If you want to implement custom exploration behaviour, " + "please modify the `forward_exploration` method of the " + "RLModule at hand. On configs that have a default exploration " + "config, this must be done with " + "`config.exploration_config={}`." + ) # make sure the resource requirements for learner_group is valid if self.num_learner_workers == 0 and self.num_gpus_per_worker > 1: @@ -2420,7 +2422,26 @@ def rl_module( "config, this must be done with " "`config.exploration_config={}`." ) + self.__prior_exploration_config = self.exploration_config self.exploration_config = {} + elif _enable_rl_module_api is False and not self.exploration_config: + if self.__prior_exploration_config is not None: + logger.warning( + f"Setting `exploration_config=" + f"{self.__prior_exploration_config}` because you set " + f"`_enable_rl_modules=False`. This exploration config was " + f"restored from a prior exploration config that was overriden " + f"when setting `_enable_rl_modules=True`. This occurs because " + f"when RLModule API are enabled, exploration_config can not " + f"be set." + ) + self.exploration_config = self.__prior_exploration_config + self.__prior_exploration_config = None + else: + logger.warning( + "config._enable_rl_module_api was set to False, but no prior " + "exploration config was found to be restored." + ) else: # throw a warning if the user has used this API but not enabled it. 
logger.warning( diff --git a/rllib/algorithms/tests/test_algorithm_config.py b/rllib/algorithms/tests/test_algorithm_config.py index dbb873a4f641..cc949c45f0e5 100644 --- a/rllib/algorithms/tests/test_algorithm_config.py +++ b/rllib/algorithms/tests/test_algorithm_config.py @@ -4,7 +4,8 @@ import ray from ray.rllib.algorithms.algorithm_config import AlgorithmConfig from ray.rllib.algorithms.callbacks import make_multi_callbacks -from ray.rllib.algorithms.ppo import PPO, PPOConfig +from ray.rllib.algorithms.ppo import PPOConfig +from ray.rllib.algorithms.ppo import PPO from ray.rllib.algorithms.ppo.torch.ppo_torch_rl_module import PPOTorchRLModule from ray.rllib.core.rl_module.rl_module import SingleAgentRLModuleSpec from ray.rllib.core.rl_module.marl_module import ( @@ -16,7 +17,7 @@ class TestAlgorithmConfig(unittest.TestCase): @classmethod def setUpClass(cls): - ray.init(num_cpus=6) + ray.init(num_cpus=6, local_mode=True) @classmethod def tearDownClass(cls): diff --git a/rllib/examples/serving/cartpole_server.py b/rllib/examples/serving/cartpole_server.py index d157ef8030e6..d800ab837f2e 100755 --- a/rllib/examples/serving/cartpole_server.py +++ b/rllib/examples/serving/cartpole_server.py @@ -180,6 +180,10 @@ def _input(ioctx): # Set to INFO so we'll see the server's actual address:port. .debugging(log_level="INFO") ) + # Disable RLModules because they need connectors + # TODO(Artur): Deprecate ExternalEnv and reenable connectors and RL Modules here + config.rl_module(_enable_rl_module_api=False) + config.training(_enable_learner_api=False) # DQN. 
if args.run == "DQN" or args.run == "APEX" or args.run == "R2D2": diff --git a/rllib/examples/serving/unity3d_server.py b/rllib/examples/serving/unity3d_server.py index 700ca6759390..b04a2397f524 100755 --- a/rllib/examples/serving/unity3d_server.py +++ b/rllib/examples/serving/unity3d_server.py @@ -151,6 +151,11 @@ def _input(ioctx): .evaluation(off_policy_estimation_methods={}) ) + # Disable RLModules because they need connectors + # TODO(Artur): Deprecate ExternalEnv and reenable connectors and RL Modules here + config.rl_module(_enable_rl_module_api=False) + config._enable_learner_api = False + # Create the Trainer used for Policy serving. algo = config.build() diff --git a/rllib/policy/tests/test_policy.py b/rllib/policy/tests/test_policy.py index 66db2f8b7a10..77e16e13bc4f 100644 --- a/rllib/policy/tests/test_policy.py +++ b/rllib/policy/tests/test_policy.py @@ -32,7 +32,10 @@ def test_policy_get_and_set_state(self): policy.set_state(state1) state3 = policy.get_state() # Make sure everything is the same. - check(state1["_exploration_state"], state3["_exploration_state"]) + # This is only supported without RLModule API. See AlgorithmConfig for + # more info. + if not config._enable_rl_module_api: + check(state1["_exploration_state"], state3["_exploration_state"]) check(state1["global_timestep"], state3["global_timestep"]) check(state1["weights"], state3["weights"]) @@ -42,7 +45,10 @@ def test_policy_get_and_set_state(self): if isinstance(policy, (EagerTFPolicyV2, DynamicTFPolicyV2, TorchPolicyV2)): policy_restored_from_scratch = Policy.from_state(state3) state4 = policy_restored_from_scratch.get_state() - check(state3["_exploration_state"], state4["_exploration_state"]) + # This is only supported without RLModule API. See AlgorithmConfig for + # more info. 
+ if not config._enable_rl_module_api: + check(state3["_exploration_state"], state4["_exploration_state"]) check(state3["global_timestep"], state4["global_timestep"]) # For tf static graph, the new model has different layer names # (as it gets written into the same graph as the old one). diff --git a/rllib/tests/test_rllib_train_and_evaluate.py b/rllib/tests/test_rllib_train_and_evaluate.py index 172c86b1da96..0d540abb6a1d 100644 --- a/rllib/tests/test_rllib_train_and_evaluate.py +++ b/rllib/tests/test_rllib_train_and_evaluate.py @@ -96,12 +96,23 @@ def learn_test_plus_evaluate(algo: str, env="CartPole-v1"): print("Saving results to {}".format(tmp_dir)) rllib_dir = str(Path(__file__).parent.parent.absolute()) + + # This is only supported without RLModule API. See AlgorithmConfig for + # more info. We need to prefetch the default config that will be used when we + # call rllib train here to see if the RLModule API is enabled. + algo_cls = get_trainable_cls(algo) + config = algo_cls.get_default_config() + if config._enable_rl_module_api: + eval_ = ', \\"evaluation_config\\": {}' + else: + eval_ = ', \\"evaluation_config\\": {\\"explore\\": false}' + print("RLlib dir = {}\nexists={}".format(rllib_dir, os.path.exists(rllib_dir))) os.system( "python {}/train.py --local-dir={} --run={} " "--checkpoint-freq=1 --checkpoint-at-end ".format(rllib_dir, tmp_dir, algo) - + '--config="{\\"num_gpus\\": 0, \\"num_workers\\": 1, ' - '\\"evaluation_config\\": {\\"explore\\": false}' + + '--config="{\\"num_gpus\\": 0, \\"num_workers\\": 1' + + eval_ + fw_ + '}" ' + '--stop="{\\"episode_reward_mean\\": 100.0}"' @@ -182,7 +193,8 @@ def policy_fn(agent_id, episode, **kwargs): policy_mapping_fn=policy_fn, ) .resources(num_gpus=0) - .evaluation(evaluation_config=AlgorithmConfig.overrides(explore=False)) + .evaluation(evaluation_config=AlgorithmConfig.overrides(explore=True)) + .evaluation(evaluation_config=AlgorithmConfig.overrides(explore=True)) .rl_module( 
rl_module_spec=MultiAgentRLModuleSpec( module_specs={ diff --git a/rllib/utils/exploration/tests/test_explorations.py b/rllib/utils/exploration/tests/test_explorations.py index 4e969347c58e..fc5830adf051 100644 --- a/rllib/utils/exploration/tests/test_explorations.py +++ b/rllib/utils/exploration/tests/test_explorations.py @@ -28,6 +28,9 @@ def do_test_explorations(config, dummy_obs, prev_a=None, expected_mean_action=No for exploration in [None, "Random"]: local_config = config.copy() if exploration == "Random": + if local_config._enable_rl_module_api: + # TODO(Artur): Support Random exploration with RL Modules. + continue local_config.exploration(exploration_config={"type": "Random"}) print("exploration={}".format(exploration or "default")) From c31e1c6ed828bad6bce2ba93afa3a2cbb8e426d2 Mon Sep 17 00:00:00 2001 From: Kai Fricke Date: Tue, 25 Apr 2023 13:08:01 +0100 Subject: [PATCH 085/424] [air] Use filesystem wrapper to exclude files from upload (#34102) Ray Tune uploads experiment state using pyarrow. When cloud checkpointing is configured, the driver will exclude any trial-level checkpoints. Pyarrow does not natively support file exclusion, though - instead, we repeatedly call `pyarrow.fs.copy_files` on single non-excluded files. This seems to be inefficient as the connection to the remote filesystem is opened and closed repeatedly. It also means we can never leverage multi-threaded upload. This PR implements a custom fsspec-based local filesystem that excludes files on the selector level. Thus, we can call pyarrow.fs.copy_files exactly once, with a selector that does not see the excluded files. 
Edit: [See here for benchmark results](https://github.com/ray-project/ray/pull/34102#issuecomment-1517846074) Signed-off-by: Kai Fricke --- doc/source/tune/api/env.rst | 2 + python/ray/air/_internal/remote_storage.py | 71 ++++++++++++++++++- python/ray/air/tests/test_remote_storage.py | 40 +++++++++++ python/ray/tune/execution/experiment_state.py | 36 +++++++++- 4 files changed, 144 insertions(+), 5 deletions(-) diff --git a/doc/source/tune/api/env.rst b/doc/source/tune/api/env.rst index 8ff39bd73bae..df8b9580f5ae 100644 --- a/doc/source/tune/api/env.rst +++ b/doc/source/tune/api/env.rst @@ -89,6 +89,8 @@ These are the environment variables Ray Tune currently considers: repeatedly every this amount of seconds. Defaults to 60 (seconds). * **TUNE_WARN_EXCESSIVE_EXPERIMENT_CHECKPOINT_SYNC_THRESHOLD_S**: Threshold for throwing a warning if the experiment state is synced multiple times in that many seconds. Defaults to 30 (seconds). +* **TUNE_WARN_SLOW_EXPERIMENT_CHECKPOINT_SYNC_THRESHOLD_S**: Threshold for throwing a warning if the experiment state syncing + takes longer than this time in seconds. Defaults to 30 (seconds). * **TUNE_STATE_REFRESH_PERIOD**: Frequency of updating the resource tracking from Ray. Defaults to 10 (seconds). * **TUNE_RESTORE_RETRY_NUM**: The number of retries that are done before a particular trial's restore is determined unsuccessful. After that, the trial is not restored to its previous checkpoint but rather from scratch. 
diff --git a/python/ray/air/_internal/remote_storage.py b/python/ray/air/_internal/remote_storage.py index d76f0012ac39..684b8015fbc5 100644 --- a/python/ray/air/_internal/remote_storage.py +++ b/python/ray/air/_internal/remote_storage.py @@ -12,9 +12,11 @@ try: import fsspec + from fsspec.implementations.local import LocalFileSystem except ImportError: fsspec = None + LocalFileSystem = object try: import pyarrow @@ -40,6 +42,52 @@ def create_dir(self, path, recursive): from ray import logger +class _ExcludingLocalFilesystem(LocalFileSystem): + """LocalFileSystem wrapper to exclude files according to patterns. + + Args: + exclude: List of patterns that are applied to files returned by + ``self.find()``. If a file path matches this pattern, it will + be excluded. + + """ + + def __init__(self, exclude: List[str], **kwargs): + super().__init__(**kwargs) + self._exclude = exclude + + @property + def fsid(self): + return "_excluding_local" + + def _should_exclude(self, name: str) -> bool: + """Return True if `name` matches any of the `self._exclude` patterns.""" + alt = None + if os.path.isdir(name): + # If this is a directory, also test it with trailing slash + alt = os.path.join(name, "") + for excl in self._exclude: + if fnmatch.fnmatch(name, excl): + return True + if alt and fnmatch.fnmatch(alt, excl): + return True + return False + + def find(self, path, maxdepth=None, withdirs=False, detail=False, **kwargs): + """Call parent find() and exclude from result.""" + names = super().find( + path, maxdepth=maxdepth, withdirs=withdirs, detail=detail, **kwargs + ) + if detail: + return { + name: out + for name, out in names.items() + if not self._should_exclude(name) + } + else: + return [name for name in names if not self._should_exclude(name)] + + def _pyarrow_fs_copy_files( source, destination, source_filesystem=None, destination_filesystem=None, **kwargs ): @@ -334,14 +382,33 @@ def upload_to_uri( if not exclude: _ensure_directory(bucket_path, fs=fs) 
_pyarrow_fs_copy_files(local_path, bucket_path, destination_filesystem=fs) + elif fsspec: + # If fsspec is available, prefer it because it's more efficient than + # calling pyarrow.fs.copy_files multiple times + _upload_to_uri_with_exclude_fsspec( + local_path=local_path, fs=fs, bucket_path=bucket_path, exclude=exclude + ) else: # Walk the filetree and upload - _upload_to_uri_with_exclude( + _upload_to_uri_with_exclude_pyarrow( local_path=local_path, fs=fs, bucket_path=bucket_path, exclude=exclude ) -def _upload_to_uri_with_exclude( +def _upload_to_uri_with_exclude_fsspec( + local_path: str, fs: "pyarrow.fs", bucket_path: str, exclude: Optional[List[str]] +) -> None: + local_fs = _ExcludingLocalFilesystem(exclude=exclude) + handler = pyarrow.fs.FSSpecHandler(local_fs) + source_fs = pyarrow.fs.PyFileSystem(handler) + + _ensure_directory(bucket_path, fs=fs) + _pyarrow_fs_copy_files( + local_path, bucket_path, source_filesystem=source_fs, destination_filesystem=fs + ) + + +def _upload_to_uri_with_exclude_pyarrow( local_path: str, fs: "pyarrow.fs", bucket_path: str, exclude: Optional[List[str]] ) -> None: def _should_exclude(candidate: str) -> bool: diff --git a/python/ray/air/tests/test_remote_storage.py b/python/ray/air/tests/test_remote_storage.py index cc3c98209a6f..32ed694045f9 100644 --- a/python/ray/air/tests/test_remote_storage.py +++ b/python/ray/air/tests/test_remote_storage.py @@ -131,6 +131,46 @@ def test_upload_exclude_multimatch(temp_data_dirs): assert_file(False, tmp_target, "subdir_exclude/something/somewhere.txt") +@pytest.mark.parametrize("no_fsspec", [False, True]) +def test_upload_local_exclude_multi(temp_data_dirs, no_fsspec): + if no_fsspec: + with patch("ray.air._internal.remote_storage.fsspec", None): + return test_upload_local_exclude_multi(temp_data_dirs, no_fsspec=False) + + tmp_source, tmp_target = temp_data_dirs + + upload_to_uri(tmp_source, tmp_target, exclude=["*_exclude.txt", "*_exclude/*"]) + + assert_file(True, tmp_target, 
"level0.txt") + assert_file(False, tmp_target, "level0_exclude.txt") + assert_file(True, tmp_target, "subdir/level1.txt") + assert_file(False, tmp_target, "subdir/level1_exclude.txt") + assert_file(True, tmp_target, "subdir/nested/level2.txt") + assert_file(False, tmp_target, "subdir_nested_level2_exclude.txt") + assert_file(False, tmp_target, "subdir_exclude") + assert_file(False, tmp_target, "subdir_exclude/something/somewhere.txt") + + +@pytest.mark.parametrize("no_fsspec", [False, True]) +def test_upload_local_exclude_multimatch(temp_data_dirs, no_fsspec): + if no_fsspec: + with patch("ray.air._internal.remote_storage.fsspec", None): + return test_upload_local_exclude_multimatch(temp_data_dirs, no_fsspec=False) + + tmp_source, tmp_target = temp_data_dirs + + upload_to_uri(tmp_source, tmp_target, exclude=["*_exclude*"]) + + assert_file(True, tmp_target, "level0.txt") + assert_file(False, tmp_target, "level0_exclude.txt") + assert_file(True, tmp_target, "subdir/level1.txt") + assert_file(False, tmp_target, "subdir/level1_exclude.txt") + assert_file(True, tmp_target, "subdir/nested/level2.txt") + assert_file(False, tmp_target, "subdir_nested_level2_exclude.txt") + assert_file(False, tmp_target, "subdir_exclude") + assert_file(False, tmp_target, "subdir_exclude/something/somewhere.txt") + + def test_get_recursive_files_race_con(temp_data_dirs): tmp_source, _ = temp_data_dirs diff --git a/python/ray/tune/execution/experiment_state.py b/python/ray/tune/execution/experiment_state.py index 74fbbb990cc1..fa719d3f7463 100644 --- a/python/ray/tune/execution/experiment_state.py +++ b/python/ray/tune/execution/experiment_state.py @@ -16,7 +16,6 @@ from ray.tune.impl.out_of_band_serialize_dataset import out_of_band_serialize_dataset from ray.tune.syncer import SyncConfig, get_node_to_storage_syncer - logger = logging.getLogger(__name__) @@ -149,6 +148,13 @@ def __init__( # Upload triggered by trial checkpoints self._sync_every_n_trial_checkpoints = 
sync_every_n_trial_checkpoints self._trial_num_checkpoints_since_last_sync: Dict[Trial, int] = Counter() + + self._slow_sync_threshold = float( + os.environ.get( + "TUNE_WARN_SLOW_EXPERIMENT_CHECKPOINT_SYNC_THRESHOLD_S", "30" + ) + ) + self._excessive_sync_threshold = float( os.environ.get( "TUNE_WARN_EXCESSIVE_EXPERIMENT_CHECKPOINT_SYNC_THRESHOLD_S", "30" @@ -277,17 +283,41 @@ def sync_up(self, force: bool = False, wait: bool = False) -> bool: exclude=exclude, ) + start_time = time.monotonic() if wait: self._syncer.wait() + now = time.monotonic() + sync_time_taken = now - start_time + + if sync_time_taken > self._slow_sync_threshold: + try: + import fsspec + except Exception: + fsspec = None + + fsspec_msg = "" + if fsspec is None: + fsspec_msg = ( + "If your data is small, try installing fsspec " + "(`pip install fsspec`) for more efficient local file parsing. " + ) + + logger.warning( + "Syncing the experiment checkpoint to cloud took a long time with " + f"{sync_time_taken:.2f} seconds. This can be due to a large number " + f"of trials, large logfiles, or throttling from the " + f"remote storage provider for too frequent syncs. {fsspec_msg}" + f"If your `CheckpointConfig.num_to_keep` is a low number, this can " + f"trigger frequent syncing, in which case you should increase it. " + ) + if not synced: return False self._should_force_cloud_sync = False self._trial_num_checkpoints_since_last_sync.clear() - # syncing might have taken some time, so we grab the current timestamp again - now = time.time() if now - self._last_sync_time < self._excessive_sync_threshold: logger.warning( "Experiment checkpoint syncing has been triggered multiple " From 79e824c16de80a11b4acdbc5ebbeb569fcfabab8 Mon Sep 17 00:00:00 2001 From: Sihan Wang Date: Tue, 25 Apr 2023 09:26:33 -0700 Subject: [PATCH 086/424] [Serve] Add default app name (#34260) Add default name for applications deploy. Fix a bug for not setting the deployment name correctly for function deployment. 
Fix unit tests because of the default application name change. --- python/ray/serve/_private/constants.py | 2 +- .../_private/deployment_function_node.py | 8 -- .../serve/_private/deployment_graph_build.py | 13 +- python/ray/serve/api.py | 5 +- python/ray/serve/schema.py | 5 +- python/ray/serve/tests/test_advanced.py | 3 +- python/ray/serve/tests/test_api.py | 30 ++--- .../serve/tests/test_autoscaling_policy.py | 119 ++++++++++++++---- python/ray/serve/tests/test_cli.py | 31 +++-- .../serve/tests/test_constructor_failure.py | 20 ++- python/ray/serve/tests/test_controller.py | 13 +- .../serve/tests/test_controller_recovery.py | 24 ++-- python/ray/serve/tests/test_deploy.py | 36 +++--- python/ray/serve/tests/test_deploy_2.py | 27 ++-- .../ray/serve/tests/test_deployment_graph.py | 9 +- .../test_deployment_graph_autoscaling.py | 7 ++ python/ray/serve/tests/test_failure.py | 9 +- python/ray/serve/tests/test_gcs_failure.py | 12 +- python/ray/serve/tests/test_get_deployment.py | 4 +- python/ray/serve/tests/test_grpc.py | 14 ++- python/ray/serve/tests/test_handle.py | 10 +- python/ray/serve/tests/test_healthcheck.py | 10 +- python/ray/serve/tests/test_metrics.py | 22 ++-- python/ray/serve/tests/test_persistence.py | 4 +- python/ray/serve/tests/test_regression.py | 4 +- python/ray/serve/tests/test_runtime_env.py | 6 +- python/ray/serve/tests/test_standalone.py | 8 +- python/ray/serve/tests/test_standalone2.py | 41 ++++-- 28 files changed, 323 insertions(+), 173 deletions(-) diff --git a/python/ray/serve/_private/constants.py b/python/ray/serve/_private/constants.py index 651c4e12c698..b94683f381b8 100644 --- a/python/ray/serve/_private/constants.py +++ b/python/ray/serve/_private/constants.py @@ -29,7 +29,7 @@ DEFAULT_GRPC_PORT = 9000 #: Default Serve application name -SERVE_DEFAULT_APP_NAME = "" +SERVE_DEFAULT_APP_NAME = "default" #: Separator between app name and deployment name when we prepend #: the app name to each deployment name. 
This prepending is currently diff --git a/python/ray/serve/_private/deployment_function_node.py b/python/ray/serve/_private/deployment_function_node.py index be47328d80c1..c53f816e7211 100644 --- a/python/ray/serve/_private/deployment_function_node.py +++ b/python/ray/serve/_private/deployment_function_node.py @@ -1,4 +1,3 @@ -import inspect from typing import Any, Callable, Dict, List, Union from ray.dag.dag_node import DAGNode @@ -35,13 +34,6 @@ def __init__( ] deployment_shell = schema_to_deployment(deployment_schema) - # Prefer user specified name to override the generated one. - if ( - inspect.isfunction(func_body) - and deployment_shell.name != func_body.__name__ - ): - self._deployment_name = deployment_shell.name - # Set the route prefix, prefer the one user supplied, # otherwise set it to /deployment_name if ( diff --git a/python/ray/serve/_private/deployment_graph_build.py b/python/ray/serve/_private/deployment_graph_build.py index c75542d0e92a..d06c4a05442f 100644 --- a/python/ray/serve/_private/deployment_graph_build.py +++ b/python/ray/serve/_private/deployment_graph_build.py @@ -66,7 +66,8 @@ def build(ray_dag_root_node: DAGNode, name: str = None) -> List[Deployment]: should be executable via `ray_dag_root_node.execute(user_input)` and should have `InputNode` in it. name: Application name,. If provided, formatting all the deployment name to - {name}_{deployment_name} + {name}_{deployment_name}, if not provided, the deployment name won't be + updated. Returns: deployments: All deployments needed for an e2e runnable serve pipeline, @@ -273,6 +274,16 @@ def replace_with_handle(node): dag_node._body.__annotations__["return"] ) + # Set the deployment name if the user provides. 
+ if "deployment_schema" in dag_node._bound_other_args_to_resolve: + schema = dag_node._bound_other_args_to_resolve["deployment_schema"] + if ( + inspect.isfunction(dag_node._body) + and schema.name != dag_node._body.__name__ + ): + deployment_name = schema.name + + # Update the deployment name if the application name provided. if name: deployment_name = name + DEPLOYMENT_NAME_PREFIX_SEPARATOR + deployment_name diff --git a/python/ray/serve/api.py b/python/ray/serve/api.py index 82568a02f2fa..28e0e0175f88 100644 --- a/python/ray/serve/api.py +++ b/python/ray/serve/api.py @@ -548,7 +548,7 @@ def run( @PublicAPI(stability="alpha") -def build(target: Application, name: str = SERVE_DEFAULT_APP_NAME) -> BuiltApplication: +def build(target: Application, name: str = None) -> BuiltApplication: """Builds a Serve application into a static, built application. Resolves the provided Application object into a list of deployments. @@ -562,7 +562,8 @@ def build(target: Application, name: str = SERVE_DEFAULT_APP_NAME) -> BuiltAppli Args: target: The Serve application to run consisting of one or more deployments. - name: The name of the Serve application. + name: The name of the Serve application. When name is not provided, the + deployment name won't be updated. (SINGLE_APP use case.) Returns: The static built Serve application. 
diff --git a/python/ray/serve/schema.py b/python/ray/serve/schema.py index 4f1c7e1a7c2e..ee61b8c5f0e2 100644 --- a/python/ray/serve/schema.py +++ b/python/ray/serve/schema.py @@ -15,6 +15,7 @@ from ray.serve.config import DeploymentMode from ray.serve._private.utils import DEFAULT, dict_keys_snake_to_camel_case from ray.util.annotations import DeveloperAPI, PublicAPI +from ray.serve._private.constants import SERVE_DEFAULT_APP_NAME def _route_prefix_format(cls, v): @@ -304,9 +305,7 @@ class ServeApplicationSchema(BaseModel, extra=Extra.forbid): """ name: str = Field( - # TODO(cindy): eventually we should set the default app name to a non-empty - # string and forbid empty app names. - default="", + default=SERVE_DEFAULT_APP_NAME, description=( "Application name, the name should be unique within the serve instance" ), diff --git a/python/ray/serve/tests/test_advanced.py b/python/ray/serve/tests/test_advanced.py index 495ba9a7cb2f..73c3d6c6f8b2 100644 --- a/python/ray/serve/tests/test_advanced.py +++ b/python/ray/serve/tests/test_advanced.py @@ -6,6 +6,7 @@ import ray from ray import serve from ray._private.test_utils import SignalActor +from ray.serve._private.constants import SERVE_DEFAULT_APP_NAME def test_serve_forceful_shutdown(serve_instance): @@ -16,7 +17,7 @@ def sleeper(): handle = serve.run(sleeper.bind()) ref = handle.remote() - sleeper.delete() + serve.delete(SERVE_DEFAULT_APP_NAME) with pytest.raises(ray.exceptions.RayActorError): ray.get(ref) diff --git a/python/ray/serve/tests/test_api.py b/python/ray/serve/tests/test_api.py index 5bc5e3da2d5b..597a6998723a 100644 --- a/python/ray/serve/tests/test_api.py +++ b/python/ray/serve/tests/test_api.py @@ -17,6 +17,10 @@ from ray.serve.drivers import DAGDriver from ray.serve.exceptions import RayServeException from ray.serve._private.api import call_app_builder_with_args_if_necessary +from ray.serve._private.constants import ( + SERVE_DEFAULT_APP_NAME, + DEPLOYMENT_NAME_PREFIX_SEPARATOR, +) @serve.deployment() 
@@ -423,27 +427,6 @@ def __call__(self, *args): assert ray.get(ingress_handle.remote()) == "got f" -def test_run_delete_old_deployments(serve_instance): - """Check that serve.run() can remove all old deployments""" - - @serve.deployment(name="f", route_prefix="/test1") - def f(): - return "got f" - - @serve.deployment(name="g", route_prefix="/test2") - def g(): - return "got g" - - ingress_handle = serve.run(f.bind()) - assert ray.get(ingress_handle.remote()) == "got f" - - ingress_handle = serve.run(g.bind()) - assert ray.get(ingress_handle.remote()) == "got g" - - assert "g" in serve.list_deployments() - assert "f" not in serve.list_deployments() - - class TestSetOptions: def test_set_options_basic(self): @serve.deployment( @@ -574,7 +557,10 @@ def g(): serve.run(g.bind()) deployment_info = ray.get(controller._all_running_replicas.remote()) - assert "g" in deployment_info + assert ( + f"{SERVE_DEFAULT_APP_NAME}{DEPLOYMENT_NAME_PREFIX_SEPARATOR}g" + in deployment_info + ) @serve.deployment def f(): diff --git a/python/ray/serve/tests/test_autoscaling_policy.py b/python/ray/serve/tests/test_autoscaling_policy.py index 0bdcb4b7b75c..1805c84d718f 100644 --- a/python/ray/serve/tests/test_autoscaling_policy.py +++ b/python/ray/serve/tests/test_autoscaling_policy.py @@ -17,7 +17,11 @@ from ray.serve._private.common import DeploymentInfo from ray.serve._private.common import ReplicaState from ray.serve.config import AutoscalingConfig -from ray.serve._private.constants import CONTROL_LOOP_PERIOD_S +from ray.serve._private.constants import ( + CONTROL_LOOP_PERIOD_S, + SERVE_DEFAULT_APP_NAME, + DEPLOYMENT_NAME_PREFIX_SEPARATOR, +) from ray.serve.controller import ServeController from ray.serve.deployment import Deployment import ray.experimental.state.api as state_api @@ -108,28 +112,38 @@ def test_smoothing_factor(self): assert 5 <= desired_num_replicas <= 8 # 10 + 0.5 * (2.5 - 10) = 6.25 -def get_running_replicas(controller: ServeController, deployment: Deployment) -> 
List: +def get_running_replicas( + controller: ServeController, deployment: Deployment, app_name +) -> List: """Get the replicas currently running for given deployment""" + if app_name: + deployment_name = app_name + DEPLOYMENT_NAME_PREFIX_SEPARATOR + deployment.name + else: + deployment_name = deployment.name replicas = ray.get( - controller._dump_replica_states_for_testing.remote(deployment.name) + controller._dump_replica_states_for_testing.remote(deployment_name) ) running_replicas = replicas.get([ReplicaState.RUNNING]) return running_replicas def get_running_replica_tags( - controller: ServeController, deployment: Deployment + controller: ServeController, + deployment: Deployment, + app_name: str = SERVE_DEFAULT_APP_NAME, ) -> List: """Get the replica tags of running replicas for given deployment""" - running_replicas = get_running_replicas(controller, deployment) + running_replicas = get_running_replicas(controller, deployment, app_name) return [replica.replica_tag for replica in running_replicas] def get_num_running_replicas( - controller: ServeController, deployment: Deployment + controller: ServeController, + deployment: Deployment, + app_name: str = SERVE_DEFAULT_APP_NAME, ) -> int: """Get the amount of replicas currently running for given deployment""" - running_replicas = get_running_replicas(controller, deployment) + running_replicas = get_running_replicas(controller, deployment, app_name) return len(running_replicas) @@ -167,7 +181,11 @@ def test_assert_no_replicas_deprovisioned(): assert_no_replicas_deprovisioned(replica_tags_2, replica_tags_1) -def get_deployment_start_time(controller: ServeController, deployment: Deployment): +def get_deployment_start_time( + controller: ServeController, + deployment: Deployment, + app_name: str = SERVE_DEFAULT_APP_NAME, +): """Return start time for given deployment""" deployment_route_list = DeploymentRouteList.FromString( ray.get(controller.list_deployments.remote()) @@ -179,7 +197,11 @@ def 
get_deployment_start_time(controller: ServeController, deployment: Deploymen ) for deployment_route in deployment_route_list.deployment_routes } - deployment_info, _route_prefix = deployments[deployment.name] + if app_name: + deployment_name = app_name + DEPLOYMENT_NAME_PREFIX_SEPARATOR + deployment.name + else: + deployment_name = deployment.name + deployment_info, _route_prefix = deployments[deployment_name] return deployment_info.start_time_ms @@ -673,7 +695,6 @@ def __call__(self): controller = serve_instance._controller start_time = get_deployment_start_time(controller, A) - A.get_handle() [handle.remote() for _ in range(50)] wait_for_condition( @@ -830,19 +851,19 @@ def __call__(self): print("Deployed A.") controller = serve_instance._controller - start_time = get_deployment_start_time(controller, A) + start_time = get_deployment_start_time(controller, A, app_name=None) - assert get_num_running_replicas(controller, A) == 0 + assert get_num_running_replicas(controller, A, app_name=None) == 0 handle = A.get_handle() [handle.remote() for _ in range(1)] print("Issued one request.") time.sleep(2) - assert get_num_running_replicas(controller, A) == 1 + assert get_num_running_replicas(controller, A, app_name=None) == 1 print("Scale up to 1 replica.") - first_deployment_replicas = get_running_replica_tags(controller, A) + first_deployment_replicas = get_running_replica_tags(controller, A, app_name=None) A.options( autoscaling_config={ @@ -859,14 +880,16 @@ def __call__(self): ).deploy() print("Redeployed A with min_replicas set to 2.") - wait_for_condition(lambda: get_num_running_replicas(controller, A) >= 2) + wait_for_condition( + lambda: get_num_running_replicas(controller, A, app_name=None) >= 2 + ) time.sleep(5) # Confirm that autoscaler doesn't scale above 2 even after waiting - assert get_num_running_replicas(controller, A) == 2 + assert get_num_running_replicas(controller, A, app_name=None) == 2 print("Autoscaled to 2 without issuing any new requests.") - 
second_deployment_replicas = get_running_replica_tags(controller, A) + second_deployment_replicas = get_running_replica_tags(controller, A, app_name=None) # Confirm that none of the original replicas were de-provisioned assert_no_replicas_deprovisioned( @@ -878,12 +901,14 @@ def __call__(self): print("Completed request.") # As the queue is drained, we should scale back down. - wait_for_condition(lambda: get_num_running_replicas(controller, A) <= 2) - assert get_num_running_replicas(controller, A) > 1 + wait_for_condition( + lambda: get_num_running_replicas(controller, A, app_name=None) <= 2 + ) + assert get_num_running_replicas(controller, A, app_name=None) > 1 print("Stayed at 2 replicas.") # Make sure start time did not change for the deployment - assert get_deployment_start_time(controller, A) == start_time + assert get_deployment_start_time(controller, A, app_name=None) == start_time @pytest.mark.skipif(sys.platform == "win32", reason="Failing on Windows.") @@ -903,7 +928,15 @@ def f(): # f should start with initial_replicas (2) deployments actors = state_api.list_actors( - filters=[("class_name", "=", "ServeReplica:f"), ("state", "=", "ALIVE")] + filters=[ + ( + "class_name", + "=", + f"ServeReplica:{SERVE_DEFAULT_APP_NAME}" + f"{DEPLOYMENT_NAME_PREFIX_SEPARATOR}f", + ), + ("state", "=", "ALIVE"), + ] ) print(actors) assert len(actors) == 2 @@ -911,7 +944,15 @@ def f(): # f should scale down to min_replicas (1) deployments def check_one_replica(): actors = state_api.list_actors( - filters=[("class_name", "=", "ServeReplica:f"), ("state", "=", "ALIVE")] + filters=[ + ( + "class_name", + "=", + f"ServeReplica:{SERVE_DEFAULT_APP_NAME}" + f"{DEPLOYMENT_NAME_PREFIX_SEPARATOR}f", + ), + ("state", "=", "ALIVE"), + ] ) return len(actors) == 1 @@ -946,7 +987,12 @@ def scaler(): def check_two_replicas(): actors = state_api.list_actors( filters=[ - ("class_name", "=", "ServeReplica:scaler"), + ( + "class_name", + "=", + f"ServeReplica:{SERVE_DEFAULT_APP_NAME}" + 
f"{DEPLOYMENT_NAME_PREFIX_SEPARATOR}scaler", + ), ("state", "=", "ALIVE"), ] ) @@ -969,12 +1015,25 @@ def check_two_replicas(): def check_num_replicas(live: int, dead: int): live_actors = state_api.list_actors( filters=[ - ("class_name", "=", "ServeReplica:scaler"), + ( + "class_name", + "=", + f"ServeReplica:{SERVE_DEFAULT_APP_NAME}" + f"{DEPLOYMENT_NAME_PREFIX_SEPARATOR}scaler", + ), ("state", "=", "ALIVE"), ] ) dead_actors = state_api.list_actors( - filters=[("class_name", "=", "ServeReplica:scaler"), ("state", "=", "DEAD")] + filters=[ + ( + "class_name", + "=", + f"ServeReplica:{SERVE_DEFAULT_APP_NAME}" + f"{DEPLOYMENT_NAME_PREFIX_SEPARATOR}scaler", + ), + ("state", "=", "DEAD"), + ] ) return len(live_actors) == live and len(dead_actors) == dead @@ -1063,7 +1122,15 @@ def send_request(): def check_num_replicas(num: int): actors = state_api.list_actors( - filters=[("class_name", "=", "ServeReplica:g"), ("state", "=", "ALIVE")] + filters=[ + ( + "class_name", + "=", + f"ServeReplica:{SERVE_DEFAULT_APP_NAME}" + f"{DEPLOYMENT_NAME_PREFIX_SEPARATOR}g", + ), + ("state", "=", "ALIVE"), + ] ) return len(actors) == num diff --git a/python/ray/serve/tests/test_cli.py b/python/ray/serve/tests/test_cli.py index 5ec96d7d9d2b..d68a5608f096 100644 --- a/python/ray/serve/tests/test_cli.py +++ b/python/ray/serve/tests/test_cli.py @@ -23,6 +23,10 @@ from ray.tests.conftest import tmp_working_dir # noqa: F401, E501 from ray.dashboard.modules.serve.sdk import ServeSubmissionClient from ray.serve.scripts import convert_args_to_dict, remove_ansi_escape_sequences +from ray.serve._private.constants import ( + SERVE_DEFAULT_APP_NAME, + DEPLOYMENT_NAME_PREFIX_SEPARATOR, +) CONNECTION_ERROR_MSG = "connection error" @@ -107,11 +111,11 @@ def test_deploy(ray_start_stop): print("Deployments are reachable over HTTP.") deployment_names = [ - "DAGDriver", - "create_order", - "Router", - "Multiplier", - "Adder", + f"{SERVE_DEFAULT_APP_NAME}{DEPLOYMENT_NAME_PREFIX_SEPARATOR}DAGDriver", + 
f"{SERVE_DEFAULT_APP_NAME}{DEPLOYMENT_NAME_PREFIX_SEPARATOR}create_order", + f"{SERVE_DEFAULT_APP_NAME}{DEPLOYMENT_NAME_PREFIX_SEPARATOR}Router", + f"{SERVE_DEFAULT_APP_NAME}{DEPLOYMENT_NAME_PREFIX_SEPARATOR}Multiplier", + f"{SERVE_DEFAULT_APP_NAME}{DEPLOYMENT_NAME_PREFIX_SEPARATOR}Adder", ] assert_deployments_live(deployment_names) print("All deployments are live.\n") @@ -135,7 +139,12 @@ def test_deploy(ray_start_stop): ) print("Deployments are reachable over HTTP.") - deployment_names = ["DAGDriver", "Router", "Add", "Subtract"] + deployment_names = [ + f"{SERVE_DEFAULT_APP_NAME}{DEPLOYMENT_NAME_PREFIX_SEPARATOR}DAGDriver", + f"{SERVE_DEFAULT_APP_NAME}{DEPLOYMENT_NAME_PREFIX_SEPARATOR}Router", + f"{SERVE_DEFAULT_APP_NAME}{DEPLOYMENT_NAME_PREFIX_SEPARATOR}Add", + f"{SERVE_DEFAULT_APP_NAME}{DEPLOYMENT_NAME_PREFIX_SEPARATOR}Subtract", + ] assert_deployments_live(deployment_names) print("All deployments are live.\n") @@ -479,11 +488,11 @@ def num_live_deployments(): serve_status = yaml.safe_load(status_response) expected_deployments = { - "DAGDriver", - "Multiplier", - "Adder", - "Router", - "create_order", + f"{SERVE_DEFAULT_APP_NAME}{DEPLOYMENT_NAME_PREFIX_SEPARATOR}DAGDriver", + f"{SERVE_DEFAULT_APP_NAME}{DEPLOYMENT_NAME_PREFIX_SEPARATOR}Multiplier", + f"{SERVE_DEFAULT_APP_NAME}{DEPLOYMENT_NAME_PREFIX_SEPARATOR}Adder", + f"{SERVE_DEFAULT_APP_NAME}{DEPLOYMENT_NAME_PREFIX_SEPARATOR}Router", + f"{SERVE_DEFAULT_APP_NAME}{DEPLOYMENT_NAME_PREFIX_SEPARATOR}create_order", } for status in serve_status["deployment_statuses"]: expected_deployments.remove(status["name"]) diff --git a/python/ray/serve/tests/test_constructor_failure.py b/python/ray/serve/tests/test_constructor_failure.py index 56a592c1bc68..ffed1c5b868f 100644 --- a/python/ray/serve/tests/test_constructor_failure.py +++ b/python/ray/serve/tests/test_constructor_failure.py @@ -6,6 +6,14 @@ import ray from ray import serve +from ray.serve._private.constants import ( + SERVE_DEFAULT_APP_NAME, + 
DEPLOYMENT_NAME_PREFIX_SEPARATOR, +) + + +def get_deployment_name(name: str): + return f"{SERVE_DEFAULT_APP_NAME}{DEPLOYMENT_NAME_PREFIX_SEPARATOR}{name}" def test_deploy_with_consistent_constructor_failure(serve_instance): @@ -23,8 +31,9 @@ async def serve(self, request): # Assert no replicas are running in deployment deployment after failed # deploy call + deployment_name = get_deployment_name("ConstructorFailureDeploymentOneReplica") deployment_dict = ray.get(serve_instance._controller._all_running_replicas.remote()) - assert deployment_dict["ConstructorFailureDeploymentOneReplica"] == [] + assert deployment_dict[deployment_name] == [] # # Test failed to deploy with total of 2 replicas @serve.deployment(num_replicas=2) @@ -40,8 +49,9 @@ async def serve(self, request): # Assert no replicas are running in deployment deployment after failed # deploy call + deployment_name = get_deployment_name("ConstructorFailureDeploymentTwoReplicas") deployment_dict = ray.get(serve_instance._controller._all_running_replicas.remote()) - assert deployment_dict["ConstructorFailureDeploymentTwoReplicas"] == [] + assert deployment_dict[deployment_name] == [] def test_deploy_with_partial_constructor_failure(serve_instance): @@ -75,7 +85,8 @@ async def serve(self, request): # Assert 2 replicas are running in deployment deployment after partially # successful deploy call deployment_dict = ray.get(serve_instance._controller._all_running_replicas.remote()) - assert len(deployment_dict["PartialConstructorFailureDeployment"]) == 2 + deployment_name = get_deployment_name("PartialConstructorFailureDeployment") + assert len(deployment_dict[deployment_name]) == 2 def test_deploy_with_transient_constructor_failure(serve_instance): @@ -101,7 +112,8 @@ async def serve(self, request): # Assert 2 replicas are running in deployment deployment after partially # successful deploy call with transient error deployment_dict = ray.get(serve_instance._controller._all_running_replicas.remote()) - assert 
len(deployment_dict["TransientConstructorFailureDeployment"]) == 2 + deployment_name = get_deployment_name("TransientConstructorFailureDeployment") + assert len(deployment_dict[deployment_name]) == 2 if __name__ == "__main__": diff --git a/python/ray/serve/tests/test_controller.py b/python/ray/serve/tests/test_controller.py index 1e283cd3fea0..3f3a18077e9d 100644 --- a/python/ray/serve/tests/test_controller.py +++ b/python/ray/serve/tests/test_controller.py @@ -8,6 +8,14 @@ from ray.serve._private.common import DeploymentInfo from ray.serve.generated.serve_pb2 import DeploymentRoute from ray.serve.controller import _generate_deployment_config_versions +from ray.serve._private.constants import ( + SERVE_DEFAULT_APP_NAME, + DEPLOYMENT_NAME_PREFIX_SEPARATOR, +) + + +def get_deployment_name(name: str): + return f"{SERVE_DEFAULT_APP_NAME}{DEPLOYMENT_NAME_PREFIX_SEPARATOR}{name}" def test_redeploy_start_time(serve_instance): @@ -20,8 +28,9 @@ def test(_): return "1" serve.run(test.bind()) + deployment_name = get_deployment_name("test") deployment_route = DeploymentRoute.FromString( - ray.get(controller.get_deployment_info.remote("test")) + ray.get(controller.get_deployment_info.remote(deployment_name)) ) deployment_info_1 = DeploymentInfo.from_proto(deployment_route.deployment_info) start_time_ms_1 = deployment_info_1.start_time_ms @@ -34,7 +43,7 @@ def test(_): serve.run(test.bind()) deployment_route = DeploymentRoute.FromString( - ray.get(controller.get_deployment_info.remote("test")) + ray.get(controller.get_deployment_info.remote(deployment_name)) ) deployment_info_2 = DeploymentInfo.from_proto(deployment_route.deployment_info) start_time_ms_2 = deployment_info_2.start_time_ms diff --git a/python/ray/serve/tests/test_controller_recovery.py b/python/ray/serve/tests/test_controller_recovery.py index 13a4ce7a0d65..77d262c26ee2 100644 --- a/python/ray/serve/tests/test_controller_recovery.py +++ b/python/ray/serve/tests/test_controller_recovery.py @@ -33,7 +33,7 @@ def 
__init__(self): def __call__(self, *args): return "hii" - serve.run(TransientConstructorFailureDeployment.bind()) + serve.run(TransientConstructorFailureDeployment.bind(), name="app") for _ in range(10): response = request_with_retries( "/recover_start_from_replica_actor_names/", timeout=30 @@ -42,10 +42,10 @@ def __call__(self, *args): # Assert 2 replicas are running in deployment deployment after partially # successful deploy() call with transient error deployment_dict = ray.get(serve_instance._controller._all_running_replicas.remote()) - assert len(deployment_dict["recover_start_from_replica_actor_names"]) == 2 + assert len(deployment_dict["app_recover_start_from_replica_actor_names"]) == 2 replica_version_hash = None - for replica in deployment_dict["recover_start_from_replica_actor_names"]: + for replica in deployment_dict["app_recover_start_from_replica_actor_names"]: ref = replica.actor_handle.get_metadata.remote() _, version = ray.get(ref) if replica_version_hash is None: @@ -116,7 +116,7 @@ def test_recover_rolling_update_from_replica_actor_names(serve_instance): @ray.remote(num_cpus=0) def call(block=False): - handle = serve.get_deployment(name).get_handle() + handle = serve.get_deployment(f"app_{name}").get_handle() ret = ray.get(handle.handler.remote(block)) return ret.split("|")[0], ret.split("|")[1] @@ -167,7 +167,7 @@ def make_nonblocking_calls(expected, expect_blocking=False, num_returns=1): return responses, blocking - serve.run(V1.bind()) + serve.run(V1.bind(), name="app") responses1, _ = make_nonblocking_calls({"1": 2}, num_returns=2) pids1 = responses1["1"] @@ -182,9 +182,9 @@ def make_nonblocking_calls(expected, expect_blocking=False, num_returns=1): # Redeploy new version. Since there is one replica blocking, only one new # replica should be started up. 
V2 = V1.options(func_or_class=V2, version="2") - serve.run(V2.bind(), _blocking=False) + serve.run(V2.bind(), _blocking=False, name="app") with pytest.raises(TimeoutError): - client._wait_for_deployment_healthy(V2.name, timeout_s=0.1) + client._wait_for_deployment_healthy(f"app_{V2.name}", timeout_s=0.1) responses3, blocking3 = make_nonblocking_calls({"1": 1}, expect_blocking=True) ray.kill(serve.context._global_client._controller, no_restart=False) @@ -197,7 +197,7 @@ def make_nonblocking_calls(expected, expect_blocking=False, num_returns=1): # Now the goal and requests to the new version should complete. # We should have two running replicas of the new version. - client._wait_for_deployment_healthy(V2.name) + client._wait_for_deployment_healthy(f"app_{V2.name}") make_nonblocking_calls({"2": 2}, num_returns=2) @@ -222,7 +222,7 @@ async def __init__(self): def __call__(self, request): return f"1|{os.getpid()}" - serve.run(V1.bind(), _blocking=False) + serve.run(V1.bind(), _blocking=False, name="app") ray.get(pending_init_indicator.remote()) def get_actor_info(name: str): @@ -234,7 +234,7 @@ def get_actor_info(name: str): print(actor) return actor["name"], actor["pid"] - actor_tag, _ = get_actor_info(V1.name) + actor_tag, _ = get_actor_info(f"app_{V1.name}") _, controller1_pid = get_actor_info(SERVE_CONTROLLER_NAME) ray.kill(serve.context._global_client._controller, no_restart=False) # wait for controller is alive again @@ -243,9 +243,9 @@ def get_actor_info(name: str): # Let the actor proceed initialization ray.get(signal.send.remote()) - client._wait_for_deployment_healthy(V1.name) + client._wait_for_deployment_healthy(f"app_{V1.name}") # Make sure the actor before controller dead is staying alive. 
- assert actor_tag == get_actor_info(V1.name)[0] + assert actor_tag == get_actor_info(f"app_{V1.name}")[0] if __name__ == "__main__": diff --git a/python/ray/serve/tests/test_deploy.py b/python/ray/serve/tests/test_deploy.py index d01b597982fd..24b3dc001c41 100644 --- a/python/ray/serve/tests/test_deploy.py +++ b/python/ray/serve/tests/test_deploy.py @@ -12,6 +12,7 @@ from ray import serve from ray.serve.exceptions import RayServeException from ray.serve._private.utils import get_random_letters +from ray.serve.context import get_global_client @pytest.mark.parametrize("use_handle", [True, False]) @@ -210,7 +211,7 @@ def test_redeploy_single_replica(serve_instance, use_handle): @ray.remote def call(block=False): if use_handle: - handle = serve.get_deployment(name).get_handle() + handle = serve.get_deployment(f"app_{name}").get_handle() ret = ray.get(handle.handler.remote(block)) else: ret = requests.get( @@ -241,7 +242,7 @@ async def handler(self, *args): async def __call__(self, request): return await self.handler() - serve.run(V1.bind()) + serve.run(V1.bind(), name="app") ref1 = call.remote(block=False) val1, pid1 = ray.get(ref1) assert val1 == "1" @@ -253,9 +254,9 @@ async def __call__(self, request): # Redeploy new version. This should not go through until the old version # replica completely stops. V2 = V1.options(func_or_class=V2, version="2") - serve.run(V2.bind(), _blocking=False) + serve.run(V2.bind(), _blocking=False, name="app") with pytest.raises(TimeoutError): - client._wait_for_deployment_healthy(V2.name, timeout_s=0.1) + client._wait_for_deployment_healthy(f"app_{V2.name}", timeout_s=0.1) # It may take some time for the handle change to propagate and requests # to get sent to the new version. Repeatedly send requests until they @@ -283,7 +284,7 @@ async def __call__(self, request): assert pid2 == pid1 # Now the goal and request to the new version should complete. 
- client._wait_for_deployment_healthy(V2.name) + client._wait_for_deployment_healthy(f"app_{V2.name}") new_version_val, new_version_pid = ray.get(new_version_ref) assert new_version_val == "2" assert new_version_pid != pid2 @@ -301,7 +302,7 @@ def test_redeploy_multiple_replicas(serve_instance, use_handle): @ray.remote(num_cpus=0) def call(block=False): if use_handle: - handle = serve.get_deployment(name).get_handle() + handle = serve.get_deployment(f"app_{name}").get_handle() ret = ray.get(handle.handler.remote(block)) else: ret = requests.get( @@ -355,7 +356,7 @@ def make_nonblocking_calls(expected, expect_blocking=False): return responses, blocking - serve.run(V1.bind()) + serve.run(V1.bind(), name="app") responses1, _ = make_nonblocking_calls({"1": 2}) pids1 = responses1["1"] @@ -368,9 +369,9 @@ def make_nonblocking_calls(expected, expect_blocking=False): # Redeploy new version. Since there is one replica blocking, only one new # replica should be started up. V2 = V1.options(func_or_class=V2, version="2") - serve.run(V2.bind(), _blocking=False) + serve.run(V2.bind(), _blocking=False, name="app") with pytest.raises(TimeoutError): - client._wait_for_deployment_healthy(V2.name, timeout_s=0.1) + client._wait_for_deployment_healthy(f"app_{V2.name}", timeout_s=0.1) responses3, blocking3 = make_nonblocking_calls({"1": 1}, expect_blocking=True) # Signal the original call to exit. @@ -381,7 +382,7 @@ def make_nonblocking_calls(expected, expect_blocking=False): # Now the goal and requests to the new version should complete. # We should have two running replicas of the new version. 
- client._wait_for_deployment_healthy(V2.name) + client._wait_for_deployment_healthy(f"app_{V2.name}") make_nonblocking_calls({"2": 2}) @@ -511,7 +512,7 @@ def v1(*args): @ray.remote(num_cpus=0) def call(): if use_handle: - handle = v1.get_handle() + handle = get_global_client().get_handle(f"app_{name}", sync=True) ret = ray.get(handle.remote()) else: ret = requests.get(f"http://localhost:8000/{name}").text @@ -536,7 +537,7 @@ def make_calls(expected): return responses - serve.run(v1.bind()) + serve.run(v1.bind(), name="app") responses1 = make_calls({"1": 4}) pids1 = responses1["1"] @@ -544,7 +545,7 @@ def make_calls(expected): def v2(*args): return f"2|{os.getpid()}" - serve.run(v2.bind()) + serve.run(v2.bind(), name="app") responses2 = make_calls({"2": 2}) assert all(pid not in pids1 for pid in responses2["2"]) @@ -562,7 +563,7 @@ def v1(*args): @ray.remote(num_cpus=0) def call(): if use_handle: - handle = v1.get_handle() + handle = get_global_client().get_handle(f"app_{name}", sync=True) ret = ray.get(handle.remote()) else: ret = requests.get(f"http://localhost:8000/{name}").text @@ -587,7 +588,7 @@ def make_calls(expected): return responses - serve.run(v1.bind()) + serve.run(v1.bind(), name="app") responses1 = make_calls({"1": 2}) pids1 = responses1["1"] @@ -595,7 +596,7 @@ def make_calls(expected): def v2(*args): return f"2|{os.getpid()}" - serve.run(v2.bind()) + serve.run(v2.bind(), name="app") responses2 = make_calls({"2": 4}) assert all(pid not in pids1 for pid in responses2["2"]) @@ -606,8 +607,7 @@ class A: def b(self, *args): return "hello" - serve.run(A.bind()) - handle = A.get_handle() + handle = serve.run(A.bind(), name="app") # Legacy code path assert ray.get(handle.options(method_name="b").remote()) == "hello" diff --git a/python/ray/serve/tests/test_deploy_2.py b/python/ray/serve/tests/test_deploy_2.py index 3d0a8156be3a..61449bb618a8 100644 --- a/python/ray/serve/tests/test_deploy_2.py +++ b/python/ray/serve/tests/test_deploy_2.py @@ -16,6 +16,7 
@@ class TestGetDeployment: + # Test V1 API get_deployment() def get_deployment(self, name, use_list_api): if use_list_api: return serve.list_deployments()[name] @@ -33,8 +34,8 @@ def d(*args): with pytest.raises(KeyError): self.get_deployment(name, use_list_api) - handle = serve.run(d.bind()) - val1, pid1 = ray.get(handle.remote()) + d.deploy() + val1, pid1 = ray.get(d.get_handle().remote()) assert val1 == "1" del d @@ -52,7 +53,7 @@ def test_get_after_delete(self, serve_instance, use_list_api): def d(*args): return "1", os.getpid() - serve.run(d.bind()) + d.deploy() del d d2 = self.get_deployment(name, use_list_api) @@ -70,15 +71,15 @@ def test_deploy_new_version(self, serve_instance, use_list_api): def d(*args): return "1", os.getpid() - handle = serve.run(d.bind()) - val1, pid1 = ray.get(handle.remote()) + d.deploy() + val1, pid1 = ray.get(d.get_handle().remote()) assert val1 == "1" del d d2 = self.get_deployment(name, use_list_api) - handle = serve.run(d2.options(version="2").bind()) - val2, pid2 = ray.get(handle.remote()) + d2.options(version="2").deploy() + val2, pid2 = ray.get(d2.get_handle().remote()) assert val2 == "1" assert pid2 != pid1 @@ -90,15 +91,15 @@ def test_deploy_empty_version(self, serve_instance, use_list_api): def d(*args): return "1", os.getpid() - handle = serve.run(d.bind()) - val1, pid1 = ray.get(handle.remote()) + d.deploy() + val1, pid1 = ray.get(d.get_handle().remote()) assert val1 == "1" del d d2 = self.get_deployment(name, use_list_api) - handle = serve.run(d2.bind()) - val2, pid2 = ray.get(handle.remote()) + d2.deploy() + val2, pid2 = ray.get(d2.get_handle().remote()) assert val2 == "1" assert pid2 != pid1 @@ -144,12 +145,12 @@ def check_num_replicas(num): handle = self.get_deployment(name, use_list_api).get_handle() assert len(set(ray.get([handle.remote() for _ in range(50)]))) == num - serve.run(d.bind()) + d.deploy() check_num_replicas(1) del d d2 = self.get_deployment(name, use_list_api) - 
serve.run(d2.options(num_replicas=2).bind()) + d2.options(num_replicas=2).deploy() check_num_replicas(2) diff --git a/python/ray/serve/tests/test_deployment_graph.py b/python/ray/serve/tests/test_deployment_graph.py index e9fa7db68acb..f6633ca1353a 100644 --- a/python/ray/serve/tests/test_deployment_graph.py +++ b/python/ray/serve/tests/test_deployment_graph.py @@ -484,12 +484,13 @@ def get(self): tracker = CallTracker.bind() with InputNode() as inp: - dag = DAGDriver.bind(tracker.predict.bind(inp)) + dag = DAGDriver.bind( + {"/get": tracker.get.bind(), "/predict": tracker.predict.bind(inp)} + ) handle = serve.run(dag) - ray.get(handle.predict.remote(1)) + ray.get(handle.predict_with_route.remote("/predict", 1)) - call_tracker = CallTracker.get_handle() - assert ray.get(call_tracker.get.remote()) == ["predict"] + assert ray.get(handle.predict_with_route.remote("/get", 1)) == ["predict"] def test_sharing_call_for_broadcast(serve_instance): diff --git a/python/ray/serve/tests/test_deployment_graph_autoscaling.py b/python/ray/serve/tests/test_deployment_graph_autoscaling.py index 662f25104bdc..6b0256f0b0c4 100644 --- a/python/ray/serve/tests/test_deployment_graph_autoscaling.py +++ b/python/ray/serve/tests/test_deployment_graph_autoscaling.py @@ -9,12 +9,19 @@ from ray.dag.input_node import InputNode from ray.serve._private.common import ReplicaState from ray._private.test_utils import SignalActor, wait_for_condition +from ray.serve._private.constants import ( + SERVE_DEFAULT_APP_NAME, + DEPLOYMENT_NAME_PREFIX_SEPARATOR, +) # Magic number to use for speed up scale from 0 replica serve_constants.HANDLE_METRIC_PUSH_INTERVAL_S = 1 def get_num_running_replicas(controller, deployment_name): + deployment_name = ( + SERVE_DEFAULT_APP_NAME + DEPLOYMENT_NAME_PREFIX_SEPARATOR + deployment_name + ) replicas = ray.get( controller._dump_replica_states_for_testing.remote(deployment_name) ) diff --git a/python/ray/serve/tests/test_failure.py b/python/ray/serve/tests/test_failure.py 
index 6f35c5dd4351..98b6282553d2 100644 --- a/python/ray/serve/tests/test_failure.py +++ b/python/ray/serve/tests/test_failure.py @@ -7,6 +7,10 @@ import ray from ray import serve from ray._private.test_utils import wait_for_condition +from ray.serve._private.constants import ( + SERVE_DEFAULT_APP_NAME, + DEPLOYMENT_NAME_PREFIX_SEPARATOR, +) def request_with_retries(endpoint, timeout=30): @@ -107,10 +111,13 @@ def check_new(): def _get_worker_handles(deployment): + deployment_name = ( + f"{SERVE_DEFAULT_APP_NAME}{DEPLOYMENT_NAME_PREFIX_SEPARATOR}{deployment}" + ) controller = serve.context._global_client._controller deployment_dict = ray.get(controller._all_running_replicas.remote()) - return [replica.actor_handle for replica in deployment_dict[deployment]] + return [replica.actor_handle for replica in deployment_dict[deployment_name]] # Test that a worker dying unexpectedly causes it to restart and continue diff --git a/python/ray/serve/tests/test_gcs_failure.py b/python/ray/serve/tests/test_gcs_failure.py index 9efe0bb6cce2..c553a9684e2c 100644 --- a/python/ray/serve/tests/test_gcs_failure.py +++ b/python/ray/serve/tests/test_gcs_failure.py @@ -10,6 +10,11 @@ from ray._private.test_utils import wait_for_condition from ray.serve._private.storage.kv_store import KVStoreError, RayInternalKVStore from ray.tests.conftest import external_redis # noqa: F401 +from ray.serve._private.constants import ( + SERVE_DEFAULT_APP_NAME, + DEPLOYMENT_NAME_PREFIX_SEPARATOR, +) +from ray.serve.context import get_global_client @pytest.fixture(scope="function") @@ -58,7 +63,12 @@ def d(*args): def call(): if use_handle: - ret = ray.get(d.get_handle().remote()) + deployment_name = ( + f"{SERVE_DEFAULT_APP_NAME}{DEPLOYMENT_NAME_PREFIX_SEPARATOR}d" + ) + ret = ray.get( + get_global_client().get_handle(deployment_name, sync=True).remote() + ) else: ret = requests.get("http://localhost:8000/d").text return ret diff --git a/python/ray/serve/tests/test_get_deployment.py 
b/python/ray/serve/tests/test_get_deployment.py index bc5f03db8f1f..0406b6695251 100644 --- a/python/ray/serve/tests/test_get_deployment.py +++ b/python/ray/serve/tests/test_get_deployment.py @@ -16,8 +16,8 @@ def d(*args): with pytest.raises(KeyError): serve.get_deployment(name=name) - handle = serve.run(d.bind()) - val1, pid1 = ray.get(handle.remote()) + d.deploy() + val1, pid1 = ray.get(d.get_handle().remote()) assert val1 == "1" del d diff --git a/python/ray/serve/tests/test_grpc.py b/python/ray/serve/tests/test_grpc.py index b5ff870cf57e..69f00886a683 100644 --- a/python/ray/serve/tests/test_grpc.py +++ b/python/ray/serve/tests/test_grpc.py @@ -10,6 +10,11 @@ from ray._private.test_utils import wait_for_condition from ray.serve.exceptions import RayServeException +from ray.serve._private.constants import ( + SERVE_DEFAULT_APP_NAME, + DEPLOYMENT_NAME_PREFIX_SEPARATOR, +) + from unittest.mock import patch @@ -84,7 +89,10 @@ def __call__(self, input): replicas = ray.get( serve.context._global_client._controller._all_running_replicas.remote() ) - assert len(replicas["DefaultgRPCDriver"]) == 1 + deployment_name = ( + f"{SERVE_DEFAULT_APP_NAME}{DEPLOYMENT_NAME_PREFIX_SEPARATOR}DefaultgRPCDriver" + ) + assert len(replicas[deployment_name]) == 1 worker_node = cluster.add_node(num_cpus=2) @@ -92,7 +100,7 @@ def __call__(self, input): lambda: len( ray.get( serve.context._global_client._controller._all_running_replicas.remote() - )["DefaultgRPCDriver"] + )[deployment_name] ) == 2 ) @@ -104,7 +112,7 @@ def __call__(self, input): lambda: len( ray.get( serve.context._global_client._controller._all_running_replicas.remote() - )["DefaultgRPCDriver"] + )[deployment_name] ) == 1 ) diff --git a/python/ray/serve/tests/test_handle.py b/python/ray/serve/tests/test_handle.py index 8c62814d9f02..344b418ab11f 100644 --- a/python/ray/serve/tests/test_handle.py +++ b/python/ray/serve/tests/test_handle.py @@ -7,6 +7,11 @@ import ray from ray import serve from ray.serve.exceptions import 
RayServeException +from ray.serve._private.constants import ( + SERVE_DEFAULT_APP_NAME, + DEPLOYMENT_NAME_PREFIX_SEPARATOR, +) +from ray.serve.context import get_global_client @pytest.mark.asyncio @@ -80,7 +85,10 @@ def f(): handle = serve.run(f.bind()) def thread_get_handle(deploy): - handle = deploy.get_handle(sync=True) + deployment_name = ( + f"{SERVE_DEFAULT_APP_NAME}{DEPLOYMENT_NAME_PREFIX_SEPARATOR}{deploy._name}" + ) + handle = get_global_client().get_handle(deployment_name, sync=True) return handle with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor: diff --git a/python/ray/serve/tests/test_healthcheck.py b/python/ray/serve/tests/test_healthcheck.py index 03c6dbf5fe5c..d0a1fca074dc 100644 --- a/python/ray/serve/tests/test_healthcheck.py +++ b/python/ray/serve/tests/test_healthcheck.py @@ -6,6 +6,10 @@ from ray import serve from ray.serve._private.common import DeploymentStatus from ray.serve._private.constants import REPLICA_HEALTH_CHECK_UNHEALTHY_THRESHOLD +from ray.serve._private.constants import ( + SERVE_DEFAULT_APP_NAME, + DEPLOYMENT_NAME_PREFIX_SEPARATOR, +) class Counter: @@ -219,7 +223,8 @@ def __call__(self, *args): app_status = serve_instance.get_serve_status() assert ( - app_status.deployment_statuses[0].name == "AlwaysUnhealthy" + app_status.deployment_statuses[0].name + == f"{SERVE_DEFAULT_APP_NAME}{DEPLOYMENT_NAME_PREFIX_SEPARATOR}AlwaysUnhealthy" and app_status.deployment_statuses[0].status == DeploymentStatus.UNHEALTHY ) @@ -255,7 +260,8 @@ def __call__(self, *args): def check_status(expected_status: DeploymentStatus): app_status = serve_instance.get_serve_status() return ( - app_status.deployment_statuses[0].name == "WillBeUnhealthy" + app_status.deployment_statuses[0].name == f"{SERVE_DEFAULT_APP_NAME}" + f"{DEPLOYMENT_NAME_PREFIX_SEPARATOR}WillBeUnhealthy" and app_status.deployment_statuses[0].status == expected_status ) diff --git a/python/ray/serve/tests/test_metrics.py b/python/ray/serve/tests/test_metrics.py index 
63867945956d..41084f76feba 100644 --- a/python/ray/serve/tests/test_metrics.py +++ b/python/ray/serve/tests/test_metrics.py @@ -134,7 +134,7 @@ async def __call__(self): # Trigger RayActorError os._exit(0) - serve.run(A.bind()) + serve.run(A.bind(), name="app") requests.get("http://127.0.0.1:8000/A/") requests.get("http://127.0.0.1:8000/A/") try: @@ -162,8 +162,8 @@ def verify_error_count(do_assert=False): elif "serve_num_deployment_http_error_requests" in metrics: # deployment A should have error count 2 if do_assert: - assert 'deployment="A"' in metrics and "2.0" in metrics - if 'deployment="A"' not in metrics or "2.0" not in metrics: + assert 'deployment="app_A"' in metrics and "2.0" in metrics + if 'deployment="app_A"' not in metrics or "2.0" not in metrics: return False return True @@ -181,7 +181,7 @@ def test_http_metrics_fields(serve_start_shutdown): def f(*args): return 1 / 0 - serve.run(f.bind()) + serve.run(f.bind(), name="app") # Should generate 404 responses broken_url = "http://127.0.0.1:8000/fake_route" @@ -212,7 +212,7 @@ def f(*args): "serve_num_deployment_http_error_requests" ) assert len(num_deployment_errors) == 1 - assert num_deployment_errors[0]["deployment"] == "f" + assert num_deployment_errors[0]["deployment"] == "app_f" assert num_deployment_errors[0]["error_code"] == "500" assert num_deployment_errors[0]["method"] == "GET" print("serve_num_deployment_http_error_requests working as expected.") @@ -348,7 +348,7 @@ async def app1(self): async def app2(self): return await (await self.handle2.remote()) - serve.run(G.bind(g1.bind(), g2.bind())) + serve.run(G.bind(g1.bind(), g2.bind()), name="app") resp = requests.get("http://127.0.0.1:8000/api") assert resp.text == '"ok1"' resp = requests.get("http://127.0.0.1:8000/api2") @@ -368,9 +368,9 @@ async def app2(self): requests_metrics = self._generate_metrics_summary( get_metric_dictionaries("serve_deployment_request_counter") ) - assert requests_metrics["G"] == {"/api", "/api2"} - assert 
requests_metrics["g1"] == {"/api"} - assert requests_metrics["g2"] == {"/api2"} + assert requests_metrics["app_G"] == {"/api", "/api2"} + assert requests_metrics["app_g1"] == {"/api"} + assert requests_metrics["app_g2"] == {"/api2"} def test_customer_metrics_with_context(self, serve_start_shutdown): @serve.deployment @@ -562,11 +562,11 @@ def test_actor_summary(serve_instance): def f(): pass - serve.run(f.bind()) + serve.run(f.bind(), name="app") actors = state_api.list_actors(filters=[("state", "=", "ALIVE")]) class_names = {actor["class_name"] for actor in actors} assert class_names.issuperset( - {"ServeController", "HTTPProxyActor", "ServeReplica:f"} + {"ServeController", "HTTPProxyActor", "ServeReplica:app_f"} ) diff --git a/python/ray/serve/tests/test_persistence.py b/python/ray/serve/tests/test_persistence.py index 3ba7d592a58d..c0167fc02067 100644 --- a/python/ray/serve/tests/test_persistence.py +++ b/python/ray/serve/tests/test_persistence.py @@ -14,13 +14,13 @@ def test_new_driver(serve_instance): def driver(): return "OK!" -serve.run(driver.bind()) +serve.run(driver.bind(), name="app") """.format( ray._private.worker._global_node.address ) run_string_as_driver(script) - handle = serve.get_deployment("driver").get_handle() + handle = serve.get_deployment("app_driver").get_handle() assert ray.get(handle.remote()) == "OK!" 
diff --git a/python/ray/serve/tests/test_regression.py b/python/ray/serve/tests/test_regression.py index 23be1e9d1988..9f636fd58340 100644 --- a/python/ray/serve/tests/test_regression.py +++ b/python/ray/serve/tests/test_regression.py @@ -153,13 +153,13 @@ def test_handle_cache_out_of_scope(serve_instance): def f(): return "hi" - handle = serve.run(f.bind()) + handle = serve.run(f.bind(), name="app") handle_cache = get_global_client().handle_cache assert len(handle_cache) == initial_num_cached + 1 def sender_where_handle_goes_out_of_scope(): - f = serve.get_deployment("f").get_handle() + f = get_global_client().get_handle("app_f", missing_ok=True, sync=True) assert f is handle assert ray.get(f.remote()) == "hi" diff --git a/python/ray/serve/tests/test_runtime_env.py b/python/ray/serve/tests/test_runtime_env.py index 14c8c5e3552c..e6fe295fae4a 100644 --- a/python/ray/serve/tests/test_runtime_env.py +++ b/python/ray/serve/tests/test_runtime_env.py @@ -73,7 +73,7 @@ class Test: def __call__(self, *args): return open("hello").read() -handle = serve.run(Test.bind()) +handle = serve.run(Test.bind(), name="app") assert ray.get(handle.remote()) == "world" """ @@ -87,8 +87,8 @@ def __call__(self, *args): ray.init(address="auto", namespace="serve", job_config=job_config) -Test = serve.get_deployment("Test") -handle = serve.run(Test.bind()) +Test = serve.get_deployment("app_Test") +handle = serve.run(Test.bind(), name="app") assert ray.get(handle.remote()) == "world" Test.delete() """ diff --git a/python/ray/serve/tests/test_standalone.py b/python/ray/serve/tests/test_standalone.py index 41004e4abda0..4d2f10616d66 100644 --- a/python/ray/serve/tests/test_standalone.py +++ b/python/ray/serve/tests/test_standalone.py @@ -186,12 +186,12 @@ def test_single_app_shutdown_actors(ray_shutdown): def f(): pass - serve.run(f.bind()) + serve.run(f.bind(), name="app") actor_names = { "ServeController", "HTTPProxyActor", - "ServeReplica:f", + "ServeReplica:app_f", } def check_alive(): @@ 
-710,7 +710,7 @@ def check(): return False serve.start(detached=True) - serve.run(hello.bind()) + serve.run(hello.bind(), name="app") check() webui_url = ray_start_with_dashboard["webui_url"] @@ -731,7 +731,7 @@ def verify_snapshot(): snapshot = get_deployment_snapshot() assert len(snapshot) == 1 hello_deployment = list(snapshot.values())[0] - assert hello_deployment["name"] == "hello" + assert hello_deployment["name"] == "app_hello" assert hello_deployment["status"] == "RUNNING" diff --git a/python/ray/serve/tests/test_standalone2.py b/python/ray/serve/tests/test_standalone2.py index b4b483d53ad3..fb6dabd5d4eb 100644 --- a/python/ray/serve/tests/test_standalone2.py +++ b/python/ray/serve/tests/test_standalone2.py @@ -23,7 +23,11 @@ from ray.serve.exceptions import RayServeException from ray.serve._private.client import ServeControllerClient from ray.serve._private.common import ApplicationStatus, DeploymentStatus -from ray.serve._private.constants import SERVE_NAMESPACE +from ray.serve._private.constants import ( + SERVE_NAMESPACE, + SERVE_DEFAULT_APP_NAME, + DEPLOYMENT_NAME_PREFIX_SEPARATOR, +) from ray.serve.context import get_global_client from ray.serve.schema import ( ServeApplicationSchema, @@ -133,9 +137,9 @@ def hello(*args, **kwargs): return "world" ray.init(num_gpus=3, namespace="serve") - serve.run(hello.bind()) + handle = serve.run(hello.bind()) - assert ray.get(hello.get_handle().remote()) == "world" + assert ray.get(handle.remote()) == "world" @pytest.mark.parametrize("detached", [True, False]) @@ -252,7 +256,10 @@ def f(*args): client = get_global_client() status_info_1 = client.get_serve_status() assert status_info_1.app_status.status == "RUNNING" - assert status_info_1.deployment_statuses[0].name == "f" + assert ( + status_info_1.deployment_statuses[0].name + == f"{SERVE_DEFAULT_APP_NAME}{DEPLOYMENT_NAME_PREFIX_SEPARATOR}f" + ) assert status_info_1.deployment_statuses[0].status in {"UPDATING", "HEALTHY"} serve.shutdown() @@ -366,14 +373,14 @@ def 
test_controller_recover_and_delete(shutdown_ray): def f(): pass - f.deploy() + serve.run(f.bind()) actors = list_actors( address=ray_context.address_info["address"], filters=[("state", "=", "ALIVE")] ) - # Try to delete the deployments and kill the controller right after - client.delete_deployments(["f"], blocking=False) + # Try to delete the application and kill the controller right after + serve.delete(SERVE_DEFAULT_APP_NAME, _blocking=False) ray.kill(client._controller, no_restart=False) # All replicas should be removed already or after the controller revives @@ -400,7 +407,12 @@ def f(): # The deployment should be deleted, meaning its state should not be stored # in the DeploymentStateManager. This can be checked by attempting to # retrieve the deployment's status through the controller. - assert client.get_serve_status().get_deployment_status("f") is None + assert ( + client.get_serve_status().get_deployment_status( + f"{SERVE_DEFAULT_APP_NAME}{DEPLOYMENT_NAME_PREFIX_SEPARATOR}f" + ) + is None + ) serve.shutdown() ray.shutdown() @@ -1067,9 +1079,11 @@ def test_controller_recover_and_deploy(self, client: ServeControllerClient): deployment_timestamp = client.get_serve_status().app_status.deployment_timestamp # Delete all deployments, but don't update config - client.delete_deployments( - ["Router", "Multiplier", "Adder", "create_order", "DAGDriver"] - ) + deployments = [ + f"{SERVE_DEFAULT_APP_NAME}{DEPLOYMENT_NAME_PREFIX_SEPARATOR}{name}" + for name in ["Router", "Multiplier", "Adder", "create_order", "DAGDriver"] + ] + client.delete_deployments(deployments) ray.kill(client._controller, no_restart=False) @@ -1117,11 +1131,12 @@ def test_deploy_config_update( """ def deployment_running(): + name = f"{SERVE_DEFAULT_APP_NAME}{DEPLOYMENT_NAME_PREFIX_SEPARATOR}f" serve_status = client.get_serve_status() return ( - serve_status.get_deployment_status("f") is not None + serve_status.get_deployment_status(name) is not None and serve_status.app_status.status == 
ApplicationStatus.RUNNING - and serve_status.get_deployment_status("f").status + and serve_status.get_deployment_status(name).status == DeploymentStatus.HEALTHY ) From 93722e9a4dcfe66cb120f6bdbf5d6ad8a761c874 Mon Sep 17 00:00:00 2001 From: Sofian Hnaide <103539032+sofianhnaide@users.noreply.github.com> Date: Tue, 25 Apr 2023 09:43:48 -0700 Subject: [PATCH 087/424] update workspace templates base image (#34684) --- doc/source/templates/configs/anyscale_cluster_env.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/templates/configs/anyscale_cluster_env.yaml b/doc/source/templates/configs/anyscale_cluster_env.yaml index 68ea5af0a616..2b8c1e458e95 100644 --- a/doc/source/templates/configs/anyscale_cluster_env.yaml +++ b/doc/source/templates/configs/anyscale_cluster_env.yaml @@ -3,4 +3,4 @@ # docker_image: anyscale/ray-ml:latest-py39 # ray_version: nightly # or define a build_id for existing images, e.g. for "anyscaleray-ml231-py39-gpu" -build_id: "anyscaleray-mlnightly-py39-gpu" +build_id: "anyscaleray-ml240-py39-gpu" From 8815b9741659a8b78d396266d5e909b226513420 Mon Sep 17 00:00:00 2001 From: Antoni Baum Date: Tue, 25 Apr 2023 10:11:23 -0700 Subject: [PATCH 088/424] [Train] Ensure local HF Datasets are split (#34581) Signed-off-by: Antoni Baum --- .../ray/train/huggingface/_huggingface_utils.py | 15 +++++++++++---- .../ray/train/huggingface/huggingface_trainer.py | 13 +++++++------ 2 files changed, 18 insertions(+), 10 deletions(-) diff --git a/python/ray/train/huggingface/_huggingface_utils.py b/python/ray/train/huggingface/_huggingface_utils.py index a90acd49e2d2..65f54e234161 100644 --- a/python/ray/train/huggingface/_huggingface_utils.py +++ b/python/ray/train/huggingface/_huggingface_utils.py @@ -44,7 +44,7 @@ def get_train_dataloader(self): data_loader = super().get_train_dataloader() if isinstance( data_loader.dataset, transformers.trainer.IterableDatasetShard - ): + ) and getattr(data_loader.dataset.dataset, "_do_not_split", 
False): # Default Trainer.get_train_dataloader will wrap the dataset in # IterableDatasetShard, which will perform additional sharding on top # of the already sharded dataset. By setting those two attributes, @@ -75,8 +75,10 @@ def __iter__(self): yield (0, {k: v for k, v in row.as_pydict().items()}) -def process_dataset_for_hf(dataset: DataIterator) -> "IterableDataset": - """Converts a Datastream into a HF IterableDataset.""" +def process_dataset_for_hf( + dataset: DataIterator, disable_transformers_splitting: bool = False +) -> "IterableDataset": + """Converts a Ray Dataset into a HF IterableDataset.""" hf_iterable = RayDatasetHFIterable(dataset) iterable_dataset = datasets.iterable_dataset.IterableDataset( @@ -90,6 +92,9 @@ def process_dataset_for_hf(dataset: DataIterator) -> "IterableDataset": dataset_length = None iterable_dataset = maybe_add_length(iterable_dataset, dataset_length) + # Trigger logic in `wrap_transformers_trainer` to disable built-in + # HuggingFace splitting, as we have already split the dataset ourselves. + iterable_dataset._do_not_split = disable_transformers_splitting return iterable_dataset @@ -99,7 +104,9 @@ def process_datasets( ) -> Tuple["IterableDataset", "IterableDataset"]: """Convert Ray train and validation to HF IterableDatasets.""" if train_dataset: - train_torch_dataset = process_dataset_for_hf(train_dataset) + train_torch_dataset = process_dataset_for_hf( + train_dataset, disable_transformers_splitting=True + ) else: train_torch_dataset = None diff --git a/python/ray/train/huggingface/huggingface_trainer.py b/python/ray/train/huggingface/huggingface_trainer.py index a27454a55079..99bd1ad96ea4 100644 --- a/python/ray/train/huggingface/huggingface_trainer.py +++ b/python/ray/train/huggingface/huggingface_trainer.py @@ -92,17 +92,18 @@ class HuggingFaceTrainer(TorchTrainer): shards, with each Actor training on a single shard. All the other datasets will not be split. 
+ Please note that if you use a custom ``transformers.Trainer`` subclass, + the ``get_train_dataloader`` method will be wrapped around to disable + sharding by ``transformers.IterableDatasetShard``, as the dataset will + already be sharded on the Ray AIR side. + You can also provide ``datasets.Dataset`` object or other dataset objects allowed by ``transformers.Trainer`` directly in the ``trainer_init_per_worker`` function, without specifying the ``datasets`` dict. It is recommended to initialize those objects inside the function, as otherwise they will be serialized and passed to the function, which may lead to long runtime and memory issues with large - amounts of data. - - Please note that if you use a custom ``transformers.Trainer`` subclass, - the ``get_train_dataloader`` method will be wrapped around to disable - sharding by ``transformers.IterableDatasetShard``, as the dataset will - already be sharded on the Ray AIR side. + amounts of data. In this case, the training dataset will be split + automatically by Transformers. HuggingFace loggers will be automatically disabled, and the ``local_rank`` argument in ``TrainingArguments`` will be automatically set. 
Please note From 2431a98c07fa674d497219383a6b1ee4046a89c2 Mon Sep 17 00:00:00 2001 From: Cuong Nguyen <128072568+can-anyscale@users.noreply.github.com> Date: Tue, 25 Apr 2023 11:10:02 -0700 Subject: [PATCH 089/424] Fix pip dependency conflicts (#34715) Signed-off-by: Cuong Nguyen --- release/ray_release/command_runner/_anyscale_job_wrapper.py | 1 - 1 file changed, 1 deletion(-) diff --git a/release/ray_release/command_runner/_anyscale_job_wrapper.py b/release/ray_release/command_runner/_anyscale_job_wrapper.py index f99bc918914f..986894f755ed 100644 --- a/release/ray_release/command_runner/_anyscale_job_wrapper.py +++ b/release/ray_release/command_runner/_anyscale_job_wrapper.py @@ -71,7 +71,6 @@ def run_storage_cp(source: str, target: str): storage_service = urlparse(target).scheme cp_cmd_args = [] if storage_service == "s3": - install_pip("awscli") cp_cmd_args = [ "aws", "s3", From ac453dc23f1cb9e8835ee80a1c8c575bcca7a87c Mon Sep 17 00:00:00 2001 From: Edward Oakes Date: Tue, 25 Apr 2023 13:32:33 -0500 Subject: [PATCH 090/424] [serve] Clean up `RayServeHandle` and `RayServeSyncHandle` docstrings & typing (#34714) Cleans up wording, adds examples to handle docstrings. 
--- python/ray/serve/_private/client.py | 2 +- python/ray/serve/api.py | 10 +- python/ray/serve/handle.py | 188 ++++++++++++++++++---------- 3 files changed, 124 insertions(+), 76 deletions(-) diff --git a/python/ray/serve/_private/client.py b/python/ray/serve/_private/client.py index 12d07b1e6840..081671681e35 100644 --- a/python/ray/serve/_private/client.py +++ b/python/ray/serve/_private/client.py @@ -456,7 +456,7 @@ def get_handle( cache_key = (deployment_name, missing_ok, sync) if cache_key in self.handle_cache: cached_handle = self.handle_cache[cache_key] - if cached_handle.is_polling and cached_handle.is_same_loop: + if cached_handle._is_polling and cached_handle._is_same_loop: return cached_handle all_endpoints = ray.get(self._controller.get_all_endpoints.remote()) diff --git a/python/ray/serve/api.py b/python/ray/serve/api.py index 28e0e0175f88..99f4bb9cd81d 100644 --- a/python/ray/serve/api.py +++ b/python/ray/serve/api.py @@ -34,7 +34,7 @@ get_and_validate_ingress_deployment, ) from ray.serve.exceptions import RayServeException -from ray.serve.handle import RayServeHandle +from ray.serve.handle import RayServeSyncHandle from ray.serve._private.http_util import ASGIHTTPSender, make_fastapi_class_based_view from ray.serve._private.logging_utils import LoggingContext from ray.serve._private.utils import ( @@ -463,8 +463,8 @@ def run( port: int = DEFAULT_HTTP_PORT, name: str = SERVE_DEFAULT_APP_NAME, route_prefix: str = DEFAULT.VALUE, -) -> Optional[RayServeHandle]: - """Run a Serve application and return a ServeHandle to the ingress deployment. +) -> Optional[RayServeSyncHandle]: + """Run an application and return a handle to its ingress deployment. The application is returned by `Deployment.bind()` or `serve.build`. @@ -481,10 +481,6 @@ def run( route_prefix: Route prefix for HTTP requests. If not provided, it will use route_prefix of the ingress deployment. 
If specified neither as an argument nor in the ingress deployment, the route prefix will default to '/'. - - Returns: - RayServeHandle: A regular ray serve handle that can be called by user - to execute the serve DAG. """ client = _private_api.serve_start( detached=True, diff --git a/python/ray/serve/handle.py b/python/ray/serve/handle.py index 711bca9073da..c1b068c864f2 100644 --- a/python/ray/serve/handle.py +++ b/python/ray/serve/handle.py @@ -76,37 +76,40 @@ class HandleOptions: @PublicAPI(stability="beta") class RayServeHandle: - """A handle to a service deployment. - - Invoking this deployment with .remote is equivalent to pinging - an HTTP deployment. - - Example: - >>> import ray - >>> serve_client = ... # doctest: +SKIP - >>> handle = serve_client.get_handle("my_deployment") # doctest: +SKIP - >>> handle # doctest: +SKIP - RayServeSyncHandle(deployment_name="my_deployment") - >>> my_request_content = ... # doctest: +SKIP - >>> handle.remote(my_request_content) # doctest: +SKIP - ObjectRef(...) - >>> ray.get(handle.remote(...)) # doctest: +SKIP - # result - >>> let_it_crash_request = ... # doctest: +SKIP - >>> ray.get(handle.remote(let_it_crash_request)) # doctest: +SKIP - # raises RayTaskError Exception - >>> async_handle = serve_client.get_handle( # doctest: +SKIP - ... "my_deployment", sync=False) - >>> async_handle # doctest: +SKIP - RayServeHandle(deployment="my_deployment") - >>> await async_handle.remote(my_request_content) # doctest: +SKIP - ObjectRef(...) - >>> ray.get(await async_handle.remote(...)) # doctest: +SKIP - # result - >>> ray.get( # doctest: +SKIP - ... await async_handle.remote(let_it_crash_request) - ... ) - # raises RayTaskError Exception + """A handle used to make requests from one deployment to another. + + This is used to compose multiple deployments in a single application by binding + them together when building the application. For example: + + .. 
code-block:: python + + import ray + from ray import serve + from ray.serve.handle import RayServeHandle, RayServeSyncHandle + + @serve.deployment + class Downstream: + def __init__(self, message: str): + self._message = message + + def __call__(self, name: str) -> str: + return self._message + name + + @serve.deployment + class Ingress: + def __init__(self, handle: RayServeHandle): + self._handle = handle + + async def __call__(self, name: str) -> str: + obj_ref: ray.ObjectRef = await self._handle.remote(name) + return await obj_ref + + app = Ingress.bind(Downstream.bind("Hello ")) + handle: RayServeSyncHandle = serve.run(app) + + # Prints "Hello Mr. Magoo" + print(ray.get(handle.remote("Mr. Magoo"))) + """ def __init__( @@ -173,28 +176,23 @@ def stop_metrics_pusher(self): self._pusher.join() @property - def is_polling(self) -> bool: + def _is_polling(self) -> bool: """Whether this handle is actively polling for replica updates.""" return self.router.long_poll_client.is_running @property - def is_same_loop(self) -> bool: + def _is_same_loop(self) -> bool: """Whether the caller's asyncio loop is the same loop for handle. This is only useful for async handles. """ return get_or_create_event_loop() == self.router._event_loop - def options( + def _options( self, *, method_name: Union[str, DEFAULT] = DEFAULT.VALUE, ): - """Set options for this handle. - - Args: - method_name: The method to invoke. - """ new_options_dict = self.handle_options.__dict__.copy() user_modified_options_dict = { key: value @@ -212,6 +210,24 @@ def options( _internal_pickled_http_request=self._pickled_http_request, ) + def options( + self, + *, + method_name: Union[str, DEFAULT] = DEFAULT.VALUE, + ) -> "RayServeHandle": + """Set options for this handle and return an updated copy of it. + + Example: + + .. 
code-block:: python + + # The following two lines are equivalent: + obj_ref = await handle.other_method.remote(*args) + obj_ref = await handle.options(method_name="other_method").remote(*args) + + """ + return self._options(method_name=method_name) + def _remote(self, deployment_name, handle_options, args, kwargs) -> Coroutine: _request_context = ray.serve.context._serve_request_context.get() request_metadata = RequestMetadata( @@ -226,20 +242,19 @@ def _remote(self, deployment_name, handle_options, args, kwargs) -> Coroutine: return coro @_wrap_into_async_task - async def remote(self, *args, **kwargs): - """Issue an asynchronous request to the deployment. + async def remote(self, *args, **kwargs) -> asyncio.Task: + """Issue an asynchronous request to the __call__ method of the deployment. + + Returns an `asyncio.Task` whose underlying result is a Ray ObjectRef that + points to the final result of the request. + + The final result can be retrieved by `await`ing the ObjectRef. Example: + + .. code-block:: python + + obj_ref = await handle.remote(*args) + result = await obj_ref - Returns a Ray ObjectRef whose results can be waited for or retrieved - using ray.wait or ray.get (or ``await object_ref``), respectively. - - Returns: - ray.ObjectRef - Args: - request_data(dict, Any): If it's a dictionary, the data will be - available in ``request.json()`` or ``request.form()``. - Otherwise, it will be available in ``request.body()``. - ``**kwargs``: All keyword arguments will be available in - ``request.query_params``. """ return await self._remote( self.deployment_name, self.handle_options, args, kwargs @@ -271,8 +286,32 @@ def __del__(self): @PublicAPI(stability="beta") class RayServeSyncHandle(RayServeHandle): + """A handle used to make requests to the ingress deployment of an application. + + This is returned by `serve.run` and can be used to invoke the application from + Python rather than over HTTP. For example: + + .. 
code-block:: python + + import ray + from ray import serve + from ray.serve.handle import RayServeSyncHandle + + @serve.deployment + class Ingress: + def __call__(self, name: str) -> str: + return f"Hello {name}" + + app = Ingress.bind() + handle: RayServeSyncHandle = serve.run(app) + + # Prints "Hello Mr. Magoo" + print(ray.get(handle.remote("Mr. Magoo"))) + + """ + @property - def is_same_loop(self) -> bool: + def _is_same_loop(self) -> bool: # NOTE(simon): For sync handle, the caller doesn't have to be in the # same loop as the handle's loop, so we always return True here. return True @@ -285,22 +324,35 @@ def _make_router(self) -> Router: event_loop=_create_or_get_async_loop_in_thread(), ) - def remote(self, *args, **kwargs): - """Issue an asynchronous request to the deployment. + def options( + self, + *, + method_name: Union[str, DEFAULT] = DEFAULT.VALUE, + ) -> "RayServeSyncHandle": + """Set options for this handle and return an updated copy of it. + + Example: + + .. code-block:: python + + # The following two lines are equivalent: + obj_ref = handle.other_method.remote(*args) + obj_ref = handle.options(method_name="other_method").remote(*args) + + """ + return self._options(method_name=method_name) + + def remote(self, *args, **kwargs) -> ray.ObjectRef: + """Issue an asynchronous request to the __call__ method of the deployment. Returns a Ray ObjectRef whose results can be waited for or retrieved - using ray.wait or ray.get (or ``await object_ref``), respectively. - - Returns: - ray.ObjectRef - Args: - request_data(dict, Any): If it's a dictionary, the data will be - available in ``request.json()`` or ``request.form()``. - If it's a Starlette Request object, it will be passed in to the - handler directly, unmodified. Otherwise, the data will be - available in ``request.data``. - ``**kwargs``: All keyword arguments will be available in - ``request.args``. + using ray.wait or ray.get, respectively. + + .. 
code-block:: python + + obj_ref = handle.remote(*args) + result = ray.get(obj_ref) + """ coro = self._remote(self.deployment_name, self.handle_options, args, kwargs) future: concurrent.futures.Future = asyncio.run_coroutine_threadsafe( @@ -334,7 +386,7 @@ def __init__( # requirement of serve.start; Thus handle is fulfilled at runtime. self.handle: RayServeHandle = None - def options(self, *, method_name: str): + def options(self, *, method_name: str) -> "RayServeDeploymentHandle": return self.__class__( self.deployment_name, HandleOptions(method_name=method_name) ) From a86222a7d01d99c0e0b907e2be165da5ba4cefc2 Mon Sep 17 00:00:00 2001 From: Lonnie Liu <95255098+aslonnie@users.noreply.github.com> Date: Tue, 25 Apr 2023 12:00:41 -0700 Subject: [PATCH 091/424] [CI] Define and export autoscaler test config files. (#34733) To release tests infra. These files are referenced in release tests, and needs the exporting for proper visibility. Also formats the build file with buildifier. --- python/ray/autoscaler/aws/BUILD | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/python/ray/autoscaler/aws/BUILD b/python/ray/autoscaler/aws/BUILD index 4e733cfe26be..d7fff50db624 100644 --- a/python/ray/autoscaler/aws/BUILD +++ b/python/ray/autoscaler/aws/BUILD @@ -1,5 +1,11 @@ filegroup( - name = "example", - data = glob(["example-*.yaml"]), - visibility = ["//python/ray/tests:__pkg__"], + name = "example", + data = glob(["example-*.yaml"]), + visibility = ["//python/ray/tests:__pkg__"], +) + +filegroup( + name = "test_configs", + data = glob(["tests/*.yaml"]), + visibility = ["//release:__pkg__"], ) From c47c43dccab247f8c20a45c1979b3b89edba11aa Mon Sep 17 00:00:00 2001 From: Eric Liang Date: Tue, 25 Apr 2023 12:03:45 -0700 Subject: [PATCH 092/424] [data] [strict mode] Allow returning lists instead of arrays for numpy batches (#34734) Allow map_batches UDFs to return {"foo": [1, 2, 3]} in addition to {"foo": np.array([1, 2, 3])} by implicitly casting lists 
to arrays. --- .../ray/data/_internal/planner/map_batches.py | 20 +++++++++++++++---- python/ray/data/block.py | 2 +- python/ray/data/tests/test_strict_mode.py | 20 +++++++++++++++++++ 3 files changed, 37 insertions(+), 5 deletions(-) diff --git a/python/ray/data/_internal/planner/map_batches.py b/python/ray/data/_internal/planner/map_batches.py index 1a03d3b6e45e..a0d528bd8519 100644 --- a/python/ray/data/_internal/planner/map_batches.py +++ b/python/ray/data/_internal/planner/map_batches.py @@ -51,17 +51,29 @@ def validate_batch(batch: Block) -> None: ) if isinstance(batch, collections.abc.Mapping): - for key, value in batch.items(): - if not isinstance(value, np.ndarray): + for key, value in list(batch.items()): + if not isinstance(value, (np.ndarray, list)): raise ValueError( f"Error validating {_truncated_repr(batch)}: " "The `fn` you passed to `map_batches` returned a " f"`dict`. `map_batches` expects all `dict` values " - f"to be of type `numpy.ndarray`, but the value " + f"to be `list` or `np.ndarray` type, but the value " f"corresponding to key {key!r} is of type " f"{type(value)}. To fix this issue, convert " - f"the {type(value)} to a `numpy.ndarray`." + f"the {type(value)} to a `np.ndarray`." ) + if isinstance(value, list): + # Try to convert list values into an numpy array via + # np.array(), so users don't need to manually cast. + # NOTE: we don't cast generic iterables, since types like + # `str` are also Iterable. + try: + batch[key] = np.array(value) + except Exception: + raise ValueError( + "Failed to convert column values to numpy array: " + f"({_truncated_repr(value)})." + ) def process_next_batch(batch: DataBatch) -> Iterator[Block]: # Apply UDF. 
diff --git a/python/ray/data/block.py b/python/ray/data/block.py index e47c4a8f241d..d525e0f07185 100644 --- a/python/ray/data/block.py +++ b/python/ray/data/block.py @@ -439,7 +439,7 @@ def batch_to_block(batch: DataBatch) -> Block: return ArrowBlockAccessor.numpy_to_block( batch, passthrough_arrow_not_implemented_errors=True ) - except pa.ArrowNotImplementedError: + except (pa.ArrowNotImplementedError, pa.ArrowInvalid): import pandas as pd # TODO(ekl) once we support Python objects within Arrow blocks, we diff --git a/python/ray/data/tests/test_strict_mode.py b/python/ray/data/tests/test_strict_mode.py index 19eb1853f5eb..bbbf3c6b1b23 100644 --- a/python/ray/data/tests/test_strict_mode.py +++ b/python/ray/data/tests/test_strict_mode.py @@ -80,6 +80,26 @@ def test_strict_map_output(ray_start_regular_shared, enable_strict_mode): ds.map(lambda x: UserDict({"x": object()})).materialize() +def test_strict_convert_map_output(ray_start_regular_shared, enable_strict_mode): + ds = ray.data.range(1).map_batches(lambda x: {"id": [0, 1, 2, 3]}).materialize() + assert ds.take_batch()["id"].tolist() == [0, 1, 2, 3] + + with pytest.raises(ValueError): + # Strings not converted into array. 
+ ray.data.range(1).map_batches(lambda x: {"id": "string"}).materialize() + + class UserObj: + def __eq__(self, other): + return isinstance(other, UserObj) + + ds = ( + ray.data.range(1) + .map_batches(lambda x: {"id": [0, 1, 2, UserObj()]}) + .materialize() + ) + assert ds.take_batch()["id"].tolist() == [0, 1, 2, UserObj()] + + def test_strict_default_batch_format(ray_start_regular_shared, enable_strict_mode): ds = ray.data.range(1) From 381ccc5347300d70d510927b228c7dd16c481bc5 Mon Sep 17 00:00:00 2001 From: Artur Niederfahrenhorst Date: Tue, 25 Apr 2023 21:40:37 +0200 Subject: [PATCH 093/424] [RLlib] Introduce experimental larger than GPU train batch size feature for torch (#34189) Signed-off-by: Artur Niederfahrenhorst --- rllib/BUILD | 11 ++- rllib/algorithms/algorithm_config.py | 16 +++++ rllib/policy/sample_batch.py | 23 +++++- rllib/policy/torch_policy.py | 36 +++++++++- rllib/policy/torch_policy_v2.py | 42 +++++++++-- rllib/tests/test_gpus.py | 102 +++++++++++++++++++++++++-- 6 files changed, 214 insertions(+), 16 deletions(-) diff --git a/rllib/BUILD b/rllib/BUILD index 100df5ec4262..0d357bf7cf4d 100644 --- a/rllib/BUILD +++ b/rllib/BUILD @@ -2525,7 +2525,16 @@ py_test( name = "tests/test_gpus", tags = ["team:rllib", "tests_dir"], size = "large", - srcs = ["tests/test_gpus.py"] + srcs = ["tests/test_gpus.py"], + args = ["TestGPUs"] +) + +py_test( + name = "tests/test_gpus_large_batch", + tags = ["team:rllib", "tests_dir"], + size = "large", + srcs = ["tests/test_gpus.py"], + args = ["TestGPUsLargeBatch"] ) py_test( diff --git a/rllib/algorithms/algorithm_config.py b/rllib/algorithms/algorithm_config.py index b2b3e8467453..d3f4a99b9a90 100644 --- a/rllib/algorithms/algorithm_config.py +++ b/rllib/algorithms/algorithm_config.py @@ -429,6 +429,7 @@ def __init__(self, algo_class=None): self._disable_action_flattening = False self._disable_execution_plan_api = True self._disable_initialize_loss_from_dummy_batch = False + self._load_only_minibatch_onto_device = 
False # Has this config object been frozen (cannot alter its attributes anymore). self._is_frozen = False @@ -964,6 +965,14 @@ def validate(self) -> None: f"config.framework({self.framework_str})!" ) + if ( + self.simple_optimizer or self.framework_str != "torch" + ) and self._load_only_minibatch_onto_device: + raise ValueError( + "`load_only_minibatch_onto_device` is only supported for " + f"config.framework({self.framework_str}) and without simple_optimizer." + ) + # Detect if specified env is an Atari env. if self.is_atari is None: self.is_atari = self._detect_atari_env() @@ -1625,6 +1634,10 @@ def training( _enable_learner_api: Whether to enable the LearnerGroup and Learner for training. This API uses ray.train to run the training loop which allows for a more flexible distributed training. + _load_only_minibatch_onto_device: Whether to load only the minibatch onto + the given device. This is useful for larger training batches that + don't fit on the given device while the mini-batches and their + gradients do. This experimental setting is only supported for torch Returns: This updated AlgorithmConfig object. @@ -2460,6 +2473,7 @@ def experimental( _disable_action_flattening: Optional[bool] = NotProvided, _disable_execution_plan_api: Optional[bool] = NotProvided, _disable_initialize_loss_from_dummy_batch: Optional[bool] = NotProvided, + _load_only_minibatch_onto_device: Optional[bool] = NotProvided, ) -> "AlgorithmConfig": """Sets the config's experimental settings. 
@@ -2503,6 +2517,8 @@ def experimental( self._disable_initialize_loss_from_dummy_batch = ( _disable_initialize_loss_from_dummy_batch ) + if _load_only_minibatch_onto_device is not NotProvided: + self._load_only_minibatch_onto_device = _load_only_minibatch_onto_device return self diff --git a/rllib/policy/sample_batch.py b/rllib/policy/sample_batch.py index 1eb75a679988..cd1c7d14ad64 100644 --- a/rllib/policy/sample_batch.py +++ b/rllib/policy/sample_batch.py @@ -790,15 +790,32 @@ def _zero_pad_in_place(path, value): return self @ExperimentalAPI - def to_device(self, device, framework="torch"): + def to_device(self, device, framework="torch", copy=False): + """Moves tensors inside this batch to a given device. + + Depending on the copy flag, this will either return a new batch + or modify this batch in-place. + + Args: + device: The device to move the tensors to. + framework: The framework to use for the device (e.g. "torch"). + copy: If False, modify batch in place. If True, return a new batch. + + Returns: + A batch with all tensors moved to the given device. + """ """TODO: transfer batch to given device as framework tensor.""" if framework == "torch": assert torch is not None + if copy: + target = SampleBatch() + else: + target = self for k, v in self.items(): - self[k] = convert_to_torch_tensor(v, device) + target[k] = convert_to_torch_tensor(v, device) else: raise NotImplementedError - return self + return target @PublicAPI def size_bytes(self) -> int: diff --git a/rllib/policy/torch_policy.py b/rllib/policy/torch_policy.py index f78b13858390..79920d4d18d8 100644 --- a/rllib/policy/torch_policy.py +++ b/rllib/policy/torch_policy.py @@ -536,7 +536,14 @@ def load_batch_into_buffer( ) # 3) Load splits into the given buffer (consisting of n GPUs). 
- slices = [slice.to_device(self.devices[i]) for i, slice in enumerate(slices)] + if not self.config.get("_load_only_minibatch_onto_device", False): + # We usually want to load the full batch onto the device here, which is + # much faster than loading the batch slice-by-slice. + # However, if the batch is too large, it may be favorable to load the + # batch slice-by-slice. + slices = [ + slice.to_device(self.devices[i]) for i, slice in enumerate(slices) + ] self._loaded_batches[buffer_index] = slices # Return loaded samples per-device. @@ -604,8 +611,20 @@ def learn_on_loaded_batch(self, offset: int = 0, buffer_index: int = 0): ) batch_fetches[f"tower_{i}"] = {"custom_metrics": custom_metrics} + # If `_load_only_minibatch_onto_device` is True, then the main batch always + # remains on the CPU (it's probably too big to be fit on the GPU). Thus, in + # this case, for each individual update step, we need to copy the freshly + # determined sub-slice to the GPU. These sub-slices need to be small enough + # then to fit on the GPU. + if self.config.get("_load_only_minibatch_onto_device", False): + copy_batch_to_device = True + else: + copy_batch_to_device = False + # Do the (maybe parallelized) gradient calculation step. - tower_outputs = self._multi_gpu_parallel_grad_calc(device_batches) + tower_outputs = self._multi_gpu_parallel_grad_calc( + device_batches, copy_batch_to_device=copy_batch_to_device + ) # Mean-reduce gradients over GPU-towers (do this on CPU: self.device). all_grads = [] @@ -1061,7 +1080,9 @@ def _lazy_tensor_dict(self, postprocessed_batch: SampleBatch, device=None): return postprocessed_batch def _multi_gpu_parallel_grad_calc( - self, sample_batches: List[SampleBatch] + self, + sample_batches: List[SampleBatch], + copy_batch_to_device: bool = False, ) -> List[Tuple[List[TensorType], GradInfoDict]]: """Performs a parallelized loss and gradient calculation over the batch. 
@@ -1073,6 +1094,10 @@ def _multi_gpu_parallel_grad_calc( Args: sample_batches: A list of SampleBatch shards to calculate loss and gradients for. + copy_batch_to_device: Whether to create a copy of the batch that is then + moved to GPU. This is useful if we don't want to move the original + batch to the device. In case of a large batch, we can thereby only move + mini-batches to GPU one by one and free them after each step. Returns: A list (one item per device) of 2-tuples, each with 1) gradient @@ -1083,6 +1108,11 @@ def _multi_gpu_parallel_grad_calc( results = {} grad_enabled = torch.is_grad_enabled() + if copy_batch_to_device: + sample_batches = [ + batch.to_device(i, copy=True) for i, batch in enumerate(sample_batches) + ] + def _worker(shard_idx, model, sample_batch, device): torch.set_grad_enabled(grad_enabled) try: diff --git a/rllib/policy/torch_policy_v2.py b/rllib/policy/torch_policy_v2.py index 79546b0623ba..86354800cb23 100644 --- a/rllib/policy/torch_policy_v2.py +++ b/rllib/policy/torch_policy_v2.py @@ -734,7 +734,14 @@ def load_batch_into_buffer( ) # 3) Load splits into the given buffer (consisting of n GPUs). - slices = [slice.to_device(self.devices[i]) for i, slice in enumerate(slices)] + if not self.config.get("_load_only_minibatch_onto_device", False): + # We usually want to load the full batch onto the device here, which is + # much faster than loading the batch slice-by-slice. + # However, if the batch is too large, it may be favorable to load the + # batch slice-by-slice. + slices = [ + slice.to_device(self.devices[i]) for i, slice in enumerate(slices) + ] self._loaded_batches[buffer_index] = slices # Return loaded samples per-device. 
@@ -749,7 +756,11 @@ def get_num_samples_loaded_into_buffer(self, buffer_index: int = 0) -> int: @override(Policy) @DeveloperAPI - def learn_on_loaded_batch(self, offset: int = 0, buffer_index: int = 0): + def learn_on_loaded_batch( + self, + offset: int = 0, + buffer_index: int = 0, + ): if not self._loaded_batches[buffer_index]: raise ValueError( "Must call Policy.load_batch_into_buffer() before " @@ -803,8 +814,20 @@ def learn_on_loaded_batch(self, offset: int = 0, buffer_index: int = 0): ) batch_fetches[f"tower_{i}"] = {"custom_metrics": custom_metrics} + # If `_load_only_minibatch_onto_device` is True, then the main batch always + # remains on the CPU (it's probably too big to be fit on the GPU). Thus, in + # this case, for each individual update step, we need to copy the freshly + # determined sub-slice to the GPU. These sub-slices need to be small enough + # then to fit on the GPU. + if self.config.get("_load_only_minibatch_onto_device", False): + copy_batch_to_device = True + else: + copy_batch_to_device = False + # Do the (maybe parallelized) gradient calculation step. - tower_outputs = self._multi_gpu_parallel_grad_calc(device_batches) + tower_outputs = self._multi_gpu_parallel_grad_calc( + device_batches, copy_batch_to_device=copy_batch_to_device + ) # Mean-reduce gradients over GPU-towers (do this on CPU: self.device). all_grads = [] @@ -1188,7 +1211,9 @@ def _lazy_tensor_dict(self, postprocessed_batch: SampleBatch, device=None): return postprocessed_batch def _multi_gpu_parallel_grad_calc( - self, sample_batches: List[SampleBatch] + self, + sample_batches: List[SampleBatch], + copy_batch_to_device: bool = False, ) -> List[Tuple[List[TensorType], GradInfoDict]]: """Performs a parallelized loss and gradient calculation over the batch. @@ -1200,6 +1225,10 @@ def _multi_gpu_parallel_grad_calc( Args: sample_batches: A list of SampleBatch shards to calculate loss and gradients for. 
+ copy_batch_to_device: Whether to create a copy of the batch that is then + moved to GPU. This is useful if we don't want to move the original + batch to the device. In case of a large batch, we can thereby only move + mini-batches to GPU one by one and free them after each step. Returns: A list (one item per device) of 2-tuples, each with 1) gradient @@ -1210,6 +1239,11 @@ def _multi_gpu_parallel_grad_calc( results = {} grad_enabled = torch.is_grad_enabled() + if copy_batch_to_device: + sample_batches = [ + batch.to_device(i, copy=True) for i, batch in enumerate(sample_batches) + ] + def _worker(shard_idx, model, sample_batch, device): torch.set_grad_enabled(grad_enabled) try: diff --git a/rllib/tests/test_gpus.py b/rllib/tests/test_gpus.py index 3d01901a5db6..e61d4c04a657 100644 --- a/rllib/tests/test_gpus.py +++ b/rllib/tests/test_gpus.py @@ -1,13 +1,19 @@ +import copy import unittest +import numpy as np +import torch + import ray from ray import air +from ray import tune from ray.rllib.algorithms.a2c.a2c import A2CConfig -from ray.rllib.utils.framework import try_import_torch +from ray.rllib.algorithms.ppo import PPOConfig +from ray.rllib.algorithms.qmix import QMixConfig +from ray.rllib.policy.torch_policy import TorchPolicy +from ray.rllib.policy.torch_policy_v2 import TorchPolicyV2 +from ray.rllib.policy.sample_batch import SampleBatch from ray.rllib.utils.test_utils import framework_iterator -from ray import tune - -torch, _ = try_import_torch() class TestGPUs(unittest.TestCase): @@ -111,8 +117,94 @@ def test_gpus_in_local_mode(self): ray.shutdown() +class TestGPUsLargeBatch(unittest.TestCase): + def test_larger_train_batch_size_multi_gpu_train_one_step(self): + # Tests that we can use a `train_batch_size` larger than GPU memory with our + # experimental setting `_load_only_minibatch_onto_device` with + # multi_gpu_train_one_step. 
+ + # These values make it so that one large minibatch and the optimizer + # variables can fit onto the device, but the whole sample_batch is already too + # large for the GPU itself. + sgd_minibatch_size = int(1e4) + train_batch_size = int(sgd_minibatch_size * 1e5) + + # Fake CartPole episode of n time steps. + CARTPOLE_FAKE_BATCH = SampleBatch( + { + SampleBatch.OBS: np.zeros((train_batch_size, 4), dtype=np.float32), + SampleBatch.ACTIONS: np.zeros((train_batch_size,), dtype=np.float32), + SampleBatch.PREV_ACTIONS: np.zeros( + (train_batch_size,), dtype=np.float32 + ), + SampleBatch.REWARDS: np.zeros((train_batch_size,), dtype=np.float32), + SampleBatch.PREV_REWARDS: np.zeros( + (train_batch_size,), dtype=np.float32 + ), + "value_targets": np.zeros((train_batch_size,), dtype=np.float32), + SampleBatch.TERMINATEDS: np.array([False] * train_batch_size), + SampleBatch.TRUNCATEDS: np.array([False] * train_batch_size), + "advantages": np.zeros((train_batch_size,), dtype=np.float32), + SampleBatch.VF_PREDS: np.zeros((train_batch_size,), dtype=np.float32), + SampleBatch.ACTION_DIST_INPUTS: np.zeros( + (train_batch_size, 2), dtype=np.float32 + ), + SampleBatch.ACTION_LOGP: np.zeros( + (train_batch_size,), dtype=np.float32 + ), + SampleBatch.EPS_ID: np.zeros((train_batch_size,), dtype=np.int64), + SampleBatch.AGENT_INDEX: np.zeros((train_batch_size,), dtype=np.int64), + } + ) + + # Test if we can even fail this test due too a GPU OOM + try: + batch_copy = copy.deepcopy(CARTPOLE_FAKE_BATCH) + batch_copy.to_device(0) + raise ValueError( + "We should not be able to move this batch to the device. " + "If this error occurs, this means that this test cannot fail " + "inside multi_gpu_train_one_step." 
+ ) + except torch.cuda.OutOfMemoryError: + pass + + for config_class in (PPOConfig, QMixConfig): + config = ( + config_class() + .environment(env="CartPole-v1") + .framework("torch") + .resources(num_gpus=1) + .rollouts(num_rollout_workers=0) + .training( + train_batch_size=train_batch_size, + num_sgd_iter=1, + sgd_minibatch_size=self.sgd_minibatch_size, + # This setting makes it so that we don't load a batch of + # size `train_batch_size` onto the device, but only + # minibatches. + _load_only_minibatch_onto_device=True, + ) + ) + + algorithm = config.build() + policy = algorithm.get_policy() + + # Sanity check if we are covering both, TorchPolicy and TorchPolicyV2 + if config_class is QMixConfig: + assert isinstance(policy, TorchPolicy) + elif config_class is PPOConfig: + assert isinstance(policy, TorchPolicyV2) + + policy.load_batch_into_buffer(CARTPOLE_FAKE_BATCH) + policy.learn_on_loaded_batch() + + if __name__ == "__main__": import pytest import sys - sys.exit(pytest.main(["-v", __file__])) + # One can specify the specific TestCase class to run. + # None for all unittest.TestCase classes in this file. + class_ = sys.argv[1] if len(sys.argv) > 1 else None + sys.exit(pytest.main(["-v", __file__ + ("" if class_ is None else "::" + class_)])) From fb528fb0a70a0949611072aadbcb58b761e5b0f1 Mon Sep 17 00:00:00 2001 From: Edward Oakes Date: Tue, 25 Apr 2023 14:46:31 -0500 Subject: [PATCH 094/424] [serve] Wait until replicas have finished recovering (with timeout) to broadcast `LongPoll` updates (#34675) When the controller recovers, all replicas are put into the `RECOVERING` state. These are not included in long poll updates for running replicas, which means we broadcast an update that effectively clears out all available replicas in all handles. This PR addresses this problem by avoiding broadcasting any updates until all replicas are fully recovered (or a 10s timeout is reached). 
We also wait to run the `http_state` update loop because if a new proxy is started, it won't be able to serve any traffic due to having no replicas available. --- python/ray/serve/_private/constants.py | 4 ++ python/ray/serve/_private/deployment_state.py | 45 ++++++++++++------- python/ray/serve/controller.py | 31 ++++++++++++- .../ray/serve/tests/test_deployment_state.py | 26 ++++++----- 4 files changed, 77 insertions(+), 29 deletions(-) diff --git a/python/ray/serve/_private/constants.py b/python/ray/serve/_private/constants.py index b94683f381b8..cf8e9a4fbf72 100644 --- a/python/ray/serve/_private/constants.py +++ b/python/ray/serve/_private/constants.py @@ -120,6 +120,10 @@ # Env var to control legacy sync deployment handle behavior in DAG. SYNC_HANDLE_IN_DAG_FEATURE_FLAG_ENV_KEY = "SERVE_DEPLOYMENT_HANDLE_IS_SYNC" +# Maximum duration to wait until broadcasting a long poll update if there are +# still replicas in the RECOVERING state. +RECOVERING_LONG_POLL_BROADCAST_TIMEOUT_S = 10.0 + class ServeHandleType(str, Enum): SYNC = "SYNC" diff --git a/python/ray/serve/_private/deployment_state.py b/python/ray/serve/_private/deployment_state.py index 0a39eabf0363..ea0180f5efc0 100644 --- a/python/ray/serve/_private/deployment_state.py +++ b/python/ray/serve/_private/deployment_state.py @@ -1434,7 +1434,7 @@ def _scale_deployment_replicas(self) -> bool: return replicas_stopped - def _check_curr_status(self) -> bool: + def _check_curr_status(self) -> Tuple[bool, bool]: """Check the current deployment status. Checks the difference between the target vs. running replica count for @@ -1443,8 +1443,7 @@ def _check_curr_status(self) -> bool: This will update the current deployment status depending on the state of the replicas. - Returns: - was_deleted + Returns (deleted, any_replicas_recovering). 
""" # TODO(edoakes): we could make this more efficient in steady-state by # having a "healthy" flag that gets flipped if an update or replica @@ -1453,6 +1452,9 @@ def _check_curr_status(self) -> bool: target_version = self._target_state.version target_replica_count = self._target_state.num_replicas + any_replicas_recovering = ( + self._replicas.count(states=[ReplicaState.RECOVERING]) > 0 + ) all_running_replica_cnt = self._replicas.count(states=[ReplicaState.RUNNING]) running_at_target_version_replica_cnt = self._replicas.count( states=[ReplicaState.RUNNING], version=target_version @@ -1488,7 +1490,7 @@ def _check_curr_status(self) -> bool: f"details. Retrying after {self._backoff_time_s} seconds." ), ) - return False + return False, any_replicas_recovering # If we have pending ops, the current goal is *not* ready. if ( @@ -1504,16 +1506,16 @@ def _check_curr_status(self) -> bool: ): # Check for deleting. if self._target_state.deleting and all_running_replica_cnt == 0: - return True + return True, any_replicas_recovering # Check for a non-zero number of deployments. if target_replica_count == running_at_target_version_replica_cnt: self._curr_status_info = DeploymentStatusInfo( self._name, DeploymentStatus.HEALTHY ) - return False + return False, any_replicas_recovering - return False + return False, any_replicas_recovering def _check_startup_replicas( self, original_state: ReplicaState, stop_on_slow=False @@ -1707,7 +1709,7 @@ def _check_and_update_replicas(self) -> bool: return running_replicas_changed - def update(self) -> bool: + def update(self) -> Tuple[bool, bool]: """Attempts to reconcile this deployment to match its goal state. This is an asynchronous call; it's expected to be called repeatedly. @@ -1715,8 +1717,9 @@ def update(self) -> bool: Also updates the internal DeploymentStatusInfo based on the current state of the system. - Returns true if this deployment was successfully deleted. + Returns (deleted, any_replicas_recovering). 
""" + deleted, any_replicas_recovering = False, False try: # Add or remove DeploymentReplica instances in self._replicas. # This should be the only place we adjust total number of replicas @@ -1730,16 +1733,15 @@ def update(self) -> bool: if running_replicas_changed: self._notify_running_replicas_changed() - deleted = self._check_curr_status() + deleted, any_replicas_recovering = self._check_curr_status() except Exception: self._curr_status_info = DeploymentStatusInfo( name=self._name, status=DeploymentStatus.UNHEALTHY, message="Failed to update deployment:" f"\n{traceback.format_exc()}", ) - deleted = False - return deleted + return deleted, any_replicas_recovering def _stop_one_running_replica_for_testing(self): running_replicas = self._replicas.pop(states=[ReplicaState.RUNNING]) @@ -1837,7 +1839,8 @@ def _calculate_max_replicas_to_stop(self) -> int: pending_replicas = nums_nodes - new_running_replicas - old_running_replicas return max(rollout_size - pending_replicas, 0) - def update(self) -> bool: + def update(self) -> Tuple[bool, bool]: + """Returns (deleted, any_replicas_recovering).""" try: if self._target_state.deleting: self._stop_all_replicas() @@ -1864,7 +1867,7 @@ def update(self) -> bool: status=DeploymentStatus.UNHEALTHY, message="Failed to update deployment:" f"\n{traceback.format_exc()}", ) - return False + return False, False def should_autoscale(self) -> bool: return False @@ -2194,9 +2197,13 @@ def get_handle_queueing_metrics( current_handle_queued_queries = 0 return current_handle_queued_queries - def update(self): - """Updates the state of all deployments to match their goal state.""" + def update(self) -> bool: + """Updates the state of all deployments to match their goal state. + + Returns True if any of the deployments have replicas in the RECOVERING state. 
+ """ deleted_tags = [] + any_recovering = False for deployment_name, deployment_state in self._deployment_states.items(): if deployment_state.should_autoscale(): current_num_ongoing_requests = self.get_replica_ongoing_request_metrics( @@ -2210,7 +2217,7 @@ def update(self): deployment_state.autoscale( current_num_ongoing_requests, current_handle_queued_queries ) - deleted = deployment_state.update() + deleted, recovering = deployment_state.update() if deleted: deleted_tags.append(deployment_name) deployment_info = deployment_state.target_info @@ -2219,12 +2226,16 @@ def update(self): self._deleted_deployment_metadata.popitem(last=False) self._deleted_deployment_metadata[deployment_name] = deployment_info + any_recovering |= recovering + for tag in deleted_tags: del self._deployment_states[tag] if len(deleted_tags): self._record_deployment_usage() + return any_recovering + def _record_deployment_usage(self): record_extra_usage_tag( TagKey.SERVE_NUM_DEPLOYMENTS, str(len(self._deployment_states)) diff --git a/python/ray/serve/controller.py b/python/ray/serve/controller.py index 9f0499ce41b8..19914e59a951 100644 --- a/python/ray/serve/controller.py +++ b/python/ray/serve/controller.py @@ -32,6 +32,7 @@ SERVE_ROOT_URL_ENV_KEY, SERVE_NAMESPACE, RAY_INTERNAL_SERVE_CONTROLLER_PIN_ON_NODE, + RECOVERING_LONG_POLL_BROADCAST_TIMEOUT_S, SERVE_DEFAULT_APP_NAME, DEPLOYMENT_NAME_PREFIX_SEPARATOR, MULTI_APP_MIGRATION_MESSAGE, @@ -120,6 +121,7 @@ async def __init__( self.deployment_stats = defaultdict(lambda: defaultdict(dict)) self.long_poll_host = LongPollHost() + self.done_recovering_event = asyncio.Event() if _disable_http_proxy: self.http_state = None @@ -197,6 +199,9 @@ async def listen_for_change(self, keys_to_snapshot_ids: Dict[str, int]): determine whether or not the host should immediately return the data or wait for the value to be changed. 
""" + if not self.done_recovering_event.is_set(): + await self.done_recovering_event.wait() + return await (self.long_poll_host.listen_for_change(keys_to_snapshot_ids)) async def listen_for_change_java(self, keys_to_snapshot_ids_bytes: bytes): @@ -206,6 +211,9 @@ async def listen_for_change_java(self, keys_to_snapshot_ids_bytes: bytes): keys_to_snapshot_ids_bytes (Dict[str, int]): the protobuf bytes of keys_to_snapshot_ids (Dict[str, int]). """ + if not self.done_recovering_event.is_set(): + await self.done_recovering_event.wait() + return await ( self.long_poll_host.listen_for_change_java(keys_to_snapshot_ids_bytes) ) @@ -250,16 +258,35 @@ async def run_control_loop(self) -> None: # NOTE(edoakes): we catch all exceptions here and simply log them, # because an unhandled exception would cause the main control loop to # halt, which should *never* happen. + recovering_timeout = RECOVERING_LONG_POLL_BROADCAST_TIMEOUT_S + start_time = time.time() while True: - if self.http_state: + if ( + not self.done_recovering_event.is_set() + and time.time() - start_time > recovering_timeout + ): + logger.warning( + f"Replicas still recovering after {recovering_timeout}s, " + "setting done recovering event to broadcast long poll updates." + ) + self.done_recovering_event.set() + + # Don't update http_state until after the done recovering event is set, + # otherwise we may start a new HTTP proxy but not broadcast it any + # info about available deployments & their replicas. 
+ if self.http_state and self.done_recovering_event.is_set(): try: self.http_state.update() except Exception: logger.exception("Exception updating HTTP state.") + try: - self.deployment_state_manager.update() + any_recovering = self.deployment_state_manager.update() + if not self.done_recovering_event.is_set() and not any_recovering: + self.done_recovering_event.set() except Exception: logger.exception("Exception updating deployment state.") + try: self.application_state_manager.update() except Exception: diff --git a/python/ray/serve/tests/test_deployment_state.py b/python/ray/serve/tests/test_deployment_state.py index e4976a4c9abf..d7ea620c4f6f 100644 --- a/python/ray/serve/tests/test_deployment_state.py +++ b/python/ray/serve/tests/test_deployment_state.py @@ -508,7 +508,7 @@ def test_create_delete_single_replica(mock_get_all_node_ids, mock_deployment_sta # Once it's done stopping, replica should be removed. replica = deployment_state._replicas.get()[0] replica._actor.set_done_stopping() - deleted = deployment_state.update() + deleted, _ = deployment_state.update() assert deleted check_counts(deployment_state, total=0) @@ -557,7 +557,7 @@ def test_force_kill(mock_get_all_node_ids, mock_deployment_state): # Once the replica is done stopping, it should be removed. 
replica = deployment_state._replicas.get()[0] replica._actor.set_done_stopping() - deleted = deployment_state.update() + deleted, _ = deployment_state.update() assert deleted check_counts(deployment_state, total=0) @@ -689,7 +689,7 @@ def test_redeploy_no_version(mock_get_all_node_ids, mock_deployment_state): check_counts(deployment_state, total=1, by_state=[(ReplicaState.STARTING, 1)]) assert deployment_state.curr_status_info.status == DeploymentStatus.UPDATING - deleted = deployment_state.update() + deleted, _ = deployment_state.update() assert not deleted check_counts(deployment_state, total=1, by_state=[(ReplicaState.RUNNING, 1)]) assert deployment_state.curr_status_info.status == DeploymentStatus.HEALTHY @@ -793,7 +793,7 @@ def test_redeploy_new_version(mock_get_all_node_ids, mock_deployment_state): by_state=[(ReplicaState.STARTING, 1)], ) - deleted = deployment_state.update() + deleted, _ = deployment_state.update() assert not deleted check_counts( deployment_state, @@ -2148,7 +2148,8 @@ def test_resume_deployment_state_from_replica_tags( deployment_state_manager._deployment_states[tag] = deployment_state # Single replica should be created. - deployment_state_manager.update() + any_recovering = deployment_state_manager.update() + assert not any_recovering check_counts( deployment_state, total=1, @@ -2158,7 +2159,8 @@ def test_resume_deployment_state_from_replica_tags( deployment_state._replicas.get()[0]._actor.set_ready() # Now the replica should be marked running. 
- deployment_state_manager.update() + any_recovering = deployment_state_manager.update() + assert not any_recovering check_counts( deployment_state, total=1, @@ -2170,8 +2172,8 @@ def test_resume_deployment_state_from_replica_tags( # Step 2: Delete _replicas from deployment_state deployment_state._replicas = ReplicaStateContainer() - # Step 3: Create new deployment_state by resuming from passed in replicas + # Step 3: Create new deployment_state by resuming from passed in replicas deployment_state_manager._recover_from_checkpoint( [ReplicaName.prefix + mocked_replica.replica_tag] ) @@ -2183,11 +2185,12 @@ def test_resume_deployment_state_from_replica_tags( check_counts( deployment_state, total=1, version=None, by_state=[(ReplicaState.RECOVERING, 1)] ) - deployment_state._replicas.get()[0]._actor.set_ready() - deployment_state._replicas.get()[0]._actor.set_starting_version(b_version_1) # Now the replica should be marked running. - deployment_state_manager.update() + deployment_state._replicas.get()[0]._actor.set_ready() + deployment_state._replicas.get()[0]._actor.set_starting_version(b_version_1) + any_recovering = deployment_state_manager.update() + assert not any_recovering check_counts( deployment_state, total=1, @@ -2197,6 +2200,9 @@ def test_resume_deployment_state_from_replica_tags( # Ensure same replica name is used assert deployment_state._replicas.get()[0].replica_tag == mocked_replica.replica_tag + any_recovering = deployment_state_manager.update() + assert not any_recovering + def test_stopping_replicas_ranking(): @dataclass From f54fac366bf5cc74430da6bb05199d6f9d445d9d Mon Sep 17 00:00:00 2001 From: Eric Liang Date: Tue, 25 Apr 2023 13:35:09 -0700 Subject: [PATCH 095/424] [core] Fix flickering progress bar due to overriden prints to StringIO (#34735) Some modules are using print() to string buffers, which we unnecessarily intercept and hide/unhide our tqdm bars for. This causes flickering in the progress bar. 
--- python/ray/experimental/tqdm_ray.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/python/ray/experimental/tqdm_ray.py b/python/ray/experimental/tqdm_ray.py index 59ceb4967c34..360e2a7e5c02 100644 --- a/python/ray/experimental/tqdm_ray.py +++ b/python/ray/experimental/tqdm_ray.py @@ -3,6 +3,7 @@ import json import logging import os +import sys import threading import uuid from typing import Any, Dict, Iterable, Optional @@ -37,6 +38,11 @@ def safe_print(*args, **kwargs): By default, the builtin print will be patched to this function when tqdm_ray is used. To disable this, set RAY_TQDM_PATCH_PRINT=0. """ + + # Ignore prints to StringIO objects, etc. + if kwargs.get("file") not in [sys.stdout, sys.stderr, None]: + return _print(*args, **kwargs) + try: instance().hide_bars() _print(*args, **kwargs) From 1a1a43071af6beb4d9fb0030d7e898dc50944604 Mon Sep 17 00:00:00 2001 From: Rohan Potdar <66227218+Rohan138@users.noreply.github.com> Date: Tue, 25 Apr 2023 16:36:46 -0400 Subject: [PATCH 096/424] [RLlib] fix an example (#34325) Signed-off-by: Rohan Potdar --- rllib/examples/cartpole_lstm.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/rllib/examples/cartpole_lstm.py b/rllib/examples/cartpole_lstm.py index 1f5cb0128fd4..ce01295380f9 100644 --- a/rllib/examples/cartpole_lstm.py +++ b/rllib/examples/cartpole_lstm.py @@ -105,7 +105,8 @@ # >> # >> while True: # >> a, state_out, _ = algo.compute_single_action( - # .. obs, state, prev_a, prev_r) + # .. obs, state, prev_action=prev_a, prev_reward=prev_r + # .. ) # >> obs, reward, done, truncated, _ = env.step(a) # >> if done: # >> obs, info = env.reset() From ba9ad19ef449556ac941e0dd3e9763d3db402bca Mon Sep 17 00:00:00 2001 From: Hao Chen Date: Tue, 25 Apr 2023 16:35:34 -0700 Subject: [PATCH 097/424] [Data] Implement limit physical operator (#34705) ## Why are these changes needed? - Implemented the Limit physical operator for streaming execution. 
- Added the `LimitStage` for legacy compatibility. Note, currently when the limit operator reaches the limit, the upstream operators still won't stop producing data. This will be optimized in a follow-up PR. ## Related issue number #34234 --- .../data/_internal/execution/legacy_compat.py | 8 +- .../execution/operators/limit_operator.py | 98 +++++++++++++++++++ .../logical/operators/limit_operator.py | 16 +++ python/ray/data/_internal/stage_impl.py | 43 +++++++- python/ray/data/datastream.py | 44 ++------- 5 files changed, 172 insertions(+), 37 deletions(-) create mode 100644 python/ray/data/_internal/execution/operators/limit_operator.py create mode 100644 python/ray/data/_internal/logical/operators/limit_operator.py diff --git a/python/ray/data/_internal/execution/legacy_compat.py b/python/ray/data/_internal/execution/legacy_compat.py index a343c5384a3b..d0fc71b2fff4 100644 --- a/python/ray/data/_internal/execution/legacy_compat.py +++ b/python/ray/data/_internal/execution/legacy_compat.py @@ -14,7 +14,10 @@ from ray.data.block import Block, BlockMetadata, List from ray.data.datasource import ReadTask from ray.data._internal.stats import StatsDict, DatastreamStats -from ray.data._internal.stage_impl import RandomizeBlocksStage +from ray.data._internal.stage_impl import ( + RandomizeBlocksStage, + LimitStage, +) from ray.data._internal.block_list import BlockList from ray.data._internal.lazy_block_list import LazyBlockList from ray.data._internal.compute import ( @@ -26,6 +29,7 @@ from ray.data._internal.memory_tracing import trace_allocation from ray.data._internal.plan import ExecutionPlan, OneToOneStage, AllToAllStage, Stage from ray.data._internal.execution.operators.map_operator import MapOperator +from ray.data._internal.execution.operators.limit_operator import LimitOperator from ray.data._internal.execution.operators.all_to_all_operator import AllToAllOperator from ray.data._internal.execution.operators.input_data_buffer import InputDataBuffer from 
ray.data._internal.execution.interfaces import ( @@ -300,6 +304,8 @@ def do_map(blocks: Iterator[Block], ctx: TaskContext) -> Iterator[Block]: min_rows_per_bundle=stage.target_block_size, ray_remote_args=stage.ray_remote_args, ) + elif isinstance(stage, LimitStage): + return LimitOperator(stage.limit, input_op) elif isinstance(stage, AllToAllStage): fn = stage.fn block_udf = stage.block_udf diff --git a/python/ray/data/_internal/execution/operators/limit_operator.py b/python/ray/data/_internal/execution/operators/limit_operator.py new file mode 100644 index 000000000000..6a8da6e76993 --- /dev/null +++ b/python/ray/data/_internal/execution/operators/limit_operator.py @@ -0,0 +1,98 @@ +import ray +import copy +from collections import deque +from ray.data.block import ( + Block, + BlockAccessor, + BlockMetadata, +) +from ray.data._internal.stats import StatsDict +from ray.data._internal.execution.interfaces import ( + PhysicalOperator, + RefBundle, +) +from ray.data._internal.remote_fn import cached_remote_fn +from ray.types import ObjectRef +from typing import ( + Deque, + List, + Optional, + Tuple, +) + + +class LimitOperator(PhysicalOperator): + """Physical operator for limit.""" + + def __init__( + self, + limit: int, + input_op: PhysicalOperator, + ): + self._limit = limit + self._consumed_rows = 0 + self._buffer: Deque[RefBundle] = deque() + self._name = f"Limit[limit={limit}]" + self._output_metadata: List[BlockMetadata] = [] + self._num_outputs_total = input_op.num_outputs_total() + if self._num_outputs_total is not None: + self._num_outputs_total = min(self._num_outputs_total, limit) + super().__init__(self._name, [input_op]) + + def _limit_reached(self) -> bool: + return self._consumed_rows >= self._limit + + def add_input(self, refs: RefBundle, input_index: int) -> None: + assert not self.completed() + assert input_index == 0, input_index + if self._limit_reached(): + return + out_blocks: List[ObjectRef[Block]] = [] + out_metadata: List[BlockMetadata] = [] 
+ for block, metadata in refs.blocks: + num_rows = metadata.num_rows + assert num_rows is not None + if self._consumed_rows + num_rows <= self._limit: + self._consumed_rows += num_rows + out_blocks.append(block) + out_metadata.append(metadata) + self._output_metadata.append(metadata) + else: + # Slice the last block. + def slice_fn(block, metadata, num_rows) -> Tuple[Block, BlockMetadata]: + block = BlockAccessor.for_block(block).slice(0, num_rows, copy=True) + metadata = copy.deepcopy(metadata) + metadata.num_rows = num_rows + metadata.size_bytes = BlockAccessor.for_block(block).size_bytes() + return block, metadata + + block, metadata_ref = cached_remote_fn(slice_fn, num_returns=2).remote( + block, + metadata, + self._limit - self._consumed_rows, + ) + out_blocks.append(block) + metadata = ray.get(metadata_ref) + out_metadata.append(metadata) + self._output_metadata.append(metadata) + break + out_refs = RefBundle( + list(zip(out_blocks, out_metadata)), + owns_blocks=refs.owns_blocks, + ) + self._buffer.append(out_refs) + + def has_next(self) -> bool: + return len(self._buffer) > 0 + + def get_next(self) -> RefBundle: + return self._buffer.popleft() + + def get_stats(self) -> StatsDict: + return {self._name: self._output_metadata} + + def num_outputs_total(self) -> Optional[int]: + if self._limit_reached(): + return self._limit + else: + return self._num_outputs_total diff --git a/python/ray/data/_internal/logical/operators/limit_operator.py b/python/ray/data/_internal/logical/operators/limit_operator.py new file mode 100644 index 000000000000..c7d9690ad8b7 --- /dev/null +++ b/python/ray/data/_internal/logical/operators/limit_operator.py @@ -0,0 +1,16 @@ +from ray.data._internal.logical.interfaces import LogicalOperator + + +class Limit(LogicalOperator): + """Logical operator for limit.""" + + def __init__( + self, + input_op: LogicalOperator, + limit: int, + ): + super().__init__( + "Limit", + [input_op], + ) + self._limit = limit diff --git 
a/python/ray/data/_internal/stage_impl.py b/python/ray/data/_internal/stage_impl.py index 4a89454846c8..472853055301 100644 --- a/python/ray/data/_internal/stage_impl.py +++ b/python/ray/data/_internal/stage_impl.py @@ -8,7 +8,10 @@ PushBasedShufflePartitionOp, SimpleShufflePartitionOp, ) -from ray.data._internal.split import _split_at_indices +from ray.data._internal.split import ( + _split_at_index, + _split_at_indices, +) from ray.data._internal.block_list import BlockList from ray.data._internal.delegating_block_builder import DelegatingBlockBuilder from ray.data._internal.execution.interfaces import TaskContext @@ -344,3 +347,41 @@ def do_sort( do_sort, sub_stage_names=["SortSample", "ShuffleMap", "ShuffleReduce"], ) + + +class LimitStage(AllToAllStage): + """Implementation of `Datastream.limit()`.""" + + def __init__(self, limit: int): + self._limit = limit + super().__init__( + "Limit", + None, + self._do_limit, + ) + + @property + def limit(self) -> int: + return self._limit + + def _do_limit( + self, + input_block_list: BlockList, + clear_input_blocks: bool, + *_, + ): + if clear_input_blocks: + block_list = input_block_list.copy() + input_block_list.clear() + else: + block_list = input_block_list + block_list = block_list.truncate_by_rows(self._limit) + blocks, metadata, _, _ = _split_at_index(block_list, self._limit) + return ( + BlockList( + blocks, + metadata, + owned_by_consumer=block_list._owned_by_consumer, + ), + {}, + ) diff --git a/python/ray/data/datastream.py b/python/ray/data/datastream.py index 7332c38231ef..20517e1db0c4 100644 --- a/python/ray/data/datastream.py +++ b/python/ray/data/datastream.py @@ -37,6 +37,7 @@ ) from ray.data._internal.logical.operators.n_ary_operator import Zip from ray.data._internal.logical.optimizers import LogicalPlan +from ray.data._internal.logical.operators.limit_operator import Limit from ray.data._internal.logical.operators.map_operator import ( Filter, FlatMap, @@ -82,10 +83,11 @@ RandomShuffleStage, 
ZipStage, SortStage, + LimitStage, ) from ray.data._internal.progress_bar import ProgressBar from ray.data._internal.remote_fn import cached_remote_fn -from ray.data._internal.split import _split_at_index, _split_at_indices, _get_num_rows +from ray.data._internal.split import _split_at_indices, _get_num_rows from ray.data._internal.stats import DatastreamStats, DatastreamStatsSummary from ray.data.aggregate import AggregateFn, Max, Mean, Min, Std, Sum from ray.data.block import ( @@ -2199,40 +2201,12 @@ def limit(self, limit: int) -> "Datastream[T]": Returns: The truncated datastream. """ - start_time = time.perf_counter() - # Truncate the block list to the minimum number of blocks that contains at least - # `limit` rows. - block_list = self._plan.execute().truncate_by_rows(limit) - blocks, metadata, _, _ = _split_at_index(block_list, limit) - split_duration = time.perf_counter() - start_time - meta_for_stats = [ - BlockMetadata( - num_rows=m.num_rows, - size_bytes=m.size_bytes, - schema=m.schema, - input_files=m.input_files, - exec_stats=None, - ) - for m in metadata - ] - datastream_stats = DatastreamStats( - stages={"Limit": meta_for_stats}, - parent=self._plan.stats(), - ) - datastream_stats.time_total_s = split_duration - return Datastream( - ExecutionPlan( - BlockList( - blocks, - metadata, - owned_by_consumer=block_list._owned_by_consumer, - ), - datastream_stats, - run_by_consumer=block_list._owned_by_consumer, - ), - self._epoch, - self._lazy, - ) + plan = self._plan.with_stage(LimitStage(limit)) + logical_plan = self._logical_plan + if logical_plan is not None: + op = Limit(logical_plan.dag, limit=limit) + logical_plan = LogicalPlan(op) + return Datastream(plan, self._epoch, self._lazy, logical_plan) @ConsumptionAPI(pattern="Time complexity:") def take_batch( From 1bf5811b145334f72dbf327b46bb94a805780c04 Mon Sep 17 00:00:00 2001 From: Sihan Wang Date: Tue, 25 Apr 2023 17:04:31 -0700 Subject: [PATCH 098/424] [Serve] Fix dashboard test (#34758) Fixes 
test_serve_agent. Signed-off-by: sihanwang41 --- dashboard/modules/serve/tests/test_serve_agent.py | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/dashboard/modules/serve/tests/test_serve_agent.py b/dashboard/modules/serve/tests/test_serve_agent.py index 1ea921687c1d..7d7d43c335f1 100644 --- a/dashboard/modules/serve/tests/test_serve_agent.py +++ b/dashboard/modules/serve/tests/test_serve_agent.py @@ -14,6 +14,10 @@ from ray.serve.tests.conftest import * # noqa: F401 F403 from ray.serve.schema import ServeInstanceDetails from ray.serve._private.common import ApplicationStatus, DeploymentStatus, ReplicaState +from ray.serve._private.constants import ( + SERVE_DEFAULT_APP_NAME, + DEPLOYMENT_NAME_PREFIX_SEPARATOR, +) GET_OR_PUT_URL = "http://localhost:52365/api/serve/deployments/" STATUS_URL = "http://localhost:52365/api/serve/deployments/status" @@ -444,7 +448,10 @@ def test_get_status(ray_start_stop): deployment_statuses = serve_status["deployment_statuses"] assert len(deployment_statuses) == 2 - expected_deployment_names = {"f", "BasicDriver"} + expected_deployment_names = { + f"{SERVE_DEFAULT_APP_NAME}{DEPLOYMENT_NAME_PREFIX_SEPARATOR}f", + f"{SERVE_DEFAULT_APP_NAME}{DEPLOYMENT_NAME_PREFIX_SEPARATOR}BasicDriver", + } for deployment_status in deployment_statuses: assert deployment_status["name"] in expected_deployment_names expected_deployment_names.remove(deployment_status["name"]) @@ -705,7 +712,10 @@ def test_serve_namespace(ray_start_stop): serve_status = client.get_serve_status() assert ( len(serve_status.deployment_statuses) == 2 - and serve_status.get_deployment_status("f") is not None + and serve_status.get_deployment_status( + f"{SERVE_DEFAULT_APP_NAME}{DEPLOYMENT_NAME_PREFIX_SEPARATOR}f" + ) + is not None ) print("Successfully retrieved deployment statuses with Python API.") print("Shutting down Python API.") From d0593093180162a8db2ce28dc5ec561fb2a4286c Mon Sep 17 00:00:00 2001 From: Avnish Narayan 
<38871737+avnishn@users.noreply.github.com> Date: Tue, 25 Apr 2023 17:32:07 -0700 Subject: [PATCH 099/424] Revert "[RLlib] Introduce experimental larger than GPU train batch size feature for torch (#34189)" (#34766) This reverts commit 72268e801d40a477dbfad0e1702ae5988e3c8bd6. --- rllib/BUILD | 11 +-- rllib/algorithms/algorithm_config.py | 16 ----- rllib/policy/sample_batch.py | 23 +----- rllib/policy/torch_policy.py | 36 +--------- rllib/policy/torch_policy_v2.py | 42 ++--------- rllib/tests/test_gpus.py | 102 ++------------------------- 6 files changed, 16 insertions(+), 214 deletions(-) diff --git a/rllib/BUILD b/rllib/BUILD index 0d357bf7cf4d..100df5ec4262 100644 --- a/rllib/BUILD +++ b/rllib/BUILD @@ -2525,16 +2525,7 @@ py_test( name = "tests/test_gpus", tags = ["team:rllib", "tests_dir"], size = "large", - srcs = ["tests/test_gpus.py"], - args = ["TestGPUs"] -) - -py_test( - name = "tests/test_gpus_large_batch", - tags = ["team:rllib", "tests_dir"], - size = "large", - srcs = ["tests/test_gpus.py"], - args = ["TestGPUsLargeBatch"] + srcs = ["tests/test_gpus.py"] ) py_test( diff --git a/rllib/algorithms/algorithm_config.py b/rllib/algorithms/algorithm_config.py index d3f4a99b9a90..b2b3e8467453 100644 --- a/rllib/algorithms/algorithm_config.py +++ b/rllib/algorithms/algorithm_config.py @@ -429,7 +429,6 @@ def __init__(self, algo_class=None): self._disable_action_flattening = False self._disable_execution_plan_api = True self._disable_initialize_loss_from_dummy_batch = False - self._load_only_minibatch_onto_device = False # Has this config object been frozen (cannot alter its attributes anymore). self._is_frozen = False @@ -965,14 +964,6 @@ def validate(self) -> None: f"config.framework({self.framework_str})!" 
) - if ( - self.simple_optimizer or self.framework_str != "torch" - ) and self._load_only_minibatch_onto_device: - raise ValueError( - "`load_only_minibatch_onto_device` is only supported for " - f"config.framework({self.framework_str}) and without simple_optimizer." - ) - # Detect if specified env is an Atari env. if self.is_atari is None: self.is_atari = self._detect_atari_env() @@ -1634,10 +1625,6 @@ def training( _enable_learner_api: Whether to enable the LearnerGroup and Learner for training. This API uses ray.train to run the training loop which allows for a more flexible distributed training. - _load_only_minibatch_onto_device: Whether to load only the minibatch onto - the given device. This is useful for larger training batches that - don't fit on the given device while the mini-batches and their - gradients do. This experimental setting is only supported for torch Returns: This updated AlgorithmConfig object. @@ -2473,7 +2460,6 @@ def experimental( _disable_action_flattening: Optional[bool] = NotProvided, _disable_execution_plan_api: Optional[bool] = NotProvided, _disable_initialize_loss_from_dummy_batch: Optional[bool] = NotProvided, - _load_only_minibatch_onto_device: Optional[bool] = NotProvided, ) -> "AlgorithmConfig": """Sets the config's experimental settings. @@ -2517,8 +2503,6 @@ def experimental( self._disable_initialize_loss_from_dummy_batch = ( _disable_initialize_loss_from_dummy_batch ) - if _load_only_minibatch_onto_device is not NotProvided: - self._load_only_minibatch_onto_device = _load_only_minibatch_onto_device return self diff --git a/rllib/policy/sample_batch.py b/rllib/policy/sample_batch.py index cd1c7d14ad64..1eb75a679988 100644 --- a/rllib/policy/sample_batch.py +++ b/rllib/policy/sample_batch.py @@ -790,32 +790,15 @@ def _zero_pad_in_place(path, value): return self @ExperimentalAPI - def to_device(self, device, framework="torch", copy=False): - """Moves tensors inside this batch to a given device. 
- - Depending on the copy flag, this will either return a new batch - or modify this batch in-place. - - Args: - device: The device to move the tensors to. - framework: The framework to use for the device (e.g. "torch"). - copy: If False, modify batch in place. If True, return a new batch. - - Returns: - A batch with all tensors moved to the given device. - """ + def to_device(self, device, framework="torch"): """TODO: transfer batch to given device as framework tensor.""" if framework == "torch": assert torch is not None - if copy: - target = SampleBatch() - else: - target = self for k, v in self.items(): - target[k] = convert_to_torch_tensor(v, device) + self[k] = convert_to_torch_tensor(v, device) else: raise NotImplementedError - return target + return self @PublicAPI def size_bytes(self) -> int: diff --git a/rllib/policy/torch_policy.py b/rllib/policy/torch_policy.py index 79920d4d18d8..f78b13858390 100644 --- a/rllib/policy/torch_policy.py +++ b/rllib/policy/torch_policy.py @@ -536,14 +536,7 @@ def load_batch_into_buffer( ) # 3) Load splits into the given buffer (consisting of n GPUs). - if not self.config.get("_load_only_minibatch_onto_device", False): - # We usually want to load the full batch onto the device here, which is - # much faster than loading the batch slice-by-slice. - # However, if the batch is too large, it may be favorable to load the - # batch slice-by-slice. - slices = [ - slice.to_device(self.devices[i]) for i, slice in enumerate(slices) - ] + slices = [slice.to_device(self.devices[i]) for i, slice in enumerate(slices)] self._loaded_batches[buffer_index] = slices # Return loaded samples per-device. @@ -611,20 +604,8 @@ def learn_on_loaded_batch(self, offset: int = 0, buffer_index: int = 0): ) batch_fetches[f"tower_{i}"] = {"custom_metrics": custom_metrics} - # If `_load_only_minibatch_onto_device` is True, then the main batch always - # remains on the CPU (it's probably too big to be fit on the GPU). 
Thus, in - # this case, for each individual update step, we need to copy the freshly - # determined sub-slice to the GPU. These sub-slices need to be small enough - # then to fit on the GPU. - if self.config.get("_load_only_minibatch_onto_device", False): - copy_batch_to_device = True - else: - copy_batch_to_device = False - # Do the (maybe parallelized) gradient calculation step. - tower_outputs = self._multi_gpu_parallel_grad_calc( - device_batches, copy_batch_to_device=copy_batch_to_device - ) + tower_outputs = self._multi_gpu_parallel_grad_calc(device_batches) # Mean-reduce gradients over GPU-towers (do this on CPU: self.device). all_grads = [] @@ -1080,9 +1061,7 @@ def _lazy_tensor_dict(self, postprocessed_batch: SampleBatch, device=None): return postprocessed_batch def _multi_gpu_parallel_grad_calc( - self, - sample_batches: List[SampleBatch], - copy_batch_to_device: bool = False, + self, sample_batches: List[SampleBatch] ) -> List[Tuple[List[TensorType], GradInfoDict]]: """Performs a parallelized loss and gradient calculation over the batch. @@ -1094,10 +1073,6 @@ def _multi_gpu_parallel_grad_calc( Args: sample_batches: A list of SampleBatch shards to calculate loss and gradients for. - copy_batch_to_device: Whether to create a copy of the batch that is then - moved to GPU. This is useful if we don't want to move the original - batch to the device. In case of a large batch, we can thereby only move - mini-batches to GPU one by one and free them after each step. 
Returns: A list (one item per device) of 2-tuples, each with 1) gradient @@ -1108,11 +1083,6 @@ def _multi_gpu_parallel_grad_calc( results = {} grad_enabled = torch.is_grad_enabled() - if copy_batch_to_device: - sample_batches = [ - batch.to_device(i, copy=True) for i, batch in enumerate(sample_batches) - ] - def _worker(shard_idx, model, sample_batch, device): torch.set_grad_enabled(grad_enabled) try: diff --git a/rllib/policy/torch_policy_v2.py b/rllib/policy/torch_policy_v2.py index 86354800cb23..79546b0623ba 100644 --- a/rllib/policy/torch_policy_v2.py +++ b/rllib/policy/torch_policy_v2.py @@ -734,14 +734,7 @@ def load_batch_into_buffer( ) # 3) Load splits into the given buffer (consisting of n GPUs). - if not self.config.get("_load_only_minibatch_onto_device", False): - # We usually want to load the full batch onto the device here, which is - # much faster than loading the batch slice-by-slice. - # However, if the batch is too large, it may be favorable to load the - # batch slice-by-slice. - slices = [ - slice.to_device(self.devices[i]) for i, slice in enumerate(slices) - ] + slices = [slice.to_device(self.devices[i]) for i, slice in enumerate(slices)] self._loaded_batches[buffer_index] = slices # Return loaded samples per-device. @@ -756,11 +749,7 @@ def get_num_samples_loaded_into_buffer(self, buffer_index: int = 0) -> int: @override(Policy) @DeveloperAPI - def learn_on_loaded_batch( - self, - offset: int = 0, - buffer_index: int = 0, - ): + def learn_on_loaded_batch(self, offset: int = 0, buffer_index: int = 0): if not self._loaded_batches[buffer_index]: raise ValueError( "Must call Policy.load_batch_into_buffer() before " @@ -814,20 +803,8 @@ def learn_on_loaded_batch( ) batch_fetches[f"tower_{i}"] = {"custom_metrics": custom_metrics} - # If `_load_only_minibatch_onto_device` is True, then the main batch always - # remains on the CPU (it's probably too big to be fit on the GPU). 
Thus, in - # this case, for each individual update step, we need to copy the freshly - # determined sub-slice to the GPU. These sub-slices need to be small enough - # then to fit on the GPU. - if self.config.get("_load_only_minibatch_onto_device", False): - copy_batch_to_device = True - else: - copy_batch_to_device = False - # Do the (maybe parallelized) gradient calculation step. - tower_outputs = self._multi_gpu_parallel_grad_calc( - device_batches, copy_batch_to_device=copy_batch_to_device - ) + tower_outputs = self._multi_gpu_parallel_grad_calc(device_batches) # Mean-reduce gradients over GPU-towers (do this on CPU: self.device). all_grads = [] @@ -1211,9 +1188,7 @@ def _lazy_tensor_dict(self, postprocessed_batch: SampleBatch, device=None): return postprocessed_batch def _multi_gpu_parallel_grad_calc( - self, - sample_batches: List[SampleBatch], - copy_batch_to_device: bool = False, + self, sample_batches: List[SampleBatch] ) -> List[Tuple[List[TensorType], GradInfoDict]]: """Performs a parallelized loss and gradient calculation over the batch. @@ -1225,10 +1200,6 @@ def _multi_gpu_parallel_grad_calc( Args: sample_batches: A list of SampleBatch shards to calculate loss and gradients for. - copy_batch_to_device: Whether to create a copy of the batch that is then - moved to GPU. This is useful if we don't want to move the original - batch to the device. In case of a large batch, we can thereby only move - mini-batches to GPU one by one and free them after each step. 
Returns: A list (one item per device) of 2-tuples, each with 1) gradient @@ -1239,11 +1210,6 @@ def _multi_gpu_parallel_grad_calc( results = {} grad_enabled = torch.is_grad_enabled() - if copy_batch_to_device: - sample_batches = [ - batch.to_device(i, copy=True) for i, batch in enumerate(sample_batches) - ] - def _worker(shard_idx, model, sample_batch, device): torch.set_grad_enabled(grad_enabled) try: diff --git a/rllib/tests/test_gpus.py b/rllib/tests/test_gpus.py index e61d4c04a657..3d01901a5db6 100644 --- a/rllib/tests/test_gpus.py +++ b/rllib/tests/test_gpus.py @@ -1,19 +1,13 @@ -import copy import unittest -import numpy as np -import torch - import ray from ray import air -from ray import tune from ray.rllib.algorithms.a2c.a2c import A2CConfig -from ray.rllib.algorithms.ppo import PPOConfig -from ray.rllib.algorithms.qmix import QMixConfig -from ray.rllib.policy.torch_policy import TorchPolicy -from ray.rllib.policy.torch_policy_v2 import TorchPolicyV2 -from ray.rllib.policy.sample_batch import SampleBatch +from ray.rllib.utils.framework import try_import_torch from ray.rllib.utils.test_utils import framework_iterator +from ray import tune + +torch, _ = try_import_torch() class TestGPUs(unittest.TestCase): @@ -117,94 +111,8 @@ def test_gpus_in_local_mode(self): ray.shutdown() -class TestGPUsLargeBatch(unittest.TestCase): - def test_larger_train_batch_size_multi_gpu_train_one_step(self): - # Tests that we can use a `train_batch_size` larger than GPU memory with our - # experimental setting `_load_only_minibatch_onto_device` with - # multi_gpu_train_one_step. - - # These values make it so that one large minibatch and the optimizer - # variables can fit onto the device, but the whole sample_batch is already too - # large for the GPU itself. - sgd_minibatch_size = int(1e4) - train_batch_size = int(sgd_minibatch_size * 1e5) - - # Fake CartPole episode of n time steps. 
- CARTPOLE_FAKE_BATCH = SampleBatch( - { - SampleBatch.OBS: np.zeros((train_batch_size, 4), dtype=np.float32), - SampleBatch.ACTIONS: np.zeros((train_batch_size,), dtype=np.float32), - SampleBatch.PREV_ACTIONS: np.zeros( - (train_batch_size,), dtype=np.float32 - ), - SampleBatch.REWARDS: np.zeros((train_batch_size,), dtype=np.float32), - SampleBatch.PREV_REWARDS: np.zeros( - (train_batch_size,), dtype=np.float32 - ), - "value_targets": np.zeros((train_batch_size,), dtype=np.float32), - SampleBatch.TERMINATEDS: np.array([False] * train_batch_size), - SampleBatch.TRUNCATEDS: np.array([False] * train_batch_size), - "advantages": np.zeros((train_batch_size,), dtype=np.float32), - SampleBatch.VF_PREDS: np.zeros((train_batch_size,), dtype=np.float32), - SampleBatch.ACTION_DIST_INPUTS: np.zeros( - (train_batch_size, 2), dtype=np.float32 - ), - SampleBatch.ACTION_LOGP: np.zeros( - (train_batch_size,), dtype=np.float32 - ), - SampleBatch.EPS_ID: np.zeros((train_batch_size,), dtype=np.int64), - SampleBatch.AGENT_INDEX: np.zeros((train_batch_size,), dtype=np.int64), - } - ) - - # Test if we can even fail this test due too a GPU OOM - try: - batch_copy = copy.deepcopy(CARTPOLE_FAKE_BATCH) - batch_copy.to_device(0) - raise ValueError( - "We should not be able to move this batch to the device. " - "If this error occurs, this means that this test cannot fail " - "inside multi_gpu_train_one_step." - ) - except torch.cuda.OutOfMemoryError: - pass - - for config_class in (PPOConfig, QMixConfig): - config = ( - config_class() - .environment(env="CartPole-v1") - .framework("torch") - .resources(num_gpus=1) - .rollouts(num_rollout_workers=0) - .training( - train_batch_size=train_batch_size, - num_sgd_iter=1, - sgd_minibatch_size=self.sgd_minibatch_size, - # This setting makes it so that we don't load a batch of - # size `train_batch_size` onto the device, but only - # minibatches. 
- _load_only_minibatch_onto_device=True, - ) - ) - - algorithm = config.build() - policy = algorithm.get_policy() - - # Sanity check if we are covering both, TorchPolicy and TorchPolicyV2 - if config_class is QMixConfig: - assert isinstance(policy, TorchPolicy) - elif config_class is PPOConfig: - assert isinstance(policy, TorchPolicyV2) - - policy.load_batch_into_buffer(CARTPOLE_FAKE_BATCH) - policy.learn_on_loaded_batch() - - if __name__ == "__main__": import pytest import sys - # One can specify the specific TestCase class to run. - # None for all unittest.TestCase classes in this file. - class_ = sys.argv[1] if len(sys.argv) > 1 else None - sys.exit(pytest.main(["-v", __file__ + ("" if class_ is None else "::" + class_)])) + sys.exit(pytest.main(["-v", __file__])) From 52512b17656365072f530551b17bc5530582f0eb Mon Sep 17 00:00:00 2001 From: Ethan Brooks Date: Tue, 25 Apr 2023 21:05:39 -0400 Subject: [PATCH 100/424] Use PID to assign rainbow colors to console output prefixes. (#26900) When I use tune.run, different processes are distinguished by the number corresponding to their pid: --- .../images/coloring-actor-log-prefixes.png | Bin 0 -> 538770 bytes doc/source/ray-observability/ray-logging.rst | 9 +++++++ python/ray/_private/worker.py | 22 ++++++++++++++++++ 3 files changed, 31 insertions(+) create mode 100644 doc/source/ray-observability/images/coloring-actor-log-prefixes.png diff --git a/doc/source/ray-observability/images/coloring-actor-log-prefixes.png b/doc/source/ray-observability/images/coloring-actor-log-prefixes.png new file mode 100644 index 0000000000000000000000000000000000000000..1acc3ceca33950c30d0647afbace816a082f0b90 GIT binary patch literal 538770 zcmb5W1yo$YvM!8=put@N1b6pAhM>WNOK`Wrb#T|Bz-DUVE=bU@* zx$j@=-TP*(y>{>2T`kpJwY#h8nZK zWl@F|kHUndr(-8C2lTHH_J_B{d*L=gEQ}RZI^jKb-9{SmxGNrlhTDh{$sfhbuarLY zhH-?6T*dyNexfFqo4wx`9=Axpt|rSq6#M#gRb>4$B>@LFcI37Ymp)i3l4NM_B~#9V zUKu{*qf(k&a@XhmGC>qw^=`ARDH0m$iZ>}}BDyc*-**UZKHU*~5wJ4?y;A1S-~F+3 
zeIoTJOm0m4_vj;_{DZquL@D_B)?HkvwaIv|9#CtL;a5;ZgyfcIv-T2rJX<))cow6?3^^> zXw=lyq9AiiVRhj9f04tU#AvMD+?<6uI6OT)**$sKoj_I`TtY%Z9Gu)7+}v!i5^S#C zj&3GiY>uw9|E%Oc>H%7~nt^Pb-E5p3ssGk%V(R4XCPqW^x1s<1{j;AIUN-;LlB4Ut zh6NiS$KMNJwqs7rg zIsS9f#L*@%gg(K+Nx;biB{jX^kNjU{f9$({SumVm@eN|DFJ@Jg{az3lY^YY#k82hS z<)c?i&Xeb*PwS=hjr!7S6r3MPfe(+~c59Sw;mg7LG?96`16uX{Ioj&E%D;1_d*XjP zo+Gp?a=aqsI{tcyQq>P1p9u*c0S!JZP)g!${u@v$5MHL9B}U@nh=b?CflnMS1t% zIkCb~y^Q6`4cdV8%bOV_M3gVPtT>(e6hiSxNzo#HKRWqJpUQbFCxDfAcOw&{D?_^9uh*34U91J@fCJKA3AvUD-w%Id zHcW!%Z_VZ}-bQ61?>DJtOc4dlw_|)ew4=)vO{;GibWx&ttUG^xFKy!>WG~X#!|MW^ zKqXE!K5_w{-w-%AR$j7^-p^p|A%8MhRddcipthT!3|@qG%H-_{%Ec)Lm7Spq`fQnNO z7Vk#y-Qv@Jz?k8DUp!M#+pO2J(g6k&18$Jm;-KauKOD{wX0cFFuw19gGg{0~Bn`UZK4z z?znv5%^8p;1Z0n^7;DP@K6TeNMC^&c3d_tJ4POvUXTg=0xO$dw%gqcpf2TxZqxHa= zO=bJ-`O%1bgePMz0)qsBHaem!(xm4tHx0s%<9V!@bhePN7{Xf939(0VIc0@uoiZou zKs55X^lBZf--7XY=jZKPYm#>tht>7E~#E(OK+hcA^l2LKGtYMVuV0~^mN?SF*$ zc=@GlbV;7>W)dcYSHlg1f`v|FhN(uOZ-+xV_CIT=uhqPwGaT*IJh)sB(s!J!b8~LMR+z#2AMwTk0FPMD(_QF znnLomO$iV2uFGQOBV(oQrBu5zF#(Q-B}^L8AT(Vn8zB&JD!#*;-?Ru1#k)NRZDgOE z`T{CpienMc3zq^1nE^t>j`8P(=y1oEarQ#PSJFs5UcAYp-F}Xg@hiJUhs?-wyh7G! zDDPf++N%_SZ8a^8s%<7;&~mD_Bo_PNjPkDZ9YSshTDau%tIp0pVDwvYFTl~Cp9vO1 z7=U@F$L$aHKR?j6=QFIc@4sfq!FWb{=mg|?DQPrE?s)F>$|vT2L5mn3DUh{8FWR6v zLB2Z&ICsK%-9-(+vnVUztNl}O^wt2b7n#oI_e2spI>gBi<3xFnm7O?j+W16j-v?qz zk;BTacp$aWXdi+$>suk)(Iq?^t9xI{$OR01+e&n-`-xeZo!$_LFTi-#5Kb_9qW&F?Rcz6nAP#Z$?Fdxf29ic>{;S)wLL?}8lPzMNbugiJ&b*Uxe)Y90{LNW^Iky@)`;1co8U*BMW|5UvE=Qh;ms zv?eB>q;=lJk#K!xg2sL)_%OhYdpEKFhy)`C+J2rhc3Z~UH z*BbjDb6Pxo8J%PMeNX-32UZQ;b6?-w5izp=O8X_qv|8u|;PU~|v=4|qZEa3QqUDS$ zcYV2)*~apCd9&TK>Q+B4g&%xKsr-4S(e1{2#Kzm=pnLxu)>CtH9Qi*B_jpMKw0~ky z!5%iifw&veyd1BDHSVnTeWGh-^Wm(J28-{uw?5VmcH0x3;fVSscKgaVEev`av14Q? 
z+t;bzYqsXc``GhY2zgZ(2vRl}W@-98^g zEyR!0e@lXlSoh$~=nR~(5#3N6{k|fdaq+&z(z(`r&h}~YT2mnr8H-^GB)S@Gy>Pr~|h7>wURcDj^B5z5n zZZqgz>(%f`Vs0)L_KGi+1~gX0gnU$HgoI}5NxYBy=cd&0ikNSPlo_s`he3SHNA$<* z2%y1PWp^Ju3GVj9K1gP}?fii(!#zA!Ny`SN<@@@+u@?GL6%-?}bspN3JC?T2TwH*T zhEqQb%w$o>Z0+S_l`}czdYzEyw0N*(hYpXCRY6Iz1JSBE+M&1}Crd-oZ-ypf)2qCm zp|a2}58GQwnNyCcCPL=&Z);Pfx#kc}(yQ|rj%HL!-;3A!YU%5OyqkrYl>39j;doBB zEOQk>R>nS*+|xX3?JX}`JVxhN6fPJ&Txo;0K|g8l4$8PkoJH58D?%H+h9oA*q{>RO5loi+Y37c1QKUc&TuV{Ek18AF>p{<%v)U?9f5VhsrrRx}6kd z@(_L@n;;3>k{^_T!h*U&ZL?D(M+Aq12MX0W0tp zr{)^n0DX<&({*Lschg&8s(12j%0e?U4)8Nevm3XNDnRTEPY|Qi-u(&9t09o6M*gt` zN=w`JK!w$w&>~4=^#L56`OQFBjWZjI(_vk7Gjo|uRm5E+aY=??a8*wvaOOM~ecDHw zTTd;?LdC1h;B$!IbKI6yPBw6KaZWs8Jz2n;z6XL_MU6pAjXW&JlU%UPp1qHEM&X{q1Ad~YR zLX*K~wY9;sUGt{i^0L+_Yj}yO>vrf3Sv@y%=*nII(;HkSZ4@Fgjv}OY+CEyx*Aaa& z1y`STI9}{AEf99d`eB9yG)@3kH zA(8Gu1-@ab3!F*NP75P{CGd43DeP1&r)5f_uS7J#G}niyBxq-U@Un3sIF2i4$+!jY zPR)Oq(J6E|rB{Vfr7OC)HQTl{C4J$E{xRT_z=ZP%ep?1ZAt=vUz|m;kvG3|uWb-~n zKABkm;~vT}o2CL^%QD^ZVi?)&*+kaO@#+Boi~T|^vfgC#BdO-fmCQSF$GID2=BKQK zxUxc1;e{VoL(sRZjfPUCBdLScuNi=ohYW1i{Y)wtM(7Ud0zyGVzAxC z=g*-s>{>JT{1Efy*|^SHsotZAg_h+JL3U9<)j1JlUxc#|2Btg?!a^2Q1_oG$pg7jg zUxqB)bB5h)bAK?)UKv?s`~-TZe%J**3;$!4veJ9(Tim&}zRM!vbdIAkMF+7u|w4Q3|xcUM7Gk*$FX{W_^_TfBrY7)oN{JTi9esA%seg}pe z2VO7;NRgbABogMuy4c|Aikz{}ovzM%^K8y!HYf^RIK1DwiA@n8i2Hp9IwC6!23V!C z({rPsSYCyMlx4pa@bXu8y*GoWsvO?VOxL0crr7q)r!E%QqO%&F6B(ct9|P@tZolCEPT1(@&}qD@e15Oi1QDt_^NuU&rf_r@z0(yEyut_ z4eXE@=G&N3TAAp0!iC0B2^SZ?S~sdka{dxKeElkYeUJIHByY8|!j97C%EY1IeVVbZ zcMh|5no7$R4agkAM>_TXDRLVaGne(lF>IZgpyO zhhms&=yl@FNotr%(a&`o(`4LpqyjkCp~GboAX|1a5_;y=Y4YKV$>Fz{CgF5D`{b90 z0l)b5c!HvcJ)LWXnp~0?u zPLb#zGm2BD0@woSetb@p6?$#Mv@PeG{XO!V{av0hgk2l&Vj6sFFJos;U*WU53+wUB zZ0sJMwpO11UQl^xS}q9)JPu=V0(u*@S@OX#J42@W-dzK9zX)3}X9o;_9UWsAQ><}i zRNfq^Zez|OP~_mo8mhNSg1+r3+Y>d@t{N2tKEW7fk&j$bH}!7CD>g@-tc5h|%cp^= z9DA&M;i27017{mJfUiY4a(fhwTc zk?NNTrWHtlw6=1!k#pa>^lp&fYR2uDs8+L*?~ml3Bk=}!^|d`Sh`L@2ls+s*cO7N! zc=2l&ZFsu5a|y;#fBC1Ygqt#BY3C)PndxbzRR)>! 
zD4ZT1Jucf*$TMmj9%l#~`5gB?zs^}4>f$+f)V5qb7i}AzvXTiTjKbrR@R1*T%Z3%L z6ux3(v+Jjzgv)DkcGa~|Z|#GBV|M1HhZO(omHq$V|NXaF5 z@EvJG&sedDUrNsFJ#{u_Q8b9!*VMd4vOxt0+GeUN`Xsk{2|iIzRC4h}6g$p%Wq4+( z%E&V(AbRs+Kgzsz6BVhxU&vfc%AsNZkC40RSpRfW=$_fVS1I282eg4K$Ht_-n0eUu z?}w?XGYw^rX{m4Nwhf+gg!uivY6v^;#*|+!qqsOc7GKlG{de@cJl%jNXuf1f2H>h9 z7DJH2%<#=vUzp5d4U=^3lhDU1fds7!9}Z4$0NJ2Z9mvAvEu}Khj#*EY9=#wq7=aBY zPGHYvDA~L*D9~R?{mrJsAJNC8idN;+k^B11gwi^Z=s`)UIUva$PEVAP&%}1oQTx9CW;UjcE-ttsdY*m zc+aN?0i}Csv=`h_7T(_Cz^T#%$(Zt%(?;|^Qo?_jlw;zy#3LUx-t|Y)2%Bw~H`*`u zEt+li$09#$UUgMuo{{tq(BKXw0IWRe-r+Fey@GW6il)h?#RJ7$plxT_u0bAiuINZp zAR9>^&K;r7woXO(`1{txXs&U=sGWfs5qG-ue9xB?EP6Y!@wQ+6azUQA6rSD7x6A0} zC8xYYzp9h()>&SoQ?DWt2-*-^g&u5>#j*{UDYytppHs9EKN8^_e<`_B#s3-8?;q`= z9!|!$0~W|zChAks#7g@*2Kyk@BN%e#hB^AGYp9zX3okCs_Jc|=>1R~c6hULzVt)#G zlf2*WJWE*S7CbZ_YyXt6cf&|LY&aJA;d?f#3!Z$%VIp5|vWa5D&%?vBz=Kaa;p;NT z&ZC}@{Ej8AN#yJ~nIC><1L3+%6I9Ju-HAgH0sK>E{qj9Id6g)~3OJlf5&^uRrQA2X zd~p_iXUAM?`X}5$kW_$XlKc$`&JaH%QlWvGfndDWz^B+`t?hGP?#xDFGdZAVFj8~vL< z5t&K^^7S2+X`Vlz5am`cs}ogT!QqVq7= z-}`wP9}aqtWDymva$iKxR?C51eV$x}0o}vatqe|tZO%Q!b-DZqzqY<7~Y03g{7Ijar@$S>4Kx9g2?m8wLSLmneBU_n?MV}w$|F-jDy(T1L{y; z;6c5!qfAuUmW|*N(j`B8H_^Feq-@_Wrsn}}77SfvERNE;;kVILLrFO4XbpSgc9hII zda5;P$=P9O*&Oj*VhBs0a!mtRhT!t%mU~Pf3dd>Wv1_;(!>O|^KN z#;(@(@P?GA|MRhEWL!*G_yfv^pwivcrjbtc`h02dm6~_|K3-6YAVe))*-#5zB&U2^F9rwK1{$;t z#|WWAKAV|Dc@by^u}5JoFE2L(0N6FRf?89Xi>OUDipUU5B4``}~E) zvquvE>0)J$`W=$!;rx*A=J%juD1FYmbh&`VkYI-TAzDH}IHPjkH@@=8FBf{mF&Qd? 
z`{o~lX!Y6Il#@YQUw89DNSEH-4z5z3t-a?5Bt#(Iox8@i0G|^*XgeZn{tPO;>=-YF z&IjZIZz`dL69W`hzK3sO|Aa4D_vzXfZK(*zZYgk8z|@-gUFySZ1zb<)HK;J#P_X` zp&#v#3`=Z1cjkHTfO^yT+Y>F;j)Zcf-?c5}9A|sH;3gF#CoD)S@at*P*h=(Se`q+> zQ50Fbyu4Va_p7GUtMwxh8Ge#YGXW}USA2R-2Pjf)KogGy&F6A09wR6wAeXUnP28J_ zasj=Mnwn>yFG|`AC@KA}aK=dK2n9ecEYqJ-SfXPn+T8%en?-U>kWB(6(Nd)^qqTk) z_f{s$&F-UzT^!&5+N{#*?waTI0V+JEX@)pM-y84F4Fm^MQL0J-8^pn)*QGT&5m!b% znoaPbZE`Q$*9@-pJ`;o#r_VMV9-`6XkD^p8LX2^8kDJ~?e2%IC z5Rl+PRU`RBVFE%#3q!fBGqTMb1J+h6p`keJ(5M9ZVqK7_p$wvjR?VacJo!+| z)^6!En-OTNsET8<#j%QjsCGEqwS>&i>|}nrDSB3!4wH2kj5@62J<|e{aMpfTV}hzOmap_DAvW^@s`L5d3WmEInJo)WflGM*xyh+*NIff z@c;d)0I71W!dk>tu`&5w0Xz%(HiO0Dr>#^2Vy(TU)`e6%EoBQtb_Iw!E3P5Q?9bm> z?|9mj6#}!)HUop-Cuh~m=K7H{5@IpL)3HTDkD|-pbmZ>YTEL(5FQHCN4H&$fU3uJ} zyQy?sA^hRWPRR&OqhgT2VMaXZHatlHJn~i;cjUffVP@{`JD<6uT}(1iXn#8B$Y_XX z1;ch7nK-HGrJe`Y+EuLVy+nPK-n|=}XXjw?*Y%vzRjSid?!-;}Zx7-1;8x<<@nM9D z9lfsKmOhOkZ*JqX%hnF5z z6P;*jVo)Dgq_#1coaIb-@C@<1ldS_*CZe7KuCHHgtBMJquV@cD_V?M*xxvHlwzmLz zzG*`;>^uQdVJFJu*pW{|31Y-+V6iDk(WC>3riMNu10*<=5oy;1V*k?u?s0t$47o1n zs4YD6-33#1B4m$5{c!Sh=#O?AD8G0GU=g~yh|bk-n9-{Dqf;b|uI9!h!UcDGC`G3- z?Xf59@q{!DI_4Ja0#~rjcQca&0}?-XU=g(&_jC2G#$nN%O4Ef{3oEcFe89i$Z?id2 zs>fbw3}kyjVcvQJqAm#V1jUl20oFR4I1XLI;{Lijr(vFfJ1gG;`S^^HCthb z$aX?3%6)&&gQeSG1%8Mi=zvMSHB6intznWc=T@V51s%=&auFUdV=%BQQVKIohTWkO z>c%BQETZGJpC7NSp2PX4lPeZ4)9jPH{&TN zeRq~8DMLTNKUeGCeanku8hj0k%+()a5ng$mF{-#r&b;*&*8<(mK(mPjfZNsy9v0t* z1i!H z-J3eM#ps$$!G+?(yd>d^d0SE3qD!&CUbb1`{b~X z!u^cXEY3&6+fUw9_rb-t?ggv!w^c6-4NBg1PA>sJ4h^XUf-lN?v0p<*NP0Km{p5;|!a4VtSQa^$No2vLacG@he7J>YIg5E9x?{ z<}_S*y3#6%mB@%BIi}ob zwom#pbb(RUL;YqH>@s#=%aUDRMx_Kpw4yxPnLol9rR#3p%nGpOdLi{F~F15aI)qXRE74OMQ-lR&kOE;E{zg505j<3mSg=~r$vuxg@_@q!6 zA8jQ%fR*FOAfS5LmJ78tUWhLpgw9Pxi;0Axe-*7nN8o)5>ET6)y<0Vp(IKM{0-FH; zbTLL5P{4#62n7u_ho#`JItc%epiPdD(*xhf0K+1tpjOd!g{4~=z*xjB?+@U-ae=}Z z-p|RVK+IDyB&^>bb(o!6^Ga8L$5n-uXUN$pGMu^3-eU2n`DUlIth zPbMY(n7BL?U$EvR{|GXH{6)FmV9G7%Oj>eRyHalm>LqTq*-bOmju^z@aVDA&u}SXM zo=A`3Xg8|x{4clw8 zO7T0m>u&o;MKdjKts{}NQ4w$3@lV7Kc#?t+-0 
zg@_Mp-8_we{MPKKQqUy=v-5RT%GGv#Bik-5t=MwDOw(%5zG2FY$r3rY*-ixK?xo5U z;MrObkwBUZI|04IW>23=y0j9QxYMs%5Tr|9g(AMnAnTM2 z3;X_CvFj5Zo*{ZY>=-;cjD6Mokt}gc-@Nr3aGOWXF@Cqd;i$sCQm~dWt~)~-Snr}i zn|-J(bZ&lJdK;BoJGrEgCB72yBF0E68rsJJv-xRvUvhK|MsHu!Ux?EM$NXf~>Ug#( zwY;Zr2mgs#JDTt!uzs1mJT`wmxNH#qnN@0WHj{1^9wQ$2+go;AU&}9Z882z>TY$ww z776zIxyoT)1t-rjb^v?CvZ%FoJmB_uzr$mD{_h9OH=TCcs5NiuB`x!IUr0pAt1T+_ zHYIPlQPKt|r-?Og%=stPk=~<5S@Jjv*I{F%QC;K`@Y8XUqe3I_nx-B^jV3LiX&5A| z5HB%Y+!r3C`<&56Q0+@nF`}9zSvAerp2$J+RmD$3*YI^HAlesLq>c3X!W>w|lF!Z| zfXmj23&CZ5k&-qrw4m;PdW&B5+jnX^Eo?e;w-R9a&ayVopa9ZQ3b<94CIP{`0qOrq1E$9&t-6a7O;kS^>0F3?` z0$FUOCJpdcQjc>to^cOVKR4}>h7sn(MtaQeGjTJ2?iC1$rbrn#R|T+ z=5u*>5-*NSfU*QpOtC6=v)MCp9PX889a?hQ(R=stw)IU}b+uJYtxq0N`yPqYXXGceO+vioGjxh;I<|vgI)2?!_VW|(~Q!B7&z9a+Ae8_pBFP(cX z(M@cz60X!0qVUsHltUmPE~lz|Yh#2@Wn^a{FI{LS-<{Soq;~fB$MilZXIw>7ycP+P zAp^v7S^wGeRrk@XC4(zhpL3`xKxX75{%#@ec4eNpK_hp^*KJuk28zGs!Q0Po+b8pa zeBvc2iV0XuWqAmo=9$=PW{b4Bwbw2#X3^%H<&e^L$Nw(+=3CEs4x<&^<@f?XE#Im=~&yshrupf^s)&Psa{eZ2jI~w7M!8AJotf8%+v(NFK z%^?@qmrcuF4X$(S{Q+2xf(5sj%7s(@q&T{ODHLJS-N?_$ z$r*}#KC6vLm1#o+9=^D3)z7!bKXsC}pdP(vI7bFQIu_PxXp@yh8b-8HtTNAszd}t0 zI;NgK`QH+c&2Zo0RC_cM51DyX7Vfa|1jJT}ysNec^K2CJ7#t3s8UH_~Fez2DSCheMNk_nJ5> z+;oI4|J~sC*0+r#l?2O5+Y<7}ZwZma7uzm-$}pnYa1$neuV4PYW=DTj=yvQcy)+nf8#8$8>g+Mj#8dZST7~CrA4Vq3PMf`s%q-LA%Q2u zvJVn=BY0n<2IBPhccrx#URx~K%gOug1is_3ePzz25p=Hixe(7pGl1)jvNw~Es;)7z zO%2Qm(mO!u#`7LblR})_$!Y}p9Dl65G4Zjm7$bqf4Pm3^=eN*QnHT}x*CeWmi|EV0 z8amsD%|y0jX3n{f;?2;)rGan#Da}P0S1Wx%f+zk zzB%I>S>R=%ObP=O%2^@3y1!*fi{ROpK*mS97ZT=SnuPw2gyxXyUvX=n!-8#Zq_mZQ zbPQ8`+6Kt(*X~N^H^L~N2|sp5rLjb#oAp1h)}yok^7u{3Ca7MboZ+#0TS%M zQ8Cb5*jE)4hWHlIP+T=|lgiC8(|jA|UnYSzj%piwApcQQ=upH;<4kvcqNI@7Zlnw; z9>{O$@P9OXPGtRTE$PDdz`2X2lKFHc>b8BU$k85(oTTEy@k?zf$=KkUOQ(xbbOzbPrWdT(+@baC;>F_t^o#9<4@B+wLL55%9K&v!M$OhJ8Q3q!!>xF` z67dou;?zPhp5;Thbvbgf8xu%K5I6YEVCoie z!JB^U7&i*MMhbI_Rr_BJ4OG0~W0N3&TQ6^q{9}UCAEoh)U8wXxTWf+>5vwckm(CRl zeJ&UFn%`&cT78a5+5YHrl^9^xYZ}L&oDGJ_0)a@)w(d+mvn>L+Hc<}B2>Y^@x*`hz 
z;4aCAOo>5~iyUu9(`rD^M%Prf`-|9Z_aY`)K}`+3bsbQw-_u8j2r%^(WZ}$J&d_og z6rb#Ie|Y*QPW#VKr;zP(?ViC6(<3|2E^f`-X^}pdA@IBXO6EDd$m}Lc7-_5+u*&rG zzGwL;W38b}!Pt>=;wyN%pICj(AYwK#>!*EM0Jx?{CLEmU@oi$xdL_L}*RmD#4ZL?x zB=+<11ZON_df~73Q464HR29ZKD1#j`1#PwNm1jf+6 zGWvecup>OoZdS+7{_E;|{i6V=Ni9<(d(dk?D_?ciDrmo;|_Itz4!V}4eL zLW~e~vk7uvF~NJLk8oVfTyj4ZkYKP(S5Q=}w7|=KO^S*ZSGPXuYUCrkbqZsWrgzpV zjyLzm3!fE4Ts~HQIg7s#`+~qAOtXlHt)Lx*mdp3Z|7mjHtR@^dvp-TJT-pc-nN*Nd zDeBex;fl$=(&V8!B?hy96dO}%qEJ+@Ax0y=Q#Ai54K^pu&C5Ex^z!!)@i-pyTnV>! zOOk16DKYlKX;B_d+cmx?@>MHEhulw@8XyK-5nz&594nXZ-fjl(fL88 z0WhmVYB~~$sP!9s%&S;MnV7dge6sy9&s7|)wZRmYV05$YXXCg(V7})q5sQ2OXx;ZB zIX1Ffs~?qa%T{vz_mLCwf7D(Gt5sou4d`oc4nnj9fryi-_FIUI8Eu5E>)fkjKt-#qZ-tpyt5jLfDiU{tCPUJM9Mr!D8 zOF+VG1UvM=CFs0w&L~Vjz=amLAw}Cab{}xA!fAIipj3bt7mX;E)O~Yql`BQ!gdk2h zIWV+%eW=4d^%U*A>vKx3;>q#=+I#ZV>~xKmS{Zbpj}!4#C27H6W4(@fglww^R8~gl z{wRv%rp@io-Y(l>fbichW;uC{cCE7O1U~;j?T;fFF?g#k?DKMO+2#3t z-XThQ4k%|y4)aJso~1(pQ{LehpP97H$#KULHnK!vfr@85OMaT6r}Yetl~UG5My*Si zZOeak3Qm`9UU-KJlCbi@CBD#4vTCCMn9Bq7U}4Wgkg)n5t#)q33vsr}@Q@i{pqYqk z*`cEudH02!KaWGVo!r`N^jKiEn?KRn2>6Fb#mRkZm8x>y`1-29#i=JhD9nCy+NM-F zP$Ax)fJt?iL)0)mFa7?(gXI&7kCHIi>X1t8?|)Exe!g z$W7}^7{=}Di)A=Hq~LS%zBIYk2O1?oS$H~J0$`dadvLZvgF1`ixY zr;203pckWadt#KzDsmxQ-hHFNCT2rFJfb3`>8H4?Db!b&@|#pIK7EUM+G7+79g%ql zoS38x9NF)VX{4D<_N_#9lZEYRyk5QWR?gcw+K~TV-HbGQx_m?oHiwt4u_K*Ah(Ia4 zDR@)=DJE>tIXcK0Np4E!crhI~b1!b=5k|(J#JDl^x+@^$!C%Te6&tk0)X+hE+4>+|H$#nCN{JIo^A=jD$qi`WdMz z`vvOxL{$=l@g1L1$xtVb&nqa{vtQYkH#KVHRA691WTJ8UdVAse@ZQqc1=-dsqo!oD zYNr8eQQsDK!#Nuk>dW4#Q+7749Z8J&llF8cDl(+FJF1HpDd z`DSVQjBRVFizaN@wC@($ae$ETHb3{BUD#D0#9RpeWKPE&ZlxL==b?;sTc-*&F)|X@ z>kq9VCceVzOa-f(s&#ZWTf#56z<*w080)9~V$9>LRE(VQ?SOel{lhm_!;MeZg%Sfq zB=BAw^oP;m5LT$HZ0^5am`q!I$5Q^x;Wbu1BcWda`}y0Z<6ci}3x!7D#tvP}tvtyS zab0lZ9jYZLXxQo0dnA-#a|COMWo9*&WPhzZ5>fYd+Or<&8s+*JS=-qYaMwR~<%n?+3LL!oRk_AB>3uJcBVrOFw=ir{GcH0=5hO!IOEZ`~#=0Pdu%9X8 z0TKIUnYpRiDlm|7{P-tRU*M3Va}><*x@bestYO15iCTZrvh{>Mzwsi5z2wwc6eYGy>RZRM^K42$7n?`r+l?i(CcH7xQ5X>j_P-tGM9eyQm@4%^<4jMe$1X-0UK 
zn@_%W$iSpsQ0C#W#PG)nW+Vie&dD&+iIm{dbWN&Y(b(kA#1yZl*40$uVG+inal*_n zSkW>Oe00c1b=SzI9*yBYC;Af#wKtM$mTsOhB9gh1M9vD{GvXQL>e(`Z1|BXy4aavX zKr^)Y&a;J6=eyX?+ts9a_4=2q#I!@Bwwj7%%Kt!TNxY)Y?`JWaBw+7vM7sR;aPpeI znfX-W4K)m~I0ruW-bKvb&75iBftIESVd0CF}Zr{H;yJ zfXl)~fPfaXi5w)3?4REXF<=X%;vfhhA0Y{EnR_2c9Ocl4#_zD=M^0M+JRL=i{4uki zZZK^6LCt_aI_Eq11v<`}w($W56cdccQQb^+*=8|_b$Pw>@p>6g$?fAx5KUrs_GpFU z^i%`yR9dCJ$>EKIZL&pv=eOOEXc!0#iNGke>3;<@`zPYc8%b)LN=@b8;8*`Mhzto2 zhU)su9;g2Yj9LIS3~v??SVH;#f&^1S>fTW2KB;4-rr}0-xITWhupsn?fZ%o9hYv6q zm*urwX(JIC8M>J7P3Y{b*4uy$ybY7iK&i5C-;QuEUca_T+Ve`DLx^^Ck2&Du<;BOM z61Qj8ma|5s>t&EIH!r%GTbefgVzg`xK*~NzSKgfqkV7ghI*UZjZz?Dz`13fmmQnx` z6UWK-ES}HMq4(4Fl0C3*C^|c|=R?TfNf_5=uq3~~G%7uq!}u`g-g0Ujmd6)xQ1ySX zzYg_^b0l6I<*%EzO$B>$F?^ybB6Ai7*xXemI2&kmJ|2DJrV|7-Jx|L)Y986rPcBYY4f4UDi3i; zUOr9t<@+7Oe62-{#g{4&kI*}0O5e4&^FPc)MDGTBO6;*&$x%I@9|S0Gsl2Xm=G*DV;Lz?Xu~lh)i4E?1MH8r)mVZO(L=H@TO^2*BL zt`ov!3Rsk(934;vv-F3T?R4b`1to#q8KFh@%;b}4j)aH+c8zLnS*aDAtJxMeFcXyAzdjrC@=ZiP8)m0 zHBIfI>ZZM52fI-3D0}Y7yISMg&LADK+JOczd;0b9zc0R^HR?~_I)!`>7Mw2{&qsr| zc?`%MBo))LD_9v>IIyu8aGS75Bhe>+z7V$OzOc1EpNO=pK)Rz+lzsY=ooFUlU~rQo z1UCGiL<_?>!1uyZE7Z|u8P`LbDxp`uG<$rXHsB4OPTA42RewVG=z14=x41sr_)@5@ud`3>jCwQNpXUY2u!QwK8O=}y{N;g)BP(=G1( zFFemh5t$e7OZPisHmBh~YLWyPLcsGqRBst?SLS6!*xrK6g*}}g+56skN@!QgP+F|@ zDGzKXbJW%MvKcpwMKO(^_uA#0({gLjBZL~zA)&Ft+Uv6YjBO=h=)ktMy)CI{`vsed zqy2_i6QbXP;CJG;w49rf2&ZivfGm1{r6F#U(>$5;uzfx`k@L+nHG%jqGkOK_6}Rij z&2gTp!5wHt5|mn+NJKec_$TZE0U1+N!MMDcYdZ!RPrp^w%Lv0t&)mm^n{hH0sAD~m831s*dVn*?LdUX@vas7xrAe4W`Y*0Hj`1BJ$#rj}OMBA;FT;d_SK;{`_S1UuYD z!MNAko=A^htXHqr{yav6lCLCOtZLAIFfIMKw{pZ*pKIsnXxZU;yoh|Y`{D8CG{pb$ zhKuDZVeE`6Cd}AHcm>Z(a-qDq-jP!#QuGpluGwhyr(uXHx=Ig_ola4_-?4|7f9X`alNlQNz zP#eoHjvpmfTWx1~S@9PY<|Bwxmmdy2C8}P!XOxTb`LIx&8N1Mv= z@d;4qqxL8CzAnv1OJ>MMS=iHy3(j}yA|^d;eW6TANwFTZw6a3%qxc(veFTG^mM94$ zhGS8V8t_kHJW;4$RSFv%QjCj({F==Z$paBY>@xdDS3M|4sO`% zcL?uHPfxE&+20mU$t;=}Xg-;q%yB->sKj3z8|t-az`~(&=T=YWeo{T&!~bBZYK6r{ z2^;?_rkj>lSENqiaH7p|h68zkZ{db0#NQV`zOW~PP}<0mJ?31J9mwyrjP^JDnQD+N 
zPY8YB56oqBwk>eE_P|ozkGxTMc7;CCh2)kcTXi{Wijh^C2Y#<{FnERS-z7u*Xn#;N zbTs{{&a%5IM`WQX`8b!mJ_`UTEL_(Xfa3l|+@5ZT4l|W%b2M&zRx}Zc-@JT-PLp~5 zi%H1*;T4mP%nEj6ERC!P?11iZp#Tf0P8Fnl?^);TjCDny`A}??^LZ`{yJgkmIr~II zWVz0h!!M*taBPP}*IUTg^6vc6L-y+frj$s-X2MC#6>NnaEw-T=vwle2!EU+WXuHE5 zJX|Jg{~~~`RcW-HW^8_j_Z1sE94R33##mi6JFd!P zAR+u*#`Zt{3xEVMbDv~#{uh#n$FcdU(ox&8VOqy!7IwMhH{%wUo(SwLbg4$vE91;t z@`6)i$*2F1v$qV2v)dB3pWwj>9^Bo7TL>0{y99RvK^phq?(P~If&~J>X!hT6JN)b?h6}2Jx7Bc)uNUjP`QGyQKe8(!32gF8~LfbzxyU67f~W zf9ADR8Qlnyq?zfIWT@(Y6GsU+q~cH)M71qOfxnBV0H*BrI3IaQ9&?EVj&3fU^q^%t zAS4Ui{kQ2L#m9RFKC#WvDGSJR1bVce z-qSwnqDUrrjF0>jMX`AL&X#hdYs%jK<0OZcpMh|;=3}KkbMocZ8#np)$Jl%}FJL~A!NVm`JJxY&xP$9`X3+_E# z%jJo4Q%}z5nD?TI-s42KPlWvmmTs?!MVEj3|JzDEiu;m)081H{w85x`tsmJGFBAlp z(R}x9t-T3577I|Bj=l@_q8z29$ zpv_!o7U$_4u>}y}4rk&UHA5LOG{tl(*VCUbk9{=(=CluF)qc$J<4Bke0W)8xvv{Kj zge;$)GEkoeC|k(ACC)FRAPM%M;ou0{-Dtv-bv7>hxm{$V^oOKgY|uw%_~@LIfCB`H ztNo0NNY|3vKxD;x>F~8L@7Y;I?CE|bi*rE0uSSO#5grlvoSSl6|3Fa9c-JeS3M$_4 zDUOlP<0X=Z3-OZrh6m0>6((bvBnH3w*Vje*6Z9Lc2@L};8>>uI;H;}1a(oqeE@}zg zA%LNnqD;G7hM~Cst$`TLr?<$a%I;XAbI3vaOet`w?i`-`hD?sv zcILyW@J;hJb z)xs7ZJM=#)ZRc$txo~BIk&;p#PDRf;aW7U-I6s&2{$Vth2-%21Ch&I1~e^f}-l`pwCM>Xx1IV+Em4lo~(|Mv|GyJ1J-^g zlk#tlk?Wef<9sEkaUZgvvD{_$-Q^}(F1@i_kM4Me4|b>?#|yMj5bup}Tnr_hd(D5f zoH#$wtd?}$1{2je$Kd(DpM1BkDrJV*Do5dmaW*T-o_AK!Q`Nr&hga5sU z2d2Ctx&C4-RlJ3KoOxiK3rKp7=G^SpBLBpC{2EP4fh0h0O}_0M1&djxvQTGxEH1c5 zR95arR<6}>?^1>wVU*%*ADm;x-%<2M{U>{87cUGimLglQq*LuP=6u{!C)E=*;VulOOV48Iy`+MuaU=`B+EG%`I;A-mLJ zBoVaSlr1T9{#jOVl*(TuqQ504{KzD*hbL>jTS@A=U8bOg@SymT(V#{m5C2l?HtNFU zgCJnuS)}Wp&*l@D>`wXkU&WtTlKoX#C>WmzG9sFa2S@ zW|u3G+@Ao6xG;+ilI|l8u>zN@1S#15;;Vr zc9P_Ij=u4eVN<2af{S!0DxRUtk`#mE8~XYb=}@k$H{D&bWqxTo|J7?$cu%vMfdHJU zrXTvXyA|}_BYi~nl@Wamrud|a4UmQ0!dEXT#Y)3;g|r#pZwyvoE*3KBjKT^4OomNt zk)!~ts+8`V8`cL8^7F_qa?{vZNI((H*Ij|xiX}}g!8(RLEe#O|K=|$jxT=^2yKeQOp&N7Vi=rJ%NFy#S5;5qY%;K5M${))MqQ5t)-@Taw&asO7o2+bwr1<9RS>7`PqJak7n;0xpTC zG4TdE3rnvM#a@tO*EoZtn0{!MJXh^;k^pwRx%a755`bD|XaP@Pd&UT39i}m>J;BVg 
z?BSmKBwNX*92?6`{*B?Ned|lISP#~bTJ2}T{&A=W*GQ$l4;$%dK2Mv%6VVv{?8Wjl2TfPk37lA|{w;N=iTW}!mqvdq z>Un6cx@oEu6N{;9>e31ypN++vbgYM~dpsy(pILdH5Y#DATFQ_)RZ1JshzQqFbu6{$ z-xL{c=G=FJB+(=}jC!KkI9bms^?;j&tF9jiUAV^+8|Mj&JK z55TR$o{Sp9Zm(uEqteo^!!2&-)}7cJg}7gVWm3sC)r2L!vc(jlp7kXMfkquBV6c5g zXFZs|v-E4}_tX6Kyu7%!Qq|urJm+I{RK#Cv+7AFi$Z7Q5%8nab6Lz>ZOZ z31IR5AY91sE=MtV@rvN2MfR(cB#KGSdaLmf(pypRh!W79C{_Bb(PA&y3>jAGzX^@9U8`lvwuyqD)EBo% z_hO0ga~d9uWtM{UNxM6ba5xWdSMVKfwV(bi6pzt4k6KF89v_kjcV-y8*&_Zqfwdw2l+WW zg4)Qv)uekWORDyZ(-9Ur@w|dN@?V>m3bueXIKz$V3Gook6})60+Rwv)6V=?-3Iwrz znWURXdw)O?@2R`9KbTDwFLo;Vm%ISZfsYyMtz@xR2lSlvmpN%^$byER;y)vpNy5Rh z259*S7}ScBaYdv?fJxcW;%~^+p2mUu${blK0JSnI|1%zYrhe)0)%jCjy_4SSiRqDg zX}0CU?F}ps)-LK|@Pr<>m^^YOvb)RNFcVuOZ_2_}QzkypVQkc;QUdyyi;!hMNMiteXq!-EAalE~q171$IU^qF#8|Io zGWhR2x)GTBGbP{?wqblsD#0(CEjVe#$UF5Z-s~&6?I>?+<%7;zG)OMh6 zPSL+2RGch-`v*-px%YkQ6Dop2EE#Gs19AjDo4c#%=xPzCOun)aI~uL6s(L%8$62tY z^)r&(htmF2e9P)aI?>&IJ*@ikk;l#qqnAYSVHU7SGf=&-+0=rb4@S7^%wr81G}>uZ5SKY0Zk#6R8kGUQQyv#+(~k|$v+KS`y{Z!N>BDR;CCWHLFFHsjuC-7Mm@?K-2x+r! 
z<9=9P8w0<(f^bm(tWlId{8BK6IM)>RgB{QV3d;4Yd|JcdR0+IxmY7lhw@k*D8aG+I zaNhI2A=F{5ye|^CGQR_kHSb>CtLU44!Pw6`jD@eCpGf8W$l39itAKy*=C8L_cpc5U z+PXX$pUR?SorLJ+g*cZr-!6re522S~i1O9F;vw<(8h5t+v(S(V}Lb0&%Nv$M&14$M8ZmkLB~hQr`# zG>VGnLIkFlOHebz3228+6Cc1cC4eQgNSn4Vh(NFb_uXj>>qHnsTqCT6ObjqC|KUJz z`2()M0O;)q5#YyFPi1Y_E6KFwyN81@+4~413CMMx!!_+W>Sc_Bo9c&imi{a5~gjnu}A149xL*trBrArHd>w1dmoo&}gVe+TK zZQgQn#!M%d!_tL+${-SANX}A3l4~ARf2no{q>gh)76(6CE$3Rw(Oxpf6zJ^^jbEq;>syB1>n z0e=5qe>m!LG^DDom#m(wuK!3~cGQq;QJN>XiMeQmxa z7ObCF9B;n})_{1uD+^#jS|kkG+xt`~DS8GhSlX&MVndR_^V;DGoJme@gn@X7OnePY z?87SYS&)7=n_*`3^pg;~Yya1$)fI@8iX8tuU%>=#UB3wHatV410Py~3Rglj}llcS8 zR+GrgkJgu|N>MS_G7}pJNpSw9^^O&(^)Ce4M={Lmqd$FZ|F%DU?PcUr4R#M?q%so# zOANfcq5qNq(dPN-(J1J1KOHSKuOoTFTNcCkK?7Z0wtw}YqERAz2*MY0&>s7@v*N$| z^Wh!!?^@t*>HqG3{x9{al=NE`=U|j-u-<=t2>-2|H4u#%IX){6BcDt=GESxerc~%HAMEqj`xDg<{eEIUPBAN-08)x~tiO*mxDRTUE zd}>}^jeC1XyVE4qH+kbBjKsNPDob2oz zv&R7}Zi`tZcvok7Dr#Hs^dgGeYn6H@rvQg%a#BrO6EP;>;YDY@Ao1S*e&d6sip-5` ztD(Ag3%S0&KKE|JGz~pH5~M&FF`F#shNGfHsRjN5MWk|b2_6$r>Vy|C=9^tg2E~D! zgnvc^QB#N{gM)sgT$*+JG_|{Td&4%0{otdHe_n+Ksw~L|$Hey?m}{#-g$u?er>=G; z5jjC_B3e4S_H*~w$2c2<`GsF63mg`HVg+>ZMB&#qy?DHn;)wT_+n`e2ALn6WLOpsu z@Az)r>*IB{3w#(xc-Q;R=se#M`;4u`@nqs{?)1^!rIQ%F_v)m)>HXDb%DF$!*z&Y@ z_u)d;UF*@m()Lf}($(v~tv4oh_uME@WMX!CuqgcEa9OQSwQC;lQ7s^K!JBmd??&a}SaKinO1 zvNdO7XBRi%d${f5H+6g$2`6EWTAFec;ygG=I5z%e>kFs5 z$CFIhe)$zGxIkNE(xs$hz=jDz*WA91PHy;?e9@kIlhI0gBuU5cgfpnV75&QaITm3k zQgO?hgp&B!06F5@2A9oK(q zM?;jt$N!-nweS6hcC=kujpkpqqdAWMqjvO)49iS!+@vZ8Ct@l7Fn>(-5ZPNn_-=FY zw%*9Tu(UPxk%+@nl=U+&{#2of0i_xwRbF4YB^`J$?5=B!499L&MN@FrFkG=&Nl-4S z{z7zm=2*U8ef;hOl}O*>e(35v%n+mlxWW&6MlLd%?DY`0TaLqr6L-4b8%5 zlJfb)O3um^L0Qd_7#&kXygGQ-^#XBqjfoPqN{B^W=JxL$AnhV}la(?a8Z!?1tEZx+jl_woBIb@jE~fR3#rf>~tg!)-CAUU0 zNGNm%FITyvbF#jU3xs$NJz%?VnNIla;9$LuM8_a+0{w&Vc(RJ4PVzLPoZM^p{C<;Rv%cXQ%D! 
zT-R*^Ms$7mq@E7j3DZJB$(k-oMuCU(35(;;uzKkFV5LN#K)A zx8w<+wmmWYgHtXw!*Y%a7mZH&(|}JGn=*b0zBj{zMRzL@O=-lNj&mU@9`6?2unV^pvvOO2B`b zYfpP9RS?yF#B}E9a$<27To8nX`m<+tD(}EkwV{tRM-UUd&jzPo>+)jUm7fli=g{pU zQpTtUucnN?SNc)4<_Y~Qhhi6d>fs%7IC=XM_BMTke0?bDKRxxAmj-Ad&=)i4f4#q} ziT3}#wTfNt*cXPTg$>t1_S{I6n40QH;=q4#5iC=`U6NT}G);)@9HvV8fk7NqS^kHY z^<8W92AnK8J6tS`F9rxOBY5Tohg)Mt#?SG^YnB@Dw*79A*=6@%@_uCed=(OS%VvOo z87gH<(@Q9np_~J6ODO8rgk~6flTJ-fuXXz#TYw8(m|{a(Y=x!iv!Ot}Z4X1el{On3 zOKFK%i$^z%$*msc7SU;rb2noFPV9EWt5)ET-Gct-l{br&xbzLu$M+1Ry&=jX1KWhErxBCGSx zP%M^Icq)-_(k&c^%+O6l(pW}*_BasYm#4p#z7dWgyN)*0=`{b#$*Is>fRz;-5)^sj zoEe@TC8I4w>46L$=bA_r#Fzx1{u8Dta`%M8Gzn;bV4C>LG_^0|c?o3xuO??UXEIFI zn!XmRzSs-qMxfxwcGqg@=`wxCuL3UI%DGAie8lMM@8_v(uSgmHvJB}6@=&SOYv~bm zVQPC-Z<;lix$a?qgSBY<} zd<+=%2?qo7{O-SDzkO#NXgGupW53ml(f+M1rR*x+C*Q~2--qW$A-DNi+d4ho#xQY~ zEP0ZjO#do0;EA4-e6_PPqT8lf=L@~#~@fz z6#}vC>{`ubQg65&26D$Kg9tt`dlV$#m?FFmviwM&hagWc35K;HLnnjqW)|QbsZ4|j z&cqmcQsj8dK$UdG_GoZUr0CUfBV-O55TYX&HbJ(+fc%bj3%ptk3rj8B{4oBl zPbww*q~x~CQ9`>)>PXi2zg1%*Q9nSJW`i52ZUqS#>Pa=SqatoidK%^sY*Y&qvM)OR zIf6ND1`H3jE)(oj#!kNjpYLbM;0y~IxsdZ;5>E%3*6Kj2=XhW%cXHa@37*@jw-&7P zH*?-7Klq$^yTUVy_1te_YJD3^z}g)UcnHVGE?G+2@mMuqNfwDAv*nxtzU65cTc3YD zxDQ`x@8{6GFwc)opO9&D;luP~D+57E$KlqRzWHzO1Z|y)G`=kyGTO~?E}w)4LK>W& zC4Oevj-+!;1kbL|6`qxGv7WlL{q)pWgK9a+P!LWJ{}fle!ZGVUd?jL3BNHI*jE@i# zXm(m{IoRo8Qfzpb^_OZeuL{ag%JqhsZMP>QTy_;3b4ziAgYi1%NVhrEjjg7I)j@yj z$C+ZHjC@F+$d5rox$pCk?c1Ar!y&&SC6l~rXaV!-K*hJW!UB%(hl(t^lQeq?x1{tR z)tgJc`@syTw&EnemJI;k2D-`1y1X@*oC-Cp`@9UAp_7nD;w(!TVhdie6@ROP>py)? 
zO|g-l*s>|ma78nMCB9{i>=L+@9Z)Ff$DC;P$)t$)R*7p88Q zXk_JksSrA5byG5sl$D6R%k25iQY@tfi}*xA03a)`l`Be8>?`-lDy5S1*o)20)eC|f zyEToFGbtj&h1vpMJYji#-~U)cfA_m2B}rS1t#5`((5p_KMBSr}%^=r|Fv53Tf!rIQwnRT+VkocL&) zY)acJY^w$`^fS%|n5(s0Glxh>GAD7civeYwUtipnZojr#o8zwpc!z(Ti7=JFrDT4; zizTd}Pnj+w7I%>3;W>rDH@W?*hC~SQL^k`6MHE~0|6vhj{a1^q()4;cCUTDn_15cC zTgGFUNx;kJ0i2YnsSszbqh-(kr$p3D_CF<}3l~AF2de%)z9AI103$x5H;`z5^~l5ygB4m-3nJtajhz zlOQx`K3_ILe$T%eR#iM$Rq&BxRLT=0mBXTXQ5V_GX($v3C&T>B_l2}e;iZ*mD)WGQ zj;432Ms2w~_UYTni6in4k9XT2i?EkABdC4_V|=HG?ehf;Z->URJH=Wtjrd)`l5~NglEr{Lu5#EU5(yv-c$!p3DfEJooOizkr z6G@r`!am2whWa#lPKPyJ@mT)WUzv2yg?)sN1T zcA+u}IMiOK(({3jI+lsU>wTnhvM|y%fYkua`Z#>bACZGQFOKtcz=B<6k$LtXV3q4% z{e74^@C4CWLODU|HIwp)D>U(SZQ|g|`E4duA*}jIWr3DaNHEd)@hXZ$rwhYti=-*P zPAU-Sf-D2za~aIW*+P)MaX--Rv|Tr4wfG~C-1@o#D-l7S$|7xQ@lue*a1>pz*>O$< zi;}0EK7`N8RPc|huR;Ftb?`=KW_r5y-=aQtPMwDnUkYI-7s}CiII8oz)+n$f%ts!I zpEov+m3i%@T9-bIytrNcajj&5DyMtT*CF zGLjMbetr&Y?WrEXGU6?Qqk3M9kVEi+Xg)AQ_^t|VqS%#|)!@S?Y$;`n)oxDLWL`6& z)` zAEok$RkyuNB+40G22*sAF!GdR4!!#-RZ%k$If-`s zw`erw(xN#cgk$UMa|9eCTzJ=vpiJ{2e9D6}ddIf%Y3vuEDGFv%?qv zwELpEenGE}_bm`B2hm8bmiY+g?F5PR1W%D9XlZ8qZaA5~UxYZxy)_%kDdRd1lSiwk zT)x1X+fGUW4*$*>E#$)#aCQ1B zm$8$d*k*$ILl}yoLvff)$!P+RPbAzzEMGL>k*w^Y2n&fJQ`a_9@kNO$RmcPMpJZP2 zv^iZ1PVtj|6b~@+Z!j>&qwZ?Hna3jNNywkup3yJ-^k<~Z*~a(+g@VExq?H^@8RTa# z_5^mOD4%t|fHh$l!eI@J2(5T)D)_>5)b)#RPzD0|xi=%BMVSkH(hx2yd(SC%{Io)$ zH0zT)QW2MgaC{_s6xw#&Mm(ehwM(urVLI*AB`12@iOT9c#XW{L(=SL;89$i14hUaw z`^|H3SqxA*_oEZQOPFJ{p+_y5T(iuT7Wn&b38nML2;{cp{pK&M{gX0up*Rk9bK z0LrP7K8B^w*#sM|`rG1g0ZfJ+m&9nMA#T>S)h4c8U@l>sO9&8O81BBkC23?UIrd88 zjhnh*cGlQP)^7!ecclrwslXZ|?INlJL0e_+S1Uf(^ddiXPe}4n%^okUck8OWL(!z< z7+l?dBMjSE|B*j6@hvR4Gh;TrcPg(^FWch!G)6-{s+{~^OrzE>+&(^h9i@`Lb}|f|VHYSIWEY=+RB;Eo}V-+{^I;42AMS zgAOOe1D7_e5+Rw=?_fEG>?SrIQQ@ka)6R@P!qW1>oQUXRBCW0@cY+x%7OZ9jc`f65 z>c3s2+;A7^j6gZXrON13bes$_hCYctUYg>eyy|VHr%6NbLI~&YpQ#5EP;;2S+Hi7Q zw1A4G@U&&&dpw0XRF1K7Ncy4i2z#GbK15!_L0fQ!*ZaLxMx4E&rCs9-D<*rMf4E40 
z=}}=-D|0f?tPc@Wv`;TQGFIPkajRf(^r>+#0FAv=Mq-RtqdX2*LeMiTn+Zb3Uv*qO zFWh6Fh(uQ&Zj>F-whaAT_B{@Be8*pQZJhFUm5JtMVyGX@u)KnVk5d`_3j}&o%%ac< z1QNufj%!Zc;bd)x1^E1Ui)W3FuCJ#}?N#g8sl_2=C5w;mSY<)nW=xo?gLedUxZk?9EcBW*`DLp`7>=*AH6 z8us2>-&-jNR#gdb3c?`hI+2t_P<`rt`Ho?1J|lk+)%+LF?&!WdrA|x5Hxm*ergO1& z?jP6fczWM$lcGfg06S_es}6lV-Vc+Zq$=;6Pi{!Gue%W9T(bilmlQvN_&!I-niC;K zpPrla1Q!d|bUvUn+yE~J@W&-MmZ)PLB37O~AFn+5>mi{Oby^ViFPGzdTw=BBG8aLz z;LnmB4C#(M%W>`3lc1vCcBuCGPiiQiQ(2~ggGXFF{>qFGZU)nIf*G5u7LhKj*1xil z_eU$OkhrPbFL-#6RVDx|xn+ROI$%NatApgW=NjKnT1?EK*ID}~8zOy1#l_KWNd>a( zLfN~}n-Vh9IKKBPJz_h{Ert>1pDeM?{M-k)PfC0vC6`?@%)MV$6{|?yKQQB*?mr>0 zRXCY(@T2GOa>4;?=8e_?0wJl>bNdJr|J>`xyIoxq)kHqiyh-%v9y(JOK9}`5*849{ zh%*LFfXh7tGQx*h4qk3==zHsxR-}0V9To>7^uDmne0bpq@i4b7P@$n=g2%VqvP3OP z3%lQT_-fIW!?VkCZ(hN%%a_#-d||govWdgd`(D##RBb~SwiV&UOW;O%wm&X47-5p9 zJV%-W2pN@Te9*g|R6Eh|*3>a(yq0hd9DPlXO4i#zXvYfX@9)PkfvEP2*=f}5`pJgD zDwy(1^(4+NQh1#pVq3Cncl!ZVv%4caP0;I!5JuVA4;ym@_Esm&UCK!|-99N~Z&k?M z+oRowu3u9`r0FXU?om}wD61|y!OE3=U`{t$B3-M}?T$z!?3N~w&Ev(v%<8NwB zx6~;NpgY3NMmc}azEr{C^o`Msa*77-DVa**Db53j(&U)Z$wDh7vD-#EiH^Br*5-&3 z-Z(JXRC@VWbq|9>ie6(suLN*e^oppJy4f#^`!PR?GxStN30ULJ`8ZJk)|O)9NDVdz zdMTG7V$F&%^N%@a-2yqF2y7I7>!HdcN-}m#?E@o68tqmlCEaGFY>K&mWX0nYe>n0- zRc5o<&rs$t7tY6TiZQtyP?_s5Y%@4lBLcRPX^#GCx~;F4x1GH@kn6mBDx!Gso;cCM z#LrlNAF0|zZ)*>n`BN8&v%jPJC&X|KptnFbERKrgsqoM>c3MldD|NtxFFo%yP20#h zaU`BLH%haK;gO}>MDI^zNk*8qj;7&0S~+#z=hs8-rFcaM>30-SvE_KD>BDpwdX`sO zK!%Ljh}L!<@=B0>Vul3jRE`8Z8;EPZFBdewIjH)kapX_3xfExM*Zurh*j|JXfu3{f ztW2O<_OO1Q^$>8n1?o3cRL;G~%f>NijBX!c(By9~b`&cb#g#f{NKawFY zz+)w|q0JCdpevet$S;1q2h{%l@Rb2$qcRs&YMuVVf>lQ)tW9hRWHc)Pa*w%Hf(QoJ ze3a%6%>4^_0UNDz&>UOAVsa!bsysc{DwGfSA3{0;PvyX8Y)n#uq$k+#uhW5`XM`Z3~IQLxKfd))yyh44Qg_F&sm8O5oYcg$0^@r!`iD1Ladv>^NBW%ht&C%Uhn z&_2o4x%(L$&olsboCzOit*?(-06HaSIZ}M!TfP=%F9K28BPw8wy&WH(j)6Yq9xD&j z{-7v>86Wchz3q)JG3t-(C5#W>mpzB^IA#8{FuADwg!M&^-RD7qF+H*kZ`DR%Sv~1z zI^o=qtdt}nn(*ZPSdM+u=g7kYnEL?8C0R!wR#>;v22OJu=JJ13qh@@ zefjP{xsV3E+?+i_cTvUP@ptM$cq_$r#1@E4*EQ-SX>14YPd8`cZyEwXyN=_eAR9r$ 
z&Loec3Z256(^Y>~?(CjVR^kLyjcKkkJZmv2-}-F zQrN)pt-8?_UysV?5wniI0Fm!TPF4yH1T}YWZols5LlyScOo;9TR~D0A+sKWMMYtW* zdTepN03EvMIKG)F{Yr2<9b7?ZPAW_&6tJ~-*H6{>h3Bbt(&?hkzP8?ZWcvneUm%eoV0oIL?v!{dP zA7ivrat9h~4r?*)5hV?jc&x+9R?t$Fr!xQEif+PX@}PhZ5+=QS+oRJ1%uRKJG}e8| zU(w32UN953PxHvrOEVq|Z`!vyew9k{y6w*>|JHtSHc~UoT6X!y8$`=B?Z&uOV?^fN z4^rMnf4AJGseQ^Fy_xvzE}C7#XO=-0YLZhvm1A#a zG~8gQX}D)lAanb;JuGv(T$3}Z+rUiEF<`h&Uu_C&9`2u?*7-8_CIDvqxWHAr^jNSG zP-%!h(nBX7o7#WR+!y(yfRVNL&?oaeZTH4`mhJ$yznQnQ zY?-H$DwsBR4QoyN=HV?4rQYCp|JalJtOt)GkHXLh;Q;=DfIqvk@j&^!dY@}owS+2bRDiu*ECtF`n4$weMw}Er>#S z3s0OiYk0=uQ_fBfKFS$A3fGE`6Zgu#J~dTh8ZcdKx5$A9{w&=Br`kxg?0UX-j;Qr% z^)e!(O;zvp>lU^;b*vSoD zP0yEKlN9S0jVeC7TOjaQ^oQo-ovHxM(^BME#s@7%xGcWTA_G0w??!S~rn^v6sJvD* zL23^+3LH=2$hh5~GByg}l}`ceN5|N8q|^OGBNP&zq~0sL-OCiz{s@gW~X*# zTRbHqk=F@Kh-HDv4WGGgY_$)38B$rt=1%rlNmpv|5qCeEfeoKbd+!65(JQqi?9tl; zTn1~u*7KU5R(l^UP^jzeKr2KTeLBa^u22Jvt?C_zS7C$G3)Uc4PyqOWz`XPP$ro_A zrOOlNL>sKzSpIekNhkHMg|$_et;{>{IzB9& zb<0!rNIr54lSb!aI~#ewG!KPeM8=Sq%$emqR4K{CC*jt%9N!ucu&-3V#h4uteDV=w z76L$=Z?jR=e+@Ksw#ToXX@u{NS7|15`^Ns%%uXq{-_XD|8WF+SWNV8*?a0{TiN0n< z3d^jAPm-8bi(Zvmm`iS`_Bde7={Vkb9Tq$%2Ke87y9v`Z24sneU+HUC1voMuOEWh2 zHTCa%@;<)7109#tBjD$zWYzA8NaS8(pWlky`sCeLPfMM+rl+70+q|wle{6B|o?)95 zPr35>d*7&2EZ6v^?WRMHv#BIkutDw4;8XkBT|X+5c!j{I@r?%x%M}|5 zkjDSC9EW3V<{`po)YQL1x8LFML{F}B>Gt4Ed zC&u`5E6Y8{N`}%cuL|C9#Yyp*rE^3lzn*2+RE3?j=J@mFzHF-pS09SGNlph5XVV06 zh+2+ywKkwyO(9iKg2IC8mjQq6K;XbM&7?osaaxbK0WH|+byV*LF_5LxP%yv_YdW>Q z0S#QYg<|KFcy&|8-zSX5*{XC`B#Bs~#7qyMJBoDGkh2btcJH=# zC6ZZ&2p{LfIH)h-N_uH(W z3F&WR^<_j}1Vi)EL@l0YIIa646d7*CO{VK{WFH=86TBRj?k{MpTIexZc6hh{>!?}P<-|en z_|%MyO2cob{u>B-9Mg?go%EfxkB5z*O4{8gkCVdgJ+U3H^InGcIl&j2lATh{*t{lT z{1Oj)#~#pgQ7}iy8h?|!2wDL901k2Qw{pvS`xSJJ!S9P?Ztchw8=XpRlCXQ~4MBD# zC77>~vEC!YP;2eTmB{SZ=aye&##ZpO@$al}uR3S)wT7NiY$hM3lB(6)sEERa1?DE# z)3+gBy2W*u;s|e9k?+GtLzdZZux6`a6E^#;&+mk5@^kf$|61@49;gh#V zGcd%s@c2mC4Ecm$HA&frAJra94=w=jTSeAM&p9ib1@&AVL5z^EKO$@BwUT!4epJwL zLyh@JQNH;p*iSSu&{x0jte3;6*xv0l4JYFxT`OifGFkc%wjL|bwt|#nXX;^9qAVG$ 
zC^G(5?Z_oLbG$*XvOZ$Fn&*%`Dn>;auT$^(>!bbk+qT9GBEF@<-z8x)6Do+IULFp{ ze<|jh+hyS|q$x?*uUf}GE0^ngXj+|WB!2w3)&#Flix`i;U;X*BSuIW0Op)xOc8p`O z8*7K$2*94?~wh)nE# z-ZyVoQmx1P`1a#WuIH>U03|e1#UOYExB(%pt%4Vl@8e$mzCM5$$n z-R`fV4(W~Dcemq{S0WSJTHvlGb{N=1R=IU%vhH;*r8o@)c|wqB_~G00$+ z`4KfS;&QmnCl44}xVPKtQWP8U4?BzdzkfMRJi8rKd9`=moM8x)O3hcidHG>1#XCar zpDqB{RZNakIJal#D5M$Wq#fBs*V^@y7 zEEYr>q%EO`S3Q#z>Mva9w`Rkj$guoMb}0|D-WRZCnN;Q#Nl1XAkSX6AoMuHEP!ol3c;VamvVNx3koZAma8Y$mzz{ZTknKyS<#KcSXfy-hYP|x0O$%?op+Z&MSNFg(kk& zkAW4Ze1%RXw}+oW8plqU#`%~$#v3Af00L9WyGt&|v~iv}m}x#aBC;q922XPP<{^H) zs--mSP+%_wrIA-D5ZHTZFEhy}*~-I`J`XG9++fDr=N^pM73pn7Vwtm~v#W>Ss9X!L^Mq?V;u^jZX9DfJ=P^Crh#uen(jCd*O4LUnq?QLr4LBL?7oO*gU5ohZJ%&9f&G{Ss zoa3>5KF<+c1_N>mvc^g&;OBgGd*R$C7Uv<;?LBXR()HShNC~r3`mJbif=jVPXR|Y@ zaq{(tru4j03WSOj@EUkG$7;Gzk2#@^pMn;%M$moBVHblzZNH2o%^ZGdU!Ok8NJOhf zu?NWvPL|CJ(E)L@kNUtgH3N>&vr?;P^3@Jbi6yji|LgHH5#5 zL675){P@H9QA4=#nAy`qLf6iJ`mb4`j^Z1Ul-l`L#QVfkx5&|!XhrkAxUv8hTKfoA69ZEmtR5*pFjHI&W=8s}6}+J8GvOxg2Eo z^jq}fr9bOd3tyJ#mrG7l$cC%$S&Pjlq#~?E2sao@Lvn@Dz}G#95W51^P?D!Cp-gL) z{K9*ub>0FgsAxW}!vEn6J*;365GI;X-nBM>u8~Hk@*4Inuv1*+37SxwVbt~wX4H}6 zr0aHF0p2+qyj^a<_;GVP&XBXuV88}|v~QK=q#|_iEOH|vq~lQIKCc{tg83~<1FUel zCGMwU0MiKp?4-DFcpb+CQ<#QDj~^9T-0BO3oL>=k;fSA-EP93Pw1iPHxx&UZhm~$G zd&e%Xkoi}tg+|tH1=V)!!7?j5;VI|yubDl8-R9FQ%Yqyk58u9du>I76>kOzL-r#i1 zUdeP>cLxr1IBTxneEry9nfUA@_k4oRiaqSWh@uRjZ2CK-Bem7Tgq zOhM(Ry83(@Lo|@jb5F7lzbkA(EVZP)hOVx>@|Xeq^tdyx`6Pzn@xcemnJELhQEE$$Tepv4JJ zkwSq&@ZbT0y9IW7zt1~+?q~1$=KC`s;qrzjYil2c;F_PWOVkc856~C!=31 z=(0NGSp&v{%zwuwSpl3hr5E;6!lBgucWN!j6cd)sb4JyZ#93w&-}B^`4mae)Os0z$ ziS*&|s?~()afrJ>W`D-Sf)v4a>vxh{bW`^qg~b(jZCb|?bQ0W@w&*lKG0tumbLlb=l|P<29w+=lmzm&!&a)6huZezp_mnu8 z4Cb5yxc#*|d@PnExaB~v@ODSVsprNdZ{`%$WWWUsEV`{L*`r5Qf3XXk+3AS85o^O) zpOyo+A7`8$Ol#=9qYhkTxn6$0$(t>Nv&3F^#%Y|>m!XLY&?t@N-Ayw2hS-8_XFQqhq{-cG@nL4=ZSnq_{lJ0K z@M?y*oKwDo%m#WjNc2OTHwoocgYg12x}nugRo*L#W!Ip(NL_43MWglA-{rvvHMUl2 z6z6iUntwF%zW;O}Y54W%(+nHK5oU&$O)py*4qK=Ho-F$r9O^>e{9B1Tj8uo+5Yc!a+(#;D$pI4L&1st$BhGoMv8>)@# 
zh1Z0CF`%%f8@yeMC)TpfSi`{*o~d;wa}7R;N0?wyW)ZGj?{Pj}_bQYafh4J&y>E9vAW39OI91dsOx`setKL<4XA z9$ph}xfdK74g}Bsa5U(ly8r0-l7!(|W%Kh>dZX+sqUHW=RN?XC&NyJ%w<{hNtNQ4= zMXBYXR#tNqTOVuU_2J{Y<);pGY>h-;OB?Sfxt%&eLYj?XEB&kHa)v{6~~i5PBm$2QJU8*8G~o&vYL(nmkxLS?o!9E>>fOAiJi9y{INpM-}^A zt41EXO?Y&gL!94LyfpoprsCv4Hb@UU^vQJv;`8J1E-Sh|os?Mz?e_9xj|Fd4Bn(8T zoI5PxBSfN`Elu6O!EP-H?=-FOT+60wFvxZ9JB^!&H2aZmANKKhmlxUVa=~a%^e@$2 zSMHW4-O&Ng?L?k)Y2_JMFClc=J(ivEq~z*>6Mt9NZ1IVShKKYq%BW#Ek0mfwnScC; z(k?SAk*3{8h#)6y4ilA@`h3s6AGR@B?_+DB1l$;ddqnm5l2Azu>yUX2;odb(TON;HB$~!JW z#7D+E$Z1X+v6H1BMqaH_4Yn=@zBm|0rO zud>E}&j=f0Y}YBY?c=1@Z*INsTHAYX@sRtlL-0UZf#L0NkID0mNbmaq(LC{&Ak??k z&=A&~CW~y7k$W4e`%Hsmi-?Ry+PJWPXQUoHi~~^1l?S-)JG>3=_J zgQ6tfLTWWQzL0T?FRs??10|ln*)JhCa`_(pwqMXWCc6fNyg_e}{-ICy_)FJnTu?T6 z(J!dKpDI<;xDlpS_8xjt&5q{d<1@TlOlqi*n8IPirjW{ApDBRAsr;3zj!}umTy&(r zh5GNmzw7Ub=wfi7{hvOD(5nJJk!AM&U;XiazKS*(TNhKAZKAQA^Z)qW|2i^5aFEst z!{dx!=KmLG_5U7--*!>-A>J6X(56tKsd}lP=UMs3snFoA;seY_;z>LT-vl ziIJY3{vr@wZS-_%f$|nVxMt95_U?j;sEn1BQ_QGYV6XgL;Dh*EE8Xe{W;B1D;h{0L zSm2B0ulNlmmd7H$b0Q;+fcWh;-%4eM+M#psOzdAjJt2oCUvdGUi|T*h0EuR7e#O2x z8g!_!Pva}G6YMe_urbFRYRZ?(9scVJa@mCWobpdrH2nm1e&K7epTG{E&Mc7O1j(yAn_!YQ}J4FZ<$-_;}W2Ur94p(-3Gey7MhH6ESqN zFqCLH^o=pc4nHN+=U3)P=;fVUN|i3x<#M1&kNxHDoPyKC+_=Lc1Ri{Ab$w#>wz}v< z7SL@ME8!+H8*BQ_zpO~7;Y?CD* zg9C6%iU%SYw2k5(6u8V!lh5EpH zPCEoYF7etx&0_uNr~15yo)_1xK!vHl7h42$QT|^S+nx{=KPmWpV_v!UeH{+jgb~Ia zgfJ>dW(w3)(nvhruHIUpX_S}u z$>giEh`R1qIecKb~-K^kLT2x~fZ9y9r%WUVaYBN#2y9EC_eg~gi8jzbC7-#eu6!MZOx&7cJE zDUq7-L?3YdI24(l0eaBCTRg7U`LCB)JyhZy=Gf2gy_!*h0sDt1``)nP;xmR8fglHd zb^5&qf-mh2H+L1Flvh&f z&@AMhQM>OY_5%T6zRt&8@p!Jmm6$@@@~3!co<_+S%Gw#4rgMLNAvmX|&qgKUm6XD% zKT|$f&)vZy?cy*?aXr>^aAB>KIZT>r$QCbpuUpk%W+a$2u(|qXtk?G-C7uH9*4;mC z`ZtHx?0%l~(P^NMTB!u}EUIJgH#2uawgphI#{T@z)c})5tlfqi0q=5dg581Xm%_LH zOS!o_m*DK6w~^suR-^v7>)e?Q6@zmPG^ zqbMYxk1|QGY%4nnZ~t&RGTvcgpm>BcX?()RModPlXw!SASMOKEZ6>Z@+c#@HWs616wP4N{m*OMn^2yq&7mD_A}TqBYQqSKJ5eMlqL9nKuJ& zMJnN52a9DOuzsWb6)*?Y3}r^lRB33dw-J`W*;t%gy$)W#p@>##g#|_&@AXipnw>6~ 
z&_W{=RKB@oM$s>A`1JdzNA=zizk_&dhao?cT->ZKJiK#?s8=$En~j$YT8Rof%Gr4n?;@E95!RhOHZ znl$%Z56f=z8eSIjbEq-Pim4wLB-X}~vLZ-QGyx$SWBlOzA4GnK8#uM3O>GLBX^JY}W^_=I(wAQeT~ikrx8>i_u23eSTPgnnsQB4oRV*Q;!H z5Wv9Hkhlg=326Ulkl(pY3L%szs@{QZ2Is0AivC=$Ty0=`^vD4Dt z8&d~vu~J|*`FLal$ay|e2i`=bsWbn)av@tJs2!Kd{q(K%Y)fR_YI-<%S9BDl5)MMh zYHO$5Z45$3Zty5K@G1*3aON8m6}X#Ki!G=hUg;);jFy-+(*L~53z^f3;{`Hd&p>H$CT!FLU2pfk_C_cmw6E3G^jO)ot2AMX zyB%Ws+I0wns`BFP>ziMcjk~Mv-Pq)z$CMjlgWI=ILf}$+jq8N}{=~ln$;y#bp1%8Y z$6Kkun|ezds|u4C;vkip53$bq*Hd z!i2?UkwlQh(AZ5bWdLvYy(+Ryo2p zTtp&YpiFqM?22%78>9*3_CQGRI!4I1)7+$M}k8aye;?v7~=p)dJlb#F->qh_CG*Im|Np@z##@t&hWo1}wo~2Ra ziWmn~4+y~T-u2Pi#|V{Bk(~J*iIDy&laeK+EOQ1g%dd{}-d{FBzL53{mFF4xeW&cB zU8f&-#7y$+{%E-3j|}F3@07Nw*@a&U;EyTtLxg!9&HTKe$l8S(ln>AvF1$XTK8DI& zP!BM+pZMGE_)qB#fDa;PeAw$RHm<|xmT5f!6!690Q{;yp9Y;80O>|_-c2~u=6YXK6*#Lm`_Ya7X)0Mb{ zL;Q7^pGqySkCWsPpAR*Y<8M2~L2gb-Q0S$NKB+sE(GT+$OS90IkaXTjzSorCF8;Z5 zCFr1!V*lyebU>EpkqDr_zmNvQXI$X={%wD}<1|Z;DU@6LAeVnXI;LV4p z`r};(Hj{odNRjZa-Nvs*MI?neC_Cf3pHnKs?LMcpk1CjfsL7FFsegLM7J*h{=@%vJeZQ(asvK$N(c1{r zOf6B&-D;MnS+u?cY6@{(*q=-aAArO?nHFuzeSc6qF%#slR--m15w0 zpWe#%+}+1cFTE~fD0~`+_2-o;#SvVS<@7u}2tZh|v92OM)5L&^uEl26G_~?;ea_-| zD2{BMp7gYg6@)6LIYywsivVhCztBHoKLrWgv!ANVLK=$40i?)3V5j3tIAM zL9!N7B*_4)$X}lh(Ju$v$u~?xGcWul2*2sU7T;_&YKp>WPi69bg}|9B-xPsIet#b{ zybzPwWN0%HdwN_9~7}Q$C4O0?X5c>uz| z5+9j;(C_qb5iaf~VrJnSh**#OE;ap-H30dHNZ_^_TNJ8ef7wfq4Y8bG4WsdfpC3#R zYfEYkXH$muL`XYbSy{d}+{)0kRm?I>ECG061(^P@^f$j6FicQ?(i96%Q z=L%Tl+*ye}o!p&wO_3GPH9gh}4>ci<)8$1vjSfo+#Z--H&4#5LX;C+yfS4jqm&vAOi=Qt%`+jX`>v!8$dLFqn?+FU z`Y{JE2;m%A$w}@@^>i20ml zsPsBT?EDQ+Rw7DAe2!o{5#59P{5J~@^Wl~QJ}*#wBvNrIT^y!)rZFYF>tVdEQ*tbl zap4CP_iQm}1sb~d&#z?MRd&buqzoiKwtAMjt()g2_xwNnYkpKG>g0;pAlt_6E?n)(Cc|1c@H`5zCGEo$?aQvgRx4%P(=>P7V%WC z+y2H5tPQ0_-85B}}*xIyonQn zV3Q*t)8Rl9@hMT^D0YIFa$x)3^Cl*;v17-*;Dnha>{wK{c<9QiRbWC%+0cMkjTd3? 
z+IYzD(yvnzj+ZE$wqia{5M48oumQzS^u?4*#4!v>a-P!vcg^XQFE#^e&lAn|RG6fp zBKX6Cz;klB_77j4F}s$y&nAV>QX7+4r$4I{!58TUUTsVD@x5STmijvNnCml_q+$-I z!H+(1u8!Tr?Hm)4wDAhgU(X>CNmz6h#l9|fgD<2#>A<{0-t6Y9E;{0v+mPmUsJfn> zT0yf|8$AvmTpE}&)^NKknts2h)s)wKWkz&%ebA?@jXL)0p~YYNMKs$5s*)8-=4Wkp`Ul*fU}HGp-a3 zzlxDyvqgUc=yiWX4oqYQA32)s6B{4^~C=xoIhbkdv2oh7}4oA&6Zs7 zh==p!ls>rM$Lxh+%-813k2cap!Yci_ulr1BJ>Mrd;575A*?nc(jUuRw8GTmvzHK}6 zHa@UZhe;_rKbrD4*hIZQd>S7skkNDJkKVROrZx|XO!$uTXbhpJHz)d8xuMWp@!8~4 zRojXDcMm$+x;{Q~sYA^yHY?~kk^>q_W|#09*RqjQPFtw*m=N+A=IY}0DY_(umx%7w zpL9M=stj)2Nfu9(Vf4>h)o+Dv#?~!EG4ZH9=uqN#ycW*(`pV65 zv;tRb6=T`(3_2$*|f@a@gnYuWQF$7TxjDOQf>2h;9w+fA2V^BiKYl*C`FP z-zm=T06EH8XF23JAruoX?8VKfkvYz6H+E^~fPQXU2tj>Z*~&Sdd6w&`v_&%(b+~SH zpV>QpYWxwt0H@-sqI^82_0-KVBSIf$5&Yw)ncY1wIYS#cR5(@^FB1okw0|vi`g>19 z-&+;l`0_UIjU47^Ho;Ds{=F~9rU|xBjD6IeN>AN=Hdk-;GwAWj!U76UARM-~w*2|i ztd}$uI@m!GWN=g%rb?(~Rzhu2d3X1K)ZBhu&X+`Ju z=2R+Gj1z;#$&$vnsCff+vk=^ePq&;c`z1|3(+gA*H*r?tjfG7>u; z)p+XWYa;#^yV0hg!~y&qW(Yf8E_#1ZoDYnMQuqyDC|07pUVRJA2U-qGu#mH2-xHxF zl(|@>MREyV8d|9};?9V0KOm=d>)tF#79Vy#a8IjXo$Y^VS=ZrRU2rZ`%d(QKbjeM0 zta)S*K~;%qdk7A6t?&#);sVHqiTc?0dTPxE5HF zV}w1tALUn{MJ`(+Swi2nW7HC4NIVUMgAY|@na5g}CF9JRLalCN<26T1n67G_B75K&ZL5env%{7>`%c`9sm!cLW>fl9?WQ70JP+#et+nmN$()=C;<|D}WdDE-yD zzZ;-^aA!J;)x@ut?WQ-2AuDk0_*O$cDoAd?u?eY39E{f+cJ0UFK_)LP-Qt0m)EXWh z+31X*PO^joW`GlZM*a)CC;p*hl*d+WNP!+JU|@tWVn5vEpQnx52D#tz?}}|#cd*1e z+*fQLSVooJTRb1lpwSkZo9nq^)?7XcU5|q2B)3HBeJ?9#TZ_oJ>v`R({%JdEFiFJ% zCnP>RS%$LDVy;1m$UOp0y7;@qn4E`$L^Nc~!MDk{1fS~9*a@h^@6;`(GwqF}0A2C; z^IqAXtjt*~1vM62Vw_6%BCG7_uNwQJ7zIR+9iGgkt>|EdP5!Mudf0t?+^Mnj2~J~MYaVmjcm{w0Q@mj zF^W0Pvq(+y!^=)Heh~T!c_9oTobl>BYyHCbw?A^9v>D4V-xL zF-(iGcskZGnCLrpL5rS>(Y!n1Q7O!4=m(2MI+3tNRmqO~J3jfDxKAmO_mjg7g6_j-9{UG(%u-OY}2<-A0Ga#~Y^xHX#jwXZ{Yy zZ$5RJT~6tHmiC6_T~S=#3jbl9);pg+p(5F{s1;&;o-W#ycg2YMsR$H`hQvCr?fjue z3IFkNDPMq7a71zXW{ZQq?34jUpg3~U8|JTw!l`|}7b&2TUrZy9mx%QROkoi!_ohsSQzB!YO90!dHI@PvR|o$9Er z`NkF^Fh5Bga^0KVL)Dmq>P!X)#6PNXctJV-1!pz74lqzTBnZXH+H`U79S;b+Fn2iA 
z!-#oq8~f{VOt#>s-=-}qmG#q1F(QX8ez{I7Jk^y-FQjPGFGF>=q^|nb+^~U$3;h2& z+uNtXh2|@JC9U)3|7p#b($Gs(lva*%2tAC}zaOjAyP{lQE-z{~i>3@bvmo``K?TU# z;mnMLITO^`MR#57FN8fHS<+n(2B(&ru+v^2l>qOYIasU29zt~eZzperwVTDBeh$h8 zEUwlyH?Nz6|0`u@h!1M)lA{*MocgDd`HBpCV{?^{pyCy`goPz$7n!5ohov`ZJHq z4A>;_jpoA4bL_NH$_RQ@vee$=vIT+v+?)TYNPVC}(+*A){>W+budDN4ndg6fl%rDx zmKh2p{(E)#SBdNY(;JrU{P=1Ja7rB2j<|eE@IpU2LJhug1?`C82^x}OeG?xuiu)K^f47LEUeA} zKBn84$?y1@O2p$EYUvrSHfl46?ngmQQo|;~$$}A9n%d^#DFm6&5?8EutVsDAP9zI%!{6WTh5rVq9HQaN7s*_sZn#HKnEwS1*+j zGk7>nOZskkCp)LAB<@@3awO(0W?9!=-gbPwJKvQX8Sqh8Yt-=4+*4iD=eOgsO14t3 zWxtbdq-|>S_&8x)T$RNsqPEqoJR}-_bB-8!ZWJ*bz7|`1|1nNV%r0YESD)XBE1l3j zAYR(v4<)!@(ja*uY%IYy$;$w!V4ak?;V}6&Lspe~l1=(HnpY_`F`Yg=cG6*E8Yur8 z?mcjReOFvnD0D-iWzfILScmWE=~ruUy%$&s&k^&p92`T98Ox#kfvRR{pr_b#p`EQo z9+^Gm)fN>FP$##(qt(ea8}4?J&8H?I+Esd-4>v0T*pJC~n93|5Amm2jx=O;>r{MC> zzg_NoFnJHMkLxUCEwkQyWrYxfTtd!vrVU!Xb{XdM^weX>m!e<+?71ilf=QY;Oy{_5 zhEo^zvgXV9zY{I=d4UJ1lmPhonZ5WM3VWOtC z=Fiq1ypxNz!{L+75&I+(GOo92)Iq>8@ly`F`NA#Ps_L!6GL1j4q=E-1N@BlONlTw} zN6>69OxBo5wQTf!DYU)4m_0vOY8&}VFEcPHsFK2AzyveLpD4Tyh<92lBymq#@GqL8 z-j>CAQr&T4FXX%N-2UMAc>qdD((0hy=cdHa{aT9`8|i#^dpP8m)=OZ;O9hu4*bI)* zfpi9JHfYEyNFoHjrKht}0vy$G+Tq5p4Aza9KTz%hN@<1=^p?Cz-h$^4TET?|vS3>5 zw6ROcPxB4;*oL3(F?nzL5;qQ=?={394&hJv(u&Jnl6vcW>4m)HZMqz{zZC4gJ{MZrk1~ulK`cRJ&<5i} zLeN@ny&`C0*(5kmQz2MBT+jXm_e!r$^SoF4Z?t)g-uW5%152g59vn?{DJf8GC8*9kgRK(}{ z8TKQ@$cMX}7O&3Ag&%a-1_cc}v3a^bKjshf47&s|%WwIprYSv(9%*{wwA6*)QDjsv zV6T`YNioG=z;h!d#v-^d?5$8DDN8hmoCN2U>{hg09E5)}kGB+1Om(Ufo_)zY;1T z|GjzWGj9Z(QI)|dVltxP70$wpk^u@Jrwa2l)iaR)Av}j64B4t1DSCuQpB6v|Wd zg%zTqEU2d6XiJyb+V*l5}$LTb-Gm>-(2JR^mh{S zTT)NZqV+Gsk>v&-cg^4_!#LoUZnE|L-F2P$CXQ!L+)^I%D}cO!(=v(1%I}-^C9yWc z*#-K+>wA})0)X*fp4s3vpI4=j!5C`#!B?(@np#>z5Fqls*Xqv}U)(Jf=HvqwE#L_L zalCFEeWI(AFjw{q>PUFN&DmFzo_sLn)Twy2#hR$=r9!uDDn$SW@K@nHl@~`HkLT zO;Dm#I|!Z3UDfYoXy6?Iu49*0-^6DcQIk+#|N>H|MbPk#$O#8mia-8T)SOF*gJgN;;*+UImzrLC)S zxU~qe2B+|K7ZpPo<)v8oGOPv2a0;IMK;TJ_Yg?f-0z0cAS3%AiF>FBgy5Z0@64f_ZV3c^K8 z5Y_E{HD^_NW5cu+N~e>o%W4&~CsNqC<9L=M5k 
z$%sxS)qwHZ>Yb}#-KlBW=gl!41&GFsvHw>KpojibHHwjkmN_LW!K-J3X$9x$P7p5W znIJ*6m-y8cke76Z7=^o~h+=0RB6}a?g)t^O%;*Y81^aZ@2F)DR2Zupz$WKFZ}c>D~H!T`$8BBRjD+&s1x|D>4fJh>*9+WOg&- z$K@dOUR4I4lDbeX$4+SkC9zWfCN=Z0M?D1X6Eml$Zu;jXW1~2e7W602)Q2-S*>#3{ zRYb9LH0<1<*^tz%%8xrUMHQoILQ?g-!WkE-^rocmMxNm4(1d7VkUn2zgq3*H=(T>d ztE|*qC1PKBB+_zw|DZvmZIri-d@hc16Zoq9$l*q@4$t+g{_6Ku=G_ zDOO{zxkxMxoF3;s0i<#^IS*7KmPXTqslHar%17 zW5c!i49MNSC-z0c=sD_5KhJ65$lA@e8Li6vB8m3O>KpnLMM9{-Q${Dz+cW9R-AOpZLqFGp)d z@Nw28OkLv%{pnlG@ji1Lyxf3%q2xV49EAvI4H!T=Ue28!6oO(E*ikM!8l(l z?!dvmgmWMvvo0mNEZt#?eHnfJQqC7TM)`0T;SbmmLG5qEen=hx2zqRYFnV9kGXGi|Rf7W_O(T;2f?%qcqe2h;c)W zzvh+KxD1(F$(uY2>+b1S_|y1Gw-1AH>b@u2qOb{eo2tHi)Q-mW=PJ09imQvCG4#ur z-y7<(NY3|JDR`8x-B0kZyy4FGu${=I`~{gWMbtGtrv>?I*e1We4AzyR$zJ-!H9?R_ z(z!MpFx!L4!@X*)=a>!%MbN8^;48L9IaMI=XkV1Dzaln=m1xPmw#V-mw!4YhiTYNG zI5U6%lK~$i>@vkDLo$s|gLec0RH-1(IoRbw&wMmLsz8~15)RMONfAowZPyO{4V-&& z$%wfh2ttBPW|pcd-`w(DKUsxGC?#C?TXwGbTThzka^GLlPkDg+zje?sfr>p%)6GT$ zi%-Q6q=hu4XQc&D{eqmT@pMcKIYUVaI05|E)A2i49^3SKYMVHLQ(9S5>cP+tp~4r^AJdKMUd)hJ zdS_Z+%TGw_XjU{;YT5CNlavC6MpM{KC$Mp{m?GWtE$%dh*QVYuDSJ1=?9ten&1H4G zaG~5)&k@WY{InQf1->j;6#ulqRjR)uGf{+c@drm8n7V`n~Z(1Fw^nHvftOg@nc#8E&J z3r#3-`}+o`NqN2gkJ25)YA6~aqif#YpxHZK-|WA@ zSrj=yU)h?90)&GS^$hhS2+~&aroo7E&Qora4W`jQ!a`ni{q4$P*Nvo z)})_UqQFF{E24;qbA6-T-resa94YW`EE-oQ(*Ye>?xr@OLFEjuFw^>!*B$sw9?`JtSg}UomXP(6Gvy= z&FaX5(U-!u3H~Q@C9@JWPn_(goG&r2B;ih0EM*tQbt$hublo&?(=Rg92V%j*0N- zc(B#3t9W_+_)YMTcaQ$Za6E6wqpbGRj=B#y&IgUxxw&^&zIyduRd<&o+#9bNEM6E! 
zxQrs7SR9Hqx8~&q9is`G6L&G^26EL~kC%7fe`pR9xrsz6WknHNog@v_`hiY^hXe_D zxhybg@|fqrBCeIoyDP?X+SgT9XL8I#za>K%pN!j*RvMZh2PvnWUQIkqnjtf@(~Djd z_R6bWvKr4uDVb{e*5zgt7OcPuA24?=D`j<&{J1<9vnoOl?(3n&Azu3%lek<-JN4FO zpU6>FZ1|-J)~aliVhpgmA&p**Ph6wsP$M~`T*inGe&Yera;XGn|Qe0A%V%1=7$gzG8?RN1_ zR(qlt!O7x_1kh}P8Lw^Eqh>9~3iEF%YfI)#TW<$Q-u4Tr8Qpf*Ww{1_aDKhDQ@$Ttn`Hf^#ppu~@j6EUN!$!*Qwdg85fRJfMU@G_BJUkQeeN${)QJ^4$i zxFc&N(s+hGK_qN^UMbz&ToqW%9&u#OP4X)C`6&ZI_YOtxKx`@PET*?ReL^wCTIENU zIgCp;r9$9*6FQm)n`A@3luQA?9U2JHq*J0`vmwnHR=criivY5s`|-#@JvP1i!H?0 zOdgLkt6wP@KLOaaa@*y!tq?gfCUJNacv(Wf55JCe^)0T>slw<~OpOCUA4My&p;30f zE@dS7?C~%#3&>PxdiPk*fG7W&d!bo9mTus#%%PHfi~?|XG{|O*QT0CMHH)C!V2y6n zl3n4tEK;J?W0!%ks0dX}v+%5vODwKyshdX&B_}@Cpg1b=K3x8m3N@VHfR7LmB0|n6 zFoN4?&z`-LM4)uwjy=!po!eJ<#hr~n%xnM_uF77un&Ny3uo#-XSNNL6YEM|TRvR)U zcjTrAPo720XTCvSq&xHnpwf-Ji;>pic%$hfqcULZXgIdEn+DC_>SMn%R2p-Ha zX?Qll>h+MlSTU(C-{=?Qstg$Vu*mAcESviE{J5I;(LVk8XSb|lpYoN?Z|esr$!#rH z$IciwG@6^KTt>=+hYuuM;^#*NyIFw)_nVB@_?*G+UXc$J+tvIGq&yh+AGVFd#DJ*MqCS(#F~$NO8y zJWWo_rU!Ri%Zw5Y>e;kPbf6=Xv@2sayLkHjy%&8m7mo*FxJ&I>{ylPTJU3;<^hbMF zCd6SL4>JA&Qy#@gR9BUtKDXGS{Sq;4@Dd%sylHNuI%^2tcB z17Z_HkkclS5kvmtsAF}Jke@JI|7!cHc^{~Blf2{Kr9}9O(vULO?>J0%Ra4a%11gt= zXg2GXlbmgBOo;QaW5xH7!>!d$&}pmknPUbwbEj#nLRJ z>kW*`Xm3}Oby&^BEe@-qLFlbTv5;a|ZwLr5aQgJ7O}5Et*J#J8IddOc0^Svw)HsSe z=30(@DFhoNjA8sL!Tf17i++yKbDXdj zL#mG}2})x!;#Lrg$34EnLxmtk2nm@)A-*j)=7sT}XK1Ghf? 
zHS^hdm@!$Q>ErvN5LaB*u(sOq?=1Q6@@c`b_T0%>`oR`AqPCLnH}P(N_uxIRjC8oQ zb~Kl^DYRn2EtthjB~BT+UoTW%S!$GiqK8gyuYjW?=v;QnRdZvWJM+HDI@u+cT!m?; zv}WE|uY6ao-*kbgMl(1Jiq>FOC!zo_uR5OMgld4e5 z0V=Wjjrk0q;CoO+zLCLR{Vj#?<=fzKlM-A`ZV(&ay`fjPG#gXc{jc>=q55oK>{#dw zccoi)GM^ooFF+S=youEx=6Q4WWt_3Z6{XITJV^3std&=pDC(WgeTmD>22EBT34?`+ zZ|8vi`Y7BRCD_}xl7i6l-*`BYAygespqmx^r^KZKx1F}4B zwMHp7!GEHJcMlOa@+H8mcw92ho*$Sf^NEOIOdEQ#vGwWU=ud{q!uL-a&E=QSzwnGn zw!)lc^DaNlP`-LHyfWYTp_xglOOG4Xzh-3U8GK8_qqbyh3f%7OlO~{xBgs zOtkuJ55(Mok^r{2*0B(0ni$?BxLhn66Gi=p}-lsw-1Iq`Wl44j?2C`%4#|KH+uJwUQ#~&k~ zlnZ_CN&2K?`cDc8L=9BQ=K6C~)nr~>S;T40;`deCwqo32yn8FgO#BByXcLX5q@OJM zMc31#!hb#9Dd_DOtzsQqqOS%b9}k={$h7UY&7o^tsYg z{f}ryT@4jvx_-?G>Q*|<)k#}XQY`V;U^_9a1K87kgr;zL)DOi{ZDdC(PB9MW?s-Ds zUc?-@EqYOz5mz}a=$X4WU2caza+lZi3(3chDU`*olhX1ZVw(ns26aO}c*G9;dSTo^ zNLhB~r<-sn!<|!nIa6&=v1YWjc55jC1J!;+-r@y4iHg*TYn;tX8+IjO=kJ#L?Zf#v zbJi-+2kMbs%p5Cncw-R*;2Z~?ep&ca+z|W1FZccW5-%!|2kdiu2q~T1E~`iF(frdB zbP1wEA+ZHZpGi}7ZYNYU;b&NOlf)q)oFEDni^*sTp2uxt8VS&sA#f_5%~+Jur6EqL z1&Nmii|UG?&nn6deJ!8XFwqX_;hRX$`N3AMkdva} zU9njx_M(2U)Ckc>YpoO^4=ZT}s#fNy9ZGs}WqqW~aP%N63(T-iORIp9gzEKMluGCR z^tp|y9~+p>`!MM+MFqS{7Hg*qnB>A%wfinPxr?hm(3fb}a(ulA()ivdF0qMcr|?Jp z%W!qxdn*mh`Q}m=yPis_73CPN-S+MJFq4W;D#A(;WSEoWPVgBgZcQh6`NRZk-Yea= zI$Ne6-~3P$KeC327Y|}z+fNkg!c6TGJw}ojAj#1}z?JztBhBK8b1EYYd#GJQ z9;QqBtRBPlXyihZ9l1E!YE(e9f`v^%;qi6r;1YO}5FJ)9wF{C7-cC+6uJ&4UX_`mV zwXw94UcBCJ5WP8`vZDz+plEc@Y72;aDWtw1(@NFW>q+>j7&UlI<3&XSuJ4QUU=_OP zhbK2G|Go*+s;-W)^Ygr^i39zOb?5r+{|{ql6%<$4b!jxX1cFowl1En_th3Skcuie6;@T{>?bd@CWPb z39Br9>hmL-p+UTrxcKI>J~jeVKhlj8wt(aOery#;%063Iy_H4{iX%(Fy=}6dRjaM;#XS5cIj?HlQRBz5 zpZmNil7v~@J4SG@uG$m#`2J;^mm)O^y2r>T-uzUAhA_7E5dLhURV&3Xw6@3zVTvl} zr9UAmn0E)W^4XqyfZr13@Aq=DRncOgrEmVjl6<^Tuc^DpCKa{>y5k-Hwn3sFZyF0N zH;iD{2tc=}(ThmZ%S=N$6f^oOj5CW~6m%#2IFoqzFqt&}NvFB0zfF!K%h=s)l{2kc z;R$Wj2%VB2!uC1=c$RNbcvjo{o0b1ac4ot&SPb?iOFTd_*??#kOWrBuIOIJ)whjAG z6i`@!j9T`&gdWG*#~Z@OgRdFK`fNmUk5t<_whY`jelfdB%P|IWDhn!Fyur)`U6{{c 
zR+OihDQ?}(iw<>>MgAt3h&-xRZPOz<^6NQjoq8efke~|d@=$Z?(O1MUxL&BMB3k5R-?zZTbaF&;xu_)OYm8& z1vBW>tmezpy_xg?To2e#!Mo+~C#`9JET3BLBc-p-ZdosUgLWa-QS5&?$V=qN#*5PT z{wP2E+s|Qvq(4I7b8W%x*9t&HSxdFk|ELsYCI@DrwG~)xgO?xkF{JEjJSKNKafB3Y z_I@uR6g=r7FPD+2Vtzuu-uH*Fl%Xy`qg@?golB_zmh6_kSo=Twu&-N80Q@$50`0REZIIVqVcwU1sQKQNWK>=J$r)Tgrn1+s!ouba(M37r@U#aC z+aXE_!O*y?&}gnX$GAE>i{c!06SPh_={^zi(dG3S==^!*H4{3{GD2O7%ixz*)kQELVGRF1u z0|Z^ekBy=cWPPqMi}M@cc-ez5`kfGpnz>!;{*0CZ>-Fsf3eGD~@=HLq}_fIjis!p-L@ zt9jRK$Xh--V~WBCd#IdsMJpi+M3Vo|%U3b*gnWSTSJM?Dmy&B4&2RYFYvF+h(!9?C zy|3D{n)-U_Z46#7Q8>C0@?8IskT(B(AIg7rShv4*>(!!BoDg@aa~5{_orK5J8YF^Rza7aQt6KEU64K)a$Jw_D6msB!dmrRq~7EyXA& ziLxlYTrMdCDq26V}^D~)SLkNI~nH15G9!as?@LEO-R5=w7 z4EDG0Zw-_TQBg7NKFNPyA1s%KMDE`|Tm8DXuVlyIAj8`EtPK@jh4b-c%{qXvkFbz& z{TLEQa|#y%l@OIg3HMr` ztnyODZ-wVjGBXwVIF!uXtEwk!6r>~P z3MfpfDLQy&+kP>B6eqm5peiB)_}>s{whZM!j~n$D(BlTm5n+LGtwLfO)Rv?%u(m<` z*E4OY_qZ?iEzz+rTibgd|B$1Fk*4$c(S{w-+=m?OdhyWvcoGs#PT`tV9iFj-QwcHZ zaP{-ukt(3rAd514w$|BxzM0iFe0aSsP`w;hAKe%tV>tQH z>|2SdZJ<=hktDj+0QT|2S7PbM){qBs{JoCy?!HIQT?_4v3_x7nx!3;1khR?-`@i{T z;j3|h|JJeH)x34Ky#622#Dxp!W%!g{DM`5|nN{gJIXW z@gbG-o42SAYaDSmG&qa$tw#}`xiV4B{eV`(u}JKev#UTb2mBYpvOMhT&=oqeN;5dM zxPkGdKTKZbr=2!u_AhWZWdCL?HxG|!lXc9}YHXGZbtBDPvE&D`cMBU9P7#;!w3dFg zE=Ju(4T0}ma#g?m*5;6YIh*`hDTFeKHu!goGmcp=*O=|$Lk=p28fy#sr`!7ZUM}K^ z&&Z8{IX#;Bh!jbnB6yd2978ttz3FTX`POtv*6gfX`4VpJH|v>?RD8b?C*JAs$6Iyg zhd*TGJFpL8Bx&0B*H+@iiX)o1VmSWa^k#0f)0{jB>_@TUBd+O^{8&wCqx=j~b-%v_ z+fsDzrbMq@6sK#+@=|=U25;bt*+wy@jn<_QWU+5uTz)B*mvMglW^t~@YaMlusx;Ca zR^w;yl6Q)dd8athN0$OC$L$2k`i#mgi$OeRu4D?Hkl$XdG=$!ZxQQ(GsY|D>~>-LMqM^WS5?MFzM?W+(?EroCCw$E zWp?3Ki<1jF?|w_XvEXYfRO7SHwrmHrHI^3b<9fm6& zrmD?kmh9@~t6UsYl3xZ5=p;*kfO8;Nq7a$oV82QUOso@k(S^c2YG1aphPR`2<~;Y% z)aRD%PRWPUQpwDsVWFBjRRjRJ+D~EHZ@3Q&gvmpPS(MEeZLa|-yH`L3p^Gz zb9vK=l07GLb#`TTzRNcWni=z^w0`jYrH4hQ4p9tt_htiov(xX`iwXN89^`Um3efsV z`-x2dmPpPt;{9OFon5ACkHIN)Y0t)>IP&tC0a|8JTZ*MZ3TWh0M)*>5z-^%v0zUCj z$VGdUl+ZiWq64ffH;Xvc1bR_F1L1$f=ryRQm*f;{x|Z=3IkjH>d^l@q{A2j|<0k>$ 
z|H`IPZoaE*x?}GrE+|cFrWmmZj<}bQ?MFF$D+}QKCoR-W+ou4vP=disH*8Qmq-ywrX%&dyeWi^(1c;aAgx( z?N+f-s3k-Tg8%WDS?Km0g^-{n$@66mwsd_%_X+rRB`N{q)W%{x*O0d5_JL`cZ(ikG ztcQS}j-`zS>FA>Oid7{jVNZ3FecSOLV_L%nB<`O^LAx`5@(d>^rYtq-#y}F3P{01R zwU_bxQ)driXof%zaYar=R^wkwE=v0usGJx(-<;={ zC;U!x^D5@=9(0R~zO0;re^$bRKLUlqWG>emf$$-xk|Gjs)Gz+GAb6peL+huOQjg^X zyT#;{j1|GLqLd%lUJkMWV#u;060flz65`@m#R4V;>9IflFoL7h2&O+>bxvt#BDguc z?K+!Xn?1Jn*c-S0Gk;ZZuL4#^P3&T55Xxkxb z#V668OzR;Ty^_dogA%Euz%6xTAf$)vSJFOQycXa3S0RS!3fAwD?xv^FL-iz4_c&;B`MLv}leV%~RM@ zd-RS0@)QEz-tZW;sf@<^6pDV@U*B$JJ1V^0BZ+=!1ChjuipgyZ#z~MU79F^H*;7Z0 z9=Wx@=nR$`0%u{G7_f^YSn&LB-t0B?k6N%MYy`6VuD%N1mI?7*MGpH6+@{B*kW@c` zu=xrLF$~!UhKBkq4y|T)@nA#9$y1BeiO61o!k7vV9wkGO@cMc;!e+c@0*#|ytF8vR zS9~X|Ql#Q-J+mx3YXctA4czsHqW41C5jp~msnW)ScZyul1TV9{O%CmOzZ-95#M$F2 znH*)%$oq6>Y^*vONtnHhE+Oi9lTLTIitK%c2b%ul{6@=yR6zV8$Af*tUcG*-K6vqj zQ%W9h&iuK_u+Aq~^1*DN#^i!` zTxj~5kvz)&<=ZRD?>o!#u9&juSYAyTZ9?Nv@og8%inYu(ykywIIzkIYVm7Tus(pUg z2&w?=5bW1GQsFNNZQSWG`e^jkrPzaWvOhHY=hQ5jw~#qtG!3jqE4X!T;}`9oTysz~ z;(3zoqfxq1^cE4A9ChsZw>DhE1X=B}`YyHm@aDw(hly4=jW6OL$ge)N_96klh{+i^ z4y(B>0-Sd4gb-?T5=4!PFo{`ns?*9%w&+3B!0M+Zz($1P^kN5%5sMH0gwI%sl56Vn=XU`&<$~ z39>s>TuQo8;tccH@h)(XDIJl7|Lt zF@EGAQcJyS9MIX|LH_l6`|MH&02h6?a}qFt`b&yXI3CH)-7+zzR8D6-P`J`!G&V3UX8Hn#022TOJxMjhl2k9j`8!O)r^s0jW7Erp0q zB4IzPTU8D-8@qz~+ytFg?sL0Cd~@0K^nFy|6JRj$WM#aWoyPc@JFX8ITT6xDTIN~4_CgrjpEu{}KD1ncymy=fst`Ssi{DjPFJ$whrSFwPq4#nuts4@E zL5F;w5InPiw~M*eesDA&ghK8D?b&2DA;6d*_ZVr~>E}(K@%0GJ5OJ6=t%S^Kf7dso*a!XOx@c&GyGU-k5X#K5FD7K>ODX2gizrw!lLo z{JPFf*_jZCct_;^`cDMkl|QHz@A~S#Gf7EO0JF!g-gA=O)kdk?cxRmCE5~lOK5yPR zhyyOY(B`{+b=d0|;^uGEba27z9o61SBIDf{vaJC@*b00818Ln`u_y6d%Hn*}H| zecm2%6`8!5qG2{HGISd*MDB{8T!sZ5%)*w= z>t(P7BDa6R&V6@P@dzEkMC+%K{KlhDgiI++)+d;|PrN>JSIukFz2V=O4Rf;Hw`A|< zmM0b1`>Lwv+;kewq-^}!Z(6vsz#`kk(LbCQo#~k4tbbS3k=p#8y5IX0kXPE6P0b8X z$YWK5>=#8)1eDQ{buG=;?$2`xgq2nxgcKAdB$od0Z zmHV=Ygr=#fi4?1lef7;WjEr|(AM@)&iZA9p={f)89_jv!K z0ys(J*rX|@;4zf8ahXs}MA)$NVF4k1In%nogdB=mJ#glBjQSh^k@C>uT0JhIKQ&a6^%o 
zf0w{0__p*3*h7TKNE%GUj2m+rp@23i6LW?NP@CHUs(dDDhW8Z8g?IuD3VXo5QuLhq zQkH>wXS6p$==8>1T(^dWUE|~-V`f_}NQ7vSqi#YaD$pwpBaSsavhf`HYKoE-c(~f{ z*&9&;8CnUWLj*hy&@ncJQeTVddOi{SromR5>hhQWQ|eo(=^($ff)C8P&s!FQ_4c>2 zR~Ph0Vp5}0b#|^(k&l$LJ8`zUY5vtwkz#QZn9G8Q|2^wms5ZL&JW{FwnX=@3AFMoV z5XlyCuo^?4M{|SNGQeMj&{Hrfw4vDG{shV5|0ZynC2uX)!#>rn?ZEADveYS$O!q_G zz#y|=sts{FfSu}m8bx{fcE}3N5KS$&?<2$;dpQc9Z9T}HW;I)tP=5VJtInInyZbUG zy{q9*`_t>wMirXYC?=eh;k-6Bx-jgfI#*q}HpJ;F)8hxEj^LCCs6@^?!}|v_fMbd& z^-n&!&OyIhWbkBp$BQ zF~UM9EX73lwoS-p(f;2NE5afpieN4yF)?4Py6&p z1GJ@7^4q%(f)?h!bP-G#H24wR9noJbdX;vl>mK!(&<#E;{yZpk_-|A*C%nbWgFT&p=j(7U&xe_yhw((t=*fNS(@ z1~gaJtovnY5rr1CzlWpZ*2enbT$fqXbuHPp6FT(lcpA30qy^x&rt!XR9%_!YZMb)+ zuYA=_*%VB~yN zC}qN??qL_6^ALpP5ljFG3ozOYTYOGtDD^H`;y8+~Dgd$q}&KPts+X z*eC_icJ5O&WSQ;PrhwL>JqyhCy#UfjU^_?}$2QmnxJfe^p;0Bx>OmCdO_Yu<=B#u6 z4GuwolO1o0$lG*eO--XQEgm%}HmM`kqjI-(@otNs_hC(dfLu%}6j(oLkGI+|uYBav z4GEZlW#z}l>o%bwk>inN56BeiryQ1%((fT`Y#og$xZUP^!BhWVqf5^|>?%%}4 z(oB8JfT0(sNhOD<^$L8aT+|WFLRgv>^p@2|oNWcN;hpNCj8hQ({_L;qKMVOtKhGhd z6S$oy?^|w$|2-D;nnCcYh9!*_X78xjywrTCRupE3o9a&NPN^!GpAP3OCISumRq_hM z(h9?Ubze3AC&;D6L?>laf!b0}!Zmfh4^!6hbKv9o;*yeR%%o!DZCn0QRf>x(H6G0m zAQ-s^PP~L~({3!%Cu){!OP48SwQqD>CB^y;PMD%++Tl}Nt8zXN#8_c^@+&D40R*zK zG=W7o?gg%4rlhHt6Gs=Odeu4O5Hac9fG+a05|6pFlz%raf+WC$w1gM{h1ee7jsN#y z{srLwHm!~SzO?_|?Em|N82GGJnmhHU|G#hlpTF0@gX9BK+{k~Na`u0I{Qthp|NRZe zVOWZ8r(rhH*8e`i|M%Ux$N1mRY2}7l zP*^;?7TT$%ufM$2Cg67Z2~Y~4-2J7k@ojj@XY4%z1#3783KTO+iG;<~MXvVK|KDHL z{k{iwTpIXtWieYO0kE>-qk25{n1%%GiTOx$>_B$q4GcOoG=7cVZkYl~s^?RJ^}((^ z=*$blBO_{d5gPqvv*iVCZ35GUeWwOSM%k_thiet9PgtaJH1Y%!$MF~mFAhUw*iE71d`wBi`BHp$49!(83Uu_?2~YP zl+E~3=QX-S_O;Fg@>y0{F0rL*)s~{*GF=X^;IWVYhIqL;rKi z+UNXkIOPZ;59I#AK$dBG-~buVvRKLuuj{l8BlbVlnL*j+*7PcHPmyE-VVO>88)?%l z%QcTe{rU`)EaDuA)4^sLtdY!x%LPRu8Uf$aSvsNt;iWk1%u4Q>r-3UKvTVr-sZ`;s zTYZV;+o(ccZUQa;G^}lhDki=lVU>8@S2SG{k#bqpC}arfN}-d)e05EBoa%78`V52? 
z#4gyo7z$Pq5fPoI|Kgg%;IFvdx9F<2s2KrYxJ%2*P>VR3&wfCS+faGwpWJ63StYpy z4rEeG|C>Ue^Qka)|J2lmi6bw?r*fj1b`J-Br@f2mQgvG3L8G>*QBBK#jwilK`j^!X zIU?^9$I?WVQPI%8Q~_s=d2i~*uO>vaL(z%pvIITiZ%86{jH^LZ89a8VfQ;qfaqM3? zkCoz~syW%2r(_?(>%tH+94v7uVNO&`0*I<$|^m+9Bn({}Th zM>k4{r{gdp!HAPI8MBAzxutHavqk-p^kewAp#X0c3Dem*brx;goBLHx#$7y1wXl7$ zDGOmJKP66QcxwT=%gCl=(e^``e07{tzVxV#XLV;tqp|nLnC3{97MV8pgS>Q?02}WL zTmlwdCVd`)m;)`T7tydX=Ia0?*ATLjM@`i?4wrg#gd*eCFRIs8UmfFUC zg`~o6>3WGrJgZNW@O8Ei&c1X-2SG$a3u$~Nc?MB#7N6SA;=?QsrfDdBP?U@3zidLV z25MLN_>M>{%UWsG+W$o_YU=8^2Wssimb_SLRr3m9->WDG8LjU{%%Y&otHui5`avGW zf=IhXEWMbnMI&`zhtV%S?OAeeuFxIyxXc;yratrW!GXGZpA@_FxsFuKK%ipE{;JvP$#06R zf<)KT8Qrb61$fiS;qoRX!Q@x9)U34B&lcQ@aH?RQ*b`BS$M4r#W-Y|u6ucxD?pIL) z%qbqMPo6)fs>KN^L?490Kf$OSr}S4{aPSElC)?`M>)+aQW7MhC(cjuS#Ai%e$P1NJtx zRkzhpZP0Lt7>HaiMeqV{BIMk=Kkhfjv$SaZ^67n+?SM26fJwyFA9@zX{^^md7r>i_ ztu?{-NJu2~E%*6iO%zd_0DP&$zbQFIv)!u5Xgw=<;kPI894 z`6w0e)KN9x(Df`MvRk0Ov*M6NV+aQq?(uqM^cN-u&{^bDyIpy+v;(!`5Q=rlG}ugJ ze+<}m^Ix6u24nEbVGiOWie&0biFXTM7mFt5yk0`oWJiAx9`xfXC4iL$ba%56#bfch zDe2}hV8po|udgOP1Z>~D{M0g*7FIr|&d?(>fA@Od2|h|%DYJZWBiwtN(c(DM{*Wg`xH z$BwI-UCUfI*r+anYSm<{#iHafHt!tib7D~$$h== zP)sUJX45g!^4YGyatwu?)7a<(Yx1pB2Jf|pN&$y*w+A@uZgo4{GC*cmT40_Y=2PPU zudIHxi^7_I(FE|*TH|%*UhA3^V>NU3=o~px*tYlQGRe~1Gd9Gvc9r62Hl0j^rJjs91wx0A6@SB%B>X=U9y)eII#8~ka5 zo4<#43jJ%cCU21FhZtoFF4;O9Fb#aNd12rc$(?Nyw;+!UkXc0Iw_!$70wowp>DOi_ zQWy*$bbV^Z463Vrk)l8U?8(wQz7j2e9=Kqh*7vEQd&*powdeCIM4~JRj@JjkW5Myb+5?TFcZHS-D%HnYn&~xZ-4Nt2bIX2 z`kwo>^#4p;_IJxD_(U_h5g2T~nHSPVr@ef9Ov?XO;UGPB#e@I~4-coD-&VcjuOXyN zN=hoVvsuN`ynlaw=Hc<8>bro2;P@=;vy&23?Xm8ka|i?=oZP!ii0Hq%u6M>{ro{qJ zl6Z~KWThENpqn1EUYiEF*Yo?~9?$!+$}THt*W7En*3}S*3Owe4V<8W)_eWC&sBdN# z@k}Uz413u`;HZvT=_=>o>5oNY7}JCPStp~h1j($Npq#r7*76G|*3<08;_)zr-K1`%7-u)$0d%ZwC@2lriBu-#74a0p$LHCEQSy5Y_~?1XU%_Y` zWMQvqQ8?KtH9;;M3{>=30mUke-f(1PIDPB4>>F$?ov)Dro?i_8!ke1{7Y$$3G^C3F zBZTgHuh`K@iZ8AW8FMEu+*(-v-u{sLUg#KT3%C9U7uVCw@ zIZkUng#rJ#c79Jky(rE}*qQk-PzI|aBk)AB{;MqA8W;rQ5x_IF<(nJ*)Kk<1ackNu 
zJZR$H69u=Wu`G4OXJ(Qz?LoR;8Vibw7;I+1xtR-H#-hK!%2ccm_XI5;jQ`eyEOa(2 zbQ}V6dZwW6gHku)grA zb(8q!EZ24EyN6jXsOH<-EV|2{aU%q!@~swppELC?8!5;G=}m7X;<1U_`Kq;O>NdEx zZNLgY$>JN-Pu0;@x%3Yy(lD0>G^R)E<2T>nr0O*Q5n(dfIf~Y09&Bf|#cilpc@j?| zSzkCmEt30mvxdz*{-`wzhCA@{EWHcrPPl#+HSYGQHNH@=EO>&&*hu{~=^6!)Lz)vFNld2l#X5)+*0?QTSZ4S44%%il$;T|4=w|>m#tyM=qI4 z%3k$+aULh7UsSan!>^-|_wOv>#bVKr39->)oJVu*QfcvuRv_k8Qd1)qb(SEub`-lK zT5rFU+YMsWyOc?99}=_q)qwJmrqa!{6B_Ud;Y=I>DcOI&XDSb(xHr8okCmdnsK5x( zTtA$hGOXlq0O$f{oUeViub!Iyg&2`;DdJ9+n@2OwUiEcnbRk_{v%F zT=yH=6Ko$~g(wMj-4O335E0t>M-zE;Q4ioE+u>{WxH~O(#>7{kfFo^rrz-bOQr5%l z#lLr-uX2W;{%9g$IhiuhE{rj|0ZbYY;E&fQ+H<7K!ntT7RWdjtP#K7oe z9i|UUikefQMcg3eLm-x!q$GSH>82BUcOjbVs!H+GrOt`^!^ydey#2a#n~(Y{@FDZ4 z?WF0mPt)l7(tOia=cpTGk2zykM~8fp*}6K@NSsl>`Nkuxml)8~i`b;A2riW{5P~Q; z2;ft3*W^cc$@mJbjHmtmVstC3pIZO-aD)MI1KKrin1haFF&_L_Sot)S;MO!Hv=};3 zNVcAD=+Z{9`Jl()P{ZUm;TYXVv3i6N48dy~g7CV^!)7KojV)>8f+n|-WUDJ5j8xX) z=Ynd^+_P|#g)e{iv|K)R4uqzjf8}kVS(j!8vS9CqYJt3>%8vQRNGE*~Paj&cp}gSVqEw*B(f*k6O<0T(s-XoY zM7PkmQrBV5i8QR#sIiz{?zo*X0#N7~lw=SuZNEf)7?iD5BS>Au3K?x)BjT5X>23K) z;$c_&nSIFvETW{;IbBaf@}i_H#%6jA6h*i{CNZ3fAH1fZmtDmPasz8>dv@R%k*4sX z*7^{eb60$+c*!lVP&9e@36F(zeay-a5XL^E5l3cWT5dvRsk9EhX*rN)OB@@Y3EZ}@ z)GW1K>iimdOH8Na6&&-&sfgI2u=8bfD^I`j!5CPA2S4c?hQ-Q>}|+g28>{0`Hggv+ctle_Ok zlbty`-)}K%KRK(Xa5GmTH(>j4!&xMRZVf2qwR7s>oWC0SF5(@MzU^*@Ol)M|xQzw- zNinddPStVRI+~Fogwc_@ePIR|%O6J13|hVTp1s`dvg*(!iACP=Se<$+#vew9ux>+b z>1}5|+Pd@4o2P=oaG1>}Zp+5ihpX6=I~mB`dtDTMFdm^#n|lrc)~`G%+j4gg{b{sx zM_H&=htpfsSV2^@rfeXzZ;gXrFV}xl%0=w@@y%`VD=|`gNXAl)F`z*Apfz(UMCy8ILPe@Bb3_$ zV$$i*5qZ3)E@mLDCg{c(1JGs{OuwM9kMV>8`ULReFkqJev{?lW>*~3y&F=$tUEX}y z34<+#8n%=(`Ei}-Z$~&MUKIfy^?&mz1F}RcB2=vIgTBUhmkOsy zk%o~c*t5f7-DuM%2epuN(BWg+We9+%Bds;z)2vLKdX7I|sjxZepAl9WFH|(0Lik-OxHG+(qLk^i`aoy)9c9NF-R$j4^yQYC1-V}T)-vu*+wr7 ziCE#c%e#Q&t%xi&RT6I@nANwHzeXIGECp6$HEo`q{nSl=0?1zqXuH#NLyQp(>zx8; zg!$DEu^W8NDNt>&7{o$*AwOz9o*}-3GNMtU&ZF4&_eAS^=wRFgbg!dsI8x=n@v0I1 z%&F_llbo()-se;(JG3$94H|Bz+`ip}O-C*d%cj>Isa$dIXOn$LI7yN#9$q+3%mi1O 
zl@^i8)G)ap^ld5rCyW&Q%DkdDv5lxyJ%yVlYJmxe1 z81V}tOAy{stC>+{9>w^bhRs=>McK+CZiuV=xN|pHSN*SJ9pJA!V5Jjvt$0RLNa!yJ zBhfGJo%}6=Kr+Q@=k728W2}4FwM9cdO8(cU0OJS3{00WbAMM?a9J10m@!OBF3BE{o zqiO*z@Gvq<@6XhknbsI=Y|L#AcuMU`Y?4gJc5vb7{fls`H`{KECS?g&x^Dw%q%@k@lY zW+t@Wy_z3a`Qx%5pN!vsu_&Sr_;Qj%0c{4h+Z4^RKIfoQeL3VTsv-4etU#KJ#)axh zSX>TM%gogRX=3wTzxFA)I%A@bCq|A>z-ENjzl*_NmvQLc_)R&dw&G?m&UzgBUJx7m z0Q7{h&S)Eh9Nhdf&P=%5z)vfrKb{U^wPII+sJC6-4|s?9A?z?K*1y5KdzEi4^@m@$ z*X(xq5sRDGp4D4#c$-mH;VaOPTp-~8~7erpiX)&=g~N*kw5kE zpZTkvef;&sM*JP6G91ho(5HS{I`KUEHzg_CaX^U4!RkO`j~@04NWeN-7&mbJI4v)5&HmLC^^wH+M8kzM;k zv!}XUB48T#h%II+_I>1J+0@wb4ADA^8JmAXN#Uqd1HqiV0WDiHI^?(VAzwPt|7HPL z`j%CxZ}?s*ifXyQSWLyeYIO%BTR{R!mUhiLgr=_*u|kC`Wu=@0U3=Awus_?;1JkS+KdzR*Qeig*Bj8Mev|7 z&stGl`DD^IF7op(V%Y1a@A+dZS6cN76lZjo-w45ANKhPV7VWY1`(nd!P<`;+%Rya< zCE#JLkvZ+=Y}1?a{0d}nU#0V*J|JZ2dU;?C9&j8>SZ>a-U94NQ%K9RI^*2hPFd!Fg zc;v6zCW5Cz^KrVuP?FMwUBg{6cQhD;Vyt2KZe2S5FydNR#)9nMDtW-i{ZFjGQ^~8A zvwYS19Y!g3hG}+fIz2g<_gRG`O;7`+wo6Bl_r6C**BBA@O@?Ti(OApY5VtpH<(d?c zoLTJLO$q-2TSUKG?KJ1%jE%A2De?wfWpfjR|7RpL(!(m~hQ+iI8$f70MUy-?##d>1$Zh|*3D{W=I zcwr#!+gbn*{~uTc#@wK zgXb%HDMWnVli^mw#;~%+$HN!%XmO3Hm4Tg-%M=wvOLbgh6hpzdn|~}`6SRm1A=aF%OV)d+Wv&Roo^{CgsWa5La31@EntT^`T=-58ltppKJ ziljG+JzZF4CzP|aOL$5AObJ~;`(R~V$EaM~y3>KCfsW$)_k^lk+2sf^!?*e5s?5kH zQQ^@>@oW}k*lGXJ%j0`KES&)7Kf*oBJ!Z7fQ8ku8kGvV%JDT{u>nF@G?t~CZ&-Z=K zVWH1Enw=Pq;g<_wWqJ%lzm3F_(uW=69Cm<|3^B_-p+XWnyU4>cr*4xi^wcPU^Odh< z{~7H=w`_D$^BMprbcd$M$4>W%X1T^pZ7DA82+)!ee;i&>BK@37DIMrjPpH`y zhDZ$I@?9VKT}h<0J9(;fI9tB>Opwho&7Q=uKX+DZnVmc!PtaF(A(s}^#BpATFCaAn znQc{97NJ2l==iCFefDYY%ZDS(-MQA+r-!Ul-!0R<^eiPYg%YZMw;6QWvfH=Ph^RoyxHADExMe{jz%52ua z5JB{QQ-Yx#$ea8yaq7EMR2?R8;=$5xz=8~OXDd%>z$Wszvt6c3W+4O~Ih*h5GQ2sU zidt1a?+{SM-@<)GoHH}-jj(<9%bsPR-F`n;TpR`>7f^@b2ne~`FpK>|j(qdf%W&)- z`E8&Sv-s$y(M_Tz72jnQR_ioCo;fHFAzY?@s!5~EDnGAyJ z_nc_0^8G9Dir>iadOrvEO(z=cV!!kO3}n^=#^3wEGr^XU*5lY@NauK|>8i*4u-H`g zzt8{1Zhy#4&#J@b?&rLXl_VCO`@ZXq+~;^IbuQDP>n$w_$3v%H~E8_HZ 
zIB@RtVcq9YYzhNIqef-<=?*NL+L;E8n{kM@KliW6wNU*P3r4N}L@Tccc}TBqWsq|TocvtRP{`xN_@cQ zmsz{*Ie;I#E=9|(JTvHlVxW0wjCZXdz4y+aro!()^COWFy23pF^~bC7W&iw z2ccHA&~429N?aq9Nr3brUNNv(u^QCdsY_SaB8Z^^-I5H?s1U6D;ZDAvl;; z4yCi|iCUQygln-Dyt4wm-=PB7PjY{w(7U+wrK<)Wu39p7rF2*J%b2+Kc z1|h@xZg#Y6OC{RZ2K1`iIzp3kV{rc(7Lo(+18`oP?UJsF3bN^D-W{kj=WIOU@?`1SDG$S&*9ZIqxXvR2|06<0!U z*?S`AOg;URYtk=_qYv*A3Y!EFCyq>l$Re>9M!ibe{k9wDOFz%|S^5d#7l+rF-(yyw zXTL5R8~7emV99e@PhO?nLq^}~>L?Fi5v*WyV0w~KsZayJs8$2cqIp_7#%x>Nj1C<| zKRmA?!j!qPALywojrO;Oy>GK$tF?>+AJ5d{+SX(^2(|%de~xk<_MT z9s{?7T?q_#>tWFl9-ol5RW)9#4cs_NO%1I-s?ubYw}Oas#=bC~R(amF(rm_WJC77} zqaU*0yw(hW6k4L`E5CSQtH@GjQeLY{;CH26-;kodA;hxD!n||g4n>Ueodt{qiMgWBSk1%Q- zB5yA)QGj0v&dHzAZA7OZZ=&U_Q35^9HTv`Bc)FHLR=%7UHOlyt9JB&md#p6^#O10C zA>ZsN132N5Z4dy~@b*oqB{5D8a3jXW-KHlMoS@&AEWw)Nop;hm51M)Sul3tGjaKcD zmGp7@byuT$#=a3tYqMBF0rYh_@8171|#rg#Yz4sC+e<@4+VwCUv}%NrV)yY zgZz`XFc->8+-}NT0szR;DACn^O}|apsEfe0RcX1;?wS9FcF8UA4lar4zLFC=;^Vr| z3;NCJ8wGOUf$tVG3_lwFIF@dgbor>)+L(~}*gXD|{Typp9)%Z+NBwwH#(x$j$p0)% z+kHpCVEngq5u|gIEF@@g{mR9Lw=1;@OW`5!=w;rfdPJaw{d2r1YoDp#(y!7-X9ayY zSl*tWK=N1bDfyl+IGFCYpS1-^K$K)dWxdOviZUHRR4D(OSeAY5S&UPGbxY zrzqXa_ud!j3#wV0<~P0l=$Mj217eIYV%0n6qoL@@)%xhMSx_g(<8?cg{V&SiGAfQJ zO4n|j;3T-a1Pd12-3dwgmI>b-cRm-yFe}Kc&3k+ zb7?8Ng>j_n#<-wx%rVV~O-fw9&r6!Yw>c~|i5B?+!k52^B|wZwCt-KgyHflvg(Z=1 zMz!w~)+&6pci8VLX8f)v^MQ9tPKTvHR3rh}TZ^*+WLBcMurp7L-ex^GvZQvEgRYzM ziWDX6cCGf)0+da`3UO~oK>~OBsX~|Su;)LoRJA4wBNm-D;yx#T4w!lF{gmHcqGKGT z_FRK-@^()OI)l!kv5*J5U(d3=Y@UBq$G>}~aSdkg{3amNqO0@y?riXqfIR_%+d}_x z|Gs2Z20S|zLBP!atj{5M=yV*T$Vj zjmZ1{Q+EKSD2mKBS2seFyF=M^gV&k&YRbCsMd>X##%XN9pRSK+^gYf@c-=&m6&j_i zM}Oc&@`_XSrP!WA}aPk^26@kI^uXQ*N!Lb)fx{mmBa9|Q5Ys-%)IH7;Zc}`Yjgf8 zW2Y^eH1Dl0*rjU#&*i>8e0_CdB&1#U%FI@3sXZX8K6zPl36l9Kw-~t^4}( zIv#N?(JxL?$fiF*?xIC0W${JJt{CIfe!EJpTTU}?%cg(Hq1np?oy~RBx<(7}!0xf# zl1?%44b?Q;0T7)Lw%4R}@2BMIcRoSb_-#H;`)_?=##2^;4%jW{kYAJ?hmLFL?k*1? 
z)(^G?a?p5p6mMYII%I2Jz;zM7ic(ROjb`cH8OuM^~o1P0yTT9yQ-b!(lFy z#Z?nNs8rhab*IZL8{ha)ZDCH_>GrwBNIwl58>&|8an0<5dAsPFotWJh>ziO?Sv3%l z8*Gs?JfWjMx4kN9g#^A^LpmESpCTJ?z1RYB#Rvm~S|3I0?R}XL{ zafhZ-Fu;~*FfCT43n`)}XEUYQ^ubj){^?C)T_u$Z4oV?A_+@yGKUUP?LGkh%B-=-N z%;?Z=>)yG%>rRg#Uc;gQuDd6SC1(Q~lR1W${ zFQJHkUa+Bl`I%?$`)^fGJY%QEM*ma=j}U7iaJxK&I00sJD@xDg21S7Kz7ph~{29z( zrKqv(_Jm;`GS$`J`v6xi53hC?_Oqx{{&=IUX=;xyI0+%BGLvbe5l!IQ=Wt%-z7lp7 zSb5DAq;ywt7;j~#;iLpJUt+(Dqz_Jdi3;zSSyQ~^UWeO zs_w-9Y@g9p*jsYEh{X)>DME5F;WU-URxwq)ZgP5u)Epm!G;|ki;HoZ9))$yI=0@Dx ziuy=%J9ft^cKDvX-@t9xaz^hjy%CpLAY^$aO5%8#vNe$KON>veAowJc zXZq!+qOXyu2;pJ(thZTPW2+B2r#O*GbShHCe6=>vR>Mf4SV_o&P5uGiJRFmtqT}2u zkP^q)#;AMmx5uTb5T4WyYu?v^A8 z$~ir%|1)z^36l~{drU@)vso61U-ek>y@#hWK6L`j2_Y|qpMAp;+ zIx(w?-9ElUk~?2XFQgQkWhRJCOt|MFIeR@`Z~&B(4MsrjB&Eh;X>~t!r-82Sy%{-j z$z(0BT4MlUX<@w)ULhllB`Y@nWE40}fARfQd>O0b{o${-M!B2 zhb_lGKakQ)T!}4_wv&sQ;Z~fjlJ$o14`XlE>K)>D3Q4=u{VDZ75pON~Qy(J|)PEdD4-oNvBuj^P1zQx*m5Y)HK5Ru+9?9 zzK!>#GMKP0>HJYH0`Ch*sq|{lec1$}`dP}4-yFfEl5KHo&B@_3*0|gl^IJGizbj5| zTwOeYlOe^0g)sTc)s}NG{mNIFui01j9;3{vn%-Ss#$pahN_%HCg5NnJ@wihvpXghc znLppeThoT}b+!`q+lX*pSfo|Y%5%qw>Nxnsofs&OTQGcpM!d2ROQ;n6HZUtK_|}n< z%TK>1>q+*5*AGZ%w^M556y*aS^(`$C?#FKh7R89SALpPB`(v^#Uh{_~_r|}L=isAf zYHAqRwG@on-{$3AQX_UF>Tr)prdP4GyLa~tEQlGE>jn89iCrG8|AG~#rmKMYRoD{b z|GdwZ9Xrj7`HV1k7NuO^Sy^gbfXPROW0FG12Xb5=^W(}_bDQ%Pz}pva9wE9Z_O*bhuxDyvHLnHSve(E{WT^Xg?^ z=)ukeVV?TEg2%Xns`>O-7FyFPa4I_rj$;M{>2`4gLbM3;3_69#pHh;mIA zl9T8%q0X1{loJdkw$FWxyNJDi;s-Y_1z>eIuSb$ zX=`(j$JY)3FCVOLMQ1!b!yV*^CXLslX6lEXl)#l2unjkF!ePu9odY)NxS5Ax!!3j!A-q?|r4~{n(>V41p`H^}rAn0$+>Qr>kr~ks6sT z2|Cf!7kXQoE-`h(x1#N5E0-yU7O7wG)X!#i1SU!y$7grm&ZiV@ak!eKZKra~Ux4dR zce~~ieX(TLq=44{_s3v9>X8@QU+?(bhszt6@wA+o#Ah%H!XdCd%FMmDd12-2A7l zalQbmG9saLTX`tB(xOt%6_Ts2XB3&vjiIdeCcj^H>gbnGdp@|uE(v2lgVEwrc68Xt zARK-tbj1wkj52v{c|iz&`sgn16M1dP9(~)szzj2G%tignMK9-`BE}*()+LTZB@(g1 zEGzr4S98&IQVY)ewc=($(Boxvs6m^CVcV;|{RVnw(e_9ViE5rd6_QUE2kLApNhPyG zx>$yeD#ZN8I*ul^+O=BdKay5{kd;-}q-Zab^0KLnxn?mS^}ge1Wj+i=QhToKu1!!( 
z+ga9)YOO)%;MNGT2#x<=;EaJQkd3(G_!aD9A)m_x2Jt44u*L;CsKhc4_)KSx$ z$5dq}dVhg_ZBF{uGCCW%XBvY@splCl?wJv1q_OuBZGa@yf_!mF2@GFLGMJa(qaD_ z?57x1xB%H03gYkOXIbyK6Dsd8F@4zot-tyBjM(3qR8Xa~S!htd){?j4O&h-1piRnO zU3rs{CLCY&*zDwi@A?sO4idjtN)nbeR`g|`Dn+&+I0zL#u?h)SgGZ+7ki z*m8%0o*_mlQult(N8=n;^Ob60#p*yt`yElX&)!C#;`!d^mUXSmU+M3@o?$#ISJ_pK zQS|+Y6sqfvKl2YtLBn217P;~n|_(en$Zh=YlRWHeI zuN9rs_Y`Ox%a&ck!X^9t((CUCBQsvu3|ztJHH#u}0JD!^WK@Rl!Dec3dSUwio}^*d9r zc-M^IYtwK-)UC>}9VsPduvqL;)Q^}xphQ`2G;fFR11 zNR?WK5jWl-Ph?-m6y={f6E$Kt`E@M(9^?ng4MK73_rR6V)2TKUO z_A2|WF)yfmW2-$tX>|1mdb-Z4zVA3xk}189aAAGC z3ojmrG?=Pvu|lJIaQo20=8fHlq)E^Uf?oxHZ4$oLvBG#*CUhp2QD-M<1199g1T-eq z!adV(kd!HKuBJ%VeyP(0OAVpT7%OG-{M6>?xf;$~9Yft+6qJHfz&s;-^F#AOO0R5+Luhuws8$?R>v8XnG?aj$ zz>4q2;&*-@&F#BhDjo)O%^92Mj;JD9|2I{$#{s zxpu{$z7a|(|3){NN$XBk2k&8gnw%IY5>i{^l;35JtkO554$o2AS`dvMcfbW6?4?Ul zDx1N3EsAkV@pm7_jInS!L2fJvsY$JNQ`Rqu_TxT)N6LXFUm%e@yl+a zV&wDMMJ7OOG*#ZT*4)eJra$O2)3?3TTm3TJaFTbzw-EC1?v>G`44IVw+kS{V@;Lqe zWn=%Y@0lFmN#q z^#Oy4>iQ^{_7BY(7Hb-MxY$23W-7}Chgh-RQ;lMk0tO?#QdqpL z@(Idpn~@rq8qdsOQ=3rM@H)ZhOY#8W%V+jZJo&$;&mVHIO!b+BlCgs48T2>a!9lqp<^772&e4yvN z`w=ipXhhBWoAwhDSYHK%yJu4RlJ5H@pm_mZmfea)XG*D;3Fc*}zI@-&sxzmcLOC?ep4X*qw zs(YtcC{3KD)Vr0~;>WAUULnXQwjp8*)#NIm>tR0rA*6L3PzdT8Z0jkcg7r4e=ojAp zdO=|Q7ZOiLLJ1E}$2YbA5N6w6LZ<5X@J2D>TDyhasJOC(W=w>;rUiX$Kj{k_?LEd2 z(YLfkyIqoyTGeBoHr3-%j68d3=wD9DaW!en|563UdmWTU$9?s6C{a5# zP0i+CY|t%#H@%sTCLlwE1C|+chLDqEqh)R`Ul(Pr&H>@PIhME2I=|{Gs;ly-Ym$f; z)AFCKg`E&Zh(-#U_I*M5s*X!1?a!k4QN2s47<%199x(_uTIf`*Gakz1cKR!PZ1HZs zr^a1#ewGU-Mh6x@!KrpnW^77O#a}$7_?mQwDg3PgN?(N(igp6}3j4vuhXKu;OP$LI zraVYRjsBga{$3;;l@A6}WJOBdHJ~XGxB@$X73Tv}Jp0>|*-UN=G*d+E^f!px5umvf zS-;0NXS`UPwm9FOd_UR%r^{w0fIUh*5=ar8Q|(+erUx=)v?-V^ut`{kbn@=}1t!5!BWh+UzLI>u0WIdpSgB~~z6jdLvYnRC9 z9BpuRZEG1FrO^9-otC9OA@f>uw%}#XZAs=_qAbrZVlP4So7rg{pNM& zW_wTO=DX(D5DOzpX@wG1zfr;tU$E;$h09I}rsM6_6;Mqv?tsjw2q+)ECwmiMch=U` zV@(6zNFb))5wemwC(%Q}Nd1bes&!InLte*-LmOqgf4>5`+jVLxrNFbq4*S%iRlaTe_at39(8q&(z;293}J3@Q#Jr2N%N2dpxHv|1^+tbW!n?y8iIiE5q$HyW?A{>xpN$ 
zZa?~zCya)&bm<*8W2sw;zJYup*c$GCI)Gh_lj`&E+uB~hVMj78?D!-9&3g8pU%S`rV$7N83s;z+ z5*-ZU#lSpq7?-N1P4$inCJ&uo1U9bk=v~?B@_6;Fv*I*Y(Q0y)7A{i;9!74Oh9A#9 ztU0fmOq?+pyjHM(`;cpk*)46W^P{wjMAwmIBa%aPT@Gl5`>?BwyK!Uby;p+y^LJYp z!t-0AhSd=wd>@rEOo}biQuw5lETS={BntZ*hJhu}+p0Z;FO;&}sn<8}`J1grK@cJIPpoY3oW}qZov%gE+mFZy66Fmi@D9Zc^S&K^g)&d3z z>B+mL=l_ss@%zo*UN{6RXzpsRUP@D*F^ui+2l-!}^U=*!l*z>4+_W+(!CitMTG;td z{qbvaT*ip72hn?;-mzSA+>%B2hE!Iv5_9{;S`Hvl+fFPs)f4Ejce_d>zK_V^3ivrt z4~r5-5#9_IdlEMLY9i0#)=ODh^~1Es`!Auqg!Gr2Hd;aM!Tn!w+W!FR5C$xF$E9oN z|9@WkAGH7PstA9tDzbvqgZaO`DjYiS)scRTpoRY}T|Py@zk$Hdxf-F$xpTqYM1b;e zKMl^l9>4xiGChPg2ElgUmW!lsczbIL+_jw}=#_YNb(N6kezCrQaa0kh-P=|uAWMe#j)-n)?h_;LTy{J>)5z;gHa;P^ku z>6Bu>4eEP5xE<{cX~s@ z`pjQ$y`3N*jrpcA~FePx-Li>XXz7`?*~IVea)RYZDug?ws^KmkNj{Dyr@_*1@Yz)-?`N zDK96bqysM&zhlhMu2@1o1uxJOOS0}3ms@Xome+qK`6fYQm zxm)_Zi^q4Jc_z~`X{+>feLTx(UN3M7Q7|(z`_5Z)cUWRP?*o`Z#u6kDirpaW^??>& z9uDVpEoV!Hy{zOB$g#i`#x=KFQx8&suuAS8`h8nl+~xD@&bAM6qy3%#oxXlia=g@3 zVd(yZqyaXj5dNR^HI9n!3idzxT23aj#vMHC0AbG|U1!1Fb)4lgSMZuE*8Sc({dK3Q zWj(Rw4POfb<_O?B$qFRX7&<-eR+_IdA#|;#rAS{h>9}{HyO^A~|Lz`mf&B6KS2wpEubxho)M`V(k?h>#^2*>6#+zdjOG+x$uLpZ6rR_=wEdAS^ z|7YooVM(JnQ=O5ndbM6n9?@c=+(?QWv38W-Ao}rBJ2eh^U?FKTQ5e@-727X1u%-VT z&!HATpkr)`a`@u*mOV9Rr^9}>@sBxU^xzA_A&(cw9a&s*R}?fxRD1UT!<9&u$E0~N zGOBv!`SUIU2jrPr#&6tc6=A>Hu&PogYrEY6rzfwhc}-+x1Nms)Mv3=63B`(cr`nIN z`5ARTYuZ5M)f~uT=9LB~g&^F<5v@s84t8u2E*6KP_~UiT*ah0dX>52Y@mm3AQEXSr zuJK}n0oN1@$^xQGMYy7_LNQ%t=_52F&jyk87-iIz(4PzR@>(H{{0~Abtgv0HtqnNy{YlT#;n>O z7@OnOnP^5Ws(KT=Ko;_3A(rU z2waWCtU}xP{dqR-N!Y6&ob|`^+>+w&mk87-;+X|Z(XQf z2Ww7wrNcXUYMmqwW$Ao3MhzP%LnGib8F*RD(i?jCAfR zm+b{LtF?T|&LgQub(GFQ>O>ZPzRO>(SP&{q{MI|#QhdVz*}oYD)ljjG1^!h%zeAP*7M>o{_5)eyeJm`{J!P)>E(OC6u* z91;_gS;J7U5+h;=4hjt&y2@LcZnw(09nx?A(!(#QYRORZp?W&gJMBh2#R=4mmO>bOpOco6&cr zHTd+9n|L3u+j>|kzORBh4YE8p+vY*qz8^A`bKev+RoD~mbhPz&Lbv7r{Kbjg19OpC zguvPF;2^nC6gco50Ori)Iw-H-;poso#8L7&9TDg1bn;p1Mp7Y?$5$%^-bwkgg)V26 z-$9{`T)@drq{neLEw0}r=)ogdXdep18vWN=jUEn(P7J9`RxUm^>1xp-a0FA}SLTz} 
z%I23$Kk9z)|AcX{NLl@r%hC_Q@U4%sJ4;EKK`(qsV35?0eoLd&We25R(s`K$n@0Ei zGckH<8EwjiHkR<;=OAa2N5+=Od!Cj@VZ)i0tH9QMNa||AycJ-KZnEzvf|9^X7-)6| zJG0b^Xj^H(v83^UJR9o^n?*-?avUubkSfRRSN1I#teIl+$>rtD5SMig>on`nlwIv@ zO+Bbq_NIM5>InE&at832M)v+HoL~WM#D3v?lzn~3>EKYb{JuWmyf`S5ZW#e?(-l>T zA(p%Xk)iW<4JB(N6^LHsd6H0z@;kSRoB@a|=_1`}-nzlvQ+utnK}-{br8cvZND3NJ zxd$@h9C(kNkXb?AL87;REcA#mlLDPjr(bfFEnu2dI&2Ed>-u_b-;E2n6hZdzN^(IV(#K{ zsx(;C5#mxA;rfkTZI?6FSW&}U4OBlF9866V z3~9Y(j4$G?Wc)aQ6qMhLvp$Kb^+mdXr6+`Mr6vNs35LGw2CXgh9;~7{YxF*+nL8<~ z{ZU8DHyLFl(7EfGeL>OUUhVoJ=~|GjUf$QSXj%F7OBostSm9VyXfny{yMvftk|Q)-iyI5F^A*?qj1mll_O-l{)-lYJDr?6DMaWrs+23t2nF3oa`ZzH+0ojhCJ!Oe8D}PaK!QRJt z;uqUiwTs*JSO2H#Z*Py63TJGd z)bn#w(^>ucVjMxvGdK|Gk=wF{!BR8jY-;D86l500i@53|w3aH&kJsBJJ>|HmL1o{( z=uTjws8Xr+#>9=yp8$3ilrXMv2w0wR!}XdSQhtHAS{sXum0pE*XrjbL3AJe~Hu}Y2 z9P_^U-ZNKQztmo;fMe||ruRl{$#urH$QqmqPuMnf9b4Oanvz;oY3})Tb(-k~0SBVf z&979HzASlLm06jwaW_AB2P*Lf((Q2Zcc)imfZ~}kC3b4Y;YZ}i7@3S9uFa_1yh{6ndDLNbX9?3fEa8 ze?8ov=Hi^EI?% zt;H}9RC3+E6JvHw{(8~Jnd2&|<0Q6C&3C1y_dz@ERv>e?cPs%Ex9VR!3vP)uz`?c~ z1#nc4On7}!r_2MhRpo@}UnnO`96*lhKWdfU^WY=n_}&-V6hQ8Sn+G|4_Exoih3uWn zW0n|8%8gvja{o=wJY`P1-sapK9~~p(kDN2g?<#}Y`m>Y;6YTc%S){rEKBJ(oTGXc% zPMz%50hh{p6Uth4Lu#VWhC|E^2o@d!oDS2K}pj1%c)=Dt(-5E8wuF3?agPX13S zBOrv*|2fmn>M!fcY1kS-`j~$WpSStEp&xj-?$iPAnuufYdFQ;X1s!ADLAZQHevr-j_|*>a>U=5mFPrY^*2*@|s0ijV|+(&)4Hc6Ni)t zqJD7(EV&E#F7agW`ZfQ^5MuP6IDR184;buM-W|=YJ+aE>tws{fxVEjujJm{HEyL|q zkXKG{iKhP@i;){|cW$g2<~klz(^<)yR%I2w!Z%1@_t__G+m2>8#pUbBH(nYG{27_~ zgC@HjOprr~yWsGBwAsL)0AW8}6W5Hp=75dqC@bU@n;DVa`V^L(5IG8aL=TMH!b40a zlWeQCU$(6iKlkZNbKGCfqi)k91uUVmoI##o<`r!_+_@&d`3WDbTwh&$;xLXETlIw3 zw$Ao!SC(T{ef?=}er`1Kv&;ecSbBQq$_ii0WlA;e%zRlu31AVuXVM^jaQp8C1o8VF z@!2%r{LiB=Mib%kgTeRsO}kI`FG4Y-vtvnGpI*1@4#2z6G~W1?SORk8W!%4VWnZa1 zqtM>HA|o6@!mAJlR>Ygjx8Z>sn35TY?62WD1>h&#SJW+f{9H?1ae3>pBbrdGNa5Qw zZ!1@m%Oo+DM%;O?J@htC)3gSKMJ}a5jswej0c)<_+wHP9o*$d1z5_4n+jpNe=1D7J7LNc|XUI&2JGONjx4hA#uqagqLaK+n7j^2SRyzWtquhZ&rHk{i#lD*% zu#+BlXw)jF>J5VFe$E%RGLSIpr`nqBySSIb+0z6xCD1UW_{bso^($kIPcCf;RMKRP 
z)u<)j67C;|&uiN)rMPR8I#vE+s)MPcBO+g$^r%-rkx90KyR^@CX2iPs2DAp73@Y*T zM(+^}3-RGB3rL&x!x$uqn$%VYe6muR;E-mAELJ?=5)`NsQtd_HaAqD!E!K}JS1>_b z?KlCeaWvN$oV#jD7Dg^VTmJ&A-ndz~_wU(4LzCPwVhTThraJsVJTl(<;9vHwGP&`W zRtf_fn~~={@6hAssLf6p#x0-uAnNGY0ectj9@KlVohkse&q%6wE?Us+Y|`Prvz)Kf z*7FgQFgXI^FzV9iyLu7fz>hYsg{M9%X#)z5@9*!AR_WN06H9eKWm2I&&W2rD8KoPL z{tuG%P01M_5Qzu)nmk#@6iS~m=MPgk-et~GtIZ;jY#Y9**6I89PQt8a`R9R44TK{n z$Xem!FLD0la6mh<%aX(H?|1QnaWn#wO)9Kn#u}9QbbV)c!M>&cWrlNJmTOmoN)?ZH z>{@sc!6k!-{%c;#ntu#Q>DcR6l74ZTqF%*BJ#?%hbm*ymM%2+qAAJ(70x+n9yv476 z>fOh}^~eu^nz`2|jcrmc1)hePOUKwjr+LkZR1ffHJ3NSXkh^FdCt`2x*vO{7Gm{6! zJ<)h}E(L~trYq~t^3A;g_p`Lrbh=?2*bQGdvlVE&k|6n@aH7Y11roYRO9z0o7wp{5 z@!ts(_aAw?DBr4l@camWjt+d zMbVAN$uHDa6!c*%BgFMPF+LV!ZHtMYURZFC22M|9D|cDT;U}}5470F^>&#dLzT?Zp zzZ=s{nA_0vRv<>dP~LpN;oHhIIwnQuY{o%Gp4VJdIm`FE@4M@_OBL>wAw~csZ{+u2 z_kZt9+v41yQn?Rbb&Sj4Okc+H^EBpeqUFMG^Rpg7;De`k_m57_jKlD~8w$NY+cPZ| z@%P@6B0BR-^Deywkq0=lW12=G$Ju=D)#E%g?J6{6-os#TZmSigB5DHlE$nZFA2%i{ z+@}Y!u?EXOPMe?|9Wx<_D^GL_!oE2-zl?kpEa>?i&+1VEBxkTdH$<<%6&QOgg;bbVWip9#U*MVpo9oOa%L7jOeB|Xcx$|1TjFKJ2Y^`_$yz>6Z%xiSfdi+y8G5fE1JSUq}JF3cV z{u18lDt+=5NJNr?AiU0wM@o4PBJ#cXyfk`E%HAyU)r6w3Tuh65tkle%QI@1(lP#!$ zMI4aQsH9Zme8+XCYgI_tAFr)5W$U1ThEY|)bN0!x#nzQ@E|#WlKygYVC?`9pH`*vk zzO=KVb;^^r*duQw(nF;5#C;z!O8>mp+qxQ*b0IOOR~IiItrpJNO}u1R=VZR(Y>7!_ zucIJ2?r_M{y#|w7&zRWx*4!Nb!>9Jc)I<}Lf8mvtRlk7uo^SpDWx%^QmoqyajW&_S zfqb;i#qe3syA(L~({@5&jid;3K1^$QP=1lF$H6O@_i)IqsuJzJ+Sp^fKw8I1{_qFD zL-U)dxIDI`1x{H9`+YVh-}(8^{MgPKO1=|e9p*>bcbV*kY-$0D;v(ZN;5Bq}zjCBv zq4G=1njhhYq6vQ5S0WvEH$qla2K|V`P3hW*{=R!kAHCQ4#eoaQJ=hAa2&_`5T-D(T zchjZ0$*+SqY0b(mXDOeU(&J9710U~~=gwtn4+M}VGhSIy2mUrO-MOT%40sR1>$!SB z2QgrNzz3BQ-g*NkuQG;J=y$1lf0(OPj?b}I^%v&WsI|W8J{v3ewDTV2`%$)=b@-SS|r3lJPfkM~LD@>w56p-9& zhwDyOz28?4WrN38lnd(k=_ve+_*6Hakpo}I?%X&UUS^6?KEpP8`mIgJ1{I>8fkxUA z610n|-IDfq8=4{>$;!3wTIcd1f2?6)%m^L)vYMbU4D{Q zauV!m-FIqi@1OJ=O`9hQDus)s^Va-3i?$M=JN1DZ>600vEKggP6Cj9Z7h9Td4ES-% zb1GXgAohRd-IA@5*&uJ&@RDv7AvGaS4#>G_0gB&AnK$(bjeQ@ClJN 
zGnJQ1XS3{WB_x$-97gluGOCS87OUXMz^UT<+WNawDfnz2d%4P`YBK)Wtv^?j@#6^` z8=i9TKWAo(z_~|NmP%unsl!{gxHj;2XK&lq&sRscnKXf~=WUWMCrM5+qaZ%)a9H)dX>N z?=FrV9zZw2(kjSIX&@ywuYUOU={sF8LIIsPXFI5JZ5=P|wZ@V>gy}D$$%kC=-w;g-YrJUV!1Lk_6FwNC zWPVERPkF@;ci(6BbH--(DZC%v6`SQ|e0eb*U#7@%Nnqo1kUylX3-Y1`f*v)kUpVe- zTzo9e)0!7_T!v_OwR4s{&h2+C3C;oM5qm<5_!T5zIG`Oe34OsExbI6xTJ5-Z7bVGz zX1_3SJaqpYAO_Z1zBHawf(r2&`{k5Z1fCl6>bxrW_?$CdlTiA6L%0A+vJL90UP#p^ zzuf{*Oym~*?P9U;uvhT?$MUPiyvRmSy)ue?L4|m~s#OOSgrubesU!q685oPWx(Bij zqv<6H;y2Gh*!?87Skc9_$JhdhKjzFo!6ty)eD6)C+YS7cKRAT%AQa`<*c)bYLA?9^ zAKJ+NBIH(z0*_K-FriN>8Ce7_D3p)xH9BW5F$4ij&G|kUhhJE_r4sfnD*>D=O=zYf|EJkiu|w=aVi1ZzZ*Ki~8xZhsEyq182XuggZ@ z^DqhPK`d8`FkqhPz}3pw zKPwR+trCRbCGm_k0*8d6Xh%CUtW3IgeHqLhuE zMvd-enPIB3u~hBMN8zxeAG2z@*Iln_;W2bh%tsv8@;^Fv|D9Z zhs;aJc$(QLzr)xs)9Dg_0aS{t#v0P$+1g+$OBt&OGLK)=<<+Vo*ItyMnJS5r%NW}r z=w_FhWC?U4nWKvHiJ{kuV;^W|p@;Gbmq;Q6(aqkJ4Tc0UT&C~_GzKKy=wB{Pbj-Uy zb^li>5NMmb@J|8Iwb554Hb!&xn@ODj`hX7Tm%27uOH{;P;NL$x@eCP3f6TH;8Oy`( z8Ng#Wdig_&ykjTWG3a2n@Q#nJug84@#&eG)CIWM%=m7IQA(% zOSjZB~ZZLXUHsRxnXy*}4u8mRO%3rA)(u6%;qx74dGZ7fkE~ zL;390rmH6=dP&i4n_}vKNK3s2W;=5BUjw6!YiIsn;smfN&m(U71jDs@#Mdn?H>mw#O$6RYQ1gvqZ!~!?Or1PRyvr4KXdpIJ;MMJ;fet&GwHAvli zeHfmsfF2i4N#(Cr3H)^${u9jRW6EbD8IoS&Y;JYmUJt4&M;)3HysFZPbFzY}Nl+U|snq$g z`V%-o%jQVV$_H=Dpi33iI@RCyK5chLQD}iDh55)s3trl=cAU@EEW3X@n7gJ_FleuZ zaXhTevJtMr%Y5~j>J6fBL8tjb0bT0)%8O~_|0VP?<+?EHKDI@wkG!m;M3$mOPZ#6+ zq%K52b^rqQpdJpw^E9wPeiKQ-Jab1i{93YpIy9`&*WugDC*(NPe5-!NNgi^fI=b$t zThS@47iKJFXpe_B(so4c4?-@(%ET|!<9h`47&@PRE?@LTJ$qpCCy6mU+>_<^XO!j@ z9Ef63J&>#nn1TMF#tvJqnxBc9FDvg_H%rW-IG2^fjV^#rE<~gjjM@TRf#au0o~m67 zufE)yf4pRWO+ZMUGRXR@SK?sC+9VLyaQi%b|FX{mU4kU^JGaZ^8X_tm9yB)9zi|%; z<)!oAkySLc{BWwuCy* zer7g$-UqELimV&mnv5J0Yv1*iv!TQD1tCzTi@gjj-*JqNY$ZfxsdW5phL;C8=s%qW z{}BuCPWD$w^?~{iy;Z*o5mGyYw3(|IBJZT+17WDZQWpV$6w;tY(@tlS%b)?3Icgq@ z5%wnr#w;ZPH1Xm%i2O*M8`) zX_RnR_9*eMN(;r*|BR@M!{MW_r=rrpODct&C}cI;W|jb7m~iwQdA=8c=4L_HR$SJy 
zvVLCval5R(%sm%@h@5s!LWL)}ncfXqQNv9kvY??=WfEX{@WV_epvylePrbWTiaVwVQ{Hg>ud3O>qPiYzoL7uzCzMTjiJ3LAg0 z_Kj^-WO6k#2#-YhuF*Y%8h5yA)*AWrjFcxcl9vq(l@B!laU7l<^BUD@3H17{k!hRz zshk%XUUhiUmOt#%&e&LtWp6ey_g5Nc_(Pw@G0e)5kG>CRD=9f}NTe2X(zL!SYPoOv zYI-hliE_q7A^!L(PqB*x3X)%rJ2`~LEhLl(Tfp+NNsR}e!&w^mV3F-DYZW#etY?hB zFE1|wTAYhJYz4_HBI^jU`&0IlO{)*G_u-+u)J1HDa~?RDrFpJkQ;i;rq^IkS0zwo+ zm1i1_mHuP-aqAz!N?*G=*UNvF6!1^|`PMfO6ObEzM4 zZcLxD3o$ZR{L8YyrrOt#6*Y5Mg;#*m(y~a;;jf8XFW?^TS}t8y2OfKVDlFDZPL9qW zuWy$Tw!T>SY~7w~ysX6$Z<$qjyb`Hf%KW94hHtwDz`w`$@(`z%f5O{d8dKa_RI)5o zZ+~WigW>%1$yTpk>cCSj`+^Fz| zeuMNEisJ&-+p_ESN!OB?x$fn=p>o6BuNDP716^drqnq<824wFf)Rf5E*Im&p7N&e7 zt1AofUwVvh2v!}cr~P2gECH;ym*&BK?TTM{wbWk7##|}m$8bNKb5lFTf|*N40IlWH z%%B2MqHxBGtuf}<7fHG46gN_mVO((R2p=EsKB-g(_3m1E8#35z4e6smx^3GH2ipqv zti$y%kD^+6KG3^Em-?&2D9}qJ5QIO8gU_`3$399PlCyP+ky^xXI?C%Hy%Jr{^mecE zmUseULh8nDyn-GA)Gs0l7plTKUY;J<+#LYc+p$7TkqRQ8I7fXVzi~RTwZki?2@ABx z4$UzZ&$6n0vF$tR0ln12lCBa5PDShJ22w-!HjMs2I6Pl;i6W#VEhkf#05)pQz6B@J zx7)-Eo*|_Y=z^4w#q!c zAf_8(5&%mXi=dli^!ey|LOHI{>{eQC3>RIuVmR56%(&Z2P83m5u5guAqREH!z!H4D zL!Z|wp0nCIy81A)33p*;30PuPmnhpBbTJ zKw(?U70iRISmeX?CV>Azp3Zp_`NbCjZX7>cZ#D_?P=(*F=d9_SP!)(mwPlTTHuG4e z?QFnYflS|#NPJXI(9@x|j}wf`2h+tKzDjQeI5mdm8+vZayk^H#06eFA zEM-n-KXhX~zC$|2zVF5l_#Wa#bu@G{E#nc+-nmGiSlp`rM1Hq4y|#IO*5S!O5PVfE zooS-HYzH3c?x#=PcKQkxqFtp0Y~<6-WY)OW{gu2K-n}7ZGr6 zW?psojF|*ymkrm0u~%P!R)`iaP`>3;I2Y0FbX+wkH;fsQTKw_YoooXvC@Bqk%;Tl_ z8{X!tMw+R8Nry5zNV}fO#vA!ZI6YN$xb^uReRA0}19ul*Px&wXPLOUinK%_@B5cX+ zTKlRaa?zuPonkwyzCC>|Y!}Tn4*i`&r}kT#FK?ZGDK8qfVY>K&wSFXp{xEC>h()%P zT>RvMrG>0ZW~#Wzey}B|Tb=MdW|)<3yT4xB+$5Wk=~Lfb7@ah^JUcjYf`U`!_j;px zS(K7sd&O)$I)UXwx;%ggy87uREOs5}t}mc2_$)Y|PY&14^kK$YFzO&H?%ZJouAKcaXEO+?_mzAiFfar*8ErLNfXXuyTp`Fnlr7Rqxy z{2ow~A+V=Dt943Gbhxwj08gWI}w6C_yBAR$0-g1fr}m*DOa z+%0&}#@#hoBaOSeyF+k?hCt&kr}OUp**WJ{-TUwUq$nyWnl;y)bB$+=kJyUX4G9-% z-}Jvc>fC^ECMQYGnrPmdYgf=Pma?w^q5Mz=fq~ga{M&Beor?ZEi-s#snP~{d_?c?Ggb&B_S(~ z0KfZUN4G*gnfXgm>tBz4KPr_oRl+JL@f+B-9T&v 
zgoUoL5mCI$>2(*TFALJ)<(h#Bw*9b8J(aY+w$UPMi{Hhc_;t1u_>B2pXkq6@M~I1n z)520rL;d{x+(Dd2P0caEJvXSfV{2bJYXV!SC4@rBlYBc+3AeLRRVw>kg2P^VM@-~_ zw0F^uz52ynD!9xd5GeBMJqK3~jI5Ebdp=BB(Be5oBxd2xSNMgME1sF#adyd`(hO>pWq^roF?dEUG49OXK_ ztI_@OGFV{ogVdoc&eS_5E%>8Q2B&?pwxyxf&!>SKZ6ENFK5hYq_gDtGl z^V|KESGDtqJB8;h=#p<>ynkk4p>!3sp{`-bXZ=T{-6O-Qw^=yxY}!JMCHZ(O)XRHp zHeCE)_we7EGbjinfJmtBVS5j7T3%|F0k@kT*TjO(b6Ib!Nq0t>Exg|*&bKPN_p<)d zUm&gIAuk$bh7pt|cX^8)VegXzk8LlfA(4z{lr9D4*wd`((i={+tPAekpbD%bAEn1W^M zSP8F;TO^au%{bcgXJQF2n8CpUcJ`Lw&y#*XZob9C;c4l%d(Ts*l-5z`_}=eU1|v&{ zTl%WPG;B*QCx#_>gC;y)KT}Kj-4+Q!L)>}L^}?i|jrA%ly?)0^LmdhekzUS0K=~WZ z>}i-|bU$CtJS+*=ZJ_Q4U6+nkPSUQLg+YBssqPKE4q$yFaQ*3YQ5;D@0mI;4^PgAqdLN$TqcV=ap-m!gFhTJEmru# z9Dp2d5n3fE!LTv>1p_I*ZY6E2O(hwIkfRGtnfOB_N(RL_p_eNarxkfFSs3h_$c?V~ zd!~*jbn!z9k=&2eEo+%Y_rL23mn1i<(8_d3sCbv^1%=?N*+TGKgc|+L&h7*-t|_|e z4&Bq7UW1^zq{%!aLg4-U?6;A64ZU(H10s!p@Wvv)SFg@gLLfNRoQyeF=i5X?t1xb5 z0DN&mnbU0api(EFpdZ1q-meRtYR~#u?xzbpJC#2C(OKh}PF~xlmnqApsGyA%+2oj+SzZ`sDA@Th+tJY!p3ZAdQ8DCsmr0&2js}*m-R3r}EA%}tti9vLWP_*WI z_Aa-n4T*Ld^M1q`tOqRe#=7P~u$6&9W|8FEgUo$;u}O4K!e$S_Lz!sCSwXK8*E`!O zQs`dZ@%u?*y${ozR0HJlTAcw@fhV@DWS{YAA_PllF!W z4ue#e5z=10Nn33T8clsxPt_IUcO4!Zf#^d;LLNlEC~}GS`p5FoD|?_;KZK@}Mu*F! z5BXA$z-tph%mcl#YP)r))#~?3oqx45KcQ8|!G>gt$#0eY$kt0r;Q=EJWl>ud3or01 zVr0_vP6+cCi}cL*AY?4GCo&=hWvm3V=_U9Ycw7=n9~=Mt<1L`?LpFK)UW{$aYo404 z7sl(jj~4OYaMWCAIO+p}ghA5nx#hvo=>Y9-)oQhV!>yRu^>Bn%Q_Dbj1^Np%&^IB? zszNDILK6}~L+H5WNvw*hrFCK9vRX1BrPiW*NJXl7WFF3 zWBL?7t$vD6wX(Cu)b@cNd6Sru6GxtKF=+JOWk@;JJ6E?1etl)dM7w&|Fqqq~F9||> z+oTXdHv0nmU;=N7sf$=xthT25NqtduG&gYTAYV#cywt!TiJ(MZ^3jt**X6m`kg0D( zwW4MXB_h!^yL_=H5z0ZY0I_7bZ&x;>wDbi_ zu@{l9w9~|_3BYs*@_IEWSe}i<80o)LW>A1^)}_p4r${IT=A_rPMB_0R9~r!M4ZQC! 
zXb0|nW@80 zZ-V6?3x{US$6AB|dKoM>Ja7@}pMQ2;|H$Jr{V5*qa%+MP?R0jX(H9a*6ZV+Lz&Uw{ zLE^76tw?h`x^|xvQ^D?~K>-IfwFq8mHX1n!b?yqUsTW=JQvchz-y+JTvm=2*g(?$O zULjuYAe`J&=W2INxM&|ZsqZZN3fv{4ieU1_;+{?_ZOj3Z6QLH!mlr%D$w5y(LN$TB zR^T0KFjjWZkeueAp5t_XI7REDq|kfmCE{s`rJiUWIE!;PI^9nlH~SgN?@@4Dn$kI4 z({u{sgxX>=@g+{r8cZ91+en7kJ!|-=bUk$+`g1 zQHnKQc|QDw_Z%*D&eFJAtpN4Osi;Q=fI^dUqG>yvq4~!8vI_AE>+L{yspucVmhofR zi-wf>x<(ziXcV}PH(ctvnD>af(6x04PDEE&_?vRVtkdb}MYn9~6K?Ih*E_G*9lCUN zoK6wBe8q+2jv%*#0`Z>}AHjmRPEEB5D9=WBm(G5TyHP?EH$(sRDM2F!#WyKLNy^|S z;(Cm*Ve-H9f9V-lZT;ha-5=+M=~l38IT{uz{MMLydM1!XyX;>d?FS@(C2~n=TfGyc z+$F)dhyI4WT`|Kq>X(xe%;yqK7%Mfd$HNH1Ogc8<8)_Q5CB{t9M-7wBKO(W7bh!Qw zh^A&{G@uN^rsx72I-0onnG?5ZG4829^$9RHwq1tJ ze+_PbtKHw-#=qyd|Km#%3Ycdx{c5~_ro8{-yZ;YA8;uN;+{Hs%wIa&!cR2ii_hUg4 zU0B??)zupRAO0yq9JqG{-w%9Wixa(d@s=S?70+QB9xKFO^mCSRwf;StP79MT2*?ja z5ip^mUOSy_b_;M1^qupK?LDf z;yl=Q<{VqsTuBTcp3;HNrbKRB3-`ecWBPT_U$cTH+&c*?EhxzKev&bEOUpyKrcusq zE)k?tqr!InT(R=A=u$YP&aFWjufc~BENzL!<)FCqhd?Kz@KSi=`HnbEg?h(Xa`dqf zR8U<=W1}akHzqe6D+WOKIE5^J3n}<6t)l_>W4}^qho70uz;MDav+(;fZA9@To+(;V zUU#pV+HX&=aN1z&*IDAH?FPdun-ZI)V}i+jZsFgFiL`+#wf|sT#G*nCkQ9OIB&WkE zBLhw(N2K_)#OS(gmX-D2q&U}J1@-ykDgS|SIV!eov_JepGU*^U96JCWNO2C}mz0Mr zQ2mZ-7}+{~0DEZ(4iXj7-#cBl8(93i7 zK~R|7CEsINt=UXa7^`O=qBJyIGXzMb_0auzF`#2FBP$zW&+euSH$gC}_~I@S1aIr{(#h1?qlSxL#|({)gj-v<`$ob*iCZhwIUiiR7H; z!@(LK$&yvCS09nn=oi(pm8~-AP3|b*M&^C+N{kvG{0+!8lD{9BGBPdy^5c~r|hJH02p_=pKw*4ZDO)XU)TMEki;DZYI^%( z>z;7X*H+E?CcJ}EM=*Bx?rR#Camd`873j)o4!!E0TVJPI+HFy-Wq5(w_K5@oB`kaX ziv$|Tx^xMRZ4};MONCq#ZtIiN~GpO+gZ6|ove2A!VwKN;R z8OxQX*h0^H&5qCJl=uXh^xG%ble>Hz`h6fZvDvMxqC*GU%4^1)*o3z;x)X#pf3{GNmQYP z=3iRifUK?B;1n7%J(j_=<3RZgQ_0_G^~A))Oy)V&Qa1~#kBFI=in~X3{%OjXWk`ev zO<#UzMiG~H?}NJddx*YezA=Wq+$MmuG59@P)^5cQJZWFPd{6q~=WttpCeX#Hyg!jy zh{!D{`K6@^yy5Wd$hwU;?sc76JDC)d<(2(OJtC#!xJdXDw?*u#+huupohb}0vxLqR`H1un^(-aYrXu?Fd%!wb9du!l1Oi-vV z&u4mb5xok{U1pH367YrYCV*=vW|qYQ7n z00$xYnhTTTUR9EmVws2_|GK17|rjtDV~k2Xa&_{PQV4?*M}2dCGh^)v@VdOxK6-S=nt7%Y3$dCe6q~ 
zd+*83W%%gbaZ|j1m9^gmM(s>lQhsGk&KcWvNsI8_-%iWFPP6M|Ty%NdN;dY`yY?HE z(fl^$odwE0eO`Z+;>V``U__dalnG*Lm7Dk3Tdu$8OX!47PK3?3*$}!5^X@133o3eZ zYT$i)IIC=K@m@IV&2OHnKTZ1=bkJ>bb^P04dsub&X3T#dm38mlC|17-T`qa#jDEL~>okJQbFtf{>uy0evJ-^#8|@rupP{{DC;8@_;i(vpcZu;$iI_f;92tY zr#nZ|W}0?PY_k0-z<^i#_upHJLA^=np^u4@>x)K*-=e$c*k5GL?altz5faUJM3(m_ z#Mw&farGo^kJ>j-WDV{wvNo2*0jiAPr@C;T?c93UG3}c?@!~+3+)2VSz^d86>l%s1 zHP`)l*_Dru61LXY_ce{(F+S0T3CUxl+Qo<%ug}o&Jp;yNHFHROXmczsAPqlTZHhzl z%7I_H$3Cf#!!@L~Z`Zf6%{vD9i+a(}gRrj8TtuFQC^L5gBg; znnYY?1OCzS%K6mw@)2O>lms06XLSpHs-HEyt2u| z#7aOY@0#yv+`uQv)d;qj4P{{3U)Uoln=gEkNm8?Ymm1q1JQGgtMn0nnV1P)%3YyT+kZCZODxkq-(cqxThaM(KjHV zQ!tJ>Sje@RCxv9`X7_=h+H%L<8C~XZZ3zD<4s^KWQqEf0andcxc>gM{V^`(NMt>0`?*?xy_SA&eF?HRv z5p+L_uVKPd_Fj53D5EmKU*>h^yee5vK~6aZj2re0EQDPcrwF6ISDeuInxC(6d-lYU z{gZ!?`9C6G3Hi=dD=D72=wS=TI)^Xsg4TRNXVD#n7K#zeP2n>=dypGGD53!>lpw!EKxNNdnISUE1= z^7i|>-Z}Mpw)VQl*GUuX*}-0xGeA1yHE<^o`aUX~JmuO%0IbbA=Owfcab{u$?u@u( zde&as1YBoui4;Km>>dKOBt|B6Y+|CwU_~}5y5U(iWO;$*(!LTqT{?Nvz5=v(fghlQ zZeX4bE_Bb-B^@bz%jC+0_z%06_Y-uJv78>6ONzvb?8vIU^#GdoAl5Ift+w)$l1X-Q zR}a%9ihPVmD0Jt5$yd1%0rOK5(;4Mf88ky%>$Lp1FJTc>_Ht;Q5k_cD#zK5ND$Swk ziQmjljD80SpQ;L4bD15v1wOFoSxfSDP=8MRPk0ZBiUN^$%g*lej48>4jF}3@>S*Dx zYMs3hHuOEHu3xO@7q_&Suh*wr+K4av@~>r)dU*`&$gF^o$gPB=>w$$*5stcdF@(wNmL*42v}<*53ClZX=m-v(Su50eLUA4OHmwUS(h`?$u8J z2xL&V_fECfJvGYF2?I`6Un067;EaT(r(xnFr`AVj37ybXXwk|>j)*e?ixqy}abl7R z@0^c^>^{Xc5}GV*{Ei7Z|4DYql3J1E zyU4(3Gf7hg%nL}z6(tw(K1}8Gw{5>1HHZ^{0(+Dv7iu6=ub)t0?<7jCIb#bew)%$? 
zV0Q=|DlZ-}XhA?!AMD}`=8K+Nw-`a4x8FK)Bs0iIVIOEV)Gen;_!3x59Z}lsxq5sr zYcBe_UVd_N1hA^QmnlM7%?4-^X&AUQL#_gvFN25~{JhTL@A0;^l%T*K70Z4+mjbp1 z)v<>Y^ERucv;-gI;UUW^Q&~yAP+*WIr`5Ak8P7mKwd=RP;wEZ0|8Uz23T0clD`iQqU?4fEY$ddlQyWv?`Bg=>NvWKr9Va_0^jSq!;RtZJ5s!0 zBGv2dp2$(*zqsDn#i*|CYH_98@lo4>=4$12OXwJEb;&T#LMb7j*qd9eRkU;*}d|BVX%(%(~x^Ecpu`jJ}o%s-x z=#12f!kkQZFe0^qt_d9W8n_Y|dXmc~S9wY$4n{%fjM7ZXU4N|h&8d}}bu{BuNHhNh zcSFE1FW2$f3`S3Qf;ldX0iY3;y4|-*CvXAp(;{KN+d&mmDrF7OItRM25&T={h9iX0 zVay$@l11~^e=>(j5`NO2JR=~-8F2hDT>0j0{&;5EbWpAdwnuG=RmPuUMhy?Pmv=~n z3@%X(2wx=yCkp-lO!Z-wV}7Lj&R3?9NC)o|7-9Ri3|&g1T;j~Nh1*CV%UD?m$m9=? zQHXI;*dv%}`ZLs0V2aTmhe>V30ZXwcCab7SNNUsKIK7~WLV5sD?PKAen4Or1{j(gZ z3eD}YRqp2Iak%@{rcu*#+uPV|B7_(v&FXfD2z4vSFi<0_UrhTC5)i$i61OO#rCLl< zmg>Ry`EoOLYfcn!asWh2{@z63h4K{>DJ-Cje_lWSU3wSG z3A#2$T-(-1+KMYfab1ACVq4Ji^v;b^)x&n` zf@tIS6Nel;YAUdlCPxDo$TR-q7@#5yS3hGKNMw}0TXRYCK;0Gb)H2CXvbSIfYem&HyWUS*sBeJ3Uo0+^A6G+_@wT`m(l zSwF4jTQQBTLc@`EpmSOB^vZV)Ve&xki2&|k}?t0`KX-9XQ z&$Fj+0l@o`aDo@>-X@>-X1>oiOeK8W6&XeXu7d0E!&RQ_{V`k_3zK#2A@a;J3wS&0 z#hVymyvxhMrbG<>wY3PN`c2H7L>g2O{Mp-IwVqqq<}Gjb})+E-alHuP~#>>K}i zVVntsW;FJE(iRX|?lr;|B2gF+oBjsRykkkfQaw;)xOj_WiWYagBJ`8Y(DbyXl>w*2E)S}GgJH))Qd;L0dz_<{mKSBagfMwxeFkYCO4{3U zdVbwdR4${t3?USPo>%kEd5H=s!2g@^%$pphk1;dZCXziOup}DanKlJhaLDdrZo#!CtUMqvqqP*2+Ux! 
zQ3p6j;MWOPilQ>UL^RT0Mxg>Zr8TtW8~w~^oqf;8#9F0*)7M0La8`?!b)o4f%8YN5 zwd*oJy^jDYk))-IbXZ2<19M}?%Ak8woUZg8IyOo%+~{=5*%0}}TUqr|_U@PYMo}6h zwK4{vKCmrMBEksmHo&sh#68X>k(2+auX6FPEKqcS@zyhM`Qt~N%!s6?tXR%Ii zc8prH?v~FZLt|+T?g1>NPg!%oToL9%H;Pn{9o3SmK@lkj$C4sFkjpF;`^p$-X z@&+;`Q}y;2%4=%}qT6Vjr6v(6#9N7;tws1z{}g>6@%#49_jX6gZw;{KiG%s5Mmht= zw&M29*YUvZp1AbxJXCi?J4_#oa!pBa^As&aB(Sa^KNiA8T(aMq@stzUwY=0iJwDV6 zHU{TjIA(L*f9}2lf-^k;%SDAB#C3L*ehb=;sETJgo-4s`>%hsB9Yi_obZIqM@*W6m z-Sm!&=~js^hW7o8_{lzv!zeUs4Lm^v)PlXmk9X<`IE&qTJVJkeZH%L!VR~8`RCP5t z*EUOESi?>c`N25g#lWADp0SJF_cizG@+ErcwxXD8e6r4$cWp!ehkiq^JXJ9F3lVNH z%KrFbO4Pw)Z&e*DJ_7LMT5Xg1G>G7~x;S}3>kz^X^hj?r&+|YJ@rsr^-j_3fc;ba1 zt#a7{z)j)(&bxoOW+8sS6Xx;Bu-Qh#c1bctKgoS}`j(BtuL%ZCju`S#$*DxE{c`8@ zzCEP&pL(JI=ZV4nK&EFUj`hyWpk@yiKRJtO7KLg=le)ORChJz&G3!k{z|oFOnr)S9 zS~;W-hCy9B)}54|(V`yXkcw7Wf8rR&HK`-%nSb(*OM7+h+lEO^Il_Jajm5O)jKHU0 zJGx>FpeyEZwz{SndmU%5GLoI_uQPk$ESh15PVo}k^O$Eb*4@Y@!d-DkXt_6ZjX zd(E60pH|OUF4Z$@Jd;_#Hyol%gniUAa1*JkN~|9%U@cv1@mUHj`WrCp7Bv}7n9lF#`p7O< zqFtU?zZyl-w6GDrAEl3edou7%&*ceaU0X81q4lFPm-mCG_&W0$nMOO`j>MVz$^OpK zC}Tn*MbmVk@ix=st6tjlaT7+AyneuM5+OaJisVIgKH^Q3-Gg7LJg9W4ll(q$Ob(97 zs~gr^e3v_?Wl9PB+kZb}u_pr^WYv zQpcXQQP)N6gN(W{{7cRzL@4#pv2u-%cXp~Dub<0+hiSke=iKcMHnV5`<{NhzokaA< z+zIE%cN6tTqKU7?e|%w=EREg^$W%cPc;)RI>NbDbFq|9I<%N;cBCr}2nx{uY_pBRY z-ozG`A@T8fYhH(!UwV=km|gDOC-T_Lu%rZhEz-`n>0T4L)}Lamd(u)=>a3 zN$t1!&!CMEK|*xV=st_O1qNtSq3Wb!QT^!C)L5`uaff%NH41IIWR21(Ta@B%PTH#cZA*K}(- z7^<`5&B{2?*qF^I2R9v^IAbr_yKu-rv&lLwI=*poaEyA6eZz4`($}w+xhtdf-3~3!y$yb?nI;(9Zv1OWuh!XuwR9UtEri@FCk z_XUmhNJf1uiIfcnWJpz)v%2XL=!YMGqXj$cwO_q$!9%SioT|2KA-R3i-+};A) zTb?W$sP=Af@n$`hXvF)q!?}i9!9MNsc$#*&-hnX^PuR#B7fOyD-u+Hyd*ZBy6On03 znB4x?f8?8~o^dtThu!{#CH7+nmP-65mI&P_%aH%?RAPP|FhYlcA|GEC$c0rgw!TYn zwC-7%*Is3qY7qa_=Doq95t;#!Qg}Fa*vuGVEgGxMFB2oOIyONG~)xn4HG@n@e;e@nT+wLY)_TDws zwA}6F#QvL1d^3k4@>U{pa#Ieucm~6l&FUeF)ET7#FQ;JSEeEu)7czUK? 
z&caaMDs>v$HBFc9vHn%ALz0RX`r7Xi zJ8yzBWQD805Es|R8xRNgbfmWtf8#a@QOUaPhDDgKu=ITpoGs7RNb9JrJs=ZQc?$6l zD*OtaDNfYVi2D7u22|-wIgn1nX(-xOC#I*{%;nYtt8JVM4m4s~HQOiU2i5Fz^1FOz z@0*`r@Jfa9hGQ5p#QNhSs5%{XNYbB-&)ZUO?CD2pJpQEwjA5cgKzsk`+i)nxLBC_f zvYtV;mR8bKp~`9(x2u8<12@k8`z1m61(d|MRh*8>iRKpuIlD;9~p|6-B8QT4@YRVNtGJkSIoAcxejOM zS&sN5voXDEDgv=%q%woU zz(zt8?9?ANS!0(fE0=a35LEOB6q5i{`g0<&(4Nf+^B^QI2h*6Nige;4>5L~b31sybbZhJX;(I$&u>MB z3G{Uv(N1OBK>1)$PQ@&Bgu->%%MMSA8OwOgt&^Jauc;jFV7h?SP{-$3OZrjQS$(dP zuvE^4=Y8r}5gMkSqV}Yrb%yarsPq+;loV9dLTCKzRrr&JiC7U%2)v;x?*tb`%@EAG zgkNt7CZm3C?=6EW<7P!ZT0}5rFB;G2?>M18%BT3$*y(J3%Ng0T5T8Uak%&rGy0hSU zZHp<^+wd-A`e=9D{?!dLfamh}B9_hSp>=n|ay=H(S>l{(efq=EXr(G6MVjESY$8JI zttPL=U(8YAe`1c|R1bb!@54pMGo+E~90BjM#q^YSu`G>fYjK^H>zg#uNO(y<4}RjWzx|>3BZS zi!=rmDNsS?%`q~-2k6f^8_SQ0A+K`hY~?wYUM5RHCXArWOx^pjFO_Q7dG&!hj+faypCH{`P#bbz zlV?FLA`RdUdlf1_+Je2^!MW7)7H6m7wM#bN!lm;9$7+Y=Yse`1T)or=Ky=M~@-J-g zg%M^=mbKlsz^VDIDGcneibo}b7@5e_%sTB#sr%3m%}F`hk44*_b%q>5dOn4{0mh!w zrg!b90guHnB~}$m=XUYiUKKyqS{hZGaU%dVX^%cM({tYz0R=Ljmg|X6tZl%S;=g8t zmzAX_T&vT&jA9>Pt1^GVbd-Maq1WNv}OnUMJgRy0Xk~2`bw?? z6=6cu;QFCAPgAsGpWcmDxZGDcfpTvBX90y6oLlkkoq3S9VllR$SJhbl-QC=jF;I^%L`WfSXyJ&X}A0j}zC>QUZYB zkBcD{SD&Un$9sy2tVc@NWZ-IA$6XaDPAje`7cFyoEW!QOQt?m9hs7weifrjvlwCeE zl$3AYCEdq1(LC>mFYW=*xVg>^KCPAndcoIZ_b#AIVLJxBuhN?El^!v z(IIN#&a~6@yJXQS)c0`v>uIw^pga7^=f`@*VPoHya9Jjt3X4B*N|Vju%HiCIpv6i+ zyidCKBZ2!!X}xt~@az+h^Zd1W&SM71g_h-IZ*Oln%E2o&bV;C2ebf04Ei^cZ5l4eT z(M8!`LBrGC@0hXPY!jl0CI6a!Bgy=v8M9*l%nwtFuAI#2(*1#$xN^JTY?%roigap? 
z#P#in9e6j*K@m1XfJq!#bJt!ddcmF=M8~xNQR6SDlJbt94P(2prv z(4L^YB&e(+bCUAl(1Ie}4UTTmMSu3TLX4lP#eBY^7YbSS#&wLs_cCEIw(#4%UOa8= z5x9G6{Z-s4v$tXFEs_Am^TtGNNL$1_cdHGk$=C@eCZ3F&?ZXYqRtU;abNAN_E#)WR zkGT>a^W>cu7a@rh0~vj{BojWAKMpz`Ab!DPjH=J8{~EW;6uW{IuT5&8w3I>HveY zcpG0nDq1lB7+FGR7)UXix`Uwxxfh;>mi}_eACbC#I`TgbkjT(uvwnuSaar;D~{$}W{3|w^%moadR zSU_cctwFTEFZY-so=-&>j(H*+H{NTner?3-(#aJqt}!jllC{+#{F7cC>vENOM54(m z9Nsrn(Q5>TUMXMZ2CT3cy-=vnc+pouUJs?SKV+GO*W+(AcjV4)I@#`{m-Pf;zR(B` zqjLd4u-&yrx8FNIvID_~oc3fxxJ9v<+G-7NWdtWj4D?OB%<<$?dLq5Aw|L0h=0J9 zyWc}#Vv1KH5x+U@zt!$$1XSIS^F*Z28-p$Kvs7O6lNSEh03jiQbIHz*NoW(_bm-k| zYjs4`d5b`ODW&Y(??$TLf0-Zrd~^6A=Kj9ceFdt?^7_c=_SyF-<)S~sUTq|?c5CMA zs1sbAA5EwlLV5e)OOWX6wF1mw9h7ov0duM4e%RcwX)2!$XW#^k{Aiy^r*oCwbRb14 z9K~FWbRIRx@k8_RK#>YOy;rU5`E#YMQA`l{5oK?Is?vb7& zuM;l*fx8U$7&jDY4XAi^PZK)N-1h6S8CAXWR(LMq&1~L=8Lb5;j5(WN1eHVTzQ#8p znniurdNLnZ{*dGaTn}hBavSm1C_UPP3g$!0^=_NgXs}wekgEfgn6pH0qf3dGDyL5Sxw9=Yj`=sTS-$WW*+kN^6_^Oz!b8TxO5SL{%&2mL_@dkW zt`Yz2vF+IWsRH-9H8fbg+bQaNi`wo&h3o6xU~{DNhn>C)KJ)|g=j_>&nY;AO1E4R} ziL1KMOxv-Xbxq|8UfG7+Mkm!CbJRr4Mv??BPKi6;K^!WvovLFpRGl|R4sy6QG3J8c zLt_+A&DUN9Xt5p0bf2LDJi_yP@pR43N%?^!FgNrLYutDxy9Q)DE#bOt%S6gT)>+sOqEpr66zHm9;LlzfGV@kXs_|LwN$V4g1q<@47Y0J{a~am^0p z;*^doQbAps_yTr_&H+@%!&RjuTU|`1TNIQmTDVbqBszv~m1hQVxm!Nl9Th=%W^^h- zavzzrlj;!8VOLYGb63iKU|2ACd_&RW>7_&8es#C1 z%bq#iZ5h4(6ZcUjP)7_)W!#oQ6jUjU|MTL4&B;{xg*(z8O$#}mDKVnH%gf7aY`%d< zlXQZ(d{0xYv=gIcyz%wtizbgI9G)nRl^R!4$UMh9)}O(!j5hN5%E}Q}mBIWtPUz*M z@HDL%ijx&7UWJEC(@lX-2ZR4DNc1>R0#NyC2$rAduhHF4LJzh@rXPFQ@-Qa4-TEM7 zfhnrIRSos_#1TmZ0#96Z?1~?jE~~}5Tvl6YF`X|l`oMYcsO8F{S`1%D*wDVh!E7c0-p+r!f#|5Y&%58V-bg=!n&oICbdMVgBSkk7p0?%FCjwFJROo4vKX=Ba z!_#xfbyB*0t1I}7TwqL%q`?RQgKlZd`2j}NZ#vo3$t~L0D&&e=vJVf)$N#y#^~TP> z8qD<5;ADGLeJ=~7#5zzEz?Ig2dHLqY&e*`Tf@#;mTJdW%$mRC1k8o%?N+92Xrw1a^VS>fH0q}$zcN~FkJqjH6 zpvi_0AMzSKu~W0&}mA53MD&D`dmCb~`j#Z|O05T!RU ze#Y?xYhDXU!1EP7(o~#QUJeX_QGQBcjRE*7xtqRuwcfRp11U};8kYF;#eTvmh>I1L zF}WZ33AW7q(Tl@KO%=kmI|!ixkx#s(8(BcsgJ@Mlm*=bG%$740We?A7%2dytkTr&J 
zG!%;4=HGl!Zv{peirSCPUoutI)IWEN=3Q9XDn7zoiUxO?;t^fI|L8z?he7-F76INf;++8-QC^Y z-QC^YgWK<>r+cPnroTPwUF-epE?B@l_fY%ns@nB=9*-z*>BgOkc$_{aMHq(Fi5;D$(0 z7T$JwC5&IZr(<_ewv9`)?z^6M#9E=a6wd4Rzl&sjVu&+6?#86~8-okPQ2n}!qH__Y zTJHVieq@Th>95+}5Ym4ty)+?$aw5_UvRnpkKww#kG_3uXB~%qk)rpvsVGK6+1VwG} zD3BntKXp?o=%R*4Mic;S95%w@e};fT>qKjVc1#;`>8h$-{yYGIlOz8iF>&~ya8SS} zuE7$^i~kP*^q&wvW#Z`$2I&Far);^u!BOW zf*wNs`Vsp7;jOkHyuGcESL?oddoFR5A)MV85$V3{7CCi8D|pAnp>nuCR@K$jr4i74 zSq}s15R0pur|9?$6p%_;I$GU=_2d1!3;J^z5y+rO&}n;*#8??x1iiI4d4|@>XGIh$ z%HuNQaqn*}aL$|v_Tm;MsrGuSDlr*xq(8wR{l`11>(zkY50IWs4JESsV&r~d>e6W+ z&)mSjjOA}wU^tZB8_50ea7remP@7j?Ds!tU&HWG$3r*||oQgbuF_QJj$KjkSf0PN5j2g?c&$|m- zzMp_EF4`n2$F=knPA-rXmx}Haxb}QF@g)s=iulElnVLbgjA@eB@Krt4kZt9w6vN~V z$GhsJFC@aqKfp~qx#i4%1~*4dBpVM+Eg|7?$Pv~*mpvdGiL}Eh6bpucj-NC{ zp&@zgxk9XH)(MB&?gICh4`VRZ6;poKD0>rh9{qHy_;0^GDQMO&oG!le93g<7-bm6V z_W6Ok9B0x=8W`m2DBj-O>`7?ibGgR-zU1SVa7q~7%_WPPlLc?92$*=-{js)X?6!{GYFTJogG(c_H}peD#>9c3+K6q9!dw3$y855fg>3s z5^4|IBEJXu_NKapI&6b)^Po|Teg`4Nq{R`#Ig2EEUUjrxFQj@Jo+>Bm5JDeK%H4ds zu{8f-J8zwRQ*-$5;Ak9Za6uGyxiKkRR)H@}xB0hn7^ri~HP&9~l`^#<1rnNGUX1=H zr$G6+{2Z%DIxT9afrE&+wYhG+TFnl-^f~4WnA8F`j28hCnH@^2UtX*0dmIebWw5^l zZH5yNh&g!W55kiOS-Kouj=z#WJ&(HMRF*Fhx1Lh28(PG%dLjn-_c1F+Ov4R2=a@eG zN{H%DO`TL$v6!9w?g?h3CX(erp&|GW)4ONyt+7M|2>`_@+yT=+P(^+$S2<+JfzhNi zB%Ta}S$iS~gxH)hpDblfpfbD?3*e6M7Cf8yQWR>Z`DY72>Gaf=TpB48I8rFtKg|EbQH^(Qc+Cs--fEYC!-Vo zlSdr=8;>YR_s=|HVP1sty7~QiEZc^Qh7p_70kycSBp0P+q_W%Ef!zeqoN;i1M|Qoz z`MA{+96s2eZr!nU4;~Lz|30e6h;bUiS_Y!n-6j9+opeG0Qx8}-Blyu_AYsZQ06PdXFLL+zqA*=K?rPEIcW-JDP0 z<9*e;hCPX$ln-k!Ld)%LjU(Ez0*N~I)v%cK{ONA{o0WG2Hd2GP;ftFDFo)wRPxiR% z1pd8&rLq%II4(QIIzVN@$TW|Ly(~I&a|lW zHA^!P0r4w!wK}5k_#vpi9KLUfGwC@E`2u%dLZN}L*~oU=M;tNnNwio;y{?x}=?<+% zs~fm8OABRjq5lfy<4JpP(0IP$rm?B(GkA@vq5uw(4TR_mLyci`2n7_tMT4ut9v?Cy z^=-dKGv(tZaD__p$}MOU)3U>-0|bc;o`$7l5}SuiXfrs}Gmr02ZOTmrorEC3H!h{2PAxK#oyII{fu`aD^ByqWgR7yMwzYi6Vbx|FWwrvU8uda0lt_d@A`+p8Wye3#o>9qw<~Y*ulK0_q!& z#j>OBhz39vMF$G7_c#^##+e#ig=&k$ZxIvT!V@f24Oo1}Wrl`^VnQD65)W=+COao_ 
z&cE0H^vh*fn3>t>#Pd9FC4V6Nw{+s4;^=QWnV-RZklEEJlFjSABvn$&C_d3t$pcbsIz#>n5~O^==<7A_+6h|NuD{#HJEu?X#aCJU(n!HV} z=qZW%{!#V7@9ZEfWJL=nOHgkn=>oXSyt2Wh4i(Uzpvf))@-QOk*`FO8XEur$&Xb?Y z+W5dN3T@1O^VCUYv&#TDHYCGAUec5s_VLZHCpAo;b3X)LJ=EI2n2SNu)L#{v^s-X%GrQ3h*~%Q_|Xb zo-kvhc>koF^uM%3*k_=?=XIz17YWJD0Ew2c;v3+lrhjL~vV9_G(`*q}Nq7k17P#%H z9Qa47YceaAL$*>V5Ej|ammwac*B8L4eGO;Wn`BmN8kJ76!P(-S$!AT_rR9x)25t5- z(LjSX$On^Ey*~zRG|?x6_OoBe#A6cwX3)m*$DoZd*OXCz)P4j2G-%s!!D^PcmQqpL zf1y&5an%LsPbc8W#$&zl#o@=V(@ z71|>fREBF)^1QDNJtO8$T8%z)b$m$->+gF7`y)|x)WWneP(jJR{4i4za?AA1dwd2dKn)o1OyFZP=`t#QSs=JeB|$VVh6{YM+YX2Mi?@Ull6LCw1CvbFME z==|1|*cOI>ElNgYbl%U&E}jYB^{io=l47}(CiosSlX(mZ@`%ZZ76xdadUf8@qu_(3h3v2|xTpD5ZFS|?W%#jTw*x7^=FClhO@$n5Qe|KL^qihtD6p5}X)ajio>PqbVCr93p$rkPpT zBzdlALA}w*x!Mx&YXz2GAq^r7tjC6;DY16znxYA-3iQ)(kP)ouJjr&-f$h;GXyhiE z#bmN_Rp#J&2MZ_Iv_)+%El=OWk`~v>HM>Qpw8qDK>A_pMEkWRd9CxA%o&nAfgplWs zwsbyKJYI(3@46TVnHeR$i^<9+mg=sH%p511eC%Zg^VIxpKmH>?F&z;f@eiRSfw(v%+l|r!*doJ== zooi+O<+@>Duz_0)!wF?$U%$FYDJg8MpgC#?~ zF)T|${T2K%XG&_3fP92_*Rauc0>T|#4*^CJbiqVZ90ZM0r{4rz!It`(ImCnwV^F`b z%KJmMA?W3#X0Lg7HX%lv4jKT8iwNhnI$6qE|D!NaiPvsgjnTXhim*E>A*G}uKU<6CeEny{+%VDO zBJ_?vE{vQiCk-tp>+S7?UqD zd+N4A7{d=+q|klH&2~{Fb}Rz0gi3Xi)y4<+fXmk#6+>C&{BLkzvxa&&@~pm4;-DPO zhz0?9(^iNG*t}~;cCrJ^K!v>(F`%5IiMBi759r16?nHPmJ$!%OF?zc2(wXUjfx}g> zn`}r$;xvS~x&8+EJFz-fq1hKw~YXZprW z&q>7OX2IIVw6Z!y#uGpEqP4f#dG){PWO(O#{|%Wb5j9h==1=M)f(H(tAa1KsgzaY0 ztAT(H99x<8OfiJCEi2VaJ1QlXvNHZVV&)X*zY#MPJ^A{p6r^_s&&aBuRb? 
zV1WoDZX{IhUmg!4-UygUhnKR63}-Jc=11Ge6x@H@qg&h$Y3}a1V$uC z{qBEWPc=$h{NtV#Z8{2BjCv$7Cf(ffDz^$O2WMcN46MD$zY)qu#TTwf&hP`pRo(<( zzQwTN2=&L~l?9z%g>ma#LldYXUqh2ovrQQ-LGAIl+GL;kz|>nRbGfeZegWv<%&H$P zl;K|7g7y>RLYEHQ8!O8q`#wP>1D&(RO=scj#wq^poF#GGVaumtnbL&=6?5_X`qqeG zNGv9Tm`$9q9iJXO&BZB;Pi%$~8HHWMHgdl$@g?(R>@Flqt=%e}J3^JNF5nr6GW2{k z2}lcVLYGmIAx1i`IQgk+Rkm6M51?2aB8c)2lD-CcFITy?>F*KSG@@F zcOx{Jt!nwL7bT_p9gv;{6`3E=k(4(D`SrpVZ^v)Er)j8&#NG@{&O+#7@td#L2siJL zm!sdvLn~WvD6s-{@4Ue$u&p~uD2gd&R%T8Lj{yE(-$%h2_?OkHVZNMD<9A>60W~>I z^n#hd^Hjd-&&?^}$J-cr;fYS~ZofRm!8YP>=TkEk75e~_yqJF0uPcIKz zzs{C@>iAsIR0OrCug;ggVJHcMtd49Z2_ZcMi?ijefx=!#eH;jDmJF0Y2F{MTF{bmP zUH=5u1quLe_2B*+ixhKZmVjSJh}UE`-|AJ?gS=qPWp7H}_ygVLT2>}$zRp%z#mT4= zObPnt5M@j;lKx{58=52nB!(@{LtBz2M$5|IP!W5jDZk{f#3fyYa4mRRSc~1I_EAgc z6|;Z}N0T6S+p!|2TB14qRHTqAv7U=<1cDno%rXw9jiW&pn@twoTI~QETgoz~LaF(U zN-;ThY3TnKn1fXeSq$h5^@C#xH<40R{jgb}N(l>rgf+joSzN|@Ct+q%Bhwv`(Hp2A z)fu}_l~6h>ndp%fl$?e$8S+*QrMrQ!$AC5tG^`+w#KQOejsT5;c3MyhQ_XE$bn4}? z{Tbh-7v)87zehHU`{ki?7@7QuB0nGRCc*m*%;Qpl z6M{r2P7~W$`*LFkEX}{3C-y&X-0oUv+5CKR@&d{C=8-cSy-lzLxdJ!f5( z%m3}36AavQL{$Q4>%T$!*3d2Ac*BlPBp!c8F``DDQ(vBND{hkcG~CP17M+K6k!ngI zr0lUG4rPymMnQLY7t$iJ`9=GWJ<^|5f61`C;QOlYR%c2Lv_}$b6bxhd ze)=tjr@U&!)YLK{U!mb!?^ih!3C1;Rd2@|dgO8{s0=<}>_6lT|4}XhFCfs4{g~A`3 z6?l7aXs;{%OTV{K(i?&9NEIRQsI#_UmoScFKexi@>_06K#zpwPOKj;ra85@57ojj~ zSxEsVuDZ=VZ;mMUiPedZ?&kW@PnM#b((sG}sOwnp0Y>P9zZ`c9N9R|En0#f9 zJ8{1(in^Zt5%PM+_!*ZFbK+$6kil~UB8E*fPT)cQk_}$C2TSTrQB&WS**E70Gn_6( z?rzo`b4U_M3AVFS1Pl5uKl~2V1ALxfWJUFVy7Qm&{gv~l)iZMP%WqC6sltliGS-yHO-yi-`WJ>h45TW8jyPPN-uJ}f#j zo&N37B%J%tL;X9WA5li5iNiFR?E*)P;T89I?!S?e8C^m+8*Mh+i#Hl&wFNn8AO&pM zddl(nRuL!jQT!0fB?_dIfdzNV>+B7Lrl*o0At`5rjdol2}56Q zI^;e;R;O|Zo7aiY?eRm&t!E$qB<0w-0BvknmX}bmP%8G?fWM)K(Hv+W-PI`K355|< zH^c!e2!P2nu=F2r*|sC-kkP1;n*cH!_})S}cP*ur@{K%iV@xx1N6BpVysp^AoA#Wo z_5ZGxVMaO<*s2p4mK^p=rKhx)_8ZyThBD9af21R}t8=3nXn`hGLWGR945}0WHNlCS z3d3b8`rt=n=22ZyO|#SJRE;|x-lZ=STp@9h%Ly3;mF8X>a4Gv_I^Ty$7Ux*`S%Pv0 
znZv&i{BnLVeNpPcjD{uGrbf!|^Wb{~K=tnIc3t8xNAio;uuKLi)P{j)wl{o-Z4+(Fnn@e>m6iI`WXWagm+M^$dz(U@63gxRGJ zc7x&Ne8)e<#ClFjAc&*c$U3~!?swMfrceKF`Nw`|u(>RDIL!f^DNI0$miM%Pu z!pcfXS(oYCPqdxGv1EqX$gXLZV@se~*f(Q^wml6ktwyv_c-4K=Y-K3%gpbJ$x2{1o zEwf$G_6O}UTFIJyN|luEg2GRv-o(U&zlpKMSS4>rTCroSp>;sPRRvkS!I$#E6%x=1 zyu{9QtHQgRRKyeJ-7d9mKDeC@A~vwbO*f)NAjUYJ!Z#dG?Jl`U50ejZ5ZUESQgAaX zE-y?Qbn0rooPxj1qxkHU-IEkMi9k z?1=0GIGjaI8JC1%*bCdA8Z8%a7RP_^P>?}Ef%^O+Ao$-eUGCp>gYylKK*V?Trmb$; zFiVPNh3*F9OTY=!LEmVy1M1AXkLX2v?)3q5U9aqIann&R1@W;(WYvGXe99P)s{$s0 z$^;9|g}b+aPE*lC!QR|@; z_+};lNyY|$@CT@P2$nf}JCRT+y8Uo2n|q9cJqn(rja1{+c?lFEJ*XSQ&nPM)LNZB3 zQAJpM1kAerIE;u(_$7P?3N8jeR-H4D)5~aCuJzrGx!E*mgKyWmesh7Ay?U^l_$h=* zz1ezAmX+1_L?*;&% zdju;(9M;4@H!B{@a(6fXg09($(*0_@=5(sxcYmr zbp`(FV*U)DHo7iFN)XMR_MncnWAy{9*VjI$KIDrkPc6A+_Q)wiSLcCfQ6)J*j(Sv6 zYIoNFhnF%uwBXufk%WOQMQK@7~U*PP=z?1vRkAcELFa?sB!}-bEQ>?1j5gk4z zJHJ?*ZZ~FcU*fynvb0}do~C2Sl(2-2h&z;ggo@iLv)YwI)jM^dq|7q%F0*S_-6`i4 z-2A7uDl=avQfo@}D=piRc3{4*q@3E60{&x$T%9K|v@E zst8;@nMj)%drJxm3VW2mnlI4s*+yrtqL1~B$*4;P4r2g7MxI9?{Pak^E8aOz zmCineTB~`z`OXu@ds%8u`RYcF@*3pL+1gufYtj@&es;FN$msC?lp2`*CH3vnj|1Z0 zhZTI1ce8VA-{)UTy=37|QqEg9lp{WTUfo-M$4JH-;JYYQv07U_z0{F*x}Rgj=RhH( zXrw3~Rhao3|J=1x(Gr%E2xrs-7X&$gp4i;cBqiX_suLh@$;u*tCu#UzZ#O0(wcYcB zLpl1x8O&roq18actyCeWuX^Vo!uk~j*H8hH@G}xGT*Kv-fJ2|B`{Tw-aYIK&M0E?9 z-@{Km0YZ}M5ffAAB*)_`ueN~7&TEME*ZEWEY_QziocWaW{tygzwzl^J?2s#kaMwo@ z5lyLk?13ksu%gASNoAg_1{%F9IT*MP7IC{(4=Hy(J>IGvgvX|Agk7it@l|tsK~-4e-8AXbk<8m|7l0?iR=Y+#hOyAwT#x9?7;GCR|+C{bh#AU~@P zSe!WzsJg}1OV7eAF(7-=29&gQPgvy=3OoTt0s+zFoR#oU$z+)>0TETTDjGW0bnBzy z%bh(F2S5b}bqnR=u%APf>o4ZY3{fOlYFAt zIJPj*dzH;Ml*;%M$2xrPTqS$l)DaN#Ac8&J=(eUDd)mpN$2>|=qlWliQH>%f?52y< z6F_C{LkF4yhtfrITTq8=T33q~uDaEdzHXk{eL3_4nlzen>9_vp-rq=vL#;QXqF&M; z_M?`RlQRRi1szn5m+O+ie2X&V`lZK@A{JcE-E%1a>?Om-tjWh!n`_|M#UbiM=x1a!;<5(IG|}USwFuUzU|*>QbPO5=d=GqfzPq(r zB@GJ~ue-OO7UUvP5))&w4t4;Y3+TT{bbb-oa3WoaYc-*Iu9_ua#`BkQ)S;*Klql?)!pcU3XX( zdHmq6+Kn#L?cE*n3yjd1c&%V0m1ZzI^%3I(sqFd7)vXDM%q6VFwvcnAAO|F3Pq6?` 
ze($jKodpLN1M09IoPg9F92}yOPQ=0BTfPX{OwOEt8>EOSS*&}7U>V0k`Y~q;8oW1% zw-h>THBn0>&rU*Rj`REHHxH-H+&H)gzE{LRH3NsQu~h5xLY0m5N&9CwjsXb6pgI9)^l$|6axg2{cc^_ zzGxw)b}yG}Ms{E*MB$d>OtVD+cbjX=_XNyu%I(%HrAt8JI&9|=`N|6ga`|Cx^47!z(W`=w;)D_hX8Jk-gP9ufw_hs$*yUFOlgEm?>P zuh?w(K>?1pL?16z&Vc}^#gT-NPuCd|KVF&fM7nR4%dPmvgr{HTSjuM86MFuTtDleq zw9D=A8W^L606!ukI5^0^dhuq$_r^kFFSK~(NWY_FV~OFb0i^1Iv{yK0eWlQNV`jH0=!M2lQ+s#&0Y*EQ zr1T_#&*IH>rM7f;uvN8o=;$bT(y@i4GZ60tQ5iyoz+{(ebfF!Fs`%nK`*b z7fdQ*&|{^NuVGBB?+Cwc^YUG=rrurM-Q_zplg!Pn>#t58{JhF8nt}?649bn_BH&~h zK4o8%4cC%p=HS?WgolSMNSY$2wOFq0%_UOWoEP&Y5^~f^roq5NmLUWYaCEp6rn`JX z3gpQ|jTigbHpy@4fN-w>HaVYcfcIwbp_hh&f-=X)*ws`w^Vrv9{@&)}z})Rt-(IPx z$C7!OkTDtP@cZGRF!1u_vv=R)1wx0{N}#-^rW8g-XLJn{lbN%jUqn1!A%?bR@be4J zCd>w&95Bc8xQw`5VtJW;xy&6IiFJIKsDsfVt)T6=(pPUWgf>VQ?iCW6n_Bx$*Krdv zXt(LyEuyZZ`n_3A4Hf;X#Cn^PV@G3X!>d6lF0}Lbd}i*}mTsb4$LoqtQDb4Et=+BZ z`pLXfp9KmAwj|2yn}VW>q?gMZ3ndj*ZE0pqj9_NxiTTLnIH8UXUrAYU0bl1M==_5E zd$@?+*j-F0ZFYSrK+)Wmk%g6Ix;t0&2<^h!R)1z8W7#FkXW8Rq{5o4lb`fsB(%4-P z^Fc-1E#B4Dbt;Up_SCz__+Wdu<(QQ%-3#RDs`JCk_4xBwtef}FJM$cPFZ#sf=R0tW zz=C~+zl!byXplv4crNsWJPZX9Davq}h-%-K(S+(&{Zv!a6;HcUF=&>DZmBu{~unJ`CHmDVIg zcShphG5kd7F@&FROBOrRn}Gxj`&rs5f$ozSA`krPSaIVD8&BgnY>QaJeoV8{X!yag z?s;l?(vG8lLmH4i0j%c(Z))t04iVmaB4Do;mR43)%|=&OmF!AY%JPbah7gP{Hc}#! 
z9bdsfk5U`gL`IX_yuHgpb?4`E{6GJe(M%fzpXkSClh=3Wu6MAH`dA|GXF9g+-_OkH z@rOCyfW)tgF0e^~q!&0E+1RMoLSol>nQ@3qNRc4Q&GowzHwbi-e;1^Dz2}?NdRUW& zgM%x9jzV#AztxEA^N?j6_T_0!63_H^^31$=eelZXB`;nADkwpycN&*-;$!0%hJA*a zBV!&X2%wVzV(BqVd>ow3zH$K z4{E_XlZGNnVt%&jbdq%XIfcGi>{kuA)+JdX3+D4bK2iN#Fr>|m@TR4-%y{m1=A%+M zn5ERPC@RSHp8taG-}B=c2)-122iSD=`1Z^peSuP~QZ`~3GwNF-+6uq3*&J00j_|gs ze_sbfzk!r#x@#^aXC)5Fae-?sd$8cdKKd?+b_aV$L1so>s<&_$C#NTof!l4?kcQU{ z$r8x!ycstG`HJc%QNGYf*eQzrfA(f>IfIWU-^b78-U#2omis!mi zYAc-6wgEN3|EerVU~nA_9hx)0Hev30n8iDLyZAQp(+Wzn)umRwlm`-i${_M10T~75 zIHT1wC++EUp(3}u?C7eYzOjDV&dXVw1-n0c4p6Cid``=2dtPq92(|~;v&VB)sxt$0 z%ONKw_CrUb$q9)?!;5^V^m2=(EG;eSsF2~GwC)+4gUd#w~b8j2>eXN~n?^LV1FYsmB4 zSi4~>Ie4b8y`^uJZRg>YU8G@P2f zW?x&G&e<35uD~Oo;oTgtdLmft+2oaMc|Ur1rxxOJzm(N>rn;)_BMuGb<4Pin-E_~}1ews1 zal)O2O*#h7-0K0nB^Wp}F{mYPCAfu&8L~D~Xm$0+yk0m~O8OGhN->Gs;pCN~Ntj-dzR7 zzN;8J@{Emvc_e8y9wlLJbQGG7HZ-c{EAzw5q?J;h51e;DH?-T5^90LJ@&23RC<^ ze_(HBx*-}u9QMjuIKqB{F?Eg{RkM$UUkDQ=nhin&w;jV_0#?2)Ku33tVp@FUI!sQf z{`{^aYycAeZYYZWs{u7=QdjUEHyP*PWECMFobAq@-D9~JfCGffuZGI9tZMXbci4TN z0M_eU)249S!S2~BuaeVRLBkhA*xKYM$&fIW?RtJe95d=%LLt1w=HC{WKIit96JOxT z7KQ6^@9O9|1_N|BO3S`-rdsZgRX|b_xJv>3bV$ArP}(AW|FsyFZZRAkEP*!Lz3XU{ zUFHnpGRrY%KxCa3&!RD*rnPoj7Xbn;4S9)zC`=(Qj&oy~_ROvl*^hWb6W;RFK9xOF zhG5KHcV8#3EQ)JX+e1b}+Akx{Y#G77JQC!iHv9W`wqvH9@R2L%T9$xGFI$o}EG~e05Dm+8~jy=H>NxNpln{V4h;g`YL z&7qlLBI;~+JLlPr$j`0md2CddVHfs`9_uS!0p$d??bZ}wd~F~5n5tjTj;Bf`wLIY! 
za69z9oo-{FudKhah4*k=4QG?5Dph;me0!LI3go+;L$?j0#_X{X2=;z+?M>`7w;&Bo zzZ2*dU}ijXS#1zGaNPl!NYu8a(CV`AvC*$ONI;Jt+;5cS)noetY=;bEvy)YoP^9<9 zdYb}yp;bqN>jhzf3?rIpQIx~fpGNZX^7e406gsB)9H*`D8LhMDM?Xs7s6Fz9R@$ye;4Nz4tA1b1xTe;A9q_ zHXOf(OZd;NE{bZs@)|J;oJ?qz2PV6)TVR`0nVjAaY`%+pjBiSom($oUB7BUiDX;Q4 zZN@hExbK`EN+xQ#7rRbo)!8!3KC>)1_=i@9rTtZv)%E&~KD-MW20qu6^vhN0+XkT| zjt7&_C+L`77e)w<-zt&ScwFxik18+F+trk7YAA;%{2Pg=j;@Wi`u3_^TkgP-#rqZ24VWQZ6SclJ+ri$vtR{sD8xx+jn?kqUTEoFd%mcL*)Yn!L53n#mG_-Y z0UQO7K$w1U|2Pzy!=z5I@60u8o1_d=JW`^7b|aBSN)Wu}8PqU@4VG}a3V%eSv>-aY zi0(h$lFp~f)%eV3W(PY) z+j7h%j16vuaB6b;ovVy4? zIn9ozkFU=;6b?C==TP(aoyWzSA#5Kxl|N99Ed*BpBUk7=gL8z=j7cq6dtDvsx{DzL zjyViK%YZO`a?=K6-_QsNXDS*A=|qiqveeRan{xjxNOF2~ zh78Fy)Fi9$RuWBN;SpS#feVk`U@{c6QH%{wC2raaoRl*TuWKKA2JoQ!UR6oydv*2C zxR|)_7-@|H%Bv%NyX3LK6R;yutDW!CXcuSkogFgqZ5ZiT_h*)D>pAy&xQ89Z>@G8a zGxk7ri_5b-;t8W${14B>Z#Fr}s4MMGbm`n?PZ1!=(2^2jRr9{Ne0*f0T7v^>P^@oL z>wjO7WTy}{hqo%007ci=g))iIJ;Xt`Q~W%=SaI>a z>`}Oi`1BwKuxv_6X@x_n2IFK!CG~)Dh$lAwEmI-WIuTM%g(q#|SlkqLpRXXz`Z6`H z!u}J}Xx!`t)z6;{=Xhm0B6PTVt1X$Cc7>k406ujyI(OJCO&U&1^}$9U%<@S%QJSzr z71`Y`lHWl>;gnZE!@=g$zY{bfpo+<7Z;v3c4K@4g+JMtYXo&3=k{+#xApC%T?1=Ux zWk#3xv_;3YNP2c)IcZ9z8!`25fqs(D`s8}K$IBQ{G%7ohkEtCh1UFbb@s$7RB+uADvM6kANlfDJUK+~wr)I1%NpI!ni$^; zms${veMtQHn6RE5V6hVVEe{7wsK@pp~}yIDd*pSPMKWa*eZV=ZCusPqj8+nXqk%x(&n>Lh}Cp zoxMm@t@-EGl>i`LQ`UmrX)vI6QXX8!)|OGL=2&pPU#RmP4VTANsaOR#?TfB_(PB}N z?}q>A@9KHnpGbaVtZVb@qWOb2m6I$D8hLPyxCn7HZ^Y~4U2UZwej4s9uJht{ajIvn z&8*q9;l~T4zo_5rR##TKua<-PjVvtJh91kEgA{n*3eHp{&l9^xcsx6_l&i39L`uT~ zLSWZ5xCwcJuG{^k%4sR>qpYr66tA*q?Y=#U^%iW*=Y+{q=gCOV9~k5NO4=wh+ zJ>q^%{itzU6j3|A1FECUc6wdDP~v<&5s<%c{curW6WA}STOM+IXsewlNLf)gzcf|3 z#DtQ-yxHGsmFw)i05&an?+zZvDzF6$*ZVf(-e20L-ablfyO-cr8={ubo zQ*+HWlWoQ_)iWiUFl{p?I_6jWRWHyvpoOzX3AJM@Zdr;&}ubGSHG#R>)gw^Bg3 z?~>TIi?@`}5tkp-T=r~MkotI@COuVsR$7ROup)e{l!sJ@SwHMZ7r(J1to<}(7|Xqi zlrwYrq?)+oY^bS9Kc4cya%M%h)fXcLSi(<`qy;w$98?xDmp6tZi~Lm1!T@S8hLEOjagjbXAS={o90doJFbV=n7xTh@*FzY8Ri| 
zPma&?E8VX-trKp+4{ZTnpWNJgh&o1rsbWE|C_DhJO7qzH$ad_N~zz!*DJGjD%8i<-E~j9e`_& zM?P?s3uVAA?)10H$<7^EB)gMZTul(6ZsR|_u^3z|^p0`c^BFHlC#D~p-n${^nMTE9 z!X4BH`d5|E_ee}_w;*gsA#RFYz-3_Vj?Ew}<@| zz09DC_W>aY4v}~LE_gblOQJTnlYip5kAc2OQ5!e#NeKlX59=ktP zom^(Q_h#svVlNfaB|^EE(KT@hf8e+0_xC|`h&aJAXpXgc3p`~vOn`yJdj1Z@ zi@quY+>v-xDvr6mC=5}Stt-fhH=2(n^VZNcIr=&_{DuB6FiZ8RY*fnNs2E=E*Iv+1 zWIaoSFsH{3vPGpnw^xHGX`6aG(Af>Fmh746G=@IWe7#Z|ZLiN3bw;O~mVW-rwi{{i zf|q8iH}|aA0uM7(bQJLt;HB*|3bREKc8o6iOFHVf{%@{CC<2FN8nTC(t=$n_ARx}` z^*VCWd{4h;=T;4tCmlVm@mja37O8F^UOq57Uu91i!ZZ@w?=0951q%pKw}d)f{X#G> zP%g@KY7^8~SC8XWJ_L8AI{2O+4eIioDbV&9l7~m)c8!c)+rlpn5PjwL()4>2Q(J-E zMA>!@-r7^g=>chTO1ZK(zuu8<ppZSOF|aAgh_Ux}RA1x2O%tm~98aAD>I}jweQ3cQb$kZX%fp ztTNS6L<#_}n66!S#WkoB(qb zse$N=CAHzF6l6Jkj1YR;7pj)JVDRq!urq$N`sIyy)5rOkcukPb@ArC(@PPn_pmE=9 zWuU(%7_1)R$mc3L!+xb}7^)0+p zVR%H@9tnj5E6wBpCbhD_krdrN=>KtdmO*u{*|tuQ;0_6{!QB^b!QI{6-F2aXV8Pwp z-GUR`-GaNj+g;h+r*EIrH@nX7`;)3wMe(uT_nULfXN<{IyVJ_iHa13yA*n1~Uv6Hc z$B*jQIY}kH+@qeRRAm4M5y^N5pQ1i~ha6m1(~^RS%e`K=gRqw&`WF-F_M3?;{EXCn zOq7TQ4_n{WC6KQW0K1Pp_Eux@04uz{&N1@^s+r!aPTOa3=ddo@e3w84FxGR-dvgn_ zJeDt}wq{I7oo}=R90by5R#t>or2G*PrM>r>G?8R?^-Wf;lYZfi`pBLd5SL^Z%d(i66crr9KHGME7>#f1V>kwXTT3S&o-(NSQBq33w_YJ4CtV1GX>BAT3;{kg~ zyujfwlc2AYUL3A{RtQwdvbwL7J87BLICd45Q_#(Z$eaib8^?~bD0h8jFbA^;ud(s* z!ny{m{mo@Yf~QdrEV6cVdj2$k&q={pcj;WN6S6)oNNpW^;H<6ry}b?kDYS*uAa;yG zBSk|~DV91>rWkz+kOqP!iI9z@X+PfNF8ZYJ$O=Z1sadzR#gGTwF<9NL@*DzAkUGFs zb?t*%US`fR?Il*%vpaxQ2zm~JIDJT0SB-D3%6$DOL|t^%RLTwQ^|QAFBitjWindYX z$1Oag9HsbP_k1V3kJ=uW$%Qj7J;A@efZp-f+)@>(2v+RmCLQER_?!bAaXPhLuO0Ks zr5NP0GAmoAJS!P*?EYL@zuvcTa-l~W`FmnKp)a=Mg* z0G3KjeO%S5T#=K#7-wMVv`RlLlh=rvlUf|CXW~cZ{L6_O4}en!B2iO#?hZi2A&Mj@3 zDXaPq-&K}zi$5Xa<-|)SPIat4@GN=ibqs;mxXfD2{=A}pR%aL1Y-|%QZ9>{!Y^9+`WrSH85T%N#Cpl z8kj`|*1J0nuNAUB>{xabp$ofB{F;&?FDuD2hvaF!XfCZFrg$fk%-}DC`w#k(+RjVJ zJV4G#?gI_2PR|d4f~qh z&)8G6{^Uo<;_JV7$FgUD(>64uJ${TcX(<3+vjkT6)>R0Tw=P~!ImAgrXm=xHvRhC{ z8Jg$U^7A6~!L&+#IGtloj=zrE1F40jtukynK;_r3iwNJG04WK=ZKCeNu3J3rqS6Tj 
z?)j=JI}5YA*2=wf=LQ9A5NIi%@{55BsrG4Rc`J3aW$|6o#?KXxB+$Imxkrv}M@#ZR z3bVUPR_|ZLrut?&IOFNyU7p3yQG(i+94yd9e<1Ojk`C^dRkdcIcq3?b$oA zqt7(4dc`hU_k;M>^n$wfr^(JCKRV6<>+&HTk%P2rTnJ&-SmgZUXIZrxpdbfKOYRN) zEvmy&U=>wP_VDZcM5M9(C@)9h?q-9xdd2a<{dJmvQkO*wwu$iif)bkwB;wM^1~9qJRSI<=YgDeRK{ zCO9ZEExVEb6rCBxkDGdl|8&+xisNkbu2j>`HykE3FDBfe{zn!Q=Pxyp`c~VtRAY#W zIq)HdXaeX_sLkBStGPHfl?l>s>;T)bQSBnzXSE1LGi_hPi;_rA93bfWun55!a%Sps zY}#njM#Qe&L1wG`I`}acn6{+poz$0vIUD?t02=Lv?5DQHL(TCK&N?KqSR7cAZ*!+a#M>iAisF05UA1XROMa52%uZ3q zst!qi6@k;=Z$L8_2KQS36TebD~^Dyf0360o+iWD zG$WdTj^Qs@4%b$%Bc|pLsALJ}`pexAYr~aoC1Vl3CWoIs&lAS_^$LHR5uLX*+L>1z z=aHt^i_#Q==vF@)@k8S!G^}A_D!{0sh8F^bDLeB71v80w}j_$If-jr}1&j}{vb2e%IW$D*^XeH)$; zP_@*OJudvbqNt@7sF-?iCg3&BGkoNKMkw1(K8f5H_GtK=zA-X#`>o{^w8^~v7up|6 zA>e~hC%N;6!o78&x`7f{`R;^<9cm&|=z0>hp+*pv4dEAg%u*_TK<6T3Y*0wxWbm`I zQXgz|n7hjQzP%e0ulAl3!3CUs;G2Jmk=snp1c$aE|3Vod*-Z4+7cP&z(zD@2o1EYb z6HXP~3Wq8;?1JINCYptyNn~of*2#=o-h#C&eMn6JB-96LFvP&5?{=wFwhg!;A_kD4 zo~Rw{`B^=5nyW|S9(graZtQ)aorRq2ft`cmi^do~ys(3s5VE4T5IDqaV(x>zZ}DVk z9fdQ|84zV@@N;GHLoQeWtMm+4@iZ@b7R!QL#fzQ{T zvTDxJAVY=5sGrNcFsqrYaY$Q%dzqs%_KvF@xD&+YGlfUWRCrKBiS(_HWEXq>S$G`rTb+ z?^uk#;U|K+zdkIYo+*wztacm?4q36rZ*q#KkG(pem(Xu~(t=MQe`?5GEZWng9l=o2 zm{k-&?do;UV`FWZ>+*_;aKGsdyZ6jXNOAO#j?J_UoHXW14@v%wA|2SLXtQgt;)sF< zydr!ROLjLr2PLh){!{}4nc?0iLLe3-rzdrFJdP_4j-mL#cATEo?G7X(Ve+Gv?bzu6 zae1vZ4}R#_qmK6j@<$(p-a*KCyQ1Nh)Nj)ycVy%lGee{2?>a6tJLPkByLLj&LP2NW z?M{OXA4X?bExT<*p;LmJKf4>0jxxaV)eG4maSiPCyn#lxPR z!SzHIGBz$&fryH3+9|Srp=HKYlqbX#AOtlIe_)dMcJNE*_*$lfN?9I0Vwv=qEp7fs zZ0tWCtsWvYG>{qUJg?+W0`33wpj`p|kO5b~k~+#i`iK78Q}ge{MLy3IT9LI;Cdha! 
zmOZ`Zb6^IMmjkqe{m%UVMqW0_bX_<7CNF=}mR`SU%f`XP|1E9l-|bJq$P>Idol{g@ zlde(1H6ur|)eVEl|1CY|ViE=$A3Gn0cPueTI4Ih8eMAUI!ZD z9%`0lf`_HlhuEOo>~bbAzdZ^`xHn{a|C|CeN+`OuwDIYvr*~JsCCO@p|?uGIM5gX1QCqtu4JRT-l=~_=uG=N5k;hPewW;r)yQ;!uD8Y zuZ24!NHhrb)`~xiAcA4;1VNjH$*iNmWH{&xguI?zmd)xZU}D(Q=cf;CZY(h+6I+~@ zoA1pP?}^!={b7=;YQ=pTH*00at&c^8jp|vm_MYiPP)7$JkJlPK^yfZT-}9Gg-rCJg zgO>B>h`97*@g5Qqr?P86+xeyB%-R|sy&)wHMc&!U1>TPaUynC$C3W|y)74~;jo~*R zZSUDxbW~ia-*$nU>vJxH`iM_fdBQ=$ajEgb`q|ktqn-wPo!wigfex%^nODSDlq!x$E;99RqL5D=gr|n?G&yEUK%UIbd%7 zyYG=+l%FMAAYRAfH$@8IAWk&1oy#4l-a)-e$Y)O8G@ZxxY1Z+s3tc+=aeV4|M9j(# zG(aUWv8n(ijNH`Sj;fi+AT0vVj*cW>-`_gk<&B1S8wva1EvgH2WdoJuVgz_jz?B*b zQ~2r7%ZBoXdJtHq^v;g&@{m-G!2`f3xj6&9yvL&V-D}m5DsmY1&#;^zV~3wOdd;Ow zM5Zxg14#l5(Xp}0(kk3!BhM;oz5|klDJ?1^qogN(-hb|8&W=q$lT{YQIr$Aa#xQN6 ze?PR+Bfp)OSB0Y{FKh6-Q3!AE{(?6e2d5s+0 zZ6nId+tM3a+oV|=J$fa=!or{Bc2CP6+?K`)B0f$1&ciEdt91;)13F&xNylLl$Xes! zZS%27TvS&({Wpv;+ho|9^9>AE&wnDmt<4YwAN?1JNX5p+D}x31u}IcHkeKK8=b6<@ zi-l>y(`r)XqI($~Ww!q>?!q4Pvtx9yeo7_>#Xnwlzys)}YYJoa!BjdjA$~HcDw3ti zU^P;(sjl%gRZ0XVI7NydfOwCjeJng|M;hL}qn68$Af8(Fc-%3CP8R4G((q1R>(9}llo^{h1k43uWp7s z+`zm_%V>?G7PVOeB44j$JIy*aRTM$F`k=fZ<7we?9^Et24ksd&S>gywC{UtB6Mlbo74db-hKLRR#gbbR$c|SWg z600SK5YG~BrL&ka{(FiOWRd&I^(sAy(a5a{d7>E(B#5ypVA^{iI1cFN!sTRm;a)BH zB;a*7d7O}8SJplZG~zmgdIBjHRt-`pYvSwJb&I1mlI*#?Qlhj>n!?636LG8?LZJ@1 zD2AHu-aCX#F8`b0p+offfMnXj{)Lsk!x}7yxIj_I1r`(Q77caMo-=Y63sp%1VTSi; z$Si;H3s1dVD(+3+VbUPWMkI54&QywSZ?4qw)DOV+?jfiVeqjv}9TT=YY9$j;DF@1**D#M>qx*O9S=N1n={yVNGi~>r{aULvDM**y+RVKR^iL1RGFSVs=Xr@Fo zZfpJVti2cL1cylTsA7VRiATHM?&3_`j;Q4o`9OP^C?Dd53%!M&iaY2oL>xC|WMHLG zzpS!W?BMv>!K`56Cq)UZrNYCPaBEQCJ}jO081^yhEA|_q?~rDT-z<2#1SAkyZh#it zRAp<>C4OA!Zp+4tBC+=KC>@metURLqZuIzPfVs1HRcyUfp;*dI{3X9?$GB?k(s=Lgui5Q19_YaBbgxH0Ln zLiZ4Cs}53#$#Z<9ijpcWkG6&8FhLfi{u)YJBx|8It3NWzoalofRFy7BJGIQ%37a4z zF)X?6jh26F0KT=jI$=$1>cS)U-ZB!|K#nJfv5OH6M(?Qe7$z2(vCO6m`_8!qk@n-y z>`VStLV^e~+cam8TvFT86vFCH>IhxG8p&%1Ku<)?zg>_cCcD5C4bqSmqSCgf(nrr>Jhp~Yp=9{f7 
z6(d&7MMV@X!OE)lt8O6pPRVhwti?Y*1CSkg558GdmgBENt1I)3!*nv!T6QFJWX~&o zwC8ysFYoW_Qm)@DPS=J&m{C{dEtE$AZu==V7z~&xUv~fkrXZ)i0_2eH_pWVmOs+Vn z;@ajYe?f4HNEp08jesuw%w7(msibp27l&1NLJZ!xyNG|%LNY8GDC7L_SJc(am%bn- z6ebZY;lbx4HwAaEbuUDCMychW_Y?ilr6yz#s<%WNL}ez>@`qTa-dl_J`=5gnjjB;8 zlQ;qFFfps9^b$lAU-{d!Xw?X*ZK8$pYnu~MKmDWtgL+_())nS@*jkX!_`xDw%zh|a zM};8_i>C8Ed~XAf+&cS2YJ6UQ0{ST^jjOZJ$+o_nC-4I`#h_B2kfi_D*uL z#ftM@rukn$Tu~FlP94O?CsdT8TliZ_r$Wlm?K(9*xRT2BP8Wjc@b7T39s^jXzZm%LCw%!YKrY#R@ha z0C$rL)5VvwG#aTrRI%c$^OxYxF5(lT2|}qNmSt9|5HemGBW2KO3r&E>4c9l}V$2W0 zABrnGGWq46&vj4p2wu181l8yRQ~YaN{tJA=6na-`Qt>=fSe!ZwtMOnrx?7qI-y+tQ znomAN$;lL!fKHp!?fZnX;=MMm3{5i=G_(cv%a(304j8s&q)ZwV95_XMN)Dh{i0lM* zZ-#*U5}O7F(C%W$hliJ-l@4P*{c`T%TQgmEA@EQe`fsV2!p007ry?ZCTx$n`@xWDYP$F{{>EYfH0W}^ z)@lPXFG=XdZ2yMLMtNVa-zNI5#$3hM(J?UOt*mhKKa);E?pez@H!x)bhCXM5*urV& ztmo#JqqV&A2?oKh)e@`U7^0)9@&X~8e-b?ZNXd-NtuWl82~0Z44vl?Csyv;W<-T^r z?YKKRlQVRTC029=d3ecXRWJ#L|Hen|?}q~cFBP0`WRynA4i^V@O5=yOAva#tCqOEJ zq=zBSfyN*bB*e+pwBc7tYh7KeRnSQgz^dY0b(M@2 ztd5C$N0=*9l>+S$xE&7op)wMVgpu_QNLodL|9Mm@n8QyGa6Sfuy`{xQXdX5`ln<@q z>(_~8r~niE5OiJ`Im;q<$PZ*5>^R5As^bp1qRV9vM*;6&_KRIkZ7ev`27i(>fq7{N99oRE@*d`bI#RM03WR9bGg6t_SSD$TQ{VY~8o8NPM1> z8S2JWw{KLgE3ETVZAj08JRR4YlIZTt3;_PSU%r{r>1bmRp@uf7N?(l?b~=3DXihwA zKUwSTy*Ouf1b=i;a1jMF9d7WIbAvdW2MUw`+!bzi4l*%S3_C3jkCGU^)}z4x2YPnk zIWpaDqPU8dmaXQ0ve=ib?Q%WAn=JBSh-xHZ&% z-E66ZNW66gN_@nkpKS(xaPyNJJur()MMtEv{k5cB`X3>M zckEONF|j}y5DG`r0hr6~$304F#y{NS<_$Tre&Eb9z+xzJABh~7(rp>p{`nbAofTxk z;3;pqxM|!DoFJOX;ZLxSc_1P)dUTfxyVn=xQIcW+tb+r>u?A>!bG3E^Huj*!``?Gj z&6u3-mcohD2Ji(8vQorJxL(oklzSiUK`(#6*^0K7hxv%%Cv)*WJ|Q>xWWGA1)-o`z z2cN$QpoTei8q>RIB(P3`;Ay24yxRN)GMy9mrYIz-qBV$*rD6WY(2Lb`1Tkgg+ zHSmV-s=*DAgP%F!R86P^VTUM-CKQs)z&K1t)^-9R8ZSbK8JU=%V75PERK3W%t1yO^Hmt7y{`dkW>Ygs7(=!#3fMaBh`SuB9VCS*1 zUH5uVomxeXTyh%K+J1VR2+BDyNcQ>~8kt=?SCMZ(M%3=w@464veTj+0<1#6=X~3l_ z+m`bqG=EGMd!|Km>iZ=Df-7@*pd^%<>XvPvTFPHqf_$In#g;x=BC}8)!&f*%sD-Gc6k( z_cAaBt0MB#E~!MxMUGs~_DKmJxdA6kaVQqL}j<9f3vV5P|Q%&(Fv?ONSewY 
zs)S_Q?80zXS~GyuDV=QEsU39%|2NUqr9Z7q!aS-v-;a{raSnH4g4nq8l|Jo>^QRhO zW$C0+J3iSl?x=n@M=6{l5%I}GkL?c zf#HB?H|w(2VukI9Z;3(km=B6%NzoREwu&6z&NDvS-7_L5urX_nX}mYWAHdum_hX_= zfnk$g_j@Od$ysD`D3PkZgYiY!l6m$ChFG#A(`0zD&y0I=+W2x3O>$RGiBtdOVK0rV z+`_0Sh++l_WF|b|A+xT#VLSjo+N-jx`soIDprXSV&kS+Lx&h`u9*lP`SCOYSm|bAv z*^Rb?hoHam+ZB9r3#cT5KN$^iW_%C%Bf&9;-}t@YVHEu;La zsV$<7e^6UAa+&&MwmLp9tV=aFXKdY|5F6XEyPSwrB}X}ym+iGPg}qQO#HZ5}*NM(` z7l^RTP!=WmV7P`3PIzXnoJM~ z0X{pF8^tFg8J4IZfFBzr<%dg8ojV6=5>kc01iv#+-zC|I66{&FpYCQqWv)M5+cgP! z$=K;+%9AN%(`jgE(Lh9g5Rd$`n*G-oklA8%9Q7k!H_Rk$$D!+Vumf4YV@1N(1v%l} zeceZrFPKtKp3YnYYnvk}RGJvT*glzkx-q=1aiYcOal`hJ=?iyjTzKfTgRbx>*iZeD zZs}W4B?aI0zk!)r{oDi~eydW=@JoW;9$YCGsG&*cK|fXhzOt}AcZT)F2}*X ze{jvhPFdgjj(0m|Uwh(Nt0m@EWib{`HL_p}5<`0rPOILNY#oP!1;z*^btJ>f0MD{`#*h05uVf7S2PyVoJq4k~rQf5gG2x9{tiVSlFU&;Q8duHA>#$ zL0yB)uq*BW&O$6GB)h{GmyD5GQyZKa(8Wgsf?VxgraI22zFj}!_~ysNm+ljjIWT>s z$)dNb$hp0B@gIT!^^)v+Uai-FdUbKW6&sdVBROto*E>1(Sfc=;TTg|=o@)PiHA1^^ z4vX+{kT=%hsF|*NM_EO68W;fOWdDO?CKc)di+72x>E09=8AMAGJby*F ze5nC36Vsl`_K-tnBBv|Z^HNXbFTxiHCowHFI8!> zn0VPyUF4hyW#PMl3bX^xYkz08&}-W%C+k?DCt@L zQY~}Y?R-)ku{oCT)}BAo>ed8;=IWFsCjJ6Am&6Dh(HsM95T!d>!XU_2=mCDU=XYRS zQEoCH46e^HYN>(DVdsq|yJO%v zUj;A_m_k4FX!rxD)Z)dRjcHpWNvO89v?S2S-NO%ud7fCC$hQJF$%5~o2EKk7lLhY; z9lbDls&Ya^;M7D~4?TB8Va%3NFfOW>Fba~;y~~1CElf2lJt78CO>RzA#B|Y`{gcs} z#r@cVReH)FLu`(ACrr{L!!XG0ab0MWuXYL1HK9UorcD}tb;n4tCi>uW&Ea!6E7Wi_ zBAj)cx|Di^4T9dptH@#WmiAQjGcHBZ#s?2le$~#6>Ubq4!v_M`k;NPd&&(Rerfx=u z=-FMTtuM>h$&&QBBam2B+)9{S!`~IR7B04~2W(%5=VT-#U{|i|aVS0m0u$^PNCR~} zQ^kQiDo}35JK{^O`^e>5Epe|mV`r1A;7`p~Rm5-;10aZLac*wT+IEua6x`kA^mS5O zYCo>YR4gJ#-~-ZegcN^9gW zItq%fp{b-6X8mP9(dj_Vwd3W7u@+ia(P(0%*JB;dp|Erz(a7@-#jd&vBMczLm!#Sb z&7LEWGW;Z2RpQH8PIPtcy>$^>sUxeezcDj00KtfZPK^RefcfgkZEi5Gwu;l_=85kn zOVEDjL)LP5fN2OFQ(Bc39pm;Y)YDTpjQnvD@QLG3gY)10Nm3-JKYblU{kKQPb#1Yla0u68AAf={j#&+F{GQ^Wf4h1W=F=cCz0szD%$_a z!6)*7L+gV_FnaNOXw=^NOM_ksh49+5}%P;3JQzz=7b>!MSt+ zKaxRe)CX|JobX{kz_)D%CkfYRV<(r8RU#xc$$R(6G<(8DCxcpc)v=1 
zJFD;>4I+qC9kkFs-+~~!ap-PNVJ=50-c1Hef*4#rTEJ>ejHd!69S8fAMT$YZR*O$Ef*LX)u#WV8F1XMt60>Gdq6 zT7d0xd-{C68fn=)M*wonY5B?7#3>I!7Ns%h{9dkhtAnmoHQyxOP2BBS!dTDW^bpjY zUPRfx$6>Q+al>4V?npxIj7TKTu;&urx`uK}w|fphm4Dn!L&O#A%PZ3Ok>rou!lr%q zLlW$(TkdmPuuFhIaqcYVKGgloVfC6EGePfvPyqkN5sVP6$F|rNw~4N4(5LurblB~H z-ITsPUaI^!*Vs+TpbCms9C&(P|L(GM#7M55hJ!pR^c$NBa70?4Ns+J5G$VXm?aV2o zl+j_IDq2s(f6poWy?!m2I&DxZk4TLk@1u_6pn-+$p?+gs52 zYry6Z^-1zUT+c|52n4eYFbuPEl^xZlfovpU3)bhg9hA04*6~jF&~$TjN5d}F4J7CK z$7z^-4Q*%C`C)ESgZmg^Y`~VwpuVHc*>IGn@J-rsya#8Jkr5rUa}AC}Xl8ksikNyD zYrJEoWf_pS-6MU|IZe0U`xQq!Gx6SHl@^qnt z(8!Q0JS$XrDYex}w0qWR=@GqdNS>b^1;{`K!BTIqcqPydbdO9E#W6M00>^dW3&B_k zPJWHi<`iRGYIS>~9V3^n;o9j~Pj@#(ys~7~Tm#!7V<4@Qs%pj`A!5*LSPdDJoVIT7 zpu)XZC$1#kzu{M5zzXh=JCX=lby#4h%oYN3l`phRokr!mTWV{5mkabGwo@ZjbYDOf z+|Ok!kv*mO$Y37(Q-5!Lz1Ni=tF$H!RbD4h3=JUW@jC74yW5*Eo>uUYR^!WB%gxcy z;9CH}+W_6#Kr*vra_HZc0fgs+BERt+1aU`aF6a7d1+cwS=qPyk!Qs7tPD-`(e23M* zK%sJe&{$q%0H+NaBn0t(fyg30v^r1(kvm%}Z^(tW)u)d*>}xp=U_9MK#o>dSd~>le zU4QpDenTvHV9Zo@#}Z>(!VWI2u+?l#3U4FP%}e;-e%`;+i--_n%!`QnARTjP>P-wg~`N(NG@2ZZun0tPm1oVZSR#Q1Ql!;Dj_ zA%&aOy!dQONd9626pbb!L%bE<0LU*p``6x;*P?Gyj0)v~hMOi?ntJ2NrseXLylDX+^OLR{BdvB#VD;Fh zZ)0##c5|qX*G@dgDBg+A|3G_8{}b)8`WM<0)ZFY|XLDmgwOJqfEh0a3P`sqLG{2+u z6+1pY{$O{&>NozXU)z+Imp8HJp7GGkq17MtTlX5LZ|ASCuWz_GK#N;(dWAgn+y_}h zJKqWrjznERW@b5avNY%1(3a5fd#dUU4C>UY(8UuUb7lleRJbh^tNq|1B@XF8A!v_GpGyKkK@< zayKVzVFOwTBr>v~wb-t4XnvAfJ(7lB1HLvtpZE-Zymu;|iWM{%_RLvR>+dfFUYzwg z03#n6WjkP2>2&<~GkM3Yx0)%aS%`V(u+`>Ya8hfiKKUWY`JUZ+jQjWy z&{5@h>1k+8ku)WQQR0>Akj3rLa0bi@Q&E&M^%}ZJNjqV_)vjC)pf!Rv$ z15OD^>3q-(yZbSZ@x4M^M>K55%ed1#kE~CL>i$ua zG+6L&QD{vIELcuArj>+ze8U~_l^qX#EMWas9J56rKPn*AdYpz1w1^TMWNTEp;r^e1 z(h;bdR8Nbz0*MjoTKo-=4Bt=w%4dVdRb3zO{(mPv=w%?=J-p4-mTgOaiX|dMBU?Z@ zF%v37eSfgNcUBtrAvuULPCHG4N9sBpF@ob`=9ROyI~=QWeW}ZH5b~8Qf%GbQZ|!tq7Ph~V&s-?}EBUPEd4I!9ow9Ar`0jW` zXiEz7CsVQ0c+?v`B;$qBPgs2E^(5AOmv92?2gRM(Tk2??g}0YK{kIQk7i4L(ekbGTYAl zqDq_Q_!^9FY&2D>N$*zex}2sN+erES_|1qmP-3E^@9qoRKYvCN6C@1$NC-nz7x)p3 
zsLl_ruaE7t;TP?20?lGg3(YXw+ga+0`il0+Y5EThb*7O?Tj%BE+tFWKcNxrkpp}Vr zm%#^!c+X1BvRgiaYcAdt<(>Ouvi)f0MdmRkzsS8ez@k-DQluVOe65`WY8VVTU$%I% zD0y&frnBl7H6f|7er1-et!ndrt7{kcX2n65G4bMje^x$9?OM_r5=q~U`gF@?h7U4l zt)8J7*EcfrRczY2F7CHZwU4*{a${jv?e6j7ClL8nqgA03B{J0ANmwc74&PVu>P7KR zY@GY8442zYj?$6k?Luz-`ZH5+aubD>7S&nMc;0vphUhTdlw{?L5T$L?8rUZb$pku> znxqYP#m%H27fY5wboa!QY0Zvtm%;Ok9RwqpIC}Xzu?2fNZaTag_umy(&$SR}TvUVx zzc|3oNnbJVF!$y|NFYqLAROg4H>*=j=r;NUaQ5HJr_?w)#Kjz+ytR@qzo-a3rZ`#- z^|O!xQg==Lhx0Dbl$7`A?&Tiq&O!GtXSDct^8fnA^JcS39#QCBHTz9+CxYR^a<9rONjIXuDN@N%auWyK_-QDQ&zcw~vF3rs+w8xG4s*EvqxV2leTZmIBHBBZLJqX!rbTQmXSLQQk- z$q*DON?mNqV!Bb>tGou3{L5gQa!Tk*<%KPduwiEur(%O_8j3o_N5w#^1PAIm>Q}t6 zkF)_+etF#zA*aVh@D$W580bg+lAg+>xNn%c`EE|Y%YzPgxGKGLu%uo%4SlTN_?#^twnig!}#EBzFJRXctl7_K5~ zE~h1TfywS~M+-Gat2MbMhqhR+hN`tAuio#*k||h>8Y=ZboNr3-{c=OF+(#DN88B5z zC>c?m5PW-&R^T<}U{QLQ@34$N7{b-~K4?)O;foA!<4Y^>f$8ab+IT50_cISI0wZt3 z@boy}Cp+EvU`O2_2cp`T6D-D1*s zkBle-e^9n$Ecs#5jSC{{sHhG8lFG z+|IkA4>}})WB|{>arI=orftL4=sD|i%AzJ;Xr!5`2pnV1CE0m<-yRM{UV1<+P2=8Y zca{sh-D76c(x>nQ!EAJDI(2n*2dpx1jt-uiW9IC|rN22Sh~Qv&=zzfkaf!ihKYG;(##1t%a51@h&wadR#R1x?m5edt= zS@FZ;kYm7~L;*;|<&NuqggWR%)vZCumlC9fdr4*YE zOvjM>GR+#+XaenkhzymSI7CmXL;14+rgzo4efZ2u3FGPnMY?4>!Ac}{Nwf6QFoQ+s zl7nJOhQWp*us9lllc<44O9b0^Lrttf>JsjPiGHD9SMq2x-P)L0+-CU0LzEV)t6KtU zVi%^Y)g2^qw!6x)XpM&?0^(t5mgAyYx1z8L>=x!BO4%vu(uP=c`23h$tV`uDEc!-q zgX5n=lr-n*m+{e^K%aHzG~e4@kpND2P>_>BB5Dh+xIeinwZ<1DnAE4F!-jEnTxmC7 zk@KOp+~iE8bWbwMS>bYRne`5HUI)#N2=8T(YoUx`T;~nbHKPw@G0(J#m--!#&?G~< zsu{Mw34wO-ynU+j$cpcO%lftPm9nSvJhj0pVJ1!qE5sotHYNZaQ1`V7jgjXO6zOkI zzc09-#yqCf{UE1m(%;1yJP-8=V^9vdX1-jr@Nj2G7g5XXdxOn#K?C_Xr*JR=kQfaX zI0CObQYlqe>Y*FovWoX6WuNgOXx{jfcG;sL+|qQI8+~uUhrzm6r}4RpPR5;S>j81(5ZaV#}M!>FB1W$&5%fN5_5Ou~u z5`v)E-~8eQEAjiFrE-&l$9K!^rK# z8kds%^O8+m9|+tOc-@WU7cUz*Gs<-lZzAZz8+a>RrL61rDq?jc$nfcIePoDV=$(n> z%NSK2;{N=pu(;AnR>wfpfIUn&5$Z^@h-e-{Z5NxxTrB}}A9_vL)8g=@cb3E{B-@|v z;3RAm_{g##q0QexTqsR_A!z`@1{*uFiQyP5%84^?G;PGZKSH9iTgp-*uJ)$9w+>-N 
zpmjC_Izm&YXr_A4Q_@i3Gs{tanDeTm^y_Nx^OndKfQzrG?}B~bc*}VteN-?x9Tp$$ zi?^9lMZkg)fm!ruFnpN)^MO@ig{{mBHd`waLwI3^|#zt zIb3-lv!0v_KhSWA09-$`yTu+p{wbbZNrq8YTB$}Z`C~*;oKvLbCDWOI_DdYGaeqN% z_l*|?LHbtOP;P_w$?2GrJDC?Q+UVE9X?Cok!V!44n4!4tjhA@RIXX30j!+3=4M_W6 z-_2fxT`jYXhW4aHdy(QrTA{v@XxF)CgIumEltu}3x^z*5bHmxPNQ3jXt?K3bd@pf| zL3DX6@*HA~Qc>Iwfi$|!xS+kXLz7>57~$+GiaH0!L5s>|xU{R+?j{0iJZ>zE+b(C1 zY!$pzwOyKjy&UNlAJvX?OH$gVq@-?Fc%H$i?d1sxY&uEj503>Y>_qu`YkcG<deHEgXVtR-nzt4+l2R< zqDEu1C@JNPWNd8RvR{D%ZKfrreceWz?Pz)anfLkkVp=awd2YLc%Rm;?7q@vNU)=4> zV}+lU?)Mjb15>gIhs4^Ab;m#6ZzT(?t%=&XYVgVd(LyVpyN#&|zF$4o#8an*0KIvi zY`@?pi*NN0%bNO@YGJva{9Lp!syw2#UCo{uI zg|^~J$DWP$7eeAqRUHS$-K%&%9{uvrkkMVnO1Ej8i+g_)pHfZorF+-nu;Wx+o6bW; zCv#NTHmjUl`|YEpPOPTSpG&ySKz=Al8PveVygts%e4#oA&>rDMLbXwb1UOpJIoj2a z=mdf0PoGQ#&wpXdt$rD@C5Ko?clp$TiW2Op2Gpq(h=ssrvN= zv9rf(cT}7K8U1)8vCn6)&Z}mj_w4cPpIZP1UMN%^MRxjSso>VuOc)hoVjxk@f#1Dn zOGK^t{FB7^>x(VZCqehA-O={=J@^HDiKF?-M9*i5oC(WmJrN;3X}=UNo^Hm$eR4o6|eZgcoky&@|aV3L%Pj4kb+_*=aZ_-@zw*%%@+ zE|IwGjs;SF`3rcB-jY5q>)=2)?va3@)YR<2G)Cu=p{I-|pK^nZz+KyvOq|S5TA&TL zN2<0e%I3Y^@qtg&q#WuMZKQrHoPCF(%Ty7Vr#|n0uHw2AWH;t#`e$?6idambh zs0+^N#iM;F@?!(_NX@dD5V?Dw<8#D)dd-*~{o=q%yWvHp@;~q{U;o5213rx@HI?OE zmY~3%^=FCgI?l%Ysk@;8TM77mII^%mtR2A*wOhQ3sEKTMN-GmV%z)?lxO(CuYp{Oq z-cgknt_tGMg724syDS0bOVQCn>2(PLnQ7_F>XA#d>^?iLthCj#4b;!dbReg}w9Smy z6GDNPCoW{Xdw`^9^K!7r{Kf+Pr3Pw3dx0o_J$n|#uh!I8~^aR>Q%Wz#9tJI25ar|Eg!E$jg@?P-&@b#8a zZMV;0e4 zS;=oDnVEa$lDU!A{j?d}#12Esj`N;{==;R0t+F0f+0>hbp77jLrrHGE!gWw_zZ)ch zsRyQh3V(k|`lTwX#i?DHCe@|#z7L9ro$`(yctexN9y^XVLX~H!&W&ZSA0&ZDyE!{| z^8L9(Ue}vyY&D(g6TldYO-4I zT8eyJ6HAzPOV0K5>-W(egl==$X(LZ>QJcZAY-22_FDr!sF#xD4Wmz}CL7V^fV*u*8 z<;w#A{qqzz^wRP3Ui_U@2T?tBTSYE%`H{&}C+n*u0YXDj2h1mS9usH`$bldJVlt8x4tYC;bKVEdzEJMHF~|Myj{xJ`<%1^FemOFv#)BP=8s@`aHoow4VbKV z0f#rPVe{gdulPUe7kBMGye+}^M&t2r(TXIUjlian+5jMb{sd^DU;@g6?bTTHef*%g~8;Z7O~T*D{uS!E(RZ5G+|4tCv$!3bL2L9BtM`QfNoX z@L`h=iq7AmN;!!u=2@z0Cq?CnG!lA3e1H>2+Bf-pi+gcRO%wM=Z2GZ86(OoS_nxDg 
zR6)kt{~X)b5_!Mita?R+74$uje=a|3-*sZkn7RZG*^d2D(i$h~p3h|-kk#L+yfJ-A z0ch5@%hLfO$bz>bSJ^%S#zwqIjHHZDaa(=A85?X=(5_s#l^i|JQ0w1}-BJ z&y+}xA04P?X_#8ublT-h;un8$NZh-$zbv|HqB<^aH@MNq4d>t1$!Vt&G!+XvHkIa* zc}&?C?o?o@F}7>L1GICzqI)_XS6vVtQs?*DA!D5K!;z_;s&24(M8o&J#n3&sc#p*k zJIqavxHm7wRYe%GRtnB9gNosjl{JI?o~PeZ{GWerayD!!&(~Ak^6CGKlZT28Lnv_2 z%G<2t)=mOL5YOdh^q=DJzWL*Ayo)9l(&rAP^>gFzE>^z-sB@V`#FcfC3q3C80^a(S z_TutS($bb5*DI8X4q@S0(!3DK(n|@0jzbkQ`HiN3P?UOIeQSrSkMNM_t2IhW2M;%v zb8(%^uQ9E+yl*IRe;0D9xkK6a)St9mW5BQ!KvkeD0eoMrZfkd1gd^X2n6krG^Vqie zVS5?SfrXN+A-Diq84IA|r7WbL=%R>_~5PuHu+| z=v{^|u#8!4#|_77)3McomieHRYZ(ljSX3z$IXZJ)yoibLu&n!-OE=wF;ejtigDc9` zR%ljQfx_gBS8Au@p6u5)TgrM6GLE;wOcKc^*BX=A>X>Y4-AVPI`;89VAhh|4CxTo= zC7%>0V5`j?9)soH^`Ipy7MySXr*BnJ(16JBstva}yA1rgY-debH|%{%dJfx4!d*vt ziTQj{+aR<20F_Ru*hC(ZXIKcvvb&Q!8=%o5D!cwt=_?Jn?y5oDp8`~mVE5?#hCe9! zv?F(lpU5jU?KbV7Qh%}Uz#ed@itoMnjc&L{!&D{`Z6rLOkf-AqnSy{Km{#g%&FIZP z=N4HMDM#*%$GQ?tvU5oXI(^6Hj<9f~6ykXDg0w&K7oCWowN`Bf@q5FQ<2ugBPWNcj zh`*+TfuC-NmFJib5$*~QbqE;)vY@VgPW_TAf)R?3;wQs{2(*!9UI(ij?p(!<`F);* zRfM5fxD(|VXT6_rwKEbl5~z^i%pCIbW=h`*M49D0!J{gt9e$L}Z1e@K=qp)Zmy}qpUN@{K40mHHsVFhn&@=C}_x4%bK-G;)kNH_pD|H-eb#TdZN zw1AKb@SY3V&;q z2{h1vWFe1i)_*2Jm`OHyFaMmJtVAFDD6C`WX6|v-M~wrE7QP_nTk}d6 zzLi=s6bHvd@ZEW3tbj%sz{bWo@z=GOr6kGT!t9>;`r7!nRyXpGgLg>RMbGU~S(>!v z5n3j|e(~b`7QOb$1wBotZHAY0RUeNAbwkz`Wm>it_3Z7Conw&1PDeFP%@l?L^3T5k zfXX~XF51KL%U#Y{&z>?CB8=KH(ImxZ!b$1(pm7BN&fDtU31vman~UY zmG-eN!yML>Sl{y2YFWpNYmSM|WuDE_5LZ&{l`y9xo`!CxUtLFJq-rx0Zdv{8p>6pAy z702y`$kWzvKU631v-${LheWdA=*ypn4alNQcklQyn1xVM-fBe|-w=V#WmLNA`?)ivO?byE!i@mWW$x3hPD>zoU_9>yoen{(s> zN`M)A^T;}0O`lGXA~V-}UQHVOfXKyeMbDh`^FU!sIEdBPl36m^5m|d05W$eAt-Y=0 zO1O2)28)-k1 zk=)cH$_dY}Ukq1Ci{1rPHW2jM$+UKzyZ`^xf1DsRWFnl=yp#P{;u1yIi&Fhhb}@_}?9s>%$Bn68W-dhDU-PZ$T%WvT zp6Dm3|I+@+ws0F{?fsKs>;=lAXEAEYVbC$88NR?8wvpN5XqJh&ZkDZ}RZ*94?erWNg-qsI|(pUknmmJd8|D-dqEUQ)E($nfYg>j2NZQ|HgQmGOQbfs5YU544ep1D!k;-r=FtAbaeeGM!oGLXxe zBQNAx)C29MqcY}Hm4Rd}nAMDUl~%qs#fkzBRlZUK0M%KAiF&a@Fu!5-Mhi_|pPaZ~ 
zf$0}NIK?Sh%Xw|x3ch_%8n}M4pWKqY7j>d#LTpHKe&|I;nNB(@+x|}DXbu<^KIp5S zNSAwflp^2!LJq}9txy|&b2jGKWRdz5zAg0hVXxbeK5--#ow736a>xt;)l(fg_|1)G z_}lLK#DwKXMy;*jxJ|iFb^buhR0gVum`aI|$;80N$f}=ph8hz7-K-=-spOJ5tR7X9 zw+5i5e&7m4R3K(D6_(k>C?Cz-CNABr;LOrj6D636!e`~?9%PjpuQ}9dy7M%_+@V`s z3V6#OeIv{K>_nMl-7J<0C{hD3t>_^tufH=PltCvY|4F*PY>kLE!8-g1ZFilk{c@0o zFY~;COTlC`54E}=(rbD!|6cqjN#$k~KV)3QWMLSw z#drefMFC=hlz*}dZ2%0cI-iWX7G>9AJl}ArDCng-y?my5wGNkJXjZkg{mG7Jk&#PB zdc)ZtvYdx52#$MMQQ|?_r(pZjT_UBB@~0B*Wn`Bmla=}bzw**^Yeq!`Qq{4d_~^#x zQ+d5);J-yF!qSHIiKr~8OB2)1htxUeHR4HNENKS8>hKiECl_#!>g+Jc@s%ijrvRH5 z>7|CT+`iG`Kb$u2B1IuzXDb1VMc=}EdX@j`?uyu+d}}`G_4C(?^JS`CDw&`gf#x;; zvtKFsRliwL&=j`Zd8rN#locY^8J zYy4*o?elTt$izhizwU;nC@o@8$%GeM1^kE!@(3wY&0a#TTt0v!H6#Ah24=F^5A*GE z0E&g_co>2|rqAFBgWl8aIa4(3N`y0%gRy5?Ix0CmlxESnmC&A3(NI4|9^8n(du)&N z7xNj2nMuvMVK0I?e@5hAhkCr2n_5Ia!!c0wmNbe44?C>7=N+OTVW}$?5QG_gDtfT{pg288Uxt?=8RcGJ)RtZpIV4dTF4$&M z&`@vhtB15=GwsG>?Ac9(bQ%_x>t!hHK`WCbc)i6lobCrt{!;RzyQQY(mt|y4DxFD} za|ued7yw%;#2$MunAhk8)Zt86>%w;^|2|dLN4kT+v#Fy(ybH6|@q_!OB8*~Pc+A)l zS457C_{Nlktdr9{7i0T3dOKdSqz&K)_Li(9w!Dk5QZHi=Y*|lIF}L{Hco=<~wYYM= z{GZ#28E&}G*u3^0uW_%xZ4yg^4Y5R>g1F$#3*E5qLYAh|KoYyuRjpNNoorP=6`f>< zn{)gRw1{X`7TmSB?NuQB=BJlsGu9+-_+5_}I2B=ddDCQU?a$aUznkE~#F_#FAT0Z7 zH&!&2&if-{)h5skO!~K(*g)DQ)`tHabN@r=`n6zuI2BjosdqVb{M%V+{GQB1#^-wk zvhfx+a@HJ2XjvK$9jHnB$8+59dKFmO^XH~MLdWNK`k$B1Y*ig^I`+0MM@oq2qynKM znMv+{E=l}X>78PZbDEM{|F6B(yZ=2GB&EqJ5S0%suDcUNntEO8m)90c;Ce|NaZRrs zqNl1kKHN&Y6fGZe+f>~R`yMMAbDzhDi{=I+qsY@0 zoo>a=fPkzz!(}v2?E{I_HZ4{E-rl;w_Oh!KmzR(Ic)l5&H)^x^ZYjV1%57s~^Z4?^ z!GIuGw05hTlH)7Ku_0_3NXft-SMpIMxoZqMCL@|>$R)b`#IKq0wv(FY2Kc<(#4nEf zC$4Z3dFkPDatR4pjCZJaU{L-ah&fr%CP*FO?52;f3&x8!I0aTVwVZJ%<4yQkWO)??2v&{PJG`V zILpg6vGHaOd2bV{7C(V<=Ty`&69I{Zq!6z0>27S7Kjge}%8Dsji;P!KBa9--6OfCf z!{u-9+UQJtdle4-wg%_X3z7>F)M@n0-Rg11lBUgxi<@O?JGeik>3w=+u~u7(D32MZ zjnrtYa)i1AraPq9JzEM38pJQ!&JD!QM0z14f4jUjX8Df#b!s}K{~i2T-fpyyqk{bi+H=M+$@wDssO$QM8pa&Y3w8n9e=9cjccRS#)j&aI}^tP`e7H@GQ6vx4k 
zZz6ixNxA2%|J(Qkb82FYfboU@PHTh5`P$cRKH?Kno37iT*XU6};_`0dAK^FQ7qqQq zWwHVHzevZ%o9zyIU^mq}&ZW(n3(m-lq>uhN9%4y(WFIjTX&mG2MWtu_wB947fHTqI ze!9rTa`VlJQIhZGmctTrOCeM|t_eLd!DDChuvo_=`ic2V3+8(ox;p;LGuI-Q6=v_# z>{fWXhpe`}$kZ|84^hT}`*(ehgWaxwaeHWAr~@uSj*8!YymOgOw;H?1An@Kdagx>7 z&oK0v8-}&<@_IdHkGnPcVPM50DB4HgzVI3%BpG(*dN(DNEJ$pT2hqoUDghc9W#MZ| zzQ^D%^#E3hty~_RoTt+TF;>WsGe#_=H&6NrojQt?#VVJ}AOeg+Nkt{)p`|4!Wmzov zG-NK;Ak*M!_11B>-c-JDpb!TMDSRe*>aH?uA9(2Wc)0rPG8u)>j&u1F$~vP?*XZDz z{sP|+IzO+jH577`Q)_{-JKlLGu$^*v_}x7mxm3XEt!AzC>{@Gb%$4ns7S}z2E|()3 z)U$F;q!;uH9TNU)MAlh3|4d}@{X6&KGsmiri{`dtl5%InRf@YEz1Tpz;;!#jKUi`E zz9OL|2jN2tzqebjyOh_g6DOB);QESDDi3R!%%WsUN)r4MW?>cTrWxOt6sLibI2h(QU`Q z{wks@rM34@wKt$kcGn8BGe?mbuX`Py_o+kOuWJ1S;?V`?!){2NmeU+iCS}*d^tp?F zPwS|jH@O=igca_t`V_}4VVarCB`W=i_VaR(<$2Ba{$$C*bI60J!0F;bQF*Z*4zoUO z907el`7=ni0PXxr_p#obEbig36$k^gQ>}7*Tg*=!2SWBU7bo)TE_K@nJxdySJ>8XH z)mL$ybS(yp_5&}s97jwBy=WNj%u#?!H_P7< zj_Zxd(`}Oq$XR@F3v`~8r5-oOwI&LmOa9_@s&_*-jt6SPadYf&+UWwKRchf=^6qZS&`@=R8vn_^T)F>P{l? zO30k#^Io8O==zv9zq&8{54SmG&*XKK%QDJZ^Hv{-M$$=8yHSKDG!Cr z*EsP}$(48u62uq)ibwsyJ^sLyOh>d2ZnR}%Kbqg#)Cw)paOkBBqA+XFsE=IgpHjvD zRKvi21SZ(%@|$5n2+lM zvf$1yy?GJAByM0R3#R}{EFq7wWA7cedw7mbn`$fy;e+p)T#pD$o!9|WjXf!G!M))G zg2NdNU+RT54TRn@rjJb8!Sr-+@2jsa`XT)kjDkc0E=f^AMCUS|P(_ckRq{8l{ajnM z!p1zpns)63wm1Ip@m$=xf1KA2#@LCr>sJh~sqm48XTQdZ5EPMd09_9q8b~L+V#@l` zS*@GYx6W)=nI*07Yts#1f5keEaWyGd7{6 z`I2_Bl8NpfkrBJzJ?D9{d-6HM4?@%S`Q)Z{+f4iQMtF4W+z@d_%jTx@L_u9Z+q_Y z$JH1Tekm{qk}Nx8sP#ul`Jg?(;cTG=DWm71{fn%g?=^hJ{;Um^q$Oh@pN$?n{NrSw zkjyE^W`L37q(PGRU22Wyo!@J5>S7q2NJ9)S@&a0|!|}e8r%BahBi6eh-W?+Hqlc^u z&>e?^Wtzv;S7Qd8#vOTwk;U?wb|?r`x@emwq{=#!Jg~@8^C-Z6ItBp}IzbJ%oi-_e z>#YnS6E?c_eu&yU(Ijl@4ULV(9+$>43LBnI9T)G@`TRo}_JOH}1v!5sQ<)NFNCOqY zVDLc=DO9psk(A>G&l?v<)Rayc`klST$E7y}O_Rv+e|7;3kK~;F7EQVar=M-po}YhE zCYo{6Y0tJEaBS1K<|lP8Xgii0+`*mj!|#rpxzwr&twwgvZJaZGwBtu7alKV6?#jnF zTR|5lvs&pDe@}a+ttP}%|6^0iWG~tbnO{1D}PTAE{h%` z5*m@JvCzfS^_x7Vg^l@c`cLGnYJ=8_+{WK5#(r{0h-knjC$sv!%s-c!Ub$@q#;7Hv 
z?n#Dj{Ze2|ECCpAi9*S`9&Rc4oFW`@keiuPsQt{--28{evT~5#DJ|Hsj~1ikltn+% ze!W`)n3R}7oM7&U2GWlDX?|yS`*GF){lZBk%x>*Y!%zNGu7Xt(&t!bfoHa;vH_*qW zgPyY96YjkcCQQ_K(#Eh+S{^0x>y3Y%)R>HJyOBfITkNKTxVdMn*h$=sj6pw$jsa)m ztSDndzdS5i0W&F^Q)k6uy!5g1#FGJqqhvv(l%XrD$=>pF6N=x9tSOZK)IUJtZq>|q za7Q|JS)pls&Rogn-WfeaV_BW8+5VXz8B)~0FIbzhc!$0@oyKi6A__Bmu5&*E`d?-z zg3PxV42emipG2?4diUpkd-LApx7$e$$nf28k{5b-{h29J1gGtoWu&IQ=++P}cNn(v zXWV8^PkgWvX*6i(pYd}1m!mdB->2Ggh}IFniye#vkGL78FVAp_tn^&{`8H2#JmO#U za_pEePKcR%_pf%jj1H0AIQ;N33u!C#dGy@t5mn|F(qI9HQ(njItP6<*lrS01e?83tK3t*5uF}<@a7gx5)k1zT)5am3(u|9|dPZ4wB0Wub?ID{oyi4XWL7Umpu$d3X*_5 zbkk%W>AR1n;3s1eU?hM-Yy3Yjp zDYxd5GQG&sIqBXJOj=gf)Bcj=ab^4569|fdcJq;z9eg?$wbVmm2cE{U2G9)1(}4C) zG3Z;}PU-!!G7T~~&mnrf!TPaR(s?MQf~cQVTVSy?&T z_y?kNzKmc`oh(IyWZFvHj(2OC&05y&`D4`q z2br3J%IeT@Y?1}7tOfO;BUugkk*_V20ea}CD9fE>zIp0ybJ$vcFI@&Y%S2DEea@JSBSVxropf3z{>}5Su5d@$u{P-Pu5bmKmEO{v= zWBi|7n?TlgCfZ2f(URV!>#89DDX?RmzX~pbIc}1)CP@|rFpCHFsJ<*~b1}Kwnw>D3 z6y0*Y@~et9BJBq!eiK_3ZSxZ#L;bz9|Fy%9AwhowcTG#b=q2l@@}$CYBIleP3rARr z5AnZ*+6&O4B#U-T;XFE4KbUcP;63U-609-Kj$L|PNMh(fKb(@t3Q1k_4tis3`-j(m zdCc|>Pl>MlZy*l(z5D|b)tuAu>Rcg^NieCl6Ks2D@4K_RJ;B)aKF{}u)^Gf)K8aqG zG`U06$<)84xy%bCL{y-wcipIP^QF0g%mu!PtsZSanch^nx)^x{Ijy^50r;bX1!Stn z{9pa%%h>&>LILZ>cp>%PMDbf4L}LGSjFD z9=CkkZQ!q$@=eA!+=Nk`kAU{p(QQYBv*byI$ z!!qAJO^;}VsR`HcdYv^Om zat%I369Tn9h;pUx9j*29OT2N4HUxs!bx*Im&e4N#qZY5MbSn?uK+`QAZ{?Hw5aBS2 zDW`+W`29Sjor3zP7A}02x%pGOoI`&~cDfadtP`-(Xym#-sF}FKueJKrbr8@+DgFvL zUFnH|Jl%(kOg9__-o~pJow_=nFZod$Fqa8A-P;e#S1LfKPZUo-iVS)$P4FI9Kt!wv zB_QruIO9}@p~j1&o2$Ll(2`303GNL)5@yozsfTKDeV9dnDDt`;cSqgrWwo9iAWC9( zm`xt|>xG&q9UP)C;hHnzDocHiiSriHbig?lTJWRt{?y<^V)ZMj`bYLp&g>bxPnFZ@ zXuoXr$0+gRaw<2nwaB!Q_It2IkBvh5j*B)i_*H0$suAuZTN%;mEqK92dj(shiC>S+ zlHi5?o{E9AxN7^zbbtp<4~jT}keM~5ba_Wr5inQl#jhWbJPWczIp&`W9HFp@=I1w& zjVWT(9Qz@?^0L~WzU_+Xzwf!ke<)M^Zt;@6$fQmG<7BnTYxRaF)RWHs-(hYWzd_*K<{S9C$mWeWTLiRc(=4LkpT%SXD(rbzdFsIq+3eO`tcOt^ zo0e;&lU@{z((f&S6UI>!MyleJ16?uWNnRL61n%g`W~`Z7 
z&>a7f7)y!;{SHK7+$X?b`Hr+v1gbB(*+5UVHpsWgCu}wkQ_;Y&iCSoEvk!R!kegtR z7?IMlmwz}ziOhSaiZKr*G~d?bISYbg2g@v$7#S=H4W)@>N4vZ4@*UAN`oAwl;Zbu% ze9Yd4=G+ceTydw#sMkAB<6Dgze?V=H%=s^|ehSB;mtm`Ew}sXBG08Rwyc;O#_{_zl z2o)=-x*^3EfTN%yeRJ;J8#GX4Hg1d(A&g;Qbll^i?EAMn7=jT~fol%tBqQTNOz;di z>U23$37~Xa|13YZW}2{#Uz)?5T1cD`lkUt3*GOEAo_uv~!g{`F@l%1|#b^65TVU~^ zu&R#i)6@w4&;Ad1T`O;F%Y-wmE!ms|;(9dGN9=KQ2u-BC!ft$QuOhc$#(%eQA#?wl z4Vra74q$(wtkKNdo>d!^DKl3-yF*2#|6m$8bTVr(@06kARJd!eU-OZf3n1XmJ)-#* z0JU7QWj5n2s}L2tqxy|1o@E?x&Sg~H$E^-6)1;&xx}BpTLBG0@x?)cQW2(^e%2MwS zH*I$%5tdDYmGO4PR#(OtBQp;Ql z6E%eJ)JnW$Kh`^4FznAL8X^B+V4Xl+17~crWQ4I@c{9_zG<+grN}={I=OY*nNfh zi&M_3s#GHzaG3NkJ%IJ=f`2?Q=k-5$sOM)1<5+5twZKOVBzw&ib|$N`SHrMu-B_jP zDL27P!Ik@m%_^Ke{|sZn{-@NPOxXovkyy}+sd^3QX54LyXDZeK8A622(+j$X;e4}S z0CNwLYZ)=AvY_mv?#XAEDj-T@x!mAGo|AM}7D8Oe{v)`b%3;tKyVsf1ITX5Bf@xfP>MbNC@Gm)zPZf zQHk-75*AZ2sA5{qcD0G=%SutP;wX8s+)UW_ze?gC2dR+!>&-^>|Dv^;QjXvj6jWo# zQ*~gvCZxJ#g9gsgCy!ZB0X+94SW=p@SU{Yzdv%*UY~B*67zzcw6L09U))S3xfJV>Hkn|h8J4N!y!Cs@~gMY zQ7!z8tIu0-UXOUqOVFlc3o>s*%19KM7uFG+%@*U({?hy5G*&hV#dMeS?Wsxsnhhm! 
zMXPQ#-{l7~(R)RKiQa(+;RiXRZl7j-)K^$@mrz)$lvw%gqYI=q5c^L?5Fe>@SLo%# zul}hpDOq@JY}aHL0&V|<8W{iwvT@t^&F8Cm{nJyPtxtAYbT0gR3cNbO3ES1g)Q{EM zbz;U6eY5>izmftk{VOX9 z{aze1c*G+p8zfQe$RF$#`%UTvoIFu~@~R)g@hjC1F=jeT+_xuq^KJIBn544h@)0z1 zM>PW-33lW?oh(dNAo*f=D4Op@QuPjx=oRmS_|Z&mX+tijbAu1xH`X=P1)%laGY>-9 z(6MzJ%7Jd_jPtx=Z$duS_gVdRQ+8ubv$tMr=wY#8wpEjdHAyQ|b$!6gO)h;40{}fI z{F$1-8()X|&UIp@DG*)E8F%J+{`4jyP3qJn8kB6(rubO5GP=QR&_3Xex8jei{;z|D z0hi&HM$zYXY!t|#WAYU~4`D_!in274ur;7Cgep9U>2YIe2;EAlWPg0(X7LnSa*FA2 z%Ke(Qe84B!^H=pr-FsnigVJ-;SEa9lQ|Rm0Kk|uP8Pfw4NZn*Rd{q>_r{}G=RQoQq z-6ytp{j#SE+d|+t{UD(a1Jk+uIKr$N|5ZIz<<2AtSxTUU6VOBMK(F)ID@g4nbhxQo zx##K;ac1Fw2N)UJ$h>miJ%%z}vM--XT&>eR`0k-BH6FkPyozh-+IFo3!}c{%)7xjgle@~yEsC2*jdxh&Yf-kJ#EGJNbt-&S(tkYUem zRUBUu+>rJ)PM>}*d5oVqbuFWb`IYk{^2-h2*?3f;P4Jd)!8`ZGcp};(kX2;i+RVko z#|CMFoBY2^Ch!G4@F`KWzG)Vk>lCO%*Xg;te&-Oj)GFr-kB@&gyBE~t2z^2`mT_~@ zc_9$sYa;e%>{u(f(V+VM1)hLM%Tl&d9z?aS5;($7QYtd7-7Y{DPyw zcI=jIp4x@>^K;0ZB+?d=AD8}!Ri*sf zy=q~Gti1qLw4j#$0y1se)$OQN;!5}dh?l&J z_ReerLS%BXC7O}moO;;B7-yg~yJ5xMMP-#&*01`GtyVaPf3S!@AYyA$wcgu)f8SgR zw%rcWezd-sCu_AnFx`}WzJ^mmK~2y$nPsO=Qk+0l!o5gCriyt-?(Jej`!w&v^lRbj{^4g=(jAq156hT3~`{xzt4Y zYn?U0dnyh1B&RjAP(hf|7y?7MV}rOxb(DQthy?5_6bU%BaqJDNyau4_>xK;Fsu6RD z-xm5kicNU8>W=cRx=$il%9r|Kew+kOv02ja9O(151vA1)(Nw0tyk^Ejf;5E(8k6lq zUkNx=63DhM3P>m{ka?ul(%FGE(GEX-xiHtCJ=>B^RW+3S<1ejyfwKM*-9(-n*C*;M zBE})Ac!==|0*W_$MZekB%pwM{sJ5I>U{%RJ+F}f1Ak3!{iPeY3mWx zhnDS;ahFAC-=nSXSya15pU!aeAX)KwY68j)BZ8{}1llFEj%^><9zbZ#9ueHHin$Vu z_Vk9I;$auR0)K!*NBv5|d}{)M6)crni%l8cL^HNeVapc!%(!i{Qq;Irs&u7@e*C1} zcRzDWU)}Lx1xiNc$CZ$G!fABqIkq(^;yW7i1*a0gVf2eAenH&D?B9^)J3Y$J~vVh@?z^|QCk0u z8aZo6Jrh04Nj<7*y>Z_RGY5s{a9=>ror>OFLMa#SGm;9un^I$<4g4BN{I}Y5aW#iB_I~3*!tGD)rQ7Yrr5FrRue|TVSMWC_R-Ua3{(EqfFu5BIvuW4iV#Ed^@ zu=qT$Z%)yz94P&1T)6f2EB<1wDm89&ay*8rovFZrIw1QK75e^0n;QHE&zyQ_1fqTw zC;6qBuHmdL`Jz&if${VykPBNZgLkBN?w~k;KYGlACCS7-mO7ehvoQXcB$)KIiMgPm z6ki*^c?z{o^JgdihZj3FD|Lbmr}%r$rHblTnc?@?)S%bXC&AXawbO$_aOmCN4qsm+ 
zAS&Vu%#C%>Z=OAt)Qm>cq{`03!)erJ9s_UXmk(cuM&3+vOFfxi@JpN4O%2ypXrIwj z<4&9Q<3x5T|5@V_7$J4o`q3|| zNj8d2$}<#dnm){yxA{;W854M+@*c@G&FK9kF$#~2rS7JH(^eKMQIE#UAD5>Gt0?LO zk6+R#ALpN<`{fm@kI7BYw9jqyyWHe<%12_c-Uqq1P?0kKzFj?(4^tgigy?aCZXD0A zGIFO^98NB@mOc0%SOBO(P0cLw{qv_i?sLOmIVSx~C;_&XnAHVQ< zs^gzhwq73r`80P-I91#p`g#N-qwmHINf8uFV!R=|mYZ=HE$^N9nR} zxXc*u9$d*%`Eo2?S%y(*e{D36jpefqkvo1Mc9?g9O<|5QO-K3;)ha=S-V>e|s*VZN z@FvUc1eF(ZB{yNyVe8gFTj7PHt$d_*x*TqoEl-i}5;Ej7W-0 zVYJ4=HRgmre$8ur7KgW^*HkL=U@vm2rr{@fPcQ^#UnRTF=qTuZ+WJlDJ8zk3ue7aj z+xq1uiW|+Vn_=rMt(LBOKzx?VPJ80uz7xba0#^S>mTJm-ay?G(-LgTaM81Vs1W4Qfif^4xzvj z6S#_|Ya zZ?lXlu|5b@=rzn8CNk2uEr^S+|2YtE5WtUWUcnpAU?{t5*d;ZwpwOGXb5^C%XjwH!(qb$N3<#1nrckD~aUEu`R6WaGGj1jsy2JbDe)9rVNbt~7H z)zt)F5?#8RB#vO|Jk|!=uXM*g1{;#H&(F^ML6#ehhKaB4%PN;n!NnBVyzr(j+t`{$ z8iftT;LzIxjlhKE6o0&Ri6yq&e0^v8H77%(R|-Z-5X5I~5#;h7i*Fe4*B#O9*BtO$ z*}NvU#RH3auXp=)8J;VBZ6WqK_iLA67~Y@knW*|gnTe6O5aitx(f$KET|d#Ynh>t( z+?-d1okuhMnnQ&T>0#nW=rDZbz82)=+#;m@=F`2MR=O^9R4lRqHuE-Kvx8}^3A<8G z?_prl4cUiLIY-zyRLV50MbVX*?X~gIcIC7;hh6#W(mIw%2~T${Y(LNv-G+DTQAS1X zuCGShi_*-sPy}+8+Ynr&bpFjZC8f?Q=g4h>ERapJ3J?vOcTGXM$*} z6jpA0b9@AOv!jgIo%VsOKc*A=2_+?{6fe}KrPFPNC zT*nGa>@jq4j7M;?4s7jEOesyQi3lJtY*?_qg-2hg$@lv6U?|cak<9*4v+RR+x{f>k z+{im`80}d)an~T^b`{t4v?h}Dxo`Nln4t59*ClcmFT{xvHZD&cpk~i_JL2FBk;LkH zWJz55@ARNK2mM*Vm;VKH65A3@mYOo&_H=@gMUMl;s6x0lf??q6)gy&5=Mb@dxxdWL z#zb<97ZKoQThX~<4Kwiq{?clkEI8Nq_PPI@BC3<>X7YyDJ5^vv`jastCz-v|ruKcg zXv~7Oq!9gHujC0oH^!v*ZK+?E1U5QsBEKRZmoF7QGtsblz%Z%faeD270!MPg(q>}Y zL=aVC(leu#@h5u-ktAje471P3a711sefs0_7s3;5aldksl~h3U5y>9Eat(x1vp~v8-F*F zmfpD|@%Ix-{B#^zfl;s*KRnV?on^qnu5r*1_7E0h1}2l#@lCE9J;Lu>#LB=SYNVi5 zuJoosg@8%VS`$`%b;E+a2vCE#j@Ri;e9IaY9AZUOavyd_s>zcSP(R#89r|-c7Nj(d zOWyY&<>gIQt^donAKn|p^Ce3u@;+iB>#hJPGy0m=Z49DU&_=b#v z4`HK=;t0rmwV{Nv0a1yH6Pu?d@l(3eSvq>te7fhJS%aIjH#a|XZQe3sa8oXqdySlX zz0c8?IS9OMoSOW)j3ceuOL#M57^WMGHa3Wg&vu_Da51bRnc2**9K|(0{nvzm8w=G1gljwMMB}!4i)_K{WP5i# zZgw)9_7b(}f;{32k^(&l7wr#O#Z|{aVI)b_F;+>+N6p>*Mos5y^Xl&2lYin*x;Y4H 
z2?LA|m#l%jwZqR)m80`}zmGhGu1Zy~a;dMFG2Ol}FG%Po^F_=I$%; zKHYSbsWos0Lra~Mw)=3ekbHPh5Zg6uhdbqz+P*RT)N7iLFjTVK?-gl%7fy%Tuo{{k~t~YOV#ZtZcSmCAemHY45PgLf`2stNyb+ zzg&^90K_pej3!boxLR-NG^87)#)=LV-`CJHnd9?YwTk3sNi+{tNiO`%W7$7K74%vX z$wEILL*kKCRlWLyS4fHeP)==Hw5`=Heja%{!mZU~#6^fWqR>w_@-}EEkJl4}v9rO# zefWv2)h+)q6t+i*+&1r^z;d;TJTVxmgp%A1kdyIr&uAI^871;%TzNv>@iM|?ty<8n zgTHNhff>OHt`)o{uZ0uxhNuT`Y5gV!fNwSN;59_f0JGJg8iAGygfvfp65bp{^$r=| zs-=o)!b!<|Gi){2N6-nru2)Rd7gV9|X-2EAcs?d`3$CggQ6v858^+UC)Ipb`Fd{4Y zI*nm2Emg4Vt;+mk+?GOQlA@UD$M$q127IuXGk#!6V-xnFq zk;weq@3gmv+)a%eOFpdv21sHSKW?;WMm2vMAjkQCD0}Ouw!Us_xW!vqpcD$lU5ggC zv{1Zwafec%xDzbJN-4!9Ew07gA-KB)x8NzRAz0u`fA@LD^WN{?@qOd{lR?f&&OU3e zz4qK|&iR*Il;@pEEDe2gNgN$IcSzalN`0!LQGCXxclDrirbqdO@8Br_z zxCd^jv*YEdAjgd;?I;;)virHoUOm;_bg`LArb_dD=!1|fC3T*Sp4^d57tUEb@zkw= zwe^Tl&PsC35GfzViAL*@q#PDCox3U6IcJqX`C`MV0igcYz0na>e6{Xr+It1;0pI<@ z-E46zE%`#(%CvOU*DlFSxDBia;m{or`Mz%X~Ii4KZn)l#`%Oer+E#*e?W**G3<;w23Z%J2wVSHL%Q#0Z2W7FqDMxb=iajg$I_+ z*X7kaF8_7vGS)!ghLL3AT{W+Sp>$#Z?l?@wOu znDT6dZZ#vycO-HaEvh2(01R7uwJouyCq7btGsM17nY(y$Ma)->t@p2_uGT=X!n)Cu z{Hsm}#u52tQPniW6J3DT?a$}||IJXf*%1d5x{O6P6}XjHc^tr?(XS11f9P9Dltty% zZjhJz5);gaxhD4Os2(3}AjzMOd$Qz%8WG;=MLQ{UWL8&K<+^A&BC)i{I5gMEC9RJorInx6`uJj|&s9%nxsR-fSop4*I_NEXiWkD0s0@a}Qnr!p7f1<@#Zj37Ga z#Mmpo&s7S03%+OLiKd3dy#)u`y>Kr?JqT}^QLF#;onx}DMchJj6|2Pl_JzoBJ%Nk! 
zbw?VwZq)p4Z|(7(R`_Dl4Z z?N|MpCF%39vigFhUk_v-Y?Rn!yO!IlHUFD@%gU_vn^a9bCqhS#(<@2oWv`0&=`@`t zXq^AYAYs#X&bp=e`b^1UqsAx&fYZ;D+uGJ2iaR4>d5b#w2&=O8eG?d;oInvmdG$GS zRlm{FLH!AZmXvC)WtP1QaiSKQ#uG9QtG9@qom;VGj?WWmR!4b9;qkGI5`gTP(~CpJ zo74t%*u9*!0tLhU>^OyS*ndu8-vc4To zRBS1FB|&3aaqQer3NHHN)%v-FSC|HfLMD%H!y;Y`a`HCkNwZG3eTE4DEM!+mHf7zk z*ZUHElkoMY93APrko8SF9Pa#WH85Hd4(O*`}Bq3pQo(pJFL!Vt<^HCdnj9V?} zmO+%*`QXDDd&8;+DL=@d0nlH~iF>uw!@2-BIl?ekazHQVqx|*>;WvOJu65B&-?4y%>YXimNVR|_t#zLcY+S( zwgG=Z9Uq_*e~0K~L-y(|u9qL7j_(~(a=12nwdIh~v{@F1jIwEO@&?=Yshr65}p?D$n%(1CIj=ip{1pL-D| z?sUC0ZkrQH=3rb7ls};ydm9-k6(OB|I%y*R;+fagD-iinI;;^Plyw?-$UkjF%o6%2 z0DW>eAYw+kesu@8*Q|jL(f5~nYU>V+#}T6i0OsyMw|h_j3dvr7F!OD=nN@DX=6Gq| z=Pm^o*^1_u9mUO>nwsLKC2ZODd|xhX+HxmXJ6A`AzhH9JS8N{t^E6b2%60OiL>N=7 zNoWqs71Juc?r9ZH|C5H9>=)ILn}kqK~FOFR6|7X`4orf}|t))Xu{cbQ8TsbD=U{stsF#xC+n<9nN#%ltoIFqxfVrPd5Sg&QYC+N6LZh>ADmR7 zeB&HvS(_1?7F+_QXWmNqDDU2Q;^)tqbV2?mrK%`%q)*!=xz?CQ^NCbqE)_eI#v)**(QV4zzO)t zmKvuHK+YhNh}|LgY~q?rCzUZVZ_Kx23!_9LySO2mj>!Ls5+AFFHp=~F?}jYvHq+;l{)!LR<`=eSkO7?c8M4q=>6#gGZ)Pf z@A_`TT7J@M-sW);a3=N_>zUKJ@IcehcddJwS1m&H`fH6=r^!y7*oZ90XDCYAx2bZE zNI9nOPKQm!NgRSqYwjYgAO%X~=T4AAVe1_6MOwc4S^aSGi{>h$=czB1+eOP51Ovx7 z%5b)CjPTQSPqG~;^KqW?c0|v=YmQFs`FSdvLI#nV)oTsV3GsDuok5@1BuC$Ei4rK( z$dw0sn6UxVaCfq;h!Ui4g-VKwh^?u}tyj=(0;lrJaJR=tY@(NCB44mAQ+XI$I_5AM}7L?tjlQrFoGjPSXHMH&ux6@6FV8~3zo*zx}%E9G5 z9cw0Qf??a4!`5SiGq;JwDKxK8z?*wD2*)58b;NUcIzS@$9Ide##kAdwv!QSOLL*S- zYZkCa4*`(I7SF$7FFPzh>Fb|(+3lvx-0{BQOey512080b9v>r;j;I7u2VfPnvzM2~ zkXlK>j>6h->fW(ppb2=L2TOET9^KaMeTLvi#z*>9cz!z1RwIZ3`_aj+34Uuq_|y1M zLeO1x10DoW{1Xx4Y5VrMt^?}!&{VD@?f~{}d=5YDO}qQ8NA|a_ICl)4JlGT6Urwzz zWhKF~9L9Aj$s&r}f&AhRXHwgO68|I%UZD0X+`n>lGmMYm{gj&Bhb)XxTSW*0k(ODJf)Kc|89A@_3ZZ%qex*LefiEBPFh?0tYpg`RTBhd^J|6DO|fcIrMp#K`NH(hyWjb@WHyQUCvopEVf zX`)XCnZL=zr6p(SoX_^J*eF^w{Z0@%2Bkmbq~_2gbJYKwD9lH1`&=JuJ>tQJnKt@> zdU@L;?^LL@wyseh%R+vY3~&c~V^Jo3&$u4Y-DN-@Q@FgNgJpm3$$49MOcV9MIB`;P zCC~Xa(?Qvoc4Kh0cZkYqH2*B{5|8@=LQd-msR_2uF3 
zlx-w@MEmfB-8o-bURjdrVQ_%mJHTORKAxt~(l4=a7xfL5n$@RVVOvm2y%>3xsk9fZ zKdvqw_AU=jb9VVw@oq(U$#ia#O=GY=0fAE~zPA4z5c(rs)_Vrar=(WrNi3)#YJKaF+@Hbl9I+!sBBR?Z_)=d$Ihq37mst^f#yd)p%i-m+0HJnsi}!l<2BDh zly9s8s`o*bUvF5>BqC2TVDzc(kuzLLNh#OVv5f#sKqJP)slyFcBlM?e5OUpAC@d|G zUv4fA9>pcU760v*wL>slIqL$0X2?wJ&K7&|3=H=_392h=H91}O!DIoV^5xxfWVFD2POwrv_Y4`3SJJ-Jc&Y$I3 z@%CF;pme&0Y$Jg`$siWQ_@EdJR(krkhi!1(&ghfHd$@1qCK;c~kyOhT5wq0Eq7{Uhh>dMA3iVvZaShrZY>D9!Z4Azq!tDM)w{n(Vxj@t ze3Qkx*nTjwK%kh=d+;6CqU4O5pcI$O+1BuIn-30S(>2f^O71lz+@CH=})pn zrl6$seZQzk^YNj4hWD^=zR2!G8ftIG3Hn<#p2zqaTi#*O>&W*HvdVSqu_z)z%WCOo zCBI)kk)pCnqQP*_8Nh(<2!eEs39|k-$K3OroK4v^bTHOoacGnFC%&?>^5W~m?3SP) z4AFy4t*KN*=MWDLms!K{(%JM9RoS1vYmBXPt)1OQIUVE8w9Nw&QE5ETI~pa ztEh4nMBKBs3UM1vG+K(7u<6JSt+E<=F?OVaMksP>HfcF`7%Fdq!=lLnrR$o;i$=6X}W)$|yM{;x=U^I@a-ipoya zZPaFf_+&yr$KObX&N7E^B1RhI>4***XS87hC!f95%1vJ# zNW|SRCS6>d3&>wX#FXBjuAO$LBIn z*FqMIr;6*xFpWhZ@Ate$_uV!_ETaV&Rl#hr!%>?7!?7F(><)T`g4M-5RC>>VBK z4Yz&_wrezYr~Ne(?e}|QOga5z-J^%c1UqyfpLL^icN-2LLLObxgdlB|VkEsf!cHgp zB7zJi(=M^Kna(rudr=K88=Dt~PFltjqHdH0)9)WS+~ffgUXw}3@_w3PYrGL|BY5@YaxhU>@tLztD8d$T zxcqsP{4Z0wL*gcSXhU=C=H-;@&hxDh*-h5Un;g}r2Ps>l zd4xGW4K6zergZbgv$0#EcM6B~j3^+*4)yWvy!8+d=!~bjh@A{ zOT01f| zqoYX%C3lRaU-!mmJDR-*EJU4Gzc_pB7z6H16+4Z@U%Zeo&TDVnX`=kNeg5*T2Z7|= z<4_!s>h*dL*j;^foC2xB23mP1t{N&hHeK=cXTX$(>jWB$Q&!){>PP29Iw|pJsgs6W ztK8z@ubqhH9yI?Vazy2HF5`}z&CFL{-wZaS>h%NaJ?$jCB)S_aqVJq1WVt(4p`Y@1 ztGNB~)fKgBs%4?+&rd#5)5cpqWd-Izhp8|ExHn(O#y9i5(E#yOI$#S-il@Q*05c*& zx7!D&4IFS&BB4eLJbg58*sS}N;3Wr{*;{R)*0*K7L5w~oC)W~xUbfiSO=#nJHv*c9 z2!vpsSm@S$%L%OuDb6!#uZ^81lF&othTXdSXQ)3Z98PmMcLc1zX3A-?oS2d~>w4c~M& zSYU6S?GeFm`Ws;DMrB}l;G*Qak{^rDp3NPT84X1VbFURYyC|_+{#k^s1?-2!T&nWx zg+~z)@CLZVR%-E|7d#9eGUAX?{AYWI5F)szNcCai%`@M`hhQFwBfpZe)V%wf;(u!a z;M`vmF^#0K^0c06s%PrrFeFQPG`;^N@QB;!ab9Jm21PQ3)%@w!pfvzRz{1R;XrNT| z8Kh~E{oOpX!)6)A5k^2W8FpysJ}&IksC|9mP<|0n4#6N}G3@-E7TNG9pDqb&c|3+= z?J&dzgQn~AcRe;PE*0`Vv%zjfO--X$;ZZ&Xw-Qs)#dXh*rGEx-VOzAUMXBJgT+|-o zui6jxou1)r5A8$Mz_waZaqd$dlsYZ|)UDgw)*bvc&#(X(xKuBzLS(Pz^+$#!l8MlM 
z3B3oRmJSVDeRnMfd-Y0;*SBaL9a~x+TtYfW+&+9AjcNxM8r<|#ndVRCPyQMjW%2&x z4r{=;)5m+m$;m1C6jN>M+D)i2ddXR+EO+hl^?`5{vK$4vXvJ(TTh{|EO*LB$#MuuR zAGTm-1@E*x$0MTKRFjq-z*khDkCC7Y8CU)S0n2|GSAGEP=v`eYezoZ86gS;G1ZqvZ z*j*}iJNo73QN}zkem9VRFrS9<^h7SDrTwCG$}U~3n*f`FWZ`Gtys^gH`|!}MXu$oC z6>D8Z@^2d=I=GVuW_HhBy|NqJfrgbeToM}$Z#;Zq)77B{C*A!sHx}CP?&Q*aFWOPV zf1(T`>IT2o9{|oc8j-WiRa+0M_b|NZ-+q9PBjFsHp?0pDYT=(#Su8xiPH~#s z=2Wr1aSd_$n&6w2`uPQQpJ3WJdXPQeHk7+(BrigJ5!tjx9<{DFCv7~ajB5?=8?^u> zW&M|k9$nL>>sD8`IuuQFw`j~nES2FirFY|dw@-8WEXsRtl;^SuHs*d@bk`b^` z-R8{)F*RjqpnM^f$aI%>v#m2#RBS;Q)SCkp6CTiKgQo# zM#7-ek(Vx_eML-lt>F>j8hfwwkghV50h{RWT*=+qJy(^_zop}eL(m&X#oso*Ebm5v z@x6+|w#RnZ4r!BqNZ*;*sKUEVnc7&HZ$$@oRAyms6d_l~+INz;W5haDH!B`R%D>ajVb&NZNWED*+m{{?7X{ZF{FVb@U$ERoga6*_m16mb^aJ5KyCDA z$^V9Wkk;SB;VEeD1G~vl>xOF64YE!9Q)@rrVHlCM86Iv2j`_nX^xhI%Sq(mipUiaS zae!VB+`hP|OZAyn3=Zk%r?~we82ozdIC`|-o#FJqnw&18oLeo*jpRnjjfi!e@I38TeGivQ6 zZDs%HVML(KT@3YrV+!#`%FnoYC0Nyj1rO&dC$MJ(L?h@^XF{Y=NLB`hK(Dp)+>#DC*y(?Jp~ z)CHQ)Dg61e4v)#UsU6ASf53#)6vfVnb5SwKpby3=bov3RN|}j8(@~|AuiiPe-E)uK zOWH7vg~{$e9(@X$tXgC=Hrx%QO%V3Gr^xG;f8RNBdw_T93Xvz;e0`b1)M71GYE(N3 ze@raC&AmCNc^Tz%gF-T5q}9;G9r^OIU2!MrFcF`n zrn*#`6&EmVA!T77B@zSrl=fC}$R2?vU1L=zd6uViIe3U^mRA`LXyaTxDD2&@NXwnD zZX|=nR1vy7@Y?e~qcT8H@Bx3~aa+Swjn) zcU;vr;GR@ch|5U1(qsTR_Hx?ZCr-%(;Z7>tgpH2vATcHtFC(Uqxgmh?R12uU%%chx zy!xr+%rC78#&$VW_T9LOyTvdsu{vE}krOL_?pWVk@ahteuo^0fvct|r>^Gb?06}T$ zqE|e!0|l>MEGnmJe95Jm1ZGLjuKAgH#635}Ch%6X1l`ZC}dou6+H~{Ls*wAdEgc(U|Qp!cu68*Q8YE z$vd4=gS_F#PEB`m$r9!m#y$M!?OmXgVy^;x+r}A*eV3P;S|Lq6^T}STjg(i=qlL5C zDMv9C;W`54)o82BhbDCvFVTjBwU662qD6Jso!!OqEk!Oxx7c^!nnt^OyF2N=pd(Ix zF`=&kS7+kWjyuaeJrK*Lp26-RPnp09i3g=zY(U7HwY^ zs{NS{UK9m*j+H?!H3!#df9n{0HwFoX7|Q`R8w4&zIM+hg9tOpuc&?UveqRsf1z=Xl?ffI~!*Cv#Lt^$dbre5Ph zI=78$V_-#GiAVJ``DZN5Eo*u`dbNmNXtkkzffN?ULEy}O1#=#Y>6I*a7^bY8%x z1jMv5;3bPq&Tx*N^igO%>ImL!)*G~Ag4e4{gfQ}4pdee#iu|T>7U?qx_vEoWSGvNm znq>he$0(EOuA!3;gKd;XknORdfbeUjE!m(*60q}iSgxONo3->n2Ib#XlNm*4i_GAAOErBUhO~AjLp=bEOAyH?s(>w%)@+2 
z_C3RFOd4Sd*{GxG80_%3y|36dk5(Q!`uGSGdz^S)c{=pr#zU0!8QX%Z;&W#hHUz#pMIY^C zy-B}WJBiZqxX1;>`6$3cFO0%ujK!W~Yp+61d-nuVKsG@_=pN-LUI&b~lp^*DQiinx z99|#QSLU9HG2gAmp*;}T>{qr4{6q@A;9|eJT;riX@$`I%5G>Ve465**442ceg-~#jycF?$y)ncG&nkOY`T0l;dx8 zFc`|PHZgP*j;SFTTYA!yuBE{Fwd@-W5fQ_)i}j@_z_R1at?4EF{@SQ%cx{fdorf_8 zEEv84M9&hdT`NF+QYc|t#@(Mjbbu)5gxy`}7L^pJD>6(tE?k$!a2W>0lC(K*N= z;Xxa$SlAGsGmYP-IVrw89QrbP8d7M6T#@|jrl3|Afd9Ka_P6*Fxi>S?B+AZk4x5EU zWKQ9A&`soT#aoqXR~44|Acz5@v$ZD_(kSYqO~})y#eVPTd=dvJzsaKn#+otqFsI(pv(wt<{xdTY+H6(? zIP`g3ppJ0??9_dMVejs855C?mX%?JJJ>_;BO8kA#b0PIb{eh3QcmQUX1B5c}g0Xy2 z#h|Q)du@}f$X}xjeZ{RlG<3IZZK?~M2!SG~E&?)?xc=bZ8dBzjHBl>s<4eS+l7|^S zlLH)n4@+K+if%l?Uc((R`I(D&+ATTf;ae-u8?b+^qJWwK`r04v3>1Uf0(L1J-Gi`C zv>z>;0BrKK=D+{h+)&G0E3xl1rRH>6xmpFB&%-l{LjBl%MIp}{aC!b5ZU*){ERop0 zbWog4F+Nyj#0^Q?4NORgA@`10O~%8d!gFCsuhnC39O3y1u}L=|4fyG37RFhSIS!jt z=47HDn@fa%m)B&2g3xgtYna-y%|{LjJgD>UBWS}eRnj32T1t-g9RZx4Rb|-StqyLp zcq)d{8avBh(s4QwF_~yFEvO7(y&zgtTI=^OIC*gUQIIo`4@XRcw{gU0byXjT&!Hy)n&oI{2V?Z&S9}_Kufkhwl;oz^v&FrdX2KZie&H z3D;vPn&z^byILh+@ZA--?{3B%Zeh%`^Q_hPEybeQks=h^7q)j}GS~v%SP)7g99O*M z!a{m$I5PsO8-FbMNrT5RRmmHQbgkw8N0g#7_-E4uO7<)K zY0&bF%=c&kV_u{4BO2jb*>&H^aDcGuW9FwMBCN;thrSS?EgopEGZ^X_l>&*oA?;W3 z&m(VEgnxi39J9vy3X9~l;?MJ|N54Pr4-xe7kAxp+6DAydg&5_#XgOLd*%i0D82s4% zHKR4n_dSs!Ia5QnBvzfICjvJt!c6;w#g5^bnnmYjxH|`r$!671^^hpb(dz={p_NK6_Mx6uGc6AU^QA=FF8Z({^`!(#+796u$I=7s*t+-D;#=l5Mw1QDaTSI=J49s?wkF&uQyndIJ@`mgVjOY|wLN2~lo1$bV zc`zEhMi#XsKF~>UH%`(1-4|k|gtL#S zq8EizJ>zF|^Ep4_>!Bsp<`Yv{uv(=)4SujPz8trVuKA9i3-zdDT>AdW>LW)TGEt~! 
zvbs1}AZ@uhwh5lM^;0BB6LQ>I)gnNOENeSvT_qCFmAPftdk1aQI_MR02VHykbfcDNPHC^r90yj!m;Qe zS#qT`nszPEwi44udB<=fxzn9NDH^J5)=kt)fwA878NZVda3zuro1gI%OFb~>5}s;& zceuRzqskr70hE!ki`H`7d<;}j&?HopY89XowW?A|7tJ#4ykBU(&qYdCoov5A7w7GS|>G%=i0oe>aANLPGIB zxa7#!T?T>^Oc(Z9TXo0M6MoGb_GpO4d#S0`|IAZ74gWx)H%=!+h}UetCZm4;wlF?9 zX}id#{0tiEiyoINQbAlgIf+L#DZj(_@9?qgf_#z)>o-4?3A8D!z2X9-x{E@Ny`*^3 zgLQn-m?;zNDf_7jB`N_0%u18=Qa2^aAvP z%XqiAU&pi3vj(c^IdRzy%g~}UZ|&o@DgT80bZsa|ydimfm*lV2)xsIXXW8{&dt!d% z#F=4&23-=j9Wf$|muSvXX1)I_UeV>@)&rp8Zq|_dXW_Kb;+zy8gCnoSg|2WO32=XlVQSi7B-Uu0&)@y!Awn#2w3%@fKe^sHii<^ndu64+3`$hb zEX4Aa=5_zcx`j{?|7Z&7)>e~$`T4Jrh+-PKqhnbay2K||dnA}srV zP`>`U%JT73HIMA3+ujIZT?AJ$A@Aw?jny#E^L~w@Z_{?OO&pI|X7jA92A6wU+U}lC zPt9n$YR0ieo85++c*g#LIkmj?y4w^I3tN7?g&4_`?D3tqwtE$64M6fD{ZoSto9WBX zWadqV0<{7t=Lt90w}xzQ|4f`0iyy7a7Bee-syrh7-byTXy>HYE@11*ZH%VO!b~<&whYn65jU=W?xc0N4P+L7oUIYGS~k3b~OEV4`%b6 z)8ZLse!8*Q@5-SrsH%iwUPgVUBE6ID+}h{Aao(4YppoLfyN2W1OfLuy8p{Zbm1~I} zGm?zYnIqVbMsewh%%#s*!twQWMz=?>bPUE^DcAtIrkoh%fg{JJJK%Z#)ltY+2F9 zTuUw^W&GckwR`+Y_^bU$n>o$!>$LZyh{X5J1REWEov~GU?=XfMFN5%>qTcGAzQGBK z6)>(qVjd}<#ZOtzSLL1W157sQdS;=D5ao(MQjT($R|@J48~$u}1OkR@OUEd?4==0r zwrgDtmTkH~9fN4N(3x2oVGKKU@K$Ym*K1 zUOo*hZ!5{PQMh=dXD1ni5YCNVWmp?CFez}eZFAQ$WgpGDeC)R!qMUmj20i-qMk?4? 
z`6{COFQ7B&ssRu_g-S>c*I$I(>}_*xVys_qEWckKv#X5j&xtM%34O%{=zuL z>JY;WpfTYlwd#Gv=|@SguD{iDsl<+(u>zg6C!BKYdK$+Ex4`oV-+z(^Ab$i7nut=U^pZFy$=;Ec7HpUM%bLCW9 zd-@N9jy5k0{ECEy4xQG24Ly~OyUjh#9y&1~V7ih-uAha3#$tJn1Yf>UUoq==sb>pI zNe1jp6dDOPC^#pbTB(pJ-7Ou5Nmw>UcNs&;FU*?~NBUuY_mVz4iD1L;8!j6!dmEF; z=59{c9Wjv6*b!vV9IB5rxxfh?k+poNVwB@e<93hZ5}v?I8h&RY{rf6k>nlCM z%S|lcbcLq>UfPS)um1J)DNBO(Ub0Y?Ss+tP>LqlC5-IF| zvsu0L7U#IdN`BJa^Jrl?ejEOQ5M@ML$#@JhxdU?$TaLWb$qZVGLTcV69sedK`iOT3 zT~JcpQre|QwGzUc^)vxIaA8H@jDP{bekyeW6m^a zEcKWy{N%$VPu>;=zwjxmt?=>_lOOpVn^)oRbnTwG#&5{o8$zpK3WuffV%&%KqLH|9 z0VLs`@d=dbi80`~+ofSJ!<8GKiQ-LpE)9KP1@O-lU}<$d3Bc3Jns?h6tJUs)yh^Xh z4ZWnYldaw!b_`8lysjH;-O3wtUaP|xJb)AQ1ja0Z*;xM_gCm%`*DFFf#Eq~s=a zRz!K%ytHAv?@-(jt>k`QIwQPLK>(kIWCHDuEW!KqIfhQAM6RSg_|16VYEnq~7<51E zU+0nWTz0tP23$fG9LT>fv*|2+BWfBCR*n8Yu6f3XwY(`S|2|lubxp2&< z8&bGr_6vlg8)Ib6Ce>kYnvwfbB^%|rXk5~hqwibudUnM8>Cw~c1TWh8XB8uRDSc&% z&q-SG-h7KB_;wG~xu!RKVHS(pAxqE4<`(9wsolT-q-?woX+lJ0JV!T`ckp}OUOsZ? zHX zLD+YlZHIBYQ$I|Evgk2(G1g$s#~9*$Xm(~TZ2*@q|P1;Yd#8V!5tI9}xto{+6}%0q|80B4P-6UgDpV0tScuWx3SZ6d{92R8P4chA$b%TB+7^9ZibZ^BWK^~Vs8aJX@s-)oTCMG z_zEzZF?D+?J`wTakz+lkc-4x_HEk#bGIl9EN}43C8$4Q#GaHA z2Os7vXe;n=5HSMV?|H}_H-?__~NvHtd0&Wav+S*<4j0T?XSUEMs`ex0>4 z2uWEky#B@~Lj@tOvq2?q`S%o&UL?s)y0`uq34fcp!El@pLv{2~d6M1-TjF3>OPwibaKjpT@u&px z$njHieiy39KE*gB`qSH-!t#SsZEN4h*|@#T+e`Ep+I}YZjV}Xl=KTYmgal_(M@_m~ zR-WJX!R-@2+ViM}@GBXWm6RVz4SCIV(}^XY@OcRT*Jz@Di5wleBSL@qV+DS|_;sf= z$5I3RdP=PO)wT`Mp8~v}M*~p{{9k#3-+X+=`#80K-Olwtm;Un*|9UU~ey0ES=PE^D zSs9F%=btzCzkb5(-|zK5|9waD5Uml(24Qjg*D(FJTmCN>{Jr}j`qxn*fBq6ihsb|! 
z_5c07???h2!zEvXXeWrH`iGmTVrzFHiTno>z{OE#Mcb z4HvZQ*t6bo*=9WRZHT3cN~CGH;((&SM+K?G#Kg%!Ox)Y_U--dq_%K$zPU6N5hR|&=;$ll+l`W_jXwb0Z=Ai6?z;3|lA(cYbD5uctpm2fGLap&p^ z`Jcn;{PWqL!w)8Y?o^0_Uo!sCssd-PXATR^vMrOZ`4U0-_2kC_`%byZUkyHQTIRBe za))O(A@P3s167y>_oJ8AjqiUL4USPMz#NR;ZB3KX10P-R3etz7JCEni1HjxXcgI(x>8!_KFJYvbgnLn8U^t4^)6V{WUlYfIMpHZijs2As^>b30LYINmI>k?W6uY*DI|N0A z;EHiw65trM|7z*3o*q*jVOX5DGwX4~E?as=Xr7 zoz{KL?Q3%%_#3<7XiF^*^Hp(A;th#T?7m@gew6e6LaCjW?}FLPvrD)2hxk%Z7PU&1 zx_~a6YsW*cx-qehk*q73ow5w{(IVo8#wP}SII`+ho@11o`+M?HpMSKf$c2cK()--< z)?X_3x3kIt`>53-JDdFijq?jMMuY@hM#HM3oF`0BIW2<-iZjMM8XY57SJyw$ZC6~S zdezA^zEw^A0YK>BP7ah=O?>XAwDSh^Yz~I=!rq?;&ZDo5t;*6 zZ0!5xdRXB?HY7V}1$B&>JmU6MW8Vw!MBx4Z2w4J3j1E$f+Z@La8CeM_$tq8ADS?4L zWTd(WXM93ZP9(A(2;JXiK<}$M!5Rk`fvFge8jkfv{&4qsjQ%JL^5Y_jEbKirDa_bg zk?MPKf#aQrS3JDp)r!h&E#-)bFnB%3($=-DvIbn4*5>l)Z6j}JIoQ!qWf6Jc9$(7l z;7+LXs+*Ge+!wFDywz7}+*^nopqBI3too2q@X6}E>wAVF88RK`Ptc`K z*%C%u&A1=K1G-fU&dva85$`V+&1b5!KLSdx!vu`m&ZQE-$(BP$*(01bx>q!?YV-ME z+c!Sk)3i1pMfa3YJ{Nf_CF6mUqe|+Xs{6uUo4j`95=$Dwt*i(}IrU=oMwMvC<#gsQ zR1al?eYT!?ztQg@WuCE*YB)2O(WiG_9NVcbZ_5Uo`sZoQl$)9W^5`lorINU`qnNNq zSJo}2bqe-!T{h+^SO=UmCJBNsnr6`+ch?Bxw#>^0@7f2#H_cAAhe21RbA=^EACN_u zuiOln?ZYS!rU47ZM&BfhKKb0 z;q(7mnVlbE=CUeSIihjeo#Y&`wd)EwMd2xgg90BI@#}iZ2g(1Pt}3TqtBl@w_K!M)PWN*$TO}4or2Hb8kBi`)x5< zH4W_f^PDM@gkUg;lXxktC4A5hNS~A_W^4cSwL?1=MY3SOS_ivIzNlUWP}Fz)xIQ-! 
zJLVd8r(!_FFM^jOoy=LJEqQ-J((KbCgU@sFs|wQnb#ORR&L{9JLTk4!SMxkUNGSt4m+^mfllsa3pC`<{7_`7Wh=`3#>R&%+ws$@?+o`&{JQ>F-=j z?t(hqdz*6CmKVR#bK7KtVPx!S0Cgj+;@(~Ycj}4LU`RN};yv)nR6c_$!2pvVwLq zs+iKwb|6W+Zjh2GjFR^iUKfI_L{^B*Nw+b#H&4&??2yEO51U6l`%gB+N} zZxhIzEh}3}Y@gN1BiT%q^w5qzNuMH8{k5$=bm(wTlY7qr1@&y_dX{g|Fm{7 zwhkH03a2t&YwLPGd!C@Dfeku6kd3*BGx408}R|n8T@kjNY^Fj~p<#s`P-kL{)*c z(5(U)y?LYggqqKW73jIiNf&78zgS7Lob?WVx~SxM9qTUfKrL#n(TobXHWn*k|NpS| zmQitaYZh(-!2<+OaDsbq_Yi`+I|K_(aCZU(2vQK--6_1#V4-k#_rl%b?tG_D-yVJX z{<}XJ18Oi}ueH}Z*E8pg$q-8Ae#2}~Gh40IfI?h-T19H-u|Mh5vtnWBeT(43wr{O| z_>MMHoPqnN?i)GJ|2%VKslGRYM4%8DBHi}=Ue5tlFv*t?rIZ=Ml}nujVX&mJ^VZ8c z$zij;=_;S}o&POxzm~!rqt9V>dOjsE4ZbhHw}vA}H5-59nqF^r{fS{q(t%#`+~lsY zCAQ?Xme}jTKdKt6`m>L7i1q6MTcI?xPe1LPj~pVbH7ZsKE7r>rQ5%s6Q+WbS2Y@&} zT|@ulvIM#O-9W|?=AH#L!YEAq$#|2E;@IllPM{zz`dg0a!=(251Ub#Hk!w4sTSK7( z11P0AqPvK^+$d;^KpF50*E@2sJAXWYS>H}`wIMvv^;Y>+>OtjXG>_`Cg0-4VNv3qx z8*B6R(9qf@=;3>-1)5amRcfL_FGyFrD*emL>W>$;WNB?NTl_I zil{r4WzYl;G!Qdv_-5?_H5WEI7H+6@vnC;yL z`VM|V86E;#H#l^u`iQ_`bJQ~$bdK6UiL0*>H!Tr|5mE6Mqn)D+?cC0JZ>=o9Gq_+Y z(gyOpuuM)|@!lbsSF7!3Emt#G*Y2e#)NQcMWo>_o7`A9i`NOh(1y;G<7!k?67I}zK z!Ij?9o)>xeb-DW6wh38J#PQ)Hp@1hRBm`Q(prNJVw4wujJOjuNO=9W0d~*_Bh1Dls z*GWt0z)(I6_BGJDBJj7k=k?>YGrLoqMZ@KZ*+FM`>K*@-1Q_SLY=Wd+>?a0Iei1sQ z;WS4?`BcvE|B3gy)~3D{F&0tTLhXf5APPU#X1DW~vhuPdmLM@km`!i*Grj$ePBbf* zv(wmo$InF?>?v}R99ufQ0sc)F=yW-+Yd`+oxQh;~iJ0yR{RioN#U0ti{ok|9f1jMA zMPKP?M5Hhhkl`rOm(g`pJVwkH@V<@nlt5wk-}o|FwO{G9@AV*IF1~CD1N|&mOh4Sz zBKdwF*Auzy#ruK3=odk{l=)66!hJ|;;w>J}&-38*oE$!yu$8Vi1UJU>9Kz~N-w88> zf>lX#NQnQCMEDHZfaxdlQ1#brfi86(F;@ZzphafMr$Ez*kEm}P%#95Q0APh5I|7{^Jn4F6PT z^G;J+S7KpKJ|&Sl-2xu+kN+Yofj=k}|3mp=vMkYR(`MIZg+$EN{8XoUyHS9HUUYu!m%U?1!Oi|H3 zJPFh*5PjVCh=E6zpHdw}RPEdH*6W(YZG5TynYj~vsF*fgJW1a@wsN*Oo(AWMvvoKk zA!ZgN&DrRul0k-eR&iWjWYyl~i0&1YKdp54eG`KMKi$@ri}o}-l!XdWH^ ze(i+1uOZKI^OPCMuxb0EAR(U(Z`3OP{tdq0&6K@2lmB&^u#eS#BJbcglGshbbA?M=mm9-hKw4dTYM&-D?;vSv950Df4 zvYAvTw4$+ykSBxs+RsP$Q=!lStLX!66OY6z%mrxOT0yGQ1`c+k;z~X~59pGD5mU9% 
z@M@?(d)!BUg~&*9oHZ5$ZQ1SN!s2V0|KPzqI1Ib#hj1D1CmU3GY!6pe%=Ai4DJvi9 z%NL5Sjn0STc{kI7kax7Fk2kWUnOGa}jdruOvFlo~(Wjg={GaHT^KPW?v2GH6AjQ7T z%-;yD72@1h=nH$ku;9X=An84$eZJ#aVH|nfkW3Nq%Q)6?GTAJ-ym`I&o?a9E^6~;k zWIt-#@bD`-Ran1Cxl7C94ho=cxW>DR|^#DrQS9a9MSKXTnW ztzI%3Vbt6c$V+%(sz6hxAkz~CLb{W--7EfZQaF0Khenc8+$*vsW?>q{onK#8n{ZcM zMz~aeY^UYhuDX1gpd86}!|zHQsK_XNBXK8Q{{n8y$*0%RApp!@>}~ev4V{V-%1;oRsA%HRE7I@dlqObmw(8P_H}M5Q657b^^Lo zFB^TU5ae~8JGC%#U%jqoXl@;fw=7>%d`I3XCD9ZHu8y-)#k zyUaAhNQ+&dQIJ6b!_BBiL4OWM7p#ogy%2>Lq_TOu9W4-^W|1AaL6=0 zL#Y(+(;yJQoZteC!z-WTY2~s?XXEw*!k%+7WcMlL!$>Bw>JSj15wiNPfOs3lcF3`T z%eB+?7H6U?ZQLKUgj19p8%s9W9`5tO{wLYXYIH*B8C z>G;_D&w|u77it9V(4=86x$_!7YiD#ue5JE5u`b#B2k`Cs2k`y)cNxZ}o{G*;s`>2? zIK?i5%_jVP`#kj^cFa@L)Ab9yx0%TtD2b{TU=S7Zd3pi!LQ2BS-TctlA9XYxg8_Bf zVP=aE8!l9tRr!~5Wofa3uL{K1xT*85-ID3Afs6X!7DJP7eJ!-x9 zBpBt9ns|dhd+Ehz)#|a<1TAbCyM~VmIdFO%xulg~pf&j=Bz_?tBq5hN2Q)O4*c7b+ zjSb$B$NRH*1o*$Xq;DtosHL=icEo+wWw#8GyS9^eutvaOl3m>*N#t`dLy#qDq@CEq z;S*L+n<0fp^ffEBHROD(9*e{y`S=tGRk)mbpezzf&~&-qw>s+W@wFTKiP#cAIE(!P zpOCz~&EHIpTzUZAd*p}^mHN+%1fHyQb6I9O$Rc)ve4SpO3q(h^ZMVf~F8!_EWJu1) zjBoC`&>m;g9v^4m(y%S#O8(+-T~@B@8|I~!I1PstQJY-3UZNVGgP;AJ)=Pq!Hsp0w zOP{u`DnFY=Bg`>X5F}VR_#hDk%)1aW<oaA&I zvL&)sFHO;JW#m6+Tfz5E>FCu)OItQEXjgpty`1K2)$8Ned?wgi&$5cEBrv2_MM(fj+Zf&;58uFz_5OZ9C zLdJTMdS|G3KC!Ay%}YpwWNA!pCZ=pZC?4ahK{FoVtzZ%x3RGvCY^GmLve z-lE(S5lO3Hwy??Al**C|DoU1vQL(Ti^7^@_Fp zGCoG+@uO{HARH1445ROXraUZ`@wO#~*A;Soqzr{+6kB}?n9!LVBKh9~VFy+ium++l zDD=MaVFlKV0212W#b5y&UzgNWQ?T2RsI7m=W^!vaan zEJk&2c4?sWto!G-_pW!^DCAF3G&~P`Zd83L8ot@WDV&K8F^J+nh`Zkgx?m*;aoK>B zHB|c(We&K*2p$H0SttoYR^UKt_kB+RCh(~7!P`3w%IALBFl%`r!T*MyVR;?^IT3sh zO>E>=3eq?3^yxHDBm{ouZ6UT=c2j#$P&^ag*SPGfS zLyYaQ7*QIu?d$lZQmw@Ga+T*KtYqgfbgEr!M{lU;zC8y#!t5y-l}7DMwfNv<-_tR1 zNl)xuRDB9Uz+Dca)Dc5{`Z3At0BI`;l+tG7EK_7mWkjtN_OVf7*4FUR~BR*=!@86pY$v7fsGxvZk1hA~6n_jh;ScQ9(_kXhDveWfN+4)fAgJ z9QSk_VfHgfm`muRs7Ec%y&X5kR#=!awN%#9X*{k)f$-QUIKa$1Q+XcEq7PJHzCXO&LbvYEs4_G;rkMCiZ! 
z3f)hZh&ph>Of)B_`<+-t{f5ap%Y7W!zVM8>2xj|?AE!dbwVL`+iJvT_-V%u_A-mwB z)_pKgQOQZr*V?_gU3!*uagl$NJ@+;8i_iIJxKC+9JvT;iL`H7gpU1*`yiq__s&M#( z)|08gDxEt~-={o7WX8ie%cyGHdPbuQBOe|JKE>Hn)7adsvCbagyOMKsWMeiDQE5CJ zo90jAW`*t4L<%yRearHauNsAuhON<%z%U3ojmUkDs{WK>qdvt$Wql+f&)&`bK6{a; z4+nE~p&W4+(R%Ixe^#k-@^V@vf1}s*6*gWM8R3yv2_KXFVe+<iRWq|MB?MQXqxg196~h0NB-8N&XbffW zudp~t6V~7C^qfWT>$llP)i4Ce78V^T!!qi1s#tze8r^@cH+bPAt)QmYqnCE(DJ(sXu$}C@o zDnX~%TfGjI`Hm7#oJxF#iHbg|b7s-Ym{v)0MM7e?AQWp12nj~Pt5-yPx2KqT4r);+oP zi`B9-u}7Wnk=U3-X{*Jv+bWi?!}trGvfLT>jCMSlJfKTId_#MTlqebLMXjumc~>N~ z|LA#lhD0BIo0KKF5P6j=d_M@boS(F^UH_0tU}S1yT2xUVy*+pv>ifKNZ?*gzfnj8f zQmQ0(;mQ(6A~7kF+g?MHkJw+B16<6y-uuz@8u9nhv*$JuRdHCVv-Nbj>MaI|pjOY4 zGQr`Ap+csx^OLqkX`E)oHrtQ>azTMhOpw*N-=n+o6Lf&3aAWpN{+ew68_$ly){%96 z!rReG5y-Ku+T&SGBuh3xW#Lb4@EQaYF)&--;lrlDg>gw0i_as18*(Y6tK6f&=S_7% zbCYhg&+7z|00cggs7a@(KpMZ@ZLm7wH@>$9+>wS;Q08T8$2Xo0u~mrcj1pZ>HDo}^ zPd~LhiFzZl7RXgNg=G+&2iB5)s@N=__Um{7Y7AXkwOwh`f69@z`rmWi2V`fRSMX?v z0sqbd(5Wxs)_fT`U{)q4O76k5Y(4O%kWgc0YV=Mhl`gZ~0vNEA9fe9fctrGksKE$5 zzgnG|I1Chl8kqalUGvwe2ukXyXJ7kc`aHe{f~wE8vPO`I+U6?j$%JgGFSK+(FJA} zg#5iXGl&%z&_-b;*roZ_kZ826>d>@~l4szmN_n1VX34^LVF|1AW!uSgf*)sd`lOa_ z;BmyA)GNDerf9y9w;Jx1fq3B#0lzvNL*H;PyhR^0%14|?^hviKTLnXn8w*;j#W!Zo)YFyK-QTFTRtm5~mbMW&uP9RW9)X968T)IXcdq0?ODu}#o936zM<1~G!~^qVRRdxvk|}6JcFe_r(XE1h6)=}F$IQ8 z9t6xf&A}$7CYFE!0Cvl0-*&_(u7hdS477tQyD*Q0FwqjEc?q}np9WDmi7k+Ro%K67}`8n|oM z!)3{Rz6!O*O|_#>uIZtTq?Qv%&+hFia@0tHEgJy+M-cWpod5Ty+;YxY)m1zFQOOeSpvAxXh^R&ww58Y!$W1ws#cMIg=!*DWao zRF@x|wk8SXN3b@IVX~~61mO1cuJ+Oco`pY79L4`}AB7k&94N|&M*N}8d4OR{7$tIf z2~szHss)>_@efG-&I%tQkHM;OE%$}Ts->g)$9c(y5AGz>l-YVEiKk~pks-~y39hlK z?+|O-B1S^WEC1$T&{0u^s>PO=o*>=#I_-1(tPUcY)QX)}cSHgqmGb5a_FctNHJ=%W zLx&dyQuQ(Px!^UiRP~VE@v0{pltc@7YeTDpXsYvv=_1S;zEInw=Smw?_;%g?B24{= z<0R#@n242j^QL^ZIy&NtTU)8tSHRh;gZ;G(Mz;oR1p{6V8$j_89}|H}j(B?TXd zF9jn}3X}LxGT2`qij!*19v~N~do~J6$fX?)RfB$3{8$=eo~wy7X_lIB@4XAAP^C}3 zKTOdN@kOP{n|hT$|ANKM&5Vs=TUjcrx3{1NjcGR|$HnI5=?!> 
zjVIOIV|9mDV>3mP`aUS*oxiY(F)mGDip~Xx1tBu7x1A40j(j^P@`MdIvSYi)x_7Sk zT553cm%RK?qLO8ekn+M5Ij6)nOz{EzI~Cx&>=R+ zT|W$iz0$klbb4#o)0fMO`*7_oyXsu}s1;08o6{s6 ze-NHs_EVL_qD(XECMIzn6Lm)zC}YDlwI|X5^ ze4mcL9%w}NoAznXeRZjDJWNV0g|Rtl$uhAwoM8{h)S4E^<+J_R(O52j+`c~^ldWu7 zR4G97=#`Cr%%!%NuPHZMBpp9!9y*(yDpqYmK6o6AWPPL8+vAYFp(S?zZ*&>jSx3WM zsS|zavtJ#1?h&z27O|C!q^l|Vt~MCYUs3S7Ovc=$l`hsIy+KvTnV-1zx1x$+0&TI> z2fZV&U9m4>QRqYGv1m;to2NSgQlteTvPm_8)dKRyrMwp-g9fcdzx0iC`q6!}@377Q zG1Trp6*ObCGwvucPCCt=)+>^Z?-awxd4^t z^y_6w$I$;{jpn`xV40Knv{2D$0fZSq;h3;1&8WqV%3kx#b|8Eb)KwQsq1?7{$`hrt62+;(${Wp8R@oi9Waqzk#>ygx%2upvLQv+LSfzegC z&E-@OK_gV_6u*VJxyTo{56a!B8g!YUZZ!#nVZL?Er*gGM?ol(doQdZ$v(dD=zr+%_TkC7*7p;CN z8*1a)7ji>W*CNo)fdh*hNR+N(pB+lXw$1;`xz>!Mp%s~JnL+b)ssw}T`hbA&gWbfV ze@1=$fea(FSIbC#e`!{3jZ>?@KmN6(U1@Sd1UF|M#x5h#%HApkzQ8T-Ky~0Wsw(N zS#of2zjGI2{R}hSEOC+Oh;#Xiv7TK=YvM;$Z9jBn9@(crbZB|p{*On9@=ZV%D)D5U z-zS#iJF>jM^sYx-*Xv{J=vW*SrYx#ozmHmSW_{*0t=kVLBfDceb}awkpr+ZH30U1G zPDx3)2zZl(nPHkYeDg_|!s_)$kd#HKxCRJ^_fap(tltb!;U2E9?u04Q6Gh;xaTqwa z-JLP+QW<=o;5@Q+!XYFd-Dv}n|yy7_#lYqUM)D8(V zvKrGy#y8>t?>8l9`YBk?1!A(aeu^QKC#Zod^twK}AgCYLZRM{ySF`A?1lNMsCdfx) zvHZo)``d*}n6x`B_+qjZ+fUpW=;(I;*bo7@=YP!2F<8i3WKrCQbK(%_wF+q;g><+q z)xqwHlzf0s>!t*~Ew7#Ud)+-F%8M5H$f&bkS$}y0#DEW2qQqtT!~zzw{a4=^?7O~< zw79;aR5CD7xDm+tPRrZKdJjvTW7t-DZV}unIsRUfo6r6i7sdOwb83-!qy{BYd&%;8 z!(?MO=i4wff1e7yFaVtLysu~x3r~M1;@3S00p-UP$?`1p9jlsqCt0h3&i~fg z{)Nc?{W*R@I6#XV)9F6>Kj5hU;N|}L9Xga_{FPUUFW(!z_+Ph4|KDF`?Q1t*t;j@43g zHD}JA-fkrzMuCL?ZHvgA!3SC_$&MdLN2~G*3I)xfo+Xv#IPGtTy{U#5gMvxYMKDV& zYXBfv;9#NtNx%9Y2Cr2IX& zmGirK4F+ z6+GD1QV>UY)nGt}dK(uR_Q>HxFY=Atr@;{mqK+1X_+W1`ZBz6}+4e=RpM+X`%uHuJ@bT z(GmnQ(Xx3L-5IznkTqCEx zxD8y-)%SWcaG+V6u;?~NJVz1jz&kZvGcLT^^!=|5qY-Bmeyisl0q|<-hC_`9*^fw9 zr8R9yA|Q;aGJMS=RV~R9dwr*|9?p$T)m_8N_D^8*ksK+q2Q*{Hn=8Yo-}S>Jmj!jC z09F)Y%?)wgpQn=Ij$B^?=t2U$anGx~_Qh-3`6?^QrRP=nihbO0g87JVRPHs~b7QyQ zGeX#+P7DzGFs?81m~43V-<;}*=V?%BMK(~DJO&_Wnaj_sBGXcvA$r=kXoc;!Y`E(= zlvlD?FI((x}}RmyAHlOwfEd`JTiFP|a=c-V8&bIY1C@Z1c%=zqNh46{LzE 
zh#BVJ3XU%l@gtv^nwfyrS?>ucge{=Hjj^wlYU1PLyXaeaNYp6PmX8sB zaURy35!0O#6QnB^18 z-UynsOAEb~fZsyPQUGIct^~qsC~>)&K^NbvZPC z_`clW;04cndZhGsNe(GMJ=A@AF=@IOAStTu1#h%T(Gq(R+Y|o}GWdhrN4}Hw`;p4H zAp%ONA6XmIL;3bVvc|A8qsVF4P zsKG?=TtUL?@J2qJGd%RP!ALQ~B0FKO#RaXk&l-InkbET5+@|@yqW`XF(LKK5o0^;~ zl2YPb^0`lk5nIO`pQQ<=nqlncXv7tO+bD(H7)So1!CgN8i3Xpt{WluCvT|l?ZMo1! z(&lWC@p||`F*$sX0-Of)svR=3Vk8$ZV?s{icyTqAmmc^Qxa^de%S z^ZOrr#|?U2pL(fb5?@gbS(oZl0RC%_Mpt=$m$XdlyS&QQcZ1#4@ZVOLOfoNnYEqm4 zt~g%c$6el+VqxVW=&#LR9}~k^%2o#pnnEHIrjXf5lH)7C)UB=hlB!`S*<4!^CX7|8 zKmRP)Pg)K;Q~73dt6%GZaLBN-;l<75^#T3!Nd6ss6qAL1vEfMDWGWuN(!4f`%xw5$ zuhKQkQc)ZpPj%T~rw0Nf_JH;t{QMu(h2{QMOQlLdI83_I#+>dH`P32cEzN`TwQ8joRwDA`a;qj0=$EowQ#S>lLfrp zfMuG;cewi?y-%hUqowgilv^ymH>wa&Go}=$qR5}sHPlOJr-sO+r@z>ON=2Ul5!Yi` z`JNi)P8gH=Zf~!)AuqU@hg*r?uRFtSTbtAQ&l}IxO3Vd@LkZ(tf(91;Yr)@P47))w z7WFEtKi+X9`Z`Sv!31&=K|IAakB$#)edAeA@Vo6mwcJlACF?w}H23cK>QfcmUlCvG z6~{=4g1ps{X;?|>eHM5XkxDBue>atzL9 z-koP~8pCa!zmFmj>j#`a;~rIBF{Bq8MD|zAyGGeGvE`3pnEi#?Ez0zh=C}f+RM+F~ z*R1#yk@YsGs;eKtQ=8H&0vWEyOA{5SX*eFSocs(C27deUgrk-1>v-Sp!Z<{749Z4d z9-jn{V&U?}c^Hn<9LCbbIgKVeIxgUoxGuYC$6l35NhKu#SMd{-$9D+!GLf2SyWsZw z#BG*e$6*d$>pOj)S0#zspW(n=mnUmodsn)CC8V7k=-e=4Q~0M0FRT^CCz;TzuZGf) zVwn>^v?Qi%4>W>ZS(nN4N@wBcPLQTDhN;rXve1dpuP!3YFmzIJ2@jIPhU@3pdixcH zW1kzcy5cdNQS=!-SvJ9E9TaHhj63{#Fq^3*`EzvZ_8FrkFJmZxFKCM(By*;7+4pU6&gslAxZ-uXWWNz*uFd*9)hzT?8j{C)wQy4NDvXUGaBuP@pw)w-K;BZY(Mm8c2#y<6AHYE=wV{!r*tJraqc>0iqs@mJ9 zZJ!0J$0K<)g8{T(f~y79T!XbQaV#yT$wd$fIn%d|I=8nDT1HkRQJEY#CBYA(#^8{0 z9}PR*Qv6w(Ycko^cu(cmy01I2k#Pqiy67MKlu2}t1{u;y%UVz137stKumjog{fbZf zIm6GByL~XGEt;R27pr3Qwx=}pif^Cx8r}2=aM)@PV;QZ^RrTlP5?6p zY^rq1ZhNgORCO)%cP{p-36uBf`?uViU(Vs)(j^i!7_r3xnTZwGH8o|FKff#=BYTtD zkw(+#MJIT)3+<1^ZXq1`AK%R#%6Ndo`U?IN-o@Gtr zHCi96vc&3T>yxO&fRJy2zKDlvjt`5i@-n$pHG^6KpjKWqk;ER1yqF`$=Ew zw@(W62Kk|~{BR91?q8_-hsp(mY0jpLUDB-qR1G6&M2LWZrJ?#e^(v4oC!>IIMTK;V zLg9+4av9U$NyZbn`8Qw>P{tR7@R9iD`M2xi$@@yw*-%uzJotn;En5X0t0~~TUkp1) zs8f<-UTW6a5^6qnn1N7pJqPqjb^1iKF%WMMS4#woXjj_Qs1V=U^4iX6 
zzg>3Z6%)$a$dk~3RnISWc)NbaUZ=G()E5bF*{xNO;(0{(2vFQ_2-JTXzxufu@{oTC z+NPD?QL9KjrfJ|^SR)+slKP&OV|%*uaokzqRx6WIv9gy-9Vv|M5ORCIC*X?y^>JZ< zvriDSK))OgDFw`T#xH6Ga8qI&&qDa+NK@Rry*Gho4Ctk zOY2)NWk~28MencbeBc#~niw|xpVP)0)34Ld(CCCWsn6EQ+9)PIu2VqO&BFe)2sxs3 zY7}~JCJ#0qBpC$Yp?F}OZY%pJektu%(@-G(68XK*@CH(T_NiM@^nJq$H8$VL-94+n z*-E}f*~rt8Kz>Un{GcXLpbfu+0O<`Xs>}Lln?Zf5vvAnLNYnW?!xp;MO5e9RhUJ8p zfvHVZ-~FC{liU-4+cYXEYvB{j%0H)6OabC|m413~WM$CcA^`b6Z0OlpsVt3tdBnC9 z3XFb1pzw}(ftoKg$oN|Z(rn!%{_-5=jE%=Z3Vmc|<#Kh#5>K5LPF9K%4Z87+)xN8J zo{GV%Bo$89f|voKVyHh}cgGJwvr9N0QKBncEG8aKzSA)0+;3s3dSEAn83#OzI<*d= z(b*O(V%>rO)Y&^YZ;Sjd43Y$T%C_{&1>=_9RawBKqZsu=5>sQ9$}tUxWTS8xI}?%3 z5fOY}PF3#&=jveA;3}1ESVqQksV=AWB)jPp_AT|;Lep@dq2iQzRf;Y2(Q8Y^T&3Bm z5`nrKO0r)xjop{eyrrXclCJRXL-bNVXgo2_mNW1A1(`2`jai+~3X37XmDehQcbPej znMx(qvT`?6l8#h}l62CO){L(77xuQm+n(wkS!`Y1!%}8V993N)F3iu615;@o^!Z8*=Jv_A}@qOPu?x0_M|Zz5Pa%Q@scD-E?|B8h9T8(^{_opG zQk=PvXY zJm}JTww#0&8arZqNLf|fPGk!0lQ)}~ggFY?gVhUl1rrDY!~ZL2d*O@3 z>_?K|5VBYJJrABsQ!_W?SAbDq_m^c!<#|jsLD|wpB!_!ir~pA@06c~Hi-&h2fayVA zI@oE;RnP@yFCQZC%nA}okDuVpn^+6mN#+>NvO3s*7XhAc=3hQh%InqY=12WR$=3_- z>+Nr_L-HlngO>2K4hsz4Z^~3EszA_ARSUO+OG3<;qICc*D<$uSufB4)>h`UifnHr@ zR(m!t&v{tt)z86+=J{+GUIi0XMQSt4`Z{O472AFEYxt zb+IR*>Ch70^YDnR&;ssaE=;i>8$<9N_XXaK-I>mM@x!Il#|6~~5$EPl!htSBnE-|g zn{!Db_`-vEWx}Z3nAW*?qkg{rLsdI=omH)A+%^6W_=_H+mBw&~NdxOOo4n8cg?hW6 zL+gl10A;cW)mB6wBf)vwm$b=CG~%$pA^XoO6kA#Ft2x!LT@wDt7F}O*Xr2qVQ_*X_ zFKqTMZuuwvdvr^HgrpBWyXE8QU01nCb4dHXE;pg>Kn90fg-YJO7eGVqQAybp33>It z2H|QX-K9?5RWLpTpO+BveV@F}tcwoV;a5PLrZz_so}4+H1+pzCY(l@)rj8t*bvD2A zE%ir5ot~q{TJeg~kBEND{e#`h%we!ab8Un^UbPbKMxr_8j^AKaNHm=I zoGL14f-)4CW5h-~j<#vOrQk+`YrrO$kR;h0WxaL=1JN+-kGg{07vFQJhV7+p?aWSt zTd)XTRLfSKU_b$Pv*~@b(likQgj)D-ht?drIN!tC&9eEAe!+c>+PExw1&kO+}=q-+6ov|fz*uDLD<@B zFY}RM{PiQq;)CxFn`P%o|G4Ao{kbe<{LI@sAw8)y8)%e?CIF`|4=)I4Zul| zBwW8>TCCA~bP#K{OoZ5#{Rw_wg{1rH(gcBGu^j+*mZ8`hqKMB}b;fS7Fp2NC6VqCn zJLw+v>=LD1SZXrB19#ptQ78#y|ptGE8*RgMVAq3SmTWZuX|IQjW zaYL5y^TB*P?R?fi+kSL9x1?Gj>ZAe9kKFwYg+_lDwY}=V_h4$qcwh9%=n`VzE^!+2 
zOAti-lL#%&ZN-sx>G6IAd(rikN|ER1Gl9!I)m`?AumZra@y)2d|4xOu&8NE{J%y^V z8*F$CIc}7d7lxun5=f}J6NnjNyw#h3FfC(@F)k9I%Oo%>=gkZeEoc$FuiFY^q3Jbr z^x92-6lx?(_nV$m>lF~IGKC9HM?E3p|FTN%5*6P1*GWPlx1&XpLUmX$0W+mb%iof5 znCr7E95zapIZjq!)HKHFdf5YqU&6XMOWv_}c)c=Xa~loplPXxhUQ7Vm&T_V>0$)~T4H7Vcu-8AJx)!5{ zwGG=>8-fXD9)#DKMt?Wj&lj@ci?R!vrtCjN*w+k&7&+pr&`>Y<7^?`ijuzv(M|>Dq zFZ?5BzhXQ8KBXP^_kHw!B)*PwCtlW_sgWSO4y+@}5L0&8yCo=cB%b~?`zn4QQW|Hx z?|cG?_E4)>tWC{p4-i3--oo^d?xo1p5Dru{zcE+_WXPWC9I_2*Ip-fL-IV&| zVr<@B*Okg5zeUzN=2BA^WOP^-DHcyBGK-rqS8^ck4FV>TMF^wY_Noc=J&NOP0WN^z zU^lNhzM@p&{Nyk}8s{;uvtx7#ibfiDo@RF>Ez{#!kylAO+$^z9Du`lCskDqPXX|wZr}P|6Tx_G9WcIa(bAq@FV~2s9_52Nx^K#O582?<9gzI@~g`ZSH zP_%z!FYT-{18e7U!?GL4u`G}WP4E5_8kuLVBYuBA1dkeE`i zsIum1dj5$#O%@jEtU@@H8uI2Eai4{3@L}|di|0mqN4=Q%+8CV6SqPuLfKyTm6bqA0#)%uTm z+l(Z!aIyKUv`gBQ2rUt!YlV!&UTvX&u{<26Df72pNnkP@V^d~>{{G|Ewf$;K+_!8p z=A86bhOF*f$o!ptxmyyZQ7P-6TAD_#QqyaX04iyu5Oojz{B@j&PMfP}c(h6`y+V0C z9>z6Nrsb^0m$ppHIg90coH_w>=^*F6Eye7kmY^=nKXm>)y`%OYwdFnE9 zy<>Kw-~vq7Xc9~Z3#Bed_{x|47^8&V$dG?!dsn2OTZzrL(ToIYJAj^xvn{Wdh={%f+N|52(|Xh1YyWmMdw(P?2kv<;OOo zQ1mLwvWc}k7shcO;VjIJ1~eXxm<=kyY-@IX;_AP2F&qinLy#~1z)XvcujoV;J= zXrc6-i!f{1f6@uz2u!j@h#n@6yt+a5x+tbC4s^2>aXSOe*^tJe-{-6OvME~%6M+X&fGQOLx zepjnu_jyyYb@9Ql$n~3Q#W07OkTYQ+Wm&ba+e$0L+}0wJ;)X9y#Nr=Nut?!U-l5k| zJU4?Y*@g|g4`@?ugcFRcZECkwGc&v_`qhh`dwYENDq^V(w(2YztBP+g4kAY;Fe}d7e)YC~@zloM$e*S*4WzY|a&pbh(Hl<}X zAWrcSQ)m~=ApcqJ0zCdvM;-{VM{1>5RbQ68>MzXPm0EYI1Z+K*L0Hp;F(T&)^8rg? 
z1Omr(Hr}EIDi2hL`=)V<;{B1eK3#kE3g=-9S5?RD@+++4cRCYuJ3AtZl&zflbZIV4 z3M=w<4vA0@%R;HC*Pr49H()!tkL65Hkjnc;_^-Wkr0p&c8ylynpt;wz6JYZ z%bH9~&VE3d0jS1{=h^jf(&Uo4xRRB!6NQOX)EW&{$Ae_7Qug#4M8`KdKVI8V{TR%|5pa1J=UqG>c3U8iq<+xJr925=3c$1_aEQ{BmejAK?QiNf^nxtcf>HU1QiBn?W5Y8Us=Rjb~qOzBFSf6>T zx#|B+HK*WN0>s#pAInT&)aHXeup4r~VrKx3nVOg@C*7qS2rOz|>mbv+W!A42uRAO* zs}1%N0WyJiesE0glX2>5N^eQFPX68A;uH4=MzTrUoUwSZ`ax(<;#;(~hCm*_+EMvQ z=vTw^qiTyx4SdB}E*>8!u9>Ynfv(~%`4E4)rqm;el7X_=O8YuP9S5dHg7tKT*|k)8 z-)ox_N5W$?t?=;bSAHnr4PdmEN~<0yUhtBQ7ws>O^1Q zoM3BBQj#D<@xzXL(H~@-OJmhEy9!5;JX-~qpJuQ+al4yK z@E}-M8QJoAL34gZ{QR}%pw*LTYioU&{`1rdE1{J1^QhI~89232a!=1;+N{}!N7_-i zo1H$EQNO~l?_+;)f*-x_Pt|`8nP){4d{Nfp9_l+G8($5IATu@n(7V?=*CH2L5fUhx zz4~(qgnBl=`AN>ZJ)bga848!SJL?520*p?Bh9vDzc&=oYSDoSPD*{|DVk+*1mulUO zXMblC`7ub4J;&Fk5_n9P0iR!$24_|3VPilf?EGuKBbYDCa)J9&kKeDTyAwzSt}DQa z(wrvH8&3a!7<&u1rsMbDUkNDzL10QsNrM4WgIP!kDBTE%ba!opv~(#gf`GI%8;nk= zA2c8`ews%K{2dDZIo5f#b(4q!*15bg^@b7LqEwszMS> zLX{_ot6>0h#=`69u%I#n^jOp#nsENRP$0+x;@C%Hn4rrXPRad0)O0X%}-%s=RL31dRpXL0A`GbgrDsZ)EL`!!*%Ob zO}VL;X6xTe!X1P1Z_`m!=oYOYC?=N(v?<~pa%b_sWksgpGH>sOortF)0N1>DW@X*e zkTWZkZkr4v>CTJYcD8Z3cSo9)T5qhiBn|j0;rESqMJ%#jV7H(Y)djybSq@?E?4Fw@ z(D;?%me&E=T8bC2kLf}iFak51mU5#n(d&b(bl&EjRDH z+rH@-CHc;+g`8M;gQL{4WVK9Su1BejW3%)P#_Fj`xUHz5;{$tP^c&^c5<5Dc*9EV> zol|e=z&7;)gFKj8`)G7&UzisCn@xm?F$(7FvI4!mPdzH?i;%7WqJbNHSHwa|4#?Cq3}X9c8RrNhz8 z_G{>-(E_y^MVk5$Ha%^&>+lgGl*8}`_M7@m&kQ4q=w%cp;yt&m0lwbYO0zwdjwrBd zJf8{v0QEaMlPW&x8%>23>KMJTCJj`*g@m11~5Q z2%;RX_(J(1gk9E*)6-h%Bsk$#U&CF=a!)I?Fso zVXB*$_y49MT&LdFCe}tjYQiS=l;8ppDJCD0uOTD;#FzUdh&%4gxEz_ilZfxn2T)Q9 zDC;X>*9H2lYW)c9~ilHh;C?f3~%+ z*6yLN?3I+ahniogZDC}4j)`r!+ovng(3_p=%qw1R{XtqFkUP(4fv%DwP|x*KNE3&U zc(!L+%`)2#uSRz~ML)0CA~8f{TOb}J4duXjXIkzyk6r{~jHfyEug8{e;t3TunS_u% zd(YEU^~+{F6X`IT9?`&@@dp3ZC5)@LsG3c2)ceJhz2Ph4y{|gdyANObbuT$mIy(F^ zmm4QTG}uT8jFplz*LmNWcSw^p2mSCdpESTB^6|kGr5E5ii+-f?(u?@ZW|f&P#;|$I$*MRHw|+G#WJB`Z zO;_Yfi|y$^w6Y+6sYcZUYtA>JsG5(bRYq1XLsEPboZD@io$g=V1tLcVa-4WzBL5XR z0^TS-e^XtqaYFw+k|! 
zpdbz8{etDcIYw51V@&@)IYuWxN?xb4eGXBx?rK}KHnB>b@@BV8`Niq`PY+^W+0m=N z!fvZ;88R40Sk^fQ8sYzoV^kR?$d~y)IL3{_Aa>DkAqjq);=->0$N0#r&4#!ytf(YM zS=6F}o3`*X`m6VMI&G&Aky4_$HyJDf76q1-o$pU%HeEByKY^@U2p{L(Ni6)j+%3W9 z$T8nt6GNGtwPdHrrBaamS#hiHorYsU;X-swoDILargta@L3+sX@UY7G)O1i|D0lS3 zts7&SIupA0>%$LUI+GkLHpv@OJsPogXPffdspC9}n(-c-iQ$ z8e6T$*%hbt+FnbcNsxNOVBCie@)sGjO^KLxHCZaYfTq4E4I?o?%hB}r6$aSK96IV5 zt*KlP>vR3hslQOH+|K*5Jpa-Q?0$`15h<}MRr+jqaOClh*3?UHLZ7P;*n4|-%6<=(_OsVtLAu8(oo}7M}>clfM*2oM9 zVgV?miV(V+tfl@aW#;^|ekZ%y}?aaLLP^S@W&sS!= zqsku7g~RWD6CdjN_wN=yItf1FiOe85 z-m%y9o4r$gtNXlSI?RQHoOzqOozJ9v=)f^LDQTrbM@yl{|8rS-Yt#89)!gylO z*}UsVK4s}{Zgzxuh|JY_X1~iYFF5dGpoEC*{}OIjq>Agp;UGq3smp*f78bw}wBFnR zx!hhd4G#0tSvZ+(uB=W^mEyEt@r&`j>-}o0lDVJCb=^M}JjpA32PGPUn!=0M*~avl zIVu9~nnuun?%!|bq8<*#W1U!b^FMkAvU=={Yl<=PObN*PKh z)<`F1lS)C)?I))@l*0Y1qJj%C6>@>Mn|CXsde|&=?Yuj#> zDCjWeA!0tlg5t45PRVc11N&01YT=JFfCZx`D~|d3mv7ed&{vyNFZ1~Ff~_#F;LMDS z8?`--x8XWBOi3OyFys>{)0|w8Wd5ND-z6@-4p5P&>L>%86huzh=Mxs%tV~~gj}-XX zA33{Nij$YNNhQ^NCs#tWHQaQuq?~NajYwoL^ol-eUV+Mz+(QOHSf4AWx|-}hH=b4r zs;agsulH+jZrL*pU^_aAE5%SxS+{ygG`d39crK4MV}uOpf^q`TK&GDK6d z&#bM$(c@oIZAAnBM#kSxQ+2Cs^!eRb8Dkb_>K(V|C=aF<@<_U)8l(1QF*DUmtfS{> zNrS5<>!dXcMMF*c#GWJr>(;;BUhO9L!7d~{Td?-sZ(5IGIn+y!%H2sw)_W$JiXLw< zcwk4Yb6oXo!a|H)$CEZhq|4wE8rrQp8_fX`IZ>Peay5D9OZ}He<`K0dCvF@fhwgpd zn@=JA$xDLC0)MJRDR1BT>s>){61dY%6_t%@1SpI zq0AS1LE}>ylBLkiE^sdNo~q>H)R~n+Sar2a*?4x%9|`>POEo5qW9?)!J(da1oOqUk z&8SHkP?uf_y$rs?yn5ux8|*P+TnCA9Gpw{p_5oLzKn~H^r1Af3cMZvLODOCbi$HgQ z;HRZeB+A#_?J`qW);-UCti=Ig$KlvMblp?vx~$>kVqeSS8hR=>pDf-}uY;T8hjnbs z^%+y<0udY4$IkOAS=)2=lesK@k)5mR+7BPZff;Z$7UAgrao=Pyt?Ew`w-{T-p9b%r z7uT~UBhuN*nx!g;JHy-9neB?Q(DJLJF?ch2+(RMB&a@CIlP1?D7on6tQ3DdOJe_UX zYa~j^Z_9LuCmc;I+J#}4v)kcvE?bE}b`(+3{7}kW-GK74a;@_ARR%-D!%g+V-I;2* z0$+FN8uf>eE_VH#52J3bT65=WJ)bCKSD+yn?1H~a(5VO zyR<&Ba~oRxd=wg@LttN~av+M+s$j(ME9=?l!-@Q{{KBr>ic1_&O@ud@V{8K2vnuj(Pg|>-&*HH63l? 
zNA`0}{*R)L9E8ue!NKMM4|s)_hD4LcUBT(6Q6OM2zj%PGn|rg*ApD>n3&5}?eP?66 z<-Wp8kS+4{@$C7xnBRIfrm+Omo$wKqcf1>2H<50$8Sy!je+J_3tAy-Nv^zExgjAIt z(9>(Es?+=NsN_fR$fz;-Bvp*ZXS0$t9_A0uxJ*xhPQX<*&1EvxOYpPL_3rtEYjcr} zCo6+?^IGRPpv>{5pv?*{Gg1qDZ-nX9ZJJhGPZAdlrVmIYsFbimrcNCR6|Dq`B(m}ea=CAZFK7X{a zH(BRu7C*Y$SW~WwF+d)8BiiJz31RTClv2YNQB#yAPmYiGwz6o+?FIXEgQGVZ7uqjR z$hr&rx20>v&rCxfWCEsF6rcT`Pe5_}zY0s<{f>MV>P|_wdy>Q20yHU{XUuNib|!n2 z>i5Tm5|45aNAGGJbgv)te$jVlIhs@sOj7brDJ^j8$-^)sp5^l+gW5=<2+RxCI@zyJLZEKj z?)>-Mw8I3aD_u#tqnU=JcCeqZ348XHg&8I_?Q8{?OxaOW526Hy2+Y^uXyMZ4&<47Q%;v^f*8 zvR8K@d7(lbRF}OkNhNqCT51eWnY&#!?^RR|oyXRPi$7>JVp=<=7=i~plrFj3u!ylR zc3?Xivw4M=a)=k*lc+&JQwF}@RE1zaCU=%d21Wxhr3c=wHRY*5 zvZkE05@z2mrnIE#xnaj|7b?dmRVc<34W84=pEc6*TWiH4@Ru@aUAB-D75o1LrRd`t zdaJ}rWgYWcT7yHxtcxw>(9uK6T~ciBul?>5MGs**H-xt9ep@^z6Iw!a z9WcH?ORh`O(J|?fRVIM0;kVNjteJ^q5{|TzY0NhBert7%C+wmh#albup9o+99RX^e>oK?0KJvI2LO&OOi~GH#3(RMSaPYDJeBY42;3Cn^$h*Zal=_#^hHCR zw_5vB6T&bv_`K5G=3%t(yQ*crP70yYs*NZUT~^}MY!D&Feo8f z{fmfJ_8Ngyn9~fsn;!|`EPNoKcDcxU=jGds($-0(dY!(Vbp|`*U$d>zW+g8a>dd;Q z9}y?|+_|7us+lVhoz`nM=+yr$o0wrP{%F?h?b^b3sXJL{-;;o@wr74u24tVw9&BC$ zLDMN^K%iR%hMTgJk7K+v!X}LfJd&TkCqThvHa7_;ioHmBeyBm@ zQmZzz7GH%Hik>)<;AYd|ADSCe#t9?1PmA83LjIm!$+P{uK3-;eX+ap3zqL=t)k%DF zEX(IgvT;uy@{`V(G-UqN5rB;9rnVT!GpiTMfRRfJxwVF*$k$>5$kyNVZioj&V)p{O@=g;rRkTOpblXe&W zr(MAQLH|2SSBkqcZu;v={(*(-m=&~)Tozlt2>cDiC)!vje9CiKO$LI(ZR|3pbqe;1 zO*kO`L<=CLD*mg=r*6XwaqcXAlU7z7)QqoR zkx>1fBafC_KNe{^`o&maY<8S#z14OLz9a%)c$v(~R<8y=kBzg)PxUyrnHCkcEF^yy z-6?vfYfGd+C4cILGPC>Blk7x8teeAjs9VC-&;3N!|9S~}<5{WtyeS1nLS1IQQ0S-T zUR7bltINAyv6)VhaNje0t}3;v>5xdNG$Q;B^g$-X1995LG)A3wbV2BH(=q95D?^0% z<;tz!jkL-C)U$x2Qf*vyc^;Bor1v7wE#WBc$i&(||5R~QvHuW{=4zEpGc;YiIIg=4 zEjoPEdQ)mHqIKBj6qUu_r$^)q9s|&I>SOFUg@v zj0@6!lwFz2obJxi8-eVpR-M=#fXxgb)zhoW>Q88s>6Rz+{p>I+xc z@gf7=#@cD!40*M(aOc$Qjg6GbmUxzSC2`NRgu%l4?(o3f9$g6x6K_Sm+*AQ&LjO zeY-`w=EDoiK#&e=_AMDvDQ+itIX9-ktWsVD=l=du!#aYu?ckaCPSIT+KulS}RHDrm zjgp#GSRw`!F?OurI+G2n1=(SIB^O1j=-3et&ey);)BXh>QS=xq3+%bElVq$XYpd4m 
zTjM+ap~+_2ksw&(?MlQt$PC;CXLYb^=uJ*S4kDg{$dv?N&K~aub4a?|%)jwF|0@z- zo;WMcKe6~`^I7SXdPSTO_a|NfUfu4kr7Ko@KjLv>J3Z3k$V|j}ebHyMy3tfFwNl#v z!WRVb5L28{Dd?uDU)lW{!uBBlT#0Ery!I!3HpNV7(d9t#zvAhgPPMy3o#EJExN^V% zX=R*k!xtUt7+5gzVaSH236f=ibC){B--B8fmLZY7H!kG z`j@`#aAF;D;g;Y`3D7Fb)w*d;??JfiTb9->faEoG;V%pzf_z;58(Y$;Fe*7fVcad2 zRNhSv$2}B)sjhCg?TS!K-e_g?XEPvqlQ7|ZM_9kHN}Uu*>A5{EH!ksL9Kj7FT}x+~ zXDm4;#azJ6AddIi{qAo|>rXrIs&Nd(_<8%SOToPUluspgKSHulxeR|~HgAzr|1`To zeA&C``P*@|D?F;+r19^opi0$c3t-QkxYJGOyj#H9Wrm-m-eW_q&d-^sWwTD+`&3uP z*+N}^(j@!$FAgGbJ7F)M2^M88eCwfmjeB*XIDL2HvyVasKEl=jB$e4!`4!one$P1b z;+8+-!T6ot;E7W(-oI}1dfA=8>tzEr#e`31SW4zuQ#7MknyTp#@#OFg+X~3_;Z93h zu4;$8wN$?VULz2^`6rj(DK1B6f?)1&UWZsZ?3{0FXP)Ux{Ew%*`=s2h5+0c3j-AnF z{Zo~I6^>$^|AKQ{0q9{}*YuqPfU{QSu*7JqKUrAJ+Kj(+3 z@aF0nv-H7HjvsWsX~{;(dzK+=X5nE_yLKUM5Fu>h&^(lu|M)HYtw18q z2P_kR5NQjcPdcOSJlao}d3y^K93?|QR^N|An)2NSwWR&@ySm>>U-6ycOF#~~nxG&^ zW|aGT+$?e^F={2ue77Qy)2e*srYKY;S^Yl7Ys$#nM|b(Y@_TeZ)j zjeF^J;?Gg+vbWu4(rKtsx^;K68UKm|m!9?ZSj<(E=F)-E+o2>>=w)`Dr2q@BuBH7~ z$0YH`?A8GLe-d?L>BIKIq@+_iNNu$gX_!8Hn*fN?n}A$N9z5~*ex0}sJuxl4)Au)n zi|*!VGg-(y-CP?@002#O!@r_$zYpGY?L4$a*beeK>U$9~9^ED&d*1S=nCu3c4txLR zQ{>dEPfm~PCpqH+;1_TvhcslGtV%<7caXH96r;@|Dd}`+&`jlL8M%PplbJ=ycSyz@ zgNPkJO7gPJwX}hsJjPY*%KfP;*V{=EG%&;UzDyg$MNY0@tOCswWrgc}Tt6ICuW0`I zRr=lD&VOW9xNS+DFVXrf>5A)ZZe5=TZ=%@$gZ&TBk{WvS=JR{pxhQYH-g$NuXj8b# zIUHI__$HQF?tJoa{8zZqQ|%pBCKC z^NG$xw!}KXkRa|JQQmUeH=VnqvOhI(q+d@5Sf2(*0mS@G*9pL5+RB==8fZ={6L$Xp zHx&qY@gSbGTJ_^15UlmT-3k7`JrBA_?-8q-zy0%nS&9C?d`-;Xs=Wo1)sr zhhIp_FSrZ1G=|!;l9H0%T3DQ0oyV|f>KHJk@CfH&8JnyeLr`eK`5qkha_H=EQ$GLvXk-W7X?pyMEQkl?7LOB~sL^R~#4g9(g_Y{p<^DPP zW;(nhAi#O$bQ{j20y;`)mlHgd93zzD=H}6``3C@|O7>+F8hn?p+y(hGknu$DPeiO* z&3CHesy|yg;8C<*qF^qDz?8}Pym02SOLcuno!k2Sq5Gkxk5-P7tVoTc6mysH;zr$tWC$Q=fXwH`w|dH_Rhs6n^kY* z>cIjhbr?OzDs|%Z;e?@lcMOwSseTRl>RyX^>@dor|K$YA?K?<+J!0Nv!4s-Jnb8MC z4kzGwk|EYmXn||n5bk%$X^op-EdjN8&4&u~OH%MeBWHntc$x*n2h@>@HYHpJxK zT^i6{!uj`Cp~K%S-{xL8G$f^^DFI}T!1{tr%ob}G 
z4_bLx3%HdTMtcED8W08K%F6hv`{)BIa6_>9THV1EC_9YL>+Arlyxbj=|AO$=@84YM zIm}bl7Ehb{T^9ihZN?`tqk0Yw!kI2hiZ+91ID^FYXOA*qM-4U(wE|~94WM#bT3Q=4 zbVha8XWH&DjxnCH7Xd#jOV}iw9&Bk@H&&$j)k|PXssLZC+`InLz6|!06|MW88XT*C zi)7Ef-GveAtvQ@`iny!0BQ~3TtuN2qO^@@iZN@J3;oOh{pPWJg5qO5G#F}m+EbeoC=6Dm&*z z6lnntPp|m;#ej%Fby*()1#coBTPW-?-F0+TC89Q|U`Q}>o@=p(snlUt~@89*W>jmW#m6vGNiIb0C^DladH zl|4_|IQcphWey}_k`TbVFZ$ zACVDJyOh}ncg^p1UcIg<&qIqEx`G6A^jtCj*y^bHf^0u>_wCtlk=_@M1}5^EdXlcP z0<^b|aB9b3Czuss$8k9#5`RBTzgAXguE}8Fe(>30K_GbOYH>%}8t^zUJw10HNfT$z zfNo&j_Pj9y%A70bt=$@JbP_n|Nwd0?UjM~n&BS|h18H3{rGo3`ONMCH$3bTY8!8jV zdoqlF`T`5G{H5ML=25X6n2FtAe#rb|x{a@VTiO#98qFjF6X#Y^&D=2aU+Z97p`3Tg zspOP@ANJ?17R2dJ3ic(Vm(2}zJ&@=+?;S0qCzJ6%jM+nV=8}#`TKg6;1=U9oU%ds zcIRA(1d8G5mjZWdcvxx=Z{7Ko&=V6xpClkEfgceK{xZMwoDZ zTJl@|dsj`HyJQu887s3-m%J^&S)aH*+n~&;Ov&~3^|{iF%Bz~t%_o3Og8zMw|6tgc zU6h*D7E$VP8Ep)bIu&Ds0nL}Z0s>J=-p6cK_u*8R0i%PN;{UYzYLiAj$86Vcd%+V_ zW)xXod`?imOKR)cua%}?d1JeeA-Aqd+yJCROnsq+(UV*N_R~p5^^f6+dU3 zbT+&<{o0CJ_@1Nv!$M;7Vnl`9h&65VatDq=G8N}3H>_vxGCdQM9vSLH~% zO%nyZ#%`>Rg?&r&4|MiJ zIlYYcq*$xJXp0(lP}~%`p&8tVtGS_FL?b2}B0}eT-!bQQ11}3l2l@!j?8k z(FNA;^@X!po-$o%+Ju$-ZYYEY9~q@#{M}NvW=4_cZC=bp`@#c*hv3s$h+VSy_a`zL zUdg@@g4H4FS)1E$5)c{q9&_8*!_|&Rtv;rVcK6mscU$M_aN3b(Xzk2jYYjjl;u=Jo^__7X9WUnS%>XPBk6` zdKYZ=d;H3%y|G4Ux4tG1hPV_kW!PHkj~ zjD?O7o-UN-p61hx5dPNtg8(t{Y zxzA|pSAC-N$76X8-Z;6^lyq?;19R~Ut`OR~^r2Os`_!{%cPeNnIQc7%s15s%m_WYI z>~M|v&^mo7Sgn|C+dzu3N%rZ!#?E6kJq&++XOt3IVc3K>J3nrmM0TP*&0Se0cKR1- zz2B822EWrcvSpGDO8BW((Cdi2&#+D(7~b`T@s&I!+n}%^zA!g8s82w^YB5)?&k}M$ z^^k{K!^bA=7tBRHA<{IbwXn1}AGm5al#e;ZhX=&xUT;r}Rmhxf*G+}=-kR~`ai0C0 zD3L-mI~?Fq0l(Wp<`wFx(eVS?pNsrDf^=;? 
zx_^S^Y;0^_N+^$hzbKPLTcL^yQ0qV5sNdRejPP_u9%N6n`Hf;KXIM`BP3pZW+W=Bg z^@r)!Pg3F6KdXr2#KuDnpjPuH7h4a1Ej)wm10JO-t1Zx&nVa5=Mt=*_hg=r;AY~J; zQvx-3vJ`Z0hxW5?1;CO)&po#$7Zq=VlbQbLp<-v97@qPaL9F2~=3r;b*~2Nh9B25= z@^u1M>aL3Zj7vnt#U3Vm#CMCIjL?}8paL81?Sz6tEL2X>@X=%8hNQK9uK?m{-Q7`d zNfx8?hC8WTtl17{*da}mY5~WtKi^EXU7Bl{K%rzu8H$OTmEpv{`PzbRQiB(Hl6*y$ zZ$fjjI?D(jI&CF87`YZU{tSI)H(l68+hJ9i@Oq~d`y+GHi!VgR=Up&ACx<)1j1J0y zX4a>XdAJU#L~Rk1DV8#L+m&REYj|U9FDmfqYNFbDHrQcjx+d|;=^W>_*UWrDCFb7scQK`;+#KufSD_>3l_txV z`F8Ka_O?bvnsel$Kq2&yKs1(B8VXj)d>s5qzJBnw^5ka|e;M6rJii;i(P zePPVC`Kpmw{B-|`am2X@Xh)lRAbaNhEJX0W@v11%9o4 z;^deatgPF+<2zB>Jbs)xcz29d6+9*5n&^8O!!#Iv7i`sXNwEf1AV-a;3H4r3lo{3`teMxbT^>YUXVIw$)RQ(N`^wsblYCA&Rt;N06k`rYl;2zlA$JY*Lfg*b%v^-FT@6d-tiihk7UMj%5ynr1h5R z&EF*mLt`#VwnvMja=Rr!~HrkphB#8-tV91GQRyf zz0|NO?e`Pu;ffc*)(g0u9BJGo^3C1IG9-FLxxvc>70V?tvuEb1#t%bzwCC>LNl6Zz zt-V`6Z$lB_Vdw9B6MmQ9LYY+eHrIaG);o`xzp^q54hs9XY-xMaVlXT8kj&W8_o$)# z`VTzD-6b(f?{oYA?0hZ#N7FUQ6)jo9*>Q^{bZ=&v4rZu-`h^=r_fSC98&YFPCRy+h zaPxu>5IwlsM{ z`AJ>JK)JkW?-u%^iV1sS9`oMe@2)+K;=gxm=lQhCPQDAdEcbF+we02jk=Fb@k&5(j z;H6tAt4tU^=YwO(x}F(%mqji9TR=kOLa|@q*!gb-es*FcR@MC{kk=_fNZD;TfX z2R)W}aYc-3rZx6=KD$obeSv@ma9Rb$s<7-S!?j#=Ag!%WB2uq#l9&?ZmH@@(Hv=%v?m98Z z`Z_MDc=W@M{%ic%o9g+e3cw4K4+f1bNPtH8C;+r$b~SqGan^LNtFLkJOYqwZR%_Ll z2<2f1V+{TmBL0XIGDC?_>X*|eQ^fB-F;N0EI$qq{$OIpS-RF$f*Tbg`m&VVf$Y6VC zSxLJ*@fvn%T~as4i564z@6rvYR@&Mp1xKGeQ1m}Mcj$gy6UJmRj-hRk_aPgUp&%b~ zY+y$)6qx&5ox)&j{8Y9zr)b|hv(WR1r{r;eUrC8@xv&Tmjhnid6}Q~PO>H?|XNw^7 zJTlZ)?!ygVKF{;T{`;^^^DzYe9$ z9lM$*Uy@m+tp_h|>6jLuG=SP1t|4=Wpu;|TJ2{s#MIL#7mXucyh`vBKrYv7#ciHby z7jI#DkWyT?~ae4kmkF%{!Z#?!ZyR|%A=P7!LQ99 ze6Atf@ZBALaW)m}@WD3Y$GT=$ug(2Hmz?@mv%PjTu&M?9svFK^2h~~pW8z|KHJ|!L z;Bp-^z^sIg-*3&Vrgj0xTlA;!RkdYbEOfgatdF?S_Ycb)U(1xtse({0rQVavDDgbf z!^kQzT#u^=zF@uvBQoqDrNUH8)xQvHadOkR`Xh}I?9iPY8PUyJKqn!UIrEs|Viuot z;L0alBUaLdvNGr>Gy)_=Z9 zVEm3e8>#?NA2}(K^STyzQf6#e9&hz23;*!DGPTJMnLM<7HED6c8KWE=TP1^dbnysR 
zU#a0UiJ0baWFM#D4Nm^%jkwRH|K+}Q<62o>;;iy29ayBxKJ}CNyT*=9Pn+b0sgBPu>%;18Gl27}HtsUhpPrkGZW?X@4_Rr>Xpz!;) zSMn(Bv3Vo^r;`dAC2v9teNj++TDER15W&%mYYkq3>-O1#G2%R|X7QkN=-e)v>zl~( zW3^46#sQ4FB(Nr@9n!=_RI<%W-a1X)T^)X`K5kQdCY4DcA=#-Cec#p*$CnRy~Z-026!mir9j| zaQCM!>i{k!_ zh8vT?jTWTH%LW1WZ@qlPUYh*Jv)D?B7Jfxur-evfM&Sgv?RukIL_#)zCF%NC^BAqp znk66;)7Ysyypi-OOs+YM7527<`mKiLK)O8B6bmhH%Ei_SH#hg4s8&bGxV4E`LfEUj z(IF~;(GrOsS$I8d9EC1>tLH8>#hgsBzh=!MENVx-a={MhAGk_ixxw}JVLKtPFvG?_ z3zJMO<+c;T<6LE!$^}eHQTFAmK7;JV|AcN*-WV6WLl-@%Mfa#8)HtHB>&v z`OAoCL@YISndheNao!&K+0(>D2$4U--7lwWiL;E@(3g&rh10D zy$JnbEeIPGG@=PlulDcu@IROKzPQqw;MIXw{Pj=4NOh#5R*t<)hNNj1AMWdWw7by2 zxidR0UflP;1xFJ5e|*nu&3YlBw8?(ie0&a<$l6b50|G-THZkM4hrNFHW4-nF*G03H zS8sSLZm`>n52UC5Fge_H1t;Ti#30D&KHLPDFOxk}H#EbtCE(-VocVSsUgceI@Be(? z>l2ack$9ktm7oH10oy}-1p^F>HAL#Ygmlub>bCrEe7|up=y7o?bjC}B3)mfX>D7kT zMrOT3MgH&0UIU+^H!68Jue)J7AdLs7T$zSNU1bWc{;MHXFBDhlzLPKH-nVTj$n=XO zH@P7Y}*~Gm1T0NGxTx#yf7MP~RwV!-1B_K(;KmPUBx#TS>1vTR)pCio)ryqBP!poK7 zTCQ`FcY3s67rX_>->Uje1`{hmc%KI!W2=TBS`?@1M|f`@Cu!qh^dMpUL_HtsNm@b~ z6MVW=o{Cg@jFa!_5C0XRAX|>xP7gZ1c}<(;uZSV7@VVYLnqE)QEmS(g8#Rk5#`^4J z-d$xYW*%W6>c~)FRU11WrFUCh$kon_+t0KyRQKsAlHzS-cf_xIhK8VJr@;d0*%iwC z7wbXNZN7Tm!4LM*mPQ=(A65iRYDUWKWOfnmCNgOfnBR8$VFf71){mOeu0)9@&3^YY zea9u;=Du0-O+YPO!C6+3j&SgS3}i-UqBquK^uiR3GN)xofH|R$u@ZAdu%*4bOmq@p zkN?SlT%8`vv)WX;R+gn6kj^(fh_fDfXY4|Fk+Y=Z?>m|40M3(0` zFxe^35*PVk-lysjcy9ENg@-0Zp=#|9c`yg-@}*mCqhxCLW4|9+EsxK}cg6c|%sB-O z5bT3Qb&J9tqdI`sP~QA;#F=o61=NUVlf`t}b>`jaNF=?EbSrn=Z1P6M))C(g@C9J3Fq701P!ul)9ZCVG1r3r(4px z?7@dYQDP>Lq5i%;>pfI{pZa}P^%PeyS;z6=OB8g)6Ry~wJl+W$-`i0x%iJ*4%L!YV zyc2~{xpukhoxL^_a5E43WS^tp9qM_YD+g?|xj#MiY`;zzEMdP|->ckU5{US#6Iq$+ z=5QEI0;6p>4tlx)y4v%brltN-4T)_Hd1(3hz67Izz|>g8{&ORx+o`xS)=Ms>5f zW=KUJWivrX=Bnn7tFnvzzTs>oZUD&vGNcsk|4@Z^)HBxYp97JV8piOV>haMXFe9|G zrzu^~Zkf7tuB2W*4pBoxKB6!;i;sLO(j@D(kVCE=8FEe`d294h3}#U;aM|^|3*~@1 z+il1I-=x1V=9sVBNxIXdL8R;cL9L$0O{LpO)c$)Vp1MRP$vTtm~FK7El%6$~NMs<$Nt61A`e^lq4dnO>cAB1xJ%|Bw+oKgGAaL@Y;bdf 
z(KiFnq0{r~B~U=%$@2HSGan}5Xgp|lD_1MkNz;fo&{ZHfk8ODgL>PIZzPdqtJUR($ zN>-9EzFim>ELfSsen6^}?KN`4*y`=#JqEEcvA*t;>?;rPfi!7(dT~FiU^XwsQ|=d# zJtHai+>8td{+K>JgfISHlwKNFwpV^2F{RmlKt(@;EYTrbk`zl-l??s1C$!NCx!NLx zjBdTr5zZ6nF)A90h6R35dDo9$OrGsaUIU2$jzHZB!r7oJ;e}4uVN{MZ&(_i{3rkN; zVQj_SLs}|Tk_lCh6%e#%W(XQ{A#8SRxH}Qq@--H`ch7z$Hbx;*9$uR_Ac9Yg_{bkK@@^?J0_?8}DF6|HL6#Nt7$K!wUz$8q(PwCyjBI@0)bCi$n zonJTk9BYf{R2sZ5VEf~Mon7Jgpi0N6K)P#K7&g=Vw{Fo!K>)e;ZLN+&?L|*0uMP(cwZ|V1ct=1F zsmmqAkB{>}lElK(TGMivtb(P8_G!PPj47|Y9$WuI-~aiN^b(HuEth~NGf`)7A381f zpUvXj88bbVGeDw_mCQ_Ut-y$;ETaN;2W4Q0jP}K#H^O+JpKVYN+s_ysx;MPL+0801 z@Yk0a3qeId^S`>dURNk#!>y8eXzkt;!0-D(J=r(fgZywmhsMPMW=u}z@9IEn=8It} zHmSGtm42&AYI3bBjd|6Y4e*nG98^tfZ0s-lQ;j5f(W}=Yz6uA~;`2EMFzD0Feuofp zU%KHkbmvx;Sn`|7Y~QcvvVEy(5-|hahMh~}0T&|WnMQu);&gKmqm*t9yl|EJO z-#ge?nHU?{ggLDG^op=b81x^%x^j`+>7ho#WXw~QICW3ShzBMR!=kC^-9Uc5K;5p& zcTe=b`F1-x>c|&dE%HDsk6fh<@wy{nUo{^PNy%g{-(64}wVC6-dRcy;}Esl52b68mvB>@Yu)F!b@op!8J>g|t9X+G$AVI}0stwCnYJsbJ*L zj0;+tlbMnHHM|B&_kpLg!s zAew7Mlw_~gYY(!f>O_r4ber{`bnbWT{92GD|E7odf~Kz^o_XRSAI{=YktR6JXcTxi z8rkC_+_3wy#{5nSI4;nNO9ixj*>jG%;5yW;wqpM$)|XV+qh9|w$F18_hn>e5W{F3r#uPun%Ab^-FZh_CY;r|~_?E7Ie(|JhU1 zeZiv*uc>yM^H>|1>pVuW7hs^)zOU8JD6?suREfF&-kaoO~At@IVuU-rXoj30QBaX%{qvQxs4;n~lm?$^<&j_{J3nR2dKC zY-{ullD_G$&iT7yL!Tk2K zu3sy)jzBAzJP*eiu&Xfyk$)toYTBv5C_vu)O(qKJ+KtncGBhCbp1WReCz2zd^ZSIy6&DxG{EetQYw?aRabr$&eUtZl9GAz5Rh)?W`67B|Glq<)7_J?R0-aMHU zqh2C>0c7l2e#rNCw7JTYuXAZy;AqwBOBKsZyzpM6_WtR@76$)Gv(@fKkshxG#Tofv z$8;DYmz|gSx*F0zlfJu44!4eDCHFC-?aUup^Snny>K6+}6=RIg8`e4N>UgMXEo(z8M@w^()}0iJSs!Hdo`i%AJbJ(@@pA*K|G-b};t0 z+#NMs*)TJq3=jGG#U%*wOPBAN+wg}*JgQ$a^E=m67t@2C<$L1k;*Q#mFI8(wUdEI@ ziF7TFQZUl&@dj$Y zQ*gzvbY-iWAR)WkPXkt|o&?~8{^Q;=v6CeVsq-=+Td)tNtAzh`A06gudbfG@GGW}ftC^TJ{hPH9*F2XAi~)dsX~ zYqwCKcq!K6Zb6E>mr|fO1a~Oz?(Qyu;!xa!TX1Vbad&rjhnwzw_8v#>9pCx&{mg(Q zE6G~#vN@l5FDNgw;}}llCu}Yl2OPKgN=x2xKu#bp=riz>#O4XzoT@48R{T5M`x_a| zD-bl&uT9}SGV}N|7Ibp$O-JrnsE+T59f{Ii;_1Hz9vmCXIjuSO5}Hp`(b(o)wP_Zx 
zZ?-w?=HLO-Byit)akLBx>!mpwDZvLP)+I5CP!V@PM#@KbTPFU|{XTcqEA{KB9OB&=58qge@{5V07yHnpD%?56c|8)&Epgm0fyKgl* z>}u(Jt;@~ZtKJidwk;4x66)M>N@yx2SdKgMIO}bCP>-J3K6d{FdwT6L4K^o+3pGFw zgdu0w2;^!~5OAd5SAGpFN(YI)O1+lT08&QR)zFcur4YG8n(A?yQ1_tDq+@_8^Hjz- zzhEFH@D1kTAoE&UpewCzMwZDL^Hsi!l!~&}5NiIG#XyC!N296F4$=%&JJg>{_?IY6 zc_B|lgjN?Ryey4qvo7?M*pwxBA^&KxxwZzp@Q2yu*z;D@Wl#;xi>O32PZ81E5DIII#qs97KjR8HW2kH4Eju@<) zwn!F(!4B?Se{v5{QV)sfgZM}pPwNyYT?D93==!gV?6#hYd91iPSZbd)zNk7qOm9Pt_P&`yes~Q&~!S+ zv2`&bR_S#q=xAjClbFf^>{f?Mo7`O05p=zdaOcgiZRr^mv6&HJ!X%hGqiA39^yTpk zqrSEUd5*W;;|u{m0b{WYyE_8KSanr@ZS4$+1ejdy7IModFLZ;c+2+B?EDoEdJ-RYJ zywUswr|^}MYwc@84P+B?0W!RgX+4fA?X>AqWi9{suS8@ro405tBbeM^)DwDG`WRqt z;;l9}=3n*j`Vgk|9LnR#;gZSS`IbFk#n|ed+3&YMPGA5ez7Xy}k;ivo^oVkgv#h*? ze_zf2l=@o{h^2i!NnAK zl>E=icrR7W>%EjwA7&b zjHcCFnjhUe+pk3B4{}QeipWTGddW9TCbPl}ZR?S+psrqgy~I}7o9X}0jNt!kzSqdx zy6eXf_pbl-6JRYOtNbg&)n4OF{dXh(zu)jQ2G%;afj8f86=N`-9tk5zkR2b3qy=V&o{62WvzD2f5|Agdz|tA~^ndvb9PDt`OHdhkxjd50 z^AbrH=8C1}crF}UV3m`_O1y6o_lMKTqpG6P7K_pyBS^8hCJlAH_0Er{_ELw^1=5bb zR$U!bGZ8t`dzUAHc{PpgUov%}#OyV@tGR*xH(K>SnU88qTH2EtN&%CAOm==n6Ip82 znW@i%6x$DX#I1@3J0u*o8-}G@Y?!VQ1%jH5HnK3IcHvP0zxX-hfIoiZ#Juv(btWAr zeJTd=rb~HAjy3hd-fCBf-ZZ@w{UmDzd=dIDD_I>TT#!{>>HI$UXZ{|W5<6YJj>By0 z{m&?qb}q4PM?R+lIYv_jYY-4L2CM`STdc}=ME=eC!ba`Gtp`O7GSB;xfnT|H`<*%Y znK--N=?1F`z;b9xsEM>1&56zA3ID)otu_MsU8db*8&-)G8jUgSnRgXOwLNFsov-2d z8uT97qOMwxTT@`yBKR&<1=_~WhhJ$)eBmuln%WoxUn) z*vhQb@#NYk9d{DscP;9UxodbFUZ@LlIb6W?;G(M{y*kzwsYlw^7}iuCkrON?1UTi? 
z)xJ*x8+pX!G*rwh@B(a%&Os z#=)IK9Y06fpCfeLkEVKRd<0QF84&!5FG{RR9H{8+B3w^^*1;_?#Jmv|_G~?P)JLDb zPPW-BwTzwEJfIg86_g<~{l$K;@BFdpm4m5$Thu-pq)yNoz}Zlh`cjo@caRGy?PG<@ z0J1DafWach1j3bVt1}4?@uccxvFiODwKSO$1JT(cS65e;W2V_k#H1IFj^;O6$=r>; zGh6$wY~SCXEZC0iVo?^$S1mgo;jZL$LCn`mOtUc-I5txRQoa5?NBq0c`X`5rs2~iF z9>XV{e>c6e(X4alow_izqf!O;4KX%+HXnpWVU!#p!y<@VFbyR}NCNu&SLzHOG&Nf- z^PmmHTI)nQJOS4#$)iwWbp8s!tOIpcozD!-e4QzbzS?(8k>AoD(o%zfL`TW1Lu~N+ zwKTG(#-tsA`0?A@F_Pl~j{I3=jnes!I<}n0uH#?G-u((L7D&)_E=J_#l{!~I#ay%h zwd#c!Bfz<4j^Ml4$r#6{M%y0&%4u?rpG+qVuXC_$q~KABWPdXXE6iGbx6Ce(0DyiT z@9Ih_HvVCTCD)6$=oQwi+i6$DtMnxEE6)t8OzWz<1rlji1m#5a=?z?Ba3o)R9Q;?F z5#`=&p^gaeLX1u_(J_bZnpoQ7u$I9TUDaFl``{&kC>9rd{7@Wg;(60?*pg4v*g7F0 z5mo@6HR)|#s>k=%Xi4&p7LGudFCKdc9w|(=*L9r;Q=hM5KZ6b$KM~Vkof6`k`{ zv0FyCqyf0VvRD7dPq~8rhQKRFV?RA}Oc9pZQ(jewke&j|?1>|?=>(IZ&FF#g=IT7l zpan*n$<*90a{rOpLjm^F^~|YUBN3jc%zgya+snzqGJClHBeREHiB09TRK>!3ycOkH zu`fl6wA;dJZ_?&TDiPG*;~dT56+-+7BP$C0C1zc$6UFEa5DLrH1Mqk~0n#N=71}T- z6fyuHJMP5yR9UkG?M6G<6D|WlV)itk6lSzVft>Gd7Ziczx-&Jwk<0&z?5VtuBk`Uu zb&h`ZO5~N)XHk_wIStgl$!?}W)#V;fw3r8GOPv+S94McsuINFXJ9f1^jzK;gj)3K}4Ix zQ?MmtY%!GZ)G=&{)kASP$I7zxa~bxy)IQPC6Xjhyj<_1yd#aI2F9|BU3R}2?F+6D} z=8lrh0#c~~_|wgZOr(C8!d3hC!1eAVJ=ovuIpD6c&qH=KB0Bc!6uMx*r1l6)VNB5* z#aHeCPGvbkOY#lj-4VL-x&LRbD~8M)D>c@&^#igO^&cJfCA2b$&=$Wdnf$A_KBHa1 z%4#u3N8rxMEW9rJ=XdVF@BIo6%e1Y-QjgVowl)~X+L8AeSo|bIVcD`RFfurp2XL3% z@9v8~MU11f^DQmEMiMPhmXKRB z5+#G(mV3nwyWVo<6O+6T?63)JFRrdx0?Dt95|?Yl!5vG1nON|_+R+C1jt?3o5C72h zlD1djNGQ1{U&ns>?%i~%$qY9W`;jvwhCnD|No-3I0l+xeY|FIia|g2)0b;10lxi`E zBprPPEN1N(80m7LQ0V~6%74j86XVkbJS!yWj7a4mlQR`0X_;TO6Br#LIh2~m??n9_ zrt{g&yJxE>D=aZoXC_BOWC^EcV_dK|Xb&YzwY%K>95z^mWctxh>8vrM^Cqx>e%5!M z0-)Ggu>TO1M@(5sh2n<)#6$c@lr|?xHtrYuq_UTG-qa8@g3r76pd{OF?O~!f#C(%s0^PK>3dpl>9oGpWlXU7uqwS>&WhuSty1n`fJ2VE7gR= zQ$}Gm%j#`Lg4B+TAYBO0O0W)CR)?28-wJDnJHWi@Pr&#ao96EoTJNBk)ygpLNUkBo z3b0TzUx_pz&R2Ny;ohMy>YZ(}Ep+o!9NB@jL@ld%T5BkAA@0?O6 z7h^EE=K!Rh2(04rycu6NYPJbbeYJQ=={x%kHvkYUGp4jVumDf;ch8scRSO 
z4J2rA{EucSictY?*PhXrF>i9nvpI)HxO~+U1e>I?b*)DlXn=xY1T>`lJy)! zgULzHr7PINYZh#tsPr8lY-s6Z+Qez8u=;hLQpGQpHpTuyZe4;UstPbuUReGIgCpvI z>(teCH1p<}6t`LlGYG*`N-%#f~zzSfR+i zcfVG`%&gLrH20>ydL{4DRA+;$;ANTB7L|DG^GHgPjkN*aRJj1o8dr~xIpd(_m7rhF zhtcn`Zj+*r&OT%pY=qPaa%|fS{MAQuVvrN}pTpeObeB-}7q6`E!&$g3$r&G-Ed+$% z8{MF)I@U~W*ZqV}7b7lJ}#A|EJ=u({*>03>hDC| z;Xi6N3;N;O9mRtP8ThE_B{V(++stOcyGM z+F1g7jxQ*r5-?E8yg9cbd$&f9E$z<(Sm3g%&J3%shTGamfyl>K4_5HgwbWVNMPwNx%1IOKUvhHbxk=8esQWPn$@>g?W;(g=Mn|2U!mdVgR0`g=%$u)7C1>=>2T-jywOA0A^&4bgXm95=>G*JgTv(jfuRDW z_lJF{k3%{YK_}{y`l~Mj(Hew!L3p!Qo_BLgZfngH`H-<2%2>;}tq*vu)pUZcMx7`{ zIoN>EA6(TEqU9>OYJJuG$-#IlYSjQ{#vsdt^P&lv7%-VXb$o;IC`xLBHJqI#0tTbZ z>K1V_ubl~k3~>|f#NL9VS4RHyQ4(&fb1tOGD%LO$6~GtPjSpJdFT0ae@T9A&yn4SuSc`V}LGh^F`J!5?1}k_qp!mN;>w<6gt0sI7s?nhGxN)O6{k%`s&z z91UJ$?zmun)}~^|pCf8=YG+waX#vA`96?LNxJL`Qi^H5bdCKpY7VDCd?N&sr@`(Bg zYrM5z^33;3HOS{gETmelphqiTK)iUyj?KCXguO~+NmeAM8`y`dvm_I~Hx>QebPq(MrRzTh7r4FF7z zzab4=GlHitqZ_gGmIzJ9shqYX*2XvdIWAVe*~rXtm4Uin28bNXTUHpt#7S5V1NHa( zI&#Y){%}Yr>5a1msid2|*_px`QhLIhDn96$QD&}P1l(CXH$Vvc64vKhqBnU za0QK~zp1WN>a@BUQfEE&gkzaEgWEq&`o4;-xO(I2t+3n_uiO#zlazTUr^)yJ&J=Xs z&n#RbAah`#7NU2Pu@9&S#ldG0h$XO3l73!kTt=a{wS%`VUKq1|%-`^@Cy{$P%8)Xx zHbhJg8QWV9CCvu`)FS%P%{@P}RGcNo3O{|eZVG@yV%|?P@=2WT`33S?mLzJ;>}yS` zX;B`q%Ih>|+~1D*?@P?pF!7vtILVoJ%=L?JMdykt(xjI7x5mOAMJHqNi?0m%m;1V% zQ2_4mcKV~CbKcmDCOPVUebUrBI-Pj$LpxT1>twU=FFG|}VlZKBQMLZdLj9x^lUk6Kl zIc=84c^rX7H95f|IkKZ$8#$Dg_5Jl)KM;;lf*rLxP`6Ng%p?=)7JuCwR+1x4--TB8PJ(3aIIa|%JXqH7muUF_wj0vf zs^{riID{jv01zdZDxg~_kt0(M(~?77WKB@r(0S{Fgr@%lcM-Sq_Zfe1i$)h~fqoyC z|59bHcCW73%VdPaW@F5Rfg)zm2W30c*;J-lGWhxEY!9=$5pQEKl}+Kq+{Jc?wk(WH zFg9UD|Fc9(PVVb>tXk>>Hg>pV>G+!`h|9Ws3cSoK;yn19cZn#*Gb@wuUA~U^e~Q$2 zhGR)NT%RGM<*#H4y~~Lrl7dUhz4rg5_q{OWd$v$_zqm#^nJ~@vEybY;w}`D2P4Ot3 z2?4a#|3qUB=Bq(vSErTQfBmgsukN7P%$p(kIvzR*lkJ+(0IWGcwePWmW3zuKQu zRz*5)Bujtqu%B`Vscj?Hy0+e#{ThE3U+zvTomC<5btLnjy^~)4YYL&tPxymviaj@l zZWO&ZEnm;4BksLK{ip&n=h<%id65Kr@p+OlsKxNZmr8=sYpqHG2MpmJ+qZF9THV=c 
zfd76>>?gYV^=}XS-*DOw7mIOtp4_nrs*UFBZOQ@)7YeR6vecd*>Dc22$Qw{faxYy< z7?@G3dTSS&I&BQpIm!Exi)qaK$FnxxYamA;L&~zXvZY20n4DF*-VyZT5#iRWs{Dyy zlT|kknnFp!RMrqpF@x&MP$?j{vR;VMm#?A}|LI;4`~QQ}UR)42(S4gG=$TZ5ib~wk z^3Oub_4$aAMxJ1A~psLpe*s=BkNLQ z(4$`aKoPsG(h}7JAj5WidyD{|LGeYPYBK1Z_;2ZIsAHXn`o;CbVo5v9>tcUKp|;Rq zTeMv6N64Z7sv=qe{G}(SwPW7yEZtL=AKb5eN-zbJ$MVCbBy-&9END zfPc5EPPcU^gs$@4{-6-HH9lkeI2NZ_glntUuknEJqMm) z^shHq`cn9UzwOvu6psC zkURbk#&2#JQDmtfeR&skpHVwph`d3|5#I9}@+v(cO$;WI!EA%$e>iW3zRPt|@(a zJQuliQ?K@PejdqEkQ)dWxo=Ly(v1pALTnFiaoW${Ea1F(sT~%Gj0v|R+*O6?GYq!I zRV;Ek=Yvz~$rGUf5b?$P)yBG3{q6$vc+VWd3Z=#lZ46C54-D|F4Oi^Hu_8BTS;}#F z-YRuuGtq)y>FJQpE@7l-+1XE3$>et1P8?Gd5D}&c`{5FVnq6HNt_5BUTWkGv1|>S;%B18X}Br zQsAm!b8^nlbvQrs-x<}?0k8wp$udV*7kD*EEhc7!g1vV$h?SSp%|Z(M&PH3Zq%qdY zzo#`7JOWmgtmU_KRU65dkj4wwDJ%w2zrfGxkeHb&mpk zQzRbCPpCx}s)ii1zzJO1pR+(JC$53g54?tNaL+2A=t(wiFf`KBUjVj_viTdp z*lNOwC9rbHq%n}J>ilq8xExK}`|c*m1mXB-wmERxPP;_?cg5n>As@QE;Ar8^vU{V_ zxB8tXo-~IRM4aXO`J=Zpj?@JOQ0i4ZpBvB8_0@}MpBQ`PM0|Ibve02{N#V2FZNejM zB}{H@r)AyE{fnx0m9vmf+sp0)q-J{&AeEsf1UyTSD#&T%$#-Us!PP}>!;vm`tGDv+ zrjAiVLQq2BqxrXwdrjo#nEWA!{lcwIHa9TM7!_=|ZA}yS?}VMXUYV#BN5XWqt1GJ4 z>DI>0sVa#7JQDp+C#0Qg0V0&BM4#I2F4(q&kRz9^7dXbx)c2>se*WoUEbzgyLM%X6 zixHb~H-96n9o3%5X&BGg-8JT^Iv(^6m5W)`CTpDMtq0Xn83SeSv}ie~g4$mlhBGdo zN#?{kur{g2a_KpyKzwdeVxXlfU6n>X`(~IM2ZBYvAl)5r@bHjXX9iwO9imur0{YL{ z9Cv4hjLaCd2KCo=)15EWSXwWEM2Y9 z4;@Rjy2@t?>G$|{szZ9BLopV0IQ zSRkwDk*Y&F1G2Cy3{k@*vwfsBG|))dg5*qOGLRvO?LkYiWO7j;eGR9VJj}8mNMAh} zwF)*Jh%$AEIMlIjGGTn2WFyHbCCr$__^!xa$L*1#&}=U;!D;>xd-65Rr;jGn2uz+T zG>@c<|O$i$skU;TmS>0$05|KO9ErvnV9bv_S8sa@@j`Pv+jDWlz- z5OrhSEou@SLq^1}!dM9cHs^JHA6s(oCc5Xrq?rt&F|+;m=h@{`RCW{XX&z_rsp zkQvTX`0CN|O^Q9e_6`Sd)Q8;@?`b<`rNqImLTa!q*~ojvqkP9JYn7`^|ITb+8dm|4 zNgrX=XhfhQK|_Z{dQ)TP2#A@9A=z=7IElX{wU33K0+Wv5Ybbc@*a@AJsci{7FJ7!Z zak#FD0Y5c7y&QTYdEpM6y^N}ed=>NCopOS2culxyz36$;mePSez!aXyGvspac+y5? 
zPqbjqQXSpOs-JH7?W8U6&0F%<33M-amCHg)o-J$-PVy(nE<&Vx$4f^?!#=JyuC}J8 zAtAM@M9U*C)(c9kt({H@$UZ(NSTJiiQsGH@qFR-sc?YZln0^#}%9cZImdwWa?6U!v zRqvqM>Wj$yI{~21s_VHm6~x1@u;1~rhD0}>(J5Y_!-Y zyQ|XjG}zr5RPzefWz@spkpg<&5H@w1bpde|cy8~YU$S`Tn08N>Db1L*{20I1AjDJ# zBKzK_-V1d{z#n($tJVxrK7X!p3s@-s=E5HGgVw{JOv5CT=ax>h*&KaHlU1G`t6E)I zt+c-q!P*2hf~s(Jdk<@ZSydCLZD$C z7|#v{oPE~i>l1Fj{Qgck10;^+5lBHg6T;szMDhk=sMTZKp!xzEh#d>36r_xb$rb9N zn4jcutuogewHDN~;FmFn_NUaeOOg~uAeR)fPIPCN-Wx_D%^%AlqOWf2%k8F^&!Pr9 z;k7YvxYavPI+Mu>xvUhtJ427?X9PF z+++__^(e%T`XG*6e6P3%Lk%w)y!|-1a$G^uYC+qm_V1qa?~dfZ^zOAh0>=J)9vE

    kT{;d)6*A55sV?-krVF%5N)g7&YD`M87Jxd^gw|J8gG*ajy)$x2J#%L=NL;`@W9e z4Cax|Yrjue_G<0-PH)$p$pfx38oIjOEZ*F-d`}s@fafWGfHCQ+lE^xqCUoA&M23ig zU$w^Qj6~n>1JZi%yzKTk-A46cMT-Zf?khl=leV2M-;7a4|JV^SKAbEcDZ)qjDtKEN zQ@D^(qicueSUlT(4XLtT3DENc6!%Gv#bo;3b8gzNoDOzr72f@q zssOwu|3?}4_mBCvD&(wDtXldj>rL(d_8&`kJOnY}E|}qJ=}k69g6~U%{ zGYY=T>-$$pp$ZYfZg~H(W#t_(05rOYb2NQCmq*JYP-zQ}XbOOtM4Xj>RpN+Vz+i<4 zk$BVTY^AvyDL*#ni~eVS`e$L%$Fie{!$TpW6~A8m??vmswD!N0w9c#;kqtrOS6aUR z^JD#&fBakf{`E3@A%3c>`C5_cKL78={FkDsk2Sl21xCpb)P2MF>OTtUzh3PB{V<;p zPIe+X7qLs8a@#>IaMfu32+y zAFyeG=})dX6!qsDbQt<`e5jz#vgqdFgLh{Z74GW=(jQ&vDJhZ*41i+81Fo~c@F>ak zKjL4196tstNTdf#4^>^?5F|i2%*TT~t`DVP90wH@l}+7Tj@GpO zJ3YR-Dz#Qz92|uknxxX9=S^XPY-_E(5bW5G$9wi>u z49t0)G7(dr$I#EjELo1zb54(nk`unm9*OOQ7sP@tFM&aQYg)b!2YlPw9kKieXII>M zg!s!%QbzOP2s|kgXN9uqg>O;2&~eOb$vWnWBq;NzUre{XJ+t+b0>C?r6uW_m?LcYVMX$CY~aOl`RAui=x;rW z6D>4xA>r|}wl}Smu|!hvFgj>ki)#_yP`WbmP3nfvqksG3eHou@5Wmx|h$OLnK7sJE z*9q}V@RUD1()gHUR6dD-=rT}I6b1t*bY1q1g+ifTapmo*GcFg20Mj*}qwQ}qqa!Q= zHv)WzIe~lr1Ed9WwWi#c`;+S{GP>i{-aQ|R+V-_jf5frm|6G*IiZA zuH6lJp%~q&lSIc6p-=*Dn{T#|J3Whu2Bu86Lwgm70sLQv;p9&VRK9~tcC-DZ=>=&r7^T7Imlhjha#z@wkmU({Ax>vcww+Iy_uhe=t3^&uO{aoRU?g%kChM1iePL zXoSIo#}OdQJ6~c2o^WKdg=8OfT4!|I%zB?6lzp>M8@BE7q*DuE0idCf=})f5+N!Ic!bi3_(?+-MIh833g{+N(jR$|uI|Kq^mX}3vD%R+T_oV&F- zF=edKIo*3=a*u{()u?8oPQz+R_WW?Zs!hpf=tHAlO`55~O0JH%vOVumW?fK@_AY-p zAyZm=0t2w0`8qvm4a7ys9_VmhTVkKL)a$7WchnV2hX&D+UUprJk(B;#1kQ2e%VF z{nauY)Mv|KF-KD#jnc{9N42bL6VBRI<@5WF&-c$zrsKYelO272K+lCNYF}lQqLRFD zrKraCZN*C^uxsdSF=PpwE*oSO9e9TGkqkb4H%RAFsq+!!tSHUr5(pPO>P;-x%NBB4 z>A`Gf2zUaIMQWL5dJq`=M7h7MRjkM1c{r?Pd0L-8nAA1{0Bkj@uLjnTNX6PbF7sbt zHZHv_#Scf@v}!Fr{zrw_$FRT=;Qoedk7|!fb&G5Lgg&n1 z5DyohFji73K+c3PR*z!Cgze5U%>6{ziSBAkf`B_9K@?z@zo9?zeQ3AevNDo+S3kFq zTI&IGcOs>IBU;)gU2)MZP8+bg)S(`vU1OyY1xK4$K_f}x!c)PdnWBU+91r=LmKrZ= z+xCEQb+{~k7QyE&>brbuFgb-uAm{KRjFixe6LBPgbXdU0M(QY82q!H_s zl5%~tFmZBn%V{fZnTt0%@OUct#A7dp)?L!e5)yw)>YVY?XBxT!kBF9|WqV)H671|^ zHQCdT_$c_owk=;R9is9VqpU)GyON*nTX>d8v!A_RJfOGd=5rT}uSnaU6VT?VFEks@ 
zO>%v*6ncNgH&1%fmJY1chS7V^h3iYkG)7fZbiZf_Jo6z#v$@^j*GIjtbK7qDug5P~ za$c8z@VM^VF@(9UoQ5IPJK?`-i{OepKfBk)gUA(V4p)PB-R7Y{3I#q6^Lcd*0;hu+ ze-E_7uj%Sz%FXo*LhhNqEZL38Ko(};9ftve^a{C7iBk`rCabSC$4Q0B6p(QYl{~wA z?L0EsRw42{{5!f#fmcJ#Ym@3=>|zPsWInMjxY9*DAs>c~r9-_BM+R`ex1+Vpmd>NC zrj#lk&Htb@zV5kELLpaQNILT#vrRN|Q3{Q)6{BXiq+ zd2{Qqjb&N_ir2t5G09j)UvCUxZTJc?ixLOR%k6ULc1->3TDc~SJ@6wYi+nx+v z;ePD8|2CtXl71SG)Eddsq`>s%`DV#=DiltrhJdJfvZoG1mg35$pZ|iA&afS}iy-qK*#)<5AQ#OXzG&iQ_ zd*;@`rfjH3el4Vv_!M@drAlu*ZNG9kJ0U;5KUHSgrc}~fg~u+{{z@K+EXrM8=!Fqv z!$94XX+2SB&4h}TFCzitMWa9ZZ#(Mvq&+HM#WuZ+pLx-=et-GV;ZOU-5#C_CQVH=` zd?@Nrps6ajG9AHDSY_0x`TWEi$+Hv1Zb@$kmgA4$2E?pCeyO<$kDM#(0zX~nO~?yo zE}QhpCBXnen7`!S$M5V{$lKo;Re!XZXsB*2l9&KCG1#8I^|Ce+{ZQEvp_VkNsXL3f zbt>k2RsD15FiwPbW6XB7feM#`j#-*^@eRTS2>(uO$Ed%yJXj4*-;iCTM}ije_A7K?l>MW0EA zz4VXwr5+Vy1gNP>Lq&ZO*y6tgL~^hsJ}TI}n{HVoBcVPjym-GxBt@iBIn4ei582m$ zi{O{ctTTEU$q&jObhh~p)LZgtdiq))XyWQZ!xufbkX#dCSNHn+f*y-M=!(?iSgC(> z7}+17y1X=H(015N7RcaR=*`M4c^q|Al{pxI%J=kvt8Vf6OA2a_ckw0KcU!n^upo4a zY-Ao$!Tcv<5#WBq{9>n9AvN+@VqH~b_GewE`JBAp4N)DtB^U^MAx> zA%HB|Y}xwe&}Z^>7i2eatRp+f5(-hF1MZ~mAyZW)d~Ytzg}?m}5e8c-?C4>&#*I-C zBURVpU&0823|r{KSTk6tQt7jxR6?m+N6*U2ncgFsB-0_xbLVSI@G`>I&elbY&2vNK zyCgn^bw4hv;s?ZxTL;R%UZQ~UA2AQ2cYQb{r||Aw{GYwCp{gbiI2qE~Ub7Z)jbe!( zdv4em!3pFn6w6|zC-GLG*$5Ce{~X17YKpF%>T=j^xQsC$OZ+d<*^3UIW$-Aw`(*9|h&YY;JKX$c;iSfURUgH@MEQOEXXBMV38Xsj5q7 z)_9GuNgKimg9$Z%Yo-^yrzC_pKuA`IpqiLI&)EFX9)5^@1_Iyk) zV&39&AKd}t(d+09<1rEzD_1oza=T08sj+4C0c4!m>^)as*YRqHb?uiR0C+~5gTE}6 z)Ege)F?v`puO4YKChBa7YJ|FRw5pe^ON=||j^ss)9L)u5?%fZKt|=T~xJ&oL_GRvk z`Fj~#N#%e7PMH~HPnVtP1|y#+>Uj3dO96KNNM>Ww(avKKx2~f#zLyA0&`S?l@TGfb z+(dv@8}SZzw{n_)eCe4(=o||r&2@?GN_wC6G}COP*YhkYdP+FoVXJqa!M_DyhWuci zlZX|*z#$&4e7#Ddk2rwl_P9s;^Ls+@(fq`jB&n?U%~GtxEYhWtE}8dLhpk(S+S^h| z1)p7CHg>iW2^Yscmc@@eNZBYD$C0iRQvOYLhHq*Jxr{#?@w_xe*}x_*vuwQDf-ro9-=GU#mgm)~r+drFf-RU!fjh6`DoNS=K`u>z z{_#eM>?RzNLXh3wc3-Sa=eX!nvFD^apa}C}zpVN=F16HO4)FV)q?Kc)7YBk*13Q1J zqqVHB%jB82x*kCY;D#*cPJ{z&k 
zb}AU@B?X#2OkYeozIVFj{x?9y|M1{rL~X@F!&yiub!n_bIVB6hFIJM%v)3Mx59b9C zY;-Cn{44*CeQzZ6LYu`xFrA-eh&w9YC=%pVsZ@lX{=+^Rr6h|xtA$?IL&*BKe$2${ zxTuMid7mrLJg8 z9hUH3{xVzMDX+*F#9OHqO7j-7=lrKMCvE59^lKBfjG42?x6_=yo~uHo`ZGEPaSJ@N z$rYd191x8*8sqy?b7~U$vh|wBT@ruQ=Id#ZDh_uVHllVIrL{O6|7`a?^$hHRlVtPx z3dOxqL)>W3&z`)BJPC#LbLY#;@P38Y=1BID42RqbR-Jr|et3+_*ACCvKQB0cf2o@` zEuKQ^gm%asbe_<4fXS`n3qNvT$z{SjNOH3gD;-~1g)pU7lt@&ad%d|4t0raOT(VO> zY#M!v9aBc*5LbSeT^n<@!zYJ2P2JzSGTEU#3u9bPSkX8*5soaKceQ+MR~5O~9&(nU zXuqau#aU|g^|hF3h%?$I7PmdSk0z9wXcmmL{Bll|BE^It2gG2Zs`-b-f_sYbwqH=~ zfc8hHq3~iL{UCg1Kyc}ekB)}>cmRG<<7fzHI(@A>yTuR+ag!#zL03Ef0NP$JO6(m` zPrnqgN8K6^Wor3si9Plf*<9d*{ski=J6&ra$Jw#gQWw|-so^qeC3<&120y__fbjS6JVK&zjUk5HgxWgii#Y{mM@j4;5ph1h*|8LZp-nq!a6b zU=FnYbtmurjEB($h3l($r9#3r%m7Lvnq!S0>l0!GklmLH@_;Lj1je#sIj$n`Sle7r z;#t|W5E{I%UM?ZhDO{?zm*lY=3rS@%&$(BK!a+)3gYUl`)wa*ra;5F{Vo&?{_{l+Jp?=)Jr>WCK>;3BBQy{A*%U%uff zX6VIbW1aTKrwQNyz^w7^@of$p${*G4B#q+yFb=A*&zeN2x)^zUI5`d4LQ?B;sUKvu zEECmw-x`WYaQV#ACzVE@DGNH@Ov&iWjsyw+T=u(T<-5sIoIA7k=nYfB&PM3R6N}5j z4@Isg<&6}Uv@bexL=J9Er>Nqb^yW#VtM29C$TmV&DSo@Df70>=T8A{ZKhKhD^BSj6 zDBIGE&+0W6yYp03nQL?GP+bzL%LS#Uo-y?)-%|mRt)2-two^{Cx_c6K6`@|Ddj+E3 zuR{9koeI0|DEXhlv?_Ef-diZ%9x%>l3`lI#?9pW}wpCB3gQwA*7j^LUhr+rKM8*SK z!Jcs!WKRaUkb)Biu$3&NQEbby0l8IqI*qXt#Q(@JRnHaoSqNBN!8h7dPR6O!hdXn4 zEs+kt9}k$&4}OIA4;(Rp$*Q0k^NRhxz~do^iU(M+w`Hy}w44vs znV!{{DE-h<(0wWu8~H&XX@=||pEsEw`^{gRIYTk3>(C=}hFjSI%@PVamq)1H;$R=< zG3|>Zm5`Lgp3B_%weN4ghUR6-TL88;ikHlyXpzwKyXle!c#UBnv7s9?!v6@`j+lj2FkTuslgV>EN70=2ua9T_H=V69LUYGZF@-ao%Z(+<~ z+oO3H3a7b6qDQ13`n z)}$Cg17gO}Po&rv?~$?6S+rRERUmec`eSpv)ieI6V@jkkVeGT8Nq_f~c_n5v%ZKK7 zVO@>2-{f+hV}mW~%t??WuH7zhjtV;@TT7)L1u5oHPO?kXtK3W3NyiQPxIm?b+utk- zo)drjCN$I7@s3$LY17S4Gg(qUIf;3FOr2Evn~oC-koY!sr-~X?FS=AikL4Noq9TMz zIXnGhW@`2qf9}!f>;%_gzA4&MTv0t%so~+EnsCkL^-OTIl3?&f@mKxGR-XLlvQrWa zCK+>V;b7$!c3pz5$1vE}Jhqy-TDPS5Y`OqV{2g4BBhF_1a7G&;sEZ2XgUwHmX9C?) 
z`m{eO@f-oL#DfUZGe>Qt(HS1}bUR-5bzopq#M7{sR9FS~gMZ);n*k~Jm*U-iYF11R60G|FnkH$hV-L-lG@}b&Xb^ z1sGYgveUPKbSV{$WyoUO3{1@a@Mg?%aRoPKKcyJnl4c77}O5ktSig z`|Fd@OiwJx4_HGh$EK^OgcK(T9(J<+-j&I@i$MYhF1)v;8Jy`iIhn+5;AU*Nm+ibK zRX~U?vf~(L(Wd z=wOSXc-6!d^P`FURcq?mX1OejHp4^70ld)bTo`SLIum>)DdQo+>YLNXysoAM6LSR3 zl=2o?jJ#x3%%63oaB!X=M_Su3{E`JaZ|R`tfYpX`Xi-O0i}`LX7R}-wkF_n_X%Fmx7!Wx%{B{cU`o9*!q;)?LmErz7l~MVz#d0muTLN(a?GXZk!dS*r zIU#>;a#5qU;*EzOD989r?mWlfN^HD(VCYW#~LXgl0<6SWrc5DRX&C=>K!eNSgD z*=>GoI-t0?<@4FOa{G9rMmb*Pxkk0co+tVhDk}!rTuq}#VgMrX6RuO z_*+L;ucj24^GyU%S?h7jtpl8K&%OWCUK`s68p$o}+zIj$I!LdGU?gN2Apa#w#Gd!5 znjq(u_vDcSM9SP#g0kLvlGg2KC}%PMz?cLi5SM(Id_u`c+WG5_8f29mL9eiB%lTE6%pIjmP0| zvn$SIw|1%e)gN8lpc-Bb%IatfZ0ubWrPh~-{cgP@!Qx86os2*8 zC{%XJ=~xR7P@hU@7-DSTRU!%%vMQHuUTW!ew^O?dW;l0a9-c?PozqnFQpJ;cylY6S zU$ORkejQRM-yJGb`Qr9+M0u&h6q2OY;?aFH-yyWRJuaHZmWl6po4wfsZrG%>6U_;8 zKE+AJhBMWvUa|16x50D3Jz98igO;dLF(+Xf+ee7nm$z%Y*cW+6YN=xi#x6AG*oWxH zn^~?GU0E|01x*Ubt>IwcOl|n(~u{+6HD6hR6d9w zPNF)^H@@gm+dy<&HS*+lXyrP6>rDz%=aTeMh)dS_6r~wsL23*E*od-FhhVsjw4mb! zqbMFZ8N`_=mJ9+AVt%x{_d_KbM){`&^;CX3z3cH;*tfA)8OISQ|zU(kUne4B}j^8cG+MtNnl7`S} zMBM_tAG%O5Kjn-4`RGi~y@Q_}@hS#7C#*L7ki(W$?APPhJSuAj18I#gYVJ=h*Bwu> z!5VB7z7P)Sa>Yt^y`uM$O_r>@od1R}cNY@9~T*vemXFYM0$nyn#4YH%^5M}s!_vs+-92*xB8kfxhDJueIL+znX1 zG@1|72KP2M>XGQ8NPj1KY`gGi#XX{*VU$B$_`jB7BLs)@8d;AyV0)Mfg9FgDq(dgB zeg@T8TQnRARVYlK%enfg*nKdKo6T*U4Ken;z7*)bCL<-7uWqgI^r>mCGK{$1ad(Rg zoA1GD^#q8^r}X7h!upoC9TQQdhlV!vmi%p1Z*7(()|YT7gu&Q<1t( z>BQ-QC-;8B{MGEPk4U@@66HZ*T30lNWg*Kb`DwlWa(qX(G7|U#59F}rI&4~nxep5m z_{uGUvtbIpqQz8&cLzR!Q4-BpCLW(@nwQV9{*)4adi2^Ys+q9Sz6eftk#OW;ZTVoq zfhCZPFf&0iC0loiGQ6I0pBBCibkqv^^mqrh4H(e@ViR_M5@G($#od+)ykj%>)in)~ zwL@?yVuFAS0znTHC;;&X-#Z#r#{|HmUh!Bm{Z-tRnzD~`<^!@|_Ulcl3jR$L;<5ni z%%j6us=^1byY?2?7~vfX`vhV26tCI|Dwgy_KOqRFqQaSBs|cja*gFLVVsys}0Z0#! 
z(~gQ2l>-5zG&x(OkMc`6txe~Zly0{Kyg$I>w>@L)F!4uf-@92dF5wD#5xJ@4HU~%j zjR?H#%K>!iJyj*FIPrMDV#zPlonN((sGXbx_32&I8vsBBsm&YB^%|1u%V)d!n}aT0Din-D#A>+hBo zFJJR!mbr?E89EJF00)x0p}=Ty7QM?U3z?{eeDo=|tM8vv$BynkRnDtzXnfwPs~;TS z%f6m%y$|H>H*+SBnw_f6Zc*FY&vP{_?i+*~ZQIJ;nA*`D|^f#SilZ8t| zE?(sezJ_fbIpM}r6ZyJ^LtwYY(i3o_zZTCH)<4<#OdKV_qH9_E)s#a{e$Sb^GX@!VJ8bYpARtPUn?#=FDiw)ss zM%H=!qEDwgcRQEkxrLF17hBreqgnX|`QSN8DEXc$tu_M!FuQK7s>^iq^*+Z6# zH~NuD8KQSa(L4WgH}>B_%buccIH#Q5eiu4zs~~Po1G4J>rBRl*YQs}I90qyzGk9#G zv)fcMWqs-IWDiaB?Ef7OT&#>J(1B3{2PZ=a_xKR5=sx~++X2KiR-Ln>;C9pNz?)7H z+84iko$aT1&X~^Mvdh{2%Zr#;K#&6aPxRJ67Y_MXQfjig>nH}D6Ui22RHbPKJ{L=0 zNTu{cMJyQnDqhoFi|Hvg#RXH{wj~~!K4+Qv>F8)6*qw8&3~Dqf zFyMFKG>$DE@1&Wu$%rmPbc0Y%vUUC$T!bffAF~QS&E*k5JI{XZoyO|;(SH5Y(rEse z47qsb_R?nCn;VlTi(sxRKps~@LWW^;_>q0#qDd0ig8P0&uKmWa5yIp&x;VbEV^n8QG_)4f;qxnSUzP5y*e^imeb z_N4{GR;;X~a`W>#S2T*rUxtl~EgV)WYnN+VzQ^it_HXAJyW#ONn4BS1xRDUp1DB@%Pcn5! zk(AvVHL@v*dSNG#4EK|-D``09Ff5v5n*Q$G1QTYYtf%bQwjUVA)%SRE8Cnz}hW{z) zH|vqHL6i1|trDwM;iU~d!ryzgKec}93}7*%HwpghWtEXu-Rw4qmzrPGgy?NR#POFk|;DdZ-jeOxm8a1GC^I_ z2b1l!Lb8B_RRJ3~|ANd{#*t8iqxM}%aQ5KOc+VltT!hyyIvV{+xT|S<5`%NJGyTtJ z)8{`u<+CkVzn;>4t{#imeG>{y;ihIvjx8B-Ur7IX-K>O6>35cM7-H*KgdEqXZ>**A zrKalS+lukqy_u{hKhDT3Ue#UyjJZ%1mDZG-@zApk2B5GfA{J+TSyvD{bwP3Q)qxzD zh1KCfjK-cu-#&HHLi>)<)S~I9ocTYye|eio(65=`LVJ26Jh*93yqmhg_|3SaEgO*y zbh6==OH>=v(F%u0Nzn)G??%8|Y+%-iy6k`eExE%ePmz??tEhw>rL1^y040QWz8RemN663;`{e zuBSlWRG*t%F9CTP0+?Hz3^fDM+);IyEOyiH_sGrO>3<0%t@|h!pTDXAP&-E+y_Shn zA!)n71oLd6Bb-D)J};VB+I>}iAbLO0<~!|4aQbmk9)-Xi_O=Lt0b*D&u?-~>Zkoi} z*LT(aqBNMg@f=3?@EO752O0kYGuXHCI1_m~FUrcNj}ns;x6(6yNMColS!p?Rhpe^F zwoP+>!}nv@E;SY$^EMrSa$S>n2w%R{qI#vxQfkIOcPR1@W!h9F+j@6iaN07f9Sxs6 z)iZi4d{jR@^Y-a}93rW`Mzc*N>RpN;DD$TG<~yUc5vqbZiOOWIW>z$;qEteL1q;!T zhTfuKZ`sOV)V$N;U@IJJU#``Z_SWk?7vDg`uI$(VSjX(KuTbul+;1jix*;C%2ZRDt ziMV&=)gO+NNU+Tq)d|HErs%q}#>64tI4oS)lD`=YI2-+WUm#~;F`Z6!$vTtgXx?ZJ zS0Yg^zNb3vH$`1waSxg;u_6yz|Gi6I%)A}2z?f`TX;Oapkz~cv1*9kUfV;2c`^&vx 
zk%(-XR4dniB2k6nnsP2Oaolj^L?_2!tl@9WfLmoQ$>kRgQNm3;QvM1ZvFr$@om<}O zTOA>9#d7`M%@M0h$e!H~1|wRdCBR%zQ=gb;{_cKMB%4|}K4D+VVevh!YG|(lKXlD$PkIB>PQDUHz~I(<>5IIPt7jG$aIt_@^^ZA`Qm?G4RSn4$R`5j6 zXs%tZQ)e5*{@~}=@o-xE$KdTxVRPixUU1U`%%#r!hv(6lZpM!jLYd2?*d`(JKWYwm z5^ZB-TUT9IuaBc!QUK_;Ec(wMh23E4yFXz}A&ykae$MnZO<&p^BrdF`@O-aN_*v{R z`hT{b-oG7977a$0b9i&C!uGFu+tM5TBjBV3)TS_R=tMtoB52S;@jc#rNOq*}BlPnc z7$?ac^zcA!jg4O2Ux=+}ZLX+>pgXap1-@soZ^3p%W%y3AjJM~mt)6j}k3vAC*{RRa)4%((P zad|=aI_u5*&Z|*FaY>y{;yb6tL|$B8oPmd%ze?u$Y#;fwbq*BPPmmeMdWs6Vxq* zL~y)Ud`kRYUHbh{G*88{pc%ih&)>VlxtB)pF8+r9Xbq0qNa@DM>hIeUkXl3c<;J9~$HQpbhN(W@o9s$2u;{(Zf<2du@gn97+#bt=wh$2-Za zZfbXQ1g`MYnP0ic^&06`~3C7qX^U3Vd~1Jy@&dU-+24QTjl%CCiJ@oZAnRG zE*N5sgCn_XaCg=C<=RMnoAdHnjru>!J~)Ek8wO~jRHu``>J0xED!FB0e$s0(KUrmJAPrEKer90eyQO0fy{Ky9}XuAS_nYC zOh*l;!ier*YfY>Ji$mI=QagL+-rg?Z64_Lxq5%TAhMQG=7lTr6lBWSLW#_2XPRHGN5jJE`AiE0;65`BD0TK@~|)52x04v85-PUk7fZ z428i`z1G5i03AAPs;81+8&b9y zeo}!3XL?3FuJg@Hji8b8EI^rM!myD_|4pl0w`b>7NI8FYpCVvH{nKzeckCdg%fU)t z8^2C0n>sAJB72!pL>Hf8LF}{q+X@e~BX0rR{q-!gn{Y~UH}MeI<1D|C_QOos@nfxEYjs!U zt&6g_q=i4`-)YVc{DyK~naZMEQfv{X(HN0fkU&MtSo~ASBaBf zu5mC$D5EIaP?oItPp1np++i8rUAZvE!MNUa{t7mp>5edKx1i*e~12+ zc#P5%)f7xy&)CLq%5}GDHJ{v?$%4OnJ)UTkxURjfPQ z?m@Qq^k??UFUJAbKdGl{?$+i{2GojEH{jvAdSg~IR9pkMkGg0qrYkkENaT?EqnGhc z@@kd!kg0`@;sO4qyL?{pD!;*RKGPOOWfazrYC+)~>nGMgvufPi5c@SuaTq*U0x0X& z3Rax0G9mc(xRT~u^+y{?jg0T|w0dNwEM7c{eJ^-4bA;`v|h zS)Hwm2o&*yW|mmwYWzLKxGp{9(Sor`A3!0bMSnZ7**;GHyaxfghpQ8hyz7GfnvUi9 z{MB5A@98oFE`!XE)rBGFG?~LAUY9-rD4g>1MqUj|cC7*Y_3Z%ST`34?vcxDnRB6YZ zqE+&ue9;Cf_odZ#E^(j4B!3qhKC$jq5DoN%az2Bo%Jx3|Xn3|&c1uvMhekOSPC_PS zhpNVpO$2dHhbi2;h-qg*!=&8}QR$P_yVR*gI=^Y5>C3;dk5<=>iKxXBuyoZ(_E zz7~nC7tF~Jv$X(gF&9s++oH{zjJh>79|&yR#xPiN$_m1ZiBqI+aQ`Gc9X1|xqBRd2 z)p%66e=|e!Z8gUieReEF{u%6dsESETgf>qUC=itk<=bkfa5Wd&fd{Hw0|;`Hr6(nS zc4@o*X-A6djrDr<8oyP+(GnHr{QS@-BC0&#HFwXj(erUn8^&BC_)g#r8_kj`F2);Wmvyy{lyb z{(J0l{idjNCzEt9`CFGln5eXPpG$%k!muWTKf<)_Dpgv38E02^XEBMhC(VC5*wpVj 
zQ^fzg;?1B$hLgfGGT9W_c(OKSvGt)Ga2gHA7K}TaiZH_Mb-bUXIMJM{fw6@DK&>0? zmT@i!;SXt-j;7Ra=lb)_Q2Gb8Bc``+apk2rh(9;V?P9k+d)(LvrUt zcG0AIV4NJasq=^IjN#Y(x>s69h^K7`w98c_8YH$4dpZ|-E|Rn2y39UIQm+)P=pgy) zMM2(z%k%fVov)8ML9I>S6;&Cx@^~}03GPZhWV%^qVo}gpueak2u14<_t2Z~TaBBhB`S zpN9GX=k5Vy`@p|{G>7YyR}249Z(O(i_3QFbB8OyWl*3;ET?00bzZm%&EFe&i+Dncfd;lyj zAHZrNZxit&d$k^^C_;3Xn1p0NV{hG|?X&zXTvS0QT=dh6kV}>v_LOuiigG8h>ofC& zp<%HJh8w@LKk}RtZC;mV3$caaUQDv$28P84wR3FDg^{-MCF}!#2Zu(rx<=x2IuBYw z9we6x!sv5Y$8%;=YGrMcktWfk{=~G;SaWe~`B|I1hCE^FI(+)|Qk^9JkcR`#$)km! zN4zLF!LNpoyz1U@YAAN_LSjFC&(ycivYi188yu`&e?)4!bb>{cobM;UropPOwi1*mC^^b2e zGk39H&x>@f(NZ-kK!VHz?7WWsag_$`MCvx9rd>&HZCvjISz#PVzY##uC#neqV8<>R z$27Ob!B4B2$H=p4_irbDRv`cS%m4Fz%~3^*Q>$hA%JPj{uIYHs#}6zZQ*hEBA1o}a zgjvNmtxV6~(Ci0DVf&iT)5lY%H34rK^BM(EDN|~zPm48bxS$;0?nR~a^=)lM{b>IR zcz^&jx=l`}EWZky`2QVQ|9=>T{1@M@7W!dZgVcVc@&D_8@PB9Liz5NPex|?I=-*TK zfB(1t1F?@$Ln%jMEI3>4X8#|nlK+3ge&%zw4Wb}DonooMmJFPfG_WpLw%+2^0(j4R zqzZfBYHDc}Ml`jxi7aX9N|5L`^_`gv8(V5IxSh^raC38))VmT@7}fn^VP!G9=*6@7 zt%tXSeI53pbp5URU75C{nvRZScY$m?!%Ur(G7B?vPHt{4aV`Vr(y(Z(cI%rDO!uDr zslpESOVKU%KCOm;+ZlNShc)o`zcKiFTWIBe`&zrV`cR>whJgXGi)bp1Gp!<^U-W%= z4~Dy*1|x25XPOCi8~;T5y8}9a^IopKR9?eB0FzBPG+sr;#PXIWzP7Hm*wv+^3UOCW z(hkM|O$Hx1#+5oh=-i}|!A0Y7VF5ezl%Dk>`K11?KI{1*yfZL0JxI2zC!~y}%o_Mq z60*~Wev;psM03Qy4L(TdVct>oMjXIC`bx$F4giq@nOf`B=RBhh$}h2}z9+ z>RzOjvHw>b5nDK(DHSd9sH5;k?eX2MF)+SzJv$?qDOSz1rot1y6BtbvR<+%Tl zn)#C*-4x0gG+MRtr9)cnG5>;bl=Ilt?>5LVkq6>BfZDd#^i@il;`>WVr@@>ym8G~2 zO5tBdcjIMaUsX!a!cA%aO4siAZLE1uV-8R%XDoSSA0ON4wc2N>If}!kE@#cd0;4q= z9aLIzD7h0j1di;`oA`?Uo28EYOeGkHc6Fre?RKifp2}ZDNEJTtaEt7FcdgBZmDzrC zDE0PcJ?kEJGB6y66yW^j;KJhSO>~e8bgW`XK-P*<$Td4JNpnLMDNXjr}$FDRwK1>`2v{CVk0Mu3v_$w;eJ4vAuM9R)9t)&l$Zk9p%EZ2V2 zV*qo^^aBl4d7XCz=?zj{_Q0;Tdoj8AWa<65B6^=>0#@)|zWZU{evnj3r}OJ$&Lm(Z za1BF@?1Rk7;OevgkO)Ubz;O7rxJ~Y3N!0gq_(H6->OUsC_W?9rD)+T#IAvNVcA`p| z`uFK(a4tCw2jqMuN|G~dH`7GJ=4M! 
zfp58Rg$V-M>owDKYUd4!Qg+0JUb{OxyHRbg=UM`b;B{H}WhON!hX2p*2ly#Kh&a1w zKFp#Hh}#GayzM9o-GOT35f?)3@ApSObdG+y#U50171rGS&IRS?y!qbmB?{iwk7*OQ ztB2NLgeM!-DYxzQG z9IxS(zoSy7_kq;cNp5M2xj@7945z6fjmCoX%qOULE-IUGBina-=Qu1uoN$PlJG7X8 zYo~^_;UV~i)2LqR*tKa=c<7FHmH=aSmONi1XF;if`R5?Kd}p6BWiNv6Nt3vO+SNlM zx@p)q<*vSZ95#-^kOJxN@$Sop_tRF!exdIZ?eeKsy(dcAa#*q;5*&Lt(r0aEOVs(& zta==AK|hAzv20X!9efxIH^p7FhGXtYPH$#C(d?!C;e^M(75M|GeM>IT+&YbY;}Rfq zZD(i!vX#`b(2g1M>S47H(`b>HVf*IX0M5sp7WBgi)VCL~+Aa{+)&zQNUUt z3`&McXdR^~tm9#VI`!^F1MB^4-wbUitD)y(Ubeqbe~g>Wl5@gjT^-hBwRuF`UEPso zO)tleN>#s3@3#&CWD&yYjES7wKkK7^Ga`fwhAM^pX7&K%yZxsz~#*}tu!)(rZUSaCN7(#R= za#|?!6L7DQ2r;id&F%sYblSs?XE_%<8+fgaX%|1=rA6!D+y42xaMius81_vyz2HE_ z&$RW%6)`D2J>d(>qu6s}KHc&I_|D$j!qY)}j}4gbaJ>yy_;!@gFSMF8+GhM$Bcvb3 zbjT&{a(qEvJ&GFr-yZ)_oP#4#ClpP{7u~JJs!cJwolN4D^P6nQ($sz^rd%&Mf~%1q z-He_Y)>%fzzI+KLAJV;`LfvGuRAT~7xd*lxgGL+befqHOEw7qfg<$AC3l?MUMmmqj zWt<0Z-N8Sjngs_AKm69TV-EE{c*(95H`vb8KHn0RG*ux_R;p^zTcBhL5Gm|Y`gvJ^XM*ZM&L5|` z{;3qxUA&-5Awt(Ky^ujA}^Of6JOd##S} zKURWm5WL-Qb=tQ6n11vp`KR0Mn+oX(gi#&70OuSga@5evb)>NJ4)z4HgErYD=GUAp zc0&jaAtm`~f9Vnml-Zu*RbGMT17Bv|7K?+&%#n4X*WUNhhd`|RyW_KAQ}rEcUYKU% z7-XAp0u4*VL!$`pci(ZE3Z2Inx7e-XINv$lG75HqE^G5@+)P&gh>Mq3i!+IsLl@Hh zMjoD9MTr|}xpzv;l_*x(Dz5_|oVm|7SMiG>8@i9-V3YycIjraK zks$P0$xAXz)vKQLGt^2wh90{u$s!VXV-4STDNn#Y-_VM#>Mf{BVU^800j9}UCPDSlrz z3eT8flO|r1|H5HuGXG4a2+i=);)N2xsXP6X@IoH@2hNL*KT651JhBu! 
zs%xd!vs+h?ib70xBhrx2!xI0sv`;LJ0Z#=@M-l(Zwrr7loE6_>R>y3amPllnJ5-AM zIL5fALz$evR7=6;QSsJ$-vOju8F$8(z^E*WC?5Nj`7|(c^%h+kO?J8~Xe^^?tq?Jp z$k6qxl5}tdpL7+F?^KcL&V}4(d()gnet9uN^M$q6$!bYl63zEFT*X6OFsG4hF+)k z(#hd$%}?Zz$e>gTfXjA`mZ(iU1|3>JU6nk&bP_ZRd*#W}a7znI=Ad><9a@;sw+Sbt zRt(83;kb%;XPpis@1w|K`Y6Bm;D2J{(W0w`EbvQIkq$TO`6!cBWB@-b2ABJ|zpWp2 z(>BbzH`eK6(`lYy*JYaDB1@MX;*FEFkV0+$f8l8M1j_&H+vwpQF)Q-B#6EKOd^W!K zSBC|AFRQB4=KPUwv`*Qs>A#=m;dvq)J|laH7<5-|DBQ*;W`w!Am7t3;RsG&kE9jM0 z=q?cPKnYr0e~WQ}DZ*$o{Tm0k2);eUA{5>m+gVUcmTrelO3V5y>ER6_s9wnJ^-YD* znft^iu!DiZ#8FBg={^EGbS=$q>1V(mj;8Qn6M9Vsa2Z=I_#G?6mWi zRB0H|iKK4O`ij%-9`Y{iUkDqmpWSCl0M`ZCJD{Y&ZArz!^#-8`*=6=u4<%_LO*?!{ zy*<4!8DGLJ!+|{3dbVK7`+7bq{WI^=4Ao}Z=4S(J#m?V_! zs{My6;bRB!$th!xlztHnJl+8pvBH{^Xt?jpjz{pz~SwJN%F`&^Pjc+^#Cv1`a zjybm4>aM=-?0u=19(8c{AF!5U?yIod-;a)PN`^6}4{CS^|4O!*Ne5@AR}-*8#$H%| z6o0%dU2^gh;G{Kl9c%eIY0T~B^6*_g8V9Ul}G*W2-|KFxc(o*!DLX0^5* zOQ~!Cm;WimyGbFA+V~rSC`N9VJp~Ib&lB+c#B<5uDJeEdnCO*>Li>ZC%!u3GW|VwY zG3^YomDSoglmQsmxS=7B+*0YXVVDkHtiy zeEtn`P&q4UZ?KvEQ-hHEmetm?W0w*lP_mE>mit2szWXb~Q74;sg8JjwJ<;a?Q3n`D z=h=8+)kShYQ^?y`ZX(R$Kd!vbRgCb{W5~`k!JGk=g50Gm01NxI2#tX={c+`7RlTJX0b7?w7|} zBx%?`p9+w#x1~_@lu9Q*lq7(N47&F?|G)^Z!NcJw8b!X!z zv0d=#5pjF|8wz{edNuvs6C8Ln1?x!(7Utkv9|B5EFzdyZGHp@Td6Upw8174%Om|z;_j_*y9ogawzY&|QZ%jxuIrR6lBJ4khEy^7SC`5*3yWIlv!O@i{R@aLK6 zL2uYs^+jB>K3lw6-r6ngeDS`ndUb*L$7o6Q9gYiT*KGT1d2& zC_6}6GG~-+8M`_kr<^mqS~hdn^FBGc?@{)*kb{1&rr$xXh9S8ehW$>*I2+nJ3v(%S zXCefM0B)R1QIEu+&~?r#h$^##iOp4K9^Ulclp2`s)k$H+Islx6lgXaTfBO4yk9@}S zCCisYALw}*X?O)<@55K`LHsRK&Q;uTkK1A}k~NukDTX*19{swL(B@!)7SyP0`pVLQc%I)aFMw;9SMcia5&3aECzo{QZ0ER&mudp ztzZxDZ!bKs@2dxI$z0<11C?=r5IgpA;vQ&vcjmIUd6Ov&Rg635ezje{H|YRW#q*1} z8ObI4RQ`B#ookFbaqxvhfP69{=)ov;@PAiD?B{#YZAOsVBe&Od@t5{qKuHQfN7U$4 zgm>|c)NX+P-AaGjzf)xEDo{m7aWZ~>C*jWY7mT~( zDPyM(iuqJyZQJ8SUg9f`lj}oj|H|TOTbZQ@L-QHgs1CB2<&G+g&+~FNCBaP;)Xf0` zPKBxa{_Vfl!kQz2P}{!*R)FY|OZb$+Dq*1>-ufCJ^Gwpz;iZ|D`03gO@9`;|1Q>$H^fL>d< z&#g8ook(Ve%mgj4BfL)K;1I%>z9#ryL*PpDiA#GJfr8&$d5Y_BDC2YqV|uXxJfrvA 
zvyFJ&-fQ31w6UsE{pCsFog^q9PRB-P?6tV-ig7w4zBTc`ID5;eIJ;$CJ3#`$-QC>@ z?!h%^;|@WBy9N*L?(R--*Wkh3-QDeb^1f@Wv-VzlkMW)Jvl-)QdUVe@t7g?**WF`* z#<}eLj}eW#tF8e;*`;dGoV1(&nNC#y>=974tK92pCn#7BTBt17dw3VV-GsEP+wFK# zOqr2a_FCd{1@@_+%owxLo#lWtxau>GfUO4{+Ti}#K+Vi3^>U##uCL9NRLuI#_tC+<2sxI0z%5OZczSUB@!pS@J0ft zy<&;Ul%xE9XeutX6j@sj)!ytd50q^1Tj_hws_5EmVhEEYmYxf&-{RIyXJ1V7m?MU@_E;|j; zJPZnItFFg+5%}iIs9grsUoL6X_AvcJsmHk2sv>~tYrFNHX~}ul50eZ?&Z2P$;iMS4 zhZMt`0LmdpuI&|aWA7&I$}Q=vX(;ib-5 z?$=4GM2qB6=@h*lEQrcxv6_}k7TUuTXtfMApZ`5eAW>-XRlE`XL}6a8x=r8Ek&dr{ zTLR$tq?TFe4|yiNECO{Tm3YV2=HlZB#EW-9h)cRnpa6G?%6qB7Zkm)2CPke+pE_%{v6tQ_A|3GnbcvQB%#dORJr36=?{`e*?TQQSMKQy z>Q|dgz}a~b*Sh!ju~|__7^%o5r~^!|vD{3Qu3UTmIXR=Q3xmfD^KUMCM7uNV@DALm z%K`O~gsVfrRX2Gw5d16WTz}t%U2zFK#9nfIH$?f~eJoFYmY?94W!I%ia=ABS^=sV! znMsVlnP3pdl&rOzd_rn<`3q)DyLfZEJ}{lDWws)71#7MwM;k3*=DkxEq`OdXW3x-k zh3F>m>>~0?@l0;WkKLOYoYqawWB_dl|K3*nC*%fYrgy@$Ykh6{VJ%ctrnwskDEvGSw+Ak5|7&nQ_;DV2?=@cnvR!)j1hQn2g&?%6I3vEy>lorKh0%JT! zcsqxXbE&m&Zj|4)U6`}1@Qi|g!cAx@Jd%YzUJ4sx3{=h6JEmSTzwULcNpDfgD)+$? zfVQA?6kq#)WZu`8Ybv!kov7f&qq^R03nA39N%9X$fy3hT?!MPgr1yY66rgg{Ku5zu z$9M8-KCjv#qUs=ze%Jeh(#L6vEo{ZxM^9Fv>r!7zXh-Tw8e?qo5>;|881q$buh0H> z*bg4cg5uArEw}=i@sTx3 z+@mH^KI_EK&Ow+`M@nG&OMA8wTCK58a!Wi&i+R(yGUo5(v{PJSPFpEH=N+J6n@-xk zQUZyu6^B{Vu6xg1N@VKlKsq~g{*@uPr4&L;~pzNG(R(8CBe_Lpd2f9w8 zt}YjpZu3MF-fNzzR&RZy4lpg~hqL*#<+Kb2ndJ*?{x8ct%Xc3UtbikZFQ}#m*u~En z)r7wWrwS&by8I5D8`io)RRw61`^F4^ADy-4M-;sI4zUk1Vsr8gBZ|Co+bl z4;OfX9zH^je%1M!(ra}uxKk!bw*#VTDjR7#;&3j>U3)^N@Ct6;;YH)FQhB`MJ>o+{!e~jk^t%T0HTD_rpdaT+`d(lC?4()Afe) z+Ksr*e6omUvMvZN-kQ4biNNjmV9q@r_)RtxLEa=OLXeXF2QK%+4RJGOS_5S8LSJn? 
zjH=-t!$`d1$;eDld6YA4S^FZe#+UCMbb7WoH#*kDS@%X%PjOgWJ@C7H2(!QUBa||_ zIP*jMK--g~l8qUub0u@2a+>iEo3%b`d8|U3-=R*HtG5L1+VeJlhVHWGsg?eMbPuNO z)+MT}s#8D*g;YuP1s1H=pyht~N+rXvov`q*%&Ye$Ht#C7wDDN+aY|OMOCjeaMbj9m zr8I2q%de5T6&>HK7hucfSWn$Q?@}-W{ zu;`1-He=Ds@_C80HxAkOIX{YgamJhL4OGk>74``QH3xx6#~Pq9h!%)_9g1|`YpXi) zX$5lUG+wT8**}%n$-!8Dj%X!bR>ws&okTi{6SvDL#*U(!@~?`zqvF4rRZft5F;9Wh zAI(&v3I0#qQYD<`9ZbOlep;2EU(SjL+6AQZf-WO4gvWpicuaflpc`akdXC~1x zgFXp+RWuI)ecqSHinUUQ`xCP5hu^4?$Y|6yTeeJR^{1V-nzOc5o1Y~uwg8A?37!6j z+saQ$vk#s;CUE6jATZKff$tGJo$B*_X#g~PE$ktJ;xnBsz;z|{_2+xpgf&No;NQ+J zdrU?L2ZRD0ZgnLr7>ruLEjjUHUqY{^q(XC3tKVDjUWSS)dFTDt`d+RmdjC%~xG9nd znTIMWOQo*6b}te!naVm2T>8U4WE}t=Ke~!3rl-Oci4b}SmyQ#h_QYN33J@9tW9y;7 z8K*nu;B{%*O?Wx7>x_4ra;r~RFuV}5&&Uch zJ$Q`;l?-L3a}6$Zf8Y?4d*EK-YJ0@7DI557JQJGcjlbmlqXyWI*@!NIPgr%~IMcT) zj2f!X!4Lw)%hFHabYN?DzeL!AoliN;CgTEY(Q#KP8*gYLdZAe2X{Sr$u4a@ms>>!i zoKF{v!4~iKXT!%fwRtczJQ47{rzc=?5d~iS+h45eFB^*w$Juu-uSsD-Y9b&r|1eIye8A78Nabz6X0oq?7`8NekGa zenrAW@V^k#-r~<;rd>fH20;Qi&KYyN$Wg2_?|UDMPm>N_b~`DyoqZ+v(YCBsR{jGh z(e46Vq8SA*I2jkY#>lWM$>%F$b@3e8BWap#(U7oY7ihhia=Jg;+)k$wcMw>aYT6o($l>IyvePK!k+XZi2qX*BpbWjWNO)FS)=8JZK{=a=FQUW^-Px3wq$ zXZ%CJh=S6gWDsH7XWhl{Xh9votB^pi8r8^ z7)28EJ|rIpnAB-B{iC8Dpeunm_!2u?kxHxH7uZ8Yva>HNi?WJVS}GpiQkSgQjG~vO zzjYLA7YQPeAVnz)gf9bb6S4PM{LG) z^0JXdDZzuj^uyckHV>I9X+-D6E@J&S#7<4>kR}V`EV)!5i)*ak5T zrSCw3)ovTxzl`Xl5VE&<(`R0zkjk}y{!2!!9?M^4OQRmnHurXUPZ+}oDCd)xYd6&E zJm~^b{>2ll2xR9jb<^6NwWVvwGk!Z#y`^$#4q$!do{PQ{wsa7D&TO8Xu~h#qdTXlV z7EOU;Wl&_X@HUXpJn!l_^{oL(rjfmi#pzd6wgZ|bZ>IoD+3(lO2KUmZujxM`CmkM< z0U}J@frpH7N5WWRUJQq%H_^_Sa{al#^dw9un^2NZSwpDhfVYQN>X7(AK76lwQVQB8`kazp?h&A$s`=K&iO0+RkOr+;7g=Li zv%o)QFc?L6{d-mCLj8b-6c@aZ`vba*xD+ag^KX-2f zw^I1`z|xXo`Xotf78%IIkTl&S2ZI;uYpL%1Ty?Jf1Yiy!eQ&W7w_k#N)NNT=yyFq?l z5?IN1Qk^5_SrZ@Ueu10xad#93eenULwsgF_fYf$~1>+7Wa|gg+XSrVOZkYZ>WuHe4 z6~qDDtJn-sPX|;JOf%h56lK;N$5%37j@c?gj7R@veIzNON969+7@_SF`IlrkLu|d- z^*pP~fx_DB>yYX?VvTbmfTSQV_eiZ4B}zxGx*Bw_!XHi*C31x(Lj$LDJ)TedOQ6|F 
z5cckj#m~L@kAk*Zzv>)*F81FPw4pc(US3`@0Z5hf$qqd;b5yWRlo{?6#Y4E$*^od6 zQb~ANKGL#~ZXSol+S{c$j!~YgzFIh*c4}n?P+msoGe8iNn19KBzwB0(-V@@n{u+K{ zO8E`^{U3r-Jw%ar49}^i9BK>!#S~=4!W;1QCJjTWd@@9qj)q9U2gl+(MhxvF@caI= zbHOlWQFWR`4=}A-U14+RAg&A6MjQ)YLbpzcmHqZd9gE_V!RJJ6%-V*%B)}(AO*g@`a;&}O?2m3 zi%sq<<%$w%|Ml&C!yZl)C2E2OLCV=({A~KyqyNW_>VK2tGKpnXU!dOp(;n-;$YuXG zZLCioi5@zqWWbz>)qnN<``;e;>)r9*mv%A_jPUFK|9^(47@Ey&p?M`UR-^kg$u)PS zS`ofcuAUFaW=nmR9?VhXF9WN@RB?56^@)yp(r&Y%ZR)uAc-iP^41ybu2_CP3E@LB17Fw$6cxoc((Vn2)t4O|?H$U9tH#gv5kk zc4TDpM|F?0#u84ym*Avg$@)Eg&_0vb+c1=ZyScgk;_P1e3vrGw-E(@_B2(FwEuBm|ARf+|K9iEv7 zCIK|Nh;4QC^#mZXofgQvH8|5$(-I9D%aE;XefZFJF`KG0-rv&)3XiO&;h0Y%q`P6- zj7d+IZFsqytMj^|Xuf-W3XAmhst8$gAr$X=M-laaxOO_{ab?bJzCm&kPV{hp1%5Xb z7>>KM)5z(y@`R4|+_36W`!#g7{*&9uNyAs{N^Fg40fy#66XAdSu>bmOjq<5-cL8rYp&uu|@b7iYnScf z#UEd}CfnB17VsiBbe{N$jyKEzg~T=XSZI@v@%!!fMPw|8I=3^oyP)I?uZc2CRTByY z0osr5Eda)o!S|^Lf?4iUW!X8E?WHt}sP4*=Kxa!d6mtAbtd1hXUFC?cQ5PwM_JONp+%K5)Z@MR!d0oL$fJYLl%ZM@a*};Qk znmGC0Otxz$ZI9n}juAI4gw;KDTgNr2+isI7;x7&KZA@rLAMC4W%VIkwYMVn`oX?us z6|e}Y!?T>WI>M5yaQ%lmVqza&HWMn_Q=7r$tpXE%;oWog2$Qyd1tBiak4XYM(x4dt|BiC{p{B zTM8muZa{=YL?Ve(fdoDIx(4KpJfdD;#oRf4^4gk04{1^tO8WAjlYhZ9V8c?v1|XGv z`>?cP4zwfWVML|C-?# z@bq@A+CUw*6ZA^+I%hmBn8v_Ix5ylN{!bAYBk4yfYZrFN7zA<+&)Z_zt;L4mabEr2#_U)?EL*DT2mizx<>u-2}r-|eXiv0vgVD;lVt*Ubh*n$BAUOG8Ju^`*QKRyHC z!)fc_#(~PK>up^=p52}}V}L> z)>g_lrVnI&WL1CcXxQeeqcR34?=31EPj(UEovA6JRN2!G(IA~|Mq(@f3m z=MPGQ&nQEA$Af0dKg(w)D&C^bky2PF6U(7e{Lvw_1c~e6{MFp+Iexeh)Zp>FMpb-u zs#k-H2?;;m9HGnzmf)ZiZ>(6XEsv-v$gAEFNwc9>8?7ihbY>4G$#;Z@IE-dShTzat zSpVE7_QQo*VtS5en;+i86g^)NP4l2L9Y5}Ui> zE1Qyqiv%w3zad~6RJnziKtlO*3z1@ok<~cDE<@FGJTzEBo2t1g%n}of@~f8Ilg>E2 zu8QOOu*?T&Xd3c3aK@!!Gd_Zxc{IVBB-zttA`PXrac7P#^WdEJ5>n#VjU|m_8U)5` zYijoGR->CuR=5|{n?mJy4$xBg3yAC1`&ucPfVn>$7HB7Ky5UF$BpWOPefVnFFT>e!zd44%ZWqqNd zw?Fi&><17Gzf{=oo&+9M6e{?U+~=$5=7->;>zJuLNweMAUQZ2`&^(S$M(*>Ts$DJg zEwB2#f4qY_o5Hp3pxzapc`f7SZ%u;l4w$k-ZuELxesI`N<2uHm3FQXqFN_&lz()?> z2|Nqh`a_ACRzAr1a^{?ruc}dkD-8xhruON_gTqS^*F5b|U(p 
z%UGvrb3>6-@%!iTH3l_lDI3~xT@{+MthpM0t%3<(o$4> z?BQ~J4`^;1`IzM?S^`u5!{$VR2&A?lF#vrZ3;{b{JhQ|{xnG>Y6tE*c4svJGcO}wY zCuC7V95&pMWP^=sDJm{V@6HvbPDbap`T4=Ov;DhapiynNA?-jFv!aPU%mDUB)4s8& zRH`j|@4eXjATupHG+NPj*{$SktWd%=4=46~woFmI~d#B!4=du|qr7z0$&fSWM& z1(*5;>nMX=^eS>i2zEP56o@v)v}nmHBHv3$SD)_r%@54VDdvk*^c-KPY}N$`E>!ZO z1d<_lV)u+-kHSmJ9U=1NDQH_avBqzJ^pY+j?&zq8ZP|->7vX&2atA@)%LZPRmr<7r zAE$IF>FmAK&O9S|D2&<3fHA=<82wxj{J61)T^$mH$Oa}hF2y6pZ%zq7_c-98M9x)VXWEFd0);1uWJ4_{Ah;*kDo>x1e%J zPbRhigj%QF91^JuzTz@c1avobPDw`^B^w;%5*$k2-|E$*XHf@L?c}d9&ri@;m2VS2?Z=qvma2SiUnl4I9o4`AOI^3D+R)WPf|OWc4X&s!xkM zAHMJhIYPQH_aiH8Sa0@9vYGD6M9W@Go5hU?oj%>28XGHOrMn)Qk#yvrYB$uBy|~i- z8ZJ~PXZ=bsr{3RizvBHi*#qV>J~o&?q}}451;_`-Fzt=|#?q&k*2G=Q=2H^E8calB z@ZxXJ(U5v$Lz_G?Yk$MqKO~eJ$01?L*EU&Ad0)^rpH#eM)2JT$5LKxa2bni>(LIYV z<6gHkB(#3hAkBEB4qhTi@F4t}%2vRm>4_R^ zk>EqDa=hGA8dL~o78jT1f*44H&T9!sXdd6PF-Y<;CK=v|#y zVO19mF6by0m#@A9e3IN>YR zE^%Iu*6Vua+otPIb%%gtX8H57eYsuE)6Dq;c4N3n$KlaQ{+^CUwAKBFqr*tFI1Gn* z(h-~4f4^$70Vo`>53+%!Iktlg*<^H($5e5U8&w$_nZ6_nZd97lan)e`UpJz9z^tIx zoxc^|fZF_P1#w~UBo!>#YEZ!MvUp=cNI|4zdHugLLc}&;qzatv{MQMFipnizA?*Fb zcoUDHlY?NqC17EZ6lh>Kv-7G+rr*N^vJ+b8dqo(7laH2>QYz7DdJFnYjZ2Xu9byi8 z$6LyROXGvq;Vu!YR~hE)sroJp3Ss*?3&!i+nf4F%kzwFLMaPTBE+lcI9?{`4;I=|M|W3zKDv|&c`^VUrH!(bP5IP$Z;|KUg34Wcxsu{sT7^8 zoo^5g39!pN*lz8qA|KrYm(vI9Xbyf;HgjgkeFd;8n+EFlDB|j|O8(@k^qS7tAEih@ zklAZKdM}X97VUW{WA_AT0tz_#aPPz$zv3c7=yUjQ0isl*pBJb1EneO&n3`+thvvkc zfsP}jz@Uaw7tzak%UlinShmCEM@ks!l$vtQgK+Y*sw$2k=cREI{crT1)owp2GZnoE zs1`0LuVkepvO3nldx%=h56GyP6LDq5JX|aahO*ZS2d3uMvpq zfeRCoO>@1uI~h)P}lE_nYCanYFX0PE=N9J#>zup2`TFX(@Y2T**GC^F%?887 zk6S}K0YP;J(U`-S%aYv>#4tbFU_2vNc~9iRK~5V1Io?LF6W;NBTlEgF`;^Wpk)qsY z(k>g1B=U&a`%Giy>~x)z|ESde^232%-=ta9?w6hM61X)}q~G!7tH8J6LP2ju5!m>g z2KYT*0Hy*JM^>u89GK*VFM534K+HZw-^EAJs2ak2?RtgYcah^qH-HUPv~P;OCU%0o z@~T#J37ZW|tx}&cDDej^eNymVd)HT(lSa%aS2cM_K4a&(#@Z4n&2|00F%Sum*jE5xW;*niCOZGJ2F-u5=L*g4+9-$#truF{*bXA 
z6p>3@Uegibs0-#a{wPzMpRrjZsPf=5Jo`eBbqU8r^U6b#NN!Zo(P4-{MxKQ++~udC7-=wTdfuQe-w3%HimPs+s1n<7L8&Q4@;@{xtS0RgE@rA6>8pxWovP2M{Uj)9 zy}W8Rwn^Bzuhq;vAOz>4Eq+n;gsd{>d5 ze9wuWo(DVD+%XF{gnhs<9iI$6mCA!o)eevNH|J+&=RO)c+8M;CuEU)jK1TejhWU@> zzn&$cmjpJTBoYvQ13lEP-}k9LOu~pmMV&57F2DXmq9)rmcxfP$)5DDN$v5J4>K>cP zQCIfrd>IrvsCi#VFjJ6Lr+JrJlb*}yZJoY+Xre_apA|8ujzf3mH=#g?Dji`@-t7oo z<}PwZrzQ!*E`4E|Io0PuJH3xYh7Rki zI;T8D(Dh*XDtLL1*b!H#a2h`!rCl`8AfyXaRS^a)lHUf(Jfw_hb7CWSGEAzN@V)uU zT3v6-ISyVBS5aYNtIZ?X8l>B5zO5!GD}D)-g$_RMhMSe7xe-iPnUb8dPdf7xEqO7% z%iz1lru|0tvla!_f1U{n6&(ZZ$6@CS#$y4Ddib#A>H@(~g69l7yN(p(n#Vwiz|&>6 z-8U6o;3l=1(;X=jd!U*Fe)J+*awWt{>mlT{brPVaTj0FHoB)7mVz%mhly~80ZqL%6~sGn&0V~<&5JFX+^qU*b0 z5<%BJw6pEAh)n;z&rdPy$YHR;jydwq@LenlF<%1`-!15yWGG$i6N^wEy#uGL^FKFkXLA|0b{HM~BWvBWoW% zEx@TM6TQoLN$`z`cC=emCTBKK&SEEA(j?~X$Je<=CP@;j;({Y{XX?lGAq7S50Y#j7 zp3VdlhS1#A3Z2DvCysbLolaFATLQ=kbMEBI3#=_CVyZout=ekt29z+4PoH3nF(L2; z+HN|E3I~?>$j(`EndtS`Y^MYkP#uk#(PA~{kX!C?JHMar zVkcagnvQ`qey?jXhckbAcSsswHs2g)IL^k#_S;;Q3UZMk&;X44n-lEK)x2m%bPeb4 zHZssxALYXhGWyY?Hw0Brn!=@ zL$bS#LRn0z%@W-dkiEQ@;;L9f&wBC3WNXi0C*!Y&`gFAOd#X1ob3< z4*Y(RYWunVfsvJ7yY=@puUkj-G}k0G`+se{{0Y+&Vx2@j4(Kzp+l%qIsAnN&Swrst zFT^3@M07CXGXHlsP4pC-v0A5%Hi+++ki{YWu^7dCS#tB+{Z?fALOhz)tAEdOy0!SH zUz%<^=Iwc|Fj&M?I3+Tv!)!%YqJ=qETd?cljN5(>LmPGxX39q?{B@skfoE*_U{I(a zCBsLTXqe(c%cM7fjE=e+oNM5LX|;BXYs-gR2(2Ml=qc2X<0~4HXVU;Crn7~Gfh^2y z`z679as|WX{Vh7KGpjs*3bA1U#h&ExEu4|1MXXU?3Ij8rPAh}<2=WyqECbbQn8s9_ z6>NcI?6R-yj043;KvfNNi1;<1cQvn!4(^m@)h{79*x$ir3i~GqH_snSVG+=*$*_-k z1sHW3Q=`X!pJsWfDt?cNL9*dj2K~}< zvfx|O5~NxNlIwWc!oPkfVf>t7Z!(G^UUfjV5#{ zF7E}lu37gb2pfFlo@%xE!gxQR3K+JV{7GQ@`XoCRs}^Q3BX=5QK3aB}lC4{jQQl*( z0dp-Zg!kAyZtbfoHC?}kF7lRe`w8L zoe*FXp!_xKD4e1LTsSr;e75utqB>z;aP(duu)&mFRalEXxb&@bvxM_0oo_E8G;S+O60YL)gB275G@}g$5e+zfAsHZXO zhI|0fhVuV|Y4!jx%^E8V5TSdRRY&z0PT%^g^X9oplUFK?ZIlaSw^ov-zujMuouxuHk?U^((F`0C3cZ8o~k3rBU z$I-f^H{MyKapHltTg*J^4~q7oWo(iGsjlnDeuMwE)2QeZqusP~%nfhL_ZtYtPC*KF z{dQV-k_TpHm&ymwbM~Y3^xfp^XWkdmhnwqH{k9RVpqSWKOEtt~%80Js@ubQQ`*^qc 
z4U#LT$x8nFeF2hHq|NA++fv=Bz){Vj)B#w!S?(6@gE9A4vRd@h!Re^wDMGQ_wS)o+`d-)SOOzf?<+8O$~VQ&?bmgWvk$|e;zHr7 zS9J42p;gfkeN);pa-jiPcG}UIYAjX>}3wkJNE@CAP$O1<~gUykpBj61@4Iw&Y$!aKoJhIS=^(?vd?&tx=1 zwy?H)^bnD!HylDos4unK-yW?h&isTAsl|{Hx$^rLTXTlx{Fk(ulUZgPpkTl|9n1|BzfW{N6I_Fb!kSV zpobZBUp$SuQ#{>6$k4FniCMFbe+s;?e2F8y9KBVOBi;624Rbpk3Ak| z9(K*3|f`GHZr3-B-E`Q3_Cd>ScR+KRPLU&9q2njMjkht>*@WL?y41V`My-f zRG%QcOpsfn$2P?OT$enpurh=JIEK>^7+GK*z2@|S-*rfwdRH~VNO{o&)XXIo%|rLo z%RqmdxC@8jkjE%FxDSvdMqN0h^^Gqg9xqgu|Q_UMbr@+N;ME3pr4* zLebROksU{}P5k89tPKi4pLTqT|IQ7`RnEN9D%^| z>d$(sgVW|SokfGH;|sOGGtYBOs8VVxL*&eUc|wJ9pn|O=&!y4}-d%AZ-vN>YSnZUc zi2D=2_As7CIelV6x-U)u-)tpJiBYoZY(94hm8v1;LCDN8{}W zQ5`eSBvVlM0?E~*6BPtDgl4^cx=wOW=$~kfOpIV4MMMn!6mU{^%YS4ZwD5DN?15*_ ziFp)rTW1lq++s1aQeZ>4Ybz83)q);eu!J&zbC>%Em;qwzPPuaSZtVH#1TSB4_8$T$ zpejxLM=?=k0f?CfawHA8dLXhriuL{=QnBe_c0goPdzdw;Rl#*u3!fs!S&Y<~G>i{D z1~9>deR>kqc8-4Po?ZNAX)3FeG;Od)DiQjqGNh_r5K?r}S+={8CH!ud{fDuEC_COW z9?vBr*5=RaA!){3RCOduvq7Lwcg2X%WF31a?iL4=X6F>-{FLsEXXf<2O~B8uS@-RQ z5Sskgxc3#UZ+@STaMin(1nqJ9hjsA9ZVV5b19sC}yyoxdoW#V$*Oc{>OLb;bMXGy( z{npAp6Ui13?G(`KOGjM-Ym{{AUv^Y8LmQBxY+8L%s4&D_+%;=^~4>UZ+#UV2&#`8&K;Bnr_VPNL5Ng4 z@QdIKaJeM8``xwU#N10Q0RTs=?~B@MRxdcVfOYTnpQ|?#5?HGV(W?%r<~xQRN5hx? 
zo9-I!tk!Gk{~}}iD=v2eiokrxvDXq6G5rvm>e;0QMz9WC!_H$y0tc=wa37P|w+rNx<9E1V&3w1G(@&*YNl93X4c3H?PEMzlB#1xk`e70q?>P_8jtWmt zPr<<`M~%*rlbCG5`uh4^jh7c^`z^BH`jB2I1S-EvNjE6y>SoThG;)F-XF%b@`ro}| zt>$k^D?h?zmKY6~`f2*UO8aoRS=o)*8@X&6evP-ME__P8Uy4E=s|} z-{{Z;1_J?T_cenaQnnAnI*h?SsTxTq^^^)Yv5gCW?AF@z>8VptS+fBH~V*b@2S zo>K!2pHPxmf0oX<-iejbT|5+Wh)Nb@x`AX7w1#38}?D)%INgc&PqYa z0f}J`H66)F-00Y+)2p`CZ^=;v;(cSg!WC4BZ0U)r*j)*{jCDV=+TeMPr83Hw`v|vx+@9!n*dVIup-~IydmAtb@ zQ#lCtXR!M_I$l`_mN>AE>N*|~z$t!l^ye%C5ZC6D#EycSRE~+ZyR|~CTtGgGUnxG6 z43+fzhc3~P@%KK0x=MY|;lUx6^0gsGwTT3J&0bc@qxH`TnrUvA#cV+x(lF{#(>qijFsne}fqdlSuU3GOrgo4&ON7s~JeIL!BftK$jemCG?y*F6CB$$K)G9Z7b0 zGz+EXA&n&=B`vP6h$yiz-=Cl)upbPAV+=QbTKu`x+H|$gl5m$i(T2z;aWq${^s3F{ zdWoh{r}sYoyg`qpvQ}Z}xNKRpvbHR{u1*=v_B7cP7TaIlb#Coi2`ERSL}uAE-}I;U zaT!HcUZk{!JuP>BZ;%sVPgi$!x=Xt?eOpArc*Xz}l*+2Wy#gF4v~T@JyrK@;u3|Bc z!4#Nh?l3fDpHX9XUqPG|o!wTJk_r7eQsHpJU|w#~Aj6syMAtbmAou^OZZ+HV_v;1V zEGJ!+&~QM%4SbGJ)#NVJ#JHA))u$Z!wuYr3GiQ^eQ9uT1%jjwdCK83RgG}jqrtJls zt;#t{5tJRP9Vd*Ad*>34I0JxF2Zf`}`=|cWwk9}tw@@cOshJ4XtRtfeyXl~i*9e;~ zskIoP*eXjx1$+aGY@*xc;}ZzJD%x)6W5VM{+mmtGbWKd{@i0J!=G{S`^Jo`rVw#tYRToHhO;Urma_n8k%cv zpKE;Id7{>>S2dv#Vp8@OH*W0dx(8X%pE`JxVaJBo={{VVj_@SyZ8>V?L=;}OCMU!Q z--L&wO|FSkKO?)J0`tPxW>foP0&9jP(DmJ7Fbxd6ds^K=H2dY=jMxR8T-#A12jXiT zsvfY8wU-pp-Pe7d}(su&bgHp5O^(#i*e(O|oQm(wvCELZmz_znm za`ykT?cCmB=f_slID@@qfN`C)+`z*}%;kRZl`}dD1@_v`*Gp%j-8Q{dUK$hPaBQJL zB6QA@BRz?)RN8lK7JOj%(B=jUi{AZp29HS`H0s~Pn74#>bWqM-1CZ!cRi5r5!KSz*<-V*Z3tvM~N8wBNH zk9yes3po4&O(B@v>kF!4L zjyI3mo|`ai*8Tn?Z#n{@v50WD`T6f10~x({u?;c$Ws7S0E1${l*GekebH>#?i5dI8 zRCJUaJv0Dg#WmP|I=81jXI+-7E+7_199dJY#mckBaHq3X*~*TWg8AvMbo)GN+Oo*4 zuQ(rvGWT4;H(NyzvIn1eZcBiq6uS(|ZTmvJ4b7vk0e6(4Qle`MX+0F6$4yD0E$%gGvWUp-JNX&iu{s&oN ztH_)u*Qk^;)*ABSM532cjIZFK-@9n*E=9RO!FM37&Vo=N8+%0WF2j0F zAnUm^TP|-Z0Rh3UqNOG4@8?F>l%IqEAwO>SO41EoLo(`gEA-zdh{InO6%|cWcEKy| zGmPt9@q@aFC5s6Qy)_SYw_DbfYo-OG=uM{)w*R*czs_A^i)(y1 zE1hZ21SKy9q~ACWkC;Dy&5{&hQ2km#S)OnKT+Ts`TUdLRfG&O~h-<=Zh?C|8;5g>V 
zeZ*RQ4!TMvkGLw1-!DdR=bwRPWl`9rHT^EH`3eG&b8#f~uBm)a@a-L(=$mssDkxA| z(}sFlOm_lL(cqE>EnFs*@ENboYZEUq&~W7vXbaR`)@djcaCb&zEOvH3an6!$Wyg4u ziG-Pr;BUT2Sm1+%oWI?ce+t0rocSoM$e{9~Gp-smcmf zamf3{mrZtJCugOBMyUuB;HvJ0wks-(S?clXnT$Z@mNB`pFdh&>!3Aop71&B{OML z1$|vyP4isyBCo&wrtNuG-n}uY+S+UHa#YAg#r)$02~S4{F>W5Y;RK)d_P#tHk%)b+ z!1LndZCu0K2`@*+zU@~~frVD@o~TG%R$0U^2J$qHE}Y~0kxY(T^VRWn z8e?y)a`nE&TW`$$)K|+6VJSs0dDic}JBrAu{=4^X!Ff=sJ|-*o-yrY#dL`RnQhHVL zJu_Xx3@`%L|TG!?Ss*@ZgH283cYlYZE0yqNlfFS z61%Rlk!h(`O!FjUcZDV@w*9EQucIr0jk0l1@u6=|T*ptmIQk)?x@^fv6*GZ7zTeCs z1JzpAomVaD8m{-2W8TL03sK!}3e|@LPk$tlTrykS z(~b9a2-UGS2u$3lRw4L$O_N&6BAl>K%vd1=9{J#V@KGlxxtH$&YGBl3$VS?a3f$6d?Dt z0bk0K86l&))2nN?)jcG?o_GClrWE$kMiyTVw;I#q&w+&X;@hrwJJM2CT z++ZC(>MsroTXF^-3$F$T-$HDp{gCIgd^uN!vv?(4`AG0C&rO;wo-VeFw!Ff7_DL#DwF3jdLQ)U}^#?Y0bTzTCiktb?9d-mj1LJx&7Qz zY`WYij-hyoLiX=Dr$k|e=Jahs*I#YuW+q1PE6Z*j%L>X_?HnO^%qVnoC(*#*bO(}{ z`pOVVFHxETsNVk!W4YUwVj_$gPdpCC{1c_snf*vpW%$(`t z^*m818*K4|+RafbZg>uFhd_5-Fv@5Pvj-3EjsyJ0vDVDpk{J`NqyGb(NKA!9mB|%E=!sUU}i6NMLYan*B!T z0e2M)z9BMLur?{~CRWInP1;6CNUlY(eYjka(WS?q-SY*Xj4&H25I&~PLjE|nw_r9s zRV_bRs(4?2Q;wp}MbKP1@untIYS~_QHy4pnl;-g=<@5ml7z@#jA@l>(+%cak75;Db zEx&M~+SDQeIJ+KMy+5B-l8!by6)fmIV^biKSh-ClZB2kb#1!0Wzjk4n9C_F*6w{iw z(|0O+0FK{8YDzwjL$tyJ~CJVwctt0_|%-wyCy;0(@J8l4>ZF(za@WDgI1 z!LYsrKPyRcDJnpQY7HgYi@bwt?F-C#&E2ngvb1frr*$Qm8eK=VofjH=uMxfEH9*%~ zQY7TEseT*0Lp-^dD)0%apIWx;%m&l9GIsD}w=u2B8naN=a8{|jhlL$8g`vieKK#(A zyb9bkUDM5Ewt@xj(*lmkXA;JO-{80=>V2Zreu$ZSyvi)_$}`bm)h)^!z0!V+S!?$E zgsb1VHtoHxcEWAO0ceJ!Bf>o&etgs)tVJ~HnQDb&0p8%$L#_BlwSf7p0Xo)lu3f>h zOCz~^*2*%KhaTIYC(`4C0IuzNNv@jlyC3aTeYe_nf+N;6a&_QU4(j2F3jVVr3%M2uDx=7To%g9UYrdn3hBz>Tu#>nfV!=6J>>Q*ooCzkP&lT6GdX8ndO8rQ7G!CN?h{XvHKlcTKTA6Zr-=bH>5X| zEWb_h(UkDM*$2=#(QeIK4q)xRFhq)UJ7KbegzwY;ShYJ`rT%Gq*Z5#Is$z=OWQCt- zmNMu(Vai%p_NPeeO0O zQ@H%OIya;AJ7p2seLkFG15N;+V*k@J*Ss>MppR7u2~ zLfOc5lw8~q7pIhOPg4p*mG~dQo#0}F{;e?Ta=b{U3$9Q@oY$)-aGtkrTMW``QUe?* z0$YJhNSU>ty-PQvxQGXz_HMtudwjp73`R&weiE<02&H^QbE5Sb^xMw4Ygcb^WqCyg 
zua_`>4k6uNv;5>&->ctXNM!hZ@$}E(YKcaYrfuIKHq|RD3?TQ$B2D*Us^I!JN56RZ zeV7J*#Gz_5VGx7(Yw5`o zWzdEMN6KAExB8*(;1%>TDxoPe_S*AoOzz_TazRyl#fN{eNU=-T$>giT3xH*Bm%p{Y zh#3jt0kV!M*saS$kuCV_mHuz!)_TA}2^t;VQX{ zk(Z#c74}iqXd+mKDqSyqI33kt;EU+Z^T2DiOL{0JPIkxanhrvaO4<8S0{&I+@i&t| z`wC?ZUHG$DG?ZMD=DFJo5evX6h&Ms_9R}N?%}mXUx_HxRlM%=WsTz+{}(nPZwZBx@)vC_8PU$9 z{1}Mjs%7Qn<9>({#q|=UD4*|O^oXUX8!Uat*ux<88!@gbyNDt&eU7APB_*1|uM;jJ z*;Bo@UGZnC#xcJz(`&~v+UgjW#q~kI6h#_F2dPS`urn=1#>$pDqv4(Nn9S5TrEr93J`q;=LYbZpCKR>SjjiDos(4)|W!iZE=@YD1f9M`NESiYJP zg$JXDf=N~@{vP^vfErZNE}Z8X#pX{=yxw^9X1;^1wSMjzaFm3ix!ES@2~xj!=|;hA ztXn;RlY*?aL1VgXcSbpQSDW_-0a-+^ZTuI=0;L53S@_k;nodRu8~1%YGj(NOq$|wv zA+kt!=Rbcy3Caq~lDY^r%1v-x?gZg0hzto9_3ahekihW*DR-DZjue}%jdqc14HBR?UoI~& zyVDDilQF7zArzlz>FH*%;9OmeLR-zaXQXhy$)V6-BAlt36E8RtGl!ssH;q#LFb;|9 zW=KY}CM#81x6j%{b&7auYK(Qh%d;QyukX+ZqKFQ)4X+2C8$sk-R?DiIHK3;9;6oE? zKlzjHNi(zkb8K?zZ>4O(Y^TR&Nvh=!G4_vWyz3|PxFx4%;=x8BZXj5om~Uz-AV=1J z&QnJSF$(C+%HAknuCiR}5s8F{EE(q5LdL(FW@ip)c5&iZGG*Z?@ z_HZ`H_YS`E*dqdL%(4fh1?o>Y%(TjW8r}ApR)?;|#0e}y*7~}#Hmq?9Tb)Z&*k?Cv z1x!jSAAu0soLN8Hf4zwK9(5iiumKKSKUJ?aIscN~PjR?ACCesSgGI^y`Hn;ynSxr6 zSRDHR@FU1SIwIn13gBO-=Dl#`vT*lO2gC*jTNb(^#i0LAtL07O#C9U&?F2^WC3|V_ zxh&wy9zUw4ou4;{cWurtKE4nglx+0EFj(kUS|~MGEZM z^@c1_i`r2u;EeNkYzu3Y6hk&ascP#qV&!@w*z=-5XlV-Ltmzi3L5C)bi>vCVRppmv z)IjB#h)<#^R_z8YSwy^)5!YJ{(VG7ZKCJ|F5RXF)!No0_Dn!ce?ye+$$`*CdlG z1QqI6ZWtCZla|`PJ!e!Lh|}bh5AS>)ttun@em%vsXjcii`_O6Qjh*g(fb2?wZf}Lf zsph+L;XZ&I15kSlt|f4~oBz~Rs^O8e_!0EY>$85ZfYR89c4o-S=yvEKDaBK9>e}Qf zKC$vurl|A&z52ViDwbuXc!(9vE30v#pWogFYMdRC%a3a~Z%;TAX|sH*~w_>>TrEQv)weGXCp+G&@&^ z@(E5bJT<*E85IUUSRygWx8hJkN91qD&?^T@X$pJe>`^K&s;z%U)Ax-P!qeNyKfo7P z9C+nT-)H4yb7xPZe8ydCK#C%eN;nE3ZsU|&US2sErcWS9U~2yOwNB4$N9HY9Z34G9 z0NS%V@dJt;F~Lr($a!HJf#pq|s?5G;bOP&z;iU+ATc(+O=y#O?Be0viA+N>gDT!A? 
zB}GLzV^sI>-5&H09UBC@9`K%yur>H+traKkYwlq*8(q2uK$*L?+o8=EYKDu+K+p}6 zH_4sw!YgWpk{ScC-|x^uel?r*Q5!-k^t+7fPg%?B!viEv+(jO5W(SU4S}Mx49%Fn* zU{k;~tugS9jV7*YtGbZ-kuJ`C-Kc6qP?#8(_G1UWaoKI^;Thn@%50QQdN4j&TEJukuY1 zY#z*Zep>}Nj&_MQ`cwVgFMHqqJ}Ry76PI^wj|`JxmNY9fY9KGQpJ!-Z!EM7KH}JGf zWL@<5rS7;rOtK@VF>2{P6~s*-mwHJ%`+e7@QH8yzu17!3?ryCx`da)fnGL3xMqa+H z2iDYvYhS*X(593F9%}ie9G34PI%Ms=2n$lIqcdK|T3+!!h!~(nxBo{t`!>IK2pUON zwaSo)`Xne>cDH^_AYsA*p-U$w&!|SzI^6^{DI>~M`4hN}b|p{FOrTX@&)zDV z#2k1+uOG2xm4$0fg~i2aw}jk~ltHx?V3MN^MvO$!5e_sGKTj)Ld z!i!yOQKwZ{x`gICz+ITuahXQEGJS`qW0~uS|4z`hI*|nPF>o9Nq^6np75FsLTM;jc zD=|Qxsas{Bl%5TsZR?*Zbh#)FBb_aoHK7h>$pp z?u>W3&EU@C^?(l?6p%m$xnU-Ud%fl7#R$m8) z=AV%3I9iN-`QWXYI)HSfW$*BsV6YcR8*3CSpdDa<@p9^G>g#OXR|caDJ?=E((iquh zZqLh5O?E?<9^q#KCw7fm7q~c@X*{IWJiB33gr>h$9_d+RkJu_6 zf7Lkl?}jL~`+J+|(546mU+wYYH(7lq(wL(J(^=yWDzLRthxN|92Oh3+OJ<^5y%euB z%S`FUd|TQbf<)+iVCtpWQWROy-dk#E*7-V5&2r?j^!sFA!Q}^?^wfk9Jfyv}>Z}5z z_BJ2LRRYX9gj!#g`YC`^>6>R58rBQ7rME6vM5PB(tsk0JPzntOLKGQ~?-N65T(8Kc z_!d#|#pfj#4>y$Dc(`PW+g)^VOCTeGr}5s+gthPJ@m5dovBud5nx7zJs^+w_XM5D{ zvLUAL*jpqinJ|Sipyp;sW+b43NFy-tl*#8}Ws?Kb?WvA!;RV;&($U_@>=aFNj7VAp znn6nfQC%+{bxmg*zxI7t6543Xws6&gzA%5>b!Y_Ps*mGhwUq05eb=x0EX5WAiB$jL z`W{i>ExR_t0M}!t6X*qApi zau-w_h^Vw@gh*Xn4NPNkX|9KG#RKI?M=GC$GoyIdve1C~FUqB7P*sT9@a~~8Qys?Y zoPt?C-y^PYeNtGrhyAQ}I$cBal5e#Sxvio1>mIF>8$o4Y`vlS4DeE zX#$Zx;=uq4mum^KxgzSMz%~Vcz@c1joLN=-ZS*iMSAA`rZ#w4_7b*4?h0)rCN|M0%^)a- zk$EGRJg0A=-*>6UZ?Uc06pT;jJrd&=N*p@<8r0S|9(Pplo@5mE7{}Cv&6BC*=chsM zfQQfy>R}iHRTlRNOgjjd$N6-=Jm-?m6F$O9QO5X|RguMGM+MM~flL}v*enYr<^yt7 zy~50PB$gKRILW~C9*Oj&Dv(iuV3AS3#0Wpck4*H zamqJbO9ptjy6&#mHZq93`ri`6igm8|$-f$Y%zpB-NXp!ipj#H;Qcn6}06c76Hi{N+ z=1obNtVuz;#i=gRuV|iInNwO>I4e%-!&{;YU(%*Ud)CP)t1Mp5KZ{_}6*E{?%9 zwNL|>8F9zmUT!b)>R3i8xhIWje|U|x$sjfSMNi7wGCeO4oku%1oRW)%kHkgCPsC1; zC_7G6KlK=eL~phIVUOGsyNy8PE|#yA^<{F51wG&VR0S0(0G? 
z_x1M;)yuXK4!~T%eLTY5A<`m5Kk-~-@ee;kV7cirr9$;`lsh_R_BVz1pz$k+e8N?e z?1vA{x;lyYTyU$GWTJ?TD>F==*jB)&-c?y$0k#?(hYhBPaz>X5LAJN`WOdzX&3|Y^ zI{%a!vrm)CUzuY6bb6HxuFi`3qo}D$bP}9zT+6--AC%cTDeMv|P44oN_~u?wod{V1 zBW^b1(Qy@(O0^o|Zto09<2@i!Y-a+!DREV)?p6Vy>Iu=1`GsiQL*qcjX-m~?i?SQv zJ>0+b0Y@LAKKNMO^<>$i5`S@VvrJx97w{D1DLzD#D`rvbfvWOtMerML z3$5b?5DwM$k@?@j?r>tT!ujcc*fv5Zl$A*I`2`*qWPTNEZVs6IQw^}n7$lE{GBK?` zY!3_a^(%&?zUN{`k=A?h{K&&Vxu3aZ)oqgssgXV&bNDp_)`FUK{U4v|wo<)BRY7** zWwIy^|8Gngf9ct@wGLg&g}E~V0wpt1XT=_0k~SUW;!6tjVfmU^W|Uesx9He&(kb=9 zG;VLqqW(X9?!v`in~;!+Db;z*a)sCiee6OP<_?Tq|2ME>+<(B10Vwo??d6lni>IW+ zi}Tip+Ov$sZRYs$Bp&^NJ)v09f(7%QRV$%(Nl>?+-zURc@N|f!F28i{Psx>zIOH1a zUravl&(Oa;WY(G_rlY4N{qQbp`>?PCw?!~(>WoNoQR-#SPqrQn6!9l1=GtfYou<3g zDI#Afz8EDbie>La1!k>ue?*ZrsbLjS*Fa{x_a@5G!*qbcyarEW1fU%ddv3a zaBcPlvr&#ZTCF7Q{-vH?pNY8$XXiu0|k#W9a^ z78puLnaJ_@BD6*Ay|{njg5fCotFq#Db?7T4!1yc7(9<>hquM7$;DXB+VaG&lplL)3 zL#nsOOrBMzj+~>lokyE~WjS!GA&8pY)!X0L&h@-i*GNK?JAU@noZs*;{GSFO)Qt** zOJ_G;2T)^h7iLt6fRl%cjKs>T!9m5mF87@lUO~^>_IpL&b%P*x5;xCI-;AqYiIsp; z&-Ez<)6=4dW=m9vKsh>grxzZzm7h{A0*MiFgUqHisCi?jSL;IRdhmH%1FveC1tQKw zp-umiF)wpYxAHL;)WAXCcCqR)!2WbM1wb+K3yZ~yiT(ipGS`vSLG~|mx1pf6zoU%m zQ>OoOvNV3N`;qUtk&y6OR^(5xC5M(bB!V|0!AuVmFQ; zuM6o2qs#qvOnN?OhPp`Q9g%93LU&PC=J*qD5Vc12qDCL#wHofyLHNYf0{ipBhgF#VEgkpeFx~nd8-LEP~CTOXevUIqbAx&dE zH*KRv*-C`zB^1efZN9g1LMV+;m4D>Kn=@>Cubaq#z73pQ_}zKAJy1OzF>7#2$Pmel zpje4~G*JqBH~0eJ+f=)SU3rgbZLNM)Z0YhG=U=weQI&7W zW#5hh8AZwsA7EXGpm!a0stkrC2LB(NGc+syFU?t=7j%d^Bm<2xdcFoNAU|+?t28k+ zQ6RD&M?4`t(?apebarpz#N|fHT?022%8=};8%F8+rQtgH>5e`$rdC>~_s`ryOae>h zQKX4|Bp_5iy?KPKPMqyR5?5yId zW7f^e<+}EqJ^Sw(ClNN3S55<&4kuTE7>vQ^ju*d6e)sO`dyWw|FFHJ{0Fd%0s%2lW z6N|Mn_Jl*ibM(GEN22b4AB@iu&zeP1&_PSN-G-ovcBc=in(&jWfhSwMpBeHls~I}#+ex*ql$|Eih)GUn>!m|bmOHzbC~UFF$v4AUjZ+`Kf`#UFqO!g zA24unNrLYarsiBQSd%*BkMetsrWZ1eJAp0)jPAaD|qktIvq88R=Em#@@kJ)AnQe)1?9?#md z1nIOc$*T2P?27BgGCaL=V$WApm)Hmj+i|Yi!ShLsH)~~;#9T?QwqE@zZj9U>y~Hc5l|bb@kb%6yTBnr* zt0w56LeTD`&QL@9^A~5!)$4hHNxz$Yz;z6wbHjAkbw>8F(=Tz*qF@ 
z?>@AG=o3Vt34z03$q?rOHkS{q$5nIBt8TpzTQEU@+#Fc+y9K)vHy(+riExZyCaE3! zon{%_LfOGPesMzAIIz^WKGsU&{-cvD@Nj%xV7=~mYOww)Farr47f&l_4_}7kW|HXn z0)Ag#`ai5NwT|wex~yVf?-F8FYzk@=;#a%JWwuAFqKYH&HqX0h5V$aL?^AfnAw?ej zA3=l0-a}$VY&{u%Qk1nmf>xJ|%MdyES-@@|wWkUs`TV^)YA8)xZ+>@=>dKG2Cb}1Z z!j|~KCs%c}Q1cIXB_UfJ@oT>ouqakR9jU4O9IjL5j5u#lq?YOquPISbV;PDbqWjm! zpPg5f`Ka@-V-!!Z=f~xx#XvXx%TC6OwBFDw_3J0@b}NpuwCi%~?wJB&;}h~rqxPZ` zR^s=Zfs0xn!EMm2Dq1TpeJbsa{HKbw++?YaWvZEQwica%8Ckb5yU zN$A3f;|+^zudulKG)Fp(vNf{R3{ImK-{7qJ$0%NppwSP(1`ApCI z_9Bh-=T{4I)IXFKy{57=`DDnTWt%wxWu2cmE47b9`q>~~l_Atee?jAdr@vxyE94mL zlWW4lXe=b4*wxM}&o!SeIeZR`2oOY7Cs?Jbc8QZvME%SNvB7xGUKqF2Zd!>6t=2qO zA|`ym+-8g?FzIO?kw6p(p>>Kaz~tXK;20&v&E=I44+numL~;jvdba#@N^z7MhXUGAfK ziV{TQ*Aj^Fu3e}AS7P2jdw!OQ*88E!@+4BKW091aD59RHyYbn2hjgXaVF5|YtIy@# zIDOE@Y=kCRSbylxb}YyWO?AsmyS>Dr1T zKrALNAI?qK4eLkUwJ$a8OqgM)yJi3A0F2p=(Pv8QqmJ&i|0NpL8bgRi3q$L=f>C=Q z+%6DOgVUC-JKu_+@UG+Fo3LXR!FL9ME`c@v0zthBqKw*$^?eF zTkX>+;PodlMEC~q)hMqT)^>ax;o?1`(U6dSlh&wqP8Gvjp;7R}Nq~lc1mODH(gV{N zRTWI~N>HbH_-Z0va(J~pIM8<02;<~`+y z4)UqV$(8JnDg=pf>t~yC0W!M8To7J;D7(n3R?!XpJCsC-@w0Yn@H6622+XEDX%5Oe z>|VC~Ai_4juQv~6egOu+a+(ec64FNva<}2mCd{XMW;0V?kNo3`fZ z88Q3?Ae#f_H7nxhJP2@eGHu#xAgF(K~BfHYNPw1F%`!z=8mbgX8 zfkinX-;C`g^O)Kh_f^ERgaRYnca0|xscZn&c0LkJMGV@r&U53UVEehwzI&XR<`11Z zuwcCwV!`~ysi_GY-LE{on3(fCUZU%;mL`;WKSzCxvgl9iL82PH&ZYc~2VqIBBu+83qtf%PxD-h;5|@ms^Reju^k(~_6DS#rn<FUZ23cVR@*S_Z~ZW0|5 z9RRU>g|?Y9#A{3z!4?06;QH?C}~#k1yLDZ=(J z)a{WiANSv**mxwnZg!fuM6p(6%-4~D%zARW4{U_KYzEA9aK^nMRY=p_%~8aG&k(`lZB`V{gp&RbL%< zhx~aShNswF5a{dypr`UomE|uew~6_wc&NCk#bbY3;iTmmIhiT;ZJWgEW)kb@ zhd`~5Hoh{56OtGkhqAeTqbZ?I`Bw(A;u71K7QUBjU%tbg zfb=tJ#d|Zr;*Y3lnf+{b@r+=vM<*$KUqGDc>bZ7mDAUPuD{NBYfzza^%Eb0244)={ z`AJr-KiJQU_*}6o&B%)DUwnHhV!-+j-=4Ze>o@^nZX`eqn`eVJWk`M^oLFKxk$~6V zjdh0_b3702F%G~J6wGlk2;et_r+KWa%Z&9UNq16O;DvY(QFXCf>}TmV1ymLEfYH(_ zmkZ#yL@q+7c5!}ZIumh1YaW@P`@(L-QfotU5VDbXUf`;88}e!Cv8PWJ8aFyR+PoEY$-OBj63skH=8=(XI?w2> z!lK^C=+a(5?{@>3CDTJ`j+syqNz`@tTe?R(^nk6L1R)7 
z*DkasD=PDKv(1U+q_+-z#9?7PE@ApVcHawx0wNH;Tv$87atrgb95a1z&Ii2ru+K;B#S3#acGN~d50sk+>-fw;nQim<&z)s|{{aM11|yJc+CzRqrvFD* z@c#jN{|{LIO&W$W#aOS`(*FwH|DObE@FVr%|HjPT%s6=WfAOh7!siv;q?-3$oBtOd ziloQe3Sehqio88tk;}=+=}Z`$^D+#i*xe^mvpXb4kn7}p>s#B~r6pDA{%ry6zOJJt zz8{1Txg~0PdO{5i4H{k_vBBV5|J|=xbDn+1e*|yYku1dGvv0d#n3J!Gy=gXi?PeI* z*w}uir4a+Gs;J-p`4ck_%N;gpqm^XY^W|gXWT(7ys3Zkh-(6i_vkJ5NEf$3INPTJw z<~$`|-S>Ozh1lBKITb}l`d2;XqMW^&^Xxo7t#T-V1-DPV*MWY_5DM8c-)_%$a&n-D zyWX^xAYwQfV`3z)>VW9cm{_80K|@dRH(gq`@8trvDz&<9FLD&js7;G zqIJjI+aYojeT#H&sD`rG=+KXvwJP$AL}Jh3oxw+EXN^iCmATZvPH(+3RB?A8riNZu zRrq`=0PYhP+rf8$p26*79gykvK7UtLU+V^8vgAwvtE;rM^g(AE{KGv@@b(md7?DX2 z$`rDr!(m7mr~tKpJ?nT9 zuaee+^lbqq1gnY*h`D~gc*W#<_3vmNu?66vD0l2^vguuz>3#`jmUc^laJ&{o^ArGt zpuWo?3hv<=%K-Tfp|&$IFwI;JQ78>j2ff5n{7?(Xs93)WeMQ)nk}yUSb5h$5yo|Awl$4@)sfy~x$CX76?_c$Q2SeGp z{hUmN?#Al|mK}9&z=E)p*4Ea3dX`q_j!SE|c$fHv_28H^HxugDuP^+y1#%XYq?(0} zWBFLW4*etwYQ6G6WW~8c*Z9+5e}LjwnM8oumZ`r)|8U_yj?n6U5bvr0;9Lnlz`A9nse)7?NSU~@6?F%(L#RkP*UPpr6&#&`p?83gKi87Z36e+e z)NvOaj+G=veVug1r~S4Pv5t2gZGz`FD$JQV zQ*!=JptBPn8z0c=$+)MbVYhPvmDoXAR`5{FU8y~7z+lr;(Yf^$<}U?~ODt1{dpb;a z3G;r8R`_=ZZH4NW5z+-dRmMl>hxdf+UTnA4eNRh=;Uzh5_*w8q@DtjXJQG)1B3>LX$DmJ!8b9}9H^ZWSzsa?6 zX*Xl2uscP z^hbEg+Cdj<_^Y3uwIcdZI}~>G+`!w?X9L;%IRY-+lel6BrgOxbSIezI?+}@ilVBgs z@*RFdqw=ZVOLV`!CW?d4Fof}!Y$UL&)eOS?(Z-MQ#EtXxl&P=Q1YYmxoWPzDC{)k@mGdmatkGH`^cHax;FSOjF8fJ7c?JO2>nPQ zAw>k9N;fWa@hkCSNh-zy2g@~|H&FQIy?Q|RM07|XZTU^?_#Ds%YX-IgD zt%+Tm-7g(?1aS5k*7;H>_~KHo-g@oRa=lxD#y5%yV&VOoxyj#BVXW-zF{6HW)V4fr zI6--{lMpkU)1|to>b9Fo30MSyUnS)vvFNnMhN0WxF27sv^XH=ICgm0pXz+5++e4dr zjg4+H9}#vyv4>c^xkGNuBAIzF{BhQI5D)D}hJL1P#D=yTUC11mtd{Q@mi!C5G&Sktc>ts@J|D$|0z zZFq{O$~86iiq8~D*RzHOpNq@Y8yKbq_Y|jt3-QbhV7{Ml_(kvAXCmYtqtx6Eof5Tg zqQ}isUV}tQy)s}DySz#P|GWbcSkn>aX z@4F`3#L>d?^7dUSiCJg( zCq=yT?m%hYaV^7j(TczPn78u2G!6s!x7Y2%fwmYT3rvK~%M*UFhOY1A;=Urm0DbOo zcurb~-9$`)4QKF@B;$T8O1~nhxYUY{(LH?YvZTzeTUnSO0m(3SSvN!}a~pBV|0e3Y zn$Ul`=f?AWdGz0204>gg$R*+^<^5oc7v(VIq1ukYfn4mBw)pnViulPqr-^0Px`KrT 
z3wV1w8qP$O>bk2kv4G15Ey|;s!zRdWJBW0^Pd@MZ*v!YT-c8&bm{zI z=hA_XcR&>Kg()^m@-dFIkIT?s-O7sbv=Uh8cfHS^2fKKek?~Ewbc7WM|N7zuTVx1Y zL1kzBce1RTqU%KRgH{ux_m`KPj;aC2oZe=&^S5iI6oQ-b@r=&}`SYDl&QRQta`)B< z6V>eP*=XZ?f(wS!x;lecdwQcrr%(Zl4U|LF;Uj_lp?bRBL&YTP_Q&=xA*I zzt<=sSr-VNGT4sL6s>YY_fwyNZaNlEaiE=32Gx>gZmZKnziquWQAp@O)u`VD<|8$O zZ9xuIosn!U6CmHm3SvLI)jqd(G~DemWM0>LIEK4J;P2zSZ>+BH!v{>8DSC14)BT=% zt~5GT_IGQyOb9DIB)*z7XNtIDb`l(ZS;6}!ZPe#xZbr-($JvfDM$I2#$^Q1X8$isBubBvZ19 zY(e%QupNj#80gM~Uk=^iQVIH;EUGlE1;%i#CvTUzC>JF5MQKVIi?`Xmi!{!IqfP!?#V!j?L zP0z(WxIG^NGBkE7C@5+ibLicKpl9)$Q04)C1=DcD3~H zX|jSK*W^!*JzE4R^l#WtHTC4Jii+5pEaJaQ@8`M|OYAG4Qw)csZ7UBU@kDv$GZmd)4QTJlrTOU2vTjFOKN?)D^cRs#603Zygry_0_i|{@R zOOrD%u9+R%8$;ygyv>s>_YXt~LJy^>7SGisDsAhYy^)TWSvJZh9Ya_rA#Y6$P>{Qk zzQ3+DyanPi2_ze_n#JubG*|)%dtVQH5#@RISJji_zv0Fm++|!>J#HXw9zG>b>MUvd z@RLis(H_y*_{a3#CY9MDbJH}*E!3tARcrx#vRA0n#=0+ooqp_E1ENh7G6#KEbfq30 z`2<`EXa-xL`?@xEa7V78Cf3m)Fd<>zi#p?gtfl3zTZpFHx>FHq3%7Y%`sH`upU~~^ zD(O_eukuiHDWID&HgTCy1D4pf59F$x*RI9feo#ZBI>C52ew-2Ns!-cO5-M4h8H7K= zycdXOU33cmMU8@x))VGdZ=+-wr?hApOk-a)!%JdX6aFcpMWV(@iEwm5rmB6bsc-6Z zp!RQ2d!IlWQin~Iw%e46h=_1VI%!%iTww~SBgwI@Y=xqIe(qr~nLC(s9_DYsZ`(MM zDjZ9hU6`93o(0yX$J=zFJB>L>BjS$bNd*(6K;T-2tlI|}*-2Aa(BIZlLEMTB> zQTDL-9hGWULC#m3jz_MEmvhdr-Cn07v~did=~|=MO6Kb>tdUV4=NhFgs*juQ&&~*L zJnK{_7w1sfxU_hga6HxvkbhrP3kP>2PDkFl=j~dtFl|N1&Lim?>5lLM+|U8&M|y1r z*6Gua3G^AP?U_Xdmn|x=1Jrf~7Si9%<7ar)6U@ot`@Wt`)x=;uD|XC zrZWRwle4k)cJ0gBVsEHeS1{UO>gHOgQWp*!`qDcSW3DvEopJ7xXTpT$FOx9ZSv_H1 zRG+Db%!D)ASY@l!0p8d12%AY-?{Ow~t1Yb)0wV9orM#{m^PPhEl-W=hVQM_P18#> zzPcs)_R ziX@*^sxndcltN-@$%>RR+dM8=Mpl?f`Y?P>WwYcK745MtWxsSu%>>IbYgw4Lj&e(K zB8h15Wp~JWr0pH(>-5iaKThEc3|kQR!H8444bMfRg~V|aLHn62r?dTKc`MK(h%xyW zzwHKM$;;l5dAl(_bFv**%5Z#EH4~Utj58t$w>Q7;5Ic;m?iO*DZ#Af1$CC7>`OO8R z$l8|G7~kD$By`{9`2MmXO>QQ#qIdevhyaG|Wlfh))^iN)vALiKVL*(M+jWdU5GvxQ zdD01!(X}R7bAQZw68rd(cIwo}BX60xy*)qB6xdbfKs@zmm^52*hz(XZ-%^SGU9!kHa4H+r0L)V z|HXI+*(7aSbJ!wlwnk%B{#wu6#K{t(X@+PJw0Bnevifi$xO#LCpEwzb%#_dV`yIu) 
zol((MwOfPex=1;d0wE)Z$GJ(20-H(x{xT?Tb4QP%XhV~|QtGhL{PH6qrvbT>X@A$R z*gR-eBGJ4uwvg5QnUCN?yo~w*>66v)4nHcpBeUOHT`A6a;OFo3;djdL5 zdxo=!p4&*}X`Zk(Bu5j6vg+4z*)rXkC$M=?UTl zCi|NZ0);YQhY1s|Rpz;|9Y$vsycUw3U zAEI@|xI?;WdzCtzndn^CK-q?tU)w|+lgv#eBqfl4DZ;d{sgOMWl>(QWwey*9Y=bN{ z`?KYIG1a#;;YlP+GWLWDEy4sob~>9QlniViTdCK7m5^-gk%3rR-2rfT(6i(uZvl|+ zLl_>5mgHjlVN&%9VrAaO!BNR>7A#q5aziM3K?#R1j|+rDuKX#oNhyUtam-%7lv`-% zf(V0gP?R>aw4Grd%(WSP)#ZHYr38b?JX9L6zA1OYNFcQaPhx~)cT8znrKCDE-sp|` zadvhS-w~ynF@z}+4iQ8d)d3#rK@EUjc>{&jyV3CDLc$0lPh zEj~|K;DXU_QSGmxusUhE6!8RA_XNE%>tjm;ljoi8##DpS)I zA1u3?0_+NBWYzC1d-$y{lxIQnaHQ=HMWj|8Rr`PX=si3rwVDJ_KOe932+jr zmF;Y2(kJc23tZLc-(bRxu_n~FM6$?dfU(?yj%F$CChNH-pSZy+{W}hb1%6WN#Dz}C zols{R+MltpsWNw0o=oX`mw@nOsM{Lr+E6$G5j{)n6_(V! zB-N9^dpX$QLvvB6IP4yp<_tTh_PH_nssCmTJG)6ekwrjTJMBu&rwPTS*fdzuSc$V4iO&P!xI>3mzj1l2y7bMqc}Sm`H$Jb`2K&Rss>}e+m3cqg_>(y;&hcZCGc0@t@n-)A%rgAL^{u~`yuu#X!$yygw-4BB|cuC$cr+5BpJz0j40U(IIB2kLsFkB^E+k;y+b^&xvD#yRk4 zvv~052kGEhq&xTEp7t=9PSmXzl?(m%Q%%3LfNw09p5!6LQ%Cfiiyif)Hc<|`tAL77 zlQ%%>&!{9UQ*OJ2NT>YX-AzxiCJm2CE1xNGlfoEv zFkaYGA3%L9q1TjFuVb#e`e%2xgASl|fnWL3S`mUn277Oqc?tmU!Ut~i-}?Y}aHI+8 zY@;@8)?8dKr{6x$2O$@Y5d+-b-7?0cJxH@T$B&nEOl`_dOvq@7nVo@!x z9tbClRIw6{gWGh9?396~(w1dic3Kf5!{drh-wX19Nl)q7_DwFvn)Sw23K8QhmcQ-IxJKd>6 zk0j%62GSK$l30EDCSRBQ#9EEl(Hf>ahNX#z#doLRM0k+(-aBW#ZXHHcQ0AF(nq~K@ z8TH8hEnYB8z*vE?z?ySeHVxOPsLff~yrO8$jSfl~>p`oz*h8iTuVEKW?pEfHliS_K z1Hnz#b6=ZX%=b?;+gG({c57DKa9!<)txv1(;9X;A^bLY5^3@$S7uh%z{V1n2 zA9O1SH7IKq=G8Uj>!~oCfWq}Vo2RfQW$KdT?;ov4&I_za zTz%iu5@+j__XzwbzLTEnDb0gC(hvUPjRSOU5S?Q;4&3jezud4+el=a@87xg*i^#UI{PCc;P*n3}M%S#C*|4yFzRpcbldrp@K zze7_J!lFzj+K}db@&{HBquw^pheh)bfxUmDd1CXZjgRn_`C| zFvt8YEX+vgd`BGrY6s%Vu%E1jnLdJO6Fa0|>@4aHq$3^}l31Y9x{L@^Z>C=UoyNB; zGVPszc&G}J8cm!Zv-Xzz*G5WGRep`4-ninfahaG7b7R{!3LgMeY};cxR0v>?mfZD| zBH}YbpVJ&(AekprQKhr4;&?Xm_&YOTwdL!{+D1gJqfob-mzQOfpbIQC+;C^O9E(g z_Ejhz1|(>JI_kTkwsM(g#LK;n%haudN!ivwgvB{LD}87AlDwy$940Q#m4cm$+{Riw 
zvi9$3>ie&Z7))kw(~tvo8_y_;U$STSGc%4$&8jYEf;^a!oYOB1nRf}>3IXRUW1E;k3s3Y?%*@4ii$H5H5|ml+0Qkd_Z5Z?IP^$lZyFo-~vd z^b({ot$zm693}ircx!b*t=IMS%$84DP7de<3VUktKOls|4X?mnUOj8|TB}dP;Hv<_ zG^8$2dd7vs_zSb#4oVKLUi6?%l5WafeE?#fMpF3ZCUmrj$oEZsE zkn5CrXS$#-7>GbKNnHT<`q4y=B&9Emyq??u! zRtYgVtCWCairjJGjGL3AGS;x`!403978WWC_H!Qu)u&4O@DRzAK3Z}&P{!j53k2qv z=9RpM<(Dw~oVHig2VCsV0+pco`;;lByj!9n;6>lRS}W$cx(*Bl0Q4?<|Y11+XM_C62-2MJvVDGY0%e_oza zH~dR{6n&Nzb;|LioLVKLXzkymJPk(CG^3`hAIZGC*KwF*qsJw;Ah)y**3H5yg1Dxex!E( znZ=#^{cuX?*~lZ?ri0KB!+o)d_da>%~hAuYdPfF&5vR zK_aOgcR3oz(2@}rG-JM&5|7RHU=h)T3&hWVGnA+DaBBL9bKxA+p#Iou4&W24xIMEw z?*g9`_KzkpxL}0xcgk&bMJos!h0M{m@sR}z2bzziQI8vF@`M83YnaoFnpZ=098c~o zZr@K^uXIGJ+4HwbqI5fW)X6Bmy@S*Bd=664R4rjbep^MSgbvqd2--`(RLSIc=2M>2 z)J1rT&qt?K|FYD7H7zICt&qQ+s%!ifR2KJ0_rs*y&{@3;&nkS0gu;2T+rob44-6=x z07Xo8nBOldBMGS9@p}_U`8RAq1}FcaEg6K~U&08~`6s@FvF7EuCL0Jas^~#9VzMO% zA8nzZxwJQRp6kL@3H0lJTSDqvr-FTQMiOU5AH2Xxr(3qF;r&p&-_c+7{J}MPGJi(R ze$S|p_GHNHygHRR$QiY>0A!4h=GBwNozEV-lsAy(JP=Jw+Qy!lGPAC>)a$pFRN?H} zdLjxr^w4NM_3qz(`;?VaSpXF?;!nJiF>fi(BD#050PwXfvuE-SbF@VxD3^D>h(sdC zdBZ}!_ynSe@6DD~m+?bcY3B^Yr}L(I3-E3d(?aF^niFeh_+zJ1jPehwk?C6*_vnAl zTYsRMFT|61KS-5!`B^r<-PT#&0z8l`nqWVQiTVqHagp2r+aV_w4GOw+|tD}wTw*E3C ziXeuz>Pp-Ls_?L1O(G#K=T@3tzRT_KN~HN+yeY_GoB>YeFqy=RqWQG%3%{>{nI5G3 z8`e;{>W`>0SF59mk~?(#8dE)(qSYpKk6K`zH9QkM>;A>NhIi|_7Wt#TDH!}63a zG?kc@bG;F8bXy4m0)hWyePf(*3#>fZc}jK9k~BLpdM~S#`8hHP9&`#1c+z3l&(*2g zHQ)DSiG@F-tw5fZhQZjWcbo*iZkLRnYRvYQbW%hj;J@=3`X;>Py`S5m;p=%Z}oJRxc`Yz+iR(?DSKlD?5h>9T10K8`J23c@Tg{)96J2yL15_` z+YO_9->yEv*V|?)32E3$B?FyWwKwL>G--j&dU?~;Fa7^8&kwS+2S=E-(vrSaX=nuXh#z!GxZR5@i}}_j?5bu+$K+n2eKc zkGnm%^?C$Gw{#Cs&rwyh)qBzNL32?nw)+{GZW&}c@vaQW{!nscg6h<*dIlh(&zt%b z1V}4=uS5D84ltez08rOlv^K!!yA`}52SLg(S{O;IdJW;09X7%dMIMvK<^bTg&IV_!6WL1lO1}D>I~|rymHe1DDq*{E88in*IiDSB z`C?K!tk_V>29B)=3?&3ye+Mza#Xq#VFLxMufTB9%ZW8NP*odi4pd1s1qn!);@7KM4 zNOU#oXL%o`a>{OozY1L3^Su+YF3_(wevP-3<$9u-dTjq*6gKUq-#FVpUS)QGYqAxPG^_(*`256$So z%e0kxAc?+~zbq`ip)UGsrFl2H#Ci8)hg>oLBn1W6wsG(o(fOnd71?W!nR=u&*D(=A 
zm7PWfiElS3H|4d`VFy7)6#^{CV#s)lU*+AI(;?M1JiC7zk;W_eU{gP!T;xs9jDPJ! zvQg|oYlya%Vi;YhJ@+ev%`VJlm(#3{&VnjUh=)v&cv}R zZn%3fyTAzRi@kW zem?x|L11+*oF*=}hO6t5Ur2&6QznJr==%u)QAV}$u7a%meJJa%_W{Dl;;+!m`#!E_ zRWMEPLR1QVbzG10i7-_2*{Z&=M0y?I`=yKA;O$RBxV**7Ue%)>j|bDNDfSyqxP>tz z@#q{5*Go}iZ9nkfx^5DaCU(Px>;RHqRD|z+=Fw1vkt+wvS1iAuh8tg_N#g7Nxzimg zRTt>@M&`+=jY;w1Cyl${Ln%=b;@4k5s`h+IHDVZajEs!gB0m@KWLqEb)#=b%Br@G8 zOGk)iDH7LIkruG7)&8s<$<4_}5NvH6{|KM2(pbl5(KE9#r*X^E9Im z{rz42T!`C5f$mV$N5sJp_Q5|Oyn#dV%lZIkyO|Z2o~whVEF-BD)Wc{s22215vH5+P z!w~NBlt=3N&hiFMhyWo1aa_KzL{04QzXR4B^1=CB*`1+=vW`JqGvyI;*$ z1L2p~<#bEKUQ0#7VKl6TRsmh|1bzDtOLIfOb>s{(92HbyE z^g#x^G+XV9%aMq&PG7BsTDX}m;GO6dz z)`y2i<7G@6m&5r;Bn-l@+-mCT$O*qzbUx5V7IWcdgE#WTQ3q^KNzHw@+3Oa zB+Qko%|mHADW+4mAwxa2&HeCa+tGghl#oZELHzFnBk#og~+i z;3Ld5{;&7*a~3BbFKO$W{W@~GmSK<1VLqCQz1#sTa;Kr9jXdj}lJGE>_oAAcpH-$J z%M&X}8OBL|n+Yk^{rp?z3J=R22cmS^?N;x2!&}Nw63%!JsMU0$l4Cf@;59xJUyICcl*GfutGC zS6Z>>Cg9P)60EJxhtjgEjn2QEf*FjuceCMXmup+?uPq*KKsMGeVd}KrV9dO<$|u=Z zhc4?LSQVOeyc>EFEkg(E8ByFvw!B-Dy>wIp868j)q^HAve7MEO`yl)o?>vc_G*~6* zwBtput1im+UD@-yGW+%cPyLb*+~l4fk=E_#rm3C+nZ521vRc6ls7{!^#HobdtM|iq zAvU+L(kUtlyItIa_@pQQ*tgw4X@O0hhW7c z7nF0(Z~tY+%NTx>%Ww7Wj)iJzAut-C5dtP>^`r7D4lXk*8?{2mOa1L1tgLUVwA&KH z0EvjJw+FckUHk``S)^Pbhy!43@$Vz_8t}p+wP7R87oe0ks$aY3R zN(8~Vw1@QkcK7Kj7Eyyu$HOzC9+nc((NKj#a>5U2WlSrs8SNf0PD_3`$%|5*iUV_X z)b?;NhYA|Pn-H$B%v=4vd8A`4AYxfnP&=wb>#E7**$_yXsbbXDJ9J${ueuUGMn|=$ z=I$&c+BiprMH9QM!~Qn>tIU_j{94$cmz(@LqQ?Dv^o7o=ky~A4N6+N$PEGu@f*aCQM7+n`>InKQ|hj?s|sb z+k(k`y=yby95&6mux3Ap`w<`VpB?u<{yn+*4yB}v-a>6Xp@tn{Bv4%MCuSjGBevgk zj<$}vvH7HphWvq0DgLpcn7o&)sO<%pzKu>zMq?$j;=Q90;>e_(S4nPtDp$qo$poly z+O&VGbMt(di>1@ms6;Cvn$uE)!MG+7gD#XgUAQDg>Hq}>LhqROfT|2UE()QAy)l=&rGGP*g_nd?I2iQDW@bqDVlm~ei(bIv za8Em?4&pihYLO#_Wl}8~d1r94a3lmYzQfZTEYoOjjs@fTizVpB$W`A=Jt?JP-fys0 zc%WfyKK~W?*>^v%zb!Cox^q>{aCO$4UfK_FyEI!~u|ZNXa)ERd_%hP7+%PV)&v5Md z+HJ!ftSGWzW53@o1nX2K0QfZauO_#nr{^oQt(P8QmE2sH#S0TtWF3V_)t8^DEsYqn z!D)}u0?V(yr0F^Rvx3%e-52)M+u*+Q_bQk#p8Ne?(kV86{nzFpiHy(Q>CaP+sn6#U 
zj3RvTF$riCfy&kAs8Ee zk915Zz1Ql)h4#zi=quTrdP7l2bG7zJ=z|Yf#+=oMbY78p8M(ujdIpM9cyJ~P?whPZ zx<-BmQ}45ICV;h`;R8;e{a9+2mDW3+f!Zs*7=j8WW)O^ZON-UA)uK&)mJZ`?GBpF+ z-6CDvW0s0sixb-OeqcnhyU|o=382kD#VV5$f`@H_NkpC|1biTby{055b_q zQlqtYm%b_y4tLjFn6=IaVnt*qk5a@kJp1Gp5+`T!v^#eJ-^JPRY=%)F=XultZKlV^ z{0pRX1E5o0+n1$zXH-=pSP;rH`}la_MIRjXp-(cCPnOR)kJt0Hdg(GO zD;8nkiqU@i&HcYm`)p-O15ntKiy2mfZ@v4oxO&n@r#ZpHWcN{jb_GA8q9alQVgR+S zW-2EaXNhMxAhC3j`2HxBo=+&AiwRu16{-;%$X;S=AZQJnKb)(4Y3}1D9h{$+O{8V}od*k!W zwq(s9&U#}}O3+9^E#|q|O|23Qj?_$$JDD}YK{v}RgY!u+W{ADJ2?Igd(%8K47`1HC z%(ezhL94L>EG4j*p{-X{{z`_@vh{}lbgm3vw~J3{msvyj5CF{HjuH$OkN%Ledgs|| zr&nGhc7g6bIZLl>2K9ba*0NwI0!zX$uDCwsI1%unsG$j|>A9sbzhb)lGPDz>oUWZp zqx19gC;x&{8>_GT7^|KK_1AjV+xMY$;W6H^nWD{T}6@pC#bn z;Lx0AaKtTT8!*_hnB_tR zh9qNRvPzQO(EfmI+Fp27NB)dP<;Uvq7JJ%RQ@H!}CXfVL!jkPq(G%ZHn(tNa}QN_GBsJbUvFL5totZrb&AJTBs5gMLGq zarZ(=$6aiAzjrgTauV_b;xh$@>q|#DzZ3)!V!^Q1n1-#qMIVC8YukX-Drxi; zN>ChB<5=X$NKg8gl`**vdb5!6DuNK!8q0Y`ICQC`zsPMy6}Lb zadGeIver_&l}vgsy2K0U%SSl*!M(;-`ue3?a}mF|eQ&B8 zJ{+_fu8S9V3KPa~X0SSqzA#%#C&1A*ReOXeY#1OjK)Ye3O-Z)Upo_3IkgDz7>Z*>= z_EOvS9W0(zI9DvSD0O??7#HTscJqe_n4k9INz{!o#wH<~1JeE zlhrcDe~qHxuvoPHi!J>!JMmrjwO+X$0yWg>csX()g1DrXfY%iTVjJ@qEa0J$k|0b= z>XANjTJ}e`skQuuAMg5DA}=^PJ66Y(1iZ!SbC;Kec&4`q!m?hi{=)2-<*>YOQLY2> z7MS}ytE{hP6*&>4U5AGll`jBAbal1IY&uoZyXw*Cu!5kCj11*Z=`?+BT9tNLz3i+H zO>`$`T9i4UkOc(ezr4FevcG-4!m_gL6BROcSm}*pW#tPYAJwnME>DD$pkvkeMKXNB z9btSxo@CUA1lM2Z)N4+g`4s|ko`MZ;;EmSR`z)$eVgS|>(o&NKydFL_G>4Ld6-Z6& zI*G@?#$uwc19TpVKC20vG#)R0+1C-bQ6r|}3i=qz4=M>6Unt9~?}&5v zrRCFUbv~!>_&fm|kkYV*W)7P6-My+TTd2&zQOh^tmzDme6b7L8p7ur%q}l$$Ibgcq zOks@`*)ntf-1#gUDJmIK1Fb=RKJkI<`|!(y#~#*wn0eUj=iEJ!fKv%&53RiJkn4s0 z)-oTl5E9R7`@G$%Y+J-;w7*b=mwG#pheBbw9iomoa?uxR}5=bZ1s2Me0 z8#UPOjiIk<_=2mgK;Q$KSp52&t|^560Xa3p4blV&Ejt9u!16ZE5NWg?<87boG5nO=H; zM@vUiWAIbx{PDapG|yQ*>xU@!{^f01*^NMmQu%2$T)_v8i!}=&q=v)>>73W-i5N2Q zxNvgU4Hp=Pg)lcP6?--&UO+g`F0obcvj~UfD7L@JvMk#O)d+>Bl2+;(u7lF-od|NG z)-Gp?Q{7T-A!x>S38P_Dbha1nruVqq(>HVmGB{^Q1k1DK{C@UmZWjJ=h$Hp32nG47 
zm9cnkw?|odIP_`ik2}4o5?E@2S`e6?MDQ}6iz#mfHOY_M)b0H~+L{Y`stkO6_Tq&u z9pli3j0#XW0?oT}e{vUc4WI5ctxr-jn&5yfWNE6a*BdDK)+Bz_i;?~3A*4PL46ZIv zfhI6P*Ss;lelA}1caqDUI{8b5$*<}!TMUf@OVNeIZV9#dnl<(`?R-z7dPm1&pks+k z!(-=E0hdalEC`fe$$m?IH!UMk?8WsQ5jWX@9kkkdcI2SJ^Erl6b>kb)mSigT=Btc$ z-gURrHI?7!Q^1m7{HRQWBOvT>xjy*TXuCW%k75gn8&Q)Y*TD4q1HH@Z(bPQ?V!G3k znkJpU(o4VHn+jmAFDx!6-MQoVi;V>iR<<3X@!CRNTVt48PC8x$uXVJr%z2M~Au(&$ z_&9wkJ|5l#dh+57b4Ni3#xIQH8o&6yU*CALs4Ql1Xz>zgJwD8&U40a#kjC=1&B z8rG@g1?39L5f0Xhu-1Tz29!qsxEymG>8c}P2=(+dZ?W9Cbo+29(|WM+Nw&!0cD5z&Iv1TzeHoUpZ}{6B%s zm#c@1+U6I4QLsaD@(w%NUZz|_lba5!&D<%o6V(NX?-K5v3Z|x_tK$K zx7b)ja{yFnsX?JU%unBs#X8#?h8l?I+V{=Reza8~ikptSIiCtmWrKT8y?yAV*43|* zm2tr!r``(hG{J^NStw+MOF`6&%J@%CIv%TB^)%W?^Oo+@CEeZv;vcV0wuC{8BIpUf zR$Fzh$$tycr5$L5Ho8G>VoG3cRhQOrhR%A~p_Q{Gu>UQ4=nznjJlMI^58Q2}L;v{n zjZEDETI>`Ks-06IcAA(_Fpl6Z7Gi!lCG4~!ub^I=|J)xa^cIJfN2ki$N)w4Rd(qGE zZQvcR=)koy8oh>GH(CWPV`_G~D3nxo+L5>Yx*1d+yh8b<1t*Y3L4y29`03Fon!ysW z`Q-jNgv^Wt1b*FLM>9HUf}Rn!hU<9N9a8+L>-wE&HXQ%vOSNMcm=30O?FSz;mUcA# z8y#&h>+?&pzE7lyCx_^oR-H*C??;*-^$*%$EWwaOL>&Rmk| zo@sc#6tdi#A&dS4RcW4AL6TYz*GH{~j9SfR6ww^a_LMNrl?Z|65Bz4;sYT97i7-fO zt(9THusVk+UFP@xpbw}XgoH4_P8+LiQAM{}jj>-cbz<`F^R*ABB-k4HJgwi~vko zd^CyqeI(KJ4T`STAuXBL1=}~91UO`tgJmP{`1#(UM9*~zV$g9vUqUT!kw>HAMm?UB z!{GmI`PCB?lhS_MRXGMm0{*qEsV!m5RIPPsrFw0`84|cOf=y;a$Z8mN8oj|t*Z&wIYBk*)@Prbomok9wO|K%~8fV{^k?ZDE`mrJj2kUj{oi(;oHxdN9z;mwsm5sgZMlR`1)O zJ~N}uy8Da3!&RMpLJzgtaV+>q$)>+lScaj3uJ>c$$x+Xnd8s(}?D1*GOr>y4_)jQg4X6hoeNkl9dnF#hir!jUBkY|wcYFlZ_@CjBS-x9DyJgJr1duf&kRd<1ax3i<2bL#R5{ zA&flD_vjL~OY@79ebRMbapTR@rBq!Zk+LSKFZn9^oz+sgK&ctk(;NDSjS&Uq*MmwH zt20AMCW7I4tp~hX0zMoVLe)PjoY|ph#OAyiRoB1fk3{3% zi+!>Fjx^6Wv#5k@r}qzTwVJZXCQHzfbTE^io<66gTp_GpfT{nhqt<*|F*sRwN3#SU zxpe^nojE3-6gZu8lKll9Y1$Msyp17eBTGf^;K1uLbR0Vgnfw@Ap{b0>ZQwEI0Xb7F zUL-NC*DgR9wcK$kP!{c{8%|iy_u|U{)ucrK%W#!U(Yao&q=nso+YZ3K!T3;(unVnu zQ#53{lpN8L?#*%-3`X;cKd={bg4RC0MDeJSmH6qg0N)&)QH5qZyk4LuaY1jQ=Q~wJ zNGzk9WjBT%40U=fVy=1RO*PEJPXFJnFF!LX15LjxTvf$`mBh59{cv+~<5x9G29U`R 
zTsXV(z*lns?QcI!9{)qIjl!l^j$kbAW=8``lUBcoW5qjO!K{3;H&m9&NwH3nL;i z@hVMYL+w<`PkWBFLGKpc+l6H#Ix?d3&c9_+;-ukXG`RX4g(_v2kQ9r3BM(}EW9{-j z-WvO@9Ah$+S~BLw1rmVQTv`4|AT{H}I;2u%`5%VuvUx}OIz;6r;23(^v?JH={+e2p zFo_|(VhvKRs{SiHCi}O&a4>yooCAwPsL#lOKYYKo6f)oGLQ|GKATKLt?F&YE{wrfs zstwP!+8Cb0D-Pnd-Qr~4Jt2fV!TrGZ``J>+4WYd`ZLiW2e&Z#4Zs2tI1Gw7lToVIN zq*PoGb66zswpvGXvHfs!TYI=ZD2;)i#K*Z?{j)iM;CzeJbMnIbhlsi)B+XGKnN|HB zlZaoVe@zje?M{6W1jumSk}BO1lSwMh({^({Ae*&|GeTpeBF%HoI{RVpgIrC~u!G=`9HQ+c1`w4n5NPM;F<=wY$~Ss@4}`*mPEF3nT3Pu z=0jPSkp+1SzhO0bM{wX;nPr=n7u(Ac!~wuOdPB z2gSNsXRpnMnu593@ zcoN`m!duP~SCJ0WHuQ8{1MNjzwM}20AaGP<0nT7E`3>r-skLGGvWU~F$*7Npd@m+b zF2(la^z@QtNot}Qc|8q|gedJ*UgT(#1K&J-vICC*N6R=yy}8(xPDa;OA>SyB^7;NB z%ICvmAiARrzKR_vegZe@f4Bf{US-+}C9ewvA)SKN8&n0gi;q~U)J_f@@%o8IQGBqa zN2Qg*(EH|5KlZ6D&mD=_?gdghuc?jy!~d*px>LQxy6zh3&@FcjVC=|^Z8v)i&aik_cWa>L%a(}$SdK24D#u~Y zb3?cho|aQ_#1zeYwePd;s*A`jYWFwd#?8_ntK`%1`RQ9ojdI_N#QQz&r72aGb!@NTWW zDlt8b_i4ZLcXz@VJWcsufYhtrHZmBYROm-JZeal*w`I#`%KYn?!n95ULDe~x_d7dR z%5r(;#6prHwD#5KFz{@FY@4OegTxF?RlW2~ovC~)bT`>vHy(yqo!W>yWmKyi``#2l z-gI#^D^&jGFT!)A7Vfsu8Z6GpQ>l`_qjEy%oDtkiYBu9>(kw%-HN(RaRp<}(^kU%c zxz^EiYq3XlePs5@x4;y%Io|yc^0_6}w8(vr-|h-q$g(4690myP5!*c$uN3?XW3@p! 
zA3ABi=F+NnB$%`H@$1+6H;$E88#2zadUqsm##3`B0i%qNQKOK5CofK@EY1s*T<(pc z0G&^;WXuRu{Xvuxh|u9_2tMU8gz7quyB!z~_?S+UKC7|)_Ce76Tz;wM&Ny*b=);Fv{RZ+Z5X%E<-~fSdwjTF??%T z5^F?ht~HosTm55nyMyZ>+mFGE7bN=iTRL{sQl3<*T+!e==>}3U9^&LurT&*1!SFe^ zfT-x5>Yp{~%;cnQ2U5nyY$iLBsXa;D$meno?9M{|+-z$8x%bZ!q)Kd9F*k1;rS~-z zy3o>1g6Kzg;)!S5f{j2|o7@EEZIb*RCpzwXp@XVi?z=oJ)X*;jiY8ul#?gNefzHC+ zx7U}p5@P{ZYhhs~5K>buO;y|&r`}8?h+wguO?)ki)en4o4PnH6dq;uJbhZFz+czmv zM{;|Mc~OPKZFz!h6n-7Q&OeV?H1>6S#NB-O?9NFtf!1W}vq%A?IYT4lwT-Dwctxh> zp1N{usdpu9gwOdi1ObA9_mhGs9FO^eHgR4w6*DX!Hu)@0uRmva;r;kHTFW=p46AgO z9k5y4+4`_IUBvyCu7^#*brz*j_bB zWFkAa(tT|_8D0L$^Dpv|z3fmdl>e{|)+m_S+{5*bd&uRN5)j$6-)+AoM~^z+Jq!Tu zSOB7hT!dVO#!>*$xDj@ou@zgBLg*J4#eup8TEiILn-A|U5+aa+k7sO~2EpA%^kNUgZS9cxivDkz3b&LRF8t zP9b_OKW*4;*@E%Xe&*srq;p*kKyDtBHn61cJFa0s&sDtf8;s14s{k!-nJ}{A0csj# zT;1NCcU>g^zCaFVl)s3_8PU^MnvVa_gwSAyzSP993e}W^G1IPXwZfQbu^ClCN;Hh? zB8`ZQ3EHtJTx~vz%0{0bwT-tiqB=e&Ex>^^<7RJo;44rcJRN?S*w0)PZ zD(a%RxK{0b=!=bgBdz>;!hKQRcjr}TN>*KQIryM$5zlzS^$c(ds?+{xl2kSO;&>9a zbpe9fGEVh0I(qTH(47iy!4}al>CurX=Cywkai%1JoDq}`tY>zE&;8crTQh+%ugwMW z_oD>9XOGoo=hWeMtu!Ak{4jIsMYF}Vb-t@@Uu@rd~RtiQ<%+PM>F$u~PaA!ai z9UjLn;A|Yz{+V?Q`T{7}{xWH4-H{MQL^o6-u>3nh7D;acJK`NkAU}!M z-4Ej_6wC2W=rs#?b&j{C#pwUBkq4jzs%&Ie3WVA7;K>SBaX2cDBsW-%`7Zt+#@;e2 zuJ-TJ3`u|h0RjXM65I(PP&mQe-7UC7A%zqU!QI{6t#Ee>g}b{$aO--WcY4-4{qO0S zkF3RkQw8U)pX_~Yn#mm32$Anrf?m#8v++2i>U|d)nhYp8Dg{H0Mqjf&b6A1uO|mbJ zRxmUR(CMv_=TE-i%w+j%j;m@kgcL<|mTeeh3ci_T`)!OF(O`e>cszLaqu233WG`J# z>_RZD;2_geva)@3vt4G*_DF%=ZnlcMmHX^PORF|?{~6=U^m7WmS8*I^$Lm!2d#TWI z!KybcAvzN>ug9#K{WulkZRW>+uG_qSmf~@!syCF=XV;uy#|>0-L%+wU zoL|BSs)5Avd4{VLg(C0u;T98<5b0%+%lo^$*sDJ9u zRp|J4c!s$8`aRY#!`%6XXxC>0)#dZobM2C_=`%96<6d-O#=i0}VI872UI9hnpJeJx zL7;;^O5697>mqtvc|+5F=1(mDcI3kTZzk}6cQnQ_zUkNm8EtF^4A*eww2n&suhKU- zzn_I7q-F53?lUD~ui86?YQ&dC@#X>$VijCkI?H7;BFdt`iERTXf5EQwL)Fhi$VZ{pNKo6E%w zPOnOhte?m?}3kvz)O>O_*)P_GlV(b(2)W}T!=gaypIqU!SLiP`M z@DZpqy9xifi}}BmDF1z<`j3`WolO5TN44-;ob`V`3;&@t`=7!V1Rsu6_uT}nw>lhM 
zx`o2Kpq8)ti@7UVHkH0mDAeq1wM|(?Wky%TImE#*KuXjEfo&QfFC!yU*g5p=fcLFV zvvYRX-!Shm7Zv4mZ9KQ{{>y)lw1h_znR6c=9=u)bt1Aap${vjAe|nL@!mtjh-}-f= zAO#1EPY6(5p7Ys5Bwp}b7H6})fLC3Ksejzr%fe9XGMbf9BmYN(V)Y#}^4>xlT%p1sJ( z&7B(ZfW6Q#e~CT9t9NxYS4pz-Yp;&3vbs1JharvR+}-?qy-T^>5;s2XGvYEjSR;k$ zj<>Rs2%BRiCYGjL3|;adTCN3PlQS%%^%*@_BY8T%bi&(|*^>3<)I5*36*4znHDF2n zeBW1Cr~s2Qz1FWtJfBMLW_O)NQ{O_}ZgWP#p2e;kh9#BrL<$+ftwjNS=GWjvrnqU| zP=cewqU;jQoy5Vp&YJ25rktmqwz!x~vEnv2#RI6Zw#gBEz1^8}uPH{oSR+#$!0pFO zT-@D`hBg4UgU>5eGQ5>OOo-||nHP4aD=o4ZKqDLJmT1fqmK+o>e6K1DG)@+IRJC4{ z4QKPQrjx-LC!}Dl z(Ilz&4eUicm{Jr`>0-NzS>Nt-lWaOUom==Pw6aZ7Z{jY6xOtDSd-MFd<5hmZ);|(B`|17POZAd(t-FKavB=6IGSn_h$>df4!9og;bAhc zAQF!cE05=E<_FMx*e0y}4xN&jsM@nFsZeO95&IkQ+HaAjfPett0EumGfK=iiKXiVI zipi?%9_wjj_0IyfJxc0BxN#_JBv`zkh`U!O^ao#!^92pNcI`i- zuimcNp2a$jbEP(4YL3ICF+mPjm;3%kk-0HY#fSACbQ(6c_;H>SU`mJG*D<|s9c>zx zDjC34+iOEa2!;&j{wPEk`h&=R^-TJD|KnxMOdB_m{rm>7H#_K}zY8WOOU3_NiL3X) zyNP_*2Y1a2F5apgkIjLC$ZJ`d?szgbCN|cbyOPspr6D}S?bLiNC3+_x28~f_2`2$$ zjPG6Zal4&g447?n!*8zDze7aJfnm3P_S|cI3)<75F`l(u{&{w;*>kxwqJmMF;aa2V zS4Dr8LI{?cnQ3=&p-Ho8e-VxUXc=jHmB!;|Tv1)kvaVv@?49V3YqP^y_cq82Vv0&E zUR(xQ7?Dga*~F~>U3x!=&llI+i`X`P6tP3ErZm%V!j)V&5C-<7p>1)W=W$`um8|C; z>HbF_+aJ*3TqLEv-)CF+#ES5_7`@9h4l`Wao0@B^^YXg%^m1#SISYT&FO+kf` z9-gPbCK#zqU3`nnj`s#WB#XDXO7$Jy&xO5&W^^KHd0zpArXUv-s=RzUhU|3)-KxRL z-XVe?KZocZWU2Z8;u)*2M#5)s|Q{QLznP=I22akGt4P-J(-p%`Pht)IW=bXHFL zoVZVma*Im}Qra`GdFj(30V%KA#-F!(%MUkVGBY=*zaHAJBrrb(guW;*^j` zO0sVGQ&uH?Se=i-e~;)Z6|baF>1~^@Tt>Hr(DDVI5&yjM+3Bh8MB|>|$W{QKzbwg6 zFC#`IF$+9E(3OYu0^~2D21||MSHPl&2e@FUMA`5RA>7Ka4U28312>5X5?8$SH*hq} zg|l7By6f3_ckv?**ZlZ{Btm&-^thjWYx1d@n)Z&~pDsZeF54PHmR~fUV}$YD)}qy0 z0uIcnF0Y0d4l9AEaL8%Gt5Gh~8?ol@7ZFKxb`jem&|d%dZYSa9KeCECHnANa5U2R_ zcvSbjU0enCKOlb@YGpQ$RnH49_yRQ1UTzQxO25@nrAWBm-@k^6oi7RsN4;}@0F{lrr^7d# zX^(-0HLrU)f}f(>Gg-CtN^@~>d#VMr7*z8}}ksn1!L;HoQ!$N49M{v)%-6(>aN z)z;+I`*q&>;z`Xvu(TaP2?4^*>?cq>fHZ=HI|1SHY(8{?J5#jEa37{hMwX9)$qYtf z)5ORo1PGE63~LbX;{f&f#>x#=lx!#F)*62B$CwSmU9vC!b->{-%EB5#2#SoU%_2Q+ 
zzvfb$l$-2p4Kb19(xQ8c+gp933siIwwrrFji z7n{SKE7!!Yts%KaLc!WzwS8$ldW6{)^T~Gn=}y2rfiy7@bu)-eAK{S8hIdrMLlV11@}vexIR_5 zg3F<0!O+O2L6kJC^*Hu~u%NxX>*8FtY}i@|TXMc+`NU&7aI?F^!cULl%J@zY+$>j3 z=qwiXn5Kd=qIbZ z`xrMGs#S$|kg=mxNMmBGAx1jXx`uIQWlIc}gdEdfffy=@=Ht|dXWegg$C zZAx5&&B|hlRNsHWj1hQV9>bk2sp|CwxJX+#Nmw?25&I+k6@Ed2;_~B+4!z{)uzbt&#%VF| z02wCJ*7dcdWcVOt{`OX0Mav@rU`K?Fx~Qa+3*$<en8r zyaC(6L@FASuEy(1AD1C=)Hunm_b*avWy+wQ?-TSW74Q1xvJ6`1A(>V3@F2TjD-;cW)kt2@5tr4POo#Mn9#> zZiVZbek@I9eM&PF=x6MDA_~>Yta=;>f4Z;~^L+T5O?(f3I4SiC(ugl*;7!FM>4sVH zDa--W*j^ih<=O!wBd?<(URNDkcD`=n0}?@H(?<18KKI8_6uolL_GU09Xwp1A!sdbS z@@?jGld-XH=wU0ly}{rVS=yWk&O1oEtmYHT&<(dN zK|?cIY^*hzY&<5Ieo*_^I5S3(j#yob`?tU4jAjRQ9AgdxMWSUsKAA}3uGYP>{I>Um zD$aBa{jxs7vT7B^%`~})I2BBnrgh5dg^N#kfZmaa+F@!}=-|kjFONUU-Y;ByIwZc_ zB#O=^7CYe^Rxu8u{h9#rHz-x&yOcK3zLBcz3<+%g?Rr(zE>@#r=%Omy={N?Pm>89| zVq9Rw4W76J3!}BZyr38^cokzGv9>>}(%PQCxtclrl!^v}R=@amr*qU*CZ15GPi)*= zwZNns!-Q43!;TAvt8s9?sZGCsd%boOJ?`mwA<~R!M*q+$#cgXbXyHnp>=4&L zJ=Shmj?W*;(A0Y~T#z+SC$CCTn;MyS%n)KKg_ZwnqYptHNgs|gUWX^&ZZyK7p~~F5 z&o>}TdU$^+sN9OZM8s|+x2jMFYm?S={*wxVp?{|*w&7T(bcZ3Oy*7i z8HWyIYbRD{&(LKcotmN(N{4@meE-zFqvfE>#GD5xzWZ9W3}?Rc5ocs;ff2QgNMFI9 zgL-AtGxWF$I_aOpf$!;F>Pi=0W?V+UCQ>C@)Bj+vq?caFCm0+z#8iL7&ZY>9A(@i= z)@vhSs-`{c<_8Mc%(1#o@pfy>*VbvjPU3c>n5)#*Xv%Xe%Z_XPl27wKV`fQy7OYO57Su(wdcFg!XZV}%AP|`qia7M2nB~mFK>Xz^J6$Mi(q)m+Bv#i2fCI(i^fPF zh$F3U$AM24(i!Q=ubcM*V;*ZYg>#re1%QKj5`V8plYh_;=5c@af)Oz}jn54q=%;jV znH!Fp7j~$laV1)s7}c&mmk>WRZACBL{0r5_!7YXLW1+J(`QkJVG4x07*>`>&H+s7E zl-Aq#&nV8@=hpjuD zRi{vr{S;*P+kz`j-2v?va4^NHhWkkIt`-pJ0|W9%OmSXN6V2sam0Y)&^E>x}u)RpS zns+O2S7n?M*4kt}0^Dr?=(`uTIFPiiQ(zTuls%Z3cI3$rs(D9&qP<0lv}xk+2bg2|~JU z2k%;}?j=&}NNy`9GRNZ`A1w;*5R65s23L3R_>u3XwqUck;`*XfFRmf*C* z(kY3Ed3kr}bTJ)m8PheO^jZf71>aKvID9HUH;8JhZGIg6oVUIHvXId_YJ3Cn9;$9MH;E!c1*hmZtbUxe6pa-E1=f%IJ zW}-~?t1G-fdsVcs!ytbz`Kfo%+eu=@CUebQ1qVfC{I@}?YewJcxU+)wkZd;@n{V0N zCEEvNq~5nfG>1~$xbO)2GG|IfaePi?{`NxWrD;*|65*(*Woa~um8yr`cOf6 zv(7#o9E;EewglDHEMAOML>Ar7wbe^eY;;<$W4uK!ceHn#TJzy`Fi#L%Veqoa$2!G3 
zfokQ((lpG4u$D)dsTQ@)S8$PdkjH(HRB{)>c8wj+;FbSAKeoXGnTE$VbTkJnN$hMp zKQFlu41V8e{M#{Z*khX*k!rZ&PFt&w;Lt)## zA=-RD(TK6^{9Xy-CVt7iTSN2T>hEs8raD{1tj7!+jgJAA^QcIk z^!p6WaL&l~d*J*=QoO?caV*HY{E^pdz*au_Czg#q&W%iweuv~?T6*QEpgyw?dvJ zV!0Q;N!Qyx+k+%_ZQ0q&f9#My3H&SnIjyHFruqnNHp$6KV`<;v%6@oskQDo(}R>gLAw4iU+RFgI&V+WwzbEBWC`brGzox4ZtN-fFI!K_o( zFlqBl#xb9aZN{6|=Q}`jLCx+D=)I6)1+X<{{1N&jPu*faXakrVr*WR|S=8?xK{wmKgR4^2zLWel{^io@Hbv34OO!o0e{&#Xh|C$JM+fB$C zrdS8=2#l(dx~t>{BPIJ?2`p*DB5o}_5mw1zJt0wA?~D_#Efwg0afsB{>BDkvz;b#m z(WcK4d<}{)UjADs(=br5h1{c|%MHOK|96WIJnehVpd%_7^>xtzNk6pQ!JCz7A{Vnu4p(l~kFII| zGr>Cl03Q-?8E^T-RKISB5>li*D~_P02ccC<#5hb!T1m%NWvjb>W1;5hT&-JK7BiZ>=N~(*JMujg>n+(dUjK#?z!_aX(~h$0s4vsjx{11L7t;har!^KmR^>~didA?mi45mu zkvCDc_?a@g1Q;d`%5&k=H_Pl^9p=AfUw!6-=`em(nsM@@7uZ2aPbw=Qim>*_=sMUm zGh0$MhxF5Gct<-E2fdSZ=33_)cv*mINZ|$9>OUHWE8Outo!dm3G>leldOP5*dfkB7 z{J%8zR?EkKXzY>NjfA?uZVgl73hlh;^-DJT&vJ&AZ{7!G_M&97nu2p{NLby<9yoq} zThyQ0v3v6DMJejs|LKhTK_h6w{!gz7Wsd{9-|R3dY%`OX`Wj#fJ5UdKdS-+Q?YX)M zj^tccD`L_IJkQln0ULwi+7fq;!*Yx5Nw{sti8cfD`ZyJyYz4S!P7w|+IhFQljZxSa z3SHOdDUj&&RaVFPGWIxf+EvD7T5ey6uOqSi{5x* zu;zt$56If*3M#FX^~?1H`&`wf0%IJ~+f$d<8RF?J)7v;A1JWe#o&EkTCnZ3_T!sI2 zDn9)knV=2#GF83W6>WjXy{ktwnELoq#d(DHlHrZHcd(avp*2PxKE?A}1}&?rwQ0XX z>ot!(p@{J5Y*a~3X>G{S%j*8LpOyOM@1rhJ{s~pQ>6jYCUPf!qObYbG#O2@NUR{mA z-C=p}g0(#A13T6K$vzmc+e!hhChi6SZ3eO8{0 z4An5)>e^N6r@kq0nR2)N8cyK_rI8R9B#3(Q{=*Fy^GWl~ROmt94?mXEwhbGphF=;r z&Cjo3F+gG4)avQdREBIIC^NFoy``4bw*AV)#AUVhcKPxftdQoLLS&pP_4>wN+$#QH z9OYJJg(T6BTHN}_;bpPog;SG(5$N?;T3T1*&f2cC=AiB=bx1fSZYLOu2;I#8uoA65 zrWT?X1)8barA91vdu83B=92B<_Hc3K&mO?5unrG$dX}o}C|K=Y^i3w8T7VU}Qp%0O zbI-eudydNNFK;fin!Y9zVxD>j5dn-tukeSOf@>2I^9^mSzHd<30%8gr-Bsx(Y$n(h z*6nUU63mp8Fs$p<^J76H!`9msXQ@` znZ(XKD3(Eiv(B0Y=O=lEH(smG09EiWnjL2O`}u5$Qx{oz>xQHDITZ^B&k!VOH^tD7eta6O~KMdZb5f*h|k9&jny%bcAOuF7$t>;8OL%G%H0{A{?y0+yWQ zIsP75yZtSMr`dcYP_R#;4jMvj-gGh1^jt6_&hmsufNQT(1tFb`p`>x@%o3wqyctAO zZrpQpzw!Q_Hwj#33|H+JS&Y7Qbiej@u#PJ6N9l~oANi^$lWF)w)4=%{oM0U{>3Mcx 
zF;cdmu&}Na1LU49Q+pyehyQ%2;(ckZBN|YsV8y~N1EASiG%+q2&M{PH#x81YZ-e=P z(n@4fudl|vlw+I&6)OsUwUnqcADOZl8`Dv@5N_`L>Z5s1yLZbF+Hmh`+B;b3lH-3( zEAc+2&Y^~33uOj;xy*Nl;~&Ja^q_yf3e}6CB3jYeinmm`j&giT+ctyG@r*mgTI~(ZI=K-gDRJ@IREBd9&i|;yWcc9PxWpZb1 z73Qeh1)=BTgx+)qv_y)c5k}}7*DpSQiU)wV6ZD*&r@$}gxv#;^Xpea9V!@6sG~!#q zcdOuum1bCq#P@e3%cUDPK@`@FT<4WThB7gqo%T!+544b9g?7BR6{!<681IX6jtxD0d(!5Kk{8Zqp?gl%1*@!UdPE{SxEN=IFc zb;f#t$8HjYyPpoK%h9fLWmzU+BVv8(m$YGXdOkGOdFDSea!q6z>Rurp5!LZ7 zKU9LeSU4mff+UU-GM9K5CHMSlX{XNGBtlqu@x-@>;|OP2bR6h*^g>8>louoO4)S7&>wU~q;yeMMH$-}P zO>G9HhGz6}-xylialKEqG$VTUlWH$z%H-xY+4vLL;R;TD)PmXwY%iQ|u~keug#0So zZ30;@Csn(>!`kz9B4poxgw>DK5l>fibIGzlbU8`RId6yMxH>pP&B7hlx8>Z3*_m{A zsK}Xh?j+e?J6ouCM7B9WQsnrR_hLu&^B{a zdTn5|4k{T49;LfpmQ=Db#sjpEnb)eSY1>a0g>}N50v}nZ6`G(3er`MxBjH}-CoYuR zoeR5Rk3vvL6K~W$0T;V*aSZqBR+(v9q3UONno`-^{p-{3;z}VWb5A%nx({=Y9K-c0WeJyW#tr*^=TE z_`C3_HG(Da=@8>bN75)S_-^C7+4|4tA1fym$fJj&dI=t`Gwoj%~8CJ6Kg3KzV7?UPWjmaH+rGw(6Y7b=^(=*Y{sZe}yA{|Gfx?iRsNfj4q&frQdM0!uNf*Qa4yrYV-UZK>Be(2!{774QgG2`bh;|GQGr_woZ zpS>n{OIPHRIdqps2-e+70?;RY|_g^#@LANoMOCA6lt8o z;lP3~0)F4cEdD6D-4~vDXDP6U&v(Y=bqj=_!oZ+CENk{(D z0g5`a353zap7;B!_Vhe-_r!2DJ>85F8QY~41umZ3bsht4QB5^soKeUosBA0k%j@a{ ze2kzKOI5lp5(MZTLyj07nF7%Jp6#AowI-@`VC(mT=Lj(DzyNeO#I%tle0k5{ zs_Q#D#OV2b$8D%sSvTe&(dLQTQRchN`!O|lRv#%k8-#rB`_!yfgD6*_;-bm}!e#Gi z?U?q&BwOl)JxavZ%#d6DUHbg{`47!@8*iO3yV29au92EQ@*51>w-bU_F41c}d#nIG zrE4A)QwHGXzd2HMmw{*0us@0OWJ)5fki#|=Z%naKj%nTjCU55GH5L7Z_BN$4DSL>< z=bMqF(4o3t*`eR)97kneZ+^dSh%U_29*P>PZ|@ocUpm1hC1!-5;ZObw)@dz%tAO8O z4*Tvz9EC_{y)P-N^-kcY#l~>3a6qDcw&)QarcNnmFb-awD9dg7ZQfp8!Xb}g(%}!Y zEL8-Rt`cg=-?l=f*mUi9rqnBO_{AN)bfZJ14OF2zT9b|D@tb)J5PudNv40+PO&bM} z$Wd@6?bMkgg%E4(;HFZj16v4pq$!^QzgWTL8%(0JGEC`{|-^};O;(AUBnC`YP|A! 
z8Vw(;l)C%{Fq(g~M=Z)Fz*TeBx^NIQrbj%~)43vq3HR37J9(7QwNWJwF(~+BnIn@U zZh*RgJun1FYh{NWeIQ@w5FK6ZyFAFufA!I*gd><+Tik|ewe|8N)CMyxod*<8_#QWf zvg3SsgXv@ zL$@gSo4)V#y;tbxUg(NNhfW|q&uMP|z*tj!x@-Ms0#CU)bke*ADY(aK2Q=+4xAouB z(AiynLKzL+-0QMlBETh46DjMu=PAD_Ue117$ZNP1D6^|Y&TYd>LEYlxh25t8v_i;HLbaUrdKl z9r7%jaP`1Q?ogM~sZV~c$3>G}*c@LQZNH!3Cky!1P#VIBtC?=;6Vx#uuAhPt<^Z79|EED!KuXVo; zke{=~6J?bJqYlYXz+v@9&-y#L> zQEIKWsK(R}r#r{ezSe#lPhze6G8F7+ASwcL)$R||X&5pj81}X;gyS|a<%$+D>>iMA z${NGt-lxi^QmHArz`c{9?%m2R7M9oehRj4~_OU+bGVfCeJ@q6s_yXMe5?1C+@WZ=A zoyL}6dnGw?jT4(Dk08h?VTlzAq-vK{rhV65^!EozCzY+{o9M;LgjOIbaM zr%FZ~r@GDRGM@f&y-jMvv$U}EcVR57&r@>KlIW}p^M%V^$)t;(pX3U~A`x6dr8$vK zvBj4KMgv4>Cl&qqUM0lz;iB91$4KJ2(pi}BW2VW)!J~~7HQ%_hy!^tFYc^e6Q;&ip z`YGSyD&y1pa04eew-`<|gC~H$8Ct2_bRF8z--cy|BqBmRAlx4X1r}v|YwM1)ARce^ z);(C(gLw6RSzeHF^J1b8#Nqq%Z_%Cnjg_6fpoEy6-{WScmeu9QD8J;OBgsAu8{-HZ zex><-KoPu6r}2WhBZfeEG6FQX9wBv@aG>1AK@7M^5VxFV3o{<~z$?2FrlG*!=F>=F zMYJ&?8@B5&%Rc%6%|;|Hi)34@)GFJz7HknS8cwh{k`PcO$5C7LCA!2`m*QeM*bF;li8n8yuC6iZzn<6 z`4?*f#-2)oBaic=j5CDL7%Xre+fb>jtKP2nSdh~JZ;gI=AR=&h6fgJ3-5`O!PlHY^ z%~T@b?sOvue@*Wn)nwbqcQf8(z7+=;dE+zxF8M#7%7KkpSH4RSlgpU#Qt2}_8!-=h zj$lXsMW^%6K5RiMXo*{W(K@%V&*;*! 
zSvy$6!wItfr@m(nyZU8!K?hK|+ytBj=~R49%zB1a66?ngbYxeO zY`k3Xc~wgQ@fHbnr_##g+uXJKy9&;&I3=L-E9j{so8C6*BCE^K=_PEYjaU~I6Q)MU z*#oV}Wvpaa*R1#>2P_g{YX;}&By)a7vB6KWbg~19qNF^*Jp--K`mfh3z;p&la(fs; z8=_|CTKt!f{EIV$hTPNH27a1>-y*rxA-^`w z7;F!t(RGX_n+upWZ_}9?@wjO!y-33K`ufMz{cX|V@%)jI!%s?6gXD;Cq(XL?l~)GB z6?FQ;6h$0&Mf~Oy%QZ@k)_{F5msPtc_vti?99O3&UglQ4G6uF7=Sp)Yn*{fXXryEZ z6uZ=L8QPb4GD}=1Y#>cV*V-IL?8Bv8yu8X=w>e^NuT*UxyP4@<2YnZ-iI@l>sVm4a zMk_v=pZp!ER8}%9i9Jn6hj`%l_;=c7nht;~5{g+!$f~F!WFU6yK|#lP^>95~v0D;m zf=5EC>`-l!9`w=2y%m)UL5^=_$ETzrVL$iY)DRh8B(dq#>3F`fvb}AJG9zNnv2_jw z%pEE6@e@Ly@l*0HRI;}=nazcXufM-|O~j_gHNCO=?}`_`^`~#CxVqFFSbP z%mg@F)>q@u8~i+jb1XU%P}+_38>z}}lI)JKjar_U;vS#spdyfa9nW$v@%79!glQN- z0I4>=NW4Qh83-1m>!)MM_!OF2(IP-kT;%_4`dxc(?AM01+YWcy%dyFpD@fHP9FCSL zn3GlB8S0Fpqx*JKaE_?1`~F1)_vpd%ZfIb*^~Z&v{L5}<#{-T!7T(n99ENukQ&QSz zVcoO|Q2Jv$VIA>!4eaUr(U-0s1}8|;77Po=pQ@pkn;WGp1mzCE3+Vfx|vvYK#n9U*ugy`rEkAxq@N1Qh)+sZNn90O?#F>522b zF@GTc?ni5LAvF>W4UI)~{wobUg-;he8LiurP@7^{e|3rb%LBakd<$IuG>$Z<^C-sV zHEH-hZnYR9RQNz+01r=|I*&-@aP2+_C+XLIdPQ2(@ugLDBe<_=+9d+;Ta8}G40KF8xwe6bjO zI~yb`Epm){?q!$26F8hHvF9xPgCt)`Hr=E7g1RsAUi?8J3K$6J0Qnp|^|Qq$0(Fhu zDjn{@pn-6J5qUS)C|qb=jLCcL{=eQwzuMQp$fe>juEf2N>%%(pLqr%|N{0d$NGG4l zZY#J9dCuOl$%RY?5PnG8?#pCNpW|FXn^a-i!g<9g zNU0??x{1j!PuZe81FuIn>z~#Pd=N=$d%`*O7 zPbO!>52ga`Mx&DLK9^PWyY#*OmMhIqAS5XB^WnyJ*O@k#P4C_o6?5AKlNZ_QfyBwD z*V{NOIjGm}!are7dt^rdfkVrTTdwgg1d3+MEf257p~8}4;v-@39elD{o2$br%GL8k za^>7zH|RV=%XUuEslT(h|3Txc;JYu5cNkou_dChJC*&yGqIA;hsvlYS`@s zEjRosZYan$M!-g}nZ84sb4{!kE~*+jn8ogz;y&PMVALqUEpJ*+pUZ8b6UzcN1EHry z=h4J+%7OC1y^q{PC@U8ShQ4Y)nhsjm`^~rcboYHPO^5SyLL_PXSud!2{5YY{$~~cm z>DB$6jGCm;Ven`ZjY7fo?}p>hI_n6R^%XFD8u?UyxDex<8R@Yn>smS!$O^w|m|XaZ zEJnHmXo$>o1PoXEwz>!ee(K6EsNa*@h%!Z-6bSL%G22E;j`P#?y18aY|>4!ca2B_61O)<7m&qo3>+q8vRb+Be4d<# zQCaKV23(&{b*f|qSLHhu6a>Gw@LH1NF?FGR;*&Z`x6c(T&D9TfeB(@<_)~)Ys#N0v z-<&r|JtlMf^Q-!v)@id4qPp<{qG6&3bMMZAFR29uEz`#$=T~xc`sUWbe!L|(Lkmf|y_BRmPPzYzAnT5C?;J z5ACVRBgd$FHOo0FetNn4;=*o`XwFfHdDRnH$Xz?NhY5-v_8D5PT+X`IZ$DPb+i*vn zk!w`)5 
zLZi8x4N|j1}y#4egofwW-1o&k6ZbXPkNj;Nw!p?IB*Uu_aCh< zSOg3^UJYj%tka!uiVF`OJ-T-0htORVqnzEn9)R`m)gClr13!o4c0T#ZbO4@(u0g$+ zVx|Q5dz5Na>3S`&xm8Ey`cSX4mi_DT5wmYuWv~{&aqaO z2Xuj(IeP@~yo$B8S>?P9W4(S$`*_sz>Zv?%%`zeR&*=%ZFQ+jy7Ba>!{tR&>haZAO zM30kri2kD$&Mi67c${ zWM(7q5y8BXYP4S`YJ4wFiM%uoJ?;CjqbW@Qgv`SWdNdZIzAH~(w4yL?l zG~KF{*yemmW^W4$chu?I^@Wk`wJDhV@`HX5%X+{)06GUda|{@rFG29Jri(XCM9PuD z?nk6&#^XQFD)57VEN{HN#^do2p3f8GeP@!1a;5A-v3T%zPK}Q*#+H&b7MO|~>wxa9 z50zlzAl5+^!qFs4^GvYfnCcE$RAtA;r8Uv zaj@gYC)!z&R50y75d+VSj<0pS8Ks^$qNnkNLWQZ*n%XvyU59R{)yd`2Z zGh11&doYWc-4dkw%=Gcc;1K_B`X*rw2WoNl{w#}Quyh^a*U8%83io=g8V+i@5p8te zk)e4{VKw_q1A)H+G<|7tbT88Qh9DMn3XrrD@6-HcEIqB4PBn*MC@YiTvd4fm8Rxad z8kI-TG;?xg;xd4CZc9*W6#m@{jBnbXj{7Xh$R|WeXbYTSRl|Z8TG(Ty*Y{iofjGd@j4>hWcA(^#VoXu0z*O-u!ghIgXqW#uc^OEEm}ajpoxvk{ z=U8ydv&RwO5;uNVQPdO2GM*T`nwx*pe@St9Jfzl{+I}V0`J~AM!n|6_AVnPOXF<}Y zFfNeRozLWHj}@W^NND#yLko#9!(>uOw*o;_Wx0@c-?x!IPGnnp4jxaR*H%e9{wnnJ ze`|Cif&=$b9$#ngw!d;3M1?W)UNDU}B_&Pk6WOpX`%ty8Z!>7QF`4U0v=D{MVC@-T z^~5S=s@RIOvh`kl0`f(At)AV!8l-!t(lT{isx9_aNXSi~V^K*fi*-A~8AU<~j+wn$ zvJGS=DY_L>#xH(9DU(i6)%qiUp&C3|(n4$sLZoSXqtgroU47;(n0`Oty#GsQAUyD~ z?qmIZsD@qBteHmCI)?i_Vc;8V4(`U!#hB_g8j}2P!_4F7S_&Juaxg9E%W7Qfeqr1d zkaglxg6GftR`^XbJVL+*%{LE6f*R*91%|T(hsHQ5c-wbw$A4sm8$7YT8dU-i;fkNw z3d0gyoj*Dk=pm>jPpyK)7z0J#8BV35t`7nP!MnoSP+Sf;x) zDzTe9*)Q+l3rr?Iy~3}V$I-7>+>3S&OUWKNPAX;mnPMCqn?(RznW178(n7UMbiZhY zR|Unsp?6;(`xEowm*Fj;k*bQ-JWM8TEj2roV2p7txNht9AoZ8sCHob{p1-?F^|y(I z95lq+dabV87R7ew!ygs%pft2a9X~GGgoVN?%W*neLze0ehPOCy`Nypm`TBA{JI8HL z%YWHdLm=c)fu(n+1*y9*_gsaMvqRSk1StIu&>&9x*4LRn$Z{OhHG~wKbi5!I50T9( z2i_JoOwBx<+izfyTu;Yo54*pC+qPNt1)XLv)%bAtBkaW?3}+= ziMOEs;lOx8#*jexNS_Y7JvAZ^y(yh^ z>VK&7bL#B%q$)bGYpbWQnG0j3UTT;9aST%bAil#~cBz;wXJC-s(0qV~OG$z7UZ>q5 z!1TCT`O@gB;e+~nsGm1byY|Y$ZE+xh;0qjgN0X7szpqxmQu95-+j3MN)$C4l>nwHf zKkv%F_o0tb1-tP7^kp8FPs0Jja)R~$=Xd_MpY!K4Txq9yNN}6s|NpB<%Ri{Y%W7^k z259{sZ|J}M8hs42ke`otQXmk>;`VfeAhRiDrBmuhi7nO^VmQ!zI$rlX^iB)7QCDPN|KFIvGPjWcQ`WYlQRo0({`@o3Q@ 
z{HE`Hs5qZLRQ{mTGQA9PoIcfQ$-NCkBPx+>v{mct6JeRlMIxhy0xu@&r_Zt+))oi8 zGQTX=S?9tuvjaa{+RCIi`dD^T{s(1m!4+56wCg5BNN@=54#66C=-}?|?!nyy32wpN z-66O(PVnFaw?>0IG;mg)SN7QB+vDtWen79bnse5yy6(H!^+rXA!Er+q<-`nhZ1gfv zQ;YH+L}pp%yWQ%S>a0NT6>zO?9lkQ=ne?umXRF-E%5JZJOU#J6FP6h`&GnJo+Zq^> zJOLtlaJQAKEUT!vlvyl0J}EuAw#8Mr**?ordw}*N5`I1KK6X9;7%;nP zGFY~2;HlLCJD?XVRdH@p$QC?k1;doabm|pX+;)jrf+X*@w*B|Tl1pCQEEzOdO;e{0 zR=Tg1?tnDW9vS6ud~*`BzA?fZ?a~B|l`pGaQ+)JS2INvWZoD8^57S7hfz4o0JGRSl z1q$CSn(KlPyh(0-zSnd9{=wuRiL<&$AtD5qdeg5!`gkysxH6j0E&O1*E_oQ3b^#b< zP9hw`6Z_Q|$vd9Lj$dEdGy%2BMtecPMW{GGA%@@Q_@I}YnU$%gr^0Jz?J9OpcLS0!}7gLgedd5AI~(E?6vNMmtXR%@#y;;sc{hf&^It3 zxhVPjSChfG&u%}clHKRw_}27wpf4ECxdMI1< z&8Fx?pMkvpT#kfWCsM_>i$EJ%a(hraG2Vis%={zEg)3U)E32TP0Dzo&PD<^t^_;o!?zyo3fyCEtCWjHNj3evU9GZ2~GA0%tSv zMy9212`9hq9E9a?8myMq1ICd-AN>avc=UYcYX*-t46C~b zL4WqM{qh7KHZM}r3O+z^#W(#6J{?O$;TKjl!**LKwS$#&C!5^uA8*zv2>nfP9;W{i z7i6qr62Y%-E&b%9~^KakaZSZ@%Pl5H`Phu0GMDrZrx(*B#Tgp)EJRA^A z8)I_+MTgaSc}8GQ{mCVtHTk&1q>GCUJoM!V!TLuWmNl)n%NH}WwD8CNdjYup8SI&C zC(PrhyoNvQ5Ublc?N%%PqQ$ozAN8CL)NpnMKu@&xKFJhEZ4<9&KY|VB4GbJ$@kP%3 zo1SU1e)?C2MmsBGgaUY{EvZQUf?DBr#pTPQqn;f&G+qOq0+|=-+3nnS^}9f%)U6ov zNF7z@JXSWWPb~wHQpOl1v-I=r5U1@m!^D}lQ`^!0mQ|j6S9}p+@sr;nSQVW!{tXdo z&#?q2WG?8-Npn)*$+jZ8qX0bvW6ve46tHbbv#+I-WrIKGgP4QlS`303(7~~6HcxnYpI!NCeeVibg*`dSE6>4Tc!GU!uJsw&PEMv zK0g)J6cux%Yae3jUsQ=0h%KfXjH|l3x`wyO*xybYsABId#@4J$CBp57B>Nd0aapb5 zW^MUb1Pg!lm*)i}w&}DEmQj4j!`W6PgXlFoEogIS@z=j4GQG}A4|r(Z@+g5Vku)P7 z*tR60Zllww$bpA#)AHO(i6<3`s80_DlW=oYS|0>Zo#Ievvyno%(c6RAdI?2 z9~g0S@#SrY-V3u-djj~M6I`Hb>7rX=mR$N)m+$YAUvNLn3eovflc4)cj=Jpk{5nnB zlU%*s6%%qi8>aUH6}a)izKf_?_~8BtH0$w2eV$hH{J_Z8C8gMs{8QMQ4yt4ij@yPN zO4w^s#1;A9m#VLxW>g^pYNqjN>V)WP3r9M!mqi-Sx?#b zp9d_oJ$r4IYGRGLdxGvcS4IoN_o$d~+aqc=EHsI5aK1vW5Cc4NS zkjl+(cW*OQ1d`i6wRu1Tn+*L6m>J7 zQN|be+H`Wf;juBbNRyhI1xM>o&rfvI-67+k7yio)E5x90Jlsw~6^zTZsXlIK$i>!ViJgAUYHjbdZ|l-J2T zu9}*DTbWI>BMymu0nM+BU71(jYDPF1{Z`EG^quvz$SO29;$MfeWxY*pZ;$SL)D4xP z9w;91hkaPVSs8w~t(!2e*cP_o!W?*i-S<(&r8e@-xogIoSN5-a6liaR244B)5Tnk& 
zgeSe0o;VgzHB(E}zPhp&NMaU`h^KTqk@Cnnpo7j3r$CeY(VU#`O(0^!Qp_{| zwZWs3NB@LhmAMGs)$zm08!yQDM^4|SSDS@xOLc!tch4e+DlJ zd&YKhrLu}$Pz@)wXG`x|SpoJ&iLW4DudTUWMNhC`U7Zpr6rR)yR!G>x_xHy@EC+f% zK>YCPfkD@2CLlDXo~I2BbJ(L@3j7D(3Fb=8C%99bYmb9@kX&XV^nNJ2c}1B$Yhc7# zOf1L^tJCGq>1uNXlzLDEX|7HMKZ&#jf&xLuLg=C|^OcpK_A(g5(?cCEyI* zfB!i6m#fj(e2VCKJTSVw;0u2Up`e&J36kUmKPle{e%zQ)koUjrI4Sq}Ea-!<=6uPp zLD_Vp2f63XzTs8S@b);{cAHQ*9%J+4W|KetxzRhDu4&VD2}7J^s_0ntToX7@>{B-9 zhu(#rdG2<&i><_m1l4U!G!gfBl4Tw`6ft=Z*D8J3Tu`sGchUg2-i*d`QDer*)F-*Nsu@u@a|r+P)O}>_sY;LPOp8w8xaG<%KFaIV zAf{vjjWCr`P6CH_Ue~ekTEal7IAYu8|t?cLNH)fWhYshsSm?uo!55zNso5?#C>QO~dX*c8MV9Y^c-KLhnJ zJi)@jSVdz@W52`M{E$3X)O{jaMVlNSH;TqFv$f}3Vt*^FI-h=oCevKq2m=Qcs%c4T zj@j7*z&k|^ayz6j+Z|71!)5K$6iQ>&db_LqxTJ7g=p+ryNJ^_0>rM1}g5t0|Ta>V*plbAD>7qRqm=#m#IUjVR4xbpf)T(c?QE$w9xKjr4tg^j3 zcl4myUo0$7gBbeFU{Sh{W=1b4`@{gxNLGQ%tS64U@n)s<1SP3cVqK&cdWuETYXD#VmOv2#%R5bZV2d;ck?RQV~;*BK2u5C-wrb@C&U zI43!LJC{46IW7!fNoT4)6~KZ7vZU{+4#^k+QR9@GOuxqg^#oDv+kTG}7_pUD{3+s#U|B!EHAGpO4UpPNJjN5H z)3DsMF3oxx>54Kz9^C!s<~eho-@hgQj-(f;SFS5r zpf;j+vsT_{bbcJ6m*DT}@|_gc!D^ef86Ec_mxBtOMg#zqtV;{`J3~k(3Ex>Ab=@B? 
zcE~(#!zXv+TiCl|Iyh;I*AUX#HEa3|cHETwI43$Ng6P|a&kyF@wTz7Dha+UvKtH9C z7}Q5c{VC``J`=@e(99V_0QNwVvJ=^i5~eRmD;^_YU6?IEZaST zlgNb?`qHU)j7zL?{+!n>QXAy}LG-C*L@j@rZ`vzU;p<4?@dD!z5#^t{EtZ(}%X^yx zMusZZYs8P4NAy-))%Ptce9xnGY9u}>yjGmYib>(UP8u=GOz}-$hx>3-+~J!0tN}U*!TB)JW++GNChAzp*w4-NFy)i|HH=V}|%ynQ)aT0%6 zER*^2!kM(#E)a2CEY6MJ>g_6JDoBMzA>HeK z?4K1UvS=6FL<%IU=sO;CQ1Ck@qYE5!h^cSkIeE8uxD4x~h_c`J6FkrzVvoVXR%w9& z1|lH7y^C+up&OX4fohS{oLM!m^P|+c)u0 zUwmmJ=7se$sEJTHg_{j5B~Wl7%saoKnJf^7LFbU=bB)JAM3Pnjlwb}h#* zo>y{lz3!qwwSvn1;@_Nxboak3p#&gh`aIEQ$tklOytVNq(xG_Nnf`|E9==j{ z)4|Xu^t(Z^&ZUzUohDQ{6$_ucLTzMs$pX`J=lJA5e}XlUj79?3A-V5ha&qiAoTcq# zM}ZjM9jRz>>V@-8)_H%{ek*|TUO4skU*?-S0PAI-g}}dPCF2eZ+w;$3(i0T_gSUYc#0rCSspD8q)2PcVht@)KNDybL67v!x(UM)FkHd2y*^c(uw zG4Juok2-Qqr|#6?Z=w@if=iRMmTQ93i<~>YM znJq!5n9;jgNuTi;&+D}YPA92y>#3EyF5kteggL|bn|cN4ki8xMx9Y{`y3gbdC)Q`C#XPPMmw+oB@;A}!dNL+xNy{NTRh0pVvUCp3 zvF7aeQMtZn@@)P}BFhC)>6~dJYr|Xapk~Ro#lpg4k(5JRgJH#mEnh}A$|<>>eurKs z-sQOQPg<(tB#1Xn1@lDV6ulz25`g3L$&C>Nr`X94@fFyD*zR%p?%!*IfVgAFxs;F@ z1Vqh|xd);XQaQ0vbg}Z&*L4hPf1{fb(*Lz41g9k-7J%Gp*Z`X|N_tKv5K#3SNDn%0 zlEng<1Pdf*nRPfdX2~^dS;mp`4txP~wiYv`poC_#v|T@s`|Y?XAGS?~e8`YI zh+S8j7%{4x_Q3uDzviO0{)@F2Z#oBs#X-{i9G~4G=Shx>H!Se>;I$P@qSD1e6P?I& z&&V1xK@L_Pc)s&*El867&*UHT@O%MmH92kQuatf@f1F}Xi?5>5JvyZd>B~GgOURyQ zd5X)kAxCkb97y%(GAhJmhlrjVdsr)bAnL87XiQB(Ym@ZF$)r4Ys9W-Q^zQ<$LDmoG3Da42+e@99b;7-;GDL7n=j{ytmJkA>yN~xRc2ZBe_fs)&m4V5bcNQprxWa+u(9CKGX_`JXZ&nn60TT2IQ>sYT&(~FVw_4f;$y4oqPn9Yhdan%KS2o+S6 zCYbLK+W`GQGIOm|k-y|H>bbxcmA~?nm&-}j?{gi{dq6Sd_mOCDNH^u2#_E*?KIDx3 z9n-i^FF%NWDR*Z)yP7d^pUjMGQ;NCM9xd)Vzh+5JyQ-c^=;Ff0ASs>h4v0^oL$ZE} zG$wSWoi>_x{-qw-ms_CF3}q>iH{(Vg^S-+FY5-zy(yCCsS>Y&GG|7S=>mnNTO- z2bU{eW6lUzOL4zlF8_w(#8PJUWdgUGrLc7_q&-mf!9fa-HOc&*}4dU&1 z7z_ao$V&~UVBFjLuE*>7xgf4JPo4PYPt+?}ZUs7%`YYa+jZKN{eN)cmU)eL4>PlO& zi~iX)fl89b@4oW%SMENM@Xc!lz6}v-ts%0Q1|QWl8ybDq#3M_4YwvXx063^+c{!wF4)F2ND!k zT#zlZGom^=U6iHIFAe?TDRp2p%swc3WPS|K6Rd_h2qo;aN^O=QZ?^ngIUO==`@j?vQ3?`Xt{>YRqCI^*h;j?Ypc<4$_~@je8{ 
zeiZz_f^>?C+2oxwa)6^=)SC?(`_9;@o6i<}4W{1QBUil1KYh_Mw37B|_^V5;4jk!1 zby-l~OC696(WFstyQW!ouGPf9CDD0F-=CvZ(ESR&TR=;~_R(=1&%#Or)V})i=`d3@ z(1WGm<+~rR8AUF!69r52HZ(0q#gQV)hUtz>U=#ijp|O;^ASS!z6mjrr+_?<7-)5WZ zexBFmUg=f|Va!T=JdwFooR4y1FgAPSn8+rR&OI8tzv_Vt69bZ}%fel1PjAvI2uxi9 z4vj7A_0rfc%I0VC$+R5&bMyq*aN|*PpG~sM za+nu&;PhleJV8(p$puPXHXCGmBP-%b8xQy59}xa*3SFVpvgt1bZ!C^PR5qA=%bE#! zJe3JrCeV-xd65qH2j3mTU7At`iPMuDn;DOPIm*e&DNVQP3s^(h)waU8R!TS-(WuG( zyPLC7@eozhy)v=X@?G(JlY6}4j68+si(7xO;q(9xN5SU|=h<$}0(ofrQkUWRGdwbJ z1h3}FTr!M>I;{^Pb zc#6n+;I(iw7u!i$Deg(w4b7>X+)WkIz$o&o42fci&5eRQfq(6z2OZo*7di_b|B6-M z4)*Uzy|m$qXR0vy?j<=eopVwPLjfjSzbS_z^gU*p}64=kR^`mqy^5+^8?qIzxT)f2aFcf@H&H*(k?A!RB&g; zO|O12i)Yh-XZLB+Xzk=T>aJ+;pLzsQNM!%{ z$ny=G7>?}MwkEZXix+WJY6{TJXK-g;!3}P|1*3;-4A8g;RzgQY$$lL_a(;TNsGm-8u%fI_q|=v z+sni1MogS;3!CV)7vLNAn-0Y8%CCwGceQ_Npj{wH;^`|Mn`2!Lqh;=^w(_chJYZy| zdCNu0<@SulCCeTd;`DVp86hJR8bapZf#$ybu-1XO04+IF^ivkPc!`~6-h)h13Ww0g z+9x~|!g{l~rkfw5|41PVi#yu2YrYK5i(x61L~`*$*!*xh@k{wnoFB?@h@?@|5+MWW zlpcX2-_f?p?2IH&(dXSsREX-&N4^tC@zxhdB*^h1*x1OTw(K>e9=B{kE7z_aN+OTl zznf8lDropaeuA4&J_rx`2Oys{%5y@^kTR^ByBNNW^qQc6g^RP##J&ozR@Nk0uaO?@ z3>07B8!*{DK15k}Z;CDIiD2){Kce5pB3K^Rcp5?Fm1coF2yLq-)b7#?flSFNE_x8%OrRLr>3t|#R!jEO{;|nH(=Zb3>|jO zng-u#&TBn~KF~-3V5sBo!mHx+jJRCWL5`+jW;YF~PJaah9VXwC_!9#D)pWZ(oPtqarLb8vdaWD8kV>B2%%Cq&X}$^?o(_E@ zCGFOJvso=@9CYy*!8B*)^DdQZ*vdB-?-?VJzBJ_Wv6JVdUtdjT5QaSwBdWid?f8^5 znp~PvoXPKzDYN}$LI&9V&;G8d(R`rNbH={sT~S9Ic26+S!4}7mt9K%c{mx}iD9#d&Zy=&u0R<$5 zc`KCA0h7HezCSQZKv~d!zPaKF`s2lXVtOAEPqXWZ_Rk%1dng0w*$A>(9DlBm!KUBx zI=^aKvp%oHE$9qJg_yK1_Qrr#fel|Vg9;iPRR>I1^l2GGEY%H>xoKYy7ILoY*IaiX z-`+$7>t>2x(8jflzv#IEpV#DlbmGLMAj+Y=-M+v(n4f>rFW$1JVVU>~I4i{pa}mQE zH@SlPY<~wD_7BBP4xs*7GtlXY#QSYn^!fbX$nWdxwHv+z+}GoqOgj~$vRA8@sI$B1 zGKBGSCqGGzZUiqCI^GGBoU5w)zUl9};#w;psjLj^kH7)a5i{jlDo2COWV`onr)vP2 zy;$}Qm*kC1#w$ZIRX8qoHX^}U8=FIX4{@MJH>5xZD*9W3iCg;`!(Y(M90xzoABido zMaIbpOFZkax{W7(97Ya|cx~~~VBAIj9OoN&C-tkKyt~}iMz0Iw=+EL#Se)0NAsYOM z-eyOzLscJ~H<;mgC)eMH#U`osmC%kbH27-rWI 
zPH`3xIE;0u7VLwZD8XPedZO3nzW?PK;As^t4Clx1fPTu~;oSp!h4Tg~C;qnqQuqRm z{~)!r@$cnR=H+So|AT7lSFjVRE#HN$j6ZH{hiB@6Ify=I1!tACN+p zmEPWVLdB?T_E00&_WROdCq7K+oq{qTrwO}6(4s>;94^oQ3p7F31vkS2Y#orkJ* zfg9+_%}_gz%)wt#^T&zQfu8|O4?^Glm{LJnb_*BvE`rHFU*5&feYFBK`z{y=ZNzzJ z?CahPha2;2DdK?x%?}?Eba#k@6UQ$GpJ3q1kDdt;73yjCSv{P1oudZ8L0atZHhmRM?X4 zdqhq8`;W&#nIwAg-{{~2VYKjf^=ZP|L``s@S7X#dEkeWZh;~VX+6sWcEpM#Rd4i&c8@M^>~J0xi3 zEiljn>!Kd$-#Hnc$IgfpW7Ve*v5n>z{5=Uyfm;9uoGv9yDkA=RB~Z;egpfOH>Him&i^Us$g@$Mqfdev$EA@cZdJLc0uT&kyIv{?W|kgom}C zMTqf(S*K6C^D)Y^c79NNq{2bwi};@gzs~50u6~&XFM}z$(*-BvPU3~ORTr^kX-e(C z!zd4_gw7G;!C}iM@400Fo%a?gux@H;MiGS$mKom2e-Heu4G@)v6T)4z4!AzY_XN+K z7A%lrr-)?10fQVw5)lB>Pt^d%Kq-O6T z19n43)_c4c`A$muPuYd%$F>HdRfkCZhz zf+=Td`QO;MmABs|z5iBehXi-+%la1Op_lx$Qm9LN-Nz`@J=v zE-F*l5*7&qP5L76Oi5sn5U_S+ zAm!P!2{s=8m!xHWhY2{xmZu*(bSr2#`m|_wI*AQ)QKX(9O_Kl8Mz1-EuWT@5Fu)F& zAveV0F72P+vHSl)$1=m&z5*MO{NHfh|Ba3XaAtMD6xPFb=f5BF|Ni$jHmPkj;9lT}|;dHKe&iZbiOt+6J(s=u7=VuO7(9X*S}A3H3) z5muxB`5V_W!yP>i+l0ilwr2^bg=DSFMmHR)-nAm%$5sT&m&q-?eU-Ixo425+QGGy1 zKU=jprk%0cL&e-#%jbP<-Q`O4kD|Ji#Z8@k{R@E-7_W5_-Ow|DB&`lF}X!N9n z_2OJr=O@EblktVQngQNY(Ui-PGhfNImX;D{1`P-F`(2u5>csw{n3)p`GHfm>u{={u z?Do`t8}xy70UL2JtfZ_!wfKa^k)*+Uo=Gigjc?9g(rQh*PlmS&I9JqvTW}^`}QO!*skM2p2NQz z?xIeO5OU-s_U4&GL$`3r*jp>r>#RXz)@2k=y>^YRZ~A}6fhEgp40?xCB@Jd5P$FXg zm>sPdU3mBvdDTY6$j^;3Iq~H!%@YQ_M&MUp{2UkLHA?bJ!3@Xkder$aP^`?_Cd?wu zd}Vu(2~)#%kwzbOJp%SW{bV|+Zu{iaXdR=s1t{ZZ7O*FDN-z@;^#-Gr3uThZ!|7<> z2p73&r7ALP@9X^_v&hW)6K5I2`O6yZ72QLi^6rKRR-&P*>YSRokWhSiZ9!n2A+(y=~a>!+eW9N|AbYa{F+`vs}&8>}{ z;F?=Y+2F(0G>E^NI+~>V8lPQL&*#|F8k3W8`T5r?4%hm1{ zuhOLG6aj;RQfuiE)!*|rd65M4Nqa%x6*T?&gXOH<*_Ic7;ZwY7tC2c#cD{^F!Y^=3 z)wd6q9UYt4QJP2((eDd#WU)-EnmBew0AQ_RzI+9~sO>X%y^ zW{S|GXzTs;DuSZoqvfvyNfaSFtr0$>l}Cpg{BWpZ+o)_JJYspcoQ~@LIhGe$8$ejl zwaMh>HN?~&7-w4{CKH~CJ}qQhDM{q9Ci!n?KUUH|E$qq(Y-K-F!#LIb@R)A@SY16h zU4Q1d>fbe4SbZ5$X^(-`+d4|S$m=?)@gSe_i>^HRJz{IQzh{v8>gRx|{b<1Df`H4; z1$wm>BT%Lv2pyYKZ_Y{kXUXajl;Gh&K3-YAeLd#I{?4zmg@gm~^V)7;ZPZdgl 
zJv^M8hJ(^cMW9c|Quc8cuxX|#Y6;Ct>!R%_gRp3h__j;h{W`9U-OLb{{b!FVR9(xI zn`DC_dY3YL(udYl7qcZlj zdwKMte>G&f1?&up9~4+`XEJW4BX_fT#!J(xr3ShC?zgPZ}LML|7LZLq_LY zXm3)3T}kg+y`)sUnTxur^sXGxam&$WW%-_@4mXmIPeoAFGB@VFV|(5Rz$U&86JX0_ z1=89VocFl2{o;Zh{3&^A+EES91(90CSr~L~V%*MgMO|5lz|qR|H|$>$Owno(~`g^1l8=brc>^DQqStKVRkWQ7wnn z&?>Le)395Z{w0&&Zco1oeBU41pddL$Mr=Y+GiASLznULf1d>YAoNOYlcr3EX-MC~f z2K}0BE+PZ(R9QxCM_zp> z=%hAqF2E@Lplf|JU~K+gw+YXNe!y=3ordnB*Q@rag96k3;S?{fVqKNaiv`y|mNV6= zIxQ}u6}c!YtzWA3@Fz0U3G{}gmPB@=#4Lt2uq(1~lY{Sq#J?94G6fqVryRwF;>v^{ zm2XjtC2-kU749!*K{-tDrqP+^ahrl`lW4!XK(W`FjL$!jP!*}h*%?e*PVUI6sI|)_ zRPRuj784 znU6GCuj%8$G=G(g_$0*4s2=~Y-4QlV7^0N_E6~B`@oDz@5FwVFzG|jp7!)m${pR4l0LPo=-llrA7JOUALWBMpHgc)yNaljtXt{3eg6>4q`^#Fv#c5k_}+izmtN{!-*YX%GW4d6baL2kd#P8E##2?+Djfr_ zKQXvg=QQi$GlyHKSe4~xSB5X(RB$j^*~Yt5l0VC)k{xA_{jl_ZSgvo$e~AW&(5H({};DaAAGp!=~t7GysF zH$<{vV4De{eY+F=cyC##z=3TPi1ui1f$gGvb$T?GLRyd+x+Ox4=OZW$_mt8|9PwWF zwzwxq9mRI3Aq1+FAJ@N~9CnKsDKO=V1h+sjE=_eHQ;2mafr!LZLjXlu(K9o24E2Ft zA55Yt6e0%}vyu9mYIwjiB)Ai^h=SXQ?5a!FjfhGj7x^#BOMZyy8uVIjO9s8w`|KFd z{I04t@;vxxXn{ZSO!&zydG63da%C_^g#eO**FY-OxXjI&2d?{N{z97`R-#$9s*ey+ zGp@*lG-+#yi8*y`I1tnuTte7aS^AMa1ZJzE%8NHrY}qZ9<$}-sX{0xr!zP`U?{~~z zW29rW)5cunk#^(3AvKlM^jWu^cg!re;-qq|nM@>W)Qy%n4`z_3V)#@8fnIJcauc~5 za%B*Pl0~!Z=%=-{^Vwy~rPjigmxb_H+(cgWC~xFxG}(WVz-l^zE!|oa^@Gnsx;NT{ ztv;8aNjKU6X77U%r6tQ@Zx)<5&?0K*bv+YXcP9K*T|}HO(IrZq<-xuDd4CJW_Y=Av zZ^IMjH-;$@k9f@cBxhNgx{q>O6b=#2HzO%XH>z!y+^m=PCVA60L_$iL6xrv5m1N#b z{Uu^A9)N+TvMh{#gbV&(@Yw(cHi|d>7Tf=Bg(T1C`;DOQLurE|%rW=jkBY*yI(2#H zleSu~@DDJi&WE3*IR4CL8S`1x119v3~U z9Fne40#wcZfYDjDqdl#AwY>LOgLq1~D%oPE0CrCsmYY%n^d{%EOC8KBqIkZ!+~%JVL@+#2lu_gDH6^Tksjf&B z?3(Lsr*m#O`Qh?juL8Zi2lTsyNcd9R?82?! 
zdI6*zzd@a+Yj*(@7NWs(Kp-t}|MM$gQ{;Pn7rAFY6qZZ&@=sd47Az1Er z7N1@Al}2ICZUztoQ$|@${~xa3mG}YF2!wRnl8ee+A+Gn4MOR3juU(gsk8@1FJLmhX zZ|Sa^XYu-twZ1J+cH_~~4lwz#Ys`*CqiqQOw)(+u9?Wn_mkPo zT#!tCT;Q2d2IWS~zLk=Qv2Q~<-Q&n3$=6<$*oOTJP;NzO033~mYc{ifTdlR?`ph2u zl1V_6|59y<0wOw{b8|0*Y11!SIziqWc_uu7v2Ir!XZ;Xuq4fnqRCLYh?@n?z_eKNX zOv35b(!)I^+B$u#vCQg9(CDL&CPcATvGp@mg%LhOh_x0Irc%qo9nOg~gJerlr08Hp z&$yzdT3HmExix*hE-j*_0Ua`sWi1kW){y3%2ea1u&RXkHIv)MX*QxiYH0>77#IY(g z%_yg$o7!V_*xku1@!`Js`Sm=O)i%^_?7*-eT_MipYBq5(ut?x`<}9veVF^9SszSxH zz#!G9sam&?mvjdi8m-!CM{)l`x0;G{Q<`6zOeRa6*CPvoe3X|TH#1iX>xH#bpLAS zf-M+3qTyfI?I=U{qv=o#INsh!DBym6D`vm>bx4og@EeOvWax}Ooz|OirPEXh0AI*m z|D*gYBWDnWFO+5=T!H^n)ehUepNuiusDCy0&IKBW*v}c3BvO!S;9KZ82R@BK?39nI z4`K7g-4)pI+R;3fZbd$`z-IUaAjGtBCx*;=>%1VuoJn<#si>h z8DQ5#aZIuz@VI{uIVj%^Og14+_M?nVc3T+z4qw>&So0-~%|5#IvPZ_n7ggYtw6g1r zzIR!{?3vlDndoc6gNi1hqs4t*p$uvHvno_^Cpu7U^>6oAXHHvchr>?SsXGV&O>H{b zjv*Shy|}AQ2H-1=5V&|enQb0P0o_#OjXc0|vlRaK_f2v$PXx?a)m7C$6^dNTJ}NW# zOsL@=CS>IO3X3s;k?=*t`j3?)+j%z019Go+@OB-~;`+;e{9v`++NPf_`lv0R`Ho%+ zW0&tQlH9(Pwk8y7j+Mjo{%=+lc%Nw zMItb$B4!Ad?_{}T%+TJ`^Q~|Fhy!JW{G)%s@BqqGB_g&J6givCQ397R%T#yN%ssf_ zy+e6_-kX(nN29D^U_|N<(laiTN5vMXE^W0aA!gZc%b*V@^&{1tye6>xB~YWDfbKi2 zvGldRxL(@xy&^^e1T30KY5J@J=m2>4I$fOtxY~>B^{I6i;)f+M2$hO7*|ici_TrPG zL$Ck5G?QEtZMt%;jEtb2^B`-|P@tWETPofBfaL)4RLiN zsUH%ScQEC?TZLBjt;7@p?M(R#=yYo&f%BpEa-!%^oX?DrBLi?jr3O6}W{Y7cw}G@CQdX%axT_d#x4&$p&S! z4iJ&yFH|F^p+WNh@$l!Z9F)BLsE}9q>UrRkrwE`Q;hg9#)WZGil=l43uz+XF*3Nn- zsI>3sxZUDf#)L=a^l)HRR)#7OoR{0?)y<&aEns0XdNrBzCbUF#NSgU6ZiWHXsiRD+J(*GaDKE{IxQPd>Tm1#ydNKO)HW#LPgp8s*yXyaOuyi`UV^>AAtIs(smI`wtXpdAnkf8=rSA=?oI{LwS%?l9UBV9|JyDy&`!f7W z)x4>1!^|HJgJ7|F5f}G3#NBXMSk(2aVKmkEywl2_z5P$;$OP$|OI30+{hAidB+dXv zU~71UrZD9M@Zk@7?|B>=Ti$b4J34!lPI735DkjlIkoh~#v5iZ(Rp=GV`jGFs?zYtZ z$vD;Bbky-Q7NkA^rz3!k}v#j}{gw0|qnl7EHpAO(MkL&xy9dE)tHnBVqg(0{$* zmjnNYH$3)nIniVMZ>&!*MXAV5c%_EKG^l*p&GrSKoy=!uXXI(iiN6$v=4J_a?d43v z#!?vnMY1aIK{{^Pf+jJ#`u5fvc>4;-9gXO<((Yy%u3m4#z6r5e_~Ol~OvQzD$8VW! 
zh|p&9idcDU=Z2IjY0B;zU?l)h-F>*%LuONb52fDGq`;=KJzzF3LzhpRFOxft8*h~f zY?myFMQl~Od%^S!al<3`$OJPMii^05FR7@*YK=Oj*a4e)J)+jNxVP8W2g$xQbpo%P zR|e$LjS~>gQVM0Vw(h>euFI@#&3`Frh==sf#Qo`pY=v~=>J!b@he5T zZsBj?pn=~~TxQ}2pjrgy^RmjJoQzN!?LzL4h%6Y9{jnFV7a2)9s```nB@VZWw8GLI ziCp6$6uTU;L>qXDWgfBInX!1ytn8wNG6ziKy_WQrT?;2sx;*YDqWDU!@p*GLpAQ8Vue8#Z>> z%ba{bijd`_cxeF(!||uCq>PeEkIpaU8mV%0eDm|rRgp;{s1RkVT{eSRiU?Uu-oF`} z7pK-DJXbRFO_1`Bqi$Hb2$dpt?ETF?LA;#(>sBsNM8>!RVxg(p;{vj;H_9is4Ce>m z0{xXeOQT0AsAT|UOQg)F=_;Y^)CrmclJ;b2X;nL%K4fyU|A(@-ii$IevULLlNFZo% z4Fq?0C?vQBch}(V9^8Vv1$Xztg9LYXDBKIDfLl4;_uO&2`#hXCc=<<-8vow4)?9Oa zlaC9pv6l*dP(bFJK3)dIG8t6(07*zb_;%}%f_>Mf;ahb3YX|dNHJ8m=DS{XbH#8)& zNkE}D<3sTN4z8)d7c~8)t5?5pc20!zC*WZ>)T_WHoKJ^uP@dc!+jVgRd^#mjY|!Ozr4)0vasDZ($mmj)?>8tl0Ht26!P`ONq^pFrSUgPv0W>~ zm(3<zGO z4UXXscG7NVIwdJi)DwmlC}m&HC%aS`!RPyt70!g*M&mp3rU5}}GxDx?v=W{Na$8Ot zI8o`^SDNZ1y9<+)RuygEqKF1-zYxQOh;r&~rvq$V1Fr_9sg@oCUQujq&mCFQRFNs+ z%g|pEqBX$#TC~=-Mda0(eYN%%Pp-O_vHwOQR_FD znAN2;wxhtmaNFSDdNMR*vQ#z)jXSw8@A`BR;8Gm#4_8Oy|H^w%9&Gez{uBrH`{ms2 z)FRG6iGW7*{@$0^wOimIx?wIY9wq9S7@e8?Lvj+A&5uI__ktwcL0P`n*4_C%o|Zrn z==h67fu#Qrlq0&@_U|?d8lH!UqtttR*kQx|V_W;>TmdO47u@y${<13qX|U@qQ#yX< zo~rLZ7*lQP6W<(0$ZVvCY@+{bp?@S9IFA6ehkm~JNeH|>SsW#8q-FfCuNO|BAWk3^ z4v#9$+%-XA)zu$?b|a&tIs?nwUotYGfc`kQ+fYqH*M|V4ANYL&$A;Sp5e0R?GD#tq81a*g z_Hfv+{{gKmw2?Qp)Yb0!FlSU(Z}}=Z@4JRKZ#G7w#$ln)5$?O3(ZqKruc5oyb!^?Z zt3V7cgj)-{;?JH8m|-fuyGIyi&QVB_(bJ z>TPW_+DSa?Y4PO*Nc`;wU%&rNU#w|y zJuDg9FEdS4V!G(_BjjYMrq|1ey7K&v;%U=)@>qDETAo({<#5r!mX=Am%|V0*hLs72 zNxSg8{UXQGmCJ~%8%KtDaPSfI_MHBEv`j=3YeQMJ@Emw=_kLxgY{XDLIZk}X+TS@w z$ZaG}EkJ^ORZKwv08(|mgh}LVA;fUDas3lSPxG8=aA)#9baE<%%Y`=y9(35sYU0X; zjPcy7^y`jg@;z^k=Ze+JObJ5f2NGWh>&v0cp9?7IxjKWzF6(JsO{0R@!aqb)Vfjb5 z^z-qvqJl4#_RmI=87R1R;yZRSKQ?HuF;P)X1p^=@O8rHBKxlWU!u4=;b|a0-vrsxi zztgMi!v9`*j#Dxw3^N942rW-H^J7c!f1H%8Pybw;EA>H(T(%p9DP+~E^?MrUGOY?G zx@gTu|E#&#D#RGLLrQVy>*HmIQj5-%;oTCc0sb#9q;m?uU7lypIZiq!X!wZVtpgh_ zmEVNV=WN;I?Xal7{NcKt;8$C_Pl5))}+1ZN^ 
zMDm9`jwg~(LV%?DM@4)^2?7-fKD&RR^kgkplZ@G$O||>*(t?$`k4+N&zJm|{oTRsL zN4Y+g$K-@Ari!bo|K)~;{@ariDXhai*utn)US(yOE)o(*DB?f$6HoRhQYKV>#}FV^ zN7Gc=Xf{pPZ`==Z*N;0dCxXtGW~4g``gZgMzT=BhVqRRhFt`krSY!I+OT#+%#>@_P zQb6}8l)sA?VeO_rByRT-ZHqGQlvht^nrz`P{*oGm4LexP|FZ&PCWC0Wf?`)&8vdnP z-1)%hs>`T_XO>gh1V3gGGq-MR2R`cN{LoH`Z>l}$4=^f!EuX$=BD`V7HrKJ-Va|cZ zv9@Dlo{mnlErK@)nMkuAE0(C*1wuRrm2PF!VA>pur0OD83QUij^l<4spi@&(f#Rwk z^m{dd30rL_ZgBms>aRVo@~`#8DawsEw0K>q$88ti=1n#~YS(G$bg5wXx@M~wk;_Y( z;b616gbV`c>`Sv&*esZg$vHKbY84tk{U$FHo3tx`{&~g6h;0}hp;G55TeuMqmlRT{ z=a5WIMmK-`bf-LDn_!)gN)e1zSQayvjqIi0_#Ow{UC^PsxgL+(tC0^Wg$L1)+V^9U zLGIomf`100XA~KSSnjm-nr0 z12_vn0dXWtGh6t^;O#>7Aj!>N_;B*{dlJwee6!3J#%bXBhclPc z7B-Sxa0~6w9QfsEb{+?67AHOb8OysP)-$P1u0~)wj&+7jQu~hyi3YJQ>U*!A$ z(D3tO^}O)?ZPu2Jr;A8ljLj$nwQJ(bBF>*L{c*0cPemc1kv)WT!Wq5Yqo8}^`@Mo zuaoY#caIyh$;&0)UW#`U2Vd7aJ37~wFDA~;&rZi`p_}LFTLwlN9p& z4tBi#hV;BWG6vqB2Dw&BcO->m#)GFi?MhM-hdF2N5>>t1@Y;imy@t}2sHq$2TL1Xd z&3T5S6fVjGa@jYqP1=y$AiATK+)i>DuIk%I5EnhQ4sA--NYE24p zLUoT}@|>Y@5ptzhW3oyxIMKAni{uYeP%{28Oa-MzNG1ny>T>u&)`FwbA4(Y-T%@Hwq0@z5;AI|@;t}p?i1Ldr8Djb5#7OZ7T{D4c2rEvl zUIeIEkok+TBmnY|Iz-E5_;)H~-I1(I8vd!7O0HBEtywN8xfh{1$&$vbVW#*ieP?Ze zc74^iHdXM*Y=xko&)IFD-O)M8{X0en%{z}|41Zz>cF&KOHtf7{PY(9GU{^Ie>q?sj7 zdosW3$w9rOJ4DAg&WDo5O<@s{vuQ(fUA7P2y1(Z7N7PU0-ek_Qt-P`S?w3Ng4uO5WGGr!Bw8G5QM7M(u}J^sY0g(ubZLLueKnlOe}pwgk* z998!M@6Qj|NvmgPu=)M%`?GJ2v6PA)APSy-9-@_1pYtZX&yNE|Ps%6Q z^Vezec}Ykz(uQZwWCvM;4u}@{mnn_IR(pk(TS`4akHVh(srd+<{T)7jBeuNB`}*a` z53CglT0iQ!d36`x0FBO@f#uA=!|!kwv_t>G|foA2}{Fvy!~Jv{XuK z-NjHNGH*aOWh%i;mQWsle(p+E&go3$zn2sL7^(j>SRM>1Wcb)bGVlKjtE8cUWZq*J zYCu)@e?H{D310uhTk9g$L+3D4CC&i&KYy72@|;sJ6j(YT#0-n`?!SL~|DS~X|9jpH zO5m7_NvZVZY;@b&It9-8=;(cWJ7=M2Szl3}E6lv=qLR3{ zxY$z5B$v_q3{HCq_$nmhAGKADNkgI+z35J}4U){-tqw418X7|aQKa5cx<$vGm57Hz zPGnNmcNloT=f@`g=Hs-rskV0}xm{fQkrtpWB^Ht!KEttl=(O0$C(o;?s(q*RV~M|# z%JTg1aW=afz1*^)_5%kUtlOG<*y_$sY*goQtguQ#YIO9cZ+>WI=$|kZ0kaza6-HG7aJ@Tf9T9L>w7kJxJ_3svzv6KKHZ*3Qm!^n z7t0T@F7A@16i4|poo)sqWkut(C2f{{ZaSR{Y_8FkH>+5rBXL5~QEAwBMG)xF%B=IY 
zU_9F!=+xun@84(5Fi@}X85tgle0Ae5+t42rm}^}}0%`lTFwM1FpwZ(FJ7O|3ZBlf`L9z_a*Rf%}>>Hxw4fv7Z4R!p1d_cy=~;9`xVL zci(ua0po;_X(+s6dTtuyT|dUhUQjL(JKx|D2soghEmq)ynrx&g*DU<5ZZ&SMN^&_Y z$=BMPE&f>y%J3%@^*v31Cw<3L`C>;)1%pcVIbSrcAGqv$eF)z62~`JozoT)W)7VE`o zm*gs7I#y4i@tKt#WWE=*i zd%3~heIN$t!6dF^TY&j!99cRUQq>uHz*W)!6h%Op3;eWGnHYtc#H4+NO2_VwmFdiZQPo5Far@qF=JU!Nq~ zcQBp9`OYST*V^2qLA^%csMX-2m!tKU9Ixd_E5!`Q5zjeykrxNNC#}tFXe=M(1K)92 z7z*pUumRMxJyl)hfQq9!5Q@vK*uSwS}uTzvHVQ&tTDg>5*`pKk~Plu99K_cJV z65Wx_4-RM<^0ydrIH}A3(4MYJ{-xW<)>|ukcdx$QG4K7)Bm4N3s>T^n!>wh^Kni;x zv*2F4!AGPHo`jXPgekjJ{F!`3pyk{I@gwr{O>w3ASO;R&i&LcXNJuGdh9ng7iC`ER6Yxgq zD{#f`bp=X224s`#X6zE1{8(TR6tLsMI(X^{^``uH61loa7&S%SEx#xB=#l!Nrqj|z&HPGHv(1JUIGEH;dCN>02Z_(&#S%){KyN+)x39NJ)Ln5Oe$$TRMbu0EkA!8HsR79z!VdHxHX<#*rnFd-i2-<)i?a zPqWl&AoCRAo>Vv&EJ4{lPGct^RgHahZonW~t=YyV5t!Idcs)quNMTE}k){kkF%6W; zNX(Y6=@QC$x;d`$7)X2F_<3I@Nbl0aFF;lp3@gcYF1td3ZIwWaD36aj+bp%}xLHeH zRw>y&Dof2p@UYI`wKt$N;0(dcgX7d3 zu8}8&osb@&f*uz2jUtkV@Wsf1g;SZcp7^hD&d&$PlNIYMnOeQ*bZwWoKVDs`c|W?4wN@`tpb|LQ~(p?`Of2e6qL8 zv*u%Y)VZFKd@hH=?yuX5KiGUPMG$AHZD96f$eJ?0RnN4o>(7G&B`ZN@4hYn~+`(axSixGTQo(AdXCRhh zJ1>FDjT!In`Z>A*{kxvzh=JJw(Pxy|PIV+RW{o?>`xHaF#ZIqNgD&}D-h}Ee?$*(} zpR5=0B{_M@`W8P|m)*`*78}q-+Ep8C8rOSBEm;>qt4Es)FX7M3#FGd0)_3lYgdQs` zvzF8+G+Xn#6M)xrCl3@ybnoH-Sc`#3?BhwTKW!E{YKnout}7pbT8^*>+@YO{x_$a6 z5~L3AvK?3OZ}{l{16M5UmJlu==Bg3rgoHe2=ilnR!sZ`eagBi1+k}bpGoK^SyY#xr zHHF)OIQi2`TY)Gek;#>22Xh5TujHZXZ4FD_2t0!hLh*nx*L<)aL-^}0AeSG_k|v3YEWyJjFQGO6FP$gjz4NDfEX zJb{juRYGx8u|)`7j^pb*pRufI%f5SDRRtaEwz-DxR*kXq`X1qR(Z@)w%QHyDw%)ZA zT#>Rs;2UEUP6f~?9XADjF|a^lR#@Lg_x0ue2Bcx|2bb_AW_@dk_*@b{8UTrX#<+Ia zhjZ3Y+=+AAqb2(;_~|+^DS`u2x6m_vFU%>pb^(NNMPtRCo%-QGYA4UhHOGiV{sEa` zQCPpP+FJ8gSOE1iC`Prq5F!mmwn6THpK!+#?_C4gvcnu9M3?KN_G6_Tp~W$k`?!Ch zx%L#k4yfMwTSMr2v60Q^dRS`L`H-8c+BP!7p76k;zU2{LxUQrDd5ka9eFR~5T3mLC z=YHf75iEQ$6;?lPURmq$DW?2*mDpHQo%>DSWhJlR?{g@yr^LB>E2X zQF<0W*_zFsZD1K?u~2_ib`Q%os{2bqG4jZeC#h%Xd`%hO`x`3*UoeXAK($D=|A2!I zG1(DMcy?#rB9tscYwr--8mlR_GFUyfCe@d_>WtD!WyhT_VD0y-rokZimZ`MXmF(j1 
zygZS%2sMc(k5T7%=9@j01ExW>4vvVG+|WxNWXpS~^i%LH%7phx zg?@qB_cmF1V+&!hqKG6DRqi?1QpArM=8Vw1Lwd-$^#$TJvMA#Tx6pE_$(71&7O-W` zM7DEb&}#B_1CBU${OJ}@Ur8c8JMYZ`M6Rw#gzxmGtq2@P#v#zkIIXNi7H*?wYDP-J zW?UMVwEIqGR#z0ETz_c{SNXm+AZv&zN84#E7PGmwEN~zwI8To5a5}rnM!ihjVTf^a zc7IuG&dtzY4Z-+lk1w|Ic#mAS3+K?amKyFTV*2~5Fb;?8qGbAqPP3~7uPZr+PD?{T zImm-z3zunAyU~6K`KUt`g06WU(CCB^1wAe)2n(pSG@@CF(UtAuZkk7yqHd7`=HNWzjyrj)(98|<9kvEyAYInHLet7bhErug^s4Inth>>|;ai`Ld}4 za~))$TozFptH}cNuJ4kOPHo0EwZ7iUSlFt^y;;pO~wP~ zKNmDCAqg&onNJWhG+wf7-Grs+r}-P@`O;#8zY;nm6qSG_Kxj|l zaU_b#sB^*2>+nkQNXiZ6-Tgaf9`hctI6)%L#M7y|&oP2`ngmU6FiDd_dEE!_Gq|td z`V#MuJg@g4#O_pH8_(3m-wNr-?g$gTC6U3;=f>h$iLj2_e?FLUd+k}S_&+#sF`oT4 zR(5}z;8 zgdWp2j6SQayE%Lf^7zDVKCav)rO30-L%`)xd+bgPM)a5qtIAu?0`07!Sva<3DWWr7 znrtTGmI0n_X1(mbqVhhV^PO%5L+zSfdGc#)` zcwAPY=#1a?squ1CHgJvlTK|U+^cS^8A%DzqwW)a4lbG7)f$Sytc|3-Ydy3TVCEL3G z)#f*9UmqH5lrife)L$P}d^TTz2&!$ez_wR)Owi~!;xK8;4duse4TK{ze+R(_+^#ja zO8;~+TggS}w08|bd`lh4ouDlxojJPP_MBPze1v{?`)NQ6ezGCezgzUSF5GobT#R=R zPDn_&#>wDS@ZuYs^r*P+R*l!2QGXeA3oQfER)*1Z0_6=3bHI;;9o(fZzW(89S^el0 zjmxflp2VyELP3h%#%icDmGZ^+_e-con*^AOY@*Z}sB6YQ_;Jnt@8-9|p~dTJ9iKew zu_VKOdiovU$Qx=J`jgvekQiGr;W_(y8VUBwA?x>V5KBb`yl;n$td z#&&nek59%wg+B)2;ny;it>_=z>n9J0&bwU->kBZOx?8N4I?bB*k#h!BYFyi+V_}BF zD^FD*+U9&F3}{ljua-Gd8IZyI9Ap4G?ORaQQRyFv&_A9gCK^H%Ab;>ldJiG9W%TDT z*(OE8)#T-v95$^9u0zI>N*zab4HulTqd}}a;GxIXHK3_d0%#f<2I|pUY@O3Z0TOyO z^bcZPuT*&^ej-VcYK__bO`PC@doH(VsavI|@lZmfDd3WOW-7PASj3g%`+$Vfy3{9X z%i1wGGeX!%Z4CZ0i2Sgsr(Cf^=8DIfwIH887%<0pN~K*L9FjImWXF!6O$*7>EMHL< z<+T}HHy1XTN6;1_vqdx2pX+9B+G^#TFEV5B*ZCkrZTpC&;sFcLzCSM8GkP9TyG_dW zh}&xZy??1rYVTV4tcD^3bE#InFo^&8q8j%rSXQ@SoVIHxre(SuDU9IDH~}5t8JUTL3>r)>vAox zM`-U`h_9f?2Wh{d7+xr=*~=0tQII+qHj^Ia{2I z{<6dh)29IcfO1B9r4t+2nKT6Vp3G)Q#z#KEZPw~`8qERF=k-a5Vb zYD>w8dya1Rw9GuW6vm}B>5TFxQVBT^qLcV9H2GNI7J7fmt!P|#{KvIGCJNkm;J-A zTR}1Gc~=5Lq>baZ8U_^$7F&YGe`kuWq@(>bz~%|52Hr7#ue-TZ+hHW-Yo$hvj5&oz zJGryn@_Sl#O`fwVD^7-I&;MUJ>TVN>~G6s!v 
z?W~`L=>c65P1XHL5f&Vw#aJX$&xNUmMo8t9Fs1!g6NBnEvP*D`oq9i?xeItjLor@lg6z0gJ0l2R1Qmm+O2RJ&H`X;gJE#GSXqXUm3Biprfl=GbKk= zp)Ak6hHDJAs*iT#ZuOkv0@5?{>S{E|8FKq71F}2fGwL?!p?t~W8}!*1&1R!zW~=0s z?yc(0UfE7<#TP@Qf%KO&Mq1ZRGuw#yUxI9QiQmQg$>cvcWp&r>!km9{gQm#{==ghl z$6&^J-Q8p^B_@@;-g3QaJD>$Y2b&+#+(b>ouHa6oUC(u6hXxYkPO95e{8)Zq>5Zj) zabI`0=dgai?~}F5x#rS8IoHIzL~hD|K{v<*5>Outs@~Z$*YxORnxy7AV^XJjeaJXj zCb015yE|lMBAk`f-c5-}3;C1Dhso&SaE_qQE`Fc$Q$I`?^83gT$p% zRnK&~$uvn~ts_)iE&^IVpLeh2+vB$ol6>mZvN+dZnvrsN%)*W=spWhZ)K5BwoNwU6 zs$~Z7S^B`qE6lRuF>3~R721MVHN1gy!~LKYHt9Gefit*m?X7&1_;M}8m{$k`+Wu*K7V^cQ)QUd7!tPP-_w@`oRTtZ0~cP4gbm zx>G-K$FiF@kPAuw3{joBuh5_cJYG_N^15!bIE<8KD!8kB&%A{vt$@oda)y8`iw>*v zdvpFLnuWY_8{3hh{3iE!)F!Lc>t$sjsc3s$7oT`51*Q zc~md4p3$1P5;VXQONN~GIS3lwE`(>~`*is^u8eqT-J=f792 zOQJtih)RGBU6%t}e^o-+IrvxiyCaKI&+D6uyRi;O=5PRX)k@N;LmH$71})ZDoeb&@3ynm8e!`b0&ZAc5akL}3=X{r>V3(eOh_=%|52q;Q}gh3Kb4!_>>lCO?Pb{Lma)v9 z<7@9n1`=Up9LGb{I8FmvKdRkwb4DrD1kSfvkT)^)B~cfgHz05buhKTg@Z_mw(?3pV zrwLwAqPX6d@@!v?yXHh>w++lo15H(Ws9O3Jjq-7KsXsQee&8{BPj_f})S}23cf&MD ze&atP;#G6jbE-;udSO|me)j}Y!eM<02Z;jnqPO-Z9=;4CBKK=z#Tu9n3R078@#Azr z=M^UOMC!1tATTUrX|($Wg^UuatyT|m9)^bShmC6E#BXd#*A}Wm7fNda?p@GR#LM6j zj7~&Ss5b^`-VQ+@tO^B8Z11BWgOxVs)YZye9%9mHmF#$NN9cL{KF-+L(H5NXFFa@)35Dmh|SHttAk+d-V|x{WyNBVO=+;5sJTIKl)` zM$tCyDWJ(T+Qt)?+D#s~9uJTF5-i@evr1xPnu%@AU2OYIzv!g`-|=p(-SXq8#!Cr0 z0LeD?{@Y)!)M=wPj7G<&=u(?4TIu#P`bG3tVHuc#Te`h#BRzzW5_KCgE_rYD5zfiK z#gRMmHl3@)o;-YtH)n%J#-2?Y-s$4_;nY8GuDs z*FoiF=R*+k{P~u~1~f}v>#h@u*3&PN2Iiq^H`}E1_5Hz}tFZ@|eGf(@93h-y(ie~* z`tZrA7WTW4%sjE%*M2ibv-=-kT+VCibnxGE&L<5#qK0e6CfP9;iJ=L~twa4jrLu(p zWxyqK9?1-()*}wk3S7>8%d7S5mn;kU{o>GjULf*}>wojXOP|ab0esC0{J;`n;v!xDM7mOR+BZ+{2n0@R)cfMq@+Kfd|`p zJxB=MAMz_ubU#_FX?;*`iXu>FpL8%ld7H{81Q}Bk>YDa7(Iz>01u7mC#8ExqH`(`i zGuav(J>yU%mi_pDSpdWqt;T-4h{v`oBz6}yIo*~-low$NCn>qBJd8590S!#-mKziU z_#~ewoQg(KPSWUjldqWv(46Re>_XFBvYn^PbzS;kd`=ZZ`E!8~u;IP&^=4 zXkW4;;++HxZf32*5Kmq7L81c4Wr`;a|M4|+0F4W`bI|z|9@u;IVM7=0S@Kq9HH*j5>G;$mMg$(iHg7r02NFdfxZwAs*tw8? 
zig~c0=+2*dSdR1ioy2|F=r|dCKkJrrA6$5woK!Gt$xJMpvRHL{crsA{&G2zlJGh`m zluAr`%w<};7=F0~1LHlPD$`wF(=8fgArmf4_I!8(4qpTz4<;Xdsq#rr*}4j z@Ub&Atwkir$&Tz>q)de*LmeXq}Dn$U3^dgVuCpeAsmuFh_#uIt?Gn|^JQ2oPJd za_%w~tra_XUKHrmd*)eV9bUKl$*J2HbZn{o`Smy2M}_hhTk1K<&r~9jOM(QYDeJ+) z`ANu$cKQz0Rh8;8lr-{rpJwaydq`>8d20Nwz6!*L^}S!>1*E(M?A8CJI4LMfmQ=R}_rgue>dr*bImhT#{>~zxu|{U}~%(#$qXSwY!27UW1Rr zcWM>VL)1?C?m7IUcGLO9*moyl{0j}&p(bQjpgi_Yz5}jMSw4u!xnXC}eIaExvy!HS zbOzJ)yChE;{Qk*awvQLlDe((HYr0JA;M(>;N>nTk;-zGQk@dfky>vs*5vNF6Fz+eJ zjrpXdnYW~8q4v0*HIdVOOUjG$Y=KAc!|L?r!?4rS!{%RB2jLYI0tO13yrL z*uNgDVW@6#r~hch;IhIAAv@4TUz?C&Qc914MWyP-OA}rixGyx(=xkiSL4Ey_rA8L- z4{e{e*wU-)6a=p*xG0$mzj{m?vW_mjQ^zyLBKAV+WE)(_a-(l zSK~@_4$Tws?0Xm~KFSN{sp9Ag8Wo22=)a~(_Or#bi|{-Dsw*uM|l>yzUr|U z%X>{jFg9WVF*>-&ym(C&dfh8X%U-}Fp|q0Q;BIAf7(|?U+zmz>(WHj|n=1{Nx#vo{cv`(RzX`zhz1 z5a6&`C;y{c%L( zh2UCdur=}mFlKVqlE%%g2EM<*0Ji<$p`Xu2%KFzca?6t{LKxavPh(m(lpcFS)EAk6 zqbc382_jb)z7po^Eujqz(jDo;4HK(12K2OLDqDw*6^qs1?Ghwdy0+TzHYep~uEY+4 z3k=%$vKzZ+j)G&M1?f;ro)cy#X$nzV%H~ol1vzuEtCMW5RGXg@U2^KT4O#Bj&uvsk z+;U*G4Y?&sY_8zGY{+^gP~?|~{NK_VKkizXvV4l{)I)pfqyuWi*4*ev((B|IqKyxH z_8Yo<$^B?qgD0OI@cpirKErWC+Cik|&!Nk98H;&Wx<$KJMdL*_v(MFvWmg@9tT#0l#v=l@d+btOdEx5XXxmtQ^bS(Xq z5Q0h*VTY=H3QfZv0`X1vjl`UO?eg)S*SU6g=AuvKrWH9fjy{B%Rl-*xJp5(Fr&2GJ z>P8Cs$(&lJE0OIe6$35V=Ko|%oNuySXj$kSdn~d^xgz%t09(){5`3;9& z4I-7LFe<06&cRP3*n7~7O+VMP5-HPssU-y+M^i%Q7}z{YcH6$e*D^X0;_TPFe~s>W zz7UW8rMBwS;WK2i6L_16>OyuF34T}@M%0R{UN;^&nrV~0;bySnFhVRX{(-CEmg(sP zYevs^Y_RcU4b8`PCd|t%Gt=dSW)LLgPT&sNNP6?toPfXRuHekKMGm{sXx9ygYl_%m zCFS}du&QPKBxd@;jSCH7$Y{)bZ$Yj2?*PL#>~X(6qw4*w{;2>sGSzM^ROo%&F3J6q zwlDJ-Jj(fiDp864hjyCZbL06kxD<9tRUdL(&KjQ-)i~pI2#wN^*&F!@koKFob6sf2 zzbJ}WE6|mG=6jqykX=&7K+1|;+oTST{L0#x2ZRy4hXBeVhth>Qy(%^j#hP|BSL~Mb zgmM@uw(q^>hNoSK=$*W3N2`%}xbSpQ2kGV2MEd;o&&;p8L|FG#^`EhcDg3Ek&|%vO zCh<57-!Vq4mTnEg;ZVixk0bV+J2ekJ9w5I(-Z9TOxwenBI@0(-SbCu)jk2fX!g_@- zw0qjaznMsHlvL8EnJnx|fKa%yFmnH^1=`AL5L|%GkDii^GvJ_ z4(iJtYDUStX_2s7^OA~M<9l`eM34l{(E9AT;Cgk!FGEhk^s6~^)gD}I94poml-C1e 
zyP{jtGrb#h(*ZmAlpOW^MLMHBss2sm@yP4_tyulcURhrXoabLxT0%nHn1%>o+T363 zHN`?*fQ^fv$aTW7cf^y|eE)l7>29M0Qs$s3D0EP?VA)kbz;o?>s?{sg6SJy@lys%j zV^ipP-kJU6HVgM(TQjk$i294_6j}u=?tNHvvhM_QT}W@rTR45%F$xY#`(Mc%-=O{O zgcKnlXLltYNOs2(YZk_OlM_Z#ii`Wfia^v82qid)NhS}ZROtvbBou?kdK~*@bBLvX zm$*`qP3NBSRTbK$$|zL$49#!NRz;>wVaC?bWqLQ zC(+ZWQ|vkQ?7~hUs#B?9__<`T#1%tAw3^)#iy2+Zm&dcHI$2`V*lUTbFb?`YMY3s5 z_9UzQ{;A23$#Xt#ms8tBEbz{JEvJvj9n&Oxj40x`T*F z0c~P3wRmN#6DC78`o++N;^>c)4Z-|KGmv~XkRSY!FANr5^{jTn$y*{rVQ4Xe&Mb|Y zM(`G~c7hN`IB(yPzpH*ZPKZXrv!vSN&3Yn=Pv%Nf8wa`u`5dvtUvf18^CvC+WB)dbrkZAlITX;z%Iz*hdSNXlrn}~@A3`UpXEol;;ZL7fy zMOm1%->|#2sZpav93TFy*}%o+!eVFa3MDL#Dh53azsn@xSr?@Jak4R*0q7;j+_sSn z7oFRih&V;$=58qpl~jARw3}&}4C?`$8k$v@0i&rI;$a$p<4 zMj>p zTpCXPF=;CPPpjD+W-F@6h;G`{q9SMJD*)=g;7myRjhOTP;%$`!SOu@;>OKGAjuk?O zrpMk<$(+P{1}FVKyDzK;Fw&#z1k3R;ni6;#-zTJjs~1XP9pCpXf^r+7xott$!7JSO z7nxSx0g3>&vKj-S;`=@ZD0pABnq?v=@}UDU$~>(*%XwexM{1FkV7$=06TzPbEWqB^ ze?>oseerIPPIoalA+|z_RMn3DRoww|@PlC2cFV~otQiEQ0n^_A|X-YD>VMo zjw|l>TqKpNwM<^b)FO}pio1vJILnrc`FhW3_Tk$$@hI98NVHJ>o;}_oyJiz3Q%mM+ zCbCAAxgrK|zjkfg$w$R>vC{=|Nl#CFF8xeKT?3VM6<+&z6yM`^J0(&wi|qO>L0ddl zU*=%wqMR*6P`VrAEqe2!VM!48X<0kx0C2PPwXttn^5{VBq#_s0KVi_Sv`(^AlbhlG zRD{wuX{vimKLpJRc+8#PFZFjM+#LD3D}yb-hp)VJW<6tvslI+(G4yii&gj3G<@PqeJRM+(U}1Ia}E)=lTdLbk&{ zwB}z1;WJnwdAVuS@uDxnbv)(&6cMsTxHHjEZX#MN{@N>!gOiIPAe8e<(mSX1>m70> zNrH65Wy$0B>J6Ueeg!*Cl~MG)x_Yu}aZu4*swV=Up!f zMIHf|qAHSilsO;rU#h!yWF}fdRQEJ(f zFVyN#NG;!LII<~sSJM?=^|8_F{+TEMohdELl$?3u05TCg!dVs8Q0Y7&kXZW}nN@hJ zU@=NT$cTZM#rWrSi4YkkSh55qfnMb`=z11}&{U&xcm9?!pymNsyd6wKj8nK2>tE6x>kcn*OQg;z^^JiQaambJpfJ+Mu_&5w z2NFd;CrYapQ=q-bwZ#m;|9bgNbqziGW6S0pVg|cE3}VuT$Wp%MMBV2LrwCFQ_|?QW zkJ#$&Yr5D~=Rzmm8>FFx#FVIzi7FsHA=BCGb;sr`YR;<3YzS3j9Tlx#!X0NkZW8HS z*NmgYqPUYBw1c?jtg6uvDqieo(uj2arF-O+1a3jVNkaAj|F!MzQX|g3E}3m3?i?&o z9VtsxI1 z{T72dH3}&KRHyFS^PeAG#?4acasP%?AzQtK-V$-28!k9i(;%V$F#hU@1{HS}ru^t7t2=J1xLN z`@@55M-LcEsx&PxgD-ioK~H1{55-^^4c;lpk%`If?@?ENDp`HlA>aNB0q%|LMY+9! 
zz0ObNVToaR&Q{$RxpA35m+dwPtw0H)nFC}KYMxPhIkLcI)Do?cT(FlruE@RTpV&i@ za##$#dL5gc$h#6c`6lv^9$n!lzQL_SQW-7=N92El zmpqVE=1RDp^>_px{9%yjHVgD5F52}Zsp+<6Mz^S|%<9V=%W6ym%_;h2S*=fCbUY@2 znUq?nu2#^x%GRYaCeE}Dn!b3t70(}x-$sC2&9=$S2vQ8dK35lBj&4~j9^SkA-p(dfGDvS_g`xg#e8m9Dnc9LLDgI6IVIVeTb@aCKDU79gN!5f#WpY)Y+LzfWnv^LCECi{ z?f;?dt)trd7IxhhDOzZ0i@O$gm*8&2ic<=d;_mJ(6u06OcXxMp_u#=Df&{p$zrDZx z9l3X$d;VgCF_2`@rBW$0Bw# zapJRm?<8QGqhdK1cbvJo=wPG^mYEhwLX<$){j-5NMDQ`6Yb>>wHTuSpp#t5XywD{7 zX2x(EIUXC8rKg7$T)~Phs)6nA+{;MbPrY=BCR+=$PAd#8-9FfKetNH9Sw8MR(s?{KT`eXf60+z9|v z-@T-dTlK&6Z#+O=<826aq4;IWzJ{~#Iz5&n0^a~42x-h&mA6fZqwnnViBTKFv+JG4EQq%{5;b{^qdQd6%N1f?yw7 zjMKl{h4J(!;FUzI%+638=x3@qHE@q9ALX!B+#9Pgv5q0V~1<76Xba?x@T zF?U$|r1`|qpo9MN9An>}`?}^q#nLQ)KpGUu;UP2aEX4H#>6OF*e*44doe^9h)cfFS ze~9n6g?&s7%GxOb{)y=Vte9S0W>{9+nnF?8TUvvdJ8%3Io^7TeR|-8?QYQF!5Sq_? zddkgYY1>}YcQTQG*G^2s)RwRSd&Lyi0LHg$`=lB%hN%G_0HXS_L#9tX=bGMy+sCpSt_exU zquYz358~HwNRy4MO)?W)HGET1(|0q)IF^8Q|ktP8OtJPL&zuY$2sb z$I%~`M`ch8wl$8@nn|wb!n5ypt#cYI zSk6|FIEJ$K?@N%W7wkS!uf@97Z{F7(dQ{kU&cp}N?MfxaZGC+5xLSm~OVcX6?6U^e z%Hn>@6~uj44BpwQ(DM%3Y#x_jl^^uvXEZHqx-LLhON;*KBQVppAFpsY#7`CD@zu9(w{Cr`|#M++w$&S6ANf+*?4ka@@wD+O_X80s&cCY>T?1cOXER`#4Gr+K`p~% z{PZaSX4Ot)^a*Y~YFgCVnB}m8^FgQY<7kc*nSM?I(+Ao5+8JnzCAK1&BIr-lW!o)P zJ&IAfCjmvS(Cm3@gmTl?vWHroEFO>e-13xld;C)&|Kf&nH<2q^jZj`%E!aL=&qWrA z^x5wtcZA9hl%G|emd-bk8OAD*?Zz9B#O;AwHP^Qo`>o<_-u4FNx*2GrZVY>itu)QD zBcIWG^OzG$FlJCq7MvbU%ViRb>CuSk9%FEs_Z zd@(|wIz^2$e-8Z^lDZ>zc4+|?-eff+9iwXyOY%@qEkJ;$59M3DaozSoBDkwg|hGzHI4T$Yu z_;cQKNsVBq^*Is-h9y{+;QH^p=EC6F>ev&oi#vnJpL#@iheAGD-8O%vvBB+j-RF!< zs3RBxphaebU~c$+%x)OIpc~wlj%rlEtF7NJm}1ysshM3fJlkT$=($C7drnP+1u1b4 zIjA3ly{SvG8`}KgB25p)d)mdHZ$J2WD5VNCS{wadZi;vTTF1J%R~Cc#DB@B+%F(2U z88N(~o^8RX_gnR&ZsOWz&~axn1CSY_{uFAaZ3T4ZaG!9YP*;l|kWNQ|NFSj+DA+CS zMGCET!+zpWZh=(xb&K&>twp>d-u%#k-b%b&Fezz# zdvM$&?l{Iy+WvfX@d!$WrzqM2`3I)uU`zr<(@Hk}*wU&K6Z%QF%Y(0%9Irri=zwcX z>Vz!)z{y{hR~BwLXpeAoyBiVT+Y6Yw!l-2zP;Q2x4LHr|KqVpS2DrMoK4j!21$x^r 
z8wR>9w0UgL?spQ}o#0c$sl_9*uV)>C>7Xd?k|be8M2tAe$Tg6nn|DX6)zRbnxoJk8 zMA!mFQb5i=VI<}8%qmDbho{G&aGhxVuH}%qSv|e#g;^9;gcRq0tac`-LN!EtCmc!R zrb;4{_3gm!s?85A^)_cOmH7H_EgGE!i4IJ^_NVjl$>oKhCrm)02(A_7Q1ysWL9l$t zd+)8|noRNHb1tJ!joJzR-i+oqgIzrb4Yzk;KOap19sJFr-K?yLs&Gey%}J@TU1dW9 zMU;OrZ1pwwXof<1&R6HJoqElV@-MmTfJPdBXL_UgRGmdtgo3CoOFk4ktYGS@mCFH>R@M}`c zFEDj|ZLDlp`T5s-HwMrD$9w1Cf`e2>YV~0FKReyp&$rDHq+sf#2FWPZ(9vjJVO8tGhzqNE3m`;!viUFToxQQ$Z8#UDtzW2iDf3LUsUo`dqJS2@r z=1?-(sI&b4ZL0&Gyx%<_xhs0fAAR`0_$gWmK&DqgZD(BhPmt>WWWoRM{yp%Vyb##7 zn0FwM>66vw!j=|+Ck^dKrtf;ff*w~{L1;M2PEJlkt`@6Joa!2%KdDQYpkwC2cD8qw z%hOvw^7Dtt%GFOcT^|-U712>E!J_htp_#1{1;{z~f^6~al87`xL7Y^&Y;0@=N*O|G zDk`GAy}h*J3i>ti9^~cDV!p~(=hij0lU+3=4y(7$&2#_u*Vjsy^qRlV9VOLx%%AMF zdOzNO8mzObF;lf0x&H1y@RLIi<`90ptadWpN^-GPu66lMZK8psYdA}oj zhpZMkXX}AlBB!toC_P5Z?S9Zx)f4;h;e&|G^ntgkRlV0Akk!zmh&IZdP|vS@sHl-x z>GcbahMhh3uROkC1Ow!8x@ed{hFSV#sH%N;J*IHjx|X7u6x~QCpU50 z5Q#{BG?_DO*X|im+EN=GvfFd_voLe^Wftt;ID4xsz=jldJt%jS=zOMNU5bzpg9Q`b zOF0+a4hzE+E=M2{dEygDAQeYeXVNs5X7cjcyHo46><4dF`JPAVgcVF8wm?MzLk6eg znI!F+?;-?T)@rCHK+tQEwY=R~^w7;~^ZHK|QjpR-t{BL4v0%9Gx$ z?w|CB6V$ql&T$NUBuISjWq$dawm;rabs`Z<1AUOjCNghDcrPA{;;*z*U8f~S>))Wb)uCBU}R%x3)&LC$udZOZUcpm@0}*yAk%L{h$W!{SEQpEAaMTGy3WrtoKy?jP+)lU zCV^WP4wG&V3-Q7J+f?aF5B%7EYPC8HID?Pz0}lxp4T0zS!QqxhoByl+AEXzbvojyJ zTv`atKX>S`Pj9wfY>LnG;ZCbN@1syAe-xPt?ERLCjeL7;F?g?tQM|c7WbjD|SeaU^%vk2`M>H=^>IS znVoUU@rtQen>ZRGOm{i#Oke6%^A)d}b-a7Spl;@59{(lv7Ru0_hZ#~nNnve!|Ci89 z)yeiAHQZXSsh~YSUF?~lc_E|dCcY*m*y4ZidBec{k-WitH5O96E6=8LYO4GiW2s}I zfqst<(|S-a&gu)hUI`r4&wYPFOO?X7tyGNqGQA(oNF%F#8ZMo+tG_A8KN&uLA0xns z0NuFqJ=9Fc=f1u00&Htm)YQKounHgVaLawwFl}NfQ7S_2G`&rmNVep!%tv?VpJAqk zs8`kY>pxA?e|;K}ndZI3*n))sCvHn!%;io5ToV@`=zl>J3S(5;n|OdhgXGq&i{ZHb zlA_Q_#ILBmY5kT)0pi*N<%ovC4*a^VRuo9Q@Q@plkbPcig<9tq=`!!ac6;^5dCK$c zJ<(!tXfkdDfKgJX!$33mXma0{i^lZYi@95}w$jdwc#(9mL~Htj5d5#9q3_V4+Mh1% z)BLPYw>xT__b9S(8a>L~`Ud%EKISZqkPdEtEW^+w#nS_KuAY|C=3q;-=?h_ThYZ$d zJS{Cc2t(Z&O4!p~*QI&=`7V!yl;#~b2MMhYQaFBT=shItIj`I`zZOSk+D$M6W4=2f 
zpyD$2Yh00iE{qH)MU%fkr?;XzmowE}AA=+qtQ5p?6Sze?t{6p5G&vm}bf(L~4p?Ki z(fWxwCF3gsU4S;-nRjYUrE%+uS$fSk*9@FdEFX^Pt+-#$ek~iWpKhN88AY6W-M(bh zsZ7-<*Uo)U*(0CzMA_t(TELd4$9k zNR^06RquJ`_1!p$IPN=e)3*(=BjA;O`br!udrlgv#*rldZM+YT@~*RJy6I$4=xgYdes=n zP1t!EYUUF~T>8CH8XJ!KRI19g;_gJGqpqKrVQRIPVQ!bDz9VMwqi<4+LdJnl4LBf; z2c<>+04e@*EbLOgJa9s=xxJ)N)rDPzuAvpKMhI<6n|G7EYF=R50Ctwn>TBY|96YS? z9zPk3MQO*cG00`~z10Sm-SEs zTCBE|Cf|4WyZGmcdDS+JAp+gyE&{yWv6Ou4?or(;Wb4f@DA$wELb>XOT6ebI8TQ^8 z-a?-$1%W`7_?R+%qMZE$!QTr(J0;R7>>kdm;8b3^*is$28{I9BrEp146cKwH^85ib zpK!p^;G{OlGoi{QCcB2_IiUzX>I0Vg`6#B8svC;?xRX}Q`4kWhSp`3hH{h2;+%#rL z(p`jf(P_}6(sUW(Mq*rBIe7ILb%s=u3uc!-tjCT)f1-2}PG$iQv+OWyJr=%CtN?m4 znuD>K_2o&tugF@l7j+Se?FPA{XfVmzr8b?>8g*lzNcV`~G&bE`;SRkiDvm1b>`c3_ z%}H%s=~nL?O10pb@;to6cA4cH`+0TkYe_oB1So;SJ|gXD1N=;^bMcT6~pcI9`fNxYF9 zsvJI+OUS7|ryf~;=oSw6q^>8?Z0@q3(J5t}nen{+YG`Pw`{tG#Y>Y!atd~JFZ1*l= zR};%t3|@QuMDY#HI_;W5H1f{Q&f87gBakJ}_0eQN6_IBF4E?sd>{veiP2D;Lc29CO zKUeFShL$>BuU0a~j?3P{&pu)ghVZ557el7{?GU{cW*B4#7ks6+aEt+K8~@EIXK2^O z+CRzStcrj`9=ixPfM-GR5ma+J?(GR5Z|(m6PEOTD5GYT^c2)|!>3*A&iQDw zY3le2ij=)*GN!1n`LqtCjS)G=2Szkd^&bc~)mk$817e6?vr{(OIB#N}gO4xblK9!Q z+Z_=+*nx~`aRsz`mQ7OZGFjTf-1fxRC(Bgh40H!{q_a7f&|y~!w?$Te>3B^63$(YH zB3XyOrgSEed;1(-Gz80{db$4F3be8EgDhUn91Iw=)^Ckjj@OQZ^}@ zSUm=S11K}$VWth#RU=fI28NwIBGa)yd3z8ewYHf?X)yQzhhVFKUz*r71`uW2N-Nef z2TEHzjV&#@ji?4S#we}VW6MoVGN7?d&G?=kbN5{m}nz zuM=%3d#Q6Zq!A7&G%kh5)`nT1vK!;>`sks?t(vblZWkQb{_u3qy+emGF6wr%_lS95 zxmV~TAZ6fSYaGQvq`SA`zNIik;^o$hk-iW|t8de~|E=j-$w&EH+W_WC$Q#p45K`wW z!k4(C@(3s|49{iEDwV=vKTd zS1=^V{WxiYT}!KCU3lh^Bvy#^t(oj}qvXBh3PDT#siG@p;z2Zuw@PE;4blS91;L3y z#UVPY%p2r7D*D~YwnpAYS;KO?gn?E?0>{6$*q?Et?m3w|1_S1{d;1wIDNxGL-+k<^ zsykti>@RX+FrUkjbj8U_uP)RXa}A;9w@bVC#pUXc%q3qF81xPj7~(9>(Z>E5;}7OE z6@}`1A9LQ{Mf!+_khmAvX9qU|?$dNHDDem^V$U3}dPciHpL4wrt}jnE5*WAE4#6PK zTGPT8zUt$*Ils-25OWjI?JQfDzl}{u3w3;5h$Qr}yzPoDw?|68>(uQ88MV4y>dp2$ zH^r$kEB<757sZ(ml+Gh*y_LBvk?vb*E-`RvHN-J@g1B)bVYwe<`fjOdbpX!V?cGpZ zM51Bj^wha79a4<@@0yexK>(a`m%dls7tqi~diS^Mdsu>R(8Ejxn9$O$9Gw#PfWS9v 
zRx!C#-_o{mEg0aaau<7nvY)eRk_`)do7fIg#0{>?xXB!MzGp+ zVp81+zY;{PUhOEZ(anjEq6EdBtC0VlzgLg(Gh+ToQ_^6}G=m8!+?33%+?=OMSBAJ^ zr5XMqg66wG9diX`;f(&W+D{lNWg&qn3vV zh?Hr6^s3=0nRPy%OIyF;mrwoVn?pQhM;3o*`>7&%;Ab%$QVULH`=bx+0kL)qCao{t zbh~mnlhv-V4hwWyG0H#kTswZaWmi(gqks9v_I@|xNkIEr!qWbJED)+yI2~T95nMTC z1h0_R)~~)G+GZoVaQxiQ|CB}C=vyis0doHZ*{UFfBl~joZd~AlLdZ{Ha!1}%?Osvx z5^ZAdP1dEZ02l5xi94Krw69%Y9~ybmqmR5nwRxoLIc7Ttg4*V0O~pLY9DJgh{F!ad z&Zk)<1`dKcTSrR$Y>JA*$Z_z!ud&1W1#dpaCs^e-Kih{e=@ThjIwU4OBBQ2O>U!68SDm?)S*U@Du(LVFIOCxwlm0P?5(@&2&$o z`*n;}_@o7LQgE&9U99nGSVY}#0OZoRL;)SE@rJxxFcCpoWBf@(>1rx<#&dD=a5^c~ z(h$19oT*knoApg;vsH5O)9&V-=leaU^>`*s7-32`K-ub5_^MvorPM-ZQ};;JZW6!-cpk_iX75 z+fOoQOK#aWn3HAj(&A@h32t5JdD#Qsa&>Uv>5Jw(ZKaO?=@B* zIWwe)yLdIk!Hg4~oFN`LD#tb}=(So-<`jnpJZ_%1T@hyoR zqS|HRPdUd4zn(ZO-4X_5Jn5P)r$?~4ffmv$rkZ|&wZ8Oyo3!|Am%` zmv}9vylTGZ17qnI7wJs#3y0Xmm6awjD{dTBJh_$n8((NqjS+DQ-Sn z{@WO0$~<*Bd|us_9c~?xzK?uZfw)n0Iy!UyJ_9@@uss6-e*Rx7AXX%;-3= z5jo|5FO?j^5&=vN2-I*qES9$|$S;E(vmUy_`|oMN8R@;%sD=@h2&~lr#*#$XT_pAU zu`tf(oo?4Q^AU7PT%U45+D>NsF)A*SW9k$S5Unl}yHg~o4ypbs_sqGEJZDh@z-}Tc zgZAz^pSt090=>es#f^}!@(XuU{x-i2uC>6N(@zV>)Qq3C6U8qsEezu5qd~MjV~*aZ zHssf~yF@6PDoJv~6Ft+t^$9#| zSUP=+Jr!uvL}&XYY2%k!MXb@f@74Nj^JnI30&B+>|Jx!2S{PEPBS9TG-|j&6cNY4T zv(JM=Dcs;kW-r*FlsTEfkpAa2l+<3dro#pbw(t~k#%!{EzRF@Ed3)i7LSiMvyV6bO zx4Q5C1i+Jx^~7S(Q%%EKwk0mr%FiU5e2L03CFH&`w$_MZUHsA;>^;I@#@h}G^Ny6a z!$^^0e%f5N8eOVoMblo^l0jzUB*9Ga*~Z!RD7y=wPdNlM3ZbU;h%;-AHB#x8JmZ>_ z1QZ5Rt~jh{kM^e*Fvp0Gev?SAq7!xZ`CbX0I+Jv6Lq=2N^3xqr)0{VDhHAlsgpvJn z=f?>lZP?!)4)}s5t&>4lQcX%-q8I9k1|p#Cw0kKc?`>j445;2`efLJd^?YoB{y~~{ zW|+HL@l$*H_{S4!)XZ? 
z5Kr3V`4q_+qVwd_u;3-w?jHs%ogC<(M!&O(J6Z3sF9}Uv*zk0I^VSE@uB=JuOsr`!}B+q>Jz2ay8OGDjO6vr5$=)8si|WQi!nlz zLUc^pOP+Lu&4=VDV3*XqnzJK&_%+!;`5|k?crXL}V4;mkD+G8Y4ST1=@19~f#Wm&e^VP^TPD~vzznFesMpPj6tcYGMjIN&mz}WB~8}5y(SD@nOo00rR{k` zL!rO3EL`ULV11bFTwJ5$%;oNgmq8Uf9rW#`D|UYFXJbY*PWvtWf^QiTu&B!IoxDXF zes7jszCOh5$zVbfl1~dvGbKHSvvU#OpAh(Zo@-sjKUn{fM%3dhLxS!w)^YL+&O+BkCKr5IC=*5lNN+Q0utD6v&TT?f0 zi3xNBNRkxjmUIc`eD=&WHjKBv^B!T%i`X4y{!O7jHplbqxnVGgm~4Cnt!Qw&Y(Jfd zKT3u=YUD9)3y)-CE-KJCk5U1~dnw3ha6G z-KA#0WbOz?SW$!xZ}MZHD#Iemjq8kW?=H=`w@pR8aVbdjB8|h@vGg}$zQ49P`Hl>P zhUkGSmK3zhYOM3_5Nnx-;(;}{-F~jS+;ZBT=Q2JOsg=aLh0N!_N9hI@E^Am9sw{unxH)F#h>FaGRCR!9rzhXtq7pX}Z9SVFL9ajh5LL zk$?pO(xa*%Ou%W*RZPhu&(7)|Q^FyxBShG#C&%)|J*S+oPL`~GU7oma*W!wi{LT&%sJD`tmBlqgMdX~7-rc#szBCvP#BsTW+@({r|7G4` zwd5?jK=PR=z3bFlrNQs@cW5|8UtirE=HDmYvZb{LoXG{RSbUa~V&MvEu~?B=ppE`W zwy;gx;`?)b!+Iu5BFsH;crk;%pom(dQj_`5a8Skae9o%wLZQoGX$fYs1@FBYU+w3y-^UdvRP z?xXoR{t}D2N&;zyCr^GO$r|$Fh2Et$MfL@WEea)2E>{;}C)0OkL+Cg6vgBV+#I8Pr zZG{_aA3A?m8ot=t%aImHHLeK(&SdYrs&;z|alu`EI&AvXMyUxiVsuCYaw?d5-;|s8 z9Sw;T4#8QO_PK&q+DvntqAn7DJ*i#j%K`yo z3fs&Dt3cWBG!@%e&K3+vYzSJlp0dw9e(UW0@iV#4)DvC&weX^0D@h&?_l@V2xT0T} zbG-9hVSb_OW(C*tw*&5m+<}_5`^HS8q9uwS%qxilt+ZnzJJj%|GJ^iM_G_3PX_An% zh%NtJ#^^2LclZJMiIs;OJyvGGjbj(EcmDlZk5mD*6dQyy`Kgnu#UEY~Nq zDO}h%#S$uMOwn6|gkrnCZz+(iMXAL-X~{DP%AVi$^A4yaaIu63kSzV}c_!aw)cm6M zUcRh8A?~9K4p8ZQY9=LtSj+VB&CW(|#-3q-__Nz1XVYCiC|`B4RQWFtzm@m(W^u`1 zvMR&l_k++#XQ{%+Bbc%?aNjo$%yt9M3*hxYtvk=-&908?{>#3WbQ6KO2#5 zZ~G64da^!EgS5D^H3kmF=wDsp&-qc4k~kArKU5$t>pDyQAabrq)@|+6@!RS(@Z^Q0 zy5%jgBFhmE`2KU(jl@SUIU|Rd-E=II=J}iuiMB1Y?~JL_T%(|+tCCdY=MB^wHY-_T zTLv8NsP{n_F12|}nK;%nJ~t*1q#FKL7C_A39rWuYoCkHr4_i6{AS}0^K7DupnqAHp z>n=>u@B++*Axty{tBy@9e?N=ft4_O>R1@o;5&%gt(Pt2b@l)M<{p1Vdmz~{oblquCAxBTC z?fG2ly6uC)LGc|jUtSR~*SfrZuB2=-&~jU*QeE3_4O{6-@AByu^&@hlMG9lvhZHTXZoLV6(oKS9k9o|Q{-(j8M=BKlV+4G`<=?+Q)~`VD_KC+|WTe?G)yI-!C-N9s@LpN=zG)Zag$T+Ha{i-NtH8-+jpJwy>Fx~ZVx z8pFW%c|+WcC9=9gpqFH`%a3KPz+y#;!!%ci9HBT92x*d3IH|bBWZ`34?H8?SOA4GnxO|DgrNm6A<_-hv?pFbiEQXziUh9< 
zmHv4TL<9eQ8zaB`D)ZPy2&UZOo&L^in?mxt1rl?PwG%9;DA(`slh7ArCK#nLWi3tf zr{a+8KD+^aIXd2biesIyu#ZFVO=|FKjK47{_lBM zwE3VKlQHN@8v_b>k_FiDgAKI6;n7TJHv*38Q`c`L**o@V)s7mAd21D>>03ezi3}gs zLSpy4>(WHO;UKlT5Hx<)L`r++UOX?%IYe`}Rby_W*UW6Vm%>dXbEgsuJV3KuNyBcx zReI?>m*c(rA;D|S5Ht(jiI&{^g%Y{vjq7@5*Od`>N<0@)k`%K1U>(+u3cxgg1#iDdoOLiBVU@AELbrNP^~p|E{%M9PLe(c- zEpuME&&6!QarU-Vz$zj{z6c7d3nk!28aEGQ>$s;h3s`>LRmTrpIq$SRbQz zw8DP!(s}svej`0`WcOFNC5QDe#4cgfx9u$8OX0RHtiVOlix7AGXGY#8h8%vvSlNwO8jdIqRm+y@-+i7A)116& z6nmu1_#S_VmpLvu3guN90C0xNU`h@s>cRzI$F+-Ue|}{i)SHrpAM1ZWmIkJ=(ofAg zXFTGH`sbK2_Zm3{J}okbm=1`obp&Fssjj20^7}3|&8QF>q;F%fX}p*f4(U6!hO%X8 zdZd)AqazJYhj5koz!V{z||A zj#j|oi_BwrMstshnY9+#F-OnsXSs|W$T0JX4|OkK6Y~a9@*KKku|i_0?dh!0a$;xZ zDM5qoKV`fPFKD74@_egvh^cW?a^h>GQ~_4*V1*PhLPmtE6zB|^sChGiN{Eoryj9lD z$+(+BS1+(H219H(oBUfem0jj}#pxl>j(~8S_kqXORQ)ej3-=FJyMbjxOZ^Gn8=Y9- zw{^pb)B(jUw@-`0af!9Cb6aFOP!Ey=4?PI1$btj%3p8C_ub%9N*s!5*GIst2+hT8+ z#^nGc-NL&tVSI`gK^$M-5qD@LKvJEXROG?V{m1F3M;v9LCZqqsDrLmJeDv^cNY`^! zy4`-K-H|T24wQ&iY&c@ucn=uUpLr}c4+EL=F&b^73*=E_$P0JIeDx7KqvTNn2w0(h zu>#JPWB;I;Uq~cw8oGOPl3WAf*G>NRrS7k%xjI^)gwht*sH^w=6*7qRzt>|>p6fB_HUL- z3~>8l9R|<0kdF=v>M+a*yloRNT!rtz_y0|XRz#A$zun<)i->D6KCqoQ9uKM3Pt6f5yrG@ox&N zydC&HTep=FqusUk+pW5_QbnObu}(1Ac1vrH5$?5ECcw7NG-Yz`?CwfF7Znu~B$!bM zq(w)Ics<-u5xN{LyjA@watL<-_IVK>XPLXSwDjBKCPac%@9lSh*?rp{SghEHdHYC9hYFMlr0tD3c#rQi)JE!4W?gHKKM#pKnGXz$=$cnkM#;%z$n#whQ-E-A-SUlhgh1 z4gH8+ZWD9?^VVnZJJ5vJ-mZF_@S0}mwY82XRkU4}#%~@6 zUrM>q&w19LCM(f1L>coQk9&fPR}7MA2;vY=a=^LQb42WYy&P zh-PrHe~)R?t^ia~>^mH}E)I?k$2yGh>5RESwPM0e2ZzfU6GOb8E0mx@PCGOv#Oa@u z1TJd?9*@|CAQ2&HMovD2#m+|IcY|ygG)yKYCKmxVV1`3(n;3o{_ry!J5ufJXQ`h#$ zuP;ZG(|stt%AR`E?RN|FWeA^Mn;-Xs9ttfN8YWMOah6@~o5(wTra!lR+s!I$)Jnf! 
zuC5~VVUJq?F(u?Xi@sGxc>hvD_`kmJm?H;L+n#}(8da%fUj0a+AT?PsHxGX|e^c_M zkK?D@MKj^kSl=;!ZfVsJcGy0b2TW6DbR12`bhU4EN^Aa2-AIMvg^#m{ZZB1^v$s?c z$m1(-(T~I;bqmV3HsLsGBPw@@M!MIP zh1STIZ;>VMagt4@GKdO_Poj2yegmf6zgZ(hzM4g*WS^Cl7fXn(kVK9wHB&KQt z`o?+Bzy@EX?n91(xnY}e1bysC)uc+ZS=hJ(Y0nPL)dt?Q2(st-@Wm!Q^DxQN?EngI z!o~ScFe&of9fyE}A~jt8D{%%$Xx(1io${9P0}s{c&#i5X_xi!NbMx(Fc>8#LFY969 zK7;LG?Y9%n33Gml@Jp~b-j|4xoqqqmz^~7N3+$tNgeIsc+R-l0aFgA6>e?3h)erFy zcj)r8%LHerS&3iI{j6&Gx%Jj%zpu`c28q5xO1hE z&8$oOZd*!-j{SV9s}=fezfKjq0ar3l4;Si7o{9YNKVLdf{Cr8^*%u%ygYer5rd|OY zmWxb}BBEa>{PfVMP{G?uVpX zJ)my<*6Ivv@T^arH_I66L(ztTwSVv_P;3mRvDl|Z5@cU%i`s`S-empKHb(f$N)oMu z$Iy8_=iDIMl#)E!$@7sjba#x)Jmr0h0(gga&ACBE`{pFky1=1eGxMOx);h>o3HMaY zwu#ZygGIOtozBd)B_Gksm4S>`i|tQ^d2vZX9}#~zMRUd1yd3Opv{@mI* z_~X{2yaZ9WF$3rbbXqU!={mk_cW>ascBB(;pz_93vF5x~yIPXi;{YYYx?Z@_iA{@f z#w6kU@ZeB_#3h6zv0ETezE8byI_Wh$D9c=$$5hryxb zfbGO)9q|_c!axd3gi#BfL#&I@6c*d^ zaL60Kw!nh@nyuku9`bm8=uvM>NJX&P_UdIvxo8ZxWwKXjaEo2DzGfK-R5FK^KlnM~ zZXdh*hc99FHDRTJHzIii6gPt6t)bMYgUN3)hU~`36(LMyBk?0x1xEdF(|4&$UM*$i zFH5?QH|EvhJD_FI%?%T=6|Ztu|j^(lpOuOD_a$h4w0G=D?^(RB#9KR;axcZ$ZpNCV>` zpT|X{5``--Tq#$sv&6_OZuNn{xtf~S)X=@E-)c5@BASlLVRIrzlZ;dfgC*>+<#VaY8r#23_>TJ<~rdc#H{x^lV!=sUYUHg_|A9&x{8D6N*b3%)xf?3a`#&IQCH=W@xig@%Q*^}9W3!kBt_ zK*8K9^Cdbo9?;WRXsS!wNILKI3Z$HDJdC|w+HUZpR-I!Z!*7>R<8Q{9FmJb8C7-KZ zSwsDx_aE2Urd~1qrOX5{PgBX(8(hw4}J4lTZZGGZU!aHt$o3+h^UtTJ&K344Z zd=1JFi#MG&h-Qne_gEPj@PdaN)Cz%A;qvPK?lu1~w?+?~-9789+MhfUV!hK)z71fm}toFp6%#9t!sPG;=c^J8%DNIUA) zSSH~iI^Z6yR@2yu?=9niT(T*7MPr3-wqvV2#;r4|tmoxk&GF3dd&62EXhcW!emT{? 
zA=`C7uZP|x>c{0j-~1x8Unw)KhlVJb8dbcrYL?e+w|x>#;x@(T5{;Loe|oB4U(*hF zxajq1F?QeHZL86vi9oXjjim@m^+giU03C{56S~XJM^X9dPsNPX(Q^(pTW1=C0VI>Z zxTkn5U0`-H&LuhtcFkp$UX;pAx;Of~UX$c_>bm4ax6tjAR*#YDw98$5P$D$iY`*u> z_c)^Ga&golz{`=J1iRl>r|88y5!_tEVIKmPo}x#B`NdhYbfk?kC@BAs?(EKz1;5kj z8q!Y^tHu6JTIN%wZpD@7-LO&Qy;9i8|Nbs0^F#QBpV$WldkyM~ff)*p*au~PN|E0b z0XDZTwdg&k+^!ow#^0~O7wwN9^iTJ~?hF-09+*s0VTR%4E684aWF zPb1REbM!SV4mR>mg)FR%pom&X5tH0R4?>L++n;`pV7vXn=Nj%Dvl{Wf~H9#toAkGIJ$CcJ@stdQ+}dFfL(dI6${ ziA6V6YshjK1Xoe zBza2rIzR?C>NVUyP>Na2mlLe5z8}H!kp9qP=CtxM3s%ti^ai*ABMxfAs^ulM|sLeo1w32w(AI) z8Irk|*M+lkOPiKLbzc$*%!U18{qWi4tp42L?sB^`1S&8%nN51p?W{48;Qj66q*w_R;P_PD4 zSoXyr)eT|gidjfVWOpRlq$uhCVehSj;@-Zs&jbh(9FpMf1lPtb!QEYhySrNm-bio> zF2S9~-J0O;7Th67!*tF)=iZ;pxpm)}s<+;$n!mb=qWjxbz4qQ~KkKue#bsLhCcxt8 zrIHNHzAp}FVET5rta9Eg=Dxva*`p+dRmIF`l&ho9AF|ij1~FV4>5oc+kUstV+5WgG z6E*zlcC~HtRp>SVLkIaa-mFbrrz1PbD!lQpEvL~_Hb7f(&_a!G`nK8LpiPwRePcU? zP}8yh&Wl;@v17n;aysmu(B-PbionD2T2Kt4{})OU=WIj?4<)}q2gYY?O3fF6TTX31 zSox3-9eHP`_ccdGQ9sDQu=<}rFagCu&5(l&4ZTX}DwX)rg}Pqhu zo)ErZ!MWX;uVJ&yp`S3ZJCS(@ZKNzS_yzP-cNkOMOid5v>JI>z*}b;5n|0>cs=DsC zI&=fs^>+!r>Y);}3;5OG8?(zJwbR9Y?qe2%zS-h@%Or&m58ku1k}f4%UEbWg5i}NB z;YSzWygRyS-f$CS*o|*+f4qH8*~1t)*Lu1ndG7Td^6l_bd}`a2J8e%VX{mt89`CiK zoKxf0Ke^w^iyg)hdf?$XjAEC?t^a($$emDP`j6L;KhDwhCUMe$PO=G6cIiMwJ@ZkE zQu8zaAnH^2wVY2tXBojWC*CHlq9FrkL*Z*+T(U7Dp^iZ21SPNJ0cF>|Z3G3DfT|pB zy)5ot?RT2_PI}8Ax zGQWQXk_o5qx=T7T_J-kB5t6(w{Rhe2Gt_5&gSP zIcJuO`wwnz`RBaevaTu{<*6363RbhWECRRSjf>!?joq{IfMsBt)$=B&UUl? 
zX{#-uqgX-~3%B?=6v^HAbm5i(1`%B(kK#>&!umRxl}oco7Y#{(jx)$vWW;f$?2GA*6J4n9>?UG+bhuWa-=>8$+c4g zylZo0w^FnPZj-(@TF%)?Nq`9|__PlLdzuh1yck0;H8v+c*S}FO2tJg;aJ0Q7CXLyU z%*7as9yH=@-6!lg#1Z;x1|srby7m7#1?J2K4Fjr5Vw!KC9%OM##`V2nrvaCr_M2s* zfM!cK#}`-5&j{wHxFB000`MZer}eer=hW{2A=SufAsu@^yNbvATl91gTqGv-&M>+G znv)d}C8<;EOUhjllkTT7?j@2{zq090Orr0uosKT(RSyTz1}tCO(Lz;_rx8(hfG>^H z#dk1lcb4o4GRDx_f1bIfrQbOu$M&5nAqrd)UnRNGT#VZ+e}1%8+|%9AgKXT6O~mFf;~46RvvD6%y<}$p6a;Nb zeq9V5#WFlY(lH$6ex7>D1t*`+b#xaG60#xaYM7{d;GvEAWp=-7t5dD+PxYPD*<3XK zwtn27RdXhc68Uzy>0mgP;3CYbt@RWVI=zJ_Xm+G|*B;#Ysqc?9n|#|{)iKN z40X!P&&%6N5rB^IJmJZ8Iv_8mw0!h7w_OVi^xs;9STwX$z)yU+Zox12Jexx+7Pn0G zpv!r)&0&zGsPa#j{y*|mg1{Fr;b_Kb1Tf*!@K~gyJLC?u?9I(`Cw4}8CMohp{xr1$ z_KmW?*IN~6hav&!nwBfc$CD%i2NTe|LGnio`~s88qe6zg>* z&&Q9JPPSldD;^vb!=8ST%V464G#JO4E1$j<7IleX%^%0=^=7ahX)+Ka0CmaD(D#)- z_)#gtd6mY@HZOY9)42fu&P4J^%UPwubWLO<+zgR6>OIb}ik^7)6qXK5xC^FavquQ% zu(%}Di%x@WAPSymj##**nhBWiNxIWQkd}X()A~@}+N?FH92v(<(-(ea;yW<>3;saC z?}U|jG@u{*P`xip!VgfRC=ZaZ`g5u?ydD}yAdtUc{EyNV@tca5m-(- z2wzPU9lZ^~tRS&lD!4c6pSD!=y-soPrL-s4YI*>cVuWRw6qgtXdUTN42hHrc3A7SP zRB;)*GAw(n#6V+x;>z=B*|YFhcD6^i%<9>Ty&qtc@^j}0euo%}Wr*8E4YJ6nq9yL4 zUF)IzLXBx;*Ke_rUAJUy9bBS2Dmp1|q2gO5VCtiU1;#dYcuhsCAO$$+Q%;h8!wk*X+;rg3Fe+ zmRaK0&8B*)!z^pBlVAlrWa5qbinV_p;<`MAmXr1(?an(=H`_0`cj~aEJ$W)ao#(Zd zd;}0%yS%^?^We}X>3$urC>`4J*{Yx&Y$rw^&q0P~=G<20&r#{TdbEKBD3(l7Y}>Nm z=s#qTxd;P4&5DJ?p38l{48t7}`^8!muNx7(*tNFYaOkgB1wku^-OE_|bP3;Rh!esK zABs4qEWtRNo$b}{DK+v0YkY~#6U*n%wSI44J0+2j zt~(S!6_gr)h;~m}+l_cKk*1ozQEZACrd(xri{2(>&}928QfgP+OD3WcrD$c-h!gX` zGyoYoa1wbD|KXE7P2;pI^Cy(ZL-$DIgN6)L zfdLlXYPQD1o%LL?3;Q^(S#4qC$rOU!Pc=w8J7xfsjvcOQ_ojy$6#E964f~aiVQCWO zE|OZ4`Yg!9hMCZx;b(VmG7@zY_p|rjB6Vp`1QyXiJOAOBSwWDAnN&jouoSCn$iY31 z+IWds(U#{bKQ}`4KK96v>w!r3zya7sZ^A^jN^KQXTk#t376^YN!}(JkB@X43Vo>UA-y+vgaRq8{%EIa1a8OY#2ZBN&!g|Nq#KMkd@BLo0;_glmE)8Ly+c`=Tp%t^H6j&H96!0F+ZI6*JSp%uP-cv`Mk(7rC z-+p(>4b)JlGCM?Slp7i*E?&wlMbvn*$Uxr1tt<-;LcPT$A~I;-7s{`#?J1J+?R=j? z#pmFfWzNKak1gzt>YDA^-`dvtB-DL9p6sg9pDgfBhtIBIg^2PEY3yx~t{;jt(XOW! 
zi+^qgI=MNaD6%hHv$>T>eg%9k$Ml^cq^BAlL%z<@)WoNh?&CLaYgLPhPBei7-6PI#OtF;9TbqxYMXX+MxiNY z#G?E7&6BtaeWPmYP9@&F6lUYTd*Sry%$o;JbY|?b@7O2A`qUy5IXrsF@n?&ZkPb>P zcS}>RuHu^F>60j9$-ilPGz>6k=-ZvREd&S|x^#p7Skas|OO}a*OLPjF{U}f}h_PDed?A2yFT|3|tR+Tgj@QM(&Kiykjv^_U}T-CixE^(Is7`hPJNG;p=W6DV+9OQJx-V8^gLoo=HH4Zxodu z*ee*~#)sC}^r5zb%Z6Ap(wu@>rD6^FUGIwJ#QGix{Ok_R`kt+Wj}sK7C7MPYNC?E| z{s=F)Lz(iJzRlC1|A;SZY-)Lq&?QXx)Ag?46Xt2)zexQ7=iFBf_9OU&4##S&pBb?T6OZnDtQ8WvP(8ddps5cI;)3^|0&vAa{~3N@%~> zhI2V@=aiLC;e%VbRUOZUC1LsTmBTNp6c#T^2)_Ht4U){>np@Rf4pt~ns-(x;(X<#= z7in?J2(tE!FX;_EN!_so?D+_9aQ8(nBoyU7nxcJoxMA2HC`khEihrPuOyEcyrzJ4$ zE_PL&@kn>lt1u3839K^l5X6@PL-lj&Y26}USZsQ1pe~`#>h=&~toz{h`jA-@zovPn zm}#hQy2>DyUZG-0WqCF1L8x2ti9F@Q)K+ZThuu?y8+|+>w+z*}SxIYaTu3akzp*w6?fTw6C^*W_(#FOp_ag z7&4~aRn2{Xclg_D=ACWRHUK8#=Ik>HFY4}XjzV|4vDOGNusGL@0RBmm> z;Enba02DPs*58TAnvyZc)B`t-AEsuKbNBG${G-WUc07N#m*IPg-U5xc&q$Ybzg*+*s5~(C(5qd8Ha3`P z`Eo($$2rBk@0+p9P9?R$>`|I6uA-==y|DB<#MxZK@eacsa-ztYwO8C*KV08ha>0Yy z2i69dpFb~XJyp&X%v}b>w3gK;{!^*!cL64&7_k@O&;v4SD=sHy3Xc?Q2>(RdnIaPs zBB!lH1}qZ0ei7~jnLX~elnZw^ld7&GSK<}pO?6OpIJqMj!T?J0ubnu!JbXo#Y{yXS zBu1Q78E+=L8gG#K_9g$#W$Fj6C4|bkh#V?&?%xU9MDxa=ix0$p-ig8EymMNU`}wTUmv-(<2$ zT$szlMg%t=2A6*OX3gyqivfdRmGX!;)w{jcrAgp^1mr4D&)K!)n)czaSu*Mu4Wg;F z;kaO(ks_L13qZ{scl02@&8@8&8F=NROe9T5@9R_~2ULoDCpq-(E^PkD()KHUNQE;N zwcSYkC^=@2qQzqzh>S5H; zn~hcUw2R8u-c6a9e3hgttY=4$8^|V*RXvr918xv~>3GY0%1Ix|Q)@P)OzS8=kbZ?q z+L1!jZz+4_JFw!Ont{OJO|43o7cWtSFBKeZB)1xwze_}_JwUD?PMy(f|6QNaedQ9_ zd9mFSOvDYzqe!uJ^G*~f#PVSsMH_o=;=$b*x#>EtA4B6bD5K%Xw*6MrI1qw!?|oepj3#ptFkH z&A1$H|Cuk^w?HnPV}X`x9=&KK+|&E5e5Gtij*%cGklO{r+16&hs!M13xBf>F?=lYQ zV9R}sV)tC7TWtt}R5c}26uo^*@|NAqBl2Ub{^Xha&8Uh(LUX6H(CVz;(SFBpPwZot z4#{&?lioOpb`!#?S$o9ClH;?LTar)&0DpL9S8kO(@;wbkM-=NXFdp3}wRo=Il)LM9 zZ#Tu?YUga#Kjv*1$Xp_xzC<3?dOs_H1?^f7UWI@8tMiq8?6d&!35~_umc&SKBu;Rnc5UJ-SX|r z`Mq_;+ua4SlILxMY-4Aue>=R+(1b}VTn;jF^lf-SQty69Eu$2KFZlpOSbLsbD7!o- zgSj<(s1o+-Bk=Z>YYelbE>@3ZuZZh5afzb7`LkNF-dp_`_+U5);+WWeAJLxM`CfLu 
zJn@ql1J6)UC$L9I*=-hY@?`<+TW*wB;%bvssQJih0_r;)NB7IBFS5d$Ba7L#0L9v` zrJI@=!@0K3cKK&)R4wB@ka=**vF$CX13r*rO1lz9H09ziQN2v(!!$xwQn*g5FI>%? zbuX_R`S|UAJO9)EQIHiz2rJRN2&Sph zO(n%7A)C4v-vOFW>@WeIVG__k+sk#F&%XBDshc05rHKk3dw#i zm8$DRMVZH=FM)tn9T~3Gp%tnnVV&LAU$5F`Pji;`0@qA?R$U*K4aMSjP3AL^lts#) znVRszE6O?}a)7Q+kFT6o7b2s-#an@&?@x80^EF?+o1^%*C~?3d#{hP2XtEC^rcvI0 zeql=ZBU+IiC2}z0g&|CaRj9KWnU6M5wXGhXY_7XQjU<;>z!!#G7^bMLMI1k8S()z! zk)=q@Y@ardmmEnALG4|m)t6353Sn1N=hTo|X-c6zf;*b1uKmaJSRd8oPE7OTW@x!g zNQf7=eT!OKdQ^|4D?5Nc{hlF=Jd3Xydd7Y$t} zpc3kt+_rW|s`AAyQ7yb{&HyXSkRm3lx!MvLrAIivf*dA58OJsE2WT&Z+#UcI#v-dA^L1qP`DOeqAAD%Gzt7PG zCXN04W<*prvvEri^qM0k`of<*#lI5mb*ILey9uQK8qu2o*7QD(HxGyIKT(uF!IaKw zQPOglJquTQXB#}MQubc8@VdEa;%f{e3G0kS`A=}TrDZtHA8}itU-l18$It?iQkbe{ zJ@JmT3*YpR)l0{FGbF-+e>St~gRY-nyVA-VibQhThZv$7lR0pnxOnEzU#PY_2WtNo zJb!{Q>mSyeNF6eWCfEM%KmL)R`Tey)Q<$Iqi(qrMH2zgj{RgD#PXX$Wmvs*@VWf;L zTk$%f|JgInVkDq%aDT*7eDQCJ!+%0s|MCRVbC1IxNE6wBtBXOTi79t+8dt$pyi1=q z;;E^rbP_Ry@eK_Pc|R7bZONGgC(;(gb!vkPegPV%M z>zfEu1M|7K19W#KFUR;#i80Agur>= zR2JN=I@Pg>MuC)j+n;kf1jfEk>Z@#Rr6}~{M=3}mYLlS5adD(wP$A35Q$Qe- zp^)U@aPh0v*9)n6F66ANN+&JH5~5gJ@7Estm8@K=Qonq8t>p#AN@ei_xfr+EEY@fd zdtV?~_C3ag$YpcswLgV2->E`+mh2PsTHWpm`BL4IGg?3NCnm!aO@2{(Z#t69xSO%$ z#6XBneiT%IGf2p7hNlZrx-~s%x1>_PL}(Z=U(Pd!gTJ z?Y7MJ%dHW0%rt2NkwI2iUvU@jh)SN0B6f<&^-I;i$A18fy{0$AwXvSF$9mbU2=FfC zuv;Os%_MzgY=cBMMEZ`66@FM<9)_UXr#Enh6rS?oTRq59C4>?;VptzwkigE4l1vG( zp0zy#0-z#FTK%wh5sAFie{qA<8DPA;$Y8fdzf1M~;*!g(k*<64>OMiIOsRcZsrSQQ zTcTmXmhVRYXW-zjQ8^Xw>>bY{m-`9--QzWP%H6>W&xG*3?oIYJ#snJK#AnEPtoR2^ zRaltIJ`H^?F+G=IpP?bJAhn55nkKV$m2hDo2clX1^)+fBmdZYF ze%;yC@gqe*C+5&|;v>nuuN-*$H=CL;CO`=jald*flV5n%WtLmAC;51Cji9(Z7q<1C z^qcmf?2J?@o`ex9_XEm(8yiB*Yssk!fyoyC&Rj8=0>fF9eLI`v-L_zRYMr+ul5&Vk z_I#>z68HCE%WyNeQl79MRE@>cO2U7$gckwB-*8N)$tf@*?>;XN8zNOSHuR`X9(xi9 zFp=s1$r92!x9|@vp=n5utU@MHUD3ppm0Z;TjiNO)QMY7(mE>9hTv8+WBtibwDK^}b z_y5ciinaeUODOocC;9&lB}@~&=d^*VXu47sTP@eE75DRe_Epx^WoTS}6xdo8Jf5D~ 
zF7O9&9C1CqFAk+YMECdUB?ODm)?8osyAJ&J3JF$LDr(HtV-Clbx)B!*p>He$VO-<`-K%TCA1u^SRXnMm^j; ztTk(~O|gIat$^O7HHzusUo|bcUh|d1-Yi+tSv*YM`gamq^{%<3Ej=|8o%Vm%*)4cX#41_4;!)-julq65$k}oqQ~uu}J6d4+8mja6woAy5@ylTmLZc z(lrnxrB>GuX&1z)8JSH=RslJ-fnvk z4V?B&gH*0H=)E?R=N<-?&RB!2D9oFS{lxTyIYkXHoaQ@-lMf}DS}0;zTwj_7`1|`C z%(LC#)6@K9K?|oEsnH7?Iz*pi8VE2q%5Sk*dRnSB85l0!#B%Qb7yT84sATz-448dR;OWW$S{ltn z3sKam_exUmZ6-aEx&ZyU5HjVR<}n!=_5m+to^SQ1X6rQAC*0h)T`mMY*lmD3yB}pI zG&v3r>gNoQi|9|I~*uc3GcbX zi}Dvxm`hN+;Sx|~07QLKzG^@T)m?S7hZ;xJMk9pX(xyfMBA&}L_blyQT zZCiEQzGX%Ht$f}iizXi)jw@1nc`$BQ?aOWg~#EbZcXPD7#)Tbii8DbIAQXe8sjB}zIA#ocqb1vrzD6P zO8y2FcR8C+yAXV|9d%-G`e4kn`%S7;Lc9a+TqG6S9uM9dS^J!X;%v`p#?H*BIbF)F zz1DK$PUNn8MQZaDQxE18^6Z9lKKilIV|I9B4LGDnxT|Ffr;i$s4JW<}Zj@?-IP>Y- z_hpf&zSII*EH!+8DsnaW`(Sn_gn0>vrp7U(l{!eF68l5;#5p7m&L{?@ePeBAk`0lP z(BH>8oALwq=Wy^MZ*D@G)GS}M0tH!@F6qL~R=!=&ZkP4>duy9iGT)YOvfD)A%Ot<4 zzPTw_r*F&C3;HV7_k-J)`py$V$;ZL9Ut|2Sm{fUsX(&ZKIV5Z5?}70WLCq4c-^XTL?th^A00~j0+IJ0K zZ5##kmz2hnL*T`1lmrxme`qFyHhkE)(Van6{u1~PvV!@W&Cc6rI`7+F19KY==TJ}W zUhkp^6LiP+Dr>0Z>ZaK2c4ED;+Y5fXr;y2DIjA$Wvi#**gPAh3{9ufU zCH;uE7@hkA5biptCswaWJ0dpNOS$|oV_$g*wymmRB-N2z&;N=N-D&hW{||c4{3Q)3je^kdWT)UdvLBli4)-z)IBHzaP13I$XmnMf!#rT6&p8 z=f4OsKu1&w1Gbz!BIm!{{^Czt%bpXn;Ym+V-8s>2I%1ebHdPf_30l@wu^L|utak)& zI-44MHgjz8q(xWx)Y(LpnROur6#V4{0Ow?!I;`i@+~@fcv2ujDy?1oLE~*Im^^l`U z|6q`4m^lHZ!*X)1UgRrq1Cw3HQy=qC?r=}oHne`8bhONadopU<>{E`uXoi8h?-7$7 zQ_n(dRT@mHJ!`+bj{RhP+)@v=5_?*WiZdSVQ&CoBSbM%13+o(um5Nmnn@dk{&>{^t zad$WF0}in@8LZK3(kO2}&R#lS^lIJ_@aie5WWQbU+iqW|b}qWCyE}GUg<+b8{VPHL z^P>F^FBv(Aa2NneZ3}Ta3?oG(hhk*`SgpvR)-LYl)a{Ji^zwR&kY zoXVn<2Io78p{Q1B?YVZ|lGn*IAx1OM`lPv?-0XjVB6i3sE2;o%>cyjkG*^PC`|hmq zu=~(n(XAUj#U2z|Jg<4C&&4PJ9LNmL1e8rjtd|=ahc%iZ3t!WB^@%&QevN{D;szJ)g`h#C{ZaR=NKwi8Y5XRY@-dA}q-Rj~q$$>7#?ubYJpcSI+> z(nh4H=*`2lrYo05m`fntT-+59ChUn9?MiFiP0+v-HM?G{cVc#GV_{e{uWak0MN7_V z{g6SHgeBa~W;5NUXjiV+@vbznY+FxKv;#LcQ*u2DpA+gww5mKcn%VIy70SE$=qgs~ zLV35s<)p6$WR0um6wZDNuTELF<#fE$mz^GL^Jc{$dT~o^vM?Q*Zh`7E067K)*w3#8Et~^0JTimTU(L0lFEg(} 
zopdRYt+T}o<}3X!bIJ}gL&I7s1cU13VH~kPmlS3DE@~C@*7$?JY@IVSzw-=&lD$-} zsHjeJ0z%xw)H?lRKJzs#YJV%vtbY4%J^rV#s{~3j+b)^K3CESoe zks(oFboOX@B`MtM`x?I%I1jDX zr^KEcr_3qTD~U2!{Np8^-+)IGMOlfAqyk|0u3v%}VRQP|bj^+{_+rCLjvp4;7?z^v z)(pS68m$zrQ$=v_-&Wl9((DATJuS+xd@K#$3$LlzrBL0#!btx9E8T?z@x$}7LiJHL z5$sSpfp7~`n$TJZCg>Q<7t-vIv1=xnv2)${Z-vSK?C7IGhNe6*MsIXuDiG{5H7d1( ze`r(v?%o?xQ-PJJ;^g(%2ia9t)Q5}OY2hTXjW&wz2%}?(u$SYiA^Q>qmWIZ?y~{L= zAuN(OsJ`uI-;(19wWMo@Y*qL3!}MbiN6&^=iK@aDu)(1df_*br!c@sEMcGDo zy^NkM^!GpuJyZ@y@hrJs18d#Ub6UyUsF{%O;b;h(=@@&ctjXbLCh`=`SbH8*%URA# z6^u}HFwk{9!#RC>b^YCm=(Do}iW8&jv@zta#607p&J77hdiJ2_^4-#co3>-_wtOn> zaT2M09CCy+-n2so+5WGMmOQV}TPfZX9IRx0XC1Kf%OU=}fNob+>hr8>Yf6NTL7)hV zva1*s8d92Zh_Xp*8!3D%;Vn(s$g7thu;O_#m$g4nh=Y8fEE`fnZ_pm-p*-MXAr*F` z`zRtf9BrsWWKq-l<-VA8Xl;hN?&nA4DqB9`AH1VjOr!2e;oechNfX3QKdTJ%2?tD9 zvt9c6oHwt?5zwNqqeD0Y0>0@5W>BQ=Bf5t}gan?7SBW@q&;F_en{akYf^= zy+_CM&mtk#%gr^WTZ%D>D(kD1|0ei&f9KnopgPKJ@oM=E$>4nURO#gH3>&fk74*%c zQ)-bVF+&Nq3v+F1)=WPBtcGm5I3}01E^Vr~GY7vf%RS5aA>FD3w68*CF4{~M%$s{~ zu1z9IMf*Y0hjkoW-SBDr*Gh4BhA*d84XL1esd7d#On8YJ;cre-rTOE~sFTeE?A6=CDw<=H}+P=>? zse_M&_0?U#t+nrvmiW4uExzAFL>*wRS^T>n3{9weKxh6kkK}bro+T$~i6y1I1-jrB z!G1SLjv?FdCIW9L%*1qJeHoc;yPAght9kZAIzvi_A{Zf_9nI57Fi7`fJt?V;Bx~bN zOn-{|hCW$f!amuEX#KuHMZ2FORSm%bk+tggxMk?yKkHiBMQ>cDV!aK-K5pTK)J=sN%>lri)^l(!e6tV}*vQ-fUhL*zN$u6ADZXH!_)DU5 z!n5pH#ht6B|0W>VU|@k51Bg(hjWCpM@DSr)khV^?zmPTr!UQfCpn$okNT8%-D8+k% zYemz^o&8uw>=9ci5^1``C7Y&p;6gE)!Fh;l4#uOGn0ypXiKor;3d=1Ivl{JFjs$T_ zoN;;m9nqEn`AxKW2v1bJl5vR7sZK_`pd{%{nksfNZqIfgX;pWojw*TsyFJ$F{1yMEgs|dYK-)9&e*$fv;>n29b$q+;U(w+^e1^`? 
zB0KtAy+VHo&wD13<$#$>cL+0|1&qMm&zh_JW0s|c)`P(rzsw`BO;t+Ff z=lLB_dvtd0F+E1p*;VtTcfTR8(YW}b&&+mf*-26C#u)wNSlnbEXVW1*Jg2A~77_PF zv5Kh8`SgWA{8as~@C**q0AmJ1;&Umpq+$f8z+N25H9Ft>Tfy>{& zU%KTPXlE+ zi41GMG7nmVjNEr`U>&#ML$|&L;~DAN>t}rF=f}gdg@Mb18g#!Og!jj%{y51XXdjfI z>tW1)6C6k|FDnre6n|P;vQvIRGTkM8*B#xALpENVGej&jyS;0ukKR6`fE9MVEtegn zs0z_kQT7E~&s&iN5+ZfSgjVXMz&bp~xmB$WmGtMGNJI|825TgqPdU}d*Ec8nzRmi0%OjMKrsX2!;*vrZ{K#ZW*T3+$RR{MmlKjsW-*MMxx2gs2M7njJt>qp1IgT;Y~5Q#Y&;${cueTC zT5~Ll;7eQQ*IM63_C9^igfeOJt57B_%oqjr2HJoXnl1pMXE<2oOA@Q(#@BBEn9&rM28vG!P<@RWgFWy&(4-{2Zw zczl^P`ZV$c{tV~H+OMxh{nVyg>n(cEv$Sz)aldB?e>_VY7&orVg^ z*{xNJ{VxAGueGLfXGgk9{JnUU0Qb8SXRCCrMNV2i)jAPxrfe>akoalql|P2zC>2m& z6>-lyH$T88hrQYF(2XvG)1-3)HR#*hh)KNFlB+9~zL2JivQVQMN%C{u)}D!DjZqBT zmjgk+sWSx;!1}l2>@gI%Z(nBBwT^>2t9`3ayS!GhB@RP4`N777-ykA|~sT6Hv;@%tn4Hv(> zDMqyniP|T@LWZ!R41lfL%*|vVI%k;1ZY$V+aryPe*`Myo0AMc_&;h@%jp5M{H#p>IgAtSq7KlP&_;IcV;XWPZw z@^)xK=w*>-Ulg8GsYJn4aGb$=b##neN|~Rs=JIcfSeJ3eo&m9(dHUAhv*O&`gwl?o z{FcZhW_M)7GIPf-EN4sRM3s)FXN?-`+lqkF8>0DBDu+VyIzs1WlmRaMRsDNI{W8?Z~jdFYU&<|fbwx$p5%f)m_3T7HN4VDY3wpC zqm}TS>H7ud(|BnGKT4>CcibTiV%-}rIDHNnO-KFqg=9XG6eZhd*YYymfW3N`zHt2P zsKKANh&8%xPR@a_k7|KXD4}b{n`HCoB+Hqjs+@vaM#9D8*(!+nZ09* zUQJ`+*l#dGeo1vNfdv%zLu9DOt+ClB3qE!6Cv3MZr2Z7J=ws!@{rGG=ZG1J86C2SP z=V|$ENCV4PEKnn*N{enNOw$SYDv&DNKmVwlJwLCopVkidbyR7-5rHI5saN^1<@|MX z$qRmTXB;Kden-XnH;E3;I%u~2A|k?^1ZvuptD@jnhatQSFDbFqBvl7kizXDKkAeK7 zrkY~YDyYENYqUA1Tnz3YM(d3aU%ScipZ!Lklv1Z>XN>)V1Yv^p@ExM8q8$~n>S4F&X^q0HQ6kiIbx{Ux zFc%vA4TOJkm_z3DeWNp8y7mh|MG$+QK}_>h&1bhr_K~E-L`0;T{zCCetiBE*yOBPu zJ(T6I1;lST4!ETxpMXAg3Qvk}#P?qd_a!~z`8a8AD1NnWg$2;lk7CL@72 zgDF+aTU^sbBvlKCmXP>-Hv5b>R46~+Vh+xrs zB4Wi0WNgpU2p43ZHf^uB%Te-P;32_;=eBMF6QZmF?7?#tYsK5qnQ$xgFasEYpNik6 z)RrDA?tBI|4mfo(><`A!Xw$fByjd|}VxlD)`>9T?L4LCay-$5Qm?_wP#$ZR{eS zSq>twsipONiCC<@C@L%bn=8RT2(-}aeP_g%PDhdTjN*06j=!(K|1+wCiv+hR@lE-= z%HN(c|60m7{E0{#$nJ7k(xNsu}n#gi~^I&g`W)?+uwh(T~ZEVT}Qx;=)az zoG)~*iD4!V*);vCOKpk!tTN8e2(4V-B+907Xvyo;Ntzjsz1!ZCA<2^Z?c&k_6+~=S 
zT4r>>jD9ykl9xs!jsz9{{NpWxQBMk~9GIrqpXOEtF1|wTKA?Vl=XG<-$(Q;VJtXn{ z1dM*HM``K>e@82kVIaz_cXlQ6nF2orH1WVF7k)3C1vuIYykaku(gm*VoEdQ2tTB55}`JW9T;vtY+|~LTXEM zjKjJb+#uALamb;y=F(fUM3b{f%B20taX=JfGHjC^n;pt3vQY^_O;lsy8fNX0bgqw* zLGi^$p_iOI>^+83LA7Qp@XQeje4gL5%0TY-8#a_DS|$T<%OrLqrrrA)tStZwcc>R! zQcN1}X)|FtGnUQ~#awD)46yE-PstMCX9oCxBdLV)2dSil4Y1pb20tSw>yv)2O&%KH_!`36Hfm?2b)hF3Y_sciqJmAY@hNN32fi*&}>+3lm#0&-B zBWb!G_Nua0ohG{x;pEX2Hnl?k^AV>@MxnT11oX5+sP}}M;POrEweN;Ax3rW|uIK6{ z9vkwfsp(2zvG$we=frpPONMqMJWJ7i_KjPvkp>L+`L6#dlK%7Q@g;x{PSop*6AgMC zcjh(fWR+>_)h#@|_0P_VP^?h<3mS7XDe$(_P$sVwF9CtX2Z6~t>)5&z5{KzHVY(Df zu@-{#0?*TRf(rG%O*`oLSmOuVaQYod1A{9Z?}{9J3Of6M&(fok>=Gr@IaHMB#SC6b z;*4xC)hV%q-%*m+y{!3ksa8CYv)+BhVE*WAPEarRI*~^&Ys)tLeSztz-+mZxI9W+? z3w?$T*#dU|14f43$O17+980h?3Gt~V9?ES}Tn0m`qGr9Cy3AHgW zN!h`s05(al(&*E{}b9}d@|P=cUQ7xmw@m7gOH&c!Fl71Wqd#8;ZyVX@f%j< zXPf5DO{}{!1m=d;mrKz)jf1t!{LJrjfd_pZA1{z!ZzrEVea#nM0hrvQFb8fy{SPjU z_^?X+o%crBx5p|3ytfD3*Qkb4YES+5%!XvgR~(gMg#eGHCSd1@$@GIEVpsPrpih`2>OZ8jZ|bV{QB@pa2ji zGmN<5QCAA-wi3c-g~lJ$*gn`pC%eDTTkkA{Y$(J^k_;Ne()^rSsln;6w$LiFBM_d4J4AHFJ?`edV}_2WiA_@Z}h z6_x6p?b;JWu=EWvGIxyTEpyuaJhyh!6KZ1TnqaX;McVYr5qrnVP|DNb5EKo>?l5KyMPY_AkDGf!E{5@m1JZ$##k*=*>^pt zWcH|;{r_scb}_PGIN~}xD|6T-9imJ?g{S!WSAtf9Zoch5JFAg??fcVOO@FAfdR*7N z?P$RFULoXH%dc;@BcdU{4*J-){Ip1_lS7HaRtd{3u)5pTW~Y~eof_=?p0emiMSUraD<0AcAG z#fexnd&Oa_C^~Jc!@fhUh49vvlJMxR1o73!vOtxz*T`XfE%#g5bwve);EgxMMJ?h1 zZ*IA;_9fl`n7zl@cyOGR5m79Y1|hvZRR0%cZy6R>v*&9kxVzI>0tAPkja!1dJHg%E z-QBIR5Zpbu1&1cMTX1)`)6dN8IWv3a+3$Hj^_RY`UcFY;s;b}ruDkB|@BQWJZAUM@ zYit}(yQ8>%&uE4y8-;yiZZvdtskqj!8Rska$m3>r6wEFJeD@Ose|E;g8m)GjA4U}w z>}A*gDpASQ*81`O$d3v4b#(lRAP*g>51=z)mKvr_uQlVc7HcdC(wj} zf(ZT06YE(M#EVGtGgkHxC=h!Agx-B!$gu>~Jr{$`w~`{xLGJq%mDoI`8O9yhPwZG& zW3Y+30&<6(*0*{iWdANS!!`ME2Hbv?0FNhh=8(o??-YSuw=WFHGQ8KUm>vh~eC%(h zF64Vd=)&%UztF_NN#beivtCND+({U~Vl8}}kb)nOo{HQmfC@r$l?Jn{sA7R;kJShE z69c^I94?>{5BTk#i~AIFxP0nbnh*`DCC=?pC=1HeI!~yyb>ik-XX>f)doZ+B&}Ycm zQ>pBLxntvf0OsTMgaLsh^iuaz0Qd8pRCSi^&N*{68@|jUOMWW%zU1WlMaI3eHSTG?*evLOyzTR=#$?PX 
zIiSY$h4R+*4Ap$thFedA-L9!8;ES}jOCx&5z?F3Y?iT}U3B^!XBN}$F0g2=c<)d~d;tFfzAv`SL0hQ=SdxFRPqH6EiZEcGq^EAAbc}g1F$HV#UqO(;7 zl%ubZ<3_|~;A&^hd3`C9r>JdHC<_lic~q-O4c_gWsq~=55Yx8@~Ca z8X)9nq2l34ZxV(8>fFnX8XML}e{PTTpA!LP zeAW`3xaDfY;A~z{vF=N(>DpYC_EDLf83mn^z+1*d28#~L*Cu4EIY^oKq=KX-!H#F~z`+?a@fwNWnbM;<4?Q}NR*5?;5ZUGJvNa|ots`_xQG zv6e1c`lCdn^X()HH6RK3a@^0Rg>i3}`v72#l_NY>ef`+A9{&vnGFI>$p@ikugI?AC zZnQ}F`hQJhHlMolrEXA+8UHek*_$Ogh1%AM!E+t4KUkpwSi`WVlu!bUMt>8^eh>N~ zhXqn@(4c{~R^E zDQT=)r2$)4L<}PriZhXG)vJMu0bJef{Q@j|un1JnMZ!$vejzZc-jntEC)F4R<3Chm zEz8B&2@s3Szf_GG@Bf=>Y~#nJy8f5xDw~fpoK%A7!Kz^ZU#CJvLKA<64R=W*4XD8r zZ(|YRFPEWKB^Gz2)07Uf%(R!Bb`q({IA^{!WTDB<6;`Pf2?Ox~C#oE4H=FI8*xMiH z8#qEaj!w`hUsjTjjt*InGQV6;Z%ao)w;-Aj4bB|4R9o}Pwz*34I6^#klUnG20e=7A zRus`cvEsxk3Tx;{bmRlWt+MXkIiQFm!CIYW%IldRWXxL4k8}*(^wyX|Vi~xr-Nz}X zv~V^RB`67vP_kD1kr;@POC-fHr5#yvAOGVI97K>}f!MGYSlcqQ;1-3dY|KQhB&JBTMDlz|i%+ zHEg-;OBa~HJ%P&(f6>wp1LJnU{-n62)kt_Yu6WmrIN#x zsJ7lNDSdC?#=t-XD%JR2>HTEG*45NC?0c8C?0EaAu`$*vv%qI93BC}mJM*sKNh#FSLBBxduzdmo_Ky1nx?O9 z8xD^J+>Dk=vPzIQ2Uln+&HpBcY;cBjbCv~;&2wb6wKom+m!r_j(>3_hG&{c+;=~X$ zycnb|{g++gT+7weC`iAU7D90gjnYSH`>y&ww%`4G=yYcO*WkeuZefc6pd*y0UuOl@&a0PyF2-}UgLg>`9NjZfKc{8F4>H;x1t@12tnH_N@|d2VwHR^8 zZSj?`ER72&0Z`rC7KjERYfkjYf1G@fwSFAT^AE zXA_HqYT*>-!GNkUAYNM|+h*vPXYs??1KmYvE}Iv)uj!PEy%Jz~&8;L`ExIY(=MzF9 zZH;_=Qb|iVZ}^qkO{)c?#6(JQ#$#!juQ!r1_PIb9+z20UTM$tKU#3#7o$g{#~49-_=;y?|r{) z5y$t4uZdR7YBOK;eg_&m=?MZtEpKz8OK$stPdasZWV>&$$vUPeO^Zdn*kHs-cI8-Zfg`{A=qOdHt~;%-XQsNoK!Y(byttdWXPT@lAf(( z-+H-~z=fp8MswFKI5*4DHaCg#t1{o4p7hPLjZ9D(3o??36{CCSMSqGpBi{Yj<)YNT zi>(7T{z6gigchpf~w(V3*% zg^UE`#G+xVMVEziqUrDH4@m`r{x2C-_}|H>XV`y`QO;#Q{-D~Ir9ASqp_YD`3N}t3 zzv+FQYzKSPhe>ace=rcaiR}s@uC8YoCgyq4Mkhi0#YF)oHe^8G90Z9uQlX(K_(8y^ zHXxujF+q@U5D-w~wH7w~WYHt1>x#*VfN63&fhU>D16@FUJfPqn7ZqF`=x$h2d4-qzk*!#c zEmtm{>p#h;SnK8;F~fB49Civ6GQs6$yEI!q`3blTVDa}4bWmnLc4NM5rl@=WLV4*4 zBTY07UPUqex9H)kw*HUT?5=G-HU_$}&afJz2E*qfpPeiHc+Te-uSfu_vgu~; zpRRBQgq@VZCr+0{8^V7kEw1*!u$uhyUi)hCI@-dl;zqT-%$b~x75@kz{&^p9u-Jc7 
zhh6VLo9+K4599u{9M8?!uJ+;552jvan;p?~+}?4TFk4qZjmPRx7>ANT+_|Q%7Kmr_ zW;j_?3QcOC$caK_HX~&J>#m_OLz%MO8iAwC$Q-3|mz|EKM67l%dvE2nNDHFtQ5nuq zupP>iEAecfgI7XC!o<=at9PnkTx9d*Dq}IzkI7^RpU5u5DK6~3F#Q+mFmGa_R2U^H zPz#gUJj&W_(}0fDvDtJE|E_|*c=3$g8eNDJeXu3vrvOpMs5|0e`f{xLo8aXlauELb zt|OBAoJSR!-DuJnCFbQ$)~hHFFJ#VGi2mZLm1g#`Y@`zSyQ%j;&dMJ;hqG!-onMuw zbq279pCZ6cjb#Qb`v*UTWAA+( zB^=6k`)lj+*vT&m_6mRGf!uL`g7!r`zPto4_878nfwRwE`_w$z1OF5k3z$J#H)fOd zk;ASy*R_>Jhw!Npa=>i!>gS#8t#FsOQ0-iZ@4DGKl?WZt3SJT`_%KCmyWa}NVlft< z+dL{inFsr!NW%6nU`jMmi-A|=*}ZZal3E&|XJ|)>^?&1?iB&<}4dUy3PLwv;Y*$7J zfQa>%_qK`2o6tx}5u~N5(5{?SXH`bK)?hYwYDXx7b)#{8o6!H3WDeM*z<}6l`y521 zh4sSbXbx=`nv|vRz-$!RJR9*pmh$Ge{7tHGAxeo*NZJXcw%Moj0tW{|CB!3zNw9)` zsbn~NK%98L9El+fX_2$%DdLgIhnuG(y&MLfq7 zAaZ}%d)`k5;wzFPgdE8Qc18S&A4F3OzAPI6pPb_3!cM-n;W6}l4II$=FqGttp)OZ2 z74hKPOE^zw&WNG6HNL!oy6hIJwia9x4ue#=oDW7)o3Y`}(jSx_c}Jg^1KC6_Fst;x zz*k_^|Fns*Z(xvQK?(+GjwQjV-L-GOl;u4~f3PMKpFSyg=#30SC;YXvqV0OWd1LOy zXK*nhC0+Q#I9Wv%b=y*F--ABek{FAk0X2(@5NgkQdqNYU60^rZUsD{*+9LZtqO<)c ziY}vG!HguwV=RpgM;rN*k01{Ig@(}>N#AdahJLdglP12oEpgM7plBXwG#hFMzZ3oH z;RF^WHsxWK|6sE^o^GW++53?IyC9|^%-h3h$HU&QVw;AD00?N-Y!twDWawUsH|Ugf z6w94repD;=%&5~Qn!)w<7<3ZXr&V03(a5@B;WP#*rTmpJI8Aqj#T$*Rcb>Npo2(r# zUP9mmdM}}GgndpYf6-Xa&;L$i8BlQDXN-LCh6q_593q;FDB&|~Q<@QT_G}IRkWZ3U z{nhRu_NNdsEu^{6h=ASA$G77ScyGW?Bd70-*ICCx9i=wY&n#1x@putw87wRW{$W@p zrfuXqJ&DdZd#3;lrg#_$e=q3ZL>QF!SH%P@Oa#$3D~!W+o@X7>uep1xKI--5x)y3$ zE#2JDu4b;gl@sl3ta}-IATEs)PLeAe-)X|28C)2c_rgLL82|hc@EK+R6G)B|+UkiD z@n^YB#@f#qwLPEkRE#bEIJn4$(NlSdzw;AuvL-}LIUg_TjC%u(Nefk zFI0#Py{A4dl#Ec80@ilX#(Vn0cO2LfA77c!ZlCX0Vp%1pYA`8eJ9-nS?%=_iY#xbLBpoU%c_hg5}N-a_=7- z-QvDhmT(OG&>0<>mDB3kI~Ywn6@*LFGsnj;z@wlnoBvpFf+A6Wc;RC69<5>1DA;G6qU)J)weZU2H@L7 zNE@76H&oEPu*+7Kz}7AMO{s5Sfw$+#Tz*<)h=A%kXd_ruUD=V&IyJ@Ju7Dkei2Pw& z2k3my!hd^DW<5?#MKy^E2&JK63CDAErzb;WzQ;_0;;oE5nVKqS@LCtpYXA2BWtqfn zs)|3iyd9ey`5SX5YpVLzi!9lc(kZlu>uJXmDYY{a#2J%jYBuplL6+b74d-e>$Kk-l zd9G|))z1Y~KDz6yCO+mPtXpsLG#Du{G(Mc)h+kQ#LS2YKiYT|?#@WeLm4BvpWAjyF 
z^9w(=Is)u2;?uxR8aB?mAQ=J+_$FT9y5}f5LaZeQ2f|l)lkUL42TSv?ieEUs>5RpD z+lZiAU(%L0;j)UVBGHOhgg-86X<6ck6;Q&x4=17aKBkW@d##ln=O3hGB^Ad5?Spmu z9%5U)S}lT8V!Mg7H9)Ezt=x({y}VS_TEcX{Qbg$yl3=kA_dHQ! zZNm(0X@o#AGfPCg;;SUNYB~NTfj2W;ndJ7@1lM?*#SA0<29*OF$_nHU^;(kj_P>4M zMcgK7dK{`Gp)urTpKj}~)~4V_Viq;Xj0Sbi@+WqK!ZPgBr5$8=*pC;>CEhozj%~}$ z9BbnAd#E)8tQ_rH@vf~Lxp}|TepKlDvbR@eg_5G^C0Tj|;Fz=3)s1WVg9U{M8yNOI zRuR3rG`5aaN`M61NE@B84rg`-2zxM6jSn)gv4Vn^${kwKbfpsNY)An#BKw#%0frc4 zf9;D2%Qf1&vlsk<8zKLO4o)0R;%XX~17DIXM_^++&SC;09FBtZLaHM}z9?Vx*7-f`s1D~k}-LbhKNU9QJX z82RMV8(+lba0)0^@W(EIu`Z5nORbd@C)ajvO%q-BP_Rx2%#`N(Zi_o*hb;8PulF;# zrO8*8KrIuOl$h>}zWkjc=?xTA?cMZI$(;znA?*T0z_ktDo~(A$xeH@n%4q7cBOC?g zYM3_XbZmC+Vv#`;sA>Zd6nF6wi=m@HV4m`RsoJD)?kv|EbeO#@o>`$^KX1cS-4fXD z%}>%edqj5h1WDx^FoAt@3W4NqpbXxt^XvPuhCT2-U@z^~a!KfV6HnPQ$x<lqKCpE@%pu4~-gU76MS&w9zMhzP?(nS9!ZzFR9!#zGgkWI?UrfifQW68dX-6F`bQ z3{J56+PqT{%e9r3oc=?FdYo~58c7hYQJ9B5BEDQS%-(6Pc+m)95todtTBKD~O*Ixd z+thk*;Hav2`hoFnFxNLyfGz&x0Bg<~R}|X_6(uE&xVM*h8&c`6HG}D>2i6`&Cv#FI zUAma>oHIf10e9@Ab=Rp5*PuYr<8ro#yQjD5l9QRYa{q>LSy*G@7+tx7@9l?e>Nzzw zKft&(x9-HOlTvP4B{?|)0#u;?|M>8(#k7~)__mZ^I!rgrFYd2|w(qLOCKEN30V7T3 z`q(>*(mZd@0=duJ_ezkjUD86uGwW2F!% zT*b><7`cn&PM5RgZ?DJTNAhq%B(h> zxTNDiP+cD)Dr%w8V639N`~)p8d_DBt9a*1$jKi(Sa2Y!Qt+an=WvcIbG?`M7Q+Qh7#u^R>6!%-sBZ8Yv`SgTdM|>$-n%AQKZ7exL0JCK?$T%6koX&!gg= zuIFLH@8YJNgPcREpl3_E>hUChocVGSgsxz5px9=l|BFmgQqp;idoK}R(C@A36_;o~ zYpLFKHDx`oyIUv?qvLj0u(ZDK(yccOu^xG%D;{qwcVZrx^!W5xQq&o%=kwx1yi4OW z35!1&SvX3={W-z%=+OME2l|?)nT?0l@@UYy%XBp3WS;Zgns7Efb5~-Z`)hUA1DuPq z^MYKK&4hIe$6i(b%(k)i(?B*!o2!z6K|T+=)tt3fi#6fS)lzIkY!RlffVzt6lpt5T zsi@oDS_!r4}^6mNhb{#K@gix5B8=RG0j;zqge&pst?;8 z`~b(IXU-|`N9TWZG8TaD!9QF8|D)ewv{2hG)N%-N#7z|>TG%^>lg!-AtgKr0Svw4P zU*0u+Qr_rXQ};apkQeeo;;leJjVaoHVV}T0o&&c|q@9Akn zMsBjGz?wiq;XXDJBEt8bjHr@`+Ev9zZeFW)3)r_cWuI_Mt)mr-IhDnCg4B3{y{dvm zbfE2!B1is>23WY04kI-2`e~i?9Qo1&$-|s|16Jw}EYb5p_}HM~Ysq1z!ZjgnSlSlL zO0-zCdTfW_KyPSsSMS}QPkq)cV+$%H;ZhDfW4gFcrJV(nu1td5VRc5+7T=7+=QFrW 
z27V}hz~&)8iH5^+wb&K8yt&M0qP7i8AM5e`gdIU5^VltwQ6n22Ua;su82mY}dKZjm z#HJ7O>5SDNz6aQ8|Fp+kb-iLd>^wHQq>jw~Q$7JqGBq=q_Io*PX3FlKCNxuaVzYtt za!bQSizwO;e9ysSK0HqL%~h^E`VTkK!-Iq3s_WLrb=-Al#WDYDW4zbDb<>dul%!m=K zn)K^;#%{~uOka5H#vhz79`H-_vzl!hKiZEqnKiA{QAyLJ4HKHptt?r7wC{Y&E~gEN z$O-5X?tPRq_|7C~hPLx^U+g^LbEhOL8)MmP?PYq=W!-R$e zBfpJ`hT!dhI~TpW>68>PeaZL2&{-tBkY1I?L*m0?cQyNN5onPaJR}5-JXMp)36o#c z;@#NuMwfa6QVrJ){szHmPqlmA){}S@$H><4~;kuJT?!m*6uL&&Q`U(p1x=!z3U!D8sJF8+Fo14o|q^M1V*2wwRMZ5fx(MWiH9FC9Y zN>hnTt7J4>I}>|cwF&Zx!J`54*wTNsZMLl!;L3b!ufNcxV`f%|ysWK?K)f$y;0JmE zfq+hr1Cz(%)j3E*7$qe3d#Oa789JIXr-JKW==mjoWpd`HdNt zB_MSIiBNW9|1qi23{J4&2JcQ&OHXW8RW*`;FAkS`-Oxqt^cWBs9sT3BiF37ZNiI0I zHg6Hqe5M!wYCf71(}ZFFDoFuh0Vnen4{hFZ@&z8-FY~19pbWYh&y02spPgB^j_y&swO}z)d{q9!apz^cRcV&fKu@T}R z8}{ZjyQ_Y~iF0)rue(a~Lis3$Yei8<><5S(OV-;@TRE1lX71GW&^SdJi0!JN8RKY9 zmcHoNx;Y@HmL?ltKuJfbL>ox-69DQb(DyOJ{(8k9(O8HGvPLU_zrj-B_N>frr#FQA zHIq{&Dzm|m2W#P!;)$|TY$8LjNlIWKeqOt> zmkXvE`Mz$nQvI1P$5zyC7GTd`94<~&&VaQ@f#$WjgFnmBM>Od=fPx8Z^>J%9b%0}H zspyFq%XUPF;7|p94jn3?sq&5Hw0(@9#cWAV{i8F>Iq4p=*Hm};YZ7cc1q;C(yh+qh zkWd%O?VgnrxE9W~Ff7|#&w8usV6Tv7Tt{!?a6N*^Accwomo(j~8J^KphDJ=F=@WDcrH%PKRHr^9{)adk&3hU4#yjO;MT0~KgGTM&*xy%`X7ePVt&F=<<`tO(~GGG6=> zf;@<6Lj%%4mQ1I}1)@)e7q)jsm6GxB>_wroKUTI})U+m&j z-~W@B%Ql3o(Me0rL&5I}n{7P;S!t98YvGdU5C7poIT8g5yjkVY-6|gmIFAdR9v+yby z^?%rs5BsC~a!E{Z(UXansENr&7$tJ0d(<3ZyD;g%B`V#Y7rsY(fV55HBQ<>j96o)e zrWs@tQB>trFJ~np1J3jXiI|7io*O}Z0$qXj0fH}~&+J1HNqw8my_!2P#~343HBG;m z=Hqy;RD)Q&LmaxC9x~DEue|hU4YHO-azBq5iZD^|sfEl#Av)wWg+HQr=oC#?8z3~T z;w~0D09!cAtV@fyBNjQIBQ;G_r+IdNvU&0UX6fyv4F05`@#E%x^Yj#URfh??36|oMLc?MC5SvC`3tt1GmtPhUczcZFgKNfCq_h61StL$m;dcj zx`{=J!uuGWYgI;dcJ`Vg?7sE5T)&_Y@W(2hq~NT*D`Cz3X#8_x=!L$|+@kaAp%YC; z9~Fi36sGUTx38|-bMtYP_$cpx^-)8-}Rf6JnXKQ^cL+~B;>$R)8giE1buX>&={QGiStek+quu(SC@)o*1J5Jl? 
z7cz&9O~i!0y+gfjTW_jNI5_qxeM3pb0?d0cF;3 zb=!6}^>*HBZf%|Cb^7QAxGeel<;!m#HRS+G&)a@{m1T#DB9T`-;4PQU>yU|GgbtdT z`*wIGN_gM*L@cdVbIkFrb|$~(?AmpQxoRK8QD(E9m6N-6r@7z9$C@*XvtMo6_1Mz= z*)tb!$5PpvPEG}86bZfGx+9|o9XEb1$1O73j%VAn6g!L`0pk)Tsr%&9IJ9l+ z_DXHhwe864TJL)s4}F7sM<>Tt$V}(!jg~L9|88+n6xt1h`b5bR{2_y;Tl*>7CzX%D zG!qUnaomH|llxjdtd^AbHT7Kk$svD|Zbg%1?@(whjndVV%hAwTgN-*EIngPl&(~~G zukF;-A&Z=er;?ldSWfO7+OB3W_)95^T z0i0wmCl?3c1fv`l2%~W|gGRqN=A^WO#n71X<*qz@SC56@5x?W22l&3?fKO&!*=hQ* zm&62cx0xo9Vv4Vl8Ooo*m87lZZnoZy@9D zkJF=rUkLuV4Vpy4te=NQoiOW{u>A6&@uJoG8&+@bEIzq1p8-Nd=#9dP250OsOq1}d zFwR(u3F||qFr(U#!8;O2<}d^O>QfPYba+%qgqBn^pJ?k8FZw-&Z8(s{vfPx>V9H2C zw=X;8$yzG>ot~S3jBy}Ba3W(YqDZix=xJ6_6&^LYE;g(<@yINHFi22&*d-)h%h*+_#{ z3qxOB6F;(7A&^$b#+DMq$HyR(jlAc*zXIrR#jXf4^7C6zBDtN^Gli=YYG{ zNz3i2l3Qo9YGKC;y1Kfe<>3iU#KPKNsaG!-STlq2{Ozgh(2e4In#^{^rwU!``*2Z@ z^pM-+Q@2`l$(qCE^w)HFz|}><^_Sn{YaeJtIHw`iPDC zTl8k=C6*ce<*vdDCY@D<%Ezy-5Xx=|gQzk!BGy;{4{RJp9h)a6RFz&(p{JPdHC;xpXC@DW>?g)rF#p`ch!n49n1Qhhjk< z#cEf%jfe(=i^I=a^mH01iUh3eAG^p?LfAmlYIvM4ABP#$~uM)JR@*EkWdPN!%t_ zIC^0oawdFS1)Bsoc9`vG28H7(uK7#s0SBX$tC}tJLutbj@v-Dw!mKuK8ps1p@b{kn zKU68Hw}6`N3`V-g`hMe$k@1TiZp{2WEqZyOkDD>gHAH5Z*%KZgqbcHrK7=}czx$-o zQtS4nr&g#VJ<5$gbjSu)fsr#b{GjW+_D~iT_IB^Z<9?o}RK4Q;B3rgHdsOxPiR`hG zr|Y&i3ddu5dRg=@G+Q8o(Ca_!-28 zBVCi7>sM7({n(@b`u@GAy3Kg%bP8WUy{%PI)A}C{s6Qtd*c^7&?tOEppvM}3p5Wc; zi*(y>BXvj8jKSPTwv$(HF5LPlQ`G9ldhQ>ZUrneun%T>}pn^Z3{KCl5jlBbv`>Im) zrIkCHKJ|BaLa%rEKD+r&&oM^6gB<*84#_CA(XLcC(ON#nm)-mS= z9S^whzi&;rB4JGDiqitVjeU%J>p|mkTNcf#I`89NaO`>2dHf;Z~Q^; zSN_ipgn3g}@7DGVMjT27EDNe2YGZF7275>yX9SDc7GneuToPw%CApzHULoKysyr?Q3Ozln3E39F3=msAjd71!I87EeH3m$Xj--^;`0bQAU=U04 zF~?gV_=I5u@Hi9UD2}ElBO4duB`eofn2(qymPQS8Ri+CQ5)6%{=5Rhn*A6^JBE|er zU5v62wiJLI)Ll9Pe6l>UDgH*?58)5Zj2W0y>gxuK+@E;UvKMN=RdPJ5nO=RPJUPbH z#(w?U$nDO{WXiBQqkhULbR{<_Q{xq~&;?MltDEJWrKH2!5Q>H{b@4QzUC3chS)8YKwyE7RTwC#ip-w6{Ff>YYyN3V=Cbam4v z{`#HY6QXY~HAn&B@lh8QP$J=-DHY&89{Ng}Vz+g#c}_q)S2IgQoAreu#~eJVZ+JZN zp!sP-m?!pT@v_hL@LL)ot!?$l6|Iho3bnchS)_x0qZWx&p@X 
zTi(vs5OOuLP|XP8=q}_5A>W?<=nUP7f{z8JnMthwH531@K{*Q|6l6ryC8cPjW}`G& zSGq1e7CBLtI*DrSJu@ih=wVoybggA>)D*d_>UMKG3UE@xfxf>a-I3WjQ2 zN!b{J`|NtOJdd5;{1@ICU|I54BWZR~XI9q&hKQq!+gmIF9?xs!rt2Sm4S9mX3d zswbbf`O2)T3tv=2m`L>Zu_?yRX-%MD=WQsGJ}O`Z)XbPRftb+6nQ#f3iR$7?u6vHHsT4yNTIi;y-Vr-fx0gJxKbwXMGkk= z--L&nF?(axG{pC|gIno<(E6tv?HLiDf`fVOZn)Xq(6FPQ56mui8&U112JY7DF^Nq_ z$Ge^R)+oa}%Bvq!E$=eSatpy|qdc=D6x+E?F#siL?4Tok{puM`g9=t@Nx6JFFzgM4 zKAL(CgiYqn8WvY;1u)TlAK0aCO%sI@NpOlrl(I>>v1$U$(3n1Ud>i*DlceeOy&=0R zd2wG0dpT}eE-h}3)AQQOy*`MQmu=)4`_hr&s|x?j(Ytm0jp2g3*CP-vBRxdRLRh$m zAfWGV-(|f`ErHA3_xX16O>M`8c%ZWM_Gh3b=mIoJ$IaJVWoCFfmQubCs#l|j8f5S_ z9i5$<&GgLCn{fBvXI+9M3K=tc=Df~=NjhI;#IioS&DCez{)qk`+4g@ldzom+_)r{| z-Cje3h7A{1MHs}!9g~zP?(~Wu&&|`0=eP=F1BPcWVuGvRLz$uR<|ptDOf0KY$-1e{ zVGpj5$?u{c zG#!ln#@tDt`T-wM-O0sq9I-eYUehCnhzB3V%jJibQ7XMiI=nGYb}i0Y!sX%Mlmb4ak$(vi^$AXSJ@STGJMhTNsFlI zEy|W-w>{wPZOd+N(JajpYnvw=<+Ci%Z6FBE6Ps-S7XnCuemKIZTrA90Na+kD53p|wg9{WFDu*(?=>vS?p%{ zTFiO$X@!|KeBH#G+YW%><4IyB@4tw3+0 zifN2FS^C(GyfDb5qZti@p1&e=g+pP2Jb6|H^~-88fakq{37S75Awv{}LD$G{oBV|+sIW7$O%vQLxnF6j>y07`6D!e;@x#KiA(nj-U zLuKDCuWB*)70zdBM-VBH^VcSkxM$)v8$$qGv8p-g3=OH5EO0XRo6b(!i__RtfXw=Xy>Ks%~dzWKu%@*cM%)q#3Wh@iUcAe z2DH<{x#scwruUdoy!Q9;AM7!jEJvFvkL^j@+}gRhLOyS&KrRIt!ae>R$D=lgXEvjQ zGkc+-2j+Q-oDu}f1yu@cF*NqqQiy(V28C;>lqEBT~mO>0356oaJF5W~0xoDqSk6&kjt+1Pn=H-fIMG=+e z7lb4)5F6ZkM0jfay6Dn+4JMU5P6G4FtxBA<@FmZf>*(~?-p+m6Sd(tbk+T?!M~#ju z*3Gy-B^Ru|7HC^cK0jQKIr-%~Qmg1>)Zcb5P`it2LaB9gOhM`P!J8gH1)sgpOCb3{B z&{3Nhlg%i&^t?V_enz;#?LmqRA-n=&USNd9N##E>MO^`b4I1xwvuXvC zI8!bTF8oyx_dEiO5BCY37!IzIY&I#y&mnaKa>*AK`cE7FXT){ENTNfG!9PbLMiuT= z+10TD`;aoa0J>6Jd_A*RDRqztwYSb-x zDB6iF{`MXD3Km=O&Iszom65o{_vxQ8q7b~&usDh?f8X!`opaDAeg-;}+g7Xl1Q19@ zO5{~6sGvb}NpOV52bzvjRHKRY*~J zGH!SKWS>*Ec<2qC#JMz*Nk>5!ww!i4&qe1$?`mQ0_8o0~7E+BEl*j&;)^ z7_M!riEMg)PEUr=;((4%1%X#E!RQaS=iSRF>$32Ch{Wf(UZA>Mg;>0gp5h;(}PKSq`V6G?l43;T0GdKIr6sP9y;W8Pq4uV#OvX-3dKvuMjr3}XWeDjPvg)F~0z1mS`U9P6E#5QI^cMtE#dLMHQo z3tT2;(HPpR97EJ~Ph{)hSCL$strA2d& 
ze!>P&Az%UU#g7R7)$1QGTF9BSdvaZ^ZxKz9MXEB|-OI;2L5Y;vS?6$7A#&+U$19ou zhqI%z;*&X+D2kpQIv9!pm;QO?f@-Z4X-|;2l+=F8{9O57c*d=B$M8Ay@Lk*biaflB z^%JD1_RbfsQm0O(k~z58YXT!yvH761l6OLx%_p-%aIDIV%}0vxpKz1^gh|o~0vuq_ zBF{?sB+Pahl;zGC`*T~piP!H_1rI5@P!b1G%=5+K={ZUws1O)uiE3u z2!O1SfwFGlHWCp0r_eqR;p~FSbWsBmquy_>Iz_!0gTk-g-p=^gv>mPK!gdC{+lGUY z(YU~kj-;do<_W$SwK1NUCejC)onONHjPbm%GBD&5k(}{fDU3#7FJ<7-2z9ff@MZ&L zhtN18UBOG^SC!$Tffh}%jYCf%Lbyxl{0AX#6^FIV-X*M7^EJ!8aal*4Lt-+ut!>Tf zu3C3ZX$Ufih=$PAhq|Mcv9YM{w7>)(v*?JBl{R48!JQxJzm|Kwo&Ve`g1*-Y-dV3g6D-3Ades=zf{igJ(LZ5uLE`!~2-;#QpFEbL? zK`!q90+TVc`G=bMHPdC_-zRXjS7e*h zwh4Z5_=SuzRJ*f}6Mc6Jn)>m%^7IvS*%W9GjYe*TbZ~VrV6W{t7NTCC9zfidMzJvN z@73ubQRTWleeZxkZX6t}>gu**$5}W~m!J3Hgo+ktPD#J^3j!j1*1waI`y9!ml867` zBn|$-Nh%=j3d?n4B%by$W?2k^CuZ!tA;S>8JJMc@_X+al@s;O+OSsGJvo!M(q(=8h z(iHj%jkrA`U>bR!&G@oJ^qR|MoJoM|2m0hPndFn5Z??QBe$umt6S4b}@gFEJ? zk6%gHc2*ANfoLm&EFH6Z5PF~ppr&N;msHZ|0IGFQd0&M+-LkiB?76pafBJt8^THqqtM%5=dBXvRynmd+lPL5##fsoq1cOXy#}kSr!fHIEO`zO6kKXYdJMgZean2bYLN2<)Hd9|^E+*w8^UVS zz&^s$L7@ewC)g8P^;vVHaSz#j534;Kofu3@0cKx;_?sA6lQt9S#(r z7HM5*Gxxyhzs$NpU-8QI+sHO|FNNr|>^x$uussWztt(?>OzeoWCTM7z_WNTGf4o(imFbb4ED!Zq{ zx92j$5~F#)YZPvD0?MiquExj*CoWjkC#Exw{pxP`l6<6>|M!HVI}P@I8VuUIp-B0? 
z=2x?F<2?kykme`tN^VC}DDrq1dLSrGFid_~<-S%N^o#AYoQ2aa#=8LJRXy=1x=IgJ z(<*w4MHr}uVQzpclWJ+~Im&&7H#)`jnoK6OeZ0!^{N0-=bsLY#at^N>)2incdUDd+ zzd@RUL;pV@O~pA4$r}>cYlepbewztBKkj0g>eKI3{zf`6>DszX%& z{xARi1`GuYZ2&d~me571B2E^TaAV>4jKbRYP$E)nDf@R?#{{V8u?l|c;6*V)&fTkYGT zy+Jb^SLl}oMg@Z<&GosNR?D-UOmR;6m_5N9M|)?Y(9teVvc#y@OZv+7JAu7WLxR8c zx$K%x4z@W2zxgqK#O|f#<4JFK_i?%6u_-;WKTOZ9(CNvjpKa&KY%A?-*IeCm&GrlM zy?=b1`iTCVDrES4{C`+`%cwZJZEX_`5Zql8+#$F_2=49RJhJHq#qphwi zzPPx+#g+OumAMN14^(E^E5PBHhq7(~D?yF8*j)RlL7} z=VTqAwChRFo+i_hzuQZ1`epdnx7V9Y#BzEX(8N>oyUg{cpYFq)mXx+lUbGbZe4P%< zFL|ngOQ&7cksyrJK~G9ulyKSh^z@9c&VFCtFlXgsREX7sX-bsWuMya1kSLBiAfj2- zkR`!(NH&!}2IAqB)Ve;j&xMxP0l|S7c(~s;b@gJ7+?860v_%d6Y|NZ~rbr&HhgVFQ zh$tYXfP?V}rQ+n3=Q6d=CfHOk{bJ8@6F;v>NQ9RgyQ2_hB1}onlQf33xVk}XI!eElLh7NKBt_i%06$B>qn@`w7_Yr+5&GQzHjcOAyLpTXhI ztpC8QUZ_3vtG~{u8-7ZK>4VP(Si@s83OV~MT0`KV_H2e_)S-5WDjY(eW;(G)U$T*H{CtQppe8#1So*o zD`HyBR1m{U)6qUobMy_rM)7gP`v8={1B~rQ0~`4;EGN{T+AGZmF^k|IE({cZmdIjPFepCVK_n z98BcC`aZ@WCWFjz7Lo?SKlSZmYPytCI>x3`Mcf5|)Ekaloytl|tX1W|5A5@*(pP+j zWlYmxCN_yFExhJ3vXuX3G)q?EFE*2{qPwxse@Z)X3X=TDETz82C zl^AJ4={tl6L@uoChs(0vPi3^0s^?Ek{&DS`ncP~Qr7z-vmFHbH;bDU=J^eiQ>%K}V zih2Gi-k zF_`Uky=0=B2@aiGd=IeCx68n-jYs*PcCeFES)$pO&l&5-YR*%7*Vk}&lTwUx0>jpm zca{}*FIxB|u}k?gk_eJD&70n#lH=ngZDo?EwuiN3hguarW0{#Ev9Yo9o2KsOd-@8b z*4uSF8l9{YQ0m{}jUL}`J`};we}ZCa`>%Es>HoT!4EIaD>mP@I<7F2gP@n(zw$Xok z2p6M4$ifkDrlYb~X7}?YUX;5?sFHXkxWImSQ_~kGAvYVeJ1QZu$OMNd-V&Vh>+wgl zKPmy7ONZnca(|i-^w>e4$gS0vzk7myu}`5JC&xXiNB+&03G6y@F81VG?;TdqVdr*` z4wjQbmfN=Ebm8GD>uJ;UEJ?mZ)dnq6jk4zhp_m>K=Q0%x=_FzjKfi{`7%di6#;UM* z`5G2Bu{kN=pfmY^8-c!K4sCktMuo4IsXY=#Cc2-`KT5jFYFEFxS% zgafR@iO)4?hE7h#eJpJZniaK%QP|ma9 zWW9peSX9x2BL}ky6#x7+S653cvr4;OFA5gdF&P-M2vQcBFvdCN)s|xsRogv9P06wE zBI$SQs;XI>tEr%-BTcH+AA`Xh693MZApsC=`Oc4&+hqC3a7LrWR4r)TZ1tfY%kfm} z{mQNhny-_rthX9f#P>I9+CrcR)(3GtrOnN1-k-yx|ICVp!#-!}U)+V?xOJ_&oi_%> z1XfK*cMr9#xt`?=g?Io++utX(*w=iF_`Dme!^!OIP{x>Lwma^cldbn_=ElM@1ErJb zbzAHRW=u_qC{Nl_kC$xno_2 z2nRwM?aVP}>2uV_hW4j9Nz@0O)Au=g)_8 
zaiYZvKgD4WOG@$%^?XLLe$WI(zi$_%mzK4q@xT2<*Lhf|o>4@)W}ApwbKTq$x9zw; zSY|VRQ_FT{&+L97kUX%HfX#FY+FJLGC2YOFe?T%oK~ApdOi{IuLO`MLet%aUdKP>* zTPt=j#~CJ!M3Gs)tQ(X{$uJ1L{txse%>Sk@b90sjaq>^QJR>VprCs`9pg!3Vkw30A zv9W9ra@gsDf<*4*YPde_-V=O0WuiC%zT1WkD$sB-x!iVy%cjqx&I_?Fc4Q=?)3nY< zln7DduV1j5!xl8W$XVXk&f!{yL7tiKtB0DnZm5!;3e`9u{5ZF~Oc6v5?1X^9mD&iRT;R56S)3uhaAkdZXC zCjrZo;91((6%G;bP89-27^E54Ymhz4C8C&e#o5PY6_J#HIqv=gB4rsD9^HiZ(qb$0 zc0OnriWQaR`|_^5mFQkya9_4lEN(eMUE-Z3yI=U zoU|$#v?BZ=h0yG4MXLl9H0lU;1N5!gAfiJ_!=kIAmu3Q)T}(dMvr0p}KnUS}JyrqL z{g3|KW3~v+2|&hsDtJNstAf^ETqywOQX`!*vKJZ zNFy&^R=UfY4?JHyO3T~R1?AW8t6l%-bhm|~EW*pGahBhvEUPzKwbFz$Kh;_s{1;;R z#rI!`WgAB>%P)lJoM(b0n#--rB2VHcMob4d=iHko=trJ#$1%Y{`-X~yfWQEAhDUT9 zl%HbT+mXO_PBC$Wa<-UIDAHPn)@|Ya3Ks+u7w&Q+J0i&_I+uvkg43h93Q>DlDs^|! z;MJ`97wxerW3qU{y&Z$3480xR>LTC|XW)0Q)IoPRbY1y~Z4Hf~Tik6g^8ZO#YX6h4 zw5_(81^kPzRCZ$tf}-7L`xJiQ48=16CCn5bU+y;PcUNMtYw*zv*;J!TLTmtMZ*LLK z_@*;CI;Gm3Z$uZAa$ev(tkTWtogxxOnk)@*_~+U8!#h#5A*qsBXX3!wuCuuB@pmg&>H^pk#vq7&wR_%+dm;|XZpT=iXz31_zSrF6#-wkT8HJr&)Uxf9S9&Y zo<~(h7mA8WAu?JDOTEs8z#ysNbQuUNNF8Bv}{ zIxAhoIqmN;?sZTMp*U05qt7=|@u-h;`O<+Z-;{4)Gg#^0biH?EyN8%L*P_Fa3w3qT zJ=rm72Nmeh#YpUS6~9=6ilEcj9!ZP{M9Y#@$t=GN;q+C(IzQ?35kX13CAP7=^RGy_ zuqN`8GuKo1-zkVAPYL#B{9hg}6YbZRpvZM4!;^lS>Cc;CMZYtCZ2Q2iRlV=3c zgfN4gVFF*qnU(RQ(&1GXtL z?vHggV+4OF2NZ*>EVl#m)plRTa>c5`{S0_xw$Ks?|M-*?aqLXjF1*!#-#0!&UN`wt zHJdIVh04W=v)ar%^1EuLp68BeSjX}4dG0HbS5MItoJqg{b0{3mch=Gy0blQ|ruuwG zHlFZm-~o49|L7tiTt;L%f6gB!-D=fjhilc;w{h`XxQG*IyVPm;{K8{|$JSR+Axn7?M0`zV;jU>eBrzGJ+D_+&MM zWjlnidV!Bi)@2pbCcX7?j;r_6;qMDYm0B3!U=YKpsmIv&$~YpM0d#{fd#BKE1Yo~! 
zi_L%)BVHT8e{hsRv)#2D^}gBH#n!~WJhppYVjww0KGsz9?FhiPX6KV=QL1}J$tmcM zOD|>~mLg`DU$n4xnY)nxEnn$}`qz`Ih82Ht!vJouz^CQu2Eb%eT9GEifK4U#*;REp zuKkpzc68^=ji@G;zj_c047qLDUMp}P4r+^{>ewiMSa7AhYOrg1Qc#71G<<}MxfWFW?;EX~BZ z{mvPIJ3^nIue~p{a|3wnwyY>lxH^v&( z9t+b?wi6;T>EAFJEOjWGpPhXTlIqW1h=8HTa#`ss=sI&hiH5WSj1 z%Nx_Ng+z8#ekK%3X5MN9)g%K15*I;)z8F2Tsa*t7aUm{fOieU&8Ir?@XU~ZwrATfs z=f?Z%#w#^a=v*ik%l`&dW)dziF;ZCQk4leluFg#ZgI&CuLKtQxdiV!_{+ghw{j^*I|Gf9K(MQ}do`tfl=vH$=LIxhYJC>l0wUHl0Y_8!v!Ikff)Y0$#sZDvx}fUXlLp6a0G+zxthj zqs=y&%--cfBd@P;!(|-Vsz1mB>N6wNuwWjJb)Js`l4QJ=ZI`GD?)EE*pNf@`zrWM_ zV^wbP$Bh68_l};p^JJjyx055C-x~Mwl{NNo=+eZX z25(fQ#JNfXAx?K-!5Jugt6?Dv>sKbP59E&GP9MJU6I|GycGuNBJ;Q_mXc?jfu34BA zN`Fg$tkC!G$NdL!ISOrduR~~AW!(&KtzzjMkl(=ra9rrB2goxT@Uz!*|LCCev5@-p z&E=ce5UKWZ37K0Cw{R2|u?s)jg1YdMIg898?aaCVU&tj6i!K|>jF+>w73vGTE!mP| z3%!`v&iWGRP~CGIp3MHKLEqHBkmO4jZATHG+*s0Z3uw}jb!HSo*lrkz2?ahQE0_Us zqlE#>`DKoq8Tjypu6i+q1dwP}g z|KcjoN&ez0iLL8U|KGVv3U(xHOa&uH^o?;+@Z1q*{$Yw!Z12st_fuHXITV(woh^YM ze+^*Ps*J1IwNKu?;D0F7;IO{y)*SG%A%c+0!JTFO>kj1C73*dtIa}h+p>yvxfRfUC zSr`blH2BPe^TjL54wx-Ok(MK@DKD=Nsy6u-X}1O*U>!?wilwhL7k@LK;q|0&_AhsXC3FtE(|pnM=k;yk!XdGx)P>4j$glxqs!ulzsiV zXPvfu$$q5)YZ;SKH_#u5RovbWfe>9w`58qL$pA>f5@dysi)S)RC%FIh<(&K6HU}7516B)O}sI^tkJSeyT1aO44b7E z#KsVm`RUJq*540WPm|s;vLwPj9!np&K%ZBpNSx*wCmSSh1W6S2Shi8m^abXuthlo9 z5^IN9oMYHRWCXaidae!?9~*{5P89w!za993lj{bGDFp~fcSz5Jm5=?`=>D&!rlngw z2vg_iCKubh6H||Bi`|m=n?B)5bh?t*OtJA{Pp)M8s4@Tc zV{$9p-)}Ix%(7R}(i$GwVNp%|87K)eWrxDqRZ-+8)L4_e)+4q?j;)vHVtH~lIz0L! 
z%E`ay6(BUaxVBb4Z#8027j?T#r{k0cijp>^yF=lTmEse>i4x+Bkd}C?uJM#)N(^mw z@ne4aW7VCcde-g#Em!Fg`W@({izpL$cU_#l+Ed~Bg*SiYdIc|;@NZ8Z)?X)YN_BkgJQp>I zqkFO`N}Kw2Iz`l68n%HM*H0TaP<8P8cPFZ2SLWCnLU(0-SZBj;F}UY48w55q5yGj0 zCq{<3^(N9YMs%mYy+QBvzxkc<1HJphdXCTQIBOeOi4Gf$I+8`ocOp+=ryz|Rn1;|# za9Q2MBJLwzkMghTKc_X5g*(Vf<&$l4pt;C9k;ozRwUB;K&J_B!Ef}_(`M8IiMR|jf zE8w~mFvSKX8rB3x3!d+5bV@uz>eWZ7dzr+fmv2GW({-jFc^t#qR{ayn^AoGCC1?_q zO^bo1H;zX%H028sHiouH#S^n`wn2FS9I0yp=$a)+aNbHDB}0i;_q+%h@rP^;*czLX zmMdmETh^P-EdbdypcOHAGVbvoURpMb3W{b;^q$LJ+_(RbHquv@$gd^r8B2+rYac1|a{uUmJRbv*!qt~4!gJ9QSk_0# z2S$j;rP27!Q!n00j>254=K^hsSd-auAPX8cjoB_V*7WT>yGA!GU|}#9vT=vYk3g;F z-D}!HB&&>{A~d3CT*cvn%0wDL&;7X2;*rtUdy3jUFy8dASNayN)y8xQ{(_lxdJZJa zMB}dk8^VT?9i7R~C_a{Pp4ecl2RJtCP&58rfI9V8eNA$whRkyoE52EJyXnqTv5T|) z{bCZ-8y+r;`cAX|MhqJmEUyP^l*lKbLXP@Fy+IWzHrR^;To!Fu{bkLu2n`hsLv1iV9b51iVhy3FvWr!>s-vYd|n)Nf`I+=Rf22#?h7fjgQ3oMSp^ zLMoz&UW72pBdsX~uvxb#D%fk?i2RxMoQuJ`6k$s+Bfs3H~K(G(KUl)!Oa7kwa@iN&3N= z={iK-EA=+6G`CJPcv)hymQ#5xy&_5xG;&IUxVC2{1;nkaW6fY`4F99=y8dAFvbq{h z{e@m9IP|l@A)SLe0k>BQ5x^L*8W$IL^a%+Z69eZvJXZ!E)C#D^Cwvk1u?EG2jaQ5X4wd$S+e+z$U%(eQZiLAgbFY7mL zP}WV5H{fO*PO?;pBBCLGjWEM@f1RELNpPM7gK+anPo7B+@jNCmr4JeOaWi;96SkV; zTxN=?oc{h#fjYP6<)!V#TJPR{8Dc+Dz#7w1eE=0@396PUY|$#8IRg=v};zgBX-`wN>9AG*yBWO4WE#ow>Zua_iH zF;}#-zK4DYgPgWXUrGCldtELYIqz#PWxJ^aVpGqtA8-GQyIkNSG5-g5d42W|?h>@# zddBB5{q>k68MF-x@`|fG-j9TRwIp5+Ja`L+| z%hUVm(Pqk{wrd$%kIt4`-b}B)gG=Q4(-rA#MUd3%KPRyi{~+{t#mo3@Rhkq{Np`p$ z+&EX60kpuycxMwCf+FgG{!&qS)(%vES{0xqBhy-t7?vRuoRR<`spEzKsah zAgVxr9EBkh!Hi%>9PY}V-s`k4(z7-Fe5n!y`mm5Ijz@D25a9Wf^Ij?juf1TGU)pJ@ zli>n7$Wxu{JWDNzbDP6lRTK}_jT6m8A0ZPHX54MN^fvnT)?$7Mj%g$mWh`x2dn>z? 
zM5WPCA{8E%+y8HbAf=%*Jr+yb8o*&Xl9`f?Up5#Z}l8ea_ ztjDiTB+c#DLYF3`;P$(hwy6M#|C)di&6oq!Qf6;^F{;f5xZheW`~$Sa)0HAdFgz6K zBPDhB#N=^ztyKiIOkD60;Q-zB7c`=IM;+WG##!zcT--e%6{FiYu{%-M@YGAdo0q16 z$}xidNHt*x&;TT*><~jj`Y$OZ-aO<6A0Ov8a!=qOHdTtq57O(Gws8hE>mIK6;bNe4 zVMh1BOb)l8IOj3}576d_w{twVH{vQ7RAYbk+7Pw!erQB7*W>($AR{NK6(mU*Z6>o>|FZ^<_ zYsKff88UY&=j*Hm*?hIh{`x-znC|e9smtTWmO?=7dj*(h_tHSd8=>j)Rli6 zB8`C9>9GOJM)2|VX(fXTSrz^Z`&u=KMQ#YU40Prnn8q?QeE>4?@SAy${B7ZGhYVi5 z#|3}J_pkx5gRM#3r2yD_Tm8 z%+PuEC^Q(0hI&pJe=QMXs1cb@OBWNEGH}wx*x#xRfGu{#+SRjxbR23|*Y~CiY9!B3 z?n)f!whYzK&A(r%NT>u$gI`jP@OLdarlgZZx@V*(iVn%kI;29Ih*d0V?ax8`l;RS> zfj)2AI7dcgK`P%>i&&GkdVIGxU+2@G{}NkPxBDdi^v*ko{lk~h&n$Evq)ppKZ{{o* z0@@-#JzQgC=w`N0$^8lQA7;x}kl8Z5x1vpfMv8)gP$aY!ue=&DFHr<7K!Av7bM0 zZRbN>LNv;#&;#&l^T|b;@OEBKJ@IZdH&FiPGQ>**1?0p8^Bb5@pE5_U&XBPaOs-7> zZ&ffIdsBCgZ~hkEALKsQMNbY=VzMqYH+VXb|13{y|sPxy61@;$9bh~-vq>jNLT>21*5;$pqk>2y*+UKXhLc4O1dOQx5hF(&<= zMcV&TRr)h>K*)-en2Vo`rO_lonspKC=Sevk8I67>J4zYpb~Dl7<_ zkt(dZ`gaQR{~-wl{d4rgU}61PMYjvm{x0kK->3*{u-~OL+uNYcQh2fJSvcRIl7$P1 zx-16kW$C^SK9l&gGPiZD-tL&zqItY53Xfu5$c!Rj2#>ryheC> zbhalDT=juvxM5sirl_Dmiim`?x-b~cIM&9k@VL(@*!>lTLQ72#4aaKk4jDElr$_MQ z?4-abzv@y<3wkA00cy{Eo2Ua76?I;KQxMYoy@*zJ|1$P?2sEx{Oq>lut;;4sexQ9SE$eSMvKc4lQ^Yg=4YL~)4DJsOpD3_~lEWVV==YBP$EHbyQqp0J zN8=;~?@hpp7dD@fP@a{SHIYfQs9-iw*gI#V*v2Iw%G>Pa$J;%Z*7$D6`TY7Kv+N~i zi;ajB?TR6zJ{lRXaebVS&J^C1e32JL`KNS3>H<|`UB?t=v=;N z*l$!)+#UMzY4T$_V#T1~JGtTyD?HE?g&5>I$R4EBO0>DT+2F889}A z4refAP(H>NOaS4D`@vPoT8l6ekATu2=#PO(&}%|(iS^;AxuGi63Hg9zSHBN}?x{x@ zdybiS{zmLs`5(lddjCW0+1A|UzR&UV-2HG)?*quO+F`=Vchh4FH?@=fzI!Np=v7Db zZ3+;GEMVuEvdO=hUZuI@I~PGrv~-LW6!(Iv^fQhH1=UCPJHcT zytmg-t!#sBh8SpL`9B<>VILq7bVfzdlBU2?5y390Z`uzw6R3l z(mbYb_)$FBvuq~gT%agcYhp#-(7z1z@n=s_m=6xwq@Vr_SEzHh=ZIw z_~Jd<^XJEo$lXcY>H@C}**C7tL54pG5E#0E!7S6W4zVMRP#>CB+kIlT8 z7?ie~x^eSM;CqAX)+T_j!TvqU4*LxG`Al{zPZFkGpCB6Yj1yY4Dbp50+{7Ru9FOS0Zja^a4 zn~(4LE*s>Fr}81IzBFF49^ECsvwnEwst4&;U{emuX~cHjVC;S`JV z%Hn{d{6TqnWGeTw)m}2sSQ(9ur`^B=4^PhpY7_~OWC^JeIeYi?t=R>>n*50;666P{ 
zjaqsCy?E@j7;Xs>`Z4V7V?k6XMn1QFgu+vDTH4yaz`i+%dpo+dmL=Wm>HAuStmJH2 zS(!oU>VQ^9MM~$=N8ze`R#+5u05>juSb*I=Af)~Lx;(v+$$lH}FfzOr+JxaPObRa4 z!0U+0#@xgsDWLWdquJa2s7#!fw#e<8#-_sF6a70l$8I6I^fRSDNr;S1fqJ1Ut1y;W^cHHegty02bSJB1#N{5YQ!~2(ya6dB zJ*z?gf_BSal?m;x7Zx@Or|#50m`&)|H=Or5`Hj}H)&(>!r6#V0eQbt?hZL2h;GdBY z73n^V9%rSxgxwtv{=9d4j!>Hmpt7&FI`1zrlD`FFHSJ_J52JGan;m}V$%`IMr&9H; z;0H*9fS~t}`bWYmd(7bvK#}2S-B;|DB+tu@<&Zz=$DPe6XyP}Nq`6;7;mifmNCb~S zCVE(uo;yQs8+{|drsrjo@k(77{KxafM%HHLW+#WIM={SD4{+pi!?V2}^s|3{Ht}bR z_$jpbhU%BgZ5*wRz?RD#9I}6T7VZ5#N{M2@=j%Ok>$T>nTc?hfA2@LZ>B@;~eoG}_ zN-G;}*6G(?311hf8*1OAjJu>MykWjvp!SC-Fm6uRLsH>3K!|Z8v)jxD`Ml)gli>?2 zgXVn}f!>}Ed2n9xqo+(^_=s_4!CsO$=`rPt6~7g?jq5;5TZajx(wl80N+Ay2Hos0z zjN~kB;z;T6xGFRl%6{mtht7v#a_10cq09>Te@T5Lw~h<@HkuF^n95jW9ouxFqNJ{G`AE zRisAh3~zhtY6H=H7c#9!6RqRZ@}2X0ryYPrv&ke;crJ+EKI-V(ucqYWKY222lzy)D z`;sgMgGmbw92Uo@X4vs=wIZ&U8GmUw-R&0mA1aoRKU_g=tYB#nGEZX z)d`U&*Se5aXBXA*+=0Y%IBGx(V8QZ*=9)Bz0^s&~BQ2V>0o&GtscXuegv7^ofdjyk|Ar<{^o%HEWkMbV?&}xaUEs zpPrXcX4=Zs=Mcf(;4Z;yo|r<%?ze3~^v}Vv#0Hyl%|bh_SUEbqvEpW@mzvWpfgwdOW_pW-#0qVc%G_>JI6)L=_C zzB(jMoga+hzt*tB@b2x$Sm{N-ESk#t$tkMX5SGi8)hE@sz`VprtZ)!gG0T z?arSi;XHDU6C`!dy4%g8eo4Ifl&=3M@WMo|UL%-kda+i_NUzm(R%3j{ZWVKFii@<@ z0ElD}WVa3IH7p?M7JT0v8>@2y8|}zOa$4j-3`N&NN^}=Uycaif_vhJO^Rcg2+_c)M zJz%(uF{$FznweBW42ZiqC9A2`9Gr^-~y=d)Qsn|Cwy zr2GTlm+ZY@?Fj%p{+*KAqnn>Yka2|%v%DEPd&4qe#pgs>Mr`7!i*xhkn4K^VuuA5D z`<}Ev57M5i#_)~(@tLe(`|q(UL{)9Is+lmqO-A?POvtrh_7By*tP!pXRL97Po}jN$7CS$%_wN?>gF$vTjZ2}A@_cqtP)YsX*7F_ zB-}ulx}8di#7o5w;i4+nKq#rw@lt`goG}!#G&z9Wo0Jm^Y~pjrcUogj)bOl(bH1`_ zZqQuMV`M*Bi~GesAFU~4-Qbs*z_+y3;q$38_R@%rr_G)@5<4g@3Ny1=oW6j> z6_c~Q*DirBKqcEvEA3VfHt+lUz2&9Yts`1a!VBm~^=t5MfG!w;EH9H@1pJ!A6GlHW7Wfxq1HJ3v$SYr9Y= zlGV&gn|PqK60wUT)&c96A6HSD!fnJjW4>TT{7M<9y8)z!w2-kdI<#m3ijO0cBg2|r?+HBH%O*}*GV3Q3NnKOEIY-1D1W zjWp_cnlbXY<1&=7$Za~iXOOrn%RKHHZNIFJFQzz-k;s@Zp&B9{x8DaBqdtz5k!%&B z?DE4HU^?>7a3ytmfyMeDPI?3XDBuDpp2&1vtnzj7br`4b@BO{X@eYE2d z$X4_VW6v2d*Co`1zwR!Z$}LK5D>#dkm49$+01vi4Ob+#?1J7}qEr(NG_r(}8LBCNj 
ztx9!hn@HRkxv=#@wr_9=ylNmq<@O1nZDX#eOIYXgyO9W-*=}b(&8Yp`^sfGLgrh)v zl4d{U>rPrFdb_E`_$9WL`q)LfMFpH7NrD95iu3C3ByC9$M{i-wX!@}T_r01)&*wa? zQ`xZ-XdAyITMf~g^*`HU#IuNJ%gJdWiL_6MYWA{H(7ccc7xU3ZWKycx=poh^lWau&rU3-R)b>^-w+NAl2r*ysj3Sds-ejTIuOH4`%s4q8TPrw zf2~KIaR+UTCjLn~KP@p!uR;aV8_<^D^RSD~N~}TR)~M5u(dMbTd*%H~443Eq^oh5m zan!Q1!mx>T<2r|0t7EUWaUm)!)55UKvd!(;HOSmAz?5ekj-t0|GJJV|jem8p)+HQ>|MBBGCXwp*=u~Nvcch1^P~@pe-PR_pYC_YalvEiiiQ){tKU$_4t#aT9-cM= zG0I_vJ7{Q#xJY{@GW*+a09}VdeA2ToT{{@(@iJikm^npTxc-Ah4lI3HfC|h<QfP_o8*`}K^=0}y#+4b8Kl{JWu$+!#4+ROj^oJr9_d>$d!CCNN zrW>j~+oZ91HigCdJNN?Q@y$5|JOSs;7EcpK6UkxIUflj`LvVJbb_~tKb}qS%(Hs#1 zFK0>~$ub(X`d~egu)E9?X{=(;bbR*%@#bLZAAN0>Xuf`8Udx|XCp8!@I-lqmlCPhx zp?%JHt|{&*vwd~Ptp|=2@{dat@_+X2jo!&bNVxL8RDJY1Ck&?_S4VjB)vZ*W9)y7I zfH)UcdbK%>YND6U%fz*ZqU=p^nY4L2#-XlztV(04ShsCTwJERbqrmP5;*TQ_&2}3? zZ)V_livx}I)-h`l%oZcGL$-ML{M+$DXK`CF%kcSpnk@Du_XQyOSdZ7nW~D9_X!m-l z9e;1E;kI+l3scCxf39Q^vhm?#iAYD+MCe6{2Lk}~)6+iOtBZbe>Q95Qc(q7j`T;b0(; z002%*?YHz)uwubq`zaw^>S@M%i#Y4Z2o%!TF*i#Aa;NT9@PEa;;2p0$_ZuI65n30%QEMC{-XaGun= zY!5A4c2+~#W=C!^q*Z9i=v{v@aYG-jt}vcCRH^v%^`1K6uh-cmj?QK~eWxDw zO{a^ks^rz0>@EivWrGbIN(^k1ofHBEMm`_?EAv>T9PRJ-)}e@@mp>t49EK0-U6hLI z;EzPw_*4=*dfT(p>?h)dkz}KB*loa|h1lBDc>?D495WIlO|+IfwTTe7U(OE@ov4Hw z21Qik5yp(X@Lg z<)$8beAGv^reV@nW>S3zyb$(;yTw~TcmnslO=^`$kk^m?$0~=d2svGHx&~Zquc@1S z5fr%H?@#XPbtWeCCX{aJnXxi5TustuHmvI5^oAUMwInj3UfaIVT0`f%Zp^1EGHFTV zk@aahd6kCb@arX!7wpm+1*EdmcWjRHq42XNb?a<~q7)eZGRtHon6$7vK*U^+Ag@r0N}!qxQQsA|)7KSd!~Jq=A;kSKDzijD;B>esd3E|F+xOLT zk_B(1cPH%G$b|m2K12~Oc{=Op1^|7zR4uiEx5=S$LZ{tLR=L;MQVX_at- zTN3FnK^3zDYyNU4*ZBNEnhyFJUy+Btdu;iPTU*2_s;YP3rcDp?poZ6X1S~U@c9M3)qJG3Py>yt=C}Id~w1- z@ldDJu8h)`L{KggSg_%w3EEm%-=Gbr=Tfs)I;UQw+4NK5_5gs=?;{GqUN(KWOtL+f ze%Vz%8m0}Hfhi<&gUI5)U$2k7Q-{@^py=MF!~j9l9pY<~1?#X1Ug*_CQ{QwSd4m;5 zZ%deMCmIbuE@S)|-IVyVAjHVjrfzm;LPd224jgTh_Gi|8cSlPbyyIsW$sHK2^(N!_ zJ8*)1@0&zLgOud`rq%sbCLPQ^qX|4hcg#Sl530ud0iyV_+FULOjt||Eh5=WLV`=EK zZdlbDS4d$2;nSeb44`IcfHWnlBlTOwzPt~<;mhK^os4a@^1uq|=kF 
zD=(<6$*Y+Tul>&!V_`lKlp3iTlBm&6AUo%kvzksNf1ki!&q{f}H90-@1&Yn{GvoYv zXyXwD|9O-;ss-e?4tCL0MNpy1(S&rpH(20J{*JgWd59eU5Hm41#XVeJ?Zg{X%2l~+=XCi#-aN7WeB55(jH_2zpbFo<` zG`vDUnvf7xka;$>8!2_wg=7x=ee_p*C6-_3O<}X)JN0$Rz-|-~- z%nNQq3$%~j$TJ@cpX4c{dT#M}uKY)PKSJ}zL2L53&2Kzz{IB!2X_ zWJd=|9IBKOe06+3)Lgmhw_Q%edB{a!rB7ZGW$b*A96%F0lrnjhT$PzK2L&6pEf-=I zThO|sN%VR)VPrJkY{Y2p`}a%7McHo&QT#XJ`#)qZuQ|~r=eTY;FJ<;Ho~667V~Ud? zicRYO91o-U_kE2A{OR%dWLmh)G_@U>W?tdPg~A^h3E|OvT$R89&rV;MsI|Bq8`cdD z9rdZ@$68NVl*=1kbRwaqJybdLK$ynVBJGo#3{=?43)ol*E`b*z<-1TZ`qw+sca|nF z2&cUl!~RY98~1*%=`P6^uNmV;TqNlE8NMj_MPm8wpQmX>xH8nCA1G@uSmKlE2W3j< zo}5i6U;%5*EtKc4HUi#_R+`3N?9~=*RDa$T5?OQ41>8&Ji(i*lvocqtqYy)52`VK` zlQaZ|5Lve=fHx0j+{2sGwACwcYriuJ;yAj29$&td=ICE=<$$*OepD+DZC z`E@W=E3N$X%HwaE{wpboPGXOE5|Qp-h$Y3SIC70&xPM?d@oSg2@Z}V!NJ<~%uk{>C z6JHWh5^YcEBWF_(jV}^k=frTpf81|jFZ_-+a=&Q{pm)h3@PmK>pOZz*X7Y)7FPO>} z(98VP4TKTM`x1hfPrHn^m-YIADiqn1A!pYSqSe>CO;Im8!oaT*p)|(jb%K5ci4+jx zk5SBx4F(Y?lQq>d_6lYw0!G5;ph!atXuMXg}7|pYC2k_^qFA zub~eyysnqfYW5ABY6er!@Co`fzeu~r?*Ku!X5d+oy7EagPnV6tX96l`M|bo~34T-o z&kHMlXWz(O)_`P7=%^o2GM;pOL>_Ov&xxo-&V6ACWJ_d)0VF;-rRN!xqN z`6F&_FjB9*w-3#)Cke-uZM$D1P`OHaNNEmOWPaWDGD>_SIYe53-!G|0WjpqmFt_oz zfnQ5AX}w4{CW&s&mN6DeaOdx*_SCR4ob6Si_j|naAdGea=1YYfvHOj^GekR|tqX9_ z{t8^Y@GZWV|A!|S(&RRB zXua&gJyZgxoWO_lyl&tY?=KM~KccsHyGKG_Wf&>Yiiyhr$0?Q5JzQ4H#SOPZvFs`0 zd6iWgMfXJ~_ejgtwjWw!fc7IQ-N#*hL|GrT8&~x83Gjm3qyp{CPiocIlH8Y4*QA-p zoC#45F-TtaH|yt{%DU@zM`p9`#K$t@_b0?Xl!@39-QP-K#*+Mu*JH-u=?*WS#;+c|EVPbrbgZ6&(cogN~Jlq)*us?;Yh1C_U{_HbFXG8^A!pcb&W)jWT>qNvj{$i zll+=1C*ZAOS?`8GJ_9Ps42zL(O!>WPS)7Y!s)B1a>@bdIYR1v))XR3)6cv?d?xr>9 zzbSe!R5IPDO~es!y+9&K|6pD$Kz{lB*V(k zB8vzqz^K^VkQ>_gel&30vz9Z83Dn_~>h*wH@c(5g@IG|5Sss;x9O+`YIAW}b=?Fz` z+pm7H%B9}sF@5k5ip)X}E&OJRTui&C&d}gf`qti4vL5Cxl8iR%VvAd(SJ+AY6OMh> z%qe3gsIboG6f-j?J6E5}=?qh1AwY(YV6>d!Zdi9r10#hKdO^^RYp>k{vq?4Lh8OAU zIAs;}kWN&?yJzD3Odai6^Rm;{4d~F+-kQd$X8^=4SC}09cyGxW%`=6_tR5Iw^bu}6 zxi-gFRn*ym4TJxAlODeg8i07@xl+kW6$ufWC0~mMa#mYqtqu_i*YtSc*)@hvMBEWm 
z?uST0>DHfbwlxIQ-;4Vx2<6>YxkU;$f{BKG6z9Nu*;7bZ`ktM8xj-S0>aZiVPn!~|N`Oj>C&&%ChZi&8a@aK5pMvZd)e0WpDnqsoD zBWNsJEc>Mu5`S*SuEW;h`S3Gg;$mKJX{w5SMrFR7d`-0fcai&=D249bNQ!_){n21q zU7&|>nmzwP-K6UEP4|qNvYS$J*Nanr7NPE`~bLg~{|1njpY&8%HXbQE?eNBH#=HCnOfwA8<8=sEx5PX;hUeuciPe#l)g zLBqbi4Zo4m3=XbF@o5^d`A+D%X>km0f*&snD6M&7pJ&>Ae1K2Rx~$4;KW{CrC&?_2 zwrG8zcCUV6nGuTm!oLYe4!9E(wr8_>e^c2gou2c1dbn0ly*siF&7z5S4T&(Bw+bmg zZDnoJ^>RAU=iAZ22X^Rn{B^^CU^l@BUpAH#?AdbLJ2Ui;deTJ;KcxwURy^V$tqa^(oR4LGy9VcH#isr{4oei&*dGs)~4C z=UOc!*%=O5;JwUMpn82OEY*@cP)j#rsT%-$`$cDD^K_x(8F>nkZW!@^+e+~y{~|~l z?si?OYtBF63L9Q?IQgZ(-_92*DZyt8rEkSaVb_PzQM2pc=3RJG!`ugpRrT|pl(li# z^?TZ4!$_*#Xxg*KZ|@>~O5%7<>+DvADYEpy#{kI#W=4s^34Q*A^4CXKyE~($aXxex zC4DEf0Lj?6t4y|~1IXqnre~B}U)sFmpc`S8%;R52Tnhf(#TFj8{qiQd7kzeP)`AAq zMt|DD0{|Pr@_g^nGoB66yJ^K)sWe`1%;)m@E!01v(@-Qj$~xD~0xvmIM$0{m)A%o% z!^i5|2uX>4GTL4WG)|o(qnKqjx? z;G>ZqirE}li>;)6(43zKYwSE z9tf@`o`_MgNQ!)65En5^RuoQxW;x%tX!SKm_I+vAc_+#;n|JoIjGEQq-y19$2&)sv zh3k!RZxbB&WwjA{lKcg_6ReU^iNGd@a-)qp%;v>eMUSLOOI- z>ge1e#D9-ac$;CLR+9KP71(*wZ>}bJD_m(Wcl$ZG+-4MR7&R^wlyTCz2TD~d@-C%L zlj|z$D$W+V9l_9J>5cW!qr_eM=E^LeXJ4M2wto5_kplN~l3Cv^$Y}OBy%N)Wfa@su zZ+BljHl1LJCX8`b?v2I{~~9D1Ks_ZOl2%UXpTd2N{=ae1Hi zK(;z-VK`Z5CdYxFXVAUR%|Bz;V^I*#4M-cJ{~+W&n49jHvQWvbksM;(#YYuJ0bF<5(zfRs040zPP0Ia0eNr0bA`;pg>B@ak&3=Ul><>*=AhbbDE z19!`}*a5Xa%$-j@4DbRRo^O(E=Rvy26{f?b4|i{AVH}mTG39gDJqBuq@4

    G(>L}Pe`~bp2%1&c<|k3u1<+(rDa4h zNYkB{e_ui5pp@I}KUN@x$B587(ap~3Qr&cUe*L{mZw+Bj=-o(Vn%F9U7El12d|UTH#1(pzCjeMEsUILh$daUBZg=Zf>*j;R);+d%o8%4fUOolP!)GI@ zIq^H`mX*!l7(rFXxBcBOZ;#BHra4wP4m*)KkDTNP+1oGOz-RDtxi&@`#>y7z+p5%$ zR=d&&se@#^MQ&96K7OAiT4ukNk??e&RgdE?eci>gU%m{4Zv6NclwlwDw4E@drv*1y-d_j99rE zO@#Nm-;uVIUXQD*$60i&p_dXcP|#KRvrR`0B}n?>Ih%Y((kfdESaBKvC_5QPU<^1 z0c1E3-WqYtPE?AL)Pf|agHS1R>$yKYq&E%AI<7BwR#lE?fRtK8pMG{dm>%7vh$}HPCqdnr7EezUv!ljB5=nDh3x=++31z^~48k(HD z0zDB{^txwwU%bzRqtC>(E97T+zmtcg!&fi8wLvkM_kg_8OYVWh_Wp`5_WHE@*SYLI zb0>XqfJKw{0UcxJ;>9*^heCy;+k&#L*#_7AgoL-ka>{cen>}%)4dY9c!N00#V`A%i z38brXi9Rc5bDKX_72%IWpJ?LeCUPj)Xd*GPy$Pw};yv&FNdbWyXVfLWW(OCXTsC}D zSO2a$b9m~K6(JIk2%nlno9kuG&M84YiE|yot{2;^nf-(n*AV_%;~Zh_x{2KD`GNIe z8$w@V=;D;G+bMv0IPG?l>M4S1tJJ|1mEaf`KC6kvzfRH8&ABv}LI(*7DWj~Q!&+DSlF7Tl4EY`KZ%;hOJFSi|%@v7B2<-WL;Qr25c zE3+D*L*P&{Taky#y+0G7t0POC-l0CW;8+Cr zd^);<>|xQ*Bz%l;XVs-5RtLYZO6(dx2z3{n0%RPeU)FQ*;T>uf=y zt`KH=N;=t#!|eIH^hu?_QlT88$NRk3Hb^}ZCgLoNA$(-nbV9zVi8}vps+;(QFalQ zJtKoKc;B?3NeRiGB7&q`P3lzo<(Vpy*qE;a8@$O+O7y4=MG30$Y`@;bV6rJ!#77lc zxNdU6bIFA<_D63vc5;M^ceVx}5n`QVPqnn`7!BHt4c0WjtaX!P&bNZ2(%(k}q9jG2_UIs$X%P|+rJdlGUC63SQV)3LeDvUMNk(Vx#;rxGm_+C&cdBdNAX?f( z$UB+|C3vfKXxS3eZ`qS}Ke>8b0GCuTlJZ)lzq+kY-MdYDg=O<^haS;+Ppq@PO`pjc zm*$J_Sp6|nk45l)cygZ-iJPy|lqc%;JpjI8qh}6$WBD*#%CO@IJaWnFUS>E=hW9br zl-)$`ypK}Dp1&`UXH8ztoF@N#C)=GY{ALc6A};A9miuzDCrdY7hs1DS=Hz zerJ5~mH4;VIjsvW{1+Pw?BLDSyc^~QA|Ga6M2EnP)$QN?-@u8(0lb4P(VH?zzp+X4 zDsFM;4;L?ZP_W6ZmcXCRNqM|4#c3lhOnCa|QR-~wnf6JHDRoSd)oZ>=!^Z-$TUwh} z%^M$OwB|<67n}~_zFNZer=wXGy9$rF&^m>JysHZ&u`$t2?zGY+??p0>r5l>s=2ne?BCVpaKsT*TCeq;%?X}dhTfuQcB<;Fa% z`n=#bo}>L~aXczH{eX7key#on2c7_7{SLx>SCE67itRtnytLejGFr*LU(V;ivk6q* ze;~{^<4{)SVnUsI2Jl~P8p?Rgd#Qf1UZ_{=ZLvs6R>M4s z8DWmItd?D|>ht&*xSy8+O+Czg_?xB2o8V zR(yU~w;`=+Oai)aDZE;wdivWJxK^kz&g3c7qx$QnfObu_&7(-FSFQ28@O|#Mh_yyW z1NH}Q=!)$bs`vt~UeI_~xD21XLQ{yFvaVv!@e$LHv%c|Q$98wRb6_HYg!P#i^Yfz=rEp=B zy@!Ubq6sYf;>dK?ZQ&Pu0K3=>l`CPfdTy}R7t_=JYtCakUAMBXteAprXT2s}8Fzr3 zuTP@B;Gjh?i0#r!j`yFotDtDY>Na|!)_mJ< 
zjWwwZh?w!#kv=7EzNY%FD!4n75|8=X(z%D?asy^PWlYP6!!s70Y&lTpv{MMb40!HqN*Bx{isf%kHGS)^txr)TNbL0ICk4*MCP=E zie~NX%+qO8XegcF$9xd?gT`vt49Y^?gDCcw_oXOY(S%az(S4P=lkU7jt*79NSM=3y zkWql{o1H|35!qA$XpBDln_VF+=`#uyvU?ZZg+o`C2I*{7-bAOazro>)w~UAEw8`Q@LmEKZ-_;UW2ny5y(5H6ZJ$|hFqAn z#7VlF(J)*+*RD2j%0DgrokI}SMH;51sMbZ6T{!9^@g9CoN@b8CNs2N5{+&tqv7?r< zjIzs%#dR<~Z#iaS^>J&=l=S{^h+TE*%`n`y;|!3}V2cZgTM^~W)@&+$NPx$;BDT@U~raa z`*r*%M>AWDCG`<5PNjv{=K3cua>piR;KN{ig#;f(UgvLnatdJ6=4?Z0smik>e}g6X zjv?NDKW{P|_%Gc-BW0l$3y<5Vc^4$JoNRmGkV}1-vDe+W(9M}hppzhgMO8XwKQ@I5iXYyNgOp!EVcc(X6un9K#aON5Y=($=5QYt&Ky z##R7Dqv?;%>4Cw@*UZS57<)G%@B6{RqC5Lha8vXlyrBbeK&c+XisRe`mCjZuG0s+$ z@;z1yc~;5t``o^T`i$R8V9n zkjEr*bl8}@xteaHJ1VdUbs1St8eC}UzGFc)KVSbu&ti6W5rb*2-Qdw<+IGGnGhG@C zE(`#f@xaHHLkFJQ^o65!1vgZ+{ze$cTvGY=r##@x^?^)|LU&I=FQ~u4 zbn4&-Bu9VxQPLO4rUVPh!KkQvGe(2^M{_=q)z;UAzP}Uk#{nt4lF`tV(fl;SR{1Al zpF8baRsHLC?+sXA5(pSQqQv})+D7Ay`utWW`Tlr`KJ(NpzJXx3v<4ioGPoEP2kVEd%cv;K#I#52`!)8D=R;P%`ml6R9Ogs7q?h z*TS%|_x(_(s1v1N12_s~Wz{`fk8eo#+7{?@e`k(Pa`4JNq>)Q;#n9v+x-cWG=N+Pt ztug6YL7RySI!lW+OOH>P2qOBLC$A4@Bjt@{(k4958v{pJ3dhp2&m|B1 zdhk3k7+-?;S6>E1nL3ytzz~JO23Vt@4(}ZC7YHGWzQ_69P8@SF%gQTj2u4w5Te5U} z^QH?1j(6%Ba;2g(_olzr0VR;HF+qVf%#34&a2_iIpOb>iC;cSt23}dL*XHRm8o=+c zGtH+&xC(mUq)!?T^0b}K)-5;_ic7+$Q*g{txxsUbhWLFu)~K)$^3{msYi)MYpufP? 
zoWpW_*o9xn5T*qf8s z42Rel^teywoG2b%>(|oN5#=8ES!(ejFdknG8ZKGx4jm!OU_Mgmqav6@yZ3puQ~Agu zjGq*aBxRRPvH@=dugEct!qRVT0ZDa>iTL!;cM&jTKK@_vkBZn-bC-np^y556GNiva zA2Snm;rq+cZ^xMDf-|Yg|I~p-8DO~ov3w?1C@Uk$q@;R`QAU7=px0+&M+@&=W|oKN zC#Vm>pozR-aJh0&Y@M#Ag3$9JM_ua91l{2`EXYf;r{cy$VW#qJT7}rZp&2}z`o58R zkiI9emD(W)dyzh1?xp|N+%a9IS$7a@YKD8nc544!z^uG+t?-JObxQgmQ1zK8 zb@{y|`G#RDliqN%q7Y`K-{y!!ahwXe7v3{sWFAFwo~X9(D%h9Pdy^RYBWk!*@<5FZ zo3RV2`Gbx}x7UYl6b8`8_GP!R)gQ>rDTs6zYZ{)XhKB;6(koJ9JS6tFgqbxtq(M%` zouO@Wb^7h=-4u?Sr=$6Vx<%ZQ69$zrNgvnS@~oA%EC~G!Nki zAqVsh*^LG!-Os!>nYF{yaFzGg0Q?_00<55v^vAt&FQH!B;%Q8=h1Supxm3__MXV(4j+6iW}prU>J+6m z>s^t&rBBzOwf8*_(0$=M*yL&>UGpj(0RyJ;Rp_t6UG9Dn;?7*IP5c|M8OXms5z9Y5 zXz`L;D1TS|35U{08|~AmfEmus=o|D;T3$X~sUugyx7jH{vuF3~tooS$(T1(enS0wa zr8gL#ZMXzD3jvysI~#0Pq!}@*q`-gTUsYFsUW)4VKqGw3XQhVtE5Ihn)xx3HVRw!^ z_s1t4Uly4f)QE!4Kwx@dTO}_Q%8fx4Q?X<4YP78T8aH$7av0Ti@JgeMx?3TRVD!w% zk{IR;tCF+pY0B#0UQj67Aq9U>N=*&c@syzuOqF;#j*QWihOg2##c79R+ zS<~6>NETtZ$b2@2N5atVx7?oxa(6|VLj5D1&|}|w+Ee#IuX1}Ka!#wMC{|Q~>e3$F z`e@XH_Y9t!;gxGn5`S3&)q1}Jy>vA?_I75hCIGbS#K6|jNIfj5^W>f#szHeBa2G&w zZxpIcpObFUd(-7yYU#rfFC1(4%1KEsSWnIJ*%-Z2-W}Z@f!YI zQh=j@pYoBg5iF|K6a59ez=P|i1jAh5DSF;61L-Y(Sik2FmRtiS!=!J+G?C<+LNrjr z)OfMNu5?i&HAF-pTdInP=r6u~r4)Rj(FGSNgtGlyah3fT4NkGKz)E{3{@d&zQUt^B zwn^Mt@xo8(fc2OHA?Dww)_($!M|nRI(^uf#r|k)ye3CyS&i8F+zCQE&G@N)gGXZrL zb;EPJ&Ko+-KY29zQ@IjL|FNCywY{P_kgkJJDp@L)@RC0AWUB)r7sbBey{CeM9_d6J zYbUyczRy%qnbn~($B%o){t4(k0tXf8&|2DJWO>Of33OH*-X@RF@?9Q# zbP%qY*TR)ygTqn$G;W`-Md^23-4m@UQeIU)-(PsZ!=Tf>Ow2K$1Z&c9YRyu=|0RwTTJ zl<8xT!#=C{E1bW0dQ}}2WBa+RRzwNN#oNA)y0sTSOLQO!?CZYit4mF$cgA-cqkeA% zVJ9F>jBeDb-*%Z^(zd8pRN;J-KX}SJ{C>U6lrQEmU1VzL=cqkCvH4-nU5Ku(aFGi| z0o0`t3XuRIfBVT@CEXl5TG9`v*MfI4vT0=!*341z_Na-VYjj&5K44QZHNTzt36M@BuEisvXhr?YcjNJ~KWvqP|};Y_Jxe^bXNG8&mXaH{H51U*uP5s4`n?MBh1%&HLhm$UB))4Zz;53UM{`` zt;Ei&qR9J~!iW}3E`=wHcFdj4U+1SEx?9=}?-A3}Ew;(q%spiB zQpUGuL1gGMy1KraIZdYM#UZ@mb({}@WmF%;NJaKq^BM9*5o1gCa|u)y6nI$`Z^K_a zRbY(8fW=^m&oEOPTN68v1OWZ&7i%M1eW?lm@&Esm7lVwYqSeT{Xy5UFda}3yWk`kG 
z3WxYZi2sMjFV4P$$yOQStjGq z4zWUb_>CKYqYz(8J2O?sW(Bwa~GNk~^P1=O+r^8(-s;RNH3s zAvYD*r~PU`<3MJktX4rdcsA3b?*?5@&jjZohbR1=vZo|_NR9gduxyW;8Dl5AzggLD zRaH`2Pvk6Lp(s0Y<_#+ZT)0N5fAxNkYjWSJxLPLVa__(0zplQTb!Q%OVw*%L>lTB( zcTm=0F;_%I)`6xl{QXN#{KGSW#v~jI*d4i9$#+@xVOQdGf@tXK%DOy1<8ZpHBe|ch z2j$pG?R8VHFO*hf=YEUGLBk>zvlTqu?dAkMKMsH_nk9d8wxVle`}fhCrD`<6fxLpC z<0pX8#liOKqjbpUaYyAl%sNG6Q~gJdFD7Z6%Xj`5Fv7Rvb`lh#{azJW3`k-JHOy4gsgv1PvB4^Yim{ZbR?#*SzT>JZnDomYX&7 z=xbLSyG?QY`!#Z5yv`jpoVQMQy@}(WB*?{YgdP>N8*Q^C_MR*8vd1V&eL_^@Ng_X2 zUfF;JtH)HhKzD;OEa}%SoB-u%n+a8#8x86wRvG&&~wO50QUGG?TDO{f76DoB@ zTvk04#?_>V+X9-Fv9HfKov}1WEoaGSX*o@{UoQ*-A`yba08;qxLU~zv*SIQZ4<2}m zLf~9!@YIE&O{Ca<>aiR4wHwv)T`Gf@lk(c--UtaY&r=ML?1i@V5;7DFeG?CwJ=TLO z!dFy90)A7C*~|Ba7Klr)?wQCJo`e(2dE6Z5;|MNDXX=i`I6JZ*T9rY9;Sg5lQZErc!h@0e;uy$2= zyk}1Ne3V+WkRRit6xj1D>A!Syw3ukqcD7SOqgfYU^EJz>#d-f^Z|BqVH)d4jY4{;w z@-PMHKqr?DlzUOtXq9??8mi1Pt}r0biMLt)D>s7b<$jsBBIxtHek@?|bbqCE=YKyD zg@c!74`p=hoZn%Tc-Q8vmXr$n@!;a;Lt*L2=5tj zt!bIa=e7%-;w(UddSv?pvL2#)6ZJ_?$!23tpGX)0&~#uog#7ZzNOLuS>aG*WPWoC< zKCq9U(UzFYA_rpCm4GhN`5(YodWAf9_Z5bZ-kIbc37=IX)BY;^Q({}hJgEZ|t`8iJ zwYd=)5*feEdz4mdQofIpXY!%r4UkTie$MAGSi9bV8>yGY@#wg6T>USqYwW>vrxBV& zBh0(kh_8x1G>5SUFIw|z(#=pB@L6^KmY7ZW1s4A>uxPPRXZ;VYtMkOM&!4I1-}Z7Q z!TGefR|i@JcI`o1UT)1HTYPm2CqOBcwa49sgCAR`xJq?B@Wv7b)+66~&c$eDeurQD z5wH4l*rt10$Bdvx4Oh=8mFVtL?N~mKW;H(IzuaYLli_}>1+hpdjdC(dblyp~qdQtr zkym(mFo;H)snEi2;%Sol^@u9I%I!Q^gfqu!uJQB3Z)_)VR=J&&d6*IP?SAZ7@c53h z+@?e^(`=HW(QnmJohwH6ug;{LgYb`bHApl0?A&G{mZ z#K@Y>mfkWxZ{>L)TBXf6RO!Qh6k)QjXhtal(RfR)G8DYf1)oP(y) zyCr%{X5mtY`h1-aA!+>CY2f7_*Y*xxDf#YI5K2xml9pM3OtV;!LAFQ&Z12}DdI=h$ zw%QRC)eX4NyrN#YZXg;O{y>v`5i;6|v}Lgq7Et=d8H z1~BKp4nGarsjY}XI2%93djzv^{#MR7I0E45>hmsCb5R>^%o;Gmu#VR^4DZg(8p~0O zf2>k29>r_CNT?LZD?vz9Z2%OCN$rMyCG>Ef6m#tv>z#b2=NVr)76cM><~4c?%>1Yy zEwtSe8+Tr=PL{X#(@gfT6Qag!v9i>Z-R^er7H$U#O)-CzZ<+YR+NqEsw}sPqs*!2< z)xZmCrme2Lk2}t5qn+3>NkM(}F}(qN`-6$y`$;#EHW?syK*hSqlkP_8%n&*)9ka4|TVcEgoeo$^7{J@>6&P>!1DnaR!EFK=r@_*ot=GN` 
ziG99+R|QvTcei6DsN)iq@g}J-&4B>E1^s$E)$OSNN2cXMyyMC?2dI0jmyd!N`WuH# zJ^+Ais=o0yp^*#AG695$1Yql-?+;Spc1)Uu|t|z1evG zn*3uFztf>45jOu@Ao9KK&z4W@q_uY;;kCaHBjiBiYU*m=kWQp0vma-~t$z$l(N9!$ ziZUcx=(Retw%)dDT@~s3^-XuOq0hF^>V91Afc2DD^lJv& z;ZV6$ZlYj6r6!}{SGwfuLw--3`e(3k$p`60Ik6(e=e#2{x0kcXio*JXnF+!}?Hyg4 z)%8PdbBg|&+ud4)Jw4~*K*Sb{$pWTjw7>5<(dvG0Tp#V#V6x{B36dLr;K6;i(AC#i z^=8|WL5o!2dA_ADVQ-G-af8G$F!QxPumAqA!XevZ9zm3=|Ka<<{(Bh!_~S;V;;~#G zvgbg?C>z>0hp+_h^3WnUZK+R>V`M7#bwk2U?IrHO&d z2s1pb@D1V!!L%5BQp8rzr0Fl4p8fSOj~V8(w+|D_wI|^6Qst(n+jhd2Tp$MMd49~| z9dj=5||wE@I``f>>m=Ncbk9ohNgc%Dsh!(;aCCi-FPJts50ImvG{~@CJzJ1=H8QXlH`5z)$$W766=JIYvl$Slv-bZd0 zX4^XRL$-@_U~nXV5!57S4WZ=y6!+ny@R6dfEUkcZkt%(rEuNnJGb(OIII$fH;0KWU zoP=V6qNbF9u_~=-tvg)H4q8%$YIJdkxkBlpvi4=d{;X-Ic=C{0qq3l+b$TkACoZTy zQ0I*UD)w8(23z0NQE>8a{PWZ~iJ#qQRK0NptAT9~FZVn!CUm!KLAOIcpP!tUm+cvF z8G;xQr5r#*pkdWB^jyb4zdDHUOAA4H!>T>=@^hgugM;B(zynp!huy)>8pJiD`3gNI zO?MdyT?F*D-&iCQU$8%7@AldeYo9V6w;o;tLYg_``P6#Ph{g&l0}_z&Jqc?c!`J0R z>*oKLcif#!bbmRDSt??MaUo?%vSrqbQyNObkq^r60#O!SR>DF!%5@=LgY_B z^p`w)b}}5>kS!j;wlzm$<3vhBD;4$OT(8>CwQ*hg_$<*L=`$zySS@(nNghZ#^spMw z*2@KETp7}Ip7LPq{MfS-0Vz78nV;-X<0L^${K^QG-c)gVt_4Al0( zJ$4{hBs}I&I!BO(?ggz;iLPL~>Hxt0F@|v$f4CY`W&?dG`|VIffZ?;mtU7ghZWlY^ zx`G=$OLwF?7zepLt6n^eWwA4&H8udhn5&99YWM=CF_VFbHHts_J)A*TCcRAQ1H&#t_`kG|mWQ&-?K8&{qKDIYMqk50Ykn;3=EsEgh7I__ zO^1J{OH`o@>3iCJDpn0@AE>iTpX)x|*oZ3#-Hz=nT`AL-H^J8$>|<5}j=s*NT*RuA z9un6^mpn}#PQ<(8AgauJ(on`h7moV2Aj}DDKS9ll9ZxK~$_oSrr)_-XYL9>e%~)gMJSy{(>DiLHi(SHy6Na zv+B2YEg%WZMUWE8yZFjFXGYfCt4=N$DY7H@o3xsWAph`kR?%L>l2}79$k?CxhDU%!1mV^D zr>$BA38+zL)GOWYLY?a9SEUIO8J=m*r!=b>3Odid6VbA}9-t-OuEAIitriS~OwKYb zjoguhf(JxEY@aW>C1C2N!v(NQFAVl8w?mth8m2EaLxK2hpr!WW2tB`#KeI^mc(G4p z)7O(bBiP=VY=3^psZr!j;L)jU+%;CmbNaww>z{zk2xJ+6vu}-FQJcZTaP}>kLjNUz zVgl~ZGC{y1db9KrS>*2V6qRt$vp$-5jEfq*a0tX-!cAN^?r)>D;KK#WVf{AlI zqK;(UqtM$^2VeDgV}%&B07%?y+8r z5+XAi+Vug&Aa~Zw7bdgzV_&DTTaIjYe;OHcOJQeLpT{4vinJ!}!(;VnAmJg;ifPYBB2TTuc zBkq!)ap+8*r>`zk3-j$Mi-YRm8x-Aa);{i*D^vbzAiKyUp=rCASN=KmdUzar@FkAa 
z9o;IU{<-;AgYnJcJOh71s6FjDy>)|WSfR*Jt(d&=q+xdxNB>u_-|V8Vh24fq-?qbZ z!hhBiOlHUly6*%E83VP&4P+7uovpZC8EkzjmjK(J>MjVU@h;sdUpz5qvnL-&3E$J$ z-{blOnBeUGG3vHp@mQPh^PusKUhi%TogZle>arJC#`@^r6#v9wPcdGVg6Vgehqw6S zdOz9jM9wn|Pd;2r7+!ogBjt~OoDK2K_x3yG1_@XurCR&4_nkL9^ zAtW7$>PQipf=Yv^Wl?J;t`tMnD0wh`yQVYLfmgC=H;~8Opo;!>Iwro zhcN8RQn)P^U+nD)eV4Fsp2)R>xt1p7^JBlT8iYiXc==`w@Iyl@Svn@nd2$9cG*?4Q zE_s*uXUQWRkY&D`)frOuKU@%U*ptm>d#bKQlg6LCm$Re?BI_T&y22UW=KF0C#dFdv*ZSXjI$!+)kUu{)3jcWDX&LW^DQXwXZL?5BL6IdA;D61nMdCq}(wpg&@xIcn~btJ1{6kgQ?ZEj6kWOO&&%a||^Ku?c`XJ^Nu<}~G3GXfzx+A+Y?So$&gzs|b zMg7zr&VK)(A#Lz^Hdg;q3ig)j)mjdbM+^Mim4hLJz#LjDFCbTa`c&mYYvE;bM}Fc` zvhaai$Yu;~CIGALds#>Yjh~;2NDc@aJBRZd=Xs~cJ&8#PQKP(HLaBc-R@^d5=alxF z%y%&Fecd<_OnAsMz2tx0>|6d-UhtZCe+WYNs8(~nU4FBgt04R!X1yh2!Lp6tE$1rg z@&QM)z)mI4Y>HMdH77^%R6b2L@)vmNdv)Ziir|lLklvVYFb2iPFakBtc})xtH#ON2 zSjLsndC@U7YgiVbGR#R;Kf3o|<%ihpL~kP5-K&28Tx>OW;k-Ia=oy7ftSRovdMQbw$*FBeT$26FuzHIL-!K zQI%`p-Ei}YzumCYlmFn7WAAWMUZdh^bj?-p?Cq)z?`19JqQ)%aA7_=zvn0nOwOIxj z*8Q?D`Lp@LXmv^^l-ctJ3GVX*i!BIE6yDttfjC5MPV*<=bfAO#{B&d*^MoEQIpVlh z^?`A|$n>f6^+)Tk`Jbs+q6_3xiF-p#!!~2*_P1HcGpnuzf18}J!s;d1fs+2B2pr#M zG&3b|mHR26?vk@d#o8xypFz{nPD=BeR}MS>$-2ro?ROgZ`|Uqm;luyy)yPXtCb%_Md6rkO9F{v9rym!*v;(RAZaos2__fZZbH)2n3PuSSk?;E6ES9%rWx-h+Jii}fx0XU*bl#1xGl z;xqr(E2c&4zG}UiGn&*HJyqG=d!{Q}y8~=vY_KGTk5?RbNi$d0q@wKxUI(QWi*61? 
zsa#2v-v2=>YN$(g|B)30JEE0X5-OhhjcI244_Pou@-{s3H3YJ|z#9C8dTYWv^w4}U zD*YznObP?pyRtJhr>k6rL}Y_P`3`h0UuVBtJB(O4s6fL12H-u`fts}wY4bd3Afeqe-0U!0 zx3KHT#A$dg9pyTY3!>2PBh`PA{jTh(!pJhsr~Ni_08Q?5pPl9s_-duak+o$>C@9_{ zlU&0y)3Zt}u;*XFHJ^;(qmE~{F_JH~8{sY|H|ZP0JG4H^_xJX)NMEA< z9S45$oiY>F(Ng=IEnwZsc7*nKv-?0{i=Cv1%u?r-vv^5J}N2C z&W}2mj9uF%95}H@d*B0eS(X)S4fOODW8;E#5ra@@uA75J6vubF*P)OQ zX;|o^f=^3ofqZ5`2+3S~p8+`UF{VoRPMuX-QAvq}Kmz6nG;_=A_LTFd*Rn7a378W^ zNbCDmDe?r!mlrY0R+7C@EH@q(w`{-N9B}RBZOVc zGa(^Uve|frQZSKc4DsS!;L_z<9ZyzO)t9Ec?%-)mzCUETUz$V|ACZqKANWqw$BvuU zW(g2EDZC$*w0lEiH6_2bu;a&nYUN^mb6ZU^lGE)k)tfbC6wD=OOyXOi6; zgp17{TfsC_xX_Cn>f})~(^8z&;J@8qN-o>?$XpzxitQ_-jH6dz(6Z+!q9?acMWi4% z3wK~v~sYj#%zPSh!iZZ)T~9fce8>8k38P@1Ha{Q2+GpT>10V4p}5PLc=CrM7Cmi>4uWSVY{{k>91_+m<^g;7xqll!A!r# zT2C(f-!#lI0fqGUfwVZLl;o0=y+afax*-b6k@L0Y6aQrA0dHS?<=0gFr^^)G;!Um* z@yPmHcNZ!Cb~PQBl&TToYi`#=|3B)^GODe1UDqv6(Y8qOQi@B8I~2F#?rz21-CA4% z6nA$E?pEC0CAd2TIqCPUz4qQ`t#QsiKMxrrV`St+C zYO<~va-S>t4y_l1{g~fUmc+X@13bUN@8=`}=30f(qjjNm6Zu!&=;U2ThZp1@Dw^_c zUbFvUn+j&s@Kku1L)je_z%d=i-nPpvsp^N2H5W;`v~zE%eQ1+}W$bSmqeKV?NruP! 
zYQI*_QervvE>Clq@{YvqK*CrVhB%`qfg@)@urP3C=9|l%&RVmJ?hg}&Ih4ra)iEbYlUXFc^#&)OiFBDFOIMk< zY>x%u(6XsPPnf&hX6BJ1NLX1FB|>Y=ef@)Y#DB&%$ZHbg$Rv^ zGtDRP9`6dKB+B!jw5B%Mtb3AiZjWZuQhkmWYa^nu9Gmq~UH$t%)J7yfB4GRkVtbHn zWGJ2FZM|j-J9RtrP9CLvBw+pijcg$l?Ww3xnRXE;62pi_wNMprYo?OhQa#6qjbw@zK> zh`w%L!C&{`kl!1+q@58Lqx!@CgYmc!_LmfNN|8>#_} z#Z`s=X16C8VHSMA$Q0RM4La{)X{1EaX)6m z6MdBf-Hhfg|0;HU2Cg$F0YolLzvxInDsqX_!)@L6=b+|V14&)XLka3X8+kS&jx!?a zM03w5-b~~;%Gu3yb1w^0Tes-J8(UJ!gBanHJkP%Q0589%7Lp61OZ-*+FT1a=I7%TR ze^s8kopjU^m*%O}-c{WV`$1(p&U8zrIrnrq+Tt~#+98K9g3Zw<+es6Zo*!EBRbjtW zS~d`ei0%$i4wSNSYnau{6?<3&DRVQjlu-fSglP^ z-m#l?8!{NZ)ON8mT3k(}o#0|0s4qj~;1!8;X(wofr$%uTqX=d2Z>X znR!y>RPmojaT++>oVMuOKmW3|@a;Vu&+6V22D$qB7^x*b-B-AT^y#GisF-x*3KQ0G zTy$*9w{MX&ZOe^ili*LBbYau9?wZ-dBJR%E>6W=h_WrXP<3Ov@6D`<|1mZiLM#Jrs z?so%zePl%;lC);*5zN0!a-rMxX;&|rvOr(JyNBFf2}qu~7rAG`WZ@+8{kv%wyn~e9 z5V5B=#cu~6_uB{XWuG4MYPF{4#Gm1gO}e@p%o_;_fAscMJLrmxrEyq7GS0G4BDVC@ zUhc1RjOtQ!8~_$Ur+5Eslk%+m9Ah7n*W-=?eyyX# z^8H*ULH%?34(z8eCqK6PGK@mugw9n~0zE2{qZul>eUSszV>@nL z^Q(|_w%>0N9T?R~RevScwmosng}wbASQ!b!fz2`R>U!iC0_>$-CFL7>KgKI0l|a`y z_80AZo?yt2W#<*v$PZ4Az%ohz4}>t!wr#7Ndy6K2s(~hf<(~pUzg)XaehT-)}?#!AoSayfI2(G_xUnc3_ob4&Pu)inh6^ubbbU^U{KX6VK53^1TBz(G)gj zE`|Uq2SM;K04db$ zfFiyGmH#{rBRl8~j-2{Rt;yVjZ{ASCL^bwMoc9N6_3J8EdPM(CUu{{0UWu@N`}MOO zU*&be)ngkA8>qJQfBJ|HCN%pM%%UDkhxq^UK(|0wgCEsjc?4bj{cro9J@5a1UD6@Z zuTsUL{{J9nC7dNRn=TcZ$bf$8XQg`tWN>v-Z13wO;(^W5UJK6X6GAhn43l-tLPlL( zJ*K_*;J}eG1)uf1@CtMaH^=L7BRr8Z#}nm#b0m_NM_%jm@?7Y069WF)WS{AFxg)$x za(lXzi1V2k{i%>)OV>n&!s&E2ZLy9v|LBOWxUlrw(D{ySVhJ&?yIf9RL{YZ??$6W7 zYzy^I|LzAnNP>-c8XOw1lqAx?QVi(=)mmQCXf!%S3{GaNzxFAZ`*5aox+LXMOlt?9?d#} z$M(oXn|%&kbi;7G{o z1aPht4|;<5)OxmtoAdi$6s`x_T#w?)FXGCL0+)|+MImv3-riov-P4z^4!8`Nh$P;> z?`h6Fs9z>qN#Hww3HR|+d6l;w;B$T7ruyDtA%}*+^YCRPg>WIX1KK0)#^zB5GIw+Cq{V~dhoQCODr}b zE1FK9#(k?n@m4O`oZBamE0drO>Gs?|?K(i~#f5-+`)C>$ExaGW`GRGUJr%Vr$a*P% z@r@G^qY;(n*0zY1WYhUa<+1-JCGC*@hm=%f`)ILMikCL3YPcx=3p|Fx3_mo(WSovq z>EP{9A_C9ctJF7s^DF|7Y#X{ckOUlaZe_bG_tOlxER&T_ocoXGch^}cw=!fpvv 
zYvu7UavajEb-XJiSkZP(fkwdcqseJsQdQR-?;0$G@<$gsot2fjJ+N8zmq*}lr@((a zi_4<-D6e1HtVz8Sxp4ar767#k+v%cc%^Y23YYQGBO7p^|35{kpi;1!5<&jC?$ezG4 zq%zx-yL0oSnOcJ#9{p0aWR_J|0*9C*zVOnJl-Wdna@=h5f<1XsrNPj5jaDySn>O;3 zPq_v*)rH$EIFFo|!X9^t3ULM=Zbjc;CNH8d5DUn}TnJ=> z7Dk{DnaaFHU_BxySTtal0;?4)kI2i75p;gsd4gOA@r4RB0Jnebd!Pusv>0 z4rfgF{Kh_c3aO9TA3|6<=J@2)7&2KW9~W&8LT;wxoZnbduQ5AXvJq7_oI8om)1B@P zxa$X%X_AG4c1AADpinn+^_1}8Z%xOz?6lw!92G9!tGk?9yPW3O=QTjv*V9Vy$PLS~ zob=xnk1~~|5S4lVsn@it-SI?h^5G#*%INWrv{evtn)vJ?PJIb&E?@ImfkWq_w)pVH zw)mcZ$Z(vvHbI@wW_Qt~jweuuH9_fo)xW!j+Pd0i_8}(cv;{T7AJqEXuf9ocG-)fj zKp5CSKqct1eS)`$nOT~prGuBgB%1ec4$@?_BrIuP$wIp-ewtw&yGwI^!qO_LW1JCj zgSE7cK#)STeBLzy7sD{&vr$;KnN|gb%j8%e`Lq!hT6o;ytczD#LxQQm@XKnY05q#E zHuKcC63Db)jCZYFDyyMt-GV6_>ZN?T;b_?xPVEis?QPny%#l>| z*{_*4iVG_7+Cwhcnl{)^)k6h8TR==y#VvSapa-ugU`- zziME#JNA<}&|%w#N5gt%KR|`0KTBF(^R88Ki@pb=4LXCQTDVBla!1cENCwC9c&@+# zq1y|I<-g$CuBG*P@r8blg6FWmJ;XTjypnEfVg5+iaJGx`8YC8mlLafxY%+RAKg;3= zuz@KJp|(P-(>i}Rx&-hQj8TTG&Ja6nO5_^ACNXv9*Z-SxvM zJgThBS?NfLpy3Y;?IrRHn%uahDQBmN-O`T8(S;Ss@unItsqav|0S(wYOFOT(d)Kqg zcvnTE(TmSuaiq)h1+lV$V^(epiO%fg+;XEg|LK=;V3dy{W zZ!(*eB>(fob**0+rH_J2S5(1zAZ`g6N??s8_Ov> zvC@OpH~|jaf)ijkbKcMUnR{Fg!ykf(8BtX#G{AH;j$^eeS5pK<5^>4qZcu^m1XLNF zGYN&QhZqyuH#x6xJs3PDIx-B+*g7-TN5wahc->I9LOMMp@4y53M4KSHmbs@~W-8wf3qG#_T%EbLFm`-=t^G9( zD=25B=kfgwnR2-v&)3F*4;kcJ2xdS~$WG22wu*0r8*loED&c&G{yW}&sM@&ryo1)~ zh?`cmuHUpb=kwr9iA{K(x7UN5e95fipu}IY*eEL;$iFkJtz&L~9g&f5u;*0g1dfHm zq%HzVka|0#rwRUBrUnU9Hy2v2N@LLA%7SR*j9&x5%5BO61yK~ya!Y#m`PjV(4YE&b zDuz85VJB!Oj}tlY_Ih#)k?))4HWn4GJk-y;4P2XeF+Jt(*rtjENlotCqjROnG%1e0 z#*%yW4I##%PcPic@@0M=&Zm!!U3QCu*Lv)`LpUuYJT-`$_5%)jMPQ7-g$D zId~1`=ig$%Ngr9CP|3s%*r_f25@q- zw|pi~k4zGWUTWxhb&h-eL9;g@hWX<9&#V37h1bv7W$b|w9b<$Eaj=#P%Fzp9lYgal zEV>J?$p`*oUUW*V<(E7A0Q#_>GLb@~#^6%(?o+zUtzgnS6n`m}-$DcF5&DBNdMQ{L z!UKew`TVzdrM)UO1RZj*`9gf_x?>4;tk+JpX)C$r>XXK7RZ_LmrO^1ax}pFIsxX$G zef3J?qUt_@Cb)!V;?m!NpDHOe;lNe?m?vwPko;N`0PRPzW`3=9k2v>W%*%o@1MPi~ zc;vj_8=o^FL^PFwDmfi(#T@_c-5pTR(BKYHfBlMMO=y(D(0pfvnvYI+>+z1}9%yYv 
zz*fj%@F(RI6iD!)(0MR72gxT{M_*(Ray0na&+vCRF`xgWg6FKm179g^M;L3QZ(#cg zPeM2mt0XqFhw{bYZm3z-DmCkrt%w}8vsrI1exe0m32*|%{fH( ziJ}9>6>zi^1+req#%Tg@o_ezKL6KYP)HPvhRjj1A)*~8N9I?+xN|@boQVl-PyC~eg zxfu&7DrVk*KHH_>mj`F4&h{nGs&pDwzf)%t-Mp!)*5T<7QJUod->Xe=wuhLrLQZ|_#?>XRx1A*<{iq1%nZTy(B$k5d^%RWM977GuPc zTU-y-n;)vEStPa>q_`a$tGFbWXw4B_tGn|&*fTaYFOQ5W=l#jv|=A=HI&()X@vXR@t>DXkd zn*(e+Ii+ZMSHJ1{Y=ZsF^t?Er^?_#PE{9~QYATdlL`fG>im3A}yRgX%A)zam<1h$if zYzDGt1T2o^b$lyCTCRt)I!6^}v);F*IGD61yqXnixKfUM169#w7@6T3zdPruwlF<4 zB)Ui@E1>y_a&PN#QEp^_11LLA5Q4q9nVJ~`4lKETbWTZipRay7WfrkpanwP7XFuQ# z6n%GbBX*5YZsQSV7EvS?;}*t;2LSXUruJNgtW-T<_2^X7iWo{Blqf&=!69Y&Cys-iCU+` z+d_Rya?8HZ>>v4)Sx5Y`j&MWrm8xzEMM40Q&^{8F0{Bfg=(qo9h#}c6BCuBS=KzOu zLJ%ySv!UH%RzGpx_f;aFQ<9VFYU#Ds2mCr;w8LMN>E4p+y-#}{b2Y4^Zyhg?DQTpV zW9uyT)9_9&4QzB*t@wj+EnKD^gHT|>r5limSOTA><)h(m50LhBRXgNJ!$eK!@DZ7| z071U?O=?^ezaNEcYHpZnfAdLmh@>0oBJIp&*Th!%$_V?Tl5yGyjtr}1#4A+>YRh;y ze=w$5XST>2)9ZkjZvTq-&rkZ&Bw?4UtsXT&vjfG2h@8*EQ1KzqX%(Xl;>Gp73$7`a zzVXCu-P=ib5#5~Y=odm61GT)0)`8e>AKTljrRL|%a-F`G)Is}w(RXfsOnmY2=(3gY z)3L@mvLGx;Z=p)y!XS_(z&hxmCMpO{V`>_x$pRAl8xKk^um(fFIB24cR3IvB(R^0N!{A;N}g|2*{Nxcw`0LK4b04_V`0Lv1QR z*=ouBZ$lA8`Lpb5O%K)=r0*DDk zg{NRaWTsbOw8CMVB6#CORsZUu^rZ$9)dp}FL&=YopPeFU@4Z?cP_R!W3aAI^bLF9l z0-7@nVCQmpqBMWA^Br()cm`Sz+n{L+=~6!`Z>ULm5V$ae|e z7+-6xQzX+l4I!*hq7*1)Bt@?#1FjaC`l`Y-Q~7tG0Mg)aYaKRAMpdwPeCZ=^#OXMC z_27GY$6||O^TpbnnQLor!iTdiKBNTnJA#ox0xH|gO1A**OyPAZT&60}oQh-U3 z6am{Euv!ot|=L!kYu zH!;(-Df3QqyS0}0OkL&r(v#2Q`-V=0Os+o`o!aWN1*vW17}y9r`8*Of@l>>a#8kF7 zCVf8s>TS+tg$9bGM=_94aC6&fe93_tA&QDk$I@~Zmr7LADODCP?rjZ(H2|eHqNqfD zh8E17+9W`B>+LHo!3m5+2f1bb`=cpIH$&BQrS!bx$`Ue_xzt5ks}L+)08bQ&OQ7WQ z1%VA5kiEf|Z~Ezj#bfT4UmcOm*pm9xm|ia`9U9qGD+aH4iE0H-1i|F<4=v5Rv={F? 
zzeQVksDgRAuK$t3DkO&Xz9ovoxmoI#%GgOp1th;77}nHi@Q>@zKkIa)FdE;Acmq*eN%Y4RsQtiBXeArA8>*2Xi2k#pWo66)9%d3 zR-R$A+c1g(e#2KQB%xtr#@DLu3cuz4w0J}7D)T(;||HVsSKhqa8iQ*Df6^tJh1 zk)8gfF$y)I=h%_!^br2Fs-lyV)759qP_u)y43cQ|9!uoBE2WG)g}rOFXXj1XIasMz zL^-f?m~n56Zd-~7{9kbBG{gb6sTpeH{z1BkCh%%Z%2oDMN;oFf7t~>8KV-O0T=56O z_U|n8z#}QA&mnvdcS3nz_6wi)z%E6q)eLI;W!T2H)WBYbVZF4^=7?Y7^QX#pKk+I~ zTtzAAfKKNH9MsHnih|>Ree4P5Of~!Z@?6Ym+YY|~jGl5(|INMRQEj{?6B&VpPcoq4 zEH+RRkGS8c=#h6Ez4-}@y#MA=(ot_k=Q9xojoJo^y~nDK+ojV(2Z(Axo3P%zllWiuLjPEUwhg41`5i^OB06$7&B)#U z<$;yImZJiaO&uO_sT^uO=zE%~$mlSaCKgvxfH0<106{Y6Z(}ERG-gfOi)vbzgSvDF zSMhc~$^`ML@=Rkxhq`FbkJv`*H9x3G|FDDkex{k3Yp3;zR)Ep3<)Q65{fcl_K@2M* z`>kbXRYg^#q`G`Qa#(n!hTnxg1??VJvckeUWTj5);TE}1v<;p#HS1bGY7Fy^fZsqI zZ`w>+qb}b$#~|=vlD%R4Daq*_9(O^(!tZ@JFGu}zG(mXMQA6UxSUD;y_AZtJbMtaQ z9v)I?rHCam+z>uhdMym1q=?!`5FeCUejvK=H?_QmEFA44RUj7%Cw=(oO0z0;bLIH? z<OYZRkz|3~=34 z4!ZqLSUUw#KAVRmI+}5toplIYRSkWrkfgekr2#}p(+`?rp$x%BJ0llDX2N*Ld~$bT zZx>@f&7^eO1$?yy7Z+*XTGQ;rJQKM)cF83omeA3X2FJ!jdG+GQl?g=BAHo}SFSlH_;v4akM)9zjiC0g z5Jj(Jr!iy#bMA1h0S^@Gvp4SG5>zlY_#&Jl?_bx^xbfSwdxh%CsLb=sZ;2d$1iAyq zeUS2CF&snM=GNv6t5uzz_7|~^n+eO-R|lQ56e@y8DZGTjyczUwfCheL-)sO`M;sgQlFIKTke^fFAUA#2Y0) zo!1j3rgD8@Kt<-G+Nqh~GvPnz*pX+J7QH#3Ub!DXRsus%wjdIwj++d_Kv z>^Q=4_1?$5E6Qr~GZ}mK_r5J?Hb3#dFCMPS5i7}=dp5%sN*MmrXKH@~)zo;movQZ) z{X9BB3tV}L7-korLscI@y8@`6mdnWpD`*J+FG|3Xo5adhKZ zDERbpy(@B0%g`DgX*|UK^r&{~!Rb;`zidOzSo!kGc`Y$NeMkz>@=G-8jJ}xCp#=$` zB){*03!P@lUreTDyfDk~2jmAepk2|7Sszzke2{u91uv#a{k{6=b-{7HHbh>i#?|l{ z@xIORE$gNuxb7#$JV+xJw*+tUjGSHeujW>4TIuj~bD#A4rAo_t4RoSG-M`~o<`7T` z3*XIE5wsjH)ekWz1OIvT{QIU3`toa{NTzW~aXYe_R=JTe49tMxxsGOVLtP~(D{}T^ zLA-n3-2{41g*}&(;BwhLql%y@7fwqo0=Z?cffNcZDk~Omu6(Ba9E`15?Ga#4pU@FD z!eJv78q7a}0i@YqF85k)E7n*mxAKy;@A<@egJJ=X?mvBMI)qHuF~<&ZuI~opXlbDh z(5l>7F)ZZ+4RcUJpx9>2_#;aqzP%bZC_5A=u(eMWnQ}4HQ~~>hhN~-=OT>C6u2JVe z-@jj^%qaV&=3=2$o40HMk5E-;(!@MjNrmv*#3F{c#a0oxl1SgoWY#+x`?20J%Xr}$ zN^J(0Zu=Y02R<7jKcS(-zj2m8ba6@sRm0;we+DMSW-YH`o75dG&fPC9METHu9}iitC!00*=4G-FZ7z1i>yXzYO17?+w&3+tS$$v7K#`Qi)G9tM 
zqgS;em~r8-VptKiYtdr~Ujh3$L*m&u*3_DU-f;ZQY-at7*&P2Dv-varbXYw8H_gH3 z*=#65#77yPiQnU4Uun7G#|EWsjOoCn0ms3i( zL`7Ivc~@C;YTn0rGD5TF$YM@5AuhnXwP*hNB12Jk_m4iRJdNwimwWR?Q&@-GF2}i0 zT!6?-I6?UfDPr(x5uMe#JryabUtc6~&Y;FX6$y*mK1N#q(0TUdm}&x_w58X@GW14J zo!#~^JxD5X@~+BoU$uT&QWu~jEb;Rvi`(XBrG>gVE36{usGuwe%PTDz~$|Zql zRX1l3zEV^zr+x5XHO!pz@v4XgXSq`Cw#q4jb6Ri%@!pijX^SRx{v-!OWd5TLUr_z? zv2);IW9XnH_}v*)4u>{lcjk8J5NW3vhwJ8S5yJ4kUv1Nro_(@0m&m$due$a33{+=X zyoshi+#LO1*U@cVu`;>lbujf=C)mcz#^aL)2LIP3LCmNtB{QWvVyQNbY)z&-L7?j&KC zjE57!)-oqemteuFtUlLV&s?U741k@S<>dH7{v|K{a2S^^D~6FQ9yhOnfFh<_{*sJM z?IfN})j)Zr+?>OoN5Zk*>Kiccvg;FvNF*WQUGHMs3J*r7bHX$Q|LTmJS1?5-uhOh; zwM@&A(5V>R@E(?wEI<~?;j|v2lJWeJaI7YjBI$*Lwb7Ucflbhtv;PR1^iB1yO^c^N zS;$kXg*!|kwv1p-h45dEeIJQUjV`t2)}LqL^4?R*n=ds1?-7FBCldtL1(MONFGVY> zjSo7ICDEaM7yE;uIux~q^%^7rH!~aZ<}KR6BcVSHqP&nH=ANE~lc=&l$M~a2>xl#~ z@I&I)Exg065Sc8#$K~?HItKC1z}n0(;H~ti&Q8%3k0}|LwUu1 zAF8W7-G&-ILd%l~OCpN@iJjHCq<5PvvbGxAx!mqpS1EUq(Op=Ij;Z1zwnwryhf+4n zE__#Iwd`q;+h*z#&^E46faoSlF}ItOXS7&dX1q*%;Id-Uc)UMdrUQ5L$0Lr!u=TES zWS}(j$$nMqq4&65<$ArIg1$l{^4jUJDrv|q9AZf_Bqj?g_hmABOZ)1NemQ>lz+L{+ zSd(SF^PVF|-y41j5BZE-q8QsoC}CF~j+Om>mkh&RxqEAj7lZFB3J$;o-jGn*q=bVf zb z4;J?W^PQZi7mA1z7HVjjjwn?Q4YkIi*McGvurtdwK@8AXL0?d_&)Ew-#} zN_>ZPALgYn0JBzE+ccc_CrI@;JAO$L`0i~OYZ*C-Gqutf4o4N0Y@aMUNKY!S=T2Cs zmbcJgLJixR-dKF<-hZK>YDQI7XT8{{S97~r(?bKhDLVIg0eDGzI9Y;{v|e16Ng znN+w0G2o+t82{tlTrd@z-oSXsiXCa21n=qS>0lrTtIGIYDXMc#p0EXVdOX{G=~WlZ zcVp!|`y!m8VezN)oKxM?gM^dS5aQe!oS}4ar$8M!&;XCa=XQKu)oou);04_S;84rQ197(pOf6r3OG}rMC)eK!`sXwjO zttm?u_Gh>y?`Q&DAKG?vq*~8QTLfO+t(4l%Fr4$u`JrlIV6m%p0Q82UFD~p^5mFsy z7Y5OYfU!T!-$}-7bFvT(y5Q?NKK$A$Kyl`5s)IctD{By@87O?r!sK6SA;rzx5)cQeyZ|E!3V)aJYm9_TzphHh{e1kz9((m|r4bRoH%!`9*%nZQ ztd!H|rN7zBLDkjXq4%Gy@_%l!I8fjIDC`i>M7*Ne)P4z2u5@U~=sJ0}_X$r0b-+ma z4?vlz=}=}W=wXPVReTxe&h!0XAdIOdSVp7C7|fp4nAR3S_JFEi@8IVLE>usG;y8YCH3U{c$}+68U0iZB?eSc8>-QrSfp$dO19bKYzxu6ERW8us=Kx zMa}O$F0R;ZP5kO$ycM^slksDdBtHQk8cZ7AYbq9!r%R#1SZq&~^A8Vq%sx3#*wpAS 
zXk+AGe}*57Ffz{*W?Sj>wI#*CP)>9S6MKApF(1x*7)W0%Mqcek1G2egUO>b+v~$Ta zn4$(_Imm=0{PWeO-tp!bNd18DrZ)*^Gk+`MAvraAn%{k+W86Yu1B><&nCMu@?6KX4~)|3ri$Zk;=9d zHOb+>O=$k(-G4poKl7V4N4O@Rsn=hQ+5fej{I6%fzh^(=3z%Qtq>j##1(W=bKdJ2; z=>z;IyAB-ctN-%>{g-zg+2A(02!yo#GpvWfSqz=~>;V!}Bq7@T1^M}tSwbO&Aka;v zg{B4mw0uLZX|AG!A~Up^ULWigSC2wAUgp6rDJzBXT>z>H7z>J_6#6Y~C-0+CW5Npk zg9rCUb&Hg99WRJ3R&RVWbFil zx{K;OB`?LMNLJh(bKb@wjIU}dzjjNQ6{EYL-j^sI+l+HB?#wK2@*8wkLO?@pNKB6T z7QK*u&RZB$#jvC!5aHD6MI)UEO!#{K^BiP3t5P$}j&N&i;?0$3N~y@6P~k;a;NhaO zz);(iN8mNAH&b3*?Nw``nB+ccm1G9)4Fk0N$j@+OdOD0xG>1-Buf|(%-{XYXJb(cj z>h=(mdeBy!EdRNd+b!|R{zsg3OPt=V%}p-2#G_Qgrk8U5$%hDny%Ueq^W}Wd?{SUP zC4Wkbq_i}C+6tN7S?8nWx;X2n{W1U8HK(=KTK(>aOp95PwZ@C@Tn57*+_(5o$GK0R z``yC~NL5vyv)xwxKiJ<*;AKcvdTb(7PX68?Gn0FH2F9j-i{~*JO~1tUx*F);#MN&$ z!NtNFm*~&P+=)tDa~*Nkce>h}aJ)FDoP6aSV{K-c8(*S2VPVrI0J%A?p=H|Buz-w6 zl$T<2UZ=2XtMa#R#!BsLPDSQJh@Vl)H0p{=*eSzb#4jfCAnZ7oj=49%uitN>5Xsx< z>;FNHa+@s_$6u{?WBk-)@tn6EyNqc#qk6A#9%j-OUcK1OMzB-g=JhZSvRR!&F}psP zIY>`*tK_cGOm)8~Vo6w&LO2stfVGia96e>LXunR_Y7VD=?;oaC`3GlUHg~de)hR}T zV6T_jaI`5^=#QJQ>UFx(aec$rxSbtV1O=B~O%A=}FSqb;ZB0)VmbRp>dc5#TN=Rfu z9flcyQdXoA7|d;EyAI+;A^LdSok1XAKzX0Mk+z(1>nXhkS6v_s;?X&)o?6|1A>ksaoktK|rl=g{PFi6L%o$$ijAil+ z9>njcEhh(Q7JiqFFicii*@pLQIqJDawQf#i3Jbnf8LR-;%*h+r^Pr{32`d}d)tYot z_}lBx_ZTczT5_{mW9S^+Dv0IQo+F9Y)rlGx2|lr0$#Ri4;aWAZ!(>%Rem`E)9KrJA)Ol=5N{TvV3K_eqD{ zExWj;4tSdCDw=(MnJ_tz6CwQLGTi5%i_0o<8ZUR=Rp1u~2~lMj z;4g^cdkNQZ-S0hHCJK~t@!@wo{A?&{VTX>$22$nX_L-aw29f>5hKrUS&n4?u6m@TM zmzfDddYL)89Oy;omcZmY5wOvOW193IRn_tr2=6B$+pCCO9V3ez#3SO7C3>X|n{gJ#Q zV0;jNf$Tzv;>D`=*eM$)j~$NL)6Twd95gDBU<6RWCu2(Ja9O>8?9(HsS6_X!a-5XE zcHTOrj#gcur>DK-A*p-oSUfNHn4JVzifu1fyU9gTD2u$PoLs~Z1~oGDuG_$O)4BHnde-yf8u14m#Dad z(+^kT{pQww{G4tM6j({6!&uPHzU4J&7=-Q`LK!%k*%31giRIh>!D2;?sEvnP;Wc5t z>lKBz|J&Ko*=>KNnf>WuR`)Ew7OQTY@2~`5KKke;faLPoTOd5}*$fQPHM6_pg(iGA z?FVNRYpR*{i}G4pcC0CmZ9Y+};Z4Xt(eAU)AU)ea=k_cvot{sMMAwgyG}`i9L2FGi zkK+yx9a0-ALa2HiZTGs0`=kvTBEBYL(|$`*r#_+4;iRf7^pmGT19>nb(amrp&~D2N z(i!CI}^JEmgDV02`0oqf%+JKKu 
z4&_>`sq-(>@EpuIDYYfwbcRdD-SwlS5C*O&@&Qlti<*dvXACk#>D|TJbCgcH6 zZ41o?j3P&=ufA>5!=-kzbjmL>HGp;=gV2D51ga-SB1Bb!mHfuL1#e5et{kYPL+sBI zljFY$-Me*@K;_!pPOb7JZf8vg7qruY>(Gv$winepS;AxH+(q{Hh@5EHB-hd*2pg6nXF|Y^8 zV867W zV`?y-cE)-q2CC?4x;|`+d6lT|7mhnrNm_sb0Q$wczD30$Q%_PqaG|Ch;)0dfc^d67 z^|(Bf?gHB;-EvnGrK#(zqu)VMbWqNihpVOgUX_^H?r55Rl#cOC&Szt*k%b{7m)b`xuvxs za(TJA1$I~%kgOUJgi`LeZPsya4LL5V~6cQ`^$AZ#&MN% zcjB;g9yyvt`(waSEj>KZwJN!T~=#(~KL)>CR0id7+HMtApcKG@A(&FdZ%wN#?H`;LvSAU3+C ztU=;0#nB_<8jG1z!Vak{`k&Btd;20%*ryV8M-5Th_ zxMc5qfH~ige=crh@kpeJlXZ${BqUzQRS?WJ>97-%Kjja-+c=UvQmJ5IP7nh;HqIUj z=oqZbs@1`sBIy9x6&hnNDCx)Q06Y}YS0T8KQA)(i(t>WV{t|b)HKy*zB);KlvOD47$iWj!&SJ z-c_kcyrrn%SlU?3pF@AoDYCXX7LmKZJXp0VmQ;m5r$nIDS6i_&l2b9yT6kRn)c$Q2 zI~q_q{krvebV<@Jz@f*fM^FK~nW=^h*~j@_!$W%6M$m2cF;nZX0pUz!SuNSD4 zjy)XE=CyR94XYlKS#q6m5yR2gbJ^{$sofAPyNxDsi&5KjC~%I+)4JBcBb{tze|jzt zO1YLb@h-0>GlB)`>3^2SO<;7!{ahWgXsJpW7JRKTm~<@#a<67FFTGT6XHmxi$|Emx z7~jh5HjtVRPHum_6i~L_2iaq%^}9 z3h@DR1Fj}tZV7x$+EHk+W0(zY7^u9{-(BJRr_K=^s6xv8CymaVLG$mHb{Pn55Y*uu zBhaKDH#Ze5sC0n{>#$$SL%uB@Q#a2{?KR+rBo}Q+v$PD;28q#RU69rfTdI<8VOWJ| zcAlrKXl^ioFQiWVdQ}6YM_tg>=|;znCy7(foNl~C>%?T|^AYxekfBHK8rby)9o3qV zjCi_wz6m}|hZP*79Y$VR!b5e48MT8+tNba3MLYL#k4(Oml-Z5$HnJWr1QEW|<`2nm50<#4qIHv| zZyS^M@h-=auo=PE!xv1*yuQ)@jQ^$JD1I#^Kf{wrnqyO3(wJmC#m(>P-lrXFqSarM zdwHo}DD?uSr1u-Zisf4wR|A*Y=#&4INQli~t3n zy8%jlYqETqBuO#ZBtd4=v3xgDij|lupZz*j*s$X!D`&98w#cN7&fyG? 
z$oG)8Q}^w**7JULIoU8lNFDoHZiFTEyPwP3gUNt*K4){!-KQ_UP@kM=1{9eg%o%@k z>K}w+@S^o*n=g4+VT;-FOwQ?zapX!lYZ}9(T^}OJ_?p}Da*ZkbirrF$RUDeJqBACr zH>Mg^n82Kllha45m+Vj8y5j2%Hl4HMF%QzFsiRvg%Z(jqgh-6A>$i7BrWxntZwKS& z&Zm#vE|pK7+m)UjC9JoiWkN8YIZ*g!MaG8*JSBCriboRGNRH6Zq5sb1i}4MzJB4aR&>3FinsM$PW^LE1>=7@Z>k7Y z013(aUXzaxB2GMg#RFZ38Ii4_H1n?7oPX>X{+e=pYRc?dx=izX++SYNurXQ;w(5}I z(tT5bIODJ|u3n7E;ZeH1K388AzcjR~2HIQ3c+cL=q)>>_8|A(8km&bu+x!K{M5nQb z4&ypP^vtM@M5EOcUZnFY`8G1a=`%%GFQWN$f!MHPaolx@bWl;GQhe#CoAKDoXXq!V z%`#yA@+rc_XSlpgC~GT>sNO;;ry~CMo?1t(-ihaL6@g06xS$5lyV?exQ-glzu9lv% zv5aeXyz|O}Ro;wox@v}R_N1!v1Hc>ze+!ak7!pmx6NozTbmWAp?@)kKs$W_OwL+V= z<&|$kJg&eJ$**yhP1r$9H9zjPp4*h_QF`CPWJUaSInTRAQ)@)V*mQq=qtopfOn3rT zt;k0i@+HPDB{=EiVQbqwqtl;At9?teN)h=B7D%Ib%o@njV*cSM%T|*V<-aqg+zqo|RCdRUtj?U?LVv z|Muvs*!7u9}*ZEsDWD2A|K4eI=A_csl?SH!;pk*%c=kl7$nkT%Bp znB%>@E8z*n2mgN6(Me7~9Qy%WaJyZ&a!=ixteAd=R~ar6^68x^PGpbB`5p1o%S0;J zOEw24r!VC|CG{Af?G+%g(9IqkRbc|ksJ*^O3R!c?d1ap&PRIA(9iEcDVxOs=?3ij9 z^HncFn8R~!IKeyjZB0t$$Bz56I@?bmn<00C{q-&kp^CJ6ZgQJ?1-{*)tOVTa#sePx zfqdpK`%l<5dJ7&lZ(dcX1iVb*0NuWVtaRoC7ob9GnEo*kEHfa8SRm5;(h}U^h|OI4 zgK<;dMCylH&r3sANgnUz#!4;5b)@s!!7!#heumH;pa1UE!us$%@Eo~(7)FstDRI^K zip@73GyP@HlnHV?(Gd?PI+KV0cLmjV{5}QV^?0UaQ9fA=1|gNyG7(!PUi_7!;V3F{TGk<59Ete-1)Yu8 z$eG6k1MwQwbrjydO0pPeYM4cu*ZVIQ?GBT#b)_os&fO-MpZma8F@GWv4XnLMRRllBQ z$YP?Tt?s7P;A9lCN{glqaNxEAO~_ku+EYp`(U!GBg*QbPzk-hvMoQHLEr%||7yGV3 zBc|FWI-yd3>oj~pp6f{|SjhrHMza5@t~ubCPW1JDd*zJb8imR7^%*!4UtzT;;{(|9 zOta-!R{W=Fk&?RIRFw>+!h}oS)u}xQF!KU;P{sED80b{N9E}{e@ph|qMtP#71j;Pt z`Xq4`kONvw%w_)Lt<`ZIAdrLUecz$I<@=>6IN-5wShD0#XuWj918h_otNLGT2g&|&3l@PR{LNrRUFU&oR#tV|KMtT zwsielv8^RquD=XitkDnU#FTx|rGwRM;1gs*+tcpjM%Rg>=E-oOlHVGxl2}xg^FAu6 z)<~pne^6D>gV9)ha_jP^E0mjW7xSnlxbUh)jkCj58Tk0?nuOoe#4^#FVa-^HlIvb2 zX7@$lpKFV-YU`RH%=fuDB;|2RpZ%f7pD}W1{P&li@m57kSOr_$6ZkEy&l?QH0a&Q^ zw}u;;?#IX_4o8Og-OZj@_xC%BKnw>cg;j|q0)qUlimu$L#sB!KX_x8bxdq(;i}#}Z zS08gZ=E=4InGMO4sLsq7XXv+#+pab9T4^~VBzqq>?8(a6!)tCl$!>`V!)z6Yg`iO4 ziI{eGQTz}+Ua0QwJMp(mz>w7|;##|vPgvZ@j_+7{{FF>2IA+RLHW 
zeer(aAyvUG57M}o7~8u{40F0_$q1@=LAt^v-po_SdbZdOm6Rl)W3^F-JVu6F zJ5u*}?s9$r(l`cIsOq4{_%gKCEeAcTevtbV)bzcFSkL#51c}oRwBM9{ers926#FM~ z@ZfH=cojNTI^<#h$WgVX@k36}%ya+kyc5OGFh2?%)-5`^!ATTQ z5xwEBpI1qNzaue8z_!Hq6JPSVv7hcE%ewgOJgSZyAf&2?x^HIF`U&}!MNjx94AyQB zSs6^u@GYn@tV+wCc!A!{7{lGM{rJ0v>Ewj$<@cto99(EB3r^bWL<&xXh+Sm_cOEtA z@H7fvu!G#b>;Pq&akp(||I|BIq!=oftq4L$ER)55r&WUlz|aW4OT%0}ogEAc-pKmp z!VcqhJ7#jmWo4iHtm`4?ONmlN1rOWeFZr>2rBB2`CnemgO2i9$w<@!Yy8=P0>t59t z zsSdsxwHZ6BdXsJqAQDChN2Bh;q6`I_8iJ@gtLK(OmkX^pGQqdGbSB#LzO$x#q5XGz z7mjo}UltRAan|QvHJj?~I2624?~w>IAz&dvgtCgvICMOCpCsR^I^Daj8@c{#@zL~AznpJ#^u1hLK7!XkCNLV#M6J@sY~Pd^^r+L2?pFj?oy0@=^|KOM;ycOtsXq#J z2pf0Vk4dxq8Ox{jI(T!;253ll#6*8IlCfDUe zpk;|$W*O*2eC)+5Ke>B4W2z)AdLa#UV#8N_Hriv_o0MW$;fB|N?%UTGBD{^;>D4yM z(}m^|FI0UF68hak#T#r2H(u)MB!&^~JOeSjB*u9q$m*FGqvjgA{Kvs?f4+?dKcTbq zK^`vp>|%D7&?T%NfB@2 zuU=$~ty?{RdlzBgaL$anMQT(&NilCQ@V`2r=FF$6YHey>Qid{R_1T?CAoVFfOyg2v z)&}j5Yrd4&t9&kGUb^|5GC5zAQ&H2g-&hep7&*B0udWrGJs5Phl6_`Kn;Et>i0!8g zlfU7g87}lD>5+*!>y~) zBtfKy0y!q)%Xu@($|x54P}O-4Hy#|%t0;b~L0WWQsvUoYAAHch)NraQ9g^N}eHk~y zR8A$lvi5?!LMbnClF++_&gV2RrwHR32lyx|L9mRUyu*Gp0+LuMsGZ(303NIy<3rFL4vl3;qMyw2guBe5R zPSGRp5IppXV_!5={3r!alTM-O&0#P1*qRCX%x)^nyQ7-|O+myvlD^}n2Z6mL@2J`a zk#nVxpvguLXQHzF4)HXXF)%pFu zh(4xbHz3xK`mc7;d1NkIM(HK5%O{`>ectgTBSweKe$8sLy#t+Qec8syWDD3pX32Q% z>NJiw&g56>$Ecy9e@k1uSp+50-$CvslZ5FZbiUF2Z$Jhu!Q7&EQf}c!5>zKYgW|eF zCyf~WI7^UB8dRm2q|*%blY>J^h_k4DUpB8V)M?fS8AS3+N5IXh4cwi+3GBtJ6P$_l zQ>4|_{_SN!N$NJ>)?>i~udev#9|OpniaI8+seY@FCi+|*xPE;a*K1dDeT3!8pqWz7 z+4sBTdA6Z%67o?4DLyDXiP}I*P=C;@+v(ZMPh0}s8(3O|d4ssE=(vg`#IHwAw)Nc| z_|d1}%Ses(Wmg?gp6LCJP?D)(`?y~Q1;Yt`+G-4D;D2$> z3R`sYr%)5VNGeH{_E1L6-BLeEz90Yd#KQj#$sjexLr!Vs5rX{>9{e1FgI zD=bOkG8p~B_ZOQLv%6 zik{oF;*An=&AnS!a|5UwU{I5HmlnT8j_-pm!>LA(abvEbxz}3M8_e%qQho-Dvs&e5 z3TF>_W+c6T-^{5a*X7fXf(=nzfMLLVFW!^SH{NZhu3AI^rZAXOAOgDOu(c_`90`G~ z+?nGB34OSaGj}X-N8#>}=X=yEl3wWQeq1a*#l>gxMOUq-frR+Nskst&7yFg${2D^h z+M2G45|;8EpD*5J*C#{^XUo)}p8CyQ-E6ns<;93$URxH`2g}OSfJSMTD0McmcTmS& 
zp%<(i%>y|pj=wrPtc+PW0+`n~i3VSkH^#aboNbY~PZe+e!KK)`TgMd# zK4l!0u&-1lB+DXp)e6(;1G;^@do{gze-K-Kk+v8*b}W&utXH|{mGA`46n3ceE4%W ze^i|C>m#qta()enL!RbbsQV_$U0z>x$9c88BX3eKrC=5Mr-=2k>7v_L zZM*nBp7yooMEB*Of|F2%e@!bmOIZlKduk`!h$=YA&=(zFiAkdKDDP-<43_~z8wR+2 zc+Z3eiZ#GLAl!({nC*14r6WCVgKu~KhT5OsvMvp*4h_=cB+O1xBBnmn(+|5wf$+jiELl8|*-p@TxI+|bphr^F& zGw6PoiRX*W{H5ONXS>wmRWp`fk95~~`L8L<)F%SDRB}`0wgjY<-W@g-hyV!*#Q}8s zMV2~Zd{gf}C9Rg=T|M^0(|V70*@TW<>vgocs{WXgxaW-fs+yp7hBk*9=_T+U=d~z{ z$8&~?t+nmZ@Up0j0ptNtj+tu`gAiXIa&Xv+(SF=eWaWDkBGj zROgHy4m6{gbcjz-2||7Bhf6SX)T@Jg(^FGspQ^4whp-fK*522~7<9v3-nVbY{PCTd z2?g}v!AaN#^=S2%%s)ganO}2C!#u#jlflw5Jp?r`8&NI{JsQZB5pNL$_2g~2-T}{IG=hPk( zWO~1AwkKpi>mLh5JvcpZzFWJ%b=zO?^S_Sb(cp+Ky#2NetE>)TLW5PP#p9GeODlis z@O63G{~&0nOSQ1LMDEh4!1n|NmE_#(FFb*7qXArB( z0ugyy=*<543R7=02oxQ(w52dh_2fg^>Ogds+xJ1_S;xqWqPWZCjZpLiTrQoPOvP<> zxsR_m=csL!H3?>1t1%2__{NZ^10t8-oVzm`b8ZPCNYs0Rmc5{^@8+@~87+?gmZY{1IXKSTzp@e%rIYWmmkYZY}23 zR2Jo+EV*dVIen(YIRKpZ@>bst_KJ$WM!7PoO~+55@3Yl5e8%Zrj{aqdeI6!~T4Y6k z(nmd`g|tRj%M!`nM<4X_C{ajL5}3WhWGxA>~x# zWxk|%ea#mM%J^i!+75Qllx%nht+*^ zAw@_<7bWDZZmfKL(W-s#Gae5mWg(8@<4ZJN)*r&KBiz!v+^K;LiGy?|#E}}qVEGZ~ zJ0%r`slI;&``1wSHJM;?1lfJUlCUIy`j!;1N3~OVFy{a!WEOv3I)zZM^Adta-P3D# zDrAjO-uidW_nnrb!H!q)J^`D!Sd8Z$akkr3^ST%hffjBM+DhAsD$^%A0u&$}1UL`u z{zxp0*T7(<;jq}9{lEtyNHw0i#>+WTaQVdzVh6=zq+4`EQ@~pcXA1cDf4+ZsoGhW= z5Q1cqsT!%(c^>}kVtgW=`M|ud@{#OcnkHpzdiT@ny)YYxBJB9{#n)P@Of{8=2Mm>u zo%h`RS~9k|Q!58MmG>?Gl&?agN5_whS%9OR2P^e9$YRbD z7sC9dM@r|23bGXo&a^Jw&0Z!y-qxIQet#_exxS4U+erAZji0{? 
z3u*#fBm;dSsqwm4v1+%C5E@G94>8s4>(wVYPjiDq@f2;^+O|-1T zzkpp!`aMB2NIwASIqtRdEj=6Mh#KyqwQ)^T1{N5VDZb7dip!hzm4u_B+F6*@4$M<$ z7!d)(YlmERMxLg|+QvXRKxn`wUeP*GtPXDU`n;&Lc3qlmYW*x$#H8w3`?p|HLerb; zH+#ZNG37#@lMkZr?(^$7oZ<3oVfUYE3a6}7mS-B8Jzr4@R9f@U5i+a!ja6;bP)_KZ z7JszPti+4nMZGDFo|_=O&y-^=N0GNR!2;0?=v z0?rn5{tMH5C&3i;*e+YUpSH|zQW(aM@FFuflSx=#dOCX3=y-nx*K^Eq8On9J-QE{j z_vPl|NCQ@a1=l51`oE?$X4u?aTf!#FDT2ix7Mj0e@5MFQ4<<9=(7ZClC(fA5F>KVV zv3VQK+>5P7+-kT#?C87}VGC+Kn`Bbrcaf^L9unK8_lZdr!o?P7AR03Ik|y5wu){;U zdV{Fq@;YYkeQV=^X5@uWXcgA?;+iS@s255_X?1irk4_PLGg@xR`V~dE2aPAZ(f0#$ zo5I8=^%gphzfy7=4lcM{^pbVT+YG>JcgA=H#Q9f zBHvhFu<1%OcDFnr@bF$E+9WC8U24?1_uc^OuhQL@<_D17#(ya8jp-qP;ul;L!O3;VGh(O_;H+nXEm-$4r!Nf`%kzA(?`_1*1NU^dih#gXmkfKeH%ZOEpTdU;8zm_SN16G?s68V#@YRZ zvX*%!yr<%vtYMSU;%vUGvM4%;d=bPsgDQ5!)?o4rAO?qiBkHKXav&GOY`fI4O!jL`7E29?k)EPYBt6 zc{3Xc^?-0MFMJNQDzKSwe97C91wCDNzpA~=VweiryvrVPG(IjW{wJwe?(p3#Y!m93 zbbM$p5!m|f?*%-^nW1gFN_p!Wpn07RXW4JlX}{AQa-}hPO(J#;?_ezyGFjtAUF*IJ zeIRfaOs@IZ8GiGwMR#Ztj+pKMUn`wZ?xz0cNvL-ZCDeH^vN3F`r8k6vGf~adxxvE* zyr-E{iuG$fl<-yu-I$atQf57em6Xe^+Ok7+*K+USXof;4Ip2C#(Y_THGF#q2s#H0f z7OTx|oe!+7?Lv+JeEF)=prpI8MQN^T_q^k^ZG~})vVKxV5sn)+r9!>tnL=@Z4hJkb zDsB>gIRrMBm2+b^HKqh{GY@cDF3xHK3@*5f`r+~sW%D(as|(2Pt_gukKWWf!j#<# zXH776+6o1jxlSJI+^s(wa|&0z^JN;vYA6fsZ(B6r)6ar4?v|BR;-!TeSp6(C0xhw9 zJ~sGK!fsV+Uw@;_&(KN=;raW3Ut-p6lX8<3pz`%sTagf~cRG|28gr4LlFgVSBToJb zL6=2k3&|C#n7W6Bg{$}R{MVFMK&p7A)sMKpaG_}9P6sJJQcX*3#{=G6%M&8aHWWJ* zq-SuJz>&M4rbs$2J2`{5Jj zXKjnM^Y(>x-`xG5fy&(bqG_aAYMDngRv-pl=E*?LmIkB%Az>m9DN)6c*!r$jk`&)7 z{dHrx!fX_qGfF_0^Fu3w5UW!3_~0#03RO6iH2p8ZFA#jV&}a!`qe}}i?M`e5GbF661`OX09*io`$VOG3WuF-Uy@EsX6i09GApC_ELExBglS*wo6(*KM zm^NvMS1gpwdHX&a|N!xW!f^L_Y`P;+w{`^o{zuV^Z&}H|KOX-q}zh)^)^U^VM|+NMNLaN`C(hNdL3u5FAJXh#z}77RZVVWCB=p z0TuMM%1U>aFcl~GZwAkXVm_-w!rTs$<&2+mj4eksp@M9jO9gP3WD95r(vU|c| zTUjS!B?OEvS2CCcWaCQ#pFFCun(6F2pBVT8%Vis}vzfToE7NLP)8Y4YW!i;=5Rk^K z!FGOhqWVKWGPG*+mLMvg*h$hG1Nm$NFB~J}Y|HjLq*t-_iz4ur^tHhcTRTM9lyB=k z0Aj;keBX0%U>`x7FT& 
z1gqc5Tky}mRx5+CitR&(fh*7nqj36Nn(#`^{1DgoQDy$ZNDm-fN<0qpbuv(cIMV7 zeEdp2uB?-MxlbA1m~`;Czq`6TSn~Z631`8}_6YK>k}rRGf^;1}4Qf3ruYK(Zw6!Ez z%&^h)O3_h@n1hzRdpF{B>>dk2cum6*7B*}>h-!Aig&&9c%Cyp7giGf(%vnGUCqV}< zKra}Y{1S99#*P#TD5j(u_k^vw4}DxrR@|K(W4yuK?}o+I_#HgrF;R4a#=23{n0C^W_aB~ zWdJ;A$~sW+hf;mWF)2(9IrXnJ$t$Tf(>qKWkQJxHC2;SKTyI z>7PoQ-Gm=>inRVstO>6_=GfT6}D)#34p?u5XjWUx|W$p_9@dpT$KV ze~)4AVfOc0w)|5M?jww#_11Z2X(Nd2?4v8lp}nJ1?x(E5r#i;}M3xwhbyRz< zlp$sybB;kr zg1l+F4`y6(F7K!4RFc%6-;NRlnR)LHBR5hb1LW0B$r0x6x#JZ?y8Fr5ir3l`_C-fO z!-pW`-9R6&zE`2(&4gris|^N8c0ybf9222qp+w6t*HdlxJNkx{>#Cx zMTNC9j^4?XLxBw?lZlJXq^bmT4bX)u&KjfNBfZD#!%E^>3tP2|f*-Z0v1h6@lgnT1 z9ewMnJERT12TnZt?8k(}H1A|OPZLfe+?Ssw;3Dlwzav&O+Qx#P^}8PvFUy%-^OL>V z_Ov059W=7pvy6e}LoBY(4%nUh`WYv!8{|TU8mdkECUDFCMw^zF-OMYvzy^~LT@l6I z0KC3qj8m|go~A9_l>hPU1_BsUJxK%%&GWvaR!2SA*{N=M9 z{_@U%5I-v`T~$-e)dXQ(c*iQU%X2SZYC&jkJ|3!5;h~x4nlS=34#c2~2(5uBy!brO zX&s;BpLjqL<^+vkL-K8bh!w<_a+E+e&KeWfXJe zy@A6r5K{!cvmNQqGaO@SjrLZ+t@$Tc_ARk>a`DVF(#HYfnDdJOuelJ6d-Tm?LYokY zyzPSbk6+&|<{f1W3JYR6Xr^Z-mM!tAM(#WJo+(0YMp*5IAb2$AkFe*JOn`h}2~_XQAA#SL`QV>(gM9mKd|Ynd7s-^@_On#VJ#X<2i9jqH!8+ zIwS1&EzMFgZWtsG#%8@U&6-b4X*O`O~l5GWA5~`Lm=Bo$1+Rn3;tIM`;H5;0I1{XWst1GWYL> zv&rbx8X~L*0nw5YLKSI{deiIvsXhApZ#(8q%^i4}=3W|C&;Yc27Eyq5Ugz%DTx#F{ zKREt>jFJCOj-3f>S-8eYz48C{c>906`X7}K{IY}qmP`P7bol@6aQVMHy8cfe=Ks81 zE^tc;GKoW0HSGT1wEzF{q5k`A0H*`GfTklwdM{?tmst^!*OTddrNzKccj~~OKe^w2 z%2=1#<8O3v_&p4NIMvZ4-rLWj>gq~?SgMaqr=blR>>;sgsrtSx)%qefJzXJ`nEuDY z!h&15w)VCr#-+Xcb}q?bcRU}Dh)y7uor9yeuC7keB`VJAhurPA&K&AZgr(9zIx#KB z!DV%;mVv=X!Fj>Yjl^^!a*8QjdGGN^a1V)|z6l0=w11VA@$Ps&3%L7`&dqD&b$iY} zn&so|xKRoC^(AH}GvL?)od59$N*AScZq|hE7|TItYrK*O6}jAko)0Az#cqk>cnw&M zKh{C9%@i#3nw8{3aVzdwD3@Nq8}#3GBCTmL}G(?Ch+6USE4Q z7G&-DY9ku$e|sz>^#>_4Z>>Hvhx&r*_<6IkI&?OVm2B+Gj!*Pf_&BIGUPR;_WB z{|i9(kfOx?VY}pB0Zosle*10Za#Z+qr1dVmAyo^;!O2-5?B|TfrC%S<;C-2wTT_5u zXI@{hH+nCtk}b8|;!2o!43}m23FFYh@l~9}ed98bpfe;{Hk9ytTf^SKfKJo}c^lkY znc;gyXcfn!#FEsJsQzsw{^Q3bFKb>TZiREwx~ka?bchBtL-O*jP&yzcDL_7qj6f$ 
zzau%98ZyN$=kn{m&a2VzTE13F;|-U2L_0DV|25Fb>|}=P^xdeV%}klqz?h5$E-tRi z#4*dS<{cP6m~bK0;V{jjRB*OpkJb(&I6XaGtgmXk>^8}@M`yaGk8D${)%iw3CqSC0 z3EV*SN4}YWV_)L%Xezl6pFOwjQq%Z}zzQHMnqu%74ewazbZ*3)S_l;jt^6-if5JsF zjn4OjdbYZ->NG`)fx>FK8!r%VZe%%KqDfrex(c|@U8pUqyJ)n<2{?PMMkTVGddd## z1N(}>qAU9E60&Fduc>TrIMU(eZ`a5o;csUCG_Y{b3ktg28Tu8R(ejTiU#yVt|E$x6 z7&foATzk-56YC5iD_|59bq?cFB^yz)xt4uF@5m|D9f~A6tqg2Rk*hj<(ApC!CE->l zzYmiuBiTizql8)XtCm+bNmv#=4q)o0WBBPfJZGE`)JDxkB6($fKK*Mo)k8R<#q|P` zT+0sT1*u_1QhguLzx~3_$Im}`!{{TY&|p01}wgw3Gh ztmXyro6js%6GD0!%ItpSq^j70`OmVbl z99mf0Kt5!tU8aE@z@OEfyTYEow-Ol5abNObv0ZH!Wi=}Eb%96W6lHs}|KqO*1My+) zH>*zpxVdrlhGaPVL{l|s*3gHDxu!lSDc8mGYM9^hcatNpWv4{TbVpUT2>=WqO##kEp6&lkf|btjKW+wIO8&P1{!DPLeaAI#;~#E_@fW z4qWGnh7PEheHL!Yj95ywOnsZU1UPro1$!C~oEcXtu8qU>I?s&Sp<` zBE-;hE%*lJ%8g^tU;pkB6UBxFSWCo2AyBvL)oq5)A=)`qEm(cyM|14B$}!oZnpvXr zZ=P$N#=vvbTjMXju?m@J`q~V|q4*)cKWI*igDu=}^JmAK0gp2{o6XP(}W?Um>H?6t+1BW^2>&qdJPL|`S=80Plo zVXGn%gFvzcz{ehU%hsh~!Ea{4vI#881RKn!{janv*G3ion}XG9c&l^uHfYiDh5m^_ z?+ht5FkHkli$WMsp%9vd4G#eB3`%TP#U)ku4<-)LZ%2RlxYnOeiDMXk8Sf`^ z1vrCP=%4#lZ&ZF?=G`ND_w+!)cH;85L0WNg-n^liS_y&(cjiUU^G===MwiUK&Hw#~ zUeLb22#UYH^JYM(W_5v_sd99Y9~a=>jUA37bl`Hx!d2pw4I= zX=qzV3VVS-S&GU{N%)R*x3vEAHQ1FU2gAo^F8ovs;7ebdNzZ*EWzqFk%iE=;oa?9& zq`A_mNr+iUC_uxGu;}rBwkk6z6%86Id6AF|zA+A>pL;y;9#Zi|x~u?Ue~-NB$38yU z`o?{W#&>csm6VqoG2|}|Wk3IdM7iMDUpl|2Y!XPKXrjvZ6<*ix|3v90L1#wztv}}q zfP!DC7KM^mI)UOs1Xy9s%XUm`L&e|4zDD7vpG|B3<42nWbUJ+LmAikvyyj27J^$yo zA?OTeg%QzckI+8RDl;fvczEGxEA4RifUt-}{5H6uZ*VYl=q7mR#%voNf!JjM#0~bJ zZDW5D4pA?ML}k32Kz$kV_8Uqmm11XS*RMDhMaaCYd%zDlf8obU=!cL8U)rd&LuaO6 zA;QK54(qviRQyuwy7!^5;CyX}qt}S5VLW8aM!3MW_2wI=hNJ!YeCWWs}=KyO%6brC_+__5vn{4kk)pH>+J=z#6(b`# zwnKW3)wPZMedu1bLmm}fID1oXq#XSiapj5Bhm~@ElO% zYf4kZ>g-p=N{W|dBU3m5;m%d+TF&d|VgYLL@f6ArkB`~E1w1IPE{-E+5cuWS!r1)M z9(_zSRck>gT-kQ@s#R#-GQ(Y{Ur8XqRDR2e^ZppOTV$XImdbHOq$Mjea|mhwS{RRM zanrA`4KD9rD3PO3!$n8*Mrw~NXQr!#`&Z9^ioELQq#oMo#Ag?SNTo>?v9wooj!cDb zDgkejLipXSS}e(p*g-ZPW}OOTSW+c`C{VY2)XElE8V8!g`a@&WT`qEpRPl4cMr?}t 
z-Inw3F4c}~8@7^rSfG8Q7msnzebNKlc_Jk%B8RoED5Fza%? zO$rOokw#^Q84fqen$yuep9%+v=vc>uFF6l;qPSIug;w>Eo=cpdGeaE|u6%8_c z^btnQ1>oH3KJT$@m!R@d%+$zorZ_K~YdN@3g@R*;FE5AM58?wO`3`9leJ6TntZf=q zdHKcO&a~q$yi<1ul8b|RCHJLsn};?Vyr2#YP1Juedes>X>N`oZ;QC<>C5w#aD3H)# ztty5$vSKqydB+L;w3U*xl2zN;%?`}q10O6+T#UK%!sP7bAN|Zh0~_HtdkLdrXQwM$ zYPS8$=V8vQ`T2t8FA~FYwJ(=H#8g+)^nsN1tt2OHCRc4^WZ_1kyx8*I9b(f_m-%S-RXxnb0a zJv00R;o;V|IQzO_xof)Kl2u;&2~Stg#(aH9EY(0uOhvyE2Krrl1D?VGNc*-&5v;yq zoVbF%pv%eTlAObEqY>p7^R37ZH$_wDL1=l-=6{7_!gZ+?~$-%9~?Fh zw2f?jd%JNLP5th>*cxA*O=VLAF)DqpJ4Rfg-|@Q{fD4T90KvuwmG>{Us(Kx9Juwmr*6hn}C*{_k9(0B0(FI&tb^MkpVFhUyD+?Z*wE(g`?9~6j zbe3Z5N3@LN$l6j0w(3kEqTR7fU|?wEIcK)dAmNr90D94E%Y*YpTr9#n>&nVp$ECL9y%-M8gmm1?{_B>qDD9ku3*e~kalsp>1~>#~aC zPhAtvT#omdV6MoOpgt#m$FbMZld;{cO&RkDk|(Yf*`DbUnlxOa>p*z zGE7Xrts(f$sb=f0$d)DA6*8=w5zzh|naWrd6sHW~MWwak;%5_g1ZYip8j*wHJ-pGtaaR#ne+v@n{Uicm6I*>@M;d8 z@69YVu4?Qz#!u4)DvfW*66~A&$pA^{68odX%UH`1Es|irmxmt z3sS&7Np|S*nB6(6$wXQ{=-c3UUPH_F1`OiWsM-~FPW-XLLUvvdUa5O1eC~E{CoX@g zR<1j2#RJZjJ?k}LnGJte%^?6n%kLkCxs+SsiLqARoKGtV#^?lPTxF3y8FHkPj8%cV zM=fO<=(U?bI(uKb{Wa{YEpe=K1I*6G%y9|UQdxEkrd0&I&e5z{-&I(PhA+IyjjoO)7;}O<3+Zi6FBH` zJ6MX60ZwbpYG-L3xF9JOkhq={e)xA#C*ax>#19$dUiydu3awQ_IhOUx@)Z_lU?=k= zBk%Y&re3V6a0UnWl=(o9_RKg)RBi&CwThVgbi3Y{a@PLgFb*8(eD?!@R`?@k=UhIH zuHNnO2kccn(4T8$>_WJ6I?CY2yUD>rq794){m+O*AF8jRQ{A^sWfANv6zI!IzkHuE z9Er|dKnP%Z?+4jCB_E5vo3g3hAuSG5o!#n;ZO~UoiiU4UujdEncL+m2Rb6gVj(Bwy z0Ui@K00}02eH~tuLr~C7_@38g_+M_^8ju@zrZ@r1R4GyyfN=&ek#=omCJazNdNqgH z#Gl)~TKDfl#(f9k;%;9*_&5?~DM9w^d2q1P$gy(D$*%Gp@LaE5>s9-f&#Sg2Ve0G# zp}e%j@^EmYSDl^>lYq=vaJ83KhJbMfmtMhNlFEN+ z$sQ=Pmc%bR(O!qdrHzvTbp|#J=S`0Y*&Lu7Ux$UARwL27UWhBgx(wyX(DLp#mxVN# zKsYE>MH}oE@*DZ9yCeUJ03f7zM~PlSB?87n++4$4E<@#h_!McG=~5k6y;0VhXAKbq zVBNOG!JbjK_W@O^_qYt(MM^Y(Jn7KcTCo)oNHdzqg<7rYJa1fewfN)7_44goGr?c% zr0Cj|(q$)mrTsebC_~yO$0Z77yplOte@5f@SA0Un*5q}1!tT3T+$byz7yFJ^d`=pl zrb~GMvJmz#xfBCCniidgx%sIG$56}UKGIS<-;`z;H~A|!iP*gs9H`a6bk>?d<7aF) ze@5~CON0B;+ZkNFDb*Oo$RAKLUny+)k^=)hq8co~JM7VmN$?mC2Fsm_=m&KfM!XX5 
zKOQyzsEu@%hLUcRYEcho3cja!`x*VA5E}4?D}7&fSfxxK$caQE)+ABw95BA@Q7#q+ zlQ^myUm$Zz4n_C9|3rIs5+V5S{E8MJenL)a&Po}4qXB@UZEb_!&-Yw$N}C--_O+m? zXF{9_=5?j)7%vV&Ux{WbTr*e2CUeAzTebF@ik#_ZGdq!`Js=yaAs$dw~_UM#FO~XEOn*?Xg~rC2S2^=cjX0)&J$7OR@y9JL#G8$-01Ym~@moAN8;Bdjau*rQdznEd-4JCXO2r0Rrm7&fQ zbSSWAKRLLb }h0#QXGFD5%e<#DtOr2??EKa4xX6)2iTro>4`rZ2SU2^~~*YdW@h zY|u}Enrc1(v)tEDFK0s;G!WS2GrC58N;dQKWUR^PY_DDE^bvCE-04#7Qd^vpVq5)iMK&{LD%}`Yw;fzbJ%q2b|!9Nm$yk^#fH%k)FZ+>Y%yOZ zd2sB8ee-MlBMuHrUe@i(rZH_jV#puH3SHgNCgPsBt&Z4|YO3)okG2)hjhn?&WKmlx zkNZW02%R_X?~R)xZ#R&FEbNK5^d5~p?@&roiQ&bGs}S_`7?niy=B&vO6i0&bUPaGj zmpcA5P6Cm(PUo-k5bw?T%Z#d9wn-Y1>qdc2MR1c)OMZGLqp(pxqSu_rBL8T6$^3-m z5o9jdYQKiNj3s>=w{pn9rm2&pb?Rzk2mv+>lmE1v!aZ*yNb4+SF*etKb}hTjKliTZ z%x^qgv>^30%2PdVQ-`Zy(^yqw3?p-AAiH_k4$SFzbyRE^rB6{?Ne zmZFKPXBzB?To#ojCthm2Uz_n-a6om)&&4_mUx-GWLR=uA!C)Vz3wkh*qdB z$QqTd&I%LtIA2z1|sm z67NjqKC^FX2jyBCpm_b(TOMx3%SQO!>VUcD^_9ZL(`zOS<^WE#MrH1Xad)?-l_P<` zVGe`lufu#7ZUSczRj6&-1Rs?mcTb%@6(Tj`uxNi%vWe-zORn+MM-!+aAeNS{9&36< zOnK7B*?g}8xh=+y67nK}MY4Slu@vnsie{zGG`*ojcVJtKzx<=Bs5vSd*Fy*!jh8RrFMBP^r-n#{MAnu77p<1Mz!hPM(@zAfNZ)UV9QM!NN;G=C zDhWGpX=z&aH1&3H$8RiO_<7;FfBS8d|D~+(VtQM$a=x1i{|AG3&<@u|#eaof_^d{yL9n-KBT~JYF>aVZVk-#W!ac_*2?^fYT-R zN}BhfxsM6Lt+Yxpblm88vYv~7SBfMN`sqmX6~_hCqC!h!>#K)paTOoXzhF*4kb&@^cFA1XOvl&4iy$J zFZBAsllYa9(|i1dKma2@TIFC>Bp(B&+_&rBFjuKbg=gu=812 zK|=#m@y=!1(~yXZoQdF{Gk2+{m5Bvc_lZGPH|p>w=wy8?kdn!=9K)UG#IhAC)h_N} zhkCCb?d|=U>1a`XT;DVu43^0c^Qh~knd4cWqwBKzX4*pQ<;TC4to$9W#hXSz%Ol{@ zQ}fog-hlPI^8JrmFPB{({n8+{!0NLHM-g2Jh{V^Vd)$tlM2a6RnHj%p`-WrOBOJI} z5~cK2;&dxf(+srhEa?-5X%m6*;L3+K;T3bcrX8^1di#6aLE%Dtb-yd0Dtj?+$^Ee< zZT+5aX(6GkL*GhTxx%EPvXG0pXxvTt-PbNuQP|uF+-71+zOYsRcgYvGGm_6=HJ>6! 
zFTeB9)V6z;65R{rmPidoU{H2N>YN;TqUvtkZT#@ZHl|?zmli-MgVj1`OxOBWP2F-1 z%N*=TyI2koFa7@UxawBEI5`qsdOvH$bZL3Pz0s@X7y*PvLPcrOBq8(-_}$cn+qyC{ zdY`Nco_^l4ge|Y%(%^I%Nb2{5){0{8g>$EPXGS%JbsJW*LR`vEhjFh}fOx+gdMK!%W;h+OB=q04$oQnK_dgT$rt<;@PLd!L_njfg$`J$yZ; z^jkIcE&?&-Fz%A;ewLh}dz-BX+yHu6Y=u(?E_Lv1TjTxe9@e_aPp>)+g_aqYuFqcQ zJ7Gl4wtpw*3l(Fu!s_%~_U9-)o}2cVPdGGYo3S>0eP!Szq1~8a)K*hd#mL0r5+X!L zR}e;*p+uIkZTRUXnNku3skJ^(`net!caGiLCdNUvL}HS3&sNOp;4|xk?PVEt3Jx^x zuR)v@i8O8)G8tZDQt4K5YcGk1`S>WP=sOZ__f^7ajSX1|99+DmgQu zfBxKLo!oiU_Mx1?%J}9)#sMaC7D+NZerhVMIY1a}dA7-KwYL5xg{t@x4Y1l~PIA+# zK3WD}cqtsK!in` z!~NVPyIBm;9@7hN%e36#djMQ#Qdx$;BhTmoLv+a#I(MOr{Iw{MD4CGi*T#alr4RwX zw@taO2_(mqt;s2c5|u{!)?t zUPXs}Z(sajab7+mgIk|<hz98V*7LUf31QvG3lt zZRoZv3~$Kv-p5#2%6tE3$mXw?4_g5fliPNNgC_N_Yptmq`N#{-#h4oSz~^q0o14FY zIO@b|Wk+>Ovz~nXo?C^T;>JrZbcIwYCq^!|9asDtJ^v*<{^5_=mDK?lly_4KFL@1! zD!g`86vaEZn+h9s8hYw4>K^5z<>eu1Q2GH{GXLxpVLHSW3&&Wr=nZLFheHS&&P%BU zmVvoCTV{C^|LeC{;4eW&J~VA-VsqfZ09|m=oZ;pofYnJ8GK~5!c@Zi)`292^W`TSh zX}Qau2i?1}%*cY_?Q=S<)Tf%A&XVAhdlu3rwdB45#Wz+WFpL8JgHXK5cFb;_n2`9M zfkj=gU)8+81)eB8vL<6;Z9JC9@2fQGrw@}=d@)bK$%$t+dz!QI_BO4YP*9@m%k?FbkdncwV-Me5`up>RYzzWqP{rw2stdcycl=z@YNZi zd2TG>1n+8KMb1Ba{gkD|=q3^pImq6uRRaFh2n>>pW5&EA!J_eU@b@6X4D2L#yXi?% z@e^yQr1(IQt;P22x#?jg7Q2eK!lQ3q1Xecg>j%2s)J^)=O%B4<38HUb?0cDOl6G$v zZnvWUxbeVmVlf)57C*3f&T?Rc)8MS?hN{5*@Otp(h=2PRQBe720QP5E>qe_9$@HWx z#)96vo3ovYHqoAs5oOt?-cTbJ7Bz#_?|zwDz0Qme^FbPXqSwo?!l)6gt(tM|w4F}F zY8U#8Z>@EnFu$1snA6r*O?jr(1>a&3E$^kT$QV{CF@E z8_C7lYk!ABi!e>i->SUcp_*}0dq-DoRvQ61)IC3!=wK2iJ8lP_V1k=7E(3nJL(7(@ z@HbE3z!*qyU%~Q+`({+1=3KS(F&+JQJQPZIEPizeI$gpO-8nDW$!$%>tF!m42_rYG zXOHtKYrx943TNkByB6>3W6L2$pNj;Lt7giafMATID!xY}KJYD;) z{aw=j=lHvmz>wOW;IB|45A+BjK(9>d)aR~%@fqP8(t}QPctB=zNASDyaS?%%X4%W7 zfs4RM6iW=nIP28S5iI{cUg#~~qJX@aVLtin;vI~4CDSt9$HOy@6+M<~SYD_@X8Sc6wgv~kqopR+@x{X8E@sLcrN7d*E#R28(+d60 zw4dxc{(>xKE`9{SsRJ!;LhWX7%By8DSFaHN3=s@&XvyqoMg$gC!RwdQeMNd%1~sW= zd5BE&Xpy7YsVt-wJSy(5!y8k=NxoOU0yp&=L=uk8x43?%bNK?+R!#W}GqOz~-=iv# 
zZzi~&-Bb@yU6zzmyAz)*lwdrBndWld-35y?fxlj%b-I2|+h|=$fm&BQFAgki*|WKD zi4`XD!Sn6TSW%y(QM^$LwOYY4(#6(0Dj8FN%4$T&1oLWO8tHW6PHO}|fKCNS-T z^s7#`&N%B0tnfB#p`efk-d2~!>o&yyJ%voYSD=$4Bio5NLbhaV=pG}$yt>{g#pedF zsDJK=bvVTrQETCE8siR9j5KZgG-Qgxtr^)=4)-{KVA9uXF$*MuD_Oh?pcwk4MTr1| zef$A`VA2?U<@Zik=?YV7nA!u~9Z*hzp37P{eEAaz^bqBFgQz#GxxK+B@Ln}ekJ6QU z(4*K~=6mw1)4fmK84F&ob|rx~#rL(m0Gyh>lUQxv zeAAEOY3^G6fj+)benbM^U@Zu%Sx=^Jy0R0l%bM^^UfJoqOEw%q9a?CuJ)Cmv4ptbX ziT5%g2}r1|crCFR5uxAx;xw3=)pNCMq)a_r!HBV9Jny@Au#x(V8cULm!fsk=hbBuP zwnn9Fy>LepA&0_@ix(86VnV=E#|=lZPSg3Z4_Q7;Ub*QA2`_Vb)raTu4Yi0#p=e#> zL{=(;U6^yObL}HTW(!dHR-gu z|J~VPK!qmbdI@)0tN3dU0ei(|k8Q811m|6aWx9J{G5)Y?z|j0@c>3~zj|0edH#P8f zeS8*|J)3qLV4mbs=|OO>2bujf26O>xX<;>;k4?Km_lq)nG^C2cZ5c8wH0^(LcVK$# zT*nY&ScOVmfQB9Hmz34Y)a*5RDA*%QNq5g{uu-7f21$%WfZ|0zHhFivwb5Zm$1>rp zuuIb%8&P1by|;843mG9<~E~0LBk69-`oi6x52o~Jn%m3UIkSv*7ihvU=owlcYse1 z2p35wAn28Q-4a|>IIP0&2O*gMb1GUrVvPInC!QF)|2covq4Be4^YScCtCxk%&994* z9(^CQb!-V8nONh7I=*w7?jUUae~AS8pWW(C4%noawWbOjAGk`PA5D>V3Sj<4Q`gtB z+7|O!4V-b{+Em`Bj@7<5E)<~vnd%la$NB8s2qn`>;!g+i9O$$O@IHMpT(@|c|0`mb zN{^^3`M*iW_0V>{%N-}mrmghLG0AF(@Yi$1v{YpC7%!Jaj5=3$Y;6dtM;O?)%2qT(g}6DmvY~_E^@M{ zV!mW9?KMyq@wL;wVPs@1{`7T*7z-%E+d^Oq$g#o%nlKzMSxa6>obOMOf#wu_>(H+i zAAFlA?FtY~gYZCctY1G{Xx;d$Y2`^8I}U&!mmSIf;htLTkFh#@h2KJnyQov?QD&>{@+`{ukhkdiD}4^<}`j zI#QHN4HU!)^RIq%g={hp;d)k+i@RkV#;&@LMuwk^`c`R*-^_LRac91qPujECGkHFn z^1S|wsT&Q$W=tV-;K=FFd(N+yd5D(=0HXR|7EI4MP)<)zAye}BP1EQlS`G0Ur3OS- zJyP((*#4PUt0YKUe&ZZ;@*-#f_vBB1w$HUKtV}Ndy9L*OMo9yxzJ55S>Ps;0|KC3G zG^iaM{Y828U}ngX+3{aL)qj0dqyOv64~n1L-#CFxie9GwuW8!ZGW?x{ABJ~f_hbQD z%quL3DSw2W#oDVS7c+TC$x%Q8NG+hN?1N%Ez;FiRUPnjQM_^$xUy+*Zz1{cu_wV0_ z!c(AW;Pg<`X8Yj5{_ zCd(ij+ZbvE@EIjro7VhK%?tx>L}o{|sja+J`cK%(u27^eNTblEDrfTZ{8U9GnQcme ziEGXL%&s#Vtzh)4s&Hw)Um+ExvufgO(QEdAS5xv2IcX+of!lI>F}6WpSufqK7naHz z24oi|H-(n`OBK1HqIZJ+H%+rw&; zK}jVI3kE+fQvu=oGRw}{+lSnwiKiRA6Ge_K(vGe78nZR&PNVwwKbwHm_slPsN<$B@ z+uBupycg6dH{)FLdGwo>Z9SHS@9*CC;Tf5bKAs$bVOOP@e~V4a_}tX~-X~z4daF2* 
z1N?=1{nE^xwwom0!rEU2CAl*z-YI!!@4DVx`UHECG5p&a^}!CY(TWB}Y%X9?XLkfDoHN+| zo+nGucBeGr9{qxp-Z;# z2C({qWhLz~%373RFgc?|vny9NR+Xe_(t0XAWpR@Udoh|Up3;Q~Ik~-haPZ|LC3CiHYP0iZzvIT=NgGRp>H}B^w z^FF~Q!ie6`GUDXS4@k_{o9Mnd&O>}5lHCa&5!7wCJH>(8sEAA#8y7&jG~Q>vsC7ch zwaov;_JTEY+TszeN_uSS53UX)k=iRHv%U>wuC>8<;}5FpwEr`iB}0Q^n*wUZogbF@ zGgLHC5K!W3)D!n3N#Q3R`_Bj32tep@+%35m=E_@+9_=KXEKw^k0r?*P!B5*5nSL+1 zM|X87lqk>~qM0gPemQopl8k$^niQ8JL0jLphQ9w!Us3<4wfZpO`>(=;K6cql4+Lmg z2y}Cy*;S~l+2+W!$~-VA7EycfhgHC8Jf|FS2?@&CG;7@5ZUaQg4vXCykJr$x2NT(9 z5AHX95W;$>M?iAIuSN z(UEE`UVgU=U8uJ3xT;1hMAr_Gw8L zzj;@Eigx8*eXMdcQJ(Rw-lvmV!^mknU*Bm;{k;eY+VS1nN1%-b!mp5< zmcB@AtLWg>x%E>C-;>>0YZD7CFH zAgnjpFcoe-t<@YPVsR0980kHSTV@p6WcV(^b4-xEQ9E+}gU{rp42qp4Cwk055yLl) zp_SGC@tj;vqx*52A4+JaEip!QA%~I#BbCK~a^+Oi}w$Xloj5+F45~Z(fLH$KTMa*hKc=|Eq{!+YVzSdgU z@jQ{UJ=&jz2%!$2{9%nYX9I_mj#~J-B4$i8F1oWs>Nf}(^&G%9Bp75Pf%%VZi1iO9 zz&3=SruM+&uWbkpQx5n4Ya24nu|P}=*oOEK5hjXuYm)L+1~c9YT3vK5O>2DR4SxB~ zDGT~1f?g}5*t7`O)-JsiVrxuOjb$4*Pl;pKp(35zg{kdO31xLK5n_3-9yTt$fXtgY zwDt*GLvgowwLK@su{#03!EOoZD!?5&#zr1Lr%nZN|0-D(31uDwGV=L*D_SK(z&NDo zCoW(d!qKn6HLcb={o!ML-o$Q2$+Cg0?#y9k>%nkIxuL>$z&d0peermvj9R~wMP=LX zmaSx=JY7NH2!%}4BFbgsUC>6emTBY*qO075SkZI_YNuD`Ir**#obx|z8F)Q19em$X zA#%F~)`gh;($W;O-gR7!xi#V!an;q_h&7&>af{k?uZIx-utMGWHc|b%r8DcJyBnKq zR1&(oti_Cn9Z;d;uY64d|L9?Xd+gfRKmM@Sdbs#PNYA-6<|=*NNzz=#`?*1zQlJdR z*wW@ruEYX;w?|sMF=dJ_RA)V6cI{6<4gW)5+F*a`!^;@B%D1F^t>s4h6q}UZ3=mf_ zro$nA;%pi6QJEyCV=uKk_l0TFN4AoZ>OfPNQ19V0@g!)@#kZWG?EUiuMU6Sy(2m9$ z+Y)iq%O7NkOg;6omrDi3)ror30^swgo9Ub{u9$D4CPffSexX4ChDxn+f8 zzjK(kAwSO?@epkSZ3w}L#Acx7VJs9JFxR83Ps6;g&&(;5W%q)2WwgIfKD}j{7E*yHXt1}hfj6W;$1Y9W zAWl}Gw|kfQf&XGtb!RjPH@j85XL52;&UU&)-BfU1J}RS}y1)e$5Cn|wy=EolZU0#< z%pIektM!zjyz&jS&dV}D(C9wO*1`Jn&cST6@a($yB=)CX%gMkXx5v2^GdpWGVXSC? zf$t4{J_GN}za=_o0e&a}3Ye)7?DTds@uwPxypsp%C+S)kbLt9-Qm#j|sV3i}{WU>F4L*69Do+#fNb8VL`fydB0|<0Q1ceXq(KaxOct4$aebX$aeT? 
z7NHFE;`eW&1)yI=py6fCnT};qjHyi#zE9r4y$Gh+!adPOT+Azhop9c}oW9C+o2|%A zym83?6m!po_}0PwSOuebFk#~jj2<=i05>K-3+?3Kc_-bEZHA6Xt`*!tfI?@FmqQs< z&vRc1Jw;)En!x9gQ0v=yQ9AkszlRmiy4u>@o6(%HJY=B~GD*j5QfE;P(Qae)jelt_ zJ3(_eD;D~OBAI-4FPcb4!u7b8V>tY8c0xH_tfJds$)}A7D7lA~Jw^tz!GM zDyib{W#6u7H$bpS(n-v9lQ~o}`h2gYfIhpJU1tDqJm9^7U3A?SL!yp=N zw)4;4QLWOd7)N;TBjWfu)@n=j*|5)q*jB)l7Nl1ZEisEY_i^yI*TSE zzWB>$M5`OasEe(PXE`LlPjgukRTVVeM{hSqBM#Zl^93K1-dC#N1!MRCDAq2+$)@P~ z@`t14^vf9gXs?}oAqO9=Q=%;xq1laSG0J{UJN^I`gGFzpP>jhB0SN46MZkw}yCtVf z>wHyrc^tp@_wOo%WN*XlVU14XvJJbWq>K5EDhVmiPH|wyxL1Qub92?yNwq#a)u6j4 z*nPX^*QMxR`Wv(F&-CE}Q^MfRNQ7y3-Y8w1CCA0+4Wb4*t*L@{ViqQQZ1A+FZfkCk z52Gl0<>Wf*C3U;|RPq$$cZ6G1K2txF44k1yaXz*^-%RK$uw5y6a}KjOJr|bGwjY)M zdQ!0Z?{N6zES+AjWMp$#j$l&CdPM2DQ)L9`3c(6%azoPncj*yxv0yn#3XktDKSL{q z|79xu-!q-1jmFGSn3JK@-yy-dU&!fCKLZ{J*Q!dWGKyv_@v!1A6*_$9+ZYaV z?PZ3IY-Shne+cK5R;>a}Own05op1+eOZWnLJ}DTZ*&9@eH3W|Dw%Bnx#DF)GGlAXhFa^-3tz98jCLuFn~ zazOpbXOqzq1IplYm)o)g*_^LS_wxpq-gMK?GW2Qro@){ zGUFh$NBJpBOlCXNK?Wh93B2(0JDYMlxtW!zk~b3U#pxspJr=}T0VOv_y)=&4I|+MO zGXf%Cgmh{=^pk^yy2J$+rmTDB;AtsP0+=9V$8GG=<&~c@u_Q1WR{mNUMKTYsi7sg> zMxn(&$IgMub4^Z~iYaqFY+#(pA1am>}2rCZV~749$hu?f!ZD^Qpy# zEZ;=Lg64{ZqFHlu6CJoeKxFogMUj(8`b7A4AF=)X1 z^*jp?ynBo`Y6)lNQEf>efh~1!mh5S%-uN6d2Ff7KXsTnYGG7t$LsxJalWcw%p)k`G?$nt0pTTg}BnP@Uk~eejw02K`ZqxW3>e=qVV5!oD@ZQPs8`KzGQUplo zHjKT?VLx|~x~d0XBacJ7HOl$sR#O@vfQq793V-Z zC!>LMLH+iNL(QAFaTTA_TPC{_wW$>vqs8JuUvQY%RT32~#CagRIMFz6xB8x;#cNz# z@5G2-tK8+>_YHbw?ypU*pq1;r*gQ7i&OEMgNMhHOk(ZZWdauyRV^7xMvgcMCkFw^S zZ+-v!%c%~v&m399e01W9OO4?6k`K$A%?n6JBod;+g)EZsKVCB4Bc%bhaza0F3d=CG z1o+fNkafd&EbhF7U@rV%{)atlJZKNBG5kY zf2Nr+9XoOtSWz2s_9eDc8z9dKB9aa2NtPVQP$i9_ecP6>Ep3?o)Dy2-PQxN z^g}D7H1&BaKINr*t3*(z{|X}_d`5Wk^#>9x(LI~OS?Pn5wB(D@cITq9#<)ipuRwo5 zL)9p&+UfXpgIk!eTs!z_QXy!iXNH?+-4PuBsf5CX8SN73P8;vS+Ho)Y0 z@U;Lf1C9zNla@jUW=SxSywWfYIF`X$AZ@v^q{wEyR!W5()fi)v@EMEJBVsgYK<8eoPl@@JVCLsqGg8v((Snkq+%8-}y|m*V!i7w*Pz7FKE;4vS%NrRL_r?3NO4)a! 
zec__E4BWCcSlf3z%(Rq|Pg==R$b8OA$j;#-%0i(HT3Of08`tVO7$7xggjh%r?4< zFbKF={}Rca&i98YxTzdBcQs%N-=zB7xG3dRJ^(|bL~a5u;|5l;`(PX&zdQ%XtG6P+ zc54W%3gg`F z1atJ@@DeRxx~Kr0xN&#=(RB-qqZk=s)UVbxK;u_@_80|KY_Jn|`v+T1m+LM20p%@g zxhM9!9z&Ng9@jrqe&JS7=WAY157mGe<9}jrdK=^(pF{PliaX6iI1Ikb5kD}PH5=dA_(Q;Pnu&Dnxh z06}t&JNY>QL4oeHgB0W3gnpTXaP$GX-`Ddj=is-$RjLK-R{88-0e(^Tu~z1X{bv%T z9zPH3^bUvleeo)GJ|N}@z*7|esfa`NH@=p1T(j=-7f@7-lozg)jDMG7U)5s!;cYGR z4-L3GBFGgI#A#mlR^=mP? z-s^xG30c>6r2G>4w*e``oN~n|J)bdq@oj3Hd(GvytJ|VVazmZ$(gJ8B3mUSZHh~(# z_2_UIJl>oB7Xxn2Lc;y|PpVG47!THDa2*~}pRq5@ zF9>K^Cl;F!OsR46u{hdsP2A<@pd&)$3EX9pODotjcY;{Newe$U$e+{MGTBq$LMnf{GSc+#FZC9*Ke0yUkW#&S{V+=Mw@2gsLI7% zZ0=C142@=s?9TF(j6_I%Vz8^!Nm-ZxNLsLeagFZ+X zyNv~lV}A915uN&W@}`4%P$urChk0ad&zNoS*e@R;vrp606RBsvTp1Etj^uFO7gx>o zse=qBsC0eXT<8g&!t5GCE#@6h%+dw_m}tg5iM>Tgp_y-7ZJ4UJ&t5X&`*G0AhWI+- z#Z~AFq9StUqV=n2y6wIkZlG8CKQ6QD(O=BbxRliF&5hI@Bs^Z_1wfv5%LG8Xo@hLy zDG^aRb6rAkIgkOJDLtxt zk6J8EwvUw158|S_*$anDc$A>YRq^9~gL087%E?KuztBFeI{F!vYgnQx6TXIyyZW7B z4HH|-EytWLWc`=camYfQu!NpsH&Y<-Xc6mf&|_6boryDpJcQ-||C6r6_TtAXo7g*;27)kb?0jC>VL z@|InCNGy?=Ucm?AkuYOd-MaKZaawcxvH;C@y zB;KLF(wuokxht%wUT`my{6XgSD!)omP#@at1UIR66d&aOa#k=C0`9mOy{kz)O^a#H7(DJjZ;_9AgW7O;RV}Yk{Uta-3$8lHl zlww(s-$(3e;Fh3iNIYD8p30;7+I={7JFuEcqI9$5y;t9JXnow$s{MTTadzncSY*J< z06(gLC0X?#^^pBCw3vc`52s%{01zt53q>aE?MyHILxQac@ z8mAPtTM%WmKz)>i9M#_*qgZtDXXGrlEIu-R^XK+jwb2U|;ti>T9Q|7Ybz{F_a94Mv z$P~T50>u%0A~>uf(`<8OM~Up&>mG;y2o%5LjSi*eQ>szN2Ly`Md6_-k1tL#z)PpmE zxm;8r>qr&uuQ4is0LqRcsla49V?T8;A7v_C;VvNp2ow+gBT$U~UjoG@r?1qLKwFgd zfIu;Q5dU9+Vt4Vk)5t8hHd=5rO_7UgL&CeUBi*c^->L&oL#)W;lbA*N8)usum z+ofhawq-omQ^47hUOR-r+#b8|Trv+O3SqO@+CtL4gJq5y3kSu;qxU`=!wtY7&tH7f z#BXq#t=i*o{(GVGy}RgU`$_n~I2wCcd_sbCXFZ&`tcT+EV?xs9=#|$T_$t*ElB>v~ zn;`^rFegij_`xIpCp79Od`vDQFP&AOimNh{;d9fYgb$pg6GM+4B0StY1e=?w??&>H z_`%NDxQ<7bb_ThD&o!ZP1^p{MR% zA%}YVj*d=3x_1Fj-&yxlkoVrNRLD#ZqagINFBr7vztp5A_&w<*m;OaUTcD z|Lk-*{5h~nau4@^+mXlvlQC8u#2C5QC* zIUFrIo=I+|Tr`ssH64aV+KF9LK@lgmd2hdr`QPmst<({PS%iqHKpM>qOtb#SrhIp# 
zB-S&btZFA^v_O;3_$w&?Xxke3PedF7MZE?<+ji?)2Sq0MNyd_Rq-6>H8zb)Uy&{4z zSM+AhKl)`FA+gDNd4UtOqmjBCOFiSP3Dmq}t22lk*VwOit zc#bujZf8e)$+z42da(mn=f6G zbdQ+T^+EHg_ahH39O+%DYpO<7Kv$rbHaLR0!T$;aNSB1C6Ttnn<1Ao1%a8mlNo zd8`SsE=p$%F^61VHq6XU=-`^J+v9$*LdBJ1EF;dZ*6_GuxO9UOg_B9@=bnnE&&;IN zCZq4R)YN>0QCaG&j6P3vUl@K56Zz$*8CLns&XUCC`v=rr7dK(iA0lbBuGl}sX46K) zd#NNP;Sq?<96H3v`G}RrC;|GF^t|cfHQWu8Wib`3MdLt!f=R~j_PrAFgjxfXx^Va| z{d~ZPg!^k43JizgD-Ago2|G4DaGgrVjmVKqzfMtUE$rV!#>Ix3*Zp*616bH8)5V>! zEHeWbJrrscrYIcDz?Dw!RdYG$*!poq=*sSms8D|HX;g~dZSPU)-)aJkTp{{dqeGS6 zW%a%WJCctmVln|G$(c!Sn_FWPEaF3azn8dD=;Bl?JqT#o`vGl?4PUCQ_eGXBQfB*@ zZ-QElQCEXvMf2jd^*LlVwYLv7@X~N~+fEWDNnp}@_&7;G#fMPaDXQ%Q_4#W}ccF(x z=VM`R%Vi;rw+y3dwGm>Etn92gm>vPv^c>gx7qpv9q5(aDMR;MfXZVHuYsiVe>&#wp9C1(OUOOT7{k*L z2-Cb4)BYm89z9qWxv@5D92$$;ec%3BV7-VBzgXwDzvW97_JKLf99v=r1(;Tu_u2?( zC4Es-sB*vX@-pTv>PY(PQ-`YfSshGgpW=Z)FNoiXv}u8fEROD?-CF-TZmoxNA_+bo ztA7Eje|h~Wy{L57!390ryAWqtm4w=VL{lLl+>|S$j9I+8e!1l;HM~<*>uffs$JPneKEJkuvlf z6rXpZ*tpBrxD*hxjaVJRf7+J1y32b0y>_cank!awy&ef_W7U#(CNH|?RyO;f0|@95 zD)K4!Aa@q-apAd1{ zV*bbL?FHnK(&7Fg(tq&1{~riuME&_yAriX#p{_=Ig;j)CdRCH3Yq7B{TDL?v3B3zz zys|1C1KE6>@K)|EpBQ5u|O z9@M~}BK*!HbfEgIy%DWKF4c+!U9>mit|jzZ8#E_QGEpIfpKm=cNj;9mapeNqG=?z1ps#_Pn&&&&y#K;^elyXU8XyU%S$gP84N?>QdljI` zS&UOM?5SEKSK;#%iHCY{Vm{li}wa9c}aMA4FzrjJxGd`Wwg94okO1(_I;u`Mig7)L- zmv4O{&{Y*iKD^4?XGXSl2xhNFQi<)16FJLuA8_)gEI@G_`l`w7q6(G*({Ojd^ z2vq-97SW9ssimpHPPO{Kk=1`s=KuX+96+4Y6gdJ&m;VDK^q=1#Q}C}i=h5dCi2eWm zp?0(YloZ!32su&-G)LqkJ_)i({46+GAC0k+Y; z#C~#evRZT1?E5)&fI*9ovWA6*)^DDgnmg90hqu9t*koD~0FRS9Jw09S<6K_Wr|x+% zsPN^8k#TX;Ml^s%Z3y6Z{$`}Wfd2dxev4?R!&^aT@%AY6<<+6#;k>PBVjF84;+21X8JR?lJEFwcSfmY+eb`fo)K1g)-5*3|S> z#fXSt#@n=xiFuLDWktOe1Cd2Y`HD7}=I4j9)_#${9J(Y5SL(Il$*BqQEg*DkFOQ1% zrZdAH^Zf}vl=gy6I!V8-7%eQ6GP=8e%{XV(l%{)IxynsIBT4UXl`k^`)LM=3Yo@1r zAnII-HpVVGODjbrxtm)`KC)OfJlutM@i#BEYI*CrA#D@#*5V%uO;fg)CGAc}(*vbzL5QP7F^FVd{rK zpwiOCUw4MMA}{S{Yoq5=Iv3QE*=dBM3B2Ne$LQ>9Z+W{c$rPa8K)R`Wy6}TP+V~y)b0bzH5$!C}?VacfF^B#qjm 
z1s8L+g@UEx-OI~+S%ChE5jt>c+<#~T9G_$_FZbNmMU$J)yDu*`_$f5yvZIf%K8P%eTjbGggy*%IO)B>wiq;ta>;Sx6Pw^j;iX`Zl>D|MHdi54!X{=lx>%E|Fso0B`vA^bT0yRHH~q><5scV z-Ceikx$V(AR4l89y0a^8+q8iMkt@NaS!9K7RoqJpJrnEX^y|fyl)VbxK3N3^c1pkQ zIJa}pnAMP=ALsp1yXWU^=@>x`i~|AG$Nt7pY^o@7!9)fAT{GrG-A{myZ*}xGMV;M} zOsw#Ym(g&ai>haBnQoJOI`zz?!x4w_)V^-ka!R>8E2p6D? zoX_ipYdlwJzBgT^ASgD(HVbpT$7##_{kz>);Dt2rtja@RP{qf`XSQVA z&6pPhFLkq;<&;1_po{v5zbi~dfW^V1LM1P&`IG#*4fsqlHpd_wzwWHr^ddrivfMpI zVQc@r?dTJk{z3OYuD#jxlg&Q|IOUVrx!CV&#aalm6~Rrg44xdhmbdn|AS(Kk!u~tw zpG37pSj2=CE?Gu*FAEK7nO5IjNGkvpI<%R9{R$2x?8C9h8nX5$10rP*K^MM{ z7--@!1p~J4khs$F;J~W8g%BEgh2e4<8I3Vd0k4y%M?a7eo_o~eZlrhk2?i==gATBG z^GUT77*+(^$yXBBy_qBxtTU_L?Dk$B8mp9u2D@*eA=MvPWds9W0UW*6J!)XB*S8|HmiaHB@pq$FUCzTiiJMI4u zWpDY_R`|AQ7m8D$g#s;FC?2E~DDK6J7l+{Pp5R(aad)>;g1bZUV8z`D6n6;(nDqC| zGi%n&ydU0wAY_H?JJ-JUc^-!xvbI`uS1RxdR#_oZHJC?p`_Wu+R-^gH_IP?@&b@HG zVSY5@d6Ujp6)~8ghl+$AfcSLySf?agx`}ymGbY&2pli-vnb% zF8Sul=dpZFNTFKlFz`{5DRab%Md~>_82W} zQ$m-cyDMXyOfz7t=V6@8M8o+J(q78{jZrQkAs(sI1{NdNg2a`e;!dt30~WJTd{7u; zNH$V*DRe_`KS^o5$&BgE#SUD`-~T?H^4~0g0g)SF%|gM0i78kS2RR<%y4dh6zs`+k zLgXQ0d~dkYB8+A^3&~L>1pn!~=^Q+C60#V1a|gQ;)kvwjrN+IgszT`;$obv&rRigS z>!Q}A!!JSv?{0>p6>=YXnYXyVdve3koG9Pyz;;;w`~+Oomg{tR`ilF%VpyCzuf8BqFK{O6yyaZRW}eI4jDvB^Z5H#o>4vjgu8vM_DN*Mp1Eti{>g4 zUcVN9t35~lj2W3%CrrI@k?D%LcH%Ji#2M7lZFEm7x_NMR+Ue|4oV#3#yIi61~$ydHS>KF8u!F#%CmW z&YaXRp8@sh0SefcM4{I4rd*irv8AkYH_lgV1&!L~D4skYbM7lHxnL}BQxraZRY@GN za43J2E0>_}dOyIWT&BS#sNKB|SUBCaeM}zla_RKheYml@TI8v7?!hQ>;*Zv1HS8_R zkj5uh;^*i8Y5=q69Olt_3@BFOA5`_&#hu=oatMHV{nuZ~_s$5^(MEwg{k3o3`Ui^R03WL}hi zy6rn}b7MDqYX6~8yYzl}7gDGFD4!D?Mj@oZ?DzrOV?6j~YO^n5#%6~T8LonA0B#fl z03XEr6U?<^r(zhPVx>aZ7!h&HnO#Cs{D5IERJrVX;lOd>(VoYU;g0j=lQFMLGjO2b zdWPSlLQ&honCX-+fvBurgWcYAQ43M0(nv2HO^7n!bj17gOr|t>_w9y9LvC(W0zLMm zyUe}(5*{K@)5)MC``&&1-KPJmR(=1BPWxRS;V_SyW0X|^ho=W>=CoZ5nc4wtCa2j7 zgEGHay>ZdqWhESnkxr#8sqa&Kou&(Az2=AIM$kaa$py=7#mXuZ#?O;(dE{n-XNl@R zj|Eg+l#+Fi2t1XO6q1)%9G!^t0mc$<wQY#29$j~TabQ42 
z)|@i3d0D#*HpZ}{;}AX)BBTecZGO)r%O{ge*7grmICD;fMZ8qBJaG6zRVZ8nJNI6 zVEn12r=iw(_>v;icsljAE#;h1J1%r+Y=d~ENFYsZUo8CjFUX5RY=cN+#v1)r*5yf% z?{O>4azL`qbudEfvvey(;{}se(vgaU=jLYDED$`zn|3zO@qh}TI;Y(nExDuW(F|{f zWCrY}%VZ;v+-2!r<QP=e}KG@P!y67k-EJRY+_E-q(o<)Y|9XT#6rowM~GbjA%sY7%DrWxlZbFZ z(z>BGK__@Pai%M}Xjn)m{m>2{E%)&I_i|KoDVA2a?9g!D=LT+7d6I0qfTU*(u~Q3u z>SP0lKP_-GK54URV?hfcfzRim7QyLgjruOjM_Gxo@B-Umgc`#1{%D`X z71yqsu7u^ICt_bnM(YI+@eRJyZoGr{Q&1(XjptU~}?`0a3Y2v!BbtF1O-~z` z5<7`feZG6!TQMa=m=v;a51)HcD#wakoa`2~XgQUI>G_?;q>Utd?v6v zbdzAPXqTjj9r}?48|l3gU&JuXXW1!``d(Z}?xA~t+S7&H`7ce_hB%B(Jq|fY6Qi=hUyxu3Lcl5ROmbe^W zk>o0mDQl%2XR>4cbqxHFRFqS1#I3qAMm!b_dH~v=T|*6ReNuoa?{0s>-*dY?75bN$ zSGmT<>(e3Xw5-%=)QqbB?*2Ne-#HS~r!WAxUF}0zsr_7TSju<3@-E;|0p5#WdOm};EJtfO>^mSzSNH>cGRV?fSBYmWzZ?^pOhlfG zQKEZI489e*YZ>@PTA6ca_pczfk3Msh(Q31$Btb0CQXmtyEbQmq2ic7D2?5}DBA7q_KS zw@W^GBg_96rOApltAP;g++t{Wak;#kK?jrH{+hy(MB3=LS5MXA(^!7cM3b08m+jy5 z7$&-T0+cF{eE1^%mbu&E7V{1YL>VJb7} z=5e!`$;gjqu?h1=X>?Otrz1}*?a0s z*yTSwhI~s0c)VmNeP!0379*}A`;J6bb3tse`h(Cj+noTlqDyOI`bg3sLs(&^osFZd zDeQAvOMTqdZGa3~4nbUiXrcXXjKvp(@%fF%C+jPk&YanBlw|O;Nlk24E^Y3LLqReo^o>T5n2MZ~RMFdEs`R!^1ZvsM6*8CN{~;0yhuaZyx0awH4~jlWtAbrvYa;m`u84 zAapXwHYDWZzK&{Yx4deTN3hFF`=(&ixh^tBGFZ)U@8{$DrB3*#=>`w$1MM^vbR}Kq zIbuG`EU%W2OEOLZt(ZKTv1>kCl}tIrj`onI5RZ0>1xFLu5gA?P4OL~8mJs&I6KOk{3-}}#+tL?l>A0SU>osWWF+p#GqDiTFCKUHnw z6sIGX3K)Gz%)#_oQ!w^WhGSEl!(-10mV+SeP$^YBLW%Dh)DnA;hsuwBJdosp zU$bW*FVpq-G1IY8-sM6nwOgY;Gb{YcPj9+Vg3E%aP20V1Xr?KVcbn$rd^z95iiO1Q zOJS0v1b?13um1qcXj^#p1bBEG;a$B2fPbGhxE=Nr=u)Q z$1JDhWI%UO{EAZ6?~fX>hGMv+Y%kK7O!M#bqw!EPfxZ`tqncLaQKC%5f!H5>4~G~f z_ReTQC>aP;WhOo`LU3F|Mv9B%moue0FYPJ5ql@=R=WhzovQq(OPbaZ~Pp5@G-00Dt zDDygZ*5NRhr*Nd7@H$GSGjXuJ&X}_9KgNN3<(fIX>D(kl`^~V0hke~>NZTFLc7&t> zg6#OqCEzlg%>idRt4hPoGp)bknvM2t=bJ^o^bdG2xt#xbe7g62()BIc3Pd_xzLdDMxADq3Hzbs;wxi-|aT^XHI)tLv zWkxb<}cGa-hU$+sWNu^&>s=xcc`!*;ii_}6%qRK2IGR=QQ3Ro`V+^kTuzx9|M%Vd9DDO%M$1|N z=ZGYYg)T-Xsqp>g1&soQuOV`SSGdBR18nR=>*Qn;=Enz*;PYHE6`=|Fa*$ 
zUn3ijT%0RYd&j9SZ(Vt#+#ruvu?f^kcIdQtqUfzh@YRbE`B$FLiPW%M@aGiMKXgY4 zX&}<0GG_KfN{8eF3#N=dlKsGEawh~&>LH62e*`f^^uZf*bYi~p{eHA;r{9&| z7_W?JP|fi*7f)VP=!X*0dt0Tcy|BxGjzsp;WnipP$rntDjooE;WH$$py^4V&4 zm;eQCnk2a$Dr8pZ(RIQf=Yz1IGi*4*V8ri->?9gbwIy=hbO8{($2}^O6m3o z_}RVvN)=CFOZ67w6Y|A2NUt`_PVtpoyxS3*A~c905dQTn9u&`=z&52)a%a>z37>@k zC9UCL`D3RFzbz7&i=tu7GOE_P>S)cjRZ$PGggDxQH1)G+CHcHT7s!D2*)va!D99V% zv~tmWRVrnY$A9GxVrrSQo`cB?4izX0lhex&M}_M+Tiy%G`}tB7F%gC$NY2y$WP!7Z zA2;nDtIT4Jz_A8wn>Oxpez|fj&=y8(5t}Bt2mma^wVYpazbMp(!I?^qjLbJhwqQuxhmGA}eL$iP&ZDw`?nr zPYit^BImtpKeLmVutNTIG)U5t1RBO2tx{1-2mpOtAi|T0lI%h_?#{)pd%){4EMNsO zpMdL^#Vz{7jxg$cU0}WMZh7OLLiQ3KZ_5UR%aer^Nqh3?$LX6|AbZxmL9h}2CymSU z=OJTc&QBoi%Sa@^Y9YMy282rdhH~T>mFD1-A3vS!>qk~oF01n8*ujVv{@dMY2Ox9? zEHEQk;=J^m?BJ$xA&5Fa?RdGTNE$Ae&O?@D6nG z%#W$gY78JgbEf@L7&L3YoQv4*Ir;Fnx+FXI#L$DExQ0+D!jw+OV7aNDoRw7^%P!nm zi%2AsM-B!Z2JP2(S_3m19L~eM>5=DT)`+|zlOq`!`QPi-;}<$f7F7}yL}easgd8q^ za8a}UVdh>>!8ejag)pG=p80<8tJxAHSH4O&a3zOP(6OG?N38hYAxUl$@0qzRwb)U8 z$=kRaHg{ch(%B<{tKycGwO=-6WSe7n>$<2QAlK}0qKH&8LzC*q{0=?CGUegdLH~6i zfF)s1X5VV^w9mJ!z{Q(>xh+?Ob?Rjk0w>?A<>s$}cIhjHh#yI4kPSSt)>h zNGZoo+O@TVlh#HND`>o~@Am$Un@YjuQ^-;FSm8oapmIw^IWagpAcEMn)wlB8BqdCQ z(f9a$`3M6U-lh%b#7G>(4-%hs4`fpWNUR4O6$29pBq?x=df}G%(b-KaeAz zsZgIT2l+{R{G-jY>8@msNrr=X2F&{;Oj)^`VuW|+ne{Tyt6GbB(9hWfD zWI>PSgY59k=04hm_Q=00evzGvK63%hK<0(>phZF`WNzpo>qjh~StpaFE8G1Q8=uA;3R-4dMnh*hne<%33M42P+jdkQ^>Ul zCRQq(12*3Mii8t5bpG-9o}<(bY8zP8>^-6N$nW{rDI24qE%xDD?sN>VP(VT(pKCZqux!&=DdbqDTQ z!JnPKjPec5k@?RVCa;?>Bx(X%>Gt+#9cd9_@+f^*(2a9r1B}oTO*@)TiZlS27bByp zIv8D#1EwPVM5sCDocXJKY5HMSlu6E2hj~s{LlQ5;>;Y2WkgTST!w*z)Nu5NhlD9+v zJ{;|TP3i@1L$wU^Fp?jvE|(QJCXwx7%yMA6uD$UlE}4j3Q_aurE_@i?&tS%wSD2^- z{Mvy`)J6L^t#KPQn$_MsFR4&Ds`cKY@ysL+!lPg4Mo-VqOd5w#R+$8;ULS?vYEnm0amH&^H!|Nys;oA}8Tc_XXWhdSl%EU- z-RHkttKwuvXp57{b=_1F2>;wiPhqOP4fbYheL#_0vS+-g97Nw0%kSz`Z&uS^1bqpC zU&B}nkc{;v9dSblBc*>E%eKLA$j=bbrt^$Oqd=P_B5iBz8$WHB871qwm|$=l)7?xuv-iKjtKFK%vr6+0@F@{CnsocG(VohJc0+ z`D*Vhk$`Ni((at5=)TP@|7m*!hX&xnNUGUvPtC)B8a(1e&e%QYqG@8n+RgE=tb}^< 
zpR$rMaiUf{at3|;C-KBg48jC`8#PNqo1vUn_XBtlDZ;*K1A^b1vZ)Z28EC zt4X4J9cxHq8+k+<9}Ibx4X9NlWXi*&7CDNpoPN`ur<*N*ZBopTuKd?A1{WMcne2rt?mUWMjVQ z$_w8$8rj$?lNy8D8IULjzE`x=>iDWuezO=gp4CCh%8MNOn4*H{1E(CwE7KPquY^3x zHNNn_|Mp9p2PX6if=$$x+UBrKrR@wEU$T!QWi@~4h)x2qZ`*wC&}UTtvQ)pSRdkWR zcfJ~}M*y(Bekc_OAB zHwft|CXV7*+53)A!|l8efmcd&{4;auBvA4%*K>}mlW*5V+9lZgT^1fvlXCU<`hvaa zubAEDutfN z<%7lJU=G6@keGyu+j`ZH+rLP0yA?k+Y7g>@?|(qY8Gb1@h3**eQJLBFRz#a2Z`|tb z(2j<>9OO;N3?8d^?_&sR)vZs|GC#KK0doEp*ZGCSU_{Z!svt- za7ECx-!d*ma^QBb=I$1$-*ysP#3N&+6Z?ns|7j&fFbT_#K}WW?H~W_5QWiFc`2Eq7&#A}UNhhs^{MaKO-J;%tnoTq+RH-#wbQ?kx z^}Kc%R=|>n@Xy0@sF=xk_j!|=69o0CW9Eg zB_G0(Z0nV-m`psyq{mBim2m|+R=HTA?V?jdoYtzHHYEDmwZL(88HM^$IfYaLc!%sJg%JjvRlfZb7o_+hv`mJbz^!}>zx1tb)@6l1?-<}u|EIn(s z4b}L51^?hBR9l~CS)wAhxsmnp83^#(F!`VejD{*gZHYznF=-E;4+mvB#*MPWY%pT$ za71+2M|XmQr;OWI=2O+q6i}5^jZuiBprYZru=;zcNS^lN{qC-eMYQP;%h5oYGrX0K>h;3OJsnyH55!z(ABWxgS;UKox{1sSB$Z7eS?yV?_6Nzxvt&n82{QK( z|GW}*Vy%yq%v8G9C{QajE4i>FrD)$3Q9e~?CPb10FEm!a%)1kl1pm%L{I}!bXXP=u zcEGeod`HS(N5;fmI?N`jm>bb;Kv8JhOSiL4DL0HTYQBVB#`TYgx&&osgHzj-&YYtI z%T{I}yxA3rJX3*^w;`3RPli&iIv!V%9{3R_V(B*(|I7}TK6Pivaa)P&EOi^5-o@^< zF!N#mGX^995S7$eW#@0wBR0iTPqZ4S=qKgOIUO%UK`j(R#T85sz`6SwZq-`nmw5rr zp*H^|r0tMf7YN`t$LkH6?ytxcQvq3%+EhiObl0aR83m%)zV==5QfwixEnQZ9&*bft zriQ^&tD*dzuLBRFnR_ddnRQ%TX!Jm@X{-rM;?cQX{?YQH>@|SCR^g4kvzf5tN*IhJ zQL9P>O3t52OfxeEB|jMLewfXr|4a+e$dt`g8vbHp!X(Hf+-*6!Sii6Dtk-jQoB zib68|=pQ5ostPAUh;!3*?e!Oq)>DX>^f-+kH+KxxU0u8E13F5woM({m{1k*+$2^11 z;*=Ngq;(@cht!EU&P=t=B>q)((BUAPs4s!ZFKujqzL*F5YUcUnqyu4Qxe$NS0Xk_a zedqOTh8uk=p}@iPl+^+>An4*DMihr@Bk3G7#rBjkQk&+xdcM|q+r^_d4SE&QxB08A z|FatVr2IkN#g!`{4uBMwohLL@)@K_Wr2x07VDgJ|(41Anq2wPb>#r|B)C4R9{eOba zF)pX4o^nb2#$GU!L!vpq+4NP{tyHxj%o}1u)((aeivM6k2Hy^{x3X`}8#(d6DdpBc z$9~GMM-B`aeW@DYqpx-UxmaHgQkwSvjeBox-YbDFNcDje{DMQ|nTJ*t&zNX7+|tS? 
zF1c)q{IiZ{o5T+oiU zuq&C$xc})vpKNfxKl;hE%jaf`jYFZ*7md7-MM(?Sh@0k0_4P63CehTOk1OX>S5D+% zW<^8la5l{I_~*cxt)Y!Np&}1*=r(>NLs9WNr1869QQT(rW zioUe^yt|MPc=?|zgXE%D*N{=9&if46pY(9yB0uV|x&?zP_S>?ypIIaPo~B@43%~1^ z4;(wXZ6`e^_$h)xh}7y;Vm^^ZN7kdAue$}Hyn_vj0{rp**5l4b^)~7fZ~D1Ouu^m3 zPi^{g$gfDR2yTPv7p$cDL#0Za4Bi4+?&Ke^t23?3@gZt*VNB|Q zOQ)a?JKTHW&b^s|Fj<=rTrMW4k??aZ&1yoE>aR}A83u^o$)Xmo>EA2kqIfa^J`Ame zd$|h`brK2vF>@87N|gsCp_i`*1xc%;0+i3Soz3aB1!z`drj&xbn&mD3FV9=r4bI_Wr1(yDlKWQC}_Pnt1ZMD2!GU znw<@6zKZaZxlQ5%!=9Ope0kb*)jcv|Aoojoh{Mk}fS$Sihz_&lRjtzA&anbFTC^-F$0hTVP3sus`j#dfmI7?5(aWPYDYT zt-^f@ZM|D-*RodH8*90^ZWOYTYQDxp4NvL2!}Xd6e=7*1Nl9nX#p_EA8x|tEatnRq z#wz{xq>fc9LJl``->ni_ZS5pikE9a>xo7{zkwZgf3P{+^c)3u_`AlZJ+{yk9_Qa^j zEC&lx#@4M!R|U~#?e6GORlaiFSN3lW?MUU-f(2&HPQ>c9f+=$fHoBFN`awyBIDLJCLy zm%7S@F^!omXRLh$aM^!9`a7csZAQEmdu@dmER(HC89klEkPv=}amR?~rth;AcEk_2 zSNhNB#S{9KRb&u7`1c(Ma#V7HhNF3swiqJbM+riirk^-=bpUo&95^0lHd~GOfeuZP z+<$H;b`RWoEs~*KJHHh(7Sz!y`5}!>6dZVmEh%HvO6Kn0EGL}@p|+NZ&jJ2^nKHo< z>dn>hq>bRU6PKbU&=C@Q==A#&BF$_W%YR_+j@4s4)VU$N^WtBPYSHLz!KF9wplr=m zfZecJO_++`F7@u%P5$6pP6~LYl;VJ^v-8Pq)nO}Q1))iqsABR6rY1HQcc?p||8IvGa{DoosmrJC}u zVbGC$Z6_!s&LF!xh{01#lsMzLvv5@zwfvjyDf{gY9`rN4&h}m}_05dW-ga;GV1r+u zg$bH;cJ0$7zsB=Vr-toEnF?~(JOkCg5T}Z=+lYtC*n_8f;axl=5L>e6erp>pxYfZm z_cyAy8jefd5vN1QBIkx_sC3`A>y4?w2zq*jWYx3V>S^CYD;Aw5zBj!cE?W%5_r{;3Ia;a*fxG$`6jRKE5N7Xc2@Z~=VG zdqw;Er%o!`qdRm@c?NdmN`gziXy}M?U4X}>5Ge*I2KTjwe@g9=1#%7j7l&FWYUVXI z=%TT!?BFifyk4CpUb5#?Uw#|ld34dtj&VG#jbpJxF{%kIEak%QzEAEgN2^56oPtnU z9a{HDia-H`66yrk^i+6x5A08m=9!M)7PG=alNQ?M1y0Jq*)<6b87JqPuSlDY1Bl09 zYptJWP70JoX&xp*$boRZ!N(cT8lJ#UM}7&oN6#vxG^3JT<xrr}gX7>cwWm}`R;QaXuG$cZ zqT)1!J;BiozzYO?4&!l9*dJF41tnc}5`kfm)+hbN2iT2&Zacz0Mv*lAPk(wLG9HC= zKIQr}UMG!lwXnyVav!~7HmtmgN{pGi_e%lvEhtvF3IBpOVvr=df-i>fI-7x%<>w_+ z3zY9iWsvbVK#m7O?o3i7-fx>8d1%=K#r|z{cj>dV;#N!beGJE%t~^%1SEcO+z~tq) z;tl#Qjpl}skp9SWTJcSA>_9oM#b`b?(52KRE;A=ZdCAjmx5+uIDq1kGY66`c3$opt z!;>||zl`6l!M;g?*wLE%b6+c3X!l5kj3I2z_%vG$ZLS|oUME3aKbRsXUF$z<9ShSn 
z#Z}g>2Rktafl=m@@SY+jLn^`1dk6HUD9Ks4;_i$NOzfg0hB7LGeeU=T0PLJ~Bm6j4 zyS3sq9?=HcLmqHd=j$Hql)S$$suNOH5p4cA{!_v6e2*u4C7Hh-FI&rIA$g?F4|sw5 zWow=S?Av&NW^``q=uIm$?5uP<`OQ z5}kr<{k9-RRmX?3(YzpoN7Q#!^`DiA+c7R$r>&x*T`;rnqZ5uJRqdorvM6J7RdKzb z%$J>r@m4Y6^d4J*+j6^q`GXVPljfmPp7Obi&#j~$mHI^xYoj!j&_K%i#}HfJAKJ7k z^JcG!T~05Bk_L{G%4YI;NZM+>U#k;i%7-a1!lHlp7lsif(x}jGz=6XOp2)}4>%{g{ zx{mYCWS}u>Hz0fpCZUR6^m;R`DPc#Pdc*G*OtM;4&-C=Y_PfA3bo?=Zc6fbQ5_4Ll z^{dwIAljhLyPswgCcrj=hxyN5MGMyauO4L5iWE`+qQ_DYw*Yr^EO1JV3ueW1i*sfY z(q><3bcv<8dCIP%SN3zZB^<{?7EK{FNcFvJ-`Yk?a;1*U3@dNLYoW1@qeRBE_LPF{nuS@jK15>%Q4Or+d7b-KVcdMe%(K zlKN|MQ{rjpQZMxLHVB~POeG%d`n{ZSx|r{@F~s8%7iz!%U{qAG&U+^LC3BxrP?E;X zoaQIL-79`>)dS*0n+0EQRZ6{~`k^u&onEd(jBk38X%s?VF@M)8L=srB+V#xUCmAuk zs9E*j4j;h6J-~AL(Zr*&YGiz{8b*!NPhxFlry|&&am`cfPPjXY!^b2q=GAB^`K%|Y z?K#@XZ!b$kDkpaon=?KvYrq^rmvM9Da}`}jCCfUbYz}==*%ds=%0q?ZANGJ#d^nT#x)8db}J+D9CE$T7-)97QVJ{YD< zTau1lay?^>*iRW$roEZG4xO~)c$$AJXv6J$vwZQ_uHi6f(i{+D&{#XZ9!0K4V(KRB zgv<;d?Pql($JJ)vKT5ZfZ8$IcY29P){b-+3pXIHMXNel>lw^7Kr|t8Lgm8Lnh6xTT zwEFT{;va2mi^LXe=?oFTUIr=pf7pG0_t7Z4;3FQwH6D;PhP2ZuO|H55O&0v;4)BBW zDzsa0mrc7X8EeSx17PUGc(-u&>RY@HZmK)1;SPV{heQ+bUG>yFi#v|oaOl-EQpF&z zArwB=Cch6IgWI{kY4P{35Z9+t=3$VH&W=a&p<^5GtSI@wp9lv|DZdN3cX?zB|}@ zf;av+C|FBE-s5H$ZRyH&K{22oRrt4}qh7x|GLaVWU#I*R9KeBFPhMoK+!S(m%5KH3 zAc}{NATGJ^2vYVIpVk6WaJ9GSq%d+ORA?sTzoO-s^b2oko_VkKaCEFNfcH?5&}Mbu zmF2k~$YVv>g5T_x*Mzy+ zeh;bAGT_)4-T6U{P#NvXfZuPJKay0vw-#vi?D=;_x~<}jlg?qchs87+^z{evCZcQ8 zRhTIdSPN0KN~|9PLP_z+L5w--AHZ!HnMrcO4Ag}|ZCbj|+m|d3rxXbg@UQtdcBunq z{V$!Z+j;?KK$Rul#3e}G*COp27Tm>RyQeG>84770JLhK+8NSPL#!1r-CPlaquev1l z){6wQE-$KH_MoJ<`zU4fdZ8KIlqqRl5tdaJ!AT^q9bYY>f@_lb!;@rHLVgS2|7fzp zhDo4gh^DTPxrhjovVMhojXGZboq&369fTUzP7a27)PIPDM))?*(~sDfBb?8-XMxaN z1N>6vQi?Ab<+wW4BV5JMkBeTPp_OuhNzTjP!e>-FmRWLOl|Zi0rb+wQR{Hbr*PCuFplEsC_yN`_w! 
zNBGe|O(6#x{mB`~B`jdO@0E6@@vF`2x1JH()1|&kr$_3YpF5OX){6DR$isz5U;O0_ zFrc?63uVcJzoq*yZWs_Al_ z6IWCW3V<#hes3ry&cR(OZ+{pq`?q*i(W-Ojcsz@wR`{+EFp4!1Yug-2`kMFFI54-o z6^!|5=iQ1{?$>)|F@7^P!=N1Xd+GNazkI~+?`GB@SN4wHpA8HFM5>tsHTu|NP7xMK zWwIJw+Mz35x7lUgjxsq+4lXR~hC$Rd&igX6CWU*MJLr{LWY9U&&=t>wB+(KVcY)iy zFp2=9D4=opLjDve3v(mBm;><52pk;se3tKnfHDO=LT5@a+cb02G0_3hnjgm-Y1|Qc zNy%oUv@^FcRFJyX5=2o)KEUJ@m-v9$>Ddi6vU@FM=K zh|xd7lpTZNstvy}cqLG}^^+N!o)^2dp;j2`Fis43F4kQ zgRY`wM+@p+p+pdapTMA4Y`X9s#pK6t=)t&udy#p$3gc!0cNUBoUMi*pORF!8{MpsL zb=c9a5$k>ZtcWCcSgYJRhjt*emG0OqbZemMzV7uRr>97(zHL3L6L>~wRrzHAs^!TT zA&coSo8+0yGtTlF@0+ihGAa^BCS|M?cu3=n7TRfE2=z4)_-35HB&$Ls)u9Q_U`Wz- zqctKR*?uWO6hyH$BIx!$&U#IDF!laOT#Uv4h`HeXD&?@?RHd0#TJ5TLjAoS-i+%&r zvz|R>V(#%#&wQm@Gk-X_zISI2%kRruB%MVJ@iI2r)&dx1NS+oC!%BsZQUQV^y8EaC z`Icd8YgMnm;TkC-Un*rCXI_O-TI`R5Zs2p=r}+5ESSVt5p~U+6lV8f8g+sXU<)CH^ z6$n#P{#_X==8uN^lk_|xfpr~pZype`jbc5+a?Nc)V=XWYa2Bnsgb-ps_6|^F=#rLmsM2Nd_E5Ywtz}iS3h?_mp@^-(V>>M-Q#hV{c3cGDLrp^ ze_#gvh{Z>9Iw$(bfBUBO&(n~zD56Re%fd!!^GAU7TGMYwoRe*=E)8o7>0*~Tv662e z?^#dMxu6*;Uq+IURdF%19gX+s1}G?8oePce-F@pOF=7fK&@FFf3-e8lW`_*I_Q4v9OkA~mMeP*#r?M0J{(8SJRsXc_-6 z{O?~Yv@uia+V4n-`oSrz!nfR$o4Q!Y5sR+cy=yx>;b)M6WBH3E1Jlu=x=La^*JDyt zHTXqYoA!>nib^iBZNDIejTI<42hlrt)5@NVNNq#Ph5~Qjy`!8PX(P+I<}C7(=^Amo zPJOHYg8dpRwe{aW-%>JHoykdnoar0&|BZO)`u{5GpM8GEnH`!@^S^9H|J~sKziB@K z|Erb1NNe<(&z_Uh^Z(th|9}3mUSg#CC>($&cKZME?(AR5I(P#*JY6nakdf-ZIu{tu zugc08a;|@Mbv;8Fcb;SaKQe1&jY(`@_S0l9Qage13N@raBnRpOl#B7GBKhqWrpa<>f7r zX&Ys^e*H%W(@;uLkDGs0ofX`cM({t@djttbq3`H@(D*kza%mVb@mF14LKo50cTTj5 zq>n%u1Mj5WF_8|wvy|9Wv1a~enm?LfH3^l?Xl@GkB!DhW-x)+pymEg|*KtE-l}n() zNzv!KU7Dw=+z50-hXJ`ZsJjXOi+rI4>uCrZ#%I0e6GJy_N^S>=_RBll%SG}^nGD6&)dM6cVC6E5-1_X? 
z-0en~OX58KJS+y;Tk~Y5^gE?VjE#LcvQ=?^oJX&af>Bwt7sTNs1KS_&0WG$j9Xb1=)Y9_%%AXO}|>pE9me=8Qy7p-0k@cv_Db4qN&;v# zbi6F61-5%lRLfA7@8s_S)8E!`ZXh%C8m<)v6IuorfW=nNN-s3@*()u>vN2AKj`L1l zEBs+6b#?VYr1qMI1McJum19&#x#48II~?Y)dbqpF9ykLb(ZIf!T#S#>H6sQ$7l!+V z`=E=X#)%epTghX&snE7^g4Yr-c!NE>lZnfdQ()^QWO zvd-f{0NDc3d#I2fIX{3N&CU!=ojP{eJbJ7cXi8snW-Al7ongHm1-;}l=6vgkgO=G} zul?|NRkoRx)oHcAkhJtZ{m$dsv*oVs&%#+$a;FEglXTQ}XrLkD$7fx_cD{0s!~K zVq9=(g262trr$1O=y1KhX0JHI;kJaXwD*)8tg;>U`Jw~hH(|!&Q^K_s#BzDKkl^RP zQppV9QeXp(!w=Q(e2!CS>LVEaXd*)ld&Q1rVyG0g46j@@2w4}QIy8S8Jsq!Z28rJI zOC}*dBjgxh-L!+G5c_gER&;!oX_tL`I01h4ZSr%s)6iAZ{k0RM+VgEnHe1xO4aTro zW3Djh?PoxDE*-R0LaA(-V{(`hLVWc!3L{;>nGgosSsXK-bYpqn-D!31`U~^Nv}>gZ&iwaOI6%7w#0;aAo$2J3q9rfTA@?*$V#8^IXN{EVM}t! zO~tZX1>wC*YT5$?{8raw^s2Jv`B95^7|m=ekWu5O>|}>8W)q=0AD6Hv>uN}WuR-_2_--CAFnRe zFLmZOQ7n^JMGF8MRRVs= zI_vVSvtMfXR5d`<`Dvv0w!f|*T9qDR3YRG=g z=%-Zf_!n_VMk_4P|0Q?ql)C6hmx+ps3Rx*?M4XK$_PhT58CPIZ+y$R7PnqggFaCB) z=vN&`Om9QeK%Ol0-rN~t#3@Hg3pNtquVVuae|IvbX4cNr2a*R#j1`~L#A3b>Wqpt zqq^y1*X9`+6Weofd@?;biaf$=KeU>&-x7Dc90!-Zhe=_{q{xwsdmPBdwp}+qM`}&& zjhxPYXz$9N6s=sRoL%NIQkB%4e^PPR zX-{Qui|b(6RXrD^1rLt%RDKWbeMI>X2xtMtVv!Q?wa$pM_zO_K^3Qy;w;7tZ!!!A+ zZDTyVZF4hG>oiUI$OB2WD0DzP&rf4}L*;`|)q^QkeIZ~3!hp26Ln=_$_7&Oq6m_ch z1+rJ%yS3wq=NP+A&Q%^qWSI?lpt{POqw7Q0r=s!BTYY9nmFAT;apS!j^IzG>P9`Ae zo>!Z%{i6H6ZU{;EA3O0KJ9&t)05p_P3o3pc()#@0ECA-U#cA>MiqVLP741%AVK$w% zCYa+8VUnDV4){RWuXx2OBxRjp9JoyY-l%+!T=7s2rOefss1fA!J6F1&2|xI$5ma6z z6Ga1IedRd^HFOubb6&E;9*Vo-_;go)+^_HdY6-^@JR;92kH=GCOr0aE^F>iQ1d8q6 z$?HP*y9aXIM>=;gGST~@oO9B$v#&oQ4EGxBRwT-_DvKMDIyxYh9edDB({`icXDXj# znN5$2W_^yWKs{;gr-(es1lTid-Y$T1W7rVk5#sUX`&Qd zx8Fl+KYI)mlm4|iun7+IEZS4?bB@X8_Wd7xon=&9+qSI}JU9e*4Uj6R;BFy=;O-8= zU4l!107p)O>cy{$?zw}FWLPjrKARkRT_V5Ck?9G5oXPPA-5 zjCe(dlsbU18QKOF)k#b;ujznx8vqJ;*{(c=$E-sn_ovf{l62W5?g``rt!;GM6{Z8N z+(={1ri*pDs(`*4O4lIG1b`pzq8?Y`v4h>%EMv(x$7S2buDw7mCHU!kNq2K!zuhbJ^Q{8L)ql5dP1}D1jWoe2}RaM3;yubv>X4!#&PEw zNbhw{rT`jLzH*08Z}$sJuYQG9`;tlHRI;vy%Da`;9V6)F##8Ej#I2&+#fXm_ng;R3 
zV=$%Lt*s*ymrsP?I@$1!M)*$@dYT>gN3JpBBK?i)da@PNjYflj{G_SY%LG@kelC$Ax7J`c{3!G>rD~)`=23ZNt;-n*)BLZPG4;14j-?v0rv|&q3 zDZbgeZJ8>Vi+gMNOB_5by&999HUhCeX$@r*UI+Mu+>idmX!47<*)z9N(C#on`8t%l zVC|o4BOz`Sq38UF7TvIIKjGG1|@s^Rj2JnALM#`5U0f?vX`zJC2K+fVBzzQL9d! zwjhVEt%So<*~iUmPwF4TVXYcI@>17M(GwsadtPWo64dPGYSC2Bsw!&fszlqn_?SPX zw@@(It-^Eb)zvLIcQVeC>*QCQbFR`~x|d5K`9Eh=j)9m;z9?flS~(bQ_^JB{{1wtt z^j%=()<`u)hdXuZzEuMY@?N>BF|G&|h(D>Mb&Ic>F<`YzA-wIrNeC|Y z#Lj)?Fs6}9-=v+x=Pf(L(o@=7S(j=W`nZ?)UOYeX8N?BnUWAxYp1{?o0LSpSvtb_P zLsR{yA#N2woBZG|>h4Ir=-J0T^_3Bj>V-ClY-gVYWxurL{Y`Oc$=o2Ni0b#5{nGlllBbEV0WzYS z_6O{B9Q17iz~N1!-Yfz&PS5-s8dX1pmGxVX(`ScKl<%V=OXY5VXtxugZ9o9G%+f{5O-qR`D6!I(SdJ$Na@{SfxqLj_*# zw_Y6Ev{D;b%jE~lObmtH%#o((({w*L$w&G@K75q9nOSMl4(;L1wl_;L8SmmXM~g)O zo!13lh*I~6i`eppCR1n~_fD}M zF)<$7p~nu6^h}vPyat%C&IC_%Z3dG{FTscVN>ORD2U~%^WVQ085(Yj{sIbxoa*@$= zk?uYxn#1i0qSrx*Jdz|i8_v%JM6+AROxPrl!c zzIf#1m)KNoC!qD%n;kP&(a*wp2Wh@U9oc8%h}*8mI6xgn^Bu3)h%6DjXu}#9Y<2yn zG)z0a%6%`30&^mhz}|d$qn%miNzJU;K)17w!@o7WKC5&s(@AgQ!9SRu!m;@tBwc6dUG_s3+Xbfe@vyv2P<@W(&JE49ChNch5c+%Zya z9MU|g{u+Wy(Y#7?NEzk$iNxCZhWu&yJ5ap4Suh6Z$nmMlI7AW4q8=A`aN6J=Wq$YJ zwQBmV`mW>9aJsH?gjm>ZtU5HTit=X8c2!ji;N?C(`+DB**iW82@(r#;Z1jJ@WP^|qN#Dw^CKR0C~j?3M>?4cmRf zuZc?Yq&~QO+8FXxEY)dKqwA&gmg$-?d;#~iZ6}z*lP&?15`AnmyQvW$?OT~;{namI zb8dNP=G8Yl=q$|z|c^@}= z?G3Vb6GRM!CI|z5>qsHp>=boruia}0ZG7uB2}{HE&Om)5xxPXdSrrOAQH0tMK0xM+ zi$Tq3e;)^L;d4#JPhGZgX&cT^;k>J4;^Wqh<@0VOQQ#+w5pl!CF#p@VLuYE{nZ#g0 z7O?> z60Q}Xf6)0Zt!qQRbP_L&7$P(HA$R3*Ppi7nL&AdTxX8Ae?X+@9feMT#0#YmfZXV0n z4I!y_CB@1)lOT!Vo+1Nn#x2;PX_cC6Zz3=wt0KZZ9RPyKqhgDcVY70p0HbBqZz->K zDqp%$xL*uCjqx|3gb=Z&yZzBwChrmM61xyT{MZ;@-LNKvppUGQbGt}Id^mQkx%+B} zFknh=Vz<1B!_*VXp(`;nBAJ>ELr$gNh^?|ss-$Ust}7pZw4e zz*V_Eg67*U!?z|`b;0AuExQx;Zlr>fXfYc&nUOv1Z1C=RmLza@kaeJD$_w3S;IsQI zA^@5KHs+{(i6Qwa2@65GmBUq0SThL983**6SYv%5-okbd6j8qe$`gD)2`+6}v7hVgIdj+<1T517cijGgVnG-=Oa$x+CMcVcmK{~ zYtr{Le8%J5Yvil!ht5CGySUYt{=OULIUWWSR!etldpXj1G2o2z%5#+o%85JL?l;r| znj5NL)`8@g;tS`Vxm5?hlwTXka)h*NZtHfEQd4eLFIml=!R@@fOsEPj#$QvV3Ta;b 
zeBVWUBe_SNLbzVv64XG}hnLajH*G(8f@3{3p1v3<>TWEt({vpGSSZ`4m$`rA_mCKU zO(9UW8r*u2TsVg~X|2`tk<~E1RK25ws4m>UurfH!O@_&?X$vRykr_-=wug&KR!8&w zO9-7Vs&-Q-ipHeMQGV8+Y$5#kHnijPWxo{qJ(?4&5WlCoC!r~!H--=Eg{9{3jbd=( z(JSl_0eL64=kQgwOMIVXC3YeqdBoa$9 zSx=->AIM6%Dy*a#?aM?>(XWtYm2b37`eAW(lEAohq86)f;|g+{>i7!{ZL!x4Q_PD= z)+b^Eqlp&h2m&$W2Ah>PjzQ|cj6dZKYHQNDnZMHx1yBNOKWpXC$M?9$aTx|LY(?Sn zuy`v-_Y>Ke^~sVV<d8Zm6~`8YDR7AZe5-W zU#YQ2J`MyT#g1K~<3PpI95=kxkb&5~v>0J4zT8JrS-~Rr;53N|;RE}zR^RNu7XCNk z{A3!0=Q?I4 z{J{#Il$!eK>)fZlTFmJP?q+e~=oz8yM* zf6#y`JiHoy!lkxMmC%%R+r*7HSMr)e^a87>CsK5;lMNgH5B7^iTvt5b7Huh zM1B{i#Z#1|^24#@gt*7b@tw~=W!lD+X&lxl>wOq}ZJfQN=e#5X$f^ZtIp#A41OIIG zp`Q%jBQ-d(CE-pS1+Sc_SJ<>LN!i`2qjuYs=W^Nd@sg;WRI==a!Q)Kv^fq;9)!ADu ztX)^nrWV)CpSANZbo=%rZ|g5SKd@oFSH`NnQD@(ak$a8YC#{5~YYGncnd%%lM*Y1naMr5>kC}6lp>Qs=6Sfc~_6m4(g z_Oy3L&8wle!^VRmf^6**xYNrQoT0YTm6{-Qg6@Z7MR^b83ZBDfyTPwE5khVVZz0j;8qC zUW8c33KZ-trwkL?C;%$1yI< zeNWSD)_iOxtbW6`va%uKpgem)1x7z^GJ8*&Z3CW{O`53A2+qt>&2F_sw8D^oa@_@ z04zn@3ES4~Bd$}QJC`l>qiw!IGV9JAqk@B6xU26KH=&>V3uO_l*0~z zBeq`G4Zquf3X~7umU6$d=ZL6nBEQnh^)lF*!_Kn3b|u?7?5vdG>7`Ekw{9NHkfeBh z-!K;A_{Q8yHXC|mBc$e`Bkpo576Q8B;K%S!ceXRTMia8zg~rV9(<>a zvGAMp`N1s8l(f}JK1Wbs_C&W~QMHmge#^5)vnH< z;rU@c!rA-5lG~zwyup2Fq}W@r1bB~b#8y+tnRpflbL#ikYd_hqW88SIK?qi&ZSUP9 zmFp1RTN@N%kPZEMS%tXvwCn@QS(%%5+3UM3i`#bwT5JeUCU{BSUQd5zIy#21{w)P= z)TCIH{F|_meMr04&^KB*=9W4*No*Z74!2b*7x}nMF!QkRoesYFi!1gyz|CNQM-Scn z8M3hvv)Bnvj8J(7PSa?@Fe}*7$}$!L6Mi@(xHVeVQC++)FMeF7hsGn`_Y;0PmJ{?i zS8-60Ti1;X*8Dv~chmJ618k0+g#2Zx?u0Y%DSdKRO<^_Py!C7%@QxYT_QnPU;q_7H zbDS~T#GJA_2zJqnJq@9zDDC?3d!xY0s(bVCi#5a!cDGZqs*jo>NfZZ{Ncecp$}GEO z#usFmM#zK;E}MiteG9-cxgR#V(mC1x{w(oIz@E}(H6q&nyLclr0Dt4NJI=o0Z21yz z(d^O*A#g0!W%CX$@84d6s&#~aH-90@B)=9Yhd^AL85$bORa-cB49Y8^Rw{ZNQ|Rd}BUN%kw%mYV<)1u;aP295MW08+H=U+pg|~Q+nSl79SI#Hu z{5}Ufs$CWMAOU>DiGvE5Ba~nR()t8{Se*Z>X;DX!BJ%oW^8xjkufCUG$-^9EN5uui z@rMrA)aq!_WjDvY>kQUBX(mGeUfU^@1Hfox5vJYyNV5&mdKcXjk-MVnNe;2~F8)Zs zA0rBID|+P3k4~T&X`k}8Ijt5-*i6VqxX$ClAWp^wS}RU%r>-GUT^jFFxiR5^ 
z<%d!UpF}xqM9dS&{X)Znb5OrdN5|_i(DLf#xXns=ikee$y*Wy|(_KZO0yf@qx-?7E z*Mfq=80fQgY;h~m&qP*)X|8?)PQ$rrd398KGzV#PDbY<|ObBPB`ETIG#=_NIVnt<2 zULSe>_$e+ur~6zxhuMTXVAc@Xs&7b}24qijN~kvsu<-dV=p9kWY}ntjc4?X^ee@Fn zp)HUtll})MM`n6_PTtvwu&?#I#)=_`&8K1`{Wb|`J%Is4n`4den zC_nhTu~~4XQBF0aw+VkZ;+uDd zeCW|s!s2f^*b3ArDed;uE9*>WUqUNy6HQ+W28?tKDw2o4N6M(-dEG(@&-Y1vO5J*@ zW6MsQwwu{ezT?&1@g%!yU#-_M^Yi98RAp6P!rxt~$GtI@s7_$tzZXho4|Z|1juy-D z2?^hRrah~g*3-TroN$KYO! z6#_g@suZjx7nAFihHx^-F56Hyb^xby}|pwAe6{=KZvKUC=JtMSuk zA>Fmh-V8$<u#!NyGDuR4k^eEsXTuJ=_)yuNog7pVmwgM0bVEYws? z*+2+Asq;jO19m{?+c&ZA7U-I6ZrlcQVe+bFpE}u!$_8n3i;e&!Ng--$%p&oS~{raggI|n(pB@hQ@_Ah$A?tk?6azOm~CkQGYm&%UUqkMv|3Ch{y}8SUDqz9X`73vN$F9^WWCBJ|k+=Lc7>CsH;Z zTQ}GInyA%WG4H)i)CV>;8(;oc=M+LMLoQ!c^k&6Zv&f6S;?!pt5weOmts)PSU+L%`d9jZ5Mc32Job@(ww>HawDsS_@YRzmC|+Lt4rSqbIop=qy;(YV}V;dH$Ufy z`JekS%ZEQ#ZrTICl{!-OVLARlD`b)@Tvt1h5NniFD2ZqD>YbBb}}VRI@M_?RPk1+GLdsZW}8T3MlzhL3t;|mkjD^F|3lnB zNnc%_-{oj$^i+MkCCY_A+_+2E?UL8}<_fO4voYpAgR;WpIl&yI*F9_EzJt?SPxotn z?Ny`gDb<<#?*<*kF9=Hi<~!G)KSmnrUT(K8kF#Cwz4MC6nJE3RDV}@yyVosTJ%u>l z4fbkxTj^2Gcu=;E|OGAOOw+*aI0

    Gy0^bE(qcg-H7W-(hSbNL7`PT58OnCwZja%&JT2ZT7R?V8>xBI}-!b#S zy*cC4>&~PokV(4>Mlf4q-~$GCA<0q;aJzID7g1>+$-AU|d|(WbcO{>Jwn*>IWt7@D zg|4CAiIQ3C#QbafdzP+EGa%i@nF65-4q0^1w&lVuCAj?I=gUW)cgaX~w__ap&8`6~ zzUP8Y2Om*T=4CNnCK#hx_8DhcMCv1MLq~mml51~DN|hi zUiF_YiOr%g7Yo_He8TH1Xlw82d~h97ASiVUx&~tjJu((+Rinm>M&+Qj7|5Z+r4T)5 zOvG`rr_q*Ab(-ImTJ!20;ucqtaa}JUn*IkO#AZk6sgMW;&2e580ahyb2cifpKYJrc z<;_mFf@?RbjKA`YK9X+bY*~B_V(WPmlh4HXj@%WqwYA@ z`;<>)A2A!hKlQT)&}iZ#y-a-h58+5=fYLVQ@dsOtkSYH49v^k2BDO(snalvruz=M6 z^GzzcU#HHWTwFyO1F(Gt!f=x*_D%r`-6>06|4Rl%#6}7o;u_DE!s-4K8|>LMY$5^k zHy|k&2TB`7^h5bi?4MXsoaqsUH;8nmYW!yr{q;|o2LxOPsw#`faHZZHdm&)FN7T`LOUG9?RN&h{o_n(LK zZ+z(J3!D!JUj&Hb{$C&Xx6l9ilWY`)2yys1puB+n|0vV{@r};!5klc|*=xS9_5Z(6 z)_?ylKX?+qtmGrXM`saQY+2~b4EO$~m+S~Ko?Yl#(dDA)m9#$qKGVJ4Jtj4k;=Zu3 z^!UJ9SxE^OgPc!={OV|lw8PF;j|GEL$bVpPFfTu!Y4c}2P)=$HFc|D>oiGT%(Aiwx9W@E^YMZGf!BNIq)D#%}RH`34D(F%I;&)WDW z4mmvl=YtCf^38YBQY@%x0&}C&ghC5w27OW?A5$l;T@H&ie`|@yM2Ilh?F57i5 zH>ZKmGS61COM1h~U`|DwQ1wv`VnRTusCKMZ<#~h3Qh5e(KW9d0Ql z9RNE}lRM!Bg(x{WG8Su7&}Zj3@pCB~t8%IyFG@}>-VXA8BYdyqmJU`yyj z>x;~*DbmS4W_71Iw6e|NYgw)|v68t{qtjb=5C(RS#IaO@-|OuXRVGI5+oN{m)_1pi z)mFQeBf^#5)A)AbWUNsocQ+6*jnQ7(Q>wSz{S|>ZdOQ#8W@8k$Ceh7(&WHuiEH1yuQ ztH)~{UXHkb_A2odODgRj7`AOCJ@KgFm_n<p#anDLsTLnlK0zG*K zY@H+<;@zdZ6~K7;ABz-IjH&`I*mn|&Mfz5=sBW%T>gQgRQuWtD-hClU1p2Hm&BOaC z(83v|i%W98pQXKv&mS0@)02!-8{=4V=XFiOE#@z5z(J~9us3(W{`OWUYvq`99RoiWqlKiyyzpockWMosy4w~{S~eUu|$?e zSG$2dRqMf{BPb|8=cD?GCdD8jp7tt^ z&AnEe&mtsHaCs7nS;{4hWzoiA!+ht~EWJX(8HEqmdX3-Ae<$W~J7?DLQ5PV)^Y9Q{ zew&>nR0=cMOKISQ)WFH6@O&$Fa1P zW-G5cxb=DO&0sRuiw%2Py$4!7=q<+Sqf&&u&Up|;;ae(!Fv=Odll|WheOTZh5*!ynD)^dt`jat6~E*nLew|?W^zZnZ4rC9@ZTu8(-YT$NKp|te- zz2XH6eg4JyHWJ!ESHpm)O14RyIFj_NNsdp}n;b!lI47Bci_#Xd&)o}`YXbH_ucw9( zY9<*I-^gh$WS!dHHnctL*(_L$a3|%Z7CRYqKQAoocwx-sI{FzKF&A)ZNEd%GVJR7K zhv8Wl`V6RjB`$3ZRl5~QsMT~q;6X=uCR4NL^Y(cO?N7f(=qjYc?cscPM#}QwMTtX% zPa+=G`h0iJ8dBGekFjRv2O!-@X(ei{r3B#gpQTNB9hVd}v-4?rF~x&b8O#ud_#C;! 
zNv6X10Rrcl2bd)giPA5XON_NBu~1`E3BAyle@xOd8BEemu@~k1>p9MIDe&7KK`zXZ z5e_}3(0o$rDXY;C1_#GNeUar;GaRphv&W(OfEOA#Ri3|C=k#i$sMd6>sCFg881Tbl z5jyrtWOJZNahNAQrvbU36aVLqhl~Vgqi$cnY;jX>Su4CKc&OL%I->%vB({tyG+U2R zE{dIW$sWHKYrWtA=^L!yWhZHEXi%nqGEmMa z`xx!<&@+4wZLk!n{obujo7^QPS^yHF0b}W8gh7AbI(#BLim)A8bGw>~j2r^`P~chM zpsF&|>gBuh4$$PeT-lmnAY27;Ho8>uSL@L0Yx!lu#(pA&GwC4rrK?40Q3gCM6{;y; zjnT%MEQjBq`A@%}c1~F%W06>$m#Y`4H2zv#^W&AC1BuR!aVovVK>>kJX{d3Y^-7E2 zz02gxw*wF4ta4e?pLSJ>N3h`JD!^8i(SDTNUyD21IVO`_ znk}XV!vIWl?CSt29!6>tqg@uv-+5?}dvVV?|M9ug>+VtfA3Xh)AbrCZI{T!(9(0up ziku}?6M@4!FmPKya1zEQREzrsDrG*Y8}<`1ZrE zM?UF&G*;q=IZe%EpO=C+D`o-ZO4#buVa%%)Y+5k|7S=;Ckz+>G!3|SWGm7k>1Gf5+ z$7>1oQMH;>x6Aq`q{5|KJN?_PgB=;cNapEYfk1 zTu+KR?!3vL!@Ch%M36v4#Dc0rvbz^HXMHatf@6-RDX70aG^{>)(4BD|_GRXAH=-95 zmgWL5%QmrXuf2FYz*ji~?!au`4F!lsL|&DKVhbd!cCFXDsxd|mvV3RIc#DUi3u=pO zSX4I~Cht<$*%I=B(GhxFA=80TN#yo&Eg;L&yIa|pilmJ-17;Y8)4^z^P#XJ(Kba)y zryFz3Ds~0yH@;X`#fXcL74Ij(?#L{C%W1!jk4F*6VWjf#w^zA)dzSuyMKsXjgG!qV zq9c<8Jc}jX7*onMP0kckB&OKwC^0*Ra46M}--!A-jsp^b(R}56GY71Kwd4Ma6>9}! z{(0HZv@*(kN5qZi17Y3QU}EmJ%ql^Sa5)K$=dUU|7y}}%xju)e3ieJ-PZgZD?LM5C zy)T&$A3@C|8kll|3}>-szE_OMFyix>cR0WQP1uptt@FPeChn-=Yydys49TJXEgrVU zInLi*TLv6hhi@jN6;3lvl~%DCQ&N%NHtD+I=GGeHZc$6 z8{@!Oe!RK*l-cL`8{$L|gx0zI8~4esmS&un2vza_l`0YSCsks-8v*=xszg`T#~J}k z)ry^aCEiCkAXOq9`d6w%(7#e8z<;MoWS^{KUk~g}Wzwfg!6C~_h}^c7oXF@g9&4~-n@QDyt8!yj@nuOG~B#yP={FN&K=kVfx=1Qn9X92kq@Wb{8y?(C|VF!HhQ?Wvc6_FtTrj#c|1?<3_h~j&E z5Nf3+6Y7kueP9sRVt2Wf0W;pWhVsEKyrVn*Ry{mx?Zzrkja?vC!Zc#|S6jbv2-0-` zrO?MbBSJK2tiU4`j~6v4j1#{s>hQL(RPj`?!0HkPXN`}s***I&SQLSqqnDgLY*={T z`T>=k?LvD`e#I>npUJKQ6wP5fQcf$OD5n;(Aof5ujlbGWA|cYDZ)AiUM#=u(gxzS? 
zesB1D50cvCTR&&mDMQ}9sV|2%;lx_6-vjdKtYW4G`iy38?e*F5BlaeI)79=m1XuYw z7TVHwibGBXo8})9wwEC4L9?8*9@}{_@tU zL^&j>%=|O#P1>DFtB`jdy4ttrQRmWh-Tqgc!cW;GAS(6+_uoTnY_}EbMCD8zlACkV zg3c{I?kCz5X*e73btSs3;cS|b;inve%(_g=fVa&Z z&UH*Eayd~NkQ)z_=ay|R|I;luCnhy{5A5#vR^lX9;_(K-`w>dmZq@%Svakk-kq^n% znL67lG1GVR^Er)98^;MI&sof?*Rqo&naE=O&xVzc^G}T`RDF)g{*1PNLX5Fb{79d- zZfk%{@1JZ1h1cta|$!sBV-K(Bt2!Y?F|8B>8>iK-f%cuwoLRb0jmx4xsNVrPMTlPr^#NjyG zk!xhLb7Wn6tHm4iNqH?{Ovm(PELba}2y%X=Q{&@j^vmmluO`T%oXk?BzUb?~dW!E_ zNmo-$;ow(i8PHA-Y9Yz|Ww(HdNlf-Mi3{_K{{Ge#Ek*c3FiKlPVl~F9fd)%A*X@FL^S%hSAm;k(`-^_ zoJF3pYP$Ze7ihXTS0>yYz8+rHX1T}vvu>$4)hepMfMy(7vGcVz<5#MM9r(5}NhZ8s zbqR|-M%)CwW&-xYs<5N$g-1uH54^&4n@ZK6G4>Mx@bq1My9M1HTfTHU9(HaJKl5E3^%SL3uoBLrpgeO z)6W#UF8@lE-lvugCZ9;hFI3BC)zkg*5YU(_ynIji=?ZQO>J}%>9n~j|YiHVLn%ya8 z{k+=#^lihzy0ofyguwlRl;4J0kPpS+OE{+M2rV!3r5%0m7l3H==jvvm|1)d-)p)!`J3&msYvoQ)(8-=)uHhhhB z{B{5<$;pZ-j)yTd+CkKY4{b7?#q<=ST#U1IyfqP3?IJHKZwI6XK0wypcrB=Uvvc=3f@AzY@De2u!@z$He@c9RvMhSS%a&2lxK)c(3 zGA$)reIb>Pko_XYkZ&688W3Y{PvN(Aps6S%w0STjC+}IwqCqhT?h0#ctwQXS-i~%_ zaJvYb(le^#&dyUOXF&nh+EusH(ZI%u+*)tV^y+Pvr!_U4W&xAQ4O(Stc}s=agz5O{ zx1y`!?db~k?`TZEvj+2xDY4vSXp$i0;@v6UT2`I$H7rNuef;xinInjT29jhV%L!bJ z`vBbcl>igm+t9d30T(o2EGMe5)P{?UnrP&b9pp&rzSc54E*z;V1p1u*?wt$+)7(x$ zR%s7-DPDgFts!xKaG)S~PU)TU2!)`^sD%!#sWcDNbo|y?!ur3k!xpOGyqSa=C z3KiH|T>7jfRbJjHnNrw%`M%WOLrQ~<+Rd>J?A0}pYg@!2aM_n>&H>5UD2!t29?ufEo!nl74dTe+E( zW+Y!4zSLL^GRzJuw!GuTWUKtj4{HynbWho`Q8>xHHG1+rOcFjfl;-V@1_f>t z77Iue1XqJwM^g4c#ld)l5Xb6I3tS|>QS1I3>J#4}TG64;hX5Xu&jff3gSU~LlJm$U#3%vNKp`UA%Fmw_~nr-&b7}Yc6ZFuDutJ9{0h34A`ygRTZt>b1_i-_F_q%y10#KMbVSQ( zs8is%w;hGb*z(a%GAFLx&j-@dOsu#q=9O|&mwsDVqJl$dx6r{7Y#llYqKZiS#B~+l zXr#jQ4*WWlAWKz^btAgDc+lDwYetjI!`yS}KH1Z~k~plufM^g3S>L@i-Er zG~JcjzJoyR<1YZ(^|7jG3xnh{b3{>-MWidh&1@^Af8JxB)^s`kFtK~hCz_3ZhM1kJ z$1~yH35OEXH&PTcaEd5GJ-}}Tmvr_{@3+eL-hl$_S|2V+{vl?KO{yTo9U?Wx$#Rgu zLv9g^TT^)yri)OzueA2QnFcx%XIT`SO4|c&NMqR#-tFI&I!oJm@qD|&2$@3AtmxfQ zF-#l6U|P55Tq$=N%pgqRn@!`}i51&lkgXwA_i##nHS*>fiAJJ|rY7pu_vKi~>BnQ4 
z-8K{y@`kQw&6aU--967;+L7uZwD6ogU7ryY-a@nx4Atq^wi#V* ze|@Uw5>RV0NN^MKD*K=N*njRdM@?|dje*|cZD*=w_B+Lr-{9|vxmsKmgX|iko0vPf zMov^E$SxORF9N(8!pM3`vJ!bpWU>51z+fR-H!mLQ2pT;i@dH8)!LF!`HTknloo0uk zu=kJ8p;K%K?i>|?(yuA2p|UTx;b%0;N-WSB?N|TP?RSIHu4puGc2vlV#J#-YV?Wz# z3gNhh2y$6^G(zNnS#{gRETa`X*u(dW6?^_8RaSYnId7un&Sni9JPBg)CHHoSWc^sc zc&XRnEq1N*n#n>nGFTBi`UPs~k@0&+a!aN{SFG&o3av#Iy9K%H=pRZRWr*){WbaWl zl-t>mRd{UCdwZF(=LZ}w72-sQlZ6W9<)QD3O(i7D;6Hw{c=n8)N%Ebj3Zp|qd1hWX zzKi_`vVRmiFm#7nmZ%-KeFVK^tXHVD6$`HLY$|?@B+s%4Wt}>z(9k@s4rj{3*L@v1 z*qZFaJdNf#w^suCgKCN*vI&2>{q|p==3C7Je|y~CI*l4H-leFp`;7I@(Y) zk|Vgsn$uSAwU9^TAvNPyw9cRk-Il3-Hl9pX2n59@_I~^Q2E*M#+ zb2DglMq(ZAIqMMa2algbJa0&Q=SKoc6nVKJ!2K{5$jN8j><_FCz`1ohP0f8%o|Hc6 z>llW=n|FgqE&5^jDxD9A1XfdIv?l;lIy(2-)kSW@>CG=_R|%y%F&0=;TtHla*g}SQ zw~pEZi$Ca~N4U}1_Ub;7kPdiRL#IiYM{qWd*UwlK2%Ur;zL3C^I)AiFQu)vQUPN7- zYApP838cyBy2ABl^!lo_PruCvbAD_|C@~y8Zt8J0*f}`Y>ZR8&Svc>`@QJoc#_RuS7dscI)uyVOS<|yBQ6>Us zfku|BUWRLh1yW`so}h6FKom!?XV$DrAUxJ~38B>mRmY8aKw!iJVBH3t!*CX(Z1^IO zNMB=2EWdsBbl64_o-CO>Vx~kHZm{djf|j^Dh?`B~04})L5cPc+6~1rqzE+DVxoYL& ztWx^yheO30%di48Jb^iH4+nu9-Kr31X<)`Gub(S?<$lqk-^0Rvwl+B1U^iOW2<0M~ z@27`MN)}!chN@@&7u^|6{8!`73Z|ks@apE*0WhS2{1h}o2bnAaG}6fzaiR)zCYRuh zb;*}1T8wSjFLOP5heevP4X&{m7wKa0T!1j!D&g zuDeyS*e(pt_pH6+kxiLG z%$%`8|3#dNwnSYL@!OK6^r3R+3LjwC;<_P*5wWRgN`1xx33eP_;y^r1D$z)~#DIVf zw5}gvfd4r{v=lI^Ko%blh!8B$xykR|40we!SiwJ5hq;-cR(4IqZ>jR;r@DLps{19N zLHYF}boGWyX$`Rn6g-C5`ZXc>)rC>_5oHBVby3cn-C_b!2kyq%t2A@!0MYu*+vgP+~2B`eFlCny7T?qAV*!2@mlR8huI7|3V zM8gsm??%6jW_cInm=qgVfG4eHBDxTB*gCQOwKFe>ncTnuUfrNf%96W$98pbhejtD8 zMv~1y5X*=V2%Ee3JQY<8STcnV%B-fG{WmjG7|Rfy8jUCp>OK$SdIo(A8lv^1fs0k& z&4Est0+iIo=hWx_RQ8ooacxVt3GVJLjY9|$+}*Va4gmrLf?MMR2=3CjJB^3n8ZZTcvj6Nc_K(%O*REP?)vQ^wICJ4=N=u5g>?sW~zx9xy&o%bB zX!oIq&rM*9KhKy&V(M{US%Hkg9kqh%s?hNdCUZHGK3LWUb1*C@|BH;N#L1qQqMOW? 
zYMXEoJxwKkHvz78*muZu=C7*NyOCj~e-yhN37H{is*6;|mqdb9VU!3zH*e~Q&>Uqv z+nuEHK5WLJrqJVBzRhyRo(*aLiL9I)0~0qnj@G6$EEPEZbHhIV=1IE#zIVhwFIfXd zwj&vBU_QTc9hXma(bo40esw0Sw+=Dh@OlH!=_IJJ^x<_Ev0>=ZN{3<0>(Xo&h>J9G zF~B_It2V$3OFir{dw@851@!1P@J7DLyW6jbFjZmR3tSnbe}95aytUW0*PX3cOEZI z9c|Lv78w5l06+?S!U+sVqnLiz=IBVcl_M%qj=1O2X!R+TZV<5>U;<>L5D#50E&d(b zayh@9aarx1dh~1aP53JU0UuC`2C1O~X{0z@xD9x@t@N8v5S}fuq2>CgpENk15k43P z>oKZ*&koMDkoQleHF4{_4MN{1F$tggi}1MLLTs?ywEu7{YKu4~fRp*#@lKNXb=2gGogb*^g;LAVT=B0hdkR{5cZpnfxpOzb#P4S97(4i*g7mn z49uJQAZo^>U3zBb7w=Fg6i_7cydiRlKh&MUejcZUyUz{iP$| zyTmnaKyfH2$NJLY=X4gkW zA>M4rp4Oj5q>%z+rSb|2g3g*uamO8Q#_vU6ueg{J_- zRx-gx6eB1p_kX1r0{3tf%-IrP{Kh)G%B=STHchZzGr?Z#&bp~OeS`!A%oUk1+ylj+ z!wR!Xzp1e|S<{%ZYY#BnZV96f{!h}}|E@7f7^s%tUB~@#?Egc?l41`9K(+TO?fxIq zlYjqM|0RC8X9r_0s#$-=?T!2UasD5ej(@!_vIv}9plf&3m)HOMJO3XB2pkKQaIv;1 zQ$9=THaa=P%c3Gt$+!`CyU+q}o1UGm`fzhxcyMrl#mvzu3ji=`IH)J|n4RbmVXfI9uLVaK zLoWZWxLDGQNH+y6TP`S={D&hhQERG&GPAr6k^upo0BjwMZ`G943#XSTlq^Fe=tkHV zn!E?GY$2%j+BU6Ql`2YV%Jr7@ospKEV!$09v^H&96Dr)xMGbvfA7v#eq8)90B>bdA zyUX6ug>Nq4&cUsS?=>yy2e;niDOFs@hPdhbdB{UUL%f@f!YE476#);2SgZ{VX>}HZ zlQ5mSzg$X#V`StT;fT%72&X0?tO?Yot?&f5qFE9Mtp?SFa|SNwcs_MYK(@L3 zM{ew|HrQ$qxa2eNVSOU7a=5|Xm+fsZtizz$4Yi}^j+g%(@<>7on`&5e;1EpTPPI1_ z!^$M-(=tE#-W^OjY9NL@c$>=gii_8KiN=pz5h6x!)ufRA1hQ*}_)`~5X zNVf)C%XnJp{rGZi8v60YTv21WUPJ;j-<)ffrp+YdoZUhcxSgzn20kG%gH4UcPb zn$e`6Qkxgo6|AVHL-u~fZPR)EXK$CPG%{vM*Q$Hi@rI|57rPnaPFuY= za9#yQaJ+DF-PD5K{OKKe@OHjGno=t*`T}pBdcssQ&psO?7D^2K$B;we1s)<~f|KSI4Nq$|zJ@l~jIWj78e|?Y|lhv#k zCaLf`HTUI;^*~dvY=G@5kgGD9#Q(Tve?j~)_U3I@9;`umN~ECbP_=RLl7@OnWu?GV zQIgHp*}B`IF$n1{X@i|ZmyqffbOe*L>a!UvuZR=76Oaj{KbH63eEgUEre}0@MGezc zg;*s9`8zv-n8*H_y#Ygq0uIAQX8*$Snue2!9OXr6*3yX!vlX8Iw8Tqx3&Y{+=3M-D z_T;Z9#r3a4Lh-O1LjJf6*tkwWsxa$moj=qv%)#*`&AOl`%E6Y@@)k^rc)|v0tc<*G-f@ zqy03zZlnZK!|K=KlNDs~2UtF5@;3e|M_;O0Y0SRyt&cdE7eanh{=r=bhRJSYk>M1q z!KzLuzB8K{aQHcOnWc#A=tG^X)!hKbc2mgA)d9Luf2q{6g@GriD>e)qC3=(1aYqJI+%{VZ%O%N(aC9^e2OQj~Jz?nt+$Qi=FC+(_&Z@^n1Os 
zukCT;bmB)6)0E+9rz$xNh_?C5d{jvS;aaccv)Y{%Pu16gJ$?w!$3(n@(KChD|VK$svJrrHbSHH1P~1r z*@EeI3QJsIn@`g>q#&Q(4C9xs4z}0h5TPmLpHf6pczAe;wWl~luN(5%3*H+ow0@ua z!%C=m1Qg~mwmSH}Ahsz$*-+(fGecw}%hnL3434eRkOZ>ACBA|)LlyqG3PsrU8xI&C z9qrnFT*v;i4;3BH^?C6b7u{WgzB%(feeri>7%nf~dloz>+@5H^Xxxy{WGr)d3l`VGp_Ke-_ToQx!aF?Ux5uJQ{2S>uS8N%#F-9=7 zi`Tl~9Yu=Wh_gQ9savzN7nUcn$80L;Vt^n4Ym^L$cmy6gtSLBsfbb;PYJfqXO&i40LXKxJe zueH8}9aef|B{wAPzOnc1z5+ndr3HD)821)hm+r|T^_q3h@jm_l1nV}zsgwOKB|be+ z25NImEwzo+8_%PXGT0;k#TJ=0GHvOU_>kVIY?4b-JuBJFYe79^=p^NUO!i&k>>*R) z<-4a02F0R4=JIRW#8n;UWvl7;R}Q3Kw2TZo0$a4)wcOV}q+W_8Igj41SPmo)33S<3 zxsbl9$?y9mxey%U$(Q1ml3d?G)#?a%_6+E!_fj?`!;8~IUs6ZCZFuL2=VvT+t<1?{ zVtVYe70>@;VWAP-gT0BA+x6ARU3XLHknKt^`}WR@o%=rgI5QW6qQT~Q`&pySn;PKt zpn86?L8DTTRA26|i$=^QF#$G1j@B)_cdqw>tb#14Cb;r5_2VxC9Q5F?_D!qanL?4f zS237}Gc&q=N~2N{H6HCe&+QIX-rMRm9*Ml&0nbZb2Z?v+K&)Y+x~~&rp3RkVQrFWg ztCvs zQs#4VkHs(h4ygxZ%QX%3J$YZ{uLjMIux7v>r;n(-c62}yG~rv|x;a|hZKQQ>2EF*J z7S$HbpDu|S>juRvHiVoSmU;BYezRso$(%ipKG!GbaVZiD7Zb@heD-Utej3uuEae`XXEbe! 
z6YzXn;g^V0MMIS_!bXrZ&8zP?0761V^$skGCMjX{IPt*V9{!Twl5u4d@%v{RcDwk7 z7BVEZkuM&f-YqxAa@#wWHy6}v?r*mk_>mL;1z=R>vbV^IG+d~b*ndSFW=cEd1E8ly zx;fLg>A6e%zHz4+_yeJh)>`j`m1xLzjLk+NWn8^j-87fB<1*?+25>#e7(FrgC+~<5 zm1;wU8}f1F;my3cU#C^USmIYCWjjq#jnGNG(DwD~B7SU96M-)53gPe6lWR8GGegkE zX0*hd*h79y=x32?(l39$Qb!c0WY`ksW+1tkMRy{F=GAsk9SQg?k$_*VX8c+3UJ3w5 z7A0wlwZKE2EZ4%<@BPb!U3kA7ZMpV+v%C^ilG3lA0^CGmBS(K+j77Fm%PnxGnV8ft zSO`(s^pDlVE_h2?#H`$wK(Y0%@(T!aMk=2UlL6o2{3x~4bkAh*gZ-gIV|=&buLIJ+ z-bpEt-xK-$>^-Jx3Kq zn3aoGQ?*CnY4ZU@>r@74Mqi;0mbje=`^r#P-F%fKWOHOXY=OFy+?mUYlsf%(@?^?rd| zB4$9?_I7zXDf{>z#kILc9_=Wkv7?H-+HN<}YX&eaXysYG^=NJ+z@QLogUX_4Q_TEG zj!qo+^kKf)T0x8gdA-hJR;{U>Xh>e}y;X_H1vdVrg?!B9>XarDm)=*h*)IELk2X!X zN(x8^yXW!aF6eXuL5=li*{ER?1RmL!VMQeXpGghjhoA!H(6AQqXl4z64gAEy!LM9$ zJLg8q)UBS~>X!F$4dyd7(>-43Fh5w_Eo*Y#X|uZHXgYunYI?37i91P*X9n1f&+drs zdqjfu+JpBFuWnZPS`Ii){WA~Ed~&@X!0!LiyZrw0E<6s5DuO+$M}LJfEm3&=sGH?A z&Lta{SOfW!1Qn-x^m}`hBLk z!~^(5O+xB5-C7!!J?st4%)2^KLK=*mnm5|TU5GEzuAX@MrF@Qv`cr6dOp2H}T&k&c z7q|kQGR7EqGmq0WgnZp%U&ZrInU8MJ?5DeS={c zu`AVzisOExF@Q-u?%KMdUmfD@*??f*Em8X*%h3fxct!C{4@wSI14;Z~FWasnF&h4S zl}*7J;c+l&>CT5bC7ExjP|CvRIYkk8F-Nqifi^&OUm+P(OAm~NMJL(klR5s%8ABe; zYw+!%vZFx(YPvll;sL5>M=kq;ZVE>jnRRRHRzb+HVPqM#f?kTFxB0d<05pM3`~x*pv`~JI#9&ZkRHz~FoK6m`ePE`;%PQA1xAS`O^Ya_^ z-F(d9T=dwv{j_Fxe^9l(>l_#F`S40+ld-OjeI*7-+s>N zMrczx9kV^rhDj#>E}Z-APh1@n&FWlKyhx1uiFk@LPDNr{Bft1v^=6MMjabq1X|(HQ z$EVCUvje?l8V~(7@4daxs{GIEqZ;^=K!Z1wH2yv~e4F3g7C1Z|`!-&P4_6n;P|@%N zp3NR8t{KMwODR9BZ4S1EAaMQ?Fa&?<&FsjIua`+eJJXZ%-acsY;d|-Rh;z^v8X6Q7 zxWCf6B}}P0{QHP(SaO!s~1OP1w?t3MfDm zh%=<{oBKdLdwL}2kya4cg3MqevGVrQ%F2Nz(fk-^LEZV`Wu$T2JFni&=u?!@b<&bP zqZeL)L!cpf;xPN=K%dA^W^9Vpa_s8FmP>{N!t^!=ifHF)X5}D^$*(F|dpM_s1(l&B_wwf%mx2>e zz5x;uzYNxp0F2#_JtfA=37e7Bp^SM>Cgmi#2i$l$p>tXa9#%oghf#?jRSmWN;BbX8 zayXZ2t}kvDrU1n$iQ$I#Gi9Nh9^?9-?W>gba#6doZr>D9KcEvof^R(mb)*gM4YpW# zGZ>I&62Y;{>MntR9E0oc3y&*SPshigmy(F|A2eGe7K70<47lmc3|_q-hFI=h;`eCR z3pyN}u*W6Jc?KL!eJ2k3E9lC6rA1!83fN=CW)*aG6yzE!WzywW5y~cx*|6_xUJ-+FJa}iF#@?H% 
z2j!&!^V_~uUKh?)tV+S$E@OAQ6~4?Xo!1Qkn&RF^xU;K+!XC};na-R+bTQ-H${1pC zyWGGzF?qmHmT-pQyK*EDeqkuyFVs%K&JHJMU3_34of$9eVc&f)m|69n<1U{l3BW&Vg%>Ynpb+017q62Rx>$xwV`X1m_M7>=Hl1zB zTwD+A{SeTBt3#7VGTSR0!itWkHQsgRBioe-0DL5U*N}ax)3MI}9D%UQFDX)&Jkx(k zT&B1=aH!7nGc)y1JLFHb&oTp?-C}nC)X}*H;Krf(`SSi24CCJ$BoAF7DL<#{wV8D& zV|3nLL>bugypr*AP^%V-y!fhK>8YhUlnWV%dwqw}KXlUJqhsTh-b&2yy8P$e?@Ovf z2P2*m)wab>-+J>Q-V`F=y*hcF=PloU`}%QV>dn-ORMWe{eKm1)In04wR~D2qba5_2 zjx=F$bGSjPC0LK}%E1!6;|J!`0g`CB*7|4E42|NCQR+#C%Efah;})qQ!x~@wN$Qj zE+>MQD(Aeld}3Vc617D*Bfq~n7=%)erM}{c?eWL(?(-)Zb_KAL+49&D&uv3HYGO{7 z^=Q7dx3;`?J!Gjv*HUFf7fA1gN|9*jyP{x13QpT+TH>bKZYLQSVKIV1cW94f*&i7r z;sZJkS9M%nlR!-e-R&J_?X17}SS3k(pm!*ltW1(Z?EK@r6C+u)f+rwWKN4PZYp;rk z&B2N%|F3b2bNcxKBlFSR@zWTI{L$bF|6&8DWw{uV@-F=P4?`b1F4-SpX6O+=65x4` z*s#+eUY^clRjw?#g%P|uvb(MW#G?ChfE;SyR`29(Ez)NfPt3mRjx)Y=6LkFgN$0%> z1E| zD+*cJi}-e%@i`wws1!ZW;{M&plSWxW_qjJT&2K%War0!Q-ViMKv1&i=BfgH*%-MJa zQp$;n78V-U`X!>Bu5~#|vc0R*-tT zKnWLQ!o~<&NQz_685mXf0rl#sL2wDbsZomwuB5?=Q9RUBj2R;eyTqe)s_DFi(>Ai{ zg7}PFYU8A=@_3I*X$j1Q-l4^n-egziESyUb9|PLRcx{3rt#fl|P5^Atago-#0J^F- zVb^=;iBItBgS~i`2`lKKYjlzpbVyr@Lg8$V8v8O`xlYusHCPm=+ju*`hH+4r&%?e$ zHUe7VOyL-sN3^~0X3b_IcN($cIQA0?3hNgVzq)a9=d%DaH`>#7%%DoB>?9CBuHo8v z_#V-QtTrNIy;pr<=ZUnegGo;7{cy$ANj7uMpukpg|H$Z&roMis zOVpCs$eoVR3?D#>L>TlCYc>x7io?Zq3W{PvK32DGDD{8zRu3NXmL{7J4lSM{x#Uc% zNL~7~)2@7t^ho-e`yfig?^j4-1Ql%uX~LQa6@q?%h{b64({ap?XR`~E zUg@PQwf@q~tMsujPUTyU`Ai0yeZyFi^eXl`{)e34!uaJ1$_}xtI_p4!MW^**^y2cQ z#}cee?f$%1eE))L{O7QB&Ywk4cW^1wctZLn>3THu#69;e`s85WR2)CKeO>Ke=0!w6 z)ctv1XLUDVcJF+@ciW-2GvmOvV|iyh{jU{F`9H2;d!%r$(J1iH?A#=^)3dNpTp}4wUtGQR~sm=Y4YN4qFf$!wgk7W3k=3C;`9#9*nfo#|xrF?EAMyQz>>G!1o&uPu|5*9(ij-NU1a8nFKSccMI}X6lJli z`g#%aW8T8X=F3;a#sNgcVn;zqAmwGh%s#55V+3Tm%orKQpjr9>;TD*-tq!?uRT(T# z%>Eo-FhPiLJlMwvX@=58c;!goE6m?Q5Ccl49||; zeHID9n3%D&4(Egqy-%Ep(OA9)=#d>|f}ZQ|rwc}AV0Qfg}mJEG)@0GKK;BJdug3r z@u2q__t~18;rP zfRe-GMWaRncqZZh0V?avH_05m%o5HM;WguNC@I7$cT-wVw^(MVUeA~{(H?OyD9{sn z>;3`KY2}(XN8{YGGhSoU6Lst;mswQw(fzT{+3&VpkoZ%G8m&kUhKHq8R&R; 
zSh>OFBMt$->BF$D&g-A?ooB`7cf13QL2w;_^y^LPr5X=OT__InKF{<|(cNDm--Im9mu@pC*e443M{w#pccGhM;LX_?HBnIbm%LO3 zb1S68Q&Lu{kMK!DU>|6C^bo!usv{1;^;;!tuhOjfT{h#5;TqzUA-ae@znC1ECQ}~O z4+wVXt+aD0*`W%RvO?k(@6`(;C!8SDLET)gZ%1ccAUS9UOl`)Zb{;^ z`mPi_9*LUhbKTq+h&$gLI0$mc$7A%{@brr*=Z35Vy!|d8!(&%=sHyYaG~p}VkHSeY zAZBx^ZDf?r7zHDf&s%GNQNprJx_DX8V-U0v2Ekn^Qi``KqbG9eJYc)A_*pN;eoUb? zS1ARQvT9HryJf1I^&SI}aNGKhRGud6oincbY?xWzh25vnpp%x0qQb0vEkF^%u#2h5 zp)rJbMfe#1rOIc0|9me=z58@q=$-q;N6-rkGeM)kcEhg$gm!qnl<=Kn`8Y~#!+f9@ z=8%Kq@3+rUQ8lxG$nzOm+WL!h>uEJWy6W`0SOIlkjKZH?^{lcn5KzdT`L4k$GY*A8 zzXt&89p@Fi-AVGi*iuFGlu-s!T1Y=lV9zyv%2N8eii!D~*wimZELrh9x9T=TNYD3e z9vg&G4=y~gH#`Q-E*)6}0nD1VXj(dTCLp=ngp&?S?lPkE}p;=L8wJoT~Wa>@ER6;JJkZh$jKGF79$fMjVDo_O6Y*pD12wEk<$%T7hB zU^qk2IJ14$J7y)=gQ7SF_ZO!Uz1#}s4hcNQzA}DTsxN(lhe528~F{hZI6=9>~k5*^4J zL@xNtmPiDc*h@atO5Bv%*kX@3!Jj#*fk;ln?x)(}qV);9VDuErnH z8dz>PVo+E5HQNIbF@qDssVEpUda`UZw%?Li*6+o+Z0#j{nY3AXPF-#Xjs_}ioR_{Y zgNkg0H>rM~wRg5^V~@vGsnwkYwxQv_+|4yc@r^xSqFjqEY<@$~r&6lc^wLHPNVM(3 z)9nj}25|d2D=<^_^8D@#;2)PdGjert&1Y2ndS6$=+V9g$LG5@pPb%ittb{bACKU!r z)w-?ztzA+ITaAnSKv0UOj?E*{RgF_c$IMSPAW*KHkI26Ie>#dv(c>g`uhR~B2`+3= z5$3tUwUXI}^3b_RjZ2^I4{ps&4>9KwRVb}25&`d~REfVmSj4_U%^H~G)64Fp zC0do}`mNz08~{WF>Kolyk-&wOoV~ceV0*ik#o4dIgg3Q7OCD`%7Xc4GAU4)$z(3?b z|2x32_CgFNsa%$>UG$=a|A>=#{`tE3wW7i6Npb`%Wzcm*)C}bHvrDJ%;2T=UU$XJV zG-tSUILL&!q7`InSqC~7H}oR?aofL5^*t(X5WKI321DBK^h_kvzyC9i_iu3TwvU@{sKydK`;F$||j~KtuDu)Mt zI~_8!Qcw_}g3~H^s7Wth9yyF=+yluh`DXZ^X8hm(U#tY>(HKX7TKnkZ!r#!ve}o)q tP_EG6{CtXa_ksVHvH#Z;=MADfQlnTPW-zU!#Jz%Din3}l)lwgV{s&9hA?5%8 literal 0 HcmV?d00001 diff --git a/doc/source/ray-observability/ray-logging.rst b/doc/source/ray-observability/ray-logging.rst index 69c40509813f..58663de30d79 100644 --- a/doc/source/ray-observability/ray-logging.rst +++ b/doc/source/ray-observability/ray-logging.rst @@ -109,6 +109,15 @@ This produces the following output: (MyActor(index=2) pid=482120) hello there (MyActor(index=1) pid=482119) hello there +Coloring Actor log prefixes 
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +By default Ray prints Actor logs prefixes in light blue: +Users may instead activate multi-color prefixes by setting the environment variable ``RAY_COLOR_PREFIX=1``. +This will index into an array of colors modulo the PID of each process. + +.. image:: images/images/coloring-actor-log-prefixes.png + :align: center + Distributed progress bars (tqdm) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/python/ray/_private/worker.py b/python/ray/_private/worker.py index 4389510c7d21..6a66667e4a3f 100644 --- a/python/ray/_private/worker.py +++ b/python/ray/_private/worker.py @@ -1859,6 +1859,28 @@ def color_for(data: Dict[str, str], line: str) -> str: return colorama.Style.BRIGHT + colorama.Fore.YELLOW else: return colorama.Style.BRIGHT + colorama.Fore.CYAN + elif os.getenv("RAY_COLOR_PREFIX") == "1": + colors = [ + # colorama.Fore.BLUE, # Too dark + colorama.Fore.MAGENTA, + colorama.Fore.CYAN, + colorama.Fore.GREEN, + # colorama.Fore.WHITE, # Too light + # colorama.Fore.RED, + colorama.Fore.LIGHTBLACK_EX, + colorama.Fore.LIGHTBLUE_EX, + # colorama.Fore.LIGHTCYAN_EX, # Too light + # colorama.Fore.LIGHTGREEN_EX, # Too light + colorama.Fore.LIGHTMAGENTA_EX, + # colorama.Fore.LIGHTWHITE_EX, # Too light + # colorama.Fore.LIGHTYELLOW_EX, # Too light + ] + pid = data.get("pid", 0) + try: + i = int(pid) + except ValueError: + i = 0 + return colors[i % len(colors)] else: return colorama.Fore.CYAN From a252b731f47a200d9b688a4098aeea17342074d2 Mon Sep 17 00:00:00 2001 From: Max Pumperla Date: Wed, 26 Apr 2023 07:55:30 +0200 Subject: [PATCH 101/424] [docs] sphinx design migration 3/N (#34742) --- doc/source/ray-core/actors/actor-utils.rst | 20 +- .../actors/out-of-band-communication.rst | 6 +- .../ray-core/actors/terminating-actors.rst | 80 +- doc/source/ray-core/cross-language.rst | 48 +- doc/source/ray-core/fault_tolerance/gcs.rst | 34 +- doc/source/ray-core/namespaces.rst | 380 ++++---- doc/source/ray-core/objects.rst | 186 ++-- 
doc/source/ray-core/ray-dag.rst | 38 +- doc/source/ray-core/ray-dashboard.rst | 52 +- .../ray-core/scheduling/placement-group.rst | 436 ++++----- doc/source/ray-core/scheduling/resources.rst | 92 +- doc/source/ray-core/tasks.rst | 264 +++--- doc/source/rllib/key-concepts.rst | 44 +- doc/source/rllib/rllib-connector.rst | 78 +- doc/source/rllib/rllib-rlmodule.rst | 415 +++++---- doc/source/serve/index.md | 17 +- doc/source/serve/model_composition.md | 16 +- .../serve/production-guide/fault-tolerance.md | 19 +- .../tutorials/gradio-dag-visualization.md | 10 +- doc/source/serve/tutorials/serve-ml-models.md | 14 +- doc/source/train/dl_guide.rst | 854 +++++++++--------- doc/source/train/gbdt.rst | 59 +- doc/source/train/getting-started.rst | 256 +++--- doc/source/train/key-concepts.rst | 50 +- doc/source/train/train.rst | 43 +- doc/source/tune/index.rst | 96 +- doc/source/tune/key-concepts.rst | 42 +- doc/source/tune/tutorials/tune-output.rst | 114 +-- doc/source/tune/tutorials/tune-stopping.rst | 74 +- .../tune/tutorials/tune-trial-checkpoints.rst | 22 +- 30 files changed, 2005 insertions(+), 1854 deletions(-) diff --git a/doc/source/ray-core/actors/actor-utils.rst b/doc/source/ray-core/actors/actor-utils.rst index 6e5b3da20d4d..ccdae25a5973 100644 --- a/doc/source/ray-core/actors/actor-utils.rst +++ b/doc/source/ray-core/actors/actor-utils.rst @@ -4,22 +4,24 @@ Utility Classes Actor Pool ~~~~~~~~~~ -.. tabbed:: Python +.. tab-set:: - The ``ray.util`` module contains a utility class, ``ActorPool``. - This class is similar to multiprocessing.Pool and lets you schedule Ray tasks over a fixed pool of actors. + .. tab-item:: Python - .. literalinclude:: ../doc_code/actor-pool.py + The ``ray.util`` module contains a utility class, ``ActorPool``. + This class is similar to multiprocessing.Pool and lets you schedule Ray tasks over a fixed pool of actors. - See the :class:`package reference ` for more information. + .. literalinclude:: ../doc_code/actor-pool.py -.. 
tabbed:: Java + See the :class:`package reference ` for more information. - Actor pool hasn't been implemented in Java yet. + .. tab-item:: Java -.. tabbed:: C++ + Actor pool hasn't been implemented in Java yet. - Actor pool hasn't been implemented in C++ yet. + .. tab-item:: C++ + + Actor pool hasn't been implemented in C++ yet. Message passing using Ray Queue ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/doc/source/ray-core/actors/out-of-band-communication.rst b/doc/source/ray-core/actors/out-of-band-communication.rst index 7d9eff73c99f..063b9a26f69d 100644 --- a/doc/source/ray-core/actors/out-of-band-communication.rst +++ b/doc/source/ray-core/actors/out-of-band-communication.rst @@ -22,9 +22,11 @@ HTTP Server You can start a http server inside the actor and expose http endpoints to clients so users outside of the ray cluster can communicate with the actor. -.. tabbed:: Python +.. tab-set:: - .. literalinclude:: ../doc_code/actor-http-server.py + .. tab-item:: Python + + .. literalinclude:: ../doc_code/actor-http-server.py Similarly, you can expose other types of servers as well (e.g., gRPC servers). diff --git a/doc/source/ray-core/actors/terminating-actors.rst b/doc/source/ray-core/actors/terminating-actors.rst index d71d8f3312bf..11ba78462f2e 100644 --- a/doc/source/ray-core/actors/terminating-actors.rst +++ b/doc/source/ray-core/actors/terminating-actors.rst @@ -18,33 +18,35 @@ be reserved for cases where an actor is unexpectedly hanging or leaking resources, and for :ref:`detached actors `, which must be manually destroyed. -.. tabbed:: Python +.. tab-set:: - .. code-block:: python + .. tab-item:: Python - ray.kill(actor_handle) - # This will not go through the normal Python sys.exit - # teardown logic, so any exit handlers installed in - # the actor using ``atexit`` will not be called. + .. 
code-block:: python + ray.kill(actor_handle) + # This will not go through the normal Python sys.exit + # teardown logic, so any exit handlers installed in + # the actor using ``atexit`` will not be called. -.. tabbed:: Java - .. code-block:: java + .. tab-item:: Java - actorHandle.kill(); - // This will not go through the normal Java System.exit teardown logic, so any - // shutdown hooks installed in the actor using ``Runtime.addShutdownHook(...)`` will - // not be called. + .. code-block:: java -.. tabbed:: C++ + actorHandle.kill(); + // This will not go through the normal Java System.exit teardown logic, so any + // shutdown hooks installed in the actor using ``Runtime.addShutdownHook(...)`` will + // not be called. - .. code-block:: c++ + .. tab-item:: C++ - actor_handle.Kill(); - // This will not go through the normal C++ std::exit - // teardown logic, so any exit handlers installed in - // the actor using ``std::atexit`` will not be called. + .. code-block:: c++ + + actor_handle.Kill(); + // This will not go through the normal C++ std::exit + // teardown logic, so any exit handlers installed in + // the actor using ``std::atexit`` will not be called. This will cause the actor to immediately exit its process, causing any current, @@ -63,37 +65,39 @@ Manual termination within the actor If necessary, you can manually terminate an actor from within one of the actor methods. This will kill the actor process and release resources associated/assigned to the actor. -.. tabbed:: Python +.. tab-set:: + + .. tab-item:: Python - .. code-block:: python + .. code-block:: python - ray.actor.exit_actor() + ray.actor.exit_actor() - This approach should generally not be necessary as actors are automatically garbage - collected. The ``ObjectRef`` resulting from the task can be waited on to wait - for the actor to exit (calling ``ray.get()`` on it will raise a ``RayActorError``). + This approach should generally not be necessary as actors are automatically garbage + collected. 
The ``ObjectRef`` resulting from the task can be waited on to wait + for the actor to exit (calling ``ray.get()`` on it will raise a ``RayActorError``). -.. tabbed:: Java + .. tab-item:: Java - .. code-block:: java + .. code-block:: java - Ray.exitActor(); + Ray.exitActor(); - Garbage collection for actors haven't been implemented yet, so this is currently the - only way to terminate an actor gracefully. The ``ObjectRef`` resulting from the task - can be waited on to wait for the actor to exit (calling ``ObjectRef::get`` on it will - throw a ``RayActorException``). + Garbage collection for actors haven't been implemented yet, so this is currently the + only way to terminate an actor gracefully. The ``ObjectRef`` resulting from the task + can be waited on to wait for the actor to exit (calling ``ObjectRef::get`` on it will + throw a ``RayActorException``). -.. tabbed:: C++ + .. tab-item:: C++ - .. code-block:: c++ + .. code-block:: c++ - ray::ExitActor(); + ray::ExitActor(); - Garbage collection for actors haven't been implemented yet, so this is currently the - only way to terminate an actor gracefully. The ``ObjectRef`` resulting from the task - can be waited on to wait for the actor to exit (calling ``ObjectRef::Get`` on it will - throw a ``RayActorException``). + Garbage collection for actors haven't been implemented yet, so this is currently the + only way to terminate an actor gracefully. The ``ObjectRef`` resulting from the task + can be waited on to wait for the actor to exit (calling ``ObjectRef::Get`` on it will + throw a ``RayActorException``). Note that this method of termination will wait until any previously submitted tasks finish executing and then exit the process gracefully with sys.exit. 
diff --git a/doc/source/ray-core/cross-language.rst b/doc/source/ray-core/cross-language.rst index 4c24375a95a7..487d2263daa9 100644 --- a/doc/source/ray-core/cross-language.rst +++ b/doc/source/ray-core/cross-language.rst @@ -10,39 +10,43 @@ Setup the driver We need to set :ref:`code_search_path` in your driver. -.. tabbed:: Python +.. tab-set:: - .. literalinclude:: ./doc_code/cross_language.py - :language: python - :start-after: __crosslang_init_start__ - :end-before: __crosslang_init_end__ + .. tab-item:: Python -.. tabbed:: Java + .. literalinclude:: ./doc_code/cross_language.py + :language: python + :start-after: __crosslang_init_start__ + :end-before: __crosslang_init_end__ - .. code-block:: bash + .. tab-item:: Java - java -classpath \ - -Dray.address=

    \ - -Dray.job.code-search-path=/path/to/code/ \ - + .. code-block:: bash + + java -classpath \ + -Dray.address=
    \ + -Dray.job.code-search-path=/path/to/code/ \ + You may want to include multiple directories to load both Python and Java code for workers, if they are placed in different directories. -.. tabbed:: Python +.. tab-set:: + + .. tab-item:: Python - .. literalinclude:: ./doc_code/cross_language.py - :language: python - :start-after: __crosslang_multidir_start__ - :end-before: __crosslang_multidir_end__ + .. literalinclude:: ./doc_code/cross_language.py + :language: python + :start-after: __crosslang_multidir_start__ + :end-before: __crosslang_multidir_end__ -.. tabbed:: Java + .. tab-item:: Java - .. code-block:: bash + .. code-block:: bash - java -classpath \ - -Dray.address=
    \ - -Dray.job.code-search-path=/path/to/jars:/path/to/pys \ - + java -classpath \ + -Dray.address=
    \ + -Dray.job.code-search-path=/path/to/jars:/path/to/pys \ + Python calling Java ------------------- diff --git a/doc/source/ray-core/fault_tolerance/gcs.rst b/doc/source/ray-core/fault_tolerance/gcs.rst index d94f9db27b85..9f995e518a80 100644 --- a/doc/source/ray-core/fault_tolerance/gcs.rst +++ b/doc/source/ray-core/fault_tolerance/gcs.rst @@ -22,33 +22,35 @@ However, running Ray tasks and actors remain alive and any existing objects will Setting up Redis ---------------- -.. tabbed:: KubeRay (officially supported) +.. tab-set:: - If you are using :ref:`KubeRay `, please refer to `KubeRay docs on GCS Fault Tolerance `_. + .. tab-item:: KubeRay (officially supported) -.. tabbed:: ray start + If you are using :ref:`KubeRay `, please refer to `KubeRay docs on GCS Fault Tolerance `_. - If you are using :ref:`ray start ` to start the Ray head node, - set the OS environment ``RAY_REDIS_ADDRESS`` to - the Redis address, and supply the ``--redis-password`` flag with the password when calling ``ray start``: + .. tab-item:: ray start - .. code-block:: shell + If you are using :ref:`ray start ` to start the Ray head node, + set the OS environment ``RAY_REDIS_ADDRESS`` to + the Redis address, and supply the ``--redis-password`` flag with the password when calling ``ray start``: - RAY_REDIS_ADDRESS=redis_ip:port ray start --head --redis-password PASSWORD + .. code-block:: shell -.. tabbed:: ray up + RAY_REDIS_ADDRESS=redis_ip:port ray start --head --redis-password PASSWORD - If you are using :ref:`ray up ` to start the Ray cluster, change :ref:`head_start_ray_commands ` field to add ``RAY_REDIS_ADDRESS`` and ``--redis-password`` to the ``ray start`` command: + .. tab-item:: ray up - .. 
code-block:: yaml + If you are using :ref:`ray up ` to start the Ray cluster, change :ref:`head_start_ray_commands ` field to add ``RAY_REDIS_ADDRESS`` and ``--redis-password`` to the ``ray start`` command: - head_start_ray_commands: - - ray stop - - ulimit -n 65536; RAY_REDIS_ADDRESS=redis_ip:port ray start --head --redis-password PASSWORD --port=6379 --object-manager-port=8076 --autoscaling-config=~/ray_bootstrap_config.yaml --dashboard-host=0.0.0.0 + .. code-block:: yaml -.. tabbed:: Kubernetes + head_start_ray_commands: + - ray stop + - ulimit -n 65536; RAY_REDIS_ADDRESS=redis_ip:port ray start --head --redis-password PASSWORD --port=6379 --object-manager-port=8076 --autoscaling-config=~/ray_bootstrap_config.yaml --dashboard-host=0.0.0.0 - If you are using Kubernetes but not :ref:`KubeRay `, please refer to :ref:`this doc `. + .. tab-item:: Kubernetes + + If you are using Kubernetes but not :ref:`KubeRay `, please refer to :ref:`this doc `. Once the GCS is backed by Redis, when it restarts, it'll recover the diff --git a/doc/source/ray-core/namespaces.rst b/doc/source/ray-core/namespaces.rst index 636cc138dab2..012bc8d3f653 100644 --- a/doc/source/ray-core/namespaces.rst +++ b/doc/source/ray-core/namespaces.rst @@ -9,117 +9,121 @@ named, its name must be unique within the namespace. In order to set your applications namespace, it should be specified when you first connect to the cluster. -.. tabbed:: Python +.. tab-set:: - .. literalinclude:: ./doc_code/namespaces.py - :language: python - :start-after: __init_namespace_start__ - :end-before: __init_namespace_end__ + .. tab-item:: Python -.. tabbed:: Java + .. literalinclude:: ./doc_code/namespaces.py + :language: python + :start-after: __init_namespace_start__ + :end-before: __init_namespace_end__ - .. code-block:: java + .. tab-item:: Java - System.setProperty("ray.job.namespace", "hello"); // set it before Ray.init() - Ray.init(); + .. code-block:: java -.. 
tabbed:: C++ + System.setProperty("ray.job.namespace", "hello"); // set it before Ray.init() + Ray.init(); - .. code-block:: c++ + .. tab-item:: C++ - ray::RayConfig config; - config.ray_namespace = "hello"; - ray::Init(config); + .. code-block:: c++ + + ray::RayConfig config; + config.ray_namespace = "hello"; + ray::Init(config); Please refer to `Driver Options `__ for ways of configuring a Java application. Named actors are only accessible within their namespaces. -.. tabbed:: Python - - .. literalinclude:: ./doc_code/namespaces.py - :language: python - :start-after: __actor_namespace_start__ - :end-before: __actor_namespace_end__ - -.. tabbed:: Java - - .. code-block:: java - - // `ray start --head` has been run to launch a local cluster. - - // Job 1 creates two actors, "orange" and "purple" in the "colors" namespace. - System.setProperty("ray.address", "localhost:10001"); - System.setProperty("ray.job.namespace", "colors"); - try { - Ray.init(); - Ray.actor(Actor::new).setName("orange").remote(); - Ray.actor(Actor::new).setName("purple").remote(); - } finally { - Ray.shutdown(); - } - - // Job 2 is now connecting to a different namespace. - System.setProperty("ray.address", "localhost:10001"); - System.setProperty("ray.job.namespace", "fruits"); - try { - Ray.init(); +.. tab-set:: + + .. tab-item:: Python + + .. literalinclude:: ./doc_code/namespaces.py + :language: python + :start-after: __actor_namespace_start__ + :end-before: __actor_namespace_end__ + + .. tab-item:: Java + + .. code-block:: java + + // `ray start --head` has been run to launch a local cluster. + + // Job 1 creates two actors, "orange" and "purple" in the "colors" namespace. 
+ System.setProperty("ray.address", "localhost:10001"); + System.setProperty("ray.job.namespace", "colors"); + try { + Ray.init(); + Ray.actor(Actor::new).setName("orange").remote(); + Ray.actor(Actor::new).setName("purple").remote(); + } finally { + Ray.shutdown(); + } + + // Job 2 is now connecting to a different namespace. + System.setProperty("ray.address", "localhost:10001"); + System.setProperty("ray.job.namespace", "fruits"); + try { + Ray.init(); + // This fails because "orange" was defined in the "colors" namespace. + Ray.getActor("orange").isPresent(); // return false + // This succceeds because the name "orange" is unused in this namespace. + Ray.actor(Actor::new).setName("orange").remote(); + Ray.actor(Actor::new).setName("watermelon").remote(); + } finally { + Ray.shutdown(); + } + + // Job 3 connects to the original "colors" namespace. + System.setProperty("ray.address", "localhost:10001"); + System.setProperty("ray.job.namespace", "colors"); + try { + Ray.init(); + // This fails because "watermelon" was in the fruits namespace. + Ray.getActor("watermelon").isPresent(); // return false + // This returns the "orange" actor we created in the first job, not the second. + Ray.getActor("orange").isPresent(); // return true + } finally { + Ray.shutdown(); + } + + .. tab-item:: C++ + + .. code-block:: c++ + + // `ray start --head` has been run to launch a local cluster. + + // Job 1 creates two actors, "orange" and "purple" in the "colors" namespace. + ray::RayConfig config; + config.ray_namespace = "colors"; + ray::Init(config); + ray::Actor(RAY_FUNC(Counter::FactoryCreate)).SetName("orange").Remote(); + ray::Actor(RAY_FUNC(Counter::FactoryCreate)).SetName("purple").Remote(); + ray::Shutdown(); + + // Job 2 is now connecting to a different namespace. + ray::RayConfig config; + config.ray_namespace = "fruits"; + ray::Init(config); // This fails because "orange" was defined in the "colors" namespace. 
- Ray.getActor("orange").isPresent(); // return false + ray::GetActor("orange"); // return nullptr; // This succceeds because the name "orange" is unused in this namespace. - Ray.actor(Actor::new).setName("orange").remote(); - Ray.actor(Actor::new).setName("watermelon").remote(); - } finally { - Ray.shutdown(); - } - - // Job 3 connects to the original "colors" namespace. - System.setProperty("ray.address", "localhost:10001"); - System.setProperty("ray.job.namespace", "colors"); - try { - Ray.init(); + ray::Actor(RAY_FUNC(Counter::FactoryCreate)).SetName("orange").Remote(); + ray::Actor(RAY_FUNC(Counter::FactoryCreate)).SetName("watermelon").Remote(); + ray::Shutdown(); + + // Job 3 connects to the original "colors" namespace. + ray::RayConfig config; + config.ray_namespace = "colors"; + ray::Init(config); // This fails because "watermelon" was in the fruits namespace. - Ray.getActor("watermelon").isPresent(); // return false + ray::GetActor("watermelon"); // return nullptr; // This returns the "orange" actor we created in the first job, not the second. - Ray.getActor("orange").isPresent(); // return true - } finally { - Ray.shutdown(); - } - -.. tabbed:: C++ - - .. code-block:: c++ - - // `ray start --head` has been run to launch a local cluster. - - // Job 1 creates two actors, "orange" and "purple" in the "colors" namespace. - ray::RayConfig config; - config.ray_namespace = "colors"; - ray::Init(config); - ray::Actor(RAY_FUNC(Counter::FactoryCreate)).SetName("orange").Remote(); - ray::Actor(RAY_FUNC(Counter::FactoryCreate)).SetName("purple").Remote(); - ray::Shutdown(); - - // Job 2 is now connecting to a different namespace. - ray::RayConfig config; - config.ray_namespace = "fruits"; - ray::Init(config); - // This fails because "orange" was defined in the "colors" namespace. - ray::GetActor("orange"); // return nullptr; - // This succceeds because the name "orange" is unused in this namespace. 
- ray::Actor(RAY_FUNC(Counter::FactoryCreate)).SetName("orange").Remote(); - ray::Actor(RAY_FUNC(Counter::FactoryCreate)).SetName("watermelon").Remote(); - ray::Shutdown(); - - // Job 3 connects to the original "colors" namespace. - ray::RayConfig config; - config.ray_namespace = "colors"; - ray::Init(config); - // This fails because "watermelon" was in the fruits namespace. - ray::GetActor("watermelon"); // return nullptr; - // This returns the "orange" actor we created in the first job, not the second. - ray::GetActor("orange"); - ray::Shutdown(); + ray::GetActor("orange"); + ray::Shutdown(); Specifying namespace for named actors ------------------------------------- @@ -127,45 +131,47 @@ Specifying namespace for named actors You can specify a namespace for a named actor while creating it. The created actor belongs to the specified namespace, no matter what namespace of the current job is. -.. tabbed:: Python +.. tab-set:: - .. literalinclude:: ./doc_code/namespaces.py - :language: python - :start-after: __specify_actor_namespace_start__ - :end-before: __specify_actor_namespace_end__ + .. tab-item:: Python + .. literalinclude:: ./doc_code/namespaces.py + :language: python + :start-after: __specify_actor_namespace_start__ + :end-before: __specify_actor_namespace_end__ -.. tabbed:: Java - .. code-block:: java + .. tab-item:: Java - // `ray start --head` has been run to launch a local cluster. + .. code-block:: java - System.setProperty("ray.address", "localhost:10001"); - try { - Ray.init(); - // Create an actor with specified namespace. - Ray.actor(Actor::new).setName("my_actor", "actor_namespace").remote(); - // It is accessible in its namespace. - Ray.getActor("my_actor", "actor_namespace").isPresent(); // return true + // `ray start --head` has been run to launch a local cluster. - } finally { - Ray.shutdown(); - } + System.setProperty("ray.address", "localhost:10001"); + try { + Ray.init(); + // Create an actor with specified namespace. 
+ Ray.actor(Actor::new).setName("my_actor", "actor_namespace").remote(); + // It is accessible in its namespace. + Ray.getActor("my_actor", "actor_namespace").isPresent(); // return true -.. tabbed:: C++ + } finally { + Ray.shutdown(); + } - .. code-block:: c++ + .. tab-item:: C++ - // `ray start --head` has been run to launch a local cluster. + .. code-block:: c++ - ray::RayConfig config; - ray::Init(config); - // Create an actor with specified namespace. - ray::Actor(RAY_FUNC(Counter::FactoryCreate)).SetName("my_actor", "actor_namespace").Remote(); - // It is accessible in its namespace. - ray::GetActor("orange"); - ray::Shutdown(); + // `ray start --head` has been run to launch a local cluster. + + ray::RayConfig config; + ray::Init(config); + // Create an actor with specified namespace. + ray::Actor(RAY_FUNC(Counter::FactoryCreate)).SetName("my_actor", "actor_namespace").Remote(); + // It is accessible in its namespace. + ray::GetActor("orange"); + ray::Shutdown();` Anonymous namespaces -------------------- @@ -174,56 +180,58 @@ When a namespace is not specified, Ray will place your job in an anonymous namespace. In an anonymous namespace, your job will have its own namespace and will not have access to actors in other namespaces. -.. tabbed:: Python +.. tab-set:: - .. literalinclude:: ./doc_code/namespaces.py - :language: python - :start-after: __anonymous_namespace_start__ - :end-before: __anonymous_namespace_end__ + .. tab-item:: Python -.. tabbed:: Java + .. literalinclude:: ./doc_code/namespaces.py + :language: python + :start-after: __anonymous_namespace_start__ + :end-before: __anonymous_namespace_end__ - .. code-block:: java + .. tab-item:: Java - // `ray start --head` has been run to launch a local cluster. + .. code-block:: java - // Job 1 connects to an anonymous namespace by default. 
- System.setProperty("ray.address", "localhost:10001"); - try { - Ray.init(); - Ray.actor(Actor::new).setName("my_actor").remote(); - } finally { - Ray.shutdown(); - } + // `ray start --head` has been run to launch a local cluster. - // Job 2 connects to a _different_ anonymous namespace by default - System.setProperty("ray.address", "localhost:10001"); - try { - Ray.init(); - // This succeeds because the second job is in its own namespace. - Ray.actor(Actor::new).setName("my_actor").remote(); - } finally { - Ray.shutdown(); - } + // Job 1 connects to an anonymous namespace by default. + System.setProperty("ray.address", "localhost:10001"); + try { + Ray.init(); + Ray.actor(Actor::new).setName("my_actor").remote(); + } finally { + Ray.shutdown(); + } -.. tabbed:: C++ + // Job 2 connects to a _different_ anonymous namespace by default + System.setProperty("ray.address", "localhost:10001"); + try { + Ray.init(); + // This succeeds because the second job is in its own namespace. + Ray.actor(Actor::new).setName("my_actor").remote(); + } finally { + Ray.shutdown(); + } - .. code-block:: c++ + .. tab-item:: C++ - // `ray start --head` has been run to launch a local cluster. + .. code-block:: c++ - // Job 1 connects to an anonymous namespace by default. - ray::RayConfig config; - ray::Init(config); - ray::Actor(RAY_FUNC(Counter::FactoryCreate)).SetName("my_actor").Remote(); - ray::Shutdown(); + // `ray start --head` has been run to launch a local cluster. - // Job 2 connects to a _different_ anonymous namespace by default - ray::RayConfig config; - ray::Init(config); - // This succeeds because the second job is in its own namespace. - ray::Actor(RAY_FUNC(Counter::FactoryCreate)).SetName("my_actor").Remote(); - ray::Shutdown(); + // Job 1 connects to an anonymous namespace by default. 
+ ray::RayConfig config; + ray::Init(config); + ray::Actor(RAY_FUNC(Counter::FactoryCreate)).SetName("my_actor").Remote(); + ray::Shutdown(); + + // Job 2 connects to a _different_ anonymous namespace by default + ray::RayConfig config; + ray::Init(config); + // This succeeds because the second job is in its own namespace. + ray::Actor(RAY_FUNC(Counter::FactoryCreate)).SetName("my_actor").Remote(); + ray::Shutdown(); .. note:: @@ -236,34 +244,36 @@ Getting the current namespace ----------------------------- You can access to the current namespace using :ref:`runtime_context APIs `. -.. tabbed:: Python +.. tab-set:: - .. literalinclude:: ./doc_code/namespaces.py - :language: python - :start-after: __get_namespace_start__ - :end-before: __get_namespace_end__ + .. tab-item:: Python + .. literalinclude:: ./doc_code/namespaces.py + :language: python + :start-after: __get_namespace_start__ + :end-before: __get_namespace_end__ -.. tabbed:: Java - .. code-block:: java + .. tab-item:: Java - System.setProperty("ray.job.namespace", "colors"); - try { - Ray.init(); - // Will print namespace name "colors". - System.out.println(Ray.getRuntimeContext().getNamespace()); - } finally { - Ray.shutdown(); - } + .. code-block:: java -.. tabbed:: C++ + System.setProperty("ray.job.namespace", "colors"); + try { + Ray.init(); + // Will print namespace name "colors". + System.out.println(Ray.getRuntimeContext().getNamespace()); + } finally { + Ray.shutdown(); + } - .. code-block:: c++ + .. tab-item:: C++ - ray::RayConfig config; - config.ray_namespace = "colors"; - ray::Init(config); - // Will print namespace name "colors". - std::cout << ray::GetNamespace() << std::endl; - ray::Shutdown(); + .. code-block:: c++ + + ray::RayConfig config; + config.ray_namespace = "colors"; + ray::Init(config); + // Will print namespace name "colors". 
+ std::cout << ray::GetNamespace() << std::endl; + ray::Shutdown(); diff --git a/doc/source/ray-core/objects.rst b/doc/source/ray-core/objects.rst index 721924c12c2c..e6ba74a5a61c 100644 --- a/doc/source/ray-core/objects.rst +++ b/doc/source/ray-core/objects.rst @@ -14,29 +14,31 @@ Object refs can be created in two ways. 1. They are returned by remote function calls. 2. They are returned by :func:`ray.put() `. -.. tabbed:: Python +.. tab-set:: - .. code-block:: python + .. tab-item:: Python - # Put an object in Ray's object store. - y = 1 - object_ref = ray.put(y) + .. code-block:: python -.. tabbed:: Java + # Put an object in Ray's object store. + y = 1 + object_ref = ray.put(y) - .. code-block:: java + .. tab-item:: Java - // Put an object in Ray's object store. - int y = 1; - ObjectRef objectRef = Ray.put(y); + .. code-block:: java -.. tabbed:: C++ + // Put an object in Ray's object store. + int y = 1; + ObjectRef objectRef = Ray.put(y); - .. code-block:: c++ + .. tab-item:: C++ - // Put an object in Ray's object store. - int y = 1; - ray::ObjectRef object_ref = ray::Put(y); + .. code-block:: c++ + + // Put an object in Ray's object store. + int y = 1; + ray::ObjectRef object_ref = ray::Put(y); .. note:: @@ -51,82 +53,84 @@ Fetching Object Data You can use the :func:`ray.get() ` method to fetch the result of a remote object from an object ref. If the current node's object store does not contain the object, the object is downloaded. -.. tabbed:: Python - - If the object is a `numpy array `__ - or a collection of numpy arrays, the ``get`` call is zero-copy and returns arrays backed by shared object store memory. - Otherwise, we deserialize the object data into a Python object. - - .. code-block:: python - - # Get the value of one object ref. - obj_ref = ray.put(1) - assert ray.get(obj_ref) == 1 - - # Get the values of multiple object refs in parallel. 
- assert ray.get([ray.put(i) for i in range(3)]) == [0, 1, 2] - - # You can also set a timeout to return early from a ``get`` - # that's blocking for too long. - from ray.exceptions import GetTimeoutError - # ``GetTimeoutError`` is a subclass of ``TimeoutError``. - - @ray.remote - def long_running_function(): - time.sleep(8) - - obj_ref = long_running_function.remote() - try: - ray.get(obj_ref, timeout=4) - except GetTimeoutError: # You can capture the standard "TimeoutError" instead - print("`get` timed out.") - -.. tabbed:: Java - - .. code-block:: java - - // Get the value of one object ref. - ObjectRef objRef = Ray.put(1); - Assert.assertTrue(objRef.get() == 1); - // You can also set a timeout(ms) to return early from a ``get`` that's blocking for too long. - Assert.assertTrue(objRef.get(1000) == 1); - - // Get the values of multiple object refs in parallel. - List> objectRefs = new ArrayList<>(); - for (int i = 0; i < 3; i++) { - objectRefs.add(Ray.put(i)); - } - List results = Ray.get(objectRefs); - Assert.assertEquals(results, ImmutableList.of(0, 1, 2)); - - // Ray.get timeout example: Ray.get will throw an RayTimeoutException if time out. - public class MyRayApp { - public static int slowFunction() throws InterruptedException { - TimeUnit.SECONDS.sleep(10); - return 1; - } - } - Assert.assertThrows(RayTimeoutException.class, - () -> Ray.get(Ray.task(MyRayApp::slowFunction).remote(), 3000)); - -.. tabbed:: C++ - - .. code-block:: c++ - - // Get the value of one object ref. - ray::ObjectRef obj_ref = ray::Put(1); - assert(*obj_ref.Get() == 1); - - // Get the values of multiple object refs in parallel. - std::vector> obj_refs; - for (int i = 0; i < 3; i++) { - obj_refs.emplace_back(ray::Put(i)); - } - auto results = ray::Get(obj_refs); - assert(results.size() == 3); - assert(*results[0] == 0); - assert(*results[1] == 1); - assert(*results[2] == 2); +.. tab-set:: + + .. 
tab-item:: Python + + If the object is a `numpy array `__ + or a collection of numpy arrays, the ``get`` call is zero-copy and returns arrays backed by shared object store memory. + Otherwise, we deserialize the object data into a Python object. + + .. code-block:: python + + # Get the value of one object ref. + obj_ref = ray.put(1) + assert ray.get(obj_ref) == 1 + + # Get the values of multiple object refs in parallel. + assert ray.get([ray.put(i) for i in range(3)]) == [0, 1, 2] + + # You can also set a timeout to return early from a ``get`` + # that's blocking for too long. + from ray.exceptions import GetTimeoutError + # ``GetTimeoutError`` is a subclass of ``TimeoutError``. + + @ray.remote + def long_running_function(): + time.sleep(8) + + obj_ref = long_running_function.remote() + try: + ray.get(obj_ref, timeout=4) + except GetTimeoutError: # You can capture the standard "TimeoutError" instead + print("`get` timed out.") + + .. tab-item:: Java + + .. code-block:: java + + // Get the value of one object ref. + ObjectRef objRef = Ray.put(1); + Assert.assertTrue(objRef.get() == 1); + // You can also set a timeout(ms) to return early from a ``get`` that's blocking for too long. + Assert.assertTrue(objRef.get(1000) == 1); + + // Get the values of multiple object refs in parallel. + List> objectRefs = new ArrayList<>(); + for (int i = 0; i < 3; i++) { + objectRefs.add(Ray.put(i)); + } + List results = Ray.get(objectRefs); + Assert.assertEquals(results, ImmutableList.of(0, 1, 2)); + + // Ray.get timeout example: Ray.get will throw an RayTimeoutException if time out. + public class MyRayApp { + public static int slowFunction() throws InterruptedException { + TimeUnit.SECONDS.sleep(10); + return 1; + } + } + Assert.assertThrows(RayTimeoutException.class, + () -> Ray.get(Ray.task(MyRayApp::slowFunction).remote(), 3000)); + + .. tab-item:: C++ + + .. code-block:: c++ + + // Get the value of one object ref. 
+ ray::ObjectRef obj_ref = ray::Put(1); + assert(*obj_ref.Get() == 1); + + // Get the values of multiple object refs in parallel. + std::vector> obj_refs; + for (int i = 0; i < 3; i++) { + obj_refs.emplace_back(ray::Put(i)); + } + auto results = ray::Get(obj_refs); + assert(results.size() == 3); + assert(*results[0] == 0); + assert(*results[1] == 1); + assert(*results[2] == 2); Passing Object Arguments ------------------------ diff --git a/doc/source/ray-core/ray-dag.rst b/doc/source/ray-core/ray-dag.rst index c1645f0b32f0..80fe6c4fdc63 100644 --- a/doc/source/ray-core/ray-dag.rst +++ b/doc/source/ray-core/ray-dag.rst @@ -40,12 +40,14 @@ functions to form more complex DAGs. Any IR node can be executed directly ``dag_node.execute()`` that acts as root of the DAG, where all other non-reachable nodes from the root will be igored. -.. tabbed:: Python +.. tab-set:: - .. literalinclude:: ./doc_code/ray-dag.py - :language: python - :start-after: __dag_tasks_begin__ - :end-before: __dag_tasks_end__ + .. tab-item:: Python + + .. literalinclude:: ./doc_code/ray-dag.py + :language: python + :start-after: __dag_tasks_begin__ + :end-before: __dag_tasks_end__ Ray DAG with classes and class methods @@ -59,13 +61,15 @@ function calls specific to the parent actor instance. DAG IR nodes generated from a function, class or classmethod can be combined together to form a DAG. -.. tabbed:: Python +.. tab-set:: + + .. tab-item:: Python + + .. literalinclude:: ./doc_code/ray-dag.py + :language: python + :start-after: __dag_actors_begin__ + :end-before: __dag_actors_end__ - .. literalinclude:: ./doc_code/ray-dag.py - :language: python - :start-after: __dag_actors_begin__ - :end-before: __dag_actors_end__ - Ray DAG with custom InputNode @@ -75,12 +79,14 @@ Ray DAG with custom InputNode runtime. It should be used within a context manager with no args, and called as args of ``dag_node.execute()`` -.. tabbed:: Python +.. tab-set:: + + .. tab-item:: Python - .. 
literalinclude:: ./doc_code/ray-dag.py - :language: python - :start-after: __dag_input_node_begin__ - :end-before: __dag_input_node_end__ + .. literalinclude:: ./doc_code/ray-dag.py + :language: python + :start-after: __dag_input_node_begin__ + :end-before: __dag_input_node_end__ More Resources -------------- diff --git a/doc/source/ray-core/ray-dashboard.rst b/doc/source/ray-core/ray-dashboard.rst index f24a050e693b..f7e729eec058 100644 --- a/doc/source/ray-core/ray-dashboard.rst +++ b/doc/source/ray-core/ray-dashboard.rst @@ -440,26 +440,28 @@ Advanced Usage Changing Dashboard Ports ~~~~~~~~~~~~~~~~~~~~~~~~ -.. tabbed:: Single-node local cluster +.. tab-set:: - **CLI** + .. tab-item:: Single-node local cluster - To customize the port on which the dashboard runs, you can pass - the ``--dashboard-port`` argument with ``ray start`` in the command line. + **CLI** - **ray.init** + To customize the port on which the dashboard runs, you can pass + the ``--dashboard-port`` argument with ``ray start`` in the command line. - If you need to customize the port on which the dashboard will run, you can pass the - keyword argument ``dashboard_port`` in your call to ``ray.init()``. + **ray.init** -.. tabbed:: VM Cluster Launcher + If you need to customize the port on which the dashboard will run, you can pass the + keyword argument ``dashboard_port`` in your call to ``ray.init()``. - To disable the dashboard while using the "VM cluster launcher", include the "ray start --head --include-dashboard=False" argument - and specify the desired port number in the "head_start_ray_commands" section of the `cluster launcher's YAML file `_. + .. tab-item:: VM Cluster Launcher -.. tabbed:: Kuberay + To disable the dashboard while using the "VM cluster launcher", include the "ray start --head --include-dashboard=False" argument + and specify the desired port number in the "head_start_ray_commands" section of the `cluster launcher's YAML file `_. 
- See the `Specifying non-default ports `_ page. + .. tab-item:: Kuberay + + See the `Specifying non-default ports `_ page. Viewing Built-in Dashboard API Metrics ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -521,28 +523,30 @@ Dashboard is included in the `ray[default]` installation by default and automati To disable the dashboard, use the following arguments `--include-dashboard`. -.. tabbed:: Single-node local cluster +.. tab-set:: + + .. tab-item:: Single-node local cluster - **CLI** + **CLI** - .. code-block:: bash + .. code-block:: bash - ray start --include-dashboard=False + ray start --include-dashboard=False - **ray.init** + **ray.init** - .. code-block:: python + .. code-block:: python - ray.init(include_dashboard=False) + ray.init(include_dashboard=False) -.. tabbed:: VM Cluster Launcher + .. tab-item:: VM Cluster Launcher - To disable the dashboard while using the "VM cluster launcher", include the "ray start --head --include-dashboard=False" argument - in the "head_start_ray_commands" section of the `cluster launcher's YAML file `_. + To disable the dashboard while using the "VM cluster launcher", include the "ray start --head --include-dashboard=False" argument + in the "head_start_ray_commands" section of the `cluster launcher's YAML file `_. -.. tabbed:: Kuberay + .. tab-item:: Kuberay - TODO + TODO .. _dash-reference: diff --git a/doc/source/ray-core/scheduling/placement-group.rst b/doc/source/ray-core/scheduling/placement-group.rst index ab18bd297f96..bc74029d3da8 100644 --- a/doc/source/ray-core/scheduling/placement-group.rst +++ b/doc/source/ray-core/scheduling/placement-group.rst @@ -49,89 +49,93 @@ Bundles are specified by a list of dictionaries, e.g., ``[{"CPU": 1}, {"CPU": 1, Placement group scheduling is asynchronous. The `ray.util.placement_group` returns immediately. -.. tabbed:: Python +.. tab-set:: - .. 
literalinclude:: ../doc_code/placement_group_example.py - :language: python - :start-after: __create_pg_start__ - :end-before: __create_pg_end__ + .. tab-item:: Python + .. literalinclude:: ../doc_code/placement_group_example.py + :language: python + :start-after: __create_pg_start__ + :end-before: __create_pg_end__ -.. tabbed:: Java - .. code-block:: java + .. tab-item:: Java - // Initialize Ray. - Ray.init(); + .. code-block:: java - // Construct a list of bundles. - Map bundle = ImmutableMap.of("CPU", 1.0); - List> bundles = ImmutableList.of(bundle); + // Initialize Ray. + Ray.init(); - // Make a creation option with bundles and strategy. - PlacementGroupCreationOptions options = - new PlacementGroupCreationOptions.Builder() - .setBundles(bundles) - .setStrategy(PlacementStrategy.STRICT_SPREAD) - .build(); + // Construct a list of bundles. + Map bundle = ImmutableMap.of("CPU", 1.0); + List> bundles = ImmutableList.of(bundle); - PlacementGroup pg = PlacementGroups.createPlacementGroup(options); + // Make a creation option with bundles and strategy. + PlacementGroupCreationOptions options = + new PlacementGroupCreationOptions.Builder() + .setBundles(bundles) + .setStrategy(PlacementStrategy.STRICT_SPREAD) + .build(); -.. tabbed:: C++ + PlacementGroup pg = PlacementGroups.createPlacementGroup(options); - .. code-block:: c++ + .. tab-item:: C++ - // Initialize Ray. - ray::Init(); + .. code-block:: c++ - // Construct a list of bundles. - std::vector> bundles{{{"CPU", 1.0}}}; + // Initialize Ray. + ray::Init(); - // Make a creation option with bundles and strategy. - ray::internal::PlacementGroupCreationOptions options{ - false, "my_pg", bundles, ray::internal::PlacementStrategy::PACK}; + // Construct a list of bundles. + std::vector> bundles{{{"CPU", 1.0}}}; - ray::PlacementGroup pg = ray::CreatePlacementGroup(options); + // Make a creation option with bundles and strategy. 
+ ray::internal::PlacementGroupCreationOptions options{ + false, "my_pg", bundles, ray::internal::PlacementStrategy::PACK}; + + ray::PlacementGroup pg = ray::CreatePlacementGroup(options); You can block your program until the placement group is ready using one of two APIs: * :func:`ready `, which is compatible with ``ray.get`` * :func:`wait `, which blocks the program until the placement group is ready) -.. tabbed:: Python +.. tab-set:: + + .. tab-item:: Python - .. literalinclude:: ../doc_code/placement_group_example.py - :language: python - :start-after: __ready_pg_start__ - :end-before: __ready_pg_end__ + .. literalinclude:: ../doc_code/placement_group_example.py + :language: python + :start-after: __ready_pg_start__ + :end-before: __ready_pg_end__ -.. tabbed:: Java + .. tab-item:: Java - .. code-block:: java + .. code-block:: java - // Wait for the placement group to be ready within the specified time(unit is seconds). - boolean ready = pg.wait(60); - Assert.assertTrue(ready); + // Wait for the placement group to be ready within the specified time(unit is seconds). + boolean ready = pg.wait(60); + Assert.assertTrue(ready); - // You can look at placement group states using this API. - List allPlacementGroup = PlacementGroups.getAllPlacementGroups(); - for (PlacementGroup group: allPlacementGroup) { - System.out.println(group); - } + // You can look at placement group states using this API. + List allPlacementGroup = PlacementGroups.getAllPlacementGroups(); + for (PlacementGroup group: allPlacementGroup) { + System.out.println(group); + } -.. tabbed:: C++ + .. tab-item:: C++ - .. code-block:: c++ + .. code-block:: c++ - // Wait for the placement group to be ready within the specified time(unit is seconds). - bool ready = pg.Wait(60); - assert(ready); + // Wait for the placement group to be ready within the specified time(unit is seconds). + bool ready = pg.Wait(60); + assert(ready); - // You can look at placement group states using this API. 
- std::vector all_placement_group = ray::GetAllPlacementGroups(); - for (const ray::PlacementGroup &group : all_placement_group) { - std::cout << group.GetName() << std::endl; - } + // You can look at placement group states using this API. + std::vector all_placement_group = ray::GetAllPlacementGroups(); + for (const ray::PlacementGroup &group : all_placement_group) { + std::cout << group.GetName() << std::endl; + } Let's verify the placement group is successfully created. @@ -163,12 +167,14 @@ Placement groups are atomically created; if a bundle cannot fit in any of the cu the entire placement group is not ready and no resources are reserved. To illustrate, let's create another placement group that requires ``{"CPU":1}, {"GPU": 2}`` (2 bundles). -.. tabbed:: Python +.. tab-set:: + + .. tab-item:: Python - .. literalinclude:: ../doc_code/placement_group_example.py - :language: python - :start-after: __create_pg_failed_start__ - :end-before: __create_pg_failed_end__ + .. literalinclude:: ../doc_code/placement_group_example.py + :language: python + :start-after: __create_pg_failed_start__ + :end-before: __create_pg_failed_end__ You can verify the new placement group is pending creation. @@ -234,68 +240,70 @@ Now let's schedule an actor to the placement group. You can schedule actors or tasks to a placement group using :class:`options(scheduling_strategy=PlacementGroupSchedulingStrategy(...)) `. -.. tabbed:: Python +.. tab-set:: - .. literalinclude:: ../doc_code/placement_group_example.py - :language: python - :start-after: __schedule_pg_start__ - :end-before: __schedule_pg_end__ + .. tab-item:: Python -.. tabbed:: Java + .. literalinclude:: ../doc_code/placement_group_example.py + :language: python + :start-after: __schedule_pg_start__ + :end-before: __schedule_pg_end__ - .. code-block:: java + .. tab-item:: Java - public static class Counter { - private int value; + .. 
code-block:: java - public Counter(int initValue) { - this.value = initValue; - } + public static class Counter { + private int value; - public int getValue() { - return value; - } + public Counter(int initValue) { + this.value = initValue; + } - public static String ping() { - return "pong"; - } - } + public int getValue() { + return value; + } - // Create GPU actors on a gpu bundle. - for (int index = 0; index < 1; index++) { - Ray.actor(Counter::new, 1) - .setPlacementGroup(pg, 0) - .remote(); - } + public static String ping() { + return "pong"; + } + } -.. tabbed:: C++ + // Create GPU actors on a gpu bundle. + for (int index = 0; index < 1; index++) { + Ray.actor(Counter::new, 1) + .setPlacementGroup(pg, 0) + .remote(); + } - .. code-block:: c++ + .. tab-item:: C++ - class Counter { - public: - Counter(int init_value) : value(init_value){} - int GetValue() {return value;} - std::string Ping() { - return "pong"; - } - private: - int value; - }; + .. code-block:: c++ - // Factory function of Counter class. - static Counter *CreateCounter() { - return new Counter(); - }; + class Counter { + public: + Counter(int init_value) : value(init_value){} + int GetValue() {return value;} + std::string Ping() { + return "pong"; + } + private: + int value; + }; - RAY_REMOTE(&Counter::Ping, &Counter::GetValue, CreateCounter); + // Factory function of Counter class. + static Counter *CreateCounter() { + return new Counter(); + }; - // Create GPU actors on a gpu bundle. - for (int index = 0; index < 1; index++) { - ray::Actor(CreateCounter) - .SetPlacementGroup(pg, 0) - .Remote(1); - } + RAY_REMOTE(&Counter::Ping, &Counter::GetValue, CreateCounter); + + // Create GPU actors on a gpu bundle. + for (int index = 0; index < 1; index++) { + ray::Actor(CreateCounter) + .SetPlacementGroup(pg, 0) + .Remote(1); + } .. note:: @@ -364,12 +372,14 @@ For example, a placement group of 2 bundles ``[{"CPU": 1}, {"GPU": 1}]`` has ind and index 1 bundle ``{"GPU": 1}``. 
Since we only have 1 bundle, we only have index 0. If you don't specify a bundle, the actor (or task) is scheduled on a random bundle that has unallocated reserved resources. -.. tabbed:: Python +.. tab-set:: + + .. tab-item:: Python - .. literalinclude:: ../doc_code/placement_group_example.py - :language: python - :start-after: __schedule_pg_3_start__ - :end-before: __schedule_pg_3_end__ + .. literalinclude:: ../doc_code/placement_group_example.py + :language: python + :start-after: __schedule_pg_3_start__ + :end-before: __schedule_pg_3_end__ We succeed to schedule the GPU actor! The below image describes 2 actors scheduled into the placement group. @@ -460,30 +470,32 @@ group using the :func:`remove_placement_group ` When you remove the placement group, actors or tasks that still use the reserved resources are forcefully killed. -.. tabbed:: Python +.. tab-set:: - .. literalinclude:: ../doc_code/placement_group_example.py - :language: python - :start-after: __remove_pg_start__ - :end-before: __remove_pg_end__ + .. tab-item:: Python -.. tabbed:: Java + .. literalinclude:: ../doc_code/placement_group_example.py + :language: python + :start-after: __remove_pg_start__ + :end-before: __remove_pg_end__ - .. code-block:: java + .. tab-item:: Java - PlacementGroups.removePlacementGroup(placementGroup.getId()); + .. code-block:: java - PlacementGroup removedPlacementGroup = PlacementGroups.getPlacementGroup(placementGroup.getId()); - Assert.assertEquals(removedPlacementGroup.getState(), PlacementGroupState.REMOVED); + PlacementGroups.removePlacementGroup(placementGroup.getId()); -.. tabbed:: C++ + PlacementGroup removedPlacementGroup = PlacementGroups.getPlacementGroup(placementGroup.getId()); + Assert.assertEquals(removedPlacementGroup.getState(), PlacementGroupState.REMOVED); - .. code-block:: c++ + .. tab-item:: C++ - ray::RemovePlacementGroup(placement_group.GetID()); + .. 
code-block:: c++ - ray::PlacementGroup removed_placement_group = ray::GetPlacementGroup(placement_group.GetID()); - assert(removed_placement_group.GetState(), ray::PlacementGroupState::REMOVED); + ray::RemovePlacementGroup(placement_group.GetID()); + + ray::PlacementGroup removed_placement_group = ray::GetPlacementGroup(placement_group.GetID()); + assert(removed_placement_group.GetState(), ray::PlacementGroupState::REMOVED); .. _ray-placement-group-observability-ref: @@ -496,39 +508,41 @@ Ray provides several useful tools to inspect the placement group states and reso - **Ray Dashboard** is a UI tool for inspecting placement group states. - **Ray State API** is a CLI for inspecting placement group states. -.. tabbed:: ray status (CLI) +.. tab-set:: + + .. tab-item:: ray status (CLI) - The CLI command ``ray status`` provides the autoscaling status of the cluster. - It provides the "resource demands" from unscheduled placement groups as well as the resource reservation status. + The CLI command ``ray status`` provides the autoscaling status of the cluster. + It provides the "resource demands" from unscheduled placement groups as well as the resource reservation status. - .. code-block:: bash + .. code-block:: bash - Resources - --------------------------------------------------------------- - Usage: - 1.0/2.0 CPU (1.0 used of 1.0 reserved in placement groups) - 0.0/2.0 GPU (0.0 used of 1.0 reserved in placement groups) - 0B/4.29GiB memory - 0B/2.00GiB object_store_memory + Resources + --------------------------------------------------------------- + Usage: + 1.0/2.0 CPU (1.0 used of 1.0 reserved in placement groups) + 0.0/2.0 GPU (0.0 used of 1.0 reserved in placement groups) + 0B/4.29GiB memory + 0B/2.00GiB object_store_memory -.. tabbed:: Dashboard + .. tab-item:: Dashboard - The :ref:`dashboard job view ` provides the placement group table that displays the scheduling state and metadata of the placement group. 
+ The :ref:`dashboard job view ` provides the placement group table that displays the scheduling state and metadata of the placement group. - .. note:: + .. note:: - Ray dashboard is only available when you install Ray is with ``pip install "ray[default]"``. + Ray dashboard is only available when you install Ray is with ``pip install "ray[default]"``. -.. tabbed:: Ray State API + .. tab-item:: Ray State API - :ref:`Ray state API ` is a CLI tool for inspecting the state of Ray resources (tasks, actors, placement groups, etc.). + :ref:`Ray state API ` is a CLI tool for inspecting the state of Ray resources (tasks, actors, placement groups, etc.). - ``ray list placement-groups`` provides the metadata and the scheduling state of the placement group. - ``ray list placement-groups --detail`` provides statistics and scheduling state in a greater detail. + ``ray list placement-groups`` provides the metadata and the scheduling state of the placement group. + ``ray list placement-groups --detail`` provides statistics and scheduling state in a greater detail. - .. note:: + .. note:: - State API is only available when you install Ray is with ``pip install "ray[default]"`` + State API is only available when you install Ray is with ``pip install "ray[default]"`` Inspect Placement Group Scheduling State ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -548,16 +562,18 @@ By default, child actors and tasks don't share the same placement group that the To automatically schedule child actors or tasks to the same placement group, set ``placement_group_capture_child_tasks`` to True. -.. tabbed:: Python +.. tab-set:: + + .. tab-item:: Python - .. literalinclude:: ../doc_code/placement_group_capture_child_tasks_example.py - :language: python - :start-after: __child_capture_pg_start__ - :end-before: __child_capture_pg_end__ + .. 
literalinclude:: ../doc_code/placement_group_capture_child_tasks_example.py + :language: python + :start-after: __child_capture_pg_start__ + :end-before: __child_capture_pg_end__ -.. tabbed:: Java + .. tab-item:: Java - It's not implemented for Java APIs yet. + It's not implemented for Java APIs yet. When ``placement_group_capture_child_tasks`` is True, but you don't want to schedule child tasks and actors to the same placement group, specify ``PlacementGroupSchedulingStrategy(placement_group=None)``. @@ -577,74 +593,76 @@ the actor or task that needs it, or if you are trying to access a placement group launched by another driver. Note that the placement group is still destroyed if its lifetime isn't `detached`. -.. tabbed:: Python +.. tab-set:: - .. literalinclude:: ../doc_code/placement_group_example.py - :language: python - :start-after: __get_pg_start__ - :end-before: __get_pg_end__ + .. tab-item:: Python -.. tabbed:: Java + .. literalinclude:: ../doc_code/placement_group_example.py + :language: python + :start-after: __get_pg_start__ + :end-before: __get_pg_end__ - .. code-block:: java + .. tab-item:: Java - // Create a placement group with a unique name. - Map bundle = ImmutableMap.of("CPU", 1.0); - List> bundles = ImmutableList.of(bundle); + .. code-block:: java - PlacementGroupCreationOptions options = - new PlacementGroupCreationOptions.Builder() - .setBundles(bundles) - .setStrategy(PlacementStrategy.STRICT_SPREAD) - .setName("global_name") - .build(); + // Create a placement group with a unique name. + Map bundle = ImmutableMap.of("CPU", 1.0); + List> bundles = ImmutableList.of(bundle); - PlacementGroup pg = PlacementGroups.createPlacementGroup(options); - pg.wait(60); + PlacementGroupCreationOptions options = + new PlacementGroupCreationOptions.Builder() + .setBundles(bundles) + .setStrategy(PlacementStrategy.STRICT_SPREAD) + .setName("global_name") + .build(); - ... 
+ PlacementGroup pg = PlacementGroups.createPlacementGroup(options); + pg.wait(60); - // Retrieve the placement group later somewhere. - PlacementGroup group = PlacementGroups.getPlacementGroup("global_name"); - Assert.assertNotNull(group); + ... -.. tabbed:: C++ + // Retrieve the placement group later somewhere. + PlacementGroup group = PlacementGroups.getPlacementGroup("global_name"); + Assert.assertNotNull(group); - .. code-block:: c++ + .. tab-item:: C++ - // Create a placement group with a globally unique name. - std::vector> bundles{{{"CPU", 1.0}}}; + .. code-block:: c++ - ray::PlacementGroupCreationOptions options{ - true/*global*/, "global_name", bundles, ray::PlacementStrategy::STRICT_SPREAD}; + // Create a placement group with a globally unique name. + std::vector> bundles{{{"CPU", 1.0}}}; - ray::PlacementGroup pg = ray::CreatePlacementGroup(options); - pg.Wait(60); + ray::PlacementGroupCreationOptions options{ + true/*global*/, "global_name", bundles, ray::PlacementStrategy::STRICT_SPREAD}; - ... + ray::PlacementGroup pg = ray::CreatePlacementGroup(options); + pg.Wait(60); - // Retrieve the placement group later somewhere. - ray::PlacementGroup group = ray::GetGlobalPlacementGroup("global_name"); - assert(!group.Empty()); + ... - We also support non-global named placement group in C++, which means that the placement group name is only valid within the job and cannot be accessed from another job. + // Retrieve the placement group later somewhere. + ray::PlacementGroup group = ray::GetGlobalPlacementGroup("global_name"); + assert(!group.Empty()); - .. code-block:: c++ + We also support non-global named placement group in C++, which means that the placement group name is only valid within the job and cannot be accessed from another job. - // Create a placement group with a job-scope-unique name. - std::vector> bundles{{{"CPU", 1.0}}}; + .. 
code-block:: c++ - ray::PlacementGroupCreationOptions options{ - false/*non-global*/, "non_global_name", bundles, ray::PlacementStrategy::STRICT_SPREAD}; + // Create a placement group with a job-scope-unique name. + std::vector> bundles{{{"CPU", 1.0}}}; - ray::PlacementGroup pg = ray::CreatePlacementGroup(options); - pg.Wait(60); + ray::PlacementGroupCreationOptions options{ + false/*non-global*/, "non_global_name", bundles, ray::PlacementStrategy::STRICT_SPREAD}; - ... + ray::PlacementGroup pg = ray::CreatePlacementGroup(options); + pg.Wait(60); - // Retrieve the placement group later somewhere in the same job. - ray::PlacementGroup group = ray::GetPlacementGroup("non_global_name"); - assert(!group.Empty()); + ... + + // Retrieve the placement group later somewhere in the same job. + ray::PlacementGroup group = ray::GetPlacementGroup("non_global_name"); + assert(!group.Empty()); .. _placement-group-detached: @@ -659,16 +677,18 @@ By default, the lifetimes of placement groups belong to the driver and actor. To keep the placement group alive regardless of its job or detached actor, specify `lifetime="detached"`. For example: -.. tabbed:: Python +.. tab-set:: + + .. tab-item:: Python - .. literalinclude:: ../doc_code/placement_group_example.py - :language: python - :start-after: __detached_pg_start__ - :end-before: __detached_pg_end__ + .. literalinclude:: ../doc_code/placement_group_example.py + :language: python + :start-after: __detached_pg_start__ + :end-before: __detached_pg_end__ -.. tabbed:: Java + .. tab-item:: Java - The lifetime argument is not implemented for Java APIs yet. + The lifetime argument is not implemented for Java APIs yet. Let's terminate the current script and start a new Python script. Call ``ray list placement-groups``, and you can see the placement group is not removed. 
diff --git a/doc/source/ray-core/scheduling/resources.rst b/doc/source/ray-core/scheduling/resources.rst index e2455cc27032..8b4ffdfbc5f7 100644 --- a/doc/source/ray-core/scheduling/resources.rst +++ b/doc/source/ray-core/scheduling/resources.rst @@ -76,49 +76,51 @@ By default, logical resources are configured by the following rule. However, you can always override that by manually specifying the quantities of pre-defined resources and adding custom resources. There are several ways to do that depending on how you start the Ray cluster: -.. tabbed:: ray.init() +.. tab-set:: - If you are using :func:`ray.init() ` to start a single node Ray cluster, you can do the following to manually specify node resources: + .. tab-item:: ray.init() - .. literalinclude:: ../doc_code/resources.py - :language: python - :start-after: __specifying_node_resources_start__ - :end-before: __specifying_node_resources_end__ + If you are using :func:`ray.init() ` to start a single node Ray cluster, you can do the following to manually specify node resources: -.. tabbed:: ray start + .. literalinclude:: ../doc_code/resources.py + :language: python + :start-after: __specifying_node_resources_start__ + :end-before: __specifying_node_resources_end__ - If you are using :ref:`ray start ` to start a Ray node, you can run: + .. tab-item:: ray start - .. code-block:: shell + If you are using :ref:`ray start ` to start a Ray node, you can run: - ray start --head --num-cpus=3 --num-gpus=4 --resources='{"special_hardware": 1, "custom_label": 1}' + .. code-block:: shell -.. tabbed:: ray up + ray start --head --num-cpus=3 --num-gpus=4 --resources='{"special_hardware": 1, "custom_label": 1}' - If you are using :ref:`ray up ` to start a Ray cluster, you can set the :ref:`resources field ` in the yaml file: + .. tab-item:: ray up - .. code-block:: yaml + If you are using :ref:`ray up ` to start a Ray cluster, you can set the :ref:`resources field ` in the yaml file: - available_node_types: - head: - ... 
- resources: - CPU: 3 - GPU: 4 - special_hardware: 1 - custom_label: 1 + .. code-block:: yaml -.. tabbed:: KubeRay + available_node_types: + head: + ... + resources: + CPU: 3 + GPU: 4 + special_hardware: 1 + custom_label: 1 - If you are using :ref:`KubeRay ` to start a Ray cluster, you can set the :ref:`rayStartParams field ` in the yaml file: + .. tab-item:: KubeRay - .. code-block:: yaml + If you are using :ref:`KubeRay ` to start a Ray cluster, you can set the :ref:`rayStartParams field ` in the yaml file: - headGroupSpec: - rayStartParams: - num-cpus: "3" - num-gpus: "4" - resources: '"{\"special_hardware\": 1, \"custom_label\": 1}"' + .. code-block:: yaml + + headGroupSpec: + rayStartParams: + num-cpus: "3" + num-gpus: "4" + resources: '"{\"special_hardware\": 1, \"custom_label\": 1}"' .. _resource-requirements: @@ -139,30 +141,32 @@ If resources are specified explicitly, they are required for both scheduling and You can also explicitly specify a task's or actor's resource requirements (for example, one task may require a GPU) instead of using default ones via :func:`ray.remote() ` and :meth:`task.options() `/:meth:`actor.options() `. -.. tabbed:: Python +.. tab-set:: + + .. tab-item:: Python - .. literalinclude:: ../doc_code/resources.py - :language: python - :start-after: __specifying_resource_requirements_start__ - :end-before: __specifying_resource_requirements_end__ + .. literalinclude:: ../doc_code/resources.py + :language: python + :start-after: __specifying_resource_requirements_start__ + :end-before: __specifying_resource_requirements_end__ -.. tabbed:: Java + .. tab-item:: Java - .. code-block:: java + .. code-block:: java - // Specify required resources. - Ray.task(MyRayApp::myFunction).setResource("CPU", 1.0).setResource("GPU", 1.0).setResource("special_hardware", 1.0).remote(); + // Specify required resources. 
+ Ray.task(MyRayApp::myFunction).setResource("CPU", 1.0).setResource("GPU", 1.0).setResource("special_hardware", 1.0).remote(); - Ray.actor(Counter::new).setResource("CPU", 2.0).setResource("GPU", 1.0).remote(); + Ray.actor(Counter::new).setResource("CPU", 2.0).setResource("GPU", 1.0).remote(); -.. tabbed:: C++ + .. tab-item:: C++ - .. code-block:: c++ + .. code-block:: c++ - // Specify required resources. - ray::Task(MyFunction).SetResource("CPU", 1.0).SetResource("GPU", 1.0).SetResource("special_hardware", 1.0).Remote(); + // Specify required resources. + ray::Task(MyFunction).SetResource("CPU", 1.0).SetResource("GPU", 1.0).SetResource("special_hardware", 1.0).Remote(); - ray::Actor(CreateCounter).SetResource("CPU", 2.0).SetResource("GPU", 1.0).Remote(); + ray::Actor(CreateCounter).SetResource("CPU", 2.0).SetResource("GPU", 1.0).Remote(); Task and actor resource requirements have implications for the Ray's scheduling concurrency. In particular, the sum of the resource requirements of all of the diff --git a/doc/source/ray-core/tasks.rst b/doc/source/ray-core/tasks.rst index 429f7f715445..2a5177dcee41 100644 --- a/doc/source/ray-core/tasks.rst +++ b/doc/source/ray-core/tasks.rst @@ -5,79 +5,81 @@ Tasks Ray enables arbitrary functions to be executed asynchronously on separate Python workers. Such functions are called **Ray remote functions** and their asynchronous invocations are called **Ray tasks**. Here is an example. -.. tabbed:: Python +.. tab-set:: - .. literalinclude:: doc_code/tasks.py - :language: python - :start-after: __tasks_start__ - :end-before: __tasks_end__ + .. tab-item:: Python - See the `ray.remote package reference `__ page for specific documentation on how to use ``ray.remote``. + .. literalinclude:: doc_code/tasks.py + :language: python + :start-after: __tasks_start__ + :end-before: __tasks_end__ -.. tabbed:: Java + See the `ray.remote package reference `__ page for specific documentation on how to use ``ray.remote``. - .. 
code-block:: java + .. tab-item:: Java - public class MyRayApp { - // A regular Java static method. - public static int myFunction() { - return 1; - } - } + .. code-block:: java - // Invoke the above method as a Ray task. - // This will immediately return an object ref (a future) and then create - // a task that will be executed on a worker process. - ObjectRef res = Ray.task(MyRayApp::myFunction).remote(); + public class MyRayApp { + // A regular Java static method. + public static int myFunction() { + return 1; + } + } + + // Invoke the above method as a Ray task. + // This will immediately return an object ref (a future) and then create + // a task that will be executed on a worker process. + ObjectRef res = Ray.task(MyRayApp::myFunction).remote(); - // The result can be retrieved with ``ObjectRef::get``. - Assert.assertTrue(res.get() == 1); + // The result can be retrieved with ``ObjectRef::get``. + Assert.assertTrue(res.get() == 1); - public class MyRayApp { - public static int slowFunction() throws InterruptedException { - TimeUnit.SECONDS.sleep(10); - return 1; - } - } + public class MyRayApp { + public static int slowFunction() throws InterruptedException { + TimeUnit.SECONDS.sleep(10); + return 1; + } + } - // Ray tasks are executed in parallel. - // All computation is performed in the background, driven by Ray's internal event loop. - for(int i = 0; i < 4; i++) { - // This doesn't block. - Ray.task(MyRayApp::slowFunction).remote(); - } + // Ray tasks are executed in parallel. + // All computation is performed in the background, driven by Ray's internal event loop. + for(int i = 0; i < 4; i++) { + // This doesn't block. + Ray.task(MyRayApp::slowFunction).remote(); + } -.. tabbed:: C++ + .. tab-item:: C++ - .. code-block:: c++ + .. code-block:: c++ - // A regular C++ function. - int MyFunction() { - return 1; - } - // Register as a remote function by `RAY_REMOTE`. - RAY_REMOTE(MyFunction); + // A regular C++ function. 
+ int MyFunction() { + return 1; + } + // Register as a remote function by `RAY_REMOTE`. + RAY_REMOTE(MyFunction); - // Invoke the above method as a Ray task. - // This will immediately return an object ref (a future) and then create - // a task that will be executed on a worker process. - auto res = ray::Task(MyFunction).Remote(); + // Invoke the above method as a Ray task. + // This will immediately return an object ref (a future) and then create + // a task that will be executed on a worker process. + auto res = ray::Task(MyFunction).Remote(); - // The result can be retrieved with ``ray::ObjectRef::Get``. - assert(*res.Get() == 1); + // The result can be retrieved with ``ray::ObjectRef::Get``. + assert(*res.Get() == 1); - int SlowFunction() { - std::this_thread::sleep_for(std::chrono::seconds(10)); - return 1; - } - RAY_REMOTE(SlowFunction); + int SlowFunction() { + std::this_thread::sleep_for(std::chrono::seconds(10)); + return 1; + } + RAY_REMOTE(SlowFunction); - // Ray tasks are executed in parallel. - // All computation is performed in the background, driven by Ray's internal event loop. - for(int i = 0; i < 4; i++) { - // This doesn't block. - ray::Task(SlowFunction).Remote(); - } + // Ray tasks are executed in parallel. + // All computation is performed in the background, driven by Ray's internal event loop. + for(int i = 0; i < 4; i++) { + // This doesn't block. + ray::Task(SlowFunction).Remote(); + } Specifying required resources @@ -85,26 +87,28 @@ Specifying required resources You can specify resource requirements in tasks (see :ref:`resource-requirements` for more details.) -.. tabbed:: Python +.. tab-set:: - .. literalinclude:: doc_code/tasks.py - :language: python - :start-after: __resource_start__ - :end-before: __resource_end__ + .. tab-item:: Python -.. tabbed:: Java + .. literalinclude:: doc_code/tasks.py + :language: python + :start-after: __resource_start__ + :end-before: __resource_end__ - .. code-block:: java + .. 
tab-item:: Java - // Specify required resources. - Ray.task(MyRayApp::myFunction).setResource("CPU", 4.0).setResource("GPU", 2.0).remote(); + .. code-block:: java -.. tabbed:: C++ + // Specify required resources. + Ray.task(MyRayApp::myFunction).setResource("CPU", 4.0).setResource("GPU", 2.0).remote(); - .. code-block:: c++ + .. tab-item:: C++ - // Specify required resources. - ray::Task(MyFunction).SetResource("CPU", 4.0).SetResource("GPU", 2.0).Remote(); + .. code-block:: c++ + + // Specify required resources. + ray::Task(MyFunction).SetResource("CPU", 4.0).SetResource("GPU", 2.0).Remote(); .. _ray-object-refs: @@ -113,45 +117,47 @@ Passing object refs to Ray tasks In addition to values, `Object refs `__ can also be passed into remote functions. When the task gets executed, inside the function body **the argument will be the underlying value**. For example, take this function: -.. tabbed:: Python +.. tab-set:: - .. literalinclude:: doc_code/tasks.py - :language: python - :start-after: __pass_by_ref_start__ - :end-before: __pass_by_ref_end__ + .. tab-item:: Python -.. tabbed:: Java + .. literalinclude:: doc_code/tasks.py + :language: python + :start-after: __pass_by_ref_start__ + :end-before: __pass_by_ref_end__ - .. code-block:: java + .. tab-item:: Java - public class MyRayApp { - public static int functionWithAnArgument(int value) { - return value + 1; + .. code-block:: java + + public class MyRayApp { + public static int functionWithAnArgument(int value) { + return value + 1; + } } - } - ObjectRef objRef1 = Ray.task(MyRayApp::myFunction).remote(); - Assert.assertTrue(objRef1.get() == 1); + ObjectRef objRef1 = Ray.task(MyRayApp::myFunction).remote(); + Assert.assertTrue(objRef1.get() == 1); - // You can pass an object ref as an argument to another Ray task. - ObjectRef objRef2 = Ray.task(MyRayApp::functionWithAnArgument, objRef1).remote(); - Assert.assertTrue(objRef2.get() == 2); + // You can pass an object ref as an argument to another Ray task. 
+ ObjectRef objRef2 = Ray.task(MyRayApp::functionWithAnArgument, objRef1).remote(); + Assert.assertTrue(objRef2.get() == 2); -.. tabbed:: C++ + .. tab-item:: C++ - .. code-block:: c++ + .. code-block:: c++ - static int FunctionWithAnArgument(int value) { - return value + 1; - } - RAY_REMOTE(FunctionWithAnArgument); + static int FunctionWithAnArgument(int value) { + return value + 1; + } + RAY_REMOTE(FunctionWithAnArgument); - auto obj_ref1 = ray::Task(MyFunction).Remote(); - assert(*obj_ref1.Get() == 1); + auto obj_ref1 = ray::Task(MyFunction).Remote(); + assert(*obj_ref1.Get() == 1); - // You can pass an object ref as an argument to another Ray task. - auto obj_ref2 = ray::Task(FunctionWithAnArgument).Remote(obj_ref1); - assert(*obj_ref2.Get() == 2); + // You can pass an object ref as an argument to another Ray task. + auto obj_ref2 = ray::Task(FunctionWithAnArgument).Remote(obj_ref1); + assert(*obj_ref2.Get() == 2); Note the following behaviors: @@ -167,26 +173,28 @@ Calling **ray.get** on Ray task results will block until the task finished execu finished executing without blocking on all of them. This could be achieved by :func:`ray.wait() `. The function works as follows. -.. tabbed:: Python +.. tab-set:: + + .. tab-item:: Python - .. literalinclude:: doc_code/tasks.py - :language: python - :start-after: __wait_start__ - :end-before: __wait_end__ + .. literalinclude:: doc_code/tasks.py + :language: python + :start-after: __wait_start__ + :end-before: __wait_end__ -.. tabbed:: Java + .. tab-item:: Java - .. code-block:: java + .. code-block:: java - WaitResult waitResult = Ray.wait(objectRefs, /*num_returns=*/0, /*timeoutMs=*/1000); - System.out.println(waitResult.getReady()); // List of ready objects. - System.out.println(waitResult.getUnready()); // list of unready objects. + WaitResult waitResult = Ray.wait(objectRefs, /*num_returns=*/0, /*timeoutMs=*/1000); + System.out.println(waitResult.getReady()); // List of ready objects. 
+ System.out.println(waitResult.getUnready()); // list of unready objects. -.. tabbed:: C++ + .. tab-item:: C++ - .. code-block:: c++ + .. code-block:: c++ - ray::WaitResult wait_result = ray::Wait(object_refs, /*num_objects=*/0, /*timeout_ms=*/1000); + ray::WaitResult wait_result = ray::Wait(object_refs, /*num_objects=*/0, /*timeout_ms=*/1000); .. _ray-task-returns: @@ -195,21 +203,25 @@ Multiple returns By default, a Ray task only returns a single Object Ref. However, you can configure Ray tasks to return multiple Object Refs, by setting the ``num_returns`` option. -.. tabbed:: Python +.. tab-set:: - .. literalinclude:: doc_code/tasks.py - :language: python - :start-after: __multiple_returns_start__ - :end-before: __multiple_returns_end__ + .. tab-item:: Python + + .. literalinclude:: doc_code/tasks.py + :language: python + :start-after: __multiple_returns_start__ + :end-before: __multiple_returns_end__ For tasks that return multiple objects, Ray also supports remote generators that allow a task to return one object at a time to reduce memory usage at the worker. Ray also supports an option to set the number of return values dynamically, which can be useful when the task caller does not know how many return values to expect. See the :ref:`user guide ` for more details on use cases. -.. tabbed:: Python +.. tab-set:: + + .. tab-item:: Python - .. literalinclude:: doc_code/tasks.py - :language: python - :start-after: __generator_start__ - :end-before: __generator_end__ + .. literalinclude:: doc_code/tasks.py + :language: python + :start-after: __generator_start__ + :end-before: __generator_end__ Cancelling tasks @@ -217,12 +229,14 @@ Cancelling tasks Ray tasks can be canceled by calling :func:`ray.cancel() ` on the returned Object ref. -.. tabbed:: Python +.. tab-set:: + + .. tab-item:: Python - .. literalinclude:: doc_code/tasks.py - :language: python - :start-after: __cancel_start__ - :end-before: __cancel_end__ + .. 
literalinclude:: doc_code/tasks.py + :language: python + :start-after: __cancel_start__ + :end-before: __cancel_end__ Scheduling diff --git a/doc/source/rllib/key-concepts.rst b/doc/source/rllib/key-concepts.rst index a5743ab9fc30..a43ccbd1435b 100644 --- a/doc/source/rllib/key-concepts.rst +++ b/doc/source/rllib/key-concepts.rst @@ -63,41 +63,43 @@ can use Ray Tune to tune hyperparameters of your reinforcement learning algorith The following example shows three equivalent ways of interacting with ``PPO``, which implements the proximal policy optimization algorithm in RLlib. -.. tabbed:: Basic RLlib Algorithm +.. tab-set:: - .. code-block:: python + .. tab-item:: Basic RLlib Algorithm - # Configure. - from ray.rllib.algorithms.ppo import PPOConfig - config = PPOConfig().environment(env="CartPole-v1").training(train_batch_size=4000) + .. code-block:: python - # Build. - algo = config.build() + # Configure. + from ray.rllib.algorithms.ppo import PPOConfig + config = PPOConfig().environment(env="CartPole-v1").training(train_batch_size=4000) - # Train. - while True: - print(algo.train()) + # Build. + algo = config.build() + # Train. + while True: + print(algo.train()) -.. tabbed:: RLlib Algorithms and Tune - .. code-block:: python + .. tab-item:: RLlib Algorithms and Tune - from ray import tune + .. code-block:: python - # Configure. - from ray.rllib.algorithms.ppo import PPOConfig - config = PPOConfig().environment(env="CartPole-v1").training(train_batch_size=4000) + from ray import tune - # Train via Ray Tune. - tune.run("PPO", config=config) + # Configure. + from ray.rllib.algorithms.ppo import PPOConfig + config = PPOConfig().environment(env="CartPole-v1").training(train_batch_size=4000) + # Train via Ray Tune. + tune.run("PPO", config=config) -.. tabbed:: RLlib Command Line - .. code-block:: bash + .. tab-item:: RLlib Command Line - rllib train --run=PPO --env=CartPole-v1 --config='{"train_batch_size": 4000}' + .. 
code-block:: bash + + rllib train --run=PPO --env=CartPole-v1 --config='{"train_batch_size": 4000}' RLlib `Algorithm classes `__ coordinate the distributed workflow of running rollouts and optimizing policies. diff --git a/doc/source/rllib/rllib-connector.rst b/doc/source/rllib/rllib-connector.rst index f0a78e931689..24fe81eb89f9 100644 --- a/doc/source/rllib/rllib-connector.rst +++ b/doc/source/rllib/rllib-connector.rst @@ -152,62 +152,66 @@ Lambda Connector helps turn simple transformation functions into agent or action connectors without having users worry about the high-level list or non-list APIs. Lambda Connector has separate agent and action versions, for example: -.. tabbed:: Lambda Agent Connector +.. tab-set:: - .. code-block:: python + .. tab-item:: Lambda Agent Connector - # An example agent connector that filters INFOS column out of - # observation data. - def filter(d: ActionConnectorDataType): - del d.data[SampleBatch.INFOS] - return d + .. code-block:: python - FilterInfosColumnAgentConnector = register_lambda_agent_connector( - "FilterInfosColumnAgentConnector", filter - ) + # An example agent connector that filters INFOS column out of + # observation data. + def filter(d: ActionConnectorDataType): + del d.data[SampleBatch.INFOS] + return d -.. tabbed:: Lambda Action Connector + FilterInfosColumnAgentConnector = register_lambda_agent_connector( + "FilterInfosColumnAgentConnector", filter + ) - .. code-block:: python + .. tab-item:: Lambda Action Connector - # An example action connector that scales actions output by the - # policy by a factor of 2. - ScaleActionConnector = register_lambda_action_connector( - "ScaleActionConnector", - lambda actions, states, fetches: 2 * actions, states, fetches - ) + .. code-block:: python + + # An example action connector that scales actions output by the + # policy by a factor of 2. 
+ ScaleActionConnector = register_lambda_action_connector( + "ScaleActionConnector", + lambda actions, states, fetches: 2 * actions, states, fetches + ) Multiple connectors can be composed into a ``ConnectorPipeline``, which handles proper running of all children connectors in sequence and provides basic operations to modify and update the composition of connectors. ``ConnectorPipeline`` also has agent and action versions: -.. tabbed:: AgentConnectorPipeline +.. tab-set:: + + .. tab-item:: AgentConnectorPipeline - .. code-block:: python + .. code-block:: python - # Example construction of an AgentConnectorPipeline. - pipeline = ActionConnectorPipeline( - ctx, - [ClipRewardAgentConnector(), ViewRequirementAgentConnector()] - ) + # Example construction of an AgentConnectorPipeline. + pipeline = ActionConnectorPipeline( + ctx, + [ClipRewardAgentConnector(), ViewRequirementAgentConnector()] + ) - # For demonstration purpose, we will add an ObsPreprocessorConnector - # in front of the ViewRequirementAgentConnector. - pipeline.insert_before("ViewRequirementAgentConnector", ObsPreprocessorConnector()) + # For demonstration purpose, we will add an ObsPreprocessorConnector + # in front of the ViewRequirementAgentConnector. + pipeline.insert_before("ViewRequirementAgentConnector", ObsPreprocessorConnector()) -.. tabbed:: Action Lambda Connector + .. tab-item:: Action Lambda Connector - .. code-block:: python + .. code-block:: python - # Example construction of an ActionConnectorPipeline. - pipeline = ActionConnectorPipeline( - ctx, - [ConvertToNumpyConnector(), ClipActionsConnector(), ImmutableActionsConnector()] - ) + # Example construction of an ActionConnectorPipeline. + pipeline = ActionConnectorPipeline( + ctx, + [ConvertToNumpyConnector(), ClipActionsConnector(), ImmutableActionsConnector()] + ) - # For demonstration purpose, we will drop the last ImmutableActionsConnector here. 
- pipeline.remove("ImmutableActionsConnector") + # For demonstration purpose, we will drop the last ImmutableActionsConnector here. + pipeline.remove("ImmutableActionsConnector") diff --git a/doc/source/rllib/rllib-rlmodule.rst b/doc/source/rllib/rllib-rlmodule.rst index 62bddda34053..1c498df3f9a3 100644 --- a/doc/source/rllib/rllib-rlmodule.rst +++ b/doc/source/rllib/rllib-rlmodule.rst @@ -73,43 +73,46 @@ The RLModule API provides a unified way to define custom reinforcement learning To maintain consistency and usability, RLlib offers a standardized approach for defining module objects for both single-agent and multi-agent reinforcement learning environments. This is achieved through the :py:class:`~ray.rllib.core.rl_module.rl_module.SingleAgentRLModuleSpec` and :py:class:`~ray.rllib.core.rl_module.marl_module.MultiAgentRLModuleSpec` classes. The built-in RLModules in RLlib follow this consistent design pattern, making it easier for you to understand and utilize these modules. -.. tabbed:: Single Agent +.. tab-set:: - .. literalinclude:: doc_code/rlmodule_guide.py - :language: python - :start-after: __constructing-rlmodules-sa-begin__ - :end-before: __constructing-rlmodules-sa-end__ + .. tab-item:: Single Agent + .. literalinclude:: doc_code/rlmodule_guide.py + :language: python + :start-after: __constructing-rlmodules-sa-begin__ + :end-before: __constructing-rlmodules-sa-end__ -.. tabbed:: Multi Agent - .. literalinclude:: doc_code/rlmodule_guide.py - :language: python - :start-after: __constructing-rlmodules-ma-begin__ - :end-before: __constructing-rlmodules-ma-end__ + .. tab-item:: Multi Agent + + .. literalinclude:: doc_code/rlmodule_guide.py + :language: python + :start-after: __constructing-rlmodules-ma-begin__ + :end-before: __constructing-rlmodules-ma-end__ You can pass RL Module specs to the algorithm configuration to be used by the algorithm. -.. tabbed:: Single Agent +.. tab-set:: - .. 
literalinclude:: doc_code/rlmodule_guide.py - :language: python - :start-after: __pass-specs-to-configs-sa-begin__ - :end-before: __pass-specs-to-configs-sa-end__ + .. tab-item:: Single Agent + .. literalinclude:: doc_code/rlmodule_guide.py + :language: python + :start-after: __pass-specs-to-configs-sa-begin__ + :end-before: __pass-specs-to-configs-sa-end__ - .. note:: - For passing RL Module specs, all fields do not have to be filled as they are filled based on the described environment or other algorithm configuration parameters (i.e. ,``observation_space``, ``action_space``, ``model_config_dict`` are not required fields when passing a custom RL Module spec to the algorithm config.) + .. note:: + For passing RL Module specs, all fields do not have to be filled as they are filled based on the described environment or other algorithm configuration parameters (i.e. ,``observation_space``, ``action_space``, ``model_config_dict`` are not required fields when passing a custom RL Module spec to the algorithm config.) -.. tabbed:: Multi Agent - .. literalinclude:: doc_code/rlmodule_guide.py - :language: python - :start-after: __pass-specs-to-configs-ma-begin__ - :end-before: __pass-specs-to-configs-ma-end__ + .. tab-item:: Multi Agent + .. literalinclude:: doc_code/rlmodule_guide.py + :language: python + :start-after: __pass-specs-to-configs-ma-begin__ + :end-before: __pass-specs-to-configs-ma-end__ Writing Custom Single Agent RL Modules @@ -146,20 +149,22 @@ Also the class's constrcutor requires a dataclass config object called `~ray.rll When writing RL Modules, you need to use these fields to construct your model. -.. tabbed:: Single Agent (torch) +.. tab-set:: + + .. tab-item:: Single Agent (torch) + + .. literalinclude:: doc_code/rlmodule_guide.py + :language: python + :start-after: __write-custom-sa-rlmodule-torch-begin__ + :end-before: __write-custom-sa-rlmodule-torch-end__ - .. 
literalinclude:: doc_code/rlmodule_guide.py - :language: python - :start-after: __write-custom-sa-rlmodule-torch-begin__ - :end-before: __write-custom-sa-rlmodule-torch-end__ + .. tab-item:: Single Agent (tensorflow) -.. tabbed:: Single Agent (tensorflow) - - .. literalinclude:: doc_code/rlmodule_guide.py - :language: python - :start-after: __write-custom-sa-rlmodule-tf-begin__ - :end-before: __write-custom-sa-rlmodule-tf-end__ + .. literalinclude:: doc_code/rlmodule_guide.py + :language: python + :start-after: __write-custom-sa-rlmodule-tf-begin__ + :end-before: __write-custom-sa-rlmodule-tf-end__ In :py:class:`~ray.rllib.core.rl_module.rl_module.RLModule` you can enforce the checking for the existence of certain input or output keys in the data that is communicated into and out of RL Modules. This serves multiple purposes: @@ -167,35 +172,37 @@ In :py:class:`~ray.rllib.core.rl_module.rl_module.RLModule` you can enforce the - For the I/O requirement of each method to be self-documenting. - For failures to happen quickly. If users extend the modules and implement something that does not match the assumptions of the I/O specs, the check reports missing keys and their expected format. For example, RLModule should always have an ``obs`` key in the input batch and an ``action_dist`` key in the output. -.. tabbed:: Single Level Keys - - .. literalinclude:: doc_code/rlmodule_guide.py - :language: python - :start-after: __extend-spec-checking-single-level-begin__ - :end-before: __extend-spec-checking-single-level-end__ +.. tab-set:: + + .. tab-item:: Single Level Keys + + .. literalinclude:: doc_code/rlmodule_guide.py + :language: python + :start-after: __extend-spec-checking-single-level-begin__ + :end-before: __extend-spec-checking-single-level-end__ -.. tabbed:: Nested Keys + .. tab-item:: Nested Keys - .. 
literalinclude:: doc_code/rlmodule_guide.py - :language: python - :start-after: __extend-spec-checking-nested-begin__ - :end-before: __extend-spec-checking-nested-end__ + .. literalinclude:: doc_code/rlmodule_guide.py + :language: python + :start-after: __extend-spec-checking-nested-begin__ + :end-before: __extend-spec-checking-nested-end__ -.. tabbed:: TensorShape Spec + .. tab-item:: TensorShape Spec - .. literalinclude:: doc_code/rlmodule_guide.py - :language: python - :start-after: __extend-spec-checking-torch-specs-begin__ - :end-before: __extend-spec-checking-torch-specs-end__ + .. literalinclude:: doc_code/rlmodule_guide.py + :language: python + :start-after: __extend-spec-checking-torch-specs-begin__ + :end-before: __extend-spec-checking-torch-specs-end__ -.. tabbed:: Type Spec + .. tab-item:: Type Spec - .. literalinclude:: doc_code/rlmodule_guide.py - :language: python - :start-after: __extend-spec-checking-type-specs-begin__ - :end-before: __extend-spec-checking-type-specs-end__ + .. literalinclude:: doc_code/rlmodule_guide.py + :language: python + :start-after: __extend-spec-checking-type-specs-begin__ + :end-before: __extend-spec-checking-type-specs-end__ :py:class:`~ray.rllib.core.rl_module.rl_module.RLModule` has two methods for each forward method, totaling 6 methods that can be override to describe the specs of the input and output of each method: @@ -220,12 +227,14 @@ The :py:class:`~ray.rllib.core.rl_module.marl_module.MultiAgentRLModule` offers The following example creates a custom multi-agent RL module with underlying modules. The modules share an encoder, which gets applied to the global part of the observations space. The local part passes through a separate encoder, specific to each policy. -.. tabbed:: Multi agent with shared encoder (Torch) +.. tab-set:: - .. 
literalinclude:: doc_code/rlmodule_guide.py - :language: python - :start-after: __write-custom-marlmodule-shared-enc-begin__ - :end-before: __write-custom-marlmodule-shared-enc-end__ + .. tab-item:: Multi agent with shared encoder (Torch) + + .. literalinclude:: doc_code/rlmodule_guide.py + :language: python + :start-after: __write-custom-marlmodule-shared-enc-begin__ + :end-before: __write-custom-marlmodule-shared-enc-end__ To construct this custom multi-agent RL module, pass the class to the :py:class:`~ray.rllib.core.rl_module.marl_module.MultiAgentRLModuleSpec` constructor. Also, pass the :py:class:`~ray.rllib.core.rl_module.rl_module.SingleAgentRLModuleSpec` for each agent because RLlib requires the observation, action spaces, and model hyper-parameters for each agent. @@ -243,44 +252,46 @@ RLlib provides a number of RL Modules for different frameworks (e.g., PyTorch, T There are two possible ways to extend existing RL Modules: -.. tabbed:: Inheriting existing RL Modules +.. tab-set:: - One way to extend existing RL Modules is to inherit from them and override the methods you need to customize. For example, extend :py:class:`~ray.rllib.algorithms.ppo.torch.ppo_torch_rl_module.PPOTorchRLModule` and augment it with your own customization. Then pass the new customized class into the algorithm configuration to use the PPO algorithm to optimize your custom RL Module. + .. tab-item:: Inheriting existing RL Modules - .. code-block:: python + One way to extend existing RL Modules is to inherit from them and override the methods you need to customize. For example, extend :py:class:`~ray.rllib.algorithms.ppo.torch.ppo_torch_rl_module.PPOTorchRLModule` and augment it with your own customization. Then pass the new customized class into the algorithm configuration to use the PPO algorithm to optimize your custom RL Module. - class MyPPORLModule(PPORLModule): - - def __init__(self, config: RLModuleConfig): - super().__init__(config) - ... + .. 
code-block:: python + + class MyPPORLModule(PPORLModule): + + def __init__(self, config: RLModuleConfig): + super().__init__(config) + ... - # Pass in the custom RL Module class to the spec - algo_config = algo_config.rl_module( - rl_module_spec=SingleAgentRLModuleSpec(module_class=MyPPORLModule) - ) + # Pass in the custom RL Module class to the spec + algo_config = algo_config.rl_module( + rl_module_spec=SingleAgentRLModuleSpec(module_class=MyPPORLModule) + ) - -.. tabbed:: Extending RL Module Catalog - Another way to customize your module is by extending its :py:class:`~ray.rllib.core.models.catalog.Catalog`. The :py:class:`~ray.rllib.core.models.catalog.Catalog` is a component that defines the default architecture and behavior of a model based on factors such as ``observation_space``, ``action_space``, etc. To modify sub-components of an existing RL Module, extend the corresponding Catalog class. + .. tab-item:: Extending RL Module Catalog - For instance, to adapt the existing ``PPORLModule`` for a custom graph observation space not supported by RLlib out-of-the-box, extend the :py:class:`~ray.rllib.core.models.catalog.Catalog` class used to create the ``PPORLModule`` and override the method responsible for returning the encoder component to ensure that your custom encoder replaces the default one initially provided by RLlib. For more information on the :py:class:`~ray.rllib.core.models.catalog.Catalog` class, refer to the `Catalog user guide `__. + Another way to customize your module is by extending its :py:class:`~ray.rllib.core.models.catalog.Catalog`. The :py:class:`~ray.rllib.core.models.catalog.Catalog` is a component that defines the default architecture and behavior of a model based on factors such as ``observation_space``, ``action_space``, etc. To modify sub-components of an existing RL Module, extend the corresponding Catalog class. 
+ For instance, to adapt the existing ``PPORLModule`` for a custom graph observation space not supported by RLlib out-of-the-box, extend the :py:class:`~ray.rllib.core.models.catalog.Catalog` class used to create the ``PPORLModule`` and override the method responsible for returning the encoder component to ensure that your custom encoder replaces the default one initially provided by RLlib. For more information on the :py:class:`~ray.rllib.core.models.catalog.Catalog` class, refer to the `Catalog user guide `__. - .. code-block:: python - class MyAwesomeCatalog(PPOCatalog): + .. code-block:: python - def get_actor_critic_encoder_config(): - # create your awesome graph encoder here and return it - pass - + class MyAwesomeCatalog(PPOCatalog): - # Pass in the custom catalog class to the spec - algo_config = algo_config.rl_module( - rl_module_spec=SingleAgentRLModuleSpec(catalog_class=MyAwesomeCatalog) - ) + def get_actor_critic_encoder_config(): + # create your awesome graph encoder here and return it + pass + + + # Pass in the custom catalog class to the spec + algo_config = algo_config.rl_module( + rl_module_spec=SingleAgentRLModuleSpec(catalog_class=MyAwesomeCatalog) + ) Migrating from Custom Policies and Models to RL Modules @@ -293,150 +304,154 @@ In the new `~ray.rllib.core.rl_module.rl_module.RLModule` API the construction o What your customization could have looked like before: -.. tabbed:: ModelV2 +.. tab-set:: - .. code-block:: python - - from ray.rllib.models.torch.torch_modelv2 import TorchModelV2 - from ray.rllib.policy.torch_policy_v2 import TorchPolicyV2 + .. tab-item:: ModelV2 + .. code-block:: python - class MyCustomModel(TorchModelV2): - """Code for your previous custom model""" - ... + from ray.rllib.models.torch.torch_modelv2 import TorchModelV2 + from ray.rllib.policy.torch_policy_v2 import TorchPolicyV2 - class CustomPolicy(TorchPolicyV2): + class MyCustomModel(TorchModelV2): + """Code for your previous custom model""" + ... 
- @DeveloperAPI - @OverrideToImplementCustomLogic - def make_model(self) -> ModelV2: - """Create model. - Note: only one of make_model or make_model_and_action_dist - can be overridden. + class CustomPolicy(TorchPolicyV2): - Returns: - ModelV2 model. - """ - return MyCustomModel(...) + @DeveloperAPI + @OverrideToImplementCustomLogic + def make_model(self) -> ModelV2: + """Create model. + Note: only one of make_model or make_model_and_action_dist + can be overridden. -.. tabbed:: ModelV2 + Distribution + Returns: + ModelV2 model. + """ + return MyCustomModel(...) - .. code-block:: python - - from ray.rllib.models.torch.torch_modelv2 import TorchModelV2 - from ray.rllib.policy.torch_policy_v2 import TorchPolicyV2 + .. tab-item:: ModelV2 + Distribution - class MyCustomModel(TorchModelV2): - """Code for your previous custom model""" - ... + .. code-block:: python + from ray.rllib.models.torch.torch_modelv2 import TorchModelV2 + from ray.rllib.policy.torch_policy_v2 import TorchPolicyV2 - class CustomPolicy(TorchPolicyV2): - @DeveloperAPI - @OverrideToImplementCustomLogic - def make_model_and_action_dist(self): - """Create model and action distribution function. + class MyCustomModel(TorchModelV2): + """Code for your previous custom model""" + ... - Returns: - ModelV2 model. - ActionDistribution class. - """ - my_model = MyCustomModel(...) # construct some ModelV2 instance here - dist_class = ... # Action distribution cls - - return my_model, dist_class - - -.. tabbed:: Sampler functions - - .. 
code-block:: python - - from ray.rllib.models.torch.torch_modelv2 import TorchModelV2 - from ray.rllib.policy.torch_policy_v2 import TorchPolicyV2 - - class CustomPolicy(TorchPolicyV2): - - @DeveloperAPI - @OverrideToImplementCustomLogic - def action_sampler_fn( - self, - model: ModelV2, - *, - obs_batch: TensorType, - state_batches: TensorType, - **kwargs, - ) -> Tuple[TensorType, TensorType, TensorType, List[TensorType]]: - """Custom function for sampling new actions given policy. - - Args: - model: Underlying model. - obs_batch: Observation tensor batch. - state_batches: Action sampling state batch. - - Returns: - Sampled action - Log-likelihood - Action distribution inputs - Updated state - """ - return None, None, None, None - - - @DeveloperAPI - @OverrideToImplementCustomLogic - def action_distribution_fn( - self, - model: ModelV2, - *, - obs_batch: TensorType, - state_batches: TensorType, - **kwargs, - ) -> Tuple[TensorType, type, List[TensorType]]: - """Action distribution function for this Policy. - - Args: - model: Underlying model. - obs_batch: Observation tensor batch. - state_batches: Action sampling state batch. - - Returns: - Distribution input. - ActionDistribution class. - State outs. - """ - return None, None, None + + class CustomPolicy(TorchPolicyV2): + + @DeveloperAPI + @OverrideToImplementCustomLogic + def make_model_and_action_dist(self): + """Create model and action distribution function. + + Returns: + ModelV2 model. + ActionDistribution class. + """ + my_model = MyCustomModel(...) # construct some ModelV2 instance here + dist_class = ... # Action distribution cls + + return my_model, dist_class + + + .. tab-item:: Sampler functions + + .. 
code-block:: python + + from ray.rllib.models.torch.torch_modelv2 import TorchModelV2 + from ray.rllib.policy.torch_policy_v2 import TorchPolicyV2 + + class CustomPolicy(TorchPolicyV2): + + @DeveloperAPI + @OverrideToImplementCustomLogic + def action_sampler_fn( + self, + model: ModelV2, + *, + obs_batch: TensorType, + state_batches: TensorType, + **kwargs, + ) -> Tuple[TensorType, TensorType, TensorType, List[TensorType]]: + """Custom function for sampling new actions given policy. + + Args: + model: Underlying model. + obs_batch: Observation tensor batch. + state_batches: Action sampling state batch. + + Returns: + Sampled action + Log-likelihood + Action distribution inputs + Updated state + """ + return None, None, None, None + + + @DeveloperAPI + @OverrideToImplementCustomLogic + def action_distribution_fn( + self, + model: ModelV2, + *, + obs_batch: TensorType, + state_batches: TensorType, + **kwargs, + ) -> Tuple[TensorType, type, List[TensorType]]: + """Action distribution function for this Policy. + + Args: + model: Underlying model. + obs_batch: Observation tensor batch. + state_batches: Action sampling state batch. + + Returns: + Distribution input. + ActionDistribution class. + State outs. + """ + return None, None, None All of the ``Policy.compute_***`` functions expect that `~ray.rllib.core.rl_module.rl_module.RLModule.forward_exploration` and `~ray.rllib.core.rl_module.rl_module.RLModule.forward_inference` return a dictionary that contains the key "action_dist" mapping to a ``ray.rllib.models.distributions.Distribution`` instance. Commonly used distribution implementations can be found under ``ray.rllib.models.tf.tf_distributions`` for tensorflow and ``ray.rllib.models.torch.torch_distributions`` for torch. You can choose to return determinstic actions, by creating a determinstic distribution instance. See `Writing Custom Single Agent RL Modules`_ for more details on how to implement your own custom RL Module. -.. 
tabbed:: The Equivalent RL Module +.. tab-set:: - .. code-block:: python + .. tab-item:: The Equivalent RL Module - """ - No need to override any policy functions. Simply instead implement any custom logic in your custom RL Module - """ - from ray.rllib.models.torch.torch_distributions import YOUR_DIST_CLASS + .. code-block:: python + """ + No need to override any policy functions. Simply instead implement any custom logic in your custom RL Module + """ + from ray.rllib.models.torch.torch_distributions import YOUR_DIST_CLASS - class MyRLModule(TorchRLModule): - def __init__(self, config: RLConfig): - # construct any custom networks here using config - # specify an action distribution class here - ... + class MyRLModule(TorchRLModule): - def _forward_inference(self, batch): - ... + def __init__(self, config: RLConfig): + # construct any custom networks here using config + # specify an action distribution class here + ... - def _forward_exploration(self, batch): - ... + def _forward_inference(self, batch): + ... + + def _forward_exploration(self, batch): + ... Notable TODOs diff --git a/doc/source/serve/index.md b/doc/source/serve/index.md index 4650ed3abb6b..ac86abc27c31 100644 --- a/doc/source/serve/index.md +++ b/doc/source/serve/index.md @@ -38,11 +38,14 @@ Define a simple "hello world" application, run it locally, and query it over HTT :language: python ``` -:::{tabbed} More examples +:::{tab-set} + +::::::{tab-item} More examples For more examples, select from the tabs. -::: +:::::: -:::{tabbed} Model composition +:::{tab-set} +:::{tab-item} Model composition Use Serve's model composition API to combine multiple deployments into a single application. @@ -51,16 +54,16 @@ Use Serve's model composition API to combine multiple deployments into a single ``` ::: -:::{tabbed} FastAPI integration +::::::{tab-item} FastAPI integration Use Serve's [FastAPI](https://fastapi.tiangolo.com/) integration to elegantly handle HTTP parsing and validation. 
```{literalinclude} doc_code/fastapi_example.py :language: python ``` -::: +:::::: -:::{tabbed} Hugging Face Transformers model +::::::{tab-item} Hugging Face Transformers model To run this example, install the following: ``pip install transformers`` @@ -70,6 +73,8 @@ The model we'll use is a sentiment analysis model: it will take a text string as ```{literalinclude} doc_code/transformers_example.py :language: python ``` +:::::: + ::: ## Why choose Serve? diff --git a/doc/source/serve/model_composition.md b/doc/source/serve/model_composition.md index d07dcc68211e..93242ea66140 100644 --- a/doc/source/serve/model_composition.md +++ b/doc/source/serve/model_composition.md @@ -331,23 +331,27 @@ You can render an illustration of your deployment graph to see its nodes and the Make sure you have `pydot` and `graphviz` to follow this section: -::::{tabbed} MacOS +:::{tab-set} + +::::::{tab-item} MacOS ``` pip install -U pydot && brew install graphviz ``` -:::: +:::::: -::::{tabbed} Windows +::::{tab-item} Windows ``` pip install -U pydot && winget install graphviz ``` -:::: +:::::: -::::{tabbed} Linux +::::::{tab-item} Linux ``` pip install -U pydot && sudo apt-get install -y graphviz ``` -:::: +:::::: + +::: Here's an example graph: diff --git a/doc/source/serve/production-guide/fault-tolerance.md b/doc/source/serve/production-guide/fault-tolerance.md index eb4fbd837f41..5058c40da5aa 100644 --- a/doc/source/serve/production-guide/fault-tolerance.md +++ b/doc/source/serve/production-guide/fault-tolerance.md @@ -149,7 +149,9 @@ After adding the Redis objects, you also need to modify the `RayService` configu First, you need to update your `RayService` metadata's annotations: -::::{tabbed} Vanilla Config +:::{tab-set} + +::::{tab-item} Vanilla Config ```yaml ... apiVersion: ray.io/v1alpha1 @@ -161,7 +163,7 @@ spec: ``` :::: -::::{tabbed} Fault Tolerant Config +::::{tab-item} Fault Tolerant Config :selected: ```yaml ... @@ -176,6 +178,7 @@ spec: ... 
``` :::: +::: The annotations are: * `ray.io/ft-enabled` (REQUIRED): Enables GCS fault tolerance when true @@ -183,7 +186,8 @@ The annotations are: Next, you need to add the `RAY_REDIS_ADDRESS` environment variable to the `headGroupSpec`: -::::{tabbed} Vanilla Config +:::{tab-set} +::::{tab-item} Vanilla Config ```yaml apiVersion: ray.io/v1alpha1 kind: RayService @@ -203,7 +207,7 @@ spec: ``` :::: -::::{tabbed} Fault Tolerant Config +::::{tab-item} Fault Tolerant Config :selected: ```yaml apiVersion: ray.io/v1alpha1 @@ -225,6 +229,7 @@ spec: value: redis:6379 ``` :::: +::: `RAY_REDIS_ADDRESS`'s value should be your Redis database's `redis://` address. It should contain your Redis database's host and port. An [example Redis address](https://www.iana.org/assignments/uri-schemes/prov/rediss) is `redis://user:secret@localhost:6379/0?foo=bar&qux=baz`. @@ -241,7 +246,8 @@ Check out the KubeRay guide on [GCS fault tolerance](https://ray-project.github. This section explains how Serve recovers from system failures. It uses the following Serve application and config as a working example. -::::{tabbed} Python Code +:::{tab-set} +::::{tab-item} Python Code ```{literalinclude} ../doc_code/fault_tolerance/sleepy_pid.py :start-after: __start__ :end-before: __end__ @@ -249,11 +255,12 @@ This section explains how Serve recovers from system failures. 
It uses the follo ``` :::: -::::{tabbed} Kubernetes Config +::::{tab-item} Kubernetes Config ```{literalinclude} ../doc_code/fault_tolerance/k8s_config.yaml :language: yaml ``` :::: +::: Follow the [KubeRay quickstart guide](kuberay-quickstart) to: * Install `kubectl` and `Helm` diff --git a/doc/source/serve/tutorials/gradio-dag-visualization.md b/doc/source/serve/tutorials/gradio-dag-visualization.md index 5e34ad62ddb7..d417a2b4d2da 100644 --- a/doc/source/serve/tutorials/gradio-dag-visualization.md +++ b/doc/source/serve/tutorials/gradio-dag-visualization.md @@ -12,24 +12,28 @@ pip install gradio ``` Additionally, you can optionally install `pydot` and `graphviz`. This will allow this tool to incorporate the complementary [graphical illustration](pydot-visualize-dag) of the nodes and edges. -::::{tabbed} MacOS + +:::{tab-set} + +::::{tab-item} MacOS ``` pip install -U pydot && brew install graphviz ``` :::: -::::{tabbed} Windows +::::{tab-item} Windows ``` pip install -U pydot && winget install graphviz ``` :::: -::::{tabbed} Linux +::::{tab-item} Linux ``` pip install -U pydot && sudo apt-get install -y graphviz ``` :::: +::: Also, for the [quickstart example](gradio-vis-quickstart), install the `transformers` module to pull models through [HuggingFace's Pipelines](https://huggingface.co/docs/transformers/main_classes/pipelines). ```console diff --git a/doc/source/serve/tutorials/serve-ml-models.md b/doc/source/serve/tutorials/serve-ml-models.md index 5ff6b1c2abf8..65c8144631ae 100644 --- a/doc/source/serve/tutorials/serve-ml-models.md +++ b/doc/source/serve/tutorials/serve-ml-models.md @@ -7,9 +7,9 @@ In this guide, we will show you how to train models from various machine learnin Please see the [Key Concepts](serve-key-concepts) to learn more general information about Ray Serve. +:::{tab-set} -::::{tabbed} Keras and Tensorflow - +::::{tab-item} Keras and Tensorflow Let's train and deploy a simple Tensorflow neural net. 
In particular, we will show: @@ -17,7 +17,7 @@ In particular, we will show: - How to train a Tensorflow model and load the model from your file system in your Ray Serve deployment. - How to parse the JSON request and make a prediction. -Ray Serve is framework agnostic -- you can use any version of Tensorflow. +Ray Serve is framework-agnostic -- you can use any version of Tensorflow. However, for this tutorial, we will use Tensorflow 2 and Keras. We will also need `requests` to send HTTP requests to your model deployment. If you haven't already, please install Tensorflow 2 and requests by running: ```console @@ -116,7 +116,7 @@ You should get an output like the following (the exact prediction may vary): ``` :::: -::::{tabbed} Pytorch +::::{tab-item} Pytorch Let's load and deploy a PyTorch Resnet Model. In particular, we will show: @@ -185,7 +185,7 @@ You should get an output like the following (the exact number may vary): ``` :::: -::::{tabbed} Scikit-Learn +::::{tab-item} Scikit-Learn Let's train and deploy a simple Scikit-Learn classifier. In particular, we will show: @@ -282,4 +282,6 @@ You should get an output like the following (the exact prediction may vary): ```python {"result": "versicolor"} ``` -:::: \ No newline at end of file +:::: + +::: \ No newline at end of file diff --git a/doc/source/train/dl_guide.rst b/doc/source/train/dl_guide.rst index b97fe2bdc0e9..9ee04e01240a 100644 --- a/doc/source/train/dl_guide.rst +++ b/doc/source/train/dl_guide.rst @@ -51,128 +51,130 @@ Updating your training function First, you'll want to update your training function to support distributed training. -.. tabbed:: PyTorch +.. tab-set:: - Ray Train will set up your distributed process group for you and also provides utility methods - to automatically prepare your model and data for distributed training. + .. tab-item:: PyTorch - .. 
note:: - Ray Train will still work even if you don't use the :func:`ray.train.torch.prepare_model` - and :func:`ray.train.torch.prepare_data_loader` utilities below, - and instead handle the logic directly inside your training function. + Ray Train will set up your distributed process group for you and also provides utility methods + to automatically prepare your model and data for distributed training. - First, use the :func:`~ray.train.torch.prepare_model` function to automatically move your model to the right device and wrap it in - ``DistributedDataParallel``: + .. note:: + Ray Train will still work even if you don't use the :func:`ray.train.torch.prepare_model` + and :func:`ray.train.torch.prepare_data_loader` utilities below, + and instead handle the logic directly inside your training function. - .. code-block:: diff + First, use the :func:`~ray.train.torch.prepare_model` function to automatically move your model to the right device and wrap it in + ``DistributedDataParallel``: - import torch - from torch.nn.parallel import DistributedDataParallel - +from ray.air import session - +from ray import train - +import ray.train.torch + .. code-block:: diff + import torch + from torch.nn.parallel import DistributedDataParallel + +from ray.air import session + +from ray import train + +import ray.train.torch - def train_func(): - - device = torch.device(f"cuda:{session.get_local_rank()}" if - - torch.cuda.is_available() else "cpu") - - torch.cuda.set_device(device) - # Create model. - model = NeuralNetwork() + def train_func(): + - device = torch.device(f"cuda:{session.get_local_rank()}" if + - torch.cuda.is_available() else "cpu") + - torch.cuda.set_device(device) - - model = model.to(device) - - model = DistributedDataParallel(model, - - device_ids=[session.get_local_rank()] if torch.cuda.is_available() else None) + # Create model. 
+ model = NeuralNetwork() - + model = train.torch.prepare_model(model) + - model = model.to(device) + - model = DistributedDataParallel(model, + - device_ids=[session.get_local_rank()] if torch.cuda.is_available() else None) - ... - + + model = train.torch.prepare_model(model) + ... - Then, use the ``prepare_data_loader`` function to automatically add a ``DistributedSampler`` to your ``DataLoader`` - and move the batches to the right device. This step is not necessary if you are passing in Ray Data to your Trainer - (see :ref:`train-datasets`): - .. code-block:: diff - import torch - from torch.utils.data import DataLoader, DistributedSampler - +from ray.air import session - +from ray import train - +import ray.train.torch + Then, use the ``prepare_data_loader`` function to automatically add a ``DistributedSampler`` to your ``DataLoader`` + and move the batches to the right device. This step is not necessary if you are passing in Ray Data to your Trainer + (see :ref:`train-datasets`): + .. code-block:: diff - def train_func(): - - device = torch.device(f"cuda:{session.get_local_rank()}" if - - torch.cuda.is_available() else "cpu") - - torch.cuda.set_device(device) + import torch + from torch.utils.data import DataLoader, DistributedSampler + +from ray.air import session + +from ray import train + +import ray.train.torch - ... - - data_loader = DataLoader(my_dataset, batch_size=worker_batch_size, sampler=DistributedSampler(dataset)) + def train_func(): + - device = torch.device(f"cuda:{session.get_local_rank()}" if + - torch.cuda.is_available() else "cpu") + - torch.cuda.set_device(device) - + data_loader = DataLoader(my_dataset, batch_size=worker_batch_size) - + data_loader = train.torch.prepare_data_loader(data_loader) + ... - for X, y in data_loader: - - X = X.to_device(device) - - y = y.to_device(device) + - data_loader = DataLoader(my_dataset, batch_size=worker_batch_size, sampler=DistributedSampler(dataset)) - .. 
tip:: - Keep in mind that ``DataLoader`` takes in a ``batch_size`` which is the batch size for each worker. - The global batch size can be calculated from the worker batch size (and vice-versa) with the following equation: + + data_loader = DataLoader(my_dataset, batch_size=worker_batch_size) + + data_loader = train.torch.prepare_data_loader(data_loader) - .. code-block:: python - - global_batch_size = worker_batch_size * session.get_world_size() + for X, y in data_loader: + - X = X.to_device(device) + - y = y.to_device(device) + + .. tip:: + Keep in mind that ``DataLoader`` takes in a ``batch_size`` which is the batch size for each worker. + The global batch size can be calculated from the worker batch size (and vice-versa) with the following equation: + + .. code-block:: python -.. tabbed:: TensorFlow + global_batch_size = worker_batch_size * session.get_world_size() - .. note:: - The current TensorFlow implementation supports - ``MultiWorkerMirroredStrategy`` (and ``MirroredStrategy``). If there are - other strategies you wish to see supported by Ray Train, please let us know - by submitting a `feature request on GitHub `_. + .. tab-item:: TensorFlow - These instructions closely follow TensorFlow's `Multi-worker training - with Keras `_ - tutorial. One key difference is that Ray Train will handle the environment - variable set up for you. + .. note:: + The current TensorFlow implementation supports + ``MultiWorkerMirroredStrategy`` (and ``MirroredStrategy``). If there are + other strategies you wish to see supported by Ray Train, please let us know + by submitting a `feature request on GitHub `_. - **Step 1:** Wrap your model in ``MultiWorkerMirroredStrategy``. + These instructions closely follow TensorFlow's `Multi-worker training + with Keras `_ + tutorial. One key difference is that Ray Train will handle the environment + variable set up for you. - The `MultiWorkerMirroredStrategy `_ - enables synchronous distributed training. 
The ``Model`` *must* be built and - compiled within the scope of the strategy. + **Step 1:** Wrap your model in ``MultiWorkerMirroredStrategy``. - .. code-block:: python + The `MultiWorkerMirroredStrategy `_ + enables synchronous distributed training. The ``Model`` *must* be built and + compiled within the scope of the strategy. + + .. code-block:: python - with tf.distribute.MultiWorkerMirroredStrategy().scope(): - model = ... # build model - model.compile() + with tf.distribute.MultiWorkerMirroredStrategy().scope(): + model = ... # build model + model.compile() - **Step 2:** Update your ``Dataset`` batch size to the *global* batch - size. + **Step 2:** Update your ``Dataset`` batch size to the *global* batch + size. - The `batch `_ - will be split evenly across worker processes, so ``batch_size`` should be - set appropriately. + The `batch `_ + will be split evenly across worker processes, so ``batch_size`` should be + set appropriately. - .. code-block:: diff + .. code-block:: diff - -batch_size = worker_batch_size - +batch_size = worker_batch_size * session.get_world_size() + -batch_size = worker_batch_size + +batch_size = worker_batch_size * session.get_world_size() -.. tabbed:: Horovod + .. tab-item:: Horovod - If you have a training function that already runs with the `Horovod Ray - Executor `_, - you should not need to make any additional changes! + If you have a training function that already runs with the `Horovod Ray + Executor `_, + you should not need to make any additional changes! - To onboard onto Horovod, please visit the `Horovod guide - `_. + To onboard onto Horovod, please visit the `Horovod guide + `_. Creating a Ray Train Trainer ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -181,96 +183,100 @@ Creating a Ray Train Trainer execute training. You can create a simple ``Trainer`` for the backend of choice with one of the following: -.. tabbed:: PyTorch +.. tab-set:: - .. code-block:: python + .. 
tab-item:: PyTorch - from ray.air import ScalingConfig - from ray.train.torch import TorchTrainer - # For GPU Training, set `use_gpu` to True. - use_gpu = False - trainer = TorchTrainer( - train_func, - scaling_config=ScalingConfig(use_gpu=use_gpu, num_workers=2) - ) + .. code-block:: python + + from ray.air import ScalingConfig + from ray.train.torch import TorchTrainer + # For GPU Training, set `use_gpu` to True. + use_gpu = False + trainer = TorchTrainer( + train_func, + scaling_config=ScalingConfig(use_gpu=use_gpu, num_workers=2) + ) -.. tabbed:: TensorFlow + .. tab-item:: TensorFlow - .. warning:: - Ray will not automatically set any environment variables or configuration - related to local parallelism / threading - :ref:`aside from "OMP_NUM_THREADS" `. - If you desire greater control over TensorFlow threading, use - the ``tf.config.threading`` module (eg. - ``tf.config.threading.set_inter_op_parallelism_threads(num_cpus)``) - at the beginning of your ``train_loop_per_worker`` function. + .. warning:: + Ray will not automatically set any environment variables or configuration + related to local parallelism / threading + :ref:`aside from "OMP_NUM_THREADS" `. + If you desire greater control over TensorFlow threading, use + the ``tf.config.threading`` module (eg. + ``tf.config.threading.set_inter_op_parallelism_threads(num_cpus)``) + at the beginning of your ``train_loop_per_worker`` function. - .. code-block:: python + .. code-block:: python - from ray.air import ScalingConfig - from ray.train.tensorflow import TensorflowTrainer - # For GPU Training, set `use_gpu` to True. - use_gpu = False - trainer = TensorflowTrainer( - train_func, - scaling_config=ScalingConfig(use_gpu=use_gpu, num_workers=2) - ) + from ray.air import ScalingConfig + from ray.train.tensorflow import TensorflowTrainer + # For GPU Training, set `use_gpu` to True. 
+ use_gpu = False + trainer = TensorflowTrainer( + train_func, + scaling_config=ScalingConfig(use_gpu=use_gpu, num_workers=2) + ) -.. tabbed:: Horovod + .. tab-item:: Horovod - .. code-block:: python + .. code-block:: python - from ray.air import ScalingConfig - from ray.train.horovod import HorovodTrainer - # For GPU Training, set `use_gpu` to True. - use_gpu = False - trainer = HorovodTrainer( - train_func, - scaling_config=ScalingConfig(use_gpu=use_gpu, num_workers=2) - ) + from ray.air import ScalingConfig + from ray.train.horovod import HorovodTrainer + # For GPU Training, set `use_gpu` to True. + use_gpu = False + trainer = HorovodTrainer( + train_func, + scaling_config=ScalingConfig(use_gpu=use_gpu, num_workers=2) + ) To customize the backend setup, you can use the :ref:`framework-specific config objects `. -.. tabbed:: PyTorch +.. tab-set:: - .. code-block:: python + .. tab-item:: PyTorch - from ray.air import ScalingConfig - from ray.train.torch import TorchTrainer, TorchConfig + .. code-block:: python + + from ray.air import ScalingConfig + from ray.train.torch import TorchTrainer, TorchConfig - trainer = TorchTrainer( - train_func, - torch_backend=TorchConfig(...), - scaling_config=ScalingConfig(num_workers=2), - ) + trainer = TorchTrainer( + train_func, + torch_backend=TorchConfig(...), + scaling_config=ScalingConfig(num_workers=2), + ) -.. tabbed:: TensorFlow + .. tab-item:: TensorFlow - .. code-block:: python + .. code-block:: python - from ray.air import ScalingConfig - from ray.train.tensorflow import TensorflowTrainer, TensorflowConfig + from ray.air import ScalingConfig + from ray.train.tensorflow import TensorflowTrainer, TensorflowConfig - trainer = TensorflowTrainer( - train_func, - tensorflow_backend=TensorflowConfig(...), - scaling_config=ScalingConfig(num_workers=2), - ) + trainer = TensorflowTrainer( + train_func, + tensorflow_backend=TensorflowConfig(...), + scaling_config=ScalingConfig(num_workers=2), + ) -.. tabbed:: Horovod + .. 
tab-item:: Horovod - .. code-block:: python + .. code-block:: python - from ray.air import ScalingConfig - from ray.train.horovod import HorovodTrainer, HorovodConfig + from ray.air import ScalingConfig + from ray.train.horovod import HorovodTrainer, HorovodConfig - trainer = HorovodTrainer( - train_func, - tensorflow_backend=HorovodConfig(...), - scaling_config=ScalingConfig(num_workers=2), - ) + trainer = HorovodTrainer( + train_func, + tensorflow_backend=HorovodConfig(...), + scaling_config=ScalingConfig(num_workers=2), + ) For more configurability, please reference the :py:class:`~ray.train.data_parallel_trainer.DataParallelTrainer` API. @@ -520,102 +526,104 @@ attribute. Concrete examples are provided to demonstrate how checkpoints (model weights but not models) are saved appropriately in distributed training. -.. tabbed:: PyTorch - - .. code-block:: python - :emphasize-lines: 36, 37, 38, 39, 40, 41 - - import ray.train.torch - from ray.air import session, Checkpoint, ScalingConfig - from ray.train.torch import TorchTrainer - - import torch - import torch.nn as nn - from torch.optim import Adam - import numpy as np - - def train_func(config): - n = 100 - # create a toy dataset - # data : X - dim = (n, 4) - # target : Y - dim = (n, 1) - X = torch.Tensor(np.random.normal(0, 1, size=(n, 4))) - Y = torch.Tensor(np.random.uniform(0, 1, size=(n, 1))) - # toy neural network : 1-layer - # wrap the model in DDP - model = ray.train.torch.prepare_model(nn.Linear(4, 1)) - criterion = nn.MSELoss() - - optimizer = Adam(model.parameters(), lr=3e-4) - for epoch in range(config["num_epochs"]): - y = model.forward(X) - # compute loss - loss = criterion(y, Y) - # back-propagate loss - optimizer.zero_grad() - loss.backward() - optimizer.step() - state_dict = model.state_dict() - checkpoint = Checkpoint.from_dict( - dict(epoch=epoch, model_weights=state_dict) - ) - session.report({}, checkpoint=checkpoint) - - trainer = TorchTrainer( - train_func, - 
train_loop_config={"num_epochs": 5}, - scaling_config=ScalingConfig(num_workers=2), - ) - result = trainer.fit() - - print(result.checkpoint.to_dict()) - # {'epoch': 4, 'model_weights': OrderedDict([('bias', tensor([-0.1215])), ('weight', tensor([[0.3253, 0.1979, 0.4525, 0.2850]]))]), '_timestamp': 1656107095, '_preprocessor': None, '_current_checkpoint_id': 4} - - -.. tabbed:: TensorFlow - - .. code-block:: python - :emphasize-lines: 23 - - from ray.air import session, Checkpoint, ScalingConfig - from ray.train.tensorflow import TensorflowTrainer - - import numpy as np - - def train_func(config): - import tensorflow as tf - n = 100 - # create a toy dataset - # data : X - dim = (n, 4) - # target : Y - dim = (n, 1) - X = np.random.normal(0, 1, size=(n, 4)) - Y = np.random.uniform(0, 1, size=(n, 1)) - - strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy() - with strategy.scope(): +.. tab-set:: + + .. tab-item:: PyTorch + + .. code-block:: python + :emphasize-lines: 36, 37, 38, 39, 40, 41 + + import ray.train.torch + from ray.air import session, Checkpoint, ScalingConfig + from ray.train.torch import TorchTrainer + + import torch + import torch.nn as nn + from torch.optim import Adam + import numpy as np + + def train_func(config): + n = 100 + # create a toy dataset + # data : X - dim = (n, 4) + # target : Y - dim = (n, 1) + X = torch.Tensor(np.random.normal(0, 1, size=(n, 4))) + Y = torch.Tensor(np.random.uniform(0, 1, size=(n, 1))) # toy neural network : 1-layer - model = tf.keras.Sequential([tf.keras.layers.Dense(1, activation="linear", input_shape=(4,))]) - model.compile(optimizer="Adam", loss="mean_squared_error", metrics=["mse"]) - - for epoch in range(config["num_epochs"]): - model.fit(X, Y, batch_size=20) - checkpoint = Checkpoint.from_dict( - dict(epoch=epoch, model_weights=model.get_weights()) - ) - session.report({}, checkpoint=checkpoint) - - trainer = TensorflowTrainer( - train_func, - train_loop_config={"num_epochs": 5}, - 
scaling_config=ScalingConfig(num_workers=2), - ) - result = trainer.fit() - - print(result.checkpoint.to_dict()) - # {'epoch': 4, 'model_weights': [array([[-0.31858477], - # [ 0.03747174], - # [ 0.28266194], - # [ 0.8626015 ]], dtype=float32), array([0.02230084], dtype=float32)], '_timestamp': 1656107383, '_preprocessor': None, '_current_checkpoint_id': 4} + # wrap the model in DDP + model = ray.train.torch.prepare_model(nn.Linear(4, 1)) + criterion = nn.MSELoss() + + optimizer = Adam(model.parameters(), lr=3e-4) + for epoch in range(config["num_epochs"]): + y = model.forward(X) + # compute loss + loss = criterion(y, Y) + # back-propagate loss + optimizer.zero_grad() + loss.backward() + optimizer.step() + state_dict = model.state_dict() + checkpoint = Checkpoint.from_dict( + dict(epoch=epoch, model_weights=state_dict) + ) + session.report({}, checkpoint=checkpoint) + + trainer = TorchTrainer( + train_func, + train_loop_config={"num_epochs": 5}, + scaling_config=ScalingConfig(num_workers=2), + ) + result = trainer.fit() + + print(result.checkpoint.to_dict()) + # {'epoch': 4, 'model_weights': OrderedDict([('bias', tensor([-0.1215])), ('weight', tensor([[0.3253, 0.1979, 0.4525, 0.2850]]))]), '_timestamp': 1656107095, '_preprocessor': None, '_current_checkpoint_id': 4} + + + .. tab-item:: TensorFlow + + .. 
code-block:: python + :emphasize-lines: 23 + + from ray.air import session, Checkpoint, ScalingConfig + from ray.train.tensorflow import TensorflowTrainer + + import numpy as np + + def train_func(config): + import tensorflow as tf + n = 100 + # create a toy dataset + # data : X - dim = (n, 4) + # target : Y - dim = (n, 1) + X = np.random.normal(0, 1, size=(n, 4)) + Y = np.random.uniform(0, 1, size=(n, 1)) + + strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy() + with strategy.scope(): + # toy neural network : 1-layer + model = tf.keras.Sequential([tf.keras.layers.Dense(1, activation="linear", input_shape=(4,))]) + model.compile(optimizer="Adam", loss="mean_squared_error", metrics=["mse"]) + + for epoch in range(config["num_epochs"]): + model.fit(X, Y, batch_size=20) + checkpoint = Checkpoint.from_dict( + dict(epoch=epoch, model_weights=model.get_weights()) + ) + session.report({}, checkpoint=checkpoint) + + trainer = TensorflowTrainer( + train_func, + train_loop_config={"num_epochs": 5}, + scaling_config=ScalingConfig(num_workers=2), + ) + result = trainer.fit() + + print(result.checkpoint.to_dict()) + # {'epoch': 4, 'model_weights': [array([[-0.31858477], + # [ 0.03747174], + # [ 0.28266194], + # [ 0.8626015 ]], dtype=float32), array([0.02230084], dtype=float32)], '_timestamp': 1656107383, '_preprocessor': None, '_current_checkpoint_id': 4} By default, checkpoints will be persisted to local disk in the :ref:`log @@ -703,141 +711,143 @@ Checkpoints can be loaded into the training function in 2 steps: 2. The checkpoint to start training with can be bootstrapped by passing in a :py:class:`~ray.air.checkpoint.Checkpoint` to ``Trainer`` as the ``resume_from_checkpoint`` argument. -.. tabbed:: PyTorch - - .. 
code-block:: python - :emphasize-lines: 23, 25, 26, 29, 30, 31, 35 - - import ray.train.torch - from ray.air import session, Checkpoint, ScalingConfig - from ray.train.torch import TorchTrainer - - import torch - import torch.nn as nn - from torch.optim import Adam - import numpy as np - - def train_func(config): - n = 100 - # create a toy dataset - # data : X - dim = (n, 4) - # target : Y - dim = (n, 1) - X = torch.Tensor(np.random.normal(0, 1, size=(n, 4))) - Y = torch.Tensor(np.random.uniform(0, 1, size=(n, 1))) - - # toy neural network : 1-layer - model = nn.Linear(4, 1) - criterion = nn.MSELoss() - optimizer = Adam(model.parameters(), lr=3e-4) - start_epoch = 0 - - checkpoint = session.get_checkpoint() - if checkpoint: - # assume that we have run the session.report() example - # and successfully save some model weights - checkpoint_dict = checkpoint.to_dict() - model.load_state_dict(checkpoint_dict.get("model_weights")) - start_epoch = checkpoint_dict.get("epoch", -1) + 1 - - # wrap the model in DDP - model = ray.train.torch.prepare_model(model) - for epoch in range(start_epoch, config["num_epochs"]): - y = model.forward(X) - # compute loss - loss = criterion(y, Y) - # back-propagate loss - optimizer.zero_grad() - loss.backward() - optimizer.step() - state_dict = model.state_dict() - checkpoint = Checkpoint.from_dict( - dict(epoch=epoch, model_weights=state_dict) - ) - session.report({}, checkpoint=checkpoint) - - trainer = TorchTrainer( - train_func, - train_loop_config={"num_epochs": 2}, - scaling_config=ScalingConfig(num_workers=2), - ) - # save a checkpoint - result = trainer.fit() - - # load checkpoint - trainer = TorchTrainer( - train_func, - train_loop_config={"num_epochs": 4}, - scaling_config=ScalingConfig(num_workers=2), - resume_from_checkpoint=result.checkpoint, - ) - result = trainer.fit() - - print(result.checkpoint.to_dict()) - # {'epoch': 3, 'model_weights': OrderedDict([('bias', tensor([0.0902])), ('weight', tensor([[-0.1549, -0.0861, 0.4353, 
-0.4116]]))]), '_timestamp': 1656108265, '_preprocessor': None, '_current_checkpoint_id': 2} - -.. tabbed:: TensorFlow - - .. code-block:: python - :emphasize-lines: 15, 21, 22, 25, 26, 27, 30 - - from ray.air import session, Checkpoint, ScalingConfig - from ray.train.tensorflow import TensorflowTrainer - - import numpy as np - - def train_func(config): - import tensorflow as tf - n = 100 - # create a toy dataset - # data : X - dim = (n, 4) - # target : Y - dim = (n, 1) - X = np.random.normal(0, 1, size=(n, 4)) - Y = np.random.uniform(0, 1, size=(n, 1)) - - start_epoch = 0 - strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy() - - with strategy.scope(): +.. tab-set:: + + .. tab-item:: PyTorch + + .. code-block:: python + :emphasize-lines: 23, 25, 26, 29, 30, 31, 35 + + import ray.train.torch + from ray.air import session, Checkpoint, ScalingConfig + from ray.train.torch import TorchTrainer + + import torch + import torch.nn as nn + from torch.optim import Adam + import numpy as np + + def train_func(config): + n = 100 + # create a toy dataset + # data : X - dim = (n, 4) + # target : Y - dim = (n, 1) + X = torch.Tensor(np.random.normal(0, 1, size=(n, 4))) + Y = torch.Tensor(np.random.uniform(0, 1, size=(n, 1))) + # toy neural network : 1-layer - model = tf.keras.Sequential([tf.keras.layers.Dense(1, activation="linear", input_shape=(4,))]) + model = nn.Linear(4, 1) + criterion = nn.MSELoss() + optimizer = Adam(model.parameters(), lr=3e-4) + start_epoch = 0 + checkpoint = session.get_checkpoint() if checkpoint: # assume that we have run the session.report() example # and successfully save some model weights checkpoint_dict = checkpoint.to_dict() - model.set_weights(checkpoint_dict.get("model_weights")) + model.load_state_dict(checkpoint_dict.get("model_weights")) start_epoch = checkpoint_dict.get("epoch", -1) + 1 - model.compile(optimizer="Adam", loss="mean_squared_error", metrics=["mse"]) - - for epoch in range(start_epoch, config["num_epochs"]): - 
model.fit(X, Y, batch_size=20) - checkpoint = Checkpoint.from_dict( - dict(epoch=epoch, model_weights=model.get_weights()) - ) - session.report({}, checkpoint=checkpoint) - - trainer = TensorflowTrainer( - train_func, - train_loop_config={"num_epochs": 2}, - scaling_config=ScalingConfig(num_workers=2), - ) - # save a checkpoint - result = trainer.fit() - - # load a checkpoint - trainer = TensorflowTrainer( - train_func, - train_loop_config={"num_epochs": 5}, - scaling_config=ScalingConfig(num_workers=2), - resume_from_checkpoint=result.checkpoint, - ) - result = trainer.fit() - - print(result.checkpoint.to_dict()) - # {'epoch': 4, 'model_weights': [array([[-0.70056134], - # [-0.8839263 ], - # [-1.0043601 ], - # [-0.61634773]], dtype=float32), array([0.01889327], dtype=float32)], '_timestamp': 1656108446, '_preprocessor': None, '_current_checkpoint_id': 3} + + # wrap the model in DDP + model = ray.train.torch.prepare_model(model) + for epoch in range(start_epoch, config["num_epochs"]): + y = model.forward(X) + # compute loss + loss = criterion(y, Y) + # back-propagate loss + optimizer.zero_grad() + loss.backward() + optimizer.step() + state_dict = model.state_dict() + checkpoint = Checkpoint.from_dict( + dict(epoch=epoch, model_weights=state_dict) + ) + session.report({}, checkpoint=checkpoint) + + trainer = TorchTrainer( + train_func, + train_loop_config={"num_epochs": 2}, + scaling_config=ScalingConfig(num_workers=2), + ) + # save a checkpoint + result = trainer.fit() + + # load checkpoint + trainer = TorchTrainer( + train_func, + train_loop_config={"num_epochs": 4}, + scaling_config=ScalingConfig(num_workers=2), + resume_from_checkpoint=result.checkpoint, + ) + result = trainer.fit() + + print(result.checkpoint.to_dict()) + # {'epoch': 3, 'model_weights': OrderedDict([('bias', tensor([0.0902])), ('weight', tensor([[-0.1549, -0.0861, 0.4353, -0.4116]]))]), '_timestamp': 1656108265, '_preprocessor': None, '_current_checkpoint_id': 2} + + .. 
tab-item:: TensorFlow + + .. code-block:: python + :emphasize-lines: 15, 21, 22, 25, 26, 27, 30 + + from ray.air import session, Checkpoint, ScalingConfig + from ray.train.tensorflow import TensorflowTrainer + + import numpy as np + + def train_func(config): + import tensorflow as tf + n = 100 + # create a toy dataset + # data : X - dim = (n, 4) + # target : Y - dim = (n, 1) + X = np.random.normal(0, 1, size=(n, 4)) + Y = np.random.uniform(0, 1, size=(n, 1)) + + start_epoch = 0 + strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy() + + with strategy.scope(): + # toy neural network : 1-layer + model = tf.keras.Sequential([tf.keras.layers.Dense(1, activation="linear", input_shape=(4,))]) + checkpoint = session.get_checkpoint() + if checkpoint: + # assume that we have run the session.report() example + # and successfully save some model weights + checkpoint_dict = checkpoint.to_dict() + model.set_weights(checkpoint_dict.get("model_weights")) + start_epoch = checkpoint_dict.get("epoch", -1) + 1 + model.compile(optimizer="Adam", loss="mean_squared_error", metrics=["mse"]) + + for epoch in range(start_epoch, config["num_epochs"]): + model.fit(X, Y, batch_size=20) + checkpoint = Checkpoint.from_dict( + dict(epoch=epoch, model_weights=model.get_weights()) + ) + session.report({}, checkpoint=checkpoint) + + trainer = TensorflowTrainer( + train_func, + train_loop_config={"num_epochs": 2}, + scaling_config=ScalingConfig(num_workers=2), + ) + # save a checkpoint + result = trainer.fit() + + # load a checkpoint + trainer = TensorflowTrainer( + train_func, + train_loop_config={"num_epochs": 5}, + scaling_config=ScalingConfig(num_workers=2), + resume_from_checkpoint=result.checkpoint, + ) + result = trainer.fit() + + print(result.checkpoint.to_dict()) + # {'epoch': 4, 'model_weights': [array([[-0.70056134], + # [-0.8839263 ], + # [-1.0043601 ], + # [-0.61634773]], dtype=float32), array([0.01889327], dtype=float32)], '_timestamp': 1656108446, '_preprocessor': None, 
'_current_checkpoint_id': 3} .. _train-callbacks: @@ -921,20 +931,22 @@ You may also want to collect metrics from multiple workers. While Ray Train curr worker, you can use third-party libraries or distributed primitives of your machine learning framework to report metrics from multiple workers. -.. tabbed:: PyTorch +.. tab-set:: - Ray Train natively supports `TorchMetrics `_, which provides a collection of machine learning metrics for distributed, scalable PyTorch models. + .. tab-item:: PyTorch - Here is an example of reporting both the aggregated R2 score and mean train and validation loss from all workers. + Ray Train natively supports `TorchMetrics `_, which provides a collection of machine learning metrics for distributed, scalable PyTorch models. - .. literalinclude:: doc_code/torchmetrics_example.py - :language: python - :start-after: __start__ + Here is an example of reporting both the aggregated R2 score and mean train and validation loss from all workers. + + .. literalinclude:: doc_code/torchmetrics_example.py + :language: python + :start-after: __start__ -.. tabbed:: TensorFlow + .. tab-item:: TensorFlow - TensorFlow Keras automatically aggregates metrics from all workers. If you wish to have more - control over that, consider implementing a `custom training loop `_. + TensorFlow Keras automatically aggregates metrics from all workers. If you wish to have more + control over that, consider implementing a `custom training loop `_. .. Running on the cloud .. -------------------- @@ -1177,40 +1189,42 @@ Automatic Mixed Precision Automatic mixed precision (AMP) lets you train your models faster by using a lower precision datatype for operations like linear layers and convolutions. -.. tabbed:: PyTorch +.. tab-set:: - You can train your Torch model with AMP by: + .. tab-item:: PyTorch - 1. Adding :func:`ray.train.torch.accelerate` with ``amp=True`` to the top of your training function. - 2. 
Wrapping your optimizer with :func:`ray.train.torch.prepare_optimizer`. - 3. Replacing your backward call with :func:`ray.train.torch.backward`. + You can train your Torch model with AMP by: - .. code-block:: diff + 1. Adding :func:`ray.train.torch.accelerate` with ``amp=True`` to the top of your training function. + 2. Wrapping your optimizer with :func:`ray.train.torch.prepare_optimizer`. + 3. Replacing your backward call with :func:`ray.train.torch.backward`. - def train_func(): - + train.torch.accelerate(amp=True) + .. code-block:: diff - model = NeuralNetwork() - model = train.torch.prepare_model(model) + def train_func(): + + train.torch.accelerate(amp=True) - data_loader = DataLoader(my_dataset, batch_size=worker_batch_size) - data_loader = train.torch.prepare_data_loader(data_loader) + model = NeuralNetwork() + model = train.torch.prepare_model(model) - optimizer = torch.optim.SGD(model.parameters(), lr=0.001) - + optimizer = train.torch.prepare_optimizer(optimizer) + data_loader = DataLoader(my_dataset, batch_size=worker_batch_size) + data_loader = train.torch.prepare_data_loader(data_loader) - model.train() - for epoch in range(90): - for images, targets in dataloader: - optimizer.zero_grad() + optimizer = torch.optim.SGD(model.parameters(), lr=0.001) + + optimizer = train.torch.prepare_optimizer(optimizer) - outputs = model(images) - loss = torch.nn.functional.cross_entropy(outputs, targets) + model.train() + for epoch in range(90): + for images, targets in dataloader: + optimizer.zero_grad() - - loss.backward() - + train.torch.backward(loss) - optimizer.step() - ... + outputs = model(images) + loss = torch.nn.functional.cross_entropy(outputs, targets) + + - loss.backward() + + train.torch.backward(loss) + optimizer.step() + ... .. note:: The performance of AMP varies based on GPU architecture, model type, @@ -1222,25 +1236,27 @@ precision datatype for operations like linear layers and convolutions. Reproducibility --------------- -.. 
tabbed:: PyTorch +.. tab-set:: + + .. tab-item:: PyTorch - To limit sources of nondeterministic behavior, add - :func:`ray.train.torch.enable_reproducibility` to the top of your training - function. + To limit sources of nondeterministic behavior, add + :func:`ray.train.torch.enable_reproducibility` to the top of your training + function. - .. code-block:: diff + .. code-block:: diff - def train_func(): - + train.torch.enable_reproducibility() + def train_func(): + + train.torch.enable_reproducibility() - model = NeuralNetwork() - model = train.torch.prepare_model(model) + model = NeuralNetwork() + model = train.torch.prepare_model(model) - ... + ... - .. warning:: :func:`ray.train.torch.enable_reproducibility` can't guarantee - completely reproducible results across executions. To learn more, read - the `PyTorch notes on randomness `_. + .. warning:: :func:`ray.train.torch.enable_reproducibility` can't guarantee + completely reproducible results across executions. To learn more, read + the `PyTorch notes on randomness `_. .. import ray diff --git a/doc/source/train/gbdt.rst b/doc/source/train/gbdt.rst index ce69c6fbaa65..dcd9dba6110f 100644 --- a/doc/source/train/gbdt.rst +++ b/doc/source/train/gbdt.rst @@ -12,7 +12,7 @@ Just as in the original `xgboost.train() `__ functions, the training parameters are passed as the ``params`` dictionary. -.. tabbed:: XGBoost +.. tab-item:: XGBoost Run ``pip install -U xgboost_ray``. @@ -21,7 +21,7 @@ training parameters are passed as the ``params`` dictionary. :start-after: __xgboost_start__ :end-before: __xgboost_end__ -.. tabbed:: LightGBM +.. tab-item:: LightGBM Run ``pip install -U lightgbm_ray``. @@ -75,48 +75,49 @@ adjusting the :class:`ScalingConfig `. Here are some examples for common use-cases: +.. tab-set:: -.. tabbed:: Multi-node CPU + .. tab-item:: Multi-node CPU - Setup: 4 nodes with 8 CPUs each. + Setup: 4 nodes with 8 CPUs each. - Use-case: To utilize all resources in multi-node training. 
+ Use-case: To utilize all resources in multi-node training. - .. literalinclude:: doc_code/gbdt_user_guide.py - :language: python - :start-after: __scaling_cpu_start__ - :end-before: __scaling_cpu_end__ + .. literalinclude:: doc_code/gbdt_user_guide.py + :language: python + :start-after: __scaling_cpu_start__ + :end-before: __scaling_cpu_end__ - Note that we pass 0 CPUs for the trainer resources, so that all resources can - be allocated to the actual distributed training workers. + Note that we pass 0 CPUs for the trainer resources, so that all resources can + be allocated to the actual distributed training workers. -.. tabbed:: Single-node multi-GPU + .. tab-item:: Single-node multi-GPU - Setup: 1 node with 8 CPUs and 4 GPUs. + Setup: 1 node with 8 CPUs and 4 GPUs. - Use-case: If you have a single node with multiple GPUs, you need to use - distributed training to leverage all GPUs. + Use-case: If you have a single node with multiple GPUs, you need to use + distributed training to leverage all GPUs. - .. literalinclude:: doc_code/gbdt_user_guide.py - :language: python - :start-after: __scaling_gpu_start__ - :end-before: __scaling_gpu_end__ + .. literalinclude:: doc_code/gbdt_user_guide.py + :language: python + :start-after: __scaling_gpu_start__ + :end-before: __scaling_gpu_end__ -.. tabbed:: Multi-node multi-GPU + .. tab-item:: Multi-node multi-GPU - Setup: 4 node with 8 CPUs and 4 GPUs each. + Setup: 4 node with 8 CPUs and 4 GPUs each. - Use-case: If you have a multiple nodes with multiple GPUs, you need to - schedule one worker per GPU. + Use-case: If you have a multiple nodes with multiple GPUs, you need to + schedule one worker per GPU. - .. literalinclude:: doc_code/gbdt_user_guide.py - :language: python - :start-after: __scaling_gpumulti_start__ - :end-before: __scaling_gpumulti_end__ + .. 
literalinclude:: doc_code/gbdt_user_guide.py + :language: python + :start-after: __scaling_gpumulti_start__ + :end-before: __scaling_gpumulti_end__ - Note that you just have to adjust the number of workers - everything else - will be handled by Ray automatically. + Note that you just have to adjust the number of workers - everything else + will be handled by Ray automatically. How many remote actors should I use? diff --git a/doc/source/train/getting-started.rst b/doc/source/train/getting-started.rst index 0e9eead57a00..d1579ca5e43d 100644 --- a/doc/source/train/getting-started.rst +++ b/doc/source/train/getting-started.rst @@ -6,183 +6,185 @@ Getting Started with Distributed Model Training in Ray Train Ray Train offers multiple ``Trainers`` which implement scalable model training for different machine learning frameworks. Here are examples for some of the commonly used trainers: -.. tabbed:: XGBoost +.. tab-set:: - In this example we will train a model using distributed XGBoost. + .. tab-item:: XGBoost - First, we load the dataset from S3 using Ray Data and split it into a - train and validation dataset. + In this example we will train a model using distributed XGBoost. - .. literalinclude:: doc_code/gbdt_user_guide.py - :language: python - :start-after: __xgb_detail_intro_start__ - :end-before: __xgb_detail_intro_end__ + First, we load the dataset from S3 using Ray Data and split it into a + train and validation dataset. - In the :class:`ScalingConfig `, - we configure the number of workers to use: + .. literalinclude:: doc_code/gbdt_user_guide.py + :language: python + :start-after: __xgb_detail_intro_start__ + :end-before: __xgb_detail_intro_end__ - .. literalinclude:: doc_code/gbdt_user_guide.py - :language: python - :start-after: __xgb_detail_scaling_start__ - :end-before: __xgb_detail_scaling_end__ + In the :class:`ScalingConfig `, + we configure the number of workers to use: - We then instantiate our XGBoostTrainer by passing in: + .. 
literalinclude:: doc_code/gbdt_user_guide.py + :language: python + :start-after: __xgb_detail_scaling_start__ + :end-before: __xgb_detail_scaling_end__ - - The aforementioned ``ScalingConfig``. - - The ``label_column`` refers to the column name containing the labels in the Datastream - - The ``params`` are `XGBoost training parameters `__ + We then instantiate our XGBoostTrainer by passing in: - .. literalinclude:: doc_code/gbdt_user_guide.py - :language: python - :start-after: __xgb_detail_training_start__ - :end-before: __xgb_detail_training_end__ + - The aforementioned ``ScalingConfig``. + - The ``label_column`` refers to the column name containing the labels in the Datastream + - The ``params`` are `XGBoost training parameters `__ - Lastly, we call ``trainer.fit()`` to kick off training and obtain the results. + .. literalinclude:: doc_code/gbdt_user_guide.py + :language: python + :start-after: __xgb_detail_training_start__ + :end-before: __xgb_detail_training_end__ - .. literalinclude:: doc_code/gbdt_user_guide.py - :language: python - :start-after: __xgb_detail_fit_start__ - :end-before: __xgb_detail_fit_end__ + Lastly, we call ``trainer.fit()`` to kick off training and obtain the results. -.. tabbed:: LightGBM + .. literalinclude:: doc_code/gbdt_user_guide.py + :language: python + :start-after: __xgb_detail_fit_start__ + :end-before: __xgb_detail_fit_end__ - In this example we will train a model using distributed LightGBM. + .. tab-item:: LightGBM - First, we load the dataset from S3 using Ray Data and split it into a - train and validation dataset. + In this example we will train a model using distributed LightGBM. - .. literalinclude:: doc_code/gbdt_user_guide.py - :language: python - :start-after: __lgbm_detail_intro_start__ - :end-before: __lgbm_detail_intro_end__ + First, we load the dataset from S3 using Ray Data and split it into a + train and validation dataset. - In the :class:`ScalingConfig `, - we configure the number of workers to use: + .. 
literalinclude:: doc_code/gbdt_user_guide.py + :language: python + :start-after: __lgbm_detail_intro_start__ + :end-before: __lgbm_detail_intro_end__ - .. literalinclude:: doc_code/gbdt_user_guide.py - :language: python - :start-after: __xgb_detail_scaling_start__ - :end-before: __xgb_detail_scaling_end__ + In the :class:`ScalingConfig `, + we configure the number of workers to use: - We then instantiate our LightGBMTrainer by passing in: + .. literalinclude:: doc_code/gbdt_user_guide.py + :language: python + :start-after: __xgb_detail_scaling_start__ + :end-before: __xgb_detail_scaling_end__ - - The aforementioned ``ScalingConfig`` - - The ``label_column`` refers to the column name containing the labels in the Datastream - - The ``params`` are core `LightGBM training parameters `__ + We then instantiate our LightGBMTrainer by passing in: - .. literalinclude:: doc_code/gbdt_user_guide.py - :language: python - :start-after: __lgbm_detail_training_start__ - :end-before: __lgbm_detail_training_end__ + - The aforementioned ``ScalingConfig`` + - The ``label_column`` refers to the column name containing the labels in the Datastream + - The ``params`` are core `LightGBM training parameters `__ - And lastly we call ``trainer.fit()`` to kick off training and obtain the results. + .. literalinclude:: doc_code/gbdt_user_guide.py + :language: python + :start-after: __lgbm_detail_training_start__ + :end-before: __lgbm_detail_training_end__ - .. literalinclude:: doc_code/gbdt_user_guide.py - :language: python - :start-after: __lgbm_detail_fit_start__ - :end-before: __lgbm_detail_fit_end__ + And lastly we call ``trainer.fit()`` to kick off training and obtain the results. -.. tabbed:: PyTorch + .. literalinclude:: doc_code/gbdt_user_guide.py + :language: python + :start-after: __lgbm_detail_fit_start__ + :end-before: __lgbm_detail_fit_end__ - This example shows how you can use Ray Train with PyTorch. + .. tab-item:: PyTorch - First, set up your dataset and model. 
+ This example shows how you can use Ray Train with PyTorch. - .. literalinclude:: /../../python/ray/train/examples/pytorch/torch_quick_start.py - :language: python - :start-after: __torch_setup_begin__ - :end-before: __torch_setup_end__ + First, set up your dataset and model. + .. literalinclude:: /../../python/ray/train/examples/pytorch/torch_quick_start.py + :language: python + :start-after: __torch_setup_begin__ + :end-before: __torch_setup_end__ - Now define your single-worker PyTorch training function. - .. literalinclude:: /../../python/ray/train/examples/pytorch/torch_quick_start.py - :language: python - :start-after: __torch_single_begin__ - :end-before: __torch_single_end__ + Now define your single-worker PyTorch training function. - This training function can be executed with: + .. literalinclude:: /../../python/ray/train/examples/pytorch/torch_quick_start.py + :language: python + :start-after: __torch_single_begin__ + :end-before: __torch_single_end__ - .. literalinclude:: /../../python/ray/train/examples/pytorch/torch_quick_start.py - :language: python - :start-after: __torch_single_run_begin__ - :end-before: __torch_single_run_end__ - :dedent: + This training function can be executed with: - Now let's convert this to a distributed multi-worker training function! + .. literalinclude:: /../../python/ray/train/examples/pytorch/torch_quick_start.py + :language: python + :start-after: __torch_single_run_begin__ + :end-before: __torch_single_run_end__ + :dedent: - All you have to do is use the ``ray.train.torch.prepare_model`` and - ``ray.train.torch.prepare_data_loader`` utility functions to - easily setup your model & data for distributed training. - This will automatically wrap your model with ``DistributedDataParallel`` - and place it on the right device, and add ``DistributedSampler`` to your DataLoaders. + Now let's convert this to a distributed multi-worker training function! - .. 
literalinclude:: /../../python/ray/train/examples/pytorch/torch_quick_start.py - :language: python - :start-after: __torch_distributed_begin__ - :end-before: __torch_distributed_end__ + All you have to do is use the ``ray.train.torch.prepare_model`` and + ``ray.train.torch.prepare_data_loader`` utility functions to + easily setup your model & data for distributed training. + This will automatically wrap your model with ``DistributedDataParallel`` + and place it on the right device, and add ``DistributedSampler`` to your DataLoaders. - Then, instantiate a ``TorchTrainer`` - with 4 workers, and use it to run the new training function! + .. literalinclude:: /../../python/ray/train/examples/pytorch/torch_quick_start.py + :language: python + :start-after: __torch_distributed_begin__ + :end-before: __torch_distributed_end__ - .. literalinclude:: /../../python/ray/train/examples/pytorch/torch_quick_start.py - :language: python - :start-after: __torch_trainer_begin__ - :end-before: __torch_trainer_end__ - :dedent: + Then, instantiate a ``TorchTrainer`` + with 4 workers, and use it to run the new training function! - See :ref:`train-porting-code` for a more comprehensive example. + .. literalinclude:: /../../python/ray/train/examples/pytorch/torch_quick_start.py + :language: python + :start-after: __torch_trainer_begin__ + :end-before: __torch_trainer_end__ + :dedent: -.. tabbed:: TensorFlow + See :ref:`train-porting-code` for a more comprehensive example. - This example shows how you can use Ray Train to set up `Multi-worker training - with Keras `_. + .. tab-item:: TensorFlow - First, set up your dataset and model. + This example shows how you can use Ray Train to set up `Multi-worker training + with Keras `_. - .. literalinclude:: /../../python/ray/train/examples/tf/tensorflow_quick_start.py - :language: python - :start-after: __tf_setup_begin__ - :end-before: __tf_setup_end__ + First, set up your dataset and model. 
- Now define your single-worker TensorFlow training function. + .. literalinclude:: /../../python/ray/train/examples/tf/tensorflow_quick_start.py + :language: python + :start-after: __tf_setup_begin__ + :end-before: __tf_setup_end__ - .. literalinclude:: /../../python/ray/train/examples/tf/tensorflow_quick_start.py - :language: python - :start-after: __tf_single_begin__ - :end-before: __tf_single_end__ + Now define your single-worker TensorFlow training function. - This training function can be executed with: + .. literalinclude:: /../../python/ray/train/examples/tf/tensorflow_quick_start.py + :language: python + :start-after: __tf_single_begin__ + :end-before: __tf_single_end__ - .. literalinclude:: /../../python/ray/train/examples/tf/tensorflow_quick_start.py - :language: python - :start-after: __tf_single_run_begin__ - :end-before: __tf_single_run_end__ - :dedent: + This training function can be executed with: - Now let's convert this to a distributed multi-worker training function! - All you need to do is: + .. literalinclude:: /../../python/ray/train/examples/tf/tensorflow_quick_start.py + :language: python + :start-after: __tf_single_run_begin__ + :end-before: __tf_single_run_end__ + :dedent: - 1. Set the per-worker batch size - each worker will process the same size - batch as in the single-worker code. - 2. Choose your TensorFlow distributed training strategy. In this example - we use the ``MultiWorkerMirroredStrategy``. + Now let's convert this to a distributed multi-worker training function! + All you need to do is: - .. literalinclude:: /../../python/ray/train/examples/tf/tensorflow_quick_start.py - :language: python - :start-after: __tf_distributed_begin__ - :end-before: __tf_distributed_end__ + 1. Set the per-worker batch size - each worker will process the same size + batch as in the single-worker code. + 2. Choose your TensorFlow distributed training strategy. In this example + we use the ``MultiWorkerMirroredStrategy``. 
- Then, instantiate a ``TensorflowTrainer`` with 4 workers, - and use it to run the new training function! + .. literalinclude:: /../../python/ray/train/examples/tf/tensorflow_quick_start.py + :language: python + :start-after: __tf_distributed_begin__ + :end-before: __tf_distributed_end__ - .. literalinclude:: /../../python/ray/train/examples/tf/tensorflow_quick_start.py - :language: python - :start-after: __tf_trainer_begin__ - :end-before: __tf_trainer_end__ - :dedent: + Then, instantiate a ``TensorflowTrainer`` with 4 workers, + and use it to run the new training function! - See :ref:`train-porting-code` for a more comprehensive example. + .. literalinclude:: /../../python/ray/train/examples/tf/tensorflow_quick_start.py + :language: python + :start-after: __tf_trainer_begin__ + :end-before: __tf_trainer_end__ + :dedent: + + See :ref:`train-porting-code` for a more comprehensive example. Next Steps diff --git a/doc/source/train/key-concepts.rst b/doc/source/train/key-concepts.rst index f50996973ac2..3e99c4d048d6 100644 --- a/doc/source/train/key-concepts.rst +++ b/doc/source/train/key-concepts.rst @@ -28,44 +28,46 @@ Deep Learning, Tree-Based, and other Trainers There are three categories of built-in Trainers: -.. tabbed:: Deep Learning Trainers +.. tab-set:: - Ray Train supports the following deep learning trainers: + .. tab-item:: Deep Learning Trainers - - :class:`TorchTrainer ` - - :class:`TensorflowTrainer ` - - :class:`HorovodTrainer ` + Ray Train supports the following deep learning trainers: - For these trainers, you usually define your own training function that loads the model - and executes single-worker training steps. 
Refer to the following guides for more details: + - :class:`TorchTrainer ` + - :class:`TensorflowTrainer ` + - :class:`HorovodTrainer ` - - :ref:`Deep learning user guide ` - - :ref:`Quick overview of deep-learning trainers in the Ray AIR documentation ` + For these trainers, you usually define your own training function that loads the model + and executes single-worker training steps. Refer to the following guides for more details: -.. tabbed:: Tree-Based Trainers + - :ref:`Deep learning user guide ` + - :ref:`Quick overview of deep-learning trainers in the Ray AIR documentation ` - Tree-based trainers utilize gradient-based decision trees for training. The most popular libraries - for this are XGBoost and LightGBM. + .. tab-item:: Tree-Based Trainers - - :class:`XGBoostTrainer ` - - :class:`LightGBMTrainer ` + Tree-based trainers utilize gradient-based decision trees for training. The most popular libraries + for this are XGBoost and LightGBM. - For these trainers, you just pass a dataset and parameters. The training loop is configured - automatically. + - :class:`XGBoostTrainer ` + - :class:`LightGBMTrainer ` - - :ref:`XGBoost/LightGBM user guide ` - - :ref:`Quick overview of tree-based trainers in the Ray AIR documentation ` + For these trainers, you just pass a dataset and parameters. The training loop is configured + automatically. + - :ref:`XGBoost/LightGBM user guide ` + - :ref:`Quick overview of tree-based trainers in the Ray AIR documentation ` -.. tabbed:: Other Trainers - Some trainers don't fit into the other two categories, such as: + .. tab-item:: Other Trainers - - :class:`HuggingFaceTrainer ` for NLP - - :class:`RLTrainer ` for reinforcement learning - - :class:`SklearnTrainer ` for (non-distributed) training of sklearn models. 
+ Some trainers don't fit into the other two categories, such as: - - :ref:`Other trainers in the Ray AIR documentation ` + - :class:`HuggingFaceTrainer ` for NLP + - :class:`RLTrainer ` for reinforcement learning + - :class:`SklearnTrainer ` for (non-distributed) training of sklearn models. + + - :ref:`Other trainers in the Ray AIR documentation ` .. _train-key-concepts-config: diff --git a/doc/source/train/train.rst b/doc/source/train/train.rst index 75ed1f1211d8..e60033f329ad 100644 --- a/doc/source/train/train.rst +++ b/doc/source/train/train.rst @@ -45,37 +45,38 @@ There are three broad categories of Trainers that Train offers: Quick Start to Distributed Training with Ray Train -------------------------------------------------- -.. tabbed:: XGBoost +.. tab-set:: - .. literalinclude:: doc_code/gbdt_user_guide.py - :language: python - :start-after: __xgboost_start__ - :end-before: __xgboost_end__ + .. tab-item:: XGBoost -.. tabbed:: LightGBM + .. literalinclude:: doc_code/gbdt_user_guide.py + :language: python + :start-after: __xgboost_start__ + :end-before: __xgboost_end__ - .. literalinclude:: doc_code/gbdt_user_guide.py - :language: python - :start-after: __lightgbm_start__ - :end-before: __lightgbm_end__ + .. tab-item:: LightGBM -.. tabbed:: Pytorch + .. literalinclude:: doc_code/gbdt_user_guide.py + :language: python + :start-after: __lightgbm_start__ + :end-before: __lightgbm_end__ - .. literalinclude:: /ray-air/doc_code/torch_trainer.py - :language: python + .. tab-item:: Pytorch -.. tabbed:: Tensorflow + .. literalinclude:: /ray-air/doc_code/torch_trainer.py + :language: python - .. literalinclude:: /ray-air/doc_code/tf_starter.py - :language: python - :start-after: __air_tf_train_start__ - :end-before: __air_tf_train_end__ + .. tab-item:: Tensorflow -.. tabbed:: Horovod + .. literalinclude:: /ray-air/doc_code/tf_starter.py + :language: python + :start-after: __air_tf_train_start__ + :end-before: __air_tf_train_end__ - .. 
literalinclude:: /ray-air/doc_code/hvd_trainer.py - :language: python + .. tab-item:: Horovod + .. literalinclude:: /ray-air/doc_code/hvd_trainer.py + :language: python .. _train-framework-catalog: diff --git a/doc/source/tune/index.rst b/doc/source/tune/index.rst index 66b67ab6bcc9..59381e199ee8 100644 --- a/doc/source/tune/index.rst +++ b/doc/source/tune/index.rst @@ -15,76 +15,78 @@ Tune further integrates with a wide range of additional hyperparameter optimizat **Click on the following tabs to see code examples for various machine learning frameworks**: -.. tabbed:: Quickstart +.. tab-set:: - To run this example, install the following: ``pip install "ray[tune]"``. + .. tab-item:: Quickstart - In this quick-start example you `minimize` a simple function of the form ``f(x) = a**2 + b``, our `objective` function. - The closer ``a`` is to zero and the smaller ``b`` is, the smaller the total value of ``f(x)``. - We will define a so-called `search space` for ``a`` and ``b`` and let Ray Tune explore the space for good values. + To run this example, install the following: ``pip install "ray[tune]"``. - .. callout:: + In this quick-start example you `minimize` a simple function of the form ``f(x) = a**2 + b``, our `objective` function. + The closer ``a`` is to zero and the smaller ``b`` is, the smaller the total value of ``f(x)``. + We will define a so-called `search space` for ``a`` and ``b`` and let Ray Tune explore the space for good values. - .. literalinclude:: ../../../python/ray/tune/tests/example.py - :language: python - :start-after: __quick_start_begin__ - :end-before: __quick_start_end__ + .. callout:: - .. annotations:: - <1> Define an objective function. + .. literalinclude:: ../../../python/ray/tune/tests/example.py + :language: python + :start-after: __quick_start_begin__ + :end-before: __quick_start_end__ - <2> Define a search space. + .. annotations:: + <1> Define an objective function. - <3> Start a Tune run and print the best result. 
+ <2> Define a search space. + <3> Start a Tune run and print the best result. -.. tabbed:: Keras+Hyperopt - To tune your Keras models with Hyperopt, you wrap your model in an objective function whose ``config`` you - can access for selecting hyperparameters. - In the example below we only tune the ``activation`` parameter of the first layer of the model, but you can - tune any parameter of the model you want. - After defining the search space, you can simply initialize the ``HyperOptSearch`` object and pass it to ``run``. - It's important to tell Ray Tune which metric you want to optimize and whether you want to maximize or minimize it. + .. tab-item:: Keras+Hyperopt - .. callout:: + To tune your Keras models with Hyperopt, you wrap your model in an objective function whose ``config`` you + can access for selecting hyperparameters. + In the example below we only tune the ``activation`` parameter of the first layer of the model, but you can + tune any parameter of the model you want. + After defining the search space, you can simply initialize the ``HyperOptSearch`` object and pass it to ``run``. + It's important to tell Ray Tune which metric you want to optimize and whether you want to maximize or minimize it. - .. literalinclude:: doc_code/keras_hyperopt.py - :language: python - :start-after: __keras_hyperopt_start__ - :end-before: __keras_hyperopt_end__ + .. callout:: - .. annotations:: - <1> Wrap a Keras model in an objective function. + .. literalinclude:: doc_code/keras_hyperopt.py + :language: python + :start-after: __keras_hyperopt_start__ + :end-before: __keras_hyperopt_end__ - <2> Define a search space and initialize the search algorithm. + .. annotations:: + <1> Wrap a Keras model in an objective function. - <3> Start a Tune run that maximizes accuracy. + <2> Define a search space and initialize the search algorithm. -.. tabbed:: PyTorch+Optuna + <3> Start a Tune run that maximizes accuracy. 
- To tune your PyTorch models with Optuna, you wrap your model in an objective function whose ``config`` you - can access for selecting hyperparameters. - In the example below we only tune the ``momentum`` and learning rate (``lr``) parameters of the model's optimizer, - but you can tune any other model parameter you want. - After defining the search space, you can simply initialize the ``OptunaSearch`` object and pass it to ``run``. - It's important to tell Ray Tune which metric you want to optimize and whether you want to maximize or minimize it. - We stop tuning this training run after ``5`` iterations, but you can easily define other stopping rules as well. + .. tab-item:: PyTorch+Optuna + To tune your PyTorch models with Optuna, you wrap your model in an objective function whose ``config`` you + can access for selecting hyperparameters. + In the example below we only tune the ``momentum`` and learning rate (``lr``) parameters of the model's optimizer, + but you can tune any other model parameter you want. + After defining the search space, you can simply initialize the ``OptunaSearch`` object and pass it to ``run``. + It's important to tell Ray Tune which metric you want to optimize and whether you want to maximize or minimize it. + We stop tuning this training run after ``5`` iterations, but you can easily define other stopping rules as well. - .. callout:: - .. literalinclude:: doc_code/pytorch_optuna.py - :language: python - :start-after: __pytorch_optuna_start__ - :end-before: __pytorch_optuna_end__ + .. callout:: - .. annotations:: - <1> Wrap a PyTorch model in an objective function. + .. literalinclude:: doc_code/pytorch_optuna.py + :language: python + :start-after: __pytorch_optuna_start__ + :end-before: __pytorch_optuna_end__ - <2> Define a search space and initialize the search algorithm. + .. annotations:: + <1> Wrap a PyTorch model in an objective function. - <3> Start a Tune run that maximizes mean accuracy and stops after 5 iterations. 
+ <2> Define a search space and initialize the search algorithm. + + <3> Start a Tune run that maximizes mean accuracy and stops after 5 iterations. With Tune you can also launch a multi-node :ref:`distributed hyperparameter sweep ` in less than 10 lines of code. diff --git a/doc/source/tune/key-concepts.rst b/doc/source/tune/key-concepts.rst index ee559652943a..dde9f2ea5483 100644 --- a/doc/source/tune/key-concepts.rst +++ b/doc/source/tune/key-concepts.rst @@ -38,33 +38,35 @@ hyperparameters we want to tune to `minimize` the objective. Since the objective also has a variable ``x``, we need to test for different values of ``x``. Given concrete choices for ``a``, ``b`` and ``x`` we can evaluate the objective function and get a `score` to minimize. -.. tabbed:: Function API +.. tab-set:: - With the :ref:`the function-based API ` you create a function (here called ``trainable``) that - takes in a dictionary of hyperparameters. - This function computes a ``score`` in a "training loop" and `reports` this score back to Tune: + .. tab-item:: Function API - .. literalinclude:: doc_code/key_concepts.py - :language: python - :start-after: __function_api_start__ - :end-before: __function_api_end__ + With the :ref:`the function-based API ` you create a function (here called ``trainable``) that + takes in a dictionary of hyperparameters. + This function computes a ``score`` in a "training loop" and `reports` this score back to Tune: - Note that we use ``session.report(...)`` to report the intermediate ``score`` in the training loop, which can be useful - in many machine learning tasks. - If you just want to report the final ``score`` outside of this loop, you can simply return the score at the - end of the ``trainable`` function with ``return {"score": score}``. - You can also use ``yield {"score": score}`` instead of ``session.report()``. + .. 
literalinclude:: doc_code/key_concepts.py + :language: python + :start-after: __function_api_start__ + :end-before: __function_api_end__ -.. tabbed:: Class API + Note that we use ``session.report(...)`` to report the intermediate ``score`` in the training loop, which can be useful + in many machine learning tasks. + If you just want to report the final ``score`` outside of this loop, you can simply return the score at the + end of the ``trainable`` function with ``return {"score": score}``. + You can also use ``yield {"score": score}`` instead of ``session.report()``. - Here's an example of specifying the objective function using the :ref:`class-based API `: + .. tab-item:: Class API - .. literalinclude:: doc_code/key_concepts.py - :language: python - :start-after: __class_api_start__ - :end-before: __class_api_end__ + Here's an example of specifying the objective function using the :ref:`class-based API `: - .. tip:: ``session.report`` can't be used within a ``Trainable`` class. + .. literalinclude:: doc_code/key_concepts.py + :language: python + :start-after: __class_api_start__ + :end-before: __class_api_end__ + + .. tip:: ``session.report`` can't be used within a ``Trainable`` class. Learn more about the details of :ref:`Trainables here ` and :ref:`have a look at our examples `. diff --git a/doc/source/tune/tutorials/tune-output.rst b/doc/source/tune/tutorials/tune-output.rst index 8091267a8ad4..19f0674e1ab5 100644 --- a/doc/source/tune/tutorials/tune-output.rst +++ b/doc/source/tune/tutorials/tune-output.rst @@ -191,73 +191,75 @@ You can save trial artifacts directly in the trainable, as shown below: .. tip:: Make sure that any logging calls or objects stay within scope of the Trainable. You may see pickling or other serialization errors or inconsistent logs otherwise. -.. tabbed:: Function API +.. tab-set:: - .. 
code-block:: python - - import logging_library # ex: mlflow, wandb - from ray.air import session - - def trainable(config): - logging_library.init( - name=trial_id, - id=trial_id, - resume=trial_id, - reinit=True, - allow_val_change=True) - logging_library.set_log_path(os.getcwd()) - - for step in range(100): - logging_library.log_model(...) - logging_library.log(results, step=step) - - # You can also just write to a file directly. - # The working directory is set to the trial directory, so - # you don't need to worry about multiple workers saving - # to the same location. - with open(f"./artifact_{step}.txt", "w") as f: - f.write("Artifact Data") + .. tab-item:: Function API - session.report(results) - - -.. tabbed:: Class API - - .. code-block:: python + .. code-block:: python - import logging_library # ex: mlflow, wandb - from ray import tune + import logging_library # ex: mlflow, wandb + from ray.air import session - class CustomLogging(tune.Trainable) - def setup(self, config): - trial_id = self.trial_id + def trainable(config): logging_library.init( name=trial_id, id=trial_id, resume=trial_id, reinit=True, - allow_val_change=True - ) + allow_val_change=True) logging_library.set_log_path(os.getcwd()) - def step(self): - logging_library.log_model(...) - - # You can also write to a file directly. - # The working directory is set to the trial directory, so - # you don't need to worry about multiple workers saving - # to the same location. - with open(f"./artifact_{self.iteration}.txt", "w") as f: - f.write("Artifact Data") - - def log_result(self, result): - res_dict = { - str(k): v - for k, v in result.items() - if (v and "config" not in k and not isinstance(v, str)) - } - step = result["training_iteration"] - logging_library.log(res_dict, step=step) + for step in range(100): + logging_library.log_model(...) + logging_library.log(results, step=step) + + # You can also just write to a file directly. 
+ # The working directory is set to the trial directory, so + # you don't need to worry about multiple workers saving + # to the same location. + with open(f"./artifact_{step}.txt", "w") as f: + f.write("Artifact Data") + + session.report(results) + + + .. tab-item:: Class API + + .. code-block:: python + + import logging_library # ex: mlflow, wandb + from ray import tune + + class CustomLogging(tune.Trainable) + def setup(self, config): + trial_id = self.trial_id + logging_library.init( + name=trial_id, + id=trial_id, + resume=trial_id, + reinit=True, + allow_val_change=True + ) + logging_library.set_log_path(os.getcwd()) + + def step(self): + logging_library.log_model(...) + + # You can also write to a file directly. + # The working directory is set to the trial directory, so + # you don't need to worry about multiple workers saving + # to the same location. + with open(f"./artifact_{self.iteration}.txt", "w") as f: + f.write("Artifact Data") + + def log_result(self, result): + res_dict = { + str(k): v + for k, v in result.items() + if (v and "config" not in k and not isinstance(v, str)) + } + step = result["training_iteration"] + logging_library.log(res_dict, step=step) In the code snippet above, ``logging_library`` refers to whatever 3rd party logging library you are using. diff --git a/doc/source/tune/tutorials/tune-stopping.rst b/doc/source/tune/tutorials/tune-stopping.rst index 72720cfff58f..cbe5fabddb15 100644 --- a/doc/source/tune/tutorials/tune-stopping.rst +++ b/doc/source/tune/tutorials/tune-stopping.rst @@ -48,59 +48,61 @@ In addition to manual stopping, Tune provides several ways to stop experiments p You can implement the stopping criteria using either a dictionary, a function, or a custom :class:`Stopper `. -.. tabbed:: Dictionary +.. tab-set:: - If a dictionary is passed in, the keys may be any field in the return result of ``session.report`` in the - Function API or ``step()`` in the Class API. + .. tab-item:: Dictionary - .. 
note:: + If a dictionary is passed in, the keys may be any field in the return result of ``session.report`` in the + Function API or ``step()`` in the Class API. - This includes :ref:`auto-filled metrics ` such as ``training_iteration``. + .. note:: - In the example below, each trial will be stopped either when it completes ``10`` iterations or when it - reaches a mean accuracy of ``0.8`` or more. + This includes :ref:`auto-filled metrics ` such as ``training_iteration``. - These metrics are assumed to be **increasing**, so the trial will stop once the reported metric has exceeded the threshold specified in the dictionary. + In the example below, each trial will be stopped either when it completes ``10`` iterations or when it + reaches a mean accuracy of ``0.8`` or more. - .. literalinclude:: /tune/doc_code/stopping.py - :language: python - :start-after: __stopping_dict_start__ - :end-before: __stopping_dict_end__ + These metrics are assumed to be **increasing**, so the trial will stop once the reported metric has exceeded the threshold specified in the dictionary. -.. tabbed:: User-defined Function + .. literalinclude:: /tune/doc_code/stopping.py + :language: python + :start-after: __stopping_dict_start__ + :end-before: __stopping_dict_end__ - For more flexibility, you can pass in a function instead. - If a function is passed in, it must take ``(trial_id: str, result: dict)`` as arguments and return a boolean - (``True`` if trial should be stopped and ``False`` otherwise). + .. tab-item:: User-defined Function - In the example below, each trial will be stopped either when it completes ``10`` iterations or when it - reaches a mean accuracy of ``0.8`` or more. + For more flexibility, you can pass in a function instead. + If a function is passed in, it must take ``(trial_id: str, result: dict)`` as arguments and return a boolean + (``True`` if trial should be stopped and ``False`` otherwise). - .. 
literalinclude:: /tune/doc_code/stopping.py - :language: python - :start-after: __stopping_fn_start__ - :end-before: __stopping_fn_end__ + In the example below, each trial will be stopped either when it completes ``10`` iterations or when it + reaches a mean accuracy of ``0.8`` or more. -.. tabbed:: Custom Stopper Class + .. literalinclude:: /tune/doc_code/stopping.py + :language: python + :start-after: __stopping_fn_start__ + :end-before: __stopping_fn_end__ - Finally, you can implement the :class:`~ray.tune.stopper.Stopper` interface for - stopping individual trials or even entire experiments based on custom stopping - criteria. For example, the following example stops all trials after the criteria - is achieved by any individual trial and prevents new ones from starting: + .. tab-item:: Custom Stopper Class - .. literalinclude:: /tune/doc_code/stopping.py - :language: python - :start-after: __stopping_cls_start__ - :end-before: __stopping_cls_end__ + Finally, you can implement the :class:`~ray.tune.stopper.Stopper` interface for + stopping individual trials or even entire experiments based on custom stopping + criteria. For example, the following example stops all trials after the criteria + is achieved by any individual trial and prevents new ones from starting: - In the example, once any trial reaches a ``mean_accuracy`` of 0.8 or more, all trials will stop. + .. literalinclude:: /tune/doc_code/stopping.py + :language: python + :start-after: __stopping_cls_start__ + :end-before: __stopping_cls_end__ - .. note:: + In the example, once any trial reaches a ``mean_accuracy`` of 0.8 or more, all trials will stop. - When returning ``True`` from ``stop_all``, currently running trials will not stop immediately. - They will stop after finishing their ongoing training iteration (after ``session.report`` or ``step``). + .. note:: - Ray Tune comes with a set of out-of-the-box stopper classes. See the :ref:`Stopper ` documentation. 
+ When returning ``True`` from ``stop_all``, currently running trials will not stop immediately. + They will stop after finishing their ongoing training iteration (after ``session.report`` or ``step``). + + Ray Tune comes with a set of out-of-the-box stopper classes. See the :ref:`Stopper ` documentation. Stop trials after a certain amount of time diff --git a/doc/source/tune/tutorials/tune-trial-checkpoints.rst b/doc/source/tune/tutorials/tune-trial-checkpoints.rst index 976e1377e5b8..07cd22f2247f 100644 --- a/doc/source/tune/tutorials/tune-trial-checkpoints.rst +++ b/doc/source/tune/tutorials/tune-trial-checkpoints.rst @@ -27,19 +27,21 @@ To create an AIR checkpoint, one can either use :meth:`~ray.air.checkpoint.Check checkpoint is synced to driver node or the cloud. We are planning to work on it to address the issue. -.. tabbed:: Checkpoint a dictionary +.. tab-set:: - .. literalinclude:: /tune/doc_code/trial_checkpoint.py - :language: python - :start-after: __function_api_checkpointing_start__ - :end-before: __function_api_checkpointing_end__ + .. tab-item:: Checkpoint a dictionary -.. tabbed:: Checkpoint a directory + .. literalinclude:: /tune/doc_code/trial_checkpoint.py + :language: python + :start-after: __function_api_checkpointing_start__ + :end-before: __function_api_checkpointing_end__ - .. literalinclude:: /tune/doc_code/trial_checkpoint.py - :language: python - :start-after: __function_api_checkpointing_from_dir_start__ - :end-before: __function_api_checkpointing_from_dir_end__ + .. tab-item:: Checkpoint a directory + + .. 
literalinclude:: /tune/doc_code/trial_checkpoint.py + :language: python + :start-after: __function_api_checkpointing_from_dir_start__ + :end-before: __function_api_checkpointing_from_dir_end__ In the above code snippet: From 172f78f496478be59bc2811e39cc8d9ac1f5bffa Mon Sep 17 00:00:00 2001 From: Yi Cheng <74173148+iycheng@users.noreply.github.com> Date: Tue, 25 Apr 2023 23:24:46 -0700 Subject: [PATCH 102/424] [core] Add more message when accessing a dead actor. (#34697) ## Why are these changes needed? When any ray worker access the actor which doesn't belong to the cluster, it'll crash. This is really bad user experience because: - The user will lose ray worker or driver. - The error message is not very useful. This PR fixed it by avoiding crash and throw exception instead. When the user accidentially access an actor which doesn't belong to this cluster, it'll get an Exception with message: `It might be dead or it's from a different cluster`. This PR also deleted some code that's not useful. 
--- BUILD.bazel | 25 ------- .../ray/runtime/task/native_task_submitter.cc | 13 ++-- python/ray/_raylet.pyx | 40 ++++++----- python/ray/includes/common.pxd | 1 + python/ray/includes/libcoreworker.pxd | 5 +- python/ray/tests/test_failure_4.py | 19 ++++++ src/ray/common/status.h | 7 ++ src/ray/core_worker/core_worker.cc | 26 +++++-- src/ray/core_worker/core_worker.h | 14 ++-- ...io_ray_runtime_task_NativeTaskSubmitter.cc | 45 ++++++------ src/ray/core_worker/test/core_worker_test.cc | 38 +++++++---- .../transport/direct_actor_task_submitter.cc | 13 ++++ .../transport/direct_actor_task_submitter.h | 26 +++++-- src/ray/internal/internal.cc | 68 ------------------- src/ray/internal/internal.h | 46 ------------- 15 files changed, 163 insertions(+), 223 deletions(-) delete mode 100644 src/ray/internal/internal.cc delete mode 100644 src/ray/internal/internal.h diff --git a/BUILD.bazel b/BUILD.bazel index c7b30fc61892..b7cb26e07bd0 100644 --- a/BUILD.bazel +++ b/BUILD.bazel @@ -479,29 +479,6 @@ cc_library( ], ) -# This header is used to warp some internal code so we can reduce suspicious -# symbols export. 
-cc_library( - name = "exported_internal", - srcs = glob( - [ - "src/ray/internal/internal.cc", - ], - ), - hdrs = glob( - [ - "src/ray/internal/internal.h", - ], - ), - copts = COPTS, - strip_include_prefix = "src", - visibility = ["//visibility:public"], - deps = [ - ":core_worker_lib", - ], - alwayslink = 1, -) - cc_binary( name = "raylet", srcs = ["src/ray/raylet/main.cc"], @@ -2813,7 +2790,6 @@ pyx_library( ), deps = [ "//:core_worker_lib", - "//:exported_internal", "//:global_state_accessor_lib", "//:ray_util", "//:raylet_lib", @@ -2848,7 +2824,6 @@ cc_binary( visibility = ["//java:__subpackages__"], deps = [ "//:core_worker_lib", - "//:exported_internal", "//:global_state_accessor_lib", "//:src/ray/ray_exported_symbols.lds", "//:src/ray/ray_version_script.lds", diff --git a/cpp/src/ray/runtime/task/native_task_submitter.cc b/cpp/src/ray/runtime/task/native_task_submitter.cc index 3e3228a50da4..e69cf61fa164 100644 --- a/cpp/src/ray/runtime/task/native_task_submitter.cc +++ b/cpp/src/ray/runtime/task/native_task_submitter.cc @@ -66,11 +66,14 @@ ObjectID NativeTaskSubmitter::Submit(InvocationSpec &invocation, options.name = call_options.name; options.resources = call_options.resources; options.serialized_runtime_env_info = call_options.serialized_runtime_env_info; - std::optional> return_refs; + std::vector return_refs; if (invocation.task_type == TaskType::ACTOR_TASK) { - return_refs = core_worker.SubmitActorTask( - invocation.actor_id, BuildRayFunction(invocation), invocation.args, options); - if (!return_refs.has_value()) { + auto status = core_worker.SubmitActorTask(invocation.actor_id, + BuildRayFunction(invocation), + invocation.args, + options, + return_refs); + if (!status.ok()) { return ObjectID::Nil(); } } else { @@ -95,7 +98,7 @@ ObjectID NativeTaskSubmitter::Submit(InvocationSpec &invocation, ""); } std::vector return_ids; - for (const auto &ref : return_refs.value()) { + for (const auto &ref : return_refs) { 
return_ids.push_back(ObjectID::FromBinary(ref.object_id())); } return return_ids[0]; diff --git a/python/ray/_raylet.pyx b/python/ray/_raylet.pyx index 89a80aff8bef..9e2f06e31b77 100644 --- a/python/ray/_raylet.pyx +++ b/python/ray/_raylet.pyx @@ -2406,7 +2406,7 @@ cdef class CoreWorker: unordered_map[c_string, double] c_resources CRayFunction ray_function c_vector[unique_ptr[CTaskArg]] args_vector - optional[c_vector[CObjectReference]] return_refs + c_vector[CObjectReference] return_refs c_vector[CObjectID] incremented_put_arg_ids with self.profile_event(b"submit_task"): @@ -2419,12 +2419,13 @@ cdef class CoreWorker: &incremented_put_arg_ids) with nogil: - return_refs = CCoreWorkerProcess.GetCoreWorker().SubmitActorTask( + status = CCoreWorkerProcess.GetCoreWorker().SubmitActorTask( c_actor_id, ray_function, args_vector, CTaskOptions( - name, num_returns, c_resources, concurrency_group_name)) + name, num_returns, c_resources, concurrency_group_name), + return_refs) # These arguments were serialized and put into the local object # store during task submission. The backend increments their local # ref count initially to ensure that they remain in scope until we @@ -2434,28 +2435,25 @@ cdef class CoreWorker: CCoreWorkerProcess.GetCoreWorker().RemoveLocalReference( put_arg_id) - if return_refs.has_value(): + if status.ok(): # The initial local reference is already acquired internally # when adding the pending task. - return VectorToObjectRefs(return_refs.value(), + return VectorToObjectRefs(return_refs, skip_adding_local_ref=True) else: - actor = self.get_actor_handle(actor_id) - actor_handle = (CCoreWorkerProcess.GetCoreWorker() - .GetActorHandle(c_actor_id)) - raise PendingCallsLimitExceeded("The task {} could not be " - "submitted to {} because more " - "than {} tasks are queued on " - "the actor. 
This limit " - "can be adjusted with the " - "`max_pending_calls` actor " - "option.".format( - function_descriptor - .function_name, - repr(actor), - (dereference(actor_handle) - .MaxPendingCalls()) - )) + if status.IsOutOfResource(): + actor = self.get_actor_handle(actor_id) + actor_handle = (CCoreWorkerProcess.GetCoreWorker() + .GetActorHandle(c_actor_id)) + raise PendingCallsLimitExceeded( + f"The task {function_descriptor.function_name} could not be " + f"submitted to {repr(actor)} because more than" + f" {(dereference(actor_handle).MaxPendingCalls())}" + " tasks are queued on the actor. This limit can be adjusted" + " with the `max_pending_calls` actor option.") + else: + raise Exception(f"Failed to submit task to actor {actor_id} " + f"due to {status.message()}") def kill_actor(self, ActorID actor_id, c_bool no_restart): cdef: diff --git a/python/ray/includes/common.pxd b/python/ray/includes/common.pxd index 5d5f3ab593ea..8de7c38c4cd2 100644 --- a/python/ray/includes/common.pxd +++ b/python/ray/includes/common.pxd @@ -117,6 +117,7 @@ cdef extern from "ray/common/status.h" namespace "ray" nogil: c_bool IsNotFound() c_bool IsObjectUnknownOwner() c_bool IsRpcError() + c_bool IsOutOfResource() c_string ToString() c_string CodeAsString() diff --git a/python/ray/includes/libcoreworker.pxd b/python/ray/includes/libcoreworker.pxd index 41e29f58012e..c0aba4ca6a45 100644 --- a/python/ray/includes/libcoreworker.pxd +++ b/python/ray/includes/libcoreworker.pxd @@ -117,10 +117,11 @@ cdef extern from "ray/core_worker/core_worker.h" nogil: const CPlacementGroupID &placement_group_id) CRayStatus WaitPlacementGroupReady( const CPlacementGroupID &placement_group_id, int64_t timeout_seconds) - optional[c_vector[CObjectReference]] SubmitActorTask( + CRayStatus SubmitActorTask( const CActorID &actor_id, const CRayFunction &function, const c_vector[unique_ptr[CTaskArg]] &args, - const CTaskOptions &options) + const CTaskOptions &options, + c_vector[CObjectReference]&) CRayStatus 
KillActor( const CActorID &actor_id, c_bool force_kill, c_bool no_restart) diff --git a/python/ray/tests/test_failure_4.py b/python/ray/tests/test_failure_4.py index f56f2e802b0d..17d5a9940d49 100644 --- a/python/ray/tests/test_failure_4.py +++ b/python/ray/tests/test_failure_4.py @@ -700,6 +700,25 @@ def sleeper(): assert raylet["NodeManagerAddress"] in message +def test_accessing_actor_after_cluster_crashed(shutdown_only): + ray.init() + + @ray.remote + class A: + def f(self): + return + + a = A.remote() + + ray.get(a.f.remote()) + + ray.shutdown() + ray.init() + with pytest.raises(Exception) as exc_info: + ray.get(a.f.remote()) + assert "It might be dead or it's from a different cluster" in exc_info.value.args[0] + + if __name__ == "__main__": import os diff --git a/src/ray/common/status.h b/src/ray/common/status.h index c0477e652383..bda9860ddc4a 100644 --- a/src/ray/common/status.h +++ b/src/ray/common/status.h @@ -114,6 +114,7 @@ enum class StatusCode : char { OutOfDisk = 28, ObjectUnknownOwner = 29, RpcError = 30, + OutOfResource = 31 }; #if defined(__clang__) @@ -241,6 +242,10 @@ class RAY_EXPORT Status { return Status(StatusCode::RpcError, msg, rpc_code); } + static Status OutOfResource(const std::string &msg) { + return Status(StatusCode::OutOfResource, msg); + } + static StatusCode StringToCode(const std::string &str); // Returns true iff the status indicates success. @@ -287,6 +292,8 @@ class RAY_EXPORT Status { bool IsRpcError() const { return code() == StatusCode::RpcError; } + bool IsOutOfResource() const { return code() == StatusCode::OutOfResource; } + // Return a string representation of this status suitable for printing. // Returns the string "OK" for success. 
std::string ToString() const; diff --git a/src/ray/core_worker/core_worker.cc b/src/ray/core_worker/core_worker.cc index 329fda454b12..708fad19af45 100644 --- a/src/ray/core_worker/core_worker.cc +++ b/src/ray/core_worker/core_worker.cc @@ -20,6 +20,7 @@ #include +#include "absl/strings/str_format.h" #include "boost/fiber/all.hpp" #include "ray/common/bundle_spec.h" #include "ray/common/ray_config.h" @@ -2175,17 +2176,27 @@ Status CoreWorker::WaitPlacementGroupReady(const PlacementGroupID &placement_gro } } -std::optional> CoreWorker::SubmitActorTask( - const ActorID &actor_id, - const RayFunction &function, - const std::vector> &args, - const TaskOptions &task_options) { +Status CoreWorker::SubmitActorTask(const ActorID &actor_id, + const RayFunction &function, + const std::vector> &args, + const TaskOptions &task_options, + std::vector &task_returns) { absl::ReleasableMutexLock lock(&actor_task_mutex_); + task_returns.clear(); + if (!direct_actor_submitter_->CheckActorExists(actor_id)) { + std::string err_msg = absl::StrFormat( + "Can't find actor %s. It might be dead or it's from a different cluster", + actor_id.Hex()); + return Status::NotFound(std::move(err_msg)); + } /// Check whether backpressure may happen at the very beginning of submitting a task. if (direct_actor_submitter_->PendingTasksFull(actor_id)) { RAY_LOG(DEBUG) << "Back pressure occurred while submitting the task to " << actor_id << ". " << direct_actor_submitter_->DebugString(actor_id); - return std::nullopt; + return Status::OutOfResource(absl::StrFormat( + "Too many tasks (%d) pending to be executed for actor %s. 
Please try later", + direct_actor_submitter_->NumPendingTasks(actor_id), + actor_id.Hex())); } auto actor_handle = actor_manager_->GetActorHandle(actor_id); @@ -2248,7 +2259,8 @@ std::optional> CoreWorker::SubmitActorTask( rpc_address_, task_spec, CurrentCallSite(), actor_handle->MaxTaskRetries()); RAY_CHECK_OK(direct_actor_submitter_->SubmitTask(task_spec)); } - return {std::move(returned_refs)}; + task_returns = std::move(returned_refs); + return Status::OK(); } Status CoreWorker::CancelTask(const ObjectID &object_id, diff --git a/src/ray/core_worker/core_worker.h b/src/ray/core_worker/core_worker.h index 088fd620644e..e4e694610e1a 100644 --- a/src/ray/core_worker/core_worker.h +++ b/src/ray/core_worker/core_worker.h @@ -806,12 +806,14 @@ class CoreWorker : public rpc::CoreWorkerServiceHandler { /// \param[in] function The remote function to execute. /// \param[in] args Arguments of this task. /// \param[in] task_options Options for this task. - /// \return ObjectRefs returned by this task. - std::optional> SubmitActorTask( - const ActorID &actor_id, - const RayFunction &function, - const std::vector> &args, - const TaskOptions &task_options); + /// \param[out] task_returns The object returned by this task + /// + /// \return Status of this submission + Status SubmitActorTask(const ActorID &actor_id, + const RayFunction &function, + const std::vector> &args, + const TaskOptions &task_options, + std::vector &task_returns); /// Tell an actor to exit immediately, without completing outstanding work. 
/// diff --git a/src/ray/core_worker/lib/java/io_ray_runtime_task_NativeTaskSubmitter.cc b/src/ray/core_worker/lib/java/io_ray_runtime_task_NativeTaskSubmitter.cc index a5542eb700fb..13a420f16972 100644 --- a/src/ray/core_worker/lib/java/io_ray_runtime_task_NativeTaskSubmitter.cc +++ b/src/ray/core_worker/lib/java/io_ray_runtime_task_NativeTaskSubmitter.cc @@ -187,8 +187,8 @@ inline ActorCreationOptions ToActorCreationOptions(JNIEnv *env, max_restarts = env->GetIntField(actorCreationOptions, java_actor_creation_options_max_restarts); - max_task_retries = - env->GetIntField(actorCreationOptions, java_actor_creation_options_max_task_retries); + max_task_retries = env->GetIntField(actorCreationOptions, + java_actor_creation_options_max_task_retries); jobject java_resources = env->GetObjectField(actorCreationOptions, java_base_task_options_resources); resources = ToResources(env, java_resources); @@ -278,22 +278,21 @@ inline ActorCreationOptions ToActorCreationOptions(JNIEnv *env, placement_options.second); placement_group_scheduling_strategy->set_placement_group_capture_child_tasks(false); } - ActorCreationOptions actor_creation_options{ - max_restarts, - max_task_retries, - static_cast(max_concurrency), - resources, - resources, - dynamic_worker_options, - is_detached, - name, - ray_namespace, - is_async, - /*scheduling_strategy=*/scheduling_strategy, - serialized_runtime_env, - concurrency_groups, - /*execute_out_of_order*/ false, - max_pending_calls}; + ActorCreationOptions actor_creation_options{max_restarts, + max_task_retries, + static_cast(max_concurrency), + resources, + resources, + dynamic_worker_options, + is_detached, + name, + ray_namespace, + is_async, + /*scheduling_strategy=*/scheduling_strategy, + serialized_runtime_env, + concurrency_groups, + /*execute_out_of_order*/ false, + max_pending_calls}; return actor_creation_options; } @@ -439,10 +438,10 @@ Java_io_ray_runtime_task_NativeTaskSubmitter_nativeSubmitActorTask( auto task_args = ToTaskArgs(env, 
args); RAY_CHECK(callOptions != nullptr); auto task_options = ToTaskOptions(env, numReturns, callOptions); - - auto return_refs = CoreWorkerProcess::GetCoreWorker().SubmitActorTask( - actor_id, ray_function, task_args, task_options); - if (!return_refs.has_value()) { + std::vector return_refs; + auto status = CoreWorkerProcess::GetCoreWorker().SubmitActorTask( + actor_id, ray_function, task_args, task_options, return_refs); + if (!status.ok()) { std::stringstream ss; ss << "The task " << ray_function.GetFunctionDescriptor()->ToString() << " could not be submitted to " << actor_id; @@ -456,7 +455,7 @@ Java_io_ray_runtime_task_NativeTaskSubmitter_nativeSubmitActorTask( } std::vector return_ids; - for (const auto &ref : return_refs.value()) { + for (const auto &ref : return_refs) { return_ids.push_back(ObjectID::FromBinary(ref.object_id())); } diff --git a/src/ray/core_worker/test/core_worker_test.cc b/src/ray/core_worker/test/core_worker_test.cc index cba46a14d733..31a97db7bd4f 100644 --- a/src/ray/core_worker/test/core_worker_test.cc +++ b/src/ray/core_worker/test/core_worker_test.cc @@ -202,10 +202,10 @@ int CoreWorkerTest::GetActorPid(const ActorID &actor_id, TaskOptions options{"", 1, resources}; RayFunction func{Language::PYTHON, FunctionDescriptorBuilder::BuildPython("GetWorkerPid", "", "", "")}; - - auto return_ids = ObjectRefsToIds(CoreWorkerProcess::GetCoreWorker() - .SubmitActorTask(actor_id, func, args, options) - .value()); + std::vector task_returns; + auto status = CoreWorkerProcess::GetCoreWorker().SubmitActorTask( + actor_id, func, args, options, task_returns); + auto return_ids = ObjectRefsToIds(task_returns); std::vector> results; RAY_CHECK_OK(CoreWorkerProcess::GetCoreWorker().Get(return_ids, -1, &results)); @@ -298,8 +298,10 @@ void CoreWorkerTest::TestActorTask(std::unordered_map &reso Language::PYTHON, FunctionDescriptorBuilder::BuildPython("MergeInputArgsAsOutput", "", "", "")); - auto return_ids = - 
ObjectRefsToIds(driver.SubmitActorTask(actor_id, func, args, options).value()); + std::vector task_returns; + auto status = CoreWorkerProcess::GetCoreWorker().SubmitActorTask( + actor_id, func, args, options, task_returns); + auto return_ids = ObjectRefsToIds(task_returns); ASSERT_EQ(return_ids.size(), 1); std::vector> results; @@ -344,8 +346,10 @@ void CoreWorkerTest::TestActorTask(std::unordered_map &reso RayFunction func( Language::PYTHON, FunctionDescriptorBuilder::BuildPython("MergeInputArgsAsOutput", "", "", "")); - auto return_ids = - ObjectRefsToIds(driver.SubmitActorTask(actor_id, func, args, options).value()); + std::vector task_returns; + auto status = CoreWorkerProcess::GetCoreWorker().SubmitActorTask( + actor_id, func, args, options, task_returns); + auto return_ids = ObjectRefsToIds(task_returns); ASSERT_EQ(return_ids.size(), 1); @@ -409,8 +413,10 @@ void CoreWorkerTest::TestActorRestart( Language::PYTHON, FunctionDescriptorBuilder::BuildPython("MergeInputArgsAsOutput", "", "", "")); - auto return_ids = - ObjectRefsToIds(driver.SubmitActorTask(actor_id, func, args, options).value()); + std::vector task_returns; + auto status = CoreWorkerProcess::GetCoreWorker().SubmitActorTask( + actor_id, func, args, options, task_returns); + auto return_ids = ObjectRefsToIds(task_returns); ASSERT_EQ(return_ids.size(), 1); // Verify if it's expected data. 
std::vector> results; @@ -453,8 +459,10 @@ void CoreWorkerTest::TestActorFailure( Language::PYTHON, FunctionDescriptorBuilder::BuildPython("MergeInputArgsAsOutput", "", "", "")); - auto return_ids = - ObjectRefsToIds(driver.SubmitActorTask(actor_id, func, args, options).value()); + std::vector task_returns; + auto status = CoreWorkerProcess::GetCoreWorker().SubmitActorTask( + actor_id, func, args, options, task_returns); + auto return_ids = ObjectRefsToIds(task_returns); ASSERT_EQ(return_ids.size(), 1); all_results.emplace_back(std::make_pair(return_ids[0], buffer1)); @@ -611,8 +619,10 @@ TEST_F(SingleNodeTest, TestDirectActorTaskSubmissionPerf) { Language::PYTHON, FunctionDescriptorBuilder::BuildPython("MergeInputArgsAsOutput", "", "", "")); - auto return_ids = - ObjectRefsToIds(driver.SubmitActorTask(actor_id, func, args, options).value()); + std::vector task_returns; + auto status = CoreWorkerProcess::GetCoreWorker().SubmitActorTask( + actor_id, func, args, options, task_returns); + auto return_ids = ObjectRefsToIds(task_returns); ASSERT_EQ(return_ids.size(), 1); object_ids.emplace_back(return_ids[0]); } diff --git a/src/ray/core_worker/transport/direct_actor_task_submitter.cc b/src/ray/core_worker/transport/direct_actor_task_submitter.cc index cab04a6cebe5..0451e5c0ae1a 100644 --- a/src/ray/core_worker/transport/direct_actor_task_submitter.cc +++ b/src/ray/core_worker/transport/direct_actor_task_submitter.cc @@ -599,6 +599,19 @@ bool CoreWorkerDirectActorTaskSubmitter::PendingTasksFull(const ActorID &actor_i it->second.cur_pending_calls >= it->second.max_pending_calls; } +size_t CoreWorkerDirectActorTaskSubmitter::NumPendingTasks( + const ActorID &actor_id) const { + absl::MutexLock lock(&mu_); + auto it = client_queues_.find(actor_id); + RAY_CHECK(it != client_queues_.end()); + return it->second.cur_pending_calls; +} + +bool CoreWorkerDirectActorTaskSubmitter::CheckActorExists(const ActorID &actor_id) const { + absl::MutexLock lock(&mu_); + return 
client_queues_.find(actor_id) != client_queues_.end(); +} + std::string CoreWorkerDirectActorTaskSubmitter::DebugString( const ActorID &actor_id) const { absl::MutexLock lock(&mu_); diff --git a/src/ray/core_worker/transport/direct_actor_task_submitter.h b/src/ray/core_worker/transport/direct_actor_task_submitter.h index fa28fc485824..add2bd2fda91 100644 --- a/src/ray/core_worker/transport/direct_actor_task_submitter.h +++ b/src/ray/core_worker/transport/direct_actor_task_submitter.h @@ -90,6 +90,7 @@ class CoreWorkerDirectActorTaskSubmitter /// /// \param[in] actor_id The actor for whom to add a queue. /// \param[in] max_pending_calls The max pending calls for the actor to be added. + /// \param[in] execute_out_of_order Whether to execute tasks out of order. /// \param[in] fail_if_actor_unreachable Whether to fail newly submitted tasks /// immediately when the actor is unreachable. void AddActorQueueIfNotExists(const ActorID &actor_id, @@ -151,12 +152,31 @@ class CoreWorkerDirectActorTaskSubmitter /// \return Whether the corresponding client queue is full or not. bool PendingTasksFull(const ActorID &actor_id) const; + /// Get the number of pending tasks in the queue. + /// + /// \param[in] actor_id Actor id. + /// \return The number of pending tasks in the queue. + size_t NumPendingTasks(const ActorID &actor_id) const; + + /// Check whether the actor exists + /// + /// \param[in] actor_id Actor id. + /// + /// \return Return true if the actor exists. + bool CheckActorExists(const ActorID &actor_id) const; + /// Returns debug string for class. /// /// \param[in] actor_id The actor whose debug string to return. /// \return string. std::string DebugString(const ActorID &actor_id) const; + /// Whether the specified actor is alive. + /// + /// \param[in] actor_id The actor ID. + /// \return Whether this actor is alive. 
+ bool IsActorAlive(const ActorID &actor_id) const; + private: /// A helper function to get task finisher without holding mu_ /// We should use this function when access @@ -280,12 +300,6 @@ class CoreWorkerDirectActorTaskSubmitter const absl::flat_hash_map> &inflight_task_callbacks) LOCKS_EXCLUDED(mu_); - /// Whether the specified actor is alive. - /// - /// \param[in] actor_id The actor ID. - /// \return Whether this actor is alive. - bool IsActorAlive(const ActorID &actor_id) const; - /// Pool for producing new core worker clients. rpc::CoreWorkerClientPool &core_worker_client_pool_; diff --git a/src/ray/internal/internal.cc b/src/ray/internal/internal.cc deleted file mode 100644 index 7821c2b1ff86..000000000000 --- a/src/ray/internal/internal.cc +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright 2020 The Ray Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -#include "ray/internal/internal.h" - -#include "ray/core_worker/core_worker.h" - -namespace ray { -namespace internal { - -using ray::core::CoreWorkerProcess; -using ray::core::TaskOptions; - -std::vector SendInternal(const ActorID &peer_actor_id, - std::shared_ptr buffer, - RayFunction &function, - int return_num) { - std::unordered_map resources; - std::string name = function.GetFunctionDescriptor()->DefaultTaskName(); - TaskOptions options{name, return_num, resources}; - - char meta_data[3] = {'R', 'A', 'W'}; - std::shared_ptr meta = - std::make_shared((uint8_t *)meta_data, 3, true); - - std::vector> args; - if (function.GetLanguage() == Language::PYTHON) { - auto dummy = "__RAY_DUMMY__"; - std::shared_ptr dummyBuffer = - std::make_shared((uint8_t *)dummy, 13, true); - args.emplace_back(new TaskArgByValue(std::make_shared( - std::move(dummyBuffer), meta, std::vector(), true))); - } - args.emplace_back(new TaskArgByValue(std::make_shared( - std::move(buffer), meta, std::vector(), true))); - - std::vector> results; - auto result = CoreWorkerProcess::GetCoreWorker().SubmitActorTask( - peer_actor_id, function, args, options); - if (!result.has_value()) { - RAY_CHECK(false) << "Back pressure should not be enabled."; - } - return result.value(); -} - -const ray::stats::TagKeyType TagRegister(const std::string tag_name) { - return ray::stats::TagKeyType::Register(tag_name); -} - -const ActorID &GetCurrentActorID() { - return CoreWorkerProcess::GetCoreWorker().GetWorkerContext().GetCurrentActorID(); -} - -bool IsInitialized() { return CoreWorkerProcess::IsInitialized(); } - -} // namespace internal -} // namespace ray diff --git a/src/ray/internal/internal.h b/src/ray/internal/internal.h deleted file mode 100644 index 0eb58062c03c..000000000000 --- a/src/ray/internal/internal.h +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright 2020 The Ray Authors. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#pragma once -#include "ray/common/buffer.h" -#include "ray/common/id.h" -#include "ray/core_worker/common.h" -#include "ray/stats/metric.h" - -// This header is used to warp some internal code so we can reduce suspicious -// symbols export. -namespace ray { -namespace internal { - -using ray::core::RayFunction; - -/// Send buffer internal -/// \param[in] buffer buffer to be sent. -/// \param[in] function the function descriptor of peer's function. -/// \param[in] return_num return value number of the call. -/// \param[out] return_ids return ids from SubmitActorTask. -std::vector SendInternal(const ActorID &peer_actor_id, - std::shared_ptr buffer, - RayFunction &function, - int return_num); - -const stats::TagKeyType TagRegister(const std::string tag_name); - -/// Get current actor id via internal. -const ActorID &GetCurrentActorID(); - -/// Get core worker initialization flag via internal. -bool IsInitialized(); -} // namespace internal -} // namespace ray From 7d120731ab7b78f8836febb43c94b70d2fc75314 Mon Sep 17 00:00:00 2001 From: Yi Cheng <74173148+iycheng@users.noreply.github.com> Date: Tue, 25 Apr 2023 23:25:42 -0700 Subject: [PATCH 103/424] [core] Deflakey `test_placement_group_3.py` when ray syncer is turned on. (#34687) ## Why are these changes needed? The new communication protocol will only deliver the message when necessary. 
But some part of ray assumes that even no new message generated, it'll still get the delivery and thus it ends up incorrect resource number. For example, in GCS's view - GCS's view has Node: {CPU: 1} - Then GCS tries to schedule a task => Node: {CPU: 0} - Then GCS got a snapshot from that node, and that node has PG scheduled, - Then it becomes {CPU:0, PG:1}, and the previous pg scheduling failed. So, GCS'll add it back and in the end it's {CPU:1, PG:1} which is incorrect. - Later GCS will try to schedule task to this node and this node will reject since it doesn't have any CPU left. This issue happens in both raylet and GCS. In raylet, distributed task scheduling can suffer from this issue. In GCS, placement group can suffer from this issue. In test_placement_group_3.py, it's because the scheduling of a placement group bundle failed and in the same time, it get the snapshot. So in the end it has one cpu in the GCS's view. PG's scheduling algorithm is deterministic, so it'll try to reschedule the bundles infinitely. The raylet issue was fixed in [29905](https://github.com/ray-project/ray/pull/29905). This PR move the logic from node manager to cluster resource manager. The logic is not added into ray syncer is because it's more related to application's logical about how to handle the update. Besides, only one module needs to update. The future receiver needs to not assume the eventually delivery of the repeat messages. 
--- python/ray/tests/test_placement_group_3.py | 13 ++ .../ray/gcs/gcs_server/gcs_resource_manager.h | 16 +-- src/ray/gcs/gcs_server/gcs_server.cc | 1 + .../test/gcs_actor_scheduler_mock_test.cc | 1 + .../test/gcs_actor_scheduler_test.cc | 1 + .../test/gcs_monitor_server_test.cc | 2 +- .../gcs_placement_group_manager_mock_test.cc | 4 +- .../test/gcs_placement_group_manager_test.cc | 3 +- .../gcs_placement_group_scheduler_test.cc | 1 + .../test/gcs_resource_manager_test.cc | 2 +- src/ray/raylet/node_manager.cc | 23 +--- src/ray/raylet/node_manager.h | 3 - .../placement_group_resource_manager_test.cc | 75 +++++++---- .../scheduling/cluster_resource_manager.cc | 29 +++-- .../scheduling/cluster_resource_manager.h | 22 ++-- .../cluster_resource_manager_test.cc | 3 +- .../scheduling/cluster_resource_scheduler.cc | 13 +- .../scheduling/cluster_resource_scheduler.h | 7 +- .../cluster_resource_scheduler_2_test.cc | 3 +- .../cluster_resource_scheduler_test.cc | 122 ++++++++++++------ .../scheduling/cluster_task_manager_test.cc | 3 +- .../policy/hybrid_scheduling_policy_test.cc | 7 - .../policy/scheduling_policy_test.cc | 65 +++++----- 23 files changed, 259 insertions(+), 160 deletions(-) diff --git a/python/ray/tests/test_placement_group_3.py b/python/ray/tests/test_placement_group_3.py index f43ddeb9809e..fdde1d874136 100644 --- a/python/ray/tests/test_placement_group_3.py +++ b/python/ray/tests/test_placement_group_3.py @@ -458,6 +458,19 @@ def f(): assert len(gpu_ids_res) == 2 +@pytest.mark.parametrize( + "ray_start_cluster", + [ + generate_system_config_map( + use_ray_syncer=True, + ), + generate_system_config_map( + use_ray_syncer=False, + ), + ], + indirect=True, +) +@pytest.mark.repeat(3) def test_actor_scheduling_not_block_with_placement_group(ray_start_cluster): """Tests the scheduling of lots of actors will not be blocked when using placement groups. 
diff --git a/src/mock/ray/gcs/gcs_server/gcs_resource_manager.h b/src/mock/ray/gcs/gcs_server/gcs_resource_manager.h index 5f37fa0220d3..44bbaf0910b5 100644 --- a/src/mock/ray/gcs/gcs_server/gcs_resource_manager.h +++ b/src/mock/ray/gcs/gcs_server/gcs_resource_manager.h @@ -16,16 +16,20 @@ namespace ray { namespace gcs { - +static instrumented_io_context __mock_io_context_; +static ClusterResourceManager __mock_cluster_resource_manager_(__mock_io_context_); class MockGcsResourceManager : public GcsResourceManager { public: using GcsResourceManager::GcsResourceManager; explicit MockGcsResourceManager() - : GcsResourceManager( - io_context_, cluster_resource_manager_, NodeID::FromRandom(), nullptr) {} + : GcsResourceManager(__mock_io_context_, + __mock_cluster_resource_manager_, + NodeID::FromRandom(), + nullptr) {} explicit MockGcsResourceManager(ClusterResourceManager &cluster_resource_manager) : GcsResourceManager( - io_context_, cluster_resource_manager, NodeID::FromRandom(), nullptr) {} + __mock_io_context_, cluster_resource_manager, NodeID::FromRandom(), nullptr) { + } MOCK_METHOD(void, HandleGetResources, @@ -51,10 +55,6 @@ class MockGcsResourceManager : public GcsResourceManager { rpc::GetAllResourceUsageReply *reply, rpc::SendReplyCallback send_reply_callback), (override)); - - private: - instrumented_io_context io_context_; - ClusterResourceManager cluster_resource_manager_; }; } // namespace gcs diff --git a/src/ray/gcs/gcs_server/gcs_server.cc b/src/ray/gcs/gcs_server/gcs_server.cc index 1edb966d4b42..2def12549a7a 100644 --- a/src/ray/gcs/gcs_server/gcs_server.cc +++ b/src/ray/gcs/gcs_server/gcs_server.cc @@ -326,6 +326,7 @@ void GcsServer::InitGcsResourceManager(const GcsInitData &gcs_init_data) { void GcsServer::InitClusterResourceScheduler() { cluster_resource_scheduler_ = std::make_shared( + main_service_, scheduling::NodeID(kGCSNodeID.Binary()), NodeResources(), /*is_node_available_fn=*/ diff --git 
a/src/ray/gcs/gcs_server/test/gcs_actor_scheduler_mock_test.cc b/src/ray/gcs/gcs_server/test/gcs_actor_scheduler_mock_test.cc index 825ac8e6cbaa..c58311f86927 100644 --- a/src/ray/gcs/gcs_server/test/gcs_actor_scheduler_mock_test.cc +++ b/src/ray/gcs/gcs_server/test/gcs_actor_scheduler_mock_test.cc @@ -45,6 +45,7 @@ class GcsActorSchedulerMockTest : public Test { [this](const rpc::Address &) { return raylet_client; }); local_node_id = NodeID::FromRandom(); auto cluster_resource_scheduler = std::make_shared( + io_context, scheduling::NodeID(local_node_id.Binary()), NodeResources(), /*is_node_available_fn=*/ diff --git a/src/ray/gcs/gcs_server/test/gcs_actor_scheduler_test.cc b/src/ray/gcs/gcs_server/test/gcs_actor_scheduler_test.cc index f29467fceead..681d73bae010 100644 --- a/src/ray/gcs/gcs_server/test/gcs_actor_scheduler_test.cc +++ b/src/ray/gcs/gcs_server/test/gcs_actor_scheduler_test.cc @@ -44,6 +44,7 @@ class GcsActorSchedulerTest : public ::testing::Test { std::make_shared(store_client_); local_node_id_ = NodeID::FromRandom(); auto cluster_resource_scheduler = std::make_shared( + io_service_, scheduling::NodeID(local_node_id_.Binary()), NodeResources(), /*is_node_available_fn=*/ diff --git a/src/ray/gcs/gcs_server/test/gcs_monitor_server_test.cc b/src/ray/gcs/gcs_server/test/gcs_monitor_server_test.cc index d7d743cb602f..eef847dcc667 100644 --- a/src/ray/gcs/gcs_server/test/gcs_monitor_server_test.cc +++ b/src/ray/gcs/gcs_server/test/gcs_monitor_server_test.cc @@ -73,7 +73,7 @@ class GcsMonitorServerTest : public ::testing::Test { public: GcsMonitorServerTest() : mock_node_manager_(std::make_shared()), - cluster_resource_manager_(), + cluster_resource_manager_(io_context_), mock_resource_manager_( std::make_shared(cluster_resource_manager_)), mock_placement_group_manager_( diff --git a/src/ray/gcs/gcs_server/test/gcs_placement_group_manager_mock_test.cc b/src/ray/gcs/gcs_server/test/gcs_placement_group_manager_mock_test.cc index 9a5f2417f045..a737124769d9 
100644 --- a/src/ray/gcs/gcs_server/test/gcs_placement_group_manager_mock_test.cc +++ b/src/ray/gcs/gcs_server/test/gcs_placement_group_manager_mock_test.cc @@ -33,6 +33,8 @@ namespace gcs { class GcsPlacementGroupManagerMockTest : public Test { public: + GcsPlacementGroupManagerMockTest() : cluster_resource_manager_(io_context_) {} + void SetUp() override { store_client_ = std::make_shared(); gcs_table_storage_ = std::make_shared(store_client_); @@ -50,6 +52,7 @@ class GcsPlacementGroupManagerMockTest : public Test { counter_.reset(new CounterMap()); } + instrumented_io_context io_context_; std::unique_ptr gcs_placement_group_manager_; std::shared_ptr gcs_placement_group_scheduler_; std::shared_ptr gcs_table_storage_; @@ -57,7 +60,6 @@ class GcsPlacementGroupManagerMockTest : public Test { ClusterResourceManager cluster_resource_manager_; std::shared_ptr resource_manager_; std::shared_ptr> counter_; - instrumented_io_context io_context_; }; TEST_F(GcsPlacementGroupManagerMockTest, PendingQueuePriorityReschedule) { diff --git a/src/ray/gcs/gcs_server/test/gcs_placement_group_manager_test.cc b/src/ray/gcs/gcs_server/test/gcs_placement_group_manager_test.cc index df9e74a632b6..82d46f13f145 100644 --- a/src/ray/gcs/gcs_server/test/gcs_placement_group_manager_test.cc +++ b/src/ray/gcs/gcs_server/test/gcs_placement_group_manager_test.cc @@ -81,7 +81,8 @@ class MockPlacementGroupScheduler : public gcs::GcsPlacementGroupSchedulerInterf class GcsPlacementGroupManagerTest : public ::testing::Test { public: GcsPlacementGroupManagerTest() - : mock_placement_group_scheduler_(new MockPlacementGroupScheduler()) { + : mock_placement_group_scheduler_(new MockPlacementGroupScheduler()), + cluster_resource_manager_(io_service_) { gcs_publisher_ = std::make_shared(std::make_unique()); gcs_table_storage_ = std::make_shared(io_service_); diff --git a/src/ray/gcs/gcs_server/test/gcs_placement_group_scheduler_test.cc b/src/ray/gcs/gcs_server/test/gcs_placement_group_scheduler_test.cc 
index 1bdabd46eda0..fe639cd40d37 100644 --- a/src/ray/gcs/gcs_server/test/gcs_placement_group_scheduler_test.cc +++ b/src/ray/gcs/gcs_server/test/gcs_placement_group_scheduler_test.cc @@ -49,6 +49,7 @@ class GcsPlacementGroupSchedulerTest : public ::testing::Test { std::make_unique()); auto local_node_id = NodeID::FromRandom(); cluster_resource_scheduler_ = std::make_shared( + io_service_, scheduling::NodeID(local_node_id.Binary()), NodeResources(), /*is_node_available_fn=*/ diff --git a/src/ray/gcs/gcs_server/test/gcs_resource_manager_test.cc b/src/ray/gcs/gcs_server/test/gcs_resource_manager_test.cc index 39fd9e9a6c26..553a7270f351 100644 --- a/src/ray/gcs/gcs_server/test/gcs_resource_manager_test.cc +++ b/src/ray/gcs/gcs_server/test/gcs_resource_manager_test.cc @@ -27,7 +27,7 @@ using ::testing::_; class GcsResourceManagerTest : public ::testing::Test { public: - GcsResourceManagerTest() { + GcsResourceManagerTest() : cluster_resource_manager_(io_service_) { gcs_resource_manager_ = std::make_shared( io_service_, cluster_resource_manager_, NodeID::FromRandom()); } diff --git a/src/ray/raylet/node_manager.cc b/src/ray/raylet/node_manager.cc index 4b0593ea8515..27176b467b87 100644 --- a/src/ray/raylet/node_manager.cc +++ b/src/ray/raylet/node_manager.cc @@ -293,6 +293,7 @@ NodeManager::NodeManager(instrumented_io_context &io_service, CreateMemoryUsageRefreshCallback())) { RAY_LOG(INFO) << "Initializing NodeManager with ID " << self_node_id_; cluster_resource_scheduler_ = std::make_shared( + io_service, scheduling::NodeID(self_node_id_.Binary()), config.resource_config.ToResourceMap(), /*is_node_available_fn*/ @@ -373,23 +374,6 @@ NodeManager::NodeManager(instrumented_io_context &io_service, node_manager_server_.RegisterService(node_manager_service_); node_manager_server_.RegisterService(agent_manager_service_); if (RayConfig::instance().use_ray_syncer()) { - periodical_runner_.RunFnPeriodically( - [this]() { - auto now = absl::Now(); - auto threshold = - now - 
absl::Milliseconds( - RayConfig::instance().ray_syncer_message_refresh_interval_ms()); - auto &resource_manager = - cluster_resource_scheduler_->GetClusterResourceManager(); - for (auto &[node_id, resource] : resource_message_udpated_) { - auto modified_ts = resource_manager.GetNodeResourceModifiedTs( - scheduling::NodeID(node_id.Binary())); - if (modified_ts && *modified_ts < threshold) { - UpdateResourceUsage(node_id, resource); - } - } - }, - RayConfig::instance().ray_syncer_message_refresh_interval_ms()); node_manager_server_.RegisterService(ray_syncer_service_); } node_manager_server_.Run(); @@ -1048,10 +1032,6 @@ void NodeManager::NodeRemoved(const NodeID &node_id) { // Below, when we remove node_id from all of these data structures, we could // check that it is actually removed, or log a warning otherwise, but that may // not be necessary. - - // Remove the messages received - resource_message_udpated_.erase(node_id); - // Remove the node from the resource map. if (!cluster_resource_scheduler_->GetClusterResourceManager().RemoveNode( scheduling::NodeID(node_id.Binary()))) { @@ -2790,7 +2770,6 @@ void NodeManager::ConsumeSyncMessage( } // Message view shouldn't carry this field. 
RAY_CHECK(!data.should_global_gc()); - resource_message_udpated_[node_id] = std::move(data); } else if (message->message_type() == syncer::MessageType::COMMANDS) { rpc::ResourcesData data; data.ParseFromString(message->sync_message()); diff --git a/src/ray/raylet/node_manager.h b/src/ray/raylet/node_manager.h index 2982da44aff3..ec6980d6eba5 100644 --- a/src/ray/raylet/node_manager.h +++ b/src/ray/raylet/node_manager.h @@ -827,9 +827,6 @@ class NodeManager : public rpc::NodeManagerServiceHandler, /// Ray syncer for synchronization syncer::RaySyncer ray_syncer_; - /// Resource message updated - absl::flat_hash_map resource_message_udpated_; - /// RaySyncerService for gRPC syncer::RaySyncerService ray_syncer_service_; diff --git a/src/ray/raylet/placement_group_resource_manager_test.cc b/src/ray/raylet/placement_group_resource_manager_test.cc index b458555ff2bd..fe8be3c660d0 100644 --- a/src/ray/raylet/placement_group_resource_manager_test.cc +++ b/src/ray/raylet/placement_group_resource_manager_test.cc @@ -46,7 +46,7 @@ class NewPlacementGroupResourceManagerTest : public ::testing::Test { void InitLocalAvailableResource( absl::flat_hash_map &unit_resource) { cluster_resource_scheduler_ = std::make_shared( - scheduling::NodeID("local"), unit_resource, is_node_available_fn_); + io_context, scheduling::NodeID("local"), unit_resource, is_node_available_fn_); new_placement_group_resource_manager_ = std::make_unique( cluster_resource_scheduler_); @@ -73,6 +73,7 @@ class NewPlacementGroupResourceManagerTest : public ::testing::Test { std::make_shared(std::move(bundle_spec))); return bundle_specs; } + instrumented_io_context io_context; }; TEST_F(NewPlacementGroupResourceManagerTest, @@ -186,8 +187,11 @@ TEST_F(NewPlacementGroupResourceManagerTest, TestNewCommitBundleResource) { {"CPU", 1.0}, {"bundle_group_1_" + group_id.Hex(), 1000}, {"bundle_group_" + group_id.Hex(), 1000}}; - auto remaining_resource_scheduler = std::make_shared( - scheduling::NodeID("remaining"), 
remaining_resources, is_node_available_fn_); + auto remaining_resource_scheduler = + std::make_shared(io_context, + scheduling::NodeID("remaining"), + remaining_resources, + is_node_available_fn_); std::shared_ptr resource_instances = std::make_shared(); ASSERT_TRUE( @@ -216,7 +220,7 @@ TEST_F(NewPlacementGroupResourceManagerTest, TestNewReturnBundleResource) { new_placement_group_resource_manager_->ReturnBundle(bundle_spec); /// 5. check remaining resources is correct. auto remaining_resource_scheduler = std::make_shared( - scheduling::NodeID("remaining"), unit_resource, is_node_available_fn_); + io_context, scheduling::NodeID("remaining"), unit_resource, is_node_available_fn_); auto remaining_resource_instance = remaining_resource_scheduler->GetClusterResourceManager().GetNodeResources( scheduling::NodeID("remaining")); @@ -252,8 +256,11 @@ TEST_F(NewPlacementGroupResourceManagerTest, TestNewMultipleBundlesCommitAndRetu {"bundle_group_1_" + group_id.Hex(), 1000}, {"bundle_group_2_" + group_id.Hex(), 1000}, {"bundle_group_" + group_id.Hex(), 2000}}; - auto remaining_resource_scheduler = std::make_shared( - scheduling::NodeID("remaining"), remaining_resources, is_node_available_fn_); + auto remaining_resource_scheduler = + std::make_shared(io_context, + scheduling::NodeID("remaining"), + remaining_resources, + is_node_available_fn_); std::shared_ptr resource_instances = std::make_shared(); ASSERT_TRUE( @@ -272,8 +279,11 @@ TEST_F(NewPlacementGroupResourceManagerTest, TestNewMultipleBundlesCommitAndRetu {"CPU", 2.0}, {"bundle_group_1_" + group_id.Hex(), 1000}, {"bundle_group_" + group_id.Hex(), 2000}}; - remaining_resource_scheduler = std::make_shared( - scheduling::NodeID("remaining"), remaining_resources, is_node_available_fn_); + remaining_resource_scheduler = + std::make_shared(io_context, + scheduling::NodeID("remaining"), + remaining_resources, + is_node_available_fn_); ASSERT_TRUE( 
remaining_resource_scheduler->GetLocalResourceManager().AllocateLocalTaskResources( {{"CPU_group_" + group_id.Hex(), 1.0}, @@ -288,8 +298,11 @@ TEST_F(NewPlacementGroupResourceManagerTest, TestNewMultipleBundlesCommitAndRetu new_placement_group_resource_manager_->ReturnBundle(first_bundle_spec); /// 8. check remaining resources is correct after all bundle returned. remaining_resources = {{"CPU", 2.0}}; - remaining_resource_scheduler = std::make_shared( - scheduling::NodeID("remaining"), remaining_resources, is_node_available_fn_); + remaining_resource_scheduler = + std::make_shared(io_context, + scheduling::NodeID("remaining"), + remaining_resources, + is_node_available_fn_); remaining_resource_instance = remaining_resource_scheduler->GetClusterResourceManager().GetNodeResources( scheduling::NodeID("remaining")); @@ -312,8 +325,11 @@ TEST_F(NewPlacementGroupResourceManagerTest, TestNewIdempotencyWithMultiPrepare) } /// 4. check remaining resources is correct. absl::flat_hash_map remaining_resources = {{"CPU", 3.0}}; - auto remaining_resource_scheduler = std::make_shared( - scheduling::NodeID("remaining"), remaining_resources, is_node_available_fn_); + auto remaining_resource_scheduler = + std::make_shared(io_context, + scheduling::NodeID("remaining"), + remaining_resources, + is_node_available_fn_); std::shared_ptr resource_instances = std::make_shared(); ASSERT_TRUE( @@ -349,8 +365,11 @@ TEST_F(NewPlacementGroupResourceManagerTest, TestNewIdempotencyWithRandomOrder) {"CPU", 3.0}, {"bundle_group_1_" + group_id.Hex(), 1000}, {"bundle_group_" + group_id.Hex(), 1000}}; - auto remaining_resource_scheduler = std::make_shared( - scheduling::NodeID("remaining"), remaining_resources, is_node_available_fn_); + auto remaining_resource_scheduler = + std::make_shared(io_context, + scheduling::NodeID("remaining"), + remaining_resources, + is_node_available_fn_); std::shared_ptr resource_instances = std::make_shared(); ASSERT_TRUE( @@ -378,8 +397,11 @@ 
TEST_F(NewPlacementGroupResourceManagerTest, TestNewIdempotencyWithRandomOrder) new_placement_group_resource_manager_->CommitBundles( ConvertSingleSpecToVectorPtrs(bundle_spec)); // 8. check remaining resources is correct. - remaining_resource_scheduler = std::make_shared( - scheduling::NodeID("remaining"), available_resource, is_node_available_fn_); + remaining_resource_scheduler = + std::make_shared(io_context, + scheduling::NodeID("remaining"), + available_resource, + is_node_available_fn_); remaining_resource_instance = remaining_resource_scheduler->GetClusterResourceManager().GetNodeResources( scheduling::NodeID("remaining")); @@ -402,8 +424,11 @@ TEST_F(NewPlacementGroupResourceManagerTest, TestPreparedResourceBatched) { ASSERT_FALSE(new_placement_group_resource_manager_->PrepareBundles(bundle_specs)); // 4. check remaining resources is correct. absl::flat_hash_map remaining_resources = {{"CPU", 3.0}}; - auto remaining_resource_scheduler = std::make_shared( - scheduling::NodeID("remaining"), remaining_resources, is_node_available_fn_); + auto remaining_resource_scheduler = + std::make_shared(io_context, + scheduling::NodeID("remaining"), + remaining_resources, + is_node_available_fn_); auto remaining_resource_instance = remaining_resource_scheduler->GetClusterResourceManager().GetNodeResources( scheduling::NodeID("remaining")); @@ -428,8 +453,11 @@ TEST_F(NewPlacementGroupResourceManagerTest, TestPreparedResourceBatched) { {"bundle_group_3_" + group_id.Hex(), 1000}, {"bundle_group_4_" + group_id.Hex(), 1000}, {"bundle_group_" + group_id.Hex(), 4000}}; - remaining_resource_scheduler = std::make_shared( - scheduling::NodeID("remaining"), remaining_resources, is_node_available_fn_); + remaining_resource_scheduler = + std::make_shared(io_context, + scheduling::NodeID("remaining"), + remaining_resources, + is_node_available_fn_); std::shared_ptr resource_instances = std::make_shared(); absl::flat_hash_map allocating_resource; @@ -474,8 +502,11 @@ 
TEST_F(NewPlacementGroupResourceManagerTest, TestCommiteResourceBatched) { {"bundle_group_3_" + group_id.Hex(), 1000}, {"bundle_group_4_" + group_id.Hex(), 1000}, {"bundle_group_" + group_id.Hex(), 4000}}; - auto remaining_resource_scheduler = std::make_shared( - scheduling::NodeID("remaining"), remaining_resources, is_node_available_fn_); + auto remaining_resource_scheduler = + std::make_shared(io_context, + scheduling::NodeID("remaining"), + remaining_resources, + is_node_available_fn_); std::shared_ptr resource_instances = std::make_shared(); absl::flat_hash_map allocating_resource; diff --git a/src/ray/raylet/scheduling/cluster_resource_manager.cc b/src/ray/raylet/scheduling/cluster_resource_manager.cc index 95677a20d994..30b7b63e546e 100644 --- a/src/ray/raylet/scheduling/cluster_resource_manager.cc +++ b/src/ray/raylet/scheduling/cluster_resource_manager.cc @@ -22,7 +22,23 @@ namespace ray { -ClusterResourceManager::ClusterResourceManager() : nodes_{} {} +ClusterResourceManager::ClusterResourceManager(instrumented_io_context &io_service) + : timer_(io_service) { + if (RayConfig::instance().use_ray_syncer()) { + timer_.RunFnPeriodically( + [this]() { + auto syncer_delay = absl::Milliseconds( + RayConfig::instance().ray_syncer_message_refresh_interval_ms()); + for (auto &[node_id, resource] : received_node_resources_) { + auto modified_ts = GetNodeResourceModifiedTs(node_id); + if (modified_ts && *modified_ts + syncer_delay < absl::Now()) { + AddOrUpdateNode(node_id, resource); + } + } + }, + RayConfig::instance().ray_syncer_message_refresh_interval_ms()); + } +} std::optional ClusterResourceManager::GetNodeResourceModifiedTs( scheduling::NodeID node_id) const { @@ -76,18 +92,13 @@ bool ClusterResourceManager::UpdateNode(scheduling::NodeID node_id, } AddOrUpdateNode(node_id, local_view); + received_node_resources_[node_id] = std::move(local_view); return true; } bool ClusterResourceManager::RemoveNode(scheduling::NodeID node_id) { - auto it = 
nodes_.find(node_id); - if (it == nodes_.end()) { - // Node not found. - return false; - } else { - nodes_.erase(it); - return true; - } + received_node_resources_.erase(node_id); + return nodes_.erase(node_id) != 0; } bool ClusterResourceManager::GetNodeResources(scheduling::NodeID node_id, diff --git a/src/ray/raylet/scheduling/cluster_resource_manager.h b/src/ray/raylet/scheduling/cluster_resource_manager.h index b64b5d93fdc7..9a02459ec23e 100644 --- a/src/ray/raylet/scheduling/cluster_resource_manager.h +++ b/src/ray/raylet/scheduling/cluster_resource_manager.h @@ -46,7 +46,7 @@ class GcsActorSchedulerTest; /// This class is not thread safe. class ClusterResourceManager { public: - explicit ClusterResourceManager(); + explicit ClusterResourceManager(instrumented_io_context &io_service); /// Get the resource view of the cluster. const absl::flat_hash_map &GetResourceView() const; @@ -57,13 +57,6 @@ class ClusterResourceManager { /// \param resource_data The node resource data. bool UpdateNode(scheduling::NodeID node_id, const rpc::ResourcesData &resource_data); - /// Return the timestamp when the resource of the node got updated by scheduler. - /// - /// \param node_id ID of the node to query - /// \return The timestamp when the node resource got updated. If it's null, it means - /// there is no such node or the resource of the node never got updated. - std::optional GetNodeResourceModifiedTs(scheduling::NodeID node_id) const; - /// Remove node from the cluster data structure. This happens /// when a node fails or it is removed from the cluster. /// @@ -139,6 +132,13 @@ class ClusterResourceManager { friend class ClusterResourceScheduler; friend class gcs::GcsActorSchedulerTest; + /// Return the timestamp when the resource of the node got updated by scheduler. + /// + /// \param node_id ID of the node to query + /// \return The timestamp when the node resource got updated. 
If it's null, it means + /// there is no such node or the resource of the node never got updated. + std::optional GetNodeResourceModifiedTs(scheduling::NodeID node_id) const; + /// Add a new node or overwrite the resources of an existing node. /// /// \param node_id: Node ID. @@ -158,8 +158,14 @@ class ClusterResourceManager { /// The key of the map is the node ID. absl::flat_hash_map nodes_; + /// Resource message updated + absl::flat_hash_map received_node_resources_; + BundleLocationIndex bundle_location_index_; + /// Timer to revert local changes to the resources periodically. + ray::PeriodicalRunner timer_; + friend class ClusterResourceSchedulerTest; friend struct ClusterResourceManagerTest; friend class raylet::ClusterTaskManagerTest; diff --git a/src/ray/raylet/scheduling/cluster_resource_manager_test.cc b/src/ray/raylet/scheduling/cluster_resource_manager_test.cc index 73d4c8e994cc..f7a0a87b71f4 100644 --- a/src/ray/raylet/scheduling/cluster_resource_manager_test.cc +++ b/src/ray/raylet/scheduling/cluster_resource_manager_test.cc @@ -35,7 +35,8 @@ NodeResources CreateNodeResources(double available_cpu, struct ClusterResourceManagerTest : public ::testing::Test { void SetUp() { ::testing::Test::SetUp(); - manager = std::make_unique(); + static instrumented_io_context io_context; + manager = std::make_unique(io_context); manager->AddOrUpdateNode(node0, CreateNodeResources(/*available_cpu*/ 1, /*total_cpu*/ 1)); manager->AddOrUpdateNode(node1, diff --git a/src/ray/raylet/scheduling/cluster_resource_scheduler.cc b/src/ray/raylet/scheduling/cluster_resource_scheduler.cc index 2ad785a3cf9b..85505a82da2d 100644 --- a/src/ray/raylet/scheduling/cluster_resource_scheduler.cc +++ b/src/ray/raylet/scheduling/cluster_resource_scheduler.cc @@ -24,6 +24,7 @@ namespace ray { using namespace ::ray::raylet_scheduling_policy; ClusterResourceScheduler::ClusterResourceScheduler( + instrumented_io_context &io_service, scheduling::NodeID local_node_id, const NodeResources 
&local_node_resources, std::function is_node_available_fn, @@ -31,12 +32,14 @@ ClusterResourceScheduler::ClusterResourceScheduler( : local_node_id_(local_node_id), is_node_available_fn_(is_node_available_fn), is_local_node_with_raylet_(is_local_node_with_raylet) { - Init(local_node_resources, + Init(io_service, + local_node_resources, /*get_used_object_store_memory=*/nullptr, /*get_pull_manager_at_capacity=*/nullptr); } ClusterResourceScheduler::ClusterResourceScheduler( + instrumented_io_context &io_service, scheduling::NodeID local_node_id, const absl::flat_hash_map &local_node_resources, std::function is_node_available_fn, @@ -45,14 +48,18 @@ ClusterResourceScheduler::ClusterResourceScheduler( : local_node_id_(local_node_id), is_node_available_fn_(is_node_available_fn) { NodeResources node_resources = ResourceMapToNodeResources(local_node_resources, local_node_resources); - Init(node_resources, get_used_object_store_memory, get_pull_manager_at_capacity); + Init(io_service, + node_resources, + get_used_object_store_memory, + get_pull_manager_at_capacity); } void ClusterResourceScheduler::Init( + instrumented_io_context &io_service, const NodeResources &local_node_resources, std::function get_used_object_store_memory, std::function get_pull_manager_at_capacity) { - cluster_resource_manager_ = std::make_unique(); + cluster_resource_manager_ = std::make_unique(io_service); local_resource_manager_ = std::make_unique( local_node_id_, local_node_resources, diff --git a/src/ray/raylet/scheduling/cluster_resource_scheduler.h b/src/ray/raylet/scheduling/cluster_resource_scheduler.h index 5e48369b9172..2722989cb083 100644 --- a/src/ray/raylet/scheduling/cluster_resource_scheduler.h +++ b/src/ray/raylet/scheduling/cluster_resource_scheduler.h @@ -50,12 +50,14 @@ class ClusterResourceScheduler { /// with the local node. /// \param is_node_available_fn: Function to determine whether a node is available. 
/// \param is_local_node_with_raylet: Whether there is a raylet on the local node. - ClusterResourceScheduler(scheduling::NodeID local_node_id, + ClusterResourceScheduler(instrumented_io_context &io_service, + scheduling::NodeID local_node_id, const NodeResources &local_node_resources, std::function is_node_available_fn, bool is_local_node_with_raylet = true); ClusterResourceScheduler( + instrumented_io_context &io_service, scheduling::NodeID local_node_id, const absl::flat_hash_map &local_node_resources, std::function is_node_available_fn, @@ -127,7 +129,8 @@ class ClusterResourceScheduler { bool IsLocalNodeWithRaylet() { return is_local_node_with_raylet_; } private: - void Init(const NodeResources &local_node_resources, + void Init(instrumented_io_context &io_service, + const NodeResources &local_node_resources, std::function get_used_object_store_memory, std::function get_pull_manager_at_capacity); diff --git a/src/ray/raylet/scheduling/cluster_resource_scheduler_2_test.cc b/src/ray/raylet/scheduling/cluster_resource_scheduler_2_test.cc index 6256b941c282..ff70d836a19f 100644 --- a/src/ray/raylet/scheduling/cluster_resource_scheduler_2_test.cc +++ b/src/ray/raylet/scheduling/cluster_resource_scheduler_2_test.cc @@ -28,6 +28,7 @@ class GcsResourceSchedulerTest : public ::testing::Test { public: void SetUp() override { cluster_resource_scheduler_ = std::make_shared( + io_context_, scheduling::NodeID(NodeID::FromRandom().Binary()), NodeResources(), /*is_node_available_fn=*/ @@ -177,7 +178,7 @@ class GcsResourceSchedulerTest : public ::testing::Test { ASSERT_TRUE(result.status.IsSuccess()); ASSERT_EQ(result.selected_nodes.size(), resources_list.size()); } - + instrumented_io_context io_context_; std::shared_ptr cluster_resource_scheduler_; }; diff --git a/src/ray/raylet/scheduling/cluster_resource_scheduler_test.cc b/src/ray/raylet/scheduling/cluster_resource_scheduler_test.cc index f1e2c49dd54f..b77fcbf1ae12 100644 --- 
a/src/ray/raylet/scheduling/cluster_resource_scheduler_test.cc +++ b/src/ray/raylet/scheduling/cluster_resource_scheduler_test.cc @@ -251,8 +251,11 @@ TEST_F(ClusterResourceSchedulerTest, SchedulingIdInsertOrDieTest) { TEST_F(ClusterResourceSchedulerTest, SchedulingInitClusterTest) { int num_nodes = 10; + instrumented_io_context io_context; ClusterResourceScheduler resource_scheduler( - scheduling::NodeID(num_nodes + 1), NodeResources(), [](auto) { return true; }); + io_context, scheduling::NodeID(num_nodes + 1), NodeResources(), [](auto) { + return true; + }); AssertPredefinedNodeResources(); initCluster(resource_scheduler, num_nodes); @@ -263,9 +266,11 @@ TEST_F(ClusterResourceSchedulerTest, SchedulingInitClusterTest) { TEST_F(ClusterResourceSchedulerTest, SchedulingDeleteClusterNodeTest) { int num_nodes = 4; int64_t remove_id = 2; - + instrumented_io_context io_context; ClusterResourceScheduler resource_scheduler( - scheduling::NodeID(num_nodes + 1), NodeResources(), [](auto) { return true; }); + io_context, scheduling::NodeID(num_nodes + 1), NodeResources(), [](auto) { + return true; + }); initCluster(resource_scheduler, num_nodes); resource_scheduler.GetClusterResourceManager().RemoveNode( @@ -277,8 +282,11 @@ TEST_F(ClusterResourceSchedulerTest, SchedulingDeleteClusterNodeTest) { TEST_F(ClusterResourceSchedulerTest, SchedulingModifyClusterNodeTest) { int num_nodes = 4; int64_t update_id = 2; + instrumented_io_context io_context; ClusterResourceScheduler resource_scheduler( - scheduling::NodeID(num_nodes + 1), NodeResources(), [](auto) { return true; }); + io_context, scheduling::NodeID(num_nodes + 1), NodeResources(), [](auto) { + return true; + }); initCluster(resource_scheduler, num_nodes); @@ -291,8 +299,9 @@ TEST_F(ClusterResourceSchedulerTest, SchedulingModifyClusterNodeTest) { TEST_F(ClusterResourceSchedulerTest, NodeAffinitySchedulingStrategyTest) { absl::flat_hash_map resource_total({{"CPU", 10}}); auto local_node_id = 
scheduling::NodeID(NodeID::FromRandom().Binary()); + instrumented_io_context io_context; ClusterResourceScheduler resource_scheduler( - local_node_id, resource_total, is_node_available_fn_); + io_context, local_node_id, resource_total, is_node_available_fn_); AssertPredefinedNodeResources(); auto remote_node_id = scheduling::NodeID(NodeID::FromRandom().Binary()); resource_scheduler.GetClusterResourceManager().AddOrUpdateNode( @@ -358,8 +367,9 @@ TEST_F(ClusterResourceSchedulerTest, NodeAffinitySchedulingStrategyTest) { TEST_F(ClusterResourceSchedulerTest, SpreadSchedulingStrategyTest) { absl::flat_hash_map resource_total({{"CPU", 10}}); auto local_node_id = scheduling::NodeID(NodeID::FromRandom().Binary()); + instrumented_io_context io_context; ClusterResourceScheduler resource_scheduler( - local_node_id, resource_total, is_node_available_fn_); + io_context, local_node_id, resource_total, is_node_available_fn_); AssertPredefinedNodeResources(); auto remote_node_id = scheduling::NodeID(NodeID::FromRandom().Binary()); resource_scheduler.GetClusterResourceManager().AddOrUpdateNode( @@ -396,8 +406,9 @@ TEST_F(ClusterResourceSchedulerTest, SpreadSchedulingStrategyTest) { TEST_F(ClusterResourceSchedulerTest, SchedulingWithPreferredNodeTest) { absl::flat_hash_map resource_total({{"CPU", 10}}); auto local_node_id = scheduling::NodeID(NodeID::FromRandom().Binary()); + instrumented_io_context io_context; ClusterResourceScheduler resource_scheduler( - local_node_id, resource_total, is_node_available_fn_); + io_context, local_node_id, resource_total, is_node_available_fn_); AssertPredefinedNodeResources(); auto remote_node_id = scheduling::NodeID(NodeID::FromRandom().Binary()); resource_scheduler.GetClusterResourceManager().AddOrUpdateNode( @@ -439,8 +450,9 @@ TEST_F(ClusterResourceSchedulerTest, SchedulingUpdateAvailableResourcesTest) { {ResourceID::GPU(), 3}, {ResourceID("custom1"), 5}, {ResourceID("custom2"), 5}}); + instrumented_io_context io_context; 
ClusterResourceScheduler resource_scheduler( - scheduling::NodeID(1), node_resources, is_node_available_fn_); + io_context, scheduling::NodeID(1), node_resources, is_node_available_fn_); AssertPredefinedNodeResources(); { @@ -486,7 +498,9 @@ TEST_F(ClusterResourceSchedulerTest, SchedulingUpdateTotalResourcesTest) { absl::flat_hash_map initial_resources = { {ray::kCPU_ResourceLabel, 1}, {"custom1", 1}}; std::string name = NodeID::FromRandom().Binary(); - ClusterResourceScheduler resource_scheduler(scheduling::NodeID(name), + instrumented_io_context io_context; + ClusterResourceScheduler resource_scheduler(io_context, + scheduling::NodeID(name), initial_resources, is_node_available_fn_, nullptr, @@ -511,8 +525,9 @@ TEST_F(ClusterResourceSchedulerTest, SchedulingUpdateTotalResourcesTest) { } TEST_F(ClusterResourceSchedulerTest, SchedulingAddOrUpdateNodeTest) { + instrumented_io_context io_context; ClusterResourceScheduler resource_scheduler( - scheduling::NodeID(0), NodeResources(), [](auto) { return true; }); + io_context, scheduling::NodeID(0), NodeResources(), [](auto) { return true; }); NodeResources nr, nr_out; int64_t node_id = 1; @@ -558,8 +573,9 @@ TEST_F(ClusterResourceSchedulerTest, SchedulingResourceRequestTest) { // Create cluster resources containing local node. 
NodeResources node_resources = CreateNodeResources( {{ResourceID::CPU(), 5}, {ResourceID::Memory(), 5}, {ResourceID("custom1"), 10}}); + instrumented_io_context io_context; ClusterResourceScheduler resource_scheduler( - scheduling::NodeID(0), node_resources, is_node_available_fn_); + io_context, scheduling::NodeID(0), node_resources, is_node_available_fn_); auto node_id = NodeID::FromRandom(); rpc::SchedulingStrategy scheduling_strategy; scheduling_strategy.mutable_default_scheduling_strategy(); @@ -675,8 +691,9 @@ TEST_F(ClusterResourceSchedulerTest, GetLocalAvailableResourcesWithCpuUnitTest) {ResourceID::Memory(), 4}, {ResourceID::GPU(), 5}, {ResourceID("custom1"), 8}}); + instrumented_io_context io_context; ClusterResourceScheduler resource_scheduler( - scheduling::NodeID(0), node_resources, is_node_available_fn_); + io_context, scheduling::NodeID(0), node_resources, is_node_available_fn_); TaskResourceInstances available_cluster_resources = resource_scheduler.GetLocalResourceManager() @@ -702,8 +719,9 @@ TEST_F(ClusterResourceSchedulerTest, GetLocalAvailableResourcesTest) { {ResourceID::Memory(), 4}, {ResourceID::GPU(), 5}, {ResourceID("custom1"), 8}}); + instrumented_io_context io_context; ClusterResourceScheduler resource_scheduler( - scheduling::NodeID(0), node_resources, is_node_available_fn_); + io_context, scheduling::NodeID(0), node_resources, is_node_available_fn_); TaskResourceInstances available_cluster_resources = resource_scheduler.GetLocalResourceManager() @@ -736,8 +754,9 @@ TEST_F(ClusterResourceSchedulerTest, GetCPUInstancesDoubleTest) { TEST_F(ClusterResourceSchedulerTest, AvailableResourceInstancesOpsTest) { NodeResources node_resources = CreateNodeResources({{ResourceID::CPU(), 3}}); + instrumented_io_context io_context; ClusterResourceScheduler cluster( - scheduling::NodeID(0), node_resources, is_node_available_fn_); + io_context, scheduling::NodeID(0), node_resources, is_node_available_fn_); std::vector total = {6., 6., 6.}; std::vector 
available = {3., 2., 5.}; @@ -767,8 +786,9 @@ TEST_F(ClusterResourceSchedulerTest, TaskResourceInstancesTest) { { NodeResources node_resources = CreateNodeResources( {{ResourceID::CPU(), 3}, {ResourceID::Memory(), 4}, {ResourceID::GPU(), 5}}); + instrumented_io_context io_context; ClusterResourceScheduler resource_scheduler( - scheduling::NodeID(0), node_resources, is_node_available_fn_); + io_context, scheduling::NodeID(0), node_resources, is_node_available_fn_); ResourceRequest resource_request = CreateResourceRequest( {{ResourceID::CPU(), 3}, {ResourceID::Memory(), 2}, {ResourceID::GPU(), 1.5}}); @@ -796,8 +816,9 @@ TEST_F(ClusterResourceSchedulerTest, TaskResourceInstancesTest) { { NodeResources node_resources = CreateNodeResources( {{ResourceID::CPU(), 3}, {ResourceID::Memory(), 4}, {ResourceID::GPU(), 5}}); + instrumented_io_context io_context; ClusterResourceScheduler resource_scheduler( - scheduling::NodeID(0), node_resources, is_node_available_fn_); + io_context, scheduling::NodeID(0), node_resources, is_node_available_fn_); ResourceRequest resource_request = CreateResourceRequest( {{ResourceID::CPU(), 4}, {ResourceID::Memory(), 2}, {ResourceID::GPU(), 1.5}}); @@ -823,8 +844,9 @@ TEST_F(ClusterResourceSchedulerTest, TaskResourceInstancesTest) { {ResourceID::GPU(), 5}, {ResourceID("custom1"), 4}, {ResourceID("custom2"), 4}}); + instrumented_io_context io_context; ClusterResourceScheduler resource_scheduler( - scheduling::NodeID(0), node_resources, is_node_available_fn_); + io_context, scheduling::NodeID(0), node_resources, is_node_available_fn_); ResourceRequest resource_request = CreateResourceRequest({{ResourceID::CPU(), 3}, @@ -857,8 +879,9 @@ TEST_F(ClusterResourceSchedulerTest, TaskResourceInstancesTest) { {ResourceID::GPU(), 5}, {ResourceID("custom1"), 4}, {ResourceID("custom2"), 4}}); + instrumented_io_context io_context; ClusterResourceScheduler resource_scheduler( - scheduling::NodeID(0), node_resources, is_node_available_fn_); + io_context, 
scheduling::NodeID(0), node_resources, is_node_available_fn_); ResourceRequest resource_request = CreateResourceRequest({{ResourceID::CPU(), 3}, @@ -889,8 +912,9 @@ TEST_F(ClusterResourceSchedulerTest, TaskResourceInstancesAllocationFailureTest) {ResourceID("custom1"), 4}, {ResourceID("custom2"), 4}, {ResourceID("custom3"), 4}}); + instrumented_io_context io_context; ClusterResourceScheduler resource_scheduler( - scheduling::NodeID(0), node_resources, is_node_available_fn_); + io_context, scheduling::NodeID(0), node_resources, is_node_available_fn_); ResourceRequest resource_request = CreateResourceRequest({{ResourceID("custom1"), 3}, {ResourceID("custom3"), 3}, @@ -918,8 +942,9 @@ TEST_F(ClusterResourceSchedulerTest, TaskResourceInstancesTest2) { {ResourceID::GPU(), 5}, {ResourceID("custom1"), 4}, {ResourceID("custom2"), 4}}); + instrumented_io_context io_context; ClusterResourceScheduler resource_scheduler( - scheduling::NodeID(0), node_resources, is_node_available_fn_); + io_context, scheduling::NodeID(0), node_resources, is_node_available_fn_); ResourceRequest resource_request = CreateResourceRequest({{ResourceID::CPU(), 2}, @@ -950,7 +975,9 @@ TEST_F(ClusterResourceSchedulerTest, TaskResourceInstancesTest2) { } TEST_F(ClusterResourceSchedulerTest, DeadNodeTest) { - ClusterResourceScheduler resource_scheduler(scheduling::NodeID("local"), + instrumented_io_context io_context; + ClusterResourceScheduler resource_scheduler(io_context, + scheduling::NodeID("local"), absl::flat_hash_map{}, is_node_available_fn_); absl::flat_hash_map resource; @@ -988,12 +1015,13 @@ TEST_F(ClusterResourceSchedulerTest, DeadNodeTest) { TEST_F(ClusterResourceSchedulerTest, TaskGPUResourceInstancesTest) { { + instrumented_io_context io_context; NodeResources node_resources = CreateNodeResources({{ResourceID::CPU(), 1}, {ResourceID::Memory(), 1}, {ResourceID::GPU(), 4}, {ResourceID("custom1"), 8}}); ClusterResourceScheduler resource_scheduler( - scheduling::NodeID(0), node_resources, 
is_node_available_fn_); + io_context, scheduling::NodeID(0), node_resources, is_node_available_fn_); std::vector allocate_gpu_instances{0.5, 0.5, 0.5, 0.5}; resource_scheduler.GetLocalResourceManager().SubtractResourceInstances( @@ -1059,8 +1087,9 @@ TEST_F(ClusterResourceSchedulerTest, {ResourceID::Memory(), 1}, {ResourceID::GPU(), 4}, {ResourceID("custom1"), 8}}); + instrumented_io_context io_context; ClusterResourceScheduler resource_scheduler( - scheduling::NodeID(0), node_resources, is_node_available_fn_); + io_context, scheduling::NodeID(0), node_resources, is_node_available_fn_); { std::vector allocate_gpu_instances{0.5, 0.5, 2, 0.5}; @@ -1111,8 +1140,9 @@ TEST_F(ClusterResourceSchedulerTest, TEST_F(ClusterResourceSchedulerTest, TaskResourceInstanceWithHardRequestTest) { NodeResources node_resources = CreateNodeResources( {{ResourceID::CPU(), 4}, {ResourceID::Memory(), 2}, {ResourceID::GPU(), 4}}); + instrumented_io_context io_context; ClusterResourceScheduler resource_scheduler( - scheduling::NodeID(0), node_resources, is_node_available_fn_); + io_context, scheduling::NodeID(0), node_resources, is_node_available_fn_); ResourceRequest resource_request = CreateResourceRequest( {{ResourceID::CPU(), 2}, {ResourceID::Memory(), 2}, {ResourceID::GPU(), 1.5}}); @@ -1134,8 +1164,9 @@ TEST_F(ClusterResourceSchedulerTest, TaskResourceInstanceWithHardRequestTest) { TEST_F(ClusterResourceSchedulerTest, TaskResourceInstanceWithoutCpuUnitTest) { NodeResources node_resources = CreateNodeResources( {{ResourceID::CPU(), 4}, {ResourceID::Memory(), 2}, {ResourceID::GPU(), 4}}); + instrumented_io_context io_context; ClusterResourceScheduler resource_scheduler( - scheduling::NodeID(0), node_resources, is_node_available_fn_); + io_context, scheduling::NodeID(0), node_resources, is_node_available_fn_); ResourceRequest resource_request = CreateResourceRequest( {{ResourceID::CPU(), 2}, {ResourceID::Memory(), 2}, {ResourceID::GPU(), 1.5}}); @@ -1156,7 +1187,9 @@ 
TEST_F(ClusterResourceSchedulerTest, TaskResourceInstanceWithoutCpuUnitTest) { TEST_F(ClusterResourceSchedulerTest, TestAlwaysSpillInfeasibleTask) { absl::flat_hash_map resource_spec({{"CPU", 1}}); - ClusterResourceScheduler resource_scheduler(scheduling::NodeID("local"), + instrumented_io_context io_context; + ClusterResourceScheduler resource_scheduler(io_context, + scheduling::NodeID("local"), absl::flat_hash_map{}, is_node_available_fn_); for (int i = 0; i < 100; i++) { @@ -1218,8 +1251,9 @@ TEST_F(ClusterResourceSchedulerTest, ResourceUsageReportTest) { absl::flat_hash_map initial_resources( {{"CPU", 1}, {"GPU", 2}, {"memory", 3}, {"1", 1}, {"2", 2}, {"3", 3}}); + instrumented_io_context io_context; ClusterResourceScheduler resource_scheduler( - scheduling::NodeID("0"), initial_resources, is_node_available_fn_); + io_context, scheduling::NodeID("0"), initial_resources, is_node_available_fn_); NodeResources other_node_resources = CreateNodeResources({{ResourceID::CPU(), 1}, {ResourceID::Memory(), 1}, {ResourceID::GPU(), 1}, @@ -1301,8 +1335,9 @@ TEST_F(ClusterResourceSchedulerTest, ObjectStoreMemoryUsageTest) { {"object_store_memory", 1000 * 1024 * 1024}}); int64_t used_object_store_memory = 250 * 1024 * 1024; int64_t *ptr = &used_object_store_memory; + instrumented_io_context io_context; ClusterResourceScheduler resource_scheduler( - scheduling::NodeID("0"), initial_resources, is_node_available_fn_, [&] { + io_context, scheduling::NodeID("0"), initial_resources, is_node_available_fn_, [&] { return *ptr; }); NodeResources other_node_resources = CreateNodeResources({{ResourceID::CPU(), 1}, @@ -1391,8 +1426,9 @@ TEST_F(ClusterResourceSchedulerTest, ObjectStoreMemoryUsageTest) { TEST_F(ClusterResourceSchedulerTest, DirtyLocalViewTest) { absl::flat_hash_map initial_resources({{"CPU", 1}}); + instrumented_io_context io_service; ClusterResourceScheduler resource_scheduler( - scheduling::NodeID("local"), initial_resources, is_node_available_fn_); + io_service, 
scheduling::NodeID("local"), initial_resources, is_node_available_fn_); auto remote = scheduling::NodeID(NodeID::FromRandom().Binary()); resource_scheduler.GetClusterResourceManager().AddOrUpdateNode( remote, {{"CPU", 2.}}, {{"CPU", 2.}}); @@ -1456,8 +1492,9 @@ TEST_F(ClusterResourceSchedulerTest, DirtyLocalViewTest) { } TEST_F(ClusterResourceSchedulerTest, DynamicResourceTest) { + instrumented_io_context io_context; ClusterResourceScheduler resource_scheduler( - scheduling::NodeID("local"), {{"CPU", 2}}, is_node_available_fn_); + io_context, scheduling::NodeID("local"), {{"CPU", 2}}, is_node_available_fn_); absl::flat_hash_map resource_request = {{"CPU", 1}, {"custom123", 2}}; @@ -1526,8 +1563,9 @@ TEST_F(ClusterResourceSchedulerTest, DynamicResourceTest) { } TEST_F(ClusterResourceSchedulerTest, AvailableResourceEmptyTest) { + instrumented_io_context io_context; ClusterResourceScheduler resource_scheduler( - scheduling::NodeID("local"), {{"custom123", 5}}, is_node_available_fn_); + io_context, scheduling::NodeID("local"), {{"custom123", 5}}, is_node_available_fn_); std::shared_ptr resource_instances = std::make_shared(); absl::flat_hash_map resource_request = {{"custom123", 5}}; @@ -1541,8 +1579,9 @@ TEST_F(ClusterResourceSchedulerTest, AvailableResourceEmptyTest) { TEST_F(ClusterResourceSchedulerTest, TestForceSpillback) { absl::flat_hash_map resource_spec({{"CPU", 1}}); + instrumented_io_context io_context; ClusterResourceScheduler resource_scheduler( - scheduling::NodeID("local"), resource_spec, is_node_available_fn_); + io_context, scheduling::NodeID("local"), resource_spec, is_node_available_fn_); std::vector node_ids; for (int i = 0; i < 100; i++) { node_ids.emplace_back(NodeID::FromRandom().Binary()); @@ -1603,8 +1642,11 @@ TEST_F(ClusterResourceSchedulerTest, TestForceSpillback) { TEST_F(ClusterResourceSchedulerTest, CustomResourceInstanceTest) { SetUnitInstanceResourceIds({ResourceID("FPGA")}); - ClusterResourceScheduler resource_scheduler( - 
scheduling::NodeID("local"), {{"CPU", 4}, {"FPGA", 2}}, is_node_available_fn_); + instrumented_io_context io_context; + ClusterResourceScheduler resource_scheduler(io_context, + scheduling::NodeID("local"), + {{"CPU", 4}, {"FPGA", 2}}, + is_node_available_fn_); auto fpga_resource_id = ResourceID("FPGA"); @@ -1631,7 +1673,9 @@ TEST_F(ClusterResourceSchedulerTest, CustomResourceInstanceTest) { TEST_F(ClusterResourceSchedulerTest, TaskResourceInstancesSerializedStringTest) { SetUnitInstanceResourceIds({ResourceID("GPU")}); - ClusterResourceScheduler resource_scheduler(scheduling::NodeID("local"), + instrumented_io_context io_context; + ClusterResourceScheduler resource_scheduler(io_context, + scheduling::NodeID("local"), {{"CPU", 4}, {"memory", 4}, {"GPU", 2}}, is_node_available_fn_); std::shared_ptr cluster_resources = @@ -1651,6 +1695,7 @@ TEST_F(ClusterResourceSchedulerTest, TaskResourceInstancesSerializedStringTest) cluster_instance_resources->Set(ResourceID::Memory(), {4.}); cluster_instance_resources->Set(ResourceID::GPU(), {1., 1.}); ClusterResourceScheduler resource_scheduler_cpu_instance( + io_context, scheduling::NodeID("local"), {{"CPU", 4}, {"memory", 4}, {"GPU", 2}}, is_node_available_fn_); @@ -1671,8 +1716,11 @@ TEST_F(ClusterResourceSchedulerTest, AffinityWithBundleScheduleTest) { CreateResourceRequest(AddPlacementGroupConstraint( {{"CPU", 1}, {"memory", 100}}, bundle_1.first, bundle_1.second)); NodeResources node_resources = NodeResources(bundle_resource_request); - ClusterResourceScheduler resource_scheduler( - scheduling::NodeID(node_1.Binary()), node_resources, is_node_available_fn_); + instrumented_io_context io_service; + ClusterResourceScheduler resource_scheduler(io_service, + scheduling::NodeID(node_1.Binary()), + node_resources, + is_node_available_fn_); ResourceRequest bundle_resource_request_2 = CreateResourceRequest(AddPlacementGroupConstraint( {{"CPU", 1}, {"memory", 100}}, bundle_2.first, bundle_2.second)); diff --git 
a/src/ray/raylet/scheduling/cluster_task_manager_test.cc b/src/ray/raylet/scheduling/cluster_task_manager_test.cc index f95c51c2aac3..de2bd227996c 100644 --- a/src/ray/raylet/scheduling/cluster_task_manager_test.cc +++ b/src/ray/raylet/scheduling/cluster_task_manager_test.cc @@ -131,8 +131,9 @@ std::shared_ptr CreateSingleNodeScheduler( local_node_resources[ray::kCPU_ResourceLabel] = num_cpus; local_node_resources[ray::kGPU_ResourceLabel] = num_gpus; local_node_resources[ray::kMemory_ResourceLabel] = 128; - + static instrumented_io_context io_context; auto scheduler = std::make_shared( + io_context, scheduling::NodeID(id), local_node_resources, /*is_node_available_fn*/ [&gcs_client](scheduling::NodeID node_id) { diff --git a/src/ray/raylet/scheduling/policy/hybrid_scheduling_policy_test.cc b/src/ray/raylet/scheduling/policy/hybrid_scheduling_policy_test.cc index 7f8772b503ae..786fc52aac61 100644 --- a/src/ray/raylet/scheduling/policy/hybrid_scheduling_policy_test.cc +++ b/src/ray/raylet/scheduling/policy/hybrid_scheduling_policy_test.cc @@ -68,13 +68,6 @@ class HybridSchedulingPolicyTest : public ::testing::Test { schedule_top_k_absolute, scheduler_top_k_fraction); } - - ClusterResourceManager MockClusterResourceManager( - const absl::flat_hash_map &nodes) { - ClusterResourceManager cluster_resource_manager; - cluster_resource_manager.nodes_ = nodes; - return cluster_resource_manager; - } }; TEST_F(HybridSchedulingPolicyTest, GetBestNode) { diff --git a/src/ray/raylet/scheduling/policy/scheduling_policy_test.cc b/src/ray/raylet/scheduling/policy/scheduling_policy_test.cc index 86d32b1d547d..56a56a3317ae 100644 --- a/src/ray/raylet/scheduling/policy/scheduling_policy_test.cc +++ b/src/ray/raylet/scheduling/policy/scheduling_policy_test.cc @@ -66,10 +66,11 @@ class SchedulingPolicyTest : public ::testing::Test { scheduler_top_k_fraction); } - ClusterResourceManager MockClusterResourceManager( + std::unique_ptr MockClusterResourceManager( const absl::flat_hash_map 
&nodes) { - ClusterResourceManager cluster_resource_manager; - cluster_resource_manager.nodes_ = nodes; + static instrumented_io_context io_context; + auto cluster_resource_manager = std::make_unique(io_context); + cluster_resource_manager->nodes_ = nodes; return cluster_resource_manager; } }; @@ -86,7 +87,7 @@ TEST_F(SchedulingPolicyTest, NodeAffinityPolicyTest) { auto cluster_resource_manager = MockClusterResourceManager(nodes); raylet_scheduling_policy::CompositeSchedulingPolicy scheduling_policy( - scheduling::NodeID("local"), cluster_resource_manager, [](auto) { return true; }); + scheduling::NodeID("local"), *cluster_resource_manager, [](auto) { return true; }); auto to_schedule = scheduling_policy.Schedule( req, SchedulingOptions::NodeAffinity(false, false, "local", false)); @@ -140,7 +141,7 @@ TEST_F(SchedulingPolicyTest, SpreadPolicyTest) { nodes.emplace(remote_node_3, CreateNodeResources(20, 20, 0, 0, 0, 0)); auto cluster_resource_manager = MockClusterResourceManager(nodes); raylet_scheduling_policy::CompositeSchedulingPolicy scheduling_policy( - local_node, cluster_resource_manager, [](auto) { return true; }); + local_node, *cluster_resource_manager, [](auto) { return true; }); auto to_schedule = scheduling_policy.Schedule(req, SchedulingOptions::Spread(false, false)); @@ -178,7 +179,7 @@ TEST_F(SchedulingPolicyTest, RandomPolicyTest) { auto cluster_resource_manager = MockClusterResourceManager(nodes); raylet_scheduling_policy::CompositeSchedulingPolicy scheduling_policy( - local_node, cluster_resource_manager, [](auto) { return true; }); + local_node, *cluster_resource_manager, [](auto) { return true; }); std::map decisions; size_t num_node_0_picks = 0; @@ -268,7 +269,7 @@ TEST_F(SchedulingPolicyTest, AvailableTruncationTest) { auto cluster_resource_manager = MockClusterResourceManager(nodes); auto to_schedule = raylet_scheduling_policy::CompositeSchedulingPolicy( - local_node, cluster_resource_manager, [](auto) { return true; }) + local_node, 
*cluster_resource_manager, [](auto) { return true; }) .Schedule(req, HybridOptions(0.51, false, false)); ASSERT_EQ(to_schedule, local_node); } @@ -283,7 +284,7 @@ TEST_F(SchedulingPolicyTest, AvailableTieBreakTest) { auto cluster_resource_manager = MockClusterResourceManager(nodes); auto to_schedule = raylet_scheduling_policy::CompositeSchedulingPolicy( - local_node, cluster_resource_manager, [](auto) { return true; }) + local_node, *cluster_resource_manager, [](auto) { return true; }) .Schedule(req, HybridOptions(0.50, false, false)); ASSERT_EQ(to_schedule, remote_node); } @@ -298,7 +299,7 @@ TEST_F(SchedulingPolicyTest, AvailableOverFeasibleTest) { auto cluster_resource_manager = MockClusterResourceManager(nodes); auto to_schedule = raylet_scheduling_policy::CompositeSchedulingPolicy( - local_node, cluster_resource_manager, [](auto) { return true; }) + local_node, *cluster_resource_manager, [](auto) { return true; }) .Schedule(req, HybridOptions(0.50, false, false)); ASSERT_EQ(to_schedule, remote_node); } @@ -311,7 +312,7 @@ TEST_F(SchedulingPolicyTest, InfeasibleTest) { auto cluster_resource_manager = MockClusterResourceManager(nodes); auto to_schedule = raylet_scheduling_policy::CompositeSchedulingPolicy( - local_node, cluster_resource_manager, [](auto) { return true; }) + local_node, *cluster_resource_manager, [](auto) { return true; }) .Schedule(req, HybridOptions(0.50, false, false)); ASSERT_TRUE(to_schedule.IsNil()); } @@ -325,7 +326,7 @@ TEST_F(SchedulingPolicyTest, BarelyFeasibleTest) { auto cluster_resource_manager = MockClusterResourceManager(nodes); auto to_schedule = raylet_scheduling_policy::CompositeSchedulingPolicy( - local_node, cluster_resource_manager, [](auto) { return true; }) + local_node, *cluster_resource_manager, [](auto) { return true; }) .Schedule(req, HybridOptions(0.50, false, false)); ASSERT_EQ(to_schedule, local_node); } @@ -339,7 +340,7 @@ TEST_F(SchedulingPolicyTest, TruncationAcrossFeasibleNodesTest) { auto 
cluster_resource_manager = MockClusterResourceManager(nodes); auto to_schedule = raylet_scheduling_policy::CompositeSchedulingPolicy( - local_node, cluster_resource_manager, [](auto) { return true; }) + local_node, *cluster_resource_manager, [](auto) { return true; }) .Schedule(req, HybridOptions(0.51, false, false)); ASSERT_EQ(to_schedule, local_node); } @@ -353,7 +354,7 @@ TEST_F(SchedulingPolicyTest, ForceSpillbackIfAvailableTest) { auto cluster_resource_manager = MockClusterResourceManager(nodes); auto to_schedule = raylet_scheduling_policy::CompositeSchedulingPolicy( - local_node, cluster_resource_manager, [](auto) { return true; }) + local_node, *cluster_resource_manager, [](auto) { return true; }) .Schedule(req, HybridOptions(0.51, true, true)); ASSERT_EQ(to_schedule, remote_node); } @@ -370,7 +371,7 @@ TEST_F(SchedulingPolicyTest, AvoidSchedulingCPURequestsOnGPUNodes) { const ResourceRequest req = ResourceMapToResourceRequest({{"CPU", 1}}, false); const auto to_schedule = raylet_scheduling_policy::CompositeSchedulingPolicy( - local_node, cluster_resource_manager, [](auto) { return true; }) + local_node, *cluster_resource_manager, [](auto) { return true; }) .Schedule(ResourceMapToResourceRequest({{"CPU", 1}}, false), HybridOptions(0.51, false, true, true)); ASSERT_EQ(to_schedule, remote_node); @@ -380,7 +381,7 @@ TEST_F(SchedulingPolicyTest, AvoidSchedulingCPURequestsOnGPUNodes) { const ResourceRequest req = ResourceMapToResourceRequest({{"GPU", 1}}, false); const auto to_schedule = raylet_scheduling_policy::CompositeSchedulingPolicy( - local_node, cluster_resource_manager, [](auto) { return true; }) + local_node, *cluster_resource_manager, [](auto) { return true; }) .Schedule(req, HybridOptions(0.51, false, true, true)); ASSERT_EQ(to_schedule, local_node); } @@ -389,7 +390,7 @@ TEST_F(SchedulingPolicyTest, AvoidSchedulingCPURequestsOnGPUNodes) { const ResourceRequest req = ResourceMapToResourceRequest({{"CPU", 1}}, false); const auto to_schedule = 
raylet_scheduling_policy::CompositeSchedulingPolicy( - local_node, cluster_resource_manager, [](auto) { return true; }) + local_node, *cluster_resource_manager, [](auto) { return true; }) .Schedule(req, HybridOptions(0.51, false, true, true)); ASSERT_EQ(to_schedule, remote_node); } @@ -399,7 +400,7 @@ TEST_F(SchedulingPolicyTest, AvoidSchedulingCPURequestsOnGPUNodes) { ResourceMapToResourceRequest({{"CPU", 1}, {"GPU", 1}}, false); const auto to_schedule = raylet_scheduling_policy::CompositeSchedulingPolicy( - local_node, cluster_resource_manager, [](auto) { return true; }) + local_node, *cluster_resource_manager, [](auto) { return true; }) .Schedule(req, HybridOptions(0.51, false, true, true)); ASSERT_EQ(to_schedule, local_node); } @@ -415,7 +416,7 @@ TEST_F(SchedulingPolicyTest, SchedulenCPURequestsOnGPUNodeAsALastResort) { auto cluster_resource_manager = MockClusterResourceManager(nodes); const auto to_schedule = raylet_scheduling_policy::CompositeSchedulingPolicy( - local_node, cluster_resource_manager, [](auto) { return true; }) + local_node, *cluster_resource_manager, [](auto) { return true; }) .Schedule(req, HybridOptions(0.51, false, true, true)); ASSERT_EQ(to_schedule, remote_node); } @@ -429,7 +430,7 @@ TEST_F(SchedulingPolicyTest, ForceSpillbackTest) { auto cluster_resource_manager = MockClusterResourceManager(nodes); auto to_schedule = raylet_scheduling_policy::CompositeSchedulingPolicy( - local_node, cluster_resource_manager, [](auto) { return true; }) + local_node, *cluster_resource_manager, [](auto) { return true; }) .Schedule(req, HybridOptions(0.51, true, false)); ASSERT_EQ(to_schedule, remote_node); } @@ -444,7 +445,7 @@ TEST_F(SchedulingPolicyTest, ForceSpillbackOnlyFeasibleLocallyTest) { auto cluster_resource_manager = MockClusterResourceManager(nodes); auto to_schedule = raylet_scheduling_policy::CompositeSchedulingPolicy( - local_node, cluster_resource_manager, [](auto) { return true; }) + local_node, *cluster_resource_manager, [](auto) { 
return true; }) .Schedule(req, HybridOptions(0.51, true, false)); ASSERT_TRUE(to_schedule.IsNil()); } @@ -462,7 +463,7 @@ TEST_F(SchedulingPolicyTest, NonGpuNodePreferredSchedulingTest) { ResourceRequest req = ResourceMapToResourceRequest({{"CPU", 1}}, false); auto to_schedule = raylet_scheduling_policy::CompositeSchedulingPolicy( - local_node, cluster_resource_manager, [](auto) { return true; }) + local_node, *cluster_resource_manager, [](auto) { return true; }) .Schedule(req, HybridOptions(0.51, false, @@ -472,7 +473,7 @@ TEST_F(SchedulingPolicyTest, NonGpuNodePreferredSchedulingTest) { req = ResourceMapToResourceRequest({{"CPU", 3}}, false); to_schedule = raylet_scheduling_policy::CompositeSchedulingPolicy( - local_node, cluster_resource_manager, [](auto) { return true; }) + local_node, *cluster_resource_manager, [](auto) { return true; }) .Schedule(req, HybridOptions(0.51, false, @@ -482,7 +483,7 @@ TEST_F(SchedulingPolicyTest, NonGpuNodePreferredSchedulingTest) { req = ResourceMapToResourceRequest({{"CPU", 1}, {"GPU", 1}}, false); to_schedule = raylet_scheduling_policy::CompositeSchedulingPolicy( - local_node, cluster_resource_manager, [](auto) { return true; }) + local_node, *cluster_resource_manager, [](auto) { return true; }) .Schedule(req, HybridOptions(0.51, false, @@ -492,7 +493,7 @@ TEST_F(SchedulingPolicyTest, NonGpuNodePreferredSchedulingTest) { req = ResourceMapToResourceRequest({{"CPU", 2}}, false); to_schedule = raylet_scheduling_policy::CompositeSchedulingPolicy( - local_node, cluster_resource_manager, [](auto) { return true; }) + local_node, *cluster_resource_manager, [](auto) { return true; }) .Schedule(req, HybridOptions(0.51, false, @@ -522,22 +523,22 @@ TEST_F(SchedulingPolicyTest, BundleSchedulingMaxFractionTest) { auto cluster_resource_manager = MockClusterResourceManager(nodes); // req is unscheduleable because the max cpu fraction reaches 0.5. 
auto unscheduable = raylet_scheduling_policy::BundlePackSchedulingPolicy( - cluster_resource_manager, [](auto) { return true; }) + *cluster_resource_manager, [](auto) { return true; }) .Schedule(req_list, pack_op); ASSERT_TRUE(unscheduable.status.IsFailed()); unscheduable = raylet_scheduling_policy::BundleSpreadSchedulingPolicy( - cluster_resource_manager, [](auto) { return true; }) + *cluster_resource_manager, [](auto) { return true; }) .Schedule(req_list, spread_op); ASSERT_TRUE(unscheduable.status.IsFailed()); unscheduable = raylet_scheduling_policy::BundleStrictPackSchedulingPolicy( - cluster_resource_manager, [](auto) { return true; }) + *cluster_resource_manager, [](auto) { return true; }) .Schedule(req_list, strict_pack_op); ASSERT_TRUE(unscheduable.status.IsInfeasible()); unscheduable = raylet_scheduling_policy::BundleStrictSpreadSchedulingPolicy( - cluster_resource_manager, [](auto) { return true; }) + *cluster_resource_manager, [](auto) { return true; }) .Schedule(req_list, strict_spread_op); ASSERT_TRUE(unscheduable.status.IsInfeasible()); } @@ -559,7 +560,7 @@ TEST_F(SchedulingPolicyTest, BundleSchedulingMaxFractionOneCpuReservationGuarant auto cluster_resource_manager = MockClusterResourceManager(nodes); // req is unscheduleable because the max cpu fraction reaches 0.5. auto to_schedule = raylet_scheduling_policy::BundlePackSchedulingPolicy( - cluster_resource_manager, [](auto) { return true; }) + *cluster_resource_manager, [](auto) { return true; }) .Schedule(req_list, pack_op); ASSERT_TRUE(to_schedule.status.IsSuccess()); } @@ -582,7 +583,7 @@ TEST_F(SchedulingPolicyTest, auto cluster_resource_manager = MockClusterResourceManager(nodes); // req is unscheduleable because the max cpu fraction reaches 0.5. 
auto to_schedule = raylet_scheduling_policy::BundlePackSchedulingPolicy( - cluster_resource_manager, [](auto) { return true; }) + *cluster_resource_manager, [](auto) { return true; }) .Schedule(req_list, pack_op); ASSERT_TRUE(to_schedule.status.IsSuccess()); @@ -590,7 +591,7 @@ TEST_F(SchedulingPolicyTest, auto to_schedule_task = raylet_scheduling_policy::CompositeSchedulingPolicy( - local_node, cluster_resource_manager, [](auto) { return true; }) + local_node, *cluster_resource_manager, [](auto) { return true; }) .Schedule(req, HybridOptions(0.50, false, false)); ASSERT_TRUE(!to_schedule_task.IsNil()); } @@ -615,7 +616,7 @@ TEST_F(SchedulingPolicyTest, BundleSchedulingMaxFractionWorkingWhenNormalResourc auto cluster_resource_manager = MockClusterResourceManager(nodes); // req is unscheduleable because the max cpu fraction reaches 0.5. auto to_schedule = raylet_scheduling_policy::BundlePackSchedulingPolicy( - cluster_resource_manager, [](auto) { return true; }) + *cluster_resource_manager, [](auto) { return true; }) .Schedule(req_list, pack_op); ASSERT_TRUE(to_schedule.status.IsSuccess()); } From a5fd48ff8e1418ec3355520c5854381056563b59 Mon Sep 17 00:00:00 2001 From: Philipp Moritz Date: Wed, 26 Apr 2023 01:23:00 -0700 Subject: [PATCH 104/424] Move datastructures in GlobalStateAccessor to Cython (#34706) More progress along the lines of https://github.com/ray-project/ray/pull/33769 to remove Python gRPC from Ray Core. 
--- python/ray/_private/node.py | 8 ++-- python/ray/_private/state.py | 30 +----------- python/ray/includes/common.pxd | 15 ++++++ python/ray/includes/global_state_accessor.pxi | 48 ++++++++++++++++--- python/ray/tests/test_component_failures_2.py | 4 +- python/ray/tests/test_global_state.py | 22 ++++----- python/ray/tests/test_ray_init_2.py | 2 +- src/ray/gcs/gcs_client/gcs_client.cc | 6 +++ src/ray/gcs/gcs_client/gcs_client.h | 3 ++ 9 files changed, 83 insertions(+), 55 deletions(-) diff --git a/python/ray/_private/node.py b/python/ray/_private/node.py index e5ec183cc165..0c6339a85b7f 100644 --- a/python/ray/_private/node.py +++ b/python/ray/_private/node.py @@ -239,9 +239,9 @@ def __init__( self.gcs_address, self._raylet_ip_address, ) - self._plasma_store_socket_name = node_info.object_store_socket_name - self._raylet_socket_name = node_info.raylet_socket_name - self._ray_params.node_manager_port = node_info.node_manager_port + self._plasma_store_socket_name = node_info["object_store_socket_name"] + self._raylet_socket_name = node_info["raylet_socket_name"] + self._ray_params.node_manager_port = node_info["node_manager_port"] else: # If the user specified a socket name, use it. self._plasma_store_socket_name = self._prepare_socket_file( @@ -304,7 +304,7 @@ def __init__( self._raylet_ip_address, ) if self._ray_params.node_manager_port == 0: - self._ray_params.node_manager_port = node_info.node_manager_port + self._ray_params.node_manager_port = node_info["node_manager_port"] # Makes sure the Node object has valid addresses after setup. 
self.validate_ip_port(self.address) diff --git a/python/ray/_private/state.py b/python/ray/_private/state.py index 12ad9ea8d217..40e94e9c5db5 100644 --- a/python/ray/_private/state.py +++ b/python/ray/_private/state.py @@ -147,32 +147,7 @@ def node_table(self): """ self._check_connected() - node_table = self.global_state_accessor.get_node_table() - - results = [] - for node_info_item in node_table: - item = gcs_utils.GcsNodeInfo.FromString(node_info_item) - node_info = { - "NodeID": ray._private.utils.binary_to_hex(item.node_id), - "Alive": item.state - == gcs_utils.GcsNodeInfo.GcsNodeState.Value("ALIVE"), - "NodeManagerAddress": item.node_manager_address, - "NodeManagerHostname": item.node_manager_hostname, - "NodeManagerPort": item.node_manager_port, - "ObjectManagerPort": item.object_manager_port, - "ObjectStoreSocketName": item.object_store_socket_name, - "RayletSocketName": item.raylet_socket_name, - "MetricsExportPort": item.metrics_export_port, - "NodeName": item.node_name, - } - node_info["alive"] = node_info["Alive"] - node_info["Resources"] = ( - {key: value for key, value in item.resources_total.items()} - if node_info["Alive"] - else {} - ) - results.append(node_info) - return results + return self.global_state_accessor.get_node_table() def job_table(self): """Fetch and parse the gcs job table. 
@@ -749,10 +724,9 @@ def get_system_config(self): def get_node_to_connect_for_driver(self, node_ip_address): """Get the node to connect for a Ray driver.""" self._check_connected() - node_info_str = self.global_state_accessor.get_node_to_connect_for_driver( + return self.global_state_accessor.get_node_to_connect_for_driver( node_ip_address ) - return gcs_utils.GcsNodeInfo.FromString(node_info_str) state = GlobalState() diff --git a/python/ray/includes/common.pxd b/python/ray/includes/common.pxd index 8de7c38c4cd2..e0f8b8ee9712 100644 --- a/python/ray/includes/common.pxd +++ b/python/ray/includes/common.pxd @@ -342,6 +342,10 @@ cdef extern from "ray/gcs/gcs_client/gcs_client.h" nogil: CRayStatus GetAllJobInfo( int64_t timeout_ms, c_vector[CJobTableData]& result) +cdef extern from "ray/gcs/gcs_client/gcs_client.h" namespace "ray::gcs" nogil: + unordered_map[c_string, double] PythonGetResourcesTotal( + const CGcsNodeInfo& node_info) + cdef extern from "src/ray/protobuf/gcs.pb.h" nogil: cdef cppclass CJobConfig "ray::rpc::JobConfig": c_string ray_namespace() const @@ -351,6 +355,17 @@ cdef extern from "src/ray/protobuf/gcs.pb.h" nogil: c_string node_id() const c_string node_name() const int state() const + c_string node_manager_address() const + c_string node_manager_hostname() const + int node_manager_port() const + int object_manager_port() const + c_string object_store_socket_name() const + c_string raylet_socket_name() const + int metrics_export_port() const + void ParseFromString(const c_string &serialized) + + cdef enum CGcsNodeState "ray::rpc::GcsNodeInfo_GcsNodeState": + ALIVE "ray::rpc::GcsNodeInfo_GcsNodeState_ALIVE", cdef cppclass CJobTableData "ray::rpc::JobTableData": c_string job_id() const diff --git a/python/ray/includes/global_state_accessor.pxi b/python/ray/includes/global_state_accessor.pxi index 8492ee56a89b..55c36f79c7a7 100644 --- a/python/ray/includes/global_state_accessor.pxi +++ b/python/ray/includes/global_state_accessor.pxi @@ -1,5 +1,7 @@ 
from ray.includes.common cimport ( - CGcsClientOptions + CGcsClientOptions, + CGcsNodeState, + PythonGetResourcesTotal ) from ray.includes.unique_ids cimport ( @@ -51,10 +53,38 @@ cdef class GlobalStateAccessor: return cjob_id.ToInt() def get_node_table(self): - cdef c_vector[c_string] result - with nogil: - result = self.inner.get().GetAllNodeInfo() - return result + cdef: + c_vector[c_string] items + c_string item + CGcsNodeInfo c_node_info + unordered_map[c_string, double] c_resources + with nogil: + items = self.inner.get().GetAllNodeInfo() + results = [] + for item in items: + c_node_info.ParseFromString(item) + node_info = { + "NodeID": ray._private.utils.binary_to_hex(c_node_info.node_id()), + "Alive": c_node_info.state() == CGcsNodeState.ALIVE, + "NodeManagerAddress": c_node_info.node_manager_address().decode(), + "NodeManagerHostname": c_node_info.node_manager_hostname().decode(), + "NodeManagerPort": c_node_info.node_manager_port(), + "ObjectManagerPort": c_node_info.object_manager_port(), + "ObjectStoreSocketName": + c_node_info.object_store_socket_name().decode(), + "RayletSocketName": c_node_info.raylet_socket_name().decode(), + "MetricsExportPort": c_node_info.metrics_export_port(), + "NodeName": c_node_info.node_name().decode(), + } + node_info["alive"] = node_info["Alive"] + c_resources = PythonGetResourcesTotal(c_node_info) + node_info["Resources"] = ( + {key.decode(): value for key, value in c_resources} + if node_info["Alive"] + else {} + ) + results.append(node_info) + return results def get_all_available_resources(self): cdef c_vector[c_string] result @@ -149,9 +179,15 @@ cdef class GlobalStateAccessor: cdef CRayStatus status cdef c_string cnode_ip_address = node_ip_address cdef c_string cnode_to_connect + cdef CGcsNodeInfo c_node_info with nogil: status = self.inner.get().GetNodeToConnectForDriver( cnode_ip_address, &cnode_to_connect) if not status.ok(): raise RuntimeError(status.message()) - return cnode_to_connect + 
c_node_info.ParseFromString(cnode_to_connect) + return { + "object_store_socket_name": c_node_info.object_store_socket_name().decode(), + "raylet_socket_name": c_node_info.raylet_socket_name().decode(), + "node_manager_port": c_node_info.node_manager_port(), + } diff --git a/python/ray/tests/test_component_failures_2.py b/python/ray/tests/test_component_failures_2.py index e39bdc99a34a..35fc1b385183 100644 --- a/python/ray/tests/test_component_failures_2.py +++ b/python/ray/tests/test_component_failures_2.py @@ -127,7 +127,7 @@ def get_node_info(): cluster.head_node.node_ip_address, ) - assert get_node_info().raylet_socket_name == cluster.head_node.raylet_socket_name + assert get_node_info()["raylet_socket_name"] == cluster.head_node.raylet_socket_name cluster.head_node.kill_raylet() wait_for_condition( @@ -137,7 +137,7 @@ def get_node_info(): get_node_info() node2 = cluster.add_node() - assert get_node_info().raylet_socket_name == node2.raylet_socket_name + assert get_node_info()["raylet_socket_name"] == node2.raylet_socket_name if __name__ == "__main__": diff --git a/python/ray/tests/test_global_state.py b/python/ray/tests/test_global_state.py index 17899b74b6f5..facc69ad9b81 100644 --- a/python/ray/tests/test_global_state.py +++ b/python/ray/tests/test_global_state.py @@ -168,15 +168,11 @@ def test_node_name_cluster(ray_start_cluster): global_state_accessor = make_global_state_accessor(head_context) node_table = global_state_accessor.get_node_table() assert len(node_table) == 2 - for node_data in node_table: - node = gcs_utils.GcsNodeInfo.FromString(node_data) - if ( - ray._private.utils.binary_to_hex(node.node_id) - == head_context.address_info["node_id"] - ): - assert node.node_name == "head_node" + for node in node_table: + if node["NodeID"] == head_context.address_info["node_id"]: + assert node["NodeName"] == "head_node" else: - assert node.node_name == "worker_node" + assert node["NodeName"] == "worker_node" global_state_accessor.disconnect() ray.shutdown() 
@@ -188,9 +184,8 @@ def test_node_name_init(): new_head_context = ray.init(_node_name="new_head_node", include_dashboard=False) global_state_accessor = make_global_state_accessor(new_head_context) - node_data = global_state_accessor.get_node_table()[0] - node = gcs_utils.GcsNodeInfo.FromString(node_data) - assert node.node_name == "new_head_node" + node = global_state_accessor.get_node_table()[0] + assert node["NodeName"] == "new_head_node" ray.shutdown() @@ -198,9 +193,8 @@ def test_no_node_name(): # Test that starting ray with no node name will result in a node_name=ip_address new_head_context = ray.init(include_dashboard=False) global_state_accessor = make_global_state_accessor(new_head_context) - node_data = global_state_accessor.get_node_table()[0] - node = gcs_utils.GcsNodeInfo.FromString(node_data) - assert node.node_name == ray.util.get_node_ip_address() + node = global_state_accessor.get_node_table()[0] + assert node["NodeName"] == ray.util.get_node_ip_address() ray.shutdown() diff --git a/python/ray/tests/test_ray_init_2.py b/python/ray/tests/test_ray_init_2.py index ddf0e66782b2..3f37dd701308 100644 --- a/python/ray/tests/test_ray_init_2.py +++ b/python/ray/tests/test_ray_init_2.py @@ -294,7 +294,7 @@ def test_ray_init_from_workers(ray_start_cluster): node_info = ray._private.services.get_node_to_connect_for_driver( cluster.gcs_address, "127.0.0.3" ) - assert node_info.node_manager_port == node2.node_manager_port + assert node_info["node_manager_port"] == node2.node_manager_port def test_default_resource_not_allowed_error(shutdown_only): diff --git a/src/ray/gcs/gcs_client/gcs_client.cc b/src/ray/gcs/gcs_client/gcs_client.cc index 79b8be674404..9e7d3504f882 100644 --- a/src/ray/gcs/gcs_client/gcs_client.cc +++ b/src/ray/gcs/gcs_client/gcs_client.cc @@ -398,5 +398,11 @@ Status PythonGcsClient::GetAllJobInfo(int64_t timeout_ms, return Status::RpcError(status.error_message(), status.error_code()); } +std::unordered_map PythonGetResourcesTotal( + const 
rpc::GcsNodeInfo &node_info) { + return std::unordered_map(node_info.resources_total().begin(), + node_info.resources_total().end()); +} + } // namespace gcs } // namespace ray diff --git a/src/ray/gcs/gcs_client/gcs_client.h b/src/ray/gcs/gcs_client/gcs_client.h index 80e41341ad87..032e040c5035 100644 --- a/src/ray/gcs/gcs_client/gcs_client.h +++ b/src/ray/gcs/gcs_client/gcs_client.h @@ -231,6 +231,9 @@ class RAY_EXPORT PythonGcsClient { std::shared_ptr channel_; }; +std::unordered_map PythonGetResourcesTotal( + const rpc::GcsNodeInfo &node_info); + } // namespace gcs } // namespace ray From 20a09630fd66ba5073bc966ef537499f93148e36 Mon Sep 17 00:00:00 2001 From: Sven Mika Date: Wed, 26 Apr 2023 11:36:03 +0200 Subject: [PATCH 105/424] [RLlib] APPO+new-stack (Atari benchmark) - Preparatory PR 01. (#34743) --- rllib/BUILD | 25 ++++----- rllib/algorithms/algorithm.py | 2 +- rllib/algorithms/appo/appo_tf_policy.py | 3 +- rllib/algorithms/appo/appo_torch_policy.py | 3 +- rllib/algorithms/appo/utils.py | 1 + rllib/algorithms/impala/impala_tf_policy.py | 2 +- .../impala/torch/vtrace_torch_v2.py | 2 +- .../ppo/torch/ppo_torch_rl_module.py | 13 ----- rllib/algorithms/slateq/slateq_tf_policy.py | 9 --- rllib/core/learner/learner.py | 40 +++++++------ rllib/core/learner/learner_group_config.py | 2 +- rllib/core/learner/scaling_config.py | 4 +- .../core/learner/tests/test_learner_group.py | 4 +- rllib/core/models/tf/primitives.py | 8 +-- rllib/policy/eager_tf_policy_v2.py | 5 +- rllib/policy/torch_mixins.py | 31 +++++++--- rllib/tuned_examples/a2c/atari-a2c.yaml | 5 +- rllib/tuned_examples/a3c/pong-a3c.yaml | 5 +- .../apex_dqn/atari-apex-dqn.yaml | 5 +- .../apex_dqn/pong-apex-dqn.yaml | 5 +- .../appo/cartpole-appo-learner.yaml | 31 ---------- ...artpole-appo-w-rl-modules-and-learner.yaml | 27 +++++++++ rllib/tuned_examples/appo/cartpole-appo.yaml | 2 +- .../pong-appo-w-rl-modules-and-learner.yaml | 56 +++++++++++++++++++ rllib/tuned_examples/appo/pong-appo.yaml | 7 ++- 
rllib/tuned_examples/dqn/atari-dist-dqn.yaml | 5 +- rllib/tuned_examples/dqn/atari-dqn.yaml | 5 +- rllib/tuned_examples/dqn/atari-duel-ddqn.yaml | 5 +- rllib/tuned_examples/dqn/pong-dqn.yaml | 5 +- rllib/tuned_examples/dqn/pong-rainbow.yaml | 5 +- .../impala/atari-impala-large.yaml | 5 +- .../impala/atari-impala-multi-gpu.yaml | 5 +- rllib/tuned_examples/impala/atari-impala.yaml | 5 +- .../impala/pong-impala-fast.yaml | 5 +- .../impala/pong-impala-vectorized.yaml | 5 +- rllib/tuned_examples/impala/pong-impala.yaml | 5 +- rllib/tuned_examples/ppo/atari-ppo.yaml | 5 +- .../ppo/pendulum-ppo-with-rl-module.yaml | 4 +- rllib/tuned_examples/ppo/pong-ppo.yaml | 5 +- rllib/utils/actor_manager.py | 6 +- rllib/utils/minibatch_utils.py | 4 +- rllib/utils/torch_utils.py | 5 ++ 42 files changed, 243 insertions(+), 138 deletions(-) delete mode 100644 rllib/tuned_examples/appo/cartpole-appo-learner.yaml create mode 100644 rllib/tuned_examples/appo/cartpole-appo-w-rl-modules-and-learner.yaml create mode 100644 rllib/tuned_examples/appo/pong-appo-w-rl-modules-and-learner.yaml diff --git a/rllib/BUILD b/rllib/BUILD index 100df5ec4262..f2d4537eedda 100644 --- a/rllib/BUILD +++ b/rllib/BUILD @@ -171,6 +171,16 @@ py_test( args = ["--dir=tuned_examples/appo"] ) +py_test( + name = "learning_tests_cartpole_appo_w_rl_modules_and_learner", + main = "tests/run_regression_tests.py", + tags = ["team:rllib", "exclusive", "learning_tests", "learning_tests_cartpole", "learning_tests_discrete", "tf_only", "no_tf_static_graph"], + size = "medium", # bazel may complain about it being too long sometimes - medium is on purpose as some frameworks take longer + srcs = ["tests/run_regression_tests.py"], + data = ["tuned_examples/appo/cartpole-appo-w-rl-modules-and-learner.yaml"], + args = ["--dir=tuned_examples/appo"] +) + # py_test( # name = "learning_tests_cartpole_appo_vtrace", # main = "tests/run_regression_tests.py", @@ -589,8 +599,9 @@ py_test( args = ["--dir=tuned_examples/ppo"] ) +# TODO (Sven): 
Enable tf2 for this test. py_test( - name = "learning_tests_pendulum_ppo_with_rl_module_torch", + name = "learning_tests_pendulum_ppo_with_rl_module", main = "tests/run_regression_tests.py", tags = ["team:rllib", "exclusive", "learning_tests", "learning_tests_pendulum", "learning_tests_continuous", "torch_only"], size = "large", # bazel may complain about it being too long sometimes - large is on purpose as some frameworks take longer @@ -599,18 +610,6 @@ py_test( args = ["--dir=tuned_examples/ppo"] ) -# TODO (Kourosh): tf2 is way slower than torch, eager mode is no enabled, I wonder if -# it would get faster with eager mode once it is enabled. -# py_test( -# name = "learning_tests_pendulum_ppo_with_rl_module_tf2_eager", -# main = "tests/run_regression_tests.py", -# tags = ["team:rllib", "exclusive", "learning_tests", "learning_tests_pendulum", "learning_tests_continuous", "tf2_only", "no_tf_static_graph"], -# size = "large", # bazel may complain about it being too long sometimes - large is on purpose as some frameworks take longer -# srcs = ["tests/run_regression_tests.py"], -# data = ["tuned_examples/ppo/pendulum-ppo-with-rl-module.yaml"], -# args = ["--dir=tuned_examples/ppo"] -# ) - py_test( name = "learning_tests_multi_agent_pendulum_ppo", main = "tests/run_regression_tests.py", diff --git a/rllib/algorithms/algorithm.py b/rllib/algorithms/algorithm.py index 789039f28628..252f5a151f77 100644 --- a/rllib/algorithms/algorithm.py +++ b/rllib/algorithms/algorithm.py @@ -2158,7 +2158,7 @@ def default_resource_request( # Default logic for RLlib Algorithms: # Create one bundle per individual worker (local or remote). - # Use `num_cpus_for_driver` and `num_gpus` for the local worker and + # Use `num_cpus_for_local_worker` and `num_gpus` for the local worker and # `num_cpus_per_worker` and `num_gpus_per_worker` for the remote # workers to determine their CPU/GPU resource needs. 
diff --git a/rllib/algorithms/appo/appo_tf_policy.py b/rllib/algorithms/appo/appo_tf_policy.py index 9519cf28cfb8..d91b4516bfd7 100644 --- a/rllib/algorithms/appo/appo_tf_policy.py +++ b/rllib/algorithms/appo/appo_tf_policy.py @@ -48,8 +48,7 @@ logger = logging.getLogger(__name__) -# We need this builder function because we want to share the same -# custom logics between TF1 dynamic and TF2 eager policies. +# TODO (sven): Deprecate once APPO and IMPALA fully on RLModules/Learner APIs. def get_appo_tf_policy(name: str, base: type) -> type: """Construct an APPOTFPolicy inheriting either dynamic or eager base policies. diff --git a/rllib/algorithms/appo/appo_torch_policy.py b/rllib/algorithms/appo/appo_torch_policy.py index 2c65ed8d183c..b92b7c32fd51 100644 --- a/rllib/algorithms/appo/appo_torch_policy.py +++ b/rllib/algorithms/appo/appo_torch_policy.py @@ -54,8 +54,7 @@ logger = logging.getLogger(__name__) -# We need this builder function because we want to share the same -# custom logics between TF1 dynamic and TF2 eager policies. +# TODO (sven): Deprecate once APPO and IMPALA fully on RLModules/Learner APIs. class APPOTorchPolicy( VTraceOptimizer, LearningRateSchedule, diff --git a/rllib/algorithms/appo/utils.py b/rllib/algorithms/appo/utils.py index f0bae3e5113a..cbd2efe82161 100644 --- a/rllib/algorithms/appo/utils.py +++ b/rllib/algorithms/appo/utils.py @@ -6,6 +6,7 @@ TARGET_POLICY_SCOPE = "target_func" +# TODO (sven): Deprecate once APPO and IMPALA fully on RLModules/Learner APIs. def make_appo_models(policy) -> ModelV2: """Builds model and target model for APPO. diff --git a/rllib/algorithms/impala/impala_tf_policy.py b/rllib/algorithms/impala/impala_tf_policy.py index e0e005da69a2..e1b66f533212 100644 --- a/rllib/algorithms/impala/impala_tf_policy.py +++ b/rllib/algorithms/impala/impala_tf_policy.py @@ -85,7 +85,7 @@ def __init__( config: Algorithm config dict. """ - # Compute vtrace on the CPU for better perf. 
+ # Compute vtrace on the CPU for better performance. with tf.device("/cpu:0"): self.vtrace_returns = vtrace.multi_from_logits( behaviour_action_log_probs=behaviour_action_logp, diff --git a/rllib/algorithms/impala/torch/vtrace_torch_v2.py b/rllib/algorithms/impala/torch/vtrace_torch_v2.py index 404904109e9b..5b67c5772a18 100644 --- a/rllib/algorithms/impala/torch/vtrace_torch_v2.py +++ b/rllib/algorithms/impala/torch/vtrace_torch_v2.py @@ -70,7 +70,7 @@ def vtrace_torch( clip_rho_threshold: Union[float, "torch.Tensor"] = 1.0, clip_pg_rho_threshold: Union[float, "torch.Tensor"] = 1.0, ): - r"""V-trace for softmax policies implemented with torch. + """V-trace for softmax policies implemented with torch. Calculates V-trace actor critic targets for softmax polices as described in "IMPALA: Scalable Distributed Deep-RL with Importance Weighted Actor-Learner diff --git a/rllib/algorithms/ppo/torch/ppo_torch_rl_module.py b/rllib/algorithms/ppo/torch/ppo_torch_rl_module.py index 48459a580cb3..ad1c026424ba 100644 --- a/rllib/algorithms/ppo/torch/ppo_torch_rl_module.py +++ b/rllib/algorithms/ppo/torch/ppo_torch_rl_module.py @@ -13,19 +13,6 @@ torch, nn = try_import_torch() -def get_ppo_loss(fwd_in, fwd_out): - # TODO: we should replace these components later with real ppo components when - # RLOptimizer and RLModule are integrated together. 
- # this is not exactly a ppo loss, just something to show that the - # forward train works - adv = fwd_in[SampleBatch.REWARDS] - fwd_out[SampleBatch.VF_PREDS] - actor_loss = -(fwd_out[SampleBatch.ACTION_LOGP] * adv).mean() - critic_loss = (adv**2).mean() - loss = actor_loss + critic_loss - - return loss - - class PPOTorchRLModule(PPORLModuleBase, TorchRLModule): framework: str = "torch" diff --git a/rllib/algorithms/slateq/slateq_tf_policy.py b/rllib/algorithms/slateq/slateq_tf_policy.py index c6145bced515..a3079abe5746 100644 --- a/rllib/algorithms/slateq/slateq_tf_policy.py +++ b/rllib/algorithms/slateq/slateq_tf_policy.py @@ -207,15 +207,6 @@ def build_slateq_stats(policy: Policy, batch) -> Dict[str, TensorType]: "q_loss": policy._q_loss, "mean_actions": policy._mean_actions, } - # if hasattr(policy, "_mean_grads_0"): - # stats.update({"mean_grads_0": policy._mean_grads_0}) - # stats.update({"mean_grads_1": policy._mean_grads_1}) - # stats.update({"mean_grads_2": policy._mean_grads_2}) - # stats.update({"mean_grads_3": policy._mean_grads_3}) - # stats.update({"mean_grads_4": policy._mean_grads_4}) - # stats.update({"mean_grads_5": policy._mean_grads_5}) - # stats.update({"mean_grads_6": policy._mean_grads_6}) - # stats.update({"mean_grads_7": policy._mean_grads_7}) return stats diff --git a/rllib/core/learner/learner.py b/rllib/core/learner/learner.py index 23ca2d2e3162..e4ef00dd6ad5 100644 --- a/rllib/core/learner/learner.py +++ b/rllib/core/learner/learner.py @@ -522,14 +522,14 @@ def compile_results( # We put the stats for all modules under the ALL_MODULES key. e.g. average of # the gradients across all modules will go here. 
- mean_grads = [ - np.mean(grad) + mean_abs_grads = [ + np.mean(np.abs(grad)) for grad in convert_to_numpy(postprocessed_gradients.values()) if grad is not None ] module_learner_stats[ALL_MODULES] = { - "mean_gradient": np.mean(mean_grads), + "mean_abs_postprocessed_gradients": np.mean(mean_abs_grads), self.TOTAL_LOSS_KEY: loss_numpy[self.TOTAL_LOSS_KEY], } @@ -754,19 +754,21 @@ def additional_update_per_module( @OverrideToImplementCustomLogic def postprocess_gradients( - self, gradients_dict: Mapping[str, Any] + self, + gradients_dict: Mapping[str, Any], ) -> Mapping[str, Any]: - """Applies potential postprocessings to the gradients. + """Applies potential postprocessing operations on the gradients. - In some algorithms, we may want to perform some postprocessing on the - gradients before they are applied. This method is called after gradients - have been computed, and modifies them before they are applied. + This method is called after gradients have been computed, and modifies them + before they are applied to the respective module(s). + This includes grad clipping by value, norm, or global-norm, or other + algorithm specific gradient postprocessing steps. Args: gradients_dict: A dictionary of gradients. Returns: - A dictionary of updated gradients. + A dictionary with the updated gradients. """ return gradients_dict @@ -776,7 +778,9 @@ def update( *, minibatch_size: Optional[int] = None, num_iters: int = 1, - reduce_fn: Callable[[ResultDict], ResultDict] = _reduce_mean_results, + reduce_fn: Callable[[List[Mapping[str, Any]]], ResultDict] = ( + _reduce_mean_results + ), ) -> Mapping[str, Any]: """Do `num_iters` minibatch updates given the original batch. @@ -957,17 +961,17 @@ def _make_module(self) -> MultiAgentRLModule: This method uses `self._module_specs` or `self._module_obj` to construct the module. If the module_class is a single agent RL module it will be wrapped to a - multi-agent RL module. 
Override this method if there are other things than - needs to happen for instantiation of the module. - + multi-agent RL module. Override this method if there are other things that + need to happen for instantiation of the module. Returns: - The constructed module. + A constructed MultiAgentRLModule. """ if self._module_obj is not None: module = self._module_obj else: module = self._module_spec.build() + # If not already, convert to MultiAgentRLModule. module = module.as_multi_agent() return module @@ -975,11 +979,11 @@ def _check_result(self, result: Mapping[str, Any]) -> None: """Checks whether the result has the correct format. All the keys should be referencing the module ids that got updated. There is a - special key `__all__` that hold any extra information that is not specific to a - module. + special key `ALL_MODULES` that hold any extra information that is not specific + to a module. Args: - results: The result of the update. + result: The result of the update. Raises: ValueError: If the result are not in the correct format. @@ -1000,7 +1004,7 @@ def _check_result(self, result: Mapping[str, Any]) -> None: if key not in self.module.keys(): raise ValueError( f"The key {key} in the result of the update is not a valid " - f"module id. Valid module ids are: {self.module.keys()}" + f"module id. Valid module ids are: {list(self.module.keys())}." 
) @OverrideToImplementCustomLogic_CallToSuperRecommended diff --git a/rllib/core/learner/learner_group_config.py b/rllib/core/learner/learner_group_config.py index bf6454886a3d..7830bbf09c76 100644 --- a/rllib/core/learner/learner_group_config.py +++ b/rllib/core/learner/learner_group_config.py @@ -114,7 +114,7 @@ def module( def resources( self, num_learner_workers: Optional[int] = NotProvided, - num_gpus_per_learner_worker: Optional[Union[float, int]] = NotProvided, + num_gpus_per_learner_worker: Optional[int] = NotProvided, num_cpus_per_learner_worker: Optional[Union[float, int]] = NotProvided, local_gpu_idx: Optional[int] = NotProvided, ) -> "LearnerGroupConfig": diff --git a/rllib/core/learner/scaling_config.py b/rllib/core/learner/scaling_config.py index 8b02494a5efb..2fc16b0efa94 100644 --- a/rllib/core/learner/scaling_config.py +++ b/rllib/core/learner/scaling_config.py @@ -13,7 +13,9 @@ class LearnerGroupScalingConfig: training will run on a single CPU. num_gpus_per_worker: The number of GPUs to allocate per worker. If num_workers=0, any number greater than 0 will run the training on a single - GPU. A value of zero will run the training on a single CPU. + GPU. A value of zero will run the training on `num_cpus_per_worker` CPUs. + Fractional values (e.g. 0.5) are currently NOT supported as these might + cause CUDA async errors. local_gpu_idx: if num_gpus_per_worker > 0, and num_workers<2, then this gpu index will be used for training. This is an index into the available cuda devices. 
For example if os.environ["CUDA_VISIBLE_DEVICES"] = "1" then a diff --git a/rllib/core/learner/tests/test_learner_group.py b/rllib/core/learner/tests/test_learner_group.py index 55800981a128..bb0886ab75cc 100644 --- a/rllib/core/learner/tests/test_learner_group.py +++ b/rllib/core/learner/tests/test_learner_group.py @@ -138,8 +138,8 @@ def test_update_multigpu(self): self.assertLess(min_loss, 0.57) - # make sure the learner_group resources are freed up so that we don't - # autoscale + # Make sure the learner_group resources are freed up so that we don't + # autoscale. learner_group.shutdown() del learner_group diff --git a/rllib/core/models/tf/primitives.py b/rllib/core/models/tf/primitives.py index a36ae7a80463..816f837adaf6 100644 --- a/rllib/core/models/tf/primitives.py +++ b/rllib/core/models/tf/primitives.py @@ -24,9 +24,9 @@ def __init__( input_dim: int, hidden_layer_dims: List[int], hidden_layer_use_layernorm: bool = False, - hidden_layer_activation: Union[str, Callable] = "relu", + hidden_layer_activation: Optional[Union[str, Callable]] = "relu", output_dim: Optional[int] = None, - output_activation: Union[str, Callable] = "linear", + output_activation: Optional[Union[str, Callable]] = "linear", use_bias: bool = True, ): """Initialize a TfMLP object. @@ -112,7 +112,7 @@ def __init__( input_dims: Union[List[int], Tuple[int]], cnn_filter_specifiers: List[List[Union[int, List]]], cnn_use_layernorm: bool = False, - cnn_activation: str = "relu", + cnn_activation: Optional[str] = "relu", use_bias: bool = True, ): """Initializes a TfCNN instance. 
@@ -188,7 +188,7 @@ def __init__( *, input_dims: Union[List[int], Tuple[int]], cnn_transpose_filter_specifiers: List[List[Union[int, List]]], - cnn_transpose_activation: str = "relu", + cnn_transpose_activation: Optional[str] = "relu", cnn_transpose_use_layernorm: bool = False, use_bias: bool = True, ): diff --git a/rllib/policy/eager_tf_policy_v2.py b/rllib/policy/eager_tf_policy_v2.py index f38637e003a7..0fd14c72fd08 100644 --- a/rllib/policy/eager_tf_policy_v2.py +++ b/rllib/policy/eager_tf_policy_v2.py @@ -98,7 +98,7 @@ def __init__( self._loss_initialized = False # Backward compatibility workaround so Policy will call self.loss() directly. # TODO(jungong): clean up after all policies are migrated to new sub-class - # implementation. + # implementation. self._loss = None self.batch_divisibility_req = self.get_batch_divisibility_req() @@ -864,8 +864,6 @@ def _compute_actions_helper( dist_inputs = None elif is_overridden(self.action_sampler_fn): - dist_inputs = None - state_out = [] actions, logp, dist_inputs, state_out = self.action_sampler_fn( self.model, input_dict[SampleBatch.OBS], @@ -875,7 +873,6 @@ def _compute_actions_helper( ) else: if is_overridden(self.action_distribution_fn): - # Try new action_distribution_fn signature, supporting # state_batches and seq_lens. ( diff --git a/rllib/policy/torch_mixins.py b/rllib/policy/torch_mixins.py index 159c993e7826..d6c4b03a935d 100644 --- a/rllib/policy/torch_mixins.py +++ b/rllib/policy/torch_mixins.py @@ -172,7 +172,7 @@ class TargetNetworkMixin: - Adds the `update_target` method to the policy. Calling `update_target` updates all target Q-networks' weights from their - respective "main" Q-metworks, based on tau (smooth, partial updating). + respective "main" Q-networks, based on tau (smooth, partial updating). """ def __init__(self): @@ -184,17 +184,32 @@ def update_target(self, tau=None): # Update_target_fn will be called periodically to copy Q network to # target Q network, using (soft) tau-synching. 
tau = tau or self.config.get("tau", 1.0) + model_state_dict = self.model.state_dict() + # Support partial (soft) synching. # If tau == 1.0: Full sync from Q-model to target Q-model. - target_state_dict = next(iter(self.target_models.values())).state_dict() - model_state_dict = { - k: tau * model_state_dict[k] + (1 - tau) * v - for k, v in target_state_dict.items() - } - for target in self.target_models.values(): - target.load_state_dict(model_state_dict) + if self.config.get("_enable_rl_module_api", False): + target_current_network_pairs = self.model.get_target_network_pairs() + for target_network, current_network in target_current_network_pairs: + current_state_dict = current_network.state_dict() + new_state_dict = { + k: tau * current_state_dict[k] + (1 - tau) * v + for k, v in target_network.state_dict().items() + } + target_network.load_state_dict(new_state_dict) + else: + # Support partial (soft) synching. + # If tau == 1.0: Full sync from Q-model to target Q-model. + target_state_dict = next(iter(self.target_models.values())).state_dict() + model_state_dict = { + k: tau * model_state_dict[k] + (1 - tau) * v + for k, v in target_state_dict.items() + } + + for target in self.target_models.values(): + target.load_state_dict(model_state_dict) @override(TorchPolicy) def set_weights(self, weights): diff --git a/rllib/tuned_examples/a2c/atari-a2c.yaml b/rllib/tuned_examples/a2c/atari-a2c.yaml index f2e933e92d29..b46c83498c47 100644 --- a/rllib/tuned_examples/a2c/atari-a2c.yaml +++ b/rllib/tuned_examples/a2c/atari-a2c.yaml @@ -11,8 +11,11 @@ atari-a2c: config: # Works for both torch and tf. framework: torch + # Make analogous to old v4 + NoFrameskip. 
env_config: - frameskip: 1 # no frameskip + frameskip: 1 + full_action_space: false + repeat_action_probability: 0.0 train_batch_size: 500 rollout_fragment_length: auto clip_rewards: True diff --git a/rllib/tuned_examples/a3c/pong-a3c.yaml b/rllib/tuned_examples/a3c/pong-a3c.yaml index f1af764b5719..e5b4a114904f 100644 --- a/rllib/tuned_examples/a3c/pong-a3c.yaml +++ b/rllib/tuned_examples/a3c/pong-a3c.yaml @@ -6,8 +6,11 @@ pong-a3c: config: # Works for both torch and tf. framework: torch + # Make analogous to old v4 + NoFrameskip. env_config: - nondeterministic: False # deterministic + frameskip: 1 + full_action_space: false + repeat_action_probability: 0.0 num_workers: 16 rollout_fragment_length: 20 vf_loss_coeff: 0.5 diff --git a/rllib/tuned_examples/apex_dqn/atari-apex-dqn.yaml b/rllib/tuned_examples/apex_dqn/atari-apex-dqn.yaml index 094582854b64..d1f241d01c07 100644 --- a/rllib/tuned_examples/apex_dqn/atari-apex-dqn.yaml +++ b/rllib/tuned_examples/apex_dqn/atari-apex-dqn.yaml @@ -7,8 +7,11 @@ apex-breakoutnoframeskip-v5: episode_reward_mean: 20.0 timesteps_total: 7000000 config: + # Make analogous to old v4 + NoFrameskip. env_config: - frameskip: 1 # no frameskip + frameskip: 1 + full_action_space: false + repeat_action_probability: 0.0 double_q: false dueling: false num_atoms: 1 diff --git a/rllib/tuned_examples/apex_dqn/pong-apex-dqn.yaml b/rllib/tuned_examples/apex_dqn/pong-apex-dqn.yaml index 22ec786a2e6a..a3d1d978bfc0 100644 --- a/rllib/tuned_examples/apex_dqn/pong-apex-dqn.yaml +++ b/rllib/tuned_examples/apex_dqn/pong-apex-dqn.yaml @@ -10,8 +10,11 @@ pong-apex: config: # Works for both torch and tf. framework: torch + # Make analogous to old v4 + NoFrameskip. 
env_config: - frameskip: 1 # no frameskip + frameskip: 1 + full_action_space: false + repeat_action_probability: 0.0 target_network_update_freq: 20000 num_workers: 4 num_envs_per_worker: 8 diff --git a/rllib/tuned_examples/appo/cartpole-appo-learner.yaml b/rllib/tuned_examples/appo/cartpole-appo-learner.yaml deleted file mode 100644 index 0400f6708dc9..000000000000 --- a/rllib/tuned_examples/appo/cartpole-appo-learner.yaml +++ /dev/null @@ -1,31 +0,0 @@ -cartpole-appo-learner: - env: CartPole-v1 - run: APPO - stop: - episode_reward_mean: 150 - timesteps_total: 200000 - config: - # Works for both torch and tf. - framework: tf2 - num_workers: - grid_search: - - 3 - num_gpus: 0 - observation_filter: MeanStdFilter - num_sgd_iter: - grid_search: - - 6 - vf_loss_coeff: 0.01 - vtrace: True - model: - fcnet_hiddens: [32] - fcnet_activation: linear - vf_share_layers: true - enable_connectors: True - _enable_learner_api: True - _enable_rl_module_api: True - eager_tracing: True - lr: 0.001 - entropy_coeff: 0.1 - kl_coeff: 0.01 - exploration_config: null diff --git a/rllib/tuned_examples/appo/cartpole-appo-w-rl-modules-and-learner.yaml b/rllib/tuned_examples/appo/cartpole-appo-w-rl-modules-and-learner.yaml new file mode 100644 index 000000000000..dcbc7f7c1259 --- /dev/null +++ b/rllib/tuned_examples/appo/cartpole-appo-w-rl-modules-and-learner.yaml @@ -0,0 +1,27 @@ +cartpole-appo-w-rl-modules-and-learner: + env: CartPole-v1 + run: APPO + stop: + episode_reward_mean: 150 + timesteps_total: 200000 + config: + # Works for both torch and tf. 
+ framework: torch + num_envs_per_worker: 5 + num_workers: 2 + train_batch_size: 1000 + lr: 0.001 + num_gpus: 0 + observation_filter: MeanStdFilter + vf_loss_coeff: 0.1 + vtrace: true + + enable_connectors: true + _enable_learner_api: true + _enable_rl_module_api: true + num_learner_workers: 2 + num_gpus_per_learner_worker: 0 + num_cpus_per_learner_worker: 1 + # Need to unset this b/c we are using the RLModule API, which + # provides exploration control via the RLModule's `forward_exploration` method. + exploration_config: {} diff --git a/rllib/tuned_examples/appo/cartpole-appo.yaml b/rllib/tuned_examples/appo/cartpole-appo.yaml index a5eb4850a3b4..8010b58a774e 100644 --- a/rllib/tuned_examples/appo/cartpole-appo.yaml +++ b/rllib/tuned_examples/appo/cartpole-appo.yaml @@ -18,4 +18,4 @@ cartpole-appo: fcnet_hiddens: [32] fcnet_activation: linear vf_share_layers: true - enable_connectors: True + enable_connectors: true diff --git a/rllib/tuned_examples/appo/pong-appo-w-rl-modules-and-learner.yaml b/rllib/tuned_examples/appo/pong-appo-w-rl-modules-and-learner.yaml new file mode 100644 index 000000000000..97f73eb10a9c --- /dev/null +++ b/rllib/tuned_examples/appo/pong-appo-w-rl-modules-and-learner.yaml @@ -0,0 +1,56 @@ +# This can reach 18.0 reward in ~10 minutes on 4x M60 GPUs +# with 30 rollout workers, 4 learning workers, and 8 envs per rollout worker. +pong-appo: + env: ALE/Pong-v5 + run: APPO + stop: + episode_reward_mean: 18.0 + timesteps_total: 20000000 + config: + # Works for both torch and tf. + framework: torch + # Make analogous to old v4 + NoFrameskip. + env_config: + frameskip: 1 # no frameskip + full_action_space: false + repeat_action_probability: 0.0 # deterministic + vtrace: true + #vtrace_drop_last_ts: false + use_kl_loss: false + rollout_fragment_length: 50 + train_batch_size: 4000 + lr: 0.0005 + # On a 32 CPU machine (g3.2xlarge), we use 30 CPUs for the rollout workers + # and 2 for the learner workers. 
+ num_workers: 30 + broadcast_interval: 1 + max_sample_requests_in_flight_per_worker: 1 + num_envs_per_worker: 8 + num_sgd_iter: 2 + vf_loss_coeff: 1.0 + clip_param: 0.3 + num_gpus: 0 + + model: + dim: 42 + conv_filters: [[16, 4, 2], [32, 4, 2], [256, 11, 1, "valid"]] + conv_activation: relu + conv_add_final_dense: false + conv_flattened_dim: 256 + use_cnn_heads: true + + # Run with Learner API. + _enable_learner_api: true + grad_clip_by_global_norm: 10.0 + # Use N Learner worker on the GPU. + num_learner_workers: 4 + num_gpus_per_learner_worker: 1 + # Since we are using learner workers, the driver process does not need + # a CPU in particular. + num_cpus_for_local_worker: 1 + + # Run with RLModule API. + _enable_rl_module_api: true + # Need to unset this b/c we are using the RLModule API, which + # provides exploration control via the RLModule's `forward_exploration` method. + exploration_config: {} diff --git a/rllib/tuned_examples/appo/pong-appo.yaml b/rllib/tuned_examples/appo/pong-appo.yaml index 12c81437d4a4..8c4d69f96024 100644 --- a/rllib/tuned_examples/appo/pong-appo.yaml +++ b/rllib/tuned_examples/appo/pong-appo.yaml @@ -12,10 +12,13 @@ pong-appo: config: # Works for both torch and tf. framework: torch + # Make analogous to old v4 + NoFrameskip. env_config: frameskip: 1 # no frameskip - vtrace: True - use_kl_loss: False + full_action_space: false + repeat_action_probability: 0.0 # deterministic + vtrace: true + use_kl_loss: false rollout_fragment_length: 50 train_batch_size: 750 num_workers: 32 diff --git a/rllib/tuned_examples/dqn/atari-dist-dqn.yaml b/rllib/tuned_examples/dqn/atari-dist-dqn.yaml index b381dedb7293..1366fca92f96 100644 --- a/rllib/tuned_examples/dqn/atari-dist-dqn.yaml +++ b/rllib/tuned_examples/dqn/atari-dist-dqn.yaml @@ -7,8 +7,11 @@ atari-dist-dqn: - ALE/SpaceInvaders-v5 run: DQN config: + # Make analogous to old v4 + NoFrameskip. 
env_config: - frameskip: 1 # no frameskip + frameskip: 1 + full_action_space: false + repeat_action_probability: 0.0 double_q: false dueling: false num_atoms: 51 diff --git a/rllib/tuned_examples/dqn/atari-dqn.yaml b/rllib/tuned_examples/dqn/atari-dqn.yaml index 2e00b269936c..4e12176bf1f0 100644 --- a/rllib/tuned_examples/dqn/atari-dqn.yaml +++ b/rllib/tuned_examples/dqn/atari-dqn.yaml @@ -11,8 +11,11 @@ atari-basic-dqn: config: # Works for both torch and tf. framework: torch + # Make analogous to old v4 + NoFrameskip. env_config: - frameskip: 1 # no frameskip + frameskip: 1 + full_action_space: false + repeat_action_probability: 0.0 double_q: false dueling: false num_atoms: 1 diff --git a/rllib/tuned_examples/dqn/atari-duel-ddqn.yaml b/rllib/tuned_examples/dqn/atari-duel-ddqn.yaml index cfb15c8813b0..361eaeca346f 100644 --- a/rllib/tuned_examples/dqn/atari-duel-ddqn.yaml +++ b/rllib/tuned_examples/dqn/atari-duel-ddqn.yaml @@ -11,8 +11,11 @@ dueling-ddqn: config: # Works for both torch and tf. framework: torch + # Make analogous to old v4 + NoFrameskip. env_config: - frameskip: 1 # no frameskip + frameskip: 1 + full_action_space: false + repeat_action_probability: 0.0 double_q: true dueling: true num_atoms: 1 diff --git a/rllib/tuned_examples/dqn/pong-dqn.yaml b/rllib/tuned_examples/dqn/pong-dqn.yaml index 65b003d14747..f39e04306e8d 100644 --- a/rllib/tuned_examples/dqn/pong-dqn.yaml +++ b/rllib/tuned_examples/dqn/pong-dqn.yaml @@ -8,8 +8,11 @@ pong-deterministic-dqn: config: # Works for both torch and tf. framework: torch + # Make analogous to old v4 + NoFrameskip. 
env_config: - nondeterministic: False # deterministic + frameskip: 1 + full_action_space: false + repeat_action_probability: 0.0 num_gpus: 1 gamma: 0.99 lr: .0001 diff --git a/rllib/tuned_examples/dqn/pong-rainbow.yaml b/rllib/tuned_examples/dqn/pong-rainbow.yaml index 95e121e49c5a..99198ba2eb8b 100644 --- a/rllib/tuned_examples/dqn/pong-rainbow.yaml +++ b/rllib/tuned_examples/dqn/pong-rainbow.yaml @@ -4,8 +4,11 @@ pong-deterministic-rainbow: stop: episode_reward_mean: 20 config: + # Make analogous to old v4 + NoFrameskip. env_config: - nondeterministic: False # deterministic + frameskip: 1 + full_action_space: false + repeat_action_probability: 0.0 num_atoms: 51 noisy: True gamma: 0.99 diff --git a/rllib/tuned_examples/impala/atari-impala-large.yaml b/rllib/tuned_examples/impala/atari-impala-large.yaml index cc3f7363462f..8e8a882e84c7 100644 --- a/rllib/tuned_examples/impala/atari-impala-large.yaml +++ b/rllib/tuned_examples/impala/atari-impala-large.yaml @@ -11,8 +11,11 @@ atari-impala: stop: timesteps_total: 3000000 config: + # Make analogous to old v4 + NoFrameskip. env_config: - frameskip: 1 # no frameskip + frameskip: 1 + full_action_space: false + repeat_action_probability: 0.0 rollout_fragment_length: 50 train_batch_size: 500 num_workers: 128 diff --git a/rllib/tuned_examples/impala/atari-impala-multi-gpu.yaml b/rllib/tuned_examples/impala/atari-impala-multi-gpu.yaml index cc1940a82240..35568b1092b4 100644 --- a/rllib/tuned_examples/impala/atari-impala-multi-gpu.yaml +++ b/rllib/tuned_examples/impala/atari-impala-multi-gpu.yaml @@ -6,8 +6,11 @@ atari-impala: config: # Works for both torch and tf. framework: torch + # Make analogous to old v4 + NoFrameskip. 
env_config: - frameskip: 1 # no frameskip + frameskip: 1 + full_action_space: false + repeat_action_probability: 0.0 rollout_fragment_length: 50 train_batch_size: 4000 num_gpus: 4 diff --git a/rllib/tuned_examples/impala/atari-impala.yaml b/rllib/tuned_examples/impala/atari-impala.yaml index ec0661af2491..5c5a4d8fed9b 100644 --- a/rllib/tuned_examples/impala/atari-impala.yaml +++ b/rllib/tuned_examples/impala/atari-impala.yaml @@ -9,8 +9,11 @@ atari-impala: - ALE/SpaceInvaders-v5 run: IMPALA config: + # Make analogous to old v4 + NoFrameskip. env_config: - frameskip: 1 # no frameskip + frameskip: 1 + full_action_space: false + repeat_action_probability: 0.0 rollout_fragment_length: 50 train_batch_size: 500 num_workers: 32 diff --git a/rllib/tuned_examples/impala/pong-impala-fast.yaml b/rllib/tuned_examples/impala/pong-impala-fast.yaml index 50840de91ba4..d038f207af6c 100644 --- a/rllib/tuned_examples/impala/pong-impala-fast.yaml +++ b/rllib/tuned_examples/impala/pong-impala-fast.yaml @@ -7,8 +7,11 @@ pong-impala-fast: env: ALE/Pong-v5 run: IMPALA config: + # Make analogous to old v4 + NoFrameskip. env_config: - frameskip: 1 # no frameskip + frameskip: 1 + full_action_space: false + repeat_action_probability: 0.0 rollout_fragment_length: 50 train_batch_size: 1000 num_workers: 128 diff --git a/rllib/tuned_examples/impala/pong-impala-vectorized.yaml b/rllib/tuned_examples/impala/pong-impala-vectorized.yaml index fe6912c24c07..9623bd8d1e27 100644 --- a/rllib/tuned_examples/impala/pong-impala-vectorized.yaml +++ b/rllib/tuned_examples/impala/pong-impala-vectorized.yaml @@ -5,8 +5,11 @@ pong-impala-vectorized: env: ALE/Pong-v5 run: IMPALA config: + # Make analogous to old v4 + NoFrameskip. 
env_config: - frameskip: 1 # no frameskip + frameskip: 1 + full_action_space: false + repeat_action_probability: 0.0 rollout_fragment_length: 50 train_batch_size: 500 num_workers: 32 diff --git a/rllib/tuned_examples/impala/pong-impala.yaml b/rllib/tuned_examples/impala/pong-impala.yaml index 901d1e8daa8e..b003be9b850e 100644 --- a/rllib/tuned_examples/impala/pong-impala.yaml +++ b/rllib/tuned_examples/impala/pong-impala.yaml @@ -7,8 +7,11 @@ pong-impala: env: ALE/Pong-v5 run: IMPALA config: + # Make analogous to old v4 + NoFrameskip. env_config: - frameskip: 1 # no frameskip + frameskip: 1 + full_action_space: false + repeat_action_probability: 0.0 rollout_fragment_length: 50 train_batch_size: 500 num_workers: 128 diff --git a/rllib/tuned_examples/ppo/atari-ppo.yaml b/rllib/tuned_examples/ppo/atari-ppo.yaml index 187db074d6eb..22a024da04d2 100644 --- a/rllib/tuned_examples/ppo/atari-ppo.yaml +++ b/rllib/tuned_examples/ppo/atari-ppo.yaml @@ -11,8 +11,11 @@ atari-ppo: config: # Works for both torch and tf. framework: torch + # Make analogous to old v4 + NoFrameskip. env_config: - frameskip: 1 # no frameskip + frameskip: 1 + full_action_space: false + repeat_action_probability: 0.0 lambda: 0.95 kl_coeff: 0.5 clip_rewards: True diff --git a/rllib/tuned_examples/ppo/pendulum-ppo-with-rl-module.yaml b/rllib/tuned_examples/ppo/pendulum-ppo-with-rl-module.yaml index 04c98cd56cb8..08cd0abb4bee 100644 --- a/rllib/tuned_examples/ppo/pendulum-ppo-with-rl-module.yaml +++ b/rllib/tuned_examples/ppo/pendulum-ppo-with-rl-module.yaml @@ -7,7 +7,6 @@ pendulum-ppo: timesteps_total: 400000 config: # Works for both torch and tf2 - # TODO (Kourosh) tf2 is way slower than torch framework: torch train_batch_size: 512 vf_clip_param: 10.0 @@ -23,5 +22,6 @@ pendulum-ppo: model: fcnet_activation: relu _enable_rl_module_api: true + # Need to unset this b/c we are using the RLModule API, which + # provides exploration control via the RLModule's `forward_exploration` method. 
exploration_config: {} - eager_tracing: false diff --git a/rllib/tuned_examples/ppo/pong-ppo.yaml b/rllib/tuned_examples/ppo/pong-ppo.yaml index 2aabde87d949..3da49952e0ae 100644 --- a/rllib/tuned_examples/ppo/pong-ppo.yaml +++ b/rllib/tuned_examples/ppo/pong-ppo.yaml @@ -8,8 +8,11 @@ pong-ppo: config: # Works for both torch and tf. framework: torch + # Make analogous to old v4 + NoFrameskip. env_config: - frameskip: 1 # no frameskip + frameskip: 1 + full_action_space: false + repeat_action_probability: 0.0 lambda: 0.95 kl_coeff: 0.5 clip_rewards: True diff --git a/rllib/utils/actor_manager.py b/rllib/utils/actor_manager.py index 1d06bb9cca97..d8d0a86606e5 100644 --- a/rllib/utils/actor_manager.py +++ b/rllib/utils/actor_manager.py @@ -232,8 +232,8 @@ class _ActorState: def __init__( self, actors: Optional[List[ActorHandle]] = None, - max_remote_requests_in_flight_per_actor: Optional[int] = 2, - init_id: Optional[int] = 0, + max_remote_requests_in_flight_per_actor: int = 2, + init_id: int = 0, ): """Construct a FaultTolerantActorManager. @@ -738,7 +738,7 @@ def fetch_ready_async_reqs( Automatically mark actors unhealthy if they fail to respond. Note: If tags is an empty tuple then results from all ready async requests are - returned. + returned. Args: timeout_seconds: Ray.get() timeout. Default is 0 (only those that are diff --git a/rllib/utils/minibatch_utils.py b/rllib/utils/minibatch_utils.py index 2920e32e4080..f3dd7c2c19ab 100644 --- a/rllib/utils/minibatch_utils.py +++ b/rllib/utils/minibatch_utils.py @@ -61,9 +61,9 @@ def __iter__(self): if len(module_batch) == 0: raise ValueError( - f"The batch for module_id {module_id} is empty. " + f"The batch for module_id {module_id} is empty! " "This will create an infinite loop because we need to cover " - "the same number of samples for each module_id. " + "the same number of samples for each module_id." 
) s = self._start[module_id] # start n_steps = self._minibatch_size diff --git a/rllib/utils/torch_utils.py b/rllib/utils/torch_utils.py index 4239174b0ccb..90ce959d8421 100644 --- a/rllib/utils/torch_utils.py +++ b/rllib/utils/torch_utils.py @@ -38,6 +38,11 @@ def apply_grad_clipping( ) -> Dict[str, TensorType]: """Applies gradient clipping to already computed grads inside `optimizer`. + Note: This function does NOT perform an analogous operation as + tf.clip_by_global_norm. It merely clips by norm (per gradient tensor) and + then computes the global norm across all given tensors (but without clipping + by that global norm). + Args: policy: The TorchPolicy, which calculated `loss`. optimizer: A local torch optimizer object. From 5de2ac27b4ab5629e33c91333ef028f53566443c Mon Sep 17 00:00:00 2001 From: Sven Mika Date: Wed, 26 Apr 2023 14:12:37 +0200 Subject: [PATCH 106/424] [RLlib] Add throughput per-second metrics (env/agent steps trained and -sampled) to Algorithm. (#34526) --- .../todo_tests_currently_not_covered.yaml | 4 +- .../yaml_files/a2c/a2c-breakout-v5.yaml | 2 +- .../a3c/a3c-pongdeterministic-v5.yaml | 2 +- .../apex/apex-breakoutnoframeskip-v5.yaml | 2 +- .../appo/appo-pongnoframeskip-v5.yaml | 2 +- .../yaml_files/bc/bc-halfcheetah-v4.yaml | 2 +- .../yaml_files/cql/cql-halfcheetah-v4.yaml | 2 +- .../yaml_files/ddpg/ddpg-hopper-v4.yaml | 2 +- .../dqn/dqn-breakoutnoframeskip-v5.yaml | 2 +- .../yaml_files/es/es-humanoid-v4.yaml | 2 +- .../impala/impala-breakoutnoframeskip-v5.yaml | 2 +- .../marwil/marwil-halfcheetah-v4.yaml | 2 +- .../ppo/tf/ppo-breakoutnoframeskip-v5-tf.yaml | 2 +- .../ppo-breakoutnoframeskip-v5-torch.yaml | 2 +- .../yaml_files/sac/sac-halfcheetah-v4.yaml | 2 +- .../slateq-interest-evolution-recsim-env.yaml | 2 +- .../yaml_files/td3/td3-halfcheetah-v4.yaml | 2 +- .../multi_gpu_learning_tests.yaml | 24 +++++----- ...lti_gpu_with_attention_learning_tests.yaml | 12 ++--- .../multi_gpu_with_lstm_learning_tests.yaml | 14 +++--- 
rllib/algorithms/algorithm.py | 48 +++++++++++++++---- .../a2c/cartpole-a2c-fake-gpus.yaml | 2 +- .../a2c/cartpole-a2c-microbatch.yaml | 2 +- rllib/tuned_examples/a2c/cartpole-a2c.yaml | 2 +- rllib/tuned_examples/a3c/cartpole-a3c.yaml | 2 +- .../multi-agent-cartpole-alpha-star.yaml | 2 +- .../cartpole-sparse-rewards-alpha-zero.yaml | 2 +- .../mountaincarcontinuous-apex-ddpg.yaml | 2 +- .../apex_ddpg/pendulum-apex-ddpg.yaml | 2 +- .../apex_dqn/atari-apex-dqn.yaml | 2 +- .../apex_dqn/cartpole-apex-dqn-fake-gpus.yaml | 2 +- .../apex_dqn/cartpole-apex-dqn.yaml | 2 +- .../apex_dqn/pong-apex-dqn.yaml | 2 +- .../appo/cartpole-appo-vtrace-fake-gpus.yaml | 2 +- .../cartpole-appo-vtrace-separate-losses.yaml | 2 +- .../appo/cartpole-appo-vtrace.yaml | 2 +- ...artpole-appo-w-rl-modules-and-learner.yaml | 2 +- rllib/tuned_examples/appo/cartpole-appo.yaml | 2 +- .../appo/frozenlake-appo-vtrace.yaml | 2 +- .../appo/multi-agent-cartpole-appo.yaml | 2 +- ...nt-cartpole-crashing-restart-env-appo.yaml | 2 +- rllib/tuned_examples/appo/pendulum-appo.yaml | 2 +- rllib/tuned_examples/appo/pong-appo.yaml | 2 +- ...st-evolution-recsim-env-bandit-linucb.yaml | 2 +- rllib/tuned_examples/cql/pendulum-cql.yaml | 2 +- rllib/tuned_examples/crr/cartpole-v1-crr.yaml | 2 +- .../crr/cartpole-v1-crr_expectation.yaml | 2 +- rllib/tuned_examples/crr/pendulum-v1-crr.yaml | 2 +- .../tuned_examples/ddpg/halfcheetah-ddpg.yaml | 2 +- .../ddpg/halfcheetah-pybullet-ddpg.yaml | 2 +- .../ddpg/hopper-pybullet-ddpg.yaml | 2 +- .../ddpg/mountaincarcontinuous-ddpg.yaml | 2 +- .../ddpg/pendulum-ddpg-fake-gpus.yaml | 2 +- rllib/tuned_examples/ddpg/pendulum-ddpg.yaml | 2 +- .../tuned_examples/ddppo/cartpole-ddppo.yaml | 2 +- .../tuned_examples/ddppo/pendulum-ddppo.yaml | 2 +- .../dqn/cartpole-dqn-fake-gpus.yaml | 2 +- .../dqn/cartpole-dqn-param-noise.yaml | 2 +- .../dqn/cartpole-dqn-softq.yaml | 2 +- rllib/tuned_examples/dqn/cartpole-dqn.yaml | 2 +- rllib/tuned_examples/dqn/pong-dqn.yaml | 2 +- 
rllib/tuned_examples/dqn/pong-rainbow.yaml | 2 +- rllib/tuned_examples/dt/cartpole-v1-dt.yaml | 2 +- rllib/tuned_examples/dt/pendulum-v1-dt.yaml | 2 +- .../dt/pendulum-v1-medium-expert-dt.yaml | 2 +- rllib/tuned_examples/es/cartpole-es.yaml | 2 +- rllib/tuned_examples/es/humanoid-es.yaml | 2 +- .../impala/cartpole-impala-fake-gpus.yaml | 2 +- .../impala/cartpole-impala.yaml | 2 +- .../impala/multi-agent-cartpole-impala.yaml | 2 +- .../impala/pendulum-impala.yaml | 2 +- .../maddpg/two-step-game-maddpg.yaml | 2 +- .../tuned_examples/mbmpo/cartpole-mbmpo.yaml | 2 +- .../tuned_examples/mbmpo/pendulum-mbmpo.yaml | 2 +- .../pg/cartpole-crashing-pg.yaml | 2 +- ...cartpole-crashing-with-remote-envs-pg.yaml | 2 +- .../pg/cartpole-pg-fake-gpus.yaml | 2 +- rllib/tuned_examples/pg/cartpole-pg.yaml | 2 +- ...cartpole-crashing-restart-sub-envs-pg.yaml | 2 +- ...cartpole-crashing-with-remote-envs-pg.yaml | 2 +- .../ppo/cartpole-ppo-fake-gpus.yaml | 2 +- .../ppo/cartpole-ppo-hyperband.yaml | 2 +- .../ppo/cartpole-ppo-with-rl-module.yaml | 2 +- rllib/tuned_examples/ppo/cartpole-ppo.yaml | 2 +- rllib/tuned_examples/ppo/halfcheetah-ppo.yaml | 2 +- .../tuned_examples/ppo/humanoid-ppo-gae.yaml | 2 +- rllib/tuned_examples/ppo/humanoid-ppo.yaml | 2 +- .../ppo/pendulum-ppo-with-rl-module.yaml | 2 +- rllib/tuned_examples/ppo/pendulum-ppo.yaml | 2 +- .../ppo/pendulum-transformed-actions-ppo.yaml | 2 +- .../tuned_examples/ppo/recomm-sys001-ppo.yaml | 2 +- .../ppo/repeatafterme-ppo-lstm.yaml | 2 +- .../qmix/two-step-game-qmix-no-mixer.yaml | 2 +- .../qmix/two-step-game-qmix-vdn-mixer.yaml | 2 +- .../qmix/two-step-game-qmix.yaml | 2 +- .../stateless-cartpole-r2d2-fake-gpus.yaml | 2 +- .../r2d2/stateless-cartpole-r2d2.yaml | 2 +- .../sac/cartpole-continuous-pybullet-sac.yaml | 2 +- rllib/tuned_examples/sac/cartpole-sac.yaml | 2 +- .../sac/halfcheetah-pybullet-sac.yaml | 2 +- rllib/tuned_examples/sac/halfcheetah-sac.yaml | 2 +- rllib/tuned_examples/sac/mspacman-sac.yaml | 2 +- 
.../sac/pendulum-sac-fake-gpus.yaml | 2 +-
 rllib/tuned_examples/sac/pendulum-sac.yaml | 2 +-
 .../sac/pendulum-transformed-actions-sac.yaml | 2 +-
 .../simple_q/cartpole-simpleq-fake-gpus.yaml | 2 +-
 .../simple_q/cartpole-simpleq-test.yaml | 2 +-
 .../simple_q/cartpole-simpleq.yaml | 2 +-
 ...andidates-recsim-env-slateq-fake-gpus.yaml | 2 +-
 ...ution-10-candidates-recsim-env-slateq.yaml | 2 +-
 ...ution-50-candidates-recsim-env-slateq.yaml | 2 +-
 ...g-term-satisfaction-recsim-env-slateq.yaml | 2 +-
 .../parametric-item-reco-env-slateq.yaml | 2 +-
 .../slateq/recomm-sys001-slateq.yaml | 2 +-
 .../td3/invertedpendulum-td3.yaml | 2 +-
 .../td3/pendulum-td3-fake-gpus.yaml | 2 +-
 rllib/tuned_examples/td3/pendulum-td3.yaml | 2 +-
 117 files changed, 178 insertions(+), 148 deletions(-)

diff --git a/release/rllib_tests/learning_tests/todo_tests_currently_not_covered.yaml b/release/rllib_tests/learning_tests/todo_tests_currently_not_covered.yaml
index eebccd523c5b..b8f75379547b 100644
--- a/release/rllib_tests/learning_tests/todo_tests_currently_not_covered.yaml
+++ b/release/rllib_tests/learning_tests/todo_tests_currently_not_covered.yaml
@@ -5,7 +5,7 @@
 #   run: ARS
 #   # Minimum reward and total ts (in given time_total_s) to pass this test.
 #   pass_criteria:
-#     episode_reward_mean: 100.0
+#     sampler_results/episode_reward_mean: 100.0
 #     timesteps_total: 2000000
 #   stop:
 #     time_total_s: 2000
@@ -29,7 +29,7 @@
 #   run: DDPPO
 #   # Minimum reward and total ts (in given time_total_s) to pass this test.
#   pass_criteria:
-#     episode_reward_mean: 50.0
+#     sampler_results/episode_reward_mean: 50.0
 #     timesteps_total: 10000000
 #   stop:
 #     time_total_s: 3600
diff --git a/release/rllib_tests/learning_tests/yaml_files/a2c/a2c-breakout-v5.yaml b/release/rllib_tests/learning_tests/yaml_files/a2c/a2c-breakout-v5.yaml
index c4e4b0eef19d..be28e4aee400 100644
--- a/release/rllib_tests/learning_tests/yaml_files/a2c/a2c-breakout-v5.yaml
+++ b/release/rllib_tests/learning_tests/yaml_files/a2c/a2c-breakout-v5.yaml
@@ -3,7 +3,7 @@ a2c-breakoutnoframeskip-v5:
   run: A2C
   # Minimum reward and total ts (in given time_total_s) to pass this test.
   pass_criteria:
-    episode_reward_mean: 50.0
+    sampler_results/episode_reward_mean: 50.0
     timesteps_total: 5000000
   stop:
     time_total_s: 7200
diff --git a/release/rllib_tests/learning_tests/yaml_files/a3c/a3c-pongdeterministic-v5.yaml b/release/rllib_tests/learning_tests/yaml_files/a3c/a3c-pongdeterministic-v5.yaml
index 7c7dbce9916d..9918de78a74f 100644
--- a/release/rllib_tests/learning_tests/yaml_files/a3c/a3c-pongdeterministic-v5.yaml
+++ b/release/rllib_tests/learning_tests/yaml_files/a3c/a3c-pongdeterministic-v5.yaml
@@ -3,7 +3,7 @@ a3c-pongdeterministic-v5:
   run: A3C
   # Minimum reward and total ts (in given time_total_s) to pass this test.
   pass_criteria:
-    episode_reward_mean: 18.0
+    sampler_results/episode_reward_mean: 18.0
     timesteps_total: 5000000
   stop:
     time_total_s: 3600
diff --git a/release/rllib_tests/learning_tests/yaml_files/apex/apex-breakoutnoframeskip-v5.yaml b/release/rllib_tests/learning_tests/yaml_files/apex/apex-breakoutnoframeskip-v5.yaml
index 50002c3e4053..e277cb364eda 100644
--- a/release/rllib_tests/learning_tests/yaml_files/apex/apex-breakoutnoframeskip-v5.yaml
+++ b/release/rllib_tests/learning_tests/yaml_files/apex/apex-breakoutnoframeskip-v5.yaml
@@ -3,7 +3,7 @@ apex-breakoutnoframeskip-v5:
   run: APEX
   # Minimum reward and total ts (in given time_total_s) to pass this test.
pass_criteria:
-    episode_reward_mean: 100.0
+    sampler_results/episode_reward_mean: 100.0
     timesteps_total: 12000000
   stop:
     time_total_s: 7200
diff --git a/release/rllib_tests/learning_tests/yaml_files/appo/appo-pongnoframeskip-v5.yaml b/release/rllib_tests/learning_tests/yaml_files/appo/appo-pongnoframeskip-v5.yaml
index 46e57eb31994..4b25f5e105e9 100644
--- a/release/rllib_tests/learning_tests/yaml_files/appo/appo-pongnoframeskip-v5.yaml
+++ b/release/rllib_tests/learning_tests/yaml_files/appo/appo-pongnoframeskip-v5.yaml
@@ -3,7 +3,7 @@ appo-pongnoframeskip-v5:
   run: APPO
   # Minimum reward and total ts (in given time_total_s) to pass this test.
   pass_criteria:
-    episode_reward_mean: 18.0
+    sampler_results/episode_reward_mean: 18.0
     timesteps_total: 5000000
   stop:
     time_total_s: 1800
diff --git a/release/rllib_tests/learning_tests/yaml_files/bc/bc-halfcheetah-v4.yaml b/release/rllib_tests/learning_tests/yaml_files/bc/bc-halfcheetah-v4.yaml
index fa47849ba227..04c9b7bb6f22 100644
--- a/release/rllib_tests/learning_tests/yaml_files/bc/bc-halfcheetah-v4.yaml
+++ b/release/rllib_tests/learning_tests/yaml_files/bc/bc-halfcheetah-v4.yaml
@@ -2,7 +2,7 @@ bc-halfcheetah-v0:
   env: HalfCheetah-v4
   run: BC
   pass_criteria:
-    evaluation/episode_reward_mean: 400.0
+    evaluation/sampler_results/episode_reward_mean: 400.0
     timesteps_total: 2500000
   stop:
     time_total_s: 1800
diff --git a/release/rllib_tests/learning_tests/yaml_files/cql/cql-halfcheetah-v4.yaml b/release/rllib_tests/learning_tests/yaml_files/cql/cql-halfcheetah-v4.yaml
index 511355980f8b..55ad047ffc73 100644
--- a/release/rllib_tests/learning_tests/yaml_files/cql/cql-halfcheetah-v4.yaml
+++ b/release/rllib_tests/learning_tests/yaml_files/cql/cql-halfcheetah-v4.yaml
@@ -2,7 +2,7 @@ cql-halfcheetah-v4:
   env: HalfCheetah-v4
   run: CQL
   pass_criteria:
-    evaluation/episode_reward_mean: 400.0
+    evaluation/sampler_results/episode_reward_mean: 400.0
     # Can not check throughput for offline methods.
timesteps_total: 5000000
   stop:
diff --git a/release/rllib_tests/learning_tests/yaml_files/ddpg/ddpg-hopper-v4.yaml b/release/rllib_tests/learning_tests/yaml_files/ddpg/ddpg-hopper-v4.yaml
index ecf51e28f228..c40ce18dd007 100644
--- a/release/rllib_tests/learning_tests/yaml_files/ddpg/ddpg-hopper-v4.yaml
+++ b/release/rllib_tests/learning_tests/yaml_files/ddpg/ddpg-hopper-v4.yaml
@@ -3,7 +3,7 @@ ddpg-hopper-v4:
   run: DDPG
   # Minimum reward and total ts (in given time_total_s) to pass this test.
   pass_criteria:
-    episode_reward_mean: 110.0
+    sampler_results/episode_reward_mean: 110.0
     timesteps_total: 50000
   stop:
     time_total_s: 1800
diff --git a/release/rllib_tests/learning_tests/yaml_files/dqn/dqn-breakoutnoframeskip-v5.yaml b/release/rllib_tests/learning_tests/yaml_files/dqn/dqn-breakoutnoframeskip-v5.yaml
index 78947d81f530..2662838c8611 100644
--- a/release/rllib_tests/learning_tests/yaml_files/dqn/dqn-breakoutnoframeskip-v5.yaml
+++ b/release/rllib_tests/learning_tests/yaml_files/dqn/dqn-breakoutnoframeskip-v5.yaml
@@ -3,7 +3,7 @@ dqn-breakoutnoframeskip-v5:
   run: DQN
   # Minimum reward and total ts (in given time_total_s) to pass this test.
   pass_criteria:
-    episode_reward_mean: 20.0
+    sampler_results/episode_reward_mean: 20.0
     timesteps_total: 400000
   stop:
     time_total_s: 7200
diff --git a/release/rllib_tests/learning_tests/yaml_files/es/es-humanoid-v4.yaml b/release/rllib_tests/learning_tests/yaml_files/es/es-humanoid-v4.yaml
index fa89b337f4c3..94262af242c8 100644
--- a/release/rllib_tests/learning_tests/yaml_files/es/es-humanoid-v4.yaml
+++ b/release/rllib_tests/learning_tests/yaml_files/es/es-humanoid-v4.yaml
@@ -3,7 +3,7 @@ es-humanoid-v4:
   run: ES
   # Minimum reward and total ts (in given time_total_s) to pass this test.
pass_criteria: - episode_reward_mean: 100.0 + sampler_results/episode_reward_mean:: 100.0 timesteps_total: 5000000 stop: time_total_s: 3600 diff --git a/release/rllib_tests/learning_tests/yaml_files/impala/impala-breakoutnoframeskip-v5.yaml b/release/rllib_tests/learning_tests/yaml_files/impala/impala-breakoutnoframeskip-v5.yaml index e0d054164cb4..ef9a408d630d 100644 --- a/release/rllib_tests/learning_tests/yaml_files/impala/impala-breakoutnoframeskip-v5.yaml +++ b/release/rllib_tests/learning_tests/yaml_files/impala/impala-breakoutnoframeskip-v5.yaml @@ -3,7 +3,7 @@ impala-breakoutnoframeskip-v5: run: IMPALA # Minimum reward and total ts (in given time_total_s) to pass this test. pass_criteria: - episode_reward_mean: 200.0 + sampler_results/episode_reward_mean:: 200.0 timesteps_total: 6000000 stop: time_total_s: 2400 diff --git a/release/rllib_tests/learning_tests/yaml_files/marwil/marwil-halfcheetah-v4.yaml b/release/rllib_tests/learning_tests/yaml_files/marwil/marwil-halfcheetah-v4.yaml index 1a8d6b3f42c3..5bfc11256d93 100644 --- a/release/rllib_tests/learning_tests/yaml_files/marwil/marwil-halfcheetah-v4.yaml +++ b/release/rllib_tests/learning_tests/yaml_files/marwil/marwil-halfcheetah-v4.yaml @@ -3,7 +3,7 @@ marwil-halfcheetah-v4: run: MARWIL pass_criteria: # Can not check throughput for offline methods. 
- evaluation/episode_reward_mean: 400.0 + evaluation/sampler_results/episode_reward_mean:: 400.0 timesteps_total: 2500000 stop: time_total_s: 1800 diff --git a/release/rllib_tests/learning_tests/yaml_files/ppo/tf/ppo-breakoutnoframeskip-v5-tf.yaml b/release/rllib_tests/learning_tests/yaml_files/ppo/tf/ppo-breakoutnoframeskip-v5-tf.yaml index c3d88cc9513d..8bb51cd0ff95 100644 --- a/release/rllib_tests/learning_tests/yaml_files/ppo/tf/ppo-breakoutnoframeskip-v5-tf.yaml +++ b/release/rllib_tests/learning_tests/yaml_files/ppo/tf/ppo-breakoutnoframeskip-v5-tf.yaml @@ -3,7 +3,7 @@ ppo-breakoutnoframeskip-v5: run: PPO # Minimum reward and total ts (in given time_total_s) to pass this test. pass_criteria: - episode_reward_mean: 50.0 + sampler_results/episode_reward_mean:: 50.0 timesteps_total: 7000000 stop: time_total_s: 3600 diff --git a/release/rllib_tests/learning_tests/yaml_files/ppo/torch/ppo-breakoutnoframeskip-v5-torch.yaml b/release/rllib_tests/learning_tests/yaml_files/ppo/torch/ppo-breakoutnoframeskip-v5-torch.yaml index 3241d39d37ab..e2b2a43604b8 100644 --- a/release/rllib_tests/learning_tests/yaml_files/ppo/torch/ppo-breakoutnoframeskip-v5-torch.yaml +++ b/release/rllib_tests/learning_tests/yaml_files/ppo/torch/ppo-breakoutnoframeskip-v5-torch.yaml @@ -3,7 +3,7 @@ ppo-breakoutnoframeskip-v5: run: PPO # Minimum reward and total ts (in given time_total_s) to pass this test. pass_criteria: - episode_reward_mean: 50.0 + sampler_results/episode_reward_mean:: 50.0 timesteps_total: 7000000 stop: # This is double the time we use for tf because of 2x throughput there. 
diff --git a/release/rllib_tests/learning_tests/yaml_files/sac/sac-halfcheetah-v4.yaml b/release/rllib_tests/learning_tests/yaml_files/sac/sac-halfcheetah-v4.yaml index 3c78bb8a51d0..dd57dcd79e59 100644 --- a/release/rllib_tests/learning_tests/yaml_files/sac/sac-halfcheetah-v4.yaml +++ b/release/rllib_tests/learning_tests/yaml_files/sac/sac-halfcheetah-v4.yaml @@ -3,7 +3,7 @@ sac-halfcheetah-v4: run: SAC # Minimum reward and total ts (in given time_total_s) to pass this test. pass_criteria: - episode_reward_mean: 500.0 + sampler_results/episode_reward_mean:: 500.0 timesteps_total: 400000 stop: time_total_s: 3600 diff --git a/release/rllib_tests/learning_tests/yaml_files/slateq/slateq-interest-evolution-recsim-env.yaml b/release/rllib_tests/learning_tests/yaml_files/slateq/slateq-interest-evolution-recsim-env.yaml index 39b5e8827468..9a716345d2e0 100644 --- a/release/rllib_tests/learning_tests/yaml_files/slateq/slateq-interest-evolution-recsim-env.yaml +++ b/release/rllib_tests/learning_tests/yaml_files/slateq/slateq-interest-evolution-recsim-env.yaml @@ -2,7 +2,7 @@ slateq-interest-evolution-recsim-env: env: ray.rllib.examples.env.recommender_system_envs_with_recsim.InterestEvolutionRecSimEnv run: SlateQ pass_criteria: - episode_reward_mean: 160.0 + sampler_results/episode_reward_mean:: 160.0 timesteps_total: 300000 stop: time_total_s: 7200 diff --git a/release/rllib_tests/learning_tests/yaml_files/td3/td3-halfcheetah-v4.yaml b/release/rllib_tests/learning_tests/yaml_files/td3/td3-halfcheetah-v4.yaml index 217b658a9080..a796d28a3ce5 100644 --- a/release/rllib_tests/learning_tests/yaml_files/td3/td3-halfcheetah-v4.yaml +++ b/release/rllib_tests/learning_tests/yaml_files/td3/td3-halfcheetah-v4.yaml @@ -3,7 +3,7 @@ td3-halfcheetah-v4: run: TD3 # Minimum reward and total ts (in given time_total_s) to pass this test. 
pass_criteria: - episode_reward_mean: 400.0 + sampler_results/episode_reward_mean:: 400.0 timesteps_total: 1000000 stop: time_total_s: 3600 diff --git a/release/rllib_tests/multi_gpu_learning_tests/multi_gpu_learning_tests.yaml b/release/rllib_tests/multi_gpu_learning_tests/multi_gpu_learning_tests.yaml index aa1f60eb46d6..3c4277f49a98 100644 --- a/release/rllib_tests/multi_gpu_learning_tests/multi_gpu_learning_tests.yaml +++ b/release/rllib_tests/multi_gpu_learning_tests/multi_gpu_learning_tests.yaml @@ -4,7 +4,7 @@ a2c-cartpole-v1: run: A2C # Minimum reward and total ts (in given time_total_s) to pass this test. pass_criteria: - episode_reward_mean: 150.0 + sampler_results/episode_reward_mean: 150.0 timesteps_total: 500000 stop: time_total_s: 600 @@ -18,7 +18,7 @@ appo-cartpole-v1-no-vtrace: run: APPO # Minimum reward and total ts (in given time_total_s) to pass this test. pass_criteria: - episode_reward_mean: 150.0 + sampler_results/episode_reward_mean: 150.0 timesteps_total: 500000 stop: time_total_s: 600 @@ -42,7 +42,7 @@ appo-cartpole-v1-vtrace: run: APPO # Minimum reward and total ts (in given time_total_s) to pass this test. pass_criteria: - episode_reward_mean: 150.0 + sampler_results/episode_reward_mean:: 150.0 timesteps_total: 500000 stop: time_total_s: 600 @@ -65,7 +65,7 @@ ddpg-repeat-after-me-env: run: DDPG # Minimum reward and total ts (in given time_total_s) to pass this test. pass_criteria: - episode_reward_mean: -50.0 + sampler_results/episode_reward_mean:: -50.0 timesteps_total: 8000 stop: time_total_s: 600 @@ -85,7 +85,7 @@ dqn-cartpole-v1: run: DQN # Minimum reward and total ts (in given time_total_s) to pass this test. pass_criteria: - episode_reward_mean: 150.0 + sampler_results/episode_reward_mean:: 150.0 timesteps_total: 50000 stop: time_total_s: 600 @@ -105,7 +105,7 @@ impala-cartpole-v1: run: IMPALA # Minimum reward and total ts (in given time_total_s) to pass this test. 
pass_criteria: - episode_reward_mean: 150.0 + sampler_results/episode_reward_mean:: 150.0 timesteps_total: 500000 stop: time_total_s: 600 @@ -120,7 +120,7 @@ pg-cartpole-v1: run: PG # Minimum reward and total ts (in given time_total_s) to pass this test. pass_criteria: - episode_reward_mean: 130.0 + sampler_results/episode_reward_mean:: 130.0 timesteps_total: 500000 stop: time_total_s: 600 @@ -138,7 +138,7 @@ ppo-cartpole-v1: run: PPO # Minimum reward and total ts (in given time_total_s) to pass this test. pass_criteria: - episode_reward_mean: 150.0 + sampler_results/episode_reward_mean:: 150.0 timesteps_total: 300000 stop: time_total_s: 600 @@ -161,7 +161,7 @@ sac-repeat-after-me-env: run: SAC # Minimum reward and total ts (in given time_total_s) to pass this test. pass_criteria: - episode_reward_mean: 40.0 + sampler_results/episode_reward_mean:: 40.0 timesteps_total: 4500 stop: time_total_s: 600 @@ -183,7 +183,7 @@ sac-repeat-after-me-env-continuous: run: SAC # Minimum reward and total ts (in given time_total_s) to pass this test. pass_criteria: - episode_reward_mean: -50.0 + sampler_results/episode_reward_mean:: -50.0 timesteps_total: 4500 stop: time_total_s: 600 @@ -208,7 +208,7 @@ simpleq-cartpole-v1: run: SimpleQ # Minimum reward and total ts (in given time_total_s) to pass this test. pass_criteria: - episode_reward_mean: 150.0 + sampler_results/episode_reward_mean:: 150.0 timesteps_total: 85000 stop: time_total_s: 600 @@ -221,7 +221,7 @@ td3-repeat-after-me-env: run: TD3 # Minimum reward and total ts (in given time_total_s) to pass this test. 
pass_criteria: - episode_reward_mean: -50.0 + sampler_results/episode_reward_mean:: -50.0 timesteps_total: 25000 stop: time_total_s: 600 diff --git a/release/rllib_tests/multi_gpu_with_attention_learning_tests/multi_gpu_with_attention_learning_tests.yaml b/release/rllib_tests/multi_gpu_with_attention_learning_tests/multi_gpu_with_attention_learning_tests.yaml index e4c1393fb414..e1109d535fdc 100644 --- a/release/rllib_tests/multi_gpu_with_attention_learning_tests/multi_gpu_with_attention_learning_tests.yaml +++ b/release/rllib_tests/multi_gpu_with_attention_learning_tests/multi_gpu_with_attention_learning_tests.yaml @@ -4,7 +4,7 @@ appo-stateless-cartpole-no-vtrace: run: APPO # Minimum reward and total ts (in given time_total_s) to pass this test. pass_criteria: - episode_reward_mean: 150.0 + sampler_results/episode_reward_mean:: 150.0 timesteps_total: 500000 stop: time_total_s: 600 @@ -38,7 +38,7 @@ appo-stateless-cartpole-vtrace: run: APPO # Minimum reward and total ts (in given time_total_s) to pass this test. pass_criteria: - episode_reward_mean: 150.0 + sampler_results/episode_reward_mean:: 150.0 timesteps_total: 500000 stop: time_total_s: 600 @@ -71,7 +71,7 @@ impala-stateless-cartpole: run: IMPALA # Minimum reward and total ts (in given time_total_s) to pass this test. pass_criteria: - episode_reward_mean: 150.0 + sampler_results/episode_reward_mean:: 150.0 timesteps_total: 500000 stop: time_total_s: 600 @@ -98,7 +98,7 @@ pg-stateless-cartpole: run: PG # Minimum reward and total ts (in given time_total_s) to pass this test. pass_criteria: - episode_reward_mean: 130.0 + sampler_results/episode_reward_mean:: 130.0 timesteps_total: 500000 stop: time_total_s: 600 @@ -126,7 +126,7 @@ ppo-stateless-cartpole: run: PPO # Minimum reward and total ts (in given time_total_s) to pass this test. 
pass_criteria: - episode_reward_mean: 150.0 + sampler_results/episode_reward_mean:: 150.0 timesteps_total: 200000 stop: time_total_s: 600 @@ -160,7 +160,7 @@ ppo-stateless-cartpole: # run: R2D2 # # Minimum reward and total ts (in given time_total_s) to pass this test. # pass_criteria: -# episode_reward_mean: 150.0 +# sampler_results/episode_reward_mean:: 150.0 # timesteps_total: 130000 # stop: # time_total_s: 1200 diff --git a/release/rllib_tests/multi_gpu_with_lstm_learning_tests/multi_gpu_with_lstm_learning_tests.yaml b/release/rllib_tests/multi_gpu_with_lstm_learning_tests/multi_gpu_with_lstm_learning_tests.yaml index 76142bdcfa9d..dcd692a1ebcf 100644 --- a/release/rllib_tests/multi_gpu_with_lstm_learning_tests/multi_gpu_with_lstm_learning_tests.yaml +++ b/release/rllib_tests/multi_gpu_with_lstm_learning_tests/multi_gpu_with_lstm_learning_tests.yaml @@ -4,7 +4,7 @@ a2c-stateless-cartpole: run: A2C # Minimum reward and total ts (in given time_total_s) to pass this test. pass_criteria: - episode_reward_mean: 150.0 + sampler_results/episode_reward_mean:: 150.0 timesteps_total: 500000 stop: time_total_s: 600 @@ -24,7 +24,7 @@ appo-stateless-cartpole-no-vtrace: run: APPO # Minimum reward and total ts (in given time_total_s) to pass this test. pass_criteria: - episode_reward_mean: 150.0 + sampler_results/episode_reward_mean:: 150.0 timesteps_total: 500000 stop: time_total_s: 600 @@ -50,7 +50,7 @@ appo-stateless-cartpole-vtrace: run: APPO # Minimum reward and total ts (in given time_total_s) to pass this test. pass_criteria: - episode_reward_mean: 150.0 + sampler_results/episode_reward_mean:: 150.0 timesteps_total: 500000 stop: time_total_s: 600 @@ -75,7 +75,7 @@ impala-stateless-cartpole: run: IMPALA # Minimum reward and total ts (in given time_total_s) to pass this test. 
pass_criteria: - episode_reward_mean: 150.0 + sampler_results/episode_reward_mean:: 150.0 timesteps_total: 500000 stop: time_total_s: 600 @@ -94,7 +94,7 @@ pg-stateless-cartpole: run: PG # Minimum reward and total ts (in given time_total_s) to pass this test. pass_criteria: - episode_reward_mean: 130.0 + sampler_results/episode_reward_mean:: 130.0 timesteps_total: 500000 stop: time_total_s: 600 @@ -114,7 +114,7 @@ ppo-stateless-cartpole: run: PPO # Minimum reward and total ts (in given time_total_s) to pass this test. pass_criteria: - episode_reward_mean: 150.0 + sampler_results/episode_reward_mean:: 150.0 timesteps_total: 200000 stop: time_total_s: 600 @@ -140,7 +140,7 @@ ppo-stateless-cartpole: # run: R2D2 # # Minimum reward and total ts (in given time_total_s) to pass this test. # pass_criteria: -# episode_reward_mean: 150.0 +# sampler_results/episode_reward_mean:: 150.0 # timesteps_total: 65000 # stop: # time_total_s: 800 diff --git a/rllib/algorithms/algorithm.py b/rllib/algorithms/algorithm.py index 252f5a151f77..8385b3638607 100644 --- a/rllib/algorithms/algorithm.py +++ b/rllib/algorithms/algorithm.py @@ -210,8 +210,8 @@ class Algorithm(Trainable): _override_all_key_list = ["off_policy_estimation_methods", "policies"] _progress_metrics = [ - "episode_reward_mean", - "evaluation/episode_reward_mean", + "sampler_results/episode_reward_mean", + "evaluation/sampler_results/episode_reward_mean", "num_env_steps_sampled", "num_env_steps_trained", ] @@ -456,11 +456,17 @@ def default_logger_creator(config): # (although their values may be nan), so that Tune does not complain # when we use these as stopping criteria. self.evaluation_metrics = { + # TODO: Don't dump sampler results into top-level. 
"evaluation": { "episode_reward_max": np.nan, "episode_reward_min": np.nan, "episode_reward_mean": np.nan, - } + "sampler_results": { + "episode_reward_max": np.nan, + "episode_reward_min": np.nan, + "episode_reward_mean": np.nan, + }, + }, } super().__init__( @@ -1059,6 +1065,11 @@ def duration_fn(num_units_done): keep_custom_metrics=self.config.keep_per_episode_custom_metrics, timeout_seconds=eval_cfg.metrics_episode_collection_timeout_s, ) + + # TODO: Don't dump sampler results into top-level. + if not self.config.custom_evaluation_function: + metrics = dict({"sampler_results": metrics}, **metrics) + metrics[NUM_AGENT_STEPS_SAMPLED_THIS_ITER] = agent_steps_this_iter metrics[NUM_ENV_STEPS_SAMPLED_THIS_ITER] = env_steps_this_iter # TODO: Remove this key at some point. Here for backward compatibility. @@ -1256,11 +1267,14 @@ def remote_fn(worker): f"{unit} done)" ) - metrics = summarize_episodes( + sampler_results = summarize_episodes( rollout_metrics, keep_custom_metrics=eval_cfg["keep_per_episode_custom_metrics"], ) + # TODO: Don't dump sampler results into top-level. + metrics = dict({"sampler_results": sampler_results}, **sampler_results) + metrics[NUM_AGENT_STEPS_SAMPLED_THIS_ITER] = agent_steps_this_iter metrics[NUM_ENV_STEPS_SAMPLED_THIS_ITER] = env_steps_this_iter # TODO: Remove this key at some point. Here for backward compatibility. @@ -3023,22 +3037,32 @@ def _compile_iteration_results( NUM_ENV_STEPS_TRAINED, ]: results[c] = self._counters[c] + time_taken_sec = step_ctx.get_time_taken_sec() if self.config.count_steps_by == "agent_steps": results[NUM_AGENT_STEPS_SAMPLED + "_this_iter"] = step_ctx.sampled results[NUM_AGENT_STEPS_TRAINED + "_this_iter"] = step_ctx.trained + results[NUM_AGENT_STEPS_SAMPLED + "_throughput_per_sec"] = ( + step_ctx.sampled / time_taken_sec + ) + results[NUM_AGENT_STEPS_TRAINED + "_throughput_per_sec"] = ( + step_ctx.trained / time_taken_sec + ) # TODO: For CQL and other algos, count by trained steps. 
results["timesteps_total"] = self._counters[NUM_AGENT_STEPS_SAMPLED] - # TODO: Backward compatibility. - results[STEPS_TRAINED_THIS_ITER_COUNTER] = step_ctx.trained else: results[NUM_ENV_STEPS_SAMPLED + "_this_iter"] = step_ctx.sampled results[NUM_ENV_STEPS_TRAINED + "_this_iter"] = step_ctx.trained + results[NUM_ENV_STEPS_SAMPLED + "_throughput_per_sec"] = ( + step_ctx.sampled / time_taken_sec + ) + results[NUM_ENV_STEPS_TRAINED + "_throughput_per_sec"] = ( + step_ctx.trained / time_taken_sec + ) # TODO: For CQL and other algos, count by trained steps. results["timesteps_total"] = self._counters[NUM_ENV_STEPS_SAMPLED] - # TODO: Backward compatibility. - results[STEPS_TRAINED_THIS_ITER_COUNTER] = step_ctx.trained # TODO: Backward compatibility. + results[STEPS_TRAINED_THIS_ITER_COUNTER] = step_ctx.trained results["agent_timesteps_total"] = self._counters[NUM_AGENT_STEPS_SAMPLED] # Process timer results. @@ -3102,6 +3126,8 @@ def _validate_config(config, trainer_or_none): class TrainIterCtx: def __init__(self, algo: Algorithm): self.algo = algo + self.time_start = None + self.time_stop = None def __enter__(self): # Before first call to `step()`, `results` is expected to be None -> @@ -3122,7 +3148,11 @@ def __enter__(self): return self def __exit__(self, *args): - pass + self.time_stop = time.time() + + def get_time_taken_sec(self) -> float: + """Returns the time we spent in the context in seconds.""" + return self.time_stop - self.time_start def should_stop(self, results): diff --git a/rllib/tuned_examples/a2c/cartpole-a2c-fake-gpus.yaml b/rllib/tuned_examples/a2c/cartpole-a2c-fake-gpus.yaml index 8eb80d49610f..6c2e1d4964fc 100644 --- a/rllib/tuned_examples/a2c/cartpole-a2c-fake-gpus.yaml +++ b/rllib/tuned_examples/a2c/cartpole-a2c-fake-gpus.yaml @@ -2,7 +2,7 @@ cartpole-a2c-fake-gpus: env: CartPole-v1 run: A2C stop: - episode_reward_mean: 150 + sampler_results/episode_reward_mean: 150 training_iteration: 200 config: # Works for both torch and tf. 
diff --git a/rllib/tuned_examples/a2c/cartpole-a2c-microbatch.yaml b/rllib/tuned_examples/a2c/cartpole-a2c-microbatch.yaml index c784917ff943..1beeae3619cf 100644 --- a/rllib/tuned_examples/a2c/cartpole-a2c-microbatch.yaml +++ b/rllib/tuned_examples/a2c/cartpole-a2c-microbatch.yaml @@ -2,7 +2,7 @@ cartpole-a2c-microbatch: env: CartPole-v1 run: A2C stop: - episode_reward_mean: 150 + sampler_results/episode_reward_mean: 150 timesteps_total: 1000000 config: # Works for both torch and tf. diff --git a/rllib/tuned_examples/a2c/cartpole-a2c.yaml b/rllib/tuned_examples/a2c/cartpole-a2c.yaml index 73a01bf0b51e..c5b67de577bc 100644 --- a/rllib/tuned_examples/a2c/cartpole-a2c.yaml +++ b/rllib/tuned_examples/a2c/cartpole-a2c.yaml @@ -2,7 +2,7 @@ cartpole-a2c: env: CartPole-v1 run: A2C stop: - episode_reward_mean: 150 + sampler_results/episode_reward_mean: 150 timesteps_total: 500000 config: # Works for both torch and tf. diff --git a/rllib/tuned_examples/a3c/cartpole-a3c.yaml b/rllib/tuned_examples/a3c/cartpole-a3c.yaml index 6835453d36d6..351cf07ac74b 100644 --- a/rllib/tuned_examples/a3c/cartpole-a3c.yaml +++ b/rllib/tuned_examples/a3c/cartpole-a3c.yaml @@ -2,7 +2,7 @@ cartpole-a3c: env: CartPole-v1 run: A3C stop: - episode_reward_mean: 150 + sampler_results/episode_reward_mean: 150 timesteps_total: 200000 config: # Works for both torch and tf. 
diff --git a/rllib/tuned_examples/alpha_star/multi-agent-cartpole-alpha-star.yaml b/rllib/tuned_examples/alpha_star/multi-agent-cartpole-alpha-star.yaml index 2b46df8ea8d6..6a5db5c0d3a4 100644 --- a/rllib/tuned_examples/alpha_star/multi-agent-cartpole-alpha-star.yaml +++ b/rllib/tuned_examples/alpha_star/multi-agent-cartpole-alpha-star.yaml @@ -2,7 +2,7 @@ multi-agent-cartpole-alpha-star: env: ray.rllib.examples.env.multi_agent.MultiAgentCartPole run: AlphaStar stop: - episode_reward_mean: 600 # 600 / 4 (==num_agents) = 150 + sampler_results/episode_reward_mean: 600 # 600 / 4 (==num_agents) = 150 timesteps_total: 200000 config: # Works for both torch and tf. diff --git a/rllib/tuned_examples/alpha_zero/cartpole-sparse-rewards-alpha-zero.yaml b/rllib/tuned_examples/alpha_zero/cartpole-sparse-rewards-alpha-zero.yaml index e1d8768f6dbf..a18e49c669cb 100644 --- a/rllib/tuned_examples/alpha_zero/cartpole-sparse-rewards-alpha-zero.yaml +++ b/rllib/tuned_examples/alpha_zero/cartpole-sparse-rewards-alpha-zero.yaml @@ -2,7 +2,7 @@ cartpole-sparse-rewards-alpha-zero: env: ray.rllib.examples.env.cartpole_sparse_rewards.CartPoleSparseRewards run: AlphaZero stop: - episode_reward_mean: 30.0 + sampler_results/episode_reward_mean: 30.0 timesteps_total: 100000 config: # Only supported for torch right now. diff --git a/rllib/tuned_examples/apex_ddpg/mountaincarcontinuous-apex-ddpg.yaml b/rllib/tuned_examples/apex_ddpg/mountaincarcontinuous-apex-ddpg.yaml index aa8bad0fea9c..a0fc3ba2fe24 100644 --- a/rllib/tuned_examples/apex_ddpg/mountaincarcontinuous-apex-ddpg.yaml +++ b/rllib/tuned_examples/apex_ddpg/mountaincarcontinuous-apex-ddpg.yaml @@ -3,7 +3,7 @@ mountaincarcontinuous-apex-ddpg: env: MountainCarContinuous-v0 run: APEX_DDPG stop: - episode_reward_mean: 90 + sampler_results/episode_reward_mean: 90 config: # Works for both torch and tf. 
framework: torch diff --git a/rllib/tuned_examples/apex_ddpg/pendulum-apex-ddpg.yaml b/rllib/tuned_examples/apex_ddpg/pendulum-apex-ddpg.yaml index bf7d2a83c809..d6682897de59 100644 --- a/rllib/tuned_examples/apex_ddpg/pendulum-apex-ddpg.yaml +++ b/rllib/tuned_examples/apex_ddpg/pendulum-apex-ddpg.yaml @@ -3,7 +3,7 @@ pendulum-apex-ddpg: env: Pendulum-v1 run: APEX_DDPG stop: - episode_reward_mean: -160 + sampler_results/episode_reward_mean: -160 config: # Works for both torch and tf. framework: torch diff --git a/rllib/tuned_examples/apex_dqn/atari-apex-dqn.yaml b/rllib/tuned_examples/apex_dqn/atari-apex-dqn.yaml index d1f241d01c07..d3ed5f6d3071 100644 --- a/rllib/tuned_examples/apex_dqn/atari-apex-dqn.yaml +++ b/rllib/tuned_examples/apex_dqn/atari-apex-dqn.yaml @@ -4,7 +4,7 @@ apex-breakoutnoframeskip-v5: # Minimum reward and total ts (in given time_total_s) to pass this test. stop: time_total_s: 7200 - episode_reward_mean: 20.0 + sampler_results/episode_reward_mean: 20.0 timesteps_total: 7000000 config: # Make analogous to old v4 + NoFrameskip. diff --git a/rllib/tuned_examples/apex_dqn/cartpole-apex-dqn-fake-gpus.yaml b/rllib/tuned_examples/apex_dqn/cartpole-apex-dqn-fake-gpus.yaml index 2c67e2b6814b..64f2cf6e78b3 100644 --- a/rllib/tuned_examples/apex_dqn/cartpole-apex-dqn-fake-gpus.yaml +++ b/rllib/tuned_examples/apex_dqn/cartpole-apex-dqn-fake-gpus.yaml @@ -9,7 +9,7 @@ cartpole-apex-dqn: env: CartPole-v1 run: APEX stop: - episode_reward_mean: 150.0 + sampler_results/episode_reward_mean: 150.0 timesteps_total: 250000 config: # Works for both torch and tf. 
diff --git a/rllib/tuned_examples/apex_dqn/cartpole-apex-dqn.yaml b/rllib/tuned_examples/apex_dqn/cartpole-apex-dqn.yaml index 45e2bfbb986a..4ee7c2406326 100644 --- a/rllib/tuned_examples/apex_dqn/cartpole-apex-dqn.yaml +++ b/rllib/tuned_examples/apex_dqn/cartpole-apex-dqn.yaml @@ -9,7 +9,7 @@ cartpole-apex-dqn-training-itr: env: CartPole-v1 run: APEX stop: - episode_reward_mean: 150.0 + sampler_results/episode_reward_mean: 150.0 timesteps_total: 250000 config: # Works for both torch and tf. diff --git a/rllib/tuned_examples/apex_dqn/pong-apex-dqn.yaml b/rllib/tuned_examples/apex_dqn/pong-apex-dqn.yaml index a3d1d978bfc0..4516d520230b 100644 --- a/rllib/tuned_examples/apex_dqn/pong-apex-dqn.yaml +++ b/rllib/tuned_examples/apex_dqn/pong-apex-dqn.yaml @@ -5,7 +5,7 @@ pong-apex: env: ALE/Pong-v5 run: APEX stop: - episode_reward_mean: 19.0 + sampler_results/episode_reward_mean: 19.0 timesteps_total: 4000000 config: # Works for both torch and tf. diff --git a/rllib/tuned_examples/appo/cartpole-appo-vtrace-fake-gpus.yaml b/rllib/tuned_examples/appo/cartpole-appo-vtrace-fake-gpus.yaml index bf6bbed83db8..fe69576c232f 100644 --- a/rllib/tuned_examples/appo/cartpole-appo-vtrace-fake-gpus.yaml +++ b/rllib/tuned_examples/appo/cartpole-appo-vtrace-fake-gpus.yaml @@ -2,7 +2,7 @@ cartpole-appo-vtrace-fake-gpus: env: CartPole-v1 run: APPO stop: - episode_reward_mean: 150 + sampler_results/episode_reward_mean: 150 training_iteration: 400 config: # Works for both torch and tf. 
diff --git a/rllib/tuned_examples/appo/cartpole-appo-vtrace-separate-losses.yaml b/rllib/tuned_examples/appo/cartpole-appo-vtrace-separate-losses.yaml index 8f55d7ed9f6a..970c36f1de17 100644 --- a/rllib/tuned_examples/appo/cartpole-appo-vtrace-separate-losses.yaml +++ b/rllib/tuned_examples/appo/cartpole-appo-vtrace-separate-losses.yaml @@ -2,7 +2,7 @@ cartpole-appo-vtrace-separate-losses: env: CartPole-v1 run: APPO stop: - episode_reward_mean: 150 + sampler_results/episode_reward_mean: 150 timesteps_total: 200000 config: # Only works for tf|tf2 so far. diff --git a/rllib/tuned_examples/appo/cartpole-appo-vtrace.yaml b/rllib/tuned_examples/appo/cartpole-appo-vtrace.yaml index f5cda41a6bef..c7a36a550b8b 100644 --- a/rllib/tuned_examples/appo/cartpole-appo-vtrace.yaml +++ b/rllib/tuned_examples/appo/cartpole-appo-vtrace.yaml @@ -2,7 +2,7 @@ cartpole-appo-vtrace: env: CartPole-v1 run: APPO stop: - episode_reward_mean: 180 + sampler_results/episode_reward_mean: 180 timesteps_total: 200000 config: # Works for both torch and tf. diff --git a/rllib/tuned_examples/appo/cartpole-appo-w-rl-modules-and-learner.yaml b/rllib/tuned_examples/appo/cartpole-appo-w-rl-modules-and-learner.yaml index dcbc7f7c1259..34ec96700541 100644 --- a/rllib/tuned_examples/appo/cartpole-appo-w-rl-modules-and-learner.yaml +++ b/rllib/tuned_examples/appo/cartpole-appo-w-rl-modules-and-learner.yaml @@ -2,7 +2,7 @@ cartpole-appo-w-rl-modules-and-learner: env: CartPole-v1 run: APPO stop: - episode_reward_mean: 150 + sampler_results/episode_reward_mean: 150 timesteps_total: 200000 config: # Works for both torch and tf. 
diff --git a/rllib/tuned_examples/appo/cartpole-appo.yaml b/rllib/tuned_examples/appo/cartpole-appo.yaml index 8010b58a774e..7ad2cc89be11 100644 --- a/rllib/tuned_examples/appo/cartpole-appo.yaml +++ b/rllib/tuned_examples/appo/cartpole-appo.yaml @@ -2,7 +2,7 @@ cartpole-appo: env: CartPole-v1 run: APPO stop: - episode_reward_mean: 150 + sampler_results/episode_reward_mean: 150 timesteps_total: 200000 config: # Works for both torch and tf. diff --git a/rllib/tuned_examples/appo/frozenlake-appo-vtrace.yaml b/rllib/tuned_examples/appo/frozenlake-appo-vtrace.yaml index 2214fac64b9a..9e51375b1151 100644 --- a/rllib/tuned_examples/appo/frozenlake-appo-vtrace.yaml +++ b/rllib/tuned_examples/appo/frozenlake-appo-vtrace.yaml @@ -2,7 +2,7 @@ frozenlake-appo-vtrace: env: FrozenLake-v1 run: APPO stop: - episode_reward_mean: 0.99 + sampler_results/episode_reward_mean: 0.99 timesteps_total: 1000000 config: # Works for both torch and tf. diff --git a/rllib/tuned_examples/appo/multi-agent-cartpole-appo.yaml b/rllib/tuned_examples/appo/multi-agent-cartpole-appo.yaml index 648cf579b71e..dfee4a821091 100644 --- a/rllib/tuned_examples/appo/multi-agent-cartpole-appo.yaml +++ b/rllib/tuned_examples/appo/multi-agent-cartpole-appo.yaml @@ -2,7 +2,7 @@ multi-agent-cartpole-appo: env: ray.rllib.examples.env.multi_agent.MultiAgentCartPole run: APPO stop: - episode_reward_mean: 600 # 600 / 4 (==num_agents) = 150 + sampler_results/episode_reward_mean: 600 # 600 / 4 (==num_agents) = 150 timesteps_total: 200000 config: # Works for both torch and tf. 
diff --git a/rllib/tuned_examples/appo/multi-agent-cartpole-crashing-restart-env-appo.yaml b/rllib/tuned_examples/appo/multi-agent-cartpole-crashing-restart-env-appo.yaml index 1c7e9eed5793..34132fc02c92 100644 --- a/rllib/tuned_examples/appo/multi-agent-cartpole-crashing-restart-env-appo.yaml +++ b/rllib/tuned_examples/appo/multi-agent-cartpole-crashing-restart-env-appo.yaml @@ -2,7 +2,7 @@ multi-agent-cartpole-crashing-appo: env: ray.rllib.examples.env.cartpole_crashing.MultiAgentCartPoleCrashing run: APPO stop: - evaluation/episode_reward_mean: 300.0 + evaluation/sampler_results/episode_reward_mean: 300.0 config: # Works for both torch and tf. framework: torch diff --git a/rllib/tuned_examples/appo/pendulum-appo.yaml b/rllib/tuned_examples/appo/pendulum-appo.yaml index cd35451574da..dddf26c364a3 100644 --- a/rllib/tuned_examples/appo/pendulum-appo.yaml +++ b/rllib/tuned_examples/appo/pendulum-appo.yaml @@ -2,7 +2,7 @@ pendulum-appo-vtrace: env: Pendulum-v1 run: APPO stop: - episode_reward_mean: -1000 # just check it learns a bit + sampler_results/episode_reward_mean: -1000 # just check it learns a bit timesteps_total: 500000 config: # Works for both torch and tf. diff --git a/rllib/tuned_examples/appo/pong-appo.yaml b/rllib/tuned_examples/appo/pong-appo.yaml index 8c4d69f96024..8614de9c3cfc 100644 --- a/rllib/tuned_examples/appo/pong-appo.yaml +++ b/rllib/tuned_examples/appo/pong-appo.yaml @@ -7,7 +7,7 @@ pong-appo: env: ALE/Pong-v5 run: APPO stop: - episode_reward_mean: 18.0 + sampler_results/episode_reward_mean: 18.0 timesteps_total: 5000000 config: # Works for both torch and tf. 
diff --git a/rllib/tuned_examples/bandits/interest-evolution-recsim-env-bandit-linucb.yaml b/rllib/tuned_examples/bandits/interest-evolution-recsim-env-bandit-linucb.yaml index 4157d4e38ffd..5984b9b471df 100644 --- a/rllib/tuned_examples/bandits/interest-evolution-recsim-env-bandit-linucb.yaml +++ b/rllib/tuned_examples/bandits/interest-evolution-recsim-env-bandit-linucb.yaml @@ -2,7 +2,7 @@ interest-evolution-recsim-env-bandit-linucb: env: ray.rllib.examples.env.recommender_system_envs_with_recsim.InterestEvolutionRecSimEnv run: BanditLinUCB stop: - episode_reward_mean: 180.0 + sampler_results/episode_reward_mean: 180.0 timesteps_total: 50000 config: framework: torch diff --git a/rllib/tuned_examples/cql/pendulum-cql.yaml b/rllib/tuned_examples/cql/pendulum-cql.yaml index 7cba06eac9c3..5ba42e4b7a8c 100644 --- a/rllib/tuned_examples/cql/pendulum-cql.yaml +++ b/rllib/tuned_examples/cql/pendulum-cql.yaml @@ -6,7 +6,7 @@ pendulum-cql: env: Pendulum-v1 run: CQL stop: - evaluation/episode_reward_mean: -700 + evaluation/sampler_results/episode_reward_mean: -700 timesteps_total: 800000 config: # Works for both torch and tf. 
diff --git a/rllib/tuned_examples/crr/cartpole-v1-crr.yaml b/rllib/tuned_examples/crr/cartpole-v1-crr.yaml index 2501ecd933f7..744c71730f8f 100644 --- a/rllib/tuned_examples/crr/cartpole-v1-crr.yaml +++ b/rllib/tuned_examples/crr/cartpole-v1-crr.yaml @@ -2,7 +2,7 @@ cartpole_crr: env: 'CartPole-v1' run: CRR stop: - evaluation/episode_reward_mean: 200 + evaluation/sampler_results/episode_reward_mean: 200 training_iteration: 100 config: input: 'dataset' diff --git a/rllib/tuned_examples/crr/cartpole-v1-crr_expectation.yaml b/rllib/tuned_examples/crr/cartpole-v1-crr_expectation.yaml index cd752aaef7f2..95863298d9f3 100644 --- a/rllib/tuned_examples/crr/cartpole-v1-crr_expectation.yaml +++ b/rllib/tuned_examples/crr/cartpole-v1-crr_expectation.yaml @@ -2,7 +2,7 @@ cartpole_crr: env: 'CartPole-v1' run: CRR stop: - evaluation/episode_reward_mean: 200 + evaluation/sampler_results/episode_reward_mean: 200 training_iteration: 100 config: input: 'dataset' diff --git a/rllib/tuned_examples/crr/pendulum-v1-crr.yaml b/rllib/tuned_examples/crr/pendulum-v1-crr.yaml index 539b3ee886b4..89373bb009cc 100644 --- a/rllib/tuned_examples/crr/pendulum-v1-crr.yaml +++ b/rllib/tuned_examples/crr/pendulum-v1-crr.yaml @@ -3,7 +3,7 @@ pendulum_crr: run: CRR stop: # We could make this -200, but given that we have 4 cpus for our tests, we will have to settle for -300. - evaluation/episode_reward_mean: -300 + evaluation/sampler_results/episode_reward_mean: -300 timesteps_total: 2000000 config: input: 'dataset' diff --git a/rllib/tuned_examples/ddpg/halfcheetah-ddpg.yaml b/rllib/tuned_examples/ddpg/halfcheetah-ddpg.yaml index 51b70ef776b3..878fe239ec2f 100644 --- a/rllib/tuned_examples/ddpg/halfcheetah-ddpg.yaml +++ b/rllib/tuned_examples/ddpg/halfcheetah-ddpg.yaml @@ -3,7 +3,7 @@ halfcheetah-ddpg: env: HalfCheetah-v2 run: DDPG stop: - episode_reward_mean: 2000 + sampler_results/episode_reward_mean: 2000 time_total_s: 5400 # 90 minutes config: # Works for both torch and tf. 
diff --git a/rllib/tuned_examples/ddpg/halfcheetah-pybullet-ddpg.yaml b/rllib/tuned_examples/ddpg/halfcheetah-pybullet-ddpg.yaml index ef8c9995d73b..fa8c278228e7 100644 --- a/rllib/tuned_examples/ddpg/halfcheetah-pybullet-ddpg.yaml +++ b/rllib/tuned_examples/ddpg/halfcheetah-pybullet-ddpg.yaml @@ -3,7 +3,7 @@ ddpg-halfcheetahbulletenv-v0: env: HalfCheetahBulletEnv-v0 run: DDPG stop: - episode_reward_mean: -300.0 + sampler_results/episode_reward_mean: -300.0 timesteps_total: 200000 config: actor_hiddens: [256, 256] diff --git a/rllib/tuned_examples/ddpg/hopper-pybullet-ddpg.yaml b/rllib/tuned_examples/ddpg/hopper-pybullet-ddpg.yaml index c436cf5f5e8d..806641eb430e 100644 --- a/rllib/tuned_examples/ddpg/hopper-pybullet-ddpg.yaml +++ b/rllib/tuned_examples/ddpg/hopper-pybullet-ddpg.yaml @@ -4,7 +4,7 @@ ddpg-hopperbulletenv-v0: run: DDPG # Minimum reward and total ts (in given time_total_s) to pass this test. pass_criteria: - episode_reward_mean: 120.0 + sampler_results/episode_reward_mean: 120.0 timesteps_total: 50000 stop: time_total_s: 2000 diff --git a/rllib/tuned_examples/ddpg/mountaincarcontinuous-ddpg.yaml b/rllib/tuned_examples/ddpg/mountaincarcontinuous-ddpg.yaml index 6d260436c0b9..0b5ed7f90995 100644 --- a/rllib/tuned_examples/ddpg/mountaincarcontinuous-ddpg.yaml +++ b/rllib/tuned_examples/ddpg/mountaincarcontinuous-ddpg.yaml @@ -3,7 +3,7 @@ mountaincarcontinuous-ddpg: env: MountainCarContinuous-v0 run: DDPG stop: - episode_reward_mean: 90 + sampler_results/episode_reward_mean: 90 time_total_s: 600 # 10 minutes config: # Works for both torch and tf. 
diff --git a/rllib/tuned_examples/ddpg/pendulum-ddpg-fake-gpus.yaml b/rllib/tuned_examples/ddpg/pendulum-ddpg-fake-gpus.yaml index fa9780dfff89..325795577b05 100644 --- a/rllib/tuned_examples/ddpg/pendulum-ddpg-fake-gpus.yaml +++ b/rllib/tuned_examples/ddpg/pendulum-ddpg-fake-gpus.yaml @@ -2,7 +2,7 @@ pendulum-ddpg-fake-gpus: env: Pendulum-v1 run: DDPG stop: - episode_reward_mean: -1000 + sampler_results/episode_reward_mean: -1000 timesteps_total: 40000 config: # Works for both torch and tf. diff --git a/rllib/tuned_examples/ddpg/pendulum-ddpg.yaml b/rllib/tuned_examples/ddpg/pendulum-ddpg.yaml index a4db86b9d235..de3239bc306b 100644 --- a/rllib/tuned_examples/ddpg/pendulum-ddpg.yaml +++ b/rllib/tuned_examples/ddpg/pendulum-ddpg.yaml @@ -3,7 +3,7 @@ pendulum-ddpg: env: Pendulum-v1 run: DDPG stop: - episode_reward_mean: -320 + sampler_results/episode_reward_mean: -320 timesteps_total: 30000 config: # Works for both torch and tf. diff --git a/rllib/tuned_examples/ddppo/cartpole-ddppo.yaml b/rllib/tuned_examples/ddppo/cartpole-ddppo.yaml index 24a072c33954..3332b6c29ce1 100644 --- a/rllib/tuned_examples/ddppo/cartpole-ddppo.yaml +++ b/rllib/tuned_examples/ddppo/cartpole-ddppo.yaml @@ -2,7 +2,7 @@ cartpole-ddppo: env: CartPole-v1 run: DDPPO stop: - episode_reward_mean: 150 + sampler_results/episode_reward_mean: 150 timesteps_total: 100000 config: framework: torch diff --git a/rllib/tuned_examples/ddppo/pendulum-ddppo.yaml b/rllib/tuned_examples/ddppo/pendulum-ddppo.yaml index c89e5574274a..32aa9e171087 100644 --- a/rllib/tuned_examples/ddppo/pendulum-ddppo.yaml +++ b/rllib/tuned_examples/ddppo/pendulum-ddppo.yaml @@ -2,7 +2,7 @@ pendulum-ddppo: env: Pendulum-v1 run: DDPPO stop: - episode_reward_mean: -300 + sampler_results/episode_reward_mean: -300 timesteps_total: 1500000 config: framework: torch diff --git a/rllib/tuned_examples/dqn/cartpole-dqn-fake-gpus.yaml b/rllib/tuned_examples/dqn/cartpole-dqn-fake-gpus.yaml index f1970f915b7f..410826fd3227 100644 --- 
a/rllib/tuned_examples/dqn/cartpole-dqn-fake-gpus.yaml +++ b/rllib/tuned_examples/dqn/cartpole-dqn-fake-gpus.yaml @@ -2,7 +2,7 @@ cartpole-dqn-fake-gpus: env: CartPole-v1 run: DQN stop: - episode_reward_mean: 150 + sampler_results/episode_reward_mean: 150 training_iteration: 400 config: # Works for both torch and tf. diff --git a/rllib/tuned_examples/dqn/cartpole-dqn-param-noise.yaml b/rllib/tuned_examples/dqn/cartpole-dqn-param-noise.yaml index 968c810a4159..7da16136a00d 100644 --- a/rllib/tuned_examples/dqn/cartpole-dqn-param-noise.yaml +++ b/rllib/tuned_examples/dqn/cartpole-dqn-param-noise.yaml @@ -2,7 +2,7 @@ cartpole-dqn-w-param-noise: env: CartPole-v1 run: DQN stop: - episode_reward_mean: 150 + sampler_results/episode_reward_mean: 150 timesteps_total: 300000 config: # Works for both torch and tf. diff --git a/rllib/tuned_examples/dqn/cartpole-dqn-softq.yaml b/rllib/tuned_examples/dqn/cartpole-dqn-softq.yaml index 6f24ca1e55f2..112838425c4b 100644 --- a/rllib/tuned_examples/dqn/cartpole-dqn-softq.yaml +++ b/rllib/tuned_examples/dqn/cartpole-dqn-softq.yaml @@ -2,7 +2,7 @@ cartpole-dqn: env: CartPole-v1 run: DQN stop: - episode_reward_mean: 150 + sampler_results/episode_reward_mean: 150 timesteps_total: 100000 config: # Works for both torch and tf. diff --git a/rllib/tuned_examples/dqn/cartpole-dqn.yaml b/rllib/tuned_examples/dqn/cartpole-dqn.yaml index a699ea00a849..7ff4ad4a950e 100644 --- a/rllib/tuned_examples/dqn/cartpole-dqn.yaml +++ b/rllib/tuned_examples/dqn/cartpole-dqn.yaml @@ -2,7 +2,7 @@ cartpole-dqn: env: CartPole-v1 run: DQN stop: - episode_reward_mean: 100 + sampler_results/episode_reward_mean: 100 timesteps_total: 100000 config: # Works for both torch and tf. 
diff --git a/rllib/tuned_examples/dqn/pong-dqn.yaml b/rllib/tuned_examples/dqn/pong-dqn.yaml index f39e04306e8d..7ac6a3b59fa2 100644 --- a/rllib/tuned_examples/dqn/pong-dqn.yaml +++ b/rllib/tuned_examples/dqn/pong-dqn.yaml @@ -3,7 +3,7 @@ pong-deterministic-dqn: env: ALE/Pong-v5 run: DQN stop: - episode_reward_mean: 20 + sampler_results/episode_reward_mean: 20 time_total_s: 7200 config: # Works for both torch and tf. diff --git a/rllib/tuned_examples/dqn/pong-rainbow.yaml b/rllib/tuned_examples/dqn/pong-rainbow.yaml index 99198ba2eb8b..60702c062ec1 100644 --- a/rllib/tuned_examples/dqn/pong-rainbow.yaml +++ b/rllib/tuned_examples/dqn/pong-rainbow.yaml @@ -2,7 +2,7 @@ pong-deterministic-rainbow: env: ALE/Pong-v5 run: DQN stop: - episode_reward_mean: 20 + sampler_results/episode_reward_mean: 20 config: # Make analogous to old v4 + NoFrameskip. env_config: diff --git a/rllib/tuned_examples/dt/cartpole-v1-dt.yaml b/rllib/tuned_examples/dt/cartpole-v1-dt.yaml index 845bd6d91282..a4104487d134 100644 --- a/rllib/tuned_examples/dt/cartpole-v1-dt.yaml +++ b/rllib/tuned_examples/dt/cartpole-v1-dt.yaml @@ -2,7 +2,7 @@ cartpole_dt: env: 'CartPole-v1' run: DT stop: - evaluation/episode_reward_mean: 200 + evaluation/sampler_results/episode_reward_mean: 200 training_iteration: 100 config: input: 'dataset' diff --git a/rllib/tuned_examples/dt/pendulum-v1-dt.yaml b/rllib/tuned_examples/dt/pendulum-v1-dt.yaml index 9d67afd6b06e..d5d98f6fe4bf 100644 --- a/rllib/tuned_examples/dt/pendulum-v1-dt.yaml +++ b/rllib/tuned_examples/dt/pendulum-v1-dt.yaml @@ -3,7 +3,7 @@ pendulum_dt: run: DT stop: # We could make this higher, but given that we have 4 cpus for our tests, we will have to settle for -300. 
- evaluation/episode_reward_mean: -300 + evaluation/sampler_results/episode_reward_mean: -300 timesteps_total: 20000000 config: input: 'dataset' diff --git a/rllib/tuned_examples/dt/pendulum-v1-medium-expert-dt.yaml b/rllib/tuned_examples/dt/pendulum-v1-medium-expert-dt.yaml index 30b027f9c2a5..aaa7156bf363 100644 --- a/rllib/tuned_examples/dt/pendulum-v1-medium-expert-dt.yaml +++ b/rllib/tuned_examples/dt/pendulum-v1-medium-expert-dt.yaml @@ -3,7 +3,7 @@ pendulum_medium_expert_dt: run: DT stop: # We could make this higher, but given that we have 4 cpus for our tests, we will have to settle for -350. - evaluation/episode_reward_mean: -350 + evaluation/sampler_results/episode_reward_mean: -350 timesteps_total: 20000000 config: input: 'dataset' diff --git a/rllib/tuned_examples/es/cartpole-es.yaml b/rllib/tuned_examples/es/cartpole-es.yaml index 64b68a50c1ba..a87c26d3ffe8 100644 --- a/rllib/tuned_examples/es/cartpole-es.yaml +++ b/rllib/tuned_examples/es/cartpole-es.yaml @@ -2,7 +2,7 @@ cartpole-es: env: CartPole-v1 run: ES stop: - episode_reward_mean: 100 + sampler_results/episode_reward_mean: 100 timesteps_total: 500000 config: # Works for both torch and tf. diff --git a/rllib/tuned_examples/es/humanoid-es.yaml b/rllib/tuned_examples/es/humanoid-es.yaml index 90aa8ef761fe..00e9a2ba068c 100644 --- a/rllib/tuned_examples/es/humanoid-es.yaml +++ b/rllib/tuned_examples/es/humanoid-es.yaml @@ -2,7 +2,7 @@ humanoid-v2-es: env: Humanoid-v2 run: ES stop: - episode_reward_mean: 6000 + sampler_results/episode_reward_mean: 6000 config: # Works for both torch and tf. 
framework: torch diff --git a/rllib/tuned_examples/impala/cartpole-impala-fake-gpus.yaml b/rllib/tuned_examples/impala/cartpole-impala-fake-gpus.yaml index 9d871487bdff..4dbb8794612c 100644 --- a/rllib/tuned_examples/impala/cartpole-impala-fake-gpus.yaml +++ b/rllib/tuned_examples/impala/cartpole-impala-fake-gpus.yaml @@ -2,7 +2,7 @@ cartpole-impala-fake-gpus: env: CartPole-v1 run: IMPALA stop: - episode_reward_mean: 150 + sampler_results/episode_reward_mean: 150 training_iteration: 400 config: # Works for both torch and tf. diff --git a/rllib/tuned_examples/impala/cartpole-impala.yaml b/rllib/tuned_examples/impala/cartpole-impala.yaml index 02ef13fed725..63f3d7b322da 100644 --- a/rllib/tuned_examples/impala/cartpole-impala.yaml +++ b/rllib/tuned_examples/impala/cartpole-impala.yaml @@ -2,7 +2,7 @@ cartpole-impala: env: CartPole-v1 run: IMPALA stop: - episode_reward_mean: 150 + sampler_results/episode_reward_mean: 150 timesteps_total: 500000 config: # Works for both torch and tf. diff --git a/rllib/tuned_examples/impala/multi-agent-cartpole-impala.yaml b/rllib/tuned_examples/impala/multi-agent-cartpole-impala.yaml index 7f2232615c5d..e47be62be3f4 100644 --- a/rllib/tuned_examples/impala/multi-agent-cartpole-impala.yaml +++ b/rllib/tuned_examples/impala/multi-agent-cartpole-impala.yaml @@ -2,7 +2,7 @@ multi-agent-cartpole-impala: env: ray.rllib.examples.env.multi_agent.MultiAgentCartPole run: IMPALA stop: - episode_reward_mean: 600 # 600 / 4 (==num_agents) = 150 + sampler_results/episode_reward_mean: 600 # 600 / 4 (==num_agents) = 150 timesteps_total: 200000 config: # Works for both torch and tf. 
diff --git a/rllib/tuned_examples/impala/pendulum-impala.yaml b/rllib/tuned_examples/impala/pendulum-impala.yaml index fccb247e3127..4ce4ba03a826 100644 --- a/rllib/tuned_examples/impala/pendulum-impala.yaml +++ b/rllib/tuned_examples/impala/pendulum-impala.yaml @@ -2,5 +2,5 @@ pendulum-impala-tf: env: Pendulum-v1 run: IMPALA stop: - episode_reward_mean: -700 + sampler_results/episode_reward_mean: -700 timesteps_total: 500000 diff --git a/rllib/tuned_examples/maddpg/two-step-game-maddpg.yaml b/rllib/tuned_examples/maddpg/two-step-game-maddpg.yaml index 6b50cf6aaa0e..69ed92ba2bb9 100644 --- a/rllib/tuned_examples/maddpg/two-step-game-maddpg.yaml +++ b/rllib/tuned_examples/maddpg/two-step-game-maddpg.yaml @@ -2,7 +2,7 @@ two-step-game-maddpg: env: ray.rllib.examples.env.two_step_game.TwoStepGame run: MADDPG stop: - episode_reward_mean: 7.2 + sampler_results/episode_reward_mean: 7.2 timesteps_total: 20000 config: # MADDPG only supports tf for now. diff --git a/rllib/tuned_examples/mbmpo/cartpole-mbmpo.yaml b/rllib/tuned_examples/mbmpo/cartpole-mbmpo.yaml index bf2a9676225d..2d37a5a2b818 100644 --- a/rllib/tuned_examples/mbmpo/cartpole-mbmpo.yaml +++ b/rllib/tuned_examples/mbmpo/cartpole-mbmpo.yaml @@ -2,7 +2,7 @@ cartpole-mbmpo: env: ray.rllib.examples.env.mbmpo_env.CartPoleWrapper run: MBMPO stop: - episode_reward_mean: 190 + sampler_results/episode_reward_mean: 190 training_iteration: 20 config: # Only supported in torch right now. diff --git a/rllib/tuned_examples/mbmpo/pendulum-mbmpo.yaml b/rllib/tuned_examples/mbmpo/pendulum-mbmpo.yaml index 67b0b263c519..2ae964ab1e03 100644 --- a/rllib/tuned_examples/mbmpo/pendulum-mbmpo.yaml +++ b/rllib/tuned_examples/mbmpo/pendulum-mbmpo.yaml @@ -2,7 +2,7 @@ pendulum-mbmpo: env: ray.rllib.examples.env.mbmpo_env.PendulumWrapper run: MBMPO stop: - episode_reward_mean: -500 + sampler_results/episode_reward_mean: -500 training_iteration: 50 config: # Only supported in torch right now. 
diff --git a/rllib/tuned_examples/pg/cartpole-crashing-pg.yaml b/rllib/tuned_examples/pg/cartpole-crashing-pg.yaml index 39ce012f1ae6..babfdc4368f7 100644 --- a/rllib/tuned_examples/pg/cartpole-crashing-pg.yaml +++ b/rllib/tuned_examples/pg/cartpole-crashing-pg.yaml @@ -2,7 +2,7 @@ cartpole-crashing-pg: env: ray.rllib.examples.env.cartpole_crashing.CartPoleCrashing run: PG stop: - evaluation/episode_reward_mean: 150.0 + evaluation/sampler_results/episode_reward_mean: 150.0 num_env_steps_sampled: 150000 config: # Works for both torch and tf. diff --git a/rllib/tuned_examples/pg/cartpole-crashing-with-remote-envs-pg.yaml b/rllib/tuned_examples/pg/cartpole-crashing-with-remote-envs-pg.yaml index 09e8b8c6e3fd..14bd428787bb 100644 --- a/rllib/tuned_examples/pg/cartpole-crashing-with-remote-envs-pg.yaml +++ b/rllib/tuned_examples/pg/cartpole-crashing-with-remote-envs-pg.yaml @@ -2,7 +2,7 @@ cartpole-crashing-with-remote-envs-pg: env: ray.rllib.examples.env.cartpole_crashing.CartPoleCrashing run: PG stop: - evaluation/episode_reward_mean: 35.0 + evaluation/sampler_results/episode_reward_mean: 35.0 timesteps_total: 25000 config: # Works for both torch and tf. diff --git a/rllib/tuned_examples/pg/cartpole-pg-fake-gpus.yaml b/rllib/tuned_examples/pg/cartpole-pg-fake-gpus.yaml index 774e646d6831..99472f649f6a 100644 --- a/rllib/tuned_examples/pg/cartpole-pg-fake-gpus.yaml +++ b/rllib/tuned_examples/pg/cartpole-pg-fake-gpus.yaml @@ -2,7 +2,7 @@ cartpole-pg-fake-gpus: env: CartPole-v1 run: PG stop: - episode_reward_mean: 150 + sampler_results/episode_reward_mean: 150 training_iteration: 600 config: # Works for both torch and tf. 
diff --git a/rllib/tuned_examples/pg/cartpole-pg.yaml b/rllib/tuned_examples/pg/cartpole-pg.yaml index b288f9c2143c..e17ec213ceeb 100644 --- a/rllib/tuned_examples/pg/cartpole-pg.yaml +++ b/rllib/tuned_examples/pg/cartpole-pg.yaml @@ -2,7 +2,7 @@ cartpole-pg: env: CartPole-v1 run: PG stop: - episode_reward_mean: 150 + sampler_results/episode_reward_mean: 150 timesteps_total: 100000 config: # Works for both torch and tf. diff --git a/rllib/tuned_examples/pg/multi-agent-cartpole-crashing-restart-sub-envs-pg.yaml b/rllib/tuned_examples/pg/multi-agent-cartpole-crashing-restart-sub-envs-pg.yaml index b24578b1a4d7..43c322637d91 100644 --- a/rllib/tuned_examples/pg/multi-agent-cartpole-crashing-restart-sub-envs-pg.yaml +++ b/rllib/tuned_examples/pg/multi-agent-cartpole-crashing-restart-sub-envs-pg.yaml @@ -2,7 +2,7 @@ multi-agent-cartpole-crashing-pg: env: ray.rllib.examples.env.cartpole_crashing.MultiAgentCartPoleCrashing run: PG stop: - evaluation/episode_reward_mean: 300.0 + evaluation/sampler_results/episode_reward_mean: 300.0 num_env_steps_sampled: 300000 config: # Works for both torch and tf. diff --git a/rllib/tuned_examples/pg/multi-agent-cartpole-crashing-with-remote-envs-pg.yaml b/rllib/tuned_examples/pg/multi-agent-cartpole-crashing-with-remote-envs-pg.yaml index 39009d9f4fc5..492c08f6e9bb 100644 --- a/rllib/tuned_examples/pg/multi-agent-cartpole-crashing-with-remote-envs-pg.yaml +++ b/rllib/tuned_examples/pg/multi-agent-cartpole-crashing-with-remote-envs-pg.yaml @@ -2,7 +2,7 @@ multi-agent-cartpole-crashing-pg: env: ray.rllib.examples.env.cartpole_crashing.MultiAgentCartPoleCrashing run: PG stop: - evaluation/episode_reward_mean: 300.0 + evaluation/sampler_results/episode_reward_mean: 300.0 num_env_steps_sampled: 300000 config: # Works for both torch and tf. 
diff --git a/rllib/tuned_examples/ppo/cartpole-ppo-fake-gpus.yaml b/rllib/tuned_examples/ppo/cartpole-ppo-fake-gpus.yaml index 08b47c188ff8..6263aa2c2131 100644 --- a/rllib/tuned_examples/ppo/cartpole-ppo-fake-gpus.yaml +++ b/rllib/tuned_examples/ppo/cartpole-ppo-fake-gpus.yaml @@ -2,7 +2,7 @@ cartpole-ppo-fake-gpus: env: CartPole-v1 run: PPO stop: - episode_reward_mean: 150 + sampler_results/episode_reward_mean: 150 training_iteration: 400 config: # Works for both torch and tf. diff --git a/rllib/tuned_examples/ppo/cartpole-ppo-hyperband.yaml b/rllib/tuned_examples/ppo/cartpole-ppo-hyperband.yaml index 8bed23ee363d..25638e5ac864 100644 --- a/rllib/tuned_examples/ppo/cartpole-ppo-hyperband.yaml +++ b/rllib/tuned_examples/ppo/cartpole-ppo-hyperband.yaml @@ -3,7 +3,7 @@ cartpole-ppo: run: PPO num_samples: 3 stop: - episode_reward_mean: 200 + sampler_results/episode_reward_mean: 200 time_total_s: 180 config: # Works for both torch and tf. diff --git a/rllib/tuned_examples/ppo/cartpole-ppo-with-rl-module.yaml b/rllib/tuned_examples/ppo/cartpole-ppo-with-rl-module.yaml index a675da81eaf2..fbfb6905b4ac 100644 --- a/rllib/tuned_examples/ppo/cartpole-ppo-with-rl-module.yaml +++ b/rllib/tuned_examples/ppo/cartpole-ppo-with-rl-module.yaml @@ -2,7 +2,7 @@ cartpole-ppo: env: CartPole-v1 run: PPO stop: - episode_reward_mean: 150 + sampler_results/episode_reward_mean: 150 timesteps_total: 100000 config: # Both torch and tf2 works. diff --git a/rllib/tuned_examples/ppo/cartpole-ppo.yaml b/rllib/tuned_examples/ppo/cartpole-ppo.yaml index dea271014ef6..1922f1d6256b 100644 --- a/rllib/tuned_examples/ppo/cartpole-ppo.yaml +++ b/rllib/tuned_examples/ppo/cartpole-ppo.yaml @@ -2,7 +2,7 @@ cartpole-ppo: env: CartPole-v1 run: PPO stop: - episode_reward_mean: 150 + sampler_results/episode_reward_mean: 150 timesteps_total: 100000 config: # Works for both torch and tf. 
diff --git a/rllib/tuned_examples/ppo/halfcheetah-ppo.yaml b/rllib/tuned_examples/ppo/halfcheetah-ppo.yaml index b220b615dabe..8e442f6a0492 100644 --- a/rllib/tuned_examples/ppo/halfcheetah-ppo.yaml +++ b/rllib/tuned_examples/ppo/halfcheetah-ppo.yaml @@ -2,7 +2,7 @@ halfcheetah-ppo: env: HalfCheetah-v2 run: PPO stop: - episode_reward_mean: 9800 + sampler_results/episode_reward_mean: 9800 time_total_s: 10800 config: # Works for both torch and tf. diff --git a/rllib/tuned_examples/ppo/humanoid-ppo-gae.yaml b/rllib/tuned_examples/ppo/humanoid-ppo-gae.yaml index 2e76777afe20..707bab654f44 100644 --- a/rllib/tuned_examples/ppo/humanoid-ppo-gae.yaml +++ b/rllib/tuned_examples/ppo/humanoid-ppo-gae.yaml @@ -2,7 +2,7 @@ humanoid-ppo-gae: env: Humanoid-v1 run: PPO stop: - episode_reward_mean: 6000 + sampler_results/episode_reward_mean: 6000 config: # Works for both torch and tf. framework: torch diff --git a/rllib/tuned_examples/ppo/humanoid-ppo.yaml b/rllib/tuned_examples/ppo/humanoid-ppo.yaml index 0b5cf0955480..88dee3fe8b2c 100644 --- a/rllib/tuned_examples/ppo/humanoid-ppo.yaml +++ b/rllib/tuned_examples/ppo/humanoid-ppo.yaml @@ -2,7 +2,7 @@ humanoid-ppo: env: Humanoid-v1 run: PPO stop: - episode_reward_mean: 6000 + sampler_results/episode_reward_mean: 6000 config: # Works for both torch and tf. 
framework: torch diff --git a/rllib/tuned_examples/ppo/pendulum-ppo-with-rl-module.yaml b/rllib/tuned_examples/ppo/pendulum-ppo-with-rl-module.yaml index 08cd0abb4bee..5b2888d709f7 100644 --- a/rllib/tuned_examples/ppo/pendulum-ppo-with-rl-module.yaml +++ b/rllib/tuned_examples/ppo/pendulum-ppo-with-rl-module.yaml @@ -3,7 +3,7 @@ pendulum-ppo: env: Pendulum-v1 run: PPO stop: - episode_reward_mean: -400 + sampler_results/episode_reward_mean: -400 timesteps_total: 400000 config: # Works for both torch and tf2 diff --git a/rllib/tuned_examples/ppo/pendulum-ppo.yaml b/rllib/tuned_examples/ppo/pendulum-ppo.yaml index 113f593cef72..607e736c2905 100644 --- a/rllib/tuned_examples/ppo/pendulum-ppo.yaml +++ b/rllib/tuned_examples/ppo/pendulum-ppo.yaml @@ -3,7 +3,7 @@ pendulum-ppo: env: Pendulum-v1 run: PPO stop: - episode_reward_mean: -400 + sampler_results/episode_reward_mean: -400 timesteps_total: 400000 config: # Works for both torch and tf. diff --git a/rllib/tuned_examples/ppo/pendulum-transformed-actions-ppo.yaml b/rllib/tuned_examples/ppo/pendulum-transformed-actions-ppo.yaml index 7be2eaa18fcd..64c15f113700 100644 --- a/rllib/tuned_examples/ppo/pendulum-transformed-actions-ppo.yaml +++ b/rllib/tuned_examples/ppo/pendulum-transformed-actions-ppo.yaml @@ -3,7 +3,7 @@ pendulum-ppo: env: ray.rllib.examples.env.transformed_action_space_env.TransformedActionPendulum run: PPO stop: - episode_reward_mean: -500 + sampler_results/episode_reward_mean: -500 timesteps_total: 400000 config: # Works for both torch and tf. 
diff --git a/rllib/tuned_examples/ppo/recomm-sys001-ppo.yaml b/rllib/tuned_examples/ppo/recomm-sys001-ppo.yaml index abead1cc0647..808f1955dab0 100644 --- a/rllib/tuned_examples/ppo/recomm-sys001-ppo.yaml +++ b/rllib/tuned_examples/ppo/recomm-sys001-ppo.yaml @@ -2,7 +2,7 @@ recomm-sys001-ppo: env: ray.rllib.examples.env.recommender_system_envs.RecommSys001 run: PPO stop: - #evaluation/episode_reward_mean: 48.0 + #evaluation/sampler_results/episode_reward_mean: 48.0 timesteps_total: 200000 config: framework: torch diff --git a/rllib/tuned_examples/ppo/repeatafterme-ppo-lstm.yaml b/rllib/tuned_examples/ppo/repeatafterme-ppo-lstm.yaml index 6a4d74b8a326..00fb269ff1d0 100644 --- a/rllib/tuned_examples/ppo/repeatafterme-ppo-lstm.yaml +++ b/rllib/tuned_examples/ppo/repeatafterme-ppo-lstm.yaml @@ -3,7 +3,7 @@ repeat-after-me-ppo-w-lstm: env: ray.rllib.examples.env.repeat_after_me_env.RepeatAfterMeEnv run: PPO stop: - episode_reward_mean: 50 + sampler_results/episode_reward_mean: 50 timesteps_total: 100000 config: # Works for both torch and tf. diff --git a/rllib/tuned_examples/qmix/two-step-game-qmix-no-mixer.yaml b/rllib/tuned_examples/qmix/two-step-game-qmix-no-mixer.yaml index 65d314f42ea1..29b0c2c8d5db 100644 --- a/rllib/tuned_examples/qmix/two-step-game-qmix-no-mixer.yaml +++ b/rllib/tuned_examples/qmix/two-step-game-qmix-no-mixer.yaml @@ -2,7 +2,7 @@ two-step-game-qmix-without-mixer: env: ray.rllib.examples.env.two_step_game.TwoStepGameWithGroupedAgents run: QMIX stop: - episode_reward_mean: 6.5 + sampler_results/episode_reward_mean: 6.5 timesteps_total: 70000 config: # QMIX only supports torch for now. 
diff --git a/rllib/tuned_examples/qmix/two-step-game-qmix-vdn-mixer.yaml b/rllib/tuned_examples/qmix/two-step-game-qmix-vdn-mixer.yaml index 59f7560ec160..0ef024653849 100644 --- a/rllib/tuned_examples/qmix/two-step-game-qmix-vdn-mixer.yaml +++ b/rllib/tuned_examples/qmix/two-step-game-qmix-vdn-mixer.yaml @@ -2,7 +2,7 @@ two-step-game-qmix-with-vdn-mixer: env: ray.rllib.examples.env.two_step_game.TwoStepGameWithGroupedAgents run: QMIX stop: - episode_reward_mean: 6.5 + sampler_results/episode_reward_mean: 6.5 timesteps_total: 70000 config: # QMIX only supports torch for now. diff --git a/rllib/tuned_examples/qmix/two-step-game-qmix.yaml b/rllib/tuned_examples/qmix/two-step-game-qmix.yaml index 1bf036f7bba8..9e462559bd39 100644 --- a/rllib/tuned_examples/qmix/two-step-game-qmix.yaml +++ b/rllib/tuned_examples/qmix/two-step-game-qmix.yaml @@ -2,7 +2,7 @@ two-step-game-qmix-with-qmix-mixer: env: ray.rllib.examples.env.two_step_game.TwoStepGameWithGroupedAgents run: QMIX stop: - episode_reward_mean: 7.5 + sampler_results/episode_reward_mean: 7.5 timesteps_total: 70000 config: # QMIX only supports torch for now. diff --git a/rllib/tuned_examples/r2d2/stateless-cartpole-r2d2-fake-gpus.yaml b/rllib/tuned_examples/r2d2/stateless-cartpole-r2d2-fake-gpus.yaml index 3888d5f90811..9634d6819836 100644 --- a/rllib/tuned_examples/r2d2/stateless-cartpole-r2d2-fake-gpus.yaml +++ b/rllib/tuned_examples/r2d2/stateless-cartpole-r2d2-fake-gpus.yaml @@ -2,7 +2,7 @@ stateless-cartpole-r2d2: env: ray.rllib.examples.env.stateless_cartpole.StatelessCartPole run: R2D2 stop: - episode_reward_mean: 150 + sampler_results/episode_reward_mean: 150 timesteps_total: 1000000 config: # Works for both torch and tf. 
diff --git a/rllib/tuned_examples/r2d2/stateless-cartpole-r2d2.yaml b/rllib/tuned_examples/r2d2/stateless-cartpole-r2d2.yaml index 0288315e6a2d..8389ca24e08c 100644 --- a/rllib/tuned_examples/r2d2/stateless-cartpole-r2d2.yaml +++ b/rllib/tuned_examples/r2d2/stateless-cartpole-r2d2.yaml @@ -2,7 +2,7 @@ stateless-cartpole-r2d2: env: ray.rllib.examples.env.stateless_cartpole.StatelessCartPole run: R2D2 stop: - episode_reward_mean: 150 + sampler_results/episode_reward_mean: 150 timesteps_total: 1000000 config: # Works for both torch and tf. diff --git a/rllib/tuned_examples/sac/cartpole-continuous-pybullet-sac.yaml b/rllib/tuned_examples/sac/cartpole-continuous-pybullet-sac.yaml index 86da2f1d661c..9e27a2664faa 100644 --- a/rllib/tuned_examples/sac/cartpole-continuous-pybullet-sac.yaml +++ b/rllib/tuned_examples/sac/cartpole-continuous-pybullet-sac.yaml @@ -2,7 +2,7 @@ cartpole-sac: env: CartPoleContinuousBulletEnv-v0 run: SAC stop: - episode_reward_mean: 40 + sampler_results/episode_reward_mean: 40 timesteps_total: 100000 config: # Works for both torch and tf. diff --git a/rllib/tuned_examples/sac/cartpole-sac.yaml b/rllib/tuned_examples/sac/cartpole-sac.yaml index b6ab12fe630a..c599ede52e89 100644 --- a/rllib/tuned_examples/sac/cartpole-sac.yaml +++ b/rllib/tuned_examples/sac/cartpole-sac.yaml @@ -2,7 +2,7 @@ cartpole-sac: env: CartPole-v1 run: SAC stop: - episode_reward_mean: 150.0 + sampler_results/episode_reward_mean: 150.0 timesteps_total: 100000 config: # Works for both torch and tf. diff --git a/rllib/tuned_examples/sac/halfcheetah-pybullet-sac.yaml b/rllib/tuned_examples/sac/halfcheetah-pybullet-sac.yaml index 5cabaaca07eb..f48d6049a98a 100644 --- a/rllib/tuned_examples/sac/halfcheetah-pybullet-sac.yaml +++ b/rllib/tuned_examples/sac/halfcheetah-pybullet-sac.yaml @@ -2,7 +2,7 @@ halfcheetah-pybullet-sac: env: HalfCheetahBulletEnv-v0 run: SAC stop: - episode_reward_mean: 800.0 + sampler_results/episode_reward_mean: 800.0 config: # Works for both torch and tf. 
framework: torch diff --git a/rllib/tuned_examples/sac/halfcheetah-sac.yaml b/rllib/tuned_examples/sac/halfcheetah-sac.yaml index aa6c312109d4..b69aabfe0536 100644 --- a/rllib/tuned_examples/sac/halfcheetah-sac.yaml +++ b/rllib/tuned_examples/sac/halfcheetah-sac.yaml @@ -3,7 +3,7 @@ halfcheetah_sac: env: HalfCheetah-v3 run: SAC stop: - episode_reward_mean: 9000 + sampler_results/episode_reward_mean: 9000 config: # Works for both torch and tf. framework: torch diff --git a/rllib/tuned_examples/sac/mspacman-sac.yaml b/rllib/tuned_examples/sac/mspacman-sac.yaml index 09e9d9386912..86ef3ca442f9 100644 --- a/rllib/tuned_examples/sac/mspacman-sac.yaml +++ b/rllib/tuned_examples/sac/mspacman-sac.yaml @@ -5,7 +5,7 @@ mspacman-sac-tf: env: ALE/MsPacman-v5 run: SAC stop: - episode_reward_mean: 800 + sampler_results/episode_reward_mean: 800 timesteps_total: 100000 config: # Works for both torch and tf. diff --git a/rllib/tuned_examples/sac/pendulum-sac-fake-gpus.yaml b/rllib/tuned_examples/sac/pendulum-sac-fake-gpus.yaml index 96ddf12bd3dc..fb20bf925aa0 100644 --- a/rllib/tuned_examples/sac/pendulum-sac-fake-gpus.yaml +++ b/rllib/tuned_examples/sac/pendulum-sac-fake-gpus.yaml @@ -2,7 +2,7 @@ pendulum-sac-fake-gpus: env: Pendulum-v1 run: SAC stop: - episode_reward_mean: -270 + sampler_results/episode_reward_mean: -270 timesteps_total: 10000 config: # Works for both torch and tf. diff --git a/rllib/tuned_examples/sac/pendulum-sac.yaml b/rllib/tuned_examples/sac/pendulum-sac.yaml index fcc662eaa7c4..7fedc4ecd8ac 100644 --- a/rllib/tuned_examples/sac/pendulum-sac.yaml +++ b/rllib/tuned_examples/sac/pendulum-sac.yaml @@ -4,7 +4,7 @@ pendulum-sac: env: Pendulum-v1 run: SAC stop: - episode_reward_mean: -250 + sampler_results/episode_reward_mean: -250 timesteps_total: 10000 config: # Works for both torch and tf. 
diff --git a/rllib/tuned_examples/sac/pendulum-transformed-actions-sac.yaml b/rllib/tuned_examples/sac/pendulum-transformed-actions-sac.yaml index 5d98ebf7fc55..44ff5ebd1789 100644 --- a/rllib/tuned_examples/sac/pendulum-transformed-actions-sac.yaml +++ b/rllib/tuned_examples/sac/pendulum-transformed-actions-sac.yaml @@ -4,7 +4,7 @@ transformed-actions-pendulum-sac-dummy-torch: env: ray.rllib.examples.env.transformed_action_space_env.TransformedActionPendulum run: SAC stop: - episode_reward_mean: -200 + sampler_results/episode_reward_mean: -200 timesteps_total: 10000 config: # Works for both torch and tf. diff --git a/rllib/tuned_examples/simple_q/cartpole-simpleq-fake-gpus.yaml b/rllib/tuned_examples/simple_q/cartpole-simpleq-fake-gpus.yaml index 59e57f8ba127..55833b1abcb5 100644 --- a/rllib/tuned_examples/simple_q/cartpole-simpleq-fake-gpus.yaml +++ b/rllib/tuned_examples/simple_q/cartpole-simpleq-fake-gpus.yaml @@ -2,7 +2,7 @@ cartpole-simpleq-fake-gpus: env: CartPole-v1 run: SimpleQ stop: - episode_reward_mean: 150 + sampler_results/episode_reward_mean: 150 training_iteration: 400 config: # Works for both torch and tf. 
diff --git a/rllib/tuned_examples/simple_q/cartpole-simpleq-test.yaml b/rllib/tuned_examples/simple_q/cartpole-simpleq-test.yaml index bd0490687782..1464f22703e5 100644 --- a/rllib/tuned_examples/simple_q/cartpole-simpleq-test.yaml +++ b/rllib/tuned_examples/simple_q/cartpole-simpleq-test.yaml @@ -2,7 +2,7 @@ cartpole-simpleq-test: env: CartPole-v1 run: SimpleQ stop: - episode_reward_mean: 50.0 + sampler_results/episode_reward_mean: 50.0 timesteps_total: 10000 config: framework: torch diff --git a/rllib/tuned_examples/simple_q/cartpole-simpleq.yaml b/rllib/tuned_examples/simple_q/cartpole-simpleq.yaml index 3b7bc198ddde..ab507415992c 100644 --- a/rllib/tuned_examples/simple_q/cartpole-simpleq.yaml +++ b/rllib/tuned_examples/simple_q/cartpole-simpleq.yaml @@ -2,7 +2,7 @@ cartpole-simpleq: env: CartPole-v1 run: SimpleQ stop: - episode_reward_mean: 150 + sampler_results/episode_reward_mean: 150 timesteps_total: 50000 config: # Works for both torch and tf. diff --git a/rllib/tuned_examples/slateq/interest-evolution-10-candidates-recsim-env-slateq-fake-gpus.yaml b/rllib/tuned_examples/slateq/interest-evolution-10-candidates-recsim-env-slateq-fake-gpus.yaml index d5fc1ccc5c2e..8ef384ce8571 100644 --- a/rllib/tuned_examples/slateq/interest-evolution-10-candidates-recsim-env-slateq-fake-gpus.yaml +++ b/rllib/tuned_examples/slateq/interest-evolution-10-candidates-recsim-env-slateq-fake-gpus.yaml @@ -2,7 +2,7 @@ interest-evolution-recsim-env-slateq: env: ray.rllib.examples.env.recommender_system_envs_with_recsim.InterestEvolutionRecSimEnv run: SlateQ stop: - episode_reward_mean: 160.0 + sampler_results/episode_reward_mean: 160.0 timesteps_total: 100000 config: framework: torch diff --git a/rllib/tuned_examples/slateq/interest-evolution-10-candidates-recsim-env-slateq.yaml b/rllib/tuned_examples/slateq/interest-evolution-10-candidates-recsim-env-slateq.yaml index 86056565bca9..1f2cdc53e574 100644 --- 
a/rllib/tuned_examples/slateq/interest-evolution-10-candidates-recsim-env-slateq.yaml +++ b/rllib/tuned_examples/slateq/interest-evolution-10-candidates-recsim-env-slateq.yaml @@ -2,7 +2,7 @@ interest-evolution-recsim-env-slateq: env: ray.rllib.examples.env.recommender_system_envs_with_recsim.InterestEvolutionRecSimEnv run: SlateQ stop: - episode_reward_mean: 160.0 + sampler_results/episode_reward_mean: 160.0 timesteps_total: 120000 config: framework: torch diff --git a/rllib/tuned_examples/slateq/interest-evolution-50-candidates-recsim-env-slateq.yaml b/rllib/tuned_examples/slateq/interest-evolution-50-candidates-recsim-env-slateq.yaml index b698457260ae..668cfaf95051 100644 --- a/rllib/tuned_examples/slateq/interest-evolution-50-candidates-recsim-env-slateq.yaml +++ b/rllib/tuned_examples/slateq/interest-evolution-50-candidates-recsim-env-slateq.yaml @@ -2,7 +2,7 @@ interest-evolution-recsim-env-slateq: env: ray.rllib.examples.env.recommender_system_envs_with_recsim.InterestEvolutionRecSimEnv run: SlateQ stop: - episode_reward_mean: 162.0 + sampler_results/episode_reward_mean: 162.0 timesteps_total: 300000 config: framework: tf2 diff --git a/rllib/tuned_examples/slateq/long-term-satisfaction-recsim-env-slateq.yaml b/rllib/tuned_examples/slateq/long-term-satisfaction-recsim-env-slateq.yaml index 9f5419517c01..a83f7186361e 100644 --- a/rllib/tuned_examples/slateq/long-term-satisfaction-recsim-env-slateq.yaml +++ b/rllib/tuned_examples/slateq/long-term-satisfaction-recsim-env-slateq.yaml @@ -5,7 +5,7 @@ long-term-satisfaction-recsim-env-slateq: # Random baseline rewards: # num_candidates=20; slate_size=2; resample=true: ~951 # num_candidates=50; slate_size=3; resample=true: ~946 - evaluation/episode_reward_mean: 1000.0 + evaluation/sampler_results/episode_reward_mean: 1000.0 timesteps_total: 200000 config: # Works for both tf and torch. 
diff --git a/rllib/tuned_examples/slateq/parametric-item-reco-env-slateq.yaml b/rllib/tuned_examples/slateq/parametric-item-reco-env-slateq.yaml index e2bf73023eec..fdbc25476836 100644 --- a/rllib/tuned_examples/slateq/parametric-item-reco-env-slateq.yaml +++ b/rllib/tuned_examples/slateq/parametric-item-reco-env-slateq.yaml @@ -2,7 +2,7 @@ parametric-item-reco-env-slateq: env: ray.rllib.examples.env.bandit_envs_recommender_system.ParametricItemRecoEnv run: SlateQ stop: - #evaluation/episode_reward_mean: 48.0 + #evaluation/sampler_results/episode_reward_mean: 48.0 timesteps_total: 200000 config: # SlateQ only supported for torch so far. diff --git a/rllib/tuned_examples/slateq/recomm-sys001-slateq.yaml b/rllib/tuned_examples/slateq/recomm-sys001-slateq.yaml index 356629592885..2fff86d59ea9 100644 --- a/rllib/tuned_examples/slateq/recomm-sys001-slateq.yaml +++ b/rllib/tuned_examples/slateq/recomm-sys001-slateq.yaml @@ -2,7 +2,7 @@ recomm-sys001-slateq: env: ray.rllib.examples.env.recommender_system_envs.RecommSys001 run: SlateQ stop: - #evaluation/episode_reward_mean: 48.0 + #evaluation/sampler_results/episode_reward_mean: 48.0 timesteps_total: 200000 config: # SlateQ only supported for torch so far. 
diff --git a/rllib/tuned_examples/td3/invertedpendulum-td3.yaml b/rllib/tuned_examples/td3/invertedpendulum-td3.yaml index c0b1092bd513..081a88c1d1f8 100644 --- a/rllib/tuned_examples/td3/invertedpendulum-td3.yaml +++ b/rllib/tuned_examples/td3/invertedpendulum-td3.yaml @@ -5,7 +5,7 @@ invertedpendulum-td3: env: InvertedPendulum-v2 run: TD3 stop: - episode_reward_mean: 9999.9 + sampler_results/episode_reward_mean: 9999.9 time_total_s: 900 # 15 minutes timesteps_total: 1000000 config: diff --git a/rllib/tuned_examples/td3/pendulum-td3-fake-gpus.yaml b/rllib/tuned_examples/td3/pendulum-td3-fake-gpus.yaml index 4a2a383fec86..09e135049377 100644 --- a/rllib/tuned_examples/td3/pendulum-td3-fake-gpus.yaml +++ b/rllib/tuned_examples/td3/pendulum-td3-fake-gpus.yaml @@ -2,7 +2,7 @@ pendulum-td3-fake-gpus: env: Pendulum-v1 run: TD3 stop: - episode_reward_mean: -900 + sampler_results/episode_reward_mean: -900 timesteps_total: 100000 config: # Works for both torch and tf. diff --git a/rllib/tuned_examples/td3/pendulum-td3.yaml b/rllib/tuned_examples/td3/pendulum-td3.yaml index d772f16c6ac5..038caaa7e791 100644 --- a/rllib/tuned_examples/td3/pendulum-td3.yaml +++ b/rllib/tuned_examples/td3/pendulum-td3.yaml @@ -3,7 +3,7 @@ pendulum-td3: env: Pendulum-v1 run: TD3 stop: - episode_reward_mean: -900 + sampler_results/episode_reward_mean: -900 timesteps_total: 100000 config: # Works for both torch and tf. From ff4053abdb519ed8345abace8ad0d4f4d8838746 Mon Sep 17 00:00:00 2001 From: Alan Guo Date: Wed, 26 Apr 2023 06:39:08 -0700 Subject: [PATCH 107/424] [Dashboard] Additional polish for jobs and progress bar (#34551) - Fix progress bar not showing loading bars on first load. 
- Fix progress bar showing unloaded data when we have loaded data already cached - Add spinnner and icons next to job status in jobs table and job detail page - Fix progress bar using total tasks across all jobs instead of tasks specific to a single job --- dashboard/client/src/common/JobStatus.tsx | 140 ++++++++++++++++++ .../client/src/components/StatusChip.tsx | 39 +++-- .../src/pages/job/JobDetailInfoPage.tsx | 3 +- .../client/src/pages/job/JobProgressBar.tsx | 14 +- dashboard/client/src/pages/job/JobRow.tsx | 4 +- .../src/pages/job/hook/useJobProgress.ts | 13 +- .../pages/overview/cards/RecentJobsCard.tsx | 62 +------- .../serve/ServeApplicationDetailPage.tsx | 2 +- .../serve_grafana_dashboard_base.json | 1 + 9 files changed, 201 insertions(+), 77 deletions(-) create mode 100644 dashboard/client/src/common/JobStatus.tsx diff --git a/dashboard/client/src/common/JobStatus.tsx b/dashboard/client/src/common/JobStatus.tsx new file mode 100644 index 000000000000..f096132367b1 --- /dev/null +++ b/dashboard/client/src/common/JobStatus.tsx @@ -0,0 +1,140 @@ +import { Box, createStyles, makeStyles } from "@material-ui/core"; +import classNames from "classnames"; +import React from "react"; +import { + RiCheckboxCircleFill, + RiCloseCircleFill, + RiLoader4Line, +} from "react-icons/ri"; +import { StatusChip } from "../components/StatusChip"; +import { UnifiedJob } from "../type/job"; +import { ClassNameProps } from "./props"; + +const useJobRunningIconStyles = makeStyles((theme) => + createStyles({ + icon: { + width: 20, + height: 20, + }, + iconSmall: { + width: 16, + height: 16, + }, + "@keyframes spinner": { + from: { + transform: "rotate(0deg)", + }, + to: { + transform: "rotate(360deg)", + }, + }, + iconRunning: { + color: "#1E88E5", + animationName: "$spinner", + animationDuration: "1000ms", + animationIterationCount: "infinite", + animationTimingFunction: "linear", + }, + }), +); + +type JobRunningIconProps = { small?: boolean } & ClassNameProps; + +export const 
JobRunningIcon = ({ + className, + small = false, +}: JobRunningIconProps) => { + const classes = useJobRunningIconStyles(); + return ( + + ); +}; + +const useJobStatusIconStyles = makeStyles((theme) => + createStyles({ + icon: { + width: 20, + height: 20, + }, + iconSmall: { + width: 16, + height: 16, + }, + colorSuccess: { + color: theme.palette.success.main, + }, + colorError: { + color: theme.palette.error.main, + }, + }), +); + +type JobStatusIconProps = { + job: UnifiedJob; + small?: boolean; +} & ClassNameProps; + +export const JobStatusIcon = ({ + job, + small = false, + className, +}: JobStatusIconProps) => { + const classes = useJobStatusIconStyles(); + + switch (job.status) { + case "SUCCEEDED": + return ( + + ); + case "FAILED": + case "STOPPED": + return ( + + ); + default: + return ; + } +}; + +type JobStatusWithIconProps = { + job: UnifiedJob; +}; + +export const JobStatusWithIcon = ({ job }: JobStatusWithIconProps) => { + return ( + + } + /> + + ); +}; diff --git a/dashboard/client/src/components/StatusChip.tsx b/dashboard/client/src/components/StatusChip.tsx index ab4869db7998..2d7a0dac53a2 100644 --- a/dashboard/client/src/components/StatusChip.tsx +++ b/dashboard/client/src/components/StatusChip.tsx @@ -1,4 +1,4 @@ -import { Color } from "@material-ui/core"; +import { Color, createStyles, makeStyles } from "@material-ui/core"; import { blue, blueGrey, @@ -11,6 +11,7 @@ import { yellow, } from "@material-ui/core/colors"; import { CSSProperties } from "@material-ui/core/styles/withStyles"; +import classNames from "classnames"; import React, { ReactNode } from "react"; import { ActorEnum } from "../type/actor"; import { PlacementGroupState } from "../type/placementGroup"; @@ -96,23 +97,35 @@ const typeMap = { [key: string]: Color; }; +const useStyles = makeStyles((theme) => + createStyles({ + root: { + padding: "2px 8px", + border: "solid 1px", + borderRadius: 4, + fontSize: 12, + margin: 2, + display: "inline-flex", + alignItems: "center", + }, + 
afterIcon: { + marginLeft: 4, + }, + }), +); + export const StatusChip = ({ type, status, suffix, + icon, }: { type: string; status: string | ActorEnum | ReactNode; suffix?: string; + icon?: ReactNode; }) => { - const style = { - padding: "2px 8px", - border: "solid 1px", - borderRadius: 4, - fontSize: 12, - margin: 2, - } as CSSProperties; - + const classes = useStyles(); let color: Color | string = blueGrey; if (typeMap[type]) { @@ -127,6 +140,7 @@ export const StatusChip = ({ const colorValue = typeof color === "string" ? color : color[500]; + const style: CSSProperties = {}; style.color = colorValue; style.borderColor = colorValue; if (color !== blueGrey) { @@ -134,8 +148,11 @@ export const StatusChip = ({ } return ( - - {status} + + {icon} + + {status} + {suffix} ); diff --git a/dashboard/client/src/pages/job/JobDetailInfoPage.tsx b/dashboard/client/src/pages/job/JobDetailInfoPage.tsx index 2d69d75d0d11..a732c0664576 100644 --- a/dashboard/client/src/pages/job/JobDetailInfoPage.tsx +++ b/dashboard/client/src/pages/job/JobDetailInfoPage.tsx @@ -3,6 +3,7 @@ import React from "react"; import { CodeDialogButtonWithPreview } from "../../common/CodeDialogButton"; import { DurationText } from "../../common/DurationText"; import { formatDateFromTimeMs } from "../../common/formatUtils"; +import { JobStatusWithIcon } from "../../common/JobStatus"; import { CpuProfilingLink, CpuStackTraceLink, @@ -95,7 +96,7 @@ export const JobMetadataSection = ({ job }: JobMetadataSectionProps) => { }, { label: "Status", - content: , + content: , }, { label: "Job ID", diff --git a/dashboard/client/src/pages/job/JobProgressBar.tsx b/dashboard/client/src/pages/job/JobProgressBar.tsx index d5d81b41124e..ce175bff2111 100644 --- a/dashboard/client/src/pages/job/JobProgressBar.tsx +++ b/dashboard/client/src/pages/job/JobProgressBar.tsx @@ -1,4 +1,4 @@ -import { makeStyles } from "@material-ui/core"; +import { LinearProgress, makeStyles } from "@material-ui/core"; import React, { useEffect, 
useState } from "react"; import { UnifiedJob } from "../../type/job"; import { @@ -41,12 +41,14 @@ export const JobProgressBar = ({ const { progress, + isLoading: progressLoading, driverExists, totalTasks, latestFetchTimestamp: progressTimestamp, } = useJobProgress(jobId, advancedProgressBarExpanded); const { progressGroups, + isLoading: progressGroupsLoading, total, totalTasks: advancedTotalTasks, latestFetchTimestamp: totalTimestamp, @@ -58,10 +60,20 @@ export const JobProgressBar = ({ if (!driverExists) { return ; } + + if ( + progressLoading && + (progressGroupsLoading || !advancedProgressBarRendered) + ) { + return ; + } + const { status } = job; // Use whichever data was received the most recently // Note these values may disagree in some way. It might better to consistently use one endpoint. const [totalProgress, finalTotalTasks] = + total === undefined || + advancedTotalTasks === undefined || progressTimestamp > totalTimestamp ? [progress, totalTasks] : [total, advancedTotalTasks]; diff --git a/dashboard/client/src/pages/job/JobRow.tsx b/dashboard/client/src/pages/job/JobRow.tsx index 724d69530408..b26272c52f8b 100644 --- a/dashboard/client/src/pages/job/JobRow.tsx +++ b/dashboard/client/src/pages/job/JobRow.tsx @@ -4,11 +4,11 @@ import React from "react"; import { Link } from "react-router-dom"; import { DurationText } from "../../common/DurationText"; import { formatDateFromTimeMs } from "../../common/formatUtils"; +import { JobStatusWithIcon } from "../../common/JobStatus"; import { CpuProfilingLink, CpuStackTraceLink, } from "../../common/ProfilingLink"; -import { StatusChip } from "../../components/StatusChip"; import { UnifiedJob } from "../../type/job"; import { useJobProgress } from "./hook/useJobProgress"; import { JobLogsLink } from "./JobDetail"; @@ -74,7 +74,7 @@ export const JobRow = ({ job }: JobRowProps) => { - + {start_time && start_time > 0 ? 
( diff --git a/dashboard/client/src/pages/job/hook/useJobProgress.ts b/dashboard/client/src/pages/job/hook/useJobProgress.ts index 8da49fe719fb..1a55ba5ff1cc 100644 --- a/dashboard/client/src/pages/job/hook/useJobProgress.ts +++ b/dashboard/client/src/pages/job/hook/useJobProgress.ts @@ -52,7 +52,7 @@ const useFetchStateApiProgressByTaskName = ( const summary = formatSummaryToTaskProgress( rsp.data.data.result.result, ); - return { summary, totalTasks: rsp.data.data.result.total }; + return { summary, totalTasks: rsp.data.data.result.num_filtered }; } else { setError(true); setRefresh(false); @@ -82,7 +82,7 @@ export const useJobProgress = ( const [error, setError] = useState(false); const [isRefreshing, setRefresh] = useState(true); const [latestFetchTimestamp, setLatestFetchTimestamp] = useState(0); - const { data } = useFetchStateApiProgressByTaskName( + const { data, isLoading } = useFetchStateApiProgressByTaskName( jobId, isRefreshing, setMsg, @@ -104,6 +104,7 @@ export const useJobProgress = ( return { progress: summed, totalTasks: data?.totalTasks, + isLoading, msg, error, driverExists, @@ -128,7 +129,7 @@ export const useJobProgressByTaskName = (jobId: string) => { setRefresh(event.target.checked); }; - const { data } = useFetchStateApiProgressByTaskName( + const { data, isLoading } = useFetchStateApiProgressByTaskName( jobId, isRefreshing, setMsg, @@ -167,6 +168,7 @@ export const useJobProgressByTaskName = (jobId: string) => { page: { pageNo: page, pageSize: 10 }, total: formattedTasks.length, totalTasks: data?.totalTasks, + isLoading, setPage, msg, error, @@ -252,7 +254,7 @@ export const useJobProgressByLineage = ( const [isRefreshing, setRefresh] = useState(true); const [latestFetchTimestamp, setLatestFetchTimestamp] = useState(0); - const { data } = useSWR( + const { data, isLoading } = useSWR( jobId ? 
["useJobProgressByLineageAndName", jobId] : null, async ([_, jobId]) => { const rsp = await getStateApiJobProgressByLineage(jobId); @@ -263,7 +265,7 @@ export const useJobProgressByLineage = ( const summary = formatNestedJobProgressToJobProgressGroup( rsp.data.data.result.result, ); - return { summary, totalTasks: rsp.data.data.result.total }; + return { summary, totalTasks: rsp.data.data.result.num_filtered }; } else { setError(true); setRefresh(false); @@ -280,6 +282,7 @@ export const useJobProgressByLineage = ( progressGroups: data?.summary?.progressGroups, total: data?.summary?.total, totalTasks: data?.totalTasks, + isLoading, msg, error, latestFetchTimestamp, diff --git a/dashboard/client/src/pages/overview/cards/RecentJobsCard.tsx b/dashboard/client/src/pages/overview/cards/RecentJobsCard.tsx index 1a0ccdb684c3..a38ebedd6729 100644 --- a/dashboard/client/src/pages/overview/cards/RecentJobsCard.tsx +++ b/dashboard/client/src/pages/overview/cards/RecentJobsCard.tsx @@ -2,12 +2,8 @@ import { createStyles, makeStyles, Typography } from "@material-ui/core"; import classNames from "classnames"; import _ from "lodash"; import React from "react"; -import { - RiCheckboxCircleFill, - RiCloseCircleFill, - RiLoader4Line, -} from "react-icons/ri"; import { Link } from "react-router-dom"; +import { JobStatusIcon } from "../../../common/JobStatus"; import { UnifiedJob } from "../../../type/job"; import { useJobList } from "../../job/hook/useJobList"; import { LinkWithArrow, OverviewCard } from "./OverviewCard"; @@ -72,33 +68,6 @@ const useRecentJobListItemStyles = makeStyles((theme) => alignItems: "center", textDecoration: "none", }, - icon: { - width: 24, - height: 24, - marginRight: theme.spacing(1), - flex: "0 0 20px", - }, - "@keyframes spinner": { - from: { - transform: "rotate(0deg)", - }, - to: { - transform: "rotate(360deg)", - }, - }, - colorSuccess: { - color: theme.palette.success.main, - }, - colorError: { - color: theme.palette.error.main, - }, - iconRunning: { 
- color: "#1E88E5", - animationName: "$spinner", - animationDuration: "1000ms", - animationIterationCount: "infinite", - animationTimingFunction: "linear", - }, textContainer: { flex: "1 1 auto", width: `calc(100% - ${theme.spacing(1) + 20}px)`, @@ -112,6 +81,9 @@ const useRecentJobListItemStyles = makeStyles((theme) => whiteSpace: "nowrap", color: "#5F6469", }, + icon: { + marginRight: theme.spacing(1), + }, }), ); @@ -123,32 +95,9 @@ type RecentJobListItemProps = { const RecentJobListItem = ({ job, className }: RecentJobListItemProps) => { const classes = useRecentJobListItemStyles(); - const icon = (() => { - switch (job.status) { - case "SUCCEEDED": - return ( - - ); - case "FAILED": - case "STOPPED": - return ( - - ); - default: - return ( - - ); - } - })(); const cardContent = ( - {icon} +
    {job.job_id ?? job.submission_id} @@ -163,6 +112,7 @@ const RecentJobListItem = ({ job, className }: RecentJobListItemProps) => {
    ); + return (
    {job.job_id !== null && job.job_id !== "" ? ( diff --git a/dashboard/client/src/pages/serve/ServeApplicationDetailPage.tsx b/dashboard/client/src/pages/serve/ServeApplicationDetailPage.tsx index 3febd2a31856..efecc9939558 100644 --- a/dashboard/client/src/pages/serve/ServeApplicationDetailPage.tsx +++ b/dashboard/client/src/pages/serve/ServeApplicationDetailPage.tsx @@ -105,7 +105,7 @@ export const ServeApplicationDetailPage = () => { content: { value: Object.values(application.deployments) .map(({ replicas }) => replicas.length) - .reduce((acc, curr) => acc + curr) + .reduce((acc, curr) => acc + curr, 0) .toString(), }, }, diff --git a/dashboard/modules/metrics/dashboards/serve_grafana_dashboard_base.json b/dashboard/modules/metrics/dashboards/serve_grafana_dashboard_base.json index 4a1d66bc311a..14fb6f46404b 100644 --- a/dashboard/modules/metrics/dashboards/serve_grafana_dashboard_base.json +++ b/dashboard/modules/metrics/dashboards/serve_grafana_dashboard_base.json @@ -61,6 +61,7 @@ } ] }, + "rayMeta": ["excludesSystemRoutes"], "time": { "from": "now-30m", "to": "now" From 8d0ed2ce8ed4afb938af6629fe0c7a3ce48ac919 Mon Sep 17 00:00:00 2001 From: Lonnie Liu <95255098+aslonnie@users.noreply.github.com> Date: Wed, 26 Apr 2023 11:45:21 -0700 Subject: [PATCH 108/424] Fix team owner tag checking. (#34731) - Check for all lists for with name of `tags`. It is not always the case where the first `` are tags. For example, it can be `visibility`. - Checks for prefix `team:` rather than just `team`, to be consistent with the error message. 
Signed-off-by: Lonnie Liu --- ci/lint/check-bazel-team-owner.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/ci/lint/check-bazel-team-owner.py b/ci/lint/check-bazel-team-owner.py index 11bc69d0417e..78e51f80da5c 100644 --- a/ci/lint/check-bazel-team-owner.py +++ b/ci/lint/check-bazel-team-owner.py @@ -25,8 +25,13 @@ def perform_check(raw_xml_string: str): missing_owners = [] for rule in tree.findall("rule"): test_name = rule.attrib["name"] - tags = [child.attrib["value"] for child in rule.find("list").getchildren()] - team_owner = [t for t in tags if t.startswith("team")] + tags = [] + for lst in rule.findall("list"): + if lst.attrib["name"] != "tags": + continue + tags = [child.attrib["value"] for child in lst.getchildren()] + break + team_owner = [t for t in tags if t.startswith("team:")] if len(team_owner) == 0: missing_owners.append(test_name) owners[test_name] = team_owner From 1828a5c538587eb13e60edd34011f0107f05527e Mon Sep 17 00:00:00 2001 From: Cheng Su Date: Wed, 26 Apr 2023 11:57:05 -0700 Subject: [PATCH 109/424] [Data] Fix nightly test import of pipelined_training.py (#34795) The nightly test is failing ([link](https://buildkite.com/ray-project/release-tests-branch/builds/1596#0187a5e6-2b7a-4941-b4c3-2c3f2a246e8e)), with error: ``` Traceback (most recent call last): File "pipelined_training.py", line 17, in from ray.data.datastream_pipeline import DatasetPipeline ModuleNotFoundError: No module named 'ray.data.datastream_pipeline' ``` This PR is to fix the import. 
Signed-off-by: Cheng Su --- release/nightly_tests/dataset/pipelined_training.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/release/nightly_tests/dataset/pipelined_training.py b/release/nightly_tests/dataset/pipelined_training.py index b3f3b55b4a85..01c9dce0a1e9 100644 --- a/release/nightly_tests/dataset/pipelined_training.py +++ b/release/nightly_tests/dataset/pipelined_training.py @@ -14,7 +14,7 @@ from ray_shuffling_data_loader.data_generation import DATA_SPEC from ray_shuffling_data_loader.embedding_model import MyModel, annotation, huber_loss -from ray.data.datastream_pipeline import DatasetPipeline +from ray.data import DatasetPipeline # Training settings parser = argparse.ArgumentParser(description="Dataset ingestion Example") From ab0d028b5f5397cc412d2e967be5e2813c4d6979 Mon Sep 17 00:00:00 2001 From: Lonnie Liu <95255098+aslonnie@users.noreply.github.com> Date: Wed, 26 Apr 2023 12:52:26 -0700 Subject: [PATCH 110/424] Update bazel flags that is renamed. (#34732) --- .bazelrc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.bazelrc b/.bazelrc index d38be3f8323a..e81c646c1569 100644 --- a/.bazelrc +++ b/.bazelrc @@ -200,5 +200,5 @@ try-import %workspace%/.llvm-local.bazelrc # It picks up the system headers when someone has protobuf installed via Homebrew. # Work around for https://github.com/bazelbuild/bazel/issues/8053 build:macos --sandbox_block_path=/usr/local/ -#This option controls whether javac checks for missing direct dependencies. -build --strict_java_deps=off +# This option controls whether javac checks for missing direct dependencies. 
+build --experimental_strict_java_deps=off From 247a38fa4e8d1f08f04e8ec544975f2155cfd348 Mon Sep 17 00:00:00 2001 From: Cindy Zhang Date: Wed, 26 Apr 2023 12:55:46 -0700 Subject: [PATCH 111/424] [serve][deploy refactor][2/X] Move lightweight update logic to DeploymentVersion (#34430) `DeploymentVersion` already has existing support for code version + other config options that affect the version (before, only user config was taken into account). We can leverage this to do lightweight config updates, so that: - The config's `import_path` and `runtime_env` tells us the code version for the deployments - The logic for lightweight config updates is hidden in `DeploymentVersion` So instead of having `DeploymentVersion` only rely on user config, we want it to rely on other info from `DeploymentInfo`, so we can decide which config options should trigger a redeployment and which shouldn't. I've added annotations `DeploymentOptionUpdateType` to fields in `DeploymentConfig` to indicate which options fall into which category as described below. * Heavyweight options that will force replicas to restart: `version` and `ray_actor_options` (which is part of replica config) * Lightweight options that need to call reconfigure on the replica actor: `user_config` and `graceful_shutdown_wait_loop_s` * Lightweight options (that need to update replicas in replica state container, but won't need to call into the actual actor): `max_concurrent_queries`, `graceful_shutdown_timeout_s`, `health_check_period_s`, `health_check_timeout_s` * Purely lightweight options: `num_replicas` and `autoscaling_config` Screen Shot 2023-04-20 at 6 50 13 PM Screen Shot 2023-04-20 at 6 50 31 PM This fixes the issue of deployments not listed in config being redeployed. See the newly added test `test_deployments_not_listed_in_config`. 
--- .../io/ray/serve/config/DeploymentConfig.java | 20 + .../serve/deployment/DeploymentVersion.java | 47 ++- .../io/ray/serve/replica/RayServeReplica.java | 6 +- .../serve/replica/RayServeReplicaImpl.java | 6 +- .../serve/replica/RayServeWrappedReplica.java | 13 +- .../serve/replica/RayServeReplicaTest.java | 13 +- python/ray/serve/_private/deploy_utils.py | 22 ++ python/ray/serve/_private/deployment_state.py | 254 +++++++------ python/ray/serve/_private/replica.py | 80 ++-- python/ray/serve/_private/utils.py | 13 + python/ray/serve/_private/version.py | 129 ++++++- python/ray/serve/config.py | 44 ++- python/ray/serve/controller.py | 152 +------- .../ray/serve/tests/test_config_files/pid.py | 67 +++- python/ray/serve/tests/test_controller.py | 95 ----- .../ray/serve/tests/test_deployment_state.py | 135 ++++--- .../serve/tests/test_deployment_version.py | 167 +++++++-- python/ray/serve/tests/test_standalone2.py | 346 +++++++++++++++--- src/ray/protobuf/serve.proto | 3 +- 19 files changed, 1028 insertions(+), 584 deletions(-) diff --git a/java/serve/src/main/java/io/ray/serve/config/DeploymentConfig.java b/java/serve/src/main/java/io/ray/serve/config/DeploymentConfig.java index 588356e7a121..a2f996e2677c 100644 --- a/java/serve/src/main/java/io/ray/serve/config/DeploymentConfig.java +++ b/java/serve/src/main/java/io/ray/serve/config/DeploymentConfig.java @@ -208,6 +208,26 @@ public byte[] toProtoBytes() { return builder.build().toByteArray(); } + public io.ray.serve.generated.DeploymentConfig toProto() { + io.ray.serve.generated.DeploymentConfig.Builder builder = + io.ray.serve.generated.DeploymentConfig.newBuilder() + .setNumReplicas(numReplicas) + .setMaxConcurrentQueries(maxConcurrentQueries) + .setGracefulShutdownWaitLoopS(gracefulShutdownWaitLoopS) + .setGracefulShutdownTimeoutS(gracefulShutdownTimeoutS) + .setHealthCheckPeriodS(healthCheckPeriodS) + .setHealthCheckTimeoutS(healthCheckTimeoutS) + .setIsCrossLanguage(isCrossLanguage) + 
.setDeploymentLanguage(deploymentLanguage); + if (null != userConfig) { + builder.setUserConfig(ByteString.copyFrom(MessagePackSerializer.encode(userConfig).getKey())); + } + if (null != autoscalingConfig) { + builder.setAutoscalingConfig(autoscalingConfig.toProto()); + } + return builder.build(); + } + public static DeploymentConfig fromProto(io.ray.serve.generated.DeploymentConfig proto) { DeploymentConfig deploymentConfig = new DeploymentConfig(); diff --git a/java/serve/src/main/java/io/ray/serve/deployment/DeploymentVersion.java b/java/serve/src/main/java/io/ray/serve/deployment/DeploymentVersion.java index 537edcb79279..651201063a9b 100644 --- a/java/serve/src/main/java/io/ray/serve/deployment/DeploymentVersion.java +++ b/java/serve/src/main/java/io/ray/serve/deployment/DeploymentVersion.java @@ -1,39 +1,52 @@ package io.ray.serve.deployment; -import com.google.protobuf.ByteString; +import com.google.gson.Gson; import com.google.protobuf.InvalidProtocolBufferException; -import io.ray.runtime.serializer.MessagePackSerializer; +import io.ray.serve.config.DeploymentConfig; import io.ray.serve.exception.RayServeException; import java.io.Serializable; +import java.util.Map; import org.apache.commons.lang3.RandomStringUtils; import org.apache.commons.lang3.StringUtils; public class DeploymentVersion implements Serializable { + private static Gson gson = new Gson(); + private static final long serialVersionUID = 3400261981775851058L; private String codeVersion; private Object userConfig; + private DeploymentConfig deploymentConfig; + + private Map rayActorOptions; + private boolean unversioned; public DeploymentVersion() { - this(null, null); + this(null, new DeploymentConfig(), null); } public DeploymentVersion(String codeVersion) { - this(codeVersion, null); + this(codeVersion, new DeploymentConfig(), null); } - public DeploymentVersion(String codeVersion, Object userConfig) { + public DeploymentVersion( + String codeVersion, DeploymentConfig deploymentConfig, Map 
rayActorOptions) { if (StringUtils.isBlank(codeVersion)) { this.unversioned = true; this.codeVersion = RandomStringUtils.randomAlphabetic(6); } else { this.codeVersion = codeVersion; } - this.userConfig = userConfig; + if (deploymentConfig == null) { + deploymentConfig = new DeploymentConfig(); + } + this.deploymentConfig = deploymentConfig; + this.rayActorOptions = rayActorOptions; + this.userConfig = deploymentConfig.getUserConfig(); } public String getCodeVersion() { @@ -44,6 +57,14 @@ public Object getUserConfig() { return userConfig; } + public DeploymentConfig getDeploymentConfig() { + return deploymentConfig; + } + + public Map getRayActorOptions() { + return rayActorOptions; + } + public boolean isUnversioned() { return unversioned; } @@ -64,12 +85,8 @@ public static DeploymentVersion fromProtoBytes(byte[] bytes) { } return new DeploymentVersion( proto.getCodeVersion(), - proto.getUserConfig() != null && proto.getUserConfig().size() != 0 - ? new Object[] { - MessagePackSerializer.decode( - proto.getUserConfig().toByteArray(), Object.class) // TODO-xlang - } - : null); + DeploymentConfig.fromProto(proto.getDeploymentConfig()), + gson.fromJson(proto.getRayActorOptions(), Map.class)); } public byte[] toProtoBytes() { @@ -79,9 +96,9 @@ public byte[] toProtoBytes() { if (StringUtils.isNotBlank(codeVersion)) { proto.setCodeVersion(codeVersion); } - if (userConfig != null) { - proto.setUserConfig( - ByteString.copyFrom(MessagePackSerializer.encode(userConfig).getLeft())); // TODO-xlang + proto.setDeploymentConfig(deploymentConfig.toProto()); + if (rayActorOptions != null && !rayActorOptions.isEmpty()) { + proto.setRayActorOptions(gson.toJson(rayActorOptions)); } return proto.build().toByteArray(); } diff --git a/java/serve/src/main/java/io/ray/serve/replica/RayServeReplica.java b/java/serve/src/main/java/io/ray/serve/replica/RayServeReplica.java index 511996fb7630..65d2e2edcd79 100644 --- a/java/serve/src/main/java/io/ray/serve/replica/RayServeReplica.java +++ 
b/java/serve/src/main/java/io/ray/serve/replica/RayServeReplica.java @@ -1,13 +1,11 @@ package io.ray.serve.replica; -import io.ray.serve.deployment.DeploymentVersion; - public interface RayServeReplica { Object handleRequest(Object requestMetadata, Object requestArgs); - default Object reconfigure(Object userConfig) { - return new DeploymentVersion(null, userConfig); + default Object reconfigure(byte[] deploymentConfigBytes) { + return null; } default boolean checkHealth() { diff --git a/java/serve/src/main/java/io/ray/serve/replica/RayServeReplicaImpl.java b/java/serve/src/main/java/io/ray/serve/replica/RayServeReplicaImpl.java index b7989eac0cba..e605834b4e96 100644 --- a/java/serve/src/main/java/io/ray/serve/replica/RayServeReplicaImpl.java +++ b/java/serve/src/main/java/io/ray/serve/replica/RayServeReplicaImpl.java @@ -279,9 +279,11 @@ public synchronized boolean prepareForShutdown() { } @Override - public DeploymentVersion reconfigure(Object userConfig) { + public DeploymentVersion reconfigure(byte[] deploymentConfigBytes) { + config = DeploymentConfig.fromProtoBytes(deploymentConfigBytes); + Object userConfig = config.getUserConfig(); DeploymentVersion deploymentVersion = - new DeploymentVersion(version.getCodeVersion(), userConfig); + new DeploymentVersion(version.getCodeVersion(), config, version.getRayActorOptions()); version = deploymentVersion; if (userConfig == null) { return deploymentVersion; diff --git a/java/serve/src/main/java/io/ray/serve/replica/RayServeWrappedReplica.java b/java/serve/src/main/java/io/ray/serve/replica/RayServeWrappedReplica.java index dd59858061ed..12d680aebc80 100644 --- a/java/serve/src/main/java/io/ray/serve/replica/RayServeWrappedReplica.java +++ b/java/serve/src/main/java/io/ray/serve/replica/RayServeWrappedReplica.java @@ -189,8 +189,8 @@ public boolean isAllocated() { * * @return */ - public Object isInitialized(Object userConfig) { - Object deploymentVersion = reconfigure(userConfig); + public Object 
isInitialized(byte[] deploymentConfigBytes) { + Object deploymentVersion = reconfigure(deploymentConfigBytes); checkHealth(); return deploymentVersion; } @@ -213,13 +213,8 @@ public boolean prepareForShutdown() { * DeploymentVersion is serialized to protobuf byte[]. */ @Override - public Object reconfigure(Object userConfig) { - boolean isCrossLanguage = userConfig instanceof byte[]; - DeploymentVersion deploymentVersion = - replica.reconfigure( - isCrossLanguage && userConfig != null - ? MessagePackSerializer.decode((byte[]) userConfig, Object.class) - : userConfig); + public Object reconfigure(byte[] deploymentConfigBytes) { + DeploymentVersion deploymentVersion = replica.reconfigure(deploymentConfigBytes); return deploymentVersion.toProtoBytes(); } diff --git a/java/serve/src/test/java/io/ray/serve/replica/RayServeReplicaTest.java b/java/serve/src/test/java/io/ray/serve/replica/RayServeReplicaTest.java index f9230de84a65..8121aa5d5b35 100644 --- a/java/serve/src/test/java/io/ray/serve/replica/RayServeReplicaTest.java +++ b/java/serve/src/test/java/io/ray/serve/replica/RayServeReplicaTest.java @@ -70,11 +70,17 @@ public void test() throws IOException { // reconfigure ObjectRef versionRef = - replicHandle.task(RayServeWrappedReplica::reconfigure, (Object) null).remote(); + replicHandle + .task(RayServeWrappedReplica::reconfigure, (new DeploymentConfig()).toProtoBytes()) + .remote(); Assert.assertEquals( DeploymentVersion.fromProtoBytes((byte[]) (versionRef.get())).getCodeVersion(), version); - replicHandle.task(RayServeWrappedReplica::reconfigure, new Object()).remote().get(); + deploymentConfig = deploymentConfig.setUserConfig(new Object()); + replicHandle + .task(RayServeWrappedReplica::reconfigure, deploymentConfig.toProtoBytes()) + .remote() + .get(); resultRef = replicHandle .task( @@ -84,8 +90,9 @@ public void test() throws IOException { .remote(); Assert.assertEquals((String) resultRef.get(), "1"); + deploymentConfig = 
deploymentConfig.setUserConfig(ImmutableMap.of("value", "100")); replicHandle - .task(RayServeWrappedReplica::reconfigure, ImmutableMap.of("value", "100")) + .task(RayServeWrappedReplica::reconfigure, deploymentConfig.toProtoBytes()) .remote() .get(); resultRef = diff --git a/python/ray/serve/_private/deploy_utils.py b/python/ray/serve/_private/deploy_utils.py index 19b096a63f14..06077f6f1e71 100644 --- a/python/ray/serve/_private/deploy_utils.py +++ b/python/ray/serve/_private/deploy_utils.py @@ -1,8 +1,11 @@ from typing import Dict, Tuple, Union, Callable, Type, Optional, Any +import hashlib +import json import logging import time from ray.serve.config import ReplicaConfig, DeploymentConfig +from ray.serve.schema import ServeApplicationSchema from ray.serve._private.constants import SERVE_LOGGER_NAME from ray.serve._private.autoscaling_policy import BasicAutoscalingPolicy from ray.serve._private.common import DeploymentInfo @@ -135,3 +138,22 @@ def deploy_args_to_deployment_info( autoscaling_policy=autoscaling_policy, is_driver_deployment=is_driver_deployment, ) + + +def get_app_code_version(app_config: ServeApplicationSchema) -> str: + """Returns the code version of an application. + + Args: + app_config: The application config. + + Returns: a hash of the import path and (application level) runtime env representing + the code version of the application. 
+ """ + encoded = json.dumps( + { + "import_path": app_config.import_path, + "runtime_env": app_config.runtime_env, + }, + sort_keys=True, + ).encode("utf-8") + return hashlib.md5(encoded).hexdigest() diff --git a/python/ray/serve/_private/deployment_state.py b/python/ray/serve/_private/deployment_state.py index ea0180f5efc0..63c981150c1d 100644 --- a/python/ray/serve/_private/deployment_state.py +++ b/python/ray/serve/_private/deployment_state.py @@ -100,7 +100,8 @@ def from_deployment_info( num_replicas = info.deployment_config.num_replicas version = DeploymentVersion( info.version, - user_config=info.deployment_config.user_config, + deployment_config=info.deployment_config, + ray_actor_options=info.replica_config.ray_actor_options, ) return cls(info, num_replicas, version, deleting) @@ -199,11 +200,8 @@ def __init__( self._ready_obj_ref: ObjectRef = None self._actor_resources: Dict[str, float] = None - self._max_concurrent_queries: int = None - self._graceful_shutdown_timeout_s: float = 0.0 + self._version: DeploymentVersion = None self._healthy: bool = True - self._health_check_period_s: float = 0.0 - self._health_check_timeout_s: float = 0.0 self._health_check_ref: Optional[ObjectRef] = None self._last_health_check_time: float = 0.0 self._consecutive_health_check_failures = 0 @@ -258,8 +256,33 @@ def actor_handle(self) -> Optional[ActorHandle]: return self._actor_handle @property - def max_concurrent_queries(self) -> int: - return self._max_concurrent_queries + def version(self) -> Optional[DeploymentVersion]: + return self._version + + @property + def deployment_config(self) -> Optional[DeploymentConfig]: + if self._version: + return self._version.deployment_config + + @property + def max_concurrent_queries(self) -> Optional[int]: + if self.deployment_config: + return self.deployment_config.max_concurrent_queries + + @property + def graceful_shutdown_timeout_s(self) -> Optional[float]: + if self.deployment_config: + return 
self.deployment_config.graceful_shutdown_timeout_s + + @property + def health_check_period_s(self) -> Optional[float]: + if self.deployment_config: + return self.deployment_config.health_check_period_s + + @property + def health_check_timeout_s(self) -> Optional[float]: + if self.deployment_config: + return self.deployment_config.health_check_timeout_s @property def pid(self) -> Optional[int]: @@ -289,18 +312,7 @@ def start(self, deployment_info: DeploymentInfo, version: DeploymentVersion): """ Start a new actor for current DeploymentReplica instance. """ - self._max_concurrent_queries = ( - deployment_info.deployment_config.max_concurrent_queries - ) - self._graceful_shutdown_timeout_s = ( - deployment_info.deployment_config.graceful_shutdown_timeout_s - ) - self._health_check_period_s = ( - deployment_info.deployment_config.health_check_period_s - ) - self._health_check_timeout_s = ( - deployment_info.deployment_config.health_check_timeout_s - ) + self._version = version self._actor_resources = deployment_info.replica_config.resource_dict # it is currently not possible to create a placement group @@ -389,17 +401,20 @@ def start(self, deployment_info: DeploymentInfo, version: DeploymentVersion): # Perform auto method name translation for java handles. 
# See https://github.com/ray-project/ray/issues/21474 - user_config = self._format_user_config( - deployment_info.deployment_config.user_config + deployment_config = copy(deployment_info.deployment_config) + deployment_config.user_config = self._format_user_config( + deployment_config.user_config ) if self._is_cross_language: self._actor_handle = JavaActorHandleProxy(self._actor_handle) self._allocated_obj_ref = self._actor_handle.is_allocated.remote() - self._ready_obj_ref = self._actor_handle.is_initialized.remote(user_config) + self._ready_obj_ref = self._actor_handle.is_initialized.remote( + deployment_config.to_proto_bytes() + ) else: self._allocated_obj_ref = self._actor_handle.is_allocated.remote() self._ready_obj_ref = self._actor_handle.is_initialized.remote( - user_config, + deployment_config, # Ensure that `is_allocated` will execute before `reconfigure`, # because `reconfigure` runs user code that could block the replica # asyncio loop. If that happens before `is_allocated` is executed, @@ -416,14 +431,28 @@ def _format_user_config(self, user_config: Any): temp = msgpack_deserialize(temp) return temp - def update_user_config(self, user_config: Any): + def reconfigure(self, version: DeploymentVersion) -> bool: """ - Update user config of existing actor behind current - DeploymentReplica instance. + Update replica version. Also, updates the deployment config on the actor + behind this DeploymentReplica instance if necessary. + + Returns: whether the actor is being updated. 
""" - self._ready_obj_ref = self._actor_handle.reconfigure.remote( - self._format_user_config(user_config) - ) + updating = False + if self._version.requires_actor_reconfigure(version): + # Call into replica actor reconfigure() with updated user config and + # graceful_shutdown_wait_loop_s + updating = True + deployment_config = copy(version.deployment_config) + deployment_config.user_config = self._format_user_config( + deployment_config.user_config + ) + self._ready_obj_ref = self._actor_handle.reconfigure.remote( + deployment_config + ) + + self._version = version + return updating def recover(self): """ @@ -446,7 +475,7 @@ def recover(self): else: self._ready_obj_ref = self._actor_handle.get_metadata.remote() - def check_ready(self) -> Tuple[ReplicaStartupStatus, Optional[DeploymentVersion]]: + def check_ready(self) -> ReplicaStartupStatus: """ Check if current replica has started by making ray API calls on relevant actor / object ref. @@ -463,36 +492,29 @@ def check_ready(self) -> Tuple[ReplicaStartupStatus, Optional[DeploymentVersion] - replica initialization failed. SUCCEEDED: - replica initialization succeeded. - version: - None: - - for PENDING_ALLOCATION, PENDING_INITIALIZATION, or FAILED states - version: - - for SUCCEEDED state """ # Check whether the replica has been allocated. if not self._check_obj_ref_ready(self._allocated_obj_ref): - return ReplicaStartupStatus.PENDING_ALLOCATION, None + return ReplicaStartupStatus.PENDING_ALLOCATION # Check whether relica initialization has completed. replica_ready = self._check_obj_ref_ready(self._ready_obj_ref) # In case of deployment constructor failure, ray.get will help to # surface exception to each update() cycle. if not replica_ready: - return ReplicaStartupStatus.PENDING_INITIALIZATION, None + return ReplicaStartupStatus.PENDING_INITIALIZATION else: try: # TODO(simon): fully implement reconfigure for Java replicas. 
if self._is_cross_language: - return ReplicaStartupStatus.SUCCEEDED, None + return ReplicaStartupStatus.SUCCEEDED + + # todo: The replica's userconfig whitch java client created + # is different from the controller's userconfig + if not self._deployment_is_cross_language: + _, self._version = ray.get(self._ready_obj_ref) - deployment_config, version = ray.get(self._ready_obj_ref) - self._max_concurrent_queries = deployment_config.max_concurrent_queries - self._graceful_shutdown_timeout_s = ( - deployment_config.graceful_shutdown_timeout_s - ) - self._health_check_period_s = deployment_config.health_check_period_s - self._health_check_timeout_s = deployment_config.health_check_timeout_s self._pid, self._actor_id, self._node_id, self._node_ip = ray.get( self._allocated_obj_ref ) @@ -501,13 +523,9 @@ def check_ready(self) -> Tuple[ReplicaStartupStatus, Optional[DeploymentVersion] f"Exception in replica '{self._replica_tag}', " "the replica will be stopped." ) - return ReplicaStartupStatus.FAILED, None - if self._deployment_is_cross_language: - # todo: The replica's userconfig whitch java client created - # is different from the controller's userconfig - return ReplicaStartupStatus.SUCCEEDED, None - else: - return ReplicaStartupStatus.SUCCEEDED, version + return ReplicaStartupStatus.FAILED + + return ReplicaStartupStatus.SUCCEEDED @property def actor_resources(self) -> Optional[Dict[str, float]]: @@ -528,7 +546,7 @@ def graceful_stop(self) -> Duration: except ValueError: pass - return self._graceful_shutdown_timeout_s + return self.graceful_shutdown_timeout_s def check_stopped(self) -> bool: """Check if the actor has exited.""" @@ -575,12 +593,12 @@ def _check_active_health_check(self) -> ReplicaHealthCheckResponse: f"Health check for replica {self._replica_tag} failed: {e}" ) response = ReplicaHealthCheckResponse.APP_FAILURE - elif time.time() - self._last_health_check_time > self._health_check_timeout_s: + elif time.time() - self._last_health_check_time > 
self.health_check_timeout_s: # Health check hasn't returned and the timeout is up, consider it failed. logger.warning( "Didn't receive health check response for replica " f"{self._replica_tag} after " - f"{self._health_check_timeout_s}s, marking it unhealthy." + f"{self.health_check_timeout_s}s, marking it unhealthy." ) response = ReplicaHealthCheckResponse.APP_FAILURE else: @@ -597,7 +615,7 @@ def _should_start_new_health_check(self) -> bool: A health check will be started if: 1) There is not already an active health check. - 2) It has been more than self._health_check_period_s since the + 2) It has been more than health_check_period_s since the previous health check was *started*. This assumes that self._health_check_ref is reset to `None` when an @@ -612,7 +630,7 @@ def _should_start_new_health_check(self) -> bool: # check. Add some randomness to avoid synchronizing across all # replicas. time_since_last = time.time() - self._last_health_check_time - randomized_period = self._health_check_period_s * random.uniform(0.9, 1.1) + randomized_period = self.health_check_period_s * random.uniform(0.9, 1.1) return time_since_last > randomized_period def check_health(self) -> bool: @@ -644,7 +662,7 @@ def check_health(self) -> bool: ): logger.warning( f"Replica {self._replica_tag} failed the health " - f"check {self._consecutive_health_check_failures}" + f"check {self._consecutive_health_check_failures} " "times in a row, marking it unhealthy." 
) self._healthy = False @@ -701,7 +719,6 @@ def __init__( self._controller_name = controller_name self._deployment_name = deployment_name self._replica_tag = replica_tag - self._version = version self._start_time = None self._prev_slow_startup_warning_time = None @@ -742,7 +759,7 @@ def deployment_name(self) -> str: @property def version(self): - return self._version + return self._actor.version @property def actor_handle(self) -> ActorHandle: @@ -760,17 +777,15 @@ def start(self, deployment_info: DeploymentInfo, version: DeploymentVersion): self._actor.start(deployment_info, version) self._start_time = time.time() self._prev_slow_startup_warning_time = time.time() - self._version = version - def update_user_config(self, user_config: Any): + def reconfigure(self, version: DeploymentVersion) -> bool: """ - Update user config of existing actor behind current - DeploymentReplica instance. + Update replica version. Also, updates the deployment config on the actor + behind this DeploymentReplica instance if necessary. + + Returns: whether the actor is being updated. """ - self._actor.update_user_config(user_config) - self._version = DeploymentVersion( - self._version.code_version, user_config=user_config - ) + return self._actor.reconfigure(version) def recover(self): """ @@ -791,15 +806,7 @@ def check_started(self) -> ReplicaStartupStatus: status: Most recent state of replica by querying actor obj ref """ - status, version = self._actor.check_ready() - - if status == ReplicaStartupStatus.SUCCEEDED: - # Re-assign DeploymentVersion if start / update / recover succeeded - # by reading re-computed version in RayServeReplica - if version is not None: - self._version = version - - return status + return self._actor.check_ready() def stop(self, graceful: bool = True) -> None: """Stop the replica. 
@@ -1158,6 +1165,23 @@ def _set_target_state(self, target_info: DeploymentInfo) -> None: target_state = DeploymentTargetState.from_deployment_info(target_info) self._save_checkpoint_func(writeahead_checkpoints={self._name: target_state}) + if self._target_state.version == target_state.version: + # Record either num replica or autoscaling config lightweight update + if ( + self._target_state.version.deployment_config.autoscaling_config + != target_state.version.deployment_config.autoscaling_config + ): + record_extra_usage_tag( + TagKey.SERVE_AUTOSCALING_CONFIG_LIGHTWEIGHT_UPDATED, "True" + ) + elif ( + self._target_state.version.deployment_config.num_replicas + != target_state.version.deployment_config.num_replicas + ): + record_extra_usage_tag( + TagKey.SERVE_NUM_REPLICAS_LIGHTWEIGHT_UPDATED, "True" + ) + self._target_state = target_state self._curr_status_info = DeploymentStatusInfo( self._name, DeploymentStatus.UPDATING @@ -1180,10 +1204,14 @@ def deploy(self, deployment_info: DeploymentInfo) -> bool: existing_info = self._target_state.info if existing_info is not None: # Redeploying should not reset the deployment's start time. 
- deployment_info.start_time_ms = existing_info.start_time_ms + if not self._target_state.deleting: + deployment_info.start_time_ms = existing_info.start_time_ms if ( - existing_info.deployment_config == deployment_info.deployment_config + not self._target_state.deleting + and existing_info.deployment_config == deployment_info.deployment_config + and existing_info.replica_config.ray_actor_options + == deployment_info.replica_config.ray_actor_options and deployment_info.version is not None and existing_info.version == deployment_info.version ): @@ -1230,8 +1258,11 @@ def autoscale( def delete(self) -> None: self._set_target_state_deleting() - def _stop_wrong_version_replicas(self, max_to_stop=math.inf) -> int: - """Stop the replicas with outdated versions + def _stop_or_update_outdated_version_replicas(self, max_to_stop=math.inf) -> int: + """Stop or update replicas with outdated versions. + + Stop replicas with versions that require the actor to be restarted, and + reconfigure replicas that require refreshing deployment config values. Args: max_to_stop: max number of replicas to stop, by default, @@ -1243,33 +1274,35 @@ def _stop_wrong_version_replicas(self, max_to_stop=math.inf) -> int: max_replicas=max_to_stop, ranking_function=rank_replicas_for_stopping, ) - replicas_stopped = False + replicas_changed = False code_version_changes = 0 - user_config_changes = 0 + reconfigure_changes = 0 for replica in replicas_to_update: - # If the code version is a mismatch, we stop the replica. A new one - # with the correct version will be started later as part of the + # If the new version requires the actors to be restarted, stop the replica. + # A new one with the correct version will be started later as part of the # normal scale-up process. 
- if replica.version.code_version != self._target_state.version.code_version: + if replica.version.requires_actor_restart(self._target_state.version): code_version_changes += 1 replica.stop() self._replicas.add(ReplicaState.STOPPING, replica) - replicas_stopped = True - # If only the user_config is a mismatch, we update it dynamically - # without restarting the replica. - elif ( - replica.version.user_config_hash - != self._target_state.version.user_config_hash - ): - user_config_changes += 1 - replica.update_user_config(self._target_state.version.user_config) - self._replicas.add(ReplicaState.UPDATING, replica) + replicas_changed = True + # Otherwise, only lightweight options in deployment config is a mismatch, so + # we update it dynamically without restarting the replica. + else: + reconfigure_changes += 1 + if replica.version.requires_long_poll_broadcast( + self._target_state.version + ): + replicas_changed = True + actor_updating = replica.reconfigure(self._target_state.version) + if actor_updating: + self._replicas.add(ReplicaState.UPDATING, replica) + else: + self._replicas.add(ReplicaState.RUNNING, replica) logger.debug( "Adding UPDATING to replica_tag: " f"{replica.replica_tag}, deployment_name: {self._name}" ) - else: - assert False, "Update must be code version or user config." if code_version_changes > 0: logger.info( @@ -1277,14 +1310,15 @@ def _stop_wrong_version_replicas(self, max_to_stop=math.inf) -> int: f"deployment '{self._name}' with outdated versions." ) - if user_config_changes > 0: + if reconfigure_changes > 0: logger.info( - f"Updating {user_config_changes} replicas of " - f"deployment '{self._name}' with outdated " - f"user_configs." + f"Updating {reconfigure_changes} replicas of deployment '{self._name}' " + "with outdated deployment configs." 
) + # Record user config lightweight update + record_extra_usage_tag(TagKey.SERVE_USER_CONFIG_LIGHTWEIGHT_UPDATED, "True") - return replicas_stopped + return replicas_changed def _check_and_stop_wrong_version_replicas(self) -> bool: """Stops replicas with outdated versions to implement rolling updates. @@ -1337,7 +1371,7 @@ def _check_and_stop_wrong_version_replicas(self) -> bool: rollout_size = max(int(0.2 * self._target_state.num_replicas), 1) max_to_stop = max(rollout_size - pending_replicas, 0) - return self._stop_wrong_version_replicas(max_to_stop) + return self._stop_or_update_outdated_version_replicas(max_to_stop) def _scale_deployment_replicas(self) -> bool: """Scale the given deployment to the number of replicas.""" @@ -1346,7 +1380,7 @@ def _scale_deployment_replicas(self) -> bool: self._target_state.num_replicas >= 0 ), "Number of replicas must be greater than or equal to 0." - replicas_stopped = self._check_and_stop_wrong_version_replicas() + replicas_changed = self._check_and_stop_wrong_version_replicas() current_replicas = self._replicas.count( states=[ReplicaState.STARTING, ReplicaState.UPDATING, ReplicaState.RUNNING] @@ -1357,7 +1391,7 @@ def _scale_deployment_replicas(self) -> bool: self._target_state.num_replicas - current_replicas - recovering_replicas ) if delta_replicas == 0: - return False + return replicas_changed elif delta_replicas > 0: # Don't ever exceed self._target_state.num_replicas. 
@@ -1380,7 +1414,7 @@ def _scale_deployment_replicas(self) -> bool: time.time() - self._last_retry < self._backoff_time_s + random.uniform(0, 3) ): - return replicas_stopped + return replicas_changed self._last_retry = time.time() logger.info( @@ -1407,7 +1441,7 @@ def _scale_deployment_replicas(self) -> bool: ) elif delta_replicas < 0: - replicas_stopped = True + replicas_changed = True to_remove = -delta_replicas logger.info( f"Removing {to_remove} replica{'s' if to_remove > 1 else ''} " @@ -1432,7 +1466,7 @@ def _scale_deployment_replicas(self) -> bool: replica.stop() self._replicas.add(ReplicaState.STOPPING, replica) - return replicas_stopped + return replicas_changed def _check_curr_status(self) -> Tuple[bool, bool]: """Check the current deployment status. @@ -1857,7 +1891,7 @@ def update(self) -> Tuple[bool, bool]: new_config.version = self._target_state.version.code_version self._set_target_state(new_config) max_to_stop = self._calculate_max_replicas_to_stop() - self._stop_wrong_version_replicas(max_to_stop) + self._stop_or_update_outdated_version_replicas(max_to_stop) self._deploy_driver() self._check_and_update_replicas() return self._check_curr_status() diff --git a/python/ray/serve/_private/replica.py b/python/ray/serve/_private/replica.py index 206c873f39e9..f304efe860fc 100644 --- a/python/ray/serve/_private/replica.py +++ b/python/ray/serve/_private/replica.py @@ -161,8 +161,7 @@ async def initialize_replica(): _callable, deployment_name, replica_tag, - deployment_config, - deployment_config.user_config, + deployment_config.autoscaling_config, version, is_function, controller_handle, @@ -227,13 +226,15 @@ async def is_allocated(self) -> str: ) async def is_initialized( - self, user_config: Optional[Any] = None, _after: Optional[Any] = None + self, + deployment_config: DeploymentConfig = None, + _after: Optional[Any] = None, ): # Unused `_after` argument is for scheduling: passing an ObjectRef # allows delaying reconfiguration until after this call has 
returned. await self._initialize_replica() - metadata = await self.reconfigure(user_config) + metadata = await self.reconfigure(deployment_config) # A new replica should not be considered healthy until it passes an # initial health check. If an initial health check fails, consider @@ -242,11 +243,9 @@ async def is_initialized( return metadata async def reconfigure( - self, user_config: Optional[Any] = None + self, deployment_config: DeploymentConfig ) -> Tuple[DeploymentConfig, DeploymentVersion]: - if user_config is not None: - await self.replica.reconfigure(user_config) - + await self.replica.reconfigure(deployment_config) return await self.get_metadata() async def get_metadata( @@ -254,7 +253,7 @@ async def get_metadata( ) -> Tuple[DeploymentConfig, DeploymentVersion]: # Wait for replica initialization to finish await self._init_finish_event.wait() - return self.replica.deployment_config, self.replica.version + return self.replica.version.deployment_config, self.replica.version async def prepare_for_shutdown(self): if self.replica is not None: @@ -281,19 +280,17 @@ def __init__( _callable: Callable, deployment_name: str, replica_tag: ReplicaTag, - deployment_config: DeploymentConfig, - user_config: Any, + autoscaling_config: Any, version: DeploymentVersion, is_function: bool, controller_handle: ActorHandle, ) -> None: - self.deployment_config = deployment_config self.deployment_name = deployment_name self.replica_tag = replica_tag self.callable = _callable self.is_function = is_function - self.user_config = user_config self.version = version + self.deployment_config = None self.rwlock = aiorwlock.RWLock() user_health_check = getattr(_callable, HEALTH_CHECK_METHOD, None) @@ -360,11 +357,9 @@ def user_health_check(): self.restart_counter.inc() - self._shutdown_wait_loop_s = deployment_config.graceful_shutdown_wait_loop_s - - if deployment_config.autoscaling_config: + if autoscaling_config: process_remote_func = controller_handle.record_autoscaling_metrics.remote - 
config = deployment_config.autoscaling_config + config = autoscaling_config start_metrics_pusher( interval_s=config.metrics_interval_s, collection_callback=self._collect_autoscaling_metrics, @@ -484,26 +479,36 @@ async def invoke_single(self, request_item: Query) -> Tuple[Any, bool]: return result, success - async def reconfigure(self, user_config: Any): + async def reconfigure(self, deployment_config: DeploymentConfig): async with self.rwlock.writer_lock: - self.user_config = user_config - self.version = DeploymentVersion( - self.version.code_version, user_config=user_config + user_config_changed = False + if ( + self.deployment_config is None + or self.deployment_config.user_config != deployment_config.user_config + ): + user_config_changed = True + self.deployment_config = deployment_config + self.version = DeploymentVersion.from_deployment_version( + self.version, self.deployment_config ) - if self.is_function: - raise ValueError("deployment_def must be a class to use user_config") - elif not hasattr(self.callable, RECONFIGURE_METHOD): - raise RayServeException( - "user_config specified but deployment " - + self.deployment_name - + " missing " - + RECONFIGURE_METHOD - + " method" + + if self.deployment_config.user_config is not None and user_config_changed: + if self.is_function: + raise ValueError( + "deployment_def must be a class to use user_config" + ) + elif not hasattr(self.callable, RECONFIGURE_METHOD): + raise RayServeException( + "user_config specified but deployment " + + self.deployment_name + + " missing " + + RECONFIGURE_METHOD + + " method" + ) + reconfigure_method = sync_to_async( + getattr(self.callable, RECONFIGURE_METHOD) ) - reconfigure_method = sync_to_async( - getattr(self.callable, RECONFIGURE_METHOD) - ) - await reconfigure_method(user_config) + await reconfigure_method(self.deployment_config.user_config) async def handle_request(self, request: Query) -> asyncio.Future: async with self.rwlock.reader_lock: @@ -546,7 +551,7 @@ async def 
prepare_for_shutdown(self): while True: # Sleep first because we want to make sure all the routers receive # the notification to remove this replica first. - await asyncio.sleep(self._shutdown_wait_loop_s) + await asyncio.sleep(self.deployment_config.graceful_shutdown_wait_loop_s) method_stat = self._get_handle_request_stats() # The handle_request method wasn't even invoked. if method_stat is None: @@ -557,8 +562,9 @@ async def prepare_for_shutdown(self): else: logger.info( "Waiting for an additional " - f"{self._shutdown_wait_loop_s}s to shut down because " - f"there are {self.num_ongoing_requests} ongoing requests." + f"{self.deployment_config.graceful_shutdown_wait_loop_s}s to shut " + f"down because there are {self.num_ongoing_requests} ongoing " + "requests." ) # Explicitly call the del method to trigger clean up. diff --git a/python/ray/serve/_private/utils.py b/python/ray/serve/_private/utils.py index 038d3f3efd30..d15d9f7f4be2 100644 --- a/python/ray/serve/_private/utils.py +++ b/python/ray/serve/_private/utils.py @@ -44,6 +44,19 @@ class DEFAULT(Enum): VALUE = 1 +class DeploymentOptionUpdateType(str, Enum): + # Nothing needs to be done other than setting the target state. + LightWeight = "LightWeight" + # Each DeploymentReplica instance (tracked in DeploymentState) uses certain options + # from the deployment config. These values need to be updated in DeploymentReplica. + NeedsReconfigure = "NeedsReconfigure" + # Options that are sent to the replica actor. If changed, reconfigure() on the actor + # needs to be called to update these values. + NeedsActorReconfigure = "NeedsActorReconfigure" + # If changed, restart all replicas. 
+ HeavyWeight = "HeavyWeight" + + # Type alias: objects that can be DEFAULT.VALUE have type Default[T] T = TypeVar("T") Default = Union[DEFAULT, T] diff --git a/python/ray/serve/_private/version.py b/python/ray/serve/_private/version.py index 1e43baeed6f9..c0e252ebb6dc 100644 --- a/python/ray/serve/_private/version.py +++ b/python/ray/serve/_private/version.py @@ -1,14 +1,25 @@ from abc import ABC +from copy import deepcopy import json -from typing import Any, Optional +from typing import Any, Optional, Dict, List from zlib import crc32 -from ray.serve._private.utils import get_random_letters +from ray.serve._private.utils import get_random_letters, DeploymentOptionUpdateType from ray.serve.generated.serve_pb2 import DeploymentVersion as DeploymentVersionProto +from ray.serve.config import DeploymentConfig + +import logging + +logger = logging.getLogger("ray.serve") class DeploymentVersion: - def __init__(self, code_version: Optional[str], user_config: Optional[Any] = None): + def __init__( + self, + code_version: Optional[str], + deployment_config: DeploymentConfig, + ray_actor_options: Optional[Dict], + ): if code_version is not None and not isinstance(code_version, str): raise TypeError(f"code_version must be str, got {type(code_version)}.") if code_version is None: @@ -18,14 +29,18 @@ def __init__(self, code_version: Optional[str], user_config: Optional[Any] = Non self.unversioned = False self.code_version = code_version - self.user_config = user_config - # TODO(simon): make this xlang compatible - if isinstance(user_config, bytes): - serialized_user_config = user_config - else: - serialized_user_config = str.encode(json.dumps(user_config, sort_keys=True)) - self.user_config_hash = crc32(serialized_user_config) - self._hash = crc32(serialized_user_config + self.code_version.encode("utf-8")) + # Options for this field may be mutated over time, so any logic that uses this + # should access this field directly + self.deployment_config: DeploymentConfig = 
deployment_config + self.ray_actor_options: Dict = ray_actor_options + self.compute_hashes() + + @classmethod + def from_deployment_version(cls, deployment_version, deployment_config): + version_copy = deepcopy(deployment_version) + version_copy.deployment_config = deployment_config + version_copy.compute_hashes() + return version_copy def __hash__(self) -> int: return self._hash @@ -35,9 +50,99 @@ def __eq__(self, other: Any) -> bool: return False return self._hash == other._hash + def requires_actor_restart(self, new_version): + """Determines whether the new version requires actors of the current version to + be restarted. + """ + return ( + self.code_version != new_version.code_version + or self.ray_actor_options_hash != new_version.ray_actor_options_hash + ) + + def requires_actor_reconfigure(self, new_version): + """Determines whether the new version requires calling reconfigure() on the + replica actor. + """ + return self.reconfigure_actor_hash != new_version.reconfigure_actor_hash + + def requires_long_poll_broadcast(self, new_version): + """Determines whether lightweightly updating an existing replica to the new + version requires broadcasting through long poll that the running replicas has + changed. + """ + return ( + self.deployment_config.max_concurrent_queries + != new_version.deployment_config.max_concurrent_queries + ) + + def compute_hashes(self): + # If this changes, the controller will directly restart all existing replicas. 
+ serialized_ray_actor_options = _serialize(self.ray_actor_options or {}) + self.ray_actor_options_hash = crc32(serialized_ray_actor_options) + + # If this changes, DeploymentReplica.reconfigure() will call reconfigure on the + # actual replica actor + self.reconfigure_actor_hash = crc32( + self._get_serialized_options( + [DeploymentOptionUpdateType.NeedsActorReconfigure] + ) + ) + + # Used by __eq__ in deployment state to either reconfigure the replicas or + # stop and restart them + self._hash = crc32( + self.code_version.encode("utf-8") + + serialized_ray_actor_options + + self._get_serialized_options( + [ + DeploymentOptionUpdateType.NeedsReconfigure, + DeploymentOptionUpdateType.NeedsActorReconfigure, + ] + ) + ) + def to_proto(self) -> bytes: # TODO(simon): enable cross language user config - return DeploymentVersionProto(code_version=self.code_version, user_config=b"") + return DeploymentVersionProto( + code_version=self.code_version, + deployment_config=self.deployment_config.to_proto(), + ray_actor_options=json.dumps(self.ray_actor_options), + ) + + @classmethod + def from_proto(cls, proto: DeploymentVersionProto): + return DeploymentVersion( + proto.code_version, + DeploymentConfig.from_proto(proto.deployment_config), + json.loads(proto.ray_actor_options), + ) + + def _get_serialized_options( + self, update_types: List[DeploymentOptionUpdateType] + ) -> bytes: + """Returns a serialized dictionary containing fields of a deployment config that + should prompt a deployment version update. 
+ """ + reconfigure_dict = {} + for option_name, field in self.deployment_config.__fields__.items(): + option_weight = field.field_info.extra.get("update_type") + if option_weight in update_types: + reconfigure_dict[option_name] = getattr( + self.deployment_config, option_name + ) + + if ( + isinstance(self.deployment_config.user_config, bytes) + and "user_config" in reconfigure_dict + ): + del reconfigure_dict["user_config"] + return self.deployment_config.user_config + _serialize(reconfigure_dict) + + return _serialize(reconfigure_dict) + + +def _serialize(json_object): + return str.encode(json.dumps(json_object, sort_keys=True)) class VersionedReplica(ABC): diff --git a/python/ray/serve/config.py b/python/ray/serve/config.py index 6c5b4c891747..b0a6299a094c 100644 --- a/python/ray/serve/config.py +++ b/python/ray/serve/config.py @@ -12,6 +12,7 @@ NonNegativeInt, PositiveInt, validator, + Field, ) from ray import cloudpickle @@ -23,7 +24,7 @@ DEFAULT_HTTP_HOST, DEFAULT_HTTP_PORT, ) -from ray.serve._private.utils import DEFAULT +from ray.serve._private.utils import DEFAULT, DeploymentOptionUpdateType from ray.serve.generated.serve_pb2 import ( DeploymentConfig as DeploymentConfigProto, DeploymentLanguage, @@ -141,21 +142,37 @@ class DeploymentConfig(BaseModel): The names of options manually configured by the user. 
""" - num_replicas: NonNegativeInt = 1 - max_concurrent_queries: Optional[int] = None - user_config: Any = None + num_replicas: NonNegativeInt = Field( + default=1, update_type=DeploymentOptionUpdateType.LightWeight + ) + max_concurrent_queries: Optional[int] = Field( + default=None, update_type=DeploymentOptionUpdateType.NeedsReconfigure + ) + user_config: Any = Field( + default=None, update_type=DeploymentOptionUpdateType.NeedsActorReconfigure + ) - graceful_shutdown_timeout_s: NonNegativeFloat = ( - DEFAULT_GRACEFUL_SHUTDOWN_TIMEOUT_S # noqa: E501 + graceful_shutdown_timeout_s: NonNegativeFloat = Field( + default=DEFAULT_GRACEFUL_SHUTDOWN_TIMEOUT_S, + update_type=DeploymentOptionUpdateType.NeedsReconfigure, ) - graceful_shutdown_wait_loop_s: NonNegativeFloat = ( - DEFAULT_GRACEFUL_SHUTDOWN_WAIT_LOOP_S # noqa: E501 + graceful_shutdown_wait_loop_s: NonNegativeFloat = Field( + default=DEFAULT_GRACEFUL_SHUTDOWN_WAIT_LOOP_S, + update_type=DeploymentOptionUpdateType.NeedsActorReconfigure, ) - health_check_period_s: PositiveFloat = DEFAULT_HEALTH_CHECK_PERIOD_S - health_check_timeout_s: PositiveFloat = DEFAULT_HEALTH_CHECK_TIMEOUT_S + health_check_period_s: PositiveFloat = Field( + default=DEFAULT_HEALTH_CHECK_PERIOD_S, + update_type=DeploymentOptionUpdateType.NeedsReconfigure, + ) + health_check_timeout_s: PositiveFloat = Field( + default=DEFAULT_HEALTH_CHECK_TIMEOUT_S, + update_type=DeploymentOptionUpdateType.NeedsReconfigure, + ) - autoscaling_config: Optional[AutoscalingConfig] = None + autoscaling_config: Optional[AutoscalingConfig] = Field( + default=None, update_type=DeploymentOptionUpdateType.LightWeight + ) # This flag is used to let replica know they are deplyed from # a different language. @@ -165,7 +182,10 @@ class DeploymentConfig(BaseModel): # the deploymnent use. 
deployment_language: Any = DeploymentLanguage.PYTHON - version: Optional[str] = None + version: Optional[str] = Field( + default=None, + update_type=DeploymentOptionUpdateType.HeavyWeight, + ) # Contains the names of deployment options manually set by the user user_configured_option_names: Set[str] = set() diff --git a/python/ray/serve/controller.py b/python/ray/serve/controller.py index 19914e59a951..0e5cba0794a8 100644 --- a/python/ray/serve/controller.py +++ b/python/ray/serve/controller.py @@ -37,7 +37,10 @@ DEPLOYMENT_NAME_PREFIX_SEPARATOR, MULTI_APP_MIGRATION_MESSAGE, ) -from ray.serve._private.deploy_utils import deploy_args_to_deployment_info +from ray.serve._private.deploy_utils import ( + deploy_args_to_deployment_info, + get_app_code_version, +) from ray.serve._private.deployment_state import DeploymentStateManager, ReplicaState from ray.serve._private.endpoint_state import EndpointState from ray.serve._private.http_state import HTTPState @@ -55,10 +58,8 @@ from ray.serve._private.utils import ( DEFAULT, override_runtime_envs_except_env_vars, - get_random_letters, ) from ray.serve._private.application_state import ApplicationStateManager -from ray._private.usage.usage_lib import TagKey, record_extra_usage_tag logger = logging.getLogger(SERVE_LOGGER_NAME) @@ -347,10 +348,7 @@ def _recover_config_from_checkpoint(self): deployment_time, deploy_mode, config_checkpoints_dict = pickle.loads( checkpoint ) - applications = [ - app_config_dict - for app_config_dict, _ in config_checkpoints_dict.values() - ] + applications = list(config_checkpoints_dict.values()) if deploy_mode == ServeDeployMode.SINGLE_APP: self.deploy_apps( ServeApplicationSchema.parse_obj(applications[0]), @@ -539,38 +537,13 @@ def deploy_apps( if not deployment_time: deployment_time = time.time() - # Load checkpointed data from last time deploy_apps was called - config_checkpoint = self.kv_store.get(CONFIG_CHECKPOINT_KEY) - if config_checkpoint is None: - config_checkpoints_dict = {} - else: - 
_, _, config_checkpoints_dict = pickle.loads(config_checkpoint) - new_config_checkpoint = {} for app_config in applications: - app_config_dict = app_config.dict(exclude_unset=True) - - # Compare new config options with old ones, set versions of new deployments - if app_config.name in config_checkpoints_dict: - (prev_app_config, prev_versions) = config_checkpoints_dict[ - app_config.name - ] - - updated_versions = _generate_deployment_config_versions( - app_config_dict, - prev_app_config, - prev_versions, - ) - else: - updated_versions = _generate_deployment_config_versions(app_config_dict) - - deployment_override_options = app_config_dict.get("deployments", []) + code_version = get_app_code_version(app_config) - new_config_checkpoint[app_config.name] = ( - app_config_dict, - updated_versions, - ) + app_config_dict = app_config.dict(exclude_unset=True) + new_config_checkpoint[app_config.name] = app_config_dict logger.info( "Starting deploy_serve_application " @@ -581,8 +554,8 @@ def deploy_apps( ).remote( app_config.import_path, app_config.runtime_env, - deployment_override_options, - updated_versions, + app_config_dict.get("deployments", []), + code_version, app_config_dict.get("route_prefix", DEFAULT.VALUE), app_config.name, app_config.args, @@ -774,7 +747,7 @@ def get_app_config(self, name: str = SERVE_DEFAULT_APP_NAME) -> Optional[Dict]: if checkpoint is not None: _, _, config_checkpoints_dict = pickle.loads(checkpoint) if name in config_checkpoints_dict: - config, _ = config_checkpoints_dict[name] + config = config_checkpoints_dict[name] return ServeApplicationSchema.parse_obj(config).dict(exclude_unset=True) def get_all_deployment_statuses(self) -> List[bytes]: @@ -809,104 +782,12 @@ def delete_apps(self, names: Iterable[str]): self.delete_deployments(deployments_to_delete) -def _generate_deployment_config_versions( - new_config: Dict, - last_deployed_config: Dict = None, - last_deployed_versions: Dict = None, -) -> Dict[str, str]: - """ - This function 
determines whether each deployment's version should be changed based - on the newly deployed config. - - When ``import_path`` or ``runtime_env`` is changed, the versions for all deployments - should be changed, so old replicas are torn down. When the options for a deployment - in ``deployments`` change, its version should generally change. The only deployment - options that can be changed without tearing down replicas (i.e. changing the - version) are: - * num_replicas - * user_config - * autoscaling_config - - A deployment option is considered changed when: - * it was not specified in last_deployed_config and is specified in new_config - * it was specified in last_deployed_config and is not specified in new_config - * it is specified in both last_deployed_config and new_config but the specified - value has changed - - Args: - new_config: Newly deployed config dict that follows ServeApplicationSchema - last_deployed_config: Last deployed config dict that follows - ServeApplicationSchema, which is an empty dictionary if there is no previous - deployment - last_deployed_versions: Dictionary of {deployment_name: str -> version: str} - tracking the versions of deployments listed in the last deployed config - - Returns: - Dictionary of {deployment_name: str -> version: str} containing updated - versions for deployments listed in the new config - """ - # If import_path or runtime_env is changed, it is considered a code change - if last_deployed_config is None: - last_deployed_config = {} - if last_deployed_versions is None: - last_deployed_versions = {} - - if last_deployed_config.get("import_path") != new_config.get( - "import_path" - ) or last_deployed_config.get("runtime_env") != new_config.get("runtime_env"): - last_deployed_config, last_deployed_versions = {}, {} - - new_deployments = {d["name"]: d for d in new_config.get("deployments", [])} - old_deployments = { - d["name"]: d for d in last_deployed_config.get("deployments", []) - } - - lightweight_update_options = 
{ - "num_replicas": TagKey.SERVE_NUM_REPLICAS_LIGHTWEIGHT_UPDATED, - "user_config": TagKey.SERVE_USER_CONFIG_LIGHTWEIGHT_UPDATED, - "autoscaling_config": TagKey.SERVE_AUTOSCALING_CONFIG_LIGHTWEIGHT_UPDATED, - } - - def exclude_lightweight_update_options(dict): - # Exclude config options from dict that qualify for a lightweight config - # update. Changes in any other config options are considered a code change, - # and require a version change to trigger an update that tears - # down existing replicas and replaces them with updated ones. - return { - option: dict[option] - for option in dict - if option not in lightweight_update_options - } - - updated_versions = {} - for name in new_deployments: - old_deployment = old_deployments.get(name, {}) - new_deployment = new_deployments[name] - new_deployment_filtered = exclude_lightweight_update_options(new_deployment) - old_deployment_filtered = exclude_lightweight_update_options(old_deployment) - - # If config options haven't changed, version stays the same - # otherwise, generate a new random version - if old_deployment_filtered == new_deployment_filtered: - updated_versions[name] = last_deployed_versions[name] - - # If the rest of the options haven't changed, but a lightweight option has - # changed, then Serve will execute a lightweight update - for option, tagkey in lightweight_update_options.items(): - if old_deployment.get(option) != new_deployment.get(option): - record_extra_usage_tag(tagkey, "True") - else: - updated_versions[name] = get_random_letters() - - return updated_versions - - @ray.remote(num_cpus=0, max_calls=1) def deploy_serve_application( import_path: str, runtime_env: Dict, deployment_override_options: List[Dict], - deployment_versions: Dict, + code_version: str, route_prefix: str, name: str, args: Dict, @@ -934,7 +815,7 @@ def deploy_serve_application( app = call_app_builder_with_args_if_necessary(import_attr(import_path), args) app = build(app, name) - # Override options for each deployment. 
+ # Override options for each deployment listed in the config. for options in deployment_override_options: deployment_name = options["name"] unique_deployment_name = ( @@ -963,13 +844,18 @@ def deploy_serve_application( ) ray_actor_options.update({"runtime_env": merged_env}) options["ray_actor_options"] = ray_actor_options - options["version"] = deployment_versions[deployment_name] options["name"] = unique_deployment_name # Update the deployment's options app.deployments[unique_deployment_name].set_options( **options, _internal=True ) + # Set code version for each deployment + for deployment_name in app.deployments: + app.deployments[deployment_name].set_options( + version=code_version, _internal=True + ) + # Run the application locally on the cluster. serve.run(app, name=name, route_prefix=route_prefix) except KeyboardInterrupt: diff --git a/python/ray/serve/tests/test_config_files/pid.py b/python/ray/serve/tests/test_config_files/pid.py index 9b30c9de510d..3b8cf91ee5ef 100644 --- a/python/ray/serve/tests/test_config_files/pid.py +++ b/python/ray/serve/tests/test_config_files/pid.py @@ -1,28 +1,71 @@ from ray import serve -from ray.serve.deployment_graph import RayServeDAGHandle + +# from ray.serve.deployment_graph import RayServeDAGHandle import os +import time +import asyncio @serve.deployment class f: - def __init__(self, name: str = "default_name"): - self.name = name + def __init__(self, async_wait: bool = False): + self._async = async_wait + self.name = "default_name" + # for __call__() + self.ready = True + self.counter = 0 + # for check_health() + self.health_check_ready = True + self.health_check_counter = 0 + + async def get_counter(self, health_check=False) -> int: + if health_check: + return self.health_check_counter + else: + return self.counter + + def send(self, clear=False, health_check=False): + if health_check: + self.health_check_ready = not clear + else: + self.ready = not clear + + def wait(self, health_check=False): + if health_check: + while 
not self.health_check_ready: + time.sleep(0.1) + else: + while not self.ready: + time.sleep(0.1) + + async def async_wait(self, health_check=False): + if health_check: + while not self.health_check_ready: + await asyncio.sleep(0.1) + else: + while not self.ready: + await asyncio.sleep(0.1) def reconfigure(self, config: dict): self.name = config.get("name", "default_name") async def __call__(self): - return os.getpid() - + self.counter += 1 + if self._async: + await self.async_wait() + else: + self.wait() -@serve.deployment -class BasicDriver: - def __init__(self, dag: RayServeDAGHandle): - self.dag = dag + return os.getpid(), self.name - async def __call__(self): - return await self.dag.remote() + async def check_health(self): + self.health_check_counter += 1 + if self._async: + await self.async_wait(health_check=True) + else: + self.wait(health_check=True) node = f.bind() -bnode = BasicDriver.bind(node) +dup_node = f.bind() +async_node = f.bind(async_wait=True) diff --git a/python/ray/serve/tests/test_controller.py b/python/ray/serve/tests/test_controller.py index 3f3a18077e9d..f5a3250e8c6b 100644 --- a/python/ray/serve/tests/test_controller.py +++ b/python/ray/serve/tests/test_controller.py @@ -1,13 +1,11 @@ import pytest import time -import copy import ray from ray import serve from ray.serve._private.common import DeploymentInfo from ray.serve.generated.serve_pb2 import DeploymentRoute -from ray.serve.controller import _generate_deployment_config_versions from ray.serve._private.constants import ( SERVE_DEFAULT_APP_NAME, DEPLOYMENT_NAME_PREFIX_SEPARATOR, @@ -51,99 +49,6 @@ def test(_): assert start_time_ms_1 == start_time_ms_2 -@pytest.mark.parametrize("last_config_had_option", [True, False]) -@pytest.mark.parametrize( - "option_to_update,config_update", - [ - ("num_replicas", True), - ("autoscaling_config", True), - ("user_config", True), - ("ray_actor_options", False), - ], -) -def test_config_versions_deployments_update( - last_config_had_option: bool, 
option_to_update: str, config_update: bool -): - """ - Check that controller._generate_deployment_config_versions() has correct behavior - when the config options in the ``deployments`` field is updated. - """ - - options = { - "num_replicas": {"old": 1, "new": 2}, - "autoscaling_config": { - "old": None, - "new": {"max_replicas": 2}, - }, - "user_config": { - "old": None, - "new": {"name": "bob"}, - }, - "ray_actor_options": { - "old": {"num_cpus": 0.1}, - "new": {"num_cpus": 0.2}, - }, - } - - old_config = { - "import_path": "ray.serve.tests.test_config_files.pid.node", - "deployments": [{"name": "f"}], - } - - if last_config_had_option: - old_config["deployments"][0][option_to_update] = options[option_to_update][ - "old" - ] - - new_config = copy.deepcopy(old_config) - new_config["deployments"][0][option_to_update] = options[option_to_update]["new"] - - versions = {"f": "v1"} - new_versions = _generate_deployment_config_versions( - new_config, old_config, versions - ) - assert ( - new_versions.get("f") is not None - and (new_versions.get("f") == versions.get("f")) == config_update - ) - - -@pytest.mark.parametrize("field_to_update", ["import_path", "runtime_env", "both"]) -def test_config_versions_non_deployments_update(field_to_update: str): - """ - Check that controller._generate_deployment_config_versions() has correct behavior - when the the ``import_path`` and ``runtime_env`` fields are updated. 
- """ - - old_config = { - "import_path": "ray.serve.tests.test_config_files.pid.node", - "deployments": [ - { - "name": "f", - "num_replicas": 1, - "ray_actor_options": {"num_cpus": 0.1}, - } - ], - } - - new_config = copy.deepcopy(old_config) - if field_to_update == "import_path": - new_config["import_path"] = "ray.serve.tests.test_config_files.pid.bnode" - elif field_to_update == "runtime_env": - new_config["runtime_env"] = {"env_vars": {"test_var": "test_val"}} - elif field_to_update == "both": - new_config["import_path"] = "ray.serve.tests.test_config_files.pid.bnode" - new_config["runtime_env"] = {"env_vars": {"test_var": "test_val"}} - - versions = {"f": "v1"} - new_versions = _generate_deployment_config_versions( - new_config, old_config, versions - ) - assert new_versions.get("f") is not None and ( - new_versions.get("f") != versions.get("f") - ) - - if __name__ == "__main__": import sys diff --git a/python/ray/serve/tests/test_deployment_state.py b/python/ray/serve/tests/test_deployment_state.py index d7ea620c4f6f..646a676fc3ba 100644 --- a/python/ray/serve/tests/test_deployment_state.py +++ b/python/ray/serve/tests/test_deployment_state.py @@ -27,6 +27,12 @@ VersionedReplica, rank_replicas_for_stopping, ) +from ray.serve._private.constants import ( + DEFAULT_GRACEFUL_SHUTDOWN_TIMEOUT_S, + DEFAULT_GRACEFUL_SHUTDOWN_WAIT_LOOP_S, + DEFAULT_HEALTH_CHECK_PERIOD_S, + DEFAULT_HEALTH_CHECK_TIMEOUT_S, +) from ray.serve._private.storage.kv_store import RayInternalKVStore from ray.serve._private.utils import get_random_letters from ray.util.scheduling_strategies import NodeAffinitySchedulingStrategy @@ -122,11 +128,11 @@ def start(self, deployment_info: DeploymentInfo, version: DeploymentVersion): self.version = version self.deployment_info = deployment_info - def update_user_config(self, user_config: Any): + def reconfigure(self, version: DeploymentVersion): self.started = True - self.version = DeploymentVersion( - self.version.code_version, 
user_config=user_config - ) + updating = self.version.requires_actor_reconfigure(version) + self.version = version + return updating def recover(self): self.recovering = True @@ -140,7 +146,7 @@ def check_ready(self) -> ReplicaStartupStatus: self.recovering = False self.started = True self.version = self.starting_version - return ready, self.version + return ready def resource_requirements(self) -> Tuple[str, str]: assert self.started @@ -194,11 +200,17 @@ def deployment_info( else: code_version = get_random_letters() - version = DeploymentVersion(code_version, info.deployment_config.user_config) + version = DeploymentVersion( + code_version, info.deployment_config, info.replica_config.ray_actor_options + ) return info, version +def deployment_version(code_version) -> DeploymentVersion: + return DeploymentVersion(code_version, DeploymentConfig(), {}) + + class MockTimer: def __init__(self, start_time=None): if start_time is None: @@ -247,7 +259,7 @@ def mock_save_checkpoint_fn(*args, **kwargs): def replica(version: Optional[DeploymentVersion] = None) -> VersionedReplica: if version is None: - version = DeploymentVersion(get_random_letters(), None) + version = DeploymentVersion(get_random_letters(), DeploymentConfig(), {}) class MockVersionedReplica(VersionedReplica): def __init__(self, version: DeploymentVersion): @@ -264,9 +276,9 @@ class TestReplicaStateContainer: def test_count(self): c = ReplicaStateContainer() r1, r2, r3 = ( - replica(DeploymentVersion("1")), - replica(DeploymentVersion("2")), - replica(DeploymentVersion("2")), + replica(deployment_version("1")), + replica(deployment_version("2")), + replica(deployment_version("2")), ) c.add(ReplicaState.STARTING, r1) c.add(ReplicaState.STARTING, r2) @@ -281,42 +293,44 @@ def test_count(self): assert c.count(states=[ReplicaState.STOPPING]) == 1 # Test filtering by version. 
- assert c.count(version=DeploymentVersion("1")) == 1 - assert c.count(version=DeploymentVersion("2")) == 2 - assert c.count(version=DeploymentVersion("3")) == 0 - assert c.count(exclude_version=DeploymentVersion("1")) == 2 - assert c.count(exclude_version=DeploymentVersion("2")) == 1 - assert c.count(exclude_version=DeploymentVersion("3")) == 3 + assert c.count(version=deployment_version("1")) == 1 + assert c.count(version=deployment_version("2")) == 2 + assert c.count(version=deployment_version("3")) == 0 + assert c.count(exclude_version=deployment_version("1")) == 2 + assert c.count(exclude_version=deployment_version("2")) == 1 + assert c.count(exclude_version=deployment_version("3")) == 3 # Test filtering by state and version. assert ( - c.count(version=DeploymentVersion("1"), states=[ReplicaState.STARTING]) == 1 + c.count(version=deployment_version("1"), states=[ReplicaState.STARTING]) + == 1 ) assert ( - c.count(version=DeploymentVersion("3"), states=[ReplicaState.STARTING]) == 0 + c.count(version=deployment_version("3"), states=[ReplicaState.STARTING]) + == 0 ) assert ( c.count( - version=DeploymentVersion("2"), + version=deployment_version("2"), states=[ReplicaState.STARTING, ReplicaState.STOPPING], ) == 2 ) assert ( c.count( - exclude_version=DeploymentVersion("1"), states=[ReplicaState.STARTING] + exclude_version=deployment_version("1"), states=[ReplicaState.STARTING] ) == 1 ) assert ( c.count( - exclude_version=DeploymentVersion("3"), states=[ReplicaState.STARTING] + exclude_version=deployment_version("3"), states=[ReplicaState.STARTING] ) == 2 ) assert ( c.count( - exclude_version=DeploymentVersion("2"), + exclude_version=deployment_version("2"), states=[ReplicaState.STARTING, ReplicaState.STOPPING], ) == 1 @@ -347,18 +361,18 @@ def test_pop_basic(self): def test_pop_exclude_version(self): c = ReplicaStateContainer() r1, r2, r3 = ( - replica(DeploymentVersion("1")), - replica(DeploymentVersion("1")), - replica(DeploymentVersion("2")), + 
replica(deployment_version("1")), + replica(deployment_version("1")), + replica(deployment_version("2")), ) c.add(ReplicaState.STARTING, r1) c.add(ReplicaState.STARTING, r2) c.add(ReplicaState.STARTING, r3) - assert c.pop(exclude_version=DeploymentVersion("1")) == [r3] - assert not c.pop(exclude_version=DeploymentVersion("1")) - assert c.pop(exclude_version=DeploymentVersion("2")) == [r1, r2] - assert not c.pop(exclude_version=DeploymentVersion("2")) + assert c.pop(exclude_version=deployment_version("1")) == [r3] + assert not c.pop(exclude_version=deployment_version("1")) + assert c.pop(exclude_version=deployment_version("2")) == [r1, r2] + assert not c.pop(exclude_version=deployment_version("2")) assert not c.pop() def test_pop_max_replicas(self): @@ -409,10 +423,10 @@ def test_pop_states(self): def test_pop_integration(self): c = ReplicaStateContainer() r1, r2, r3, r4 = ( - replica(DeploymentVersion("1")), - replica(DeploymentVersion("2")), - replica(DeploymentVersion("2")), - replica(DeploymentVersion("3")), + replica(deployment_version("1")), + replica(deployment_version("2")), + replica(deployment_version("2")), + replica(deployment_version("3")), ) c.add(ReplicaState.STOPPING, r1) @@ -420,35 +434,35 @@ def test_pop_integration(self): c.add(ReplicaState.RUNNING, r3) c.add(ReplicaState.RUNNING, r4) assert not c.pop( - exclude_version=DeploymentVersion("1"), states=[ReplicaState.STOPPING] + exclude_version=deployment_version("1"), states=[ReplicaState.STOPPING] ) assert c.pop( - exclude_version=DeploymentVersion("1"), + exclude_version=deployment_version("1"), states=[ReplicaState.RUNNING], max_replicas=1, ) == [r3] assert c.pop( - exclude_version=DeploymentVersion("1"), + exclude_version=deployment_version("1"), states=[ReplicaState.RUNNING], max_replicas=1, ) == [r4] c.add(ReplicaState.RUNNING, r3) c.add(ReplicaState.RUNNING, r4) assert c.pop( - exclude_version=DeploymentVersion("1"), states=[ReplicaState.RUNNING] + exclude_version=deployment_version("1"), 
states=[ReplicaState.RUNNING] ) == [r3, r4] assert c.pop( - exclude_version=DeploymentVersion("1"), states=[ReplicaState.STARTING] + exclude_version=deployment_version("1"), states=[ReplicaState.STARTING] ) == [r2] c.add(ReplicaState.STARTING, r2) c.add(ReplicaState.RUNNING, r3) c.add(ReplicaState.RUNNING, r4) assert c.pop( - exclude_version=DeploymentVersion("1"), + exclude_version=deployment_version("1"), states=[ReplicaState.RUNNING, ReplicaState.STARTING], ) == [r3, r4, r2] assert c.pop( - exclude_version=DeploymentVersion("nonsense"), + exclude_version=deployment_version("nonsense"), states=[ReplicaState.STOPPING], ) == [r1] @@ -805,8 +819,21 @@ def test_redeploy_new_version(mock_get_all_node_ids, mock_deployment_state): @pytest.mark.parametrize("mock_deployment_state", [True, False], indirect=True) +@pytest.mark.parametrize( + "option,value", + [ + ("user_config", {"hello": "world"}), + ("max_concurrent_queries", 10), + ("graceful_shutdown_timeout_s", DEFAULT_GRACEFUL_SHUTDOWN_TIMEOUT_S + 1), + ("graceful_shutdown_wait_loop_s", DEFAULT_GRACEFUL_SHUTDOWN_WAIT_LOOP_S + 1), + ("health_check_period_s", DEFAULT_HEALTH_CHECK_PERIOD_S + 1), + ("health_check_timeout_s", DEFAULT_HEALTH_CHECK_TIMEOUT_S + 1), + ], +) @patch.object(DriverDeploymentState, "_get_all_node_ids") -def test_deploy_new_config_same_version(mock_get_all_node_ids, mock_deployment_state): +def test_deploy_new_config_same_code_version( + mock_get_all_node_ids, mock_deployment_state, option, value +): # Deploying a new config with the same version should not deploy a new # replica. deployment_state, timer = mock_deployment_state @@ -829,8 +856,8 @@ def test_deploy_new_config_same_version(mock_get_all_node_ids, mock_deployment_s ) assert deployment_state.curr_status_info.status == DeploymentStatus.HEALTHY - # Update to a new config without changing the version. 
- b_info_2, b_version_2 = deployment_info(version="1", user_config={"hello": "world"}) + # Update to a new config without changing the code version. + b_info_2, b_version_2 = deployment_info(version="1", **{option: value}) updated = deployment_state.deploy(b_info_2) assert updated assert deployment_state.curr_status_info.status == DeploymentStatus.UPDATING @@ -841,17 +868,17 @@ def test_deploy_new_config_same_version(mock_get_all_node_ids, mock_deployment_s by_state=[(ReplicaState.RUNNING, 1)], ) - deployment_state.update() - check_counts(deployment_state, total=1) - check_counts( - deployment_state, - version=b_version_2, - total=1, - by_state=[(ReplicaState.UPDATING, 1)], - ) - - # Mark the replica as ready. - deployment_state._replicas.get()[0]._actor.set_ready() + if option in ["user_config", "graceful_shutdown_wait_loop_s"]: + deployment_state.update() + check_counts(deployment_state, total=1) + check_counts( + deployment_state, + version=b_version_2, + total=1, + by_state=[(ReplicaState.UPDATING, 1)], + ) + # Mark the replica as ready. + deployment_state._replicas.get()[0]._actor.set_ready() deployment_state.update() check_counts(deployment_state, total=1) diff --git a/python/ray/serve/tests/test_deployment_version.py b/python/ray/serve/tests/test_deployment_version.py index 63a8fe823de4..cb63917ffd13 100644 --- a/python/ray/serve/tests/test_deployment_version.py +++ b/python/ray/serve/tests/test_deployment_version.py @@ -2,24 +2,17 @@ import ray from ray.serve._private.deployment_state import DeploymentVersion +from ray.serve.config import DeploymentConfig def test_validation(): # Code version must be a string. with pytest.raises(TypeError): - DeploymentVersion(123, None) - - # Can't pass unhashable type as user config. - with pytest.raises(TypeError): - DeploymentVersion(123, set()) - - # Can't pass nested unhashable type as user config. 
- with pytest.raises(TypeError): - DeploymentVersion(123, {"set": set()}) + DeploymentVersion(123, DeploymentConfig(), {}) def test_other_type_equality(): - v = DeploymentVersion("1", None) + v = DeploymentVersion("1", DeploymentConfig(), {}) assert v is not None assert v != "1" @@ -27,9 +20,9 @@ def test_other_type_equality(): def test_code_version(): - v1 = DeploymentVersion("1", None) - v2 = DeploymentVersion("1", None) - v3 = DeploymentVersion("2", None) + v1 = DeploymentVersion("1", DeploymentConfig(), {}) + v2 = DeploymentVersion("1", DeploymentConfig(), {}) + v3 = DeploymentVersion("2", DeploymentConfig(), {}) assert v1 == v2 assert hash(v1) == hash(v2) @@ -37,10 +30,10 @@ def test_code_version(): assert hash(v1) != hash(v3) -def test_user_config_basic(): - v1 = DeploymentVersion("1", "1") - v2 = DeploymentVersion("1", "1") - v3 = DeploymentVersion("1", "2") +def test_deployment_config_basic(): + v1 = DeploymentVersion("1", DeploymentConfig(user_config="1"), {}) + v2 = DeploymentVersion("1", DeploymentConfig(user_config="1"), {}) + v3 = DeploymentVersion("1", DeploymentConfig(user_config="2"), {}) assert v1 == v2 assert hash(v1) == hash(v2) @@ -49,9 +42,9 @@ def test_user_config_basic(): def test_user_config_hashable(): - v1 = DeploymentVersion("1", ("1", "2")) - v2 = DeploymentVersion("1", ("1", "2")) - v3 = DeploymentVersion("1", ("1", "3")) + v1 = DeploymentVersion("1", DeploymentConfig(user_config=("1", "2")), {}) + v2 = DeploymentVersion("1", DeploymentConfig(user_config=("1", "2")), {}) + v3 = DeploymentVersion("1", DeploymentConfig(user_config=("1", "3")), {}) assert v1 == v2 assert hash(v1) == hash(v2) @@ -60,9 +53,9 @@ def test_user_config_hashable(): def test_user_config_list(): - v1 = DeploymentVersion("1", ["1", "2"]) - v2 = DeploymentVersion("1", ["1", "2"]) - v3 = DeploymentVersion("1", ["1", "3"]) + v1 = DeploymentVersion("1", DeploymentConfig(user_config=["1", "2"]), {}) + v2 = DeploymentVersion("1", DeploymentConfig(user_config=["1", "2"]), 
{}) + v3 = DeploymentVersion("1", DeploymentConfig(user_config=["1", "3"]), {}) assert v1 == v2 assert hash(v1) == hash(v2) @@ -71,9 +64,9 @@ def test_user_config_list(): def test_user_config_dict_keys(): - v1 = DeploymentVersion("1", {"1": "1"}) - v2 = DeploymentVersion("1", {"1": "1"}) - v3 = DeploymentVersion("1", {"2": "1"}) + v1 = DeploymentVersion("1", DeploymentConfig(user_config={"1": "1"}), {}) + v2 = DeploymentVersion("1", DeploymentConfig(user_config={"1": "1"}), {}) + v3 = DeploymentVersion("1", DeploymentConfig(user_config={"2": "1"}), {}) assert v1 == v2 assert hash(v1) == hash(v2) @@ -82,9 +75,9 @@ def test_user_config_dict_keys(): def test_user_config_dict_vals(): - v1 = DeploymentVersion("1", {"1": "1"}) - v2 = DeploymentVersion("1", {"1": "1"}) - v3 = DeploymentVersion("1", {"1": "2"}) + v1 = DeploymentVersion("1", DeploymentConfig(user_config={"1": "1"}), {}) + v2 = DeploymentVersion("1", DeploymentConfig(user_config={"1": "1"}), {}) + v3 = DeploymentVersion("1", DeploymentConfig(user_config={"1": "2"}), {}) assert v1 == v2 assert hash(v1) == hash(v2) @@ -93,9 +86,15 @@ def test_user_config_dict_vals(): def test_user_config_nested(): - v1 = DeploymentVersion("1", [{"1": "2"}, {"1": "2"}]) - v2 = DeploymentVersion("1", [{"1": "2"}, {"1": "2"}]) - v3 = DeploymentVersion("1", [{"1": "2"}, {"1": "3"}]) + v1 = DeploymentVersion( + "1", DeploymentConfig(user_config=[{"1": "2"}, {"1": "2"}]), {} + ) + v2 = DeploymentVersion( + "1", DeploymentConfig(user_config=[{"1": "2"}, {"1": "2"}]), {} + ) + v3 = DeploymentVersion( + "1", DeploymentConfig(user_config=[{"1": "2"}, {"1": "3"}]), {} + ) assert v1 == v2 assert hash(v1) == hash(v2) @@ -104,9 +103,101 @@ def test_user_config_nested(): def test_user_config_nested_in_hashable(): - v1 = DeploymentVersion("1", ([{"1": "2"}, {"1": "2"}],)) - v2 = DeploymentVersion("1", ([{"1": "2"}, {"1": "2"}],)) - v3 = DeploymentVersion("1", ([{"1": "2"}, {"1": "3"}],)) + v1 = DeploymentVersion( + "1", 
DeploymentConfig(user_config=([{"1": "2"}, {"1": "2"}])), {} + ) + v2 = DeploymentVersion( + "1", DeploymentConfig(user_config=([{"1": "2"}, {"1": "2"}])), {} + ) + v3 = DeploymentVersion( + "1", DeploymentConfig(user_config=([{"1": "2"}, {"1": "3"}])), {} + ) + + assert v1 == v2 + assert hash(v1) == hash(v2) + assert v1 != v3 + assert hash(v1) != hash(v3) + + +def test_num_replicas(): + v1 = DeploymentVersion("1", DeploymentConfig(num_replicas=1), {}) + v2 = DeploymentVersion("1", DeploymentConfig(num_replicas=2), {}) + + assert v1 == v2 + assert hash(v1) == hash(v2) + + +def test_autoscaling_config(): + v1 = DeploymentVersion( + "1", DeploymentConfig(autoscaling_config={"max_replicas": 2}), {} + ) + v2 = DeploymentVersion( + "1", DeploymentConfig(autoscaling_config={"max_replicas": 5}), {} + ) + + assert v1 == v2 + assert hash(v1) == hash(v2) + + +def test_max_concurrent_queries(): + v1 = DeploymentVersion("1", DeploymentConfig(max_concurrent_queries=5), {}) + v2 = DeploymentVersion("1", DeploymentConfig(max_concurrent_queries=5), {}) + v3 = DeploymentVersion("1", DeploymentConfig(max_concurrent_queries=10), {}) + + assert v1 == v2 + assert hash(v1) == hash(v2) + assert v1 != v3 + assert hash(v1) != hash(v3) + + +def test_health_check_period_s(): + v1 = DeploymentVersion("1", DeploymentConfig(health_check_period_s=5), {}) + v2 = DeploymentVersion("1", DeploymentConfig(health_check_period_s=5), {}) + v3 = DeploymentVersion("1", DeploymentConfig(health_check_period_s=10), {}) + + assert v1 == v2 + assert hash(v1) == hash(v2) + assert v1 != v3 + assert hash(v1) != hash(v3) + + +def test_health_check_timeout_s(): + v1 = DeploymentVersion("1", DeploymentConfig(health_check_timeout_s=5), {}) + v2 = DeploymentVersion("1", DeploymentConfig(health_check_timeout_s=5), {}) + v3 = DeploymentVersion("1", DeploymentConfig(health_check_timeout_s=10), {}) + + assert v1 == v2 + assert hash(v1) == hash(v2) + assert v1 != v3 + assert hash(v1) != hash(v3) + + +def 
test_graceful_shutdown_timeout_s(): + v1 = DeploymentVersion("1", DeploymentConfig(graceful_shutdown_timeout_s=5), {}) + v2 = DeploymentVersion("1", DeploymentConfig(graceful_shutdown_timeout_s=5), {}) + v3 = DeploymentVersion("1", DeploymentConfig(graceful_shutdown_timeout_s=10), {}) + + assert v1 == v2 + assert hash(v1) == hash(v2) + assert v1 != v3 + assert hash(v1) != hash(v3) + + +def test_graceful_shutdown_wait_loop_s(): + v1 = DeploymentVersion("1", DeploymentConfig(graceful_shutdown_wait_loop_s=5), {}) + v2 = DeploymentVersion("1", DeploymentConfig(graceful_shutdown_wait_loop_s=5), {}) + v3 = DeploymentVersion("1", DeploymentConfig(graceful_shutdown_wait_loop_s=10), {}) + + assert v1 == v2 + assert hash(v1) == hash(v2) + assert v1 != v3 + assert hash(v1) != hash(v3) + + +def test_ray_actor_options(): + v1 = DeploymentVersion("1", DeploymentConfig(), {"num_cpus": 0.1}) + v2 = DeploymentVersion("1", DeploymentConfig(), {"num_cpus": 0.1}) + v3 = DeploymentVersion("1", DeploymentConfig(), {"num_gpus": 0.1}) assert v1 == v2 assert hash(v1) == hash(v2) @@ -117,7 +208,11 @@ def test_user_config_nested_in_hashable(): def test_hash_consistent_across_processes(serve_instance): @ray.remote def get_version(): - return DeploymentVersion("1", ([{"1": "2"}, {"1": "2"}],)) + return DeploymentVersion( + "1", + DeploymentConfig(user_config=([{"1": "2"}, {"1": "2"}],)), + {}, + ) assert len(set(ray.get([get_version.remote() for _ in range(100)]))) == 1 diff --git a/python/ray/serve/tests/test_standalone2.py b/python/ray/serve/tests/test_standalone2.py index fb6dabd5d4eb..6f627f36daac 100644 --- a/python/ray/serve/tests/test_standalone2.py +++ b/python/ray/serve/tests/test_standalone2.py @@ -5,6 +5,7 @@ from contextlib import contextmanager from typing import Dict, Set from concurrent.futures.thread import ThreadPoolExecutor +from functools import partial import pytest import requests @@ -22,7 +23,7 @@ from ray.exceptions import RayActorError from ray.serve.exceptions import 
RayServeException from ray.serve._private.client import ServeControllerClient -from ray.serve._private.common import ApplicationStatus, DeploymentStatus +from ray.serve._private.common import ApplicationStatus, DeploymentStatus, ReplicaState from ray.serve._private.constants import ( SERVE_NAMESPACE, SERVE_DEFAULT_APP_NAME, @@ -442,6 +443,33 @@ def client(self): timeout=15, ) + def check_deployment_running(self, client: ServeControllerClient, name: str): + serve_status = client.get_serve_status() + return ( + serve_status.get_deployment_status(name) is not None + and serve_status.app_status.status == ApplicationStatus.RUNNING + and serve_status.get_deployment_status(name).status + == DeploymentStatus.HEALTHY + ) + + def check_deployments_dead(self, deployment_names): + actor_names = [ + actor["class_name"] + for actor in list_actors( + filters=[("state", "=", "ALIVE")], + ) + ] + return all( + f"ServeReplica:{name}" not in actor_names for name in deployment_names + ) + + def get_num_replicas(self, client: ServeControllerClient, deployment_name: str): + replicas = ray.get( + client._controller._dump_replica_states_for_testing.remote(deployment_name) + ) + running_replicas = replicas.get([ReplicaState.RUNNING]) + return len(running_replicas) + def get_test_config(self) -> Dict: return {"import_path": "ray.serve.tests.test_config_files.pizza.serve_dag"} @@ -1108,81 +1136,274 @@ def test_controller_recover_and_deploy(self, client: ServeControllerClient): assert client.get_serve_status().app_status.deployment_timestamp == 0 @pytest.mark.parametrize( - "field_to_update,option_to_update,config_update", - [ - ("import_path", "", False), - ("runtime_env", "", False), - ("deployments", "num_replicas", True), - ("deployments", "autoscaling_config", True), - ("deployments", "user_config", True), - ("deployments", "ray_actor_options", False), - ], + "field_to_update", + ["import_path", "runtime_env", "ray_actor_options"], ) - def test_deploy_config_update( - self, - client: 
ServeControllerClient, - field_to_update: str, - option_to_update: str, - config_update: bool, + def test_deploy_config_update_heavyweight( + self, client: ServeControllerClient, field_to_update: str ): - """ - Check that replicas stay alive when lightweight config updates are made and - replicas are torn down when code updates are made. - """ - - def deployment_running(): - name = f"{SERVE_DEFAULT_APP_NAME}{DEPLOYMENT_NAME_PREFIX_SEPARATOR}f" - serve_status = client.get_serve_status() - return ( - serve_status.get_deployment_status(name) is not None - and serve_status.app_status.status == ApplicationStatus.RUNNING - and serve_status.get_deployment_status(name).status - == DeploymentStatus.HEALTHY - ) - + """Check that replicas are torn down when code updates are made.""" + name = f"{SERVE_DEFAULT_APP_NAME}{DEPLOYMENT_NAME_PREFIX_SEPARATOR}f" config_template = { "import_path": "ray.serve.tests.test_config_files.pid.node", "deployments": [ { "name": "f", "autoscaling_config": None, - "user_config": None, + "user_config": {"name": "alice"}, "ray_actor_options": {"num_cpus": 0.1}, }, ], } client.deploy_apps(ServeApplicationSchema.parse_obj(config_template)) - wait_for_condition(deployment_running, timeout=15) - pid1 = requests.get("http://localhost:8000/f").text + wait_for_condition( + partial(self.check_deployment_running, client, name), timeout=15 + ) + pid1, _ = requests.get("http://localhost:8000/f").json() if field_to_update == "import_path": config_template[ "import_path" - ] = "ray.serve.tests.test_config_files.pid.bnode" + ] = "ray.serve.tests.test_config_files.pid.dup_node" elif field_to_update == "runtime_env": config_template["runtime_env"] = {"env_vars": {"test_var": "test_val"}} - elif field_to_update == "deployments": - updated_options = { - "num_replicas": 2, - "autoscaling_config": {"max_replicas": 2}, - "user_config": {"name": "bob"}, - "ray_actor_options": {"num_cpus": 0.2}, - } - config_template["deployments"][0][option_to_update] = updated_options[ 
- option_to_update - ] + elif field_to_update == "ray_actor_options": + config_template["deployments"][0]["ray_actor_options"] = {"num_cpus": 0.2} client.deploy_apps(ServeApplicationSchema.parse_obj(config_template)) - wait_for_condition(deployment_running, timeout=15) + wait_for_condition( + partial(self.check_deployment_running, client, name), timeout=15 + ) # This assumes that Serve implements round-robin routing for its replicas. As # long as that doesn't change, this test shouldn't be flaky; however if that # routing ever changes, this test could become mysteriously flaky pids = [] for _ in range(4): - pids.append(requests.get("http://localhost:8000/f").text) - assert (pid1 in pids) == config_update + pids.append(requests.get("http://localhost:8000/f").json()[0]) + assert pid1 not in pids + + def test_update_config_user_config(self, client: ServeControllerClient): + """Check that replicas stay alive when user config is updated.""" + + name = f"{SERVE_DEFAULT_APP_NAME}{DEPLOYMENT_NAME_PREFIX_SEPARATOR}f" + config_template = { + "import_path": "ray.serve.tests.test_config_files.pid.node", + "deployments": [{"name": "f", "user_config": {"name": "alice"}}], + } + + # Deploy first time + client.deploy_apps(ServeApplicationSchema.parse_obj(config_template)) + wait_for_condition( + partial(self.check_deployment_running, client, name), timeout=15 + ) + + # Query + pid1, res = requests.get("http://localhost:8000/f").json() + assert res == "alice" + + # Redeploy with updated option + config_template["deployments"][0]["user_config"] = {"name": "bob"} + client.deploy_apps(ServeApplicationSchema.parse_obj(config_template)) + wait_for_condition( + partial(self.check_deployment_running, client, name), timeout=15 + ) + + # This assumes that Serve implements round-robin routing for its replicas. 
As + # long as that doesn't change, this test shouldn't be flaky; however if that + # routing ever changes, this test could become mysteriously flaky + # Query + pids = [] + for _ in range(4): + pid, res = requests.get("http://localhost:8000/f").json() + assert res == "bob" + pids.append(pid) + assert pid1 in pids + + def test_update_config_graceful_shutdown_timeout( + self, client: ServeControllerClient + ): + """Check that replicas stay alive when graceful_shutdown_timeout_s is updated""" + name = f"{SERVE_DEFAULT_APP_NAME}{DEPLOYMENT_NAME_PREFIX_SEPARATOR}f" + config_template = { + "import_path": "ray.serve.tests.test_config_files.pid.node", + "deployments": [{"name": "f", "graceful_shutdown_timeout_s": 1000}], + } + + # Deploy first time + client.deploy_apps(ServeApplicationSchema.parse_obj(config_template)) + wait_for_condition( + partial(self.check_deployment_running, client, name), timeout=15 + ) + handle = client.get_handle(name) + + # Start off with signal ready, and send query + ray.get(handle.send.remote()) + pid1 = ray.get(handle.remote())[0] + print("PID of replica after first deployment:", pid1) + + # Redeploy with shutdown timeout set to 5 seconds + config_template["deployments"][0]["graceful_shutdown_timeout_s"] = 5 + client.deploy_apps(ServeApplicationSchema.parse_obj(config_template)) + wait_for_condition( + partial(self.check_deployment_running, client, name), timeout=15 + ) + + pid2 = ray.get(handle.remote())[0] + assert pid1 == pid2 + print("PID of replica after redeployment:", pid2) + + # Send blocking query + handle.send.remote(clear=True) + handle.remote() + # Try to delete deployment, should be blocked until the timeout at 5 seconds + client.delete_deployments([name], blocking=False) + # Replica should be dead within 10 second timeout, which means + # graceful_shutdown_timeout_s was successfully updated lightweightly + wait_for_condition(partial(self.check_deployments_dead, ["f"])) + + def test_update_config_max_concurrent_queries(self, 
client: ServeControllerClient): + """Check that replicas stay alive when max_concurrent_queries is updated.""" + + url = "http://localhost:8000/f" + name = f"{SERVE_DEFAULT_APP_NAME}{DEPLOYMENT_NAME_PREFIX_SEPARATOR}f" + config_template = { + "import_path": "ray.serve.tests.test_config_files.pid.async_node", + "deployments": [{"name": "f", "max_concurrent_queries": 1000}], + } + + # Deploy first time + client.deploy_apps(ServeApplicationSchema.parse_obj(config_template)) + wait_for_condition( + partial(self.check_deployment_running, client, name), timeout=15 + ) + handle = client.get_handle(name) + # Block on calls + ray.get(handle.send.remote(clear=True)) + + with ThreadPoolExecutor() as pool: + # Send 10 queries + futs = [pool.submit(partial(requests.get, url)) for _ in range(10)] + wait_for_condition(lambda: 10 == ray.get(handle.get_counter.remote())) + + # Unblock + ray.get(handle.send.remote()) + pids = [fut.result().json()[0] for fut in futs] + pid1 = pids[0] + # Check all returned pids are the same, meaning requests were served by the + # same replica + assert all(pid == pid1 for pid in pids) + + # Redeploy with max concurrent queries set to 2 + config_template["deployments"][0]["max_concurrent_queries"] = 2 + client.deploy_apps(ServeApplicationSchema.parse_obj(config_template)) + wait_for_condition( + partial(self.check_deployment_running, client, name), timeout=15 + ) + + # Re-block + ray.get(handle.send.remote(clear=True)) + + with ThreadPoolExecutor() as pool: + # Send 3 queries + futs = [pool.submit(partial(requests.get, url)) for _ in range(3)] + # Only 2 out of the 3 queries should have been sent to the replica because + # max concurrent queries is 2 + time.sleep(10) + assert ray.get(handle.get_counter.remote()) < 103 + + # Unblock + ray.get(handle.send.remote()) + pids = [fut.result().json()[0] for fut in futs] + pid2 = pids[0] + assert all(pid == pid2 for pid in pids) + + # Check that it's the same replica, it didn't get teared down + assert pid1 == 
pid2 + + def test_update_config_health_check_period(self, client: ServeControllerClient): + """Check that replicas stay alive when max_concurrent_queries is updated.""" + + name = f"{SERVE_DEFAULT_APP_NAME}{DEPLOYMENT_NAME_PREFIX_SEPARATOR}f" + config_template = { + "import_path": "ray.serve.tests.test_config_files.pid.async_node", + "deployments": [{"name": "f", "health_check_period_s": 100}], + } + + # Deploy first time, wait for replica running and deployment healthy + client.deploy_apps(ServeApplicationSchema.parse_obj(config_template)) + wait_for_condition( + partial(self.check_deployment_running, client, name), timeout=15 + ) + handle = client.get_handle(name) + pid1 = ray.get(handle.remote())[0] + # Health check counter shouldn't increase beyond any initial health checks done + # upon replica actor startup + initial_counter = ray.get(handle.get_counter.remote(health_check=True)) + time.sleep(5) + assert initial_counter == ray.get(handle.get_counter.remote(health_check=True)) + + # Redeploy with health check period reduced to 1 second + config_template["deployments"][0]["health_check_period_s"] = 0.1 + client.deploy_apps(ServeApplicationSchema.parse_obj(config_template)) + wait_for_condition( + partial(self.check_deployment_running, client, name), timeout=15 + ) + # health check counter should now very quickly increase + wait_for_condition( + lambda: ray.get(handle.get_counter.remote(health_check=True)) >= 30, + retry_interval_ms=1000, + timeout=5, + ) + + # Check that it's the same replica, it didn't get teared down + pid2 = ray.get(handle.remote())[0] + assert pid1 == pid2 + + def test_update_config_health_check_timeout(self, client: ServeControllerClient): + """Check that replicas stay alive when max_concurrent_queries is updated.""" + + name = f"{SERVE_DEFAULT_APP_NAME}{DEPLOYMENT_NAME_PREFIX_SEPARATOR}f" + # Deploy with a very long initial health_check_timeout_s + # Also set small health_check_period_s to make test run faster + config_template = { + 
"import_path": "ray.serve.tests.test_config_files.pid.async_node", + "deployments": [ + { + "name": "f", + "health_check_period_s": 1, + "health_check_timeout_s": 1000, + } + ], + } + + # Deploy first time, wait for replica running and deployment healthy + client.deploy_apps(ServeApplicationSchema.parse_obj(config_template)) + wait_for_condition( + partial(self.check_deployment_running, client, name), timeout=15 + ) + handle = client.get_handle(name) + pid1 = ray.get(handle.remote())[0] + + # Redeploy with health check timeout reduced to 1 second + config_template["deployments"][0]["health_check_timeout_s"] = 1 + client.deploy_apps(ServeApplicationSchema.parse_obj(config_template)) + wait_for_condition( + partial(self.check_deployment_running, client, name), timeout=15 + ) + # Check that it's the same replica, it didn't get teared down + # (needs to be done before the tests below because the replica will be marked + # unhealthy then stopped and restarted) + pid2 = ray.get(handle.remote())[0] + assert pid1 == pid2 + + # Block in health check + ray.get(handle.send.remote(clear=True, health_check=True)) + wait_for_condition( + lambda: client.get_serve_status().get_deployment_status(name).status + == DeploymentStatus.UNHEALTHY + ) def test_deploy_separate_runtime_envs(self, client: ServeControllerClient): """Deploy two applications with separate runtime envs.""" @@ -1398,7 +1619,9 @@ def check_app_status(): assert info_valid def test_deploy_nonexistent_deployment(self, client: ServeControllerClient): - """Remove an application from a config, it should reach a deleting state.""" + """Apply a config that lists a deployment that doesn't exist in the application. + The error message should be descriptive. 
+ """ config = ServeDeploySchema.parse_obj(self.get_test_deploy_config()) # Change names to invalid names that don't contain "deployment" or "application" @@ -1417,6 +1640,31 @@ def check_app_message(): wait_for_condition(check_app_message) + def test_deployments_not_listed_in_config(self, client: ServeControllerClient): + """Apply a config without the app's deployments listed. The deployments should + not redeploy. + """ + + name = f"{SERVE_DEFAULT_APP_NAME}{DEPLOYMENT_NAME_PREFIX_SEPARATOR}f" + config = {"import_path": "ray.serve.tests.test_config_files.pid.node"} + client.deploy_apps(ServeApplicationSchema(**config)) + wait_for_condition( + partial(self.check_deployment_running, client, name), timeout=15 + ) + pid1, _ = requests.get("http://localhost:8000/f").json() + + # Redeploy the same config (with no deployments listed) + client.deploy_apps(ServeApplicationSchema(**config)) + wait_for_condition( + partial(self.check_deployment_running, client, name), timeout=15 + ) + + # It should be the same replica actor + pids = [] + for _ in range(4): + pids.append(requests.get("http://localhost:8000/f").json()[0]) + assert all(pid == pid1 for pid in pids) + class TestServeRequestProcessingTimeoutS: @pytest.mark.parametrize( diff --git a/src/ray/protobuf/serve.proto b/src/ray/protobuf/serve.proto index 9f47aa518f46..7d0850830c48 100644 --- a/src/ray/protobuf/serve.proto +++ b/src/ray/protobuf/serve.proto @@ -150,7 +150,8 @@ message ActorNameList { message DeploymentVersion { string code_version = 1; - bytes user_config = 2; + DeploymentConfig deployment_config = 2; + string ray_actor_options = 3; } message ReplicaConfig { From a25f95a3f2d2ab1a304b6f59e091a2f98b6998cb Mon Sep 17 00:00:00 2001 From: Artur Niederfahrenhorst Date: Wed, 26 Apr 2023 23:03:49 +0200 Subject: [PATCH 112/424] [RLlib] Migrate all remaining RLlib release tests to GCE (#34668) Signed-off-by: Artur Niederfahrenhorst --- release/release_tests.yaml | 292 +++++++++++++++--- 
...node_checkpointing_compute_config_gce.yaml | 17 + 2 files changed, 268 insertions(+), 41 deletions(-) create mode 100644 release/rllib_tests/multi_node_checkpointing_compute_config_gce.yaml diff --git a/release/release_tests.yaml b/release/release_tests.yaml index 949713a6ccde..18ff12e9e274 100644 --- a/release/release_tests.yaml +++ b/release/release_tests.yaml @@ -3381,6 +3381,15 @@ alert: default + variations: + - __suffix__: aws + - __suffix__: gce + env: gce + frequency: manual + cluster: + cluster_env: app_config.yaml + cluster_compute: multi_node_checkpointing_compute_config_gce.yaml + - name: rllib_learning_tests_a2c_tf group: RLlib tests @@ -3397,9 +3406,17 @@ timeout: 18000 script: python learning_tests/run.py --yaml-sub-dir=a2c --framework=tf - alert: default + variations: + - __suffix__: aws + - __suffix__: gce + env: gce + frequency: manual + cluster: + cluster_env: app_config.yaml + cluster_compute: 1gpu_16cpus_gce.yaml + - name: rllib_learning_tests_a2c_torch group: RLlib tests working_dir: rllib_tests @@ -3417,6 +3434,15 @@ alert: default + variations: + - __suffix__: aws + - __suffix__: gce + env: gce + frequency: manual + cluster: + cluster_env: app_config.yaml + cluster_compute: 1gpu_16cpus_gce.yaml + - name: rllib_learning_tests_a3c_tf group: RLlib tests working_dir: rllib_tests @@ -3432,29 +3458,16 @@ timeout: 18000 script: python learning_tests/run.py --yaml-sub-dir=a3c --framework=tf - alert: default -# TODO(sven, jungong, Kourosh): fix A3C on torch and tf2 and re-enable. 
-#- name: rllib_learning_tests_a3c_torch -# group: RLlib tests -# working_dir: rllib_tests - -# frequency: nightly -# team: rllib -# env: staging_v2 - -# cluster: -# cluster_env: app_config.yaml -# cluster_compute: 32cpus.yaml - -# run: -# timeout: 18000 -# script: python learning_tests/run.py --yaml-sub-dir=a3c --framework=torch -# type: anyscale_job -# file_manager: job - -# alert: default + variations: + - __suffix__: aws + - __suffix__: gce + env: gce + frequency: manual + cluster: + cluster_env: app_config.yaml + cluster_compute: 32cpus_gce.yaml - name: rllib_learning_tests_apex_tf group: RLlib tests @@ -3474,9 +3487,17 @@ timeout: 18000 script: python learning_tests/run.py --yaml-sub-dir=apex --framework=tf - alert: default + variations: + - __suffix__: aws + - __suffix__: gce + env: gce + frequency: manual + cluster: + cluster_env: app_config.yaml + cluster_compute: 1gpu_24cpus_gce.yaml + - name: rllib_learning_tests_apex_torch group: RLlib tests working_dir: rllib_tests @@ -3494,6 +3515,15 @@ alert: default + variations: + - __suffix__: aws + - __suffix__: gce + env: gce + frequency: manual + cluster: + cluster_env: app_config.yaml + cluster_compute: 1gpu_24cpus_gce.yaml + - name: rllib_learning_tests_appo_tf group: RLlib tests working_dir: rllib_tests @@ -3540,6 +3570,15 @@ alert: default + variations: + - __suffix__: aws + - __suffix__: gce + env: gce + frequency: manual + cluster: + cluster_env: app_config.yaml + cluster_compute: 2gpus_32cpus_gce.yaml + - name: rllib_learning_tests_bc_tf group: RLlib tests working_dir: rllib_tests @@ -3557,6 +3596,15 @@ alert: default + variations: + - __suffix__: aws + - __suffix__: gce + env: gce + frequency: manual + cluster: + cluster_env: app_config.yaml + cluster_compute: 1gpu_16cpus_gce.yaml + - name: rllib_learning_tests_bc_torch group: RLlib tests working_dir: rllib_tests @@ -3574,6 +3622,15 @@ alert: default + variations: + - __suffix__: aws + - __suffix__: gce + env: gce + frequency: manual + cluster: + 
cluster_env: app_config.yaml + cluster_compute: 1gpu_16cpus_gce.yaml + - name: rllib_learning_tests_cql_tf group: RLlib tests working_dir: rllib_tests @@ -3594,6 +3651,15 @@ alert: default + variations: + - __suffix__: aws + - __suffix__: gce + env: gce + frequency: manual + cluster: + cluster_env: app_config.yaml + cluster_compute: 1gpu_16cpus_gce.yaml + - name: rllib_learning_tests_cql_torch group: RLlib tests working_dir: rllib_tests @@ -3614,6 +3680,15 @@ alert: default + variations: + - __suffix__: aws + - __suffix__: gce + env: gce + frequency: manual + cluster: + cluster_env: app_config.yaml + cluster_compute: 1gpu_16cpus_gce.yaml + - name: rllib_learning_tests_ddpg_tf group: RLlib tests working_dir: rllib_tests @@ -3629,9 +3704,17 @@ timeout: 18000 script: python learning_tests/run.py --yaml-sub-dir=ddpg --framework=tf - alert: default + variations: + - __suffix__: aws + - __suffix__: gce + env: gce + frequency: manual + cluster: + cluster_env: app_config.yaml + cluster_compute: 1gpu_16cpus_gce.yaml + - name: rllib_learning_tests_ddpg_torch group: RLlib tests working_dir: rllib_tests @@ -3647,9 +3730,17 @@ timeout: 18000 script: python learning_tests/run.py --yaml-sub-dir=ddpg --framework=torch - alert: default + variations: + - __suffix__: aws + - __suffix__: gce + env: gce + frequency: manual + cluster: + cluster_env: app_config.yaml + cluster_compute: 1gpu_16cpus_gce.yaml + - name: rllib_learning_tests_dqn_tf group: RLlib tests working_dir: rllib_tests @@ -3665,9 +3756,17 @@ timeout: 18000 script: python learning_tests/run.py --yaml-sub-dir=dqn --framework=tf - alert: default + variations: + - __suffix__: aws + - __suffix__: gce + env: gce + frequency: manual + cluster: + cluster_env: app_config.yaml + cluster_compute: 1gpu_16cpus_gce.yaml + - name: rllib_learning_tests_dqn_torch group: RLlib tests working_dir: rllib_tests @@ -3686,9 +3785,17 @@ timeout: 18000 script: python learning_tests/run.py --yaml-sub-dir=dqn --framework=torch - alert: default + 
variations: + - __suffix__: aws + - __suffix__: gce + env: gce + frequency: manual + cluster: + cluster_env: app_config.yaml + cluster_compute: 1gpu_16cpus_gce.yaml + - name: rllib_learning_tests_es_tf group: RLlib tests working_dir: rllib_tests @@ -3704,9 +3811,17 @@ timeout: 18000 script: python learning_tests/run.py --yaml-sub-dir=es --framework=tf - alert: default + variations: + - __suffix__: aws + - __suffix__: gce + env: gce + frequency: manual + cluster: + cluster_env: app_config.yaml + cluster_compute: 2gpus_64cpus_gce.yaml + - name: rllib_learning_tests_es_torch group: RLlib tests working_dir: rllib_tests @@ -3722,9 +3837,17 @@ timeout: 18000 script: python learning_tests/run.py --yaml-sub-dir=es --framework=torch - alert: default + variations: + - __suffix__: aws + - __suffix__: gce + env: gce + frequency: manual + cluster: + cluster_env: app_config.yaml + cluster_compute: 2gpus_64cpus_gce.yaml + - name: rllib_learning_tests_impala_tf group: RLlib tests working_dir: rllib_tests @@ -3740,9 +3863,17 @@ timeout: 18000 script: python learning_tests/run.py --yaml-sub-dir=impala --framework=tf - alert: default + variations: + - __suffix__: aws + - __suffix__: gce + env: gce + frequency: manual + cluster: + cluster_env: app_config.yaml + cluster_compute: 1gpu_16cpus_gce.yaml + - name: rllib_learning_tests_impala_torch group: RLlib tests working_dir: rllib_tests @@ -3758,9 +3889,17 @@ timeout: 18000 script: python learning_tests/run.py --yaml-sub-dir=impala --framework=torch - alert: default + variations: + - __suffix__: aws + - __suffix__: gce + env: gce + frequency: manual + cluster: + cluster_env: app_config.yaml + cluster_compute: 1gpu_16cpus_gce.yaml + - name: rllib_learning_tests_marwil_tf group: RLlib tests working_dir: rllib_tests @@ -3779,9 +3918,17 @@ timeout: 18000 script: python learning_tests/run.py --yaml-sub-dir=marwil --framework=tf - alert: default + variations: + - __suffix__: aws + - __suffix__: gce + env: gce + frequency: manual + cluster: + 
cluster_env: app_config.yaml + cluster_compute: 1gpu_16cpus_gce.yaml + - name: rllib_learning_tests_marwil_torch group: RLlib tests working_dir: rllib_tests @@ -3800,9 +3947,17 @@ timeout: 18000 script: python learning_tests/run.py --yaml-sub-dir=marwil --framework=torch - alert: default + variations: + - __suffix__: aws + - __suffix__: gce + env: gce + frequency: manual + cluster: + cluster_env: app_config.yaml + cluster_compute: 1gpu_16cpus_gce.yaml + - name: rllib_learning_tests_ppo_tf group: RLlib tests working_dir: rllib_tests @@ -3818,9 +3973,17 @@ timeout: 18000 script: python learning_tests/run.py --yaml-sub-dir=ppo/tf --framework=tf - alert: default + variations: + - __suffix__: aws + - __suffix__: gce + env: gce + frequency: manual + cluster: + cluster_env: app_config.yaml + cluster_compute: 2gpus_32cpus_gce.yaml + - name: rllib_learning_tests_ppo_torch group: RLlib tests working_dir: rllib_tests @@ -3865,9 +4028,17 @@ timeout: 18000 script: python learning_tests/run.py --yaml-sub-dir=sac --framework=tf - alert: default + variations: + - __suffix__: aws + - __suffix__: gce + env: gce + frequency: manual + cluster: + cluster_env: app_config.yaml + cluster_compute: 1gpu_16cpus_gce.yaml + - name: rllib_learning_tests_sac_torch group: RLlib tests working_dir: rllib_tests @@ -3885,6 +4056,15 @@ alert: default + variations: + - __suffix__: aws + - __suffix__: gce + env: gce + frequency: manual + cluster: + cluster_env: app_config.yaml + cluster_compute: 1gpu_16cpus_gce.yaml + - name: rllib_learning_tests_slateq_tf group: RLlib tests working_dir: rllib_tests @@ -3900,9 +4080,17 @@ timeout: 18000 script: python learning_tests/run.py --yaml-sub-dir=slateq --framework=tf - alert: default + variations: + - __suffix__: aws + - __suffix__: gce + env: gce + frequency: manual + cluster: + cluster_env: app_config.yaml + cluster_compute: 1gpu_16cpus_gce.yaml + - name: rllib_learning_tests_slateq_torch group: RLlib tests working_dir: rllib_tests @@ -3921,9 +4109,17 @@ 
timeout: 18000 script: python learning_tests/run.py --yaml-sub-dir=slateq --framework=torch - alert: default + variations: + - __suffix__: aws + - __suffix__: gce + env: gce + frequency: manual + cluster: + cluster_env: app_config.yaml + cluster_compute: 1gpu_16cpus_gce.yaml + - name: rllib_learning_tests_td3_tf group: RLlib tests working_dir: rllib_tests @@ -3939,9 +4135,17 @@ timeout: 18000 script: python learning_tests/run.py --yaml-sub-dir=td3 --framework=tf - alert: default + variations: + - __suffix__: aws + - __suffix__: gce + env: gce + frequency: manual + cluster: + cluster_env: app_config.yaml + cluster_compute: 1gpu_16cpus_gce.yaml + - name: rllib_learning_tests_td3_torch group: RLlib tests working_dir: rllib_tests @@ -3957,9 +4161,17 @@ timeout: 18000 script: python learning_tests/run.py --yaml-sub-dir=td3 --framework=torch - alert: default + variations: + - __suffix__: aws + - __suffix__: gce + env: gce + frequency: manual + cluster: + cluster_env: app_config.yaml + cluster_compute: 1gpu_16cpus_gce.yaml + - name: rllib_multi_gpu_learning_tests group: RLlib tests working_dir: rllib_tests @@ -4001,7 +4213,6 @@ timeout: 7200 script: python multi_gpu_with_lstm_learning_tests/run.py - alert: default variations: @@ -4028,7 +4239,6 @@ timeout: 7200 script: python multi_gpu_with_attention_learning_tests/run.py - alert: default variations: diff --git a/release/rllib_tests/multi_node_checkpointing_compute_config_gce.yaml b/release/rllib_tests/multi_node_checkpointing_compute_config_gce.yaml new file mode 100644 index 000000000000..9be7238750de --- /dev/null +++ b/release/rllib_tests/multi_node_checkpointing_compute_config_gce.yaml @@ -0,0 +1,17 @@ +cloud_id: {{env["ANYSCALE_CLOUD_ID"]}} +region: us-west1 +allowed_azs: + - us-west1-b + +max_workers: 3 + +head_node_type: + name: head_node + instance_type: n2-standard-8 # m5.2xlarge + +worker_node_types: + - name: worker_node + instance_type: n1-standard-4-nvidia-tesla-t4-1 # g4dn.xlarge + min_workers: 2 + 
max_workers: 2 + use_spot: false From 86631b1ae84ffe851adb4ea779c0b4b3076ce55d Mon Sep 17 00:00:00 2001 From: Avnish Narayan <38871737+avnishn@users.noreply.github.com> Date: Wed, 26 Apr 2023 14:18:54 -0700 Subject: [PATCH 113/424] [RLlib] Algorithm Level Checkpointing with Learner and RL Modules (#34717) Signed-off-by: Avnish --- rllib/BUILD | 7 + rllib/algorithms/algorithm.py | 35 +++-- .../algorithms/ppo/tests/test_ppo_learner.py | 53 +++++++- rllib/core/learner/learner.py | 10 ++ rllib/core/learner/learner_group.py | 2 +- rllib/core/rl_module/rl_module.py | 2 +- rllib/evaluation/rollout_worker.py | 2 +- ..._algorithm_save_load_checkpoint_learner.py | 128 ++++++++++++++++++ rllib/utils/checkpoints.py | 27 ++-- 9 files changed, 239 insertions(+), 27 deletions(-) create mode 100644 rllib/tests/test_algorithm_save_load_checkpoint_learner.py diff --git a/rllib/BUILD b/rllib/BUILD index f2d4537eedda..5bdc5b33fb19 100644 --- a/rllib/BUILD +++ b/rllib/BUILD @@ -1982,6 +1982,13 @@ py_test( srcs = ["core/learner/torch/tests/test_torch_learner.py"] ) +py_test( + name ="tests/test_algorithm_save_load_checkpoint_learner", + tags = ["team:rllib", "core"], + size = "medium", + srcs = ["tests/test_algorithm_save_load_checkpoint_learner.py"] +) + py_test( name = "test_bc_algorithm", tags = ["team:rllib", "core"], diff --git a/rllib/algorithms/algorithm.py b/rllib/algorithms/algorithm.py index 8385b3638607..90595bb79c9c 100644 --- a/rllib/algorithms/algorithm.py +++ b/rllib/algorithms/algorithm.py @@ -76,6 +76,7 @@ ) from ray.rllib.utils.checkpoints import ( CHECKPOINT_VERSION, + CHECKPOINT_VERSION_LEARNER, get_checkpoint_info, try_import_msgpack, ) @@ -2077,6 +2078,14 @@ def save_checkpoint(self, checkpoint_dir: str) -> str: policy_state.pkl pol_2/ policy_state.pkl + learner/ + learner_state.json + module_state/ + module_1/ + ... + optimizer_state/ + optimizers_module_1/ + ... 
rllib_checkpoint.json algorithm_state.pkl @@ -2099,7 +2108,10 @@ def save_checkpoint(self, checkpoint_dir: str) -> str: policy_states = state["worker"].pop("policy_states", {}) # Add RLlib checkpoint version. - state["checkpoint_version"] = CHECKPOINT_VERSION + if self.config._enable_learner_api: + state["checkpoint_version"] = CHECKPOINT_VERSION_LEARNER + else: + state["checkpoint_version"] = CHECKPOINT_VERSION # Write state (w/o policies) to disk. state_file = os.path.join(checkpoint_dir, "algorithm_state.pkl") @@ -2130,21 +2142,24 @@ def save_checkpoint(self, checkpoint_dir: str) -> str: policy = self.get_policy(pid) policy.export_checkpoint(policy_dir, policy_state=policy_state) + # if we are using the learner API, save the learner group state + if self.config._enable_learner_api: + learner_state_dir = os.path.join(checkpoint_dir, "learner") + self.learner_group.save_state(learner_state_dir) + return checkpoint_dir @override(Trainable) - def load_checkpoint(self, checkpoint: Union[Dict, str]) -> None: + def load_checkpoint(self, checkpoint: str) -> None: # Checkpoint is provided as a directory name. # Restore from the checkpoint file or dir. - if isinstance(checkpoint, str): - checkpoint_info = get_checkpoint_info(checkpoint) - checkpoint_data = Algorithm._checkpoint_info_to_algorithm_state( - checkpoint_info - ) - # Checkpoint is a checkpoint-as-dict -> Restore state from it as-is. 
- else: - checkpoint_data = checkpoint + + checkpoint_info = get_checkpoint_info(checkpoint) + checkpoint_data = Algorithm._checkpoint_info_to_algorithm_state(checkpoint_info) self.__setstate__(checkpoint_data) + if self.config._enable_learner_api: + learner_state_dir = os.path.join(checkpoint, "learner") + self.learner_group.load_state(learner_state_dir) @override(Trainable) def log_result(self, result: ResultDict) -> None: diff --git a/rllib/algorithms/ppo/tests/test_ppo_learner.py b/rllib/algorithms/ppo/tests/test_ppo_learner.py index 12e910ed8599..40aa98389539 100644 --- a/rllib/algorithms/ppo/tests/test_ppo_learner.py +++ b/rllib/algorithms/ppo/tests/test_ppo_learner.py @@ -2,6 +2,7 @@ import unittest import numpy as np import torch +import tempfile import tensorflow as tf import tree # pip install dm-tree @@ -74,8 +75,8 @@ def test_loss(self): ) for fw in framework_iterator(config, ("tf2", "torch"), with_eager_tracing=True): - trainer = config.build() - policy = trainer.get_policy() + algo = config.build() + policy = algo.get_policy() train_batch = SampleBatch(FAKE_BATCH) train_batch = compute_gae_for_sample_batch(policy, train_batch) @@ -109,14 +110,58 @@ def test_loss(self): ) learner_group = learner_group_config.build() - # load the trainer weights onto the learner_group - learner_group.set_weights(trainer.get_weights()) + # load the algo weights onto the learner_group + learner_group.set_weights(algo.get_weights()) results = learner_group.update(train_batch.as_multi_agent()) learner_group_loss = results[ALL_MODULES]["total_loss"] check(learner_group_loss, policy_loss) + def test_save_load_state(self): + """Tests saving and loading the state of the PPO Learner Group.""" + config = ( + ppo.PPOConfig() + .environment("CartPole-v1") + .rollouts( + num_rollout_workers=0, + ) + .training( + gamma=0.99, + model=dict( + fcnet_hiddens=[10, 10], + fcnet_activation="linear", + vf_share_layers=False, + ), + _enable_learner_api=True, + ) + .rl_module( + 
_enable_rl_module_api=True, + ) + ) + algo = config.build() + policy = algo.get_policy() + + for fw in framework_iterator(config, ("tf2", "torch"), with_eager_tracing=True): + algo_config = config.copy(copy_frozen=False) + algo_config.validate() + algo_config.freeze() + learner_group_config = algo_config.get_learner_group_config( + SingleAgentRLModuleSpec( + module_class=algo_config.rl_module_spec.module_class, + observation_space=policy.observation_space, + action_space=policy.action_space, + model_config_dict=policy.config["model"], + catalog_class=PPOCatalog, + ) + ) + learner_group1 = learner_group_config.build() + learner_group2 = learner_group_config.build() + with tempfile.TemporaryDirectory() as tmpdir: + learner_group1.save_state(tmpdir) + learner_group2.load_state(tmpdir) + check(learner_group1.get_state(), learner_group2.get_state()) + if __name__ == "__main__": import pytest diff --git a/rllib/core/learner/learner.py b/rllib/core/learner/learner.py index e4ef00dd6ad5..4847567dac23 100644 --- a/rllib/core/learner/learner.py +++ b/rllib/core/learner/learner.py @@ -906,6 +906,16 @@ def save_state(self, path: Union[str, pathlib.Path]) -> None: NOTE: if path doesn't exist, then a new directory will be created. otherwise, it will be appended to. + the state of the learner is saved in the following format: + + checkpoint_dir/ + learner_state.json + module_state/ + module_1/ + ... + optimizer_state/ + optimizers_module_1/ + ... Args: path: The path to the directory to save the state to. 
diff --git a/rllib/core/learner/learner_group.py b/rllib/core/learner/learner_group.py index c53ee9b78dd7..9b2774438b69 100644 --- a/rllib/core/learner/learner_group.py +++ b/rllib/core/learner/learner_group.py @@ -475,10 +475,10 @@ def load_state(self, path: str) -> None: if not path.exists(): raise ValueError(f"Path {path} does not exist.") path = str(path.absolute()) - assert len(self._workers) == self._worker_manager.num_healthy_actors() if self.is_local: self._learner.load_state(path) else: + assert len(self._workers) == self._worker_manager.num_healthy_actors() head_node_ip = socket.gethostbyname(socket.gethostname()) workers = self._worker_manager.healthy_actor_ids() diff --git a/rllib/core/rl_module/rl_module.py b/rllib/core/rl_module/rl_module.py index 905865f52d57..c403f54a7435 100644 --- a/rllib/core/rl_module/rl_module.py +++ b/rllib/core/rl_module/rl_module.py @@ -186,7 +186,7 @@ def to_dict(self): """ catalog_class_path = ( - serialize_type(type(self.catalog_class)) if self.catalog_class else "" + serialize_type(self.catalog_class) if self.catalog_class else "" ) return { "observation_space": gym_space_to_dict(self.observation_space), diff --git a/rllib/evaluation/rollout_worker.py b/rllib/evaluation/rollout_worker.py index 003083066c17..a30c07fa33fc 100644 --- a/rllib/evaluation/rollout_worker.py +++ b/rllib/evaluation/rollout_worker.py @@ -134,7 +134,7 @@ def _update_env_seed_if_necessary( NOTE: this may not work with remote environments (issue #18154). 
""" - if not seed: + if seed is None: return # A single RL job is unlikely to have more than 10K diff --git a/rllib/tests/test_algorithm_save_load_checkpoint_learner.py b/rllib/tests/test_algorithm_save_load_checkpoint_learner.py new file mode 100644 index 000000000000..211d0dac10c7 --- /dev/null +++ b/rllib/tests/test_algorithm_save_load_checkpoint_learner.py @@ -0,0 +1,128 @@ +import tempfile +import unittest + +import ray +from ray.rllib.algorithms.algorithm_config import AlgorithmConfig +from ray.rllib.utils.test_utils import check, framework_iterator +from ray.rllib.algorithms.ppo import PPOConfig +from ray.rllib.policy.sample_batch import DEFAULT_POLICY_ID +from ray.rllib.utils.metrics.learner_info import LEARNER_INFO, LEARNER_STATS_KEY + + +algorithms_and_configs = { + "PPO": (PPOConfig().training(train_batch_size=2, sgd_minibatch_size=2)) +} + + +@ray.remote +def save_and_train(algo_cfg: AlgorithmConfig, env: str, tmpdir): + """Create an algo, checkpoint it, then train for 2 iterations. + + Note: This function uses a seeded algorithm that can modify the global random state. + Running it multiple times in the same process can affect other algorithms. + Making it a Ray task runs it in a separate process and prevents it from + affecting other algorithms' random state. + + Args: + algo_cfg: The algorithm config to build the algo from. + env: The gym genvironment to train on. + tmpdir: The temporary directory to save the checkpoint to. + + Returns: + The learner stats after 2 iterations of training. 
+ """ + algo_cfg = ( + algo_cfg.training(_enable_learner_api=True) + .rl_module(_enable_rl_module_api=True) + .rollouts(num_rollout_workers=0) + # setting min_time_s_per_iteration=0 and min_sample_timesteps_per_iteration=1 + # to make sure that we get results as soon as sampling/training is done at + # least once + .reporting(min_time_s_per_iteration=0, min_sample_timesteps_per_iteration=1) + .debugging(seed=10) + ) + algo = algo_cfg.environment(env).build() + + tmpdir = str(tmpdir) + algo.save_checkpoint(tmpdir) + for _ in range(2): + results = algo.train() + return results["info"][LEARNER_INFO][DEFAULT_POLICY_ID][LEARNER_STATS_KEY] + + +@ray.remote +def load_and_train(algo_cfg: AlgorithmConfig, env: str, tmpdir): + """Loads the checkpoint saved by save_and_train and trains for 2 iterations. + + Note: This function uses a seeded algorithm that can modify the global random state. + Running it multiple times in the same process can affect other algorithms. + Making it a Ray task runs it in a separate process and prevents it from + affecting other algorithms' random state. + + Args: + algo_cfg: The algorithm config to build the algo from. + env: The gym genvironment to train on. + tmpdir: The temporary directory to save the checkpoint to. + + Returns: + The learner stats after 2 iterations of training. 
+ + """ + algo_cfg = ( + algo_cfg.training(_enable_learner_api=True) + .rl_module(_enable_rl_module_api=True) + .rollouts(num_rollout_workers=0) + # setting min_time_s_per_iteration=0 and min_sample_timesteps_per_iteration=1 + # to make sure that we get results as soon as sampling/training is done at + # least once + .reporting(min_time_s_per_iteration=0, min_sample_timesteps_per_iteration=1) + .debugging(seed=10) + ) + algo = algo_cfg.environment(env).build() + tmpdir = str(tmpdir) + algo.load_checkpoint(tmpdir) + for _ in range(2): + results = algo.train() + return results["info"][LEARNER_INFO][DEFAULT_POLICY_ID][LEARNER_STATS_KEY] + + +class TestAlgorithmWithLearnerSaveAndRestore(unittest.TestCase): + @classmethod + def setUpClass(cls) -> None: + ray.init() + + @classmethod + def tearDowClass(cls) -> None: + ray.shutdown() + + def test_save_and_restore(self): + for algo_name in algorithms_and_configs: + config = algorithms_and_configs[algo_name] + for _ in framework_iterator(config, frameworks=["torch", "tf2"]): + with tempfile.TemporaryDirectory() as tmpdir: + # create an algorithm, checkpoint it, then train for 2 iterations + ray.get(save_and_train.remote(config, "CartPole-v1", tmpdir)) + # load that checkpoint into a new algorithm and train for 2 + # iterations + results_algo_2 = ray.get( + load_and_train.remote(config, "CartPole-v1", tmpdir) + ) + + # load that checkpoint into another new algorithm and train for 2 + # iterations + results_algo_3 = ray.get( + load_and_train.remote(config, "CartPole-v1", tmpdir) + ) + + # check that the results are the same across loaded algorithms + # they won't be the same as the first algorithm since the random + # state that is used for each algorithm is not preserved across + # checkpoints. 
+ check(results_algo_3, results_algo_2) + + +if __name__ == "__main__": + import sys + import pytest + + sys.exit(pytest.main(["-v", __file__])) diff --git a/rllib/utils/checkpoints.py b/rllib/utils/checkpoints.py index 7bbc456ca148..19e5cc145b31 100644 --- a/rllib/utils/checkpoints.py +++ b/rllib/utils/checkpoints.py @@ -29,7 +29,11 @@ # 1.1: Same as 1.0, but has a new "format" field in the rllib_checkpoint.json file # indicating, whether the checkpoint is `cloudpickle` (default) or `msgpack`. + +# 1.2: Introduces the checkpoint for the new Learner API if the Learner api is enabled. + CHECKPOINT_VERSION = version.Version("1.1") +CHECKPOINT_VERSION_LEARNER = version.Version("1.2") @PublicAPI(stability="alpha") @@ -102,15 +106,15 @@ def get_checkpoint_info(checkpoint: Union[str, Checkpoint]) -> Dict[str, Any]: rllib_checkpoint_info["checkpoint_version"] ) info.update(rllib_checkpoint_info) - - # No rllib_checkpoint.json file present: Warn and continue trying to figure out - # checkpoint info ourselves. - if log_once("no_rllib_checkpoint_json_file"): - logger.warning( - "No `rllib_checkpoint.json` file found in checkpoint directory " - f"{checkpoint}! Trying to extract checkpoint info from other files " - f"found in that dir." - ) + else: + # No rllib_checkpoint.json file present: Warn and continue trying to figure + # out checkpoint info ourselves. + if log_once("no_rllib_checkpoint_json_file"): + logger.warning( + "No `rllib_checkpoint.json` file found in checkpoint directory " + f"{checkpoint}! Trying to extract checkpoint info from other files " + f"found in that dir." + ) # Policy checkpoint file found. for extension in ["pkl", "msgpck"]: @@ -222,7 +226,10 @@ def convert_to_msgpack_checkpoint( state["worker"]["is_policy_to_train"] = NOT_SERIALIZABLE # Add RLlib checkpoint version (as string). 
- state["checkpoint_version"] = str(CHECKPOINT_VERSION) + if state["config"]["_enable_learner_api"]: + state["checkpoint_version"] = str(CHECKPOINT_VERSION_LEARNER) + else: + state["checkpoint_version"] = str(CHECKPOINT_VERSION) # Write state (w/o policies) to disk. state_file = os.path.join(msgpack_checkpoint_dir, "algorithm_state.msgpck") From f723232ecd140f556b9214d01446bcbed5d07f31 Mon Sep 17 00:00:00 2001 From: SangBin Cho Date: Thu, 27 Apr 2023 07:33:47 +0900 Subject: [PATCH 114/424] [Core] Shorten the membership checking time to 5 seconds. (#34769) 60 seconds are too long in case the GCS is restarted. This can unblock the problem when the GCS is restarted, raylets won't be killed. We will discuss the fundamental fix and follow up with it. --- src/ray/common/ray_config_def.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/ray/common/ray_config_def.h b/src/ray/common/ray_config_def.h index 0188646292cc..3761a6a778bf 100644 --- a/src/ray/common/ray_config_def.h +++ b/src/ray/common/ray_config_def.h @@ -799,8 +799,8 @@ RAY_CONFIG(bool, kill_idle_workers_of_terminated_job, true) // Example: RAY_preload_python_modules=tensorflow,pytorch RAY_CONFIG(std::vector, preload_python_modules, {}) -// By default, raylet send a self liveness check to GCS every 60s -RAY_CONFIG(int64_t, raylet_liveness_self_check_interval_ms, 60000) +// By default, raylet send a self liveness check to GCS every 5s +RAY_CONFIG(int64_t, raylet_liveness_self_check_interval_ms, 5000) // Instruct the CoreWorker to kill its child processes while // it exits. 
This prevents certain classes of resource leaks From 4a38046d81d0a697eaf730e702c8b00af56b49cc Mon Sep 17 00:00:00 2001 From: Justin Yu Date: Wed, 26 Apr 2023 16:48:55 -0700 Subject: [PATCH 115/424] [Templates] Run workspace templates release tests on multi-node setups + add smoke tests in CI (#34385) * Update notebooks to have test specific code (w/ tags for filtering) Signed-off-by: Justin Yu * Rename + delete unused compute configs Signed-off-by: Justin Yu * Switch to using the new release test compute configs Signed-off-by: Justin Yu * Add GCE testing Signed-off-by: Justin Yu * Update templates.yaml Signed-off-by: Justin Yu * Remove unnecessary cloud-specific configs Signed-off-by: Justin Yu * Fix mmt test specific code Signed-off-by: Justin Yu * Fix batch inference notebook to be large only Signed-off-by: Justin Yu * Remove small/large scale references from mmt Signed-off-by: Justin Yu * Remove references to small/large scale in serving template + upload metrics for release test Signed-off-by: Justin Yu * Formatting Signed-off-by: Justin Yu * Add release test metric reporting to all notebooks Signed-off-by: Justin Yu * Add test tags to batch inf notebook Signed-off-by: Justin Yu * Add smoke test versions to CI Signed-off-by: Justin Yu * Add a dummy change for AIR tests Signed-off-by: Justin Yu * Exclude template smoke tests from release bazel BUILD crawling Signed-off-by: Justin Yu * Remove item_ids csv (pull from s3 instead), and add requirements.txt as test data Signed-off-by: Justin Yu * Fix the glob Signed-off-by: Justin Yu * Add missing gce AZs for release test Signed-off-by: Justin Yu * Fix CI (pin statsforecast to newer version + fix req.txt relative paths) Signed-off-by: Justin Yu * Lazily load requirements (useful for users who do pip install normally) Signed-off-by: Justin Yu * Add a better error message for lazy loading failure Signed-off-by: Justin Yu * Fix path for CI vs. 
release tests Signed-off-by: Justin Yu * Fix requirements for stable diffusion template Signed-off-by: Justin Yu * Use runtime env Signed-off-by: Justin Yu * Add separate test versions of the notebooks (temporary until 2.5) Signed-off-by: Justin Yu * Remove test specific code from the templates shown to users Signed-off-by: Justin Yu * Remove requirements files Signed-off-by: Justin Yu * Point tests to the copy w/ test-specific code Signed-off-by: Justin Yu * Fix typo in BUILD Signed-off-by: Justin Yu * Do pip install + runtime env Signed-off-by: Justin Yu * Clear outputs Signed-off-by: Justin Yu * Update the test notebooks Signed-off-by: Justin Yu * Update the contributing guide Signed-off-by: Justin Yu * Single pip install command Signed-off-by: Justin Yu * Remove unnecessary filegroup target Signed-off-by: Justin Yu * Move BUILD file to doc/ level so that release doesn't look at this file Signed-off-by: Justin Yu * Update templates.yaml + update validation script Signed-off-by: Justin Yu * Split up validation script into multiple functions, add yaml content testing Signed-off-by: Justin Yu * Add validation to BUILD file under doc pipeline Signed-off-by: Justin Yu * Fix templates validation test (bazel struggles) Signed-off-by: Justin Yu * Update readme with new contribution workflow Signed-off-by: Justin Yu * No need for timeseries libs requirement (runtime env instead) Signed-off-by: Justin Yu * Fix lint Signed-off-by: Justin Yu --------- Signed-off-by: Justin Yu Signed-off-by: Justin Yu --- ci/env/install-dependencies.sh | 2 +- doc/BUILD | 47 + .../01_batch_inference/batch_inference.ipynb | 38 +- .../02_many_model_training/item_ids.csv | 5001 ----------------- .../many_model_training.ipynb | 157 +- .../02_many_model_training/requirements.txt | 1 - .../requirements.txt | 9 - .../serving_stable_diffusion.ipynb | 188 +- doc/source/templates/README.md | 114 +- .../templates/configs/compute/cpu/aws.yaml | 11 + .../{aws_large.yaml => aws_release_test.yaml} | 3 + 
.../configs/compute/cpu/aws_small.yaml | 21 - .../templates/configs/compute/cpu/gce.yaml | 11 + .../{gcp_large.yaml => gce_release_test.yaml} | 0 .../configs/compute/cpu/gcp_small.yaml | 17 - .../templates/configs/compute/gpu/aws.yaml | 11 + .../{aws_large.yaml => aws_release_test.yaml} | 3 + .../configs/compute/gpu/aws_small.yaml | 21 - .../compute/gpu/{gcp_small.yaml => gce.yaml} | 13 +- .../{gcp_large.yaml => gce_release_test.yaml} | 0 doc/source/templates/templates.yaml | 12 +- .../templates/tests/batch_inference.ipynb | 371 ++ .../templates/tests/many_model_training.ipynb | 413 ++ .../tests/serving_stable_diffusion.ipynb | 567 ++ doc/source/templates/validate.py | 177 +- python/ray/train/base_trainer.py | 2 +- release/release_tests.yaml | 86 +- 27 files changed, 1882 insertions(+), 5414 deletions(-) delete mode 100644 doc/source/templates/02_many_model_training/item_ids.csv delete mode 100644 doc/source/templates/02_many_model_training/requirements.txt delete mode 100644 doc/source/templates/03_serving_stable_diffusion/requirements.txt create mode 100644 doc/source/templates/configs/compute/cpu/aws.yaml rename doc/source/templates/configs/compute/cpu/{aws_large.yaml => aws_release_test.yaml} (84%) delete mode 100644 doc/source/templates/configs/compute/cpu/aws_small.yaml create mode 100644 doc/source/templates/configs/compute/cpu/gce.yaml rename doc/source/templates/configs/compute/cpu/{gcp_large.yaml => gce_release_test.yaml} (100%) delete mode 100644 doc/source/templates/configs/compute/cpu/gcp_small.yaml create mode 100644 doc/source/templates/configs/compute/gpu/aws.yaml rename doc/source/templates/configs/compute/gpu/{aws_large.yaml => aws_release_test.yaml} (85%) delete mode 100644 doc/source/templates/configs/compute/gpu/aws_small.yaml rename doc/source/templates/configs/compute/gpu/{gcp_small.yaml => gce.yaml} (51%) rename doc/source/templates/configs/compute/gpu/{gcp_large.yaml => gce_release_test.yaml} (100%) create mode 100644 
doc/source/templates/tests/batch_inference.ipynb create mode 100644 doc/source/templates/tests/many_model_training.ipynb create mode 100644 doc/source/templates/tests/serving_stable_diffusion.ipynb diff --git a/ci/env/install-dependencies.sh b/ci/env/install-dependencies.sh index fa6cd0def65e..298363351a24 100755 --- a/ci/env/install-dependencies.sh +++ b/ci/env/install-dependencies.sh @@ -407,7 +407,7 @@ install_pip_packages() { # This cannot be included in requirements_tune.txt as it has conflicting # dependencies. if [ "${INSTALL_TIMESERIES_LIBS-}" = 1 ]; then - pip install -U "statsforecast==1.1.0" "prophet==1.1.1" + pip install -U "statsforecast==1.5.0" "prophet==1.1.1" fi # Data processing test dependencies. diff --git a/doc/BUILD b/doc/BUILD index 30a81aa1474e..7cd534c82a65 100644 --- a/doc/BUILD +++ b/doc/BUILD @@ -229,6 +229,53 @@ py_test_run_all_subdirectory( tags = ["exclusive", "team:core"], ) +# -------------------------------------------------------------------- +# Test all Workspace template notebooks in doc/source/templates +# as smoke tests. +# -------------------------------------------------------------------- + +filegroup( + name = "workspace_templates", + srcs = glob(["source/templates/tests/*.ipynb"]), + visibility = ["//doc:__subpackages__"] +) + +# Validate that all the paths and yamls within the templates.yaml file are valid. 
+ +py_test( + name = "templates_directory_validation", + size = "small", + main = "source/templates/validate.py", + srcs = ["source/templates/validate.py"], + data = glob(["source/templates/**/*"]), + tags = ["exclusive", "team:ml"] +) + +# Templates that only require CPU + +py_test_run_all_notebooks( + size = "large", + include = ["source/templates/tests/many_model_training.ipynb"], + exclude = [], + data = ["//doc:workspace_templates"], + tags = ["exclusive", "team:ml", "ray_air"], + env = {"SMOKE_TEST": "1"}, +) + +# Templates that require GPU + +py_test_run_all_notebooks( + size = "large", + include = [ + "source/templates/tests/batch_inference.ipynb", + "source/templates/tests/serving_stable_diffusion.ipynb" + ], + exclude = [], + data = ["//doc:workspace_templates"], + tags = ["exclusive", "team:ml", "ray_air", "gpu"], + env = {"SMOKE_TEST": "1"}, +) + # -------------- # Run GPU tests # -------------- diff --git a/doc/source/templates/01_batch_inference/batch_inference.ipynb b/doc/source/templates/01_batch_inference/batch_inference.ipynb index 052813e94b6a..ce2f2ced7d06 100644 --- a/doc/source/templates/01_batch_inference/batch_inference.ipynb +++ b/doc/source/templates/01_batch_inference/batch_inference.ipynb @@ -37,31 +37,26 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "id": "c99f142a", "metadata": {}, "source": [ - ">✂️ Replace these values depending on the template size you picked!\n", + ">✂️ Play around with these values!\n", ">\n", - ">For example, for the larger scale template with 4 GPU nodes, you may want to use 4 workers, each using 1 GPU." 
+ ">For example, for a cluster with 4 GPU nodes, you may want 4 workers, each using 1 GPU.\n", + ">Be sure to stay within the resource constraints of your Ray Cluster if autoscaling is not enabled.\n", + ">You can check the available resources in your Ray Cluster with: `ray status`" ] }, { "cell_type": "code", "execution_count": null, - "id": "9aa792fc", - "metadata": { - "tags": [ - "small" - ] - }, + "id": "770bbdc7", + "metadata": {}, "outputs": [], "source": [ - "# Default values for the small-scale template\n", - "NUM_WORKERS: int = 1\n", - "\n", - "USE_GPU: bool = True\n", - "NUM_GPUS_PER_WORKER: float = 1\n" + "!ray status" ] }, { @@ -69,16 +64,11 @@ "execution_count": null, "id": "9d49681f-baf0-4ed8-9740-5c4e38744311", "metadata": { - "tags": [ - "large" - ] + "tags": [] }, "outputs": [], "source": [ - "# Default values for the large-scale template\n", "NUM_WORKERS: int = 4\n", - "\n", - "USE_GPU: bool = True\n", "NUM_GPUS_PER_WORKER: float = 1\n" ] }, @@ -231,7 +221,7 @@ " # Fix the number of batch inference workers to a specified value.\n", " size=NUM_WORKERS,\n", " ),\n", - " num_gpus=NUM_GPUS_PER_WORKER if USE_GPU else 0,\n", + " num_gpus=NUM_GPUS_PER_WORKER,\n", " batch_format=\"numpy\",\n", ")\n" ] @@ -293,14 +283,6 @@ " predictions.repartition(num_shards).write_parquet(f\"local://{temp_dir}\")\n", " print(f\"Predictions saved to `{temp_dir}`!\")\n" ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "1e88a268", - "metadata": {}, - "outputs": [], - "source": [] } ], "metadata": { diff --git a/doc/source/templates/02_many_model_training/item_ids.csv b/doc/source/templates/02_many_model_training/item_ids.csv deleted file mode 100644 index cf7f79dd1de2..000000000000 --- a/doc/source/templates/02_many_model_training/item_ids.csv +++ /dev/null @@ -1,5001 +0,0 @@ -Unnamed: 0,item_id -0,FOODS_1_001_CA_1 -1,FOODS_1_001_CA_2 -2,FOODS_1_001_CA_3 -3,FOODS_1_001_CA_4 -4,FOODS_1_001_TX_1 -5,FOODS_1_001_TX_2 -6,FOODS_1_001_TX_3 -7,FOODS_1_001_WI_1 
-8,FOODS_1_001_WI_2 -9,FOODS_1_001_WI_3 -10,FOODS_1_002_CA_1 -11,FOODS_1_002_CA_2 -12,FOODS_1_002_CA_3 -13,FOODS_1_002_CA_4 -14,FOODS_1_002_TX_1 -15,FOODS_1_002_TX_2 -16,FOODS_1_002_TX_3 -17,FOODS_1_002_WI_1 -18,FOODS_1_002_WI_2 -19,FOODS_1_002_WI_3 -20,FOODS_1_003_CA_1 -21,FOODS_1_003_CA_2 -22,FOODS_1_003_CA_3 -23,FOODS_1_003_CA_4 -24,FOODS_1_003_TX_1 -25,FOODS_1_003_TX_2 -26,FOODS_1_003_TX_3 -27,FOODS_1_003_WI_1 -28,FOODS_1_003_WI_2 -29,FOODS_1_003_WI_3 -30,FOODS_1_004_CA_1 -31,FOODS_1_004_CA_2 -32,FOODS_1_004_CA_3 -33,FOODS_1_004_CA_4 -34,FOODS_1_004_TX_1 -35,FOODS_1_004_TX_2 -36,FOODS_1_004_TX_3 -37,FOODS_1_004_WI_1 -38,FOODS_1_004_WI_2 -39,FOODS_1_004_WI_3 -40,FOODS_1_005_CA_1 -41,FOODS_1_005_CA_2 -42,FOODS_1_005_CA_3 -43,FOODS_1_005_CA_4 -44,FOODS_1_005_TX_1 -45,FOODS_1_005_TX_2 -46,FOODS_1_005_TX_3 -47,FOODS_1_005_WI_1 -48,FOODS_1_005_WI_2 -49,FOODS_1_005_WI_3 -50,FOODS_1_006_CA_1 -51,FOODS_1_006_CA_2 -52,FOODS_1_006_CA_3 -53,FOODS_1_006_CA_4 -54,FOODS_1_006_TX_1 -55,FOODS_1_006_TX_2 -56,FOODS_1_006_TX_3 -57,FOODS_1_006_WI_1 -58,FOODS_1_006_WI_2 -59,FOODS_1_006_WI_3 -60,FOODS_1_008_CA_1 -61,FOODS_1_008_CA_2 -62,FOODS_1_008_CA_3 -63,FOODS_1_008_CA_4 -64,FOODS_1_008_TX_1 -65,FOODS_1_008_TX_2 -66,FOODS_1_008_TX_3 -67,FOODS_1_008_WI_1 -68,FOODS_1_008_WI_2 -69,FOODS_1_008_WI_3 -70,FOODS_1_009_CA_1 -71,FOODS_1_009_CA_2 -72,FOODS_1_009_CA_3 -73,FOODS_1_009_CA_4 -74,FOODS_1_009_TX_1 -75,FOODS_1_009_TX_2 -76,FOODS_1_009_TX_3 -77,FOODS_1_009_WI_1 -78,FOODS_1_009_WI_2 -79,FOODS_1_009_WI_3 -80,FOODS_1_010_CA_1 -81,FOODS_1_010_CA_2 -82,FOODS_1_010_CA_3 -83,FOODS_1_010_CA_4 -84,FOODS_1_010_TX_1 -85,FOODS_1_010_TX_2 -86,FOODS_1_010_TX_3 -87,FOODS_1_010_WI_1 -88,FOODS_1_010_WI_2 -89,FOODS_1_010_WI_3 -90,FOODS_1_011_CA_1 -91,FOODS_1_011_CA_2 -92,FOODS_1_011_CA_3 -93,FOODS_1_011_CA_4 -94,FOODS_1_011_TX_1 -95,FOODS_1_011_TX_2 -96,FOODS_1_011_TX_3 -97,FOODS_1_011_WI_1 -98,FOODS_1_011_WI_2 -99,FOODS_1_011_WI_3 -100,FOODS_1_012_CA_1 -101,FOODS_1_012_CA_2 -102,FOODS_1_012_CA_3 
-103,FOODS_1_012_CA_4 -104,FOODS_1_012_TX_1 -105,FOODS_1_012_TX_2 -106,FOODS_1_012_TX_3 -107,FOODS_1_012_WI_1 -108,FOODS_1_012_WI_2 -109,FOODS_1_012_WI_3 -110,FOODS_1_013_CA_1 -111,FOODS_1_013_CA_2 -112,FOODS_1_013_CA_3 -113,FOODS_1_013_CA_4 -114,FOODS_1_013_TX_1 -115,FOODS_1_013_TX_2 -116,FOODS_1_013_TX_3 -117,FOODS_1_013_WI_1 -118,FOODS_1_013_WI_2 -119,FOODS_1_013_WI_3 -120,FOODS_1_014_CA_1 -121,FOODS_1_014_CA_2 -122,FOODS_1_014_CA_3 -123,FOODS_1_014_CA_4 -124,FOODS_1_014_TX_1 -125,FOODS_1_014_TX_2 -126,FOODS_1_014_TX_3 -127,FOODS_1_014_WI_1 -128,FOODS_1_014_WI_2 -129,FOODS_1_014_WI_3 -130,FOODS_1_015_CA_1 -131,FOODS_1_015_CA_2 -132,FOODS_1_015_CA_3 -133,FOODS_1_015_CA_4 -134,FOODS_1_015_TX_1 -135,FOODS_1_015_TX_2 -136,FOODS_1_015_TX_3 -137,FOODS_1_015_WI_1 -138,FOODS_1_015_WI_2 -139,FOODS_1_015_WI_3 -140,FOODS_1_016_CA_1 -141,FOODS_1_016_CA_2 -142,FOODS_1_016_CA_3 -143,FOODS_1_016_CA_4 -144,FOODS_1_016_TX_1 -145,FOODS_1_016_TX_2 -146,FOODS_1_016_TX_3 -147,FOODS_1_016_WI_1 -148,FOODS_1_016_WI_2 -149,FOODS_1_016_WI_3 -150,FOODS_1_017_CA_1 -151,FOODS_1_017_CA_2 -152,FOODS_1_017_CA_3 -153,FOODS_1_017_CA_4 -154,FOODS_1_017_TX_1 -155,FOODS_1_017_TX_2 -156,FOODS_1_017_TX_3 -157,FOODS_1_017_WI_1 -158,FOODS_1_017_WI_2 -159,FOODS_1_017_WI_3 -160,FOODS_1_018_CA_1 -161,FOODS_1_018_CA_2 -162,FOODS_1_018_CA_3 -163,FOODS_1_018_CA_4 -164,FOODS_1_018_TX_1 -165,FOODS_1_018_TX_2 -166,FOODS_1_018_TX_3 -167,FOODS_1_018_WI_1 -168,FOODS_1_018_WI_2 -169,FOODS_1_018_WI_3 -170,FOODS_1_019_CA_1 -171,FOODS_1_019_CA_2 -172,FOODS_1_019_CA_3 -173,FOODS_1_019_CA_4 -174,FOODS_1_019_TX_1 -175,FOODS_1_019_TX_2 -176,FOODS_1_019_TX_3 -177,FOODS_1_019_WI_1 -178,FOODS_1_019_WI_2 -179,FOODS_1_019_WI_3 -180,FOODS_1_020_CA_1 -181,FOODS_1_020_CA_2 -182,FOODS_1_020_CA_3 -183,FOODS_1_020_CA_4 -184,FOODS_1_020_TX_1 -185,FOODS_1_020_TX_2 -186,FOODS_1_020_TX_3 -187,FOODS_1_020_WI_1 -188,FOODS_1_020_WI_2 -189,FOODS_1_020_WI_3 -190,FOODS_1_021_CA_1 -191,FOODS_1_021_CA_2 -192,FOODS_1_021_CA_3 
-193,FOODS_1_021_CA_4 -194,FOODS_1_021_TX_1 -195,FOODS_1_021_TX_2 -196,FOODS_1_021_TX_3 -197,FOODS_1_021_WI_1 -198,FOODS_1_021_WI_2 -199,FOODS_1_021_WI_3 -200,FOODS_1_022_CA_1 -201,FOODS_1_022_CA_2 -202,FOODS_1_022_CA_3 -203,FOODS_1_022_CA_4 -204,FOODS_1_022_TX_1 -205,FOODS_1_022_TX_2 -206,FOODS_1_022_TX_3 -207,FOODS_1_022_WI_1 -208,FOODS_1_022_WI_2 -209,FOODS_1_022_WI_3 -210,FOODS_1_023_CA_1 -211,FOODS_1_023_CA_2 -212,FOODS_1_023_CA_3 -213,FOODS_1_023_CA_4 -214,FOODS_1_023_TX_1 -215,FOODS_1_023_TX_2 -216,FOODS_1_023_TX_3 -217,FOODS_1_023_WI_1 -218,FOODS_1_023_WI_2 -219,FOODS_1_023_WI_3 -220,FOODS_1_024_CA_1 -221,FOODS_1_024_CA_2 -222,FOODS_1_024_CA_3 -223,FOODS_1_024_CA_4 -224,FOODS_1_024_TX_1 -225,FOODS_1_024_TX_2 -226,FOODS_1_024_TX_3 -227,FOODS_1_024_WI_1 -228,FOODS_1_024_WI_2 -229,FOODS_1_024_WI_3 -230,FOODS_1_025_CA_1 -231,FOODS_1_025_CA_2 -232,FOODS_1_025_CA_3 -233,FOODS_1_025_CA_4 -234,FOODS_1_025_TX_1 -235,FOODS_1_025_TX_2 -236,FOODS_1_025_TX_3 -237,FOODS_1_025_WI_1 -238,FOODS_1_025_WI_2 -239,FOODS_1_025_WI_3 -240,FOODS_1_026_CA_1 -241,FOODS_1_026_CA_2 -242,FOODS_1_026_CA_3 -243,FOODS_1_026_CA_4 -244,FOODS_1_026_TX_1 -245,FOODS_1_026_TX_2 -246,FOODS_1_026_TX_3 -247,FOODS_1_026_WI_1 -248,FOODS_1_026_WI_2 -249,FOODS_1_026_WI_3 -250,FOODS_1_027_CA_1 -251,FOODS_1_027_CA_2 -252,FOODS_1_027_CA_3 -253,FOODS_1_027_CA_4 -254,FOODS_1_027_TX_1 -255,FOODS_1_027_TX_2 -256,FOODS_1_027_TX_3 -257,FOODS_1_027_WI_1 -258,FOODS_1_027_WI_2 -259,FOODS_1_027_WI_3 -260,FOODS_1_028_CA_1 -261,FOODS_1_028_CA_2 -262,FOODS_1_028_CA_3 -263,FOODS_1_028_CA_4 -264,FOODS_1_028_TX_1 -265,FOODS_1_028_TX_2 -266,FOODS_1_028_TX_3 -267,FOODS_1_028_WI_1 -268,FOODS_1_028_WI_2 -269,FOODS_1_028_WI_3 -270,FOODS_1_029_CA_1 -271,FOODS_1_029_CA_2 -272,FOODS_1_029_CA_3 -273,FOODS_1_029_CA_4 -274,FOODS_1_029_TX_1 -275,FOODS_1_029_TX_2 -276,FOODS_1_029_TX_3 -277,FOODS_1_029_WI_1 -278,FOODS_1_029_WI_2 -279,FOODS_1_029_WI_3 -280,FOODS_1_030_CA_1 -281,FOODS_1_030_CA_2 -282,FOODS_1_030_CA_3 
-283,FOODS_1_030_CA_4 -284,FOODS_1_030_TX_1 -285,FOODS_1_030_TX_2 -286,FOODS_1_030_TX_3 -287,FOODS_1_030_WI_1 -288,FOODS_1_030_WI_2 -289,FOODS_1_030_WI_3 -290,FOODS_1_031_CA_1 -291,FOODS_1_031_CA_2 -292,FOODS_1_031_CA_3 -293,FOODS_1_031_CA_4 -294,FOODS_1_031_TX_1 -295,FOODS_1_031_TX_2 -296,FOODS_1_031_TX_3 -297,FOODS_1_031_WI_1 -298,FOODS_1_031_WI_2 -299,FOODS_1_031_WI_3 -300,FOODS_1_032_CA_1 -301,FOODS_1_032_CA_2 -302,FOODS_1_032_CA_3 -303,FOODS_1_032_CA_4 -304,FOODS_1_032_TX_1 -305,FOODS_1_032_TX_2 -306,FOODS_1_032_TX_3 -307,FOODS_1_032_WI_1 -308,FOODS_1_032_WI_2 -309,FOODS_1_032_WI_3 -310,FOODS_1_033_CA_1 -311,FOODS_1_033_CA_2 -312,FOODS_1_033_CA_3 -313,FOODS_1_033_CA_4 -314,FOODS_1_033_TX_1 -315,FOODS_1_033_TX_2 -316,FOODS_1_033_TX_3 -317,FOODS_1_033_WI_1 -318,FOODS_1_033_WI_2 -319,FOODS_1_033_WI_3 -320,FOODS_1_034_CA_1 -321,FOODS_1_034_CA_2 -322,FOODS_1_034_CA_3 -323,FOODS_1_034_CA_4 -324,FOODS_1_034_TX_1 -325,FOODS_1_034_TX_2 -326,FOODS_1_034_TX_3 -327,FOODS_1_034_WI_1 -328,FOODS_1_034_WI_2 -329,FOODS_1_034_WI_3 -330,FOODS_1_035_CA_1 -331,FOODS_1_035_CA_2 -332,FOODS_1_035_CA_3 -333,FOODS_1_035_CA_4 -334,FOODS_1_035_TX_1 -335,FOODS_1_035_TX_2 -336,FOODS_1_035_TX_3 -337,FOODS_1_035_WI_1 -338,FOODS_1_035_WI_2 -339,FOODS_1_035_WI_3 -340,FOODS_1_036_CA_1 -341,FOODS_1_036_CA_2 -342,FOODS_1_036_CA_3 -343,FOODS_1_036_CA_4 -344,FOODS_1_036_TX_1 -345,FOODS_1_036_TX_2 -346,FOODS_1_036_TX_3 -347,FOODS_1_036_WI_1 -348,FOODS_1_036_WI_2 -349,FOODS_1_036_WI_3 -350,FOODS_1_037_CA_1 -351,FOODS_1_037_CA_2 -352,FOODS_1_037_CA_3 -353,FOODS_1_037_CA_4 -354,FOODS_1_037_TX_1 -355,FOODS_1_037_TX_2 -356,FOODS_1_037_TX_3 -357,FOODS_1_037_WI_1 -358,FOODS_1_037_WI_2 -359,FOODS_1_037_WI_3 -360,FOODS_1_038_CA_1 -361,FOODS_1_038_CA_2 -362,FOODS_1_038_CA_3 -363,FOODS_1_038_CA_4 -364,FOODS_1_038_TX_1 -365,FOODS_1_038_TX_2 -366,FOODS_1_038_TX_3 -367,FOODS_1_038_WI_1 -368,FOODS_1_038_WI_2 -369,FOODS_1_038_WI_3 -370,FOODS_1_039_CA_1 -371,FOODS_1_039_CA_2 -372,FOODS_1_039_CA_3 
-373,FOODS_1_039_CA_4 -374,FOODS_1_039_TX_1 -375,FOODS_1_039_TX_2 -376,FOODS_1_039_TX_3 -377,FOODS_1_039_WI_1 -378,FOODS_1_039_WI_2 -379,FOODS_1_039_WI_3 -380,FOODS_1_040_CA_1 -381,FOODS_1_040_CA_2 -382,FOODS_1_040_CA_3 -383,FOODS_1_040_CA_4 -384,FOODS_1_040_TX_1 -385,FOODS_1_040_TX_2 -386,FOODS_1_040_TX_3 -387,FOODS_1_040_WI_1 -388,FOODS_1_040_WI_2 -389,FOODS_1_040_WI_3 -390,FOODS_1_041_CA_1 -391,FOODS_1_041_CA_2 -392,FOODS_1_041_CA_3 -393,FOODS_1_041_CA_4 -394,FOODS_1_041_TX_1 -395,FOODS_1_041_TX_2 -396,FOODS_1_041_TX_3 -397,FOODS_1_041_WI_1 -398,FOODS_1_041_WI_2 -399,FOODS_1_041_WI_3 -400,FOODS_1_042_CA_1 -401,FOODS_1_042_CA_2 -402,FOODS_1_042_CA_3 -403,FOODS_1_042_CA_4 -404,FOODS_1_042_TX_1 -405,FOODS_1_042_TX_2 -406,FOODS_1_042_TX_3 -407,FOODS_1_042_WI_1 -408,FOODS_1_042_WI_2 -409,FOODS_1_042_WI_3 -410,FOODS_1_043_CA_1 -411,FOODS_1_043_CA_2 -412,FOODS_1_043_CA_3 -413,FOODS_1_043_CA_4 -414,FOODS_1_043_TX_1 -415,FOODS_1_043_TX_2 -416,FOODS_1_043_TX_3 -417,FOODS_1_043_WI_1 -418,FOODS_1_043_WI_2 -419,FOODS_1_043_WI_3 -420,FOODS_1_044_CA_1 -421,FOODS_1_044_CA_2 -422,FOODS_1_044_CA_3 -423,FOODS_1_044_CA_4 -424,FOODS_1_044_TX_1 -425,FOODS_1_044_TX_2 -426,FOODS_1_044_TX_3 -427,FOODS_1_044_WI_1 -428,FOODS_1_044_WI_2 -429,FOODS_1_044_WI_3 -430,FOODS_1_045_CA_1 -431,FOODS_1_045_CA_2 -432,FOODS_1_045_CA_3 -433,FOODS_1_045_CA_4 -434,FOODS_1_045_TX_1 -435,FOODS_1_045_TX_2 -436,FOODS_1_045_TX_3 -437,FOODS_1_045_WI_1 -438,FOODS_1_045_WI_2 -439,FOODS_1_045_WI_3 -440,FOODS_1_046_CA_1 -441,FOODS_1_046_CA_2 -442,FOODS_1_046_CA_3 -443,FOODS_1_046_CA_4 -444,FOODS_1_046_TX_1 -445,FOODS_1_046_TX_2 -446,FOODS_1_046_TX_3 -447,FOODS_1_046_WI_1 -448,FOODS_1_046_WI_2 -449,FOODS_1_046_WI_3 -450,FOODS_1_047_CA_1 -451,FOODS_1_047_CA_2 -452,FOODS_1_047_CA_3 -453,FOODS_1_047_CA_4 -454,FOODS_1_047_TX_1 -455,FOODS_1_047_TX_2 -456,FOODS_1_047_TX_3 -457,FOODS_1_047_WI_1 -458,FOODS_1_047_WI_2 -459,FOODS_1_047_WI_3 -460,FOODS_1_048_CA_1 -461,FOODS_1_048_CA_2 -462,FOODS_1_048_CA_3 
-463,FOODS_1_048_CA_4 -464,FOODS_1_048_TX_1 -465,FOODS_1_048_TX_2 -466,FOODS_1_048_TX_3 -467,FOODS_1_048_WI_1 -468,FOODS_1_048_WI_2 -469,FOODS_1_048_WI_3 -470,FOODS_1_049_CA_1 -471,FOODS_1_049_CA_2 -472,FOODS_1_049_CA_3 -473,FOODS_1_049_CA_4 -474,FOODS_1_049_TX_1 -475,FOODS_1_049_TX_2 -476,FOODS_1_049_TX_3 -477,FOODS_1_049_WI_1 -478,FOODS_1_049_WI_2 -479,FOODS_1_049_WI_3 -480,FOODS_1_050_CA_1 -481,FOODS_1_050_CA_2 -482,FOODS_1_050_CA_3 -483,FOODS_1_050_CA_4 -484,FOODS_1_050_TX_1 -485,FOODS_1_050_TX_2 -486,FOODS_1_050_TX_3 -487,FOODS_1_050_WI_1 -488,FOODS_1_050_WI_2 -489,FOODS_1_050_WI_3 -490,FOODS_1_051_CA_1 -491,FOODS_1_051_CA_2 -492,FOODS_1_051_CA_3 -493,FOODS_1_051_CA_4 -494,FOODS_1_051_TX_1 -495,FOODS_1_051_TX_2 -496,FOODS_1_051_TX_3 -497,FOODS_1_051_WI_1 -498,FOODS_1_051_WI_2 -499,FOODS_1_051_WI_3 -500,FOODS_1_052_CA_1 -501,FOODS_1_052_CA_2 -502,FOODS_1_052_CA_3 -503,FOODS_1_052_CA_4 -504,FOODS_1_052_TX_1 -505,FOODS_1_052_TX_2 -506,FOODS_1_052_TX_3 -507,FOODS_1_052_WI_1 -508,FOODS_1_052_WI_2 -509,FOODS_1_052_WI_3 -510,FOODS_1_053_CA_1 -511,FOODS_1_053_CA_2 -512,FOODS_1_053_CA_3 -513,FOODS_1_053_CA_4 -514,FOODS_1_053_TX_1 -515,FOODS_1_053_TX_2 -516,FOODS_1_053_TX_3 -517,FOODS_1_053_WI_1 -518,FOODS_1_053_WI_2 -519,FOODS_1_053_WI_3 -520,FOODS_1_054_CA_1 -521,FOODS_1_054_CA_2 -522,FOODS_1_054_CA_3 -523,FOODS_1_054_CA_4 -524,FOODS_1_054_TX_1 -525,FOODS_1_054_TX_2 -526,FOODS_1_054_TX_3 -527,FOODS_1_054_WI_1 -528,FOODS_1_054_WI_2 -529,FOODS_1_054_WI_3 -530,FOODS_1_055_CA_1 -531,FOODS_1_055_CA_2 -532,FOODS_1_055_CA_3 -533,FOODS_1_055_CA_4 -534,FOODS_1_055_TX_1 -535,FOODS_1_055_TX_2 -536,FOODS_1_055_TX_3 -537,FOODS_1_055_WI_1 -538,FOODS_1_055_WI_2 -539,FOODS_1_055_WI_3 -540,FOODS_1_056_CA_1 -541,FOODS_1_056_CA_2 -542,FOODS_1_056_CA_3 -543,FOODS_1_056_CA_4 -544,FOODS_1_056_TX_1 -545,FOODS_1_056_TX_2 -546,FOODS_1_056_TX_3 -547,FOODS_1_056_WI_1 -548,FOODS_1_056_WI_2 -549,FOODS_1_056_WI_3 -550,FOODS_1_057_CA_1 -551,FOODS_1_057_CA_2 -552,FOODS_1_057_CA_3 
-553,FOODS_1_057_CA_4 -554,FOODS_1_057_TX_1 -555,FOODS_1_057_TX_2 -556,FOODS_1_057_TX_3 -557,FOODS_1_057_WI_1 -558,FOODS_1_057_WI_2 -559,FOODS_1_057_WI_3 -560,FOODS_1_058_CA_1 -561,FOODS_1_058_CA_2 -562,FOODS_1_058_CA_3 -563,FOODS_1_058_CA_4 -564,FOODS_1_058_TX_1 -565,FOODS_1_058_TX_2 -566,FOODS_1_058_TX_3 -567,FOODS_1_058_WI_1 -568,FOODS_1_058_WI_2 -569,FOODS_1_058_WI_3 -570,FOODS_1_059_CA_1 -571,FOODS_1_059_CA_2 -572,FOODS_1_059_CA_3 -573,FOODS_1_059_CA_4 -574,FOODS_1_059_TX_1 -575,FOODS_1_059_TX_2 -576,FOODS_1_059_TX_3 -577,FOODS_1_059_WI_1 -578,FOODS_1_059_WI_2 -579,FOODS_1_059_WI_3 -580,FOODS_1_060_CA_1 -581,FOODS_1_060_CA_2 -582,FOODS_1_060_CA_3 -583,FOODS_1_060_CA_4 -584,FOODS_1_060_TX_1 -585,FOODS_1_060_TX_2 -586,FOODS_1_060_TX_3 -587,FOODS_1_060_WI_1 -588,FOODS_1_060_WI_2 -589,FOODS_1_060_WI_3 -590,FOODS_1_061_CA_1 -591,FOODS_1_061_CA_2 -592,FOODS_1_061_CA_3 -593,FOODS_1_061_CA_4 -594,FOODS_1_061_TX_1 -595,FOODS_1_061_TX_2 -596,FOODS_1_061_TX_3 -597,FOODS_1_061_WI_1 -598,FOODS_1_061_WI_2 -599,FOODS_1_061_WI_3 -600,FOODS_1_062_CA_1 -601,FOODS_1_062_CA_2 -602,FOODS_1_062_CA_3 -603,FOODS_1_062_CA_4 -604,FOODS_1_062_TX_1 -605,FOODS_1_062_TX_2 -606,FOODS_1_062_TX_3 -607,FOODS_1_062_WI_1 -608,FOODS_1_062_WI_2 -609,FOODS_1_062_WI_3 -610,FOODS_1_063_CA_1 -611,FOODS_1_063_CA_2 -612,FOODS_1_063_CA_3 -613,FOODS_1_063_CA_4 -614,FOODS_1_063_TX_1 -615,FOODS_1_063_TX_2 -616,FOODS_1_063_TX_3 -617,FOODS_1_063_WI_1 -618,FOODS_1_063_WI_2 -619,FOODS_1_063_WI_3 -620,FOODS_1_064_CA_1 -621,FOODS_1_064_CA_2 -622,FOODS_1_064_CA_3 -623,FOODS_1_064_CA_4 -624,FOODS_1_064_TX_1 -625,FOODS_1_064_TX_2 -626,FOODS_1_064_TX_3 -627,FOODS_1_064_WI_1 -628,FOODS_1_064_WI_2 -629,FOODS_1_064_WI_3 -630,FOODS_1_065_CA_1 -631,FOODS_1_065_CA_2 -632,FOODS_1_065_CA_3 -633,FOODS_1_065_CA_4 -634,FOODS_1_065_TX_1 -635,FOODS_1_065_TX_2 -636,FOODS_1_065_TX_3 -637,FOODS_1_065_WI_1 -638,FOODS_1_065_WI_2 -639,FOODS_1_065_WI_3 -640,FOODS_1_066_CA_1 -641,FOODS_1_066_CA_2 -642,FOODS_1_066_CA_3 
-643,FOODS_1_066_CA_4 -644,FOODS_1_066_TX_1 -645,FOODS_1_066_TX_2 -646,FOODS_1_066_TX_3 -647,FOODS_1_066_WI_1 -648,FOODS_1_066_WI_2 -649,FOODS_1_066_WI_3 -650,FOODS_1_067_CA_1 -651,FOODS_1_067_CA_2 -652,FOODS_1_067_CA_3 -653,FOODS_1_067_CA_4 -654,FOODS_1_067_TX_1 -655,FOODS_1_067_TX_2 -656,FOODS_1_067_TX_3 -657,FOODS_1_067_WI_1 -658,FOODS_1_067_WI_2 -659,FOODS_1_067_WI_3 -660,FOODS_1_068_CA_1 -661,FOODS_1_068_CA_2 -662,FOODS_1_068_CA_3 -663,FOODS_1_068_CA_4 -664,FOODS_1_068_TX_1 -665,FOODS_1_068_TX_2 -666,FOODS_1_068_TX_3 -667,FOODS_1_068_WI_1 -668,FOODS_1_068_WI_2 -669,FOODS_1_068_WI_3 -670,FOODS_1_069_CA_1 -671,FOODS_1_069_CA_2 -672,FOODS_1_069_CA_3 -673,FOODS_1_069_CA_4 -674,FOODS_1_069_TX_1 -675,FOODS_1_069_TX_2 -676,FOODS_1_069_TX_3 -677,FOODS_1_069_WI_1 -678,FOODS_1_069_WI_2 -679,FOODS_1_069_WI_3 -680,FOODS_1_070_CA_1 -681,FOODS_1_070_CA_2 -682,FOODS_1_070_CA_3 -683,FOODS_1_070_CA_4 -684,FOODS_1_070_TX_1 -685,FOODS_1_070_TX_2 -686,FOODS_1_070_TX_3 -687,FOODS_1_070_WI_1 -688,FOODS_1_070_WI_2 -689,FOODS_1_070_WI_3 -690,FOODS_1_071_CA_1 -691,FOODS_1_071_CA_2 -692,FOODS_1_071_CA_3 -693,FOODS_1_071_CA_4 -694,FOODS_1_071_TX_1 -695,FOODS_1_071_TX_2 -696,FOODS_1_071_TX_3 -697,FOODS_1_071_WI_1 -698,FOODS_1_071_WI_2 -699,FOODS_1_071_WI_3 -700,FOODS_1_072_CA_1 -701,FOODS_1_072_CA_2 -702,FOODS_1_072_CA_3 -703,FOODS_1_072_CA_4 -704,FOODS_1_072_TX_1 -705,FOODS_1_072_TX_2 -706,FOODS_1_072_TX_3 -707,FOODS_1_072_WI_1 -708,FOODS_1_072_WI_2 -709,FOODS_1_072_WI_3 -710,FOODS_1_073_CA_1 -711,FOODS_1_073_CA_2 -712,FOODS_1_073_CA_3 -713,FOODS_1_073_CA_4 -714,FOODS_1_073_TX_1 -715,FOODS_1_073_TX_2 -716,FOODS_1_073_TX_3 -717,FOODS_1_073_WI_1 -718,FOODS_1_073_WI_2 -719,FOODS_1_073_WI_3 -720,FOODS_1_074_CA_1 -721,FOODS_1_074_CA_2 -722,FOODS_1_074_CA_3 -723,FOODS_1_074_CA_4 -724,FOODS_1_074_TX_1 -725,FOODS_1_074_TX_2 -726,FOODS_1_074_TX_3 -727,FOODS_1_074_WI_1 -728,FOODS_1_074_WI_2 -729,FOODS_1_074_WI_3 -730,FOODS_1_075_CA_1 -731,FOODS_1_075_CA_2 -732,FOODS_1_075_CA_3 
-733,FOODS_1_075_CA_4 -734,FOODS_1_075_TX_1 -735,FOODS_1_075_TX_2 -736,FOODS_1_075_TX_3 -737,FOODS_1_075_WI_1 -738,FOODS_1_075_WI_2 -739,FOODS_1_075_WI_3 -740,FOODS_1_076_CA_1 -741,FOODS_1_076_CA_2 -742,FOODS_1_076_CA_3 -743,FOODS_1_076_CA_4 -744,FOODS_1_076_TX_1 -745,FOODS_1_076_TX_2 -746,FOODS_1_076_TX_3 -747,FOODS_1_076_WI_1 -748,FOODS_1_076_WI_2 -749,FOODS_1_076_WI_3 -750,FOODS_1_077_CA_1 -751,FOODS_1_077_CA_2 -752,FOODS_1_077_CA_3 -753,FOODS_1_077_CA_4 -754,FOODS_1_077_TX_1 -755,FOODS_1_077_TX_2 -756,FOODS_1_077_TX_3 -757,FOODS_1_077_WI_1 -758,FOODS_1_077_WI_2 -759,FOODS_1_077_WI_3 -760,FOODS_1_078_CA_1 -761,FOODS_1_078_CA_2 -762,FOODS_1_078_CA_3 -763,FOODS_1_078_CA_4 -764,FOODS_1_078_TX_1 -765,FOODS_1_078_TX_2 -766,FOODS_1_078_TX_3 -767,FOODS_1_078_WI_1 -768,FOODS_1_078_WI_2 -769,FOODS_1_078_WI_3 -770,FOODS_1_079_CA_1 -771,FOODS_1_079_CA_2 -772,FOODS_1_079_CA_3 -773,FOODS_1_079_CA_4 -774,FOODS_1_079_TX_1 -775,FOODS_1_079_TX_2 -776,FOODS_1_079_TX_3 -777,FOODS_1_079_WI_1 -778,FOODS_1_079_WI_2 -779,FOODS_1_079_WI_3 -780,FOODS_1_080_CA_1 -781,FOODS_1_080_CA_2 -782,FOODS_1_080_CA_3 -783,FOODS_1_080_CA_4 -784,FOODS_1_080_TX_1 -785,FOODS_1_080_TX_2 -786,FOODS_1_080_TX_3 -787,FOODS_1_080_WI_1 -788,FOODS_1_080_WI_2 -789,FOODS_1_080_WI_3 -790,FOODS_1_081_CA_1 -791,FOODS_1_081_CA_2 -792,FOODS_1_081_CA_3 -793,FOODS_1_081_CA_4 -794,FOODS_1_081_TX_1 -795,FOODS_1_081_TX_2 -796,FOODS_1_081_TX_3 -797,FOODS_1_081_WI_1 -798,FOODS_1_081_WI_2 -799,FOODS_1_081_WI_3 -800,FOODS_1_082_CA_1 -801,FOODS_1_082_CA_2 -802,FOODS_1_082_CA_3 -803,FOODS_1_082_CA_4 -804,FOODS_1_082_TX_1 -805,FOODS_1_082_TX_2 -806,FOODS_1_082_TX_3 -807,FOODS_1_082_WI_1 -808,FOODS_1_082_WI_2 -809,FOODS_1_082_WI_3 -810,FOODS_1_083_CA_1 -811,FOODS_1_083_CA_2 -812,FOODS_1_083_CA_3 -813,FOODS_1_083_CA_4 -814,FOODS_1_083_TX_1 -815,FOODS_1_083_TX_2 -816,FOODS_1_083_TX_3 -817,FOODS_1_083_WI_1 -818,FOODS_1_083_WI_2 -819,FOODS_1_083_WI_3 -820,FOODS_1_084_CA_1 -821,FOODS_1_084_CA_2 -822,FOODS_1_084_CA_3 
-823,FOODS_1_084_CA_4 -824,FOODS_1_084_TX_1 -825,FOODS_1_084_TX_2 -826,FOODS_1_084_TX_3 -827,FOODS_1_084_WI_1 -828,FOODS_1_084_WI_2 -829,FOODS_1_084_WI_3 -830,FOODS_1_085_CA_1 -831,FOODS_1_085_CA_2 -832,FOODS_1_085_CA_3 -833,FOODS_1_085_CA_4 -834,FOODS_1_085_TX_1 -835,FOODS_1_085_TX_2 -836,FOODS_1_085_TX_3 -837,FOODS_1_085_WI_1 -838,FOODS_1_085_WI_2 -839,FOODS_1_085_WI_3 -840,FOODS_1_086_CA_1 -841,FOODS_1_086_CA_2 -842,FOODS_1_086_CA_3 -843,FOODS_1_086_CA_4 -844,FOODS_1_086_TX_1 -845,FOODS_1_086_TX_2 -846,FOODS_1_086_TX_3 -847,FOODS_1_086_WI_1 -848,FOODS_1_086_WI_2 -849,FOODS_1_086_WI_3 -850,FOODS_1_087_CA_1 -851,FOODS_1_087_CA_2 -852,FOODS_1_087_CA_3 -853,FOODS_1_087_CA_4 -854,FOODS_1_087_TX_1 -855,FOODS_1_087_TX_2 -856,FOODS_1_087_TX_3 -857,FOODS_1_087_WI_1 -858,FOODS_1_087_WI_2 -859,FOODS_1_087_WI_3 -860,FOODS_1_088_CA_1 -861,FOODS_1_088_CA_2 -862,FOODS_1_088_CA_3 -863,FOODS_1_088_CA_4 -864,FOODS_1_088_TX_1 -865,FOODS_1_088_TX_2 -866,FOODS_1_088_TX_3 -867,FOODS_1_088_WI_1 -868,FOODS_1_088_WI_2 -869,FOODS_1_088_WI_3 -870,FOODS_1_089_CA_1 -871,FOODS_1_089_CA_2 -872,FOODS_1_089_CA_3 -873,FOODS_1_089_CA_4 -874,FOODS_1_089_TX_1 -875,FOODS_1_089_TX_2 -876,FOODS_1_089_TX_3 -877,FOODS_1_089_WI_1 -878,FOODS_1_089_WI_2 -879,FOODS_1_089_WI_3 -880,FOODS_1_090_CA_1 -881,FOODS_1_090_CA_2 -882,FOODS_1_090_CA_3 -883,FOODS_1_090_CA_4 -884,FOODS_1_090_TX_1 -885,FOODS_1_090_TX_2 -886,FOODS_1_090_TX_3 -887,FOODS_1_090_WI_1 -888,FOODS_1_090_WI_2 -889,FOODS_1_090_WI_3 -890,FOODS_1_091_CA_1 -891,FOODS_1_091_CA_2 -892,FOODS_1_091_CA_3 -893,FOODS_1_091_CA_4 -894,FOODS_1_091_TX_1 -895,FOODS_1_091_TX_2 -896,FOODS_1_091_TX_3 -897,FOODS_1_091_WI_1 -898,FOODS_1_091_WI_2 -899,FOODS_1_091_WI_3 -900,FOODS_1_092_CA_1 -901,FOODS_1_092_CA_2 -902,FOODS_1_092_CA_3 -903,FOODS_1_092_CA_4 -904,FOODS_1_092_TX_1 -905,FOODS_1_092_TX_2 -906,FOODS_1_092_TX_3 -907,FOODS_1_092_WI_1 -908,FOODS_1_092_WI_2 -909,FOODS_1_092_WI_3 -910,FOODS_1_093_CA_1 -911,FOODS_1_093_CA_2 -912,FOODS_1_093_CA_3 
-913,FOODS_1_093_CA_4 -914,FOODS_1_093_TX_1 -915,FOODS_1_093_TX_2 -916,FOODS_1_093_TX_3 -917,FOODS_1_093_WI_1 -918,FOODS_1_093_WI_2 -919,FOODS_1_093_WI_3 -920,FOODS_1_094_CA_1 -921,FOODS_1_094_CA_2 -922,FOODS_1_094_CA_3 -923,FOODS_1_094_CA_4 -924,FOODS_1_094_TX_1 -925,FOODS_1_094_TX_2 -926,FOODS_1_094_TX_3 -927,FOODS_1_094_WI_1 -928,FOODS_1_094_WI_2 -929,FOODS_1_094_WI_3 -930,FOODS_1_095_CA_1 -931,FOODS_1_095_CA_2 -932,FOODS_1_095_CA_3 -933,FOODS_1_095_CA_4 -934,FOODS_1_095_TX_1 -935,FOODS_1_095_TX_2 -936,FOODS_1_095_TX_3 -937,FOODS_1_095_WI_1 -938,FOODS_1_095_WI_2 -939,FOODS_1_095_WI_3 -940,FOODS_1_096_CA_1 -941,FOODS_1_096_CA_2 -942,FOODS_1_096_CA_3 -943,FOODS_1_096_CA_4 -944,FOODS_1_096_TX_1 -945,FOODS_1_096_TX_2 -946,FOODS_1_096_TX_3 -947,FOODS_1_096_WI_1 -948,FOODS_1_096_WI_2 -949,FOODS_1_096_WI_3 -950,FOODS_1_097_CA_1 -951,FOODS_1_097_CA_2 -952,FOODS_1_097_CA_3 -953,FOODS_1_097_CA_4 -954,FOODS_1_097_TX_1 -955,FOODS_1_097_TX_2 -956,FOODS_1_097_TX_3 -957,FOODS_1_097_WI_1 -958,FOODS_1_097_WI_2 -959,FOODS_1_097_WI_3 -960,FOODS_1_098_CA_1 -961,FOODS_1_098_CA_2 -962,FOODS_1_098_CA_3 -963,FOODS_1_098_CA_4 -964,FOODS_1_098_TX_1 -965,FOODS_1_098_TX_2 -966,FOODS_1_098_TX_3 -967,FOODS_1_098_WI_1 -968,FOODS_1_098_WI_2 -969,FOODS_1_098_WI_3 -970,FOODS_1_099_CA_1 -971,FOODS_1_099_CA_2 -972,FOODS_1_099_CA_3 -973,FOODS_1_099_CA_4 -974,FOODS_1_099_TX_1 -975,FOODS_1_099_TX_2 -976,FOODS_1_099_TX_3 -977,FOODS_1_099_WI_1 -978,FOODS_1_099_WI_2 -979,FOODS_1_099_WI_3 -980,FOODS_1_101_CA_1 -981,FOODS_1_101_CA_2 -982,FOODS_1_101_CA_3 -983,FOODS_1_101_CA_4 -984,FOODS_1_101_TX_1 -985,FOODS_1_101_TX_2 -986,FOODS_1_101_TX_3 -987,FOODS_1_101_WI_1 -988,FOODS_1_101_WI_2 -989,FOODS_1_101_WI_3 -990,FOODS_1_102_CA_1 -991,FOODS_1_102_CA_2 -992,FOODS_1_102_CA_3 -993,FOODS_1_102_CA_4 -994,FOODS_1_102_TX_1 -995,FOODS_1_102_TX_2 -996,FOODS_1_102_TX_3 -997,FOODS_1_102_WI_1 -998,FOODS_1_102_WI_2 -999,FOODS_1_102_WI_3 -1000,FOODS_1_103_CA_1 -1001,FOODS_1_103_CA_2 -1002,FOODS_1_103_CA_3 
-1003,FOODS_1_103_CA_4 -1004,FOODS_1_103_TX_1 -1005,FOODS_1_103_TX_2 -1006,FOODS_1_103_TX_3 -1007,FOODS_1_103_WI_1 -1008,FOODS_1_103_WI_2 -1009,FOODS_1_103_WI_3 -1010,FOODS_1_104_CA_1 -1011,FOODS_1_104_CA_2 -1012,FOODS_1_104_CA_3 -1013,FOODS_1_104_CA_4 -1014,FOODS_1_104_TX_1 -1015,FOODS_1_104_TX_2 -1016,FOODS_1_104_TX_3 -1017,FOODS_1_104_WI_1 -1018,FOODS_1_104_WI_2 -1019,FOODS_1_104_WI_3 -1020,FOODS_1_105_CA_1 -1021,FOODS_1_105_CA_2 -1022,FOODS_1_105_CA_3 -1023,FOODS_1_105_CA_4 -1024,FOODS_1_105_TX_1 -1025,FOODS_1_105_TX_2 -1026,FOODS_1_105_TX_3 -1027,FOODS_1_105_WI_1 -1028,FOODS_1_105_WI_2 -1029,FOODS_1_105_WI_3 -1030,FOODS_1_106_CA_1 -1031,FOODS_1_106_CA_2 -1032,FOODS_1_106_CA_3 -1033,FOODS_1_106_CA_4 -1034,FOODS_1_106_TX_1 -1035,FOODS_1_106_TX_2 -1036,FOODS_1_106_TX_3 -1037,FOODS_1_106_WI_1 -1038,FOODS_1_106_WI_2 -1039,FOODS_1_106_WI_3 -1040,FOODS_1_107_CA_1 -1041,FOODS_1_107_CA_2 -1042,FOODS_1_107_CA_3 -1043,FOODS_1_107_CA_4 -1044,FOODS_1_107_TX_1 -1045,FOODS_1_107_TX_2 -1046,FOODS_1_107_TX_3 -1047,FOODS_1_107_WI_1 -1048,FOODS_1_107_WI_2 -1049,FOODS_1_107_WI_3 -1050,FOODS_1_108_CA_1 -1051,FOODS_1_108_CA_2 -1052,FOODS_1_108_CA_3 -1053,FOODS_1_108_CA_4 -1054,FOODS_1_108_TX_1 -1055,FOODS_1_108_TX_2 -1056,FOODS_1_108_TX_3 -1057,FOODS_1_108_WI_1 -1058,FOODS_1_108_WI_2 -1059,FOODS_1_108_WI_3 -1060,FOODS_1_109_CA_1 -1061,FOODS_1_109_CA_2 -1062,FOODS_1_109_CA_3 -1063,FOODS_1_109_CA_4 -1064,FOODS_1_109_TX_1 -1065,FOODS_1_109_TX_2 -1066,FOODS_1_109_TX_3 -1067,FOODS_1_109_WI_1 -1068,FOODS_1_109_WI_2 -1069,FOODS_1_109_WI_3 -1070,FOODS_1_110_CA_1 -1071,FOODS_1_110_CA_2 -1072,FOODS_1_110_CA_3 -1073,FOODS_1_110_CA_4 -1074,FOODS_1_110_TX_1 -1075,FOODS_1_110_TX_2 -1076,FOODS_1_110_TX_3 -1077,FOODS_1_110_WI_1 -1078,FOODS_1_110_WI_2 -1079,FOODS_1_110_WI_3 -1080,FOODS_1_111_CA_1 -1081,FOODS_1_111_CA_2 -1082,FOODS_1_111_CA_3 -1083,FOODS_1_111_CA_4 -1084,FOODS_1_111_TX_1 -1085,FOODS_1_111_TX_2 -1086,FOODS_1_111_TX_3 -1087,FOODS_1_111_WI_1 -1088,FOODS_1_111_WI_2 
-1089,FOODS_1_111_WI_3 -1090,FOODS_1_112_CA_1 -1091,FOODS_1_112_CA_2 -1092,FOODS_1_112_CA_3 -1093,FOODS_1_112_CA_4 -1094,FOODS_1_112_TX_1 -1095,FOODS_1_112_TX_2 -1096,FOODS_1_112_TX_3 -1097,FOODS_1_112_WI_1 -1098,FOODS_1_112_WI_2 -1099,FOODS_1_112_WI_3 -1100,FOODS_1_113_CA_1 -1101,FOODS_1_113_CA_2 -1102,FOODS_1_113_CA_3 -1103,FOODS_1_113_CA_4 -1104,FOODS_1_113_TX_1 -1105,FOODS_1_113_TX_2 -1106,FOODS_1_113_TX_3 -1107,FOODS_1_113_WI_1 -1108,FOODS_1_113_WI_2 -1109,FOODS_1_113_WI_3 -1110,FOODS_1_114_CA_1 -1111,FOODS_1_114_CA_2 -1112,FOODS_1_114_CA_3 -1113,FOODS_1_114_CA_4 -1114,FOODS_1_114_TX_1 -1115,FOODS_1_114_TX_2 -1116,FOODS_1_114_TX_3 -1117,FOODS_1_114_WI_1 -1118,FOODS_1_114_WI_2 -1119,FOODS_1_114_WI_3 -1120,FOODS_1_115_CA_1 -1121,FOODS_1_115_CA_2 -1122,FOODS_1_115_CA_3 -1123,FOODS_1_115_CA_4 -1124,FOODS_1_115_TX_1 -1125,FOODS_1_115_TX_2 -1126,FOODS_1_115_TX_3 -1127,FOODS_1_115_WI_1 -1128,FOODS_1_115_WI_2 -1129,FOODS_1_115_WI_3 -1130,FOODS_1_116_CA_1 -1131,FOODS_1_116_CA_2 -1132,FOODS_1_116_CA_3 -1133,FOODS_1_116_CA_4 -1134,FOODS_1_116_TX_1 -1135,FOODS_1_116_TX_2 -1136,FOODS_1_116_TX_3 -1137,FOODS_1_116_WI_1 -1138,FOODS_1_116_WI_2 -1139,FOODS_1_116_WI_3 -1140,FOODS_1_117_CA_1 -1141,FOODS_1_117_CA_2 -1142,FOODS_1_117_CA_3 -1143,FOODS_1_117_CA_4 -1144,FOODS_1_117_TX_1 -1145,FOODS_1_117_TX_2 -1146,FOODS_1_117_TX_3 -1147,FOODS_1_117_WI_1 -1148,FOODS_1_117_WI_2 -1149,FOODS_1_117_WI_3 -1150,FOODS_1_118_CA_1 -1151,FOODS_1_118_CA_2 -1152,FOODS_1_118_CA_3 -1153,FOODS_1_118_CA_4 -1154,FOODS_1_118_TX_1 -1155,FOODS_1_118_TX_2 -1156,FOODS_1_118_TX_3 -1157,FOODS_1_118_WI_1 -1158,FOODS_1_118_WI_2 -1159,FOODS_1_118_WI_3 -1160,FOODS_1_119_CA_1 -1161,FOODS_1_119_CA_2 -1162,FOODS_1_119_CA_3 -1163,FOODS_1_119_CA_4 -1164,FOODS_1_119_TX_1 -1165,FOODS_1_119_TX_2 -1166,FOODS_1_119_TX_3 -1167,FOODS_1_119_WI_1 -1168,FOODS_1_119_WI_2 -1169,FOODS_1_119_WI_3 -1170,FOODS_1_120_CA_1 -1171,FOODS_1_120_CA_2 -1172,FOODS_1_120_CA_3 -1173,FOODS_1_120_CA_4 -1174,FOODS_1_120_TX_1 
-1175,FOODS_1_120_TX_2 -1176,FOODS_1_120_TX_3 -1177,FOODS_1_120_WI_1 -1178,FOODS_1_120_WI_2 -1179,FOODS_1_120_WI_3 -1180,FOODS_1_121_CA_1 -1181,FOODS_1_121_CA_2 -1182,FOODS_1_121_CA_3 -1183,FOODS_1_121_CA_4 -1184,FOODS_1_121_TX_1 -1185,FOODS_1_121_TX_2 -1186,FOODS_1_121_TX_3 -1187,FOODS_1_121_WI_1 -1188,FOODS_1_121_WI_2 -1189,FOODS_1_121_WI_3 -1190,FOODS_1_122_CA_1 -1191,FOODS_1_122_CA_2 -1192,FOODS_1_122_CA_3 -1193,FOODS_1_122_CA_4 -1194,FOODS_1_122_TX_1 -1195,FOODS_1_122_TX_2 -1196,FOODS_1_122_TX_3 -1197,FOODS_1_122_WI_1 -1198,FOODS_1_122_WI_2 -1199,FOODS_1_122_WI_3 -1200,FOODS_1_123_CA_1 -1201,FOODS_1_123_CA_2 -1202,FOODS_1_123_CA_3 -1203,FOODS_1_123_CA_4 -1204,FOODS_1_123_TX_1 -1205,FOODS_1_123_TX_2 -1206,FOODS_1_123_TX_3 -1207,FOODS_1_123_WI_1 -1208,FOODS_1_123_WI_2 -1209,FOODS_1_123_WI_3 -1210,FOODS_1_124_CA_1 -1211,FOODS_1_124_CA_2 -1212,FOODS_1_124_CA_3 -1213,FOODS_1_124_CA_4 -1214,FOODS_1_124_TX_1 -1215,FOODS_1_124_TX_2 -1216,FOODS_1_124_TX_3 -1217,FOODS_1_124_WI_1 -1218,FOODS_1_124_WI_2 -1219,FOODS_1_124_WI_3 -1220,FOODS_1_125_CA_1 -1221,FOODS_1_125_CA_2 -1222,FOODS_1_125_CA_3 -1223,FOODS_1_125_CA_4 -1224,FOODS_1_125_TX_1 -1225,FOODS_1_125_TX_2 -1226,FOODS_1_125_TX_3 -1227,FOODS_1_125_WI_1 -1228,FOODS_1_125_WI_2 -1229,FOODS_1_125_WI_3 -1230,FOODS_1_126_CA_1 -1231,FOODS_1_126_CA_2 -1232,FOODS_1_126_CA_3 -1233,FOODS_1_126_CA_4 -1234,FOODS_1_126_TX_1 -1235,FOODS_1_126_TX_2 -1236,FOODS_1_126_TX_3 -1237,FOODS_1_126_WI_1 -1238,FOODS_1_126_WI_2 -1239,FOODS_1_126_WI_3 -1240,FOODS_1_127_CA_1 -1241,FOODS_1_127_CA_2 -1242,FOODS_1_127_CA_3 -1243,FOODS_1_127_CA_4 -1244,FOODS_1_127_TX_1 -1245,FOODS_1_127_TX_2 -1246,FOODS_1_127_TX_3 -1247,FOODS_1_127_WI_1 -1248,FOODS_1_127_WI_2 -1249,FOODS_1_127_WI_3 -1250,FOODS_1_128_CA_1 -1251,FOODS_1_128_CA_2 -1252,FOODS_1_128_CA_3 -1253,FOODS_1_128_CA_4 -1254,FOODS_1_128_TX_1 -1255,FOODS_1_128_TX_2 -1256,FOODS_1_128_TX_3 -1257,FOODS_1_128_WI_1 -1258,FOODS_1_128_WI_2 -1259,FOODS_1_128_WI_3 -1260,FOODS_1_129_CA_1 
-1261,FOODS_1_129_CA_2 -1262,FOODS_1_129_CA_3 -1263,FOODS_1_129_CA_4 -1264,FOODS_1_129_TX_1 -1265,FOODS_1_129_TX_2 -1266,FOODS_1_129_TX_3 -1267,FOODS_1_129_WI_1 -1268,FOODS_1_129_WI_2 -1269,FOODS_1_129_WI_3 -1270,FOODS_1_130_CA_1 -1271,FOODS_1_130_CA_2 -1272,FOODS_1_130_CA_3 -1273,FOODS_1_130_CA_4 -1274,FOODS_1_130_TX_1 -1275,FOODS_1_130_TX_2 -1276,FOODS_1_130_TX_3 -1277,FOODS_1_130_WI_1 -1278,FOODS_1_130_WI_2 -1279,FOODS_1_130_WI_3 -1280,FOODS_1_131_CA_1 -1281,FOODS_1_131_CA_2 -1282,FOODS_1_131_CA_3 -1283,FOODS_1_131_CA_4 -1284,FOODS_1_131_TX_1 -1285,FOODS_1_131_TX_2 -1286,FOODS_1_131_TX_3 -1287,FOODS_1_131_WI_1 -1288,FOODS_1_131_WI_2 -1289,FOODS_1_131_WI_3 -1290,FOODS_1_132_CA_1 -1291,FOODS_1_132_CA_2 -1292,FOODS_1_132_CA_3 -1293,FOODS_1_132_CA_4 -1294,FOODS_1_132_TX_1 -1295,FOODS_1_132_TX_2 -1296,FOODS_1_132_TX_3 -1297,FOODS_1_132_WI_1 -1298,FOODS_1_132_WI_2 -1299,FOODS_1_132_WI_3 -1300,FOODS_1_133_CA_1 -1301,FOODS_1_133_CA_2 -1302,FOODS_1_133_CA_3 -1303,FOODS_1_133_CA_4 -1304,FOODS_1_133_TX_1 -1305,FOODS_1_133_TX_2 -1306,FOODS_1_133_TX_3 -1307,FOODS_1_133_WI_1 -1308,FOODS_1_133_WI_2 -1309,FOODS_1_133_WI_3 -1310,FOODS_1_134_CA_1 -1311,FOODS_1_134_CA_2 -1312,FOODS_1_134_CA_3 -1313,FOODS_1_134_CA_4 -1314,FOODS_1_134_TX_1 -1315,FOODS_1_134_TX_2 -1316,FOODS_1_134_TX_3 -1317,FOODS_1_134_WI_1 -1318,FOODS_1_134_WI_2 -1319,FOODS_1_134_WI_3 -1320,FOODS_1_135_CA_1 -1321,FOODS_1_135_CA_2 -1322,FOODS_1_135_CA_3 -1323,FOODS_1_135_CA_4 -1324,FOODS_1_135_TX_1 -1325,FOODS_1_135_TX_2 -1326,FOODS_1_135_TX_3 -1327,FOODS_1_135_WI_1 -1328,FOODS_1_135_WI_2 -1329,FOODS_1_135_WI_3 -1330,FOODS_1_136_CA_1 -1331,FOODS_1_136_CA_2 -1332,FOODS_1_136_CA_3 -1333,FOODS_1_136_CA_4 -1334,FOODS_1_136_TX_1 -1335,FOODS_1_136_TX_2 -1336,FOODS_1_136_TX_3 -1337,FOODS_1_136_WI_1 -1338,FOODS_1_136_WI_2 -1339,FOODS_1_136_WI_3 -1340,FOODS_1_137_CA_1 -1341,FOODS_1_137_CA_2 -1342,FOODS_1_137_CA_3 -1343,FOODS_1_137_CA_4 -1344,FOODS_1_137_TX_1 -1345,FOODS_1_137_TX_2 -1346,FOODS_1_137_TX_3 
-1347,FOODS_1_137_WI_1 -1348,FOODS_1_137_WI_2 -1349,FOODS_1_137_WI_3 -1350,FOODS_1_138_CA_1 -1351,FOODS_1_138_CA_2 -1352,FOODS_1_138_CA_3 -1353,FOODS_1_138_CA_4 -1354,FOODS_1_138_TX_1 -1355,FOODS_1_138_TX_2 -1356,FOODS_1_138_TX_3 -1357,FOODS_1_138_WI_1 -1358,FOODS_1_138_WI_2 -1359,FOODS_1_138_WI_3 -1360,FOODS_1_139_CA_1 -1361,FOODS_1_139_CA_2 -1362,FOODS_1_139_CA_3 -1363,FOODS_1_139_CA_4 -1364,FOODS_1_139_TX_1 -1365,FOODS_1_139_TX_2 -1366,FOODS_1_139_TX_3 -1367,FOODS_1_139_WI_1 -1368,FOODS_1_139_WI_2 -1369,FOODS_1_139_WI_3 -1370,FOODS_1_140_CA_1 -1371,FOODS_1_140_CA_2 -1372,FOODS_1_140_CA_3 -1373,FOODS_1_140_CA_4 -1374,FOODS_1_140_TX_1 -1375,FOODS_1_140_TX_2 -1376,FOODS_1_140_TX_3 -1377,FOODS_1_140_WI_1 -1378,FOODS_1_140_WI_2 -1379,FOODS_1_140_WI_3 -1380,FOODS_1_141_CA_1 -1381,FOODS_1_141_CA_2 -1382,FOODS_1_141_CA_3 -1383,FOODS_1_141_CA_4 -1384,FOODS_1_141_TX_1 -1385,FOODS_1_141_TX_2 -1386,FOODS_1_141_TX_3 -1387,FOODS_1_141_WI_1 -1388,FOODS_1_141_WI_2 -1389,FOODS_1_141_WI_3 -1390,FOODS_1_142_CA_1 -1391,FOODS_1_142_CA_2 -1392,FOODS_1_142_CA_3 -1393,FOODS_1_142_CA_4 -1394,FOODS_1_142_TX_1 -1395,FOODS_1_142_TX_2 -1396,FOODS_1_142_TX_3 -1397,FOODS_1_142_WI_1 -1398,FOODS_1_142_WI_2 -1399,FOODS_1_142_WI_3 -1400,FOODS_1_143_CA_1 -1401,FOODS_1_143_CA_2 -1402,FOODS_1_143_CA_3 -1403,FOODS_1_143_CA_4 -1404,FOODS_1_143_TX_1 -1405,FOODS_1_143_TX_2 -1406,FOODS_1_143_TX_3 -1407,FOODS_1_143_WI_1 -1408,FOODS_1_143_WI_2 -1409,FOODS_1_143_WI_3 -1410,FOODS_1_144_CA_1 -1411,FOODS_1_144_CA_2 -1412,FOODS_1_144_CA_3 -1413,FOODS_1_144_CA_4 -1414,FOODS_1_144_TX_1 -1415,FOODS_1_144_TX_2 -1416,FOODS_1_144_TX_3 -1417,FOODS_1_144_WI_1 -1418,FOODS_1_144_WI_2 -1419,FOODS_1_144_WI_3 -1420,FOODS_1_145_CA_1 -1421,FOODS_1_145_CA_2 -1422,FOODS_1_145_CA_3 -1423,FOODS_1_145_CA_4 -1424,FOODS_1_145_TX_1 -1425,FOODS_1_145_TX_2 -1426,FOODS_1_145_TX_3 -1427,FOODS_1_145_WI_1 -1428,FOODS_1_145_WI_2 -1429,FOODS_1_145_WI_3 -1430,FOODS_1_146_CA_1 -1431,FOODS_1_146_CA_2 -1432,FOODS_1_146_CA_3 
-1433,FOODS_1_146_CA_4 -1434,FOODS_1_146_TX_1 -1435,FOODS_1_146_TX_2 -1436,FOODS_1_146_TX_3 -1437,FOODS_1_146_WI_1 -1438,FOODS_1_146_WI_2 -1439,FOODS_1_146_WI_3 -1440,FOODS_1_147_CA_1 -1441,FOODS_1_147_CA_2 -1442,FOODS_1_147_CA_3 -1443,FOODS_1_147_CA_4 -1444,FOODS_1_147_TX_1 -1445,FOODS_1_147_TX_2 -1446,FOODS_1_147_TX_3 -1447,FOODS_1_147_WI_1 -1448,FOODS_1_147_WI_2 -1449,FOODS_1_147_WI_3 -1450,FOODS_1_148_CA_1 -1451,FOODS_1_148_CA_2 -1452,FOODS_1_148_CA_3 -1453,FOODS_1_148_CA_4 -1454,FOODS_1_148_TX_1 -1455,FOODS_1_148_TX_2 -1456,FOODS_1_148_TX_3 -1457,FOODS_1_148_WI_1 -1458,FOODS_1_148_WI_2 -1459,FOODS_1_148_WI_3 -1460,FOODS_1_149_CA_1 -1461,FOODS_1_149_CA_2 -1462,FOODS_1_149_CA_3 -1463,FOODS_1_149_CA_4 -1464,FOODS_1_149_TX_1 -1465,FOODS_1_149_TX_2 -1466,FOODS_1_149_TX_3 -1467,FOODS_1_149_WI_1 -1468,FOODS_1_149_WI_2 -1469,FOODS_1_149_WI_3 -1470,FOODS_1_150_CA_1 -1471,FOODS_1_150_CA_2 -1472,FOODS_1_150_CA_3 -1473,FOODS_1_150_CA_4 -1474,FOODS_1_150_TX_1 -1475,FOODS_1_150_TX_2 -1476,FOODS_1_150_TX_3 -1477,FOODS_1_150_WI_1 -1478,FOODS_1_150_WI_2 -1479,FOODS_1_150_WI_3 -1480,FOODS_1_151_CA_1 -1481,FOODS_1_151_CA_2 -1482,FOODS_1_151_CA_3 -1483,FOODS_1_151_CA_4 -1484,FOODS_1_151_TX_1 -1485,FOODS_1_151_TX_2 -1486,FOODS_1_151_TX_3 -1487,FOODS_1_151_WI_1 -1488,FOODS_1_151_WI_2 -1489,FOODS_1_151_WI_3 -1490,FOODS_1_152_CA_1 -1491,FOODS_1_152_CA_2 -1492,FOODS_1_152_CA_3 -1493,FOODS_1_152_CA_4 -1494,FOODS_1_152_TX_1 -1495,FOODS_1_152_TX_2 -1496,FOODS_1_152_TX_3 -1497,FOODS_1_152_WI_1 -1498,FOODS_1_152_WI_2 -1499,FOODS_1_152_WI_3 -1500,FOODS_1_153_CA_1 -1501,FOODS_1_153_CA_2 -1502,FOODS_1_153_CA_3 -1503,FOODS_1_153_CA_4 -1504,FOODS_1_153_TX_1 -1505,FOODS_1_153_TX_2 -1506,FOODS_1_153_TX_3 -1507,FOODS_1_153_WI_1 -1508,FOODS_1_153_WI_2 -1509,FOODS_1_153_WI_3 -1510,FOODS_1_154_CA_1 -1511,FOODS_1_154_CA_2 -1512,FOODS_1_154_CA_3 -1513,FOODS_1_154_CA_4 -1514,FOODS_1_154_TX_1 -1515,FOODS_1_154_TX_2 -1516,FOODS_1_154_TX_3 -1517,FOODS_1_154_WI_1 -1518,FOODS_1_154_WI_2 
-1519,FOODS_1_154_WI_3 -1520,FOODS_1_155_CA_1 -1521,FOODS_1_155_CA_2 -1522,FOODS_1_155_CA_3 -1523,FOODS_1_155_CA_4 -1524,FOODS_1_155_TX_1 -1525,FOODS_1_155_TX_2 -1526,FOODS_1_155_TX_3 -1527,FOODS_1_155_WI_1 -1528,FOODS_1_155_WI_2 -1529,FOODS_1_155_WI_3 -1530,FOODS_1_156_CA_1 -1531,FOODS_1_156_CA_2 -1532,FOODS_1_156_CA_3 -1533,FOODS_1_156_CA_4 -1534,FOODS_1_156_TX_1 -1535,FOODS_1_156_TX_2 -1536,FOODS_1_156_TX_3 -1537,FOODS_1_156_WI_1 -1538,FOODS_1_156_WI_2 -1539,FOODS_1_156_WI_3 -1540,FOODS_1_157_CA_1 -1541,FOODS_1_157_CA_2 -1542,FOODS_1_157_CA_3 -1543,FOODS_1_157_CA_4 -1544,FOODS_1_157_TX_1 -1545,FOODS_1_157_TX_2 -1546,FOODS_1_157_TX_3 -1547,FOODS_1_157_WI_1 -1548,FOODS_1_157_WI_2 -1549,FOODS_1_157_WI_3 -1550,FOODS_1_158_CA_1 -1551,FOODS_1_158_CA_2 -1552,FOODS_1_158_CA_3 -1553,FOODS_1_158_CA_4 -1554,FOODS_1_158_TX_1 -1555,FOODS_1_158_TX_2 -1556,FOODS_1_158_TX_3 -1557,FOODS_1_158_WI_1 -1558,FOODS_1_158_WI_2 -1559,FOODS_1_158_WI_3 -1560,FOODS_1_159_CA_1 -1561,FOODS_1_159_CA_2 -1562,FOODS_1_159_CA_3 -1563,FOODS_1_159_CA_4 -1564,FOODS_1_159_TX_1 -1565,FOODS_1_159_TX_2 -1566,FOODS_1_159_TX_3 -1567,FOODS_1_159_WI_1 -1568,FOODS_1_159_WI_2 -1569,FOODS_1_159_WI_3 -1570,FOODS_1_160_CA_1 -1571,FOODS_1_160_CA_2 -1572,FOODS_1_160_CA_3 -1573,FOODS_1_160_CA_4 -1574,FOODS_1_160_TX_1 -1575,FOODS_1_160_TX_2 -1576,FOODS_1_160_TX_3 -1577,FOODS_1_160_WI_1 -1578,FOODS_1_160_WI_2 -1579,FOODS_1_160_WI_3 -1580,FOODS_1_161_CA_1 -1581,FOODS_1_161_CA_2 -1582,FOODS_1_161_CA_3 -1583,FOODS_1_161_CA_4 -1584,FOODS_1_161_TX_1 -1585,FOODS_1_161_TX_2 -1586,FOODS_1_161_TX_3 -1587,FOODS_1_161_WI_1 -1588,FOODS_1_161_WI_2 -1589,FOODS_1_161_WI_3 -1590,FOODS_1_162_CA_1 -1591,FOODS_1_162_CA_2 -1592,FOODS_1_162_CA_3 -1593,FOODS_1_162_CA_4 -1594,FOODS_1_162_TX_1 -1595,FOODS_1_162_TX_2 -1596,FOODS_1_162_TX_3 -1597,FOODS_1_162_WI_1 -1598,FOODS_1_162_WI_2 -1599,FOODS_1_162_WI_3 -1600,FOODS_1_163_CA_1 -1601,FOODS_1_163_CA_2 -1602,FOODS_1_163_CA_3 -1603,FOODS_1_163_CA_4 -1604,FOODS_1_163_TX_1 
-1605,FOODS_1_163_TX_2 -1606,FOODS_1_163_TX_3 -1607,FOODS_1_163_WI_1 -1608,FOODS_1_163_WI_2 -1609,FOODS_1_163_WI_3 -1610,FOODS_1_164_CA_1 -1611,FOODS_1_164_CA_2 -1612,FOODS_1_164_CA_3 -1613,FOODS_1_164_CA_4 -1614,FOODS_1_164_TX_1 -1615,FOODS_1_164_TX_2 -1616,FOODS_1_164_TX_3 -1617,FOODS_1_164_WI_1 -1618,FOODS_1_164_WI_2 -1619,FOODS_1_164_WI_3 -1620,FOODS_1_166_CA_1 -1621,FOODS_1_166_CA_2 -1622,FOODS_1_166_CA_3 -1623,FOODS_1_166_CA_4 -1624,FOODS_1_166_TX_1 -1625,FOODS_1_166_TX_2 -1626,FOODS_1_166_TX_3 -1627,FOODS_1_166_WI_1 -1628,FOODS_1_166_WI_2 -1629,FOODS_1_166_WI_3 -1630,FOODS_1_167_CA_1 -1631,FOODS_1_167_CA_2 -1632,FOODS_1_167_CA_3 -1633,FOODS_1_167_CA_4 -1634,FOODS_1_167_TX_1 -1635,FOODS_1_167_TX_2 -1636,FOODS_1_167_TX_3 -1637,FOODS_1_167_WI_1 -1638,FOODS_1_167_WI_2 -1639,FOODS_1_167_WI_3 -1640,FOODS_1_168_CA_1 -1641,FOODS_1_168_CA_2 -1642,FOODS_1_168_CA_3 -1643,FOODS_1_168_CA_4 -1644,FOODS_1_168_TX_1 -1645,FOODS_1_168_TX_2 -1646,FOODS_1_168_TX_3 -1647,FOODS_1_168_WI_1 -1648,FOODS_1_168_WI_2 -1649,FOODS_1_168_WI_3 -1650,FOODS_1_169_CA_1 -1651,FOODS_1_169_CA_2 -1652,FOODS_1_169_CA_3 -1653,FOODS_1_169_CA_4 -1654,FOODS_1_169_TX_1 -1655,FOODS_1_169_TX_2 -1656,FOODS_1_169_TX_3 -1657,FOODS_1_169_WI_1 -1658,FOODS_1_169_WI_2 -1659,FOODS_1_169_WI_3 -1660,FOODS_1_170_CA_1 -1661,FOODS_1_170_CA_2 -1662,FOODS_1_170_CA_3 -1663,FOODS_1_170_CA_4 -1664,FOODS_1_170_TX_1 -1665,FOODS_1_170_TX_2 -1666,FOODS_1_170_TX_3 -1667,FOODS_1_170_WI_1 -1668,FOODS_1_170_WI_2 -1669,FOODS_1_170_WI_3 -1670,FOODS_1_171_CA_1 -1671,FOODS_1_171_CA_2 -1672,FOODS_1_171_CA_3 -1673,FOODS_1_171_CA_4 -1674,FOODS_1_171_TX_1 -1675,FOODS_1_171_TX_2 -1676,FOODS_1_171_TX_3 -1677,FOODS_1_171_WI_1 -1678,FOODS_1_171_WI_2 -1679,FOODS_1_171_WI_3 -1680,FOODS_1_172_CA_1 -1681,FOODS_1_172_CA_2 -1682,FOODS_1_172_CA_3 -1683,FOODS_1_172_CA_4 -1684,FOODS_1_172_TX_1 -1685,FOODS_1_172_TX_2 -1686,FOODS_1_172_TX_3 -1687,FOODS_1_172_WI_1 -1688,FOODS_1_172_WI_2 -1689,FOODS_1_172_WI_3 -1690,FOODS_1_173_CA_1 
-1691,FOODS_1_173_CA_2 -1692,FOODS_1_173_CA_3 -1693,FOODS_1_173_CA_4 -1694,FOODS_1_173_TX_1 -1695,FOODS_1_173_TX_2 -1696,FOODS_1_173_TX_3 -1697,FOODS_1_173_WI_1 -1698,FOODS_1_173_WI_2 -1699,FOODS_1_173_WI_3 -1700,FOODS_1_174_CA_1 -1701,FOODS_1_174_CA_2 -1702,FOODS_1_174_CA_3 -1703,FOODS_1_174_CA_4 -1704,FOODS_1_174_TX_1 -1705,FOODS_1_174_TX_2 -1706,FOODS_1_174_TX_3 -1707,FOODS_1_174_WI_1 -1708,FOODS_1_174_WI_2 -1709,FOODS_1_174_WI_3 -1710,FOODS_1_175_CA_1 -1711,FOODS_1_175_CA_2 -1712,FOODS_1_175_CA_3 -1713,FOODS_1_175_CA_4 -1714,FOODS_1_175_TX_1 -1715,FOODS_1_175_TX_2 -1716,FOODS_1_175_TX_3 -1717,FOODS_1_175_WI_1 -1718,FOODS_1_175_WI_2 -1719,FOODS_1_175_WI_3 -1720,FOODS_1_176_CA_1 -1721,FOODS_1_176_CA_2 -1722,FOODS_1_176_CA_3 -1723,FOODS_1_176_CA_4 -1724,FOODS_1_176_TX_1 -1725,FOODS_1_176_TX_2 -1726,FOODS_1_176_TX_3 -1727,FOODS_1_176_WI_1 -1728,FOODS_1_176_WI_2 -1729,FOODS_1_176_WI_3 -1730,FOODS_1_177_CA_1 -1731,FOODS_1_177_CA_2 -1732,FOODS_1_177_CA_3 -1733,FOODS_1_177_CA_4 -1734,FOODS_1_177_TX_1 -1735,FOODS_1_177_TX_2 -1736,FOODS_1_177_TX_3 -1737,FOODS_1_177_WI_1 -1738,FOODS_1_177_WI_2 -1739,FOODS_1_177_WI_3 -1740,FOODS_1_178_CA_1 -1741,FOODS_1_178_CA_2 -1742,FOODS_1_178_CA_3 -1743,FOODS_1_178_CA_4 -1744,FOODS_1_178_TX_1 -1745,FOODS_1_178_TX_2 -1746,FOODS_1_178_TX_3 -1747,FOODS_1_178_WI_1 -1748,FOODS_1_178_WI_2 -1749,FOODS_1_178_WI_3 -1750,FOODS_1_179_CA_1 -1751,FOODS_1_179_CA_2 -1752,FOODS_1_179_CA_3 -1753,FOODS_1_179_CA_4 -1754,FOODS_1_179_TX_1 -1755,FOODS_1_179_TX_2 -1756,FOODS_1_179_TX_3 -1757,FOODS_1_179_WI_1 -1758,FOODS_1_179_WI_2 -1759,FOODS_1_179_WI_3 -1760,FOODS_1_180_CA_1 -1761,FOODS_1_180_CA_2 -1762,FOODS_1_180_CA_3 -1763,FOODS_1_180_CA_4 -1764,FOODS_1_180_TX_1 -1765,FOODS_1_180_TX_2 -1766,FOODS_1_180_TX_3 -1767,FOODS_1_180_WI_1 -1768,FOODS_1_180_WI_2 -1769,FOODS_1_180_WI_3 -1770,FOODS_1_181_CA_1 -1771,FOODS_1_181_CA_2 -1772,FOODS_1_181_CA_3 -1773,FOODS_1_181_CA_4 -1774,FOODS_1_181_TX_1 -1775,FOODS_1_181_TX_2 -1776,FOODS_1_181_TX_3 
-1777,FOODS_1_181_WI_1 -1778,FOODS_1_181_WI_2 -1779,FOODS_1_181_WI_3 -1780,FOODS_1_182_CA_1 -1781,FOODS_1_182_CA_2 -1782,FOODS_1_182_CA_3 -1783,FOODS_1_182_CA_4 -1784,FOODS_1_182_TX_1 -1785,FOODS_1_182_TX_2 -1786,FOODS_1_182_TX_3 -1787,FOODS_1_182_WI_1 -1788,FOODS_1_182_WI_2 -1789,FOODS_1_182_WI_3 -1790,FOODS_1_183_CA_1 -1791,FOODS_1_183_CA_2 -1792,FOODS_1_183_CA_3 -1793,FOODS_1_183_CA_4 -1794,FOODS_1_183_TX_1 -1795,FOODS_1_183_TX_2 -1796,FOODS_1_183_TX_3 -1797,FOODS_1_183_WI_1 -1798,FOODS_1_183_WI_2 -1799,FOODS_1_183_WI_3 -1800,FOODS_1_184_CA_1 -1801,FOODS_1_184_CA_2 -1802,FOODS_1_184_CA_3 -1803,FOODS_1_184_CA_4 -1804,FOODS_1_184_TX_1 -1805,FOODS_1_184_TX_2 -1806,FOODS_1_184_TX_3 -1807,FOODS_1_184_WI_1 -1808,FOODS_1_184_WI_2 -1809,FOODS_1_184_WI_3 -1810,FOODS_1_185_CA_1 -1811,FOODS_1_185_CA_2 -1812,FOODS_1_185_CA_3 -1813,FOODS_1_185_CA_4 -1814,FOODS_1_185_TX_1 -1815,FOODS_1_185_TX_2 -1816,FOODS_1_185_TX_3 -1817,FOODS_1_185_WI_1 -1818,FOODS_1_185_WI_2 -1819,FOODS_1_185_WI_3 -1820,FOODS_1_186_CA_1 -1821,FOODS_1_186_CA_2 -1822,FOODS_1_186_CA_3 -1823,FOODS_1_186_CA_4 -1824,FOODS_1_186_TX_1 -1825,FOODS_1_186_TX_2 -1826,FOODS_1_186_TX_3 -1827,FOODS_1_186_WI_1 -1828,FOODS_1_186_WI_2 -1829,FOODS_1_186_WI_3 -1830,FOODS_1_187_CA_1 -1831,FOODS_1_187_CA_2 -1832,FOODS_1_187_CA_3 -1833,FOODS_1_187_CA_4 -1834,FOODS_1_187_TX_1 -1835,FOODS_1_187_TX_2 -1836,FOODS_1_187_TX_3 -1837,FOODS_1_187_WI_1 -1838,FOODS_1_187_WI_2 -1839,FOODS_1_187_WI_3 -1840,FOODS_1_188_CA_1 -1841,FOODS_1_188_CA_2 -1842,FOODS_1_188_CA_3 -1843,FOODS_1_188_CA_4 -1844,FOODS_1_188_TX_1 -1845,FOODS_1_188_TX_2 -1846,FOODS_1_188_TX_3 -1847,FOODS_1_188_WI_1 -1848,FOODS_1_188_WI_2 -1849,FOODS_1_188_WI_3 -1850,FOODS_1_189_CA_1 -1851,FOODS_1_189_CA_2 -1852,FOODS_1_189_CA_3 -1853,FOODS_1_189_CA_4 -1854,FOODS_1_189_TX_1 -1855,FOODS_1_189_TX_2 -1856,FOODS_1_189_TX_3 -1857,FOODS_1_189_WI_1 -1858,FOODS_1_189_WI_2 -1859,FOODS_1_189_WI_3 -1860,FOODS_1_190_CA_1 -1861,FOODS_1_190_CA_2 -1862,FOODS_1_190_CA_3 
-1863,FOODS_1_190_CA_4 -1864,FOODS_1_190_TX_1 -1865,FOODS_1_190_TX_2 -1866,FOODS_1_190_TX_3 -1867,FOODS_1_190_WI_1 -1868,FOODS_1_190_WI_2 -1869,FOODS_1_190_WI_3 -1870,FOODS_1_191_CA_1 -1871,FOODS_1_191_CA_2 -1872,FOODS_1_191_CA_3 -1873,FOODS_1_191_CA_4 -1874,FOODS_1_191_TX_1 -1875,FOODS_1_191_TX_2 -1876,FOODS_1_191_TX_3 -1877,FOODS_1_191_WI_1 -1878,FOODS_1_191_WI_2 -1879,FOODS_1_191_WI_3 -1880,FOODS_1_192_CA_1 -1881,FOODS_1_192_CA_2 -1882,FOODS_1_192_CA_3 -1883,FOODS_1_192_CA_4 -1884,FOODS_1_192_TX_1 -1885,FOODS_1_192_TX_2 -1886,FOODS_1_192_TX_3 -1887,FOODS_1_192_WI_1 -1888,FOODS_1_192_WI_2 -1889,FOODS_1_192_WI_3 -1890,FOODS_1_193_CA_1 -1891,FOODS_1_193_CA_2 -1892,FOODS_1_193_CA_3 -1893,FOODS_1_193_CA_4 -1894,FOODS_1_193_TX_1 -1895,FOODS_1_193_TX_2 -1896,FOODS_1_193_TX_3 -1897,FOODS_1_193_WI_1 -1898,FOODS_1_193_WI_2 -1899,FOODS_1_193_WI_3 -1900,FOODS_1_194_CA_1 -1901,FOODS_1_194_CA_2 -1902,FOODS_1_194_CA_3 -1903,FOODS_1_194_CA_4 -1904,FOODS_1_194_TX_1 -1905,FOODS_1_194_TX_2 -1906,FOODS_1_194_TX_3 -1907,FOODS_1_194_WI_1 -1908,FOODS_1_194_WI_2 -1909,FOODS_1_194_WI_3 -1910,FOODS_1_195_CA_1 -1911,FOODS_1_195_CA_2 -1912,FOODS_1_195_CA_3 -1913,FOODS_1_195_CA_4 -1914,FOODS_1_195_TX_1 -1915,FOODS_1_195_TX_2 -1916,FOODS_1_195_TX_3 -1917,FOODS_1_195_WI_1 -1918,FOODS_1_195_WI_2 -1919,FOODS_1_195_WI_3 -1920,FOODS_1_196_CA_1 -1921,FOODS_1_196_CA_2 -1922,FOODS_1_196_CA_3 -1923,FOODS_1_196_CA_4 -1924,FOODS_1_196_TX_1 -1925,FOODS_1_196_TX_2 -1926,FOODS_1_196_TX_3 -1927,FOODS_1_196_WI_1 -1928,FOODS_1_196_WI_2 -1929,FOODS_1_196_WI_3 -1930,FOODS_1_197_CA_1 -1931,FOODS_1_197_CA_2 -1932,FOODS_1_197_CA_3 -1933,FOODS_1_197_CA_4 -1934,FOODS_1_197_TX_1 -1935,FOODS_1_197_TX_2 -1936,FOODS_1_197_TX_3 -1937,FOODS_1_197_WI_1 -1938,FOODS_1_197_WI_2 -1939,FOODS_1_197_WI_3 -1940,FOODS_1_198_CA_1 -1941,FOODS_1_198_CA_2 -1942,FOODS_1_198_CA_3 -1943,FOODS_1_198_CA_4 -1944,FOODS_1_198_TX_1 -1945,FOODS_1_198_TX_2 -1946,FOODS_1_198_TX_3 -1947,FOODS_1_198_WI_1 -1948,FOODS_1_198_WI_2 
-1949,FOODS_1_198_WI_3 -1950,FOODS_1_199_CA_1 -1951,FOODS_1_199_CA_2 -1952,FOODS_1_199_CA_3 -1953,FOODS_1_199_CA_4 -1954,FOODS_1_199_TX_1 -1955,FOODS_1_199_TX_2 -1956,FOODS_1_199_TX_3 -1957,FOODS_1_199_WI_1 -1958,FOODS_1_199_WI_2 -1959,FOODS_1_199_WI_3 -1960,FOODS_1_200_CA_1 -1961,FOODS_1_200_CA_2 -1962,FOODS_1_200_CA_3 -1963,FOODS_1_200_CA_4 -1964,FOODS_1_200_TX_1 -1965,FOODS_1_200_TX_2 -1966,FOODS_1_200_TX_3 -1967,FOODS_1_200_WI_1 -1968,FOODS_1_200_WI_2 -1969,FOODS_1_200_WI_3 -1970,FOODS_1_201_CA_1 -1971,FOODS_1_201_CA_2 -1972,FOODS_1_201_CA_3 -1973,FOODS_1_201_CA_4 -1974,FOODS_1_201_TX_1 -1975,FOODS_1_201_TX_2 -1976,FOODS_1_201_TX_3 -1977,FOODS_1_201_WI_1 -1978,FOODS_1_201_WI_2 -1979,FOODS_1_201_WI_3 -1980,FOODS_1_202_CA_1 -1981,FOODS_1_202_CA_2 -1982,FOODS_1_202_CA_3 -1983,FOODS_1_202_CA_4 -1984,FOODS_1_202_TX_1 -1985,FOODS_1_202_TX_2 -1986,FOODS_1_202_TX_3 -1987,FOODS_1_202_WI_1 -1988,FOODS_1_202_WI_2 -1989,FOODS_1_202_WI_3 -1990,FOODS_1_203_CA_1 -1991,FOODS_1_203_CA_2 -1992,FOODS_1_203_CA_3 -1993,FOODS_1_203_CA_4 -1994,FOODS_1_203_TX_1 -1995,FOODS_1_203_TX_2 -1996,FOODS_1_203_TX_3 -1997,FOODS_1_203_WI_1 -1998,FOODS_1_203_WI_2 -1999,FOODS_1_203_WI_3 -2000,FOODS_1_204_CA_1 -2001,FOODS_1_204_CA_2 -2002,FOODS_1_204_CA_3 -2003,FOODS_1_204_CA_4 -2004,FOODS_1_204_TX_1 -2005,FOODS_1_204_TX_2 -2006,FOODS_1_204_TX_3 -2007,FOODS_1_204_WI_1 -2008,FOODS_1_204_WI_2 -2009,FOODS_1_204_WI_3 -2010,FOODS_1_205_CA_1 -2011,FOODS_1_205_CA_2 -2012,FOODS_1_205_CA_3 -2013,FOODS_1_205_CA_4 -2014,FOODS_1_205_TX_1 -2015,FOODS_1_205_TX_2 -2016,FOODS_1_205_TX_3 -2017,FOODS_1_205_WI_1 -2018,FOODS_1_205_WI_2 -2019,FOODS_1_205_WI_3 -2020,FOODS_1_206_CA_1 -2021,FOODS_1_206_CA_2 -2022,FOODS_1_206_CA_3 -2023,FOODS_1_206_CA_4 -2024,FOODS_1_206_TX_1 -2025,FOODS_1_206_TX_2 -2026,FOODS_1_206_TX_3 -2027,FOODS_1_206_WI_1 -2028,FOODS_1_206_WI_2 -2029,FOODS_1_206_WI_3 -2030,FOODS_1_207_CA_1 -2031,FOODS_1_207_CA_2 -2032,FOODS_1_207_CA_3 -2033,FOODS_1_207_CA_4 -2034,FOODS_1_207_TX_1 
-2035,FOODS_1_207_TX_2 -2036,FOODS_1_207_TX_3 -2037,FOODS_1_207_WI_1 -2038,FOODS_1_207_WI_2 -2039,FOODS_1_207_WI_3 -2040,FOODS_1_208_CA_1 -2041,FOODS_1_208_CA_2 -2042,FOODS_1_208_CA_3 -2043,FOODS_1_208_CA_4 -2044,FOODS_1_208_TX_1 -2045,FOODS_1_208_TX_2 -2046,FOODS_1_208_TX_3 -2047,FOODS_1_208_WI_1 -2048,FOODS_1_208_WI_2 -2049,FOODS_1_208_WI_3 -2050,FOODS_1_209_CA_1 -2051,FOODS_1_209_CA_2 -2052,FOODS_1_209_CA_3 -2053,FOODS_1_209_CA_4 -2054,FOODS_1_209_TX_1 -2055,FOODS_1_209_TX_2 -2056,FOODS_1_209_TX_3 -2057,FOODS_1_209_WI_1 -2058,FOODS_1_209_WI_2 -2059,FOODS_1_209_WI_3 -2060,FOODS_1_210_CA_1 -2061,FOODS_1_210_CA_2 -2062,FOODS_1_210_CA_3 -2063,FOODS_1_210_CA_4 -2064,FOODS_1_210_TX_1 -2065,FOODS_1_210_TX_2 -2066,FOODS_1_210_TX_3 -2067,FOODS_1_210_WI_1 -2068,FOODS_1_210_WI_2 -2069,FOODS_1_210_WI_3 -2070,FOODS_1_211_CA_1 -2071,FOODS_1_211_CA_2 -2072,FOODS_1_211_CA_3 -2073,FOODS_1_211_CA_4 -2074,FOODS_1_211_TX_1 -2075,FOODS_1_211_TX_2 -2076,FOODS_1_211_TX_3 -2077,FOODS_1_211_WI_1 -2078,FOODS_1_211_WI_2 -2079,FOODS_1_211_WI_3 -2080,FOODS_1_212_CA_1 -2081,FOODS_1_212_CA_2 -2082,FOODS_1_212_CA_3 -2083,FOODS_1_212_CA_4 -2084,FOODS_1_212_TX_1 -2085,FOODS_1_212_TX_2 -2086,FOODS_1_212_TX_3 -2087,FOODS_1_212_WI_1 -2088,FOODS_1_212_WI_2 -2089,FOODS_1_212_WI_3 -2090,FOODS_1_213_CA_1 -2091,FOODS_1_213_CA_2 -2092,FOODS_1_213_CA_3 -2093,FOODS_1_213_CA_4 -2094,FOODS_1_213_TX_1 -2095,FOODS_1_213_TX_2 -2096,FOODS_1_213_TX_3 -2097,FOODS_1_213_WI_1 -2098,FOODS_1_213_WI_2 -2099,FOODS_1_213_WI_3 -2100,FOODS_1_214_CA_1 -2101,FOODS_1_214_CA_2 -2102,FOODS_1_214_CA_3 -2103,FOODS_1_214_CA_4 -2104,FOODS_1_214_TX_1 -2105,FOODS_1_214_TX_2 -2106,FOODS_1_214_TX_3 -2107,FOODS_1_214_WI_1 -2108,FOODS_1_214_WI_2 -2109,FOODS_1_214_WI_3 -2110,FOODS_1_215_CA_1 -2111,FOODS_1_215_CA_2 -2112,FOODS_1_215_CA_3 -2113,FOODS_1_215_CA_4 -2114,FOODS_1_215_TX_1 -2115,FOODS_1_215_TX_2 -2116,FOODS_1_215_TX_3 -2117,FOODS_1_215_WI_1 -2118,FOODS_1_215_WI_2 -2119,FOODS_1_215_WI_3 -2120,FOODS_1_216_CA_1 
-2121,FOODS_1_216_CA_2 -2122,FOODS_1_216_CA_3 -2123,FOODS_1_216_CA_4 -2124,FOODS_1_216_TX_1 -2125,FOODS_1_216_TX_2 -2126,FOODS_1_216_TX_3 -2127,FOODS_1_216_WI_1 -2128,FOODS_1_216_WI_2 -2129,FOODS_1_216_WI_3 -2130,FOODS_1_217_CA_1 -2131,FOODS_1_217_CA_2 -2132,FOODS_1_217_CA_3 -2133,FOODS_1_217_CA_4 -2134,FOODS_1_217_TX_1 -2135,FOODS_1_217_TX_2 -2136,FOODS_1_217_TX_3 -2137,FOODS_1_217_WI_1 -2138,FOODS_1_217_WI_2 -2139,FOODS_1_217_WI_3 -2140,FOODS_1_218_CA_1 -2141,FOODS_1_218_CA_2 -2142,FOODS_1_218_CA_3 -2143,FOODS_1_218_CA_4 -2144,FOODS_1_218_TX_1 -2145,FOODS_1_218_TX_2 -2146,FOODS_1_218_TX_3 -2147,FOODS_1_218_WI_1 -2148,FOODS_1_218_WI_2 -2149,FOODS_1_218_WI_3 -2150,FOODS_1_219_CA_1 -2151,FOODS_1_219_CA_2 -2152,FOODS_1_219_CA_3 -2153,FOODS_1_219_CA_4 -2154,FOODS_1_219_TX_1 -2155,FOODS_1_219_TX_2 -2156,FOODS_1_219_TX_3 -2157,FOODS_1_219_WI_1 -2158,FOODS_1_219_WI_2 -2159,FOODS_1_219_WI_3 -2160,FOODS_2_001_CA_1 -2161,FOODS_2_001_CA_2 -2162,FOODS_2_001_CA_3 -2163,FOODS_2_001_CA_4 -2164,FOODS_2_001_TX_1 -2165,FOODS_2_001_TX_2 -2166,FOODS_2_001_TX_3 -2167,FOODS_2_001_WI_1 -2168,FOODS_2_001_WI_2 -2169,FOODS_2_001_WI_3 -2170,FOODS_2_002_CA_1 -2171,FOODS_2_002_CA_2 -2172,FOODS_2_002_CA_3 -2173,FOODS_2_002_CA_4 -2174,FOODS_2_002_TX_1 -2175,FOODS_2_002_TX_2 -2176,FOODS_2_002_TX_3 -2177,FOODS_2_002_WI_1 -2178,FOODS_2_002_WI_2 -2179,FOODS_2_002_WI_3 -2180,FOODS_2_003_CA_1 -2181,FOODS_2_003_CA_2 -2182,FOODS_2_003_CA_3 -2183,FOODS_2_003_CA_4 -2184,FOODS_2_003_TX_1 -2185,FOODS_2_003_TX_2 -2186,FOODS_2_003_TX_3 -2187,FOODS_2_003_WI_1 -2188,FOODS_2_003_WI_2 -2189,FOODS_2_003_WI_3 -2190,FOODS_2_004_CA_1 -2191,FOODS_2_004_CA_2 -2192,FOODS_2_004_CA_3 -2193,FOODS_2_004_CA_4 -2194,FOODS_2_004_TX_1 -2195,FOODS_2_004_TX_2 -2196,FOODS_2_004_TX_3 -2197,FOODS_2_004_WI_1 -2198,FOODS_2_004_WI_2 -2199,FOODS_2_004_WI_3 -2200,FOODS_2_005_CA_1 -2201,FOODS_2_005_CA_2 -2202,FOODS_2_005_CA_3 -2203,FOODS_2_005_CA_4 -2204,FOODS_2_005_TX_1 -2205,FOODS_2_005_TX_2 -2206,FOODS_2_005_TX_3 
-2207,FOODS_2_005_WI_1 -2208,FOODS_2_005_WI_2 -2209,FOODS_2_005_WI_3 -2210,FOODS_2_006_CA_1 -2211,FOODS_2_006_CA_2 -2212,FOODS_2_006_CA_3 -2213,FOODS_2_006_CA_4 -2214,FOODS_2_006_TX_1 -2215,FOODS_2_006_TX_2 -2216,FOODS_2_006_TX_3 -2217,FOODS_2_006_WI_1 -2218,FOODS_2_006_WI_2 -2219,FOODS_2_006_WI_3 -2220,FOODS_2_007_CA_1 -2221,FOODS_2_007_CA_2 -2222,FOODS_2_007_CA_3 -2223,FOODS_2_007_CA_4 -2224,FOODS_2_007_TX_1 -2225,FOODS_2_007_TX_2 -2226,FOODS_2_007_TX_3 -2227,FOODS_2_007_WI_1 -2228,FOODS_2_007_WI_2 -2229,FOODS_2_007_WI_3 -2230,FOODS_2_008_CA_1 -2231,FOODS_2_008_CA_2 -2232,FOODS_2_008_CA_3 -2233,FOODS_2_008_CA_4 -2234,FOODS_2_008_TX_1 -2235,FOODS_2_008_TX_2 -2236,FOODS_2_008_TX_3 -2237,FOODS_2_008_WI_1 -2238,FOODS_2_008_WI_2 -2239,FOODS_2_008_WI_3 -2240,FOODS_2_009_CA_1 -2241,FOODS_2_009_CA_2 -2242,FOODS_2_009_CA_3 -2243,FOODS_2_009_CA_4 -2244,FOODS_2_009_TX_1 -2245,FOODS_2_009_TX_2 -2246,FOODS_2_009_TX_3 -2247,FOODS_2_009_WI_1 -2248,FOODS_2_009_WI_2 -2249,FOODS_2_009_WI_3 -2250,FOODS_2_010_CA_1 -2251,FOODS_2_010_CA_2 -2252,FOODS_2_010_CA_3 -2253,FOODS_2_010_CA_4 -2254,FOODS_2_010_TX_1 -2255,FOODS_2_010_TX_2 -2256,FOODS_2_010_TX_3 -2257,FOODS_2_010_WI_1 -2258,FOODS_2_010_WI_2 -2259,FOODS_2_010_WI_3 -2260,FOODS_2_011_CA_1 -2261,FOODS_2_011_CA_2 -2262,FOODS_2_011_CA_3 -2263,FOODS_2_011_CA_4 -2264,FOODS_2_011_TX_1 -2265,FOODS_2_011_TX_2 -2266,FOODS_2_011_TX_3 -2267,FOODS_2_011_WI_1 -2268,FOODS_2_011_WI_2 -2269,FOODS_2_011_WI_3 -2270,FOODS_2_012_CA_1 -2271,FOODS_2_012_CA_2 -2272,FOODS_2_012_CA_3 -2273,FOODS_2_012_CA_4 -2274,FOODS_2_012_TX_1 -2275,FOODS_2_012_TX_2 -2276,FOODS_2_012_TX_3 -2277,FOODS_2_012_WI_1 -2278,FOODS_2_012_WI_2 -2279,FOODS_2_012_WI_3 -2280,FOODS_2_013_CA_1 -2281,FOODS_2_013_CA_2 -2282,FOODS_2_013_CA_3 -2283,FOODS_2_013_CA_4 -2284,FOODS_2_013_TX_1 -2285,FOODS_2_013_TX_2 -2286,FOODS_2_013_TX_3 -2287,FOODS_2_013_WI_1 -2288,FOODS_2_013_WI_2 -2289,FOODS_2_013_WI_3 -2290,FOODS_2_014_CA_1 -2291,FOODS_2_014_CA_2 -2292,FOODS_2_014_CA_3 
-2293,FOODS_2_014_CA_4 -2294,FOODS_2_014_TX_1 -2295,FOODS_2_014_TX_2 -2296,FOODS_2_014_TX_3 -2297,FOODS_2_014_WI_1 -2298,FOODS_2_014_WI_2 -2299,FOODS_2_014_WI_3 -2300,FOODS_2_015_CA_1 -2301,FOODS_2_015_CA_2 -2302,FOODS_2_015_CA_3 -2303,FOODS_2_015_CA_4 -2304,FOODS_2_015_TX_1 -2305,FOODS_2_015_TX_2 -2306,FOODS_2_015_TX_3 -2307,FOODS_2_015_WI_1 -2308,FOODS_2_015_WI_2 -2309,FOODS_2_015_WI_3 -2310,FOODS_2_016_CA_1 -2311,FOODS_2_016_CA_2 -2312,FOODS_2_016_CA_3 -2313,FOODS_2_016_CA_4 -2314,FOODS_2_016_TX_1 -2315,FOODS_2_016_TX_2 -2316,FOODS_2_016_TX_3 -2317,FOODS_2_016_WI_1 -2318,FOODS_2_016_WI_2 -2319,FOODS_2_016_WI_3 -2320,FOODS_2_017_CA_1 -2321,FOODS_2_017_CA_2 -2322,FOODS_2_017_CA_3 -2323,FOODS_2_017_CA_4 -2324,FOODS_2_017_TX_1 -2325,FOODS_2_017_TX_2 -2326,FOODS_2_017_TX_3 -2327,FOODS_2_017_WI_1 -2328,FOODS_2_017_WI_2 -2329,FOODS_2_017_WI_3 -2330,FOODS_2_018_CA_1 -2331,FOODS_2_018_CA_2 -2332,FOODS_2_018_CA_3 -2333,FOODS_2_018_CA_4 -2334,FOODS_2_018_TX_1 -2335,FOODS_2_018_TX_2 -2336,FOODS_2_018_TX_3 -2337,FOODS_2_018_WI_1 -2338,FOODS_2_018_WI_2 -2339,FOODS_2_018_WI_3 -2340,FOODS_2_019_CA_1 -2341,FOODS_2_019_CA_2 -2342,FOODS_2_019_CA_3 -2343,FOODS_2_019_CA_4 -2344,FOODS_2_019_TX_1 -2345,FOODS_2_019_TX_2 -2346,FOODS_2_019_TX_3 -2347,FOODS_2_019_WI_1 -2348,FOODS_2_019_WI_2 -2349,FOODS_2_019_WI_3 -2350,FOODS_2_020_CA_1 -2351,FOODS_2_020_CA_2 -2352,FOODS_2_020_CA_3 -2353,FOODS_2_020_CA_4 -2354,FOODS_2_020_TX_1 -2355,FOODS_2_020_TX_2 -2356,FOODS_2_020_TX_3 -2357,FOODS_2_020_WI_1 -2358,FOODS_2_020_WI_2 -2359,FOODS_2_020_WI_3 -2360,FOODS_2_021_CA_1 -2361,FOODS_2_021_CA_2 -2362,FOODS_2_021_CA_3 -2363,FOODS_2_021_CA_4 -2364,FOODS_2_021_TX_1 -2365,FOODS_2_021_TX_2 -2366,FOODS_2_021_TX_3 -2367,FOODS_2_021_WI_1 -2368,FOODS_2_021_WI_2 -2369,FOODS_2_021_WI_3 -2370,FOODS_2_022_CA_1 -2371,FOODS_2_022_CA_2 -2372,FOODS_2_022_CA_3 -2373,FOODS_2_022_CA_4 -2374,FOODS_2_022_TX_1 -2375,FOODS_2_022_TX_2 -2376,FOODS_2_022_TX_3 -2377,FOODS_2_022_WI_1 -2378,FOODS_2_022_WI_2 
-2379,FOODS_2_022_WI_3 -2380,FOODS_2_023_CA_1 -2381,FOODS_2_023_CA_2 -2382,FOODS_2_023_CA_3 -2383,FOODS_2_023_CA_4 -2384,FOODS_2_023_TX_1 -2385,FOODS_2_023_TX_2 -2386,FOODS_2_023_TX_3 -2387,FOODS_2_023_WI_1 -2388,FOODS_2_023_WI_2 -2389,FOODS_2_023_WI_3 -2390,FOODS_2_024_CA_1 -2391,FOODS_2_024_CA_2 -2392,FOODS_2_024_CA_3 -2393,FOODS_2_024_CA_4 -2394,FOODS_2_024_TX_1 -2395,FOODS_2_024_TX_2 -2396,FOODS_2_024_TX_3 -2397,FOODS_2_024_WI_1 -2398,FOODS_2_024_WI_2 -2399,FOODS_2_024_WI_3 -2400,FOODS_2_025_CA_1 -2401,FOODS_2_025_CA_2 -2402,FOODS_2_025_CA_3 -2403,FOODS_2_025_CA_4 -2404,FOODS_2_025_TX_1 -2405,FOODS_2_025_TX_2 -2406,FOODS_2_025_TX_3 -2407,FOODS_2_025_WI_1 -2408,FOODS_2_025_WI_2 -2409,FOODS_2_025_WI_3 -2410,FOODS_2_026_CA_1 -2411,FOODS_2_026_CA_2 -2412,FOODS_2_026_CA_3 -2413,FOODS_2_026_CA_4 -2414,FOODS_2_026_TX_1 -2415,FOODS_2_026_TX_2 -2416,FOODS_2_026_TX_3 -2417,FOODS_2_026_WI_1 -2418,FOODS_2_026_WI_2 -2419,FOODS_2_026_WI_3 -2420,FOODS_2_027_CA_1 -2421,FOODS_2_027_CA_2 -2422,FOODS_2_027_CA_3 -2423,FOODS_2_027_CA_4 -2424,FOODS_2_027_TX_1 -2425,FOODS_2_027_TX_2 -2426,FOODS_2_027_TX_3 -2427,FOODS_2_027_WI_1 -2428,FOODS_2_027_WI_2 -2429,FOODS_2_027_WI_3 -2430,FOODS_2_028_CA_1 -2431,FOODS_2_028_CA_2 -2432,FOODS_2_028_CA_3 -2433,FOODS_2_028_CA_4 -2434,FOODS_2_028_TX_1 -2435,FOODS_2_028_TX_2 -2436,FOODS_2_028_TX_3 -2437,FOODS_2_028_WI_1 -2438,FOODS_2_028_WI_2 -2439,FOODS_2_028_WI_3 -2440,FOODS_2_029_CA_1 -2441,FOODS_2_029_CA_2 -2442,FOODS_2_029_CA_3 -2443,FOODS_2_029_CA_4 -2444,FOODS_2_029_TX_1 -2445,FOODS_2_029_TX_2 -2446,FOODS_2_029_TX_3 -2447,FOODS_2_029_WI_1 -2448,FOODS_2_029_WI_2 -2449,FOODS_2_029_WI_3 -2450,FOODS_2_030_CA_1 -2451,FOODS_2_030_CA_2 -2452,FOODS_2_030_CA_3 -2453,FOODS_2_030_CA_4 -2454,FOODS_2_030_TX_1 -2455,FOODS_2_030_TX_2 -2456,FOODS_2_030_TX_3 -2457,FOODS_2_030_WI_1 -2458,FOODS_2_030_WI_2 -2459,FOODS_2_030_WI_3 -2460,FOODS_2_031_CA_1 -2461,FOODS_2_031_CA_2 -2462,FOODS_2_031_CA_3 -2463,FOODS_2_031_CA_4 -2464,FOODS_2_031_TX_1 
-2465,FOODS_2_031_TX_2 -2466,FOODS_2_031_TX_3 -2467,FOODS_2_031_WI_1 -2468,FOODS_2_031_WI_2 -2469,FOODS_2_031_WI_3 -2470,FOODS_2_032_CA_1 -2471,FOODS_2_032_CA_2 -2472,FOODS_2_032_CA_3 -2473,FOODS_2_032_CA_4 -2474,FOODS_2_032_TX_1 -2475,FOODS_2_032_TX_2 -2476,FOODS_2_032_TX_3 -2477,FOODS_2_032_WI_1 -2478,FOODS_2_032_WI_2 -2479,FOODS_2_032_WI_3 -2480,FOODS_2_033_CA_1 -2481,FOODS_2_033_CA_2 -2482,FOODS_2_033_CA_3 -2483,FOODS_2_033_CA_4 -2484,FOODS_2_033_TX_1 -2485,FOODS_2_033_TX_2 -2486,FOODS_2_033_TX_3 -2487,FOODS_2_033_WI_1 -2488,FOODS_2_033_WI_2 -2489,FOODS_2_033_WI_3 -2490,FOODS_2_034_CA_1 -2491,FOODS_2_034_CA_2 -2492,FOODS_2_034_CA_3 -2493,FOODS_2_034_CA_4 -2494,FOODS_2_034_TX_1 -2495,FOODS_2_034_TX_2 -2496,FOODS_2_034_TX_3 -2497,FOODS_2_034_WI_1 -2498,FOODS_2_034_WI_2 -2499,FOODS_2_034_WI_3 -2500,FOODS_2_035_CA_1 -2501,FOODS_2_035_CA_2 -2502,FOODS_2_035_CA_3 -2503,FOODS_2_035_CA_4 -2504,FOODS_2_035_TX_1 -2505,FOODS_2_035_TX_2 -2506,FOODS_2_035_TX_3 -2507,FOODS_2_035_WI_1 -2508,FOODS_2_035_WI_2 -2509,FOODS_2_035_WI_3 -2510,FOODS_2_036_CA_1 -2511,FOODS_2_036_CA_2 -2512,FOODS_2_036_CA_3 -2513,FOODS_2_036_CA_4 -2514,FOODS_2_036_TX_1 -2515,FOODS_2_036_TX_2 -2516,FOODS_2_036_TX_3 -2517,FOODS_2_036_WI_1 -2518,FOODS_2_036_WI_2 -2519,FOODS_2_036_WI_3 -2520,FOODS_2_037_CA_1 -2521,FOODS_2_037_CA_2 -2522,FOODS_2_037_CA_3 -2523,FOODS_2_037_CA_4 -2524,FOODS_2_037_TX_1 -2525,FOODS_2_037_TX_2 -2526,FOODS_2_037_TX_3 -2527,FOODS_2_037_WI_1 -2528,FOODS_2_037_WI_2 -2529,FOODS_2_037_WI_3 -2530,FOODS_2_038_CA_1 -2531,FOODS_2_038_CA_2 -2532,FOODS_2_038_CA_3 -2533,FOODS_2_038_CA_4 -2534,FOODS_2_038_TX_1 -2535,FOODS_2_038_TX_2 -2536,FOODS_2_038_TX_3 -2537,FOODS_2_038_WI_1 -2538,FOODS_2_038_WI_2 -2539,FOODS_2_038_WI_3 -2540,FOODS_2_039_CA_1 -2541,FOODS_2_039_CA_2 -2542,FOODS_2_039_CA_3 -2543,FOODS_2_039_CA_4 -2544,FOODS_2_039_TX_1 -2545,FOODS_2_039_TX_2 -2546,FOODS_2_039_TX_3 -2547,FOODS_2_039_WI_1 -2548,FOODS_2_039_WI_2 -2549,FOODS_2_039_WI_3 -2550,FOODS_2_040_CA_1 
-2551,FOODS_2_040_CA_2 -2552,FOODS_2_040_CA_3 -2553,FOODS_2_040_CA_4 -2554,FOODS_2_040_TX_1 -2555,FOODS_2_040_TX_2 -2556,FOODS_2_040_TX_3 -2557,FOODS_2_040_WI_1 -2558,FOODS_2_040_WI_2 -2559,FOODS_2_040_WI_3 -2560,FOODS_2_041_CA_1 -2561,FOODS_2_041_CA_2 -2562,FOODS_2_041_CA_3 -2563,FOODS_2_041_CA_4 -2564,FOODS_2_041_TX_1 -2565,FOODS_2_041_TX_2 -2566,FOODS_2_041_TX_3 -2567,FOODS_2_041_WI_1 -2568,FOODS_2_041_WI_2 -2569,FOODS_2_041_WI_3 -2570,FOODS_2_042_CA_1 -2571,FOODS_2_042_CA_2 -2572,FOODS_2_042_CA_3 -2573,FOODS_2_042_CA_4 -2574,FOODS_2_042_TX_1 -2575,FOODS_2_042_TX_2 -2576,FOODS_2_042_TX_3 -2577,FOODS_2_042_WI_1 -2578,FOODS_2_042_WI_2 -2579,FOODS_2_042_WI_3 -2580,FOODS_2_043_CA_1 -2581,FOODS_2_043_CA_2 -2582,FOODS_2_043_CA_3 -2583,FOODS_2_043_CA_4 -2584,FOODS_2_043_TX_1 -2585,FOODS_2_043_TX_2 -2586,FOODS_2_043_TX_3 -2587,FOODS_2_043_WI_1 -2588,FOODS_2_043_WI_2 -2589,FOODS_2_043_WI_3 -2590,FOODS_2_044_CA_1 -2591,FOODS_2_044_CA_2 -2592,FOODS_2_044_CA_3 -2593,FOODS_2_044_CA_4 -2594,FOODS_2_044_TX_1 -2595,FOODS_2_044_TX_2 -2596,FOODS_2_044_TX_3 -2597,FOODS_2_044_WI_1 -2598,FOODS_2_044_WI_2 -2599,FOODS_2_044_WI_3 -2600,FOODS_2_045_CA_1 -2601,FOODS_2_045_CA_2 -2602,FOODS_2_045_CA_3 -2603,FOODS_2_045_CA_4 -2604,FOODS_2_045_TX_1 -2605,FOODS_2_045_TX_2 -2606,FOODS_2_045_TX_3 -2607,FOODS_2_045_WI_1 -2608,FOODS_2_045_WI_2 -2609,FOODS_2_045_WI_3 -2610,FOODS_2_046_CA_1 -2611,FOODS_2_046_CA_2 -2612,FOODS_2_046_CA_3 -2613,FOODS_2_046_CA_4 -2614,FOODS_2_046_TX_1 -2615,FOODS_2_046_TX_2 -2616,FOODS_2_046_TX_3 -2617,FOODS_2_046_WI_1 -2618,FOODS_2_046_WI_2 -2619,FOODS_2_046_WI_3 -2620,FOODS_2_047_CA_1 -2621,FOODS_2_047_CA_2 -2622,FOODS_2_047_CA_3 -2623,FOODS_2_047_CA_4 -2624,FOODS_2_047_TX_1 -2625,FOODS_2_047_TX_2 -2626,FOODS_2_047_TX_3 -2627,FOODS_2_047_WI_1 -2628,FOODS_2_047_WI_2 -2629,FOODS_2_047_WI_3 -2630,FOODS_2_048_CA_1 -2631,FOODS_2_048_CA_2 -2632,FOODS_2_048_CA_3 -2633,FOODS_2_048_CA_4 -2634,FOODS_2_048_TX_1 -2635,FOODS_2_048_TX_2 -2636,FOODS_2_048_TX_3 
-2637,FOODS_2_048_WI_1 -2638,FOODS_2_048_WI_2 -2639,FOODS_2_048_WI_3 -2640,FOODS_2_049_CA_1 -2641,FOODS_2_049_CA_2 -2642,FOODS_2_049_CA_3 -2643,FOODS_2_049_CA_4 -2644,FOODS_2_049_TX_1 -2645,FOODS_2_049_TX_2 -2646,FOODS_2_049_TX_3 -2647,FOODS_2_049_WI_1 -2648,FOODS_2_049_WI_2 -2649,FOODS_2_049_WI_3 -2650,FOODS_2_050_CA_1 -2651,FOODS_2_050_CA_2 -2652,FOODS_2_050_CA_3 -2653,FOODS_2_050_CA_4 -2654,FOODS_2_050_TX_1 -2655,FOODS_2_050_TX_2 -2656,FOODS_2_050_TX_3 -2657,FOODS_2_050_WI_1 -2658,FOODS_2_050_WI_2 -2659,FOODS_2_050_WI_3 -2660,FOODS_2_051_CA_1 -2661,FOODS_2_051_CA_2 -2662,FOODS_2_051_CA_3 -2663,FOODS_2_051_CA_4 -2664,FOODS_2_051_TX_1 -2665,FOODS_2_051_TX_2 -2666,FOODS_2_051_TX_3 -2667,FOODS_2_051_WI_1 -2668,FOODS_2_051_WI_2 -2669,FOODS_2_051_WI_3 -2670,FOODS_2_052_CA_1 -2671,FOODS_2_052_CA_2 -2672,FOODS_2_052_CA_3 -2673,FOODS_2_052_CA_4 -2674,FOODS_2_052_TX_1 -2675,FOODS_2_052_TX_2 -2676,FOODS_2_052_TX_3 -2677,FOODS_2_052_WI_1 -2678,FOODS_2_052_WI_2 -2679,FOODS_2_052_WI_3 -2680,FOODS_2_053_CA_1 -2681,FOODS_2_053_CA_2 -2682,FOODS_2_053_CA_3 -2683,FOODS_2_053_CA_4 -2684,FOODS_2_053_TX_1 -2685,FOODS_2_053_TX_2 -2686,FOODS_2_053_TX_3 -2687,FOODS_2_053_WI_1 -2688,FOODS_2_053_WI_2 -2689,FOODS_2_053_WI_3 -2690,FOODS_2_054_CA_1 -2691,FOODS_2_054_CA_2 -2692,FOODS_2_054_CA_3 -2693,FOODS_2_054_CA_4 -2694,FOODS_2_054_TX_1 -2695,FOODS_2_054_TX_2 -2696,FOODS_2_054_TX_3 -2697,FOODS_2_054_WI_1 -2698,FOODS_2_054_WI_2 -2699,FOODS_2_054_WI_3 -2700,FOODS_2_055_CA_1 -2701,FOODS_2_055_CA_2 -2702,FOODS_2_055_CA_3 -2703,FOODS_2_055_CA_4 -2704,FOODS_2_055_TX_1 -2705,FOODS_2_055_TX_2 -2706,FOODS_2_055_TX_3 -2707,FOODS_2_055_WI_1 -2708,FOODS_2_055_WI_2 -2709,FOODS_2_055_WI_3 -2710,FOODS_2_056_CA_1 -2711,FOODS_2_056_CA_2 -2712,FOODS_2_056_CA_3 -2713,FOODS_2_056_CA_4 -2714,FOODS_2_056_TX_1 -2715,FOODS_2_056_TX_2 -2716,FOODS_2_056_TX_3 -2717,FOODS_2_056_WI_1 -2718,FOODS_2_056_WI_2 -2719,FOODS_2_056_WI_3 -2720,FOODS_2_057_CA_1 -2721,FOODS_2_057_CA_2 -2722,FOODS_2_057_CA_3 
-2723,FOODS_2_057_CA_4 -2724,FOODS_2_057_TX_1 -2725,FOODS_2_057_TX_2 -2726,FOODS_2_057_TX_3 -2727,FOODS_2_057_WI_1 -2728,FOODS_2_057_WI_2 -2729,FOODS_2_057_WI_3 -2730,FOODS_2_058_CA_1 -2731,FOODS_2_058_CA_2 -2732,FOODS_2_058_CA_3 -2733,FOODS_2_058_CA_4 -2734,FOODS_2_058_TX_1 -2735,FOODS_2_058_TX_2 -2736,FOODS_2_058_TX_3 -2737,FOODS_2_058_WI_1 -2738,FOODS_2_058_WI_2 -2739,FOODS_2_058_WI_3 -2740,FOODS_2_059_CA_1 -2741,FOODS_2_059_CA_2 -2742,FOODS_2_059_CA_3 -2743,FOODS_2_059_CA_4 -2744,FOODS_2_059_TX_1 -2745,FOODS_2_059_TX_2 -2746,FOODS_2_059_TX_3 -2747,FOODS_2_059_WI_1 -2748,FOODS_2_059_WI_2 -2749,FOODS_2_059_WI_3 -2750,FOODS_2_060_CA_1 -2751,FOODS_2_060_CA_2 -2752,FOODS_2_060_CA_3 -2753,FOODS_2_060_CA_4 -2754,FOODS_2_060_TX_1 -2755,FOODS_2_060_TX_2 -2756,FOODS_2_060_TX_3 -2757,FOODS_2_060_WI_1 -2758,FOODS_2_060_WI_2 -2759,FOODS_2_060_WI_3 -2760,FOODS_2_061_CA_1 -2761,FOODS_2_061_CA_2 -2762,FOODS_2_061_CA_3 -2763,FOODS_2_061_CA_4 -2764,FOODS_2_061_TX_1 -2765,FOODS_2_061_TX_2 -2766,FOODS_2_061_TX_3 -2767,FOODS_2_061_WI_1 -2768,FOODS_2_061_WI_2 -2769,FOODS_2_061_WI_3 -2770,FOODS_2_062_CA_1 -2771,FOODS_2_062_CA_2 -2772,FOODS_2_062_CA_3 -2773,FOODS_2_062_CA_4 -2774,FOODS_2_062_TX_1 -2775,FOODS_2_062_TX_2 -2776,FOODS_2_062_TX_3 -2777,FOODS_2_062_WI_1 -2778,FOODS_2_062_WI_2 -2779,FOODS_2_062_WI_3 -2780,FOODS_2_063_CA_1 -2781,FOODS_2_063_CA_2 -2782,FOODS_2_063_CA_3 -2783,FOODS_2_063_CA_4 -2784,FOODS_2_063_TX_1 -2785,FOODS_2_063_TX_2 -2786,FOODS_2_063_TX_3 -2787,FOODS_2_063_WI_1 -2788,FOODS_2_063_WI_2 -2789,FOODS_2_063_WI_3 -2790,FOODS_2_064_CA_1 -2791,FOODS_2_064_CA_2 -2792,FOODS_2_064_CA_3 -2793,FOODS_2_064_CA_4 -2794,FOODS_2_064_TX_1 -2795,FOODS_2_064_TX_2 -2796,FOODS_2_064_TX_3 -2797,FOODS_2_064_WI_1 -2798,FOODS_2_064_WI_2 -2799,FOODS_2_064_WI_3 -2800,FOODS_2_065_CA_1 -2801,FOODS_2_065_CA_2 -2802,FOODS_2_065_CA_3 -2803,FOODS_2_065_CA_4 -2804,FOODS_2_065_TX_1 -2805,FOODS_2_065_TX_2 -2806,FOODS_2_065_TX_3 -2807,FOODS_2_065_WI_1 -2808,FOODS_2_065_WI_2 
-2809,FOODS_2_065_WI_3 -2810,FOODS_2_066_CA_1 -2811,FOODS_2_066_CA_2 -2812,FOODS_2_066_CA_3 -2813,FOODS_2_066_CA_4 -2814,FOODS_2_066_TX_1 -2815,FOODS_2_066_TX_2 -2816,FOODS_2_066_TX_3 -2817,FOODS_2_066_WI_1 -2818,FOODS_2_066_WI_2 -2819,FOODS_2_066_WI_3 -2820,FOODS_2_067_CA_1 -2821,FOODS_2_067_CA_2 -2822,FOODS_2_067_CA_3 -2823,FOODS_2_067_CA_4 -2824,FOODS_2_067_TX_1 -2825,FOODS_2_067_TX_2 -2826,FOODS_2_067_TX_3 -2827,FOODS_2_067_WI_1 -2828,FOODS_2_067_WI_2 -2829,FOODS_2_067_WI_3 -2830,FOODS_2_068_CA_1 -2831,FOODS_2_068_CA_2 -2832,FOODS_2_068_CA_3 -2833,FOODS_2_068_CA_4 -2834,FOODS_2_068_TX_1 -2835,FOODS_2_068_TX_2 -2836,FOODS_2_068_TX_3 -2837,FOODS_2_068_WI_1 -2838,FOODS_2_068_WI_2 -2839,FOODS_2_068_WI_3 -2840,FOODS_2_069_CA_1 -2841,FOODS_2_069_CA_2 -2842,FOODS_2_069_CA_3 -2843,FOODS_2_069_CA_4 -2844,FOODS_2_069_TX_1 -2845,FOODS_2_069_TX_2 -2846,FOODS_2_069_TX_3 -2847,FOODS_2_069_WI_1 -2848,FOODS_2_069_WI_2 -2849,FOODS_2_069_WI_3 -2850,FOODS_2_070_CA_1 -2851,FOODS_2_070_CA_2 -2852,FOODS_2_070_CA_3 -2853,FOODS_2_070_CA_4 -2854,FOODS_2_070_TX_1 -2855,FOODS_2_070_TX_2 -2856,FOODS_2_070_TX_3 -2857,FOODS_2_070_WI_1 -2858,FOODS_2_070_WI_2 -2859,FOODS_2_070_WI_3 -2860,FOODS_2_071_CA_1 -2861,FOODS_2_071_CA_2 -2862,FOODS_2_071_CA_3 -2863,FOODS_2_071_CA_4 -2864,FOODS_2_071_TX_1 -2865,FOODS_2_071_TX_2 -2866,FOODS_2_071_TX_3 -2867,FOODS_2_071_WI_1 -2868,FOODS_2_071_WI_2 -2869,FOODS_2_071_WI_3 -2870,FOODS_2_072_CA_1 -2871,FOODS_2_072_CA_2 -2872,FOODS_2_072_CA_3 -2873,FOODS_2_072_CA_4 -2874,FOODS_2_072_TX_1 -2875,FOODS_2_072_TX_2 -2876,FOODS_2_072_TX_3 -2877,FOODS_2_072_WI_1 -2878,FOODS_2_072_WI_2 -2879,FOODS_2_072_WI_3 -2880,FOODS_2_073_CA_1 -2881,FOODS_2_073_CA_2 -2882,FOODS_2_073_CA_3 -2883,FOODS_2_073_CA_4 -2884,FOODS_2_073_TX_1 -2885,FOODS_2_073_TX_2 -2886,FOODS_2_073_TX_3 -2887,FOODS_2_073_WI_1 -2888,FOODS_2_073_WI_2 -2889,FOODS_2_073_WI_3 -2890,FOODS_2_074_CA_1 -2891,FOODS_2_074_CA_2 -2892,FOODS_2_074_CA_3 -2893,FOODS_2_074_CA_4 -2894,FOODS_2_074_TX_1 
-2895,FOODS_2_074_TX_2 -2896,FOODS_2_074_TX_3 -2897,FOODS_2_074_WI_1 -2898,FOODS_2_074_WI_2 -2899,FOODS_2_074_WI_3 -2900,FOODS_2_075_CA_1 -2901,FOODS_2_075_CA_2 -2902,FOODS_2_075_CA_3 -2903,FOODS_2_075_CA_4 -2904,FOODS_2_075_TX_1 -2905,FOODS_2_075_TX_2 -2906,FOODS_2_075_TX_3 -2907,FOODS_2_075_WI_1 -2908,FOODS_2_075_WI_2 -2909,FOODS_2_075_WI_3 -2910,FOODS_2_076_CA_1 -2911,FOODS_2_076_CA_2 -2912,FOODS_2_076_CA_3 -2913,FOODS_2_076_CA_4 -2914,FOODS_2_076_TX_1 -2915,FOODS_2_076_TX_2 -2916,FOODS_2_076_TX_3 -2917,FOODS_2_076_WI_1 -2918,FOODS_2_076_WI_2 -2919,FOODS_2_076_WI_3 -2920,FOODS_2_077_CA_1 -2921,FOODS_2_077_CA_2 -2922,FOODS_2_077_CA_3 -2923,FOODS_2_077_CA_4 -2924,FOODS_2_077_TX_1 -2925,FOODS_2_077_TX_2 -2926,FOODS_2_077_TX_3 -2927,FOODS_2_077_WI_1 -2928,FOODS_2_077_WI_2 -2929,FOODS_2_077_WI_3 -2930,FOODS_2_078_CA_1 -2931,FOODS_2_078_CA_2 -2932,FOODS_2_078_CA_3 -2933,FOODS_2_078_CA_4 -2934,FOODS_2_078_TX_1 -2935,FOODS_2_078_TX_2 -2936,FOODS_2_078_TX_3 -2937,FOODS_2_078_WI_1 -2938,FOODS_2_078_WI_2 -2939,FOODS_2_078_WI_3 -2940,FOODS_2_079_CA_1 -2941,FOODS_2_079_CA_2 -2942,FOODS_2_079_CA_3 -2943,FOODS_2_079_CA_4 -2944,FOODS_2_079_TX_1 -2945,FOODS_2_079_TX_2 -2946,FOODS_2_079_TX_3 -2947,FOODS_2_079_WI_1 -2948,FOODS_2_079_WI_2 -2949,FOODS_2_079_WI_3 -2950,FOODS_2_080_CA_1 -2951,FOODS_2_080_CA_2 -2952,FOODS_2_080_CA_3 -2953,FOODS_2_080_CA_4 -2954,FOODS_2_080_TX_1 -2955,FOODS_2_080_TX_2 -2956,FOODS_2_080_TX_3 -2957,FOODS_2_080_WI_1 -2958,FOODS_2_080_WI_2 -2959,FOODS_2_080_WI_3 -2960,FOODS_2_081_CA_1 -2961,FOODS_2_081_CA_2 -2962,FOODS_2_081_CA_3 -2963,FOODS_2_081_CA_4 -2964,FOODS_2_081_TX_1 -2965,FOODS_2_081_TX_2 -2966,FOODS_2_081_TX_3 -2967,FOODS_2_081_WI_1 -2968,FOODS_2_081_WI_2 -2969,FOODS_2_081_WI_3 -2970,FOODS_2_082_CA_1 -2971,FOODS_2_082_CA_2 -2972,FOODS_2_082_CA_3 -2973,FOODS_2_082_CA_4 -2974,FOODS_2_082_TX_1 -2975,FOODS_2_082_TX_2 -2976,FOODS_2_082_TX_3 -2977,FOODS_2_082_WI_1 -2978,FOODS_2_082_WI_2 -2979,FOODS_2_082_WI_3 -2980,FOODS_2_083_CA_1 
-2981,FOODS_2_083_CA_2 -2982,FOODS_2_083_CA_3 -2983,FOODS_2_083_CA_4 -2984,FOODS_2_083_TX_1 -2985,FOODS_2_083_TX_2 -2986,FOODS_2_083_TX_3 -2987,FOODS_2_083_WI_1 -2988,FOODS_2_083_WI_2 -2989,FOODS_2_083_WI_3 -2990,FOODS_2_084_CA_1 -2991,FOODS_2_084_CA_2 -2992,FOODS_2_084_CA_3 -2993,FOODS_2_084_CA_4 -2994,FOODS_2_084_TX_1 -2995,FOODS_2_084_TX_2 -2996,FOODS_2_084_TX_3 -2997,FOODS_2_084_WI_1 -2998,FOODS_2_084_WI_2 -2999,FOODS_2_084_WI_3 -3000,FOODS_2_085_CA_1 -3001,FOODS_2_085_CA_2 -3002,FOODS_2_085_CA_3 -3003,FOODS_2_085_CA_4 -3004,FOODS_2_085_TX_1 -3005,FOODS_2_085_TX_2 -3006,FOODS_2_085_TX_3 -3007,FOODS_2_085_WI_1 -3008,FOODS_2_085_WI_2 -3009,FOODS_2_085_WI_3 -3010,FOODS_2_086_CA_1 -3011,FOODS_2_086_CA_2 -3012,FOODS_2_086_CA_3 -3013,FOODS_2_086_CA_4 -3014,FOODS_2_086_TX_1 -3015,FOODS_2_086_TX_2 -3016,FOODS_2_086_TX_3 -3017,FOODS_2_086_WI_1 -3018,FOODS_2_086_WI_2 -3019,FOODS_2_086_WI_3 -3020,FOODS_2_087_CA_1 -3021,FOODS_2_087_CA_2 -3022,FOODS_2_087_CA_3 -3023,FOODS_2_087_CA_4 -3024,FOODS_2_087_TX_1 -3025,FOODS_2_087_TX_2 -3026,FOODS_2_087_TX_3 -3027,FOODS_2_087_WI_1 -3028,FOODS_2_087_WI_2 -3029,FOODS_2_087_WI_3 -3030,FOODS_2_088_CA_1 -3031,FOODS_2_088_CA_2 -3032,FOODS_2_088_CA_3 -3033,FOODS_2_088_CA_4 -3034,FOODS_2_088_TX_1 -3035,FOODS_2_088_TX_2 -3036,FOODS_2_088_TX_3 -3037,FOODS_2_088_WI_1 -3038,FOODS_2_088_WI_2 -3039,FOODS_2_088_WI_3 -3040,FOODS_2_089_CA_1 -3041,FOODS_2_089_CA_2 -3042,FOODS_2_089_CA_3 -3043,FOODS_2_089_CA_4 -3044,FOODS_2_089_TX_1 -3045,FOODS_2_089_TX_2 -3046,FOODS_2_089_TX_3 -3047,FOODS_2_089_WI_1 -3048,FOODS_2_089_WI_2 -3049,FOODS_2_089_WI_3 -3050,FOODS_2_090_CA_1 -3051,FOODS_2_090_CA_2 -3052,FOODS_2_090_CA_3 -3053,FOODS_2_090_CA_4 -3054,FOODS_2_090_TX_1 -3055,FOODS_2_090_TX_2 -3056,FOODS_2_090_TX_3 -3057,FOODS_2_090_WI_1 -3058,FOODS_2_090_WI_2 -3059,FOODS_2_090_WI_3 -3060,FOODS_2_091_CA_1 -3061,FOODS_2_091_CA_2 -3062,FOODS_2_091_CA_3 -3063,FOODS_2_091_CA_4 -3064,FOODS_2_091_TX_1 -3065,FOODS_2_091_TX_2 -3066,FOODS_2_091_TX_3 
-3067,FOODS_2_091_WI_1 -3068,FOODS_2_091_WI_2 -3069,FOODS_2_091_WI_3 -3070,FOODS_2_092_CA_1 -3071,FOODS_2_092_CA_2 -3072,FOODS_2_092_CA_3 -3073,FOODS_2_092_CA_4 -3074,FOODS_2_092_TX_1 -3075,FOODS_2_092_TX_2 -3076,FOODS_2_092_TX_3 -3077,FOODS_2_092_WI_1 -3078,FOODS_2_092_WI_2 -3079,FOODS_2_092_WI_3 -3080,FOODS_2_093_CA_1 -3081,FOODS_2_093_CA_2 -3082,FOODS_2_093_CA_3 -3083,FOODS_2_093_CA_4 -3084,FOODS_2_093_TX_1 -3085,FOODS_2_093_TX_2 -3086,FOODS_2_093_TX_3 -3087,FOODS_2_093_WI_1 -3088,FOODS_2_093_WI_2 -3089,FOODS_2_093_WI_3 -3090,FOODS_2_094_CA_1 -3091,FOODS_2_094_CA_2 -3092,FOODS_2_094_CA_3 -3093,FOODS_2_094_CA_4 -3094,FOODS_2_094_TX_1 -3095,FOODS_2_094_TX_2 -3096,FOODS_2_094_TX_3 -3097,FOODS_2_094_WI_1 -3098,FOODS_2_094_WI_2 -3099,FOODS_2_094_WI_3 -3100,FOODS_2_095_CA_1 -3101,FOODS_2_095_CA_2 -3102,FOODS_2_095_CA_3 -3103,FOODS_2_095_CA_4 -3104,FOODS_2_095_TX_1 -3105,FOODS_2_095_TX_2 -3106,FOODS_2_095_TX_3 -3107,FOODS_2_095_WI_1 -3108,FOODS_2_095_WI_2 -3109,FOODS_2_095_WI_3 -3110,FOODS_2_096_CA_1 -3111,FOODS_2_096_CA_2 -3112,FOODS_2_096_CA_3 -3113,FOODS_2_096_CA_4 -3114,FOODS_2_096_TX_1 -3115,FOODS_2_096_TX_2 -3116,FOODS_2_096_TX_3 -3117,FOODS_2_096_WI_1 -3118,FOODS_2_096_WI_2 -3119,FOODS_2_096_WI_3 -3120,FOODS_2_097_CA_1 -3121,FOODS_2_097_CA_2 -3122,FOODS_2_097_CA_3 -3123,FOODS_2_097_CA_4 -3124,FOODS_2_097_TX_1 -3125,FOODS_2_097_TX_2 -3126,FOODS_2_097_TX_3 -3127,FOODS_2_097_WI_1 -3128,FOODS_2_097_WI_2 -3129,FOODS_2_097_WI_3 -3130,FOODS_2_099_CA_1 -3131,FOODS_2_099_CA_2 -3132,FOODS_2_099_CA_3 -3133,FOODS_2_099_CA_4 -3134,FOODS_2_099_TX_1 -3135,FOODS_2_099_TX_2 -3136,FOODS_2_099_TX_3 -3137,FOODS_2_099_WI_1 -3138,FOODS_2_099_WI_2 -3139,FOODS_2_099_WI_3 -3140,FOODS_2_100_CA_1 -3141,FOODS_2_100_CA_2 -3142,FOODS_2_100_CA_3 -3143,FOODS_2_100_CA_4 -3144,FOODS_2_100_TX_1 -3145,FOODS_2_100_TX_2 -3146,FOODS_2_100_TX_3 -3147,FOODS_2_100_WI_1 -3148,FOODS_2_100_WI_2 -3149,FOODS_2_100_WI_3 -3150,FOODS_2_101_CA_1 -3151,FOODS_2_101_CA_2 -3152,FOODS_2_101_CA_3 
-3153,FOODS_2_101_CA_4 -3154,FOODS_2_101_TX_1 -3155,FOODS_2_101_TX_2 -3156,FOODS_2_101_TX_3 -3157,FOODS_2_101_WI_1 -3158,FOODS_2_101_WI_2 -3159,FOODS_2_101_WI_3 -3160,FOODS_2_102_CA_1 -3161,FOODS_2_102_CA_2 -3162,FOODS_2_102_CA_3 -3163,FOODS_2_102_CA_4 -3164,FOODS_2_102_TX_1 -3165,FOODS_2_102_TX_2 -3166,FOODS_2_102_TX_3 -3167,FOODS_2_102_WI_1 -3168,FOODS_2_102_WI_2 -3169,FOODS_2_102_WI_3 -3170,FOODS_2_103_CA_1 -3171,FOODS_2_103_CA_2 -3172,FOODS_2_103_CA_3 -3173,FOODS_2_103_CA_4 -3174,FOODS_2_103_TX_1 -3175,FOODS_2_103_TX_2 -3176,FOODS_2_103_TX_3 -3177,FOODS_2_103_WI_1 -3178,FOODS_2_103_WI_2 -3179,FOODS_2_103_WI_3 -3180,FOODS_2_104_CA_1 -3181,FOODS_2_104_CA_2 -3182,FOODS_2_104_CA_3 -3183,FOODS_2_104_CA_4 -3184,FOODS_2_104_TX_1 -3185,FOODS_2_104_TX_2 -3186,FOODS_2_104_TX_3 -3187,FOODS_2_104_WI_1 -3188,FOODS_2_104_WI_2 -3189,FOODS_2_104_WI_3 -3190,FOODS_2_105_CA_1 -3191,FOODS_2_105_CA_2 -3192,FOODS_2_105_CA_3 -3193,FOODS_2_105_CA_4 -3194,FOODS_2_105_TX_1 -3195,FOODS_2_105_TX_2 -3196,FOODS_2_105_TX_3 -3197,FOODS_2_105_WI_1 -3198,FOODS_2_105_WI_2 -3199,FOODS_2_105_WI_3 -3200,FOODS_2_106_CA_1 -3201,FOODS_2_106_CA_2 -3202,FOODS_2_106_CA_3 -3203,FOODS_2_106_CA_4 -3204,FOODS_2_106_TX_1 -3205,FOODS_2_106_TX_2 -3206,FOODS_2_106_TX_3 -3207,FOODS_2_106_WI_1 -3208,FOODS_2_106_WI_2 -3209,FOODS_2_106_WI_3 -3210,FOODS_2_107_CA_1 -3211,FOODS_2_107_CA_2 -3212,FOODS_2_107_CA_3 -3213,FOODS_2_107_CA_4 -3214,FOODS_2_107_TX_1 -3215,FOODS_2_107_TX_2 -3216,FOODS_2_107_TX_3 -3217,FOODS_2_107_WI_1 -3218,FOODS_2_107_WI_2 -3219,FOODS_2_107_WI_3 -3220,FOODS_2_108_CA_1 -3221,FOODS_2_108_CA_2 -3222,FOODS_2_108_CA_3 -3223,FOODS_2_108_CA_4 -3224,FOODS_2_108_TX_1 -3225,FOODS_2_108_TX_2 -3226,FOODS_2_108_TX_3 -3227,FOODS_2_108_WI_1 -3228,FOODS_2_108_WI_2 -3229,FOODS_2_108_WI_3 -3230,FOODS_2_109_CA_1 -3231,FOODS_2_109_CA_2 -3232,FOODS_2_109_CA_3 -3233,FOODS_2_109_CA_4 -3234,FOODS_2_109_TX_1 -3235,FOODS_2_109_TX_2 -3236,FOODS_2_109_TX_3 -3237,FOODS_2_109_WI_1 -3238,FOODS_2_109_WI_2 
-3239,FOODS_2_109_WI_3 -3240,FOODS_2_110_CA_1 -3241,FOODS_2_110_CA_2 -3242,FOODS_2_110_CA_3 -3243,FOODS_2_110_CA_4 -3244,FOODS_2_110_TX_1 -3245,FOODS_2_110_TX_2 -3246,FOODS_2_110_TX_3 -3247,FOODS_2_110_WI_1 -3248,FOODS_2_110_WI_2 -3249,FOODS_2_110_WI_3 -3250,FOODS_2_111_CA_1 -3251,FOODS_2_111_CA_2 -3252,FOODS_2_111_CA_3 -3253,FOODS_2_111_CA_4 -3254,FOODS_2_111_TX_1 -3255,FOODS_2_111_TX_2 -3256,FOODS_2_111_TX_3 -3257,FOODS_2_111_WI_1 -3258,FOODS_2_111_WI_2 -3259,FOODS_2_111_WI_3 -3260,FOODS_2_112_CA_1 -3261,FOODS_2_112_CA_2 -3262,FOODS_2_112_CA_3 -3263,FOODS_2_112_CA_4 -3264,FOODS_2_112_TX_1 -3265,FOODS_2_112_TX_2 -3266,FOODS_2_112_TX_3 -3267,FOODS_2_112_WI_1 -3268,FOODS_2_112_WI_2 -3269,FOODS_2_112_WI_3 -3270,FOODS_2_113_CA_1 -3271,FOODS_2_113_CA_2 -3272,FOODS_2_113_CA_3 -3273,FOODS_2_113_CA_4 -3274,FOODS_2_113_TX_1 -3275,FOODS_2_113_TX_2 -3276,FOODS_2_113_TX_3 -3277,FOODS_2_113_WI_1 -3278,FOODS_2_113_WI_2 -3279,FOODS_2_113_WI_3 -3280,FOODS_2_114_CA_1 -3281,FOODS_2_114_CA_2 -3282,FOODS_2_114_CA_3 -3283,FOODS_2_114_CA_4 -3284,FOODS_2_114_TX_1 -3285,FOODS_2_114_TX_2 -3286,FOODS_2_114_TX_3 -3287,FOODS_2_114_WI_1 -3288,FOODS_2_114_WI_2 -3289,FOODS_2_114_WI_3 -3290,FOODS_2_115_CA_1 -3291,FOODS_2_115_CA_2 -3292,FOODS_2_115_CA_3 -3293,FOODS_2_115_CA_4 -3294,FOODS_2_115_TX_1 -3295,FOODS_2_115_TX_2 -3296,FOODS_2_115_TX_3 -3297,FOODS_2_115_WI_1 -3298,FOODS_2_115_WI_2 -3299,FOODS_2_115_WI_3 -3300,FOODS_2_116_CA_1 -3301,FOODS_2_116_CA_2 -3302,FOODS_2_116_CA_3 -3303,FOODS_2_116_CA_4 -3304,FOODS_2_116_TX_1 -3305,FOODS_2_116_TX_2 -3306,FOODS_2_116_TX_3 -3307,FOODS_2_116_WI_1 -3308,FOODS_2_116_WI_2 -3309,FOODS_2_116_WI_3 -3310,FOODS_2_117_CA_1 -3311,FOODS_2_117_CA_2 -3312,FOODS_2_117_CA_3 -3313,FOODS_2_117_CA_4 -3314,FOODS_2_117_TX_1 -3315,FOODS_2_117_TX_2 -3316,FOODS_2_117_TX_3 -3317,FOODS_2_117_WI_1 -3318,FOODS_2_117_WI_2 -3319,FOODS_2_117_WI_3 -3320,FOODS_2_118_CA_1 -3321,FOODS_2_118_CA_2 -3322,FOODS_2_118_CA_3 -3323,FOODS_2_118_CA_4 -3324,FOODS_2_118_TX_1 
-3325,FOODS_2_118_TX_2 -3326,FOODS_2_118_TX_3 -3327,FOODS_2_118_WI_1 -3328,FOODS_2_118_WI_2 -3329,FOODS_2_118_WI_3 -3330,FOODS_2_119_CA_1 -3331,FOODS_2_119_CA_2 -3332,FOODS_2_119_CA_3 -3333,FOODS_2_119_CA_4 -3334,FOODS_2_119_TX_1 -3335,FOODS_2_119_TX_2 -3336,FOODS_2_119_TX_3 -3337,FOODS_2_119_WI_1 -3338,FOODS_2_119_WI_2 -3339,FOODS_2_119_WI_3 -3340,FOODS_2_120_CA_1 -3341,FOODS_2_120_CA_2 -3342,FOODS_2_120_CA_3 -3343,FOODS_2_120_CA_4 -3344,FOODS_2_120_TX_1 -3345,FOODS_2_120_TX_2 -3346,FOODS_2_120_TX_3 -3347,FOODS_2_120_WI_1 -3348,FOODS_2_120_WI_2 -3349,FOODS_2_120_WI_3 -3350,FOODS_2_121_CA_1 -3351,FOODS_2_121_CA_2 -3352,FOODS_2_121_CA_3 -3353,FOODS_2_121_CA_4 -3354,FOODS_2_121_TX_1 -3355,FOODS_2_121_TX_2 -3356,FOODS_2_121_TX_3 -3357,FOODS_2_121_WI_1 -3358,FOODS_2_121_WI_2 -3359,FOODS_2_121_WI_3 -3360,FOODS_2_122_CA_1 -3361,FOODS_2_122_CA_2 -3362,FOODS_2_122_CA_3 -3363,FOODS_2_122_CA_4 -3364,FOODS_2_122_TX_1 -3365,FOODS_2_122_TX_2 -3366,FOODS_2_122_TX_3 -3367,FOODS_2_122_WI_1 -3368,FOODS_2_122_WI_2 -3369,FOODS_2_122_WI_3 -3370,FOODS_2_123_CA_1 -3371,FOODS_2_123_CA_2 -3372,FOODS_2_123_CA_3 -3373,FOODS_2_123_CA_4 -3374,FOODS_2_123_TX_1 -3375,FOODS_2_123_TX_2 -3376,FOODS_2_123_TX_3 -3377,FOODS_2_123_WI_1 -3378,FOODS_2_123_WI_2 -3379,FOODS_2_123_WI_3 -3380,FOODS_2_124_CA_1 -3381,FOODS_2_124_CA_2 -3382,FOODS_2_124_CA_3 -3383,FOODS_2_124_CA_4 -3384,FOODS_2_124_TX_1 -3385,FOODS_2_124_TX_2 -3386,FOODS_2_124_TX_3 -3387,FOODS_2_124_WI_1 -3388,FOODS_2_124_WI_2 -3389,FOODS_2_124_WI_3 -3390,FOODS_2_125_CA_1 -3391,FOODS_2_125_CA_2 -3392,FOODS_2_125_CA_3 -3393,FOODS_2_125_CA_4 -3394,FOODS_2_125_TX_1 -3395,FOODS_2_125_TX_2 -3396,FOODS_2_125_TX_3 -3397,FOODS_2_125_WI_1 -3398,FOODS_2_125_WI_2 -3399,FOODS_2_125_WI_3 -3400,FOODS_2_126_CA_1 -3401,FOODS_2_126_CA_2 -3402,FOODS_2_126_CA_3 -3403,FOODS_2_126_CA_4 -3404,FOODS_2_126_TX_1 -3405,FOODS_2_126_TX_2 -3406,FOODS_2_126_TX_3 -3407,FOODS_2_126_WI_1 -3408,FOODS_2_126_WI_2 -3409,FOODS_2_126_WI_3 -3410,FOODS_2_127_CA_1 
-3411,FOODS_2_127_CA_2 -3412,FOODS_2_127_CA_3 -3413,FOODS_2_127_CA_4 -3414,FOODS_2_127_TX_1 -3415,FOODS_2_127_TX_2 -3416,FOODS_2_127_TX_3 -3417,FOODS_2_127_WI_1 -3418,FOODS_2_127_WI_2 -3419,FOODS_2_127_WI_3 -3420,FOODS_2_128_CA_1 -3421,FOODS_2_128_CA_2 -3422,FOODS_2_128_CA_3 -3423,FOODS_2_128_CA_4 -3424,FOODS_2_128_TX_1 -3425,FOODS_2_128_TX_2 -3426,FOODS_2_128_TX_3 -3427,FOODS_2_128_WI_1 -3428,FOODS_2_128_WI_2 -3429,FOODS_2_128_WI_3 -3430,FOODS_2_129_CA_1 -3431,FOODS_2_129_CA_2 -3432,FOODS_2_129_CA_3 -3433,FOODS_2_129_CA_4 -3434,FOODS_2_129_TX_1 -3435,FOODS_2_129_TX_2 -3436,FOODS_2_129_TX_3 -3437,FOODS_2_129_WI_1 -3438,FOODS_2_129_WI_2 -3439,FOODS_2_129_WI_3 -3440,FOODS_2_130_CA_1 -3441,FOODS_2_130_CA_2 -3442,FOODS_2_130_CA_3 -3443,FOODS_2_130_CA_4 -3444,FOODS_2_130_TX_1 -3445,FOODS_2_130_TX_2 -3446,FOODS_2_130_TX_3 -3447,FOODS_2_130_WI_1 -3448,FOODS_2_130_WI_2 -3449,FOODS_2_130_WI_3 -3450,FOODS_2_131_CA_1 -3451,FOODS_2_131_CA_2 -3452,FOODS_2_131_CA_3 -3453,FOODS_2_131_CA_4 -3454,FOODS_2_131_TX_1 -3455,FOODS_2_131_TX_2 -3456,FOODS_2_131_TX_3 -3457,FOODS_2_131_WI_1 -3458,FOODS_2_131_WI_2 -3459,FOODS_2_131_WI_3 -3460,FOODS_2_132_CA_1 -3461,FOODS_2_132_CA_2 -3462,FOODS_2_132_CA_3 -3463,FOODS_2_132_CA_4 -3464,FOODS_2_132_TX_1 -3465,FOODS_2_132_TX_2 -3466,FOODS_2_132_TX_3 -3467,FOODS_2_132_WI_1 -3468,FOODS_2_132_WI_2 -3469,FOODS_2_132_WI_3 -3470,FOODS_2_133_CA_1 -3471,FOODS_2_133_CA_2 -3472,FOODS_2_133_CA_3 -3473,FOODS_2_133_CA_4 -3474,FOODS_2_133_TX_1 -3475,FOODS_2_133_TX_2 -3476,FOODS_2_133_TX_3 -3477,FOODS_2_133_WI_1 -3478,FOODS_2_133_WI_2 -3479,FOODS_2_133_WI_3 -3480,FOODS_2_134_CA_1 -3481,FOODS_2_134_CA_2 -3482,FOODS_2_134_CA_3 -3483,FOODS_2_134_CA_4 -3484,FOODS_2_134_TX_1 -3485,FOODS_2_134_TX_2 -3486,FOODS_2_134_TX_3 -3487,FOODS_2_134_WI_1 -3488,FOODS_2_134_WI_2 -3489,FOODS_2_134_WI_3 -3490,FOODS_2_135_CA_1 -3491,FOODS_2_135_CA_2 -3492,FOODS_2_135_CA_3 -3493,FOODS_2_135_CA_4 -3494,FOODS_2_135_TX_1 -3495,FOODS_2_135_TX_2 -3496,FOODS_2_135_TX_3 
-3497,FOODS_2_135_WI_1 -3498,FOODS_2_135_WI_2 -3499,FOODS_2_135_WI_3 -3500,FOODS_2_136_CA_1 -3501,FOODS_2_136_CA_2 -3502,FOODS_2_136_CA_3 -3503,FOODS_2_136_CA_4 -3504,FOODS_2_136_TX_1 -3505,FOODS_2_136_TX_2 -3506,FOODS_2_136_TX_3 -3507,FOODS_2_136_WI_1 -3508,FOODS_2_136_WI_2 -3509,FOODS_2_136_WI_3 -3510,FOODS_2_137_CA_1 -3511,FOODS_2_137_CA_2 -3512,FOODS_2_137_CA_3 -3513,FOODS_2_137_CA_4 -3514,FOODS_2_137_TX_1 -3515,FOODS_2_137_TX_2 -3516,FOODS_2_137_TX_3 -3517,FOODS_2_137_WI_1 -3518,FOODS_2_137_WI_2 -3519,FOODS_2_137_WI_3 -3520,FOODS_2_138_CA_1 -3521,FOODS_2_138_CA_2 -3522,FOODS_2_138_CA_3 -3523,FOODS_2_138_CA_4 -3524,FOODS_2_138_TX_1 -3525,FOODS_2_138_TX_2 -3526,FOODS_2_138_TX_3 -3527,FOODS_2_138_WI_1 -3528,FOODS_2_138_WI_2 -3529,FOODS_2_138_WI_3 -3530,FOODS_2_139_CA_1 -3531,FOODS_2_139_CA_2 -3532,FOODS_2_139_CA_3 -3533,FOODS_2_139_CA_4 -3534,FOODS_2_139_TX_1 -3535,FOODS_2_139_TX_2 -3536,FOODS_2_139_TX_3 -3537,FOODS_2_139_WI_1 -3538,FOODS_2_139_WI_2 -3539,FOODS_2_139_WI_3 -3540,FOODS_2_140_CA_1 -3541,FOODS_2_140_CA_2 -3542,FOODS_2_140_CA_3 -3543,FOODS_2_140_CA_4 -3544,FOODS_2_140_TX_1 -3545,FOODS_2_140_TX_2 -3546,FOODS_2_140_TX_3 -3547,FOODS_2_140_WI_1 -3548,FOODS_2_140_WI_2 -3549,FOODS_2_140_WI_3 -3550,FOODS_2_141_CA_1 -3551,FOODS_2_141_CA_2 -3552,FOODS_2_141_CA_3 -3553,FOODS_2_141_CA_4 -3554,FOODS_2_141_TX_1 -3555,FOODS_2_141_TX_2 -3556,FOODS_2_141_TX_3 -3557,FOODS_2_141_WI_1 -3558,FOODS_2_141_WI_2 -3559,FOODS_2_141_WI_3 -3560,FOODS_2_142_CA_1 -3561,FOODS_2_142_CA_2 -3562,FOODS_2_142_CA_3 -3563,FOODS_2_142_CA_4 -3564,FOODS_2_142_TX_1 -3565,FOODS_2_142_TX_2 -3566,FOODS_2_142_TX_3 -3567,FOODS_2_142_WI_1 -3568,FOODS_2_142_WI_2 -3569,FOODS_2_142_WI_3 -3570,FOODS_2_143_CA_1 -3571,FOODS_2_143_CA_2 -3572,FOODS_2_143_CA_3 -3573,FOODS_2_143_CA_4 -3574,FOODS_2_143_TX_1 -3575,FOODS_2_143_TX_2 -3576,FOODS_2_143_TX_3 -3577,FOODS_2_143_WI_1 -3578,FOODS_2_143_WI_2 -3579,FOODS_2_143_WI_3 -3580,FOODS_2_144_CA_1 -3581,FOODS_2_144_CA_2 -3582,FOODS_2_144_CA_3 
-3583,FOODS_2_144_CA_4 -3584,FOODS_2_144_TX_1 -3585,FOODS_2_144_TX_2 -3586,FOODS_2_144_TX_3 -3587,FOODS_2_144_WI_1 -3588,FOODS_2_144_WI_2 -3589,FOODS_2_144_WI_3 -3590,FOODS_2_145_CA_1 -3591,FOODS_2_145_CA_2 -3592,FOODS_2_145_CA_3 -3593,FOODS_2_145_CA_4 -3594,FOODS_2_145_TX_1 -3595,FOODS_2_145_TX_2 -3596,FOODS_2_145_TX_3 -3597,FOODS_2_145_WI_1 -3598,FOODS_2_145_WI_2 -3599,FOODS_2_145_WI_3 -3600,FOODS_2_146_CA_1 -3601,FOODS_2_146_CA_2 -3602,FOODS_2_146_CA_3 -3603,FOODS_2_146_CA_4 -3604,FOODS_2_146_TX_1 -3605,FOODS_2_146_TX_2 -3606,FOODS_2_146_TX_3 -3607,FOODS_2_146_WI_1 -3608,FOODS_2_146_WI_2 -3609,FOODS_2_146_WI_3 -3610,FOODS_2_147_CA_1 -3611,FOODS_2_147_CA_2 -3612,FOODS_2_147_CA_3 -3613,FOODS_2_147_CA_4 -3614,FOODS_2_147_TX_1 -3615,FOODS_2_147_TX_2 -3616,FOODS_2_147_TX_3 -3617,FOODS_2_147_WI_1 -3618,FOODS_2_147_WI_2 -3619,FOODS_2_147_WI_3 -3620,FOODS_2_148_CA_1 -3621,FOODS_2_148_CA_2 -3622,FOODS_2_148_CA_3 -3623,FOODS_2_148_CA_4 -3624,FOODS_2_148_TX_1 -3625,FOODS_2_148_TX_2 -3626,FOODS_2_148_TX_3 -3627,FOODS_2_148_WI_1 -3628,FOODS_2_148_WI_2 -3629,FOODS_2_148_WI_3 -3630,FOODS_2_149_CA_1 -3631,FOODS_2_149_CA_2 -3632,FOODS_2_149_CA_3 -3633,FOODS_2_149_CA_4 -3634,FOODS_2_149_TX_1 -3635,FOODS_2_149_TX_2 -3636,FOODS_2_149_TX_3 -3637,FOODS_2_149_WI_1 -3638,FOODS_2_149_WI_2 -3639,FOODS_2_149_WI_3 -3640,FOODS_2_150_CA_1 -3641,FOODS_2_150_CA_2 -3642,FOODS_2_150_CA_3 -3643,FOODS_2_150_CA_4 -3644,FOODS_2_150_TX_1 -3645,FOODS_2_150_TX_2 -3646,FOODS_2_150_TX_3 -3647,FOODS_2_150_WI_1 -3648,FOODS_2_150_WI_2 -3649,FOODS_2_150_WI_3 -3650,FOODS_2_151_CA_1 -3651,FOODS_2_151_CA_2 -3652,FOODS_2_151_CA_3 -3653,FOODS_2_151_CA_4 -3654,FOODS_2_151_TX_1 -3655,FOODS_2_151_TX_2 -3656,FOODS_2_151_TX_3 -3657,FOODS_2_151_WI_1 -3658,FOODS_2_151_WI_2 -3659,FOODS_2_151_WI_3 -3660,FOODS_2_152_CA_1 -3661,FOODS_2_152_CA_2 -3662,FOODS_2_152_CA_3 -3663,FOODS_2_152_CA_4 -3664,FOODS_2_152_TX_1 -3665,FOODS_2_152_TX_2 -3666,FOODS_2_152_TX_3 -3667,FOODS_2_152_WI_1 -3668,FOODS_2_152_WI_2 
-3669,FOODS_2_152_WI_3 -3670,FOODS_2_153_CA_1 -3671,FOODS_2_153_CA_2 -3672,FOODS_2_153_CA_3 -3673,FOODS_2_153_CA_4 -3674,FOODS_2_153_TX_1 -3675,FOODS_2_153_TX_2 -3676,FOODS_2_153_TX_3 -3677,FOODS_2_153_WI_1 -3678,FOODS_2_153_WI_2 -3679,FOODS_2_153_WI_3 -3680,FOODS_2_154_CA_1 -3681,FOODS_2_154_CA_2 -3682,FOODS_2_154_CA_3 -3683,FOODS_2_154_CA_4 -3684,FOODS_2_154_TX_1 -3685,FOODS_2_154_TX_2 -3686,FOODS_2_154_TX_3 -3687,FOODS_2_154_WI_1 -3688,FOODS_2_154_WI_2 -3689,FOODS_2_154_WI_3 -3690,FOODS_2_155_CA_1 -3691,FOODS_2_155_CA_2 -3692,FOODS_2_155_CA_3 -3693,FOODS_2_155_CA_4 -3694,FOODS_2_155_TX_1 -3695,FOODS_2_155_TX_2 -3696,FOODS_2_155_TX_3 -3697,FOODS_2_155_WI_1 -3698,FOODS_2_155_WI_2 -3699,FOODS_2_155_WI_3 -3700,FOODS_2_156_CA_1 -3701,FOODS_2_156_CA_2 -3702,FOODS_2_156_CA_3 -3703,FOODS_2_156_CA_4 -3704,FOODS_2_156_TX_1 -3705,FOODS_2_156_TX_2 -3706,FOODS_2_156_TX_3 -3707,FOODS_2_156_WI_1 -3708,FOODS_2_156_WI_2 -3709,FOODS_2_156_WI_3 -3710,FOODS_2_157_CA_1 -3711,FOODS_2_157_CA_2 -3712,FOODS_2_157_CA_3 -3713,FOODS_2_157_CA_4 -3714,FOODS_2_157_TX_1 -3715,FOODS_2_157_TX_2 -3716,FOODS_2_157_TX_3 -3717,FOODS_2_157_WI_1 -3718,FOODS_2_157_WI_2 -3719,FOODS_2_157_WI_3 -3720,FOODS_2_158_CA_1 -3721,FOODS_2_158_CA_2 -3722,FOODS_2_158_CA_3 -3723,FOODS_2_158_CA_4 -3724,FOODS_2_158_TX_1 -3725,FOODS_2_158_TX_2 -3726,FOODS_2_158_TX_3 -3727,FOODS_2_158_WI_1 -3728,FOODS_2_158_WI_2 -3729,FOODS_2_158_WI_3 -3730,FOODS_2_159_CA_1 -3731,FOODS_2_159_CA_2 -3732,FOODS_2_159_CA_3 -3733,FOODS_2_159_CA_4 -3734,FOODS_2_159_TX_1 -3735,FOODS_2_159_TX_2 -3736,FOODS_2_159_TX_3 -3737,FOODS_2_159_WI_1 -3738,FOODS_2_159_WI_2 -3739,FOODS_2_159_WI_3 -3740,FOODS_2_160_CA_1 -3741,FOODS_2_160_CA_2 -3742,FOODS_2_160_CA_3 -3743,FOODS_2_160_CA_4 -3744,FOODS_2_160_TX_1 -3745,FOODS_2_160_TX_2 -3746,FOODS_2_160_TX_3 -3747,FOODS_2_160_WI_1 -3748,FOODS_2_160_WI_2 -3749,FOODS_2_160_WI_3 -3750,FOODS_2_161_CA_1 -3751,FOODS_2_161_CA_2 -3752,FOODS_2_161_CA_3 -3753,FOODS_2_161_CA_4 -3754,FOODS_2_161_TX_1 
-3755,FOODS_2_161_TX_2 -3756,FOODS_2_161_TX_3 -3757,FOODS_2_161_WI_1 -3758,FOODS_2_161_WI_2 -3759,FOODS_2_161_WI_3 -3760,FOODS_2_162_CA_1 -3761,FOODS_2_162_CA_2 -3762,FOODS_2_162_CA_3 -3763,FOODS_2_162_CA_4 -3764,FOODS_2_162_TX_1 -3765,FOODS_2_162_TX_2 -3766,FOODS_2_162_TX_3 -3767,FOODS_2_162_WI_1 -3768,FOODS_2_162_WI_2 -3769,FOODS_2_162_WI_3 -3770,FOODS_2_163_CA_1 -3771,FOODS_2_163_CA_2 -3772,FOODS_2_163_CA_3 -3773,FOODS_2_163_CA_4 -3774,FOODS_2_163_TX_1 -3775,FOODS_2_163_TX_2 -3776,FOODS_2_163_TX_3 -3777,FOODS_2_163_WI_1 -3778,FOODS_2_163_WI_2 -3779,FOODS_2_163_WI_3 -3780,FOODS_2_164_CA_1 -3781,FOODS_2_164_CA_2 -3782,FOODS_2_164_CA_3 -3783,FOODS_2_164_CA_4 -3784,FOODS_2_164_TX_1 -3785,FOODS_2_164_TX_2 -3786,FOODS_2_164_TX_3 -3787,FOODS_2_164_WI_1 -3788,FOODS_2_164_WI_2 -3789,FOODS_2_164_WI_3 -3790,FOODS_2_165_CA_1 -3791,FOODS_2_165_CA_2 -3792,FOODS_2_165_CA_3 -3793,FOODS_2_165_CA_4 -3794,FOODS_2_165_TX_1 -3795,FOODS_2_165_TX_2 -3796,FOODS_2_165_TX_3 -3797,FOODS_2_165_WI_1 -3798,FOODS_2_165_WI_2 -3799,FOODS_2_165_WI_3 -3800,FOODS_2_166_CA_1 -3801,FOODS_2_166_CA_2 -3802,FOODS_2_166_CA_3 -3803,FOODS_2_166_CA_4 -3804,FOODS_2_166_TX_1 -3805,FOODS_2_166_TX_2 -3806,FOODS_2_166_TX_3 -3807,FOODS_2_166_WI_1 -3808,FOODS_2_166_WI_2 -3809,FOODS_2_166_WI_3 -3810,FOODS_2_167_CA_1 -3811,FOODS_2_167_CA_2 -3812,FOODS_2_167_CA_3 -3813,FOODS_2_167_CA_4 -3814,FOODS_2_167_TX_1 -3815,FOODS_2_167_TX_2 -3816,FOODS_2_167_TX_3 -3817,FOODS_2_167_WI_1 -3818,FOODS_2_167_WI_2 -3819,FOODS_2_167_WI_3 -3820,FOODS_2_168_CA_1 -3821,FOODS_2_168_CA_2 -3822,FOODS_2_168_CA_3 -3823,FOODS_2_168_CA_4 -3824,FOODS_2_168_TX_1 -3825,FOODS_2_168_TX_2 -3826,FOODS_2_168_TX_3 -3827,FOODS_2_168_WI_1 -3828,FOODS_2_168_WI_2 -3829,FOODS_2_168_WI_3 -3830,FOODS_2_169_CA_1 -3831,FOODS_2_169_CA_2 -3832,FOODS_2_169_CA_3 -3833,FOODS_2_169_CA_4 -3834,FOODS_2_169_TX_1 -3835,FOODS_2_169_TX_2 -3836,FOODS_2_169_TX_3 -3837,FOODS_2_169_WI_1 -3838,FOODS_2_169_WI_2 -3839,FOODS_2_169_WI_3 -3840,FOODS_2_170_CA_1 
-3841,FOODS_2_170_CA_2 -3842,FOODS_2_170_CA_3 -3843,FOODS_2_170_CA_4 -3844,FOODS_2_170_TX_1 -3845,FOODS_2_170_TX_2 -3846,FOODS_2_170_TX_3 -3847,FOODS_2_170_WI_1 -3848,FOODS_2_170_WI_2 -3849,FOODS_2_170_WI_3 -3850,FOODS_2_171_CA_1 -3851,FOODS_2_171_CA_2 -3852,FOODS_2_171_CA_3 -3853,FOODS_2_171_CA_4 -3854,FOODS_2_171_TX_1 -3855,FOODS_2_171_TX_2 -3856,FOODS_2_171_TX_3 -3857,FOODS_2_171_WI_1 -3858,FOODS_2_171_WI_2 -3859,FOODS_2_171_WI_3 -3860,FOODS_2_172_CA_1 -3861,FOODS_2_172_CA_2 -3862,FOODS_2_172_CA_3 -3863,FOODS_2_172_CA_4 -3864,FOODS_2_172_TX_1 -3865,FOODS_2_172_TX_2 -3866,FOODS_2_172_TX_3 -3867,FOODS_2_172_WI_1 -3868,FOODS_2_172_WI_2 -3869,FOODS_2_172_WI_3 -3870,FOODS_2_173_CA_1 -3871,FOODS_2_173_CA_2 -3872,FOODS_2_173_CA_3 -3873,FOODS_2_173_CA_4 -3874,FOODS_2_173_TX_1 -3875,FOODS_2_173_TX_2 -3876,FOODS_2_173_TX_3 -3877,FOODS_2_173_WI_1 -3878,FOODS_2_173_WI_2 -3879,FOODS_2_173_WI_3 -3880,FOODS_2_174_CA_1 -3881,FOODS_2_174_CA_2 -3882,FOODS_2_174_CA_3 -3883,FOODS_2_174_CA_4 -3884,FOODS_2_174_TX_1 -3885,FOODS_2_174_TX_2 -3886,FOODS_2_174_TX_3 -3887,FOODS_2_174_WI_1 -3888,FOODS_2_174_WI_2 -3889,FOODS_2_174_WI_3 -3890,FOODS_2_175_CA_1 -3891,FOODS_2_175_CA_2 -3892,FOODS_2_175_CA_3 -3893,FOODS_2_175_CA_4 -3894,FOODS_2_175_TX_1 -3895,FOODS_2_175_TX_2 -3896,FOODS_2_175_TX_3 -3897,FOODS_2_175_WI_1 -3898,FOODS_2_175_WI_2 -3899,FOODS_2_175_WI_3 -3900,FOODS_2_176_CA_1 -3901,FOODS_2_176_CA_2 -3902,FOODS_2_176_CA_3 -3903,FOODS_2_176_CA_4 -3904,FOODS_2_176_TX_1 -3905,FOODS_2_176_TX_2 -3906,FOODS_2_176_TX_3 -3907,FOODS_2_176_WI_1 -3908,FOODS_2_176_WI_2 -3909,FOODS_2_176_WI_3 -3910,FOODS_2_177_CA_1 -3911,FOODS_2_177_CA_2 -3912,FOODS_2_177_CA_3 -3913,FOODS_2_177_CA_4 -3914,FOODS_2_177_TX_1 -3915,FOODS_2_177_TX_2 -3916,FOODS_2_177_TX_3 -3917,FOODS_2_177_WI_1 -3918,FOODS_2_177_WI_2 -3919,FOODS_2_177_WI_3 -3920,FOODS_2_178_CA_1 -3921,FOODS_2_178_CA_2 -3922,FOODS_2_178_CA_3 -3923,FOODS_2_178_CA_4 -3924,FOODS_2_178_TX_1 -3925,FOODS_2_178_TX_2 -3926,FOODS_2_178_TX_3 
-3927,FOODS_2_178_WI_1 -3928,FOODS_2_178_WI_2 -3929,FOODS_2_178_WI_3 -3930,FOODS_2_179_CA_1 -3931,FOODS_2_179_CA_2 -3932,FOODS_2_179_CA_3 -3933,FOODS_2_179_CA_4 -3934,FOODS_2_179_TX_1 -3935,FOODS_2_179_TX_2 -3936,FOODS_2_179_TX_3 -3937,FOODS_2_179_WI_1 -3938,FOODS_2_179_WI_2 -3939,FOODS_2_179_WI_3 -3940,FOODS_2_180_CA_1 -3941,FOODS_2_180_CA_2 -3942,FOODS_2_180_CA_3 -3943,FOODS_2_180_CA_4 -3944,FOODS_2_180_TX_1 -3945,FOODS_2_180_TX_2 -3946,FOODS_2_180_TX_3 -3947,FOODS_2_180_WI_1 -3948,FOODS_2_180_WI_2 -3949,FOODS_2_180_WI_3 -3950,FOODS_2_181_CA_1 -3951,FOODS_2_181_CA_2 -3952,FOODS_2_181_CA_3 -3953,FOODS_2_181_CA_4 -3954,FOODS_2_181_TX_1 -3955,FOODS_2_181_TX_2 -3956,FOODS_2_181_TX_3 -3957,FOODS_2_181_WI_1 -3958,FOODS_2_181_WI_2 -3959,FOODS_2_181_WI_3 -3960,FOODS_2_182_CA_1 -3961,FOODS_2_182_CA_2 -3962,FOODS_2_182_CA_3 -3963,FOODS_2_182_CA_4 -3964,FOODS_2_182_TX_1 -3965,FOODS_2_182_TX_2 -3966,FOODS_2_182_TX_3 -3967,FOODS_2_182_WI_1 -3968,FOODS_2_182_WI_2 -3969,FOODS_2_182_WI_3 -3970,FOODS_2_183_CA_1 -3971,FOODS_2_183_CA_2 -3972,FOODS_2_183_CA_3 -3973,FOODS_2_183_CA_4 -3974,FOODS_2_183_TX_1 -3975,FOODS_2_183_TX_2 -3976,FOODS_2_183_TX_3 -3977,FOODS_2_183_WI_1 -3978,FOODS_2_183_WI_2 -3979,FOODS_2_183_WI_3 -3980,FOODS_2_184_CA_1 -3981,FOODS_2_184_CA_2 -3982,FOODS_2_184_CA_3 -3983,FOODS_2_184_CA_4 -3984,FOODS_2_184_TX_1 -3985,FOODS_2_184_TX_2 -3986,FOODS_2_184_TX_3 -3987,FOODS_2_184_WI_1 -3988,FOODS_2_184_WI_2 -3989,FOODS_2_184_WI_3 -3990,FOODS_2_185_CA_1 -3991,FOODS_2_185_CA_2 -3992,FOODS_2_185_CA_3 -3993,FOODS_2_185_CA_4 -3994,FOODS_2_185_TX_1 -3995,FOODS_2_185_TX_2 -3996,FOODS_2_185_TX_3 -3997,FOODS_2_185_WI_1 -3998,FOODS_2_185_WI_2 -3999,FOODS_2_185_WI_3 -4000,FOODS_2_186_CA_1 -4001,FOODS_2_186_CA_2 -4002,FOODS_2_186_CA_3 -4003,FOODS_2_186_CA_4 -4004,FOODS_2_186_TX_1 -4005,FOODS_2_186_TX_2 -4006,FOODS_2_186_TX_3 -4007,FOODS_2_186_WI_1 -4008,FOODS_2_186_WI_2 -4009,FOODS_2_186_WI_3 -4010,FOODS_2_187_CA_1 -4011,FOODS_2_187_CA_2 -4012,FOODS_2_187_CA_3 
-4013,FOODS_2_187_CA_4 -4014,FOODS_2_187_TX_1 -4015,FOODS_2_187_TX_2 -4016,FOODS_2_187_TX_3 -4017,FOODS_2_187_WI_1 -4018,FOODS_2_187_WI_2 -4019,FOODS_2_187_WI_3 -4020,FOODS_2_188_CA_1 -4021,FOODS_2_188_CA_2 -4022,FOODS_2_188_CA_3 -4023,FOODS_2_188_CA_4 -4024,FOODS_2_188_TX_1 -4025,FOODS_2_188_TX_2 -4026,FOODS_2_188_TX_3 -4027,FOODS_2_188_WI_1 -4028,FOODS_2_188_WI_2 -4029,FOODS_2_188_WI_3 -4030,FOODS_2_189_CA_1 -4031,FOODS_2_189_CA_2 -4032,FOODS_2_189_CA_3 -4033,FOODS_2_189_CA_4 -4034,FOODS_2_189_TX_1 -4035,FOODS_2_189_TX_2 -4036,FOODS_2_189_TX_3 -4037,FOODS_2_189_WI_1 -4038,FOODS_2_189_WI_2 -4039,FOODS_2_189_WI_3 -4040,FOODS_2_190_CA_1 -4041,FOODS_2_190_CA_2 -4042,FOODS_2_190_CA_3 -4043,FOODS_2_190_CA_4 -4044,FOODS_2_190_TX_1 -4045,FOODS_2_190_TX_2 -4046,FOODS_2_190_TX_3 -4047,FOODS_2_190_WI_1 -4048,FOODS_2_190_WI_2 -4049,FOODS_2_190_WI_3 -4050,FOODS_2_191_CA_1 -4051,FOODS_2_191_CA_2 -4052,FOODS_2_191_CA_3 -4053,FOODS_2_191_CA_4 -4054,FOODS_2_191_TX_1 -4055,FOODS_2_191_TX_2 -4056,FOODS_2_191_TX_3 -4057,FOODS_2_191_WI_1 -4058,FOODS_2_191_WI_2 -4059,FOODS_2_191_WI_3 -4060,FOODS_2_192_CA_1 -4061,FOODS_2_192_CA_2 -4062,FOODS_2_192_CA_3 -4063,FOODS_2_192_CA_4 -4064,FOODS_2_192_TX_1 -4065,FOODS_2_192_TX_2 -4066,FOODS_2_192_TX_3 -4067,FOODS_2_192_WI_1 -4068,FOODS_2_192_WI_2 -4069,FOODS_2_192_WI_3 -4070,FOODS_2_193_CA_1 -4071,FOODS_2_193_CA_2 -4072,FOODS_2_193_CA_3 -4073,FOODS_2_193_CA_4 -4074,FOODS_2_193_TX_1 -4075,FOODS_2_193_TX_2 -4076,FOODS_2_193_TX_3 -4077,FOODS_2_193_WI_1 -4078,FOODS_2_193_WI_2 -4079,FOODS_2_193_WI_3 -4080,FOODS_2_194_CA_1 -4081,FOODS_2_194_CA_2 -4082,FOODS_2_194_CA_3 -4083,FOODS_2_194_CA_4 -4084,FOODS_2_194_TX_1 -4085,FOODS_2_194_TX_2 -4086,FOODS_2_194_TX_3 -4087,FOODS_2_194_WI_1 -4088,FOODS_2_194_WI_2 -4089,FOODS_2_194_WI_3 -4090,FOODS_2_195_CA_1 -4091,FOODS_2_195_CA_2 -4092,FOODS_2_195_CA_3 -4093,FOODS_2_195_CA_4 -4094,FOODS_2_195_TX_1 -4095,FOODS_2_195_TX_2 -4096,FOODS_2_195_TX_3 -4097,FOODS_2_195_WI_1 -4098,FOODS_2_195_WI_2 
-4099,FOODS_2_195_WI_3 -4100,FOODS_2_196_CA_1 -4101,FOODS_2_196_CA_2 -4102,FOODS_2_196_CA_3 -4103,FOODS_2_196_CA_4 -4104,FOODS_2_196_TX_1 -4105,FOODS_2_196_TX_2 -4106,FOODS_2_196_TX_3 -4107,FOODS_2_196_WI_1 -4108,FOODS_2_196_WI_2 -4109,FOODS_2_196_WI_3 -4110,FOODS_2_197_CA_1 -4111,FOODS_2_197_CA_2 -4112,FOODS_2_197_CA_3 -4113,FOODS_2_197_CA_4 -4114,FOODS_2_197_TX_1 -4115,FOODS_2_197_TX_2 -4116,FOODS_2_197_TX_3 -4117,FOODS_2_197_WI_1 -4118,FOODS_2_197_WI_2 -4119,FOODS_2_197_WI_3 -4120,FOODS_2_198_CA_1 -4121,FOODS_2_198_CA_2 -4122,FOODS_2_198_CA_3 -4123,FOODS_2_198_CA_4 -4124,FOODS_2_198_TX_1 -4125,FOODS_2_198_TX_2 -4126,FOODS_2_198_TX_3 -4127,FOODS_2_198_WI_1 -4128,FOODS_2_198_WI_2 -4129,FOODS_2_198_WI_3 -4130,FOODS_2_199_CA_1 -4131,FOODS_2_199_CA_2 -4132,FOODS_2_199_CA_3 -4133,FOODS_2_199_CA_4 -4134,FOODS_2_199_TX_1 -4135,FOODS_2_199_TX_2 -4136,FOODS_2_199_TX_3 -4137,FOODS_2_199_WI_1 -4138,FOODS_2_199_WI_2 -4139,FOODS_2_199_WI_3 -4140,FOODS_2_200_CA_1 -4141,FOODS_2_200_CA_2 -4142,FOODS_2_200_CA_3 -4143,FOODS_2_200_CA_4 -4144,FOODS_2_200_TX_1 -4145,FOODS_2_200_TX_2 -4146,FOODS_2_200_TX_3 -4147,FOODS_2_200_WI_1 -4148,FOODS_2_200_WI_2 -4149,FOODS_2_200_WI_3 -4150,FOODS_2_201_CA_1 -4151,FOODS_2_201_CA_2 -4152,FOODS_2_201_CA_3 -4153,FOODS_2_201_CA_4 -4154,FOODS_2_201_TX_1 -4155,FOODS_2_201_TX_2 -4156,FOODS_2_201_TX_3 -4157,FOODS_2_201_WI_1 -4158,FOODS_2_201_WI_2 -4159,FOODS_2_201_WI_3 -4160,FOODS_2_202_CA_1 -4161,FOODS_2_202_CA_2 -4162,FOODS_2_202_CA_3 -4163,FOODS_2_202_CA_4 -4164,FOODS_2_202_TX_1 -4165,FOODS_2_202_TX_2 -4166,FOODS_2_202_TX_3 -4167,FOODS_2_202_WI_1 -4168,FOODS_2_202_WI_2 -4169,FOODS_2_202_WI_3 -4170,FOODS_2_203_CA_1 -4171,FOODS_2_203_CA_2 -4172,FOODS_2_203_CA_3 -4173,FOODS_2_203_CA_4 -4174,FOODS_2_203_TX_1 -4175,FOODS_2_203_TX_2 -4176,FOODS_2_203_TX_3 -4177,FOODS_2_203_WI_1 -4178,FOODS_2_203_WI_2 -4179,FOODS_2_203_WI_3 -4180,FOODS_2_204_CA_1 -4181,FOODS_2_204_CA_2 -4182,FOODS_2_204_CA_3 -4183,FOODS_2_204_CA_4 -4184,FOODS_2_204_TX_1 
-4185,FOODS_2_204_TX_2 -4186,FOODS_2_204_TX_3 -4187,FOODS_2_204_WI_1 -4188,FOODS_2_204_WI_2 -4189,FOODS_2_204_WI_3 -4190,FOODS_2_205_CA_1 -4191,FOODS_2_205_CA_2 -4192,FOODS_2_205_CA_3 -4193,FOODS_2_205_CA_4 -4194,FOODS_2_205_TX_1 -4195,FOODS_2_205_TX_2 -4196,FOODS_2_205_TX_3 -4197,FOODS_2_205_WI_1 -4198,FOODS_2_205_WI_2 -4199,FOODS_2_205_WI_3 -4200,FOODS_2_206_CA_1 -4201,FOODS_2_206_CA_2 -4202,FOODS_2_206_CA_3 -4203,FOODS_2_206_CA_4 -4204,FOODS_2_206_TX_1 -4205,FOODS_2_206_TX_2 -4206,FOODS_2_206_TX_3 -4207,FOODS_2_206_WI_1 -4208,FOODS_2_206_WI_2 -4209,FOODS_2_206_WI_3 -4210,FOODS_2_207_CA_1 -4211,FOODS_2_207_CA_2 -4212,FOODS_2_207_CA_3 -4213,FOODS_2_207_CA_4 -4214,FOODS_2_207_TX_1 -4215,FOODS_2_207_TX_2 -4216,FOODS_2_207_TX_3 -4217,FOODS_2_207_WI_1 -4218,FOODS_2_207_WI_2 -4219,FOODS_2_207_WI_3 -4220,FOODS_2_208_CA_1 -4221,FOODS_2_208_CA_2 -4222,FOODS_2_208_CA_3 -4223,FOODS_2_208_CA_4 -4224,FOODS_2_208_TX_1 -4225,FOODS_2_208_TX_2 -4226,FOODS_2_208_TX_3 -4227,FOODS_2_208_WI_1 -4228,FOODS_2_208_WI_2 -4229,FOODS_2_208_WI_3 -4230,FOODS_2_209_CA_1 -4231,FOODS_2_209_CA_2 -4232,FOODS_2_209_CA_3 -4233,FOODS_2_209_CA_4 -4234,FOODS_2_209_TX_1 -4235,FOODS_2_209_TX_2 -4236,FOODS_2_209_TX_3 -4237,FOODS_2_209_WI_1 -4238,FOODS_2_209_WI_2 -4239,FOODS_2_209_WI_3 -4240,FOODS_2_210_CA_1 -4241,FOODS_2_210_CA_2 -4242,FOODS_2_210_CA_3 -4243,FOODS_2_210_CA_4 -4244,FOODS_2_210_TX_1 -4245,FOODS_2_210_TX_2 -4246,FOODS_2_210_TX_3 -4247,FOODS_2_210_WI_1 -4248,FOODS_2_210_WI_2 -4249,FOODS_2_210_WI_3 -4250,FOODS_2_211_CA_1 -4251,FOODS_2_211_CA_2 -4252,FOODS_2_211_CA_3 -4253,FOODS_2_211_CA_4 -4254,FOODS_2_211_TX_1 -4255,FOODS_2_211_TX_2 -4256,FOODS_2_211_TX_3 -4257,FOODS_2_211_WI_1 -4258,FOODS_2_211_WI_2 -4259,FOODS_2_211_WI_3 -4260,FOODS_2_212_CA_1 -4261,FOODS_2_212_CA_2 -4262,FOODS_2_212_CA_3 -4263,FOODS_2_212_CA_4 -4264,FOODS_2_212_TX_1 -4265,FOODS_2_212_TX_2 -4266,FOODS_2_212_TX_3 -4267,FOODS_2_212_WI_1 -4268,FOODS_2_212_WI_2 -4269,FOODS_2_212_WI_3 -4270,FOODS_2_213_CA_1 
-4271,FOODS_2_213_CA_2 -4272,FOODS_2_213_CA_3 -4273,FOODS_2_213_CA_4 -4274,FOODS_2_213_TX_1 -4275,FOODS_2_213_TX_2 -4276,FOODS_2_213_TX_3 -4277,FOODS_2_213_WI_1 -4278,FOODS_2_213_WI_2 -4279,FOODS_2_213_WI_3 -4280,FOODS_2_214_CA_1 -4281,FOODS_2_214_CA_2 -4282,FOODS_2_214_CA_3 -4283,FOODS_2_214_CA_4 -4284,FOODS_2_214_TX_1 -4285,FOODS_2_214_TX_2 -4286,FOODS_2_214_TX_3 -4287,FOODS_2_214_WI_1 -4288,FOODS_2_214_WI_2 -4289,FOODS_2_214_WI_3 -4290,FOODS_2_215_CA_1 -4291,FOODS_2_215_CA_2 -4292,FOODS_2_215_CA_3 -4293,FOODS_2_215_CA_4 -4294,FOODS_2_215_TX_1 -4295,FOODS_2_215_TX_2 -4296,FOODS_2_215_TX_3 -4297,FOODS_2_215_WI_1 -4298,FOODS_2_215_WI_2 -4299,FOODS_2_215_WI_3 -4300,FOODS_2_216_CA_1 -4301,FOODS_2_216_CA_2 -4302,FOODS_2_216_CA_3 -4303,FOODS_2_216_CA_4 -4304,FOODS_2_216_TX_1 -4305,FOODS_2_216_TX_2 -4306,FOODS_2_216_TX_3 -4307,FOODS_2_216_WI_1 -4308,FOODS_2_216_WI_2 -4309,FOODS_2_216_WI_3 -4310,FOODS_2_217_CA_1 -4311,FOODS_2_217_CA_2 -4312,FOODS_2_217_CA_3 -4313,FOODS_2_217_CA_4 -4314,FOODS_2_217_TX_1 -4315,FOODS_2_217_TX_2 -4316,FOODS_2_217_TX_3 -4317,FOODS_2_217_WI_1 -4318,FOODS_2_217_WI_2 -4319,FOODS_2_217_WI_3 -4320,FOODS_2_218_CA_1 -4321,FOODS_2_218_CA_2 -4322,FOODS_2_218_CA_3 -4323,FOODS_2_218_CA_4 -4324,FOODS_2_218_TX_1 -4325,FOODS_2_218_TX_2 -4326,FOODS_2_218_TX_3 -4327,FOODS_2_218_WI_1 -4328,FOODS_2_218_WI_2 -4329,FOODS_2_218_WI_3 -4330,FOODS_2_219_CA_1 -4331,FOODS_2_219_CA_2 -4332,FOODS_2_219_CA_3 -4333,FOODS_2_219_CA_4 -4334,FOODS_2_219_TX_1 -4335,FOODS_2_219_TX_2 -4336,FOODS_2_219_TX_3 -4337,FOODS_2_219_WI_1 -4338,FOODS_2_219_WI_2 -4339,FOODS_2_219_WI_3 -4340,FOODS_2_220_CA_1 -4341,FOODS_2_220_CA_2 -4342,FOODS_2_220_CA_3 -4343,FOODS_2_220_CA_4 -4344,FOODS_2_220_TX_1 -4345,FOODS_2_220_TX_2 -4346,FOODS_2_220_TX_3 -4347,FOODS_2_220_WI_1 -4348,FOODS_2_220_WI_2 -4349,FOODS_2_220_WI_3 -4350,FOODS_2_221_CA_1 -4351,FOODS_2_221_CA_2 -4352,FOODS_2_221_CA_3 -4353,FOODS_2_221_CA_4 -4354,FOODS_2_221_TX_1 -4355,FOODS_2_221_TX_2 -4356,FOODS_2_221_TX_3 
-4357,FOODS_2_221_WI_1 -4358,FOODS_2_221_WI_2 -4359,FOODS_2_221_WI_3 -4360,FOODS_2_222_CA_1 -4361,FOODS_2_222_CA_2 -4362,FOODS_2_222_CA_3 -4363,FOODS_2_222_CA_4 -4364,FOODS_2_222_TX_1 -4365,FOODS_2_222_TX_2 -4366,FOODS_2_222_TX_3 -4367,FOODS_2_222_WI_1 -4368,FOODS_2_222_WI_2 -4369,FOODS_2_222_WI_3 -4370,FOODS_2_223_CA_1 -4371,FOODS_2_223_CA_2 -4372,FOODS_2_223_CA_3 -4373,FOODS_2_223_CA_4 -4374,FOODS_2_223_TX_1 -4375,FOODS_2_223_TX_2 -4376,FOODS_2_223_TX_3 -4377,FOODS_2_223_WI_1 -4378,FOODS_2_223_WI_2 -4379,FOODS_2_223_WI_3 -4380,FOODS_2_224_CA_1 -4381,FOODS_2_224_CA_2 -4382,FOODS_2_224_CA_3 -4383,FOODS_2_224_CA_4 -4384,FOODS_2_224_TX_1 -4385,FOODS_2_224_TX_2 -4386,FOODS_2_224_TX_3 -4387,FOODS_2_224_WI_1 -4388,FOODS_2_224_WI_2 -4389,FOODS_2_224_WI_3 -4390,FOODS_2_225_CA_1 -4391,FOODS_2_225_CA_2 -4392,FOODS_2_225_CA_3 -4393,FOODS_2_225_CA_4 -4394,FOODS_2_225_TX_1 -4395,FOODS_2_225_TX_2 -4396,FOODS_2_225_TX_3 -4397,FOODS_2_225_WI_1 -4398,FOODS_2_225_WI_2 -4399,FOODS_2_225_WI_3 -4400,FOODS_2_226_CA_1 -4401,FOODS_2_226_CA_2 -4402,FOODS_2_226_CA_3 -4403,FOODS_2_226_CA_4 -4404,FOODS_2_226_TX_1 -4405,FOODS_2_226_TX_2 -4406,FOODS_2_226_TX_3 -4407,FOODS_2_226_WI_1 -4408,FOODS_2_226_WI_2 -4409,FOODS_2_226_WI_3 -4410,FOODS_2_227_CA_1 -4411,FOODS_2_227_CA_2 -4412,FOODS_2_227_CA_3 -4413,FOODS_2_227_CA_4 -4414,FOODS_2_227_TX_1 -4415,FOODS_2_227_TX_2 -4416,FOODS_2_227_TX_3 -4417,FOODS_2_227_WI_1 -4418,FOODS_2_227_WI_2 -4419,FOODS_2_227_WI_3 -4420,FOODS_2_228_CA_1 -4421,FOODS_2_228_CA_2 -4422,FOODS_2_228_CA_3 -4423,FOODS_2_228_CA_4 -4424,FOODS_2_228_TX_1 -4425,FOODS_2_228_TX_2 -4426,FOODS_2_228_TX_3 -4427,FOODS_2_228_WI_1 -4428,FOODS_2_228_WI_2 -4429,FOODS_2_228_WI_3 -4430,FOODS_2_229_CA_1 -4431,FOODS_2_229_CA_2 -4432,FOODS_2_229_CA_3 -4433,FOODS_2_229_CA_4 -4434,FOODS_2_229_TX_1 -4435,FOODS_2_229_TX_2 -4436,FOODS_2_229_TX_3 -4437,FOODS_2_229_WI_1 -4438,FOODS_2_229_WI_2 -4439,FOODS_2_229_WI_3 -4440,FOODS_2_230_CA_1 -4441,FOODS_2_230_CA_2 -4442,FOODS_2_230_CA_3 
-4443,FOODS_2_230_CA_4 -4444,FOODS_2_230_TX_1 -4445,FOODS_2_230_TX_2 -4446,FOODS_2_230_TX_3 -4447,FOODS_2_230_WI_1 -4448,FOODS_2_230_WI_2 -4449,FOODS_2_230_WI_3 -4450,FOODS_2_231_CA_1 -4451,FOODS_2_231_CA_2 -4452,FOODS_2_231_CA_3 -4453,FOODS_2_231_CA_4 -4454,FOODS_2_231_TX_1 -4455,FOODS_2_231_TX_2 -4456,FOODS_2_231_TX_3 -4457,FOODS_2_231_WI_1 -4458,FOODS_2_231_WI_2 -4459,FOODS_2_231_WI_3 -4460,FOODS_2_232_CA_1 -4461,FOODS_2_232_CA_2 -4462,FOODS_2_232_CA_3 -4463,FOODS_2_232_CA_4 -4464,FOODS_2_232_TX_1 -4465,FOODS_2_232_TX_2 -4466,FOODS_2_232_TX_3 -4467,FOODS_2_232_WI_1 -4468,FOODS_2_232_WI_2 -4469,FOODS_2_232_WI_3 -4470,FOODS_2_233_CA_1 -4471,FOODS_2_233_CA_2 -4472,FOODS_2_233_CA_3 -4473,FOODS_2_233_CA_4 -4474,FOODS_2_233_TX_1 -4475,FOODS_2_233_TX_2 -4476,FOODS_2_233_TX_3 -4477,FOODS_2_233_WI_1 -4478,FOODS_2_233_WI_2 -4479,FOODS_2_233_WI_3 -4480,FOODS_2_234_CA_1 -4481,FOODS_2_234_CA_2 -4482,FOODS_2_234_CA_3 -4483,FOODS_2_234_CA_4 -4484,FOODS_2_234_TX_1 -4485,FOODS_2_234_TX_2 -4486,FOODS_2_234_TX_3 -4487,FOODS_2_234_WI_1 -4488,FOODS_2_234_WI_2 -4489,FOODS_2_234_WI_3 -4490,FOODS_2_235_CA_1 -4491,FOODS_2_235_CA_2 -4492,FOODS_2_235_CA_3 -4493,FOODS_2_235_CA_4 -4494,FOODS_2_235_TX_1 -4495,FOODS_2_235_TX_2 -4496,FOODS_2_235_TX_3 -4497,FOODS_2_235_WI_1 -4498,FOODS_2_235_WI_2 -4499,FOODS_2_235_WI_3 -4500,FOODS_2_236_CA_1 -4501,FOODS_2_236_CA_2 -4502,FOODS_2_236_CA_3 -4503,FOODS_2_236_CA_4 -4504,FOODS_2_236_TX_1 -4505,FOODS_2_236_TX_2 -4506,FOODS_2_236_TX_3 -4507,FOODS_2_236_WI_1 -4508,FOODS_2_236_WI_2 -4509,FOODS_2_236_WI_3 -4510,FOODS_2_237_CA_1 -4511,FOODS_2_237_CA_2 -4512,FOODS_2_237_CA_3 -4513,FOODS_2_237_CA_4 -4514,FOODS_2_237_TX_1 -4515,FOODS_2_237_TX_2 -4516,FOODS_2_237_TX_3 -4517,FOODS_2_237_WI_1 -4518,FOODS_2_237_WI_2 -4519,FOODS_2_237_WI_3 -4520,FOODS_2_238_CA_1 -4521,FOODS_2_238_CA_2 -4522,FOODS_2_238_CA_3 -4523,FOODS_2_238_CA_4 -4524,FOODS_2_238_TX_1 -4525,FOODS_2_238_TX_2 -4526,FOODS_2_238_TX_3 -4527,FOODS_2_238_WI_1 -4528,FOODS_2_238_WI_2 
-4529,FOODS_2_238_WI_3 -4530,FOODS_2_239_CA_1 -4531,FOODS_2_239_CA_2 -4532,FOODS_2_239_CA_3 -4533,FOODS_2_239_CA_4 -4534,FOODS_2_239_TX_1 -4535,FOODS_2_239_TX_2 -4536,FOODS_2_239_TX_3 -4537,FOODS_2_239_WI_1 -4538,FOODS_2_239_WI_2 -4539,FOODS_2_239_WI_3 -4540,FOODS_2_240_CA_1 -4541,FOODS_2_240_CA_2 -4542,FOODS_2_240_CA_3 -4543,FOODS_2_240_CA_4 -4544,FOODS_2_240_TX_1 -4545,FOODS_2_240_TX_2 -4546,FOODS_2_240_TX_3 -4547,FOODS_2_240_WI_1 -4548,FOODS_2_240_WI_2 -4549,FOODS_2_240_WI_3 -4550,FOODS_2_241_CA_1 -4551,FOODS_2_241_CA_2 -4552,FOODS_2_241_CA_3 -4553,FOODS_2_241_CA_4 -4554,FOODS_2_241_TX_1 -4555,FOODS_2_241_TX_2 -4556,FOODS_2_241_TX_3 -4557,FOODS_2_241_WI_1 -4558,FOODS_2_241_WI_2 -4559,FOODS_2_241_WI_3 -4560,FOODS_2_242_CA_1 -4561,FOODS_2_242_CA_2 -4562,FOODS_2_242_CA_3 -4563,FOODS_2_242_CA_4 -4564,FOODS_2_242_TX_1 -4565,FOODS_2_242_TX_2 -4566,FOODS_2_242_TX_3 -4567,FOODS_2_242_WI_1 -4568,FOODS_2_242_WI_2 -4569,FOODS_2_242_WI_3 -4570,FOODS_2_243_CA_1 -4571,FOODS_2_243_CA_2 -4572,FOODS_2_243_CA_3 -4573,FOODS_2_243_CA_4 -4574,FOODS_2_243_TX_1 -4575,FOODS_2_243_TX_2 -4576,FOODS_2_243_TX_3 -4577,FOODS_2_243_WI_1 -4578,FOODS_2_243_WI_2 -4579,FOODS_2_243_WI_3 -4580,FOODS_2_244_CA_1 -4581,FOODS_2_244_CA_2 -4582,FOODS_2_244_CA_3 -4583,FOODS_2_244_CA_4 -4584,FOODS_2_244_TX_1 -4585,FOODS_2_244_TX_2 -4586,FOODS_2_244_TX_3 -4587,FOODS_2_244_WI_1 -4588,FOODS_2_244_WI_2 -4589,FOODS_2_244_WI_3 -4590,FOODS_2_245_CA_1 -4591,FOODS_2_245_CA_2 -4592,FOODS_2_245_CA_3 -4593,FOODS_2_245_CA_4 -4594,FOODS_2_245_TX_1 -4595,FOODS_2_245_TX_2 -4596,FOODS_2_245_TX_3 -4597,FOODS_2_245_WI_1 -4598,FOODS_2_245_WI_2 -4599,FOODS_2_245_WI_3 -4600,FOODS_2_246_CA_1 -4601,FOODS_2_246_CA_2 -4602,FOODS_2_246_CA_3 -4603,FOODS_2_246_CA_4 -4604,FOODS_2_246_TX_1 -4605,FOODS_2_246_TX_2 -4606,FOODS_2_246_TX_3 -4607,FOODS_2_246_WI_1 -4608,FOODS_2_246_WI_2 -4609,FOODS_2_246_WI_3 -4610,FOODS_2_247_CA_1 -4611,FOODS_2_247_CA_2 -4612,FOODS_2_247_CA_3 -4613,FOODS_2_247_CA_4 -4614,FOODS_2_247_TX_1 
-4615,FOODS_2_247_TX_2 -4616,FOODS_2_247_TX_3 -4617,FOODS_2_247_WI_1 -4618,FOODS_2_247_WI_2 -4619,FOODS_2_247_WI_3 -4620,FOODS_2_248_CA_1 -4621,FOODS_2_248_CA_2 -4622,FOODS_2_248_CA_3 -4623,FOODS_2_248_CA_4 -4624,FOODS_2_248_TX_1 -4625,FOODS_2_248_TX_2 -4626,FOODS_2_248_TX_3 -4627,FOODS_2_248_WI_1 -4628,FOODS_2_248_WI_2 -4629,FOODS_2_248_WI_3 -4630,FOODS_2_249_CA_1 -4631,FOODS_2_249_CA_2 -4632,FOODS_2_249_CA_3 -4633,FOODS_2_249_CA_4 -4634,FOODS_2_249_TX_1 -4635,FOODS_2_249_TX_2 -4636,FOODS_2_249_TX_3 -4637,FOODS_2_249_WI_1 -4638,FOODS_2_249_WI_2 -4639,FOODS_2_249_WI_3 -4640,FOODS_2_250_CA_1 -4641,FOODS_2_250_CA_2 -4642,FOODS_2_250_CA_3 -4643,FOODS_2_250_CA_4 -4644,FOODS_2_250_TX_1 -4645,FOODS_2_250_TX_2 -4646,FOODS_2_250_TX_3 -4647,FOODS_2_250_WI_1 -4648,FOODS_2_250_WI_2 -4649,FOODS_2_250_WI_3 -4650,FOODS_2_251_CA_1 -4651,FOODS_2_251_CA_2 -4652,FOODS_2_251_CA_3 -4653,FOODS_2_251_CA_4 -4654,FOODS_2_251_TX_1 -4655,FOODS_2_251_TX_2 -4656,FOODS_2_251_TX_3 -4657,FOODS_2_251_WI_1 -4658,FOODS_2_251_WI_2 -4659,FOODS_2_251_WI_3 -4660,FOODS_2_252_CA_1 -4661,FOODS_2_252_CA_2 -4662,FOODS_2_252_CA_3 -4663,FOODS_2_252_CA_4 -4664,FOODS_2_252_TX_1 -4665,FOODS_2_252_TX_2 -4666,FOODS_2_252_TX_3 -4667,FOODS_2_252_WI_1 -4668,FOODS_2_252_WI_2 -4669,FOODS_2_252_WI_3 -4670,FOODS_2_253_CA_1 -4671,FOODS_2_253_CA_2 -4672,FOODS_2_253_CA_3 -4673,FOODS_2_253_CA_4 -4674,FOODS_2_253_TX_1 -4675,FOODS_2_253_TX_2 -4676,FOODS_2_253_TX_3 -4677,FOODS_2_253_WI_1 -4678,FOODS_2_253_WI_2 -4679,FOODS_2_253_WI_3 -4680,FOODS_2_254_CA_1 -4681,FOODS_2_254_CA_2 -4682,FOODS_2_254_CA_3 -4683,FOODS_2_254_CA_4 -4684,FOODS_2_254_TX_1 -4685,FOODS_2_254_TX_2 -4686,FOODS_2_254_TX_3 -4687,FOODS_2_254_WI_1 -4688,FOODS_2_254_WI_2 -4689,FOODS_2_254_WI_3 -4690,FOODS_2_255_CA_1 -4691,FOODS_2_255_CA_2 -4692,FOODS_2_255_CA_3 -4693,FOODS_2_255_CA_4 -4694,FOODS_2_255_TX_1 -4695,FOODS_2_255_TX_2 -4696,FOODS_2_255_TX_3 -4697,FOODS_2_255_WI_1 -4698,FOODS_2_255_WI_2 -4699,FOODS_2_255_WI_3 -4700,FOODS_2_256_CA_1 
-4701,FOODS_2_256_CA_2 -4702,FOODS_2_256_CA_3 -4703,FOODS_2_256_CA_4 -4704,FOODS_2_256_TX_1 -4705,FOODS_2_256_TX_2 -4706,FOODS_2_256_TX_3 -4707,FOODS_2_256_WI_1 -4708,FOODS_2_256_WI_2 -4709,FOODS_2_256_WI_3 -4710,FOODS_2_257_CA_1 -4711,FOODS_2_257_CA_2 -4712,FOODS_2_257_CA_3 -4713,FOODS_2_257_CA_4 -4714,FOODS_2_257_TX_1 -4715,FOODS_2_257_TX_2 -4716,FOODS_2_257_TX_3 -4717,FOODS_2_257_WI_1 -4718,FOODS_2_257_WI_2 -4719,FOODS_2_257_WI_3 -4720,FOODS_2_258_CA_1 -4721,FOODS_2_258_CA_2 -4722,FOODS_2_258_CA_3 -4723,FOODS_2_258_CA_4 -4724,FOODS_2_258_TX_1 -4725,FOODS_2_258_TX_2 -4726,FOODS_2_258_TX_3 -4727,FOODS_2_258_WI_1 -4728,FOODS_2_258_WI_2 -4729,FOODS_2_258_WI_3 -4730,FOODS_2_259_CA_1 -4731,FOODS_2_259_CA_2 -4732,FOODS_2_259_CA_3 -4733,FOODS_2_259_CA_4 -4734,FOODS_2_259_TX_1 -4735,FOODS_2_259_TX_2 -4736,FOODS_2_259_TX_3 -4737,FOODS_2_259_WI_1 -4738,FOODS_2_259_WI_2 -4739,FOODS_2_259_WI_3 -4740,FOODS_2_260_CA_1 -4741,FOODS_2_260_CA_2 -4742,FOODS_2_260_CA_3 -4743,FOODS_2_260_CA_4 -4744,FOODS_2_260_TX_1 -4745,FOODS_2_260_TX_2 -4746,FOODS_2_260_TX_3 -4747,FOODS_2_260_WI_1 -4748,FOODS_2_260_WI_2 -4749,FOODS_2_260_WI_3 -4750,FOODS_2_261_CA_1 -4751,FOODS_2_261_CA_2 -4752,FOODS_2_261_CA_3 -4753,FOODS_2_261_CA_4 -4754,FOODS_2_261_TX_1 -4755,FOODS_2_261_TX_2 -4756,FOODS_2_261_TX_3 -4757,FOODS_2_261_WI_1 -4758,FOODS_2_261_WI_2 -4759,FOODS_2_261_WI_3 -4760,FOODS_2_262_CA_1 -4761,FOODS_2_262_CA_2 -4762,FOODS_2_262_CA_3 -4763,FOODS_2_262_CA_4 -4764,FOODS_2_262_TX_1 -4765,FOODS_2_262_TX_2 -4766,FOODS_2_262_TX_3 -4767,FOODS_2_262_WI_1 -4768,FOODS_2_262_WI_2 -4769,FOODS_2_262_WI_3 -4770,FOODS_2_263_CA_1 -4771,FOODS_2_263_CA_2 -4772,FOODS_2_263_CA_3 -4773,FOODS_2_263_CA_4 -4774,FOODS_2_263_TX_1 -4775,FOODS_2_263_TX_2 -4776,FOODS_2_263_TX_3 -4777,FOODS_2_263_WI_1 -4778,FOODS_2_263_WI_2 -4779,FOODS_2_263_WI_3 -4780,FOODS_2_264_CA_1 -4781,FOODS_2_264_CA_2 -4782,FOODS_2_264_CA_3 -4783,FOODS_2_264_CA_4 -4784,FOODS_2_264_TX_1 -4785,FOODS_2_264_TX_2 -4786,FOODS_2_264_TX_3 
-4787,FOODS_2_264_WI_1 -4788,FOODS_2_264_WI_2 -4789,FOODS_2_264_WI_3 -4790,FOODS_2_265_CA_1 -4791,FOODS_2_265_CA_2 -4792,FOODS_2_265_CA_3 -4793,FOODS_2_265_CA_4 -4794,FOODS_2_265_TX_1 -4795,FOODS_2_265_TX_2 -4796,FOODS_2_265_TX_3 -4797,FOODS_2_265_WI_1 -4798,FOODS_2_265_WI_2 -4799,FOODS_2_265_WI_3 -4800,FOODS_2_266_CA_1 -4801,FOODS_2_266_CA_2 -4802,FOODS_2_266_CA_3 -4803,FOODS_2_266_CA_4 -4804,FOODS_2_266_TX_1 -4805,FOODS_2_266_TX_2 -4806,FOODS_2_266_TX_3 -4807,FOODS_2_266_WI_1 -4808,FOODS_2_266_WI_2 -4809,FOODS_2_266_WI_3 -4810,FOODS_2_267_CA_1 -4811,FOODS_2_267_CA_2 -4812,FOODS_2_267_CA_3 -4813,FOODS_2_267_CA_4 -4814,FOODS_2_267_TX_1 -4815,FOODS_2_267_TX_2 -4816,FOODS_2_267_TX_3 -4817,FOODS_2_267_WI_1 -4818,FOODS_2_267_WI_2 -4819,FOODS_2_267_WI_3 -4820,FOODS_2_268_CA_1 -4821,FOODS_2_268_CA_2 -4822,FOODS_2_268_CA_3 -4823,FOODS_2_268_CA_4 -4824,FOODS_2_268_TX_1 -4825,FOODS_2_268_TX_2 -4826,FOODS_2_268_TX_3 -4827,FOODS_2_268_WI_1 -4828,FOODS_2_268_WI_2 -4829,FOODS_2_268_WI_3 -4830,FOODS_2_269_CA_1 -4831,FOODS_2_269_CA_2 -4832,FOODS_2_269_CA_3 -4833,FOODS_2_269_CA_4 -4834,FOODS_2_269_TX_1 -4835,FOODS_2_269_TX_2 -4836,FOODS_2_269_TX_3 -4837,FOODS_2_269_WI_1 -4838,FOODS_2_269_WI_2 -4839,FOODS_2_269_WI_3 -4840,FOODS_2_270_CA_1 -4841,FOODS_2_270_CA_2 -4842,FOODS_2_270_CA_3 -4843,FOODS_2_270_CA_4 -4844,FOODS_2_270_TX_1 -4845,FOODS_2_270_TX_2 -4846,FOODS_2_270_TX_3 -4847,FOODS_2_270_WI_1 -4848,FOODS_2_270_WI_2 -4849,FOODS_2_270_WI_3 -4850,FOODS_2_271_CA_1 -4851,FOODS_2_271_CA_2 -4852,FOODS_2_271_CA_3 -4853,FOODS_2_271_CA_4 -4854,FOODS_2_271_TX_1 -4855,FOODS_2_271_TX_2 -4856,FOODS_2_271_TX_3 -4857,FOODS_2_271_WI_1 -4858,FOODS_2_271_WI_2 -4859,FOODS_2_271_WI_3 -4860,FOODS_2_272_CA_1 -4861,FOODS_2_272_CA_2 -4862,FOODS_2_272_CA_3 -4863,FOODS_2_272_CA_4 -4864,FOODS_2_272_TX_1 -4865,FOODS_2_272_TX_2 -4866,FOODS_2_272_TX_3 -4867,FOODS_2_272_WI_1 -4868,FOODS_2_272_WI_2 -4869,FOODS_2_272_WI_3 -4870,FOODS_2_273_CA_1 -4871,FOODS_2_273_CA_2 -4872,FOODS_2_273_CA_3 
-4873,FOODS_2_273_CA_4 -4874,FOODS_2_273_TX_1 -4875,FOODS_2_273_TX_2 -4876,FOODS_2_273_TX_3 -4877,FOODS_2_273_WI_1 -4878,FOODS_2_273_WI_2 -4879,FOODS_2_273_WI_3 -4880,FOODS_2_274_CA_1 -4881,FOODS_2_274_CA_2 -4882,FOODS_2_274_CA_3 -4883,FOODS_2_274_CA_4 -4884,FOODS_2_274_TX_1 -4885,FOODS_2_274_TX_2 -4886,FOODS_2_274_TX_3 -4887,FOODS_2_274_WI_1 -4888,FOODS_2_274_WI_2 -4889,FOODS_2_274_WI_3 -4890,FOODS_2_275_CA_1 -4891,FOODS_2_275_CA_2 -4892,FOODS_2_275_CA_3 -4893,FOODS_2_275_CA_4 -4894,FOODS_2_275_TX_1 -4895,FOODS_2_275_TX_2 -4896,FOODS_2_275_TX_3 -4897,FOODS_2_275_WI_1 -4898,FOODS_2_275_WI_2 -4899,FOODS_2_275_WI_3 -4900,FOODS_2_276_CA_1 -4901,FOODS_2_276_CA_2 -4902,FOODS_2_276_CA_3 -4903,FOODS_2_276_CA_4 -4904,FOODS_2_276_TX_1 -4905,FOODS_2_276_TX_2 -4906,FOODS_2_276_TX_3 -4907,FOODS_2_276_WI_1 -4908,FOODS_2_276_WI_2 -4909,FOODS_2_276_WI_3 -4910,FOODS_2_277_CA_1 -4911,FOODS_2_277_CA_2 -4912,FOODS_2_277_CA_3 -4913,FOODS_2_277_CA_4 -4914,FOODS_2_277_TX_1 -4915,FOODS_2_277_TX_2 -4916,FOODS_2_277_TX_3 -4917,FOODS_2_277_WI_1 -4918,FOODS_2_277_WI_2 -4919,FOODS_2_277_WI_3 -4920,FOODS_2_278_CA_1 -4921,FOODS_2_278_CA_2 -4922,FOODS_2_278_CA_3 -4923,FOODS_2_278_CA_4 -4924,FOODS_2_278_TX_1 -4925,FOODS_2_278_TX_2 -4926,FOODS_2_278_TX_3 -4927,FOODS_2_278_WI_1 -4928,FOODS_2_278_WI_2 -4929,FOODS_2_278_WI_3 -4930,FOODS_2_279_CA_1 -4931,FOODS_2_279_CA_2 -4932,FOODS_2_279_CA_3 -4933,FOODS_2_279_CA_4 -4934,FOODS_2_279_TX_1 -4935,FOODS_2_279_TX_2 -4936,FOODS_2_279_TX_3 -4937,FOODS_2_279_WI_1 -4938,FOODS_2_279_WI_2 -4939,FOODS_2_279_WI_3 -4940,FOODS_2_280_CA_1 -4941,FOODS_2_280_CA_2 -4942,FOODS_2_280_CA_3 -4943,FOODS_2_280_CA_4 -4944,FOODS_2_280_TX_1 -4945,FOODS_2_280_TX_2 -4946,FOODS_2_280_TX_3 -4947,FOODS_2_280_WI_1 -4948,FOODS_2_280_WI_2 -4949,FOODS_2_280_WI_3 -4950,FOODS_2_281_CA_1 -4951,FOODS_2_281_CA_2 -4952,FOODS_2_281_CA_3 -4953,FOODS_2_281_CA_4 -4954,FOODS_2_281_TX_1 -4955,FOODS_2_281_TX_2 -4956,FOODS_2_281_TX_3 -4957,FOODS_2_281_WI_1 -4958,FOODS_2_281_WI_2 
-4959,FOODS_2_281_WI_3 -4960,FOODS_2_282_CA_1 -4961,FOODS_2_282_CA_2 -4962,FOODS_2_282_CA_3 -4963,FOODS_2_282_CA_4 -4964,FOODS_2_282_TX_1 -4965,FOODS_2_282_TX_2 -4966,FOODS_2_282_TX_3 -4967,FOODS_2_282_WI_1 -4968,FOODS_2_282_WI_2 -4969,FOODS_2_282_WI_3 -4970,FOODS_2_283_CA_1 -4971,FOODS_2_283_CA_2 -4972,FOODS_2_283_CA_3 -4973,FOODS_2_283_CA_4 -4974,FOODS_2_283_TX_1 -4975,FOODS_2_283_TX_2 -4976,FOODS_2_283_TX_3 -4977,FOODS_2_283_WI_1 -4978,FOODS_2_283_WI_2 -4979,FOODS_2_283_WI_3 -4980,FOODS_2_284_CA_1 -4981,FOODS_2_284_CA_2 -4982,FOODS_2_284_CA_3 -4983,FOODS_2_284_CA_4 -4984,FOODS_2_284_TX_1 -4985,FOODS_2_284_TX_2 -4986,FOODS_2_284_TX_3 -4987,FOODS_2_284_WI_1 -4988,FOODS_2_284_WI_2 -4989,FOODS_2_284_WI_3 -4990,FOODS_2_285_CA_1 -4991,FOODS_2_285_CA_2 -4992,FOODS_2_285_CA_3 -4993,FOODS_2_285_CA_4 -4994,FOODS_2_285_TX_1 -4995,FOODS_2_285_TX_2 -4996,FOODS_2_285_TX_3 -4997,FOODS_2_285_WI_1 -4998,FOODS_2_285_WI_2 -4999,FOODS_2_285_WI_3 diff --git a/doc/source/templates/02_many_model_training/many_model_training.ipynb b/doc/source/templates/02_many_model_training/many_model_training.ipynb index a041c8a69b6a..1f9613c0b56c 100644 --- a/doc/source/templates/02_many_model_training/many_model_training.ipynb +++ b/doc/source/templates/02_many_model_training/many_model_training.ipynb @@ -21,79 +21,85 @@ }, { "cell_type": "markdown", - "id": "c56bb4d0", + "id": "08e65f8d", "metadata": {}, "source": [ - "## Installing Dependencies\n", - "\n", - "First, we'll need to install necessary dependencies in the Anyscale Workspace. To do so, first open up a terminal, and follow one of the following install steps, depending on which size template you picked:\n" + "> Slot in your code below wherever you see the ✂️ icon to build a many model training Ray application off of this template!" 
] }, { + "attachments": {}, "cell_type": "markdown", - "id": "5b99c151", - "metadata": { - "tags": [ - "small" - ] - }, + "id": "c56bb4d0", + "metadata": {}, "source": [ - "### Install Dependencies (Small-scale Template)\n", + "## Handling Dependencies\n", "\n", - "The small-scale template only runs on a single node (the head node), so we just need to install the requirements *locally*." + "This template requires certain Python packages to be available to every node in the cluster.\n", + "\n", + "> ✂️ Add your own package dependencies! You can specify bounds for package versions\n", + "> in the same format as a `requirements.txt` file.\n" ] }, { "cell_type": "code", "execution_count": null, - "id": "dcd6fc93", + "id": "0c9b3dec", "metadata": { - "tags": [ - "small" - ] + "tags": [] }, "outputs": [], "source": [ - "%pip install -r requirements.txt --upgrade\n" + "requirements = [\n", + " \"statsforecast==1.5.0\",\n", + "]\n" ] }, { + "attachments": {}, "cell_type": "markdown", - "id": "c5ee8e43", - "metadata": { - "tags": [ - "large" - ] - }, + "id": "eff9369f", + "metadata": {}, "source": [ - "### Install Cluster-wide Dependencies (Large-scale Template)\n", + "First, we may want to use these modules right here in our script, which is running on the head node.\n", + "Install the Python packages on the head node using `pip install`.\n", "\n", - "When running in a distributed Ray Cluster, all nodes need to have access to the installed packages.\n", - "For this, we'll use `pip install --user` to install the necessary requirements.\n", - "On an [Anyscale Workspace](https://docs.anyscale.com/user-guide/develop-and-debug/workspaces),\n", - "this will install packages to a *shared filesystem* that will be available to all nodes in the cluster." 
+ "You may need to restart this notebook kernel to access the installed packages.\n" ] }, { "cell_type": "code", "execution_count": null, - "id": "0c9b3dec", - "metadata": { - "tags": [ - "large" - ] - }, + "id": "5cba940c", + "metadata": {}, "outputs": [], "source": [ - "%pip install --user -r requirements.txt --upgrade\n" + "all_requirements = \" \".join(requirements)\n", + "\n", + "%pip install {all_requirements}\n" ] }, { + "attachments": {}, "cell_type": "markdown", - "id": "08e65f8d", + "id": "1dcaea58", "metadata": {}, "source": [ - "> Slot in your code below wherever you see the ✂️ icon to build a many model training Ray application off of this template!" + "Next, we need to make sure all worker nodes also have access to the dependencies.\n", + "For this, use a [Ray Runtime Environment](https://docs.ray.io/en/latest/ray-core/handling-dependencies.html#runtime-environments)\n", + "to dynamically set up dependencies throughout the cluster.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e268225d", + "metadata": {}, + "outputs": [], + "source": [ + "import ray\n", + "\n", + "ray.init(runtime_env={\"pip\": requirements})\n" ] }, { @@ -107,15 +113,8 @@ "from pyarrow import parquet as pq\n", "from sklearn.metrics import mean_squared_error\n", "\n", - "import ray\n", "from ray import tune\n", - "from ray.air import session\n", - "\n", - "try:\n", - " from statsforecast import StatsForecast\n", - " from statsforecast.models import AutoARIMA, AutoETS\n", - "except ImportError as e:\n", - " raise RuntimeError(\"Did you follow the steps above to install dependencies?\") from e\n" + "from ray.air import session\n" ] }, { @@ -128,49 +127,18 @@ "> Note that this template fits two models per data partition and reports the best performing one." 
] }, - { - "cell_type": "code", - "execution_count": null, - "id": "a40e91a5", - "metadata": { - "tags": [ - "small" - ] - }, - "outputs": [], - "source": [ - "# Default values for the small-scale template\n", - "NUM_DATA_PARTITIONS: int = 50\n" - ] - }, { "cell_type": "code", "execution_count": null, "id": "5390c232", "metadata": { - "tags": [ - "large" - ] + "tags": [] }, "outputs": [], "source": [ - "# Default values for the large-scale template\n", "NUM_DATA_PARTITIONS: int = 1000\n" ] }, - { - "cell_type": "markdown", - "id": "c260d8f8", - "metadata": {}, - "source": [ - "```{tip}\n", - "If you're running the small-scale version of the template, try setting\n", - "the number of trials to the recommended number of trials for the large-scale version.\n", - "It'll be much slower, but you'll see the dramatic speedup once distributing the load\n", - "to a multi-node Ray cluster in the large-scale version!\n", - "```" - ] - }, { "cell_type": "markdown", "id": "8b2f3d16", @@ -201,7 +169,7 @@ " return df.dropna()\n", "\n", "\n", - "def evaluate_cross_validation(df, metric):\n", + "def evaluate_cross_validation(df: pd.DataFrame, metric) -> pd.DataFrame:\n", " models = df.drop(columns=[\"ds\", \"cutoff\", \"y\"]).columns.tolist()\n", " evals = []\n", " for model in models:\n", @@ -233,15 +201,18 @@ "metadata": {}, "outputs": [], "source": [ - "model_classes = [AutoARIMA, AutoETS]\n", - "n_windows = 1\n", - "\n", - "\n", "def train_fn(config: dict):\n", + " try:\n", + " from statsforecast import StatsForecast\n", + " from statsforecast.models import AutoARIMA, AutoETS\n", + " except ImportError as e:\n", + " raise RuntimeError(\"Did you set a runtime env to install dependencies?\") from e\n", + "\n", " data_partition_id = config[\"data_partition_id\"]\n", " train_df = get_m5_partition(data_partition_id)\n", "\n", - " models = [model_cls() for model_cls in model_classes]\n", + " models = [AutoARIMA(), AutoETS()]\n", + " n_windows = 1\n", " forecast_horizon = 4\n", "\n", " 
sf = StatsForecast(\n", @@ -265,12 +236,11 @@ "\n", "\n", "trainable = train_fn\n", - "trainable = tune.with_resources(\n", - " trainable, resources={\"CPU\": len(model_classes) * n_windows}\n", - ")\n" + "trainable = tune.with_resources(trainable, resources={\"CPU\": 2 * 1})\n" ] }, { + "attachments": {}, "cell_type": "markdown", "id": "301c7c58", "metadata": {}, @@ -280,6 +250,7 @@ "Feel free to change this to the resources required by your application! You can also comment out the `tune.with_resources` block to assign `1 CPU` (the default) to each trial.\n", "\n", "Note that this is purely for Tune to know how many trials to schedule concurrently -- setting the number of CPUs does not actually enforce any kind of resource isolation!\n", + "In this template, `statsforecast` runs cross validation in parallel with M models * N temporal cross-validation windows (e.g. 2 * 1).\n", "```\n", "\n", "See [Ray Tune's guide on assigning resources](https://docs.ray.io/en/latest/tune/tutorials/tune-resources.html) for more information." @@ -302,7 +273,12 @@ "metadata": {}, "outputs": [], "source": [ - "data_partitions = list(pd.read_csv(\"item_ids.csv\")[\"item_id\"])\n", + "# Download the list of item ids used to partition the dataset.\n", + "data_partitions = list(\n", + " pd.read_csv(\n", + " \"https://air-example-data.s3.us-west-2.amazonaws.com/m5_benchmarks_item_ids.csv\"\n", + " )[\"item_id\"]\n", + ")\n", "if NUM_DATA_PARTITIONS > len(data_partitions):\n", " print(f\"There are only {len(data_partitions)} partitions!\")\n", "\n", @@ -331,11 +307,12 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "id": "ba1a07d0", "metadata": {}, "source": [ - "> ✂️ Replace the metric and mode below with the metric you reported in your training function." + "View the reported results of all trials as a dataframe." 
] }, { @@ -345,8 +322,8 @@ "metadata": {}, "outputs": [], "source": [ - "sample_result = result_grid[0]\n", - "sample_result.metrics\n" + "results_df = result_grid.get_dataframe()\n", + "results_df\n" ] } ], diff --git a/doc/source/templates/02_many_model_training/requirements.txt b/doc/source/templates/02_many_model_training/requirements.txt deleted file mode 100644 index f3abf6a44fdf..000000000000 --- a/doc/source/templates/02_many_model_training/requirements.txt +++ /dev/null @@ -1 +0,0 @@ -statsforecast==1.5.0 \ No newline at end of file diff --git a/doc/source/templates/03_serving_stable_diffusion/requirements.txt b/doc/source/templates/03_serving_stable_diffusion/requirements.txt deleted file mode 100644 index 30a36e09b6af..000000000000 --- a/doc/source/templates/03_serving_stable_diffusion/requirements.txt +++ /dev/null @@ -1,9 +0,0 @@ -accelerate==0.14.0 -diffusers @ git+https://github.com/huggingface/diffusers.git@25f11424f62d8d9bef8a721b806926399a1557f2 -numpy==1.23.4 -Pillow==9.3.0 -scipy==1.9.3 -tensorboard==2.12.0 -torch==1.13.0 -torchvision==0.14.0 -transformers==4.24.0 \ No newline at end of file diff --git a/doc/source/templates/03_serving_stable_diffusion/serving_stable_diffusion.ipynb b/doc/source/templates/03_serving_stable_diffusion/serving_stable_diffusion.ipynb index e1fb2fa66520..e3b7aad59433 100644 --- a/doc/source/templates/03_serving_stable_diffusion/serving_stable_diffusion.ipynb +++ b/doc/source/templates/03_serving_stable_diffusion/serving_stable_diffusion.ipynb @@ -16,75 +16,92 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "id": "25364e8e", "metadata": {}, "source": [ - "## Installing Dependencies\n", + "## Handling Dependencies\n", + "\n", + "This template requires certain Python packages to be available to every node in the cluster.\n", "\n", - "First, we'll need to install necessary dependencies in the Anyscale Workspace. 
To do so, first open up a terminal, and follow one of the following install steps, depending on which size template you picked:" + "> ✂️ Add your own package dependencies! You can specify bounds for package versions\n", + "> in the same format as a `requirements.txt` file.\n" ] }, { + "cell_type": "code", + "execution_count": null, + "id": "1b79bfb9", + "metadata": {}, + "outputs": [], + "source": [ + "requirements = [\n", + " \"accelerate==0.14.0\",\n", + " \"diffusers==0.15.1\",\n", + " \"numpy>=1.21.6,<=1.23.5\",\n", + " \"Pillow==9.3.0\",\n", + " \"scipy>=1.7.3,<=1.9.3\",\n", + " \"tensorboard>=2.11.2,<=2.12.0\",\n", + " \"torch==1.13.0\",\n", + " \"torchvision==0.14.0\",\n", + " \"transformers==4.28.1\",\n", + "]\n" + ] + }, + { + "attachments": {}, "cell_type": "markdown", - "id": "94ec23af", - "metadata": { - "tags": [ - "small" - ] - }, + "id": "33419c37", + "metadata": {}, "source": [ - "### Install Dependencies (Small-scale Template)\n", + "First, we may want to use these modules right here in our script, which is running on the head node.\n", + "Install the Python packages on the head node using `pip install`.\n", "\n", - "The small-scale template only runs on a single node (the head node), so we just need to install the requirements *locally*." 
+ "```{note}\n", + "You may need to restart this notebook kernel to access the installed packages.\n", + "```\n" ] }, { "cell_type": "code", "execution_count": null, - "id": "5cb0f0d0", - "metadata": { - "tags": [ - "small" - ] - }, + "id": "9aadf0c5", + "metadata": {}, "outputs": [], "source": [ - "%pip install -r requirements.txt --upgrade\n" + "all_requirements = \" \".join(requirements)\n", + "\n", + "%pip install {all_requirements}\n" ] }, { + "attachments": {}, "cell_type": "markdown", - "id": "a45dcc56", - "metadata": { - "tags": [ - "large" - ] - }, + "id": "4ba5feba", + "metadata": {}, "source": [ - "### Install Cluster-wide Dependencies (Large-scale Template)\n", + "Next, we need to make sure all worker nodes also have access to the dependencies.\n", + "For this, use a [Ray Runtime Environment](https://docs.ray.io/en/latest/ray-core/handling-dependencies.html#runtime-environments)\n", + "to dynamically set up dependencies throughout the cluster.\n", "\n", - "When running in a distributed Ray Cluster, all nodes need to have access to the installed packages.\n", - "For this, we'll use `pip install --user` to install the necessary requirements.\n", - "On an [Anyscale Workspace](https://docs.anyscale.com/user-guide/develop-and-debug/workspaces),\n", - "this will install packages to a *shared filesystem* that will be available to all nodes in the cluster." + "```{note}\n", + "This will be used later when setting up the Ray Serve deployment.\n", + "```\n" ] }, { "cell_type": "code", "execution_count": null, - "id": "4c248f05", - "metadata": { - "tags": [ - "large" - ] - }, + "id": "ca638dbb", + "metadata": {}, "outputs": [], "source": [ - "%pip install --user -r requirements.txt --upgrade\n" + "runtime_env = {\"pip\": requirements}\n" ] }, { + "attachments": {}, "cell_type": "markdown", "id": "520ef4d7", "metadata": {}, @@ -94,19 +111,7 @@ "First, we define the Ray Serve application with the model loading and inference logic. 
This includes setting up:\n", "- The `/imagine` API endpoint that we query to generate the image.\n", "- The stable diffusion model loaded inside a Ray Serve Deployment.\n", - " We'll specify the *number of model replicas* to keep active in our Ray cluster. These model replicas can process incoming requests concurrently.\n", - "\n", - "" + " We'll specify the *number of model replicas* to keep active in our Ray cluster. These model replicas can process incoming requests concurrently.\n" ] }, { @@ -120,19 +125,14 @@ "from fastapi.responses import Response\n", "from io import BytesIO\n", "import matplotlib.pyplot as plt\n", + "import numpy as np\n", "import os\n", "import requests\n", "import time\n", "import uuid\n", "\n", "import ray\n", - "from ray import serve\n", - "\n", - "try:\n", - " import torch\n", - " from diffusers import EulerDiscreteScheduler, StableDiffusionPipeline\n", - "except ImportError as e:\n", - " raise RuntimeError(\"Did you follow the steps above to install dependencies?\") from e\n" + "from ray import serve\n" ] }, { @@ -148,42 +148,51 @@ { "cell_type": "code", "execution_count": null, - "id": "c1bea30b", + "id": "90eca147", "metadata": { - "tags": [ - "small" - ] + "tags": [] }, "outputs": [], "source": [ - "# Default values for the small-scale template\n", - "NUM_REPLICAS: int = 1\n", - "NUM_GPUS_PER_REPLICA: float = 1\n" + "NUM_REPLICAS: int = 4\n", + "NUM_GPUS_PER_REPLICA: float = 1\n", + "\n", + "# Control the output size: (IMAGE_SIZE, IMAGE_SIZE)\n", + "# NOTE: Generated image quality degrades rapidly if you reduce size too much.\n", + "IMAGE_SIZE: int = 776\n" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "89eb3e2c", + "metadata": {}, + "source": [ + "First, we define the Ray Serve Deployment, which will load a stable diffusion model and perform inference with it.\n" ] }, { "cell_type": "code", "execution_count": null, - "id": "90eca147", - "metadata": { - "tags": [ - "large" - ] - }, + "id": "76a02213", + 
"metadata": {}, "outputs": [], "source": [ - "# Default values for the large-scale template\n", - "NUM_REPLICAS: int = 4\n", - "NUM_GPUS_PER_REPLICA: float = 1\n" + "# Configure each model replica to:\n", + "# 1. Setup the dependencies listed earlier.\n", + "# 2. Use the specified resources.\n", + "ray_actor_options = {\n", + " \"runtime_env\": runtime_env,\n", + " \"num_gpus\": NUM_GPUS_PER_REPLICA,\n", + "}\n" ] }, { + "attachments": {}, "cell_type": "markdown", - "id": "89eb3e2c", + "id": "880b8593", "metadata": {}, "source": [ - "First, we'll define the Ray Serve Deployment, which will load and perform inference with a stable diffusion model.\n", - "\n", "> ✂️ Modify this block to load your own model, and change the `generate` method to perform your own online inference logic!" ] }, @@ -195,12 +204,20 @@ "outputs": [], "source": [ "@serve.deployment(\n", - " ray_actor_options={\"num_gpus\": NUM_GPUS_PER_REPLICA},\n", + " ray_actor_options=ray_actor_options,\n", " num_replicas=NUM_REPLICAS,\n", ")\n", "class StableDiffusionV2:\n", " def __init__(self):\n", " # \n", + " try:\n", + " import torch\n", + " from diffusers import EulerDiscreteScheduler, StableDiffusionPipeline\n", + " except ImportError as e:\n", + " raise RuntimeError(\n", + " \"Did you set a runtime env to install dependencies?\"\n", + " ) from e\n", + "\n", " model_id = \"stabilityai/stable-diffusion-2\"\n", " scheduler = EulerDiscreteScheduler.from_pretrained(\n", " model_id, subfolder=\"scheduler\"\n", @@ -210,7 +227,7 @@ " )\n", " self.pipe = self.pipe.to(\"cuda\")\n", "\n", - " def generate(self, prompt: str, img_size: int = 512):\n", + " def generate(self, prompt: str, img_size: int = 776):\n", " # \n", " assert len(prompt), \"prompt parameter cannot be empty\"\n", " image = self.pipe(prompt, height=img_size, width=img_size).images[0]\n", @@ -248,7 +265,7 @@ " responses={200: {\"content\": {\"image/png\": {}}}},\n", " response_class=Response,\n", " )\n", - " async def generate(self, prompt: 
str, img_size: int = 512):\n", + " async def generate(self, prompt: str, img_size: int = 776):\n", " assert len(prompt), \"prompt parameter cannot be empty\"\n", "\n", " image = await (await self.handle.generate.remote(prompt, img_size=img_size))\n", @@ -372,7 +389,7 @@ "\n", "@ray.remote(num_cpus=1)\n", "def generate_image(prompt):\n", - " req = {\"prompt\": prompt, \"img_size\": 776}\n", + " req = {\"prompt\": prompt, \"img_size\": IMAGE_SIZE}\n", " resp = requests.get(endpoint, params=req)\n", " return resp.content\n", "\n", @@ -386,7 +403,7 @@ " plt.show()\n", "\n", "\n", - "def main():\n", + "def main() -> float:\n", " try:\n", " requests.get(endpoint, timeout=0.1)\n", " except Exception as e:\n", @@ -395,6 +412,7 @@ " \"`python server.py --num-replicas=...` in another terminal yet?\"\n", " ) from e\n", "\n", + " generation_times = []\n", " while True:\n", " prompt = (\n", " PROMPT\n", @@ -422,13 +440,15 @@ " filenames.append(filename)\n", "\n", " elapsed = time.time() - start\n", + " generation_times.append(elapsed)\n", " print(\n", " f\"\\nGenerated {len(images)} image(s) in {elapsed:.2f} seconds to \"\n", " f\"the directory: {dirname}\\n\"\n", " )\n", " show_images(filenames)\n", " if not INTERACTIVE:\n", - " break\n" + " break\n", + " return np.mean(generation_times) if generation_times else -1\n" ] }, { @@ -447,7 +467,7 @@ "metadata": {}, "outputs": [], "source": [ - "main()\n", + "mean_generation_time = main()\n", "serve.shutdown()\n" ] }, @@ -461,12 +481,6 @@ "You can modify this template and iterate your model deployment directly on your cluster within your Anyscale Workspace,\n", "testing with the local endpoint." ] - }, - { - "cell_type": "markdown", - "id": "1c96ed20", - "metadata": {}, - "source": [] } ], "metadata": { diff --git a/doc/source/templates/README.md b/doc/source/templates/README.md index da8bab08a6de..12f90b465db8 100644 --- a/doc/source/templates/README.md +++ b/doc/source/templates/README.md @@ -15,80 +15,112 @@ Coming soon... 
To add a template: -1. Add your template as a directory somewhere in the Ray repo. - All files needed to run the template should be contained within this directory. +1. Add your template as a directory somewhere in `doc/source/templates`. + For example: ```text ray/ doc/source/templates/ / - requirements.txt + README.md .ipynb ``` If your template requires any special dependencies that are not included in a - base `ray-ml` Docker image, be sure to specify a `requirements.txt` file within - the directory. - -2. Add an entry to `doc/source/templates/templates.yaml` that links to your template. + base `ray-ml` Docker image, be sure to list and install the necessary dependencies + within the notebook. See `03_serving_stable_diffusion` for an example. - ```yaml - - name: Many Model Training using Ray Tune - # Paths should be relative to the Ray repo root directory - path: doc/source/templates/02_many_model_training - cluster_env: doc/source/templates/configs/anyscale_cluster_env.yaml - small: - compute_config: - gcp: doc/source/templates/configs/compute/cpu/gcp_small.yaml - aws: doc/source/templates/configs/compute/cpu/aws_small.yaml - large: - compute_config: - # Relative to `path` - gcp: doc/source/templates/configs/compute/cpu/gcp_large.yaml - aws: doc/source/templates/configs/compute/cpu/aws_large.yaml + ```{note} + The template should be self-contained and not require any external files. + This requirement is to simplify the testing procedure. ``` - Make sure that you include a small/large version for the template. - See the following table for a description of template size: +2. Add another copy of the template that includes test-specific code and a smoke-test version if applicable. + + **Note:** The need for a second test copy is temporary. Only one notebook will be needed + from 2.5 onward, since the test-specific code will be filtered out. 
+ + **Label all test-specific code with the `remove-cell` Jupyter notebook tag.** + + **Put this test copy in `doc/source/templates/tests/.ipynb`.** + +3. List the smoke-test version of the template in `doc/BUILD` under the templates section. This will configure the smoke-test version to run in pre-merge CI. + + Set the `SMOKE_TEST` environment variable, which should be used in your template to + **to make the template work for a single CI instance.** + This environment variable can also be used to conditionally set certain smoke test parameters (like limiting dataset size). + + **Make sure that you tag the test with `"gpu"` if required, and any other tags + needed for special dependencies.** - | Attributes | Small-scale Version | Large-scale Version | - | -- | -- | -- | - | Number of Nodes | 1 | > 1 | - | Dataset size | Subset (of partitions/labels/rows) | Full example dataset | - | Model size | Pruned/mini version of the model | Full model | - | Runtime | 30-60s | Up to ~5-10 minutes | + ```python + py_test_run_all_notebooks( + size = "large", + include = ["source/templates/tests/batch_inference.ipynb"], + exclude = [], + data = ["//doc:workspace_templates"], + tags = ["exclusive", "team:ml", "ray_air", "gpu"], + env = {"SMOKE_TEST": "1"}, + ) + ``` - When you specify the template's compute config, see `doc/source/templates/configs` for defaults. +4. Add a release test for the template in `release/release_tests.yaml` (for both AWS and GCE). -3. Add a nightly release test for the template in `release/release_tests.yaml`. + **Use the `release_test_cluster_env.yaml` and `*_release_test.yaml` files for cluster env / compute configs.** + These contain placeholders for regions and cloud ids that our CI infra will fill in. 
```yaml - name: workspace_template_small_02_many_model_training group: Workspace templates - working_dir: workspace_templates/02_many_model_training + working_dir: workspace_templates/tests python: "3.9" - frequency: nightly + frequency: nightly-3x team: ml cluster: cluster_env: ../configs/release_test_cluster_env.yaml - cluster_compute: ../configs/compute/cpu/aws_small.yaml + cluster_compute: ../configs/compute/cpu/aws_release_test.yaml + + variations: + - __suffix__: aws + - __suffix__: gce + env: gce + frequency: manual + cluster: + cluster_env: ../configs/release_test_cluster_env.yaml + cluster_compute: ../configs/compute/cpu/gce_release_test.yaml run: timeout: 300 - script: pip install -U -r requirements.txt - && jupyter nbconvert --TagRemovePreprocessor.remove_input_tags='large' - --to script --output _test many_model_training.ipynb && ipython _test.py + script: jupyter nbconvert --to script --output _test many_model_training.ipynb && ipython _test.py ``` - Note: `--TagRemovePreprocessor.remove_input_tags='large'` will make sure that only the small-scale - version of the template gets tested nightly. +5. Add an entry to `doc/source/templates/templates.yaml` that links to your template. + + ```yaml + many-model-training-ray-tune: + title: Many Model Training + description: Scaling Many Model Training with Ray Tune + path: doc/source/templates/02_many_model_training + cluster_env: doc/source/templates/configs/anyscale_cluster_env.yaml + compute_config: + GCP: doc/source/templates/configs/compute/cpu/gce.yaml + AWS: doc/source/templates/configs/compute/cpu/aws.yaml + ``` + + **In this example, `many-model-training-ray-tune` is the template ID, which should be unique.** + + **Use the `anyscale_cluster_env.yaml`, `gce.yaml`, and `aws.yaml` files, NOT the release test counterparts.** + + When you specify the template's compute config, see `doc/source/templates/configs` for shared configs. + +6. 
Run a validation script on `templates.yaml` to make sure that the paths you specified are all valid and all yamls are properly formatted. -4. Run a validation script on `templates.yaml` to make sure that the paths you specified are all valid. + **Note:** This will also run in CI, but you can check quickly by running the validation script. ```bash $ python doc/source/templates/validate.py Success! ``` -5. Success! Your template is ready for review. +7. Success! Your template is ready for review. diff --git a/doc/source/templates/configs/compute/cpu/aws.yaml b/doc/source/templates/configs/compute/cpu/aws.yaml new file mode 100644 index 000000000000..a8b33d551afb --- /dev/null +++ b/doc/source/templates/configs/compute/cpu/aws.yaml @@ -0,0 +1,11 @@ +# 8 m5.2xlarge nodes --> 64 CPUs +head_node_type: + name: head_node_type + instance_type: m5.2xlarge + +worker_node_types: +- name: cpu_worker + instance_type: m5.2xlarge + min_workers: 7 + max_workers: 7 + use_spot: false diff --git a/doc/source/templates/configs/compute/cpu/aws_large.yaml b/doc/source/templates/configs/compute/cpu/aws_release_test.yaml similarity index 84% rename from doc/source/templates/configs/compute/cpu/aws_large.yaml rename to doc/source/templates/configs/compute/cpu/aws_release_test.yaml index c3260c7100fd..28b9115d2755 100644 --- a/doc/source/templates/configs/compute/cpu/aws_large.yaml +++ b/doc/source/templates/configs/compute/cpu/aws_release_test.yaml @@ -1,3 +1,6 @@ +cloud_id: {{ env["ANYSCALE_CLOUD_ID"] }} +region: us-west-2 + # 8 m5.2xlarge nodes --> 64 CPUs head_node_type: name: head_node_type diff --git a/doc/source/templates/configs/compute/cpu/aws_small.yaml b/doc/source/templates/configs/compute/cpu/aws_small.yaml deleted file mode 100644 index 16e628057308..000000000000 --- a/doc/source/templates/configs/compute/cpu/aws_small.yaml +++ /dev/null @@ -1,21 +0,0 @@ -cloud_id: {{ env["ANYSCALE_CLOUD_ID"] }} -region: us-west-2 - -# 1 m5.2xlarge node --> 8 CPUs -head_node_type: - name: 
head_node_type - instance_type: m5.2xlarge - -worker_node_types: -- name: cpu_worker - instance_type: m5.2xlarge - min_workers: 0 - max_workers: 0 - use_spot: false - -aws: - TagSpecifications: - - ResourceType: "instance" - Tags: - - Key: ttl-hours - Value: '24' \ No newline at end of file diff --git a/doc/source/templates/configs/compute/cpu/gce.yaml b/doc/source/templates/configs/compute/cpu/gce.yaml new file mode 100644 index 000000000000..c31a8a1dadb2 --- /dev/null +++ b/doc/source/templates/configs/compute/cpu/gce.yaml @@ -0,0 +1,11 @@ +# 8 n2-standard-8 nodes --> 64 CPUs +head_node_type: + name: head_node_type + instance_type: n2-standard-8 + +worker_node_types: +- name: cpu_worker + instance_type: n2-standard-8 + min_workers: 7 + max_workers: 7 + use_spot: false diff --git a/doc/source/templates/configs/compute/cpu/gcp_large.yaml b/doc/source/templates/configs/compute/cpu/gce_release_test.yaml similarity index 100% rename from doc/source/templates/configs/compute/cpu/gcp_large.yaml rename to doc/source/templates/configs/compute/cpu/gce_release_test.yaml diff --git a/doc/source/templates/configs/compute/cpu/gcp_small.yaml b/doc/source/templates/configs/compute/cpu/gcp_small.yaml deleted file mode 100644 index b5ad8b29c217..000000000000 --- a/doc/source/templates/configs/compute/cpu/gcp_small.yaml +++ /dev/null @@ -1,17 +0,0 @@ -cloud_id: {{ env["ANYSCALE_CLOUD_ID"] }} - -region: us-west1 -allowed_azs: - - us-west1-b - -# 1 n1-standard-8 node --> 8 CPUs -head_node_type: - name: head_node_type - instance_type: n1-standard-8 - -worker_node_types: -- name: cpu_worker - instance_type: n2-standard-8 - min_workers: 0 - max_workers: 0 - use_spot: false diff --git a/doc/source/templates/configs/compute/gpu/aws.yaml b/doc/source/templates/configs/compute/gpu/aws.yaml new file mode 100644 index 000000000000..101c8b7bbc4b --- /dev/null +++ b/doc/source/templates/configs/compute/gpu/aws.yaml @@ -0,0 +1,11 @@ +# 4 g4dn.4xlarge nodes --> 64 CPUs, 4 GPUs +head_node_type: + 
name: head_node_type + instance_type: g4dn.4xlarge + +worker_node_types: +- name: gpu_worker + instance_type: g4dn.4xlarge + min_workers: 3 + max_workers: 3 + use_spot: false diff --git a/doc/source/templates/configs/compute/gpu/aws_large.yaml b/doc/source/templates/configs/compute/gpu/aws_release_test.yaml similarity index 85% rename from doc/source/templates/configs/compute/gpu/aws_large.yaml rename to doc/source/templates/configs/compute/gpu/aws_release_test.yaml index 4d72cd8e5e24..501677653dcb 100644 --- a/doc/source/templates/configs/compute/gpu/aws_large.yaml +++ b/doc/source/templates/configs/compute/gpu/aws_release_test.yaml @@ -1,3 +1,6 @@ +cloud_id: {{ env["ANYSCALE_CLOUD_ID"] }} +region: us-west-2 + # 4 g4dn.4xlarge nodes --> 64 CPUs, 4 GPUs head_node_type: name: head_node_type diff --git a/doc/source/templates/configs/compute/gpu/aws_small.yaml b/doc/source/templates/configs/compute/gpu/aws_small.yaml deleted file mode 100644 index 237d186b749b..000000000000 --- a/doc/source/templates/configs/compute/gpu/aws_small.yaml +++ /dev/null @@ -1,21 +0,0 @@ -cloud_id: {{ env["ANYSCALE_CLOUD_ID"] }} -region: us-west-2 - -# 1 g4dn.4xlarge node --> 16 CPUs, 1 GPU -head_node_type: - name: head_node_type - instance_type: g4dn.4xlarge - -worker_node_types: -- name: gpu_worker - instance_type: g4dn.4xlarge - min_workers: 0 - max_workers: 0 - use_spot: false - -aws: - TagSpecifications: - - ResourceType: "instance" - Tags: - - Key: ttl-hours - Value: '24' \ No newline at end of file diff --git a/doc/source/templates/configs/compute/gpu/gcp_small.yaml b/doc/source/templates/configs/compute/gpu/gce.yaml similarity index 51% rename from doc/source/templates/configs/compute/gpu/gcp_small.yaml rename to doc/source/templates/configs/compute/gpu/gce.yaml index b57d71bfe75e..0de76bd875a2 100644 --- a/doc/source/templates/configs/compute/gpu/gcp_small.yaml +++ b/doc/source/templates/configs/compute/gpu/gce.yaml @@ -1,11 +1,4 @@ -cloud_id: {{ env["ANYSCALE_CLOUD_ID"] }} - 
-region: us-west1 -allowed_azs: - - us-west1-b - - -# 1 n1-standard-16-nvidia-tesla-t4-1 node --> 16 CPUs, 1 GPU +# 4 n1-standard-16-nvidia-tesla-t4-1 nodes --> 64 CPUs, 4 GPUs head_node_type: name: head_node_type instance_type: n1-standard-16-nvidia-tesla-t4-1 @@ -13,6 +6,6 @@ head_node_type: worker_node_types: - name: gpu_worker instance_type: n1-standard-16-nvidia-tesla-t4-1 - min_workers: 0 - max_workers: 0 + min_workers: 3 + max_workers: 3 use_spot: false diff --git a/doc/source/templates/configs/compute/gpu/gcp_large.yaml b/doc/source/templates/configs/compute/gpu/gce_release_test.yaml similarity index 100% rename from doc/source/templates/configs/compute/gpu/gcp_large.yaml rename to doc/source/templates/configs/compute/gpu/gce_release_test.yaml diff --git a/doc/source/templates/templates.yaml b/doc/source/templates/templates.yaml index 71e39dba785d..f9a7429ed494 100644 --- a/doc/source/templates/templates.yaml +++ b/doc/source/templates/templates.yaml @@ -5,21 +5,21 @@ batch-inference-ray-data: path: doc/source/templates/01_batch_inference cluster_env: doc/source/templates/configs/anyscale_cluster_env.yaml compute_config: - GCP: doc/source/templates/configs/compute/gpu/gcp_large.yaml - AWS: doc/source/templates/configs/compute/gpu/aws_large.yaml + GCP: doc/source/templates/configs/compute/gpu/gce.yaml + AWS: doc/source/templates/configs/compute/gpu/aws.yaml many-model-training-ray-tune: title: Many Model Training description: Scaling Many Model Training with Ray Tune path: doc/source/templates/02_many_model_training cluster_env: doc/source/templates/configs/anyscale_cluster_env.yaml compute_config: - GCP: doc/source/templates/configs/compute/cpu/gcp_large.yaml - AWS: doc/source/templates/configs/compute/cpu/aws_large.yaml + GCP: doc/source/templates/configs/compute/cpu/gce.yaml + AWS: doc/source/templates/configs/compute/cpu/aws.yaml serve-stable-diffusion-model-ray-serve: title: Serving Stable Diffusion description: Serving a Stable Diffusion Model with Ray 
Serve path: doc/source/templates/03_serving_stable_diffusion cluster_env: doc/source/templates/configs/anyscale_cluster_env.yaml compute_config: - GCP: doc/source/templates/configs/compute/cpu/gcp_large.yaml - AWS: doc/source/templates/configs/compute/cpu/aws_large.yaml + GCP: doc/source/templates/configs/compute/gpu/gce.yaml + AWS: doc/source/templates/configs/compute/gpu/aws.yaml \ No newline at end of file diff --git a/doc/source/templates/tests/batch_inference.ipynb b/doc/source/templates/tests/batch_inference.ipynb new file mode 100644 index 000000000000..6fd5bf32bc56 --- /dev/null +++ b/doc/source/templates/tests/batch_inference.ipynb @@ -0,0 +1,371 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "id": "cfababd6", + "metadata": { + "tags": [ + "test" + ] + }, + "outputs": [], + "source": [ + "# ==== Code for testing purposes to exclude in user-facing template. ====\n", + "\n", + "import os\n", + "import time\n", + "\n", + "SMOKE_TEST = True if os.environ.get(\"SMOKE_TEST\", \"0\") == \"1\" else False\n", + "\n", + "start_time = time.monotonic()\n" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "6fbc3e3c", + "metadata": {}, + "source": [ + "# Scaling Batch Inference with Ray Data\n", + "\n", + "This template is a quickstart to using [Ray Data](https://docs.ray.io/en/latest/data/dataset.html) for batch inference. Ray Data is one of many libraries under the [Ray AI Runtime](https://docs.ray.io/en/latest/ray-air/getting-started.html). See [this blog post](https://www.anyscale.com/blog/model-batch-inference-in-ray-actors-actorpool-and-datasets) for more information on why and how you should perform batch inference with Ray!\n", + "\n", + "This template walks through GPU batch prediction on an image dataset using a PyTorch model, but the framework and data format are there just to help you build your own application!\n", + "\n", + "At a high level, this template will:\n", + "1. 
[Load your dataset using Ray Data.](https://docs.ray.io/en/latest/data/creating-datasets.html)\n", + "2. [Preprocess your dataset before feeding it to your model.](https://docs.ray.io/en/latest/data/transforming-datasets.html)\n", + "3. [Initialize your model and perform inference on a shard of your dataset with a remote actor.](https://docs.ray.io/en/latest/data/transforming-datasets.html#callable-class-udfs)\n", + "4. [Save your prediction results.](https://docs.ray.io/en/latest/data/api/input_output.html)\n", + "\n", + "> Slot in your code below wherever you see the ✂️ icon to build a many model training Ray application off of this template!" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "065e7765", + "metadata": {}, + "outputs": [], + "source": [ + "import torch\n", + "import numpy as np\n", + "import tempfile\n", + "from typing import Dict\n", + "\n", + "import ray\n" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "c99f142a", + "metadata": {}, + "source": [ + ">✂️ Play around with these values!\n", + ">\n", + ">For example, for a cluster with 4 GPU nodes, you may want 4 workers, each using 1 GPU.\n", + ">Be sure to stay within the resource constraints of your Ray Cluster if autoscaling is not enabled.\n", + ">You can check the available resources in your Ray Cluster with: `ray status`" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "770bbdc7", + "metadata": {}, + "outputs": [], + "source": [ + "!ray status" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "9d49681f-baf0-4ed8-9740-5c4e38744311", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "NUM_WORKERS: int = 4\n", + "NUM_GPUS_PER_WORKER: float = 1\n" + ] + }, + { + "cell_type": "markdown", + "id": "23321ba8", + "metadata": {}, + "source": [ + "```{tip}\n", + "Try setting `NUM_GPUS_PER_WORKER` to a fractional amount! 
This will leverage Ray's fractional resource allocation, which means you can schedule multiple batch inference workers to happen on the same GPU.\n", + "```" + ] + }, + { + "cell_type": "markdown", + "id": "3b6f2352", + "metadata": {}, + "source": [ + "> ✂️ Replace this function with logic to load your own data with Ray Data." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "615f4a78", + "metadata": {}, + "outputs": [], + "source": [ + "def load_ray_dataset() -> ray.data.Dataset:\n", + " from ray.data.datasource.partitioning import Partitioning\n", + "\n", + " s3_uri = \"s3://anonymous@air-example-data-2/imagenette2/val/\"\n", + " partitioning = Partitioning(\"dir\", field_names=[\"class\"], base_dir=s3_uri)\n", + " ds = ray.data.read_images(\n", + " s3_uri, size=(256, 256), partitioning=partitioning, mode=\"RGB\"\n", + " )\n", + " return ds\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "966bcfdc", + "metadata": {}, + "outputs": [], + "source": [ + "ds = load_ray_dataset()\n", + "ds.schema()\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "965db5e8", + "metadata": { + "tags": [ + "test" + ] + }, + "outputs": [], + "source": [ + "if SMOKE_TEST:\n", + " ds = ds.limit(12)\n" + ] + }, + { + "cell_type": "markdown", + "id": "39d01e3c", + "metadata": {}, + "source": [ + "> ✂️ Replace this function with your own data preprocessing logic." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "652121bd", + "metadata": {}, + "outputs": [], + "source": [ + "def preprocess(batch: Dict[str, np.ndarray]) -> Dict[str, np.ndarray]:\n", + " from torchvision import transforms\n", + "\n", + " def to_tensor(batch: np.ndarray) -> torch.Tensor:\n", + " tensor = torch.as_tensor(batch, dtype=torch.float)\n", + " # (B, H, W, C) -> (B, C, H, W)\n", + " tensor = tensor.permute(0, 3, 1, 2).contiguous()\n", + " # [0., 255.] 
-> [0., 1.]\n", + " tensor = tensor.div(255)\n", + " return tensor\n", + "\n", + " transform = transforms.Compose(\n", + " [\n", + " transforms.Lambda(to_tensor),\n", + " transforms.CenterCrop(224),\n", + " transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),\n", + " ]\n", + " )\n", + " return {\"image\": transform(batch[\"image\"]).numpy()}\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "c35f5a17", + "metadata": {}, + "outputs": [], + "source": [ + "ds = ds.map_batches(preprocess, batch_format=\"numpy\")\n", + "ds.schema()\n" + ] + }, + { + "cell_type": "markdown", + "id": "ad059e54", + "metadata": {}, + "source": [ + "> ✂️ Replace parts of this Callable class with your own model initialization and inference logic." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "42cac828", + "metadata": {}, + "outputs": [], + "source": [ + "class PredictCallable:\n", + " def __init__(self):\n", + " # \n", + " from torchvision import models\n", + "\n", + " self.model = models.resnet152(pretrained=True)\n", + " self.model.eval()\n", + " self.device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n", + " self.model.to(self.device)\n", + "\n", + " def __call__(self, batch: Dict[str, np.ndarray]) -> Dict[str, np.ndarray]:\n", + " # \n", + " input_data = torch.as_tensor(batch[\"image\"], device=self.device)\n", + " with torch.no_grad():\n", + " result = self.model(input_data)\n", + " return {\"predictions\": result.cpu().numpy()}\n" + ] + }, + { + "cell_type": "markdown", + "id": "fda0c298", + "metadata": {}, + "source": [ + "Now, perform batch prediction using Ray Data! Ray Data will perform model inference using `NUM_WORKERS` copies of the `PredictCallable` class you defined." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "331e21e4", + "metadata": {}, + "outputs": [], + "source": [ + "predictions = ds.map_batches(\n", + " PredictCallable,\n", + " batch_size=128,\n", + " compute=ray.data.ActorPoolStrategy(\n", + " # Fix the number of batch inference workers to a specified value.\n", + " size=NUM_WORKERS,\n", + " ),\n", + " num_gpus=NUM_GPUS_PER_WORKER,\n", + " batch_format=\"numpy\",\n", + ")\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "23e77ada", + "metadata": {}, + "outputs": [], + "source": [ + "preds = predictions.materialize()\n", + "preds.schema()\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "8d606556", + "metadata": {}, + "outputs": [], + "source": [ + "preds.take(1)\n" + ] + }, + { + "cell_type": "markdown", + "id": "ceddd984", + "metadata": {}, + "source": [ + "```{tip}\n", + "Play around with the `min_size` and `max_size` parameters to enable autoscaling!\n", + "For example, try commenting out `max_size`: this will autoscale up to an infinite number of workers, if you have free resources in the cluster.\n", + "```" + ] + }, + { + "cell_type": "markdown", + "id": "90ec67e8", + "metadata": {}, + "source": [ + "Shard the predictions into a few partitions, and save each partition to a file!\n", + "\n", + "```{note}\n", + "This currently saves to the local filesystem under `/tmp/predictions`, but you could also save to a cloud bucket (e.g., `s3://predictions-bucket`).\n", + "```" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "c1887e34", + "metadata": {}, + "outputs": [], + "source": [ + "num_shards = 3\n", + "\n", + "with tempfile.TemporaryDirectory() as temp_dir:\n", + " predictions.repartition(num_shards).write_parquet(f\"local://{temp_dir}\")\n", + " print(f\"Predictions saved to `{temp_dir}`!\")\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1e88a268", + "metadata": { + "tags": [ + "test" + ] + 
}, + "outputs": [], + "source": [ + "import json\n", + "import os\n", + "\n", + "release_test_out = os.environ.get(\"TEST_OUTPUT_JSON\", \"/tmp/release_test_out.json\")\n", + "\n", + "elapsed = time.monotonic() - start_time\n", + "with open(release_test_out, \"wt\") as f:\n", + " json.dump({\"total_runtime\": elapsed}, f)\n" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.13" + }, + "vscode": { + "interpreter": { + "hash": "265d195fda5292fe8f69c6e37c435a5634a1ed3b6799724e66a975f68fa21517" + } + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/doc/source/templates/tests/many_model_training.ipynb b/doc/source/templates/tests/many_model_training.ipynb new file mode 100644 index 000000000000..f6082a27826b --- /dev/null +++ b/doc/source/templates/tests/many_model_training.ipynb @@ -0,0 +1,413 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "id": "46369bd2", + "metadata": { + "tags": [ + "remove-cell" + ] + }, + "outputs": [], + "source": [ + "# ==== Code for testing purposes to exclude in user-facing template. ====\n", + "\n", + "import os\n", + "\n", + "SMOKE_TEST = True if os.environ.get(\"SMOKE_TEST\", \"0\") == \"1\" else False\n" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "98e0d4f3", + "metadata": {}, + "source": [ + "# Scaling Many Model Training with Ray Tune\n", + "\n", + "This template is a quickstart to using [Ray Tune](https://docs.ray.io/en/latest/tune/index.html) for batch inference. Ray Tune is one of many libraries under the [Ray AI Runtime](https://docs.ray.io/en/latest/ray-air/getting-started.html). 
See [this blog post](https://www.anyscale.com/blog/training-one-million-machine-learning-models-in-record-time-with-ray) for more information on the benefits of performing many model training with Ray!\n", + "\n", + "This template walks through time-series forecasting using `statsforecast`, but the framework and data format can be swapped out easily -- they are there just to help you build your own application!\n", + "\n", + "At a high level, this template will:\n", + "\n", + "1. [Define the training function for a single partition of data.](https://docs.ray.io/en/latest/tune/tutorials/tune-run.html)\n", + "2. [Define a Tune search space to run training over many partitions of data.](https://docs.ray.io/en/latest/tune/tutorials/tune-search-spaces.html)\n", + "3. [Extract the best model per dataset partition from the Tune experiment output.](https://docs.ray.io/en/latest/tune/examples/tune_analyze_results.html)" + ] + }, + { + "cell_type": "markdown", + "id": "08e65f8d", + "metadata": {}, + "source": [ + "> Slot in your code below wherever you see the ✂️ icon to build a many model training Ray application off of this template!" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "c56bb4d0", + "metadata": {}, + "source": [ + "## Handling Dependencies\n", + "\n", + "This template requires certain Python packages to be available to every node in the cluster.\n", + "\n", + "> ✂️ Add your own package dependencies! 
You can specify bounds for package versions\n", + "> in the same format as a `requirements.txt` file.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "0c9b3dec", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "requirements = [\n", + " \"statsforecast==1.5.0\",\n", + "]\n" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "eff9369f", + "metadata": {}, + "source": [ + "First, we may want to use these modules right here in our script, which is running on the head node.\n", + "Install the Python packages on the head node using `pip install`.\n", + "\n", + "You may need to restart this notebook kernel to access the installed packages.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5cba940c", + "metadata": {}, + "outputs": [], + "source": [ + "all_requirements = \" \".join(requirements)\n", + "\n", + "%pip install {all_requirements}\n" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "1dcaea58", + "metadata": {}, + "source": [ + "Next, we need to make sure all worker nodes also have access to the dependencies.\n", + "For this, use a [Ray Runtime Environment](https://docs.ray.io/en/latest/ray-core/handling-dependencies.html#runtime-environments)\n", + "to dynamically set up dependencies throughout the cluster.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e268225d", + "metadata": {}, + "outputs": [], + "source": [ + "import ray\n", + "\n", + "ray.init(runtime_env={\"pip\": requirements})\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "389adc20", + "metadata": {}, + "outputs": [], + "source": [ + "import pandas as pd\n", + "from pyarrow import parquet as pq\n", + "from sklearn.metrics import mean_squared_error\n", + "\n", + "from ray import tune\n", + "from ray.air import session\n" + ] + }, + { + "cell_type": "markdown", + "id": "b8fc83d0", + "metadata": {}, + "source": [ + "> ✂️ Replace this value to 
change the number of data partitions you will use. This will be total the number of Tune trials you will run!\n", + ">\n", + "> Note that this template fits two models per data partition and reports the best performing one." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5390c232", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "NUM_DATA_PARTITIONS: int = 1000\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "c6fb7a2d", + "metadata": { + "tags": [ + "remove-cell" + ] + }, + "outputs": [], + "source": [ + "if SMOKE_TEST:\n", + " NUM_DATA_PARTITIONS: int = 10\n", + "\n", + "import time\n", + "\n", + "start_time = time.monotonic()\n" + ] + }, + { + "cell_type": "markdown", + "id": "8b2f3d16", + "metadata": {}, + "source": [ + "> ✂️ Replace the following with your own data-loading and evaluation helper functions. (Or, just delete these!)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "68b14061", + "metadata": {}, + "outputs": [], + "source": [ + "def get_m5_partition(unique_id: str) -> pd.DataFrame:\n", + " df = (\n", + " pq.read_table(\n", + " \"s3://anonymous@m5-benchmarks/data/train/target.parquet\",\n", + " columns=[\"item_id\", \"timestamp\", \"demand\"],\n", + " filters=[(\"item_id\", \"=\", unique_id)],\n", + " )\n", + " .to_pandas()\n", + " .rename(columns={\"item_id\": \"unique_id\", \"timestamp\": \"ds\", \"demand\": \"y\"})\n", + " )\n", + " df[\"unique_id\"] = df[\"unique_id\"].astype(str)\n", + " df[\"ds\"] = pd.to_datetime(df[\"ds\"])\n", + " return df.dropna()\n", + "\n", + "\n", + "def evaluate_cross_validation(df: pd.DataFrame, metric) -> pd.DataFrame:\n", + " models = df.drop(columns=[\"ds\", \"cutoff\", \"y\"]).columns.tolist()\n", + " evals = []\n", + " for model in models:\n", + " eval_ = (\n", + " df.groupby([\"unique_id\", \"cutoff\"])\n", + " .apply(lambda x: metric(x[\"y\"].values, x[model].values))\n", + " .to_frame()\n", + " )\n", + " eval_.columns 
= [model]\n", + " evals.append(eval_)\n", + " evals = pd.concat(evals, axis=1)\n", + " evals = evals.groupby([\"unique_id\"]).mean(numeric_only=True)\n", + " evals[\"best_model\"] = evals.idxmin(axis=1)\n", + " return evals\n" + ] + }, + { + "cell_type": "markdown", + "id": "060ee3ce", + "metadata": {}, + "source": [ + "> ✂️ Replace this with your own training logic." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "faaa0dad", + "metadata": {}, + "outputs": [], + "source": [ + "def train_fn(config: dict):\n", + " try:\n", + " from statsforecast import StatsForecast\n", + " from statsforecast.models import AutoARIMA, AutoETS\n", + " except ImportError as e:\n", + " raise RuntimeError(\"Did you set a runtime env to install dependencies?\") from e\n", + "\n", + " data_partition_id = config[\"data_partition_id\"]\n", + " train_df = get_m5_partition(data_partition_id)\n", + "\n", + " models = [AutoARIMA(), AutoETS()]\n", + " n_windows = 1\n", + " forecast_horizon = 4\n", + "\n", + " sf = StatsForecast(\n", + " df=train_df,\n", + " models=models,\n", + " freq=\"D\",\n", + " n_jobs=n_windows * len(models),\n", + " )\n", + " cv_df = sf.cross_validation(\n", + " h=forecast_horizon,\n", + " step_size=forecast_horizon,\n", + " n_windows=n_windows,\n", + " )\n", + "\n", + " eval_df = evaluate_cross_validation(df=cv_df, metric=mean_squared_error)\n", + " best_model = eval_df[\"best_model\"][data_partition_id]\n", + " forecast_mse = eval_df[best_model][data_partition_id]\n", + "\n", + " # Report the best-performing model and its corresponding eval metric.\n", + " session.report({\"forecast_mse\": forecast_mse, \"best_model\": best_model})\n", + "\n", + "\n", + "trainable = train_fn\n", + "trainable = tune.with_resources(trainable, resources={\"CPU\": 2 * 1})\n" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "301c7c58", + "metadata": {}, + "source": [ + "```{note}\n", + "`tune.with_resources` is used at the end to specify the number of 
resources to assign *each trial*.\n", + "Feel free to change this to the resources required by your application! You can also comment out the `tune.with_resources` block to assign `1 CPU` (the default) to each trial.\n", + "\n", + "Note that this is purely for Tune to know how many trials to schedule concurrently -- setting the number of CPUs does not actually enforce any kind of resource isolation!\n", + "In this template, `statsforecast` runs cross validation in parallel with M models * N temporal cross-validation windows (e.g. 2 * 1).\n", + "```\n", + "\n", + "See [Ray Tune's guide on assigning resources](https://docs.ray.io/en/latest/tune/tutorials/tune-resources.html) for more information." + ] + }, + { + "cell_type": "markdown", + "id": "89741e7a", + "metadata": {}, + "source": [ + "> ✂️ Replace this with your desired hyperparameter search space!\n", + ">\n", + "> For example, this template searches over the data partition ID to train a model on." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1e9f2825", + "metadata": {}, + "outputs": [], + "source": [ + "# Download the list of item ids used to partition the dataset.\n", + "data_partitions = list(\n", + " pd.read_csv(\n", + " \"https://air-example-data.s3.us-west-2.amazonaws.com/m5_benchmarks_item_ids.csv\"\n", + " )[\"item_id\"]\n", + ")\n", + "if NUM_DATA_PARTITIONS > len(data_partitions):\n", + " print(f\"There are only {len(data_partitions)} partitions!\")\n", + "\n", + "param_space = {\n", + " \"data_partition_id\": tune.grid_search(data_partitions[:NUM_DATA_PARTITIONS]),\n", + "}\n" + ] + }, + { + "cell_type": "markdown", + "id": "13b4dd3e", + "metadata": {}, + "source": [ + "Run many model training using Ray Tune!" 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "b1ef8245", + "metadata": {}, + "outputs": [], + "source": [ + "tuner = tune.Tuner(trainable, param_space=param_space)\n", + "result_grid = tuner.fit()\n" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "ba1a07d0", + "metadata": {}, + "source": [ + "View the reported results of all trials as a dataframe." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "d7baa29a", + "metadata": {}, + "outputs": [], + "source": [ + "results_df = result_grid.get_dataframe()\n", + "results_df\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7a66e5cc", + "metadata": { + "tags": [ + "remove-cell" + ] + }, + "outputs": [], + "source": [ + "import json\n", + "\n", + "release_test_out = os.environ.get(\"TEST_OUTPUT_JSON\", \"/tmp/release_test_out.json\")\n", + "\n", + "elapsed = time.monotonic() - start_time\n", + "with open(release_test_out, \"wt\") as f:\n", + " json.dump({\"total_time\": elapsed}, f)\n" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.13" + }, + "vscode": { + "interpreter": { + "hash": "265d195fda5292fe8f69c6e37c435a5634a1ed3b6799724e66a975f68fa21517" + } + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/doc/source/templates/tests/serving_stable_diffusion.ipynb b/doc/source/templates/tests/serving_stable_diffusion.ipynb new file mode 100644 index 000000000000..086b29335d79 --- /dev/null +++ b/doc/source/templates/tests/serving_stable_diffusion.ipynb @@ -0,0 +1,567 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "id": "d3939eef", + "metadata": { + "tags": [ + 
"remove-cell" + ] + }, + "outputs": [], + "source": [ + "# ==== Code for testing purposes to exclude in user-facing template. ====\n", + "\n", + "import os\n", + "\n", + "SMOKE_TEST = True if os.environ.get(\"SMOKE_TEST\", \"0\") == \"1\" else False\n" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "597c13c0", + "metadata": {}, + "source": [ + "# Serving a Stable Diffusion Model with Ray Serve\n", + "\n", + "This guide is a quickstart to use [Ray Serve](https://docs.ray.io/en/latest/serve/index.html) for model serving. Ray Serve is one of many libraries under the [Ray AI Runtime](https://docs.ray.io/en/latest/ray-air/getting-started.html).\n", + "\n", + "This template loads a pretrained stable diffusion model from HuggingFace and serves it to a local endpoint as a Ray Serve deployment. \n", + "\n", + "> Slot in your code below wherever you see the ✂️ icon to build a model serving Ray application off of this template!" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "25364e8e", + "metadata": {}, + "source": [ + "## Handling Dependencies\n", + "\n", + "This template requires certain Python packages to be available to every node in the cluster.\n", + "\n", + "> ✂️ Add your own package dependencies! 
You can specify bounds for package versions\n", + "> in the same format as a `requirements.txt` file.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1b79bfb9", + "metadata": {}, + "outputs": [], + "source": [ + "requirements = [\n", + " \"accelerate==0.14.0\",\n", + " \"diffusers==0.15.1\",\n", + " \"numpy>=1.21.6,<=1.23.5\",\n", + " \"Pillow==9.3.0\",\n", + " \"scipy>=1.7.3,<=1.9.3\",\n", + " \"tensorboard>=2.11.2,<=2.12.0\",\n", + " \"torch==1.13.0\",\n", + " \"torchvision==0.14.0\",\n", + " \"transformers==4.28.1\",\n", + "]\n" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "33419c37", + "metadata": {}, + "source": [ + "First, we may want to use these modules right here in our script, which is running on the head node.\n", + "Install the Python packages on the head node using `pip install`.\n", + "\n", + "```{note}\n", + "You may need to restart this notebook kernel to access the installed packages.\n", + "```\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "9aadf0c5", + "metadata": {}, + "outputs": [], + "source": [ + "all_requirements = \" \".join(requirements)\n", + "\n", + "%pip install {all_requirements}\n" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "4ba5feba", + "metadata": {}, + "source": [ + "Next, we need to make sure all worker nodes also have access to the dependencies.\n", + "For this, use a [Ray Runtime Environment](https://docs.ray.io/en/latest/ray-core/handling-dependencies.html#runtime-environments)\n", + "to dynamically set up dependencies throughout the cluster.\n", + "\n", + "```{note}\n", + "This will be used later when setting up the Ray Serve deployment.\n", + "```\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "ca638dbb", + "metadata": {}, + "outputs": [], + "source": [ + "runtime_env = {\"pip\": requirements}\n" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "520ef4d7", + "metadata": {}, + 
"source": [ + "## Deploy the Ray Serve application locally\n", + "\n", + "First, we define the Ray Serve application with the model loading and inference logic. This includes setting up:\n", + "- The `/imagine` API endpoint that we query to generate the image.\n", + "- The stable diffusion model loaded inside a Ray Serve Deployment.\n", + " We'll specify the *number of model replicas* to keep active in our Ray cluster. These model replicas can process incoming requests concurrently.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "72ee2132", + "metadata": {}, + "outputs": [], + "source": [ + "from fastapi import FastAPI\n", + "from fastapi.responses import Response\n", + "from io import BytesIO\n", + "import matplotlib.pyplot as plt\n", + "import numpy as np\n", + "import os\n", + "import requests\n", + "import time\n", + "import uuid\n", + "\n", + "import ray\n", + "from ray import serve\n" + ] + }, + { + "cell_type": "markdown", + "id": "de6318ac", + "metadata": {}, + "source": [ + "> ✂️ Replace these values to change the number of model replicas to serve, as well as the GPU resources required by each replica.\n", + ">\n", + "> With more model replicas, more images can be generated in parallel!" 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "90eca147", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "NUM_REPLICAS: int = 4\n", + "NUM_GPUS_PER_REPLICA: float = 1\n", + "\n", + "# Control the output size: (IMAGE_SIZE, IMAGE_SIZE)\n", + "# NOTE: Generated image quality degrades rapidly if you reduce size too much.\n", + "IMAGE_SIZE: int = 776\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "40a719f6", + "metadata": { + "tags": [ + "remove-cell" + ] + }, + "outputs": [], + "source": [ + "if SMOKE_TEST:\n", + " NUM_REPLICAS: int = 1\n", + " NUM_GPUS_PER_REPLICA: float = 1\n", + " IMAGE_SIZE: int = 256\n" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "89eb3e2c", + "metadata": {}, + "source": [ + "First, we define the Ray Serve Deployment, which will load a stable diffusion model and perform inference with it.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "76a02213", + "metadata": {}, + "outputs": [], + "source": [ + "# Configure each model replica to:\n", + "# 1. Setup the dependencies listed earlier.\n", + "# 2. Use the specified resources.\n", + "ray_actor_options = {\n", + " \"runtime_env\": runtime_env,\n", + " \"num_gpus\": NUM_GPUS_PER_REPLICA,\n", + "}\n" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "880b8593", + "metadata": {}, + "source": [ + "> ✂️ Modify this block to load your own model, and change the `generate` method to perform your own online inference logic!" 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f203efd4", + "metadata": {}, + "outputs": [], + "source": [ + "@serve.deployment(\n", + " ray_actor_options=ray_actor_options,\n", + " num_replicas=NUM_REPLICAS,\n", + ")\n", + "class StableDiffusionV2:\n", + " def __init__(self):\n", + " # \n", + " try:\n", + " import torch\n", + " from diffusers import EulerDiscreteScheduler, StableDiffusionPipeline\n", + " except ImportError as e:\n", + " raise RuntimeError(\n", + " \"Did you set a runtime env to install dependencies?\"\n", + " ) from e\n", + "\n", + " model_id = \"stabilityai/stable-diffusion-2\"\n", + " scheduler = EulerDiscreteScheduler.from_pretrained(\n", + " model_id, subfolder=\"scheduler\"\n", + " )\n", + " self.pipe = StableDiffusionPipeline.from_pretrained(\n", + " model_id, scheduler=scheduler, revision=\"fp16\", torch_dtype=torch.float16\n", + " )\n", + " self.pipe = self.pipe.to(\"cuda\")\n", + "\n", + " def generate(self, prompt: str, img_size: int = 776):\n", + " # \n", + " assert len(prompt), \"prompt parameter cannot be empty\"\n", + " image = self.pipe(prompt, height=img_size, width=img_size).images[0]\n", + " return image\n" + ] + }, + { + "cell_type": "markdown", + "id": "0134aa54", + "metadata": {}, + "source": [ + "Next, we'll define the actual API endpoint to live at `/imagine`.\n", + "\n", + "> ✂️ Modify this block to change the endpoint URL, response schema, and add any post-processing logic needed from your model output!" 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "6f80fee2", + "metadata": {}, + "outputs": [], + "source": [ + "app = FastAPI()\n", + "\n", + "\n", + "@serve.deployment(num_replicas=1, route_prefix=\"/\")\n", + "@serve.ingress(app)\n", + "class APIIngress:\n", + " def __init__(self, diffusion_model_handle) -> None:\n", + " self.handle = diffusion_model_handle\n", + "\n", + " @app.get(\n", + " \"/imagine\",\n", + " responses={200: {\"content\": {\"image/png\": {}}}},\n", + " response_class=Response,\n", + " )\n", + " async def generate(self, prompt: str, img_size: int = 776):\n", + " assert len(prompt), \"prompt parameter cannot be empty\"\n", + "\n", + " image = await (await self.handle.generate.remote(prompt, img_size=img_size))\n", + "\n", + " file_stream = BytesIO()\n", + " image.save(file_stream, \"PNG\")\n", + " return Response(content=file_stream.getvalue(), media_type=\"image/png\")\n" + ] + }, + { + "cell_type": "markdown", + "id": "61b8916d", + "metadata": {}, + "source": [ + "Now, we deploy the Ray Serve application locally at `http://localhost:8000`!" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "dfc2e244", + "metadata": { + "scrolled": true + }, + "outputs": [], + "source": [ + "entrypoint = APIIngress.bind(StableDiffusionV2.bind())\n", + "port = 8000\n", + "\n", + "# Shutdown any existing Serve replicas, if they're still around.\n", + "serve.shutdown()\n", + "serve.run(entrypoint, port=port, name=\"serving_stable_diffusion_template\")\n", + "print(\"Done setting up replicas! Now accepting requests...\")\n" + ] + }, + { + "cell_type": "markdown", + "id": "757678cc", + "metadata": {}, + "source": [ + "## Make requests to the endpoint\n", + "\n", + "Next, we'll build a simple client to submit prompts as HTTP requests to the local endpoint at `http://localhost:8000/imagine`." 
+ ] + }, + { + "cell_type": "markdown", + "id": "3e29193b", + "metadata": {}, + "source": [ + "> ✂️ Replace this value to change the number of images to generate per prompt.\n", + ">\n", + "> Each image will be generated starting from a different set of random noise,\n", + "> so you'll be able to see multiple options per prompt!\n", + ">\n", + "> Try starting with `NUM_IMAGES_PER_PROMPT` equal to `NUM_REPLICAS` from earlier." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "6aac28e1", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "NUM_IMAGES_PER_PROMPT: int = NUM_REPLICAS\n" + ] + }, + { + "cell_type": "markdown", + "id": "6b466230", + "metadata": {}, + "source": [ + "> ✂️ You can choose to run this interactively, or submit a single `PROMPT`." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "dd20a52d", + "metadata": {}, + "outputs": [], + "source": [ + "INTERACTIVE: bool = False\n", + "PROMPT = \"twin peaks sf in basquiat painting style\"\n" + ] + }, + { + "cell_type": "markdown", + "id": "008976b5", + "metadata": {}, + "source": [ + "Start the client script in the next few cells, and generate your first image! 
For example:\n", + "\n", + "If running interactively, this will look like:\n", + "\n", + "```\n", + "Enter a prompt (or 'q' to quit): twin peaks sf in basquiat painting style\n", + "\n", + "Generating image(s)...\n", + "(Take a look at the terminal serving the endpoint for more logs!)\n", + "\n", + "\n", + "Generated 1 image(s) in 69.89 seconds to the directory: 58b298d9\n", + "```\n", + "\n", + "![Example output](https://user-images.githubusercontent.com/3887863/221063452-3c5e5f6b-fc8c-410f-ad5c-202441cceb51.png)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "67ad095b", + "metadata": {}, + "outputs": [], + "source": [ + "endpoint = f\"http://localhost:{port}/imagine\"\n", + "\n", + "\n", + "@ray.remote(num_cpus=1)\n", + "def generate_image(prompt):\n", + " req = {\"prompt\": prompt, \"img_size\": IMAGE_SIZE}\n", + " resp = requests.get(endpoint, params=req)\n", + " return resp.content\n", + "\n", + "\n", + "def show_images(filenames):\n", + " fig, axs = plt.subplots(1, len(filenames), figsize=(4 * len(filenames), 4))\n", + " for i, filename in enumerate(filenames):\n", + " ax = axs if len(filenames) == 1 else axs[i]\n", + " ax.imshow(plt.imread(filename))\n", + " ax.axis(\"off\")\n", + " plt.show()\n", + "\n", + "\n", + "def main() -> float:\n", + " try:\n", + " requests.get(endpoint, timeout=0.1)\n", + " except Exception as e:\n", + " raise RuntimeWarning(\n", + " \"Did you setup the Ray Serve model replicas with \"\n", + " \"`python server.py --num-replicas=...` in another terminal yet?\"\n", + " ) from e\n", + "\n", + " generation_times = []\n", + " while True:\n", + " prompt = (\n", + " PROMPT\n", + " if not INTERACTIVE\n", + " else input(f\"\\nEnter a prompt (or 'q' to quit): \")\n", + " )\n", + " if prompt.lower() == \"q\":\n", + " break\n", + "\n", + " print(\"\\nGenerating image(s)...\\n\")\n", + " start = time.time()\n", + "\n", + " # Make `NUM_IMAGES_PER_PROMPT` requests to the endpoint at once!\n", + " images = ray.get(\n", + " 
[generate_image.remote(prompt) for _ in range(NUM_IMAGES_PER_PROMPT)]\n", + " )\n", + "\n", + " dirname = f\"{uuid.uuid4().hex[:8]}\"\n", + " os.makedirs(dirname)\n", + " filenames = []\n", + " for i, image in enumerate(images):\n", + " filename = os.path.join(dirname, f\"{i}.png\")\n", + " with open(filename, \"wb\") as f:\n", + " f.write(image)\n", + " filenames.append(filename)\n", + "\n", + " elapsed = time.time() - start\n", + " generation_times.append(elapsed)\n", + " print(\n", + " f\"\\nGenerated {len(images)} image(s) in {elapsed:.2f} seconds to \"\n", + " f\"the directory: {dirname}\\n\"\n", + " )\n", + " show_images(filenames)\n", + " if not INTERACTIVE:\n", + " break\n", + " return np.mean(generation_times) if generation_times else -1\n" + ] + }, + { + "cell_type": "markdown", + "id": "c8949cc7", + "metadata": {}, + "source": [ + "Once the stable diffusion model finishes generating your image, it will be included in the HTTP response body.\n", + "The client writes this to an image in your Workspace directory for you to view. It'll also show up in the notebook cell!" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "71be51fa", + "metadata": {}, + "outputs": [], + "source": [ + "mean_generation_time = main()\n", + "serve.shutdown()\n" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "fb124968", + "metadata": {}, + "source": [ + "You've successfully served a stable diffusion model!\n", + "You can modify this template and iterate your model deployment directly on your cluster within your Anyscale Workspace,\n", + "testing with the local endpoint." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "49894fe3", + "metadata": { + "tags": [ + "remove-cell" + ] + }, + "outputs": [], + "source": [ + "import json\n", + "import os\n", + "\n", + "release_test_out = os.environ.get(\"TEST_OUTPUT_JSON\", \"/tmp/release_test_out.json\")\n", + "\n", + "with open(release_test_out, \"wt\") as f:\n", + " json.dump({\"mean_generation_time\": mean_generation_time}, f)\n" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "ray_dev_py38", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.13" + }, + "vscode": { + "interpreter": { + "hash": "265d195fda5292fe8f69c6e37c435a5634a1ed3b6799724e66a975f68fa21517" + } + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/doc/source/templates/validate.py b/doc/source/templates/validate.py index 04c2b07b10a9..28032516cc0c 100644 --- a/doc/source/templates/validate.py +++ b/doc/source/templates/validate.py @@ -2,50 +2,153 @@ from pathlib import Path import yaml -# ray/doc/source/examples/ -> ray -ray_root_path = (Path(__file__).parent / ".." / ".." / "..").resolve() -templates_catalog_path = Path(__file__).parent / "templates.yaml" -with open(templates_catalog_path, "r") as f: - templates = yaml.safe_load(f) +def get_root_path() -> Path: + """ + If we're running from a Ray repo, and just use the + current file to get the doc directory. + ray/doc/source/examples/ -> ray/ -invalid = collections.defaultdict(list) + For CI, the current file location is: + `/doc/source/examples/validate.py` + We can get the "ray root dir" in the same way: + /doc/source/examples -> / + """ + root_path = Path(__file__).parent / ".." / ".." / ".." 
+ return root_path.resolve() -required_fields = {"name", "path", "cluster_env", "small", "large"} -for i, template in enumerate(templates): - name = template.get("name", i) - missing_fields = set(template) - required_fields - assert not missing_fields, f"Missing fields for {name}: {missing_fields}" +def validate_templates_yaml_schema(templates) -> dict: + all_missing_fields = {} + required_fields = {"title", "description", "path", "cluster_env", "compute_config"} - rel_path = template["path"] - if not (ray_root_path / rel_path).exists(): - invalid[name].append(rel_path) + for template_name, template_config in templates.items(): + # ======= Schema check for templates.yaml ======== + missing_fields = required_fields - set(template_config) + if missing_fields: + all_missing_fields[template_name] = missing_fields + continue - rel_path = template["cluster_env"] - if not (ray_root_path / rel_path).exists(): - invalid[name].append(rel_path) + return all_missing_fields - required_per_size = {"compute_config"} - sizes = ["small", "large"] - for size in sizes: - configs = template[size] - missing = set(configs) - required_per_size - assert not missing, f"Missing fields for {name} ({size}): {missing_fields}" +def validate_template_paths(templates, invalid_paths) -> None: + root_path = get_root_path() - rel_paths = list(configs["compute_config"].values()) + for template_name, template_config in templates.items(): + if "path" not in template_config: + continue + + # The yaml specifies relative paths to the ray root directory + rel_path = template_config["path"] + if not (root_path / rel_path).exists(): + invalid_paths[template_name].append(rel_path) + + +def validate_cluster_envs(templates, invalid_paths, invalid_yamls) -> None: + root_path = get_root_path() + + for template_name, template_config in templates.items(): + if "cluster_env" not in template_config: + continue + + rel_path = template_config["cluster_env"] + cluster_env_path = root_path / rel_path + if not 
cluster_env_path.exists(): + invalid_paths[template_name].append(rel_path) + else: + try: + # Assert that the yaml file is properly formatted. + with open(cluster_env_path, "r") as f: + yaml.safe_load(f) + except yaml.parser.ParserError as e: + invalid_yamls[template_name].append(str(e)) + + +def validate_compute_configs(templates, invalid_paths, invalid_yamls) -> dict: + root_path = get_root_path() + required_cloud_providers = {"AWS", "GCP"} + + all_missing_providers = {} + + for template_name, template_config in templates.items(): + if "compute_config" not in template_config: + continue + + compute_config_per_provider = template_config["compute_config"] + + missing_providers = required_cloud_providers - set(compute_config_per_provider) + if missing_providers: + all_missing_providers[template_name] = missing_providers + continue + + rel_paths = list(compute_config_per_provider.values()) for rel_path in rel_paths: - if not (ray_root_path / rel_path).exists(): - invalid[name].append(rel_path) - -if invalid: - print("VALIDATION FAILED!! Please fix the paths listed below:\n\n") - - for name, invalid_paths in invalid.items(): - print("Template Name:", name) - for path in invalid_paths: - print("-", path) - print() -else: - print("Success!") + compute_config_path = root_path / rel_path + if not compute_config_path.exists(): + invalid_paths[template_name].append(rel_path) + else: + try: + # Assert that the yaml file is properly formatted. 
+ with open(compute_config_path, "r") as f: + yaml.safe_load(f) + except yaml.parser.ParserError as e: + invalid_yamls[template_name].append(str(e)) + + return all_missing_providers + + +if __name__ == "__main__": + root_path = get_root_path() + templates_catalog_path = root_path / "doc/source/templates/templates.yaml" + + with open(templates_catalog_path, "r") as f: + templates = yaml.safe_load(f) + + invalid_paths = collections.defaultdict(list) + invalid_yamls = collections.defaultdict(list) + + all_missing_fields = validate_templates_yaml_schema(templates) + validate_template_paths(templates, invalid_paths) + validate_cluster_envs(templates, invalid_paths, invalid_yamls) + all_missing_providers = validate_compute_configs( + templates, invalid_paths, invalid_yamls + ) + + # ======= Print an informative error message. ======== + if any([all_missing_fields, all_missing_providers, invalid_paths, invalid_yamls]): + msg = "TEMPLATES VALIDATION FAILED!! Please fix the issues listed below:\n\n" + + if all_missing_fields: + msg += "Please supply missing fields in `templates.yaml`:\n" + for template_name, missing_fields in all_missing_fields.items(): + msg += f"- {template_name}: {missing_fields}\n" + + if all_missing_providers: + msg += ( + "\nPlease supply paths to compute configs for these cloud providers " + "in `templates.yaml`:\n" + ) + for template_name, missing_providers in all_missing_providers.items(): + msg += f"- {template_name}: {missing_providers}\n" + + if invalid_paths: + msg += "\nPlease fix invalid paths in `templates.yaml`:\n" + for template_name, invalid_paths_for_template in invalid_paths.items(): + msg += f"- {template_name}:\n" + msg += "\n".join([f"\t- {path}" for path in invalid_paths_for_template]) + msg += "\n" + + if invalid_yamls: + msg += "\nPlease fix invalid configuration yamls:\n" + for template_name, invalid_yamls_per_template in invalid_yamls.items(): + msg += f"- {template_name}:\n\n" + msg += "\n\n".join( + f"{i + 1}. 
{invalid_yaml}" + for i, invalid_yaml in enumerate(invalid_yamls_per_template) + ) + msg += "\n\n" + + raise ValueError(msg) + else: + print("Success!") diff --git a/python/ray/train/base_trainer.py b/python/ray/train/base_trainer.py index 108cbb592872..bd9f47da83f2 100644 --- a/python/ray/train/base_trainer.py +++ b/python/ray/train/base_trainer.py @@ -462,7 +462,7 @@ def _maybe_sync_down_trainer_state(cls, restore_path: str) -> Path: """Sync down trainer state from remote storage. Returns: - local_dir of the synced trainer state + str: Local directory containing the trainer state """ if not is_non_local_path_uri(restore_path): return Path(os.path.expanduser(restore_path)) / _TRAINER_PKL diff --git a/release/release_tests.yaml b/release/release_tests.yaml index 18ff12e9e274..b3b9ebbb44da 100644 --- a/release/release_tests.yaml +++ b/release/release_tests.yaml @@ -10,7 +10,10 @@ # working_dir: example_dir # # # How often to run the tests. -# # One of [manual, any, multi, nightly, weekly]. +# # One of [manual, any, multi, nightly, nightly-3x, weekly]. +# # Descriptions of each frequency (that's not immediately obvious): +# # - manual: Not run on a schedule, but can be manually run through the buildkite UI. +# # - nightly-3x: Run 3 times a week (Monday, Wednesday, Friday). # frequency: weekly # # Owning team. 
This field will be persisted to the database # team: ml @@ -834,80 +837,77 @@ # Workspace templates release tests # ##################################### -- name: workspace_template_small_01_batch_inference +- name: workspace_template_batch_inference group: Workspace templates - working_dir: workspace_templates/01_batch_inference + working_dir: workspace_templates/tests python: "3.9" - frequency: nightly + frequency: nightly-3x team: ml cluster: cluster_env: ../configs/release_test_cluster_env.yaml - cluster_compute: ../configs/compute/gpu/aws_small.yaml + cluster_compute: ../configs/compute/gpu/aws_release_test.yaml run: timeout: 600 - script: jupyter nbconvert --TagRemovePreprocessor.remove_input_tags='large' - --to script --output _test batch_inference.ipynb && ipython _test.py + script: jupyter nbconvert --to script --output _test batch_inference.ipynb && ipython _test.py variations: - - __suffix__: aws - - __suffix__: gce - env: gce - frequency: manual - cluster: - cluster_env: ../configs/release_test_cluster_env.yaml - cluster_compute: ../configs/compute/gpu/gcp_small.yaml + - __suffix__: aws + - __suffix__: gce + env: gce + frequency: manual + cluster: + cluster_env: ../configs/release_test_cluster_env.yaml + cluster_compute: ../configs/compute/gpu/gce_release_test.yaml -- name: workspace_template_small_02_many_model_training +- name: workspace_template_many_model_training group: Workspace templates - working_dir: workspace_templates/02_many_model_training + working_dir: workspace_templates/tests python: "3.9" - frequency: nightly + frequency: nightly-3x team: ml cluster: cluster_env: ../configs/release_test_cluster_env.yaml - cluster_compute: ../configs/compute/cpu/aws_small.yaml + cluster_compute: ../configs/compute/cpu/aws_release_test.yaml run: - timeout: 300 - script: pip install -U -r requirements.txt - && jupyter nbconvert --TagRemovePreprocessor.remove_input_tags='large' - --to script --output _test many_model_training.ipynb && ipython _test.py + 
timeout: 600 + script: jupyter nbconvert --to script --output _test many_model_training.ipynb && ipython _test.py variations: - - __suffix__: aws - - __suffix__: gce - env: gce - frequency: manual - cluster: - cluster_env: ../configs/release_test_cluster_env.yaml - cluster_compute: ../configs/compute/cpu/gcp_small.yaml + - __suffix__: aws + - __suffix__: gce + env: gce + frequency: manual + cluster: + cluster_env: ../configs/release_test_cluster_env.yaml + cluster_compute: ../configs/compute/cpu/gce_release_test.yaml -- name: workspace_template_small_03_serving_stable_diffusion + +- name: workspace_template_serving_stable_diffusion group: Workspace templates - working_dir: workspace_templates/03_serving_stable_diffusion + working_dir: workspace_templates/tests python: "3.9" - frequency: nightly + frequency: nightly-3x team: ml cluster: cluster_env: ../configs/release_test_cluster_env.yaml - cluster_compute: ../configs/compute/gpu/aws_small.yaml + cluster_compute: ../configs/compute/gpu/aws_release_test.yaml run: timeout: 900 - script: pip install -U -r requirements.txt - && jupyter nbconvert --TagRemovePreprocessor.remove_input_tags='large' - --to script --output _test serving_stable_diffusion.ipynb && ipython _test.py + script: jupyter nbconvert --to script --output _test serving_stable_diffusion.ipynb && ipython _test.py variations: - - __suffix__: aws - - __suffix__: gce - env: gce - frequency: manual - cluster: - cluster_env: ../configs/release_test_cluster_env.yaml - cluster_compute: ../configs/compute/gpu/gcp_small.yaml + - __suffix__: aws + - __suffix__: gce + env: gce + frequency: manual + cluster: + cluster_env: ../configs/release_test_cluster_env.yaml + cluster_compute: ../configs/compute/gpu/gce_release_test.yaml + ####################### # XGBoost release tests From 1a76efbcd08605bf42371252d2f15611fe5b60b3 Mon Sep 17 00:00:00 2001 From: Larry <554538252@qq.com> Date: Thu, 27 Apr 2023 10:20:58 +0800 Subject: [PATCH 116/424] [Core] Put pg state to kv 
store when pg rescheduling (#34467) When a PG fails over but has not been scheduled successfully, the restart of gcs will cause the PG to no longer be rescheduled. A node is down, triggering the rescheduling of the PG bundle on this node However, due to insufficient resources, this PG bunlde cannot be scheduled successfully The gcs server sent FO In the end, even if the resources are sufficient, the PG bundle is still not rescheduled. Reproduce command: pytest -sv python/ray/tests/test_placement_group_failover.py::test_gcs_restart_when_placement_group_failover Because the rescheduling state of PG is lost when gcs restarts. solution: It is necessary to save the PG to kvstore when the PG is changed to the rescheduling state. --- python/ray/_private/state.py | 2 + .../tests/test_placement_group_failover.py | 71 ++++++++++++++++++- .../gcs_server/gcs_placement_group_manager.cc | 11 ++- .../test/gcs_placement_group_manager_test.cc | 2 + 4 files changed, 80 insertions(+), 6 deletions(-) diff --git a/python/ray/_private/state.py b/python/ray/_private/state.py index 40e94e9c5db5..2fa0dcf031f9 100644 --- a/python/ray/_private/state.py +++ b/python/ray/_private/state.py @@ -301,6 +301,8 @@ def get_state(state): return "PENDING" elif state == gcs_utils.PlacementGroupTableData.CREATED: return "CREATED" + elif state == gcs_utils.PlacementGroupTableData.RESCHEDULING: + return "RESCHEDULING" else: return "REMOVED" diff --git a/python/ray/tests/test_placement_group_failover.py b/python/ray/tests/test_placement_group_failover.py index 3bbe88536443..b8a7841eec48 100755 --- a/python/ray/tests/test_placement_group_failover.py +++ b/python/ray/tests/test_placement_group_failover.py @@ -2,9 +2,7 @@ import sys import ray import ray.cluster_utils -from ray._private.test_utils import ( - get_other_nodes, -) +from ray._private.test_utils import get_other_nodes, wait_for_condition MB = 1024 * 1024 @@ -58,5 +56,72 @@ def test_placement_group_failover_when_two_nodes_die(monkeypatch, 
ray_start_clus ray.get(object_ref, timeout=5) +def test_gcs_restart_when_placement_group_failover( + ray_start_cluster_head_with_external_redis, +): + @ray.remote(num_cpus=1) + class Actor(object): + def __init__(self): + self.n = 0 + + def value(self): + return self.n + + cluster = ray_start_cluster_head_with_external_redis + num_nodes = 3 + nodes = [] + for _ in range(num_nodes - 1): + nodes.append(cluster.add_node(num_cpus=1)) + + # Make sure the placement group is ready. + bundles = [{"CPU": 1, "memory": 100 * MB} for _ in range(num_nodes)] + placement_group = ray.util.placement_group( + name="name", strategy="STRICT_SPREAD", bundles=bundles + ) + assert placement_group.wait(5000) + actors = [] + for i in range(num_nodes): + actor = Actor.options( + placement_group=placement_group, + placement_group_bundle_index=i, + max_restarts=-1, + ).remote() + object_ref = actor.value.remote() + ray.get(object_ref, timeout=5) + actors.append(actor) + + # Simulate a node dead. + other_nodes = get_other_nodes(cluster, exclude_head=True) + cluster.remove_node(other_nodes[0]) + + # Make sure placement group state change to rescheduling. + def _check_pg_whether_be_reschedule(): + table = ray.util.placement_group_table(placement_group) + return table["state"] == "RESCHEDULING" + + wait_for_condition( + _check_pg_whether_be_reschedule, timeout=5, retry_interval_ms=1000 + ) + + # Simulate gcs restart. + cluster.head_node.kill_gcs_server() + cluster.head_node.start_gcs_server() + + cluster.add_node(num_cpus=1) + cluster.wait_for_nodes() + + # Check placement gorup reschedule success after gcs server restart. 
+ def _check_actor_with_pg_is_ready(): + try: + for actor in actors: + object_ref = actor.value.remote() + ray.get(object_ref, timeout=5) + return True + except Exception: + return False + + wait_for_condition(_check_actor_with_pg_is_ready, timeout=5, retry_interval_ms=1000) + + if __name__ == "__main__": sys.exit(pytest.main(["-v", __file__])) diff --git a/src/ray/gcs/gcs_server/gcs_placement_group_manager.cc b/src/ray/gcs/gcs_server/gcs_placement_group_manager.cc index 2851fe41f494..6c222727ee60 100644 --- a/src/ray/gcs/gcs_server/gcs_placement_group_manager.cc +++ b/src/ray/gcs/gcs_server/gcs_placement_group_manager.cc @@ -756,11 +756,13 @@ void GcsPlacementGroupManager::OnNodeDead(const NodeID &node_id) { iter->second->GetMutableStats()->set_scheduling_state( rpc::PlacementGroupStats::QUEUED); AddToPendingQueue(iter->second, 0); + RAY_CHECK_OK(gcs_table_storage_->PlacementGroupTable().Put( + iter->second->GetPlacementGroupID(), + iter->second->GetPlacementGroupTableData(), + [this](Status status) { SchedulePendingPlacementGroups(); })); } } } - - SchedulePendingPlacementGroups(); } void GcsPlacementGroupManager::OnNodeAdd(const NodeID &node_id) { @@ -966,7 +968,10 @@ bool GcsPlacementGroupManager::RescheduleIfStillHasUnplacedBundles( << placement_group->GetPlacementGroupID(); placement_group->UpdateState(rpc::PlacementGroupTableData::RESCHEDULING); AddToPendingQueue(placement_group, 0); - SchedulePendingPlacementGroups(); + RAY_CHECK_OK(gcs_table_storage_->PlacementGroupTable().Put( + placement_group->GetPlacementGroupID(), + placement_group->GetPlacementGroupTableData(), + [this](Status status) { SchedulePendingPlacementGroups(); })); return true; } } diff --git a/src/ray/gcs/gcs_server/test/gcs_placement_group_manager_test.cc b/src/ray/gcs/gcs_server/test/gcs_placement_group_manager_test.cc index 82d46f13f145..e0cdced97ae6 100644 --- a/src/ray/gcs/gcs_server/test/gcs_placement_group_manager_test.cc +++ 
b/src/ray/gcs/gcs_server/test/gcs_placement_group_manager_test.cc @@ -462,6 +462,7 @@ TEST_F(GcsPlacementGroupManagerTest, TestReschedulingRetry) { placement_group->GetPlacementGroupID(); mock_placement_group_scheduler_->bundles_on_dead_node_.push_back(0); gcs_placement_group_manager_->OnNodeDead(NodeID::FromRandom()); + WaitUntilIoServiceDone(); const auto &bundles = mock_placement_group_scheduler_->placement_groups_[0]->GetBundles(); EXPECT_TRUE(NodeID::FromBinary(bundles[0]->GetMessage().node_id()).IsNil()); @@ -503,6 +504,7 @@ TEST_F(GcsPlacementGroupManagerTest, TestRescheduleWhenNodeDead) { placement_group->GetPlacementGroupID(); mock_placement_group_scheduler_->bundles_on_dead_node_.push_back(0); gcs_placement_group_manager_->OnNodeDead(NodeID::FromRandom()); + WaitUntilIoServiceDone(); ASSERT_EQ(mock_placement_group_scheduler_->placement_groups_[0]->GetPlacementGroupID(), placement_group->GetPlacementGroupID()); const auto &bundles = From c3260d8e4d570c20ef52690a9659e51a6d97ea42 Mon Sep 17 00:00:00 2001 From: Jonathan Carter <42900403+joncarter1@users.noreply.github.com> Date: Thu, 27 Apr 2023 04:54:04 +0100 Subject: [PATCH 117/424] [no_early_kickoff][Core][Dashboard]Configurable dashboard RPC port and public IP (#33541) - Makes the dashboard head process' RPC port configurable using the --dashboard-grpc-port CLI flag or RAY_DASHBOARD_RPC_PORT env var. - Makes the dashboard's public IP address (put in the GCS) use --node-ip-address from the head node if set. This is necessary because the DASHBOARD_RPC_ADDRESS put into the GCS currently uses ray.util.get_node_ip_address() even if --node-ip-address is set. This does not work in Docker containers with an isolated network as the function will return the container IP. 
Together these changes allow Ray to work in Docker containers which are isolated from the host network (closes #33534 ) Signed-off-by: Jonathan Carter --- dashboard/consts.py | 1 + dashboard/dashboard.py | 29 +++++++++++++++++++---- dashboard/head.py | 8 ++++--- dashboard/tests/test_dashboard.py | 31 ++++++++++++++---------- doc/source/ray-core/configure.rst | 3 ++- python/ray/_private/node.py | 10 +++++++- python/ray/_private/parameter.py | 6 +++++ python/ray/_private/services.py | 9 +++++++ python/ray/scripts/scripts.py | 8 +++++++ python/ray/tests/test_dashboard.py | 38 ++++++++++++++++++++++++++++-- 10 files changed, 120 insertions(+), 23 deletions(-) diff --git a/dashboard/consts.py b/dashboard/consts.py index df345acced28..360a332570b2 100644 --- a/dashboard/consts.py +++ b/dashboard/consts.py @@ -25,6 +25,7 @@ PURGE_DATA_INTERVAL_SECONDS = 60 * 10 ORGANIZE_DATA_INTERVAL_SECONDS = 2 DASHBOARD_RPC_ADDRESS = "dashboard_rpc" +DASHBOARD_RPC_PORT = env_integer("RAY_DASHBOARD_RPC_PORT", 0) GCS_SERVER_ADDRESS = "GcsServerAddress" # GCS check alive GCS_CHECK_ALIVE_MAX_COUNT_OF_RPC_ERROR = env_integer( diff --git a/dashboard/dashboard.py b/dashboard/dashboard.py index 55f465bf024f..4732e96d23ee 100644 --- a/dashboard/dashboard.py +++ b/dashboard/dashboard.py @@ -35,6 +35,8 @@ class Dashboard: port: Port number of dashboard aiohttp server. port_retries: The retry times to select a valid port. gcs_address: GCS address of the cluster + grpc_port: Port used to listen for gRPC on. + node_ip_address: The IP address of the dashboard. serve_frontend: If configured, frontend HTML is not served from the dashboard. log_dir: Log directory of dashboard. 
@@ -46,6 +48,8 @@ def __init__( port: int, port_retries: int, gcs_address: str, + grpc_port: int, + node_ip_address: str, log_dir: str = None, temp_dir: str = None, session_dir: str = None, @@ -58,6 +62,8 @@ def __init__( http_port=port, http_port_retries=port_retries, gcs_address=gcs_address, + node_ip_address=node_ip_address, + grpc_port=grpc_port, log_dir=log_dir, temp_dir=temp_dir, session_dir=session_dir, @@ -88,6 +94,19 @@ async def run(self): parser.add_argument( "--gcs-address", required=True, type=str, help="The address (ip:port) of GCS." ) + parser.add_argument( + "--grpc-port", + required=False, + type=int, + default=dashboard_consts.DASHBOARD_RPC_PORT, + help="The port for the dashboard to listen for gRPC on.", + ) + parser.add_argument( + "--node-ip-address", + required=True, + type=str, + help="The IP address of the node where this is running.", + ) parser.add_argument( "--logging-level", required=False, @@ -200,10 +219,12 @@ async def run(self): # https://github.com/grpc/grpc/blob/master/src/python/grpcio/grpc/_cython/_cygrpc/aio/common.pyx.pxi#L174-L188 loop = ray._private.utils.get_or_create_event_loop() dashboard = Dashboard( - args.host, - args.port, - args.port_retries, - args.gcs_address, + host=args.host, + port=args.port, + port_retries=args.port_retries, + gcs_address=args.gcs_address, + grpc_port=args.grpc_port, + node_ip_address=args.node_ip_address, log_dir=args.log_dir, temp_dir=args.temp_dir, session_dir=args.session_dir, diff --git a/dashboard/head.py b/dashboard/head.py index ec160bfdc2ad..acf70961f969 100644 --- a/dashboard/head.py +++ b/dashboard/head.py @@ -75,6 +75,8 @@ def __init__( http_port: int, http_port_retries: int, gcs_address: str, + node_ip_address: str, + grpc_port: int, log_dir: str, temp_dir: str, session_dir: str, @@ -94,6 +96,7 @@ def __init__( minimal: Whether or not it will load the minimal modules. serve_frontend: If configured, frontend HTML is served from the dashboard. 
+ grpc_port: The port used to listen for gRPC on. modules_to_load: A set of module name in string to load. By default (None), it loads all available modules. Note that available modules could be changed depending on @@ -124,14 +127,13 @@ def __init__( self.gcs_aio_client = None self.gcs_error_subscriber = None self.gcs_log_subscriber = None - self.ip = ray.util.get_node_ip_address() + self.ip = node_ip_address DataOrganizer.head_node_ip = self.ip - ip, port = gcs_address.split(":") self.server = aiogrpc.server(options=(("grpc.so_reuseport", 0),)) grpc_ip = "127.0.0.1" if self.ip == "127.0.0.1" else "0.0.0.0" self.grpc_port = ray._private.tls_utils.add_port_to_grpc_server( - self.server, f"{grpc_ip}:0" + self.server, f"{grpc_ip}:{grpc_port}" ) logger.info("Dashboard head grpc address: %s:%s", grpc_ip, self.grpc_port) # If the dashboard is started as non-minimal version, http server should diff --git a/dashboard/tests/test_dashboard.py b/dashboard/tests/test_dashboard.py index 19d71ffddf43..bb0a3b2e4dce 100644 --- a/dashboard/tests/test_dashboard.py +++ b/dashboard/tests/test_dashboard.py @@ -766,13 +766,14 @@ def test_dashboard_port_conflict(ray_start_with_dashboard): f"--log-dir={log_dir}", f"--gcs-address={address_info['gcs_address']}", f"--session-dir={session_dir}", + "--node-ip-address=127.0.0.1", ] logger.info("The dashboard should be exit: %s", dashboard_cmd) - p = subprocess.Popen(dashboard_cmd) - p.wait(5) + dashboard_process = subprocess.Popen(dashboard_cmd) + dashboard_process.wait(5) dashboard_cmd.append("--port-retries=10") - subprocess.Popen(dashboard_cmd) + conflicting_dashboard_process = subprocess.Popen(dashboard_cmd) timeout_seconds = 10 start_time = time.time() @@ -792,6 +793,10 @@ def test_dashboard_port_conflict(ray_start_with_dashboard): finally: if time.time() > start_time + timeout_seconds: raise Exception("Timed out while testing.") + dashboard_process.kill() + conflicting_dashboard_process.kill() + dashboard_process.wait() + 
conflicting_dashboard_process.wait() @pytest.mark.skipif( @@ -984,15 +989,17 @@ def test_dashboard_requests_fail_on_missing_deps(ray_start_with_dashboard): def test_dashboard_module_load(tmpdir): """Verify if the head module can load only selected modules.""" head = DashboardHead( - "127.0.0.1", - 8265, - 1, - "127.0.0.1:6379", - str(tmpdir), - str(tmpdir), - str(tmpdir), - False, - True, + http_host="127.0.0.1", + http_port=8265, + http_port_retries=1, + node_ip_address="127.0.0.1", + gcs_address="127.0.0.1:6379", + grpc_port=0, + log_dir=str(tmpdir), + temp_dir=str(tmpdir), + session_dir=str(tmpdir), + minimal=False, + serve_frontend=True, ) # Test basic. diff --git a/doc/source/ray-core/configure.rst b/doc/source/ray-core/configure.rst index 6a2c5c78272d..572762554aa5 100644 --- a/doc/source/ray-core/configure.rst +++ b/doc/source/ray-core/configure.rst @@ -96,7 +96,7 @@ Look :ref:`Logging Directory Structure ` for more d Ports configurations -------------------- -Ray requires bi-directional communication among its nodes in a cluster. Each of node is supposed to open specific ports to receive incoming network requests. +Ray requires bi-directional communication among its nodes in a cluster. Each node opens specific ports to receive incoming network requests. All Nodes ~~~~~~~~~ @@ -127,6 +127,7 @@ In addition to ports specified above, the head node needs to open several more p - ``--port``: Port of Ray (GCS server). The head node will start a GCS server listening on this port. Default: 6379. - ``--ray-client-server-port``: Listening port for Ray Client Server. Default: 10001. - ``--redis-shard-ports``: Comma-separated list of ports for non-primary Redis shards. Default: Random values. +- ``--dashboard-grpc-port``: The gRPC port used by the dashboard. Default: Random value. - If ``--include-dashboard`` is true (the default), then the head node must open ``--dashboard-port``. Default: 8265. 
diff --git a/python/ray/_private/node.py b/python/ray/_private/node.py index 0c6339a85b7f..ab1e044763f5 100644 --- a/python/ray/_private/node.py +++ b/python/ray/_private/node.py @@ -150,6 +150,7 @@ def __init__( self._config = ray_params._system_config or {} self._dashboard_agent_listen_port = ray_params.dashboard_agent_listen_port + self._dashboard_grpc_port = ray_params.dashboard_grpc_port # Configure log rotation parameters. self.max_bytes = int( @@ -546,6 +547,11 @@ def dashboard_agent_listen_port(self): """Get the dashboard agent's listen port""" return self._dashboard_agent_listen_port + @property + def dashboard_grpc_port(self): + """Get the dashboard head grpc port""" + return self._dashboard_grpc_port + @property def logging_config(self): """Get the logging config of the current node.""" @@ -927,13 +933,15 @@ def start_api_server(self, *, include_dashboard: bool, raise_on_failure: bool): raise_on_failure, self._ray_params.dashboard_host, self.gcs_address, + self._node_ip_address, self._temp_dir, self._logs_dir, self._session_dir, + port=self._ray_params.dashboard_port, + dashboard_grpc_port=self._ray_params.dashboard_grpc_port, fate_share=self.kernel_fate_share, max_bytes=self.max_bytes, backup_count=self.backup_count, - port=self._ray_params.dashboard_port, redirect_logging=self.should_redirect_logs(), stdout_file=stderr_file, stderr_file=stderr_file, diff --git a/python/ray/_private/parameter.py b/python/ray/_private/parameter.py index e89b9f9216d0..9dbd7f68b864 100644 --- a/python/ray/_private/parameter.py +++ b/python/ray/_private/parameter.py @@ -87,6 +87,9 @@ class RayParams: dashboard_agent_listen_port: The port for dashboard agents to listen on for HTTP requests. Defaults to 52365. + dashboard_grpc_port: The port for the dashboard head process to listen + for gRPC on. + Defaults to random available port. plasma_store_socket_name: If provided, it will specify the socket name used by the plasma store. 
raylet_socket_name: If provided, it will specify the socket path @@ -159,6 +162,7 @@ def __init__( dashboard_agent_listen_port: Optional[ int ] = ray_constants.DEFAULT_DASHBOARD_AGENT_LISTEN_PORT, + dashboard_grpc_port: Optional[int] = None, plasma_store_socket_name: Optional[str] = None, raylet_socket_name: Optional[str] = None, temp_dir: Optional[str] = None, @@ -211,6 +215,7 @@ def __init__( self.dashboard_host = dashboard_host self.dashboard_port = dashboard_port self.dashboard_agent_listen_port = dashboard_agent_listen_port + self.dashboard_grpc_port = dashboard_grpc_port self.plasma_store_socket_name = plasma_store_socket_name self.raylet_socket_name = raylet_socket_name self.temp_dir = temp_dir @@ -298,6 +303,7 @@ def wrap_port(port): "dashboard": wrap_port(self.dashboard_port), "dashboard_agent_grpc": wrap_port(self.metrics_agent_port), "dashboard_agent_http": wrap_port(self.dashboard_agent_listen_port), + "dashboard_grpc": wrap_port(self.dashboard_grpc_port), "metrics_export": wrap_port(self.metrics_export_port), } redis_shard_ports = self.redis_shard_ports diff --git a/python/ray/_private/services.py b/python/ray/_private/services.py index 558762681f90..cc7d620f44cd 100644 --- a/python/ray/_private/services.py +++ b/python/ray/_private/services.py @@ -1038,10 +1038,12 @@ def start_api_server( raise_on_failure: bool, host: str, gcs_address: str, + node_ip_address: str, temp_dir: str, logdir: str, session_dir: str, port: Optional[int] = None, + dashboard_grpc_port: Optional[int] = None, fate_share: Optional[bool] = None, max_bytes: int = 0, backup_count: int = 0, @@ -1060,6 +1062,7 @@ def start_api_server( a warning if we fail to start the API server. host: The host to bind the dashboard web server to. gcs_address: The gcs address the dashboard should connect to + node_ip_address: The IP address where this is running. temp_dir: The temporary directory used for log files and information for this Ray session. session_dir: The session directory under temp_dir. 
@@ -1067,6 +1070,8 @@ def start_api_server( logdir: The log directory used to generate dashboard log. port: The port to bind the dashboard web server to. Defaults to 8265. + dashboard_grpc_port: The port which the dashboard listens for + gRPC on. Defaults to a random, available port. max_bytes: Log rotation parameter. Corresponding to RotatingFileHandler's maxBytes. backup_count: Log rotation parameter. Corresponding to @@ -1132,6 +1137,7 @@ def start_api_server( f"--logging-rotate-bytes={max_bytes}", f"--logging-rotate-backup-count={backup_count}", f"--gcs-address={gcs_address}", + f"--node-ip-address={node_ip_address}", ] if not redirect_logging: @@ -1158,6 +1164,9 @@ def start_api_server( command.append("--modules-to-load=UsageStatsHead") command.append("--disable-frontend") + if dashboard_grpc_port is not None: + command.append(f"--grpc-port={dashboard_grpc_port}") + process_info = start_ray_process( command, ray_constants.PROCESS_TYPE_DASHBOARD, diff --git a/python/ray/scripts/scripts.py b/python/ray/scripts/scripts.py index 6cb85e53704d..d183522d1809 100644 --- a/python/ray/scripts/scripts.py +++ b/python/ray/scripts/scripts.py @@ -430,6 +430,12 @@ def debug(address): default=None, help="the port for dashboard agents to listen for grpc on.", ) +@click.option( + "--dashboard-grpc-port", + type=int, + default=None, + help="The port for the dashboard head to listen for grpc on.", +) @click.option( "--block", is_flag=True, @@ -552,6 +558,7 @@ def start( dashboard_port, dashboard_agent_listen_port, dashboard_agent_grpc_port, + dashboard_grpc_port, block, plasma_directory, autoscaling_config, @@ -638,6 +645,7 @@ def start( dashboard_port=dashboard_port, dashboard_agent_listen_port=dashboard_agent_listen_port, metrics_agent_port=dashboard_agent_grpc_port, + dashboard_grpc_port=dashboard_grpc_port, _system_config=system_config, enable_object_reconstruction=enable_object_reconstruction, metrics_export_port=metrics_export_port, diff --git 
a/python/ray/tests/test_dashboard.py b/python/ray/tests/test_dashboard.py index bef335b519e4..dad7ed17c7ea 100644 --- a/python/ray/tests/test_dashboard.py +++ b/python/ray/tests/test_dashboard.py @@ -98,7 +98,6 @@ def dashboard_available(): indirect=True, ) def test_port_conflict(listen_port, call_ray_stop_only, shutdown_only): - try: subprocess.check_output( [ @@ -151,6 +150,7 @@ def test_dashboard(shutdown_only): conflict_port = 34567 +configured_test_port = 34568 def run_tasks_without_runtime_env(): @@ -195,16 +195,50 @@ def f(): def test_dashboard_agent_grpc_port_conflict(listen_port, call_ray_start): address = call_ray_start ray.init(address=address) + # Tasks without runtime env still work when dashboard agent grpc port conflicts. run_tasks_without_runtime_env() # Tasks with runtime env couldn't work. with pytest.raises( ray.exceptions.RuntimeEnvSetupError, - match="the grpc service of agent is invalid", + match="Ray agent couldn't be started due to the port conflict", ): run_tasks_with_runtime_env() +@pytest.mark.parametrize( + "call_ray_start", + [f"ray start --head --num-cpus=1 --dashboard-grpc-port={configured_test_port}"], + indirect=True, +) +def test_configured_dashboard_grpc_port(call_ray_start): + address = call_ray_start + addresses = ray.init(address=address) + assert addresses.dashboard_url == "127.0.0.1:8265" + + +@pytest.mark.parametrize( + "listen_port", + [conflict_port], + indirect=True, +) +def test_dashboard_grpc_port_conflict(listen_port, call_ray_stop_only, shutdown_only): + try: + subprocess.check_output( + [ + "ray", + "start", + "--head", + "--dashboard-grpc-port", + f"{conflict_port}", + "--include-dashboard=True", + ], + stderr=subprocess.PIPE, + ) + except subprocess.CalledProcessError as e: + assert f"Failed to bind to address 0.0.0.0:{conflict_port}".encode() in e.stderr + + @pytest.mark.skipif( sys.platform == "win32", reason="`runtime_env` with `pip` not supported on Windows." 
) From 14a88586eb2e87839a2b530e26b5ffcf8ab2e394 Mon Sep 17 00:00:00 2001 From: Kai Fricke Date: Thu, 27 Apr 2023 08:54:18 +0100 Subject: [PATCH 118/424] [air/tune] use ray-provided `tabulate` package (#34789) In #26159, we added the tabulate package to the Ray repo as a thirdparty dependency. However, we still import the external tabulate package in other parts of the codebase. This PR updates these imports and gets rid of the tabulate dependency. This PR also updates the shipped tabulate version to 0.9.0. Signed-off-by: Kai Fricke --- doc/requirements-doc.txt | 1 - doc/source/tune/api/cli.rst | 5 - docker/examples/Dockerfile | 1 - .../_private/thirdparty/tabulate/tabulate.py | 853 ++++++++++++++---- python/ray/air/config.py | 25 +- python/ray/data/datastream.py | 2 +- python/ray/tests/ludwig/ludwig_test_utils.py | 2 +- python/ray/train/data_parallel_trainer.py | 2 +- python/ray/tune/cli/commands.py | 6 +- python/ray/tune/examples/tune-default.yaml | 2 +- .../ray/tune/examples/tune-local-default.yaml | 2 +- python/ray/tune/experimental/output.py | 2 +- python/ray/tune/progress_reporter.py | 9 +- python/ray/tune/requirements-dev.txt | 1 - python/ray/tune/syncer.py | 9 +- python/ray/widgets/util.py | 9 +- python/requirements.txt | 1 - python/setup.py | 2 +- 18 files changed, 700 insertions(+), 234 deletions(-) diff --git a/doc/requirements-doc.txt b/doc/requirements-doc.txt index 3aa5e69bf7bb..4ec5ab31d03d 100644 --- a/doc/requirements-doc.txt +++ b/doc/requirements-doc.txt @@ -26,7 +26,6 @@ pyyaml scikit-optimize redis starlette -tabulate uvicorn==0.16.0 werkzeug wandb diff --git a/doc/source/tune/api/cli.rst b/doc/source/tune/api/cli.rst index b3d69a3f66f6..8e7ecc5bcac9 100644 --- a/doc/source/tune/api/cli.rst +++ b/doc/source/tune/api/cli.rst @@ -2,11 +2,6 @@ Tune CLI (Experimental) ======================= ``tune`` has an easy-to-use command line interface (CLI) to manage and monitor your experiments on Ray. 
-To do this, verify that you have the ``tabulate`` library installed: - -.. code-block:: bash - - $ pip install tabulate Here is an example command line call: diff --git a/docker/examples/Dockerfile b/docker/examples/Dockerfile index d97d40855c48..df7bc8c6d820 100644 --- a/docker/examples/Dockerfile +++ b/docker/examples/Dockerfile @@ -29,7 +29,6 @@ RUN pip install --no-cache-dir -U pip \ tensorboardX \ dragonfly-opt \ zoopt \ - tabulate \ mlflow \ pytest-remotedata>=0.3.1 \ matplotlib \ diff --git a/python/ray/_private/thirdparty/tabulate/tabulate.py b/python/ray/_private/thirdparty/tabulate/tabulate.py index 7452ad1025fc..83b1090ffaf9 100644 --- a/python/ray/_private/thirdparty/tabulate/tabulate.py +++ b/python/ray/_private/thirdparty/tabulate/tabulate.py @@ -1,70 +1,35 @@ # -*- coding: utf-8 -*- -# Version 0.8.10, commit 4892c6e9a79638c7897ccea68b602040da9cc7a7 +# Version 0.9.0, commit bf58e37e6b35e3cc9a0bd740f752abfd32b6e6f8 """Pretty-print tabular data.""" -from __future__ import print_function -from __future__ import unicode_literals from collections import namedtuple -import sys +from collections.abc import Iterable, Sized +from html import escape as htmlescape +from itertools import chain, zip_longest as izip_longest +from functools import reduce, partial +import io import re import math import textwrap - - -if sys.version_info >= (3, 3): - from collections.abc import Iterable -else: - from collections import Iterable - -if sys.version_info[0] < 3: - from itertools import izip_longest - from functools import partial - - _none_type = type(None) - _bool_type = bool - _int_type = int - _long_type = long # noqa - _float_type = float - _text_type = unicode # noqa - _binary_type = str - - def _is_file(f): - return hasattr(f, "read") - -else: - from itertools import zip_longest as izip_longest - from functools import reduce, partial - - _none_type = type(None) - _bool_type = bool - _int_type = int - _long_type = int - _float_type = float - _text_type = str - 
_binary_type = bytes - basestring = str - - import io - - def _is_file(f): - return isinstance(f, io.IOBase) - +import dataclasses try: import wcwidth # optional wide-character (CJK) support except ImportError: wcwidth = None -try: - from html import escape as htmlescape -except ImportError: - from cgi import escape as htmlescape + +def _is_file(f): + return isinstance(f, io.IOBase) __all__ = ["tabulate", "tabulate_formats", "simple_separated_format"] -__version__ = "0.8.10" +try: + from .version import version as __version__ # noqa: F401 +except ImportError: + pass # running __init__.py as a script, AppVeyor pytests # minimum extra space in headers @@ -74,6 +39,7 @@ def _is_file(f): PRESERVE_WHITESPACE = False _DEFAULT_FLOATFMT = "g" +_DEFAULT_INTFMT = "" _DEFAULT_MISSINGVAL = "" # default align will be overwritten by "left", "center" or "decimal" # depending on the formatter @@ -83,6 +49,9 @@ def _is_file(f): # if True, enable wide-character (CJK) support WIDE_CHARS_MODE = wcwidth is not None +# Constant that can be used as part of passed rows to generate a separating line +# It is purposely an unprintable character, very unlikely to be used in a table +SEPARATING_LINE = "\001" Line = namedtuple("Line", ["begin", "hline", "sep", "end"]) @@ -90,7 +59,7 @@ def _is_file(f): DataRow = namedtuple("DataRow", ["begin", "sep", "end"]) -# A table structure is suppposed to be: +# A table structure is supposed to be: # # --- lineabove --------- # headerrow @@ -136,6 +105,15 @@ def _is_file(f): ) +def _is_separating_line(row): + row_type = type(row) + is_sl = (row_type == list or row_type == str) and ( + (len(row) >= 1 and row[0] == SEPARATING_LINE) + or (len(row) >= 2 and row[1] == SEPARATING_LINE) + ) + return is_sl + + def _pipe_segment_with_colons(align, colwidth): """Return a segment of a horizontal line with optional colons which indicate column's alignment (as in `pipe` output format).""" @@ -206,7 +184,7 @@ def _html_row_with_attrs(celltag, unsafe, cell_values, 
colwidths, colaligns): ] rowhtml = "
    {}".format("".join(values_with_attrs).rstrip()) if celltag == "th": # it's a header row, create a new table header - rowhtml = "
    \n\n{}\n\n".format(rowhtml) + rowhtml = f"
    \n\n{rowhtml}\n\n" return rowhtml @@ -218,7 +196,7 @@ def _moin_row_with_attrs(celltag, cell_values, colwidths, colaligns, header=""): "decimal": '', } values_with_attrs = [ - "{0}{1} {2} ".format(celltag, alignment.get(a, ""), header + c + header) + "{}{} {} ".format(celltag, alignment.get(a, ""), header + c + header) for c, a in zip(cell_values, colaligns) ] return "".join(values_with_attrs) + "||" @@ -237,6 +215,59 @@ def _latex_line_begin_tabular(colwidths, colaligns, booktabs=False, longtable=Fa ) +def _asciidoc_row(is_header, *args): + """handle header and data rows for asciidoc format""" + + def make_header_line(is_header, colwidths, colaligns): + # generate the column specifiers + + alignment = {"left": "<", "right": ">", "center": "^", "decimal": ">"} + # use the column widths generated by tabulate for the asciidoc column width specifiers + asciidoc_alignments = zip( + colwidths, [alignment[colalign] for colalign in colaligns] + ) + asciidoc_column_specifiers = [ + "{:d}{}".format(width, align) for width, align in asciidoc_alignments + ] + header_list = ['cols="' + (",".join(asciidoc_column_specifiers)) + '"'] + + # generate the list of options (currently only "header") + options_list = [] + + if is_header: + options_list.append("header") + + if options_list: + header_list += ['options="' + ",".join(options_list) + '"'] + + # generate the list of entries in the table header field + + return "[{}]\n|====".format(",".join(header_list)) + + if len(args) == 2: + # two arguments are passed if called in the context of aboveline + # print the table header with column widths and optional header tag + return make_header_line(False, *args) + + elif len(args) == 3: + # three arguments are passed if called in the context of dataline or headerline + # print the table line and make the aboveline if it is a header + + cell_values, colwidths, colaligns = args + data_line = "|" + "|".join(cell_values) + + if is_header: + return make_header_line(True, colwidths, 
colaligns) + "\n" + data_line + else: + return data_line + + else: + raise ValueError( + " _asciidoc_row() requires two (colwidths, colaligns) " + + "or three (cell_values, colwidths, colaligns) arguments) " + ) + + LATEX_ESCAPE_RULES = { r"&": r"\&", r"%": r"\%", @@ -264,7 +295,7 @@ def escape_char(c): def _rst_escape_first_column(rows, headers): def escape_empty(val): - if isinstance(val, (_text_type, _binary_type)) and not val.strip(): + if isinstance(val, (str, bytes)) and not val.strip(): return ".." else: return val @@ -312,6 +343,56 @@ def escape_empty(val): padding=1, with_header_hide=None, ), + "simple_grid": TableFormat( + lineabove=Line("┌", "─", "┬", "┐"), + linebelowheader=Line("├", "─", "┼", "┤"), + linebetweenrows=Line("├", "─", "┼", "┤"), + linebelow=Line("└", "─", "┴", "┘"), + headerrow=DataRow("│", "│", "│"), + datarow=DataRow("│", "│", "│"), + padding=1, + with_header_hide=None, + ), + "rounded_grid": TableFormat( + lineabove=Line("╭", "─", "┬", "╮"), + linebelowheader=Line("├", "─", "┼", "┤"), + linebetweenrows=Line("├", "─", "┼", "┤"), + linebelow=Line("╰", "─", "┴", "╯"), + headerrow=DataRow("│", "│", "│"), + datarow=DataRow("│", "│", "│"), + padding=1, + with_header_hide=None, + ), + "heavy_grid": TableFormat( + lineabove=Line("┏", "━", "┳", "┓"), + linebelowheader=Line("┣", "━", "╋", "┫"), + linebetweenrows=Line("┣", "━", "╋", "┫"), + linebelow=Line("┗", "━", "┻", "┛"), + headerrow=DataRow("┃", "┃", "┃"), + datarow=DataRow("┃", "┃", "┃"), + padding=1, + with_header_hide=None, + ), + "mixed_grid": TableFormat( + lineabove=Line("┍", "━", "┯", "┑"), + linebelowheader=Line("┝", "━", "┿", "┥"), + linebetweenrows=Line("├", "─", "┼", "┤"), + linebelow=Line("┕", "━", "┷", "┙"), + headerrow=DataRow("│", "│", "│"), + datarow=DataRow("│", "│", "│"), + padding=1, + with_header_hide=None, + ), + "double_grid": TableFormat( + lineabove=Line("╔", "═", "╦", "╗"), + linebelowheader=Line("╠", "═", "╬", "╣"), + linebetweenrows=Line("╠", "═", "╬", "╣"), + 
linebelow=Line("╚", "═", "╩", "╝"), + headerrow=DataRow("║", "║", "║"), + datarow=DataRow("║", "║", "║"), + padding=1, + with_header_hide=None, + ), "fancy_grid": TableFormat( lineabove=Line("╒", "═", "╤", "╕"), linebelowheader=Line("╞", "═", "╪", "╡"), @@ -322,6 +403,66 @@ def escape_empty(val): padding=1, with_header_hide=None, ), + "outline": TableFormat( + lineabove=Line("+", "-", "+", "+"), + linebelowheader=Line("+", "=", "+", "+"), + linebetweenrows=None, + linebelow=Line("+", "-", "+", "+"), + headerrow=DataRow("|", "|", "|"), + datarow=DataRow("|", "|", "|"), + padding=1, + with_header_hide=None, + ), + "simple_outline": TableFormat( + lineabove=Line("┌", "─", "┬", "┐"), + linebelowheader=Line("├", "─", "┼", "┤"), + linebetweenrows=None, + linebelow=Line("└", "─", "┴", "┘"), + headerrow=DataRow("│", "│", "│"), + datarow=DataRow("│", "│", "│"), + padding=1, + with_header_hide=None, + ), + "rounded_outline": TableFormat( + lineabove=Line("╭", "─", "┬", "╮"), + linebelowheader=Line("├", "─", "┼", "┤"), + linebetweenrows=None, + linebelow=Line("╰", "─", "┴", "╯"), + headerrow=DataRow("│", "│", "│"), + datarow=DataRow("│", "│", "│"), + padding=1, + with_header_hide=None, + ), + "heavy_outline": TableFormat( + lineabove=Line("┏", "━", "┳", "┓"), + linebelowheader=Line("┣", "━", "╋", "┫"), + linebetweenrows=None, + linebelow=Line("┗", "━", "┻", "┛"), + headerrow=DataRow("┃", "┃", "┃"), + datarow=DataRow("┃", "┃", "┃"), + padding=1, + with_header_hide=None, + ), + "mixed_outline": TableFormat( + lineabove=Line("┍", "━", "┯", "┑"), + linebelowheader=Line("┝", "━", "┿", "┥"), + linebetweenrows=None, + linebelow=Line("┕", "━", "┷", "┙"), + headerrow=DataRow("│", "│", "│"), + datarow=DataRow("│", "│", "│"), + padding=1, + with_header_hide=None, + ), + "double_outline": TableFormat( + lineabove=Line("╔", "═", "╦", "╗"), + linebelowheader=Line("╠", "═", "╬", "╣"), + linebetweenrows=None, + linebelow=Line("╚", "═", "╩", "╝"), + headerrow=DataRow("║", "║", "║"), + 
datarow=DataRow("║", "║", "║"), + padding=1, + with_header_hide=None, + ), "fancy_outline": TableFormat( lineabove=Line("╒", "═", "╤", "╕"), linebelowheader=Line("╞", "═", "╪", "╡"), @@ -527,6 +668,16 @@ def escape_empty(val): padding=1, with_header_hide=None, ), + "asciidoc": TableFormat( + lineabove=partial(_asciidoc_row, False), + linebelowheader=None, + linebetweenrows=None, + linebelow=Line("|====", "", "", ""), + headerrow=partial(_asciidoc_row, True), + datarow=partial(_asciidoc_row, False), + padding=1, + with_header_hide=["lineabove"], + ), } @@ -539,6 +690,11 @@ def escape_empty(val): "plain": "plain", "simple": "simple", "grid": "grid", + "simple_grid": "simple_grid", + "rounded_grid": "rounded_grid", + "heavy_grid": "heavy_grid", + "mixed_grid": "mixed_grid", + "double_grid": "double_grid", "fancy_grid": "fancy_grid", "pipe": "pipe", "orgtbl": "orgtbl", @@ -561,16 +717,55 @@ def escape_empty(val): _multiline_codes = re.compile(r"\r|\n|\r\n") _multiline_codes_bytes = re.compile(b"\r|\n|\r\n") -_invisible_codes = re.compile( - r"\x1b\[\d+[;\d]*m|\x1b\[\d*\;\d*\;\d*m|\x1b\]8;;(.*?)\x1b\\" -) # ANSI color codes -_invisible_codes_bytes = re.compile( - b"\x1b\\[\\d+\\[;\\d]*m|\x1b\\[\\d*;\\d*;\\d*m|\\x1b\\]8;;(.*?)\\x1b\\\\" -) # ANSI color codes -_invisible_codes_link = re.compile( - r"\x1B]8;[a-zA-Z0-9:]*;[^\x1B]+\x1B\\([^\x1b]+)\x1B]8;;\x1B\\" -) # Terminal hyperlinks +# Handle ANSI escape sequences for both control sequence introducer (CSI) and +# operating system command (OSC). Both of these begin with 0x1b (or octal 033), +# which will be shown below as ESC. 
+# +# CSI ANSI escape codes have the following format, defined in section 5.4 of ECMA-48: +# +# CSI: ESC followed by the '[' character (0x5b) +# Parameter Bytes: 0..n bytes in the range 0x30-0x3f +# Intermediate Bytes: 0..n bytes in the range 0x20-0x2f +# Final Byte: a single byte in the range 0x40-0x7e +# +# Also include the terminal hyperlink sequences as described here: +# https://gist.github.com/egmontkob/eb114294efbcd5adb1944c9f3cb5feda +# +# OSC 8 ; params ; uri ST display_text OSC 8 ;; ST +# +# Example: \x1b]8;;https://example.com\x5ctext to show\x1b]8;;\x5c +# +# Where: +# OSC: ESC followed by the ']' character (0x5d) +# params: 0..n optional key value pairs separated by ':' (e.g. foo=bar:baz=qux:abc=123) +# URI: the actual URI with protocol scheme (e.g. https://, file://, ftp://) +# ST: ESC followed by the '\' character (0x5c) +_esc = r"\x1b" +_csi = rf"{_esc}\[" +_osc = rf"{_esc}\]" +_st = rf"{_esc}\\" + +_ansi_escape_pat = rf""" + ( + # terminal colors, etc + {_csi} # CSI + [\x30-\x3f]* # parameter bytes + [\x20-\x2f]* # intermediate bytes + [\x40-\x7e] # final byte + | + # terminal hyperlinks + {_osc}8; # OSC opening + (\w+=\w+:?)* # key=value params list (submatch 2) + ; # delimiter + ([^{_esc}]+) # URI - anything but ESC (submatch 3) + {_st} # ST + ([^{_esc}]+) # link text - anything but ESC (submatch 4) + {_osc}8;;{_st} # "closing" OSC sequence + ) +""" +_ansi_codes = re.compile(_ansi_escape_pat, re.VERBOSE) +_ansi_codes_bytes = re.compile(_ansi_escape_pat.encode("utf8"), re.VERBOSE) _ansi_color_reset_code = "\033[0m" _float_with_thousands_separators = re.compile( @@ -654,7 +849,7 @@ def _isnumber(string): """ if not _isconvertible(float, string): return False - elif isinstance(string, (_text_type, _binary_type)) and ( + elif isinstance(string, (str, bytes)) and ( math.isinf(float(string)) or math.isnan(float(string)) ): return string.lower() in ["inf", "-inf", "nan"] @@ -670,7 +865,7 @@ def _isint(string, inttype=int): """ return ( type(string) is 
inttype - or (isinstance(string, _binary_type) or isinstance(string, _text_type)) + or isinstance(string, (bytes, str)) and _isconvertible(inttype, string) ) @@ -684,8 +879,8 @@ def _isbool(string): >>> _isbool(1) False """ - return type(string) is _bool_type or ( - isinstance(string, (_binary_type, _text_type)) and string in ("True", "False") + return type(string) is bool or ( + isinstance(string, (bytes, str)) and string in ("True", "False") ) @@ -705,27 +900,23 @@ def _type(string, has_invisible=True, numparse=True): """ - if has_invisible and ( - isinstance(string, _text_type) or isinstance(string, _binary_type) - ): - string = _strip_invisible(string) + if has_invisible and isinstance(string, (str, bytes)): + string = _strip_ansi(string) if string is None: - return _none_type + return type(None) elif hasattr(string, "isoformat"): # datetime.datetime, date, and time - return _text_type + return str elif _isbool(string): - return _bool_type + return bool elif _isint(string) and numparse: return int - elif _isint(string, _long_type) and numparse: - return int elif _isnumber(string) and numparse: return float - elif isinstance(string, _binary_type): - return _binary_type + elif isinstance(string, bytes): + return bytes else: - return _text_type + return str def _afterpoint(string): @@ -794,18 +985,24 @@ def _padnone(ignore_width, s): return s -def _strip_invisible(s): - r"""Remove invisible ANSI color codes. +def _strip_ansi(s): + r"""Remove ANSI escape sequences, both CSI (color codes, etc) and OSC hyperlinks. + + CSI sequences are simply removed from the output, while OSC hyperlinks are replaced + with the link text. Note: it may be desirable to show the URI instead but this is not + supported. 
+ + >>> repr(_strip_ansi('\x1B]8;;https://example.com\x1B\\This is a link\x1B]8;;\x1B\\')) + "'This is a link'" - >>> str(_strip_invisible('\x1B]8;;https://example.com\x1B\\This is a link\x1B]8;;\x1B\\')) - 'This is a link' + >>> repr(_strip_ansi('\x1b[31mred\x1b[0m text')) + "'red text'" """ - if isinstance(s, _text_type): - links_removed = re.sub(_invisible_codes_link, "\\1", s) - return re.sub(_invisible_codes, "", links_removed) + if isinstance(s, str): + return _ansi_codes.sub(r"\4", s) else: # a bytestring - return re.sub(_invisible_codes_bytes, "", s) + return _ansi_codes_bytes.sub(r"\4", s) def _visible_width(s): @@ -820,14 +1017,14 @@ def _visible_width(s): len_fn = wcwidth.wcswidth else: len_fn = len - if isinstance(s, _text_type) or isinstance(s, _binary_type): - return len_fn(_strip_invisible(s)) + if isinstance(s, (str, bytes)): + return len_fn(_strip_ansi(s)) else: - return len_fn(_text_type(s)) + return len_fn(str(s)) def _is_multiline(s): - if isinstance(s, _text_type): + if isinstance(s, str): return bool(re.search(_multiline_codes, s)) else: # a bytestring return bool(re.search(_multiline_codes_bytes, s)) @@ -864,7 +1061,7 @@ def _align_column_choose_padfn(strings, alignment, has_invisible): padfn = _padboth elif alignment == "decimal": if has_invisible: - decimals = [_afterpoint(_strip_invisible(s)) for s in strings] + decimals = [_afterpoint(_strip_ansi(s)) for s in strings] else: decimals = [_afterpoint(s) for s in strings] maxdecimals = max(decimals) @@ -960,20 +1157,20 @@ def _align_column( def _more_generic(type1, type2): types = { - _none_type: 0, - _bool_type: 1, + type(None): 0, # noqa + bool: 1, int: 2, float: 3, - _binary_type: 4, - _text_type: 5, + bytes: 4, + str: 5, } invtypes = { - 5: _text_type, - 4: _binary_type, + 5: str, + 4: bytes, 3: float, 2: int, - 1: _bool_type, - 0: _none_type, + 1: bool, + 0: type(None), } moregeneric = max(types.get(type1, 5), types.get(type2, 5)) return invtypes[moregeneric] @@ -982,30 +1179,30 @@ def 
_more_generic(type1, type2): def _column_type(strings, has_invisible=True, numparse=True): """The least generic type all column values are convertible to. - >>> _column_type([True, False]) is _bool_type + >>> _column_type([True, False]) is bool True - >>> _column_type(["1", "2"]) is _int_type + >>> _column_type(["1", "2"]) is int True - >>> _column_type(["1", "2.3"]) is _float_type + >>> _column_type(["1", "2.3"]) is float True - >>> _column_type(["1", "2.3", "four"]) is _text_type + >>> _column_type(["1", "2.3", "four"]) is str True - >>> _column_type(["four", '\u043f\u044f\u0442\u044c']) is _text_type + >>> _column_type(["four", '\u043f\u044f\u0442\u044c']) is str True - >>> _column_type([None, "brux"]) is _text_type + >>> _column_type([None, "brux"]) is str True - >>> _column_type([1, 2, None]) is _int_type + >>> _column_type([1, 2, None]) is int True >>> import datetime as dt - >>> _column_type([dt.datetime(1991,2,19), dt.time(17,35)]) is _text_type + >>> _column_type([dt.datetime(1991,2,19), dt.time(17,35)]) is str True """ types = [_type(s, has_invisible, numparse) for s in strings] - return reduce(_more_generic, types, _bool_type) + return reduce(_more_generic, types, bool) -def _format(val, valtype, floatfmt, missingval="", has_invisible=True): +def _format(val, valtype, floatfmt, intfmt, missingval="", has_invisible=True): """Format a value according to its type. 
Unicode is supported: @@ -1020,25 +1217,25 @@ def _format(val, valtype, floatfmt, missingval="", has_invisible=True): if val is None: return missingval - if valtype in [int, _text_type]: - return "{0}".format(val) - elif valtype is _binary_type: + if valtype is str: + return f"{val}" + elif valtype is int: + return format(val, intfmt) + elif valtype is bytes: try: - return _text_type(val, "ascii") - except TypeError: - return _text_type(val) + return str(val, "ascii") + except (TypeError, UnicodeDecodeError): + return str(val) elif valtype is float: - is_a_colored_number = has_invisible and isinstance( - val, (_text_type, _binary_type) - ) + is_a_colored_number = has_invisible and isinstance(val, (str, bytes)) if is_a_colored_number: - raw_val = _strip_invisible(val) + raw_val = _strip_ansi(val) formatted_val = format(float(raw_val), floatfmt) return val.replace(raw_val, formatted_val) else: return format(float(val), floatfmt) else: - return "{0}".format(val) + return f"{val}" def _align_header( @@ -1059,20 +1256,48 @@ def _align_header( elif alignment == "center": return _padboth(width, header) elif not alignment: - return "{0}".format(header) + return f"{header}" else: return _padleft(width, header) +def _remove_separating_lines(rows): + if type(rows) == list: + separating_lines = [] + sans_rows = [] + for index, row in enumerate(rows): + if _is_separating_line(row): + separating_lines.append(index) + else: + sans_rows.append(row) + return sans_rows, separating_lines + else: + return rows, None + + +def _reinsert_separating_lines(rows, separating_lines): + if separating_lines: + for index in separating_lines: + rows.insert(index, SEPARATING_LINE) + + def _prepend_row_index(rows, index): """Add a left-most index column.""" if index is None or index is False: return rows - if len(index) != len(rows): - print("index=", index) - print("rows=", rows) - raise ValueError("index must be as long as the number of data rows") - rows = [[v] + list(row) for v, row in 
zip(index, rows)] + if isinstance(index, Sized) and len(index) != len(rows): + raise ValueError( + "index must be as long as the number of data rows: " + + "len(index)={} len(rows)={}".format(len(index), len(rows)) + ) + sans_rows, separating_lines = _remove_separating_lines(rows) + new_rows = [] + index_iter = iter(index) + for row in sans_rows: + index_v = next(index_iter) + new_rows.append([index_v] + list(row)) + rows = new_rows + _reinsert_separating_lines(rows, separating_lines) return rows @@ -1097,6 +1322,8 @@ def _normalize_tabular_data(tabular_data, headers, showindex="default"): * list of OrderedDicts (usually used with headers="keys") + * list of dataclasses (Python 3.7+ only, usually used with headers="keys") + * 2D NumPy arrays * NumPy record arrays (usually used with headers="keys") @@ -1150,9 +1377,9 @@ def _normalize_tabular_data(tabular_data, headers, showindex="default"): raise ValueError("tabular data doesn't appear to be a dict or a DataFrame") if headers == "keys": - headers = list(map(_text_type, keys)) # headers should be strings + headers = list(map(str, keys)) # headers should be strings - else: # it's a usual an iterable of iterables, or a NumPy array + else: # it's a usual iterable of iterables, or a NumPy array, or an iterable of dataclasses rows = list(tabular_data) if headers == "keys" and not rows: @@ -1172,7 +1399,7 @@ def _normalize_tabular_data(tabular_data, headers, showindex="default"): and hasattr(rows[0], "_fields") ): # namedtuple - headers = list(map(_text_type, rows[0]._fields)) + headers = list(map(str, rows[0]._fields)) elif len(rows) > 0 and hasattr(rows[0], "keys") and hasattr(rows[0], "values"): # dict-like object uniq_keys = set() # implements hashed lookup @@ -1193,11 +1420,11 @@ def _normalize_tabular_data(tabular_data, headers, showindex="default"): elif isinstance(headers, dict): # a dict of headers for a list of dicts headers = [headers.get(k, k) for k in keys] - headers = list(map(_text_type, headers)) + headers 
= list(map(str, headers)) elif headers == "firstrow": if len(rows) > 0: headers = [firstdict.get(k, k) for k in keys] - headers = list(map(_text_type, headers)) + headers = list(map(str, headers)) else: headers = [] elif headers: @@ -1216,9 +1443,20 @@ def _normalize_tabular_data(tabular_data, headers, showindex="default"): # print tabulate(cursor, headers='keys') headers = [column[0] for column in tabular_data.description] + elif ( + dataclasses is not None + and len(rows) > 0 + and dataclasses.is_dataclass(rows[0]) + ): + # Python 3.7+'s dataclass + field_names = [field.name for field in dataclasses.fields(rows[0])] + if headers == "keys": + headers = field_names + rows = [[getattr(row, f) for f in field_names] for row in rows] + elif headers == "keys" and len(rows) > 0: # keys are column indices - headers = list(map(_text_type, range(len(rows[0])))) + headers = list(map(str, range(len(rows[0])))) # take headers from the first row if necessary if headers == "firstrow" and len(rows) > 0: @@ -1227,18 +1465,23 @@ def _normalize_tabular_data(tabular_data, headers, showindex="default"): index = index[1:] else: headers = rows[0] - headers = list(map(_text_type, headers)) # headers should be strings + headers = list(map(str, headers)) # headers should be strings rows = rows[1:] + elif headers == "firstrow": + headers = [] - headers = list(map(_text_type, headers)) - rows = list(map(list, rows)) + headers = list(map(str, headers)) + # rows = list(map(list, rows)) + rows = list(map(lambda r: r if _is_separating_line(r) else list(r), rows)) # add or remove an index column - showindex_is_a_str = type(showindex) in [_text_type, _binary_type] + showindex_is_a_str = type(showindex) in [str, bytes] if showindex == "default" and index is not None: rows = _prepend_row_index(rows, index) - elif isinstance(showindex, Iterable) and not showindex_is_a_str: + elif isinstance(showindex, Sized) and not showindex_is_a_str: rows = _prepend_row_index(rows, list(showindex)) + elif 
isinstance(showindex, Iterable) and not showindex_is_a_str: + rows = _prepend_row_index(rows, showindex) elif showindex == "always" or (_bool(showindex) and not showindex_is_a_str): if index is None: index = list(range(len(rows))) @@ -1270,7 +1513,13 @@ def _wrap_text_to_colwidths(list_of_lists, colwidths, numparses=True): if width is not None: wrapper = _CustomTextWrap(width=width) - wrapped = wrapper.wrap(cell) + # Cast based on our internal type handling + # Any future custom formatting of types (such as datetimes) + # may need to be more explicit than just `str` of the object + casted_cell = ( + str(cell) if _isnumber(cell) else _type(cell, numparse)(cell) + ) + wrapped = wrapper.wrap(casted_cell) new_row.append("\n".join(wrapped)) else: new_row.append(cell) @@ -1279,11 +1528,37 @@ def _wrap_text_to_colwidths(list_of_lists, colwidths, numparses=True): return result +def _to_str(s, encoding="utf8", errors="ignore"): + """ + A type safe wrapper for converting a bytestring to str. This is essentially just + a wrapper around .decode() intended for use with things like map(), but with some + specific behavior: + + 1. if the given parameter is not a bytestring, it is returned unmodified + 2. decode() is called for the given parameter and assumes utf8 encoding, but the + default error behavior is changed from 'strict' to 'ignore' + + >>> repr(_to_str(b'foo')) + "'foo'" + + >>> repr(_to_str('foo')) + "'foo'" + + >>> repr(_to_str(42)) + "'42'" + + """ + if isinstance(s, bytes): + return s.decode(encoding=encoding, errors=errors) + return str(s) + + def tabulate( tabular_data, headers=(), tablefmt="simple", floatfmt=_DEFAULT_FLOATFMT, + intfmt=_DEFAULT_INTFMT, numalign=_DEFAULT_ALIGN, stralign=_DEFAULT_ALIGN, missingval=_DEFAULT_MISSINGVAL, @@ -1291,6 +1566,8 @@ def tabulate( disable_numparse=False, colalign=None, maxcolwidths=None, + rowalign=None, + maxheadercolwidths=None, ): """Format a fixed width table for pretty printing. 
@@ -1304,8 +1581,8 @@ def tabulate( The first required argument (`tabular_data`) can be a list-of-lists (or another iterable of iterables), a list of named tuples, a dictionary of iterables, an iterable of dictionaries, - a two-dimensional NumPy array, NumPy record array, or a Pandas' - dataframe. + an iterable of dataclasses (Python 3.7+), a two-dimensional NumPy array, + NumPy record array, or a Pandas' dataframe. Table headers @@ -1357,6 +1634,10 @@ def tabulate( Table formats ------------- + `intfmt` is a format specification used for columns which + contain numeric data without a decimal point. This can also be + a list or tuple of format strings, one per column. + `floatfmt` is a format specification used for columns which contain numeric data with a decimal point. This can also be a list or tuple of format strings, one per column. @@ -1427,7 +1708,73 @@ def tabulate( | eggs | 451 | +------+----------+ - "fancy_grid" draws a grid using box-drawing characters: + "simple_grid" draws a grid using single-line box-drawing + characters: + + >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], + ... ["strings", "numbers"], "simple_grid")) + ┌───────────┬───────────┐ + │ strings │ numbers │ + ├───────────┼───────────┤ + │ spam │ 41.9999 │ + ├───────────┼───────────┤ + │ eggs │ 451 │ + └───────────┴───────────┘ + + "rounded_grid" draws a grid using single-line box-drawing + characters with rounded corners: + + >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], + ... ["strings", "numbers"], "rounded_grid")) + ╭───────────┬───────────╮ + │ strings │ numbers │ + ├───────────┼───────────┤ + │ spam │ 41.9999 │ + ├───────────┼───────────┤ + │ eggs │ 451 │ + ╰───────────┴───────────╯ + + "heavy_grid" draws a grid using bold (thick) single-line box-drawing + characters: + + >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], + ... 
["strings", "numbers"], "heavy_grid")) + ┏━━━━━━━━━━━┳━━━━━━━━━━━┓ + ┃ strings ┃ numbers ┃ + ┣━━━━━━━━━━━╋━━━━━━━━━━━┫ + ┃ spam ┃ 41.9999 ┃ + ┣━━━━━━━━━━━╋━━━━━━━━━━━┫ + ┃ eggs ┃ 451 ┃ + ┗━━━━━━━━━━━┻━━━━━━━━━━━┛ + + "mixed_grid" draws a grid using a mix of light (thin) and heavy (thick) lines + box-drawing characters: + + >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], + ... ["strings", "numbers"], "mixed_grid")) + ┍━━━━━━━━━━━┯━━━━━━━━━━━┑ + │ strings │ numbers │ + ┝━━━━━━━━━━━┿━━━━━━━━━━━┥ + │ spam │ 41.9999 │ + ├───────────┼───────────┤ + │ eggs │ 451 │ + ┕━━━━━━━━━━━┷━━━━━━━━━━━┙ + + "double_grid" draws a grid using double-line box-drawing + characters: + + >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], + ... ["strings", "numbers"], "double_grid")) + ╔═══════════╦═══════════╗ + ║ strings ║ numbers ║ + ╠═══════════╬═══════════╣ + ║ spam ║ 41.9999 ║ + ╠═══════════╬═══════════╣ + ║ eggs ║ 451 ║ + ╚═══════════╩═══════════╝ + + "fancy_grid" draws a grid using a mix of single and + double-line box-drawing characters: >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], ... ["strings", "numbers"], "fancy_grid")) @@ -1439,6 +1786,89 @@ def tabulate( │ eggs │ 451 │ ╘═══════════╧═══════════╛ + "outline" is the same as the "grid" format but doesn't draw lines between rows: + + >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], + ... ["strings", "numbers"], "outline")) + +-----------+-----------+ + | strings | numbers | + +===========+===========+ + | spam | 41.9999 | + | eggs | 451 | + +-----------+-----------+ + + >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="outline")) + +------+----------+ + | spam | 41.9999 | + | eggs | 451 | + +------+----------+ + + "simple_outline" is the same as the "simple_grid" format but doesn't draw lines between rows: + + >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], + ... 
["strings", "numbers"], "simple_outline")) + ┌───────────┬───────────┐ + │ strings │ numbers │ + ├───────────┼───────────┤ + │ spam │ 41.9999 │ + │ eggs │ 451 │ + └───────────┴───────────┘ + + "rounded_outline" is the same as the "rounded_grid" format but doesn't draw lines between rows: + + >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], + ... ["strings", "numbers"], "rounded_outline")) + ╭───────────┬───────────╮ + │ strings │ numbers │ + ├───────────┼───────────┤ + │ spam │ 41.9999 │ + │ eggs │ 451 │ + ╰───────────┴───────────╯ + + "heavy_outline" is the same as the "heavy_grid" format but doesn't draw lines between rows: + + >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], + ... ["strings", "numbers"], "heavy_outline")) + ┏━━━━━━━━━━━┳━━━━━━━━━━━┓ + ┃ strings ┃ numbers ┃ + ┣━━━━━━━━━━━╋━━━━━━━━━━━┫ + ┃ spam ┃ 41.9999 ┃ + ┃ eggs ┃ 451 ┃ + ┗━━━━━━━━━━━┻━━━━━━━━━━━┛ + + "mixed_outline" is the same as the "mixed_grid" format but doesn't draw lines between rows: + + >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], + ... ["strings", "numbers"], "mixed_outline")) + ┍━━━━━━━━━━━┯━━━━━━━━━━━┑ + │ strings │ numbers │ + ┝━━━━━━━━━━━┿━━━━━━━━━━━┥ + │ spam │ 41.9999 │ + │ eggs │ 451 │ + ┕━━━━━━━━━━━┷━━━━━━━━━━━┙ + + "double_outline" is the same as the "double_grid" format but doesn't draw lines between rows: + + >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], + ... ["strings", "numbers"], "double_outline")) + ╔═══════════╦═══════════╗ + ║ strings ║ numbers ║ + ╠═══════════╬═══════════╣ + ║ spam ║ 41.9999 ║ + ║ eggs ║ 451 ║ + ╚═══════════╩═══════════╝ + + "fancy_outline" is the same as the "fancy_grid" format but doesn't draw lines between rows: + + >>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], + ... 
["strings", "numbers"], "fancy_outline")) + ╒═══════════╤═══════════╕ + │ strings │ numbers │ + ╞═══════════╪═══════════╡ + │ spam │ 41.9999 │ + │ eggs │ 451 │ + ╘═══════════╧═══════════╛ + "pipe" is like tables in PHP Markdown Extra extension or Pandoc pipe_tables: @@ -1612,14 +2042,17 @@ def tabulate( | | | better if it is wrapped a bit | +------------+------------+-------------------------------+ + Header column width can be specified in a similar way using `maxheadercolwidth` """ if tabular_data is None: tabular_data = [] + list_of_lists, headers = _normalize_tabular_data( tabular_data, headers, showindex=showindex ) + list_of_lists, separating_lines = _remove_separating_lines(list_of_lists) if maxcolwidths is not None: num_cols = len(list_of_lists[0]) @@ -1633,6 +2066,20 @@ def tabulate( list_of_lists, maxcolwidths, numparses=numparses ) + if maxheadercolwidths is not None: + num_cols = len(list_of_lists[0]) + if isinstance(maxheadercolwidths, int): # Expand scalar for all columns + maxheadercolwidths = _expand_iterable( + maxheadercolwidths, num_cols, maxheadercolwidths + ) + else: # Ignore col width for any 'trailing' columns + maxheadercolwidths = _expand_iterable(maxheadercolwidths, num_cols, None) + + numparses = _expand_numparse(disable_numparse, num_cols) + headers = _wrap_text_to_colwidths( + [headers], maxheadercolwidths, numparses=numparses + )[0] + # empty values in the first column of RST tables should be escaped (issue #82) # "" should be escaped as "\\ " or ".." if tablefmt == "rst": @@ -1654,14 +2101,21 @@ def tabulate( # optimization: look for ANSI control codes once, # enable smart width functions only if a control code is found + # + # convert the headers and rows into a single, tab-delimited string ensuring + # that any bytestrings are decoded safely (i.e. 
errors ignored) plain_text = "\t".join( - ["\t".join(map(_text_type, headers))] - + ["\t".join(map(_text_type, row)) for row in list_of_lists] + chain( + # headers + map(_to_str, headers), + # rows: chain the rows together into a single iterable after mapping + # the bytestring conversino to each cell value + chain.from_iterable(map(_to_str, row) for row in list_of_lists), + ) ) - has_invisible = re.search(_invisible_codes, plain_text) - if not has_invisible: - has_invisible = re.search(_invisible_codes_link, plain_text) + has_invisible = _ansi_codes.search(plain_text) is not None + enable_widechars = wcwidth is not None and WIDE_CHARS_MODE if ( not isinstance(tablefmt, TableFormat) @@ -1678,7 +2132,7 @@ def tabulate( cols = list(izip_longest(*list_of_lists)) numparses = _expand_numparse(disable_numparse, len(cols)) coltypes = [_column_type(col, numparse=np) for col, np in zip(cols, numparses)] - if isinstance(floatfmt, basestring): # old version + if isinstance(floatfmt, str): # old version float_formats = len(cols) * [ floatfmt ] # just duplicate the string to use in each column @@ -1686,15 +2140,25 @@ def tabulate( float_formats = list(floatfmt) if len(float_formats) < len(cols): float_formats.extend((len(cols) - len(float_formats)) * [_DEFAULT_FLOATFMT]) - if isinstance(missingval, basestring): + if isinstance(intfmt, str): # old version + int_formats = len(cols) * [ + intfmt + ] # just duplicate the string to use in each column + else: # if intfmt is list, tuple etc we have one per column + int_formats = list(intfmt) + if len(int_formats) < len(cols): + int_formats.extend((len(cols) - len(int_formats)) * [_DEFAULT_INTFMT]) + if isinstance(missingval, str): missing_vals = len(cols) * [missingval] else: missing_vals = list(missingval) if len(missing_vals) < len(cols): missing_vals.extend((len(cols) - len(missing_vals)) * [_DEFAULT_MISSINGVAL]) cols = [ - [_format(v, ct, fl_fmt, miss_v, has_invisible) for v in c] - for c, ct, fl_fmt, miss_v in zip(cols, coltypes, 
float_formats, missing_vals) + [_format(v, ct, fl_fmt, int_fmt, miss_v, has_invisible) for v in c] + for c, ct, fl_fmt, int_fmt, miss_v in zip( + cols, coltypes, float_formats, int_formats, missing_vals + ) ] # align columns @@ -1731,7 +2195,13 @@ def tabulate( if not isinstance(tablefmt, TableFormat): tablefmt = _table_formats.get(tablefmt, _table_formats["simple"]) - return _format_table(tablefmt, headers, rows, minwidths, aligns, is_multiline) + ra_default = rowalign if isinstance(rowalign, str) else None + rowaligns = _expand_iterable(rowalign, len(rows), ra_default) + _reinsert_separating_lines(rows, separating_lines) + + return _format_table( + tablefmt, headers, rows, minwidths, aligns, is_multiline, rowaligns=rowaligns + ) def _expand_numparse(disable_numparse, column_count): @@ -1759,7 +2229,7 @@ def _expand_iterable(original, num_desired, default): If `original` is not a list to begin with (i.e. scalar value) a list of length `num_desired` completely populated with `default will be returned """ - if isinstance(original, Iterable): + if isinstance(original, Iterable) and not isinstance(original, str): return original + [default] * (num_desired - len(original)) else: return [default] * num_desired @@ -1790,20 +2260,39 @@ def _build_row(padded_cells, colwidths, colaligns, rowfmt): return _build_simple_row(padded_cells, rowfmt) -def _append_basic_row(lines, padded_cells, colwidths, colaligns, rowfmt): +def _append_basic_row(lines, padded_cells, colwidths, colaligns, rowfmt, rowalign=None): + # NOTE: rowalign is ignored and exists for api compatibility with _append_multiline_row lines.append(_build_row(padded_cells, colwidths, colaligns, rowfmt)) return lines +def _align_cell_veritically(text_lines, num_lines, column_width, row_alignment): + delta_lines = num_lines - len(text_lines) + blank = [" " * column_width] + if row_alignment == "bottom": + return blank * delta_lines + text_lines + elif row_alignment == "center": + top_delta = delta_lines // 2 + 
bottom_delta = delta_lines - top_delta + return top_delta * blank + text_lines + bottom_delta * blank + else: + return text_lines + blank * delta_lines + + def _append_multiline_row( - lines, padded_multiline_cells, padded_widths, colaligns, rowfmt, pad + lines, padded_multiline_cells, padded_widths, colaligns, rowfmt, pad, rowalign=None ): colwidths = [w - 2 * pad for w in padded_widths] cells_lines = [c.splitlines() for c in padded_multiline_cells] nlines = max(map(len, cells_lines)) # number of lines in the row # vertically pad cells where some lines are missing + # cells_lines = [ + # (cl + [" " * w] * (nlines - len(cl))) for cl, w in zip(cells_lines, colwidths) + # ] + cells_lines = [ - (cl + [" " * w] * (nlines - len(cl))) for cl, w in zip(cells_lines, colwidths) + _align_cell_veritically(cl, nlines, w, rowalign) + for cl, w in zip(cells_lines, colwidths) ] lines_cells = [[cl[i] for cl in cells_lines] for i in range(nlines)] for ln in lines_cells: @@ -1842,7 +2331,7 @@ def str(self): return self -def _format_table(fmt, headers, rows, colwidths, colaligns, is_multiline): +def _format_table(fmt, headers, rows, colwidths, colaligns, is_multiline, rowaligns): """Produce a plain-text representation of the table.""" lines = [] hidden = fmt.with_header_hide if (headers and fmt.with_header_hide) else [] @@ -1870,14 +2359,35 @@ def _format_table(fmt, headers, rows, colwidths, colaligns, is_multiline): if padded_rows and fmt.linebetweenrows and "linebetweenrows" not in hidden: # initial rows with a line below - for row in padded_rows[:-1]: - append_row(lines, row, padded_widths, colaligns, fmt.datarow) + for row, ralign in zip(padded_rows[:-1], rowaligns): + append_row( + lines, row, padded_widths, colaligns, fmt.datarow, rowalign=ralign + ) _append_line(lines, padded_widths, colaligns, fmt.linebetweenrows) # the last row without a line below - append_row(lines, padded_rows[-1], padded_widths, colaligns, fmt.datarow) + append_row( + lines, + padded_rows[-1], + 
padded_widths, + colaligns, + fmt.datarow, + rowalign=rowaligns[-1], + ) else: + separating_line = ( + fmt.linebetweenrows + or fmt.linebelowheader + or fmt.linebelow + or fmt.lineabove + or Line("", "", "", "") + ) for row in padded_rows: - append_row(lines, row, padded_widths, colaligns, fmt.datarow) + # test to see if either the 1st column or the 2nd column (account for showindex) has + # the SEPARATING_LINE flag + if _is_separating_line(row): + _append_line(lines, padded_widths, colaligns, separating_line) + else: + append_row(lines, row, padded_widths, colaligns, fmt.datarow) if fmt.linebelow and "linebelow" not in hidden: _append_line(lines, padded_widths, colaligns, fmt.linebelow) @@ -1909,7 +2419,7 @@ def __init__(self, *args, **kwargs): def _len(item): """Custom len that gets console column width for wide and non-wide characters as well as ignores color codes""" - stripped = _strip_invisible(item) + stripped = _strip_ansi(item) if wcwidth: return wcwidth.wcswidth(stripped) else: @@ -1921,7 +2431,7 @@ def _update_lines(self, lines, new_line): as add any colors from previous lines order to preserve the same formatting as a single unwrapped string. 
""" - code_matches = [x for x in re.finditer(_invisible_codes, new_line)] + code_matches = [x for x in _ansi_codes.finditer(new_line)] color_codes = [ code.string[code.span()[0] : code.span()[1]] for code in code_matches ] @@ -2109,6 +2619,7 @@ def _main(): -o FILE, --output FILE print table to FILE (default: stdout) -s REGEXP, --sep REGEXP use a custom column separator (default: whitespace) -F FPFMT, --float FPFMT floating point number format (default: g) + -I INTFMT, --int INTFMT integer point number format (default: "") -f FMT, --format FMT set output table format; supported formats: plain, simple, grid, fancy_grid, pipe, orgtbl, rst, mediawiki, html, latex, latex_raw, @@ -2124,7 +2635,7 @@ def _main(): opts, args = getopt.getopt( sys.argv[1:], "h1o:s:F:A:f:", - ["help", "header", "output", "sep=", "float=", "align=", "format="], + ["help", "header", "output", "sep=", "float=", "int=", "align=", "format="], ) except getopt.GetoptError as e: print(e) @@ -2132,6 +2643,7 @@ def _main(): sys.exit(2) headers = [] floatfmt = _DEFAULT_FLOATFMT + intfmt = _DEFAULT_INTFMT colalign = None tablefmt = "simple" sep = r"\s+" @@ -2143,6 +2655,8 @@ def _main(): outfile = value elif opt in ["-F", "--float"]: floatfmt = value + elif opt in ["-I", "--int"]: + intfmt = value elif opt in ["-C", "--colalign"]: colalign = value.split() elif opt in ["-f", "--format"]: @@ -2168,6 +2682,7 @@ def _main(): tablefmt=tablefmt, sep=sep, floatfmt=floatfmt, + intfmt=intfmt, file=out, colalign=colalign, ) @@ -2179,16 +2694,24 @@ def _main(): tablefmt=tablefmt, sep=sep, floatfmt=floatfmt, + intfmt=intfmt, file=out, colalign=colalign, ) -def _pprint_file(fobject, headers, tablefmt, sep, floatfmt, file, colalign): +def _pprint_file(fobject, headers, tablefmt, sep, floatfmt, intfmt, file, colalign): rows = fobject.readlines() table = [re.split(sep, r.rstrip()) for r in rows if r.strip()] print( - tabulate(table, headers, tablefmt, floatfmt=floatfmt, colalign=colalign), + tabulate( + table, + 
headers, + tablefmt, + floatfmt=floatfmt, + intfmt=intfmt, + colalign=colalign, + ), file=file, ) diff --git a/python/ray/air/config.py b/python/ray/air/config.py index bc77dce1e90d..c869b90c76d7 100644 --- a/python/ray/air/config.py +++ b/python/ray/air/config.py @@ -18,6 +18,7 @@ ) from ray._private.storage import _get_storage_uri +from ray._private.thirdparty.tabulate.tabulate import tabulate from ray.air.constants import WILDCARD_KEY from ray.util.annotations import PublicAPI from ray.widgets import Template, make_table_html_repr @@ -550,14 +551,6 @@ def __repr__(self): return _repr_dataclass(self) def _repr_html_(self): - try: - from tabulate import tabulate - except ImportError: - return ( - "Tabulate isn't installed. Run " - "`pip install tabulate` for rich notebook output." - ) - return Template("scrollableTable.html.j2").render( table=tabulate( { @@ -638,14 +631,6 @@ def __repr__(self): return _repr_dataclass(self) def _repr_html_(self) -> str: - try: - from tabulate import tabulate - except ImportError: - return ( - "Tabulate isn't installed. Run " - "`pip install tabulate` for rich notebook output." - ) - if self.num_to_keep is None: num_to_keep_repr = "All" else: @@ -840,14 +825,6 @@ def __repr__(self): ) def _repr_html_(self) -> str: - try: - from tabulate import tabulate - except ImportError: - return ( - "Tabulate isn't installed. Run " - "`pip install tabulate` for rich notebook output." 
- ) - reprs = [] if self.failure_config is not None: reprs.append( diff --git a/python/ray/data/datastream.py b/python/ray/data/datastream.py index 20517e1db0c4..e1539777b2d1 100644 --- a/python/ray/data/datastream.py +++ b/python/ray/data/datastream.py @@ -4495,7 +4495,7 @@ def _ipython_display_(self): ["ipywidgets", "8"], ) def _tab_repr_(self): - from tabulate import tabulate + from ray._private.thirdparty.tabulate.tabulate import tabulate from ipywidgets import Tab, HTML metadata = { diff --git a/python/ray/tests/ludwig/ludwig_test_utils.py b/python/ray/tests/ludwig/ludwig_test_utils.py index 95a7341e0f11..069d431655ad 100644 --- a/python/ray/tests/ludwig/ludwig_test_utils.py +++ b/python/ray/tests/ludwig/ludwig_test_utils.py @@ -521,7 +521,7 @@ def create_data_set_to_use(data_format, raw_data): # support for writing to a fwf dataset based on this stackoverflow posting: # https://stackoverflow.com/questions/16490261/python-pandas-write-dataframe-to-fixed-width-file-to-fwf - from tabulate import tabulate + from ray._private.thirdparty.tabulate.tabulate import tabulate def to_fwf(df, fname): content = tabulate(df.values.tolist(), list(df.columns), tablefmt="plain") diff --git a/python/ray/train/data_parallel_trainer.py b/python/ray/train/data_parallel_trainer.py index 9d2beb3b19eb..b08b8f5600ee 100644 --- a/python/ray/train/data_parallel_trainer.py +++ b/python/ray/train/data_parallel_trainer.py @@ -2,7 +2,7 @@ import logging from pathlib import Path from typing import TYPE_CHECKING, Any, Callable, Dict, Optional, Tuple, Type, Union -from tabulate import tabulate +from ray._private.thirdparty.tabulate.tabulate import tabulate import ray from ray import tune diff --git a/python/ray/tune/cli/commands.py b/python/ray/tune/cli/commands.py index 5ce5fefe8d4b..f1a821c2571e 100644 --- a/python/ray/tune/cli/commands.py +++ b/python/ray/tune/cli/commands.py @@ -17,11 +17,7 @@ ) from ray.tune.analysis import ExperimentAnalysis from ray.tune import TuneError - -try: - from 
tabulate import tabulate -except ImportError: - tabulate = None +from ray._private.thirdparty.tabulate.tabulate import tabulate logger = logging.getLogger(__name__) diff --git a/python/ray/tune/examples/tune-default.yaml b/python/ray/tune/examples/tune-default.yaml index 31df4bf29f5e..79d5f003d02d 100644 --- a/python/ray/tune/examples/tune-default.yaml +++ b/python/ray/tune/examples/tune-default.yaml @@ -11,4 +11,4 @@ available_node_types: node_config: {InstanceType: c5.xlarge, ImageId: ami-0b294f219d14e6a82} head_node_type: head_node setup_commands: # Set up each node. - - pip install ray torch torchvision tabulate tensorboard + - pip install ray torch torchvision tensorboard diff --git a/python/ray/tune/examples/tune-local-default.yaml b/python/ray/tune/examples/tune-local-default.yaml index 8cf5a3fe1a4b..58331670ea6a 100644 --- a/python/ray/tune/examples/tune-local-default.yaml +++ b/python/ray/tune/examples/tune-local-default.yaml @@ -8,4 +8,4 @@ auth: {ssh_user: YOUR_USERNAME, ssh_private_key: ~/.ssh/id_rsa} min_workers: 3 max_workers: 3 setup_commands: # Set up each node. 
- - pip install ray torch torchvision tabulate tensorboard + - pip install ray torch torchvision tensorboard diff --git a/python/ray/tune/experimental/output.py b/python/ray/tune/experimental/output.py index 37d2ba5418c8..6970d2e505e0 100644 --- a/python/ray/tune/experimental/output.py +++ b/python/ray/tune/experimental/output.py @@ -11,7 +11,7 @@ import numpy as np import os import pandas as pd -from tabulate import tabulate +from ray._private.thirdparty.tabulate.tabulate import tabulate import textwrap import time diff --git a/python/ray/tune/progress_reporter.py b/python/ray/tune/progress_reporter.py index 9bc46281fe9f..59fd63187cd9 100644 --- a/python/ray/tune/progress_reporter.py +++ b/python/ray/tune/progress_reporter.py @@ -16,6 +16,7 @@ import ray from ray._private.dict import flatten_dict +from ray._private.thirdparty.tabulate.tabulate import tabulate from ray.experimental.tqdm_ray import safe_print from ray.air.util.node import _force_on_current_node from ray.tune.callback import Callback @@ -49,14 +50,6 @@ except ImportError: from collections import Mapping, MutableMapping -try: - from tabulate import tabulate -except ImportError: - raise ImportError( - "ray.tune in ray > 0.7.5 requires 'tabulate'. " - "Please re-run 'pip install ray[tune]' or " - "'pip install ray[rllib]'." 
- ) IS_NOTEBOOK = ray.widgets.util.in_notebook() diff --git a/python/ray/tune/requirements-dev.txt b/python/ray/tune/requirements-dev.txt index 1122f99be905..e4432a5471c6 100644 --- a/python/ray/tune/requirements-dev.txt +++ b/python/ray/tune/requirements-dev.txt @@ -4,7 +4,6 @@ gym>=0.21.0,<0.24.0 scikit-image pandas requests -tabulate tensorflow black==22.10.0 yq diff --git a/python/ray/tune/syncer.py b/python/ray/tune/syncer.py index 402259258863..85a2923025ea 100644 --- a/python/ray/tune/syncer.py +++ b/python/ray/tune/syncer.py @@ -19,6 +19,7 @@ from dataclasses import dataclass import ray +from ray._private.thirdparty.tabulate.tabulate import tabulate from ray.air._internal.checkpoint_manager import CheckpointStorage, _TrackedCheckpoint from ray.air._internal.remote_storage import ( fs_hint, @@ -145,14 +146,6 @@ def _repr_html_(self) -> str: Note that self.syncer is omitted here; seems to have some overlap with existing configuration settings here in the SyncConfig class. """ - try: - from tabulate import tabulate - except ImportError: - return ( - "Tabulate isn't installed. Run " - "`pip install tabulate` for rich notebook output." - ) - return Template("scrollableTable.html.j2").render( table=tabulate( { diff --git a/python/ray/widgets/util.py b/python/ray/widgets/util.py index 056d5ca0d385..e43467c6ace9 100644 --- a/python/ray/widgets/util.py +++ b/python/ray/widgets/util.py @@ -5,6 +5,7 @@ from functools import wraps from typing import Any, Callable, Iterable, Optional, TypeVar, Union +from ray._private.thirdparty.tabulate.tabulate import tabulate from ray.util.annotations import DeveloperAPI from ray.widgets import Template @@ -34,14 +35,6 @@ def make_table_html_repr( Returns: HTML representation of the object """ - try: - from tabulate import tabulate - except ImportError: - return ( - "Tabulate isn't installed. Run " - "`pip install tabulate` for rich notebook output." 
- ) - data = {} for k, v in vars(obj).items(): if isinstance(v, (str, bool, int, float)): diff --git a/python/requirements.txt b/python/requirements.txt index e8227a403996..4eb0bc927bc5 100644 --- a/python/requirements.txt +++ b/python/requirements.txt @@ -38,7 +38,6 @@ uvicorn py-spy>=0.2.0 rich urllib3 -tabulate scikit-image scipy aiohttp>=3.7 diff --git a/python/setup.py b/python/setup.py index 0aa569b5ef63..969d64963f98 100644 --- a/python/setup.py +++ b/python/setup.py @@ -254,7 +254,7 @@ def get_packages(self): "smart_open", ], "serve": ["uvicorn", "requests", "starlette", "fastapi", "aiorwlock"], - "tune": ["pandas", "tabulate", "tensorboardX>=1.9", "requests", pyarrow_dep], + "tune": ["pandas", "tensorboardX>=1.9", "requests", pyarrow_dep], "k8s": ["kubernetes", "urllib3"], "observability": [ "opentelemetry-api", From a396fa3c951efe7b6cc516ecc700aa4dfb31c3de Mon Sep 17 00:00:00 2001 From: Kai Fricke Date: Thu, 27 Apr 2023 08:55:22 +0100 Subject: [PATCH 119/424] [air] remote_storage: Prefer fsspec filesystems over native pyarrow (#34663) Following up from #34102, our benchmarks showed how inferior unthreaded syncing is over threaded syncing. However, due to pyarrow, we currently can't use threading. Since the bug affects all pyarrow versions <=11, which will likely be used by some users in the future, we have to look into workarounds. One such workaround is to use the fsspec-provided filesystem and prefer it over the native pyarrow fs. 
Signed-off-by: Kai Fricke --- python/ray/air/_internal/remote_storage.py | 214 ++++++++++++------ .../ray/tune/execution/ray_trial_executor.py | 4 + python/ray/tune/syncer.py | 37 +++ python/ray/tune/tests/test_syncer.py | 31 ++- python/requirements/ml/requirements_tune.txt | 1 + 5 files changed, 214 insertions(+), 73 deletions(-) diff --git a/python/ray/air/_internal/remote_storage.py b/python/ray/air/_internal/remote_storage.py index 684b8015fbc5..c899b1d20727 100644 --- a/python/ray/air/_internal/remote_storage.py +++ b/python/ray/air/_internal/remote_storage.py @@ -6,7 +6,7 @@ from pathlib import Path from pkg_resources import packaging import shutil -from typing import List, Optional, Tuple +from typing import Any, Dict, List, Optional, Tuple from ray.air._internal.filelock import TempFileLock @@ -22,22 +22,8 @@ import pyarrow import pyarrow.fs - # TODO(krfricke): Remove this once gcsfs > 2022.3.0 is released - # (and make sure to pin) - class _CustomGCSHandler(pyarrow.fs.FSSpecHandler): - """Custom FSSpecHandler that avoids a bug in gcsfs <= 2022.3.0.""" - - def create_dir(self, path, recursive): - try: - # GCSFS doesn't expose `create_parents` argument, - # so it is omitted here - self.fs.mkdir(path) - except FileExistsError: - pass - except (ImportError, ModuleNotFoundError): pyarrow = None - _CustomGCSHandler = None from ray import logger @@ -173,6 +159,127 @@ def _is_local_windows_path(path: str) -> bool: return False + + +def _translate_s3_options(options: Dict[str, List[str]]) -> Dict[str, Any]: + """Translate pyarrow s3 query options into s3fs ``storage_kwargs``. + + ``storage_kwargs`` are passed to ``s3fs.S3Filesystem``. They accept + ``client_kwargs``, which are passed to ``botocore.session.Session.Client``. + + In this function, we translate query string parameters from an s3 URI + (e.g. ``s3://bucket/folder?endpoint_override=somewhere``) into the respective + query parameters for the botocore client. 
+ + S3Filesystem API ref: https://s3fs.readthedocs.io/en/latest/api.html + + Botocore Client API ref: https://boto3.amazonaws.com/v1/documentation/api/latest/ + reference/core/session.html#boto3.session.Session.client + + """ + # Map from s3 query keys --> botocore client arguments + option_map = { + "endpoint_override": "endpoint_url", + "region": "region_name", + "access_key": "aws_access_key_id", + "secret_key": "aws_secret_access_key", + } + + client_kwargs = {} + for opt, target in option_map.items(): + if opt in options: + client_kwargs[target] = options[opt][0] + + # s3fs directory cache does not work correctly, so we pass + # `use_listings_cache` to disable it. See https://github.com/fsspec/s3fs/issues/657 + # We should keep this for s3fs versions <= 2023.4.0. + return {"client_kwargs": client_kwargs, "use_listings_cache": False} + + +def _translate_gcs_options(options: Dict[str, List[str]]) -> Dict[str, Any]: + """Translate pyarrow gcs query options into gcsfs ``storage_kwargs``. + + ``storage_kwargs`` are passed to ``gcsfs.GCSFileSystem``. + + In this function, we translate query string parameters from a gcs URI + (e.g. ``gs://bucket/folder?endpoint_override=somewhere``) into the respective + arguments for the gcs filesystem. + + GCSFileSystem API ref: https://gcsfs.readthedocs.io/en/latest/api.html + + """ + # Map from gcs query keys --> gcsfs kwarg names + option_map = { + "endpoint_override": "endpoint_url", + } + + storage_kwargs = {} + for opt, target in option_map.items(): + if opt in options: + storage_kwargs[target] = options[opt][0] + + return storage_kwargs + + +def _has_compatible_gcsfs_version() -> bool: + """GCSFS does not work for versions > 2022.7.1 and < 2022.10.0. + + See https://github.com/fsspec/gcsfs/issues/498. + + In that case, and if we can't fallback to native PyArrow's GCS handler, + we raise an error. 
+ """ + try: + import gcsfs + + # For minimal install that only needs python3-setuptools + if packaging.version.parse(gcsfs.__version__) > packaging.version.parse( + "2022.7.1" + ) and packaging.version.parse(gcsfs.__version__) < packaging.version.parse( + "2022.10.0" + ): + # PyArrow's GcsFileSystem was introduced in 9.0.0. + if packaging.version.parse(pyarrow.__version__) < packaging.version.parse( + "9.0.0" + ): + raise RuntimeError( + "`gcsfs` versions between '2022.7.1' and '2022.10.0' are not " + f"compatible with pyarrow. You have gcsfs version " + f"{gcsfs.__version__}. Please downgrade or upgrade your gcsfs " + f"version or upgrade PyArrow. See more details in " + f"https://github.com/fsspec/gcsfs/issues/498." + ) + # Returning False here means we fall back to pyarrow. + return False + except ImportError: + return False + return True + + +def _get_fsspec_fs_and_path(uri: str) -> Optional["pyarrow.fs.FileSystem"]: + parsed = urllib.parse.urlparse(uri) + + storage_kwargs = {} + if parsed.scheme in ["s3", "s3a"] and parsed.query: + storage_kwargs = _translate_s3_options(urllib.parse.parse_qs(parsed.query)) + elif parsed.scheme in ["gs", "gcs"] and parsed.query: + if not _has_compatible_gcsfs_version(): + # If gcsfs is incompatible, fallback to pyarrow.fs. + return None + storage_kwargs = _translate_gcs_options(urllib.parse.parse_qs(parsed.query)) + + try: + fsspec_fs = fsspec.filesystem(parsed.scheme, **storage_kwargs) + except Exception: + # ValueError when protocol is not known. + # ImportError when protocol is known but package not installed. + # Other errors can be raised if args/kwargs are incompatible. + # Thus we should except broadly here. 
+ return None + + fsspec_handler = pyarrow.fs.FSSpecHandler + fs = pyarrow.fs.PyFileSystem(fsspec_handler(fsspec_fs)) + return fs + + def get_fs_and_path( uri: str, ) -> Tuple[Optional["pyarrow.fs.FileSystem"], Optional[str]]: @@ -205,68 +312,33 @@ def get_fs_and_path( fs = _cached_fs[cache_key] return fs, path - # In case of hdfs filesystem, if uri does not have the netloc part below will - # fail with hdfs access error. For example 'hdfs:///user_folder/...' will - # fail, while only 'hdfs://namenode_server/user_foler/...' will work - # we consider the two cases of uri: short_hdfs_uri or other_uri, - # other_uri includes long hdfs uri and other filesystem uri, like s3 or gcp - # filesystem. Two cases of imported module of fsspec: yes or no. So we need - # to handle 4 cases: - # (uri, fsspec) - # (short_hdfs_uri, yes) --> use fsspec - # (short_hdfs_uri, no) --> return None and avoid init pyarrow - # (other_uri, yes) --> try pyarrow, if throw use fsspec - # (other_uri, no) --> try pyarrow, if throw return None - short_hdfs_uri = parsed.scheme == "hdfs" and parsed.netloc == "" - try: - if short_hdfs_uri and not fsspec: - return None, None - if not short_hdfs_uri: - fs, path = pyarrow.fs.FileSystem.from_uri(uri) + # Prefer fsspec over native pyarrow. + if fsspec: + fs = _get_fsspec_fs_and_path(uri) + if fs: _cached_fs[cache_key] = fs return fs, path - except (pyarrow.lib.ArrowInvalid, pyarrow.lib.ArrowNotImplementedError): - # Raised when URI not recognized - if not fsspec: - # Only return if fsspec is not installed - return None, None - - # Else, try to resolve protocol via fsspec - try: - fsspec_fs = fsspec.filesystem(parsed.scheme) - except ValueError: - # Raised when protocol not known - return None, None - - fsspec_handler = pyarrow.fs.FSSpecHandler - if parsed.scheme in ["gs", "gcs"]: - - # TODO(amogkam): Remove after https://github.com/fsspec/gcsfs/issues/498 is - # resolved. 
- try: - import gcsfs - # For minimal install that only needs python3-setuptools - if packaging.version.parse(gcsfs.__version__) > packaging.version.parse( - "2022.7.1" - ): - raise RuntimeError( - "`gcsfs` versions greater than '2022.7.1' are not " - f"compatible with pyarrow. You have gcsfs version " - f"{gcsfs.__version__}. Please downgrade your gcsfs " - f"version. See more details in " - f"https://github.com/fsspec/gcsfs/issues/498." - ) - except ImportError: - pass + # In case of hdfs filesystem, if uri does not have the netloc part below, it will + # fail with hdfs access error. For example 'hdfs:///user_folder/...' will + # fail, while only 'hdfs://namenode_server/user_foler/...' will work. + # Thus, if fsspec didn't return a filesystem, we return None. + hdfs_uri = parsed.scheme == "hdfs" + short_hdfs_uri = hdfs_uri and parsed.netloc == "" - # GS doesn't support `create_parents` arg in `create_dir()` - fsspec_handler = _CustomGCSHandler + if short_hdfs_uri: + return None, None - fs = pyarrow.fs.PyFileSystem(fsspec_handler(fsspec_fs)) - _cached_fs[cache_key] = fs + # If no fsspec filesystem was found, use pyarrow native filesystem. 
+ try: + fs, path = pyarrow.fs.FileSystem.from_uri(uri) + _cached_fs[cache_key] = fs + return fs, path + except (pyarrow.lib.ArrowInvalid, pyarrow.lib.ArrowNotImplementedError): + # Raised when URI not recognized + pass - return fs, path + return None, None def delete_at_uri(uri: str): diff --git a/python/ray/tune/execution/ray_trial_executor.py b/python/ray/tune/execution/ray_trial_executor.py index c9eeed663e87..5869b0100053 100644 --- a/python/ray/tune/execution/ray_trial_executor.py +++ b/python/ray/tune/execution/ray_trial_executor.py @@ -56,6 +56,10 @@ COPY_DIRECTORY_CHECKPOINTS_INSTEAD_OF_MOVING_ENV, "TUNE_CHECKPOINT_CLOUD_RETRY_NUM", "TUNE_CHECKPOINT_CLOUD_RETRY_WAIT_TIME_S", + "AWS_ACCESS_KEY_ID", + "AWS_SECRET_ACCESS_KEY", + "AWS_SECURITY_TOKEN", + "AWS_SESSION_TOKEN", } diff --git a/python/ray/tune/syncer.py b/python/ray/tune/syncer.py index 85a2923025ea..ce8c9644812b 100644 --- a/python/ray/tune/syncer.py +++ b/python/ray/tune/syncer.py @@ -1,4 +1,5 @@ import abc +import urllib.parse from functools import partial import threading from typing import ( @@ -18,6 +19,16 @@ import time from dataclasses import dataclass +try: + import fsspec +except Exception: + fsspec = None + +try: + import s3fs +except Exception: + s3fs = None + import ray from ray._private.thirdparty.tabulate.tabulate import tabulate from ray.air._internal.checkpoint_manager import CheckpointStorage, _TrackedCheckpoint @@ -34,6 +45,7 @@ from ray.tune.callback import Callback from ray.tune.result import TRAINING_ITERATION, TIME_TOTAL_S from ray.tune.utils.file_transfer import sync_dir_between_nodes +from ray.util import log_once from ray.util.annotations import PublicAPI, DeveloperAPI from ray.widgets import Template @@ -191,6 +203,31 @@ def validate_upload_dir(self, upload_dir: Optional[str] = None) -> bool: if not upload_dir and isinstance(self.syncer, Syncer): raise ValueError("Must specify an `upload_dir` to use a custom `syncer`.") + parsed = urllib.parse.urlparse(upload_dir) + # 
Todo: Only warn for pyarrow versions that are affected by + # https://github.com/apache/arrow/issues/32372#issuecomment-1421097792 + if ( + parsed.scheme + and not s3fs + and parsed.scheme.startswith("s3") + and log_once("fsspec_missing") + ): + logger.warning( + "You are using S3 for remote storage, but you don't have `s3fs` " + "installed. Due to a bug in PyArrow, this can lead to significant " + "slowdowns. To avoid this, install s3fs with " + "`pip install fsspec s3fs`." + ) + elif not fsspec and log_once("fsspec_missing"): + logger.warning( + "You are using remote storage, but you don't have `fsspec` " + "installed. This can lead to inefficient syncing behavior. " + "To avoid this, install fsspec with " + "`pip install fsspec`. Depending on your remote storage provider, " + "consider installing the respective fsspec-package " + "(see https://github.com/fsspec)." + ) + if isinstance(self.syncer, Syncer): return self.syncer.validate_upload_dir(upload_dir or self.upload_dir) else: diff --git a/python/ray/tune/tests/test_syncer.py b/python/ray/tune/tests/test_syncer.py index ee522c069bd7..86c57484d14e 100644 --- a/python/ray/tune/tests/test_syncer.py +++ b/python/ray/tune/tests/test_syncer.py @@ -8,6 +8,7 @@ from typing import List, Optional from unittest.mock import patch +import pyarrow.fs import pytest from freezegun import freeze_time @@ -19,7 +20,11 @@ from ray.tune import TuneError from ray.tune.syncer import _DefaultSyncer, Syncer, SyncConfig from ray.tune.utils.file_transfer import _pack_dir, _unpack_dir -from ray.air._internal.remote_storage import upload_to_uri, download_from_uri +from ray.air._internal.remote_storage import ( + upload_to_uri, + download_from_uri, + get_fs_and_path, +) @pytest.fixture @@ -897,7 +902,7 @@ def train_func(config): ) -def test_sync_folder_with_many_files_s3(mock_s3_bucket_uri, tmp_path): +def _test_sync_folder_with_many_files_s3(mock_s3_bucket_uri, tmp_path): source_dir = tmp_path / "source" check_dir = tmp_path / "check" 
source_dir.mkdir() @@ -912,6 +917,28 @@ def test_sync_folder_with_many_files_s3(mock_s3_bucket_uri, tmp_path): assert (check_dir / "255").exists() +def test_sync_folder_with_many_files_s3_native(mock_s3_bucket_uri, tmp_path): + with patch("ray.air._internal.remote_storage.fsspec", None): + fs, path = get_fs_and_path(mock_s3_bucket_uri) + + assert isinstance(fs, pyarrow.fs.S3FileSystem) + + _test_sync_folder_with_many_files_s3(mock_s3_bucket_uri, tmp_path) + + +def test_sync_folder_with_many_files_s3_fsspec(mock_s3_bucket_uri, tmp_path): + try: + import s3fs # noqa: F401 + except Exception as exc: + raise AssertionError("This test requires s3fs to be installed") from exc + + fs, path = get_fs_and_path(mock_s3_bucket_uri) + + assert isinstance(fs, pyarrow.fs.PyFileSystem) + + _test_sync_folder_with_many_files_s3(mock_s3_bucket_uri, tmp_path) + + def test_sync_folder_with_many_files_fs(tmpdir): # Create 256 files to upload for i in range(256): diff --git a/python/requirements/ml/requirements_tune.txt b/python/requirements/ml/requirements_tune.txt index a121cb2b2d60..44f4704454c8 100644 --- a/python/requirements/ml/requirements_tune.txt +++ b/python/requirements/ml/requirements_tune.txt @@ -34,6 +34,7 @@ lightning-bolts==0.4.0 protobuf==3.19.6 pytorch-lightning==1.6.5 fairscale==0.4.6 +s3fs==2023.1.0 shortuuid==1.0.1 scikit-optimize==0.9.0 sigopt==7.5.0 From 3d26e9f23758b46e99f40f350dc6dd42759e5fe7 Mon Sep 17 00:00:00 2001 From: Kai Fricke Date: Thu, 27 Apr 2023 13:43:50 +0100 Subject: [PATCH 120/424] [mac/arm64] Fix bazel install for mac arm64 (#34823) #34246 switched to using bazelisk for bazel installs, but an unbound variable error in the `install-bazel.sh` script: ``` install-bazel.sh: line 85: url: unbound variable ``` In result, bazel is not correctly installed, and the wheel build job fails. The solution is to check for both `aarch64` and `arm64` in architecture types, which will result in the variable being set. 
We also raise a proper error if the URL cannot be constructed. Lastly, it seems like bazel is occasionally found - I'm not completely sure where this happens, as it's on the same instance, and the previous job usually did not succeed. I'm wondering if it's manual intervention :-) Anyway, this PR should fix the issue for now. Signed-off-by: Kai Fricke --- ci/env/install-bazel.sh | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/ci/env/install-bazel.sh b/ci/env/install-bazel.sh index 331aeabb9c3c..87c59b34875a 100755 --- a/ci/env/install-bazel.sh +++ b/ci/env/install-bazel.sh @@ -73,11 +73,14 @@ if [ "${BAZEL_CONFIG_ONLY-}" != "1" ]; then export PATH=$PATH:"$HOME/bin" fi - if [ "${architecture}" = "aarch64" ]; then + if [ "${architecture}" = "aarch64" ] || [ "${architecture}" = "arm64" ]; then # architecture is "aarch64", but the bazel tag is "arm64" url="https://github.com/bazelbuild/bazelisk/releases/download/${BAZELISK_VERSION}/bazelisk-${platform}-arm64" elif [ "${architecture}" = "x86_64" ]; then url="https://github.com/bazelbuild/bazelisk/releases/download/${BAZELISK_VERSION}/bazelisk-${platform}-amd64" + else + echo "Could not found matching bazelisk URL for platform ${platform} and architecture ${architecture}" + exit 1 fi if [ "$INSTALL_USER" = "1" ]; then From bcae135aeb6857eaeedf8116d46ec322bb966714 Mon Sep 17 00:00:00 2001 From: Kai Fricke Date: Thu, 27 Apr 2023 14:42:02 +0100 Subject: [PATCH 121/424] [rllib] Fix rlmodule_guide tensor spec (#34821) #34493 unified the tensor specifications in rllib, but it missed a doc test that has been failing since. This PR updates this doc test to use the new API. 
Signed-off-by: Kai Fricke --- doc/source/rllib/doc_code/rlmodule_guide.py | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) diff --git a/doc/source/rllib/doc_code/rlmodule_guide.py b/doc/source/rllib/doc_code/rlmodule_guide.py index 98e153ca6d35..77f5fa2084e0 100644 --- a/doc/source/rllib/doc_code/rlmodule_guide.py +++ b/doc/source/rllib/doc_code/rlmodule_guide.py @@ -1,13 +1,7 @@ # flake8: noqa from ray.rllib.utils.annotations import override - -# TODO (Kourosh): Remove this when the location of the import is fixed. -try: - from ray.rllib.models.specs.typing import SpecType - from ray.rllib.models.specs.specs_torch import TorchTensorSpec -except ImportError: - from ray.rllib.core.models.specs.typing import SpecType - from ray.rllib.core.models.specs.specs_torch import TorchTensorSpec +from ray.rllib.core.models.specs.typing import SpecType +from ray.rllib.core.models.specs.specs_base import TensorSpec # __enabling-rlmodules-in-configs-begin__ @@ -267,7 +261,7 @@ def input_specs_exploration(self) -> SpecType: # and its value is a torch.Tensor with shape (b, h) where b is the # batch size (determined at run-time) and h is the hidden size # (fixed at 10). 
- return {"obs": TorchTensorSpec("b, h", h=10)} + return {"obs": TensorSpec("b, h", h=10, framework="torch")} # __extend-spec-checking-torch-specs-end__ From 9f3c71854aa8008be409226e4e260ebb6770714a Mon Sep 17 00:00:00 2001 From: SangBin Cho Date: Thu, 27 Apr 2023 22:50:49 +0900 Subject: [PATCH 122/424] [Release tests] Add memory tracking to scheduling_test_many_0s_tasks_many_nodes (#34736) --- release/benchmarks/distributed/test_scheduling.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/release/benchmarks/distributed/test_scheduling.py b/release/benchmarks/distributed/test_scheduling.py index 2e4dd138d7a7..619a6efea0ee 100644 --- a/release/benchmarks/distributed/test_scheduling.py +++ b/release/benchmarks/distributed/test_scheduling.py @@ -3,6 +3,7 @@ from time import time, sleep from math import floor from ray._private.test_utils import safe_write_to_results_json +import ray._private.test_utils as test_utils @ray.remote @@ -86,6 +87,7 @@ def start_actor(num_actors, num_actors_per_nodes, job): ) ray.init(address="auto") + monitor_actor = test_utils.monitor_memory_usage() total_cpus_per_node = [node["Resources"].get("CPU", 0) for node in ray.nodes()] num_nodes = len(total_cpus_per_node) @@ -104,6 +106,12 @@ def start_actor(num_actors, num_actors_per_nodes, job): args.total_num_actors, args.num_actors_per_nodes, job ) + ray.get(monitor_actor.stop_run.remote()) + used_gb, usage = ray.get(monitor_actor.get_peak_memory_info.remote()) + print(f"Peak memory usage: {round(used_gb, 2)}GB") + print(f"Peak memory usage per processes:\n {usage}") + del monitor_actor + result = { "total_num_task": args.total_num_task, "num_cpu_per_task": args.num_cpu_per_task, @@ -115,6 +123,8 @@ def start_actor(num_actors, num_actors_per_nodes, job): "submission_cost": submission_cost, "ready_cost": ready_cost, "actor_job_cost": actor_job_cost, + "_peak_memory": round(used_gb, 2), + "_peak_process_memory": usage, "_runtime": submission_cost + ready_cost + actor_job_cost, } 
From f5293b8c9cb0b20ac94ac359c2af168a9e724466 Mon Sep 17 00:00:00 2001 From: Jiajun Yao Date: Thu, 27 Apr 2023 07:14:28 -0700 Subject: [PATCH 123/424] Move virtualenv to ray["default"] dependencies (#34667) Virtualenv is needed for runtime env and runtime env is only enabled for ray["default"] Signed-off-by: Jiajun Yao --- ci/ci.sh | 12 -- ci/env/check_minimal_install.py | 1 + .../ray/tests/test_runtime_env_ray_minimal.py | 28 +++-- python/ray/tests/test_usage_stats.py | 117 +++++++----------- python/requirements.txt | 2 +- python/setup.py | 2 +- 6 files changed, 64 insertions(+), 98 deletions(-) diff --git a/ci/ci.sh b/ci/ci.sh index 98071a8bfeaa..2478bb715d94 100755 --- a/ci/ci.sh +++ b/ci/ci.sh @@ -794,20 +794,8 @@ run_minimal_test() { # shellcheck disable=SC2086 bazel test --test_output=streamed --config=ci --test_env=RAY_MINIMAL=1 ${BAZEL_EXPORT_OPTIONS} python/ray/tests/test_runtime_env_ray_minimal # shellcheck disable=SC2086 - bazel test --test_output=streamed --config=ci --test_env=RAY_MINIMAL=1 ${BAZEL_EXPORT_OPTIONS} python/ray/tests/test_runtime_env - # shellcheck disable=SC2086 - bazel test --test_output=streamed --config=ci --test_env=RAY_MINIMAL=1 ${BAZEL_EXPORT_OPTIONS} python/ray/tests/test_runtime_env_2 - # shellcheck disable=SC2086 bazel test --test_output=streamed --config=ci ${BAZEL_EXPORT_OPTIONS} python/ray/tests/test_utils - # Todo: Make compatible with python 3.9/3.10 - if [ "$1" != "3.9" ] && [ "$1" != "3.10" ]; then - # shellcheck disable=SC2086 - bazel test --test_output=streamed --config=ci ${BAZEL_EXPORT_OPTIONS} python/ray/tests/test_runtime_env_complicated - fi - - # shellcheck disable=SC2086 - bazel test --test_output=streamed --config=ci ${BAZEL_EXPORT_OPTIONS} python/ray/tests/test_runtime_env_validation # shellcheck disable=SC2086 bazel test --test_output=streamed --config=ci --test_env=RAY_MINIMAL=1 ${BAZEL_EXPORT_OPTIONS} python/ray/tests/test_serve_ray_minimal # shellcheck disable=SC2086 diff --git 
a/ci/env/check_minimal_install.py b/ci/env/check_minimal_install.py index 01beb5adf4c7..aa3d6a7f337c 100644 --- a/ci/env/check_minimal_install.py +++ b/ci/env/check_minimal_install.py @@ -18,6 +18,7 @@ "opencensus", "prometheus_client", "smart_open", + "virtualenv", "torch", "tensorflow", "jax", diff --git a/python/ray/tests/test_runtime_env_ray_minimal.py b/python/ray/tests/test_runtime_env_ray_minimal.py index 82d7e77b62b8..2b3d68e51286 100644 --- a/python/ray/tests/test_runtime_env_ray_minimal.py +++ b/python/ray/tests/test_runtime_env_ray_minimal.py @@ -14,15 +14,16 @@ import pytest import ray +from ray.exceptions import RuntimeEnvSetupError -def _test_task_and_actor(capsys): +def _test_task_and_actor(): @ray.remote def f(): return 1 - # with pytest.raises(RuntimeEnvSetupError): - assert ray.get(f.options(runtime_env={"pip": ["requests"]}).remote()) == 1 + with pytest.raises(RuntimeEnvSetupError, match="install virtualenv"): + ray.get(f.options(runtime_env={"pip": ["requests"]}).remote()) @ray.remote class A: @@ -30,7 +31,9 @@ def task(self): return 1 a = A.options(runtime_env={"pip": ["requests"]}).remote() - assert ray.get(a.task.remote()) == 1 + + with pytest.raises(RuntimeEnvSetupError, match="install virtualenv"): + ray.get(a.task.remote()) @pytest.mark.skipif( @@ -45,9 +48,9 @@ def task(self): ["ray start --head --ray-client-server-port 25553 --port 0"], indirect=True, ) -def test_ray_client_task_actor(call_ray_start, capsys): +def test_ray_client_task_actor(call_ray_start): ray.init("ray://localhost:25553") - _test_task_and_actor(capsys) + _test_task_and_actor() @pytest.mark.skipif( @@ -57,9 +60,9 @@ def test_ray_client_task_actor(call_ray_start, capsys): os.environ.get("RAY_MINIMAL") != "1", reason="This test is only run in CI with a minimal Ray installation.", ) -def test_task_actor(shutdown_only, capsys): +def test_task_actor(shutdown_only): ray.init() - _test_task_and_actor(capsys) + _test_task_and_actor() @pytest.mark.skipif( @@ -69,14 +72,15 @@ def 
test_task_actor(shutdown_only, capsys): os.environ.get("RAY_MINIMAL") != "1", reason="This test is only run in CI with a minimal Ray installation.", ) -def test_ray_init(shutdown_only, capsys): +def test_ray_init(shutdown_only): ray.init(runtime_env={"pip": ["requests"]}) @ray.remote def f(): return 1 - assert ray.get(f.remote()) == 1 + with pytest.raises(RuntimeEnvSetupError, match="install virtualenv"): + ray.get(f.remote()) @pytest.mark.skipif( @@ -92,7 +96,9 @@ def f(): indirect=True, ) def test_ray_client_init(call_ray_start): - ray.init("ray://localhost:25552", runtime_env={"pip": ["requests"]}) + with pytest.raises(ConnectionAbortedError) as excinfo: + ray.init("ray://localhost:25552", runtime_env={"pip": ["requests"]}) + assert "install virtualenv" in str(excinfo.value) if __name__ == "__main__": diff --git a/python/ray/tests/test_usage_stats.py b/python/ray/tests/test_usage_stats.py index ba45c29ce387..2493c19c11cb 100644 --- a/python/ray/tests/test_usage_stats.py +++ b/python/ray/tests/test_usage_stats.py @@ -9,6 +9,7 @@ import requests import pytest from jsonschema import validate +from http.server import BaseHTTPRequestHandler, HTTPServer import ray import ray._private.usage.usage_constants as usage_constants @@ -966,10 +967,6 @@ def test_usage_lib_get_cluster_config_to_report( assert cluster_config_to_report.cloud_provider == "kuberay" -@pytest.mark.skipif( - sys.platform == "win32", - reason="Test depends on runtime env feature not supported on Windows.", -) # TODO(https://github.com/ray-project/ray/issues/33486) @pytest.mark.skipif( sys.version_info >= (3, 11, 0), @@ -983,8 +980,7 @@ def test_usage_lib_report_data( m.setenv("RAY_USAGE_STATS_REPORT_URL", "http://127.0.0.1:8000") cluster = ray_start_cluster cluster.add_node(num_cpus=0) - # Runtime env is required to run this test in minimal installation test. 
- ray.init(address=cluster.address, runtime_env={"pip": ["ray[serve]"]}) + ray.init(address=cluster.address) """ Make sure the generated data is following the schema. """ @@ -1024,43 +1020,33 @@ def test_usage_lib_report_data( Make sure report usage data works as expected """ - @ray.remote(num_cpus=0) - class ServeInitator: - def __init__(self): - # Start the ray serve server to verify requests are sent - # to the right place. - from ray import serve - - serve.start() + class UsageStatsServer(BaseHTTPRequestHandler): + expected_data = None - @serve.deployment(ray_actor_options={"num_cpus": 0}) - async def usage(request): - body = await request.json() - if body == asdict(d): - return True - else: - return False + def do_POST(self): + content_length = int(self.headers["Content-Length"]) + post_data = self.rfile.read(content_length) + if json.loads(post_data) == self.expected_data: + self.send_response(200) + else: + self.send_response(400) + self.send_header("Content-type", "text/html") + self.end_headers() - usage.deploy() - - def ready(self): - pass + @ray.remote(num_cpus=0) + def run_usage_stats_server(expected_data): + UsageStatsServer.expected_data = expected_data + server = HTTPServer(("127.0.0.1", 8000), UsageStatsServer) + server.serve_forever() - # We need to start a serve with runtime env to make this test - # work with minimal installation. - s = ServeInitator.remote() - ray.get(s.ready.remote()) + run_usage_stats_server.remote(asdict(d)) # Query our endpoint over HTTP. 
- r = client.report_usage_data("http://127.0.0.1:8000/usage", d) - r.raise_for_status() - assert json.loads(r.text) is True + wait_for_condition( + lambda: client.report_usage_data("http://127.0.0.1:8000", d), timeout=30 + ) -@pytest.mark.skipif( - sys.platform == "win32", - reason="Test depends on runtime env feature not supported on Windows.", -) # TODO(https://github.com/ray-project/ray/issues/33486) @pytest.mark.skipif( sys.version_info >= (3, 11, 0), @@ -1086,7 +1072,7 @@ def test_usage_report_e2e( with monkeypatch.context() as m: m.setenv("HOME", str(tmp_path)) m.setenv("RAY_USAGE_STATS_ENABLED", "1") - m.setenv("RAY_USAGE_STATS_REPORT_URL", "http://127.0.0.1:8000/usage") + m.setenv("RAY_USAGE_STATS_REPORT_URL", "http://127.0.0.1:8000") m.setenv("RAY_USAGE_STATS_REPORT_INTERVAL_S", "1") m.setenv("RAY_USAGE_STATS_EXTRA_TAGS", "extra_k1=extra_v1") cluster = ray_start_cluster @@ -1130,32 +1116,25 @@ def get_payload(self): reporter = StatusReporter.remote() - @ray.remote(num_cpus=0, runtime_env={"pip": ["ray[serve]"]}) - class ServeInitiator: - def __init__(self): - # This is used in the worker process - # so it won't be tracked as library usage. - from ray import serve - - serve.start() + class UsageStatsServer(BaseHTTPRequestHandler): + reporter = None - # Usage report should be sent to the URL every 1 second. 
- @serve.deployment(ray_actor_options={"num_cpus": 0}) - async def usage(request): - body = await request.json() - reporter.reported.remote() - reporter.report_payload.remote(body) - return True + def do_POST(self): + content_length = int(self.headers["Content-Length"]) + post_data = self.rfile.read(content_length) + self.reporter.reported.remote() + self.reporter.report_payload.remote(json.loads(post_data)) + self.send_response(200) + self.send_header("Content-type", "text/html") + self.end_headers() - usage.deploy() - - def ready(self): - pass + @ray.remote(num_cpus=0) + def run_usage_stats_server(reporter): + UsageStatsServer.reporter = reporter + server = HTTPServer(("127.0.0.1", 8000), UsageStatsServer) + server.serve_forever() - # We need to start a serve with runtime env to make this test - # work with minimal installation. - s = ServeInitiator.remote() - ray.get(s.ready.remote()) + run_usage_stats_server.remote(reporter) """ Verify the usage stats are reported to the server. @@ -1202,9 +1181,6 @@ def ready(self): "_test2": "extra_v3", "dashboard_metrics_grafana_enabled": "False", "dashboard_metrics_prometheus_enabled": "False", - "serve_num_deployments": "1", - "serve_num_gpu_deployments": "0", - "serve_api_version": "v1", "actor_num_created": "0", "pg_num_created": "0", "num_actor_creation_tasks": "0", @@ -1221,11 +1197,9 @@ def ready(self): assert payload["total_num_nodes"] == 1 assert payload["total_num_running_jobs"] == 1 if os.environ.get("RAY_MINIMAL") == "1": - # Since we start a serve actor for mocking a server using runtime env. - assert set(payload["library_usages"]) == {"serve"} + assert set(payload["library_usages"]) == set() else: - # Serve is recorded due to our mock server. - assert set(payload["library_usages"]) == {"rllib", "train", "tune", "serve"} + assert set(payload["library_usages"]) == {"rllib", "train", "tune"} validate(instance=payload, schema=schema) """ Verify the usage_stats.json is updated. 
@@ -1405,8 +1379,8 @@ def verify(): @pytest.mark.skipif( - sys.platform == "win32", - reason="Test depends on runtime env feature not supported on Windows.", + os.environ.get("RAY_MINIMAL") == "1", + reason="This test is not supposed to work for minimal installation.", ) # TODO(https://github.com/ray-project/ray/issues/33486) @pytest.mark.skipif( @@ -1442,10 +1416,7 @@ def objective(*args): tune.run(objective) - # Use a runtime env to run tests in minimal installation. - a = ActorWithLibImport.options( - runtime_env={"pip": ["ray[rllib]", "ray[tune]"]} - ).remote() + a = ActorWithLibImport.remote() ray.get(a.ready.remote()) """ diff --git a/python/requirements.txt b/python/requirements.txt index 4eb0bc927bc5..932f8f277907 100644 --- a/python/requirements.txt +++ b/python/requirements.txt @@ -18,7 +18,6 @@ pyyaml aiosignal frozenlist requests -virtualenv>=20.0.24, < 20.21.1 # Python version-specific requirements dataclasses; python_version < '3.7' @@ -53,6 +52,7 @@ requests kubernetes colorful lz4 +virtualenv>=20.0.24, < 20.21.1 # Manually parse pandas requirement pandas>=1.0.5; python_version < '3.7' pandas>=1.3.0; python_version >= '3.7' diff --git a/python/setup.py b/python/setup.py index 969d64963f98..ca811d7395fc 100644 --- a/python/setup.py +++ b/python/setup.py @@ -252,6 +252,7 @@ def get_packages(self): "pydantic", "prometheus_client >= 0.7.1", "smart_open", + "virtualenv >=20.0.24, < 20.21.1", # For pip runtime env. ], "serve": ["uvicorn", "requests", "starlette", "fastapi", "aiorwlock"], "tune": ["pandas", "tensorboardX>=1.9", "requests", pyarrow_dep], @@ -330,7 +331,6 @@ def get_packages(self): # Light weight requirement, can be replaced with "typing" once # we deprecate Python 3.7 (this will take a while). "typing_extensions; python_version < '3.8'", - "virtualenv >=20.0.24, < 20.21.1", # For pip runtime env. 
] From 7aa5d155c6adf194a16716eaff1d0b70a7156d4b Mon Sep 17 00:00:00 2001 From: Linniem Date: Thu, 27 Apr 2023 23:13:50 +0800 Subject: [PATCH 124/424] [Dashboard] Add memory unit converter test (#33368) Add missing test(#30508). --- .../client/src/util/converter.unit.test.ts | 40 +++++++++++++++++++ 1 file changed, 40 insertions(+) create mode 100644 dashboard/client/src/util/converter.unit.test.ts diff --git a/dashboard/client/src/util/converter.unit.test.ts b/dashboard/client/src/util/converter.unit.test.ts new file mode 100644 index 000000000000..a3d544c78d60 --- /dev/null +++ b/dashboard/client/src/util/converter.unit.test.ts @@ -0,0 +1,40 @@ +import { memoryConverter } from "./converter"; + +describe("memoryConverter", () => { + const table: { name: string; input: number; expected: string }[] = [ + { + name: "convert to Bytes", + input: 4, + expected: "4.0000B", + }, + { + name: "convert to KB", + input: 5 * 1024 ** 1, + expected: "5.00KB", + }, + { + name: "convert to MB", + input: 6 * 1024 ** 2, + expected: "6.00MB", + }, + { + name: "convert to GB", + input: 7 * 1024 ** 3, + expected: "7.00GB", + }, + { + name: "convert to TB", + input: 8 * 1024 ** 4, + expected: "8.00TB", + }, + { + name: "convert to PB", + input: 9 * 1024 ** 5, + expected: "9.00PB", + }, + ]; + + test.each(table)("$name", ({ input, expected }) => { + expect(memoryConverter(input)).toEqual(expected); + }); +}); From 2024a06570a2dbfa0d75e639be074d3f91be68be Mon Sep 17 00:00:00 2001 From: Sven Mika Date: Thu, 27 Apr 2023 18:22:17 +0200 Subject: [PATCH 125/424] [RLlib] Learner API: Fix and unify grad-clipping configs and behaviors. 
(#34464) --- rllib/algorithms/a3c/a3c.py | 6 + rllib/algorithms/algorithm_config.py | 44 +++++- rllib/algorithms/appo/appo.py | 28 ++-- rllib/algorithms/dreamer/dreamer.py | 6 + rllib/algorithms/impala/impala.py | 27 ++-- rllib/algorithms/qmix/qmix.py | 7 +- rllib/algorithms/simple_q/simple_q.py | 8 +- rllib/core/learner/tests/test_learner.py | 162 ++++++++++++++++++---- rllib/core/learner/tf/tf_learner.py | 43 +++--- rllib/core/learner/torch/torch_learner.py | 24 +++- rllib/utils/tf_utils.py | 48 ++++++- rllib/utils/torch_utils.py | 56 +++++++- 12 files changed, 367 insertions(+), 92 deletions(-) diff --git a/rllib/algorithms/a3c/a3c.py b/rllib/algorithms/a3c/a3c.py index 7f5a661cb94d..0ecee6f78134 100644 --- a/rllib/algorithms/a3c/a3c.py +++ b/rllib/algorithms/a3c/a3c.py @@ -67,7 +67,13 @@ def __init__(self, algo_class=None): self.use_critic = True self.use_gae = True self.lambda_ = 1.0 + self.grad_clip = 40.0 + # Note: Only when using _enable_learner_api=True can the clipping mode be + # configured by the user. On the old API stack, RLlib will always clip by + # global_norm, no matter the value of `grad_clip_by`. + self.grad_clip_by = "global_norm" + self.lr_schedule = None self.vf_loss_coeff = 0.5 self.entropy_coeff = 0.01 diff --git a/rllib/algorithms/algorithm_config.py b/rllib/algorithms/algorithm_config.py index b2b3e8467453..d2c4c19e2206 100644 --- a/rllib/algorithms/algorithm_config.py +++ b/rllib/algorithms/algorithm_config.py @@ -316,6 +316,8 @@ def __init__(self, algo_class=None): # `self.training()` self.gamma = 0.99 self.lr = 0.001 + self.grad_clip = None + self.grad_clip_by = "global_norm" self.train_batch_size = 32 self.model = copy.deepcopy(MODEL_DEFAULTS) self.optimizer = {} @@ -881,7 +883,6 @@ def validate(self) -> None: # RLModule.forward_exploration() method or setup model parameters such that it # will disable the stochasticity of this method (e.g. by setting the std to 0 # or setting temperature to 0 for the Categorical distribution). 
- if self._enable_rl_module_api and not self.explore: raise ValueError( "When RLModule API is enabled, explore parameter cannot be False. " @@ -895,6 +896,13 @@ def validate(self) -> None: "setting temperature to 0 for the Categorical distribution)." ) + # Validate grad clipping settings. + if self.grad_clip_by not in ["value", "norm", "global_norm"]: + raise ValueError( + f"`grad_clip_by` ({self.grad_clip_by}) must be one of: 'value', " + "'norm', or 'global_norm'!" + ) + # TODO: Deprecate self.simple_optimizer! # Multi-GPU settings. if self.simple_optimizer is True: @@ -1031,7 +1039,7 @@ def validate(self) -> None: "(i.e. num_learner_workers = 0)" ) - # resolve learner class + # Resolve learner class. if self._enable_learner_api and self.learner_class is None: learner_class_path = self.get_default_learner_class() self.learner_class = deserialize_type(learner_class_path) @@ -1591,8 +1599,11 @@ def rollouts( def training( self, + *, gamma: Optional[float] = NotProvided, lr: Optional[float] = NotProvided, + grad_clip: Optional[float] = NotProvided, + grad_clip_by: Optional[str] = NotProvided, train_batch_size: Optional[int] = NotProvided, model: Optional[dict] = NotProvided, optimizer: Optional[dict] = NotProvided, @@ -1605,6 +1616,29 @@ def training( Args: gamma: Float specifying the discount factor of the Markov Decision process. lr: The default learning rate. + grad_clip: The value to use for gradient clipping. Depending on the + `grad_clip_by` setting, gradients will either be clipped by value, + norm, or global_norm (see docstring on `grad_clip_by` below for more + details). If `grad_clip` is None, gradients will be left unclipped. + grad_clip_by: If 'value': Will clip all computed gradients individually + inside the interval [-grad_clip, +grad_clip]. + If 'norm', will compute the L2-norm of each weight/bias + gradient tensor and then clip all gradients such that this L2-norm does + not exceed `grad_clip`. 
The L2-norm of a tensor is computed via: + `sqrt(SUM(w0^2, w1^2, ..., wn^2))` where w[i] are the elements of the + tensor (no matter what the shape of this tensor is). + If 'global_norm', will compute the square of the L2-norm of each + weight/bias gradient tensor, sum up all these squared L2-norms across + all given gradient tensors (e.g. the entire module to + be updated), square root that overall sum, and then clip all gradients + such that this "global" L2-norm does not exceed the given value. + The global L2-norm over a list of tensors (e.g. W and V) is computed + via: + `sqrt[SUM(w0^2, w1^2, ..., wn^2) + SUM(v0^2, v1^2, ..., vm^2)]`, where + w[i] and v[j] are the elements of the tensors W and V (no matter what + the shapes of these tensors are). + Note that if `grad_clip` is None, the `grad_clip_by` setting has no + effect. train_batch_size: Training batch size, if applicable. model: Arguments passed into the policy model. See models/catalog.py for a full list of the available model options. @@ -1633,6 +1667,10 @@ def training( self.gamma = gamma if lr is not NotProvided: self.lr = lr + if grad_clip is not NotProvided: + self.grad_clip = grad_clip + if grad_clip_by is not NotProvided: + self.grad_clip_by = grad_clip_by if train_batch_size is not NotProvided: self.train_batch_size = train_batch_size if model is not NotProvided: @@ -3089,6 +3127,8 @@ def get_learner_group_config(self, module_spec: ModuleSpec) -> LearnerGroupConfi # TODO (Kourosh): optimizer config can now be more complicated. 
optimizer_config={ "lr": self.lr, + "grad_clip": self.grad_clip, + "grad_clip_by": self.grad_clip_by, }, learner_hps=self.learner_hps, ) diff --git a/rllib/algorithms/appo/appo.py b/rllib/algorithms/appo/appo.py index 1750457236b5..d616b8656b01 100644 --- a/rllib/algorithms/appo/appo.py +++ b/rllib/algorithms/appo/appo.py @@ -16,7 +16,6 @@ from ray.rllib.algorithms.impala.impala import Impala, ImpalaConfig from ray.rllib.algorithms.appo.tf.appo_tf_learner import AppoHPs, LEARNER_RESULTS_KL_KEY from ray.rllib.algorithms.ppo.ppo import UpdateKL -from ray.rllib.execution.common import _get_shared_metrics, STEPS_SAMPLED_COUNTER from ray.rllib.core.rl_module.rl_module import SingleAgentRLModuleSpec from ray.rllib.policy.policy import Policy from ray.rllib.utils.annotations import override @@ -104,7 +103,13 @@ def __init__(self, algo_class=None): self.learner_queue_timeout = 300 self.max_sample_requests_in_flight_per_worker = 2 self.broadcast_interval = 1 + self.grad_clip = 40.0 + # Note: Only when using _enable_learner_api=True can the clipping mode be + # configured by the user. On the old API stack, RLlib will always clip by + # global_norm, no matter the value of `grad_clip_by`. + self.grad_clip_by = "global_norm" + self.opt_type = "adam" self.lr = 0.0005 self.lr_schedule = None @@ -237,29 +242,12 @@ def validate(self) -> None: self._learner_hps.clip_param = self.clip_param +# Still used by one of the old checkpoints in tests. +# Keep a shim version of this around. 
class UpdateTargetAndKL: def __init__(self, workers, config): self.workers = workers self.config = config - self.update_kl = UpdateKL(workers) - self.target_update_freq = ( - config["num_sgd_iter"] * config["minibatch_buffer_size"] - ) - - def __call__(self, fetches): - metrics = _get_shared_metrics() - cur_ts = metrics.counters[STEPS_SAMPLED_COUNTER] - last_update = metrics.counters[LAST_TARGET_UPDATE_TS] - if cur_ts - last_update > self.target_update_freq: - metrics.counters[NUM_TARGET_UPDATES] += 1 - metrics.counters[LAST_TARGET_UPDATE_TS] = cur_ts - # Update Target Network - self.workers.local_worker().foreach_policy_to_train( - lambda p, _: p.update_target() - ) - # Also update KL Coeff - if self.config.use_kl_loss: - self.update_kl(fetches) class APPO(Impala): diff --git a/rllib/algorithms/dreamer/dreamer.py b/rllib/algorithms/dreamer/dreamer.py index 808f28202bf8..f0c438d47cb7 100644 --- a/rllib/algorithms/dreamer/dreamer.py +++ b/rllib/algorithms/dreamer/dreamer.py @@ -77,7 +77,13 @@ def __init__(self): self.td_model_lr = 6e-4 self.actor_lr = 8e-5 self.critic_lr = 8e-5 + self.grad_clip = 100.0 + # Note: Only when using _enable_learner_api=True can the clipping mode be + # configured by the user. On the old API stack, RLlib will always clip by + # global_norm, no matter the value of `grad_clip_by`. 
+ self.grad_clip_by = "global_norm" + self.lambda_ = 0.95 self.dreamer_train_iters = 100 self.batch_size = 50 diff --git a/rllib/algorithms/impala/impala.py b/rllib/algorithms/impala/impala.py index 6f4c23c352da..1441593482b2 100644 --- a/rllib/algorithms/impala/impala.py +++ b/rllib/algorithms/impala/impala.py @@ -1,4 +1,5 @@ import copy +from functools import partial import logging import platform import queue @@ -15,10 +16,6 @@ _reduce_impala_results, ) from ray.rllib.algorithms.ppo.ppo_catalog import PPOCatalog -from ray.rllib.core.learner.learner_group_config import ( - LearnerGroupConfig, - ModuleSpec, -) from ray.rllib.core.rl_module.rl_module import SingleAgentRLModuleSpec from ray.rllib.evaluation.worker_set import handle_remote_call_result_errors from ray.rllib.execution.buffers.mixin_replay_buffer import MixInMultiAgentReplayBuffer @@ -125,7 +122,13 @@ def __init__(self, algo_class=None): self.timeout_s_aggregator_manager = 0.0 self.broadcast_interval = 1 self.num_aggregation_workers = 0 + self.grad_clip = 40.0 + # Note: Only when using _enable_learner_api=True can the clipping mode be + # configured by the user. On the old API stack, RLlib will always clip by + # global_norm, no matter the value of `grad_clip_by`. + self.grad_clip_by = "global_norm" + self.opt_type = "adam" self.lr_schedule = None self.decay = 0.99 @@ -422,16 +425,6 @@ def validate(self) -> None: self.vtrace_clip_pg_rho_threshold ) - @override(AlgorithmConfig) - def get_learner_group_config(self, module_spec: ModuleSpec) -> LearnerGroupConfig: - lg_config = super().get_learner_group_config(module_spec) - optim_config = lg_config.optimizer_config - # TODO(avnishn): Make grad_clip a default parameter in algorithm_config's base - # class - optim_config.update({"grad_clip": self.grad_clip}) - lg_config = lg_config.learner(optimizer_config=optim_config) - return lg_config - def get_replay_ratio(self) -> float: """Returns replay ratio (between 0.0 and 1.0) based off self.replay_proportion. 
@@ -1051,6 +1044,10 @@ def process_experiences_tree_aggregation( workers. """ + + def _process_episodes(actor, batch): + return actor.process_episodes(ray.get(batch)) + for _, batch in worker_to_sample_batches_refs: assert isinstance(batch, ObjectRef), ( "For efficiency, process_experiences_tree_aggregation should " @@ -1061,7 +1058,7 @@ def process_experiences_tree_aggregation( self._aggregator_actor_manager.healthy_actor_ids() ) calls_placed = self._aggregator_actor_manager.foreach_actor_async( - lambda actor: actor.process_episodes(ray.get(batch)), + partial(_process_episodes, batch=batch), remote_actor_ids=[aggregator_id], ) if calls_placed <= 0: diff --git a/rllib/algorithms/qmix/qmix.py b/rllib/algorithms/qmix/qmix.py index 5c00a6a4ac9d..66025a359ca1 100644 --- a/rllib/algorithms/qmix/qmix.py +++ b/rllib/algorithms/qmix/qmix.py @@ -77,7 +77,12 @@ def __init__(self): self.double_q = True self.optim_alpha = 0.99 self.optim_eps = 0.00001 - self.grad_clip = 10 + + self.grad_clip = 10.0 + # Note: Only when using _enable_learner_api=True can the clipping mode be + # configured by the user. On the old API stack, RLlib will always clip by + # global_norm, no matter the value of `grad_clip_by`. + self.grad_clip_by = "global_norm" # QMix-torch overrides the TorchPolicy's learn_on_batch w/o specifying a # alternative `learn_on_loaded_batch` alternative for the GPU. diff --git a/rllib/algorithms/simple_q/simple_q.py b/rllib/algorithms/simple_q/simple_q.py index 2f9a8e60bd12..ff7fa6966b98 100644 --- a/rllib/algorithms/simple_q/simple_q.py +++ b/rllib/algorithms/simple_q/simple_q.py @@ -117,7 +117,13 @@ def __init__(self, algo_class=None): self.store_buffer_in_checkpoints = False self.lr_schedule = None self.adam_epsilon = 1e-8 - self.grad_clip = 40 + + self.grad_clip = 40.0 + # Note: Only when using _enable_learner_api=True can the clipping mode be + # configured by the user. 
On the old API stack, RLlib will always clip by + # global_norm, no matter the value of `grad_clip_by`. + self.grad_clip_by = "global_norm" + self.tau = 1.0 # __sphinx_doc_end__ # fmt: on diff --git a/rllib/core/learner/tests/test_learner.py b/rllib/core/learner/tests/test_learner.py index 4cc4d8f895f2..9d07610f16aa 100644 --- a/rllib/core/learner/tests/test_learner.py +++ b/rllib/core/learner/tests/test_learner.py @@ -5,25 +5,27 @@ import unittest import ray - -from ray.rllib.core.rl_module.rl_module import SingleAgentRLModuleSpec +from ray.rllib.algorithms.appo.appo import APPOConfig from ray.rllib.core.learner.learner import Learner, FrameworkHPs +from ray.rllib.core.learner.scaling_config import LearnerGroupScalingConfig +from ray.rllib.core.rl_module.rl_module import SingleAgentRLModuleSpec from ray.rllib.core.testing.tf.bc_module import DiscreteBCTFModule from ray.rllib.core.testing.tf.bc_learner import BCTfLearner from ray.rllib.policy.sample_batch import DEFAULT_POLICY_ID -from ray.rllib.utils.test_utils import check, get_cartpole_dataset_reader +from ray.rllib.utils.test_utils import ( + check, + framework_iterator, + get_cartpole_dataset_reader, +) from ray.rllib.utils.metrics import ALL_MODULES -from ray.rllib.core.learner.scaling_config import LearnerGroupScalingConfig - -def get_learner(learning_rate=1e-3) -> Learner: - env = gym.make("CartPole-v1") +def get_learner(obs_space, action_space, learning_rate=1e-3) -> Learner: learner = BCTfLearner( module_spec=SingleAgentRLModuleSpec( module_class=DiscreteBCTFModule, - observation_space=env.observation_space, - action_space=env.action_space, + observation_space=obs_space, + action_space=action_space, model_config_dict={"fcnet_hiddens": [32]}, ), # made this a configurable hparam to avoid information leakage in tests where we @@ -39,6 +41,9 @@ def get_learner(learning_rate=1e-3) -> Learner: class TestLearner(unittest.TestCase): + + ENV = gym.make("CartPole-v1") + @classmethod def setUp(cls) -> None: 
ray.init() @@ -49,7 +54,7 @@ def tearDown(cls) -> None: def test_end_to_end_update(self): - learner = get_learner() + learner = get_learner(self.ENV.observation_space, self.ENV.action_space) reader = get_cartpole_dataset_reader(batch_size=512) min_loss = float("inf") @@ -72,11 +77,25 @@ def test_compute_gradients(self): Tests that if we sum all the trainable variables the gradient of output w.r.t. the weights is all ones. """ - learner = get_learner() + learner = BCTfLearner( + module_spec=SingleAgentRLModuleSpec( + module_class=DiscreteBCTFModule, + observation_space=self.ENV.observation_space, + action_space=self.ENV.action_space, + model_config_dict={"fcnet_hiddens": [32]}, + ), + # made this a configurable hparam to avoid information leakage in tests + # where we need to know what the learning rate is. + optimizer_config={"lr": 1e-3}, + learner_scaling_config=LearnerGroupScalingConfig(), + framework_hyperparameters=FrameworkHPs(eager_tracing=True), + ) + + learner.build() with tf.GradientTape() as tape: params = learner.module[DEFAULT_POLICY_ID].trainable_variables - loss = {"total_loss": sum([tf.reduce_sum(param) for param in params])} + loss = {"total_loss": sum(tf.reduce_sum(param) for param in params)} gradients = learner.compute_gradients(loss, tape) # type should be a mapping from ParamRefs to gradients @@ -85,6 +104,100 @@ def test_compute_gradients(self): for grad in gradients.values(): check(grad, np.ones(grad.shape)) + def test_postprocess_gradients(self): + """Tests the postprocess_gradients correctness.""" + config = ( + APPOConfig() + .environment("CartPole-v1") + .framework(eager_tracing=True) + .rollouts(rollout_fragment_length=50) + ) + + # TODO (sven): Enable torch once available for APPO. + for fw in framework_iterator(config, frameworks=("tf2")): + # Clip by value only. + config.training( + grad_clip=0.75, + grad_clip_by="value", + ) + # TODO (sven): remove this once validation does NOT cause HPs to be + # generated anymore. 
+            config.validate()
+            config.freeze()
+            module_spec = config.get_default_rl_module_spec()
+            module_spec.model_config_dict = {"fcnet_hiddens": [10]}
+            module_spec.observation_space = self.ENV.observation_space
+            module_spec.action_space = self.ENV.action_space
+            learner_group = (
+                config.get_learner_group_config(module_spec=module_spec)
+                .learner(learner_class=config.get_default_learner_class())
+                .build()
+            )
+            learner = learner_group._learner
+            # Pretend our computed gradients are our weights + 1.0.
+            grads = {
+                v.ref(): v + 1.0
+                for v in learner.module[DEFAULT_POLICY_ID].trainable_variables
+            }
+            # Call the learner's postprocessing method.
+            processed_grads = list(learner.postprocess_gradients(grads).values())
+            # Check clipped gradients.
+            # No single gradient may be larger than 0.75 or smaller than -0.75:
+            self.assertTrue(
+                all(
+                    np.max(grad) <= 0.75 and np.min(grad) >= -0.75
+                    for grad in processed_grads
+                )
+            )
+
+            # Clip by norm.
+            config = config.copy(copy_frozen=False).training(
+                grad_clip=1.0,
+                grad_clip_by="norm",
+            )
+            # TODO (sven): remove this once validation does NOT cause HPs to be
+            # generated anymore.
+            config.validate()
+            config.freeze()
+            learner_group = (
+                config.get_learner_group_config(module_spec=module_spec)
+                .learner(learner_class=config.get_default_learner_class())
+                .build()
+            )
+            learner = learner_group._learner
+            # Call the learner's postprocessing method.
+            processed_grads = list(learner.postprocess_gradients(grads).values())
+            # Check clipped gradients.
+            for proc_grad, grad in zip(processed_grads, grads.values()):
+                l2_norm = np.sqrt(np.sum(grad**2.0))
+                if l2_norm > 1.0:
+                    check(proc_grad, grad * (1.0 / l2_norm))
+
+            # Clip by global norm. 
+ config = config.copy(copy_frozen=False).training( + grad_clip=5.0, + grad_clip_by="global_norm", + ) + # TODO: remove this once validation does NOT cause HPs to be generated + # anymore + config.validate() + config.freeze() + learner_group = ( + config.get_learner_group_config(module_spec=module_spec) + .learner(learner_class=config.get_default_learner_class()) + .build() + ) + learner = learner_group._learner + # Call the learner's postprocessing method. + processed_grads = list(learner.postprocess_gradients(grads).values()) + # Check clipped gradients. + global_norm = np.sqrt( + np.sum(np.sum(grad**2.0) for grad in grads.values()) + ) + if global_norm > 5.0: + for proc_grad, grad in zip(processed_grads, grads.values()): + check(proc_grad, grad * (5.0 / global_norm)) + def test_apply_gradients(self): """Tests the apply_gradients correctness. @@ -92,7 +205,7 @@ def test_apply_gradients(self): standard SGD/Adam update rule. """ - learner = get_learner() + learner = get_learner(self.ENV.observation_space, self.ENV.action_space) # calculated the expected new params based on gradients of all ones. params = learner.module[DEFAULT_POLICY_ID].trainable_variables @@ -114,16 +227,15 @@ def test_add_remove_module(self): from default), and remove the default module, with a loss that is the sum of all variables the updated parameters follow the SGD update rule. 
""" - env = gym.make("CartPole-v1") lr = 1e-3 - learner = get_learner(lr) + learner = get_learner(self.ENV.observation_space, self.ENV.action_space, lr) learner.add_module( module_id="test", module_spec=SingleAgentRLModuleSpec( module_class=DiscreteBCTFModule, - observation_space=env.observation_space, - action_space=env.action_space, + observation_space=self.ENV.observation_space, + action_space=self.ENV.action_space, model_config_dict={"fcnet_hiddens": [16]}, ), ) @@ -139,20 +251,18 @@ def test_add_remove_module(self): expected = [param - n_steps * lr * np.ones(param.shape) for param in params] for _ in range(n_steps): with tf.GradientTape() as tape: - loss = {"total_loss": sum([tf.reduce_sum(param) for param in params])} + loss = {"total_loss": sum(tf.reduce_sum(param) for param in params)} gradients = learner.compute_gradients(loss, tape) learner.apply_gradients(gradients) check(params, expected) def test_save_load_state(self): - env = gym.make("CartPole-v1") - learner1 = BCTfLearner( module_spec=SingleAgentRLModuleSpec( module_class=DiscreteBCTFModule, - observation_space=env.observation_space, - action_space=env.action_space, + observation_space=self.ENV.observation_space, + action_space=self.ENV.action_space, model_config_dict={"fcnet_hiddens": [64]}, ), optimizer_config={"lr": 2e-3}, @@ -167,8 +277,8 @@ def test_save_load_state(self): learner2 = BCTfLearner( module_spec=SingleAgentRLModuleSpec( module_class=DiscreteBCTFModule, - observation_space=env.observation_space, - action_space=env.action_space, + observation_space=self.ENV.observation_space, + action_space=self.ENV.action_space, model_config_dict={"fcnet_hiddens": [32]}, ), optimizer_config={"lr": 1e-3}, @@ -185,8 +295,8 @@ def test_save_load_state(self): module_id="test", module_spec=SingleAgentRLModuleSpec( module_class=DiscreteBCTFModule, - observation_space=env.observation_space, - action_space=env.action_space, + observation_space=self.ENV.observation_space, + action_space=self.ENV.action_space, 
model_config_dict={"fcnet_hiddens": [32]}, ), ) diff --git a/rllib/core/learner/tf/tf_learner.py b/rllib/core/learner/tf/tf_learner.py index 1695940a4170..3f927381fd9c 100644 --- a/rllib/core/learner/tf/tf_learner.py +++ b/rllib/core/learner/tf/tf_learner.py @@ -5,12 +5,12 @@ import tree # pip install dm-tree from typing import ( Any, + Callable, + Hashable, Mapping, - Union, Optional, - Callable, Sequence, - Hashable, + Union, ) from ray.rllib.core.learner.learner import ( @@ -30,6 +30,7 @@ from ray.rllib.policy.sample_batch import MultiAgentBatch from ray.rllib.utils.annotations import override from ray.rllib.utils.framework import try_import_tf +from ray.rllib.utils.tf_utils import clip_gradients from ray.rllib.utils.typing import TensorType, ResultDict from ray.rllib.utils.minibatch_utils import ( MiniBatchDummyIterator, @@ -97,12 +98,28 @@ def compute_gradients( grads = tape.gradient(loss[self.TOTAL_LOSS_KEY], self._params) return grads + @override(Learner) + def postprocess_gradients( + self, + gradients_dict: Mapping[str, Any], + ) -> Mapping[str, Any]: + """Postprocesses gradients depending on the optimizer config.""" + + # Perform gradient clipping, if necessary. + clip_gradients( + gradients_dict, + grad_clip=self._optimizer_config.get("grad_clip"), + grad_clip_by=self._optimizer_config.get("grad_clip_by"), + ) + + return gradients_dict + @override(Learner) def apply_gradients(self, gradients: ParamDictType) -> None: # TODO (Avnishn, kourosh): apply gradients doesn't work in cases where - # only some agents have a sample batch that is passed but not others. - # This is probably because of the way that we are iterating over the - # parameters in the optim_to_param_dictionary + # only some agents have a sample batch that is passed but not others. + # This is probably because of the way that we are iterating over the + # parameters in the optim_to_param_dictionary. 
for optim, param_ref_seq in self._optimizer_parameters.items(): variable_list = [ self._params[param_ref] @@ -116,20 +133,6 @@ def apply_gradients(self, gradients: ParamDictType) -> None: ] optim.apply_gradients(zip(gradient_list, variable_list)) - @override(Learner) - def postprocess_gradients( - self, gradients_dict: Mapping[str, Any] - ) -> Mapping[str, Any]: - grad_clip = self._optimizer_config.get("grad_clip", None) - assert isinstance( - grad_clip, (int, float, type(None)) - ), "grad_clip must be a number" - if grad_clip is not None: - gradients_dict = tf.nest.map_structure( - lambda v: tf.clip_by_value(v, -grad_clip, grad_clip), gradients_dict - ) - return gradients_dict - @override(Learner) def load_state( self, diff --git a/rllib/core/learner/torch/torch_learner.py b/rllib/core/learner/torch/torch_learner.py index 732e6cde5c45..cc9576b74a61 100644 --- a/rllib/core/learner/torch/torch_learner.py +++ b/rllib/core/learner/torch/torch_learner.py @@ -2,11 +2,11 @@ import pathlib from typing import ( Any, - Mapping, - Union, - Sequence, Hashable, + Mapping, Optional, + Sequence, + Union, ) from ray.rllib.core.rl_module.rl_module import ( @@ -27,7 +27,7 @@ from ray.rllib.core.rl_module.torch.torch_rl_module import TorchDDPRLModule from ray.rllib.policy.sample_batch import MultiAgentBatch from ray.rllib.utils.annotations import override -from ray.rllib.utils.torch_utils import convert_to_torch_tensor +from ray.rllib.utils.torch_utils import clip_gradients, convert_to_torch_tensor from ray.rllib.utils.typing import TensorType from ray.rllib.utils.nested_dict import NestedDict from ray.rllib.utils.framework import try_import_torch @@ -79,6 +79,22 @@ def compute_gradients( return grads + @override(Learner) + def postprocess_gradients( + self, + gradients_dict: Mapping[str, Any], + ) -> Mapping[str, Any]: + """Postprocesses gradients depending on the optimizer config.""" + + # Perform gradient clipping, if necessary. 
+ clip_gradients( + gradients_dict, + grad_clip=self._optimizer_config.get("grad_clip"), + grad_clip_by=self._optimizer_config.get("grad_clip_by"), + ) + + return gradients_dict + @override(Learner) def apply_gradients(self, gradients: ParamDictType) -> None: # make sure the parameters do not carry gradients on their own diff --git a/rllib/utils/tf_utils.py b/rllib/utils/tf_utils.py index 91d559262b7f..276fa886e5e9 100644 --- a/rllib/utils/tf_utils.py +++ b/rllib/utils/tf_utils.py @@ -3,7 +3,7 @@ import logging import numpy as np import tree # pip install dm_tree -from typing import Any, Callable, List, Optional, Type, TYPE_CHECKING, Union +from typing import Any, Callable, Dict, List, Optional, Type, TYPE_CHECKING, Union from ray.rllib.utils.annotations import PublicAPI, DeveloperAPI from ray.rllib.utils.framework import try_import_tf @@ -27,6 +27,48 @@ tf1, tf, tfv = try_import_tf() +@PublicAPI +def clip_gradients( + gradients_dict: Dict[str, "tf.Tensor"], + *, + grad_clip: Optional[float] = None, + grad_clip_by: str = "value", +) -> None: + """Performs gradient clipping on a grad-dict based on a clip value and clip mode. + + Changes the provided gradient dict in place. + + Args: + gradients_dict: The gradients dict, mapping str to gradient tensors. + grad_clip: The value to clip with. The way gradients are clipped is defined + by the `grad_clip_by` arg (see below). + grad_clip_by: One of 'value', 'norm', or 'global_norm'. + """ + # No clipping, return. + if grad_clip is None: + return + + # Clip by value (each gradient individually). + if grad_clip_by == "value": + for k, v in gradients_dict.copy().items(): + gradients_dict[k] = tf.clip_by_value(v, -grad_clip, grad_clip) + + # Clip by L2-norm (per gradient tensor). + elif grad_clip_by == "norm": + for k, v in gradients_dict.copy().items(): + gradients_dict[k] = tf.clip_by_norm(v, grad_clip) + + # Clip by global L2-norm (across all gradient tensors). 
+ else: + assert grad_clip_by == "global_norm" + + clipped_grads, _ = tf.clip_by_global_norm( + list(gradients_dict.values()), grad_clip + ) + for k, v in zip(gradients_dict.copy().keys(), clipped_grads): + gradients_dict[k] = v + + @PublicAPI def explained_variance(y: TensorType, pred: TensorType) -> TensorType: """Computes the explained variance for a pair of labels and predictions. @@ -176,7 +218,7 @@ def get_placeholder( value: Optional[Any] = None, name: Optional[str] = None, time_axis: bool = False, - flatten: bool = True + flatten: bool = True, ) -> "tf1.placeholder": """Returns a tf1.placeholder object given optional hints, such as a space. @@ -413,6 +455,8 @@ def _create_placeholders(path, value): return make_wrapper +# TODO (sven): Deprecate this function once we have moved completely to the Learner API. +# Replaced with `clip_gradients()`. @PublicAPI def minimize_and_clip( optimizer: LocalOptimizer, diff --git a/rllib/utils/torch_utils.py b/rllib/utils/torch_utils.py index 90ce959d8421..0fb502de8645 100644 --- a/rllib/utils/torch_utils.py +++ b/rllib/utils/torch_utils.py @@ -1,7 +1,7 @@ import os import logging import warnings -from typing import TYPE_CHECKING, Dict, List, Optional, Union +from typing import Dict, List, Optional, TYPE_CHECKING, Union import gymnasium as gym import numpy as np @@ -32,6 +32,8 @@ FLOAT_MAX = 3.4e38 +# TODO (sven): Deprecate this function once we have moved completely to the Learner API. +# Replaced with `clip_gradients()`. @PublicAPI def apply_grad_clipping( policy: "TorchPolicy", optimizer: LocalOptimizer, loss: TensorType @@ -88,6 +90,58 @@ def atanh(x: TensorType) -> TensorType: pass +@PublicAPI +def clip_gradients( + gradients_dict: Dict[str, "torch.Tensor"], + *, + grad_clip: Optional[float] = None, + grad_clip_by: str = "value", +) -> None: + """Performs gradient clipping on a grad-dict based on a clip value and clip mode. + + Changes the provided gradient dict in place. 
+ + Args: + gradients_dict: The gradients dict, mapping str to gradient tensors. + grad_clip: The value to clip with. The way gradients are clipped is defined + by the `grad_clip_by` arg (see below). + grad_clip_by: One of 'value', 'norm', or 'global_norm'. + """ + # No clipping, return. + if grad_clip is None: + return + + # Clip by value (each gradient individually). + if grad_clip_by == "value": + for k, v in gradients_dict.copy().items(): + gradients_dict[k] = torch.clip(v, -grad_clip, grad_clip) + + # Clip by L2-norm (per gradient tensor). + elif grad_clip_by == "norm": + for k, v in gradients_dict.copy().items(): + gradients_dict[k] = nn.utils.clip_grad_norm_(v, grad_clip) + + # Clip by global L2-norm (across all gradient tensors). + else: + assert grad_clip_by == "global_norm" + + # Compute the global L2-norm of all the gradient tensors. + grad_tensors = gradients_dict.values() + total_l2_norm = 0.0 + for tensor in grad_tensors: + # `.norm()` is the square root of the sum of all squares. + # We need to "undo" the square root b/c we want to compute the global + # norm afterwards -> `** 2`. + total_l2_norm += tensor.norm(2) ** 2 + # Now we do the square root. + total_l2_norm = torch.sqrt(total_l2_norm) + + # Clip all the gradients. + if total_l2_norm > grad_clip: + for tensor in grad_tensors: + tensor.mul_(grad_clip / total_l2_norm) + + @PublicAPI def concat_multi_gpu_td_errors( policy: Union["TorchPolicy", "TorchPolicyV2"] From ee51495c98717c3e07bfcbe3aafae66f7628a6ce Mon Sep 17 00:00:00 2001 From: Chen Shen Date: Thu, 27 Apr 2023 09:54:17 -0700 Subject: [PATCH 126/424] Revert "Revert "[Core] RetryObjectInPlasmaErrors tries to fetch all objects, not just ready ones." (#31445)" (#34805) This reverts commit a48e8d72bcadbb8d5e9c683a55ec0163ba7ecb65. 
--- src/ray/core_worker/core_worker.cc | 19 +++++-------------- 1 file changed, 5 insertions(+), 14 deletions(-) diff --git a/src/ray/core_worker/core_worker.cc b/src/ray/core_worker/core_worker.cc index 708fad19af45..af09bb383ed7 100644 --- a/src/ray/core_worker/core_worker.cc +++ b/src/ray/core_worker/core_worker.cc @@ -1452,20 +1452,11 @@ void RetryObjectInPlasmaErrors(std::shared_ptr &memory_st for (auto iter = memory_object_ids.begin(); iter != memory_object_ids.end();) { auto current = iter++; const auto &mem_id = *current; - auto ready_iter = ready.find(mem_id); - if (ready_iter != ready.end()) { - std::vector> found; - RAY_CHECK_OK(memory_store->Get({mem_id}, - /*num_objects=*/1, - /*timeout=*/0, - worker_context, - /*remote_after_get=*/false, - &found)); - if (found.size() == 1 && found[0]->IsInPlasmaError()) { - plasma_object_ids.insert(mem_id); - ready.erase(ready_iter); - memory_object_ids.erase(current); - } + auto found = memory_store->GetIfExists(mem_id); + if (found != nullptr && found->IsInPlasmaError()) { + plasma_object_ids.insert(mem_id); + ready.erase(mem_id); + memory_object_ids.erase(current); } } } From 1e59a82192c4fe49a6398ddeaf214f8290b0a560 Mon Sep 17 00:00:00 2001 From: Kai Fricke Date: Thu, 27 Apr 2023 17:57:26 +0100 Subject: [PATCH 127/424] [ci/docker] Fix arm64 docker image builds by downgrading miniconda (#34825) --- docker/base-deps/Dockerfile | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/docker/base-deps/Dockerfile b/docker/base-deps/Dockerfile index b87bcb4f6fb4..4accfacbce9a 100644 --- a/docker/base-deps/Dockerfile +++ b/docker/base-deps/Dockerfile @@ -29,6 +29,10 @@ RUN apt-get update -y \ USER $RAY_UID ENV HOME=/home/ray +# Todo (krfricke): Move to latest miniconda version once we stop building +# images for Python 3.7. 
+# https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-${HOSTTYPE}.sh + RUN sudo apt-get update -y && sudo apt-get upgrade -y \ && sudo apt-get install -y \ git \ @@ -44,7 +48,7 @@ RUN sudo apt-get update -y && sudo apt-get upgrade -y \ openssh-client \ gnupg; fi) \ && wget \ - --quiet "https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-${HOSTTYPE}.sh" \ + --quiet "https://repo.anaconda.com/miniconda/Miniconda3-py37_23.1.0-1-Linux-${HOSTTYPE}.sh" \ -O /tmp/miniconda.sh \ && /bin/bash /tmp/miniconda.sh -b -u -p $HOME/anaconda3 \ && $HOME/anaconda3/bin/conda init \ From 2682fab9888409b35131575fd73dce58800069bb Mon Sep 17 00:00:00 2001 From: Kai Fricke Date: Thu, 27 Apr 2023 17:58:56 +0100 Subject: [PATCH 128/424] [linkcheck] Fix link to mongo-arrow types (#34822) Currently failing on master. The link to pymars is currently also failing - but this is presumably ephemeral and will go away when the docs page for pymars is up again. Signed-off-by: Kai Fricke --- python/ray/data/datastream.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/ray/data/datastream.py b/python/ray/data/datastream.py index e1539777b2d1..85ec35a11a2f 100644 --- a/python/ray/data/datastream.py +++ b/python/ray/data/datastream.py @@ -2825,7 +2825,7 @@ def write_mongo( Currently, this supports only a subset of the pyarrow's types, due to the limitation of pymongoarrow which is used underneath. Writing unsupported types will fail on type checking. See all the supported types at: - https://mongo-arrow.readthedocs.io/en/latest/supported_types.html. + https://mongo-arrow.readthedocs.io/en/latest/data_types.html. .. note:: The records will be inserted into MongoDB as new documents. 
If a record has From 9a01c27d89c56e2622b3d957820a5b24cd1ec579 Mon Sep 17 00:00:00 2001 From: Cuong Nguyen <128072568+can-anyscale@users.noreply.github.com> Date: Thu, 27 Apr 2023 10:17:14 -0700 Subject: [PATCH 129/424] Move could not upload artifacts to cloud storage as warnings (#34802) Signed-off-by: Cuong Nguyen --- release/ray_release/command_runner/_anyscale_job_wrapper.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/release/ray_release/command_runner/_anyscale_job_wrapper.py b/release/ray_release/command_runner/_anyscale_job_wrapper.py index 986894f755ed..6249ac3c8c75 100644 --- a/release/ray_release/command_runner/_anyscale_job_wrapper.py +++ b/release/ray_release/command_runner/_anyscale_job_wrapper.py @@ -65,7 +65,7 @@ def run_storage_cp(source: str, target: str): return False if not Path(source).exists(): - logger.error(f"Couldn't upload to cloud storage: '{source}' does not exist.") + logger.warning(f"Couldn't upload to cloud storage: '{source}' does not exist.") return False storage_service = urlparse(target).scheme From 742b5c68d9edea29091e15089930fd67ba3dd07c Mon Sep 17 00:00:00 2001 From: Yunxuan Xiao Date: Thu, 27 Apr 2023 10:57:49 -0700 Subject: [PATCH 130/424] [CI] Add Lightning 2.0 compatibility test pipeline (#34147) PyTorch Lightning recently updated to version 2.0, but our CI pipelines only covered Lightning 1.x. This pull request aims to build a new pipeline to test the compatibility of LightningTrainer with PTL v2.0. Some minor changes to unit tests: - Updated the testing LightningModule (remove the deprecated `validation_epoch_end()`) - Use different syntax to feed model params to optimizer when using FSDP. 
Signed-off-by: woshiyyya Signed-off-by: Kai Fricke Co-authored-by: Kai Fricke --- .buildkite/pipeline.gpu_large.yml | 13 +++++++ ci/ci.sh | 2 + ci/env/install-minimal.sh | 2 + python/ray/train/BUILD | 8 ++-- .../ray/train/tests/lightning_test_utils.py | 37 ++++++++++++++----- .../train/tests/test_lightning_checkpoint.py | 10 ++++- .../train/tests/test_lightning_predictor.py | 2 +- .../ray/train/tests/test_lightning_trainer.py | 4 +- 8 files changed, 59 insertions(+), 19 deletions(-) diff --git a/.buildkite/pipeline.gpu_large.yml b/.buildkite/pipeline.gpu_large.yml index 2f993cd96546..e15ee57050ea 100644 --- a/.buildkite/pipeline.gpu_large.yml +++ b/.buildkite/pipeline.gpu_large.yml @@ -49,3 +49,16 @@ - pip install -Ur ./python/requirements/ml/requirements_ml_docker.txt - ./ci/env/env_info.sh - bazel test --config=ci $(./ci/run/bazel_export_options) --build_tests_only --test_tag_filters=gpu,-timeseries_libs,-py37,-post_wheel_build doc/... + +- label: ":zap: :python: Lightning 2.0 Train GPU tests" + conditions: + ["NO_WHEELS_REQUIRED", "RAY_CI_TRAIN_AFFECTED"] + commands: + - cleanup() { if [ "${BUILDKITE_PULL_REQUEST}" = "false" ]; then ./ci/build/upload_build_info.sh; fi }; trap cleanup EXIT + - NO_DASHBOARD=1 ./ci/env/install-minimal.sh 3.8 + - PYTHON=3.8 DOC_TESTING=1 TRAIN_TESTING=1 TUNE_TESTING=1 ./ci/env/install-dependencies.sh + - pip install -Ur ./python/requirements/ml/requirements_ml_docker.txt + - pip uninstall -y pytorch-lightning + - pip install lightning==2.0.0 + - ./ci/env/env_info.sh + - bazel test --config=ci $(./scripts/bazel_export_options) --test_tag_filters=ptl_v2 python/ray/train/... 
\ No newline at end of file diff --git a/ci/ci.sh b/ci/ci.sh index 2478bb715d94..3f1d5f08828f 100755 --- a/ci/ci.sh +++ b/ci/ci.sh @@ -284,6 +284,8 @@ install_npm_project() { build_dashboard_front_end() { if [ "${OSTYPE}" = msys ]; then { echo "WARNING: Skipping dashboard due to NPM incompatibilities with Windows"; } 2> /dev/null + elif [ "${NO_DASHBOARD-}" = "1" ]; then + echo "Skipping dashboard build" else ( cd ray/dashboard/client diff --git a/ci/env/install-minimal.sh b/ci/env/install-minimal.sh index e99e453ea11e..9da00d7517c3 100755 --- a/ci/env/install-minimal.sh +++ b/ci/env/install-minimal.sh @@ -1,5 +1,7 @@ #!/usr/bin/env bash +set -xe + # Python version can be specified as 3.7, 3.8, 3.9, etc.. if [ -z "$1" ]; then PYTHON_VERSION=${PYTHON-3.7} diff --git a/python/ray/train/BUILD b/python/ray/train/BUILD index e2a244455ace..1d932ca912a8 100644 --- a/python/ray/train/BUILD +++ b/python/ray/train/BUILD @@ -412,7 +412,7 @@ py_test( name = "test_lightning_checkpoint", size = "medium", srcs = ["tests/test_lightning_checkpoint.py"], - tags = ["team:ml", "exclusive", "ray_air", "gpu"], + tags = ["team:ml", "exclusive", "ray_air", "gpu", "ptl_v2"], deps = [":train_lib"] ) @@ -420,7 +420,7 @@ py_test( name = "test_lightning_trainer_restore", size = "medium", srcs = ["tests/test_lightning_trainer_restore.py"], - tags = ["team:ml", "exclusive", "ray_air", "gpu"], + tags = ["team:ml", "exclusive", "ray_air", "gpu", "ptl_v2"], deps = [":train_lib"] ) @@ -428,7 +428,7 @@ py_test( name = "test_lightning_trainer", size = "large", srcs = ["tests/test_lightning_trainer.py"], - tags = ["team:ml", "exclusive", "ray_air", "gpu"], + tags = ["team:ml", "exclusive", "ray_air", "gpu", "ptl_v2"], deps = [":train_lib"] ) @@ -436,7 +436,7 @@ py_test( name = "test_lightning_predictor", size = "medium", srcs = ["tests/test_lightning_predictor.py"], - tags = ["team:ml", "exclusive", "ray_air", "gpu"], + tags = ["team:ml", "exclusive", "ray_air", "gpu", "ptl_v2"], deps = [":train_lib"] 
) diff --git a/python/ray/train/tests/lightning_test_utils.py b/python/ray/train/tests/lightning_test_utils.py index fcf37af1becc..c58ae623336b 100644 --- a/python/ray/train/tests/lightning_test_utils.py +++ b/python/ray/train/tests/lightning_test_utils.py @@ -7,9 +7,11 @@ class LinearModule(pl.LightningModule): - def __init__(self, input_dim, output_dim) -> None: + def __init__(self, input_dim, output_dim, strategy="ddp") -> None: super().__init__() self.linear = nn.Linear(input_dim, output_dim) + self.loss = [] + self.strategy = strategy def forward(self, input): return self.linear(input) @@ -22,17 +24,23 @@ def training_step(self, batch): def validation_step(self, val_batch, batch_idx): loss = self.forward(val_batch) + self.loss.append(loss) return {"val_loss": loss} - def validation_epoch_end(self, outputs) -> None: - avg_loss = torch.stack([x["val_loss"] for x in outputs]).mean() + def on_validation_epoch_end(self) -> None: + avg_loss = torch.stack(self.loss).mean() self.log("val_loss", avg_loss) + self.loss.clear() def predict_step(self, batch, batch_idx): return self.forward(batch) def configure_optimizers(self): - return torch.optim.SGD(self.parameters(), lr=0.1) + if self.strategy == "fsdp": + # Feed FSDP wrapped model parameters to optimizer + return torch.optim.SGD(self.trainer.model.parameters(), lr=0.1) + else: + return torch.optim.SGD(self.parameters(), lr=0.1) class DoubleLinearModule(pl.LightningModule): @@ -40,6 +48,7 @@ def __init__(self, input_dim_1, input_dim_2, output_dim) -> None: super().__init__() self.linear_1 = nn.Linear(input_dim_1, output_dim) self.linear_2 = nn.Linear(input_dim_2, output_dim) + self.loss = [] def forward(self, batch): input_1 = batch["input_1"] @@ -54,12 +63,14 @@ def training_step(self, batch): def validation_step(self, val_batch, batch_idx): loss = self.forward(val_batch) + self.loss.append(loss) return {"val_loss": loss} - def validation_epoch_end(self, outputs) -> None: + def on_validation_epoch_end(self) -> None: 
print("Validation Epoch:", self.current_epoch) - avg_loss = torch.stack([x["val_loss"] for x in outputs]).mean() + avg_loss = torch.stack(self.loss).mean() self.log("val_loss", avg_loss) + self.loss.clear() def predict_step(self, batch, batch_idx): return self.forward(batch) @@ -91,7 +102,9 @@ def __init__(self, lr: float, layer_1: int, layer_2: int): self.layer_1 = torch.nn.Linear(28 * 28, layer_1) self.layer_2 = torch.nn.Linear(layer_1, layer_2) self.layer_3 = torch.nn.Linear(layer_2, 10) - self.accuracy = Accuracy() + self.accuracy = Accuracy(task="multiclass", num_classes=10) + self.val_acc_list = [] + self.val_loss_list = [] def forward(self, x): batch_size, channels, width, height = x.size() @@ -121,13 +134,17 @@ def validation_step(self, val_batch, batch_idx): logits = self.forward(x) loss = F.nll_loss(logits, y) acc = self.accuracy(logits, y) + self.val_acc_list.append(acc) + self.val_loss_list.append(loss) return {"val_loss": loss, "val_accuracy": acc} - def validation_epoch_end(self, outputs): - avg_loss = torch.stack([x["val_loss"] for x in outputs]).mean() - avg_acc = torch.stack([x["val_accuracy"] for x in outputs]).mean() + def on_validation_epoch_end(self): + avg_loss = torch.stack(self.val_loss_list).mean() + avg_acc = torch.stack(self.val_acc_list).mean() self.log("ptl/val_loss", avg_loss) self.log("ptl/val_accuracy", avg_acc) + self.val_acc_list.clear() + self.val_loss_list.clear() def predict_step(self, batch, batch_idx, dataloader_idx=None): x = batch diff --git a/python/ray/train/tests/test_lightning_checkpoint.py b/python/ray/train/tests/test_lightning_checkpoint.py index e253bb2a8b85..64bcd40b32be 100644 --- a/python/ray/train/tests/test_lightning_checkpoint.py +++ b/python/ray/train/tests/test_lightning_checkpoint.py @@ -38,7 +38,10 @@ def test_load_from_path(): # Train one epoch and save a checkpoint trainer = pl.Trainer( - max_epochs=1, enable_progress_bar=False, enable_checkpointing=False + max_epochs=1, + accelerator="cpu", + 
enable_progress_bar=False, + enable_checkpointing=False, ) trainer.fit(model=model, train_dataloaders=dataloader) ckpt_path = f"{tmpdir}/random_checkpoint_name.ckpt" @@ -75,7 +78,10 @@ def test_from_directory(): # Train one epoch and save a checkpoint trainer = pl.Trainer( - max_epochs=1, enable_progress_bar=False, enable_checkpointing=False + max_epochs=1, + accelerator="cpu", + enable_progress_bar=False, + enable_checkpointing=False, ) trainer.fit(model=model, train_dataloaders=dataloader) trainer.save_checkpoint(f"{tmpdir}/{MODEL_KEY}") diff --git a/python/ray/train/tests/test_lightning_predictor.py b/python/ray/train/tests/test_lightning_predictor.py index 49ee42073b16..2c34b5dcc984 100644 --- a/python/ray/train/tests/test_lightning_predictor.py +++ b/python/ray/train/tests/test_lightning_predictor.py @@ -28,7 +28,7 @@ def test_repr(): def save_checkpoint(model: pl.LightningModule, ckpt_path: str): - trainer = pl.Trainer(max_epochs=0) + trainer = pl.Trainer(max_epochs=0, accelerator="cpu") trainer.fit(model, train_dataloaders=DataLoader(torch.randn(1))) trainer.save_checkpoint(ckpt_path) diff --git a/python/ray/train/tests/test_lightning_trainer.py b/python/ray/train/tests/test_lightning_trainer.py index a35f37ac54e9..aab21fb4a6d1 100644 --- a/python/ray/train/tests/test_lightning_trainer.py +++ b/python/ray/train/tests/test_lightning_trainer.py @@ -74,7 +74,7 @@ def test_trainer_with_native_dataloader( config_builder = ( LightningConfigBuilder() - .module(LinearModule, input_dim=32, output_dim=4) + .module(LinearModule, input_dim=32, output_dim=4, strategy=strategy) .trainer(max_epochs=num_epochs, accelerator=accelerator) .strategy(strategy) ) @@ -124,7 +124,7 @@ def test_trainer_with_ray_data(ray_start_6_cpus_2_gpus, strategy, accelerator): lightning_config = ( LightningConfigBuilder() - .module(cls=LinearModule, input_dim=32, output_dim=4) + .module(cls=LinearModule, input_dim=32, output_dim=4, strategy=strategy) .trainer(max_epochs=num_epochs, 
accelerator=accelerator) .strategy(strategy) .build() From 7768bc840bad3440c0bc277725311e82c6622905 Mon Sep 17 00:00:00 2001 From: Cuong Nguyen <128072568+can-anyscale@users.noreply.github.com> Date: Thu, 27 Apr 2023 11:32:34 -0700 Subject: [PATCH 131/424] [CI] Unify ways to get ray logs (#34755) Currently we are getting ray logs in several ways, aka. using anyscale cli or unsupported anyscale sdk. Unify them into using anyscale cli only, which is what @shawnpanda recommended. Signed-off-by: Cuong Nguyen --- .../job_manager/anyscale_job_manager.py | 47 +++++++++---------- .../tests/test_anyscale_job_manager.py | 11 +++-- 2 files changed, 29 insertions(+), 29 deletions(-) diff --git a/release/ray_release/job_manager/anyscale_job_manager.py b/release/ray_release/job_manager/anyscale_job_manager.py index 0d02dbb8a784..16e87bac4bfd 100644 --- a/release/ray_release/job_manager/anyscale_job_manager.py +++ b/release/ray_release/job_manager/anyscale_job_manager.py @@ -1,10 +1,9 @@ -import io import os import time import subprocess import tempfile from collections import deque -from contextlib import redirect_stdout, redirect_stderr, contextmanager +from contextlib import contextmanager from typing import Any, Dict, Optional, Tuple @@ -12,7 +11,6 @@ CreateProductionJob, HaJobStates, ) -from anyscale.controllers.job_controller import JobController, terminal_state from ray_release.anyscale_util import LAST_LOGS_LENGTH, get_cluster_name from ray_release.cluster_manager.cluster_manager import ClusterManager from ray_release.exception import ( @@ -36,6 +34,7 @@ HaJobStates.BROKEN: -2, HaJobStates.TERMINATED: -3, } +terminal_state = set(job_status_to_return_code.keys()) class AnyscaleJobManager: @@ -262,7 +261,7 @@ def run_and_wait( ) return self._wait_job(timeout) - def _get_ray_error_logs(self) -> Optional[str]: + def _get_ray_logs(self) -> Tuple[Optional[str], Optional[str]]: """ Obtain any ray logs that contain keywords that indicate a crash, such as ERROR or Traceback @@ 
-285,10 +284,12 @@ def _get_ray_error_logs(self) -> Optional[str]: except Exception as e: logger.log(f"Failed to download logs from anyscale {e}") return None - return AnyscaleJobManager._find_ray_error_logs(tmpdir) + return AnyscaleJobManager._find_job_driver_and_ray_error_logs(tmpdir) @staticmethod - def _find_ray_error_logs(tmpdir: str) -> Optional[str]: + def _find_job_driver_and_ray_error_logs( + tmpdir: str, + ) -> Tuple[Optional[str], Optional[str]]: # Ignored some ray files that do not crash ray despite having exceptions ignored_ray_files = [ "monitor.log", @@ -296,6 +297,7 @@ def _find_ray_error_logs(tmpdir: str) -> Optional[str]: "event_JOBS.log", ] error_output = None + job_driver_output = None matched_pattern_count = 0 for root, _, files in os.walk(tmpdir): for file in files: @@ -303,13 +305,18 @@ def _find_ray_error_logs(tmpdir: str) -> Optional[str]: continue with open(os.path.join(root, file)) as lines: output = "".join(deque(lines, maxlen=3 * LAST_LOGS_LENGTH)) - # favor error logs that match with the most number of error patterns + # job-driver logs + if file.startswith("job-driver-"): + job_driver_output = output + continue + # ray error logs, favor those that match with the most number of + # error patterns if ( len([error for error in ERROR_LOG_PATTERNS if error in output]) > matched_pattern_count ): error_output = output - return error_output + return job_driver_output, error_output def get_last_logs(self): if not self.job_id: @@ -320,25 +327,13 @@ def get_last_logs(self): if self._last_logs: return self._last_logs - # TODO: replace with an actual API call. def _get_logs(): - buf = io.StringIO() - with open(os.devnull, "w") as devnull: - with redirect_stdout(buf), redirect_stderr(devnull): - job_controller = JobController() - job_controller.logs( - job_id=self.job_id, - should_follow=False, - ) - print("", flush=True) - output = buf.getvalue().strip() - # Many of Ray components have their separated logs (e.g. 
dashboard, - # gcs_server, etc.), so the interesting errors are not always in the - # job logs. If the job has no logs, check other ray logs for error patterns. - if "### Starting ###" not in output: - output = self._get_ray_error_logs() - assert output, "No logs fetched" - return "\n".join(output.splitlines()[-LAST_LOGS_LENGTH * 3 :]) + job_driver_log, ray_error_log = self._get_ray_logs() + assert job_driver_log or ray_error_log, "No logs fetched" + if job_driver_log: + return job_driver_log + else: + return ray_error_log ret = exponential_backoff_retry( _get_logs, diff --git a/release/ray_release/tests/test_anyscale_job_manager.py b/release/ray_release/tests/test_anyscale_job_manager.py index 528bd453be3e..61d89d7db546 100644 --- a/release/ray_release/tests/test_anyscale_job_manager.py +++ b/release/ray_release/tests/test_anyscale_job_manager.py @@ -10,6 +10,11 @@ def test_get_ray_error_logs(): f.writelines(ERROR_LOG_PATTERNS[:1]) with open(os.path.join(tmpdir, "log02"), "w") as f: f.writelines(ERROR_LOG_PATTERNS + ["haha"]) - assert AnyscaleJobManager._find_ray_error_logs(tmpdir) == "".join( - ERROR_LOG_PATTERNS + ["haha"] - ) + with open(os.path.join(tmpdir, "job-driver-w00t"), "w") as f: + f.writelines("w00t") + ( + job_driver_log, + ray_error_log, + ) = AnyscaleJobManager._find_job_driver_and_ray_error_logs(tmpdir) + assert ray_error_log == "".join(ERROR_LOG_PATTERNS + ["haha"]) + assert job_driver_log == "w00t" From 153d893b7af1b4e2701a625751f822e8400d3a2b Mon Sep 17 00:00:00 2001 From: Eric Liang Date: Thu, 27 Apr 2023 13:10:43 -0700 Subject: [PATCH 132/424] [data] [streaming] Fix backpressure when reading directly from input datasource (#34809) The size_bytes of the input metadata was not properly included from read tasks. This meant we can easily launch too many tasks and go over the memory limit on reads of this type. This bug only happens on code of the form: ds.read_datasource().iter_batches(). 
But it doesn't happen if there are any intermediate transformations, which hits a different code path that has correct metadata propagation. --- .../data/_internal/execution/legacy_compat.py | 29 ++++++-- .../test_streaming_backpressure_edge_case.py | 68 ++++++++++++++++++- 2 files changed, 89 insertions(+), 8 deletions(-) diff --git a/python/ray/data/_internal/execution/legacy_compat.py b/python/ray/data/_internal/execution/legacy_compat.py index d0fc71b2fff4..40fdec8c0c2c 100644 --- a/python/ray/data/_internal/execution/legacy_compat.py +++ b/python/ray/data/_internal/execution/legacy_compat.py @@ -40,6 +40,9 @@ ) from ray.data._internal.execution.util import make_callable_class_concurrent +# Warn about tasks larger than this. +TASK_SIZE_WARN_THRESHOLD_BYTES = 100000 + def execute_to_legacy_block_iterator( executor: Executor, @@ -192,6 +195,24 @@ def _blocks_to_input_buffer(blocks: BlockList, owns_blocks: bool) -> PhysicalOpe read_tasks = blocks._tasks remote_args = blocks._remote_args assert all(isinstance(t, ReadTask) for t in read_tasks), read_tasks + + # Defensively compute the size of the block as the max size reported by the + # datasource and the actual read task size. This is to guard against issues + # with bad metadata reporting. + def cleaned_metadata(read_task): + block_meta = read_task.get_metadata() + task_size = len(cloudpickle.dumps(read_task)) + if block_meta.size_bytes is None or task_size > block_meta.size_bytes: + if task_size > TASK_SIZE_WARN_THRESHOLD_BYTES: + print( + f"WARNING: the read task size ({task_size} bytes) is larger " + "than the reported output size of the task " + f"({block_meta.size_bytes} bytes). This may be a size " + "reporting bug in the datasource being read from." 
+ ) + block_meta.size_bytes = task_size + return block_meta + inputs = InputDataBuffer( [ RefBundle( @@ -200,13 +221,7 @@ def _blocks_to_input_buffer(blocks: BlockList, owns_blocks: bool) -> PhysicalOpe # This isn't a proper block, but it's what we are doing # in the legacy code. ray.put(read_task), - BlockMetadata( - num_rows=1, - size_bytes=len(cloudpickle.dumps(read_task)), - schema=None, - input_files=[], - exec_stats=None, - ), + cleaned_metadata(read_task), ) ], owns_blocks=True, diff --git a/python/ray/data/tests/test_streaming_backpressure_edge_case.py b/python/ray/data/tests/test_streaming_backpressure_edge_case.py index 36893d5b598e..cdedeee48fd2 100644 --- a/python/ray/data/tests/test_streaming_backpressure_edge_case.py +++ b/python/ray/data/tests/test_streaming_backpressure_edge_case.py @@ -1,12 +1,78 @@ import pytest import time +import pandas as pd import numpy as np import ray from ray._private.internal_api import memory_summary +from ray.data.datasource import Datasource, ReadTask +from ray.data.block import BlockMetadata +from ray.data.tests.conftest import * # noqa +from ray.tests.conftest import * # noqa -def test_streaming_backpressure_e2e(): +def test_input_backpressure_e2e(restore_data_context, shutdown_only): + + # Tests that backpressure applies even when reading directly from the input + # datasource. This relies on datasource metadata size estimation. 
+ @ray.remote + class Counter: + def __init__(self): + self.count = 0 + + def increment(self): + self.count += 1 + + def get(self): + return self.count + + def reset(self): + self.count = 0 + + class CountingRangeDatasource(Datasource): + def __init__(self): + self.counter = Counter.remote() + + def prepare_read(self, parallelism, n): + def range_(i): + ray.get(self.counter.increment.remote()) + return [ + pd.DataFrame({"data": np.ones((n // parallelism * 1024 * 1024,))}) + ] + + sz = (n // parallelism) * 1024 * 1024 * 8 + print("Block size", sz) + + return [ + ReadTask( + lambda i=i: range_(i), + BlockMetadata( + num_rows=n // parallelism, + size_bytes=sz, + schema=None, + input_files=None, + exec_stats=None, + ), + ) + for i in range(parallelism) + ] + + source = CountingRangeDatasource() + ctx = ray.data.DataContext.get_current() + ctx.execution_options.resource_limits.object_store_memory = 10e6 + + # 10GiB dataset. + ds = ray.data.read_datasource(source, n=10000, parallelism=1000) + it = ds.iter_batches(batch_size=None, prefetch_batches=0) + next(it) + time.sleep(3) + launched = ray.get(source.counter.get.remote()) + + # If backpressure is broken we'll launch 15+. + assert launched < 5, launched + + +def test_streaming_backpressure_e2e(restore_data_context): # This test case is particularly challenging since there is a large input->output # increase in data size: https://github.com/ray-project/ray/issues/34041 From cb63c71294e53b0db169a8503ae029600a2fe87d Mon Sep 17 00:00:00 2001 From: Hao Chen Date: Thu, 27 Apr 2023 13:42:29 -0700 Subject: [PATCH 133/424] [Data] Fix limit operator (#34800) ## Why are these changes needed? 
Fixes a bug introduced by #34705 ## Related issue number #34234 --- .../_internal/execution/operators/limit_operator.py | 3 ++- python/ray/data/tests/test_consumption.py | 12 ++++++++---- 2 files changed, 10 insertions(+), 5 deletions(-) diff --git a/python/ray/data/_internal/execution/operators/limit_operator.py b/python/ray/data/_internal/execution/operators/limit_operator.py index 6a8da6e76993..bd5dcaeb7baf 100644 --- a/python/ray/data/_internal/execution/operators/limit_operator.py +++ b/python/ray/data/_internal/execution/operators/limit_operator.py @@ -53,10 +53,10 @@ def add_input(self, refs: RefBundle, input_index: int) -> None: num_rows = metadata.num_rows assert num_rows is not None if self._consumed_rows + num_rows <= self._limit: - self._consumed_rows += num_rows out_blocks.append(block) out_metadata.append(metadata) self._output_metadata.append(metadata) + self._consumed_rows += num_rows else: # Slice the last block. def slice_fn(block, metadata, num_rows) -> Tuple[Block, BlockMetadata]: @@ -75,6 +75,7 @@ def slice_fn(block, metadata, num_rows) -> Tuple[Block, BlockMetadata]: metadata = ray.get(metadata_ref) out_metadata.append(metadata) self._output_metadata.append(metadata) + self._consumed_rows = self._limit break out_refs = RefBundle( list(zip(out_blocks, out_metadata)), diff --git a/python/ray/data/tests/test_consumption.py b/python/ray/data/tests/test_consumption.py index 28c8ec9470d9..3f2c336f367e 100644 --- a/python/ray/data/tests/test_consumption.py +++ b/python/ray/data/tests/test_consumption.py @@ -1,5 +1,6 @@ import logging import math +import sys import os import random import time @@ -350,6 +351,8 @@ def test_limit(ray_start_regular_shared, lazy): # NOTE: We test outside the power-of-2 range in order to ensure that we're not reading # redundant files due to exponential ramp-up. +# TODO(hchen): Re-enable this test after fixing #34234. 
+@pytest.mark.skip("This is not implemented for the streaming executor yet.") @pytest.mark.parametrize("limit,expected", [(10, 1), (20, 2), (30, 3), (60, 6)]) def test_limit_no_redundant_read(ray_start_regular_shared, limit, expected): # Test that dataset truncation eliminates redundant reads. @@ -381,7 +384,10 @@ def range_(i): lambda i=i: range_(i), BlockMetadata( num_rows=n, - size_bytes=None, + size_bytes=sum( + sys.getsizeof(i) + for i in range(parallelism * i, parallelism * i + n) + ), schema=None, input_files=None, exec_stats=None, @@ -414,7 +420,7 @@ def prepare_read(self, parallelism, n): lambda: [[1] * n], BlockMetadata( num_rows=None, - size_bytes=None, + size_bytes=sys.getsizeof(1) * n, schema=None, input_files=None, exec_stats=None, @@ -1782,6 +1788,4 @@ def test_nowarning_execute_with_cpu(ray_start_cluster): if __name__ == "__main__": - import sys - sys.exit(pytest.main(["-v", __file__])) From 45a577c3860288a744796144e7e796d2a951997c Mon Sep 17 00:00:00 2001 From: Sihan Wang Date: Thu, 27 Apr 2023 14:04:33 -0700 Subject: [PATCH 134/424] [Serve] Unset health replica stats when replica is under STOPPING (#34761) Whenever stopping a replica, set the health stats of the replica to 0. --- python/ray/serve/_private/deployment_state.py | 30 ++++++++----- python/ray/serve/tests/test_standalone3.py | 45 +++++++++++++++++++ 2 files changed, 63 insertions(+), 12 deletions(-) diff --git a/python/ray/serve/_private/deployment_state.py b/python/ray/serve/_private/deployment_state.py index 63c981150c1d..422443acd14d 100644 --- a/python/ray/serve/_private/deployment_state.py +++ b/python/ray/serve/_private/deployment_state.py @@ -1283,8 +1283,7 @@ def _stop_or_update_outdated_version_replicas(self, max_to_stop=math.inf) -> int # normal scale-up process. 
if replica.version.requires_actor_restart(self._target_state.version): code_version_changes += 1 - replica.stop() - self._replicas.add(ReplicaState.STOPPING, replica) + self._stop_replica(replica) replicas_changed = True # Otherwise, only lightweight options in deployment config is a mismatch, so # we update it dynamically without restarting the replica. @@ -1463,8 +1462,7 @@ def _scale_deployment_replicas(self) -> bool: f"Adding STOPPING to replica_tag: {replica}, " f"deployment_name: {self._name}" ) - replica.stop() - self._replicas.add(ReplicaState.STOPPING, replica) + self._stop_replica(replica) return replicas_changed @@ -1583,8 +1581,7 @@ def _check_startup_replicas( self._replica_constructor_retry_counter += 1 replicas_failed = True - replica.stop(graceful=False) - self._replicas.add(ReplicaState.STOPPING, replica) + self._stop_replica(replica) elif start_status in [ ReplicaStartupStatus.PENDING_ALLOCATION, ReplicaStartupStatus.PENDING_INITIALIZATION, @@ -1597,8 +1594,7 @@ def _check_startup_replicas( # Does it make sense to stop replicas in PENDING_ALLOCATION # state? if is_slow and stop_on_slow: - replica.stop(graceful=False) - self._replicas.add(ReplicaState.STOPPING, replica) + self._stop_replica(replica, graceful_stop=False) else: self._replicas.add(original_state, replica) @@ -1619,6 +1615,18 @@ def _check_startup_replicas( return slow_replicas, transitioned_to_running + def _stop_replica(self, replica, graceful_stop=True): + """Stop replica + 1. Stop the replica. + 2. Change the replica into stopping state. + 3. Set the health replica stats to 0. 
+ """ + replica.stop(graceful=graceful_stop) + self._replicas.add(ReplicaState.STOPPING, replica) + self.health_check_gauge.set( + 0, tags={"deployment": self._name, "replica": replica.replica_tag} + ) + def _check_and_update_replicas(self) -> bool: """ Check current state of all DeploymentReplica being tracked, and compare @@ -1644,8 +1652,7 @@ def _check_and_update_replicas(self) -> bool: self.health_check_gauge.set( 0, tags={"deployment": self._name, "replica": replica.replica_tag} ) - replica.stop(graceful=False) - self._replicas.add(ReplicaState.STOPPING, replica) + self._stop_replica(replica, graceful_stop=False) # If this is a replica of the target version, the deployment # enters the "UNHEALTHY" status until the replica is # recovered or a new deploy happens. @@ -1855,8 +1862,7 @@ def _stop_all_replicas(self) -> bool: ReplicaState.RECOVERING, ] ): - replica.stop() - self._replicas.add(ReplicaState.STOPPING, replica) + self._stop_replica(replica) replica_changed = True return replica_changed diff --git a/python/ray/serve/tests/test_standalone3.py b/python/ray/serve/tests/test_standalone3.py index 9170bb4fe700..47b7173b187c 100644 --- a/python/ray/serve/tests/test_standalone3.py +++ b/python/ray/serve/tests/test_standalone3.py @@ -22,6 +22,7 @@ from ray.exceptions import RayActorError from ray.serve._private.constants import ( SYNC_HANDLE_IN_DAG_FEATURE_FLAG_ENV_KEY, + SERVE_DEFAULT_APP_NAME, ) from ray.serve.context import get_global_client from ray.tests.conftest import call_ray_stop_only # noqa: F401 @@ -196,6 +197,50 @@ def verify_metrics(): serve.shutdown() +@pytest.mark.parametrize( + "ray_instance", + [], + indirect=True, +) +def test_replica_health_metric(ray_instance): + """Test replica health metrics""" + + @serve.deployment(num_replicas=2) + def f(): + return "hello" + + serve.run(f.bind()) + + def count_live_replica_metrics(): + resp = requests.get("http://127.0.0.1:9999").text + resp = resp.split("\n") + count = 0 + for metrics in resp: + if "# 
HELP" in metrics or "# TYPE" in metrics: + continue + if "serve_deployment_replica_healthy" in metrics: + if "1.0" in metrics: + count += 1 + return count + + wait_for_condition( + lambda: count_live_replica_metrics() == 2, timeout=120, retry_interval_ms=500 + ) + + # Add more replicas + serve.run(f.options(num_replicas=10).bind()) + wait_for_condition( + lambda: count_live_replica_metrics() == 10, timeout=120, retry_interval_ms=500 + ) + + # delete the application + serve.delete(SERVE_DEFAULT_APP_NAME) + wait_for_condition( + lambda: count_live_replica_metrics() == 0, timeout=120, retry_interval_ms=500 + ) + serve.shutdown() + + def test_shutdown_remote(start_and_shutdown_ray_cli_function): """Check that serve.shutdown() works on a remote Ray cluster.""" From d4b2cd4007fe6c91398add8a8e8d86844cc504ea Mon Sep 17 00:00:00 2001 From: Sihan Wang Date: Thu, 27 Apr 2023 14:56:51 -0700 Subject: [PATCH 135/424] [Serve] Add application tag into replica metrics (#34716) Add "application" tags into all necessary metrics. V1 API (.deploy()) metrics will have application="" tag value in all necessary metrics. Update all replica level metrics to use serve.util.metrics lib, so that we don't need to deal with serve context information explicitly. 
--- .../serve/production-guide/monitoring.md | 12 + python/ray/serve/_private/client.py | 1 + python/ray/serve/_private/common.py | 7 + python/ray/serve/_private/deploy_utils.py | 4 + python/ray/serve/_private/deployment_state.py | 25 +- python/ray/serve/_private/http_proxy.py | 69 ++++-- python/ray/serve/_private/replica.py | 31 +-- python/ray/serve/_private/router.py | 23 +- python/ray/serve/context.py | 5 +- python/ray/serve/controller.py | 10 +- python/ray/serve/handle.py | 10 +- python/ray/serve/metrics.py | 34 ++- .../serve/tests/test_http_prefix_matching.py | 66 +++--- python/ray/serve/tests/test_long_poll.py | 4 +- python/ray/serve/tests/test_metrics.py | 215 +++++++++++++----- src/ray/protobuf/serve.proto | 1 + 16 files changed, 372 insertions(+), 145 deletions(-) diff --git a/doc/source/serve/production-guide/monitoring.md b/doc/source/serve/production-guide/monitoring.md index 03f8e61e1237..dc3aba959c33 100644 --- a/doc/source/serve/production-guide/monitoring.md +++ b/doc/source/serve/production-guide/monitoring.md @@ -243,32 +243,39 @@ The following metrics are exposed by Ray Serve: - * deployment * replica * route + * application - The number of queries that have been processed in this replica. * - ``serve_deployment_error_counter`` [**] - * deployment * replica * route + * application - The number of exceptions that have occurred in the deployment. * - ``serve_deployment_replica_starts`` [**] - * deployment * replica + * application - The number of times this replica has been restarted due to failure. * - ``serve_deployment_replica_healthy`` - * deployment * replica + * application - Whether this deployment replica is healthy. 1 means healthy, 0 unhealthy. * - ``serve_deployment_processing_latency_ms`` [**] - * deployment * replica * route + * application - The latency for queries to be processed. * - ``serve_replica_processing_queries`` [**] - * deployment * replica + * application - The current number of queries being processed. 
* - ``serve_num_http_requests`` [*] - * route * method + * application - The number of HTTP requests processed. * - ``serve_num_http_error_requests`` [*] - * route @@ -278,11 +285,13 @@ The following metrics are exposed by Ray Serve: * - ``serve_num_router_requests`` [*] - * deployment * route + * application - The number of requests processed by the router. * - ``serve_handle_request_counter`` [**] - * handle * deployment * route + * application - The number of requests processed by this ServeHandle. * - ``serve_deployment_queued_queries`` [*] - * deployment @@ -293,9 +302,12 @@ The following metrics are exposed by Ray Serve: * error_code * method * route + * application - The number of non-200 HTTP responses returned by each deployment. * - ``serve_http_request_latency_ms`` [*] - * route + * application + - The end-to-end latency of HTTP requests (measured from the Serve HTTP proxy). ``` [*] - only available when using HTTP calls diff --git a/python/ray/serve/_private/client.py b/python/ray/serve/_private/client.py index 081671681e35..19faa09455a3 100644 --- a/python/ray/serve/_private/client.py +++ b/python/ray/serve/_private/client.py @@ -275,6 +275,7 @@ def deploy_application( route_prefix=deployment["route_prefix"], is_driver_deployment=deployment["is_driver_deployment"], docs_path=deployment["docs_path"], + app_name=name, ) ) diff --git a/python/ray/serve/_private/common.py b/python/ray/serve/_private/common.py index 7ccfbce34fa8..680de1def242 100644 --- a/python/ray/serve/_private/common.py +++ b/python/ray/serve/_private/common.py @@ -21,11 +21,13 @@ ReplicaTag = str NodeId = str Duration = float +ApplicationName = str @dataclass class EndpointInfo: route: str + app_name: str # Keep in sync with ServeReplicaState in dashboard/client/src/type/serve.ts @@ -184,6 +186,7 @@ def __init__( end_time_ms: Optional[int] = None, autoscaling_policy: Optional[AutoscalingPolicy] = None, is_driver_deployment: Optional[bool] = False, + app_name: Optional[str] = None, ): 
self.deployment_config = deployment_config self.replica_config = replica_config @@ -201,6 +204,8 @@ def __init__( self.is_driver_deployment = is_driver_deployment + self.app_name = app_name + def __getstate__(self) -> Dict[Any, Any]: clean_dict = self.__dict__.copy() del clean_dict["_cached_actor_def"] @@ -242,6 +247,7 @@ def from_proto(cls, proto: DeploymentInfoProto): "version": proto.version if proto.version != "" else None, "end_time_ms": proto.end_time_ms if proto.end_time_ms != 0 else None, "deployer_job_id": ray.get_runtime_context().get_job_id(), + "app_name": proto.app_name, } return cls(**data) @@ -252,6 +258,7 @@ def to_proto(self): "actor_name": self.actor_name, "version": self.version, "end_time_ms": self.end_time_ms, + "app_name": self.app_name, } if self.deployment_config: data["deployment_config"] = self.deployment_config.to_proto() diff --git a/python/ray/serve/_private/deploy_utils.py b/python/ray/serve/_private/deploy_utils.py index 06077f6f1e71..d2c1ca350823 100644 --- a/python/ray/serve/_private/deploy_utils.py +++ b/python/ray/serve/_private/deploy_utils.py @@ -27,6 +27,7 @@ def get_deploy_args( route_prefix: Optional[str] = None, is_driver_deployment: Optional[str] = None, docs_path: Optional[str] = None, + app_name: Optional[str] = None, ) -> Dict: """ Takes a deployment's configuration, and returns the arguments needed @@ -83,6 +84,7 @@ def get_deploy_args( "deployer_job_id": ray.get_runtime_context().get_job_id(), "is_driver_deployment": is_driver_deployment, "docs_path": docs_path, + "app_name": app_name, } return controller_deploy_args @@ -95,6 +97,7 @@ def deploy_args_to_deployment_info( deployer_job_id: Union[str, bytes], previous_deployment: DeploymentInfo, is_driver_deployment: Optional[bool] = False, + app_name: Optional[str] = None, ) -> DeploymentInfo: """Takes deployment args passed to the controller after building an application and constructs a DeploymentInfo object. 
@@ -137,6 +140,7 @@ def deploy_args_to_deployment_info( start_time_ms=int(time.time() * 1000), autoscaling_policy=autoscaling_policy, is_driver_deployment=is_driver_deployment, + app_name=app_name, ) diff --git a/python/ray/serve/_private/deployment_state.py b/python/ray/serve/_private/deployment_state.py index 422443acd14d..c65ae1771bb6 100644 --- a/python/ray/serve/_private/deployment_state.py +++ b/python/ray/serve/_private/deployment_state.py @@ -57,7 +57,7 @@ ) from ray.serve._private.version import DeploymentVersion, VersionedReplica -from ray.util import metrics +from ray.serve import metrics from ray._raylet import GcsClient from ray.util.scheduling_strategies import NodeAffinitySchedulingStrategy @@ -358,6 +358,7 @@ def start(self, deployment_info: DeploymentInfo, version: DeploymentVersion): version, self._controller_name, self._detached, + deployment_info.app_name, ) # TODO(simon): unify the constructor arguments across language elif ( @@ -1050,7 +1051,7 @@ def __init__( "Tracks whether this deployment replica is healthy. 1 means " "healthy, 0 means unhealthy." 
), - tag_keys=("deployment", "replica"), + tag_keys=("deployment", "replica", "application"), ) def should_autoscale(self) -> bool: @@ -1122,6 +1123,12 @@ def target_info(self) -> DeploymentInfo: def curr_status_info(self) -> DeploymentStatusInfo: return self._curr_status_info + @property + def app_name(self) -> str: + if self.target_info.app_name: + return self.target_info.app_name + return "" + def get_running_replica_infos(self) -> List[RunningReplicaInfo]: return [ replica.get_running_replica_info() @@ -1641,7 +1648,12 @@ def _check_and_update_replicas(self) -> bool: if replica.check_health(): self._replicas.add(ReplicaState.RUNNING, replica) self.health_check_gauge.set( - 1, tags={"deployment": self._name, "replica": replica.replica_tag} + 1, + tags={ + "deployment": self._name, + "replica": replica.replica_tag, + "application": self.app_name, + }, ) else: running_replicas_changed = True @@ -1650,7 +1662,12 @@ def _check_and_update_replicas(self) -> bool: f"{self._name} failed health check, stopping it." ) self.health_check_gauge.set( - 0, tags={"deployment": self._name, "replica": replica.replica_tag} + 0, + tags={ + "deployment": self._name, + "replica": replica.replica_tag, + "application": self.app_name, + }, ) self._stop_replica(replica, graceful_stop=False) # If this is a replica of the target version, the deployment diff --git a/python/ray/serve/_private/http_proxy.py b/python/ray/serve/_private/http_proxy.py index a4283006bd98..987357bf1105 100644 --- a/python/ray/serve/_private/http_proxy.py +++ b/python/ray/serve/_private/http_proxy.py @@ -25,7 +25,7 @@ Response, set_socket_reuse_port, ) -from ray.serve._private.common import EndpointInfo, EndpointTag +from ray.serve._private.common import EndpointInfo, EndpointTag, ApplicationName from ray.serve._private.constants import ( SERVE_LOGGER_NAME, SERVE_NAMESPACE, @@ -174,7 +174,7 @@ def __init__(self, get_handle: Callable): # Routes sorted in order of decreasing length. 
self.sorted_routes: List[str] = list() # Endpoints associated with the routes. - self.route_info: Dict[str, EndpointTag] = dict() + self.route_info: Dict[str, Tuple[EndpointTag, ApplicationName]] = dict() # Contains a ServeHandle for each endpoint. self.handles: Dict[str, RayServeHandle] = dict() @@ -191,7 +191,7 @@ def update_routes(self, endpoints: Dict[EndpointTag, EndpointInfo]) -> None: route_info = {} for endpoint, info in endpoints.items(): routes.append(info.route) - route_info[info.route] = endpoint + route_info[info.route] = (endpoint, info.app_name) if endpoint in self.handles: existing_handles.remove(endpoint) else: @@ -241,10 +241,10 @@ def match_route( matched = True if matched: - endpoint = self.route_info[route] - return route, self.handles[endpoint] + endpoint, app_name = self.route_info[route] + return route, self.handles[endpoint], app_name - return None, None + return None, None, None class HTTPProxy: @@ -259,7 +259,7 @@ def __init__(self, controller_name: str): # Set the controller name so that serve will connect to the # controller instance this proxy is running in. ray.serve.context._set_internal_replica_context( - None, None, controller_name, None + None, None, controller_name, None, None ) # Used only for displaying the route table. @@ -284,10 +284,7 @@ def get_handle(name): self.request_counter = metrics.Counter( "serve_num_http_requests", description="The number of HTTP requests processed.", - tag_keys=( - "route", - "method", - ), + tag_keys=("route", "method", "application"), ) self.request_error_counter = metrics.Counter( @@ -310,6 +307,7 @@ def get_handle(name): "error_code", "method", "route", + "application", ), ) self.processing_latency_tracker = metrics.Histogram( @@ -319,7 +317,10 @@ def get_handle(name): "(measured from the Serve HTTP proxy)." 
), boundaries=DEFAULT_LATENCY_BUCKET_MS, - tag_keys=("route",), + tag_keys=( + "route", + "application", + ), ) def _update_routes(self, endpoints: Dict[EndpointTag, EndpointInfo]) -> None: @@ -364,21 +365,31 @@ async def __call__(self, scope, receive, send): root_path = scope["root_path"] route_path = scope["path"][len(root_path) :] - self.request_counter.inc( - tags={"route": route_path, "method": scope["method"].upper()} - ) - if route_path == "/-/routes": + self.request_counter.inc( + tags={ + "route": route_path, + "method": scope["method"].upper(), + "application": "", + } + ) return await starlette.responses.JSONResponse(self.route_info)( scope, receive, send ) if route_path == "/-/healthz": + self.request_counter.inc( + tags={ + "route": route_path, + "method": scope["method"].upper(), + "application": "", + } + ) return await starlette.responses.PlainTextResponse("success")( scope, receive, send ) - route_prefix, handle = self.prefix_router.match_route(route_path) + route_prefix, handle, app_name = self.prefix_router.match_route(route_path) if route_prefix is None: self.request_error_counter.inc( tags={ @@ -387,8 +398,21 @@ async def __call__(self, scope, receive, send): "method": scope["method"].upper(), } ) + self.request_counter.inc( + tags={ + "route": route_path, + "method": scope["method"].upper(), + "application": "", + } + ) return await self._not_found(scope, receive, send) - + self.request_counter.inc( + tags={ + "route": route_path, + "method": scope["method"].upper(), + "application": app_name, + } + ) # Modify the path and root path so that reverse lookups and redirection # work as expected. We do this here instead of in replicas so it can be # changed without restarting the replicas. 
@@ -399,11 +423,15 @@ async def __call__(self, scope, receive, send): start_time = time.time() ray.serve.context._serve_request_context.set( - ray.serve.context.RequestContext(route_path, get_random_letters(10)) + ray.serve.context.RequestContext( + route_path, get_random_letters(10), app_name + ) ) status_code = await _send_request_to_handle(handle, scope, receive, send) latency_ms = (time.time() - start_time) * 1000.0 - self.processing_latency_tracker.observe(latency_ms, tags={"route": route_path}) + self.processing_latency_tracker.observe( + latency_ms, tags={"route": route_path, "application": app_name} + ) logger.info( access_log_msg( method=scope["method"], @@ -426,6 +454,7 @@ async def __call__(self, scope, receive, send): "error_code": status_code, "method": scope["method"].upper(), "route": route_path, + "application": app_name, } ) diff --git a/python/ray/serve/_private/replica.py b/python/ray/serve/_private/replica.py index f304efe860fc..ba7aadaba628 100644 --- a/python/ray/serve/_private/replica.py +++ b/python/ray/serve/_private/replica.py @@ -14,7 +14,7 @@ from ray import cloudpickle from ray.actor import ActorClass, ActorHandle from ray.remote_function import RemoteFunction -from ray.util import metrics +from ray.serve import metrics from ray._private.async_compat import sync_to_async from ray.serve._private.autoscaling_metrics import start_metrics_pusher @@ -68,6 +68,7 @@ async def __init__( version: DeploymentVersion, controller_name: str, detached: bool, + app_name: str = None, ): configure_component_logger( component_type="deployment", @@ -121,6 +122,7 @@ async def __init__( replica_tag, controller_name, servable_object=None, + app_name=app_name, ) assert controller_name, "Must provide a valid controller_name" @@ -155,6 +157,7 @@ async def initialize_replica(): replica_tag, controller_name, servable_object=_callable, + app_name=app_name, ) self.replica = RayServeReplica( @@ -165,6 +168,7 @@ async def initialize_replica(): version, is_function, 
controller_handle, + app_name, ) self._init_finish_event.set() @@ -284,6 +288,7 @@ def __init__( version: DeploymentVersion, is_function: bool, controller_handle: ActorHandle, + app_name: str, ) -> None: self.deployment_name = deployment_name self.replica_tag = replica_tag @@ -292,6 +297,7 @@ def __init__( self.version = version self.deployment_config = None self.rwlock = aiorwlock.RWLock() + self.app_name = app_name user_health_check = getattr(_callable, HEALTH_CHECK_METHOD, None) if not callable(user_health_check): @@ -308,10 +314,7 @@ def user_health_check(): description=( "The number of queries that have been processed in this replica." ), - tag_keys=("deployment", "replica", "route"), - ) - self.request_counter.set_default_tags( - {"deployment": self.deployment_name, "replica": self.replica_tag} + tag_keys=("route",), ) self.error_counter = metrics.Counter( @@ -319,10 +322,7 @@ def user_health_check(): description=( "The number of exceptions that have occurred in this replica." ), - tag_keys=("deployment", "replica", "route"), - ) - self.error_counter.set_default_tags( - {"deployment": self.deployment_name, "replica": self.replica_tag} + tag_keys=("route",), ) self.restart_counter = metrics.Counter( @@ -330,29 +330,18 @@ def user_health_check(): description=( "The number of times this replica has been restarted due to failure." 
), - tag_keys=("deployment", "replica"), - ) - self.restart_counter.set_default_tags( - {"deployment": self.deployment_name, "replica": self.replica_tag} ) self.processing_latency_tracker = metrics.Histogram( "serve_deployment_processing_latency_ms", description="The latency for queries to be processed.", boundaries=DEFAULT_LATENCY_BUCKET_MS, - tag_keys=("deployment", "replica", "route"), - ) - self.processing_latency_tracker.set_default_tags( - {"deployment": self.deployment_name, "replica": self.replica_tag} + tag_keys=("route",), ) self.num_processing_items = metrics.Gauge( "serve_replica_processing_queries", description="The current number of queries being processed.", - tag_keys=("deployment", "replica"), - ) - self.num_processing_items.set_default_tags( - {"deployment": self.deployment_name, "replica": self.replica_tag} ) self.restart_counter.inc() diff --git a/python/ray/serve/_private/router.py b/python/ray/serve/_private/router.py index 5974a94ec457..49c2622c37b4 100644 --- a/python/ray/serve/_private/router.py +++ b/python/ray/serve/_private/router.py @@ -40,6 +40,9 @@ class RequestMetadata: # HTTP route path of the request. route: str = "" + # Application Name + app_name: str = "" + @dataclass class Query: @@ -98,7 +101,7 @@ def __init__( "The current number of queries to this deployment waiting" " to be assigned to a replica." 
), - tag_keys=("deployment", "route"), + tag_keys=("deployment", "route", "application"), ) self.num_queued_queries_gauge.set_default_tags( {"deployment": self.deployment_name} @@ -229,7 +232,11 @@ async def assign_replica(self, query: Query) -> ray.ObjectRef: """ self.num_queued_queries += 1 self.num_queued_queries_gauge.set( - self.num_queued_queries, tags={"route": query.metadata.route} + self.num_queued_queries, + tags={ + "route": query.metadata.route, + "application": query.metadata.app_name, + }, ) await query.resolve_async_tasks() assigned_ref = self._try_assign_replica(query) @@ -255,7 +262,11 @@ async def assign_replica(self, query: Query) -> ray.ObjectRef: assigned_ref = self._try_assign_replica(query) self.num_queued_queries -= 1 self.num_queued_queries_gauge.set( - self.num_queued_queries, tags={"route": query.metadata.route} + self.num_queued_queries, + tags={ + "route": query.metadata.route, + "application": query.metadata.app_name, + }, ) return assigned_ref @@ -279,7 +290,7 @@ def __init__( self.num_router_requests = metrics.Counter( "serve_num_router_requests", description="The number of requests processed by the router.", - tag_keys=("deployment", "route"), + tag_keys=("deployment", "route", "application"), ) self.num_router_requests.set_default_tags({"deployment": deployment_name}) @@ -305,7 +316,9 @@ async def assign_request( ): """Assign a query and returns an object ref represent the result""" - self.num_router_requests.inc(tags={"route": request_meta.route}) + self.num_router_requests.inc( + tags={"route": request_meta.route, "application": request_meta.app_name} + ) return await self._replica_set.assign_replica( Query( args=list(request_args), diff --git a/python/ray/serve/context.py b/python/ray/serve/context.py index 3497c55b138c..2912a2e66ee4 100644 --- a/python/ray/serve/context.py +++ b/python/ray/serve/context.py @@ -31,6 +31,7 @@ class ReplicaContext: replica_tag: ReplicaTag _internal_controller_name: str servable_object: Callable + 
app_name: str @PublicAPI(stability="alpha") @@ -73,10 +74,11 @@ def _set_internal_replica_context( replica_tag: ReplicaTag, controller_name: str, servable_object: Callable, + app_name: str, ): global _INTERNAL_REPLICA_CONTEXT _INTERNAL_REPLICA_CONTEXT = ReplicaContext( - deployment, replica_tag, controller_name, servable_object + deployment, replica_tag, controller_name, servable_object, app_name ) @@ -146,6 +148,7 @@ def _connect() -> ServeControllerClient: class RequestContext: route: str = "" request_id: str = "" + app_name: str = "" _serve_request_context = contextvars.ContextVar( diff --git a/python/ray/serve/controller.py b/python/ray/serve/controller.py index 0e5cba0794a8..ef45bb5647ed 100644 --- a/python/ray/serve/controller.py +++ b/python/ray/serve/controller.py @@ -408,14 +408,19 @@ def deploy( deployer_job_id: Union[str, bytes], docs_path: Optional[str] = None, is_driver_deployment: Optional[bool] = False, + app_name: str = None, ) -> bool: """Deploys a deployment.""" - if route_prefix is not None: assert route_prefix.startswith("/") if docs_path is not None: assert docs_path.startswith("/") + # app_name is None for V1 API, reset it to empty string to avoid + # breaking metrics. 
+ if app_name is None: + app_name = "" + deployment_info = deploy_args_to_deployment_info( deployment_name=name, deployment_config_proto_bytes=deployment_config_proto_bytes, @@ -423,6 +428,7 @@ def deploy( deployer_job_id=deployer_job_id, previous_deployment=self.deployment_state_manager.get_deployment(name), is_driver_deployment=is_driver_deployment, + app_name=app_name, ) # TODO(architkulkarni): When a deployment is redeployed, even if @@ -431,7 +437,7 @@ def deploy( updating = self.deployment_state_manager.deploy(name, deployment_info) if route_prefix is not None: - endpoint_info = EndpointInfo(route=route_prefix) + endpoint_info = EndpointInfo(route=route_prefix, app_name=app_name) self.endpoint_state.update_endpoint(name, endpoint_info) else: self.endpoint_state.delete_endpoint(name) diff --git a/python/ray/serve/handle.py b/python/ray/serve/handle.py index c1b068c864f2..953413a92ea3 100644 --- a/python/ray/serve/handle.py +++ b/python/ray/serve/handle.py @@ -133,7 +133,7 @@ def __init__( "The number of handle.remote() calls that have been " "made on this handle." 
), - tag_keys=("handle", "deployment", "route"), + tag_keys=("handle", "deployment", "route", "application"), ) self.request_counter.set_default_tags( {"handle": self.handle_tag, "deployment": self.deployment_name} @@ -236,8 +236,14 @@ def _remote(self, deployment_name, handle_options, args, kwargs) -> Coroutine: call_method=handle_options.method_name, http_arg_is_pickled=self._pickled_http_request, route=_request_context.route, + app_name=_request_context.app_name, + ) + self.request_counter.inc( + tags={ + "route": _request_context.route, + "application": _request_context.app_name, + } ) - self.request_counter.inc(tags={"route": _request_context.route}) coro = self.router.assign_request(request_metadata, *args, **kwargs) return coro diff --git a/python/ray/serve/metrics.py b/python/ray/serve/metrics.py index 356538be30bf..1e8c8c15d106 100644 --- a/python/ray/serve/metrics.py +++ b/python/ray/serve/metrics.py @@ -4,21 +4,33 @@ DEPLOYMENT_TAG = "deployment" REPLICA_TAG = "replica" +APPLICATION_TAG = "application" -def _add_serve_metric_tags(tag_keys: Optional[Tuple[str]] = None): +def _add_serve_metric_tags(tag_keys: Optional[Tuple[str]] = None) -> Tuple[str]: """Add serve context tags to the tag_keys""" + if tag_keys is None: + tag_keys = tuple() + + # If the context doesn't exist, no serve tag is added. 
if context.get_internal_replica_context() is None: return tag_keys + # Check no collision with customer tag if DEPLOYMENT_TAG in tag_keys: raise ValueError(f"'{DEPLOYMENT_TAG}' tag is reserved for Ray Serve metrics") if REPLICA_TAG in tag_keys: raise ValueError(f"'{REPLICA_TAG}' tag is reserved for Ray Serve metrics") + if APPLICATION_TAG in tag_keys: + raise ValueError(f"'{APPLICATION_TAG}' tag is reserved for Ray Serve metrics") + # Get serve tag inserted: + ray_serve_tags = (DEPLOYMENT_TAG, REPLICA_TAG) + if context.get_internal_replica_context().app_name: + ray_serve_tags += (APPLICATION_TAG,) if tag_keys: - tag_keys = (DEPLOYMENT_TAG, REPLICA_TAG) + tag_keys + tag_keys = ray_serve_tags + tag_keys else: - tag_keys = (DEPLOYMENT_TAG, REPLICA_TAG) + tag_keys = ray_serve_tags return tag_keys @@ -30,9 +42,13 @@ def _add_serve_metric_default_tags(default_tags: Dict[str, str]): raise ValueError(f"'{DEPLOYMENT_TAG}' tag is reserved for Ray Serve metrics") if REPLICA_TAG in default_tags: raise ValueError(f"'{REPLICA_TAG}' tag is reserved for Ray Serve metrics") + if APPLICATION_TAG in default_tags: + raise ValueError(f"'{APPLICATION_TAG}' tag is reserved for Ray Serve metrics") replica_context = context.get_internal_replica_context() default_tags[DEPLOYMENT_TAG] = replica_context.deployment default_tags[REPLICA_TAG] = replica_context.replica_tag + if replica_context.app_name: + default_tags[APPLICATION_TAG] = replica_context.app_name return default_tags @@ -40,6 +56,10 @@ class Counter(metrics.Counter): def __init__( self, name: str, description: str = "", tag_keys: Optional[Tuple[str]] = None ): + if tag_keys and not isinstance(tag_keys, tuple): + raise TypeError( + "tag_keys should be a tuple type, got: " f"{type(tag_keys)}" + ) tag_keys = _add_serve_metric_tags(tag_keys) super().__init__(name, description, tag_keys) self.set_default_tags({}) @@ -52,6 +72,10 @@ class Gauge(metrics.Gauge): def __init__( self, name: str, description: str = "", tag_keys: 
Optional[Tuple[str]] = None ): + if tag_keys and not isinstance(tag_keys, tuple): + raise TypeError( + "tag_keys should be a tuple type, got: " f"{type(tag_keys)}" + ) tag_keys = _add_serve_metric_tags(tag_keys) super().__init__(name, description, tag_keys) self.set_default_tags({}) @@ -68,6 +92,10 @@ def __init__( boundaries: List[float] = None, tag_keys: Optional[Tuple[str]] = None, ): + if tag_keys and not isinstance(tag_keys, tuple): + raise TypeError( + "tag_keys should be a tuple type, got: " f"{type(tag_keys)}" + ) tag_keys = _add_serve_metric_tags(tag_keys) super().__init__(name, description, boundaries, tag_keys) self.set_default_tags({}) diff --git a/python/ray/serve/tests/test_http_prefix_matching.py b/python/ray/serve/tests/test_http_prefix_matching.py index d1c6c82c43e7..a0655d0851e0 100644 --- a/python/ray/serve/tests/test_http_prefix_matching.py +++ b/python/ray/serve/tests/test_http_prefix_matching.py @@ -14,87 +14,89 @@ def mock_get_handle(name, *args, **kwargs): def test_no_match(mock_longest_prefix_router): router = mock_longest_prefix_router - router.update_routes({"endpoint": EndpointInfo(route="/hello")}) - route, handle = router.match_route("/nonexistent") - assert route is None and handle is None + router.update_routes({"endpoint": EndpointInfo(route="/hello", app_name="")}) + route, handle, app_name = router.match_route("/nonexistent") + assert route is None and handle is None and app_name is None def test_default_route(mock_longest_prefix_router): router = mock_longest_prefix_router - router.update_routes({"endpoint": EndpointInfo(route="/endpoint")}) + router.update_routes({"endpoint": EndpointInfo(route="/endpoint", app_name="")}) - route, handle = router.match_route("/nonexistent") - assert route is None and handle is None + route, handle, app_name = router.match_route("/nonexistent") + assert route is None and handle is None and app_name is None - route, handle = router.match_route("/endpoint") - assert route == "/endpoint" and handle 
== "endpoint" + route, handle, app_name = router.match_route("/endpoint") + assert route == "/endpoint" and handle == "endpoint" and app_name == "" def test_trailing_slash(mock_longest_prefix_router): router = mock_longest_prefix_router router.update_routes( { - "endpoint": EndpointInfo(route="/test"), + "endpoint": EndpointInfo(route="/test", app_name=""), } ) - route, handle = router.match_route("/test/") + route, handle, _ = router.match_route("/test/") assert route == "/test" and handle == "endpoint" router.update_routes( { - "endpoint": EndpointInfo(route="/test/"), + "endpoint": EndpointInfo(route="/test/", app_name=""), } ) - route, handle = router.match_route("/test") - assert route is None and handle is None + route, handle, app_name = router.match_route("/test") + assert route is None and handle is None and app_name is None def test_prefix_match(mock_longest_prefix_router): router = mock_longest_prefix_router router.update_routes( { - "endpoint1": EndpointInfo(route="/test/test2"), - "endpoint2": EndpointInfo(route="/test"), - "endpoint3": EndpointInfo(route="/"), + "endpoint1": EndpointInfo(route="/test/test2", app_name=""), + "endpoint2": EndpointInfo(route="/test", app_name=""), + "endpoint3": EndpointInfo(route="/", app_name=""), } ) - route, handle = router.match_route("/test/test2/subpath") + route, handle, _ = router.match_route("/test/test2/subpath") assert route == "/test/test2" and handle == "endpoint1" - route, handle = router.match_route("/test/test2/") + route, handle, _ = router.match_route("/test/test2/") assert route == "/test/test2" and handle == "endpoint1" - route, handle = router.match_route("/test/test2") + route, handle, _ = router.match_route("/test/test2") assert route == "/test/test2" and handle == "endpoint1" - route, handle = router.match_route("/test/subpath") + route, handle, _ = router.match_route("/test/subpath") assert route == "/test" and handle == "endpoint2" - route, handle = router.match_route("/test/") + route, handle, 
_ = router.match_route("/test/") assert route == "/test" and handle == "endpoint2" - route, handle = router.match_route("/test") + route, handle, _ = router.match_route("/test") assert route == "/test" and handle == "endpoint2" - route, handle = router.match_route("/test2") + route, handle, _ = router.match_route("/test2") assert route == "/" and handle == "endpoint3" - route, handle = router.match_route("/") + route, handle, _ = router.match_route("/") assert route == "/" and handle == "endpoint3" def test_update_routes(mock_longest_prefix_router): router = mock_longest_prefix_router - router.update_routes({"endpoint": EndpointInfo(route="/endpoint")}) + router.update_routes({"endpoint": EndpointInfo(route="/endpoint", app_name="app1")}) - route, handle = router.match_route("/endpoint") - assert route == "/endpoint" and handle == "endpoint" + route, handle, app_name = router.match_route("/endpoint") + assert route == "/endpoint" and handle == "endpoint" and app_name == "app1" - router.update_routes({"endpoint2": EndpointInfo(route="/endpoint2")}) + router.update_routes( + {"endpoint2": EndpointInfo(route="/endpoint2", app_name="app2")} + ) - route, handle = router.match_route("/endpoint") - assert route is None and handle is None + route, handle, app_name = router.match_route("/endpoint") + assert route is None and handle is None and app_name is None - route, handle = router.match_route("/endpoint2") - assert route == "/endpoint2" and handle == "endpoint2" + route, handle, app_name = router.match_route("/endpoint2") + assert route == "/endpoint2" and handle == "endpoint2" and app_name == "app2" if __name__ == "__main__": diff --git a/python/ray/serve/tests/test_long_poll.py b/python/ray/serve/tests/test_long_poll.py index 3cbe56f3ff0c..c5286808c666 100644 --- a/python/ray/serve/tests/test_long_poll.py +++ b/python/ray/serve/tests/test_long_poll.py @@ -182,8 +182,8 @@ def test_listen_for_change_java(serve_instance): assert 
poll_result_1.updated_objects["key_1"].object_snapshot.decode() == "999" request_2 = {"keys_to_snapshot_ids": {"ROUTE_TABLE": -1}} endpoints: Dict[EndpointTag, EndpointInfo] = dict() - endpoints["deployment_name"] = EndpointInfo(route="/test/xlang/poll") - endpoints["deployment_name1"] = EndpointInfo(route="/test/xlang/poll1") + endpoints["deployment_name"] = EndpointInfo(route="/test/xlang/poll", app_name="") + endpoints["deployment_name1"] = EndpointInfo(route="/test/xlang/poll1", app_name="") ray.get(host.notify_changed.remote(LongPollNamespace.ROUTE_TABLE, endpoints)) object_ref_2 = host.listen_for_change_java.remote( LongPollRequest(**request_2).SerializeToString() diff --git a/python/ray/serve/tests/test_metrics.py b/python/ray/serve/tests/test_metrics.py index 41084f76feba..946f04c5b98d 100644 --- a/python/ray/serve/tests/test_metrics.py +++ b/python/ray/serve/tests/test_metrics.py @@ -193,6 +193,7 @@ def f(*args): assert len(num_requests) == 1 assert num_requests[0]["route"] == "/fake_route" assert num_requests[0]["method"] == "GET" + assert num_requests[0]["application"] == "" print("serve_num_http_requests working as expected.") num_errors = get_metric_dictionaries("serve_num_http_error_requests") @@ -215,22 +216,131 @@ def f(*args): assert num_deployment_errors[0]["deployment"] == "app_f" assert num_deployment_errors[0]["error_code"] == "500" assert num_deployment_errors[0]["method"] == "GET" + assert num_deployment_errors[0]["application"] == "app" print("serve_num_deployment_http_error_requests working as expected.") + latency_metrics = get_metric_dictionaries("serve_http_request_latency_ms_sum") + assert len(latency_metrics) == 1 + assert latency_metrics[0]["route"] == "/real_route" + assert latency_metrics[0]["application"] == "app" + print("serve_http_request_latency_ms working as expected.") + + +def test_replica_metrics_fields(serve_start_shutdown): + """Test replica metrics fields""" + + @serve.deployment + def f(): + return "hello" + + 
@serve.deployment + def g(): + return "world" + + serve.run(f.bind(), name="app1", route_prefix="/f") + serve.run(g.bind(), name="app2", route_prefix="/g") + url_f = "http://127.0.0.1:8000/f" + url_g = "http://127.0.0.1:8000/g" + + assert "hello" == requests.get(url_f).text + assert "world" == requests.get(url_g).text + + def verify_metrics(metric, expected_output): + for key in expected_output: + assert metric[key] == expected_output[key] + + wait_for_condition( + lambda: len(get_metric_dictionaries("serve_deployment_request_counter")) == 2, + timeout=20, + ) + + num_requests = get_metric_dictionaries("serve_deployment_request_counter") + assert len(num_requests) == 2 + expected_output = {"route": "/f", "deployment": "app1_f", "application": "app1"} + verify_metrics(num_requests[0], expected_output) + + start_metrics = get_metric_dictionaries("serve_deployment_replica_starts") + assert len(start_metrics) == 2 + expected_output = {"deployment": "app1_f", "application": "app1"} + verify_metrics(start_metrics[0], expected_output) + expected_output = {"deployment": "app2_g", "application": "app2"} + verify_metrics(start_metrics[1], expected_output) + + # Latency metrics + wait_for_condition( + lambda: len( + get_metric_dictionaries("serve_deployment_processing_latency_ms_count") + ) + == 2, + timeout=20, + ) + for metric_name in [ + "serve_deployment_processing_latency_ms_count", + "serve_deployment_processing_latency_ms_sum", + ]: + latency_metrics = get_metric_dictionaries(metric_name) + print(f"checking metric {metric_name}, {latency_metrics}") + assert len(latency_metrics) == 2 + expected_output1 = {"deployment": "app1_f", "application": "app1"} + expected_output2 = {"deployment": "app2_g", "application": "app2"} + verify_metrics(latency_metrics[0], expected_output1) + verify_metrics(latency_metrics[1], expected_output2) + + processing_queries = get_metric_dictionaries("serve_replica_processing_queries") + assert len(processing_queries) == 2 + expected_output1 = 
{"deployment": "app1_f", "application": "app1"} + expected_output2 = {"deployment": "app2_g", "application": "app2"} + verify_metrics(processing_queries[0], expected_output1) + verify_metrics(processing_queries[1], expected_output2) + + @serve.deployment + def h(): + return 1 / 0 + + serve.run(h.bind(), name="app3", route_prefix="/h") + assert 500 == requests.get("http://127.0.0.1:8000/h").status_code + wait_for_condition( + lambda: len(get_metric_dictionaries("serve_deployment_error_counter")) == 1, + timeout=20, + ) + err_requests = get_metric_dictionaries("serve_deployment_error_counter") + assert len(err_requests) == 1 + expected_output = {"route": "/h", "deployment": "app3_h", "application": "app3"} + verify_metrics(err_requests[0], expected_output) + + health_metrics = get_metric_dictionaries("serve_deployment_replica_healthy") + assert len(health_metrics) == 3 + expected_outputs = [ + {"deployment": "app1_f", "application": "app1"}, + {"deployment": "app2_g", "application": "app2"}, + {"deployment": "app3_h", "application": "app3"}, + ] + for i in range(len(health_metrics)): + verify_metrics(health_metrics[i], expected_outputs[i]) + class TestRequestContextMetrics: def _generate_metrics_summary(self, metrics): - """Generate "route" information from metrics. + """Generate "route", "application" information from metrics. Args: metrics: list of metrics, each item is a dictionary generated from get_metric_dictionaries func. - Return: return a dictionary, key is deployment name, value is a set - including all routes. + Return: return a Tuple[dictionary, dictionary] + First dictionary: key is deployment name, value is a set + including all routes. string is to indicate the applicationn name. + Second dictionary: key is the deployment name, value is application name. 
""" - metrics_summary = DefaultDict(set) + metrics_summary_route = DefaultDict(set) + metrics_summary_app = DefaultDict(str) + for request_metrcis in metrics: - metrics_summary[request_metrcis["deployment"]].add(request_metrcis["route"]) - return metrics_summary + metrics_summary_route[request_metrcis["deployment"]].add( + request_metrcis["route"] + ) + metrics_summary_app[request_metrcis["deployment"]] = request_metrcis[ + "application" + ] + return metrics_summary_route, metrics_summary_app def test_request_context_pass_for_http_proxy(self, serve_start_shutdown): """Test HTTP proxy passing request context""" @@ -269,56 +379,46 @@ def h(): ) # Check replica qps & latency - qps_metrics = self._generate_metrics_summary( + qps_metrics_route, qps_metrics_app_name = self._generate_metrics_summary( get_metric_dictionaries("serve_deployment_request_counter") ) - print(qps_metrics) - assert qps_metrics["app1_f"] == {"/app1"} - assert qps_metrics["app2_g"] == {"/app2"} - qps_metrics = self._generate_metrics_summary( + print(qps_metrics_route) + assert qps_metrics_route["app1_f"] == {"/app1"} + assert qps_metrics_route["app2_g"] == {"/app2"} + assert qps_metrics_app_name["app1_f"] == "app1" + assert qps_metrics_app_name["app2_g"] == "app2" + qps_metrics_route, qps_metrics_app_name = self._generate_metrics_summary( get_metric_dictionaries("serve_deployment_error_counter") ) - assert qps_metrics["app3_h"] == {"/app3"} - - latency_metrics = self._generate_metrics_summary( - get_metric_dictionaries("serve_deployment_processing_latency_ms_sum") - ) - assert len(latency_metrics) == 3 - assert latency_metrics["app1_f"] == {"/app1"} - assert latency_metrics["app2_g"] == {"/app2"} - assert latency_metrics["app3_h"] == {"/app3"} + assert qps_metrics_route["app3_h"] == {"/app3"} + assert qps_metrics_app_name["app3_h"] == "app3" # Check http proxy qps & latency - qps_metrics = get_metric_dictionaries("serve_num_http_requests") - len(qps_metrics) == 3 - assert {metric["route"] for 
metric in qps_metrics} == { - "/app1", - "/app2", - "/app3", - } - - latency_metrics = get_metric_dictionaries("serve_http_request_latency_ms_sum") - assert {metric["route"] for metric in latency_metrics} == { - "/app1", - "/app2", - "/app3", - } - - # Check handle qps - qps_metrics = self._generate_metrics_summary( - get_metric_dictionaries("serve_handle_request_counter") - ) - assert qps_metrics["app1_f"] == {"/app1"} - assert qps_metrics["app2_g"] == {"/app2"} - assert qps_metrics["app3_h"] == {"/app3"} + for metric_name in [ + "serve_num_http_requests", + "serve_http_request_latency_ms_sum", + ]: + metrics = get_metric_dictionaries(metric_name) + assert {metric["route"] for metric in metrics} == { + "/app1", + "/app2", + "/app3", + } - # Check router qps - qps_metrics = self._generate_metrics_summary( - get_metric_dictionaries("serve_num_router_requests") - ) - assert qps_metrics["app1_f"] == {"/app1"} - assert qps_metrics["app2_g"] == {"/app2"} - assert qps_metrics["app3_h"] == {"/app3"} + for metric_name in [ + "serve_handle_request_counter", + "serve_num_router_requests", + "serve_deployment_processing_latency_ms_sum", + ]: + metrics_route, metrics_app_name = self._generate_metrics_summary( + get_metric_dictionaries("serve_handle_request_counter") + ) + assert metrics_route["app1_f"] == {"/app1"} + assert metrics_route["app2_g"] == {"/app2"} + assert metrics_route["app3_h"] == {"/app3"} + assert metrics_app_name["app1_f"] == "app1" + assert metrics_app_name["app2_g"] == "app2" + assert metrics_app_name["app3_h"] == "app3" def test_request_context_pass_for_handle_passing(self, serve_start_shutdown): """Test handle passing contexts between replicas""" @@ -365,12 +465,18 @@ async def app2(self): == 4, timeout=20, ) - requests_metrics = self._generate_metrics_summary( + ( + requests_metrics_route, + requests_metrics_app_name, + ) = self._generate_metrics_summary( get_metric_dictionaries("serve_deployment_request_counter") ) - assert requests_metrics["app_G"] == 
{"/api", "/api2"} - assert requests_metrics["app_g1"] == {"/api"} - assert requests_metrics["app_g2"] == {"/api2"} + assert requests_metrics_route["app_G"] == {"/api", "/api2"} + assert requests_metrics_route["app_g1"] == {"/api"} + assert requests_metrics_route["app_g2"] == {"/api2"} + assert requests_metrics_app_name["app_G"] == "app" + assert requests_metrics_app_name["app_g1"] == "app" + assert requests_metrics_app_name["app_g2"] == "app" def test_customer_metrics_with_context(self, serve_start_shutdown): @serve.deployment @@ -427,6 +533,7 @@ def __call__(self): counter_metrics[0]["my_runtime_tag"] == "100" counter_metrics[0]["replica"] == replica_tag counter_metrics[0]["deployment"] == deployment_name + counter_metrics[0]["application"] == "app" gauge_metrics = get_metric_dictionaries("my_gauge") assert len(counter_metrics) == 1 @@ -434,6 +541,7 @@ def __call__(self): gauge_metrics[0]["my_runtime_tag"] == "300" gauge_metrics[0]["replica"] == replica_tag gauge_metrics[0]["deployment"] == deployment_name + gauge_metrics[0]["application"] == "app" histogram_metrics = get_metric_dictionaries("my_histogram_sum") assert len(histogram_metrics) == 1 @@ -441,6 +549,7 @@ def __call__(self): histogram_metrics[0]["my_runtime_tag"] == "200" histogram_metrics[0]["replica"] == replica_tag histogram_metrics[0]["deployment"] == deployment_name + gauge_metrics[0]["application"] == "app" @pytest.mark.parametrize("use_actor", [False, True]) def test_serve_metrics_outside_serve(self, use_actor, serve_start_shutdown): diff --git a/src/ray/protobuf/serve.proto b/src/ray/protobuf/serve.proto index 7d0850830c48..bae767b4f747 100644 --- a/src/ray/protobuf/serve.proto +++ b/src/ray/protobuf/serve.proto @@ -170,6 +170,7 @@ message DeploymentInfo { string actor_name = 5; string version = 6; int64 end_time_ms = 7; + string app_name = 8; } // Wrap DeploymentInfo and route. The "" route value need to be convert to None/null. 
From 05b0215614d0331dfccfce973e661d4ead2af1de Mon Sep 17 00:00:00 2001 From: matthewdeng Date: Thu, 27 Apr 2023 16:11:17 -0700 Subject: [PATCH 136/424] [docs] Remove requirements-rtd.txt (#33618) Signed-off-by: Matthew Deng --- ci/env/install-dependencies.sh | 1 - doc/requirements-rtd.txt | 2 -- 2 files changed, 3 deletions(-) delete mode 100644 doc/requirements-rtd.txt diff --git a/ci/env/install-dependencies.sh b/ci/env/install-dependencies.sh index 298363351a24..92630a1e56e1 100755 --- a/ci/env/install-dependencies.sh +++ b/ci/env/install-dependencies.sh @@ -353,7 +353,6 @@ install_pip_packages() { if [ "${OSTYPE}" = msys ] && [ "${python_version}" = "3.8" ]; then { echo "WARNING: Pillow binaries not available on Windows; cannot build docs"; } 2> /dev/null else - pip install --use-deprecated=legacy-resolver -r "${WORKSPACE_DIR}"/doc/requirements-rtd.txt pip install --use-deprecated=legacy-resolver -r "${WORKSPACE_DIR}"/doc/requirements-doc.txt fi fi diff --git a/doc/requirements-rtd.txt b/doc/requirements-rtd.txt deleted file mode 100644 index 5d5b4e713754..000000000000 --- a/doc/requirements-rtd.txt +++ /dev/null @@ -1,2 +0,0 @@ -# CI requirements: this is the file buildkite needs. 
--r requirements-doc.txt \ No newline at end of file From 4d75df40abcc4a46248102377b1f34817705f6f1 Mon Sep 17 00:00:00 2001 From: Cuong Nguyen <128072568+can-anyscale@users.noreply.github.com> Date: Thu, 27 Apr 2023 17:24:45 -0700 Subject: [PATCH 137/424] [CI][Clean] Clean up file manager (#34803) * Remove one type of job manager Signed-off-by: Cuong Nguyen * Fix unittest Signed-off-by: Cuong Nguyen * Fix lints as well Signed-off-by: Cuong Nguyen --------- Signed-off-by: Cuong Nguyen --- .../file_manager/session_controller.py | 56 ------------------- release/ray_release/glue.py | 34 ++--------- release/ray_release/schema.json | 8 --- release/ray_release/tests/test_glue.py | 2 - release/release_tests.yaml | 4 -- 5 files changed, 4 insertions(+), 100 deletions(-) delete mode 100644 release/ray_release/file_manager/session_controller.py diff --git a/release/ray_release/file_manager/session_controller.py b/release/ray_release/file_manager/session_controller.py deleted file mode 100644 index 5aab4d5807fc..000000000000 --- a/release/ray_release/file_manager/session_controller.py +++ /dev/null @@ -1,56 +0,0 @@ -import os -from typing import TYPE_CHECKING, Optional - -from ray_release.cluster_manager.cluster_manager import ClusterManager -from ray_release.file_manager.file_manager import FileManager -from ray_release.logger import logger - -if TYPE_CHECKING: - from anyscale.controllers.session_controller import SessionController - - -class SessionControllerFileManager(FileManager): - def __init__( - self, - cluster_manager: ClusterManager, - session_controller: Optional["SessionController"] = None, - ): - from anyscale.controllers.session_controller import SessionController - - super(SessionControllerFileManager, self).__init__(cluster_manager) - self.session_controller = session_controller or SessionController() - - # Write legacy anyscale project yaml - with open(os.path.join(os.getcwd(), ".anyscale.yaml"), "wt") as f: - f.write(f"project_id: 
{self.cluster_manager.project_id}") - - def upload(self, source: Optional[str] = None, target: Optional[str] = None): - logger.info( - f"Uploading {source or ''} to {target or ''} " - f"using SessionController" - ) - - if source and os.path.isdir(source) and target: - # Add trailing slashes - source = os.path.join(source, "") - target = os.path.join(target, "") - - self.session_controller.push( - session_name=self.cluster_manager.cluster_name, - source=source, - target=target, - config=None, - all_nodes=False, - ) - - def download(self, source: str, target: str): - logger.info( - f"Downloading {source or ''} to {target or ''} " - f"using SessionController" - ) - self.session_controller.pull( - session_name=self.cluster_manager.cluster_name, - source=source, - target=target, - config=None, - ) diff --git a/release/ray_release/glue.py b/release/ray_release/glue.py index 46ef68f45ce5..016659626ec0 100644 --- a/release/ray_release/glue.py +++ b/release/ray_release/glue.py @@ -37,7 +37,6 @@ ClusterEnvCreateError, ) from ray_release.file_manager.job_file_manager import JobFileManager -from ray_release.file_manager.session_controller import SessionControllerFileManager from ray_release.logger import logger from ray_release.reporter.reporter import Reporter from ray_release.result import Result, handle_exception @@ -53,7 +52,6 @@ ) type_str_to_command_runner = { - "command": SDKRunner, "sdk_command": SDKRunner, "anyscale_job": AnyscaleJobRunner, } @@ -64,19 +62,6 @@ AnyscaleJobRunner: MinimalClusterManager, } -file_manager_str_to_file_manager = { - "sdk": SessionControllerFileManager, - "job": JobFileManager, - "anyscale_job": JobFileManager, -} - -command_runner_to_file_manager = { - SDKRunner: JobFileManager, # Use job file manager per default - JobRunner: JobFileManager, - AnyscaleJobRunner: JobFileManager, -} - - DEFAULT_RUN_TYPE = "anyscale_job" TIMEOUT_BUFFER_MINUTES = 15 @@ -139,20 +124,7 @@ def _load_test_configuration( ) cluster_manager_cls = 
command_runner_to_cluster_manager[command_runner_cls] - - file_manager_str = test["run"].get("file_manager", None) - if file_manager_str: - if file_manager_str not in file_manager_str_to_file_manager: - raise ReleaseTestConfigError( - f"Unknown file manager: {file_manager_str}. Must be one of " - f"{list(file_manager_str_to_file_manager.keys())}" - ) - file_manager_cls = file_manager_str_to_file_manager[file_manager_str] - else: - file_manager_cls = command_runner_to_file_manager[command_runner_cls] - logger.info(f"Got command runner cls: {command_runner_cls}") - logger.info(f"Got file manager cls: {file_manager_cls}") # Extra tags to be set on resources on cloud provider's side extra_tags = _get_extra_tags_from_env() # We don't need other attributes as they can be derived from the name @@ -169,9 +141,11 @@ def _load_test_configuration( anyscale_project, smoke_test=smoke_test, ) - file_manager = file_manager_cls(cluster_manager=cluster_manager) command_runner = command_runner_cls( - cluster_manager, file_manager, working_dir, artifact_path=artifact_path + cluster_manager, + JobFileManager(cluster_manager=cluster_manager), + working_dir, + artifact_path=artifact_path, ) except Exception as e: raise ReleaseTestSetupError(f"Error setting up release test: {e}") from e diff --git a/release/ray_release/schema.json b/release/ray_release/schema.json index a0e766d2fb39..979de3269b03 100644 --- a/release/ray_release/schema.json +++ b/release/ray_release/schema.json @@ -109,14 +109,6 @@ "anyscale_job" ] }, - "file_manager": { - "type": "string", - "enum": [ - "sdk", - "client", - "job" - ] - }, "wait_for_nodes": { "$ref": "#/definitions/WaitForNodes" }, diff --git a/release/ray_release/tests/test_glue.py b/release/ray_release/tests/test_glue.py index 1cf9cdcf1dd2..ae038d5cb79b 100644 --- a/release/ray_release/tests/test_glue.py +++ b/release/ray_release/tests/test_glue.py @@ -44,7 +44,6 @@ run_release_test, type_str_to_command_runner, command_runner_to_cluster_manager, - 
command_runner_to_file_manager, TIMEOUT_BUFFER_MINUTES, ) from ray_release.logger import logger @@ -161,7 +160,6 @@ def mock_alerter(test: Test, result: Result): type_str_to_command_runner["unit_test"] = MockCommandRunner command_runner_to_cluster_manager[MockCommandRunner] = MockClusterManager - command_runner_to_file_manager[MockCommandRunner] = MockFileManager self.test = Test( name="unit_test_end_to_end", diff --git a/release/release_tests.yaml b/release/release_tests.yaml index b3b9ebbb44da..5d04a91f090d 100644 --- a/release/release_tests.yaml +++ b/release/release_tests.yaml @@ -48,10 +48,6 @@ # # run the actual release test. # type: anyscale_job # -# # File manager to use to transfer files to and from the cluster. -# # Can be any of [sdk, job]. -# file_manager: job -# # # If you want to wait for nodes to be ready, you can specify this here: # wait_for_nodes: # # Number of nodes From da0d53aef1e2d3a7c24962028893b388e3058f3b Mon Sep 17 00:00:00 2001 From: Cuong Nguyen <128072568+can-anyscale@users.noreply.github.com> Date: Thu, 27 Apr 2023 17:25:46 -0700 Subject: [PATCH 138/424] [CI][Bisect] Check flakiness by rerun the same commits multiple times (#34721) * Move idx off boundary Signed-off-by: Cuong Nguyen * Rebase Signed-off-by: Cuong Nguyen * Support flaky rerun of a commit Signed-off-by: Cuong Nguyen * Add the ability to rerun the same commit to handle test flakiness Signed-off-by: Cuong Nguyen * testing Signed-off-by: Cuong Nguyen * Do not run sanity Signed-off-by: Cuong Nguyen * Testing Signed-off-by: Cuong Nguyen * Rebase Signed-off-by: Cuong Nguyen * Default flaky-rerun should be 1 Signed-off-by: Cuong Nguyen * Undo sanity comments Signed-off-by: Cuong Nguyen * Less indentation Signed-off-by: Cuong Nguyen * Update release/ray_release/scripts/ray_bisect.py Co-authored-by: Ricky Xu Signed-off-by: Cuong Nguyen <128072568+can-anyscale@users.noreply.github.com> * Fix lints Signed-off-by: Cuong Nguyen * Change flaky_rerun to run_per_commit Signed-off-by: 
Cuong Nguyen --------- Signed-off-by: Cuong Nguyen Signed-off-by: Cuong Nguyen <128072568+can-anyscale@users.noreply.github.com> Co-authored-by: Ricky Xu --- release/ray_release/scripts/ray_bisect.py | 110 +++++++++++++--------- release/ray_release/tests/test_bisect.py | 57 ++++++++--- 2 files changed, 113 insertions(+), 54 deletions(-) diff --git a/release/ray_release/scripts/ray_bisect.py b/release/ray_release/scripts/ray_bisect.py index c8691cd4ad47..07b1a795664a 100644 --- a/release/ray_release/scripts/ray_bisect.py +++ b/release/ray_release/scripts/ray_bisect.py @@ -3,7 +3,7 @@ import os import json import time -from typing import Dict, List, Optional, Set +from typing import Dict, List, Set from ray_release.logger import logger from ray_release.buildkite.step import get_step from ray_release.config import ( @@ -27,11 +27,21 @@ "capacity, but reduce the bisect duration" ), ) +@click.option( + "--run-per-commit", + default=1, + type=int, + help=( + "The number of time we run test on the same commit, to account for test " + "flakiness. 
Commit passes only when it passes on all runs" + ), +) def main( test_name: str, passing_commit: str, failing_commit: str, - concurrency: Optional[int] = 1, + concurrency: int = 1, + run_per_commit: int = 1, ) -> None: if concurrency <= 0: raise ValueError( @@ -46,11 +56,16 @@ def main( ) return commit_lists = _get_commit_lists(passing_commit, failing_commit) - blamed_commit = _bisect(test, commit_lists, concurrency) + blamed_commit = _bisect(test, commit_lists, concurrency, run_per_commit) logger.info(f"Blamed commit found for test {test_name}: {blamed_commit}") -def _bisect(test: Test, commit_list: List[str], concurrency: int) -> str: +def _bisect( + test: Test, + commit_list: List[str], + concurrency: int, + run_per_commit: int, +) -> str: while len(commit_list) > 2: logger.info( f"Bisecting between {len(commit_list)} commits: " @@ -63,11 +78,13 @@ def _bisect(test: Test, commit_list: List[str], concurrency: int) -> str: # on the previously run revision idx = min(max(idx, 1), len(commit_list) - 2) idx_to_commit[idx] = commit_list[idx] - outcomes = _run_test(test, set(idx_to_commit.values())) + outcomes = _run_test(test, set(idx_to_commit.values()), run_per_commit) passing_idx = 0 failing_idx = len(commit_list) - 1 for idx, commit in idx_to_commit.items(): - is_passing = outcomes[commit] == "passed" + is_passing = all( + outcome == "passed" for outcome in outcomes[commit].values() + ) if is_passing and idx > passing_idx: passing_idx = idx if not is_passing and idx < failing_idx: @@ -92,58 +109,67 @@ def _sanity_check(test: Test, passing_revision: str, failing_revision: str) -> b ) -def _run_test(test: Test, commits: Set[str]) -> Dict[str, str]: +def _run_test(test: Test, commits: Set[str], run_per_commit: int) -> Dict[str, str]: logger.info(f'Running test {test["name"]} on commits {commits}') for commit in commits: - _trigger_test_run(test, commit) - return _obtain_test_result(commits) + _trigger_test_run(test, commit, run_per_commit) + return 
_obtain_test_result(commits, run_per_commit) -def _trigger_test_run(test: Test, commit: str) -> None: +def _trigger_test_run(test: Test, commit: str, run_per_commit: int) -> None: ray_wheels_url = find_and_wait_for_ray_wheels_url( commit, timeout=DEFAULT_WHEEL_WAIT_TIMEOUT, ) - step = get_step( - test, - ray_wheels=ray_wheels_url, - env={ - "RAY_COMMIT_OF_WHEEL": commit, - }, - ) - step["label"] = f'{test["name"]}:{commit[:7]}' - step["key"] = commit - pipeline = subprocess.Popen( - ["echo", json.dumps({"steps": [step]})], stdout=subprocess.PIPE - ) - subprocess.check_output( - ["buildkite-agent", "pipeline", "upload"], stdin=pipeline.stdout - ) - pipeline.stdout.close() + for run in range(run_per_commit): + step = get_step( + test, + ray_wheels=ray_wheels_url, + env={ + "RAY_COMMIT_OF_WHEEL": commit, + }, + ) + step["label"] = f'{test["name"]}:{commit[:7]}-{run}' + step["key"] = f"{commit}-{run}" + pipeline = subprocess.Popen( + ["echo", json.dumps({"steps": [step]})], stdout=subprocess.PIPE + ) + subprocess.check_output( + ["buildkite-agent", "pipeline", "upload"], stdin=pipeline.stdout + ) + pipeline.stdout.close() -def _obtain_test_result(buildkite_step_keys: List[str]) -> Dict[str, str]: +def _obtain_test_result(commits: Set[str], run_per_commit: int) -> Dict[str, str]: outcomes = {} wait = 5 total_wait = 0 while True: logger.info(f"... 
waiting for test result ...({total_wait} seconds)") - for key in buildkite_step_keys: - if key in outcomes: + for commit in commits: + if commit in outcomes and len(outcomes[commit]) == run_per_commit: continue - outcome = subprocess.check_output( - [ - "buildkite-agent", - "step", - "get", - "outcome", - "--step", - key, - ] - ).decode("utf-8") - if outcome: - outcomes[key] = outcome - if len(outcomes) == len(buildkite_step_keys): + for run in range(run_per_commit): + outcome = subprocess.check_output( + [ + "buildkite-agent", + "step", + "get", + "outcome", + "--step", + f"{commit}-{run}", + ] + ).decode("utf-8") + if not outcome: + continue + if commit not in outcomes: + outcomes[commit] = {} + outcomes[commit][run] = outcome + all_commit_finished = len(outcomes) == len(commits) + per_commit_finished = all( + len(outcome) == run_per_commit for outcome in outcomes.values() + ) + if all_commit_finished and per_commit_finished: break time.sleep(wait) total_wait = total_wait + wait diff --git a/release/ray_release/tests/test_bisect.py b/release/ray_release/tests/test_bisect.py index 5d363e807630..d067373f7c1c 100644 --- a/release/ray_release/tests/test_bisect.py +++ b/release/ray_release/tests/test_bisect.py @@ -1,31 +1,64 @@ from unittest import mock from typing import List, Dict -from ray_release.scripts.ray_bisect import _bisect +from ray_release.scripts.ray_bisect import _bisect, _obtain_test_result from ray_release.config import Test +def test_obtain_test_result(): + test_cases = [ + { + "c0": {0: "passed"}, + }, + { + "c0": {0: "passed", 1: "passed"}, + "c1": {0: "hard_failed", 1: "hard_failed"}, + }, + ] + + def _mock_check_output(input: List[str]) -> str: + commit, run = tuple(input[-1].split("-")) + return bytes(test_case[commit][int(run)], "utf-8") + + for test_case in test_cases: + with mock.patch( + "subprocess.check_output", + side_effect=_mock_check_output, + ): + commits = set(test_case.keys()) + rerun_per_commit = len(test_case[list(commits)[0]]) + 
_obtain_test_result(commits, rerun_per_commit) == test_case + + def test_bisect(): test_cases = { "c3": { - "c0": "passed", - "c1": "passed", - "c3": "hard_failed", - "c4": "soft_failed", + "c0": {0: "passed"}, + "c1": {0: "passed"}, + "c3": {0: "hard_failed"}, + "c4": {0: "soft_failed"}, }, "c1": { - "c0": "passed", - "c1": "hard_failed", - "c2": "hard_failed", + "c0": {0: "passed"}, + "c1": {0: "hard_failed"}, + "c2": {0: "hard_failed"}, }, "cc1": { - "cc0": "passed", - "cc1": "hard_failed", + "cc0": {0: "passed"}, + "cc1": {0: "hard_failed"}, + }, + "c2": { + "c0": {0: "passed", 1: "passed"}, + "c2": {0: "passed", 1: "hard_failed"}, + "c3": {0: "hard_failed", 1: "passed"}, + "c4": {0: "soft_failed", 1: "soft_failed"}, }, } for output, input in test_cases.items(): - def _mock_run_test(test: Test, commit: List[str]) -> Dict[str, str]: + def _mock_run_test( + test: Test, commit: List[str], rerun_per_commit + ) -> Dict[str, str]: return input with mock.patch( @@ -33,4 +66,4 @@ def _mock_run_test(test: Test, commit: List[str]) -> Dict[str, str]: side_effect=_mock_run_test, ): for concurreny in range(1, 4): - assert _bisect({}, list(input.keys()), concurreny) == output + assert _bisect({}, list(input.keys()), concurreny, 1) == output From 91d7ac53d92119812fce009aeaed1d714bff4c46 Mon Sep 17 00:00:00 2001 From: Eric Liang Date: Thu, 27 Apr 2023 17:28:41 -0700 Subject: [PATCH 139/424] [doc] [data] Rename "Creating Datastreams" -> "Loading data", etc. 
(#34837) --- doc/source/data/api/datastream.rst | 2 +- doc/source/data/consuming-datastreams.rst | 4 ++-- doc/source/data/creating-datastreams.rst | 2 +- doc/source/data/examples/nyc_taxi_basic_processing.ipynb | 2 +- doc/source/data/getting-started.rst | 8 ++++---- doc/source/data/key-concepts.rst | 4 ++-- doc/source/data/transforming-datastreams.rst | 2 +- doc/source/ray-air/computer-vision.rst | 4 ++-- doc/source/ray-overview/getting-started.md | 2 +- 9 files changed, 15 insertions(+), 15 deletions(-) diff --git a/doc/source/data/api/datastream.rst b/doc/source/data/api/datastream.rst index 1963ce142f2b..19f11588186d 100644 --- a/doc/source/data/api/datastream.rst +++ b/doc/source/data/api/datastream.rst @@ -77,7 +77,7 @@ Converting to Pipeline Datastream.repeat Datastream.window -Consuming Datastreams +Consuming Data --------------------- .. autosummary:: diff --git a/doc/source/data/consuming-datastreams.rst b/doc/source/data/consuming-datastreams.rst index 9921d7941ccf..ea80d784558e 100644 --- a/doc/source/data/consuming-datastreams.rst +++ b/doc/source/data/consuming-datastreams.rst @@ -1,7 +1,7 @@ .. _consuming_datastreams: ===================== -Consuming Datastreams +Consuming Data ===================== The data underlying a ``Datastream`` can be consumed in several ways: @@ -76,7 +76,7 @@ This is a common pattern useful for loading and sharding data between distribute .. _saving_datastreams: -Saving Datastreams +Saving Data ================== Datastreams can be written to local or remote storage in the desired data format. diff --git a/doc/source/data/creating-datastreams.rst b/doc/source/data/creating-datastreams.rst index 8c180dd3eded..882b8b5a97d2 100644 --- a/doc/source/data/creating-datastreams.rst +++ b/doc/source/data/creating-datastreams.rst @@ -1,7 +1,7 @@ .. 
_creating_datastreams: ==================== -Creating Datastreams +Loading Data ==================== :class:`Datastreams ` can be created from: diff --git a/doc/source/data/examples/nyc_taxi_basic_processing.ipynb b/doc/source/data/examples/nyc_taxi_basic_processing.ipynb index d31f303b2311..874f5fccf17e 100644 --- a/doc/source/data/examples/nyc_taxi_basic_processing.ipynb +++ b/doc/source/data/examples/nyc_taxi_basic_processing.ipynb @@ -591,7 +591,7 @@ "id": "0ade2a72", "metadata": {}, "source": [ - "See {ref}`Transforming Datastreams ` for more information on how we can process our data with Ray Data." + "See {ref}`Transforming Data ` for more information on how we can process our data with Ray Data." ] }, { diff --git a/doc/source/data/getting-started.rst b/doc/source/data/getting-started.rst index b769148b3adf..edba19771f7e 100644 --- a/doc/source/data/getting-started.rst +++ b/doc/source/data/getting-started.rst @@ -47,7 +47,7 @@ Ray reads from any `filesystem supported by Arrow To learn more about creating datastreams, read -:ref:`Creating datastreams `. +:ref:`Loading data `. Transform the datastream ------------------------ @@ -83,7 +83,7 @@ transform datastreams. Ray executes transformations in parallel for performance. To learn more about transforming datastreams, read -:ref:`Transforming datastreams `. +:ref:`Transforming data `. Consume the datastream ---------------------- @@ -140,7 +140,7 @@ Pass datastreams to Ray tasks or actors, and access records with methods like To learn more about consuming datastreams, read -:ref:`Consuming datastreams `. +:ref:`Consuming data `. Save the datastream ------------------- @@ -162,7 +162,7 @@ or remote filesystems. ['..._000000.parquet'] -To learn more about saving datastream contents, read :ref:`Saving datastreams `. +To learn more about saving datastream contents, read :ref:`Saving data `. 
Next Steps ---------- diff --git a/doc/source/data/key-concepts.rst b/doc/source/data/key-concepts.rst index 7980f3ba4665..625e88d08e19 100644 --- a/doc/source/data/key-concepts.rst +++ b/doc/source/data/key-concepts.rst @@ -39,7 +39,7 @@ Datastream uses Ray tasks to read data from remote storage in parallel. Each rea You can manually specify the number of read tasks, but the final parallelism is always capped by the number of files in the underlying datastream. -For an in-depth guide on creating datastreams, read :ref:`Creating Datastreams `. +For an in-depth guide on creating datastreams, read :ref:`Loading Data `. Transforming Data ================= @@ -56,7 +56,7 @@ pool of Ray actors. This allows you to cache expensive state initialization .. https://docs.google.com/drawings/d/12STHGV0meGWfdWyBlJMUgw7a-JcFPu9BwSOn5BjRw9k/edit -For an in-depth guide on transforming datastreams, read :ref:`Transforming Datastreams `. +For an in-depth guide on transforming datastreams, read :ref:`Transforming Data `. Shuffling Data ============== diff --git a/doc/source/data/transforming-datastreams.rst b/doc/source/data/transforming-datastreams.rst index 226ffcb7f60d..c7489ad7d62e 100644 --- a/doc/source/data/transforming-datastreams.rst +++ b/doc/source/data/transforming-datastreams.rst @@ -1,7 +1,7 @@ .. _transforming_datastreams: ======================== -Transforming Datastreams +Transforming Data ======================== Datastreams transformations take in datastreams and produce new datastreams. For example, *map_batches* diff --git a/doc/source/ray-air/computer-vision.rst b/doc/source/ray-air/computer-vision.rst index de89fd60b596..e6418c233583 100644 --- a/doc/source/ray-air/computer-vision.rst +++ b/doc/source/ray-air/computer-vision.rst @@ -116,7 +116,7 @@ Reading image data :dedent: -For more information on creating datastreams, see :ref:`Creating Datastreams `. +For more information on creating datastreams, see :ref:`Loading Data `. 
Transforming images @@ -157,7 +157,7 @@ standard way to preprocess data with Ray. For more information on transforming data, see :ref:`Using Preprocessors ` and -:ref:`Transforming Datastreams `. +:ref:`Transforming Data `. Training vision models ---------------------- diff --git a/doc/source/ray-overview/getting-started.md b/doc/source/ray-overview/getting-started.md index 3180513face8..b498e8648ae6 100644 --- a/doc/source/ray-overview/getting-started.md +++ b/doc/source/ray-overview/getting-started.md @@ -139,7 +139,7 @@ Use the trained model for batch prediction with a ``BatchPredictor``. Ray has a rich ecosystem of libraries and frameworks built on top of it. Simply click on the dropdowns below to see examples of our most popular libraries. -`````{dropdown} ray Data: Creating and Transforming Datastreams +`````{dropdown} ray Data: Distributed ML Preprocessing :animate: fade-in-slide-down Ray Data is the standard way to load and exchange data in Ray libraries and applications. From 898066f789bf44f33ef8a8e1617d1b390232142f Mon Sep 17 00:00:00 2001 From: Eric Liang Date: Thu, 27 Apr 2023 19:00:21 -0700 Subject: [PATCH 140/424] [data] Fix hang in test_streaming_backpressure (#34843) --- python/ray/data/_internal/execution/streaming_executor.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/ray/data/_internal/execution/streaming_executor.py b/python/ray/data/_internal/execution/streaming_executor.py index 9c3676eb65d6..02c360703441 100644 --- a/python/ray/data/_internal/execution/streaming_executor.py +++ b/python/ray/data/_internal/execution/streaming_executor.py @@ -70,7 +70,7 @@ def __init__(self, options: ExecutionOptions): self._output_node: Optional[OpState] = None Executor.__init__(self, options) - threading.Thread.__init__(self) + threading.Thread.__init__(self, daemon=True) def execute( self, dag: PhysicalOperator, initial_stats: Optional[DatastreamStats] = None From de8cb75696ce5e175c16e31fb1c2b13da469e0d0 Mon Sep 17 00:00:00 2001 
From: Sihan Wang Date: Thu, 27 Apr 2023 21:12:42 -0700 Subject: [PATCH 141/424] [Serve] Fix the stats issue (#34853) --- python/ray/serve/_private/deployment_state.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/python/ray/serve/_private/deployment_state.py b/python/ray/serve/_private/deployment_state.py index c65ae1771bb6..697e4c5f8dc4 100644 --- a/python/ray/serve/_private/deployment_state.py +++ b/python/ray/serve/_private/deployment_state.py @@ -1631,7 +1631,12 @@ def _stop_replica(self, replica, graceful_stop=True): replica.stop(graceful=graceful_stop) self._replicas.add(ReplicaState.STOPPING, replica) self.health_check_gauge.set( - 0, tags={"deployment": self._name, "replica": replica.replica_tag} + 0, + tags={ + "deployment": self._name, + "replica": replica.replica_tag, + "application": self.app_name, + }, ) def _check_and_update_replicas(self) -> bool: From b184511aec602bc219c397cf0c63dcf68a71827f Mon Sep 17 00:00:00 2001 From: Eric Liang Date: Fri, 28 Apr 2023 00:09:02 -0700 Subject: [PATCH 142/424] [data] [streaming] Autoshutdown executor threads when deleted (#34811) --- .../_internal/execution/streaming_executor.py | 20 +++++++++++++++---- python/ray/data/datastream.py | 4 ++++ .../data/tests/test_streaming_integration.py | 14 +++++++++++++ 3 files changed, 34 insertions(+), 4 deletions(-) diff --git a/python/ray/data/_internal/execution/streaming_executor.py b/python/ray/data/_internal/execution/streaming_executor.py index 02c360703441..2221983a6a95 100644 --- a/python/ray/data/_internal/execution/streaming_executor.py +++ b/python/ray/data/_internal/execution/streaming_executor.py @@ -41,6 +41,9 @@ # progress bar seeming to stall for very large scale workloads. PROGRESS_BAR_UPDATE_INTERVAL = 50 +# Visible for testing. +_num_shutdown = 0 + class StreamingExecutor(Executor, threading.Thread): """A streaming Datastream executor. @@ -80,6 +83,7 @@ def execute( We take an event-loop approach to scheduling. 
We block on the next scheduling event using `ray.wait`, updating operator state and dispatching new tasks. """ + self._initial_stats = initial_stats self._start_time = time.perf_counter() @@ -88,8 +92,9 @@ def execute( logger.get_logger().info("Execution config: %s", self._options) if not self._options.verbose_progress: logger.get_logger().info( - "Tip: To enable per-operator progress reporting, set " - "RAY_DATA_VERBOSE_PROGRESS=1." + "Tip: For detailed progress reporting, run " + "`ray.data.DataContext.get_current()." + "execution_options.verbose_progress = True`" ) # Setup the streaming DAG topology and start the runner thread. @@ -115,7 +120,10 @@ def get_next(self, output_split_idx: Optional[int] = None) -> RefBundle: # Translate the special sentinel values for MaybeRefBundle into # exceptions. if item is None: - raise StopIteration + if self._outer._shutdown: + raise StopIteration(f"{self._outer} is shutdown.") + else: + raise StopIteration elif isinstance(item, Exception): raise item else: @@ -132,9 +140,14 @@ def get_next(self, output_split_idx: Optional[int] = None) -> RefBundle: return StreamIterator(self) def shutdown(self): + context = DataContext.get_current() + global _num_shutdown + with self._shutdown_lock: + logger.get_logger().info(f"Shutting down {self}.") if self._shutdown: return + _num_shutdown += 1 self._shutdown = True # Give the scheduling loop some time to finish processing. 
self.join(timeout=2.0) @@ -143,7 +156,6 @@ def shutdown(self): stats_summary_string = self._final_stats.to_summary().to_string( include_parent=False ) - context = DataContext.get_current() logger.get_logger(log_to_stdout=context.enable_auto_log_stats).info( stats_summary_string, ) diff --git a/python/ray/data/datastream.py b/python/ray/data/datastream.py index 85ec35a11a2f..2d9ba8d48c41 100644 --- a/python/ray/data/datastream.py +++ b/python/ray/data/datastream.py @@ -4633,6 +4633,10 @@ def __setstate__(self, state): self._logical_plan = state["logical_plan"] self._current_executor = None + def __del__(self): + if self._current_executor and ray is not None and ray.is_initialized(): + self._current_executor.shutdown() + # Backwards compatibility alias. Dataset = Datastream diff --git a/python/ray/data/tests/test_streaming_integration.py b/python/ray/data/tests/test_streaming_integration.py index cc3758138de9..e7b37487d822 100644 --- a/python/ray/data/tests/test_streaming_integration.py +++ b/python/ray/data/tests/test_streaming_integration.py @@ -42,6 +42,20 @@ def ref_bundles_to_list(bundles: List[RefBundle]) -> List[List[Any]]: return output +def test_autoshutdown_dangling_executors(ray_start_10_cpus_shared): + from ray.data._internal.execution import streaming_executor + + initial = streaming_executor._num_shutdown + + for _ in range(5): + ds = ray.data.range(100) + it = ds.iter_batches(batch_size=None, prefetch_batches=0) + next(it) + + final = streaming_executor._num_shutdown - initial + assert final == 4 + + def test_pipelined_execution(ray_start_10_cpus_shared): executor = StreamingExecutor(ExecutionOptions(preserve_order=True)) inputs = make_ref_bundles([[x] for x in range(20)]) From 1baa61f374812449e6bed94aad140da06436ece4 Mon Sep 17 00:00:00 2001 From: Balaji Veeramani Date: Fri, 28 Apr 2023 01:40:48 -0700 Subject: [PATCH 143/424] [Docs] Add guide on how to write code snippets (#31252) Most examples in our documentation aren't tested. 
This PR adds a guide that explains how to write examples so that they're tested. --- doc/source/_toc.yml | 1 + .../ray-contribute/doc_code/example_module.py | 8 + doc/source/ray-contribute/docs.ipynb | 30 +- .../ray-contribute/writing-code-snippets.rst | 294 ++++++++++++++++++ 4 files changed, 306 insertions(+), 27 deletions(-) create mode 100644 doc/source/ray-contribute/doc_code/example_module.py create mode 100644 doc/source/ray-contribute/writing-code-snippets.rst diff --git a/doc/source/_toc.yml b/doc/source/_toc.yml index c9afcf13b8da..0ab8509935d2 100644 --- a/doc/source/_toc.yml +++ b/doc/source/_toc.yml @@ -396,6 +396,7 @@ parts: sections: - file: ray-contribute/development - file: ray-contribute/docs + - file: ray-contribute/writing-code-snippets - file: ray-contribute/fake-autoscaler - file: ray-core/examples/testing-tips - file: ray-core/configure diff --git a/doc/source/ray-contribute/doc_code/example_module.py b/doc/source/ray-contribute/doc_code/example_module.py new file mode 100644 index 000000000000..cba47448a945 --- /dev/null +++ b/doc/source/ray-contribute/doc_code/example_module.py @@ -0,0 +1,8 @@ +# example_module.py + +# fmt: off +# __is_even_begin__ +def is_even(x): + return (x % 2) == 0 +# __is_even_end__ +# fmt: on diff --git a/doc/source/ray-contribute/docs.ipynb b/doc/source/ray-contribute/docs.ipynb index d7636a73c082..7588db4ea287 100644 --- a/doc/source/ray-contribute/docs.ipynb +++ b/doc/source/ray-contribute/docs.ipynb @@ -156,38 +156,14 @@ "For example, in the above `autofunction` call, to change the API reference for `ray.tune.integration.docker.DockerSyncer`,\n", "you would have to [change the following source file](https://github.com/ray-project/ray/blob/7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065/python/ray/tune/integration/docker.py#L15-L38).\n", "\n", - "To show the usage of APIs, it is important to have small usage examples embedded in the API documentation. 
These should be self-contained and run out of the box, so a user can copy and paste them into a Python interpreter and play around with them (e.g., if applicable, they should point to example data). Users often rely on these examples to build their applications. You can use the Sphinx `testcode` primitive to embed\n", - "such examples into the `Examples:` section of the docstrings. For an example look [here](https://github.com/ray-project/ray/blob/5e61fb51400bc712449e85a7476fa4fca80f3b41/python/ray/train/torch/torch_predictor.py#L173-L221)\n", - "\n", - "These code snippets will be tested in the CI to make sure they keep working in the future and updated if there are changes to the APIs. You can use the `testoutput` primitive to specify the expected output of the code snippet, and the CI will check the output and give an error if they don't match.\n", - "\n", - "To run the doctests locally, run\n", - "\n", - "```shell\n", - "RAY_MOCK_MODULES=0 make doctest\n", - "```\n", - "\n", - "in the `ray/doc` directory.\n", + "To show the usage of APIs, it is important to have small usage examples embedded in the API documentation. These should be self-contained and run out of the box, so a user can copy and paste them into a Python interpreter and play around with them (e.g., if applicable, they should point to example data). Users often rely on these examples to build their applications. 
To learn more about writing examples, read [How to write code snippets](writing-code-snippets).\n", "\n", "## Adding code to an `.rST` or `.md` file\n", "\n", "Modifying text in an existing documentation file is easy, but you need to be careful when it comes to adding code.\n", "The reason is that we want to ensure every code snippet on our documentation is tested.\n", - "This requires us to have a process for including and testing code snippets in documents.\n", - "\n", - "In an `.rST` or `.md` file, you can add code snippets using `literalinclude` from the Sphinx system.\n", - "For instance, here's an example from the Tune's \"Key Concepts\" documentation: \n", - "\n", - "```markdown\n", - ".. literalinclude:: doc_code/key_concepts.py\n", - " :language: python\n", - " :start-after: __function_api_start__\n", - " :end-before: __function_api_end__\n", - "```\n", - "\n", - "Note that in the whole file there's not a single literal code block, code _has to be_ imported using the `literalinclude` directive.\n", - "The code that gets added to the document by `literalinclude`, including `start-after` and `end-before` tags,\n", - "reads as follows:" + "This requires us to have a process for including and testing code snippets in documents. To learn how to write testable code \n", + "snippets, read [How to write code snippets](writing-code-snippets).\n" ] }, { diff --git a/doc/source/ray-contribute/writing-code-snippets.rst b/doc/source/ray-contribute/writing-code-snippets.rst new file mode 100644 index 000000000000..dd579e7f3434 --- /dev/null +++ b/doc/source/ray-contribute/writing-code-snippets.rst @@ -0,0 +1,294 @@ +.. _writing-code-snippets: + +========================== +How to write code snippets +========================== + +Users learn from example. So, whether you're writing a docstring or a user guide, +include examples that illustrate the relevant APIs. Your examples should run +out-of-the-box so that users can copy them and adapt them to their own needs. 
+ +This page describes how to write code snippets so that they're tested in CI. + +.. note:: + The examples in this guide use reStructuredText. If you're writing + Markdown, use MyST syntax. To learn more, read the + `MyST documentation `_. + +----------------- +Types of examples +----------------- + +There are three types of examples: *doctest-style*, *code-output-style*, and *literalinclude*. + +*doctest-style* examples +======================== + +*doctest-style* examples mimic interactive Python sessions. :: + + .. doctest:: + + >>> def is_even(x): + ... return (x % 2) == 0 + >>> is_even(0) + True + >>> is_even(1) + False + +They're rendered like this: + +.. doctest:: + + >>> def is_even(x): + ... return (x % 2) == 0 + >>> is_even(0) + True + >>> is_even(1) + False + +.. tip:: + + If you're writing docstrings, exclude `.. doctest::` to simplify your code. :: + + Example: + >>> def is_even(x): + ... return (x % 2) == 0 + >>> is_even(0) + True + >>> is_even(1) + False + +*code-output-style* examples +============================ + +*code-output-style* examples contain ordinary Python code. :: + + .. testcode:: + + def is_even(x): + return (x % 2) == 0 + + print(is_even(0)) + print(is_even(1)) + + .. testoutput:: + + True + False + +They're rendered like this: + +.. testcode:: + + def is_even(x): + return (x % 2) == 0 + + print(is_even(0)) + print(is_even(1)) + +.. testoutput:: + + True + False + +*literalinclude* examples +========================= + +*literalinclude* examples display Python modules. :: + + .. literalinclude:: ./doc_code/example_module.py + :language: python + :start-after: __is_even_begin__ + :end-before: __is_even_end__ + +.. literalinclude:: ./doc_code/example_module.py + :language: python + +They're rendered like this: + +.. literalinclude:: ./doc_code/example_module.py + :language: python + :start-after: __is_even_begin__ + :end-before: __is_even_end__ + +--------------------------------------- +Which type of example should you write? 
+--------------------------------------- + +There's no hard rule about which style you should use. Choose the style that best +illustrates your API. + +.. tip:: + If you're not sure which style to use, use *code-block-style*. + +When to use *doctest-style* +=========================== + +If you're writing a small example that emphasizes object representations, or if you +want to print intermediate objects, use *doctest-style*. :: + + .. doctest:: + + >>> import ray + >>> ds = ray.data.range(100) + >>> ds.schema() + + >>> ds.take(5) + [0, 1, 2, 3, 4] + +When to use *code-block-style* +============================== + +If you're writing a longer example, or if object representations aren't relevant to your example, use *code-block-style*. :: + + .. testcode:: + + import pandas as pd + import ray + from ray.train.batch_predictor import BatchPredictor + + def calculate_accuracy(df): + return pd.DataFrame({"correct": df["preds"] == df["label"]}) + + # Create a batch predictor that returns identity as the predictions. + batch_pred = BatchPredictor.from_pandas_udf( + lambda data: pd.DataFrame({"preds": data["feature_1"]})) + + # Create a dummy dataset. + ds = ray.data.from_pandas(pd.DataFrame({ + "feature_1": [1, 2, 3], "label": [1, 2, 3]})) + + # Execute batch prediction using this predictor. + predictions = batch_pred.predict(ds, + feature_columns=["feature_1"], keep_columns=["label"]) + + # Calculate final accuracy + correct = predictions.map_batches(calculate_accuracy) + print(f"Final accuracy: {correct.sum(on='correct') / correct.count()}") + + .. testoutput:: + + Final accuracy: 1.0 + +When to use *literalinclude* +============================ +If you're writing an end-to-end examples and your examples doesn't contain outputs, use +*literalinclude*. + +----------------------------------- +How to handle hard-to-test examples +----------------------------------- + +When is it okay to not test an example? 
+======================================= + +You don't need to test examples that require GPUs, or examples that depend on external +systems like Weights and Biases. + +Skipping *doctest-style* examples +================================= + +To skip a *doctest-style* example, append `# doctest: +SKIP` to your Python code. :: + + .. doctest:: + + >>> import ray + >>> ray.data.read_images("s3://private-bucket") # doctest: +SKIP + +Skipping *code-block-style* examples +==================================== + +To skip a *code-block-style* example, add `:skipif: True` to the `testoutput` block. :: + + .. testcode:: + :skipif: True + + from ray.air.integrations.wandb import WandbLoggerCallback + callback = WandbLoggerCallback( + project="Optimization_Project", + api_key_file=..., + log_config=True + ) + +---------------------------------------------- +How to handle long or non-determnistic outputs +---------------------------------------------- + +If your Python code is non-deterministic, or if your output is excessively long, you may want to skip all or part of an output. + +Ignoring *doctest-style* outputs +================================ + +To ignore parts of a *doctest-style* output, append `# doctest: +ELLIPSIS` to your Python code and replace problematic sections with ellipsis. :: + + .. doctest:: + + >>> import ray + >>> ray.data.read_images("s3://anonymous@air-example-data-2/imagenet-sample-images") # doctest: +ELLIPSIS + Datastream( + num_blocks=..., + num_rows=..., + schema={image: numpy.ndarray(shape=..., dtype=uint8)} + ) + +To ignore an output altogether, write a *code-block-style* snippet. Don't use `# doctest: +SKIP`. + +Ignoring *code-block-style* outputs +=================================== + +To ignore parts of a *code-block-style* output, add `:options: +ELLIPSIS` to the `testoutput` block and replace problematic sections with ellipsis. :: + + .. 
testcode:: + + import ray + ds = ray.data.read_images("s3://anonymous@air-example-data-2/imagenet-sample-images") + print(ds) + + .. testoutput:: + :options: +ELLIPSIS + + Datastream( + num_blocks=..., + num_rows=..., + schema={image: numpy.ndarray(shape=..., dtype=uint8)} + ) + +To ignore an output altogether, replace the output with a single elipsis. :: + + .. testoutput:: + :hide: + :options: +ELLIPSIS + + ... + +-------------------- +How to test examples +-------------------- + +Testing specific examples +========================= + +To test specific examples, install `pytest-sphinx`. + +.. code-block:: bash + + pip install pytest-sphinx + +Then, run pytest on a module, docstring, or user guide. + +.. code-block:: bash + + pytest --doctest-modules python/ray/data/read_api.py + pytest --doctest-modules python/ray/data/read_api.py::ray.data.read_api.range + pytest --doctest-modules doc/source/data/getting-started.rst + +Testing all examples +==================== + +To test all code snippets, run + +.. code-block:: bash + + RAY_MOCK_MODULES=0 make doctest + +in the `ray/doc` directory. \ No newline at end of file From 94b49385f53451b338df696a78f9f3edea20a943 Mon Sep 17 00:00:00 2001 From: Kai Fricke Date: Fri, 28 Apr 2023 14:12:32 +0100 Subject: [PATCH 144/424] [docker/dependencies] Upload frozen pip environment as artifact (#34820) To surface dependencies, this PR extracts the python environment information from built docker images and uploads them as buildkite artifacts. 
Signed-off-by: Kai Fricke --- ci/build/build-docker-images.py | 64 ++++++++++++++++++++++++++++++--- docker/ray-ml/Dockerfile | 2 ++ docker/ray/Dockerfile | 2 ++ 3 files changed, 63 insertions(+), 5 deletions(-) diff --git a/ci/build/build-docker-images.py b/ci/build/build-docker-images.py index ead5f67c39bf..fdd0d3979d48 100644 --- a/ci/build/build-docker-images.py +++ b/ci/build/build-docker-images.py @@ -1,4 +1,5 @@ import datetime +import io import json import functools import glob @@ -9,8 +10,10 @@ import shutil import subprocess import sys +import tarfile from collections import defaultdict -from typing import List, Optional, Tuple +from pathlib import Path +from typing import List, Optional, Tuple, Dict import click import docker @@ -296,6 +299,43 @@ def _build_docker_image( break print("BUILT: ", tagged_name) + return tagged_name + + +def _extract_files_from_docker(docker_image: str, files: Dict[str, str]): + """Extract files from docker container image and save to local disk. + + ``files`` is a dict mapping from paths inside the docker container to + local paths on the host system. 
+ """ + # Create container + container = DOCKER_CLIENT.containers.create(docker_image) + for container_path, local_path in files.items(): + # Get tar stream of file + stream, stat = container.get_archive(f"{container_path}") + # Create local directory containing target file + local_path = Path(local_path) + local_path.parent.mkdir(exist_ok=True) + # Read tar stream into bytes IO + with tarfile.open(fileobj=io.BytesIO(b"".join(d for d in stream))) as tar: + # Extract file from tar archive into local path + with open(local_path, "wb") as f: + for r in tar.extractfile(os.path.basename(container_path)): + f.write(r) + container.remove() + + +def extract_image_infos(images: List[str], target_dir: str): + for image in images: + image_basename = image.replace("rayproject/", "") + _extract_files_from_docker( + image, + { + "/home/ray/pip-freeze.txt": ( + f"{target_dir}/{image_basename}_" f"pip-freeze.txt" + ) + }, + ) def copy_wheels(human_build): @@ -330,17 +370,22 @@ def check_staleness(repository, tag): return is_stale -def build_for_all_versions(image_name, py_versions, image_types, suffix, **kwargs): +def build_for_all_versions( + image_name, py_versions, image_types, suffix, **kwargs +) -> List[str]: """Builds the given Docker image for all Python & CUDA versions""" + tagged_names = [] for py_version in py_versions: for image_type in image_types: - _build_docker_image( + tagged_name = _build_docker_image( image_name, py_version=py_version, image_type=image_type, suffix=suffix, **kwargs, ) + tagged_names.append(tagged_name) + return tagged_names def build_base_images(py_versions, image_types, suffix): @@ -834,7 +879,11 @@ def main( # TODO Currently don't push ray_worker_container else: # Build Ray Docker images. 
- build_for_all_versions("ray", py_versions, image_types, suffix=suffix) + all_tagged_images = [] + + all_tagged_images += build_for_all_versions( + "ray", py_versions, image_types, suffix=suffix + ) # List of images to tag and push to docker hub images_to_tag_and_push = [] @@ -858,7 +907,7 @@ def main( if len(ml_image_types) > 0: prep_ray_ml() - build_for_all_versions( + all_tagged_images += build_for_all_versions( "ray-ml", py_versions, image_types=ml_image_types, @@ -866,6 +915,11 @@ def main( ) images_to_tag_and_push += ["ray-ml"] + if is_buildkite: + extract_image_infos( + all_tagged_images, target_dir="/artifact-mount/.image-info" + ) + if build_type in {MERGE, PR}: valid_branch = _valid_branch() if (not valid_branch) and is_merge: diff --git a/docker/ray-ml/Dockerfile b/docker/ray-ml/Dockerfile index 18454405bd52..e1bbcd8e4243 100644 --- a/docker/ray-ml/Dockerfile +++ b/docker/ray-ml/Dockerfile @@ -40,5 +40,7 @@ RUN sudo apt-get update \ requirements*.txt \ && sudo apt-get clean +RUN $HOME/anaconda3/bin/pip freeze > /home/ray/pip-freeze.txt + # Make sure tfp is installed correctly and matches tf version. RUN python -c "import tensorflow_probability" diff --git a/docker/ray/Dockerfile b/docker/ray/Dockerfile index ad6253b0da89..40606488dbc4 100644 --- a/docker/ray/Dockerfile +++ b/docker/ray/Dockerfile @@ -9,3 +9,5 @@ COPY $WHEEL_PATH . COPY $FIND_LINKS_PATH $FIND_LINKS_PATH RUN $HOME/anaconda3/bin/pip --no-cache-dir install `basename $WHEEL_PATH`[all] \ --find-links $FIND_LINKS_PATH && sudo rm `basename $WHEEL_PATH` + +RUN $HOME/anaconda3/bin/pip freeze > /home/ray/pip-freeze.txt From 10405aa51ceff5a1656df87ba0290b33079c069e Mon Sep 17 00:00:00 2001 From: Kai Fricke Date: Fri, 28 Apr 2023 14:13:04 +0100 Subject: [PATCH 145/424] [air/execution] Fix new execution backend for BOHB (#34828) The new execution backend did not work with BOHB. The reason was a faulty check when we start actors. 
We should not eagerly start paused trials, as the scheduler may want to keep them paused for synchronous training. However, a leftover code piece allowed for trials that were once pending but then paused to be started. This PR fixes the bug by requiring trials to be strictly pending, and also makes sure that the `trial_to_run` is set to PENDING when chosen by the scheduler. BOHB tests are now enabled for the new execution backend. Signed-off-by: Kai Fricke --- python/ray/tune/execution/tune_controller.py | 5 +++-- python/ray/tune/tests/test_trial_scheduler.py | 4 ---- python/ray/tune/tests/test_tune_restore_warm_start.py | 4 ---- 3 files changed, 3 insertions(+), 10 deletions(-) diff --git a/python/ray/tune/execution/tune_controller.py b/python/ray/tune/execution/tune_controller.py index f7f7051a616b..7ae7c6986d61 100644 --- a/python/ray/tune/execution/tune_controller.py +++ b/python/ray/tune/execution/tune_controller.py @@ -437,6 +437,7 @@ def _maybe_add_actors(self) -> None: and trial_to_run not in self._trial_to_actor ): logger.debug(f"Staging trial to run: {trial_to_run}") + self._set_trial_status(trial_to_run, Trial.PENDING) self._staged_trials.add(trial_to_run) self._actor_cache.increase_max(trial_to_run.placement_group_factory) # schedule_trial_actor also potentially uses cached actors @@ -460,7 +461,7 @@ def _maybe_add_actors(candidates: List[Trial]): # If the trial is part of the list, but not of the set, # we just ignore it. Removing it from the list on status # change is too expensive. 
- if trial not in (self._pending_trials | self._paused_trials): + if trial not in self._pending_trials: continue if trial in self._trial_to_actor: @@ -541,7 +542,7 @@ def _schedule_trial_actor(self, trial: Trial): """ logger.debug(f"Trying to schedule new ACTOR for trial {trial}") - self._set_trial_status(trial, Trial.PENDING) + assert trial.status == Trial.PENDING trial.init_logdir() # We checkpoint metadata here to try mitigating logdir duplication diff --git a/python/ray/tune/tests/test_trial_scheduler.py b/python/ray/tune/tests/test_trial_scheduler.py index 102b668277d3..79e455144310 100644 --- a/python/ray/tune/tests/test_trial_scheduler.py +++ b/python/ray/tune/tests/test_trial_scheduler.py @@ -807,10 +807,6 @@ def result(score, ts): [t.status for t in trials], [Trial.PAUSED, Trial.PENDING, Trial.PAUSED] ) - @pytest.mark.skipif( - os.environ.get("TUNE_NEW_EXECUTION") == "1", - reason="BOHB does not currently work with the new execution backend.", - ) def testNonstopBOHB(self): from ray.tune.search.bohb import TuneBOHB diff --git a/python/ray/tune/tests/test_tune_restore_warm_start.py b/python/ray/tune/tests/test_tune_restore_warm_start.py index 9f9a402e1791..dcaeb8464ec3 100644 --- a/python/ray/tune/tests/test_tune_restore_warm_start.py +++ b/python/ray/tune/tests/test_tune_restore_warm_start.py @@ -470,10 +470,6 @@ def cost(space, reporter): return search_alg, cost -@pytest.mark.skipif( - os.environ.get("TUNE_NEW_EXECUTION") == "1", - reason="BOHB does not currently work with the new execution backend.", -) class BOHBWarmStartTest(AbstractWarmStartTest, unittest.TestCase): def set_basic_conf(self): space = {"width": tune.uniform(0, 20), "height": tune.uniform(-100, 100)} From e3bd018b6df96f52013f3d1e9d5fdd540049d6fa Mon Sep 17 00:00:00 2001 From: Sven Mika Date: Fri, 28 Apr 2023 16:50:37 +0200 Subject: [PATCH 146/424] [RLlib] Fix double '::' in RLlib release test yaml files. 
(#34865) --- .../todo_tests_currently_not_covered.yaml | 4 ++-- .../yaml_files/a2c/a2c-breakout-v5.yaml | 2 +- .../a3c/a3c-pongdeterministic-v5.yaml | 2 +- .../apex/apex-breakoutnoframeskip-v5.yaml | 2 +- .../appo/appo-pongnoframeskip-v5.yaml | 2 +- .../yaml_files/bc/bc-halfcheetah-v4.yaml | 2 +- .../yaml_files/cql/cql-halfcheetah-v4.yaml | 2 +- .../yaml_files/ddpg/ddpg-hopper-v4.yaml | 2 +- .../dqn/dqn-breakoutnoframeskip-v5.yaml | 2 +- .../yaml_files/es/es-humanoid-v4.yaml | 2 +- .../impala/impala-breakoutnoframeskip-v5.yaml | 2 +- .../marwil/marwil-halfcheetah-v4.yaml | 2 +- .../ppo/tf/ppo-breakoutnoframeskip-v5-tf.yaml | 2 +- .../ppo-breakoutnoframeskip-v5-torch.yaml | 2 +- .../yaml_files/sac/sac-halfcheetah-v4.yaml | 2 +- .../slateq-interest-evolution-recsim-env.yaml | 2 +- .../yaml_files/td3/td3-halfcheetah-v4.yaml | 2 +- .../multi_gpu_learning_tests.yaml | 20 +++++++++---------- ...lti_gpu_with_attention_learning_tests.yaml | 12 +++++------ .../multi_gpu_with_lstm_learning_tests.yaml | 14 ++++++------- 20 files changed, 41 insertions(+), 41 deletions(-) diff --git a/release/rllib_tests/learning_tests/todo_tests_currently_not_covered.yaml b/release/rllib_tests/learning_tests/todo_tests_currently_not_covered.yaml index b8f75379547b..f769c8fd07d5 100644 --- a/release/rllib_tests/learning_tests/todo_tests_currently_not_covered.yaml +++ b/release/rllib_tests/learning_tests/todo_tests_currently_not_covered.yaml @@ -5,7 +5,7 @@ # run: ARS # # Minimum reward and total ts (in given time_total_s) to pass this test. # pass_criteria: -# sampler_results/episode_reward_mean:: 100.0 +# sampler_results/episode_reward_mean: 100.0 # timesteps_total: 2000000 # stop: # time_total_s: 2000 @@ -29,7 +29,7 @@ # run: DDPPO # # Minimum reward and total ts (in given time_total_s) to pass this test. 
# pass_criteria: -# sampler_results/episode_reward_mean:: 50.0 +# sampler_results/episode_reward_mean: 50.0 # timesteps_total: 10000000 # stop: # time_total_s: 3600 diff --git a/release/rllib_tests/learning_tests/yaml_files/a2c/a2c-breakout-v5.yaml b/release/rllib_tests/learning_tests/yaml_files/a2c/a2c-breakout-v5.yaml index be28e4aee400..c38c9f8fffb0 100644 --- a/release/rllib_tests/learning_tests/yaml_files/a2c/a2c-breakout-v5.yaml +++ b/release/rllib_tests/learning_tests/yaml_files/a2c/a2c-breakout-v5.yaml @@ -3,7 +3,7 @@ a2c-breakoutnoframeskip-v5: run: A2C # Minimum reward and total ts (in given time_total_s) to pass this test. pass_criteria: - sampler_results/episode_reward_mean:: 50.0 + sampler_results/episode_reward_mean: 50.0 timesteps_total: 5000000 stop: time_total_s: 7200 diff --git a/release/rllib_tests/learning_tests/yaml_files/a3c/a3c-pongdeterministic-v5.yaml b/release/rllib_tests/learning_tests/yaml_files/a3c/a3c-pongdeterministic-v5.yaml index 9918de78a74f..3ea52a704525 100644 --- a/release/rllib_tests/learning_tests/yaml_files/a3c/a3c-pongdeterministic-v5.yaml +++ b/release/rllib_tests/learning_tests/yaml_files/a3c/a3c-pongdeterministic-v5.yaml @@ -3,7 +3,7 @@ a3c-pongdeterministic-v5: run: A3C # Minimum reward and total ts (in given time_total_s) to pass this test. pass_criteria: - sampler_results/episode_reward_mean:: 18.0 + sampler_results/episode_reward_mean: 18.0 timesteps_total: 5000000 stop: time_total_s: 3600 diff --git a/release/rllib_tests/learning_tests/yaml_files/apex/apex-breakoutnoframeskip-v5.yaml b/release/rllib_tests/learning_tests/yaml_files/apex/apex-breakoutnoframeskip-v5.yaml index e277cb364eda..81c8fdd20e48 100644 --- a/release/rllib_tests/learning_tests/yaml_files/apex/apex-breakoutnoframeskip-v5.yaml +++ b/release/rllib_tests/learning_tests/yaml_files/apex/apex-breakoutnoframeskip-v5.yaml @@ -3,7 +3,7 @@ apex-breakoutnoframeskip-v5: run: APEX # Minimum reward and total ts (in given time_total_s) to pass this test. 
pass_criteria: - sampler_results/episode_reward_mean:: 100.0 + sampler_results/episode_reward_mean: 100.0 timesteps_total: 12000000 stop: time_total_s: 7200 diff --git a/release/rllib_tests/learning_tests/yaml_files/appo/appo-pongnoframeskip-v5.yaml b/release/rllib_tests/learning_tests/yaml_files/appo/appo-pongnoframeskip-v5.yaml index 4b25f5e105e9..9b5e5a84f9bc 100644 --- a/release/rllib_tests/learning_tests/yaml_files/appo/appo-pongnoframeskip-v5.yaml +++ b/release/rllib_tests/learning_tests/yaml_files/appo/appo-pongnoframeskip-v5.yaml @@ -3,7 +3,7 @@ appo-pongnoframeskip-v5: run: APPO # Minimum reward and total ts (in given time_total_s) to pass this test. pass_criteria: - sampler_results/episode_reward_mean:: 18.0 + sampler_results/episode_reward_mean: 18.0 timesteps_total: 5000000 stop: time_total_s: 1800 diff --git a/release/rllib_tests/learning_tests/yaml_files/bc/bc-halfcheetah-v4.yaml b/release/rllib_tests/learning_tests/yaml_files/bc/bc-halfcheetah-v4.yaml index 04c9b7bb6f22..199022e32d99 100644 --- a/release/rllib_tests/learning_tests/yaml_files/bc/bc-halfcheetah-v4.yaml +++ b/release/rllib_tests/learning_tests/yaml_files/bc/bc-halfcheetah-v4.yaml @@ -2,7 +2,7 @@ bc-halfcheetah-v0: env: HalfCheetah-v4 run: BC pass_criteria: - evaluation/sampler_results/episode_reward_mean:: 400.0 + evaluation/sampler_results/episode_reward_mean: 400.0 timesteps_total: 2500000 stop: time_total_s: 1800 diff --git a/release/rllib_tests/learning_tests/yaml_files/cql/cql-halfcheetah-v4.yaml b/release/rllib_tests/learning_tests/yaml_files/cql/cql-halfcheetah-v4.yaml index 55ad047ffc73..32b7299b9f7f 100644 --- a/release/rllib_tests/learning_tests/yaml_files/cql/cql-halfcheetah-v4.yaml +++ b/release/rllib_tests/learning_tests/yaml_files/cql/cql-halfcheetah-v4.yaml @@ -2,7 +2,7 @@ cql-halfcheetah-v4: env: HalfCheetah-v4 run: CQL pass_criteria: - evaluation/sampler_results/episode_reward_mean:: 400.0 + evaluation/sampler_results/episode_reward_mean: 400.0 # Can not check 
throughput for offline methods. timesteps_total: 5000000 stop: diff --git a/release/rllib_tests/learning_tests/yaml_files/ddpg/ddpg-hopper-v4.yaml b/release/rllib_tests/learning_tests/yaml_files/ddpg/ddpg-hopper-v4.yaml index c40ce18dd007..17149db121b4 100644 --- a/release/rllib_tests/learning_tests/yaml_files/ddpg/ddpg-hopper-v4.yaml +++ b/release/rllib_tests/learning_tests/yaml_files/ddpg/ddpg-hopper-v4.yaml @@ -3,7 +3,7 @@ ddpg-hopper-v4: run: DDPG # Minimum reward and total ts (in given time_total_s) to pass this test. pass_criteria: - sampler_results/episode_reward_mean:: 110.0 + sampler_results/episode_reward_mean: 110.0 timesteps_total: 50000 stop: time_total_s: 1800 diff --git a/release/rllib_tests/learning_tests/yaml_files/dqn/dqn-breakoutnoframeskip-v5.yaml b/release/rllib_tests/learning_tests/yaml_files/dqn/dqn-breakoutnoframeskip-v5.yaml index 2662838c8611..2da9c8ac89cc 100644 --- a/release/rllib_tests/learning_tests/yaml_files/dqn/dqn-breakoutnoframeskip-v5.yaml +++ b/release/rllib_tests/learning_tests/yaml_files/dqn/dqn-breakoutnoframeskip-v5.yaml @@ -3,7 +3,7 @@ dqn-breakoutnoframeskip-v5: run: DQN # Minimum reward and total ts (in given time_total_s) to pass this test. pass_criteria: - sampler_results/episode_reward_mean:: 20.0 + sampler_results/episode_reward_mean: 20.0 timesteps_total: 400000 stop: time_total_s: 7200 diff --git a/release/rllib_tests/learning_tests/yaml_files/es/es-humanoid-v4.yaml b/release/rllib_tests/learning_tests/yaml_files/es/es-humanoid-v4.yaml index 94262af242c8..90825f64217f 100644 --- a/release/rllib_tests/learning_tests/yaml_files/es/es-humanoid-v4.yaml +++ b/release/rllib_tests/learning_tests/yaml_files/es/es-humanoid-v4.yaml @@ -3,7 +3,7 @@ es-humanoid-v4: run: ES # Minimum reward and total ts (in given time_total_s) to pass this test. 
pass_criteria: - sampler_results/episode_reward_mean:: 100.0 + sampler_results/episode_reward_mean: 100.0 timesteps_total: 5000000 stop: time_total_s: 3600 diff --git a/release/rllib_tests/learning_tests/yaml_files/impala/impala-breakoutnoframeskip-v5.yaml b/release/rllib_tests/learning_tests/yaml_files/impala/impala-breakoutnoframeskip-v5.yaml index ef9a408d630d..2a12ca052256 100644 --- a/release/rllib_tests/learning_tests/yaml_files/impala/impala-breakoutnoframeskip-v5.yaml +++ b/release/rllib_tests/learning_tests/yaml_files/impala/impala-breakoutnoframeskip-v5.yaml @@ -3,7 +3,7 @@ impala-breakoutnoframeskip-v5: run: IMPALA # Minimum reward and total ts (in given time_total_s) to pass this test. pass_criteria: - sampler_results/episode_reward_mean:: 200.0 + sampler_results/episode_reward_mean: 200.0 timesteps_total: 6000000 stop: time_total_s: 2400 diff --git a/release/rllib_tests/learning_tests/yaml_files/marwil/marwil-halfcheetah-v4.yaml b/release/rllib_tests/learning_tests/yaml_files/marwil/marwil-halfcheetah-v4.yaml index 5bfc11256d93..59ff10051cfb 100644 --- a/release/rllib_tests/learning_tests/yaml_files/marwil/marwil-halfcheetah-v4.yaml +++ b/release/rllib_tests/learning_tests/yaml_files/marwil/marwil-halfcheetah-v4.yaml @@ -3,7 +3,7 @@ marwil-halfcheetah-v4: run: MARWIL pass_criteria: # Can not check throughput for offline methods. 
- evaluation/sampler_results/episode_reward_mean:: 400.0 + evaluation/sampler_results/episode_reward_mean: 400.0 timesteps_total: 2500000 stop: time_total_s: 1800 diff --git a/release/rllib_tests/learning_tests/yaml_files/ppo/tf/ppo-breakoutnoframeskip-v5-tf.yaml b/release/rllib_tests/learning_tests/yaml_files/ppo/tf/ppo-breakoutnoframeskip-v5-tf.yaml index 8bb51cd0ff95..175fb47f3ccc 100644 --- a/release/rllib_tests/learning_tests/yaml_files/ppo/tf/ppo-breakoutnoframeskip-v5-tf.yaml +++ b/release/rllib_tests/learning_tests/yaml_files/ppo/tf/ppo-breakoutnoframeskip-v5-tf.yaml @@ -3,7 +3,7 @@ ppo-breakoutnoframeskip-v5: run: PPO # Minimum reward and total ts (in given time_total_s) to pass this test. pass_criteria: - sampler_results/episode_reward_mean:: 50.0 + sampler_results/episode_reward_mean: 50.0 timesteps_total: 7000000 stop: time_total_s: 3600 diff --git a/release/rllib_tests/learning_tests/yaml_files/ppo/torch/ppo-breakoutnoframeskip-v5-torch.yaml b/release/rllib_tests/learning_tests/yaml_files/ppo/torch/ppo-breakoutnoframeskip-v5-torch.yaml index e2b2a43604b8..22e0d3826ee9 100644 --- a/release/rllib_tests/learning_tests/yaml_files/ppo/torch/ppo-breakoutnoframeskip-v5-torch.yaml +++ b/release/rllib_tests/learning_tests/yaml_files/ppo/torch/ppo-breakoutnoframeskip-v5-torch.yaml @@ -3,7 +3,7 @@ ppo-breakoutnoframeskip-v5: run: PPO # Minimum reward and total ts (in given time_total_s) to pass this test. pass_criteria: - sampler_results/episode_reward_mean:: 50.0 + sampler_results/episode_reward_mean: 50.0 timesteps_total: 7000000 stop: # This is double the time we use for tf because of 2x throughput there. 
diff --git a/release/rllib_tests/learning_tests/yaml_files/sac/sac-halfcheetah-v4.yaml b/release/rllib_tests/learning_tests/yaml_files/sac/sac-halfcheetah-v4.yaml index dd57dcd79e59..979bda086a3d 100644 --- a/release/rllib_tests/learning_tests/yaml_files/sac/sac-halfcheetah-v4.yaml +++ b/release/rllib_tests/learning_tests/yaml_files/sac/sac-halfcheetah-v4.yaml @@ -3,7 +3,7 @@ sac-halfcheetah-v4: run: SAC # Minimum reward and total ts (in given time_total_s) to pass this test. pass_criteria: - sampler_results/episode_reward_mean:: 500.0 + sampler_results/episode_reward_mean: 500.0 timesteps_total: 400000 stop: time_total_s: 3600 diff --git a/release/rllib_tests/learning_tests/yaml_files/slateq/slateq-interest-evolution-recsim-env.yaml b/release/rllib_tests/learning_tests/yaml_files/slateq/slateq-interest-evolution-recsim-env.yaml index 9a716345d2e0..d7170509d8e1 100644 --- a/release/rllib_tests/learning_tests/yaml_files/slateq/slateq-interest-evolution-recsim-env.yaml +++ b/release/rllib_tests/learning_tests/yaml_files/slateq/slateq-interest-evolution-recsim-env.yaml @@ -2,7 +2,7 @@ slateq-interest-evolution-recsim-env: env: ray.rllib.examples.env.recommender_system_envs_with_recsim.InterestEvolutionRecSimEnv run: SlateQ pass_criteria: - sampler_results/episode_reward_mean:: 160.0 + sampler_results/episode_reward_mean: 160.0 timesteps_total: 300000 stop: time_total_s: 7200 diff --git a/release/rllib_tests/learning_tests/yaml_files/td3/td3-halfcheetah-v4.yaml b/release/rllib_tests/learning_tests/yaml_files/td3/td3-halfcheetah-v4.yaml index a796d28a3ce5..96d4381c7dbe 100644 --- a/release/rllib_tests/learning_tests/yaml_files/td3/td3-halfcheetah-v4.yaml +++ b/release/rllib_tests/learning_tests/yaml_files/td3/td3-halfcheetah-v4.yaml @@ -3,7 +3,7 @@ td3-halfcheetah-v4: run: TD3 # Minimum reward and total ts (in given time_total_s) to pass this test. 
pass_criteria: - sampler_results/episode_reward_mean:: 400.0 + sampler_results/episode_reward_mean: 400.0 timesteps_total: 1000000 stop: time_total_s: 3600 diff --git a/release/rllib_tests/multi_gpu_learning_tests/multi_gpu_learning_tests.yaml b/release/rllib_tests/multi_gpu_learning_tests/multi_gpu_learning_tests.yaml index 3c4277f49a98..8a312996532a 100644 --- a/release/rllib_tests/multi_gpu_learning_tests/multi_gpu_learning_tests.yaml +++ b/release/rllib_tests/multi_gpu_learning_tests/multi_gpu_learning_tests.yaml @@ -42,7 +42,7 @@ appo-cartpole-v1-vtrace: run: APPO # Minimum reward and total ts (in given time_total_s) to pass this test. pass_criteria: - sampler_results/episode_reward_mean:: 150.0 + sampler_results/episode_reward_mean: 150.0 timesteps_total: 500000 stop: time_total_s: 600 @@ -65,7 +65,7 @@ ddpg-repeat-after-me-env: run: DDPG # Minimum reward and total ts (in given time_total_s) to pass this test. pass_criteria: - sampler_results/episode_reward_mean:: -50.0 + sampler_results/episode_reward_mean: -50.0 timesteps_total: 8000 stop: time_total_s: 600 @@ -85,7 +85,7 @@ dqn-cartpole-v1: run: DQN # Minimum reward and total ts (in given time_total_s) to pass this test. pass_criteria: - sampler_results/episode_reward_mean:: 150.0 + sampler_results/episode_reward_mean: 150.0 timesteps_total: 50000 stop: time_total_s: 600 @@ -105,7 +105,7 @@ impala-cartpole-v1: run: IMPALA # Minimum reward and total ts (in given time_total_s) to pass this test. pass_criteria: - sampler_results/episode_reward_mean:: 150.0 + sampler_results/episode_reward_mean: 150.0 timesteps_total: 500000 stop: time_total_s: 600 @@ -120,7 +120,7 @@ pg-cartpole-v1: run: PG # Minimum reward and total ts (in given time_total_s) to pass this test. 
pass_criteria: - sampler_results/episode_reward_mean:: 130.0 + sampler_results/episode_reward_mean: 130.0 timesteps_total: 500000 stop: time_total_s: 600 @@ -138,7 +138,7 @@ ppo-cartpole-v1: run: PPO # Minimum reward and total ts (in given time_total_s) to pass this test. pass_criteria: - sampler_results/episode_reward_mean:: 150.0 + sampler_results/episode_reward_mean: 150.0 timesteps_total: 300000 stop: time_total_s: 600 @@ -161,7 +161,7 @@ sac-repeat-after-me-env: run: SAC # Minimum reward and total ts (in given time_total_s) to pass this test. pass_criteria: - sampler_results/episode_reward_mean:: 40.0 + sampler_results/episode_reward_mean: 40.0 timesteps_total: 4500 stop: time_total_s: 600 @@ -183,7 +183,7 @@ sac-repeat-after-me-env-continuous: run: SAC # Minimum reward and total ts (in given time_total_s) to pass this test. pass_criteria: - sampler_results/episode_reward_mean:: -50.0 + sampler_results/episode_reward_mean: -50.0 timesteps_total: 4500 stop: time_total_s: 600 @@ -208,7 +208,7 @@ simpleq-cartpole-v1: run: SimpleQ # Minimum reward and total ts (in given time_total_s) to pass this test. pass_criteria: - sampler_results/episode_reward_mean:: 150.0 + sampler_results/episode_reward_mean: 150.0 timesteps_total: 85000 stop: time_total_s: 600 @@ -221,7 +221,7 @@ td3-repeat-after-me-env: run: TD3 # Minimum reward and total ts (in given time_total_s) to pass this test. 
pass_criteria: - sampler_results/episode_reward_mean:: -50.0 + sampler_results/episode_reward_mean: -50.0 timesteps_total: 25000 stop: time_total_s: 600 diff --git a/release/rllib_tests/multi_gpu_with_attention_learning_tests/multi_gpu_with_attention_learning_tests.yaml b/release/rllib_tests/multi_gpu_with_attention_learning_tests/multi_gpu_with_attention_learning_tests.yaml index e1109d535fdc..8491f98a81f9 100644 --- a/release/rllib_tests/multi_gpu_with_attention_learning_tests/multi_gpu_with_attention_learning_tests.yaml +++ b/release/rllib_tests/multi_gpu_with_attention_learning_tests/multi_gpu_with_attention_learning_tests.yaml @@ -4,7 +4,7 @@ appo-stateless-cartpole-no-vtrace: run: APPO # Minimum reward and total ts (in given time_total_s) to pass this test. pass_criteria: - sampler_results/episode_reward_mean:: 150.0 + sampler_results/episode_reward_mean: 150.0 timesteps_total: 500000 stop: time_total_s: 600 @@ -38,7 +38,7 @@ appo-stateless-cartpole-vtrace: run: APPO # Minimum reward and total ts (in given time_total_s) to pass this test. pass_criteria: - sampler_results/episode_reward_mean:: 150.0 + sampler_results/episode_reward_mean: 150.0 timesteps_total: 500000 stop: time_total_s: 600 @@ -71,7 +71,7 @@ impala-stateless-cartpole: run: IMPALA # Minimum reward and total ts (in given time_total_s) to pass this test. pass_criteria: - sampler_results/episode_reward_mean:: 150.0 + sampler_results/episode_reward_mean: 150.0 timesteps_total: 500000 stop: time_total_s: 600 @@ -98,7 +98,7 @@ pg-stateless-cartpole: run: PG # Minimum reward and total ts (in given time_total_s) to pass this test. pass_criteria: - sampler_results/episode_reward_mean:: 130.0 + sampler_results/episode_reward_mean: 130.0 timesteps_total: 500000 stop: time_total_s: 600 @@ -126,7 +126,7 @@ ppo-stateless-cartpole: run: PPO # Minimum reward and total ts (in given time_total_s) to pass this test. 
pass_criteria: - sampler_results/episode_reward_mean:: 150.0 + sampler_results/episode_reward_mean: 150.0 timesteps_total: 200000 stop: time_total_s: 600 @@ -160,7 +160,7 @@ ppo-stateless-cartpole: # run: R2D2 # # Minimum reward and total ts (in given time_total_s) to pass this test. # pass_criteria: -# sampler_results/episode_reward_mean:: 150.0 +# sampler_results/episode_reward_mean: 150.0 # timesteps_total: 130000 # stop: # time_total_s: 1200 diff --git a/release/rllib_tests/multi_gpu_with_lstm_learning_tests/multi_gpu_with_lstm_learning_tests.yaml b/release/rllib_tests/multi_gpu_with_lstm_learning_tests/multi_gpu_with_lstm_learning_tests.yaml index dcd692a1ebcf..911c8ba0e8ef 100644 --- a/release/rllib_tests/multi_gpu_with_lstm_learning_tests/multi_gpu_with_lstm_learning_tests.yaml +++ b/release/rllib_tests/multi_gpu_with_lstm_learning_tests/multi_gpu_with_lstm_learning_tests.yaml @@ -4,7 +4,7 @@ a2c-stateless-cartpole: run: A2C # Minimum reward and total ts (in given time_total_s) to pass this test. pass_criteria: - sampler_results/episode_reward_mean:: 150.0 + sampler_results/episode_reward_mean: 150.0 timesteps_total: 500000 stop: time_total_s: 600 @@ -24,7 +24,7 @@ appo-stateless-cartpole-no-vtrace: run: APPO # Minimum reward and total ts (in given time_total_s) to pass this test. pass_criteria: - sampler_results/episode_reward_mean:: 150.0 + sampler_results/episode_reward_mean: 150.0 timesteps_total: 500000 stop: time_total_s: 600 @@ -50,7 +50,7 @@ appo-stateless-cartpole-vtrace: run: APPO # Minimum reward and total ts (in given time_total_s) to pass this test. pass_criteria: - sampler_results/episode_reward_mean:: 150.0 + sampler_results/episode_reward_mean: 150.0 timesteps_total: 500000 stop: time_total_s: 600 @@ -75,7 +75,7 @@ impala-stateless-cartpole: run: IMPALA # Minimum reward and total ts (in given time_total_s) to pass this test. 
pass_criteria: - sampler_results/episode_reward_mean:: 150.0 + sampler_results/episode_reward_mean: 150.0 timesteps_total: 500000 stop: time_total_s: 600 @@ -94,7 +94,7 @@ pg-stateless-cartpole: run: PG # Minimum reward and total ts (in given time_total_s) to pass this test. pass_criteria: - sampler_results/episode_reward_mean:: 130.0 + sampler_results/episode_reward_mean: 130.0 timesteps_total: 500000 stop: time_total_s: 600 @@ -114,7 +114,7 @@ ppo-stateless-cartpole: run: PPO # Minimum reward and total ts (in given time_total_s) to pass this test. pass_criteria: - sampler_results/episode_reward_mean:: 150.0 + sampler_results/episode_reward_mean: 150.0 timesteps_total: 200000 stop: time_total_s: 600 @@ -140,7 +140,7 @@ ppo-stateless-cartpole: # run: R2D2 # # Minimum reward and total ts (in given time_total_s) to pass this test. # pass_criteria: -# sampler_results/episode_reward_mean:: 150.0 +# sampler_results/episode_reward_mean: 150.0 # timesteps_total: 65000 # stop: # time_total_s: 800 From 014111a7f020b58a59f4e968c1f178bedea95892 Mon Sep 17 00:00:00 2001 From: Avnish Narayan <38871737+avnishn@users.noreply.github.com> Date: Fri, 28 Apr 2023 10:38:50 -0700 Subject: [PATCH 147/424] [RLlib] Add Optimizer State To Learner get_state (#34760) Signed-off-by: Avnish --- rllib/BUILD | 4 +- rllib/core/learner/learner.py | 43 +++++++++++++-- .../core/learner/tests/test_learner_group.py | 27 +++++++++- rllib/core/learner/tf/tf_learner.py | 19 +++++++ rllib/core/learner/torch/torch_learner.py | 38 +++++++++++-- rllib/utils/tests/test_torch_utils.py | 53 ++++++++++++++++++- rllib/utils/torch_utils.py | 31 +++++++++++ 7 files changed, 203 insertions(+), 12 deletions(-) diff --git a/rllib/BUILD b/rllib/BUILD index 5bdc5b33fb19..d4ba1d3a7774 100644 --- a/rllib/BUILD +++ b/rllib/BUILD @@ -2283,8 +2283,8 @@ py_test( py_test( name = "utils/tests/test_torch_utils", - tags = ["team:rllib", "utils"], - size = "small", + tags = ["team:rllib", "utils", "gpu"], + size = "medium", srcs = 
["utils/tests/test_torch_utils.py"] ) diff --git a/rllib/core/learner/learner.py b/rllib/core/learner/learner.py index 4847567dac23..ae0503a2eadf 100644 --- a/rllib/core/learner/learner.py +++ b/rllib/core/learner/learner.py @@ -849,14 +849,29 @@ def set_state(self, state: Mapping[str, Any]) -> None: Args: state: The state of the optimizer and module. Can be obtained - from `get_state`. + from `get_state`. State is a dictionary with two keys: + "module_state" and "optimizer_state". The value of each key + is a dictionary that can be passed to `set_weights` and + `set_optimizer_weights` respectively. """ # TODO (Kourosh): We have both get(set)_state and get(set)_weights. I think # having both can become confusing. Can we simplify this API requirement? self._check_is_built() # TODO: once we figure out the optimizer format, we can set/get the state - self._module.set_state(state.get("module_state", {})) + if "module_state" not in state: + raise ValueError( + "state must have a key 'module_state' for the module weights" + ) + if "optimizer_state" not in state: + raise ValueError( + "state must have a key 'optimizer_state' for the optimizer weights" + ) + + module_state = state.get("module_state") + optimizer_state = state.get("optimizer_state") + self.set_weights(module_state) + self.set_optimizer_weights(optimizer_state) def get_state(self) -> Mapping[str, Any]: """Get the state of the learner. @@ -867,7 +882,29 @@ def get_state(self) -> Mapping[str, Any]: """ self._check_is_built() # TODO: once we figure out the optimizer format, we can set/get the state - return {"module_state": self._module.get_state()} + return { + "module_state": self.get_weights(), + "optimizer_state": self.get_optimizer_weights(), + } + # return {"module_state": self.get_weights(), "optimizer_state": {}} + + def set_optimizer_weights(self, weights: Mapping[str, Any]) -> None: + """Set the weights of the optimizer. + + Args: + weights: The weights of the optimizer. 
+ + """ + raise NotImplementedError + + def get_optimizer_weights(self) -> Mapping[str, Any]: + """Get the weights of the optimizer. + + Returns: + The weights of the optimizer. + + """ + raise NotImplementedError def _get_metadata(self) -> Dict[str, Any]: metadata = { diff --git a/rllib/core/learner/tests/test_learner_group.py b/rllib/core/learner/tests/test_learner_group.py index bb0886ab75cc..e01e81edfb8e 100644 --- a/rllib/core/learner/tests/test_learner_group.py +++ b/rllib/core/learner/tests/test_learner_group.py @@ -36,7 +36,7 @@ LOCAL_SCALING_CONFIGS = { "local-cpu": LearnerGroupScalingConfig(num_workers=0, num_gpus_per_worker=0), - "local-gpu": LearnerGroupScalingConfig(num_workers=0, num_gpus_per_worker=0.5), + "local-gpu": LearnerGroupScalingConfig(num_workers=0, num_gpus_per_worker=1), } @@ -45,6 +45,17 @@ @ray.remote(num_gpus=1) class RemoteTrainingHelper: def local_training_helper(self, fw, scaling_mode) -> None: + if fw == "torch": + import torch + + torch.manual_seed(0) + elif fw == "tf": + import tensorflow as tf + + # this is done by rllib already inside of the policy class, but we need to + # do it here for testing purposes + tf.compat.v1.enable_eager_execution() + tf.random.set_seed(0) env = gym.make("CartPole-v1") scaling_config = LOCAL_SCALING_CONFIGS[scaling_mode] lr = 1e-3 @@ -71,13 +82,25 @@ def local_training_helper(self, fw, scaling_mode) -> None: # make the state of the learner and the local learner_group identical local_learner.set_state(learner_group.get_state()) + # learner_group.set_state(learner_group.get_state()) + check(local_learner.get_state(), learner_group.get_state()) # do another update batch = reader.next() ma_batch = MultiAgentBatch( {new_module_id: batch, DEFAULT_POLICY_ID: batch}, env_steps=batch.count ) - check(local_learner.update(ma_batch), learner_group.update(ma_batch)) + # the optimizer state is not initialized fully until the first time that + # training is completed. 
A call to get state before that won't contain the + # optimizer state. So we do a dummy update here to initialize the optimizer + local_learner.update(ma_batch) + learner_group.update(ma_batch) + + check(local_learner.get_state(), learner_group.get_state()) + local_learner_results = local_learner.update(ma_batch) + learner_group_results = learner_group.update(ma_batch) + + check(local_learner_results, learner_group_results) check(local_learner.get_state(), learner_group.get_state()) diff --git a/rllib/core/learner/tf/tf_learner.py b/rllib/core/learner/tf/tf_learner.py index 3f927381fd9c..d676d079cbc6 100644 --- a/rllib/core/learner/tf/tf_learner.py +++ b/rllib/core/learner/tf/tf_learner.py @@ -270,6 +270,25 @@ def _load_optimizers(self, path: Union[str, pathlib.Path]) -> None: def set_weights(self, weights: Mapping[str, Any]) -> None: self._module.set_state(weights) + @override(Learner) + def get_optimizer_weights(self) -> Mapping[str, Any]: + optim_weights = {} + with tf.init_scope(): + for name, optim in self._named_optimizers.items(): + optim_weights[name] = [var.numpy() for var in optim.variables()] + return optim_weights + + @override(Learner) + def set_optimizer_weights(self, weights: Mapping[str, Any]) -> None: + for name, weight_array in weights.items(): + if name not in self._named_optimizers: + raise ValueError( + f"Optimizer {name} in weights is not known." 
+ f"Known optimizers are {self._named_optimizers.keys()}" + ) + optim = self._named_optimizers[name] + optim.set_weights(weight_array) + @override(Learner) def get_param_ref(self, param: ParamType) -> Hashable: return param.ref() diff --git a/rllib/core/learner/torch/torch_learner.py b/rllib/core/learner/torch/torch_learner.py index cc9576b74a61..081f49323774 100644 --- a/rllib/core/learner/torch/torch_learner.py +++ b/rllib/core/learner/torch/torch_learner.py @@ -27,9 +27,13 @@ from ray.rllib.core.rl_module.torch.torch_rl_module import TorchDDPRLModule from ray.rllib.policy.sample_batch import MultiAgentBatch from ray.rllib.utils.annotations import override -from ray.rllib.utils.torch_utils import clip_gradients, convert_to_torch_tensor from ray.rllib.utils.typing import TensorType from ray.rllib.utils.nested_dict import NestedDict +from ray.rllib.utils.torch_utils import ( + clip_gradients, + convert_to_torch_tensor, + copy_torch_tensors, +) from ray.rllib.utils.framework import try_import_torch torch, nn = try_import_torch() @@ -119,16 +123,42 @@ def set_weights(self, weights: Mapping[str, Any]) -> None: def _save_optimizers(self, path: Union[str, pathlib.Path]) -> None: path = pathlib.Path(path) path.mkdir(parents=True, exist_ok=True) - for name, optim in self._named_optimizers.items(): - torch.save(optim.state_dict(), path / f"{name}.pt") + optim_weights = self.get_optimizer_weights() + for name, weights in optim_weights.items(): + torch.save(weights, path / f"{name}.pt") @override(Learner) def _load_optimizers(self, path: Union[str, pathlib.Path]) -> None: path = pathlib.Path(path) if not path.exists(): raise ValueError(f"Directory {path} does not exist.") + weights = {} + for name in self._named_optimizers.keys(): + weights[name] = torch.load(path / f"{name}.pt") + self.set_optimizer_weights(weights) + + @override(Learner) + def get_optimizer_weights(self) -> Mapping[str, Any]: + optimizer_name_weights = {} for name, optim in self._named_optimizers.items(): 
- optim.load_state_dict(torch.load(path / f"{name}.pt")) + optim_state_dict = optim.state_dict() + optim_state_dict_cpu = copy_torch_tensors(optim_state_dict, device="cpu") + optimizer_name_weights[name] = optim_state_dict_cpu + return optimizer_name_weights + + @override(Learner) + def set_optimizer_weights(self, weights: Mapping[str, Any]) -> None: + for name, weight_dict in weights.items(): + if name not in self._named_optimizers: + raise ValueError( + f"Optimizer {name} in weights is not known." + f"Known optimizers are {self._named_optimizers.keys()}" + ) + optim = self._named_optimizers[name] + weight_dict_correct_device = copy_torch_tensors( + weight_dict, device=self._device + ) + optim.load_state_dict(weight_dict_correct_device) @override(Learner) def get_param_ref(self, param: ParamType) -> Hashable: diff --git a/rllib/utils/tests/test_torch_utils.py b/rllib/utils/tests/test_torch_utils.py index ee4d70b643c5..94f8e3a7e79a 100644 --- a/rllib/utils/tests/test_torch_utils.py +++ b/rllib/utils/tests/test_torch_utils.py @@ -4,7 +4,10 @@ import torch.cuda import ray -from ray.rllib.utils.torch_utils import convert_to_torch_tensor +from ray.rllib.utils.torch_utils import ( + convert_to_torch_tensor, + copy_torch_tensors, +) class TestTorchUtils(unittest.TestCase): @@ -43,6 +46,54 @@ def test_convert_to_torch_tensor(self): self.assertTrue(converted["b"].dtype is torch.float32) self.assertTrue(converted["c"] is None) + def test_copy_torch_tensors(self): + array = np.array([1, 2, 3], dtype=np.float32) + device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") + tensor = torch.from_numpy(array).to(device) + tensor_2 = torch.tensor([1.0, 2.0, 3.0], dtype=torch.float64).to(device) + + # Test single tensor + copied_tensor = copy_torch_tensors(tensor, device) + self.assertTrue(copied_tensor.device == device) + self.assertNotEqual(id(copied_tensor), id(tensor)) + self.assertTrue(all(copied_tensor == tensor)) + + # check that dtypes aren't modified + 
copied_tensor_2 = copy_torch_tensors(tensor_2, device) + self.assertTrue(copied_tensor_2.dtype == tensor_2.dtype) + self.assertFalse(copied_tensor_2.dtype == torch.float32) + + # Test nested structure can be converted + nested_structure = {"a": tensor, "b": tensor_2, "c": 1} + copied_nested_structure = copy_torch_tensors(nested_structure, device) + self.assertTrue(copied_nested_structure["a"].device == device) + self.assertTrue(copied_nested_structure["b"].device == device) + self.assertTrue(copied_nested_structure["c"] == 1) + self.assertNotEqual(id(copied_nested_structure["a"]), id(tensor)) + self.assertNotEqual(id(copied_nested_structure["b"]), id(tensor_2)) + self.assertTrue(all(copied_nested_structure["a"] == tensor)) + self.assertTrue(all(copied_nested_structure["b"] == tensor_2)) + + # if gpu is available test moving tensor from cpu to gpu and vice versa + if torch.cuda.is_available(): + tensor = torch.from_numpy(array).to("cpu") + copied_tensor = copy_torch_tensors(tensor, "cuda:0") + self.assertFalse(copied_tensor.device == torch.device("cpu")) + self.assertTrue(copied_tensor.device == torch.device("cuda:0")) + self.assertNotEqual(id(copied_tensor), id(tensor)) + self.assertTrue( + all(copied_tensor.detach().cpu().numpy() == tensor.detach().numpy()) + ) + + tensor = torch.from_numpy(array).to("cuda:0") + copied_tensor = copy_torch_tensors(tensor, "cpu") + self.assertFalse(copied_tensor.device == torch.device("cuda:0")) + self.assertTrue(copied_tensor.device == torch.device("cpu")) + self.assertNotEqual(id(copied_tensor), id(tensor)) + self.assertTrue( + all(copied_tensor.detach().numpy() == tensor.detach().cpu().numpy()) + ) + if __name__ == "__main__": import pytest diff --git a/rllib/utils/torch_utils.py b/rllib/utils/torch_utils.py index 0fb502de8645..85907ba9d1f5 100644 --- a/rllib/utils/torch_utils.py +++ b/rllib/utils/torch_utils.py @@ -231,6 +231,37 @@ def mapping(item): return tree.map_structure(mapping, x) +@PublicAPI +def copy_torch_tensors(x: 
TensorStructType, device: Optional[str] = None): + """Creates a copy of `x` and makes deep copies torch.Tensors in x. + + Also moves the copied tensors to the specified device (if not None). + + Note if an object in x is not a torch.Tensor, it will be shallow-copied. + + Args: + x : Any (possibly nested) struct possibly containing torch.Tensors. + device : The device to move the tensors to. + + Returns: + Any: A new struct with the same structure as `x`, but with all + torch.Tensors deep-copied and moved to the specified device. + + """ + + def mapping(item): + if isinstance(item, torch.Tensor): + return ( + torch.clone(item.detach()) + if device is None + else item.detach().to(device) + ) + else: + return item + + return tree.map_structure(mapping, x) + + @PublicAPI def explained_variance(y: TensorType, pred: TensorType) -> TensorType: """Computes the explained variance for a pair of labels and predictions. From cce78c05b43188933cf554993c7e4c51d8794ef8 Mon Sep 17 00:00:00 2001 From: Jiajun Yao Date: Fri, 28 Apr 2023 13:06:29 -0700 Subject: [PATCH 148/424] [Core] Provide good error message if the factional resource precision is beyond 0.0001 (#34590) Ray internally doesn't support resource quantity beyond the precision of 0.0001 so we should provide a good error message if it's violated. 
Signed-off-by: Jiajun Yao --- python/ray/_private/ray_option_utils.py | 93 ++++++++++++++++------- python/ray/_raylet.pyx | 5 ++ python/ray/dag/tests/test_class_dag.py | 2 +- python/ray/dag/tests/test_function_dag.py | 2 +- python/ray/tests/test_basic.py | 19 +++++ python/ray/tests/test_basic_3.py | 26 ++++--- python/ray/tests/test_basic_4.py | 7 +- src/ray/common/constants.h | 3 + src/ray/raylet/scheduling/fixed_point.h | 16 ++-- 9 files changed, 119 insertions(+), 54 deletions(-) diff --git a/python/ray/_private/ray_option_utils.py b/python/ray/_private/ray_option_utils.py index e51301cf2fd6..88703942f64e 100644 --- a/python/ray/_private/ray_option_utils.py +++ b/python/ray/_private/ray_option_utils.py @@ -3,7 +3,8 @@ from dataclasses import dataclass from typing import Any, Callable, Dict, Optional, Tuple, Union -import ray._private.ray_constants as ray_constants +import ray +from ray._private import ray_constants from ray._private.utils import get_ray_doc_version from ray.util.placement_group import PlacementGroup from ray.util.scheduling_strategies import ( @@ -17,9 +18,9 @@ class Option: # Type constraint of an option. type_constraint: Optional[Union[type, Tuple[type]]] = None # Value constraint of an option. - value_constraint: Optional[Callable[[Any], bool]] = None - # Error message for value constraint. - error_message_for_value_constraint: Optional[str] = None + # The callable should return None if there is no error. + # Otherwise, return the error message. + value_constraint: Optional[Callable[[Any], Optional[str]]] = None # Default value. 
default_value: Any = None @@ -32,8 +33,9 @@ def validate(self, keyword: str, value: Any): f"but received type {type(value)}" ) if self.value_constraint is not None: - if not self.value_constraint(value): - raise ValueError(self.error_message_for_value_constraint) + possible_error_message = self.value_constraint(value) + if possible_error_message: + raise ValueError(possible_error_message) def _counting_option(name: str, infinite: bool = True, default_value: Any = None): @@ -47,29 +49,63 @@ def _counting_option(name: str, infinite: bool = True, default_value: Any = None if infinite: return Option( (int, type(None)), - lambda x: x is None or x >= -1, - f"The keyword '{name}' only accepts None, 0, -1 or a positive integer, " - "where -1 represents infinity.", + lambda x: None + if (x is None or x >= -1) + else f"The keyword '{name}' only accepts None, 0, -1" + " or a positive integer, where -1 represents infinity.", default_value=default_value, ) return Option( (int, type(None)), - lambda x: x is None or x >= 0, - f"The keyword '{name}' only accepts None, 0 or a positive integer.", + lambda x: None + if (x is None or x >= 0) + else f"The keyword '{name}' only accepts None, 0 or a positive integer.", default_value=default_value, ) +def _validate_resource_quantity(name, quantity): + if quantity < 0: + return f"The quantity of resource {name} cannot be negative" + if ( + isinstance(quantity, float) + and quantity != 0.0 + and int(quantity * ray._raylet.RESOURCE_UNIT_SCALING) == 0 + ): + return ( + f"The precision of the fractional quantity of resource {name}" + " cannot go beyond 0.0001" + ) + return None + + def _resource_option(name: str, default_value: Any = None): - """This is used for non-negative options, typically for defining resources.""" + """This is used for resource related options.""" return Option( (float, int, type(None)), - lambda x: x is None or x >= 0, - f"The keyword '{name}' only accepts None, 0 or a positive number", + lambda x: None if (x is None) 
else _validate_resource_quantity(name, x), default_value=default_value, ) +def _validate_resources(resources: Optional[Dict[str, float]]) -> Optional[str]: + if resources is None: + return None + + if "CPU" in resources or "GPU" in resources: + return ( + "Use the 'num_cpus' and 'num_gpus' keyword instead of 'CPU' and 'GPU' " + "in 'resources' keyword" + ) + + for name, quantity in resources.items(): + possible_error_message = _validate_resource_quantity(name, quantity) + if possible_error_message: + return possible_error_message + + return None + + _common_options = { "accelerator_type": Option((str, type(None))), "memory": _resource_option("memory"), @@ -85,12 +121,7 @@ def _resource_option(name: str, default_value: Any = None): ), "placement_group_bundle_index": Option(int, default_value=-1), "placement_group_capture_child_tasks": Option((bool, type(None))), - "resources": Option( - (dict, type(None)), - lambda x: x is None or ("CPU" not in x and "GPU" not in x), - "Use the 'num_cpus' and 'num_gpus' keyword instead of 'CPU' and 'GPU' " - "in 'resources' keyword", - ), + "resources": Option((dict, type(None)), lambda x: _validate_resources(x)), "runtime_env": Option((dict, type(None))), "scheduling_strategy": Option( ( @@ -122,26 +153,29 @@ def issubclass_safe(obj: Any, cls_: type) -> bool: "num_cpus": _resource_option("num_cpus", default_value=1), "num_returns": Option( (int, str, type(None)), - lambda x: x is None or x == "dynamic" or x >= 0, - "The keyword 'num_returns' only accepts None, a non-negative integer, or " + lambda x: None + if (x is None or x == "dynamic" or x >= 0) + else "The keyword 'num_returns' only accepts None, a non-negative integer, or " '"dynamic" (for generators)', default_value=1, ), "object_store_memory": Option( # override "_common_options" (int, type(None)), - lambda x: x is None, - "Setting 'object_store_memory' is not implemented for tasks", + lambda x: None + if (x is None) + else "Setting 'object_store_memory' is not implemented 
for tasks", ), "retry_exceptions": Option( (bool, list, tuple), - lambda x: ( + lambda x: None + if ( isinstance(x, bool) or ( isinstance(x, (list, tuple)) and all(issubclass_safe(x_, Exception) for x_ in x) ) - ), - "retry_exceptions must be either a boolean or a list of exceptions", + ) + else "retry_exceptions must be either a boolean or a list of exceptions", default_value=False, ), } @@ -150,8 +184,9 @@ def issubclass_safe(obj: Any, cls_: type) -> bool: "concurrency_groups": Option((list, dict, type(None))), "lifetime": Option( (str, type(None)), - lambda x: x in (None, "detached", "non_detached"), - "actor `lifetime` argument must be one of 'detached', " + lambda x: None + if x in (None, "detached", "non_detached") + else "actor `lifetime` argument must be one of 'detached', " "'non_detached' and 'None'.", ), "max_concurrency": _counting_option("max_concurrency", False), diff --git a/python/ray/_raylet.pyx b/python/ray/_raylet.pyx index 9e2f06e31b77..867d0a0dba7c 100644 --- a/python/ray/_raylet.pyx +++ b/python/ray/_raylet.pyx @@ -174,6 +174,11 @@ current_task_id_lock = threading.Lock() job_config_initialized = False job_config_initialization_lock = threading.Lock() +cdef extern from "ray/common/constants.h" nogil: + cdef int kResourceUnitScaling + +RESOURCE_UNIT_SCALING = kResourceUnitScaling + class ObjectRefGenerator: def __init__(self, refs): diff --git a/python/ray/dag/tests/test_class_dag.py b/python/ray/dag/tests/test_class_dag.py index bb5d72760c2e..55cff1e540c4 100644 --- a/python/ray/dag/tests/test_class_dag.py +++ b/python/ray/dag/tests/test_class_dag.py @@ -115,7 +115,7 @@ def test_actor_method_options(shared_ray_instance): def test_basic_actor_dag_constructor_invalid_options(shared_ray_instance): with pytest.raises( - ValueError, match=r".*only accepts None, 0 or a positive number.*" + ValueError, match=r".*quantity of resource num_cpus cannot be negative.*" ): a1 = Actor.options(num_cpus=-1).bind(10) invalid_dag = a1.get.bind() diff --git 
a/python/ray/dag/tests/test_function_dag.py b/python/ray/dag/tests/test_function_dag.py index 3d891ad1deab..2c577f10448d 100644 --- a/python/ray/dag/tests/test_function_dag.py +++ b/python/ray/dag/tests/test_function_dag.py @@ -112,7 +112,7 @@ def b(x): # Ensure current DAG is executable assert ray.get(dag.execute()) == 4 with pytest.raises( - ValueError, match=r".*only accepts None, 0 or a positive number.*" + ValueError, match=r".*quantity of resource num_cpus cannot be negative.*" ): invalid_dag = b.options(num_cpus=-1).bind(a_ref) ray.get(invalid_dag.execute()) diff --git a/python/ray/tests/test_basic.py b/python/ray/tests/test_basic.py index b60464f72792..4f30b6c932b7 100644 --- a/python/ray/tests/test_basic.py +++ b/python/ray/tests/test_basic.py @@ -362,6 +362,25 @@ class A: ray.remote(_metadata={"data": 1})(f) ray.remote(_metadata={"data": 1})(A) + # Check invalid resource quantity + with pytest.raises( + ValueError, + match=( + "The precision of the fractional quantity of resource num_gpus" + " cannot go beyond 0.0001" + ), + ): + ray.remote(num_gpus=0.0000001)(f) + + with pytest.raises( + ValueError, + match=( + "The precision of the fractional quantity of resource custom_resource" + " cannot go beyond 0.0001" + ), + ): + ray.remote(resources={"custom_resource": 0.0000001})(f) + def test_options(): """General test of option keywords in Ray.""" diff --git a/python/ray/tests/test_basic_3.py b/python/ray/tests/test_basic_3.py index 14c4323b1c49..318cc5c70cd8 100644 --- a/python/ray/tests/test_basic_3.py +++ b/python/ray/tests/test_basic_3.py @@ -85,13 +85,21 @@ def f(block, accepted_resources): result_ids = [] for rand1, rand2, rand3 in np.random.uniform(size=(100, 3)): resource_set = {"CPU": int(rand1 * 10000) / 10000} - result_ids.append(f._remote([False, resource_set], num_cpus=rand1)) + result_ids.append( + f._remote([False, resource_set], num_cpus=resource_set["CPU"]) + ) resource_set = {"CPU": 1, "GPU": int(rand1 * 10000) / 10000} - 
result_ids.append(f._remote([False, resource_set], num_gpus=rand1)) + result_ids.append( + f._remote([False, resource_set], num_gpus=resource_set["GPU"]) + ) resource_set = {"CPU": 1, "Custom": int(rand1 * 10000) / 10000} - result_ids.append(f._remote([False, resource_set], resources={"Custom": rand1})) + result_ids.append( + f._remote( + [False, resource_set], resources={"Custom": resource_set["Custom"]} + ) + ) resource_set = { "CPU": int(rand1 * 10000) / 10000, @@ -101,17 +109,17 @@ def f(block, accepted_resources): result_ids.append( f._remote( [False, resource_set], - num_cpus=rand1, - num_gpus=rand2, - resources={"Custom": rand3}, + num_cpus=resource_set["CPU"], + num_gpus=resource_set["GPU"], + resources={"Custom": resource_set["Custom"]}, ) ) result_ids.append( f._remote( [True, resource_set], - num_cpus=rand1, - num_gpus=rand2, - resources={"Custom": rand3}, + num_cpus=resource_set["CPU"], + num_gpus=resource_set["GPU"], + resources={"Custom": resource_set["Custom"]}, ) ) assert all(ray.get(result_ids)) diff --git a/python/ray/tests/test_basic_4.py b/python/ray/tests/test_basic_4.py index 43eb63e5ee95..02242886fa08 100644 --- a/python/ray/tests/test_basic_4.py +++ b/python/ray/tests/test_basic_4.py @@ -6,7 +6,6 @@ from pathlib import Path import os -import numpy as np import pytest from unittest import mock @@ -59,11 +58,7 @@ def slow_function(): # Flood a large scale lease worker requests. for i in range(10000): - # Use random cpu resources to make sure that all tasks are sent - # to the raylet. Because core worker will cache tasks with the - # same resource shape. - num_cpus = 0.24 + np.random.uniform(0, 0.01) - slow_function.options(num_cpus=num_cpus).remote() + slow_function.options(num_cpus=0.25).remote() # Check "debug_state.txt" to ensure no extra workers were started. 
session_dir = ray._private.worker.global_worker.node.address_info["session_dir"] diff --git a/src/ray/common/constants.h b/src/ray/common/constants.h index f7646ee0ebe5..aa05020fd509 100644 --- a/src/ray/common/constants.h +++ b/src/ray/common/constants.h @@ -17,6 +17,9 @@ #include #include +/// The precision of fractional resource quantity. +constexpr int kResourceUnitScaling = 10000; + /// Length of Ray full-length IDs in bytes. constexpr size_t kUniqueIDSize = 28; diff --git a/src/ray/raylet/scheduling/fixed_point.h b/src/ray/raylet/scheduling/fixed_point.h index 8e9cbfae206d..ecd59150f1d2 100644 --- a/src/ray/raylet/scheduling/fixed_point.h +++ b/src/ray/raylet/scheduling/fixed_point.h @@ -19,7 +19,7 @@ #include #include -#define RESOURCE_UNIT_SCALING 10000 +#include "ray/common/constants.h" /// Fixed point data type. class FixedPoint { @@ -28,9 +28,9 @@ class FixedPoint { public: FixedPoint() : FixedPoint(0.0) {} - FixedPoint(double d) { i_ = (int64_t)(d * RESOURCE_UNIT_SCALING); } // NOLINT + FixedPoint(double d) { i_ = (int64_t)(d * kResourceUnitScaling); } // NOLINT - FixedPoint(int i) { i_ = (i * RESOURCE_UNIT_SCALING); } // NOLINT + FixedPoint(int i) { i_ = (i * kResourceUnitScaling); } // NOLINT FixedPoint(int64_t i) : FixedPoint((double)i) {} // NOLINT @@ -72,23 +72,23 @@ class FixedPoint { FixedPoint operator+(double const d) const { FixedPoint res; - res.i_ = i_ + static_cast(d * RESOURCE_UNIT_SCALING); + res.i_ = i_ + static_cast(d * kResourceUnitScaling); return res; } FixedPoint operator-(double const d) const { FixedPoint res; - res.i_ = i_ - static_cast(d * RESOURCE_UNIT_SCALING); + res.i_ = i_ - static_cast(d * kResourceUnitScaling); return res; } FixedPoint operator=(double const d) { - i_ = static_cast(d * RESOURCE_UNIT_SCALING); + i_ = static_cast(d * kResourceUnitScaling); return *this; } FixedPoint operator+=(double const d) { - i_ += static_cast(d * RESOURCE_UNIT_SCALING); + i_ += static_cast(d * kResourceUnitScaling); return *this; } @@ 
-104,7 +104,7 @@ class FixedPoint { bool operator==(FixedPoint const &ru1) const { return (i_ == ru1.i_); }; bool operator!=(FixedPoint const &ru1) const { return (i_ != ru1.i_); }; - [[nodiscard]] double Double() const { return round(i_) / RESOURCE_UNIT_SCALING; }; + [[nodiscard]] double Double() const { return round(i_) / kResourceUnitScaling; }; friend std::ostream &operator<<(std::ostream &out, FixedPoint const &ru1); }; From 441e052659a2f0c839be1a9ade6c4d13de478b88 Mon Sep 17 00:00:00 2001 From: Lonnie Liu <95255098+aslonnie@users.noreply.github.com> Date: Fri, 28 Apr 2023 13:46:21 -0700 Subject: [PATCH 149/424] [CI] Pin buildkite dependencies (#34677) Signed-off-by: Lonnie Liu --- WORKSPACE | 12 + release/BUILD | 8 + release/ray_release/util.py | 2 - release/requirements_buildkite.in | 12 + release/requirements_buildkite.txt | 1176 +++++++++++++++++++++++++++- 5 files changed, 1196 insertions(+), 14 deletions(-) create mode 100644 release/requirements_buildkite.in diff --git a/WORKSPACE b/WORKSPACE index 9300a57ed14e..6faea0d927f9 100644 --- a/WORKSPACE +++ b/WORKSPACE @@ -1,5 +1,6 @@ workspace(name = "com_github_ray_project_ray") +load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") load("//bazel:ray_deps_setup.bzl", "ray_deps_setup") ray_deps_setup() @@ -29,3 +30,14 @@ versions.check(minimum_bazel_version = "5.4.0") load("@hedron_compile_commands//:workspace_setup.bzl", "hedron_compile_commands_setup") hedron_compile_commands_setup() + +http_archive( + name = "rules_python", + sha256 = "94750828b18044533e98a129003b6a68001204038dc4749f40b195b24c38f49f", + strip_prefix = "rules_python-0.21.0", + url = "https://github.com/bazelbuild/rules_python/releases/download/0.21.0/rules_python-0.21.0.tar.gz", +) + +load("@rules_python//python/pip_install:repositories.bzl", "pip_install_dependencies") + +pip_install_dependencies() diff --git a/release/BUILD b/release/BUILD index f246db06bcdf..2ea93f31b56b 100644 --- a/release/BUILD +++ b/release/BUILD @@ 
-1,4 +1,12 @@ load("@rules_python//python:defs.bzl", "py_test") +load("@rules_python//python:pip.bzl", "compile_pip_requirements") + +compile_pip_requirements( + name = "requirements_buildkite", + requirements_in = "requirements_buildkite.in", + requirements_txt = "requirements_buildkite.txt", + visibility = ["//visibility:private"], +) test_srcs = glob(["**/*.py"]) diff --git a/release/ray_release/util.py b/release/ray_release/util.py index 2a73f40418e7..a9c7a14e4538 100644 --- a/release/ray_release/util.py +++ b/release/ray_release/util.py @@ -148,8 +148,6 @@ def run_bash_script(bash_script: str) -> None: def reinstall_anyscale_dependencies() -> None: logger.info("Re-installing `anyscale` package") - - # Copy anyscale pin to requirements.txt and requirements_buildkite.txt subprocess.check_output( "pip install -U anyscale", shell=True, diff --git a/release/requirements_buildkite.in b/release/requirements_buildkite.in new file mode 100644 index 000000000000..26555177a25d --- /dev/null +++ b/release/requirements_buildkite.in @@ -0,0 +1,12 @@ +# Requirements to run release tests from buildkite (client dependencies will be installed separately) +# Copy anyscale pin to requirements.txt and util.py +anyscale +click +boto3 +google-cloud-storage +jinja2 +protobuf >= 3.15.3, != 3.19.5 +pydantic < 1.10.0 +pyyaml +requests +retry diff --git a/release/requirements_buildkite.txt b/release/requirements_buildkite.txt index 26555177a25d..d2a08ee4021f 100644 --- a/release/requirements_buildkite.txt +++ b/release/requirements_buildkite.txt @@ -1,12 +1,1164 @@ -# Requirements to run release tests from buildkite (client dependencies will be installed separately) -# Copy anyscale pin to requirements.txt and util.py -anyscale -click -boto3 -google-cloud-storage -jinja2 -protobuf >= 3.15.3, != 3.19.5 -pydantic < 1.10.0 -pyyaml -requests -retry +# +# This file is autogenerated by pip-compile with python 3.7 +# To update, run: +# +# bazel run //release:requirements_buildkite.update +# 
+aiohttp==3.8.4 \ + --hash=sha256:03543dcf98a6619254b409be2d22b51f21ec66272be4ebda7b04e6412e4b2e14 \ + --hash=sha256:03baa76b730e4e15a45f81dfe29a8d910314143414e528737f8589ec60cf7391 \ + --hash=sha256:0a63f03189a6fa7c900226e3ef5ba4d3bd047e18f445e69adbd65af433add5a2 \ + --hash=sha256:10c8cefcff98fd9168cdd86c4da8b84baaa90bf2da2269c6161984e6737bf23e \ + --hash=sha256:147ae376f14b55f4f3c2b118b95be50a369b89b38a971e80a17c3fd623f280c9 \ + --hash=sha256:176a64b24c0935869d5bbc4c96e82f89f643bcdf08ec947701b9dbb3c956b7dd \ + --hash=sha256:17b79c2963db82086229012cff93ea55196ed31f6493bb1ccd2c62f1724324e4 \ + --hash=sha256:1a45865451439eb320784918617ba54b7a377e3501fb70402ab84d38c2cd891b \ + --hash=sha256:1b3ea7edd2d24538959c1c1abf97c744d879d4e541d38305f9bd7d9b10c9ec41 \ + --hash=sha256:22f6eab15b6db242499a16de87939a342f5a950ad0abaf1532038e2ce7d31567 \ + --hash=sha256:3032dcb1c35bc330134a5b8a5d4f68c1a87252dfc6e1262c65a7e30e62298275 \ + --hash=sha256:33587f26dcee66efb2fff3c177547bd0449ab7edf1b73a7f5dea1e38609a0c54 \ + --hash=sha256:34ce9f93a4a68d1272d26030655dd1b58ff727b3ed2a33d80ec433561b03d67a \ + --hash=sha256:3a80464982d41b1fbfe3154e440ba4904b71c1a53e9cd584098cd41efdb188ef \ + --hash=sha256:3b90467ebc3d9fa5b0f9b6489dfb2c304a1db7b9946fa92aa76a831b9d587e99 \ + --hash=sha256:3d89efa095ca7d442a6d0cbc755f9e08190ba40069b235c9886a8763b03785da \ + --hash=sha256:3d8ef1a630519a26d6760bc695842579cb09e373c5f227a21b67dc3eb16cfea4 \ + --hash=sha256:3f43255086fe25e36fd5ed8f2ee47477408a73ef00e804cb2b5cba4bf2ac7f5e \ + --hash=sha256:40653609b3bf50611356e6b6554e3a331f6879fa7116f3959b20e3528783e699 \ + --hash=sha256:41a86a69bb63bb2fc3dc9ad5ea9f10f1c9c8e282b471931be0268ddd09430b04 \ + --hash=sha256:493f5bc2f8307286b7799c6d899d388bbaa7dfa6c4caf4f97ef7521b9cb13719 \ + --hash=sha256:4a6cadebe132e90cefa77e45f2d2f1a4b2ce5c6b1bfc1656c1ddafcfe4ba8131 \ + --hash=sha256:4c745b109057e7e5f1848c689ee4fb3a016c8d4d92da52b312f8a509f83aa05e \ + 
--hash=sha256:4d347a172f866cd1d93126d9b239fcbe682acb39b48ee0873c73c933dd23bd0f \ + --hash=sha256:4dac314662f4e2aa5009977b652d9b8db7121b46c38f2073bfeed9f4049732cd \ + --hash=sha256:4ddaae3f3d32fc2cb4c53fab020b69a05c8ab1f02e0e59665c6f7a0d3a5be54f \ + --hash=sha256:5393fb786a9e23e4799fec788e7e735de18052f83682ce2dfcabaf1c00c2c08e \ + --hash=sha256:59f029a5f6e2d679296db7bee982bb3d20c088e52a2977e3175faf31d6fb75d1 \ + --hash=sha256:5a7bdf9e57126dc345b683c3632e8ba317c31d2a41acd5800c10640387d193ed \ + --hash=sha256:5b3f2e06a512e94722886c0827bee9807c86a9f698fac6b3aee841fab49bbfb4 \ + --hash=sha256:5ce45967538fb747370308d3145aa68a074bdecb4f3a300869590f725ced69c1 \ + --hash=sha256:5e14f25765a578a0a634d5f0cd1e2c3f53964553a00347998dfdf96b8137f777 \ + --hash=sha256:618c901dd3aad4ace71dfa0f5e82e88b46ef57e3239fc7027773cb6d4ed53531 \ + --hash=sha256:652b1bff4f15f6287550b4670546a2947f2a4575b6c6dff7760eafb22eacbf0b \ + --hash=sha256:6c08e8ed6fa3d477e501ec9db169bfac8140e830aa372d77e4a43084d8dd91ab \ + --hash=sha256:6ddb2a2026c3f6a68c3998a6c47ab6795e4127315d2e35a09997da21865757f8 \ + --hash=sha256:6e601588f2b502c93c30cd5a45bfc665faaf37bbe835b7cfd461753068232074 \ + --hash=sha256:6e74dd54f7239fcffe07913ff8b964e28b712f09846e20de78676ce2a3dc0bfc \ + --hash=sha256:7235604476a76ef249bd64cb8274ed24ccf6995c4a8b51a237005ee7a57e8643 \ + --hash=sha256:7ab43061a0c81198d88f39aaf90dae9a7744620978f7ef3e3708339b8ed2ef01 \ + --hash=sha256:7c7837fe8037e96b6dd5cfcf47263c1620a9d332a87ec06a6ca4564e56bd0f36 \ + --hash=sha256:80575ba9377c5171407a06d0196b2310b679dc752d02a1fcaa2bc20b235dbf24 \ + --hash=sha256:80a37fe8f7c1e6ce8f2d9c411676e4bc633a8462844e38f46156d07a7d401654 \ + --hash=sha256:8189c56eb0ddbb95bfadb8f60ea1b22fcfa659396ea36f6adcc521213cd7b44d \ + --hash=sha256:854f422ac44af92bfe172d8e73229c270dc09b96535e8a548f99c84f82dde241 \ + --hash=sha256:880e15bb6dad90549b43f796b391cfffd7af373f4646784795e20d92606b7a51 \ + --hash=sha256:8b631e26df63e52f7cce0cce6507b7a7f1bc9b0c501fcde69742130b32e8782f \ + 
--hash=sha256:8c29c77cc57e40f84acef9bfb904373a4e89a4e8b74e71aa8075c021ec9078c2 \ + --hash=sha256:91f6d540163f90bbaef9387e65f18f73ffd7c79f5225ac3d3f61df7b0d01ad15 \ + --hash=sha256:92c0cea74a2a81c4c76b62ea1cac163ecb20fb3ba3a75c909b9fa71b4ad493cf \ + --hash=sha256:9bcb89336efa095ea21b30f9e686763f2be4478f1b0a616969551982c4ee4c3b \ + --hash=sha256:a1f4689c9a1462f3df0a1f7e797791cd6b124ddbee2b570d34e7f38ade0e2c71 \ + --hash=sha256:a3fec6a4cb5551721cdd70473eb009d90935b4063acc5f40905d40ecfea23e05 \ + --hash=sha256:a5d794d1ae64e7753e405ba58e08fcfa73e3fad93ef9b7e31112ef3c9a0efb52 \ + --hash=sha256:a86d42d7cba1cec432d47ab13b6637bee393a10f664c425ea7b305d1301ca1a3 \ + --hash=sha256:adfbc22e87365a6e564c804c58fc44ff7727deea782d175c33602737b7feadb6 \ + --hash=sha256:aeb29c84bb53a84b1a81c6c09d24cf33bb8432cc5c39979021cc0f98c1292a1a \ + --hash=sha256:aede4df4eeb926c8fa70de46c340a1bc2c6079e1c40ccf7b0eae1313ffd33519 \ + --hash=sha256:b744c33b6f14ca26b7544e8d8aadff6b765a80ad6164fb1a430bbadd593dfb1a \ + --hash=sha256:b7a00a9ed8d6e725b55ef98b1b35c88013245f35f68b1b12c5cd4100dddac333 \ + --hash=sha256:bb96fa6b56bb536c42d6a4a87dfca570ff8e52de2d63cabebfd6fb67049c34b6 \ + --hash=sha256:bbcf1a76cf6f6dacf2c7f4d2ebd411438c275faa1dc0c68e46eb84eebd05dd7d \ + --hash=sha256:bca5f24726e2919de94f047739d0a4fc01372801a3672708260546aa2601bf57 \ + --hash=sha256:bf2e1a9162c1e441bf805a1fd166e249d574ca04e03b34f97e2928769e91ab5c \ + --hash=sha256:c4eb3b82ca349cf6fadcdc7abcc8b3a50ab74a62e9113ab7a8ebc268aad35bb9 \ + --hash=sha256:c6cc15d58053c76eacac5fa9152d7d84b8d67b3fde92709195cb984cfb3475ea \ + --hash=sha256:c6cd05ea06daca6ad6a4ca3ba7fe7dc5b5de063ff4daec6170ec0f9979f6c332 \ + --hash=sha256:c844fd628851c0bc309f3c801b3a3d58ce430b2ce5b359cd918a5a76d0b20cb5 \ + --hash=sha256:c9cb1565a7ad52e096a6988e2ee0397f72fe056dadf75d17fa6b5aebaea05622 \ + --hash=sha256:cab9401de3ea52b4b4c6971db5fb5c999bd4260898af972bf23de1c6b5dd9d71 \ + --hash=sha256:cd468460eefef601ece4428d3cf4562459157c0f6523db89365202c31b6daebb \ + 
--hash=sha256:d1e6a862b76f34395a985b3cd39a0d949ca80a70b6ebdea37d3ab39ceea6698a \ + --hash=sha256:d1f9282c5f2b5e241034a009779e7b2a1aa045f667ff521e7948ea9b56e0c5ff \ + --hash=sha256:d265f09a75a79a788237d7f9054f929ced2e69eb0bb79de3798c468d8a90f945 \ + --hash=sha256:db3fc6120bce9f446d13b1b834ea5b15341ca9ff3f335e4a951a6ead31105480 \ + --hash=sha256:dbf3a08a06b3f433013c143ebd72c15cac33d2914b8ea4bea7ac2c23578815d6 \ + --hash=sha256:de04b491d0e5007ee1b63a309956eaed959a49f5bb4e84b26c8f5d49de140fa9 \ + --hash=sha256:e4b09863aae0dc965c3ef36500d891a3ff495a2ea9ae9171e4519963c12ceefd \ + --hash=sha256:e595432ac259af2d4630008bf638873d69346372d38255774c0e286951e8b79f \ + --hash=sha256:e75b89ac3bd27d2d043b234aa7b734c38ba1b0e43f07787130a0ecac1e12228a \ + --hash=sha256:ea9eb976ffdd79d0e893869cfe179a8f60f152d42cb64622fca418cd9b18dc2a \ + --hash=sha256:eafb3e874816ebe2a92f5e155f17260034c8c341dad1df25672fb710627c6949 \ + --hash=sha256:ee3c36df21b5714d49fc4580247947aa64bcbe2939d1b77b4c8dcb8f6c9faecc \ + --hash=sha256:f352b62b45dff37b55ddd7b9c0c8672c4dd2eb9c0f9c11d395075a84e2c40f75 \ + --hash=sha256:fabb87dd8850ef0f7fe2b366d44b77d7e6fa2ea87861ab3844da99291e81e60f \ + --hash=sha256:fe11310ae1e4cd560035598c3f29d86cef39a83d244c7466f95c27ae04850f10 \ + --hash=sha256:fe7ba4a51f33ab275515f66b0a236bcde4fb5561498fe8f898d4e549b2e4509f + # via anyscale +aiosignal==1.3.1 \ + --hash=sha256:54cd96e15e1649b75d6c87526a6ff0b6c1b0dd3459f43d9ca11d48c339b68cfc \ + --hash=sha256:f8376fb07dd1e86a584e4fcdec80b36b7f81aac666ebc724e2c090300dd83b17 + # via aiohttp +anyscale==0.5.102 \ + --hash=sha256:0fd5999703a5ea0f8c7f53cdfab734f1d65778d52d6de1396ca0254b456182db + # via -r release/requirements_buildkite.in +argon2-cffi==21.3.0 \ + --hash=sha256:8c976986f2c5c0e5000919e6de187906cfd81fb1c72bf9d88c01177e77da7f80 \ + --hash=sha256:d384164d944190a7dd7ef22c6aa3ff197da12962bd04b17f64d4e93d934dba5b + # via anyscale +argon2-cffi-bindings==21.2.0 \ + 
--hash=sha256:20ef543a89dee4db46a1a6e206cd015360e5a75822f76df533845c3cbaf72670 \ + --hash=sha256:2c3e3cc67fdb7d82c4718f19b4e7a87123caf8a93fde7e23cf66ac0337d3cb3f \ + --hash=sha256:3b9ef65804859d335dc6b31582cad2c5166f0c3e7975f324d9ffaa34ee7e6583 \ + --hash=sha256:3e385d1c39c520c08b53d63300c3ecc28622f076f4c2b0e6d7e796e9f6502194 \ + --hash=sha256:58ed19212051f49a523abb1dbe954337dc82d947fb6e5a0da60f7c8471a8476c \ + --hash=sha256:5e00316dabdaea0b2dd82d141cc66889ced0cdcbfa599e8b471cf22c620c329a \ + --hash=sha256:603ca0aba86b1349b147cab91ae970c63118a0f30444d4bc80355937c950c082 \ + --hash=sha256:6a22ad9800121b71099d0fb0a65323810a15f2e292f2ba450810a7316e128ee5 \ + --hash=sha256:8cd69c07dd875537a824deec19f978e0f2078fdda07fd5c42ac29668dda5f40f \ + --hash=sha256:93f9bf70084f97245ba10ee36575f0c3f1e7d7724d67d8e5b08e61787c320ed7 \ + --hash=sha256:9524464572e12979364b7d600abf96181d3541da11e23ddf565a32e70bd4dc0d \ + --hash=sha256:b2ef1c30440dbbcba7a5dc3e319408b59676e2e039e2ae11a8775ecf482b192f \ + --hash=sha256:b746dba803a79238e925d9046a63aa26bf86ab2a2fe74ce6b009a1c3f5c8f2ae \ + --hash=sha256:bb89ceffa6c791807d1305ceb77dbfacc5aa499891d2c55661c6459651fc39e3 \ + --hash=sha256:bd46088725ef7f58b5a1ef7ca06647ebaf0eb4baff7d1d0d177c6cc8744abd86 \ + --hash=sha256:ccb949252cb2ab3a08c02024acb77cfb179492d5701c7cbdbfd776124d4d2367 \ + --hash=sha256:d4966ef5848d820776f5f562a7d45fdd70c2f330c961d0d745b784034bd9f48d \ + --hash=sha256:e415e3f62c8d124ee16018e491a009937f8cf7ebf5eb430ffc5de21b900dad93 \ + --hash=sha256:ed2937d286e2ad0cc79a7087d3c272832865f779430e0cc2b4f3718d3159b0cb \ + --hash=sha256:f1152ac548bd5b8bcecfb0b0371f082037e47128653df2e8ba6e914d384f3c3e \ + --hash=sha256:f9f8b450ed0547e3d473fdc8612083fd08dd2120d6ac8f73828df9b7d45bb351 + # via argon2-cffi +async-timeout==4.0.2 \ + --hash=sha256:2163e1640ddb52b7a8c80d0a67a08587e5d245cc9c553a74a847056bc2976b15 \ + --hash=sha256:8ca1e4fcf50d07413d66d1a5e416e42cfdf5851c981d679a09851a6853383b3c + # via aiohttp +asynctest==0.13.0 \ + 
--hash=sha256:5da6118a7e6d6b54d83a8f7197769d046922a44d2a99c21382f0a6e4fadae676 \ + --hash=sha256:c27862842d15d83e6a34eb0b2866c323880eb3a75e4485b079ea11748fd77fac + # via aiohttp +attrs==23.1.0 \ + --hash=sha256:1f28b4522cdc2fb4256ac1a020c78acf9cba2c6b461ccd2c126f3aa8e8335d04 \ + --hash=sha256:6279836d581513a26f1bf235f9acd333bc9115683f14f7e8fae46c98fc50e015 + # via + # aiohttp + # jsonschema +backports-zoneinfo==0.2.1 \ + --hash=sha256:17746bd546106fa389c51dbea67c8b7c8f0d14b5526a579ca6ccf5ed72c526cf \ + --hash=sha256:1b13e654a55cd45672cb54ed12148cd33628f672548f373963b0bff67b217328 \ + --hash=sha256:1c5742112073a563c81f786e77514969acb58649bcdf6cdf0b4ed31a348d4546 \ + --hash=sha256:4a0f800587060bf8880f954dbef70de6c11bbe59c673c3d818921f042f9954a6 \ + --hash=sha256:5c144945a7752ca544b4b78c8c41544cdfaf9786f25fe5ffb10e838e19a27570 \ + --hash=sha256:7b0a64cda4145548fed9efc10322770f929b944ce5cee6c0dfe0c87bf4c0c8c9 \ + --hash=sha256:8439c030a11780786a2002261569bdf362264f605dfa4d65090b64b05c9f79a7 \ + --hash=sha256:8961c0f32cd0336fb8e8ead11a1f8cd99ec07145ec2931122faaac1c8f7fd987 \ + --hash=sha256:89a48c0d158a3cc3f654da4c2de1ceba85263fafb861b98b59040a5086259722 \ + --hash=sha256:a76b38c52400b762e48131494ba26be363491ac4f9a04c1b7e92483d169f6582 \ + --hash=sha256:da6013fd84a690242c310d77ddb8441a559e9cb3d3d59ebac9aca1a57b2e18bc \ + --hash=sha256:e55b384612d93be96506932a786bbcde5a2db7a9e6a4bb4bffe8b733f5b9036b \ + --hash=sha256:e81b76cace8eda1fca50e345242ba977f9be6ae3945af8d46326d776b4cf78d1 \ + --hash=sha256:e8236383a20872c0cdf5a62b554b27538db7fa1bbec52429d8d106effbaeca08 \ + --hash=sha256:f04e857b59d9d1ccc39ce2da1021d196e47234873820cbeaad210724b1ee28ac \ + --hash=sha256:fadbfe37f74051d024037f223b8e001611eac868b5c5b06144ef4d8b799862f2 + # via + # pytz-deprecation-shim + # tzlocal +boto3==1.26.118 \ + --hash=sha256:1ff703152553f4d5fc9774071d114dbf06ec661eb1b29b6051f6b1f9d0c24873 \ + --hash=sha256:d0ed43228952b55c9f44d1c733f74656418c39c55dbe36bc37feeef6aa583ded + # via + # -r 
release/requirements_buildkite.in + # anyscale +botocore==1.29.118 \ + --hash=sha256:44cb088a73b02dd716c5c5715143a64d5f10388957285246e11f3cc893eebf9d \ + --hash=sha256:b51fc5d50cbc43edaf58b3ec4fa933b82755801c453bf8908c8d3e70ae1142c1 + # via + # anyscale + # boto3 + # s3transfer +cachetools==5.3.0 \ + --hash=sha256:13dfddc7b8df938c21a940dfa6557ce6e94a2f1cdfa58eb90c805721d58f2c14 \ + --hash=sha256:429e1a1e845c008ea6c85aa35d4b98b65d6a9763eeef3e37e92728a12d1de9d4 + # via google-auth +certifi==2022.12.7 \ + --hash=sha256:35824b4c3a97115964b408844d64aa14db1cc518f6562e8d7261699d1350a9e3 \ + --hash=sha256:4ad3232f5e926d6718ec31cfc1fcadfde020920e278684144551c91769c7bc18 + # via + # anyscale + # requests +cffi==1.15.1 \ + --hash=sha256:00a9ed42e88df81ffae7a8ab6d9356b371399b91dbdf0c3cb1e84c03a13aceb5 \ + --hash=sha256:03425bdae262c76aad70202debd780501fabeaca237cdfddc008987c0e0f59ef \ + --hash=sha256:04ed324bda3cda42b9b695d51bb7d54b680b9719cfab04227cdd1e04e5de3104 \ + --hash=sha256:0e2642fe3142e4cc4af0799748233ad6da94c62a8bec3a6648bf8ee68b1c7426 \ + --hash=sha256:173379135477dc8cac4bc58f45db08ab45d228b3363adb7af79436135d028405 \ + --hash=sha256:198caafb44239b60e252492445da556afafc7d1e3ab7a1fb3f0584ef6d742375 \ + --hash=sha256:1e74c6b51a9ed6589199c787bf5f9875612ca4a8a0785fb2d4a84429badaf22a \ + --hash=sha256:2012c72d854c2d03e45d06ae57f40d78e5770d252f195b93f581acf3ba44496e \ + --hash=sha256:21157295583fe8943475029ed5abdcf71eb3911894724e360acff1d61c1d54bc \ + --hash=sha256:2470043b93ff09bf8fb1d46d1cb756ce6132c54826661a32d4e4d132e1977adf \ + --hash=sha256:285d29981935eb726a4399badae8f0ffdff4f5050eaa6d0cfc3f64b857b77185 \ + --hash=sha256:30d78fbc8ebf9c92c9b7823ee18eb92f2e6ef79b45ac84db507f52fbe3ec4497 \ + --hash=sha256:320dab6e7cb2eacdf0e658569d2575c4dad258c0fcc794f46215e1e39f90f2c3 \ + --hash=sha256:33ab79603146aace82c2427da5ca6e58f2b3f2fb5da893ceac0c42218a40be35 \ + --hash=sha256:3548db281cd7d2561c9ad9984681c95f7b0e38881201e157833a2342c30d5e8c \ + 
--hash=sha256:3799aecf2e17cf585d977b780ce79ff0dc9b78d799fc694221ce814c2c19db83 \ + --hash=sha256:39d39875251ca8f612b6f33e6b1195af86d1b3e60086068be9cc053aa4376e21 \ + --hash=sha256:3b926aa83d1edb5aa5b427b4053dc420ec295a08e40911296b9eb1b6170f6cca \ + --hash=sha256:3bcde07039e586f91b45c88f8583ea7cf7a0770df3a1649627bf598332cb6984 \ + --hash=sha256:3d08afd128ddaa624a48cf2b859afef385b720bb4b43df214f85616922e6a5ac \ + --hash=sha256:3eb6971dcff08619f8d91607cfc726518b6fa2a9eba42856be181c6d0d9515fd \ + --hash=sha256:40f4774f5a9d4f5e344f31a32b5096977b5d48560c5592e2f3d2c4374bd543ee \ + --hash=sha256:4289fc34b2f5316fbb762d75362931e351941fa95fa18789191b33fc4cf9504a \ + --hash=sha256:470c103ae716238bbe698d67ad020e1db9d9dba34fa5a899b5e21577e6d52ed2 \ + --hash=sha256:4f2c9f67e9821cad2e5f480bc8d83b8742896f1242dba247911072d4fa94c192 \ + --hash=sha256:50a74364d85fd319352182ef59c5c790484a336f6db772c1a9231f1c3ed0cbd7 \ + --hash=sha256:54a2db7b78338edd780e7ef7f9f6c442500fb0d41a5a4ea24fff1c929d5af585 \ + --hash=sha256:5635bd9cb9731e6d4a1132a498dd34f764034a8ce60cef4f5319c0541159392f \ + --hash=sha256:59c0b02d0a6c384d453fece7566d1c7e6b7bae4fc5874ef2ef46d56776d61c9e \ + --hash=sha256:5d598b938678ebf3c67377cdd45e09d431369c3b1a5b331058c338e201f12b27 \ + --hash=sha256:5df2768244d19ab7f60546d0c7c63ce1581f7af8b5de3eb3004b9b6fc8a9f84b \ + --hash=sha256:5ef34d190326c3b1f822a5b7a45f6c4535e2f47ed06fec77d3d799c450b2651e \ + --hash=sha256:6975a3fac6bc83c4a65c9f9fcab9e47019a11d3d2cf7f3c0d03431bf145a941e \ + --hash=sha256:6c9a799e985904922a4d207a94eae35c78ebae90e128f0c4e521ce339396be9d \ + --hash=sha256:70df4e3b545a17496c9b3f41f5115e69a4f2e77e94e1d2a8e1070bc0c38c8a3c \ + --hash=sha256:7473e861101c9e72452f9bf8acb984947aa1661a7704553a9f6e4baa5ba64415 \ + --hash=sha256:8102eaf27e1e448db915d08afa8b41d6c7ca7a04b7d73af6514df10a3e74bd82 \ + --hash=sha256:87c450779d0914f2861b8526e035c5e6da0a3199d8f1add1a665e1cbc6fc6d02 \ + --hash=sha256:8b7ee99e510d7b66cdb6c593f21c043c248537a32e0bedf02e01e9553a172314 \ + 
--hash=sha256:91fc98adde3d7881af9b59ed0294046f3806221863722ba7d8d120c575314325 \ + --hash=sha256:94411f22c3985acaec6f83c6df553f2dbe17b698cc7f8ae751ff2237d96b9e3c \ + --hash=sha256:98d85c6a2bef81588d9227dde12db8a7f47f639f4a17c9ae08e773aa9c697bf3 \ + --hash=sha256:9ad5db27f9cabae298d151c85cf2bad1d359a1b9c686a275df03385758e2f914 \ + --hash=sha256:a0b71b1b8fbf2b96e41c4d990244165e2c9be83d54962a9a1d118fd8657d2045 \ + --hash=sha256:a0f100c8912c114ff53e1202d0078b425bee3649ae34d7b070e9697f93c5d52d \ + --hash=sha256:a591fe9e525846e4d154205572a029f653ada1a78b93697f3b5a8f1f2bc055b9 \ + --hash=sha256:a5c84c68147988265e60416b57fc83425a78058853509c1b0629c180094904a5 \ + --hash=sha256:a66d3508133af6e8548451b25058d5812812ec3798c886bf38ed24a98216fab2 \ + --hash=sha256:a8c4917bd7ad33e8eb21e9a5bbba979b49d9a97acb3a803092cbc1133e20343c \ + --hash=sha256:b3bbeb01c2b273cca1e1e0c5df57f12dce9a4dd331b4fa1635b8bec26350bde3 \ + --hash=sha256:cba9d6b9a7d64d4bd46167096fc9d2f835e25d7e4c121fb2ddfc6528fb0413b2 \ + --hash=sha256:cc4d65aeeaa04136a12677d3dd0b1c0c94dc43abac5860ab33cceb42b801c1e8 \ + --hash=sha256:ce4bcc037df4fc5e3d184794f27bdaab018943698f4ca31630bc7f84a7b69c6d \ + --hash=sha256:cec7d9412a9102bdc577382c3929b337320c4c4c4849f2c5cdd14d7368c5562d \ + --hash=sha256:d400bfb9a37b1351253cb402671cea7e89bdecc294e8016a707f6d1d8ac934f9 \ + --hash=sha256:d61f4695e6c866a23a21acab0509af1cdfd2c013cf256bbf5b6b5e2695827162 \ + --hash=sha256:db0fbb9c62743ce59a9ff687eb5f4afbe77e5e8403d6697f7446e5f609976f76 \ + --hash=sha256:dd86c085fae2efd48ac91dd7ccffcfc0571387fe1193d33b6394db7ef31fe2a4 \ + --hash=sha256:e00b098126fd45523dd056d2efba6c5a63b71ffe9f2bbe1a4fe1716e1d0c331e \ + --hash=sha256:e229a521186c75c8ad9490854fd8bbdd9a0c9aa3a524326b55be83b54d4e0ad9 \ + --hash=sha256:e263d77ee3dd201c3a142934a086a4450861778baaeeb45db4591ef65550b0a6 \ + --hash=sha256:ed9cb427ba5504c1dc15ede7d516b84757c3e3d7868ccc85121d9310d27eed0b \ + --hash=sha256:fa6693661a4c91757f4412306191b6dc88c1703f780c8234035eac011922bc01 \ + 
--hash=sha256:fcd131dd944808b5bdb38e6f5b53013c5aa4f334c5cad0c72742f6eba4b73db0 + # via argon2-cffi-bindings +charset-normalizer==3.1.0 \ + --hash=sha256:04afa6387e2b282cf78ff3dbce20f0cc071c12dc8f685bd40960cc68644cfea6 \ + --hash=sha256:04eefcee095f58eaabe6dc3cc2262f3bcd776d2c67005880894f447b3f2cb9c1 \ + --hash=sha256:0be65ccf618c1e7ac9b849c315cc2e8a8751d9cfdaa43027d4f6624bd587ab7e \ + --hash=sha256:0c95f12b74681e9ae127728f7e5409cbbef9cd914d5896ef238cc779b8152373 \ + --hash=sha256:0ca564606d2caafb0abe6d1b5311c2649e8071eb241b2d64e75a0d0065107e62 \ + --hash=sha256:10c93628d7497c81686e8e5e557aafa78f230cd9e77dd0c40032ef90c18f2230 \ + --hash=sha256:11d117e6c63e8f495412d37e7dc2e2fff09c34b2d09dbe2bee3c6229577818be \ + --hash=sha256:11d3bcb7be35e7b1bba2c23beedac81ee893ac9871d0ba79effc7fc01167db6c \ + --hash=sha256:12a2b561af122e3d94cdb97fe6fb2bb2b82cef0cdca131646fdb940a1eda04f0 \ + --hash=sha256:12d1a39aa6b8c6f6248bb54550efcc1c38ce0d8096a146638fd4738e42284448 \ + --hash=sha256:1435ae15108b1cb6fffbcea2af3d468683b7afed0169ad718451f8db5d1aff6f \ + --hash=sha256:1c60b9c202d00052183c9be85e5eaf18a4ada0a47d188a83c8f5c5b23252f649 \ + --hash=sha256:1e8fcdd8f672a1c4fc8d0bd3a2b576b152d2a349782d1eb0f6b8e52e9954731d \ + --hash=sha256:20064ead0717cf9a73a6d1e779b23d149b53daf971169289ed2ed43a71e8d3b0 \ + --hash=sha256:21fa558996782fc226b529fdd2ed7866c2c6ec91cee82735c98a197fae39f706 \ + --hash=sha256:22908891a380d50738e1f978667536f6c6b526a2064156203d418f4856d6e86a \ + --hash=sha256:3160a0fd9754aab7d47f95a6b63ab355388d890163eb03b2d2b87ab0a30cfa59 \ + --hash=sha256:322102cdf1ab682ecc7d9b1c5eed4ec59657a65e1c146a0da342b78f4112db23 \ + --hash=sha256:34e0a2f9c370eb95597aae63bf85eb5e96826d81e3dcf88b8886012906f509b5 \ + --hash=sha256:3573d376454d956553c356df45bb824262c397c6e26ce43e8203c4c540ee0acb \ + --hash=sha256:3747443b6a904001473370d7810aa19c3a180ccd52a7157aacc264a5ac79265e \ + --hash=sha256:38e812a197bf8e71a59fe55b757a84c1f946d0ac114acafaafaf21667a7e169e \ + 
--hash=sha256:3a06f32c9634a8705f4ca9946d667609f52cf130d5548881401f1eb2c39b1e2c \ + --hash=sha256:3a5fc78f9e3f501a1614a98f7c54d3969f3ad9bba8ba3d9b438c3bc5d047dd28 \ + --hash=sha256:3d9098b479e78c85080c98e1e35ff40b4a31d8953102bb0fd7d1b6f8a2111a3d \ + --hash=sha256:3dc5b6a8ecfdc5748a7e429782598e4f17ef378e3e272eeb1340ea57c9109f41 \ + --hash=sha256:4155b51ae05ed47199dc5b2a4e62abccb274cee6b01da5b895099b61b1982974 \ + --hash=sha256:49919f8400b5e49e961f320c735388ee686a62327e773fa5b3ce6721f7e785ce \ + --hash=sha256:53d0a3fa5f8af98a1e261de6a3943ca631c526635eb5817a87a59d9a57ebf48f \ + --hash=sha256:5f008525e02908b20e04707a4f704cd286d94718f48bb33edddc7d7b584dddc1 \ + --hash=sha256:628c985afb2c7d27a4800bfb609e03985aaecb42f955049957814e0491d4006d \ + --hash=sha256:65ed923f84a6844de5fd29726b888e58c62820e0769b76565480e1fdc3d062f8 \ + --hash=sha256:6734e606355834f13445b6adc38b53c0fd45f1a56a9ba06c2058f86893ae8017 \ + --hash=sha256:6baf0baf0d5d265fa7944feb9f7451cc316bfe30e8df1a61b1bb08577c554f31 \ + --hash=sha256:6f4f4668e1831850ebcc2fd0b1cd11721947b6dc7c00bf1c6bd3c929ae14f2c7 \ + --hash=sha256:6f5c2e7bc8a4bf7c426599765b1bd33217ec84023033672c1e9a8b35eaeaaaf8 \ + --hash=sha256:6f6c7a8a57e9405cad7485f4c9d3172ae486cfef1344b5ddd8e5239582d7355e \ + --hash=sha256:7381c66e0561c5757ffe616af869b916c8b4e42b367ab29fedc98481d1e74e14 \ + --hash=sha256:73dc03a6a7e30b7edc5b01b601e53e7fc924b04e1835e8e407c12c037e81adbd \ + --hash=sha256:74db0052d985cf37fa111828d0dd230776ac99c740e1a758ad99094be4f1803d \ + --hash=sha256:75f2568b4189dda1c567339b48cba4ac7384accb9c2a7ed655cd86b04055c795 \ + --hash=sha256:78cacd03e79d009d95635e7d6ff12c21eb89b894c354bd2b2ed0b4763373693b \ + --hash=sha256:80d1543d58bd3d6c271b66abf454d437a438dff01c3e62fdbcd68f2a11310d4b \ + --hash=sha256:830d2948a5ec37c386d3170c483063798d7879037492540f10a475e3fd6f244b \ + --hash=sha256:891cf9b48776b5c61c700b55a598621fdb7b1e301a550365571e9624f270c203 \ + --hash=sha256:8f25e17ab3039b05f762b0a55ae0b3632b2e073d9c8fc88e89aca31a6198e88f \ + 
--hash=sha256:9a3267620866c9d17b959a84dd0bd2d45719b817245e49371ead79ed4f710d19 \ + --hash=sha256:a04f86f41a8916fe45ac5024ec477f41f886b3c435da2d4e3d2709b22ab02af1 \ + --hash=sha256:aaf53a6cebad0eae578f062c7d462155eada9c172bd8c4d250b8c1d8eb7f916a \ + --hash=sha256:abc1185d79f47c0a7aaf7e2412a0eb2c03b724581139193d2d82b3ad8cbb00ac \ + --hash=sha256:ac0aa6cd53ab9a31d397f8303f92c42f534693528fafbdb997c82bae6e477ad9 \ + --hash=sha256:ac3775e3311661d4adace3697a52ac0bab17edd166087d493b52d4f4f553f9f0 \ + --hash=sha256:b06f0d3bf045158d2fb8837c5785fe9ff9b8c93358be64461a1089f5da983137 \ + --hash=sha256:b116502087ce8a6b7a5f1814568ccbd0e9f6cfd99948aa59b0e241dc57cf739f \ + --hash=sha256:b82fab78e0b1329e183a65260581de4375f619167478dddab510c6c6fb04d9b6 \ + --hash=sha256:bd7163182133c0c7701b25e604cf1611c0d87712e56e88e7ee5d72deab3e76b5 \ + --hash=sha256:c36bcbc0d5174a80d6cccf43a0ecaca44e81d25be4b7f90f0ed7bcfbb5a00909 \ + --hash=sha256:c3af8e0f07399d3176b179f2e2634c3ce9c1301379a6b8c9c9aeecd481da494f \ + --hash=sha256:c84132a54c750fda57729d1e2599bb598f5fa0344085dbde5003ba429a4798c0 \ + --hash=sha256:cb7b2ab0188829593b9de646545175547a70d9a6e2b63bf2cd87a0a391599324 \ + --hash=sha256:cca4def576f47a09a943666b8f829606bcb17e2bc2d5911a46c8f8da45f56755 \ + --hash=sha256:cf6511efa4801b9b38dc5546d7547d5b5c6ef4b081c60b23e4d941d0eba9cbeb \ + --hash=sha256:d16fd5252f883eb074ca55cb622bc0bee49b979ae4e8639fff6ca3ff44f9f854 \ + --hash=sha256:d2686f91611f9e17f4548dbf050e75b079bbc2a82be565832bc8ea9047b61c8c \ + --hash=sha256:d7fc3fca01da18fbabe4625d64bb612b533533ed10045a2ac3dd194bfa656b60 \ + --hash=sha256:dd5653e67b149503c68c4018bf07e42eeed6b4e956b24c00ccdf93ac79cdff84 \ + --hash=sha256:de5695a6f1d8340b12a5d6d4484290ee74d61e467c39ff03b39e30df62cf83a0 \ + --hash=sha256:e0ac8959c929593fee38da1c2b64ee9778733cdf03c482c9ff1d508b6b593b2b \ + --hash=sha256:e1b25e3ad6c909f398df8921780d6a3d120d8c09466720226fc621605b6f92b1 \ + --hash=sha256:e633940f28c1e913615fd624fcdd72fdba807bf53ea6925d6a588e84e1151531 \ + 
--hash=sha256:e89df2958e5159b811af9ff0f92614dabf4ff617c03a4c1c6ff53bf1c399e0e1 \ + --hash=sha256:ea9f9c6034ea2d93d9147818f17c2a0860d41b71c38b9ce4d55f21b6f9165a11 \ + --hash=sha256:f645caaf0008bacf349875a974220f1f1da349c5dbe7c4ec93048cdc785a3326 \ + --hash=sha256:f8303414c7b03f794347ad062c0516cee0e15f7a612abd0ce1e25caf6ceb47df \ + --hash=sha256:fca62a8301b605b954ad2e9c3666f9d97f63872aa4efcae5492baca2056b74ab + # via + # aiohttp + # requests +click==8.1.3 \ + --hash=sha256:7682dc8afb30297001674575ea00d1814d808d6a36af415a82bd481d37ba7b8e \ + --hash=sha256:bb4d8133cb15a609f44e8213d9b391b0809795062913b383c62be0ee95b1db48 + # via + # -r release/requirements_buildkite.in + # anyscale +colorama==0.4.6 \ + --hash=sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44 \ + --hash=sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6 + # via + # anyscale + # halo + # log-symbols +decorator==5.1.1 \ + --hash=sha256:637996211036b6385ef91435e4fae22989472f9d571faba8927ba8253acbc330 \ + --hash=sha256:b8c3f85900b9dc423225913c5aace94729fe1fa9763b38939a95226f02d37186 + # via retry +expiringdict==1.2.2 \ + --hash=sha256:09a5d20bc361163e6432a874edd3179676e935eb81b925eccef48d409a8a45e8 \ + --hash=sha256:300fb92a7e98f15b05cf9a856c1415b3bc4f2e132be07daa326da6414c23ee09 + # via anyscale +frozenlist==1.3.3 \ + --hash=sha256:008a054b75d77c995ea26629ab3a0c0d7281341f2fa7e1e85fa6153ae29ae99c \ + --hash=sha256:02c9ac843e3390826a265e331105efeab489ffaf4dd86384595ee8ce6d35ae7f \ + --hash=sha256:034a5c08d36649591be1cbb10e09da9f531034acfe29275fc5454a3b101ce41a \ + --hash=sha256:05cdb16d09a0832eedf770cb7bd1fe57d8cf4eaf5aced29c4e41e3f20b30a784 \ + --hash=sha256:0693c609e9742c66ba4870bcee1ad5ff35462d5ffec18710b4ac89337ff16e27 \ + --hash=sha256:0771aed7f596c7d73444c847a1c16288937ef988dc04fb9f7be4b2aa91db609d \ + --hash=sha256:0af2e7c87d35b38732e810befb9d797a99279cbb85374d42ea61c1e9d23094b3 \ + --hash=sha256:14143ae966a6229350021384870458e4777d1eae4c28d1a7aa47f24d030e6678 
\ + --hash=sha256:180c00c66bde6146a860cbb81b54ee0df350d2daf13ca85b275123bbf85de18a \ + --hash=sha256:1841e200fdafc3d51f974d9d377c079a0694a8f06de2e67b48150328d66d5483 \ + --hash=sha256:23d16d9f477bb55b6154654e0e74557040575d9d19fe78a161bd33d7d76808e8 \ + --hash=sha256:2b07ae0c1edaa0a36339ec6cce700f51b14a3fc6545fdd32930d2c83917332cf \ + --hash=sha256:2c926450857408e42f0bbc295e84395722ce74bae69a3b2aa2a65fe22cb14b99 \ + --hash=sha256:2e24900aa13212e75e5b366cb9065e78bbf3893d4baab6052d1aca10d46d944c \ + --hash=sha256:303e04d422e9b911a09ad499b0368dc551e8c3cd15293c99160c7f1f07b59a48 \ + --hash=sha256:352bd4c8c72d508778cf05ab491f6ef36149f4d0cb3c56b1b4302852255d05d5 \ + --hash=sha256:3843f84a6c465a36559161e6c59dce2f2ac10943040c2fd021cfb70d58c4ad56 \ + --hash=sha256:394c9c242113bfb4b9aa36e2b80a05ffa163a30691c7b5a29eba82e937895d5e \ + --hash=sha256:3bbdf44855ed8f0fbcd102ef05ec3012d6a4fd7c7562403f76ce6a52aeffb2b1 \ + --hash=sha256:40de71985e9042ca00b7953c4f41eabc3dc514a2d1ff534027f091bc74416401 \ + --hash=sha256:41fe21dc74ad3a779c3d73a2786bdf622ea81234bdd4faf90b8b03cad0c2c0b4 \ + --hash=sha256:47df36a9fe24054b950bbc2db630d508cca3aa27ed0566c0baf661225e52c18e \ + --hash=sha256:4ea42116ceb6bb16dbb7d526e242cb6747b08b7710d9782aa3d6732bd8d27649 \ + --hash=sha256:58bcc55721e8a90b88332d6cd441261ebb22342e238296bb330968952fbb3a6a \ + --hash=sha256:5c11e43016b9024240212d2a65043b70ed8dfd3b52678a1271972702d990ac6d \ + --hash=sha256:5cf820485f1b4c91e0417ea0afd41ce5cf5965011b3c22c400f6d144296ccbc0 \ + --hash=sha256:5d8860749e813a6f65bad8285a0520607c9500caa23fea6ee407e63debcdbef6 \ + --hash=sha256:6327eb8e419f7d9c38f333cde41b9ae348bec26d840927332f17e887a8dcb70d \ + --hash=sha256:65a5e4d3aa679610ac6e3569e865425b23b372277f89b5ef06cf2cdaf1ebf22b \ + --hash=sha256:66080ec69883597e4d026f2f71a231a1ee9887835902dbe6b6467d5a89216cf6 \ + --hash=sha256:783263a4eaad7c49983fe4b2e7b53fa9770c136c270d2d4bbb6d2192bf4d9caf \ + --hash=sha256:7f44e24fa70f6fbc74aeec3e971f60a14dde85da364aa87f15d1be94ae75aeef \ + 
--hash=sha256:7fdfc24dcfce5b48109867c13b4cb15e4660e7bd7661741a391f821f23dfdca7 \ + --hash=sha256:810860bb4bdce7557bc0febb84bbd88198b9dbc2022d8eebe5b3590b2ad6c842 \ + --hash=sha256:841ea19b43d438a80b4de62ac6ab21cfe6827bb8a9dc62b896acc88eaf9cecba \ + --hash=sha256:84610c1502b2461255b4c9b7d5e9c48052601a8957cd0aea6ec7a7a1e1fb9420 \ + --hash=sha256:899c5e1928eec13fd6f6d8dc51be23f0d09c5281e40d9cf4273d188d9feeaf9b \ + --hash=sha256:8bae29d60768bfa8fb92244b74502b18fae55a80eac13c88eb0b496d4268fd2d \ + --hash=sha256:8df3de3a9ab8325f94f646609a66cbeeede263910c5c0de0101079ad541af332 \ + --hash=sha256:8fa3c6e3305aa1146b59a09b32b2e04074945ffcfb2f0931836d103a2c38f936 \ + --hash=sha256:924620eef691990dfb56dc4709f280f40baee568c794b5c1885800c3ecc69816 \ + --hash=sha256:9309869032abb23d196cb4e4db574232abe8b8be1339026f489eeb34a4acfd91 \ + --hash=sha256:9545a33965d0d377b0bc823dcabf26980e77f1b6a7caa368a365a9497fb09420 \ + --hash=sha256:9ac5995f2b408017b0be26d4a1d7c61bce106ff3d9e3324374d66b5964325448 \ + --hash=sha256:9bbbcedd75acdfecf2159663b87f1bb5cfc80e7cd99f7ddd9d66eb98b14a8411 \ + --hash=sha256:a4ae8135b11652b08a8baf07631d3ebfe65a4c87909dbef5fa0cdde440444ee4 \ + --hash=sha256:a6394d7dadd3cfe3f4b3b186e54d5d8504d44f2d58dcc89d693698e8b7132b32 \ + --hash=sha256:a97b4fe50b5890d36300820abd305694cb865ddb7885049587a5678215782a6b \ + --hash=sha256:ae4dc05c465a08a866b7a1baf360747078b362e6a6dbeb0c57f234db0ef88ae0 \ + --hash=sha256:b1c63e8d377d039ac769cd0926558bb7068a1f7abb0f003e3717ee003ad85530 \ + --hash=sha256:b1e2c1185858d7e10ff045c496bbf90ae752c28b365fef2c09cf0fa309291669 \ + --hash=sha256:b4395e2f8d83fbe0c627b2b696acce67868793d7d9750e90e39592b3626691b7 \ + --hash=sha256:b756072364347cb6aa5b60f9bc18e94b2f79632de3b0190253ad770c5df17db1 \ + --hash=sha256:ba64dc2b3b7b158c6660d49cdb1d872d1d0bf4e42043ad8d5006099479a194e5 \ + --hash=sha256:bed331fe18f58d844d39ceb398b77d6ac0b010d571cba8267c2e7165806b00ce \ + --hash=sha256:c188512b43542b1e91cadc3c6c915a82a5eb95929134faf7fd109f14f9892ce4 \ + 
--hash=sha256:c21b9aa40e08e4f63a2f92ff3748e6b6c84d717d033c7b3438dd3123ee18f70e \ + --hash=sha256:ca713d4af15bae6e5d79b15c10c8522859a9a89d3b361a50b817c98c2fb402a2 \ + --hash=sha256:cd4210baef299717db0a600d7a3cac81d46ef0e007f88c9335db79f8979c0d3d \ + --hash=sha256:cfe33efc9cb900a4c46f91a5ceba26d6df370ffddd9ca386eb1d4f0ad97b9ea9 \ + --hash=sha256:d5cd3ab21acbdb414bb6c31958d7b06b85eeb40f66463c264a9b343a4e238642 \ + --hash=sha256:dfbac4c2dfcc082fcf8d942d1e49b6aa0766c19d3358bd86e2000bf0fa4a9cf0 \ + --hash=sha256:e235688f42b36be2b6b06fc37ac2126a73b75fb8d6bc66dd632aa35286238703 \ + --hash=sha256:eb82dbba47a8318e75f679690190c10a5e1f447fbf9df41cbc4c3afd726d88cb \ + --hash=sha256:ebb86518203e12e96af765ee89034a1dbb0c3c65052d1b0c19bbbd6af8a145e1 \ + --hash=sha256:ee78feb9d293c323b59a6f2dd441b63339a30edf35abcb51187d2fc26e696d13 \ + --hash=sha256:eedab4c310c0299961ac285591acd53dc6723a1ebd90a57207c71f6e0c2153ab \ + --hash=sha256:efa568b885bca461f7c7b9e032655c0c143d305bf01c30caf6db2854a4532b38 \ + --hash=sha256:efce6ae830831ab6a22b9b4091d411698145cb9b8fc869e1397ccf4b4b6455cb \ + --hash=sha256:f163d2fd041c630fed01bc48d28c3ed4a3b003c00acd396900e11ee5316b56bb \ + --hash=sha256:f20380df709d91525e4bee04746ba612a4df0972c1b8f8e1e8af997e678c7b81 \ + --hash=sha256:f30f1928162e189091cf4d9da2eac617bfe78ef907a761614ff577ef4edfb3c8 \ + --hash=sha256:f470c92737afa7d4c3aacc001e335062d582053d4dbe73cda126f2d7031068dd \ + --hash=sha256:ff8bf625fe85e119553b5383ba0fb6aa3d0ec2ae980295aaefa552374926b3f4 + # via + # aiohttp + # aiosignal +gitdb==4.0.10 \ + --hash=sha256:6eb990b69df4e15bad899ea868dc46572c3f75339735663b81de79b06f17eb9a \ + --hash=sha256:c286cf298426064079ed96a9e4a9d39e7f3e9bf15ba60701e95f5492f28415c7 + # via gitpython +gitpython==3.1.31 \ + --hash=sha256:8ce3bcf69adfdf7c7d503e78fd3b1c492af782d58893b650adb2ac8912ddd573 \ + --hash=sha256:f04893614f6aa713a60cbbe1e6a97403ef633103cdd0ef5eb6efe0deb98dbe8d + # via anyscale +google-api-core==2.11.0 \ + 
--hash=sha256:4b9bb5d5a380a0befa0573b302651b8a9a89262c1730e37bf423cec511804c22 \ + --hash=sha256:ce222e27b0de0d7bc63eb043b956996d6dccab14cc3b690aaea91c9cc99dc16e + # via + # google-cloud-core + # google-cloud-storage +google-auth==2.17.3 \ + --hash=sha256:ce311e2bc58b130fddf316df57c9b3943c2a7b4f6ec31de9663a9333e4064efc \ + --hash=sha256:f586b274d3eb7bd932ea424b1c702a30e0393a2e2bc4ca3eae8263ffd8be229f + # via + # anyscale + # google-api-core + # google-cloud-core + # google-cloud-storage +google-cloud-core==2.3.2 \ + --hash=sha256:8417acf6466be2fa85123441696c4badda48db314c607cf1e5d543fa8bdc22fe \ + --hash=sha256:b9529ee7047fd8d4bf4a2182de619154240df17fbe60ead399078c1ae152af9a + # via google-cloud-storage +google-cloud-storage==2.8.0 \ + --hash=sha256:248e210c13bc109909160248af546a91cb2dabaf3d7ebbf04def9dd49f02dbb6 \ + --hash=sha256:4388da1ff5bda6d729f26dbcaf1bfa020a2a52a7b91f0a8123edbda51660802c + # via -r release/requirements_buildkite.in +google-crc32c==1.5.0 \ + --hash=sha256:024894d9d3cfbc5943f8f230e23950cd4906b2fe004c72e29b209420a1e6b05a \ + --hash=sha256:02c65b9817512edc6a4ae7c7e987fea799d2e0ee40c53ec573a692bee24de876 \ + --hash=sha256:02ebb8bf46c13e36998aeaad1de9b48f4caf545e91d14041270d9dca767b780c \ + --hash=sha256:07eb3c611ce363c51a933bf6bd7f8e3878a51d124acfc89452a75120bc436289 \ + --hash=sha256:1034d91442ead5a95b5aaef90dbfaca8633b0247d1e41621d1e9f9db88c36298 \ + --hash=sha256:116a7c3c616dd14a3de8c64a965828b197e5f2d121fedd2f8c5585c547e87b02 \ + --hash=sha256:19e0a019d2c4dcc5e598cd4a4bc7b008546b0358bd322537c74ad47a5386884f \ + --hash=sha256:1c7abdac90433b09bad6c43a43af253e688c9cfc1c86d332aed13f9a7c7f65e2 \ + --hash=sha256:1e986b206dae4476f41bcec1faa057851f3889503a70e1bdb2378d406223994a \ + --hash=sha256:272d3892a1e1a2dbc39cc5cde96834c236d5327e2122d3aaa19f6614531bb6eb \ + --hash=sha256:278d2ed7c16cfc075c91378c4f47924c0625f5fc84b2d50d921b18b7975bd210 \ + --hash=sha256:2ad40e31093a4af319dadf503b2467ccdc8f67c72e4bcba97f8c10cb078207b5 \ + 
--hash=sha256:2e920d506ec85eb4ba50cd4228c2bec05642894d4c73c59b3a2fe20346bd00ee \ + --hash=sha256:3359fc442a743e870f4588fcf5dcbc1bf929df1fad8fb9905cd94e5edb02e84c \ + --hash=sha256:37933ec6e693e51a5b07505bd05de57eee12f3e8c32b07da7e73669398e6630a \ + --hash=sha256:398af5e3ba9cf768787eef45c803ff9614cc3e22a5b2f7d7ae116df8b11e3314 \ + --hash=sha256:3b747a674c20a67343cb61d43fdd9207ce5da6a99f629c6e2541aa0e89215bcd \ + --hash=sha256:461665ff58895f508e2866824a47bdee72497b091c730071f2b7575d5762ab65 \ + --hash=sha256:4c6fdd4fccbec90cc8a01fc00773fcd5fa28db683c116ee3cb35cd5da9ef6c37 \ + --hash=sha256:5829b792bf5822fd0a6f6eb34c5f81dd074f01d570ed7f36aa101d6fc7a0a6e4 \ + --hash=sha256:596d1f98fc70232fcb6590c439f43b350cb762fb5d61ce7b0e9db4539654cc13 \ + --hash=sha256:5ae44e10a8e3407dbe138984f21e536583f2bba1be9491239f942c2464ac0894 \ + --hash=sha256:635f5d4dd18758a1fbd1049a8e8d2fee4ffed124462d837d1a02a0e009c3ab31 \ + --hash=sha256:64e52e2b3970bd891309c113b54cf0e4384762c934d5ae56e283f9a0afcd953e \ + --hash=sha256:66741ef4ee08ea0b2cc3c86916ab66b6aef03768525627fd6a1b34968b4e3709 \ + --hash=sha256:67b741654b851abafb7bc625b6d1cdd520a379074e64b6a128e3b688c3c04740 \ + --hash=sha256:6ac08d24c1f16bd2bf5eca8eaf8304812f44af5cfe5062006ec676e7e1d50afc \ + --hash=sha256:6f998db4e71b645350b9ac28a2167e6632c239963ca9da411523bb439c5c514d \ + --hash=sha256:72218785ce41b9cfd2fc1d6a017dc1ff7acfc4c17d01053265c41a2c0cc39b8c \ + --hash=sha256:74dea7751d98034887dbd821b7aae3e1d36eda111d6ca36c206c44478035709c \ + --hash=sha256:759ce4851a4bb15ecabae28f4d2e18983c244eddd767f560165563bf9aefbc8d \ + --hash=sha256:77e2fd3057c9d78e225fa0a2160f96b64a824de17840351b26825b0848022906 \ + --hash=sha256:7c074fece789b5034b9b1404a1f8208fc2d4c6ce9decdd16e8220c5a793e6f61 \ + --hash=sha256:7c42c70cd1d362284289c6273adda4c6af8039a8ae12dc451dcd61cdabb8ab57 \ + --hash=sha256:7f57f14606cd1dd0f0de396e1e53824c371e9544a822648cd76c034d209b559c \ + --hash=sha256:83c681c526a3439b5cf94f7420471705bbf96262f49a6fe546a6db5f687a3d4a \ + 
--hash=sha256:8485b340a6a9e76c62a7dce3c98e5f102c9219f4cfbf896a00cf48caf078d438 \ + --hash=sha256:84e6e8cd997930fc66d5bb4fde61e2b62ba19d62b7abd7a69920406f9ecca946 \ + --hash=sha256:89284716bc6a5a415d4eaa11b1726d2d60a0cd12aadf5439828353662ede9dd7 \ + --hash=sha256:8b87e1a59c38f275c0e3676fc2ab6d59eccecfd460be267ac360cc31f7bcde96 \ + --hash=sha256:8f24ed114432de109aa9fd317278518a5af2d31ac2ea6b952b2f7782b43da091 \ + --hash=sha256:98cb4d057f285bd80d8778ebc4fde6b4d509ac3f331758fb1528b733215443ae \ + --hash=sha256:998679bf62b7fb599d2878aa3ed06b9ce688b8974893e7223c60db155f26bd8d \ + --hash=sha256:9ba053c5f50430a3fcfd36f75aff9caeba0440b2d076afdb79a318d6ca245f88 \ + --hash=sha256:9c99616c853bb585301df6de07ca2cadad344fd1ada6d62bb30aec05219c45d2 \ + --hash=sha256:a1fd716e7a01f8e717490fbe2e431d2905ab8aa598b9b12f8d10abebb36b04dd \ + --hash=sha256:a2355cba1f4ad8b6988a4ca3feed5bff33f6af2d7f134852cf279c2aebfde541 \ + --hash=sha256:b1f8133c9a275df5613a451e73f36c2aea4fe13c5c8997e22cf355ebd7bd0728 \ + --hash=sha256:b8667b48e7a7ef66afba2c81e1094ef526388d35b873966d8a9a447974ed9178 \ + --hash=sha256:ba1eb1843304b1e5537e1fca632fa894d6f6deca8d6389636ee5b4797affb968 \ + --hash=sha256:be82c3c8cfb15b30f36768797a640e800513793d6ae1724aaaafe5bf86f8f346 \ + --hash=sha256:c02ec1c5856179f171e032a31d6f8bf84e5a75c45c33b2e20a3de353b266ebd8 \ + --hash=sha256:c672d99a345849301784604bfeaeba4db0c7aae50b95be04dd651fd2a7310b93 \ + --hash=sha256:c6c777a480337ac14f38564ac88ae82d4cd238bf293f0a22295b66eb89ffced7 \ + --hash=sha256:cae0274952c079886567f3f4f685bcaf5708f0a23a5f5216fdab71f81a6c0273 \ + --hash=sha256:cd67cf24a553339d5062eff51013780a00d6f97a39ca062781d06b3a73b15462 \ + --hash=sha256:d3515f198eaa2f0ed49f8819d5732d70698c3fa37384146079b3799b97667a94 \ + --hash=sha256:d5280312b9af0976231f9e317c20e4a61cd2f9629b7bfea6a693d1878a264ebd \ + --hash=sha256:de06adc872bcd8c2a4e0dc51250e9e65ef2ca91be023b9d13ebd67c2ba552e1e \ + --hash=sha256:e1674e4307fa3024fc897ca774e9c7562c957af85df55efe2988ed9056dc4e57 \ + 
--hash=sha256:e2096eddb4e7c7bdae4bd69ad364e55e07b8316653234a56552d9c988bd2d61b \ + --hash=sha256:e560628513ed34759456a416bf86b54b2476c59144a9138165c9a1575801d0d9 \ + --hash=sha256:edfedb64740750e1a3b16152620220f51d58ff1b4abceb339ca92e934775c27a \ + --hash=sha256:f13cae8cc389a440def0c8c52057f37359014ccbc9dc1f0827936bcd367c6100 \ + --hash=sha256:f314013e7dcd5cf45ab1945d92e713eec788166262ae8deb2cfacd53def27325 \ + --hash=sha256:f583edb943cf2e09c60441b910d6a20b4d9d626c75a36c8fcac01a6c96c01183 \ + --hash=sha256:fd8536e902db7e365f49e7d9029283403974ccf29b13fc7028b97e2295b33556 \ + --hash=sha256:fe70e325aa68fa4b5edf7d1a4b6f691eb04bbccac0ace68e34820d283b5f80d4 + # via google-resumable-media +google-resumable-media==2.4.1 \ + --hash=sha256:15b8a2e75df42dc6502d1306db0bce2647ba6013f9cd03b6e17368c0886ee90a \ + --hash=sha256:831e86fd78d302c1a034730a0c6e5369dd11d37bad73fa69ca8998460d5bae8d + # via google-cloud-storage +googleapis-common-protos==1.59.0 \ + --hash=sha256:4168fcb568a826a52f23510412da405abd93f4d23ba544bb68d943b14ba3cb44 \ + --hash=sha256:b287dc48449d1d41af0c69f4ea26242b5ae4c3d7249a38b0984c86a4caffff1f + # via google-api-core +halo==0.0.31 \ + --hash=sha256:5350488fb7d2aa7c31a1344120cee67a872901ce8858f60da7946cef96c208ab \ + --hash=sha256:7b67a3521ee91d53b7152d4ee3452811e1d2a6321975137762eb3d70063cc9d6 + # via anyscale +httplib2==0.22.0 \ + --hash=sha256:14ae0a53c1ba8f3d37e9e27cf37eabb0fb9980f435ba405d546948b009dd64dc \ + --hash=sha256:d7a10bc5ef5ab08322488bde8c726eeee5c8618723fdb399597ec58f3d82df81 + # via oauth2client +humanize==4.6.0 \ + --hash=sha256:401201aca462749773f02920139f302450cb548b70489b9b4b92be39fe3c3c50 \ + --hash=sha256:5f1f22bc65911eb1a6ffe7659bd6598e33dcfeeb904eb16ee1e705a09bf75916 + # via anyscale +idna==3.4 \ + --hash=sha256:814f528e8dead7d329833b91c5faa87d60bf71824cd12a7530b5526063d02cb4 \ + --hash=sha256:90b77e79eaa3eba6de819a0c442c0b4ceefc341a7a2ab77d7562bf49f425c5c2 + # via + # requests + # yarl +importlib-metadata==6.5.1 \ + 
--hash=sha256:b986d197242e4e9960a12743a6ec5a9fc8b3d7054612d90489452170785c98a5 \ + --hash=sha256:cd4687a8df60d9aefd424ed9364a8f29def203a9482ec8eb8e8070ef06075f89 + # via + # attrs + # click + # humanize + # jsonschema +importlib-resources==5.12.0 \ + --hash=sha256:4be82589bf5c1d7999aedf2a45159d10cb3ca4f19b2271f8792bc8e6da7b22f6 \ + --hash=sha256:7b1deeebbf351c7578e09bf2f63fa2ce8b5ffec296e0d349139d43cca061a81a + # via jsonschema +jinja2==3.1.2 \ + --hash=sha256:31351a702a408a9e7595a8fc6150fc3f43bb6bf7e319770cbc0db9df9437e852 \ + --hash=sha256:6088930bfe239f0e6710546ab9c19c9ef35e29792895fed6e6e31a023a182a61 + # via -r release/requirements_buildkite.in +jmespath==1.0.1 \ + --hash=sha256:02e2e4cc71b5bcab88332eebf907519190dd9e6e82107fa7f83b1003a6252980 \ + --hash=sha256:90261b206d6defd58fdd5e85f478bf633a2901798906be2ad389150c5c60edbe + # via + # boto3 + # botocore +jsonpatch==1.32 \ + --hash=sha256:26ac385719ac9f54df8a2f0827bb8253aa3ea8ab7b3368457bcdb8c14595a397 \ + --hash=sha256:b6ddfe6c3db30d81a96aaeceb6baf916094ffa23d7dd5fa2c13e13f8b6e600c2 + # via anyscale +jsonpointer==2.3 \ + --hash=sha256:51801e558539b4e9cd268638c078c6c5746c9ac96bc38152d443400e4f3793e9 \ + --hash=sha256:97cba51526c829282218feb99dab1b1e6bdf8efd1c43dc9d57be093c0d69c99a + # via jsonpatch +jsonschema==4.17.3 \ + --hash=sha256:0f864437ab8b6076ba6707453ef8f98a6a0d512a80e93f8abdb676f737ecb60d \ + --hash=sha256:a870ad254da1a8ca84b6a2905cac29d265f805acc57af304784962a2aa6508f6 + # via anyscale +log-symbols==0.0.14 \ + --hash=sha256:4952106ff8b605ab7d5081dd2c7e6ca7374584eff7086f499c06edd1ce56dcca \ + --hash=sha256:cf0bbc6fe1a8e53f0d174a716bc625c4f87043cc21eb55dd8a740cfe22680556 + # via halo +markdown-it-py==2.2.0 \ + --hash=sha256:5a35f8d1870171d9acc47b99612dc146129b631baf04970128b568f190d0cc30 \ + --hash=sha256:7c9a5e412688bc771c67432cbfebcdd686c93ce6484913dccf06cb5a0bea35a1 + # via rich +markupsafe==2.1.2 \ + --hash=sha256:0576fe974b40a400449768941d5d0858cc624e3249dfd1e0c33674e5c7ca7aed \ + 
--hash=sha256:085fd3201e7b12809f9e6e9bc1e5c96a368c8523fad5afb02afe3c051ae4afcc \ + --hash=sha256:090376d812fb6ac5f171e5938e82e7f2d7adc2b629101cec0db8b267815c85e2 \ + --hash=sha256:0b462104ba25f1ac006fdab8b6a01ebbfbce9ed37fd37fd4acd70c67c973e460 \ + --hash=sha256:137678c63c977754abe9086a3ec011e8fd985ab90631145dfb9294ad09c102a7 \ + --hash=sha256:1bea30e9bf331f3fef67e0a3877b2288593c98a21ccb2cf29b74c581a4eb3af0 \ + --hash=sha256:22152d00bf4a9c7c83960521fc558f55a1adbc0631fbb00a9471e097b19d72e1 \ + --hash=sha256:22731d79ed2eb25059ae3df1dfc9cb1546691cc41f4e3130fe6bfbc3ecbbecfa \ + --hash=sha256:2298c859cfc5463f1b64bd55cb3e602528db6fa0f3cfd568d3605c50678f8f03 \ + --hash=sha256:28057e985dace2f478e042eaa15606c7efccb700797660629da387eb289b9323 \ + --hash=sha256:2e7821bffe00aa6bd07a23913b7f4e01328c3d5cc0b40b36c0bd81d362faeb65 \ + --hash=sha256:2ec4f2d48ae59bbb9d1f9d7efb9236ab81429a764dedca114f5fdabbc3788013 \ + --hash=sha256:340bea174e9761308703ae988e982005aedf427de816d1afe98147668cc03036 \ + --hash=sha256:40627dcf047dadb22cd25ea7ecfe9cbf3bbbad0482ee5920b582f3809c97654f \ + --hash=sha256:40dfd3fefbef579ee058f139733ac336312663c6706d1163b82b3003fb1925c4 \ + --hash=sha256:4cf06cdc1dda95223e9d2d3c58d3b178aa5dacb35ee7e3bbac10e4e1faacb419 \ + --hash=sha256:50c42830a633fa0cf9e7d27664637532791bfc31c731a87b202d2d8ac40c3ea2 \ + --hash=sha256:55f44b440d491028addb3b88f72207d71eeebfb7b5dbf0643f7c023ae1fba619 \ + --hash=sha256:608e7073dfa9e38a85d38474c082d4281f4ce276ac0010224eaba11e929dd53a \ + --hash=sha256:63ba06c9941e46fa389d389644e2d8225e0e3e5ebcc4ff1ea8506dce646f8c8a \ + --hash=sha256:65608c35bfb8a76763f37036547f7adfd09270fbdbf96608be2bead319728fcd \ + --hash=sha256:665a36ae6f8f20a4676b53224e33d456a6f5a72657d9c83c2aa00765072f31f7 \ + --hash=sha256:6d6607f98fcf17e534162f0709aaad3ab7a96032723d8ac8750ffe17ae5a0666 \ + --hash=sha256:7313ce6a199651c4ed9d7e4cfb4aa56fe923b1adf9af3b420ee14e6d9a73df65 \ + --hash=sha256:7668b52e102d0ed87cb082380a7e2e1e78737ddecdde129acadb0eccc5423859 \ + 
--hash=sha256:7df70907e00c970c60b9ef2938d894a9381f38e6b9db73c5be35e59d92e06625 \ + --hash=sha256:7e007132af78ea9df29495dbf7b5824cb71648d7133cf7848a2a5dd00d36f9ff \ + --hash=sha256:835fb5e38fd89328e9c81067fd642b3593c33e1e17e2fdbf77f5676abb14a156 \ + --hash=sha256:8bca7e26c1dd751236cfb0c6c72d4ad61d986e9a41bbf76cb445f69488b2a2bd \ + --hash=sha256:8db032bf0ce9022a8e41a22598eefc802314e81b879ae093f36ce9ddf39ab1ba \ + --hash=sha256:99625a92da8229df6d44335e6fcc558a5037dd0a760e11d84be2260e6f37002f \ + --hash=sha256:9cad97ab29dfc3f0249b483412c85c8ef4766d96cdf9dcf5a1e3caa3f3661cf1 \ + --hash=sha256:a4abaec6ca3ad8660690236d11bfe28dfd707778e2442b45addd2f086d6ef094 \ + --hash=sha256:a6e40afa7f45939ca356f348c8e23048e02cb109ced1eb8420961b2f40fb373a \ + --hash=sha256:a6f2fcca746e8d5910e18782f976489939d54a91f9411c32051b4aab2bd7c513 \ + --hash=sha256:a806db027852538d2ad7555b203300173dd1b77ba116de92da9afbc3a3be3eed \ + --hash=sha256:abcabc8c2b26036d62d4c746381a6f7cf60aafcc653198ad678306986b09450d \ + --hash=sha256:b8526c6d437855442cdd3d87eede9c425c4445ea011ca38d937db299382e6fa3 \ + --hash=sha256:bb06feb762bade6bf3c8b844462274db0c76acc95c52abe8dbed28ae3d44a147 \ + --hash=sha256:c0a33bc9f02c2b17c3ea382f91b4db0e6cde90b63b296422a939886a7a80de1c \ + --hash=sha256:c4a549890a45f57f1ebf99c067a4ad0cb423a05544accaf2b065246827ed9603 \ + --hash=sha256:ca244fa73f50a800cf8c3ebf7fd93149ec37f5cb9596aa8873ae2c1d23498601 \ + --hash=sha256:cf877ab4ed6e302ec1d04952ca358b381a882fbd9d1b07cccbfd61783561f98a \ + --hash=sha256:d9d971ec1e79906046aa3ca266de79eac42f1dbf3612a05dc9368125952bd1a1 \ + --hash=sha256:da25303d91526aac3672ee6d49a2f3db2d9502a4a60b55519feb1a4c7714e07d \ + --hash=sha256:e55e40ff0cc8cc5c07996915ad367fa47da6b3fc091fdadca7f5403239c5fec3 \ + --hash=sha256:f03a532d7dee1bed20bc4884194a16160a2de9ffc6354b3878ec9682bb623c54 \ + --hash=sha256:f1cd098434e83e656abf198f103a8207a8187c0fc110306691a2e94a78d0abb2 \ + --hash=sha256:f2bfb563d0211ce16b63c7cb9395d2c682a23187f54c3d79bfec33e6705473c6 \ + 
--hash=sha256:f8ffb705ffcf5ddd0e80b65ddf7bed7ee4f5a441ea7d3419e861a12eaf41af58 + # via jinja2 +mdurl==0.1.2 \ + --hash=sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8 \ + --hash=sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba + # via markdown-it-py +multidict==6.0.4 \ + --hash=sha256:01a3a55bd90018c9c080fbb0b9f4891db37d148a0a18722b42f94694f8b6d4c9 \ + --hash=sha256:0b1a97283e0c85772d613878028fec909f003993e1007eafa715b24b377cb9b8 \ + --hash=sha256:0dfad7a5a1e39c53ed00d2dd0c2e36aed4650936dc18fd9a1826a5ae1cad6f03 \ + --hash=sha256:11bdf3f5e1518b24530b8241529d2050014c884cf18b6fc69c0c2b30ca248710 \ + --hash=sha256:1502e24330eb681bdaa3eb70d6358e818e8e8f908a22a1851dfd4e15bc2f8161 \ + --hash=sha256:16ab77bbeb596e14212e7bab8429f24c1579234a3a462105cda4a66904998664 \ + --hash=sha256:16d232d4e5396c2efbbf4f6d4df89bfa905eb0d4dc5b3549d872ab898451f569 \ + --hash=sha256:21a12c4eb6ddc9952c415f24eef97e3e55ba3af61f67c7bc388dcdec1404a067 \ + --hash=sha256:27c523fbfbdfd19c6867af7346332b62b586eed663887392cff78d614f9ec313 \ + --hash=sha256:281af09f488903fde97923c7744bb001a9b23b039a909460d0f14edc7bf59706 \ + --hash=sha256:33029f5734336aa0d4c0384525da0387ef89148dc7191aae00ca5fb23d7aafc2 \ + --hash=sha256:3601a3cece3819534b11d4efc1eb76047488fddd0c85a3948099d5da4d504636 \ + --hash=sha256:3666906492efb76453c0e7b97f2cf459b0682e7402c0489a95484965dbc1da49 \ + --hash=sha256:36c63aaa167f6c6b04ef2c85704e93af16c11d20de1d133e39de6a0e84582a93 \ + --hash=sha256:39ff62e7d0f26c248b15e364517a72932a611a9b75f35b45be078d81bdb86603 \ + --hash=sha256:43644e38f42e3af682690876cff722d301ac585c5b9e1eacc013b7a3f7b696a0 \ + --hash=sha256:4372381634485bec7e46718edc71528024fcdc6f835baefe517b34a33c731d60 \ + --hash=sha256:458f37be2d9e4c95e2d8866a851663cbc76e865b78395090786f6cd9b3bbf4f4 \ + --hash=sha256:45e1ecb0379bfaab5eef059f50115b54571acfbe422a14f668fc8c27ba410e7e \ + --hash=sha256:4b9d9e4e2b37daddb5c23ea33a3417901fa7c7b3dee2d855f63ee67a0b21e5b1 \ + 
--hash=sha256:4ceef517eca3e03c1cceb22030a3e39cb399ac86bff4e426d4fc6ae49052cc60 \ + --hash=sha256:4d1a3d7ef5e96b1c9e92f973e43aa5e5b96c659c9bc3124acbbd81b0b9c8a951 \ + --hash=sha256:4dcbb0906e38440fa3e325df2359ac6cb043df8e58c965bb45f4e406ecb162cc \ + --hash=sha256:509eac6cf09c794aa27bcacfd4d62c885cce62bef7b2c3e8b2e49d365b5003fe \ + --hash=sha256:52509b5be062d9eafc8170e53026fbc54cf3b32759a23d07fd935fb04fc22d95 \ + --hash=sha256:52f2dffc8acaba9a2f27174c41c9e57f60b907bb9f096b36b1a1f3be71c6284d \ + --hash=sha256:574b7eae1ab267e5f8285f0fe881f17efe4b98c39a40858247720935b893bba8 \ + --hash=sha256:5979b5632c3e3534e42ca6ff856bb24b2e3071b37861c2c727ce220d80eee9ed \ + --hash=sha256:59d43b61c59d82f2effb39a93c48b845efe23a3852d201ed2d24ba830d0b4cf2 \ + --hash=sha256:5a4dcf02b908c3b8b17a45fb0f15b695bf117a67b76b7ad18b73cf8e92608775 \ + --hash=sha256:5cad9430ab3e2e4fa4a2ef4450f548768400a2ac635841bc2a56a2052cdbeb87 \ + --hash=sha256:5fc1b16f586f049820c5c5b17bb4ee7583092fa0d1c4e28b5239181ff9532e0c \ + --hash=sha256:62501642008a8b9871ddfccbf83e4222cf8ac0d5aeedf73da36153ef2ec222d2 \ + --hash=sha256:64bdf1086b6043bf519869678f5f2757f473dee970d7abf6da91ec00acb9cb98 \ + --hash=sha256:64da238a09d6039e3bd39bb3aee9c21a5e34f28bfa5aa22518581f910ff94af3 \ + --hash=sha256:666daae833559deb2d609afa4490b85830ab0dfca811a98b70a205621a6109fe \ + --hash=sha256:67040058f37a2a51ed8ea8f6b0e6ee5bd78ca67f169ce6122f3e2ec80dfe9b78 \ + --hash=sha256:6748717bb10339c4760c1e63da040f5f29f5ed6e59d76daee30305894069a660 \ + --hash=sha256:6b181d8c23da913d4ff585afd1155a0e1194c0b50c54fcfe286f70cdaf2b7176 \ + --hash=sha256:6ed5f161328b7df384d71b07317f4d8656434e34591f20552c7bcef27b0ab88e \ + --hash=sha256:7582a1d1030e15422262de9f58711774e02fa80df0d1578995c76214f6954988 \ + --hash=sha256:7d18748f2d30f94f498e852c67d61261c643b349b9d2a581131725595c45ec6c \ + --hash=sha256:7d6ae9d593ef8641544d6263c7fa6408cc90370c8cb2bbb65f8d43e5b0351d9c \ + --hash=sha256:81a4f0b34bd92df3da93315c6a59034df95866014ac08535fc819f043bfd51f0 \ + 
--hash=sha256:8316a77808c501004802f9beebde51c9f857054a0c871bd6da8280e718444449 \ + --hash=sha256:853888594621e6604c978ce2a0444a1e6e70c8d253ab65ba11657659dcc9100f \ + --hash=sha256:99b76c052e9f1bc0721f7541e5e8c05db3941eb9ebe7b8553c625ef88d6eefde \ + --hash=sha256:a2e4369eb3d47d2034032a26c7a80fcb21a2cb22e1173d761a162f11e562caa5 \ + --hash=sha256:ab55edc2e84460694295f401215f4a58597f8f7c9466faec545093045476327d \ + --hash=sha256:af048912e045a2dc732847d33821a9d84ba553f5c5f028adbd364dd4765092ac \ + --hash=sha256:b1a2eeedcead3a41694130495593a559a668f382eee0727352b9a41e1c45759a \ + --hash=sha256:b1e8b901e607795ec06c9e42530788c45ac21ef3aaa11dbd0c69de543bfb79a9 \ + --hash=sha256:b41156839806aecb3641f3208c0dafd3ac7775b9c4c422d82ee2a45c34ba81ca \ + --hash=sha256:b692f419760c0e65d060959df05f2a531945af31fda0c8a3b3195d4efd06de11 \ + --hash=sha256:bc779e9e6f7fda81b3f9aa58e3a6091d49ad528b11ed19f6621408806204ad35 \ + --hash=sha256:bf6774e60d67a9efe02b3616fee22441d86fab4c6d335f9d2051d19d90a40063 \ + --hash=sha256:c048099e4c9e9d615545e2001d3d8a4380bd403e1a0578734e0d31703d1b0c0b \ + --hash=sha256:c5cb09abb18c1ea940fb99360ea0396f34d46566f157122c92dfa069d3e0e982 \ + --hash=sha256:cc8e1d0c705233c5dd0c5e6460fbad7827d5d36f310a0fadfd45cc3029762258 \ + --hash=sha256:d5e3fc56f88cc98ef8139255cf8cd63eb2c586531e43310ff859d6bb3a6b51f1 \ + --hash=sha256:d6aa0418fcc838522256761b3415822626f866758ee0bc6632c9486b179d0b52 \ + --hash=sha256:d6c254ba6e45d8e72739281ebc46ea5eb5f101234f3ce171f0e9f5cc86991480 \ + --hash=sha256:d6d635d5209b82a3492508cf5b365f3446afb65ae7ebd755e70e18f287b0adf7 \ + --hash=sha256:dcfe792765fab89c365123c81046ad4103fcabbc4f56d1c1997e6715e8015461 \ + --hash=sha256:ddd3915998d93fbcd2566ddf9cf62cdb35c9e093075f862935573d265cf8f65d \ + --hash=sha256:ddff9c4e225a63a5afab9dd15590432c22e8057e1a9a13d28ed128ecf047bbdc \ + --hash=sha256:e41b7e2b59679edfa309e8db64fdf22399eec4b0b24694e1b2104fb789207779 \ + --hash=sha256:e69924bfcdda39b722ef4d9aa762b2dd38e4632b3641b1d9a57ca9cd18f2f83a \ + 
--hash=sha256:ea20853c6dbbb53ed34cb4d080382169b6f4554d394015f1bef35e881bf83547 \ + --hash=sha256:ee2a1ece51b9b9e7752e742cfb661d2a29e7bcdba2d27e66e28a99f1890e4fa0 \ + --hash=sha256:eeb6dcc05e911516ae3d1f207d4b0520d07f54484c49dfc294d6e7d63b734171 \ + --hash=sha256:f70b98cd94886b49d91170ef23ec5c0e8ebb6f242d734ed7ed677b24d50c82cf \ + --hash=sha256:fc35cb4676846ef752816d5be2193a1e8367b4c1397b74a565a9d0389c433a1d \ + --hash=sha256:ff959bee35038c4624250473988b24f846cbeb2c6639de3602c073f10410ceba + # via + # aiohttp + # yarl +oauth2client==4.1.3 \ + --hash=sha256:b8a81cc5d60e2d364f0b1b98f958dbd472887acaf1a5b05e21c28c31a2d6d3ac \ + --hash=sha256:d486741e451287f69568a4d26d70d9acd73a2bbfa275746c535b4209891cccc6 + # via anyscale +packaging==23.1 \ + --hash=sha256:994793af429502c4ea2ebf6bf664629d07c1a9fe974af92966e4b8d2df7edc61 \ + --hash=sha256:a392980d2b6cffa644431898be54b0045151319d1e7ec34f0cfed48767dd334f + # via anyscale +pathspec==0.11.1 \ + --hash=sha256:2798de800fa92780e33acca925945e9a19a133b715067cf165b8866c15a31687 \ + --hash=sha256:d8af70af76652554bd134c22b3e8a1cc46ed7d91edcdd721ef1a0c51a84a5293 + # via anyscale +pkgutil-resolve-name==1.3.10 \ + --hash=sha256:357d6c9e6a755653cfd78893817c0853af365dd51ec97f3d358a819373bbd174 \ + --hash=sha256:ca27cc078d25c5ad71a9de0a7a330146c4e014c2462d9af19c6b828280649c5e + # via jsonschema +protobuf==4.22.3 \ + --hash=sha256:13233ee2b9d3bd9a5f216c1fa2c321cd564b93d8f2e4f521a85b585447747997 \ + --hash=sha256:23452f2fdea754a8251d0fc88c0317735ae47217e0d27bf330a30eec2848811a \ + --hash=sha256:52f0a78141078077cfe15fe333ac3e3a077420b9a3f5d1bf9b5fe9d286b4d881 \ + --hash=sha256:70659847ee57a5262a65954538088a1d72dfc3e9882695cab9f0c54ffe71663b \ + --hash=sha256:7760730063329d42a9d4c4573b804289b738d4931e363ffbe684716b796bde51 \ + --hash=sha256:7cf56e31907c532e460bb62010a513408e6cdf5b03fb2611e4b67ed398ad046d \ + --hash=sha256:8b54f56d13ae4a3ec140076c9d937221f887c8f64954673d46f63751209e839a \ + 
--hash=sha256:d14fc1a41d1a1909998e8aff7e80d2a7ae14772c4a70e4bf7db8a36690b54425 \ + --hash=sha256:d4b66266965598ff4c291416be429cef7989d8fae88b55b62095a2331511b3fa \ + --hash=sha256:e0e630d8e6a79f48c557cd1835865b593d0547dce221c66ed1b827de59c66c97 \ + --hash=sha256:ecae944c6c2ce50dda6bf76ef5496196aeb1b85acb95df5843cd812615ec4b61 \ + --hash=sha256:f08aa300b67f1c012100d8eb62d47129e53d1150f4469fd78a29fa3cb68c66f2 \ + --hash=sha256:f2f4710543abec186aee332d6852ef5ae7ce2e9e807a3da570f36de5a732d88e + # via + # -r release/requirements_buildkite.in + # google-api-core + # googleapis-common-protos +py==1.11.0 \ + --hash=sha256:51c75c4126074b472f746a24399ad32f6053d1b34b68d2fa41e558e6f4a98719 \ + --hash=sha256:607c53218732647dff4acdfcd50cb62615cedf612e72d1724fb1a0cc6405b378 + # via retry +pyasn1==0.5.0 \ + --hash=sha256:87a2121042a1ac9358cabcaf1d07680ff97ee6404333bacca15f76aa8ad01a57 \ + --hash=sha256:97b7290ca68e62a832558ec3976f15cbf911bf5d7c7039d8b861c2a0ece69fde + # via + # oauth2client + # pyasn1-modules + # rsa +pyasn1-modules==0.3.0 \ + --hash=sha256:5bd01446b736eb9d31512a30d46c1ac3395d676c6f3cafa4c03eb54b9925631c \ + --hash=sha256:d3ccd6ed470d9ffbc716be08bd90efbd44d0734bc9303818f7336070984a162d + # via + # google-auth + # oauth2client +pycparser==2.21 \ + --hash=sha256:8ee45429555515e1f6b185e78100aea234072576aa43ab53aefcae078162fca9 \ + --hash=sha256:e644fdec12f7872f86c58ff790da456218b10f863970249516d60a5eaca77206 + # via cffi +pydantic==1.9.2 \ + --hash=sha256:1061c6ee6204f4f5a27133126854948e3b3d51fcc16ead2e5d04378c199b2f44 \ + --hash=sha256:19b5686387ea0d1ea52ecc4cffb71abb21702c5e5b2ac626fd4dbaa0834aa49d \ + --hash=sha256:2bd446bdb7755c3a94e56d7bdfd3ee92396070efa8ef3a34fab9579fe6aa1d84 \ + --hash=sha256:328558c9f2eed77bd8fffad3cef39dbbe3edc7044517f4625a769d45d4cf7555 \ + --hash=sha256:32e0b4fb13ad4db4058a7c3c80e2569adbd810c25e6ca3bbd8b2a9cc2cc871d7 \ + --hash=sha256:3ee0d69b2a5b341fc7927e92cae7ddcfd95e624dfc4870b32a85568bd65e6131 \ + 
--hash=sha256:4aafd4e55e8ad5bd1b19572ea2df546ccace7945853832bb99422a79c70ce9b8 \ + --hash=sha256:4b3946f87e5cef3ba2e7bd3a4eb5a20385fe36521d6cc1ebf3c08a6697c6cfb3 \ + --hash=sha256:4de71c718c9756d679420c69f216776c2e977459f77e8f679a4a961dc7304a56 \ + --hash=sha256:5565a49effe38d51882cb7bac18bda013cdb34d80ac336428e8908f0b72499b0 \ + --hash=sha256:5803ad846cdd1ed0d97eb00292b870c29c1f03732a010e66908ff48a762f20e4 \ + --hash=sha256:5da164119602212a3fe7e3bc08911a89db4710ae51444b4224c2382fd09ad453 \ + --hash=sha256:615661bfc37e82ac677543704437ff737418e4ea04bef9cf11c6d27346606044 \ + --hash=sha256:78a4d6bdfd116a559aeec9a4cfe77dda62acc6233f8b56a716edad2651023e5e \ + --hash=sha256:7d0f183b305629765910eaad707800d2f47c6ac5bcfb8c6397abdc30b69eeb15 \ + --hash=sha256:7ead3cd020d526f75b4188e0a8d71c0dbbe1b4b6b5dc0ea775a93aca16256aeb \ + --hash=sha256:84d76ecc908d917f4684b354a39fd885d69dd0491be175f3465fe4b59811c001 \ + --hash=sha256:8cb0bc509bfb71305d7a59d00163d5f9fc4530f0881ea32c74ff4f74c85f3d3d \ + --hash=sha256:91089b2e281713f3893cd01d8e576771cd5bfdfbff5d0ed95969f47ef6d676c3 \ + --hash=sha256:9c9e04a6cdb7a363d7cb3ccf0efea51e0abb48e180c0d31dca8d247967d85c6e \ + --hash=sha256:a8c5360a0297a713b4123608a7909e6869e1b56d0e96eb0d792c27585d40757f \ + --hash=sha256:afacf6d2a41ed91fc631bade88b1d319c51ab5418870802cedb590b709c5ae3c \ + --hash=sha256:b34ba24f3e2d0b39b43f0ca62008f7ba962cff51efa56e64ee25c4af6eed987b \ + --hash=sha256:bd67cb2c2d9602ad159389c29e4ca964b86fa2f35c2faef54c3eb28b4efd36c8 \ + --hash=sha256:c0f5e142ef8217019e3eef6ae1b6b55f09a7a15972958d44fbd228214cede567 \ + --hash=sha256:cdb4272678db803ddf94caa4f94f8672e9a46bae4a44f167095e4d06fec12979 \ + --hash=sha256:d70916235d478404a3fa8c997b003b5f33aeac4686ac1baa767234a0f8ac2326 \ + --hash=sha256:d8ce3fb0841763a89322ea0432f1f59a2d3feae07a63ea2c958b2315e1ae8adb \ + --hash=sha256:e0b214e57623a535936005797567231a12d0da0c29711eb3514bc2b3cd008d0f \ + --hash=sha256:e631c70c9280e3129f071635b81207cad85e6c08e253539467e4ead0e5b219aa \ + 
--hash=sha256:e78578f0c7481c850d1c969aca9a65405887003484d24f6110458fb02cca7747 \ + --hash=sha256:f0ca86b525264daa5f6b192f216a0d1e860b7383e3da1c65a1908f9c02f42801 \ + --hash=sha256:f1a68f4f65a9ee64b6ccccb5bf7e17db07caebd2730109cb8a95863cfa9c4e55 \ + --hash=sha256:fafe841be1103f340a24977f61dee76172e4ae5f647ab9e7fd1e1fca51524f08 \ + --hash=sha256:ff68fc85355532ea77559ede81f35fff79a6a5543477e168ab3a381887caea76 + # via + # -r release/requirements_buildkite.in + # anyscale +pygments==2.15.1 \ + --hash=sha256:8ace4d3c1dd481894b2005f560ead0f9f19ee64fe983366be1a21e171d12775c \ + --hash=sha256:db2db3deb4b4179f399a09054b023b6a586b76499d36965813c71aa8ed7b5fd1 + # via rich +pyparsing==3.0.9 \ + --hash=sha256:2b020ecf7d21b687f219b71ecad3631f644a47f01403fa1d1036b0c6416d70fb \ + --hash=sha256:5026bae9a10eeaefb61dab2f09052b9f4307d44aee4eda64b309723d8d206bbc + # via httplib2 +pyrsistent==0.19.3 \ + --hash=sha256:016ad1afadf318eb7911baa24b049909f7f3bb2c5b1ed7b6a8f21db21ea3faa8 \ + --hash=sha256:1a2994773706bbb4995c31a97bc94f1418314923bd1048c6d964837040376440 \ + --hash=sha256:20460ac0ea439a3e79caa1dbd560344b64ed75e85d8703943e0b66c2a6150e4a \ + --hash=sha256:3311cb4237a341aa52ab8448c27e3a9931e2ee09561ad150ba94e4cfd3fc888c \ + --hash=sha256:3a8cb235fa6d3fd7aae6a4f1429bbb1fec1577d978098da1252f0489937786f3 \ + --hash=sha256:3ab2204234c0ecd8b9368dbd6a53e83c3d4f3cab10ecaf6d0e772f456c442393 \ + --hash=sha256:42ac0b2f44607eb92ae88609eda931a4f0dfa03038c44c772e07f43e738bcac9 \ + --hash=sha256:49c32f216c17148695ca0e02a5c521e28a4ee6c5089f97e34fe24163113722da \ + --hash=sha256:4b774f9288dda8d425adb6544e5903f1fb6c273ab3128a355c6b972b7df39dcf \ + --hash=sha256:4c18264cb84b5e68e7085a43723f9e4c1fd1d935ab240ce02c0324a8e01ccb64 \ + --hash=sha256:5a474fb80f5e0d6c9394d8db0fc19e90fa540b82ee52dba7d246a7791712f74a \ + --hash=sha256:64220c429e42a7150f4bfd280f6f4bb2850f95956bde93c6fda1b70507af6ef3 \ + --hash=sha256:878433581fc23e906d947a6814336eee031a00e6defba224234169ae3d3d6a98 \ + 
--hash=sha256:99abb85579e2165bd8522f0c0138864da97847875ecbd45f3e7e2af569bfc6f2 \ + --hash=sha256:a2471f3f8693101975b1ff85ffd19bb7ca7dd7c38f8a81701f67d6b4f97b87d8 \ + --hash=sha256:aeda827381f5e5d65cced3024126529ddc4289d944f75e090572c77ceb19adbf \ + --hash=sha256:b735e538f74ec31378f5a1e3886a26d2ca6351106b4dfde376a26fc32a044edc \ + --hash=sha256:c147257a92374fde8498491f53ffa8f4822cd70c0d85037e09028e478cababb7 \ + --hash=sha256:c4db1bd596fefd66b296a3d5d943c94f4fac5bcd13e99bffe2ba6a759d959a28 \ + --hash=sha256:c74bed51f9b41c48366a286395c67f4e894374306b197e62810e0fdaf2364da2 \ + --hash=sha256:c9bb60a40a0ab9aba40a59f68214eed5a29c6274c83b2cc206a359c4a89fa41b \ + --hash=sha256:cc5d149f31706762c1f8bda2e8c4f8fead6e80312e3692619a75301d3dbb819a \ + --hash=sha256:ccf0d6bd208f8111179f0c26fdf84ed7c3891982f2edaeae7422575f47e66b64 \ + --hash=sha256:e42296a09e83028b3476f7073fcb69ffebac0e66dbbfd1bd847d61f74db30f19 \ + --hash=sha256:e8f2b814a3dc6225964fa03d8582c6e0b6650d68a232df41e3cc1b66a5d2f8d1 \ + --hash=sha256:f0774bf48631f3a20471dd7c5989657b639fd2d285b861237ea9e82c36a415a9 \ + --hash=sha256:f0e7c4b2f77593871e918be000b96c8107da48444d57005b6a6bc61fb4331b2c + # via jsonschema +python-dateutil==2.8.2 \ + --hash=sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86 \ + --hash=sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9 + # via + # anyscale + # botocore +pytz-deprecation-shim==0.1.0.post0 \ + --hash=sha256:8314c9692a636c8eb3bda879b9f119e350e93223ae83e70e80c31675a0fdc1a6 \ + --hash=sha256:af097bae1b616dde5c5744441e2ddc69e74dfdcb0c263129610d85b87445a59d + # via tzlocal +pyyaml==6.0 \ + --hash=sha256:01b45c0191e6d66c470b6cf1b9531a771a83c1c4208272ead47a3ae4f2f603bf \ + --hash=sha256:0283c35a6a9fbf047493e3a0ce8d79ef5030852c51e9d911a27badfde0605293 \ + --hash=sha256:055d937d65826939cb044fc8c9b08889e8c743fdc6a32b33e2390f66013e449b \ + --hash=sha256:07751360502caac1c067a8132d150cf3d61339af5691fe9e87803040dbc5db57 \ + 
--hash=sha256:0b4624f379dab24d3725ffde76559cff63d9ec94e1736b556dacdfebe5ab6d4b \ + --hash=sha256:0ce82d761c532fe4ec3f87fc45688bdd3a4c1dc5e0b4a19814b9009a29baefd4 \ + --hash=sha256:1e4747bc279b4f613a09eb64bba2ba602d8a6664c6ce6396a4d0cd413a50ce07 \ + --hash=sha256:213c60cd50106436cc818accf5baa1aba61c0189ff610f64f4a3e8c6726218ba \ + --hash=sha256:231710d57adfd809ef5d34183b8ed1eeae3f76459c18fb4a0b373ad56bedcdd9 \ + --hash=sha256:277a0ef2981ca40581a47093e9e2d13b3f1fbbeffae064c1d21bfceba2030287 \ + --hash=sha256:2cd5df3de48857ed0544b34e2d40e9fac445930039f3cfe4bcc592a1f836d513 \ + --hash=sha256:40527857252b61eacd1d9af500c3337ba8deb8fc298940291486c465c8b46ec0 \ + --hash=sha256:432557aa2c09802be39460360ddffd48156e30721f5e8d917f01d31694216782 \ + --hash=sha256:473f9edb243cb1935ab5a084eb238d842fb8f404ed2193a915d1784b5a6b5fc0 \ + --hash=sha256:48c346915c114f5fdb3ead70312bd042a953a8ce5c7106d5bfb1a5254e47da92 \ + --hash=sha256:50602afada6d6cbfad699b0c7bb50d5ccffa7e46a3d738092afddc1f9758427f \ + --hash=sha256:68fb519c14306fec9720a2a5b45bc9f0c8d1b9c72adf45c37baedfcd949c35a2 \ + --hash=sha256:77f396e6ef4c73fdc33a9157446466f1cff553d979bd00ecb64385760c6babdc \ + --hash=sha256:81957921f441d50af23654aa6c5e5eaf9b06aba7f0a19c18a538dc7ef291c5a1 \ + --hash=sha256:819b3830a1543db06c4d4b865e70ded25be52a2e0631ccd2f6a47a2822f2fd7c \ + --hash=sha256:897b80890765f037df3403d22bab41627ca8811ae55e9a722fd0392850ec4d86 \ + --hash=sha256:98c4d36e99714e55cfbaaee6dd5badbc9a1ec339ebfc3b1f52e293aee6bb71a4 \ + --hash=sha256:9df7ed3b3d2e0ecfe09e14741b857df43adb5a3ddadc919a2d94fbdf78fea53c \ + --hash=sha256:9fa600030013c4de8165339db93d182b9431076eb98eb40ee068700c9c813e34 \ + --hash=sha256:a80a78046a72361de73f8f395f1f1e49f956c6be882eed58505a15f3e430962b \ + --hash=sha256:afa17f5bc4d1b10afd4466fd3a44dc0e245382deca5b3c353d8b757f9e3ecb8d \ + --hash=sha256:b3d267842bf12586ba6c734f89d1f5b871df0273157918b0ccefa29deb05c21c \ + --hash=sha256:b5b9eccad747aabaaffbc6064800670f0c297e52c12754eb1d976c57e4f74dcb \ + 
--hash=sha256:bfaef573a63ba8923503d27530362590ff4f576c626d86a9fed95822a8255fd7 \ + --hash=sha256:c5687b8d43cf58545ade1fe3e055f70eac7a5a1a0bf42824308d868289a95737 \ + --hash=sha256:cba8c411ef271aa037d7357a2bc8f9ee8b58b9965831d9e51baf703280dc73d3 \ + --hash=sha256:d15a181d1ecd0d4270dc32edb46f7cb7733c7c508857278d3d378d14d606db2d \ + --hash=sha256:d4b0ba9512519522b118090257be113b9468d804b19d63c71dbcf4a48fa32358 \ + --hash=sha256:d4db7c7aef085872ef65a8fd7d6d09a14ae91f691dec3e87ee5ee0539d516f53 \ + --hash=sha256:d4eccecf9adf6fbcc6861a38015c2a64f38b9d94838ac1810a9023a0609e1b78 \ + --hash=sha256:d67d839ede4ed1b28a4e8909735fc992a923cdb84e618544973d7dfc71540803 \ + --hash=sha256:daf496c58a8c52083df09b80c860005194014c3698698d1a57cbcfa182142a3a \ + --hash=sha256:dbad0e9d368bb989f4515da330b88a057617d16b6a8245084f1b05400f24609f \ + --hash=sha256:e61ceaab6f49fb8bdfaa0f92c4b57bcfbea54c09277b1b4f7ac376bfb7a7c174 \ + --hash=sha256:f84fbc98b019fef2ee9a1cb3ce93e3187a6df0b2538a651bfb890254ba9f90b5 + # via + # -r release/requirements_buildkite.in + # anyscale +requests==2.28.2 \ + --hash=sha256:64299f4909223da747622c030b781c0d7811e359c37124b4bd368fb8c6518baa \ + --hash=sha256:98b1b2782e3c6c4904938b84c0eb932721069dfdb9134313beff7c83c2df24bf + # via + # -r release/requirements_buildkite.in + # anyscale + # google-api-core + # google-cloud-storage +retry==0.9.2 \ + --hash=sha256:ccddf89761fa2c726ab29391837d4327f819ea14d244c232a1d24c67a2f98606 \ + --hash=sha256:f8bfa8b99b69c4506d6f5bd3b0aabf77f98cdb17f3c9fc3f5ca820033336fba4 + # via -r release/requirements_buildkite.in +rich==13.3.4 \ + --hash=sha256:22b74cae0278fd5086ff44144d3813be1cedc9115bdfabbfefd86400cb88b20a \ + --hash=sha256:b5d573e13605423ec80bdd0cd5f8541f7844a0e71a13f74cf454ccb2f490708b + # via anyscale +rsa==4.9 \ + --hash=sha256:90260d9058e514786967344d0ef75fa8727eed8a7d2e43ce9f4bcf1b536174f7 \ + --hash=sha256:e38464a49c6c85d7f1351b0126661487a7e0a14a50f1675ec50eb34d4f20ef21 + # via + # google-auth + # oauth2client 
+s3transfer==0.6.0 \ + --hash=sha256:06176b74f3a15f61f1b4f25a1fc29a4429040b7647133a463da8fa5bd28d5ecd \ + --hash=sha256:2ed07d3866f523cc561bf4a00fc5535827981b117dd7876f036b0c1aca42c947 + # via boto3 +six==1.16.0 \ + --hash=sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926 \ + --hash=sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254 + # via + # anyscale + # google-auth + # halo + # oauth2client + # python-dateutil +smart-open==6.3.0 \ + --hash=sha256:b4c9ae193ad6d3e7add50944b86afa0d150bd821ab8ec21edb26d9a06b66f6a8 \ + --hash=sha256:d5238825fe9a9340645fac3d75b287c08fbb99fb2b422477de781c9f5f09e019 + # via anyscale +smmap==5.0.0 \ + --hash=sha256:2aba19d6a040e78d8b09de5c57e96207b09ed71d8e55ce0959eeee6c8e190d94 \ + --hash=sha256:c840e62059cd3be204b0c9c9f74be2c09d5648eddd4580d9314c3ecde0b30936 + # via gitdb +spinners==0.0.24 \ + --hash=sha256:1eb6aeb4781d72ab42ed8a01dcf20f3002bf50740d7154d12fb8c9769bf9e27f \ + --hash=sha256:2fa30d0b72c9650ad12bbe031c9943b8d441e41b4f5602b0ec977a19f3290e98 + # via halo +tabulate==0.9.0 \ + --hash=sha256:0095b12bf5966de529c0feb1fa08671671b3368eec77d7ef7ab114be2c068b3c \ + --hash=sha256:024ca478df22e9340661486f85298cff5f6dcdba14f3813e8830015b9ed1948f + # via anyscale +termcolor==2.2.0 \ + --hash=sha256:91ddd848e7251200eac969846cbae2dacd7d71c2871e92733289e7e3666f48e7 \ + --hash=sha256:dfc8ac3f350788f23b2947b3e6cfa5a53b630b612e6cd8965a015a776020b99a + # via halo +tqdm==4.65.0 \ + --hash=sha256:1871fb68a86b8fb3b59ca4cdd3dcccbc7e6d613eeed31f4c332531977b89beb5 \ + --hash=sha256:c4f53a17fe37e132815abceec022631be8ffe1b9381c2e6e30aa70edc99e9671 + # via anyscale +typing-extensions==4.5.0 \ + --hash=sha256:5cb5f4a79139d699607b3ef622a1dedafa84e115ab0024e0d9c044a9479ca7cb \ + --hash=sha256:fb33085c39dd998ac16d1431ebc293a8b3eedd00fd4a32de0ff79002c19511b4 + # via + # aiohttp + # argon2-cffi + # async-timeout + # gitpython + # importlib-metadata + # jsonschema + # markdown-it-py + # pydantic + # rich + # yarl 
+tzdata==2023.3 \ + --hash=sha256:11ef1e08e54acb0d4f95bdb1be05da659673de4acbd21bf9c69e94cc5e907a3a \ + --hash=sha256:7e65763eef3120314099b6939b5546db7adce1e7d6f2e179e3df563c70511eda + # via pytz-deprecation-shim +tzlocal==4.3 \ + --hash=sha256:3f21d09e1b2aa9f2dacca12da240ca37de3ba5237a93addfd6d593afe9073355 \ + --hash=sha256:b44c4388f3d34f25862cfbb387578a4d70fec417649da694a132f628a23367e2 + # via anyscale +urllib3==1.26.15 \ + --hash=sha256:8a388717b9476f934a21484e8c8e61875ab60644d29b9b39e11e4b9dc1c6b305 \ + --hash=sha256:aa751d169e23c7479ce47a0cb0da579e3ede798f994f5816a74e4f4500dcea42 + # via + # anyscale + # botocore + # requests +wrapt==1.15.0 \ + --hash=sha256:02fce1852f755f44f95af51f69d22e45080102e9d00258053b79367d07af39c0 \ + --hash=sha256:077ff0d1f9d9e4ce6476c1a924a3332452c1406e59d90a2cf24aeb29eeac9420 \ + --hash=sha256:078e2a1a86544e644a68422f881c48b84fef6d18f8c7a957ffd3f2e0a74a0d4a \ + --hash=sha256:0970ddb69bba00670e58955f8019bec4a42d1785db3faa043c33d81de2bf843c \ + --hash=sha256:1286eb30261894e4c70d124d44b7fd07825340869945c79d05bda53a40caa079 \ + --hash=sha256:21f6d9a0d5b3a207cdf7acf8e58d7d13d463e639f0c7e01d82cdb671e6cb7923 \ + --hash=sha256:230ae493696a371f1dbffaad3dafbb742a4d27a0afd2b1aecebe52b740167e7f \ + --hash=sha256:26458da5653aa5b3d8dc8b24192f574a58984c749401f98fff994d41d3f08da1 \ + --hash=sha256:2cf56d0e237280baed46f0b5316661da892565ff58309d4d2ed7dba763d984b8 \ + --hash=sha256:2e51de54d4fb8fb50d6ee8327f9828306a959ae394d3e01a1ba8b2f937747d86 \ + --hash=sha256:2fbfbca668dd15b744418265a9607baa970c347eefd0db6a518aaf0cfbd153c0 \ + --hash=sha256:38adf7198f8f154502883242f9fe7333ab05a5b02de7d83aa2d88ea621f13364 \ + --hash=sha256:3a8564f283394634a7a7054b7983e47dbf39c07712d7b177b37e03f2467a024e \ + --hash=sha256:3abbe948c3cbde2689370a262a8d04e32ec2dd4f27103669a45c6929bcdbfe7c \ + --hash=sha256:3bbe623731d03b186b3d6b0d6f51865bf598587c38d6f7b0be2e27414f7f214e \ + --hash=sha256:40737a081d7497efea35ab9304b829b857f21558acfc7b3272f908d33b0d9d4c \ + 
--hash=sha256:41d07d029dd4157ae27beab04d22b8e261eddfc6ecd64ff7000b10dc8b3a5727 \ + --hash=sha256:46ed616d5fb42f98630ed70c3529541408166c22cdfd4540b88d5f21006b0eff \ + --hash=sha256:493d389a2b63c88ad56cdc35d0fa5752daac56ca755805b1b0c530f785767d5e \ + --hash=sha256:4ff0d20f2e670800d3ed2b220d40984162089a6e2c9646fdb09b85e6f9a8fc29 \ + --hash=sha256:54accd4b8bc202966bafafd16e69da9d5640ff92389d33d28555c5fd4f25ccb7 \ + --hash=sha256:56374914b132c702aa9aa9959c550004b8847148f95e1b824772d453ac204a72 \ + --hash=sha256:578383d740457fa790fdf85e6d346fda1416a40549fe8db08e5e9bd281c6a475 \ + --hash=sha256:58d7a75d731e8c63614222bcb21dd992b4ab01a399f1f09dd82af17bbfc2368a \ + --hash=sha256:5c5aa28df055697d7c37d2099a7bc09f559d5053c3349b1ad0c39000e611d317 \ + --hash=sha256:5fc8e02f5984a55d2c653f5fea93531e9836abbd84342c1d1e17abc4a15084c2 \ + --hash=sha256:63424c681923b9f3bfbc5e3205aafe790904053d42ddcc08542181a30a7a51bd \ + --hash=sha256:64b1df0f83706b4ef4cfb4fb0e4c2669100fd7ecacfb59e091fad300d4e04640 \ + --hash=sha256:74934ebd71950e3db69960a7da29204f89624dde411afbfb3b4858c1409b1e98 \ + --hash=sha256:75669d77bb2c071333417617a235324a1618dba66f82a750362eccbe5b61d248 \ + --hash=sha256:75760a47c06b5974aa5e01949bf7e66d2af4d08cb8c1d6516af5e39595397f5e \ + --hash=sha256:76407ab327158c510f44ded207e2f76b657303e17cb7a572ffe2f5a8a48aa04d \ + --hash=sha256:76e9c727a874b4856d11a32fb0b389afc61ce8aaf281ada613713ddeadd1cfec \ + --hash=sha256:77d4c1b881076c3ba173484dfa53d3582c1c8ff1f914c6461ab70c8428b796c1 \ + --hash=sha256:780c82a41dc493b62fc5884fb1d3a3b81106642c5c5c78d6a0d4cbe96d62ba7e \ + --hash=sha256:7dc0713bf81287a00516ef43137273b23ee414fe41a3c14be10dd95ed98a2df9 \ + --hash=sha256:7eebcdbe3677e58dd4c0e03b4f2cfa346ed4049687d839adad68cc38bb559c92 \ + --hash=sha256:896689fddba4f23ef7c718279e42f8834041a21342d95e56922e1c10c0cc7afb \ + --hash=sha256:96177eb5645b1c6985f5c11d03fc2dbda9ad24ec0f3a46dcce91445747e15094 \ + --hash=sha256:96e25c8603a155559231c19c0349245eeb4ac0096fe3c1d0be5c47e075bd4f46 \ + 
--hash=sha256:9d37ac69edc5614b90516807de32d08cb8e7b12260a285ee330955604ed9dd29 \ + --hash=sha256:9ed6aa0726b9b60911f4aed8ec5b8dd7bf3491476015819f56473ffaef8959bd \ + --hash=sha256:a487f72a25904e2b4bbc0817ce7a8de94363bd7e79890510174da9d901c38705 \ + --hash=sha256:a4cbb9ff5795cd66f0066bdf5947f170f5d63a9274f99bdbca02fd973adcf2a8 \ + --hash=sha256:a74d56552ddbde46c246b5b89199cb3fd182f9c346c784e1a93e4dc3f5ec9975 \ + --hash=sha256:a89ce3fd220ff144bd9d54da333ec0de0399b52c9ac3d2ce34b569cf1a5748fb \ + --hash=sha256:abd52a09d03adf9c763d706df707c343293d5d106aea53483e0ec8d9e310ad5e \ + --hash=sha256:abd8f36c99512755b8456047b7be10372fca271bf1467a1caa88db991e7c421b \ + --hash=sha256:af5bd9ccb188f6a5fdda9f1f09d9f4c86cc8a539bd48a0bfdc97723970348418 \ + --hash=sha256:b02f21c1e2074943312d03d243ac4388319f2456576b2c6023041c4d57cd7019 \ + --hash=sha256:b06fa97478a5f478fb05e1980980a7cdf2712015493b44d0c87606c1513ed5b1 \ + --hash=sha256:b0724f05c396b0a4c36a3226c31648385deb6a65d8992644c12a4963c70326ba \ + --hash=sha256:b130fe77361d6771ecf5a219d8e0817d61b236b7d8b37cc045172e574ed219e6 \ + --hash=sha256:b56d5519e470d3f2fe4aa7585f0632b060d532d0696c5bdfb5e8319e1d0f69a2 \ + --hash=sha256:b67b819628e3b748fd3c2192c15fb951f549d0f47c0449af0764d7647302fda3 \ + --hash=sha256:ba1711cda2d30634a7e452fc79eabcadaffedf241ff206db2ee93dd2c89a60e7 \ + --hash=sha256:bbeccb1aa40ab88cd29e6c7d8585582c99548f55f9b2581dfc5ba68c59a85752 \ + --hash=sha256:bd84395aab8e4d36263cd1b9308cd504f6cf713b7d6d3ce25ea55670baec5416 \ + --hash=sha256:c99f4309f5145b93eca6e35ac1a988f0dc0a7ccf9ccdcd78d3c0adf57224e62f \ + --hash=sha256:ca1cccf838cd28d5a0883b342474c630ac48cac5df0ee6eacc9c7290f76b11c1 \ + --hash=sha256:cd525e0e52a5ff16653a3fc9e3dd827981917d34996600bbc34c05d048ca35cc \ + --hash=sha256:cdb4f085756c96a3af04e6eca7f08b1345e94b53af8921b25c72f096e704e145 \ + --hash=sha256:ce42618f67741d4697684e501ef02f29e758a123aa2d669e2d964ff734ee00ee \ + --hash=sha256:d06730c6aed78cee4126234cf2d071e01b44b915e725a6cb439a879ec9754a3a \ + 
--hash=sha256:d5fe3e099cf07d0fb5a1e23d399e5d4d1ca3e6dfcbe5c8570ccff3e9208274f7 \ + --hash=sha256:d6bcbfc99f55655c3d93feb7ef3800bd5bbe963a755687cbf1f490a71fb7794b \ + --hash=sha256:d787272ed958a05b2c86311d3a4135d3c2aeea4fc655705f074130aa57d71653 \ + --hash=sha256:e169e957c33576f47e21864cf3fc9ff47c223a4ebca8960079b8bd36cb014fd0 \ + --hash=sha256:e20076a211cd6f9b44a6be58f7eeafa7ab5720eb796975d0c03f05b47d89eb90 \ + --hash=sha256:e826aadda3cae59295b95343db8f3d965fb31059da7de01ee8d1c40a60398b29 \ + --hash=sha256:eef4d64c650f33347c1f9266fa5ae001440b232ad9b98f1f43dfe7a79435c0a6 \ + --hash=sha256:f2e69b3ed24544b0d3dbe2c5c0ba5153ce50dcebb576fdc4696d52aa22db6034 \ + --hash=sha256:f87ec75864c37c4c6cb908d282e1969e79763e0d9becdfe9fe5473b7bb1e5f09 \ + --hash=sha256:fbec11614dba0424ca72f4e8ba3c420dba07b4a7c206c8c8e4e73f2e98f4c559 \ + --hash=sha256:fd69666217b62fa5d7c6aa88e507493a34dec4fa20c5bd925e4bc12fce586639 + # via anyscale +yarl==1.9.1 \ + --hash=sha256:01a073c9175481dfed6b40704a1b67af5a9435fc4a58a27d35fd6b303469b0c7 \ + --hash=sha256:01cf88cb80411978a14aa49980968c1aeb7c18a90ac978c778250dd234d8e0ba \ + --hash=sha256:08c8599d6aa8a24425f8635f6c06fa8726afe3be01c8e53e236f519bcfa5db5b \ + --hash=sha256:098bdc06ffb4db39c73883325b8c738610199f5f12e85339afedf07e912a39af \ + --hash=sha256:09c56a32c26e24ef98d5757c5064e252836f621f9a8b42737773aa92936b8e08 \ + --hash=sha256:13a1ad1f35839b3bb5226f59816b71e243d95d623f5b392efaf8820ddb2b3cd5 \ + --hash=sha256:1baf8cdaaab65d9ccedbf8748d626ad648b74b0a4d033e356a2f3024709fb82f \ + --hash=sha256:1d7a0075a55380b19aa43b9e8056e128b058460d71d75018a4f9d60ace01e78c \ + --hash=sha256:27efc2e324f72df02818cd72d7674b1f28b80ab49f33a94f37c6473c8166ce49 \ + --hash=sha256:307a782736ebf994e7600dcaeea3b3113083584da567272f2075f1540919d6b3 \ + --hash=sha256:395ea180257a3742d09dcc5071739682a95f7874270ebe3982d6696caec75be0 \ + --hash=sha256:39a7a9108e9fc633ae381562f8f0355bb4ba00355218b5fb19cf5263fcdbfa68 \ + 
--hash=sha256:3abe37fd89a93ebe0010417ca671f422fa6fcffec54698f623b09f46b4d4a512 \ + --hash=sha256:4295790981630c4dab9d6de7b0f555a4c8defe3ed7704a8e9e595a321e59a0f5 \ + --hash=sha256:44fa6158e6b4b8ccfa2872c3900a226b29e8ce543ce3e48aadc99816afa8874d \ + --hash=sha256:46c4010de941e2e1365c07fb4418ddca10fcff56305a6067f5ae857f8c98f3a7 \ + --hash=sha256:4764114e261fe49d5df9b316b3221493d177247825c735b2aae77bc2e340d800 \ + --hash=sha256:4d817593d345fefda2fae877accc8a0d9f47ada57086da6125fa02a62f6d1a94 \ + --hash=sha256:518a92a34c741836a315150460b5c1c71ae782d569eabd7acf53372e437709f7 \ + --hash=sha256:56956b13ec275de31fe4fb991510b735c4fb3e1b01600528c952b9ac90464430 \ + --hash=sha256:575975d28795a61e82c85f114c02333ca54cbd325fd4e4b27598c9832aa732e7 \ + --hash=sha256:5ce0bcab7ec759062c818d73837644cde567ab8aa1e0d6c45db38dfb7c284441 \ + --hash=sha256:5faf3ec98747318cb980aaf9addf769da68a66431fc203a373d95d7ee9c1fbb4 \ + --hash=sha256:65d952e464df950eed32bb5dcbc1b4443c7c2de4d7abd7265b45b1b3b27f5fa2 \ + --hash=sha256:6b09cce412386ea9b4dda965d8e78d04ac5b5792b2fa9cced3258ec69c7d1c16 \ + --hash=sha256:6cdb47cbbacae8e1d7941b0d504d0235d686090eef5212ca2450525905e9cf02 \ + --hash=sha256:6cf47fe9df9b1ededc77e492581cdb6890a975ad96b4172e1834f1b8ba0fc3ba \ + --hash=sha256:73a4b46689f2d59c8ec6b71c9a0cdced4e7863dd6eb98a8c30ea610e191f9e1c \ + --hash=sha256:74390c2318d066962500045aa145f5412169bce842e734b8c3e6e3750ad5b817 \ + --hash=sha256:75676110bce59944dd48fd18d0449bd37eaeb311b38a0c768f7670864b5f8b68 \ + --hash=sha256:78755ce43b6e827e65ec0c68be832f86d059fcf05d4b33562745ebcfa91b26b1 \ + --hash=sha256:791357d537a09a194f92b834f28c98d074e7297bac0a8f1d5b458a906cafa17c \ + --hash=sha256:85aa6fd779e194901386709e0eedd45710b68af2709f82a84839c44314b68c10 \ + --hash=sha256:88f6413ff5edfb9609e2769e32ce87a62353e66e75d264bf0eaad26fb9daa8f2 \ + --hash=sha256:89099c887338608da935ba8bee027564a94f852ac40e472de15d8309517ad5fe \ + --hash=sha256:89da1fd6068553e3a333011cc17ad91c414b2100c32579ddb51517edc768b49c \ + 
--hash=sha256:8c72a1dc7e2ea882cd3df0417c808ad3b69e559acdc43f3b096d67f2fb801ada \ + --hash=sha256:90ebaf448b5f048352ec7c76cb8d452df30c27cb6b8627dfaa9cf742a14f141a \ + --hash=sha256:92a101f6d5a9464e86092adc36cd40ef23d18a25bfb1eb32eaeb62edc22776bb \ + --hash=sha256:92e37999e36f9f3ded78e9d839face6baa2abdf9344ea8ed2735f495736159de \ + --hash=sha256:97d76a3128f48fa1c721ef8a50e2c2f549296b2402dc8a8cde12ff60ed922f53 \ + --hash=sha256:9ba5a18c4fbd408fe49dc5da85478a76bc75c1ce912d7fd7b43ed5297c4403e1 \ + --hash=sha256:9bb794882818fae20ff65348985fdf143ea6dfaf6413814db1848120db8be33e \ + --hash=sha256:a21789bdf28549d4eb1de6910cabc762c9f6ae3eef85efc1958197c1c6ef853b \ + --hash=sha256:a8b8d4b478a9862447daef4cafc89d87ea4ed958672f1d11db7732b77ead49cc \ + --hash=sha256:ac8e593df1fbea820da7676929f821a0c7c2cecb8477d010254ce8ed54328ea8 \ + --hash=sha256:b20a5ddc4e243cbaa54886bfe9af6ffc4ba4ef58f17f1bb691e973eb65bba84d \ + --hash=sha256:b2b2382d59dec0f1fdca18ea429c4c4cee280d5e0dbc841180abb82e188cf6e9 \ + --hash=sha256:b3b5f8da07a21f2e57551f88a6709c2d340866146cf7351e5207623cfe8aad16 \ + --hash=sha256:b5d5fb6c94b620a7066a3adb7c246c87970f453813979818e4707ac32ce4d7bd \ + --hash=sha256:b63d41e0eecf3e3070d44f97456cf351fff7cb960e97ecb60a936b877ff0b4f6 \ + --hash=sha256:b86e98c3021b7e2740d8719bf074301361bf2f51221ca2765b7a58afbfbd9042 \ + --hash=sha256:bab67d041c78e305ff3eef5e549304d843bd9b603c8855b68484ee663374ce15 \ + --hash=sha256:c3ca8d71b23bdf164b36d06df2298ec8a5bd3de42b17bf3e0e8e6a7489195f2c \ + --hash=sha256:ca14b84091700ae7c1fcd3a6000bd4ec1a3035009b8bcb94f246741ca840bb22 \ + --hash=sha256:d21887cbcf6a3cc5951662d8222bc9c04e1b1d98eebe3bb659c3a04ed49b0eec \ + --hash=sha256:d5c407e530cf2979ea383885516ae79cc4f3c3530623acf5e42daf521f5c2564 \ + --hash=sha256:d966cd59df9a4b218480562e8daab39e87e746b78a96add51a3ab01636fc4291 \ + --hash=sha256:df747104ef27ab1aa9a1145064fa9ea26ad8cf24bfcbdba7db7abf0f8b3676b9 \ + --hash=sha256:e124b283a04cc06d22443cae536f93d86cd55108fa369f22b8fe1f2288b2fe1c \ + 
--hash=sha256:e2f01351b7809182822b21061d2a4728b7b9e08f4585ba90ee4c5c4d3faa0812 \ + --hash=sha256:e7ddebeabf384099814353a2956ed3ab5dbaa6830cc7005f985fcb03b5338f05 \ + --hash=sha256:e9fe3a1c073ab80a28a06f41d2b623723046709ed29faf2c56bea41848597d86 \ + --hash=sha256:ecaa5755a39f6f26079bf13f336c67af589c222d76b53cd3824d3b684b84d1f1 \ + --hash=sha256:ecad20c3ef57c513dce22f58256361d10550a89e8eaa81d5082f36f8af305375 \ + --hash=sha256:eed9827033b7f67ad12cb70bd0cb59d36029144a7906694317c2dbf5c9eb5ddd \ + --hash=sha256:ef7e2f6c47c41e234600a02e1356b799761485834fe35d4706b0094cb3a587ee \ + --hash=sha256:efec77851231410125cb5be04ec96fa4a075ca637f415a1f2d2c900b09032a8a \ + --hash=sha256:f0cd87949d619157a0482c6c14e5011f8bf2bc0b91cb5087414d9331f4ef02dd \ + --hash=sha256:f206adb89424dca4a4d0b31981869700e44cd62742527e26d6b15a510dd410a2 \ + --hash=sha256:f5bcb80006efe9bf9f49ae89711253dd06df8053ff814622112a9219346566a7 \ + --hash=sha256:f76edb386178a54ea7ceffa798cb830c3c22ab50ea10dfb25dc952b04848295f \ + --hash=sha256:f878a78ed2ccfbd973cab46dd0933ecd704787724db23979e5731674d76eb36f \ + --hash=sha256:f8e73f526140c1c32f5fca4cd0bc3b511a1abcd948f45b2a38a95e4edb76ca72 + # via aiohttp +zipp==3.15.0 \ + --hash=sha256:112929ad649da941c23de50f356a2b5570c954b65150642bccdd66bf194d224b \ + --hash=sha256:48904fc76a60e542af151aded95726c1a5c34ed43ab4134b597665c86d7ad556 + # via + # importlib-metadata + # importlib-resources From 3aa73636ae104bbaf9be7528a111e75b5d4484c0 Mon Sep 17 00:00:00 2001 From: Cheng Su Date: Fri, 28 Apr 2023 14:28:19 -0700 Subject: [PATCH 150/424] [Data] fix CI test failing with dashboard error (#34839) Deflake the CI test to not specify `cluster`. The stack trace is: ``` > raise RuntimeError(message) E RuntimeError: The condition wasn't met before the timeout expired. 
Last exception: Traceback (most recent call last): E File "/ray/python/ray/_private/test_utils.py", line 528, in wait_for_condition E if condition_predictor(**kwargs): E File "/root/.cache/bazel/_bazel_root/5fe90af4e7d1ed9fcf52f59e39e126f5/execroot/com_github_ray_project_ray/bazel-out/k8-opt/bin/python/ray/data/test_formats.runfiles/com_github_ray_project_ray/python/ray/data/tests/test_formats.py", line 350, in verify_get_read_tasks E address=cluster.address, filters=[("name", "=", "_get_read_tasks")] E File "/ray/python/ray/experimental/state/api.py", line 1011, in list_tasks E return StateApiClient(address=address).list( E File "/ray/python/ray/experimental/state/api.py", line 145, in __init__ E api_server_url = get_address_for_submission_client(address) E File "/ray/python/ray/dashboard/utils.py", line 653, in get_address_for_submission_client E address = ray_address_to_api_server_url(address) E File "/ray/python/ray/dashboard/utils.py", line 609, in ray_address_to_api_server_url E num_retries=20, E File "/ray/python/ray/_private/utils.py", line 1433, in internal_kv_get_with_retry E f"Could not read '{key.decode()}' from GCS. Did GCS start successfully?" E ConnectionError: Could not read 'dashboard' from GCS. Did GCS start successfully? ``` --- python/ray/data/tests/test_formats.py | 13 +++---------- 1 file changed, 3 insertions(+), 10 deletions(-) diff --git a/python/ray/data/tests/test_formats.py b/python/ray/data/tests/test_formats.py index 5658e5a425f5..6f48658a041c 100644 --- a/python/ray/data/tests/test_formats.py +++ b/python/ray/data/tests/test_formats.py @@ -329,13 +329,8 @@ def test_read_s3_file_error(shutdown_only, s3_path): # tests should only be carefully reordered to retain this invariant! 
-def test_get_read_tasks(ray_start_cluster): - ray.shutdown() - cluster = ray_start_cluster - cluster.add_node(num_cpus=4) - cluster.add_node(num_cpus=4) - cluster.wait_for_nodes() - ray.init(cluster.address) +def test_get_read_tasks(shutdown_only): + ray.init() head_node_id = ray.get_runtime_context().get_node_id() @@ -346,9 +341,7 @@ def test_get_read_tasks(ray_start_cluster): def verify_get_read_tasks(): from ray.experimental.state.api import list_tasks - task_states = list_tasks( - address=cluster.address, filters=[("name", "=", "_get_read_tasks")] - ) + task_states = list_tasks(filters=[("name", "=", "_get_read_tasks")]) # Verify only one task being executed on same node. assert len(task_states) == 1 assert task_states[0]["name"] == "_get_read_tasks" From 3652e42313db58bf4a0a8a3a73bdf9ab30807994 Mon Sep 17 00:00:00 2001 From: Cheng Su Date: Fri, 28 Apr 2023 14:29:32 -0700 Subject: [PATCH 151/424] [Data] Cooperatively exit producer threads for `iter_batches` (#34819) This is to fix the bug for `iter_batches` where producer daemon threads hanging there, and holding batches in memory, when caller breaks early during iteration. Example of caller like: ```py for batch in ds.iter_batches(): if ... : break ``` Change from using Python `queue.Queue`, to use a set of `Semaphore`, `Lock` and plain `deque` to allow cooperatively exit producer threads. I don't find how to achieve the same by using any classes of Python thread-safe `Queue`s, so roll my own version of producer-consumer queue here. Also verified with user this PR fixed the GRAM OOM issue, by rerunning the workload with this PR. 
--- .../ray/data/_internal/block_batching/util.py | 112 ++++++++++++++---- .../data/tests/block_batching/test_util.py | 82 +++++++++++++ 2 files changed, 174 insertions(+), 20 deletions(-) diff --git a/python/ray/data/_internal/block_batching/util.py b/python/ray/data/_internal/block_batching/util.py index a82b46ae43a7..63e3f31a4341 100644 --- a/python/ray/data/_internal/block_batching/util.py +++ b/python/ray/data/_internal/block_batching/util.py @@ -1,7 +1,7 @@ import logging -import queue import threading from typing import Any, Callable, Iterator, List, Optional, Tuple, TypeVar, Union +from collections import deque from contextlib import nullcontext import ray @@ -230,7 +230,7 @@ class Sentinel: def __init__(self, thread_index: int): self.thread_index = thread_index - output_queue = queue.Queue(1) + output_queue = Queue(1) # Because pulling from the base iterator cannot happen concurrently, # we must execute the expensive computation in a separate step which @@ -238,11 +238,14 @@ def __init__(self, thread_index: int): def execute_computation(thread_index: int): try: for item in fn(thread_safe_generator): - output_queue.put(item, block=True) - output_queue.put(Sentinel(thread_index), block=True) + if output_queue.put(item): + # Return early when it's instructed to do so. + return + output_queue.put(Sentinel(thread_index)) except Exception as e: - output_queue.put(e, block=True) + output_queue.put(e) + # Use separate threads to produce output batches. threads = [ threading.Thread(target=execute_computation, args=(i,), daemon=True) for i in range(num_workers) @@ -251,22 +254,28 @@ def execute_computation(thread_index: int): for thread in threads: thread.start() + # Use main thread to consume output batches. 
num_threads_finished = 0 - while True: - next_item = output_queue.get(block=True) - if isinstance(next_item, Exception): - output_queue.task_done() - raise next_item - if isinstance(next_item, Sentinel): - output_queue.task_done() - logger.debug(f"Thread {next_item.thread_index} finished.") - num_threads_finished += 1 - threads[next_item.thread_index].join() - else: - yield next_item - output_queue.task_done() - if num_threads_finished >= num_workers: - break + try: + while True: + next_item = output_queue.get() + if isinstance(next_item, Exception): + raise next_item + if isinstance(next_item, Sentinel): + logger.debug(f"Thread {next_item.thread_index} finished.") + num_threads_finished += 1 + else: + yield next_item + if num_threads_finished >= num_workers: + break + finally: + # Cooperatively exit all producer threads. + # This is to avoid these daemon threads hanging there with holding batches in + # memory, which can cause GRAM OOM easily. This can happen when caller breaks + # in the middle of iteration. + num_threads_alive = num_workers - num_threads_finished + if num_threads_alive > 0: + output_queue.release(num_threads_alive) PREFETCHER_ACTOR_NAMESPACE = "ray.datastream" @@ -309,3 +318,66 @@ class _BlockPretcher: def prefetch(self, *blocks) -> None: pass + + +class Queue: + """A thread-safe queue implementation for multiple producers and consumers. + + Provide `release()` to exit producer threads cooperatively for resource release. + """ + + def __init__(self, queue_size: int): + # The queue shared across multiple producer threads. + self._queue = deque() + # The boolean varilable to indicate whether producer threads should exit. + self._threads_exit = False + # The semaphore for producer threads to put item into queue. + self._producer_semaphore = threading.Semaphore(queue_size) + # The semaphore for consumer threads to get item from queue. 
+ self._consumer_semaphore = threading.Semaphore(0) + # The mutex lock to guard access of `self._queue` and `self._threads_exit`. + self._mutex = threading.Lock() + + def put(self, item: Any) -> bool: + """Put an item into the queue. + + Block if necessary until a free slot is available in queue. + This method is called by producer threads. + + Returns: + True if the caller thread should exit immediately. + """ + self._producer_semaphore.acquire() + with self._mutex: + if self._threads_exit: + return True + else: + self._queue.append(item) + self._consumer_semaphore.release() + return False + + def get(self) -> Any: + """Remove and return an item from the queue. + + Block if necessary until an item is available in queue. + This method is called by consumer threads. + """ + self._consumer_semaphore.acquire() + with self._mutex: + next_item = self._queue.popleft() + self._producer_semaphore.release() + return next_item + + def release(self, num_threads: int): + """Release `num_threads` of producers so they would exit cooperatively.""" + with self._mutex: + self._threads_exit = True + for _ in range(num_threads): + # NOTE: After Python 3.9+, Semaphore.release(n) can be used to + # release all threads at once. 
+ self._producer_semaphore.release() + + def qsize(self): + """Return the size of the queue.""" + with self._mutex: + return len(self._queue) diff --git a/python/ray/data/tests/block_batching/test_util.py b/python/ray/data/tests/block_batching/test_util.py index 67eeffa3e777..990e06529e7f 100644 --- a/python/ray/data/tests/block_batching/test_util.py +++ b/python/ray/data/tests/block_batching/test_util.py @@ -1,3 +1,4 @@ +import threading import pytest import time @@ -7,6 +8,7 @@ import ray from ray.data._internal.block_batching.util import ( + Queue, _calculate_ref_hits, make_async_gen, blocks_to_batches, @@ -173,6 +175,86 @@ def sleep_udf(item): assert end_time - start_time < 9.5 +def test_make_async_gen_multiple_threads_unfinished(): + """Tests that using multiple threads can overlap compute even more. + Do not finish iteration with break in the middle. + """ + + num_items = 5 + + def gen(base_iterator): + for i in base_iterator: + time.sleep(4) + yield i + + def sleep_udf(item): + time.sleep(5) + return item + + # All 5 items should be fetched concurrently. + iterator = make_async_gen( + base_iterator=iter(range(num_items)), fn=gen, num_workers=5 + ) + + start_time = time.time() + + # Only sleep for first item. + sleep_udf(next(iterator)) + + # All subsequent items should already be prefetched and should be ready. + for i, _ in enumerate(iterator): + if i > 2: + break + end_time = time.time() + + # 4 second for first item, 5 seconds for udf, 0.5 seconds buffer + assert end_time - start_time < 9.5 + + +def test_queue(): + queue = Queue(5) + num_producers = 10 + num_producers_finished = 0 + num_items = 20 + + def execute_computation(): + for item in range(num_items): + if queue.put(item): + # Return early when it's instructed to do so. + break + # Put -1 as indicator of thread being finished. + queue.put(-1) + + # Use separate threads as producers. 
+ threads = [ + threading.Thread(target=execute_computation, daemon=True) + for _ in range(num_producers) + ] + + for thread in threads: + thread.start() + + for i in range(num_producers * num_items): + item = queue.get() + if item == -1: + num_producers_finished += 1 + if i > num_producers * num_items / 2: + num_producers_alive = num_producers - num_producers_finished + # Check there are some alive producers. + assert num_producers_alive > 0, num_producers_alive + # Release the alive producers. + queue.release(num_producers_alive) + # Consume the remaining items in queue. + while queue.qsize() > 0: + queue.get() + break + + # Sleep 5 seconds to allow producer threads to exit. + time.sleep(5) + # Then check the queue is still empty. + assert queue.qsize() == 0 + + def test_calculate_ref_hits(ray_start_regular_shared): refs = [ray.put(0), ray.put(1)] hits, misses, unknowns = _calculate_ref_hits(refs) From 86ed46cde723a06c20d061fc868bc9d68f56743f Mon Sep 17 00:00:00 2001 From: Archit Kulkarni Date: Fri, 28 Apr 2023 14:35:54 -0700 Subject: [PATCH 152/424] [Doc] [runtime env] Clarify conditions for local `pip` and `conda` requirements files (#34071) We support specifying {"pip": "requirements.txt"}, but it isn't clear in the docs where "requirements.txt" is supposed to be located for this API to work. This PR clarifies that it must be a local file, and that if a relative filepath is used, it should be relative to where the Ray script is being run, not relative to the path specified by working_dir. 
--- doc/source/ray-core/handling-dependencies.rst | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/doc/source/ray-core/handling-dependencies.rst b/doc/source/ray-core/handling-dependencies.rst index 9bdd069e6509..ba8600599e89 100644 --- a/doc/source/ray-core/handling-dependencies.rst +++ b/doc/source/ray-core/handling-dependencies.rst @@ -234,8 +234,8 @@ However, using runtime environments you can dynamically specify packages to be a print(ray.get(reqs.remote())) # -You may also specify your ``pip`` dependencies either via a Python list or a ``requirements.txt`` file. -Alternatively, you can specify a ``conda`` environment, either as a Python dictionary or via a ``environment.yml`` file. This conda environment can include ``pip`` packages. +You may also specify your ``pip`` dependencies either via a Python list or a local ``requirements.txt`` file. +Alternatively, you can specify a ``conda`` environment, either as a Python dictionary or via a local ``environment.yml`` file. This conda environment can include ``pip`` packages. For details, head to the :ref:`API Reference `. .. warning:: @@ -336,7 +336,7 @@ The ``runtime_env`` is a Python dictionary or a Python class :class:`ray.runtime - Example: ``{"working_dir": "/Users/my_working_dir/", "excludes": ["my_file.txt", "/subdir/, "path/to/dir", "*.log"]}`` -- ``pip`` (dict | List[str] | str): Either (1) a list of pip `requirements specifiers `_, (2) a string containing the path to a pip +- ``pip`` (dict | List[str] | str): Either (1) a list of pip `requirements specifiers `_, (2) a string containing the path to a local pip `“requirements.txt” `_ file, or (3) a python dictionary that has three fields: (a) ``packages`` (required, List[str]): a list of pip packages, (b) ``pip_check`` (optional, bool): whether to enable `pip check `_ at the end of pip install, defaults to ``False``. 
(c) ``pip_version`` (optional, str): the version of pip; Ray will spell the package name "pip" in front of the ``pip_version`` to form the final requirement string. @@ -351,9 +351,10 @@ The ``runtime_env`` is a Python dictionary or a Python class :class:`ray.runtime - Example: ``{"packages":["tensorflow", "requests"], "pip_check": False, "pip_version": "==22.0.2;python_version=='3.8.11'"}`` - When specifying a ``requirements.txt`` file, referencing local files `within` that file is not supported (e.g. ``-r ./my-laptop/more-requirements.txt``, ``./my-pkg.whl``). + When specifying a path to a ``requirements.txt`` file, the file must be present on your local machine and it must be a valid absolute path or relative filepath relative to your local current working directory, *not* relative to the `working_dir` specified in the `runtime_env`. + Furthermore, referencing local files `within` a `requirements.txt` file is not supported (e.g., ``-r ./my-laptop/more-requirements.txt``, ``./my-pkg.whl``). -- ``conda`` (dict | str): Either (1) a dict representing the conda environment YAML, (2) a string containing the path to a +- ``conda`` (dict | str): Either (1) a dict representing the conda environment YAML, (2) a string containing the path to a local `conda “environment.yml” `_ file, or (3) the name of a local conda environment already installed on each node in your cluster (e.g., ``"pytorch_p36"``). In the first two cases, the Ray and Python dependencies will be automatically injected into the environment to ensure compatibility, so there is no need to manually include them. 
@@ -366,6 +367,9 @@ The ``runtime_env`` is a Python dictionary or a Python class :class:`ray.runtime - Example: ``"pytorch_p36"`` + When specifying a path to a ``environment.yml`` file, the file must be present on your local machine and it must be a valid absolute path or a relative filepath relative to your local current working directory, *not* relative to the `working_dir` specified in the `runtime_env`. + Furthermore, referencing local files `within` a `environment.yml` file is not supported. + - ``env_vars`` (Dict[str, str]): Environment variables to set. Environment variables already set on the cluster will still be visible to the Ray workers; so there is no need to include ``os.environ`` or similar in the ``env_vars`` field. From 1b529459d5c5996b90667c5acac9689936422da4 Mon Sep 17 00:00:00 2001 From: Eric Liang Date: Fri, 28 Apr 2023 14:36:50 -0700 Subject: [PATCH 153/424] [all_tests] Strict mode on by default for Ray Data (#34643) Why are these changes needed? This PR contains all the staging code for the following pieces: Strict mode on by default Pydoc changes, with strict mode on by default Make AIR preprocessor and predictors compatible with strict mode Separate TODOs: REP / RFC for the rollout process Update data docs and examples (@bveeramani ) --------- Signed-off-by: Eric Liang --- doc/source/data/api/input_output.rst | 1 - .../data/doc_code/consuming_datastreams.py | 12 +- .../data/doc_code/creating_datastreams.py | 6 +- doc/source/data/doc_code/quick_start.py | 2 +- .../data/doc_code/saving_datastreams.py | 4 +- doc/source/data/doc_code/tensor.py | 76 +- .../data/doc_code/transforming_datastreams.py | 111 ++- doc/source/data/examples/random-access.rst | 12 +- doc/source/data/getting-started.rst | 13 +- doc/source/data/glossary.rst | 20 +- doc/source/data/transforming-datastreams.rst | 2 +- .../ray-air/doc_code/computer_vision.py | 5 +- doc/source/ray-air/doc_code/preprocessors.py | 52 +- ...ert_existing_pytorch_code_to_ray_air.ipynb | 4 +- 
.../convert_existing_tf_code_to_ray_air.ipynb | 4 +- .../examples/torch_image_example.ipynb | 10 +- .../examples/torch_incremental_learning.ipynb | 4 +- .../datasets_train/datasets_train.py | 2 +- python/ray/air/tests/test_dataset_config.py | 48 +- python/ray/data/__init__.py | 3 +- python/ray/data/_internal/arrow_block.py | 49 +- python/ray/data/_internal/block_builder.py | 2 +- python/ray/data/_internal/compute.py | 18 +- .../_internal/delegating_block_builder.py | 4 +- python/ray/data/_internal/execution/util.py | 8 +- python/ray/data/_internal/fast_repartition.py | 4 +- .../logical/operators/all_to_all_operator.py | 5 +- .../logical/operators/map_operator.py | 14 +- python/ray/data/_internal/null_aggregate.py | 6 +- python/ray/data/_internal/pandas_block.py | 37 +- .../ray/data/_internal/pipeline_executor.py | 17 +- python/ray/data/_internal/plan.py | 6 +- .../ray/data/_internal/planner/aggregate.py | 3 +- .../planner/exchange/aggregate_task_spec.py | 9 +- .../planner/exchange/sort_task_spec.py | 2 +- python/ray/data/_internal/planner/filter.py | 8 +- python/ray/data/_internal/planner/flat_map.py | 8 +- .../ray/data/_internal/planner/map_batches.py | 6 +- python/ray/data/_internal/planner/map_rows.py | 8 +- .../_internal/planner/plan_from_items_op.py | 8 +- python/ray/data/_internal/simple_block.py | 58 +- python/ray/data/_internal/sort.py | 2 +- python/ray/data/_internal/stage_impl.py | 3 +- python/ray/data/_internal/table_block.py | 16 +- python/ray/data/_internal/util.py | 5 +- python/ray/data/aggregate.py | 47 +- python/ray/data/block.py | 86 ++- python/ray/data/context.py | 2 +- python/ray/data/dataset_pipeline.py | 137 ++-- python/ray/data/datasource/datasource.py | 24 +- .../data/datasource/file_based_datasource.py | 3 +- .../ray/data/datasource/numpy_datasource.py | 2 - python/ray/data/datasource/sql_datasource.py | 3 +- .../data/datasource/webdataset_datasource.py | 5 +- python/ray/data/datastream.py | 686 ++++++------------ 
python/ray/data/examples/demo_infer.py | 2 +- python/ray/data/grouped_data.py | 260 ++----- python/ray/data/iterator.py | 35 +- python/ray/data/preprocessors/encoder.py | 10 +- python/ray/data/preprocessors/imputer.py | 8 +- python/ray/data/preprocessors/torch.py | 44 +- python/ray/data/preprocessors/vectorizer.py | 8 +- python/ray/data/random_access_dataset.py | 12 +- python/ray/data/read_api.py | 110 +-- python/ray/data/row.py | 4 +- .../tests/preprocessors/test_batch_mapper.py | 70 +- .../tests/preprocessors/test_preprocessors.py | 4 +- .../data/tests/preprocessors/test_torch.py | 10 +- python/ray/data/tests/test_all_to_all.py | 146 ++-- python/ray/data/tests/test_binary.py | 43 +- python/ray/data/tests/test_bulk_executor.py | 35 +- python/ray/data/tests/test_consumption.py | 242 +++--- .../data/tests/test_context_propagation.py | 32 +- .../data/tests/test_dynamic_block_split.py | 22 +- python/ray/data/tests/test_ecosystem.py | 4 +- .../data/tests/test_execution_optimizer.py | 156 ++-- .../test_executor_resource_management.py | 26 +- python/ray/data/tests/test_formats.py | 24 +- python/ray/data/tests/test_image.py | 2 +- python/ray/data/tests/test_iterator.py | 50 +- python/ray/data/tests/test_map.py | 184 +++-- python/ray/data/tests/test_mars.py | 8 - python/ray/data/tests/test_numpy.py | 57 +- python/ray/data/tests/test_object_gc.py | 2 +- python/ray/data/tests/test_operators.py | 20 +- python/ray/data/tests/test_optimize.py | 55 +- python/ray/data/tests/test_pandas.py | 12 +- python/ray/data/tests/test_parquet.py | 4 +- python/ray/data/tests/test_pipeline.py | 139 ++-- python/ray/data/tests/test_pipeline_nohang.py | 13 +- python/ray/data/tests/test_random_access.py | 28 +- .../data/tests/test_randomize_block_order.py | 16 +- python/ray/data/tests/test_raydp.py | 6 +- python/ray/data/tests/test_size_estimation.py | 37 +- python/ray/data/tests/test_sort.py | 44 +- python/ray/data/tests/test_split.py | 81 ++- python/ray/data/tests/test_stats.py | 9 +- 
.../test_streaming_backpressure_edge_case.py | 4 +- .../data/tests/test_streaming_integration.py | 16 +- python/ray/data/tests/test_tensor.py | 80 +- python/ray/data/tests/test_tf.py | 14 - python/ray/data/tests/test_torch.py | 2 +- .../ray/data/tests/test_transform_pyarrow.py | 10 +- python/ray/data/tests/test_webdataset.py | 5 +- python/ray/data/tests/util.py | 26 + .../pytorch/torch_regression_example.py | 2 +- .../train/huggingface/_huggingface_utils.py | 2 +- .../ray/train/tests/lightning_test_utils.py | 3 + python/ray/train/tests/test_base_trainer.py | 12 +- .../ray/train/tests/test_batch_predictor.py | 32 +- python/ray/train/tests/test_gpu.py | 6 +- .../train/tests/test_lightgbm_predictor.py | 3 + .../ray/train/tests/test_sklearn_predictor.py | 3 + python/ray/train/tests/test_torch_trainer.py | 5 +- .../ray/train/tests/test_xgboost_predictor.py | 3 + python/ray/workflow/tests/test_dataset.py | 6 +- .../mlperf-train/resnet50_ray_air.py | 4 +- .../dataset/aggregate_benchmark.py | 9 +- .../dataset/data_ingest_benchmark.py | 10 +- .../dataset/dataset_random_access.py | 4 +- release/nightly_tests/dataset/inference.py | 10 +- .../dataset/iter_tensor_batches_benchmark.py | 10 +- .../dataset/map_batches_benchmark.py | 4 +- .../dataset/operator_fusion_benchmark.py | 2 +- .../dataset/read_tfrecords_benchmark.py | 2 +- release/nightly_tests/dataset/sort.py | 3 +- rllib/algorithms/algorithm.py | 4 +- rllib/offline/dataset_reader.py | 2 +- rllib/offline/estimators/direct_method.py | 1 + rllib/offline/estimators/doubly_robust.py | 3 + .../offline/estimators/importance_sampling.py | 1 + .../weighted_importance_sampling.py | 1 + rllib/offline/feature_importance.py | 10 +- 133 files changed, 1855 insertions(+), 2225 deletions(-) diff --git a/doc/source/data/api/input_output.rst b/doc/source/data/api/input_output.rst index 2ebc39c22506..019ecd986939 100644 --- a/doc/source/data/api/input_output.rst +++ b/doc/source/data/api/input_output.rst @@ -12,7 +12,6 @@ Synthetic Data 
:toctree: doc/ range - range_table range_tensor Python Objects diff --git a/doc/source/data/doc_code/consuming_datastreams.py b/doc/source/data/doc_code/consuming_datastreams.py index 731fe4753b7b..860de83d53dd 100644 --- a/doc/source/data/doc_code/consuming_datastreams.py +++ b/doc/source/data/doc_code/consuming_datastreams.py @@ -40,7 +40,7 @@ # Consume all rows in the Datastream. for row in ds.iter_rows(): - assert isinstance(row, int) + assert isinstance(row, dict) num_rows += 1 print(num_rows) @@ -58,7 +58,7 @@ # Consume all batches in the Datastream. for batch in ds.iter_batches(batch_size=2): - assert isinstance(batch, list) + assert isinstance(batch, dict) num_batches += 1 print(num_batches) @@ -69,7 +69,7 @@ for batch in ds.iter_batches(batch_size=2, batch_format="pandas"): assert isinstance(batch, pd.DataFrame) # Simple integer Datastream is converted to a single-column Pandas DataFrame. - cum_sum += batch["value"] + cum_sum += batch["id"] print(cum_sum) # -> 49995000 @@ -81,11 +81,11 @@ import ray @ray.remote -def consume(data: ray.data.Datastream[int]) -> int: +def consume(data: ray.data.Datastream) -> int: num_batches = 0 # Consume data in 2-record batches. for batch in data.iter_batches(batch_size=2): - assert len(batch) == 2 + assert len(batch["id"]) == 2 num_batches += 1 return num_batches @@ -106,7 +106,7 @@ def __init__(self, rank: int): def train(self, shard: ray.data.DataIterator) -> int: total = 0 for batch in shard.iter_torch_batches(batch_size=256): - total += len(batch) + total += len(batch["id"]) return total workers = [Worker.remote(i) for i in range(4)] diff --git a/doc/source/data/doc_code/creating_datastreams.py b/doc/source/data/doc_code/creating_datastreams.py index 3b74578ed7dc..b43e843027a5 100644 --- a/doc/source/data/doc_code/creating_datastreams.py +++ b/doc/source/data/doc_code/creating_datastreams.py @@ -23,8 +23,8 @@ # fmt: off # __gen_synth_tabular_range_begin__ # Create a Datastream of Arrow records. 
-ds = ray.data.range_table(10000) -# -> Datastream(num_blocks=200, num_rows=10000, schema={value: int64}) +ds = ray.data.range(10000) +# -> Datastream(num_blocks=200, num_rows=10000, schema={id: int64}) ds.take(5) # -> [{'value': 0}, {'value': 1}, {'value': 2}, {'value': 3}, {'value': 4}] @@ -415,7 +415,7 @@ ds = ray.data.read_binary_files("example://mnist_subset_partitioned/0/1.png") # -> Datastream(num_blocks=1, num_rows=1, schema=) -ds = ds.map(lambda bytes_: np.asarray(PIL.Image.open(BytesIO(bytes_)).convert("L"))) +ds = ds.map(lambda bytes_: {"images": np.asarray(PIL.Image.open(BytesIO(bytes_["bytes"])).convert("L"))}) # -> Datastream( # num_blocks=1, # num_rows=1, diff --git a/doc/source/data/doc_code/quick_start.py b/doc/source/data/doc_code/quick_start.py index 33a457d6152f..1d17cc5b0183 100644 --- a/doc/source/data/doc_code/quick_start.py +++ b/doc/source/data/doc_code/quick_start.py @@ -75,7 +75,7 @@ def transform_batch(df: pandas.DataFrame) -> pandas.DataFrame: return df[(df["sepal.length"] < 5.5) & (df["petal.length"] > 3.5)] -transformed_ds = ds.map_batches(transform_batch) +transformed_ds = ds.map_batches(transform_batch, batch_format="pandas") # Datastream(num_blocks=10, num_rows=3, # schema={sepal.length: float64, sepal.width: float64, # petal.length: float64, petal.width: float64, variety: object}) diff --git a/doc/source/data/doc_code/saving_datastreams.py b/doc/source/data/doc_code/saving_datastreams.py index 80a5b30d4553..30740923f1d6 100644 --- a/doc/source/data/doc_code/saving_datastreams.py +++ b/doc/source/data/doc_code/saving_datastreams.py @@ -84,11 +84,11 @@ # -> {'value': array(1)} # Write out just one file. -ds.repartition(1).write_numpy("/tmp/one_numpy") +ds.repartition(1).write_numpy("/tmp/one_numpy", column="data") # -> /tmp/one_numpy/78c91652e2364a7481cf171bed6d96e4_000000.npy # Write out multiple files. 
-ds.repartition(3).write_numpy("/tmp/multi_numpy") +ds.repartition(3).write_numpy("/tmp/multi_numpy", column="data") # -> /tmp/multi_numpy/b837e5b5a18448bfa3f8388f5d99d033_000000.npy # -> /tmp/multi_numpy/b837e5b5a18448bfa3f8388f5d99d033_000001.npy # -> /tmp/multi_numpy/b837e5b5a18448bfa3f8388f5d99d033_000002.npy diff --git a/doc/source/data/doc_code/tensor.py b/doc/source/data/doc_code/tensor.py index 6d1a1d2606fc..80e7cfcc75b0 100644 --- a/doc/source/data/doc_code/tensor.py +++ b/doc/source/data/doc_code/tensor.py @@ -9,7 +9,7 @@ # Create a Datastream of tensors. ds = ray.data.range_tensor(10000, shape=(64, 64)) # -> Datastream(num_blocks=200, num_rows=10000, -# schema={__value__: numpy.ndarray(shape=(64, 64), dtype=int64)}) +# schema={data: numpy.ndarray(shape=(64, 64), dtype=int64)}) ds.take(2) # -> [array([[0, 0, 0, ..., 0, 0, 0], @@ -35,7 +35,7 @@ import numpy as np # Start with a tabular base datastream. -ds = ray.data.range_table(1000) +ds = ray.data.range(1000) # Create a single TensorArray column. def single_col_udf(batch: pd.DataFrame) -> pd.DataFrame: @@ -43,18 +43,18 @@ def single_col_udf(batch: pd.DataFrame) -> pd.DataFrame: # Lists of ndarrays are automatically cast to TensorArray. arr = [np.zeros((128, 128, 3)) for _ in range(bs)] - return pd.DataFrame({"__value__": arr}) + return pd.DataFrame({"data": arr}) ## Alternatively, manually construct a TensorArray from a single ndarray. 
# from ray.data.extensions.tensor_extension import TensorArray # arr = TensorArray(np.zeros((bs, 128, 128, 3), dtype=np.int64)) - # return pd.DataFrame({"__value__": arr}) + # return pd.DataFrame({"data": arr}) -ds.map_batches(single_col_udf) +ds.map_batches(single_col_udf, batch_format="pandas") ds.materialize() # -> Datastream(num_blocks=17, num_rows=1000, -# schema={__value__: numpy.ndarray(shape=(128, 128, 3), dtype=int64)}) +# schema={data: numpy.ndarray(shape=(128, 128, 3), dtype=int64)}) # __create_pandas_end__ # __create_pandas_2_begin__ @@ -73,7 +73,7 @@ def multi_col_udf(batch: pd.DataFrame) -> pd.DataFrame: # return pd.DataFrame({"image": image, "embed": embed}) -ds.map_batches(multi_col_udf) +ds.map_batches(multi_col_udf, batch_format="pandas") ds.materialize() # -> Datastream(num_blocks=17, num_rows=1000, # schema={image: numpy.ndarray(shape=(128, 128, 3), dtype=int64), @@ -86,12 +86,12 @@ def multi_col_udf(batch: pd.DataFrame) -> pd.DataFrame: # From in-memory numpy data. ray.data.from_numpy(np.zeros((1000, 128, 128, 3), dtype=np.int64)) # -> Datastream(num_blocks=1, num_rows=1000, -# schema={__value__: numpy.ndarray(shape=(128, 128, 3), dtype=int64)}) +# schema={data: numpy.ndarray(shape=(128, 128, 3), dtype=int64)}) # From saved numpy files. 
ray.data.read_numpy("example://mnist_subset.npy") # -> Datastream(num_blocks=1, num_rows=3, -# schema={__value__: numpy.ndarray(shape=(28, 28), dtype=uint8)}) +# schema={data: numpy.ndarray(shape=(28, 28), dtype=uint8)}) # __create_numpy_end__ # __create_parquet_1_begin__ @@ -198,7 +198,7 @@ def cast_udf(block: pa.Table) -> pa.Table: # __create_images_begin__ ds = ray.data.read_images("example://image-datasets/simple") # -> Datastream(num_blocks=3, num_rows=3, -# schema={__value__: numpy.ndarray(shape=(32, 32, 3), dtype=uint8)}) +# schema={data: numpy.ndarray(shape=(32, 32, 3), dtype=uint8)}) ds.take(1) # -> [array([[[ 88, 70, 68], @@ -213,14 +213,16 @@ def cast_udf(block: pa.Table) -> pa.Table: # __consume_native_begin__ import ray +from typing import Dict # Read a single-column example datastream. ds = ray.data.read_numpy("example://mnist_subset.npy") # -> Datastream(num_blocks=1, num_rows=3, -# schema={__value__: numpy.ndarray(shape=(28, 28), dtype=uint8)}) +# schema={data: numpy.ndarray(shape=(28, 28), dtype=uint8)}) -def add_one(batch: np.ndarray) -> np.ndarray: - return batch + 1 +def add_one(batch: Dict[str, np.ndarray]) -> Dict[str, np.ndarray]: + batch["data"] += 1 + return batch # This processes batches in numpy.ndarray format. ds = ds.map_batches(add_one) @@ -256,7 +258,7 @@ def add_one(batch: pd.DataFrame) -> pd.DataFrame: return batch # This processes batches in pd.DataFrame format. -ds = ds.map_batches(add_one) +ds = ds.map_batches(add_one, batch_format="pandas") # This returns pandas batches with List[np.ndarray] columns. next(ds.iter_batches()) @@ -272,10 +274,10 @@ def add_one(batch: pd.DataFrame) -> pd.DataFrame: # Read a single-column example datastream. 
ds = ray.data.read_numpy("example://mnist_subset.npy") # -> Datastream(num_blocks=1, num_rows=3, -# schema={__value__: numpy.ndarray(shape=(28, 28), dtype=uint8)}) +# schema={data: numpy.ndarray(shape=(28, 28), dtype=uint8)}) def add_one(batch: pd.DataFrame) -> pd.DataFrame: - batch["__value__"] += 1 + batch["data"] += 1 return batch # This processes batches in pd.DataFrame format. @@ -283,7 +285,7 @@ def add_one(batch: pd.DataFrame) -> pd.DataFrame: # This returns pandas batches with List[np.ndarray] columns. next(ds.iter_batches(batch_format="pandas")) -# -> __value__ +# -> data # 0 [[ 1, 1, 1, 1, 1, 1, 1, 1, 1,... # 1 [[ 1, 1, 1, 1, 1, 1, 1, 1, 1,... # 2 [[ 1, 1, 1, 1, 1, 1, 1, 1, 1,... @@ -322,20 +324,25 @@ def add_one(batch: pd.DataFrame) -> pd.DataFrame: # Read a single-column example datastream. ds = ray.data.read_numpy("example://mnist_subset.npy") # -> Datastream(num_blocks=1, num_rows=3, -# schema={__value__: numpy.ndarray(shape=(28, 28), dtype=uint8)}) +# schema={data: numpy.ndarray(shape=(28, 28), dtype=uint8)}) def add_one(batch: pyarrow.Table) -> pyarrow.Table: + + def to_numpy(buf): + if not isinstance(buf, np.ndarray): + buf = buf.as_py() + return buf + np_col = np.array( [ - np.ndarray((28, 28), buffer=buf, dtype=np.uint8) - for buf in batch.column("__value__") + to_numpy(buf) for buf in batch.column("data") ] ) np_col += 1 return batch.set_column( - batch._ensure_integer_index("__value__"), - "__value__", + batch._ensure_integer_index("data"), + "data", ArrowTensorArray.from_numpy(np_col), ) @@ -345,9 +352,9 @@ def add_one(batch: pyarrow.Table) -> pyarrow.Table: # This returns batches in pyarrow.Table format. 
next(ds.iter_batches(batch_format="pyarrow")) # pyarrow.Table -# __value__: extension> +# data: extension> # ---- -# __value__: [[[1,1,1,1,1,1,1,1,1,1,...],...,[1,1,1,1,1,1,1,1,1,1,...]]] +# data: [[[1,1,1,1,1,1,1,1,1,1,...],...,[1,1,1,1,1,1,1,1,1,1,...]]] # __consume_pyarrow_end__ # __consume_pyarrow_2_begin__ @@ -357,10 +364,15 @@ def add_one(batch: pyarrow.Table) -> pyarrow.Table: # schema={image: numpy.ndarray(shape=(128, 128, 3), dtype=uint8), label: object}) def add_one(batch: pyarrow.Table) -> pyarrow.Table: + + def to_numpy(buf): + if not isinstance(buf, np.ndarray): + buf = buf.as_py() + return buf + np_col = np.array( [ - np.ndarray((128, 128, 3), buffer=buf, dtype=np.uint8) - for buf in batch.column("image") + to_numpy(buf) for buf in batch.column("image") ] ) np_col += 1 @@ -390,10 +402,10 @@ def add_one(batch: pyarrow.Table) -> pyarrow.Table: # Read a single-column example datastream. ds = ray.data.read_numpy("example://mnist_subset.npy") # -> Datastream(num_blocks=1, num_rows=3, -# schema={__value__: numpy.ndarray(shape=(28, 28), dtype=uint8)}) +# schema={data: numpy.ndarray(shape=(28, 28), dtype=uint8)}) -def add_one(batch: np.ndarray) -> np.ndarray: - batch += 1 +def add_one(batch: Dict[str, np.ndarray]) -> Dict[str, np.ndarray]: + batch["data"] += 1 return batch # This processes batches in np.ndarray format. @@ -475,15 +487,15 @@ def add_one(batch: Dict[str, Any]) -> Dict[str, Any]: # Read a single-column example datastream. ds = ray.data.read_numpy("example://mnist_subset.npy") # -> Datastream(num_blocks=1, num_rows=3, -# schema={__value__: numpy.ndarray(shape=(28, 28), dtype=uint8)}) +# schema={data: numpy.ndarray(shape=(28, 28), dtype=uint8)}) # You can write the datastream to Parquet. -ds.write_numpy("/tmp/some_path") +ds.write_numpy("/tmp/some_path", column="data") # And you can read it back. 
read_ds = ray.data.read_numpy("/tmp/some_path") print(read_ds.schema()) -# -> __value__: extension> +# -> data: extension> # __write_2_end__ # fmt: off diff --git a/doc/source/data/doc_code/transforming_datastreams.py b/doc/source/data/doc_code/transforming_datastreams.py index f4345f9dc87e..45cb1e7a8a79 100644 --- a/doc/source/data/doc_code/transforming_datastreams.py +++ b/doc/source/data/doc_code/transforming_datastreams.py @@ -32,7 +32,7 @@ def transform_batch(df: pandas.DataFrame) -> pandas.DataFrame: return df[(df["sepal.length"] < 5.5) & (df["petal.length"] > 3.5)] # Map processing the datastream. -ds.map_batches(transform_batch).show() +ds.map_batches(transform_batch, batch_format="pandas").show() # -> {'sepal.length': 5.2, 'sepal.width': 2.7, # 'petal.length': 3.9, 'petal.width': 1.4, 'variety': 'Versicolor'} # -> {'sepal.length': 5.4, 'sepal.width': 3.0, @@ -80,8 +80,6 @@ def transform_batch(df: pandas.DataFrame) -> pandas.DataFrame: # Load datastream. ds = ray.data.read_csv("example://iris.csv") -print(ds.default_batch_format()) -# # UDF as a function on Pandas DataFrame batches. def pandas_transform(df_batch: pd.DataFrame) -> pd.DataFrame: @@ -95,7 +93,7 @@ def pandas_transform(df_batch: pd.DataFrame) -> pd.DataFrame: df_batch = df_batch.drop(columns=["sepal.length"]) return df_batch -ds.map_batches(pandas_transform).show(2) +ds.map_batches(pandas_transform, batch_format="pandas").show(2) # -> {'sepal.width': 3.2, 'petal.length': 4.7, 'petal.width': 1.4, # 'variety': 'Versicolor', 'normalized.sepal.length': 1.0} # -> {'sepal.width': 3.2, 'petal.length': 4.5, 'petal.width': 1.5, @@ -107,19 +105,19 @@ def pandas_transform(df_batch: pd.DataFrame) -> pd.DataFrame: # __writing_default_udfs_tensor_begin__ import ray import numpy as np +from typing import Dict # Load datastream. ds = ray.data.range_tensor(1000, shape=(2, 2)) -print(ds.default_batch_format()) -# # UDF as a function on NumPy ndarray batches. 
-def tensor_transform(arr: np.ndarray) -> np.ndarray: +def tensor_transform(arr: Dict[str, np.ndarray]) -> Dict[str, np.ndarray]: # Notice here that the ndarray is of shape (batch_size, 2, 2) # Multiply each element in the ndarray by a factor of 2 - return arr * 2 + arr["data"] *= 2 + return arr -ds.map_batches(tensor_transform).show(2) +ds.map_batches(tensor_transform, batch_format="numpy").show(2) # [array([[0, 0], # [0, 0]]), # array([[2, 2], @@ -131,21 +129,19 @@ def tensor_transform(arr: np.ndarray) -> np.ndarray: # fmt: off # __writing_default_udfs_list_begin__ import ray +from typing import Any # Load datastream. ds = ray.data.range(1000) -print(ds.default_batch_format()) -# -# UDF as a function on Python list batches. -def list_transform(list) -> list: +def list_transform(batch: Dict[str, Any]) -> Dict[str, Any]: # Notice here that the list is of length batch_size # Multiply each element in the list by a factor of 2 - return [x * 2 for x in list] + return {"id": [x * 2 for x in batch["id"]]} ds.map_batches(list_transform).show(2) -# 0 -# 2 +# {"id": 0} +# {"id": 2} # __writing_default_udfs_list_end__ # fmt: on @@ -168,7 +164,7 @@ def pandas_transform(df: pd.DataFrame) -> pd.DataFrame: df = df.drop(columns=["sepal.length"]) return df -ds.map_batches(pandas_transform).show(2) +ds.map_batches(pandas_transform, batch_format="pandas").show(2) # -> {'sepal.width': 3.2, 'petal.length': 4.7, 'petal.width': 1.4, # 'variety': 'Versicolor', 'normalized.sepal.length': 1.0} # -> {'sepal.width': 3.2, 'petal.length': 4.5, 'petal.width': 1.5, @@ -211,7 +207,8 @@ def pyarrow_transform(batch: pa.Table) -> pa.Table: ds = ray.data.read_numpy("example://mnist_subset.npy") # UDF as a function on NumPy ndarray batches. -def normalize(arr: np.ndarray) -> np.ndarray: +def normalize(arr: Dict[str, np.ndarray]) -> Dict[str, np.ndarray]: + arr = arr["data"] # Normalizes each image to [0, 1] range. 
mins = arr.min((1, 2))[:, np.newaxis, np.newaxis] maxes = arr.max((1, 2))[:, np.newaxis, np.newaxis] @@ -219,7 +216,7 @@ def normalize(arr: np.ndarray) -> np.ndarray: idx = np.where(range_ == 0) mins[idx] = 0 range_[idx] = 1 - return (arr - mins) / range_ + return {"data": (arr - mins) / range_} ds = ds.map_batches(normalize, batch_format="numpy") # -> MapBatches(normalize) @@ -249,7 +246,7 @@ def __call__(self, df: pd.DataFrame) -> pd.DataFrame: df["output"] = self.model(df) return df -ds.map_batches(ModelUDF, compute="actors").show(2) +ds.map_batches(ModelUDF, batch_format="pandas", compute=ray.data.ActorPoolStrategy(size=2)).show(2) # -> {'sepal.length': 7.0, 'sepal.width': 3.2, 'petal.length': 4.7, 'petal.width': 1.4, # 'variety': 'Versicolor', 'output': True} # -> {'sepal.length': 6.4, 'sepal.width': 3.2, 'petal.length': 4.5, 'petal.width': 1.5, @@ -270,7 +267,7 @@ def repeat_dataframe(df: pd.DataFrame) -> Iterator[pd.DataFrame]: for _ in range(5): yield pd.concat([df]*20) -ds.map_batches(repeat_dataframe).show(2) +ds.map_batches(repeat_dataframe, batch_format="pandas", ).show(2) # -> {'sepal.length': 5.1, 'sepal.width': 3.5, 'petal.length': 1.4, 'petal.width': 0.2, 'variety': 'Setosa'} # -> {'sepal.length': 4.9, 'sepal.width': 3.0, 'petal.length': 1.4, 'petal.width': 0.2, 'variety': 'Setosa'} # __writing_generator_udfs_end__ @@ -284,15 +281,15 @@ def repeat_dataframe(df: pd.DataFrame) -> Iterator[pd.DataFrame]: # Load datastream. ds = ray.data.from_items(["test", "string", "teststring"]) -# -> Datastream(num_blocks=1, num_rows=3, schema=) +# -> Datastream(num_blocks=1, num_rows=3, schema={item: string}) -# Convert to Pandas. -def convert_to_pandas(text: List[str]) -> pd.DataFrame: - return pd.DataFrame({"text": text}, dtype="string") +# Convert column name. 
+def convert_pandas(batch: pd.DataFrame) -> pd.DataFrame: + return pd.DataFrame({"text": batch["item"]}, dtype="string") -ds = ds.map_batches(convert_to_pandas) -# -> MapBatches(convert_to_pandas) -# +- Datastream(num_blocks=3, num_rows=3, schema=) +ds = ds.map_batches(convert_pandas, batch_format="pandas") +# -> MapBatches(convert_pandas) +# +- Datastream(num_blocks=3, num_rows=3, schema={item: tsring}) ds.show(2) # -> {'text': 'test'} @@ -311,15 +308,15 @@ def convert_to_pandas(text: List[str]) -> pd.DataFrame: # Load datastream. ds = ray.data.from_items(["test", "string", "teststring"]) -# -> Datastream(num_blocks=1, num_rows=3, schema=) +# -> Datastream(num_blocks=1, num_rows=3, schema={item: string}) # Convert to Arrow. -def convert_to_arrow(text: List[str]) -> pa.Table: - return pa.table({"text": text}) +def convert_to_arrow(batch: Dict[str, np.ndarray]) -> pa.Table: + return pa.table({"text": batch["item"]}) ds = ds.map_batches(convert_to_arrow) # -> MapBatches(convert_to_arrow) -# +- Datastream(num_blocks=1, num_rows=3, schema=) +# +- Datastream(num_blocks=1, num_rows=3, schema={text: string}) ds.show(2) # -> {'text': 'test'} @@ -352,10 +349,10 @@ def convert_to_arrow(text: List[str]) -> pa.Table: # ) # Convert to NumPy. -def convert_to_numpy(df: pd.DataFrame) -> np.ndarray: - return df[["sepal.length", "sepal.width"]].to_numpy() +def convert_to_numpy(df: pd.DataFrame) -> Dict[str, np.ndarray]: + return {"data": df[["sepal.length", "sepal.width"]].to_numpy()} -ds = ds.map_batches(convert_to_numpy) +ds = ds.map_batches(convert_to_numpy, batch_format="pandas") # -> MapBatches(convert_to_numpy) # +- Datastream( # num_blocks=1, @@ -370,8 +367,8 @@ def convert_to_numpy(df: pd.DataFrame) -> np.ndarray: # ) ds.show(2) -# -> [5.1 3.5] -# [4.9 3. ] +# -> {'data': [5.1 3.5]} +# {'data': [4.9 3. 
]} # __writing_numpy_out_udfs_end__ # fmt: on @@ -404,7 +401,7 @@ def convert_to_numpy(df: pd.DataFrame) -> Dict[str, np.ndarray]: "petal_width": df["petal.width"].to_numpy(), } -ds = ds.map_batches(convert_to_numpy) +ds = ds.map_batches(convert_to_numpy, batch_format="pandas") # -> MapBatches(convert_to_numpy) # +- Datastream( # num_blocks=1, @@ -445,10 +442,10 @@ def convert_to_numpy(df: pd.DataFrame) -> Dict[str, np.ndarray]: # ) # Convert to list of dicts. -def convert_to_list(df: pd.DataFrame) -> List[dict]: - return df.to_dict("records") +def convert_to_list(df: pd.DataFrame) -> pd.DataFrame: + return df -ds = ds.map_batches(convert_to_list) +ds = ds.map_batches(convert_to_list, batch_format="pandas") # -> MapBatches(convert_to_list) # +- Datastream( # num_blocks=1, @@ -497,7 +494,6 @@ def row_to_dict(row: int) -> Dict[str, int]: # fmt: off # __writing_table_row_out_row_udfs_begin__ import ray -from ray.data.row import TableRow import pandas as pd from typing import Dict @@ -516,8 +512,7 @@ def row_to_dict(row: int) -> Dict[str, int]: # ) # Treat row as dict. -def map_row(row: TableRow) -> TableRow: - row = row.as_pydict() +def map_row(row: Dict[str, Any]) -> Dict[str, Any]: row["sepal.area"] = row["sepal.length"] * row["sepal.width"] return row @@ -551,28 +546,25 @@ def map_row(row: TableRow) -> TableRow: # Load datastream. ds = ray.data.range(10) -# -> Datastream(num_blocks=10, num_rows=10, schema=) +# -> Datastream(num_blocks=10, num_rows=10, schema={id: int64}) # Convert row to NumPy ndarray. 
-def row_to_numpy(row: int) -> np.ndarray: - return np.full(shape=(2, 2), fill_value=row) +def row_to_numpy(row: Dict[str, Any]) -> Dict[str, np.ndarray]: + return {"data": np.full(shape=(2, 2), fill_value=row["id"])} ds = ds.map(row_to_numpy) # -> Map -# +- Datastream(num_blocks=10, num_rows=10, schema=) +# +- Datastream(num_blocks=10, num_rows=10, schema={data: np.ndarray(shape=(2, 2))}) ds.show(2) -# -> [[0 0] -# [0 0]] -# [[1 1] -# [1 1]] +# -> {'data': [[0 0], [0 0]]]} +# {'data': [[1 1], [1 1]]]} # __writing_numpy_out_row_udfs_end__ # fmt: on # fmt: off # __writing_simple_out_row_udfs_begin__ import ray -from ray.data.row import TableRow from typing import List # Load datastream. @@ -590,8 +582,9 @@ def row_to_numpy(row: int) -> np.ndarray: # ) # Convert row to simple (opaque) row. -def map_row(row: TableRow) -> tuple: - return tuple(row.items()) +def map_row(row: Dict[str, Any]) -> Dict[str, Any]: + row["petal.random_property"] = random.random() + return row ds = ds.map(map_row) # -> Map @@ -606,12 +599,6 @@ def map_row(row: TableRow) -> tuple: # variety: string, # }, # ) - -ds.show(2) -# -> (('sepal.length', 5.1), ('sepal.width', 3.5), ('petal.length', 1.4), -# ('petal.width', 0.2), ('variety', 'Setosa')) -# -> (('sepal.length', 4.9), ('sepal.width', 3.0), ('petal.length', 1.4), -# ('petal.width', 0.2), ('variety', 'Setosa')) # __writing_simple_out_row_udfs_end__ # fmt: on @@ -634,7 +621,7 @@ def pandas_transform(df: pd.DataFrame) -> pd.DataFrame: return df # Have each batch that pandas_transform receives contain 10 rows. 
-ds = ds.map_batches(pandas_transform, batch_size=10) +ds = ds.map_batches(pandas_transform, batch_format="pandas", batch_size=10) # -> MapBatches(pandas_transform) # +- Datastream( # num_blocks=1, diff --git a/doc/source/data/examples/random-access.rst b/doc/source/data/examples/random-access.rst index bb0d5536002d..da96549d0bda 100644 --- a/doc/source/data/examples/random-access.rst +++ b/doc/source/data/examples/random-access.rst @@ -9,17 +9,17 @@ Any Arrow-format datastream can be enabled for random access by calling ``ds.to_ .. code-block:: python # Generate a dummy embedding table as an example. - ds = ray.data.range_table(100) - ds = ds.add_column("embedding", lambda b: b["value"] ** 2) - # -> schema={value: int64, embedding: int64} + ds = ray.data.range(100) + ds = ds.add_column("embedding", lambda b: b["id"] ** 2) + # -> schema={id: int64, embedding: int64} # Enable random access on the datastream. This launches a number of actors # spread across the cluster that serve random access queries to the data. - rmap = ds.to_random_access_dataset(key="value", num_workers=4) + rmap = ds.to_random_access_dataset(key="id", num_workers=4) # Example of a point query by key. ray.get(rmap.get_async(2)) - # -> {"value": 2, "embedding": 4} + # -> {"id": 2, "embedding": 4} # Queries to missing keys return None. ray.get(rmap.get_async(-1)) @@ -27,7 +27,7 @@ Any Arrow-format datastream can be enabled for random access by calling ``ds.to_ # Example of a multiget query. rmap.multiget([4, 2]) - # -> [{"value": 4, "embedding": 16}, {"value": 2, "embedding": 4}] + # -> [{"id": 4, "embedding": 16}, {"id": 2, "embedding": 4}] Similar to Datastream, a RandomAccessDataset can be passed to and used from any Ray actor or task. diff --git a/doc/source/data/getting-started.rst b/doc/source/data/getting-started.rst index edba19771f7e..7c3e06c4d606 100644 --- a/doc/source/data/getting-started.rst +++ b/doc/source/data/getting-started.rst @@ -63,7 +63,7 @@ transform datastreams. 
Ray executes transformations in parallel for performance. def transform_batch(df: pd.DataFrame) -> pd.DataFrame: return df[(df["sepal length (cm)"] < 5.5) & (df["petal length (cm)"] > 3.5)] - transformed_ds = datastream.map_batches(transform_batch) + transformed_ds = datastream.map_batches(transform_batch, batch_format="pandas") print(transformed_ds) .. testoutput:: @@ -103,12 +103,11 @@ Pass datastreams to Ray tasks or actors, and access records with methods like .. testoutput:: :options: +NORMALIZE_WHITESPACE - sepal length (cm) ... target - 0 5.2 ... 1 - 1 5.4 ... 1 - 2 4.9 ... 2 - - [3 rows x 5 columns] + {'sepal length (cm)': array([5.2, 5.4, 4.9]), + 'sepal width (cm)': array([2.7, 3. , 2.5]), + 'petal length (cm)': array([3.9, 4.5, 4.5]), + 'petal width (cm)': array([1.4, 1.5, 1.7]), + 'target': array([1, 1, 2])} .. tab-item:: Tasks diff --git a/doc/source/data/glossary.rst b/doc/source/data/glossary.rst index 6547032b3490..2ef928403554 100644 --- a/doc/source/data/glossary.rst +++ b/doc/source/data/glossary.rst @@ -20,16 +20,16 @@ Ray Data Glossary >>> # Datastream is executed by streaming executor by default, which doesn't >>> # preserve the order, so we explicitly set it here. >>> ray.data.context.DataContext.get_current().execution_options.preserve_order = True - >>> datastream = ray.data.range_table(10) + >>> datastream = ray.data.range(10) >>> next(iter(datastream.iter_batches(batch_format="numpy", batch_size=5))) - {'value': array([0, 1, 2, 3, 4])} + {'id': array([0, 1, 2, 3, 4])} >>> next(iter(datastream.iter_batches(batch_format="pandas", batch_size=5))) - value - 0 0 - 1 1 - 2 2 - 3 3 - 4 4 + id + 0 0 + 1 1 + 2 2 + 3 3 + 4 4 To learn more about batch formats, read :ref:`UDF Input Batch Formats `. 
@@ -106,7 +106,7 @@ Ray Data Glossary >>> import ray >>> ray.data.from_items(["spam", "ham", "eggs"]) - MaterializedDatastream(num_blocks=3, num_rows=3, schema=) + MaterializedDatastream(num_blocks=3, num_rows=3, schema={item: string}) Tensor Datastream A Datastream that represents a collection of ndarrays. @@ -121,7 +121,7 @@ Ray Data Glossary MaterializedDatastream( num_blocks=1, num_rows=100, - schema={__value__: numpy.ndarray(shape=(32, 32, 3), dtype=double)} + schema={data: numpy.ndarray(shape=(32, 32, 3), dtype=double)} ) Tabular Datastream diff --git a/doc/source/data/transforming-datastreams.rst b/doc/source/data/transforming-datastreams.rst index c7489ad7d62e..750becbf808d 100644 --- a/doc/source/data/transforming-datastreams.rst +++ b/doc/source/data/transforming-datastreams.rst @@ -444,7 +444,7 @@ Compute Strategy Datastreams transformations are executed by either :ref:`Ray tasks ` or :ref:`Ray actors ` across a Ray cluster. By default, Ray tasks are -used (with ``compute="tasks"``). For transformations that require expensive setup, +used. For transformations that require expensive setup, it's preferrable to use Ray actors, which are stateful and allow setup to be reused for efficiency. For a fixed-size actor pool, specify ``compute=ActorPoolStrategy(size=n)``. For an autoscaling actor pool, use ``compute=ray.data.ActorPoolStrategy(min_size=m, max_size=n)``. 
diff --git a/doc/source/ray-air/doc_code/computer_vision.py b/doc/source/ray-air/doc_code/computer_vision.py index d409103154c7..e2c2905786f3 100644 --- a/doc/source/ray-air/doc_code/computer_vision.py +++ b/doc/source/ray-air/doc_code/computer_vision.py @@ -73,9 +73,8 @@ def read_numpy(): # __read_numpy2_start__ dataset = images.zip(labels) dataset = dataset.map_batches( - lambda batch: batch.rename( - columns={"__value__": "image", "__value___1": "label"} - ) + lambda batch: batch.rename(columns={"data": "image", "data_1": "label"}), + batch_format="pandas", ) # __read_numpy2_stop__ return dataset diff --git a/doc/source/ray-air/doc_code/preprocessors.py b/doc/source/ray-air/doc_code/preprocessors.py index 3cdc2e4b7bc7..acfad704862f 100644 --- a/doc/source/ray-air/doc_code/preprocessors.py +++ b/doc/source/ray-air/doc_code/preprocessors.py @@ -8,36 +8,36 @@ from ray.data.preprocessors.scaler import StandardScaler # Generate two simple datasets. -dataset = ray.data.range_table(8) +dataset = ray.data.range(8) dataset1, dataset2 = dataset.split(2) print(dataset1.take()) -# [{'value': 0}, {'value': 1}, {'value': 2}, {'value': 3}] +# [{'id': 0}, {'id': 1}, {'id': 2}, {'id': 3}] print(dataset2.take()) -# [{'value': 4}, {'value': 5}, {'value': 6}, {'value': 7}] +# [{'id': 4}, {'id': 5}, {'id': 6}, {'id': 7}] # __preprocessor_setup_end__ # __preprocessor_fit_transform_start__ # Fit the preprocessor on dataset1, and transform both dataset1 and dataset2. 
-preprocessor = MinMaxScaler(["value"]) +preprocessor = MinMaxScaler(["id"]) dataset1_transformed = preprocessor.fit_transform(dataset1) print(dataset1_transformed.take()) -# [{'value': 0.0}, {'value': 0.3333333333333333}, {'value': 0.6666666666666666}, {'value': 1.0}] +# [{'id': 0.0}, {'id': 0.3333333333333333}, {'id': 0.6666666666666666}, {'id': 1.0}] dataset2_transformed = preprocessor.transform(dataset2) print(dataset2_transformed.take()) -# [{'value': 1.3333333333333333}, {'value': 1.6666666666666667}, {'value': 2.0}, {'value': 2.3333333333333335}] +# [{'id': 1.3333333333333333}, {'id': 1.6666666666666667}, {'id': 2.0}, {'id': 2.3333333333333335}] # __preprocessor_fit_transform_end__ # __preprocessor_transform_batch_start__ -batch = pd.DataFrame({"value": list(range(8, 12))}) +batch = pd.DataFrame({"id": list(range(8, 12))}) batch_transformed = preprocessor.transform_batch(batch) print(batch_transformed) -# value +# id # 0 2.666667 # 1 3.000000 # 2 3.333333 @@ -110,16 +110,16 @@ # Generate one simple dataset. dataset = ray.data.from_items( - [{"value": 0}, {"value": 1}, {"value": 2}, {"value": 3}, {"value": None}] + [{"id": 0}, {"id": 1}, {"id": 2}, {"id": 3}, {"id": None}] ) print(dataset.take()) -# [{'value': 0}, {'value': 1}, {'value': 2}, {'value': 3}, {'value': None}] +# [{'id': 0}, {'id': 1}, {'id': 2}, {'id': 3}, {'id': None}] -preprocessor = Chain(SimpleImputer(["value"]), MinMaxScaler(["value"])) +preprocessor = Chain(SimpleImputer(["id"]), MinMaxScaler(["id"])) dataset_transformed = preprocessor.fit_transform(dataset) print(dataset_transformed.take()) -# [{'value': 0.0}, {'value': 0.3333333333333333}, {'value': 0.6666666666666666}, {'value': 1.0}, {'value': 0.5}] +# [{'id': 0.0}, {'id': 0.3333333333333333}, {'id': 0.6666666666666666}, {'id': 1.0}, {'id': 0.5}] # __chain_end__ @@ -128,15 +128,15 @@ from ray.data.preprocessors import BatchMapper # Generate a simple dataset. 
-dataset = ray.data.range_table(4) +dataset = ray.data.range(4) print(dataset.take()) -# [{'value': 0}, {'value': 1}, {'value': 2}, {'value': 3}] +# [{'id': 0}, {'id': 1}, {'id': 2}, {'id': 3}] -# Create a stateless preprocess that multiplies values by 2. +# Create a stateless preprocess that multiplies ids by 2. preprocessor = BatchMapper(lambda df: df * 2, batch_size=2, batch_format="pandas") dataset_transformed = preprocessor.transform(dataset) print(dataset_transformed.take()) -# [{'value': 0}, {'value': 2}, {'value': 4}, {'value': 6}] +# [{'id': 0}, {'id': 2}, {'id': 4}, {'id': 6}] # __custom_stateless_end__ @@ -151,22 +151,22 @@ class CustomPreprocessor(Preprocessor): def _fit(self, dataset: Dataset) -> Preprocessor: - self.stats_ = dataset.aggregate(Max("value")) + self.stats_ = dataset.aggregate(Max("id")) def _transform_pandas(self, df: DataFrame) -> DataFrame: - return df * self.stats_["max(value)"] + return df * self.stats_["max(id)"] # Generate a simple dataset. -dataset = ray.data.range_table(4) +dataset = ray.data.range(4) print(dataset.take()) -# [{'value': 0}, {'value': 1}, {'value': 2}, {'value': 3}] +# [{'id': 0}, {'id': 1}, {'id': 2}, {'id': 3}] -# Create a stateful preprocessor that finds the max value and scales each value by it. +# Create a stateful preprocessor that finds the max id and scales each id by it. preprocessor = CustomPreprocessor() dataset_transformed = preprocessor.fit_transform(dataset) print(dataset_transformed.take()) -# [{'value': 0}, {'value': 3}, {'value': 6}, {'value': 9}] +# [{'id': 0}, {'id': 3}, {'id': 6}, {'id': 9}] # __custom_stateful_end__ @@ -174,14 +174,14 @@ def _transform_pandas(self, df: DataFrame) -> DataFrame: from ray.data.preprocessors import SimpleImputer # Generate a simple dataset. 
-dataset = ray.data.from_items([{"value": 1.0}, {"value": None}, {"value": 3.0}]) +dataset = ray.data.from_items([{"id": 1.0}, {"id": None}, {"id": 3.0}]) print(dataset.take()) -# [{'value': 1.0}, {'value': None}, {'value': 3.0}] +# [{'id': 1.0}, {'id': None}, {'id': 3.0}] -imputer = SimpleImputer(columns=["value"], strategy="mean") +imputer = SimpleImputer(columns=["id"], strategy="mean") dataset_transformed = imputer.fit_transform(dataset) print(dataset_transformed.take()) -# [{'value': 1.0}, {'value': 2.0}, {'value': 3.0}] +# [{'id': 1.0}, {'id': 2.0}, {'id': 3.0}] # __simple_imputer_end__ diff --git a/doc/source/ray-air/examples/convert_existing_pytorch_code_to_ray_air.ipynb b/doc/source/ray-air/examples/convert_existing_pytorch_code_to_ray_air.ipynb index 4ead1c2fc71b..76db9ed3b508 100644 --- a/doc/source/ray-air/examples/convert_existing_pytorch_code_to_ray_air.ipynb +++ b/doc/source/ray-air/examples/convert_existing_pytorch_code_to_ray_air.ipynb @@ -1229,7 +1229,7 @@ ], "source": [ "predicted_classes = results.map_batches(\n", - " lambda batch: [classes[pred.argmax(0)] for pred in batch[\"predictions\"]], \n", + " lambda batch: {\"pred\": [classes[pred.argmax(0)] for pred in batch[\"predictions\"]]}, \n", " batch_size=32,\n", " batch_format=\"pandas\")" ] @@ -1277,7 +1277,7 @@ ], "source": [ "real_classes = [classes[y] for x, y in test_data]\n", - "for predicted, real in zip(predicted_classes.take(), real_classes):\n", + "for predicted, real in zip(predicted_classes.take_batch()[\"pred\"], real_classes):\n", " print((predicted, real))" ] }, diff --git a/doc/source/ray-air/examples/convert_existing_tf_code_to_ray_air.ipynb b/doc/source/ray-air/examples/convert_existing_tf_code_to_ray_air.ipynb index 4bc43970e9ac..e3e6870ac9be 100644 --- a/doc/source/ray-air/examples/convert_existing_tf_code_to_ray_air.ipynb +++ b/doc/source/ray-air/examples/convert_existing_tf_code_to_ray_air.ipynb @@ -803,10 +803,10 @@ ], "source": [ "predicted_classes = 
predict_results.map_batches(\n", - " lambda batch: [pred.argmax(0) for pred in batch[\"predictions\"]], \n", + " lambda batch: {\"pred\": [pred.argmax(0) for pred in batch[\"predictions\"]]}, \n", " batch_format=\"pandas\"\n", ")\n", - "predicted_classes_np = predicted_classes.take_all()\n", + "predicted_classes_np = predicted_classes.take_batch(float(\"inf\"))[\"pred\"]\n", "\n", "pred_accuracy = (predicted_classes_np == y_test).astype(int).sum() / len(predicted_classes_np)\n", "print(\"Prediction Accuracy =\", pred_accuracy)" diff --git a/doc/source/ray-air/examples/torch_image_example.ipynb b/doc/source/ray-air/examples/torch_image_example.ipynb index 1e53e1e857d7..d750bf14ad30 100644 --- a/doc/source/ray-air/examples/torch_image_example.ipynb +++ b/doc/source/ray-air/examples/torch_image_example.ipynb @@ -189,9 +189,9 @@ "import torch\n", "\n", "\n", - "def convert_batch_to_numpy(batch: Tuple[Image, int]) -> Dict[str, np.ndarray]:\n", - " images = np.stack([np.array(image) for image, _ in batch])\n", - " labels = np.array([label for _, label in batch])\n", + "def convert_batch_to_numpy(batch) -> Dict[str, np.ndarray]:\n", + " images = np.stack([np.array(image) for image, _ in batch[\"item\"]])\n", + " labels = np.array([label for _, label in batch[\"item\"]])\n", " return {\"image\": images, \"label\": labels}\n", "\n", "\n", @@ -614,7 +614,7 @@ " return df[[\"prediction\", \"label\"]]\n", "\n", "\n", - "predictions = outputs.map_batches(convert_logits_to_classes)\n", + "predictions = outputs.map_batches(convert_logits_to_classes, batch_format=\"pandas\")\n", "\n", "predictions.show(1)" ] @@ -665,7 +665,7 @@ " return df\n", "\n", "\n", - "scores = predictions.map_batches(calculate_prediction_scores)\n", + "scores = predictions.map_batches(calculate_prediction_scores, batch_format=\"pandas\")\n", "\n", "scores.show(1)" ] diff --git a/doc/source/ray-air/examples/torch_incremental_learning.ipynb b/doc/source/ray-air/examples/torch_incremental_learning.ipynb index 
40c38c2ef7c8..21fdbf6b831b 100644 --- a/doc/source/ray-air/examples/torch_incremental_learning.ipynb +++ b/doc/source/ray-air/examples/torch_incremental_learning.ipynb @@ -289,8 +289,8 @@ " mnist_dataset = ray.data.from_torch(mnist_dataset)\n", " \n", " def convert_batch_to_numpy(batch):\n", - " images = np.array([np.array(item[0]) for item in batch])\n", - " labels = np.array([item[1] for item in batch])\n", + " images = np.array([np.array(item[0]) for item in batch[\"item\"]])\n", + " labels = np.array([item[1] for item in batch[\"item\"]])\n", "\n", " return {\"image\": images, \"label\": labels}\n", "\n", diff --git a/doc/source/ray-core/_examples/datasets_train/datasets_train.py b/doc/source/ray-core/_examples/datasets_train/datasets_train.py index 09874f87a909..7ced414ae374 100644 --- a/doc/source/ray-core/_examples/datasets_train/datasets_train.py +++ b/doc/source/ray-core/_examples/datasets_train/datasets_train.py @@ -268,7 +268,7 @@ def inference( num_gpus = 1 if use_gpu else 0 dataset.map_batches( model_cls, - compute="actors", + compute=ray.data.ActorPoolStrategy(), batch_size=batch_size, batch_format="pandas", num_gpus=num_gpus, diff --git a/python/ray/air/tests/test_dataset_config.py b/python/ray/air/tests/test_dataset_config.py index 0f1a919337d4..0723094a4f5b 100644 --- a/python/ray/air/tests/test_dataset_config.py +++ b/python/ray/air/tests/test_dataset_config.py @@ -60,7 +60,7 @@ class TestWildcard(TestBasic): def test_basic(ray_start_4_cpus): - ds = ray.data.range_table(10) + ds = ray.data.range(10) # Single worker basic case. test = TestBasic( @@ -105,7 +105,7 @@ def test_basic(ray_start_4_cpus): def test_error(ray_start_4_cpus): - ds = ray.data.range_table(10) + ds = ray.data.range(10) # Missing required dataset. with pytest.raises(ValueError): @@ -136,7 +136,7 @@ def test_error(ray_start_4_cpus): def test_use_stream_api_config(ray_start_4_cpus): - ds = ray.data.range_table(10) + ds = ray.data.range(10) # Single worker basic case. 
test = TestBasic( @@ -160,14 +160,14 @@ def test_use_stream_api_config(ray_start_4_cpus): def test_fit_transform_config(ray_start_4_cpus): - ds = ray.data.range_table(10) + ds = ray.data.range(10) def drop_odd_pandas(batch): - return batch[batch["value"] % 2 == 0] + return batch[batch["id"] % 2 == 0] def drop_odd_numpy(batch): - arr = batch["value"] - return arr[arr % 2 == 0] + arr = batch["id"] + return {"id": arr[arr % 2 == 0]} prep_pandas = BatchMapper(drop_odd_pandas, batch_format="pandas") prep_numpy = BatchMapper(drop_odd_numpy, batch_format="numpy") @@ -232,7 +232,7 @@ def train_loop_per_worker(data_shard, check_results_fn): for _ in range(2): result = [] for batch in data_shard.iter_batches(): - for row in batch["value"]: + for row in batch["id"]: result.append(row) results.append(result) check_results_fn(data_shard, results) @@ -255,11 +255,11 @@ def checker(shard, results): assert "Stage 1 ReadRange->BatchMapper: 1/1 blocks executed " in stats, stats def rand(x): - x["value"] = x["value"].multiply(x["value"]) + x["id"] = x["id"].multiply(x["id"]) return x prep = BatchMapper(rand, batch_format="pandas") - ds = ray.data.range_table(5, parallelism=1) + ds = ray.data.range(5, parallelism=1) test = TestStream( checker, preprocessor=prep, @@ -271,11 +271,11 @@ def rand(x): def test_stream_finite_window_nocache_prep(ray_start_4_cpus): def rand(x): - x["value"] = [random.random() for _ in range(len(x))] + x["id"] = [random.random() for _ in range(len(x))] return x prep = BatchMapper(rand, batch_format="pandas") - ds = ray.data.range_table(5, parallelism=1) + ds = ray.data.range(5, parallelism=1) # Test 50% object store memory.. 
def checker(shard, results): @@ -305,12 +305,12 @@ def test_stream_transform_config(ray_start_4_cpus): def check_batch(batch): assert isinstance(batch, dict) - assert isinstance(batch["value"], np.ndarray) - assert len(batch["value"]) == batch_size + assert isinstance(batch["id"], np.ndarray) + assert len(batch["id"]) == batch_size return batch prep = BatchMapper(check_batch, batch_format="numpy", batch_size=2) - ds = ray.data.range_table(6, parallelism=1) + ds = ray.data.range(6, parallelism=1) test = TestStream( lambda *args: None, @@ -327,7 +327,7 @@ def checker(shard, results): stats = shard.stats() assert "RandomizeBlockOrder->RandomShuffle" in stats, stats - ds = ray.data.range_table(5) + ds = ray.data.range(5) test = TestStream( checker, datasets={"train": ds}, @@ -341,7 +341,7 @@ def checker(shard, results): stats = shard.stats() assert "Stage 1 ReadRange->RandomShuffle" in stats, stats - ds = ray.data.range_table(5) + ds = ray.data.range(5) test = TestBatch( checker, datasets={"train": ds}, @@ -357,7 +357,7 @@ def checker(shard, results): stats = shard.stats() assert "RandomizeBlockOrder: 5/5 blocks executed in" in stats, stats - ds = ray.data.range_table(5) + ds = ray.data.range(5) test = TestStream( checker, datasets={"train": ds}, @@ -368,7 +368,7 @@ def checker(shard, results): stats = shard.stats() assert "RandomizeBlockOrder" not in stats, stats - ds = ray.data.range_table(5) + ds = ray.data.range(5) test = TestStream( checker, datasets={"train": ds}, @@ -384,7 +384,7 @@ def checker(shard, results): stats = shard.stats() assert "RandomizeBlockOrder: 5/5 blocks executed" in stats, stats - ds = ray.data.range_table(5) + ds = ray.data.range(5) test = TestBatch( checker, datasets={"train": ds}, @@ -399,7 +399,7 @@ def checker(shard, results): stats = shard.stats() assert "RandomizeBlockOrder: 5/5 blocks executed in" in stats, stats - ds = ray.data.range_table(5) + ds = ray.data.range(5) test = TestStream( checker, datasets={"train": ds}, @@ -426,7 +426,7 
@@ def check_error(shard, results): def test_deterministic_per_epoch_preprocessor( ray_start_4_cpus, max_object_store_memory_fraction ): - ds = ray.data.range_table(5) + ds = ray.data.range(5) def multiply(x): return x * 2 @@ -477,7 +477,7 @@ def checker(shard, results): def test_nondeterministic_per_epoch_preprocessor( ray_start_4_cpus, max_object_store_memory_fraction ): - ds = ray.data.range_table(5) + ds = ray.data.range(5) # Use randomized per-epoch preprocessor to check that it gets applied once # per epoch. @@ -504,7 +504,7 @@ def checker(shard, results): def test_validate_per_epoch_preprocessor(ray_start_4_cpus): - ds = ray.data.range_table(5) + ds = ray.data.range(5) def multiply(x): return x * 2 diff --git a/python/ray/data/__init__.py b/python/ray/data/__init__.py index c47e95f52557..cb7b3d1c812f 100644 --- a/python/ray/data/__init__.py +++ b/python/ray/data/__init__.py @@ -6,7 +6,7 @@ from ray.data._internal.progress_bar import set_progress_bars from ray.data._internal.execution.interfaces import ExecutionOptions, ExecutionResources from ray.data.dataset import Dataset -from ray.data.datastream import Datastream +from ray.data.datastream import Datastream, Schema from ray.data.context import DatasetContext, DataContext from ray.data.iterator import DatasetIterator, DataIterator from ray.data.dataset_pipeline import DatasetPipeline @@ -65,6 +65,7 @@ "ExecutionOptions", "ExecutionResources", "ReadTask", + "Schema", "from_dask", "from_items", "from_arrow", diff --git a/python/ray/data/_internal/arrow_block.py b/python/ray/data/_internal/arrow_block.py index 82f65a07ac89..cb97eb0d67a4 100644 --- a/python/ray/data/_internal/arrow_block.py +++ b/python/ray/data/_internal/arrow_block.py @@ -23,13 +23,13 @@ TableBlockAccessor, TableBlockBuilder, ) +from ray.data._internal.util import _truncated_repr from ray.data.aggregate import AggregateFn from ray.data.block import ( Block, BlockAccessor, BlockExecStats, BlockMetadata, - KeyFn, KeyType, U, ) @@ -105,7 +105,7 
@@ def __len__(self): return self._row.num_columns -class ArrowBlockBuilder(TableBlockBuilder[T]): +class ArrowBlockBuilder(TableBlockBuilder): def __init__(self): if pyarrow is None: raise ImportError("Run `pip install pyarrow` for Arrow support") @@ -167,7 +167,8 @@ def numpy_to_block( ): raise ValueError( "Batch must be an ndarray or dictionary of ndarrays when converting " - f"a numpy batch to a block, got: {type(batch)}" + f"a numpy batch to a block, got: {type(batch)} " + f"({_truncated_repr(batch)})" ) new_batch = {} for col_name, col in batch.items(): @@ -293,7 +294,7 @@ def num_rows(self) -> int: def size_bytes(self) -> int: return self._table.nbytes - def _zip(self, acc: BlockAccessor) -> "Block[T]": + def _zip(self, acc: BlockAccessor) -> "Block": r = self.to_arrow() s = acc.to_arrow() for col_name in s.column_names: @@ -310,7 +311,7 @@ def _zip(self, acc: BlockAccessor) -> "Block[T]": return r @staticmethod - def builder() -> ArrowBlockBuilder[T]: + def builder() -> ArrowBlockBuilder: return ArrowBlockBuilder() @staticmethod @@ -328,7 +329,7 @@ def take( """ return transform_pyarrow.take_table(self._table, indices) - def select(self, columns: List[KeyFn]) -> "pyarrow.Table": + def select(self, columns: List[str]) -> "pyarrow.Table": if not all(isinstance(col, str) for col in columns): raise ValueError( "Columns must be a list of column name strings when aggregating on " @@ -341,7 +342,7 @@ def _sample(self, n_samples: int, key: "SortKeyT") -> "pyarrow.Table": table = self._table.select([k[0] for k in key]) return transform_pyarrow.take_table(table, indices) - def count(self, on: KeyFn) -> Optional[U]: + def count(self, on: str) -> Optional[U]: """Count the number of non-null values in the provided column.""" import pyarrow.compute as pac @@ -358,7 +359,7 @@ def count(self, on: KeyFn) -> Optional[U]: return pac.count(col).as_py() def _apply_arrow_compute( - self, compute_fn: Callable, on: KeyFn, ignore_nulls: bool + self, compute_fn: Callable, on: str, 
ignore_nulls: bool ) -> Optional[U]: """Helper providing null handling around applying an aggregation to a column.""" import pyarrow as pa @@ -378,29 +379,29 @@ def _apply_arrow_compute( else: return compute_fn(col, skip_nulls=ignore_nulls).as_py() - def sum(self, on: KeyFn, ignore_nulls: bool) -> Optional[U]: + def sum(self, on: str, ignore_nulls: bool) -> Optional[U]: import pyarrow.compute as pac return self._apply_arrow_compute(pac.sum, on, ignore_nulls) - def min(self, on: KeyFn, ignore_nulls: bool) -> Optional[U]: + def min(self, on: str, ignore_nulls: bool) -> Optional[U]: import pyarrow.compute as pac return self._apply_arrow_compute(pac.min, on, ignore_nulls) - def max(self, on: KeyFn, ignore_nulls: bool) -> Optional[U]: + def max(self, on: str, ignore_nulls: bool) -> Optional[U]: import pyarrow.compute as pac return self._apply_arrow_compute(pac.max, on, ignore_nulls) - def mean(self, on: KeyFn, ignore_nulls: bool) -> Optional[U]: + def mean(self, on: str, ignore_nulls: bool) -> Optional[U]: import pyarrow.compute as pac return self._apply_arrow_compute(pac.mean, on, ignore_nulls) def sum_of_squared_diffs_from_mean( self, - on: KeyFn, + on: str, ignore_nulls: bool, mean: Optional[U] = None, ) -> Optional[U]: @@ -422,7 +423,7 @@ def sum_of_squared_diffs_from_mean( def sort_and_partition( self, boundaries: List[T], key: "SortKeyT", descending: bool - ) -> List["Block[T]"]: + ) -> List["Block"]: if len(key) > 1: raise NotImplementedError( "sorting by multiple columns is not supported yet" @@ -461,7 +462,7 @@ def sort_and_partition( partitions.append(table.slice(last_idx)) return partitions - def combine(self, key: KeyFn, aggs: Tuple[AggregateFn]) -> Block[ArrowRow]: + def combine(self, key: str, aggs: Tuple[AggregateFn]) -> Block: """Combine rows with the same key into an accumulator. This assumes the block is already sorted by key in ascending order. 
@@ -490,7 +491,7 @@ def iter_groups() -> Iterator[Tuple[KeyType, Block]]: return start = end = 0 - iter = self.iter_rows() + iter = self.iter_rows(public_row_format=False) next_row = None while True: try: @@ -540,8 +541,8 @@ def _munge_conflict(name, count): @staticmethod def merge_sorted_blocks( - blocks: List[Block[T]], key: "SortKeyT", _descending: bool - ) -> Tuple[Block[T], BlockMetadata]: + blocks: List[Block], key: "SortKeyT", _descending: bool + ) -> Tuple[Block, BlockMetadata]: stats = BlockExecStats.builder() blocks = [b for b in blocks if b.num_rows > 0] if len(blocks) == 0: @@ -553,11 +554,11 @@ def merge_sorted_blocks( @staticmethod def aggregate_combined_blocks( - blocks: List[Block[ArrowRow]], - key: KeyFn, + blocks: List[Block], + key: str, aggs: Tuple[AggregateFn], finalize: bool, - ) -> Tuple[Block[ArrowRow], BlockMetadata]: + ) -> Tuple[Block, BlockMetadata]: """Aggregate sorted, partially combined blocks with the same key range. This assumes blocks are already sorted by key in ascending order, @@ -584,7 +585,11 @@ def aggregate_combined_blocks( ) iter = heapq.merge( - *[ArrowBlockAccessor(block).iter_rows() for block in blocks], key=key_fn + *[ + ArrowBlockAccessor(block).iter_rows(public_row_format=False) + for block in blocks + ], + key=key_fn, ) next_row = None builder = ArrowBlockBuilder() diff --git a/python/ray/data/_internal/block_builder.py b/python/ray/data/_internal/block_builder.py index 0d64ddadb26f..27787f088a2c 100644 --- a/python/ray/data/_internal/block_builder.py +++ b/python/ray/data/_internal/block_builder.py @@ -7,7 +7,7 @@ class BlockBuilder(Generic[T]): """A builder class for blocks.""" @staticmethod - def for_block(block: Block) -> "BlockBuilder[T]": + def for_block(block: Block) -> "BlockBuilder": return BlockAccessor.for_block(block).builder() def add(self, item: T) -> None: diff --git a/python/ray/data/_internal/compute.py b/python/ray/data/_internal/compute.py index 63d4ce9e550d..491d4d29b8cc 100644 --- 
a/python/ray/data/_internal/compute.py +++ b/python/ray/data/_internal/compute.py @@ -10,14 +10,13 @@ from ray.data._internal.progress_bar import ProgressBar from ray.data._internal.remote_fn import cached_remote_fn from ray.data.block import ( - BatchUDF, + UserDefinedFunction, Block, BlockAccessor, BlockExecStats, BlockMetadata, BlockPartition, CallableClass, - RowUDF, StrictModeError, ) from ray.data.context import DEFAULT_SCHEDULING_STRATEGY, DataContext @@ -37,15 +36,12 @@ # TODO(Clark): Once Ray only supports Python 3.8+, use protocol to constrain block # transform type. # Callable[[Block, ...], Iterable[Block]] - # Callable[[Block, BatchUDF, ...], Iterable[Block]], + # Callable[[Block, UserDefinedFunction, ...], Iterable[Block]], Callable[[Iterable[Block], TaskContext], Iterable[Block]], - Callable[[Iterable[Block], TaskContext, Union[BatchUDF, RowUDF]], Iterable[Block]], + Callable[[Iterable[Block], TaskContext, UserDefinedFunction], Iterable[Block]], Callable[..., Iterable[Block]], ] -# UDF on a batch or row. 
-UDF = Union[BatchUDF, RowUDF] - @DeveloperAPI class ComputeStrategy: @@ -69,7 +65,7 @@ def _apply( clear_input_blocks: bool, name: Optional[str] = None, target_block_size: Optional[int] = None, - fn: Optional[UDF] = None, + fn: Optional[UserDefinedFunction] = None, fn_args: Optional[Iterable[Any]] = None, fn_kwargs: Optional[Dict[str, Any]] = None, fn_constructor_args: Optional[Iterable[Any]] = None, @@ -277,7 +273,7 @@ def _apply( clear_input_blocks: bool, name: Optional[str] = None, target_block_size: Optional[int] = None, - fn: Optional[UDF] = None, + fn: Optional[UserDefinedFunction] = None, fn_args: Optional[Iterable[Any]] = None, fn_kwargs: Optional[Dict[str, Any]] = None, fn_constructor_args: Optional[Iterable[Any]] = None, @@ -531,7 +527,7 @@ def is_task_compute(compute_spec: Union[str, ComputeStrategy]) -> bool: def _map_block_split( block_fn: BlockTransform, input_files: List[str], - fn: Optional[UDF], + fn: Optional[UserDefinedFunction], num_blocks: int, *blocks_and_fn_args: Union[Block, Any], **fn_kwargs, @@ -559,7 +555,7 @@ def _map_block_split( def _map_block_nosplit( block_fn: BlockTransform, input_files: List[str], - fn: Optional[UDF], + fn: Optional[UserDefinedFunction], num_blocks: int, *blocks_and_fn_args: Union[Block, Any], **fn_kwargs, diff --git a/python/ray/data/_internal/delegating_block_builder.py b/python/ray/data/_internal/delegating_block_builder.py index 1232cf922e0e..47baf887caf8 100644 --- a/python/ray/data/_internal/delegating_block_builder.py +++ b/python/ray/data/_internal/delegating_block_builder.py @@ -4,14 +4,14 @@ import numpy as np import ray -from ray.data.block import Block, DataBatch, T, BlockAccessor +from ray.data.block import Block, DataBatch, BlockAccessor from ray.data._internal.block_builder import BlockBuilder from ray.data._internal.simple_block import SimpleBlockBuilder from ray.data._internal.arrow_block import ArrowRow, ArrowBlockBuilder from ray.data._internal.pandas_block import PandasRow, PandasBlockBuilder 
-class DelegatingBlockBuilder(BlockBuilder[T]): +class DelegatingBlockBuilder(BlockBuilder): def __init__(self): self._builder = None self._empty_block = None diff --git a/python/ray/data/_internal/execution/util.py b/python/ray/data/_internal/execution/util.py index 8674ed60cccf..4a87f12c0a4a 100644 --- a/python/ray/data/_internal/execution/util.py +++ b/python/ray/data/_internal/execution/util.py @@ -1,22 +1,24 @@ from concurrent.futures import ThreadPoolExecutor -from typing import List, TYPE_CHECKING +from typing import List, Any, TYPE_CHECKING import ray -from ray.data.block import Block, BlockAccessor, CallableClass +from ray.data.block import BlockAccessor, CallableClass if TYPE_CHECKING: from ray.data._internal.execution.interfaces import RefBundle -def make_ref_bundles(simple_data: List[Block]) -> List["RefBundle"]: +def make_ref_bundles(simple_data: List[List[Any]]) -> List["RefBundle"]: """Create ref bundles from a list of block data. One bundle is created for each input block. """ from ray.data._internal.execution.interfaces import RefBundle + import pandas as pd output = [] for block in simple_data: + block = pd.DataFrame({"id": block}) output.append( RefBundle( [ diff --git a/python/ray/data/_internal/fast_repartition.py b/python/ray/data/_internal/fast_repartition.py index 461dd8d683df..fe7e8de45606 100644 --- a/python/ray/data/_internal/fast_repartition.py +++ b/python/ray/data/_internal/fast_repartition.py @@ -12,7 +12,7 @@ def fast_repartition(blocks, num_blocks, ctx: Optional[TaskContext] = None): - from ray.data.datastream import Datastream + from ray.data.datastream import Datastream, Schema wrapped_ds = Datastream( ExecutionPlan( @@ -61,6 +61,8 @@ def fast_repartition(blocks, num_blocks, ctx: Optional[TaskContext] = None): # Schema is safe to fetch here since we have already called # get_internal_block_refs and executed the datastream. 
schema = wrapped_ds.schema(fetch_if_missing=True) + if isinstance(schema, Schema): + schema = schema.base_schema # Early-release memory. del splits, blocks, wrapped_ds diff --git a/python/ray/data/_internal/logical/operators/all_to_all_operator.py b/python/ray/data/_internal/logical/operators/all_to_all_operator.py index 9dacd39ad5ec..95cd231065e2 100644 --- a/python/ray/data/_internal/logical/operators/all_to_all_operator.py +++ b/python/ray/data/_internal/logical/operators/all_to_all_operator.py @@ -2,7 +2,6 @@ from ray.data._internal.logical.interfaces import LogicalOperator from ray.data.aggregate import AggregateFn -from ray.data.block import KeyFn class AbstractAllToAll(LogicalOperator): @@ -89,7 +88,7 @@ class Sort(AbstractAllToAll): def __init__( self, input_op: LogicalOperator, - key: Optional[KeyFn], + key: Optional[str], descending: bool, ): super().__init__( @@ -106,7 +105,7 @@ class Aggregate(AbstractAllToAll): def __init__( self, input_op: LogicalOperator, - key: Optional[KeyFn], + key: Optional[str], aggs: List[AggregateFn], ): super().__init__( diff --git a/python/ray/data/_internal/logical/operators/map_operator.py b/python/ray/data/_internal/logical/operators/map_operator.py index 906b07a892d9..f64bd482a85c 100644 --- a/python/ray/data/_internal/logical/operators/map_operator.py +++ b/python/ray/data/_internal/logical/operators/map_operator.py @@ -1,8 +1,8 @@ from typing import Any, Dict, Iterable, Optional, Union from ray.data._internal.logical.interfaces import LogicalOperator -from ray.data._internal.compute import UDF, ComputeStrategy, TaskPoolStrategy -from ray.data.block import BatchUDF, RowUDF +from ray.data._internal.compute import ComputeStrategy, TaskPoolStrategy +from ray.data.block import UserDefinedFunction from ray.data.context import DEFAULT_BATCH_SIZE @@ -38,7 +38,7 @@ def __init__( self, name: str, input_op: LogicalOperator, - fn: UDF, + fn: UserDefinedFunction, fn_args: Optional[Iterable[Any]] = None, fn_kwargs: 
Optional[Dict[str, Any]] = None, fn_constructor_args: Optional[Iterable[Any]] = None, @@ -81,7 +81,7 @@ class MapBatches(AbstractUDFMap): def __init__( self, input_op: LogicalOperator, - fn: BatchUDF, + fn: UserDefinedFunction, batch_size: Optional[int] = DEFAULT_BATCH_SIZE, batch_format: Optional[str] = "default", zero_copy_batch: bool = False, @@ -116,7 +116,7 @@ class MapRows(AbstractUDFMap): def __init__( self, input_op: LogicalOperator, - fn: RowUDF, + fn: UserDefinedFunction, compute: Optional[Union[str, ComputeStrategy]] = None, ray_remote_args: Optional[Dict[str, Any]] = None, ): @@ -135,7 +135,7 @@ class Filter(AbstractUDFMap): def __init__( self, input_op: LogicalOperator, - fn: RowUDF, + fn: UserDefinedFunction, compute: Optional[Union[str, ComputeStrategy]] = None, ray_remote_args: Optional[Dict[str, Any]] = None, ): @@ -154,7 +154,7 @@ class FlatMap(AbstractUDFMap): def __init__( self, input_op: LogicalOperator, - fn: RowUDF, + fn: UserDefinedFunction, compute: Optional[Union[str, ComputeStrategy]] = None, ray_remote_args: Optional[Dict[str, Any]] = None, ): diff --git a/python/ray/data/_internal/null_aggregate.py b/python/ray/data/_internal/null_aggregate.py index dc1f969ea2c5..f5b6a5763fa8 100644 --- a/python/ray/data/_internal/null_aggregate.py +++ b/python/ray/data/_internal/null_aggregate.py @@ -190,9 +190,9 @@ def _accum(a: WrappedAggType, r: T) -> WrappedAggType: def _null_wrap_accumulate_block( ignore_nulls: bool, - accum_block: Callable[[AggType, Block[T]], AggType], + accum_block: Callable[[AggType, Block], AggType], null_merge: Callable[[WrappedAggType, WrappedAggType], WrappedAggType], -) -> Callable[[WrappedAggType, Block[T]], WrappedAggType]: +) -> Callable[[WrappedAggType, Block], WrappedAggType]: """ Wrap vectorized aggregate function with null handling. @@ -212,7 +212,7 @@ def _null_wrap_accumulate_block( A new vectorized aggregate function that handles nulls. 
""" - def _accum_block_null(a: WrappedAggType, block: Block[T]) -> WrappedAggType: + def _accum_block_null(a: WrappedAggType, block: Block) -> WrappedAggType: ret = accum_block(block) if ret is not None: ret = _wrap_acc(ret, has_data=True) diff --git a/python/ray/data/_internal/pandas_block.py b/python/ray/data/_internal/pandas_block.py index d6c1bc1b807c..015d82a9b303 100644 --- a/python/ray/data/_internal/pandas_block.py +++ b/python/ray/data/_internal/pandas_block.py @@ -21,7 +21,6 @@ BlockAccessor, BlockMetadata, BlockExecStats, - KeyFn, KeyType, U, ) @@ -84,7 +83,7 @@ def __len__(self): return self._row.shape[1] -class PandasBlockBuilder(TableBlockBuilder[T]): +class PandasBlockBuilder(TableBlockBuilder): def __init__(self): pandas = lazy_import_pandas() super().__init__(pandas.DataFrame) @@ -167,7 +166,7 @@ def take(self, indices: List[int]) -> "pandas.DataFrame": table.reset_index(drop=True, inplace=True) return table - def select(self, columns: List[KeyFn]) -> "pandas.DataFrame": + def select(self, columns: List[str]) -> "pandas.DataFrame": if not all(isinstance(col, str) for col in columns): raise ValueError( "Columns must be a list of column name strings when aggregating on " @@ -264,7 +263,7 @@ def _zip(self, acc: BlockAccessor) -> "pandas.DataFrame": return r @staticmethod - def builder() -> PandasBlockBuilder[T]: + def builder() -> PandasBlockBuilder: return PandasBlockBuilder() @staticmethod @@ -275,7 +274,7 @@ def _sample(self, n_samples: int, key: "SortKeyT") -> "pandas.DataFrame": return self._table[[k[0] for k in key]].sample(n_samples, ignore_index=True) def _apply_agg( - self, agg_fn: Callable[["pandas.Series", bool], U], on: KeyFn + self, agg_fn: Callable[["pandas.Series", bool], U], on: str ) -> Optional[U]: """Helper providing null handling around applying an aggregation to a column.""" pd = lazy_import_pandas() @@ -303,10 +302,10 @@ def _apply_agg( return None return val - def count(self, on: KeyFn) -> Optional[U]: + def count(self, on: str) 
-> Optional[U]: return self._apply_agg(lambda col: col.count(), on) - def sum(self, on: KeyFn, ignore_nulls: bool) -> Optional[U]: + def sum(self, on: str, ignore_nulls: bool) -> Optional[U]: pd = lazy_import_pandas() if on is not None and not isinstance(on, str): raise ValueError( @@ -328,18 +327,18 @@ def sum(self, on: KeyFn, ignore_nulls: bool) -> Optional[U]: return None return val - def min(self, on: KeyFn, ignore_nulls: bool) -> Optional[U]: + def min(self, on: str, ignore_nulls: bool) -> Optional[U]: return self._apply_agg(lambda col: col.min(skipna=ignore_nulls), on) - def max(self, on: KeyFn, ignore_nulls: bool) -> Optional[U]: + def max(self, on: str, ignore_nulls: bool) -> Optional[U]: return self._apply_agg(lambda col: col.max(skipna=ignore_nulls), on) - def mean(self, on: KeyFn, ignore_nulls: bool) -> Optional[U]: + def mean(self, on: str, ignore_nulls: bool) -> Optional[U]: return self._apply_agg(lambda col: col.mean(skipna=ignore_nulls), on) def sum_of_squared_diffs_from_mean( self, - on: KeyFn, + on: str, ignore_nulls: bool, mean: Optional[U] = None, ) -> Optional[U]: @@ -352,7 +351,7 @@ def sum_of_squared_diffs_from_mean( def sort_and_partition( self, boundaries: List[T], key: "SortKeyT", descending: bool - ) -> List[Block[T]]: + ) -> List[Block]: if len(key) > 1: raise NotImplementedError( "sorting by multiple columns is not supported yet" @@ -389,7 +388,7 @@ def sort_and_partition( partitions.append(table[last_idx:]) return partitions - def combine(self, key: KeyFn, aggs: Tuple[AggregateFn]) -> "pandas.DataFrame": + def combine(self, key: str, aggs: Tuple[AggregateFn]) -> "pandas.DataFrame": """Combine rows with the same key into an accumulator. This assumes the block is already sorted by key in ascending order. 
@@ -418,7 +417,7 @@ def iter_groups() -> Iterator[Tuple[KeyType, Block]]: return start = end = 0 - iter = self.iter_rows() + iter = self.iter_rows(public_row_format=False) next_row = None while True: try: @@ -464,7 +463,7 @@ def iter_groups() -> Iterator[Tuple[KeyType, Block]]: @staticmethod def merge_sorted_blocks( - blocks: List[Block[T]], key: "SortKeyT", _descending: bool + blocks: List[Block], key: "SortKeyT", _descending: bool ) -> Tuple["pandas.DataFrame", BlockMetadata]: pd = lazy_import_pandas() stats = BlockExecStats.builder() @@ -481,7 +480,7 @@ def merge_sorted_blocks( @staticmethod def aggregate_combined_blocks( blocks: List["pandas.DataFrame"], - key: KeyFn, + key: str, aggs: Tuple[AggregateFn], finalize: bool, ) -> Tuple["pandas.DataFrame", BlockMetadata]: @@ -509,7 +508,11 @@ def aggregate_combined_blocks( key_fn = (lambda r: r[r._row.columns[0]]) if key is not None else (lambda r: 0) iter = heapq.merge( - *[PandasBlockAccessor(block).iter_rows() for block in blocks], key=key_fn + *[ + PandasBlockAccessor(block).iter_rows(public_row_format=False) + for block in blocks + ], + key=key_fn, ) next_row = None builder = PandasBlockBuilder() diff --git a/python/ray/data/_internal/pipeline_executor.py b/python/ray/data/_internal/pipeline_executor.py index b44b65b28181..2f5a7fbc9233 100644 --- a/python/ray/data/_internal/pipeline_executor.py +++ b/python/ray/data/_internal/pipeline_executor.py @@ -1,10 +1,9 @@ -from typing import Any, Callable, List, Optional, TYPE_CHECKING +from typing import Callable, List, Optional, TYPE_CHECKING import time import concurrent.futures import logging import ray -from ray.data.block import T from ray.data.context import DataContext from ray.data.datastream import Datastream from ray.data._internal.progress_bar import ProgressBar @@ -16,7 +15,7 @@ from ray.data.dataset_pipeline import DatasetPipeline -def pipeline_stage(fn: Callable[[], Datastream[T]]) -> Datastream[T]: +def pipeline_stage(fn: Callable[[], Datastream]) -> 
Datastream: # Force eager evaluation of all blocks in the pipeline stage. This # prevents resource deadlocks due to overlapping stage execution (e.g., # task -> actor stage). @@ -24,9 +23,9 @@ def pipeline_stage(fn: Callable[[], Datastream[T]]) -> Datastream[T]: class PipelineExecutor: - def __init__(self, pipeline: "DatasetPipeline[T]"): - self._pipeline: "DatasetPipeline[T]" = pipeline - self._stages: List[concurrent.futures.Future[Datastream[Any]]] = [None] * ( + def __init__(self, pipeline: "DatasetPipeline"): + self._pipeline: "DatasetPipeline" = pipeline + self._stages: List[concurrent.futures.Future[Datastream]] = [None] * ( len(self._pipeline._optimized_stages) + 1 ) self._iter = iter(self._pipeline._base_iterable) @@ -160,9 +159,9 @@ def __next__(self): class PipelineSplitExecutorCoordinator: def __init__( self, - pipeline: "DatasetPipeline[T]", + pipeline: "DatasetPipeline", n: int, - splitter: Callable[[Datastream], List["Datastream[T]"]], + splitter: Callable[[Datastream], List["Datastream"]], context: DataContext, ): DataContext._set_current(context) @@ -172,7 +171,7 @@ def __init__( self.splitter = splitter self.cur_splits = [None] * self.n - def next_datastream_if_ready(self, split_index: int) -> Optional[Datastream[T]]: + def next_datastream_if_ready(self, split_index: int) -> Optional[Datastream]: # TODO(swang): This will hang if one of the consumers fails and is # re-executed from the beginning. To make this fault-tolerant, we need # to make next_datastream_if_ready idempotent. 
diff --git a/python/ray/data/_internal/plan.py b/python/ray/data/_internal/plan.py index 0ee077d392c8..6de3b8878835 100644 --- a/python/ray/data/_internal/plan.py +++ b/python/ray/data/_internal/plan.py @@ -22,7 +22,7 @@ from ray.types import ObjectRef from ray.data._internal.block_list import BlockList from ray.data._internal.compute import ( - UDF, + UserDefinedFunction, ActorPoolStrategy, TaskPoolStrategy, BlockTransform, @@ -875,7 +875,7 @@ def __init__( compute: Union[str, ComputeStrategy], ray_remote_args: dict, target_block_size: Optional[int] = None, - fn: Optional[UDF] = None, + fn: Optional[UserDefinedFunction] = None, fn_args: Optional[Iterable[Any]] = None, fn_kwargs: Optional[Dict[str, Any]] = None, fn_constructor_args: Optional[Iterable[Any]] = None, @@ -963,7 +963,7 @@ def fuse(self, prev: Stage): def block_fn( blocks: Iterable[Block], ctx: TaskContext, - fn: UDF, + fn: UserDefinedFunction, *fn_args, **fn_kwargs, ) -> Iterable[Block]: diff --git a/python/ray/data/_internal/planner/aggregate.py b/python/ray/data/_internal/planner/aggregate.py index bea062434b90..5b434a18843d 100644 --- a/python/ray/data/_internal/planner/aggregate.py +++ b/python/ray/data/_internal/planner/aggregate.py @@ -17,13 +17,12 @@ from ray.data._internal.planner.exchange.sort_task_spec import SortTaskSpec from ray.data._internal.stats import StatsDict from ray.data.aggregate import AggregateFn -from ray.data.block import KeyFn from ray.data.context import DataContext from ray.data._internal.util import unify_block_metadata_schema def generate_aggregate_fn( - key: Optional[KeyFn], + key: Optional[str], aggs: List[AggregateFn], ) -> AllToAllTransformFn: """Generate function to aggregate blocks by the specified key column or key diff --git a/python/ray/data/_internal/planner/exchange/aggregate_task_spec.py b/python/ray/data/_internal/planner/exchange/aggregate_task_spec.py index 5bf2f5d05099..d4f9506fa0f1 100644 --- 
a/python/ray/data/_internal/planner/exchange/aggregate_task_spec.py +++ b/python/ray/data/_internal/planner/exchange/aggregate_task_spec.py @@ -8,7 +8,6 @@ BlockAccessor, BlockExecStats, BlockMetadata, - KeyFn, KeyType, ) @@ -32,7 +31,7 @@ class SortAggregateTaskSpec(ExchangeTaskSpec): def __init__( self, boundaries: List[KeyType], - key: Optional[KeyFn], + key: Optional[str], aggs: List[AggregateFn], ): super().__init__( @@ -46,7 +45,7 @@ def map( block: Block, output_num_blocks: int, boundaries: List[KeyType], - key: Optional[KeyFn], + key: Optional[str], aggs: List[AggregateFn], ) -> List[Union[BlockMetadata, Block]]: stats = BlockExecStats.builder() @@ -69,7 +68,7 @@ def map( @staticmethod def reduce( - key: Optional[KeyFn], + key: Optional[str], aggs: List[AggregateFn], *mapper_outputs: List[Block], partial_reduce: bool = False, @@ -81,7 +80,7 @@ def reduce( @staticmethod def _prune_unused_columns( block: Block, - key: KeyFn, + key: str, aggs: Tuple[AggregateFn], ) -> Block: """Prune unused columns from block before aggregate.""" diff --git a/python/ray/data/_internal/planner/exchange/sort_task_spec.py b/python/ray/data/_internal/planner/exchange/sort_task_spec.py index 4fa17cec8588..b87bff6f128d 100644 --- a/python/ray/data/_internal/planner/exchange/sort_task_spec.py +++ b/python/ray/data/_internal/planner/exchange/sort_task_spec.py @@ -121,5 +121,5 @@ def sample_boundaries( return ret[1:] -def _sample_block(block: Block[T], n_samples: int, key: SortKeyT) -> Block[T]: +def _sample_block(block: Block, n_samples: int, key: SortKeyT) -> Block: return BlockAccessor.for_block(block).sample(n_samples, key) diff --git a/python/ray/data/_internal/planner/filter.py b/python/ray/data/_internal/planner/filter.py index afbd22f4e907..8374114de7a1 100644 --- a/python/ray/data/_internal/planner/filter.py +++ b/python/ray/data/_internal/planner/filter.py @@ -1,12 +1,12 @@ from typing import Callable, Iterator from ray.data._internal.execution.interfaces import TaskContext 
-from ray.data.block import Block, BlockAccessor, RowUDF +from ray.data.block import Block, BlockAccessor, UserDefinedFunction from ray.data.context import DataContext def generate_filter_fn() -> Callable[ - [Iterator[Block], TaskContext, RowUDF], Iterator[Block] + [Iterator[Block], TaskContext, UserDefinedFunction], Iterator[Block] ]: """Generate function to apply the UDF to each record of blocks, and filter out records that do not satisfy the given predicate. @@ -15,13 +15,13 @@ def generate_filter_fn() -> Callable[ context = DataContext.get_current() def fn( - blocks: Iterator[Block], ctx: TaskContext, row_fn: RowUDF + blocks: Iterator[Block], ctx: TaskContext, row_fn: UserDefinedFunction ) -> Iterator[Block]: DataContext._set_current(context) for block in blocks: block = BlockAccessor.for_block(block) builder = block.builder() - for row in block.iter_rows(): + for row in block.iter_rows(public_row_format=True): if row_fn(row): builder.add(row) # NOTE: this yields an empty block if all rows are filtered out. diff --git a/python/ray/data/_internal/planner/flat_map.py b/python/ray/data/_internal/planner/flat_map.py index c641f83f6b07..d2a09035e48a 100644 --- a/python/ray/data/_internal/planner/flat_map.py +++ b/python/ray/data/_internal/planner/flat_map.py @@ -2,12 +2,12 @@ from ray.data._internal.execution.interfaces import TaskContext from ray.data._internal.output_buffer import BlockOutputBuffer -from ray.data.block import Block, BlockAccessor, RowUDF +from ray.data.block import Block, BlockAccessor, UserDefinedFunction from ray.data.context import DataContext def generate_flat_map_fn() -> Callable[ - [Iterator[Block], TaskContext, RowUDF], Iterator[Block] + [Iterator[Block], TaskContext, UserDefinedFunction], Iterator[Block] ]: """Generate function to apply the UDF to each record of blocks, and then flatten results. 
@@ -16,13 +16,13 @@ def generate_flat_map_fn() -> Callable[ context = DataContext.get_current() def fn( - blocks: Iterator[Block], ctx: TaskContext, row_fn: RowUDF + blocks: Iterator[Block], ctx: TaskContext, row_fn: UserDefinedFunction ) -> Iterator[Block]: DataContext._set_current(context) output_buffer = BlockOutputBuffer(None, context.target_max_block_size) for block in blocks: block = BlockAccessor.for_block(block) - for row in block.iter_rows(): + for row in block.iter_rows(public_row_format=True): for r2 in row_fn(row): output_buffer.add(r2) if output_buffer.has_next(): diff --git a/python/ray/data/_internal/planner/map_batches.py b/python/ray/data/_internal/planner/map_batches.py index a0d528bd8519..27597c550bdb 100644 --- a/python/ray/data/_internal/planner/map_batches.py +++ b/python/ray/data/_internal/planner/map_batches.py @@ -6,7 +6,7 @@ from ray.data._internal.execution.interfaces import TaskContext from ray.data._internal.output_buffer import BlockOutputBuffer from ray.data._internal.util import _truncated_repr -from ray.data.block import BatchUDF, Block, DataBatch +from ray.data.block import UserDefinedFunction, Block, DataBatch from ray.data.context import DEFAULT_BATCH_SIZE, DataContext @@ -14,7 +14,7 @@ def generate_map_batches_fn( batch_size: Optional[int] = DEFAULT_BATCH_SIZE, batch_format: Optional[str] = "default", zero_copy_batch: bool = False, -) -> Callable[[Iterator[Block], TaskContext, BatchUDF], Iterator[Block]]: +) -> Callable[[Iterator[Block], TaskContext, UserDefinedFunction], Iterator[Block]]: """Generate function to apply the batch UDF to blocks.""" import numpy as np import pandas as pd @@ -25,7 +25,7 @@ def generate_map_batches_fn( def fn( blocks: Iterator[Block], ctx: TaskContext, - batch_fn: BatchUDF, + batch_fn: UserDefinedFunction, *fn_args, **fn_kwargs, ) -> Iterator[Block]: diff --git a/python/ray/data/_internal/planner/map_rows.py b/python/ray/data/_internal/planner/map_rows.py index 2c38a669e52f..fa94373f62ec 100644 --- 
a/python/ray/data/_internal/planner/map_rows.py +++ b/python/ray/data/_internal/planner/map_rows.py @@ -4,25 +4,25 @@ from ray.data._internal.execution.interfaces import TaskContext from ray.data._internal.output_buffer import BlockOutputBuffer from ray.data._internal.util import _truncated_repr -from ray.data.block import Block, BlockAccessor, RowUDF, StrictModeError +from ray.data.block import Block, BlockAccessor, UserDefinedFunction, StrictModeError from ray.data.context import DataContext def generate_map_rows_fn() -> Callable[ - [Iterator[Block], TaskContext, RowUDF], Iterator[Block] + [Iterator[Block], TaskContext, UserDefinedFunction], Iterator[Block] ]: """Generate function to apply the UDF to each record of blocks.""" context = DataContext.get_current() def fn( - blocks: Iterator[Block], ctx: TaskContext, row_fn: RowUDF + blocks: Iterator[Block], ctx: TaskContext, row_fn: UserDefinedFunction ) -> Iterator[Block]: DataContext._set_current(context) output_buffer = BlockOutputBuffer(None, context.target_max_block_size) for block in blocks: block = BlockAccessor.for_block(block) - for row in block.iter_rows(): + for row in block.iter_rows(public_row_format=True): item = row_fn(row) if context.strict_mode and not isinstance( item, collections.abc.Mapping diff --git a/python/ray/data/_internal/planner/plan_from_items_op.py b/python/ray/data/_internal/planner/plan_from_items_op.py index a0d3a8e62abd..95507501bc02 100644 --- a/python/ray/data/_internal/planner/plan_from_items_op.py +++ b/python/ray/data/_internal/planner/plan_from_items_op.py @@ -1,3 +1,4 @@ +import collections from typing import List import ray @@ -19,6 +20,7 @@ def _plan_from_items_op(op: FromItems) -> PhysicalOperator: """ def get_input_data() -> List[RefBundle]: + ctx = ray.data.DataContext.get_current() if op._parallelism > 0: block_size, remainder = divmod(len(op._items), op._parallelism) else: @@ -34,7 +36,11 @@ def get_input_data() -> List[RefBundle]: block_start = i * block_size + min(i, 
remainder) block_end = (i + 1) * block_size + min(i + 1, remainder) for j in range(block_start, block_end): - builder.add(op._items[j]) + item = op._items[j] + if ctx.strict_mode: + if not isinstance(item, collections.abc.Mapping): + item = {"item": item} + builder.add(item) block: Block = builder.build() block_metadata: BlockMetadata = BlockAccessor.for_block(block).get_metadata( diff --git a/python/ray/data/_internal/simple_block.py b/python/ray/data/_internal/simple_block.py index 92ab9310e60b..57a8923ac629 100644 --- a/python/ray/data/_internal/simple_block.py +++ b/python/ray/data/_internal/simple_block.py @@ -22,13 +22,12 @@ KeyType, AggType, BlockExecStats, - KeyFn, ) from ray.data._internal.block_builder import BlockBuilder from ray.data._internal.size_estimator import SizeEstimator -class SimpleBlockBuilder(BlockBuilder[T]): +class SimpleBlockBuilder(BlockBuilder): def __init__(self): self._items = [] self._size_estimator = SizeEstimator() @@ -68,7 +67,7 @@ def __init__(self, items: List[T]): def num_rows(self) -> int: return len(self._items) - def iter_rows(self) -> Iterator[T]: + def iter_rows(self, public_row_format: bool) -> Iterator[T]: return iter(self._items) def slice(self, start: int, end: int, copy: bool = False) -> List[T]: @@ -80,14 +79,14 @@ def slice(self, start: int, end: int, copy: bool = False) -> List[T]: def take(self, indices: List[int]) -> List[T]: return [self._items[i] for i in indices] - def select(self, columns: List[KeyFn]) -> List[T]: + def select(self, columns: List[str]) -> List[T]: if len(columns) != 1 or not callable(columns[0]): raise ValueError( "Column must be a single callable when selecting on Simple blocks, " f"but got: {columns}." 
) callable_col = columns[0] - return [callable_col(row) for row in self.iter_rows()] + return [callable_col(row) for row in self.iter_rows(True)] def random_shuffle(self, random_seed: Optional[int]) -> List[T]: random = np.random.RandomState(random_seed) @@ -100,9 +99,7 @@ def to_pandas(self) -> "pandas.DataFrame": return pandas.DataFrame({"value": self._items}) - def to_numpy( - self, columns: Optional[Union[KeyFn, List[KeyFn]]] = None - ) -> np.ndarray: + def to_numpy(self, columns: Optional[Union[str, List[str]]] = None) -> np.ndarray: if columns is not None: if not isinstance(columns, list): columns = [columns] @@ -128,7 +125,7 @@ def schema(self) -> Any: else: return None - def zip(self, other: "Block[T]") -> "Block[T]": + def zip(self, other: "Block") -> "Block": if not isinstance(other, list): raise ValueError( "Cannot zip {} with block of type {}".format(type(self), type(other)) @@ -142,7 +139,7 @@ def zip(self, other: "Block[T]") -> "Block[T]": return list(zip(self._items, other)) @staticmethod - def builder() -> SimpleBlockBuilder[T]: + def builder() -> SimpleBlockBuilder: return SimpleBlockBuilder() def sample(self, n_samples: int = 1, key: "SortKeyT" = None) -> List[T]: @@ -157,7 +154,7 @@ def sample(self, n_samples: int = 1, key: "SortKeyT" = None) -> List[T]: return ret return [key(x) for x in ret] - def count(self, on: KeyFn) -> Optional[U]: + def count(self, on: str) -> Optional[U]: if on is not None and not callable(on): raise ValueError( "on must be a callable or None when aggregating on Simple blocks, but " @@ -168,7 +165,7 @@ def count(self, on: KeyFn) -> Optional[U]: return None count = 0 - for r in self.iter_rows(): + for r in self.iter_rows(True): if on is not None: r = on(r) if r is not None: @@ -179,7 +176,7 @@ def _apply_accum( self, init: AggType, accum: Callable[[AggType, T], AggType], - on: KeyFn, + on: str, ignore_nulls: bool, ) -> Optional[U]: """Helper providing null handling around applying an aggregation.""" @@ -194,7 +191,7 @@ def 
_apply_accum( has_data = False a = init - for r in self.iter_rows(): + for r in self.iter_rows(True): if on is not None: r = on(r) if r is None: @@ -207,16 +204,16 @@ def _apply_accum( a = accum(a, r) return a if has_data else None - def sum(self, on: KeyFn, ignore_nulls: bool) -> Optional[U]: + def sum(self, on: str, ignore_nulls: bool) -> Optional[U]: return self._apply_accum(0, lambda a, r: a + r, on, ignore_nulls) - def min(self, on: KeyFn, ignore_nulls: bool) -> Optional[U]: + def min(self, on: str, ignore_nulls: bool) -> Optional[U]: return self._apply_accum(float("inf"), min, on, ignore_nulls) - def max(self, on: KeyFn, ignore_nulls: bool) -> Optional[U]: + def max(self, on: str, ignore_nulls: bool) -> Optional[U]: return self._apply_accum(float("-inf"), max, on, ignore_nulls) - def mean(self, on: KeyFn, ignore_nulls: bool) -> Optional[U]: + def mean(self, on: str, ignore_nulls: bool) -> Optional[U]: return self._apply_accum( [0, 0], lambda a, r: [a[0] + r, a[1] + 1], @@ -224,7 +221,7 @@ def mean(self, on: KeyFn, ignore_nulls: bool) -> Optional[U]: ignore_nulls, ) - def std(self, on: KeyFn, ignore_nulls: bool) -> Optional[U]: + def std(self, on: str, ignore_nulls: bool) -> Optional[U]: def accum(a: List[float], r: float) -> List[float]: # Accumulates the current count, the current mean, and the sum of # squared differences from the current mean (M2). 
@@ -240,7 +237,7 @@ def accum(a: List[float], r: float) -> List[float]: def sum_of_squared_diffs_from_mean( self, - on: KeyFn, + on: str, ignore_nulls: bool, mean: Optional[U] = None, ) -> Optional[U]: @@ -256,7 +253,7 @@ def sum_of_squared_diffs_from_mean( def sort_and_partition( self, boundaries: List[T], key: "SortKeyT", descending: bool - ) -> List["Block[T]"]: + ) -> List["Block"]: items = sorted(self._items, key=key, reverse=descending) if len(boundaries) == 0: return [items] @@ -291,9 +288,7 @@ def sort_and_partition( ret.append(items[prev_i:]) return ret - def combine( - self, key: KeyFn, aggs: Tuple[AggregateFn] - ) -> Block[Tuple[KeyType, AggType]]: + def combine(self, key: str, aggs: Tuple[AggregateFn]) -> Block: """Combine rows with the same key into an accumulator. This assumes the block is already sorted by key in ascending order. @@ -323,7 +318,7 @@ def iter_groups() -> Iterator[Tuple[KeyType, Block]]: return start = end = 0 - iter = self.iter_rows() + iter = self.iter_rows(True) next_row = None # Use a bool to indicate if next_row is valid # instead of checking if next_row is None @@ -364,8 +359,8 @@ def iter_groups() -> Iterator[Tuple[KeyType, Block]]: @staticmethod def merge_sorted_blocks( - blocks: List[Block[T]], key: "SortKeyT", descending: bool - ) -> Tuple[Block[T], BlockMetadata]: + blocks: List[Block], key: "SortKeyT", descending: bool + ) -> Tuple[Block, BlockMetadata]: stats = BlockExecStats.builder() ret = [x for block in blocks for x in block] ret.sort(key=key, reverse=descending) @@ -375,11 +370,11 @@ def merge_sorted_blocks( @staticmethod def aggregate_combined_blocks( - blocks: List[Block[Tuple[KeyType, AggType]]], - key: KeyFn, + blocks: List[Block], + key: str, aggs: Tuple[AggregateFn], finalize: bool, - ) -> Tuple[Block[Tuple[KeyType, Union[U, AggType]]], BlockMetadata]: + ) -> Tuple[Block, BlockMetadata]: """Aggregate sorted, partially combined blocks with the same key range. 
This assumes blocks are already sorted by key in ascending order, @@ -405,7 +400,8 @@ def aggregate_combined_blocks( key_fn = (lambda r: r[0]) if key else (lambda r: 0) iter = heapq.merge( - *[SimpleBlockAccessor(block).iter_rows() for block in blocks], key=key_fn + *[SimpleBlockAccessor(block).iter_rows(True) for block in blocks], + key=key_fn, ) next_row = None ret = [] diff --git a/python/ray/data/_internal/sort.py b/python/ray/data/_internal/sort.py index f31ea9a6bb7c..02065b59f621 100644 --- a/python/ray/data/_internal/sort.py +++ b/python/ray/data/_internal/sort.py @@ -173,5 +173,5 @@ def sort_impl( ) -def _sample_block(block: Block[T], n_samples: int, key: SortKeyT) -> Block[T]: +def _sample_block(block: Block, n_samples: int, key: SortKeyT) -> Block: return BlockAccessor.for_block(block).sample(n_samples, key) diff --git a/python/ray/data/_internal/stage_impl.py b/python/ray/data/_internal/stage_impl.py index 472853055301..3ea2044d2e47 100644 --- a/python/ray/data/_internal/stage_impl.py +++ b/python/ray/data/_internal/stage_impl.py @@ -22,7 +22,6 @@ _validate_key_fn, Block, BlockPartition, - KeyFn, BlockMetadata, BlockAccessor, BlockExecStats, @@ -316,7 +315,7 @@ def _do_zip( class SortStage(AllToAllStage): """Implementation of `Datastream.sort()`.""" - def __init__(self, ds: "Datastream", key: Optional[KeyFn], descending: bool): + def __init__(self, ds: "Datastream", key: Optional[str], descending: bool): def do_sort( block_list, ctx: TaskContext, diff --git a/python/ray/data/_internal/table_block.py b/python/ray/data/_internal/table_block.py index 0971ecea6e1a..0b842edc612f 100644 --- a/python/ray/data/_internal/table_block.py +++ b/python/ray/data/_internal/table_block.py @@ -22,7 +22,7 @@ MAX_UNCOMPACTED_SIZE_BYTES = 50 * 1024 * 1024 -class TableBlockBuilder(BlockBuilder[T]): +class TableBlockBuilder(BlockBuilder): def __init__(self, block_type): # The set of uncompacted Python values buffered. 
self._columns = collections.defaultdict(list) @@ -180,7 +180,9 @@ def is_tensor_wrapper(self) -> bool: return False return _is_tensor_schema(self.column_names()) - def iter_rows(self) -> Iterator[Union[Mapping, np.ndarray]]: + def iter_rows( + self, public_row_format: bool + ) -> Iterator[Union[Mapping, np.ndarray]]: ctx = ray.data.DataContext.get_current() outer = self @@ -195,7 +197,11 @@ def __next__(self): self._cur += 1 if self._cur < outer.num_rows(): row = outer._get_row(self._cur) - if ctx.strict_mode and isinstance(row, TableRow): + if ( + public_row_format + and ctx.strict_mode + and isinstance(row, TableRow) + ): return row.as_pydict() else: return row @@ -203,10 +209,10 @@ def __next__(self): return Iter() - def _zip(self, acc: BlockAccessor) -> "Block[T]": + def _zip(self, acc: BlockAccessor) -> "Block": raise NotImplementedError - def zip(self, other: "Block[T]") -> "Block[T]": + def zip(self, other: "Block") -> "Block": acc = BlockAccessor.for_block(other) if not isinstance(acc, type(self)): raise ValueError( diff --git a/python/ray/data/_internal/util.py b/python/ray/data/_internal/util.py index ac3a44d60380..1d8c17576411 100644 --- a/python/ray/data/_internal/util.py +++ b/python/ray/data/_internal/util.py @@ -18,7 +18,6 @@ from ray.util.placement_group import PlacementGroup import pyarrow import pandas - from ray.data._internal.arrow_block import ArrowRow from ray.data.block import Block, BlockMetadata logger = logging.getLogger(__name__) @@ -436,7 +435,7 @@ def capitalize(s: str): return "".join(capfirst(x) for x in s.split("_")) -def pandas_df_to_arrow_block(df: "pandas.DataFrame") -> "Block[ArrowRow]": +def pandas_df_to_arrow_block(df: "pandas.DataFrame") -> "Block": from ray.data.block import BlockAccessor, BlockExecStats stats = BlockExecStats.builder() @@ -451,7 +450,7 @@ def pandas_df_to_arrow_block(df: "pandas.DataFrame") -> "Block[ArrowRow]": ) -def ndarray_to_block(ndarray: np.ndarray, strict_mode: bool) -> "Block[np.ndarray]": +def 
ndarray_to_block(ndarray: np.ndarray, strict_mode: bool) -> "Block": from ray.data.block import BlockAccessor, BlockExecStats stats = BlockExecStats.builder() diff --git a/python/ray/data/aggregate.py b/python/ray/data/aggregate.py index 3d0b28f0efb9..6545e8463d84 100644 --- a/python/ray/data/aggregate.py +++ b/python/ray/data/aggregate.py @@ -9,7 +9,6 @@ BlockAccessor, KeyType, AggType, - KeyFn, _validate_key_fn, ) from ray.data._internal.null_aggregate import ( @@ -31,7 +30,7 @@ def __init__( init: Callable[[KeyType], AggType], merge: Callable[[AggType, AggType], AggType], accumulate_row: Callable[[AggType, T], AggType] = None, - accumulate_block: Callable[[AggType, Block[T]], AggType] = None, + accumulate_block: Callable[[AggType, Block], AggType] = None, finalize: Callable[[AggType], U] = lambda a: a, name: Optional[str] = None, ): @@ -69,9 +68,9 @@ def __init__( ) if accumulate_block is None: - def accumulate_block(a: AggType, block: Block[T]) -> AggType: + def accumulate_block(a: AggType, block: Block) -> AggType: block_acc = BlockAccessor.for_block(block) - for r in block_acc.iter_rows(): + for r in block_acc.iter_rows(public_row_format=False): a = accumulate_row(a, r) return a @@ -87,7 +86,7 @@ def _validate(self, schema: Optional[Union[type, "pa.lib.Schema"]]) -> None: class _AggregateOnKeyBase(AggregateFn): - def _set_key_fn(self, on: KeyFn): + def _set_key_fn(self, on: str): self._key_fn = on def _validate(self, schema: Optional[Union[type, "pa.lib.Schema"]]) -> None: @@ -115,9 +114,9 @@ class Sum(_AggregateOnKeyBase): def __init__( self, - on: Optional[KeyFn] = None, + on: Optional[str] = None, ignore_nulls: bool = True, - alias_name: Optional[KeyFn] = None, + alias_name: Optional[str] = None, ): self._set_key_fn(on) if alias_name: @@ -146,9 +145,9 @@ class Min(_AggregateOnKeyBase): def __init__( self, - on: Optional[KeyFn] = None, + on: Optional[str] = None, ignore_nulls: bool = True, - alias_name: Optional[KeyFn] = None, + alias_name: Optional[str] = 
None, ): self._set_key_fn(on) if alias_name: @@ -177,9 +176,9 @@ class Max(_AggregateOnKeyBase): def __init__( self, - on: Optional[KeyFn] = None, + on: Optional[str] = None, ignore_nulls: bool = True, - alias_name: Optional[KeyFn] = None, + alias_name: Optional[str] = None, ): self._set_key_fn(on) if alias_name: @@ -208,9 +207,9 @@ class Mean(_AggregateOnKeyBase): def __init__( self, - on: Optional[KeyFn] = None, + on: Optional[str] = None, ignore_nulls: bool = True, - alias_name: Optional[KeyFn] = None, + alias_name: Optional[str] = None, ): self._set_key_fn(on) if alias_name: @@ -222,7 +221,7 @@ def __init__( ignore_nulls, lambda a1, a2: [a1[0] + a2[0], a1[1] + a2[1]] ) - def vectorized_mean(block: Block[T]) -> AggType: + def vectorized_mean(block: Block) -> AggType: block_acc = BlockAccessor.for_block(block) count = block_acc.count(on) if count == 0 or count is None: @@ -262,10 +261,10 @@ class Std(_AggregateOnKeyBase): def __init__( self, - on: Optional[KeyFn] = None, + on: Optional[str] = None, ddof: int = 1, ignore_nulls: bool = True, - alias_name: Optional[KeyFn] = None, + alias_name: Optional[str] = None, ): self._set_key_fn(on) if alias_name: @@ -292,7 +291,7 @@ def merge(a: List[float], b: List[float]): null_merge = _null_wrap_merge(ignore_nulls, merge) - def vectorized_std(block: Block[T]) -> AggType: + def vectorized_std(block: Block) -> AggType: block_acc = BlockAccessor.for_block(block) count = block_acc.count(on) if count == 0 or count is None: @@ -333,9 +332,9 @@ class AbsMax(_AggregateOnKeyBase): def __init__( self, - on: Optional[KeyFn] = None, + on: Optional[str] = None, ignore_nulls: bool = True, - alias_name: Optional[KeyFn] = None, + alias_name: Optional[str] = None, ): self._set_key_fn(on) on_fn = _to_on_fn(on) @@ -355,7 +354,7 @@ def __init__( ) -def _to_on_fn(on: Optional[KeyFn]): +def _to_on_fn(on: Optional[str]): if on is None: return lambda r: r elif isinstance(on, str): @@ -370,10 +369,10 @@ class Quantile(_AggregateOnKeyBase): def 
__init__( self, - on: Optional[KeyFn] = None, + on: Optional[str] = None, q: float = 0.5, ignore_nulls: bool = True, - alias_name: Optional[KeyFn] = None, + alias_name: Optional[str] = None, ): self._set_key_fn(on) self._q = q @@ -404,10 +403,10 @@ def merge(a: List[int], b: List[int]): null_merge = _null_wrap_merge(ignore_nulls, merge) - def block_row_ls(block: Block[T]) -> AggType: + def block_row_ls(block: Block) -> AggType: block_acc = BlockAccessor.for_block(block) ls = [] - for row in block_acc.iter_rows(): + for row in block_acc.iter_rows(public_row_format=False): ls.append(row.get(on)) return ls diff --git a/python/ray/data/block.py b/python/ray/data/block.py index d525e0f07185..de9188e314ee 100644 --- a/python/ray/data/block.py +++ b/python/ray/data/block.py @@ -8,7 +8,6 @@ Any, Callable, Dict, - Generic, Iterator, List, Optional, @@ -18,6 +17,7 @@ ) import numpy as np +import colorama import ray from ray import ObjectRefGenerator @@ -47,30 +47,36 @@ T = TypeVar("T", contravariant=True) U = TypeVar("U", covariant=True) + KeyType = TypeVar("KeyType") AggType = TypeVar("AggType") -# A function that extracts a concrete value from a record in a Datastream, used -# in ``sort(value_fns...)``, ``groupby(value_fn).agg(Agg(value_fn), ...)``. -# It can either be None (intepreted as the identity function), the name -# of a Datastream column, or a lambda function that extracts the desired value -# from the object. -KeyFn = Union[None, str, Callable[[T], Any]] +STRICT_MODE_EXPLANATION = ( + colorama.Fore.YELLOW + + "[IMPORTANT]: Ray Data strict mode is on by default in Ray 2.5. When in strict " + "mode, data schemas are required, standalone Python " + "objects are no longer supported, and the default batch format changes to `numpy` " + "from `pandas`. To disable strict mode temporarily, set the environment variable " + "RAY_DATA_STRICT_MODE=0 on all cluster processes. Strict mode will not be " + "possible to disable in future releases." 
+ colorama.Style.RESET_ALL +) @PublicAPI class StrictModeError(ValueError): - pass + def __init__(self, message: str): + super().__init__(message + "\n\n" + STRICT_MODE_EXPLANATION) def _validate_key_fn( schema: Optional[Union[type, "pyarrow.lib.Schema"]], - key: KeyFn, + key: Optional[str], ) -> None: """Check the key function is valid on the given schema.""" if schema is None: # Datastream is empty/cleared, validation not possible. return + ctx = ray.data.DataContext.get_current() is_simple_format = isinstance(schema, type) if isinstance(key, str): if is_simple_format: @@ -83,6 +89,8 @@ def _validate_key_fn( "The column '{}' does not exist in the " "schema '{}'.".format(key, schema) ) + elif ctx.strict_mode: + raise StrictModeError(f"In strict mode, the key must be a string, was: {key}") elif key is None: if not is_simple_format: raise ValueError( @@ -103,11 +111,12 @@ def _validate_key_fn( # # Block data can be accessed in a uniform way via ``BlockAccessors`` such as # ``SimpleBlockAccessor`` and ``ArrowBlockAccessor``. -Block = Union[List[T], "pyarrow.Table", "pandas.DataFrame", bytes] +Block = Union[list, "pyarrow.Table", "pandas.DataFrame", bytes] # User-facing data batch type. This is the data type for data that is supplied to and # returned from batch UDFs. -DataBatch = Union[Block, np.ndarray, Dict[str, np.ndarray]] +DataBatch = Union["pyarrow.Table", "pandas.DataFrame", Dict[str, np.ndarray]] + # A class type that implements __call__. CallableClass = type @@ -118,29 +127,11 @@ def __call__(self, __arg: T) -> Union[U, Iterator[U]]: ... -# A UDF on data batches. -BatchUDF = Union[ - # TODO(Clark): Once Ray only supports Python 3.8+, use protocol to constraint batch - # UDF type. - # Callable[[DataBatch, ...], DataBatch] - Callable[[DataBatch], DataBatch], - Callable[[DataBatch], Iterator[DataBatch]], - "_CallableClassProtocol", -] - -# A UDF on data rows. 
-RowUDF = Union[ - # TODO(Clark): Once Ray only supports Python 3.8+, use protocol to constraint batch - # UDF type. - # Callable[[T, ...], U] +# A user defined function passed to map, map_batches, ec. +UserDefinedFunction = Union[ Callable[[T], U], - "_CallableClassProtocol[T, U]", -] - - -FlatMapUDF = Union[ - RowUDF, Callable[[T], Iterator[U]], + "_CallableClassProtocol", ] # A list of block references pending computation by a single task. For example, @@ -281,7 +272,7 @@ def __post_init__(self): @DeveloperAPI -class BlockAccessor(Generic[T]): +class BlockAccessor: """Provides accessor methods for a specific block. Ideally, we wouldn't need a separate accessor classes for blocks. However, @@ -298,8 +289,13 @@ def num_rows(self) -> int: """Return the number of rows contained in this block.""" raise NotImplementedError - def iter_rows(self) -> Iterator[T]: - """Iterate over the rows of this block.""" + def iter_rows(self, public_row_format: bool) -> Iterator[T]: + """Iterate over the rows of this block. + + Args: + public_row_format: Whether to cast rows into the public Dict row + format (this incurs extra copy conversions). 
+ """ raise NotImplementedError def slice(self, start: int, end: int, copy: bool) -> Block: @@ -326,7 +322,7 @@ def take(self, indices: List[int]) -> Block: """ raise NotImplementedError - def select(self, columns: List[KeyFn]) -> Block: + def select(self, columns: List[Optional[str]]) -> Block: """Return a new block containing the provided columns.""" raise NotImplementedError @@ -405,12 +401,12 @@ def get_metadata( exec_stats=exec_stats, ) - def zip(self, other: "Block[T]") -> "Block[T]": + def zip(self, other: "Block") -> "Block": """Zip this block with another block of the same type and size.""" raise NotImplementedError @staticmethod - def builder() -> "BlockBuilder[T]": + def builder() -> "BlockBuilder": """Create a builder for this block type.""" raise NotImplementedError @@ -482,30 +478,30 @@ def for_block(block: Block) -> "BlockAccessor[T]": else: raise TypeError("Not a block type: {} ({})".format(block, type(block))) - def sample(self, n_samples: int, key: Any) -> "Block[T]": + def sample(self, n_samples: int, key: Any) -> "Block": """Return a random sample of items from this block.""" raise NotImplementedError def sort_and_partition( self, boundaries: List[T], key: Any, descending: bool - ) -> List["Block[T]"]: + ) -> List["Block"]: """Return a list of sorted partitions of this block.""" raise NotImplementedError - def combine(self, key: KeyFn, agg: "AggregateFn") -> Block[U]: + def combine(self, key: Optional[str], agg: "AggregateFn") -> Block: """Combine rows with the same key into an accumulator.""" raise NotImplementedError @staticmethod def merge_sorted_blocks( - blocks: List["Block[T]"], key: Any, descending: bool - ) -> Tuple[Block[T], BlockMetadata]: + blocks: List["Block"], key: Any, descending: bool + ) -> Tuple[Block, BlockMetadata]: """Return a sorted block by merging a list of sorted blocks.""" raise NotImplementedError @staticmethod def aggregate_combined_blocks( - blocks: List[Block], key: KeyFn, agg: "AggregateFn" - ) -> Tuple[Block[U], 
BlockMetadata]: + blocks: List[Block], key: Optional[str], agg: "AggregateFn" + ) -> Tuple[Block, BlockMetadata]: """Aggregate partially combined and sorted blocks.""" raise NotImplementedError diff --git a/python/ray/data/context.py b/python/ray/data/context.py index 50384d9e7633..fbbad658985b 100644 --- a/python/ray/data/context.py +++ b/python/ray/data/context.py @@ -107,7 +107,7 @@ # Enable strict schema mode (experimental). In this mode, we only allow structured # schemas, and default to numpy as the batch format. -DEFAULT_STRICT_MODE = bool(int(os.environ.get("RAY_DATA_STRICT_MODE", "0"))) +DEFAULT_STRICT_MODE = bool(int(os.environ.get("RAY_DATA_STRICT_MODE", "1"))) # Set this to True to use the legacy iter_batches codepath prior to 2.4. DEFAULT_USE_LEGACY_ITER_BATCHES = False diff --git a/python/ray/data/dataset_pipeline.py b/python/ray/data/dataset_pipeline.py index 88f69634cb2f..637e279928ad 100644 --- a/python/ray/data/dataset_pipeline.py +++ b/python/ray/data/dataset_pipeline.py @@ -7,7 +7,6 @@ Any, Callable, Dict, - Generic, Iterable, Iterator, List, @@ -33,13 +32,9 @@ from ray.data._internal.plan import ExecutionPlan from ray.data._internal.stats import DatasetPipelineStats, DatastreamStats from ray.data.block import ( - BatchUDF, + UserDefinedFunction, Block, DataBatch, - KeyFn, - RowUDF, - T, - U, _apply_strict_mode_batch_format, ) from ray.data.context import DataContext @@ -50,7 +45,6 @@ BlockWritePathProvider, DefaultBlockWritePathProvider, ) -from ray.data.row import TableRow from ray.types import ObjectRef from ray.util.annotations import DeveloperAPI, PublicAPI from ray.util.scheduling_strategies import NodeAffinitySchedulingStrategy @@ -72,7 +66,7 @@ @PublicAPI -class DatasetPipeline(Generic[T]): +class DatasetPipeline: """Implements a pipeline of Datastreams. DatasetPipelines implement pipelined execution. 
This allows for the @@ -91,8 +85,8 @@ class DatasetPipeline(Generic[T]): def __init__( self, - base_iterable: Iterable[Callable[[], Datastream[T]]], - stages: List[Callable[[Datastream[Any]], Datastream[Any]]] = None, + base_iterable: Iterable[Callable[[], Datastream]], + stages: List[Callable[[Datastream], Datastream]] = None, length: Optional[int] = None, progress_bars: bool = DataContext.get_current().enable_progress_bars, _executed: List[bool] = None, @@ -139,13 +133,9 @@ def iterator(self) -> DataIterator: """ return PipelinedDataIterator(self) - def iter_rows(self, *, prefetch_blocks: int = 0) -> Iterator[Union[T, TableRow]]: + def iter_rows(self, *, prefetch_blocks: int = 0) -> Iterator[Dict[str, Any]]: """Return a local row iterator over the data in the pipeline. - If the datastream is a tabular datastream (Arrow/Pandas blocks), dict-like - mappings :py:class:`~ray.data.row.TableRow` are yielded for each row by the - iterator. If the datastream is not tabular, the raw row is yielded. - Examples: >>> import ray >>> for i in ray.data.range(1000000).repeat(5).iter_rows(): # doctest: +SKIP @@ -161,7 +151,7 @@ def iter_rows(self, *, prefetch_blocks: int = 0) -> Iterator[Union[T, TableRow]] A local iterator over the records in the pipeline. """ - def gen_rows() -> Iterator[Union[T, TableRow]]: + def gen_rows() -> Iterator[Dict[str, Any]]: time_start = time.perf_counter() for ds in self.iter_datasets(): @@ -207,12 +197,10 @@ def iter_batches( The final batch may include fewer than ``batch_size`` rows if ``drop_last`` is ``False``. Defaults to 256. batch_format: Specify ``"default"`` to use the default block format - (promotes tables to Pandas and tensors to NumPy), ``"pandas"`` to select - ``pandas.DataFrame``, "pyarrow" to select ``pyarrow.Table``, or - ``"numpy"`` to select ``numpy.ndarray`` for tensor datastreams and - ``Dict[str, numpy.ndarray]`` for tabular datastreams, or None to return - the underlying block exactly as is with no additional formatting. 
- The default is "default". + (NumPy), ``"pandas"`` to select ``pandas.DataFrame``, "pyarrow" to + select ``pyarrow.Table``, or ``"numpy"`` to select + ``Dict[str, numpy.ndarray]``, or None to return the underlying block + exactly as is with no additional formatting. drop_last: Whether to drop the last batch if it's incomplete. local_shuffle_buffer_size: If non-None, the data will be randomly shuffled using a local in-memory shuffle buffer, and this value will serve as the @@ -266,7 +254,7 @@ def _iter_blocks(self) -> Iterator[ObjectRef[Block]]: def split( self, n: int, *, equal: bool = False, locality_hints: List[Any] = None - ) -> List["DatasetPipeline[T]"]: + ) -> List["DatasetPipeline"]: """Split the pipeline into ``n`` disjoint pipeline shards. This returns a list of sub-pipelines that can be passed to Ray tasks @@ -309,7 +297,7 @@ def split( ), ) - def split_at_indices(self, indices: List[int]) -> List["DatasetPipeline[T]"]: + def split_at_indices(self, indices: List[int]) -> List["DatasetPipeline"]: """Split the datastreams within the pipeline at the given indices (like np.split). @@ -354,8 +342,8 @@ def split_at_indices(self, indices: List[int]) -> List["DatasetPipeline[T]"]: return self._split(len(indices) + 1, lambda ds: ds.split_at_indices(indices)) def _split( - self, n: int, splitter: Callable[[Datastream], List["Datastream[T]"]] - ) -> List["DatasetPipeline[T]"]: + self, n: int, splitter: Callable[[Datastream], List["Datastream"]] + ) -> List["DatasetPipeline"]: ctx = DataContext.get_current() scheduling_strategy = ctx.scheduling_strategy if not ray.util.client.ray.is_connected(): @@ -423,7 +411,7 @@ def __next__(self): def rewindow( self, *, blocks_per_window: int, preserve_epoch: bool = True - ) -> "DatasetPipeline[T]": + ) -> "DatasetPipeline": """Change the windowing (blocks per datastream) of this pipeline. Changes the windowing of this pipeline to the specified size. 
For @@ -442,9 +430,9 @@ def rewindow( class WindowIterator: def __init__(self, original_iter): self._original_iter = original_iter - self._buffer: Optional[Datastream[T]] = None + self._buffer: Optional[Datastream] = None - def __next__(self) -> Datastream[T]: + def __next__(self) -> Datastream: try: # Merge windows until we meet the requested window size. if self._buffer is None: @@ -498,7 +486,7 @@ def __iter__(self): length=length, ) - def repeat(self, times: int = None) -> "DatasetPipeline[T]": + def repeat(self, times: int = None) -> "DatasetPipeline": """Repeat this pipeline a given number or times, or indefinitely. This operation is only allowed for pipelines of a finite length. An @@ -526,7 +514,7 @@ def __init__(self, original_iter): # This is calculated later. self._max_i = None - def __next__(self) -> Callable[[], Datastream[T]]: + def __next__(self) -> Callable[[], Datastream]: # Still going through the original pipeline. if self._original_iter: try: @@ -650,10 +638,14 @@ def count(self) -> int: if self._length == float("inf"): raise ValueError("Cannot count a pipeline of infinite length.") - pipe = self.map_batches(lambda batch: [len(batch)]) + def batch_len(batch): + key0 = list(batch.keys())[0] + return len(batch[key0]) + + pipe = self.map_batches(lambda batch: {"len": np.array([batch_len(batch)])}) total = 0 for elem in pipe.iter_rows(): - total += elem + total += elem["len"] return total def sum(self) -> int: @@ -669,10 +661,12 @@ def sum(self) -> int: if self._length == float("inf"): raise ValueError("Cannot sum a pipeline of infinite length.") - pipe = self.map_batches(lambda batch: [batch.sum()[0]], batch_format="pandas") + pipe = self.map_batches( + lambda batch: {"sum": np.array([batch.sum()[0]])}, batch_format="pandas" + ) total = 0 for elem in pipe.iter_rows(): - total += elem + total += elem["sum"] return total def show_windows(self, limit_per_datastream: int = 10) -> None: @@ -692,7 +686,7 @@ def show_windows(self, limit_per_datastream: 
int = 10) -> None: print("=== Window {} ===".format(i)) ds.show(limit_per_datastream) - def iter_epochs(self, max_epoch: int = -1) -> Iterator["DatasetPipeline[T]"]: + def iter_epochs(self, max_epoch: int = -1) -> Iterator["DatasetPipeline"]: """Split this pipeline up by epoch. This allows reading of data per-epoch for repeated Datastreams, which is @@ -718,7 +712,7 @@ def iter_epochs(self, max_epoch: int = -1) -> Iterator["DatasetPipeline[T]"]: """ class Peekable: - def __init__(self, base_iter: Iterator[T]): + def __init__(self, base_iter: Iterator[Datastream]): self._iter = base_iter self._buffer = None @@ -730,13 +724,13 @@ def _fill_buffer_if_possible(self): except StopIteration: pass - def peek(self) -> T: + def peek(self) -> Datastream: self._fill_buffer_if_possible() if self._buffer is None: raise StopIteration return self._buffer - def __next__(self) -> T: + def __next__(self) -> Datastream: self._fill_buffer_if_possible() if self._buffer is None: raise StopIteration @@ -745,11 +739,11 @@ def __next__(self) -> T: return item class SingleEpochIterator: - def __init__(self, peekable_iter: Iterator[Datastream[T]], epoch: int): + def __init__(self, peekable_iter: Iterator[Datastream], epoch: int): self._iter = peekable_iter self._epoch = epoch - def __next__(self) -> Datastream[T]: + def __next__(self) -> Datastream: if self._iter.peek()._get_epoch() > self._epoch: raise StopIteration ds = next(self._iter) @@ -764,7 +758,7 @@ def __init__(self, pipe, max_epoch): self._cur_epoch = None self._max_epoch = max_epoch - def __next__(self) -> "DatasetPipeline[T]": + def __next__(self) -> "DatasetPipeline": if self._cur_epoch is None: self._cur_epoch = self._iter.peek()._get_epoch() else: @@ -792,11 +786,11 @@ def __iter__(self): def map( self, - fn: RowUDF, + fn: UserDefinedFunction[Dict[str, Any], Dict[str, Any]], *, compute: Union[str, ComputeStrategy] = None, **ray_remote_args, - ) -> "DatasetPipeline[U]": + ) -> "DatasetPipeline": """Apply 
:py:meth:`Datastream.map ` to each datastream/window in this pipeline.""" return self.foreach_window( @@ -805,7 +799,7 @@ def map( def map_batches( self, - fn: BatchUDF, + fn: UserDefinedFunction[DataBatch, DataBatch], *, batch_size: Optional[Union[int, Literal["default"]]] = "default", compute: Optional[Union[str, ComputeStrategy]] = None, @@ -815,7 +809,7 @@ def map_batches( fn_constructor_args: Optional[Iterable[Any]] = None, fn_constructor_kwargs: Optional[Dict[str, Any]] = None, **ray_remote_args, - ) -> "DatasetPipeline[U]": + ) -> "DatasetPipeline": """Apply :py:meth:`Datastream.map_batches ` to each datastream/window in this pipeline.""" @@ -836,11 +830,11 @@ def map_batches( def flat_map( self, - fn: RowUDF, + fn: UserDefinedFunction[Dict[str, Any], List[Dict[str, Any]]], *, compute: Union[str, ComputeStrategy] = None, **ray_remote_args, - ) -> "DatasetPipeline[U]": + ) -> "DatasetPipeline": """Apply :py:meth:`Datastream.flat_map ` to each datastream/window in this pipeline.""" return self.foreach_window( @@ -849,11 +843,11 @@ def flat_map( def filter( self, - fn: RowUDF, + fn: UserDefinedFunction[Dict[str, Any], bool], *, compute: Union[str, ComputeStrategy] = None, **ray_remote_args, - ) -> "DatasetPipeline[T]": + ) -> "DatasetPipeline": """Apply :py:meth:`Datastream.filter ` to each datastream/window in this pipeline.""" return self.foreach_window( @@ -867,7 +861,7 @@ def add_column( *, compute: Optional[str] = None, **ray_remote_args, - ) -> "DatasetPipeline[U]": + ) -> "DatasetPipeline": """Apply :py:meth:`Datastream.add_column ` to each datastream/window in this pipeline.""" return self.foreach_window( @@ -880,7 +874,7 @@ def drop_columns( *, compute: Optional[str] = None, **ray_remote_args, - ) -> "DatasetPipeline[U]": + ) -> "DatasetPipeline": """Apply :py:meth:`Datastream.drop_columns ` to each datastream/window in this pipeline.""" return self.foreach_window( @@ -893,7 +887,7 @@ def select_columns( *, compute: Optional[str] = None, 
**ray_remote_args, - ) -> "DatasetPipeline[U]": + ) -> "DatasetPipeline": """Apply :py:meth:`Datastream.select_columns ` to each datastream/window in this pipeline.""" return self.foreach_window( @@ -902,7 +896,7 @@ def select_columns( def repartition_each_window( self, num_blocks: int, *, shuffle: bool = False - ) -> "DatasetPipeline[U]": + ) -> "DatasetPipeline": """Apply :py:meth:`Datastream.repartition ` to each datastream/window in this pipeline.""" return self.foreach_window( @@ -915,7 +909,7 @@ def random_shuffle_each_window( seed: Optional[int] = None, num_blocks: Optional[int] = None, **ray_remote_args, - ) -> "DatasetPipeline[U]": + ) -> "DatasetPipeline": """Apply :py:meth:`Datastream.random_shuffle ` to each datastream/window in this pipeline.""" return self.foreach_window( @@ -925,15 +919,15 @@ def random_shuffle_each_window( ) def sort_each_window( - self, key: Optional[KeyFn] = None, descending: bool = False - ) -> "DatasetPipeline[U]": + self, key: Optional[str] = None, descending: bool = False + ) -> "DatasetPipeline": """Apply :py:meth:`Datastream.sort ` to each datastream/window in this pipeline.""" return self.foreach_window(lambda ds: ds.sort(key, descending)) def randomize_block_order_each_window( self, *, seed: Optional[int] = None - ) -> "DatasetPipeline[U]": + ) -> "DatasetPipeline": """Apply :py:meth:`Datastream.randomize_block_order ` to each datastream/window in this pipeline.""" @@ -1045,7 +1039,7 @@ def write_tfrecords( def write_datasource( self, - datasource: Datasource[T], + datasource: Datasource, *, ray_remote_args: Dict[str, Any] = None, **write_args, @@ -1060,16 +1054,23 @@ def write_datasource( ) ) - def take(self, limit: int = 20) -> List[T]: + def take(self, limit: int = 20) -> List[Dict[str, Any]]: """Call :py:meth:`Datastream.take ` over the stream of output batches from the pipeline""" return Datastream.take(self, limit) - def take_all(self, limit: Optional[int] = None) -> List[T]: + def take_all(self, limit: Optional[int] 
= None) -> List[Dict[str, Any]]: """Call :py:meth:`Datastream.take_all ` over the stream of output batches from the pipeline""" return Datastream.take_all(self, limit) + def take_batch( + self, batch_size: int = 20, *, batch_format: Optional[str] = "default" + ) -> DataBatch: + """Call :py:meth:`Datastream.take_batch ` + over the stream of output batches from the pipeline""" + return Datastream.take_batch(self, batch_size, batch_format=batch_format) + def show(self, limit: int = 20) -> None: """Call :py:meth:`Datastream.show ` over the stream of output batches from the pipeline""" @@ -1195,7 +1196,7 @@ def _iter_datasets_without_peek(self): return PipelineExecutor(self) @DeveloperAPI - def iter_datasets(self) -> Iterator[Datastream[T]]: + def iter_datasets(self) -> Iterator[Datastream]: """Iterate over the output datastreams of this pipeline. Returns: @@ -1236,8 +1237,8 @@ def __iter__(self): @DeveloperAPI def foreach_window( - self, fn: Callable[[Datastream[T]], Datastream[U]] - ) -> "DatasetPipeline[U]": + self, fn: Callable[[Datastream], Datastream] + ) -> "DatasetPipeline": """Apply a transform to each datastream/window in this pipeline. Args: @@ -1270,8 +1271,8 @@ def stats(self, exclude_first_window: bool = True) -> str: @staticmethod def from_iterable( - iterable: Iterable[Callable[[], Datastream[T]]], - ) -> "DatasetPipeline[T]": + iterable: Iterable[Callable[[], Datastream]], + ) -> "DatasetPipeline": """Create a pipeline from an sequence of Datastream producing functions. 
Args: @@ -1337,7 +1338,7 @@ def add_stage(ds, stage): ) self._optimized_stages = optimized_stages - def _peek(self) -> Datastream[T]: + def _peek(self) -> Datastream: if self._first_datastream is None: datastream_iter = iter(self._base_iterable) first_datastream_gen = next(datastream_iter) @@ -1356,7 +1357,7 @@ def _peek(self) -> Datastream[T]: return self._first_datastream - def _write_each_datastream(self, write_fn: Callable[[Datastream[T]], None]) -> None: + def _write_each_datastream(self, write_fn: Callable[[Datastream], None]) -> None: """Write output for each datastream. This is utility method used for write_json, diff --git a/python/ray/data/datasource/datasource.py b/python/ray/data/datasource/datasource.py index 5d39a96f158e..6c9c8e5a2a73 100644 --- a/python/ray/data/datasource/datasource.py +++ b/python/ray/data/datasource/datasource.py @@ -1,11 +1,10 @@ import builtins from copy import copy -from typing import Any, Callable, Dict, Generic, Iterable, List, Optional, Tuple, Union +from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple import numpy as np import ray -from ray.data._internal.arrow_block import ArrowRow from ray.data._internal.delegating_block_builder import DelegatingBlockBuilder from ray.data._internal.execution.interfaces import TaskContext from ray.data._internal.util import _check_pyarrow_version @@ -13,7 +12,6 @@ Block, BlockAccessor, BlockMetadata, - T, ) from ray.data.context import DataContext from ray.types import ObjectRef @@ -23,7 +21,7 @@ @PublicAPI -class Datasource(Generic[T]): +class Datasource: """Interface for defining a custom ``ray.data.Datastream`` datasource. To read a datasource into a datastream, use ``ray.data.read_datasource()``. @@ -36,7 +34,7 @@ class Datasource(Generic[T]): ``write()`` are called in remote tasks. """ - def create_reader(self, **read_args) -> "Reader[T]": + def create_reader(self, **read_args) -> "Reader": """Return a Reader for the given read arguments. 
The reader object will be responsible for querying the read metadata, and @@ -48,7 +46,7 @@ def create_reader(self, **read_args) -> "Reader[T]": return _LegacyDatasourceReader(self, **read_args) @Deprecated - def prepare_read(self, parallelism: int, **read_args) -> List["ReadTask[T]"]: + def prepare_read(self, parallelism: int, **read_args) -> List["ReadTask"]: """Deprecated: Please implement create_reader() instead.""" raise NotImplementedError @@ -131,7 +129,7 @@ def get_name(self) -> str: @PublicAPI -class Reader(Generic[T]): +class Reader: """A bound read operation for a datasource. This is a stateful class so that reads can be prepared in multiple stages. @@ -146,7 +144,7 @@ def estimate_inmemory_data_size(self) -> Optional[int]: """ raise NotImplementedError - def get_read_tasks(self, parallelism: int) -> List["ReadTask[T]"]: + def get_read_tasks(self, parallelism: int) -> List["ReadTask"]: """Execute the read and return read tasks. Args: @@ -169,7 +167,7 @@ def __init__(self, datasource: Datasource, **read_args): def estimate_inmemory_data_size(self) -> Optional[int]: return None - def get_read_tasks(self, parallelism: int) -> List["ReadTask[T]"]: + def get_read_tasks(self, parallelism: int) -> List["ReadTask"]: return self._datasource.prepare_read(parallelism, **self._read_args) @@ -223,7 +221,7 @@ def __call__(self) -> Iterable[Block]: @PublicAPI -class RangeDatasource(Datasource[Union[ArrowRow, int]]): +class RangeDatasource(Datasource): """An example datasource that generates ranges of numbers from [0..n). 
Examples: @@ -237,7 +235,7 @@ class RangeDatasource(Datasource[Union[ArrowRow, int]]): def create_reader( self, n: int, - block_format: str = "list", + block_format: str = "arrow", tensor_shape: Tuple = (1,), column_name: Optional[str] = None, ) -> List[ReadTask]: @@ -340,7 +338,7 @@ def make_block(start: int, count: int) -> Block: @DeveloperAPI -class DummyOutputDatasource(Datasource[Union[ArrowRow, int]]): +class DummyOutputDatasource(Datasource): """An example implementation of a writable datasource for testing. Examples: @@ -400,7 +398,7 @@ def on_write_failed( @DeveloperAPI -class RandomIntRowDatasource(Datasource[ArrowRow]): +class RandomIntRowDatasource(Datasource): """An example datasource that generates rows with random int64 columns. Examples: diff --git a/python/ray/data/datasource/file_based_datasource.py b/python/ray/data/datasource/file_based_datasource.py index 03f514fadc50..09ba71ba135c 100644 --- a/python/ray/data/datasource/file_based_datasource.py +++ b/python/ray/data/datasource/file_based_datasource.py @@ -21,7 +21,6 @@ import numpy as np from ray.air._internal.remote_storage import _is_local_windows_path -from ray.data._internal.arrow_block import ArrowRow from ray.data._internal.delegating_block_builder import DelegatingBlockBuilder from ray.data._internal.execution.interfaces import TaskContext from ray.data._internal.output_buffer import BlockOutputBuffer @@ -190,7 +189,7 @@ def __repr__(self): @DeveloperAPI -class FileBasedDatasource(Datasource[Union[ArrowRow, Any]]): +class FileBasedDatasource(Datasource): """File-based datasource, for reading and writing files. 
This class should not be used directly, and should instead be subclassed diff --git a/python/ray/data/datasource/numpy_datasource.py b/python/ray/data/datasource/numpy_datasource.py index ade44406cf89..e81471f24e29 100644 --- a/python/ray/data/datasource/numpy_datasource.py +++ b/python/ray/data/datasource/numpy_datasource.py @@ -4,7 +4,6 @@ import numpy as np import ray -from ray.air.constants import TENSOR_COLUMN_NAME from ray.data.block import BlockAccessor from ray.data.datasource.file_based_datasource import FileBasedDatasource from typing import Optional @@ -55,7 +54,6 @@ def _convert_block_to_tabular_block( column_name = self._COLUMN_NAME column_names = block.column_names - assert column_names[0] == TENSOR_COLUMN_NAME column_names[0] = column_name return block.rename_columns(column_names) diff --git a/python/ray/data/datasource/sql_datasource.py b/python/ray/data/datasource/sql_datasource.py index 9071069dc136..a46ce81383e7 100644 --- a/python/ray/data/datasource/sql_datasource.py +++ b/python/ray/data/datasource/sql_datasource.py @@ -2,7 +2,6 @@ from contextlib import contextmanager from typing import Any, Callable, Iterator, Iterable, List, Optional -from ray.data._internal.arrow_block import ArrowRow from ray.data.block import Block, BlockAccessor, BlockMetadata from ray.data.datasource.datasource import Datasource, Reader, ReadTask from ray.util.annotations import PublicAPI @@ -23,7 +22,7 @@ def _cursor_to_block(cursor) -> Block: @PublicAPI(stability="alpha") -class SQLDatasource(Datasource[ArrowRow]): +class SQLDatasource(Datasource): def __init__(self, connection_factory: Callable[[], Connection]): self.connection_factory = connection_factory diff --git a/python/ray/data/datasource/webdataset_datasource.py b/python/ray/data/datasource/webdataset_datasource.py index 431dea55e6d6..17ac6aaa240f 100644 --- a/python/ray/data/datasource/webdataset_datasource.py +++ b/python/ray/data/datasource/webdataset_datasource.py @@ -300,7 +300,7 @@ def 
_make_iterable(block: BlockAccessor): Returns: Iterable[Dict[str,Any]]: Iterable of samples """ - return block.iter_rows() + return block.iter_rows(public_row_format=False) @PublicAPI(stability="alpha") @@ -337,6 +337,7 @@ def _read_stream( Yields: List[Dict[str, Any]]: List of sample (list of length 1). """ + import pandas as pd files = _tar_file_iterator( stream, @@ -348,7 +349,7 @@ def _read_stream( for sample in samples: if decoder is not None: sample = _apply_list(decoder, sample, default=_default_decoder) - yield [sample] + yield pd.DataFrame({k: [v] for k, v in sample.items()}) def _write_block( self, diff --git a/python/ray/data/datastream.py b/python/ray/data/datastream.py index 2d9ba8d48c41..8b039dca36c0 100644 --- a/python/ray/data/datastream.py +++ b/python/ray/data/datastream.py @@ -92,17 +92,15 @@ from ray.data.aggregate import AggregateFn, Max, Mean, Min, Std, Sum from ray.data.block import ( VALID_BATCH_FORMATS, + STRICT_MODE_EXPLANATION, _apply_strict_mode_batch_format, _apply_strict_mode_batch_size, - BatchUDF, + UserDefinedFunction, Block, BlockAccessor, BlockMetadata, BlockPartition, DataBatch, - FlatMapUDF, - KeyFn, - RowUDF, StrictModeError, T, U, @@ -170,13 +168,12 @@ @PublicAPI -class Datastream(Generic[T]): +class Datastream: """A Datastream is a distributed data collection for data loading and processing. - Datastreams are distributed streams that produce ``ObjectRef[Block]`` outputs, - where each block holds an ordered collection of items, representing a shard of the - overall data collection. The block can be either a ``pyarrow.Table``, or Python - list. The block also determines the unit of parallelism. + Datastreams are distributed pipelines that produce ``ObjectRef[Block]`` outputs, + where each block holds data in Arrow format, representing a shard of the overall + data collection. The block also determines the unit of parallelism. 
Datastreams can be created in multiple ways: from synthetic data via ``range_*()`` APIs, from existing memory data via ``from_*()`` APIs (this creates a subclass @@ -213,31 +210,26 @@ class Datastream(Generic[T]): Examples: >>> import ray >>> ds = ray.data.range(1000) - >>> # Transform in parallel with map_batches(). - >>> ds.map_batches(lambda batch: [v * 2 for v in batch]) + >>> # Transform batches (Dict[str, np.ndarray]) with map_batches(). + >>> ds.map_batches(lambda batch: {"id": batch["id"] * 2}) MapBatches() - +- Datastream(num_blocks=17, num_rows=1000, schema=) - >>> # Compute maximum - >>> ds.max() + +- Datastream(num_blocks=17, num_rows=1000, schema={id: int64}) + >>> # Compute the maximum. + >>> ds.max("id") 999 - >>> # Group the data. - >>> ds.groupby(lambda x: x % 3).count() - Aggregate - +- Datastream(num_blocks=..., num_rows=1000, schema=) >>> # Shuffle this datastream randomly. >>> ds.random_shuffle() RandomShuffle - +- Datastream(num_blocks=..., num_rows=1000, schema=) + +- Datastream(num_blocks=..., num_rows=1000, schema={id: int64}) >>> # Sort it back in order. - >>> ds.sort() + >>> ds.sort("id") Sort - +- Datastream(num_blocks=..., num_rows=1000, schema=) + +- Datastream(num_blocks=..., num_rows=1000, schema={id: int64}) Both unexecuted and materialized Datastreams can be passed between Ray tasks and - actors without incurring a copy. Datastream supports conversion to/from several more - featureful dataframe libraries (e.g., Spark, Dask, Modin, MARS), and are also - compatible with distributed - TensorFlow / PyTorch. + actors without incurring a copy. Datastream supports conversion to/from several + more featureful dataframe libraries (e.g., Spark, Dask, Modin, MARS), and are also + compatible with distributed TensorFlow / PyTorch. """ def __init__( @@ -255,6 +247,9 @@ def __init__( assert isinstance(plan, ExecutionPlan) usage_lib.record_library_usage("dataset") # Legacy telemetry name. 
+ if ray.util.log_once("strict_mode_explanation"): + logger.warning(STRICT_MODE_EXPLANATION) + self._plan = plan self._uuid = uuid4().hex self._epoch = epoch @@ -271,8 +266,8 @@ def __init__( @staticmethod def copy( - ds: "Datastream[T]", _deep_copy: bool = False, _as: Optional[type] = None - ) -> "Datastream[T]": + ds: "Datastream", _deep_copy: bool = False, _as: Optional[type] = None + ) -> "Datastream": if not _as: _as = Datastream if _deep_copy: @@ -282,11 +277,11 @@ def copy( def map( self, - fn: RowUDF[T, U], + fn: UserDefinedFunction[Dict[str, Any], Dict[str, Any]], *, - compute: Union[str, ComputeStrategy] = None, + compute: Optional[ComputeStrategy] = None, **ray_remote_args, - ) -> "Datastream[U]": + ) -> "Datastream": """Apply the given function to each record of this datastream. Note that mapping individual records can be quite slow. Consider using @@ -296,9 +291,10 @@ def map( >>> import ray >>> # Transform python objects. >>> ds = ray.data.range(1000) - >>> ds.map(lambda x: x * 2) + >>> # The function goes from record (Dict[str, Any]) to record. + >>> ds.map(lambda record: {"id": record["id"] * 2}) Map - +- Datastream(num_blocks=..., num_rows=1000, schema=) + +- Datastream(num_blocks=..., num_rows=1000, schema={id: int64}) >>> # Transform Arrow records. >>> ds = ray.data.from_items( ... [{"value": i} for i in range(1000)]) @@ -316,9 +312,8 @@ def map( >>> # Apply the transform in parallel on GPUs. Since >>> # compute=ActorPoolStrategy(size=8) the transform will be applied on a >>> # pool of 8 Ray actors, each allocated 1 GPU by Ray. - >>> from ray.data._internal.compute import ActorPoolStrategy >>> ds.map(CachedModel, # doctest: +SKIP - ... compute=ActorPoolStrategy(size=8), + ... compute=ray.data.ActorPoolStrategy(size=8), ... num_gpus=1) Time complexity: O(datastream size / parallelism) @@ -327,7 +322,7 @@ def map( fn: The function to apply to each record, or a class type that can be instantiated to create such a callable. 
Callable classes are only supported for the actor compute strategy. - compute: The compute strategy, either "tasks" (default) to use Ray + compute: The compute strategy, either None (default) to use Ray tasks, ``ray.data.ActorPoolStrategy(size=n)`` to use a fixed-size actor pool, or ``ray.data.ActorPoolStrategy(min_size=m, max_size=n)`` for an autoscaling actor pool. @@ -386,10 +381,10 @@ def map( def map_batches( self, - fn: BatchUDF, + fn: UserDefinedFunction[DataBatch, DataBatch], *, - batch_size: Optional[Union[int, Literal["default"]]] = "default", - compute: Optional[Union[str, ComputeStrategy]] = None, + batch_size: Union[int, None, Literal["default"]] = "default", + compute: Optional[ComputeStrategy] = None, batch_format: Optional[str] = "default", zero_copy_batch: bool = False, fn_args: Optional[Iterable[Any]] = None, @@ -397,27 +392,15 @@ def map_batches( fn_constructor_args: Optional[Iterable[Any]] = None, fn_constructor_kwargs: Optional[Dict[str, Any]] = None, **ray_remote_args, - ) -> "Datastream[Any]": + ) -> "Datastream": """Apply the given function to batches of data. This applies the ``fn`` in parallel with map tasks, with each task handling - a block or a bundle of blocks of the datastream. Each batch is executed serially - at Ray level (at lower level, the processing of the batch is usually - vectorized). - - Batches are represented as dataframes, ndarrays, or lists. The default batch - type is determined by your datastream's schema. To determine the default batch - type, call :meth:`~Datastream.default_batch_format`. Alternatively, set the batch - type with ``batch_format``. + a batch of data (typically Dict[str, np.ndarray] or pd.DataFrame). To learn more about writing functions for :meth:`~Datastream.map_batches`, read :ref:`writing user-defined functions `. - .. tip:: - If you have a small number of big blocks, it may limit parallelism. 
You may - consider increasing the number of blocks via ``.repartition()`` before - applying ``.map_batches()``. - .. tip:: If ``fn`` does not mutate its input, set ``zero_copy_batch=True`` to elide a batch copy, which can improve performance and decrease memory utilization. @@ -437,54 +420,32 @@ def map_batches( Examples: - >>> import pandas as pd + >>> import numpy as np >>> import ray - >>> df = pd.DataFrame({ - ... "name": ["Luna", "Rory", "Scout"], - ... "age": [4, 14, 9] - ... }) - >>> ds = ray.data.from_pandas(df) + >>> ds = ray.data.from_items([ + ... {"name": "Luna", "age": 4}, + ... {"name": "Rory", "age": 14}, + ... {"name": "Scout", "age": 9}, + ... ]) >>> ds # doctest: +SKIP MaterializedDatastream( - num_blocks=1, + num_blocks=3, num_rows=3, - schema={name: object, age: int64} + schema={name: string, age: int64} ) - Call :meth:`.default_batch_format` to determine the default batch - type. - - >>> ds.default_batch_format() - - - .. tip:: - - Datastreams created from tabular data like Arrow tables and Parquet files - yield ``pd.DataFrame`` batches. - - Once you know the batch type, define a function that transforms batches - of data. ``ds.map_batches`` applies the function in parallel. + Here ``fn`` returns the same batch type as the input, but your ``fn`` can + also return a different batch type (e.g., pd.DataFrame). Read more about + :ref:`user-defined function output types `. - >>> def map_fn(batch: pd.DataFrame) -> pd.DataFrame: + >>> from typing import Dict + >>> def map_fn(batch: Dict[str, np.ndarray]) -> Dict[str, np.ndarray]: ... batch["age_in_dog_years"] = 7 * batch["age"] ... return batch >>> ds = ds.map_batches(map_fn) >>> ds MapBatches(map_fn) - +- Datastream(num_blocks=1, num_rows=3, schema={name: object, age: int64}) - - Your ``fn`` can return a different type than the input type. To learn more - about supported output types, read - :ref:`user-defined function output types `. 
- - >>> from typing import List - >>> def map_fn(batch: pd.DataFrame) -> List[int]: - ... return list(batch["age_in_dog_years"]) - >>> ds = ds.map_batches(map_fn) - >>> ds - MapBatches(map_fn) - +- MapBatches(map_fn) - +- Datastream(num_blocks=1, num_rows=3, schema={name: object, age: int64}) + +- Datastream(num_blocks=3, num_rows=3, schema={name: string, age: int64}) :ref:`Actors ` can improve the performance of some workloads. For example, you can use :ref:`actors ` to load a model once @@ -496,7 +457,6 @@ def map_batches( In the example below, ``CachedModel`` is called on an autoscaling pool of two to eight :ref:`actors `, each allocated one GPU by Ray. - >>> from ray.data import ActorPoolStrategy >>> init_large_model = ... # doctest: +SKIP >>> class CachedModel: ... def __init__(self): @@ -506,7 +466,7 @@ def map_batches( >>> ds.map_batches( # doctest: +SKIP ... CachedModel, # doctest: +SKIP ... batch_size=256, # doctest: +SKIP - ... compute=ActorPoolStrategy(size=8), # doctest: +SKIP + ... compute=ray.data.ActorPoolStrategy(size=8), # doctest: +SKIP ... num_gpus=1, ... ) # doctest: +SKIP @@ -515,15 +475,14 @@ def map_batches( returning a very large output batch, ``fn`` can instead yield the output batch in chunks. - >>> from typing import Iterator - >>> def map_fn_with_large_output(batch: List[int]) -> Iterator[List[int]]: + >>> def map_fn_with_large_output(batch): ... for i in range(3): - ... yield batch * 100 + ... yield {"large_output": np.ones((100, 1000))} >>> ds = ray.data.from_items([1]) >>> ds = ds.map_batches(map_fn_with_large_output) >>> ds MapBatches(map_fn_with_large_output) - +- Datastream(num_blocks=1, num_rows=1, schema=) + +- Datastream(num_blocks=1, num_rows=1, schema={item: int64}) Args: @@ -541,12 +500,10 @@ def map_batches( pool, or ``ray.data.ActorPoolStrategy(min_size=m, max_size=n)`` for an autoscaling actor pool. 
batch_format: Specify ``"default"`` to use the default block format - (promotes tables to Pandas and tensors to NumPy), ``"pandas"`` to select - ``pandas.DataFrame``, "pyarrow" to select ``pyarrow.Table``, or - ``"numpy"`` to select ``numpy.ndarray`` for tensor datastreams and - ``Dict[str, numpy.ndarray]`` for tabular datastreams, or None to return - the underlying block exactly as is with no additional formatting. - The default is "default". + (NumPy), ``"pandas"`` to select ``pandas.DataFrame``, "pyarrow" to + select ``pyarrow.Table``, or ``"numpy"`` to select + ``Dict[str, numpy.ndarray]``, or None to return the underlying block + exactly as is with no additional formatting. zero_copy_batch: Whether ``fn`` should be provided zero-copy, read-only batches. If this is ``True`` and no copy is required for the ``batch_format`` conversion, the batch will be a zero-copy, read-only @@ -576,9 +533,6 @@ def map_batches( :meth:`~Datastream.iter_batches` Call this function to iterate over batches of data. - :meth:`~Datastream.default_batch_format` - Call this function to determine the default batch type. - :meth:`~Datastream.flat_map`: Call this method to create new records from existing ones. Unlike :meth:`~Datastream.map`, a function passed to :meth:`~Datastream.flat_map` @@ -698,7 +652,7 @@ def add_column( *, compute: Optional[str] = None, **ray_remote_args, - ) -> "Datastream[T]": + ) -> "Datastream": """Add the given column to the datastream. This is only supported for datastreams convertible to pandas format. @@ -707,12 +661,11 @@ def add_column( Examples: >>> import ray - >>> ds = ray.data.range_table(100) + >>> ds = ray.data.range(100) >>> # Add a new column equal to value * 2. - >>> ds = ds.add_column( - ... "new_col", lambda df: df["value"] * 2) + >>> ds = ds.add_column("new_col", lambda df: df["id"] * 2) >>> # Overwrite the existing "value" with zeros. 
- >>> ds = ds.add_column("value", lambda df: 0) + >>> ds = ds.add_column("id", lambda df: 0) Time complexity: O(datastream size / parallelism) @@ -750,17 +703,16 @@ def drop_columns( *, compute: Optional[str] = None, **ray_remote_args, - ) -> "Datastream[U]": + ) -> "Datastream": """Drop one or more columns from the datastream. Examples: >>> import ray - >>> ds = ray.data.range_table(100) + >>> ds = ray.data.range(100) >>> # Add a new column equal to value * 2. - >>> ds = ds.add_column( - ... "new_col", lambda df: df["value"] * 2) + >>> ds = ds.add_column("new_col", lambda df: df["id"] * 2) >>> # Drop the existing "value" column. - >>> ds = ds.drop_columns(["value"]) + >>> ds = ds.drop_columns(["id"]) Time complexity: O(datastream size / parallelism) @@ -790,7 +742,7 @@ def select_columns( *, compute: Union[str, ComputeStrategy] = None, **ray_remote_args, - ) -> "Datastream[T]": + ) -> "Datastream": """Select one or more columns from the datastream. All input columns used to select need to be in the schema of the datastream. @@ -825,6 +777,7 @@ def select_columns( """ # noqa: E501 return self.map_batches( lambda batch: BlockAccessor.for_block(batch).select(columns=cols), + batch_format="pandas", zero_copy_batch=True, compute=compute, **ray_remote_args, @@ -832,11 +785,11 @@ def select_columns( def flat_map( self, - fn: FlatMapUDF[T, U], + fn: UserDefinedFunction[Dict[str, Any], List[Dict[str, Any]]], *, - compute: Union[str, ComputeStrategy] = None, + compute: Optional[ComputeStrategy] = None, **ray_remote_args, - ) -> "Datastream[U]": + ) -> "Datastream": """Apply the given function to each record and then flatten results. 
Consider using ``.map_batches()`` for better performance (the batch size can be @@ -845,9 +798,9 @@ def flat_map( Examples: >>> import ray >>> ds = ray.data.range(1000) - >>> ds.flat_map(lambda x: [x, x ** 2, x ** 3]) + >>> ds.flat_map(lambda x: [{"id": 1}, {"id": 2}, {"id": 4}]) FlatMap - +- Datastream(num_blocks=..., num_rows=1000, schema=) + +- Datastream(num_blocks=..., num_rows=1000, schema={id: int64}) Time complexity: O(datastream size / parallelism) @@ -906,11 +859,11 @@ def flat_map( def filter( self, - fn: RowUDF[T, U], + fn: UserDefinedFunction[Dict[str, Any], bool], *, compute: Union[str, ComputeStrategy] = None, **ray_remote_args, - ) -> "Datastream[T]": + ) -> "Datastream": """Filter out records that do not satisfy the given predicate. Consider using ``.map_batches()`` for better performance (you can implement @@ -919,9 +872,9 @@ def filter( Examples: >>> import ray >>> ds = ray.data.range(100) - >>> ds.filter(lambda x: x % 2 == 0) + >>> ds.filter(lambda x: x["id"] % 2 == 0) Filter - +- Datastream(num_blocks=..., num_rows=100, schema=) + +- Datastream(num_blocks=..., num_rows=100, schema={id: int64}) Time complexity: O(datastream size / parallelism) @@ -967,7 +920,7 @@ def filter( return Datastream(plan, self._epoch, self._lazy, logical_plan) - def repartition(self, num_blocks: int, *, shuffle: bool = False) -> "Datastream[T]": + def repartition(self, num_blocks: int, *, shuffle: bool = False) -> "Datastream": """Repartition the datastream into exactly this number of blocks. After repartitioning, all blocks in the returned datastream will have @@ -1012,7 +965,7 @@ def random_shuffle( seed: Optional[int] = None, num_blocks: Optional[int] = None, **ray_remote_args, - ) -> "Datastream[T]": + ) -> "Datastream": """Randomly shuffle the elements of this datastream. Examples: @@ -1021,11 +974,11 @@ def random_shuffle( >>> # Shuffle this datastream randomly. 
>>> ds.random_shuffle() RandomShuffle - +- Datastream(num_blocks=..., num_rows=100, schema=) + +- Datastream(num_blocks=..., num_rows=100, schema={id: int64}) >>> # Shuffle this datastream with a fixed random seed. >>> ds.random_shuffle(seed=12345) RandomShuffle - +- Datastream(num_blocks=..., num_rows=100, schema=) + +- Datastream(num_blocks=..., num_rows=100, schema={id: int64}) Time complexity: O(datastream size / parallelism) @@ -1058,7 +1011,7 @@ def randomize_block_order( self, *, seed: Optional[int] = None, - ) -> "Datastream[T]": + ) -> "Datastream": """Randomly shuffle the blocks of this datastream. Examples: @@ -1090,7 +1043,7 @@ def randomize_block_order( def random_sample( self, fraction: float, *, seed: Optional[int] = None - ) -> "Datastream[T]": + ) -> "Datastream": """Randomly samples a fraction of the elements of this datastream. Note that the exact number of elements returned is not guaranteed, @@ -1139,7 +1092,7 @@ def process_batch(batch): ) raise ValueError(f"Unsupported batch type: {type(batch)}") - return self.map_batches(process_batch) + return self.map_batches(process_batch, batch_format=None) @ConsumptionAPI def streaming_split( @@ -1213,7 +1166,7 @@ def streaming_split( @ConsumptionAPI def split( self, n: int, *, equal: bool = False, locality_hints: Optional[List[Any]] = None - ) -> List["MaterializedDatastream[T]"]: + ) -> List["MaterializedDatastream"]: """Materialize and split the datastream into ``n`` disjoint pieces. This returns a list of MaterializedDatastreams that can be passed to Ray tasks @@ -1416,19 +1369,19 @@ def build_node_id_by_actor(actors: List[Any]) -> Dict[Any, str]: ] @ConsumptionAPI - def split_at_indices(self, indices: List[int]) -> List["MaterializedDatastream[T]"]: + def split_at_indices(self, indices: List[int]) -> List["MaterializedDatastream"]: """Materialize and split the datastream at the given indices (like np.split). 
Examples: >>> import ray >>> ds = ray.data.range(10) >>> d1, d2, d3 = ds.split_at_indices([2, 5]) - >>> d1.take() - [0, 1] - >>> d2.take() - [2, 3, 4] - >>> d3.take() - [5, 6, 7, 8, 9] + >>> d1.take_batch() + {'id': array([0, 1])} + >>> d2.take_batch() + {'id': array([2, 3, 4])} + >>> d3.take_batch() + {'id': array([5, 6, 7, 8, 9])} Time complexity: O(num splits) @@ -1481,7 +1434,7 @@ def split_at_indices(self, indices: List[int]) -> List["MaterializedDatastream[T @ConsumptionAPI def split_proportionately( self, proportions: List[float] - ) -> List["MaterializedDatastream[T]"]: + ) -> List["MaterializedDatastream"]: """Materialize and split the datastream using proportions. A common use case for this would be splitting the datastream into train @@ -1499,12 +1452,12 @@ def split_proportionately( >>> import ray >>> ds = ray.data.range(10) >>> d1, d2, d3 = ds.split_proportionately([0.2, 0.5]) - >>> d1.take() - [0, 1] - >>> d2.take() - [2, 3, 4, 5, 6] - >>> d3.take() - [7, 8, 9] + >>> d1.take_batch() + {'id': array([0, 1])} + >>> d2.take_batch() + {'id': array([2, 3, 4, 5, 6])} + >>> d3.take_batch() + {'id': array([7, 8, 9])} Time complexity: O(num splits) @@ -1554,7 +1507,7 @@ def train_test_split( *, shuffle: bool = False, seed: Optional[int] = None, - ) -> Tuple["MaterializedDatastream[T]", "MaterializedDatastream[T]"]: + ) -> Tuple["MaterializedDatastream", "MaterializedDatastream"]: """Materialize and split the datastream into train and test subsets. 
Examples: @@ -1562,10 +1515,10 @@ def train_test_split( >>> import ray >>> ds = ray.data.range(8) >>> train, test = ds.train_test_split(test_size=0.25) - >>> train.take() - [0, 1, 2, 3, 4, 5] - >>> test.take() - [6, 7] + >>> train.take_batch() + {'id': array([0, 1, 2, 3, 4, 5])} + >>> test.take_batch() + {'id': array([6, 7])} Args: test_size: If float, should be between 0.0 and 1.0 and represent the @@ -1606,7 +1559,7 @@ def train_test_split( return ds.split_at_indices([ds_length - test_size]) @ConsumptionAPI(pattern="Args:") - def union(self, *other: List["Datastream[T]"]) -> "Datastream[T]": + def union(self, *other: List["Datastream"]) -> "Datastream": """Materialize and combine this datastream with others of the same type. The order of the blocks in the datastreams is preserved, as is the @@ -1695,16 +1648,12 @@ def union(self, *other: List["Datastream[T]"]) -> "Datastream[T]": self._lazy, ) - def groupby(self, key: Optional[KeyFn]) -> "GroupedData[T]": + def groupby(self, key: Optional[str]) -> "GroupedData": """Group the datastream by the key function or column name. Examples: >>> import ray - >>> # Group by a key function and aggregate. - >>> ray.data.range(100).groupby(lambda x: x % 3).count() - Aggregate - +- Datastream(num_blocks=..., num_rows=100, schema=) - >>> # Group by an Arrow table column and aggregate. + >>> # Group by a table column and aggregate. >>> ray.data.from_items([ ... {"A": x % 3, "B": x} for x in range(100)]).groupby( ... "A").count() @@ -1714,8 +1663,7 @@ def groupby(self, key: Optional[KeyFn]) -> "GroupedData[T]": Time complexity: O(datastream size * log(datastream size / parallelism)) Args: - key: A key function or Arrow column name. If this is None, the - grouping is global. + key: A column name. If this is None, the grouping is global. Returns: A lazy GroupedData that can be aggregated later. 
@@ -1730,17 +1678,14 @@ def groupby(self, key: Optional[KeyFn]) -> "GroupedData[T]": return GroupedData(self, key) @ConsumptionAPI - def aggregate(self, *aggs: AggregateFn) -> U: + def aggregate(self, *aggs: AggregateFn) -> Union[Any, Dict[str, Any]]: """Aggregate the entire datastream as one group. Examples: >>> import ray >>> from ray.data.aggregate import Max, Mean - >>> ray.data.range(100).aggregate(Max()) - (99,) - >>> ray.data.range_table(100).aggregate( - ... Max("value"), Mean("value")) - {'max(value)': 99, 'mean(value)': 49.5} + >>> ray.data.range(100).aggregate(Max("id"), Mean("id")) + {'max(id)': 99, 'mean(id)': 49.5} Time complexity: O(datastream size / parallelism) @@ -1752,8 +1697,7 @@ def aggregate(self, *aggs: AggregateFn) -> U: a tuple of ``(agg1, agg2, ...)`` where each tuple element is the corresponding aggregation result. If the input datastream is an Arrow datastream then the output is - an ``ArrowRow`` where each column is the corresponding - aggregation result. + an dict where each column is the corresponding aggregation result. If the datastream is empty, return ``None``. """ ret = self.groupby(None).aggregate(*aggs).take(1) @@ -1761,19 +1705,13 @@ def aggregate(self, *aggs: AggregateFn) -> U: @ConsumptionAPI def sum( - self, on: Optional[Union[KeyFn, List[KeyFn]]] = None, ignore_nulls: bool = True - ) -> U: + self, on: Optional[Union[str, List[str]]] = None, ignore_nulls: bool = True + ) -> Union[Any, Dict[str, Any]]: """Compute sum over entire datastream. Examples: >>> import ray - >>> ray.data.range(100).sum() - 4950 - >>> ray.data.from_items([ - ... (i, i**2) - ... for i in range(100)]).sum(lambda x: x[1]) - 328350 - >>> ray.data.range_table(100).sum("value") + >>> ray.data.range(100).sum("id") 4950 >>> ray.data.from_items([ ... {"A": i, "B": i**2} @@ -1781,13 +1719,7 @@ def sum( {'sum(A)': 4950, 'sum(B)': 328350} Args: - on: The data subset on which to compute the sum. 
- - - For a simple datastream: it can be a callable or a list thereof, - and the default is to return a scalar sum of all rows. - - For an Arrow datastream: it can be a column name or a list - thereof, and the default is to return an ``ArrowRow`` - containing the column-wise sum of all columns. + on: a column name or a list of column names to aggregate. ignore_nulls: Whether to ignore null values. If ``True``, null values will be ignored when computing the sum; if ``False``, if a null value is encountered, the output will be None. @@ -1797,22 +1729,13 @@ def sum( Returns: The sum result. - For a simple datastream, the output is: + For different values of ``on``, the return varies: - - ``on=None``: a scalar representing the sum of all rows, - - ``on=callable``: a scalar representing the sum of the outputs of - the callable called on each row, - - ``on=[callable_1, ..., calalble_n]``: a tuple of - ``(sum_1, ..., sum_n)`` representing the sum of the outputs of - the corresponding callables called on each row. - - For an Arrow datastream, the output is: - - - ``on=None``: an ArrowRow containing the column-wise sum of all + - ``on=None``: a dict containing the column-wise sum of all columns, - ``on="col"``: a scalar representing the sum of all items in column ``"col"``, - - ``on=["col_1", ..., "col_n"]``: an n-column ``ArrowRow`` + - ``on=["col_1", ..., "col_n"]``: an n-column ``dict`` containing the column-wise sum of the provided columns. If the datastream is empty, all values are null, or any value is null @@ -1823,19 +1746,13 @@ def sum( @ConsumptionAPI def min( - self, on: Optional[Union[KeyFn, List[KeyFn]]] = None, ignore_nulls: bool = True - ) -> U: + self, on: Optional[Union[str, List[str]]] = None, ignore_nulls: bool = True + ) -> Union[Any, Dict[str, Any]]: """Compute minimum over entire datastream. Examples: >>> import ray - >>> ray.data.range(100).min() - 0 - >>> ray.data.from_items([ - ... (i, i**2) - ... 
for i in range(100)]).min(lambda x: x[1]) - 0 - >>> ray.data.range_table(100).min("value") + >>> ray.data.range(100).min("id") 0 >>> ray.data.from_items([ ... {"A": i, "B": i**2} @@ -1843,13 +1760,7 @@ def min( {'min(A)': 0, 'min(B)': 0} Args: - on: The data subset on which to compute the min. - - - For a simple datastream: it can be a callable or a list thereof, - and the default is to return a scalar min of all rows. - - For an Arrow datastream: it can be a column name or a list - thereof, and the default is to return an ``ArrowRow`` - containing the column-wise min of all columns. + on: a column name or a list of column names to aggregate. ignore_nulls: Whether to ignore null values. If ``True``, null values will be ignored when computing the min; if ``False``, if a null value is encountered, the output will be None. @@ -1859,22 +1770,13 @@ def min( Returns: The min result. - For a simple datastream, the output is: + For different values of ``on``, the return varies: - - ``on=None``: a scalar representing the min of all rows, - - ``on=callable``: a scalar representing the min of the outputs - of the callable called on each row, - - ``on=[callable_1, ..., calalble_n]``: a tuple of - ``(min_1, ..., min_n)`` representing the min of the outputs - of the corresponding callables called on each row. - - For an Arrow datastream, the output is: - - - ``on=None``: an ``ArrowRow`` containing the column-wise min of + - ``on=None``: an dict containing the column-wise min of all columns, - ``on="col"``: a scalar representing the min of all items in column ``"col"``, - - ``on=["col_1", ..., "col_n"]``: an n-column ``ArrowRow`` + - ``on=["col_1", ..., "col_n"]``: an n-column dict containing the column-wise min of the provided columns. 
If the datastream is empty, all values are null, or any value is null @@ -1885,19 +1787,13 @@ def min( @ConsumptionAPI def max( - self, on: Optional[Union[KeyFn, List[KeyFn]]] = None, ignore_nulls: bool = True - ) -> U: + self, on: Optional[Union[str, List[str]]] = None, ignore_nulls: bool = True + ) -> Union[Any, Dict[str, Any]]: """Compute maximum over entire datastream. Examples: >>> import ray - >>> ray.data.range(100).max() - 99 - >>> ray.data.from_items([ - ... (i, i**2) - ... for i in range(100)]).max(lambda x: x[1]) - 9801 - >>> ray.data.range_table(100).max("value") + >>> ray.data.range(100).max("id") 99 >>> ray.data.from_items([ ... {"A": i, "B": i**2} @@ -1905,13 +1801,7 @@ def max( {'max(A)': 99, 'max(B)': 9801} Args: - on: The data subset on which to compute the max. - - - For a simple datastream: it can be a callable or a list thereof, - and the default is to return a scalar max of all rows. - - For an Arrow datastream: it can be a column name or a list - thereof, and the default is to return an ``ArrowRow`` - containing the column-wise max of all columns. + on: a column name or a list of column names to aggregate. ignore_nulls: Whether to ignore null values. If ``True``, null values will be ignored when computing the max; if ``False``, if a null value is encountered, the output will be None. @@ -1921,22 +1811,13 @@ def max( Returns: The max result. - For a simple datastream, the output is: - - - ``on=None``: a scalar representing the max of all rows, - - ``on=callable``: a scalar representing the max of the outputs of - the callable called on each row, - - ``on=[callable_1, ..., calalble_n]``: a tuple of - ``(max_1, ..., max_n)`` representing the max of the outputs of - the corresponding callables called on each row. 
+ For different values of ``on``, the return varies: - For an Arrow datastream, the output is: - - - ``on=None``: an ``ArrowRow`` containing the column-wise max of + - ``on=None``: an dict containing the column-wise max of all columns, - ``on="col"``: a scalar representing the max of all items in column ``"col"``, - - ``on=["col_1", ..., "col_n"]``: an n-column ``ArrowRow`` + - ``on=["col_1", ..., "col_n"]``: an n-column dict containing the column-wise max of the provided columns. If the datastream is empty, all values are null, or any value is null @@ -1947,19 +1828,13 @@ def max( @ConsumptionAPI def mean( - self, on: Optional[Union[KeyFn, List[KeyFn]]] = None, ignore_nulls: bool = True - ) -> U: + self, on: Optional[Union[str, List[str]]] = None, ignore_nulls: bool = True + ) -> Union[Any, Dict[str, Any]]: """Compute mean over entire datastream. Examples: >>> import ray - >>> ray.data.range(100).mean() - 49.5 - >>> ray.data.from_items([ - ... (i, i**2) - ... for i in range(100)]).mean(lambda x: x[1]) - 3283.5 - >>> ray.data.range_table(100).mean("value") + >>> ray.data.range(100).mean("id") 49.5 >>> ray.data.from_items([ ... {"A": i, "B": i**2} @@ -1967,13 +1842,7 @@ def mean( {'mean(A)': 49.5, 'mean(B)': 3283.5} Args: - on: The data subset on which to compute the mean. - - - For a simple datastream: it can be a callable or a list thereof, - and the default is to return a scalar mean of all rows. - - For an Arrow datastream: it can be a column name or a list - thereof, and the default is to return an ``ArrowRow`` - containing the column-wise mean of all columns. + on: a column name or a list of column names to aggregate. ignore_nulls: Whether to ignore null values. If ``True``, null values will be ignored when computing the mean; if ``False``, if a null value is encountered, the output will be None. @@ -1983,22 +1852,13 @@ def mean( Returns: The mean result. 
- For a simple datastream, the output is: - - - ``on=None``: a scalar representing the mean of all rows, - - ``on=callable``: a scalar representing the mean of the outputs - of the callable called on each row, - - ``on=[callable_1, ..., calalble_n]``: a tuple of - ``(mean_1, ..., mean_n)`` representing the mean of the outputs - of the corresponding callables called on each row. + For different values of ``on``, the return varies: - For an Arrow datastream, the output is: - - - ``on=None``: an ``ArrowRow`` containing the column-wise mean of + - ``on=None``: an dict containing the column-wise mean of all columns, - ``on="col"``: a scalar representing the mean of all items in column ``"col"``, - - ``on=["col_1", ..., "col_n"]``: an n-column ``ArrowRow`` + - ``on=["col_1", ..., "col_n"]``: an n-column dict containing the column-wise mean of the provided columns. If the datastream is empty, all values are null, or any value is null @@ -2010,21 +1870,15 @@ def mean( @ConsumptionAPI def std( self, - on: Optional[Union[KeyFn, List[KeyFn]]] = None, + on: Optional[Union[str, List[str]]] = None, ddof: int = 1, ignore_nulls: bool = True, - ) -> U: + ) -> Union[Any, Dict[str, Any]]: """Compute standard deviation over entire datastream. Examples: >>> import ray - >>> round(ray.data.range(100).std(), 5) - 29.01149 - >>> ray.data.from_items([ - ... (i, i**2) - ... for i in range(100)]).std(lambda x: x[1]) - 2968.1748039269296 - >>> round(ray.data.range_table(100).std("value", ddof=0), 5) + >>> round(ray.data.range(100).std("id", ddof=0), 5) 28.86607 >>> ray.data.from_items([ ... {"A": i, "B": i**2} @@ -2040,13 +1894,7 @@ def std( https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Welford's_online_algorithm Args: - on: The data subset on which to compute the std. - - - For a simple datastream: it can be a callable or a list thereof, - and the default is to return a scalar std of all rows. 
- - For an Arrow datastream: it can be a column name or a list - thereof, and the default is to return an ``ArrowRow`` - containing the column-wise std of all columns. + on: a column name or a list of column names to aggregate. ddof: Delta Degrees of Freedom. The divisor used in calculations is ``N - ddof``, where ``N`` represents the number of elements. ignore_nulls: Whether to ignore null values. If ``True``, null @@ -2058,22 +1906,13 @@ def std( Returns: The standard deviation result. - For a simple datastream, the output is: + For different values of ``on``, the return varies: - - ``on=None``: a scalar representing the std of all rows, - - ``on=callable``: a scalar representing the std of the outputs of - the callable called on each row, - - ``on=[callable_1, ..., calalble_n]``: a tuple of - ``(std_1, ..., std_n)`` representing the std of the outputs of - the corresponding callables called on each row. - - For an Arrow datastream, the output is: - - - ``on=None``: an ``ArrowRow`` containing the column-wise std of + - ``on=None``: an dict containing the column-wise std of all columns, - ``on="col"``: a scalar representing the std of all items in column ``"col"``, - - ``on=["col_1", ..., "col_n"]``: an n-column ``ArrowRow`` + - ``on=["col_1", ..., "col_n"]``: an n-column dict containing the column-wise std of the provided columns. If the datastream is empty, all values are null, or any value is null @@ -2082,39 +1921,23 @@ def std( ret = self._aggregate_on(Std, on, ignore_nulls, ddof=ddof) return self._aggregate_result(ret) - def sort( - self, key: Optional[KeyFn] = None, descending: bool = False - ) -> "Datastream[T]": - # TODO ds.sort(lambda ...) fails with: - # Callable key ' at 0x1b07a4cb0>' requires - # datastream format to be 'simple', was 'arrow'. - # How do I create something "simple" here? + def sort(self, key: Optional[str] = None, descending: bool = False) -> "Datastream": """Sort the datastream by the specified key column or key function. 
Examples: >>> import ray - >>> # Sort using the entire record as the key. - >>> ds = ray.data.range(100) - >>> ds.sort() - Sort - +- Datastream(num_blocks=..., num_rows=100, schema=) >>> # Sort by a single column in descending order. >>> ds = ray.data.from_items( ... [{"value": i} for i in range(1000)]) >>> ds.sort("value", descending=True) Sort +- Datastream(num_blocks=200, num_rows=1000, schema={value: int64}) - >>> # Sort by a key function. - >>> ds.sort(lambda record: record["value"]) # doctest: +SKIP Time complexity: O(datastream size * log(datastream size / parallelism)) Args: - key: - - For Arrow tables, key must be a single column name. - - For datastreams of Python objects, key can be either a lambda - function that returns a comparison key to sort by, or None - to sort by the original value. + key: The column to sort by. To sort by multiple columns, use a map function + to generate the sort column beforehand. descending: Whether to sort in descending order. Returns: @@ -2133,11 +1956,10 @@ def sort( logical_plan = LogicalPlan(op) return Datastream(plan, self._epoch, self._lazy, logical_plan) - def zip(self, other: "Datastream[U]") -> "Datastream[(T, U)]": + def zip(self, other: "Datastream") -> "Datastream": """Materialize and zip this datastream with the elements of another. - The datastreams must have the same number of rows. For tabular datastreams, the - datastreams will be concatenated horizontally; namely, their column sets will be + The datastreams must have the same number of rows. Their column sets will be merged, and any duplicate column names disambiguated with _1, _2, etc. suffixes. .. 
note:: @@ -2151,9 +1973,9 @@ def zip(self, other: "Datastream[U]") -> "Datastream[(T, U)]": Examples: >>> import ray >>> ds1 = ray.data.range(5) - >>> ds2 = ray.data.range(5, parallelism=2).map(lambda x: x + 1) - >>> ds1.zip(ds2).take() - [(0, 1), (1, 2), (2, 3), (3, 4), (4, 5)] + >>> ds2 = ray.data.range(5) + >>> ds1.zip(ds2).take_batch() + {'id': array([0, 1, 2, 3, 4]), 'id_1': array([0, 1, 2, 3, 4])} Time complexity: O(datastream size / parallelism) @@ -2161,13 +1983,9 @@ def zip(self, other: "Datastream[U]") -> "Datastream[(T, U)]": other: The datastream to zip with on the right hand side. Returns: - If the inputs are simple datastreams, this returns a ``Datastream`` - containing (k, v) pairs, where k comes from the first datastream and v - comes from the second. - If the inputs are tabular datastreams, this returns a ``Datastream`` - containing the columns of the second datastream concatenated horizontally - with the columns of the first datastream, with duplicate column names - disambiguated with _1, _2, etc. suffixes. + A ``Datastream`` containing the columns of the second datastream + concatenated horizontally with the columns of the first datastream, + with duplicate column names disambiguated with _1, _2, etc. suffixes. """ plan = self._plan.with_stage(ZipStage(other)) @@ -2180,7 +1998,7 @@ def zip(self, other: "Datastream[U]") -> "Datastream[(T, U)]": return Datastream(plan, self._epoch, self._lazy, logical_plan) @ConsumptionAPI - def limit(self, limit: int) -> "Datastream[T]": + def limit(self, limit: int) -> "Datastream": """Materialize and truncate the datastream to the first ``limit`` records. 
Contrary to :meth`.take`, this will not move any data to the caller's @@ -2190,8 +2008,8 @@ def limit(self, limit: int) -> "Datastream[T]": Examples: >>> import ray >>> ds = ray.data.range(1000) - >>> ds.limit(100).map(lambda x: x * 2).take() - [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38] + >>> ds.limit(5).take_batch() + {'id': array([0, 1, 2, 3, 4])} Time complexity: O(limit specified) @@ -2226,12 +2044,10 @@ def take_batch( Args: batch_size: The max number of records to return. batch_format: Specify ``"default"`` to use the default block format - (promotes tables to Pandas and tensors to NumPy), ``"pandas"`` to select - ``pandas.DataFrame``, "pyarrow" to select ``pyarrow.Table``, or - ``"numpy"`` to select ``numpy.ndarray`` for tensor datastreams and - ``Dict[str, numpy.ndarray]`` for tabular datastreams, or None - to return the underlying block exactly as is with no additional - formatting. The default is "default". + (NumPy), ``"pandas"`` to select ``pandas.DataFrame``, "pyarrow" to + select ``pyarrow.Table``, or ``"numpy"`` to select + ``Dict[str, numpy.ndarray]``, or None to return the underlying block + exactly as is with no additional formatting. Returns: A batch of up to ``batch_size`` records from the datastream. @@ -2252,7 +2068,7 @@ def take_batch( return res @ConsumptionAPI(pattern="Time complexity:") - def take(self, limit: int = 20) -> List[T]: + def take(self, limit: int = 20) -> List[Dict[str, Any]]: """Return up to ``limit`` records from the datastream. This will move up to ``limit`` records to the caller's machine; if @@ -2281,7 +2097,7 @@ def take(self, limit: int = 20) -> List[T]: return output @ConsumptionAPI(pattern="Time complexity:") - def take_all(self, limit: Optional[int] = None) -> List[T]: + def take_all(self, limit: Optional[int] = None) -> List[Dict[str, Any]]: """Return all of the records in the datastream. 
This will move the entire datastream to the caller's machine; if the @@ -2354,14 +2170,9 @@ def count(self) -> int: extra_condition="or if ``fetch_if_missing=True`` (the default)", pattern="Time complexity:", ) - def schema( - self, fetch_if_missing: bool = True - ) -> Union[type, "pyarrow.lib.Schema"]: + def schema(self, fetch_if_missing: bool = True) -> Optional["Schema"]: """Return the schema of the datastream. - For datastream of Arrow records, this will return the Arrow schema. - For datastream of Python objects, this returns their Python type. - Time complexity: O(1) Args: @@ -2370,13 +2181,16 @@ def schema( Default is True. Returns: - The Python type or Arrow schema of the records, or None if the + The ``ray.data.Schema`` class of the records, or None if the schema is not known and fetch_if_missing is False. """ ctx = DataContext.get_current() base_schema = self._plan.schema(fetch_if_missing=fetch_if_missing) if ctx.strict_mode: - return Schema(base_schema) + if base_schema: + return Schema(base_schema) + else: + return None else: return base_schema @@ -2441,7 +2255,7 @@ def write_parquet( ) -> None: """Write the datastream to parquet. - This is only supported for datastream convertible to Arrow records. + This is only supported for datastreams convertible to Arrow records. To control the number of files, use ``.repartition()``. Unless a custom block path provider is given, the format of the output @@ -2756,7 +2570,7 @@ def write_numpy( self, path: str, *, - column: str = TENSOR_COLUMN_NAME, + column: Optional[str] = None, filesystem: Optional["pyarrow.fs.FileSystem"] = None, try_create_dir: bool = True, arrow_open_stream_args: Optional[Dict[str, Any]] = None, @@ -2784,8 +2598,7 @@ def write_numpy( path: The path to the destination root directory, where npy files will be written to. column: The name of the table column that contains the tensor to - be written. 
The default is ``"__value__"``, the column name that - Datastream uses for storing tensors in single-column tables. + be written. filesystem: The filesystem implementation to write to. try_create_dir: Try to create all directories in destination path if True. Does nothing if all directories already exist. @@ -2795,6 +2608,14 @@ def write_numpy( write each datastream block to a custom output path. ray_remote_args: Kwargs passed to ray.remote in the write tasks. """ + context = DataContext.get_current() + if context.strict_mode and not column: + raise StrictModeError( + "In strict mode, the column must be specified " + "(e.g., `write_numpy(column='data')`)." + ) + column = column or TENSOR_COLUMN_NAME + self.write_datasource( NumpyDatasource(), ray_remote_args=ray_remote_args, @@ -2869,7 +2690,7 @@ def write_mongo( @ConsumptionAPI def write_datasource( self, - datasource: Datasource[T], + datasource: Datasource, *, ray_remote_args: Dict[str, Any] = None, **write_args, @@ -3003,13 +2824,9 @@ def iterator(self) -> DataIterator: return DataIteratorImpl(self) @ConsumptionAPI - def iter_rows(self, *, prefetch_blocks: int = 0) -> Iterator[Union[T, Mapping]]: + def iter_rows(self, *, prefetch_blocks: int = 0) -> Iterator[Dict[str, Any]]: """Return a local row iterator over the datastream. - If the datastream is a tabular datastream (Arrow/Pandas blocks), dicts - are yielded for each row by the iterator. If the datastream is not tabular, - the raw row is yielded. - Examples: >>> import ray >>> for i in ray.data.range(1000000).iter_rows(): # doctest: +SKIP @@ -3062,12 +2879,10 @@ def iter_batches( The final batch may include fewer than ``batch_size`` rows if ``drop_last`` is ``False``. Defaults to 256. 
batch_format: Specify ``"default"`` to use the default block format - (promotes tables to Pandas and tensors to NumPy), ``"pandas"`` to select - ``pandas.DataFrame``, "pyarrow" to select ``pyarrow.Table``, or - ``"numpy"`` to select ``numpy.ndarray`` for tensor datastreams and - ``Dict[str, numpy.ndarray]`` for tabular datastreams, or None - to return the underlying block exactly as is with no additional - formatting. The default is "default". + (NumPy), ``"pandas"`` to select ``pandas.DataFrame``, "pyarrow" to + select ``pyarrow.Table``, or ``"numpy"`` to select + ``Dict[str, numpy.ndarray]``, or None to return the underlying block + exactly as is with no additional formatting. drop_last: Whether to drop the last batch if it's incomplete. local_shuffle_buffer_size: If non-None, the data will be randomly shuffled using a local in-memory shuffle buffer, and this value will serve as the @@ -3621,6 +3436,8 @@ def to_mars(self) -> "mars.DataFrame": refs = self.to_pandas_refs() # remove this when https://github.com/mars-project/mars/issues/2945 got fixed schema = self.schema() + if isinstance(schema, Schema): + schema = schema.base_schema # Backwards compat with non strict mode. if isinstance(schema, PandasBlockSchema): dtypes = pd.Series(schema.types, index=schema.names) elif isinstance(schema, pa.Schema): @@ -3669,8 +3486,11 @@ def to_spark(self, spark: "pyspark.sql.SparkSession") -> "pyspark.sql.DataFrame" """ import raydp + schema = self.schema() + if isinstance(schema, Schema): + schema = schema.base_schema # Backwards compat with non strict mode. return raydp.spark.ray_dataset_to_spark_dataframe( - spark, self.schema(), self.get_internal_block_refs() + spark, schema, self.get_internal_block_refs() ) @ConsumptionAPI(pattern="Time complexity:") @@ -3773,6 +3593,8 @@ def to_arrow_refs(self) -> List[ObjectRef["pyarrow.Table"]]: # Schema is safe to call since we have already triggered execution with # get_internal_block_refs. 
schema = self.schema(fetch_if_missing=True) + if isinstance(schema, Schema): + schema = schema.base_schema # Backwards compat with non strict mode. if isinstance(schema, pa.Schema): # Zero-copy path. return blocks @@ -3811,7 +3633,7 @@ def to_random_access_dataset( return RandomAccessDataset(self, key, num_workers=num_workers) @ConsumptionAPI - def repeat(self, times: Optional[int] = None) -> "DatasetPipeline[T]": + def repeat(self, times: Optional[int] = None) -> "DatasetPipeline": """Convert this into a DatasetPipeline by looping over this datastream. Transformations prior to the call to ``repeat()`` are evaluated once. @@ -3823,15 +3645,13 @@ def repeat(self, times: Optional[int] = None) -> "DatasetPipeline[T]": Examples: >>> import ray + >>> ds = ray.data.range(5, parallelism=1) >>> # Infinite pipeline of numbers [0, 5) - >>> ray.data.range(5, parallelism=1).repeat().take() - [0, 1, 2, 3, 4, 0, 1, 2, 3, 4, ...] - >>> # Can apply transformations to the pipeline. - >>> ray.data.range(5, parallelism=1).repeat().map(lambda x: -x).take() - [0, -1, -2, -3, -4, 0, -1, -2, -3, -4, ...] + >>> ds.repeat().take_batch() + {'id': array([0, 1, 2, 3, 4, 0, 1, 2, 3, 4, ...])} >>> # Can shuffle each epoch (datastream) in the pipeline. - >>> ray.data.range(5).repeat().random_shuffle().take() # doctest: +SKIP - [2, 3, 0, 4, 1, 4, 0, 2, 1, 3, ...] 
+ >>> ds.repeat().random_shuffle().take_batch() # doctest: +SKIP + {'id': array([2, 3, 0, 4, 1, 4, 0, 2, 1, 3, ...])} Args: times: The number of times to loop over this datastream, or None @@ -3861,7 +3681,7 @@ def __init__(self, blocks): self._blocks = blocks self._i = 0 - def __next__(self) -> Callable[[], "Datastream[T]"]: + def __next__(self) -> Callable[[], "Datastream"]: if times and self._i >= times: raise StopIteration epoch = self._i @@ -3905,7 +3725,7 @@ def window( *, blocks_per_window: Optional[int] = None, bytes_per_window: Optional[int] = None, - ) -> "DatasetPipeline[T]": + ) -> "DatasetPipeline": """Convert this into a DatasetPipeline by windowing over data blocks. Transformations prior to the call to ``window()`` are evaluated in @@ -3981,7 +3801,7 @@ def __init__(self, splits, epoch): self._splits = splits.copy() self._epoch = epoch - def __next__(self) -> "Datastream[T]": + def __next__(self) -> "Datastream": if not self._splits: raise StopIteration @@ -4096,7 +3916,7 @@ def __iter__(self): return pipe @Deprecated(message="Use `Datastream.materialize()` instead.") - def fully_executed(self) -> "MaterializedDatastream[T]": + def fully_executed(self) -> "MaterializedDatastream": logger.warning( "Deprecation warning: use Datastream.materialize() instead of " "fully_executed()." @@ -4116,7 +3936,7 @@ def is_fully_executed(self) -> bool: return self._plan.has_computed_output() @ConsumptionAPI(pattern="store memory.", insert_after=True) - def materialize(self) -> "MaterializedDatastream[T]": + def materialize(self) -> "MaterializedDatastream": """Execute and materialize this datastream into object store memory. This can be used to read all blocks into memory. 
By default, Datastream @@ -4165,7 +3985,7 @@ def get_internal_block_refs(self) -> List[ObjectRef[Block]]: message="Datastream is lazy by default, so this conversion call is no longer " "needed and this API will be removed in a future release" ) - def lazy(self) -> "Datastream[T]": + def lazy(self) -> "Datastream": """Enable lazy evaluation. Datastream is lazy by default, so this is only useful for datastreams created @@ -4275,7 +4095,7 @@ def deserialize_lineage(serialized_ds: bytes) -> "Datastream": """ return pickle.loads(serialized_ds) - def _divide(self, block_idx: int) -> ("Datastream[T]", "Datastream[T]"): + def _divide(self, block_idx: int) -> ("Datastream", "Datastream"): block_list = self._plan.execute() left, right = block_list.divide(block_idx) l_ds = Datastream( @@ -4294,71 +4114,8 @@ def _divide(self, block_idx: int) -> ("Datastream[T]", "Datastream[T]"): ) return l_ds, r_ds - @ConsumptionAPI(if_more_than_read=True, datasource_metadata="schema") + @Deprecated(message="The batch format is no longer exposed as a public API.") def default_batch_format(self) -> Type: - """Return this datastream's default batch format. - - The default batch format describes what batches of data look like. To learn more - about batch formats, read - :ref:`writing user-defined functions `. - - Examples: - - If your datastream represents a list of Python objects, then the default batch - format is ``list``. - - >>> import ray - >>> ds = ray.data.range(100) - >>> ds # doctest: +SKIP - Datastream(num_blocks=20, num_rows=100, schema=) - >>> ds.default_batch_format() - - >>> next(ds.iter_batches(batch_size=4)) - [0, 1, 2, 3] - - If your datastream contains a single ``numpy.ndarray`` - column named ``__value__`` (as created by :func:`ray.data.from_numpy`), then - the default batch format is ``np.ndarray``. For more information on tensor - formats, read the :ref:`tensor support guide `. 
- - >>> ds = ray.data.range_tensor(100) - >>> ds # doctest: +SKIP - Datastream(num_blocks=20, num_rows=100, schema={__value__: numpy.ndarray(shape=(1,), dtype=int64)}) - >>> ds.default_batch_format() - - >>> next(ds.iter_batches(batch_size=4)) - array([[0], - [1], - [2], - [3]]) - - If your datastream represents tabular data and doesn't only consist of a - ``__value__`` tensor column (such as is created by - :meth:`ray.data.from_numpy`), then the default batch format is - ``pd.DataFrame``. - - >>> import pandas as pd - >>> df = pd.DataFrame({"foo": ["a", "b"], "bar": [0, 1]}) - >>> ds = ray.data.from_pandas(df) - >>> ds # doctest: +SKIP - Datastream(num_blocks=1, num_rows=2, schema={foo: object, bar: int64}) - >>> ds.default_batch_format() - - >>> next(ds.iter_batches(batch_size=4)) - foo bar - 0 a 0 - 1 b 1 - - .. seealso:: - - :meth:`~Datastream.map_batches` - Call this function to transform batches of data. - - :meth:`~Datastream.iter_batches` - Call this function to iterate over batches of data. - - """ # noqa: E501 - context = DataContext.get_current() if context.strict_mode: raise StrictModeError( @@ -4379,20 +4136,8 @@ def default_batch_format(self) -> Type: return np.ndarray return pd.DataFrame - @ConsumptionAPI( - if_more_than_read=True, - datasource_metadata="schema", - pattern="for the first block.", - insert_after=True, - ) - @Deprecated(message="`dataset_format` is deprecated for streaming execution.") + @Deprecated(message="The dataset format is no longer exposed as a public API.") def dataset_format(self) -> BlockFormat: - """The format of the datastream's underlying data blocks. Possible values - are: "arrow", "pandas" and "simple". - - This may block; if the schema is unknown, this will synchronously fetch - the schema for the first block. 
- """ context = DataContext.get_current() if context.strict_mode: raise StrictModeError("dataset_format() is not allowed in strict mode") @@ -4427,7 +4172,7 @@ def dataset_format(self) -> BlockFormat: return BlockFormat.SIMPLE def _aggregate_on( - self, agg_cls: type, on: Optional[Union[KeyFn, List[KeyFn]]], *args, **kwargs + self, agg_cls: type, on: Optional[Union[str, List[str]]], *args, **kwargs ): """Helper for aggregating on a particular subset of the datastream. @@ -4442,7 +4187,7 @@ def _aggregate_on( def _build_multicolumn_aggs( self, agg_cls: type, - on: Optional[Union[KeyFn, List[KeyFn]]], + on: Optional[Union[str, List[str]]], ignore_nulls: bool, *args, skip_cols: Optional[List[str]] = None, @@ -4704,6 +4449,9 @@ def types(self) -> List[Union[Literal[object], "pyarrow.DataType"]]: arrow_types.append(None) return arrow_types + def __eq__(self, other): + return isinstance(other, Schema) and other.base_schema == self.base_schema + def __str__(self): return f"Schema({dict(zip(self.names, self.types))})" diff --git a/python/ray/data/examples/demo_infer.py b/python/ray/data/examples/demo_infer.py index 3815d23d2a62..22b3f8c9aaf7 100644 --- a/python/ray/data/examples/demo_infer.py +++ b/python/ray/data/examples/demo_infer.py @@ -22,7 +22,7 @@ def __call__(self, x): ds = ( ds.window(blocks_per_window=10) .map(preprocess) - .map(Model, compute="actors", num_gpus=1) + .map(Model, compute=ray.data.ActorPoolStrategy(), num_gpus=1) ) for x in ds.iter_rows(): diff --git a/python/ray/data/grouped_data.py b/python/ray/data/grouped_data.py index 23cb30533fc5..9bdb7d072c10 100644 --- a/python/ray/data/grouped_data.py +++ b/python/ray/data/grouped_data.py @@ -1,7 +1,7 @@ -from typing import Any, Callable, Generic, List, Tuple, Union, Optional +from typing import List, Tuple, Union, Optional from ray.data._internal import sort -from ray.data._internal.compute import CallableClass, ComputeStrategy +from ray.data._internal.compute import ComputeStrategy from 
ray.data._internal.delegating_block_builder import DelegatingBlockBuilder from ray.data._internal.execution.interfaces import TaskContext from ray.data._internal.logical.interfaces import LogicalPlan @@ -25,10 +25,8 @@ BlockAccessor, BlockExecStats, BlockMetadata, - KeyFn, KeyType, - T, - U, + UserDefinedFunction, ) from ray.data.context import DataContext from ray.data.datastream import DataBatch, Datastream @@ -42,7 +40,7 @@ def map( block: Block, output_num_blocks: int, boundaries: List[KeyType], - key: KeyFn, + key: str, aggs: Tuple[AggregateFn], ) -> List[Union[BlockMetadata, Block]]: """Partition the block and combine rows with the same key.""" @@ -66,7 +64,7 @@ def map( @staticmethod def reduce( - key: KeyFn, + key: str, aggs: Tuple[AggregateFn], *mapper_outputs: List[Block], partial_reduce: bool = False, @@ -79,7 +77,7 @@ def reduce( @staticmethod def _prune_unused_columns( block: Block, - key: KeyFn, + key: str, aggs: Tuple[AggregateFn], ) -> Block: """Prune unused columns from block before aggregate.""" @@ -118,13 +116,13 @@ class PushBasedGroupbyOp(_GroupbyOp, PushBasedShufflePlan): @PublicAPI -class GroupedData(Generic[T]): +class GroupedData: """Represents a grouped datastream created by calling ``Datastream.groupby()``. The actual groupby is deferred until an aggregation is applied. """ - def __init__(self, datastream: Datastream[T], key: KeyFn): + def __init__(self, datastream: Datastream, key: str): """Construct a datastream grouped by key (internal API). The constructor is not part of the GroupedData API. @@ -139,45 +137,15 @@ def __repr__(self) -> str: f"key={self._key!r})" ) - def aggregate(self, *aggs: AggregateFn) -> Datastream[U]: + def aggregate(self, *aggs: AggregateFn) -> Datastream: """Implements an accumulator-based aggregation. - Examples: - - .. 
testcode:: - - import ray - from ray.data.aggregate import AggregateFn - ds = ray.data.range(100) - grouped_ds = ds.groupby(lambda x: x % 3) - result = grouped_ds.aggregate(AggregateFn( - init=lambda k: [], - accumulate_row=lambda a, r: a + [r], - merge=lambda a1, a2: a1 + a2, - finalize=lambda a: sorted(a) - )) - result.show() - - .. testoutput:: - - (0, [0, 3, 6, 9, 12, 15, 18, 21, 24, 27, 30, 33, 36, 39, 42, 45, 48, \ -51, 54, 57, 60, 63, 66, 69, 72, 75, 78, 81, 84, 87, 90, 93, 96, 99]) - (1, [1, 4, 7, 10, 13, 16, 19, 22, 25, 28, 31, 34, 37, 40, 43, 46, 49, \ -52, 55, 58, 61, 64, 67, 70, 73, 76, 79, 82, 85, 88, 91, 94, 97]) - (2, [2, 5, 8, 11, 14, 17, 20, 23, 26, 29, 32, 35, 38, 41, 44, 47, 50, \ -53, 56, 59, 62, 65, 68, 71, 74, 77, 80, 83, 86, 89, 92, 95, 98]) - - Args: aggs: Aggregations to do. Returns: - If the input datastream is simple datastream then the output is a simple - datastream of ``(k, v_1, ..., v_n)`` tuples where ``k`` is the groupby - key and ``v_i`` is the result of the ith given aggregation. - If the input datastream is an Arrow datastream then the output is an - Arrow datastream of ``n + 1`` columns where the first column is the - groupby key and the second through ``n + 1`` columns are the + The output is a datastream of ``n + 1`` columns where the first column + is the groupby key and the second through ``n + 1`` columns are the results of the aggregations. If groupby key is ``None`` then the key part of return is omitted. """ @@ -249,7 +217,7 @@ def do_agg(blocks, task_ctx: TaskContext, clear_input_blocks: bool, *_): def _aggregate_on( self, agg_cls: type, - on: Union[KeyFn, List[KeyFn]], + on: Union[str, List[str]], ignore_nulls: bool, *args, **kwargs, @@ -257,7 +225,7 @@ def _aggregate_on( """Helper for aggregating on a particular subset of the datastream. This validates the `on` argument, and converts a list of column names - or lambdas to a multi-aggregation. A null `on` results in a + to a multi-aggregation.
A null `on` results in a multi-aggregation on all columns for an Arrow Datastream, and a single aggregation on the entire row for a simple Datastream. """ @@ -268,14 +236,12 @@ def _aggregate_on( def map_groups( self, - fn: Union[CallableClass, Callable[[DataBatch], DataBatch]], + fn: UserDefinedFunction[DataBatch, DataBatch], *, compute: Union[str, ComputeStrategy] = None, batch_format: Optional[str] = "default", **ray_remote_args, - ) -> "Datastream[Any]": - # TODO AttributeError: 'GroupedData' object has no attribute 'map_groups' - # in the example below. + ) -> "Datastream": """Apply the given function to each group of records of this datastream. While map_groups() is very flexible, note that it comes with downsides: @@ -290,11 +256,6 @@ def map_groups( >>> import ray >>> import pandas as pd >>> import numpy as np - >>> # Get median per group. Note that median is not an associative - >>> # function so cannot be computed with aggregate(). - >>> ds = ray.data.range(100) # doctest: +SKIP - >>> ds.groupby(lambda x: x % 3).map_groups( # doctest: +SKIP - ... lambda x: [np.median(x)]) >>> # Get first value per group. >>> ds = ray.data.from_items([ # doctest: +SKIP ... {"group": 1, "value": 1}, @@ -302,7 +263,7 @@ def map_groups( ... {"group": 2, "value": 3}, ... {"group": 2, "value": 4}]) >>> ds.groupby("group").map_groups( # doctest: +SKIP - ... lambda g: [g["value"][0]]) + ... lambda g: {"result": np.array([g["value"][0]])}) >>> # Return multiple records per group (dataframe in, dataframe out). >>> df = pd.DataFrame( @@ -326,12 +287,10 @@ def map_groups( pool, or ``ray.data.ActorPoolStrategy(min_size=m, max_size=n)`` for an autoscaling actor pool. 
batch_format: Specify ``"default"`` to use the default block format - (promotes tables to Pandas and tensors to NumPy), ``"pandas"`` to select - ``pandas.DataFrame``, "pyarrow" to select ``pyarrow.Table``, or - ``"numpy"`` to select ``numpy.ndarray`` for tensor datastreams and - ``Dict[str, numpy.ndarray]`` for tabular datastreams, or None - to return the underlying block exactly as is with no additional - formatting. The default is "default". + (NumPy), ``"pandas"`` to select ``pandas.DataFrame``, "pyarrow" to + select ``pyarrow.Table``, or ``"numpy"`` to select + ``Dict[str, numpy.ndarray]``, or None to return the underlying block + exactly as is with no additional formatting. ray_remote_args: Additional resource requirements to request from ray (e.g., num_gpus=1 to request GPUs for the map tasks). @@ -395,38 +354,35 @@ def group_fn(batch): **ray_remote_args, ) - def count(self) -> Datastream[U]: + def count(self) -> Datastream: """Compute count aggregation. Examples: >>> import ray - >>> ray.data.range(100).groupby(lambda x: x % 3).count() # doctest: +SKIP >>> ray.data.from_items([ # doctest: +SKIP ... {"A": x % 3, "B": x} for x in range(100)]).groupby( # doctest: +SKIP ... "A").count() # doctest: +SKIP Returns: - A simple datastream of ``(k, v)`` pairs or an Arrow datastream of - ``[k, v]`` columns where ``k`` is the groupby key and ``v`` is the - number of rows with that key. + A datastream of ``[k, v]`` columns where ``k`` is the groupby key and + ``v`` is the number of rows with that key. If groupby key is ``None`` then the key part of return is omitted. """ return self.aggregate(Count()) def sum( - self, on: Union[KeyFn, List[KeyFn]] = None, ignore_nulls: bool = True - ) -> Datastream[U]: + self, on: Union[str, List[str]] = None, ignore_nulls: bool = True + ) -> Datastream: r"""Compute grouped sum aggregation. Examples: >>> import ray - >>> ray.data.range(100).groupby(lambda x: x % 3).sum() # doctest: +SKIP >>> ray.data.from_items([ # doctest: +SKIP ... 
(i % 3, i, i**2) # doctest: +SKIP ... for i in range(100)]) \ # doctest: +SKIP ... .groupby(lambda x: x[0] % 3) \ # doctest: +SKIP ... .sum(lambda x: x[2]) # doctest: +SKIP - >>> ray.data.range_table(100).groupby("value").sum() # doctest: +SKIP + >>> ray.data.range(100).groupby("id").sum() # doctest: +SKIP >>> ray.data.from_items([ # doctest: +SKIP ... {"A": i % 3, "B": i, "C": i**2} # doctest: +SKIP ... for i in range(100)]) \ # doctest: +SKIP @@ -434,13 +390,7 @@ def sum( ... .sum(["B", "C"]) # doctest: +SKIP Args: - on: The data subset on which to compute the sum. - - - For a simple datastream: it can be a callable or a list thereof, - and the default is to take a sum of all rows. - - For an Arrow datastream: it can be a column name or a list - thereof, and the default is to do a column-wise sum of all - columns. + on: a column name or a list of column names to aggregate. ignore_nulls: Whether to ignore null values. If ``True``, null values will be ignored when computing the sum; if ``False``, if a null value is encountered, the output will be null. @@ -450,21 +400,12 @@ def sum( Returns: The sum result. - For a simple datastream, the output is: - - - ``on=None``: a simple datastream of ``(k, sum)`` tuples where ``k`` - is the groupby key and ``sum`` is sum of all rows in that group. - - ``on=[callable_1, ..., callable_n]``: a simple datastream of - ``(k, sum_1, ..., sum_n)`` tuples where ``k`` is the groupby key - and ``sum_i`` is sum of the outputs of the ith callable called on - each row in that group. - - For an Arrow datastream, the output is: + For different values of ``on``, the return varies: - - ``on=None``: an Arrow datastream containing a groupby key column, + - ``on=None``: a datastream containing a groupby key column, ``"k"``, and a column-wise sum column for each original column in the datastream. 
- - ``on=["col_1", ..., "col_n"]``: an Arrow datastream of ``n + 1`` + - ``on=["col_1", ..., "col_n"]``: a datastream of ``n + 1`` columns where the first column is the groupby key and the second through ``n + 1`` columns are the results of the aggregations. @@ -473,19 +414,13 @@ def sum( return self._aggregate_on(Sum, on, ignore_nulls) def min( - self, on: Union[KeyFn, List[KeyFn]] = None, ignore_nulls: bool = True - ) -> Datastream[U]: + self, on: Union[str, List[str]] = None, ignore_nulls: bool = True + ) -> Datastream: """Compute grouped min aggregation. Examples: >>> import ray - >>> ray.data.range(100).groupby(lambda x: x % 3).min() # doctest: +SKIP - >>> ray.data.from_items([ # doctest: +SKIP - ... (i % 3, i, i**2) # doctest: +SKIP - ... for i in range(100)]) \ # doctest: +SKIP - ... .groupby(lambda x: x[0] % 3) \ # doctest: +SKIP - ... .min(lambda x: x[2]) # doctest: +SKIP - >>> ray.data.range_table(100).groupby("value").min() # doctest: +SKIP + >>> ray.data.range(100).groupby("id").min() # doctest: +SKIP >>> ray.data.from_items([ # doctest: +SKIP ... {"A": i % 3, "B": i, "C": i**2} # doctest: +SKIP ... for i in range(100)]) \ # doctest: +SKIP @@ -493,13 +428,7 @@ def min( ... .min(["B", "C"]) # doctest: +SKIP Args: - on: The data subset on which to compute the min. - - - For a simple datastream: it can be a callable or a list thereof, - and the default is to take a min of all rows. - - For an Arrow datastream: it can be a column name or a list - thereof, and the default is to do a column-wise min of all - columns. + on: a column name or a list of column names to aggregate. ignore_nulls: Whether to ignore null values. If ``True``, null values will be ignored when computing the min; if ``False``, if a null value is encountered, the output will be null. @@ -509,21 +438,12 @@ def min( Returns: The min result.
- For a simple datastream, the output is: + For different values of ``on``, the return varies: - - ``on=None``: a simple datastream of ``(k, min)`` tuples where ``k`` - is the groupby key and min is min of all rows in that group. - - ``on=[callable_1, ..., callable_n]``: a simple datastream of - ``(k, min_1, ..., min_n)`` tuples where ``k`` is the groupby key - and ``min_i`` is min of the outputs of the ith callable called on - each row in that group. - - For an Arrow datastream, the output is: - - - ``on=None``: an Arrow datastream containing a groupby key column, + - ``on=None``: a datastream containing a groupby key column, ``"k"``, and a column-wise min column for each original column in the datastream. - - ``on=["col_1", ..., "col_n"]``: an Arrow datastream of ``n + 1`` + - ``on=["col_1", ..., "col_n"]``: a datastream of ``n + 1`` columns where the first column is the groupby key and the second through ``n + 1`` columns are the results of the aggregations. @@ -532,19 +452,13 @@ def min( return self._aggregate_on(Min, on, ignore_nulls) def max( - self, on: Union[KeyFn, List[KeyFn]] = None, ignore_nulls: bool = True - ) -> Datastream[U]: + self, on: Union[str, List[str]] = None, ignore_nulls: bool = True + ) -> Datastream: """Compute grouped max aggregation. Examples: >>> import ray - >>> ray.data.range(100).groupby(lambda x: x % 3).max() # doctest: +SKIP - >>> ray.data.from_items([ # doctest: +SKIP - ... (i % 3, i, i**2) # doctest: +SKIP - ... for i in range(100)]) \ # doctest: +SKIP - ... .groupby(lambda x: x[0] % 3) \ # doctest: +SKIP - ... .max(lambda x: x[2]) # doctest: +SKIP - >>> ray.data.range_table(100).groupby("value").max() # doctest: +SKIP + >>> ray.data.range(100).groupby("id").max() # doctest: +SKIP >>> ray.data.from_items([ # doctest: +SKIP ... {"A": i % 3, "B": i, "C": i**2} # doctest: +SKIP ... for i in range(100)]) \ # doctest: +SKIP @@ -552,13 +466,7 @@ def max( ...
.max(["B", "C"]) # doctest: +SKIP Args: - on: The data subset on which to compute the max. - - - For a simple datastream: it can be a callable or a list thereof, - and the default is to take a max of all rows. - - For an Arrow datastream: it can be a column name or a list - thereof, and the default is to do a column-wise max of all - columns. + on: a column name or a list of column names to aggregate. ignore_nulls: Whether to ignore null values. If ``True``, null values will be ignored when computing the max; if ``False``, if a null value is encountered, the output will be null. @@ -568,21 +476,12 @@ def max( Returns: The max result. - For a simple datastream, the output is: - - - ``on=None``: a simple datastream of ``(k, max)`` tuples where ``k`` - is the groupby key and ``max`` is max of all rows in that group. - - ``on=[callable_1, ..., callable_n]``: a simple datastream of - ``(k, max_1, ..., max_n)`` tuples where ``k`` is the groupby key - and ``max_i`` is max of the outputs of the ith callable called on - each row in that group. - - For an Arrow datastream, the output is: + For different values of ``on``, the return varies: - - ``on=None``: an Arrow datastream containing a groupby key column, + - ``on=None``: a datastream containing a groupby key column, ``"k"``, and a column-wise max column for each original column in the datastream. - - ``on=["col_1", ..., "col_n"]``: an Arrow datastream of ``n + 1`` + - ``on=["col_1", ..., "col_n"]``: a datastream of ``n + 1`` columns where the first column is the groupby key and the second through ``n + 1`` columns are the results of the aggregations. @@ -591,19 +490,13 @@ def max( return self._aggregate_on(Max, on, ignore_nulls) def mean( - self, on: Union[KeyFn, List[KeyFn]] = None, ignore_nulls: bool = True - ) -> Datastream[U]: + self, on: Union[str, List[str]] = None, ignore_nulls: bool = True + ) -> Datastream: """Compute grouped mean aggregation. 
Examples: >>> import ray - >>> ray.data.range(100).groupby(lambda x: x % 3).mean() # doctest: +SKIP - >>> ray.data.from_items([ # doctest: +SKIP - ... (i % 3, i, i**2) # doctest: +SKIP - ... for i in range(100)]) \ # doctest: +SKIP - ... .groupby(lambda x: x[0] % 3) \ # doctest: +SKIP - ... .mean(lambda x: x[2]) # doctest: +SKIP - >>> ray.data.range_table(100).groupby("value").mean() # doctest: +SKIP + >>> ray.data.range(100).groupby("id").mean() # doctest: +SKIP >>> ray.data.from_items([ # doctest: +SKIP ... {"A": i % 3, "B": i, "C": i**2} # doctest: +SKIP ... for i in range(100)]) \ # doctest: +SKIP @@ -611,13 +504,7 @@ def mean( ... .mean(["B", "C"]) # doctest: +SKIP Args: - on: The data subset on which to compute the mean. - - - For a simple datastream: it can be a callable or a list thereof, - and the default is to take a mean of all rows. - - For an Arrow datastream: it can be a column name or a list - thereof, and the default is to do a column-wise mean of all - columns. + on: a column name or a list of column names to aggregate. ignore_nulls: Whether to ignore null values. If ``True``, null values will be ignored when computing the mean; if ``False``, if a null value is encountered, the output will be null. @@ -627,22 +514,12 @@ def mean( Returns: The mean result. - For a simple datastream, the output is: + For different values of ``on``, the return varies: - - ``on=None``: a simple datastream of ``(k, mean)`` tuples where ``k`` - is the groupby key and ``mean`` is mean of all rows in that - group. - - ``on=[callable_1, ..., callable_n]``: a simple datastream of - ``(k, mean_1, ..., mean_n)`` tuples where ``k`` is the groupby - key and ``mean_i`` is mean of the outputs of the ith callable - called on each row in that group.
- - For an Arrow datastream, the output is: - - - ``on=None``: an Arrow datastream containing a groupby key column, + - ``on=None``: a datastream containing a groupby key column, ``"k"``, and a column-wise mean column for each original column in the datastream. - - ``on=["col_1", ..., "col_n"]``: an Arrow datastream of ``n + 1`` + - ``on=["col_1", ..., "col_n"]``: a datastream of ``n + 1`` columns where the first column is the groupby key and the second through ``n + 1`` columns are the results of the aggregations. @@ -652,21 +529,15 @@ def mean( def std( self, - on: Union[KeyFn, List[KeyFn]] = None, + on: Union[str, List[str]] = None, ddof: int = 1, ignore_nulls: bool = True, - ) -> Datastream[U]: + ) -> Datastream: """Compute grouped standard deviation aggregation. Examples: >>> import ray - >>> ray.data.range(100).groupby(lambda x: x % 3).std() # doctest: +SKIP - >>> ray.data.from_items([ # doctest: +SKIP - ... (i % 3, i, i**2) # doctest: +SKIP - ... for i in range(100)]) \ # doctest: +SKIP - ... .groupby(lambda x: x[0] % 3) \ # doctest: +SKIP - ... .std(lambda x: x[2]) # doctest: +SKIP - >>> ray.data.range_table(100).groupby("value").std(ddof=0) # doctest: +SKIP + >>> ray.data.range(100).groupby("id").std(ddof=0) # doctest: +SKIP >>> ray.data.from_items([ # doctest: +SKIP ... {"A": i % 3, "B": i, "C": i**2} # doctest: +SKIP ... for i in range(100)]) \ # doctest: +SKIP @@ -682,13 +553,7 @@ def std( https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Welford's_online_algorithm Args: - on: The data subset on which to compute the std. - - - For a simple datastream: it can be a callable or a list thereof, - and the default is to take a std of all rows. - - For an Arrow datastream: it can be a column name or a list - thereof, and the default is to do a column-wise std of all - columns. + on: a column name or a list of column names to aggregate. ddof: Delta Degrees of Freedom. 
The divisor used in calculations is ``N - ddof``, where ``N`` represents the number of elements. ignore_nulls: Whether to ignore null values. If ``True``, null @@ -700,21 +565,12 @@ def std( Returns: The standard deviation result. - For a simple datastream, the output is: - - - ``on=None``: a simple datastream of ``(k, std)`` tuples where ``k`` - is the groupby key and ``std`` is std of all rows in that group. - - ``on=[callable_1, ..., callable_n]``: a simple datastream of - ``(k, std_1, ..., std_n)`` tuples where ``k`` is the groupby key - and ``std_i`` is std of the outputs of the ith callable called on - each row in that group. - - For an Arrow datastream, the output is: + For different values of ``on``, the return varies: - - ``on=None``: an Arrow datastream containing a groupby key column, + - ``on=None``: a datastream containing a groupby key column, ``"k"``, and a column-wise std column for each original column in the datastream. - - ``on=["col_1", ..., "col_n"]``: an Arrow datastream of ``n + 1`` + - ``on=["col_1", ..., "col_n"]``: a datastream of ``n + 1`` columns where the first column is the groupby key and the second through ``n + 1`` columns are the results of the aggregations. 
diff --git a/python/ray/data/iterator.py b/python/ray/data/iterator.py index 4957512da212..36fc86f8ee1c 100644 --- a/python/ray/data/iterator.py +++ b/python/ray/data/iterator.py @@ -11,11 +11,10 @@ Tuple, Union, Iterator, - Mapping, ) from ray.types import ObjectRef -from ray.data.block import BlockAccessor, Block, BlockMetadata, DataBatch, T +from ray.data.block import BlockAccessor, Block, BlockMetadata, DataBatch from ray.data.context import DataContext from ray.util.annotations import PublicAPI from ray.data._internal.block_batching import batch_block_refs @@ -24,11 +23,10 @@ from ray.data._internal.util import _is_tensor_schema if TYPE_CHECKING: - import pyarrow import tensorflow as tf import torch from ray.data._internal.torch_iterable_dataset import TorchTensorBatchType - from ray.data.datastream import TensorFlowTensorBatchType + from ray.data.datastream import TensorFlowTensorBatchType, Schema def _is_tensor_datastream(schema) -> bool: @@ -40,7 +38,7 @@ def _is_tensor_datastream(schema) -> bool: @PublicAPI(stability="beta") class DataIterator(abc.ABC): - """An iterator for reading items from a :class:`~Datastream` or + """An iterator for reading records from a :class:`~Datastream` or :class:`~DatasetPipeline`. For Datastreams, each iteration call represents a complete read of all items in the @@ -56,13 +54,9 @@ class DataIterator(abc.ABC): >>> import ray >>> ds = ray.data.range(5) >>> ds - Datastream(num_blocks=5, num_rows=5, schema=) + Datastream(num_blocks=5, num_rows=5, schema={id: int64}) >>> ds.iterator() - DataIterator(Datastream(num_blocks=5, num_rows=5, schema=)) - >>> ds = ds.repeat(); ds - DatasetPipeline(num_windows=inf, num_stages=2) - >>> ds.iterator() - DataIterator(DatasetPipeline(num_windows=inf, num_stages=2)) + DataIterator(Datastream(num_blocks=5, num_rows=5, schema={id: int64})) .. tip:: For debugging purposes, use @@ -125,14 +119,11 @@ def iter_batches( as batches (blocks may contain different number of rows). 
The final batch may include fewer than ``batch_size`` rows if ``drop_last`` is ``False``. Defaults to 256. - batch_format: The format in which to return each batch. - Specify "default" to use the default block format (promoting - tables to Pandas and tensors to NumPy), "pandas" to select - ``pandas.DataFrame``, "pyarrow" to select ``pyarrow.Table``, or "numpy" - to select ``numpy.ndarray`` for tensor datastreams and - ``Dict[str, numpy.ndarray]`` for tabular datastreams, or None to return - the underlying block exactly as is with no additional formatting. - The default is "default". + batch_format: Specify ``"default"`` to use the default block format + (NumPy), ``"pandas"`` to select ``pandas.DataFrame``, "pyarrow" to + select ``pyarrow.Table``, or ``"numpy"`` to select + ``Dict[str, numpy.ndarray]``, or None to return the underlying block + exactly as is with no additional formatting. drop_last: Whether to drop the last batch if it's incomplete. local_shuffle_buffer_size: If non-None, the data will be randomly shuffled using a local in-memory shuffle buffer, and this value will serve as the @@ -200,7 +191,7 @@ def drop_metadata(block_iterator): if stats: stats.iter_total_s.add(time.perf_counter() - time_start) - def iter_rows(self, *, prefetch_blocks: int = 0) -> Iterator[Union[T, Mapping]]: + def iter_rows(self, *, prefetch_blocks: int = 0) -> Iterator[Dict[str, Any]]: """Return a local row iterator over the datastream. 
If the datastream is a tabular datastream (Arrow/Pandas blocks), dicts @@ -233,7 +224,7 @@ def iter_rows(self, *, prefetch_blocks: int = 0) -> Iterator[Union[T, Mapping]]: for batch in self.iter_batches(**iter_batch_args): batch = BlockAccessor.for_block(BlockAccessor.batch_to_block(batch)) - for row in batch.iter_rows(): + for row in batch.iter_rows(public_row_format=True): yield row @abc.abstractmethod @@ -242,7 +233,7 @@ def stats(self) -> str: raise NotImplementedError @abc.abstractmethod - def schema(self) -> Union[type, "pyarrow.lib.Schema"]: + def schema(self) -> "Schema": """Return the schema of the datastream iterated over.""" raise NotImplementedError diff --git a/python/ray/data/preprocessors/encoder.py b/python/ray/data/preprocessors/encoder.py index 18f2ce115c7f..fe1d646de015 100644 --- a/python/ray/data/preprocessors/encoder.py +++ b/python/ray/data/preprocessors/encoder.py @@ -547,19 +547,19 @@ def get_pd_value_counts(df: pd.DataFrame) -> List[Dict[str, Counter]]: result = {} for col in columns: if col in df_columns: - result[col] = get_pd_value_counts_per_column(df[col]) + result[col] = [get_pd_value_counts_per_column(df[col])] else: raise ValueError( f"Column '{col}' does not exist in DataFrame, which has columns: {df_columns}" # noqa: E501 ) - return [result] + return result value_counts = datastream.map_batches(get_pd_value_counts, batch_format="pandas") final_counters = {col: Counter() for col in columns} for batch in value_counts.iter_batches(batch_size=None): - for col_value_counts in batch: - for col, value_counts in col_value_counts.items(): - final_counters[col] += value_counts + for col, counters in batch.items(): + for counter in counters: + final_counters[col] += counter # Inspect if there is any NA values. 
for col in columns: diff --git a/python/ray/data/preprocessors/imputer.py b/python/ray/data/preprocessors/imputer.py index 23c7f232a9c8..dff233f6e6d2 100644 --- a/python/ray/data/preprocessors/imputer.py +++ b/python/ray/data/preprocessors/imputer.py @@ -147,14 +147,14 @@ def _get_most_frequent_values( columns = list(columns) def get_pd_value_counts(df: pd.DataFrame) -> List[Dict[str, Counter]]: - return [{col: Counter(df[col].value_counts().to_dict()) for col in columns}] + return {col: [Counter(df[col].value_counts().to_dict())] for col in columns} value_counts = datastream.map_batches(get_pd_value_counts, batch_format="pandas") final_counters = {col: Counter() for col in columns} for batch in value_counts.iter_batches(batch_size=None): - for col_value_counts in batch: - for col, value_counts in col_value_counts.items(): - final_counters[col] += value_counts + for col, counters in batch.items(): + for counter in counters: + final_counters[col] += counter return { f"most_frequent({column})": final_counters[column].most_common(1)[0][0] diff --git a/python/ray/data/preprocessors/torch.py b/python/ray/data/preprocessors/torch.py index cdede252be56..8ffdddb2435e 100644 --- a/python/ray/data/preprocessors/torch.py +++ b/python/ray/data/preprocessors/torch.py @@ -1,4 +1,4 @@ -from typing import TYPE_CHECKING, Callable, Dict, List, Union +from typing import TYPE_CHECKING, Callable, Dict, List, Union, Optional, Mapping import numpy as np @@ -63,6 +63,8 @@ class TorchVisionPreprocessor(Preprocessor): transform: The TorchVision transform you want to apply. This transform should accept a ``np.ndarray`` or ``torch.Tensor`` as input and return a ``torch.Tensor`` as output. + output_columns: The output name for each input column. If not specified, this + defaults to the same set of columns as the columns. batched: If ``True``, apply ``transform`` to batches of shape :math:`(B, H, W, C)`. Otherwise, apply ``transform`` to individual images. 
""" # noqa: E501 @@ -73,21 +75,32 @@ def __init__( self, columns: List[str], transform: Callable[[Union["np.ndarray", "torch.Tensor"]], "torch.Tensor"], + output_columns: Optional[List[str]] = None, batched: bool = False, ): + if not output_columns: + output_columns = columns + if len(columns) != len(output_columns): + raise ValueError( + "The length of columns should match the " + f"length of output_columns: {columns} vs {output_columns}." + ) self._columns = columns + self._output_columns = output_columns self._torchvision_transform = transform self._batched = batched def __repr__(self) -> str: return ( - f"{self.__class__.__name__}(columns={self._columns}, " + f"{self.__class__.__name__}(" + f"columns={self._columns}, " + f"output_columns={self._output_columns}, " f"transform={self._torchvision_transform!r})" ) def _transform_numpy( - self, np_data: Union["np.ndarray", Dict[str, "np.ndarray"]] - ) -> Union["np.ndarray", Dict[str, "np.ndarray"]]: + self, data_batch: Dict[str, "np.ndarray"] + ) -> Dict[str, "np.ndarray"]: import torch from ray.air._internal.torch_utils import convert_ndarray_to_torch_tensor @@ -98,15 +111,15 @@ def apply_torchvision_transform(array: np.ndarray) -> np.ndarray: except TypeError: # Transforms like `ToTensor` expect a `np.ndarray` as input. output = self._torchvision_transform(array) - - if not isinstance(output, torch.Tensor): + if isinstance(output, torch.Tensor): + output = output.numpy() + if not isinstance(output, np.ndarray): raise ValueError( "`TorchVisionPreprocessor` expected your transform to return a " - "`torch.Tensor`, but your transform returned a " + "`torch.Tensor` or `np.ndarray`, but your transform returned a " f"`{type(output).__name__}` instead." 
) - - return output.numpy() + return output def transform_batch(batch: np.ndarray) -> np.ndarray: if self._batched: @@ -115,14 +128,15 @@ def transform_batch(batch: np.ndarray) -> np.ndarray: [apply_torchvision_transform(array) for array in batch] ) - if isinstance(np_data, dict): - outputs = np_data - for column in self._columns: - outputs[column] = transform_batch(np_data[column]) + if isinstance(data_batch, Mapping): + for input_col, output_col in zip(self._columns, self._output_columns): + data_batch[output_col] = transform_batch(data_batch[input_col]) else: - outputs = transform_batch(np_data) + # TODO(ekl) deprecate this code path. Unfortunately, predictors are still + # sending schemaless arrays to preprocessors. + data_batch = transform_batch(data_batch) - return outputs + return data_batch def preferred_batch_format(cls) -> BatchFormat: return BatchFormat.NUMPY diff --git a/python/ray/data/preprocessors/vectorizer.py b/python/ray/data/preprocessors/vectorizer.py index 6949ade9275b..8a435b878908 100644 --- a/python/ray/data/preprocessors/vectorizer.py +++ b/python/ray/data/preprocessors/vectorizer.py @@ -224,16 +224,16 @@ def get_token_counts(col): tokens = token_series.sum() return Counter(tokens) - return [{col: get_token_counts(col) for col in self.columns}] + return {col: [get_token_counts(col)] for col in self.columns} value_counts = datastream.map_batches( get_pd_value_counts, batch_format="pandas" ) total_counts = {col: Counter() for col in self.columns} for batch in value_counts.iter_batches(batch_size=None): - for x in batch: - for col, col_value_counts in x.items(): - total_counts[col].update(col_value_counts) + for col, counters in batch.items(): + for counter in counters: + total_counts[col].update(counter) def most_common(counter: Counter, n: int): return Counter(dict(counter.most_common(n))) diff --git a/python/ray/data/random_access_dataset.py b/python/ray/data/random_access_dataset.py index fd190529e9c2..9b3b52eba62c 100644 --- 
a/python/ray/data/random_access_dataset.py +++ b/python/ray/data/random_access_dataset.py @@ -4,11 +4,11 @@ import time from collections import defaultdict import numpy as np -from typing import List, Any, Generic, Optional, TYPE_CHECKING +from typing import List, Any, Optional, TYPE_CHECKING import ray from ray.types import ObjectRef -from ray.data.block import T, BlockAccessor +from ray.data.block import BlockAccessor from ray.data.context import DataContext, DEFAULT_SCHEDULING_STRATEGY from ray.data._internal.remote_fn import cached_remote_fn from ray.util.annotations import PublicAPI @@ -25,7 +25,7 @@ @PublicAPI(stability="alpha") -class RandomAccessDataset(Generic[T]): +class RandomAccessDataset: """A class that provides distributed, random access to a Datastream. See: ``Datastream.to_random_access_dataset()``. @@ -33,7 +33,7 @@ class RandomAccessDataset(Generic[T]): def __init__( self, - ds: "Datastream[T]", + ds: "Datastream", key: str, num_workers: int, ): @@ -130,7 +130,7 @@ def _compute_block_to_worker_assignments(self): return block_to_workers, worker_to_blocks - def get_async(self, key: Any) -> ObjectRef[Optional[T]]: + def get_async(self, key: Any) -> ObjectRef[Any]: """Asynchronously finds the record for a single key. Args: @@ -144,7 +144,7 @@ def get_async(self, key: Any) -> ObjectRef[Optional[T]]: return ray.put(None) return self._worker_for(block_index).get.remote(block_index, key) - def multiget(self, keys: List[Any]) -> List[Optional[T]]: + def multiget(self, keys: List[Any]) -> List[Optional[Any]]: """Synchronously find the records for a list of keys. 
Args: diff --git a/python/ray/data/read_api.py b/python/ray/data/read_api.py index b608b1bb523f..73a393acbc35 100644 --- a/python/ray/data/read_api.py +++ b/python/ray/data/read_api.py @@ -46,7 +46,6 @@ ndarray_to_block, get_table_block_metadata, ) -from ray.data.row import TableRow from ray.data.block import Block, BlockAccessor, BlockExecStats, BlockMetadata from ray.data.context import DEFAULT_SCHEDULING_STRATEGY, WARN_PREFIX, DataContext from ray.data.datastream import Datastream, MaterializedDatastream @@ -110,16 +109,16 @@ def from_items( *, parallelism: int = -1, output_arrow_format: bool = False, -) -> MaterializedDatastream[TableRow]: +) -> MaterializedDatastream: """Create a datastream from a list of local Python objects. Examples: >>> import ray >>> ds = ray.data.from_items([1, 2, 3, 4, 5]) # doctest: +SKIP >>> ds # doctest: +SKIP - MaterializedDatastream(num_blocks=5, num_rows=5, schema=) - >>> ds.take(2) # doctest: +SKIP - [1, 2] + MaterializedDatastream(num_blocks=5, num_rows=5, schema={item: int64}) + >>> ds.take_batch(2) # doctest: +SKIP + {"item": array([1, 2])} Args: items: List of local Python objects. @@ -208,7 +207,7 @@ def from_items( @PublicAPI -def range(n: int, *, parallelism: int = -1) -> Datastream[TableRow]: +def range(n: int, *, parallelism: int = -1) -> Datastream: """Create a datastream from a range of integers [0..n). Examples: @@ -241,29 +240,8 @@ def range(n: int, *, parallelism: int = -1) -> Datastream[TableRow]: ) -@PublicAPI -def range_table(n: int, *, parallelism: int = -1) -> Datastream[TableRow]: - """Create a tabular stream from a range of integers [0..n). 
- - Examples: - >>> import ray - >>> ds = ray.data.range_table(1000) # doctest: +SKIP - >>> ds # doctest: +SKIP - Datastream(num_blocks=200, num_rows=1000, schema={value: int64}) - >>> ds.map(lambda r: {"v2": r["value"] * 2}).take(2) # doctest: +SKIP - [ArrowRow({'v2': 0}), ArrowRow({'v2': 2})] - - This is similar to range(), but uses Arrow tables to hold the integers - in Arrow records. The datastream elements take the form {"value": N}. - - Args: - n: The upper bound of the range of integer records. - parallelism: The amount of parallelism to use for the datastream. - Parallelism may be limited by the number of items. - - Returns: - Datastream producing the integers as Arrow records. - """ +@Deprecated +def range_table(n: int, *, parallelism: int = -1) -> Datastream: ctx = ray.data.DataContext.get_current() if ctx.strict_mode: raise DeprecationWarning( @@ -284,9 +262,7 @@ def range_arrow(*args, **kwargs): @PublicAPI -def range_tensor( - n: int, *, shape: Tuple = (1,), parallelism: int = -1 -) -> Datastream[TableRow]: +def range_tensor(n: int, *, shape: Tuple = (1,), parallelism: int = -1) -> Datastream: """Create a Tensor stream from a range of integers [0..n). Examples: @@ -296,17 +272,15 @@ def range_tensor( Datastream( num_blocks=..., num_rows=1000, - schema={__value__: numpy.ndarray(shape=(2, 2), dtype=int64)} - ) + schema={data: numpy.ndarray(shape=(2, 2), dtype=int64)}) >>> ds.map_batches(lambda arr: arr * 2).take(2) # doctest: +SKIP [array([[0, 0], [0, 0]]), - array([[2, 2], + array([[2, 2], [2, 2]])] This is similar to range_table(), but uses the ArrowTensorArray extension - type. The datastream elements take the form - {"__value__": array(N, shape=shape)}. + type. The datastream elements take the form {"data": array(N, shape=shape)}. Args: n: The upper bound of the range of integer records. 
@@ -330,12 +304,12 @@ def range_tensor( @PublicAPI def read_datasource( - datasource: Datasource[T], + datasource: Datasource, *, parallelism: int = -1, ray_remote_args: Dict[str, Any] = None, **read_args, -) -> Datastream[T]: +) -> Datastream: """Read a stream from a custom data source. Args: @@ -485,7 +459,7 @@ def read_mongo( parallelism: int = -1, ray_remote_args: Dict[str, Any] = None, **mongo_args, -) -> Datastream[TableRow]: +) -> Datastream: """Create an Arrow datastream from MongoDB. The data to read from is specified via the ``uri``, ``database`` and ``collection`` @@ -565,7 +539,7 @@ def read_parquet( tensor_column_schema: Optional[Dict[str, Tuple[np.dtype, Tuple[int, ...]]]] = None, meta_provider: ParquetMetadataProvider = DefaultParquetMetadataProvider(), **arrow_parquet_args, -) -> Datastream[TableRow]: +) -> Datastream: """Create an Arrow datastream from parquet files. Examples: @@ -656,7 +630,7 @@ def read_images( mode: Optional[str] = None, include_paths: bool = False, ignore_missing_paths: bool = False, -) -> Datastream[TableRow]: +) -> Datastream: """Read images from the specified paths. Examples: @@ -764,7 +738,7 @@ def read_parquet_bulk( ParquetBaseDatasource.file_extension_filter() ), **arrow_parquet_args, -) -> Datastream[TableRow]: +) -> Datastream: """Create an Arrow datastream from a large number (such as >1K) of parquet files quickly. @@ -861,7 +835,7 @@ def read_json( partitioning: Partitioning = Partitioning("hive"), ignore_missing_paths: bool = False, **arrow_json_args, -) -> Datastream[TableRow]: +) -> Datastream: """Create an Arrow datastream from json files. Examples: @@ -938,7 +912,7 @@ def read_csv( partitioning: Partitioning = Partitioning("hive"), ignore_missing_paths: bool = False, **arrow_csv_args, -) -> Datastream[TableRow]: +) -> Datastream: r"""Create an Arrow datastream from csv files. 
Examples: @@ -978,7 +952,7 @@ def read_csv( >>> ds = ray.data.read_csv("example://year=2022/month=09/sales.csv") # doctest: + SKIP >>> ds.take(1) # doctest: + SKIP - [{'order_number': 10107, 'quantity': 30, 'year': '2022', 'month': '09'} + [{'order_number': 10107, 'quantity': 30, 'year': '2022', 'month': '09'}] By default, ``read_csv`` reads all files from file paths. If you want to filter files by file extensions, set the ``partition_filter`` parameter. @@ -1045,7 +1019,7 @@ def read_text( partition_filter: Optional[PathPartitionFilter] = None, partitioning: Partitioning = None, ignore_missing_paths: bool = False, -) -> Datastream[TableRow]: +) -> Datastream: """Create a datastream from lines stored in text files. Examples: @@ -1113,7 +1087,7 @@ def read_numpy( partitioning: Partitioning = None, ignore_missing_paths: bool = False, **numpy_load_args, -) -> Datastream[TableRow]: +) -> Datastream: """Create an Arrow datastream from numpy files. Examples: @@ -1176,7 +1150,7 @@ def read_tfrecords( partition_filter: Optional[PathPartitionFilter] = None, ignore_missing_paths: bool = False, tf_schema: Optional["schema_pb2.Schema"] = None, -) -> Datastream[TableRow]: +) -> Datastream: """Create a datastream from TFRecord files that contain `tf.train.Example `_ messages. @@ -1281,7 +1255,7 @@ def read_webdataset( filerename: Optional[Union[list, callable]] = None, suffixes: Optional[Union[list, callable]] = None, verbose_open: bool = False, -) -> Datastream[TableRow]: +) -> Datastream: """Create a datastream from WebDataset files. Args: @@ -1340,7 +1314,7 @@ def read_binary_files( partitioning: Partitioning = None, ignore_missing_paths: bool = False, output_arrow_format: bool = False, -) -> Datastream[TableRow]: +) -> Datastream: """Create a datastream from binary files of arbitrary contents. 
Examples: @@ -1412,7 +1386,7 @@ def read_sql( *, parallelism: int = -1, ray_remote_args: Optional[Dict[str, Any]] = None, -) -> Datastream[Any]: +) -> Datastream: """Read from a database that provides a `Python DB API2-compliant `_ connector. @@ -1488,7 +1462,7 @@ def create_connection(): @PublicAPI -def from_dask(df: "dask.DataFrame") -> MaterializedDatastream[TableRow]: +def from_dask(df: "dask.DataFrame") -> MaterializedDatastream: """Create a datastream from a Dask DataFrame. Args: @@ -1526,7 +1500,7 @@ def to_ref(df): @PublicAPI -def from_mars(df: "mars.DataFrame") -> MaterializedDatastream[TableRow]: +def from_mars(df: "mars.DataFrame") -> MaterializedDatastream: """Create a datastream from a MARS dataframe. Args: @@ -1546,7 +1520,7 @@ def from_mars(df: "mars.DataFrame") -> MaterializedDatastream[TableRow]: @PublicAPI -def from_modin(df: "modin.DataFrame") -> MaterializedDatastream[TableRow]: +def from_modin(df: "modin.DataFrame") -> MaterializedDatastream: """Create a datastream from a Modin dataframe. Args: @@ -1569,7 +1543,7 @@ def from_modin(df: "modin.DataFrame") -> MaterializedDatastream[TableRow]: @PublicAPI def from_pandas( dfs: Union["pandas.DataFrame", List["pandas.DataFrame"]] -) -> MaterializedDatastream[TableRow]: +) -> MaterializedDatastream: """Create a datastream from a list of Pandas dataframes. Args: @@ -1596,7 +1570,7 @@ def from_pandas( @DeveloperAPI def from_pandas_refs( dfs: Union[ObjectRef["pandas.DataFrame"], List[ObjectRef["pandas.DataFrame"]]], -) -> MaterializedDatastream[TableRow]: +) -> MaterializedDatastream: """Create a datastream from a list of Ray object references to Pandas dataframes. @@ -1655,9 +1629,7 @@ def from_pandas_refs( @PublicAPI -def from_numpy( - ndarrays: Union[np.ndarray, List[np.ndarray]] -) -> MaterializedDatastream[TableRow]: +def from_numpy(ndarrays: Union[np.ndarray, List[np.ndarray]]) -> MaterializedDatastream: """Create a datastream from a list of NumPy ndarrays. 
Args: @@ -1675,7 +1647,7 @@ def from_numpy( @DeveloperAPI def from_numpy_refs( ndarrays: Union[ObjectRef[np.ndarray], List[ObjectRef[np.ndarray]]], -) -> MaterializedDatastream[TableRow]: +) -> MaterializedDatastream: """Create a datastream from a list of NumPy ndarray futures. Args: @@ -1727,7 +1699,7 @@ def from_numpy_refs( @PublicAPI def from_arrow( tables: Union["pyarrow.Table", bytes, List[Union["pyarrow.Table", bytes]]], -) -> MaterializedDatastream[TableRow]: +) -> MaterializedDatastream: """Create a datastream from a list of Arrow tables. Args: @@ -1750,7 +1722,7 @@ def from_arrow_refs( ObjectRef[Union["pyarrow.Table", bytes]], List[ObjectRef[Union["pyarrow.Table", bytes]]], ], -) -> MaterializedDatastream[TableRow]: +) -> MaterializedDatastream: """Create a datastream from a set of Arrow tables. Args: @@ -1782,7 +1754,7 @@ def from_arrow_refs( @PublicAPI def from_spark( df: "pyspark.sql.DataFrame", *, parallelism: Optional[int] = None -) -> MaterializedDatastream[TableRow]: +) -> MaterializedDatastream: """Create a datastream from a Spark dataframe. Args: @@ -1803,9 +1775,7 @@ def from_spark( @PublicAPI def from_huggingface( dataset: Union["datasets.Dataset", "datasets.DatasetDict"], -) -> Union[ - MaterializedDatastream[TableRow], Dict[str, MaterializedDatastream[TableRow]] -]: +) -> Union[MaterializedDatastream]: """Create a datastream from a Hugging Face Datasets Dataset. 
This function is not parallelized, and is intended to be used @@ -1822,7 +1792,7 @@ def from_huggingface( """ import datasets - def convert(ds: "datasets.Dataset") -> Datastream[TableRow]: + def convert(ds: "datasets.Dataset") -> Datastream: ray_ds = from_arrow(ds.data.table) logical_plan = LogicalPlan(FromHuggingFace(ds)) ray_ds._logical_plan = logical_plan @@ -1917,9 +1887,9 @@ def from_torch( >>> dataset = datasets.MNIST("data", download=True) # doctest: +SKIP >>> ds = ray.data.from_torch(dataset) # doctest: +SKIP >>> ds # doctest: +SKIP - Datastream(num_blocks=200, num_rows=60000, schema=) + Datastream(num_blocks=200, num_rows=60000, schema={item: object}) >>> ds.take(1) # doctest: +SKIP - [(, 5)] + {"item": (, 5)} Args: dataset: A Torch dataset. diff --git a/python/ray/data/row.py b/python/ray/data/row.py index c25ca4855643..37252fd194eb 100644 --- a/python/ray/data/row.py +++ b/python/ray/data/row.py @@ -1,10 +1,10 @@ from collections.abc import Mapping from typing import Any -from ray.util.annotations import PublicAPI +from ray.util.annotations import Deprecated -@PublicAPI +@Deprecated("TableRow is no longer part of the public Ray Data API.") class TableRow(Mapping): """ A dict-like row of a tabular ``Datastream``. 
diff --git a/python/ray/data/tests/preprocessors/test_batch_mapper.py b/python/ray/data/tests/preprocessors/test_batch_mapper.py index 3279ff9e835d..915a236c736a 100644 --- a/python/ray/data/tests/preprocessors/test_batch_mapper.py +++ b/python/ray/data/tests/preprocessors/test_batch_mapper.py @@ -1,4 +1,4 @@ -from typing import Dict, Union +from typing import Dict import numpy as np import pandas as pd @@ -7,7 +7,6 @@ from pytest_lazyfixture import lazy_fixture import ray -from ray.air.constants import TENSOR_COLUMN_NAME from ray.data.preprocessors import BatchMapper from ray.tests.conftest import * # noqa @@ -78,26 +77,16 @@ def add_and_modify_udf(df: "pd.DataFrame"): def test_batch_mapper_pandas_data_format( ray_start_regular_shared, ds, expected_df, expected_numpy_df ): - """Tests batch mapper functionality for pandas data format. - - Note: - For single column pandas dataframes, we automatically convert it to - single column tensor with column name as `__value__`. - """ - def add_and_modify_udf_pandas(df: "pd.DataFrame"): df["column_1"] = df["column_1"] + 1 if "column_2" in df: df["column_2"] *= 2 return df - def add_and_modify_udf_numpy(data: Union[np.ndarray, Dict[str, np.ndarray]]): - if isinstance(data, np.ndarray): - data += 1 - else: - data["column_1"] = data["column_1"] + 1 - if "column_2" in data: - data["column_2"] *= 2 + def add_and_modify_udf_numpy(data: Dict[str, np.ndarray]): + data["column_1"] = data["column_1"] + 1 + if "column_2" in data: + data["column_2"] *= 2 return data # Test map_batches @@ -172,29 +161,6 @@ def check_batch_size(batch): } ), ), - ( - lazy_fixture("ds_arrow_single_column_tensor_format"), - pd.DataFrame( - { - TENSOR_COLUMN_NAME: [ - [[1, 2], [3, 4]], - [[5, 6], [7, 8]], - [[9, 10], [11, 12]], - [[13, 14], [15, 16]], - ] - } - ), - pd.DataFrame( - { - TENSOR_COLUMN_NAME: [ - [[1, 2], [3, 4]], - [[5, 6], [7, 8]], - [[9, 10], [11, 12]], - [[13, 14], [15, 16]], - ] - } - ), - ), ( lazy_fixture("ds_arrow_multi_column_format"), 
pd.DataFrame( @@ -231,13 +197,10 @@ def add_and_modify_udf_pandas(df: "pd.DataFrame"): df["column_2"] *= 2 return df - def add_and_modify_udf_numpy(data: Union[np.ndarray, Dict[str, np.ndarray]]): - if isinstance(data, np.ndarray): - data = data + 1 - else: - data["column_1"] = data["column_1"] + 1 - if "column_2" in data: - data["column_2"] = data["column_2"] * 2 + def add_and_modify_udf_numpy(data: Dict[str, np.ndarray]): + data["column_1"] = data["column_1"] + 1 + if "column_2" in data: + data["column_2"] = data["column_2"] * 2 return data # Test map_batches @@ -270,7 +233,7 @@ def add_and_modify_udf_numpy(data: Union[np.ndarray, Dict[str, np.ndarray]]): lazy_fixture("ds_numpy_single_column_tensor_format"), pd.DataFrame( { - TENSOR_COLUMN_NAME: [ + "data": [ [[1, 2], [3, 4]], [[5, 6], [7, 8]], [[9, 10], [11, 12]], @@ -281,25 +244,18 @@ def add_and_modify_udf_numpy(data: Union[np.ndarray, Dict[str, np.ndarray]]): ), ( lazy_fixture("ds_numpy_list_of_ndarray_tensor_format"), - pd.DataFrame({TENSOR_COLUMN_NAME: [[[1, 2], [3, 4]]] * 4}), + pd.DataFrame({"data": [[[1, 2], [3, 4]]] * 4}), ), ], ) def test_batch_mapper_numpy_data_format(ds, expected_df): - """Tests batch mapper functionality for numpy data format. - - Note: - For single column pandas dataframes, we automatically convert it to - single column tensor with column name as `__value__`. 
- """ - def add_and_modify_udf_pandas(df: "pd.DataFrame"): col_name = list(df.columns)[0] df[col_name] = df[col_name] + 1 return df - def add_and_modify_udf_numpy(data: Union[np.ndarray, Dict[str, np.ndarray]]): - data = data + 1 + def add_and_modify_udf_numpy(data: Dict[str, np.ndarray]): + data["data"] = data["data"] + 1 return data # Test map_batches diff --git a/python/ray/data/tests/preprocessors/test_preprocessors.py b/python/ray/data/tests/preprocessors/test_preprocessors.py index ed8ebf763260..85485198dfae 100644 --- a/python/ray/data/tests/preprocessors/test_preprocessors.py +++ b/python/ray/data/tests/preprocessors/test_preprocessors.py @@ -129,7 +129,7 @@ def test_fit_twice(mocked_warn): scaler.fit(ds) assert scaler.stats_ == {"min(B)": 1, "max(B)": 5, "min(C)": 1, "max(C)": 1} - ds = ds.map_batches(lambda x: x * 2) + ds = ds.map_batches(lambda x: {k: v * 2 for k, v in x.items()}) # Fit again scaler.fit(ds) # Assert that the fitted state is corresponding to the second ds. @@ -182,7 +182,7 @@ def _determine_transform_to_use(self): def test_pipeline_fail(): - ds = ray.data.range_table(5).window(blocks_per_window=1).repeat(1) + ds = ray.data.range(5).window(blocks_per_window=1).repeat(1) class FittablePreprocessor(Preprocessor): _is_fittable = True diff --git a/python/ray/data/tests/preprocessors/test_torch.py b/python/ray/data/tests/preprocessors/test_torch.py index 78455bc6753f..2f2d99a69462 100644 --- a/python/ray/data/tests/preprocessors/test_torch.py +++ b/python/ray/data/tests/preprocessors/test_torch.py @@ -19,9 +19,9 @@ def __repr__(self): preprocessor = TorchVisionPreprocessor( columns=["spam"], transform=StubTransform() ) - assert ( - repr(preprocessor) - == "TorchVisionPreprocessor(columns=['spam'], transform=StubTransform())" + assert repr(preprocessor) == ( + "TorchVisionPreprocessor(columns=['spam'], " + "output_columns=['spam'], transform=StubTransform())" ) @pytest.mark.parametrize( @@ -112,9 +112,7 @@ def 
test_invalid_transform_raises_value_error(self): {"image": np.zeros((32, 32, 3)), "label": 1}, ] ) - # `TorchVisionPreprocessor` expects transforms to return `torch.Tensor`s, but - # this `transform` returns a `np.ndarray`. - transform = transforms.Lambda(lambda tensor: tensor.numpy()) + transform = transforms.Lambda(lambda tensor: "BLAH BLAH INVALID") preprocessor = TorchVisionPreprocessor(columns=["image"], transform=transform) with pytest.raises(ValueError): diff --git a/python/ray/data/tests/test_all_to_all.py b/python/ray/data/tests/test_all_to_all.py index c848adb8eece..e0df35390624 100644 --- a/python/ray/data/tests/test_all_to_all.py +++ b/python/ray/data/tests/test_all_to_all.py @@ -12,15 +12,18 @@ from ray.data.aggregate import AggregateFn, Count, Max, Mean, Min, Std, Sum, Quantile from ray.data.context import DataContext from ray.data.tests.conftest import * # noqa +from ray.data.tests.util import column_udf, named_values, STRICT_MODE from ray.tests.conftest import * # noqa def test_zip(ray_start_regular_shared): ds1 = ray.data.range(5, parallelism=5) - ds2 = ray.data.range(5, parallelism=5).map(lambda x: x + 1) + ds2 = ray.data.range(5, parallelism=5).map(column_udf("id", lambda x: x + 1)) ds = ds1.zip(ds2) - assert ds.schema() == tuple - assert ds.take() == [(0, 1), (1, 2), (2, 3), (3, 4), (4, 5)] + assert ds.schema().names == ["id", "id_1"] + assert ds.take() == named_values( + ["id", "id_1"], [(0, 1), (1, 2), (2, 3), (3, 4), (4, 5)] + ) with pytest.raises(ValueError): ds.zip(ray.data.range(3)).materialize() @@ -34,10 +37,14 @@ def test_zip_different_num_blocks_combinations( ): n = 12 ds1 = ray.data.range(n, parallelism=num_blocks1) - ds2 = ray.data.range(n, parallelism=num_blocks2).map(lambda x: x + 1) + ds2 = ray.data.range(n, parallelism=num_blocks2).map( + column_udf("id", lambda x: x + 1) + ) ds = ds1.zip(ds2) - assert ds.schema() == tuple - assert ds.take() == list(zip(range(n), range(1, n + 1))) + assert ds.schema().names == ["id", "id_1"] + 
assert ds.take() == named_values( + ["id", "id_1"], list(zip(range(n), range(1, n + 1))) + ) @pytest.mark.parametrize( @@ -81,33 +88,31 @@ def test_zip_pandas(ray_start_regular_shared): ds = ds1.zip(ds2) assert ds.count() == 2 assert "{col1: int64, col2: int64, col3: object, col4: object}" in str(ds) - result = [r.as_pydict() for r in ds.take()] + result = list(ds.take()) assert result[0] == {"col1": 1, "col2": 4, "col3": "a", "col4": "d"} ds3 = ray.data.from_pandas(pd.DataFrame({"col2": ["a", "b"], "col4": ["d", "e"]})) ds = ds1.zip(ds3) assert ds.count() == 2 assert "{col1: int64, col2: int64, col2_1: object, col4: object}" in str(ds) - result = [r.as_pydict() for r in ds.take()] + result = list(ds.take()) assert result[0] == {"col1": 1, "col2": 4, "col2_1": "a", "col4": "d"} def test_zip_arrow(ray_start_regular_shared): - ds1 = ray.data.range_table(5).map(lambda r: {"id": r["value"]}) - ds2 = ray.data.range_table(5).map( - lambda r: {"a": r["value"] + 1, "b": r["value"] + 2} - ) + ds1 = ray.data.range(5).map(lambda r: {"id": r["id"]}) + ds2 = ray.data.range(5).map(lambda r: {"a": r["id"] + 1, "b": r["id"] + 2}) ds = ds1.zip(ds2) assert ds.count() == 5 assert "{id: int64, a: int64, b: int64}" in str(ds) - result = [r.as_pydict() for r in ds.take()] + result = list(ds.take()) assert result[0] == {"id": 0, "a": 1, "b": 2} # Test duplicate column names. 
ds = ds1.zip(ds1).zip(ds1) assert ds.count() == 5 assert "{id: int64, id_1: int64, id_2: int64}" in str(ds) - result = [r.as_pydict() for r in ds.take()] + result = list(ds.take()) assert result[0] == {"id": 0, "id_1": 0, "id_2": 0} @@ -115,7 +120,7 @@ def test_zip_preserve_order(ray_start_regular_shared): def foo(x): import time - if x[0] < 5: + if x["item"] < 5: time.sleep(1) return x @@ -125,7 +130,9 @@ def foo(x): ds2 = ray.data.from_items(items, parallelism=num_items) ds2 = ds2.map_batches(foo, batch_size=1) result = ds1.zip(ds2).take_all() - assert result == list(zip(range(num_items), range(num_items))), result + assert result == named_values( + ["item", "item_1"], list(zip(range(num_items), range(num_items))) + ), result def test_empty_shuffle(ray_start_regular_shared): @@ -177,8 +184,6 @@ def test_repartition_noshuffle(ray_start_regular_shared): ds4 = ds.repartition(40, shuffle=False) assert ds4.num_blocks() == 40 - blocks = ray.get(ds4.get_internal_block_refs()) - assert all(isinstance(block, list) for block in blocks), blocks assert ds4.sum() == 190 assert ds4._block_num_rows() == [1] * 20 + [0] * 20 @@ -192,7 +197,7 @@ def test_repartition_noshuffle(ray_start_regular_shared): def test_repartition_shuffle_arrow(ray_start_regular_shared): - ds = ray.data.range_table(20, parallelism=10) + ds = ray.data.range(20, parallelism=10) assert ds.num_blocks() == 10 assert ds.count() == 20 assert ds._block_num_rows() == [2] * 10 @@ -207,7 +212,7 @@ def test_repartition_shuffle_arrow(ray_start_regular_shared): assert ds3.count() == 20 assert ds3._block_num_rows() == [2] * 10 + [0] * 10 - large = ray.data.range_table(10000, parallelism=10) + large = ray.data.range(10000, parallelism=10) large = large.repartition(20, shuffle=True) assert large._block_num_rows() == [500] * 20 @@ -219,40 +224,24 @@ def test_grouped_datastream_repr(ray_start_regular_shared): def test_groupby_arrow(ray_start_regular_shared, use_push_based_shuffle): # Test empty datastream. 
- agg_ds = ( - ray.data.range_table(10) - .filter(lambda r: r["value"] > 10) - .groupby("value") - .count() - ) + agg_ds = ray.data.range(10).filter(lambda r: r["id"] > 10).groupby("value").count() assert agg_ds.count() == 0 def test_groupby_errors(ray_start_regular_shared): ds = ray.data.range(100) - - ds.groupby(None).count().show() # OK - ds.groupby(lambda x: x % 2).count().show() # OK - with pytest.raises(ValueError): - ds.groupby("foo").count().show() - - ds = ray.data.range_table(100) ds.groupby(None).count().show() # OK with pytest.raises(ValueError): ds.groupby(lambda x: x % 2).count().show() + with pytest.raises(ValueError): + ds.groupby("foo").count().show() def test_agg_errors(ray_start_regular_shared): - ds = ray.data.range(100) from ray.data.aggregate import Max - ds.aggregate(Max()) # OK - ds.aggregate(Max(lambda x: x)) # OK - with pytest.raises(ValueError): - ds.aggregate(Max("foo")) - - ds = ray.data.range_table(100) - ds.aggregate(Max("value")) # OK + ds = ray.data.range(100) + ds.aggregate(Max("id")) # OK with pytest.raises(ValueError): ds.aggregate(Max()) with pytest.raises(ValueError): @@ -287,7 +276,7 @@ def test_groupby_agg_name_conflict(ray_start_regular_shared, num_parts): ), ) assert agg_ds.count() == 3 - assert [row.as_pydict() for row in agg_ds.sort("A").iter_rows()] == [ + assert list(agg_ds.sort("A").iter_rows()) == [ {"A": 0, "foo": 49.5, "foo_2": 49.5}, {"A": 1, "foo": 49.0, "foo_2": 49.0}, {"A": 2, "foo": 50.0, "foo_2": 50.0}, @@ -316,7 +305,7 @@ def _to_pandas(ds): ds = _to_pandas(ds) agg_ds = ds.groupby("A").count() assert agg_ds.count() == 3 - assert [row.as_pydict() for row in agg_ds.sort("A").iter_rows()] == [ + assert list(agg_ds.sort("A").iter_rows()) == [ {"A": 0, "count()": 34}, {"A": 1, "count()": 33}, {"A": 2, "count()": 33}, @@ -346,7 +335,7 @@ def _to_pandas(ds): agg_ds = ds.groupby("A").sum("B") assert agg_ds.count() == 3 - assert [row.as_pydict() for row in agg_ds.sort("A").iter_rows()] == [ + assert 
list(agg_ds.sort("A").iter_rows()) == [ {"A": 0, "sum(B)": 1683}, {"A": 1, "sum(B)": 1617}, {"A": 2, "sum(B)": 1650}, @@ -361,7 +350,7 @@ def _to_pandas(ds): nan_grouped_ds = ds.groupby("A") nan_agg_ds = nan_grouped_ds.sum("B") assert nan_agg_ds.count() == 3 - assert [row.as_pydict() for row in nan_agg_ds.sort("A").iter_rows()] == [ + assert list(nan_agg_ds.sort("A").iter_rows()) == [ {"A": 0, "sum(B)": 1683}, {"A": 1, "sum(B)": 1617}, {"A": 2, "sum(B)": 1650}, @@ -417,10 +406,10 @@ def _to_pandas(ds): assert ds.sum("A") == 4950 # Test empty datastream - ds = ray.data.range_table(10) + ds = ray.data.range(10) if ds_format == "pandas": ds = _to_pandas(ds) - assert ds.filter(lambda r: r["value"] > 10).sum("value") is None + assert ds.filter(lambda r: r["id"] > 10).sum("id") is None # Test built-in global sum aggregation with nans nan_ds = ray.data.from_items([{"A": x} for x in xs] + [{"A": None}]).repartition( @@ -460,7 +449,7 @@ def _to_pandas(ds): agg_ds = ds.groupby("A").min("B") assert agg_ds.count() == 3 - assert [row.as_pydict() for row in agg_ds.sort("A").iter_rows()] == [ + assert list(agg_ds.sort("A").iter_rows()) == [ {"A": 0, "min(B)": 0}, {"A": 1, "min(B)": 1}, {"A": 2, "min(B)": 2}, @@ -475,7 +464,7 @@ def _to_pandas(ds): nan_grouped_ds = ds.groupby("A") nan_agg_ds = nan_grouped_ds.min("B") assert nan_agg_ds.count() == 3 - assert [row.as_pydict() for row in nan_agg_ds.sort("A").iter_rows()] == [ + assert list(nan_agg_ds.sort("A").iter_rows()) == [ {"A": 0, "min(B)": 0}, {"A": 1, "min(B)": 1}, {"A": 2, "min(B)": 2}, @@ -534,7 +523,7 @@ def _to_pandas(ds): agg_ds = ds.groupby("A").max("B") assert agg_ds.count() == 3 - assert [row.as_pydict() for row in agg_ds.sort("A").iter_rows()] == [ + assert list(agg_ds.sort("A").iter_rows()) == [ {"A": 0, "max(B)": 99}, {"A": 1, "max(B)": 97}, {"A": 2, "max(B)": 98}, @@ -549,7 +538,7 @@ def _to_pandas(ds): nan_grouped_ds = ds.groupby("A") nan_agg_ds = nan_grouped_ds.max("B") assert nan_agg_ds.count() == 3 - assert 
[row.as_pydict() for row in nan_agg_ds.sort("A").iter_rows()] == [ + assert list(nan_agg_ds.sort("A").iter_rows()) == [ {"A": 0, "max(B)": 99}, {"A": 1, "max(B)": 97}, {"A": 2, "max(B)": 98}, @@ -608,7 +597,7 @@ def _to_pandas(ds): agg_ds = ds.groupby("A").mean("B") assert agg_ds.count() == 3 - assert [row.as_pydict() for row in agg_ds.sort("A").iter_rows()] == [ + assert list(agg_ds.sort("A").iter_rows()) == [ {"A": 0, "mean(B)": 49.5}, {"A": 1, "mean(B)": 49.0}, {"A": 2, "mean(B)": 50.0}, @@ -623,7 +612,7 @@ def _to_pandas(ds): nan_grouped_ds = ds.groupby("A") nan_agg_ds = nan_grouped_ds.mean("B") assert nan_agg_ds.count() == 3 - assert [row.as_pydict() for row in nan_agg_ds.sort("A").iter_rows()] == [ + assert list(nan_agg_ds.sort("A").iter_rows()) == [ {"A": 0, "mean(B)": 49.5}, {"A": 1, "mean(B)": 49.0}, {"A": 2, "mean(B)": 50.0}, @@ -736,7 +725,7 @@ def test_groupby_arrow_multicolumn(ray_start_regular_shared, num_parts): ray.data.from_pandas(df).repartition(num_parts).groupby("A").mean(["B", "C"]) ) assert agg_ds.count() == 3 - assert [row.as_pydict() for row in agg_ds.sort("A").iter_rows()] == [ + assert list(agg_ds.sort("A").iter_rows()) == [ {"A": 0, "mean(B)": 49.5, "mean(C)": 99.0}, {"A": 1, "mean(B)": 49.0, "mean(C)": 98.0}, {"A": 2, "mean(B)": 50.0, "mean(C)": 100.0}, @@ -746,7 +735,7 @@ def test_groupby_arrow_multicolumn(ray_start_regular_shared, num_parts): # groupby keys. agg_ds = ray.data.from_pandas(df).repartition(num_parts).groupby("A").mean() assert agg_ds.count() == 3 - assert [row.as_pydict() for row in agg_ds.sort("A").iter_rows()] == [ + assert list(agg_ds.sort("A").iter_rows()) == [ {"A": 0, "mean(B)": 49.5, "mean(C)": 99.0}, {"A": 1, "mean(B)": 49.0, "mean(C)": 98.0}, {"A": 2, "mean(B)": 50.0, "mean(C)": 100.0}, @@ -764,9 +753,9 @@ def test_groupby_agg_bad_on(ray_start_regular_shared): xs = list(range(100)) df = pd.DataFrame({"A": [x % 3 for x in xs], "B": xs, "C": [2 * x for x in xs]}) # Wrong type. 
- with pytest.raises(TypeError): + with pytest.raises(Exception): ray.data.from_pandas(df).groupby("A").mean(5).materialize() - with pytest.raises(TypeError): + with pytest.raises(Exception): ray.data.from_pandas(df).groupby("A").mean([5]).materialize() # Empty list. with pytest.raises(ValueError): @@ -782,9 +771,9 @@ def test_groupby_agg_bad_on(ray_start_regular_shared): # Test bad on for global aggregation # Wrong type. - with pytest.raises(TypeError): + with pytest.raises(Exception): ray.data.from_pandas(df).mean(5).materialize() - with pytest.raises(TypeError): + with pytest.raises(Exception): ray.data.from_pandas(df).mean([5]).materialize() # Empty list. with pytest.raises(ValueError): @@ -915,6 +904,7 @@ def test_groupby_arrow_multi_agg_alias(ray_start_regular_shared, num_parts): assert result == expected +@pytest.mark.skipif(STRICT_MODE, reason="Deprecated in strict mode") def test_groupby_simple(ray_start_regular_shared): seed = int(time.time()) print(f"Seeding RNG for test_groupby_simple with: {seed}") @@ -982,6 +972,7 @@ def test_groupby_simple(ray_start_regular_shared): assert agg_ds.count() == 0 +@pytest.mark.skipif(STRICT_MODE, reason="Deprecated in strict mode") @pytest.mark.parametrize("num_parts", [1, 30]) def test_groupby_simple_count(ray_start_regular_shared, num_parts): # Test built-in count aggregation @@ -997,6 +988,7 @@ def test_groupby_simple_count(ray_start_regular_shared, num_parts): assert agg_ds.sort(key=lambda r: r[0]).take(3) == [(0, 34), (1, 33), (2, 33)] +@pytest.mark.skipif(STRICT_MODE, reason="Deprecated in strict mode") @pytest.mark.parametrize("num_parts", [1, 30]) def test_groupby_simple_sum(ray_start_regular_shared, num_parts): # Test built-in sum aggregation @@ -1056,6 +1048,7 @@ def test_groupby_simple_sum(ray_start_regular_shared, num_parts): assert nan_ds.sum() is None +@pytest.mark.skipif(STRICT_MODE, reason="Deprecated in strict mode") def test_groupby_map_groups_for_empty_datastream(ray_start_regular_shared): ds = 
ray.data.from_items([]) mapped = ds.groupby(lambda x: x % 3).map_groups(lambda x: [min(x) * min(x)]) @@ -1063,6 +1056,7 @@ def test_groupby_map_groups_for_empty_datastream(ray_start_regular_shared): assert mapped.take_all() == [] +@pytest.mark.skipif(STRICT_MODE, reason="Deprecated in strict mode") def test_groupby_map_groups_merging_empty_result(ray_start_regular_shared): ds = ray.data.from_items([1, 2, 3]) # This needs to merge empty and non-empty results from different groups. @@ -1071,6 +1065,7 @@ def test_groupby_map_groups_merging_empty_result(ray_start_regular_shared): assert mapped.take_all() == [2, 3] +@pytest.mark.skipif(STRICT_MODE, reason="Deprecated in strict mode") def test_groupby_map_groups_merging_invalid_result(ray_start_regular_shared): ds = ray.data.from_items([1, 2, 3]) grouped = ds.groupby(lambda x: x) @@ -1084,12 +1079,15 @@ def test_groupby_map_groups_merging_invalid_result(ray_start_regular_shared): def test_groupby_map_groups_for_none_groupkey(ray_start_regular_shared, num_parts): ds = ray.data.from_items(list(range(100))) mapped = ( - ds.repartition(num_parts).groupby(None).map_groups(lambda x: [min(x) + max(x)]) + ds.repartition(num_parts) + .groupby(None) + .map_groups(lambda x: {"out": np.array([min(x["item"]) + max(x["item"])])}) ) assert mapped.count() == 1 - assert mapped.take_all() == [99] + assert mapped.take_all() == named_values("out", [99]) +@pytest.mark.skipif(STRICT_MODE, reason="Deprecated in strict mode") @pytest.mark.parametrize("num_parts", [1, 2, 30]) def test_groupby_map_groups_returning_empty_result(ray_start_regular_shared, num_parts): xs = list(range(100)) @@ -1114,6 +1112,7 @@ def test_groupby_map_groups_perf(ray_start_regular_shared): assert end - start < 60 +@pytest.mark.skipif(STRICT_MODE, reason="Deprecated in strict mode") @pytest.mark.parametrize("num_parts", [1, 2, 3, 30]) def test_groupby_map_groups_for_list(ray_start_regular_shared, num_parts): seed = int(time.time()) @@ -1210,12 +1209,14 @@ def 
test_groupby_map_groups_with_different_types(ray_start_regular_shared): def func(group): # Test output type is Python list, different from input type. - return [group["value"][0]] + value = int(group["value"][0]) + return {"out": np.array([value])} ds = ds.groupby("group").map_groups(func) - assert sorted(ds.take()) == [1, 3] + assert sorted([x["out"] for x in ds.take()]) == [1, 3] +@pytest.mark.skipif(STRICT_MODE, reason="Deprecated in strict mode") @pytest.mark.parametrize("num_parts", [1, 30]) def test_groupby_simple_min(ray_start_regular_shared, num_parts): # Test built-in min aggregation @@ -1267,6 +1268,7 @@ def test_groupby_simple_min(ray_start_regular_shared, num_parts): assert nan_ds.min() is None +@pytest.mark.skipif(STRICT_MODE, reason="Deprecated in strict mode") @pytest.mark.parametrize("num_parts", [1, 30]) def test_groupby_simple_max(ray_start_regular_shared, num_parts): # Test built-in max aggregation @@ -1318,6 +1320,7 @@ def test_groupby_simple_max(ray_start_regular_shared, num_parts): assert nan_ds.max() is None +@pytest.mark.skipif(STRICT_MODE, reason="Deprecated in strict mode") @pytest.mark.parametrize("num_parts", [1, 30]) def test_groupby_simple_mean(ray_start_regular_shared, num_parts): # Test built-in mean aggregation @@ -1378,6 +1381,7 @@ def test_groupby_simple_mean(ray_start_regular_shared, num_parts): assert nan_ds.mean() is None +@pytest.mark.skipif(STRICT_MODE, reason="Deprecated in strict mode") @pytest.mark.parametrize("num_parts", [1, 30]) def test_groupby_simple_std(ray_start_regular_shared, num_parts): # Test built-in std aggregation @@ -1479,6 +1483,7 @@ def test_groupby_simple_std(ray_start_regular_shared, num_parts): assert nan_ds.std() is None +@pytest.mark.skipif(STRICT_MODE, reason="Deprecated in strict mode") @pytest.mark.parametrize("num_parts", [1, 30]) def test_groupby_simple_multilambda(ray_start_regular_shared, num_parts): # Test built-in mean aggregation @@ -1508,6 +1513,7 @@ def 
test_groupby_simple_multilambda(ray_start_regular_shared, num_parts): ).mean([lambda x: x[0], lambda x: x[1]]) == (None, None) +@pytest.mark.skipif(STRICT_MODE, reason="Deprecated in strict mode") @pytest.mark.parametrize("num_parts", [1, 30]) def test_groupby_simple_multi_agg(ray_start_regular_shared, num_parts): seed = int(time.time()) @@ -1591,7 +1597,7 @@ def test_random_block_order(ray_start_regular_shared, restore_data_context): ds = ds.randomize_block_order(seed=0) results = ds.take() - expected = [6, 7, 8, 0, 1, 2, 3, 4, 5, 9, 10, 11] + expected = named_values("id", [6, 7, 8, 0, 1, 2, 3, 4, 5, 9, 10, 11]) assert results == expected # Test LazyBlockList.randomize_block_order. @@ -1603,7 +1609,9 @@ def test_random_block_order(ray_start_regular_shared, restore_data_context): lazy_blocklist_ds = ray.data.range(12, parallelism=4) lazy_blocklist_ds = lazy_blocklist_ds.randomize_block_order(seed=0) lazy_blocklist_results = lazy_blocklist_ds.take() - lazy_blocklist_expected = [6, 7, 8, 0, 1, 2, 3, 4, 5, 9, 10, 11] + lazy_blocklist_expected = named_values( + "id", [6, 7, 8, 0, 1, 2, 3, 4, 5, 9, 10, 11] + ) assert lazy_blocklist_results == lazy_blocklist_expected finally: context.optimize_fuse_read_stages = original_optimize_fuse_read_stages @@ -1648,9 +1656,9 @@ def range(n, parallelism=200): assert r1 != r0, (r1, r0) assert r1 != r3, (r1, r3) - r0 = ray.data.range_table(100, parallelism=5).take(999) - r1 = ray.data.range_table(100, parallelism=5).random_shuffle(seed=0).take(999) - r2 = ray.data.range_table(100, parallelism=5).random_shuffle(seed=0).take(999) + r0 = ray.data.range(100, parallelism=5).take(999) + r1 = ray.data.range(100, parallelism=5).random_shuffle(seed=0).take(999) + r2 = ray.data.range(100, parallelism=5).random_shuffle(seed=0).take(999) assert r1 == r2, (r1, r2) assert r1 != r0, (r1, r0) @@ -1685,6 +1693,7 @@ def test_random_shuffle_check_random(shutdown_only): num_contiguous = 1 prev = -1 for x in part: + x = x["item"] if prev != x: prev = x 
num_contiguous = 1 @@ -1710,6 +1719,7 @@ def test_random_shuffle_check_random(shutdown_only): num_increasing = 0 prev = -1 for x in part: + x = x["item"] if x >= prev: num_increasing += 1 else: diff --git a/python/ray/data/tests/test_binary.py b/python/ray/data/tests/test_binary.py index b5275f7afdda..2cadf64a2d79 100644 --- a/python/ray/data/tests/test_binary.py +++ b/python/ray/data/tests/test_binary.py @@ -21,6 +21,7 @@ from ray.data.tests.conftest import * # noqa from ray.data.tests.mock_http_server import * # noqa +from ray.data.tests.util import extract_values from ray.tests.conftest import * # noqa @@ -41,18 +42,12 @@ def test_read_binary_files_partitioning(ray_start_regular_shared, tmp_path): assert ds.take() == [{"bytes": b"foo", "path": path, "country": "us"}] -@pytest.mark.parametrize("output_arrow_format", [False, True]) -def test_read_binary_files(ray_start_regular_shared, output_arrow_format): +def test_read_binary_files(ray_start_regular_shared): with gen_bin_files(10) as (_, paths): - ds = ray.data.read_binary_files( - paths, parallelism=10, output_arrow_format=output_arrow_format - ) + ds = ray.data.read_binary_files(paths, parallelism=10) for i, item in enumerate(ds.iter_rows()): expected = open(paths[i], "rb").read() - if output_arrow_format: - assert expected == item["bytes"] - else: - assert expected == item + assert expected == item["bytes"] # Test metadata ops. 
assert ds.count() == 10 assert "bytes" in str(ds.schema()), ds @@ -84,28 +79,20 @@ def test_read_binary_files_with_fs(ray_start_regular_shared): ds = ray.data.read_binary_files(paths, filesystem=fs, parallelism=10) for i, item in enumerate(ds.iter_rows()): expected = open(paths[i], "rb").read() - assert expected == item + assert expected == item["bytes"] -@pytest.mark.parametrize("output_arrow_format", [False, True]) -def test_read_binary_files_with_paths(ray_start_regular_shared, output_arrow_format): +def test_read_binary_files_with_paths(ray_start_regular_shared): with gen_bin_files(10) as (_, paths): ds = ray.data.read_binary_files( paths, include_paths=True, parallelism=10, - output_arrow_format=output_arrow_format, ) - if output_arrow_format: - for i, item in enumerate(ds.iter_rows()): - assert paths[i] == item["path"] - expected = open(paths[i], "rb").read() - assert expected == item["bytes"] - else: - for i, (path, item) in enumerate(ds.iter_rows()): - assert path == paths[i] - expected = open(paths[i], "rb").read() - assert expected == item + for i, item in enumerate(ds.iter_rows()): + assert paths[i] == item["path"] + expected = open(paths[i], "rb").read() + assert expected == item["bytes"] # TODO(Clark): Hitting S3 in CI is currently broken due to some AWS @@ -131,7 +118,7 @@ def test_read_binary_snappy(ray_start_regular_shared, tmp_path): path, arrow_open_stream_args=dict(compression="snappy"), ) - assert sorted(ds.take()) == [byte_str] + assert sorted(extract_values("bytes", ds.take())) == [byte_str] def test_read_binary_snappy_inferred(ray_start_regular_shared, tmp_path): @@ -142,7 +129,7 @@ def test_read_binary_snappy_inferred(ray_start_regular_shared, tmp_path): bytes = BytesIO(byte_str) snappy.stream_compress(bytes, f) ds = ray.data.read_binary_files(path) - assert sorted(ds.take()) == [byte_str] + assert sorted(extract_values("bytes", ds.take())) == [byte_str] def test_read_binary_meta_provider( @@ -161,7 +148,7 @@ def 
test_read_binary_meta_provider( arrow_open_stream_args=dict(compression="snappy"), meta_provider=FastFileMetadataProvider(), ) - assert sorted(ds.take()) == [byte_str] + assert sorted(extract_values("bytes", ds.take())) == [byte_str] with pytest.raises(NotImplementedError): ray.data.read_binary_files( @@ -221,10 +208,10 @@ def skip_unpartitioned(kv_dict): ds, count=2, num_rows=2, - schema="", + schema="{bytes: binary}", num_computed=None, sorted_values=[b"1 a\n1 b\n1 c", b"3 e\n3 f\n3 g"], - ds_take_transform_fn=lambda t: t, + ds_take_transform_fn=lambda t: extract_values("bytes", t), ) assert ray.get(kept_file_counter.get.remote()) == 2 assert ray.get(skipped_file_counter.get.remote()) == 1 diff --git a/python/ray/data/tests/test_bulk_executor.py b/python/ray/data/tests/test_bulk_executor.py index 795c74a69eaf..862f8500ef06 100644 --- a/python/ray/data/tests/test_bulk_executor.py +++ b/python/ray/data/tests/test_bulk_executor.py @@ -1,3 +1,4 @@ +import pandas as pd import pytest import time @@ -13,12 +14,13 @@ from ray.data._internal.execution.operators.input_data_buffer import InputDataBuffer from ray.data._internal.execution.util import make_ref_bundles from ray.data.tests.conftest import * # noqa +from ray.data.tests.util import extract_values, column_udf def make_transform(block_fn): def map_fn(block_iter, ctx): for block in block_iter: - yield block_fn(block) + yield pd.DataFrame(block_fn(block)) return map_fn @@ -27,7 +29,7 @@ def ref_bundles_to_list(bundles: List[RefBundle]) -> List[List[Any]]: output = [] for bundle in bundles: for block, _ in bundle.blocks: - output.append(ray.get(block)) + output.append(list(ray.get(block)["id"])) return output @@ -49,14 +51,17 @@ def test_multi_stage_execution(ray_start_10_cpus_shared, preserve_order): o1 = InputDataBuffer(inputs) def delay_first(block): + block = block["id"] if block[0] == 0: print("Delaying first block to force de-ordering") time.sleep(2) result = [b * -1 for b in block] - return result + return 
{"id": result} o2 = MapOperator.create(make_transform(delay_first), o1) - o3 = MapOperator.create(make_transform(lambda block: [b * 2 for b in block]), o2) + o3 = MapOperator.create( + make_transform(lambda block: {"id": [b * 2 for b in block["id"]]}), o2 + ) def reverse_sort(inputs: List[RefBundle], ctx): reversed_list = inputs[::-1] @@ -79,10 +84,14 @@ def test_basic_stats(ray_start_10_cpus_shared): inputs = make_ref_bundles([[x] for x in range(20)]) o1 = InputDataBuffer(inputs) o2 = MapOperator.create( - make_transform(lambda block: [b * 2 for b in block]), o1, name="Foo" + make_transform(lambda block: {"id": [b * 2 for b in block["id"]]}), + o1, + name="Foo", ) o3 = MapOperator.create( - make_transform(lambda block: [b * 2 for b in block]), o2, name="Bar" + make_transform(lambda block: {"id": [b * 2 for b in block["id"]]}), + o2, + name="Bar", ) it = executor.execute(o3, initial_stats=prev_stats) output = ref_bundles_to_list(it) @@ -99,8 +108,8 @@ def test_basic_stats(ray_start_10_cpus_shared): def test_e2e_bulk_sanity(ray_start_10_cpus_shared): DataContext.get_current().new_execution_backend = True DataContext.get_current().use_streaming_executor = False - result = ray.data.range(5).map(lambda x: x + 1) - assert result.take_all() == [1, 2, 3, 4, 5], result + result = ray.data.range(5).map(column_udf("id", lambda x: x + 1)) + assert extract_values("id", result.take_all()) == [1, 2, 3, 4, 5], result # Checks new executor was enabled. 
assert "obj_store_mem_alloc" in result.stats(), result.stats() @@ -110,9 +119,11 @@ def test_actor_strategy(ray_start_10_cpus_shared): executor = BulkExecutor(ExecutionOptions()) inputs = make_ref_bundles([[x] for x in range(20)]) o1 = InputDataBuffer(inputs) - o2 = MapOperator.create(make_transform(lambda block: [b * -1 for b in block]), o1) + o2 = MapOperator.create( + make_transform(lambda block: {"id": [b * -1 for b in block["id"]]}), o1 + ) o3 = MapOperator.create( - make_transform(lambda block: [b * 2 for b in block]), + make_transform(lambda block: {"id": [b * 2 for b in block["id"]]}), o2, compute_strategy=ActorPoolStrategy(min_size=1, max_size=2), ray_remote_args={"num_cpus": 1}, @@ -129,11 +140,11 @@ def test_new_execution_backend_invocation(ray_start_10_cpus_shared): DataContext.get_current().use_streaming_executor = False # Read-only: will use legacy executor for now. ds = ray.data.range(10) - assert ds.take_all() == list(range(10)) + assert extract_values("id", ds.take_all()) == list(range(10)) # read->randomize_block_order: will use new executor, although it's also # a read-equivalent once fused. 
ds = ray.data.range(10).randomize_block_order() - assert set(ds.take_all()) == set(range(10)) + assert set(extract_values("id", ds.take_all())) == set(range(10)) if __name__ == "__main__": diff --git a/python/ray/data/tests/test_consumption.py b/python/ray/data/tests/test_consumption.py index 3f2c336f367e..aaed4409b1a7 100644 --- a/python/ray/data/tests/test_consumption.py +++ b/python/ray/data/tests/test_consumption.py @@ -12,18 +12,16 @@ from unittest.mock import patch import ray -from ray.data._internal.arrow_block import ArrowRow from ray.data._internal.block_builder import BlockBuilder from ray.data._internal.datastream_logger import DatastreamLogger from ray.data._internal.lazy_block_list import LazyBlockList -from ray.data._internal.pandas_block import PandasRow from ray.data.block import BlockAccessor, BlockMetadata from ray.data.context import DataContext from ray.data.datastream import Dataset, MaterializedDatastream, _sliding_window from ray.data.datasource.datasource import Datasource, ReadTask from ray.data.datasource.csv_datasource import CSVDatasource -from ray.data.row import TableRow from ray.data.tests.conftest import * # noqa +from ray.data.tests.util import column_udf, extract_values, STRICT_MODE from ray.tests.conftest import * # noqa from ray.util.scheduling_strategies import PlacementGroupSchedulingStrategy @@ -43,11 +41,13 @@ def test_avoid_placement_group_capture(shutdown_only, pipelined): def run(): ds0 = ray.data.range(5) ds = maybe_pipeline(ds0, pipelined) - assert sorted(ds.map(lambda x: x + 1).take()) == [1, 2, 3, 4, 5] + assert sorted( + extract_values("id", ds.map(column_udf("id", lambda x: x + 1)).take()) + ) == [1, 2, 3, 4, 5] ds = maybe_pipeline(ds0, pipelined) assert ds.count() == 5 ds = maybe_pipeline(ds0, pipelined) - assert sorted(ds.iter_rows()) == [0, 1, 2, 3, 4] + assert sorted(extract_values("id", ds.iter_rows())) == [0, 1, 2, 3, 4] pg = ray.util.placement_group([{"CPU": 1}]) ray.get( @@ -62,8 +62,8 @@ def run(): def 
test_dataset_lineage_serialization(shutdown_only): ray.init() ds = ray.data.range(10) - ds = ds.map(lambda x: x + 1) - ds = ds.map(lambda x: x + 1) + ds = ds.map(column_udf("id", lambda x: x + 1)) + ds = ds.map(column_udf("id", lambda x: x + 1)) ds = ds.random_shuffle() epoch = ds._get_epoch() uuid = ds._get_uuid() @@ -87,15 +87,15 @@ def test_dataset_lineage_serialization(shutdown_only): assert ds._plan._datastream_uuid == plan_uuid # Check Dataset content. assert ds.count() == 10 - assert sorted(ds.take()) == list(range(2, 12)) + assert sorted(extract_values("id", ds.take())) == list(range(2, 12)) def test_dataset_lineage_serialization_unsupported(shutdown_only): ray.init() # In-memory data sources not supported. ds = ray.data.from_items(list(range(10))) - ds = ds.map(lambda x: x + 1) - ds = ds.map(lambda x: x + 1) + ds = ds.map(column_udf("item", lambda x: x + 1)) + ds = ds.map(column_udf("item", lambda x: x + 1)) with pytest.raises(ValueError): ds.serialize_lineage() @@ -109,8 +109,8 @@ def test_dataset_lineage_serialization_unsupported(shutdown_only): ds2.serialize_lineage() # Post-lazy-read unions not supported. - ds = ray.data.range(10).map(lambda x: x + 1) - ds1 = ray.data.range(20).map(lambda x: 2 * x) + ds = ray.data.range(10).map(column_udf("id", lambda x: x + 1)) + ds1 = ray.data.range(20).map(column_udf("id", lambda x: 2 * x)) ds2 = ds.union(ds1) with pytest.raises(ValueError): @@ -123,7 +123,9 @@ def test_dataset_lineage_serialization_unsupported(shutdown_only): serialized_ds = ds2.serialize_lineage() ds3 = Dataset.deserialize_lineage(serialized_ds) - assert set(ds3.take(30)) == set(list(range(10)) + list(range(20))) + assert set(extract_values("id", ds3.take(30))) == set( + list(range(10)) + list(range(20)) + ) # Zips not supported. 
ds = ray.data.from_items(list(range(10))) @@ -138,23 +140,25 @@ def test_dataset_lineage_serialization_unsupported(shutdown_only): def test_basic(ray_start_regular_shared, pipelined): ds0 = ray.data.range(5) ds = maybe_pipeline(ds0, pipelined) - assert sorted(ds.map(lambda x: x + 1).take()) == [1, 2, 3, 4, 5] + assert sorted( + extract_values("id", ds.map(column_udf("id", lambda x: x + 1)).take()) + ) == [1, 2, 3, 4, 5] ds = maybe_pipeline(ds0, pipelined) assert ds.count() == 5 ds = maybe_pipeline(ds0, pipelined) - assert sorted(ds.iter_rows()) == [0, 1, 2, 3, 4] + assert sorted(extract_values("id", ds.iter_rows())) == [0, 1, 2, 3, 4] -def test_range_table(ray_start_regular_shared): - ds = ray.data.range_table(10, parallelism=10) +def test_range(ray_start_regular_shared): + ds = ray.data.range(10, parallelism=10) assert ds.num_blocks() == 10 assert ds.count() == 10 - assert ds.take() == [{"value": i} for i in range(10)] + assert ds.take() == [{"id": i} for i in range(10)] - ds = ray.data.range_table(10, parallelism=2) + ds = ray.data.range(10, parallelism=2) assert ds.num_blocks() == 2 assert ds.count() == 10 - assert ds.take() == [{"value": i} for i in range(10)] + assert ds.take() == [{"id": i} for i in range(10)] def test_empty_dataset(ray_start_regular_shared): @@ -164,7 +168,7 @@ def test_empty_dataset(ray_start_regular_shared): assert ds.schema() is None ds = ray.data.range(1) - ds = ds.filter(lambda x: x > 1) + ds = ds.filter(lambda x: x["id"] > 1) ds = ds.materialize() assert ( str(ds) @@ -217,17 +221,15 @@ def inc(x): def test_schema(ray_start_regular_shared): - ds = ray.data.range(10, parallelism=10) - ds2 = ray.data.range_table(10, parallelism=10) + ds2 = ray.data.range(10, parallelism=10) ds3 = ds2.repartition(5) ds3 = ds3.materialize() ds4 = ds3.map(lambda x: {"a": "hi", "b": 1.0}).limit(5).repartition(1) ds4 = ds4.materialize() - assert str(ds) == "Datastream(num_blocks=10, num_rows=10, schema=)" - assert str(ds2) == "Datastream(num_blocks=10, 
num_rows=10, schema={value: int64})" + assert str(ds2) == "Datastream(num_blocks=10, num_rows=10, schema={id: int64})" assert ( str(ds3) - == "MaterializedDatastream(num_blocks=5, num_rows=10, schema={value: int64})" + == "MaterializedDatastream(num_blocks=5, num_rows=10, schema={id: int64})" ) assert ( str(ds4) == "MaterializedDatastream(num_blocks=1, num_rows=5, " @@ -240,7 +242,7 @@ def test_schema_lazy(ray_start_regular_shared): # We do not kick off the read task by default. assert ds._plan._in_blocks._num_computed() == 0 schema = ds.schema() - assert schema == int + assert schema.names == ["id"] # Fetching the schema does not trigger execution, since # the schema is known beforehand for RangeDatasource. assert ds._plan._in_blocks._num_computed() == 0 @@ -270,63 +272,63 @@ def check_num_computed(expected): assert ds._plan.execute()._num_computed() == expected check_num_computed(0) - assert ds.take(10) == list(range(10)) + assert extract_values("id", ds.take(10)) == list(range(10)) check_num_computed(2) - assert ds.take(20) == list(range(20)) + assert extract_values("id", ds.take(20)) == list(range(20)) check_num_computed(4) - assert ds.take(30) == list(range(30)) + assert extract_values("id", ds.take(30)) == list(range(30)) check_num_computed(8) - assert ds.take(50) == list(range(50)) + assert extract_values("id", ds.take(50)) == list(range(50)) check_num_computed(16) - assert ds.take(100) == list(range(100)) + assert extract_values("id", ds.take(100)) == list(range(100)) check_num_computed(20) def test_dataset_repr(ray_start_regular_shared): ds = ray.data.range(10, parallelism=10) - assert repr(ds) == "Datastream(num_blocks=10, num_rows=10, schema=)" + assert repr(ds) == "Datastream(num_blocks=10, num_rows=10, schema={id: int64})" ds = ds.map_batches(lambda x: x) assert repr(ds) == ( "MapBatches()\n" - "+- Datastream(num_blocks=10, num_rows=10, schema=)" + "+- Datastream(num_blocks=10, num_rows=10, schema={id: int64})" ) - ds = ds.filter(lambda x: x > 0) + ds 
= ds.filter(lambda x: x["id"] > 0) assert repr(ds) == ( "Filter\n" "+- MapBatches()\n" - " +- Datastream(num_blocks=10, num_rows=10, schema=)" + " +- Datastream(num_blocks=10, num_rows=10, schema={id: int64})" ) ds = ds.random_shuffle() assert repr(ds) == ( "RandomShuffle\n" "+- Filter\n" " +- MapBatches()\n" - " +- Datastream(num_blocks=10, num_rows=10, schema=)" + " +- Datastream(num_blocks=10, num_rows=10, schema={id: int64})" ) ds = ds.materialize() assert ( repr(ds) - == "MaterializedDatastream(num_blocks=10, num_rows=9, schema=)" + == "MaterializedDatastream(num_blocks=10, num_rows=9, schema={id: int64})" ) ds = ds.map_batches(lambda x: x) assert repr(ds) == ( "MapBatches()\n" - "+- Datastream(num_blocks=10, num_rows=9, schema=)" + "+- Datastream(num_blocks=10, num_rows=9, schema={id: int64})" ) ds1, ds2 = ds.split(2) assert ( repr(ds1) == f"MaterializedDatastream(num_blocks=5, num_rows={ds1.count()}, " - "schema=)" + "schema={id: int64})" ) assert ( repr(ds2) == f"MaterializedDatastream(num_blocks=5, num_rows={ds2.count()}, " - "schema=)" + "schema={id: int64})" ) ds3 = ds1.union(ds2) - assert repr(ds3) == "Datastream(num_blocks=10, num_rows=9, schema=)" + assert repr(ds3) == "Datastream(num_blocks=10, num_rows=9, schema={id: int64})" ds = ds.zip(ds3) assert repr(ds) == ( - "Zip\n" "+- Datastream(num_blocks=10, num_rows=9, schema=)" + "Zip\n" "+- Datastream(num_blocks=10, num_rows=9, schema={id: int64})" ) def my_dummy_fn(x): @@ -336,7 +338,7 @@ def my_dummy_fn(x): ds = ds.map_batches(my_dummy_fn) assert repr(ds) == ( "MapBatches(my_dummy_fn)\n" - "+- Datastream(num_blocks=10, num_rows=10, schema=)" + "+- Datastream(num_blocks=10, num_rows=10, schema={id: int64})" ) @@ -346,7 +348,7 @@ def test_limit(ray_start_regular_shared, lazy): if not lazy: ds = ds.materialize() for i in range(100): - assert ds.limit(i).take(200) == list(range(i)) + assert extract_values("id", ds.limit(i).take(200)) == list(range(i)) # NOTE: We test outside the power-of-2 range in order 
to ensure that we're not reading @@ -377,7 +379,9 @@ def __init__(self): def prepare_read(self, parallelism, n): def range_(i): ray.get(self.counter.increment.remote()) - return [list(range(parallelism * i, parallelism * i + n))] + return [ + pd.DataFrame({"id": range(parallelism * i, parallelism * i + n)}) + ] return [ ReadTask( @@ -405,7 +409,7 @@ def range_(i): ) ds2 = ds.limit(limit) # Check content. - assert ds2.take(limit) == list(range(limit)) + assert extract_values("id", ds2.take(limit)) == list(range(limit)) # Check number of read tasks launched. assert ray.get(source.counter.get.remote()) == expected @@ -417,7 +421,7 @@ class DumbOnesDatasource(Datasource): def prepare_read(self, parallelism, n): return parallelism * [ ReadTask( - lambda: [[1] * n], + lambda: [pd.DataFrame({"id": [1] * n})], BlockMetadata( num_rows=None, size_bytes=sys.getsizeof(1) * n, @@ -430,27 +434,25 @@ def prepare_read(self, parallelism, n): ds = ray.data.read_datasource(DumbOnesDatasource(), parallelism=10, n=10) for i in range(1, 100): - assert ds.limit(i).take(100) == [1] * i + assert extract_values("id", ds.limit(i).take(100)) == [1] * i def test_convert_types(ray_start_regular_shared): plain_ds = ray.data.range(1) - arrow_ds = plain_ds.map(lambda x: {"a": x}) + arrow_ds = plain_ds.map(lambda x: {"a": x["id"]}) assert arrow_ds.take() == [{"a": 0}] - assert "ArrowRow" in arrow_ds.map(lambda x: str(type(x))).take()[0] + assert "dict" in str(arrow_ds.map(lambda x: {"out": str(type(x))}).take()[0]) - arrow_ds = ray.data.range_table(1) - assert arrow_ds.map(lambda x: "plain_{}".format(x["value"])).take() == ["plain_0"] - assert arrow_ds.map(lambda x: {"a": (x["value"],)}).take() == [{"a": [0]}] + arrow_ds = ray.data.range(1) + assert arrow_ds.map(lambda x: {"out": "plain_{}".format(x["id"])}).take() == [ + {"out": "plain_0"} + ] + assert arrow_ds.map(lambda x: {"a": (x["id"],)}).take() == [{"a": [0]}] def test_from_items(ray_start_regular_shared): ds = ray.data.from_items(["hello", 
"world"]) - assert ds.take() == ["hello", "world"] - assert isinstance(next(ds.iter_batches(batch_format=None)), list) - - with pytest.raises(ValueError): - ds = ray.data.from_items(["hello", "world"], output_arrow_format=True) + assert extract_values("item", ds.take()) == ["hello", "world"] ds = ray.data.from_items([{"hello": "world"}], output_arrow_format=True) assert ds.take() == [{"hello": "world"}] @@ -482,23 +484,23 @@ def test_from_items_parallelism_truncated(ray_start_regular_shared): def test_take_batch(ray_start_regular_shared): ds = ray.data.range(10, parallelism=2) - assert ds.take_batch(3) == [0, 1, 2] - assert ds.take_batch(6) == [0, 1, 2, 3, 4, 5] - assert ds.take_batch(100) == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + assert ds.take_batch(3)["id"].tolist() == [0, 1, 2] + assert ds.take_batch(6)["id"].tolist() == [0, 1, 2, 3, 4, 5] + assert ds.take_batch(100)["id"].tolist() == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] assert isinstance(ds.take_batch(3, batch_format="pandas"), pd.DataFrame) - assert isinstance(ds.take_batch(3, batch_format="numpy"), np.ndarray) + assert isinstance(ds.take_batch(3, batch_format="numpy"), dict) ds = ray.data.range_tensor(10, parallelism=2) - assert np.all(ds.take_batch(3) == np.array([[0], [1], [2]])) + assert np.all(ds.take_batch(3)["data"] == np.array([[0], [1], [2]])) assert isinstance(ds.take_batch(3, batch_format="pandas"), pd.DataFrame) - assert isinstance(ds.take_batch(3, batch_format="numpy"), np.ndarray) + assert isinstance(ds.take_batch(3, batch_format="numpy"), dict) with pytest.raises(ValueError): ray.data.range(0).take_batch() def test_take_all(ray_start_regular_shared): - assert ray.data.range(5).take_all() == [0, 1, 2, 3, 4] + assert extract_values("id", ray.data.range(5).take_all()) == [0, 1, 2, 3, 4] with pytest.raises(ValueError): assert ray.data.range(5).take_all(4) @@ -528,7 +530,7 @@ def test_iter_rows(ray_start_regular_shared): n = 10 ds = ray.data.range(n) for row, k in zip(ds.iter_rows(), range(n)): - assert row == 
k + assert row == {"id": k} # Test tabular rows. t1 = pa.Table.from_pydict({"one": [1, 2, 3], "two": [2, 3, 4]}) @@ -550,22 +552,19 @@ def to_pylist(table): # Default ArrowRows. for row, t_row in zip(ds.iter_rows(), to_pylist(t)): - assert isinstance(row, TableRow) - assert isinstance(row, ArrowRow) + assert isinstance(row, dict) assert row == t_row # PandasRows after conversion. pandas_ds = ds.map_batches(lambda x: x, batch_format="pandas") df = t.to_pandas() for row, (index, df_row) in zip(pandas_ds.iter_rows(), df.iterrows()): - assert isinstance(row, TableRow) - assert isinstance(row, PandasRow) + assert isinstance(row, dict) assert row == df_row.to_dict() # Prefetch. for row, t_row in zip(ds.iter_rows(prefetch_blocks=1), to_pylist(t)): - assert isinstance(row, TableRow) - assert isinstance(row, ArrowRow) + assert isinstance(row, dict) assert row == t_row @@ -594,14 +593,6 @@ def test_iter_batches_basic(ray_start_regular_shared): assert all(isinstance(col, np.ndarray) for col in batch.values()) pd.testing.assert_frame_equal(pd.DataFrame(batch), df) - # Numpy format (single column). - ds2 = ds.select_columns(["one"]) - for batch, df in zip(ds2.iter_batches(batch_size=None, batch_format="numpy"), dfs): - assert isinstance(batch, dict) - assert list(batch.keys()) == ["one"] - assert all(isinstance(col, np.ndarray) for col in batch.values()) - pd.testing.assert_frame_equal(pd.DataFrame(batch), df[["one"]]) - # Test NumPy format on Arrow blocks. ds2 = ds.map_batches(lambda b: b, batch_size=None, batch_format="pyarrow") for batch, df in zip(ds2.iter_batches(batch_size=None, batch_format="numpy"), dfs): @@ -610,21 +601,12 @@ def test_iter_batches_basic(ray_start_regular_shared): assert all(isinstance(col, np.ndarray) for col in batch.values()) pd.testing.assert_frame_equal(pd.DataFrame(batch), df) - # Test NumPy format on Arrow blocks (single column). 
- ds3 = ds2.select_columns(["one"]) - for batch, df in zip(ds3.iter_batches(batch_size=None, batch_format="numpy"), dfs): + # Default format -> numpy. + for batch, df in zip(ds.iter_batches(batch_size=None, batch_format="default"), dfs): assert isinstance(batch, dict) - assert list(batch.keys()) == ["one"] + assert list(batch.keys()) == ["one", "two"] assert all(isinstance(col, np.ndarray) for col in batch.values()) - pd.testing.assert_frame_equal(pd.DataFrame(batch), df[["one"]]) - - # Native format (deprecated). - for batch, df in zip(ds.iter_batches(batch_size=None, batch_format="native"), dfs): - assert BlockAccessor.for_block(batch).to_pandas().equals(df) - - # Default format. - for batch, df in zip(ds.iter_batches(batch_size=None, batch_format="default"), dfs): - assert BlockAccessor.for_block(batch).to_pandas().equals(df) + pd.testing.assert_frame_equal(pd.DataFrame(batch), df) # Batch size. batch_size = 2 @@ -734,12 +716,15 @@ def test_iter_batches_basic(ray_start_regular_shared): def test_iter_batches_empty_block(ray_start_regular_shared): ds = ray.data.range(1).repartition(10) - assert list(ds.iter_batches(batch_size=None)) == [[0]] - assert list(ds.iter_batches(batch_size=1, local_shuffle_buffer_size=1)) == [[0]] + assert str(list(ds.iter_batches(batch_size=None))) == "[{'id': array([0])}]" + assert ( + str(list(ds.iter_batches(batch_size=1, local_shuffle_buffer_size=1))) + == "[{'id': array([0])}]" + ) @pytest.mark.parametrize("pipelined", [False, True]) -@pytest.mark.parametrize("ds_format", ["arrow", "pandas", "simple"]) +@pytest.mark.parametrize("ds_format", ["arrow", "pandas"]) def test_iter_batches_local_shuffle(shutdown_only, pipelined, ds_format): # Input validation. # Batch size must be given for local shuffle. 
@@ -751,12 +736,10 @@ def test_iter_batches_local_shuffle(shutdown_only, pipelined, ds_format): ) def range(n, parallelism=200): - if ds_format == "simple": + if ds_format == "arrow": ds = ray.data.range(n, parallelism=parallelism) - elif ds_format == "arrow": - ds = ray.data.range_table(n, parallelism=parallelism) elif ds_format == "pandas": - ds = ray.data.range_table(n, parallelism=parallelism).map_batches( + ds = ray.data.range(n, parallelism=parallelism).map_batches( lambda df: df, batch_size=None, batch_format="pandas" ) if pipelined: @@ -767,16 +750,14 @@ def range(n, parallelism=200): def to_row_dicts(batch): if isinstance(batch, pd.DataFrame): - batch = batch.to_dict(orient="records") - return batch + return batch.to_dict(orient="records") + return [{"id": v} for v in batch["id"]] def unbatch(batches): return [r for batch in batches for r in to_row_dicts(batch)] def sort(r): - if ds_format == "simple": - return sorted(r) - return sorted(r, key=lambda v: v["value"]) + return sorted(r, key=lambda v: v["id"]) base = range(100).take_all() @@ -1104,19 +1085,12 @@ def test_iter_tf_batches_tensor_ds(ray_start_regular_shared, pipelined): for _ in range(num_epochs): iterations = [] for batch in ds.iter_tf_batches(batch_size=2): - iterations.append(batch) + iterations.append(batch["data"]) combined_iterations = np.concatenate(iterations) np.testing.assert_array_equal(arr, combined_iterations) def test_block_builder_for_block(ray_start_regular_shared): - # list - builder = BlockBuilder.for_block(list()) - builder.add_block([1, 2]) - assert builder.build() == [1, 2] - builder.add_block([3, 4]) - assert builder.build() == [1, 2, 3, 4] - # pandas dataframe builder = BlockBuilder.for_block(pd.DataFrame()) b1 = pd.DataFrame({"A": [1], "B": ["a"]}) @@ -1161,10 +1135,10 @@ def _to_pandas(ds): assert ds.min("A") == 0 # Test empty dataset - ds = ray.data.range_table(10) + ds = ray.data.range(10) if ds_format == "pandas": ds = _to_pandas(ds) - assert ds.filter(lambda r: 
r["value"] > 10).min("value") is None + assert ds.filter(lambda r: r["id"] > 10).min("id") is None # Test built-in global min aggregation with nans nan_ds = ray.data.from_items([{"A": x} for x in xs] + [{"A": None}]).repartition( @@ -1202,10 +1176,10 @@ def _to_pandas(ds): assert ds.max("A") == 99 # Test empty dataset - ds = ray.data.range_table(10) + ds = ray.data.range(10) if ds_format == "pandas": ds = _to_pandas(ds) - assert ds.filter(lambda r: r["value"] > 10).max("value") is None + assert ds.filter(lambda r: r["id"] > 10).max("id") is None # Test built-in global max aggregation with nans nan_ds = ray.data.from_items([{"A": x} for x in xs] + [{"A": None}]).repartition( @@ -1243,10 +1217,10 @@ def _to_pandas(ds): assert ds.mean("A") == 49.5 # Test empty dataset - ds = ray.data.range_table(10) + ds = ray.data.range(10) if ds_format == "pandas": ds = _to_pandas(ds) - assert ds.filter(lambda r: r["value"] > 10).mean("value") is None + assert ds.filter(lambda r: r["id"] > 10).mean("id") is None # Test built-in global mean aggregation with nans nan_ds = ray.data.from_items([{"A": x} for x in xs] + [{"A": None}]).repartition( @@ -1337,6 +1311,7 @@ def test_len(ray_start_regular_shared): len(ds) +@pytest.mark.skipif(STRICT_MODE, reason="Deprecated in strict mode") def test_simple_block_select(): xs = list(range(100)) block_accessor = BlockAccessor.for_block(xs) @@ -1379,7 +1354,7 @@ def test_unsupported_pyarrow_versions_check(shutdown_only, unsupported_pyarrow_v # Test Arrow-native creation APIs. # Test range_table. with pytest.raises(ImportError): - ray.data.range_table(10).take_all() + ray.data.range(10).take_all() # Test from_arrow. with pytest.raises(ImportError): @@ -1411,7 +1386,7 @@ def test_unsupported_pyarrow_versions_check_disabled( # Test Arrow-native creation APIs. # Test range_table. 
try: - ray.data.range_table(10).take_all() + ray.data.range(10).take_all() except ImportError as e: pytest.fail(f"_check_pyarrow_version failed unexpectedly: {e}") @@ -1586,9 +1561,9 @@ def flaky_mapper(x): if ray.get(count) == 1: raise ValueError("oops") else: - return ray.get(count) + return {"id": ray.get(count)} - assert sorted(ds1.map(flaky_mapper).take()) == [2, 3, 4] + assert sorted(extract_values("id", ds1.map(flaky_mapper).take())) == [2, 3, 4] with pytest.raises(ValueError): ray.data.read_datasource( @@ -1603,7 +1578,9 @@ def test_datasource(ray_start_regular): source = ray.data.datasource.RandomIntRowDatasource() assert len(ray.data.read_datasource(source, n=10, num_columns=2).take()) == 10 source = ray.data.datasource.RangeDatasource() - assert ray.data.read_datasource(source, n=10).take() == list(range(10)) + assert extract_values( + "value", ray.data.read_datasource(source, n=10).take() + ) == list(range(10)) def test_polars_lazy_import(shutdown_only): @@ -1652,26 +1629,23 @@ def f(should_import_polars): def test_batch_formats(shutdown_only): ds = ray.data.range(100) - assert ds.default_batch_format() == list - assert isinstance(next(ds.iter_batches(batch_format=None)), list) - assert isinstance(next(ds.iter_batches(batch_format="default")), list) + assert isinstance(next(ds.iter_batches(batch_format=None)), pa.Table) + assert isinstance(next(ds.iter_batches(batch_format="default")), dict) assert isinstance(next(ds.iter_batches(batch_format="pandas")), pd.DataFrame) assert isinstance(next(ds.iter_batches(batch_format="pyarrow")), pa.Table) - assert isinstance(next(ds.iter_batches(batch_format="numpy")), np.ndarray) + assert isinstance(next(ds.iter_batches(batch_format="numpy")), dict) ds = ray.data.range_tensor(100) - assert ds.default_batch_format() == np.ndarray assert isinstance(next(ds.iter_batches(batch_format=None)), pa.Table) - assert isinstance(next(ds.iter_batches(batch_format="default")), np.ndarray) + assert 
isinstance(next(ds.iter_batches(batch_format="default")), dict) assert isinstance(next(ds.iter_batches(batch_format="pandas")), pd.DataFrame) assert isinstance(next(ds.iter_batches(batch_format="pyarrow")), pa.Table) - assert isinstance(next(ds.iter_batches(batch_format="numpy")), np.ndarray) + assert isinstance(next(ds.iter_batches(batch_format="numpy")), dict) df = pd.DataFrame({"foo": ["a", "b"], "bar": [0, 1]}) ds = ray.data.from_pandas(df) - assert ds.default_batch_format() == pd.DataFrame assert isinstance(next(ds.iter_batches(batch_format=None)), pd.DataFrame) - assert isinstance(next(ds.iter_batches(batch_format="default")), pd.DataFrame) + assert isinstance(next(ds.iter_batches(batch_format="default")), dict) assert isinstance(next(ds.iter_batches(batch_format="pandas")), pd.DataFrame) assert isinstance(next(ds.iter_batches(batch_format="pyarrow")), pa.Table) assert isinstance(next(ds.iter_batches(batch_format="numpy")), dict) diff --git a/python/ray/data/tests/test_context_propagation.py b/python/ray/data/tests/test_context_propagation.py index 3774cb376f82..c7f50e1d7ec9 100644 --- a/python/ray/data/tests/test_context_propagation.py +++ b/python/ray/data/tests/test_context_propagation.py @@ -1,10 +1,12 @@ import pytest +import pandas as pd import ray from ray.tests.conftest import * # noqa from ray.data.block import BlockMetadata from ray.data.context import DataContext from ray.data.datasource import Datasource, ReadTask +from ray.data.tests.util import extract_values from ray._private.test_utils import run_string_as_driver @@ -15,30 +17,30 @@ def prepare_read(self, parallelism: int): meta = BlockMetadata( num_rows=1, size_bytes=8, schema=None, input_files=None, exec_stats=None ) - return [ReadTask(lambda: [[value]], meta)] + return [ReadTask(lambda: [pd.DataFrame({"id": [value]})], meta)] context = DataContext.get_current() context.foo = 12345 - assert ray.data.read_datasource(CustomDatasource()).take_all()[0] == 12345 + assert 
ray.data.read_datasource(CustomDatasource()).take_all()[0]["id"] == 12345 def test_map(ray_start_regular_shared): context = DataContext.get_current() context.foo = 70001 - ds = ray.data.range(1).map(lambda x: DataContext.get_current().foo) - assert ds.take_all()[0] == 70001 + ds = ray.data.range(1).map(lambda x: {"id": DataContext.get_current().foo}) + assert ds.take_all()[0]["id"] == 70001 def test_map_pipeline(ray_start_regular_shared): context = DataContext.get_current() context.foo = 8 pipe = ray.data.range(2).repeat(2) - pipe = pipe.map(lambda x: DataContext.get_current().foo) + pipe = pipe.map(lambda x: {"id": DataContext.get_current().foo}) [a, b] = pipe.split(2) @ray.remote def fetch(shard): - return shard.take_all() + return extract_values("id", shard.take_all()) assert ray.get([fetch.remote(a), fetch.remote(b)]) == [[8, 8], [8, 8]] @@ -46,24 +48,26 @@ def fetch(shard): def test_flat_map(ray_start_regular_shared): context = DataContext.get_current() context.foo = 70002 - ds = ray.data.range(1).flat_map(lambda x: [DataContext.get_current().foo]) - assert ds.take_all()[0] == 70002 + ds = ray.data.range(1).flat_map(lambda x: [{"id": DataContext.get_current().foo}]) + assert ds.take_all()[0]["id"] == 70002 def test_map_batches(ray_start_regular_shared): context = DataContext.get_current() context.foo = 70003 - ds = ray.data.range(1).map_batches(lambda x: [DataContext.get_current().foo]) - assert ds.take_all()[0] == 70003 + ds = ray.data.range(1).map_batches( + lambda x: {"id": [DataContext.get_current().foo]} + ) + assert ds.take_all()[0]["id"] == 70003 def test_filter(shutdown_only): context = DataContext.get_current() context.foo = 70004 ds = ray.data.from_items([70004]).filter( - lambda x: x == DataContext.get_current().foo + lambda x: x["item"] == DataContext.get_current().foo ) - assert ds.take_all()[0] == 70004 + assert ds.take_all()[0]["item"] == 70004 def test_context_placement_group(): @@ -88,8 +92,8 @@ def test_context_placement_group(): 
context.scheduling_strategy = PlacementGroupSchedulingStrategy(placement_group) pipe = ray.data.range(100, parallelism=2) \ .window(blocks_per_window=1) \ - .map(lambda x: x + 1) -assert pipe.take_all() == list(range(1, 101)) + .map(lambda x: {"id": x["id"] + 1}) +assert pipe.take_all() == [{"id": x} for x in range(1, 101)] placement_group_assert_no_leak([placement_group]) ray.shutdown() """ diff --git a/python/ray/data/tests/test_dynamic_block_split.py b/python/ray/data/tests/test_dynamic_block_split.py index 3fb95ae8737f..75eaf1d9c24a 100644 --- a/python/ray/data/tests/test_dynamic_block_split.py +++ b/python/ray/data/tests/test_dynamic_block_split.py @@ -8,7 +8,6 @@ import ray from ray.data._internal.lazy_block_list import LazyBlockList from ray.data.block import BlockMetadata -from ray.data.context import DataContext from ray.data.datasource import Datasource from ray.data.datasource.csv_datasource import CSVDatasource from ray.data.datasource.datasource import ReadTask, Reader @@ -113,18 +112,7 @@ def test_enable_in_ray_client(ray_start_cluster_enabled): "compute", [ "tasks", - # TODO(Clark): Remove skip for old execution backend once the old execution - # backend is removed. - pytest.param( - "actors", - marks=pytest.mark.skipif( - not DataContext.get_current().new_execution_backend, - reason=( - "Dynamic block splitting for the actor compute strategy is only " - "enabled for the new execution backend." 
- ), - ), - ), + "actors", ], ) def test_dataset( @@ -133,6 +121,10 @@ def test_dataset( target_max_block_size, compute, ): + if compute == "tasks": + compute = ray.data._internal.compute.TaskPoolStrategy() + else: + compute = ray.data.ActorPoolStrategy() ray.shutdown() # We need at least 2 CPUs to run a actorpool streaming ray.init(num_cpus=2) @@ -193,7 +185,7 @@ def test_dataset( assert len(ds.take(5)) == 5 assert len(ds.take_all()) == num_blocks_per_task * num_tasks for batch in ds.iter_batches(batch_size=10): - assert len(batch) == 10 + assert len(batch["one"]) == 10 def test_dataset_pipeline( @@ -217,7 +209,7 @@ def test_dataset_pipeline( dsp = dsp.map_batches(lambda x: x) result_batches = list(ds.iter_batches(batch_size=5)) for batch in result_batches: - assert len(batch) == 5 + assert len(batch["one"]) == 5 assert len(result_batches) == num_blocks_per_task * num_tasks / 5 dsp = ds.window(blocks_per_window=2) diff --git a/python/ray/data/tests/test_ecosystem.py b/python/ray/data/tests/test_ecosystem.py index 34abede73e94..8fb1fa71cf7f 100644 --- a/python/ray/data/tests/test_ecosystem.py +++ b/python/ray/data/tests/test_ecosystem.py @@ -80,7 +80,7 @@ def test_to_dask_tensor_column_cast_pandas(ray_start_regular_shared): ctx.enable_tensor_extension_casting = True in_df = pd.DataFrame({"a": TensorArray(data)}) ds = ray.data.from_pandas(in_df) - dtypes = ds.schema().types + dtypes = ds.schema().base_schema.types assert len(dtypes) == 1 assert isinstance(dtypes[0], TensorDtype) out_df = ds.to_dask().compute() @@ -101,7 +101,7 @@ def test_to_dask_tensor_column_cast_arrow(ray_start_regular_shared): ctx.enable_tensor_extension_casting = True in_table = pa.table({"a": ArrowTensorArray.from_numpy(data)}) ds = ray.data.from_arrow(in_table) - dtype = ds.schema().field(0).type + dtype = ds.schema().base_schema.field(0).type assert isinstance(dtype, ArrowTensorType) out_df = ds.to_dask().compute() assert out_df["a"].dtype.type is np.object_ diff --git 
a/python/ray/data/tests/test_execution_optimizer.py b/python/ray/data/tests/test_execution_optimizer.py index 068a7849d953..6e2b18423a09 100644 --- a/python/ray/data/tests/test_execution_optimizer.py +++ b/python/ray/data/tests/test_execution_optimizer.py @@ -54,6 +54,7 @@ from ray.data.datasource.parquet_datasource import ParquetDatasource from ray.data.tests.conftest import * # noqa +from ray.data.tests.util import extract_values, named_values, column_udf from ray.tests.conftest import * # noqa @@ -96,7 +97,7 @@ def test_from_items_operator(ray_start_regular_shared, enable_optimizer): def test_from_items_e2e(ray_start_regular_shared, enable_optimizer): data = ["Hello", "World"] ds = ray.data.from_items(data) - assert ds.take_all() == data, ds + assert ds.take_all() == named_values("item", data), ds # Check that metadata fetch is included in stats. assert "FromItems" in ds.stats() @@ -122,8 +123,8 @@ def test_map_batches_operator(ray_start_regular_shared, enable_optimizer): def test_map_batches_e2e(ray_start_regular_shared, enable_optimizer): ds = ray.data.range(5) - ds = ds.map_batches(lambda x: x) - assert ds.take_all() == list(range(5)), ds + ds = ds.map_batches(column_udf("id", lambda x: x)) + assert extract_values("id", ds.take_all()) == list(range(5)), ds _check_usage_record(["ReadRange", "MapBatches"]) @@ -145,8 +146,8 @@ def test_map_rows_operator(ray_start_regular_shared, enable_optimizer): def test_map_rows_e2e(ray_start_regular_shared, enable_optimizer): ds = ray.data.range(5) - ds = ds.map(lambda x: x + 1) - assert ds.take_all() == [1, 2, 3, 4, 5], ds + ds = ds.map(column_udf("id", lambda x: x + 1)) + assert extract_values("id", ds.take_all()) == [1, 2, 3, 4, 5], ds _check_usage_record(["ReadRange", "MapRows"]) @@ -168,8 +169,8 @@ def test_filter_operator(ray_start_regular_shared, enable_optimizer): def test_filter_e2e(ray_start_regular_shared, enable_optimizer): ds = ray.data.range(5) - ds = ds.filter(fn=lambda x: x % 2 == 0) - assert ds.take_all() == 
[0, 2, 4], ds + ds = ds.filter(fn=lambda x: x["id"] % 2 == 0) + assert extract_values("id", ds.take_all()) == [0, 2, 4], ds _check_usage_record(["ReadRange", "Filter"]) @@ -191,15 +192,15 @@ def test_flat_map(ray_start_regular_shared, enable_optimizer): def test_flat_map_e2e(ray_start_regular_shared, enable_optimizer): ds = ray.data.range(2) - ds = ds.flat_map(fn=lambda x: [x, x]) - assert ds.take_all() == [0, 0, 1, 1], ds + ds = ds.flat_map(fn=lambda x: [{"id": x["id"]}, {"id": x["id"]}]) + assert extract_values("id", ds.take_all()) == [0, 0, 1, 1], ds _check_usage_record(["ReadRange", "FlatMap"]) def test_column_ops_e2e(ray_start_regular_shared, enable_optimizer): ds = ray.data.range(2) ds = ds.add_column(fn=lambda df: df.iloc[:, 0], col="new_col") - assert ds.take_all() == [{"value": 0, "new_col": 0}, {"value": 1, "new_col": 1}], ds + assert ds.take_all() == [{"id": 0, "new_col": 0}, {"id": 1, "new_col": 1}], ds _check_usage_record(["ReadRange", "MapBatches"]) select_ds = ds.select_columns(cols=["new_col"]) @@ -207,7 +208,7 @@ def test_column_ops_e2e(ray_start_regular_shared, enable_optimizer): _check_usage_record(["ReadRange", "MapBatches"]) ds = ds.drop_columns(cols=["new_col"]) - assert ds.take_all() == [{"value": 0}, {"value": 1}], ds + assert ds.take_all() == [{"id": 0}, {"id": 1}], ds _check_usage_record(["ReadRange", "MapBatches"]) @@ -223,7 +224,7 @@ def ensure_sample_size_close(dataset, sample_percent=0.5): ds = ray.data.range(10, parallelism=2) ensure_sample_size_close(ds) - ds = ray.data.range_table(10, parallelism=2) + ds = ray.data.range(10, parallelism=2) ensure_sample_size_close(ds) ds = ray.data.range_tensor(5, parallelism=2, shape=(2, 2)) @@ -252,8 +253,8 @@ def test_random_shuffle_e2e( ray_start_regular_shared, enable_optimizer, use_push_based_shuffle ): ds = ray.data.range(12, parallelism=4) - r1 = ds.random_shuffle(seed=0).take_all() - r2 = ds.random_shuffle(seed=1024).take_all() + r1 = extract_values("id", 
ds.random_shuffle(seed=0).take_all()) + r2 = extract_values("id", ds.random_shuffle(seed=1024).take_all()) assert r1 != r2, (r1, r2) assert sorted(r1) == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11], r1 assert sorted(r2) == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11], r2 @@ -286,10 +287,6 @@ def test_repartition_e2e( ): def _check_repartition_usage_and_stats(ds): _check_usage_record(["ReadRange", "Repartition"]) - - blocks = ray.get(ds.get_internal_block_refs()) - assert all(isinstance(block, list) for block in blocks), blocks - ds_stats: DatastreamStats = ds._plan.stats() assert ds_stats.base_name == "Repartition" if shuffle: @@ -426,8 +423,8 @@ def test_read_map_batches_operator_fusion_compute_tasks_to_actors( # the former comes before the latter. planner = Planner() read_op = Read(ParquetDatasource()) - op = MapBatches(read_op, lambda x: x, compute="tasks") - op = MapBatches(op, lambda x: x, compute="actors") + op = MapBatches(read_op, lambda x: x) + op = MapBatches(op, lambda x: x, compute=ray.data.ActorPoolStrategy()) logical_plan = LogicalPlan(op) physical_plan = planner.plan(logical_plan) physical_plan = PhysicalOptimizer().optimize(physical_plan) @@ -446,7 +443,7 @@ def test_read_map_batches_operator_fusion_compute_read_to_actors( # Test that reads fuse into an actor-based map operator. planner = Planner() read_op = Read(ParquetDatasource()) - op = MapBatches(read_op, lambda x: x, compute="actors") + op = MapBatches(read_op, lambda x: x, compute=ray.data.ActorPoolStrategy()) logical_plan = LogicalPlan(op) physical_plan = planner.plan(logical_plan) physical_plan = PhysicalOptimizer().optimize(physical_plan) @@ -465,8 +462,8 @@ def test_read_map_batches_operator_fusion_incompatible_compute( # Test that map operators are not fused when compute strategies are incompatible. 
planner = Planner() read_op = Read(ParquetDatasource()) - op = MapBatches(read_op, lambda x: x, compute="actors") - op = MapBatches(op, lambda x: x, compute="tasks") + op = MapBatches(read_op, lambda x: x, compute=ray.data.ActorPoolStrategy()) + op = MapBatches(op, lambda x: x) logical_plan = LogicalPlan(op) physical_plan = planner.plan(logical_plan) physical_plan = PhysicalOptimizer().optimize(physical_plan) @@ -518,8 +515,8 @@ class UDF: def __call__(self, x): return x - op = MapBatches(read_op, UDF, compute="actors") - op = MapBatches(op, UDF, compute="actors") + op = MapBatches(read_op, UDF, compute=ray.data.ActorPoolStrategy()) + op = MapBatches(op, UDF, compute=ray.data.ActorPoolStrategy()) logical_plan = LogicalPlan(op) physical_plan = planner.plan(logical_plan) physical_plan = PhysicalOptimizer().optimize(physical_plan) @@ -547,8 +544,8 @@ class UDF2: def __call__(self, x): return x + 1 - op = MapBatches(read_op, UDF, compute="actors") - op = MapBatches(op, UDF2, compute="actors") + op = MapBatches(read_op, UDF, compute=ray.data.ActorPoolStrategy()) + op = MapBatches(op, UDF2, compute=ray.data.ActorPoolStrategy()) logical_plan = LogicalPlan(op) physical_plan = planner.plan(logical_plan) physical_plan = PhysicalOptimizer().optimize(physical_plan) @@ -579,10 +576,18 @@ def __init__(self, a): def __call__(self, x): return x + self._a - op = MapBatches(read_op, UDF, compute="actors", fn_constructor_args=(1,)) - op = MapBatches(op, UDF, compute="actors", fn_constructor_args=(2,)) - op = MapBatches(op, UDF, compute="actors", fn_constructor_kwargs={"a": 1}) - op = MapBatches(op, UDF, compute="actors", fn_constructor_kwargs={"a": 2}) + op = MapBatches( + read_op, UDF, compute=ray.data.ActorPoolStrategy(), fn_constructor_args=(1,) + ) + op = MapBatches( + op, UDF, compute=ray.data.ActorPoolStrategy(), fn_constructor_args=(2,) + ) + op = MapBatches( + op, UDF, compute=ray.data.ActorPoolStrategy(), fn_constructor_kwargs={"a": 1} + ) + op = MapBatches( + op, UDF, 
compute=ray.data.ActorPoolStrategy(), fn_constructor_kwargs={"a": 2} + ) logical_plan = LogicalPlan(op) physical_plan = planner.plan(logical_plan) physical_plan = PhysicalOptimizer().optimize(physical_plan) @@ -604,11 +609,24 @@ def __call__(self, x): def test_read_map_chain_operator_fusion_e2e(ray_start_regular_shared, enable_optimizer): ds = ray.data.range(10, parallelism=2) - ds = ds.filter(lambda x: x % 2 == 0) - ds = ds.map(lambda x: x + 1) - ds = ds.map_batches(lambda batch: [2 * x for x in batch], batch_size=None) - ds = ds.flat_map(lambda x: [-x, x]) - assert ds.take_all() == [-2, 2, -6, 6, -10, 10, -14, 14, -18, 18] + ds = ds.filter(lambda x: x["id"] % 2 == 0) + ds = ds.map(column_udf("id", lambda x: x + 1)) + ds = ds.map_batches( + lambda batch: {"id": [2 * x for x in batch["id"]]}, batch_size=None + ) + ds = ds.flat_map(lambda x: [{"id": -x["id"]}, {"id": x["id"]}]) + assert extract_values("id", ds.take_all()) == [ + -2, + 2, + -6, + 6, + -10, + 10, + -14, + 14, + -18, + 18, + ] name = "DoRead->Filter->MapRows->MapBatches->FlatMap:" assert name in ds.stats() _check_usage_record(["ReadRange", "Filter", "MapRows", "MapBatches", "FlatMap"]) @@ -660,8 +678,8 @@ def test_sort_e2e( ): ds = ray.data.range(100, parallelism=4) ds = ds.random_shuffle() - ds = ds.sort() - assert ds.take_all() == list(range(100)) + ds = ds.sort("id") + assert extract_values("id", ds.take_all()) == list(range(100)) _check_usage_record(["ReadRange", "RandomShuffle", "Sort"]) # TODO: write_XXX and from_XXX are not supported yet in new execution plan. 
@@ -688,13 +706,11 @@ def test_sort_validate_keys( enable_optimizer, ): ds = ray.data.range(10) - assert ds.sort().take_all() == list(range(10)) + assert extract_values("id", ds.sort("id").take_all()) == list(range(10)) invalid_col_name = "invalid_column" with pytest.raises( - ValueError, - match=f"String key '{invalid_col_name}' requires datastream format to be " - "'arrow' or 'pandas', was 'simple'", + ValueError, match=f"The column '{invalid_col_name}' does not exist" ): ds.sort(invalid_col_name).take_all() @@ -719,16 +735,6 @@ def test_sort_validate_keys( ): ds_named.sort(invalid_col_name).take_all() - def dummy_sort_fn(x): - return x - - with pytest.raises( - ValueError, - match=f"Callable key '{dummy_sort_fn}' requires datastream format to be " - "'simple'", - ): - ds_named.sort(dummy_sort_fn).take_all() - def test_aggregate_operator(ray_start_regular_shared, enable_optimizer): planner = Planner() @@ -752,11 +758,11 @@ def test_aggregate_e2e( enable_optimizer, use_push_based_shuffle, ): - ds = ray.data.range_table(100, parallelism=4) - ds = ds.groupby("value").count() + ds = ray.data.range(100, parallelism=4) + ds = ds.groupby("id").count() assert ds.count() == 100 - for idx, row in enumerate(ds.sort("value").iter_rows()): - assert row.as_pydict() == {"value": idx, "count()": 1} + for idx, row in enumerate(ds.sort("id").iter_rows()): + assert row == {"id": idx, "count()": 1} _check_usage_record(["ReadRange", "Aggregate"]) @@ -765,14 +771,9 @@ def test_aggregate_validate_keys( enable_optimizer, ): ds = ray.data.range(10) - # Test case with key=None, i.e. grouped into a single group. 
- assert ds.groupby(key=None).count().take_all() == [(10,)] - invalid_col_name = "invalid_column" with pytest.raises( - ValueError, - match=f"String key '{invalid_col_name}' requires datastream format to be " - "'arrow' or 'pandas', was 'simple'", + ValueError, match=f"The column '{invalid_col_name}' does not exist" ): ds.groupby(invalid_col_name).count() @@ -804,16 +805,6 @@ def test_aggregate_validate_keys( ): ds_named.groupby(invalid_col_name).count() - def dummy_sort_fn(x): - return x - - with pytest.raises( - ValueError, - match=f"Callable key '{dummy_sort_fn}' requires datastream format to be " - "'simple'", - ): - ds_named.groupby(dummy_sort_fn).count() - def test_zip_operator(ray_start_regular_shared, enable_optimizer): planner = Planner() @@ -837,9 +828,11 @@ def test_zip_operator(ray_start_regular_shared, enable_optimizer): def test_zip_e2e(ray_start_regular_shared, enable_optimizer, num_blocks1, num_blocks2): n = 12 ds1 = ray.data.range(n, parallelism=num_blocks1) - ds2 = ray.data.range(n, parallelism=num_blocks2).map(lambda x: x + 1) + ds2 = ray.data.range(n, parallelism=num_blocks2).map( + column_udf("id", lambda x: x + 1) + ) ds = ds1.zip(ds2) - assert ds.take() == list(zip(range(n), range(1, n + 1))) + assert ds.take() == named_values(["id", "id_1"], zip(range(n), range(1, n + 1))) _check_usage_record(["ReadRange", "Zip"]) @@ -1018,7 +1011,7 @@ def test_from_numpy_refs_e2e(ray_start_regular_shared, enable_optimizer): arr2 = np.expand_dims(np.arange(4, 8), axis=1) ds = ray.data.from_numpy_refs([ray.put(arr1), ray.put(arr2)]) - values = np.stack(ds.take(8)) + values = np.stack(extract_values("data", ds.take(8))) np.testing.assert_array_equal(values, np.concatenate((arr1, arr2))) # Check that conversion task is included in stats. 
assert "FromNumpyRefs" in ds.stats() @@ -1027,7 +1020,7 @@ def test_from_numpy_refs_e2e(ray_start_regular_shared, enable_optimizer): # Test chaining multiple operations ds2 = ds.map_batches(lambda x: x) - values = np.stack(ds2.take(8)) + values = np.stack(extract_values("data", ds2.take(8))) np.testing.assert_array_equal(values, np.concatenate((arr1, arr2))) assert "MapBatches" in ds2.stats() assert "FromNumpyRefs" in ds2.stats() @@ -1036,7 +1029,7 @@ def test_from_numpy_refs_e2e(ray_start_regular_shared, enable_optimizer): # Test from single NumPy ndarray. ds = ray.data.from_numpy_refs(ray.put(arr1)) - values = np.stack(ds.take(4)) + values = np.stack(extract_values("data", ds.take(4))) np.testing.assert_array_equal(values, arr1) # Check that conversion task is included in stats. assert "FromNumpyRefs" in ds.stats() @@ -1172,7 +1165,7 @@ def test_from_tf_e2e(ray_start_regular_shared, enable_optimizer): ray_dataset = ray.data.from_tf(tf_dataset) - actual_data = ray_dataset.take_all() + actual_data = extract_values("item", ray_dataset.take_all()) expected_data = list(tf_dataset) assert len(actual_data) == len(expected_data) for (expected_features, expected_label), (actual_features, actual_label) in zip( @@ -1212,7 +1205,7 @@ def test_from_torch_e2e(ray_start_regular_shared, enable_optimizer, tmp_path): expected_data = list(torch_dataset) actual_data = list(ray_dataset.take_all()) - assert actual_data == expected_data + assert extract_values("item", actual_data) == expected_data # Check that metadata fetch is included in stats. 
assert "FromItems" in ray_dataset.stats() @@ -1242,7 +1235,7 @@ def test_execute_to_legacy_block_list( assert ds._plan._snapshot_stats is None for i, row in enumerate(ds.iter_rows()): - assert row == i + assert row["id"] == i assert ds._plan._snapshot_stats is not None assert "DoRead" in ds._plan._snapshot_stats.stages @@ -1271,12 +1264,13 @@ def test_streaming_executor( ): ds = ray.data.range(100, parallelism=4) ds = ds.map_batches(lambda x: x) - ds = ds.filter(lambda x: x > 0) + ds = ds.filter(lambda x: x["id"] > 0) ds = ds.random_shuffle() ds = ds.map_batches(lambda x: x) result = [] for batch in ds.iter_batches(batch_size=3): + batch = batch["id"] assert len(batch) == 3, batch result.extend(batch) assert sorted(result) == list(range(1, 100)), result diff --git a/python/ray/data/tests/test_executor_resource_management.py b/python/ray/data/tests/test_executor_resource_management.py index 4a26cb58e084..d00b28e5b5c3 100644 --- a/python/ray/data/tests/test_executor_resource_management.py +++ b/python/ray/data/tests/test_executor_resource_management.py @@ -13,7 +13,7 @@ from ray.data.tests.conftest import * # noqa -SMALL_STR = "hello" * 12 +SMALL_STR = "hello" * 120 def test_resource_utils(ray_start_10_cpus_shared): @@ -97,7 +97,7 @@ def test_task_pool_resource_reporting(ray_start_10_cpus_shared): usage = op.current_resource_usage() assert usage.cpu == 2, usage assert usage.gpu == 0, usage - assert usage.object_store_memory == pytest.approx(128, rel=0.5), usage + assert usage.object_store_memory == pytest.approx(1280, rel=0.5), usage def test_task_pool_resource_reporting_with_bundling(ray_start_10_cpus_shared): @@ -119,20 +119,20 @@ def test_task_pool_resource_reporting_with_bundling(ray_start_10_cpus_shared): assert usage.cpu == 0, usage assert usage.gpu == 0, usage # Queued bundles (in bundler) still count against object storage usage. 
- assert usage.object_store_memory == pytest.approx(80, rel=0.5), usage + assert usage.object_store_memory == pytest.approx(800, rel=0.5), usage op.add_input(input_op.get_next(), 0) usage = op.current_resource_usage() # No tasks submitted yet due to bundling. assert usage.cpu == 0, usage assert usage.gpu == 0, usage # Queued bundles (in bundler) still count against object storage usage. - assert usage.object_store_memory == pytest.approx(160, rel=0.5), usage + assert usage.object_store_memory == pytest.approx(1600, rel=0.5), usage op.add_input(input_op.get_next(), 0) usage = op.current_resource_usage() # Task has now been submitted since we've met the minimum bundle size. assert usage.cpu == 1, usage assert usage.gpu == 0, usage - assert usage.object_store_memory == pytest.approx(240, rel=0.5), usage + assert usage.object_store_memory == pytest.approx(2400, rel=0.5), usage def test_actor_pool_resource_reporting(ray_start_10_cpus_shared): @@ -163,13 +163,13 @@ def test_actor_pool_resource_reporting(ray_start_10_cpus_shared): assert usage.cpu == 2, usage assert usage.gpu == 0, usage # Queued bundles still count against object store usage. - assert usage.object_store_memory == pytest.approx((i + 1) * 80, rel=0.5), usage + assert usage.object_store_memory == pytest.approx((i + 1) * 800, rel=0.5), usage # Pool is still idle while waiting for actors to start. usage = op.current_resource_usage() assert usage.cpu == 2, usage assert usage.gpu == 0, usage # Queued bundles still count against object store usage. - assert usage.object_store_memory == pytest.approx(320, rel=0.5), usage + assert usage.object_store_memory == pytest.approx(3200, rel=0.5), usage # Wait for actors to start. work_refs = op.get_work_refs() @@ -189,7 +189,7 @@ def test_actor_pool_resource_reporting(ray_start_10_cpus_shared): assert usage.cpu == 2, usage assert usage.gpu == 0, usage # Now that tasks have been submitted, object store memory is accounted for. 
- assert usage.object_store_memory == pytest.approx(256, rel=0.5), usage + assert usage.object_store_memory == pytest.approx(2560, rel=0.5), usage # Indicate that no more inputs will arrive. op.inputs_done() @@ -206,7 +206,7 @@ def test_actor_pool_resource_reporting(ray_start_10_cpus_shared): usage = op.current_resource_usage() assert usage.cpu == 0, usage assert usage.gpu == 0, usage - assert usage.object_store_memory == pytest.approx(550, rel=0.5), usage + assert usage.object_store_memory == pytest.approx(5500, rel=0.5), usage # Consume task outputs. while op.has_next(): @@ -248,13 +248,13 @@ def test_actor_pool_resource_reporting_with_bundling(ray_start_10_cpus_shared): assert usage.cpu == 2, usage assert usage.gpu == 0, usage # Queued bundles still count against object store usage. - assert usage.object_store_memory == pytest.approx((i + 1) * 80, rel=0.5), usage + assert usage.object_store_memory == pytest.approx((i + 1) * 800, rel=0.5), usage # Pool is still idle while waiting for actors to start. usage = op.current_resource_usage() assert usage.cpu == 2, usage assert usage.gpu == 0, usage # Queued bundles still count against object store usage. - assert usage.object_store_memory == pytest.approx(320, rel=0.5), usage + assert usage.object_store_memory == pytest.approx(3200, rel=0.5), usage # Wait for actors to start. work_refs = op.get_work_refs() @@ -273,7 +273,7 @@ def test_actor_pool_resource_reporting_with_bundling(ray_start_10_cpus_shared): usage = op.current_resource_usage() assert usage.cpu == 2, usage assert usage.gpu == 0, usage - assert usage.object_store_memory == pytest.approx(320, rel=0.5), usage + assert usage.object_store_memory == pytest.approx(3200, rel=0.5), usage # Indicate that no more inputs will arrive. 
op.inputs_done() @@ -290,7 +290,7 @@ def test_actor_pool_resource_reporting_with_bundling(ray_start_10_cpus_shared): usage = op.current_resource_usage() assert usage.cpu == 0, usage assert usage.gpu == 0, usage - assert usage.object_store_memory == pytest.approx(550, rel=0.5), usage + assert usage.object_store_memory == pytest.approx(5500, rel=0.5), usage # Consume task outputs. while op.has_next(): diff --git a/python/ray/data/tests/test_formats.py b/python/ray/data/tests/test_formats.py index 6f48658a041c..73e89fe84668 100644 --- a/python/ray/data/tests/test_formats.py +++ b/python/ray/data/tests/test_formats.py @@ -1,5 +1,5 @@ import os -from typing import List, Union +from typing import List import pandas as pd import pyarrow as pa @@ -13,7 +13,6 @@ import ray from ray._private.test_utils import wait_for_condition -from ray.data._internal.arrow_block import ArrowRow from ray.data._internal.execution.interfaces import TaskContext from ray.data.block import Block, BlockAccessor from ray.data.datasource import ( @@ -24,6 +23,7 @@ from ray.data.tests.conftest import * # noqa from ray.data.tests.mock_http_server import * # noqa +from ray.data.tests.util import extract_values from ray.tests.conftest import * # noqa from ray.types import ObjectRef from typing import Iterable @@ -82,17 +82,7 @@ def test_from_arrow_refs(ray_start_regular_shared): def test_to_arrow_refs(ray_start_regular_shared): n = 5 - - # Zero-copy. - df = pd.DataFrame({"value": list(range(n))}) - ds = ray.data.range_table(n) - dfds = pd.concat( - [t.to_pandas() for t in ray.get(ds.to_arrow_refs())], ignore_index=True - ) - assert df.equals(dfds) - - # Conversion. 
- df = pd.DataFrame({"value": list(range(n))}) + df = pd.DataFrame({"id": list(range(n))}) ds = ray.data.range(n) dfds = pd.concat( [t.to_pandas() for t in ray.get(ds.to_arrow_refs())], ignore_index=True @@ -105,7 +95,7 @@ def test_get_internal_block_refs(ray_start_regular_shared): assert len(blocks) == 10 out = [] for b in ray.get(blocks): - out.extend(list(BlockAccessor.for_block(b).iter_rows())) + out.extend(extract_values("id", BlockAccessor.for_block(b).iter_rows(True))) out = sorted(out) assert out == list(range(10)), out @@ -203,7 +193,7 @@ def test_from_tf(ray_start_regular_shared): ray_dataset = ray.data.from_tf(tf_dataset) - actual_data = ray_dataset.take_all() + actual_data = extract_values("item", ray_dataset.take_all()) expected_data = list(tf_dataset) assert len(actual_data) == len(expected_data) for (expected_features, expected_label), (actual_features, actual_label) in zip( @@ -219,11 +209,11 @@ def test_from_torch(shutdown_only, tmp_path): ray_dataset = ray.data.from_torch(torch_dataset) - actual_data = list(ray_dataset.take_all()) + actual_data = extract_values("item", list(ray_dataset.take_all())) assert actual_data == expected_data -class NodeLoggerOutputDatasource(Datasource[Union[ArrowRow, int]]): +class NodeLoggerOutputDatasource(Datasource): """A writable datasource that logs node IDs of write tasks, for testing.""" def __init__(self): diff --git a/python/ray/data/tests/test_image.py b/python/ray/data/tests/test_image.py index 4b0c9e265f82..17a3f911de7f 100644 --- a/python/ray/data/tests/test_image.py +++ b/python/ray/data/tests/test_image.py @@ -162,7 +162,7 @@ def test_e2e_prediction(self, shutdown_only): transform = transforms.ToTensor() def preprocess(batch: Dict[str, np.ndarray]): - return np.stack([transform(image) for image in batch["image"]]) + return {"out": np.stack([transform(image) for image in batch["image"]])} dataset = dataset.map_batches(preprocess, batch_format="numpy") diff --git a/python/ray/data/tests/test_iterator.py 
b/python/ray/data/tests/test_iterator.py index e6b57154c0fd..0f7f9e02edc2 100644 --- a/python/ray/data/tests/test_iterator.py +++ b/python/ray/data/tests/test_iterator.py @@ -29,7 +29,8 @@ def test_basic_dataset(ray_start_regular_shared): for _ in range(2): result = [] for batch in it.iter_batches(): - result += batch + batch = batch["id"] + result += batch.tolist() assert result == list(range(100)) # TODO(swang): This check currently fails nondeterministically because @@ -44,6 +45,7 @@ def test_basic_dataset_iter_rows(ray_start_regular_shared): for _ in range(2): result = [] for row in it.iter_rows(): + row = row["id"] result.append(row) assert result == list(range(100)) @@ -59,6 +61,7 @@ def test_basic_dataset_pipeline(ray_start_regular_shared): for _ in range(2): result = [] for batch in it.iter_batches(): + batch = batch["id"].tolist() result += batch assert result == list(range(100)) @@ -71,6 +74,7 @@ def test_basic_dataset_pipeline_iter_rows(ray_start_regular_shared): for _ in range(2): result = [] for row in it.iter_rows(): + row = row["id"] result.append(row) assert result == list(range(100)) @@ -78,9 +82,9 @@ def test_basic_dataset_pipeline_iter_rows(ray_start_regular_shared): def test_tf_conversion(ray_start_regular_shared): - ds = ray.data.range_table(5) + ds = ray.data.range(5) it = ds.iterator() - tf_dataset = it.to_tf("value", "value") + tf_dataset = it.to_tf("id", "id") for i, row in enumerate(tf_dataset): assert all(row[0] == i) assert all(row[1] == i) @@ -89,30 +93,30 @@ def test_tf_conversion(ray_start_regular_shared): def test_tf_e2e(ray_start_regular_shared): - ds = ray.data.range_table(5) + ds = ray.data.range(5) it = ds.iterator() model = build_model() - model.fit(it.to_tf("value", "value"), epochs=3) + model.fit(it.to_tf("id", "id"), epochs=3) def test_tf_e2e_pipeline(ray_start_regular_shared): - ds = ray.data.range_table(5).repeat(2) + ds = ray.data.range(5).repeat(2) it = ds.iterator() model = build_model() - model.fit(it.to_tf("value", 
"value"), epochs=2) + model.fit(it.to_tf("id", "id"), epochs=2) - ds = ray.data.range_table(5).repeat(2) + ds = ray.data.range(5).repeat(2) it = ds.iterator() model = build_model() # 3 epochs fails since we only repeated twice. with pytest.raises(Exception, match=r"generator raised StopIteration"): - model.fit(it.to_tf("value", "value"), epochs=3) + model.fit(it.to_tf("id", "id"), epochs=3) def test_tf_conversion_pipeline(ray_start_regular_shared): - ds = ray.data.range_table(5).repeat(2) + ds = ray.data.range(5).repeat(2) it = ds.iterator() - tf_dataset = it.to_tf("value", "value") + tf_dataset = it.to_tf("id", "id") for i, row in enumerate(tf_dataset): assert all(row[0] == i) assert all(row[1] == i) @@ -120,7 +124,7 @@ def test_tf_conversion_pipeline(ray_start_regular_shared): assert isinstance(row[1], tf.Tensor) # Repeated twice. - tf_dataset = it.to_tf("value", "value") + tf_dataset = it.to_tf("id", "id") for i, row in enumerate(tf_dataset): assert all(row[0] == i) assert all(row[1] == i) @@ -129,32 +133,32 @@ def test_tf_conversion_pipeline(ray_start_regular_shared): # Fails on third try. with pytest.raises(Exception, match=r"generator raised StopIteration"): - tf_dataset = it.to_tf("value", "value") + tf_dataset = it.to_tf("id", "id") for _ in tf_dataset: pass def test_torch_conversion(ray_start_regular_shared): - ds = ray.data.range_table(5) + ds = ray.data.range(5) it = ds.iterator() for batch in it.iter_torch_batches(): - assert isinstance(batch["value"], torch.Tensor) - assert batch["value"].tolist() == list(range(5)) + assert isinstance(batch["id"], torch.Tensor) + assert batch["id"].tolist() == list(range(5)) def test_torch_conversion_pipeline(ray_start_regular_shared): - ds = ray.data.range_table(5).repeat(2) + ds = ray.data.range(5).repeat(2) it = ds.iterator() # First epoch. 
for batch in it.iter_torch_batches(): - assert isinstance(batch["value"], torch.Tensor) - assert batch["value"].tolist() == list(range(5)) + assert isinstance(batch["id"], torch.Tensor) + assert batch["id"].tolist() == list(range(5)) # Second epoch. for batch in it.iter_torch_batches(): - assert isinstance(batch["value"], torch.Tensor) - assert batch["value"].tolist() == list(range(5)) + assert isinstance(batch["id"], torch.Tensor) + assert batch["id"].tolist() == list(range(5)) # Fails on third iteration. with pytest.raises(Exception, match=r"generator raised StopIteration"): @@ -164,9 +168,9 @@ def test_torch_conversion_pipeline(ray_start_regular_shared): def test_torch_conversion_collate_fn(ray_start_regular_shared): def collate_fn(batch: Dict[str, np.ndarray]): - return torch.as_tensor(batch["value"] + 5) + return torch.as_tensor(batch["id"] + 5) - ds = ray.data.range_table(5) + ds = ray.data.range(5) it = ds.iterator() for batch in it.iter_torch_batches(collate_fn=collate_fn): assert isinstance(batch, torch.Tensor) diff --git a/python/ray/data/tests/test_map.py b/python/ray/data/tests/test_map.py index c71b4d973cd1..4a7c2ca9cc0e 100644 --- a/python/ray/data/tests/test_map.py +++ b/python/ray/data/tests/test_map.py @@ -6,6 +6,7 @@ import time from typing import Iterator +import numpy as np import pandas as pd import pyarrow as pa import pyarrow.parquet as pq @@ -16,6 +17,7 @@ from ray.data.block import BlockAccessor from ray.data.context import DataContext from ray.data.tests.conftest import * # noqa +from ray.data.tests.util import extract_values, column_udf from ray.tests.conftest import * # noqa @@ -32,44 +34,60 @@ def test_basic_actors(shutdown_only, pipelined): n = 5 ds = ray.data.range(n) ds = maybe_pipeline(ds, pipelined) - assert sorted(ds.map(lambda x: x + 1, compute="actors").take()) == list( - range(1, n + 1) - ) + assert sorted( + extract_values( + "id", + ds.map( + column_udf("id", lambda x: x + 1), compute=ray.data.ActorPoolStrategy() + ).take(), 
+ ) + ) == list(range(1, n + 1)) # Should still work even if num actors > num cpus. ds = ray.data.range(n) ds = maybe_pipeline(ds, pipelined) assert sorted( - ds.map(lambda x: x + 1, compute=ray.data.ActorPoolStrategy(size=4)).take() + extract_values( + "id", + ds.map( + column_udf("id", lambda x: x + 1), + compute=ray.data.ActorPoolStrategy(size=4), + ).take(), + ) ) == list(range(1, n + 1)) # Test setting custom max inflight tasks. ds = ray.data.range(10, parallelism=5) ds = maybe_pipeline(ds, pipelined) assert sorted( - ds.map( - lambda x: x + 1, - compute=ray.data.ActorPoolStrategy(max_tasks_in_flight_per_actor=3), - ).take() + extract_values( + "id", + ds.map( + column_udf("id", lambda x: x + 1), + compute=ray.data.ActorPoolStrategy(max_tasks_in_flight_per_actor=3), + ).take(), + ) ) == list(range(1, 11)) # Test invalid max tasks inflight arg. with pytest.raises(ValueError): ray.data.range(10).map( - lambda x: x, + column_udf("id", lambda x: x), compute=ray.data.ActorPoolStrategy(max_tasks_in_flight_per_actor=0), ) # Test min no more than max check. with pytest.raises(ValueError): ray.data.range(10).map( - lambda x: x, compute=ray.data.ActorPoolStrategy(min_size=8, max_size=4) + column_udf("id", lambda x: x), + compute=ray.data.ActorPoolStrategy(min_size=8, max_size=4), ) # Test conflicting args. with pytest.raises(ValueError): ray.data.range(10).map( - lambda x: x, compute=ray.data.ActorPoolStrategy(min_size=8, size=4) + column_udf("id", lambda x: x), + compute=ray.data.ActorPoolStrategy(min_size=8, size=4), ) @@ -84,7 +102,7 @@ def __init__(self): def __call__(self, x): r = self.num_reuses self.num_reuses += 1 - return r + return {"id": np.array([r])} # Need to specify compute explicitly. with pytest.raises(ValueError): @@ -92,7 +110,7 @@ def __call__(self, x): # Need to specify actor compute strategy. with pytest.raises(ValueError): - ds.map(StatefulFn, compute="tasks").take() + ds.map(StatefulFn).take() # Need to specify compute explicitly. 
with pytest.raises(ValueError): @@ -100,7 +118,7 @@ def __call__(self, x): # Need to specify actor compute strategy. with pytest.raises(ValueError): - ds.flat_map(StatefulFn, compute="tasks") + ds.flat_map(StatefulFn) # Need to specify compute explicitly. with pytest.raises(ValueError): @@ -108,11 +126,11 @@ def __call__(self, x): # Need to specify actor compute strategy. with pytest.raises(ValueError): - ds.filter(StatefulFn, compute="tasks") + ds.filter(StatefulFn) # map - actor_reuse = ds.map(StatefulFn, compute="actors").take() - assert sorted(actor_reuse) == list(range(10)), actor_reuse + actor_reuse = ds.map(StatefulFn, compute=ray.data.ActorPoolStrategy()).take() + assert sorted(extract_values("id", actor_reuse)) == list(range(10)), actor_reuse class StatefulFn: def __init__(self): @@ -121,14 +139,30 @@ def __init__(self): def __call__(self, x): r = self.num_reuses self.num_reuses += 1 - return [r] + return [{"id": r}] # flat map - actor_reuse = ds.flat_map(StatefulFn, compute="actors").take() + actor_reuse = extract_values( + "id", ds.flat_map(StatefulFn, compute=ray.data.ActorPoolStrategy()).take() + ) assert sorted(actor_reuse) == list(range(10)), actor_reuse + class StatefulFn: + def __init__(self): + self.num_reuses = 0 + + def __call__(self, x): + r = self.num_reuses + self.num_reuses += 1 + return {"id": np.array([r])} + # map batches - actor_reuse = ds.map_batches(StatefulFn, batch_size=1, compute="actors").take() + actor_reuse = extract_values( + "id", + ds.map_batches( + StatefulFn, batch_size=1, compute=ray.data.ActorPoolStrategy() + ).take(), + ) assert sorted(actor_reuse) == list(range(10)), actor_reuse class StatefulFn: @@ -141,7 +175,7 @@ def __call__(self, x): return r > 0 # filter - actor_reuse = ds.filter(StatefulFn, compute="actors").take() + actor_reuse = ds.filter(StatefulFn, compute=ray.data.ActorPoolStrategy()).take() assert len(actor_reuse) == 9, actor_reuse @@ -154,11 +188,14 @@ class StatefulFn: def __call__(self, x): thread_id = 
threading.get_ident() assert threading.current_thread() is not threading.main_thread() - return [thread_id] + return {"tid": np.array([thread_id])} - thread_ids = ds.map_batches( - StatefulFn, compute="actors", max_concurrency=2 - ).take_all() + thread_ids = extract_values( + "tid", + ds.map_batches( + StatefulFn, compute=ray.data.ActorPoolStrategy(), max_concurrency=2 + ).take_all(), + ) # Make sure user's UDF is not running concurrently. assert len(set(thread_ids)) == 1 @@ -167,7 +204,9 @@ def __call__(self, x): raise ValueError with pytest.raises(ValueError): - ds.map_batches(ErrorFn, compute="actors", max_concurrency=2).take_all() + ds.map_batches( + ErrorFn, compute=ray.data.ActorPoolStrategy(), max_concurrency=2 + ).take_all() def test_transform_failure(shutdown_only): @@ -186,25 +225,32 @@ def mapper(x): def test_flat_map_generator(ray_start_regular_shared): ds = ray.data.range(3) - def map_generator(item: int) -> Iterator[int]: + def map_generator(item: dict) -> Iterator[int]: for _ in range(2): - yield item + 1 + yield {"id": item["id"] + 1} - assert sorted(ds.flat_map(map_generator).take()) == [1, 1, 2, 2, 3, 3] + assert sorted(extract_values("id", ds.flat_map(map_generator).take())) == [ + 1, + 1, + 2, + 2, + 3, + 3, + ] def test_add_column(ray_start_regular_shared): ds = ray.data.range(5).add_column("foo", lambda x: 1) - assert ds.take(1) == [{"value": 0, "foo": 1}] + assert ds.take(1) == [{"id": 0, "foo": 1}] - ds = ray.data.range_table(5).add_column("foo", lambda x: x["value"] + 1) - assert ds.take(1) == [{"value": 0, "foo": 1}] + ds = ray.data.range(5).add_column("foo", lambda x: x["id"] + 1) + assert ds.take(1) == [{"id": 0, "foo": 1}] - ds = ray.data.range_table(5).add_column("value", lambda x: x["value"] + 1) - assert ds.take(2) == [{"value": 1}, {"value": 2}] + ds = ray.data.range(5).add_column("id", lambda x: x["id"] + 1) + assert ds.take(2) == [{"id": 1}, {"id": 2}] with pytest.raises(ValueError): - ds = ray.data.range(5).add_column("value", 0) 
+ ds = ray.data.range(5).add_column("id", 0) def test_drop_columns(ray_start_regular_shared, tmp_path): @@ -254,11 +300,6 @@ def test_select_columns(ray_start_regular_shared): with pytest.raises(KeyError): each_ds.select_columns(cols=["col1", "col2", "dummy_col"]).materialize() - # Test simple - ds3 = ray.data.range(10) - with pytest.raises(ValueError): - ds3.select_columns(cols=[]).materialize() - def test_map_batches_basic(ray_start_regular_shared, tmp_path, restore_data_context): ctx = DataContext.get_current() @@ -267,7 +308,9 @@ def test_map_batches_basic(ray_start_regular_shared, tmp_path, restore_data_cont # Test input validation ds = ray.data.range(5) with pytest.raises(ValueError): - ds.map_batches(lambda x: x + 1, batch_format="pyarrow", batch_size=-1).take() + ds.map_batches( + column_udf("id", lambda x: x + 1), batch_format="pyarrow", batch_size=-1 + ).take() # Set up. df = pd.DataFrame({"one": [1, 2, 3], "two": [2, 3, 4]}) @@ -301,21 +344,23 @@ def test_map_batches_basic(ray_start_regular_shared, tmp_path, restore_data_cont # The pandas column is "value", and it originally has rows from 0~299. # After the map batch, it should have 1~300. 
row = ds_list[i] - assert row["value"] == i + 1 + assert row["id"] == i + 1 assert ds.count() == 300 # Test the lambda returns different types than the batch_format # pandas => list block ds = ray.data.read_parquet(str(tmp_path)) - ds2 = ds.map_batches(lambda df: [1], batch_size=1) - ds_list = ds2.take() + ds2 = ds.map_batches(lambda df: {"id": np.array([1])}, batch_size=1) + ds_list = extract_values("id", ds2.take()) assert ds_list == [1, 1, 1] assert ds.count() == 3 # pyarrow => list block ds = ray.data.read_parquet(str(tmp_path)) - ds2 = ds.map_batches(lambda df: [1], batch_size=1, batch_format="pyarrow") - ds_list = ds2.take() + ds2 = ds.map_batches( + lambda df: {"id": np.array([1])}, batch_size=1, batch_format="pyarrow" + ) + ds_list = extract_values("id", ds2.take()) assert ds_list == [1, 1, 1] assert ds.count() == 3 @@ -351,14 +396,13 @@ def __call__(self, df): with pytest.raises(ValueError): # CallableClass not supported for task compute strategy. - ds.map_batches(Foo, compute="tasks") + ds.map_batches(Foo) with pytest.raises(ValueError): # fn_constructor_args and fn_constructor_kwargs only supported for actor # compute strategy. ds.map_batches( lambda x: x, - compute="tasks", fn_constructor_args=(1,), fn_constructor_kwargs={"a": 1}, ) @@ -368,7 +412,7 @@ def __call__(self, df): # class UDFs. 
ds.map_batches( lambda x: x, - compute="actors", + compute=ray.data.ActorPoolStrategy(), fn_constructor_args=(1,), fn_constructor_kwargs={"a": 1}, ) @@ -450,7 +494,7 @@ def __call__(self, x): CallableFn, batch_size=1, batch_format="pandas", - compute="actors", + compute=ray.data.ActorPoolStrategy(), fn_constructor_args=(put(1),), ) ds_list = ds2.take() @@ -473,7 +517,7 @@ def __call__(self, x): CallableFn, batch_size=1, batch_format="pandas", - compute="actors", + compute=ray.data.ActorPoolStrategy(), fn_constructor_kwargs={"b": put(2)}, ) ds_list = ds2.take() @@ -498,7 +542,7 @@ def __call__(self, x): CallableFn, batch_size=1, batch_format="pandas", - compute="actors", + compute=ray.data.ActorPoolStrategy(), fn_constructor_args=(put(1),), fn_constructor_kwargs={"b": put(2)}, ) @@ -518,7 +562,7 @@ def __call__(self, x): CallableFn, batch_size=1, batch_format="pandas", - compute="actors", + compute=ray.data.ActorPoolStrategy(), fn_constructor_args=fn_constructor_args, fn_constructor_kwargs=fn_constructor_kwargs, ) @@ -526,7 +570,7 @@ def __call__(self, x): CallableFn, batch_size=1, batch_format="pandas", - compute="actors", + compute=ray.data.ActorPoolStrategy(), fn_constructor_args=fn_constructor_args, fn_constructor_kwargs=fn_constructor_kwargs, ) @@ -547,7 +591,7 @@ def __call__(self, x): lambda df, a, b=None: b * df + a, batch_size=1, batch_format="pandas", - compute="actors", + compute=ray.data.ActorPoolStrategy(), fn_args=(put(1),), fn_kwargs={"b": put(2)}, ) @@ -555,7 +599,7 @@ def __call__(self, x): CallableFn, batch_size=1, batch_format="pandas", - compute="actors", + compute=ray.data.ActorPoolStrategy(), fn_constructor_args=fn_constructor_args, fn_constructor_kwargs=fn_constructor_kwargs, ) @@ -602,7 +646,9 @@ def test_map_batches_actors_preserves_order(shutdown_only): ray.init(num_cpus=2) # Test that actor compute model preserves block order. 
ds = ray.data.range(10, parallelism=5) - assert ds.map_batches(lambda x: x, compute="actors").take() == list(range(10)) + assert extract_values( + "id", ds.map_batches(lambda x: x, compute=ray.data.ActorPoolStrategy()).take() + ) == list(range(10)) @pytest.mark.parametrize( @@ -622,16 +668,16 @@ def test_map_batches_batch_mutation( # Test that batch mutation works without encountering a read-only error (e.g. if the # batch is a zero-copy view on data in the object store). def mutate(df): - df["value"] += 1 + df["id"] += 1 return df - ds = ray.data.range_table(num_rows, parallelism=num_blocks).repartition(num_blocks) + ds = ray.data.range(num_rows, parallelism=num_blocks).repartition(num_blocks) # Convert to Pandas blocks. ds = ds.map_batches(lambda df: df, batch_format="pandas", batch_size=None) # Apply UDF that mutates the batches. ds = ds.map_batches(mutate, batch_size=batch_size) - assert [row["value"] for row in ds.iter_rows()] == list(range(1, num_rows + 1)) + assert [row["id"] for row in ds.iter_rows()] == list(range(1, num_rows + 1)) @pytest.mark.parametrize( @@ -649,10 +695,10 @@ def test_map_batches_batch_zero_copy( def mutate(df): # Check that batch is read-only. assert not df.values.flags.writeable - df["value"] += 1 + df["id"] += 1 return df - ds = ray.data.range_table(num_rows, parallelism=num_blocks).repartition(num_blocks) + ds = ray.data.range(num_rows, parallelism=num_blocks).repartition(num_blocks) # Convert to Pandas blocks. ds = ds.map_batches(lambda df: df, batch_format="pandas", batch_size=None) ds = ds.materialize() @@ -660,7 +706,9 @@ def mutate(df): # Apply UDF that mutates the batches, which should fail since the batch is # read-only. 
with pytest.raises(ValueError, match="tried to mutate a zero-copy read-only batch"): - ds = ds.map_batches(mutate, batch_size=batch_size, zero_copy_batch=True) + ds = ds.map_batches( + mutate, batch_format="pandas", batch_size=batch_size, zero_copy_batch=True + ) ds.materialize() @@ -755,13 +803,13 @@ def test_map_batches_block_bundling_skewed_auto( def test_map_with_mismatched_columns(ray_start_regular_shared): def bad_fn(row): - if row > 5: + if row["id"] > 5: return {"a": "hello1"} else: return {"b": "hello1"} def good_fn(row): - if row > 5: + if row["id"] > 5: return {"a": "hello1", "b": "hello2"} else: return {"b": "hello2", "a": "hello1"} @@ -786,14 +834,14 @@ def test_map_batches_combine_empty_blocks(ray_start_regular_shared): xs = [x % 3 for x in list(range(100))] # ds1 has 1 block which contains 100 rows. - ds1 = ray.data.from_items(xs).repartition(1).sort().map_batches(lambda x: x) + ds1 = ray.data.from_items(xs).repartition(1).sort("item").map_batches(lambda x: x) assert ds1._block_num_rows() == [100] # ds2 has 30 blocks, but only 3 of them are non-empty ds2 = ( ray.data.from_items(xs) .repartition(30) - .sort() + .sort("item") .map_batches(lambda x: x, batch_size=1) ) assert len(ds2._block_num_rows()) == 3 @@ -816,7 +864,7 @@ def ensure_sample_size_close(dataset, sample_percent=0.5): ds = ray.data.range(10, parallelism=2) ensure_sample_size_close(ds) - ds = ray.data.range_table(10, parallelism=2) + ds = ray.data.range(10, parallelism=2) ensure_sample_size_close(ds) ds = ray.data.range_tensor(5, parallelism=2, shape=(2, 2)) diff --git a/python/ray/data/tests/test_mars.py b/python/ray/data/tests/test_mars.py index 6dc1f00693e5..d563f1fd337d 100644 --- a/python/ray/data/tests/test_mars.py +++ b/python/ray/data/tests/test_mars.py @@ -45,10 +45,6 @@ def test_mars(ray_start_regular): pdf2, ) - # Test simple datasets - with pytest.raises(NotImplementedError): - ray.data.range(10).to_mars() - cluster.stop() @@ -99,10 +95,6 @@ def 
test_from_mars_e2e(ray_start_regular, enable_optimizer): assert ds3._plan._logical_plan.dag.name == "FromArrowRefs" _check_usage_record(["FromArrowRefs"]) - # Test simple datasets - with pytest.raises(NotImplementedError): - ray.data.range(10).to_mars() - cluster.stop() diff --git a/python/ray/data/tests/test_numpy.py b/python/ray/data/tests/test_numpy.py index 9ef31dd916c7..cbd1c7378b94 100644 --- a/python/ray/data/tests/test_numpy.py +++ b/python/ray/data/tests/test_numpy.py @@ -20,6 +20,7 @@ from ray.data.tests.conftest import * # noqa from ray.data.tests.mock_http_server import * # noqa +from ray.data.tests.util import extract_values from ray.tests.conftest import * # noqa @@ -43,7 +44,7 @@ def test_from_numpy(ray_start_regular_shared, from_ref): ds = ray.data.from_numpy_refs([ray.put(arr) for arr in arrs]) else: ds = ray.data.from_numpy(arrs) - values = np.stack(ds.take(8)) + values = np.stack(extract_values("data", ds.take(8))) np.testing.assert_array_equal(values, np.concatenate((arr1, arr2))) # Check that conversion task is included in stats. assert "FromNumpyRefs" in ds.stats() @@ -53,7 +54,7 @@ def test_from_numpy(ray_start_regular_shared, from_ref): ds = ray.data.from_numpy_refs(ray.put(arr1)) else: ds = ray.data.from_numpy(arr1) - values = np.stack(ds.take(4)) + values = np.stack(extract_values("data", ds.take(4))) np.testing.assert_array_equal(values, arr1) # Check that conversion task is included in stats. 
assert "FromNumpyRefs" in ds.stats() @@ -62,7 +63,7 @@ def test_from_numpy(ray_start_regular_shared, from_ref): def test_from_numpy_variable_shaped(ray_start_regular_shared): arr = np.array([np.ones((2, 2)), np.ones((3, 3))], dtype=object) ds = ray.data.from_numpy(arr) - values = np.array(ds.take(2), dtype=object) + values = np.array(extract_values("data", ds.take(2)), dtype=object) def recursive_to_list(a): if not isinstance(a, (list, np.ndarray)): @@ -75,19 +76,14 @@ def recursive_to_list(a): def test_to_numpy_refs(ray_start_regular_shared): - # Simple Dataset - ds = ray.data.range(10) - arr = np.concatenate(ray.get(ds.to_numpy_refs())) - np.testing.assert_equal(arr, np.arange(0, 10)) - # Tensor Dataset ds = ray.data.range_tensor(10, parallelism=2) - arr = np.concatenate(ray.get(ds.to_numpy_refs())) + arr = np.concatenate(extract_values("data", ray.get(ds.to_numpy_refs()))) np.testing.assert_equal(arr, np.expand_dims(np.arange(0, 10), 1)) # Table Dataset - ds = ray.data.range_table(10) - arr = np.concatenate([t["value"] for t in ray.get(ds.to_numpy_refs())]) + ds = ray.data.range(10) + arr = np.concatenate([t["id"] for t in ray.get(ds.to_numpy_refs())]) np.testing.assert_equal(arr, np.arange(0, 10)) # Test multi-column Arrow dataset. 
@@ -119,16 +115,18 @@ def test_to_numpy_refs(ray_start_regular_shared): ) def test_numpy_roundtrip(ray_start_regular_shared, fs, data_path): ds = ray.data.range_tensor(10, parallelism=2) - ds.write_numpy(data_path, filesystem=fs) + ds.write_numpy(data_path, filesystem=fs, column="data") ds = ray.data.read_numpy(data_path, filesystem=fs) assert str(ds) == ( "Datastream(\n" " num_blocks=2,\n" " num_rows=?,\n" - " schema={__value__: numpy.ndarray(shape=(1,), dtype=int64)}\n" + " schema={data: numpy.ndarray(shape=(1,), dtype=int64)}\n" ")" ) - np.testing.assert_equal(ds.take(2), [np.array([0]), np.array([1])]) + np.testing.assert_equal( + extract_values("data", ds.take(2)), [np.array([0]), np.array([1])] + ) def test_numpy_read(ray_start_regular_shared, tmp_path): @@ -140,10 +138,12 @@ def test_numpy_read(ray_start_regular_shared, tmp_path): "Datastream(\n" " num_blocks=1,\n" " num_rows=10,\n" - " schema={__value__: numpy.ndarray(shape=(1,), dtype=int64)}\n" + " schema={data: numpy.ndarray(shape=(1,), dtype=int64)}\n" ")" ) - np.testing.assert_equal(ds.take(2), [np.array([0]), np.array([1])]) + np.testing.assert_equal( + extract_values("data", ds.take(2)), [np.array([0]), np.array([1])] + ) # Add a file with a non-matching file extension. This file should be ignored. 
with open(os.path.join(path, "foo.txt"), "w") as f: @@ -156,10 +156,10 @@ def test_numpy_read(ray_start_regular_shared, tmp_path): "Datastream(\n" " num_blocks=1,\n" " num_rows=10,\n" - " schema={__value__: numpy.ndarray(shape=(1,), dtype=int64)}\n" + " schema={data: numpy.ndarray(shape=(1,), dtype=int64)}\n" ")" ) - assert [v.item() for v in ds.take(2)] == [0, 1] + assert [v["data"].item() for v in ds.take(2)] == [0, 1] @pytest.mark.parametrize("ignore_missing_paths", [True, False]) @@ -194,10 +194,12 @@ def test_numpy_read_meta_provider(ray_start_regular_shared, tmp_path): "Datastream(\n" " num_blocks=1,\n" " num_rows=10,\n" - " schema={__value__: numpy.ndarray(shape=(1,), dtype=int64)}\n" + " schema={data: numpy.ndarray(shape=(1,), dtype=int64)}\n" ")" ) - np.testing.assert_equal(ds.take(2), [np.array([0]), np.array([1])]) + np.testing.assert_equal( + extract_values("data", ds.take(2)), [np.array([0]), np.array([1])] + ) with pytest.raises(NotImplementedError): ray.data.read_binary_files( @@ -252,9 +254,9 @@ def skip_unpartitioned(kv_dict): val_str = "".join(f"array({v}, dtype=int8), " for v in vals)[:-2] assert_base_partitioned_ds( ds, - schema="{__value__: numpy.ndarray(shape=(2,), dtype=int8)}", + schema="{data: numpy.ndarray(shape=(2,), dtype=int8)}", sorted_values=f"[[{val_str}]]", - ds_take_transform_fn=lambda taken: [taken], + ds_take_transform_fn=lambda taken: [extract_values("data", taken)], sorted_values_transform_fn=lambda sorted_values: str(sorted_values), ) assert ray.get(kept_file_counter.get.remote()) == 2 @@ -274,7 +276,7 @@ def skip_unpartitioned(kv_dict): def test_numpy_write(ray_start_regular_shared, fs, data_path, endpoint_url): ds = ray.data.range_tensor(10, parallelism=2) ds._set_uuid("data") - ds.write_numpy(data_path, filesystem=fs) + ds.write_numpy(data_path, filesystem=fs, column="data") file_path1 = os.path.join(data_path, "data_000000.npy") file_path2 = os.path.join(data_path, "data_000001.npy") if endpoint_url is None: @@ -291,7 
+293,7 @@ def test_numpy_write(ray_start_regular_shared, fs, data_path, endpoint_url): assert len(arr2) == 5 assert arr1.sum() == 10 assert arr2.sum() == 35 - np.testing.assert_equal(ds.take(1), [np.array([0])]) + np.testing.assert_equal(extract_values("data", ds.take(1)), [np.array([0])]) @pytest.mark.parametrize( @@ -312,7 +314,10 @@ def test_numpy_write_block_path_provider( ds = ray.data.range_tensor(10, parallelism=2) ds._set_uuid("data") ds.write_numpy( - data_path, filesystem=fs, block_path_provider=test_block_write_path_provider + data_path, + filesystem=fs, + block_path_provider=test_block_write_path_provider, + column="data", ) file_path1 = os.path.join(data_path, "000000_05_data.test.npy") file_path2 = os.path.join(data_path, "000001_05_data.test.npy") @@ -330,7 +335,7 @@ def test_numpy_write_block_path_provider( assert len(arr2) == 5 assert arr1.sum() == 10 assert arr2.sum() == 35 - np.testing.assert_equal(ds.take(1), [np.array([0])]) + np.testing.assert_equal(extract_values("data", ds.take(1)), [np.array([0])]) if __name__ == "__main__": diff --git a/python/ray/data/tests/test_object_gc.py b/python/ray/data/tests/test_object_gc.py index 825602539f69..724fd9caef01 100644 --- a/python/ray/data/tests/test_object_gc.py +++ b/python/ray/data/tests/test_object_gc.py @@ -44,7 +44,7 @@ def check_to_tf_no_spill(ctx, pipe): max_epoch = 10 for p in pipe.iter_epochs(max_epoch): for _ in p.to_tf( - feature_columns="__value__", label_columns="label", batch_size=None + feature_columns="data", label_columns="label", batch_size=None ): pass meminfo = memory_summary(ctx.address_info["address"], stats_only=True) diff --git a/python/ray/data/tests/test_operators.py b/python/ray/data/tests/test_operators.py index 18b1ecb461c4..690a890c54d5 100644 --- a/python/ray/data/tests/test_operators.py +++ b/python/ray/data/tests/test_operators.py @@ -1,4 +1,5 @@ import collections +import pandas as pd import random import pytest import numpy as np @@ -33,12 +34,12 @@ def 
_get_blocks(bundle: RefBundle, output_list: List[Block]): for block, _ in bundle.blocks: - output_list.append(ray.get(block)) + output_list.append(list(ray.get(block)["id"])) def _mul2_transform(block_iter: Iterable[Block], ctx) -> Iterable[Block]: for block in block_iter: - yield [b * 2 for b in block] + yield pd.DataFrame({"id": [b * 2 for b in block["id"]]}) def _take_outputs(op: PhysicalOperator) -> List[Any]: @@ -234,7 +235,7 @@ def test_split_operator(ray_start_regular_shared, equal, chunk_size): ref = op.get_next() assert ref.owns_blocks, ref for block, _ in ref.blocks: - output_splits[ref.output_split_idx].extend(ray.get(block)) + output_splits[ref.output_split_idx].extend(list(ray.get(block)["id"])) op.inputs_done() if equal: for i in range(3): @@ -267,7 +268,7 @@ def test_split_operator_random(ray_start_regular_shared, equal, random_seed): ref = op.get_next() assert ref.owns_blocks, ref for block, _ in ref.blocks: - output_splits[ref.output_split_idx].extend(ray.get(block)) + output_splits[ref.output_split_idx].extend(list(ray.get(block)["id"])) if equal: actual = [len(output_splits[i]) for i in range(3)] expected = [num_inputs // 3] * 3 @@ -281,13 +282,16 @@ def test_split_operator_locality_hints(ray_start_regular_shared): op = OutputSplitter(input_op, 2, equal=False, locality_hints=["node1", "node2"]) def get_fake_loc(item): + assert isinstance(item, int), item if item in [0, 1, 4, 5, 8]: return "node1" else: return "node2" def get_bundle_loc(bundle): - return get_fake_loc(ray.get(bundle.blocks[0][0])[0]) + block = ray.get(bundle.blocks[0][0]) + fval = list(block["id"])[0] + return get_fake_loc(fval) op._get_location = get_bundle_loc @@ -301,7 +305,7 @@ def get_bundle_loc(bundle): ref = op.get_next() assert ref.owns_blocks, ref for block, _ in ref.blocks: - output_splits[ref.output_split_idx].extend(ray.get(block)) + output_splits[ref.output_split_idx].extend(list(ray.get(block)["id"])) total = 0 for i in range(2): @@ -587,7 +591,7 @@ def 
test_map_operator_pool_delegation(compute, expected): def _get_bundles(bundle: RefBundle): output = [] for block, _ in bundle.blocks: - output.extend(ray.get(block)) + output.extend(list(ray.get(block)["id"])) return output @@ -672,7 +676,7 @@ def test_block_ref_bundler_uniform( i for bundle in out_bundles for block, _ in bundle.blocks - for i in ray.get(block) + for i in list(ray.get(block)["id"]) ] assert flat_out == list(range(n)) diff --git a/python/ray/data/tests/test_optimize.py b/python/ray/data/tests/test_optimize.py index b4f4dccfd55f..dd8643d433d9 100644 --- a/python/ray/data/tests/test_optimize.py +++ b/python/ray/data/tests/test_optimize.py @@ -14,6 +14,7 @@ from ray.data.context import DataContext from ray.data.datasource import Datasource, ReadTask from ray.data.datasource.csv_datasource import CSVDatasource +from ray.data.tests.util import column_udf, extract_values from ray.tests.conftest import * # noqa @@ -67,7 +68,7 @@ def dummy_map(x): def test_memory_sanity(shutdown_only): info = ray.init(num_cpus=1, object_store_memory=500e6) ds = ray.data.range(10) - ds = ds.map(lambda x: np.ones(100 * 1024 * 1024, dtype=np.uint8)) + ds = ds.map(lambda x: {"data": np.ones(100 * 1024 * 1024, dtype=np.uint8)}) ds.materialize() meminfo = memory_summary(info.address_info["address"], stats_only=True) @@ -142,7 +143,7 @@ def inc(x): # TODO(Clark): Remove this sleep once we have fixed memory pressure handling. time.sleep(2) - return x + 1 + return {"id": x["id"] + 1} num_rounds = 10 for _ in range(num_rounds): @@ -167,9 +168,9 @@ def test_memory_release_lazy(shutdown_only): # Should get fused into single stage. 
ds = ds.lazy() - ds = ds.map(lambda x: np.ones(100 * 1024 * 1024, dtype=np.uint8)) - ds = ds.map(lambda x: np.ones(100 * 1024 * 1024, dtype=np.uint8)) - ds = ds.map(lambda x: np.ones(100 * 1024 * 1024, dtype=np.uint8)) + ds = ds.map(lambda x: {"data": np.ones(100 * 1024 * 1024, dtype=np.uint8)}) + ds = ds.map(lambda x: {"data": np.ones(100 * 1024 * 1024, dtype=np.uint8)}) + ds = ds.map(lambda x: {"data": np.ones(100 * 1024 * 1024, dtype=np.uint8)}) ds.materialize() meminfo = memory_summary(info.address_info["address"], stats_only=True) assert "Spilled" not in meminfo, meminfo @@ -187,7 +188,7 @@ def test_memory_release_lazy_shuffle(shutdown_only): # Should get fused into single stage. ds = ds.lazy() - ds = ds.map(lambda x: np.ones(100 * 1024 * 1024, dtype=np.uint8)) + ds = ds.map(lambda x: {"data": np.ones(100 * 1024 * 1024, dtype=np.uint8)}) ds.random_shuffle().materialize() meminfo = memory_summary(info.address_info["address"], stats_only=True) assert "Spilled" not in meminfo, meminfo @@ -206,7 +207,6 @@ def test_lazy_fanout(shutdown_only, local_path): def inc(row): map_counter.increment.remote() - row = row.as_pydict() row["one"] += 1 return row @@ -245,7 +245,7 @@ def inc(row): def inc(x): map_counter.increment.remote() - return x + 1 + return {"item": x["item"] + 1} # The source data shouldn't be cleared since it's non-lazy. ds = ray.data.from_items(list(range(10))) @@ -254,8 +254,8 @@ def inc(x): ds2 = ds1.map(inc) ds3 = ds1.map(inc) # Test content. - assert ds2.materialize().take() == list(range(2, 12)) - assert ds3.materialize().take() == list(range(2, 12)) + assert extract_values("item", ds2.materialize().take()) == list(range(2, 12)) + assert extract_values("item", ds3.materialize().take()) == list(range(2, 12)) # Test that first map is executed twice. assert ray.get(map_counter.get.remote()) == 2 * 10 + 10 + 10 @@ -268,8 +268,8 @@ def inc(x): ds1 = ds.map(inc) ds2 = ds.map(inc) # Test content. 
- assert ds1.materialize().take() == list(range(2, 12)) - assert ds2.materialize().take() == list(range(2, 12)) + assert extract_values("item", ds1.materialize().take()) == list(range(2, 12)) + assert extract_values("item", ds2.materialize().take()) == list(range(2, 12)) # Test that first map is executed twice, because ds1.materialize() # clears up the previous snapshot blocks, and ds2.materialize() # has to re-execute ds.map(inc) again. @@ -278,7 +278,7 @@ def inc(x): def test_spread_hint_inherit(ray_start_regular_shared): ds = ray.data.range(10).lazy() - ds = ds.map(lambda x: x + 1) + ds = ds.map(column_udf("id", lambda x: x + 1)) ds = ds.random_shuffle() for s in ds._plan._stages_before_snapshot: assert s.ray_remote_args == {}, s.ray_remote_args @@ -301,7 +301,7 @@ def test_stage_linking(ray_start_regular_shared): assert len(ds._plan._stages_before_snapshot) == 0 assert len(ds._plan._stages_after_snapshot) == 0 assert ds._plan._last_optimized_stages is None - ds = ds.map(lambda x: x + 1) + ds = ds.map(column_udf("id", lambda x: x + 1)) assert len(ds._plan._stages_before_snapshot) == 0 _assert_has_stages(ds._plan._stages_after_snapshot, ["Map"]) assert ds._plan._last_optimized_stages is None @@ -396,7 +396,10 @@ def build_pipe(): pipe = pipe.map_batches(dummy_map) pipe = pipe.map_batches(dummy_map) pipe = pipe.random_shuffle_each_window() - results = [sorted(p.take()) for p in pipe.iter_epochs()] + results = [] + for p in pipe.iter_epochs(): + result = sorted(extract_values("id", p.take())) + results.append(result) assert results == [[0, 1, 2], [0, 1, 2]], results return pipe @@ -475,8 +478,8 @@ def test_optimize_equivalent_remote_args(ray_start_regular_shared): for kwb in equivalent_kwargs: print("CHECKING", kwa, kwb) pipe = ray.data.range(3).repeat(2) - pipe = pipe.map_batches(dummy_map, compute="tasks", **kwa) - pipe = pipe.map_batches(dummy_map, compute="tasks", **kwb) + pipe = pipe.map_batches(dummy_map, batch_size=64, **kwa) + pipe = 
pipe.map_batches(dummy_map, batch_size=64, **kwb) pipe.take() expect_stages( pipe, @@ -490,7 +493,7 @@ def test_optimize_equivalent_remote_args(ray_start_regular_shared): for kwb in equivalent_kwargs: print("CHECKING", kwa, kwb) pipe = ray.data.range(3).repeat(2) - pipe = pipe.map_batches(dummy_map, compute="tasks", **kwa) + pipe = pipe.map_batches(dummy_map, batch_size=64, **kwa) pipe = pipe.random_shuffle_each_window(**kwb) pipe.take() expect_stages( @@ -513,9 +516,9 @@ def test_optimize_incompatible_stages(shutdown_only): pipe = ray.data.range(3).repeat(2) # Should get fused as long as their resource types are compatible. - pipe = pipe.map_batches(dummy_map, compute="actors") + pipe = pipe.map_batches(dummy_map, compute=ray.data.ActorPoolStrategy()) # Cannot fuse actors->tasks. - pipe = pipe.map_batches(dummy_map, compute="tasks") + pipe = pipe.map_batches(dummy_map) pipe = pipe.random_shuffle_each_window() pipe.take() expect_stages( @@ -529,7 +532,7 @@ def test_optimize_incompatible_stages(shutdown_only): ) pipe = ray.data.range(3).repeat(2) - pipe = pipe.map_batches(dummy_map, compute="tasks") + pipe = pipe.map_batches(dummy_map) pipe = pipe.map_batches(dummy_map, num_cpus=0.75) pipe = pipe.random_shuffle_each_window() pipe.take() @@ -585,7 +588,7 @@ def __call__(self, x): CallableFn, batch_size=1, batch_format="pandas", - compute="actors", + compute=ray.data.ActorPoolStrategy(), fn_constructor_args=fn_constructor_args, fn_constructor_kwargs=fn_constructor_kwargs, ) @@ -593,7 +596,7 @@ def __call__(self, x): CallableFn, batch_size=1, batch_format="pandas", - compute="actors", + compute=ray.data.ActorPoolStrategy(), fn_constructor_args=fn_constructor_args, fn_constructor_kwargs=fn_constructor_kwargs, ) @@ -621,7 +624,7 @@ def __call__(self, x): lambda df, a, b=None: b * df + a, batch_size=1, batch_format="pandas", - compute="actors", + compute=ray.data.ActorPoolStrategy(), fn_args=(put(1),), fn_kwargs={"b": put(2)}, ) @@ -629,7 +632,7 @@ def __call__(self, x): 
CallableFn, batch_size=1, batch_format="pandas", - compute="actors", + compute=ray.data.ActorPoolStrategy(), fn_constructor_args=fn_constructor_args, fn_constructor_kwargs=fn_constructor_kwargs, ) @@ -688,7 +691,7 @@ def test_optimize_lazy_reuse_base_data( num_reads = ray.get(counter.get.remote()) assert num_reads == 1, num_reads ds = ds.lazy() - ds = ds.map(lambda x: x) + ds = ds.map(column_udf("id", lambda x: x)) if with_shuffle: ds = ds.random_shuffle() ds.take() diff --git a/python/ray/data/tests/test_pandas.py b/python/ray/data/tests/test_pandas.py index edb21f94cbb5..e83eff8662ce 100644 --- a/python/ray/data/tests/test_pandas.py +++ b/python/ray/data/tests/test_pandas.py @@ -93,8 +93,8 @@ def test_from_pandas_refs(ray_start_regular_shared, enable_pandas_block): def test_to_pandas(ray_start_regular_shared): n = 5 - df = pd.DataFrame({"value": list(range(n))}) - ds = ray.data.range_table(n) + df = pd.DataFrame({"id": list(range(n))}) + ds = ray.data.range(n) dfds = ds.to_pandas() assert df.equals(dfds) @@ -109,8 +109,8 @@ def test_to_pandas(ray_start_regular_shared): def test_to_pandas_refs(ray_start_regular_shared): n = 5 - df = pd.DataFrame({"value": list(range(n))}) - ds = ray.data.range_table(n) + df = pd.DataFrame({"id": list(range(n))}) + ds = ray.data.range(n) dfds = pd.concat(ray.get(ds.to_pandas_refs()), ignore_index=True) assert df.equals(dfds) @@ -133,7 +133,7 @@ def test_to_pandas_tensor_column_cast_pandas(ray_start_regular_shared): ctx.enable_tensor_extension_casting = True in_df = pd.DataFrame({"a": [data]}) ds = ray.data.from_pandas(in_df) - dtypes = ds.schema().types + dtypes = ds.schema().base_schema.types assert len(dtypes) == 1 # Tensor column should be automatically cast to Tensor extension. 
assert isinstance(dtypes[0], TensorDtype) @@ -158,7 +158,7 @@ def test_to_pandas_tensor_column_cast_arrow(ray_start_regular_shared): ctx.enable_tensor_extension_casting = True in_table = pa.table({"a": ArrowTensorArray.from_numpy(data)}) ds = ray.data.from_arrow(in_table) - dtype = ds.schema().field(0).type + dtype = ds.schema().base_schema.field(0).type assert isinstance(dtype, ArrowTensorType) out_df = ds.to_pandas() assert out_df["a"].dtype.type is np.object_ diff --git a/python/ray/data/tests/test_parquet.py b/python/ray/data/tests/test_parquet.py index 668a6c6d4c63..5386dd22e32f 100644 --- a/python/ray/data/tests/test_parquet.py +++ b/python/ray/data/tests/test_parquet.py @@ -691,7 +691,9 @@ def test_parquet_reader_estimate_data_size(shutdown_only, tmp_path): ), "estimated data size is not deterministic in multiple calls." text_output_path = os.path.join(tmp_path, "text") - ray.data.range(1000).map(lambda _: "a" * 1000).write_parquet(text_output_path) + ray.data.range(1000).map(lambda _: {"text": "a" * 1000}).write_parquet( + text_output_path + ) ds = ray.data.read_parquet(text_output_path) assert ds.num_blocks() > 1 data_size = ds.size_bytes() diff --git a/python/ray/data/tests/test_pipeline.py b/python/ray/data/tests/test_pipeline.py index d189c5736b91..1465526068d9 100644 --- a/python/ray/data/tests/test_pipeline.py +++ b/python/ray/data/tests/test_pipeline.py @@ -8,12 +8,12 @@ import ray from ray.data import datastream -from ray.data._internal.arrow_block import ArrowRow from ray.data.context import DataContext, WARN_PREFIX, OK_PREFIX from ray.data.datastream import Dataset from ray.data.dataset_pipeline import DatasetPipeline from ray.tests.conftest import * # noqa +from ray.data.tests.util import column_udf, extract_values class MockLogger: @@ -127,11 +127,15 @@ def test_pipeline_actors(shutdown_only): pipe = ( ray.data.range(3) .repeat(10) - .map(lambda x: x + 1) - .map(lambda x: x + 1, compute="actors", num_gpus=1) + .map(column_udf("id", lambda x: x + 
1)) + .map( + column_udf("id", lambda x: x + 1), + compute=ray.data.ActorPoolStrategy(), + num_gpus=1, + ) ) - assert sorted(pipe.take(999)) == sorted([2, 3, 4] * 10) + assert sorted(extract_values("id", pipe.take(999))) == sorted([2, 3, 4] * 10) def test_pipeline_is_parallel(shutdown_only): @@ -173,12 +177,12 @@ def sleep(x): def test_window_by_bytes(ray_start_regular_shared): with pytest.raises(ValueError): - ray.data.range_table(10).window(blocks_per_window=2, bytes_per_window=2) + ray.data.range(10).window(blocks_per_window=2, bytes_per_window=2) - pipe = ray.data.range_table(10000000, parallelism=100).window(blocks_per_window=2) + pipe = ray.data.range(10000000, parallelism=100).window(blocks_per_window=2) assert str(pipe) == "DatasetPipeline(num_windows=50, num_stages=2)" - pipe = ray.data.range_table(10000000, parallelism=100).window( + pipe = ray.data.range(10000000, parallelism=100).window( bytes_per_window=10 * 1024 * 1024 ) assert str(pipe) == "DatasetPipeline(num_windows=8, num_stages=2)" @@ -187,19 +191,19 @@ def test_window_by_bytes(ray_start_regular_shared): for ds in dss[:-1]: assert ds.num_blocks() in [12, 13] - pipe = ray.data.range_table(10000000, parallelism=100).window(bytes_per_window=1) + pipe = ray.data.range(10000000, parallelism=100).window(bytes_per_window=1) assert str(pipe) == "DatasetPipeline(num_windows=100, num_stages=2)" for ds in pipe.iter_datasets(): assert ds.num_blocks() == 1 - pipe = ray.data.range_table(10000000, parallelism=100).window(bytes_per_window=1e9) + pipe = ray.data.range(10000000, parallelism=100).window(bytes_per_window=1e9) assert str(pipe) == "DatasetPipeline(num_windows=1, num_stages=2)" for ds in pipe.iter_datasets(): assert ds.num_blocks() == 100 # Test creating from non-lazy BlockList. 
pipe = ( - ray.data.range_table(10000000, parallelism=100) + ray.data.range(10000000, parallelism=100) .map_batches(lambda x: x) .window(bytes_per_window=10 * 1024 * 1024) ) @@ -210,42 +214,47 @@ def test_window_by_bytes(ray_start_regular_shared): try: context.optimize_fuse_read_stages = False dataset = ray.data.range(10).window(bytes_per_window=1) - assert dataset.take(10) == list(range(10)) + assert extract_values("id", dataset.take(10)) == list(range(10)) finally: context.optimize_fuse_read_stages = old def test_epoch(ray_start_regular_shared): # Test dataset repeat. - pipe = ray.data.range(5).map(lambda x: x * 2).repeat(3).map(lambda x: x * 2) - results = [p.take() for p in pipe.iter_epochs()] + pipe = ( + ray.data.range(5) + .map(column_udf("id", lambda x: x * 2)) + .repeat(3) + .map(column_udf("id", lambda x: x * 2)) + ) + results = [extract_values("id", p.take()) for p in pipe.iter_epochs()] assert results == [[0, 4, 8, 12, 16], [0, 4, 8, 12, 16], [0, 4, 8, 12, 16]] # Test dataset pipeline repeat. pipe = ray.data.range(3).window(blocks_per_window=2).repeat(3) - results = [p.take() for p in pipe.iter_epochs()] + results = [extract_values("id", p.take()) for p in pipe.iter_epochs()] assert results == [[0, 1, 2], [0, 1, 2], [0, 1, 2]] # Test max epochs. pipe = ray.data.range(3).window(blocks_per_window=2).repeat(3) - results = [p.take() for p in pipe.iter_epochs(2)] + results = [extract_values("id", p.take()) for p in pipe.iter_epochs(2)] assert results == [[0, 1, 2], [0, 1, 2]] # Test nested repeat. pipe = ray.data.range(5).repeat(2).repeat(2) - results = [p.take() for p in pipe.iter_epochs()] + results = [extract_values("id", p.take()) for p in pipe.iter_epochs()] assert results == [[0, 1, 2, 3, 4, 0, 1, 2, 3, 4], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4]] # Test preserve_epoch=True. 
pipe = ray.data.range(5).repeat(2).rewindow(blocks_per_window=2) - results = [p.take() for p in pipe.iter_epochs()] + results = [extract_values("id", p.take()) for p in pipe.iter_epochs()] assert results == [[0, 1, 2, 3, 4], [0, 1, 2, 3, 4]] # Test preserve_epoch=False. pipe = ( ray.data.range(5).repeat(2).rewindow(blocks_per_window=2, preserve_epoch=False) ) - results = [p.take() for p in pipe.iter_epochs()] + results = [extract_values("id", p.take()) for p in pipe.iter_epochs()] assert results == [[0, 1, 2, 3], [4, 0, 1, 2, 3, 4]] @@ -284,15 +293,17 @@ def test_basic_pipeline(ray_start_regular_shared): pipe = ds.window(blocks_per_window=1).map(lambda x: x).map(lambda x: x) assert str(pipe) == "DatasetPipeline(num_windows=10, num_stages=4)" - assert pipe.take() == list(range(10)) + assert extract_values("id", pipe.take()) == list(range(10)) pipe = ( - ds.window(blocks_per_window=1).map(lambda x: x).flat_map(lambda x: [x, x + 1]) + ds.window(blocks_per_window=1) + .map(lambda x: x) + .flat_map(lambda x: [{"id": x["id"]}, {"id": x["id"] + 1}]) ) assert str(pipe) == "DatasetPipeline(num_windows=10, num_stages=4)" assert pipe.count() == 20 - pipe = ds.window(blocks_per_window=1).filter(lambda x: x % 2 == 0) + pipe = ds.window(blocks_per_window=1).filter(lambda x: x["id"] % 2 == 0) assert str(pipe) == "DatasetPipeline(num_windows=10, num_stages=3)" assert pipe.count() == 5 @@ -319,10 +330,10 @@ def test_window(ray_start_regular_shared): assert str(pipe) == "DatasetPipeline(num_windows=None, num_stages=1)" datasets = list(pipe.iter_datasets()) assert len(datasets) == 4 - assert datasets[0].take() == [0, 1, 2] - assert datasets[1].take() == [3, 4, 5] - assert datasets[2].take() == [6, 7, 8] - assert datasets[3].take() == [9] + assert extract_values("id", datasets[0].take()) == [0, 1, 2] + assert extract_values("id", datasets[1].take()) == [3, 4, 5] + assert extract_values("id", datasets[2].take()) == [6, 7, 8] + assert extract_values("id", datasets[3].take()) == [9] ds = 
ray.data.range(10, parallelism=10) pipe = ds.window(blocks_per_window=5) @@ -331,10 +342,10 @@ def test_window(ray_start_regular_shared): assert str(pipe) == "DatasetPipeline(num_windows=None, num_stages=1)" datasets = list(pipe.iter_datasets()) assert len(datasets) == 4 - assert datasets[0].take() == [0, 1, 2] - assert datasets[1].take() == [3, 4, 5] - assert datasets[2].take() == [6, 7, 8] - assert datasets[3].take() == [9] + assert extract_values("id", datasets[0].take()) == [0, 1, 2] + assert extract_values("id", datasets[1].take()) == [3, 4, 5] + assert extract_values("id", datasets[2].take()) == [6, 7, 8] + assert extract_values("id", datasets[3].take()) == [9] def test_repeat(ray_start_regular_shared): @@ -345,7 +356,7 @@ def test_repeat(ray_start_regular_shared): assert str(pipe) == "DatasetPipeline(num_windows=5, num_stages=2)" pipe = pipe.repeat(2) assert str(pipe) == "DatasetPipeline(num_windows=10, num_stages=2)" - assert pipe.take() == (list(range(5)) + list(range(5))) + assert extract_values("id", pipe.take()) == (list(range(5)) + list(range(5))) ds = ray.data.range(5) pipe = ds.window(blocks_per_window=1) @@ -362,7 +373,7 @@ def test_from_iterable(ray_start_regular_shared): pipe = DatasetPipeline.from_iterable( [lambda: ray.data.range(3), lambda: ray.data.range(2)] ) - assert pipe.take() == [0, 1, 2, 0, 1] + assert extract_values("id", pipe.take()) == [0, 1, 2, 0, 1] def test_repeat_forever(ray_start_regular_shared): @@ -372,6 +383,7 @@ def test_repeat_forever(ray_start_regular_shared): pipe = ds.repeat() assert str(pipe) == "DatasetPipeline(num_windows=inf, num_stages=2)" for i, v in enumerate(pipe.iter_rows()): + v = v["id"] assert v == i % 10, (v, i, i % 10) if i > 1000: break @@ -404,7 +416,7 @@ def test_to_tf(ray_start_regular_shared): ds = ds.add_column("label", lambda x: 1) pipe = ds.window(blocks_per_window=2).repeat(2) batches = list( - pipe.to_tf(feature_columns="__value__", label_columns="label", batch_size=None) + 
pipe.to_tf(feature_columns="data", label_columns="label", batch_size=None) ) assert len(batches) == 20 @@ -425,7 +437,7 @@ def test_iter_batches_batch_across_windows(ray_start_regular_shared): # 3 windows, each containing 3 blocks, each containing 3 rows. pipe = ray.data.range(27, parallelism=9).window(blocks_per_window=3) # 4-row batches, with batches spanning both blocks and windows. - batches = list(pipe.iter_batches(batch_size=4)) + batches = list(pipe.iter_batches(batch_size=4, batch_format="pandas")) assert len(batches) == 7, batches assert all(len(e) == 4 for e in batches[:-1]) assert len(batches[-1]) == 3 @@ -443,38 +455,38 @@ def test_iter_datasets(ray_start_regular_shared): def test_foreach_window(ray_start_regular_shared): pipe = ray.data.range(5).window(blocks_per_window=2) - pipe = pipe.foreach_window(lambda ds: ds.map(lambda x: x * 2)) - assert pipe.take() == [0, 2, 4, 6, 8] + pipe = pipe.foreach_window(lambda ds: ds.map(column_udf("id", lambda x: x * 2))) + assert extract_values("id", pipe.take()) == [0, 2, 4, 6, 8] def test_schema(ray_start_regular_shared): pipe = ray.data.range(5).window(blocks_per_window=2) - assert pipe.schema() == int + assert pipe.schema().names == ["id"] def test_schema_peek(ray_start_regular_shared): # Multiple datasets pipe = ray.data.range(6, parallelism=6).window(blocks_per_window=2) - assert pipe.schema() == int + assert pipe.schema().names == ["id"] assert pipe._first_datastream is not None dss = list(pipe.iter_datasets()) assert len(dss) == 3, dss assert pipe._first_datastream is None - assert pipe.schema() == int + assert pipe.schema().names == ["id"] # Only 1 dataset pipe = ray.data.range(1).window(blocks_per_window=2) - assert pipe.schema() == int + assert pipe.schema().names == ["id"] assert pipe._first_datastream is not None dss = list(pipe.iter_datasets()) assert len(dss) == 1, dss assert pipe._first_datastream is None - assert pipe.schema() == int + assert pipe.schema().names == ["id"] # Empty datasets pipe = ( 
ray.data.range(6, parallelism=6) - .filter(lambda x: x < 0) + .filter(lambda x: x["id"] < 0) .window(blocks_per_window=2) ) assert pipe.schema() is None @@ -487,29 +499,30 @@ def test_schema_peek(ray_start_regular_shared): def test_schema_after_repeat(ray_start_regular_shared): pipe = ray.data.range(6, parallelism=6).window(blocks_per_window=2).repeat(2) - assert pipe.schema() == int + assert pipe.schema().names == ["id"] output = [] for ds in pipe.iter_datasets(): - output.extend(ds.take()) + output.extend(extract_values("id", ds.take())) assert sorted(output) == sorted(list(range(6)) * 2) pipe = ray.data.range(6, parallelism=6).window(blocks_per_window=2).repeat(2) - assert pipe.schema() == int + assert pipe.schema().names == ["id"] # Test that operations still work after peek. pipe = pipe.map_batches(lambda batch: batch) output = [] for ds in pipe.iter_datasets(): - output.extend(ds.take()) + output.extend(extract_values("id", ds.take())) assert sorted(output) == sorted(list(range(6)) * 2) def test_split(ray_start_regular_shared): - pipe = ray.data.range(3).map(lambda x: x + 1).repeat(10) + pipe = ray.data.range(3).map(column_udf("id", lambda x: x + 1)).repeat(10) @ray.remote(num_cpus=0) def consume(shard, i): total = 0 for row in shard.iter_rows(): + row = row["id"] total += 1 assert row == i + 1, row assert total == 10, total @@ -522,13 +535,14 @@ def consume(shard, i): def test_split_at_indices(ray_start_regular_shared): indices = [2, 5] n = 8 - pipe = ray.data.range(n).map(lambda x: x + 1).repeat(2) + pipe = ray.data.range(n).map(column_udf("id", lambda x: x + 1)).repeat(2) @ray.remote(num_cpus=0) def consume(shard, i): total = 0 out = [] for row in shard.iter_rows(): + row = row["id"] total += 1 out.append(row) if i == 0: @@ -548,7 +562,7 @@ def consume(shard, i): ) -def _prepare_dataset_to_write(tmp_dir: str) -> Tuple[Dataset[ArrowRow], pd.DataFrame]: +def _prepare_dataset_to_write(tmp_dir: str) -> Tuple[Dataset, pd.DataFrame]: df1 = pd.DataFrame({"one": 
[1, 2, 3], "two": ["a", "b", "c"]}) df2 = pd.DataFrame({"one": [4, 5, 6], "two": ["e", "f", "g"]}) df = pd.concat([df1, df2]) @@ -632,29 +646,22 @@ def test_sort_each_window(ray_start_regular_shared): pipe = ( ray.data.range(12, parallelism=12) .window(blocks_per_window=3) - .sort_each_window() - ) - assert pipe.take() == list(range(12)) - - pipe = ( - ray.data.range(12, parallelism=12) - .window(blocks_per_window=3) - .sort_each_window(descending=True) + .sort_each_window("id") ) - assert pipe.take() == [2, 1, 0, 5, 4, 3, 8, 7, 6, 11, 10, 9] + assert extract_values("id", pipe.take()) == list(range(12)) pipe = ( ray.data.range(12, parallelism=12) .window(blocks_per_window=3) - .sort_each_window(key=lambda x: -x, descending=True) + .sort_each_window("id", descending=True) ) - assert pipe.take() == list(range(12)) + assert extract_values("id", pipe.take()) == [2, 1, 0, 5, 4, 3, 8, 7, 6, 11, 10, 9] def test_randomize_block_order_each_window(ray_start_regular_shared): pipe = ray.data.range(12).repartition(6).window(blocks_per_window=3) pipe = pipe.randomize_block_order_each_window(seed=0) - assert pipe.take() == [0, 1, 4, 5, 2, 3, 6, 7, 10, 11, 8, 9] + assert extract_values("id", pipe.take()) == [0, 1, 4, 5, 2, 3, 6, 7, 10, 11, 8, 9] def test_add_column(ray_start_regular_shared): @@ -697,7 +704,7 @@ def test_random_shuffle_each_window_with_custom_resource(ray_start_cluster): ray.data.datasource.RangeDatasource(), parallelism=10, n=1000, - block_format="list", + block_format="arrow", ray_remote_args={"resources": {"bar": 1}}, ).repeat(3) pipe = pipe.random_shuffle_each_window(resources={"bar": 1}) @@ -717,7 +724,7 @@ def verify_integrity(p): for b in p.iter_batches(): pass # Verify the integrity of the blocks of original dataset. 
- assert ds.take_all() == [1, 2, 3, 4, 5, 6] + assert extract_values("item", ds.take_all()) == [1, 2, 3, 4, 5, 6] verify_integrity(ds.repeat(10).randomize_block_order_each_window()) verify_integrity( @@ -751,7 +758,7 @@ def verify_integrity(p): splits = p.split(2, equal=True) ray.get([consume.remote(p) for p in splits]) # Verify the integrity of the blocks of original dataset - assert ds.take_all() == [1, 2, 3, 4, 5, 6] + assert extract_values("item", ds.take_all()) == [1, 2, 3, 4, 5, 6] verify_integrity(ds.repeat(10).randomize_block_order_each_window()) verify_integrity( @@ -814,9 +821,9 @@ def verify_blocks(pipe, owned_by_consumer): ds.repeat(1).randomize_block_order_each_window().map_batches(lambda x: x), True ) verify_blocks(ds.repeat(1).map_batches(lambda x: x), True) - verify_blocks(ds.repeat(1).map(lambda x: x), True) - verify_blocks(ds.repeat(1).filter(lambda x: x > 3), True) - verify_blocks(ds.repeat(1).sort_each_window(), True) + verify_blocks(ds.repeat(1).map(column_udf("item", lambda x: x)), True) + verify_blocks(ds.repeat(1).filter(lambda x: x["item"] > 3), True) + verify_blocks(ds.repeat(1).sort_each_window("item"), True) verify_blocks(ds.repeat(1).random_shuffle_each_window(), True) verify_blocks(ds.repeat(1).repartition_each_window(2), True) verify_blocks(ds.repeat(1).rewindow(blocks_per_window=1), False) diff --git a/python/ray/data/tests/test_pipeline_nohang.py b/python/ray/data/tests/test_pipeline_nohang.py index af3408c81902..412f216d7880 100644 --- a/python/ray/data/tests/test_pipeline_nohang.py +++ b/python/ray/data/tests/test_pipeline_nohang.py @@ -2,6 +2,7 @@ import ray from ray.tests.conftest import * # noqa +from ray.data.tests.util import extract_values, column_udf NUM_REPEATS = 10 NUM_TASKS = 10 @@ -14,9 +15,15 @@ def test_basic_actors(shutdown_only): for _ in range(NUM_REPEATS): ds = ray.data.range(NUM_TASKS) ds = ds.window(blocks_per_window=1) - assert sorted(ds.map(lambda x: x + 1, compute="actors").take()) == list( - range(1, 
NUM_TASKS + 1) - ) + assert sorted( + extract_values( + "id", + ds.map( + column_udf("id", lambda x: x + 1), + compute=ray.data.ActorPoolStrategy(), + ).take(), + ) + ) == list(range(1, NUM_TASKS + 1)) if __name__ == "__main__": diff --git a/python/ray/data/tests/test_random_access.py b/python/ray/data/tests/test_random_access.py index b6c70ea6c53e..f31951d1759a 100644 --- a/python/ray/data/tests/test_random_access.py +++ b/python/ray/data/tests/test_random_access.py @@ -8,21 +8,23 @@ @pytest.mark.parametrize("pandas", [False, True]) def test_basic(ray_start_regular_shared, pandas): - ds = ray.data.range_table(100, parallelism=10) - ds = ds.add_column("embedding", lambda b: b["value"] ** 2) + ds = ray.data.range(100, parallelism=10) + ds = ds.add_column("embedding", lambda b: b["id"] ** 2) if not pandas: - ds = ds.map_batches(lambda df: pyarrow.Table.from_pandas(df)) + ds = ds.map_batches( + lambda df: pyarrow.Table.from_pandas(df), batch_format="pandas" + ) - rad = ds.to_random_access_dataset("value", num_workers=1) + rad = ds.to_random_access_dataset("id", num_workers=1) # Test get. assert ray.get(rad.get_async(-1)) is None assert ray.get(rad.get_async(100)) is None for i in range(100): - assert ray.get(rad.get_async(i)) == {"value": i, "embedding": i**2} + assert ray.get(rad.get_async(i)) == {"id": i, "embedding": i**2} def expected(i): - return {"value": i, "embedding": i**2} + return {"id": i, "embedding": i**2} # Test multiget. 
results = rad.multiget([-1] + list(range(10)) + [100]) @@ -30,26 +32,22 @@ def expected(i): def test_empty_blocks(ray_start_regular_shared): - ds = ray.data.range_table(10).repartition(20) + ds = ray.data.range(10).repartition(20) assert ds.num_blocks() == 20 - rad = ds.to_random_access_dataset("value") + rad = ds.to_random_access_dataset("id") for i in range(10): - assert ray.get(rad.get_async(i)) == {"value": i} + assert ray.get(rad.get_async(i)) == {"id": i} def test_errors(ray_start_regular_shared): ds = ray.data.range(10) - with pytest.raises(ValueError): - ds.to_random_access_dataset("value") - - ds = ray.data.range_table(10) with pytest.raises(ValueError): ds.to_random_access_dataset("invalid") def test_stats(ray_start_regular_shared): - ds = ray.data.range_table(100, parallelism=10) - rad = ds.to_random_access_dataset("value", num_workers=1) + ds = ray.data.range(100, parallelism=10) + rad = ds.to_random_access_dataset("id", num_workers=1) stats = rad.stats() assert "Accesses per worker: 0 min, 0 max, 0 mean" in stats, stats ray.get(rad.get_async(0)) diff --git a/python/ray/data/tests/test_randomize_block_order.py b/python/ray/data/tests/test_randomize_block_order.py index 6ae396cdea5e..bccef7aaf331 100644 --- a/python/ray/data/tests/test_randomize_block_order.py +++ b/python/ray/data/tests/test_randomize_block_order.py @@ -14,6 +14,7 @@ from ray.data._internal.logical.interfaces import LogicalPlan from ray.data._internal.logical.optimizers import LogicalOptimizer from ray.data._internal.planner.planner import Planner +from ray.data.tests.util import extract_values def test_randomize_blocks_operator(ray_start_regular_shared, enable_optimizer): @@ -112,7 +113,20 @@ def test_randomize_block_order_after_repartition(): def test_randomize_blocks_e2e(ray_start_regular_shared, enable_optimizer): ds = ray.data.range(12, parallelism=4) ds = ds.randomize_block_order(seed=0) - assert ds.take_all() == [6, 7, 8, 0, 1, 2, 3, 4, 5, 9, 10, 11], ds + assert 
extract_values("id", ds.take_all()) == [ + 6, + 7, + 8, + 0, + 1, + 2, + 3, + 4, + 5, + 9, + 10, + 11, + ], ds def test_randomize_blocks_rule_e2e(ray_start_regular_shared, enable_optimizer): diff --git a/python/ray/data/tests/test_raydp.py b/python/ray/data/tests/test_raydp.py index 5c06b619f288..0b5c9848da4d 100644 --- a/python/ray/data/tests/test_raydp.py +++ b/python/ray/data/tests/test_raydp.py @@ -34,10 +34,10 @@ def test_raydp_roundtrip(spark): def test_raydp_to_spark(spark): n = 5 - ds = ray.data.range_table(n) - values = [r["value"] for r in ds.take(5)] + ds = ray.data.range(n) + values = [r["id"] for r in ds.take(5)] df = ds.to_spark(spark) - rows = [r.value for r in df.take(5)] + rows = [r.id for r in df.take(5)] assert values == rows diff --git a/python/ray/data/tests/test_size_estimation.py b/python/ray/data/tests/test_size_estimation.py index 666cbe50a355..283fcd663f06 100644 --- a/python/ray/data/tests/test_size_estimation.py +++ b/python/ray/data/tests/test_size_estimation.py @@ -121,7 +121,9 @@ def test_split_read_csv(ray_start_regular_shared, tmp_path): def gen(name): path = os.path.join(tmp_path, name) - ray.data.range(1000, parallelism=1).map(lambda _: LARGE_VALUE).write_csv(path) + ray.data.range(1000, parallelism=1).map( + lambda _: {"out": LARGE_VALUE} + ).write_csv(path) return ray.data.read_csv(path) # 20MiB @@ -160,7 +162,7 @@ def gen(name): path = os.path.join(tmp_path, name) ds = ( ray.data.range(200000, parallelism=1) - .map(lambda _: uuid.uuid4().hex) + .map(lambda _: {"out": uuid.uuid4().hex}) .materialize() ) # Fully execute the operations prior to write, because with @@ -179,7 +181,7 @@ def gen(name): ctx.target_max_block_size = 3_000_000 ds2 = gen("out2") nrow = ds2._block_num_rows() - assert 2 < len(nrow) < 4, nrow + assert 3 < len(nrow) < 5, nrow for x in nrow[:-1]: assert 50000 < x < 75000, (x, nrow) @@ -198,19 +200,12 @@ def test_split_map(shutdown_only, use_actors): ray.init(num_cpus=2) kwargs = {} if use_actors: - kwargs = 
{"compute": "actors"} - # Simple block + kwargs = {"compute": ray.data.ActorPoolStrategy()} + + # Arrow block ctx = ray.data.context.DataContext.get_current() ctx.target_max_block_size = 20_000_000 ctx.block_splitting_enabled = True - ds1 = ray.data.range(1000, parallelism=1).map(lambda _: LARGE_VALUE, **kwargs) - nblocks = len(ds1.map(lambda x: x, **kwargs).get_internal_block_refs()) - assert nblocks == 1, nblocks - ctx.target_max_block_size = 2_000_000 - nblocks = len(ds1.map(lambda x: x, **kwargs).get_internal_block_refs()) - assert 4 < nblocks < 7 or use_actors, nblocks - - # Arrow block ctx.target_max_block_size = 20_000_000 ds2 = ray.data.range(1000, parallelism=1).map(lambda _: ARROW_LARGE_VALUE, **kwargs) nblocks = len(ds2.map(lambda x: x, **kwargs).get_internal_block_refs()) @@ -228,17 +223,9 @@ def test_split_map(shutdown_only, use_actors): def test_split_flat_map(ray_start_regular_shared): - # Simple block ctx = ray.data.context.DataContext.get_current() ctx.target_max_block_size = 20_000_000 ctx.block_splitting_enabled = True - ds1 = ray.data.range(1000, parallelism=1).map(lambda _: LARGE_VALUE) - nblocks = len(ds1.flat_map(lambda x: [x]).get_internal_block_refs()) - assert nblocks == 1, nblocks - ctx.target_max_block_size = 2_000_000 - nblocks = len(ds1.flat_map(lambda x: [x]).get_internal_block_refs()) - assert 4 < nblocks < 7, nblocks - # Arrow block ctx.target_max_block_size = 20_000_000 ds2 = ray.data.range(1000, parallelism=1).map(lambda _: ARROW_LARGE_VALUE) @@ -250,17 +237,9 @@ def test_split_flat_map(ray_start_regular_shared): def test_split_map_batches(ray_start_regular_shared): - # Simple block ctx = ray.data.context.DataContext.get_current() ctx.target_max_block_size = 20_000_000 ctx.block_splitting_enabled = True - ds1 = ray.data.range(1000, parallelism=1).map(lambda _: LARGE_VALUE) - nblocks = len(ds1.map_batches(lambda x: x, batch_size=16).get_internal_block_refs()) - assert nblocks == 1, ds1._block_num_rows() - ctx.target_max_block_size 
= 2_000_000 - nblocks = len(ds1.map_batches(lambda x: x, batch_size=16).get_internal_block_refs()) - assert 4 < nblocks < 7, ds1._block_num_rows() - # Arrow block ctx.target_max_block_size = 20_000_000 ds2 = ray.data.range(1000, parallelism=1).map(lambda _: ARROW_LARGE_VALUE) diff --git a/python/ray/data/tests/test_sort.py b/python/ray/data/tests/test_sort.py index b11540e376ce..1e22d6442dea 100644 --- a/python/ray/data/tests/test_sort.py +++ b/python/ray/data/tests/test_sort.py @@ -11,6 +11,7 @@ from ray.data.block import BlockAccessor from ray.data.tests.conftest import * # noqa from ray.tests.conftest import * # noqa +from ray.data.tests.util import extract_values def test_sort_simple(ray_start_regular, use_push_based_shuffle): @@ -19,18 +20,21 @@ def test_sort_simple(ray_start_regular, use_push_based_shuffle): xs = list(range(num_items)) random.shuffle(xs) ds = ray.data.from_items(xs, parallelism=parallelism) - assert ds.sort().take(num_items) == list(range(num_items)) + assert extract_values("item", ds.sort("item").take(num_items)) == list( + range(num_items) + ) # Make sure we have rows in each block. - assert len([n for n in ds.sort()._block_num_rows() if n > 0]) == parallelism - assert ds.sort(descending=True).take(num_items) == list(reversed(range(num_items))) - assert ds.sort(key=lambda x: -x).take(num_items) == list(reversed(range(num_items))) + assert len([n for n in ds.sort("item")._block_num_rows() if n > 0]) == parallelism + assert extract_values( + "item", ds.sort("item", descending=True).take(num_items) + ) == list(reversed(range(num_items))) # Test empty dataset. 
ds = ray.data.from_items([]) - s1 = ds.sort() + s1 = ds.sort("item") assert s1.count() == 0 assert s1.take() == ds.take() - ds = ray.data.range(10).filter(lambda r: r > 10).sort() + ds = ray.data.range(10).filter(lambda r: r["id"] > 10).sort("id") assert ds.count() == 0 @@ -40,7 +44,7 @@ def test_sort_partition_same_key_to_same_block( num_items = 100 xs = [1] * num_items ds = ray.data.from_items(xs) - sorted_ds = ds.repartition(num_items).sort() + sorted_ds = ds.repartition(num_items).sort("item") # We still have 100 blocks assert len(sorted_ds._block_num_rows()) == num_items @@ -130,21 +134,19 @@ def test_sort_arrow_with_empty_blocks( [{"A": (x % 3), "B": x} for x in range(3)], parallelism=3 ) ds = ds.filter(lambda r: r["A"] == 0) - assert [row.as_pydict() for row in ds.sort("A").iter_rows()] == [ - {"A": 0, "B": 0} - ] + assert list(ds.sort("A").iter_rows()) == [{"A": 0, "B": 0}] # Test empty dataset. - ds = ray.data.range_table(10).filter(lambda r: r["value"] > 10) + ds = ray.data.range(10).filter(lambda r: r["id"] > 10) assert ( len( ray.data._internal.sort.sample_boundaries( - ds._plan.execute().get_blocks(), "value", 3 + ds._plan.execute().get_blocks(), "id", 3 ) ) == 2 ) - assert ds.sort("value").count() == 0 + assert ds.sort("id").count() == 0 finally: ctx.use_polars = original_use_polars @@ -200,19 +202,19 @@ def test_sort_pandas_with_empty_blocks(ray_start_regular, use_push_based_shuffle ds = ray.data.from_items([{"A": (x % 3), "B": x} for x in range(3)], parallelism=3) ds = ds.filter(lambda r: r["A"] == 0) - assert [row.as_pydict() for row in ds.sort("A").iter_rows()] == [{"A": 0, "B": 0}] + assert list(ds.sort("A").iter_rows()) == [{"A": 0, "B": 0}] # Test empty dataset. 
- ds = ray.data.range_table(10).filter(lambda r: r["value"] > 10) + ds = ray.data.range(10).filter(lambda r: r["id"] > 10) assert ( len( ray.data._internal.sort.sample_boundaries( - ds._plan.execute().get_blocks(), "value", 3 + ds._plan.execute().get_blocks(), "id", 3 ) ) == 2 ) - assert ds.sort("value").count() == 0 + assert ds.sort("id").count() == 0 def test_push_based_shuffle_schedule(): @@ -341,9 +343,9 @@ def test_sort_multinode(ray_start_cluster, use_push_based_shuffle): ray.init(cluster.address) parallelism = 100 - ds = ray.data.range(1000, parallelism=parallelism).random_shuffle().sort() + ds = ray.data.range(1000, parallelism=parallelism).random_shuffle().sort("id") for i, row in enumerate(ds.iter_rows()): - assert row == i + assert row["id"] == i def patch_ray_remote(condition, callback): @@ -452,12 +454,12 @@ def check_pipelined(refs): assert task_context["num_instances_below_parallelism"] <= 1 task_context["num_instances_below_parallelism"] = 0 - ds = ds.sort() + ds = ds.sort("id") # Only the last round should have fewer tasks in flight. 
assert task_context["num_instances_below_parallelism"] <= 1 task_context["num_instances_below_parallelism"] = 0 for i, row in enumerate(ds.iter_rows()): - assert row == i + assert row["id"] == i finally: ctx.use_push_based_shuffle = original diff --git a/python/ray/data/tests/test_split.py b/python/ray/data/tests/test_split.py index b7a20365c67c..0db9a8fba635 100644 --- a/python/ray/data/tests/test_split.py +++ b/python/ray/data/tests/test_split.py @@ -4,6 +4,7 @@ import time from unittest.mock import patch +import pandas as pd import numpy as np import pytest from ray.data.block import BlockMetadata @@ -26,6 +27,7 @@ from ray.data.block import BlockAccessor from ray.data.datastream import Dataset from ray.data.tests.conftest import * # noqa +from ray.data.tests.util import extract_values from ray.tests.conftest import * # noqa @@ -97,7 +99,7 @@ def _test_equal_split_balanced(block_sizes, num_splits): metadata = [] total_rows = 0 for block_size in block_sizes: - block = list(range(total_rows, total_rows + block_size)) + block = pd.DataFrame({"id": list(range(total_rows, total_rows + block_size))}) blocks.append(ray.put(block)) metadata.append(BlockAccessor.for_block(block).get_metadata(None, None)) total_rows += block_size @@ -119,7 +121,7 @@ def _test_equal_split_balanced(block_sizes, num_splits): assert total_rows - expected_total_rows == total_rows % num_splits # Check that all rows are unique (content check). 
split_rows = [row for split in splits for row in split.take(total_rows)] - assert len(set(split_rows)) == len(split_rows) + assert len(set(extract_values("id", split_rows))) == len(split_rows) def test_equal_split_balanced_grid(ray_start_regular_shared): @@ -160,7 +162,7 @@ def test_split_small(ray_start_regular_shared, pipelined): @ray.remote(num_cpus=0) def take(s): - return s.take() + return extract_values("item", s.take()) for m in [1, 3]: for n in [1, 3]: @@ -216,23 +218,23 @@ def test_split_at_indices_simple(ray_start_regular_shared): ds.split_at_indices([3, 1]) splits = ds.split_at_indices([5]) - r = [s.take() for s in splits] + r = [extract_values("id", s.take()) for s in splits] assert r == [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]] splits = ds.split_at_indices([2, 5]) - r = [s.take() for s in splits] + r = [extract_values("id", s.take()) for s in splits] assert r == [[0, 1], [2, 3, 4], [5, 6, 7, 8, 9]] splits = ds.split_at_indices([2, 5, 5, 100]) - r = [s.take() for s in splits] + r = [extract_values("id", s.take()) for s in splits] assert r == [[0, 1], [2, 3, 4], [], [5, 6, 7, 8, 9], []] splits = ds.split_at_indices([100]) - r = [s.take() for s in splits] + r = [extract_values("id", s.take()) for s in splits] assert r == [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], []] splits = ds.split_at_indices([0]) - r = [s.take() for s in splits] + r = [extract_values("id", s.take()) for s in splits] assert r == [[], [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]] @@ -266,7 +268,7 @@ def test_split_at_indices_coverage(ray_start_regular_shared, num_blocks, indices # indices configurations. ds = ray.data.range(20, parallelism=num_blocks) splits = ds.split_at_indices(indices) - r = [s.take_all() for s in splits] + r = [extract_values("id", s.take_all()) for s in splits] # Use np.array_split() semantics as our correctness ground-truth. assert r == [arr.tolist() for arr in np.array_split(list(range(20)), indices)] @@ -304,7 +306,7 @@ def test_split_at_indices_coverage_complete( # indices configurations. 
ds = ray.data.range(5, parallelism=num_blocks) splits = ds.split_at_indices(indices) - r = [s.take_all() for s in splits] + r = [extract_values("id", s.take_all()) for s in splits] # Use np.array_split() semantics as our correctness ground-truth. assert r == [arr.tolist() for arr in np.array_split(list(range(5)), indices)] @@ -328,19 +330,19 @@ def test_split_proportionately(ray_start_regular_shared): ds.split_proportionately([0.5, 0.5]) splits = ds.split_proportionately([0.5]) - r = [s.take() for s in splits] + r = [extract_values("id", s.take()) for s in splits] assert r == [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]] splits = ds.split_proportionately([0.2, 0.3]) - r = [s.take() for s in splits] + r = [extract_values("id", s.take()) for s in splits] assert r == [[0, 1], [2, 3, 4], [5, 6, 7, 8, 9]] splits = ds.split_proportionately([0.2, 0.3, 0.3]) - r = [s.take() for s in splits] + r = [extract_values("id", s.take()) for s in splits] assert r == [[0, 1], [2, 3, 4], [5, 6, 7], [8, 9]] splits = ds.split_proportionately([0.98, 0.01]) - r = [s.take() for s in splits] + r = [extract_values("id", s.take()) for s in splits] assert r == [[0, 1, 2, 3, 4, 5, 6, 7], [8], [9]] with pytest.raises(ValueError): @@ -357,31 +359,31 @@ def test_split(ray_start_regular_shared): assert [2] * 5 == [ dataset._plan.execute().initial_num_blocks() for dataset in datasets ] - assert 190 == sum([dataset.sum() for dataset in datasets]) + assert 190 == sum([dataset.sum("id") for dataset in datasets]) datasets = ds.split(3) assert [4, 3, 3] == [ dataset._plan.execute().initial_num_blocks() for dataset in datasets ] - assert 190 == sum([dataset.sum() for dataset in datasets]) + assert 190 == sum([dataset.sum("id") for dataset in datasets]) datasets = ds.split(1) assert [10] == [ dataset._plan.execute().initial_num_blocks() for dataset in datasets ] - assert 190 == sum([dataset.sum() for dataset in datasets]) + assert 190 == sum([dataset.sum("id") for dataset in datasets]) datasets = ds.split(10) assert 
[1] * 10 == [ dataset._plan.execute().initial_num_blocks() for dataset in datasets ] - assert 190 == sum([dataset.sum() for dataset in datasets]) + assert 190 == sum([dataset.sum("id") for dataset in datasets]) datasets = ds.split(11) assert [1] * 10 + [0] == [ dataset._plan.execute().initial_num_blocks() for dataset in datasets ] - assert 190 == sum([dataset.sum() or 0 for dataset in datasets]) + assert 190 == sum([dataset.sum("id") or 0 for dataset in datasets]) def test_split_hints(ray_start_regular_shared): @@ -510,6 +512,7 @@ def _create_meta(num_rows): def _create_block(data): + data = pd.DataFrame({"id": data}) return (ray.put(data), _create_meta(len(data))) @@ -528,7 +531,7 @@ def _create_blocks_with_metadata(blocks): def test_split_single_block(ray_start_regular_shared): - block = [1, 2, 3] + block = pd.DataFrame({"id": [1, 2, 3]}) metadata = _create_meta(3) results = ray.get( @@ -540,7 +543,7 @@ def test_split_single_block(ray_start_regular_shared): blocks = results[1:] assert 234 == block_id assert len(blocks) == 1 - assert blocks[0] == [1, 2, 3] + assert list(blocks[0]["id"]) == [1, 2, 3] assert meta[0].num_rows == 3 results = ray.get( @@ -552,9 +555,9 @@ def test_split_single_block(ray_start_regular_shared): blocks = results[1:] assert 234 == block_id assert len(blocks) == 2 - assert blocks[0] == [1] + assert list(blocks[0]["id"]) == [1] assert meta[0].num_rows == 1 - assert blocks[1] == [2, 3] + assert list(blocks[1]["id"]) == [2, 3] assert meta[1].num_rows == 2 results = ray.get( @@ -566,13 +569,13 @@ def test_split_single_block(ray_start_regular_shared): blocks = results[1:] assert 234 == block_id assert len(blocks) == 5 - assert blocks[0] == [] - assert blocks[1] == [1] - assert blocks[2] == [] - assert blocks[3] == [2, 3] - assert blocks[4] == [] + assert list(blocks[0]["id"]) == [] + assert list(blocks[1]["id"]) == [1] + assert list(blocks[2]["id"]) == [] + assert list(blocks[3]["id"]) == [2, 3] + assert list(blocks[4]["id"]) == [] - block = [] + 
block = pd.DataFrame({"id": []}) metadata = _create_meta(0) results = ray.get( @@ -584,8 +587,8 @@ def test_split_single_block(ray_start_regular_shared): blocks = results[1:] assert 234 == block_id assert len(blocks) == 2 - assert blocks[0] == [] - assert blocks[1] == [] + assert list(blocks[0]["id"]) == [] + assert list(blocks[1]["id"]) == [] def test_drop_empty_block_split(): @@ -600,7 +603,7 @@ def verify_splits(splits, blocks_by_split): assert len(blocks) == len(block_refs) assert len(blocks) == len(meta) for block, block_ref, meta in zip(blocks, block_refs, meta): - assert ray.get(block_ref) == block + assert list(ray.get(block_ref)["id"]) == block assert meta.num_rows == len(block) @@ -666,7 +669,7 @@ def equalize_helper(input_block_lists): for block_ref, _ in blocklist.get_blocks_with_metadata(): block = ray.get(block_ref) block_accessor = BlockAccessor.for_block(block) - block_list.append(block_accessor.to_default()) + block_list.append(list(block_accessor.to_default()["id"])) result_block_lists.append(block_list) return result_block_lists @@ -749,18 +752,18 @@ def test_train_test_split(ray_start_regular_shared): # float train, test = ds.train_test_split(test_size=0.25) - assert train.take() == [0, 1, 2, 3, 4, 5] - assert test.take() == [6, 7] + assert extract_values("id", train.take()) == [0, 1, 2, 3, 4, 5] + assert extract_values("id", test.take()) == [6, 7] # int train, test = ds.train_test_split(test_size=2) - assert train.take() == [0, 1, 2, 3, 4, 5] - assert test.take() == [6, 7] + assert extract_values("id", train.take()) == [0, 1, 2, 3, 4, 5] + assert extract_values("id", test.take()) == [6, 7] # shuffle train, test = ds.train_test_split(test_size=0.25, shuffle=True, seed=1) - assert train.take() == [4, 5, 3, 2, 7, 6] - assert test.take() == [0, 1] + assert extract_values("id", train.take()) == [4, 5, 3, 2, 7, 6] + assert extract_values("id", test.take()) == [0, 1] # error handling with pytest.raises(TypeError): diff --git 
a/python/ray/data/tests/test_stats.py b/python/ray/data/tests/test_stats.py index 58a72d57ddea..5d256cae7c3f 100644 --- a/python/ray/data/tests/test_stats.py +++ b/python/ray/data/tests/test_stats.py @@ -9,6 +9,7 @@ from ray.data._internal.datastream_logger import DatastreamLogger from ray.data.block import BlockMetadata from ray.data.context import DataContext +from ray.data.tests.util import column_udf from ray.tests.conftest import * # noqa from unittest.mock import patch @@ -447,7 +448,7 @@ def test_dataset_stats_zip(ray_start_regular_shared): def test_dataset_stats_sort(ray_start_regular_shared): ds = ray.data.range(1000, parallelism=10) - ds = ds.sort() + ds = ds.sort("id") stats = ds.materialize().stats() assert "SortMap" in stats, stats assert "SortReduce" in stats, stats @@ -496,9 +497,9 @@ def test_dataset_stats_read_parquet(ray_start_regular_shared, tmp_path): def test_dataset_split_stats(ray_start_regular_shared, tmp_path): context = DataContext.get_current() - ds = ray.data.range(100, parallelism=10).map(lambda x: x + 1) + ds = ray.data.range(100, parallelism=10).map(column_udf("id", lambda x: x + 1)) dses = ds.split_at_indices([49]) - dses = [ds.map(lambda x: x + 1) for ds in dses] + dses = [ds.map(column_udf("id", lambda x: x + 1)) for ds in dses] for ds_ in dses: stats = canonicalize(ds_.materialize().stats()) @@ -1075,7 +1076,7 @@ def test_streaming_stats_full(ray_start_regular_shared, restore_data_context): DataContext.get_current().new_execution_backend = True DataContext.get_current().use_streaming_executor = True - ds = ray.data.range(5, parallelism=5).map(lambda x: x + 1) + ds = ray.data.range(5, parallelism=5).map(column_udf("id", lambda x: x + 1)) ds.take_all() stats = canonicalize(ds.stats()) assert ( diff --git a/python/ray/data/tests/test_streaming_backpressure_edge_case.py b/python/ray/data/tests/test_streaming_backpressure_edge_case.py index cdedeee48fd2..2c4134c27165 100644 --- 
a/python/ray/data/tests/test_streaming_backpressure_edge_case.py +++ b/python/ray/data/tests/test_streaming_backpressure_edge_case.py @@ -79,12 +79,12 @@ def test_streaming_backpressure_e2e(restore_data_context): class TestSlow: def __call__(self, df: np.ndarray): time.sleep(2) - return np.random.randn(1, 20, 1024, 1024) + return {"id": np.random.randn(1, 20, 1024, 1024)} class TestFast: def __call__(self, df: np.ndarray): time.sleep(0.5) - return np.random.randn(1, 20, 1024, 1024) + return {"id": np.random.randn(1, 20, 1024, 1024)} ctx = ray.init(object_store_memory=4e9) ds = ray.data.range_tensor(20, shape=(3, 1024, 1024), parallelism=20) diff --git a/python/ray/data/tests/test_streaming_integration.py b/python/ray/data/tests/test_streaming_integration.py index e7b37487d822..88f20e9870f7 100644 --- a/python/ray/data/tests/test_streaming_integration.py +++ b/python/ray/data/tests/test_streaming_integration.py @@ -1,4 +1,5 @@ import itertools +import pandas as pd import random import pytest import threading @@ -24,12 +25,13 @@ from ray.data._internal.execution.util import make_ref_bundles from ray._private.test_utils import wait_for_condition from ray.data.tests.conftest import * # noqa +from ray.data.tests.util import extract_values def make_transform(block_fn): def map_fn(block_iter, ctx): for block in block_iter: - yield block_fn(block) + yield pd.DataFrame({"id": block_fn(block["id"])}) return map_fn @@ -38,7 +40,7 @@ def ref_bundles_to_list(bundles: List[RefBundle]) -> List[List[Any]]: output = [] for bundle in bundles: for block, _ in bundle.blocks: - output.append(ray.get(block)) + output.append(list(ray.get(block)["id"])) return output @@ -299,7 +301,7 @@ def func(x): # The pipeline should fully execute even when the output iterator is blocked. wait_for_condition(lambda: ray.get(counter.get.remote()) == 100) # Check we can take the rest. 
- assert list(it) == [[x] for x in range(1, 100)] + assert [b["id"] for b in it] == [[x] for x in range(1, 100)] def test_backpressure_from_output(ray_start_10_cpus_shared, restore_data_context): @@ -358,7 +360,7 @@ def test_e2e_liveness_with_output_backpressure_edge_case( ds = ray.data.range(10000, parallelism=100).map(lambda x: x, num_cpus=2) # This will hang forever if the liveness logic is wrong, since the output # backpressure will prevent any operators from running at all. - assert ds.take_all() == list(range(10000)) + assert extract_values("id", ds.take_all()) == list(range(10000)) def test_e2e_autoscaling_up(ray_start_10_cpus_shared, restore_data_context): @@ -482,12 +484,14 @@ def f(x): # Test recover. base = ray.data.range(1000, parallelism=100) ds1 = base.map_batches( - f, compute=ray.data.ActorPoolStrategy(4, 4), max_task_retries=999 + f, compute=ray.data.ActorPoolStrategy(size=4), max_task_retries=999 ) ds1.take_all() # Test disabling fault tolerance. - ds2 = base.map_batches(f, compute=ray.data.ActorPoolStrategy(4, 4), max_restarts=0) + ds2 = base.map_batches( + f, compute=ray.data.ActorPoolStrategy(size=4), max_restarts=0 + ) with pytest.raises(ray.exceptions.RayActorError): ds2.take_all() diff --git a/python/ray/data/tests/test_tensor.py b/python/ray/data/tests/test_tensor.py index adaad5c083fc..1d3fc7e5bd61 100644 --- a/python/ray/data/tests/test_tensor.py +++ b/python/ray/data/tests/test_tensor.py @@ -17,6 +17,7 @@ ) from ray.data.tests.conftest import * # noqa from ray.tests.conftest import * # noqa +from ray.data.tests.util import extract_values # https://github.com/ray-project/ray/issues/33695 @@ -39,28 +40,36 @@ def test_tensors_basic(ray_start_regular_shared): "Datastream(\n" " num_blocks=6,\n" " num_rows=6,\n" - " schema={__value__: numpy.ndarray(shape=(3, 5), dtype=int64)}\n" + " schema={data: numpy.ndarray(shape=(3, 5), dtype=int64)}\n" ")" ) assert ds.size_bytes() == 5 * 3 * 6 * 8 # Test row iterator yields tensors. 
for tensor in ds.iter_rows(): + tensor = tensor["data"] assert isinstance(tensor, np.ndarray) assert tensor.shape == tensor_shape # Test batch iterator yields tensors. for tensor in ds.iter_batches(batch_size=2): + tensor = tensor["data"] assert isinstance(tensor, np.ndarray) assert tensor.shape == (2,) + tensor_shape # Native format. def np_mapper(arr): + if "data" in arr: + arr = arr["data"] + else: + arr = arr["id"] assert isinstance(arr, np.ndarray) - return arr + 1 + return {"data": arr + 1} res = ray.data.range_tensor(2, shape=(2, 2)).map(np_mapper).take() - np.testing.assert_equal(res, [np.ones((2, 2)), 2 * np.ones((2, 2))]) + np.testing.assert_equal( + extract_values("data", res), [np.ones((2, 2)), 2 * np.ones((2, 2))] + ) # Explicit NumPy format. res = ( @@ -68,7 +77,9 @@ def np_mapper(arr): .map_batches(np_mapper, batch_format="numpy") .take() ) - np.testing.assert_equal(res, [np.ones((2, 2)), 2 * np.ones((2, 2))]) + np.testing.assert_equal( + extract_values("data", res), [np.ones((2, 2)), 2 * np.ones((2, 2))] + ) # Pandas conversion. def pd_mapper(df): @@ -76,7 +87,7 @@ def pd_mapper(df): return df + 2 res = ray.data.range_tensor(2).map_batches(pd_mapper, batch_format="pandas").take() - np.testing.assert_equal(res, [np.array([2]), np.array([3])]) + np.testing.assert_equal(extract_values("data", res), [np.array([2]), np.array([3])]) # Arrow columns in NumPy format. 
def multi_mapper(col_arrs): @@ -99,7 +110,7 @@ def multi_mapper(col_arrs): .take() ) np.testing.assert_equal( - [r.as_pydict() for r in res], + res, [ {"a": 2, "b": 5.0, "c": np.array([2, 3])}, {"a": 3, "b": 6.0, "c": np.array([4, 5])}, @@ -121,7 +132,7 @@ def single_mapper(col_arrs): .take() ) np.testing.assert_equal( - [r.as_pydict() for r in res], + res, [ {"c": np.array([2, 3])}, {"c": np.array([4, 5])}, @@ -156,7 +167,7 @@ def multi_mapper(col_arrs): .take() ) np.testing.assert_equal( - [r.as_pydict() for r in res], + res, [ {"a": 2, "b": 5.0, "c": np.array([2, 3])}, {"a": 3, "b": 6.0, "c": np.array([4, 5])}, @@ -178,7 +189,7 @@ def single_mapper(col_arrs): .take() ) np.testing.assert_equal( - [r.as_pydict() for r in res], + res, [ {"c": np.array([2, 3])}, {"c": np.array([4, 5])}, @@ -189,31 +200,26 @@ def single_mapper(col_arrs): # Simple dataset in NumPy format. def mapper(arr): arr = np_mapper(arr) - return arr.tolist() + return arr res = ( ray.data.range(10, parallelism=2) .map_batches(mapper, batch_format="numpy") .take() ) - assert res == list(range(1, 11)) + assert extract_values("data", res) == list(range(1, 11)) def test_batch_tensors(ray_start_regular_shared): import torch ds = ray.data.from_items([torch.tensor([0, 0]) for _ in range(40)], parallelism=40) - res = ( - "MaterializedDatastream(\n" - " num_blocks=40,\n" - " num_rows=40,\n" - " schema=\n)" - ) + res = "MaterializedDatastream(num_blocks=40, num_rows=40, schema={item: object})" assert str(ds) == res, str(ds) with pytest.raises(pa.lib.ArrowInvalid): next(ds.iter_batches(batch_format="pyarrow")) df = next(ds.iter_batches(batch_format="pandas")) - assert df.to_dict().keys() == {"value"} + assert df.to_dict().keys() == {"item"} def test_tensors_shuffle(ray_start_regular_shared): @@ -221,8 +227,8 @@ def test_tensors_shuffle(ray_start_regular_shared): tensor_shape = (3, 5) ds = ray.data.range_tensor(6, shape=tensor_shape) shuffled_ds = ds.random_shuffle() - shuffled = shuffled_ds.take() - base = 
ds.take() + shuffled = extract_values("data", shuffled_ds.take()) + base = extract_values("data", ds.take()) np.testing.assert_raises( AssertionError, np.testing.assert_equal, @@ -239,8 +245,8 @@ def test_tensors_shuffle(ray_start_regular_shared): ds = ray.data.range_tensor(6, shape=tensor_shape) ds = ds.map_batches(lambda df: df, batch_format="pandas") shuffled_ds = ds.random_shuffle() - shuffled = shuffled_ds.take() - base = ds.take() + shuffled = extract_values("data", shuffled_ds.take()) + base = extract_values("data", ds.take()) np.testing.assert_raises( AssertionError, np.testing.assert_equal, @@ -291,39 +297,39 @@ def test_tensors_sort(ray_start_regular_shared): def test_tensors_inferred_from_map(ray_start_regular_shared): # Test map. - ds = ray.data.range(10, parallelism=10).map(lambda _: np.ones((4, 4))) + ds = ray.data.range(10, parallelism=10).map(lambda _: {"data": np.ones((4, 4))}) ds = ds.materialize() assert str(ds) == ( "MaterializedDatastream(\n" " num_blocks=10,\n" " num_rows=10,\n" - " schema={__value__: numpy.ndarray(shape=(4, 4), dtype=double)}\n" + " schema={data: numpy.ndarray(shape=(4, 4), dtype=double)}\n" ")" ) # Test map_batches. ds = ray.data.range(16, parallelism=4).map_batches( - lambda _: np.ones((3, 4, 4)), batch_size=2 + lambda _: {"data": np.ones((3, 4, 4))}, batch_size=2 ) ds = ds.materialize() assert str(ds) == ( "MaterializedDatastream(\n" " num_blocks=4,\n" " num_rows=24,\n" - " schema={__value__: numpy.ndarray(shape=(4, 4), dtype=double)}\n" + " schema={data: numpy.ndarray(shape=(4, 4), dtype=double)}\n" ")" ) # Test flat_map. 
ds = ray.data.range(10, parallelism=10).flat_map( - lambda _: [np.ones((4, 4)), np.ones((4, 4))] + lambda _: [{"data": np.ones((4, 4))}, {"data": np.ones((4, 4))}] ) ds = ds.materialize() assert str(ds) == ( "MaterializedDatastream(\n" " num_blocks=10,\n" " num_rows=20,\n" - " schema={__value__: numpy.ndarray(shape=(4, 4), dtype=double)}\n" + " schema={data: numpy.ndarray(shape=(4, 4), dtype=double)}\n" ")" ) @@ -547,7 +553,7 @@ def test_tensors_in_tables_pandas_roundtrip( arr = np.arange(num_items).reshape(shape) df = pd.DataFrame({"one": list(range(outer_dim)), "two": TensorArray(arr)}) ds = ray.data.from_pandas(df) - ds = ds.map_batches(lambda df: df + 1, batch_size=2) + ds = ds.map_batches(lambda df: df + 1, batch_size=2, batch_format="pandas") ds_df = ds.to_pandas() expected_df = df + 1 if enable_automatic_tensor_extension_cast: @@ -568,7 +574,7 @@ def test_tensors_in_tables_pandas_roundtrip_variable_shaped( outer_dim = len(arrs) df = pd.DataFrame({"one": list(range(outer_dim)), "two": TensorArray(arrs)}) ds = ray.data.from_pandas(df) - ds = ds.map_batches(lambda df: df + 1, batch_size=2) + ds = ds.map_batches(lambda df: df + 1, batch_size=2, batch_format="pandas") ds_df = ds.to_pandas() expected_df = df + 1 if enable_automatic_tensor_extension_cast: @@ -586,7 +592,7 @@ def test_tensors_in_tables_parquet_roundtrip(ray_start_regular_shared, tmp_path) arr = np.arange(num_items).reshape(shape) df = pd.DataFrame({"one": list(range(outer_dim)), "two": TensorArray(arr)}) ds = ray.data.from_pandas(df) - ds = ds.map_batches(lambda df: df + 1, batch_size=2) + ds = ds.map_batches(lambda df: df + 1, batch_size=2, batch_format="pandas") ds.write_parquet(str(tmp_path)) ds = ray.data.read_parquet(str(tmp_path)) values = [[s["one"], s["two"]] for s in ds.take()] @@ -607,7 +613,7 @@ def test_tensors_in_tables_parquet_roundtrip_variable_shaped( outer_dim = len(arrs) df = pd.DataFrame({"one": list(range(outer_dim)), "two": TensorArray(arrs)}) ds = ray.data.from_pandas(df) - ds 
= ds.map_batches(lambda df: df + 1, batch_size=2) + ds = ds.map_batches(lambda df: df + 1, batch_size=2, batch_format="pandas") ds.write_parquet(str(tmp_path)) ds = ray.data.read_parquet(str(tmp_path)) values = [[s["one"], s["two"]] for s in ds.take()] @@ -759,7 +765,9 @@ def np_deser_udf(block: pa.Table): ds = ray.data.read_parquet(str(tmp_path), _block_udf=np_deser_udf) - assert isinstance(ds.schema().field_by_name(tensor_col_name).type, ArrowTensorType) + assert isinstance( + ds.schema().base_schema.field_by_name(tensor_col_name).type, ArrowTensorType + ) values = [[s["one"], s["two"]] for s in ds.take()] expected = list(zip(list(range(outer_dim)), arr)) @@ -793,7 +801,9 @@ def _block_udf(block: pa.Table): _block_udf=_block_udf, ) - assert isinstance(ds.schema().field_by_name(tensor_col_name).type, ArrowTensorType) + assert isinstance( + ds.schema().base_schema.field_by_name(tensor_col_name).type, ArrowTensorType + ) values = [[s["one"], s["two"]] for s in ds.take()] expected = list(zip(list(range(outer_dim)), arr + 1)) @@ -859,7 +869,7 @@ def test_tensors_in_tables_iter_batches( df.loc[:, "one"] = list(df["one"].to_numpy()) df.loc[:, "two"] = list(df["two"].to_numpy()) ds = ray.data.from_pandas([df1, df2]) - batches = list(ds.iter_batches(batch_size=2)) + batches = list(ds.iter_batches(batch_size=2, batch_format="pandas")) assert len(batches) == 3 expected_batches = [df.iloc[:2], df.iloc[2:4], df.iloc[4:]] for batch, expected_batch in zip(batches, expected_batches): diff --git a/python/ray/data/tests/test_tf.py b/python/ray/data/tests/test_tf.py index 799c59547899..14b0bf781a45 100644 --- a/python/ray/data/tests/test_tf.py +++ b/python/ray/data/tests/test_tf.py @@ -6,7 +6,6 @@ import ray from ray.air import session from ray.air.config import ScalingConfig -from ray.air.constants import TENSOR_COLUMN_NAME from ray.data.preprocessors import Concatenator from ray.train.tensorflow import TensorflowTrainer @@ -186,19 +185,6 @@ def 
test_invalid_column_raises_error(self): with pytest.raises(ValueError): ds.to_tf(feature_columns="foo", label_columns="bar") - def test_simple_dataset_raises_error(self): - # `range` returns a simple dataset. - ds = ray.data.range(1) - with pytest.raises(NotImplementedError): - ds.to_tf(feature_columns="spam", label_columns="ham") - - def test_tensor_dataset_raises_error(self): - ds = ray.data.range_tensor(1) - with pytest.raises(NotImplementedError): - ds.to_tf( - feature_columns=TENSOR_COLUMN_NAME, label_columns=TENSOR_COLUMN_NAME - ) - if __name__ == "__main__": import sys diff --git a/python/ray/data/tests/test_torch.py b/python/ray/data/tests/test_torch.py index e36587437f65..e2ecad237e71 100644 --- a/python/ray/data/tests/test_torch.py +++ b/python/ray/data/tests/test_torch.py @@ -319,7 +319,7 @@ def test_iter_torch_batches_tensor_ds(ray_start_regular_shared, pipelined): for _ in range(num_epochs): iterations = [] for batch in ds.iter_torch_batches(batch_size=2): - iterations.append(batch.numpy()) + iterations.append(batch["data"].numpy()) combined_iterations = np.concatenate(iterations) np.testing.assert_array_equal(arr, combined_iterations) diff --git a/python/ray/data/tests/test_transform_pyarrow.py b/python/ray/data/tests/test_transform_pyarrow.py index 3ff1e29d3263..a8ff698312b6 100644 --- a/python/ray/data/tests/test_transform_pyarrow.py +++ b/python/ray/data/tests/test_transform_pyarrow.py @@ -378,19 +378,19 @@ def test_convert_to_pyarrow(ray_start_regular_shared, tmp_path): def test_pyarrow(ray_start_regular_shared): - ds = ray.data.range_table(5) - assert ds.map(lambda x: {"b": x["value"] + 2}).take() == [ + ds = ray.data.range(5) + assert ds.map(lambda x: {"b": x["id"] + 2}).take() == [ {"b": 2}, {"b": 3}, {"b": 4}, {"b": 5}, {"b": 6}, ] - assert ds.map(lambda x: {"b": x["value"] + 2}).filter( + assert ds.map(lambda x: {"b": x["id"] + 2}).filter( lambda x: x["b"] % 2 == 0 ).take() == [{"b": 2}, {"b": 4}, {"b": 6}] - assert ds.filter(lambda x: 
x["value"] == 0).flat_map( - lambda x: [{"b": x["value"] + 2}, {"b": x["value"] + 20}] + assert ds.filter(lambda x: x["id"] == 0).flat_map( + lambda x: [{"b": x["id"] + 2}, {"b": x["id"] + 20}] ).take() == [{"b": 2}, {"b": 20}] diff --git a/python/ray/data/tests/test_webdataset.py b/python/ray/data/tests/test_webdataset.py index 9771ee36d042..dd2f15fba72e 100644 --- a/python/ray/data/tests/test_webdataset.py +++ b/python/ray/data/tests/test_webdataset.py @@ -142,7 +142,8 @@ def test_webdataset_coding(ray_start_2_cpus, tmp_path): image = np.random.randint(0, 255, (100, 100, 3), dtype=np.uint8) gray = np.random.randint(0, 255, (100, 100), dtype=np.uint8) dstruct = dict(a=[1], b=dict(c=2), d="hello") - ttensor = torch.tensor([1, 2, 3]) + # Note: tensors are supported as numpy format only in strict mode. + ttensor = torch.tensor([1, 2, 3]).numpy() sample = { "__key__": "foo", @@ -180,7 +181,7 @@ def test_webdataset_coding(ray_start_2_cpus, tmp_path): assert sample["mp"]["b"]["c"] == 2 assert isinstance(sample["json"], dict) assert sample["json"]["a"] == [1] - assert isinstance(sample["pt"], torch.Tensor) + assert isinstance(sample["pt"], np.ndarray) assert sample["pt"].tolist() == [1, 2, 3] # test the format argument to the default decoder and multiple decoders diff --git a/python/ray/data/tests/util.py b/python/ray/data/tests/util.py index 16f98876d921..9c66784d19ef 100644 --- a/python/ray/data/tests/util.py +++ b/python/ray/data/tests/util.py @@ -4,6 +4,8 @@ import tempfile import ray +STRICT_MODE = ray.data.DatasetContext.get_current().strict_mode + @ray.remote class Counter: @@ -31,3 +33,27 @@ def gen_bin_files(n): to_write = str(i) * 500 fp.write(to_write.encode()) yield (temp_dir, paths) + + +def column_udf(col, udf): + def wraps(row): + return {col: udf(row[col])} + + return wraps + + +# Ex: named_values("id", [1, 2, 3]) +# Ex: named_values(["id", "id2"], [(1, 1), (2, 2), (3, 3)]) +def named_values(col_names, tuples): + output = [] + if isinstance(col_names, 
list): + for t in tuples: + output.append({name: value for (name, value) in zip(col_names, t)}) + else: + for t in tuples: + output.append({name: value for (name, value) in zip((col_names,), (t,))}) + return output + + +def extract_values(col_name, tuples): + return [t[col_name] for t in tuples] diff --git a/python/ray/train/examples/pytorch/torch_regression_example.py b/python/ray/train/examples/pytorch/torch_regression_example.py index 3354d3bd241b..94d628087269 100644 --- a/python/ray/train/examples/pytorch/torch_regression_example.py +++ b/python/ray/train/examples/pytorch/torch_regression_example.py @@ -29,7 +29,7 @@ def combine_x(batch): } ) - dataset = dataset.map_batches(combine_x) + dataset = dataset.map_batches(combine_x, batch_format="pandas") train_dataset, validation_dataset = dataset.repartition( num_blocks=4 ).train_test_split(split, shuffle=True) diff --git a/python/ray/train/huggingface/_huggingface_utils.py b/python/ray/train/huggingface/_huggingface_utils.py index 65f54e234161..838ed9f398bf 100644 --- a/python/ray/train/huggingface/_huggingface_utils.py +++ b/python/ray/train/huggingface/_huggingface_utils.py @@ -72,7 +72,7 @@ def __init__(self, dataset: DataIterator) -> None: def __iter__(self): for row in self.generate_examples_fn(**self.kwargs): - yield (0, {k: v for k, v in row.as_pydict().items()}) + yield (0, {k: v for k, v in row.items()}) def process_dataset_for_hf( diff --git a/python/ray/train/tests/lightning_test_utils.py b/python/ray/train/tests/lightning_test_utils.py index c58ae623336b..36288308cf59 100644 --- a/python/ray/train/tests/lightning_test_utils.py +++ b/python/ray/train/tests/lightning_test_utils.py @@ -14,6 +14,9 @@ def __init__(self, input_dim, output_dim, strategy="ddp") -> None: self.strategy = strategy def forward(self, input): + # Backwards compat for Ray data strict mode. 
+ if isinstance(input, dict) and len(input) == 1: + input = list(input.values())[0] return self.linear(input) def training_step(self, batch): diff --git a/python/ray/train/tests/test_base_trainer.py b/python/ray/train/tests/test_base_trainer.py index 236050653954..59ce56e73f3b 100644 --- a/python/ray/train/tests/test_base_trainer.py +++ b/python/ray/train/tests/test_base_trainer.py @@ -60,7 +60,7 @@ def fit(self, ds): self.fit_counter += 1 def transform(self, ds): - return ds.map(lambda x: x + 1) + return ds.map(lambda x: {"item": x["item"] + 1}) class DummyTrainer(BaseTrainer): @@ -102,7 +102,7 @@ def test_preprocess_datasets(ray_start_4_cpus): ctx.execution_options.preserve_order = True def training_loop(self): - assert self.datasets["my_dataset"].take() == [2, 3, 4] + assert self.datasets["my_dataset"].take_batch()["item"].tolist() == [2, 3, 4] datasets = {"my_dataset": ray.data.from_items([1, 2, 3])} trainer = DummyTrainer( @@ -144,8 +144,8 @@ def training_loop(self): # Fit was only called once. assert self.preprocessor.fit_counter == 1 # Datasets should all be transformed. - assert self.datasets["train"].take() == [2, 3, 4] - assert self.datasets["my_dataset"].take() == [2, 3, 4] + assert self.datasets["train"].take_batch()["item"].tolist() == [2, 3, 4] + assert self.datasets["my_dataset"].take_batch()["item"].tolist() == [2, 3, 4] if gen_dataset: datasets = { @@ -168,8 +168,8 @@ def training_loop(self): # Make sure fit is not called if preprocessor is already fit. assert self.preprocessor.fit_counter == 1 # Datasets should all be transformed. 
- assert self.datasets["train"].take() == [2, 3, 4] - assert self.datasets["my_dataset"].take() == [2, 3, 4] + assert self.datasets["train"].take_batch()["item"].tolist() == [2, 3, 4] + assert self.datasets["my_dataset"].take_batch()["item"].tolist() == [2, 3, 4] datasets = { "train": ray.data.from_items([1, 2, 3]), diff --git a/python/ray/train/tests/test_batch_predictor.py b/python/ray/train/tests/test_batch_predictor.py index 7b8175040108..a404ea5f0f5e 100644 --- a/python/ray/train/tests/test_batch_predictor.py +++ b/python/ray/train/tests/test_batch_predictor.py @@ -110,7 +110,7 @@ def test_separate_gpu_stage(shutdown_only): DummyPredictor, ) ds = batch_predictor.predict( - ray.data.range_table(10), + ray.data.range(10), num_gpus_per_worker=1, separate_gpu_stage=True, allow_gpu=True, @@ -118,10 +118,10 @@ def test_separate_gpu_stage(shutdown_only): stats = ds.stats() assert "Stage 1 ReadRange->DummyPreprocessor:" in stats, stats assert "Stage 2 MapBatches(ScoringWrapper):" in stats, stats - assert ds.max("value") == 36.0, ds + assert ds.max("id") == 36.0, ds ds = batch_predictor.predict( - ray.data.range_table(10), + ray.data.range(10), num_gpus_per_worker=1, separate_gpu_stage=False, allow_gpu=True, @@ -129,7 +129,7 @@ def test_separate_gpu_stage(shutdown_only): stats = ds.stats() assert "Stage 1 ReadRange:" in stats, stats assert "Stage 2 MapBatches(ScoringWrapper):" in stats, stats - assert ds.max("value") == 36.0, ds + assert ds.max("id") == 36.0, ds def test_automatic_enable_gpu_from_num_gpus_per_worker(shutdown_only): @@ -143,7 +143,7 @@ def test_automatic_enable_gpu_from_num_gpus_per_worker(shutdown_only): Checkpoint.from_dict({"factor": 2.0, PREPROCESSOR_KEY: DummyPreprocessor()}), DummyPredictor, ) - test_dataset = ray.data.range_table(4) + test_dataset = ray.data.range(4) with pytest.raises( ValueError, match="DummyPredictor does not support GPU prediction" @@ -157,7 +157,7 @@ def test_batch_prediction(): DummyPredictor, ) - test_dataset = 
ray.data.range_table(4) + test_dataset = ray.data.range(4) ds = batch_predictor.predict(test_dataset).materialize() # Check fusion occurred. assert "ReadRange->DummyPreprocessor" in ds.stats(), ds.stats() @@ -168,7 +168,7 @@ def test_batch_prediction(): 12.0, ] - test_dataset = ray.data.range_table(4) + test_dataset = ray.data.range(4) assert next( batch_predictor.predict_pipelined( test_dataset, blocks_per_window=2 @@ -406,12 +406,12 @@ def test_batch_predictor_transform_config(): def check_batch(batch): assert isinstance(batch, dict) - assert isinstance(batch["value"], np.ndarray) - assert len(batch["value"]) == batch_size + assert isinstance(batch["id"], np.ndarray) + assert len(batch["id"]) == batch_size return batch prep = BatchMapper(check_batch, batch_format="numpy", batch_size=2) - ds = ray.data.range_table(6, parallelism=1) + ds = ray.data.range(6, parallelism=1) batch_predictor = BatchPredictor.from_checkpoint( Checkpoint.from_dict({"factor": 2.0, PREPROCESSOR_KEY: prep}), @@ -421,7 +421,7 @@ def check_batch(batch): batch_predictor.predict(ds) # Pipelined case. 
- ds = ray.data.range_table(6, parallelism=1) + ds = ray.data.range(6, parallelism=1) batch_predictor.predict_pipelined(ds, blocks_per_window=1) @@ -497,7 +497,7 @@ def test_get_and_set_preprocessor(): ) assert batch_predictor.get_preprocessor() == preprocessor - test_dataset = ray.data.range_table(4) + test_dataset = ray.data.range(4) output_ds = batch_predictor.predict(test_dataset) assert output_ds.to_pandas().to_numpy().squeeze().tolist() == [ 0.0, @@ -561,26 +561,26 @@ def test_separate_gpu_stage_pipelined(shutdown_only): DummyPredictor, ) ds = batch_predictor.predict_pipelined( - ray.data.range_table(5), + ray.data.range(5), blocks_per_window=1, num_gpus_per_worker=1, separate_gpu_stage=True, allow_gpu=True, ) - out = [x["value"] for x in ds.iter_rows()] + out = [x["id"] for x in ds.iter_rows()] stats = ds.stats() assert "Stage 1 ReadRange->DummyPreprocessor:" in stats, stats assert "Stage 2 MapBatches(ScoringWrapper):" in stats, stats assert max(out) == 16.0, out ds = batch_predictor.predict_pipelined( - ray.data.range_table(5), + ray.data.range(5), blocks_per_window=1, num_gpus_per_worker=1, separate_gpu_stage=False, allow_gpu=True, ) - out = [x["value"] for x in ds.iter_rows()] + out = [x["id"] for x in ds.iter_rows()] stats = ds.stats() assert "Stage 1 ReadRange:" in stats, stats assert "Stage 2 MapBatches(ScoringWrapper):" in stats, stats diff --git a/python/ray/train/tests/test_gpu.py b/python/ray/train/tests/test_gpu.py index 1b0577a227af..6d46b0e976ab 100644 --- a/python/ray/train/tests/test_gpu.py +++ b/python/ray/train/tests/test_gpu.py @@ -371,16 +371,16 @@ def test_torch_iter_torch_batches_auto_device(ray_start_4_cpus_2_gpus, use_gpu): def train_fn(): dataset = session.get_dataset_shard("train") for batch in dataset.iter_torch_batches(dtypes=torch.float, device="cpu"): - assert str(batch.device) == "cpu" + assert str(batch["data"].device) == "cpu" # Autodetect for batch in dataset.iter_torch_batches(dtypes=torch.float): - assert str(batch.device) 
== str(train.torch.get_device()) + assert str(batch["data"].device) == str(train.torch.get_device()) dataset = ray.data.from_numpy(np.array([[1, 2, 3, 4, 5], [1, 2, 3, 4, 5]]).T) # Test that this works outside a Train function for batch in dataset.iter_torch_batches(dtypes=torch.float, device="cpu"): - assert str(batch.device) == "cpu" + assert str(batch["data"].device) == "cpu" trainer = TorchTrainer( train_fn, diff --git a/python/ray/train/tests/test_lightgbm_predictor.py b/python/ray/train/tests/test_lightgbm_predictor.py index 5a6d18f7fdce..61bf416ec4fa 100644 --- a/python/ray/train/tests/test_lightgbm_predictor.py +++ b/python/ray/train/tests/test_lightgbm_predictor.py @@ -79,7 +79,10 @@ def test_predict_batch(ray_start_4_cpus, batch_type): data_batch = _convert_pandas_to_batch_type(raw_batch, type=TYPE_TO_ENUM[batch_type]) if batch_type == np.ndarray: + # TODO(ekl) how do we fix this to work with "data" column? dataset = ray.data.from_numpy(dummy_data) + dataset = dataset.add_column("__value__", lambda b: b["data"]) + dataset = dataset.drop_columns(["data"]) elif batch_type == pd.DataFrame: dataset = ray.data.from_pandas(data_batch) elif batch_type == pa.Table: diff --git a/python/ray/train/tests/test_sklearn_predictor.py b/python/ray/train/tests/test_sklearn_predictor.py index 063b2ea79b89..c39949ab410c 100644 --- a/python/ray/train/tests/test_sklearn_predictor.py +++ b/python/ray/train/tests/test_sklearn_predictor.py @@ -87,7 +87,10 @@ def test_predict_batch(ray_start_4_cpus, batch_type): data_batch = _convert_pandas_to_batch_type(raw_batch, type=TYPE_TO_ENUM[batch_type]) if batch_type == np.ndarray: + # TODO(ekl) how do we fix this to work with "data" column? 
dataset = ray.data.from_numpy(dummy_data) + dataset = dataset.add_column("__value__", lambda b: b["data"]) + dataset = dataset.drop_columns(["data"]) elif batch_type == pd.DataFrame: dataset = ray.data.from_pandas(data_batch) elif batch_type == pa.Table: diff --git a/python/ray/train/tests/test_torch_trainer.py b/python/ray/train/tests/test_torch_trainer.py index d7a7f8ef5d81..cecbbd2c8baf 100644 --- a/python/ray/train/tests/test_torch_trainer.py +++ b/python/ray/train/tests/test_torch_trainer.py @@ -158,7 +158,10 @@ def __call__(self, x): predict_dataset = ray.data.range(9) predictions = predict_dataset.map_batches( - TorchScorer, batch_size=3, batch_format="pandas", compute="actors" + TorchScorer, + batch_size=3, + batch_format="pandas", + compute=ray.data.ActorPoolStrategy(), ) assert predictions.count() == 3 diff --git a/python/ray/train/tests/test_xgboost_predictor.py b/python/ray/train/tests/test_xgboost_predictor.py index 2ddb72c5a568..82609e637b74 100644 --- a/python/ray/train/tests/test_xgboost_predictor.py +++ b/python/ray/train/tests/test_xgboost_predictor.py @@ -68,7 +68,10 @@ def test_predict_batch(ray_start_4_cpus, batch_type): data_batch = _convert_pandas_to_batch_type(raw_batch, type=TYPE_TO_ENUM[batch_type]) if batch_type == np.ndarray: + # TODO(ekl) how do we fix this to work with "data" column? 
dataset = ray.data.from_numpy(dummy_data) + dataset = dataset.add_column("__value__", lambda b: b["data"]) + dataset = dataset.drop_columns(["data"]) elif batch_type == pd.DataFrame: dataset = ray.data.from_pandas(data_batch) elif batch_type == pa.Table: diff --git a/python/ray/workflow/tests/test_dataset.py b/python/ray/workflow/tests/test_dataset.py index 862371993ba7..acaeaf20db6d 100644 --- a/python/ray/workflow/tests/test_dataset.py +++ b/python/ray/workflow/tests/test_dataset.py @@ -18,17 +18,17 @@ def gen_dataset_1(): @ray.remote def gen_dataset_2(): - return ray.data.range_table(1000) + return ray.data.range(1000) @ray.remote def transform_dataset(in_data): - return in_data.map(lambda x: x * 2) + return in_data.map(lambda x: {"id": x["id"] * 2}) @ray.remote def transform_dataset_1(in_data): - return in_data.map(lambda r: {"v2": r["value"] * 2}) + return in_data.map(lambda r: {"v2": r["id"] * 2}) @ray.remote diff --git a/release/air_tests/air_benchmarks/mlperf-train/resnet50_ray_air.py b/release/air_tests/air_benchmarks/mlperf-train/resnet50_ray_air.py index c04b207b425a..da2e0b319c56 100644 --- a/release/air_tests/air_benchmarks/mlperf-train/resnet50_ray_air.py +++ b/release/air_tests/air_benchmarks/mlperf-train/resnet50_ray_air.py @@ -107,7 +107,9 @@ def ray_dataset_to_tf_dataset( if online_processing: # Apply online preprocessing on the decoded images, cropping and # flipping. 
- dataset = dataset.map_batches(crop_and_flip_image_batch) + dataset = dataset.map_batches( + crop_and_flip_image_batch, batch_format="pandas" + ) def to_tensor_iterator(): num_steps = 0 diff --git a/release/nightly_tests/dataset/aggregate_benchmark.py b/release/nightly_tests/dataset/aggregate_benchmark.py index a2d2a929873e..a1112a4c8c0f 100644 --- a/release/nightly_tests/dataset/aggregate_benchmark.py +++ b/release/nightly_tests/dataset/aggregate_benchmark.py @@ -2,7 +2,7 @@ import ray from ray.data.aggregate import _AggregateOnKeyBase, Max, Mean, Min, Sum -from ray.data.block import Block, KeyFn +from ray.data.block import Block from ray.data.datastream import Dataset import pyarrow.compute as pac @@ -73,7 +73,10 @@ def h2oai_q6(ds: Dataset) -> Dataset: def h2oai_q7(ds: Dataset) -> Dataset: ds = ds.groupby("id3").aggregate(Max("v1"), Min("v2")) - ds = ds.map_batches(lambda df: df.assign(result=df["max(v1)"] - df["min(v2)"])) + ds = ds.map_batches( + lambda df: df.assign(result=df["max(v1)"] - df["min(v2)"]), + batch_format="pandas", + ) return ds @@ -99,7 +102,7 @@ def merge( return (value1, value2) class Top2(_AggregateOnKeyBase): - def __init__(self, on: KeyFn): + def __init__(self, on): self._set_key_fn(on) super().__init__( init=lambda _: (float("-inf"), float("-inf")), diff --git a/release/nightly_tests/dataset/data_ingest_benchmark.py b/release/nightly_tests/dataset/data_ingest_benchmark.py index db91cd7856a3..aa2bf213259b 100644 --- a/release/nightly_tests/dataset/data_ingest_benchmark.py +++ b/release/nightly_tests/dataset/data_ingest_benchmark.py @@ -111,7 +111,7 @@ def run_ingest_streaming(dataset_size_gb, num_workers): for i in range(num_workers) ] locality_hints = ray.get([actor.get_location.remote() for actor in consumers]) - ds = ds.map_batches(lambda df: df * 2) + ds = ds.map_batches(lambda df: df * 2, batch_format="pandas") splits = ds.streaming_split(num_workers, equal=True, locality_hints=locality_hints) future = [consumers[i].consume.remote(s) 
for i, s in enumerate(splits)] ray.get(future) @@ -123,7 +123,7 @@ def run_ingest_bulk(dataset_size_gb, num_workers): ConsumingActor.options(scheduling_strategy="SPREAD").remote(i) for i in range(num_workers) ] - ds = ds.map_batches(lambda df: df * 2) + ds = ds.map_batches(lambda df: df * 2, batch_format="pandas") splits = ds.split(num_workers, equal=True, locality_hints=consumers) future = [consumers[i].consume.remote(s) for i, s in enumerate(splits)] ray.get(future) @@ -149,7 +149,11 @@ def run_ingest_dataset_pipeline(dataset_size_gb, num_workers): ConsumingActor.options(scheduling_strategy="SPREAD").remote(i) for i in range(num_workers) ] - p = ds.window(bytes_per_window=40 * GiB).repeat().map_batches(lambda df: df * 2) + p = ( + ds.window(bytes_per_window=40 * GiB) + .repeat() + .map_batches(lambda df: df * 2, batch_format="pandas") + ) splits = p.split(num_workers, equal=True, locality_hints=consumers) future = [consumers[i].consume.remote(s) for i, s in enumerate(splits)] ray.get(future) diff --git a/release/nightly_tests/dataset/dataset_random_access.py b/release/nightly_tests/dataset/dataset_random_access.py index e67c6cc24864..dfe955520496 100644 --- a/release/nightly_tests/dataset/dataset_random_access.py +++ b/release/nightly_tests/dataset/dataset_random_access.py @@ -26,8 +26,8 @@ def main(): num_workers = 400 run_time = 15 - ds = ray.data.range_table(nrow, parallelism=parallelism) - rmap = ds.to_random_access_dataset("value", num_workers=num_workers) + ds = ray.data.range(nrow, parallelism=parallelism) + rmap = ds.to_random_access_dataset("id", num_workers=num_workers) print("Multiget throughput: ", end="") start = time.time() diff --git a/release/nightly_tests/dataset/inference.py b/release/nightly_tests/dataset/inference.py index ba1e4b1d9b11..fc4b49b68264 100644 --- a/release/nightly_tests/dataset/inference.py +++ b/release/nightly_tests/dataset/inference.py @@ -58,7 +58,7 @@ def get_paths(bucket, path, max_files=100 * 1000): def preprocess(batch): 
preprocessor = Preprocessor() - return preprocessor(batch) + return {"bytes": preprocessor(batch["bytes"])} infer_initialized = False @@ -72,7 +72,7 @@ def infer(batch): model_fn = ImageModel() ndarr_obj = batch.values input_tensor_np = np.array([img.numpy() for img in ndarr_obj.reshape(-1)]) - return list(model_fn(input_tensor_np)) + return {"out": list(model_fn(input_tensor_np))} ray.init() @@ -95,7 +95,11 @@ def infer(batch): print("Inferring...") # NOTE: set a small batch size to avoid OOM on GRAM when doing inference. ds = ds.map_batches( - infer, num_gpus=0.25, batch_size=128, batch_format="pandas", compute="actors" + infer, + num_gpus=0.25, + batch_size=128, + batch_format="pandas", + compute=ray.data.ActorPoolStrategy(), ).materialize() end_time = time.time() diff --git a/release/nightly_tests/dataset/iter_tensor_batches_benchmark.py b/release/nightly_tests/dataset/iter_tensor_batches_benchmark.py index f7bc69821b0a..3419a435c985 100644 --- a/release/nightly_tests/dataset/iter_tensor_batches_benchmark.py +++ b/release/nightly_tests/dataset/iter_tensor_batches_benchmark.py @@ -67,10 +67,10 @@ def run_iter_tensor_batches_benchmark(benchmark: Benchmark, data_size_gb: int): # Add a label column. def add_label(batch): label = np.ones(shape=(len(batch), 1)) - batch["__value__"] = label + batch["label"] = label return batch - ds = ds.map_batches(add_label).materialize() + ds = ds.map_batches(add_label, batch_format="pandas").materialize() # Test iter_torch_batches() with default args. 
benchmark.run( @@ -86,7 +86,7 @@ def add_label(batch): to_tf, ds=ds, feature_columns="image", - label_columns="__value__", + label_columns="label", use_default_params=True, ) @@ -105,7 +105,7 @@ def add_label(batch): to_tf, ds=ds, feature_columns="image", - label_columns="__value__", + label_columns="label", batch_size=batch_size, ) @@ -139,7 +139,7 @@ def add_label(batch): to_tf, ds=ds, feature_columns="image", - label_columns="__value__", + label_columns="label", batch_size=batch_size, local_shuffle_buffer_size=shuffle_buffer_size, ) diff --git a/release/nightly_tests/dataset/map_batches_benchmark.py b/release/nightly_tests/dataset/map_batches_benchmark.py index 5518ceb6b27c..a106e7b8686c 100644 --- a/release/nightly_tests/dataset/map_batches_benchmark.py +++ b/release/nightly_tests/dataset/map_batches_benchmark.py @@ -72,9 +72,9 @@ def run_map_batches_benchmark(benchmark: Benchmark): # Test multiple calls of map_batches. for num_calls in num_calls_list: - for compute in ["tasks", ActorPoolStrategy(size=1)]: + for compute in [None, ActorPoolStrategy(size=1)]: batch_size = 4096 - if compute == "tasks": + if compute is None: compute_strategy = "tasks" else: compute_strategy = "actors" diff --git a/release/nightly_tests/dataset/operator_fusion_benchmark.py b/release/nightly_tests/dataset/operator_fusion_benchmark.py index bf24fcd99d2b..1156dfa33509 100644 --- a/release/nightly_tests/dataset/operator_fusion_benchmark.py +++ b/release/nightly_tests/dataset/operator_fusion_benchmark.py @@ -123,7 +123,7 @@ def _summarize_results(results: List[Dict[str, float]]) -> Dict[str, float]: "--ops-spec", type=str, default=( - '[{"op": "map_batches", "batch_size": 1024, "batch_format": "default"}]' + '[{"op": "map_batches", "batch_size": 1024, "batch_format": "pandas"}]' ), ) parser.add_argument("--target-max-block-size", type=int, default=None) diff --git a/release/nightly_tests/dataset/read_tfrecords_benchmark.py b/release/nightly_tests/dataset/read_tfrecords_benchmark.py index 
358a92f7ce4b..6cfd817b4ceb 100644 --- a/release/nightly_tests/dataset/read_tfrecords_benchmark.py +++ b/release/nightly_tests/dataset/read_tfrecords_benchmark.py @@ -64,7 +64,7 @@ def generate_features(batch): features = {k: v for (k, v) in features.items() if len(v) > 0} return pa.table(features) - ds = ray.data.range(num_rows).map_batches(generate_features) + ds = ray.data.range(num_rows).map_batches(generate_features, batch_format="pandas") tfrecords_dir = tempfile.mkdtemp() ds.write_tfrecords(tfrecords_dir) return tfrecords_dir diff --git a/release/nightly_tests/dataset/sort.py b/release/nightly_tests/dataset/sort.py index 9dccf85bc2a4..658725b85c57 100644 --- a/release/nightly_tests/dataset/sort.py +++ b/release/nightly_tests/dataset/sort.py @@ -10,14 +10,13 @@ import ray from ray._private.internal_api import memory_summary -from ray.data._internal.arrow_block import ArrowRow from ray.data._internal.util import _check_pyarrow_version from ray.data.block import Block, BlockMetadata from ray.data.context import DataContext from ray.data.datasource import Datasource, ReadTask -class RandomIntRowDatasource(Datasource[ArrowRow]): +class RandomIntRowDatasource(Datasource): """An example datasource that generates rows with random int64 columns. 
Examples: diff --git a/rllib/algorithms/algorithm.py b/rllib/algorithms/algorithm.py index 90595bb79c9c..7473054b1d82 100644 --- a/rllib/algorithms/algorithm.py +++ b/rllib/algorithms/algorithm.py @@ -672,7 +672,9 @@ def setup(self, config: AlgorithmConfig) -> None: parallelism = self.evaluation_config.evaluation_num_workers or 1 batch_size = max(ds.count() // parallelism, 1) self.evaluation_dataset = ds.map_batches( - remove_time_dim, batch_size=batch_size + remove_time_dim, + batch_size=batch_size, + batch_format="pandas", ) logger.info("Evaluation dataset created") diff --git a/rllib/offline/dataset_reader.py b/rllib/offline/dataset_reader.py index 14d4c7aeb062..4dabf2050c03 100644 --- a/rllib/offline/dataset_reader.py +++ b/rllib/offline/dataset_reader.py @@ -246,7 +246,7 @@ def next(self) -> SampleBatchType: ret = [] count = 0 while count < self.batch_size: - d = next(self._iter).as_pydict() + d = next(self._iter) # Columns like obs are compressed when written by DatasetWriter. d = from_json_data(d, self._ioctx.worker) count += d.count diff --git a/rllib/offline/estimators/direct_method.py b/rllib/offline/estimators/direct_method.py index ecf902c4c9b5..c735b93a5e1b 100644 --- a/rllib/offline/estimators/direct_method.py +++ b/rllib/offline/estimators/direct_method.py @@ -157,6 +157,7 @@ def estimate_on_dataset( updated_ds = dataset.map_batches( compute_q_and_v_values, batch_size=batch_size, + batch_format="pandas", fn_kwargs={ "model_class": self.model.__class__, "model_state": self.model.get_state(), diff --git a/rllib/offline/estimators/doubly_robust.py b/rllib/offline/estimators/doubly_robust.py index 53f6387df5d1..d98028023660 100644 --- a/rllib/offline/estimators/doubly_robust.py +++ b/rllib/offline/estimators/doubly_robust.py @@ -199,6 +199,7 @@ def estimate_on_dataset( updated_ds = dataset.map_batches( compute_is_weights, batch_size=batch_size, + batch_format="pandas", fn_kwargs={ "policy_state": self.policy.get_state(), "estimator_class": 
self.__class__, @@ -210,6 +211,7 @@ def estimate_on_dataset( updated_ds = updated_ds.map_batches( compute_q_and_v_values, batch_size=batch_size, + batch_format="pandas", fn_kwargs={ "model_class": self.model.__class__, "model_state": self.model.get_state(), @@ -229,6 +231,7 @@ def compute_v_target(batch: pd.DataFrame, normalizer: float = 1.0): updated_ds = updated_ds.map_batches( compute_v_target, batch_size=batch_size, + batch_format="pandas", fn_kwargs={"normalizer": normalizer}, ) diff --git a/rllib/offline/estimators/importance_sampling.py b/rllib/offline/estimators/importance_sampling.py index 500cf9e147e4..ee3b5909349c 100644 --- a/rllib/offline/estimators/importance_sampling.py +++ b/rllib/offline/estimators/importance_sampling.py @@ -99,6 +99,7 @@ def estimate_on_dataset( updated_ds = dataset.map_batches( compute_is_weights, batch_size=batch_size, + batch_format="pandas", fn_kwargs={ "policy_state": self.policy.get_state(), "estimator_class": self.__class__, diff --git a/rllib/offline/estimators/weighted_importance_sampling.py b/rllib/offline/estimators/weighted_importance_sampling.py index 2bd5d566e525..5571b085c2a7 100644 --- a/rllib/offline/estimators/weighted_importance_sampling.py +++ b/rllib/offline/estimators/weighted_importance_sampling.py @@ -155,6 +155,7 @@ def estimate_on_dataset( updated_ds = dataset.map_batches( compute_is_weights, batch_size=batch_size, + batch_format="pandas", fn_kwargs={ "policy_state": self.policy.get_state(), "estimator_class": self.__class__, diff --git a/rllib/offline/feature_importance.py b/rllib/offline/feature_importance.py index 76067449c776..61adef81f60f 100644 --- a/rllib/offline/feature_importance.py +++ b/rllib/offline/feature_importance.py @@ -92,11 +92,15 @@ def get_feature_importance_on_index( difference between the expected output and the output due to the perturbation. 
""" perturbed_ds = dataset.map_batches( - perturb_fn, batch_size=batch_size, fn_kwargs={"index": index} + perturb_fn, + batch_size=batch_size, + batch_format="pandas", + fn_kwargs={"index": index}, ) perturbed_actions = perturbed_ds.map_batches( _compute_actions, batch_size=batch_size, + batch_format="pandas", fn_kwargs={ "output_key": "perturbed_actions", "input_key": "perturbed_obs", @@ -110,7 +114,9 @@ def delta_fn(batch): batch["delta"] = np.abs(batch["ref_actions"] - batch["perturbed_actions"]) return batch - delta = perturbed_actions.map_batches(delta_fn, batch_size=batch_size) + delta = perturbed_actions.map_batches( + delta_fn, batch_size=batch_size, batch_format="pandas" + ) return delta From 4a239eace289ddbbe66ecc37f4b2cba970a2cd57 Mon Sep 17 00:00:00 2001 From: Jiajun Yao Date: Fri, 28 Apr 2023 15:23:06 -0700 Subject: [PATCH 154/424] Use NodeAffinitySchedulingStrategy instead of node ip for chaos test (#34868) Use NodeAffinitySchedulingStrategy instead of node ip for chaos test Signed-off-by: Jiajun Yao --- python/ray/_private/test_utils.py | 8 +++++--- release/nightly_tests/chaos_test/test_chaos_basic.py | 10 ++++++++-- 2 files changed, 13 insertions(+), 5 deletions(-) diff --git a/python/ray/_private/test_utils.py b/python/ray/_private/test_utils.py index f933844823ce..122787c6050d 100644 --- a/python/ray/_private/test_utils.py +++ b/python/ray/_private/test_utils.py @@ -51,6 +51,7 @@ gcs_service_pb2_grpc, ) from ray.util.queue import Empty, Queue, _QueueActor +from ray.util.scheduling_strategies import NodeAffinitySchedulingStrategy logger = logging.getLogger(__name__) @@ -1457,11 +1458,12 @@ def _get_alive_nodes(self, nodes): alive_nodes += 1 return alive_nodes - head_node_ip = ray._private.worker.global_worker.node_ip_address - head_node_id = ray._private.worker.global_worker.current_node_id.hex() + head_node_id = ray.get_runtime_context().get_node_id() # Schedule the actor on the current node. 
node_killer = NodeKillerActor.options( - resources={f"node:{head_node_ip}": 0.001}, + scheduling_strategy=NodeAffinitySchedulingStrategy( + node_id=head_node_id, soft=False + ), namespace=namespace, name="node_killer", lifetime=lifetime, diff --git a/release/nightly_tests/chaos_test/test_chaos_basic.py b/release/nightly_tests/chaos_test/test_chaos_basic.py index 5c53e5959ba7..dd8213e2ec63 100644 --- a/release/nightly_tests/chaos_test/test_chaos_basic.py +++ b/release/nightly_tests/chaos_test/test_chaos_basic.py @@ -11,6 +11,7 @@ import ray from ray._private.test_utils import monitor_memory_usage, wait_for_condition from ray.data._internal.progress_bar import ProgressBar +from ray.util.scheduling_strategies import NodeAffinitySchedulingStrategy def run_task_workload(total_num_cpus, smoke): @@ -87,9 +88,13 @@ def add(self, letter): if smoke: multiplier = 1 TOTAL_TASKS = int(300 * multiplier) - current_node_ip = ray._private.worker.global_worker.node_ip_address + head_node_id = ray.get_runtime_context().get_node_id() db_actors = [ - DBActor.options(resources={f"node:{current_node_ip}": 0.001}).remote() + DBActor.options( + scheduling_strategy=NodeAffinitySchedulingStrategy( + node_id=head_node_id, soft=False + ) + ).remote() for _ in range(NUM_CPUS) ] @@ -186,6 +191,7 @@ def main(): print("Warm up... Prestarting workers if necessary.") start = time.time() workload(total_num_cpus, args.smoke) + print(f"Runtime when warm up: {time.time() - start}") # Step 2 print("Running without failures") From 98aff445b106042c9639ff1ad50e809ccabcf288 Mon Sep 17 00:00:00 2001 From: Archit Kulkarni Date: Fri, 28 Apr 2023 16:37:32 -0700 Subject: [PATCH 155/424] [Doc] Remove unused "help wanted" tag from doc (#34582) The doc links to a tag which has 0 issues: https://github.com/ray-project/ray/issues?q=is%3Aopen+is%3Aissue+label%3A%22help+wanted%22 This PR removes the link. 
Signed-off-by: Archit Kulkarni --- doc/source/ray-contribute/getting-involved.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/ray-contribute/getting-involved.rst b/doc/source/ray-contribute/getting-involved.rst index 5b882ca9f26d..54cb8e8bb2d0 100644 --- a/doc/source/ray-contribute/getting-involved.rst +++ b/doc/source/ray-contribute/getting-involved.rst @@ -34,7 +34,7 @@ What can I work on? ------------------- We use Github to track issues, feature requests, and bugs. Take a look at the -ones labeled `"good first issue" `__ and `"help wanted" `__ for a place to start. +ones labeled `"good first issue" `__ for a place to start. Setting up your development environment --------------------------------------- From 16bcf1c7fdf737202439a0fa3ac19dc27ba0943c Mon Sep 17 00:00:00 2001 From: Jiajun Yao Date: Fri, 28 Apr 2023 17:12:20 -0700 Subject: [PATCH 156/424] Replace deprecated usage of get_runtime_context().node_id (#34874) get_runtime_context().node_id is deprecated, use get_runtime_context().get_node_id() instead. 
Signed-off-by: Jiajun Yao --- python/ray/tests/test_object_manager.py | 2 +- python/ray/tests/test_runtime_env_working_dir_remote_uri.py | 2 +- python/ray/tests/test_scheduling_2.py | 6 +++--- python/ray/tests/test_worker_capping.py | 4 ++-- python/ray/workflow/tests/test_error_handling.py | 2 +- release/jobs_tests/workloads/jobs_check_cuda_available.py | 2 +- release/jobs_tests/workloads/jobs_remote_multi_node.py | 2 +- release/nightly_tests/stress_tests/test_state_api_scale.py | 2 +- 8 files changed, 11 insertions(+), 11 deletions(-) diff --git a/python/ray/tests/test_object_manager.py b/python/ray/tests/test_object_manager.py index 3350783effae..b8eecc32bbbc 100644 --- a/python/ray/tests/test_object_manager.py +++ b/python/ray/tests/test_object_manager.py @@ -588,7 +588,7 @@ def test_pull_bundle_deadlock(ray_start_cluster): @ray.remote(num_cpus=0) def get_node_id(): - return ray.get_runtime_context().node_id + return ray.get_runtime_context().get_node_id() worker_node_1_id = ray.get( get_node_id.options(resources={"worker_node_1": 0.1}).remote() diff --git a/python/ray/tests/test_runtime_env_working_dir_remote_uri.py b/python/ray/tests/test_runtime_env_working_dir_remote_uri.py index 6494dd96c33c..9f5a59cbd855 100644 --- a/python/ray/tests/test_runtime_env_working_dir_remote_uri.py +++ b/python/ray/tests/test_runtime_env_working_dir_remote_uri.py @@ -97,7 +97,7 @@ def check_and_get_node_id(self): import test_module test_module.one() - return ray.get_runtime_context().node_id + return ray.get_runtime_context().get_node_id() num_cpus = int(ray.available_resources()["CPU"]) actors = [A.remote() for _ in range(num_cpus)] diff --git a/python/ray/tests/test_scheduling_2.py b/python/ray/tests/test_scheduling_2.py index b62edf36c6f6..0dcdde5c2ecd 100644 --- a/python/ray/tests/test_scheduling_2.py +++ b/python/ray/tests/test_scheduling_2.py @@ -568,7 +568,7 @@ def test_demand_report_for_node_affinity_scheduling_strategy( @ray.remote(num_cpus=1) def f(sleep_s): 
time.sleep(sleep_s) - return ray.get_runtime_context().node_id + return ray.get_runtime_context().get_node_id() worker_node_id = ray.get(f.remote(0)) @@ -713,13 +713,13 @@ def test_data_locality_spilled_objects( def f(): return ( np.zeros(50 * 1024 * 1024, dtype=np.uint8), - ray.runtime_context.get_runtime_context().node_id, + ray.runtime_context.get_runtime_context().get_node_id(), ) @ray.remote def check_locality(x): _, node_id = x - assert node_id == ray.runtime_context.get_runtime_context().node_id + assert node_id == ray.runtime_context.get_runtime_context().get_node_id() # Check locality works when dependent task is already submitted by the time # the upstream task finishes. diff --git a/python/ray/tests/test_worker_capping.py b/python/ray/tests/test_worker_capping.py index 9fc61ed9f0c2..128ea8f14765 100644 --- a/python/ray/tests/test_worker_capping.py +++ b/python/ray/tests/test_worker_capping.py @@ -211,7 +211,7 @@ def get(self): @ray.remote def get_node_id(): - return ray.get_runtime_context().node_id + return ray.get_runtime_context().get_node_id() @ray.remote def func(i, counter): @@ -220,7 +220,7 @@ def func(i, counter): while True: time.sleep(1) else: - return ray.get_runtime_context().node_id + return ray.get_runtime_context().get_node_id() refs = [func.remote(i, counter) for i in range(2)] diff --git a/python/ray/workflow/tests/test_error_handling.py b/python/ray/workflow/tests/test_error_handling.py index 3c0a02f03106..3fbf9a8e2c46 100644 --- a/python/ray/workflow/tests/test_error_handling.py +++ b/python/ray/workflow/tests/test_error_handling.py @@ -165,7 +165,7 @@ def test_disable_auto_lineage_reconstruction(ray_start_cluster, tmp_path): @ray.remote def get_node_id(): - return ray.get_runtime_context().node_id + return ray.get_runtime_context().get_node_id() lock_path = str(tmp_path / "lock") diff --git a/release/jobs_tests/workloads/jobs_check_cuda_available.py b/release/jobs_tests/workloads/jobs_check_cuda_available.py index 
7489cae88afe..06205c908759 100644 --- a/release/jobs_tests/workloads/jobs_check_cuda_available.py +++ b/release/jobs_tests/workloads/jobs_check_cuda_available.py @@ -37,7 +37,7 @@ def f(): @ray.remote(num_cpus=1, scheduling_strategy="SPREAD") def get_node_id(): - return ray.get_runtime_context().node_id + return ray.get_runtime_context().get_node_id() node_ids = set(ray.get([get_node_id.remote() for _ in range(100)])) diff --git a/release/jobs_tests/workloads/jobs_remote_multi_node.py b/release/jobs_tests/workloads/jobs_remote_multi_node.py index 0d5c6a2b2677..bf7169ee4d2d 100644 --- a/release/jobs_tests/workloads/jobs_remote_multi_node.py +++ b/release/jobs_tests/workloads/jobs_remote_multi_node.py @@ -23,7 +23,7 @@ @ray.remote(num_cpus=1) def get_node_id(): - return ray.get_runtime_context().node_id + return ray.get_runtime_context().get_node_id() # Allow one fewer node in case a node fails to come up. diff --git a/release/nightly_tests/stress_tests/test_state_api_scale.py b/release/nightly_tests/stress_tests/test_state_api_scale.py index f6ef89ecb0fc..a7442e05cc8d 100644 --- a/release/nightly_tests/stress_tests/test_state_api_scale.py +++ b/release/nightly_tests/stress_tests/test_state_api_scale.py @@ -262,7 +262,7 @@ def write_log(self, log_file_size_byte: int): log_file_size_byte -= n sys.stdout.flush() - return ctx.hexdigest(), ray.get_runtime_context().node_id.hex() + return ctx.hexdigest(), ray.get_runtime_context().get_node_id() actor = LogActor.remote() expected_hash, node_id = ray.get( From 2713352f4e219cd191c0c01a43f0ac7ad934f488 Mon Sep 17 00:00:00 2001 From: Max Pumperla Date: Sat, 29 Apr 2023 02:29:47 +0200 Subject: [PATCH 157/424] [docs] sphinx design 4/N (#34775) --- .../references/ray-cluster-configuration.rst | 40 +- doc/source/data/creating-datastreams.rst | 860 +++++++++--------- doc/source/data/examples/ocr_example.ipynb | 340 ++----- doc/source/ray-core/actors.rst | 446 ++++----- .../ray-core/actors/concurrency_group_api.rst | 232 ++--- 
doc/source/ray-core/actors/named-actors.rst | 228 ++--- doc/source/ray-core/actors/task-orders.rst | 156 ++-- doc/source/ray-core/starting-ray.rst | 196 ++-- doc/source/ray-overview/getting-started.md | 24 +- doc/source/ray-overview/installation.rst | 71 +- 10 files changed, 1223 insertions(+), 1370 deletions(-) diff --git a/doc/source/cluster/vms/references/ray-cluster-configuration.rst b/doc/source/cluster/vms/references/ray-cluster-configuration.rst index b9e6f2f974b6..5e5a154b666e 100644 --- a/doc/source/cluster/vms/references/ray-cluster-configuration.rst +++ b/doc/source/cluster/vms/references/ray-cluster-configuration.rst @@ -1140,33 +1140,37 @@ A list of commands to run to set up worker nodes of this type. These commands wi ``available_node_types..node_type.resources.memory`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -.. tabbed:: AWS - The memory in bytes allocated for python worker heap memory on the node. - If not configured, Autoscaler will automatically detect the amount of RAM on - the node for AWS/Kubernetes and allocate 70% of it for the heap. +.. tab-set:: + + .. tab-item:: AWS + + The memory in bytes allocated for python worker heap memory on the node. + If not configured, Autoscaler will automatically detect the amount of RAM on + the node for AWS/Kubernetes and allocate 70% of it for the heap. + + * **Required:** No + * **Importance:** Low + * **Type:** Integer - * **Required:** No - * **Importance:** Low - * **Type:** Integer + .. tab-item:: Azure -.. tabbed:: Azure + The memory in bytes allocated for python worker heap memory on the node. - The memory in bytes allocated for python worker heap memory on the node. + * **Required:** No + * **Importance:** High + * **Type:** Integer - * **Required:** No - * **Importance:** High - * **Type:** Integer + .. tab-item:: GCP -.. tabbed:: GCP + The memory in bytes allocated for python worker heap memory on the node. 
- The memory in bytes allocated for python worker heap memory on the node. + * **Required:** No + * **Importance:** High + * **Type:** Integer - * **Required:** No - * **Importance:** High - * **Type:** Integer - .. _cluster-configuration-object-store-memory: +.. _cluster-configuration-object-store-memory: ``available_node_types..node_type.resources.object-store-memory`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/doc/source/data/creating-datastreams.rst b/doc/source/data/creating-datastreams.rst index 882b8b5a97d2..c9eb670e1ede 100644 --- a/doc/source/data/creating-datastreams.rst +++ b/doc/source/data/creating-datastreams.rst @@ -22,34 +22,36 @@ if you're interested in rolling your own integration! Generating Synthetic Data ------------------------- -.. tabbed:: Int Range +.. tab-set:: - Create a ``Datastream`` from a range of integers. + .. tab-item:: Int Range - .. literalinclude:: ./doc_code/creating_datastreams.py - :language: python - :start-after: __gen_synth_int_range_begin__ - :end-before: __gen_synth_int_range_end__ + Create a ``Datastream`` from a range of integers. -.. tabbed:: Tabular Range + .. literalinclude:: ./doc_code/creating_datastreams.py + :language: python + :start-after: __gen_synth_int_range_begin__ + :end-before: __gen_synth_int_range_end__ - Create an Arrow (tabular) ``Datastream`` from a range of integers, - with a single column containing this integer range. + .. tab-item:: Tabular Range - .. literalinclude:: ./doc_code/creating_datastreams.py - :language: python - :start-after: __gen_synth_tabular_range_begin__ - :end-before: __gen_synth_tabular_range_end__ + Create an Arrow (tabular) ``Datastream`` from a range of integers, + with a single column containing this integer range. -.. tabbed:: Tensor Range + .. 
literalinclude:: ./doc_code/creating_datastreams.py + :language: python + :start-after: __gen_synth_tabular_range_begin__ + :end-before: __gen_synth_tabular_range_end__ - Create a tensor datastream from a range of integers, packing this integer range into - tensors of the provided shape. + .. tab-item:: Tensor Range - .. literalinclude:: ./doc_code/creating_datastreams.py - :language: python - :start-after: __gen_synth_tensor_range_begin__ - :end-before: __gen_synth_tensor_range_end__ + Create a tensor datastream from a range of integers, packing this integer range into + tensors of the provided shape. + + .. literalinclude:: ./doc_code/creating_datastreams.py + :language: python + :start-after: __gen_synth_tensor_range_begin__ + :end-before: __gen_synth_tensor_range_end__ .. _datastream_reading_from_storage: @@ -72,141 +74,143 @@ will be read in parallel. Supported File Formats ====================== -.. tabbed:: Parquet +.. tab-set:: + + .. tab-item:: Parquet - Read Parquet files into a tabular ``Datastream``. The Parquet data will be read into - `Arrow Table `__ - blocks. Although this simple example demonstrates reading a single file, note that - Datastreams can also read directories of Parquet files. We also support reading partitioned - Parquet datasets with partition column values pulled from the file paths. + Read Parquet files into a tabular ``Datastream``. The Parquet data will be read into + `Arrow Table `__ + blocks. Although this simple example demonstrates reading a single file, note that + Datastreams can also read directories of Parquet files. We also support reading partitioned + Parquet datasets with partition column values pulled from the file paths. - .. literalinclude:: ./doc_code/creating_datastreams.py - :language: python - :start-after: __read_parquet_begin__ - :end-before: __read_parquet_end__ + .. 
literalinclude:: ./doc_code/creating_datastreams.py + :language: python + :start-after: __read_parquet_begin__ + :end-before: __read_parquet_end__ - Datastreams' Parquet reader also supports projection and filter pushdown, allowing column - selection and row filtering to be pushed down to the file scan. For column selection, - unselected columns will never be read from the file. + Datastreams' Parquet reader also supports projection and filter pushdown, allowing column + selection and row filtering to be pushed down to the file scan. For column selection, + unselected columns will never be read from the file. - .. literalinclude:: ./doc_code/creating_datastreams.py - :language: python - :start-after: __read_parquet_pushdown_begin__ - :end-before: __read_parquet_pushdown_end__ + .. literalinclude:: ./doc_code/creating_datastreams.py + :language: python + :start-after: __read_parquet_pushdown_begin__ + :end-before: __read_parquet_pushdown_end__ - See the API docs for :func:`read_parquet() `. + See the API docs for :func:`read_parquet() `. -.. tabbed:: CSV + .. tab-item:: CSV - Read CSV files into a tabular ``Datastream``. The CSV data will be read into - `Arrow Table `__ - blocks. Although this simple example demonstrates reading a single file, note that - Datastreams can also read directories of CSV files, with one tabular block created - per file. + Read CSV files into a tabular ``Datastream``. The CSV data will be read into + `Arrow Table `__ + blocks. Although this simple example demonstrates reading a single file, note that + Datastreams can also read directories of CSV files, with one tabular block created + per file. - .. literalinclude:: ./doc_code/creating_datastreams.py - :language: python - :start-after: __read_csv_begin__ - :end-before: __read_csv_end__ + .. literalinclude:: ./doc_code/creating_datastreams.py + :language: python + :start-after: __read_csv_begin__ + :end-before: __read_csv_end__ - See the API docs for :func:`read_csv() `. 
+ See the API docs for :func:`read_csv() `. -.. tabbed:: JSON + .. tab-item:: JSON - Read JSON files into a tabular ``Datastream``. The JSON data will be read into - `Arrow Table `__ - blocks. Although this simple example demonstrates reading a single file, note that - Datastreams can also read directories of JSON files, with one tabular block created - per file. + Read JSON files into a tabular ``Datastream``. The JSON data will be read into + `Arrow Table `__ + blocks. Although this simple example demonstrates reading a single file, note that + Datastreams can also read directories of JSON files, with one tabular block created + per file. - Currently, only newline-delimited JSON (NDJSON) is supported. + Currently, only newline-delimited JSON (NDJSON) is supported. - .. literalinclude:: ./doc_code/creating_datastreams.py - :language: python - :start-after: __read_json_begin__ - :end-before: __read_json_end__ + .. literalinclude:: ./doc_code/creating_datastreams.py + :language: python + :start-after: __read_json_begin__ + :end-before: __read_json_end__ - See the API docs for :func:`read_json() `. + See the API docs for :func:`read_json() `. -.. tabbed:: NumPy + .. tab-item:: NumPy - Read NumPy files into a tensor ``Datastream``. The NumPy ndarray data will be read into - single-column - `Arrow Table `__ - blocks using our - :class:`tensor extension type `, - treating the outermost ndarray dimension as the row dimension. See our - :ref:`tensor data guide ` for more information on working - with tensors in Datastreams. Although this simple example demonstrates reading a single - file, note that Datastreams can also read directories of NumPy files, with one tensor - block created per file. + Read NumPy files into a tensor ``Datastream``. The NumPy ndarray data will be read into + single-column + `Arrow Table `__ + blocks using our + :class:`tensor extension type `, + treating the outermost ndarray dimension as the row dimension. 
See our + :ref:`tensor data guide ` for more information on working + with tensors in Datastreams. Although this simple example demonstrates reading a single + file, note that Datastreams can also read directories of NumPy files, with one tensor + block created per file. - .. literalinclude:: ./doc_code/creating_datastreams.py - :language: python - :start-after: __read_numpy_begin__ - :end-before: __read_numpy_end__ + .. literalinclude:: ./doc_code/creating_datastreams.py + :language: python + :start-after: __read_numpy_begin__ + :end-before: __read_numpy_end__ - See the API docs for :func:`read_numpy() `. + See the API docs for :func:`read_numpy() `. -.. tabbed:: Text + .. tab-item:: Text - Read text files into a ``Datastream``. Each line in each text file will be treated as a - row in the datastream, resulting in a list-of-strings block being created for each text - file. + Read text files into a ``Datastream``. Each line in each text file will be treated as a + row in the datastream, resulting in a list-of-strings block being created for each text + file. - .. literalinclude:: ./doc_code/creating_datastreams.py - :language: python - :start-after: __read_text_begin__ - :end-before: __read_text_end__ + .. literalinclude:: ./doc_code/creating_datastreams.py + :language: python + :start-after: __read_text_begin__ + :end-before: __read_text_end__ - See the API docs for :func:`read_text() `. + See the API docs for :func:`read_text() `. -.. tabbed:: Images + .. tab-item:: Images - Call :func:`~ray.data.read_images` to read images into a :class:`~ray.data.Datastream`. + Call :func:`~ray.data.read_images` to read images into a :class:`~ray.data.Datastream`. - This function stores image data in single-column - `Arrow Table `__ - blocks using the - :class:`tensor extension type `. - For more information on working with tensors in Datastreams, read the - :ref:`tensor data guide `. 
+ This function stores image data in single-column + `Arrow Table `__ + blocks using the + :class:`tensor extension type `. + For more information on working with tensors in Datastreams, read the + :ref:`tensor data guide `. - .. literalinclude:: ./doc_code/creating_datastreams.py - :language: python - :start-after: __read_images_begin__ - :end-before: __read_images_end__ + .. literalinclude:: ./doc_code/creating_datastreams.py + :language: python + :start-after: __read_images_begin__ + :end-before: __read_images_end__ -.. tabbed:: Binary + .. tab-item:: Binary - Read binary files into a ``Datastream``. Each binary file will be treated as a single row - of opaque bytes. These bytes can be decoded into tensor, tabular, text, or any other - kind of data using :meth:`~ray.data.Datastream.map_batches` to apply a per-row decoding - :ref:`user-defined function `. + Read binary files into a ``Datastream``. Each binary file will be treated as a single row + of opaque bytes. These bytes can be decoded into tensor, tabular, text, or any other + kind of data using :meth:`~ray.data.Datastream.map_batches` to apply a per-row decoding + :ref:`user-defined function `. - Although this simple example demonstrates reading a single file, note that Datastreams - can also read directories of binary files, with one bytes block created per file. + Although this simple example demonstrates reading a single file, note that Datastreams + can also read directories of binary files, with one bytes block created per file. - .. literalinclude:: ./doc_code/creating_datastreams.py - :language: python - :start-after: __read_binary_begin__ - :end-before: __read_binary_end__ + .. literalinclude:: ./doc_code/creating_datastreams.py + :language: python + :start-after: __read_binary_begin__ + :end-before: __read_binary_end__ - See the API docs for :func:`read_binary_files() `. + See the API docs for :func:`read_binary_files() `. -.. tabbed:: TFRecords + .. 
tab-item:: TFRecords - Call :func:`~ray.data.read_tfrecords` to read TFRecord files into a tabular - :class:`~ray.data.Datastream`. + Call :func:`~ray.data.read_tfrecords` to read TFRecord files into a tabular + :class:`~ray.data.Datastream`. - .. warning:: - Only `tf.train.Example `_ - records are supported. + .. warning:: + Only `tf.train.Example `_ + records are supported. - .. literalinclude:: ./doc_code/creating_datastreams.py - :language: python - :start-after: __read_tfrecords_begin__ - :end-before: __read_tfrecords_end__ + .. literalinclude:: ./doc_code/creating_datastreams.py + :language: python + :start-after: __read_tfrecords_begin__ + :end-before: __read_tfrecords_end__ .. _datastream_reading_remote_storage: @@ -232,88 +236,90 @@ specify a ``filesystem`` argument. We use Parquet files for the below examples, but all of the aforementioned file formats are supported for each of these storage systems. -.. tabbed:: S3 +.. tab-set:: - The AWS S3 storage system is inferred from the URI scheme (``s3://``), with required connection - configuration such as S3 credentials being pulled from the machine's environment - (e.g. the ``AWS_ACCESS_KEY_ID`` and ``AWS_SECRET_ACCESS_KEY`` environment variables). + .. tab-item:: S3 - .. literalinclude:: ./doc_code/creating_datastreams.py - :language: python - :start-after: __read_parquet_s3_begin__ - :end-before: __read_parquet_s3_end__ + The AWS S3 storage system is inferred from the URI scheme (``s3://``), with required connection + configuration such as S3 credentials being pulled from the machine's environment + (e.g. the ``AWS_ACCESS_KEY_ID`` and ``AWS_SECRET_ACCESS_KEY`` environment variables). - If needing to customize this S3 storage system connection (credentials, region, - endpoint override, etc.), you can pass in an - `S3FileSystem `__ instance - to :func:`read_parquet() `. + .. 
literalinclude:: ./doc_code/creating_datastreams.py + :language: python + :start-after: __read_parquet_s3_begin__ + :end-before: __read_parquet_s3_end__ - .. literalinclude:: ./doc_code/creating_datastreams_untested.py - :language: python - :start-after: __read_parquet_s3_with_fs_begin__ - :end-before: __read_parquet_s3_with_fs_end__ + If needing to customize this S3 storage system connection (credentials, region, + endpoint override, etc.), you can pass in an + `S3FileSystem `__ instance + to :func:`read_parquet() `. -.. tabbed:: HDFS + .. literalinclude:: ./doc_code/creating_datastreams_untested.py + :language: python + :start-after: __read_parquet_s3_with_fs_begin__ + :end-before: __read_parquet_s3_with_fs_end__ - The HDFS storage system is inferred from the URI scheme (``hdfs://``), with required connection - configuration such as the host and the port being derived from the URI. + .. tab-item:: HDFS - .. note:: + The HDFS storage system is inferred from the URI scheme (``hdfs://``), with required connection + configuration such as the host and the port being derived from the URI. - This example is not runnable as-is; you'll need to point it at your HDFS - cluster/data. + .. note:: - .. literalinclude:: ./doc_code/creating_datastreams_untested.py - :language: python - :start-after: __read_parquet_hdfs_begin__ - :end-before: __read_parquet_hdfs_end__ + This example is not runnable as-is; you'll need to point it at your HDFS + cluster/data. - If needing to customize this HDFS storage system connection (host, port, user, kerb - ticket, etc.), you can pass in an `HDFSFileSystem - `__ - instance to :func:`read_parquet() `. + .. literalinclude:: ./doc_code/creating_datastreams_untested.py + :language: python + :start-after: __read_parquet_hdfs_begin__ + :end-before: __read_parquet_hdfs_end__ - .. 
literalinclude:: ./doc_code/creating_datastreams_untested.py - :language: python - :start-after: __read_parquet_hdfs_with_fs_begin__ - :end-before: __read_parquet_hdfs_with_fs_end__ + If needing to customize this HDFS storage system connection (host, port, user, kerb + ticket, etc.), you can pass in an `HDFSFileSystem + `__ + instance to :func:`read_parquet() `. -.. tabbed:: GCS + .. literalinclude:: ./doc_code/creating_datastreams_untested.py + :language: python + :start-after: __read_parquet_hdfs_with_fs_begin__ + :end-before: __read_parquet_hdfs_with_fs_end__ - Data can be read from Google Cloud Storage by providing a configured - `gcsfs GCSFileSystem `__, where the - appropriate Google Cloud project and credentials can be specified. + .. tab-item:: GCS - .. note:: - This example is not runnable as-is; you'll need to point it at your GCS bucket and - configure your GCP project and credentials. + Data can be read from Google Cloud Storage by providing a configured + `gcsfs GCSFileSystem `__, where the + appropriate Google Cloud project and credentials can be specified. - .. literalinclude:: ./doc_code/creating_datastreams_untested.py - :language: python - :start-after: __read_parquet_gcs_begin__ - :end-before: __read_parquet_gcs_end__ + .. note:: + This example is not runnable as-is; you'll need to point it at your GCS bucket and + configure your GCP project and credentials. - .. tip:: - To verify that your GCP project and credentials are set up, validate - that the GCS `filesystem` has permissions to read the input `path`. + .. literalinclude:: ./doc_code/creating_datastreams_untested.py + :language: python + :start-after: __read_parquet_gcs_begin__ + :end-before: __read_parquet_gcs_end__ - .. literalinclude:: ./doc_code/creating_datastreams_untested.py - :language: python - :start-after: __validate_parquet_gcs_begin__ - :end-before: __validate_parquet_gcs_end__ + .. 
tip:: + To verify that your GCP project and credentials are set up, validate + that the GCS `filesystem` has permissions to read the input `path`. - For more examples, see the `GCSFS Documentation `__. + .. literalinclude:: ./doc_code/creating_datastreams_untested.py + :language: python + :start-after: __validate_parquet_gcs_begin__ + :end-before: __validate_parquet_gcs_end__ -.. tabbed:: ADL/ABS (Azure) + For more examples, see the `GCSFS Documentation `__. - Data can be read from Azure Blob Storage by providing a configured - `adlfs AzureBlobFileSystem `__, where the appropriate - account name and account key can be specified. + .. tab-item:: ADL/ABS (Azure) - .. literalinclude:: ./doc_code/creating_datastreams_untested.py - :language: python - :start-after: __read_parquet_az_begin__ - :end-before: __read_parquet_az_end__ + Data can be read from Azure Blob Storage by providing a configured + `adlfs AzureBlobFileSystem `__, where the appropriate + account name and account key can be specified. + + .. literalinclude:: ./doc_code/creating_datastreams_untested.py + :language: python + :start-after: __read_parquet_az_begin__ + :end-before: __read_parquet_az_end__ Reading from Local Storage ========================== @@ -372,72 +378,74 @@ From Single-Node Data Libraries In this section, we demonstrate creating a ``Datastream`` from single-node in-memory data. -.. tabbed:: Pandas +.. tab-set:: + + .. tab-item:: Pandas - Create a ``Datastream`` from a Pandas DataFrame. This constructs a ``Datastream`` - backed by a single Pandas DataFrame block. + Create a ``Datastream`` from a Pandas DataFrame. This constructs a ``Datastream`` + backed by a single Pandas DataFrame block. - .. literalinclude:: ./doc_code/creating_datastreams.py - :language: python - :start-after: __from_pandas_begin__ - :end-before: __from_pandas_end__ + .. 
literalinclude:: ./doc_code/creating_datastreams.py + :language: python + :start-after: __from_pandas_begin__ + :end-before: __from_pandas_end__ - We can also build a ``Datastream`` from more than one Pandas DataFrame, where each said - DataFrame will become a block in the ``Datastream``. + We can also build a ``Datastream`` from more than one Pandas DataFrame, where each said + DataFrame will become a block in the ``Datastream``. - .. literalinclude:: ./doc_code/creating_datastreams.py - :language: python - :start-after: __from_pandas_mult_begin__ - :end-before: __from_pandas_mult_end__ + .. literalinclude:: ./doc_code/creating_datastreams.py + :language: python + :start-after: __from_pandas_mult_begin__ + :end-before: __from_pandas_mult_end__ -.. tabbed:: NumPy + .. tab-item:: NumPy - Create a ``Datastream`` from a NumPy ndarray. This constructs a ``Datastream`` - backed by a single-column Arrow table block; the outer dimension of the ndarray - will be treated as the row dimension, and the column will have name ``"__value__"``. + Create a ``Datastream`` from a NumPy ndarray. This constructs a ``Datastream`` + backed by a single-column Arrow table block; the outer dimension of the ndarray + will be treated as the row dimension, and the column will have name ``"__value__"``. - .. literalinclude:: ./doc_code/creating_datastreams.py - :language: python - :start-after: __from_numpy_begin__ - :end-before: __from_numpy_end__ + .. literalinclude:: ./doc_code/creating_datastreams.py + :language: python + :start-after: __from_numpy_begin__ + :end-before: __from_numpy_end__ - We can also build a ``Datastream`` from more than one NumPy ndarray, where each said - ndarray will become a single-column Arrow table block in the ``Datastream``. + We can also build a ``Datastream`` from more than one NumPy ndarray, where each said + ndarray will become a single-column Arrow table block in the ``Datastream``. - .. 
literalinclude:: ./doc_code/creating_datastreams.py - :language: python - :start-after: __from_numpy_mult_begin__ - :end-before: __from_numpy_mult_end__ + .. literalinclude:: ./doc_code/creating_datastreams.py + :language: python + :start-after: __from_numpy_mult_begin__ + :end-before: __from_numpy_mult_end__ -.. tabbed:: Arrow + .. tab-item:: Arrow - Create a ``Datastream`` from an - `Arrow Table `__. - This constructs a ``Datastream`` backed by a single Arrow ``Table`` block. + Create a ``Datastream`` from an + `Arrow Table `__. + This constructs a ``Datastream`` backed by a single Arrow ``Table`` block. - .. literalinclude:: ./doc_code/creating_datastreams.py - :language: python - :start-after: __from_arrow_begin__ - :end-before: __from_arrow_end__ + .. literalinclude:: ./doc_code/creating_datastreams.py + :language: python + :start-after: __from_arrow_begin__ + :end-before: __from_arrow_end__ - We can also build a ``Datastream`` from more than one Arrow Table, where each said - ``Table`` will become a block in the ``Datastream``. + We can also build a ``Datastream`` from more than one Arrow Table, where each said + ``Table`` will become a block in the ``Datastream``. - .. literalinclude:: ./doc_code/creating_datastreams.py - :language: python - :start-after: __from_arrow_mult_begin__ - :end-before: __from_arrow_mult_end__ + .. literalinclude:: ./doc_code/creating_datastreams.py + :language: python + :start-after: __from_arrow_mult_begin__ + :end-before: __from_arrow_mult_end__ -.. tabbed:: Python Objects + .. tab-item:: Python Objects - Create a ``Datastream`` from a list of Python objects; since each object in this - particular list is a dictionary, Datastreams will treat this list as a list of tabular - records, and will construct an Arrow ``Datastream``. 
+ Create a ``Datastream`` from a list of Python objects; since each object in this + particular list is a dictionary, Datastreams will treat this list as a list of tabular + records, and will construct an Arrow ``Datastream``. - .. literalinclude:: ./doc_code/creating_datastreams.py - :language: python - :start-after: __from_items_begin__ - :end-before: __from_items_end__ + .. literalinclude:: ./doc_code/creating_datastreams.py + :language: python + :start-after: __from_items_begin__ + :end-before: __from_items_end__ .. _datastream_from_in_memory_data_distributed: @@ -460,60 +468,62 @@ futures. integrations to work. See how these frameworks can be run on Ray in our :ref:`data processing integrations docs `. -.. tabbed:: Dask +.. tab-set:: - Create a ``MaterializedDatastream`` from a - `Dask DataFrame `__. This constructs a - ``Datastream`` backed by the distributed Pandas DataFrame partitions that underly the - Dask DataFrame. + .. tab-item:: Dask - This conversion has near-zero overhead, since Datastreams simply reinterprets existing - Dask-in-Ray partition objects as Datastream blocks. + Create a ``MaterializedDatastream`` from a + `Dask DataFrame `__. This constructs a + ``Datastream`` backed by the distributed Pandas DataFrame partitions that underly the + Dask DataFrame. - .. literalinclude:: ./doc_code/creating_datastreams.py - :language: python - :start-after: __from_dask_begin__ - :end-before: __from_dask_end__ + This conversion has near-zero overhead, since Datastreams simply reinterprets existing + Dask-in-Ray partition objects as Datastream blocks. -.. tabbed:: Spark + .. literalinclude:: ./doc_code/creating_datastreams.py + :language: python + :start-after: __from_dask_begin__ + :end-before: __from_dask_end__ - Create a ``MaterializedDatastream`` from a `Spark DataFrame - `__. - This constructs a ``Datastream`` backed by the distributed Spark DataFrame partitions - that underly the Spark DataFrame. 
When this conversion happens, Spark-on-Ray (RayDP) - will save the Spark DataFrame partitions to Ray's object store in the Arrow format, - which Datastreams will then interpret as its blocks. + .. tab-item:: Spark - .. literalinclude:: ./doc_code/creating_datastreams_untested.py - :language: python - :start-after: __from_spark_begin__ - :end-before: __from_spark_end__ + Create a ``MaterializedDatastream`` from a `Spark DataFrame + `__. + This constructs a ``Datastream`` backed by the distributed Spark DataFrame partitions + that underly the Spark DataFrame. When this conversion happens, Spark-on-Ray (RayDP) + will save the Spark DataFrame partitions to Ray's object store in the Arrow format, + which Datastreams will then interpret as its blocks. -.. tabbed:: Modin + .. literalinclude:: ./doc_code/creating_datastreams_untested.py + :language: python + :start-after: __from_spark_begin__ + :end-before: __from_spark_end__ - Create a ``MaterializedDatastream`` from a Modin DataFrame. This constructs a ``Datastream`` - backed by the distributed Pandas DataFrame partitions that underly the Modin DataFrame. + .. tab-item:: Modin - This conversion has near-zero overhead, since Datastreams simply reinterprets existing - Modin partition objects as Datastream blocks. + Create a ``MaterializedDatastream`` from a Modin DataFrame. This constructs a ``Datastream`` + backed by the distributed Pandas DataFrame partitions that underly the Modin DataFrame. - .. literalinclude:: ./doc_code/creating_datastreams.py - :language: python - :start-after: __from_modin_begin__ - :end-before: __from_modin_end__ + This conversion has near-zero overhead, since Datastreams simply reinterprets existing + Modin partition objects as Datastream blocks. -.. tabbed:: Mars + .. literalinclude:: ./doc_code/creating_datastreams.py + :language: python + :start-after: __from_modin_begin__ + :end-before: __from_modin_end__ - Create a ``MaterializedDatastream`` from a Mars DataFrame. 
This constructs a ``Datastream`` - backed by the distributed Pandas DataFrame partitions that underly the Mars DataFrame. + .. tab-item:: Mars - This conversion has near-zero overhead, since Datastreams simply reinterprets existing - Mars partition objects as Datastream blocks. + Create a ``MaterializedDatastream`` from a Mars DataFrame. This constructs a ``Datastream`` + backed by the distributed Pandas DataFrame partitions that underly the Mars DataFrame. - .. literalinclude:: ./doc_code/creating_datastreams_untested.py - :language: python - :start-after: __from_mars_begin__ - :end-before: __from_mars_end__ + This conversion has near-zero overhead, since Datastreams simply reinterprets existing + Mars partition objects as Datastream blocks. + + .. literalinclude:: ./doc_code/creating_datastreams_untested.py + :language: python + :start-after: __from_mars_begin__ + :end-before: __from_mars_end__ .. _datastream_from_torch_tf: @@ -521,45 +531,47 @@ futures. From Torch and TensorFlow ------------------------- -.. tabbed:: PyTorch +.. tab-set:: + + .. tab-item:: PyTorch - If you already have a Torch dataset available, you can create a Datastream using - :class:`~ray.data.from_torch`. + If you already have a Torch dataset available, you can create a Datastream using + :class:`~ray.data.from_torch`. - .. warning:: - :class:`~ray.data.from_torch` doesn't support parallel - reads. You should only use this datasource for small datastreams like MNIST or - CIFAR. + .. warning:: + :class:`~ray.data.from_torch` doesn't support parallel + reads. You should only use this datasource for small datastreams like MNIST or + CIFAR. - .. code-block:: python + .. 
code-block:: python - import ray - import torchvision + import ray + import torchvision - torch_ds = torchvision.datasets.MNIST("data", download=True) - datastream = ray.data.from_torch(torch_ds) - datastream.take(1) - # (, 5) + torch_ds = torchvision.datasets.MNIST("data", download=True) + datastream = ray.data.from_torch(torch_ds) + datastream.take(1) + # (, 5) -.. tabbed:: TensorFlow + .. tab-item:: TensorFlow - If you already have a TensorFlow dataset available, you can create a Datastream - using :class:`~ray.data.from_tf`. + If you already have a TensorFlow dataset available, you can create a Datastream + using :class:`~ray.data.from_tf`. - .. warning:: - :class:`~ray.data.from_tf` doesn't support parallel reads. You - should only use this function with small datastreams like MNIST or CIFAR. + .. warning:: + :class:`~ray.data.from_tf` doesn't support parallel reads. You + should only use this function with small datastreams like MNIST or CIFAR. - .. code-block:: python + .. code-block:: python - import ray - import tensorflow_datasets as tfds + import ray + import tensorflow_datasets as tfds - tf_ds, _ = tfds.load("cifar10", split=["train", "test"]) - datastream = ray.data.from_tf(tf_ds) + tf_ds, _ = tfds.load("cifar10", split=["train", "test"]) + datastream = ray.data.from_tf(tf_ds) - datastream - # -> MaterializedDatastream(num_blocks=200, num_rows=50000, schema={id: binary, image: numpy.ndarray(shape=(32, 32, 3), dtype=uint8), label: int64}) + datastream + # -> MaterializedDatastream(num_blocks=200, num_rows=50000, schema={id: binary, image: numpy.ndarray(shape=(32, 32, 3), dtype=uint8), label: int64}) .. _datastream_from_huggingface: @@ -643,187 +655,189 @@ Reading From SQL Databases Call :func:`~ray.data.read_sql` to read data from a database that provides a `Python DB API2-compliant `_ connector. -.. tabbed:: MySQL +.. tab-set:: + + .. tab-item:: MySQL + + To read from MySQL, install + `MySQL Connector/Python `_. 
It's the + first-party MySQL database connector. - To read from MySQL, install - `MySQL Connector/Python `_. It's the - first-party MySQL database connector. + .. code-block:: console - .. code-block:: console + pip install mysql-connector-python - pip install mysql-connector-python + Then, define your connection login and query the database. - Then, define your connection login and query the database. + .. code-block:: python - .. code-block:: python + import mysql.connector - import mysql.connector + import ray - import ray + def create_connection(): + return mysql.connector.connect( + user="admin", + password=..., + host="example-mysql-database.c2c2k1yfll7o.us-west-2.rds.amazonaws.com", + connection_timeout=30, + database="example", + ) - def create_connection(): - return mysql.connector.connect( - user="admin", - password=..., - host="example-mysql-database.c2c2k1yfll7o.us-west-2.rds.amazonaws.com", - connection_timeout=30, - database="example", + # Get all movies + datastream = ray.data.read_sql("SELECT * FROM movie", create_connection) + # Get movies after the year 1980 + datastream = ray.data.read_sql( + "SELECT title, score FROM movie WHERE year >= 1980", create_connection + ) + # Get the number of movies per year + datastream = ray.data.read_sql( + "SELECT year, COUNT(*) FROM movie GROUP BY year", create_connection ) - # Get all movies - datastream = ray.data.read_sql("SELECT * FROM movie", create_connection) - # Get movies after the year 1980 - datastream = ray.data.read_sql( - "SELECT title, score FROM movie WHERE year >= 1980", create_connection - ) - # Get the number of movies per year - datastream = ray.data.read_sql( - "SELECT year, COUNT(*) FROM movie GROUP BY year", create_connection - ) + .. tab-item:: PostgreSQL -.. tabbed:: PostgreSQL + To read from PostgreSQL, install `Psycopg 2 `_. It's + the most popular PostgreSQL database connector. - To read from PostgreSQL, install `Psycopg 2 `_. It's - the most popular PostgreSQL database connector. + .. 
code-block:: console - .. code-block:: console + pip install psycopg2-binary - pip install psycopg2-binary + Then, define your connection login and query the database. - Then, define your connection login and query the database. + .. code-block:: python - .. code-block:: python + import psycopg2 - import psycopg2 + import ray - import ray + def create_connection(): + return psycopg2.connect( + user="postgres", + password=..., + host="example-postgres-database.c2c2k1yfll7o.us-west-2.rds.amazonaws.com", + dbname="example", + ) - def create_connection(): - return psycopg2.connect( - user="postgres", - password=..., - host="example-postgres-database.c2c2k1yfll7o.us-west-2.rds.amazonaws.com", - dbname="example", + # Get all movies + datastream = ray.data.read_sql("SELECT * FROM movie", create_connection) + # Get movies after the year 1980 + datastream = ray.data.read_sql( + "SELECT title, score FROM movie WHERE year >= 1980", create_connection + ) + # Get the number of movies per year + datastream = ray.data.read_sql( + "SELECT year, COUNT(*) FROM movie GROUP BY year", create_connection ) - # Get all movies - datastream = ray.data.read_sql("SELECT * FROM movie", create_connection) - # Get movies after the year 1980 - datastream = ray.data.read_sql( - "SELECT title, score FROM movie WHERE year >= 1980", create_connection - ) - # Get the number of movies per year - datastream = ray.data.read_sql( - "SELECT year, COUNT(*) FROM movie GROUP BY year", create_connection - ) + .. tab-item:: Snowflake -.. tabbed:: Snowflake + To read from Snowflake, install the + `Snowflake Connector for Python `_. - To read from Snowflake, install the - `Snowflake Connector for Python `_. + .. code-block:: console - .. code-block:: console + pip install snowflake-connector-python - pip install snowflake-connector-python + Then, define your connection login and query the database. - Then, define your connection login and query the database. + .. code-block:: python - .. 
code-block:: python + import snowflake.connector - import snowflake.connector + import ray - import ray + def create_connection(): + return snowflake.connector.connect( + user=..., + password=... + account="ZZKXUVH-IPB52023", + database="example", + ) - def create_connection(): - return snowflake.connector.connect( - user=..., - password=... - account="ZZKXUVH-IPB52023", - database="example", + # Get all movies + datastream = ray.data.read_sql("SELECT * FROM movie", create_connection) + # Get movies after the year 1980 + datastream = ray.data.read_sql( + "SELECT title, score FROM movie WHERE year >= 1980", create_connection + ) + # Get the number of movies per year + datastream = ray.data.read_sql( + "SELECT year, COUNT(*) FROM movie GROUP BY year", create_connection ) - - # Get all movies - datastream = ray.data.read_sql("SELECT * FROM movie", create_connection) - # Get movies after the year 1980 - datastream = ray.data.read_sql( - "SELECT title, score FROM movie WHERE year >= 1980", create_connection - ) - # Get the number of movies per year - datastream = ray.data.read_sql( - "SELECT year, COUNT(*) FROM movie GROUP BY year", create_connection - ) -.. tabbed:: Databricks + .. tab-item:: Databricks - To read from Databricks, install the - `Databricks SQL Connector for Python `_. + To read from Databricks, install the + `Databricks SQL Connector for Python `_. - .. code-block:: console + .. code-block:: console - pip install databricks-sql-connector + pip install databricks-sql-connector - Then, define your connection logic and read from the Databricks SQL warehouse. + Then, define your connection logic and read from the Databricks SQL warehouse. - .. code-block:: python + .. 
code-block:: python - from databricks import sql + from databricks import sql - import ray + import ray - def create_connection(): - return sql.connect( - server_hostname="dbc-1016e3a4-d292.cloud.databricks.com", - http_path="/sql/1.0/warehouses/a918da1fc0b7fed0", - access_token=..., + def create_connection(): + return sql.connect( + server_hostname="dbc-1016e3a4-d292.cloud.databricks.com", + http_path="/sql/1.0/warehouses/a918da1fc0b7fed0", + access_token=..., - # Get all movies - datastream = ray.data.read_sql("SELECT * FROM movie", create_connection) - # Get movies after the year 1980 - datastream = ray.data.read_sql( - "SELECT title, score FROM movie WHERE year >= 1980", create_connection - ) - # Get the number of movies per year - datastream = ray.data.read_sql( - "SELECT year, COUNT(*) FROM movie GROUP BY year", create_connection - ) + # Get all movies + datastream = ray.data.read_sql("SELECT * FROM movie", create_connection) + # Get movies after the year 1980 + datastream = ray.data.read_sql( + "SELECT title, score FROM movie WHERE year >= 1980", create_connection + ) + # Get the number of movies per year + datastream = ray.data.read_sql( + "SELECT year, COUNT(*) FROM movie GROUP BY year", create_connection + ) -.. tabbed:: BigQuery + .. tab-item:: BigQuery - To read from BigQuery, install the - `Python Client for Google BigQuery `_. - This package includes a DB API2-compliant database connector. + To read from BigQuery, install the + `Python Client for Google BigQuery `_. + This package includes a DB API2-compliant database connector. - .. code-block:: console + .. code-block:: console - pip install google-cloud-bigquery + pip install google-cloud-bigquery - Then, define your connection login and query the dataset. + Then, define your connection login and query the dataset. - .. code-block:: python + .. 
code-block:: python - from google.cloud import bigquery - from google.cloud.bigquery import dbapi + from google.cloud import bigquery + from google.cloud.bigquery import dbapi - import ray + import ray - def create_connection(): - client = bigquery.Client(...) - return dbapi.Connection(client) + def create_connection(): + client = bigquery.Client(...) + return dbapi.Connection(client) - # Get all movies - datastream = ray.data.read_sql("SELECT * FROM movie", create_connection) - # Get movies after the year 1980 - datastream = ray.data.read_sql( - "SELECT title, score FROM movie WHERE year >= 1980", create_connection - ) - # Get the number of movies per year - datastream = ray.data.read_sql( - "SELECT year, COUNT(*) FROM movie GROUP BY year", create_connection - ) + # Get all movies + datastream = ray.data.read_sql("SELECT * FROM movie", create_connection) + # Get movies after the year 1980 + datastream = ray.data.read_sql( + "SELECT title, score FROM movie WHERE year >= 1980", create_connection + ) + # Get the number of movies per year + datastream = ray.data.read_sql( + "SELECT year, COUNT(*) FROM movie GROUP BY year", create_connection + ) .. _data_custom_datasource: diff --git a/doc/source/data/examples/ocr_example.ipynb b/doc/source/data/examples/ocr_example.ipynb index 637fd8cf80ec..2c1cb0b988ef 100644 --- a/doc/source/data/examples/ocr_example.ipynb +++ b/doc/source/data/examples/ocr_example.ipynb @@ -2,8 +2,8 @@ "cells": [ { "cell_type": "code", - "execution_count": 4, - "id": "905f9cad", + "execution_count": null, + "id": "49fe2185", "metadata": { "tags": [ "remove-cell" @@ -22,7 +22,7 @@ }, { "cell_type": "markdown", - "id": "6945c179", + "id": "2a344178", "metadata": {}, "source": [ "# Scaling OCR using Ray Data\n", @@ -43,72 +43,39 @@ "\n", "Let's start by preparing the dependencies and downloading the dataset. 
First we install the OCR software `tesseract` and its Python client:\n", "\n", - "````{tabbed} macOS\n", + "``````{tab-set}\n", + "\n", + "````{tab-item} macOS\n", "```\n", "brew install tesseract\n", "pip install pytesseract\n", "```\n", "````\n", "\n", - "````{tabbed} linux\n", + "````{tab-item} linux\n", "```\n", "sudo apt-get install tesseract-ocr\n", "pip install pytesseract\n", "```\n", "````\n", "\n", + "``````\n", + "\n", "By default, the following example will run on a tiny dataset we provide. If you want to run it on the full dataset, we recommend to run it on a cluster since processing all the images with tesseract takes a lot of time.\n", "\n", "````{note}\n", "If you want to run the example on the full [LightShot](https://www.kaggle.com/datasets/datasnaek/lightshot) dataset, you need to download the dataset and extract it. You can extract the dataset by first running `unzip archive.zip` and then `unrar x LightShot13k.rar .` and then you can upload the dataset to S3 with `aws s3 cp LightShot13k/ s3:/// --recursive`.\n", - "````" - ] - }, - { - "cell_type": "markdown", - "id": "c08612ac", - "metadata": {}, - "source": [ - "Let's now import Ray and initialize a local Ray cluster. If you want to run OCR at a very large scale, you should run this workload on a multi-node cluster." 
- ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": "37f22aa8", - "metadata": { - "scrolled": true - }, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2022-07-04 14:35:19,444\tINFO services.py:1476 -- View the Ray dashboard at \u001b[1m\u001b[32mhttp://127.0.0.1:8265\u001b[39m\u001b[22m\n" - ] - }, - { - "data": { - "text/plain": [ - "RayContext(dashboard_url='127.0.0.1:8265', python_version='3.7.4', ray_version='1.13.0', ray_commit='e4ce38d001dbbe09cd21c497fedd03d692b2be3e', address_info={'node_ip_address': '127.0.0.1', 'raylet_ip_address': '127.0.0.1', 'redis_address': None, 'object_store_address': '/tmp/ray/session_2022-07-04_14-35-16_950060_89285/sockets/plasma_store', 'raylet_socket_name': '/tmp/ray/session_2022-07-04_14-35-16_950060_89285/sockets/raylet', 'webui_url': '127.0.0.1:8265', 'session_dir': '/tmp/ray/session_2022-07-04_14-35-16_950060_89285', 'metrics_export_port': 60416, 'gcs_address': '127.0.0.1:61663', 'address': '127.0.0.1:61663', 'node_id': 'b6c981243d51558d13e4290f0f63552a6126f8a8d9e472baafe9dd5b'})" - ] - }, - "execution_count": 2, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ + "````\n", + "\n", + "\n", + "Let's now import Ray and initialize a local Ray cluster. If you want to run OCR at a very large scale, you should run this workload on a multi-node cluster.\n", + "\n", + "```python\n", "# Import ray and initialize a local Ray cluster.\n", "import ray\n", - "ray.init()" - ] - }, - { - "cell_type": "markdown", - "id": "ee90daa8", - "metadata": {}, - "source": [ + "ray.init()\n", + "```\n", + "\n", "### Running the OCR software on the data\n", "\n", "We can now use the {meth}`ray.data.read_binary_files ` function to read all the images from S3. We set the `include_paths=True` option to create a datastream of the S3 paths and image contents. 
We then run the {meth}`ds.map ` function on this datastream to execute the actual OCR process on each file and convert the screen shots into text. This will create a tabular datastream with columns `path` and `text`, see also [](transform_datastreams_row_output_types).\n", @@ -125,25 +92,9 @@ " secret_key=\"...\",\n", " session_token=\"...\"))\n", "```\n", - "````" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "id": "d31d3303", - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2022-07-04 14:35:53,683\tWARNING read_api.py:256 -- The number of blocks in this datastream (3) limits its parallelism to 3 concurrent tasks. This is much less than the number of available CPU slots in the cluster. Use `.repartition(n)` to increase the number of datastream blocks.\n", - "Read->Map: 100%|██████████| 3/3 [00:07<00:00, 2.34s/it]\n" - ] - } - ], - "source": [ + "````\n", + "\n", + "```python\n", "from io import BytesIO\n", "from PIL import Image\n", "import pytesseract\n", @@ -159,46 +110,25 @@ " \"s3://anonymous@air-example-data/ocr_tiny_dataset\",\n", " include_paths=True)\n", "\n", - "results = ds.map(perform_ocr)" - ] - }, - { - "cell_type": "markdown", - "id": "e22e7cd7", - "metadata": {}, - "source": [ + "results = ds.map(perform_ocr)\n", + "```\n", + "\n", "Let us have a look at some of the data points with the {meth}`take ` function." ] }, { "cell_type": "code", - "execution_count": 6, - "id": "5518b831", + "execution_count": null, + "id": "45aa1983", "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "[ArrowRow({'path': 'air-example-data/ocr_tiny_dataset/gnome_screenshot.png',\n", - " 'text': '= Cancel\\n\\nTake Screenshot\\n© Grab the whole screen\\n\\nGrab the current window\\n\\n|_| eeeeeeter\\n\\nGrab after a delay of 0\\n\\nEffects\\nInclude pointer\\n\\n¥ Include the window border\\n\\nApply effect: None Sa\\n\\n+. 
seconds\\n'}),\n", - " ArrowRow({'path': 'air-example-data/ocr_tiny_dataset/miranda_screenshot.png',\n", - " 'text': '© Viktor (Online) : Message Session\\n\\n“etto| © Whter | steno\\n\\nremus\\ntet? Fiviha\\n\\n17: dokonca to vie aj video @\\nViktor\\n\\n1818. 55 samozrejme\\n\\n1818: len moj brat to skusal\\nremus\\n\\nWA\\n\\n098003 —\\n\\nseettsgmailcom [0]\\n\\nonline\\n\\nHacemen\\n@ Ce\\n\\nieFFo\\n169 6 je <>vin ©®\\n\\nBe 22\\n\\naway\\n\\nTue\\nhn\\n\\n& Wee\\n\\nYep, Tm here\\n\\n&\\nea\\na\\nLS]\\n\\n'}),\n", - " ArrowRow({'path': 'air-example-data/ocr_tiny_dataset/qemu_screenshot.png',\n", - " 'text': 'File Edit View Bookmarks\\n\\n[i New Tab [If] split view ~\\n\\n43044 kousekip\\n\\nPlugins\\n\\nkousekip:ako-kaede-mirai(htop)\\n\\nkousekip:ako-kaede-mirai(qemu-system-x86)\\n\\nSettings\\n\\nHelp\\n\\nkousekip:ako-kaede-miral(htop) — Konsole vax\\n\\nFl Paste Q Find\\n\\nEMU vax\\n\\nMachine View\\n\\nApplications Places System @)C) Fri Feb 18, 13:56\\n\\nTerminal\\n\\nroot root\\nroot sys\\nroot sys\\nroot sys\\nroot sys\\nroot sys\\nroot root\\nroot sys\\nroot bin\\nroot root\\nroot sys\\nroot root\\nroot sys\\nroot sys\\nroot root\\nroot root\\nroot root\\nroot sys\\nroot root\\nroot sys\\nroot sys\\n2 root —sys\\nkousekip@ako-kaede-mirai-sun:~$ If\\n\\nbin -> ./usr/bin\\nboot\\ndev\\ndevices\\netc\\nexport\\nhome\\nkernel\\nlib\\nmedia\\nmnt\\n\\nnet\\nopt\\nplatform\\nproc\\nroot\\nrpool\\nsbin\\nsystem\\n‘tmp\\nusr\\nvar\\n\\n@kousekip\\nidesktop\\n\\n©\\n\\n©\\n\\nBUNwnSunennh SnuNaeon\\n\\n(Documents\\nDownloads\\nGaMusic\\n\\n5\\n\\nBitrash\\nDevices\\n(Floppy Drive\\nNetwork\\n\\n@ Browse Netw...\\n\\n9\\n9\\n6\\n4\\n9\\n\\n53\\n5\\n6\\n4\\n9\\n10\\n0\\n6\\n18\\n7\\n\\nfovey\\\\aliarel(elare)\\n\\n'})]" - ] - }, - "execution_count": 6, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "results.take(10)" ] }, { "cell_type": "markdown", - "id": "67ed5a8d", + "id": "36741417", "metadata": {}, "source": [ 
"### Saving and loading the result of the OCR run\n", @@ -207,57 +137,29 @@ "Saving the datastream is optional, you can also continue with the in-memory data without persisting it to storage.\n", "````\n", "\n", - "We can save the result of running tesseract on the datastream on disk so we can read it out later if we want to re-run the NLP analysis without needing to re-run the OCR (which is very expensive on the whole datastream). This can be done with the {meth}`write_parquet ` function:" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "id": "7c2d8abe", - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Write Progress: 100%|██████████| 3/3 [00:00<00:00, 207.11it/s]\n" - ] - } - ], - "source": [ + "We can save the result of running tesseract on the datastream on disk so we can read it out later if we want to re-run the NLP analysis without needing to re-run the OCR (which is very expensive on the whole datastream). This can be done with the {meth}`write_parquet ` function:\n", + "\n", + "```python\n", "import os\n", - "results.write_parquet(os.path.expanduser(\"~/LightShot13k_results\"))" - ] - }, - { - "cell_type": "markdown", - "id": "7a387f42", - "metadata": {}, - "source": [ + "results.write_parquet(os.path.expanduser(\"~/LightShot13k_results\"))\n", + "```\n", + "\n", "You can later reload the data with the {meth}`read_parquet ` function:" ] }, { "cell_type": "code", - "execution_count": 8, - "id": "af63be93", + "execution_count": null, + "id": "c8d419fa", "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2022-07-04 14:36:13,515\tWARNING read_api.py:256 -- The number of blocks in this datastream (6) limits its parallelism to 6 concurrent tasks. This is much less than the number of available CPU slots in the cluster. 
Use `.repartition(n)` to increase the number of datastream blocks.\n" - ] - } - ], + "outputs": [], "source": [ "results = ray.data.read_parquet(os.path.expanduser(\"~/LightShot13k_results\"))" ] }, { "cell_type": "markdown", - "id": "f6a7bf0f", + "id": "decffa3c", "metadata": {}, "source": [ "### Process the extracted text data with spaCy\n", @@ -274,7 +176,7 @@ { "cell_type": "code", "execution_count": null, - "id": "69321ee3", + "id": "1604b02f", "metadata": {}, "outputs": [], "source": [ @@ -285,7 +187,7 @@ }, { "cell_type": "markdown", - "id": "b01d2add", + "id": "fc96fb8b", "metadata": {}, "source": [ "This is some code to determine the language of a piece of text:" @@ -293,21 +195,10 @@ }, { "cell_type": "code", - "execution_count": 9, - "id": "ee4cc430", + "execution_count": null, + "id": "3374fc47", "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "{'language': 'en', 'score': 0.9999976594668697}" - ] - }, - "execution_count": 9, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "import spacy\n", "from spacy.language import Language\n", @@ -325,7 +216,7 @@ }, { "cell_type": "markdown", - "id": "95ab0646", + "id": "05d218ee", "metadata": {}, "source": [ "It gives both the language and a confidence score for that language.\n", @@ -335,29 +226,10 @@ }, { "cell_type": "code", - "execution_count": 10, - "id": "85a4a414", + "execution_count": null, + "id": "30648ced", "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Read progress: 100%|██████████| 6/6 [00:00<00:00, 485.55it/s]\n", - "Map Progress (1 actors 1 pending): 100%|██████████| 6/6 [00:06<00:00, 1.04s/it]\n" - ] - }, - { - "data": { - "text/plain": [ - "Datastream(num_blocks=6, num_rows=6, schema={path: object, text: object, language: object, score: float64})" - ] - }, - "execution_count": 10, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "import spacy\n", 
"from spacy.language import Language\n", @@ -384,7 +256,7 @@ }, { "cell_type": "markdown", - "id": "ca995036", + "id": "490bca7c", "metadata": {}, "source": [ "We can now get language statistics over the whole dataset:" @@ -392,32 +264,10 @@ }, { "cell_type": "code", - "execution_count": 11, - "id": "f64f8b3c", - "metadata": { - "scrolled": true - }, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Read: 100%|██████████| 6/6 [00:00<00:00, 19.95it/s]\n", - "Map Progress (1 actors 1 pending): 100%|██████████| 6/6 [00:05<00:00, 1.09it/s]\n", - "Sort Sample: 100%|██████████| 6/6 [00:00<00:00, 919.27it/s]\n", - "Shuffle Map: 100%|██████████| 6/6 [00:00<00:00, 159.14it/s]\n", - "Shuffle Reduce: 100%|██████████| 6/6 [00:00<00:00, 364.59it/s]" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "{'language': 'af', 'count()': 2}\n", - "{'language': 'en', 'count()': 4}\n" - ] - } - ], + "execution_count": null, + "id": "346ac322", + "metadata": {}, + "outputs": [], "source": [ "languages = results.map_batches(SpacyBatchInference, compute=\"actors\")\n", "languages.groupby(\"language\").count().show()" @@ -425,7 +275,7 @@ }, { "cell_type": "markdown", - "id": "0d638758", + "id": "c9453342", "metadata": {}, "source": [ "````{note}\n", @@ -452,68 +302,15 @@ "{'language': 'nl', 'count()': 982}\n", "{'language': 'no', 'count()': 56}\n", "```\n", - "````" - ] - }, - { - "cell_type": "markdown", - "id": "9cc5ca11", - "metadata": {}, - "source": [ - "We can now filter to include only the English documents and also sort them according to their score." 
- ] - }, - { - "cell_type": "code", - "execution_count": 12, - "id": "8c4bd03d", - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Filter: 100%|██████████| 6/6 [00:00<00:00, 561.84it/s]\n", - "Sort Sample: 100%|██████████| 6/6 [00:00<00:00, 1311.81it/s]\n", - "Shuffle Map: 100%|██████████| 6/6 [00:00<00:00, 319.24it/s]\n", - "Shuffle Reduce: 100%|██████████| 6/6 [00:00<00:00, 450.79it/s]\n" - ] - }, - { - "data": { - "text/plain": [ - "[ArrowRow({'path': 'air-example-data/ocr_tiny_dataset/gnome_screenshot.png',\n", - " 'text': '= Cancel\\n\\nTake Screenshot\\n© Grab the whole screen\\n\\nGrab the current window\\n\\n|_| eeeeeeter\\n\\nGrab after a delay of 0\\n\\nEffects\\nInclude pointer\\n\\n¥ Include the window border\\n\\nApply effect: None Sa\\n\\n+. seconds\\n',\n", - " 'language': 'en',\n", - " 'score': 0.9999976791815426}),\n", - " ArrowRow({'path': 'air-example-data/ocr_tiny_dataset/gnome_screenshot.png',\n", - " 'text': '= Cancel\\n\\nTake Screenshot\\n© Grab the whole screen\\n\\nGrab the current window\\n\\n|_| eeeeeeter\\n\\nGrab after a delay of 0\\n\\nEffects\\nInclude pointer\\n\\n¥ Include the window border\\n\\nApply effect: None Sa\\n\\n+. seconds\\n',\n", - " 'language': 'en',\n", - " 'score': 0.9999965244942747}),\n", - " ArrowRow({'path': 'air-example-data/ocr_tiny_dataset/miranda_screenshot.png',\n", - " 'text': '© Viktor (Online) : Message Session\\n\\n“etto| © Whter | steno\\n\\nremus\\ntet? Fiviha\\n\\n17: dokonca to vie aj video @\\nViktor\\n\\n1818. 
55 samozrejme\\n\\n1818: len moj brat to skusal\\nremus\\n\\nWA\\n\\n098003 —\\n\\nseettsgmailcom [0]\\n\\nonline\\n\\nHacemen\\n@ Ce\\n\\nieFFo\\n169 6 je <>vin ©®\\n\\nBe 22\\n\\naway\\n\\nTue\\nhn\\n\\n& Wee\\n\\nYep, Tm here\\n\\n&\\nea\\na\\nLS]\\n\\n',\n", - " 'language': 'en',\n", - " 'score': 0.8571411027551514}),\n", - " ArrowRow({'path': 'air-example-data/ocr_tiny_dataset/miranda_screenshot.png',\n", - " 'text': '© Viktor (Online) : Message Session\\n\\n“etto| © Whter | steno\\n\\nremus\\ntet? Fiviha\\n\\n17: dokonca to vie aj video @\\nViktor\\n\\n1818. 55 samozrejme\\n\\n1818: len moj brat to skusal\\nremus\\n\\nWA\\n\\n098003 —\\n\\nseettsgmailcom [0]\\n\\nonline\\n\\nHacemen\\n@ Ce\\n\\nieFFo\\n169 6 je <>vin ©®\\n\\nBe 22\\n\\naway\\n\\nTue\\nhn\\n\\n& Wee\\n\\nYep, Tm here\\n\\n&\\nea\\na\\nLS]\\n\\n',\n", - " 'language': 'en',\n", - " 'score': 0.5714285419353925})]" - ] - }, - "execution_count": 12, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "languages.filter(lambda row: row[\"language\"] == \"en\").sort(\"score\", descending=True).take(1000)" - ] - }, - { - "cell_type": "markdown", - "id": "8c05df96", - "metadata": {}, - "source": [ + "````\n", + "\n", + "\n", + "We can now filter to include only the English documents and also sort them according to their score.\n", + "\n", + "```python\n", + "languages.filter(lambda row: row[\"language\"] == \"en\").sort(\"score\", descending=True).take(1000)\n", + "```\n", + "\n", "If you are interested in this example and want to extend it, you can do the following for the full dataset:\n", "- go throught these results in order\n", "- create labels on whether the text is a chat conversation and then train a model like [Huggingface Transformers](https://huggingface.co/docs/transformers/) on the data.\n", @@ -523,23 +320,10 @@ } ], "metadata": { - "celltoolbar": "Tags", "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" - }, - "language_info": { - 
"codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.7.4" } }, "nbformat": 4, diff --git a/doc/source/ray-core/actors.rst b/doc/source/ray-core/actors.rst index 6f34e8b42954..bce5d1487e77 100644 --- a/doc/source/ray-core/actors.rst +++ b/doc/source/ray-core/actors.rst @@ -9,81 +9,83 @@ An actor is essentially a stateful worker (or a service). When a new actor is instantiated, a new worker is created, and methods of the actor are scheduled on that specific worker and can access and mutate the state of that worker. -.. tabbed:: Python +.. tab-set:: - The ``ray.remote`` decorator indicates that instances of the ``Counter`` class will be actors. Each actor runs in its own Python process. + .. tab-item:: Python - .. code-block:: python + The ``ray.remote`` decorator indicates that instances of the ``Counter`` class will be actors. Each actor runs in its own Python process. - @ray.remote - class Counter(object): - def __init__(self): - self.value = 0 + .. code-block:: python - def increment(self): - self.value += 1 - return self.value + @ray.remote + class Counter(object): + def __init__(self): + self.value = 0 - def get_counter(self): - return self.value + def increment(self): + self.value += 1 + return self.value - # Create an actor from this class. - counter = Counter.remote() + def get_counter(self): + return self.value -.. tabbed:: Java + # Create an actor from this class. + counter = Counter.remote() - ``Ray.actor`` is used to create actors from regular Java classes. + .. tab-item:: Java - .. code-block:: java + ``Ray.actor`` is used to create actors from regular Java classes. - // A regular Java class. - public class Counter { + .. code-block:: java - private int value = 0; + // A regular Java class. 
+ public class Counter { - public int increment() { - this.value += 1; - return this.value; - } - } + private int value = 0; - // Create an actor from this class. - // `Ray.actor` takes a factory method that can produce - // a `Counter` object. Here, we pass `Counter`'s constructor - // as the argument. - ActorHandle counter = Ray.actor(Counter::new).remote(); + public int increment() { + this.value += 1; + return this.value; + } + } -.. tabbed:: C++ + // Create an actor from this class. + // `Ray.actor` takes a factory method that can produce + // a `Counter` object. Here, we pass `Counter`'s constructor + // as the argument. + ActorHandle counter = Ray.actor(Counter::new).remote(); - ``ray::Actor`` is used to create actors from regular C++ classes. + .. tab-item:: C++ - .. code-block:: c++ + ``ray::Actor`` is used to create actors from regular C++ classes. - // A regular C++ class. - class Counter { + .. code-block:: c++ - private: - int value = 0; + // A regular C++ class. + class Counter { - public: - int Increment() { - value += 1; - return value; - } - }; + private: + int value = 0; - // Factory function of Counter class. - static Counter *CreateCounter() { - return new Counter(); - }; + public: + int Increment() { + value += 1; + return value; + } + }; - RAY_REMOTE(&Counter::Increment, CreateCounter); + // Factory function of Counter class. + static Counter *CreateCounter() { + return new Counter(); + }; - // Create an actor from this class. - // `ray::Actor` takes a factory method that can produce - // a `Counter` object. Here, we pass `Counter`'s factory function - // as the argument. - auto counter = ray::Actor(CreateCounter).Remote(); + RAY_REMOTE(&Counter::Increment, CreateCounter); + + // Create an actor from this class. + // `ray::Actor` takes a factory method that can produce + // a `Counter` object. Here, we pass `Counter`'s factory function + // as the argument. 
+ auto counter = ray::Actor(CreateCounter).Remote(); Specifying required resources ----------------------------- @@ -92,28 +94,30 @@ Specifying required resources You can specify resource requirements in actors too (see :ref:`resource-requirements` for more details.) -.. tabbed:: Python +.. tab-set:: + + .. tab-item:: Python - .. code-block:: python + .. code-block:: python - # Specify required resources for an actor. - @ray.remote(num_cpus=2, num_gpus=0.5) - class Actor(object): - pass + # Specify required resources for an actor. + @ray.remote(num_cpus=2, num_gpus=0.5) + class Actor(object): + pass -.. tabbed:: Java + .. tab-item:: Java - .. code-block:: java + .. code-block:: java - // Specify required resources for an actor. - Ray.actor(Counter::new).setResource("CPU", 2.0).setResource("GPU", 0.5).remote(); + // Specify required resources for an actor. + Ray.actor(Counter::new).setResource("CPU", 2.0).setResource("GPU", 0.5).remote(); -.. tabbed:: C++ + .. tab-item:: C++ - .. code-block:: c++ + .. code-block:: c++ - // Specify required resources for an actor. - ray::Actor(CreateCounter).SetResource("CPU", 2.0).SetResource("GPU", 0.5).Remote(); + // Specify required resources for an actor. + ray::Actor(CreateCounter).SetResource("CPU", 2.0).SetResource("GPU", 0.5).Remote(); Calling the actor @@ -123,202 +127,210 @@ We can interact with the actor by calling its methods with the ``remote`` operator. We can then call ``get`` on the object ref to retrieve the actual value. -.. tabbed:: Python +.. tab-set:: - .. code-block:: python + .. tab-item:: Python - # Call the actor. - obj_ref = counter.increment.remote() - assert ray.get(obj_ref) == 1 + .. code-block:: python -.. tabbed:: Java + # Call the actor. + obj_ref = counter.increment.remote() + assert ray.get(obj_ref) == 1 - .. code-block:: java + .. tab-item:: Java - // Call the actor. - ObjectRef objectRef = counter.task(&Counter::increment).remote(); - Assert.assertTrue(objectRef.get() == 1); + .. 
code-block:: java -.. tabbed:: C++ + // Call the actor. + ObjectRef objectRef = counter.task(&Counter::increment).remote(); + Assert.assertTrue(objectRef.get() == 1); - .. code-block:: c++ + .. tab-item:: C++ - // Call the actor. - auto object_ref = counter.Task(&Counter::increment).Remote(); - assert(*object_ref.Get() == 1); + .. code-block:: c++ + + // Call the actor. + auto object_ref = counter.Task(&Counter::increment).Remote(); + assert(*object_ref.Get() == 1); Methods called on different actors can execute in parallel, and methods called on the same actor are executed serially in the order that they are called. Methods on the same actor will share state with one another, as shown below. -.. tabbed:: Python - - .. code-block:: python - - # Create ten Counter actors. - counters = [Counter.remote() for _ in range(10)] - - # Increment each Counter once and get the results. These tasks all happen in - # parallel. - results = ray.get([c.increment.remote() for c in counters]) - print(results) # prints [1, 1, 1, 1, 1, 1, 1, 1, 1, 1] - - # Increment the first Counter five times. These tasks are executed serially - # and share state. - results = ray.get([counters[0].increment.remote() for _ in range(5)]) - print(results) # prints [2, 3, 4, 5, 6] - -.. tabbed:: Java - - .. code-block:: java - - // Create ten Counter actors. - List> counters = new ArrayList<>(); - for (int i = 0; i < 10; i++) { - counters.add(Ray.actor(Counter::new).remote()); - } - - // Increment each Counter once and get the results. These tasks all happen in - // parallel. - List> objectRefs = new ArrayList<>(); - for (ActorHandle counterActor : counters) { - objectRefs.add(counterActor.task(Counter::increment).remote()); - } - // prints [1, 1, 1, 1, 1, 1, 1, 1, 1, 1] - System.out.println(Ray.get(objectRefs)); - - // Increment the first Counter five times. These tasks are executed serially - // and share state. 
- objectRefs = new ArrayList<>(); - for (int i = 0; i < 5; i++) { - objectRefs.add(counters.get(0).task(Counter::increment).remote()); - } - // prints [2, 3, 4, 5, 6] - System.out.println(Ray.get(objectRefs)); - -.. tabbed:: C++ - - .. code-block:: c++ - - // Create ten Counter actors. - std::vector> counters; - for (int i = 0; i < 10; i++) { - counters.emplace_back(ray::Actor(CreateCounter).Remote()); - } - - // Increment each Counter once and get the results. These tasks all happen in - // parallel. - std::vector> object_refs; - for (ray::ActorHandle counter_actor : counters) { - object_refs.emplace_back(counter_actor.Task(&Counter::Increment).Remote()); - } - // prints 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 - auto results = ray::Get(object_refs); - for (const auto &result : results) { - std::cout << *result; - } - - // Increment the first Counter five times. These tasks are executed serially - // and share state. - object_refs.clear(); - for (int i = 0; i < 5; i++) { - object_refs.emplace_back(counters[0].Task(&Counter::Increment).Remote()); - } - // prints 2, 3, 4, 5, 6 - results = ray::Get(object_refs); - for (const auto &result : results) { - std::cout << *result; - } +.. tab-set:: + + .. tab-item:: Python + + .. code-block:: python + + # Create ten Counter actors. + counters = [Counter.remote() for _ in range(10)] + + # Increment each Counter once and get the results. These tasks all happen in + # parallel. + results = ray.get([c.increment.remote() for c in counters]) + print(results) # prints [1, 1, 1, 1, 1, 1, 1, 1, 1, 1] + + # Increment the first Counter five times. These tasks are executed serially + # and share state. + results = ray.get([counters[0].increment.remote() for _ in range(5)]) + print(results) # prints [2, 3, 4, 5, 6] + + .. tab-item:: Java + + .. code-block:: java + + // Create ten Counter actors. 
+ List> counters = new ArrayList<>(); + for (int i = 0; i < 10; i++) { + counters.add(Ray.actor(Counter::new).remote()); + } + + // Increment each Counter once and get the results. These tasks all happen in + // parallel. + List> objectRefs = new ArrayList<>(); + for (ActorHandle counterActor : counters) { + objectRefs.add(counterActor.task(Counter::increment).remote()); + } + // prints [1, 1, 1, 1, 1, 1, 1, 1, 1, 1] + System.out.println(Ray.get(objectRefs)); + + // Increment the first Counter five times. These tasks are executed serially + // and share state. + objectRefs = new ArrayList<>(); + for (int i = 0; i < 5; i++) { + objectRefs.add(counters.get(0).task(Counter::increment).remote()); + } + // prints [2, 3, 4, 5, 6] + System.out.println(Ray.get(objectRefs)); + + .. tab-item:: C++ + + .. code-block:: c++ + + // Create ten Counter actors. + std::vector> counters; + for (int i = 0; i < 10; i++) { + counters.emplace_back(ray::Actor(CreateCounter).Remote()); + } + + // Increment each Counter once and get the results. These tasks all happen in + // parallel. + std::vector> object_refs; + for (ray::ActorHandle counter_actor : counters) { + object_refs.emplace_back(counter_actor.Task(&Counter::Increment).Remote()); + } + // prints 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 + auto results = ray::Get(object_refs); + for (const auto &result : results) { + std::cout << *result; + } + + // Increment the first Counter five times. These tasks are executed serially + // and share state. + object_refs.clear(); + for (int i = 0; i < 5; i++) { + object_refs.emplace_back(counters[0].Task(&Counter::Increment).Remote()); + } + // prints 2, 3, 4, 5, 6 + results = ray::Get(object_refs); + for (const auto &result : results) { + std::cout << *result; + } Passing Around Actor Handles ---------------------------- Actor handles can be passed into other tasks. We can define remote functions (or actor methods) that use actor handles. -.. tabbed:: Python +.. tab-set:: + + .. tab-item:: Python - .. 
code-block:: python + .. code-block:: python - import time + import time - @ray.remote - def f(counter): - for _ in range(1000): - time.sleep(0.1) - counter.increment.remote() + @ray.remote + def f(counter): + for _ in range(1000): + time.sleep(0.1) + counter.increment.remote() -.. tabbed:: Java + .. tab-item:: Java - .. code-block:: java + .. code-block:: java - public static class MyRayApp { + public static class MyRayApp { - public static void foo(ActorHandle counter) throws InterruptedException { - for (int i = 0; i < 1000; i++) { - TimeUnit.MILLISECONDS.sleep(100); - counter.task(Counter::increment).remote(); + public static void foo(ActorHandle counter) throws InterruptedException { + for (int i = 0; i < 1000; i++) { + TimeUnit.MILLISECONDS.sleep(100); + counter.task(Counter::increment).remote(); + } + } } - } - } -.. tabbed:: C++ + .. tab-item:: C++ - .. code-block:: c++ + .. code-block:: c++ - void Foo(ray::ActorHandle counter) { - for (int i = 0; i < 1000; i++) { - std::this_thread::sleep_for(std::chrono::milliseconds(100)); - counter.Task(&Counter::Increment).Remote(); + void Foo(ray::ActorHandle counter) { + for (int i = 0; i < 1000; i++) { + std::this_thread::sleep_for(std::chrono::milliseconds(100)); + counter.Task(&Counter::Increment).Remote(); + } } - } If we instantiate an actor, we can pass the handle around to various tasks. -.. tabbed:: Python +.. tab-set:: + + .. tab-item:: Python - .. code-block:: python + .. code-block:: python - counter = Counter.remote() + counter = Counter.remote() - # Start some tasks that use the actor. - [f.remote(counter) for _ in range(3)] + # Start some tasks that use the actor. + [f.remote(counter) for _ in range(3)] - # Print the counter value. - for _ in range(10): - time.sleep(1) - print(ray.get(counter.get_counter.remote())) + # Print the counter value. + for _ in range(10): + time.sleep(1) + print(ray.get(counter.get_counter.remote())) -.. tabbed:: Java + .. tab-item:: Java - .. code-block:: java + .. 
code-block:: java - ActorHandle counter = Ray.actor(Counter::new).remote(); + ActorHandle counter = Ray.actor(Counter::new).remote(); - // Start some tasks that use the actor. - for (int i = 0; i < 3; i++) { - Ray.task(MyRayApp::foo, counter).remote(); - } + // Start some tasks that use the actor. + for (int i = 0; i < 3; i++) { + Ray.task(MyRayApp::foo, counter).remote(); + } - // Print the counter value. - for (int i = 0; i < 10; i++) { - TimeUnit.SECONDS.sleep(1); - System.out.println(counter.task(Counter::getCounter).remote().get()); - } + // Print the counter value. + for (int i = 0; i < 10; i++) { + TimeUnit.SECONDS.sleep(1); + System.out.println(counter.task(Counter::getCounter).remote().get()); + } -.. tabbed:: C++ + .. tab-item:: C++ - .. code-block:: c++ + .. code-block:: c++ - auto counter = ray::Actor(CreateCounter).Remote(); + auto counter = ray::Actor(CreateCounter).Remote(); - // Start some tasks that use the actor. - for (int i = 0; i < 3; i++) { - ray::Task(Foo).Remote(counter); - } + // Start some tasks that use the actor. + for (int i = 0; i < 3; i++) { + ray::Task(Foo).Remote(counter); + } - // Print the counter value. - for (int i = 0; i < 10; i++) { - std::this_thread::sleep_for(std::chrono::seconds(1)); - std::cout << *counter.Task(&Counter::GetCounter).Remote().Get() << std::endl; - } + // Print the counter value. + for (int i = 0; i < 10; i++) { + std::this_thread::sleep_for(std::chrono::seconds(1)); + std::cout << *counter.Task(&Counter::GetCounter).Remote().Get() << std::endl; + } Scheduling diff --git a/doc/source/ray-core/actors/concurrency_group_api.rst b/doc/source/ray-core/actors/concurrency_group_api.rst index f27e4725fd16..9945f530e2de 100644 --- a/doc/source/ray-core/actors/concurrency_group_api.rst +++ b/doc/source/ray-core/actors/concurrency_group_api.rst @@ -17,95 +17,97 @@ into the "compute" group. Note that there is always a default concurrency group, which has a default concurrency of 1000 in Python and 1 in Java. -.. 
tabbed:: Python +.. tab-set:: - You can define concurrency groups for asyncio actors using the ``concurrency_group`` decorator argument: + .. tab-item:: Python - .. code-block:: python + You can define concurrency groups for asyncio actors using the ``concurrency_group`` decorator argument: - @ray.remote(concurrency_groups={"io": 2, "compute": 4}) - class AsyncIOActor: - def __init__(self): - pass + .. code-block:: python - @ray.method(concurrency_group="io") - async def f1(self): - pass + @ray.remote(concurrency_groups={"io": 2, "compute": 4}) + class AsyncIOActor: + def __init__(self): + pass - @ray.method(concurrency_group="io") - async def f2(self): - pass + @ray.method(concurrency_group="io") + async def f1(self): + pass - @ray.method(concurrency_group="compute") - async def f3(self): - pass + @ray.method(concurrency_group="io") + async def f2(self): + pass - @ray.method(concurrency_group="compute") - async def f4(self): - pass + @ray.method(concurrency_group="compute") + async def f3(self): + pass - async def f5(self): - pass + @ray.method(concurrency_group="compute") + async def f4(self): + pass - a = AsyncIOActor.remote() - a.f1.remote() # executed in the "io" group. - a.f2.remote() # executed in the "io" group. - a.f3.remote() # executed in the "compute" group. - a.f4.remote() # executed in the "compute" group. - a.f5.remote() # executed in the default group. + async def f5(self): + pass -.. tabbed:: Java + a = AsyncIOActor.remote() + a.f1.remote() # executed in the "io" group. + a.f2.remote() # executed in the "io" group. + a.f3.remote() # executed in the "compute" group. + a.f4.remote() # executed in the "compute" group. + a.f5.remote() # executed in the default group. - You can define concurrency groups for concurrent actors using the API ``setConcurrencyGroups()`` argument: + .. tab-item:: Java - .. 
code-block:: java + You can define concurrency groups for concurrent actors using the API ``setConcurrencyGroups()`` argument: - class ConcurrentActor { - public long f1() { - return Thread.currentThread().getId(); - } + .. code-block:: java - public long f2() { - return Thread.currentThread().getId(); - } + class ConcurrentActor { + public long f1() { + return Thread.currentThread().getId(); + } - public long f3(int a, int b) { - return Thread.currentThread().getId(); - } + public long f2() { + return Thread.currentThread().getId(); + } - public long f4() { - return Thread.currentThread().getId(); - } + public long f3(int a, int b) { + return Thread.currentThread().getId(); + } + + public long f4() { + return Thread.currentThread().getId(); + } - public long f5() { - return Thread.currentThread().getId(); + public long f5() { + return Thread.currentThread().getId(); + } } - } - - ConcurrencyGroup group1 = - new ConcurrencyGroupBuilder() - .setName("io") - .setMaxConcurrency(1) - .addMethod(ConcurrentActor::f1) - .addMethod(ConcurrentActor::f2) - .build(); - ConcurrencyGroup group2 = - new ConcurrencyGroupBuilder() - .setName("compute") - .setMaxConcurrency(1) - .addMethod(ConcurrentActor::f3) - .addMethod(ConcurrentActor::f4) - .build(); - - ActorHandle myActor = Ray.actor(ConcurrentActor::new) - .setConcurrencyGroups(group1, group2) - .remote(); - - myActor.task(ConcurrentActor::f1).remote(); // executed in the "io" group. - myActor.task(ConcurrentActor::f2).remote(); // executed in the "io" group. - myActor.task(ConcurrentActor::f3, 3, 5).remote(); // executed in the "compute" group. - myActor.task(ConcurrentActor::f4).remote(); // executed in the "compute" group. - myActor.task(ConcurrentActor::f5).remote(); // executed in the "default" group. 
+ + ConcurrencyGroup group1 = + new ConcurrencyGroupBuilder() + .setName("io") + .setMaxConcurrency(1) + .addMethod(ConcurrentActor::f1) + .addMethod(ConcurrentActor::f2) + .build(); + ConcurrencyGroup group2 = + new ConcurrencyGroupBuilder() + .setName("compute") + .setMaxConcurrency(1) + .addMethod(ConcurrentActor::f3) + .addMethod(ConcurrentActor::f4) + .build(); + + ActorHandle myActor = Ray.actor(ConcurrentActor::new) + .setConcurrencyGroups(group1, group2) + .remote(); + + myActor.task(ConcurrentActor::f1).remote(); // executed in the "io" group. + myActor.task(ConcurrentActor::f2).remote(); // executed in the "io" group. + myActor.task(ConcurrentActor::f3, 3, 5).remote(); // executed in the "compute" group. + myActor.task(ConcurrentActor::f4).remote(); // executed in the "compute" group. + myActor.task(ConcurrentActor::f5).remote(); // executed in the "default" group. .. _default-concurrency-group: @@ -116,43 +118,45 @@ Default Concurrency Group By default, methods are placed in a default concurrency group which has a concurrency limit of 1000 in Python, 1 in Java. The concurrency of the default group can be changed by setting the ``max_concurrency`` actor option. -.. tabbed:: Python +.. tab-set:: - The following AsyncIOActor has 2 concurrency groups: "io" and "default". - The max concurrency of "io" is 2, and the max concurrency of "default" is 10. + .. tab-item:: Python - .. code-block:: python + The following AsyncIOActor has 2 concurrency groups: "io" and "default". + The max concurrency of "io" is 2, and the max concurrency of "default" is 10. - @ray.remote(concurrency_groups={"io": 2}) - class AsyncIOActor: - async def f1(self): - pass + .. code-block:: python - actor = AsyncIOActor.options(max_concurrency=10).remote() + @ray.remote(concurrency_groups={"io": 2}) + class AsyncIOActor: + async def f1(self): + pass -.. 
tabbed:: Java + actor = AsyncIOActor.options(max_concurrency=10).remote() - The following concurrent actor has 2 concurrency groups: "io" and "default". - The max concurrency of "io" is 2, and the max concurrency of "default" is 10. + .. tab-item:: Java - .. code-block:: java + The following concurrent actor has 2 concurrency groups: "io" and "default". + The max concurrency of "io" is 2, and the max concurrency of "default" is 10. - class ConcurrentActor: - public long f1() { - return Thread.currentThread().getId(); - } + .. code-block:: java + + class ConcurrentActor: + public long f1() { + return Thread.currentThread().getId(); + } - ConcurrencyGroup group = - new ConcurrencyGroupBuilder() - .setName("io") - .setMaxConcurrency(2) - .addMethod(ConcurrentActor::f1) - .build(); + ConcurrencyGroup group = + new ConcurrencyGroupBuilder() + .setName("io") + .setMaxConcurrency(2) + .addMethod(ConcurrentActor::f1) + .build(); - ActorHandle myActor = Ray.actor(ConcurrentActor::new) - .setConcurrencyGroups(group1) - .setMaxConcurrency(10) - .remote(); + ActorHandle myActor = Ray.actor(ConcurrentActor::new) + .setConcurrencyGroups(group1) + .setMaxConcurrency(10) + .remote(); .. _setting-the-concurrency-group-at-runtime: @@ -165,26 +169,28 @@ You can also dispatch actor methods into a specific concurrency group at runtime The following snippet demonstrates setting the concurrency group of the ``f2`` method dynamically at runtime. -.. tabbed:: Python - - You can use the ``.options`` method. +.. tab-set:: + + .. tab-item:: Python + + You can use the ``.options`` method. - .. code-block:: python + .. code-block:: python - # Executed in the "io" group (as defined in the actor class). - a.f2.options().remote() + # Executed in the "io" group (as defined in the actor class). + a.f2.options().remote() - # Executed in the "compute" group. - a.f2.options(concurrency_group="compute").remote() + # Executed in the "compute" group. 
+ a.f2.options(concurrency_group="compute").remote() -.. tabbed:: Java + .. tab-item:: Java - You can use ``setConcurrencyGroup`` method. + You can use ``setConcurrencyGroup`` method. - .. code-block:: java + .. code-block:: java - // Executed in the "io" group (as defined in the actor creation). - myActor.task(ConcurrentActor::f2).remote(); + // Executed in the "io" group (as defined in the actor creation). + myActor.task(ConcurrentActor::f2).remote(); - // Executed in the "compute" group. - myActor.task(ConcurrentActor::f2).setConcurrencyGroup("compute").remote(); + // Executed in the "compute" group. + myActor.task(ConcurrentActor::f2).setConcurrencyGroup("compute").remote(); diff --git a/doc/source/ray-core/actors/named-actors.rst b/doc/source/ray-core/actors/named-actors.rst index 79b7367d24ac..51a624bca1b3 100644 --- a/doc/source/ray-core/actors/named-actors.rst +++ b/doc/source/ray-core/actors/named-actors.rst @@ -9,118 +9,122 @@ access an actor launched by another driver. Note that the actor will still be garbage-collected if no handles to it exist. See :ref:`actor-lifetimes` for more details. -.. tabbed:: Python +.. tab-set:: - .. code-block:: python + .. tab-item:: Python - # Create an actor with a name - counter = Counter.options(name="some_name").remote() + .. code-block:: python - ... + # Create an actor with a name + counter = Counter.options(name="some_name").remote() - # Retrieve the actor later somewhere - counter = ray.get_actor("some_name") + ... -.. tabbed:: Java + # Retrieve the actor later somewhere + counter = ray.get_actor("some_name") - .. code-block:: java + .. tab-item:: Java - // Create an actor with a name. - ActorHandle counter = Ray.actor(Counter::new).setName("some_name").remote(); + .. code-block:: java - ... + // Create an actor with a name. 
+ ActorHandle counter = Ray.actor(Counter::new).setName("some_name").remote(); - // Retrieve the actor later somewhere - Optional> counter = Ray.getActor("some_name"); - Assert.assertTrue(counter.isPresent()); + ... -.. tabbed:: C++ + // Retrieve the actor later somewhere + Optional> counter = Ray.getActor("some_name"); + Assert.assertTrue(counter.isPresent()); - .. code-block:: c++ + .. tab-item:: C++ - // Create an actor with a globally unique name - ActorHandle counter = ray::Actor(CreateCounter).SetGlobalName("some_name").Remote(); + .. code-block:: c++ - ... + // Create an actor with a globally unique name + ActorHandle counter = ray::Actor(CreateCounter).SetGlobalName("some_name").Remote(); - // Retrieve the actor later somewhere - boost::optional> counter = ray::GetGlobalActor("some_name"); + ... - We also support non-global named actors in C++, which means that the actor name is only valid within the job and the actor cannot be accessed from another job + // Retrieve the actor later somewhere + boost::optional> counter = ray::GetGlobalActor("some_name"); - .. code-block:: c++ + We also support non-global named actors in C++, which means that the actor name is only valid within the job and the actor cannot be accessed from another job - // Create an actor with a job-scope-unique name - ActorHandle counter = ray::Actor(CreateCounter).SetName("some_name").Remote(); + .. code-block:: c++ - ... + // Create an actor with a job-scope-unique name + ActorHandle counter = ray::Actor(CreateCounter).SetName("some_name").Remote(); - // Retrieve the actor later somewhere in the same job - boost::optional> counter = ray::GetActor("some_name"); + ... + + // Retrieve the actor later somewhere in the same job + boost::optional> counter = ray::GetActor("some_name"); .. note:: Named actors are scoped by namespace. If no namespace is assigned, they will be placed in an anonymous namespace by default. -.. tabbed:: Python +.. tab-set:: + + .. tab-item:: Python - .. 
code-block:: python + .. code-block:: python - import ray + import ray - @ray.remote - class Actor: - pass + @ray.remote + class Actor: + pass - # driver_1.py - # Job 1 creates an actor, "orange" in the "colors" namespace. - ray.init(address="auto", namespace="colors") - Actor.options(name="orange", lifetime="detached").remote() + # driver_1.py + # Job 1 creates an actor, "orange" in the "colors" namespace. + ray.init(address="auto", namespace="colors") + Actor.options(name="orange", lifetime="detached").remote() - # driver_2.py - # Job 2 is now connecting to a different namespace. - ray.init(address="auto", namespace="fruit") - # This fails because "orange" was defined in the "colors" namespace. - ray.get_actor("orange") - # You can also specify the namespace explicitly. - ray.get_actor("orange", namespace="colors") + # driver_2.py + # Job 2 is now connecting to a different namespace. + ray.init(address="auto", namespace="fruit") + # This fails because "orange" was defined in the "colors" namespace. + ray.get_actor("orange") + # You can also specify the namespace explicitly. + ray.get_actor("orange", namespace="colors") - # driver_3.py - # Job 3 connects to the original "colors" namespace - ray.init(address="auto", namespace="colors") - # This returns the "orange" actor we created in the first job. - ray.get_actor("orange") + # driver_3.py + # Job 3 connects to the original "colors" namespace + ray.init(address="auto", namespace="colors") + # This returns the "orange" actor we created in the first job. + ray.get_actor("orange") -.. tabbed:: Java + .. tab-item:: Java - .. code-block:: java + .. code-block:: java - import ray + import ray - class Actor { - } + class Actor { + } - // Driver1.java - // Job 1 creates an actor, "orange" in the "colors" namespace. - System.setProperty("ray.job.namespace", "colors"); - Ray.init(); - Ray.actor(Actor::new).setName("orange").remote(); + // Driver1.java + // Job 1 creates an actor, "orange" in the "colors" namespace. 
+ System.setProperty("ray.job.namespace", "colors"); + Ray.init(); + Ray.actor(Actor::new).setName("orange").remote(); - // Driver2.java - // Job 2 is now connecting to a different namespace. - System.setProperty("ray.job.namespace", "fruits"); - Ray.init(); - // This fails because "orange" was defined in the "colors" namespace. - Optional> actor = Ray.getActor("orange"); - Assert.assertFalse(actor.isPresent()); // actor.isPresent() is false. + // Driver2.java + // Job 2 is now connecting to a different namespace. + System.setProperty("ray.job.namespace", "fruits"); + Ray.init(); + // This fails because "orange" was defined in the "colors" namespace. + Optional> actor = Ray.getActor("orange"); + Assert.assertFalse(actor.isPresent()); // actor.isPresent() is false. - // Driver3.java - System.setProperty("ray.job.namespace", "colors"); - Ray.init(); - // This returns the "orange" actor we created in the first job. - Optional> actor = Ray.getActor("orange"); - Assert.assertTrue(actor.isPresent()); // actor.isPresent() is true. + // Driver3.java + System.setProperty("ray.job.namespace", "colors"); + Ray.init(); + // This returns the "orange" actor we created in the first job. + Optional> actor = Ray.getActor("orange"); + Assert.assertTrue(actor.isPresent()); // actor.isPresent() is true. Get-Or-Create a Named Actor --------------------------- @@ -133,21 +137,23 @@ If the actor already exists, a handle to the actor will be returned and the arguments will be ignored. Otherwise, a new actor will be created with the specified arguments. -.. tabbed:: Python +.. tab-set:: + + .. tab-item:: Python - .. literalinclude:: ../doc_code/get_or_create.py + .. literalinclude:: ../doc_code/get_or_create.py -.. tabbed:: Java + .. tab-item:: Java - .. code-block:: java + .. code-block:: java - // This feature is not yet available in Java. + // This feature is not yet available in Java. -.. tabbed:: C++ + .. tab-item:: C++ - .. code-block:: c++ + .. 
code-block:: c++ - // This feature is not yet available in C++. + // This feature is not yet available in C++. .. _actor-lifetimes: @@ -157,47 +163,49 @@ Actor Lifetimes Separately, actor lifetimes can be decoupled from the job, allowing an actor to persist even after the driver process of the job exits. We call these actors *detached*. -.. tabbed:: Python +.. tab-set:: + + .. tab-item:: Python + + .. code-block:: python - .. code-block:: python + counter = Counter.options(name="CounterActor", lifetime="detached").remote() - counter = Counter.options(name="CounterActor", lifetime="detached").remote() + The ``CounterActor`` will be kept alive even after the driver running above script + exits. Therefore it is possible to run the following script in a different + driver: - The ``CounterActor`` will be kept alive even after the driver running above script - exits. Therefore it is possible to run the following script in a different - driver: + .. code-block:: python - .. code-block:: python + counter = ray.get_actor("CounterActor") + print(ray.get(counter.get_counter.remote())) - counter = ray.get_actor("CounterActor") - print(ray.get(counter.get_counter.remote())) + Note that an actor can be named but not detached. If we only specified the + name without specifying ``lifetime="detached"``, then the CounterActor can + only be retrieved as long as the original driver is still running. - Note that an actor can be named but not detached. If we only specified the - name without specifying ``lifetime="detached"``, then the CounterActor can - only be retrieved as long as the original driver is still running. + .. tab-item:: Java -.. tabbed:: Java + .. code-block:: java - .. 
code-block:: java + System.setProperty("ray.job.namespace", "lifetime"); + Ray.init(); + ActorHandle counter = Ray.actor(Counter::new).setName("some_name").setLifetime(ActorLifetime.DETACHED).remote(); - System.setProperty("ray.job.namespace", "lifetime"); - Ray.init(); - ActorHandle counter = Ray.actor(Counter::new).setName("some_name").setLifetime(ActorLifetime.DETACHED).remote(); - - The CounterActor will be kept alive even after the driver running above process - exits. Therefore it is possible to run the following code in a different - driver: + The CounterActor will be kept alive even after the driver running above process + exits. Therefore it is possible to run the following code in a different + driver: - .. code-block:: java + .. code-block:: java - System.setProperty("ray.job.namespace", "lifetime"); - Ray.init(); - Optional> counter = Ray.getActor("some_name"); - Assert.assertTrue(counter.isPresent()); + System.setProperty("ray.job.namespace", "lifetime"); + Ray.init(); + Optional> counter = Ray.getActor("some_name"); + Assert.assertTrue(counter.isPresent()); -.. tabbed:: C++ + .. tab-item:: C++ - Customizing lifetime of an actor hasn't been implemented in C++ yet. + Customizing lifetime of an actor hasn't been implemented in C++ yet. Unlike normal actors, detached actors are not automatically garbage-collected by Ray. diff --git a/doc/source/ray-core/actors/task-orders.rst b/doc/source/ray-core/actors/task-orders.rst index 0131abdd66fe..bfd665a519b4 100644 --- a/doc/source/ray-core/actors/task-orders.rst +++ b/doc/source/ray-core/actors/task-orders.rst @@ -11,77 +11,81 @@ them following the submission order. In other words, a given task will not be executed until previously submitted tasks from the same submitter have finished execution. -.. tabbed:: Python +.. tab-set:: - .. code-block:: python + .. tab-item:: Python - import ray + .. 
code-block:: python - @ray.remote - class Counter: - def __init__(self): - self.value = 0 + import ray - def add(self, addition): - self.value += addition - return self.value + @ray.remote + class Counter: + def __init__(self): + self.value = 0 - counter = Counter.remote() + def add(self, addition): + self.value += addition + return self.value - # For tasks from the same submitter, - # they are executed according to submission order. - value0 = counter.add.remote(1) - value1 = counter.add.remote(2) + counter = Counter.remote() - # Output: 1. The first submitted task is executed first. - print(ray.get(value0)) - # Output: 3. The later submitted task is executed later. - print(ray.get(value1)) + # For tasks from the same submitter, + # they are executed according to submission order. + value0 = counter.add.remote(1) + value1 = counter.add.remote(2) + + # Output: 1. The first submitted task is executed first. + print(ray.get(value0)) + # Output: 3. The later submitted task is executed later. + print(ray.get(value1)) However, the actor does not guarantee the execution order of the tasks from different submitters. For example, suppose an unfulfilled argument blocks a previously submitted task. In this case, the actor can still execute tasks submitted by a different worker. -.. tabbed:: Python +.. tab-set:: + + .. tab-item:: Python - .. code-block:: python + .. 
code-block:: python - import time - import ray + import time + import ray - @ray.remote - class Counter: - def __init__(self): - self.value = 0 + @ray.remote + class Counter: + def __init__(self): + self.value = 0 - def add(self, addition): - self.value += addition - return self.value + def add(self, addition): + self.value += addition + return self.value - counter = Counter.remote() + counter = Counter.remote() - # Submit task from a worker - @ray.remote - def submitter(value): - return ray.get(counter.add.remote(value)) + # Submit task from a worker + @ray.remote + def submitter(value): + return ray.get(counter.add.remote(value)) - # Simulate delayed result resolution. - @ray.remote - def delayed_resolution(value): - time.sleep(5) - return value + # Simulate delayed result resolution. + @ray.remote + def delayed_resolution(value): + time.sleep(5) + return value - # Submit tasks from different workers, with - # the first submitted task waiting for - # dependency resolution. - value0 = submitter.remote(delayed_resolution.remote(1)) - value1 = submitter.remote(2) + # Submit tasks from different workers, with + # the first submitted task waiting for + # dependency resolution. + value0 = submitter.remote(delayed_resolution.remote(1)) + value1 = submitter.remote(2) - # Output: 3. The first submitted task is executed later. - print(ray.get(value0)) - # Output: 2. The later submitted task is executed first. - print(ray.get(value1)) + # Output: 3. The first submitted task is executed later. + print(ray.get(value0)) + # Output: 2. The later submitted task is executed first. + print(ray.get(value1)) Asynchronous or Threaded Actor @@ -90,37 +94,39 @@ Asynchronous or Threaded Actor task execution order. This means the system might execute a task even though previously submitted tasks are pending execution. -.. tabbed:: Python +.. tab-set:: + + .. tab-item:: Python - .. code-block:: python + .. 
code-block:: python - import time - import ray + import time + import ray - @ray.remote - class AsyncCounter: - def __init__(self): - self.value = 0 + @ray.remote + class AsyncCounter: + def __init__(self): + self.value = 0 - async def add(self, addition): - self.value += addition - return self.value + async def add(self, addition): + self.value += addition + return self.value - counter = AsyncCounter.remote() + counter = AsyncCounter.remote() - # Simulate delayed result resolution. - @ray.remote - def delayed_resolution(value): - time.sleep(5) - return value + # Simulate delayed result resolution. + @ray.remote + def delayed_resolution(value): + time.sleep(5) + return value - # Submit tasks from the driver, with - # the first submitted task waiting for - # dependency resolution. - value0 = counter.add.remote(delayed_resolution.remote(1)) - value1 = counter.add.remote(2) + # Submit tasks from the driver, with + # the first submitted task waiting for + # dependency resolution. + value0 = counter.add.remote(delayed_resolution.remote(1)) + value1 = counter.add.remote(2) - # Output: 3. The first submitted task is executed later. - print(ray.get(value0)) - # Output: 2. The later submitted task is executed first. - print(ray.get(value1)) + # Output: 3. The first submitted task is executed later. + print(ray.get(value0)) + # Output: 2. The later submitted task is executed first. + print(ray.get(value1)) diff --git a/doc/source/ray-core/starting-ray.rst b/doc/source/ray-core/starting-ray.rst index e04a8a399ffd..60c4860525df 100644 --- a/doc/source/ray-core/starting-ray.rst +++ b/doc/source/ray-core/starting-ray.rst @@ -35,114 +35,120 @@ Calling ``ray.init()`` starts a local Ray instance on your laptop/machine. This In recent versions of Ray (>=1.5), ``ray.init()`` will automatically be called on the first use of a Ray remote API. -.. tabbed:: Python +.. tab-set:: - .. code-block:: python + .. 
tab-item:: Python - import ray - # Other Ray APIs will not work until `ray.init()` is called. - ray.init() + .. code-block:: python -.. tabbed:: Java + import ray + # Other Ray APIs will not work until `ray.init()` is called. + ray.init() - .. code-block:: java + .. tab-item:: Java - import io.ray.api.Ray; + .. code-block:: java - public class MyRayApp { + import io.ray.api.Ray; - public static void main(String[] args) { - // Other Ray APIs will not work until `Ray.init()` is called. - Ray.init(); - ... - } - } + public class MyRayApp { -.. tabbed:: C++ + public static void main(String[] args) { + // Other Ray APIs will not work until `Ray.init()` is called. + Ray.init(); + ... + } + } - .. code-block:: c++ + .. tab-item:: C++ - #include - // Other Ray APIs will not work until `ray::Init()` is called. - ray::Init() + .. code-block:: c++ + + #include + // Other Ray APIs will not work until `ray::Init()` is called. + ray::Init() When the process calling ``ray.init()`` terminates, the Ray runtime will also terminate. To explicitly stop or restart Ray, use the shutdown API. -.. tabbed:: Python +.. tab-set:: - .. code-block:: python + .. tab-item:: Python - import ray - ray.init() - ... # ray program - ray.shutdown() + .. code-block:: python -.. tabbed:: Java + import ray + ray.init() + ... # ray program + ray.shutdown() - .. code-block:: java + .. tab-item:: Java - import io.ray.api.Ray; + .. code-block:: java - public class MyRayApp { + import io.ray.api.Ray; - public static void main(String[] args) { - Ray.init(); - ... // ray program - Ray.shutdown(); - } - } + public class MyRayApp { + + public static void main(String[] args) { + Ray.init(); + ... // ray program + Ray.shutdown(); + } + } -.. tabbed:: C++ + .. tab-item:: C++ - .. code-block:: c++ + .. code-block:: c++ - #include - ray::Init() - ... // ray program - ray::Shutdown() + #include + ray::Init() + ... // ray program + ray::Shutdown() To check if Ray is initialized, use the ``is_initialized`` API. -.. 
tabbed:: Python +.. tab-set:: - .. code-block:: python + .. tab-item:: Python - import ray - ray.init() - assert ray.is_initialized() + .. code-block:: python - ray.shutdown() - assert not ray.is_initialized() + import ray + ray.init() + assert ray.is_initialized() -.. tabbed:: Java + ray.shutdown() + assert not ray.is_initialized() - .. code-block:: java + .. tab-item:: Java - import io.ray.api.Ray; + .. code-block:: java - public class MyRayApp { + import io.ray.api.Ray; - public static void main(String[] args) { - Ray.init(); - Assert.assertTrue(Ray.isInitialized()); - Ray.shutdown(); - Assert.assertFalse(Ray.isInitialized()); + public class MyRayApp { + + public static void main(String[] args) { + Ray.init(); + Assert.assertTrue(Ray.isInitialized()); + Ray.shutdown(); + Assert.assertFalse(Ray.isInitialized()); + } } - } -.. tabbed:: C++ + .. tab-item:: C++ - .. code-block:: c++ + .. code-block:: c++ - #include + #include - int main(int argc, char **argv) { - ray::Init(); - assert(ray::IsInitialized()); + int main(int argc, char **argv) { + ray::Init(); + assert(ray::IsInitialized()); - ray::Shutdown(); - assert(!ray::IsInitialized()); - } + ray::Shutdown(); + assert(!ray::IsInitialized()); + } See the `Configuration `__ documentation for the various ways to configure Ray. @@ -170,47 +176,49 @@ Use ``ray start`` from the CLI to start a 1 node ray runtime on a machine. This You can connect to this Ray instance by starting a driver process on the same node as where you ran ``ray start``. ``ray.init()`` will now automatically connect to the latest Ray instance. -.. tabbed:: Python +.. tab-set:: - .. code-block:: python + .. tab-item:: Python - import ray - ray.init() + .. code-block:: python -.. tabbed:: java + import ray + ray.init() - .. code-block:: java + .. tab-item:: java - import io.ray.api.Ray; + .. code-block:: java - public class MyRayApp { + import io.ray.api.Ray; - public static void main(String[] args) { - Ray.init(); - ... 
- } - } + public class MyRayApp { + + public static void main(String[] args) { + Ray.init(); + ... + } + } - .. code-block:: bash + .. code-block:: bash - java -classpath \ - -Dray.address=
    \ - + java -classpath \ + -Dray.address=
    \ + -.. tabbed:: C++ + .. tab-item:: C++ - .. code-block:: c++ + .. code-block:: c++ - #include + #include - int main(int argc, char **argv) { - ray::Init(); - ... - } + int main(int argc, char **argv) { + ray::Init(); + ... + } - .. code-block:: bash + .. code-block:: bash - RAY_ADDRESS=
    ./ + RAY_ADDRESS=
    ./ You can connect other nodes to the head node, creating a Ray cluster by also calling ``ray start`` on those nodes. See :ref:`on-prem` for more details. Calling ``ray.init()`` on any of the cluster machines will connect to the same Ray cluster. diff --git a/doc/source/ray-overview/getting-started.md b/doc/source/ray-overview/getting-started.md index b498e8648ae6..d35575cc11e1 100644 --- a/doc/source/ray-overview/getting-started.md +++ b/doc/source/ray-overview/getting-started.md @@ -195,7 +195,9 @@ Datastreams also supports ``.filter()`` and ``.flat_map()``. Ray Train abstracts away the complexity of setting up a distributed training system. Let's take following simple examples: -````{tabbed} PyTorch +````{tab-set} + +````{tab-item} PyTorch This example shows how you can use Ray Train with PyTorch. @@ -249,7 +251,7 @@ with 4 workers, and use it to run the new training function! ``` ```` -````{tabbed} TensorFlow +````{tab-item} TensorFlow This example shows how you can use Ray Train to set up `Multi-worker training with Keras `_. @@ -303,6 +305,7 @@ with 4 workers, and use it to run the new training function! :dedent: 0 ``` ```` +```` ```{link-button} ../train/train :type: ref @@ -415,7 +418,9 @@ for both Python and Java. `````{dropdown} ray Core: Parallelizing Functions with Ray Tasks :animate: fade-in-slide-down -````{tabbed} Python +````{tab-set} + +````{tab-item} Python First, you import Ray and and initialize it with `ray.init()`. Then you decorate your function with ``@ray.remote`` to declare that you want to run this function remotely. @@ -437,7 +442,7 @@ print(ray.get(futures)) # [0, 1, 4, 9] ``` ```` -````{tabbed} Java +````{tab-item} Java First, use `Ray.init` to initialize Ray runtime. Then you can use `Ray.task(...).remote()` to convert any Java static method into a Ray task. @@ -472,6 +477,8 @@ public class RayDemo { } ```` +```` + In the above code block we defined some Ray Tasks. 
While these are great for stateless operations, sometimes you must maintain the state of your application. You can do that with Ray Actors. @@ -491,7 +498,9 @@ When you instantiate a class that is a Ray actor, Ray will start a remote instan of that class in the cluster. This actor can then execute remote method calls and maintain its own internal state. -````{tabbed} Python +````{tab-set} + +````{tab-item} Python ```{code-block} python @@ -516,7 +525,7 @@ print(ray.get(futures)) # [1, 1, 1, 1] ``` ```` -````{tabbed} Java +````{tab-item} Java ```{code-block} java import io.ray.api.ActorHandle; @@ -565,6 +574,9 @@ public class RayDemo { } ```` + +```` + ```{link-button} ../ray-core/walkthrough :type: ref :text: Learn more about Ray Core diff --git a/doc/source/ray-overview/installation.rst b/doc/source/ray-overview/installation.rst index 1d7132a2eeff..eb5c002cc031 100644 --- a/doc/source/ray-overview/installation.rst +++ b/doc/source/ray-overview/installation.rst @@ -48,42 +48,41 @@ You can install the nightly Ray wheels via the following links. These daily rele # Install Ray with minimal dependencies # pip install -U LINK_TO_WHEEL.whl - - -.. tabbed:: Linux - - =============================================== ================================================ - Linux (x86_64) Linux (arm64/aarch64) - =============================================== ================================================ - `Linux Python 3.10 (x86_64)`_ `Linux Python 3.10 (aarch64)`_ - `Linux Python 3.9 (x86_64)`_ `Linux Python 3.9 (aarch64)`_ - `Linux Python 3.8 (x86_64)`_ `Linux Python 3.8 (aarch64)`_ - `Linux Python 3.7 (x86_64)`_ `Linux Python 3.7 (aarch64)`_ - `Linux Python 3.11 (x86_64) (EXPERIMENTAL)`_ `Linux Python 3.11 (aarch64) (EXPERIMENTAL)`_ - =============================================== ================================================ - -.. 
tabbed:: MacOS - - ================================ ================================ - MacOS (x86_64) MacOS (arm64) - ================================ ================================ - `MacOS Python 3.10 (x86_64)`_ `MacOS Python 3.10 (arm64)`_ - `MacOS Python 3.9 (x86_64)`_ `MacOS Python 3.9 (arm64)`_ - `MacOS Python 3.8 (x86_64)`_ `MacOS Python 3.8 (arm64)`_ - `MacOS Python 3.7 (x86_64)`_ - ================================ ================================ - -.. tabbed:: Windows (beta) - - .. list-table:: - :header-rows: 1 - - * - Windows (beta) - * - `Windows Python 3.10`_ - * - `Windows Python 3.9`_ - * - `Windows Python 3.8`_ - * - `Windows Python 3.7`_ - +.. tab-set:: + + .. tab-item:: Linux + + =============================================== ================================================ + Linux (x86_64) Linux (arm64/aarch64) + =============================================== ================================================ + `Linux Python 3.10 (x86_64)`_ `Linux Python 3.10 (aarch64)`_ + `Linux Python 3.9 (x86_64)`_ `Linux Python 3.9 (aarch64)`_ + `Linux Python 3.8 (x86_64)`_ `Linux Python 3.8 (aarch64)`_ + `Linux Python 3.7 (x86_64)`_ `Linux Python 3.7 (aarch64)`_ + `Linux Python 3.11 (x86_64) (EXPERIMENTAL)`_ `Linux Python 3.11 (aarch64) (EXPERIMENTAL)`_ + =============================================== ================================================ + + .. tab-item:: MacOS + + ================================ ================================ + MacOS (x86_64) MacOS (arm64) + ================================ ================================ + `MacOS Python 3.10 (x86_64)`_ `MacOS Python 3.10 (arm64)`_ + `MacOS Python 3.9 (x86_64)`_ `MacOS Python 3.9 (arm64)`_ + `MacOS Python 3.8 (x86_64)`_ `MacOS Python 3.8 (arm64)`_ + `MacOS Python 3.7 (x86_64)`_ + ================================ ================================ + + .. tab-item:: Windows (beta) + + .. 
list-table:: + :header-rows: 1 + + * - Windows (beta) + * - `Windows Python 3.10`_ + * - `Windows Python 3.9`_ + * - `Windows Python 3.8`_ + * - `Windows Python 3.7`_ .. note:: From 46a276fdd363e03e2b52dc316718c9643aac74ac Mon Sep 17 00:00:00 2001 From: Hao Chen Date: Fri, 28 Apr 2023 18:10:39 -0700 Subject: [PATCH 158/424] [Data] Stop Datastream when limit operator reaches the limit (#34844) When the limit operator reaches the limit, the upstream operators should stop producing data. --- .../data/_internal/execution/interfaces.py | 24 ++++++++- .../execution/operators/limit_operator.py | 21 +++++--- .../execution/streaming_executor_state.py | 31 +++++++++-- .../ray/data/_internal/logical/interfaces.py | 10 ++++ python/ray/data/tests/test_consumption.py | 15 +++--- python/ray/data/tests/test_operators.py | 51 +++++++++++++++++++ .../ray/data/tests/test_streaming_executor.py | 10 ++++ 7 files changed, 145 insertions(+), 17 deletions(-) diff --git a/python/ray/data/_internal/execution/interfaces.py b/python/ray/data/_internal/execution/interfaces.py index cd06591bdd0b..21b9ecb7e5eb 100644 --- a/python/ray/data/_internal/execution/interfaces.py +++ b/python/ray/data/_internal/execution/interfaces.py @@ -281,18 +281,24 @@ def __init__(self, name: str, input_dependencies: List["PhysicalOperator"]): for x in input_dependencies: assert isinstance(x, PhysicalOperator), x self._inputs_complete = not input_dependencies + self._dependents_complete = False self._started = False def __reduce__(self): raise ValueError("Operator is not serializable.") def completed(self) -> bool: - """Return True when this operator is done and all outputs are taken.""" + """Return True when this operator is completed. + + An operator is completed if any of the following conditions are met: + - All upstream operators are completed and all outputs are taken. + - All downstream operators are completed. 
+ """ return ( self._inputs_complete and len(self.get_work_refs()) == 0 and not self.has_next() - ) + ) or self._dependents_complete def get_stats(self) -> StatsDict: """Return recorded execution stats for use with DatastreamStats.""" @@ -345,6 +351,13 @@ def should_add_input(self) -> bool: """ return True + def need_more_inputs(self) -> bool: + """Return true if the operator still needs more inputs. + + Once this return false, it should never return true again. + """ + return True + def add_input(self, refs: RefBundle, input_index: int) -> None: """Called when an upstream result is available. @@ -367,6 +380,13 @@ def inputs_done(self) -> None: """ self._inputs_complete = True + def all_dependents_complete(self) -> None: + """Called when all downstream operators have completed(). + + After this is called, the operator is marked as completed. + """ + self._dependents_complete = True + def has_next(self) -> bool: """Returns when a downstream output is available. diff --git a/python/ray/data/_internal/execution/operators/limit_operator.py b/python/ray/data/_internal/execution/operators/limit_operator.py index bd5dcaeb7baf..80d66a57131a 100644 --- a/python/ray/data/_internal/execution/operators/limit_operator.py +++ b/python/ray/data/_internal/execution/operators/limit_operator.py @@ -34,14 +34,17 @@ def __init__( self._buffer: Deque[RefBundle] = deque() self._name = f"Limit[limit={limit}]" self._output_metadata: List[BlockMetadata] = [] - self._num_outputs_total = input_op.num_outputs_total() - if self._num_outputs_total is not None: - self._num_outputs_total = min(self._num_outputs_total, limit) + self._cur_output_bundles = 0 super().__init__(self._name, [input_op]) + if self._limit <= 0: + self.inputs_done() def _limit_reached(self) -> bool: return self._consumed_rows >= self._limit + def need_more_inputs(self) -> bool: + return not self._limit_reached() + def add_input(self, refs: RefBundle, input_index: int) -> None: assert not self.completed() assert input_index 
== 0, input_index @@ -77,11 +80,14 @@ def slice_fn(block, metadata, num_rows) -> Tuple[Block, BlockMetadata]: self._output_metadata.append(metadata) self._consumed_rows = self._limit break + self._cur_output_bundles += 1 out_refs = RefBundle( list(zip(out_blocks, out_metadata)), owns_blocks=refs.owns_blocks, ) self._buffer.append(out_refs) + if self._limit_reached(): + self.inputs_done() def has_next(self) -> bool: return len(self._buffer) > 0 @@ -93,7 +99,10 @@ def get_stats(self) -> StatsDict: return {self._name: self._output_metadata} def num_outputs_total(self) -> Optional[int]: - if self._limit_reached(): - return self._limit + # Before inputs are completed (either because the limit is reached or + # because the inputs operators are done), we don't know how many output + # bundles we will have. + if self._inputs_complete: + return self._cur_output_bundles else: - return self._num_outputs_total + return None diff --git a/python/ray/data/_internal/execution/streaming_executor_state.py b/python/ray/data/_internal/execution/streaming_executor_state.py index 24d815bd4e7c..05d32a2c2103 100644 --- a/python/ray/data/_internal/execution/streaming_executor_state.py +++ b/python/ray/data/_internal/execution/streaming_executor_state.py @@ -121,6 +121,7 @@ def __init__(self, op: PhysicalOperator, inqueues: List[Deque[MaybeRefBundle]]): self.progress_bar = None self.num_completed_tasks = 0 self.inputs_done_called = False + self.dependents_completed_called = False def initialize_progress_bars(self, index: int, verbose_progress: bool) -> int: """Create progress bars at the given index (line offset in console). @@ -334,16 +335,31 @@ def process_completed_tasks(topology: Topology) -> None: # Call inputs_done() on ops where no more inputs are coming. 
for op, op_state in topology.items(): + if op_state.inputs_done_called: + continue inputs_done = all( [ dep.completed() and not topology[dep].outqueue for dep in op.input_dependencies ] ) - if inputs_done and not op_state.inputs_done_called: + if inputs_done: op.inputs_done() op_state.inputs_done_called = True + # Traverse the topology in reverse topological order. + # For each op, if all of its downstream operators don't need any more inputs, + # call all_dependents_complete() to also complete this op. + for op, op_state in reversed(list(topology.items())): + if op_state.dependents_completed_called: + continue + dependents_completed = len(op.output_dependencies) > 0 and all( + not dep.need_more_inputs() for dep in op.output_dependencies + ) + if dependents_completed: + op.all_dependents_complete() + op_state.dependents_completed_called = True + def select_operator_to_run( topology: Topology, @@ -372,7 +388,12 @@ def select_operator_to_run( ops = [] for op, state in topology.items(): under_resource_limits = _execution_allowed(op, cur_usage, limits) - if state.num_queued() > 0 and op.should_add_input() and under_resource_limits: + if ( + op.need_more_inputs() + and state.num_queued() > 0 + and op.should_add_input() + and under_resource_limits + ): ops.append(op) # Update the op in all cases to enable internal autoscaling, etc. op.notify_resource_usage(state.num_queued(), under_resource_limits) @@ -396,7 +417,11 @@ def select_operator_to_run( and all(op.num_active_work_refs() == 0 for op in topology) ): # The topology is entirely idle, so choose from all ready ops ignoring limits. - ops = [op for op, state in topology.items() if state.num_queued() > 0] + ops = [ + op + for op, state in topology.items() + if op.need_more_inputs() and state.num_queued() > 0 + ] # Nothing to run. 
if not ops: diff --git a/python/ray/data/_internal/logical/interfaces.py b/python/ray/data/_internal/logical/interfaces.py index 0bd800a78681..9eaa7fef49ac 100644 --- a/python/ray/data/_internal/logical/interfaces.py +++ b/python/ray/data/_internal/logical/interfaces.py @@ -13,8 +13,10 @@ class Operator: def __init__(self, name: str, input_dependencies: List["Operator"]): self._name = name self._input_dependencies = input_dependencies + self._output_dependencies = [] for x in input_dependencies: assert isinstance(x, Operator), x + x._output_dependencies.append(self) @property def name(self) -> str: @@ -28,6 +30,14 @@ def input_dependencies(self) -> List["Operator"]: ), "Operator.__init__() was not called." return self._input_dependencies + @property + def output_dependencies(self) -> List["Operator"]: + """List of operators that consume outputs from this operator.""" + assert hasattr( + self, "_output_dependencies" + ), "Operator.__init__() was not called." + return self._output_dependencies + def post_order_iter(self) -> Iterator["Operator"]: """Depth-first traversal of this operator and its input dependencies.""" for op in self.input_dependencies: diff --git a/python/ray/data/tests/test_consumption.py b/python/ray/data/tests/test_consumption.py index aaed4409b1a7..2c6b879a2caa 100644 --- a/python/ray/data/tests/test_consumption.py +++ b/python/ray/data/tests/test_consumption.py @@ -353,10 +353,8 @@ def test_limit(ray_start_regular_shared, lazy): # NOTE: We test outside the power-of-2 range in order to ensure that we're not reading # redundant files due to exponential ramp-up. -# TODO(hchen): Re-enable this test after fixing #34234. 
-@pytest.mark.skip("This is not implemented for the streaming executor yet.") -@pytest.mark.parametrize("limit,expected", [(10, 1), (20, 2), (30, 3), (60, 6)]) -def test_limit_no_redundant_read(ray_start_regular_shared, limit, expected): +@pytest.mark.parametrize("limit,min_read_tasks", [(10, 1), (20, 2), (30, 3), (60, 6)]) +def test_limit_no_redundant_read(ray_start_regular_shared, limit, min_read_tasks): # Test that dataset truncation eliminates redundant reads. @ray.remote class Counter: @@ -402,16 +400,21 @@ def range_(i): source = CountingRangeDatasource() + parallelism = 10 ds = ray.data.read_datasource( source, - parallelism=10, + parallelism=parallelism, n=10, ) ds2 = ds.limit(limit) # Check content. assert extract_values("id", ds2.take(limit)) == list(range(limit)) # Check number of read tasks launched. - assert ray.get(source.counter.get.remote()) == expected + # min_read_tasks is the minimum number of read tasks needed for the limit. + # We may launch more tasks than this number, in order to to maximize throughput. + # But the actual number of read tasks should be less than the parallelism. 
+ count = ray.get(source.counter.get.remote()) + assert min_read_tasks <= count < parallelism def test_limit_no_num_row_info(ray_start_regular_shared): diff --git a/python/ray/data/tests/test_operators.py b/python/ray/data/tests/test_operators.py index 690a890c54d5..f69fc830668d 100644 --- a/python/ray/data/tests/test_operators.py +++ b/python/ray/data/tests/test_operators.py @@ -5,6 +5,7 @@ import numpy as np from typing import List, Iterable, Any import time +from unittest.mock import MagicMock import ray from ray.data.block import Block @@ -15,6 +16,7 @@ ExecutionOptions, ) from ray.data._internal.execution.operators.all_to_all_operator import AllToAllOperator +from ray.data._internal.execution.operators.limit_operator import LimitOperator from ray.data._internal.execution.operators.map_operator import ( MapOperator, _BlockRefBundler, @@ -588,6 +590,55 @@ def test_map_operator_pool_delegation(compute, expected): assert isinstance(op, expected) +def test_limit_operator(ray_start_regular_shared): + """Test basic functionalities of LimitOperator.""" + num_refs = 3 + num_rows_per_block = 3 + total_rows = num_refs * num_rows_per_block + # Test limits with different values, from 0 to more than input size. + limits = list(range(0, total_rows + 2)) + for limit in limits: + refs = make_ref_bundles([[i] * num_rows_per_block for i in range(num_refs)]) + input_op = InputDataBuffer(refs) + limit_op = LimitOperator(limit, input_op) + limit_op.inputs_done = MagicMock(wraps=limit_op.inputs_done) + if limit == 0: + # If the limit is 0, the operator should be completed immediately. + assert limit_op.completed() + assert limit_op._limit_reached() + else: + # The number of output bundles is unknown until + # inputs are completed. 
+ assert limit_op.num_outputs_total() is None, limit + cur_rows = 0 + loop_count = 0 + while input_op.has_next() and not limit_op._limit_reached(): + loop_count += 1 + assert not limit_op.completed(), limit + assert limit_op.need_more_inputs(), limit + limit_op.add_input(input_op.get_next(), 0) + while limit_op.has_next(): + # Drain the outputs. So the limit operator + # will be completed when the limit is reached. + limit_op.get_next() + cur_rows += num_rows_per_block + if cur_rows >= limit: + assert limit_op.inputs_done.call_count == 1, limit + assert limit_op.completed(), limit + assert limit_op._limit_reached(), limit + assert not limit_op.need_more_inputs(), limit + else: + assert limit_op.inputs_done.call_count == 0, limit + assert not limit_op.completed(), limit + assert not limit_op._limit_reached(), limit + assert limit_op.need_more_inputs(), limit + limit_op.inputs_done() + # After inputs done, the number of output bundles + # should be the same as the number of `add_input`s. + assert limit_op.num_outputs_total() == loop_count, limit + assert limit_op.completed(), limit + + def _get_bundles(bundle: RefBundle): output = [] for block, _ in bundle.blocks: diff --git a/python/ray/data/tests/test_streaming_executor.py b/python/ray/data/tests/test_streaming_executor.py index 50ab6a1fac2c..2b1291858610 100644 --- a/python/ray/data/tests/test_streaming_executor.py +++ b/python/ray/data/tests/test_streaming_executor.py @@ -99,19 +99,29 @@ def test_process_completed_tasks(): o2.get_work_refs = MagicMock(return_value=[sleep_ref, done_ref]) o2.notify_work_completed = MagicMock() o2.inputs_done = MagicMock() + o1.all_dependents_complete = MagicMock() process_completed_tasks(topo) o2.notify_work_completed.assert_called_once_with(done_ref) o2.inputs_done.assert_not_called() + o1.all_dependents_complete.assert_not_called() # Test input finalization. 
o2.get_work_refs = MagicMock(return_value=[done_ref]) o2.notify_work_completed = MagicMock() o2.inputs_done = MagicMock() + o1.all_dependents_complete = MagicMock() o1.completed = MagicMock(return_value=True) topo[o1].outqueue.clear() process_completed_tasks(topo) o2.notify_work_completed.assert_called_once_with(done_ref) o2.inputs_done.assert_called_once() + o1.all_dependents_complete.assert_not_called() + + # Test dependents completed. + o2.need_more_inputs = MagicMock(return_value=False) + o1.all_dependents_complete = MagicMock() + process_completed_tasks(topo) + o1.all_dependents_complete.assert_called_once() def test_select_operator_to_run(): From 4f7ee91475cd0fb8a7ca740ddf273c995ce4bca0 Mon Sep 17 00:00:00 2001 From: Cuong Nguyen <128072568+can-anyscale@users.noreply.github.com> Date: Sat, 29 Apr 2023 12:39:30 -0700 Subject: [PATCH 159/424] [CI][Bisect] Fix a bug in _run_test function (#34879) * Missing default value for run_per_commit Signed-off-by: Cuong Nguyen * Need to fix reading of outcomes as well Signed-off-by: Cuong Nguyen * Add unit-tests Signed-off-by: Cuong Nguyen * Fix lints Signed-off-by: Cuong Nguyen --------- Signed-off-by: Cuong Nguyen --- release/ray_release/scripts/ray_bisect.py | 12 ++++++++---- release/ray_release/tests/test_bisect.py | 19 ++++++++++++++++++- 2 files changed, 26 insertions(+), 5 deletions(-) diff --git a/release/ray_release/scripts/ray_bisect.py b/release/ray_release/scripts/ray_bisect.py index 07b1a795664a..63e45f3eb09a 100644 --- a/release/ray_release/scripts/ray_bisect.py +++ b/release/ray_release/scripts/ray_bisect.py @@ -104,12 +104,14 @@ def _sanity_check(test: Test, passing_revision: str, failing_revision: str) -> b ) outcomes = _run_test(test, [passing_revision, failing_revision]) return ( - outcomes[passing_revision] == "passed" - and outcomes[failing_revision] != "passed" + outcomes[passing_revision][0] == "passed" + and outcomes[failing_revision][0] != "passed" ) -def _run_test(test: Test, commits: Set[str], 
run_per_commit: int) -> Dict[str, str]: +def _run_test( + test: Test, commits: Set[str], run_per_commit: int = 1 +) -> Dict[str, Dict[int, str]]: logger.info(f'Running test {test["name"]} on commits {commits}') for commit in commits: _trigger_test_run(test, commit, run_per_commit) @@ -140,7 +142,9 @@ def _trigger_test_run(test: Test, commit: str, run_per_commit: int) -> None: pipeline.stdout.close() -def _obtain_test_result(commits: Set[str], run_per_commit: int) -> Dict[str, str]: +def _obtain_test_result( + commits: Set[str], run_per_commit: int +) -> Dict[str, Dict[int, str]]: outcomes = {} wait = 5 total_wait = 0 diff --git a/release/ray_release/tests/test_bisect.py b/release/ray_release/tests/test_bisect.py index d067373f7c1c..5e2e89f748bb 100644 --- a/release/ray_release/tests/test_bisect.py +++ b/release/ray_release/tests/test_bisect.py @@ -1,9 +1,26 @@ from unittest import mock from typing import List, Dict -from ray_release.scripts.ray_bisect import _bisect, _obtain_test_result +from ray_release.scripts.ray_bisect import _bisect, _obtain_test_result, _sanity_check from ray_release.config import Test +def test_sanity_check(): + def _mock_run_test(test: Test, commit: List[str]) -> Dict[str, Dict[int, str]]: + return { + "passing_revision": {0: "passed"}, + "failing_revision": {0: "failed"}, + } + + with mock.patch( + "ray_release.scripts.ray_bisect._run_test", + side_effect=_mock_run_test, + ): + assert _sanity_check({}, "passing_revision", "failing_revision") + assert not _sanity_check({}, "failing_revision", "passing_revision") + assert not _sanity_check({}, "passing_revision", "passing_revision") + assert not _sanity_check({}, "failing_revision", "failing_revision") + + def test_obtain_test_result(): test_cases = [ { From 0dc57024d543bde406ff95ea5530d265ea44d374 Mon Sep 17 00:00:00 2001 From: Artur Niederfahrenhorst Date: Sat, 29 Apr 2023 23:03:54 +0200 Subject: [PATCH 160/424] [CI] Fix missing team tag on test on Buildkite Requirements (#34891) 
Signed-off-by: Artur Niederfahrenhorst --- release/BUILD | 3 +++ 1 file changed, 3 insertions(+) diff --git a/release/BUILD b/release/BUILD index 2ea93f31b56b..9ef8f23f3ac8 100644 --- a/release/BUILD +++ b/release/BUILD @@ -6,6 +6,9 @@ compile_pip_requirements( requirements_in = "requirements_buildkite.in", requirements_txt = "requirements_buildkite.txt", visibility = ["//visibility:private"], + tags = [ + "team:core", + ], ) test_srcs = glob(["**/*.py"]) From 793e3f960d5840824aca22356cc203f26af892f6 Mon Sep 17 00:00:00 2001 From: Jiajun Yao Date: Sun, 30 Apr 2023 08:15:12 -0700 Subject: [PATCH 161/424] [Data] fetch_local once for each object ref (#34884) After #30375, ray.wait prefetches all the object refs and raylet persists those fetch requests even after ray.wait returns. As a result, for each object ref, we only need to fetch_local once. Triggering fetch_local redundantly is unnecessary and slower. Before this PR, warm-up phase of chaos_many_actors takes 60 minutes and with this PR, it takes 20 minutes. Also slightly increase the timeout of chaos_many_actors since we are running at the borderline. Signed-off-by: Jiajun Yao --- python/ray/data/_internal/progress_bar.py | 9 ++++++++- release/release_tests.yaml | 2 +- 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/python/ray/data/_internal/progress_bar.py b/python/ray/data/_internal/progress_bar.py index c6e376de9b07..ee9b721a1a3e 100644 --- a/python/ray/data/_internal/progress_bar.py +++ b/python/ray/data/_internal/progress_bar.py @@ -82,8 +82,15 @@ def fetch_until_complete(self, refs: List[ObjectRef]) -> List[Any]: ref_to_result = {} remaining = refs t = threading.current_thread() + # Triggering fetch_local redundantly for the same object is slower. + # We only need to trigger the fetch_local once for each object, + # raylet will persist these fetch requests even after ray.wait returns. + # See https://github.com/ray-project/ray/issues/30375. 
+ fetch_local = True while remaining: - done, remaining = ray.wait(remaining, fetch_local=True, timeout=0.1) + done, remaining = ray.wait(remaining, fetch_local=fetch_local, timeout=0.1) + if fetch_local: + fetch_local = False for ref, result in zip(done, ray.get(done)): ref_to_result[ref] = result self.update(len(done)) diff --git a/release/release_tests.yaml b/release/release_tests.yaml index 5d04a91f090d..2d5a7363ecdc 100644 --- a/release/release_tests.yaml +++ b/release/release_tests.yaml @@ -5714,7 +5714,7 @@ cluster_compute: chaos_test/compute_template.yaml run: - timeout: 3600 + timeout: 4200 wait_for_nodes: num_nodes: 10 prepare: python setup_chaos.py --no-start From 517443b4bce0ec0aaabe76d46dcbf4e9a89c5fee Mon Sep 17 00:00:00 2001 From: Kai Fricke Date: Mon, 1 May 2023 08:14:01 +0100 Subject: [PATCH 162/424] [tune] Release test for durable multifile checkpoints (#34860) We are currently only testing single-file checkpoints. However, there have been performance regressions with multi-file checkpoints due to unthreaded uploads in pyarrow. These have since been resolved, but we should collect metrics to catch future regressions. When comparing against a [version where the improvements have been reverted](https://github.com/ray-project/ray/pull/34861), we observe significant improvements in runtime: ``` 2023-04-28 06:52:38,151 INFO tune.py:1011 -- Total run time: 362.95 seconds (337.86 seconds for the tuning loop). ``` vs. ``` 2023-04-28 06:54:57,166 INFO tune.py:1011 -- Total run time: 472.55 seconds (436.54 seconds for the tuning loop). 
``` Signed-off-by: Kai Fricke --- python/ray/tune/utils/release_test_util.py | 44 ++++------------ release/release_tests.yaml | 34 +++++++++++++ .../test_durable_multifile_checkpoints.py | 51 +++++++++++++++++++ 3 files changed, 94 insertions(+), 35 deletions(-) create mode 100644 release/tune_tests/scalability_tests/workloads/test_durable_multifile_checkpoints.py diff --git a/python/ray/tune/utils/release_test_util.py b/python/ray/tune/utils/release_test_util.py index 5c6fdf943ee3..5355b3a1b1d8 100644 --- a/python/ray/tune/utils/release_test_util.py +++ b/python/ray/tune/utils/release_test_util.py @@ -79,6 +79,7 @@ def function_trainable(config): checkpoint_iters = config["checkpoint_iters"] checkpoint_size_b = config["checkpoint_size_b"] checkpoint_num_items = checkpoint_size_b // 8 # np.float64 + checkpoint_num_files = config["checkpoint_num_files"] for i in range(num_iters): if ( @@ -87,10 +88,11 @@ def function_trainable(config): and i % checkpoint_iters == 0 ): with tune.checkpoint_dir(step=i) as dir: - checkpoint_file = os.path.join(dir, "bogus.ckpt") - checkpoint_data = np.random.uniform(0, 1, size=checkpoint_num_items) - with open(checkpoint_file, "wb") as fp: - pickle.dump(checkpoint_data, fp) + for i in range(checkpoint_num_files): + checkpoint_file = os.path.join(dir, f"bogus_{i}.ckpt") + checkpoint_data = np.random.uniform(0, 1, size=checkpoint_num_items) + with open(checkpoint_file, "wb") as fp: + pickle.dump(checkpoint_data, fp) tune.report(score=i + score) time.sleep(sleep_time) @@ -104,6 +106,7 @@ def timed_tune_run( max_runtime: int = 300, checkpoint_freq_s: int = -1, checkpoint_size_b: int = 0, + checkpoint_num_files: int = 1, **tune_kwargs, ): durable = ( @@ -127,6 +130,7 @@ def timed_tune_run( "sleep_time": sleep_time, "checkpoint_iters": checkpoint_iters, "checkpoint_size_b": checkpoint_size_b, + "checkpoint_num_files": checkpoint_num_files, } print(f"Starting benchmark with config: {config}") @@ -136,38 +140,8 @@ def timed_tune_run( _train = 
function_trainable - aws_key_id = os.getenv("AWS_ACCESS_KEY_ID", "") - aws_secret = os.getenv("AWS_SECRET_ACCESS_KEY", "") - aws_session = os.getenv("AWS_SESSION_TOKEN", "") - if durable: - - class AwsDurableTrainable(TestDurableTrainable): - AWS_ACCESS_KEY_ID = aws_key_id - AWS_SECRET_ACCESS_KEY = aws_secret - AWS_SESSION_TOKEN = aws_session - - def setup_env(self): - if self.AWS_ACCESS_KEY_ID: - os.environ["AWS_ACCESS_KEY_ID"] = self.AWS_ACCESS_KEY_ID - if self.AWS_SECRET_ACCESS_KEY: - os.environ["AWS_SECRET_ACCESS_KEY"] = self.AWS_SECRET_ACCESS_KEY - if self.AWS_SESSION_TOKEN: - os.environ["AWS_SESSION_TOKEN"] = self.AWS_SESSION_TOKEN - - if all( - os.getenv(k, "") - for k in [ - "AWS_ACCESS_KEY_ID", - "AWS_SECRET_ACCESS_KEY", - "AWS_SESSION_TOKEN", - ] - ): - print("Worker: AWS secrets found in env.") - else: - print("Worker: No AWS secrets found in env!") - - _train = AwsDurableTrainable + _train = TestDurableTrainable run_kwargs["checkpoint_freq"] = checkpoint_iters start_time = time.monotonic() diff --git a/release/release_tests.yaml b/release/release_tests.yaml index 2d5a7363ecdc..92b5681f7441 100644 --- a/release/release_tests.yaml +++ b/release/release_tests.yaml @@ -1923,6 +1923,40 @@ alert: tune_tests + +- name: tune_scalability_durable_multifile_checkpoints + group: Tune scalability tests + working_dir: tune_tests/scalability_tests + + frequency: nightly + team: ml + + cluster: + cluster_env: app_config.yaml + cluster_compute: tpl_16x2.yaml + + run: + timeout: 900 + script: python workloads/test_durable_multifile_checkpoints.py --bucket s3://tune-cloud-tests/scalability_durable_multifile_checkpoints + wait_for_nodes: + num_nodes: 16 + + variations: + - __suffix__: aws + - __suffix__: gce + env: gce + frequency: manual + run: + timeout: 900 + script: python workloads/test_durable_multifile_checkpoints.py --bucket gs://tune-cloud-tests/scalability_durable_multifile_checkpoints + wait_for_nodes: + num_nodes: 16 + cluster: + cluster_env: app_config.yaml + 
cluster_compute: tpl_gce_16x2.yaml + + alert: tune_tests + - name: tune_scalability_long_running_large_checkpoints group: Tune scalability tests working_dir: tune_tests/scalability_tests diff --git a/release/tune_tests/scalability_tests/workloads/test_durable_multifile_checkpoints.py b/release/tune_tests/scalability_tests/workloads/test_durable_multifile_checkpoints.py new file mode 100644 index 000000000000..b3d4ba6bf683 --- /dev/null +++ b/release/tune_tests/scalability_tests/workloads/test_durable_multifile_checkpoints.py @@ -0,0 +1,51 @@ +"""Durable trainable with multi-file checkpoints (16 trials, checkpoint to cloud) + +In this run, we will start 16 trials on a cluster. The trials create 16 files a +1 MB checkpoints every 12 seconds and should only keep 2 checkpoints. This test +ensures that durable checkpoints don't slow down experiment progress too much. + +Cluster: cluster_16x2.yaml + +Test owner: krfricke + +Acceptance criteria: Should run faster than 750 seconds. + +Theoretical minimum time: 300 seconds +""" +import argparse + +import ray + +from ray.tune.utils.release_test_util import timed_tune_run + + +def main(bucket): + ray.init(address="auto") + + num_samples = 16 + results_per_second = 5 / 60 # 5 results per minute = 1 every 12 seconds + trial_length_s = 300 + + max_runtime = 750 + + timed_tune_run( + name="durable multi-file checkpoints", + num_samples=num_samples, + results_per_second=results_per_second, + trial_length_s=trial_length_s, + max_runtime=max_runtime, + checkpoint_freq_s=12, # Once every 12 seconds (once per result) + checkpoint_size_b=int(1 * 1000**2), # 1 MB + checkpoint_num_files=16, + keep_checkpoints_num=2, + resources_per_trial={"cpu": 2}, + storage_path=bucket, + ) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--bucket", type=str, help="Bucket name") + args, _ = parser.parse_known_args() + + main(args.bucket or "ray-tune-scalability-test") From 
8b0bd084be5d3f8ebe7868a08eb9720cae76f961 Mon Sep 17 00:00:00 2001 From: Sven Mika Date: Mon, 1 May 2023 12:58:37 +0200 Subject: [PATCH 163/424] [RLlib] APPO+new-stack (Atari benchmark) - Preparatory PR 02. (#34777) --- rllib/BUILD | 2 +- rllib/algorithms/algorithm_config.py | 58 ++++--- rllib/algorithms/appo/appo.py | 40 ++--- rllib/algorithms/appo/appo_learner.py | 103 ++++++++++++ .../appo/tests/tf/test_appo_learner.py | 7 +- rllib/algorithms/appo/tf/appo_tf_learner.py | 157 +++++------------- rllib/algorithms/appo/tf/appo_tf_rl_module.py | 1 + rllib/algorithms/impala/impala.py | 61 ++++--- .../algorithms/impala/impala_base_learner.py | 111 ------------- rllib/algorithms/impala/impala_learner.py | 78 +++++++++ .../algorithms/impala/tf/impala_tf_learner.py | 46 ++--- .../impala/torch/impala_torch_learner.py | 46 ++--- rllib/algorithms/ppo/ppo.py | 32 ++-- .../{ppo_base_learner.py => ppo_learner.py} | 64 ++++--- rllib/algorithms/ppo/ppo_learner_config.py | 21 --- rllib/algorithms/ppo/tf/ppo_tf_learner.py | 24 +-- rllib/algorithms/ppo/tf/ppo_tf_rl_module.py | 1 + .../algorithms/ppo/torch/ppo_torch_learner.py | 39 +++-- rllib/core/learner/learner.py | 73 ++++---- rllib/core/learner/learner_group.py | 31 ++-- rllib/core/learner/learner_group_config.py | 40 +++-- rllib/core/learner/tests/test_learner.py | 18 +- rllib/core/learner/tf/tf_learner.py | 25 +-- rllib/core/learner/torch/torch_learner.py | 13 +- rllib/core/models/specs/specs_base.py | 4 +- rllib/core/testing/utils.py | 10 +- rllib/tests/run_regression_tests.py | 24 ++- rllib/utils/framework.py | 2 + 28 files changed, 615 insertions(+), 516 deletions(-) create mode 100644 rllib/algorithms/appo/appo_learner.py delete mode 100644 rllib/algorithms/impala/impala_base_learner.py create mode 100644 rllib/algorithms/impala/impala_learner.py rename rllib/algorithms/ppo/{ppo_base_learner.py => ppo_learner.py} (55%) delete mode 100644 rllib/algorithms/ppo/ppo_learner_config.py diff --git a/rllib/BUILD b/rllib/BUILD index 
d4ba1d3a7774..78d38e9b65f3 100644 --- a/rllib/BUILD +++ b/rllib/BUILD @@ -603,7 +603,7 @@ py_test( py_test( name = "learning_tests_pendulum_ppo_with_rl_module", main = "tests/run_regression_tests.py", - tags = ["team:rllib", "exclusive", "learning_tests", "learning_tests_pendulum", "learning_tests_continuous", "torch_only"], + tags = ["team:rllib", "exclusive", "learning_tests", "learning_tests_pendulum", "learning_tests_continuous", "no_tf_static_graph"], size = "large", # bazel may complain about it being too long sometimes - large is on purpose as some frameworks take longer srcs = ["tests/run_regression_tests.py"], data = ["tuned_examples/ppo/pendulum-ppo-with-rl-module.yaml"], diff --git a/rllib/algorithms/algorithm_config.py b/rllib/algorithms/algorithm_config.py index d2c4c19e2206..6d20f874cdef 100644 --- a/rllib/algorithms/algorithm_config.py +++ b/rllib/algorithms/algorithm_config.py @@ -1,5 +1,4 @@ import copy -import dataclasses import logging import math import os @@ -18,7 +17,7 @@ import ray from ray.rllib.algorithms.callbacks import DefaultCallbacks -from ray.rllib.core.learner.learner import LearnerHPs +from ray.rllib.core.learner.learner import LearnerHyperparameters from ray.rllib.core.learner.learner_group_config import ( LearnerGroupConfig, ModuleSpec, @@ -322,12 +321,8 @@ def __init__(self, algo_class=None): self.model = copy.deepcopy(MODEL_DEFAULTS) self.optimizer = {} self.max_requests_in_flight_per_sampler_worker = 2 - self.learner_class = None + self._learner_class = None self._enable_learner_api = False - # experimental: this will contain the hyper-parameters that are passed to the - # Learner, for computing loss, etc. New algorithms have to set this to their - # own default. .training() will modify the fields of this object. 
- self._learner_hps = LearnerHPs() # `self.callbacks()` self.callbacks_class = DefaultCallbacks @@ -469,10 +464,6 @@ def __init__(self, algo_class=None): self.soft_horizon = DEPRECATED_VALUE self.no_done_at_end = DEPRECATED_VALUE - @property - def learner_hps(self) -> LearnerHPs: - return self._learner_hps - def to_dict(self) -> AlgorithmConfigDict: """Converts all settings into a legacy config dict for backward compatibility. @@ -1039,11 +1030,6 @@ def validate(self) -> None: "(i.e. num_learner_workers = 0)" ) - # Resolve learner class. - if self._enable_learner_api and self.learner_class is None: - learner_class_path = self.get_default_learner_class() - self.learner_class = deserialize_type(learner_class_path) - def build( self, env: Optional[Union[str, EnvType]] = None, @@ -1706,7 +1692,7 @@ def training( if _enable_learner_api is not NotProvided: self._enable_learner_api = _enable_learner_api if learner_class is not NotProvided: - self.learner_class = learner_class + self._learner_class = learner_class return self @@ -2544,6 +2530,20 @@ def experimental( return self + @property + def learner_class(self) -> Type["Learner"]: + """Returns the Learner sub-class to use by this Algorithm. + + Either + a) User sets a specific learner class via calling `.training(learner_class=...)` + b) User leaves learner class unset (None) and the AlgorithmConfig itself + figures out the actual learner class by calling its own + `.get_default_learner_class()` method. + """ + return self._learner_class or self.get_default_learner_class() + + # TODO: Make rollout_fragment_length as read-only property and replace the current + # self.rollout_fragment_length a private variable. def get_rollout_fragment_length(self, worker_index: int = 0) -> int: """Automatically infers a proper rollout_fragment_length setting if "auto". 
@@ -2579,6 +2579,8 @@ def get_rollout_fragment_length(self, worker_index: int = 0) -> int: else: return self.rollout_fragment_length + # TODO: Make evaluation_config as read-only property and replace the current + # self.evaluation_config a private variable. def get_evaluation_config_object( self, ) -> Optional["AlgorithmConfig"]: @@ -2872,6 +2874,8 @@ def is_policy_to_train(pid, batch=None): return policies, is_policy_to_train + # TODO: Move this to those algorithms that really need this, which is currently + # only A2C and PG. def validate_train_batch_size_vs_rollout_fragment_length(self) -> None: """Detects mismatches for `train_batch_size` vs `rollout_fragment_length`. @@ -3130,7 +3134,7 @@ def get_learner_group_config(self, module_spec: ModuleSpec) -> LearnerGroupConfi "grad_clip": self.grad_clip, "grad_clip_by": self.grad_clip_by, }, - learner_hps=self.learner_hps, + learner_hyperparameters=self.get_learner_hyperparameters(), ) .resources( num_learner_workers=self.num_learner_workers, @@ -3143,6 +3147,20 @@ def get_learner_group_config(self, module_spec: ModuleSpec) -> LearnerGroupConfi return config + def get_learner_hyperparameters(self) -> LearnerHyperparameters: + """Returns a new LearnerHyperparameters instance for the respective Learner. + + The LearnerHyperparameters is a dataclass containing only those config settings + from AlgorithmConfig that are used by the algorithm's specific Learner + sub-class. They allow distributing only those settings relevant for learning + across a set of learner workers (instead of having to distribute the entire + AlgorithmConfig object). + + Note that LearnerHyperparameters should always be derived directly from a + AlgorithmConfig object's own settings and considered frozen/read-only. 
+ """ + return LearnerHyperparameters() + def __setattr__(self, key, value): """Gatekeeper in case we are in frozen state and need to error.""" @@ -3247,10 +3265,6 @@ def _serialize_dict(config): config["model"]["custom_model"] ) - # Serialize dataclasses. - if isinstance(config.get("_learner_hps"), LearnerHPs): - config["_learner_hps"] = dataclasses.asdict(config["_learner_hps"]) - # List'ify `policies`, iff a set or tuple (these types are not JSON'able). ma_config = config.get("multiagent") if ma_config is not None: diff --git a/rllib/algorithms/appo/appo.py b/rllib/algorithms/appo/appo.py index d616b8656b01..44cb0733563e 100644 --- a/rllib/algorithms/appo/appo.py +++ b/rllib/algorithms/appo/appo.py @@ -9,12 +9,16 @@ Detailed documentation: https://docs.ray.io/en/master/rllib-algorithms.html#appo """ +import dataclasses from typing import Optional, Type import logging from ray.rllib.algorithms.algorithm_config import AlgorithmConfig, NotProvided +from ray.rllib.algorithms.appo.appo_learner import ( + AppoHyperparameters, + LEARNER_RESULTS_KL_KEY, +) from ray.rllib.algorithms.impala.impala import Impala, ImpalaConfig -from ray.rllib.algorithms.appo.tf.appo_tf_learner import AppoHPs, LEARNER_RESULTS_KL_KEY from ray.rllib.algorithms.ppo.ppo import UpdateKL from ray.rllib.core.rl_module.rl_module import SingleAgentRLModuleSpec from ray.rllib.policy.policy import Policy @@ -77,7 +81,6 @@ def __init__(self, algo_class=None): # __sphinx_doc_begin__ # APPO specific settings: - self._learner_hps = AppoHPs() self.vtrace = True self.use_critic = True self.use_gae = True @@ -195,24 +198,20 @@ def training( self.lambda_ = lambda_ if clip_param is not NotProvided: self.clip_param = clip_param - self._learner_hps.clip_param = clip_param if use_kl_loss is not NotProvided: self.use_kl_loss = use_kl_loss if kl_coeff is not NotProvided: self.kl_coeff = kl_coeff - self._learner_hps.kl_coeff = kl_coeff if kl_target is not NotProvided: self.kl_target = kl_target - 
self._learner_hps.kl_target = kl_target if tau is not NotProvided: self.tau = tau - self._learner_hps.tau = tau if target_update_frequency is not NotProvided: self.target_update_frequency = target_update_frequency return self - @override(AlgorithmConfig) + @override(ImpalaConfig) def get_default_learner_class(self): if self.framework_str == "tf2": from ray.rllib.algorithms.appo.tf.appo_tf_learner import APPOTfLearner @@ -221,7 +220,7 @@ def get_default_learner_class(self): else: raise ValueError(f"The framework {self.framework_str} is not supported.") - @override(AlgorithmConfig) + @override(ImpalaConfig) def get_default_rl_module_spec(self) -> SingleAgentRLModuleSpec: if self.framework_str == "tf2": from ray.rllib.algorithms.appo.appo_catalog import APPOCatalog @@ -234,20 +233,23 @@ def get_default_rl_module_spec(self) -> SingleAgentRLModuleSpec: raise ValueError(f"The framework {self.framework_str} is not supported.") @override(ImpalaConfig) - def validate(self) -> None: - super().validate() - self._learner_hps.tau = self.tau - self._learner_hps.kl_target = self.kl_target - self._learner_hps.kl_coeff = self.kl_coeff - self._learner_hps.clip_param = self.clip_param + def get_learner_hyperparameters(self) -> AppoHyperparameters: + base_hps = super().get_learner_hyperparameters() + return AppoHyperparameters( + use_kl_loss=self.use_kl_loss, + kl_target=self.kl_target, + kl_coeff=self.kl_coeff, + clip_param=self.clip_param, + tau=self.tau, + **dataclasses.asdict(base_hps), + ) # Still used by one of the old checkpoints in tests. # Keep a shim version of this around. class UpdateTargetAndKL: def __init__(self, workers, config): - self.workers = workers - self.config = config + pass class APPO(Impala): @@ -277,9 +279,8 @@ def setup(self, config: AlgorithmConfig): def after_train_step(self, train_results: ResultDict) -> None: """Updates the target network and the KL coefficient for the APPO-loss. 
- This method is called from within the `training_iteration` method after each - train update. - + This method is called from within the `training_step` method after each train + update. The target network update frequency is calculated automatically by the product of `num_sgd_iter` setting (usually 1 for APPO) and `minibatch_buffer_size`. @@ -407,7 +408,6 @@ def get_default_policy_class( return APPOTF1Policy else: if config._enable_rl_module_api: - # TODO(avnishn): This policy class doesn't work just yet from ray.rllib.algorithms.appo.tf.appo_tf_policy_rlm import ( APPOTfPolicyWithRLModule, ) diff --git a/rllib/algorithms/appo/appo_learner.py b/rllib/algorithms/appo/appo_learner.py new file mode 100644 index 000000000000..c92ac50ad687 --- /dev/null +++ b/rllib/algorithms/appo/appo_learner.py @@ -0,0 +1,103 @@ +import abc +from collections import defaultdict +from dataclasses import dataclass +from typing import Any, Dict, Mapping + +import numpy as np + +from ray.rllib.algorithms.impala.impala_learner import ( + ImpalaLearner, + ImpalaHyperparameters, +) +from ray.rllib.core.rl_module.marl_module import ModuleID +from ray.rllib.utils.annotations import override +from ray.rllib.utils.framework import get_variable + + +LEARNER_RESULTS_KL_KEY = "mean_kl_loss" +LEARNER_RESULTS_CURR_KL_COEFF_KEY = "curr_kl_coeff" +OLD_ACTION_DIST_KEY = "old_action_dist" +OLD_ACTION_DIST_LOGITS_KEY = "old_action_dist_logits" + + +@dataclass +class AppoHyperparameters(ImpalaHyperparameters): + """Hyperparameters for the APPOLearner sub-classes (framework specific). + + These should never be set directly by the user. Instead, use the APPOConfig + class to configure your algorithm. + See `ray.rllib.algorithms.appo.appo::APPOConfig::training()` for more details on the + individual properties. 
+ """ + + use_kl_loss: bool = None + kl_coeff: float = None + kl_target: float = None + clip_param: float = None + tau: float = None + + +class AppoLearner(ImpalaLearner): + """Adds KL coeff updates via `additional_updates_per_module()` to Impala logic. + + Framework-specific sub-classes must override `_update_module_target_networks()` + and `_update_module_kl_coeff()` + """ + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + # Create framework-specific variables (simple python vars for torch). + self.kl_coeffs = defaultdict( + lambda: get_variable( + self._hps.kl_coeff, + framework=self.framework, + trainable=False, + dtype=np.float32, + ) + ) + + @override(ImpalaLearner) + def remove_module(self, module_id: str): + super().remove_module(module_id) + self.kl_coeffs.pop(module_id) + + @override(ImpalaLearner) + def additional_update_per_module( + self, module_id: ModuleID, sampled_kls: Dict[ModuleID, float], **kwargs + ) -> Mapping[str, Any]: + """Updates the target networks and KL loss coefficients (per module). + + Args: + module_id: + """ + self._update_module_target_networks(module_id) + if self._hps.use_kl_loss: + self._update_module_kl_coeff(module_id, sampled_kls) + return {} + + @abc.abstractmethod + def _update_module_target_networks(self, module_id: ModuleID) -> None: + """Update the target policy of each module with the current policy. + + Do that update via polyak averaging. + + Args: + module_id: The module ID, whose target network(s) need to be updated. + """ + + @abc.abstractmethod + def _update_module_kl_coeff( + self, module_id: ModuleID, sampled_kls: Dict[ModuleID, float] + ) -> None: + """Dynamically update the KL loss coefficients of each module with. + + The update is completed using the mean KL divergence between the action + distributions current policy and old policy of each module. That action + distribution is computed during the most recent update/call to `compute_loss`. 
+ + Args: + module_id: The module whose KL loss coefficient to update. + sampled_kls: The KL divergence between the action distributions of + the current policy and old policy of each module. + + """ diff --git a/rllib/algorithms/appo/tests/tf/test_appo_learner.py b/rllib/algorithms/appo/tests/tf/test_appo_learner.py index b0b2989cd8ba..8c3978fc3055 100644 --- a/rllib/algorithms/appo/tests/tf/test_appo_learner.py +++ b/rllib/algorithms/appo/tests/tf/test_appo_learner.py @@ -114,6 +114,7 @@ def test_kl_coeff_changes(self): config = ( appo.APPOConfig() .environment("CartPole-v1") + .framework(eager_tracing=True) .rollouts( num_rollout_workers=0, rollout_fragment_length=frag_length, @@ -134,13 +135,13 @@ def test_kl_coeff_changes(self): ) .exploration(exploration_config={}) ) - for _ in framework_iterator(config, "tf2", with_eager_tracing=True): + for _ in framework_iterator(config, frameworks="tf2"): algo = config.build() # Call train while results aren't returned because this is # a asynchronous trainer and results are returned asynchronously. 
- while 1: + while True: results = algo.train() - if results and "info" in results and LEARNER_INFO in results["info"]: + if results.get("info", {}).get(LEARNER_INFO, {}).get(DEFAULT_POLICY_ID): break curr_kl_coeff = results["info"][LEARNER_INFO][DEFAULT_POLICY_ID][ LEARNER_STATS_KEY diff --git a/rllib/algorithms/appo/tf/appo_tf_learner.py b/rllib/algorithms/appo/tf/appo_tf_learner.py index 0bd99214255b..1df6505ef182 100644 --- a/rllib/algorithms/appo/tf/appo_tf_learner.py +++ b/rllib/algorithms/appo/tf/appo_tf_learner.py @@ -1,15 +1,16 @@ -from collections import defaultdict -from dataclasses import dataclass -from typing import Any, Dict, Mapping +from typing import Dict, Mapping from ray.rllib.policy.sample_batch import SampleBatch -from ray.rllib.algorithms.appo.tf.appo_tf_rl_module import OLD_ACTION_DIST_KEY +from ray.rllib.algorithms.appo.appo_learner import ( + AppoLearner, + LEARNER_RESULTS_CURR_KL_COEFF_KEY, + LEARNER_RESULTS_KL_KEY, + OLD_ACTION_DIST_KEY, +) from ray.rllib.algorithms.impala.tf.vtrace_tf_v2 import make_time_major, vtrace_tf2 -from ray.rllib.algorithms.impala.impala_base_learner import ImpalaHPs -from ray.rllib.algorithms.impala.tf.impala_tf_learner import ImpalaTfLearner from ray.rllib.core.learner.learner import POLICY_LOSS_KEY, VF_LOSS_KEY, ENTROPY_KEY -from ray.rllib.core.rl_module.marl_module import ModuleID from ray.rllib.core.learner.tf.tf_learner import TfLearner +from ray.rllib.core.rl_module.marl_module import ModuleID from ray.rllib.utils.annotations import override from ray.rllib.utils.framework import try_import_tf from ray.rllib.utils.typing import TensorType @@ -17,68 +18,12 @@ _, tf, _ = try_import_tf() -LEARNER_RESULTS_KL_KEY = "mean_kl_loss" -LEARNER_RESULTS_CURR_KL_COEFF_KEY = "curr_kl_coeff" - - -@dataclass -class AppoHPs(ImpalaHPs): - """Hyper-parameters for APPO. - - Attributes: - rollout_frag_or_episode_len: The length of a rollout fragment or episode. - Used when making SampleBatches time major for computing loss. 
- recurrent_seq_len: The length of a recurrent sequence. Used when making - SampleBatches time major for computing loss. - discount_factor: The discount factor to use for computing returns. - vtrace_clip_rho_threshold: The rho threshold to use for clipping the - importance weights. - vtrace_clip_pg_rho_threshold: The rho threshold to use for clipping the - importance weights when computing the policy_gradient loss. - vtrace_drop_last_ts: Whether to drop the last timestep when computing the loss. - This is useful for stabilizing the loss. - NOTE: This shouldn't be True when training on environments where the rewards - come at the end of the episode. - vf_loss_coeff: The amount to weight the value function loss by when computing - the total loss. - entropy_coeff: The amount to weight the average entropy of the actions in the - SampleBatch towards the total_loss for module updates. The higher this - coefficient, the more that the policy network will be encouraged to output - distributions with higher entropy/std deviation, which will encourage - greater exploration. - kl_target: The target kl divergence loss coefficient to use for the KL loss. - kl_coeff: The coefficient to weight the KL divergence between the old policy - and the target policy towards the total loss for module updates. - tau: The factor by which to update the target policy network towards - the current policy network. Can range between 0 and 1. - e.g. updated_param = tau * current_param + (1 - tau) * target_param - - """ - - kl_target: float = 0.01 - kl_coeff: float = 0.1 - clip_param = 0.2 - tau = 1.0 - - -class APPOTfLearner(ImpalaTfLearner): - """Implements APPO loss / update logic on top of ImpalaTfLearner. 
- - This class implements the APPO loss under `_compute_loss_per_module()` and - implements the target network and KL coefficient updates under - `additional_updates_per_module()` - """ +class APPOTfLearner(TfLearner, AppoLearner): + """Implements APPO loss / update logic on top of ImpalaTfLearner.""" def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - self.kl_target = self._hps.kl_target - self.clip_param = self._hps.clip_param - # TODO: (avnishn) Make creating the kl coeff a utility function when we add - # torch APPO as well. - self.kl_coeffs = defaultdict( - lambda: tf.Variable(self._hps.kl_coeff, trainable=False, dtype=tf.float32) - ) - self.tau = self._hps.tau + TfLearner.__init__(self, *args, **kwargs) + AppoLearner.__init__(self, *args, **kwargs) @override(TfLearner) def compute_loss_per_module( @@ -87,7 +32,6 @@ def compute_loss_per_module( values = fwd_out[SampleBatch.VF_PREDS] target_policy_dist = fwd_out[SampleBatch.ACTION_DIST] old_target_policy_dist = fwd_out[OLD_ACTION_DIST_KEY] - old_target_policy_actions_logp = old_target_policy_dist.logp( batch[SampleBatch.ACTIONS] ) @@ -96,34 +40,34 @@ def compute_loss_per_module( behaviour_actions_logp_time_major = make_time_major( behaviour_actions_logp, - trajectory_len=self.rollout_frag_or_episode_len, - recurrent_seq_len=self.recurrent_seq_len, - drop_last=self.vtrace_drop_last_ts, + trajectory_len=self.hps.rollout_frag_or_episode_len, + recurrent_seq_len=self.hps.recurrent_seq_len, + drop_last=self.hps.vtrace_drop_last_ts, ) target_actions_logp_time_major = make_time_major( target_actions_logp, - trajectory_len=self.rollout_frag_or_episode_len, - recurrent_seq_len=self.recurrent_seq_len, - drop_last=self.vtrace_drop_last_ts, + trajectory_len=self.hps.rollout_frag_or_episode_len, + recurrent_seq_len=self.hps.recurrent_seq_len, + drop_last=self.hps.vtrace_drop_last_ts, ) old_actions_logp_time_major = make_time_major( old_target_policy_actions_logp, - 
trajectory_len=self.rollout_frag_or_episode_len, - recurrent_seq_len=self.recurrent_seq_len, - drop_last=self.vtrace_drop_last_ts, + trajectory_len=self.hps.rollout_frag_or_episode_len, + recurrent_seq_len=self.hps.recurrent_seq_len, + drop_last=self.hps.vtrace_drop_last_ts, ) values_time_major = make_time_major( values, - trajectory_len=self.rollout_frag_or_episode_len, - recurrent_seq_len=self.recurrent_seq_len, - drop_last=self.vtrace_drop_last_ts, + trajectory_len=self.hps.rollout_frag_or_episode_len, + recurrent_seq_len=self.hps.recurrent_seq_len, + drop_last=self.hps.vtrace_drop_last_ts, ) bootstrap_value = values_time_major[-1] rewards_time_major = make_time_major( batch[SampleBatch.REWARDS], - trajectory_len=self.rollout_frag_or_episode_len, - recurrent_seq_len=self.recurrent_seq_len, - drop_last=self.vtrace_drop_last_ts, + trajectory_len=self.hps.rollout_frag_or_episode_len, + recurrent_seq_len=self.hps.recurrent_seq_len, + drop_last=self.hps.vtrace_drop_last_ts, ) # the discount factor that is used should be gamma except for timesteps where @@ -133,21 +77,21 @@ def compute_loss_per_module( - tf.cast( make_time_major( batch[SampleBatch.TERMINATEDS], - trajectory_len=self.rollout_frag_or_episode_len, - recurrent_seq_len=self.recurrent_seq_len, - drop_last=self.vtrace_drop_last_ts, + trajectory_len=self.hps.rollout_frag_or_episode_len, + recurrent_seq_len=self.hps.recurrent_seq_len, + drop_last=self.hps.vtrace_drop_last_ts, ), dtype=tf.float32, ) - ) * self.discount_factor + ) * self.hps.discount_factor vtrace_adjusted_target_values, pg_advantages = vtrace_tf2( target_action_log_probs=old_actions_logp_time_major, behaviour_action_log_probs=behaviour_actions_logp_time_major, rewards=rewards_time_major, values=values_time_major, bootstrap_value=bootstrap_value, - clip_pg_rho_threshold=self.vtrace_clip_pg_rho_threshold, - clip_rho_threshold=self.vtrace_clip_rho_threshold, + clip_pg_rho_threshold=self.hps.vtrace_clip_pg_rho_threshold, + 
clip_rho_threshold=self.hps.vtrace_clip_rho_threshold, discounts=discounts_time_major, ) @@ -167,7 +111,9 @@ def compute_loss_per_module( pg_advantages * logp_ratio, ( pg_advantages - * tf.clip_by_value(logp_ratio, 1 - self.clip_param, 1 + self.clip_param) + * tf.clip_by_value( + logp_ratio, 1 - self.hps.clip_param, 1 + self.hps.clip_param + ) ), ) @@ -185,8 +131,8 @@ def compute_loss_per_module( # The summed weighted loss. total_loss = ( mean_pi_loss - + (mean_vf_loss * self.vf_loss_coeff) - + (mean_entropy_loss * self.entropy_coeff) + + (mean_vf_loss * self.hps.vf_loss_coeff) + + (mean_entropy_loss * self.hps.entropy_coeff) + (mean_kl_loss * self.kl_coeffs[module_id]) ) @@ -199,11 +145,7 @@ def compute_loss_per_module( LEARNER_RESULTS_CURR_KL_COEFF_KEY: self.kl_coeffs[module_id], } - @override(ImpalaTfLearner) - def remove_module(self, module_id: str): - super().remove_module(module_id) - self.kl_coeffs.pop(module_id) - + @override(AppoLearner) def _update_module_target_networks(self, module_id: ModuleID): """Update the target policy of each module with the current policy. @@ -220,7 +162,9 @@ def _update_module_target_networks(self, module_id: ModuleID): for old_var, current_var in zip( target_network.variables, current_network.variables ): - updated_var = self.tau * current_var + (1.0 - self.tau) * old_var + updated_var = ( + self.hps.tau * current_var + (1.0 - self.hps.tau) * old_var + ) old_var.assign(updated_var) def _update_module_kl_coeff( @@ -242,21 +186,8 @@ def _update_module_kl_coeff( sampled_kl = sampled_kls[module_id] # Update the current KL value based on the recently measured value. # Increase. - if sampled_kl > 2.0 * self.kl_target: + if sampled_kl > 2.0 * self.hps.kl_target: self.kl_coeffs[module_id].assign(self.kl_coeffs[module_id] * 1.5) # Decrease. 
- elif sampled_kl < 0.5 * self.kl_target: + elif sampled_kl < 0.5 * self.hps.kl_target: self.kl_coeffs[module_id].assign(self.kl_coeffs[module_id] * 0.5) - - @override(ImpalaTfLearner) - def additional_update_per_module( - self, module_id: ModuleID, sampled_kls: Dict[ModuleID, float], **kwargs - ) -> Mapping[str, Any]: - """Update the target networks and KL loss coefficients of each module. - - Args: - - """ - self._update_module_target_networks(module_id) - self._update_module_kl_coeff(module_id, sampled_kls) - return {} diff --git a/rllib/algorithms/appo/tf/appo_tf_rl_module.py b/rllib/algorithms/appo/tf/appo_tf_rl_module.py index 9972291246da..44f41581759b 100644 --- a/rllib/algorithms/appo/tf/appo_tf_rl_module.py +++ b/rllib/algorithms/appo/tf/appo_tf_rl_module.py @@ -46,6 +46,7 @@ def output_specs_train(self) -> List[str]: OLD_ACTION_DIST_KEY, ] + @override(PPOTfRLModule) def _forward_train(self, batch: NestedDict): outs = super()._forward_train(batch) old_pi_inputs_encoded = self.old_encoder(batch)[ENCODER_OUT][ACTOR] diff --git a/rllib/algorithms/impala/impala.py b/rllib/algorithms/impala/impala.py index 1441593482b2..93f0990e74bd 100644 --- a/rllib/algorithms/impala/impala.py +++ b/rllib/algorithms/impala/impala.py @@ -1,4 +1,5 @@ import copy +import dataclasses from functools import partial import logging import platform @@ -11,8 +12,8 @@ from ray.rllib import SampleBatch from ray.rllib.algorithms.algorithm import Algorithm from ray.rllib.algorithms.algorithm_config import AlgorithmConfig, NotProvided -from ray.rllib.algorithms.impala.impala_base_learner import ( - ImpalaHPs, +from ray.rllib.algorithms.impala.impala_learner import ( + ImpalaHyperparameters, _reduce_impala_results, ) from ray.rllib.algorithms.ppo.ppo_catalog import PPOCatalog @@ -105,7 +106,6 @@ def __init__(self, algo_class=None): # __sphinx_doc_begin__ # IMPALA specific settings: - self._learner_hps = ImpalaHPs() self.vtrace = True self.vtrace_clip_rho_threshold = 1.0 
self.vtrace_clip_pg_rho_threshold = 1.0 @@ -141,7 +141,7 @@ def __init__(self, algo_class=None): self._lr_vf = 0.0005 self.after_train_step = None - # Override some of AlgorithmConfig's default values with ARS-specific values. + # Override some of AlgorithmConfig's default values with IMPALA-specific values. self.rollout_fragment_length = 50 self.train_batch_size = 500 self.minibatch_size = self.train_batch_size @@ -398,6 +398,7 @@ def validate(self) -> None: "term/optimizer! Try setting config.training(" "_tf_policy_handles_more_than_one_loss=True)." ) + # Learner API specific checks. if self._enable_learner_api: if not ( (self.minibatch_size % self.rollout_fragment_length == 0) @@ -410,20 +411,31 @@ def validate(self) -> None: f"{self.train_batch_size}, and rollout_fragment_length=" f"{self.get_rollout_fragment_length()}" ) - # learner hps need to be updated inside of config.validate in order to have - # the correct values for when a user starts an experiment from a dict. This is - # as oppposed to assigning the values inthe builder functions such as `training` - self._learner_hps.rollout_frag_or_episode_len = ( - self.get_rollout_fragment_length() + + @override(AlgorithmConfig) + def get_learner_hyperparameters(self) -> ImpalaHyperparameters: + base_hps = super().get_learner_hyperparameters() + learner_hps = ImpalaHyperparameters( + rollout_frag_or_episode_len=self.get_rollout_fragment_length(), + discount_factor=self.gamma, + entropy_coeff=self.entropy_coeff, + vf_loss_coeff=self.vf_loss_coeff, + vtrace_drop_last_ts=self.vtrace_drop_last_ts, + vtrace_clip_rho_threshold=self.vtrace_clip_rho_threshold, + vtrace_clip_pg_rho_threshold=(self.vtrace_clip_pg_rho_threshold), + **dataclasses.asdict(base_hps), ) - self._learner_hps.discount_factor = self.gamma - self._learner_hps.entropy_coeff = self.entropy_coeff - self._learner_hps.vf_loss_coeff = self.vf_loss_coeff - self._learner_hps.vtrace_drop_last_ts = self.vtrace_drop_last_ts - 
self._learner_hps.vtrace_clip_rho_threshold = self.vtrace_clip_rho_threshold - self._learner_hps.vtrace_clip_pg_rho_threshold = ( - self.vtrace_clip_pg_rho_threshold + # TODO: We currently do not use the `recurrent_seq_len` property anyways. + # We should re-think the handling of RNN/SEQ_LENs/etc.. once we start + # supporting them in RLModules and then revisit this check here. + # Also, such a check should be moved into `IMPALAConfig.validate()`. + assert (learner_hps.rollout_frag_or_episode_len is None) != ( + learner_hps.recurrent_seq_len is None + ), ( + "One of `rollout_frag_or_episode_len` or `recurrent_seq_len` must be not " + "None in ImpalaHyperparameters!" ) + return learner_hps def get_replay_ratio(self) -> float: """Returns replay ratio (between 0.0 and 1.0) based off self.replay_proportion. @@ -434,16 +446,16 @@ def get_replay_ratio(self) -> float: @override(AlgorithmConfig) def get_default_learner_class(self): - if self.framework_str == "tf2": - from ray.rllib.algorithms.impala.tf.impala_tf_learner import ImpalaTfLearner - - return ImpalaTfLearner - elif self.framework_str == "torch": + if self.framework_str == "torch": from ray.rllib.algorithms.impala.torch.impala_torch_learner import ( ImpalaTorchLearner, ) return ImpalaTorchLearner + elif self.framework_str == "tf2": + from ray.rllib.algorithms.impala.tf.impala_tf_learner import ImpalaTfLearner + + return ImpalaTfLearner else: raise ValueError(f"The framework {self.framework_str} is not supported.") @@ -592,9 +604,11 @@ def get_default_policy_class( def setup(self, config: AlgorithmConfig): super().setup(config) + # Queue of batches to be sent to the Learner. + self.batches_to_place_on_learner = [] + # Create extra aggregation workers and assign each rollout worker to # one of them. 
- self.batches_to_place_on_learner = [] self.batch_being_built = [] if self.config.num_aggregation_workers > 0: # This spawns `num_aggregation_workers` actors that aggregate @@ -668,7 +682,8 @@ def training_step(self) -> ResultDict: and self._aggregator_actor_manager.num_healthy_actors() > 0 ) - # Get references to sampled SampleBatches from our workers. + # Get sampled SampleBatches from our workers (by ray references if we use + # tree-aggregation). unprocessed_sample_batches = self.get_samples_from_workers( return_object_refs=use_tree_aggregation, ) diff --git a/rllib/algorithms/impala/impala_base_learner.py b/rllib/algorithms/impala/impala_base_learner.py deleted file mode 100644 index e57bf8ecabae..000000000000 --- a/rllib/algorithms/impala/impala_base_learner.py +++ /dev/null @@ -1,111 +0,0 @@ -from dataclasses import dataclass -import numpy as np -from typing import Any, List, Mapping -import tree - -from ray.rllib.policy.sample_batch import MultiAgentBatch -from ray.rllib.core.learner.learner import LearnerHPs -from ray.rllib.utils.annotations import override -from ray.rllib.core.learner.learner import Learner -from ray.rllib.utils.metrics import ( - ALL_MODULES, - NUM_AGENT_STEPS_TRAINED, - NUM_ENV_STEPS_TRAINED, -) -from ray.rllib.utils.typing import ResultDict - - -@dataclass -class ImpalaHPs(LearnerHPs): - """Hyper-parameters for IMPALA. - - Attributes: - rollout_frag_or_episode_len: The length of a rollout fragment or episode. - Used when making SampleBatches time major for computing loss. - recurrent_seq_len: The length of a recurrent sequence. Used when making - SampleBatches time major for computing loss. - discount_factor: The discount factor to use for computing returns. - vtrace_clip_rho_threshold: The rho threshold to use for clipping the - importance weights. - vtrace_clip_pg_rho_threshold: The rho threshold to use for clipping the - importance weights when computing the policy_gradient loss. 
- vtrace_drop_last_ts: Whether to drop the last timestep when computing the loss. - This is useful for stabilizing the loss. - NOTE: This shouldn't be True when training on environments where the rewards - come at the end of the episode. - vf_loss_coeff: The amount to weight the value function loss by when computing - the total loss. - entropy_coeff: The amount to weight the average entropy of the actions in the - SampleBatch towards the total_loss for module updates. The higher this - coefficient, the more that the policy network will be encouraged to output - distributions with higher entropy/std deviation, which will encourage - greater exploration. - - """ - - rollout_frag_or_episode_len: int = None - recurrent_seq_len: int = None - discount_factor: float = 0.99 - vtrace_clip_rho_threshold: float = 1.0 - vtrace_clip_pg_rho_threshold: float = 1.0 - vtrace_drop_last_ts: bool = True - vf_loss_coeff: float = 0.5 - entropy_coeff: float = 0.01 - - -class ImpalaBaseLearner(Learner): - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - - self.vtrace_clip_rho_threshold = self._hps.vtrace_clip_rho_threshold - self.vtrace_clip_pg_rho_threshold = self._hps.vtrace_clip_pg_rho_threshold - self.vtrace_drop_last_ts = self._hps.vtrace_drop_last_ts - self.vf_loss_coeff = self._hps.vf_loss_coeff - self.entropy_coeff = self._hps.entropy_coeff - self.rollout_frag_or_episode_len = self._hps.rollout_frag_or_episode_len - self.recurrent_seq_len = self._hps.recurrent_seq_len - self.discount_factor = self._hps.discount_factor - assert ( - self.rollout_frag_or_episode_len is not None - or self.recurrent_seq_len is not None - ) and not (self.rollout_frag_or_episode_len and self.recurrent_seq_len), ( - "Either rollout_frag_or_episode_len or recurrent_seq_len" - " must be set in the IMPALA HParams. 
" - ) - - @override(Learner) - def compile_results( - self, - batch: MultiAgentBatch, - fwd_out: Mapping[str, Any], - postprocessed_loss: Mapping[str, Any], - postprocessed_gradients: Mapping[str, Any], - ) -> Mapping[str, Any]: - results = super().compile_results( - batch, fwd_out, postprocessed_loss, postprocessed_gradients - ) - results[ALL_MODULES][NUM_AGENT_STEPS_TRAINED] = batch.agent_steps() - results[ALL_MODULES][NUM_ENV_STEPS_TRAINED] = batch.env_steps() - return results - - -def _reduce_impala_results(results: List[ResultDict]) -> ResultDict: - """Reduce/Aggregate a list of results from Impala Learners. - - Average the values of the result dicts. Add keys for the number of agent and env - steps trained. - - Args: - results: result dicts to reduce. - - Returns: - A reduced result dict. - """ - result = tree.map_structure(lambda *x: np.mean(x), *results) - agent_steps_trained = sum( - [r[ALL_MODULES][NUM_AGENT_STEPS_TRAINED] for r in results] - ) - env_steps_trained = sum([r[ALL_MODULES][NUM_ENV_STEPS_TRAINED] for r in results]) - result[ALL_MODULES][NUM_AGENT_STEPS_TRAINED] = agent_steps_trained - result[ALL_MODULES][NUM_ENV_STEPS_TRAINED] = env_steps_trained - return result diff --git a/rllib/algorithms/impala/impala_learner.py b/rllib/algorithms/impala/impala_learner.py new file mode 100644 index 000000000000..4687ce0e9f9b --- /dev/null +++ b/rllib/algorithms/impala/impala_learner.py @@ -0,0 +1,78 @@ +from dataclasses import dataclass +from typing import Any, List, Mapping + +import numpy as np +import tree # pip install dm_tree + +from ray.rllib.core.learner.learner import Learner, LearnerHyperparameters +from ray.rllib.policy.sample_batch import MultiAgentBatch +from ray.rllib.utils.annotations import override +from ray.rllib.utils.metrics import ( + ALL_MODULES, + NUM_AGENT_STEPS_TRAINED, + NUM_ENV_STEPS_TRAINED, +) +from ray.rllib.utils.typing import ResultDict + + +@dataclass +class ImpalaHyperparameters(LearnerHyperparameters): + """Hyperparameters 
for the ImpalaLearner sub-classes (framework specific). + + These should never be set directly by the user. Instead, use the IMPALAConfig + class to configure your algorithm. + See `ray.rllib.algorithms.impala.impala::IMPALAConfig::training()` for more details + on the individual properties. + + Attributes: + rollout_frag_or_episode_len: The length of a rollout fragment or episode. + Used when making SampleBatches time major for computing loss. + recurrent_seq_len: The length of a recurrent sequence. Used when making + SampleBatches time major for computing loss. + """ + + rollout_frag_or_episode_len: int = None + recurrent_seq_len: int = None + discount_factor: float = None + vtrace_clip_rho_threshold: float = None + vtrace_clip_pg_rho_threshold: float = None + vtrace_drop_last_ts: bool = None + vf_loss_coeff: float = None + entropy_coeff: float = None + + +class ImpalaLearner(Learner): + @override(Learner) + def compile_results( + self, + batch: MultiAgentBatch, + fwd_out: Mapping[str, Any], + postprocessed_loss: Mapping[str, Any], + postprocessed_gradients: Mapping[str, Any], + ) -> Mapping[str, Any]: + results = super().compile_results( + batch, fwd_out, postprocessed_loss, postprocessed_gradients + ) + results[ALL_MODULES][NUM_AGENT_STEPS_TRAINED] = batch.agent_steps() + results[ALL_MODULES][NUM_ENV_STEPS_TRAINED] = batch.env_steps() + return results + + +def _reduce_impala_results(results: List[ResultDict]) -> ResultDict: + """Reduce/Aggregate a list of results from Impala Learners. + + Average the values of the result dicts. Add keys for the number of agent and env + steps trained. + + Args: + results: result dicts to reduce. + + Returns: + A reduced result dict. 
+ """ + result = tree.map_structure(lambda *x: np.mean(x), *results) + agent_steps_trained = sum(r[ALL_MODULES][NUM_AGENT_STEPS_TRAINED] for r in results) + env_steps_trained = sum(r[ALL_MODULES][NUM_ENV_STEPS_TRAINED] for r in results) + result[ALL_MODULES][NUM_AGENT_STEPS_TRAINED] = agent_steps_trained + result[ALL_MODULES][NUM_ENV_STEPS_TRAINED] = env_steps_trained + return result diff --git a/rllib/algorithms/impala/tf/impala_tf_learner.py b/rllib/algorithms/impala/tf/impala_tf_learner.py index 8bb9ce099a69..24de96064be7 100644 --- a/rllib/algorithms/impala/tf/impala_tf_learner.py +++ b/rllib/algorithms/impala/tf/impala_tf_learner.py @@ -1,6 +1,6 @@ from typing import Mapping -from ray.rllib.algorithms.impala.impala_base_learner import ImpalaBaseLearner +from ray.rllib.algorithms.impala.impala_learner import ImpalaLearner from ray.rllib.algorithms.impala.tf.vtrace_tf_v2 import make_time_major, vtrace_tf2 from ray.rllib.core.learner.tf.tf_learner import TfLearner from ray.rllib.policy.sample_batch import SampleBatch @@ -11,12 +11,12 @@ _, tf, _ = try_import_tf() -class ImpalaTfLearner(TfLearner, ImpalaBaseLearner): +class ImpalaTfLearner(TfLearner, ImpalaLearner): """Implements the IMPALA loss function in tensorflow.""" def __init__(self, *args, **kwargs): TfLearner.__init__(self, *args, **kwargs) - ImpalaBaseLearner.__init__(self, *args, **kwargs) + ImpalaLearner.__init__(self, *args, **kwargs) @override(TfLearner) def compute_loss_per_module( @@ -30,28 +30,28 @@ def compute_loss_per_module( behaviour_actions_logp_time_major = make_time_major( behaviour_actions_logp, - trajectory_len=self.rollout_frag_or_episode_len, - recurrent_seq_len=self.recurrent_seq_len, - drop_last=self.vtrace_drop_last_ts, + trajectory_len=self.hps.rollout_frag_or_episode_len, + recurrent_seq_len=self.hps.recurrent_seq_len, + drop_last=self.hps.vtrace_drop_last_ts, ) target_actions_logp_time_major = make_time_major( target_actions_logp, - trajectory_len=self.rollout_frag_or_episode_len, 
- recurrent_seq_len=self.recurrent_seq_len, - drop_last=self.vtrace_drop_last_ts, + trajectory_len=self.hps.rollout_frag_or_episode_len, + recurrent_seq_len=self.hps.recurrent_seq_len, + drop_last=self.hps.vtrace_drop_last_ts, ) values_time_major = make_time_major( values, - trajectory_len=self.rollout_frag_or_episode_len, - recurrent_seq_len=self.recurrent_seq_len, - drop_last=self.vtrace_drop_last_ts, + trajectory_len=self.hps.rollout_frag_or_episode_len, + recurrent_seq_len=self.hps.recurrent_seq_len, + drop_last=self.hps.vtrace_drop_last_ts, ) bootstrap_value = values_time_major[-1] rewards_time_major = make_time_major( batch[SampleBatch.REWARDS], - trajectory_len=self.rollout_frag_or_episode_len, - recurrent_seq_len=self.recurrent_seq_len, - drop_last=self.vtrace_drop_last_ts, + trajectory_len=self.hps.rollout_frag_or_episode_len, + recurrent_seq_len=self.hps.recurrent_seq_len, + drop_last=self.hps.vtrace_drop_last_ts, ) # the discount factor that is used should be gamma except for timesteps where @@ -61,13 +61,13 @@ def compute_loss_per_module( - tf.cast( make_time_major( batch[SampleBatch.TERMINATEDS], - trajectory_len=self.rollout_frag_or_episode_len, - recurrent_seq_len=self.recurrent_seq_len, - drop_last=self.vtrace_drop_last_ts, + trajectory_len=self.hps.rollout_frag_or_episode_len, + recurrent_seq_len=self.hps.recurrent_seq_len, + drop_last=self.hps.vtrace_drop_last_ts, ), dtype=tf.float32, ) - ) * self.discount_factor + ) * self.hps.discount_factor # TODO(Artur): See if we should compute v-trace corrected targets on CPU vtrace_adjusted_target_values, pg_advantages = vtrace_tf2( target_action_log_probs=target_actions_logp_time_major, @@ -75,8 +75,8 @@ def compute_loss_per_module( rewards=rewards_time_major, values=values_time_major, bootstrap_value=bootstrap_value, - clip_pg_rho_threshold=self.vtrace_clip_pg_rho_threshold, - clip_rho_threshold=self.vtrace_clip_rho_threshold, + clip_pg_rho_threshold=self.hps.vtrace_clip_pg_rho_threshold, + 
clip_rho_threshold=self.hps.vtrace_clip_rho_threshold, discounts=discounts_time_major, ) @@ -97,7 +97,9 @@ def compute_loss_per_module( # The summed weighted loss. total_loss = ( - pi_loss + vf_loss * self.vf_loss_coeff + entropy_loss * self.entropy_coeff + pi_loss + + vf_loss * self.hps.vf_loss_coeff + + entropy_loss * self.hps.entropy_coeff ) return { self.TOTAL_LOSS_KEY: total_loss, diff --git a/rllib/algorithms/impala/torch/impala_torch_learner.py b/rllib/algorithms/impala/torch/impala_torch_learner.py index 6809027d8df3..9160659e8f5b 100644 --- a/rllib/algorithms/impala/torch/impala_torch_learner.py +++ b/rllib/algorithms/impala/torch/impala_torch_learner.py @@ -1,6 +1,6 @@ from typing import Mapping -from ray.rllib.algorithms.impala.impala_base_learner import ImpalaBaseLearner +from ray.rllib.algorithms.impala.impala_learner import ImpalaLearner from ray.rllib.algorithms.impala.torch.vtrace_torch_v2 import ( vtrace_torch, make_time_major, @@ -15,12 +15,12 @@ torch, nn = try_import_torch() -class ImpalaTorchLearner(TorchLearner, ImpalaBaseLearner): +class ImpalaTorchLearner(TorchLearner, ImpalaLearner): """Implements the IMPALA loss function in torch.""" def __init__(self, *args, **kwargs): TorchLearner.__init__(self, *args, **kwargs) - ImpalaBaseLearner.__init__(self, *args, **kwargs) + ImpalaLearner.__init__(self, *args, **kwargs) @override(TorchLearner) def compute_loss_per_module( @@ -38,28 +38,28 @@ def compute_loss_per_module( target_actions_logp_time_major = make_time_major( target_actions_logp, - trajectory_len=self.rollout_frag_or_episode_len, - recurrent_seq_len=self.recurrent_seq_len, - drop_last=self.vtrace_drop_last_ts, + trajectory_len=self.hps.rollout_frag_or_episode_len, + recurrent_seq_len=self.hps.recurrent_seq_len, + drop_last=self.hps.vtrace_drop_last_ts, ) behaviour_actions_logp_time_major = make_time_major( behaviour_actions_logp, - trajectory_len=self.rollout_frag_or_episode_len, - recurrent_seq_len=self.recurrent_seq_len, - 
drop_last=self.vtrace_drop_last_ts, + trajectory_len=self.hps.rollout_frag_or_episode_len, + recurrent_seq_len=self.hps.recurrent_seq_len, + drop_last=self.hps.vtrace_drop_last_ts, ) values_time_major = make_time_major( values, - trajectory_len=self.rollout_frag_or_episode_len, - recurrent_seq_len=self.recurrent_seq_len, - drop_last=self.vtrace_drop_last_ts, + trajectory_len=self.hps.rollout_frag_or_episode_len, + recurrent_seq_len=self.hps.recurrent_seq_len, + drop_last=self.hps.vtrace_drop_last_ts, ) bootstrap_value = values_time_major[-1] rewards_time_major = make_time_major( batch[SampleBatch.REWARDS], - trajectory_len=self.rollout_frag_or_episode_len, - recurrent_seq_len=self.recurrent_seq_len, - drop_last=self.vtrace_drop_last_ts, + trajectory_len=self.hps.rollout_frag_or_episode_len, + recurrent_seq_len=self.hps.recurrent_seq_len, + drop_last=self.hps.vtrace_drop_last_ts, ) # the discount factor that is used should be gamma except for timesteps where @@ -68,11 +68,11 @@ def compute_loss_per_module( 1.0 - make_time_major( batch[SampleBatch.TERMINATEDS], - trajectory_len=self.rollout_frag_or_episode_len, - recurrent_seq_len=self.recurrent_seq_len, - drop_last=self.vtrace_drop_last_ts, + trajectory_len=self.hps.rollout_frag_or_episode_len, + recurrent_seq_len=self.hps.recurrent_seq_len, + drop_last=self.hps.vtrace_drop_last_ts, ).type(dtype=torch.float32) - ) * self.discount_factor + ) * self.hps.discount_factor # TODO(Artur) Why was there `TorchCategorical if is_multidiscrete else # dist_class` in the old code torch impala policy? 
@@ -86,8 +86,8 @@ def compute_loss_per_module( rewards=rewards_time_major, values=values_time_major, bootstrap_value=bootstrap_value, - clip_rho_threshold=self.vtrace_clip_rho_threshold, - clip_pg_rho_threshold=self.vtrace_clip_pg_rho_threshold, + clip_rho_threshold=self.hps.vtrace_clip_rho_threshold, + clip_pg_rho_threshold=self.hps.vtrace_clip_pg_rho_threshold, ) # Sample size is T x B, where T is the trajectory length and B is the batch size @@ -114,7 +114,9 @@ def compute_loss_per_module( # The summed weighted loss. total_loss = ( - pi_loss + vf_loss * self.vf_loss_coeff + entropy_loss * self.entropy_coeff + pi_loss + + vf_loss * self.hps.vf_loss_coeff + + entropy_loss * self.hps.entropy_coeff ) return { self.TOTAL_LOSS_KEY: total_loss, diff --git a/rllib/algorithms/ppo/ppo.py b/rllib/algorithms/ppo/ppo.py index 4ce1347cf67a..034c224362e5 100644 --- a/rllib/algorithms/ppo/ppo.py +++ b/rllib/algorithms/ppo/ppo.py @@ -9,6 +9,7 @@ Detailed documentation: https://docs.ray.io/en/master/rllib-algorithms.html#ppo """ +import dataclasses import logging from typing import List, Optional, Type, Union, TYPE_CHECKING @@ -16,8 +17,8 @@ from ray.rllib.algorithms.algorithm import Algorithm from ray.rllib.algorithms.algorithm_config import AlgorithmConfig, NotProvided from ray.rllib.algorithms.pg import PGConfig -from ray.rllib.algorithms.ppo.ppo_learner_config import PPOLearnerHPs from ray.rllib.algorithms.ppo.ppo_catalog import PPOCatalog +from ray.rllib.algorithms.ppo.ppo_learner import PPOLearnerHyperparameters from ray.rllib.core.rl_module.rl_module import SingleAgentRLModuleSpec from ray.rllib.execution.rollout_ops import ( standardize_fields, @@ -94,7 +95,6 @@ def __init__(self, algo_class=None): # fmt: off # __sphinx_doc_begin__ # PPO specific settings: - self._learner_hps = PPOLearnerHPs() self.use_critic = True self.use_gae = True self.lambda_ = 1.0 @@ -166,6 +166,21 @@ def get_default_learner_class(self) -> Union[Type["Learner"], str]: else: raise ValueError(f"The 
framework {self.framework_str} is not supported.") + @override(AlgorithmConfig) + def get_learner_hyperparameters(self) -> PPOLearnerHyperparameters: + base_hps = super().get_learner_hyperparameters() + return PPOLearnerHyperparameters( + use_critic=self.use_critic, + kl_coeff=self.kl_coeff, + vf_loss_coeff=self.vf_loss_coeff, + entropy_coeff=self.entropy_coeff, + entropy_coeff_schedule=self.entropy_coeff_schedule, + clip_param=self.clip_param, + vf_clip_param=self.vf_clip_param, + kl_target=self.kl_target, + **dataclasses.asdict(base_hps), + ) + @override(AlgorithmConfig) def training( self, @@ -212,7 +227,7 @@ def training( tune this if you set vf_share_layers=True inside your model's config. entropy_coeff: Coefficient of the entropy regularizer. entropy_coeff_schedule: Decay schedule for the entropy regularizer. - clip_param: PPO clip parameter. + clip_param: The PPO clip parameter. vf_clip_param: Clip param for the value function. Note that this is sensitive to the scale of the rewards. If your expected V is large, increase this. @@ -306,17 +321,6 @@ def validate(self) -> None: # Check `entropy_coeff` for correctness. if self.entropy_coeff < 0.0: raise ValueError("`entropy_coeff` must be >= 0.0") - # learner hps need to be updated inside of config.validate in order to have - # the correct values for when a user starts an experiment from a dict. 
This is - # as oppposed to assigning the values inthe builder functions such as `training` - self._learner_hps.use_critic = self.use_critic - self._learner_hps.kl_coeff = self.kl_coeff - self._learner_hps.vf_loss_coeff = self.vf_loss_coeff - self._learner_hps.entropy_coeff = self.entropy_coeff - self._learner_hps.entropy_coeff_schedule = self.entropy_coeff_schedule - self._learner_hps.clip_param = self.clip_param - self._learner_hps.vf_clip_param = self.vf_clip_param - self._learner_hps.kl_target = self.kl_target class UpdateKL: diff --git a/rllib/algorithms/ppo/ppo_base_learner.py b/rllib/algorithms/ppo/ppo_learner.py similarity index 55% rename from rllib/algorithms/ppo/ppo_base_learner.py rename to rllib/algorithms/ppo/ppo_learner.py index 94a933ea5e3b..dd1972e8aa6a 100644 --- a/rllib/algorithms/ppo/ppo_base_learner.py +++ b/rllib/algorithms/ppo/ppo_learner.py @@ -1,12 +1,39 @@ -from typing import Mapping, Any +from dataclasses import dataclass +from typing import Any, Mapping, List, Optional, Union import abc +from ray.rllib.core.learner.learner import LearnerHyperparameters from ray.rllib.core.rl_module.rl_module import ModuleID from ray.rllib.core.learner.learner import Learner from ray.rllib.utils.annotations import override +from ray.rllib.utils.typing import TensorType -class PPOBaseLearner(Learner): +@dataclass +class PPOLearnerHyperparameters(LearnerHyperparameters): + """Hyperparameters for the PPOLearner sub-classes (framework specific). + + These should never be set directly by the user. Instead, use the PPOConfig + class to configure your algorithm. + See `ray.rllib.algorithms.ppo.ppo::PPOConfig::training()` for more details on the + individual properties. 
+ """ + + kl_coeff: float = None + kl_target: float = None + use_critic: bool = None + clip_param: float = None + vf_clip_param: float = None + entropy_coeff: float = None + vf_loss_coeff: float = None + + # Experimental placeholder for things that could be part of the base + # LearnerHyperparameters. + lr_schedule: Optional[List[List[Union[int, float]]]] = None + entropy_coeff_schedule: Optional[List[List[Union[int, float]]]] = None + + +class PPOLearner(Learner): def build(self) -> None: super().build() @@ -16,7 +43,7 @@ def build(self) -> None: raise ValueError("entropy_coeff_schedule is not supported in Learner yet") # TODO (Kourosh): This needs to be native tensor variable to be traced. - self.entropy_coeff = self.hps.entropy_coeff + # self.entropy_coeff = self.hps.entropy_coeff # TODO (Kourosh): Create a way on the base class for users to define arbitrary # schedulers for learning rates. @@ -24,18 +51,11 @@ def build(self) -> None: if self.hps.lr_schedule: raise ValueError("lr_schedule is not supported in Learner yet") - # TODO (Kourosh): We can still use mix-ins in the new design. Do we want that? - # Most likely not. I rather be specific about everything. kl_coeff is a - # none-gradient based update which we can define here and add as update with - # additional_update() method. - # We need to make sure that the kl_coeff is a framework tensor that is # registered as part of the graph so that upon update the graph can be updated - # (e.g. in TF with eager tracing) - self.kl_coeff_val = self.hps.kl_coeff - self.kl_coeff = self._create_kl_variable(self.hps.kl_coeff) - - self.kl_target = self.hps.kl_target + # (e.g. in TF with eager tracing). + self.curr_kl_coeff_val = self.hps.kl_coeff + self.curr_kl_coeff = self._get_kl_variable(self.hps.kl_coeff) @override(Learner) def additional_update_per_module( @@ -44,17 +64,17 @@ def additional_update_per_module( assert sampled_kl_values, "Sampled KL values are empty." 
sampled_kl = sampled_kl_values[module_id] - if sampled_kl > 2.0 * self.kl_target: + if sampled_kl > 2.0 * self.hps.kl_target: # TODO (Kourosh) why not 2? - self.kl_coeff_val *= 1.5 - elif sampled_kl < 0.5 * self.kl_target: - self.kl_coeff_val *= 0.5 + self.curr_kl_coeff_val *= 1.5 + elif sampled_kl < 0.5 * self.hps.kl_target: + self.curr_kl_coeff_val *= 0.5 - self._set_kl_coeff(self.kl_coeff_val) - results = {"kl_coeff": self.kl_coeff_val} + self._set_kl_coeff(self.curr_kl_coeff_val) + results = {"kl_coeff": self.curr_kl_coeff_val} # TODO (Kourosh): We may want to index into the schedulers to get the right one - # for this module + # for this module. if self.entropy_coeff_scheduler is not None: self.entropy_coeff_scheduler.update(timestep) @@ -64,8 +84,8 @@ def additional_update_per_module( return results @abc.abstractmethod - def _create_kl_variable(self, value: float) -> Any: - """Creates the kl_coeff tensor variable. + def _get_kl_variable(self, value: float) -> TensorType: + """Returns the kl_coeff (framework specific) tensor variable. This is a framework specific method that should be implemented by the framework specific sub-class. 
diff --git a/rllib/algorithms/ppo/ppo_learner_config.py b/rllib/algorithms/ppo/ppo_learner_config.py deleted file mode 100644 index e6850efa6b6a..000000000000 --- a/rllib/algorithms/ppo/ppo_learner_config.py +++ /dev/null @@ -1,21 +0,0 @@ -from dataclasses import dataclass -from typing import List, Optional, Union - -from ray.rllib.core.learner.learner import LearnerHPs - - -@dataclass -class PPOLearnerHPs(LearnerHPs): - """Hyperparameters for the PPO RL Trainer""" - - kl_coeff: float = 0.2 - kl_target: float = 0.01 - use_critic: bool = True - clip_param: float = 0.3 - vf_clip_param: float = 10.0 - entropy_coeff: float = 0.0 - vf_loss_coeff: float = 1.0 - - # experimental placeholder for things that could be part of the base LearnerHPs - lr_schedule: Optional[List[List[Union[int, float]]]] = None - entropy_coeff_schedule: Optional[List[List[Union[int, float]]]] = None diff --git a/rllib/algorithms/ppo/tf/ppo_tf_learner.py b/rllib/algorithms/ppo/tf/ppo_tf_learner.py index f3db723894f1..cd6382ec750b 100644 --- a/rllib/algorithms/ppo/tf/ppo_tf_learner.py +++ b/rllib/algorithms/ppo/tf/ppo_tf_learner.py @@ -1,7 +1,7 @@ import logging -from typing import Mapping, Any +from typing import Any, Mapping -from ray.rllib.algorithms.ppo.ppo_base_learner import PPOBaseLearner +from ray.rllib.algorithms.ppo.ppo_learner import PPOLearner from ray.rllib.core.learner.tf.tf_learner import TfLearner from ray.rllib.evaluation.postprocessing import Postprocessing from ray.rllib.policy.sample_batch import SampleBatch @@ -15,8 +15,8 @@ logger = logging.getLogger(__name__) -class PPOTfLearner(PPOBaseLearner, TfLearner): - """Implements tf-specific PPO loss logic on top of PPOBaseLearner. +class PPOTfLearner(PPOLearner, TfLearner): + """Implements tf-specific PPO loss logic on top of PPOLearner. This class implements the ppo loss under `_compute_loss_per_module()`. 
""" @@ -88,13 +88,13 @@ def compute_loss_per_module( total_loss = tf.reduce_mean( -surrogate_loss + self.hps.vf_loss_coeff * vf_loss_clipped - - self.entropy_coeff * curr_entropy + - self.hps.entropy_coeff * curr_entropy ) # Add mean_kl_loss (already processed through `reduce_mean_valid`), # if necessary. if self.hps.kl_coeff > 0.0: - total_loss += self.kl_coeff * mean_kl_loss + total_loss += self.curr_kl_coeff * mean_kl_loss return { self.TOTAL_LOSS_KEY: total_loss, @@ -106,14 +106,14 @@ def compute_loss_per_module( ), "entropy": mean_entropy, "kl": mean_kl_loss, - "entropy_coeff": self.entropy_coeff, - "cur_kl_coeff": self.kl_coeff, + "entropy_coeff": self.hps.entropy_coeff, + "cur_kl_coeff": self.curr_kl_coeff, } - @override(PPOBaseLearner) - def _create_kl_variable(self, value: float) -> Any: + @override(PPOLearner) + def _get_kl_variable(self, value: float) -> Any: return tf.Variable(value, trainable=False, dtype=tf.float32) - @override(PPOBaseLearner) + @override(PPOLearner) def _set_kl_coeff(self, value: float) -> None: - self.kl_coeff.assign(value) + self.curr_kl_coeff.assign(value) diff --git a/rllib/algorithms/ppo/tf/ppo_tf_rl_module.py b/rllib/algorithms/ppo/tf/ppo_tf_rl_module.py index 503866de47ff..5538a9450825 100644 --- a/rllib/algorithms/ppo/tf/ppo_tf_rl_module.py +++ b/rllib/algorithms/ppo/tf/ppo_tf_rl_module.py @@ -56,6 +56,7 @@ def _forward_inference(self, batch: NestedDict) -> Mapping[str, Any]: @override(RLModule) def _forward_exploration(self, batch: NestedDict) -> Mapping[str, Any]: """PPO forward pass during exploration. + Besides the action distribution, this method also returns the parameters of the policy distribution to be used for computing KL divergence between the old policy and the new policy during training. 
diff --git a/rllib/algorithms/ppo/torch/ppo_torch_learner.py b/rllib/algorithms/ppo/torch/ppo_torch_learner.py index f605b06eb970..9851e8f65a89 100644 --- a/rllib/algorithms/ppo/torch/ppo_torch_learner.py +++ b/rllib/algorithms/ppo/torch/ppo_torch_learner.py @@ -1,7 +1,7 @@ import logging -from typing import Mapping, Any +from typing import Any, Mapping -from ray.rllib.algorithms.ppo.ppo_base_learner import PPOBaseLearner +from ray.rllib.algorithms.ppo.ppo_learner import PPOLearner from ray.rllib.core.learner.torch.torch_learner import TorchLearner from ray.rllib.evaluation.postprocessing import Postprocessing from ray.rllib.policy.sample_batch import SampleBatch @@ -15,8 +15,8 @@ logger = logging.getLogger(__name__) -class PPOTorchLearner(PPOBaseLearner, TorchLearner): - """Implements torch-specific PPO loss logic on top of PPOBaseLearner. +class PPOTorchLearner(PPOLearner, TorchLearner): + """Implements torch-specific PPO loss logic on top of PPOLearner. This class implements the ppo loss under `_compute_loss_per_module()`. """ @@ -31,10 +31,6 @@ def compute_loss_per_module( # learning rate for that agent. # TODO (Kourosh): come back to RNNs later - # make sure all the coefficients are on the same device as the model - if self.kl_coeff.device != self._device: - self.kl_coeff = self.kl_coeff.to(self._device) - curr_action_dist = fwd_out[SampleBatch.ACTION_DIST] action_dist_class = type(fwd_out[SampleBatch.ACTION_DIST]) prev_action_dist = action_dist_class.from_logits( @@ -88,13 +84,13 @@ def compute_loss_per_module( total_loss = torch.mean( -surrogate_loss + self.hps.vf_loss_coeff * vf_loss_clipped - - self.entropy_coeff * curr_entropy + - self.hps.entropy_coeff * curr_entropy ) # Add mean_kl_loss (already processed through `reduce_mean_valid`), # if necessary. 
if self.hps.kl_coeff > 0.0: - total_loss += self.kl_coeff * mean_kl_loss + total_loss += self.curr_kl_coeff * mean_kl_loss return { self.TOTAL_LOSS_KEY: total_loss, @@ -106,14 +102,23 @@ def compute_loss_per_module( ), "entropy": mean_entropy, "kl": mean_kl_loss, - "entropy_coeff": self.entropy_coeff, - "cur_kl_coeff": self.kl_coeff, + "entropy_coeff": self.hps.entropy_coeff, + "cur_kl_coeff": self.curr_kl_coeff, } - @override(PPOBaseLearner) - def _create_kl_variable(self, value: float) -> Any: - return torch.tensor(value) + @override(PPOLearner) + def _get_kl_variable(self, value: float) -> Any: + return torch.tensor( + value, + requires_grad=False, + device=self._device, + dtype=torch.float32, + ) - @override(PPOBaseLearner) + @override(PPOLearner) def _set_kl_coeff(self, value: float): - self.kl_coeff.data = torch.tensor(value, device=self.kl_coeff.device) + self.curr_kl_coeff.data = torch.tensor( + value, + dtype=torch.float32, + device=self.curr_kl_coeff.device, + ) diff --git a/rllib/core/learner/learner.py b/rllib/core/learner/learner.py index ae0503a2eadf..1d8ac7659c3a 100644 --- a/rllib/core/learner/learner.py +++ b/rllib/core/learner/learner.py @@ -68,7 +68,7 @@ @dataclass -class FrameworkHPs: +class FrameworkHyperparameters: """The framework specific hyper-parameters. Args: @@ -83,15 +83,19 @@ class FrameworkHPs: @dataclass -class LearnerHPs: - """The hyper-parameters for Learner. +class LearnerHyperparameters: + """Hyperparameters for a Learner, derived from a subset of AlgorithmConfig values. - When creating a new Learner, the new hyper-parameters have to be defined by - subclassing this class and adding the new hyper-parameters as fields. + Instances of this class should only be created via calling + `get_learner_hyperparameters()` on a frozen AlgorithmConfig object and should always + considered read-only. 
- # TODO (Kourosh, Avnish): The things that could be part of the base class: - - a function, `validate` that runs some validation on the hyper-parameters. + When creating a new Learner, you should also define a new sub-class of this class + and make sure the respective AlgorithmConfig sub-class has a proper implementation + of the `get_learner_hyperparameters` method. + Validation of the values of these hyperparameters should be done by the + respective AlgorithmConfig class. """ pass @@ -111,7 +115,6 @@ class Learner: the TF or Torch specific sub-classes to implement their algorithm-specific update logic. - Args: module_spec: The module specification for the RLModule that is being trained. If the module is a single agent module, after building the module it will @@ -130,11 +133,12 @@ class Learner: Algorithm specific learner hyper-parameters will passed in via this argument. For example in PPO the `vf_loss_coeff` hyper-parameter will be passed in via this argument. Refer to - ray.rllib.core.learner.learner.LearnerHPs for more info. + ray.rllib.core.learner.learner.LearnerHyperparameters for more info. framework_hps: The framework specific hyper-parameters. This will be used to pass in any framework specific hyper-parameter that will impact the module creation. For example eager_tracing in TF or compile in Torch. - Refer to ray.rllib.core.learner.learner.FrameworkHPs for more info. + Refer to ray.rllib.core.learner.learner.FrameworkHyperparameters for + more info. 
Usage pattern: @@ -199,7 +203,7 @@ class MyLearner(TorchLearner): def compute_loss(self, fwd_out, batch): # compute the loss based on batch and output of the forward pass - # to access the learner hyper-parameters use `self.hps` + # to access the learner hyper-parameters use `self._hps` return {self.TOTAL_LOSS_KEY: loss} """ @@ -215,9 +219,9 @@ def __init__( ] = None, module: Optional[RLModule] = None, optimizer_config: Mapping[str, Any] = None, - learner_scaling_config: LearnerGroupScalingConfig = LearnerGroupScalingConfig(), - learner_hyperparameters: Optional[LearnerHPs] = LearnerHPs(), - framework_hyperparameters: Optional[FrameworkHPs] = FrameworkHPs(), + learner_group_scaling_config: Optional[LearnerGroupScalingConfig] = None, + learner_hyperparameters: Optional[LearnerHyperparameters] = None, + framework_hyperparameters: Optional[FrameworkHyperparameters] = None, ): # TODO (Kourosh): convert optimizer configs to dataclasses if module_spec is not None and module is not None: @@ -233,13 +237,20 @@ def __init__( self._module_spec = module_spec self._module_obj = module self._optimizer_config = optimizer_config - self._hps = learner_hyperparameters + self._hps = learner_hyperparameters or LearnerHyperparameters() # pick the configs that we need for the learner from scaling config - self._distributed = learner_scaling_config.num_workers > 1 - self._use_gpu = learner_scaling_config.num_gpus_per_worker > 0 + self._learner_group_scaling_config = ( + learner_group_scaling_config or LearnerGroupScalingConfig() + ) + self._distributed = self._learner_group_scaling_config.num_workers > 1 + self._use_gpu = self._learner_group_scaling_config.num_gpus_per_worker > 0 # if we are using gpu but we are not distributed, use this gpu for training - self._local_gpu_idx = learner_scaling_config.local_gpu_idx + self._local_gpu_idx = self._learner_group_scaling_config.local_gpu_idx + + self._framework_hyperparameters = ( + framework_hyperparameters or FrameworkHyperparameters() + ) 
# whether self.build has already been called self._is_built = False @@ -263,7 +274,7 @@ def module(self) -> MultiAgentRLModule: return self._module @property - def hps(self) -> LearnerHPs: + def hps(self) -> LearnerHyperparameters: """The hyper-parameters for the learner.""" return self._hps @@ -1069,9 +1080,9 @@ def _update( gradients = self.compute_gradients(loss) postprocessed_gradients = self.postprocess_gradients(gradients) self.apply_gradients(postprocessed_gradients) - result = self.compile_results(batch, fwd_out, loss, postprocessed_gradients) - self._check_result(result) - return convert_to_numpy(result) + results = self.compile_results(batch, fwd_out, loss, postprocessed_gradients) + self._check_result(results) + return convert_to_numpy(results) def _check_is_built(self): if self._module is None: @@ -1103,27 +1114,31 @@ class LearnerSpec: backend_config: The backend config for properly distributing the RLModule. optimizer_config: The optimizer setting to apply during training. learner_hyperparameters: The extra config for the loss/additional update. This - should be a subclass of LearnerHPs. This is useful for passing in - algorithm configs that contains the hyper-parameters for loss computation, - change of training behaviors, etc. e.g lr, entropy_coeff. + should be a subclass of LearnerHyperparameters. This is useful for passing + in algorithm configs that contains the hyper-parameters for loss + computation, change of training behaviors, etc. e.g lr, entropy_coeff. 
""" learner_class: Type["Learner"] module_spec: Union["SingleAgentRLModuleSpec", "MultiAgentRLModuleSpec"] = None module: Optional["RLModule"] = None - learner_scaling_config: LearnerGroupScalingConfig = field( + learner_group_scaling_config: LearnerGroupScalingConfig = field( default_factory=LearnerGroupScalingConfig ) optimizer_config: Dict[str, Any] = field(default_factory=dict) - learner_hyperparameters: LearnerHPs = field(default_factory=LearnerHPs) - framework_hyperparameters: FrameworkHPs = field(default_factory=FrameworkHPs) + learner_hyperparameters: LearnerHyperparameters = field( + default_factory=LearnerHyperparameters + ) + framework_hyperparameters: FrameworkHyperparameters = field( + default_factory=FrameworkHyperparameters + ) def get_params_dict(self) -> Dict[str, Any]: """Returns the parameters than be passed to the Learner constructor.""" return { "module": self.module, "module_spec": self.module_spec, - "learner_scaling_config": self.learner_scaling_config, + "learner_group_scaling_config": self.learner_group_scaling_config, "optimizer_config": self.optimizer_config, "learner_hyperparameters": self.learner_hyperparameters, "framework_hyperparameters": self.framework_hyperparameters, diff --git a/rllib/core/learner/learner_group.py b/rllib/core/learner/learner_group.py index 9b2774438b69..ed605b894566 100644 --- a/rllib/core/learner/learner_group.py +++ b/rllib/core/learner/learner_group.py @@ -1,10 +1,19 @@ from collections import deque import pathlib import socket -from typing import Any, List, Mapping, Type, Optional, Callable, Set, TYPE_CHECKING +from typing import ( + Any, + Callable, + List, + Mapping, + Optional, + Set, + Type, + TYPE_CHECKING, + Union, +) import ray - from ray.rllib.core.learner.reduce_result_dict_fn import _reduce_mean_results from ray.rllib.core.rl_module.rl_module import ( ModuleID, @@ -76,18 +85,18 @@ def __init__( learner_spec: LearnerSpec, max_queue_len: int = 20, ): - scaling_config = 
learner_spec.learner_scaling_config + scaling_config = learner_spec.learner_group_scaling_config learner_class = learner_spec.learner_class # TODO (Kourosh): Go with a _remote flag instead of _is_local to be more - # explicit + # explicit. self._is_local = scaling_config.num_workers == 0 self._learner = None self._workers = None - # if a user calls self.shutdown() on their own then this flag is set to true. + # If a user calls self.shutdown() on their own then this flag is set to true. # When del is called the backend executor isn't shutdown twice if this flag is # true. the backend executor would otherwise log a warning to the console from - # ray train + # ray train. self._is_shut_down = False self._is_module_trainable = _is_module_trainable @@ -114,9 +123,9 @@ def __init__( self._workers = [w.actor for w in backend_executor.worker_group.workers] - # run the neural network building code on remote workers + # Run the neural network building code on remote workers. ray.get([w.build.remote() for w in self._workers]) - # use only 1 max in flight request per worker since training workers have to + # Use only 1 max in flight request per worker since training workers have to # be synchronously executed. self._worker_manager = FaultTolerantActorManager( self._workers, @@ -272,9 +281,9 @@ def _get_results(self, results): def additional_update( self, *, - reduce_fn: Optional[Callable[[ResultDict], ResultDict]] = _reduce_mean_results, + reduce_fn: Callable[[ResultDict], ResultDict] = _reduce_mean_results, **kwargs, - ) -> List[Mapping[str, Any]]: + ) -> Union[Mapping[str, Any], List[Mapping[str, Any]]]: """Apply additional non-gradient based updates to the Learners. 
For example, this could be used to do a polyak averaging update @@ -291,7 +300,7 @@ def additional_update( """ if self.is_local: - results = [self._learner.additional_update(**kwargs)] + return self._learner.additional_update(**kwargs) else: results = self._worker_manager.foreach_actor( [lambda w: w.additional_update(**kwargs) for worker in self._workers] diff --git a/rllib/core/learner/learner_group_config.py b/rllib/core/learner/learner_group_config.py index 7830bbf09c76..322c8d062568 100644 --- a/rllib/core/learner/learner_group_config.py +++ b/rllib/core/learner/learner_group_config.py @@ -6,8 +6,8 @@ from ray.rllib.core.learner.scaling_config import LearnerGroupScalingConfig from ray.rllib.core.learner.learner import ( LearnerSpec, - LearnerHPs, - FrameworkHPs, + LearnerHyperparameters, + FrameworkHyperparameters, ) from ray.rllib.utils.from_config import NotProvided @@ -18,8 +18,15 @@ ModuleSpec = Union[SingleAgentRLModuleSpec, MultiAgentRLModuleSpec] -# TODO (Kourosh): We should make all configs come from a standard base class that -# defines the general interfaces for validation, from_dict, to_dict etc. +# TODO (Kourosh, Sven): We should make all configs come from a standard base class that +# defines the general interfaces for validation, from_dict, to_dict etc. +# Also, all these classes should abide by the following design patterns: +# - Define all default values for properties in the constructor. +# - No properties are magically set under the hood, w/o the user calling one of its +# setter methods (e.g. `.training()`). `validate()` is not one of these setter methods +# and thus should never set any properties, only validate and warn/error. +# - Any sub-configurations should be generated by calling a `.get_xyz_config()` method +# and thus be compiled on-the-fly to avoid duplicate information. 
class LearnerGroupConfig: """Configuration object for LearnerGroup.""" @@ -33,8 +40,9 @@ def __init__(self, cls: Type[LearnerGroup] = None) -> None: # `self.learner()` self.learner_class = None - self.optimizer_config = None - self.learner_hps = LearnerHPs() + # TODO (Kourosh): Change the optimizer config to a dataclass object. + self.optimizer_config = {"lr": 3e-4} + self.learner_hyperparameters = LearnerHyperparameters() # `self.resources()` self.num_gpus_per_learner_worker = 0 @@ -65,11 +73,6 @@ def validate(self) -> None: "the Learner class with .learner(learner_class=MyTrainerClass)." ) - if self.optimizer_config is None: - # get the default optimizer config if it's not provided - # TODO (Kourosh): Change the optimizer config to a dataclass object. - self.optimizer_config = {"lr": 1e-3} - def build(self) -> LearnerGroup: self.validate() @@ -80,14 +83,14 @@ def build(self) -> LearnerGroup: local_gpu_idx=self.local_gpu_idx, ) - framework_hps = FrameworkHPs(eager_tracing=self.eager_tracing) + framework_hps = FrameworkHyperparameters(eager_tracing=self.eager_tracing) learner_spec = LearnerSpec( learner_class=self.learner_class, module_spec=self.module_spec, optimizer_config=self.optimizer_config, - learner_scaling_config=scaling_config, - learner_hyperparameters=self.learner_hps, + learner_group_scaling_config=scaling_config, + learner_hyperparameters=self.learner_hyperparameters, framework_hyperparameters=framework_hps, ) @@ -113,6 +116,7 @@ def module( def resources( self, + *, num_learner_workers: Optional[int] = NotProvided, num_gpus_per_learner_worker: Optional[int] = NotProvided, num_cpus_per_learner_worker: Optional[Union[float, int]] = NotProvided, @@ -135,14 +139,14 @@ def learner( *, learner_class: Optional[Type["Learner"]] = NotProvided, optimizer_config: Optional[Dict] = NotProvided, - learner_hps: Optional[LearnerHPs] = NotProvided, + learner_hyperparameters: Optional[LearnerHyperparameters] = NotProvided, ) -> "LearnerGroupConfig": if learner_class 
is not NotProvided: self.learner_class = learner_class if optimizer_config is not NotProvided: - self.optimizer_config = optimizer_config - if learner_hps is not NotProvided: - self.learner_hps = learner_hps + self.optimizer_config.update(optimizer_config) + if learner_hyperparameters is not NotProvided: + self.learner_hyperparameters = learner_hyperparameters return self diff --git a/rllib/core/learner/tests/test_learner.py b/rllib/core/learner/tests/test_learner.py index 9d07610f16aa..2c7520f6fac7 100644 --- a/rllib/core/learner/tests/test_learner.py +++ b/rllib/core/learner/tests/test_learner.py @@ -6,7 +6,7 @@ import ray from ray.rllib.algorithms.appo.appo import APPOConfig -from ray.rllib.core.learner.learner import Learner, FrameworkHPs +from ray.rllib.core.learner.learner import Learner, FrameworkHyperparameters from ray.rllib.core.learner.scaling_config import LearnerGroupScalingConfig from ray.rllib.core.rl_module.rl_module import SingleAgentRLModuleSpec from ray.rllib.core.testing.tf.bc_module import DiscreteBCTFModule @@ -31,8 +31,8 @@ def get_learner(obs_space, action_space, learning_rate=1e-3) -> Learner: # made this a configurable hparam to avoid information leakage in tests where we # need to know what the learning rate is. optimizer_config={"lr": learning_rate}, - learner_scaling_config=LearnerGroupScalingConfig(), - framework_hyperparameters=FrameworkHPs(eager_tracing=True), + learner_group_scaling_config=LearnerGroupScalingConfig(), + framework_hyperparameters=FrameworkHyperparameters(eager_tracing=True), ) learner.build() @@ -87,8 +87,8 @@ def test_compute_gradients(self): # made this a configurable hparam to avoid information leakage in tests # where we need to know what the learning rate is. 
optimizer_config={"lr": 1e-3}, - learner_scaling_config=LearnerGroupScalingConfig(), - framework_hyperparameters=FrameworkHPs(eager_tracing=True), + learner_group_scaling_config=LearnerGroupScalingConfig(), + framework_hyperparameters=FrameworkHyperparameters(eager_tracing=True), ) learner.build() @@ -266,8 +266,8 @@ def test_save_load_state(self): model_config_dict={"fcnet_hiddens": [64]}, ), optimizer_config={"lr": 2e-3}, - learner_scaling_config=LearnerGroupScalingConfig(), - framework_hyperparameters=FrameworkHPs(eager_tracing=True), + learner_group_scaling_config=LearnerGroupScalingConfig(), + framework_hyperparameters=FrameworkHyperparameters(eager_tracing=True), ) learner1.build() @@ -282,8 +282,8 @@ def test_save_load_state(self): model_config_dict={"fcnet_hiddens": [32]}, ), optimizer_config={"lr": 1e-3}, - learner_scaling_config=LearnerGroupScalingConfig(), - framework_hyperparameters=FrameworkHPs(eager_tracing=True), + learner_group_scaling_config=LearnerGroupScalingConfig(), + framework_hyperparameters=FrameworkHyperparameters(eager_tracing=True), ) learner2.build() learner2.load_state(tmpdir) diff --git a/rllib/core/learner/tf/tf_learner.py b/rllib/core/learner/tf/tf_learner.py index d676d079cbc6..55a5eb05abaf 100644 --- a/rllib/core/learner/tf/tf_learner.py +++ b/rllib/core/learner/tf/tf_learner.py @@ -14,7 +14,7 @@ ) from ray.rllib.core.learner.learner import ( - FrameworkHPs, + FrameworkHyperparameters, Learner, ParamOptimizerPair, NamedParamOptimizerPairs, @@ -52,7 +52,7 @@ class TfLearner(Learner): def __init__( self, *, - framework_hyperparameters: Optional[FrameworkHPs] = FrameworkHPs(), + framework_hyperparameters: Optional[FrameworkHyperparameters] = None, **kwargs, ): @@ -66,12 +66,17 @@ def __init__( # enable_v2_behavior after variables have already been created. 
pass - super().__init__(framework_hyperparameters=framework_hyperparameters, **kwargs) + super().__init__( + framework_hyperparameters=( + framework_hyperparameters or FrameworkHyperparameters() + ), + **kwargs, + ) - self._enable_tf_function = framework_hyperparameters.eager_tracing + self._enable_tf_function = self._framework_hyperparameters.eager_tracing - # this is a placeholder which will be filled by - # `_make_distributed_strategy_if_necessary` + # This is a placeholder which will be filled by + # `_make_distributed_strategy_if_necessary`. self._strategy: tf.distribute.Strategy = None @override(Learner) @@ -435,7 +440,7 @@ def update( reduce_fn: Callable[[ResultDict], ResultDict] = ..., ) -> Mapping[str, Any]: # TODO (Kourosh): The update of learner is vastly differnet than the base - # class. So we need to unify them. + # class. So we need to unify them. missing_module_ids = set(batch.policy_batches.keys()) - set(self._module.keys()) if len(missing_module_ids) > 0: raise ValueError( @@ -452,7 +457,7 @@ def update( results = [] for minibatch in batch_iter(batch, minibatch_size, num_iters): # TODO (Avnish): converting to tf tensor and then from nested dict back to - # dict will most likely hit us in perf. But let's go with this for now. + # dict will most likely hit us in perf. But let's go with this for now. tensorbatch = self._convert_batch_type(minibatch) update_outs = self._update_fn(tensorbatch) loss = update_outs["loss"] @@ -474,8 +479,8 @@ def _do_update_fn(self, batch: MultiAgentBatch) -> Mapping[str, Any]: # TODO (Avnish): Match this base class's implementation. def helper(_batch): # TODO (Kourosh): We need to go back to NestedDict because that's the - # constraint on forward_train and compute_loss APIs. This seems to be - # in-efficient. Make it efficient. + # constraint on forward_train and compute_loss APIs. This seems to be + # in-efficient. Make it efficient. 
_batch = NestedDict(_batch) with tf.GradientTape() as tape: fwd_out = self._module.forward_train(_batch) diff --git a/rllib/core/learner/torch/torch_learner.py b/rllib/core/learner/torch/torch_learner.py index 081f49323774..c076b91123ba 100644 --- a/rllib/core/learner/torch/torch_learner.py +++ b/rllib/core/learner/torch/torch_learner.py @@ -17,7 +17,7 @@ from ray.rllib.core.rl_module.marl_module import MultiAgentRLModule from ray.rllib.core.rl_module.torch.torch_rl_module import TorchRLModule from ray.rllib.core.learner.learner import ( - FrameworkHPs, + FrameworkHyperparameters, Learner, ParamOptimizerPair, NamedParamOptimizerPairs, @@ -51,12 +51,17 @@ class TorchLearner(Learner): def __init__( self, *, - framework_hyperparameters: Optional[FrameworkHPs] = FrameworkHPs(), + framework_hyperparameters: Optional[FrameworkHyperparameters] = None, **kwargs, ): - super().__init__(**kwargs) + super().__init__( + framework_hyperparameters=( + framework_hyperparameters or FrameworkHyperparameters() + ), + **kwargs, + ) - # will be set during build + # Will be set during build. self._device = None @override(Learner) diff --git a/rllib/core/models/specs/specs_base.py b/rllib/core/models/specs/specs_base.py index 90b4c53baad3..b1f693891e92 100644 --- a/rllib/core/models/specs/specs_base.py +++ b/rllib/core/models/specs/specs_base.py @@ -145,7 +145,9 @@ def get_shape(self, tensor: TensorType) -> Tuple[int]: """ if self._framework == "tf2": # tf2 returns `Dimension` objects instead of `int` objects. 
- return tuple(int(i) for i in tensor.shape) + return tuple( + int(i) if i is not None else None for i in tensor.shape.as_list() + ) return tuple(tensor.shape) @OverrideToImplementCustomLogic diff --git a/rllib/core/testing/utils.py b/rllib/core/testing/utils.py index 889eb3f33141..a377c6b429a3 100644 --- a/rllib/core/testing/utils.py +++ b/rllib/core/testing/utils.py @@ -4,7 +4,7 @@ from ray.rllib.utils.annotations import DeveloperAPI from ray.rllib.core.learner.learner_group import LearnerGroup -from ray.rllib.core.learner.learner import LearnerSpec, FrameworkHPs +from ray.rllib.core.learner.learner import LearnerSpec, FrameworkHyperparameters from ray.rllib.core.learner.scaling_config import LearnerGroupScalingConfig from ray.rllib.core.rl_module.marl_module import ( @@ -144,17 +144,17 @@ def get_learner_group( """ if framework == "tf": - learner_hps = FrameworkHPs(eager_tracing=eager_tracing) + framework_hps = FrameworkHyperparameters(eager_tracing=eager_tracing) else: - learner_hps = None + framework_hps = None learner_spec = LearnerSpec( learner_class=get_learner_class(framework), module_spec=get_module_spec( framework=framework, env=env, is_multi_agent=is_multi_agent ), optimizer_config={"lr": learning_rate}, - learner_scaling_config=scaling_config, - learner_hyperparameters=learner_hps, + learner_group_scaling_config=scaling_config, + framework_hyperparameters=framework_hps, ) lg = LearnerGroup(learner_spec) diff --git a/rllib/tests/run_regression_tests.py b/rllib/tests/run_regression_tests.py index f9809bebb3ba..70827e279f0a 100644 --- a/rllib/tests/run_regression_tests.py +++ b/rllib/tests/run_regression_tests.py @@ -114,7 +114,9 @@ # for overriding the episode reward mean for tf2 tests for off policy # long learning tests such as sac and ddpg on the pendulum environment. 
if args.override_mean_reward != 0.0: - exp["stop"]["episode_reward_mean"] = args.override_mean_reward + exp["stop"][ + "sampler_results/episode_reward_mean" + ] = args.override_mean_reward # QMIX does not support tf yet -> skip. if exp["run"] == "QMIX" and args.framework != "torch": @@ -158,9 +160,17 @@ # we evaluate against an actual environment. check_eval = exp["config"].get("evaluation_interval", None) is not None reward_mean = ( - t.last_result["evaluation"]["episode_reward_mean"] + t.last_result["evaluation"]["sampler_results"][ + "episode_reward_mean" + ] if check_eval - else t.last_result["episode_reward_mean"] + else ( + # Some algos don't store sampler results under `sampler_results` + # e.g. ARS. Need to keep this logic around for now. + t.last_result["sampler_results"]["episode_reward_mean"] + if "sampler_results" in t.last_result + else t.last_result["episode_reward_mean"] + ) ) # If we are using evaluation workers, we may have @@ -168,12 +178,14 @@ # not, use `episode_reward_mean`. if check_eval: min_reward = t.stopping_criterion.get( - "evaluation/episode_reward_mean", - t.stopping_criterion.get("episode_reward_mean"), + "evaluation/sampler_results/episode_reward_mean", + t.stopping_criterion.get("sampler_results/episode_reward_mean"), ) # Otherwise, expect `episode_reward_mean` to be set. else: - min_reward = t.stopping_criterion.get("episode_reward_mean") + min_reward = t.stopping_criterion.get( + "sampler_results/episode_reward_mean" + ) # If min reward not defined, always pass. 
if min_reward is None or reward_mean >= min_reward: diff --git a/rllib/utils/framework.py b/rllib/utils/framework.py index a94a71151a96..5e6f138f13db 100644 --- a/rllib/utils/framework.py +++ b/rllib/utils/framework.py @@ -290,6 +290,8 @@ def get_variable( ) elif framework == "torch" and torch_tensor is True: torch, _ = try_import_torch() + if not isinstance(value, np.ndarray): + value = np.array(value) var_ = torch.from_numpy(value) if dtype in [torch.float32, np.float32]: var_ = var_.float() From 74d3e88991445fc2a4a911d88f406a1914ee32b1 Mon Sep 17 00:00:00 2001 From: Lonnie Liu <95255098+aslonnie@users.noreply.github.com> Date: Mon, 1 May 2023 10:21:47 -0700 Subject: [PATCH 164/424] [CI] Only check once on need_wheels. (#34902) Do not run the same command many times, and also make it private. Signed-off-by: Lonnie Liu --- ci/ci.sh | 25 ++++++++++++++----------- 1 file changed, 14 insertions(+), 11 deletions(-) diff --git a/ci/ci.sh b/ci/ci.sh index 3f1d5f08828f..5bf5ae3ec4d3 100755 --- a/ci/ci.sh +++ b/ci/ci.sh @@ -77,16 +77,18 @@ reload_env() { fi } -need_wheels() { - local error_code=1 +_need_wheels() { + local result="false" case "${OSTYPE}" in - linux*) if [ "${LINUX_WHEELS-}" = 1 ]; then error_code=0; fi;; - darwin*) if [ "${MAC_WHEELS-}" = 1 ]; then error_code=0; fi;; - msys*) if [ "${WINDOWS_WHEELS-}" = 1 ]; then error_code=0; fi;; + linux*) if [[ "${LINUX_WHEELS-}" == "1" ]]; then result="true"; fi;; + darwin*) if [[ "${MAC_WHEELS-}" == "1" ]]; then result="true"; fi;; + msys*) if [[ "${WINDOWS_WHEELS-}" == "1" ]]; then result="true"; fi;; esac - return "${error_code}" + echo "${result}" } +NEED_WHEELS="$(_need_wheels)" + upload_wheels() { local branch="" commit commit="$(git rev-parse --verify HEAD)" @@ -258,13 +260,14 @@ test_cpp() { } test_wheels() { - local result=0 flush_logs=0 + local result=0 + local flush_logs=0 - if need_wheels; then + if [[ "${NEED_WHEELS}" == "true" ]]; then "${WORKSPACE_DIR}"/ci/build/test-wheels.sh || { result=$? 
&& flush_logs=1; } fi - if [ 0 -ne "${flush_logs}" ]; then + if [[ 0 -ne "${flush_logs}" ]]; then cat -- /tmp/ray/session_latest/logs/* || true sleep 60 # Explicitly sleep 60 seconds for logs to go through fi @@ -746,7 +749,7 @@ build() { _bazel_build_protobuf fi - if ! need_wheels; then + if [[ "${NEED_WHEELS}" != "true" ]]; then install_ray if [ "${LINT-}" = 1 ]; then # Try generating Sphinx documentation. To do this, we need to install Ray first. @@ -762,7 +765,7 @@ build() { install_go fi - if need_wheels; then + if [[ "${NEED_WHEELS}" == "true" ]]; then build_wheels fi } From c492d6a159b03c5b2972a49ed4aaaeb37a81faf4 Mon Sep 17 00:00:00 2001 From: Chen Shen Date: Mon, 1 May 2023 10:29:06 -0700 Subject: [PATCH 165/424] [Core/data] use wait based prefetcher by default (#34871) Turn on the wait based prefetcher to address the issues around the actor based prefetcher. - [x]: benchmark the before/after performance. --- python/ray/data/context.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/ray/data/context.py b/python/ray/data/context.py index fbbad658985b..eaa0a61b401e 100644 --- a/python/ray/data/context.py +++ b/python/ray/data/context.py @@ -55,7 +55,7 @@ DEFAULT_MIN_PARALLELISM = 200 # Wether to use actor based block prefetcher. -DEFAULT_ACTOR_PREFETCHER_ENABLED = True +DEFAULT_ACTOR_PREFETCHER_ENABLED = False # Whether to use push-based shuffle by default. DEFAULT_USE_PUSH_BASED_SHUFFLE = bool( From d2485641410d00363efacce97d4a43c62a0208fe Mon Sep 17 00:00:00 2001 From: Yi Cheng <74173148+iycheng@users.noreply.github.com> Date: Mon, 1 May 2023 11:31:02 -0700 Subject: [PATCH 166/424] [test] Make test object store more accurate. (#34885) The test include ray.put and also actor start time. These should be excluded from the testing to make it more accurate. 
--- .../object_store/test_object_store.py | 21 +++++++++++-------- 1 file changed, 12 insertions(+), 9 deletions(-) diff --git a/release/benchmarks/object_store/test_object_store.py b/release/benchmarks/object_store/test_object_store.py index 2403be078a07..ef8fb5534080 100644 --- a/release/benchmarks/object_store/test_object_store.py +++ b/release/benchmarks/object_store/test_object_store.py @@ -28,8 +28,8 @@ class Actor: def foo(self): pass - def sum(self, arr): - return np.sum(arr) + def data_len(self, arr): + return len(arr) actors = [Actor.remote() for _ in range(NUM_NODES)] @@ -39,25 +39,28 @@ def sum(self, arr): for actor in tqdm(actors, desc="Ensure all actors have started."): ray.get(actor.foo.remote()) + start = perf_counter() result_refs = [] for actor in tqdm(actors, desc="Broadcasting objects"): - result_refs.append(actor.sum.remote(ref)) + result_refs.append(actor.data_len.remote(ref)) results = ray.get(result_refs) + end = perf_counter() + for result in results: assert result == OBJECT_SIZE + return end - start + ray.init(address="auto") -start = perf_counter() -test_object_broadcast() -end = perf_counter() -print(f"Broadcast time: {end - start} ({OBJECT_SIZE} B x {NUM_NODES} nodes)") +duration = test_object_broadcast() +print(f"Broadcast time: {duration} ({OBJECT_SIZE} B x {NUM_NODES} nodes)") if "TEST_OUTPUT_JSON" in os.environ: out_file = open(os.environ["TEST_OUTPUT_JSON"], "w") results = { - "broadcast_time": end - start, + "broadcast_time": duration, "object_size": OBJECT_SIZE, "num_nodes": NUM_NODES, "success": "1", @@ -66,7 +69,7 @@ def sum(self, arr): results["perf_metrics"] = [ { "perf_metric_name": perf_metric_name, - "perf_metric_value": end - start, + "perf_metric_value": duration, "perf_metric_type": "LATENCY", } ] From 01ecd7bc43ae250ee3c06d8c851280a532527892 Mon Sep 17 00:00:00 2001 From: Yunxuan Xiao Date: Mon, 1 May 2023 11:45:34 -0700 Subject: [PATCH 167/424] [Doc] Improve LightningTrainer advanced example (#34429) --- 
.../lightning/lightning_cola_advanced.ipynb | 1183 ++++++++++++++++- .../lightning/lightning_mnist_example.ipynb | 8 +- 2 files changed, 1121 insertions(+), 70 deletions(-) diff --git a/doc/source/train/examples/lightning/lightning_cola_advanced.ipynb b/doc/source/train/examples/lightning/lightning_cola_advanced.ipynb index a5af0f3e4f1c..a78b1b6703f9 100644 --- a/doc/source/train/examples/lightning/lightning_cola_advanced.ipynb +++ b/doc/source/train/examples/lightning/lightning_cola_advanced.ipynb @@ -11,7 +11,7 @@ "\n", ":::{note}\n", "\n", - "This is an advanced example for {class}`LightningTrainer `, which demonstrates how to use LightningTrainer with `Datastream` and `Batch Predictor`. \n", + "This is an advanced example for {class}`LightningTrainer `, which demonstrates how to use LightningTrainer with {ref}`Datastream ` and {ref}`Batch Predictor `. \n", "\n", "If you just want to quickly convert your existing PyTorch Lightning scripts into Ray AIR, you can refer to this starter example:\n", "{ref}`Train a Pytorch Lightning Image Classifier `.\n", @@ -29,7 +29,7 @@ }, { "cell_type": "code", - "execution_count": 35, + "execution_count": 5, "metadata": { "tags": [ "remove-cell" @@ -40,9 +40,32 @@ "SMOKE_TEST = True" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Run the following line in order to install all the necessary dependencies:" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [], + "source": [ + "!pip install numpy datasets \"transformers>=4.19.1\" \"pytorch_lightning>=1.6.5\"" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's start by importing the needed libraries:" + ] + }, { "cell_type": "code", - "execution_count": 3, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -61,36 +84,20 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## 1. 
Pre-process CoLA Datastream\n", + "## Pre-process CoLA Datastream\n", "\n", "CoLA is a binary sentence classification task with 10.6K training examples. First, we download the dataset and metrics using the HuggingFace API, and create Ray Data for each split accordingly." ] }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Reusing dataset glue (/home/ray/.cache/huggingface/datasets/glue/cola/1.0.0/dacbe3125aa31d7f70367a07a8a9e72a5a0bfeb5fc42e75c9db75b96da6053ad)\n", - "100%|██████████| 3/3 [00:00<00:00, 948.44it/s]\n" - ] - } - ], - "source": [ - "dataset = load_dataset(\"glue\", \"cola\")\n", - "metric = load_metric(\"glue\", \"cola\")" - ] - }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ + "dataset = load_dataset(\"glue\", \"cola\")\n", + "metric = load_metric(\"glue\", \"cola\")\n", + "\n", "ray_datasets = ray.data.from_huggingface(dataset)" ] }, @@ -104,7 +111,7 @@ }, { "cell_type": "code", - "execution_count": 6, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -136,14 +143,14 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## 2. Define a PyTorch Lightning Model\n", + "## Define a PyTorch Lightning Model\n", "\n", "You don't have to make any change of your `LightningModule` definition. Just copy and paste your code here:" ] }, { "cell_type": "code", - "execution_count": 7, + "execution_count": 11, "metadata": {}, "outputs": [], "source": [ @@ -202,16 +209,17 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## 3. Finetune the model with LightningTrainer\n", + "## Configure your LightningTrainer\n", "\n", "Define a LightningTrainer with necessary configurations, including hyper-parameters, checkpointing and compute resources settings. 
\n", "\n", - "You may find the API of {class}`LightningConfigBuilder ` useful.\n" + "You may find the API of {class}`LightningConfigBuilder ` and the discussion {ref}`here ` useful.\n", + "\n" ] }, { "cell_type": "code", - "execution_count": 8, + "execution_count": 12, "metadata": {}, "outputs": [], "source": [ @@ -225,8 +233,31 @@ " .trainer(max_epochs=5, accelerator=\"gpu\")\n", " .checkpointing(save_on_train_epoch_end=False)\n", " .build()\n", - ")\n", + ")" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + ":::{note}\n", + "Note that the `lightning_config` is created on the head node and will be passed to the worker nodes later. Be aware that the environment variables and hardware settings may differ between the head node and worker nodes.\n", + ":::\n", "\n", + ":::{note}\n", + "{meth}`LightningConfigBuilder.checkpointing() ` creates a [ModelCheckpoint](https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.callbacks.ModelCheckpoint.html#lightning.pytorch.callbacks.ModelCheckpoint) callback. This callback defines the checkpoint frequency and saves checkpoint files in Lightning style. 
\n", + "\n", + "If you want to save AIR checkpoints for Batch Prediction, please also provide an AIR {class}`CheckpointConfig `.\n", + ":::" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": {}, + "outputs": [], + "source": [ "# Save AIR checkpoints according to the performance on validation set\n", "run_config = RunConfig(\n", " name=\"ptl-sent-classification\",\n", @@ -237,7 +268,7 @@ " ),\n", ")\n", "\n", - "# Scale the training workload across 4 GPUs\n", + "# Scale the DDP training workload across 4 GPUs\n", "# You can change this config based on your compute resources.\n", "scaling_config = ScalingConfig(\n", " num_workers=4, use_gpu=True, resources_per_worker={\"CPU\": 1, \"GPU\": 1}\n", @@ -246,7 +277,7 @@ }, { "cell_type": "code", - "execution_count": 9, + "execution_count": 14, "metadata": { "tags": [ "remove-cell" @@ -272,13 +303,15 @@ "cell_type": "markdown", "metadata": {}, "source": [ + "## Fine-tune the model with LightningTrainer\n", + "\n", "Train the model with the configuration we specified above. \n", "\n", "To feed data into LightningTrainer, we need to configure the following arguments:\n", "\n", - "- datasets: A dictionary of the input Ray datasets, with special keys \"train\" and \"val\".\n", - "- datasets_iter_config: The argument list of {meth}`iter_torch_batches() `. It defines the way we iterate dataset shards for each worker.\n", - "- preprocessor: The preprocessor that will be applied to the input dataset.\n", + "- `datasets`: A dictionary of the input Ray datasets, with special keys \"train\" and \"val\".\n", + "- `datasets_iter_config`: The argument list of {meth}`iter_torch_batches() `. 
It defines the way we iterate dataset shards for each worker.\n", + "- `preprocessor`: The preprocessor that will be applied to the input dataset.\n", "\n", ":::{note}\n", "Note that we are using Datastream for data ingestion for faster preprocessing here, but you can also continue to use the native `PyTorch DataLoader` or `LightningDataModule`. See {ref}`this example `. \n", @@ -291,9 +324,1002 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 15, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/html": [ + "
    \n", + "
    \n", + "
    \n", + "

    Tune Status

    \n", + "
    \n", + "\n", + "\n", + "\n", + "\n", + "\n", + "
    Current time:2023-04-24 10:42:50
    Running for: 00:06:26.94
    Memory: 23.8/186.6 GiB
    \n", + "
    \n", + "
    \n", + "
    \n", + "

    System Info

    \n", + " Using FIFO scheduling algorithm.
    Logical resource usage: 0/48 CPUs, 0/4 GPUs (0.0/1.0 accelerator_type:T4)\n", + "
    \n", + " \n", + "
    \n", + "
    \n", + "
    \n", + "

    Trial Status

    \n", + " \n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "
    Trial name status loc iter total time (s) train_loss matthews_correlation epoch
    LightningTrainer_87ecf_00000TERMINATED10.0.60.127:67819 5 376.028 0.0119807 0.589931 4
    \n", + "
    \n", + "
    \n", + "\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "(pid=67819) /home/ray/anaconda3/lib/python3.9/site-packages/xgboost/compat.py:31: FutureWarning: pandas.Int64Index is deprecated and will be removed from pandas in a future version. Use pandas.Index with the appropriate dtype instead.\n", + "(pid=67819) from pandas import MultiIndex, Int64Index\n", + "(LightningTrainer pid=67819) 2023-04-24 10:36:31,679\tINFO backend_executor.py:128 -- Starting distributed worker processes: ['68396 (10.0.60.127)', '68397 (10.0.60.127)', '68398 (10.0.60.127)', '68399 (10.0.60.127)']\n", + "(RayTrainWorker pid=68396) 2023-04-24 10:36:32,731\tINFO config.py:86 -- Setting up process group for: env:// [rank=0, world_size=4]\n" + ] + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "f9443dd2a6dc49029ef7fb4d7a596729", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "(pid=67819) - RandomizeBlockOrder 1: 0%| | 0/1 [00:00 TaskPoolMapOperator[BatchMapper] -> AllToAllOperator[RandomizeBlockOrder]\n", + "(LightningTrainer pid=67819) 2023-04-24 10:36:34,052\tINFO streaming_executor.py:88 -- Execution config: ExecutionOptions(resource_limits=ExecutionResources(cpu=None, gpu=None, object_store_memory=None), locality_with_output=False, preserve_order=False, actor_locality_enabled=True, verbose_progress=False)\n", + "(LightningTrainer pid=67819) 2023-04-24 10:36:34,053\tINFO streaming_executor.py:90 -- Tip: To enable per-operator progress reporting, set RAY_DATA_VERBOSE_PROGRESS=1.\n", + "(RayTrainWorker pid=68396) /home/ray/anaconda3/lib/python3.9/site-packages/xgboost/compat.py:31: FutureWarning: pandas.Int64Index is deprecated and will be removed from pandas in a future version. 
Use pandas.Index with the appropriate dtype instead.\n", + "(RayTrainWorker pid=68396) from pandas import MultiIndex, Int64Index\n", + "Downloading: 0%| | 0.00/416M [00:00 TaskPoolMapOperator[BatchMapper] -> AllToAllOperator[RandomizeBlockOrder]\n", + "(RayTrainWorker pid=68398) 2023-04-24 10:36:59,629\tINFO streaming_executor.py:88 -- Execution config: ExecutionOptions(resource_limits=ExecutionResources(cpu=None, gpu=None, object_store_memory=None), locality_with_output=False, preserve_order=False, actor_locality_enabled=True, verbose_progress=False)\n", + "(RayTrainWorker pid=68398) 2023-04-24 10:36:59,629\tINFO streaming_executor.py:90 -- Tip: To enable per-operator progress reporting, set RAY_DATA_VERBOSE_PROGRESS=1.\n" + ] + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "70151d1b6133418fb5bf5e39b0089dd6", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "(pid=68398) - RandomizeBlockOrder 1: 0%| | 0/1 [00:00 TaskPoolMapOperator[BatchMapper] -> AllToAllOperator[RandomizeBlockOrder] [repeated 3x across cluster]\n", + "(RayTrainWorker pid=68399) 2023-04-24 10:36:59,628\tINFO streaming_executor.py:88 -- Execution config: ExecutionOptions(resource_limits=ExecutionResources(cpu=None, gpu=None, object_store_memory=None), locality_with_output=False, preserve_order=False, actor_locality_enabled=True, verbose_progress=False) [repeated 3x across cluster]\n", + "(RayTrainWorker pid=68399) 2023-04-24 10:36:59,629\tINFO streaming_executor.py:90 -- Tip: To enable per-operator progress reporting, set RAY_DATA_VERBOSE_PROGRESS=1. [repeated 3x across cluster]\n", + "(RayTrainWorker pid=68398) [W reducer.cpp:1298] Warning: find_unused_parameters=True was specified in DDP constructor, but did not find any unused parameters in the forward pass. This flag results in an extra traversal of the autograd graph every iteration, which can adversely affect performance. 
If your model indeed never has any unused parameters in the forward pass, consider turning this flag off. Note that this warning may be a false positive if your model has flow control causing later iterations to have unused parameters. (function operator())\n", + "(RayTrainWorker pid=68396) 2023-04-24 10:37:27.091660: I tensorflow/core/platform/cpu_feature_guard.cc:193] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: AVX2 AVX512F AVX512_VNNI FMA\n", + "(RayTrainWorker pid=68396) To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.\n", + "(RayTrainWorker pid=68399) [W reducer.cpp:1298] Warning: find_unused_parameters=True was specified in DDP constructor, but did not find any unused parameters in the forward pass. This flag results in an extra traversal of the autograd graph every iteration, which can adversely affect performance. If your model indeed never has any unused parameters in the forward pass, consider turning this flag off. Note that this warning may be a false positive if your model has flow control causing later iterations to have unused parameters. (function operator()) [repeated 3x across cluster]\n", + "(RayTrainWorker pid=68396) 2023-04-24 10:37:27.373013: I tensorflow/core/util/port.cc:104] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. 
To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`.\n", + "(RayTrainWorker pid=68396) 2023-04-24 10:37:28.763569: W tensorflow/compiler/xla/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'libnvinfer.so.7'; dlerror: libnvinfer.so.7: cannot open shared object file: No such file or directory; LD_LIBRARY_PATH: /usr/local/nvidia/lib:/usr/local/nvidia/lib64\n", + "(RayTrainWorker pid=68396) 2023-04-24 10:37:28.763761: W tensorflow/compiler/xla/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'libnvinfer_plugin.so.7'; dlerror: libnvinfer_plugin.so.7: cannot open shared object file: No such file or directory; LD_LIBRARY_PATH: /usr/local/nvidia/lib:/usr/local/nvidia/lib64\n", + "(RayTrainWorker pid=68396) 2023-04-24 10:37:28.763770: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Cannot dlopen some TensorRT libraries. If you would like to use Nvidia GPU with TensorRT, please make sure the missing libraries mentioned above are installed properly.\n", + "(RayTrainWorker pid=68398) 2023-04-24 10:38:01,220\tINFO streaming_executor.py:87 -- Executing DAG InputDataBuffer[Input] -> TaskPoolMapOperator[BatchMapper] -> AllToAllOperator[RandomizeBlockOrder]\n", + "(RayTrainWorker pid=68398) 2023-04-24 10:38:01,221\tINFO streaming_executor.py:88 -- Execution config: ExecutionOptions(resource_limits=ExecutionResources(cpu=None, gpu=None, object_store_memory=None), locality_with_output=False, preserve_order=False, actor_locality_enabled=True, verbose_progress=False)\n", + "(RayTrainWorker pid=68398) 2023-04-24 10:38:01,221\tINFO streaming_executor.py:90 -- Tip: To enable per-operator progress reporting, set RAY_DATA_VERBOSE_PROGRESS=1.\n" + ] + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "50090e60317342e8a2fa5747b2dfc7dd", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "(pid=68398) - RandomizeBlockOrder 1: 0%| | 0/1 
[00:00\n", + "

    Trial Progress

    \n", + " \n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "
    Trial name _report_on date done epoch experiment_taghostname iterations_since_restore matthews_correlationnode_ip pidshould_checkpoint step time_since_restore time_this_iter_s time_total_s timestamp train_loss training_iterationtrial_id
    LightningTrainer_87ecf_00000validation_end2023-04-24_10-42-46True 4 0ip-10-0-60-127 5 0.58993110.0.60.12767819True 670 376.028 70.6609 376.028 1682358165 0.0119807 587ecf_00000
    \n", + "
    \n", + "\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "(RayTrainWorker pid=68398) 2023-04-24 10:39:03,705\tINFO streaming_executor.py:87 -- Executing DAG InputDataBuffer[Input] -> TaskPoolMapOperator[BatchMapper] -> AllToAllOperator[RandomizeBlockOrder] [repeated 4x across cluster]\n", + "(RayTrainWorker pid=68398) 2023-04-24 10:39:03,706\tINFO streaming_executor.py:88 -- Execution config: ExecutionOptions(resource_limits=ExecutionResources(cpu=None, gpu=None, object_store_memory=None), locality_with_output=False, preserve_order=False, actor_locality_enabled=True, verbose_progress=False) [repeated 4x across cluster]\n", + "(RayTrainWorker pid=68398) 2023-04-24 10:39:03,706\tINFO streaming_executor.py:90 -- Tip: To enable per-operator progress reporting, set RAY_DATA_VERBOSE_PROGRESS=1. [repeated 4x across cluster]\n" + ] + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "125ccea4d26e48c0bf4e45610f9ae64a", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "(pid=68398) - RandomizeBlockOrder 1: 0%| | 0/1 [00:00 TaskPoolMapOperator[BatchMapper] -> AllToAllOperator[RandomizeBlockOrder] [repeated 4x across cluster]\n", + "(RayTrainWorker pid=68398) 2023-04-24 10:40:09,873\tINFO streaming_executor.py:88 -- Execution config: ExecutionOptions(resource_limits=ExecutionResources(cpu=None, gpu=None, object_store_memory=None), locality_with_output=False, preserve_order=False, actor_locality_enabled=True, verbose_progress=False) [repeated 4x across cluster]\n", + "(RayTrainWorker pid=68398) 2023-04-24 10:40:09,873\tINFO streaming_executor.py:90 -- Tip: To enable per-operator progress reporting, set RAY_DATA_VERBOSE_PROGRESS=1. 
[repeated 4x across cluster]\n" + ] + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "db4c22b67b844a6d8ff3e1882540bce4", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "(pid=68398) - RandomizeBlockOrder 1: 0%| | 0/1 [00:00 TaskPoolMapOperator[BatchMapper] -> AllToAllOperator[RandomizeBlockOrder] [repeated 4x across cluster]\n", + "(RayTrainWorker pid=68398) 2023-04-24 10:41:18,552\tINFO streaming_executor.py:88 -- Execution config: ExecutionOptions(resource_limits=ExecutionResources(cpu=None, gpu=None, object_store_memory=None), locality_with_output=False, preserve_order=False, actor_locality_enabled=True, verbose_progress=False) [repeated 4x across cluster]\n", + "(RayTrainWorker pid=68398) 2023-04-24 10:41:18,552\tINFO streaming_executor.py:90 -- Tip: To enable per-operator progress reporting, set RAY_DATA_VERBOSE_PROGRESS=1. [repeated 4x across cluster]\n" + ] + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "ccc3d13c44b344e8891a81794fd17ffe", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "(pid=68398) - RandomizeBlockOrder 1: 0%| | 0/1 [00:00 TaskPoolMapOperator[BatchMapper] -> AllToAllOperator[RandomizeBlockOrder] [repeated 4x across cluster]\n", + "(RayTrainWorker pid=68398) 2023-04-24 10:42:29,325\tINFO streaming_executor.py:88 -- Execution config: ExecutionOptions(resource_limits=ExecutionResources(cpu=None, gpu=None, object_store_memory=None), locality_with_output=False, preserve_order=False, actor_locality_enabled=True, verbose_progress=False) [repeated 4x across cluster]\n", + "(RayTrainWorker pid=68398) 2023-04-24 10:42:29,325\tINFO streaming_executor.py:90 -- Tip: To enable per-operator progress reporting, set RAY_DATA_VERBOSE_PROGRESS=1. 
[repeated 4x across cluster]\n" + ] + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "55f6f7e8333341d1b57a890809bc90ad", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "(pid=68398) - RandomizeBlockOrder 1: 0%| | 0/1 [00:00`. \n", + "\n", + ":::" + ] + }, { "cell_type": "code", - "execution_count": 11, + "execution_count": 16, "metadata": { "tags": [] }, @@ -317,13 +1354,13 @@ "data": { "text/plain": [ "Result(\n", - " metrics={'_report_on': 'validation_end', 'train_loss': 0.05989973247051239, 'matthews_correlation': 0.5175218541439164, 'epoch': 4, 'step': 670, 'should_checkpoint': True, 'done': True, 'trial_id': '5ae4c_00000', 'experiment_tag': '0'},\n", - " path='/home/ray/ray_results/ptl-sent-classification/LightningTrainer_5ae4c_00000_0_2023-04-05_12-45-05',\n", - " checkpoint=LightningCheckpoint(local_path=/home/ray/ray_results/ptl-sent-classification/LightningTrainer_5ae4c_00000_0_2023-04-05_12-45-05/checkpoint_000004)\n", + " metrics={'_report_on': 'validation_end', 'train_loss': 0.011980690062046051, 'matthews_correlation': 0.5899314497879129, 'epoch': 4, 'step': 670, 'should_checkpoint': True, 'done': True, 'trial_id': '87ecf_00000', 'experiment_tag': '0'},\n", + " path='/home/ray/ray_results/ptl-sent-classification/LightningTrainer_87ecf_00000_0_2023-04-24_10-36-23',\n", + " checkpoint=LightningCheckpoint(local_path=/home/ray/ray_results/ptl-sent-classification/LightningTrainer_87ecf_00000_0_2023-04-24_10-36-23/checkpoint_000004)\n", ")" ] }, - "execution_count": 11, + "execution_count": 16, "metadata": {}, "output_type": "execute_result" } @@ -337,7 +1374,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## 4. 
Do Batch Inference with a Saved Checkpoint" + "## Do Batch Inference with a Saved Checkpoint" ] }, { @@ -352,7 +1389,7 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": 17, "metadata": { "tags": [] }, @@ -365,7 +1402,8 @@ "checkpoint = result.checkpoint\n", "\n", "# You can also load a checkpoint from disk:\n", - "# checkpoint = LightningCheckpoint.from_directory(\"YOUR_CHECKPOINT_DIR\")\n", + "# YOUR_CHECKPOINT_DIR = result.checkpoint.path\n", + "# checkpoint = LightningCheckpoint.from_directory(YOUR_CHECKPOINT_DIR)\n", "\n", "batch_predictor = BatchPredictor(\n", " checkpoint=checkpoint,\n", @@ -373,17 +1411,8 @@ " use_gpu=True,\n", " model_class=SentimentModel,\n", " preprocessor=preprocessor,\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": 33, - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ + ")\n", + "\n", "# Use 2 GPUs for batch inference\n", "predictions = batch_predictor.predict(\n", " ray_datasets[\"validation\"],\n", @@ -406,32 +1435,52 @@ }, { "cell_type": "code", - "execution_count": 31, + "execution_count": null, "metadata": { "tags": [] }, + "outputs": [], + "source": [ + "# Internally, BatchPredictor calls forward() method of the LightningModule.\n", + "# Convert the logits tensor into labels with argmax.\n", + "def argmax(batch):\n", + " batch[\"predictions\"] = batch[\"predictions\"].apply(lambda x: np.argmax(x))\n", + " return batch\n", + "\n", + "\n", + "results = predictions.map_batches(argmax).to_pandas()" + ] + }, + { + "cell_type": "code", + "execution_count": 23, + "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "{'matthews_correlation': 0.5175218541439164}\n" + " predictions label\n", + "0 1 1\n", + "1 1 1\n", + "2 0 1\n", + "3 1 1\n", + "4 0 0\n", + "5 1 0\n", + "6 1 0\n", + "7 1 1\n", + "8 1 1\n", + "9 1 1\n", + "\n", + "{'matthews_correlation': 0.5899314497879129}\n" ] } ], "source": [ - "# Internally, BatchPredictor calls forward() method 
of the LightningModule.\n", - "# Convert the logits tensor into labels with argmax.\n", - "def argmax(batch):\n", - " batch[\"predictions\"] = batch[\"predictions\"].apply(lambda x: np.argmax(x))\n", - " return batch\n", - "\n", - "\n", - "results = predictions.map_batches(argmax).to_pandas()\n", - "\n", "matthews_corr = metric.compute(\n", " predictions=results[\"predictions\"], references=results[\"label\"]\n", ")\n", + "print(results.head(10))\n", "print(matthews_corr)" ] } diff --git a/doc/source/train/examples/lightning/lightning_mnist_example.ipynb b/doc/source/train/examples/lightning/lightning_mnist_example.ipynb index dc41c625d122..43add5d567d2 100644 --- a/doc/source/train/examples/lightning/lightning_mnist_example.ipynb +++ b/doc/source/train/examples/lightning/lightning_mnist_example.ipynb @@ -177,6 +177,8 @@ "cell_type": "markdown", "metadata": {}, "source": [ + "(lightning-config-builder-intro)=\n", + "\n", "## Define the Cofigurations for AIR LightningTrainer\n", "\n", "The {meth}`LightningConfigBuilder ` class stores all the parameters involved in training a PyTorch Lightning module. It takes the same parameter lists as those in PyTorch Lightning.\n", @@ -646,9 +648,9 @@ "metadata": {}, "source": [ "## What's next?\n", - "- Use Ray Data for more efficient data preprocessing.\n", - "- Use {class}`BatchPredictor ` for large-scale distributed inference.\n", - "- Find the best hyperparameter settings with Ray Tune." + "\n", + "- {ref}`Use LightningTrainer with Ray Data and Batch Predictor `\n", + "- {ref}`Hyperparameter searching with LightningTrainer + Ray Tune. `" ] } ], From fbf81a9356a05ed1f36f900c4eed2b8e7851154e Mon Sep 17 00:00:00 2001 From: Yunxuan Xiao Date: Mon, 1 May 2023 11:45:43 -0700 Subject: [PATCH 168/424] [Doc] Update LightningTrainer MNIST example. 
(#34867) --- .../lightning/lightning_mnist_example.ipynb | 305 ++++++++++++------ 1 file changed, 198 insertions(+), 107 deletions(-) diff --git a/doc/source/train/examples/lightning/lightning_mnist_example.ipynb b/doc/source/train/examples/lightning/lightning_mnist_example.ipynb index 43add5d567d2..8fa7372cbbd7 100644 --- a/doc/source/train/examples/lightning/lightning_mnist_example.ipynb +++ b/doc/source/train/examples/lightning/lightning_mnist_example.ipynb @@ -14,20 +14,16 @@ }, { "cell_type": "code", - "execution_count": 49, - "metadata": { - "tags": [ - "remove-cell" - ] - }, + "execution_count": null, + "metadata": {}, "outputs": [], "source": [ - "SMOKE_TEST = True" + "!pip install \"torchmetrics>=0.9\" \"pytorch_lightning>=1.6\" " ] }, { "cell_type": "code", - "execution_count": 50, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -60,7 +56,7 @@ }, { "cell_type": "code", - "execution_count": 51, + "execution_count": 2, "metadata": {}, "outputs": [], "source": [ @@ -108,7 +104,7 @@ }, { "cell_type": "code", - "execution_count": 52, + "execution_count": 3, "metadata": {}, "outputs": [], "source": [ @@ -123,7 +119,9 @@ " nn.ReLU(),\n", " )\n", " self.lr = lr\n", - " self.accuracy = Accuracy()\n", + " self.accuracy = Accuracy(task=\"multiclass\", num_classes=10)\n", + " self.eval_loss = []\n", + " self.eval_accuracy = []\n", "\n", " def forward(self, x):\n", " x = x.view(-1, 28 * 28)\n", @@ -140,6 +138,8 @@ " def validation_step(self, val_batch, batch_idx):\n", " loss, acc = self._shared_eval(val_batch)\n", " self.log(\"val_accuracy\", acc)\n", + " self.eval_loss.append(loss)\n", + " self.eval_accuracy.append(acc)\n", " return {\"val_loss\": loss, \"val_accuracy\": acc}\n", "\n", " def test_step(self, test_batch, batch_idx):\n", @@ -154,11 +154,13 @@ " acc = self.accuracy(logits, y)\n", " return loss, acc\n", "\n", - " def validation_epoch_end(self, outputs):\n", - " avg_loss = torch.stack([x[\"val_loss\"] for x in outputs]).mean()\n", 
- " avg_acc = torch.stack([x[\"val_accuracy\"] for x in outputs]).mean()\n", + " def on_validation_epoch_end(self):\n", + " avg_loss = torch.stack(self.eval_loss).mean()\n", + " avg_acc = torch.stack(self.eval_accuracy).mean()\n", " self.log(\"val_loss\", avg_loss, sync_dist=True)\n", " self.log(\"val_accuracy\", avg_acc, sync_dist=True)\n", + " self.eval_loss.clear()\n", + " self.eval_accuracy.clear()\n", "\n", " def configure_optimizers(self):\n", " optimizer = torch.optim.Adam(self.parameters(), lr=self.lr)\n", @@ -194,7 +196,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 4, "metadata": {}, "outputs": [], "source": [ @@ -207,7 +209,7 @@ ")\n", "\n", "\n", - "def build_lightning_config_from_existing_code():\n", + "def build_lightning_config_from_existing_code(use_gpu):\n", " # Create a config builder to encapsulate all required parameters.\n", " # Note that model instantiation and fitting will occur later in the LightingTrainer,\n", " # rather than in the config builder.\n", @@ -234,7 +236,7 @@ " # )\n", " config_builder.trainer(\n", " max_epochs=10,\n", - " accelerator=\"cpu\",\n", + " accelerator=\"gpu\" if use_gpu else \"cpu\",\n", " log_every_n_steps=100,\n", " logger=CSVLogger(\"logs\"),\n", " )\n", @@ -261,22 +263,23 @@ }, { "cell_type": "code", - "execution_count": 53, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ - "lightning_config = build_lightning_config_from_existing_code()" + "# Set it to False if you want to run without GPUs\n", + "use_gpu = True" ] }, { "cell_type": "code", - "execution_count": 54, + "execution_count": 6, "metadata": {}, "outputs": [], "source": [ - "scaling_config = ScalingConfig(\n", - " num_workers=4, use_gpu=True, resources_per_worker={\"CPU\": 1, \"GPU\": 1}\n", - ")\n", + "lightning_config = build_lightning_config_from_existing_code(use_gpu=use_gpu)\n", + "\n", + "scaling_config = ScalingConfig(num_workers=4, use_gpu=use_gpu)\n", "\n", "run_config = RunConfig(\n", " 
name=\"ptl-mnist-example\",\n", @@ -286,31 +289,8 @@ " checkpoint_score_attribute=\"val_accuracy\",\n", " checkpoint_score_order=\"max\",\n", " ),\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": 55, - "metadata": { - "tags": [ - "remove-cell" - ] - }, - "outputs": [], - "source": [ - "if SMOKE_TEST:\n", - " scaling_config = ScalingConfig(\n", - " num_workers=4, use_gpu=False, resources_per_worker={\"CPU\": 1}\n", - " )" - ] - }, - { - "cell_type": "code", - "execution_count": 56, - "metadata": {}, - "outputs": [], - "source": [ + ")\n", + "\n", "trainer = LightningTrainer(\n", " lightning_config=lightning_config,\n", " scaling_config=scaling_config,\n", @@ -327,9 +307,21 @@ }, { "cell_type": "code", - "execution_count": 57, + "execution_count": 8, "metadata": {}, "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "find: ‘.git’: No such file or directory\n", + "2023-04-28 09:30:43,657\tINFO worker.py:1432 -- Connecting to existing Ray cluster at address: 10.0.12.241:6379...\n", + "2023-04-28 09:30:43,665\tINFO worker.py:1607 -- Connected to Ray cluster. View the dashboard at https://console.anyscale-staging.com/api/v2/sessions/ses_vhpce9uvpnmhikmask3c5db399/services?redirect_to=dashboard \n", + "2023-04-28 09:30:43,671\tINFO packaging.py:347 -- Pushing file package 'gcs://_ray_pkg_c896ee9346ecab5d19a2dbcff95e2084.zip' (0.07MiB) to Ray cluster...\n", + "2023-04-28 09:30:43,672\tINFO packaging.py:360 -- Successfully pushed file package 'gcs://_ray_pkg_c896ee9346ecab5d19a2dbcff95e2084.zip'.\n", + "2023-04-28 09:30:43,725\tINFO tune.py:221 -- Initializing Ray automatically. For cluster usage or custom Ray initialization, call `ray.init(...)` before `Tuner(...)`.\n" + ] + }, { "data": { "text/html": [ @@ -339,16 +331,16 @@ "

    Tune Status

    \n", " \n", "\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", "\n", "
    Current time:2023-03-23 17:06:23
    Running for: 00:00:42.86
    Memory: 5.2/62.0 GiB
    Current time:2023-04-28 09:31:32
    Running for: 00:00:48.90
    Memory: 16.9/186.6 GiB
    \n", "
    \n", "
    \n", "
    \n", "

    System Info

    \n", - " Using FIFO scheduling algorithm.
    Logical resource usage: 0/16 CPUs, 0/0 GPUs\n", + " Using FIFO scheduling algorithm.
    Logical resource usage: 0/48 CPUs, 0/4 GPUs (0.0/1.0 accelerator_type:T4)\n", "
    \n", " \n", "
    \n", @@ -357,10 +349,10 @@ "

    Trial Status

    \n", " \n", "\n", - "\n", + "\n", "\n", "\n", - "\n", + "\n", "\n", "
    Trial name status loc iter total time (s) train_loss val_accuracy val_loss
    Trial name status loc iter total time (s) train_loss val_accuracy val_loss
    LightningTrainer_9cfa6_00000TERMINATED10.0.61.115:358929 10 32.1313 0.0822004 0.969926 -12.5678
    LightningTrainer_0593e_00000TERMINATED10.0.12.241:56808 10 33.056 0.0840481 0.970436 -12.5445
    \n", "
    \n", @@ -407,39 +399,123 @@ "name": "stderr", "output_type": "stream", "text": [ - "(pid=358929) /home/ray/anaconda3/lib/python3.8/site-packages/xgboost/compat.py:31: FutureWarning: pandas.Int64Index is deprecated and will be removed from pandas in a future version. Use pandas.Index with the appropriate dtype instead.\n", - "(pid=358929) from pandas import MultiIndex, Int64Index\n", - "(RayTrainWorker pid=359239) 2023-03-23 17:05:52,362\tINFO config.py:86 -- Setting up process group for: env:// [rank=0, world_size=4]\n", - "(RayTrainWorker pid=359241) /home/ray/anaconda3/lib/python3.8/site-packages/xgboost/compat.py:31: FutureWarning: pandas.Int64Index is deprecated and will be removed from pandas in a future version. Use pandas.Index with the appropriate dtype instead.\n", - "(RayTrainWorker pid=359241) from pandas import MultiIndex, Int64Index\n", - "(RayTrainWorker pid=359242) /home/ray/anaconda3/lib/python3.8/site-packages/xgboost/compat.py:31: FutureWarning: pandas.Int64Index is deprecated and will be removed from pandas in a future version. Use pandas.Index with the appropriate dtype instead.\n", - "(RayTrainWorker pid=359242) from pandas import MultiIndex, Int64Index\n", - "(RayTrainWorker pid=359239) /home/ray/anaconda3/lib/python3.8/site-packages/xgboost/compat.py:31: FutureWarning: pandas.Int64Index is deprecated and will be removed from pandas in a future version. Use pandas.Index with the appropriate dtype instead.\n", - "(RayTrainWorker pid=359239) from pandas import MultiIndex, Int64Index\n", - "(RayTrainWorker pid=359240) /home/ray/anaconda3/lib/python3.8/site-packages/xgboost/compat.py:31: FutureWarning: pandas.Int64Index is deprecated and will be removed from pandas in a future version. 
Use pandas.Index with the appropriate dtype instead.\n", - "(RayTrainWorker pid=359240) from pandas import MultiIndex, Int64Index\n", - "(RayTrainWorker pid=359239) GPU available: False, used: False\n", - "(RayTrainWorker pid=359239) TPU available: False, using: 0 TPU cores\n", - "(RayTrainWorker pid=359239) IPU available: False, using: 0 IPUs\n", - "(RayTrainWorker pid=359239) HPU available: False, using: 0 HPUs\n", - "(RayTrainWorker pid=359241) Missing logger folder: logs/lightning_logs\n", - "(RayTrainWorker pid=359242) Missing logger folder: logs/lightning_logs\n", - "(RayTrainWorker pid=359239) Missing logger folder: logs/lightning_logs\n", - "(RayTrainWorker pid=359239) \n", - "(RayTrainWorker pid=359239) | Name | Type | Params\n", - "(RayTrainWorker pid=359239) -------------------------------------------------\n", - "(RayTrainWorker pid=359239) 0 | linear_relu_stack | Sequential | 101 K \n", - "(RayTrainWorker pid=359239) 1 | accuracy | Accuracy | 0 \n", - "(RayTrainWorker pid=359239) -------------------------------------------------\n", - "(RayTrainWorker pid=359239) 101 K Trainable params\n", - "(RayTrainWorker pid=359239) 0 Non-trainable params\n", - "(RayTrainWorker pid=359239) 101 K Total params\n", - "(RayTrainWorker pid=359239) 0.407 Total estimated model params size (MB)\n", - "(RayTrainWorker pid=359240) Missing logger folder: logs/lightning_logs\n", - "(RayTrainWorker pid=359241) [W reducer.cpp:1298] Warning: find_unused_parameters=True was specified in DDP constructor, but did not find any unused parameters in the forward pass. This flag results in an extra traversal of the autograd graph every iteration, which can adversely affect performance. If your model indeed never has any unused parameters in the forward pass, consider turning this flag off. Note that this warning may be a false positive if your model has flow control causing later iterations to have unused parameters. 
(function operator())\n", - "(RayTrainWorker pid=359239) [W reducer.cpp:1298] Warning: find_unused_parameters=True was specified in DDP constructor, but did not find any unused parameters in the forward pass. This flag results in an extra traversal of the autograd graph every iteration, which can adversely affect performance. If your model indeed never has any unused parameters in the forward pass, consider turning this flag off. Note that this warning may be a false positive if your model has flow control causing later iterations to have unused parameters. (function operator())\n", - "(RayTrainWorker pid=359242) [W reducer.cpp:1298] Warning: find_unused_parameters=True was specified in DDP constructor, but did not find any unused parameters in the forward pass. This flag results in an extra traversal of the autograd graph every iteration, which can adversely affect performance. If your model indeed never has any unused parameters in the forward pass, consider turning this flag off. Note that this warning may be a false positive if your model has flow control causing later iterations to have unused parameters. (function operator())\n", - "(RayTrainWorker pid=359240) [W reducer.cpp:1298] Warning: find_unused_parameters=True was specified in DDP constructor, but did not find any unused parameters in the forward pass. This flag results in an extra traversal of the autograd graph every iteration, which can adversely affect performance. If your model indeed never has any unused parameters in the forward pass, consider turning this flag off. Note that this warning may be a false positive if your model has flow control causing later iterations to have unused parameters. (function operator())\n" + "(pid=56808) /mnt/cluster_storage/pypi/lib/python3.9/site-packages/neptune/common/warnings.py:62: NeptuneDeprecationWarning: You're importing the Neptune client library via the deprecated `neptune.new` module, which will be removed in a future release. 
Import directly from `neptune` instead.\n", + "(pid=56808) warnings.warn(\n", + "(LightningTrainer pid=56808) 2023-04-28 09:31:00,123\tINFO backend_executor.py:128 -- Starting distributed worker processes: ['57429 (10.0.12.241)', '57430 (10.0.12.241)', '57431 (10.0.12.241)', '57432 (10.0.12.241)']\n", + "(RayTrainWorker pid=57429) 2023-04-28 09:31:01,088\tINFO config.py:86 -- Setting up process group for: env:// [rank=0, world_size=4]\n", + "(RayTrainWorker pid=57431) warnings.warn(\n", + "(RayTrainWorker pid=57431) warnings.warn(\n", + "(RayTrainWorker pid=57429) /mnt/cluster_storage/pypi/lib/python3.9/site-packages/neptune/common/warnings.py:62: NeptuneDeprecationWarning: You're importing the Neptune client library via the deprecated `neptune.new` module, which will be removed in a future release. Import directly from `neptune` instead.\n", + "(RayTrainWorker pid=57429) warnings.warn(\n", + "(RayTrainWorker pid=57429) GPU available: True, used: True\n", + "(RayTrainWorker pid=57429) TPU available: False, using: 0 TPU cores\n", + "(RayTrainWorker pid=57429) IPU available: False, using: 0 IPUs\n", + "(RayTrainWorker pid=57429) HPU available: False, using: 0 HPUs\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "(RayTrainWorker pid=57432) Downloading http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + " 0%| | 0/9912422 [00:00Trial Progress\n", " \n", "\n", - "\n", + "\n", "\n", "\n", - "\n", + "\n", "\n", "
    Trial name _report_on date done epoch experiment_taghostname iterations_since_restorenode_ip pidshould_checkpoint step time_since_restore time_this_iter_s time_total_s timestamp train_loss training_iterationtrial_id val_accuracy val_loss
    Trial name _report_on date done epoch experiment_taghostname iterations_since_restorenode_ip pidshould_checkpoint step time_since_restore time_this_iter_s time_total_s timestamp train_loss training_iterationtrial_id val_accuracy val_loss
    LightningTrainer_9cfa6_00000train_epoch_end2023-03-23_17-06-20True 9 0ip-10-0-61-115 1010.0.61.115358929True 1080 32.1313 2.26905 32.1313 1679616380 0.0822004 109cfa6_00000 0.969926 -12.5678
    LightningTrainer_0593e_00000train_epoch_end2023-04-28_09-31-29True 9 0ip-10-0-12-241 1010.0.12.24156808True 1080 33.056 1.58153 33.056 1682699489 0.0840481 100593e_00000 0.970436 -12.5445
    \n", "
    \n", @@ -481,27 +557,27 @@ "name": "stderr", "output_type": "stream", "text": [ - "2023-03-23 17:06:23,588\tINFO tune.py:817 -- Total run time: 42.87 seconds (42.86 seconds for the tuning loop).\n" + "2023-04-28 09:31:32,674\tINFO tune.py:1010 -- Total run time: 48.95 seconds (48.90 seconds for the tuning loop).\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ - "Validation Accuracy: 0.9699258804321289\n" + "Validation Accuracy: 0.9704360961914062\n" ] }, { "data": { "text/plain": [ "Result(\n", - " metrics={'_report_on': 'train_epoch_end', 'train_loss': 0.08220043778419495, 'val_accuracy': 0.9699258804321289, 'val_loss': -12.567845344543457, 'epoch': 9, 'step': 1080, 'should_checkpoint': True, 'done': True, 'trial_id': '9cfa6_00000', 'experiment_tag': '0'},\n", - " log_dir=PosixPath('/tmp/ray_results/ptl-mnist-example/LightningTrainer_9cfa6_00000_0_2023-03-23_17-05-40'),\n", - " checkpoint=LightningCheckpoint(local_path=/tmp/ray_results/ptl-mnist-example/LightningTrainer_9cfa6_00000_0_2023-03-23_17-05-40/checkpoint_000009)\n", + " metrics={'_report_on': 'train_epoch_end', 'train_loss': 0.0840480849146843, 'val_accuracy': 0.9704360961914062, 'val_loss': -12.544519424438477, 'epoch': 9, 'step': 1080, 'should_checkpoint': True, 'done': True, 'trial_id': '0593e_00000', 'experiment_tag': '0'},\n", + " path='/tmp/ray_results/ptl-mnist-example/LightningTrainer_0593e_00000_0_2023-04-28_09-30-46',\n", + " checkpoint=LightningCheckpoint(local_path=/tmp/ray_results/ptl-mnist-example/LightningTrainer_0593e_00000_0_2023-04-28_09-30-46/checkpoint_000009)\n", ")" ] }, - "execution_count": 57, + "execution_count": 8, "metadata": {}, "output_type": "execute_result" } @@ -523,7 +599,7 @@ }, { "cell_type": "code", - "execution_count": 58, + "execution_count": 9, "metadata": { "tags": [] }, @@ -535,7 +611,7 @@ }, { "cell_type": "code", - "execution_count": 59, + "execution_count": 10, "metadata": { "tags": [] }, @@ -544,18 +620,21 @@ "name": "stderr", "output_type": 
"stream", "text": [ - "/home/ray/anaconda3/lib/python3.8/site-packages/pytorch_lightning/loops/utilities.py:92: PossibleUserWarning: `max_epochs` was not set. Setting it to 1000 epochs. To train without an epoch limit, set `max_epochs=-1`.\n", + "/home/ray/anaconda3/lib/python3.9/site-packages/pytorch_lightning/loops/utilities.py:92: PossibleUserWarning: `max_epochs` was not set. Setting it to 1000 epochs. To train without an epoch limit, set `max_epochs=-1`.\n", " rank_zero_warn(\n", - "GPU available: False, used: False\n", + "GPU available: True, used: False\n", "TPU available: False, using: 0 TPU cores\n", "IPU available: False, using: 0 IPUs\n", - "HPU available: False, using: 0 HPUs\n" + "HPU available: False, using: 0 HPUs\n", + "/home/ray/anaconda3/lib/python3.9/site-packages/pytorch_lightning/trainer/trainer.py:1814: PossibleUserWarning: GPU available but not used. Set `accelerator` and `devices` using `Trainer(accelerator='gpu', devices=4)`.\n", + " rank_zero_warn(\n", + "Missing logger folder: /home/ray/default/doc/source/train/examples/lightning/lightning_logs\n" ] }, { "data": { "application/vnd.jupyter.widget-view+json": { - "model_id": "fe27955de52247bfadf2a4320af1cf44", + "model_id": "c3034eb12cf846b0aff76f28c348be06", "version_major": 2, "version_minor": 0 }, @@ -566,13 +645,25 @@ "metadata": {}, "output_type": "display_data" }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2023-04-28 09:31:33.611773: I tensorflow/core/platform/cpu_feature_guard.cc:193] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: AVX2 AVX512F AVX512_VNNI FMA\n", + "To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.\n", + "2023-04-28 09:31:33.762802: I tensorflow/core/util/port.cc:104] oneDNN custom operations are on. 
You may see slightly different numerical results due to floating-point round-off errors from different computation orders. To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`.\n", + "2023-04-28 09:31:34.628099: W tensorflow/compiler/xla/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'libnvinfer.so.7'; dlerror: libnvinfer.so.7: cannot open shared object file: No such file or directory; LD_LIBRARY_PATH: /usr/local/nvidia/lib:/usr/local/nvidia/lib64\n", + "2023-04-28 09:31:34.628189: W tensorflow/compiler/xla/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'libnvinfer_plugin.so.7'; dlerror: libnvinfer_plugin.so.7: cannot open shared object file: No such file or directory; LD_LIBRARY_PATH: /usr/local/nvidia/lib:/usr/local/nvidia/lib64\n", + "2023-04-28 09:31:34.628194: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Cannot dlopen some TensorRT libraries. If you would like to use Nvidia GPU with TensorRT, please make sure the missing libraries mentioned above are installed properly.\n" + ] + }, { "data": { "text/html": [ "
    ┏━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━┓\n",
            "┃        Test metric               DataLoader 0        ┃\n",
            "┡━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━┩\n",
    -       "│       test_accuracy           0.9742000102996826     │\n",
    +       "│       test_accuracy           0.9735999703407288     │\n",
            "└───────────────────────────┴───────────────────────────┘\n",
            "
    \n" ], @@ -580,7 +671,7 @@ "┏━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━┓\n", "┃\u001b[1m \u001b[0m\u001b[1m Test metric \u001b[0m\u001b[1m \u001b[0m┃\u001b[1m \u001b[0m\u001b[1m DataLoader 0 \u001b[0m\u001b[1m \u001b[0m┃\n", "┡━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━┩\n", - "│\u001b[36m \u001b[0m\u001b[36m test_accuracy \u001b[0m\u001b[36m \u001b[0m│\u001b[35m \u001b[0m\u001b[35m 0.9742000102996826 \u001b[0m\u001b[35m \u001b[0m│\n", + "│\u001b[36m \u001b[0m\u001b[36m test_accuracy \u001b[0m\u001b[36m \u001b[0m│\u001b[35m \u001b[0m\u001b[35m 0.9735999703407288 \u001b[0m\u001b[35m \u001b[0m│\n", "└───────────────────────────┴───────────────────────────┘\n" ] }, @@ -604,7 +695,7 @@ }, { "cell_type": "code", - "execution_count": 60, + "execution_count": 11, "metadata": { "tags": [] }, @@ -613,7 +704,7 @@ "name": "stdout", "output_type": "stream", "text": [ - "Accuracy: 0.9742\n" + "Accuracy: 0.9736\n" ] } ], @@ -621,7 +712,7 @@ "from ray.train.lightning import LightningPredictor\n", "\n", "predictor = LightningPredictor.from_checkpoint(\n", - " checkpoint, MNISTClassifier, use_gpu=False\n", + " checkpoint, MNISTClassifier, use_gpu=use_gpu\n", ")\n", "\n", "\n", From 8a52b1f3550500ab031d8248b185395ed004f8dd Mon Sep 17 00:00:00 2001 From: Cuong Nguyen <128072568+can-anyscale@users.noreply.github.com> Date: Mon, 1 May 2023 11:56:55 -0700 Subject: [PATCH 169/424] [CI][Jailed] Add jailed flag for release tests (#34748) The discussion about test policy, in particular, jailed tests seem to go well. I'm adding this flag now so we can start to jail some tests. 
Here is the policy of jailed tests: - Tests are jailed if the corresponding issue tasks missed it SLA - Jailed tests are skipped/non-blocking by default on automatic master run - Jailed tests run/block by default on release + manually triggered runs Signed-off-by: Cuong Nguyen --- release/ray_release/buildkite/filter.py | 3 +++ release/ray_release/schema.json | 3 +++ release/ray_release/scripts/build_pipeline.py | 14 +++++++++++++- 3 files changed, 19 insertions(+), 1 deletion(-) diff --git a/release/ray_release/buildkite/filter.py b/release/ray_release/buildkite/filter.py index 73af1b841252..4307068615ec 100644 --- a/release/ray_release/buildkite/filter.py +++ b/release/ray_release/buildkite/filter.py @@ -21,6 +21,7 @@ def filter_tests( frequency: Frequency, test_attr_regex_filters: Optional[Dict[str, str]] = None, prefer_smoke_tests: bool = False, + run_jailed_tests: bool = False, ) -> List[Tuple[Test, bool]]: if test_attr_regex_filters is None: test_attr_regex_filters = {} @@ -35,6 +36,8 @@ def filter_tests( break if attr_mismatch: continue + if not run_jailed_tests and test.get("jailed", False): + continue test_frequency = get_frequency(test["frequency"]) diff --git a/release/ray_release/schema.json b/release/ray_release/schema.json index 979de3269b03..032f46db3f0b 100644 --- a/release/ray_release/schema.json +++ b/release/ray_release/schema.json @@ -21,6 +21,9 @@ "stable": { "type": "boolean" }, + "jailed": { + "type": "boolean" + }, "python": { "type": "string", "enum": [ diff --git a/release/ray_release/scripts/build_pipeline.py b/release/ray_release/scripts/build_pipeline.py index 28a4fe8ce0d2..0913656b5dbb 100644 --- a/release/ray_release/scripts/build_pipeline.py +++ b/release/ray_release/scripts/build_pipeline.py @@ -45,7 +45,18 @@ "(for internal use)." 
), ) -def main(test_collection_file: Optional[str] = None, no_clone_repo: bool = False): +@click.option( + "--run-jailed-tests", + is_flag=True, + show_default=True, + default=False, + help=("Will run jailed tests."), +) +def main( + test_collection_file: Optional[str] = None, + no_clone_repo: bool = False, + run_jailed_tests: bool = False, +): settings = get_pipeline_settings() repo = settings["ray_test_repo"] @@ -132,6 +143,7 @@ def main(test_collection_file: Optional[str] = None, no_clone_repo: bool = False frequency=frequency, test_attr_regex_filters=test_attr_regex_filters, prefer_smoke_tests=prefer_smoke_tests, + run_jailed_tests=run_jailed_tests, ) logger.info(f"Found {len(filtered_tests)} tests to run.") if len(filtered_tests) == 0: From 216073334741431d6565e0bf1aacdbe28f8d4613 Mon Sep 17 00:00:00 2001 From: Simran Mhatre Date: Mon, 1 May 2023 12:41:16 -0700 Subject: [PATCH 170/424] [docs] Fixed the link for ray core section (#34720) The link for the learn more in the ray core card was incorrect. Fixed the issue now. Signed-off-by: Simran Mhatre --- doc/source/index.md | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/doc/source/index.md b/doc/source/index.md index cee36c217b45..5df59f327e8b 100644 --- a/doc/source/index.md +++ b/doc/source/index.md @@ -133,7 +133,7 @@ dataset_transformed = preprocessor.fit_transform(dataset=dataset)
    @@ -166,7 +166,7 @@ result = trainer.fit()

    Scale generic Python code with simple, foundational primitives that enable a high degree of control for building distributed applications or custom platforms.

    - Learn more about Core > + Learn more about Core >
    @@ -391,7 +391,7 @@ ppo_algo.evaluate()

    Contribute to Ray

    -
    +

    Contributor's guide

    From 2237c9bd74aa88eb5ded1c11479d9a8856525c73 Mon Sep 17 00:00:00 2001 From: Cuong Nguyen <128072568+can-anyscale@users.noreply.github.com> Date: Mon, 1 May 2023 12:51:52 -0700 Subject: [PATCH 171/424] [CI] Deprecate sdk_runner and prod_v1 from OSS release tests (#34804) * Run tests using anyscale job submission Signed-off-by: Cuong Nguyen * Make these tests run Signed-off-by: Cuong Nguyen * Fix no file found Signed-off-by: Cuong Nguyen * Specify key Signed-off-by: Cuong Nguyen * fix key name Signed-off-by: Cuong Nguyen * Debugging Signed-off-by: Cuong Nguyen * Keep old cloud id Signed-off-by: Cuong Nguyen * Use another role Signed-off-by: Cuong Nguyen * Update cloud id Signed-off-by: Cuong Nguyen * Use ray-test-runner Signed-off-by: Cuong Nguyen * ray-test-runner everywhere Signed-off-by: Cuong Nguyen * Go back to ray-autoscaler-v1 Signed-off-by: Cuong Nguyen * Remove logging Signed-off-by: Cuong Nguyen * Update example yaml Signed-off-by: Cuong Nguyen --------- Signed-off-by: Cuong Nguyen --- python/ray/autoscaler/aws/example-full.yaml | 2 ++ python/ray/autoscaler/aws/example-minimal.yaml | 2 ++ python/ray/autoscaler/aws/tests/aws_cluster.yaml | 2 ++ python/ray/autoscaler/aws/tests/aws_compute.yaml | 2 +- .../aws/tests/aws_launch_and_verify_cluster.py | 6 ++++-- release/release_tests.yaml | 15 --------------- 6 files changed, 11 insertions(+), 18 deletions(-) diff --git a/python/ray/autoscaler/aws/example-full.yaml b/python/ray/autoscaler/aws/example-full.yaml index 49df110fc64c..75ce0b3e99b7 100644 --- a/python/ray/autoscaler/aws/example-full.yaml +++ b/python/ray/autoscaler/aws/example-full.yaml @@ -45,6 +45,8 @@ provider: # Whether to allow node reuse. If set to False, nodes will be terminated # instead of stopped. cache_stopped_nodes: True # If not present, the default is True. + key_pair: + key_name: aws-cluster-launcher-test # How Ray will authenticate with newly launched nodes. 
auth: diff --git a/python/ray/autoscaler/aws/example-minimal.yaml b/python/ray/autoscaler/aws/example-minimal.yaml index 09a2727d1311..0a9e908bcc73 100644 --- a/python/ray/autoscaler/aws/example-minimal.yaml +++ b/python/ray/autoscaler/aws/example-minimal.yaml @@ -5,3 +5,5 @@ cluster_name: aws-example-minimal provider: type: aws region: us-west-2 + key_pair: + key_name: aws-cluster-launcher-test diff --git a/python/ray/autoscaler/aws/tests/aws_cluster.yaml b/python/ray/autoscaler/aws/tests/aws_cluster.yaml index b226c723129c..311c1e90673a 100644 --- a/python/ray/autoscaler/aws/tests/aws_cluster.yaml +++ b/python/ray/autoscaler/aws/tests/aws_cluster.yaml @@ -8,6 +8,8 @@ provider: type: aws region: us-west-2 cache_stopped_nodes: False + key_pair: + key_name: aws-cluster-launcher-test available_node_types: ray.head.default: diff --git a/python/ray/autoscaler/aws/tests/aws_compute.yaml b/python/ray/autoscaler/aws/tests/aws_compute.yaml index 88a9dd055311..1ef4e02ba1e8 100644 --- a/python/ray/autoscaler/aws/tests/aws_compute.yaml +++ b/python/ray/autoscaler/aws/tests/aws_compute.yaml @@ -1,4 +1,4 @@ -cloud_id: cld_17WvYIBBkdgLwEUNcLeRAE +cloud_id: {{env["ANYSCALE_CLOUD_ID"]}} region: us-west-2 aws: diff --git a/python/ray/autoscaler/aws/tests/aws_launch_and_verify_cluster.py b/python/ray/autoscaler/aws/tests/aws_launch_and_verify_cluster.py index 17253f8c4fc3..41cd8eadc03a 100644 --- a/python/ray/autoscaler/aws/tests/aws_launch_and_verify_cluster.py +++ b/python/ray/autoscaler/aws/tests/aws_launch_and_verify_cluster.py @@ -65,11 +65,13 @@ def download_ssh_key(): s3_client = boto3.client("s3", region_name="us-west-2") # Set the name of the S3 bucket and the key to download - bucket_name = "oss-release-test-ssh-keys" - key_name = "ray-autoscaler_59_us-west-2.pem" + bucket_name = "aws-cluster-launcher-test" + key_name = "aws-cluster-launcher-test.pem" # Download the key from the S3 bucket to a local file local_key_path = os.path.expanduser(f"~/.ssh/{key_name}") + if not 
os.path.exists(os.path.dirname(local_key_path)): + os.makedirs(os.path.dirname(local_key_path)) s3_client.download_file(bucket_name, key_name, local_key_path) # Set permissions on the key file diff --git a/release/release_tests.yaml b/release/release_tests.yaml index 92b5681f7441..cda6f50a2d0c 100644 --- a/release/release_tests.yaml +++ b/release/release_tests.yaml @@ -5990,8 +5990,6 @@ working_dir: k8s_tests stable: false - # TODO: Migrate this test to Anyscale Jobs / staging_v2 - env: prod_v1 frequency: nightly team: serve @@ -6003,7 +6001,6 @@ timeout: 28800 # 8h prepare: bash prepare.sh script: python run_gcs_ft_on_k8s.py - type: sdk_command - name: aws_cluster_launcher group: cluster-launcher-test @@ -6011,9 +6008,6 @@ stable: true - # TODO: Migrate this test to Anyscale Jobs / staging_v2 - env: prod_v1 - frequency: nightly team: core cluster: @@ -6023,7 +6017,6 @@ run: timeout: 1200 script: cd tests && python aws_launch_and_verify_cluster.py aws_cluster.yaml - type: sdk_command - name: aws_cluster_launcher_minimal group: cluster-launcher-test @@ -6031,9 +6024,6 @@ stable: true - # TODO: Migrate this test to Anyscale Jobs / staging_v2 - env: prod_v1 - frequency: nightly team: core cluster: @@ -6043,7 +6033,6 @@ run: timeout: 1200 script: cd tests && python aws_launch_and_verify_cluster.py ../example-minimal.yaml - type: sdk_command - name: aws_cluster_launcher_full group: cluster-launcher-test @@ -6051,9 +6040,6 @@ stable: true - # TODO: Migrate this test to Anyscale Jobs / staging_v2 - env: prod_v1 - frequency: nightly team: core cluster: @@ -6063,4 +6049,3 @@ run: timeout: 1200 script: cd tests && python aws_launch_and_verify_cluster.py ../example-full.yaml - type: sdk_command \ No newline at end of file From d0a73848351207f3fef749394a3f77e5acb96ca8 Mon Sep 17 00:00:00 2001 From: Balaji Veeramani Date: Mon, 1 May 2023 13:05:54 -0700 Subject: [PATCH 172/424] [Data] Add `parquet_metadata_resolution` GCE variant (#34909) This PR tests 
parquet_metadata_resolution on GCE. It was previously only tested on EC2. --------- Signed-off-by: Balaji Veeramani --- release/nightly_tests/dataset/app_config.yaml | 1 - .../dataset/parquet_metadata_resolution.py | 15 ++++++++----- release/release_tests.yaml | 22 +++++++++---------- 3 files changed, 21 insertions(+), 17 deletions(-) diff --git a/release/nightly_tests/dataset/app_config.yaml b/release/nightly_tests/dataset/app_config.yaml index 613fd9e44294..12e8cd86d84b 100644 --- a/release/nightly_tests/dataset/app_config.yaml +++ b/release/nightly_tests/dataset/app_config.yaml @@ -3,7 +3,6 @@ base_image: {{ env["RAY_IMAGE_ML_NIGHTLY_GPU"] | default("anyscale/ray-ml:nightl python: pip_packages: - boto3 - - pyarrow<7.0.0 - tqdm conda_packages: [] diff --git a/release/nightly_tests/dataset/parquet_metadata_resolution.py b/release/nightly_tests/dataset/parquet_metadata_resolution.py index f3b59a554d92..119b2803ad61 100644 --- a/release/nightly_tests/dataset/parquet_metadata_resolution.py +++ b/release/nightly_tests/dataset/parquet_metadata_resolution.py @@ -5,6 +5,7 @@ parser = argparse.ArgumentParser(description="Parquet Metadata Read") parser.add_argument("--num-files", type=int, default=30) +parser.add_argument("--cloud", type=str, choices=["aws", "gcp"]) if __name__ == "__main__": @@ -16,11 +17,15 @@ num = args.num_files - files = [ - f"s3://shuffling-data-loader-benchmarks/data/r10_000_000_000-f1000" - f"/input_data_{i}.parquet.snappy" - for i in range(args.num_files) - ] + assert args.cloud in {"aws", "gcp"}, args.cloud + if args.cloud == "aws": + prefix = "s3://shuffling-data-loader-benchmarks/data/r10_000_000_000-f1000" + if args.cloud == "gcp": + # NOTE(@bveeramani): I made a mistake while transferring the files from S3 to + # GCS, so there's an extra "r10_000_000_000-f1000" in the URI. Don't worry about + # it. The files are the same. 
+ prefix = "gs://shuffling-data-loader-benchmarks/data/r10_000_000_000-f1000/r10_000_000_000-f1000" # noqa: E501 + files = [f"{prefix}/input_data_{i}.parquet.snappy" for i in range(args.num_files)] start = time.time() ray.data.read_parquet(files).count() # This should only read Parquet metadata. diff --git a/release/release_tests.yaml b/release/release_tests.yaml index cda6f50a2d0c..2daad40a8c00 100644 --- a/release/release_tests.yaml +++ b/release/release_tests.yaml @@ -5247,17 +5247,17 @@ run: # Expect the test to finish around 40 seconds. timeout: 100 - script: python parquet_metadata_resolution.py --num-files 915 - - # TODO: Port s3://shuffling-data-loader-benchmarks/ to GCS. - # variations: - # - __suffix__: aws - # - __suffix__: gce - # env: gce - # frequency: manual - # cluster: - # cluster_env: app_config.yaml - # cluster_compute: single_node_benchmark_compute_gce.yaml + script: python parquet_metadata_resolution.py --num-files 915 --cloud aws + + variations: + - __suffix__: aws + - __suffix__: gce + env: gce + frequency: manual + cluster: + cluster_compute: single_node_benchmark_compute_gce.yaml + run: + script: python parquet_metadata_resolution.py --num-files 915 --cloud gcp - name: dataset_random_access group: data-tests From 102206783cd004b268d54c60a2d2668fa0aa38ef Mon Sep 17 00:00:00 2001 From: Eric Liang Date: Mon, 1 May 2023 13:43:18 -0700 Subject: [PATCH 173/424] [doc] [data] Fix a couple broken tests from strict mode PR Signed-off-by: Eric Liang --- doc/source/ray-air/examples/gptj_batch_prediction.ipynb | 1 + .../ray-air/examples/huggingface_text_classification.ipynb | 2 +- .../ray-air/examples/stablediffusion_batch_prediction.ipynb | 1 + .../train/examples/lightning/lightning_cola_advanced.ipynb | 2 +- python/ray/data/tests/test_pipeline.py | 6 ++++++ 5 files changed, 10 insertions(+), 2 deletions(-) diff --git a/doc/source/ray-air/examples/gptj_batch_prediction.ipynb b/doc/source/ray-air/examples/gptj_batch_prediction.ipynb index 
3ddc7342af02..5dafa354f6ea 100644 --- a/doc/source/ray-air/examples/gptj_batch_prediction.ipynb +++ b/doc/source/ray-air/examples/gptj_batch_prediction.ipynb @@ -167,6 +167,7 @@ " PredictCallable,\n", " batch_size=4,\n", " fn_constructor_kwargs=dict(model_id=model_id, revision=revision),\n", + " batch_format=\"pandas\",\n", " compute=\"actors\",\n", " num_gpus=1,\n", " )\n", diff --git a/doc/source/ray-air/examples/huggingface_text_classification.ipynb b/doc/source/ray-air/examples/huggingface_text_classification.ipynb index ad52466f0d5d..b5db41eafc52 100644 --- a/doc/source/ray-air/examples/huggingface_text_classification.ipynb +++ b/doc/source/ray-air/examples/huggingface_text_classification.ipynb @@ -2106,7 +2106,7 @@ " task=\"text-classification\",\n", " device=0 if use_gpu else -1, # -1 is CPU, otherwise device index\n", ")\n", - "prediction = predictor.predict(ray_datasets[\"test\"].map_batches(lambda x: x[[\"sentence\"]]), num_gpus_per_worker=int(use_gpu))\n", + "prediction = predictor.predict(ray_datasets[\"test\"].map_batches(lambda x: x[[\"sentence\"]], batch_format=\"pandas\"), num_gpus_per_worker=int(use_gpu))\n", "prediction.show()" ] }, diff --git a/doc/source/ray-air/examples/stablediffusion_batch_prediction.ipynb b/doc/source/ray-air/examples/stablediffusion_batch_prediction.ipynb index e22ef72a4f46..9068a6f8c8c4 100644 --- a/doc/source/ray-air/examples/stablediffusion_batch_prediction.ipynb +++ b/doc/source/ray-air/examples/stablediffusion_batch_prediction.ipynb @@ -161,6 +161,7 @@ " batch_size=1,\n", " fn_constructor_kwargs=dict(model_id=model_id),\n", " compute=\"actors\",\n", + " batch_format=\"pandas\",\n", " num_gpus=1,\n", ")\n", "images = preds.take_all()" diff --git a/doc/source/train/examples/lightning/lightning_cola_advanced.ipynb b/doc/source/train/examples/lightning/lightning_cola_advanced.ipynb index a78b1b6703f9..363a3d164aeb 100644 --- a/doc/source/train/examples/lightning/lightning_cola_advanced.ipynb +++ 
b/doc/source/train/examples/lightning/lightning_cola_advanced.ipynb @@ -1448,7 +1448,7 @@ " return batch\n", "\n", "\n", - "results = predictions.map_batches(argmax).to_pandas()" + "results = predictions.map_batches(argmax, batch_format=\"pandas\").to_pandas()" ] }, { diff --git a/python/ray/data/tests/test_pipeline.py b/python/ray/data/tests/test_pipeline.py index 1465526068d9..5a250c729ffb 100644 --- a/python/ray/data/tests/test_pipeline.py +++ b/python/ray/data/tests/test_pipeline.py @@ -22,14 +22,20 @@ def __init__(self): self.infos = [] def warning(self, msg): + if "strict mode" in msg: + return self.warnings.append(msg) print("warning:", msg) def info(self, msg): + if "strict mode" in msg: + return self.infos.append(msg) print("info:", msg) def debug(self, msg): + if "strict mode" in msg: + return print("debug:", msg) From 78b71ab8c800937f4e13c0d9a7947cf85539ceb5 Mon Sep 17 00:00:00 2001 From: Cindy Zhang Date: Mon, 1 May 2023 14:22:52 -0700 Subject: [PATCH 174/424] [serve] Add prefix to serve status protos (#34842) Rename application status and deployment status protos, so that we don't need to worry about uniqueness between enums. 
--- .../io/ray/serve/api/ServeControllerClient.java | 6 +++--- python/ray/serve/_private/common.py | 12 ++++++++---- src/ray/protobuf/serve.proto | 16 ++++++++-------- 3 files changed, 19 insertions(+), 15 deletions(-) diff --git a/java/serve/src/main/java/io/ray/serve/api/ServeControllerClient.java b/java/serve/src/main/java/io/ray/serve/api/ServeControllerClient.java index 39c366f1ba66..7dda3ef977ec 100644 --- a/java/serve/src/main/java/io/ray/serve/api/ServeControllerClient.java +++ b/java/serve/src/main/java/io/ray/serve/api/ServeControllerClient.java @@ -193,14 +193,14 @@ private void waitForDeploymentHealthy(String name, Long timeoutS) { "Waiting for deployment {} to be HEALTHY, but deployment doesn't exist.", name)); } - if (status.getStatus() == DeploymentStatus.HEALTHY) { + if (status.getStatus() == DeploymentStatus.DEPLOYMENT_STATUS_HEALTHY) { isTimeout = false; break; - } else if (status.getStatus() == DeploymentStatus.UNHEALTHY) { + } else if (status.getStatus() == DeploymentStatus.DEPLOYMENT_STATUS_UNHEALTHY) { throw new RayServeException( LogUtil.format("Deployment {} is UNHEALTHY: {}", name, status.getMessage())); } else { - Preconditions.checkState(status.getStatus() == DeploymentStatus.UPDATING); + Preconditions.checkState(status.getStatus() == DeploymentStatus.DEPLOYMENT_STATUS_UPDATING); } LOGGER.debug("Waiting for {} to be healthy, current status: {}.", name, status.getStatus()); diff --git a/python/ray/serve/_private/common.py b/python/ray/serve/_private/common.py index 680de1def242..0285e87e36cf 100644 --- a/python/ray/serve/_private/common.py +++ b/python/ray/serve/_private/common.py @@ -58,15 +58,16 @@ def debug_string(self): def to_proto(self): return ApplicationStatusInfoProto( - status=self.status, + status=f"APPLICATION_STATUS_{self.status}", message=self.message, deployment_timestamp=self.deployment_timestamp, ) @classmethod def from_proto(cls, proto: ApplicationStatusInfoProto): + status = 
ApplicationStatusProto.Name(proto.status)[len("APPLICATION_STATUS_") :] return cls( - status=ApplicationStatus(ApplicationStatusProto.Name(proto.status)), + status=ApplicationStatus(status), message=proto.message, deployment_timestamp=proto.deployment_timestamp, ) @@ -89,14 +90,17 @@ def debug_string(self): def to_proto(self): return DeploymentStatusInfoProto( - name=self.name, status=self.status, message=self.message + name=self.name, + status=f"DEPLOYMENT_STATUS_{self.status}", + message=self.message, ) @classmethod def from_proto(cls, proto: DeploymentStatusInfoProto): + status = DeploymentStatusProto.Name(proto.status)[len("DEPLOYMENT_STATUS_") :] return cls( name=proto.name, - status=DeploymentStatus(DeploymentStatusProto.Name(proto.status)), + status=DeploymentStatus(status), message=proto.message, ) diff --git a/src/ray/protobuf/serve.proto b/src/ray/protobuf/serve.proto index bae767b4f747..7f6fdbf4a57c 100644 --- a/src/ray/protobuf/serve.proto +++ b/src/ray/protobuf/serve.proto @@ -187,9 +187,9 @@ message DeploymentRouteList { enum DeploymentStatus { // Keep frontend code of ServeDeploymentStatus in dashboard/client/src/type/serve.ts // in sync with this enum - UPDATING = 0; - HEALTHY = 1; - UNHEALTHY = 2; + DEPLOYMENT_STATUS_UPDATING = 0; + DEPLOYMENT_STATUS_HEALTHY = 1; + DEPLOYMENT_STATUS_UNHEALTHY = 2; } message DeploymentStatusInfo { @@ -206,11 +206,11 @@ message DeploymentStatusInfoList { enum ApplicationStatus { // Keep frontend code of ServeApplicationStatus in dashboard/client/src/type/serve.ts // in sync with this enum - DEPLOYING = 0; - RUNNING = 1; - DEPLOY_FAILED = 2; - DELETING = 3; - NOT_STARTED = 5; + APPLICATION_STATUS_DEPLOYING = 0; + APPLICATION_STATUS_RUNNING = 1; + APPLICATION_STATUS_DEPLOY_FAILED = 2; + APPLICATION_STATUS_DELETING = 3; + APPLICATION_STATUS_NOT_STARTED = 5; } message ApplicationStatusInfo { From bafce5909de30bdafc2c057951c2e0d265838d62 Mon Sep 17 00:00:00 2001 From: Simran Mhatre Date: Mon, 1 May 2023 15:00:51 -0700 
Subject: [PATCH 175/424] [docs] New Ray AIR link for try it out (#34924) replaced the Ray core collab example with Ray AIR Signed-off-by: Simran Mhatre --- doc/source/index.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/index.md b/doc/source/index.md index 5df59f327e8b..87bb228668fb 100644 --- a/doc/source/index.md +++ b/doc/source/index.md @@ -75,7 +75,7 @@

    pip install "ray[default]"

    Installation guide >

    -
    From 3d2a734e63f727e3c789ebd110459af2c81c9b42 Mon Sep 17 00:00:00 2001 From: Cindy Zhang Date: Mon, 1 May 2023 15:29:45 -0700 Subject: [PATCH 176/424] [serve] Add support for deploy refactor (#34845) Some preparation for upcoming deploy refactor PRs. - Rename fields of `ApplicationState` as private fields - Add route prefix to deployment info (it will be used by application state to deploy deployments in a reconciler loop) - Stop setting num replicas from autoscaling in `deployment_config.num_replicas`, instead put it in a separate field of deployment info. - determine initial autoscaled num replicas in deployment state manager .deploy() --- .../ray/serve/_private/application_state.py | 180 ++++++++++-------- python/ray/serve/_private/common.py | 19 +- python/ray/serve/_private/deploy_utils.py | 21 +- python/ray/serve/_private/deployment_state.py | 35 +++- python/ray/serve/controller.py | 10 +- .../ray/serve/tests/test_application_state.py | 158 ++++++++------- 6 files changed, 242 insertions(+), 181 deletions(-) diff --git a/python/ray/serve/_private/application_state.py b/python/ray/serve/_private/application_state.py index a89a7db41959..084780885ad5 100644 --- a/python/ray/serve/_private/application_state.py +++ b/python/ray/serve/_private/application_state.py @@ -1,5 +1,5 @@ import traceback -from typing import Dict, List +from typing import Dict, List, Optional from ray._private.usage.usage_lib import TagKey, record_extra_usage_tag from ray.serve._private.common import ApplicationStatus from ray.serve._private.deployment_state import DeploymentStateManager @@ -38,29 +38,61 @@ def __init__( deploy_obj_ref: Task ObjRef of deploying application. 
deployment_time: Deployment timestamp """ + + self._name = name + self._deploy_obj_ref = deploy_obj_ref + self._app_msg = "" + self._deployment_state_manager = deployment_state_manager + self._deployment_params: List[Dict] = [] + # This set tracks old deployments that are being deleted + self._deployments_to_delete = set() + self._ready_to_be_deleted = False + self._route_prefix = None + self._docs_path = None + if deploy_obj_ref: - self.status: ApplicationStatus = ApplicationStatus.DEPLOYING + self._status: ApplicationStatus = ApplicationStatus.DEPLOYING else: - self.status: ApplicationStatus = ApplicationStatus.NOT_STARTED - self.name = name - self.deployment_params: List[Dict] = [] - self.ready_to_be_deleted = False - self.deployment_state_manager = deployment_state_manager + self._status: ApplicationStatus = ApplicationStatus.NOT_STARTED if deployment_time: - self.deployment_timestamp = deployment_time + self._deployment_timestamp = deployment_time else: - self.deployment_timestamp = time.time() - self.deploy_obj_ref = deploy_obj_ref - self.app_msg = "" - self.route_prefix = None - self.docs_path = None + self._deployment_timestamp = time.time() - # This set tracks old deployments that are being deleted - self.deployments_to_delete = set() + @property + def ready_to_be_deleted(self) -> bool: + return self._ready_to_be_deleted + + @property + def route_prefix(self) -> Optional[str]: + return self._route_prefix + + @property + def docs_path(self) -> Optional[str]: + return self._docs_path + + @property + def status(self) -> ApplicationStatus: + return self._status + + @property + def deployment_timestamp(self) -> int: + return self._deployment_timestamp + + @property + def deploy_obj_ref(self) -> Optional[ObjectRef]: + return self._deploy_obj_ref + + @property + def deployments(self) -> List[str]: + """Return all deployments name from the application""" + if self._deployment_params is None: + return [] + return [params["name"] for params in 
self._deployment_params] def delete(self): """Delete the application""" - self.status = ApplicationStatus.DELETING + self._status = ApplicationStatus.DELETING def deploy(self, deployment_params: List[Dict]) -> List[str]: """Deploy the application. @@ -76,26 +108,26 @@ def deploy(self, deployment_params: List[Dict]) -> List[str]: # that are not used in the new deployment_params to_be_deployed_deployments = {params["name"] for params in deployment_params} cur_deployments_to_delete = [] - for deployment_name in self.get_all_deployments(): + for deployment_name in self.deployments: if deployment_name not in to_be_deployed_deployments: cur_deployments_to_delete.append(deployment_name) - self.deployments_to_delete.add(deployment_name) - self.deployment_params = deployment_params + self._deployments_to_delete.add(deployment_name) + self._deployment_params = deployment_params # Update route prefix for application num_route_prefixes = 0 num_docs_paths = 0 for deploy_param in deployment_params: if deploy_param.get("route_prefix") is not None: - self.route_prefix = deploy_param["route_prefix"] + self._route_prefix = deploy_param["route_prefix"] num_route_prefixes += 1 if deploy_param.get("docs_path") is not None: - self.docs_path = deploy_param["docs_path"] + self._docs_path = deploy_param["docs_path"] num_docs_paths += 1 if num_route_prefixes > 1: raise RayServeException( - f'Found multiple route prefix from application "{self.name}",' + f'Found multiple route prefix from application "{self._name}",' " Please specify only one route prefix for the application " "to avoid this issue." ) @@ -103,19 +135,19 @@ def deploy(self, deployment_params: List[Dict]) -> List[str]: # if user sets the docs path to None in their FastAPI app. if num_docs_paths > 1: raise RayServeException( - f'Found multiple deployments in application "{self.name}" that have ' + f'Found multiple deployments in application "{self._name}" that have ' "a docs path. 
This may be due to using multiple FastAPI deployments " "in your application. Please only include one deployment with a docs " "path in your application to avoid this issue." ) - self.status = ApplicationStatus.DEPLOYING + self._status = ApplicationStatus.DEPLOYING return cur_deployments_to_delete def update_obj_ref(self, deploy_obj_ref: ObjectRef, deployment_time: int): - self.deploy_obj_ref = deploy_obj_ref - self.deployment_timestamp = deployment_time - self.status = ApplicationStatus.DEPLOYING + self._deploy_obj_ref = deploy_obj_ref + self._deployment_timestamp = deployment_time + self._status = ApplicationStatus.DEPLOYING def _process_terminating_deployments(self): """Update the tracking for all deployments being deleted @@ -123,13 +155,13 @@ def _process_terminating_deployments(self): When a deployment's status is None, the deployment will be removed from application. """ - for name in list(self.deployments_to_delete): - if self.deployment_state_manager.get_deployment(name): + for name in list(self._deployments_to_delete): + if self._deployment_state_manager.get_deployment(name): logger.warning( - f"Deleting deployment {name} from application {self.name}." + f"Deleting deployment {name} from application {self._name}." ) else: - self.deployments_to_delete.remove(name) + self._deployments_to_delete.remove(name) def update(self): """Update the application status, maintain the ApplicationStatus. @@ -141,87 +173,77 @@ def update(self): DELETING: Mark ready_to_be_deleted as True when all deployments are gone. 
""" - if self.ready_to_be_deleted: + if self._ready_to_be_deleted: return - if self.status == ApplicationStatus.DELETING: + if self._status == ApplicationStatus.DELETING: mark_delete = True # Application won't be deleted until all deployments get cleaned up - for name in self.get_all_deployments(): - if self.deployment_state_manager.get_deployment(name): + for name in self.deployments: + if self._deployment_state_manager.get_deployment(name): logger.debug( - f"Deleting deployment {name} from application {self.name}." + f"Deleting deployment {name} from application {self._name}." ) mark_delete = False break - if self.deployments_to_delete: + if self._deployments_to_delete: mark_delete = False - self.ready_to_be_deleted = mark_delete + self._ready_to_be_deleted = mark_delete self._process_terminating_deployments() return - if self.status == ApplicationStatus.DEPLOYING: - if self.deploy_obj_ref: - finished, pending = ray.wait([self.deploy_obj_ref], timeout=0) + if self._status == ApplicationStatus.DEPLOYING: + if self._deploy_obj_ref: + finished, pending = ray.wait([self._deploy_obj_ref], timeout=0) if pending: return try: ray.get(finished[0]) - logger.info(f"Deploy task for app '{self.name}' ran successfully.") + logger.info(f"Deploy task for app '{self._name}' ran successfully.") except RayTaskError as e: - self.status = ApplicationStatus.DEPLOY_FAILED + self._status = ApplicationStatus.DEPLOY_FAILED # NOTE(zcin): we should use str(e) instead of traceback.format_exc() # here because the full details of the error is not displayed # properly with traceback.format_exc(). RayTaskError has its own # custom __str__ function. 
- self.app_msg = f"Deploying app '{self.name}' failed:\n{str(e)}" - self.deploy_obj_ref = None - logger.warning(self.app_msg) + self._app_msg = f"Deploying app '{self._name}' failed:\n{str(e)}" + self._deploy_obj_ref = None + logger.warning(self._app_msg) return except RuntimeEnvSetupError: - self.status = ApplicationStatus.DEPLOY_FAILED - self.app_msg = ( - f"Runtime env setup for app '{self.name}' " + self._status = ApplicationStatus.DEPLOY_FAILED + self._app_msg = ( + f"Runtime env setup for app '{self._name}' " f"failed:\n{traceback.format_exc()}" ) - self.deploy_obj_ref = None - logger.warning(self.app_msg) + self._deploy_obj_ref = None + logger.warning(self._app_msg) return deployments_statuses = ( - self.deployment_state_manager.get_deployment_statuses( - self.get_all_deployments() - ) + self._deployment_state_manager.get_deployment_statuses(self.deployments) ) num_health_deployments = 0 for deployment_status in deployments_statuses: if deployment_status.status == DeploymentStatus.UNHEALTHY: - self.status = ApplicationStatus.DEPLOY_FAILED + self._status = ApplicationStatus.DEPLOY_FAILED return if deployment_status.status == DeploymentStatus.HEALTHY: num_health_deployments += 1 if num_health_deployments == len(deployments_statuses): - self.status = ApplicationStatus.RUNNING + self._status = ApplicationStatus.RUNNING self._process_terminating_deployments() - def get_all_deployments(self) -> List[str]: - """Return all deployments name from the application""" - if self.deployment_params is None: - return [] - return [params["name"] for params in self.deployment_params] - def get_deployments_statuses(self) -> List[DeploymentStatusInfo]: """Return all deployment status information""" - return self.deployment_state_manager.get_deployment_statuses( - self.get_all_deployments() - ) + return self._deployment_state_manager.get_deployment_statuses(self.deployments) def get_application_status_info(self) -> ApplicationStatusInfo: """Return the application status 
information""" return ApplicationStatusInfo( - self.status, - message=self.app_msg, - deployment_timestamp=self.deployment_timestamp, + self._status, + message=self._app_msg, + deployment_timestamp=self._deployment_timestamp, ) def list_deployment_details(self) -> Dict[str, DeploymentDetails]: @@ -235,15 +257,15 @@ def list_deployment_details(self) -> Dict[str, DeploymentDetails]: been deleted. """ details = { - name: self.deployment_state_manager.get_deployment_details(name) - for name in self.get_all_deployments() + name: self._deployment_state_manager.get_deployment_details(name) + for name in self.deployments } return {k: v for k, v in details.items() if v is not None} class ApplicationStateManager: def __init__(self, deployment_state_manager): - self.deployment_state_manager = deployment_state_manager + self._deployment_state_manager = deployment_state_manager self._application_states: Dict[str, ApplicationState] = {} def delete_application(self, name: str): @@ -287,7 +309,7 @@ def deploy_application(self, name: str, deployment_args: List[Dict]): if name not in self._application_states: self._application_states[name] = ApplicationState( name, - self.deployment_state_manager, + self._deployment_state_manager, ) record_extra_usage_tag( TagKey.SERVE_NUM_APPS, str(len(self._application_states)) @@ -298,7 +320,7 @@ def get_deployments(self, app_name: str) -> List[str]: """Return all deployment names by app name""" if app_name not in self._application_states: return [] - return self._application_states[app_name].get_all_deployments() + return self._application_states[app_name].deployments def get_deployments_statuses(self, app_name: str) -> List[DeploymentStatusInfo]: """Return all deployment statuses by app name""" @@ -315,6 +337,11 @@ def get_app_status(self, name: str) -> ApplicationStatusInfo: ) return self._application_states[name].get_application_status_info() + def get_deployment_timestamp(self, name: str) -> float: + if name not in self._application_states: 
+ return -1 + return self._application_states[name].deployment_timestamp + def get_docs_path(self, app_name: str): return self._application_states[app_name].docs_path @@ -360,16 +387,11 @@ def create_application_state( else: self._application_states[name] = ApplicationState( name, - self.deployment_state_manager, + self._deployment_state_manager, deploy_obj_ref=deploy_obj_ref, deployment_time=deployment_time, ) - def get_deployment_timestamp(self, name: str) -> float: - if name not in self._application_states: - return -1 - return self._application_states[name].deployment_timestamp - def update(self): """Update each application state""" apps_to_be_deleted = [] diff --git a/python/ray/serve/_private/common.py b/python/ray/serve/_private/common.py index 0285e87e36cf..92b77468ac5f 100644 --- a/python/ray/serve/_private/common.py +++ b/python/ray/serve/_private/common.py @@ -6,7 +6,6 @@ import ray from ray.actor import ActorHandle from ray.serve.config import DeploymentConfig, ReplicaConfig -from ray.serve._private.autoscaling_policy import AutoscalingPolicy from ray.serve.generated.serve_pb2 import ( DeploymentInfo as DeploymentInfoProto, DeploymentStatusInfo as DeploymentStatusInfoProto, @@ -16,6 +15,7 @@ ApplicationStatusInfo as ApplicationStatusInfoProto, StatusOverview as StatusOverviewProto, ) +from ray.serve._private.autoscaling_policy import BasicAutoscalingPolicy EndpointTag = str ReplicaTag = str @@ -188,9 +188,9 @@ def __init__( actor_name: Optional[str] = None, version: Optional[str] = None, end_time_ms: Optional[int] = None, - autoscaling_policy: Optional[AutoscalingPolicy] = None, is_driver_deployment: Optional[bool] = False, app_name: Optional[str] = None, + route_prefix: str = None, ): self.deployment_config = deployment_config self.replica_config = replica_config @@ -201,7 +201,6 @@ def __init__( self.deployer_job_id = deployer_job_id # The time when this deployment was deleted. 
self.end_time_ms = end_time_ms - self.autoscaling_policy = autoscaling_policy # ephermal state self._cached_actor_def = None @@ -209,6 +208,17 @@ def __init__( self.is_driver_deployment = is_driver_deployment self.app_name = app_name + self.route_prefix = route_prefix + if deployment_config.autoscaling_config is not None: + self.autoscaling_policy = BasicAutoscalingPolicy( + deployment_config.autoscaling_config + ) + else: + self.autoscaling_policy = None + # Num replicas decided by the autoscaling policy. This is mutually exclusive + # from deployment_config.num_replicas. This value is updated through + # set_autoscaled_num_replicas() + self.autoscaled_num_replicas = None def __getstate__(self) -> Dict[Any, Any]: clean_dict = self.__dict__.copy() @@ -219,6 +229,9 @@ def __setstate__(self, d: Dict[Any, Any]) -> None: self.__dict__ = d self._cached_actor_def = None + def set_autoscaled_num_replicas(self, autoscaled_num_replicas): + self.autoscaled_num_replicas = autoscaled_num_replicas + @property def actor_def(self): # Delayed import as replica depends on this file. 
diff --git a/python/ray/serve/_private/deploy_utils.py b/python/ray/serve/_private/deploy_utils.py index d2c1ca350823..b36b6395a3fb 100644 --- a/python/ray/serve/_private/deploy_utils.py +++ b/python/ray/serve/_private/deploy_utils.py @@ -7,7 +7,6 @@ from ray.serve.config import ReplicaConfig, DeploymentConfig from ray.serve.schema import ServeApplicationSchema from ray.serve._private.constants import SERVE_LOGGER_NAME -from ray.serve._private.autoscaling_policy import BasicAutoscalingPolicy from ray.serve._private.common import DeploymentInfo import ray @@ -95,7 +94,7 @@ def deploy_args_to_deployment_info( deployment_config_proto_bytes: bytes, replica_config_proto_bytes: bytes, deployer_job_id: Union[str, bytes], - previous_deployment: DeploymentInfo, + route_prefix: Optional[str], is_driver_deployment: Optional[bool] = False, app_name: Optional[str] = None, ) -> DeploymentInfo: @@ -109,22 +108,6 @@ def deploy_args_to_deployment_info( replica_config_proto_bytes, deployment_config.needs_pickle() ) - autoscaling_config = deployment_config.autoscaling_config - if autoscaling_config is not None: - if autoscaling_config.initial_replicas is not None: - deployment_config.num_replicas = autoscaling_config.initial_replicas - else: - if previous_deployment is None: - deployment_config.num_replicas = autoscaling_config.min_replicas - else: - deployment_config.num_replicas = ( - previous_deployment.deployment_config.num_replicas - ) - - autoscaling_policy = BasicAutoscalingPolicy(autoscaling_config) - else: - autoscaling_policy = None - # Java API passes in JobID as bytes if isinstance(deployer_job_id, bytes): deployer_job_id = ray.JobID.from_int( @@ -138,9 +121,9 @@ def deploy_args_to_deployment_info( replica_config=replica_config, deployer_job_id=deployer_job_id, start_time_ms=int(time.time() * 1000), - autoscaling_policy=autoscaling_policy, is_driver_deployment=is_driver_deployment, app_name=app_name, + route_prefix=route_prefix, ) diff --git 
a/python/ray/serve/_private/deployment_state.py b/python/ray/serve/_private/deployment_state.py index 697e4c5f8dc4..1c18c6891b9e 100644 --- a/python/ray/serve/_private/deployment_state.py +++ b/python/ray/serve/_private/deployment_state.py @@ -20,6 +20,7 @@ ) from ray.actor import ActorHandle from ray.exceptions import RayActorError, RayError + from ray.serve._private.autoscaling_metrics import InMemoryMetricsStore from ray.serve._private.common import ( DeploymentInfo, @@ -97,7 +98,12 @@ def from_deployment_info( num_replicas = 0 version = None else: - num_replicas = info.deployment_config.num_replicas + # If autoscaling config is not none, num replicas should be decided based on + # the autoscaling policy and passed in as autoscaled_num_replicas + if info.autoscaled_num_replicas is not None: + num_replicas = info.autoscaled_num_replicas + else: + num_replicas = info.deployment_config.num_replicas version = DeploymentVersion( info.version, deployment_config=info.deployment_config, @@ -1224,6 +1230,18 @@ def deploy(self, deployment_info: DeploymentInfo) -> bool: ): return False + # If autoscaling config is not none, decide initial num replicas + autoscaling_config = deployment_info.deployment_config.autoscaling_config + if autoscaling_config is not None: + if autoscaling_config.initial_replicas is not None: + autoscaled_num_replicas = autoscaling_config.initial_replicas + else: + if existing_info is not None: + autoscaled_num_replicas = self._target_state.num_replicas + else: + autoscaled_num_replicas = autoscaling_config.min_replicas + deployment_info.set_autoscaled_num_replicas(autoscaled_num_replicas) + self._set_target_state(deployment_info) return True @@ -1248,15 +1266,15 @@ def autoscale( curr_info = self._target_state.info autoscaling_policy = self._target_state.info.autoscaling_policy decision_num_replicas = autoscaling_policy.get_decision_num_replicas( - curr_target_num_replicas=curr_info.deployment_config.num_replicas, + 
curr_target_num_replicas=self._target_state.num_replicas, current_num_ongoing_requests=current_num_ongoing_requests, current_handle_queued_queries=current_handle_queued_queries, ) - if decision_num_replicas == curr_info.deployment_config.num_replicas: + if decision_num_replicas == self._target_state.num_replicas: return new_config = copy(curr_info) - new_config.deployment_config.num_replicas = decision_num_replicas + new_config.set_autoscaled_num_replicas(decision_num_replicas) if new_config.version is None: new_config.version = self._target_state.version.code_version @@ -2204,6 +2222,15 @@ def deploy(self, deployment_name: str, deployment_info: DeploymentInfo) -> bool: return self._deployment_states[deployment_name].deploy(deployment_info) + def get_deployments_in_application(self, app_name: str) -> List[str]: + """Return list of deployment names in application.""" + states = [] + for name, deployment_state in self._deployment_states.items(): + if deployment_state.target_info.app_name == app_name: + states.append(name) + + return states + def delete_deployment(self, deployment_name: str): # This method must be idempotent. We should validate that the # specified deployment exists on the client. 
diff --git a/python/ray/serve/controller.py b/python/ray/serve/controller.py index ef45bb5647ed..f6ef48102085 100644 --- a/python/ray/serve/controller.py +++ b/python/ray/serve/controller.py @@ -353,13 +353,11 @@ def _recover_config_from_checkpoint(self): self.deploy_apps( ServeApplicationSchema.parse_obj(applications[0]), deployment_time, - False, ) else: self.deploy_apps( ServeDeploySchema.parse_obj({"applications": applications}), deployment_time, - False, ) def _all_running_replicas(self) -> Dict[str, List[RunningReplicaInfo]]: @@ -426,7 +424,7 @@ def deploy( deployment_config_proto_bytes=deployment_config_proto_bytes, replica_config_proto_bytes=replica_config_proto_bytes, deployer_job_id=deployer_job_id, - previous_deployment=self.deployment_state_manager.get_deployment(name), + route_prefix=route_prefix, is_driver_deployment=is_driver_deployment, app_name=app_name, ) @@ -473,7 +471,6 @@ def deploy_apps( self, config: Union[ServeApplicationSchema, ServeDeploySchema], deployment_time: float = 0, - _internal: bool = False, ) -> None: """Kicks off a task that deploys a set of Serve applications. @@ -494,11 +491,6 @@ def deploy_apps( deployment_time: set deployment_timestamp. If not provided, time.time() is used to indicate the deployment time. - - _internal: whether the config is provided by user or internally (i.e. it is - restored from a checkpoint). If it is provided by the user, we need to - prepend the app name to each deployment name. If not, it should already - be prepended. """ # TODO (zcin): We should still support single-app mode, i.e. # ServeApplicationSchema. 
Eventually, after migration is complete, we should diff --git a/python/ray/serve/tests/test_application_state.py b/python/ray/serve/tests/test_application_state.py index fb6108642f9e..28aeea2499b2 100644 --- a/python/ray/serve/tests/test_application_state.py +++ b/python/ray/serve/tests/test_application_state.py @@ -1,100 +1,109 @@ import sys import pytest -from typing import List +from typing import List, Tuple, Dict import time import ray from ray._private.test_utils import SignalActor from ray.serve._private.application_state import ApplicationStateManager -from ray.serve._private.common import ApplicationStatus +from ray.serve._private.common import ApplicationStatus, DeploymentInfo from ray.serve._private.common import DeploymentStatus, DeploymentStatusInfo +from ray.serve.config import DeploymentConfig, ReplicaConfig from ray.serve.exceptions import RayServeException class MockDeploymentStateManager: def __init__(self): - self.deployment_statuses = [ - DeploymentStatusInfo("d1", DeploymentStatus.UPDATING), - DeploymentStatusInfo("d2", DeploymentStatus.UPDATING), - ] + self.deployment_statuses: Dict[str, DeploymentStatusInfo] = dict() - def add_deployment_status(self, status: DeploymentStatusInfo): - assert type(status) == DeploymentStatusInfo - self.deployment_statuses.append(status) + def deploy(self, deployment_name: str, deployment_info: DeploymentInfo): + self.deployment_statuses[deployment_name] = DeploymentStatusInfo( + name=deployment_name, + status=DeploymentStatus.UPDATING, + message="", + ) + + @property + def deployments(self) -> List[str]: + return list(self.deployment_statuses.keys()) - def set_deployment_statuses_unhealthy(self, index: int = 0): - self.deployment_statuses[index].status = DeploymentStatus.UNHEALTHY + def set_deployment_statuses_unhealthy(self, name: str): + self.deployment_statuses[name].status = DeploymentStatus.UNHEALTHY - def set_deployment_statuses_healthy(self, index: int = 0): - self.deployment_statuses[index].status = 
DeploymentStatus.HEALTHY + def set_deployment_statuses_healthy(self, name: str): + self.deployment_statuses[name].status = DeploymentStatus.HEALTHY def get_deployment_statuses(self, deployment_names: List[str]): - return [ - status - for status in self.deployment_statuses - if status.name in deployment_names - ] + return list(self.deployment_statuses.values()) + + def get_deployment(self, deployment_name: str) -> DeploymentInfo: + if deployment_name in self.deployment_statuses: + # Return dummy deployment info object + return DeploymentInfo( + deployment_config=DeploymentConfig(num_replicas=1, user_config={}), + replica_config=ReplicaConfig.create(lambda x: x), + start_time_ms=0, + deployer_job_id="", + ) - def get_all_deployments(self): - return [d.name for d in self.deployment_statuses] + def delete_deployment(self, deployment_name: str): + del self.deployment_statuses[deployment_name] - def add_deployment(self, status: DeploymentStatusInfo): - self.deployment_statuses.append(status) - def get_deployment(self, deployment_name: str) -> DeploymentStatusInfo: - for deployment in self.deployment_statuses: - if deployment.name == deployment_name: - return deployment +@pytest.fixture +def mocked_application_state_manager() -> Tuple[ + ApplicationStateManager, MockDeploymentStateManager +]: + deployment_state_manager = MockDeploymentStateManager() + application_state_manager = ApplicationStateManager(deployment_state_manager) + yield application_state_manager, deployment_state_manager - def delete_deployment(self, deployment_name: str): - statuses = [] - for deployment in self.deployment_statuses: - if deployment.name != deployment_name: - statuses.append(deployment) - self.deployment_statuses = statuses - -def test_deploy_app(): +def test_deploy_app(mocked_application_state_manager): """Test DEPLOYING status""" - app_state_manager = ApplicationStateManager(MockDeploymentStateManager()) - app_state_manager.deploy_application("test_app", {}) + app_state_manager, _ = 
mocked_application_state_manager + app_state_manager.deploy_application("test_app", [{"name": "d1"}]) app_status = app_state_manager.get_app_status("test_app") assert app_status.status == ApplicationStatus.DEPLOYING assert app_status.deployment_timestamp > 0 -def test_delete_app(): +def test_delete_app(mocked_application_state_manager): """Test DELETING status""" - app_state_manager = ApplicationStateManager(MockDeploymentStateManager()) - app_state_manager.deploy_application("test_app", {}) + app_state_manager, _ = mocked_application_state_manager + app_state_manager.deploy_application("test_app", [{"name": "d1"}]) app_state_manager.delete_application("test_app") app_status = app_state_manager.get_app_status("test_app") assert app_status.status == ApplicationStatus.DELETING -def test_create_app(): +def test_create_app(mocked_application_state_manager): """Test object ref based deploy and set DEPLOYING""" - app_state_manager = ApplicationStateManager(MockDeploymentStateManager()) + app_state_manager, _ = mocked_application_state_manager app_state_manager.create_application_state("test_app", ray.ObjectRef.nil()) app_status = app_state_manager.get_app_status("test_app") assert app_status.status == ApplicationStatus.DEPLOYING -def test_update_app_running(): +def test_update_app_running(mocked_application_state_manager): """Test DEPLOYING -> RUNNING""" - app_state_manager = ApplicationStateManager(MockDeploymentStateManager()) + app_state_manager, deployment_state_manager = mocked_application_state_manager app_state_manager.deploy_application( "test_app", [{"name": "d1"}, {"name": "d2"}], ) + # Simulate controller + deployment_state_manager.deploy("d1", None) + deployment_state_manager.deploy("d2", None) + app_status = app_state_manager.get_app_status("test_app") assert app_status.status == ApplicationStatus.DEPLOYING - app_state_manager.deployment_state_manager.set_deployment_statuses_healthy(0) + deployment_state_manager.set_deployment_statuses_healthy("d1") 
app_state_manager.update() app_status = app_state_manager.get_app_status("test_app") assert app_status.status == ApplicationStatus.DEPLOYING - app_state_manager.deployment_state_manager.set_deployment_statuses_healthy(1) + deployment_state_manager.set_deployment_statuses_healthy("d2") app_state_manager.update() app_status = app_state_manager.get_app_status("test_app") assert app_status.status == ApplicationStatus.RUNNING @@ -105,13 +114,16 @@ def test_update_app_running(): assert app_status.status == ApplicationStatus.RUNNING -def test_update_app_deploy_failed(): +def test_update_app_deploy_failed(mocked_application_state_manager): """Test DEPLOYING -> DEPLOY_FAILED""" - app_state_manager = ApplicationStateManager(MockDeploymentStateManager()) + app_state_manager, deployment_state_manager = mocked_application_state_manager app_state_manager.deploy_application("test_app", [{"name": "d1"}]) + # Simulate controller + deployment_state_manager.deploy("d1", None) + app_status = app_state_manager.get_app_status("test_app") assert app_status.status == ApplicationStatus.DEPLOYING - app_state_manager.deployment_state_manager.set_deployment_statuses_unhealthy(0) + deployment_state_manager.set_deployment_statuses_unhealthy("d1") app_state_manager.update() app_status = app_state_manager.get_app_status("test_app") assert app_status.status == ApplicationStatus.DEPLOY_FAILED @@ -122,7 +134,7 @@ def test_update_app_deploy_failed(): @pytest.mark.skipif(sys.platform == "win32", reason="Failing on Windows.") @pytest.mark.parametrize("fail_deploy", [False, True]) -def test_config_deploy_app(fail_deploy): +def test_config_deploy_app(mocked_application_state_manager, fail_deploy): """Test config based deploy DEPLOYING -> RUNNING DEPLOYING -> DEPLOY_FAILED @@ -136,7 +148,7 @@ def task(): raise Exception("fail!") object_ref = task.remote() - app_state_manager = ApplicationStateManager(MockDeploymentStateManager()) + app_state_manager, deployment_state_manager = 
mocked_application_state_manager app_state_manager.create_application_state("test_app", object_ref) app_status = app_state_manager.get_app_status("test_app") assert app_status.status == ApplicationStatus.DEPLOYING @@ -152,18 +164,25 @@ def task(): app_status = app_state_manager.get_app_status("test_app") assert app_status.status == ApplicationStatus.DEPLOY_FAILED else: - app_state_manager.deployment_state_manager.set_deployment_statuses_healthy(0) - app_state_manager.deployment_state_manager.set_deployment_statuses_healthy(1) + # Simulate task calling deploy_application on controller + app_state_manager.deploy_application("test_app", [{"name": "d1"}]) + deployment_state_manager.deploy("d1", None) + + deployment_state_manager.set_deployment_statuses_healthy("d1") app_state_manager.update() app_status = app_state_manager.get_app_status("test_app") assert app_status.status == ApplicationStatus.RUNNING -def test_redeploy_same_app(): +def test_redeploy_same_app(mocked_application_state_manager): """Test deploying the same app with different deploy_params.""" - app_state_manager = ApplicationStateManager(MockDeploymentStateManager()) + app_state_manager, deployment_state_manager = mocked_application_state_manager app_state_manager.deploy_application("test_app", [{"name": "d1"}, {"name": "d2"}]) + # Simulate controller + deployment_state_manager.deploy("d1", None) + deployment_state_manager.deploy("d2", None) + app_status = app_state_manager.get_app_status("test_app") assert app_status.status == ApplicationStatus.DEPLOYING @@ -173,25 +192,24 @@ def test_redeploy_same_app(): ) assert unused_deployments == ["d1"] - app_state_manager.deployment_state_manager.add_deployment_status( - DeploymentStatusInfo("d3", DeploymentStatus.UPDATING) - ) - assert app_state_manager._application_states["test_app"].deployments_to_delete == { + deployment_state_manager.deploy("d3", None) + assert app_state_manager._application_states["test_app"]._deployments_to_delete == { "d1" } # After 
updating, the deployment should be deleted successfully, and # deployments_to_delete should be empty - app_state_manager.deployment_state_manager.delete_deployment("d1") + deployment_state_manager.delete_deployment("d1") app_state_manager.update() assert ( - app_state_manager._application_states["test_app"].deployments_to_delete == set() + app_state_manager._application_states["test_app"]._deployments_to_delete + == set() ) -def test_deploy_with_route_prefix_conflict(): +def test_deploy_with_route_prefix_conflict(mocked_application_state_manager): """Test that an application fails to deploy with a route prefix conflict.""" - app_state_manager = ApplicationStateManager(MockDeploymentStateManager()) + app_state_manager, _ = mocked_application_state_manager app_state_manager.deploy_application( "test_app", [{"name": "d1", "route_prefix": "/url1"}] @@ -202,21 +220,24 @@ def test_deploy_with_route_prefix_conflict(): ) -def test_deploy_with_renamed_app(): +def test_deploy_with_renamed_app(mocked_application_state_manager): """ Test that an application deploys successfully when there is a route prefix conflict with an old app running on the cluster. 
""" - app_state_manager = ApplicationStateManager(MockDeploymentStateManager()) + app_state_manager, deployment_state_manager = mocked_application_state_manager # deploy app1 app_state_manager.deploy_application( "app1", [{"name": "d1", "route_prefix": "/url1"}] ) + # Simulate controller + deployment_state_manager.deploy("d1", None) + app_status = app_state_manager.get_app_status("app1") assert app_status.status == ApplicationStatus.DEPLOYING - app_state_manager.deployment_state_manager.set_deployment_statuses_healthy(0) + deployment_state_manager.set_deployment_statuses_healthy("d1") app_state_manager.update() app_status = app_state_manager.get_app_status("app1") assert app_status.status == ApplicationStatus.RUNNING @@ -230,17 +251,20 @@ def test_deploy_with_renamed_app(): app_state_manager.deploy_application( "app2", [{"name": "d2", "route_prefix": "/url1"}] ) + # Simulate controller + deployment_state_manager.deploy("d2", None) + app_status = app_state_manager.get_app_status("app2") assert app_status.status == ApplicationStatus.DEPLOYING # app2 deploys before app1 finishes deleting - app_state_manager.deployment_state_manager.set_deployment_statuses_healthy(1) + deployment_state_manager.set_deployment_statuses_healthy("d2") app_state_manager.update() app_status = app_state_manager.get_app_status("app2") assert app_status.status == ApplicationStatus.RUNNING # app1 finally finishes deleting - app_state_manager.deployment_state_manager.delete_deployment("d1") + deployment_state_manager.delete_deployment("d1") app_state_manager.update() app_status = app_state_manager.get_app_status("app1") assert app_status.status == ApplicationStatus.NOT_STARTED From 57b566c904845a01f08f01e44fa803f5f0c7d8f2 Mon Sep 17 00:00:00 2001 From: matthewdeng Date: Mon, 1 May 2023 15:53:03 -0700 Subject: [PATCH 177/424] [core] update ActorPool map/map_unordered to submit functions eagerly (#34813) [core] update ActorPool map/map_unordered to submit functions eagerly Signed-off-by: Matthew 
Deng --- python/ray/tests/test_actor_pool.py | 21 +++++++++++++++++++++ python/ray/util/actor_pool.py | 16 ++++++++++++---- 2 files changed, 33 insertions(+), 4 deletions(-) diff --git a/python/ray/tests/test_actor_pool.py b/python/ray/tests/test_actor_pool.py index 63fdc7b31071..a4c04276b553 100644 --- a/python/ray/tests/test_actor_pool.py +++ b/python/ray/tests/test_actor_pool.py @@ -1,6 +1,7 @@ import asyncio import sys import time +from unittest.mock import MagicMock import pytest import ray @@ -79,6 +80,26 @@ def double(self, x): index += 1 +def test_map_eager(init): + """Verify that submit is called eagerly when map is called. + + If the results are directly yielded, then the submit calls are not + executed until the results are consumed. + """ + + @ray.remote + class MyActor: + def f(self, x): + pass + + actor = MyActor.remote() + pool = ActorPool([actor]) + pool.submit = MagicMock() + + pool.map(lambda a, v: a.f.remote(v), range(1)) + pool.submit.assert_called() + + def test_map_unordered(init): @ray.remote class MyActor: diff --git a/python/ray/util/actor_pool.py b/python/ray/util/actor_pool.py index abfb66a8a1cd..bd8b88f3adde 100644 --- a/python/ray/util/actor_pool.py +++ b/python/ray/util/actor_pool.py @@ -79,8 +79,12 @@ def map(self, fn: Callable[[Any], Any], values: List[Any]): for v in values: self.submit(fn, v) - while self.has_next(): - yield self.get_next() + + def get_generator(): + while self.has_next(): + yield self.get_next() + + return get_generator() def map_unordered(self, fn: Callable[[Any], Any], values: List[Any]): """Similar to map(), but returning an unordered iterator. @@ -116,8 +120,12 @@ def map_unordered(self, fn: Callable[[Any], Any], values: List[Any]): for v in values: self.submit(fn, v) - while self.has_next(): - yield self.get_next_unordered() + + def get_generator(): + while self.has_next(): + yield self.get_next_unordered() + + return get_generator() def submit(self, fn, value): """Schedule a single task to run in the pool. 
From 13ee2d66a202477c3c4f9142464f115e9738467a Mon Sep 17 00:00:00 2001 From: Eric Liang Date: Mon, 1 May 2023 16:00:17 -0700 Subject: [PATCH 178/424] [data] [docs] Improve documentation for strict mode (#34876) --- doc/source/data/faq.rst | 51 +++++ python/ray/data/BUILD | 8 + .../_internal/execution/streaming_executor.py | 2 +- .../_internal/planner/plan_from_numpy_op.py | 5 +- python/ray/data/_internal/util.py | 6 +- python/ray/data/block.py | 4 +- .../data/datasource/file_based_datasource.py | 3 +- python/ray/data/read_api.py | 5 +- python/ray/data/tests/conftest.py | 8 + python/ray/data/tests/test_nonstrict_mode.py | 200 ++++++++++++++++++ 10 files changed, 279 insertions(+), 13 deletions(-) create mode 100644 python/ray/data/tests/test_nonstrict_mode.py diff --git a/doc/source/data/faq.rst b/doc/source/data/faq.rst index f68f1ff5796d..3d33d74817c7 100644 --- a/doc/source/data/faq.rst +++ b/doc/source/data/faq.rst @@ -288,6 +288,57 @@ Ray Data doesn't perform query optimization, so some manual performance tuning may be necessary depending on your use case and data scale. Please see our :ref:`performance tuning guide ` for more information. +What is strict mode? +==================== + +In Ray 2.5, Ray Data by default always requires data schemas, dropping support for +standalone Python objects. In addition to unification and simplicity benefits, this +aligns the Ray Data API closer to industry-standard distributed data APIs like Apache +Spark and also emerging standards for machine learning datasets like HuggingFace. + +Migrating to strict mode +~~~~~~~~~~~~~~~~~~~~~~~~ + +You can disable strict mode temporarily by setting the environment variable +``RAY_DATA_STRICT_MODE=0`` on all cluster processes. Strict mode will not be +possible to disable in future releases. + +Migrating existing code is straightforward. There are two common changes you may need +to make to your code to be compatible: + +1. 
Pass the ``batch_format="pandas"`` argument to ``map_batches`` or ``iter_batches``, + if your code assumes pandas is the default batch format. +2. Instead of returning a standalone objects or numpy arrays from ``map`` or ``map_batches``, + return a dictionary that names the field. E.g., change function code from ``return object()`` to + ``return {"my_obj": object()}``, and ``return [1, 2, 3]`` to ``return {"my_values": [1, 2, 3]}``. + +List of strict mode changes +~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +In more detail, support for standalone Python objects is dropped. This means that +instead of directly storing, e.g., Python ``Tuple[str, int]`` instance in Ray Data, +you must either give each field a name (i.e., ``{foo: str, bar: int}``), or +use a named object-type field (i.e., ``{foo: object}``). In addition, the ``default`` +batch format is replaced with ``numpy`` by default. This means that most users +just need to be aware of ``Dict[str, Any]`` (non-batched data records) and +``Dict[str, np.ndarray]`` (batched data) types when working with Ray Data. + +**Full list of changes**: + +* All read apis return structured data, never standalone Python objects. +* Standalone Python objects are prohibited from being returned from map / map batches. +* Standalone Numpy arrays are prohibited from being returned from map / map batches. +* There is no more special interpretation of single-column schema containing just ``__value__`` as a column. +* The default batch format is ``numpy`` instead of ``default`` (pandas). +* ``schema()`` returns a unified Schema class instead of ``Union[pyarrow.lib.Schema, type]``. + +**Datasource behavior changes**: + +* ``range_tensor``: create ``data`` column instead of ``__value__``. +* ``from_numpy`` / ``from_numpy_refs`` : create ``data`` column instead of using ``__value__``. +* ``from_items``: create ``item`` column instead of using Python objects. +* ``range``: create ``id`` column instead of using Python objects. 
+ How can I contribute to Ray Data? ===================================== diff --git a/python/ray/data/BUILD b/python/ray/data/BUILD index 9e2e0fa4f9dd..5be74db39003 100644 --- a/python/ray/data/BUILD +++ b/python/ray/data/BUILD @@ -42,6 +42,14 @@ py_test( deps = ["//:ray_lib", ":conftest"], ) +py_test( + name = "test_nonstrict_mode", + size = "small", + srcs = ["tests/test_nonstrict_mode.py"], + tags = ["team:data", "exclusive"], + deps = ["//:ray_lib", ":conftest"], +) + py_test( name = "test_sql", size = "small", diff --git a/python/ray/data/_internal/execution/streaming_executor.py b/python/ray/data/_internal/execution/streaming_executor.py index 2221983a6a95..c6485f143959 100644 --- a/python/ray/data/_internal/execution/streaming_executor.py +++ b/python/ray/data/_internal/execution/streaming_executor.py @@ -144,9 +144,9 @@ def shutdown(self): global _num_shutdown with self._shutdown_lock: - logger.get_logger().info(f"Shutting down {self}.") if self._shutdown: return + logger.get_logger().info(f"Shutting down {self}.") _num_shutdown += 1 self._shutdown = True # Give the scheduling loop some time to finish processing. 
diff --git a/python/ray/data/_internal/planner/plan_from_numpy_op.py b/python/ray/data/_internal/planner/plan_from_numpy_op.py index 3c792de3c753..969b4c26478b 100644 --- a/python/ray/data/_internal/planner/plan_from_numpy_op.py +++ b/python/ray/data/_internal/planner/plan_from_numpy_op.py @@ -23,10 +23,7 @@ def get_input_data() -> List[RefBundle]: ndarray_to_block_remote = cached_remote_fn(ndarray_to_block, num_returns=2) ctx = ray.data.DataContext.get_current() - res = [ - ndarray_to_block_remote.remote(arr_ref, ctx.strict_mode) - for arr_ref in op._ndarrays - ] + res = [ndarray_to_block_remote.remote(arr_ref, ctx) for arr_ref in op._ndarrays] blocks, metadata = map(list, zip(*res)) metadata = ray.get(metadata) ref_bundles: List[RefBundle] = [ diff --git a/python/ray/data/_internal/util.py b/python/ray/data/_internal/util.py index 1d8c17576411..85ec96db6a2d 100644 --- a/python/ray/data/_internal/util.py +++ b/python/ray/data/_internal/util.py @@ -450,11 +450,13 @@ def pandas_df_to_arrow_block(df: "pandas.DataFrame") -> "Block": ) -def ndarray_to_block(ndarray: np.ndarray, strict_mode: bool) -> "Block": +def ndarray_to_block(ndarray: np.ndarray, ctx: DataContext) -> "Block": from ray.data.block import BlockAccessor, BlockExecStats + DataContext._set_current(ctx) + stats = BlockExecStats.builder() - if strict_mode: + if ctx.strict_mode: block = BlockAccessor.batch_to_block({"data": ndarray}) else: block = BlockAccessor.batch_to_block(ndarray) diff --git a/python/ray/data/block.py b/python/ray/data/block.py index de9188e314ee..db43326b462a 100644 --- a/python/ray/data/block.py +++ b/python/ray/data/block.py @@ -58,7 +58,9 @@ "objects are no longer supported, and the default batch format changes to `numpy` " "from `pandas`. To disable strict mode temporarily, set the environment variable " "RAY_DATA_STRICT_MODE=0 on all cluster processes. Strict mode will not be " - "possible to disable in future releases." 
+ colorama.Style.RESET_ALL + "possible to disable in future releases.\n\n" + "Learn more here: https://docs.ray.io/en/master/data/faq.html#what-is-strict-mode" + + colorama.Style.RESET_ALL ) diff --git a/python/ray/data/datasource/file_based_datasource.py b/python/ray/data/datasource/file_based_datasource.py index 09ba71ba135c..3d436bce8cbd 100644 --- a/python/ray/data/datasource/file_based_datasource.py +++ b/python/ray/data/datasource/file_based_datasource.py @@ -426,6 +426,7 @@ def estimate_inmemory_data_size(self) -> Optional[int]: def get_read_tasks(self, parallelism: int) -> List[ReadTask]: import numpy as np + ctx = DataContext.get_current() open_stream_args = self._open_stream_args reader_args = self._reader_args partitioning = self._partitioning @@ -446,9 +447,9 @@ def read_files( read_paths: List[str], fs: Union["pyarrow.fs.FileSystem", _S3FileSystemWrapper], ) -> Iterable[Block]: + DataContext._set_current(ctx) logger.debug(f"Reading {len(read_paths)} files.") fs = _unwrap_s3_serialization_workaround(filesystem) - ctx = DataContext.get_current() output_buffer = BlockOutputBuffer( block_udf=_block_udf, target_max_block_size=ctx.target_max_block_size ) diff --git a/python/ray/data/read_api.py b/python/ray/data/read_api.py index 73a393acbc35..af3d096e52c7 100644 --- a/python/ray/data/read_api.py +++ b/python/ray/data/read_api.py @@ -1674,10 +1674,7 @@ def from_numpy_refs( ctx = DataContext.get_current() ndarray_to_block_remote = cached_remote_fn(ndarray_to_block, num_returns=2) - res = [ - ndarray_to_block_remote.remote(ndarray, strict_mode=ctx.strict_mode) - for ndarray in ndarrays - ] + res = [ndarray_to_block_remote.remote(ndarray, ctx) for ndarray in ndarrays] blocks, metadata = map(list, zip(*res)) metadata = ray.get(metadata) diff --git a/python/ray/data/tests/conftest.py b/python/ray/data/tests/conftest.py index 9ddc28d0d731..2e132c0e4ac6 100644 --- a/python/ray/data/tests/conftest.py +++ b/python/ray/data/tests/conftest.py @@ -44,6 +44,14 @@ def 
enable_strict_mode(): ctx.strict_mode = False +@pytest.fixture(scope="module") +def enable_nonstrict_mode(): + ctx = ray.data.DataContext.get_current() + ctx.strict_mode = False + yield + ctx.strict_mode = True + + @pytest.fixture(scope="function") def aws_credentials(): import os diff --git a/python/ray/data/tests/test_nonstrict_mode.py b/python/ray/data/tests/test_nonstrict_mode.py new file mode 100644 index 000000000000..40742940badf --- /dev/null +++ b/python/ray/data/tests/test_nonstrict_mode.py @@ -0,0 +1,200 @@ +import numpy as np +import pandas as pd +from collections import UserDict +import pytest + +import ray +from ray.data.tests.conftest import * # noqa +from ray.tests.conftest import * # noqa + + +def test_nonstrict_read_schemas(ray_start_10_cpus_shared, enable_nonstrict_mode): + ds = ray.data.range(1) + assert ds.take()[0] == 0 + + ds = ray.data.range_table(1) + assert ds.take()[0] == {"value": 0} + + ds = ray.data.range_tensor(1) + assert ds.take()[0] == np.array([0]) + + ds = ray.data.from_items([1]) + assert ds.take()[0] == 1 + + ds = ray.data.from_items([object()]) + assert isinstance(ds.take()[0], object) + + ds = ray.data.read_numpy("example://mnist_subset.npy") + assert isinstance(ds.take()[0], np.ndarray) + + ds = ray.data.from_numpy(np.ones((100, 10))) + assert isinstance(ds.take()[0], np.ndarray) + + ds = ray.data.from_numpy_refs(ray.put(np.ones((100, 10)))) + assert isinstance(ds.take()[0], np.ndarray) + + ds = ray.data.read_binary_files("example://image-datasets/simple") + assert isinstance(ds.take()[0], bytes) + + ds = ray.data.read_images("example://image-datasets/simple") + assert "image" in ds.take()[0] + + ds = ray.data.read_text("example://sms_spam_collection_subset.txt") + assert "text" in ds.take()[0] + + +def test_nonstrict_map_output(ray_start_10_cpus_shared, enable_nonstrict_mode): + ds = ray.data.range(1) + + ds.map(lambda x: 0, max_retries=0).materialize() + ds.map(lambda x: {"id": 0}).materialize() + ds.map(lambda x: 
UserDict({"id": 0})).materialize() + + ds.map_batches(lambda x: np.array([0]), max_retries=0).materialize() + ds.map_batches(lambda x: {"id": np.array([0])}).materialize() + ds.map_batches(lambda x: UserDict({"id": np.array([0])})).materialize() + + ds.map(lambda x: np.ones(10), max_retries=0).materialize() + ds.map(lambda x: {"x": np.ones(10)}).materialize() + ds.map(lambda x: UserDict({"x": np.ones(10)})).materialize() + + ds.map_batches(lambda x: np.ones(10), max_retries=0).materialize() + ds.map_batches(lambda x: {"x": np.ones(10)}).materialize() + ds.map_batches(lambda x: UserDict({"x": np.ones(10)})).materialize() + + # Not allowed in normal mode either. + with pytest.raises(ValueError): + ds.map_batches(lambda x: object(), max_retries=0).materialize() + with pytest.raises(ValueError): + ds.map_batches(lambda x: {"x": object()}, max_retries=0).materialize() + ds.map_batches(lambda x: {"x": np.array([object()])}).materialize() + ds.map_batches(lambda x: UserDict({"x": np.array([object()])})).materialize() + + ds.map(lambda x: object(), max_retries=0).materialize() + ds.map(lambda x: {"x": object()}).materialize() + ds.map(lambda x: UserDict({"x": object()})).materialize() + + +def test_nonstrict_convert_map_output(ray_start_10_cpus_shared, enable_nonstrict_mode): + ds = ray.data.range(1).map_batches(lambda x: {"id": [0, 1, 2, 3]}).materialize() + assert ds.take_batch()["id"].tolist() == [0, 1, 2, 3] + + with pytest.raises(ValueError): + # Strings not converted into array. 
+ ray.data.range(1).map_batches( + lambda x: {"id": "string"}, max_retries=0 + ).materialize() + + class UserObj: + def __eq__(self, other): + return isinstance(other, UserObj) + + ds = ( + ray.data.range(1) + .map_batches(lambda x: {"id": [0, 1, 2, UserObj()]}) + .materialize() + ) + assert ds.take_batch()["id"].tolist() == [0, 1, 2, UserObj()] + + +def test_nonstrict_default_batch_format( + ray_start_10_cpus_shared, enable_nonstrict_mode +): + ds = ray.data.range_table(1) + + @ray.remote + class Queue: + def __init__(self): + self.item = None + + def put(self, item): + old = self.item + self.item = item + return old + + q = Queue.remote() + + assert isinstance(next(ds.iter_batches()), pd.DataFrame) + assert isinstance(ds.take_batch(), pd.DataFrame) + + def f(x): + ray.get(q.put.remote(x)) + return x + + ds.map_batches(f).materialize() + batch = ray.get(q.put.remote(None)) + assert isinstance(batch, pd.DataFrame), batch + + +def test_nonstrict_tensor_support(ray_start_10_cpus_shared, enable_nonstrict_mode): + ds = ray.data.from_items([np.ones(10), np.ones(10)]) + assert np.array_equal(ds.take()[0], np.ones(10)) + + ds = ds.map(lambda x: x * 2) + assert np.array_equal(ds.take()[0], 2 * np.ones(10)) + + ds = ds.map_batches(lambda x: x * 2) + assert np.array_equal(ds.take()[0], 4 * np.ones(10)) + + +def test_nonstrict_value_repr(ray_start_10_cpus_shared, enable_nonstrict_mode): + ds = ray.data.from_items([{"__value__": np.ones(10)}]) + + ds = ds.map_batches(lambda x: {"__value__": x * 2}) + ds = ds.map(lambda x: {"__value__": x * 2}) + assert np.array_equal(ds.take()[0], 4 * np.ones(10)) + assert np.array_equal(ds.take_batch()[0], 4 * np.ones(10)) + + +def test_nonstrict_compute(ray_start_10_cpus_shared, enable_nonstrict_mode): + ray.data.range(10).map(lambda x: x, compute="actors").show() + ray.data.range(10).map(lambda x: x, compute=ray.data.ActorPoolStrategy(1, 1)).show() + ray.data.range(10).map(lambda x: x, compute="tasks").show() + + +def 
test_nonstrict_schema(ray_start_10_cpus_shared, enable_nonstrict_mode): + import pyarrow + from ray.data._internal.pandas_block import PandasBlockSchema + + ds = ray.data.from_items([{"x": 2}]) + schema = ds.schema() + assert isinstance(schema, pyarrow.lib.Schema) + + ds = ray.data.from_items([{"x": 2, "y": [1, 2]}]) + schema = ds.schema() + assert isinstance(schema, pyarrow.lib.Schema) + + ds = ray.data.from_items([{"x": 2, "y": object(), "z": [1, 2]}]) + schema = ds.schema() + assert isinstance(schema, type) + + ds = ray.data.from_numpy(np.ones((100, 10))) + schema = ds.schema() + assert isinstance(schema, pyarrow.lib.Schema) + + schema = ds.map_batches(lambda x: x, batch_format="pandas").schema() + assert isinstance(schema, PandasBlockSchema) + + +def test_nouse_raw_dicts(ray_start_10_cpus_shared, enable_nonstrict_mode): + assert type(ray.data.range_table(10).take(1)[0].as_pydict()) is dict + assert type(ray.data.from_items([{"x": 1}]).take(1)[0].as_pydict()) is dict + + def checker(x): + assert type(x.as_pydict()) is dict + return x + + ray.data.range_table(10).map(checker).show() + + +def test_nonstrict_require_batch_size_for_gpu(enable_nonstrict_mode): + ray.shutdown() + ray.init(num_cpus=4, num_gpus=1) + ds = ray.data.range(1) + ds.map_batches(lambda x: x, num_gpus=1) + + +if __name__ == "__main__": + import sys + + sys.exit(pytest.main(["-v", __file__])) From f1d9f410355af5ea52434274a4fdfa38d2b7428a Mon Sep 17 00:00:00 2001 From: SangBin Cho Date: Tue, 2 May 2023 09:00:01 +0900 Subject: [PATCH 179/424] Revert "[Core] Put pg state to kv store when pg rescheduling (#34467)" (#34914) This reverts commit af018f6. 
--- python/ray/_private/state.py | 2 - .../tests/test_placement_group_failover.py | 71 +------------------ .../gcs_server/gcs_placement_group_manager.cc | 11 +-- .../test/gcs_placement_group_manager_test.cc | 2 - 4 files changed, 6 insertions(+), 80 deletions(-) diff --git a/python/ray/_private/state.py b/python/ray/_private/state.py index 2fa0dcf031f9..40e94e9c5db5 100644 --- a/python/ray/_private/state.py +++ b/python/ray/_private/state.py @@ -301,8 +301,6 @@ def get_state(state): return "PENDING" elif state == gcs_utils.PlacementGroupTableData.CREATED: return "CREATED" - elif state == gcs_utils.PlacementGroupTableData.RESCHEDULING: - return "RESCHEDULING" else: return "REMOVED" diff --git a/python/ray/tests/test_placement_group_failover.py b/python/ray/tests/test_placement_group_failover.py index b8a7841eec48..3bbe88536443 100755 --- a/python/ray/tests/test_placement_group_failover.py +++ b/python/ray/tests/test_placement_group_failover.py @@ -2,7 +2,9 @@ import sys import ray import ray.cluster_utils -from ray._private.test_utils import get_other_nodes, wait_for_condition +from ray._private.test_utils import ( + get_other_nodes, +) MB = 1024 * 1024 @@ -56,72 +58,5 @@ def test_placement_group_failover_when_two_nodes_die(monkeypatch, ray_start_clus ray.get(object_ref, timeout=5) -def test_gcs_restart_when_placement_group_failover( - ray_start_cluster_head_with_external_redis, -): - @ray.remote(num_cpus=1) - class Actor(object): - def __init__(self): - self.n = 0 - - def value(self): - return self.n - - cluster = ray_start_cluster_head_with_external_redis - num_nodes = 3 - nodes = [] - for _ in range(num_nodes - 1): - nodes.append(cluster.add_node(num_cpus=1)) - - # Make sure the placement group is ready. 
- bundles = [{"CPU": 1, "memory": 100 * MB} for _ in range(num_nodes)] - placement_group = ray.util.placement_group( - name="name", strategy="STRICT_SPREAD", bundles=bundles - ) - assert placement_group.wait(5000) - actors = [] - for i in range(num_nodes): - actor = Actor.options( - placement_group=placement_group, - placement_group_bundle_index=i, - max_restarts=-1, - ).remote() - object_ref = actor.value.remote() - ray.get(object_ref, timeout=5) - actors.append(actor) - - # Simulate a node dead. - other_nodes = get_other_nodes(cluster, exclude_head=True) - cluster.remove_node(other_nodes[0]) - - # Make sure placement group state change to rescheduling. - def _check_pg_whether_be_reschedule(): - table = ray.util.placement_group_table(placement_group) - return table["state"] == "RESCHEDULING" - - wait_for_condition( - _check_pg_whether_be_reschedule, timeout=5, retry_interval_ms=1000 - ) - - # Simulate gcs restart. - cluster.head_node.kill_gcs_server() - cluster.head_node.start_gcs_server() - - cluster.add_node(num_cpus=1) - cluster.wait_for_nodes() - - # Check placement gorup reschedule success after gcs server restart. 
- def _check_actor_with_pg_is_ready(): - try: - for actor in actors: - object_ref = actor.value.remote() - ray.get(object_ref, timeout=5) - return True - except Exception: - return False - - wait_for_condition(_check_actor_with_pg_is_ready, timeout=5, retry_interval_ms=1000) - - if __name__ == "__main__": sys.exit(pytest.main(["-v", __file__])) diff --git a/src/ray/gcs/gcs_server/gcs_placement_group_manager.cc b/src/ray/gcs/gcs_server/gcs_placement_group_manager.cc index 6c222727ee60..2851fe41f494 100644 --- a/src/ray/gcs/gcs_server/gcs_placement_group_manager.cc +++ b/src/ray/gcs/gcs_server/gcs_placement_group_manager.cc @@ -756,13 +756,11 @@ void GcsPlacementGroupManager::OnNodeDead(const NodeID &node_id) { iter->second->GetMutableStats()->set_scheduling_state( rpc::PlacementGroupStats::QUEUED); AddToPendingQueue(iter->second, 0); - RAY_CHECK_OK(gcs_table_storage_->PlacementGroupTable().Put( - iter->second->GetPlacementGroupID(), - iter->second->GetPlacementGroupTableData(), - [this](Status status) { SchedulePendingPlacementGroups(); })); } } } + + SchedulePendingPlacementGroups(); } void GcsPlacementGroupManager::OnNodeAdd(const NodeID &node_id) { @@ -968,10 +966,7 @@ bool GcsPlacementGroupManager::RescheduleIfStillHasUnplacedBundles( << placement_group->GetPlacementGroupID(); placement_group->UpdateState(rpc::PlacementGroupTableData::RESCHEDULING); AddToPendingQueue(placement_group, 0); - RAY_CHECK_OK(gcs_table_storage_->PlacementGroupTable().Put( - placement_group->GetPlacementGroupID(), - placement_group->GetPlacementGroupTableData(), - [this](Status status) { SchedulePendingPlacementGroups(); })); + SchedulePendingPlacementGroups(); return true; } } diff --git a/src/ray/gcs/gcs_server/test/gcs_placement_group_manager_test.cc b/src/ray/gcs/gcs_server/test/gcs_placement_group_manager_test.cc index e0cdced97ae6..82d46f13f145 100644 --- a/src/ray/gcs/gcs_server/test/gcs_placement_group_manager_test.cc +++ 
b/src/ray/gcs/gcs_server/test/gcs_placement_group_manager_test.cc @@ -462,7 +462,6 @@ TEST_F(GcsPlacementGroupManagerTest, TestReschedulingRetry) { placement_group->GetPlacementGroupID(); mock_placement_group_scheduler_->bundles_on_dead_node_.push_back(0); gcs_placement_group_manager_->OnNodeDead(NodeID::FromRandom()); - WaitUntilIoServiceDone(); const auto &bundles = mock_placement_group_scheduler_->placement_groups_[0]->GetBundles(); EXPECT_TRUE(NodeID::FromBinary(bundles[0]->GetMessage().node_id()).IsNil()); @@ -504,7 +503,6 @@ TEST_F(GcsPlacementGroupManagerTest, TestRescheduleWhenNodeDead) { placement_group->GetPlacementGroupID(); mock_placement_group_scheduler_->bundles_on_dead_node_.push_back(0); gcs_placement_group_manager_->OnNodeDead(NodeID::FromRandom()); - WaitUntilIoServiceDone(); ASSERT_EQ(mock_placement_group_scheduler_->placement_groups_[0]->GetPlacementGroupID(), placement_group->GetPlacementGroupID()); const auto &bundles = From 5cc4011883db84ade52bc1dc154b91099bd54d67 Mon Sep 17 00:00:00 2001 From: SangBin Cho Date: Tue, 2 May 2023 09:00:28 +0900 Subject: [PATCH 180/424] Revert "[Core] Shorten the membership checking time to 5 seconds. (#34769)" (#34912) This reverts commit 77d5e84c35de481bde9be60569e69f944cb33846. 
--- src/ray/common/ray_config_def.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/ray/common/ray_config_def.h b/src/ray/common/ray_config_def.h index 3761a6a778bf..0188646292cc 100644 --- a/src/ray/common/ray_config_def.h +++ b/src/ray/common/ray_config_def.h @@ -799,8 +799,8 @@ RAY_CONFIG(bool, kill_idle_workers_of_terminated_job, true) // Example: RAY_preload_python_modules=tensorflow,pytorch RAY_CONFIG(std::vector, preload_python_modules, {}) -// By default, raylet send a self liveness check to GCS every 5s -RAY_CONFIG(int64_t, raylet_liveness_self_check_interval_ms, 5000) +// By default, raylet send a self liveness check to GCS every 60s +RAY_CONFIG(int64_t, raylet_liveness_self_check_interval_ms, 60000) // Instruct the CoreWorker to kill its child processes while // it exits. This prevents certain classes of resource leaks From 61f0871f847f8fb9542c40279f11bfa97680ab51 Mon Sep 17 00:00:00 2001 From: Justin Yu Date: Mon, 1 May 2023 17:28:08 -0700 Subject: [PATCH 181/424] [Templates] Reintroduce requirements.txt + temporary patch fixes (#34903) Signed-off-by: Justin Yu --- .github/CODEOWNERS | 1 + doc/BUILD | 13 ++- .../01_batch_inference/batch_inference.ipynb | 62 +++++++----- .../many_model_training.ipynb | 34 ++++--- .../02_many_model_training/requirements.txt | 1 + .../requirements.txt | 10 ++ .../serving_stable_diffusion.ipynb | 71 +++++++------- .../batch_inference.ipynb | 66 ++++++++----- .../many_model_training.ipynb | 63 +++++++++---- .../02_many_model_training/requirements.txt | 1 + .../requirements.txt | 1 + .../serving_stable_diffusion.ipynb | 94 +++++++++++-------- release/release_tests.yaml | 36 +++---- 13 files changed, 284 insertions(+), 169 deletions(-) create mode 100644 doc/source/templates/02_many_model_training/requirements.txt create mode 100644 doc/source/templates/03_serving_stable_diffusion/requirements.txt rename doc/source/templates/tests/{ => 01_batch_inference}/batch_inference.ipynb (89%) rename 
doc/source/templates/tests/{ => 02_many_model_training}/many_model_training.ipynb (87%) create mode 120000 doc/source/templates/tests/02_many_model_training/requirements.txt create mode 120000 doc/source/templates/tests/03_serving_stable_diffusion/requirements.txt rename doc/source/templates/tests/{ => 03_serving_stable_diffusion}/serving_stable_diffusion.ipynb (91%) diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 52bf373935c9..066ff295d991 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -11,6 +11,7 @@ # NOTE: Add @ray-project/ray-docs to all following docs subdirs. /doc/ @ray-project/ray-docs /doc/source/use-cases.rst @ericl @pcmoritz +/doc/source/templates @justinvyu @sofianhnaide # ==== Ray core ==== diff --git a/doc/BUILD b/doc/BUILD index 7cd534c82a65..a2bb7a8bd79d 100644 --- a/doc/BUILD +++ b/doc/BUILD @@ -236,7 +236,10 @@ py_test_run_all_subdirectory( filegroup( name = "workspace_templates", - srcs = glob(["source/templates/tests/*.ipynb"]), + srcs = glob([ + "source/templates/tests/**/*.ipynb", + "source/templates/tests/**/requirements.txt" + ]), visibility = ["//doc:__subpackages__"] ) @@ -255,7 +258,8 @@ py_test( py_test_run_all_notebooks( size = "large", - include = ["source/templates/tests/many_model_training.ipynb"], + # TODO(justinvyu): Merge tests/ with the regular versions of the templates. + include = ["source/templates/tests/02_many_model_training/many_model_training.ipynb"], exclude = [], data = ["//doc:workspace_templates"], tags = ["exclusive", "team:ml", "ray_air"], @@ -267,8 +271,9 @@ py_test_run_all_notebooks( py_test_run_all_notebooks( size = "large", include = [ - "source/templates/tests/batch_inference.ipynb", - "source/templates/tests/serving_stable_diffusion.ipynb" + # TODO(justinvyu): Merge tests/ with the regular versions of the templates. 
+ "source/templates/tests/01_batch_inference/batch_inference.ipynb", + "source/templates/tests/03_serving_stable_diffusion/serving_stable_diffusion.ipynb" ], exclude = [], data = ["//doc:workspace_templates"], diff --git a/doc/source/templates/01_batch_inference/batch_inference.ipynb b/doc/source/templates/01_batch_inference/batch_inference.ipynb index ce2f2ced7d06..14b109020e87 100644 --- a/doc/source/templates/01_batch_inference/batch_inference.ipynb +++ b/doc/source/templates/01_batch_inference/batch_inference.ipynb @@ -8,14 +8,14 @@ "source": [ "# Scaling Batch Inference with Ray Data\n", "\n", - "This template is a quickstart to using [Ray Data](https://docs.ray.io/en/latest/data/data.html) for batch inference. Ray Data is one of many libraries under the [Ray AI Runtime](https://docs.ray.io/en/latest/ray-air/getting-started.html). See [this blog post](https://www.anyscale.com/blog/model-batch-inference-in-ray-actors-actorpool-and-datasets) for more information on why and how you should perform batch inference with Ray!\n", + "This template is a quickstart to using [Ray Data](https://docs.ray.io/en/latest/data/dataset.html) for batch inference. Ray Data is one of many libraries under the [Ray AI Runtime](https://docs.ray.io/en/latest/ray-air/getting-started.html). See [this blog post](https://www.anyscale.com/blog/model-batch-inference-in-ray-actors-actorpool-and-datasets) for more information on why and how you should perform batch inference with Ray!\n", "\n", "This template walks through GPU batch prediction on an image dataset using a PyTorch model, but the framework and data format are there just to help you build your own application!\n", "\n", "At a high level, this template will:\n", - "1. [Load your dataset using Ray Data.](https://docs.ray.io/en/latest/data/creating-datastreams.html)\n", - "2. [Preprocess your dataset before feeding it to your model.](https://docs.ray.io/en/latest/data/transforming-datastreams.html)\n", - "3. 
[Initialize your model and perform inference on a shard of your dataset with a remote actor.](https://docs.ray.io/en/latest/data/transforming-datastreams.html#callable-class-udfs)\n", + "1. [Load your dataset using Ray Data.](https://docs.ray.io/en/latest/data/creating-datasets.html)\n", + "2. [Preprocess your dataset before feeding it to your model.](https://docs.ray.io/en/latest/data/transforming-datasets.html)\n", + "3. [Initialize your model and perform inference on a shard of your dataset with a remote actor.](https://docs.ray.io/en/latest/data/transforming-datasets.html#writing-user-defined-functions-udfs)\n", "4. [Save your prediction results.](https://docs.ray.io/en/latest/data/api/input_output.html)\n", "\n", "> Slot in your code below wherever you see the ✂️ icon to build a many model training Ray application off of this template!" @@ -52,42 +52,46 @@ { "cell_type": "code", "execution_count": null, - "id": "770bbdc7", - "metadata": {}, + "id": "9d49681f-baf0-4ed8-9740-5c4e38744311", + "metadata": { + "tags": [] + }, "outputs": [], "source": [ - "!ray status" + "NUM_WORKERS: int = 4\n", + "NUM_GPUS_PER_WORKER: float = 1\n" ] }, { "cell_type": "code", "execution_count": null, - "id": "9d49681f-baf0-4ed8-9740-5c4e38744311", - "metadata": { - "tags": [] - }, + "id": "770bbdc7", + "metadata": {}, "outputs": [], "source": [ - "NUM_WORKERS: int = 4\n", - "NUM_GPUS_PER_WORKER: float = 1\n" + "!ray status" ] }, { + "attachments": {}, "cell_type": "markdown", "id": "23321ba8", "metadata": {}, "source": [ "```{tip}\n", - "Try setting `NUM_GPUS_PER_WORKER` to a fractional amount! This will leverage Ray's fractional resource allocation, which means you can schedule multiple batch inference workers to happen on the same GPU.\n", + "Try setting `NUM_GPUS_PER_WORKER` to a fractional amount! 
This will leverage Ray's fractional resource allocation, which means you can schedule multiple batch inference workers to use the same GPU.\n", "```" ] }, { + "attachments": {}, "cell_type": "markdown", "id": "3b6f2352", "metadata": {}, "source": [ - "> ✂️ Replace this function with logic to load your own data with Ray Data." + "> ✂️ Replace this function with logic to load your own data with Ray Data.\n", + ">\n", + "> See [the Ray Data guide on creating datasets](https://docs.ray.io/en/latest/data/creating-datasets.html) to learn how to create a dataset based on the data type and how file storage format." ] }, { @@ -97,7 +101,7 @@ "metadata": {}, "outputs": [], "source": [ - "def load_ray_dataset() -> ray.data.Datastream:\n", + "def load_ray_dataset():\n", " from ray.data.datasource.partitioning import Partitioning\n", "\n", " s3_uri = \"s3://anonymous@air-example-data-2/imagenette2/val/\"\n", @@ -163,7 +167,9 @@ "outputs": [], "source": [ "ds = ds.map_batches(preprocess, batch_format=\"numpy\")\n", - "ds.schema()\n" + "\n", + "print(\"Dataset schema:\\n\", ds.schema())\n", + "print(\"Number of images:\", ds.count())\n" ] }, { @@ -194,9 +200,9 @@ " def __call__(self, batch: Dict[str, np.ndarray]) -> Dict[str, np.ndarray]:\n", " # \n", " input_data = torch.as_tensor(batch[\"image\"], device=self.device)\n", - " with torch.no_grad():\n", - " result = self.model(input_data)\n", - " return {\"predictions\": result.cpu().numpy()}\n" + " with torch.inference_mode():\n", + " pred = self.model(input_data)\n", + " return {\"predicted_class_index\": pred.argmax(dim=1).detach().cpu().numpy()}\n" ] }, { @@ -218,8 +224,9 @@ " PredictCallable,\n", " batch_size=128,\n", " compute=ray.data.ActorPoolStrategy(\n", - " # Fix the number of batch inference workers to a specified value.\n", - " size=NUM_WORKERS,\n", + " # Fix the number of batch inference workers to `NUM_WORKERS`.\n", + " min_size=NUM_WORKERS,\n", + " max_size=NUM_WORKERS,\n", " ),\n", " 
num_gpus=NUM_GPUS_PER_WORKER,\n", " batch_format=\"numpy\",\n", @@ -237,6 +244,15 @@ "preds.schema()\n" ] }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "2565ba08", + "metadata": {}, + "source": [ + "Show the first few predictions!" + ] + }, { "cell_type": "code", "execution_count": null, @@ -244,7 +260,7 @@ "metadata": {}, "outputs": [], "source": [ - "preds.take(1)\n" + "preds.take(5)\n" ] }, { diff --git a/doc/source/templates/02_many_model_training/many_model_training.ipynb b/doc/source/templates/02_many_model_training/many_model_training.ipynb index 1f9613c0b56c..0645706d75cd 100644 --- a/doc/source/templates/02_many_model_training/many_model_training.ipynb +++ b/doc/source/templates/02_many_model_training/many_model_training.ipynb @@ -37,8 +37,7 @@ "\n", "This template requires certain Python packages to be available to every node in the cluster.\n", "\n", - "> ✂️ Add your own package dependencies! You can specify bounds for package versions\n", - "> in the same format as a `requirements.txt` file.\n" + "> ✂️ Add your own package dependencies in the `requirements.txt` file!\n" ] }, { @@ -50,9 +49,21 @@ }, "outputs": [], "source": [ - "requirements = [\n", - " \"statsforecast==1.5.0\",\n", - "]\n" + "requirements_path = \"./requirements.txt\"\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "92161434", + "metadata": {}, + "outputs": [], + "source": [ + "with open(requirements_path, \"r\") as f:\n", + " requirements = f.read().strip().splitlines()\n", + "\n", + "print(\"Requirements:\")\n", + "print(\"\\n\".join(requirements))\n" ] }, { @@ -64,7 +75,9 @@ "First, we may want to use these modules right here in our script, which is running on the head node.\n", "Install the Python packages on the head node using `pip install`.\n", "\n", - "You may need to restart this notebook kernel to access the installed packages.\n" + "```{note}\n", + "You may need to restart this notebook kernel to access the installed packages.\n", + 
"```\n" ] }, { @@ -74,9 +87,7 @@ "metadata": {}, "outputs": [], "source": [ - "all_requirements = \" \".join(requirements)\n", - "\n", - "%pip install {all_requirements}\n" + "%pip install -r {requirements_path} --upgrade" ] }, { @@ -118,11 +129,12 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "id": "b8fc83d0", "metadata": {}, "source": [ - "> ✂️ Replace this value to change the number of data partitions you will use. This will be total the number of Tune trials you will run!\n", + "> ✂️ Replace this value to change the number of data partitions you will use (<= 5000 for this dataset). This will be total the number of Tune trials you will run!\n", ">\n", "> Note that this template fits two models per data partition and reports the best performing one." ] @@ -136,7 +148,7 @@ }, "outputs": [], "source": [ - "NUM_DATA_PARTITIONS: int = 1000\n" + "NUM_DATA_PARTITIONS: int = 500\n" ] }, { diff --git a/doc/source/templates/02_many_model_training/requirements.txt b/doc/source/templates/02_many_model_training/requirements.txt new file mode 100644 index 000000000000..25eaf5428923 --- /dev/null +++ b/doc/source/templates/02_many_model_training/requirements.txt @@ -0,0 +1 @@ +statsforecast==1.5.0 diff --git a/doc/source/templates/03_serving_stable_diffusion/requirements.txt b/doc/source/templates/03_serving_stable_diffusion/requirements.txt new file mode 100644 index 000000000000..eac6df67b25e --- /dev/null +++ b/doc/source/templates/03_serving_stable_diffusion/requirements.txt @@ -0,0 +1,10 @@ +accelerate==0.14.0 +diffusers==0.15.1 +matplotlib>=3.5.3,<=3.7.1 +numpy>=1.21.6,<=1.23.5 +Pillow==9.3.0 +scipy>=1.7.3,<=1.9.3 +tensorboard>=2.11.2,<=2.12.0 +torch==1.13.0 +torchvision==0.14.0 +transformers==4.28.1 diff --git a/doc/source/templates/03_serving_stable_diffusion/serving_stable_diffusion.ipynb b/doc/source/templates/03_serving_stable_diffusion/serving_stable_diffusion.ipynb index e3b7aad59433..9c79e32010f1 100644 --- 
a/doc/source/templates/03_serving_stable_diffusion/serving_stable_diffusion.ipynb +++ b/doc/source/templates/03_serving_stable_diffusion/serving_stable_diffusion.ipynb @@ -16,37 +16,39 @@ ] }, { - "attachments": {}, "cell_type": "markdown", - "id": "25364e8e", + "id": "2ea9629f", "metadata": {}, "source": [ "## Handling Dependencies\n", "\n", "This template requires certain Python packages to be available to every node in the cluster.\n", "\n", - "> ✂️ Add your own package dependencies! You can specify bounds for package versions\n", - "> in the same format as a `requirements.txt` file.\n" + "> ✂️ Add your own package dependencies in the `requirements.txt` file!\n" ] }, { "cell_type": "code", "execution_count": null, - "id": "1b79bfb9", + "id": "e43b49fc", "metadata": {}, "outputs": [], "source": [ - "requirements = [\n", - " \"accelerate==0.14.0\",\n", - " \"diffusers==0.15.1\",\n", - " \"numpy>=1.21.6,<=1.23.5\",\n", - " \"Pillow==9.3.0\",\n", - " \"scipy>=1.7.3,<=1.9.3\",\n", - " \"tensorboard>=2.11.2,<=2.12.0\",\n", - " \"torch==1.13.0\",\n", - " \"torchvision==0.14.0\",\n", - " \"transformers==4.28.1\",\n", - "]\n" + "requirements_path = \"./requirements.txt\"\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "19504900", + "metadata": {}, + "outputs": [], + "source": [ + "with open(requirements_path, \"r\") as f:\n", + " requirements = f.read().strip().splitlines()\n", + "\n", + "print(\"Requirements:\")\n", + "print(\"\\n\".join(requirements))\n" ] }, { @@ -70,9 +72,7 @@ "metadata": {}, "outputs": [], "source": [ - "all_requirements = \" \".join(requirements)\n", - "\n", - "%pip install {all_requirements}\n" + "%pip install -r {requirements_path} --upgrade" ] }, { @@ -83,11 +83,7 @@ "source": [ "Next, we need to make sure all worker nodes also have access to the dependencies.\n", "For this, use a [Ray Runtime Environment](https://docs.ray.io/en/latest/ray-core/handling-dependencies.html#runtime-environments)\n", - "to dynamically set 
up dependencies throughout the cluster.\n", - "\n", - "```{note}\n", - "This will be used later when setting up the Ray Serve deployment.\n", - "```\n" + "to dynamically set up dependencies throughout the cluster.\n" ] }, { @@ -97,7 +93,9 @@ "metadata": {}, "outputs": [], "source": [ - "runtime_env = {\"pip\": requirements}\n" + "import ray\n", + "\n", + "ray.init(runtime_env={\"pip\": requirements})\n" ] }, { @@ -158,7 +156,7 @@ "NUM_GPUS_PER_REPLICA: float = 1\n", "\n", "# Control the output size: (IMAGE_SIZE, IMAGE_SIZE)\n", - "# NOTE: Generated image quality degrades rapidly if you reduce size too much.\n", + "# NOTE: Generated image quality degrades rapidly if you reduce the size too much.\n", "IMAGE_SIZE: int = 776\n" ] }, @@ -178,11 +176,8 @@ "metadata": {}, "outputs": [], "source": [ - "# Configure each model replica to:\n", - "# 1. Setup the dependencies listed earlier.\n", - "# 2. Use the specified resources.\n", + "# Configure each model replica to use the specified resources.\n", "ray_actor_options = {\n", - " \"runtime_env\": runtime_env,\n", " \"num_gpus\": NUM_GPUS_PER_REPLICA,\n", "}\n" ] @@ -403,7 +398,7 @@ " plt.show()\n", "\n", "\n", - "def main() -> float:\n", + "def main():\n", " try:\n", " requests.get(endpoint, timeout=0.1)\n", " except Exception as e:\n", @@ -467,8 +462,7 @@ "metadata": {}, "outputs": [], "source": [ - "mean_generation_time = main()\n", - "serve.shutdown()\n" + "mean_generation_time = main()\n" ] }, { @@ -481,6 +475,17 @@ "You can modify this template and iterate your model deployment directly on your cluster within your Anyscale Workspace,\n", "testing with the local endpoint." 
] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "9e360cf9", + "metadata": {}, + "outputs": [], + "source": [ + "# Shut down the model replicas once you're done!\n", + "serve.shutdown()\n" + ] } ], "metadata": { diff --git a/doc/source/templates/tests/batch_inference.ipynb b/doc/source/templates/tests/01_batch_inference/batch_inference.ipynb similarity index 89% rename from doc/source/templates/tests/batch_inference.ipynb rename to doc/source/templates/tests/01_batch_inference/batch_inference.ipynb index 6fd5bf32bc56..0dcfa9cbc4a9 100644 --- a/doc/source/templates/tests/batch_inference.ipynb +++ b/doc/source/templates/tests/01_batch_inference/batch_inference.ipynb @@ -6,7 +6,7 @@ "id": "cfababd6", "metadata": { "tags": [ - "test" + "remove-cell" ] }, "outputs": [], @@ -22,9 +22,8 @@ ] }, { - "attachments": {}, "cell_type": "markdown", - "id": "6fbc3e3c", + "id": "02ff59ce", "metadata": {}, "source": [ "# Scaling Batch Inference with Ray Data\n", @@ -36,7 +35,7 @@ "At a high level, this template will:\n", "1. [Load your dataset using Ray Data.](https://docs.ray.io/en/latest/data/creating-datasets.html)\n", "2. [Preprocess your dataset before feeding it to your model.](https://docs.ray.io/en/latest/data/transforming-datasets.html)\n", - "3. [Initialize your model and perform inference on a shard of your dataset with a remote actor.](https://docs.ray.io/en/latest/data/transforming-datasets.html#callable-class-udfs)\n", + "3. [Initialize your model and perform inference on a shard of your dataset with a remote actor.](https://docs.ray.io/en/latest/data/transforming-datasets.html#writing-user-defined-functions-udfs)\n", "4. [Save your prediction results.](https://docs.ray.io/en/latest/data/api/input_output.html)\n", "\n", "> Slot in your code below wherever you see the ✂️ icon to build a many model training Ray application off of this template!" 
@@ -73,24 +72,40 @@ { "cell_type": "code", "execution_count": null, - "id": "770bbdc7", - "metadata": {}, + "id": "9d49681f-baf0-4ed8-9740-5c4e38744311", + "metadata": { + "tags": [] + }, "outputs": [], "source": [ - "!ray status" + "NUM_WORKERS: int = 4\n", + "NUM_GPUS_PER_WORKER: float = 1\n" ] }, { "cell_type": "code", "execution_count": null, - "id": "9d49681f-baf0-4ed8-9740-5c4e38744311", + "id": "20e9e07c", "metadata": { - "tags": [] + "tags": [ + "remove-cell" + ] }, "outputs": [], "source": [ - "NUM_WORKERS: int = 4\n", - "NUM_GPUS_PER_WORKER: float = 1\n" + "if SMOKE_TEST:\n", + " NUM_WORKERS = 4\n", + " NUM_GPUS_PER_WORKER = 0.25\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "770bbdc7", + "metadata": {}, + "outputs": [], + "source": [ + "!ray status" ] }, { @@ -105,10 +120,12 @@ }, { "cell_type": "markdown", - "id": "3b6f2352", + "id": "245f37c9", "metadata": {}, "source": [ - "> ✂️ Replace this function with logic to load your own data with Ray Data." + "> ✂️ Replace this function with logic to load your own data with Ray Data.\n", + ">\n", + "> See [the Ray Data guide on creating datasets](https://docs.ray.io/en/latest/data/creating-datasets.html) to learn how to create a dataset based on the data type and how file storage format." 
] }, { @@ -118,7 +135,7 @@ "metadata": {}, "outputs": [], "source": [ - "def load_ray_dataset() -> ray.data.Dataset:\n", + "def load_ray_dataset():\n", " from ray.data.datasource.partitioning import Partitioning\n", "\n", " s3_uri = \"s3://anonymous@air-example-data-2/imagenette2/val/\"\n", @@ -146,7 +163,7 @@ "id": "965db5e8", "metadata": { "tags": [ - "test" + "remove-cell" ] }, "outputs": [], @@ -199,7 +216,9 @@ "outputs": [], "source": [ "ds = ds.map_batches(preprocess, batch_format=\"numpy\")\n", - "ds.schema()\n" + "\n", + "print(\"Dataset schema:\\n\", ds.schema())\n", + "print(\"Number of images:\", ds.count())\n" ] }, { @@ -230,9 +249,9 @@ " def __call__(self, batch: Dict[str, np.ndarray]) -> Dict[str, np.ndarray]:\n", " # \n", " input_data = torch.as_tensor(batch[\"image\"], device=self.device)\n", - " with torch.no_grad():\n", - " result = self.model(input_data)\n", - " return {\"predictions\": result.cpu().numpy()}\n" + " with torch.inference_mode():\n", + " pred = self.model(input_data)\n", + " return {\"predicted_class_index\": pred.argmax(dim=1).detach().cpu().numpy()}\n" ] }, { @@ -254,8 +273,9 @@ " PredictCallable,\n", " batch_size=128,\n", " compute=ray.data.ActorPoolStrategy(\n", - " # Fix the number of batch inference workers to a specified value.\n", - " size=NUM_WORKERS,\n", + " # Fix the number of batch inference workers to `NUM_WORKERS`.\n", + " min_size=NUM_WORKERS,\n", + " max_size=NUM_WORKERS,\n", " ),\n", " num_gpus=NUM_GPUS_PER_WORKER,\n", " batch_format=\"numpy\",\n", @@ -280,7 +300,7 @@ "metadata": {}, "outputs": [], "source": [ - "preds.take(1)\n" + "preds.take(5)\n" ] }, { @@ -326,7 +346,7 @@ "id": "1e88a268", "metadata": { "tags": [ - "test" + "remove-cell" ] }, "outputs": [], diff --git a/doc/source/templates/tests/many_model_training.ipynb b/doc/source/templates/tests/02_many_model_training/many_model_training.ipynb similarity index 87% rename from doc/source/templates/tests/many_model_training.ipynb rename to 
doc/source/templates/tests/02_many_model_training/many_model_training.ipynb index f6082a27826b..dbc4aa42bfcd 100644 --- a/doc/source/templates/tests/many_model_training.ipynb +++ b/doc/source/templates/tests/02_many_model_training/many_model_training.ipynb @@ -26,7 +26,7 @@ "source": [ "# Scaling Many Model Training with Ray Tune\n", "\n", - "This template is a quickstart to using [Ray Tune](https://docs.ray.io/en/latest/tune/index.html) for batch inference. Ray Tune is one of many libraries under the [Ray AI Runtime](https://docs.ray.io/en/latest/ray-air/getting-started.html). See [this blog post](https://www.anyscale.com/blog/training-one-million-machine-learning-models-in-record-time-with-ray) for more information on the benefits of performing many model training with Ray!\n", + "This template is a quickstart to using [Ray Tune](https://docs.ray.io/en/latest/tune/index.html) for training many models in parallel. Ray Tune is one of many libraries in the [Ray AI Runtime](https://docs.ray.io/en/latest/ray-air/getting-started.html). See [this blog post](https://www.anyscale.com/blog/training-one-million-machine-learning-models-in-record-time-with-ray) for more information on the benefits of performing many model training with Ray!\n", "\n", "This template walks through time-series forecasting using `statsforecast`, but the framework and data format can be swapped out easily -- they are there just to help you build your own application!\n", "\n", @@ -46,61 +46,84 @@ ] }, { - "attachments": {}, "cell_type": "markdown", - "id": "c56bb4d0", + "id": "182f65ea", "metadata": {}, "source": [ "## Handling Dependencies\n", "\n", "This template requires certain Python packages to be available to every node in the cluster.\n", "\n", - "> ✂️ Add your own package dependencies! 
You can specify bounds for package versions\n", - "> in the same format as a `requirements.txt` file.\n" + "> ✂️ Add your own package dependencies in the `requirements.txt` file!\n" ] }, { "cell_type": "code", "execution_count": null, - "id": "0c9b3dec", + "id": "511f1722", + "metadata": {}, + "outputs": [], + "source": [ + "requirements_path = \"./requirements.txt\"\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "a9a44498", "metadata": { - "tags": [] + "tags": [ + "remove-cell" + ] }, "outputs": [], "source": [ - "requirements = [\n", - " \"statsforecast==1.5.0\",\n", - "]\n" + "if not os.path.exists(requirements_path):\n", + " # CWD is at the ray root in CI\n", + " requirements_path = \"doc/source/templates/tests/02_many_model_training/requirements.txt\"\n", + " assert os.path.exists(requirements_path), (requirements_path, os.getcwd())\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5cd9da7f", + "metadata": {}, + "outputs": [], + "source": [ + "with open(requirements_path, \"r\") as f:\n", + " requirements = f.read().strip().splitlines()\n", + "\n", + "print(\"Requirements:\")\n", + "print(\"\\n\".join(requirements))\n" ] }, { - "attachments": {}, "cell_type": "markdown", - "id": "eff9369f", + "id": "90a96c5b", "metadata": {}, "source": [ "First, we may want to use these modules right here in our script, which is running on the head node.\n", "Install the Python packages on the head node using `pip install`.\n", "\n", - "You may need to restart this notebook kernel to access the installed packages.\n" + "```{note}\n", + "You may need to restart this notebook kernel to access the installed packages.\n", + "```\n" ] }, { "cell_type": "code", "execution_count": null, - "id": "5cba940c", + "id": "18069827", "metadata": {}, "outputs": [], "source": [ - "all_requirements = \" \".join(requirements)\n", - "\n", - "%pip install {all_requirements}\n" + "%pip install -r {requirements_path} --upgrade" ] }, { - 
"attachments": {}, "cell_type": "markdown", - "id": "1dcaea58", + "id": "3e17a4da", "metadata": {}, "source": [ "Next, we need to make sure all worker nodes also have access to the dependencies.\n", @@ -154,7 +177,7 @@ }, "outputs": [], "source": [ - "NUM_DATA_PARTITIONS: int = 1000\n" + "NUM_DATA_PARTITIONS: int = 500\n" ] }, { diff --git a/doc/source/templates/tests/02_many_model_training/requirements.txt b/doc/source/templates/tests/02_many_model_training/requirements.txt new file mode 120000 index 000000000000..2b363f05fc09 --- /dev/null +++ b/doc/source/templates/tests/02_many_model_training/requirements.txt @@ -0,0 +1 @@ +../../02_many_model_training/requirements.txt \ No newline at end of file diff --git a/doc/source/templates/tests/03_serving_stable_diffusion/requirements.txt b/doc/source/templates/tests/03_serving_stable_diffusion/requirements.txt new file mode 120000 index 000000000000..bb4db21916ff --- /dev/null +++ b/doc/source/templates/tests/03_serving_stable_diffusion/requirements.txt @@ -0,0 +1 @@ +../../03_serving_stable_diffusion/requirements.txt \ No newline at end of file diff --git a/doc/source/templates/tests/serving_stable_diffusion.ipynb b/doc/source/templates/tests/03_serving_stable_diffusion/serving_stable_diffusion.ipynb similarity index 91% rename from doc/source/templates/tests/serving_stable_diffusion.ipynb rename to doc/source/templates/tests/03_serving_stable_diffusion/serving_stable_diffusion.ipynb index 086b29335d79..68f8a0989ffd 100644 --- a/doc/source/templates/tests/serving_stable_diffusion.ipynb +++ b/doc/source/templates/tests/03_serving_stable_diffusion/serving_stable_diffusion.ipynb @@ -34,43 +34,61 @@ ] }, { - "attachments": {}, "cell_type": "markdown", - "id": "25364e8e", + "id": "3c8c02eb", "metadata": {}, "source": [ "## Handling Dependencies\n", "\n", "This template requires certain Python packages to be available to every node in the cluster.\n", "\n", - "> ✂️ Add your own package dependencies! 
You can specify bounds for package versions\n", - "> in the same format as a `requirements.txt` file.\n" + "> ✂️ Add your own package dependencies in the `requirements.txt` file!\n" ] }, { "cell_type": "code", "execution_count": null, - "id": "1b79bfb9", + "id": "814d966b", "metadata": {}, "outputs": [], "source": [ - "requirements = [\n", - " \"accelerate==0.14.0\",\n", - " \"diffusers==0.15.1\",\n", - " \"numpy>=1.21.6,<=1.23.5\",\n", - " \"Pillow==9.3.0\",\n", - " \"scipy>=1.7.3,<=1.9.3\",\n", - " \"tensorboard>=2.11.2,<=2.12.0\",\n", - " \"torch==1.13.0\",\n", - " \"torchvision==0.14.0\",\n", - " \"transformers==4.28.1\",\n", - "]\n" + "requirements_path = \"./requirements.txt\"\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "dec4a7bb", + "metadata": { + "tags": [ + "remove-cell" + ] + }, + "outputs": [], + "source": [ + "if not os.path.exists(requirements_path):\n", + " # CWD is at the ray root in CI\n", + " requirements_path = \"doc/source/templates/tests/03_serving_stable_diffusion/requirements.txt\"\n", + " assert os.path.exists(requirements_path), (requirements_path, os.getcwd())\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "d0d78e94", + "metadata": {}, + "outputs": [], + "source": [ + "with open(requirements_path, \"r\") as f:\n", + " requirements = f.read().strip().splitlines()\n", + "\n", + "print(\"Requirements:\")\n", + "print(\"\\n\".join(requirements))\n" ] }, { - "attachments": {}, "cell_type": "markdown", - "id": "33419c37", + "id": "6b73761e", "metadata": {}, "source": [ "First, we may want to use these modules right here in our script, which is running on the head node.\n", @@ -84,38 +102,33 @@ { "cell_type": "code", "execution_count": null, - "id": "9aadf0c5", + "id": "2f6eaf2b", "metadata": {}, "outputs": [], "source": [ - "all_requirements = \" \".join(requirements)\n", - "\n", - "%pip install {all_requirements}\n" + "%pip install -r {requirements_path} --upgrade" ] }, { - "attachments": 
{}, "cell_type": "markdown", - "id": "4ba5feba", + "id": "4b14415f", "metadata": {}, "source": [ "Next, we need to make sure all worker nodes also have access to the dependencies.\n", "For this, use a [Ray Runtime Environment](https://docs.ray.io/en/latest/ray-core/handling-dependencies.html#runtime-environments)\n", - "to dynamically set up dependencies throughout the cluster.\n", - "\n", - "```{note}\n", - "This will be used later when setting up the Ray Serve deployment.\n", - "```\n" + "to dynamically set up dependencies throughout the cluster.\n" ] }, { "cell_type": "code", "execution_count": null, - "id": "ca638dbb", + "id": "d8b21822", "metadata": {}, "outputs": [], "source": [ - "runtime_env = {\"pip\": requirements}\n" + "import ray\n", + "\n", + "ray.init(runtime_env={\"pip\": requirements})\n" ] }, { @@ -213,11 +226,8 @@ "metadata": {}, "outputs": [], "source": [ - "# Configure each model replica to:\n", - "# 1. Setup the dependencies listed earlier.\n", - "# 2. Use the specified resources.\n", + "# Configure each model replica to use the specified resources.\n", "ray_actor_options = {\n", - " \"runtime_env\": runtime_env,\n", " \"num_gpus\": NUM_GPUS_PER_REPLICA,\n", "}\n" ] @@ -502,8 +512,7 @@ "metadata": {}, "outputs": [], "source": [ - "mean_generation_time = main()\n", - "serve.shutdown()\n" + "mean_generation_time = main()\n" ] }, { @@ -517,6 +526,17 @@ "testing with the local endpoint." 
] }, + { + "cell_type": "code", + "execution_count": null, + "id": "3660120b", + "metadata": {}, + "outputs": [], + "source": [ + "# Shut down the model replicas once you're done!\n", + "serve.shutdown()\n" + ] + }, { "cell_type": "code", "execution_count": null, diff --git a/release/release_tests.yaml b/release/release_tests.yaml index 2daad40a8c00..e7ad60a468c2 100644 --- a/release/release_tests.yaml +++ b/release/release_tests.yaml @@ -835,13 +835,13 @@ - name: workspace_template_batch_inference group: Workspace templates - working_dir: workspace_templates/tests + working_dir: workspace_templates/tests/01_batch_inference python: "3.9" frequency: nightly-3x team: ml cluster: - cluster_env: ../configs/release_test_cluster_env.yaml - cluster_compute: ../configs/compute/gpu/aws_release_test.yaml + cluster_env: ../../configs/release_test_cluster_env.yaml + cluster_compute: ../../configs/compute/gpu/aws_release_test.yaml run: timeout: 600 @@ -853,23 +853,23 @@ env: gce frequency: manual cluster: - cluster_env: ../configs/release_test_cluster_env.yaml - cluster_compute: ../configs/compute/gpu/gce_release_test.yaml + cluster_env: ../../configs/release_test_cluster_env.yaml + cluster_compute: ../../configs/compute/gpu/gce_release_test.yaml - name: workspace_template_many_model_training group: Workspace templates - working_dir: workspace_templates/tests + working_dir: workspace_templates/tests/02_many_model_training python: "3.9" frequency: nightly-3x team: ml cluster: - cluster_env: ../configs/release_test_cluster_env.yaml - cluster_compute: ../configs/compute/cpu/aws_release_test.yaml + cluster_env: ../../configs/release_test_cluster_env.yaml + cluster_compute: ../../configs/compute/cpu/aws_release_test.yaml run: timeout: 600 - script: jupyter nbconvert --to script --output _test many_model_training.ipynb && ipython _test.py + script: pip install -U -r requirements.txt && jupyter nbconvert --to script --output _test many_model_training.ipynb && ipython _test.py 
variations: - __suffix__: aws @@ -877,23 +877,23 @@ env: gce frequency: manual cluster: - cluster_env: ../configs/release_test_cluster_env.yaml - cluster_compute: ../configs/compute/cpu/gce_release_test.yaml + cluster_env: ../../configs/release_test_cluster_env.yaml + cluster_compute: ../../configs/compute/cpu/gce_release_test.yaml - name: workspace_template_serving_stable_diffusion group: Workspace templates - working_dir: workspace_templates/tests + working_dir: workspace_templates/tests/03_serving_stable_diffusion python: "3.9" frequency: nightly-3x team: ml cluster: - cluster_env: ../configs/release_test_cluster_env.yaml - cluster_compute: ../configs/compute/gpu/aws_release_test.yaml + cluster_env: ../../configs/release_test_cluster_env.yaml + cluster_compute: ../../configs/compute/gpu/aws_release_test.yaml run: - timeout: 900 - script: jupyter nbconvert --to script --output _test serving_stable_diffusion.ipynb && ipython _test.py + timeout: 600 + script: pip install -U -r requirements.txt && jupyter nbconvert --to script --output _test serving_stable_diffusion.ipynb && ipython _test.py variations: - __suffix__: aws @@ -901,8 +901,8 @@ env: gce frequency: manual cluster: - cluster_env: ../configs/release_test_cluster_env.yaml - cluster_compute: ../configs/compute/gpu/gce_release_test.yaml + cluster_env: ../../configs/release_test_cluster_env.yaml + cluster_compute: ../../configs/compute/gpu/gce_release_test.yaml ####################### From a762d0d6b1b9a475ec57513030c7623fb202aa50 Mon Sep 17 00:00:00 2001 From: Lonnie Liu <95255098+aslonnie@users.noreply.github.com> Date: Mon, 1 May 2023 19:50:13 -0700 Subject: [PATCH 182/424] [CI] Add missing release test infra dependencies. (#34900) And recompile requirements. 
Signed-off-by: Lonnie Liu --- release/requirements_buildkite.in | 5 ++++- release/requirements_buildkite.txt | 31 +++++++++++++++++++++++++++++- 2 files changed, 34 insertions(+), 2 deletions(-) diff --git a/release/requirements_buildkite.in b/release/requirements_buildkite.in index 26555177a25d..9006223cd6f8 100644 --- a/release/requirements_buildkite.in +++ b/release/requirements_buildkite.in @@ -1,12 +1,15 @@ # Requirements to run release tests from buildkite (client dependencies will be installed separately) # Copy anyscale pin to requirements.txt and util.py anyscale -click boto3 +click +freezegun google-cloud-storage jinja2 protobuf >= 3.15.3, != 3.19.5 pydantic < 1.10.0 +pytest pyyaml requests retry + diff --git a/release/requirements_buildkite.txt b/release/requirements_buildkite.txt index d2a08ee4021f..b27545b692be 100644 --- a/release/requirements_buildkite.txt +++ b/release/requirements_buildkite.txt @@ -346,10 +346,18 @@ decorator==5.1.1 \ --hash=sha256:637996211036b6385ef91435e4fae22989472f9d571faba8927ba8253acbc330 \ --hash=sha256:b8c3f85900b9dc423225913c5aace94729fe1fa9763b38939a95226f02d37186 # via retry +exceptiongroup==1.1.1 \ + --hash=sha256:232c37c63e4f682982c8b6459f33a8981039e5fb8756b2074364e5055c498c9e \ + --hash=sha256:d484c3090ba2889ae2928419117447a14daf3c1231d5e30d0aae34f354f01785 + # via pytest expiringdict==1.2.2 \ --hash=sha256:09a5d20bc361163e6432a874edd3179676e935eb81b925eccef48d409a8a45e8 \ --hash=sha256:300fb92a7e98f15b05cf9a856c1415b3bc4f2e132be07daa326da6414c23ee09 # via anyscale +freezegun==1.2.2 \ + --hash=sha256:cd22d1ba06941384410cd967d8a99d5ae2442f57dfafeff2fda5de8dc5c05446 \ + --hash=sha256:ea1b963b993cb9ea195adbd893a48d573fda951b0da64f60883d7e988b606c9f + # via -r release/requirements_buildkite.in frozenlist==1.3.3 \ --hash=sha256:008a054b75d77c995ea26629ab3a0c0d7281341f2fa7e1e85fa6153ae29ae99c \ --hash=sha256:02c9ac843e3390826a265e331105efeab489ffaf4dd86384595ee8ce6d35ae7f \ @@ -562,10 +570,16 @@ importlib-metadata==6.5.1 \ 
# click # humanize # jsonschema + # pluggy + # pytest importlib-resources==5.12.0 \ --hash=sha256:4be82589bf5c1d7999aedf2a45159d10cb3ca4f19b2271f8792bc8e6da7b22f6 \ --hash=sha256:7b1deeebbf351c7578e09bf2f63fa2ce8b5ffec296e0d349139d43cca061a81a # via jsonschema +iniconfig==2.0.0 \ + --hash=sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3 \ + --hash=sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374 + # via pytest jinja2==3.1.2 \ --hash=sha256:31351a702a408a9e7595a8fc6150fc3f43bb6bf7e319770cbc0db9df9437e852 \ --hash=sha256:6088930bfe239f0e6710546ab9c19c9ef35e29792895fed6e6e31a023a182a61 @@ -737,7 +751,9 @@ oauth2client==4.1.3 \ packaging==23.1 \ --hash=sha256:994793af429502c4ea2ebf6bf664629d07c1a9fe974af92966e4b8d2df7edc61 \ --hash=sha256:a392980d2b6cffa644431898be54b0045151319d1e7ec34f0cfed48767dd334f - # via anyscale + # via + # anyscale + # pytest pathspec==0.11.1 \ --hash=sha256:2798de800fa92780e33acca925945e9a19a133b715067cf165b8866c15a31687 \ --hash=sha256:d8af70af76652554bd134c22b3e8a1cc46ed7d91edcdd721ef1a0c51a84a5293 @@ -746,6 +762,10 @@ pkgutil-resolve-name==1.3.10 \ --hash=sha256:357d6c9e6a755653cfd78893817c0853af365dd51ec97f3d358a819373bbd174 \ --hash=sha256:ca27cc078d25c5ad71a9de0a7a330146c4e014c2462d9af19c6b828280649c5e # via jsonschema +pluggy==1.0.0 \ + --hash=sha256:4224373bacce55f955a878bf9cfa763c1e360858e330072059e10bad68531159 \ + --hash=sha256:74134bbf457f031a36d68416e1509f34bd5ccc019f0bcc952c7b909d06b37bd3 + # via pytest protobuf==4.22.3 \ --hash=sha256:13233ee2b9d3bd9a5f216c1fa2c321cd564b93d8f2e4f521a85b585447747997 \ --hash=sha256:23452f2fdea754a8251d0fc88c0317735ae47217e0d27bf330a30eec2848811a \ @@ -861,12 +881,17 @@ pyrsistent==0.19.3 \ --hash=sha256:f0774bf48631f3a20471dd7c5989657b639fd2d285b861237ea9e82c36a415a9 \ --hash=sha256:f0e7c4b2f77593871e918be000b96c8107da48444d57005b6a6bc61fb4331b2c # via jsonschema +pytest==7.3.1 \ + 
--hash=sha256:3799fa815351fea3a5e96ac7e503a96fa51cc9942c3753cda7651b93c1cfa362 \ + --hash=sha256:434afafd78b1d78ed0addf160ad2b77a30d35d4bdf8af234fe621919d9ed15e3 + # via -r release/requirements_buildkite.in python-dateutil==2.8.2 \ --hash=sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86 \ --hash=sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9 # via # anyscale # botocore + # freezegun pytz-deprecation-shim==0.1.0.post0 \ --hash=sha256:8314c9692a636c8eb3bda879b9f119e350e93223ae83e70e80c31675a0fdc1a6 \ --hash=sha256:af097bae1b616dde5c5744441e2ddc69e74dfdcb0c263129610d85b87445a59d @@ -970,6 +995,10 @@ termcolor==2.2.0 \ --hash=sha256:91ddd848e7251200eac969846cbae2dacd7d71c2871e92733289e7e3666f48e7 \ --hash=sha256:dfc8ac3f350788f23b2947b3e6cfa5a53b630b612e6cd8965a015a776020b99a # via halo +tomli==2.0.1 \ + --hash=sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc \ + --hash=sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f + # via pytest tqdm==4.65.0 \ --hash=sha256:1871fb68a86b8fb3b59ca4cdd3dcccbc7e6d613eeed31f4c332531977b89beb5 \ --hash=sha256:c4f53a17fe37e132815abceec022631be8ffe1b9381c2e6e30aa70edc99e9671 From b6bf8e577ea297de3deea893998116c2e3ce3bc3 Mon Sep 17 00:00:00 2001 From: Ricky Xu Date: Tue, 2 May 2023 12:41:18 +0800 Subject: [PATCH 183/424] [ci][rllib][core] Make rllib_multi_gpu_with_attention_learning_tests.gce run with debug wheels (#34897) Closes #34591 The current stacktrace when sigabort doesn't yield any useful --- release/release_tests.yaml | 5 ++- release/rllib_tests/debug_app_config.yaml | 49 +++++++++++++++++++++++ 2 files changed, 53 insertions(+), 1 deletion(-) create mode 100755 release/rllib_tests/debug_app_config.yaml diff --git a/release/release_tests.yaml b/release/release_tests.yaml index e7ad60a468c2..01cc62e251a0 100644 --- a/release/release_tests.yaml +++ b/release/release_tests.yaml @@ -4277,7 +4277,10 @@ env: gce frequency: manual cluster: - 
cluster_env: app_config.yaml + # TODO(https://github.com/ray-project/ray/issues/34591) + # Revert to the comment below once ^ closed. + # cluster_env: app_config.yaml + cluster_env: debug_app_config.yaml cluster_compute: 8gpus_96cpus_gce.yaml - name: rllib_stress_tests diff --git a/release/rllib_tests/debug_app_config.yaml b/release/rllib_tests/debug_app_config.yaml new file mode 100755 index 000000000000..c51fbcc4d39b --- /dev/null +++ b/release/rllib_tests/debug_app_config.yaml @@ -0,0 +1,49 @@ +base_image: {{ env["RAY_IMAGE_ML_NIGHTLY_GPU"] | default("anyscale/ray-ml:nightly-py37-gpu") }} +env_vars: {"LD_LIBRARY_PATH": "$LD_LIBRARY_PATH:/home/ray/.mujoco/mujoco210/bin", "RLLIB_TEST_NO_JAX_IMPORT": "1"} +debian_packages: + - unzip + - zip + + # Needed to run MuJoCo with gymnasium. + - libosmesa6-dev + - libgl1-mesa-glx + - libglfw3 + - patchelf + # End: MuJoCo. + +python: + pip_packages: + ## These dependencies should be handled by requirements_rllib.txt and + ## requirements_ml_docker.txt and removed here + - gymnasium[atari,mujoco]==0.26.3 + - ale-py==0.8.0 + - gym==0.26.2 + - mujoco-py<2.2,>=2.1 + # AutoROM downloads ROMs via torrent when they are built. The torrent is unreliable, + # so we built it for py3 and use that instead. This wheel was tested for python 3.7, 3.8, + # and 3.9. + - https://ray-ci-deps-wheels.s3.us-west-2.amazonaws.com/AutoROM.accept_rom_license-0.5.4-py3-none-any.whl + - pytest + conda_packages: [] + +post_build_cmds: + - pip3 uninstall -y ray || true && pip3 install -U {{ env["RAY_WHEELS"] | default("ray") }} + # TODO(https://github.com/ray-project/ray/issues/34591) + - pip3 install --force-reinstall -U https://s3-us-west-2.amazonaws.com/ray-wheels/env["RAY_TEST_BRANCH"]/env["RAY_COMMIT_OF_WHEEL"]/ray-3.0.0.dev0%2Bdebug-cp37-cp37m-manylinux2014_x86_64.whl + - {{ env["RAY_WHEELS_SANITY_CHECK"] | default("echo No Ray wheels sanity check") }} + # Clone the rl-experiments repo for offline-RL files. 
+ - git clone https://github.com/ray-project/rl-experiments.git + - unzip rl-experiments/halfcheetah-sac/2022-12-17/halfcheetah_1500_mean_reward_sac.zip -d ~/. + # Use torch+CUDA10.2 for our release tests. CUDA11.x has known performance issues in combination with torch+GPU+CNNs + # TODO(sven): remove once nightly image gets upgraded. + - pip3 install torch==1.12.1+cu102 torchvision==0.13.1+cu102 --extra-index-url https://download.pytorch.org/whl/cu102 + + # TODO(sven): remove once nightly image gets gymnasium and the other new dependencies. + - wget https://mujoco.org/download/mujoco210-linux-x86_64.tar.gz + - mkdir ~/.mujoco + - mv mujoco210-linux-x86_64.tar.gz ~/.mujoco/. + - cd ~/.mujoco + - tar -xf ~/.mujoco/mujoco210-linux-x86_64.tar.gz + + # not strictly necessary, but makes debugging easier + - git clone https://github.com/ray-project/ray.git From 9cf0ebdd20b3c79fc307b43e85b599b450a1344c Mon Sep 17 00:00:00 2001 From: Chen Shen Date: Mon, 1 May 2023 22:51:21 -0700 Subject: [PATCH 184/424] Revert "[CI] Add Lightning 2.0 compatibility test pipeline (#34147)" (#34933) This reverts commit aeed2b3c58db4a505f2a5b5502e41898290effdb. 
--- .buildkite/pipeline.gpu_large.yml | 13 ------- ci/ci.sh | 2 - ci/env/install-minimal.sh | 2 - python/ray/train/BUILD | 8 ++-- .../ray/train/tests/lightning_test_utils.py | 37 +++++-------------- .../train/tests/test_lightning_checkpoint.py | 10 +---- .../train/tests/test_lightning_predictor.py | 2 +- .../ray/train/tests/test_lightning_trainer.py | 4 +- 8 files changed, 19 insertions(+), 59 deletions(-) diff --git a/.buildkite/pipeline.gpu_large.yml b/.buildkite/pipeline.gpu_large.yml index e15ee57050ea..2f993cd96546 100644 --- a/.buildkite/pipeline.gpu_large.yml +++ b/.buildkite/pipeline.gpu_large.yml @@ -49,16 +49,3 @@ - pip install -Ur ./python/requirements/ml/requirements_ml_docker.txt - ./ci/env/env_info.sh - bazel test --config=ci $(./ci/run/bazel_export_options) --build_tests_only --test_tag_filters=gpu,-timeseries_libs,-py37,-post_wheel_build doc/... - -- label: ":zap: :python: Lightning 2.0 Train GPU tests" - conditions: - ["NO_WHEELS_REQUIRED", "RAY_CI_TRAIN_AFFECTED"] - commands: - - cleanup() { if [ "${BUILDKITE_PULL_REQUEST}" = "false" ]; then ./ci/build/upload_build_info.sh; fi }; trap cleanup EXIT - - NO_DASHBOARD=1 ./ci/env/install-minimal.sh 3.8 - - PYTHON=3.8 DOC_TESTING=1 TRAIN_TESTING=1 TUNE_TESTING=1 ./ci/env/install-dependencies.sh - - pip install -Ur ./python/requirements/ml/requirements_ml_docker.txt - - pip uninstall -y pytorch-lightning - - pip install lightning==2.0.0 - - ./ci/env/env_info.sh - - bazel test --config=ci $(./scripts/bazel_export_options) --test_tag_filters=ptl_v2 python/ray/train/... 
\ No newline at end of file diff --git a/ci/ci.sh b/ci/ci.sh index 5bf5ae3ec4d3..5a3703d161f8 100755 --- a/ci/ci.sh +++ b/ci/ci.sh @@ -287,8 +287,6 @@ install_npm_project() { build_dashboard_front_end() { if [ "${OSTYPE}" = msys ]; then { echo "WARNING: Skipping dashboard due to NPM incompatibilities with Windows"; } 2> /dev/null - elif [ "${NO_DASHBOARD-}" = "1" ]; then - echo "Skipping dashboard build" else ( cd ray/dashboard/client diff --git a/ci/env/install-minimal.sh b/ci/env/install-minimal.sh index 9da00d7517c3..e99e453ea11e 100755 --- a/ci/env/install-minimal.sh +++ b/ci/env/install-minimal.sh @@ -1,7 +1,5 @@ #!/usr/bin/env bash -set -xe - # Python version can be specified as 3.7, 3.8, 3.9, etc.. if [ -z "$1" ]; then PYTHON_VERSION=${PYTHON-3.7} diff --git a/python/ray/train/BUILD b/python/ray/train/BUILD index 1d932ca912a8..e2a244455ace 100644 --- a/python/ray/train/BUILD +++ b/python/ray/train/BUILD @@ -412,7 +412,7 @@ py_test( name = "test_lightning_checkpoint", size = "medium", srcs = ["tests/test_lightning_checkpoint.py"], - tags = ["team:ml", "exclusive", "ray_air", "gpu", "ptl_v2"], + tags = ["team:ml", "exclusive", "ray_air", "gpu"], deps = [":train_lib"] ) @@ -420,7 +420,7 @@ py_test( name = "test_lightning_trainer_restore", size = "medium", srcs = ["tests/test_lightning_trainer_restore.py"], - tags = ["team:ml", "exclusive", "ray_air", "gpu", "ptl_v2"], + tags = ["team:ml", "exclusive", "ray_air", "gpu"], deps = [":train_lib"] ) @@ -428,7 +428,7 @@ py_test( name = "test_lightning_trainer", size = "large", srcs = ["tests/test_lightning_trainer.py"], - tags = ["team:ml", "exclusive", "ray_air", "gpu", "ptl_v2"], + tags = ["team:ml", "exclusive", "ray_air", "gpu"], deps = [":train_lib"] ) @@ -436,7 +436,7 @@ py_test( name = "test_lightning_predictor", size = "medium", srcs = ["tests/test_lightning_predictor.py"], - tags = ["team:ml", "exclusive", "ray_air", "gpu", "ptl_v2"], + tags = ["team:ml", "exclusive", "ray_air", "gpu"], deps = [":train_lib"] 
) diff --git a/python/ray/train/tests/lightning_test_utils.py b/python/ray/train/tests/lightning_test_utils.py index 36288308cf59..68b925098d00 100644 --- a/python/ray/train/tests/lightning_test_utils.py +++ b/python/ray/train/tests/lightning_test_utils.py @@ -7,11 +7,9 @@ class LinearModule(pl.LightningModule): - def __init__(self, input_dim, output_dim, strategy="ddp") -> None: + def __init__(self, input_dim, output_dim) -> None: super().__init__() self.linear = nn.Linear(input_dim, output_dim) - self.loss = [] - self.strategy = strategy def forward(self, input): # Backwards compat for Ray data strict mode. @@ -27,23 +25,17 @@ def training_step(self, batch): def validation_step(self, val_batch, batch_idx): loss = self.forward(val_batch) - self.loss.append(loss) return {"val_loss": loss} - def on_validation_epoch_end(self) -> None: - avg_loss = torch.stack(self.loss).mean() + def validation_epoch_end(self, outputs) -> None: + avg_loss = torch.stack([x["val_loss"] for x in outputs]).mean() self.log("val_loss", avg_loss) - self.loss.clear() def predict_step(self, batch, batch_idx): return self.forward(batch) def configure_optimizers(self): - if self.strategy == "fsdp": - # Feed FSDP wrapped model parameters to optimizer - return torch.optim.SGD(self.trainer.model.parameters(), lr=0.1) - else: - return torch.optim.SGD(self.parameters(), lr=0.1) + return torch.optim.SGD(self.parameters(), lr=0.1) class DoubleLinearModule(pl.LightningModule): @@ -51,7 +43,6 @@ def __init__(self, input_dim_1, input_dim_2, output_dim) -> None: super().__init__() self.linear_1 = nn.Linear(input_dim_1, output_dim) self.linear_2 = nn.Linear(input_dim_2, output_dim) - self.loss = [] def forward(self, batch): input_1 = batch["input_1"] @@ -66,14 +57,12 @@ def training_step(self, batch): def validation_step(self, val_batch, batch_idx): loss = self.forward(val_batch) - self.loss.append(loss) return {"val_loss": loss} - def on_validation_epoch_end(self) -> None: + def validation_epoch_end(self, 
outputs) -> None: print("Validation Epoch:", self.current_epoch) - avg_loss = torch.stack(self.loss).mean() + avg_loss = torch.stack([x["val_loss"] for x in outputs]).mean() self.log("val_loss", avg_loss) - self.loss.clear() def predict_step(self, batch, batch_idx): return self.forward(batch) @@ -105,9 +94,7 @@ def __init__(self, lr: float, layer_1: int, layer_2: int): self.layer_1 = torch.nn.Linear(28 * 28, layer_1) self.layer_2 = torch.nn.Linear(layer_1, layer_2) self.layer_3 = torch.nn.Linear(layer_2, 10) - self.accuracy = Accuracy(task="multiclass", num_classes=10) - self.val_acc_list = [] - self.val_loss_list = [] + self.accuracy = Accuracy() def forward(self, x): batch_size, channels, width, height = x.size() @@ -137,17 +124,13 @@ def validation_step(self, val_batch, batch_idx): logits = self.forward(x) loss = F.nll_loss(logits, y) acc = self.accuracy(logits, y) - self.val_acc_list.append(acc) - self.val_loss_list.append(loss) return {"val_loss": loss, "val_accuracy": acc} - def on_validation_epoch_end(self): - avg_loss = torch.stack(self.val_loss_list).mean() - avg_acc = torch.stack(self.val_acc_list).mean() + def validation_epoch_end(self, outputs): + avg_loss = torch.stack([x["val_loss"] for x in outputs]).mean() + avg_acc = torch.stack([x["val_accuracy"] for x in outputs]).mean() self.log("ptl/val_loss", avg_loss) self.log("ptl/val_accuracy", avg_acc) - self.val_acc_list.clear() - self.val_loss_list.clear() def predict_step(self, batch, batch_idx, dataloader_idx=None): x = batch diff --git a/python/ray/train/tests/test_lightning_checkpoint.py b/python/ray/train/tests/test_lightning_checkpoint.py index 64bcd40b32be..e253bb2a8b85 100644 --- a/python/ray/train/tests/test_lightning_checkpoint.py +++ b/python/ray/train/tests/test_lightning_checkpoint.py @@ -38,10 +38,7 @@ def test_load_from_path(): # Train one epoch and save a checkpoint trainer = pl.Trainer( - max_epochs=1, - accelerator="cpu", - enable_progress_bar=False, - enable_checkpointing=False, + 
max_epochs=1, enable_progress_bar=False, enable_checkpointing=False ) trainer.fit(model=model, train_dataloaders=dataloader) ckpt_path = f"{tmpdir}/random_checkpoint_name.ckpt" @@ -78,10 +75,7 @@ def test_from_directory(): # Train one epoch and save a checkpoint trainer = pl.Trainer( - max_epochs=1, - accelerator="cpu", - enable_progress_bar=False, - enable_checkpointing=False, + max_epochs=1, enable_progress_bar=False, enable_checkpointing=False ) trainer.fit(model=model, train_dataloaders=dataloader) trainer.save_checkpoint(f"{tmpdir}/{MODEL_KEY}") diff --git a/python/ray/train/tests/test_lightning_predictor.py b/python/ray/train/tests/test_lightning_predictor.py index 2c34b5dcc984..49ee42073b16 100644 --- a/python/ray/train/tests/test_lightning_predictor.py +++ b/python/ray/train/tests/test_lightning_predictor.py @@ -28,7 +28,7 @@ def test_repr(): def save_checkpoint(model: pl.LightningModule, ckpt_path: str): - trainer = pl.Trainer(max_epochs=0, accelerator="cpu") + trainer = pl.Trainer(max_epochs=0) trainer.fit(model, train_dataloaders=DataLoader(torch.randn(1))) trainer.save_checkpoint(ckpt_path) diff --git a/python/ray/train/tests/test_lightning_trainer.py b/python/ray/train/tests/test_lightning_trainer.py index aab21fb4a6d1..a35f37ac54e9 100644 --- a/python/ray/train/tests/test_lightning_trainer.py +++ b/python/ray/train/tests/test_lightning_trainer.py @@ -74,7 +74,7 @@ def test_trainer_with_native_dataloader( config_builder = ( LightningConfigBuilder() - .module(LinearModule, input_dim=32, output_dim=4, strategy=strategy) + .module(LinearModule, input_dim=32, output_dim=4) .trainer(max_epochs=num_epochs, accelerator=accelerator) .strategy(strategy) ) @@ -124,7 +124,7 @@ def test_trainer_with_ray_data(ray_start_6_cpus_2_gpus, strategy, accelerator): lightning_config = ( LightningConfigBuilder() - .module(cls=LinearModule, input_dim=32, output_dim=4, strategy=strategy) + .module(cls=LinearModule, input_dim=32, output_dim=4) .trainer(max_epochs=num_epochs, 
accelerator=accelerator) .strategy(strategy) .build() From 4b6a37b9846ca4570c7f92bbbd20a71c4632f005 Mon Sep 17 00:00:00 2001 From: Max Pumperla Date: Tue, 2 May 2023 10:59:35 +0200 Subject: [PATCH 185/424] [docs] batch inference docs (#34567) Preview: https://anyscale-ray--34567.com.readthedocs.build/en/34567/data/batch_inference.html Signed-off-by: Max Pumperla Co-authored-by: angelinalg <122562471+angelinalg@users.noreply.github.com> --- doc/source/data/api/datastream.rst | 5 +- doc/source/data/batch_inference.rst | 751 ++++++++++++++++++ doc/source/data/doc_code/batch_formats.py | 66 ++ doc/source/data/doc_code/hf_quick_start.py | 51 ++ .../data/doc_code/pytorch_quick_start.py | 40 + doc/source/data/doc_code/tf_quick_start.py | 35 + .../doc_code/torch_image_batch_trained.py | 58 ++ .../data/images/actor_batch_prediction.png | Bin 0 -> 170593 bytes .../images/actor_pool_batch_prediction.png | Bin 0 -> 204129 bytes .../data/images/air_batch_prediction.png | Bin 0 -> 144664 bytes doc/source/data/images/batch_inference.png | Bin 0 -> 24719 bytes .../data/images/batch_inference_overview.png | Bin 0 -> 42633 bytes .../data/images/task_batch_prediction.png | Bin 0 -> 163989 bytes .../data/images/train_predict_pipeline.png | Bin 0 -> 122396 bytes doc/source/data/user-guide.rst | 9 +- doc/source/ray-air/api/predictor.rst | 1 + doc/source/ray-overview/use-cases.rst | 40 +- 17 files changed, 1029 insertions(+), 27 deletions(-) create mode 100644 doc/source/data/batch_inference.rst create mode 100644 doc/source/data/doc_code/batch_formats.py create mode 100644 doc/source/data/doc_code/hf_quick_start.py create mode 100644 doc/source/data/doc_code/pytorch_quick_start.py create mode 100644 doc/source/data/doc_code/tf_quick_start.py create mode 100644 doc/source/data/doc_code/torch_image_batch_trained.py create mode 100644 doc/source/data/images/actor_batch_prediction.png create mode 100644 doc/source/data/images/actor_pool_batch_prediction.png create mode 100644 
doc/source/data/images/air_batch_prediction.png create mode 100644 doc/source/data/images/batch_inference.png create mode 100644 doc/source/data/images/batch_inference_overview.png create mode 100644 doc/source/data/images/task_batch_prediction.png create mode 100644 doc/source/data/images/train_predict_pipeline.png diff --git a/doc/source/data/api/datastream.rst b/doc/source/data/api/datastream.rst index 19f11588186d..9f0404a6f327 100644 --- a/doc/source/data/api/datastream.rst +++ b/doc/source/data/api/datastream.rst @@ -138,9 +138,10 @@ Execution --------- .. autosummary:: - :toctree: doc/ + :toctree: doc/ - Datastream.materialize + Datastream.materialize + ActorPoolStrategy Serialization ------------- diff --git a/doc/source/data/batch_inference.rst b/doc/source/data/batch_inference.rst new file mode 100644 index 000000000000..5ace6a8bb45f --- /dev/null +++ b/doc/source/data/batch_inference.rst @@ -0,0 +1,751 @@ +.. _batch_inference_home: + +Running Batch Inference with Ray +================================ + +.. note:: + + In this tutorial you'll learn what batch inference is, why you might want to use + Ray for it, and how to use Ray effectively for this task. + If you are familiar with the basics of inference tasks, jump straight to + code in the :ref:`quickstart section ` or the + :ref:`advanced guide`. + +Batch inference refers to generating model predictions on a set of input data. +The model can range from a simple Python function to a complex neural network. +In batch inference, also known as offline inference, your model is run on a large +batch of data on demand. +This is in contrast to online inference, where the model is run immediately on a +data point when it becomes available. + +Here's a simple schematic of batch inference, "mapping" batches to predictions +via model inference: + +.. figure:: images/batch_inference.png + + Evaluating a batch of input data with a model to get predictions. 
+ +Batch inference is a foundational workload for many AI companies, especially since +more and more pre-trained models become available. +And while batch inference looks simple at the surface, it can be challenging to do right in production. +For instance, your data batches can be excessively large, too slow to process sequentially, +or might need custom preprocessing before being fed into your models. +To run inference workloads effectively at scale, you need to: + +- manage your compute infrastructure and cloud clusters +- parallelize data processing and utilize all your cluster resources (CPUs and GPUs) +- efficiently transfer data between cloud storage, CPUs for preprocessing, and GPUs for model inference + +Here's a realistic view of batch inference for modern AI applications: + +.. figure:: images/batch_inference_overview.png + + Evaluating a batch of input data with a model to get predictions. + +Why use Ray for batch inference? +--------------------------------- + +There are reasons to use Ray for batch inference, even if your current +use case does not require scaling yet: + +1. **Faster and Cheaper for modern Deep Learning Applications**: + Ray is built for + complex workloads and supports loading and preprocessing data with CPUs and model inference on GPUs. +2. **Cloud, framework, and data format agnostic**: + Ray Data works on any cloud provider or + any ML framework (like PyTorch and Tensorflow) and does not require a particular file format. +3. **Out of the box scaling**: + The same code that works on one machine also runs on a + large cluster without any changes. +4. **Python first**: + You can express your inference job directly in Python instead of + YAML files or other formats. + +.. _batch_inference_quickstart: + +Quick Start +----------- + +Install Ray with the data processing library, Ray Data: + +.. code-block:: bash + + pip install ray[data] + +Running batch inference is conceptually easy and requires three steps: + +1. 
Load your data into a Ray dataset and optionally apply any preprocessing you need. +2. Define your model for inference. +3. Run inference on your data by using the :meth:`ds.map_batches() ` + method from Ray Data. + +The last step also defines how your batch processing job gets distributed across your (local) cluster. +We start with very simple use cases here and build up to more complex ones in other guides and tutorials. + +.. note:: + + All advanced use cases ultimately boil down to extensions of the above three steps, + like loading and storing data from cloud storage, using complex preprocessing functions, + demanding model setups, additional postprocessing, or other customizations. + We'll cover these advanced use cases in the next sections. + +1. Loading and preprocessing data +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +For this quick start guide we use very small, in-memory data sets by +leveraging common Python libraries like NumPy and Pandas. +In general, once you load your datasets using Ray Data, you also want to apply some preprocessing steps. +We skip this step here for simplicity. +In any case, the result of this step is a Ray Datastream ``ds`` that we can use to run inference on. + +.. margin:: + + For larger data sets, you can use Ray Data to load data from cloud storage like S3 or GCS. + We'll cover this later on. + +.. tabs:: + + .. group-tab:: HuggingFace + + Create a Pandas + DataFrame with text data to run a GPT-2 model on. + + .. literalinclude:: ./doc_code/hf_quick_start.py + :language: python + :start-after: __hf_quickstart_load_start__ + :end-before: __hf_quickstart_load_end__ + + .. group-tab:: PyTorch + + Create a NumPy array with 100 + entries, which represents the input to a feed-forward neural network. + + .. literalinclude:: ./doc_code/pytorch_quick_start.py + :language: python + :start-after: __pt_quickstart_load_start__ + :end-before: __pt_quickstart_load_end__ + + .. 
group-tab:: TensorFlow + + Create a NumPy array with 100 + entries, which represents the input to a feed-forward neural network. + + .. literalinclude:: ./doc_code/tf_quick_start.py + :language: python + :start-after: __tf_quickstart_load_start__ + :end-before: __tf_quickstart_load_end__ + +2. Setting up your model +~~~~~~~~~~~~~~~~~~~~~~~~ + +Next, you want to set up your model for inference, by defining a predictor. +The core idea is to define a class that loads your model in its ``__init__`` method and +and implements a ``__call__`` method that takes a batch of data and returns a batch of predictions. +Below you find examples for PyTorch, TensorFlow, and HuggingFace. + +.. tabs:: + + .. group-tab:: HuggingFace + + .. callout:: + + .. literalinclude:: ./doc_code/hf_quick_start.py + :language: python + :start-after: __hf_quickstart_model_start__ + :end-before: __hf_quickstart_model_end__ + + .. annotations:: + <1> Use the constructor (``__init__``) to initialize your model. + + <2> The ``__call__`` method runs inference on a batch of data. + + .. group-tab:: PyTorch + + .. callout:: + + .. literalinclude:: ./doc_code/pytorch_quick_start.py + :language: python + :start-after: __pt_quickstart_model_start__ + :end-before: __pt_quickstart_model_end__ + + .. annotations:: + <1> Use the constructor (``__init__``) to initialize your model. + + <2> The ``__call__`` method runs inference on a batch of data. + + + .. group-tab:: TensorFlow + + .. callout:: + + .. literalinclude:: ./doc_code/tf_quick_start.py + :language: python + :start-after: __tf_quickstart_model_start__ + :end-before: __tf_quickstart_model_end__ + + .. annotations:: + <1> Use the constructor (``__init__``) to initialize your model. + + <2> The ``__call__`` method runs inference on a batch of data. + + +3. Getting predictions with Ray Data +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Once you have your Ray Datastream ``ds`` and your predictor class, you can use +:meth:`ds.map_batches() ` to get predictions. 
+``map_batches`` takes your predictor class as an argument and allows you to specify +``compute`` resources by defining the :class:`ActorPoolStrategy `. +In the example below, we use two CPUs to run inference in parallel and then print the results. +We cover resource allocation in more detail in :ref:`the configuration section of this guide `. + +.. tabs:: + + .. group-tab:: HuggingFace + + .. literalinclude:: ./doc_code/hf_quick_start.py + :language: python + :start-after: __hf_quickstart_prediction_start__ + :end-before: __hf_quickstart_prediction_end__ + + .. group-tab:: PyTorch + + .. literalinclude:: ./doc_code/pytorch_quick_start.py + :language: python + :start-after: __pt_quickstart_prediction_start__ + :end-before: __pt_quickstart_prediction_end__ + + .. group-tab:: TensorFlow + + .. literalinclude:: ./doc_code/tf_quick_start.py + :language: python + :start-after: __tf_quickstart_prediction_start__ + :end-before: __tf_quickstart_prediction_end__ + +.. _batch_inference_advanced_pytorch_example: + +Advanced batch inference guide +------------------------------ + + Let's use batch inference on a pre-trained PyTorch model for image classification +to illustrate advanced concepts of batch processing with Ray. + +.. important:: + + If you want to dive right into example use cases next, consider reading the following + tutorials next: + + .. panels:: + :container: container pb-3 + :column: col-md-3 px-1 py-1 + :img-top-cls: p-2 w-75 d-block mx-auto fixed-height-img + + --- + :img-top: /images/ray_logo.png + + .. link-button:: /data/examples/ocr_example + :type: ref + :text: Batch OCR processing using Ray Data + :classes: btn-link btn-block stretched-link + + --- + :img-top: /images/ray_logo.png + + .. link-button:: /data/examples/torch_detection + :type: ref + :text: Fine-tuning an Object Detection Model and using it for Batch Inference + :classes: btn-link btn-block stretched-link + + --- + :img-top: /images/ray_logo.png + + .. 
link-button:: /data/examples/torch_image_example + :type: ref + :text: Training an Image Classifier and using it for Batch Inference + :classes: btn-link btn-block stretched-link + + +Loading data with Ray Data +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +In the quick start guide we glossed over the details of loading data with Ray Data. +Your data might be stored in a variety of formats, and you might want to load it from different sources. +Ray Data supports multiple formats and sources out of the box. +The :ref:`guide to creating datasets ` is the ultimate resource +to learn more about loading data with Ray Data, but we'll cover the basics here, too. + +.. hint:: + + With Ray Data, you can :ref:`create synthetic data in Python`, + :ref:`load data from various storage solutions` such as S3, + HDFS, or GCS, using common formats such as CSV, JSON, Text, Images, Binary, + TFRecords, Parquet, and more. Ray Data also supports reading from common SQL and NoSQL + databases, and allows you to define your own, custom data sources. + + You can also read :ref:`common Python library formats ` + such as Pandas, NumPy, Arrow, or plain Python objects, as well as from + :ref:`distributed data processing frameworks ` + such as Spark, Dask, Modin, or Mars. + + Of course, Ray Data also supports :ref:`reading data from common ML frameworks ` + like PyTorch, TensorFlow or HuggingFace. + +.. callout:: + + .. literalinclude:: ./doc_code/torch_image_batch_trained.py + :language: python + :start-after: __pt_load_start__ + :end-before: __pt_load_end__ + + .. annotations:: + <1> We use one gigabyte of image data from the Imagenet dataset from S3. + + <2> We use ``read_images`` from Ray Data and limit the number of images to 1000. + +The process of loading data with Ray Data is as diverse as the data you have. +For instance, in the example above we didn't load the text labels for our images, +which would require a different data source and loading function. 
+For any advanced use cases, we recommend you read the +:ref:`guide to creating datasets `. + +Preprocessing with Ray Data +~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +After loading your data, it often needs to be preprocessed prior to inference. +This may include cropping or resizing images, or tokenizing raw text. + +To introduce common terminology, with :ref:`Ray Data ` you can define +:term:`user-defined functions` (UDFs) that transform batches of your data. +As you've seen before, applying these UDFs via +:meth:`ds.map_batches() ` outputs a new, transformed dataset. + +.. note:: + + The way we do preprocessing here is conceptually close to how we do batch + inference, and we use the same :meth:`ds.map_batches() ` + call from Ray Data to run this task. + The main difference is that we don't use a machine learning model to transform our data, + which has some practical consequences. For instance, in the example below we simply + define a map function that we pass into ``map_batches``, and not a class. + +To transform our raw images loaded from S3 in the last step, we use functionality from +the ``torchvision`` package to define a UDF called ``preprocess_images``. + +.. callout:: + + .. literalinclude:: ./doc_code/torch_image_batch_trained.py + :language: python + :start-after: __pt_preprocess_start__ + :end-before: __pt_preprocess_end__ + + .. annotations:: + <1> We compose PyTorch tensor creation with image preprocessing, so that our processed images "fit" into a ``ResNet18`` PyTorch model. + + <2> We then define a simple UDF to transform batches of raw data accordingly. Note that these batches come as dictionaries of NumPy images stored in the ``"images"`` key. + + <3> Finally, we apply the UDF to our dataset using ``map_batches``. + +.. tip:: + + For the full suite of transformations available in Ray Data, read + :ref:`the data transformation guide `. + +.. 
caution:: + + Depending on how you load your data and what input data format you use, the dataset + loaded with :ref:`Ray Data ` will have different *batch formats*. + For instance, image data might be naturally stored in NumPy format, while tabular + data makes much more sense as a Pandas DataFrame. + What (default) batch format your data has and how to deal with it is explained in + detail in :ref:`the batch format section `. + +Defining predictors as stateful UDFs +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +One of the key value adds of Ray over other distributed systems is the support for +distributed stateful operations. These stateful operations are especially useful +for inference since the model only needs to be initialized once, instead of per batch. + +.. margin:: + + In short, running model inference means applying + :meth:`ds.map_batches() ` + to a dataset with a trained model as a UDF. + +You've already seen how to do this in the quickstart section of this guide, but now +that you're equipped with more knowledge, let's have a look at how to define a +stateful UDF with Ray for our pretrained ResNet model: + +.. callout:: + + .. literalinclude:: ./doc_code/torch_image_batch_trained.py + :language: python + :start-after: __pt_model_start__ + :end-before: __pt_model_end__ + + .. annotations:: + <1> The ``__init__`` method is used to initialize the model once. Ray takes care of distributing and managing this state for our batch processing task. + + <2> The ``__call__`` method is used to apply the model to a batch of data. + + <3> We're free to use any custom code in a stateful UDF, and here we prepare the data to run on GPUs. + + <4> Finally, we return the ``"class"`` key of the model predictions as Numpy array. + + +Scalable inference with Ray Data +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +To get predictions, we call :meth:`ds.map_batches() `, +by making sure to specify a :class:`ActorPoolStrategy ` +which defines how many workers to use for inference. + +.. 
callout:: + + .. literalinclude:: ./doc_code/torch_image_batch_trained.py + :language: python + :start-after: __pt_prediction_start__ + :end-before: __pt_prediction_end__ + + .. annotations:: + <1> In this example we use a total of four Ray Actors to run inference on our dataset. + + <2> Each actor should use one GPU. + +To summarize, mapping a UDF over batches is the simplest transform for Ray Datastreams. +The UDF defines the logic for transforming individual batches of data of the dataset +Performing operations over batches of data is more performant than single element +operations as it can leverage the underlying vectorization capabilities of Pandas or NumPy. + + +.. note:: + + You can use :meth:`ds.map_batches() ` on functions, too. + This is mostly useful for quick transformations of your data that doesn't require + an ML model or other stateful objects. + To handle state, using classes like we did above is the recommended way. + In the dropdown below you find an example of mapping data with a simple Python + function. + + .. dropdown:: Example using ``map_batches`` with functions + + This example transforms example data using a simple Python function. + The ``map_function`` uses the fact that our ``data`` batches in this particular + example are Pandas dataframes. + Note that by using a map function instead of a class, we don't have to define + :class:`ActorPoolStrategy ` to specify compute resources. + + .. literalinclude:: ./doc_code/batch_formats.py + :language: python + :start-after: __simple_map_function_start__ + :end-before: __simple_map_function_end__ + +.. _batch_inference_formats: + +Working with batch formats +-------------------------- + +Now that you've seen examples of batch inference with Ray, let's have a closer look +at how to deal with different data formats. +First of all, you need to distinguish between two types of batch formats: + +- Input batch formats: This is the format of the input to your UDFs. 
You will often have to + refer to the right format name to run batch inference on your data. +- Output batch formats: This is the format your UDFs return. + +In many standard cases, the input batch format is the same as the output batch format, +but it's good to be aware of the differences. + +.. margin:: + We refer to batch formats by name in Ray Data (using strings). + For instance, the batch format used to represent Pandas dataframes is called ``"pandas"``. + We often use batch format names and the libraries they represent interchangeably. + +Let's focus on the three available input batch formats first, +namely Pandas, NumPy, and Arrow, and how they're used in Ray Data: + +.. tabbed:: Pandas + + The ``"pandas"`` batch format presents batches in + `pandas.DataFrame `__ + format. If converting a simple dataset to Pandas DataFrame batches, a single-column + dataframe with the column ``"__value__"`` will be created. + + .. literalinclude:: ./doc_code/batch_formats.py + :language: python + :start-after: __simple_pandas_start__ + :end-before: __simple_pandas_end__ + +.. tabbed:: NumPy + + The ``"numpy"`` batch format presents batches in + `numpy.ndarray `__ + format as follows: + + * **Tabular datasets**: Each batch will be a dictionary of NumPy + ndarrays (``Dict[str, np.ndarray]``), with each key-value pair representing a column + in the table. + + * **Tensor datasets** (single-column): Each batch will be a single + `numpy.ndarray `__ + containing the single tensor column for this batch. + + * **Simple datasets**: Each batch will be a single NumPy ndarray, where Ray Data will + attempt to convert each list-batch to an ndarray. + + .. literalinclude:: ./doc_code/batch_formats.py + :language: python + :start-after: __simple_numpy_start__ + :end-before: __simple_numpy_end__ + +.. tabbed:: Arrow + + The ``"pyarrow"`` batch format presents batches in ``pyarrow.Table`` format. 
+ If converting a simple dataset to Arrow Table batches, a single-column table + with the column ``"__value__"`` will be created. + + .. literalinclude:: ./doc_code/batch_formats.py + :language: python + :start-after: __simple_pyarrow_start__ + :end-before: __simple_pyarrow_end__ + +When defining the return value of your UDF, you can choose between +Pandas dataframes (``pandas.DataFrame``), NumPy arrays (``numpy.ndarray``), Arrow tables +(``pyarrow.Table``), dictionaries of NumPy arrays (``Dict[str, np.ndarray]``) or simple +Python lists (``list``). +You can learn more about output formats in :ref:`the output format guide`. + +.. important:: + + No matter which batch format you use, you will always have to be familiar with + the underlying APIs used to represent your data. For instance, if you use the + ``"pandas"`` batch format, you will need to know the basics of interacting with + dataframes to make your batch inference jobs work. + +Default data formats +~~~~~~~~~~~~~~~~~~~~ + +In all the examples we've seen so far, we didn't have to specify the batch format. +In fact, the format is inferred from the input dataset, which can be straightforward. +For instance, when loading a NumPy array with :meth:`ray.data.from_numpy() `, +the batch format will be ``"numpy"``, but it's not always that easy. + +In any case, Ray Data has a ``"default"`` batch format that is computed per data type +as follows: + +.. tabbed:: Tabular data + + Each batch will be a + `pandas.DataFrame `__. + This may incur a conversion cost if the underlying Datastream block is not + zero-copy convertible from an Arrow table. + + .. literalinclude:: ./doc_code/transforming_datastreams.py + :language: python + :start-after: __writing_default_udfs_tabular_begin__ + :end-before: __writing_default_udfs_tabular_end__ + +.. tabbed:: Tensor data (single-column) + + Each batch will be a single + `numpy.ndarray `__ + containing the single tensor column for this batch. + + .. 
literalinclude:: ./doc_code/transforming_datastreams.py + :language: python + :start-after: __writing_default_udfs_tensor_begin__ + :end-before: __writing_default_udfs_tensor_end__ + +.. tabbed:: Simple data + + Each batch will be a Python list. + + .. literalinclude:: ./doc_code/transforming_datastreams.py + :language: python + :start-after: __writing_default_udfs_list_begin__ + :end-before: __writing_default_udfs_list_end__ + + +.. seealso:: + + As we've discussed in this guide, using :meth:`ds.map_batches() ` + on a class defining your model + should be your default choice for running inference with Ray. + For instance, if you're already using the Ray AIR framework for running your ML workflows, + you may want to use the + :ref:`framework-specific batch predictor implementations`. + + To see an extension of the quick start example using an AIR + ``HuggingFacePredictor``, see the following example: + + .. dropdown:: Batch inference example with HuggingFace and Ray AIR + + .. literalinclude:: ./doc_code/hf_quick_start.py + :language: python + :start-after: __hf_quickstart_air_start__ + :end-before: __hf_quickstart_air_end__ + + +.. _batch_inference_config: +Configuration & Troubleshooting +------------------------------- + +Configuring Batch Size +~~~~~~~~~~~~~~~~~~~~~~ + +An important parameter to set for :meth:`ds.map_batches() ` +is ``batch_size``, which controls the size of the batches provided to the UDF. +Here's a simple example of loading the IRIS dataset (which has Pandas format by default) +and processing it with a batch size of `10`: + +.. literalinclude:: ./doc_code/batch_formats.py + :language: python + :start-after: __simple_map_function_start__ + :end-before: __simple_map_function_end__ + +Increasing ``batch_size`` can result in faster execution by better leveraging vectorized +operations and hardware, reducing batch slicing and concatenation overhead, and overall +saturation of CPUs or GPUs. 
+On the other hand, this will also result in higher memory utilization, which can +lead to out-of-memory (OOM) failures. +If encountering OOMs, decreasing your ``batch_size`` may help. + +.. caution:: + The default ``batch_size`` of ``4096`` may be too large for datasets with large rows + (e.g. tables with many columns or a collection of large images). + +Using GPUs in batch inference +----------------------------- + +To use GPUs for inference, first update the callable class implementation to +move the model and data to and from the CUDA device. +Here's a quick example for a PyTorch model: + +.. code-block:: diff + + from torchvision.models import resnet18 + + class TorchModel: + def __init__(self): + self.model = resnet18(pretrained=True) + + self.model = self.model.cuda() + self.model.eval() + + def __call__(self, batch: List[torch.Tensor]): + torch_batch = torch.stack(batch) + + torch_batch = torch_batch.cuda() + with torch.inference_mode(): + prediction = self.model(torch_batch) + - return {"class": prediction.argmax(dim=1).detach().numpy()} + + return {"class": prediction.argmax(dim=1).detach().cpu().numpy()} + + +Next, specify ``num_gpus=N`` in :meth:`ds.map_batches() ` +to indicate that each inference worker should use ``N`` GPUs. + +.. code-block:: diff + + predictions = dataset.map_batches( + TorchModel, + compute=ray.data.ActorPoolStrategy(size=2), + + num_gpus=1 + ) + +**How should I configure num_cpus and num_gpus for my model?** + +By default, Ray will assign 1 CPU per task or actor. For example, on a machine +with 16 CPUs, this will result in 16 tasks or actors running concurrently for inference. +To change this, you can specify ``num_cpus=N``, which will tell Ray to reserve more CPUs +for the task or actor, or ``num_gpus=N``, which will tell Ray to reserve/assign GPUs +(GPUs will be assigned via `CUDA_VISIBLE_DEVICES` env var). + +.. code-block:: python + + # Use 16 actors, each of which is assigned 1 GPU (16 GPUs total). 
+ ds = ds.map_batches( + MyFn, + compute=ActorPoolStrategy(size=16), + num_gpus=1 + ) + + # Use 16 actors, each of which is reserved 8 CPUs (128 CPUs total). + ds = ds.map_batches( + MyFn, + compute=ActorPoolStrategy(size=16), + num_cpus=8) + + +**How should I deal with OOM errors due to heavy model memory usage?** + +It's common for models to consume a large amount of heap memory. For example, if a model +uses 5GB of RAM when created / run, and a machine has 16GB of RAM total, then no more +than three of these models can be run at the same time. The default resource assignments +of one CPU per task/actor will likely lead to OutOfMemoryErrors from Ray in this situation. + +Let's suppose our machine has 16GiB of RAM and 8 GPUs. To tell Ray to construct at most +3 of these actors per node, we can override the CPU or memory: + +.. code-block:: python + + # Require 5 CPUs per actor (so at most 3 can fit per 16 CPU node). + ds = ds.map_batches(MyFn, + compute=ActorPoolStrategy(size=16), num_cpus=5) + +Learn more +---------- + + +Batch inference is just one small part of the Machine Learning workflow, and only +a fraction of what Ray can do. + +.. figure:: images/train_predict_pipeline.png + + How batch inference fits into the bigger picture of training and prediction AI models. + +To learn more about Ray and batch inference, check out the following +tutorials and examples: + +.. panels:: + :container: container pb-3 + :column: col-md-3 px-1 py-1 + :img-top-cls: p-2 w-75 d-block mx-auto fixed-height-img + + --- + :img-top: /images/ray_logo.png + + .. link-button:: https://github.com/ray-project/ray-educational-materials/blob/main/Computer_vision_workloads/Semantic_segmentation/Scaling_batch_inference.ipynb + :type: url + :text: Scalable Batch Inference with Ray for Semantic Segmentation + :classes: btn-link btn-block stretched-link + + --- + :img-top: /images/ray_logo.png + + .. 
link-button:: /data/examples/nyc_taxi_basic_processing + :type: ref + :text: Batch Inference on NYC taxi data using Ray Data + :classes: btn-link btn-block stretched-link + + --- + :img-top: /images/ray_logo.png + + .. link-button:: /data/examples/ocr_example + :type: ref + :text: Batch OCR processing using Ray Data + :classes: btn-link btn-block stretched-link + + --- + :img-top: /images/ray_logo.png + + .. link-button:: /data/examples/torch_detection + :type: ref + :text: Fine-tuning an Object Detection Model and using it for Batch Inference + :classes: btn-link btn-block stretched-link + + --- + :img-top: /images/ray_logo.png + + .. link-button:: /data/examples/torch_image_example + :type: ref + :text: Training an Image Classifier and using it for Batch Inference + :classes: btn-link btn-block stretched-link diff --git a/doc/source/data/doc_code/batch_formats.py b/doc/source/data/doc_code/batch_formats.py new file mode 100644 index 000000000000..8dc1136e6124 --- /dev/null +++ b/doc/source/data/doc_code/batch_formats.py @@ -0,0 +1,66 @@ +# flake8: noqa +# isort: skip_file +# fmt: off + +# __simple_map_function_start__ +import ray + +ds = ray.data.read_csv("example://iris.csv") + +def map_function(data): + return data[data["sepal.length"] < 5] + +transformed = ds.map_batches(map_function, batch_size=10) +# __simple_map_function_end__ + +# __simple_pandas_start__ +import ray +import pandas as pd + +ds = ray.data.read_csv("example://iris.csv") +ds.show(1) +# -> {'sepal.length': 5.1, ..., 'petal.width': 0.2, 'variety': 'Setosa'} + +ds.default_batch_format() +# pandas.core.frame.DataFrame + +def transform_pandas(df_batch: pd.DataFrame) -> pd.DataFrame: + df_batch = df_batch[df_batch["variety"] == "Versicolor"] + df_batch.loc[:, "normalized.sepal.length"] = df_batch["sepal.length"] / df_batch["sepal.length"].max() + df_batch = df_batch.drop(columns=["sepal.length"]) + return df_batch + +ds.map_batches(transform_pandas).show(1) +# -> {..., 'variety': 'Versicolor', 
'normalized.sepal.length': 1.0} +# __simple_pandas_end__ + +# __simple_numpy_start__ +import ray +import numpy as np + +ds = ray.data.range_tensor(1000, shape=(2, 2)) +ds.default_batch_format() +# 'numpy.ndarray' + +def transform_numpy(arr: np.ndarray) -> np.ndarray: + return arr * 2 + +ds.map_batches(transform_numpy) +# __simple_numpy_end__ + + +# __simple_pyarrow_start__ +import ray +import pyarrow as pa +import pyarrow.compute as pac + +ds = ray.data.read_csv("example://iris.csv") + +def transform_pyarrow(batch: pa.Table) -> pa.Table: + batch = batch.filter(pac.equal(batch["variety"], "Versicolor")) + return batch.drop(["sepal.length"]) + +ds.map_batches(transform_pyarrow, batch_format="pyarrow").show(1) +# -> {'sepal.width': 3.2, ..., 'variety': 'Versicolor'} +# __simple_pyarrow_end__ +# fmt: on diff --git a/doc/source/data/doc_code/hf_quick_start.py b/doc/source/data/doc_code/hf_quick_start.py new file mode 100644 index 000000000000..c9de271ad4ec --- /dev/null +++ b/doc/source/data/doc_code/hf_quick_start.py @@ -0,0 +1,51 @@ +# flake8: noqa +# isort: skip_file +# fmt: off + +# __hf_quickstart_load_start__ +import ray +import pandas as pd + + +prompts = pd.DataFrame(["Complete these sentences", "for me"], columns=["text"]) +ds = ray.data.from_pandas(prompts) +# __hf_quickstart_load_end__ + + +# __hf_quickstart_model_start__ +class HuggingFacePredictor: + def __init__(self): # <1> + from transformers import pipeline + self.model = pipeline("text-generation", model="gpt2") + + def __call__(self, batch): # <2> + return self.model(list(batch["text"]), max_length=20) +# __hf_quickstart_model_end__ + + +# __hf_quickstart_prediction_start__ +scale = ray.data.ActorPoolStrategy(2) +predictions = ds.map_batches(HuggingFacePredictor, compute=scale) + +predictions.show(limit=1) +# [{'generated_text': 'Complete these sentences until you understand them.'}] +# __hf_quickstart_prediction_end__ + +# __hf_quickstart_air_start__ +import pandas as pd +from transformers import 
AutoConfig, AutoModelForCausalLM, AutoTokenizer +from transformers.pipelines import pipeline +from ray.train.huggingface import HuggingFacePredictor + + +tokenizer = AutoTokenizer.from_pretrained("sgugger/gpt2-like-tokenizer") +model_config = AutoConfig.from_pretrained("gpt2") +model = AutoModelForCausalLM.from_config(model_config) +pipeline = pipeline("text-generation", model=model, tokenizer=tokenizer) + +predictor = HuggingFacePredictor(pipeline=pipeline) + +prompts = pd.DataFrame(["Complete these sentences", "for me"], columns=["sentences"]) +predictions = predictor.predict(prompts) +# __hf_quickstart_air_end__ +# fmt: on diff --git a/doc/source/data/doc_code/pytorch_quick_start.py b/doc/source/data/doc_code/pytorch_quick_start.py new file mode 100644 index 000000000000..39bcdc4f9bdc --- /dev/null +++ b/doc/source/data/doc_code/pytorch_quick_start.py @@ -0,0 +1,40 @@ +# flake8: noqa +# isort: skip_file +# fmt: off + +# __pt_quickstart_load_start__ +import ray +import numpy as np + + +dataset = ray.data.from_numpy(np.ones((1, 100))) +# __pt_quickstart_load_end__ + + +# __pt_quickstart_model_start__ +import torch +import torch.nn as nn + +class TorchPredictor: + + def __init__(self): # <1> + self.model = nn.Sequential( + nn.Linear(in_features=100, out_features=1), + nn.Sigmoid(), + ) + self.model.eval() + + def __call__(self, batch): # <2> + tensor = torch.as_tensor(batch, dtype=torch.float32) + with torch.inference_mode(): + return self.model(tensor).detach().numpy() +# __pt_quickstart_model_end__ + + +# __pt_quickstart_prediction_start__ +scale = ray.data.ActorPoolStrategy(2) +predictions = dataset.map_batches(TorchPredictor, compute=scale) +predictions.show(limit=1) +# [0.45092654] +# __pt_quickstart_prediction_end__ +# fmt: on diff --git a/doc/source/data/doc_code/tf_quick_start.py b/doc/source/data/doc_code/tf_quick_start.py new file mode 100644 index 000000000000..92885b619a89 --- /dev/null +++ b/doc/source/data/doc_code/tf_quick_start.py @@ -0,0 +1,35 @@ 
+# flake8: noqa +# isort: skip_file +# fmt: off + +# __tf_quickstart_load_start__ +import ray +import numpy as np + + +dataset = ray.data.from_numpy(np.ones((1, 100))) +# __tf_quickstart_load_end__ + + +# __tf_quickstart_model_start__ +class TFPredictor: + def __init__(self): # <1> + from tensorflow import keras + + input_layer = keras.Input(shape=(100,)) + output_layer = keras.layers.Dense(1, activation="sigmoid") + self.model = keras.Sequential([input_layer, output_layer]) + + def __call__(self, batch: np.ndarray): # <2> + return self.model(batch).numpy() +# __tf_quickstart_model_end__ + + +# __tf_quickstart_prediction_start__ +scale = ray.data.ActorPoolStrategy(2) + +predicted_probabilities = dataset.map_batches(TFPredictor, compute=scale) +predicted_probabilities.show(limit=1) +# [0.45119727] +# __tf_quickstart_prediction_end__ +# fmt: on diff --git a/doc/source/data/doc_code/torch_image_batch_trained.py b/doc/source/data/doc_code/torch_image_batch_trained.py new file mode 100644 index 000000000000..feb99e0f5d5a --- /dev/null +++ b/doc/source/data/doc_code/torch_image_batch_trained.py @@ -0,0 +1,58 @@ +# flake8: noqa +# isort: skip_file +# fmt: off + +# __pt_load_start__ +import ray + +data_url = "s3://anonymous@air-example-data-2/1G-image-data-synthetic-raw" # <1> +dataset = ray.data.read_images(data_url).limit(1000) # <2> +# __pt_load_end__ + +# __pt_preprocess_start__ +from typing import Dict +import numpy as np +from torchvision import transforms +from torchvision.models import ResNet18_Weights + +resnet_transforms = ResNet18_Weights.DEFAULT.transforms +transform = transforms.Compose([transforms.ToTensor(), resnet_transforms()]) # <1> + +def preprocess_images(batch: Dict[str, np.ndarray]): # <2> + transformed_images = [transform(image) for image in batch["image"]] + return transformed_images + +dataset = dataset.map_batches(preprocess_images) # <3> +# __pt_preprocess_end__ + + +# __pt_model_start__ +from typing import List +import torch +from 
torchvision.models import resnet18 + + +class TorchPredictor: + def __init__(self): # <1> + self.model = resnet18(pretrained=True).cuda() + self.model.eval() + + def __call__(self, batch: List[torch.Tensor]): # <2> + torch_batch = torch.stack(batch).cuda() # <3> + with torch.inference_mode(): + prediction = self.model(torch_batch) + return {"class": prediction.argmax(dim=1).detach().cpu().numpy()} # <4> +# __pt_model_end__ + + +# __pt_prediction_start__ +predictions = dataset.map_batches( + TorchPredictor, + compute=ray.data.ActorPoolStrategy(4), # <1> + num_gpus=1, # <2> +) + +predictions.show(limit=1) +# {'class': 258} +# __pt_prediction_end__ +# fmt: on diff --git a/doc/source/data/images/actor_batch_prediction.png b/doc/source/data/images/actor_batch_prediction.png new file mode 100644 index 0000000000000000000000000000000000000000..5922dde5893bfa95afd66a854d2f3b403993f359 GIT binary patch literal 170593 zcmdq}Wmr|+^FNMLk_IJ+bax{yA>G~GAPt8O2>}r}G>D`i(%p^H-Car@8l_Wy`xy7z z` z?6J}N>7Uq9w15^2- z@A4`*S(yQMhSC>c<^M7AU+?|84?i>X=6^8rbEQAt1(qs+%Fq0d*aT4V^uHj%zzD%e zi3+Q_!|pVpRq9H3KpI>?p!ZohlrssXW?c#gtjh0B7%5*k5@aD|#+BUMeYy&);V z%e-)*v1{*kytlB>u(Xi#g7P$PO2feCc)Ox6XL~ze+hsK9B@8UG5DXl;9}EJ?t(Toq zt?iG%Nu(nG*C1f30*nMo3-bS)4wQO0BP;=>$>k)w!~cCxhq$kNw=(k&i>=d`tx9aw z_D|tM{$Gs$91Ac7|NqBKQ3>BFrwcj5C4ZCb`uSR4xpE^QY3=& z7O_+U^FG$CR=Ld^)L$b)75>*?z*M)k7iNRj^mOE0=6fF8Dh?%TlQZ-@RB z95Z+Gfl-aGT7jBN_t2Yw%623R<2S~{5NfmDzd|*;g@wD6{ys?SF>Wu;KCZ@|3bMwX z7-9dOmL&lr(X-KHp?UWoK7KFjkr&+Q{OQ1I*#f)C5G>4Z!XSA+K51TySBSR&j+Y{X zyh`A#nz&%p31r7Qv(4mlvx-ZkAaE1Epytv$@4QqKiT>=H)!qXi%{14PkA}a%2`Y|@3%_~Qp2R^4lWEQVix9;?K zuc|D`5iPLC5wK#bC}m3zr@G)%l#+`H)1B$4}=u6j>dx#jj^H; zJI%3Ei{H3H3GfC!W#jJWVnmxz?(61ZINc$39enE@6?=zTDh<`+r__XQP(IUa3Oakf z{M*(t#DFzUQQ%Ka?GZ#*WYKgBAM{)jGw0{i#@_Ggs~e@cI?HZAO?_fUK)KYttEwcZ zalKJ2zMzEkOM^n`$N+6D%X~434!a>$JD_+xE+aljTN1&WDS)Cq8DMG z5{YLuRQl~=9#dfdG66(!0G+MFW30d7`ddf9=Vr*==Z+AhW_@yPP?yAr9?5g}wklkz zdI-gR$o}k(n@e|DrXG!1z{NQIi}^`NOL@bv64{(u;sXr>PxaLWh1T)HYg3exj08a929z-mthAO&=2 
zZ0r2d@EeVK$uWm+w%-P^c_M%9m;R0Aiu=TdShh7r*?TMTRJEyb@H%Ng5drI%{sQ1^Aoj48$o#yZ% zjiNEnC+IFSm34Vwj*;H`VSDsJFyNT@19`UlfBD8%WUT30UpBrN>9E7}%igrNd_n?O zdHWPKx;G+>5z1W#@%JPk>wT|1c@|<1hQ0DrN$X=pPg5=3q6xf6V+}AUcZI$KTMd&XDkwuq&MDtD;3&LK@|IP#m+T>-KRyGBgRf0L_in0 zC!jpmtmF^sojg-^+b-EKzF=@+S4b!r!^>t!cHokfI!+&zS6#NjX4))LgE8EekU*Se7k&*TZqTax!7g=LLB4Xb+MYp^9;MU! z)m_#*P`_qp5ED&Ed^4X#wYRcx307Z=uRG5%h)St4F_AlG!Idpal3W!%|L9ypb+g`K zads-f@B2BW==&pP$B01J%ndudUGWh;D*OdIe7K($WQdJinq`W;NNMx;rV&mG%foq8 zPKXx#IqkAJUe~urzQ*gltCk4b`4ila4Z-zGt9^Ujx!d}##cp=FyCz;>cO)tOmSYvF z$@>Xp_$$@*?biokZtY^^$q6Qs%)3OQBnl72$ zg$9T$z6j56G9L*dhCjN+agtIhf{g0-5YF4LBhO{V4$b?l-mR3rPNAIr_I2-p;2Zl< z$L8)%&CPpL{DOnUZz{oTLOv=2vY+21xd_s`x?1K_EA*vo zpEWx$hoGN-P8C!?_T2<0)iguszo)&4XOZvxx!cjlLT&lHnT_r*Xtcg2by%qyI!&yE zARPg_1C2r4ntjW8hp&Z{`Y;J8QV-vvyOu;^l))DGiVk(wmbEak7I$XODyfq$B-Ay! z9ZXH=Lj}Qs{V6w=a3FZRyV4LK_m6}8U)*7ePxR^GZ?K0QEPS7gC%tkED!zJy;9tA;BTl#i`Fa)`aia%+d1By3 z8&v*T&S85*5pAJ+TuR*g{R4JgZ5PEZ?=HCyPSZCei=pva@iA-u>%F@DWHq-=%aAAn zRfL~HX)Q(n%yw$zL0t-e3kcFMrdgiO95hiDJvxk5c#$K1l}}vMq?0Xfc^9>^LgEV$ zi+(ZLW$^eCH0OEYdj>v({Tac~Ncm;eNC!N-`!`KkWNeIKJ4`)oBBX_(cPlx_{(bzn}&raBDIBsYkN-uOjI2%2bc$$ z+@~-sXSJUF-k{-u%&P4xlOg;qVk4LO%<8r;H4=0%rRT9H3C>9rw|}D_I}bos3m57_ zpSdQ%p!^il{}qoJu3?%{NcpR{#*T?ubAY#z^0=X%6)sR=0g8Qk!VI;U%pN&AKTe$aYeNE;7vLmhgGOL}_y@tE5tF*E+ zVOY5>p4bq!(hTps=fW^k=HpXRFT(tjpA1q!E%&}DqWqPw0D60lVi|q2MZBPM= zx9s*UXby8OKI^QWeu!}bhxC^|YET@G)N4XBAKZp7p$uAo+-i+}&nFj@&cXkV5>W)uVVotIf+F*l*D)AZ&7ny(ArmO}BVc zJj`E65&&}iQHg2Lo!g{GqDWe5atTWC0Cvm-s$a&qK@G_J`n{aNE#3*>8_%C`Zpvaa zs|%-`<2T*~p}7sU?M1-f`3E44ZseV5^% zeUI@25aEQ-QqO4uyWwr>9)1B=(_G)AN6MvpdG#G%Ch~{ASkwU=hEL257~bLs{aOtH zU2vw(1O9^1rFenjxx_EucN7C~sXr>lbt@8u#D9ipsgz~`Ob!-YQh3VUKOiiW1KbTH z9d&X$RWuhysYZ&ieuZ5b22fKDAbDFUcceWYWrX6YzKHu$Hv70Mun&Ix4m zxqXf*22K669-q{pbA#OPe8m@PnF_N3cRfX^@kXa-T=@df){^!E42PmO`8nh3%@pVX(Hb0sea#A zC=!YTR>SdIcBkKC0Gcd~fIja2|ByLDCwI9vwp%1pFZ_A6CsuzTU#$Q$o6+ulLz?W2 zQ*1?TZi%;^fWi0&EiL_!15^*hfg_Imq0C_T+=zLyuS$f~k&8 
zO)7banf5?eeeZ{fkbtKO2`52y#2#LSyiAc}>cm|GTP8E+n{4%4J#ma&JYi@sKy;z0Ti zJ;q9$wgtgzcJ;6W16yuM;$M#|4TbUAoG} z>SZ|%EIc|s%#EsBq{K0gswfm&-lfucFJo4Augpf5D5pq{0YB%kTs_ALYKs2C(E7CG z#nI1P`%ho75#`7Db{$tXd$RW+EVh~^&(qr8Tob>Kp3{RqY)*65I94fzS!FR`#?-NS z{=m_u+_BJetl%KCk=f>;&4%%HQQmQZ_k3cuoV?@W2XDQ3s)UHk8qTu9cv|q>1U;9J zy1j?B_1YGBafN_fu~Jjd+hVe(>N(t#c!0oWlWm%s!&Zw5X*io48R|4!s@UnDgbj`L`x3f9okL7SDaN*IjH!y@y_5>23)6SK9gwNuB3wprYs zHz`|NPj5bq9X(u=b7R2DDI&I(fmW6?5}{5yhN=GVJw%Vn5beze&UB4BM*VfOgYsSg z`W=1Vshjjza{(67_qRnnmn+vE;#R465WJr4-0erK3hk4VXl{zvu{p-k@lZ-hENkqD znB|BGo$GLNcImP) zWmCyUcqP2gc3#~8JK0;;g14w?%_Pa2H^DYRea^cjA=0ELskl~7pNcuB@fic|HcHj; z3vIqw#l>E1ol>VY#v)(YPUBdvI&Os@5MqWx=#Cob?>N*`ltjO=KZnr(yUVQlkyV3q zziMomH+9)@^psXfV{G$s^{Bm1r&VldpRvyJ0hv~Od~{j$z;dm9EaPZNeWnwA#WaZH zwNCWFcIaH^W@E}hzDDWkM~nKatTBPDhgkc$UA+gqVFy&!j?uCOW~12| zr(H+d^ZCV%jhofe7f~s_h5YF?2i`UpMe_&kP<+?R0kQV2;rTE;+_%h%?8CBpHLdU2 zVx8`Zp%ah+4j;hcy_I;EmjTHs>?Z#^f?LNHyOrD2_SU-QPOljW-BeU@recGE-Dw!bzeoP@*lh*q zkLaahavdizlnK?I&QSp92cECl!m=flv5nzDifjpziNaQRWn@Aox*D_!=U1@{`fY|z zZF!B)x%J);f%0t$;--vuM@M7!E7JBd4>HR&-BLJ{3k5rHYzn_|n=Cl;GUHDrHD|IF zu=B-n)in+_7WUApzuM){3X`kWU+i7suBK(G0W}L$PhC{7H6_wnn-^472rzIyO{l4r zTr_=%JE5afps_f5yn47sWna&bkP@M0cJ4~2qT@BVwHd7|u z#C54^A&<4heL?EcROq8Bh6(j^Q&5mTmd5^j$MO^rLVJnBK-eXn=)uUGF8lq2_=MWv zCMU3`WhYfGjR*8;9H;XJG9Z{@vrt}KTCDV?OYm6opvqogO!9r{9wlySLgT?(G}P_K z4~cla$n&qOyonRHALM43b97n4({fE35v*+LOi@D8>S)iW1k2g$ zu9n9NQg}IeJJw0cK)Mn?42laHa8SvCZsAq3HHCaqY>~&9j|CgD4IW^o4gpj5n0P^A zO0LIXRfK;yoqdFph91ch%Be#<3mfTb`xI$Tg%C4oXYjN5fnLU$V$W@VE?Zk|)s3mWN7*A|aL|}G9rPn~E5fY3IH-sT z%8u>Puc6;*-SJ-k9G`ZjJC$$uqYhj7%&%`==UouPU!hn=B*G5<_?EI&AB2!{oq`q5 zIf3Rydup+Z>AMU3X#=mbq}8hmE@~aFlHC={4OFNONxqJMH|d!V?EpUE)F9?|=#<|^? 
zp8AJP-h;d97{B*=!*Ec3inHR_D_bG|5$Fq7!-A)9du{D&@ zJ@CrK4sJ{0)qqBNZ%XSoZwr%Bo{^s(j+c^UCJ%1*mY5ZY(W^Cc)8eWZD8$NK(zS zeFZNQva6Y%Ed^EZBXKyLn!j{*SK*Xb zb4QeZiYaDVY4jZm$BzQEA{^(^jH3=)XD{OON6_*VJv3{5cwWs0aQW}DizW(C9%hkv z)oEfC7S_95AK&W73Jh715^_#@<}0rv;7S%rO(+gTmY`&Sh8>m|@!vKf0#^+f9^<1~ zt^(UHEMQx%&;^x0nE7^vGvve8yWZ~qSP}wY;5GDIQLHKTE)?_sLZmvJf2Np_4#)Ue zp?(>Uq^cdCBK8lf4`+i-UgAJZJ{0%hpS}-^e3uJIz1pt78xsDPn{ZlK#=AI-_dCtX z!g-`#(kI}lc;Cebr0fV+oh9G)YK)U%orfIiaX^941u2Rm{g;GLMi@dR%_HW!+Os%_ z1H#6Rje!)dZ$Kjys!+6S(EGbLS9lDpH#R?hW&tcm6CH@YaqQUx|6956S0Xb+ds(h! zr9)BaO+a5{$mTew*rotgL6!tbT3^4GjlAiE*33r4-7r!ZeL3zw+&YVrM+#GKm+fJ>Bcas~4xf>@zSPt{ zOwN`tBbWwiigyK}BEKOr`m53xPV4W^%Hry~ItF+hSw~hIC^s10Ll(W;*&eJ0H37i` zia?eEivh@q16zad2tfKmL6ZgH|wdE{LGPDft}PUU432GGb$A=3_8-s!OZ!G? zh!%j;MPU;?Y}`Zhrf_YA^G9z$I2~;A92M24)28^Eb9>dUmkUzmrL`#x3=AIF&dz8( znM2H%SFDK|uljIO3uiUoBZ{<#Hz(f7AB=gP8MqEQS#Jv#@9KaNh^F{v({ZO z)vodNjKEDwyDzg#WwADy=3Wc+I(w%`+y7dcKPhQxWn~;ged4Fk^K(&;CU0M(#V&nw zp16+V{1i9agO7bVyaz4=2eAbj)CCLl_j|j{9DSWKm1|rF;_Nw}JT&f+TAJk`-lllU z4Ofj`+o-Gch|SvO%kaT#Ien`X%1j|Zf#Rut17>fR7;NvyDiO~~S1b__J9)mr61o3C z2GSQ|d$stz8)IcRc^e$>^ybZ2M+%KoKF!nJtyPdxxJfo&D#v^@M{O@;##$2tkP7OKVWV>y>=GC*FZ(5f)0Cz(1Ff|9*0 zE-GK;cC=3$jV6g}LwAoy46w;V%jW~5fmJqFo{3{7=Ibw)){O3>?;gD2VcT~Lu55@k zA&|TvWTQ-FTJ$Ji~P&>#m|Psq+N*(eFud44`)yTAHDS>An96P zzbby=d69RNXx^!4j%C83gLNX4(9Ia{#1VHMMn@cA!yxML)}u* z!SntZ%}Mc_710L8RiT-3k0L=to5K$-zHYr|quVyC+|s-&64p?P|KQS42mz3v#Vej? z{V1co&u;ws4Oka0QW*!&$7w7fp#;?Hl3Fcxyv~{3NzZZ>Nc1Vf#~jgk6#GMs<>PD9 zH~@d>uS8+!oIGIvI50k9**&$biGO; zb9x2P3auH!&pXXuyv!z~9kZJXO`&$mm2J>$P;|)Pc=5va8_V~}s!{btUHBgr``~0h zo>#W|6+DB>U#fk-PI`pAat^Sxcm*C94I?i{ylFlfg==a-x<7cXF|V*AG}CO$>yqgZ zuF-E9pR6h0dFP+jonLO~?rT4A?*TIen`d3$>6L@%WX58|#}j-$MNZr4jZ~WLu8l{` zKB_}4s~qDUd!DpSwY3{2*|<13-avLqojgRalPtLpl|L_u!wGFc)0l2z&Gr)>yyktz zoKU?DyZdqX>$O`0o}k4zdhFL=Fyzq3)D_;EpdCLUD8%lDb`6? 
zsY8vOUynP-5lG7q_*zxhlXL$lr2=RA?{@5hDe6{tju%NY34@)&X%b9;yDbxv%rchS z-zd4hzI(}h{A$L6PobIzN!lSZe5g#1TUvZ4@uBm-6!;pUGOA#uA}yJxUp`vN8$M*( ze9j|NkJ_^+&ZMDViR`E>_Ibt6SalRb8v*$!tkt|`&h+x+Gxtoj?e*)X z{g-^w7svQwg8!f$YVG83+PSHY4PuolqRfOjVtzu*$d z)70*0Or*6c2T3RwrOL>w5FUID-R2lb6Z{}+W>)3vy<`@gcJvJ-*cV+9e51SHsM8J? zW*qhSxka=~5Z)W6s1FvBL0BF38ss@S zQXaO8RjIMX_so&vmt=grj*AAA7Ql1S&>VxM7s1Ak{w=(Ol`gl0_~yc=Np+NIJFF`F zA9qq;vi*^z{dRdQ<_MLg-Y}5vH%;3s?7DNdnHbyp0PXuVW1?t5o*igG)$ZH^`;WNpTaIj992 zj-#}LF=QZ+k4A)9u95uT+uyCCxFrSxRcRok<4d#l^o&@ta{6*-E@aP=iGXtDVy>t5 zPg_$I5^9hDMYj2#E)Pb-31LUv^M}lTbMh>5aeK4jUd-;gbF}FTH~h@7Nzp!f7zcx~ zcq+{as)jasp+ikyqROk%A;%peyptuS9eiy6y1F3KQ=%sGD?O@)!TD7J>TM)5d9@a9 z>3oN%Kf5AZ-7&v5NSxFykV{x}VZmet3lYLmuR~(74w0@G_-BpE^(36@$m=g2E(bl+ zc~t4{QFk)evo2KD(e67V^}{yno;w9U-%8*_sIO)ds`!YOn!+g?-PZ?{HqmneS3KYmpPj% zA1pwpc*>Eug4=y&t{x@cV~Yrc(()QdnbxCPa&$Z6zX`DQ&FLZOe3@m6KE8?dpPSr| zfo*{W363J{z@6pHGcgeMOwfmf@`y8VTCO9P0U4FXV()>iY%sKGqBR%2n@_}MPlcbs zdA`3tbX8}`HyCYJVl5GsFXXKsc+K-G{^b=@Y?wi9#lG7cAR5~;co6<)P$T4qLXey+ z(|meF#ALb8q+S_83rpB{d=|TO*dRN&;Nohq&qAwBQQ6TpZN&D${$3kk1nMxb16EjA zSTmCS7&eaTtbQ+<%x?zCk*+9r;WyHlNJ}$OEJJ#(UtGF7?VYB^q^#&~PnB%;xjV)k z-C#PYdJg@!=j)|HzB-Ey~^k<~kb?$66tjuw|5 zgk-&U+oyc{Cw5t|5kUQuSPd-@ruS*Ys9r2^aP;wh1kpnS1oDHPi+F(Dv|V@;+(s&PsCVpY&-8=23dis zPZC>gy?UeBE3>n%qg$;Pobi>Y^!E;3L#QS@s}8PTp7sG9p96DIQ)VaH9@$NWQZBqI z^G2fiy#ZA!!V8rJDg|4fOV0#9y|LY4(m=JXjL-GOz>bDeJHN~#n|T}EdcXLp$D;QiJ{4Zx~=rG zViNov6jxRDNV9k*)Og2W-!M~8J)9vO)*(;M)7do5mYwrSLBT$zm`A8q$VYGd2x1H_ zmlUXR>lkl!8? z&&m-Yp-+Z^zWxd7F+X#io>A|dwWH%mVxI2imCRbtIy*h03*VYLW@RNlb8~)}cXPc? 
z3kC7(O6)IF&sREhWg4cZAjV7nz8f@YtyFqJ8b)I2Rrh3u-VKfhxnPFc$mpmh=IIM_ z(up3-XsV{o?!2m0!EoK*d0e=dQE3>Y^4=nlpkrP;t^Kf0jii&J8RZW zehfB$*`b{I`Czj1pjjcdLNk*o^;j#a#A+XH{z1gYBaSf2$EPL-ANq{rby&p93T1RA zMdcVOx~fjuSeeJH9OX<3Bv&e9cYG?!>YtD8Vqq$5mpjGR$>wL0CE(G);u`xa;-+BM zBT-0BJEwNgon(6G2{-Eq4$ZPf%gW1Ntki%I67>r-N=^=COO;yKnyNl_I_*q*9}Kr$ z)`hkEp^F&8AaL62CpVsn4Qq*xUq6ge{<+0_%C+vv4`+`7hlz$P2$Lnr@=V;2 z`RY9;R*M`S@wl2wW1t9a1CHTdvXuz6LAK8N2EHqkgI0durM_C*e-Bk%4uJPdO8lC1_~%@3eeJI41US-v&~pR|*td(CI%)C{iOGTa(tCJ^g%SdJ=Sh zl#0izyuNn+k*X(3g8D(yo{bXU5h1g>jQB?!@Dq;s><+CuTy&~1Cqes@JRC`j7moZFUBH;K90w~+rzp4MXJUiEvN9?i?^ zCvA-ObScZ>dQ=9cB9j?(?|duD-tQL41~}=a@YCjG+wA*tU&c)=vKkL!giVy}AnVpI z+3T;AnV|J(G}nh@RUa63L#6*AXNn^6djY%GGQ&qDHb1U7ra%42unb(6LW*Sb;aL_z z)r3stp-=y9vtC*oYxP5l82@|x^s-C4WA4S4o<#nyks>l7*h+E z8FC?}s_`Lvih*0lmXiAm4mo^=`13RW={P4}dch);8)r|``76Y{Yj z{o^lFQ3X4a@fK!z`pmgFju56)Gr2lbWtFDu_=A4=0*xL0Bu(u-GU87GB`z6cV{67H z7-a>PXxbI5NG@6~5rni4meT3;O7$y<#=gnq1k}u67E9Xt$Y~R!oqv~|>^S$Hs4Y)x z3f_S6ZO13;Sq11+9IAC9N%3cL-WH~c zW`)~7Ha%P$9kY4`6s`%BY^*l-#ahg37PIQ=#8M~_Up|5pd1mP8$>x#(g2;2@R)iZJTHT+F=ohD1ZFW5{uOWLP0l^0(` z5BJ`bzNIJ3^OKoCEI53tEM-l;88!K6et6XI_?^(?OfLOq2D5d#RXOHZw)a^=i*z}Y zcfRL*{=Ex&Tao-x>GKP=GC5)SgLf_+h)qT(M7cp**IqQd->;7;d(CX+l8M9Y4Mr>` z*Vgn7Wkn>DcZFYM_g|F7pog4NR8tMXI{jonbC`Er zr+D;*mn=eQ!ir5GwMtd$(4~askKgvb-g*%fR7! 
zN8kE+BJ$jFu!Ju=L5@v?F*`=}>-R*&I?-az;A-&wxDBb8sQY08pMr$9f#x(d?z zgR~b$<>mu#M?Z9iB{^!NR&raSy-cX~)~^PI1O;q$6=l_niFEY!k#Z{O>>Y0pU3_^d z&x?1%Ty%FgTcoHkNT%3Hd2c+ebR(H*(6ghM!glqzHb}E-RAEVbyZl`%wRg>P$Wb1@ zq#DkiT6Tt0&HUpF7s3LGJe0Lb2|6q57V5UMLpXr+KU|{XKkO6Xsy!w&Evz zX?neQ*cYdAD+*r>iQF6eyo;$YHF$B@tD1hY8M|5QGkcn{by7`Bc#(T#d9Il0i#;w8 z)WA$vJwO|0($RP6I-0SBwm`T7$~Jiwy!<-Ad-$>M7B}ZUvZ~IDeOb#dQ5L*x9>+=F zixK$r1lEjCWH{ev=NPA+^^L6Cg4VPoU4p$kv}0AIhOv*NO(+pOV%0oSzz?$62={{D z6?%M-*^kCeU}Y*|@($$KXN$SlqNG(I8@bQ^L`KO0A&5w%jDoI3P}++#UsYM@qR`6y zt%y9YMB%>F8q*n#&;6^*J+uJEizl)j$}_{A7WH36_RC(RXUQL&Gl5hzI^2k#zKVU% zL`X-%q6t^Odi~CRN5OYB#LW}wIalFFrP2Q!qW~#}<(JQSK3+U_)~);S3nCdrusm$( z(}$)`A5T-a#rpG<6!h18N0jNKtx7qo-E%cb)4A#@wIrgCF)mQ@1IXJG&ze)SnI^v( z3dA#6YTyK|=_(D6<|~+)D9fW2ke;~srcPWtgjM0JhDS-N+p9HO)gKiEjo0dG2MPGo+p0o@%FDw(N^7&%G^c`eL$$86KnxN!sgRyn z^f+==ZGC6lYdvIh<|R(^AWA-yj;RLLlB^&FZdY-8k91It@Iq}R&P-EhW$|cdwlY4& z25$e@X)o=8v9N-qmszp5TK!vM*K>rOQG<~+wmZW0&z8!a$+Wb`z1}Ik8p|xX#}h9- z88Edc6E`}y^P)=ygH??WEiI8ir_VBulA+r(wEg$mU8Tqx17yRE(<@TBHC-@ zQ^>pJ*Jyb~eDP{{K<$;rJ1QFoG=v%YA)GvQ=9p=+pyIKfN??g}u|xyM+=m?{_%jg+ zqyk*(5CMM_f&1QYhTOXs!08pNKg8U8zQU7*$9Aq7QTe#w3eHxz3I}~ftliV;jvPLY zROFGT4^wJ!Ua;nQVLy)kf^ms!Y^@WD8RKZMw&mof=jv(e*g=G-c|6+g@hL~OhG<;^ zv191Hqjz+ecV>GL6Hmg1H$SylDMwU>h;5WE>y$|-=eu;Jm|a_v@>aS&Y*sA`k`CMA zwKh7Ad0@4PhPF+$f@k5{+YGvrSKc&qF6}#wp=uiI=;QUdAapXm{g9l4RI$;cOq(hor@$^+_IKEbl-%H(c24c}@&Vv%8~y)nn(grZM9e z-39YC-(1^uRqaj?i<+D0^$v>Eh(Tw*qbU@D^zTMFH&A#RHL0vUv)Qi(bK6(tst4)*;@o z{i0OXG@i2#+r;oDWo<4rTftMdq%s7w(FLh8&cCTF%_XGVG;WEvBGvr}vLt=->irs% z{dRHlAlkOXV_hTl;>_WG+zVHw=6a^-rkx;NzK(brhGRajBIlMfe9?f%n=ZEH0;83p z%3S_=M+a0E)}1&O&+_cV^f_OzlzYzx=PWQ9`RijoB#a*v@%S)bUd&!XPN<-*i(hxa z4_~43Nz97=uxxXvYf^npn(qro(q`dvW>RYOJ~NN8)LvEEsJ-FJ4^5jjrxatU$13N9 z8mV`l%6n*C@2&S8I4z>*v|m(t^E*^j>Dxp`A3y9%@*$OvT%|K6_n%(mAfByDSAL7g zlR!bxnXX<-!~HmLu1Bz~Sk^hfbwcwiXFhQW>9~#_v+<0Pb9#2eLgbOarfY0Jp}|h| z^CO_3^^_#T;&`)3B8EM!W@hL?D#-t@_Ecyw2L8HY4}a?r4Gq5fGPMIQP%M~+F#*RF 
zt?wYY-}SwIe;0tf=?jgHL(zik@~ro2#@&COv;$Gc27D8bw0wX1Be*aY;Jltj#_(iX z>BisJ*f{eGhVFS#E4RH>M?&>?J}n9s!X){clg-F+XAoH-RX<&&4J+9K=0Z z%WYfedX+9sdF0Sa2D*P%SYOYI1ULl4>i&U6SddBGYI9WId;G&F-qJdn66W+on;hZ8 z>E&f(Hm^9n`qc9GPZ2JpKA(M~yxA$%Z>s;Yq&8q|gDyhLR|j--L)a6#9oA zPN0B7TaTE_P= z!;WojfCfqyCWL_4_93i6kxZUvABV7{bIx6@EP(^)Vm(5$xgK-pxH#KD0$?0J!qI8S zjn4Z(4*@C= zCT)ww)Cc;|?7Hp(DtQF0!zEr-6hLZXJp!_7#34_+e)P2bE~2So42QrnRu2v_Q>ds| z=Wlfb9Ja|A+U?ce2-$=0YrQo<=h!x(mee)3J=nGJ7g{s$Im5G#X{HHzFS(4AawUSEeav+E#ZH2u2BxMGQmvf|$l++PJaQU!dK8Pon25VRR7_UR z%JekL9@|Szpd5Q`1%;*q^$T__psgTsfJjG@bA^VLqaFQ`C!DAGu00)I0i-VV+&`-( z_O(Eqdyi>!+E>gMYdl7Cx>MgE;`z*~vP2@Msd>G|X`V^I_smW{qeTGr4d9>Sp8 z=g2)BM>zFyOh2THPmo(@&~lQ;%mzwut!L<8odc6`R)r?gV)PLNhibvgbpyTiBk3Ti z8snfWyOH>nntEXzLiCNNuuluvD-LItR8I(M#@N9zuDK)sYMT%dYQ;ghKh$5lQd2wZ zgS?0->nEfF=c)dH$dgGYs^HXrF@@w9TbnN#$p)5{c*@$Kg`WXCIGrjx#Vw2mbr2(- zJLeU6HO9KVxkqdA+~jR2;7CU(qb?qP+WE}pt6%=fMsuPXMXXvpS??cecmG*VGn7Q04uJ_fzUKn|{5XaSiJe3-q!kuONVKYe&}`sY zqoU=3=iXr$EGI6<*pyR~&5^k$ZYV$7qSmrWc10qOJhx|oStke3*n~Aq@b2>!sZl<% zgoBHT{A-tXt7J5hu7%4y0pkU)be>KO-G#O=Dr8W+A9$YEVZ%Ls&^;|TpC;*}k}@W? 
z@o)2m`jbagaNm$TDz*(=_rD-B1q$EwCne7y2l^;y&eiq^Yii5ui9Ffkw zUZ~G;o_hJ=wRel%=_>`g1VXxm3W^;%s{-Q3fC-UH2@SObtP>7A^tDqIP9&)1On*4w zzx6>!_*&DTOrEiOnISYZD!A1UW!yQYo6akknrhbBqjKtbDgOrHp8>UrTx$x&>q9i8 z0UXn=#{_{PtuV+Oo>aT=O&t;aeSO$ZZzcr{V`$KORSIpH z>@%PgWY_0FzojM|19q3UFNMNa`D@_Nkk$eT=C%z7QuSEkK(Zt8hprS#zy}w;0uBcr z!T8s$patDZ_}1#C6*rt{W@GewBaDjeyHH~X6COI44~rLJu@7{Cbrfa=TXGI0%P9z zIHj)v9ZM{Luvz6&DZhW4O61OT>~dC9)#cUVD3Qu(lBR!7X6ZV4NmZXhxirVVvM2h+ zIy3Y+5TD{A+$R+Hl0s4Oq|3I$XU18V!@k|=c$8auEBf8P@)8kgp%+(kNdBHwy;=vm z`vNiidJy@Sp1U*v3hG4+Zz_;tB#68kv$DQ!`3y6AepgdhT|BVt$Im1OeY(gwe2Ku= zOOJQqc%6NtT2OISrTttUD;@*dCqwc%-HS)YrZz7}bWEzrqhC}fB6~=`sM?mwpmQ>C zh)^MImaDIca==jefj<*QX;yXGQ}@EkPS+cvw+svL0e>2aU+XYBO@m}cUshFx=@cGc zEO4axu8dd&IM+q#mO5u{IHjnt-FgT-$9D8+1#$&oo4gqV#gN3pyw*Vey-8oJav`hR|H-dePkuwQ(8Vv_e28C74=gJ z?r4_BAh@HPr$py4ZBs*6J*Ro?`wsApJ74=lO}nhNV>ex_$5VjmAb-C-GLQ9E-O<4& zAn4ykGNLGbAr3)?)tI-b}-wET`=}3tU#aC~`O=?1ET1 zWV6ZcRNdNQYR=f8Nom%Zsg8R(z-T8*DXGVbm9;o*j#I`uwW6MjM`|bF&(GYkMM3mzn1oHbi88 zZ>!I3tsogvb@qnn{2>nS%1X%rUuVvxSi?x3cfxe4PCvhcU_z%=yU_E=YTtQvrHQpME==ZrvNL(9SmNo`QCh^{ z`l|i7Y_Fh%o8Dl8gZX;2=#EHsU`0D43Gmo%Z=Pgd#>uW@4S#-#n}sKRUg0a?nY_{% zlrzYZ+}*b%SLUdcpJWXWrRdutAw?`mL6eT!l)W`8udGaEn$ByLlD$)TWTDMH^08GO z7zY5-r89KyW5T~pr!H)?0d`gf+SNZh&{!xCc(2jXAG8qwU)7+b>-5IP4Gev9P*FAm z|32oJ4Vu%gz|gtt2Zf83%Hyf$HoS$zJfc^EMK9`ulv?g?O^F}3;fOy3x}eNoGCKrZK{vDyue6@Y>_n zOR2;*2_XsH%^25({jt3!;sJ5B%4v(CY92acLUjvAT@4E2DJ~hAoSH6_3q5jQjB)Lm zfZ)@FvrnJdl=NMgfD?Bv51<{fg>HC_4#2kvyg(= z$*FPzipA6DlYJj@bD4UYt?+vi4r1p!zF7GGA6su37iIfy4J)F60SF=>Dcz_@3@P0u z0@B^x&48$sba#VvcMP2}LpKcF0}Ra!1Mm3X&$IV^@Avt{Z}>2CUS}QaSjRfng@f+V zMwgX6ElL=$d8wLc5gN@fx(u**KkE!y9?Y%jhTLUOyDz)9ACQh>r}HjPNq6x`Zpv$J z8p&S_8Rs>{+$2vCYFRx#-rGKk$u(M7GDLT_)P5kRAH`NhpYFfV=D%S7Te`8K;DDMQ zfUhURxQ(?n%lxi;vSX60mh)f)%R_)P8qZ!MeZ1>^dgtnDsFRY^^<6&sQ>ru%l~k;E zROyclbhIO!6mNNZr>C~U#j0q_pB)rP8porN1ZzydcB-vCHkVX8poFZavE%g2fY=w&Pr9(R{Kuq94*& ziTEuU&nrI=hxsw*L}sBu|1fR#4UOL|3Ul===aY+gk;~`jf6iqs+Hc+`I?&r8w5=vz 
z%%im={V#0N&rBm0-^T^Qq%?N}{x&rf4IWnCnA+Qq_dU~V_sCZdf&m(ubEhx)oz8_iAVy-*Cn{817Sv+nG35%DB_^Ptqn8ibnl9Kpe|&9QezlMEj~IC3to{IhJi| z>|xf&e5CPu=g$HKAHv=Mw#jK-26|WxLblot5{9N=A=mH_cNY z(3Q^+g~!s%=`W1+eA##pZ~B6)i&u7xi$c8@chMFqsS1@kI~0uVjRBO@Dv<&15q~HZ z4LF3Ji$WZ0@y>26mNpNB?lqu8*ZJZ~1V38uucO*bHVbE=+VkAG4&kkpQ3qMybQ!w9 zjtWWlQq7fX(je^HAs8s*(C^eoeYySCLO)^9XyTY!z`p&C5p8&L$0d^1c2RL2u1@9uy{kO1lL$lqU^`HzX}#95EXbl^2t7LkrA_ zu2^Rkzbq;lw~0&1Y1=y-^7V$zwtq;qc$K^v&%yma=juPZz>kiaG4vZb{nLwd`V4>U zlVP6|6OHikJ(@cAq(P?|Cvng6EdALaN>!XdV~m0ANKJs$lX{!{xt9IUIwz$Z)BGm; zAt#ZcDmKr&(28g;n>2`y7i+ca?J1dq{K|c%2g3YnP-}hIe^1kYSwQj|2S7e|)lPv} z+C@F))et5A*wJ6lw`Jc;6Chwrk6ws`g(Wy4z-OELaTDuO&_=F&tDtSuC2!RB+txNc zt2A203MLMuN5VqlxaO2JXt?g3T~b%6GC*lY$*M@UBg59Q6P;Vy#Ei!<504yC^r z@G$nb${w()7t?VGYG3dw)NI8w0?o<=|0}P1fYDBS(g;L0>faD|oIJkIL78#-Xc<}X z*GPwh7N+5lC?O3du0b(B8GzHeg!AU0t@G+?*Jg?Z9vxS0EqLXz!%V(Q{@k|#YFPg9 zIAiwC>TWf_0|yziS=^3{6;g&>fE*CZlgHWq#KG@<@4a^DZDuPjq-XZD27%d)cnEW! zbhIWwphKQKzTrdP(1PMjn=6}uzVjZDKWK3Fj)Oes`~n2*ET>@@in=Y8uce^t`e_Cd zvqrUmm-loT<7jo1;HV7Y_Ci+>c7yaP1bPA9jM<%IpR93Q_sm@;N8#3T1;-(U@!jc) z9Vl8aCbF6upyp!+Q8;W(Kc;q;#P+pkA;pYIsC$ST=4@}tyCf^h(0i(^Ea-4*qgloz zbNik=%y?OjcXD|csvaI5es^5r;O|pd7Wyb|@)c;8=G*ZjU9{o6vNJh=ms>8A7evn`$ld##s?v z3(S#|Gb_X8;Dv;U@@001+ZV^@O*>KdW4x$NA1Dlo{4tBto~hIccG(Ntj{fzOMLi$g z@7aMzcOA>Z&alWxC_u(U_n6Hr)8|(?jciv?&@!~^?>va4KclFe zqOzwvI|~%}HZp2yAJ38XBrS~%s$_fixr|k5cG3zc&5dj=(MPJX&hp-0gax^0pp_`s z8H_Cl64LI~XLJl7h4(y*27M8&vG;zYZf|%vzb@^2qP;TZp8qS*pGSWxrP^OaQIGY! 
zjn@#`iIkpT1)S^7NP*G$4l~d+DV>SGCR~1m(}u7~NX;lFJ56$sXH055od(-Y!KF?t2+xF=iA^^Zx`0RbVaBAB&dte)7263+A5CHrfe`RI3n4x4V6j26r!j+RpJAwc|`H!z+0 zS&TSceB&ALzALI&=e1>|<&{>y>O{H6L!o~rx-R&JB^n$f~8iM!YAKSWxQQ7DYjQXo7D1=mfWPpSI4 z|KoaG4a1qqcN{iT<@lo&OQMy7qxrTVB~RjmGrlFGRITH|MdgXK+tyY-7f{Q0PUtjD z%tE%qFXc9P)Oo|ZshAuChDPupA%qpfNK0E` zoQ9a71F9c_`lLSTS)tZK6fBzpDm6x_Q$)%3lQX@sZ+q1Dvyy_zITv#$)Fn4RGG68N zzCvP?ABHzoqN_*9B2n%f`CB=(TFFNo`vPYdFjG1Y)s%eW82?H&=U>iV;-9j|gS~a0 zQ($mmnSTSQn0`Wk_45giR{+(vGYO^mshHdkfX}Z8`5CjXjGTt_m=4^3Fu^>2@=42V zlQ@C##r!ZvUvHNQ-;Gg;+-o=pC8wKqT%K@gq{q9(TPoJBUyoh&h##S0%|0=uEBa!< zI`*rCKu^vZ^OX|n&_N+rpHxJfA)B^*eVN$Z2ywx?yiSgDJb3Q6SDR(M&or(?uxw#0 z3VsPwYF$>yGLBz?;bdyL6D+44Ho#|_%=#QIBQJR z$l147RN7?Ev+@~7%ivXIhsc3Dq<0|9%3|n-vx%wsu5h+b(Pm2vJ`sz!M6d^{$yr%foLMvDUyq2AI00Cp_q;2mJ199S65WnL2luR2rjDMbs1}DR1W+5>?Oy2kkKWLv zkGqc^6`~V)iv;#Spy~S!TjQ~MZ)bQO0&v*|aj18`i8*5H#=ux^4h{T2dZ89q>~p!R z$tkb=Rx`_Vaielg>g!myRv^H)%{L`UW?%xqyLAfIweG7Eq{-8*WJ}kV=%b+yqw_+K z(8a_nf%~c&F>??*QgH$%EsYP7+?Yuwdkj!>)z#!`mg4_is!V;UHfWa9Y|>5i({)>* zxH4N-QIMC#&g;TtkAbD6B>b*of*jEbLbmExP-Li6{PL1&rO=*-QxAjzRKF`Jy?&g) zI#^xf?dJHi&sihM;1y)Vlb#+prxe-Nluq{AIHiCdCBpZ|i^m*>P)PxZ$McX*iY}31 zct>6!v%dmCjfDTpY})WSP8-b5fviUrZy@?22bJyumd*5B$8m^WdFF@R1Z!CQ+0*&5 z!FUCLdCo$%`Stxy=)&O8y2*~I>WSNtxOQTs@b5lYTLAKX^8!1*#0L$1hK3AsFD(Es zn@m4Bsr->g|HSr5mA6gMM$h1daAIC>`b+PA_T|z{svG2r<=sivaz=WGi-q&n^lS_6 z?S!QRznG2GC-4gZ6Px9ZGI)7+8!V6)P7EUMppx?hY0)JneRD$FgzHC1JivJ}r= z2TS4N#Z7|o^=|Fgx;TOFW#910rA7>#<2Nd=g7~CmE4!)A2fNZ5HC^C~(7VgcKU@$6 zeR!DWX6fJ5o>uG=q^5o0J?a0e23gfNtq*+U7a*Q*gI(r{(YWG`&Oh~s~CO zbp#sPoPH)Bs7l%H5xTDDGt)^~W+n2yIaD~At^+^k#&zWGAs~dq{J2gD2u`e@r0WJ# zW{?jD2kyRRN;hI*IFIi118Vzy(eRg+zPn~$p3GS$mXucR^N8EUjq?B5ekA7ptfqWL z>2;hcTk|sxZzK1juw_w#k={M20slVd|LRpUG{Ya;p+iNOW+iEp5=F*pSib#7C+sRt zRcrn|2%GA56{|L{aSG{hf9b$j>3>lgH$OjrM<$MTJwv;Rt(+z{(vk7lo`7SAhK5q{ z&bMDx&!yA4>!zN{adL3H?)Y+bEtFsq;eVr;lu)CEV*c*G6)meP625|S7I@XsIxKsd znx>z1o8FpbI+O)PSNnBk@}k!)8gX`im&`Rgg|@clF>OXc5r+eJ6MDY>@-e%e!{&IJ 
zM`JN(dA4&+UM>u83@o#Vq(UiL!=he>OUSs(tP%sa6#ef~PX_Gw2H9^EdS2>^y4l2a z?{w%oWuwlcGGNE#RSqFKZ0!x$)=s58gZFv`tx)?mc>m^L3~hOd7}`LNga!kF{Jm^q z=M?{ZoGg_2e|GB^>?NlGiag`}uqri=*f|v4o3X!vfzw6A|MohJgeoYYlaIjC!U>X2 zGQ*SA=-iiRA70)Oy7$IADbypqOz!U7?|ulR0JU-O7eC!MxHUU28<&dx!XK^>+0=@8 z>Jf&9%rz^nw==Qm(f^>4;I86>7`iO2 zhmlHnex)kl@+dNP+z(p)k8+LSbL`z7-?kuWs_l!3#=gJxy&+av<2#RIneB6qTR1L9pms@ZJ#!zQW^fT=H z2)W)5W`RnZ?AjK^l0d{1pn|+I1^Z}l*!AbD_3a011N^Q#(l$*hqkLA(ZaaFyM>=sw(vnCM7=YVViE<9Q5zz1M<_@_!nMKe z7B53(`<7;V?F{0DO@71Yg%{7pRM8zqk!|9lW@bU|d7GRHpw--Pf=T!wFGzJ)fw>f< z4xk`wxH4JuSl6 z{4VY)YMTG7yC3GGtK07onBROn&Ih`%|B+*$nN#d3KV#?|e({>_txap!{zk(Bl$WaqM1%*0L7U2na54G;wfl?@Y{ z-QDUPm$CTYuo|c*Firw7;OT2V$Lp`~AtpRf<_7$t@-qFU-D8$|D@o0E;lHIksk8^^ zwnHSU=N}GsfkZOde}cV`pQt!#%09h%j)TFP8XyF^6dVYuZ{gG6Ppoh8O-UF)Xv?+! z7;HWEmUh1KVNpV}gN~&hvFWY@0ez?f=v%z~_R3yz2KI{(M;wfXGx1yj2){rxtm2Bt z&JCKil7Y?}*P1x$XAdODAD^_WqB?wz!I99<+F-0b2o`MH6AAqv)UguuP);P$cekW za*9B)2TMM)% zV%sB$3x14)?Na^i5sv1=f+MD~aNl|2j9ST7fHB!-{@^0DAH1D0%PlhEnZBc{^W8Ij zrVHU-yUoLKT%u>ry@H6*;5aJa7Q`H!3Un1Ps!L;Vel+=9C=(q?%+C7 z*-qcQchP80JSj5m6D{X z{P}Jayz7$C^z7GrY;=ps<5skgit$(wL=;@)3*(N>a1$JZ6R;l$2 zS`Eqp>mK{qiRe^!0V zTG>opoixoGv5e6Z0m6?Ij*XLOm3_5DV5UA36*jo0 z!qbcUcyIXlPju$u%9;Lg6;qxX3W~hpoT?Wc@Txb?a<6NfhOseEN__y`1Mh!8;h+08 zOc9dQddaRXf$S%^XE{ojhf-zy3YK+;yu3pXoTkF0m;RIiNSIo2sP!5TSe~m_sd4*Irz)jCxIIt_u{9C&>O$wL7WyEWtqb9;V;>B`Ur$4F-YvF z?q%%e%f>}PzB7DH%rU`Z4XOe+F zCT@S%yuLL{#|fN z`*}p&XS7$;bJjqmtPORgKy!}2o{9fW4ZR@<5s5x!G!_5iSFEe6tFXPj9g9XxoA)S# z`%+H7iN7i~Aq)|(Y4xF>FUVru4rb`%;^IG+LG7HON69v6w17t*n@}zJ48? zV3}l{_bn`DrO*L|xjFePUBwr`c;Asm#Gxnlb5NZ0zQEr!-hhmfm^UnAcYxd2*y*EHPjzLc5tVsZjMdfuKttY z!#~3l0v#hNBOFq3p?sb%mM{X z4xUBay|Rd@!PZfJ1LCjs?z>dX^Xpbam#D%Z&zX>&5^AlxBG=TFT#bxfj)|fg{oS%z z;;sL5oPJE@96tY^m;apJ^dECi{Z0Zim7V-~OmQ+=Nm+*$0X4JFY1x`dK6}Uz7o`JR zX}nTO9s6)FQtta?c{}^!aR*y`YhJHz>*`0VdQJ544OFGi9?&;RSd`5b2uU3_NW)y_u;amPZ!=os{Fz!mnk9?3>&;3lg-C9`xm3t?_@)NcI4x z?RfUUa`s<1iSZYB4DS$QthWv6EbZg(*Sm})6cgsxJYXr3iJp}!PL$D;Ay(9Z5u@2! 
zLsy)p;!kP;&k}Gx;Sz4~-=xuJMy=O*?@R9X&gjntDWRn|+!)?J1-v4i;_Ld!{^PIn z-@p5J2xw$}7}VA$`P9y6Y`yJQf=+s+10xmZhwDCN=FpBD(#2AT=mV&%hqP$Jr#CmM z3wv4U7b_OgInv{qOdUc+<3C^0oLm(LIgKaZS>t-{Hzg3_}1@PmEI*h%7xMbK! zq21xD2yp5Ilocu@NPgk`^OrY1QS0-O1j9c_VgDk0Ct6dS=+Utp(lFB ze9NI=U!BP;PDT-5bVp~Zod5Ouj#tm!T24`13hkM=*(RRfeeqBQ7cXP$)gL-2V#gwQ zxWVaNKQNRQ)mktOUuUvPAVF&z{;SIVA1xjRM#G6A96$N{-`?qGn15o7oH~jV1|Urn z+k(~JXiw9GzAdi|voj}=jtF0Rus8u{2*-iXda)YA2W8$SM4ca%5m8`B%Ma#ssX1&6lcJ zh{O4<NDoAG_qZ|0H|F=y^zZy zf0)KwZi6;PBMcwn!iwaMC!Ll9yxugd%GM)w;)6|Hq{K;V#fsP=f3jG}u|bdP$a$Tv zbEx>xvf$_W!B$9}-*FuOi)%oi4HEo^I`2qSu4w0B#X5})YHDhVcEHQA98-V)rbs7C zYu6;qOyJ#(da7gmm$hB%{&Y(3#0!?EXvqWd(9VbgFlI%{tY$O+5J#T?w41`vPMbgc zxsm0KdrE#JDZBBdZAx9t^c4CY2%=dn;=B@f0e{c>wA8zL$^wDN!{#hqQf80cHmc1z;OmcDoi#?*!VdFQEG5mV>YY#_s&N%{Um{0O6@YoWyG9 z6}~7+GKaOW@lo4CYcE^Qn~ZKr)@XZ)E)nBFLX z79=l2Y$zR?ak`kXMqbVW#64vN&() zWx_VTMw>B(MUY^}2J<~UjV2}=4Ci{_?{pd+OcH3YP7?BR@v|{&tlbX+i3X#%Uoxq$ zv5IAj=gZA&=g%Blkznx|hv~cHTJRnCW_vCD7&+UH{4!lquThRT_S;`nKlqYhsWaZ1 z_{V-vuM}LKsONd@o1~c@44(qsP5zkow=RXvhb)y=>g{)27aY4$-PcA*_&D@hM-XrP zid}Y#x9#PCJ5qfm>K!H~*UGwSw@}~o#KRxE(uA9r*5#SjhNM8TNgmP~V*Uzv`3^&> z6Z?=0LjhkwQRApq2&)?5V{$Q6Byuz4`t^4E_|E?7sP&k#*Stj$`Rl=>oBECsGIsGO zj?{ctu$p~MM5*n>FAI^qP@DY6*mA6e`GsUfA`t;E`R1_46!Eot1>{^3>FhH6IUeu$ zP6?}^-nMU`1OcY37jD$5G{M} zlZE(l1w_>XdB)P;VF&~UFkH_dg=a&>rdhSNLi>t7oC2??{9O!hY=Aex!ROjId&Pq9 zVOq5@<`igZT6Km|jc4BtJwdA{)YObPP+$e^^6-}piwkD8=XnaS#=9Bddo4Y{Zq(D+ z172ws-uQ%!#rXycaU#1|tIV^FJ0FNE==vDw=$LHjHpDhC%uxEV_2pM>%yZnj=_hll zHR_ve7XQ5j|Mf_n6Ep#Fn~nZE9_9=8b4V!EZ=KQK*fF^ZodqZhR&y>oj(!~LeH0YH zaCd#NQ8om4a(nXU%{BUw6i*)ha*g%hZk-HX71QDT4HmUf^8HCsU99obHSCo|h`DUu zyYNjBtxB3CGW7c&s_?Tp2k!#zj3!?GIq2Eu_SXRYz(vSh{Fo@d71T>B$nzN2i2q~r zD7d+{e6sR(A{Z-?>V7=95SQDcVCctOqP*y$%i5P4IF;YK#$fM#1C-U(9~XyEGnSvn zLj%XS6gw?(jFIN^(>A=V;l!2h5JBX2Aw)`4bKmEBslnB6zlaj1lxeHBVB2>wUD^K8 z?24~;+P$isnAL;h{8H|i^Y)NX&ioJHy|YReG6oMS5y0C!yaY z2P;D=%xfGi55-AuXn8T&O`@5`D%dk-dr$AR^EG*kX>mR%PKU0iLjsfwzYV}G+ 
z8eHFGQ7v4a1mRWeF|9>=rZb-``#^-42LexydlU7@Ln4*BO^7*M`HWwo-%?ol9BTmY zbS=4fjgSgC*ZGJ-57K@jW=1+O6?JzdSr&9q9YpjI>c5?G)TgOGf6imPdlQ2P<9B0Yuvpj8H7B0=JLsmK0m)>tZvRJZ>v4gliGG3ugZ%{$qvAe{f<^%t3n|8C zpgS!|WhBf`*snt$P%OfS2n>#no7xVPz&KT)?xnyuG&{egT$kmJ$1uJn-k0zB=0JxU;5nR~S<`Nu=t-*5D< zS2m?__z11N*h@d;kE5Q6Ag>j4nXt%Qt21UR@B67FeP5iGPzw{?u1!{Mx2Kr-^!c!7 z>~^PI!lQ&f@D=16yhnIFnVF=$TaKKbwcYWtl^(K((jV~ty@&qiTnqgAl)*M8(S@t& zv^(8?eLA8>lFq*9q(~&Oowp{jdU6rBrjq=$I)b=V)lql#-C7x8Iun)m=};hybU6iR zFGH`+$j=9Q4~YFH%gP(w3q?|Gn+t>;i=5<;wFZk;qpd~9>#c;+q5g1@N}hB&KE9c! ze8m1+N&c2cn|=ue!sIM`){e=4o=#mo6FIwH8gd`XJ|Mnd!qNn=e(9Je*?A62-ok!! zkb880ttqnLw_OVzQYY9rT^`&p?8K>#iM~Vyi%bYz+=whk`>i(Vw~pfopQV$N)hjok zf((!dU1_k?LKC5&FOaAKq1TWnfz8kJ?|GL0_J9yqSBC>B+Dz|i3?Cn|~iULIHX<8uP3fb7@N zlW7r8=7vOX#(BrEs@$OR+*ipbMfMFFC2uudf2Gqkn@u+AL&{6gopULrnpN@*)0bcL z(qz>*S-+Ol229gfV=aa(1EDFpFS;Z^-9^GTfssVi+RgAj&UTW^_8Et8ZY{mD6eu!M z;=^kmwO;NIb#8}rMs8zVP%kcW$t1NJyEQlHYL)RbwnLwbY54>GxG8(r8ll%PQGZ{k zr9fl_9Kg4#3ERl@yRhNJyGpVMaDGRZ?UR_vwp4`Ym&L2oPC=h0`RDdu8y@|E+!OOF zvuaQSxa6bfCLMkTd&Sr4u7)psnn$5T^kt`=qB-(y8_Db~Ux&J%5;W|7zV1!}E$OxU z>&>eNj{y;cS;@|^gHPuF$PtrM)+^3MM68p>CwCte zY)ox~k4|6n4eI%1+;b~)@l1&*s?UoJzW56m=Grh`*bO9(4%V>8G4#cdIZ$xR6N8^? 
z0ROB@8n}^n_R#SYZap0vgQLSluUrh{PlQ%^cdsBA??)8yuv-PU-0tm#YybLN^8*fW z*QWs`%>8nhA!`Z5_i~s z?HR8&!VNO@W5kyYPzd=o^3d`L1ebEvv6={Kmt~;8Oxt{Kv%@L#+0MdlbB-Nf(f%1V zR>-+Oe1ucU&Y{rafzZNAI0)@Eo@ba3HJWPux zi*}<=`9$VE%RkfsvNl}PR6G>ORwv`D@@JREIUT%(-rr_HY$wrMa_9&vTpUN|kg52$ z1Lx>w7Q@lDRms*ma*M)B@>(>&;bLe4o;BWoomYRq$j-bvwYn6!Pp3)>=Yn)h z`(3Ogxu`$!vTZr(aOvqfDU88EZXjJ~7D-{gv;9z83z2NdIpnm?Osg_`aDlfhtVmrq5sJk&d#p0AL zx|ff59HpM_6q<&lz>qX3?J~9$;xSDMk%XuO4nEhqQ##1!n{Hy3@W}(}P7kkgS;^s< zbyb6R;nHRY$yM%7PRX5fON@O)-<0Vpx8KPM%*^b5MxzQ`8jNwgx?><87sB688j<}{ zW1sMC9+2@Mx}qMj83XW%MOEI9WG$%(@yR+$l@WDZBRT)Do1eBcQU_o$fXiyxV-PG^ver7v~J;d`fVw+ z^IDMH8M6Q4@jLAi)zd#7EBFV0NTlW39h|#EnkzU0pKxAS$$x<71lz=X>j0s5@-M&t z_f8i5^a%0zy#@0qiGXB(gkgQ3MW(m40{GS>O348aKJ|nglaIxmvO&1^=S66`;rmPQpy2M5Tb3#mc-`)8S;-Bp8j+lXaZPT3UGkaE;<}Y>+45^Ob z`PswFYTk^9$9rN9U&1bVNWI9gWJh?tKjdHK&*)4#@}XDSvGZki7IqQK{etLGdC}aS z#%zbA6@s;(Q3v`3+Nfvd!xTkm*v384wm(3)oDfAsz0tWJJo??cWAKOcQ96O%@$IYk zJ;9UarE7OSMZlAqaqmWLsQDI))LcU**HaW9a}&g^wXA->-5WNe-FGk(sy9`P40r?+ zL6(Xnegvy_h4Q!;mT9d~5={{Q7Ex)NFM%=5vl(yyFHC~`74YON>}o#!k&X~6-QBRk zZTPZ7e@U2!w2oE=JFBuIiLL1fGkm(drs2`bjqxLIK3Y@8o>rFzWQkkZmQ1m)Ql-?V zAkV&10K&l1MqQc;vJs?|mF+JIXS<7-@aC0O@b4d@)4DhP9Q$EgI;vNLPuhNYcnD{q zyK;yEovQH(V&^}rzh5~xfb4VG#Y!5~X^1qw{;ZBG)Y^QJxKAVOvTf5ynR_W%;^1ic|zEaAZA{=8P%lL{eW1B2RM)1*? 
zp6T~GyQ|D+pTs=V zkxoJa381svYTx_uE6-ANliWOub0XKjVFofJfPs<(Kb_yLq}6$ASXjH(SAAnHxnBKs zDa~AJSgoMzHG=bQ2~zHyIF&$dW2eV0WljqYuKv>|$SP+&XI1n#k|KCn^wVMkTxdI$ z_*@7?6p8)KQ<4hZ=X`c{ULlzjsZD$!d2`W=v|bEjKzG^{jPWqDf4=F;AsriW(y}I$;hV) z^}I;AFN!A@vgT8Ee5!F+ooCAWTdV&c_!b>b4{6ah^Fcn~tYdw2QaFM&8<@hoY6@X&W_L5|Xpg0)#uxL|*Hl8}xqq zN{MM~U*m@Raz~B;d(54B3{uxocp>mvR-~Xqc8g`vx~#>qL(ugeKBsSq1Pwz=*pY8} zg$FiLETD%xtu1*KD@~s7{cYhV`Sc29v!}8{uKSyzFu>vfI2g8{)?n|`3>V$m5TBf# z(9d8q5waPwYgE{YcD&E@J*+qyLhCd&5@dKIUmcnd4g3GV+7OV>c9@HNG#wd>Y6+^C z{a=&y0OL9K*N1Rpg+qZua*Q(Y^`%D#S2}aIyH**`ILotiHvSC>xRmv*+_#7m`$XyB z5(f?NtxwbgsFvdSXxb*BUa4$J0*g47=8M$B(z?-n7k@4bVWfdW@H_5o-a>}O;nZN(HzP zKM);c?hl?Dn#sO!Yb|dXRkt7K=I6{Go<47Ps&r@h1)(!KT{eQhTx~^+`CV;sQVztq zOe3_E@DpqZvWIQfRm|3xZU5A*sx)q6r$`*`ywnl0V+n*cO#r?HVGhWiOE2Eg@0i={d)>I7xvhOPDj4#7jDwqj!j1yn&z&e0 zctksa>Qr0Xa!vDOQXv1{WNi2t&mZh(-*t=QxdBga?~d@=KAjwfFx!;BLSqg*v-@2< zl*-i)PN3JQx5#-fq1Vw+szMr5uU3zSzP4=l+8JZVX@b%82%9<;>Lj>z|KtWSsl20M zQtPR>NP$TiIxYKr^Max<(d&`-uG$2gc%P) zhEx|id2~*k%FDRWM}vX#H2nFUE80%He)^{=V$uS<0L8fYg&1vG5a1 z`iWBAa7LJV^SYOB1bD~%(CRhL{fzzeJFh3HGl^LZCN!Ls>m}0Xa%2Yq!BwfkUQd`D z_~npdx%)qg9>c3x*d}zln2Q+k>xD}I!m>L=J9y>fdV7)m9)8)9Z@iA!o#zID=Wx&- z0|>5yWWTWLohtq$eDKOBt=jb^RfM~9`*By^@<=5`C@3aw$1ZJ}t&VRI@cw3$M-G#m zs*L1%J*k6t(mwqS=u}?hV;NIGC>Q%HeNVWt{c^$38S+?Y@;~g;c81wozgVdCA39;T zgE1%L^pC*na~m%8;kVghrnArZa2r}s>V3)Hotx9MD`~BA-+fdv6r4YoS!}ouJFV;j zKH**2&g|#Hc`O!-T}o^4zTd-Qf+IZ!A}L0W*`Y>$S&g}qk+J33=A{2-psJ#ypwr^ zCZ3XO(!z>Ow!K(#w#baIupoq0Dd&R0FM>h9nz?|yt+TbSPICg7N9||eqd?Es*WeF( zjWJz2R*oUYR{~sZz|1VG+{uUW*SN zV=O$+ANv)h|K5!$pZ_#wSKUGt!qU)n6o~)4x`_d%oYqqmBKop%l1jCaJoM|D`Z)mh(mlSB+1~;xS<|&KMkTV z#hwdSfh5(92fLcH-7zejN%J&!i`1zvW4Q2|5OJUZB$iBESggzY|_Rv4AeY zxJU)#cO+x0tMFi|qmG+DT+8)jyq7~YTa~XZ9UU)2C=eyYg!3#U+$IV){^t^X_UM;sYPt5 zIZ^w)*x;G|P~<@wNccVkczuU-IgP;K>u9v2alZR*?85P@NX%lfP*&Wufp7}+?o109 z0p{$hjZ8P}+x}|R*=wax(tix89p1z#8Z6ItX^>OMGdGxa?Iz$_dK>j)ROxf78NJ>k z*YS@4?K3`$)#ymCngouOlGeo1OnyQU<|4oD{6a=@ZX!2&9tD}8M`kywDKj#>WNEf> 
zrF%bhs<|LPV^E(3Dl=G;>~?nu?!Q9517it}zt`Nv?@QE_Tat)?YOMjzbwZkP2u~jI zo)Qa?tn!A&qTqP;RIs+JsUy6Y58kb&a~bB4ikK==q$ekWREag{A*GSDZ$MGjoT66H6or8?sN;f2(6 z3T=^0g?}hAsNJOKwL2P#u1OsJy6T~7S##sWrEi&~VW zN1UnEok}W{`N!anlOI!J@X7j9XC?@M(^KAz9ry@Vnk(W)j-;?n71Y4#oQ6vrb>0)A z`D8ltFSJCg)X8J!V%!XpIa^ECX>F$3$$(kfYK3n@(>hLL0+io$s$1wbbnH&uY!7+` z3+)!^t_^{-+=*lK3a`|nFP}RLv7R?2%P6&qCysf(w)7k+Fv8`9fzUN}XjXzUc8w?-+CDdYUA9Lblk-ahVdK zc1uWQm_+#;^|YAvo;rN#<8!k#GPIUp#&*yw~wguS7b#_k2VU4I<$<~;z}jFcd!eq3n*)x6E{x_sL5repI(2{e`osG z8gITstgn^@iWzZfu9t(y$aXB8YbQ3ETDCi&+ATnItj4IEd1&8@r~LakSj4p+x!oAm z3%%&jv$57b5=+Mb-7G>Qi72U(Q=ez;v>7_z@&b{{6Y%$?TYQhPr?tc=r90m-+tHJLiB=<9sbUM%-neuUh)pF=YV{4S}iAI}R#^CXOv0BlG89bGr zcNWiHqhDP9c#O^X-vzx(vWHI|YAbeUXNq=TBqv+YBqq>9I<9jw=mi|(9BprB(DhYm zL00tNTcxW#M{7r6nHPKvB`z2PLm(V1tT@gEmeSMNk6^rXk^{Kxb{`+-V}Y;lKWDr< z!}SQJ+6n7fp>`BZr$e`drweCXWmyD-A9K!XV4bU16i%}!=CgoOl8ZajPk;GK#l^&! zTDK~OHFzDRf3Z!s?UK}LBJfW66C6ahk!(p92^f5yPc$}kzsF+=6JIWV^oVBgC$n}X z9z||=<@Drv{rRSvATwcH&vylB`7b-|HxxN``)~ecTPHSzNPvv+&h3 ztyep%eS$en+b7lE)Mv5h0YkYur(KR^wvkKp_m?T6J_bJ<9!B+Y6Xlbek*effN_eYG z5QkeRD7Q9Aj~>h%#zxezfB%ULf!q9Sq7u4qstEns!*Zss2X%ZK9fu;!DdauCld|0w zj!TNHZu6YNz4j{nrtFrj&;P`)KuH3p@r&ljwnYP1K9NbVD>WnOax3$%_cYxnH-l?*tovqftD0l39 z*wyg4tUKAw6VNip5uOaIa%WNDWPPXI?osaiXEk7O`kmjh%``Fv{T|TE_Bh-uE%2{; zQ;UQ(Hlqur1^`$kWh=mPQLjA{mmvM64~1jmYLhXt((AH1VnGvhcd9Erue9iU6sfaJ z7_}0XK*#8O*mEsi+F4V%U=DJj<4@}Ffm%(L|0}myrGDG_K)tInTFe+|a`foYHQEi{ zJ&Dr*T%N+A*D|Ho>`V`-^SmoaGB$vT*Hm9VvF|gu*)q_t?>0#!(J`oA|CT^ut6U$` z`4kt|wX#$`x<|CT>)0Tq_LF17>g{jb==-y*+qmYPsLlQOIrnG5(rztNMD)|>lGm<3 z^bwu{K%qA%2-D&PyiG-O-dCW(3Asev@h*ycFU<;)8is4RmO|2L5CUs(G|UV=i2Sx(^1mckL0wfi?EOsfj)O^g;N4&0$w$E3{VGQg+rrQBbB@JdWY`lCOKI#Ly{vU zzViL!-Pi1-2fzK+{QqO?E5o8%zqci%Bt^-gySux)M7m2r zy1P-jLpme{hHe-_8k86sq&tUh@EttoSLgro4X%s5_p|a|>sf(%*%e3}e5fI84YC^= z54i9RQQjkYqp|(|H3{e)`7}qhSZNCS%SlgC0=ucYH2v3v%PzsJ8ZuQrk)JnF@6s%@ z#!xoQ@>CXuR&(}CMkqN19YSJ@UGrobi#0M*Vl8TaPEAS>c-i7H@2;hh=I$g_FX4DS zNmZOH)=Cc*)fOi?3k}Ef5E6sR`I6IL^wlUTmMVX+T^te(DDo)3UiS-(@?d1Xn}zuQ 
zmc4$|YV=3=uDsBy)vB%Zb(%zR8A{-VRGR*Yf@!+N>4y0$h`9i_-nI9`TU<}4=)*7< z^PKD`P0gn{c*Kb1@^Gl&uW(fn9ZVe2>R^yP7Z?1eu7ncSjWi?4qTgZ# zYY=%DZ=8cH(j`<(5YC%VK|B5A?4Zh{s^jshW6@h1wJSeA1;8t}9CI($KAX?4bu}3a zoux!R?J;(HzI1;K;z7LNmBt%90GGX1y8BtH_gK-fW5XxE$(fKO>Ez*G%kogczYV@i zSK5A$+|eFsXN(#f&hhD8(IoMHND6Xa{Rl@*YIR z)$|xM-6rq7m4y;@T^-cxo7;a$4YCxcKG@GjWAKYMR_48mMcz%aYti*y#d4sxcLv=} zx{(49>0m%Ig3KB)8=IFD^kd@YQ2TtYUd`aQ@Kj zi6NY?lCm%!c0)1kn8hBxiuc zQlLlxJR~d`N zO$?l1j+5OJSX}Ouwm*)7s;AGq+|2y#$0SYPh9WGZtReU#Pfr7+mVo462+g4U~OvG;P7R)DvQc24DaoH;!21Zr9p z+j~*KeW1$xvof@=!1Y(K^hF8}pB$JC)%g)eeB=OxOy3e+!^3f|;~@c8bYdeKR7kxk z$|^q_VOr$sPifejwG~V`$0jmfXhln-~T464qw3%jC05u zze|np+>F&$PHr2^K2VmLC&XphPEmY<-k=!|U>Uz1dh}R&ix{Jf4)+1mNGp-`w%AC* zq4!0{h9Wp7q~pGVmcmM0#h0$+hr1cIG#AU#Y@hkZ3vMU21Oofy0;j&hyWxrL3M=Z3 zcDocC4f&BvmyU#?mK9-5{PXe+oUyMOncvIf*CoNoVtZK($0LbO7x{b!)SsMCik8GG zE+c8&q(0bEgoet$d`%2r0G5r4My>1&n$P9Y2jV0_r!d*1oY!6 zefa{x z#YQLat;IPSn4WC?T@yV$A{H5gj`Kv}rlMtvMR856aXY_3n%l?n3s+_EdrX1=d3DPf z8qT*0l@30zA(XY_VqjTI+jcjH~Hc!7K?X&R8l_)i>TM+y69Gqc=d}kyMJza8ZExP-otMD8 zk;b~aw{)j=FjJW6PL<9Sd<8DS10^Wm=Sv}R?E4kr$>6M{d@yrPiMxBlljDm({tcq+ zb4+N6ZSVOO#lQ8NpW4R2k5v|O9hno+(O}tn^WH)q?_nw#0&7Urbqd! zqLhv!`464b%o|Xp*EF z0hU*G+2eQ2@uJ%1CwY@S;G&1cBE@=F1EcNDn;wtwD(y$FFvID@A)=PI>) z(cRavKyV>)H7>D~32C1M;aGn06sYuTx32Ww!|Gyl{+;bn91;q_4{--jZX7k@joYwx zra;TZm-gS_s}StpR}QK7Eh3}|Q&H0{0_v#mXs}B!c$-XyK2fDdp>EmL-VY`e@Ui{i zv{`slw`DDWp5mBsVK5^ZkSV8JH<4ObXNTTAA#QA?XJmjg6<3;hhcKD&&AW!@CX=G1 zfeBfx=TH%G%-97ynNiPIi?12m$jwPOktVLxu~jI)e}4r&{;Xh{iJ}_8v^o>IsDovv zD|AcSgE|M!`uy@*m`zG*yv2b1lVoa9plkOW?B=Fc8<|8@!0q+dL5${}+V3&Zya&g# zk%dZ1EH0X96T$>`H=!)AD2UBTPk2yu`x;Iu7l&(X6^3PnBPDFV#yw!Y<=#@F(cof? 
zS0RWnK2i%Jli;7Yv+S6U=!mHNu@rkHv0wi2E<-T4m1aown2`OObv$T^DCP_~Ao-es zf5Fd2!#s&{a^R!Z0%kpBm{6~5(P*k7${2u*gze^?SiLY|vJ?>Dtzsj1)JFLe2pnIa z8q#za;o?g<)Q?^sI&XLJo&`$=0IwNLLFyB;Hi<89X^#%9OG2z^9U?3 zG^xd@X6AsUfG-P?T;bA8IC<)Amd1r>jo&`)-SfR`09HWL9KmDX|B>c!!ZdyzBqkVC zk3f*oWuY3>6AJSPVrv;m)pzK3*LQGv;XD3(1w4lbYiXM2Z{!#m@*$rTN5&`18FX zqAjw4&}wlJn2gaV((=&OIs+^UiL}HK@8CJD2|N^sDq0(&+g%l|TulkGQ z?^^2prV|`vQQf58HIgQ_lX)?;OZzb_4yW|msHXeYe`^2^Q4!D2toHCOCOrCj(dcix|kI9T+@RKtL)Czh<3Eoyi`?;)q zW^i?}yKLQC!h}RLhAQg|z;t=rAN1J2mSbgP)lT$xH!i1kKaKlP64PskRUzX~EBw+* z86&=<*tD}7VjP2D>%QZLzjJSpsc0gss0+75Eb7L=s`vez2;Qa4SK3XX!7mvmu-Fmq zjqs(CP;Akpp=F1bgf5pC zE|_ol2DgsVu7mf3znq*_Cd5J^T7Iphx*?=l$zGjIU+33sj=PLE zyB7c$Wm!K9U5ChMeM?P9AR)uSd@u;U5)&Tj313XGzHA`y2|-MAB(+!!ju^T{{l%D?$H7A3OG-(u^c=Y_; z@<|XAL+lRK6P3tIvZPD-M(aFMMr0!zkds5ZbgFAw@vX)Z& z2aB)!suj)k2zsW~d9f+quo-B(` zEH7Q8;;hhp-2F;)(e3lL)Ci*KT}58y!La=8h79FVNEzEd-r3T2$nEJ6c)W&K>An)M zrOmJ0&NOSd#5P+uhy;isp%ZjuDX+G78(6BRAK)6Jmn0i<0gox)_FIh*YX{{H3WHWc@Wd4T8Hug>dG^YD7di_))b}1F#sn^U->@p`uvJ#o-Hkdfs~y2|uo% z=Je;K<7wu~W1`t|MXbU8z})mK&0`uNb@Y!S_X{ffHsHpeyI(1W3!-(?;76_JvyO9Ett%wNqVdjhm^&vThn!<>;=OF=R8yIS;Z2E ze+NoGvVkd@=Sq8>?2hq1^_}X*z6iyvTIe#mLtin6IV% zhGJ#T@0#d*+}mg7QEr*pB1TqV9+_5&qB=;zBAEFUV*xD_LhftQ{vOM%&54W7phsoB zM}am3^_Z@1$-rp}jDcH<-lIpWGonO_!l$`VPFKINn+}!VOp9Jw>qJsZtD_7}(*F%0 zgF-??iK6e`I(r>6T<>rH%=GGjiN!!$-J(^?VZRAaSQ8BOU^W=GYgvw&rbd@uttRWb zrMS)!S*eMHuBys@laypGn1@TWoQ*erp#OUPvzYyQB|^P5Y_rN?RXvGHZT*Etb$P-_U9N z#yu3fNO$+qPF455NA^P@4dbvSYD+TJY`uOYPU4%Q_r7_bMLSfkzI9a3FbU6NdVLb> zU?JS;;#>(CV=qQ>VBryj`bwb+_HpAtd#p zWYA#8-u-%cNi*)mZyKZJVlMutI?#mln! 
z8q|$A+ioQC@$2Pion_m($FBs$jt#~Zyx1g#8r&wRVwWPVzTqQeDEGXIdutii!9mb` zzBJLdniC`x;yHXkx$N#}${qu$qTVcHwt`o}q{mU(8mfOq0u)wqZ5mqAw9KrAhdWcO zDvk;r`cW@|-5u$(VHHO$pwQZ=fc>-QO00b0laSq6J&v4Rk&@E8s#@S>;y^hKVoRoZEv7S%a25?C6oV19aV+kpVlFo!%3eD~)poa-E)?1dEu}Sl_&I*Wr zXjSyH9~pr?0X%)B5)cNipzD~zCqZk~?CB+Ty&f|N!2ayZ8snzbvLrvtZ{yvxpIM^i zHY=R#U!{CAZ1Q~t1wfrVln$zOt`qQD*Hk2~gWZy~9kD1G60A@%5{7q-{Y5mI&<6R= z2W}iP%an|fxMAM)lZU8Yw)Na6Gk4hBbsW8X4K>;?hbw#j5xsN3qAKHAS;c>U@d-u% z4jXM9e~Ke*w&*SOU=~t`&-(18R>%&>G1W0O0wZu`Cv$(_10F^*_7~Qy!5V;%V63%+ z!q?~~XAtK5wd4wJ5!ZN_jI)60d$mOLu<9bE%BEm`rx&2qh|bcvqC#p7y)10}M@g=? zpNH@lE-I-V37rJ#GK>5OOK29O^sDsjiaO@)G>DuXIRyx+s6%PcQ>OW+ayqbdSoo>g zH)}y`ZcIMZ^@^u8{K@yFa(AEAF=kjNfbL-^`;ZXN0q!PX*0|b2tl|o!Bu2;|R2E&x zSERbUIJ+NIv}c2T@{@XL=f`C<6TRE@MSYzJB1OYXgGL5r30)KF0oOBN^8#34J#otC zBRP>rdnZd*|D%J&&l&gM{dcSlVj*^|RXS9Tl)$&5>IOQep>!YZYq1T0)$>7Lui8bG zY~E+x%7K7DwCnFQzv2l1CUfKapA@ZDko%Oh3ijGqC-}ap*EUq_gwZk23&&<|F8l94 zsoAs*Y#AN`>EM_z9*uieVEmK6UNZo8b=AJ#zlG>~kAd$sHae%X`KJYxDoOU3`{l74 zx4n7LLpr@|elIi)IJUM@Ne6Ps918}N3wUg6_VToC4Cv-+Wm9;6S-;QsD6Me`ZC@_B zQT{eIuV8gZJFif0@Z}!`%`)(uyQ)8oL&cEcybw+Z#z)k?ih&waNTs*U74cj?z$qt< zIL!#m`sDpZ86_{xR*n>JmQU=Ok8^C2D6H-=v3+_ffHi9$Vk`#@}gFW*B2Z zk|W5-?zH_?xpDANbWO=c=;$NjA0~ZA4%Jp>zZX=km@ma}#lvxsV$~}VPgok1dzN^HGp_+ee zO%VqSZmicRdzSP_y0_Y9I5ZH~zn?GGxYT-*)8*G1nQle&r_1E_9CO*<37?*&2krF# z_=fqT`QkcIJAmemY^%_u3TJ8et#R_~Ml;+G+$>7tKsy4ql5oThI?~Dfq@M)2K>@HPO?LNuj+Y zuYzCbeO$vcxkS>lv0hh#b3xl3;%Mnx1sP6@y|dtCt0w#@$>Z)E6LN@)vBf2{?Xmo9 z3aX=##;d+L6hPOo#&}M^x(_oy8LqxqOYAX!@R!Q*pzkZ(+em2PXk_6Jxcb1Ez_DpR zfMdYm-ZRHV6eq85fDWo;W@f^E_^$IGzJyH{rmSy zTEwpLG&9#9sunoQi3Q|8-`<{<+KR{l;?S=s1V>dTjUO?$b+5?@!KTrNA~~n5t^;HE z#;6x#%&}OFp=Jt9rlSNCU3z2OH3+)|)e=b(lALGW{(W)Go)JYJZSD?|Igrr`Qh^!z zTq?^yBsWgS%!?}^(NtZL@n^X=o$gH04hXS&Wxeco<0DbwL9?8>ud&k9E5m1fq3X{G zra3-rJ>SWz*)Kd`HbUN!<5grfFo>vcI#?Kfh$Wm5N%^J9k8W~4dsa`)o@JjzC5o|rM?gtpss7d}Bfgl*a^ECOt>n*1T(EP_P;ZP$oFn2K(sG~=*6V%WK@a=a(ifL;M106+kb_dXXzW^l-Jxmla*dpd9EUG#8%{?6MLepwo( 
z5?LjVE}E)We9U{_F3pSXPr9!^DTrB{9&QqxhtJkLE1>Z_mTInyz%Jy?)Ni^@MW=@B zPQLbyapfm!thklT!4a{q*NkV*sk@ITLYLyWw{>jiJ_G5COCXP?>zWFRrX!RRxd-8` z1vCNG)V0O(!@&7cm}Nqj0G7q(Wsaob z-W{7?tBC3@an8o|CSL#n=jp2+YOgdSQD>HEI z&bY`La+>DIO>m~ssI(fMXc&Tbq3f+t>^dbHf*{gp$*0+`L1*;9=3zd&X8^2C4Zk}u zHAZJhea~ac5DD?#k_Ak$=uLDM+Tfo;i32)T76qVQc{QieBM9ff zK*se<&;cGqh;@GWP0@ONSbb81r1e5E$*|E_W~g?FZPDhRpgf3s?jsxvf*5KevKw^T zSqs88L6?MB%q9RgKsmocu@nRA5fD8U+p9YK$A&Qe?hFHf#6t@KR|M5J%TeNn*~ zIRS^?tmkPe`nT3K82tzS)+*6Q5kJ?X>(m-1oB4`PvEF(eKVFF70~5uVPn;=;0ZT%w z;+h`hqT!v=jkA?vxYiz-=1TL+&r;xCvRR$QGVfP2{&P}Tzc0uc#P{y&oolgUS9{MD zRu@e0?_j$c*<8ViKEDXp@zGwgg8{RY@EqKalR_U?Qm#7SV_p#iY2!j9llPWNJ9y}n z_{Oq5DHRw&g+&6`h^sxZiExhCOYr|Nwvh8_Wzd(=K36H;%*s0JCx#6!4t2+A{D~a^2?CX}S1^}Bz564x>msxk`-`{t(yQ4;#oZDRDK6kO{ctk%=P;78@ZvLi1@D=c zEUu%XWSqp*2Q(rdu|PmkorvqIZNU~X0D1~5nUzdy=BWVAdQYRc6ng0|^l}gBwFhG^ zq#)-?ULK+4{6tRQT^-VG)C5nCj5}t|yG;X`73}rXK90gye9Hx^LDYueWI7dH&S-L@ zjURycIIIiALJj-R|Ldutt~d4t9Q{j#Y19tRX5xW5g#B@PgV;9+n}x``L5RB3-`IEM zb7SsOXtqrqwXAv7DnHq~VyS;tr#i|Lw%xb>b`$Trx=V^l5_gYMR`C>nzAlOSHO{)) z;8=j&?S+!JrlBiZ`;d)jrFO^_K~gGeBWX#y>1I)GnriM(O2+Q69Q6h%FGZ4)>CuG> zoqAp9$g5{N2K8zx>MJt7b9c>m9?_h5yX(Sd#1i8IuPrXK%Q{3f_^pSguFHn2yx)i2 zl&V%%y}&3NX%y-h3XO%#k9R(3-<(*|1~94GG=-4mXE{KYAJ>k$t(Ueo7{{7x>x z_1C4T4k}E{i==Q#TBX#5zG7iZ?)XPg+oJ12yUxYh3hd+lm(1EP z;PG6s-f)mklIp_Qp1F-HdHY(k8kvFNCkm_j<$GkjH5DHf8(l;_8_*2V;H79jy8~DD z4>UKC7uKWFoHeUg_4AXx`w&^Ihizn^uz;ymDWgGlaV}c|qk|xflZsFSR5vX=!Rru1 zK5=biSe0Q(o$WQqt*ZJYXdyaY9pCz;)mLm62y*0Pafe|M-A1qxr>RwBk%Ayt#*lNL z%wXdO<6c;fNP1Oc%woxox^M6Jv2Z|bV%DFgZbm`ax;v)3x`nan4#zdZdIf}FvA&>A zXzlNcv}y42r$Gh-v*m*Knv*BHL<(Euu5vtHMV8ZY>AStH^p7{ES3H?_Q#sU=3ooi* zFRXKlNpzE}vo-{$=(Q&6h6@#Zl@27s&LRgcxmw%<3MCqoaFyJWcAn@|6b;)8cee5m zWEcxCmqTrp6b7{8_VE@;eI`qv;BrEUVtWuT%|4+X-eR1%} z@+@U*5EfCBkqM&J4JQejqrwCQd%1*|zYv=mtdnWBfVrrd?&*o}5wa4A0%Qi&snebK zT&;F-9Jb`T@k!Pb?1xOFT;&ukHmP`&8H~Ib&|1wSraw%)o$YSAkt}DkMu4Os>ViJD zTu4xK$zU4nQ4;5296Nw6gjDsL$|v_38a2p(wTnB%!lL2LbWr08;D&!Lq8Vuju71ic 
z4S+)@WG1w$divZ|<$*8}e(UqK9o!2KQr~lky-C6Sofcc+I;DO2gT9Ird=d-CdF(Z< zZoQ7HbW5OKOllygo+8XlgHdI=_vN929aJJK?bcP!4&D&7NKy93QF4yF9HpGU>!2jI z{nlzuxP^`l4{c5f{-plDei?fEiv>%NhQ+aK{yH>GEDP2Ptyb=!EW~3XoB@Qs%8<<& zbXL}ORuCns@Adx6a5$ZoUUF*tg68c2`^kRR+u<3M-EueC&i&lCT6(=z{Jj|4CkZ=X zp+HTA~PgDUnjPqbZ@nn1&@TrTMg~Z90!Xt+|X8 zu=|L&K6$G5O-@F%!~>Hew56VbB)D^XAla>PuUX77UKM<3_rZ#;g+}AP%!jO88SHF4 z=mgOMi7jvYekbWL`kE}OlW2?mIy&LLlX5`DvW@IWKG|{xu2D>pg7+W;6A_o@ zZ3SLN&qw$}TAi4PXRbg)r~lLz9g@MK?jsTWO88JcvKp`8)i+&}C&q6C|&la+IGxS`ozd`!KCzTpV1jDua61q3)JNFhWo-yy@d>6|X)5#iS}YkBNk74GT&23ih))bB0_`8%F#Eaa}uc zWwCmC83OOE%s!TRCq}P{gVjv-GkOYQqZA{H#7qrpgAMk1GMxnqKY8_SRx#0Q{TlgD zr=L-l`qadKgAJ*!00wu+=$?agjNCboRKURH^-Bgx3MTIuXa)PMPrPS8f8N2n`7{rG z^jz^RqRID~xm2DX4e?|fw^KCry}Am)DJ9Cg4T?}kA#74g=+{(QG}wa^@h4L}+MS?0 zp6Auv=KSACMcNp81(sLWzad5mC&Mfrlc1Kq`w=I^bE22xHY1GM@FE8B_d3u*BhI$# z!_YfnhVU=!VEBuqZE-`v-SR#2W078!)Oynih8XrWx@uhAVq_yeGHA8E97u!F`z#V>f@y6$U$Q?-D#oH3IbZ%5(Hlsh_%l-;^Q3ONo z#lBGKnOu|AW;vr5=r`@>VpXqiVeMAyWT|NMgac4r0<#q}7`bUJFy1w{pOxl#)i>-y zJECLApn$U9k^LX(AXZp~Kv5#w#L8R#nSP25aIZXrN{qb9 zFWSYr9`GMgeUuCo20qy(GVLEEd$b22?FtzKw4$1gQ0^8r%s^w|GS^N0$v=k`r0D*+T}#=r}}Qm^qWAi#zAw6 zKU?E66WFT$;EDvSla&Ig4uOM3?kBzC_H~`nY8~AJm%L85T~%+!0`_SN;fcPgcGF#X z8y~GRbVOVj0eSReklViaBanH&Ok`QlcG84Ju-^Y6c98yH8(t-A-5*cyZ#^ zo%p<_1%`JlgfCVR(^Vw2 zOZn$x|L|2JED%7>sh{-840Cm&h+T=69s+#~G_O&CG)TvOyp!4>)VK(JZl4s?j`V?e z8h`w+4gF(F(Ai+mvnW!8N9evr^huy*XWBzsrC}uUu6z(HtnFj}93ha1>XkEv?f*fg zf4+hZwl0hKtMz((JJ`%?=Z|QNM8ELMylc0x53_?t-o~fURi@^rkOZcGD)_SPVO$C-ObyUovO%6$sl2ByXl=sUN>p#<4#HENvs z{}BWkmREEWUM)jADTF==?xi;U5#*`I0bTa-sPie}e;z#c0L)e9m>!sZiVJq~S6ei1 zdyXy8;oC1~oPW{iS^sbF_(wd$FkzH1XNt!%02S!3!NXd7C4W&Yva3Q^NqNJz9g6R3wFchp61TZMnNC%;$d-9X;rbSx*J%8Q>9jPs*o)bov5mEM&IgV2MiCo7p?_V*vH2f}PlhSFWG1_CC zn=oig^qE}EDvjX(AGzpY>!=aR)q9h|!!EiV>tecKWb*iBcF#X{dURzTZ$FnxVIbYG zUi-c9qbc`=Kr5<1472K63=Rgz%t~8Tl#lO{JAJmc3&TFb3q zC#*cB-STX}A~2xs?B`Pi#fP@LGC}BC6P-63`rT|2zSIErlewCAYAy@^ph}R1RG^%w z5)$}xMkUnM7Mdur*xcJSL0=k1807Vccj09bM#$C#%>@S?kwWc;`}kroBYOOfp*M|I 
z5cFCn5@VvVHPP%Mf~O4}R7?)q@;b;0`xLi_?9ATw zlN()Azo|xeHkls0D@kv&*7-4RY~sLold~HM%bo;hYOp1RA9K^~0;0M>(Ldh}RV8XT zr|3blS{(Qj+fWB4B538+^IHgDFG7wNagHkIh$$RdpN$XP_2s-aRxozx!MtO%;A|D< zd|;_g>Khn<$%-#@W8X*YGPtcU5@>@aZAp8K*2_WbeV%b zE|JSs7xuApgV+N%{#dC0hP8Uo-6$B;$Xsf)G!drBvlkT)sc6-C&&P)L!Krrg9Ivp3 z)#_aMu}@D!Z!R2PVDe7ICSI>&x{cP{fG4Xwiw0pl)2{1~H>PQHz0P{#mX@4ue6 z5#Dk!@==NY>&eiyPtkowSL--b?mtNwYj$ef^8l#fCvYlZxCfzsE0S83ZC70~)IH!a zzTr8`ir{e}Wb(g+7{lYd3~B^#>R(%mJcwRTIA%Q$jrnsJZCCjHz>C06appTgc z$AoRadVYod`#rI~?mtI%z~ps|eeFN=`(CBb8Z75`{0Q)hdwc0iQXSCnKu-x_0jZX&@qtidRZj;!8-V zw(@JR{?e^XbjcsyT4R;QzUEWhq@D2jwq9gci1zF=O)z;eV_%OF+#YuNu>qi?hr|wF zg;b)k!RkrAupAnDn4@sdu*DLI`pXV<9h0geeuZ8)qxa8zd}cEy?7rm|K&@h$ve!9z zPzT!MYdzflRnqEt0Gu&CmgnzmhNT*_fzQ*F6_jg!uD(qF_7`2?95rwj#R|sGAcK1> zc!rv_5Wh@vwY%X8H=kbWA|;ihsz{|Z8-mp1HuFE zxaXBgkYWNS%?)#D|IS?gB&~*S2pI+34&>g)W>aLgjBFcxgy+?h**SRF41scpwD%X9s~JL!O#u8LD>x10w zcv%I`XRD}$yzIG%!TKn(;vWEIqfYbQw`NYIOg@dXq8;FmPjkbChnuI^bnfetQAG#l zxmEnx%OddooyDnCbpWyyE$paukG}$n5AYu@t<468*qb2L3;vJ&KwiCa5RE~{kC}u` z2P(i9a+T70!tSW(?^`3~JK+;rUcGGzH#apY$2X^Y*JWk{Avmrt!2{ko8*1(qECMcf zp0!!ROT`O)G_9LNEZnfK{G=na;ux=b%noqiU>djXT=hv}m|CA}1EGw8<_(}AIDX&g z^oSZf1JXhol>6i1%GXX-$wd|JjvS*e>Or-3od?|}Js(Fy6dVc9By<=fqU8?WMJ&w_+v>=9c_FcV* z$A$|F1Do9m3Lz%M_DCw@@~JtNL08vN50eWM3A$GtLY9nVWg>_u@9$NcueKyb5xX{c zo&J36vmqr!nhGih9YYdp+<2u?-rJ73fZ13MARa*uP|vM=}5feh8;+oJ9<2cj40VgL+*0xG0GJtA!aYXMi;0>_?8#q{%hGA>J$+-uI4qgEh`i58oPk z4I0tlafa2v0Ln4cFl~nd0~{{cFdQhwO%<306w!|;RzLp<7TnQ-_%Wy9mn$_s^hq1T zA+eG%T80D2oP$L@v;pwiUcnoZvB?}I$~$Gr0^cLP@4pF~Mj9V0tD@TW`jQliH-N;+ zCmlXeF8`eOD1Xj-Fk-+uC!cr(Uyrwbm4p;Ueksw2vY}%TXEth0jm6*-XZ~!qDQz1j zwQS;q8NU&#LCdQOtx#asDq5B7?q09vg2(x;c6O)$aSebpb#ei;K@G*)!%%uezF<&% zDmS{by6#$fdTG?x_qW%ZDd{l@K8Ft*-U~kxO$WS>Ru2`{f=*T5qMeX`-RGRM$_X8Q)Y9?l8EFSu z!#gIg0w!?c+oPO^x+@>*lU)wT0wp%r`xu-IVm!(TPnF&L9AhhJ4$k^yUdRmHI%YX$ z3%gaQEC$sk2sBkeon56qa!>#oj%7` zDq{)A<2)ra20`g=`#NZ!HlYXpt**(t6(yc;cjMJ15|oYorWQ!CSd~CxcYiBUfjl%I zda@6MZ%IH6iX1>o_uI6${dEqm0XsY$^Psbtt&effm!OBRu(ku2xlb)5hyG_h7>B{` 
z<$nc3K|Rm_G5X`#adrLD(JvzSIpTwCj}@gbq|*j%GSia5V`IPg`Wyl1?5$iVe0V!7 z=AN^PN;W9=wV7YOEF`oZ-IM0%EfwP4kcVD$*P4X!A?@jmo{b2cZQNx);g?CeGwzTY*>+p;%~eYn#yXQnCe$SHQX=g}zXnp1ZTr1vW1YI5lz>&> zR$sIPJq-I8(mAKuKM>bzFsK|OBg(sc1?1SZT}`Qd-%t3xmFwRV)&S|2IHHg45`-rX z*ww%0c{+>pVyN_bkq!-6pRJt>Rwa*fq*vbX+W6n;6Fin}a2Kulpt>&}s6oxE z)I1snCv`s$(RdV4i@TfmicQ?dOfWO|O}qb+E5ep`Rui`@Qkeb66hX0&(NlkUh;_hT z3CZx29@It44Pp<@UQddhyr6cBhA!YLicKNcx6OyTsw)&2&8$_+4#caj8#AAm zy9^G7$4}cUzu#T&>uFt;$J4TMb|8uaL;mv(^WIyGVwK7Wnytfm({d8v-_Tu9 zL#jnYRB&&UpH-p@Y0CeV%usVD;SO#y-6HqVrDJq;N@^CxqNdeLY2zrQUEfU0TptE5 ztF}r+=%V{98bnKbr&%H@SK|uGTgcOs)*6HF zu#+@!qZU}Rh$|S?;wL3*vM4h|DK&N3|3Z!#{iR{Huw7T3B4g#dch+hZ8mz166lFXzrk z72GY5W1rUZH9@`_QDtmuxB>Kdlrce>aC7uCbJM==XCsM@2*t?@&QUld$-?NCgRfZH zmz-vhlE$DMo5x(m_ovG-Vun>tdKH&tBwsRA$uED?HzIQPi^DyUaS_|z2Llb3SK(P`qn20j=A-m`X~tT#4048GDp_9+E=s97;>9f)aba zt_NN;uKNSH%P~tI3H*p}omQIUt&owRh^%5xsXjOUKl~4~8u~#qZ`*&m!L?s0+kIvdSk=HLIitaLz3Ox5{*4Fusk`zYbYo(%< zZl#a*DJe%tzaQlc-b1m z^*3~^Hb(Ni91LE<%&cQYf`Uh2oA*)7aRYB=Se@eWmS7U^ta%kIWZOBkRCyv z>@R6YM27jPQCg+$kF~fvO%dg(Wjjeu>Uev0dHf0DWbmU-ky+5089-x+(_L>rKtyHR z;>O|cbyeg5YQ#jV$9i^7s6oFWafV02SNZp#$C>`sf2Wp%p!4C0C=s_cTgiL^UyQzH z&bA2NFmL+KXgOFRC#HlqjD8xuNB=iC#er5^G;=SIFpIYxlIn!E855?ygvp}P=N}1` zSiR;rEG>{E!i!DRTJC7iJ#v1gCDbd5cs~JMohvIUJT-=>YdG5e0-DctK3(XE7VS2T zUTRRgMa|Oj@Xmgyfe;;A3H{7W@}(7gN`$gL5oO)*zlB9K&|gXU^)~%f)3;8&Lhd=S zT3L^tMn*zd{*lc^r_W8;j|pK{52#5*Zpq`;)(1d1uclwL&r?l}x$sujbXZ6c+xP5q z{zW9XxEF|Z=La6>AD{2jj4iQ@KMl?1JoOxR-ye@f!L2qCa>A6#eJ@iP#WWq>G0^D$ zgUY&UDf0ZCuok+z4+9=(s&>eZqKxD0pu^#jVKmcxufAEf4Un{z`y9e~K+=26Wr(PL zVN&b3e7)2>Bx`owBweCt>O%rp!P_sjXX`oa3H+$lMEtkISv4kw4x59{v-bta`W+c_ z4g#TR>&O?egH6mGOQ7F=7#;l5effs`*tVvB&KDqVE>Hzo?;A z1n&{?B^zV7k1!Wa7d!E>H!!lRUq)XRzE#^&)Kk?IPYZ7CE`FChR7{9C6L5dt=&0*T~ zaSicSgiUGfc%3Uz4mHM(bWUmd0 za;`vdvOL)6MSePa4xniae|A`Bog;H+e9Yn}>2@ z)hSWw=>7Vn;;Ov_ z;0Y9?5xR1DCn*ulExb?~N9KNUZ1p4HM^GA^3GRgN~Paq$33l*12w z4_CQ(WeM<_-_iYW%}lrx)88^X)3eueS1( z>X$Fx$G* zac__-RogV)#!bOW+NJ673}dc0&Enijl(PW4dU8K}WU5@LF(<{aRsxCwo|14I-z~MW 
zH!Npy9#N&mLrN@1^vkF-qI9WsIam=UvKZ9s##CSqH@>ifDy=~9~HjfZ{+TbSfLbtPyR3=|9Ebxkc-QlfNqaL z&7Z-pe2mgP|65g@Ia-0wLYZs4nixXX6&;iriE@nK39KOGce%FpAj@-8+GamsLIRWg9-WwD{g*Mvt+)JOB0ZiF}; zZ;~2}qUx~cUJrIZ2jVh;A50z-IR!YVX$a>sp01y9rSd z#76`|aW^J%e_4{1p(%-^^QhKL?wRj~G*W@$v@)Wli4NNw(E zK+?%fQbmMkF^xQ_oWk@2?h<{7kK@PC#Gxt2OxA&p{&E!Wq;Z*|IfYiOjHFB2qMZDS zCB%zTKfe4J#o@P^gV^!88J|CY`OQQeX$+C;P4XrLoql3xyS(IDM)q@rr8KIdEqupp zb}EhEh{ou*Sj6KQ#(f2jFv+#ozG5GLT_Bs>RhDgQNia|UQ8}KP$NxjMlXI@_qqab3 zh3kxmnEU_Z>MO(I+LmpR#zJri5Zp;{cXtVd5Zv9}-QC@S6WpaCxCM8I#@*fZHT&Fs z&;8yX`lF$HtvS~$8C9dIL4yywtOP2I`8^0o33gZ^r6_AzkpGr=- zc!pQ-?9bV`X34ZJ_+xq~YzFP9vrWX6exJ+kN=_MP@Nh})SJ?=!QfOlLO2|xX>EY%k&hJARWP`jb0ZCxl^Y&ACglu2lYk!=tr~0? zS=l#jj@@Be>(85~B>$t9@Nggrr6jWXO8|%eHMY{eRAfMQ*d5S3?}Ax{7EUBVqIxES zOApn^Owk@$Gpummi$@VU;TgBoN(g6PLGk&=&0IwWE{E+>+X0LoA#1&zj%p09RFNe` zZ>WbXd7Sc#fP8llFVU5#bW%1WV72+aW+i&Vi&|2c`xP> z$~x2W_I$xeEqx{9WR3Zm7dECIm%u4Y1z`RLIOM2qp@+pdXlZ1 zF&dFqD62|I!e5+!K$OB!(S^1nO%tc?zwj7sYk$ofWz!vFp6f)uROL)TJwq72Zbsv* z{yA(;)f@rA(J<%%?Lm2X>`rKA`aIV9Z$)N>cPuDMmuk>D$2?P9_XxfkYu=hm1}uca zRKKI8zwKZjO334f4G&8Ph8d7mo5C+gT3y~(q94li-9NROI)e!Rt4}{ezDDl_T7rAr zC%;GK!C8=Pm#Z|B=9uSBIB;=HjLi^ZTfT+SVC$XZVO9d}8_&*8r& zA35Udka%FPsLzTtS$+?y6;2%P{6#Tw#qp&;L#)CxFU+&je1D5w zArk4^eDCWV{bDdQDL`?uz`Hj6I%@$`IGRr#m?3Fr52Lp(3wp9st)ikCK$)FAvI)MX z7%iEA<_KjSwdd?b*?_gIj<>9%&E!#oC3vBe3uwWM3z;(glJe(PLBUUeYm3au5EMMe z=10A+chmqLBf@Y|4fQN!n`;}YY8wfA~@l);R599nK*>AWJMLe0BC=7!$2d zF#KYj62%(`Urn*5mxR((%6L9Shzk4q24O~ z+Su!*fDvsc4e3^ortnvJR1AES0D3V^$9guo!w2lSRA9vxYM}>N%qmFMwr75b1uFr` zoX1GdR{Wd1!;kNBJPUuzrS2Zr$91M-`bDQ0UguyPh0$Cx%qgk7Y4LuOWw>5z$}V$6 z--e09S0K$yPlu86Sp0Zlmm#tLB5q(lE{rkzHRUJ9iCh#f4Rc))jLQdE5K9;E;HOy) z;!_v%shEwvja|0PGoH_nOA+UN?Qe%&;q9!7_1Wqwm+i>nMHT1BPJLYDy0PtHKw1h= zIM7)~Qi^{n4EM9T7n4lVa|K-+hJWq)@Lo{5WWGPD8rR}ucsf(!jcf()Wt%Ke2DWGZ z2sTsM$G)Hq6C=l0@N0@Ck%>oVBZ}u*x)!QBWrw_)k4|c9fkng|@th72MV5dv18wGSC=KnX1 zTa6EYe)rVCv9`bJdNjJjxS;$Q%*g1}z-H)kxfuo5S)HxUJ+ZI3wKZZ~oocVGS-5+A 
z=P-$+6snUu6fdz1{g^mF8KpBt1+8#->xN6pwL{jYo!K@qAiK@1xBT;)IHj~v)C-^v zh!*XR)04=eN@ok?vv~V8-*e-20;`y1|_Sdg){qh9^<0=%DIsiAClU+`AK+u!B(Crw-U4+ z(l_~OHldoG5-IzE@7e0B`;G2!@apPXN_Pd7WE=P`abl$?R_1i9f{CvivA0=NM(7=V zsn6sx#r5f*g7vM~ORP7qg++Jb#tbd=c4FI~2|rOG^O{RIJT$P&{?hyQ1MKB${Oa~I z(ebI0GCJXzQJyrMzCv6yTr!!VCwpq4w;W;t;PpRepBPpMF^S-)M*vs_x}FxY~Q=PRGrNKo0voEdvkl`IeN@;&Ix_ zveq*1aW_Z4A%6Ucp=Js%E~8)3DR2n>!tWF_C%T|F<^a@7N3lfsQEm4dT#?ieyElJE zbRqNX#pHOic{JP6F_jM5{+^{`nCqk4VOaROgiDn<_bJUNU6eo74H@ppCeh*XQO14wsW+EfHn%0UE-v1IER+RsCi6~x|`c`E7mlt!awad%jK ztKZ{aG+zOsITWjWD9r_Uk&X;~toQ0(ZP4N$q1zDQBsnFeR|ePi;^g<>pp^&37n}I+ z(+L(Vzn#266%#Ccm70)!}&fh>`Ct>*01-*Wga;UTVCFUo5-4HC5^sUOk zW|brl8HTh5=nHX$$J(W05=_5wPklS(C<|?o)GeF&x^Yrf&)bRWH*jrKWalPXr;C#w z0mT(}3Ajzyl zND?*g5HD7a-E2PuH8(D+qfn&a`TZ62Jv$t(Y6bD#cP?7t)zeUgOt_7HcoXojfnu0f z$P7zaou*f-rNBef&<^VI#pdno#S?&L!jV+H3?bp9BtgKbGxQZ)*y~3}XQsrl?AFH% zMnE7;UPs{RyM-V;hf1}pj$&H43~n8$HMFR^29JGtdyrc>yFGAkY_V}!JDp!SOLWvx z%H(!BoSFA3vzeb)f~hs^CrhxW^v6|-z|`7>=Y--Df(QYgEz@>vm*9&=DtgQ1U(2mAn=E8;!<1&(AM23ah$kofxi(43gD~_uCRYdRl*bTT9`#YWAb;e}XL6 zTy1b-mOY`TWQsV}m?|hB{}nHQ12zFPi4 z0xxu*4+@Xtq_`k1tDO4F*5Y=htM%YEvX0gdScZ8!0JoT+O5A{SB{Mk!Auo<)I`jYC zZ!m!|kX$)Y_W07O5q2-hP9EQH4*odn*T)3=4SwsJ4%Vg_ePNOy*U`8QX9Y|oNIYou?zS}&gp(22}SX(}X zVE?6b^k}J-oG!cQt@H-7>6Q8or+-c+1Qc`^xbNxej?60X%L7|@2P62C6G%|Rl9b?K zvvrb7#8S;>(?{IzN8+E8A^3*Y*$YWYO{P}c@`mgh@&4~jP(04Z;9jca(wu0jhNwog zdj-T-GxAqBct3ERKw-a)R^&s>9IG=6Ko4C$4|MTlF)bbNkdhi|;GQ zy=({)F>tWp7Xll7WiT`XIg=uTf^2!F%7{7Z`|c4TvgE$&k@{@BDx z7lQwD|3pxpx4YZhyQ3Q2Loc@{+0d);;0rhgL7b`qmWLzp_&gLPutte!8lAZK1#imM zTKK9iF82iJU=7#S zMFBc|5>uDz1b>=de+6pR_Yjmm_DCKB0|Pz^8Q^dM4R9@;W1foSNE>3D6AC&M;&he{ z#m9l4)-rctyB)YRDSY|(0`Q-O*zF+qS@*0$&uf0+0LRfA2A(1Ej& zb^AYR^Zg|t3__r)!sV{YXTq581pEecxS)F#yT_?GE(3@l8q!RbU_eb=ZLw*UXrb%Q zrkn&)g>+3QkuEMwVKjo(0?|m-ueQH&^($fn_^^=$2(IXzx>c2?DXymmrfz-KAG+H8 zDrf2LD)INz$}jMyUPJkPJ)4zQNxaUhD;XWs^5`kRpI|o81n#7BNPvG}^cnsRgOkwx zi3Y*tfA=yI=t*&KaTup9U>bR7vHfEIZkyiKJ1ivRv#{5i(Qxl%@3#x 
z)OTM_l>jovtm)Tv+{Fp}Rb=wG28WBhhQ9;k1jE@mQ7aNFs3(7=-DC1mNacL+7ez>L z)s{K{YEFVbA4~`tsc2UZQ4`J-rUjLzu;NLu)FmD74FkT^LA;yZpuSE8h+(9SKq5yA z0JO{~wq!$Cs&ouIxECtd!W1PcleJAA92>2j+TM^7fM|#Q*fz9I3Sn_}Xw-k^9+sSm? z;QfRd0X7{Z$C4yfEJUToQh&li^kehTD;u|VPC(-k&eAn~2-n;OKX#T#2;lP4eKo$l zwq9`}b;(2(P!=nWav@6w?hr`(rY9N|LOg==Qz-G3S(1US-uuN6J@LM>sIv&9!mK9ED*XzFXO7vs93Jrj#WP@|G@ z_bUfE0=PwTRLBupOO7Mst%>@$0}7ogN>=QFJ*Qukez8#QrAx;2@5Y$%nphk0Dp*Kp z8qASZtMF?UQSJ-}lsgMAA6otd=kXlj?Tx>v@ub_cl$ysM2uc6<^V5N>nvEaMR2%il5K6(xCJ*D-%r@Y zN}DRMDyKB>1U}0j&=zVOTso6om8!^dBa+Z4oX-#UbpJ*thF#cl{E=bZVRQwcQW-F} zM-AqlbK(&8O5Nt;(%lI}3l_nzd%%Ro@m(rsq=#M*qmzlf^t@;crj(Ix(L4OB67c<` zQK_%MRealXAZwH|WKUVwa?OA2Z3jH>{Y0Pbnb#eMW#U=}*aqc$$_LEi-ac`BUQdl3 z!ed`85VXq0*D+5dk7LzGCAsi9dGg;zyM96mO*jt!@pc3jkUB`nXrvpjzw4-bpE&1c zQ&NT(oAGJ5#+c|$N<8RdDejhAD9RUH^i=<><((om!+>Sa6wxn)Lq0Hsoo`7w(>aNT zOgbF4$~p6(k)N`%$j}JP@;RRCE9zcY=4z0U!qjn^I-56ISn5fEYK?PjfFD33*{Y8D z4fkIyVGD2%f}n!qG@B?Vz$a`o=pU0WqRZ*&!TiWy=(VqK{J5hkIJ=bzT<^2psQ=n zpW=zv3*+r+Yb<{_)=PsTdC}Sd8qRt!;Z?jPxh$X`kIUH=n2P~88fLR?I>C5o-hcFY zm)q>l{)HQUlM*?{CbwVXY_*$M{3ziFI{ z$@I)EL!8Trq|xZ)QBb3psxT2^uqhU2LnaRJf@dPhmMx|KTXr+cWsD3R53aYsz^OUu{Os?q7S61jf)WYa>?6m(iGof~;lgMF%QWdK`h$HZN z5_117iU3=#@KHJ|$6zs8tJYHxcoLsPC7n)eywrI5Icc7#Eb6@ZED1}(qmUfdk8;f^ z*yyQTP6zJkcd;U#_V~Y|2YzrdzM1&cPamUo{utopATh^z>>md7@XMrJuPWGdp`}v5 zuhYhRocRbU+nnqd_cz6yczq*_T|8+#e$l=@wjMUiPvE4Iv@}Y)n^_CVk0r@~s#b(l zdg!F&bp}gTmpH_oPoEaGUJC~<4P&$u4}FgJBo0@?z`DvQiq#StyGR`H3hq3@RQklfSAnzQUrPXX(`Xmk8nt94h6}>Ds4c zx=e;~ERB}9`)x{JmV7P8&BJ~D@5nOGaxP)3V2KD}S)4Ct=SS4KC49k zyUaw?bx|QIuchmAGDvLr1gxi%JsMeV$jPG-bwD$TB>ZQpmvlattHobTM@tLIlb;Ng zt#5|G(;$^@WzK&Q#|?4AgrC(LGuM}Yn+K~fzey3n{=jk#rYHnlnv?c%9MEy9aX)uq zi_LEyL=E7{hbF;PT98S572^T2TooU9M!lJTpsk8@;(|~FlB;Hnpm$bK7GXB}7ds-5 z`z1L#O)Sh}u!7Vl&sAStKg&-r6x;;znW(9?!CLZsW9;S_>tEbfu+#` z-1?5Ic0AU0v|CMmO%6wGRZ`uClih!C&1!S*qTCUqJ7Y;1sR0NZWr@_e3NbBUl=&v4 zEYyoi#GghJP8a5HU#>jKN%ljqcso&A94Hzk+3M?9f_N#2EL@n7Gd{W4Q(u8%A0QM_ 
z_j&+lep`^4fVc1{{XsHVm0;<-i39$eBv2t)e3rcCmg-H4Z--QI@U;z+$r&q&J!8)=oE1+@X7_v4#pBC)bnV5-hRpNt6kF zrZHTBXP;NVCJtStAYUHZ(yR_+4We4~I}7Fk(@M`Hun2kYQJEpE{~-P!|IF9-WtpD( zR5g%A_Vi(eyQ0JMf%(C{!)8H6A0@ieQY2?+mP~A+;i@(2Df5ok^7r`B2n)S=RlV#| zY0$BUAKDjef1<68ejC!NZ_&bET1>>1GYI9dh!7XQNdDzt@GH<`|$Bnla$ps;MCDU05fS6MQ!YkieW=k6^(TP88p z?sPShIVYBd4eF|E+u)$K#@%yDWDIi_3fBFMshEx0e{0e4x#~eNp}}$h3#_hmSy2aI zks>kJeFeG}CPp=LD`YepE($$J`yN3t*S8WbaGF=Ur?D)uh8FY!!v#du9(XLF5Cd^m zAeW;Dz5t+m0emtTTJn6yV%vTP@Hn0TTcW2n-utQqkA1Kh{d4ozc#!4Hi-9u8J}y%Q zYx^D^ZQ?oE+o2~iX*!}VmzHDPkyaT-Q_|JGNL{p4jZ$OO*c39yn$(oSJ4A=ym(5y? zJRRP_H4ZO;@u#n-SSL8FN-nkSs-@;tSX*4co%!sC6rCovEk(s{Qj4sdc07jclg;5W zB9)Nl$<%r>goeXz-wXA9QJszF`ywpwJG|=&5q#Y-z1MX4-{6y9uK0S@+hdhe_eB8F zv+lI_$aY<0Jf(2zvUh*xhDzfr`!XHKk&m=Y&i&=+snFLomuvzS%C*XR`P#P0W2J4Q zPv=E_ZMcooryxnj2tMdqTmH&2z`mD#^wN1N#&f@Im@swQx8nPU7 zvDQj>t;OQZTXbzW!1YxUx1L5r!M~3aBH3-7X6x4x_&Ifs5c>v@K3GJhU36-I{nY`1o z1?!qzekF(rD%*R_qX)X9rKs{1*XKFB+HvucTTOuZe5-=NOJQ{e*T>V?b?+g2MXSg|QddfOq7s1K(^V6PZ0XaP9S>hVB16RP~| zruz~Ac{VA8&>FK5^KDvSz;=V5;p^VZIA=w_XA9Gk`-{eEoJx!2KzxLNTh{K@HC2(A z7~pMNCyLiE{mZ=iYf%ck?vA*0d`{b$&(3h0<(w4{6av(=I2>*ZdJ=Li&YCM59tvI> z{gdWw@vH1G6T-_xA;BY}*Yl6J_8U<5yQ&P+N?JzdQN{_N_Q;d`^YjaUa&w~En=E&UX zf#?bq%9myiL@h_hotH-r>_&yIKvUcfO%raY6rUTJ=^)ZSkF{rk zDOp}}dTEFReqYgwJ6?nmH%n8Tmk9egdKYfG`;7W{_$3Z*eARS}$(K{$+gJA@AZv!$#V3)mP39T696=b3lPQ3dyQEbmH8FOV*y zhn?%~gWkcVQIZuIY>T-AERbS4SmHe9BcIy@mT`OpnaG`?6!+Xg6D3mN<+7i@=;B_f zesPgx=wSNuIzxIa%KJnp4%$_xWfXnop?A%|5H#-Z!AZfuh>4BwbDZEQn}hZqsW6Yi+{GozzycQL~7l5occ)>GpegnI=56^t6tnvWtZ+9jPGd&AZU#l_XxgHXI z*~eTT(lB@I+}RNjD_>V#da^aIFpYakMUat*TCOwPRw*Y|e^0SL>Pmgy&mmlTt7~i% znFeNk)1iX{`5bzk4T||ZJ@_bn)Wya=e*E)=i+7eh5hc!U@FN5_=FjtAKUi1wBF*m} zibfoi*mOtAv7Rr9Qjmz{cE<>&lcb%aY!2cjje-2OoJi#!&leT8HjMa z`PeNXKx){}QHb{G%O5{yy~ZrtFWxb}y}m2%E9ra0`t(m9Dj9-QUPgHebk54L(r199 z8Ap=IRvN(ljmzHNV)xpw0Krs!*`c|(Vzm*v0cEunSj)!0-c`S zT7v#|TD3^#ary}xij%F<(Fi*a&Z)IHpH0q^c%t}HUrgZpfxQP%q?c};y*v-t3ayiM 
zhM@F%GoZX4?GXM?3L}yoDyn^^)7YhF+??9H{aHXVIi-Js@*Fhwtj3=Ef+{A^y-{t9JjG1MobR2NDQntMH zwraJ|%(5fs`~guQ*ZbwbLVKLgxxn+*dNw!b`T+$#D0{#hDOfS<4vK<4THs;k7quTU zJXCqFDc0)a6!wv_Rtcf9eL=|I{#h~xdUBbmN25y}&NEF4siA#gxWFNIHQa-I2h4wa z0bE932`ttQ6|KDy9?Xx@-Du|W0IKP9Vh#EkovHQWqli|bC2#^HlN~J(`7EzBld@pu z2|fqtfUH4AZ4`dpuB)vzjPlC)BbhvvHM2L@Bcu?_fN?ltA!j^c@}W%BgX`G?1ch$6 zdb!L?cd+b&LzO=-)|ptcJj(u+*wk_3X`K(1|3EhEPd_?ez^q$59UmGj~6oKd&4=u?+nn;IGS% z+%f$-G|fHQR?N^aO=C_)<2{P^rVe^qF5<6@_i4XRun!(wwVl&xWuKs#X*V0 zm#*UYFhVhiTBsB9ROf#Gj~k1M&sz&!7oy5HkVgWor;IPE%r3#lB zR4u^GIzNc}?a1UUcgM!-Aj0&kZ>QVVnm6Zo5rSnM%w`X)_1R66LG#{$A;#crvD*I24SLGt3948YmcYX>$2M(ZSF}>IkEmnHG*(SV!ichY?cjuVW>S=eFK}Frg@eid4z}&$nm(UnlzvePXu@0V*L+XIe#+uRC?wA3U~is^Ta{ z={(Gw;C(rDArK(!AxY4oinhz7+uwE=f;ha-&q}!IU^nazOc;iCh><&g=l^1WUxB&R*h>%u`b8?0?u_z$q<&(#}=doM!TtL9I1 zGVz_T%JIImUp^3ju3nk#z_B-|C>Q-w*Ps&D=4$%H=N4Bo%mGzv+ zB8kM>x=)>h>O>01tCd0Cz|x(g3B2d&^qz}yjcA2-l!n;1t;J8_X&m2gL)3R0erw}6$m1P~{i~Y5M`};X646+Mr8~e~71uvt4cFUkR9@KBUe~Ma zbt1Yy8Uht3$j)^^TZ%$Ca(Yy`42KPzXXO*hp%Rsn=XJ^l-`fOO#>GYv;G`IG&dKawHewn0qG*|KB(x>4FHvg4qt{~8AvQ3*(eQ79$ zGkf2yI{dQxaNoh4#(>U6#OR;wajpljYsZULf=ufiW@Q4h8LZO3IOEZIYxF##TX5Ua zX{Z17+KN4B{#)vWUcoycb64Bd%m6_DqWPk%b`T!z_(b<>+HV$HOhbOLPRce|LZAoQ zo|p&P)1p2;h&wd*4y+7E$A50%k}LNYW>`HL-SDn(K~;J8Xl$#z*|pGVDHL9Ps1A96 zSsMLE!kP+i+g%syP-g8Y8f|uIIbFJ%a=Tz*4jiz6E00&;f!44~^zUKTMPEVr(0Dnx zl0}H7yGb-0N5H+9<;08Q+rOCc6XaUYpN{)2S)43+|Kzp1_PZ|%%uGxh0`~i7G8+|K zP)Q^Zd4_}W6nY!oLCr7f&0g?X!*00>mI=m#sMZ zckC4KuF!U!LDyLQ@`4f|Pqtd6f8BO-A|EUp^UnVzZyy~`K5FT%<^-<`qLZ=4XE1TW z403#`c!y2y6CQD=vV%aYN;EfOfuZ2v!^u0Pw}MZars!~$^sC|{#jdq70PO@c3IhYq z5e=XWI&*&L0yStPv!!ZOY3-&`(Fn2d50@I$4_t@ndX&F%yTsFbUwP*rV|?uw_`n?U{gGx{D)@qx#9kL%&HHpSY+YTFiuwIYZYzD5<&np@aFZPA#`4q`loFl&JKhd@L zZyye~_vHCS(wQ;GA~!33Lye^a{KljiLp3jxWChI;uqClHK!=Jo}*`0N1298kAIeBfnUxoJm!aMTsirl`vjul+1E`cn-5joPe^*w$W4E@O;?o( zTH?z*@`M4E?d7kUD8B?Q8TPO^bX7Fg&+qb18U+i#+fWXEC{O%?2eev@FH>ix#&*xE zg4g|Q&yu$wkiG^~Q^8m575C**5jBa3x 
z(4wEu6ib|LlH1gkT-R)%{3c(746h2;uVl1fK&yokTagBf`8J$S-v6dzQgk+13EPR%i<}bP(N{`*d@v%p?e6#Ky>1F-*U@N^Tg}U z>wW?U*_T7jO*)({RsDsjKC>2;*bd|}zFV1O*#zA9Wfq6#%->DJwfUkdKuEIz- zNprM%4}UQNE~Wlj#@7t+9WoZmr5Ck^FUSdN(~=L0W#5}x-?G^}ADti0cfbrQ*{cCT z(Ur85l?%1$$wglU*DtHQOK501T6d|Y4~eqz*R!lol9W5clN;Nw#U7l%mRwdzdQaYK z74guK;q5Tx=u#}rhwF^YX_H0!HN3gi#26Mb5*~-Ax_g8_f8nA)W8AbX;i78RL8-Cb z&FlT%kO1IVLbu-#a%ete++8rEsit)3(sOyYDwYxk-esucQbs~q^y0!bc^FxkSF4>dCeCC(grq`N+ zF!222z*uiBT*o{ZRUYhbuaGf33#}3_Kwo3zF-≶mU{#?(v|d@!d5%A@O}mRmo5Hq+Cmg z!-%>*PVMB5a?}`9cw-?F;PC8=C9is^vO!e+R%>Ej+TLtS9!J@w^XE9ytgUdbSrmeX zOOm=1a{wUY%E}_22jK2#1!hr$8`oAnX1Gv8`rBLj-7HOKm(*^_b1MP6xy6kH&}w1I zxXMw8zx*K59nbQ(^dey=1Q(}ur&AFjUVXMCh~-nw?S>#V2Kx%g<6aMGTxwTDwdk;A zX0!@`7e_oMaN?i?e*Yc=4hk1WGG4Un7uSlFVGk4f7J8*~e=|59`64!=@t(*L5_^}? z04$M46yy4>8n8ON#)e$&zb9qq=U-jxYh(Lw$OWG0z7x(KbM@`@Zhq&=pSEi-;OFT( zD+C7_zBMlwwm0KlzeuZ9XCtYE(}f@G9r ztZsQH&g3`h&u_XYK)X(lx`{CdB^Dm-cTQ;?wBIh7bO?^)i|(9Wi}olup5H9ui^Ax- zaQ?*k1eF@s*$Zvgf~}KOPq=Mls9aPWO?O7Q5@j_f7Wus|hO~_o*Yfuzt4@IkQMjX` z*CC^|Qlo;Kx`0*xZpm;eJ1)4erBz9@uzj}XKpEIV$1tson`!J_Um9Z{JcGOg)*>u^ zof@m89&a+HV(}A=;t6oHcmL49@F1TB5wb-*k!|`xdE8){f~7FFytc{9UoQ2NUmP#r zTq`u~Y9xOhi@L2whpzj4jI1k{02}JfZge>LI0rF7&P1f-@4gj|MzF`NTmJH2^%VLj zmpbl;h}fEO&1bd47=?&#{=UAPw|EcH$R zw$bt>@!#N``T_S5vchP(fRWvZYt^xZx!g!_Fa2bRG%GkWn*8iU?1qq9aX*}z)&cCq zF6*UF+r`;)e!u*ja1^6cnQudkuEU&hLEPoT+#@W(%X0>J)J4dKC2wNPAo}S!*lB7J z!7Qt~71bX!GB>G3ga#IH&J|WbjT!O$#}fR9dL$Zy^eTj_Yw&=bLsbvzx@NP_JJ;Da zBqG$c7&4nW$z&2O?1l6vEN3-j-`Svu-tPOME1A32VvN4q6G-UVqOI@AmU%3Ci`Q_J zL*ubCj@xS}vqv?bxvs5+G`=;W9I$hzLi`l~yhN}smiOMnxH*#Xo9r4H;W@S?!!Og9 z@d?T?`i7cPt<$1fn*6QS9{Y^2foXh&6ZjV#DgU6ha)CD60ZtcOGNWRiZ~U?M4qM@- zQDHMrt+^S+a=6 zT>qw}&-?oeaFYGi4{9cKkAMgcuoMbgeU+`7&0~r*GBQ|@>}QM49kr~#(wrTMPI;%l z91H43f28&7Jt*yGWZ~k%rKF_n23mo-b)Huknrrv|hbjp6m~UsQ!LDDm?S}A*d)X}W z+^dJ`w+PRiTD!O%;~oR`6cnp+#cM`WCsk{sb3eDo9_LgO%N^MwD-gSC@FNgu?)M6- zaa(=#tO2goB9431bF~rsie&bQAhuuoU(E^~WoxUN=mrC?2K%LYr{ZDeU0lViK57%w 
z8Q5Vo4Uh6+@9#0r;f}c?UWY=)PH_w$EK!W+V0Y>zG)A|}sGtoG>+c`#PmtYF8it*1tL~_1W^O*?yCAC9;~#u>@jb1oM@$a)U4IQxHYRm zoe)=lyU|l`Nl@o!qd1Z4j!8#R+#GHTA*^HvQ?YCMUf=0U;)s4H2_D^<8ZNf{p5XYMLTnb{#D?3rMkDrbE2Nn}QBKtOb`|li zhle5@t^O1d_?4oa-)YWLmYA>pno|{D+qWivSrdbAThpp=a@GFvT$ z>83eMZ@7JT4*0FeD(54~O-CGWMT{rQim_E4{gUw$xLwv_`=YyK5lKVWV z5cZ-uIZde!`IZNaQFV~6(N|MqgS7r=!CClo!iuFz(7SC0w_6s-zJG`ZT04F3^T>XT zoOV~*G4SACNwcb6Y!=ChsY9JbCmAfgDLG^JK=;93GiLM<<<`CS=?fNh_7tv zw2NW-NV;-#QDxD;@{V>s5M7n!+;df=PxXGxf%SA6L_1NIs%@;9OM#ipT}yuaSD|Uf*%8MIa7@Cb zGH#`CD?fvqn}|*LmY`Q<^DE&ba){sy&U(D-53JsW;|Bh`?suF~BDPl_=3qHeG@A09 zHTNU?%atNO1qr4Dx#;Fz`I1EZrZb-7KR)pfz~(oGe6Rv}`HiBgL|Wqpd*&#s&(|vp zO`bvcV`BLqr(O^M=EpOBZ*bSe6a2VL0X|-msFFm&@te5=eGJZaveNw&+ZB;!efz|e z&eKeENxyeSeM_XVopH)LV%f~el8W4x>exRY2xe_|K1Ll@w^GX;F~+nnIx4I<8`l5j z9#NS@wwiS2VLD`9n4eyv1$!4cGD<8p5A%7z`|r zt^9s-*3A1T{fqZ^xnzvdgh@J+VFi@agIJOdA&CihgWhl|&^br`PX$Oaq&ae$`#NAJ zvLu8T>ZTl@`LM`6QKW;~%4kMNHba)oSn}w5G$)OV4G{p$A9cw)w_HcInI6?oT)Y%FyAC!w z#=W|bA@3lsW$l}&F)&CE`DFMUBj0O5GJhJZrm0Q%qrgh< zq!vY4E^W4kd4H-0_!cS%|-DCcKoqxBTxZq7}I_kgA6ox~1P{bo`KIHFb z?QP-|zffC_4$xxTe)mvx=iVV46uFme`pp>?obCWxC*O$0LbEOJvae#V$E8^E?3 z2@2QMj1m9wGU4s2hsnZIS$1&sK^JQ5C$$9<6|r1koRAnS+_EgzOGS5|Zx8Eeo5IOe z&aK5*9tBA`CnGT-&tVgz_e_o6v0eXcR2;tIIYktt0d*hBlWgK{$x+RFDbImNIi800 z`;8kTL-Kn7FMZvgW%aUdj#CBI*qD@K!JLWqFVuQ@&Q8KJsHB&RAth6s4C<4|rfdY@ z0u^x3(g0j)){Ppw_Q>R?=eI1yLBDV@TG&!^-~{dzZS*vPLWO^)deV!;u}TEATw(i8 z;68;f$l*Q>XmA$@FaK5r}_q)aR_a7SYbiY*%HFu_OpFZd5e%MvE+wv}qMZ;1q zOxf46Rm;l%EW2S?(EQtGaa4oGBTM)+^S6k`#6vtWJn|Zgy#|^vn2^DAs6tL=Mg|HI zm*C2Gb4_FsrT_zj?g|tlv?YH>#Ux?0SBPD@oJy_AuWOdv&c^)(VC&?a~N7LiF7XkN#6^V^v0 z`)l4wJ^mgnglZg}zVJKfGG%)P7+dX1ayi~j**(eb@85KfC9t14iSo>pSbZxS5-Xba z_vToCayTlgl+<0Pd*I@i|NA}AwXMqKnpG8X<861o!nl23`>-xMPh`BAC>W*3#GF)K z($*R6ySYSx=?D4!CWt8f4W=jt3O z$3P`p#aAR~vE1pkg847aK+eu|&-xP5b+;L5nMCE`>%=Zjx{zR|MQX1@HYju-h(WrR z&$9o3t%$fSCAh2+OHcS;I}y=MOd+~)pfSXaMNFaJLa^j z!od~kn2NN7ZQ<$kut%)h4*Uh& ziNWp71;N+Ak`gO$)TjC27^k@b)8BE0T!dp6*(7w{E;9|nDnG{X5)3H62S1SGAUEp% 
z3*Hn(3Hv>dWdx{5N#@xcYMnR|rek4X9aL#TphXa{AHa2RIH8LI0>>KL1-mbzn zynbgX})W(W-XL%T}4uE-cP@+i>l!Ag}U)PIP@lKeH)qJ)U5N z5ulKLaNz3mz25s5T~mN_INySXgX@LFG=DZdS57!ec0on0k5k#XBvRL1>hS~{y!iV_ zPg3-ZCVDEpzqWJ${M)u6^NYA{3<{`T6K2GDvAB4~ zi8Lo!uR%i`V-REdiUd@i>zkkzUX#}=(((`FO39f39J*(w>S`=8$lY5JIo*%0hF9TM z;2hB<8Y<6NjqmVl^_QX7$+6K)JCo$yKSm3?_zeglNPFL|Jts1T8~((2WfXMQ$jYkk2Vmvnt07+r$h0 zoU@Sd%EtI7^u9>pU*aP+1jS+3zuam`0-X>y0%6g~`mQ#hXM&*xsh039b*B1iFA~h! zrKFR7izbaH_N}mO^7xIPKQdqi9sX1G8pWYPMqXz1h~3sNwt@SBZG98SYoxgChkr3l z#svK$i!k}A3|}@Ik`&a2jQ{o>!Kvj3MiwL;wJ2}UQ2K;SGkM!ia5i)Bfc zP9`_>b8=EOHJlALJfqp)r5@2)Rvh{lZu$meOT}vMmvNuxFC_mq6yG-|&>V_X#pJnI z+ME4KR$1~b&4}Nq^^~hUh-wN7y0a9YTiMwW0rBr`+xns5ng2uf{!SA*??rcsI1*YU zLMBW{DkH!`fEck$9dyzgcmRxacsQDe7Dwsh?0S&!bMVpAk02mY^jev9A{jP3u5mrC zlFgNT2EJ=XdO8TabbriRuhAts^S0kfysUV0pWMNR!5jMRl?wZB<0N3-j*|hY{R%qb=87|A=PC}Z zFJLD&2XE#C20CK*d^>d4d8IQn>vOLV23NM51>(BAr6oG13^I3;%q317kWD6{^|?>W zX@iP^+$qVY$-vXO15m0$fW?|)AUCdtG) zDxx4U90(pBz+O3Nry(c9z4ZO&0Ff93cczvr=~#jeTqU*WJXkbYna@+zkBBtbdeA@u zL<853kS^>Y$m0a^8v7!lNV)LgZ+ob2r}t#Sgu{>@d*#sV^4>kQWUhw8SV2d?4QeEP z*MHXGd^NZ|L7Jp$ayd7>iwWStYTg3u8V{)>C5oq_p6f~)WL^~kwXQ$zt@8!uzFJ@3 z+@X9quChj$eF!l!pl=Zmu|OHmgovd|650{E7tTB!8`8ZlhrU4F*o6;xINK zUir6x)M%62vF+=5mW7fg*ZKMTrNXd7&hYc}d(TxtZ9*)X)0=JW^|mhoKK0Q35}5Gh z`8JyF=V|=N>^*7VdY#X5-FMD;zTyORcXmI>+C}Fcj=~p zJ;=R^3FaYtPaPC@>>3U=5;OO4I5OFEV2C4$TBTn$1~1pot20MK^{+ZE^@N=MMUYVv zbsdTjQ1pXe^T%+c9<>sCp!%R*<(-)}&)Ft>-UrC^{fnL(KfU$hLbK&qLwDLY%cy~U zrlAq#$4J#vDX7Aqm-HoZbpQH~6ijl8pv%u)?=-OmMc~T_?Nl+wqu|b zk-e7z|DvbQGOEQ0PMfAIenu1`2}eZ&7z`Oq_Me2yy7d^>mEU^zNpKXu`KeQ>*^Y=z zWZ&w?eO`@k(DRF_IkKosWKIbr0Khi&eozqJuX=Z~v3w6O#*{Hy4gK@bN-cCXlCrT5 z`P3E?s0+S4h28(54}nk|>R^}X19l)$Tr+Hl70(Bi0sdl9k6b*V*(CDM`9PZXqHf7* zru)_Pt9+7(ui+|-lmR|wHAi>x;hT;${rG4BP{ivl9q)tjs@1aG+1zA1-O!~km z4V1v^Yg*JNqjftsb6cUfndCn#=7k9poH9@Nra55r4A-?uWn7xg6Wy40rc zqolC~{JU)g*P3&&y^k~q6NeD-nZ>T@_a|3NIjpABrYizvHGLCTTgSndW ziGOi)`>Si&FgwJTCmgqpIYhW(!oP9=IwJufvKmyDm_ZkRfCVXT;mW2^D9WH#nYC=D zqYp2rYOAAGY5)siwI$dL=Al;x-E_DPqTI;%=+=Jy^F 
z*E5WtyoSjTARXqoP4`iLlRH0C4tS+I_u@Z2dYR2w}Hr(qK>Y}*0jtC zUxnKyaVW#hqYQfA`%7!n8uTeKgWkvwj$(qX=Cy4(0kceszh3)~9anGf7d{uA64`kP z4`VJfeBrx>d&_Y|@8K%|a&fX77W%60iwc@Bg_Jk@i5^>qdRycB@q z)A=2Q|F*oC2YKO;Twoo#V;@}5^AV)_VM6l<1eoB@8IUDxP?4qKJL4c-$vd}w>EC>7 z>wQ>lK3e!@98Szxw(8RLug4z7%<$ex?#t!#!@2wGXPnV`I>>q#QhMMqU}wl=4742m z?*iPtE_F(Ydn>}6{V3(nQOz#>k%-G~;axaYDL)s{q!&Eg4OP6SeZbK6lFZk4ap}$ahLW{!S$q*l{v}7u@ zBLI2(I~O3j3_vP{DMMcgkHZVypeYKqY+qa^EP|wkNjmqRBWqLCAc2gRF!pN#n7yqK z&MAo&+3(5Mmu;^~XSY%&?AZ2jjTfdbG{B~Zlhy0`F!^lh}3K@+2LTL6B(;mMCmA!vW1;p@8Oxs;z^fy7mLbfW- z)EG7b8;u}`JkT)|Z-_UJGls8@kI8lI>ginvGM}R+r^*UF_5;-zveB-eGN9Pd&5x1- z_?M9GIi+fbBe86*O~eS^h105djMjZpgR1o*m88-J>hqLEA(>akQrFJ99Z20P3q*}Z zXeD7Mf)M`K_!b_P)k-p@s>2vt9oZP;^PMXdl9*{*D;)blp@5x?q`*=Sr))tkCE>4J zPyK&$KU=t)F=cSmlUiUW&OpK@C56$fri|$0-}SZp2_x{Ae{3I!0|G11QLCv$d_<`h zQEgI)F@_~kY|S*gH=SqDLg>%^3|+Ip55%QN=%1w3dJ{hOK+&!7^mb|=Ie@y29H{<> z6(Bc&mA+y3)9newZhzyHc?s?2ASs<{s2zOTX+!iU;#1}R;UVJ2b^gzUh2}h#)g^V& zkj_BFX^w^1{=+FA=L=6DkvFPfKVg$$s;A?IO=IM%|Cwr9!4ttQ?U=4@mK>s z-L`r5Zl&6sEy%uln0^UCCU9}tE`7dgHB$GFdz9ro?ThG79}?mG*ImZELiX$;7_2We zYFUq8e&Q2E1S66NMBU;#$^R3ft{z@gDDaIk*eb`hONf!~KaI zz|J8qV!#sp3IlQ_C6E(m;%Z~_n1%oK#?mBGcVyb;xMD7reMjs^sR}8htW8-K1b?z! zz>Vkt=6a3+m;HiB&0ZiuAq`EtF?vR#MQ{`~k{^wrlpkfMJ3|uX)U+H#<0Fs#(tKA| zWyAL$8>%lJ$_&PJ9K#CEoX|0qz+^Ryk_aDLWE3~m6-;_;Y}7Ju!tQEL8D`%CkzCa$ z>MS~qdY{7kz(gO~vfy~hWmafpfu=ZpK_70xtZ6x#=V?pQ&ukX8R3m5`b1^NHaWQpQ zp{`#hn3^4Nd^=wd+>aP=qv;~Bljxq_uzPn}^J=Xt$rdIVW)CYe6m2ymQw;L0?w}US zga6a^o}z_W$hcY$k%sUfUz26BXFs_d#)(zZqZ1br3~2Q~=s=vX5C*Vz#@jQ(9D;L2LA*jKEB}jmzyjj4ONd$N*khL z&?|Yt)V$v2yW601B^Ovoe5ZT+M{uwP9u+6+m3qU)@mZOEd+nb$1Q)gS(X4lTv01Nc z^DeUM!7M|B=I4MIfx0l*Cq2#tx~3Nfv+2tbUAea*^|KlSAMCH5ktDX-*XPJc9H&$k zUKiOF65LLv|M27Sc()IN*HnwpIBT{B%hDAdQ>r z*xMq1tRtf=*{n-KTCrv)uy`&6^IWT0U@X)n)%?t(e6wt5yX0ctQK{fpFGwqLKMK;$ zc&NJ8rehx3fikBs-_ckzW~XKQ`*Kkv@Aa>%rHTr$`~5+by1r~?Y~|>%qHVmc+5)Aj zzM5023jcQl-jB`X-0ix%iEy#DO1=}>urJys>PI3?0Jb{2F>9LJ@(|c!R7Kk`Lt! 
zuPrM+gd}}D`dF+}J@zEw;R87<(?gxteT>e~ug=>yTKUf-r$qzMiDM9&+==ZmN-gDu zuZ$kEBTwX3ww{7J;G6u<(jos)6d@SrwCXv<6D@ahK}D?~xKR9}hfww9v?Zu)Ph6ykPgvBg5*j07<04k-I5S^mfTkJYJyHe*e=95R z;-5ROzvkym!mho8&`!V4c)!|U$-S_(^aJDXFqQ2fhg#y6Wz?XdGr}Y>=Xj4YJIEmkGBo*?vY@e!Ra!EkxqDo0zrS(SyJ_$WjP7_paW^fhL#jKuP!@ z0Cc73LJfFE_DlAgcjd7Qu+?M)5~8mtMoXO@*$~|88_K_n(x1UdmoYYS^0}5gjmuMF zFRQJ1-bIWdg>l{+fW^E&TPn$!t+U{N6OGb8!$*H=;q$7UIJiN?PHBfI*WpcF&#MP6 zc2HiTl*Pk137_ti?~HRF`W;Sh7xBvX>W9cYi*vn`kXz*gs-SrWQ>nlC_`R|CCDwX< zX7?YTmSU_Mtp91(q25erytchY64dCM zR@=d}V&6HwHOeVdmm9LwdeBC`vz6H(~ccTvp!v@>i^Cp#rq>xaKI-;6U;?& ztYlB*EkyvE+|s*O}iga)htVo6>@G7J>$6A@HIV6Gt^Z#sZ^>p1gbu_ z&c9SI8#DHEK#WSYO@r>jOs2ZpdMRn3o43&Lk&Y19B^8O$p{>|2#@N(8o!y$8xkAPXKzAYJJ;Wdit`nxc0j|(`(U`Ttqsc8{Y?1sVSUgV9krc z1sR-8dwP?*I3W6T^+hUG<~5&{yvjymp{QQ=loF-4+chHwP}eaE|5$Fj1jTMf<<%f- z=rz|UyS|oq0X$zzq;jY~i(55@bod<{z*W2WY-Si6%ZuagIwtVJ&@uB_u0tZ~ui_i7 zcs|VbX9Po}BokhD<(_NUVnM=^DIp|aSDjuwS@;;Whoqql{Rho6hyxEEy3>AdBBwGL z2cbrXfJ^X7+QY>96>pXu^e<$*WQN=yhI%*UhJiv(BCSfyco_SW>(EUXgf`1)Qu!-6 z_m0C195pSu>B(TUYjz2XFiF*IUQC3^_3h==WE(aj%{oN$a!Lpez@$&=P-_&#rpV5f zx6`*TUerCA(3ouuyY*VHUtPlc>ndYoe2-@yb@_+2JgxfuNlwz2mSTD4#z5pe$E#0q z&U8r-MK_d{rMrXv^wUitU)qdQ58`=+=khAGuN`Aqgk6GGuk<_t)J~t*nAa?gGVQ3 z1SPxjsUr?ecw+C;)8`WU`g47~Gr zarerM(__(-!#E(tB`13{CRR6M&U}eAaLFbA^Sih?e5pXi?dDi00NZ667_?y1^ zR9>#P5l>-~@(E$zNC_;*iblR24i6+^t$2PDrIccOv{op5Qn(rvipY$ zRyEliRwvKDoZv|(rFT3;x?&%Fn+<}--dk#o4!0o#IyrtjO%W3-YGAU6zvYW%i;M?( z9(*%O;AsB)q+Bp^Gxfxd{Yw6g$x+#&C2-0J1;?O%v*47cCk4=FZJ;41o>bVvV!5h# zO>X}n7=YStCPA&y`bVqu)KyJMzbgum+#L@Y73iU>u0rj*5>W-*MQ7RJ|kSDT1q17QCzFR-AS0IUw(5(Ft}Dx)zC`r&7{-+URHRPvbDujYohDBwP2f z{wx*8w>1Uy8QD^8Eh=#tLHE;Hr1>|s#UG6!$@}JpI-|*#l0V^8dik~~4nK^g6Hex$ z++upV0{;6|4T) zbTIliVI(Z9?3Q>>PAkmayJbAD1d0t$ZqzB$9QVBIyqPUk&xLR&+atA?1g$L((!lH{ z8o`sq$~&=u2RYZdsZY5>zdFTHUroCT)v`xoe?Q`OQk8sQtBZGOFt9bf{Uo(D z;M8v_wiitqM0(`oamT$9*-{W|=4M%{)i&MgOH%l!aeX08K^t5JyK~^oRO&kj0+b}q z(a%`icmV}Klda`>!QOht#jf*_5PLl(z4a@7 zAsumi1`9UWG5VIkn$JQNEV|u+I_bNRxS14gXZ0;|6}<3l{|ZsljyM1Q-8<|rC1Dam 
zBv_PjYFot~4&N7ij^Is#!#>RDZ-&M`aT|c9U9VGxK21n~I)w5jM>%1Dw=P}1aTV%P zTB$y53|_V7t`}Uk#sE1iAcCfhJVqMg5WyjcJ%*>24;jGT7Jf(*?z4p7=uO}^O(#gc?DJHS z&(xtxGC9cv!{Tu+l;Q?uGD4*1G8j`;_7KkEFW~ba_n!Ogb!|S094=6j)xqLil$!yu z9geM4!T0d37e8YET;Y^(xjloO|#$&&5)PTpA@vMsbwb0yW=} z2!a}G{IHSE)_Gc#b9FUM;dNi*_Zr|vD?c_4WVj_lfHw6N#`5sSSkKn$54Tn1a>6b; zkl$i2a^C#|v8?>U_6m^gY^KiU@TK*dJwKuzVyI!M?SmV}kUs&B*i-?!LnH4ab=78v zF_&OurWEL{@RDC!l;J7M%E&HKUmH*HFcH!WdH}j<;y~BvnMGay+9WX@TiE)LVpu)c z1U2eP8{daTMLv)npQVAx2yqf+>yv~Y*+cXg{6nnQ>qniyl&5Li=YP02T9S79dk-^= zhb?zIWR1~<9}=6XEFM1Jjz384O8DPYQS~nRd!*Y}r&6)EVR;N6+y+b1MU)6?32BAt z&o1LorxXeNJS+CC{n~)|*JEE`6LLWTAVr_QzP9xhD1IM5LEfr%4+$V?u7VzZ{e7WW z4fsC`V1#n(&<#RlK}g2C-up9a{ZFQ7mLF%)YX3(38N1&7o7N0kHma+LjnDHaBkWe; z#VKPA;^Ze)*!|!oD5KsaY;wi7JPhIt@_m37#HlFa7tn9gy0Z+$#ni9i<7Dr zEnt&Qu$k4f!W-SRi($y5?vwG1W~uciYy_sKVEEaZz~%k@;Zgb$!kpNby4XFVkALQZ zxQIURQvCdE>03^{ZqVq9BL^O4glC1xqd7@+Rwa+NHAA(Gd=bIjBcRGW++$ahB(R-d zNsbcjCkM*UUPv*@D4qh@IHEcF>C4cY*hddTEad9Zs8m!4z7G^Xr7W8e3R2K>{mAo+ zlw^uBt@t4Eo1I6zPN}Xls)2pf_=Cm5$|Bc5To#tEt#+&`B7uz@jrzCXtw00NAc3U z`Moiv$>LP0Pco8A5}WHAYxd1a1AR@ctBvFNZL+BDtJX2j{D7dwYf_oK$4%tk`F($j ztQurUT1+ZnTbU4SPFI4^!E&~~LEb$nCEo~%g2BB|J~lng&3)IT#-lYm_uB{rs?n|r zG=@WgI(ws#t4_M_zW;%djvWhi>HdgDd|;=Mo==L1Mf~Ddo&5o({uk3DbRpbRJVc!o z_RyU9HB3e$7cv$2IwZKq<{E-@TL4=RUl$H(oHY^0=dZj1sCxuHiej4}&9vCJ5j;O; z8hOJIFfYxs_2Pmu0qX`zM{Hld{>t>F5E=|2Pkhf{NYheJ+|I_@p+S@ha+YUZMunEf zj#2*sM*vIv-Bh*NPH<7aUb0C(z>6t3($W%<9SkCtG@6|~7FP1X+b>Af&;XammZ{Vx zGZs!Y#@m>&<0)Np&tm@W1l&No+FI-&xD;2_*y$1_)p2Rj&a)EKoRg)RdX@0o3)&Nf zo+#@PN@U5dWp!hxVZTdBVzY6bT9Da#Rw>UG2`q5C7OCEG zFJgbVN%mqG@`bN6aT1sd#)8vInIF-wtg>rNUk2VoA5%!eHiq)sXbX|o+sggGbsy+< z%MaiX-IE*d?;;nVU+~slF^O3e8Nr&yC&Cj*F|Cv>v{zO0jj3B`7(C9dWTa1p1`+cq z2i_5+v#SPTjU?Ioo`ZUE5O6rylxc<2Y&t&{KbGLczd*Wep`n zr^Wef4BMFcm(VIJ0^I6gU@L_j%DiRA0|KVzS+&4_Rytn7-J|@qU0qQ%f;eXF)#2;m zD%~3=odzW_nH}3IV`UQpVh-G)v|zSAg8ezK(7|P^4zo{T;#%{9eeo{fW(F{X=sb-c z&d}kk?JKhJ!&6PWm(tedaO=0Mk0#Fu8lxDYc~1MSQan1s$48^Sz@#34K4g2CK$Vf_ 
z{)fddb#jXkWy1gkI@yT9@xfp>7VfLkOk&e^Gh7Ax4jh;vQcsJAv-OTjk7<(5h}@j4LLq0Jex!GF|7` z3N5E!!|{?J=C>_8A{?Ai@S##cASP~&CWwi+@%_zJoHi0Gi5ddU?+KVhejXni>A7(Z zak3s-Xt-jx^aB{ee8MivLYStgl)OUH_}o)F!qyc%@_j)BB?+!>R3uAUcSnU_NSOSW z8_k3aN^vq23ETmAO4`nKMP`=tXsS6Ym{aa1xD)IJV!kCWvil<|uf;PPQ$EIT6!N#3 z36;P#0aWl$=2Z}|rSj&AtW{?UMtd&e#gNWt+S^*UJYSTLw^dGevk2aeHCiyPI)`+Z z^cEPUr^0_gJyzv8jk(pyM>CRyv+z4rVtjK=o|_C#0xA2qmsuvh)w7e31#qe6MzN}% zS*6T7vTo9pYnM*iK`4R5_Z=aQvA;eUi@IHAW3}L$RxtQCmWU!G zg=s>_ePHFFU+bxrM3(?exbWht2-3eg3XM`hiNb~d27guP^wo7PG`YT$>Z1{Qka@Us z?LyQH4T2&X8r7on_t;W%PWr$r)L9PVzxliPBID=4KW5i{ea34pvci<^yGAha^s3hP zHLm=WK#9kQSo8ff?~U?Bd?i2$@d8#mya#>G+MS4P>G6iUs}?ld*|9$^WChOU-SvM8 z60UTtnitp7&%Tii!-a0|Vm*wS=78H90cays>y+=$Zmjixge>(egk$U!P7w|}wH}RmZK*wNV46*#QWP@wI9GD6 zLWQ<9YE4dVmo~)08IrXi63w!B&iFJyUGWfvyq`KHB%cw)5s0a1T`6#2HT*+4N1oO) zb&ofbp-@~2{JpM`DkLV2$hEOHc5#HzWQz1;@xw#Phz)qI&h>!%MOavm*+7MiJ%;dn#%hGRo)Rrz?X;5Vv?WV|{NPrsD?jD17L4<8hVkQ%kyRweVkL#3>JCZXsc^lb zgoPr71M@fTs>#+EqNc>VMbLDaB7-?m%&gOnN|<*!CWziVAHDrvmFF5HFpJ(1E`fl1 zc}nKkUi+2oJ9Epin+|=S71m%DMyw{n75>(9@^1zs@As*Fh7&hq*h*~HY5)0o_sJ+{ zxd8R)-y}Ikh)!^o^k$Ubm*Xp5^JH{JZ%k=l`vBTYan%;;as=z(>GBx*askAq+4f^J z7?>riu`s?fTmNoehuuO;Y9z$;u~3xXhad&7y^+M$!X05<*PzpO2aopf9`SC$7?h~+ zn~_2JKB*E+uuP%I<@)OZF*Na>U7r9E*OHCy)9iXXzt6+idoDIM2%}O(I3OUu<>HTd zcpf4InTGC{UD^ITp1vb4^FxxR%B*gaU`UZpM%ZjaKJ|P0Z3_cDHPUcwL#{7Fbk?Bh z(_OAcJ9WFZjn_Y(C)YBI`>+~3&usAiZ*LA1#EXt!;lIa1m|)j55Hn&{mDbFWO@H;3 z{Q22eQmtLS^=~@-J*GR#_YMhR7+`rFib5d+;{+!jsjE=f1))DO<9bb>TnF9@FkI=C zV(zFBjIBg&7OnJ{<1>tkACgi?IdzfGzhCi~y@`|v8XT#=R|@|G`3+#oDhMkCTUh-t z#)P3H8MO&KVEw5PA`kj7s#CE%>_Ss9GKG&~k~kt+@HocakD{e+R)j3+Xj8KhHX}cH z-Ks@#FMuuXts?uigtu~+&4sn@j{#UU6Y)eldN+!OkxkNYp@1o>vw@$H%j!qrni=-~ z_doh{3y~~Rc1oww*dFK>zDJoEAR8N33MoEUU$#e`MNo1|B`P?ec)nRYOz^A4?=^Hz z8(U9bIxWWNf(#zB*OJ=D=Y9a}BSoM(r|s7tS@hCwt$#f1RdrEEpVgj|k&R)@6X-!p zQHpOvUuBI538~#2-x*1QzO^am#>-Gvj#<}S3%MGHeT(4tJ^X8R&*_NI1Ja+d4S({m z6Ti=G`yge|g3--@m5{81G-rJdVYXk1sPKah_1wEp-Q&8kc;t3cCxt5Pz}0e{-r7=N z{ha5$*5Nn#tR0d??W`8rsp?x#b 
z5SWjDl9&^YAPo557ePp9AN%0j3j0RiH|6v-lKMxZW;72kU+&+;zEvTF_Zp`n*f=*y zsXV8MAcTCJFtU`eGcb*!akZd|6?)mNtt?CJXoQedHMwE=G+SZEoEZ;M;wO@B^N=N!NQBctfJhM#j0Wo%)b9s zu9vZ6j?g?OmrCfHCcIY0)HKZ$e|HE=7h0s7@Gc6Uk&5jo$83pLDR1!oErEQ>(lIA6 z6=`nVtijFNG{YhGOM{SOOyx*HbUl6T{3pw_f_*K3%chtwMrIaMg88YKkbUVy7~uPt z9R9RVMi^2ko*hew13-EW1(v)itZYdGifuM5V+T|i40@kS@&T3Myq2OTbO5Lr~Ppc27 zl9N_I^V7wSz28o#*kAB|e_vc^`f;aMhBOs0XKD2Ht8R3o)A*h%g&3blo@r%zq2!C4 zeqj;_?-@yC0c2%%UMD}^X)a+!W@%F!Zp9=OvnVxIOg~zr%kihXF8EQ=7;?iQx3_I* zS9@Na>jUU~bB4wKo&FBBuhIBkv~9O?-}=uu957E+n&V~Bsi58U^aB#L3g;0{eP=z_ z;5K*XtMy(^-qlGcDhYJRXToWOriMpzx98r_!SbdZ@#1NOY$~ZzXms9)8W7DOwj0PL zg@S{>UEA$bW{fxxy3I8IThW=0P79;?!ANZSk$S2}7K_M1`sG;PMY22UQ^FVSkti%1 z?NHb)5mgSVufWmm?^>Qgww=PNtl7^;4ZT;tn(Dr;x*g*^eU19Qv}>X}HV76F7sPaw9r{1VLJd;JNku&VowFBe@0gZ=oVNrTlwo7h zH0>98d3iWuezfJyNkq-i^^z6TR=_-N45B|HfqxM!VQoi+Y~^<7ji#Oe+=Mas zBX1TzG)A2TzNc_)k3v_WZ2e|y9LArdzicpJnJ7*q&!eq#oF$DhRj4<@Eix`d)D7W9 zptiT!!53B9*}g&-96X{-;Hayw)#!)oj8yc2?e_vwQTVII(iJVQI3O$g)M3vaE>2e=L2?Jp-jcki4$UM6-yRcxqQmhC zwYU9f@S9mZ<@b5f$jg#HFc;NLbWSgmyrJJ{%j56m;G|spX|sw2Z2_2lw0cC=e19~i z72CMBM9!s-e-kdg`m9z#9ND_rI_sG9!BB`v*m-`+`k-Zot5Vg%;xg#;Km0IqxPMX+ z?)O-3CDPwy0p5S-6B`Z}boEozKfBS$B&uMre-rLZ)t7M9UC@TRm@*{e_X7VgUw91Y zUs=%Sr@u6e^a5qO@{{_&rF?%7x`jiG2zRMuz`!PZQx%X2Y7k4AV}(>ytM<*%Np;Fi zGOX86s2D^`7|5V6Dpv?}Z>3?3D#8l^F@^!P8hP&pK71^ro?MZ)(1+$PuWjY3_;}04 zc@r-ebVn~QaS42%Bh@VCLcfMV4bqSmnW4NT+Tl|p!uZ$4-4l#p$xj(k;+c$`t+v6U zWeqY2V9@^D?RL~RWS_A|J=YmPOd#+1h=oPnGPE~_>rO7oBm=V*!YIS*$vn_tOZY(0 z%RV@FTuicHs`K-tN+CkK?Z-r<79a`V$!;6AE?2A2xnj9kCZea?4Cp327`fyr3)TsN-iR-6#^_Kl(BOa84}Mcq%+d2!&1=K zN0q5^)g75<*(YCNdA;HEn-YE|PR7T`PoHbho9fT!Rso2k&ruD2)lz()>x2QwHTXt0 zHCG@wYZbynukow=WJ%h|4-1(5>RT)4$21;mnb-%(pF#PyB+QHHy5@D3laJx9%OTd+Z&&m{|J1x5H?Rf?FCaZ5 z=y*NcR--?DOkV3g0Z4rc%%qh@w7y<;w({h^Ld?FNp$n%%*CSPJS^2ZzE&?J9fx&JtuRW;?0kbxFLLN zJmClCFb7`sg_#dmL?f@?5HikifM)6|zZI@6&)(2?^N@0{Ix3OWb=TV&RRQWY+eCee z!C`eez8I>zB`3fKZ!Z2%4sJ4Dq{z3J+zsxVKlQP5eePQM?zB0e}we(7wbJ~)|nLT{^JewaAwJ{YZvU>b0rjhE; 
zH~X@Yg222sd|5io#ijy2gNX%<0bwl-D}jQqHOlZv~pWZk-Ko)(*f&WT*lBgR{NZl znHEgp@+Mk@A6Bxyk8&7SpWIGvosJg`t zvlp%(vZSmVlU~w584=4%<1>VQl{hFsiUP;{vhroK^=C;k-V*ci&AN6@GLGhEpxAPq zEr74U4_r<1D-zdcde;z$F@^B^1X`Rd2)#Q&nsI8*S=5S;Ex^LjywU3`ftAE4 zA)Vk5{iUC2+jr?uGv4dvB*40{Z}L;xFV7A7u z3Sv{jjOX`dZ#;{!sG~J+5$;tNZu=10sMvt~GT}nnfWUr)1G-M~C)2W6{VU%~9GQM- ziuOWxABOxji=gca{A9i}ZkVGMs^444;g^}v3^6*%Q_>t*{=ot^$KNf#F*dYL_AOy9tbTy$VjHPH{vektX*{p>2vdsUv{ z@p&ER*yKUa#bd6Z!C8nF^|KJ;%Gm9bggc$)Z28Y7b3mG4Dle$)J9^G3wsYlkb!q8S z6AFb$?+Y{c^}C*df-HerJrq@ zoR^y@cZ<}?BEEQH++$C*?yL!%;M+iLT3~E#u`Z!!w}G6y2K=2f|2$BafRKt;(Q86w zfK%w9Kr_0KO**KsHNIn|@iHLsj>O5JYn+9co*;6W`oo1BQ#%hq;4jI5eY$l{TqiA{ z=_OZ}%O|`ZtRULq)7+g$sgLLavKzOFTbtF|=_}ulxnW|(^8X4X#g%QpQnjXkqMv3? z!YAGoRAXg_mp;<@OTKbYgzYA z?4%WoU+sMg{&ITeY|U+rtLt&9%T?bKKE6%@Y3Uyd>QxEQz8~6@c zVtb<$$xIX9%+s^RR7Vb*DmDL}a-Nuul1uW;&9yXWuAP#p5x_?89d>CwA8@83#koAv z(3PJIyO`9`oIYmg281V$`pS?n%3|8En6_;Jz4^?Of)qW3Q%S0CR%J*8{p%dKXsp%h zUe1b9jE{$@%Tw}f^KMkqY-L0kN}QVW6vYwkq&QP$e<@2fKYgy0B`BCZW#kY`pBrD9 z#@w)9u4kO8Bdk^>|8e)}L58yF^C^8L?`yI|>QiDTJ7RJMiLEXXXYQvd3hk1)7Bz6H zczPid2|xowS4Op6RRrPqETFZ;?S#R$@LjLu=Q-l!UCt3_i7MB81cbh`h4RYco`+0= zb&;ObKMC%IYvUOOh*Nr!LV34OJsPlMonoPkWq)OOY&^jd2PfK{BlZr*q;B8 z(l}-!7?^~AVcFuTv?^255&)PelDDD=jWpkyqx$g zWm-oS1Cytv-KAyek{~FAG7cJ!a8v12i2b)lU3bir^0gMxOoh$9zq$%jGHyuI8W@|rZ_9?o zHGQ$kL#gRV^W=0S>D(xGhLmhm5poEUSWu_zbVo{S6?IdD#eO^dsq2!dsgm<$Q>3vJ zRWL}eZ%|DlRg_ap(jAeTWR`Q!AmK~u{oAS4c37>($B6CWn_=o!7PwOCL@XyoNP$~o z4i2-4=Sjh{nY!1|oQNwbtr&OdG|u_pXmXo#e4{q6-R!$K)lOy@lJbM9q2VRQ{C9(^ z0ilU-W7|UsQ#%AySpbI-M z2kdu-kSg;lkTS+>t%b7CQL+67ViK?NKb2Y@EH^AtX{Ju&(|3yQ!mCZjc??ReWPxoD z5#`W_mv}k)p?XK26`{urKLzQ>;wx{QPaYSN;9;Ww5x>q?K0Dj3G zpqse$pe|y=TQXmCdnQ2H7m&&b||?Gy6PKVn)t9TzMVBPz$*E9D!9UUUXS>Qg@-P^Kk=Hx z!`y64>|0Ln%*3!h&3x518k~G59$T2Rt0<&9pA1 z`NPpwP?@|MToQs}eowx5Z)oe|s_Vf~U$RSPwsKXQjTGF^4+R|4n^IPM7uDP)3!}^a z<-;dxtiEeHaa4n1O1>mN#!8PULFJ0|qUFm<()rPO4dTsUY3YJyx&mTHNKaA8LGF05 zSr7ucVCzcyyAa1h!6-R-dDIwD=ccz~%AfUq5j{|0I|?=@FfU?>?6&k89tJ8{D^DL_ 
zriSuuxz0-F@+Yfli7h_7&6xLQOsNk6lyA@@Gn zZb%cWd^D#8$VSnm7nhz<-13Q+Ypnk?iXXOlA|eFc$n^l(d8NrRfeOoB$f)QN+O~)xT;{ot{x0rkE?F zGap)QyuDGQ4}P#5!DEA?VHCpO7Uimg_y zgMQQL4fw4~Mv}C6n_aRI0xkXag%A4Ba&R_q`y)xwHjp>-(ei1i-^IOp6aAZoPOhc) zXX%vXlB43xN%r|+o3gJ8<+kk(9`lBcbiyUFulD=${I}{Pc4@aN4KCNmPln|ZHR%kA ztTji1avNu?p|CS*ZHeP;4NZV2Lveq>b#@LX>SIT7g@@zenN)#7IWXn&&?$BmbgI5V zuz7dW(jjWVlqE^We?LRcm$?2!;{?#I6N}?5+hV)0QnTt! zT=qa)8R&|wz5uq!{p{A_kFhhUtb(z0C;`_)yQZW5qYX0?hNhvbaY_Of8-kSaJIq+$frn3qS;3V(*(mE2I*tRsg;|f9eZT$y z2eW&=9`}c9mO}UG&+6_}-YUMb8kVKh(8~ruXy|C@xcmGYk4J$vI_-L$cQQjf7rjEO zxsA4>esD4hbD{__ZW&!ERYvhk)_ekY@yB^7%_xiP^S-1RXhgnqn)+wVP~M296H zAw1QHD?5({m*9}=W6at{j9M`f0Qd&;~ zoe$@BTUfsmCDB?ATwyg9I$-f^D?;$~*QuA@bASmN<>^?1gki_K!$^$=p6G~(lvd&U z9h_FT?4rT zqqSRW$a!+kPgx%gPeYAuPxmHUMJ!YvkE3f4E)>{A5=ctLqlWs(h4v@0cNZ8zh7N0$CkNtuZA3^@7(@+6S=_#^WuGY z%?m<+Hn+cm-67>ICY`~)*WpBCs%Y~OtY~eYfLjT_K^PKBqXV#Fe2CR_c#A$q|Id+KuD`&YY0q?a~^H_ivWp zbjF0&8(sdIsc~QT%T@x%qN)4CJp#${5nGb(&FmP3Mi__z%cIwVoBaI&?C@{!<4~eY z6`#a}k2qC3B;fH76|-x^L!iuj)v<44B05l}*Yz3f*HZo}ucj1lQLGA2=N(fwOH36U zQ}atrQOm70%eO*7ZVXI2qK2ZFZNn#$t4#Eia;--wyUa&75(yf2hw1_&Wrniaq=qNuXK?LhxPlPI;h{I#Lr+_b#P0u3u`L%bp zKM-VTW_XU~W3yWD^Xu~+>--c(RHJqt8VrxwJoDZC%oaEdo$4<#gmOrWHM!b zWm4uqx~s+GOgg5`yV#fyN4OO9jvWb2%PIQyh$*ntXGWB3JzWBGZPWK;b1t%xu9fqP z&~2ORq5&eoG4<%^rxwTto$&l80qD-b?8F2=7n;92287I9u|iGw(NV90ckbQhnmg2$ zr5*zwfk@j3B)a(xJpgy2bu~Wbvb8)&6+1Pw6D5E)rNAu}M0a@5Ajm()B>g>;iXhK` zAtgAs)$xap&@S&Vb0E!jp#d8^5NBBud(+5c;S98)1uzdkgGxhnnLAd?YcL5OW^n(m z{H!H}Z);|zJTt(fQSJ!p2nir(rx!D9R*Y3uq3LI|LXadQ=)~#wvYk;$_P3=?LP@hx zB=DT^UW1d1Xjl2M+ni611o?dGy=FiO5a*GT7m7bN@=+~3rtvU9br-<{`ZHpX>GsKn zoSVJ7DWExe6Ho6k%lvLXod@Z_i335Z1IP@3AH+IYmV=#@;U<=s#_0a1(m;-{Zoduu zO;I!vLM(#CrY<&%C5CBUE;xnjby(}wU#_PLv3$Q}R?4MPMJT@DdW%U;&ta1y=R-VA zr3X$G!u-TdsmRIJ9F`De1GS_BwBE=e!Ne)zxadhrEQpGyZJA zfcju<#&Q49I|m?R7>9s!AVUEsmTjKV^$jn8^FXPQefPdQ)U3H8$*o8qI$P3jz`|~v z^Ee~vC^{x@+-md2-s_-}<<K!vJEw)@`P3l@Ich z=|8TWwy`(=WK=^M+-^Sm=@m^{(fc2@S_?V=Zm_4saGl<s3gIKvsFCx{Lg+W+_P4xAq8Up5TU21 
z_dnhMG#cE?!vb&tsz6tfBv_*VkpSk3cZsF(-y*Q5xVcoPOYnaxJ|N6P# zf&i7iRV{RMdS7QDWh?-=R@%QSMYR6JID_>ofF2;zrA;pBqOuvc`Z2hFC;y)$$wMWY ze~osE5&jsNIWX%$rsj24o)9l~Bsr=rct@=-orM^_?G=E31`lC!C^mljO>lPRF$x%s z({Ms^Sw^Tmmi0~+?D0C3-k8$RMw)06@|ak-OiSlIU6~>fCLn7u%4k^1M06AL0@xP7 z6-W+i0tT+_to=K?G6Z_UcW&9e_R*)5scJ~dV-AL>d5J9{<8i+jI@W630X&pn57Q)sq^(JkTF-$=zVovK8Df)p}ABO+H~>nJk@a3M@t5QP$12puiv% zifBo|vf@WL3B)V)?2G zhHHvO!X1@Zs)h1l*P5 z_|f)N%eG**7wmOO*o27kEDb9GRm^%nnByb{#BOsvlN6);U?q6b4rOk8wZ!rvIU^PCz?1E_Y0s6 zdr5azUWeR|Vl#me8uTn1S`t`?A5+LD511+abK7n#A=;) zbgQ3NlrawiR9U8PdFy8No&HzATol}uU)CUiPG$kkQwfJzMF>e`B?&$RQ$=MY@3lZ4 z9HOkiMpDX9(JS4gGXdxtMXp2^+Fflb+5V)`l}G7#YCh8#W21h^oOC#PZ4zigFDd45 z5{U$rEtWu7Z8KjTs=~6DAJW1ysj8LzisZ}3NK~`FZ1LMX@duS?kN)xGHA@@_5;%go z__l?)?Y|h!78awe?wbXG&rDm-4n@e3DCSHme`kCKd_ha>bx{1ELppB^00w2a&AM3A z^_{jFql^Kh8jxrL=)w0!PaH*a_ZCv*N*rn2yB*vCG}Nw%u`G=n@lfg2#_g>6t96z2 z5kd`?|KW@AAK*&5&IwibebAuem6Y);X6Ud5l+nq^UfI4<1)YWgNP!A&VX4J362!~vTHE?#~tV;Km-a8&?a@%FhfP1fw3d5JC;5rivJ+K1v>1LSMm_}@q zod}kOnF4}ue&6%b8%?jR&N)*V1KtIR*!pAh7b1BV<8XX;i@!heYSYYc*o*7#g9zud z8po$g7;lD`*s;KZk?4Qs12-|{eV;zmyWPKLb%G3CwLd-NcjTuSeo7wZ-Oi{|$Ydd8 zH$uIBLD^?144f!_63D3FJz4^CqQW0xJd~{nQ4F_=MM$(=lCL>C6PJj!jsuv8FhWKM#z96-z)VtNwSl!eE0%I{B+ZILdn9pNB)p9 zM&Q0h5l^}53)#GJM1Q|{-0vJ3C&b55OAW4b9lFTx;0@oqO=az=iI*G{72w#ObjO7h zch|1<`k&N0m^|E{)h|Ied96C%K_j^mUgDb(KrYOp#Iu#XwnKHvbP3DlQPWD9j&5eB z)>ftK@#^!}8``|q1-q#jjwkd1#k5ojby_}K%8wh${b5)TyEvf}e~d&leAHyl(Mwsg z?ME0uS>YpGGl0?_G^>RO^v0P1&b}KOXYh;CI`2t2ho!Ru-mXc8lOmAIY5HUDt!VH5 zWV!t9I25Ri-LAfOKj;w|*9N55nCu)ljce3{vkk5xu{xD+>k*7S_KZDRi@#`!p%) zwItmsEPdUssHHMeX8jhEQqCH!8w&hDptc-4D`+JQgR4!f2WEKw)}nCj_zxLd<4})F zuW_iX7qL|K+h;X&^3K9i5Aj+&DdV3&(Dh$3mZ?%*^-{yurhDyR#l}J|lz=at{sIKn z;{}S{nQlwETf)$|n8f=J!vfXX%ZUz&NVm@mjR_zAKE=?y#Z>g9BT6I?GxY2gv@#?8 z?KWlZUc+=Q%b@lOgRmTgT=!bl_duG@$#4R1zjxbZRH*OE!5C|?XFq$n=eQ>(X5DGE z=m!h@*Xs@rC)6p^lV(w{L<6SlWCHjsk5c8>4UDA1$l-{4wGm&KI@l1=jkHQ-RHV;F@B<(2Vzap4jLN?N-TN_cPyc-niA; zxlJjb+{&nOko9lC?&4-+@MBMvXtfM1>6gb!guwN(qMa~&+p=xEkW1JtMr@veWCQ2yGsgTB?6b=E 
z4zh<)GQ8%)I=%Jir9WDV;n_DX-FVu#ZVL|5c9kmz$DZ1!P1CJqGj+CllWhN`0Sd3- z@3kj0euaNzr&_Fh_N%be00Dk5#=-`a+|{!8U$kQ@Zk)=R`)y>Q$#%V_74{d-SI=wV zwX{<4Tk&c<*cbl@{HuqBnrF;n1jAkSr;?sHav7fZ#Dx)f!HE%7Py!>6!;KI24cKXj zw{XScXujUAUODZUpaTlKmKhBJsyzz1m0qe6IaYd6^lvf0ZA4k8RjZag2{VHvP^sI0 zoE|>lV0aEHJF7&+MglzeG|gsjsIQ4r+jLv;tn6fD{{khc6V_3PzGELv_p_4-2pHVX z^7z3=Dh0X%_GOrqt8(;l}08lptGlo#Ogg0t9$Ck6f-UrkaDmi_lWKXAL zDWdVBthISj|1rF3z=P)FwuHU82iW+=Ff70&$K39GE$y@d8bUX!nsE7isSf8C0{g`e zYljlkJ3|zPn=o4n6S3u&W0M;yR(kb&05;a9xSyFZluU16`tDk=_37c5wIOkc|F|VU zzOPN@<>6j#@t+dk!VeDq?EgRtO*<1y(l$tk*v!8~u7Q}b4ojF(>wUENXK3rb*#;X~f3eYKFfRDdNM6<{Y`#`J z2%mts-TKh9{jDu|CC+lU*%!}83ve`|UH_ zrim-|w>jL|Mr5u>LmXe8p@1rO*m5%jdC*0(+`;>h-;Gf(1G=w#xS9DPNKQQeoqC^R zJY}ZA$hJ4_GP*u&Q(P_zbP-7@^lUTa*D}& zDq*I#{i0!82{_*#8ZhCREpd!3Hw>eGJY!T`wP`B*@;{S#M%Yku!5z>3X>7v{c0dkd zirORqg$%IV_o6vmK#sE7(pi21h_CMN%Qq~5^LxJ@5U8!I$u(+d>cxn+D9MfJghM3$ z4cnSTKbwT{BXGj?Jeg$TzS9kjYJ?Mz1|oZ!1wZJX#>Ig0Qh)r|eIUp6nfV=-;O?Od zQh!+o)L0{bl9m($-k+o$_|k=?+`(r1Y?DK#*;{peSeOr#AxJ!$&b z_pXuKJ050JHUEK-_Pn=tc=cSvQ ztVdiAa~E!SWHpf%!KJ&N9JOb06*0J58dIO&m`CmvhLVqptjx*8WSgVTboMQFH z;wW>4|3oVS=Zmj?{&j<^Fq-2(Z-pNY3)16vN*$%^RMrcAEv_OTYuaZ%=7>j>8@OpZ zY-;-)z;$zacc^pX!-^Trw8PR27 z)h;VW1Z|)7Iic|~84F~&Tk^L$9R%GaC_dKNm1QrWMd&X@k!^{j;dIUe(QJ)O&M@!( z$o1lq-oW4g0A+yEd&q^YjOZku=MAf4)FR5|P&k^#)nh~J3k;|*3UljB=gbzrb(=7V zV2?9E$n=SwHXge@QoM&dS7o$fSoDhFIfrW$Dar+=^PB(LPsawr=Z|l8 zds(iQD%#vP9fsetfd1|x`s2`1jZg!kd2Q4k1~GhQKJ3OuJw{)4q)H>TZ>-mC!KX^T z2v91UfIQ5}&^YXDiMT+Y`~NTy36KM(DtA=(*{_EepSOYmzxzuNruzFlY=-XX(x`vE z)^X=!>GE&)PIe-+=+-fQMed{evg;LHIv$<)z)h$G)<}?9j@0WN;wO%Okk@H!wSylk z_k-4>8dyLN*4>B<#%ty<6YtR^s!3Lg~QLt zJg+yH%(MXK8QJ64uj8;LAc-kvnP)HSt&WoXDdb^eb6;Yfk7XVPF(PM}MekL{S}Z@< zO#I(j06)1PIntzyTBP;eae8)Mht)5onlYXL8cVQ)4QjIf#|a&T_l0>^KZ#*Sh;^GN z?;a?5xbI)CErA6;?L90dd-Y$^5jR)ztfmHfHVNH)d{R5%@W5`?i54<_;%(Mjx}3Aj zH(;ChvTdv1r0c%=wzIR(-u%N%?QUJY1Gx#(G^D)^>-czYq(!#e>UfzReaO{%QHC5| z(sps&>iAD1D#Fv5gVyHll{d?bv*SCe>t1e?^ToXVeU!W68j8bfkNN$qq`8AB?Wi^$ 
zbbG+CwXy@kSY9Ou{8&&?zm#IEY34>cXSCITtTY*3gvDb|&GmhRy}&dLxV3o3E9MPi z#JG}FHVUv}V3*(ba)~x|jE`soEU{UipyxWmRSsz$%<>?*_E`IAV%@4pC z{!ywxAwI)pNBFD!IH+g=N85i`>Ei->YN}AYO2B#&CcVl1t?Fc+moB%-GI?{(Ye#B# z&3mQ8jmN4P(8b+wIj3JH^>gj5_3DQ{^v5t>x)I2m4cod3fuevZctv7pnNBkpRxxk~xCcj#+iQ1$vbjEx0W0`EzQ)OXF z_Q>dNXu$`gi>jZ;`X(7L*%f*%W;vgK7=XjNkMUhC$yc&z;fwmJVyfR_bKf)n?f60% z?6B#HyxNu85jOl~A*k%D-Mxv24IWQiMby_bc_l_z2jz#5wOpTb?*`i4rqeq9E|&Sd z>c4F!)~V5|;K}5PZAklxo8U#dk)x@#S4$zQzTt&#S=xJU3Kl9CsB5J$_R+PRNh{{{Tzr3q58c7Gwb<&5#a64{zeTG?H(^3$>s%YA05G# zl{y}EBh75I0YH<-XTKu|txnPaVECFiTz0Bx~ z`=Iepc}(i+e{tuaG`Vl|hJ~Q|`GWOhsLjup$PWZK@z965tk>7%y?o|GfVB=wxmlu6 z*e|{j-bIvKap0U1yjsrRJ72qc%%lduu{;?)h`zx;L*c+wF}doisM*{C=kI~>kPG}2 zUMIfx!T`bt-^-N5=vUIT5^N5&m5n@y92u@z#70QAw~W;Y{z-)HtF7+=UlVz5BOC99yEXa^!;Tywp;@XD6G52=KAy1P(^Skk>Pi#97e05$ymb%r z#}42=(f%}s@w*^O&oyl8auX+CzsxqFNPyfP;(fI#e#8`mLv3Orj6`yaYmSIN3Vuj- z3Ln;YpXyzt;e2?&)o7qZ)RD@MDHQ3iwXOskP-u3>&pbwZ9Ww2sD@@Ctsb>VWbs-R5w*x*^*&*?3g$Fvp9#-;^T1(rYyOn^pJFWAyFy zA{eGMH1TI@#rI_R_?7XMxWWNCL)o!CP^F8!LvT0$kR6CQAGf_o4#2_H>D?#zBy{-w z;h35tI}TYkFMW}%B~gf!p`b~|gHA>v(De)E%8CJtW>{s90cGw9MQT?W(W>|e4MvN` zMYWp8!hm86m-|fnWTHqMgMx1UVlGQG|03*2-}vM#+%p;PhFGTe-ZQ8{bJzOUW{yA#k3y4 z&*XRq%_LemyOSljjO!rz$6Kagt>TC9&(u?TPVyXf7wyZi#YR#a&&|`}Lo}JqWpYIsfJIHdnan?K<{BzYb4lB8sbtNNmCr|wqN&VAt~jRuM(5wRjaUwz8c@$UwY&wp z+=Cnq6zSI!oDY%@taFx=Y@f4{=Y-e;z*(L|SSLSQWJSgh*oW#-$%U<(aj%P?G5}RT$Wm zX{Gg&I@7=L2Ven+gJ}Y2z;{!>ouXey*z)X!FH(B)KFF)9gp!aQ0K>u-spc-QZZ^&g zShV%Mome;u?`xgUb-K2%ovF1KesUCcqFUIKeLOYnq0Q{hcmhURY;py+EF{NVm>J!F z0sqp35M7|;A)sv&w6)#pl*kL+HMWsp6SS<>sB!NZ!#4{+_p7y&AVjJV%8zL0f0xiG zNot1O!ZAZvWN0403pdjw_OHm!>Bb{7YOwaN-6D@UC;;$_@%K?Ax-__0W<}rv%+eDH zrxn4s72!09boeJpGjW~Y;!=agJ$x^A9e;xP>`g)J*MD=o^=Hcegw$2VEK;H}Zos2n zJFJ4;;y@=?+~X|@A$cOmo*khpuVig0dyJZn$OiZxPcobCOFv%q2RfhVp0?^Jh<#j+ z5{OOoUET@IR$U@98;+TOcdmPO;~=2h^_9JDdREKlmmFQ&7aea+qO+HDT*~+sYouD# z>5~*Qsm5?jiy=px@Pm~{5KMOpR0yLneaGFt_SC)~qs*>SpTlq;0G}O}!K2UOIwk-e zRay>9<*lWtuYM!N;-%&$tHpL`sl(W`G_kfOsSBQSCI3doIceh4=@J2fog&0+Q#7_% 
z=q!^qM9`vq4R`4)#|9rQi7o!PCY)x;EJ_IxH*S*{gMkLI0jmg#O{@N|e(ZyH0ex$Kb742Fo5r2H`a`J8vFRYmd#k z{kg0hR`7pc8!{EgBgc#pFLTVZTpwfU|F@Vt?0ghM^le*$q5@R%2BWEEL1Nzag;#^6 z@WKe5-z4OOgg$DdoOK!blq}{xJD_YK!1V*GZ?d{4R(TugZy2rNpA^}#$A`>{j7Mkf zvInL$@E{LxbE5CgByO=ZThM5G{TS+yA2QIL+a4RqnnpI|NZhFWc&L~k-taf@QOS4o z=VtevXx%+eE1M9aL2-ZtuWL=Z9i_-ROx#BXi|q@;{liK6KInDE>ff}J1{JHCQ5o=e zuRw#x8k&QvmAR%Gd{l@iNcn`XMYaMcM6q{`-TJwNQw0Mevb(ZV0#o=xn1kfE5g95@ zDr@FNBd+cve4(16m~)r{q0JO|(&zHizDBe0hN%IPx!7n2)B2qJEUr2`gr)aI<@Yni z`X60=w}+R~s|Ps1b_=5&SlWRIh-xZFWitGRK^T`nr@1lkUYU%4c`k;axe{CyRZ*GG z(~TN$2sZ3Q_^HuhC9(^ORgMuhiI9ygacRWcnMLe&J7WUjy9S(KxZVmsLRWvh&L8&; zRW~y&8dXrmM%ICcIkgMBXCfE|TO&e%^GH8NhVt$3>p1k12*>{zgtSPwN2Mnn>~?%_ zz0$8x!v9DwQJ9xOL6<|9$Em;UsJKsdtkF~eAmgV)*IV#Q>=@>8m>VySki!$kSz_P5+)(mE*z<}Co$R9D!_lF%zIe9i}m(TGDhFl zGquF_%=XWy4jv~AE{i*wdD4YoH_LM$wYM>c7k?uZteF3t`mezSqwp%UE3rLDaH{(r z1ariIRZ1&~ijAQ2Z^>rSGvy%mR%dKxlaM#8;o9Uf8XRExW2HWiVj_uspu(H6MOUGx zPl0!hVON~oyAY!;58U!%p*lOcEZ@^%C4a|N&yG0t#^22M81i$Xm+kDpg z5E9;Y8TfejJOU1~r$5U{1Yr11e-n5N*O+W7QmvHsYh`}o7)s57_0oQfKH@Xi2^uz{HVj!R*eCp$U6r^# zyI(I8}`0izUyyEp(2%bfjTHF z`p5h3CiI~J2%uNt8ysg0IFatuhm5loWoLMuq-Di!!4j?M%7PsD+Z!mJCs3FC_fM|A zMzKJlW`#B{SdZ`W{d9^PGwg!ql&5c~;Telec-_{!Bs94QH0MNa%_m1GH%DS921>Tc zUOY>X{*NXdGuTgfnKu2l{~nYL%UzkiL(BJIeYpo#!wSb_KkO=2x|})3vlEX&gw)8w z!b@LL4~m)^^j7>s?p`(jopXGhtoB-}OW@+B46{}jNMqj_vAmt&=rxgi@ zu)mhU3fs-F!)*GW+ta;wQnU9=Wfu*}jwrJ9PX^U1XaoyMk+`G0B5wdo0VqVHbl|K# z6>>#s*NkfciYv<>vCI5l(3S~2IT*R;MiQYkqS9^!e-!I_slbZEJ#D=dk#!iK?aoTA zP9s<;(9ku$U#cnRM&;p6+~|pVCb4)pO+j!5{YN{SaoS7 zUKR*9aI@P=qY8nvUr(Cf?N-iKZJimgpSE)ReZ9)BYgzGC$LQ?WxV|iFUfyreJm_2p zR%Lt6XS9$I{#o9Hh~-4`4LO(@I=P#hW>vk!0olKSR*wR`k2}c7eg8E$!J5+zAb7+y zAx>jla;`L!PC@u+H+J5K)!{LjPek3TCLa;eE?bHOlmQ4pwJztOw-9{G!U#!U8dyhG zhH*%^t54B4%+`-2PIx%YnmI!{(jBy}0Iw;&ICAVMCKay~-s3Uzr5Zc+BnQ9N*-k)9+wEP%`@S%pnf%6@FQBrb^ z@$^KW)MQK>Xf3bGTvB_^^4Dn_<>BYh-GS*VvU=S8M7q=tvFb8aw4u{Xg0VPt{KLBd 
zTn09F#)`Q8(9MMCBaD9)*}{wl(AlAZ5t@2l#Hd85h_VlJ(gx+GH>+Ly0jpQa9}ev7>Z@GOC%sBeN<#fRyhO)0Av*cAr}n3ANt0R`JPvq1q6w!y}XJ0b-K+#o2N8 zu-80U=NY-41|KdRmaZH(KbG&-TB}L5aoy6u&a*4DeJQFD`hSL16U2s5R8QBcC8p$P z3yc3hSFd|+-FQ4OU`qss5kXSfy1EgvnVHdcG>fmN&Br&NHLHJZa2*Owb-~UapT)oNq-Yh&m@#Zg z5<`Kj_$w+JE-&Pyc^bZ+_0hf_fVR{ASmt)Mvbwn~d_583wORZc?G7N%;x2wM^a{V8 zRrZ3tb$yaGqT6G8mf^IL6;xaQb;WEqe+wW4B>}2|EKs%V8Q9ARiGPxyLFXzvQe&6z z<8dbQVz6yiB=_VzR<{0e3Pt@m#rPW{f(y~_hLZt>ya6xHv)&~(F}lU;!>Axn3ky?w zq@`CeL!`1&WU9z!?R>3))XF<6YWB>en#lt!+CPRxz-Zna=+SsY=ekB|ZR@AihAjL9 za~N%3Y3_=?uWh+LLzsV3kVwWh$2UQKO0Z>EKd*tTjz{Tp-a>!?wTujJ*&ZCfxbfta z%C>%1qQtA}acu0#L=kwj`)vc)#SZeYPmd1qzdWyyL;UO-mqtPo*GAf)o9w&s~@a{7X&cy9b^B9S~g+rOe^-o_Uzy|Qu%4;yy3ax z#5o;Op5gV6z$K5vUXy)D>9GI0=8D zsN8Hw@1g1tb~R$Ui_4fHq<1S%K3~#VYm82UAJ;3b)NA#E;rcveEftR4CrjU-plq-y z(X8S)WJmickW%-(5(03UVg|8qxw3!mfxYYDmFR(E*d=`f-ex}4TrZYof)Yr3tql7B zP<>y`@=9*^!+;yQ;@o44?+xn>O_CGue^pJGp#A)^2y%EC*CMtqSYA3s<|0Khn3!VP ztB1dy6@HnI5^=&xHHa(gl*&lX)D(_AXfmdF3a$x+ZQ(f3@CnjrclHe<@AP_F^G#`% zQSw^->D??Jn@8#mKm$_SpHJ|Fk>DMS@!T{7Hp73PHLEF}a|@uEhLQ9><_`bj?gnRT z14xL+FBKO5y+-FW|_m7!fs^n>vckY{UXRbkGx_H!a0-f>4wl{qu^Rcg} z0>pDG3ds-zBNIdhOqfn~-mmuS_LgYy0Qu&DC!z!wGbLC4 zMekjWQW0%pTiLvQ*m(jK$>cj(oPP>G-H#;9pNG|;A$lcpa`zbIziM2-uQmAQd82E` zVoP9{E3|I{esY}8x}tl+dq{F`EPZMBdG1KYh4;6 z+;ARPin1P=Ksiev?p>u>gTnoBFc_^F7R6RbTy~R?uZim3x7$aixL1cNuLL5xzc;Pk zDO(Yl=v%w6R68MG?!&U!N^ieiJBv7b+#BB|EPZrK>`m4uYyMgsKpkI#Ll;13eAXk+ zvTq_T9LOfTyOSF}uza&62b%dQwnFFQWBhL@Rsbuq_|Ff&UgYs>vySg|fzJAA=KJ@h zj{Mu|J<<1G1(!FLgIA@A4+*p6N#^NdxypYFk~r+Xpg#Vul>i^$E_8yC3I!Nx8%paA z<8AdkoiRO4;oKai2)CxV^}HmM8i1VT+uV-=xtut@Jy`R3sjP3GTj>0-icp?q{3$Pp zZ!!k1nM>+0yr_M;@!bV_Y$F%i_5@d1eueF7?iVBZhoV2wRPeb>@D2WhOk$)BUc0qf zoOePt0DedYLRguwGBEAkuJ0b>TG8QiZW4lfFk!6fz0gI;%?fWB^0%&9QYa`;;Skx~l zlRww>SfaU1foIz~!vWjgq_D5zxFf8Ol#K+rWFa`8S^3sW3-diS|ON) zJtruH2)70#V5|*10ad{{npdpQrz?!3eU~EEE(i66ChDgsZ0`F|M1-yvq)9&@x%zOEytph*S(8Ks{W7dCQC~2BSLu}t`X%o{m z0sH@boaC*$xop+BvGSVSzR#%7QipA>Rv!3P+*TNI=(}(Ut8}%eG4!WrpZ6}jK-yZ~EubabG2cMk< 
z98ZsD>98RvO0L8_?O%BocwesJczwko#u(DC`9iw*btUro4#rr(iKbk0EZ2tT-l8|+ z)bE#ybW$pt=54N{Vga|XV?K_#DK5vYJ#wqo3 zu)~}b%s93vJBH#|3v&k&4dbHGiC}ds_Jib{#e!GpDraG13uKbu{7f1gu93_a_Yo)%oaxVn{#RHg@-usPnR*ddVs!GI?dyBvo!zK<@Q>PgUK&K+;4rA<>ueE zr(9X%X~^cloL6M{KSm^hFjC(aYx%xuCE%R0NwNO&qkyP)DWa9-ufG_p4qR- z_rJYTZ{R^`_wRg^+RAL#E1qdDP6Gcq*Ti03b`AcLEuWRTf56w`A_;FEn6^!-26RIV zpzQ3dM2_HGha%FZLs?}8EkSqR$}|7F2!p9(tdeJbB6}4GS}U!`Ei`nt9`1i`MCz1; z+erP3&T!N2z7fYEB3407JEz2K>s@rrt+%={nHs()xpKqAbKP@*{L)!CfqhO7T z;vho_stW=C!p16SCGkAZe_K4L5Uy()lqlMFc%6xa6=4l*X%WvIJv|_@r0}7Y9NwDS{KnF01Xh%6Kygou>0}j* zNRF3K%x8x|auP*MS5ePM{$7tp#}0N|G{@x7NAY(~PeVSP$fHk2E zypjTWPcQUBdi5E0>@{Czz=(L#7%h~K?C6*(amy6iW6k9=;Po{bP zu)gMPl>S4jrFcvDNts^w&K_zXJ#X>qgHb%7h?NZun$0ru;4DIt%=r9xD{CjfC`u@> zV#3{h@267xBf_iG>DExbw=C)cOB#l_Fc`EsV2vPCKHiulgvlIaF6QZp$|uJ)p8MHa z?dkQILROb<@}-5lpbVdED%Gj(Ma=KyJOPvevy@CE7Hvm&@LMr$&KvbO>ffU zjEi*l1x}Aek|^x>`^c5V4L+xi@=>Q)GX>bWxJWO!)<9-Ae6E#}_96LAVk+s+c&!u-JilX#J4_=g|(40+~rXb3i5Ibt|Euk`d z5HEg{UU$h#hXy$JQ(vHTG*x!Ko`xfC9TtFp-`Kek)hdKg8;|f8<{#8>DKEK4Fux_P z>%VPze=$$lRUjoCNldQ^R9gXhr-=r7JyiV6i@NnyKZ_$|KdpgfIpurm<{z0iAV5|- zn4>FsgQ|L9I9ecYa#I5|jqUG&LHb7Mx6Hpr$o&0ojC?K9;v#nB1MyVm6E;TDVkZd()m6d`(x%02M_n#-0 zQmz{(t}y559M5})19cs>pDrlXxWZYDgcN!>eLPdpowmup7t>mNPOQ1QlCmpKbrEq9 zx!#3#eUn5$q6{Yos!8nh2IVrD)*w3b%%@zztvP0bGnz|Q z@c;vhB;b{^AHJ8OCL#(LO-YdDL=yP-{DmG0huM0(5{hfX$ulP9-5xP~U2@r-Ve{I$ zwh==50gfxFF(^G3!_TtxCu??~tt_qIha>tO0QU`h{Bf{!@Necn>#0EaC5=uz(SZhFdV>NS!#1y;j~V z_Xrz$-iiMpT?NI*@3&aVdU~$bDLq5?MT)4BGUwU6{U-w|Gr5u5rl%*WNW@pB_Q&lqU z$jXuV7Y?O^TzEkyD#}EE(FcV|BTnS*FnDIUXZ(ZJsox?A{7WX}i;f|I#OwK&h1i48!;g+3#Yyr|?=+qQx3R+4n<{!$`f9fZAR&qtMEw}^4yi2Z=az; zOE~nR&bfZ&@}|?nBz!rbUM|pF9 zS=6;r;!4ItT1Hj9;n#1GG63acb)?`!w5-0Ld`rV(hbQ8JzAp;iUH$AVdpWOyNxb1m zEo|VRo82KT-mVsBy#XkQ0R(HnovD_;fu$JV4dYnk5nF(X7kS1|kKh_X@$*EDO~}k> zE6MW<_c8%y&+W)7pI^o2*UDw1%GD95Dt}I_0zuNWNv^R(=!LaH1d_IDbQrFVkP1={vAWS^S1cM<89j~8 z`$r6)Zq8YFV*8`0_P#Q5@7ee$3bc~}8LH;dH%|fMpyhyV;OEy3Uxq^+l`GAjy(abJx^0KHl3%GObo=n 
zV$(H(gm4aazw6g@K$d@Nl_JR5SGCt@VJ&i7r`WPU=EiCSVK+CK>Gi&KUY%a7bk^ho zD$=38<=^4H3OU%MapRV>lwI z$aCH`dCTAtaadYsyrugb-vVdmXs@Vs{gAcW338#7_Y^LYXxL68u|Hr&ZHdz7jwmeek5Ad_l?)6v0mCbq&4e zqmTf!*@K+MhX>&AE5*_1Pd^vPn0KLC^NQO@EBqiB>C8|dsNCsKJIQcGMq-t#qHUi? zG<`pcaOAE>%&d+chuS4@>P-849LBQr&?tQ`v!o!TvVU5dMY(g?9cEG)aFJhCmdsT~ zbWj$`&sCteLWkQPJzh{`44m#f4DAXt&k)RDaaN;`8?5%7%;Qpv7BqgZ_C6x+@$E<*Nv3W*1gqdmvP98rXFTzgd0kjN&oamB<{)qu ziWq4w|G`^?1cGN!MxM|OW-_IGAGNAD0{pR3dz7D7qnGve0?ay}X(WfRx(BWIb>z~& zkN=vd_oT=NqjD^#MdD}m$7^Vvnc#Bpyd|1c+8W5a>V!d)>iZeo7gy|~x4bts>TKvQ zy@aYHUHvA#tvfLyMI{`TVV_30a(-g7j?*`qs=4yorwr$6IPCs(!rC5@ z)LKc)B)X3Lc&YTMqwhSJgI$3zCXK3*wkt09P?7_pWjiR}`F%&*l2vQDV?+6)<8G?X zo>Y;hx~G_J1uf~(SHh9!4sdBlIHh>rp2_SHkxqcI1?qnvBdrd5yM4R>&L6#jpFaW) zrUK}Dm#5_QgUK=A_iQLou9+6Ocqz3g%Jqj}f7_0hFjb?mMfRXi<^{Z_YK^MVmw5>t zEA{n6YRe8~a>-QMbVjW7U1~A1X}h#t?QU3)wR5Imoa+=NE_)eUyPPchHJfB47dFZ< zf>YuiJ+e8@>5DLXRd*Hz2)R}Mq3Yhii@WRuTVp+S#%Mj^`49=iQka@ba_Y+YMkGaAZTxBWJyxJm`}W-eX;~aBxj{ zRQ(j#VNl*mRiDZ&Y)1DLWVyh&dvItTOcWWy*zb+*PN;-zdGm$VH z{z&8R_-ceod*ZDnLm9lCp|WP}$Zsp?#~+;sIV={iuK9C}i1QflyHxhCk=1l)=y9%l zN=E7~+tujMl;k{+b{p(qyz?!E4tM1{e(y%62*JtP->C2xNE0rptqiXYnU_AHz3_B9Mskfh)rC3~HiA{k*p7j2kbcL9qufChK5 zm*oo{p&-s5$a$4({56U+eqTS@N4Q6p)?tX6XFXHWR&hDJ z^MIQ2BH#?!Rd}o5EZW1q`$}pwXDTp|m7(z`okt-pE7Nzs)!KdDu7fg%c)>LZ370em zNfO8qo4dKjvts#;zPVSt>52PFg2xfCf_K7_(LubOu&R7krOvQ)wSBcq2?2A;{M!8T z_4Ecc+x-5G5k1wT0e*23NK8$RwBlYGh>-ermSESA#Qw68a$ScV;xs1QoN}Y=-VYGQ z?!A3zIrv$!Ut-Y4Ui&7Adl$58x9@IMI?e@OI->ZZ~H_l;#>MAkD>-?d-;=e8d`iZ*N|K0kuTbbvAZd0Pr+Hs9QbQ!(!%EE?M zj;nTbZG2dHm#9%!cUd38b%sm0H&zEMG_pH|u|s^aUT^F7c_{H)q&Ym?MyZzi<6{cL zj)c(Ha8drS&?(%C`tqrtRRP@J6!CP8w|GKBR3nt$lPPm0{S||bUw58@rxT6Qcrc1o zUtFLZlClfrJm}gpf$FviJgv^3<=jBva$xT zCXBb+_E0L+Fm$&4psBv$F4U2sG2`n=W`?Sx~NIWt(3W zfr&}HDBiHEs{guNsO*6nga@~zP=ipZva@?pG>>w{u_eeMx*(D*DB7*wR8mW*FSVxL zIRA|8cb+o1S(y?mMfNc3&Ajr7#U;XYXGwq$Y>&L(57+DPBP?69Av{Y+VZngLoJ&INaE0ji#`kM5EtjfqvPEFPW=TKwHDUy0T=PD% z99sojE^3G2;fwSl8tF4AL3fuCJ|sk`JUzdA>kX2=l~K~+>RGy+n%-8f%G`2mSbZX? 
z$-dicXcfEoDbPFuxWLhzmS^3rrk+XbXK5{6`{3)A$j#hxR@jP*@1&xJ?g*|Gb^oK= z!Zi($=~qXm6LjrB32Z*s;-Gqfhn?<70MPcJAYCc;%uP3M%hoW5e0-1SBw)&bqY2c7K132wNOeh;j_Y#42vAngJBqvs2IX!e*OYwn zWry4ce#LnY(a<6I8BDa28r~2=^%6xpw^;g1xBE~jYRvln9!!W$ORg94gw}1X)2y(2 zC5v@k-L*0(`gp|ds|H^TWlSM5e>62~k`hUBNLYBYNdg7KvhtjE_7?*?6%OwEI`khWlq$?V2ad6UYh#Iq3F8j_eSQL9Yg zko_Q@^CD%2V7gnOpF$unFVQvN(j%O*Qf|4XUPUy_qS*J{v!1b0gv8 z;{*jI+Y9)?#DG5Ygj@TX!Ob!vspf+vy-VZH>F;q^N#i^em03@9)Hy0bAZyBeF=A>6fuo^-kv%UKxGlcboL;qm$I*C~Gnmjn+8N z=i05#H=dWy+3Z{~)Lxewv>pvoCf&j(W%y)PQckx;#Yn6eX{^e6IU-F9i8LuMTDZpt@D%;B9B`-M+mc&4Z{hAfr! z!sP-;?gPyY2BXSXJqCc(ey}%i#($_{rrU=9Nk&_qZM0G{@4ixYpoUlMl@!2+7{Qp) z=l1b}l~HbLuA{=zK>rb@5Uwp`{3rAZ}rOt5KmjGJ20S&&P<^H9WeK;aZd*RW%_K$%sH^;Dn& z$!UJ{!Dz~1!Hekb4Jyhf8(CqzYt1abs8%R_She;cfoE#85T!$4^IdCD*zGsQ-DooZ z8)p)m73qHZPDRp#n);hw7F-mb`cZF(FH&s$?>Qh`_7R_(_B^$MI6w*_N*s)farrBu z@Y&Hx@t(Au&WA?|s^x=Ch!-$;EYp+j#XN|hbyrj6&0qg6Dsg70cf=DTue3heD8p5v z!Z%nS(p9F%D@L7BPG?n37nLc>ACsp-xDQcI%3U`)?}$n?jaJc9V|VsFMpv- zb|EZ(mT#>wl`Lt6j3iOi{t!Jwqdm;U(7lH;heMT1N2Z>k&bj_UX8jkAli{# zTp*HAopdFlds^zfV4n`lF?%k{evHO7gQURH>OmC*q9YJ{b1)B(a4gVaVbAnF|Kbz% zlrJ_nofx=4yEgTV%F>Nt<**fzR=EAHg{KIWyg%#fPm_iR7K__!tQmd0vZ`YeN4EK|7)Fg zjGi^2d)aM$O_H-DM^0A{Xce`(Wee#7TGI%4ug|xqLJwsmnS$arB7=XaRRcB4MdtYD zJ?Fx4>vp`kq@54)Nn1&cy?+graGXVl*{cc_BZ_$8AtF7{g=g4KYsD0^K9Tj54u;sm z0rYmh-~QG6!?!g%7E8ZwY!{-sJT%|jYb-Hs3I~zzS;Y3oEfYR%J>e)IKW6XVS~k?} zQTY_-SJ}IfKPBm|cG}TB-Ci3{N`r3|Xk{o^)qh&nupilJrYDqpU*C!vcg<}*-Lgkx zt$V|WXSHBTVpe(i-ci#-_Xk5f9lnXA-CaQ#LspG9W`_VOO^JnnuS^{M!`_J#3$E4G zKn?Bp^qVu*Z9prLST_=h_8PuZP3wzP%T!=c5jfSZSn5S~8|u`R>y~gn{d!NcCyXZw2>`KYJ6#(G}EZZr_K1bQaL^M(N9| zVqMD8N?=e0DjdoZJ_y%@IkMTNe_60PZ{5U?venUhYwR8bu&_ydab%FrA% z<=H&wXZ7=rBoUQa3+-z{MAq=n%(H=hB%PAX_hqaT%62EeC8##LuvTZ8yzS)!wbUAb# z99z%&Tfz6U?Xb94PS0acEHALLU0p&;AOD&7AMyZQYEBdR?+O``)M&O<*Kd^*Y~J*$ zk@dU431O2a>(coQ*P$$-YfPdzz6$HWrj6`ilvMU*})G;SfD(*pHg6=1a z8p92>E=TM8hTqa$YXmY%v;O@)174tAly}$DHyVxRBO?&=4fE0Q3W{t;-y}F!CgHGR>|8RedNGw~)3w=iJw3TcPLMv*?yM%l4(2 
zcSw_807ut;XNlkfz>f0W(;x3n0Uk=mk3gjM+wcrzE1Bn9REjr4li{shFd#shwjlE` zz3<@8sY(mKGVRJ_sD(7==ak3JI?k zxK$|dnvuE3`iGO+fl`@|gh3!S_j0ouuuqaig%$~};pPoW|t)m&=I0^1SFvGD?4|@lW%arhoaOa?SWsVQCf`nIB?6z{#2v8>uHJh z@to_My|K6*HMetL$sh}B7q@t{ad@^6gmN+UbnGFk>#Q$Fy!0)*%d?{f0~#cGpf{N* z6VfhR1b?#@v3NYsTj<{GyJ`G(j?7OON;qcdn(UC=YXjhr1=MuT59ePg<+$k%Z&nHb z%&EYIP5~`lGZ&gQ{b$OIBq6BD?If;+&I@H5-AUB?=Q%~Kx5J#*X)NVJnw7~*op{YR zVw$R4Z6;WYn7yes4_^ZHMUSZl%d{~fnI4^uAzFq1QBeaBo?o-b6+14vKO$QQH4|xu zMwp#(3gA%=@Po`TquIo~s1iD`nL24rM+7#)bpySRwRLr_Gui{+|JuXt6>YSmlq#ZQ9^GQhs9#Y{2G-uydf0T34wcX0P zHOHeI|n3^N13uhr(x79{eFD{ zY5%H*+iayhz5p(&-NyXHi!p5iyoD6H zIR%6*;TUGayYdi4B2&?36H_TnPoRdwTfI5`z-zZ$+n0|#HQK=Yhe85;GT_0UfBvjp zD{9}se{*2RIyVyUp&Az=j;MHqbjo#FTNT=ZXm#TTie*wvPh09h2@bcfz%*f*)j-|} zpiQQ8UN|q|caK|)7af17ZS6{-I~~YZ-y+7A@B|uxQtG{&`H+lfxp{QuQrz={ET8m4 zvOzxS+$F)0Oe5oac>gOZHzYF9!X|h~1f=a#{b9el&)9380yK_Ag8n>-P2+O{j!*$$ zYtVU{`Wq)p2ZLg|kB(!^JD`ZIsNUmrSGU`5>ZPgB0o z;l)5<9LI3Q)yTy6U^OH2;@<`w>Bgv-q9ON89^4V>U1}|fGz+}EDPAlkfTy$|L4!+G zaH>I-P?%&>#V|rzwea3P;)LQb4W<`+QBS?HoruzS6KaJJ(2b z*i#{?|C)kCY8Hy=Hi{{?(BgY~dLBwzy;S2r$m0u25vpjYUYh;5uUR~O$$~3v!Rv#9 z^^j+=vmnR_Ll!B#B1%kh+-45z)`GI9Se@iUqJ>6fPtf|G8jC-61aL&=oj!Y!b%|DT z=kj)`)gK3s#3Hu;fDTnnG^F^Kq(cx$@m)KFKrKc*}qy_p0;kCSOc4r5BY&hel8T|B16Q%Un7iE-Wgk=g}zIn z%gqHY`9^A!>Qn*l`1w=w@>_s__FiJ1bI6eVmqfwBxcdw+t3S~Pl%Yvug>;&m3(-^T zDOS8@s#r^V-r7);+ju&wFwT`peXiay2DN6o#dU+mqx0>WTJGc0q=wYaUZ?(O;9iWX zEme(diPSQbQk7-@oI(aJ;_6!Nefom#B+4I{KL~4CA70E~4G7>IfW+t+!RhDuW!_b< z{>{o}l>1_yr}bXMr^spRsZ^cfjq7(E=h5J@4{pH@e;b9_bD8k#0jV#%{4JFlMUG#Q zB5`?B^ZwX24xT@1^t(jG`;n$`s8F0OHs}z`-52lTiTaO9uFHz5PhfoY(_#jnxR8+? 
zL*5ZbdQpDD&>6*NTlP?F7Uh!^GYSWh;Fs%EsCZ^!dskrpJufB{s2enVu8y7kBrf=u z%^+JzRIu3r)-S(}Mirea7%7v-A*&D_iOVi@Q6ud7XmPZ}Rdg@G{+Yn!arS$W;a8+r zp+UxTT*N7PlCMAs{$L|yYER0Xb}!b@usixR-e(#IAc@0193?NBJz|OdxMKp+HKO9nV1TUK;PS==`yzkw@jWM^%`sZXhbP$vs5{SI!B8QjqmP8{!0-ca zNtzMIBU2Kd%v8Ez^9e^fSiHII$v<&K9EOD12y<%}e?*`J+-~*oj%4`K)xh$rwBuU% zhRwfN(<=sU3MgagJ2z_l7(1P>gyMGfR`4mU6wWjj7;?2o8_Oq0(S!<_p8cTk>deZZ zL6rWCfA1P-lLVTIEG#?-spV07y`o6@4F8TAod`R9p}11v9mk;if_`60v9%LZ(^AC= zkr}?X?3WsQkzFsLx-0A)F_S)&Y zP@iCg&8ppY05|MQ!Q%GX^xV}ktNbQ^S<`3AA&zcr(Obw^1aV*^Vrr6b8UY2j(3M+* znYo%_E9EL;5FQz2SO5`K*Yqn2tvj7vHnx3}gp;HAPI~rEb6-^^Z~JkTUy_f7;_6}S zJQe;CSL-C%L5mzGEoeTbTbkbB3mE zU{U|L_mbYR5(W|q7?_6VEfCo)B$SFM&&CZ`VMR)*VYAtlwFcn?YuHcxx%IF+lAVCR zXe+SPLP|(gwRSYoxYiL@-lBI9-{m^WOig$90TPOw4|s`~dW}v1)tp>QA5>|YxbtxG z2{9={6s4F+;P4D}-cxaT9MZ_qv?^YfIs@vu!;4+?%l1`$DK#2c=lb9evvQ&OjSoq* zYg$^x_O&X@O6T7~BjyQee$_KmgLs}k+`Mli=wSczprAg9B9NN)O@QGz)z}}G^-d?SpPek{PRL`n+J2yuC#GdknU%+F&#QOPOfmYH zU4h}Vv{0ryWkb>4nKmyLaR9m}Bla2VeT67|-hJU%DXx7odjv(yABm{Z95sKDnzY)! z{#EKt8M1|?G4j~53U#CBt7o|2R@-3G zQ&5_+puN&tccw>TH$?#(+;K6HQAmPUv$`mvFQ) z$U6KA%3trBwB>c}nOvyO{CugkLi-JUX$5TK_OoMcb0Ff!$8^p#9?&vq4*I2# zaV;Cajv9u!SqM-36}kGr8#%x0SB_H<_w8tP9f3ngVJ!VyVi`uwxGvN`Qic$?W?S3= zcj)$02UcFXUmgzOL`S{kyS)n4ER7WnDdPPoJ9K7-&Brv!F|8HS2lH%p^M;|xO2W60 zKS`$^g+?+kF>~^nQmEHMrwVLxfEDDQQIZfX5^Wb5d^f-;FuM9x=bGD!`y3W2y| zY=%n-J!g>ZR?De3;&~P=kI5&js_qb&TR25C3cLCN`|d4?gix73+5=>7-AYnj3GT(w z@7TuhSn({b;y52|%qF^TXUz?dH}9NY#T^%=Xi!%H4TJZOyMI*G!;&LJ4L+k(Tn%bx zcty5|d=16nV^hseqDpWEWF(20PTuuPz`pgTF4?f|tZuV7#^dE;P02G8b;uSN!c8<4 zWBb!IHi`QqpX$Tzh;-Z5?kZ7nKJdfiEj0PC`ClEwZ%kZ8Ys^(0t#i>8D!UwM_(Z5^ z+ZrR@)w1j)5$AvQZZYGNP|lG`aBLu=mai*X&cB>wX$9RV5||L>*Br;>`p#3UQbIy< zI5tFh!n9f&ujP#93f4)~{PqJci{)1oc4`VkMD-ay&Djm}m7jdTuNwCm2T68W=$&NO z=1WAsa;21S5h1iOaeiJnrc%+DzLTb>J(v*W)itz}gZ{w#E5OpHw59f;k} z%ez7lt?VyaDqh_oT}H>+@4l@mxU?;NnrOOar)*|B%eh@$H4CMbo5$R)uG@zL)?vij zqP!O+lKN{+#MAQalKN)%{I8;xDBYj?A7x$>WSd=k)AQsIpOY%JGT7;vkru1yU89@? 
zQo~%1Vy?WC*42{s&4`{}K!1Z~u;;Zv)MP_rT!p1nJURfVsb!!cSJAnk?6Ph4OVP#= zVMED)E@w$;BZaqduio!Vl6DCmOOj%QEu6_!IqfuVeoEQR+#LNXydV5#c7S^P`~9lt zdxcD)FI*%8#&zpg7b^)uhAdcvo*GIZBcb#}H7oPUDDO5-Dj2!FPEh}tM8%@e zRGn?2R#PEw=2H>ISrVY{z@jp1HN+hgWyyd~=qD7ba1i;qa3d*B=UGwxRv8Ho?hkbn z8I9>Y9TSYmyed1wZhFV5twbkX-C2jBl8U7Y9TS!lq3TfGamHBYJj+qv{GvWwxXA)g za!RiGde-dzTGP1|32qhqLsJH%-p|Pvmo0UN9x9<&F&pn*mSOP8zKiBO+BC!mk+I!J z;*+VkiY)GTwvyxpc;E6<>@M&uFe@_pNob_rZDQ5!0JSpw-O)CEc{OWE-|nO@!A%tm zDPvjs=4!C|m7#ivnm))8J}FJsr0r!ag~`)Y}0 z+s<@oAX&tTj)`M+a%ICqW*<3=Twq(x;fY;3Fx!~Z(rrD+hGK)4eSDHtew$Lb(7%A` z7APCsd_o$U#qpvJJtoVcR-sdtN%%gTtw|uxCN)@fEu=Ilqs1KffFATNdL$8*)vZ9r z3|1*sJdZjBiRnaLFrW?SX5O)FXHXdKntrUztZ-oV(mglHYpP2fFj>6q`6{_%HI2nA zT)7oCT2DTyyrjA3ZC}?{*0t%T__2Fb9y;c2k+b%Y?TW@8UH-`Q#%l2@2 z;J;z?7U=a0@Cid>nCwo`NXa@8W762U9u3;=JveQ&ved(iC7G~biJ*>GZE$??`|B?gEOecpm#-^bwQQdXTAtnD{Liurw zhW9kRBVn9$)ywVUyuuZrt*-LN3MS^~u-Ym!!XqUIFV*P@j8?R5+WygN!)}Kg^97*B zZ+Lty3_hW*WmVa9eGVJGa?{V{$T9k)>dy&4C^Qy(*G>J|^vY=Er+gv^d_)FTvl0hd z5xMa*Y1j1Q#rgyY&KI;5-7!|jq^UJ@n* z_5!)FDMEe*Hgcz%Zgn;*6w3slAt9=>_2(_U4W-!)jbch)UElPGIg_M_7h0T zF-5~0j>UFD4yP5Na2os%od9xVFYv)v9^bw3*C3%ksWl}*1K~&y8 zjPD3cw3os&BU1}zv^w^SG^!%QT#BBe;&Sszl968m? 
zC~W4`9y(wWYvVSZR3QI)T&L!XTol~d9Z*GHt9KZ!fgN#19c3n)<^mtKQF&$<%e~|L zOD+KJ^hAQ!@8=z7)Q$)*inmm3x|3enPHqwl*|~qDL4r>@4>SbXm`*v|G|h&6$({<< z#qtF1R@KxBe#M6NM+g288~6r$V9_8+RBT0%-;hr#f2EifE>{B*$jP^#eW7 z{^}|4i5$S=1c+WK8Ul6y=K;4sv5B5|$PL`44;iK>Cf*$fpOt%o;JS2&LNN)`290%~ z(}WJo6a^5}2RzvE@G1VUlMnxs4Vv%$ePwi_DgQVgV)GvZ&2-YKlRg8 ziKapTaO2!bG{FA1fCA4V+s4bYbL*g2)+MV*BxVZziUOxWFNi;m-m$iK>8m5~zZ&@H zlOG0;{~pam56!2{3=~H9%?rlI(p{~d3gw4Sy$VMPR>?UtwrB7hm&pBX?-58!e-%k!1u@0ELkrJF;s z@F!3gHPQX@g7=wd67bj&O9!rw@-?4bK|mJ`H6RMgUKMbv{8KxDDCeUm{%vB7(G%Mb z6J|pNx@wpR$g(1vuj#GgX#WTT9>&HD%syKjN8s2=Tjm;VZ;AW-xP$z+Sp0ZD8jWEZ2*XRX)}nw-`djhJC8-t7{pN2GAB#S%8TB14^ZsnTVb;a z0!A)>2^qliU^#GKq&+s1nb=5u&hXXE*c>ZAC zYvzGc0yIF#C4qIXqNEhVANp^2X+W9SQ_^_r=f56({7MTC`-K+)GL|L?dCl$Tse%b2 z!WafX-S{Qd;O)Q1V#QM+vEJn+k$8sg*PPN07Q%X(PryE+PI3LvDb zDU?hf{w5qXHQ;&SgBrk^tqZo@Y8#yh*OU^8Pny3y%P5yoc0(He34ssr03}m$xUc?i zT%4dnehDgM0ZrMY+vb>r5>u(2zTk+lhGRg_2TvGJzs+`Lf)fv*Gw6rps_b?$^=QCh_rvB7IT8{kXWCJ~R^N4v@Y{{R+3CkLDa4%<@)w zd3+x6IXBuR1A^n(Oa*d29HTlk{V%`b$)S4a(9u?r759$B3J>4aG&rg>XJhB~S&Uv} zKNbPn;5CcDDIJ~@BmDQQn$xie#vPy9Q>wi#t^x=AMf2>lH3ggxzjyGAsT&2GR5=qTLLo3cqR#rd10vEkqn zpRW}^&;!1;7%t*?ND**=@ZvokJFJ)O)wc03^C)d<+p9-wc>-wJjM z{;M2cYiO?$G^ z08l+jb$R(vC7Y%$)gf|v64fVD3SGE}#RIh@R>?af0mCd#9Y=jB539~wY6c`Z)g7Qr#ZKZY zIF9`b1DUvySdm?9*oceqFm-oqS?ThBSS5SRh!@i!JXADf?^Wjhw?zc^C4gBLQ8Z_K zdFfEg_FqTC2h94$zwK?i|Bz#7Zjawh?hg;@39GZ0x1Xd44r2#kvX=}{jmIz~#=j;3 ze5DJjz>2?esZEeSFxTvLqNhZlU=}ET`lZVv8zC|?B{0;4_P*WUT6GJVV&LrwN>kP9 zQy^DTkpx~RlZNrdJ?*c&yl*4~N)W##DgC+c-q>Hb5AYgcc8Xgf#~_1>CT5V|RIl*y zT&=Q~mj&Y7U?e`cO5g%2abfoXfbf4KZ>ce;9!vANB`X1MZ19TOs=cl@f^YCAa1bYN z5PfO&*aR%r6nqMGefVQ=KL7I>%{W+6#3YQ2jJK0WBi~@#pb8S)$3~Ww0Hl(*Id93h zQ;|&vkm@$LR1>hR|99n4Sh>8y7mOAg_OoT(0SGMl2p*MAFUys)BSr#V)?5LHXD+3~ zzkv|QB1km{`E7p-jjl-a5gR54xIHC9mekJQ#4{E@bXFCLT?bz~5SI@n+Ci!RHE%c> zppoEOGEF=Tsp^m=q(xPP+2u#-ed8F&9Xr@#7b4_9fC5#Aqe=d-PubtYOPN4JcZ5S~ z5}x0SW0W8M9S%7nbBj-T+$|&he_&`Q%EUqh2A?jS-1}Esw(aH{_EX7-qyiWA0RL)D zp|!l_ux9Baz=;q96uDPOQWLHe{~|mufiF)SQWMVG~}3A 
z;UG5w1g4yE&10(xz9xUq{T6hEdR|<(ogS4HkN4Qnkl~;a^L=y{<#grdvKby}h8a>Xh!Tk0jl?@A6-5n&7hS9*`4-tP)uQV8>4D)N#ckpMT#BBr2 z6Vp9t?Ww#x4iR;?&DCRS5Z5OFW_xMm-~T=G7L*)ygc(I0AFbe&T#XX&rNOC#pu>1( zzkDD;g@}@}#Z#Xy$;nc>{8xdo;^RGu$Xb{%vYTp)?F!{AQPAK&>VbN$a`l^q!VK=} z0`e~lw@s0WroMnb@^7weQG~PLOxd#HP4O(&8+}odgKG*9|0H+w!_4^QU1u%M2>{8! zGQA7uD>tU*e`DOA6sh81R-F9w1dWdy#08cBKwdnm+D8!HidNAfaHQAIij?vg5Y_{D zxYu~GM*MG$gd2c|(2>XKcOB6E7>tl?=n_!?)nO)6OoAEP>T03;dIbPhuLaf8s@gps z2q*r%1}se(WZU3FZ7ll+ljxJO=w<>yG=Y5r&J=7oEiCbn9HTAq%+7x2k^eC`7!1Y; z=WX}v%I^bi!l6Ke)BX{N|6Z5oYJ++rNAL%50ieldM{6z4|9Ph>^3}7-Bnpz8gV2qx z0*6?*E|U$=ATu()VbL}BFbcImIMEf}%^yF%;M;7I`TGf;&EIf;RuY)f%AlndGZoPy zvNA6~oH=W|{6Bg6Y(BMl6#=V0fY0EH=bOmi<299$_2>!-J(^m-c{n)K(!uTb04H*u zx0o61(`(veV4Xq20N)K0B_;mDHFSy8xpcB1Uv$Uv#~8r4IPf~>_Vee*Y9fFSeG4iG zsyfcg_)j_n_=X32w-~FPDA3xc-DFUdB)aBE&VRGO63*s`J^FffN`(L&DB$a9nEiJaheUZ)x=Zgw; zc%PXXZRvWxiOLoc0A9^Q00@`c=OiNi?G=HwQ8LkeswPcF#YJGcZy`7FCQT|&5pM~k z(c+-G6wnS+Jh-~?l4}0f4WUCdWiK7~GN^s`)BYLDqX>k;DkCS~6KJB}v6uDxFbN3F zZMe|POMj*MTWCBzQB2tf(7i$%Of_NPHH~{p@LyO_&z~wB=d4wQ-b2`*|8IBnDcsAZc;TYIi!Y;1o=u06EK zWP`m4dywb>#HYeipd&w|rOaqUKDO*Ii^HT9E5J%yqugq`wn@Wk2Cqw`#(PK`{i9Wy zwT03b3s@C!0{%_oh&4}Koq)|}I603`zw-WTT?P`M&CKxO7WrV@uM26S!X_q=kSHUG z*AI%+_4cyy7E>Fv`z4Y%8g?u!*jLu8cM_pzTn13mB-mQ=-tBy4 zm=lylXPZygV4;zVzSl?QZDhh*-#+SN?LtAx89X&gEYzyf= z#YCkID*AHDZ~8tMgeY^XSBQ#ZJR+7UupaMAYRMNV76HqLg@lCI+LS8Ks3r4CMbp5M zQV(QrFOPCI2-B1#>Y>)T1e52SQml7g_rU_Q~dna;sO8-3tCMbY`ZD7$&lrmi7XrHUq zu!>2%wj}1Srz5PUK&b|;W9BBW>zf}jw({@NO`Xuvga8Ykj}gw9ay3-j#AS}a5?-qH=Mmys-dAqk_r;-e#GPjU)js`xFr&-E*CZNovI3x>F%mtUv`D!Ziz(P zHkq3xw$TVoq_Zu>Y6DzfV}9$3e@~7pz>|nV@|(y-*;0GDk(fL-kJvk3%OTWwk>|V` z4SjKrNt^dEaCsD#6(=7X64A{J4Eu=iy-*Gqc@-HOg|HBg$ZHp|+0%l>kf{=%U!$W- z3*6X^+C}$Z9WU32KIZ5i3h*QGlrOTxY|2Rn5>dN)FNwAHeg1-HMnu;#Ai(Ihz$*|; z#_?V+Ib(7-2m(D z_pie^N{(D#*vSg*mY}Ld5%ZIB=8Sdp?O;#$mNa%Cf0-b+4z8KMNbb6_NgGe3X=lbf&D?fZz$jvIEs52}}I_X4`fyX?KMd3@TdYo1sR7^!tkb zl`5c|jt`3Xxiy05XF5NtFGKyhBRb7+oKiPY>lBYIG1pcclIQ8@h42uIgo`vBL2p5| 
zuy>a}ljsS2myggA*28IVOm?oGg{14!^5X54>8@?#gw^RM@+zKnD#0C))K1X)DhCmj z)(@*{k9B)Dx3R19uu_%GPSQoZiPx}z7e{?j2i~`-ozj{5jq2V-|55?Na~6Nz2IRLx zJc%d^rXFB92L2paszAYtzXP?01sqv?2_UAOf>OMZnfAiJU8$HW+4nQSHdmWp^<($w z*w&0MA^|Xs;!I+<36Zy3EcdD06*=<5AnL^+>Tv@7#C{1v>G1;B9xSH>VpJ@N$>S2k zQocL|iecU73VdJrWEd03bo^r;z@N8KGefa2*HdmmA=tZh))yTtLd6?D8qUibzS<1= z&fgJk&+B#k;Jn9Hq%wsOWzKVxh^LsZ={ZOxrs_ILxSrx{6t0qJm)gsv`tzJ~tDEO6 zN+#hZ($=k-jE$VPDsebu()}}=*lde6!1@D|gg3f~L?DqEjeQ4SEA(KtP3g==*EZ4A zsdIatiAO;`YB#qSXuI;r-At@t>^Dum%a`eQrc>QJCS0)?FR#auWWtiBl1_Bt*QS!Q286(u9{6eOse zU28cSuxl87KjoO-E(y{JwDBjqUq88c`(7J{bS{ZM8wy--tP)bFUFV&dM(d@gcwV#e ztR+i|r7d5zY-$I6PGQhCXyW5nba|8~TW^4~TV5@jhGP9^b^0qBcW59A%>l~VJ#Exv z;!zVUy`YiT&4lTHdvbCJla;=2Fi3#DFO)wI?oI1DAMW^mSfn(Jk#T9Bp{J_f@^jV~ z<$r??D)M4Tvf9ntt0@6B`Jfglx=2CAf%zJa`u(}1iX(BML7)%Fg^w`^s9hsIf;2}w z{!t?)a zgq_khTjsv9uwZ`x`YK%N4t1O}=ymL^^jteFrR>{7M?#ihw2GqoQ+)C$K0=cN``Gxq z-0pK#MBIDB2K;g#E-=IZS_{Zt|96S&iGP}>WfM?Mtc9a0r3fX#_}!^Qe3X!{YsHXU zfI@OQZ0V8s$-0#ISy2PxpDG^ce-PY@_hbe(7@pfa1e>R|xmz>2`;JZqswSim&NJm{ z>Rk)zJwxs6F41-7i#8&or(G7m-DF9Fo?v>{>O5VyZL%8PO$;lh=DTQ9}njOCp)hG1< zxLXK5`mdMUko*g+JLT8tt~hR_scNV2?`}6;H@XCuYuAoj1mPL zLEhiwfD~>>Z^WT3yoc+A;}NHz<|tveC9%^)meQVMk(mA`@Y3(&c|^E;MZSMGTbxz^ z7C&&4vwq|fPo^iFdef>+vG!66RdrXqs>qqArj(sv|V7Ey#{gPg*OYP8ct}0`Zt#sjOA( zG|`y}=NOCtjJ}uZk7e!_xm+bYQ4+_Gn zHgDEox(nX9b*#ojc@Rs%xPE7=0bIv%CndFy+NDlo5BuFo7eSJ`z_eu4`1NvRAfKL!~)>nR=LYH$p+eL#|9Nvp$K^V|z z`r99`oc?FiPyyDE$I@ebGg*~j7avqD9MR}`he_CSz!>AEIjXS4TUm4q?e4e)qj*>i zaxPeU$6c$8oX(OJEts6aB|f`sySy<$`Mtzq$8UCyv<`k&t(gy1qs_$b3u@*c5nXi7 z4rRO_yR`dzrHtXS5@r0%?vL2wJF1y$tf|%Yn15F)!M&ENOg;5D+ye~QJ6>5f#?>WT zn|ljbnu2=E>M(o9rm5Dk1BBsIrkYF~h1?}d_O-py4K{FeG;%e(&x!F3GzB^?g8W;M zlM>|>HRB9gZGt-&Ctag)x3AE%`7&XTa@t?a(nPF~x-OEtL<#-ql6Y^8YeDDNOW`c*azURE zX7dfu2i|x20&fUrbyxW3RWr+z_~jaYkiIKzcEKvouemXF7UpVyXKT>4`;veqBxmtQ z`gcP#<`7?TgRQS>?P*kzz0U6;M~4l!9#2)3`lH+5$GM;Rovb+!uA5=&*h`;WEzASt>c^1 zJU1(I*s@47(62Dd!z- zWybaS>bOQJ0V`$#f=d_zdQ-d24pS=GW;#&9J9*g|+`@WQ3Kj3W6%qP?fR>6H6w}}U 
zs955s1;fLiRs(5po`|gX9gNRT{u_PO`{>uH(s6(L6&%CZQjfnE#*t!Mouh;1*G~Tf zu~SiDRqC^&gJeGtD&lvJW#Rw_lTmA$cYi0R-v-_4E2qoWYoF+mCHLVP|b6S7Owl2lxd=oyRhHxm~j zt{8I)7~|E{7|R}CA?hS)d*?2t?Z=U<_ay(v7Pl}O?7+#X&@MEgPR%qcHC-77%6}jpVK>%J3AM+U$c`n6jQ`> z%Wy+b*Yhz{J_GTDG*(ka&((}|!6Ype2jOTiuHl5|nV)NFa?rVJCKbsdX}b7m^o>g= z1N=N&fMX|2fH=n^Mjs!sL{`C%Q5ZjBSMa=FoI6tg>3?VbHdc(4Kz$OIFVZ^&u``Ypx6pzYk+thUqG zyb>L9t?asY7u5##nze+sz4Tg`U-&1aq-s~`Ct;Xt&9@w$FY(=NZtax8(l>!paPc0) z*zmfK8Xv9Z#Oq14%EO^=V7WXm;o4pLo8Frj4cT+GxW(ID?b6H2=U84Ah^A0p6BFHe zZJRR>#{pjCG+TXV)a3du!l|%tW^_txz3j5+-Bv7ID8Lwr=GPDerT<9YO!*~qug=KZ zxu)MHhA<&C6LM?)Qz?fc7`XVx6_$NC2+nJ>DW=|Kba}?Z@|<3xHC8@I_T@AB`3SkA zNM8}5LUDb|s`u|>ZnG~nV|xP&hjce*?A=A8Q=;iWtHFw^X-cK;x{>TpBMc<+Y9&QZ z%Y)JK_+{J_;AhP`ar&tU)=T76@+|BObYTpVYlxAA;^pNS-sGMtF03YzIAI@uEpEP? z6Bhc2P!Nh0dpnA;Ggr&w30&xh!{M zk4{~2VnOZ3k0c@JIYQb(D{pxYbmyVI^HRg2IScRh#^JGA{k z*+_5A|E-1I7Yz{&N&0PiW8CUk5cA0z%()vjEFF$ntEMayE0|IeRo39d-1aO<;#tCt z=FuC$3E5Kd5*p>s?;W|F__EBy#&T=B>hkLNS$cP`x<;ziWUt9`&0AL4clW-gnt)@A zT}YldditMai280A5GsabMXz`n$V~?-$(olLLk>I@^3)W2d^GX~A=|hR(OLtKkYc*y z7hk^Q>hmJyT{Z{G()prP5Dl>1-V>>$d=3unW=qb&6p7C6gi|*8&-y&4<^u4@y(zoC zHEDfgfv=fL)$42oMAQTwhb?Zwk{@2hD|Ci5xHFc{IJ9OB&XQbB*YQL&W=VSIb^g2z zvUEM(l1-s&{RAjBzR8Fo+1!Ee)JtH+o~Q+AIoYDO{#&?VDpB3wZA*ndqJ$sRCiU!5@S8qUrrlS{ zvkBXMp2I)qy=5Lh-fo9LC87cbhS)9)d#nelSO%i|F6& z*6%nX*{!e%_zWRqPw;wmJMc@K=pB)ZV!({oTC_SkeU7)FG4sfx!RrqadVN_~kSVFg zLX6)M{iN}aqGYYr^DCOrGYCRg<4FQ6w288H62IZmzhjg#VePupG2f!CnRyg4-f-Sm zH>E~x?6ZbndwvnE@G93Jk9+-6jwgU(e5y<7E`#X_L$P*}^-E$^NqgJH+4@}94g;1|o?Z4_ z!6(uKe0II~t4yxu7s@(yj>&c>{>Ka8_{Ei);AUp0@A~SGg$4tG>F2IvU-(^&9GLTw zAHVL3XIOpLgskht4x7#s5WRt@(wruNvc_y)XgzxpJ{+I&#R?)&1^>eI%uJ#l_o^ah z53eka+$JCp7lwV>enhCTk=M+f&5t91m(US!er)PVXI2v3=h-*t>84<4kv9^4NLFX~ z1LS3|q}|hdG%`%!1l;*lLG0p*YI9-EruRHBp-t zbK84Uf|+9H_|B4h?x78hgXS-k9Ts>3(Oqc64Ck_ic?Wo&H+EjgIXl#ZgI-<=u-TGS ztYq6JcU$CL#x%-)<=vzV%XSi!_11FsI+=LLFPrO6z=oSB_&|WO`8?-lx<+JogkMx+ z^Q(hko9b(9o|!+fm-IKlIp@B=zv4I zAVe(;MX}Va3ZKO$w^A+F`4WbipOJAjo|09kLouNxZ!V>yz-|`WJ~d@bjlZi|7a4+C 
zruR)#JHR9arWIv&C~2e;ABQ4G;DP5Itv!M5j*X{>^Kc{}jKcxU<|A z`GoCRkhchVJ?4BtinCOz^pULG^g&WmFn!CrOYhHGc+lx`Ji zyAxpBjIgU;tuBRld?R!&G8vXpbt=@69X1z97iW)^Ed_u$GMTZeFh@?Pj}WlIZJXM>%8c?A50DovAab#m>!xe*$j zVP=p*CM&^iP$RQ-Attzrviibf&>Am6zV(S&lRlMm^iR>784=~pfzN~R_C-Om6%Whk zzg2ESUMRzq&u+V3Q8QZNoj&U9j%m4)Ab>E+`oLy?zuLRrVCOFF5?Fof4K)<$*YVyy z2&f61rvoU~^b1tm#@)9r(>e7OZ2HwMJ1Nb+Y zMnGNp+P3qaS(uR$I||!OPVCzx?|;3*rJ z6-a=(ArJi7?G=vl`V|^TZo`xP-vqEJ{t#B9&xzYs)OeZm5J0m}d7;#O7@V)?yHyi3 zME|b0A_YqD%oth^8DChBM&u28JZ_&Cthy|?TSr&zZPCSh?E+mp?b4F@{{b&)S)4LI55%HU&;^!=Awkg0AxJ=cm=x3`L$62beZRN$H$}Kq{9y1be z?{2{5U_+;YUr0@V={9Ken=kqI%;x_D5#ZFeJ{UF685tXIKH9?)5=}()^FhU?6viW3 zb;8QS^s^)d2@!yHFS(6vO1-N3b_1dQ*k{k84`bSZ6#uFexc1vD(~w?DaE}=SGc1oe zVuQF&85K}_5aMm|D;>ErI)^B1&^-%tVn3SOsI{E=F_%C_?;*<#2udt){QPm!UYs|6 zOdS64zW`i7DeI(ON_vUUsLVSgr~vR49?x_zv6Tb_PK0)kFRlt2O6=IsrO_nAk1{6T zW%L-j+Mh@k95yt9dX9fd4lPT`c6&T{#lfP7c2(&wIS{k8kHITAAiec5o$VeF=x4y) z@Z*G|k{(@9Eo#%mglYhsGy316{)-asLCX+fz77iMG2yN!-gzT)Ugzi$T;y#b9_ZY` zzYbQHgGiPy=k*%6EDdJuW5eX3F8?%K0N|C#^>4XI@Dsgn zs0J{8zk$T#IV*+Ml|QfmRF188x6?V5n{=AVQxrDszhWsQmO|H!_b-YXchnBj19ty8uhrh2@&W(;qyM4P=D3!x$DNU9Ji1 zM}Zv(G<(g>TnC-(BiP7#12?ps!sfj>88~rtVq&7aY9E^rkl7DWpfD;yZ3C@LlVQ63~;F1kzGIs?v8@ShIl^$gs0fTrVM)K8^i_ z;O?WgLTiqj#vaov3QVj*mWXsJ#e;LatQ3Ph z1r0s9)M#K?irq_R&&4dl3`RU^67=xZSYr`+eH^5Ad*1uIpLZ7bx5i(p0rU#2d zhy&a@Vtyx!s*Lz|gs4<1V6n;Xb{pQplilr7o!Kq!99!yzj+$-!TCwBSK)vfKx4#$Gc z^;WP$?D47;Icl;r@Gm9ChP}xw9WV}HAY!-g9nPQ83Zz>4=FuLcrQNz(^-Gew>sRsb4W;9BjKbpY)?$7{n?tskcNUVA16vBybTtLPPg!tw&FF z(Ya$km5h(KGOl`MA+)h$93DcW9lD4AC3SS9Q!#we=z4eKSQ!Wiz5Pru&h<{h8*I0O z43CdE=)7-0#^cL}CN_mVk79a;WFLcCSF7F`2gGD6Ahc7Kr}%lMX*qh7WPxqKMw9MN zZEyvN@7d<``$FlIfP9&>Att2#5$fnca*5dJdx#dHD8U108vKay_p1s8eor3$JOUnx zIE{7ePp!h+8PwzT#UN({(YDa%Uhc2dz?2M5077H3(BE#&+Z33(D=FP5n)Efis(Xzf z7M%EK{*-#+iACQAHa=S*vvuqMesM&P>WD+JF^p2dPfSiMVy>>V{HVwSEAOzdXd=%&r-~{vzP6F*CWVrZorsJX z6Ly%eW|ethSD9r%g*AW&Y&1Yk$o4~a6j1D`do8Q9ch)f%X?76TkXTPj&Li%M1 zeJE`S1oT^|bdrB;Nva`9lyNDP#<`ocCKt4UXogf) 
zYrT&skmxbPZ$8&r9Q@-}Lj9oPDN97s`>6CndBI+hmCL;NH%B5_-+NkoNqW-{rbO;_ zCl2Uc5ry;b1rKY%*fC3OC%FC%^WI2WetC}vnPe!AG*pb; zhRZVsI1VH|NEXXwbW4DZ_$7WXD)=X#DblFa$P8J%sg`vy#YBw{7Av(<<2wRGkParp zn0_yb;x0%^C+ZCxl9gs7zCr7yDi4VX1x;1-0+xqX^29q+=S=XPyi4jiNkzF2nk4GV zTsjuL+!q0HF#5$}H-Fc~wt($x#T)%RM@w^`-dT$`gyyooK4ao?5oz7yOl$~7DRvwW_>MVNQnqu~Y-N-(?e_b3;VXyEiCO#TJ`fuKpl4>9?O z9lS8!9b^a_JgofyY1G6jSooM0T~)Ex)~!CCy9R7DMIf!8>+LE<7h=G942uy7ovI@d z*v@}M4%dR}!F=15^n$~sN7A}HYNdh&3>`Wcgpm5 zn~c=~^Zzd4aOM>|p&+{3K>FoE1f|uw7ygF8^=Ts(3 zN{u^9NO>0ozf(c}eB4iBTXZ|r5`lVc^H*mOCVf4EGvvdl)8<1gtPU9<;+|5TYv zzE@bkM;W#(45lJ`RqURT2o_=t2l>>v8jOz?H8ArrrEM1s_>NbjQHZ2kns-YP?etXJ zte`^xo6h;Df(cIg8AP)pBILXQ_+a=zjD zW**V@dKqjXFVty&}UIDKM@Tbz9PLu{e=*IgqL^ zhnd@vj(za}(uxd<2T}J!9P-fmnm}v3_7N-hSeN*A)pvORnj*Hqc)<4MQhMRwM{ci4 z<{3kZ??UZ#4v}a`rvnxywgCDVZlwQO(-o5PJh3=Ax2#Jkyv}2BCvUQT-FqIS<(XIH z0Ne91gScCiaZQ@?$s{_yD=?&~4GYO^Ae8P^aW0O1P6wPlkLV(eZ!ernn=gdoKomme z;UMdNiPX{T)=RR8qM8}@@r2wWh_kzZ!tuhF#`>Bdq!!4XOdC)eP;t^d5Rjn|bo0vZ zv_4T2*yc&u{;m2SB^Yqb{AVZl#pCWPep*%S7Xi+9g&;D9%`{gEkb?B1uh(k$iGWXl z8yqFQx6q+Z#lqe2OfGNIH@Rf{1tU`ANrFs4<|>{8bDdn}XI=Kl_?G*JrKgJYR9H=U zCe4z!tvlVLLD7=1MOH1B+kS&r(39aK>|f>Wo?Y3LwHP z`!QU6ovg=fqr-Pl=% z>y9r=2Of@l!4@)lo|MWOSbv58c&ZoMerkd(=MQP<6Q&m@LFp9}+l#V4u|nBuuo7lh zJ56qu&RDtlUbiKHHD5qJve`6p=rGy|E$!??BX>J@bGfZGHwU@eu9g|Bwq(zXKN||2 z2BadX!ek3bu0Hu0FmXev2}lQQ$)XF?2Y5h{{zr@*8J3LYUzV_wDSS*`FgXkdAwxa_ zZZM>_dxQN0SU|DgEB#ih9;^m!%Fq_t(yRVHdscso1}}%!{nR<^D>}Ba7m#SVBhBLz z&yG{!U|@y?HHd~&Pt+ye=Rg#cRE7`?ssTLr=_Pj?4I#L}v>i5?X6c%+oz~`zgHw*p zF?P%i4n?@s=FUv~E@s5Kp!wvf#uskX_|Ly8T_eWc$d{VtA=W(NmYLvEfslWQS0Kp& z)gsZ8TyHL8^1Td+q9iDgyUpucvGEto)aWIBhS34>o`o|{Q?ohn^nD7Gr9KB!c4eiD zJgF19o&2FF)twHVH&GxdgA)F29F}x<(cdh1Baqatgv_+)v<3bgRG`h#p+Tv;)z&)f zyn|soip#h>>er0vQ*^c|Ix^M{GX3z~an3jsXLL6z%PI+KqaP9An)n6mc)%WG@`HNx z9SQ_WCQ?qgxX86TPwTAXMLyY2>U=o`^Wck)(e8oSO9t*jwlv zzMYok#BFd>E)45j$M<6nCv_8u5?^4lJz~*ZWYD5t-3M1#jb(eVktSf~$6tjS*A-D5 
zfzo-hs*05_QALh|MSE?p4%y6%>(BOjjjnr0d#^t*-tJZKB^d54)brJ!)-lGpQK``EMFxb}U#pYUZAek7tLYX-E%c+$n3ZcaTzk(>JWpZx_5dcr8~*D9 z2)z9`7s;|}Fvu^rZJ~BA!v80i2;iQ>({ABAx!-Jie3prPs6A(A7bYe@>$2e;?{aHQ z2q>Ng1pE+QHrv;)wk94+@j&l`eR2sAB6E==7xCcUOH4i)h%$OOQ5w(3+p_w-jtqy~ z(r~pUw**2 zTwd357Gs5J=Y&K9@5vcd!D2zCA4pPbJeu&tJB3aIM+<}QhSpT?@8UXHi+XzU2%^SlLms7OX0ioz+}UoV$Rmp z=ZdwP=93Z{q*U+Rs}-r?L-2e7GJ8zyT|DaTHIAr8(d~xvwWW`@hO|N<4AQYrJokcT#VHD*S_> z=8Q@ucDcmIkNnP)M$MJe-gpz9Bnj>BA8avM(xkp|l;HR_{lEJAsrM)gvhUD>kKp0A z4Bxn2Ts=~vj30B*d$0nEVe_DbmNfhbL+bvlx{eO{)-Rt#tRP`zARnQouQxg}g9rz` zU|k9NDyB)qhbCop69Il&7!kVTklJTph_VbAME;^5Yzpz_w`XYNnx z&vs!mrfu1L+bON4-S0X>&l!#G;2b}X+{GMc>6G*{3csyNzu7{nwtAIDH-5XBDpu{t z>?FM~+y7XDXY{1PwB9Azy5D7Yj$}mNbec&pDnN| zp{m+haQf4KojNp}JBp1&+T?@Xa~bvHnI%5`X}6iiP<-KBtIYw}s|iowoZkYArDfqW zPOg|%uDuZOI=6ST8_;WJ2roXWnnmOqorsLQ`T0FfqOES!r8Re4tIqBzk}(Q@&Aa)) zstkU$c!O}rSnD*tQFovX_7$dBl#S$LkMFdtV;L z$=UEwO-H1lC0&-w_8oX(KQ~cnHV6K#6}Yhbs#sK?@MZ;hlh-BY;Vi>}RD5}$!1%26 zZb^4sI{JK$1j#fEl4I~>eJhxdV@X_c%?V93Y1WoCFTK!lR>ainJ2j2K7D~})>nbztJRI!( zY`_0`?&qXq73>5fVl=IJ-7k7#w6s270VgIBsvQKQ*VU19U$krF;=Pz(yoeZfKQt}% zu(=&8{>fV?YV<-=VTu)Hywmo^!k%UaheJIfMq)V&>1&r;Zt^*W0K&O7&(Qe!c>8<_ zwD>HA+!qqqnz=m}Ndez=`8ifUvep*4U^@+^hqQUE&O zeJeRe?ix$rsqoqCb;(Xl+YWkH#(vTd{nq6`q6Q{G_SD1g<_S=|?yfiKuowO<2;=L< z164y!f!($ICRUL};lswNf#{D{CWn+>lcmy(-JIpoQ4(gpQ^be4-xh>_bR!#f&0Bxk z5;Fz;LPcHSG3J+*(Bt~##ki{U2;Okgyl8)b8tBYNzTv4kpE6UN<1N4uh$u6}_OVWY zG4ZP?e0a4|^@Dcpy_?fScUAaLP+Qsq0Xaf}^I>=RS-NUxXx4j;cU}Ji$70Zr>k51k z`iph4gxp!*>N;JzUxQTMu`&V;?{E~2c@I#xC7TFoNdj8pEunbqs3n46c%~Qes~k*- zogV%k%VVr5E}Tp0bN&_NyNf_9$bpvAZXrRQSP z&dW(r#8ZJM3{>g0G>fK$8^QaBCqH2CN8Gz6vOQ8C{LM6pWG7(&K49IVE#rojLXdDpUr>O?W2Y$Mqj zG~!?`WB(-<8WrCp@gDN}h9|6?N;^)|2=l>s`(JVT)q0bQ$y`?BBB%I;$CwA&O{XWX z?&_OwD6{sF&3E(51qP(kI5+3v0p#s1Pf6tlz6ki^31E}0p+-8sp>{=|Q`uv_SPK%3 zy=j}hYm^%Ny1swDg|?s_-@B}Oro*-?=YE%WXMS3UbhCxCMm?mRFN<#8Z}jw2f(H9= ze7;+%uY=LqHNk6Ib&T|8vc6nS9K<5*z77y;_x#RuuGTChmymtl+sgdvhy)blb$)1T;}W{H!>mUi|4 
zi=k`&?Zlw#!rf8ox7+5AM8ZcOzXi1%;FUu{m<&8F;-(v27l}uG_x%>Td0WhvjBlq{ zB%1ake_F;a4YHzl)l#Dc_`CRI0|~}UTUT`JK))bHQV?*H9*C}^0{c_Ao}dLgu2%oR zH%kP2j>CJ6XKusGnfK@l_+@)~BHiBDhs>R>>#xX9YnGIC-})d;hIM>ah&(|dYB$-P zwCSA(aL=9|-Q{*V6A~3ckx%jtDTE=lCNd{AKJ016bhj+che-ymW%BLNFekvIE>MHA zeQaa92((z_b3o+#Y9Pz2bX!?9^!Q2V#?0nA<(MtuCwcsl^@Eg7$}?cR;iq@kp2&XornQls+CfiMgA^t$Y4cHHJz75wIwid4LU;5Rws@<<(5J#my~{W) zAhFcT?-2sC&wZCUK-ff@Zsu|cD$(7puGg|^uYJ@a9?46H1_iBGa!GRdxSqF|i^iCF z8|jEz2u(-`i2$kj!*d@q=_4&zu@HLz-E3x;vj>VMptU2Z$_FWD$~Sle zjbqb?nCmld){R-_mKTQg1Y#|8tHvcbwo=ClIBz@I|5$Iv-cPJTNSL_{J~<^RYF8Dsn|RF zrib)>@?~Lc^ccxWbfTB_^ASCTdt3P;KJrNT&|YXASUCsDFbopn<(eZh>e{24_1Jx1 z7WS^&&~rYf$F=aBoUTA2dGBnhRO8;Tj*}8)xGdUf-mXTDMj8*L3f0SZ4XINl?)?NH zzWgMf|Mxf-3;L^GMv!$Q>hfms?&fe}S(ZL!69Z`)zVX^9|pwd3Kp!Ao0T?+|7B(X13#&wZX3-Q6BX z-08a@g%@+?^kl6Kc6IDD6)od)X3b0QU^>KoHsdx+Xszikmikb<2Y>Y*Cp;ey#FyI_+k9r!5%&h$n0Tfd9)d zx>Ua8##M7kT8h3_JOG9VzK^bs#=0b}%r0J?j5Gml+U09u zreU}~QWRq^YDJid((SC&RY2C~I8HRGac7+`kf{G-f~FVCn>|+Iu?Hun`1yd$?Q)8Z zWn#5PRIB=>Wx>E#!Aj&R);{ro8mL^n&l;Sv1S?t7u4kiY7nL2$&|k z{H^=djB&jGZOz^3^aU+j4rthFegqU^%TnS~I^t^3*mrY&uu6EZ_0W?MTQu%S!WYs2 zAicj`H2=Xb9^(!5H;~a{*CpN8DI^GDevTr-bx*n)$8=tP%$f=$ck_X3DpoN}p#MC(1SUoOe!gp1WM<7#CCz92YH2p&)fx1Vhk* zQ+PbGr_#apnv}x4GCmsLA}NE!5?`)qVnvnS@w(>7XWM7Gx)|S#-iq zNzm!yz#FIO@La5r_1*b_Y+|WXyD)zmVr?R%SwYjvN4>KEd28$(d(hD@MuEg z^BwbOgH*4ciaO&ZBkxM>DnFOXhTHZri8N)6?Oz<{Gc-_z{SRj4zJ`U#DI337cWP zp3fSV-5%a-qn)vKlkrC25|gUv0{s#AHym;hryb~8&`?)Bjn-<~A#xA}t&p*~hs|_Y zU0>H3sb({ zz?ZRQ)zNI$@+`?m#^=4Z;NXRWgDg8Qb*E4HMiIX}#$AQ@;)T~Q&4a)uhD5z0_$h2N z{ZTVij8fx|;qr@~huuq4KTm@v&47YlR*h0(MtXo^SH&TccB7hOG1_}m-sgwY#I@lR zM%FxcL6*$nc;{DLv;zkGuL-{ASys*aA^A+*P@BjNu0e;?+3$TvOX;JTPu<6;we;2WDkMpZv?oCGnx3iQPt{MLOx{H<+l7FaCBXQD;~}%uP0p*Uw#W z$L|2mI1_L~QSqaJ!kE8%$sOYxMA`t1&g4StF)q<7g#yu)*^L@$(GzmP;w2E1Y0F~Y8~IuCDKuT)DsPh8+R|_YF>jr z-7JE}2+n8ultyDCN=Lzf=_atsfoU^`grt>w`JEXPl8G3TH2jY?n~KvL#Y45eb#~#j znzg?^<(@xyisDbWI^qG687b#xqZx1LF#?>VdL=GQJ|8!1<_>sL*bOfOYaC=8=ji7ysltWF}lrQ$ytBVy%}=pZW$>x 
z4w!GG&E&xA1WF4*Hx%PY03?=mEo?3f1#ClytWoNq>vTzUfcy;4qY_X({_}=_KU>3| zb7`+_9X$8@Vt+&Z>W2k7KrNz#QWto|c06vl%C5fCnx(yP$+}smx|+~<-LWHjw{TSs zV1D6AE-`DEI2Rmk>rXIX2<6Obna)F|sIjP&*k8paH9;x(*bvDuykmA;;ej3#U$;q_ z`1vJk;$D-3Yf3lEsrdsalP`kJm>IE_9*MO`ykHzHB3MKqgE#X#(~#C+FqlsDp2>8eBNdPPfioKO9< zP%j+gPdE_w#kXHGGkPx9=Lb7oQMA*YA~*1|lY5C{B90&OcJoeXw!&Q5<#gJ-6D+A= zx9wcCH|s3G?3J&;p>Aw!9APGBC1U>_GHLF~LFsP4cjUCirGtB=GTrfV+P-i{-Mh`y zn@wL^ur2R%U%}AbytX0vRU;sOq&-u9__s6^`rYn*jVMHD7JnnKGi~8(eP8u=Ym5Wk zq?ILyp}S?Bq^^tot)uD=FnkO4Q1FIEa#N^RFPvxGMNB)8oLjuzSl;ESj)I@h3hlwnwacybF8;1wxYk2pXK>0UR z3OLK<92=pyc3)6786FMUu^*A~TT*`HFcitnz##{I{Tf~2@SKf|{s?4u{4XA}4BcY+ zZ89dRDe)UxfcmCESv6<{oTp|1KhF>tMQ;}*gR63a*}~v48yR&^cQ<}fB~@XQcn5pz z7Po3DKw_~zd|-H&8oJ$-_S2SxHQOrr(mLj3K)mLP3MjUCgT$_(c+K@;3zX34;iTdJ zTq^O;jsQZy`OMFRXid6ynNrJKv{jw)>jbe}b5II(Kq(zEZ}msNSx67f_9PmhGx@cG zJ9Lr;l81GgwSG#Z|U$^B2m2M`r+ za{-Z7v+eTRTo|AP!c8Rg+g>sBxEXvW@)h%G`{#^!!Bt3}mW2Oi_9_~@Pof>NHFrJ) z42A=0ri05Gb;%EsgMeNqajcx5^10l|akd9yCeP8zQ25@#<{jX`Rz9_vF#Lys4=dFJ z+cl#7?AO^jbm>x}ra40@x<7KK0v9iE>kCq%akXaBouIiOeig_;=jfUw#C_Eg!$=GcG- z&OanjeEeU02lAfV#OEk@Tdu-IC9^Lx@{HHzqs9*j6r1ZbO4woS_JyTWhRgh_u*2#% zO)(9n@5~hp7!-NNPQ-u%0x+Ul5u#$0y|rk17n0nAJ1U^!s3=&IVn)XY_A$R9LVrLMepC$)h{xPErtcY2XjM%2a-cPr~t@jRkL`4Kp0GVIKLS( z+gmsZdEV3TsN<_6d95iO6DtpaNQ{?nFpR+@W6XP%;iYO6;6{Y|^+@VCWkX^jETi_< zQ9HL-euS4%azE?pg`26VDal5rX&@?KzL6@t+nvxsaeEqi_5pBmNing@D)MRJGfp&g zxLDXJsn@P(zUxE-Ym0@Jp`Vjz?HuQa{O?-5FX<>E{W%-KqQM zp>hPicX4Qdh+miO2omjecjhBs?K%k6ap^Z5=)djtt)~Y$tZ2OC8f+U)^^!eJAuS1E z=rW-J-EAMa4vuE$z(F?)TzhkX|BIyv`_n&kbZ=GBod`LJ%p|{YK`{qG9J5W{SHA`* zgxokm?&Z48N@Nztv8YY(WUD%EfY@(&H6CWW0*nS24>l5Rdaagv^lA|OWl*ByY|B;f z`;8{L-m!uZb&gs>ftV$EbU%@-#NWcwhko{O*Fk|)OtQhppEnyVEs+$hm6gyEF!WQS z=fK|Vep~R#j$aV;G|tj0hYSq%inlk&V*UQxy7yuDe{ zUV=SkbXhO0@tzRNEI;{a=-NU|!-NQd4gJfU@#y>fisfg2(fKm{^;^Z~%#$h00tGZ` z-xN2CwY?_C4PR$%!9PxA_Hm{8NKL5@I}jXm8G_gIdSG1ObAQ&(*7{S4Az4zStzq{E&^uhd71rLcKpC=W> zzaN>Lbvve=eBb(Uj!e6Caske|X{Rp}IrN+s2P%`*6gBHQ=_3eXMb{Lf`R1M5F>@sI 
z1vY{y7;B~F!w~%)IQ$_UlH>d5j=K{^vN!BiwA;&o;`j%Et6l^xA&f`g92^(xqz-77 zz5SKuwBT_ON@2KNY{gr+CrCbCuDkX9^i}aSuwJnu3ZH=8C_9Yju1`9Id_L-}7L6YL z@&{J{6PfeVTxkyOFOVKn#SY4D;BISjdS{no=27>d5~^LVC|bneUFUA2ANNeDrbaZ=}zukfrt(4cZ-9&2{2m zL$*w%-VCRCwM$`=BVd$@-pHU7UJvspCL*ZZ<_Zw3Ri>mt_blx{8Tc?oD&ttPdI7A5 z4+Lay7nQvW%`5ln8;*cGJ9rSB}dS#LGx95h{Xri2!sVy%9Cj?hXE@Mh2n}~Mdyy6fEas=neNRhhriJQC-gL45IoHPF!c)M zD}AoLcl;5FoC4Xn;K%UIQu3Pfp{0CW~NeV$+p*_575+ku=(7j4?5> zhG~pVsyCqHDU8WN&CNrW%E6s*m)qm@d*!>W+u8u<(O;YJVzKr_^=9`~7hOo4Vre-% z< za#KbUtWBCCR5=GqXcSQ|4xojHeD=mn{c&U|c>m+RToZm>bcl`E|rqkxH`=jZKg?N#x36FuqjIlX^IiKPq}DFy=R{@zGMmH?5WO=;~n|aa&tlEOHlL znUZc_96>p(+jZQ`blP%nvRKglDquoS zirTdLPJ{Uios8E7uwQ;%U zUo!nL>FA*2CuouMj!N+?7<8cti&*oZWg6x)c2j1);p?8HE)MRoPBFsEVzai6lc!*L zKNY!JhFk-|V--yP&gSRV%Ny7H!5G7ii=^XEnR@r~k`F5$FuSxQ9-0c(BgH&g8f4FW zoKkZ{hSq}4*f(>Y>S0LiC;wB2-mIlS%F}}`WjHduy-D;E%bH@aSwh-(pM%8OK=4%6 zco(%u^7^RvijU+U(tl0jAji0#bCB0fT+O99*vQU03ovC9dirN>(wCyyF%5VHfxQwz zm%p`C6O-^E{T#mP?Fql~^*wFFfX9>sloaH`y-~_7p_6_@YX+f(@wq}-O zN-tg2yF(|3oGRzNi;#|m;vj~~^nGUaOeWxKr`@l&$XXY2#$$za%DyW*!FLu%v+{1# zcj}7sYgm##b7Mc|az3eRS(p$9H;m)aPhZC)er9N>`5dVhZ1vbbez(CBq+}v@+kcS1 zpW++*I2#1RpiB`tVNSWy;MHi(Qe3&|7%atCeY-)*ah=Xt93U=Z+BIu2#>{&0gmZ-ALODC6yX#M$^wxK|GnPx zJ1&}ccNY{*8JFV6H&4Z1-JmhW5qeFJxBIxdOEr05jiF}Ozy)>WsyTX~OPUqtdh z3F#vzzd-x0w-z6F!jh;833o;SCQYcAS7y9d0LSF%tv*7>q5YpvvDT@UQ6luNoB{7mj81-y&m z-m&;wQ$76XO5A?mtGQ6uW~@n@c$=_Obcuf~elL=8p3x>0(rUgGMaz@a?3RdKOkYNU zg6*rN*-2d-5=@!3BL`?!a+xj7Tg41QgdlnfkV_OSmfG_vD15aNgFPuqoJ3++uw3N8 zjbcUiH6o(52UJu*&~HIxxaW-G0BY5AFci$zng@J2n88kVZ;bMc^d|R>T96?=1!8B< zs6#VtL1OQ&j7005m5B!It&FE3ocVd$ z{;F-sB;rXZQFC(x$UfqdeROyoiu=NMI5?s0uK)SPYm(>jDQz!@rLytF<)fZOph}Ru zO9pewOTJ4B2TazL(|m~|PtQbL8zD09MeS!B4;$x8m)cwXykMuI>%5u|h>G>E4~K?+ z^Q8uz?Y|eO?w7X*IyCp~)`MRJl56tDEH*yg7WYDTdA?C%e(B2w(mf?e8v_IsXwG&~%F2P74CwEwMszY%9(w*wb~hI80D?tEv3bYUE&5GtO7Ay1F1 z>sc$sRsMG)t2P=E5k^LUp^~#PdkgF(mB-SY&a*{Pxs+)C32V{i(*gS^w63AgoJ=OG 
z`JozH1&3v{g_1p~N)~f+&R8*@kuU7c`F7$UcYWDoUP<7|IdJ%@`g6}HU@P#q9x(*H-5U`z;rx8;6o3$ucA=+Cn(Ms$dEaqiPt{pfU9+*^Rb=71 z;+=!RqJPzFz79~?9S=sQ1|t5Zm>PSxrHQ`in;a$t-0g02JQd`(5D5-?vnP_HiXi^y zCs|60Y4&|1Avq3UMPhV810RjG z!i#7pEBwn(Z`CC7iKvQ-2_TMe=odp4&HFjW#=wXb9j{`s|GNu7Ri4NY)tWE@oq-0h z4HKA%e!4B>D>FVspnH<_`_h@As$ikGhyoxvfk~t+!Iyr5Fx-CFiG`vxipwgnKkL;Jda)I%0*|ZI<<*R>a)4 z7zJgkjLLYx)#15?w+_ROHj|qSUjGcn;xg@zt1=D7qh%LUI2^}j6?rx78x9|_j;Epn zqJ{}9ZG3U8X*pB7H%Aa{6bOy*cTQbI; z=!nNA3N`9nHLknb>mgiE;tNMSbP78I-uy>`0I2DGBs28a@ihRio<*~k21JB_ ziNPn&)$0FIkc~{ujwf|`0ZpCc=rS?I!um|!o0yO>jyof?aqsvOkwaHdFR;~r z#^*!~?({V6V?CutpGQ(t9&m{h_@V6cg-BJ;hKojPzh%EvwsX+t9XM3-4ajHsYtlU+ zx$V4EE>s(RF-SXGPG?tE-OMqcN{I313G{TNT04?=(H!0@-b&b5y~zM8UsHQgU>y># z+W{+5Vqy+S^k32X8RueR`~%Z)V47~oN{+li*$(~)JmuvhR=feGoig7n&0~ft~v?*NbxHR^P<+4+@41d#@OE&+^M^i4u@23PZm~#yd z(gi|KJ|p?%gM+_~z4QPH1fe$aiqak_wT*NR7UIl zf$;2NVr2el&h()SM)>JMF~%BHX{V`Qcsl_fz1CNcUv7ycBJf7m{2%kpF8+$$ zPiEf~P}mqhgjLPrfhDRWi~#!cxGv=XbBs!8!oDZz7z~K6zzbaU8sJv6w+&b|@aE5H z=Pg-KL0eF;!inkAR7%S_vVC{MHJm@Pf)FLDSP?owGm82_u8bda4|h3CTUt4{x2W!y zP6z?gwOjn|)HPrvO^wBrJEV2ier3wUG?|Cop=2|5KJqsgigDrB4Svxyk$l7C6abi; zK{{g1aB%^g{=ojUJR(kht`c==HW~akH>v_vE-_ldS;VvJK|3<4Tdtyk$CH1^Gy!)x zPbp%0N>lClk>Z9MO+H6s1E&}vu}>#KPfv<9$0E_OtnmjgHkR(_18bm)`&YTJUgP*Q&R2G2Zui4Wc>-*)jLe`QRYds zP#}=nV%n^?lXTgk#LwGC1@y;PRHRdZR^N@#v*6fB{o^qJDDTtfu0CCVSTJDyGA>tk zy0zjH>FpqBbISLqMJZ?_$vi0hu+!|_HqjlFB*r^eG`s}!Zo7DBY-z7N9W3Kp3*gf! 
z6y-T(!#~m&vmxH-)og6TYVb3cF${jNiBW+Z0dcc3F9#RjxxM*XUFyyy~ zz(}GIsx|}j_SYdv49#546<%>_^6KAz3Fq7Qr=)7m80gxsEA-Iyu6(5Wj-l@C2OnY@ z!Frf^AMc6tffN9wq+HL+$}+bQ4^(=a8x{z~P*rTR0gBnn(`u!`ekL!}xc>CQK0cm# z(;xqd;FI?S(X^qE`K_B%n+{ND|L{+bzQ@$l6c5QxfS#=0V60rCX+o_eP8#lt+AzIaHBhrCNj?MSe<*E5EBt=c`RshGVfxiv=wz1!+oNFRkwV* ze@7KXOlCQGNMc!4pV9L|O!yFD7qvPf34T0ICR1PfnB^25`86iZ=YW7?n`JRS;EMJ* z6>+fu%yS{=jMj&ebM>0&X$9U38GN5#5kf738Fxrg`a|-+>0gq9)dJ)3E9^WnrKY#P zVP#!tbHx1so!yXHxdfK74CwLBIV~@*-h90GjQc7_W$gX)D^=LIF_xay!?_==8>B+; z)%J|r%6rIoJ7#&)L~-`QDSPJnq2mgHqhEwO_g7QDZMfuCTiB13va`l|nK)w6>wQJ& zl2^vD$Gz9OF{`r(?6^MKH|N!zajR#?u?}MEt~p@W`pEZ~=%b&H)x6PY`>;T^@LqIu z{+s5@crWiIh}fkSEbFX8ea*6Ve&KaJq!oTFH+~JtgOYc9`BeQ4iEpRn@1zeqfwM%+PiIL<^! zQD0jD=G4fi74b|&gHAz@wtCmxgpU|G4-P5oRTn2`fsMj;4#RV`y%HPEdRf0Bh*ok?{_9f8>qqtpTT%S+foDOi0Y0sz`OY<~{w&hlQp)Reb*qaTB7w(vO( zXxcf1xgx2eEW{BH*k7=;F%Jq!W_Ss#)KI6Fv44xQGk%z<%RdwH5Jsoardd~t)Vx1X z23FkYBgDT;LTgewGiRK>L{P-^F|*kklj<;$#5u`u{k$Aalh=W;paTdLV(k;eKKu%9 z>jOY}vA_nzSZ8Y%s|p`tiHwT1jq^T4RixJ5 zJeau4wAI)A+1@J55i{YIz`zx8Z*xSKUR%B!0d9KzrRsj;wwWQ-<5F!sB3&}^bxzJJ5& z<}dq}&)C*dE5DSyMy?B7%&(3!4E!SW@)Z#Ky~e%8xl`NSXCEWo8{8*7@*Iu;XyoB( z1oyxWLW_1dN1C$DJ6m?_-0dZq$>Za0;){^vDmnB1ll`~e_nToO=$rvwyj+c}!8J|Q zw)JCgdin=FF4Pm^JX|#r;??&zn2jEOZf&({9|BQ;48~7@%2qEgi^4bfWd`aRK{BRH z?Uw{j8P5d==ou5b7d>a9O=cV5QByuDT!$T_kb)-7I6F+nu%p9g4qq&fmzc_GY_*_? 
zpF6b{W}09!0tMvN)0jc^8tnB;t+`7|}s(fJ_5qfYhy7XVe0~T-M?7C6KeOu4jTo#@;OdUc(RQCZ#+6ZcLs<8~U-EzyKN6{W^c7f!| zGEsQkpH)@UPW*YDJ8w+(;uv$z|A}63N!!IRdYbdBQQNe#;?h`nV;7iBl>O7NF14o6ab$$+t9CEt# zRW|PDD6Z!T^CnMb0M2?x@)sAnWir5nfVjUB%>85m*x~(L83x8PqI<`=+o#~CQgGu| z*eA<8>He{Yz8-K$`~$6x*nf!&Qg)o9(46UVhvxp+jC(rd`0u5i3RSgDt+xDW<_Hs( z9n^W3Zt;SJz2l0dv$h%@3`^R7phx{NE`x5>`MAsBHQqOzo$_=GYK0Y`NzhTsBpPWk z0~vyr@(0J;&n3O+&}o5b#>MC#33Obs7yiI^S)h{em8F1T!OJ7LjTI>zkMTab#;bs@ zK@STtgF88lX9ihteGb2U2_|epV!=_rWC_dscv|^1k=-~Uk3v+pQ5QuZ^O85SNKe+n zmVgmMaqMa&Ap?Ps_kqyiyH4kBjtseqkDD6Dsp9PTEevz~KKkG|Tm(q&t2Ha?gSY}i zoKi5JTU`l4>_$$?%H#ebJJT<#VfidEaLwHv228{#Cx8u<7eH>MH^E%7Wzx!ZuDc)SIViXO<1DvQBqCP0HxgiR`ji?mT1(?|O{HI#&W#!jWZ;-?53 z7d$u-q}^ZhN-^)%K4Isn(6oP_sY4`!bI-Wqu!hc!vc2DBPmHogm$F;?wrOu)w>p{7 z%73$FdLth-s{6_ZeZHRe26U9&YipfjNyX1`M2__^dTG-l2ES1$UlWc}=i}J>ry8`o zyW%@KjhrQ3yp>H8@@FYTUdV}_ynNb zER#eFC;H=!W7tB1gKo(> zYa+F^|6FW=@^2?UTIHa%T^5wx0YH?S2 z5ikq$S}gI{a_cb5R~Y7IZsx}qRMBE5Gm~v3sTy;i>*P`=O^C3bl^^XV5@lphYnf3@ z(376}nibt6|AgWTq=Q+6NGVi}$K^-t{T;E)r{%8!rhiX`U>V$q{jEdCnKIeX6y7zl z6-FtanBakqG)*iWQe)I|e{@YhPQ|GNQ!f)y`iKpfnXE{WB}RNc!M}@VtTE#xb0ZBa z>!)YTT?EI%lr42t``=hQY3@kf#}Yriz4B%33R#Th8Rk~|Te71$jiEfCs6W#L)c@7~ zFvvsQBg#n9AA<>Qlpe@mB9PLDRch>UHO<2U=$``Di9*pYE$D=SuCxtP zQrfbl&;dSx$et++=QeMZCdG`Pq9?Dg|vu8fF~=A%}Vz+BjQ@k-(%tjF&7>CpXs9uGxnEyEaVbR3jS9F#aiXVk2{SiF12SS-kihz zyHe!@ zi$L@@2zAQa-ab~g{nafkrdE9hb#3jOJIB_&(RC9;M!{iG8o;6Yr`wv4Otbq7k)J*0 z6PKQMzf1FJvRMgqwJvfo8Xovl+3MapSe7)f#<)uirLk8;B?xJ+1gi_lWZe_F9$A`2 zWKH3k`l}ZWfK^DO;ywle=iZo&ph_&Naz+(dBgWPEAB63A)MP}}mOZL3y4k~;9y>XdKquFc$8ADDWS z8i5|Z-oMlkb)|w;G_n8%Iqlnq?<5b-1FTstS|rf})`suiTeUSxyZS3;#v6*Lc%%xB z#g4p76;#2Z@5s)X<4Xq8XBLcuL8^{m97=Z6mg#^;h+{uOu_0ftwxee5*-+lkT)_Eq zC(Nd%XB!_QVbiT2gAp8rfIYSmh!)}H1H?!A@!$j~OBT-(F3T0wCZDxFG&H+h%U6D6 zmqEL$6TE>6tp~8_cnL55zL~XQ7#4d7dQyfZaRhC35Yl~p{^L2|y?QuGPJx}Tbh=Il zCp*|4T?|P7boCTrw}o5phO>Gk`g94~YSr&7ZfV%c4yVc^|JAJK`u6JB{=W-GW~ec^ z&l%C1Xf{G0E$%h4QiRqcwgwARqaJ4D#Fidpl)~4;6}%eTX}Suk^jx}!u_;yS%hhMR 
zxq#T^8yg{~J|1rZ3FN(@2)-?2Y63$Ywm>yN_akJx#*jeRb<+BMjs|PQ4{WFyd`1)F zCH|tNRL!eHq!gYT-|La%01_M>?n1NAsu}N#xFw5+f+huoc908p-G>9U(_Af88b$%H zD{9TQ`cDf@dJq(V?7khLPL1_bx3xnzW`S8=CI;!WjfzQl>Fj$Q6(y+~w?>&?Uxrwv z;!bLn_Y7O7aDrRJJjjoCgR@;`oWRnm2^|DszikUUZh|*ig6DZQ{ht80oee#(8sABY@G*(L4WrufCjVHMb8FTup2gwjbo=+{3==Fh zlBbtB9kUOMuyJ>)wcIK17P9{Md6P~P9W!Cr@m6GeJ*3|I9InVh4iG=rW7aXRPXG0_ zZ_7nrjWVpha-gZat7KbLgAwlLfd1B+S4qrsuKqr<^XXymti85HmPNuKqy36Gx!Sk* z*sU$t>i08sDz9r<=2*2T{zKWCpFM=2r}PmJxFq0cSuBe!u|fz^mU8CF;h%q0*{6tC zSzPy}@KjY$)Ek-b=!Z6xJRM1mB0P(00j~jLIwCvZ{|q`z?Ohwq2~C zy1pwd-(&E_C-??)JiwH)mG|5_`Qrge=0bkDxy&mL12$R1MTIrQr7d-IlJ<+LnseRz zQZCvtD-+ys%R2Wc^J-Gr?Ffq^qulShqZ2wdP&N0~`nI5jsc{7DQ!I`iGtMq`CpV>K zk>w;}Jxt;TD+6)WZc6ixW2ES|o82{BTRs7?9@YBx^EuUJGY7{#Vw@FvHVyg4Y%&zm zVXzmm*hoVO_a}E{b+`k>#Rxz&*9wMxy= zlG`k<;XZ4A{`Zk#LG;Q0c$25s5agYQei_G)Yg_G3v$X6?}_$jk5>ce&t0t! z$jacVqcglu7H5t>Hy$UuctNO`%6b-rKi*X6(ot$8a5h6OMcmV)TeL7?(~^V&yz;PU z1kHlK0~l}4Lm2aR7F2i_VVA9iI>g|7tdou1`Ulgq>vx7o-g)PKBUBzW2NX1t8Fg7N6| zuC|ErWQm94fjp8(zArpDau+JuDf06CsFhn zGR)do9RHuJh?uW-?UA7z=@LadlGLQHqHIVs6EFI-1Q zBG)hqZ8e04o!!qX@?Cyj1QL#7Pl!GadNL<+-VLD}41DL<{<4yL{FtsKcp_HUJ4kVj2r-6+a_gX&0b>x1$=<6f@HULs@e~0i@e@Zm#X|Z)Dq6Q9-*r6);W-f2{%cUFvD~J$boCeszj^{kP zS*Yk-H=mHp);@b^y|n#$32g}ydsSstCuaRK5c2@Pq8GN!5yOTW?=e0%PTdT>W2-n8 zGCF~@f%ydjx6XRm4rnc6v~OKfC!!ai6lV8`8l$K%p=9|lFTobv7DW#LILWU*%=dq` zMmA*B+!8CS==sWN55N)wqL>wsNY-ktSHXeNn3k+j?RyJGn&0sgsqsTN=BJ-;zwb~<^ zn(zPfV;xXG9Q=-Wu{tvi`J7|%<|AbL^AGUzcK84pH0p#bqg``s3@wuy)%*33T)sF; z;oJNSpVbMe#n?;Z886R8_6w0Yujcv$VO4^L^`|+Tpoi2$s|y}iW=n}T1l)`gBW-qE z-y>;XfFK8#d-D*IeTMmAfwgB{S7-cQhCgP^Zq4Ml@|Mwsfb3HKQ{w804;!;p;l2%m zeKTAxT77R;11f&$G7aqp?O|%td;CbLBl)%S;?-nmn|_E8?T4mn zRjKa6pCv&%x)5*zt>h1{MHtQfhUUsQE8AnQtQ|J*0lQp529-=s+D1t;64rV(g5!~q z6)Ef}e%3xFmHw|sCRx;Lzh9RwU2_q+V(aGw`rm*9nDNh|)HDuu&ueiE28gtOPs(Wp z6FHuf4*V6J{pF2;u^FMZ{XFb~b@ug*kpHKW=fTLTb|K~h6F!P-*m*Ih1VU%v8<^33 z82`_sl<43^NeCiyHT)zAAtSlCD`@Qm_dsH=Bg16Olfw zRZmdzdq*oodDNFSg>l7PSPNIcgCUt%)&;YV72cQ=D#EgM#fWau_&~wOjI)FvX-GUk 
zwGC=zU#q<6%~jUvu(eKHCd<0q!>x3g+x2$B{p!l4cyurZ0~Yn^|3C*4!oN9g4Q0L2 zUtOTH(X2sz#HYHG_aUD@s_E$iZ3{-oEeklIqoi1b4C3R6tr*9c@^Y6JB^j@@L1-f! z_a@H@p(11Wnd!gmE#^ zJs9&T2`#jpII~O(Vr@?iIcB1x8$Ay%4ljO~AyJY~@fqQEyjJh%e-*$56t@d70dnnx zbnFkBJM^Z~y!R*l)S#{7e8!~qDb_sd-4}bI5~~2)YCu0deCo?aZj|;p&>?($$1|>U z|Hs!`LjCjP$C%2ufx7s_ik=~Zul@~G*s!AR`YX(b`N7+f6PW?hv-jg=S41B} z7bqiAs4pJly(|!%Ca=!^>4Dq*^Z1K6ImPeN>pUWl-!r@@UrXdkkN?%dmBDu`i`J+( zA4&+3kK5JTw@%pgBefZ8P}<`G4=dTNTigmB7^Dgg0e_dX?w1kIf=ZWT6R7Hx7GB|J z9S-lXC(s-hSP`hl&JFzH>zx(T)~Tbh!p?ezslZ}{H(%~8RE-AI*F2P@p=6#Yp)o`#S~b(*zA5?4 zFT;C6BmleLMx?K zTPpqjFv}50oLp^xXVs=|!KCqM4kuw~HOZF!@s?=c4D`Jyua-i^hFs=?q6uyoWpfu4 zVuGo0C#yR{!!o=@$ogQS^PE93f#=iQqA3lLZsSr{X$yovb{xC^<#m*h!G{v zaLPJZ^Y&Y2Q6k!OA4g-A$t94_ZoxnE zDg)u~2_;vRU2WqN%4544*R#eonUFsE7=5zMk5zN5jHrA@b;%*k^f*m@K^99E}?8wL|B*|HJEV>xlt^t=}(<(<>x>2k00({<1t#*0qVP@|&sol(%RbghivJ@4d_ z>a!F_v4fRMkN1s8QjG9l(8%i0&t2SH8J3L=?WSCDPVTRWt)z-G#g3|>jeS*8Tu>v% zy-$3IjZb7JZU)x1#?2VH=G^)nypsJkqcR1tIRjNSJfAlWjj9=~8E-f3I(73UQVpi; z2MKIAM}?-(=yD+awiwTQca9U52FDwInw`Nt zanBPi`^aw83z&>?uMvC;*AJqyA|HXI?PxwJvtlp`8&_RTOn6cixbk$JbEYI&U!Ek6 zJ+PjsBUe4Mtcb)+H+16GLs(x+`$36Ne@JNY=W;5@qEFr*TTlVOlvAsJmzF{1WEa;-I&sI47CXUUnNr()nl+zUu|9H}~F3D4o#LR_l-msu7PZG4!?XywqJ@!6|i@fjK7|ckDFV zt%;HJ1hREB^Iq7?4<9>5(tnL`#ePX>tM7#+QFX9f8`!4BS{;WcyRo>+qESQ#rg38# zklhg}r>Xt*Pa0UiI(j<9?+6Y`;&di${QR~jUZJL}^M3yJSALBj)hh<8d)rY+wY=vU*{;4rq)g2_eiB)8 z-_FOv{i20IJ2G9>87{kH36?U&Pp7lG>Ji4FU0eo9x|i?8H9QD8b{R5En8<9Oqw~4v zYC?avw>$-VEy%Mk%1^%iENrst4YsOtB@HFXI^S=s^!FNp9$c2;N=3i5d1MO2k?I$6 zpQWC8fD~c=%tY&NhLl-VM~X+TKbT8r+D|W#W)O5%*7X1khB#!8TY1%GF=P~4AZ5Hl z_?Z`)&zKQ`u`T1SsmS&bx-&_#fxb6}DKP9DrU z9cDPMO>XF#J3eroSXrS}9eA~wb-I)!v?`ag6d~vO)$1&vdUmy?!fl{Kqb32DpAgLf zT<|Oh=!Foz?o)wH=%eRG&TF?M4;VFJ*6=!aW33NI10#BD` z-yfAX1kp+R@fzA%to;0n5j$>P!#0~)u!h$8b=lz{aLb~&-n0GjGQg=GOz%A zG|g)HWPTz8ui?&Zy*r16pP(3n%hn-7LQp++%E{f&s46Td<-c3-K(oHL7rP;|bpBxr zsc7~X^zcHub(W7E_Woyc9{8%6!ai?qG`WX79OigCw*F39Z|7Q;fkZUSzYDAg9}Lrf z%_^DwxzDeY^ohf#1|!qnQj(-Y>F!m9cYpPR5j32tnKfn-3{EBL&ooEryebn$*an69 
z><9j6zr_)lS=%zTm?e*p(=4aOQJ;LDvL9_paJi|Pgzv;9Hk4EapLHP@dv)q!HrfK_ zrQ1yeEpL(VemaKJiuVC+mtIPywAFzM=Rvv`UDTuXXYUyS5R2YGZB9F{+YE4bsQATx zTFW#I>vNz+O<1PlSgO|3#TqEx8;i(w%~G;Vx(1iP89r>1~GL_@2)84}`3Twx-I&=p{& zx|p^)nEb3EH!VOpJm8ZPM3tMqGs~;Zp6YCTy+G&H8di0&Un#WeM+G-#1iRb|?@iK* zyX?fV2(4Oz=8#99)7W@ak2kOB?-T=Xu_HxcPh^z>3;s{*7X9r;ku*Nm)@AgYm-vpS z&3TYLQQ}YDti<@wAX4KyT%c_@HIU&<(lR;IoKVA3F?hB|t4-EO<0;m2HQaA>3h;nZk%3j-@wFlj>E-x+>550M@uVJp?h(`_t{v zR?sz}M|~CCDHe8K_QbKE0_uKb_+v)s6caO7ux)EtUr=M+x21BHnA;IMgx@kB zlutw46R*?g5rIwA9&DFX=jI^NYe+K&Rr`7qL^Q3<-D) z*boFBmHI0eS}1fh2>RVP-kVt$``yQ70n#1%Et~UgFJereb*>fc*n&gE*12%LBK>byv_CHfbT<^PnXlgAkO*5p zOAwo3b7_RaH@WRBEIv!g4PSy!Yl!n+jSri;+m5nk)3_U<2s;8<_5n{e9i>^aTlk0> z)PL-9!0TrO$7K1oUkH`^YIH-xCEA+wuK1*>fbH!5!}B@SGaU`Qtr(cnu7?4TG zYyRnM*u`=Ws!D~@=C-qG?`25{SAh-I`$5wx%R0s~OT}n6vxB40UasR9e~KWMsXF2$ zRET-UhWzw*E+CtKfBMH~&y|lQJuZ=k#Bry&bi5zW@%BKfd8mpH1{F<6MKvpY6HSPW zP5`;LxW;78u~|Leo}H04laR5VOB~qY6r^%f(dkD0ee}M9bJBw_(Vls1 z-N?xtqq9ALZw=q8%I~5kD|YpC4Fq;;r#R!eJ0H+t&Q!v7%){k>g{sHvdOfvzsz@N` z4MD3gA)fV$*f)5SURB|4%z%8Ne~rlLSsexg_}u&=_FqO+)(rpc#%F2#4khvZ(CiFi zon&YHstjLm$GyCzrqcPpB9FRTjA1aLE=xC&V;?2rG8IQuWCj(3{v88K`ni8__m=$9 zmd&w?H;&&YERi9&_l z>3PGVthh;kzpSu^^{t0FQyjIayNI*rkWnYH>+)_IV!*fAf-6@{b~AePIeWZf=N6E> zMgCg)_|s#@ek(AuC_qL9pN-;)>mToxiGs>Mez4Mb&|2VD`?yN&X$RqP)lF{=l~b*^=rJ`BvcMM|;LO)~mU6PEKnFN0-?Bp~@ZLmpJPQrLrc?<|$td0~&ap z{!ujk2sG!I?Y45+C;H5z#6N&}>HCZVdpct|iQ>w#rew+DBhEW0Ww@IAWI&a)g-XC`6Whv#>%L@#T{MK5OgnO^#E=0+R}d1zdCW&b06-vv zYlE*)$rp|Di=KbzUv{4`1}t9D?cDz_$d|6`FIIGhi{n-E!DMu^;~`JT{NFamtAlMi z<%=VvvE>jD2Z_FvfHQR(x zcQ=C>G0^!*%^1U_qBk_^h|yR`q0*k^m?3y-zQ{X;V>J?GSO>P>>lOOJ>vUi^0jKpM z{&v~x@DQI^?UlERu-j>#9tB?iTOD8~NL!A$03i9CP4si&!KK&U=oXH$TEo0Tb5(wL zWvRPy5_1hSuiKvG2mUz{Ifm~Xo0>cwAdzKwJL0-Q{nD(RF=jbq@$Oudsg-1N%Nelik+$j>sb8Te~!j`BD412N5k)k+C-D@lJILFnx9WY%|z!n z*|q<4Wrojzmbkhp{FpA=Wn+;fxF$_J7V~%8hQ0?%JE2U%@&gAqbN+^3S@H0y`x9DX z#$J=lXKgg_jzf?UAgWsMXGaW1L04Ih&sMcUQHNun%!oHG{xC%OVf2$M>-|L&(DWzS zyo|e`FX-t#th{}M`w4(d_2I5X=bHvV=5j3;A$}BL+DOap-?6?* 
zTP^85@`L)MC{^PaiQ8z_A!t~lPDc1&$~?CZ4Tdgx339|fS}h!1q9p~za6E4K1>WN} zpp5pjRBn$Ck^T#~`VSJZ2Y2J6rQNn{Mn$`MF(@;`|K7FpTAeDqx`1Zd`)V^TV!RFw zA=U`j1}tc(b%XMURj$r%xHjTMT2P^tyf(?$o3>Cxo=%BSJq=K$Nye%4`Veu zmgJo%H##mXXSl*!BEYh6WKa3=U2w;{+Ju=~)~Tz-(7ot!QL=U9eyaP5(+Kkvs&*nL z8;}3!o&!IMQ@fwr;JFz5ws6zGDv0CC>9wrjJPvko>X#?w36GeP$BIE@we0HpOa|c; z4*!a|^PxYHvTbwGHotHhHiDiA_;q|N&9$pzR4fDCnLFW`tn$yx>phjQdoK>NS9#myha8ahujZ~ z$tvr`X89=Sae8@pp;9!3J5ykMjv%2Ua&qP2Yd@f|*?G6ChP zg44!WU|o*P4My74(zs`iF*z=G(LWxZ0CnjydCW~M>#08{F~C4BTd?!;j0Ov{=%Wad zC(T%OlGwCT4t`Y5DixD;wMEKtL_nrE5)f$9y`4LAy0#h-1RkKOG|PB0E-MG(GEa@cvokU)NBZFD*&|6TopKvFo ze)Wwgz^rS#+SX=VC#`zQh2Yi1`{21Fy68gtcj}A4!=-T1kMKb8NrjL7Bf1|2UU9!8 z5KyjjSAkLyE2==duPgrt#-%Jyy>4_M8`X9ZUGGTUjB8U6A@{n9HeyHf!}6dLkamR3 zhuMTw_%XNG`gg`(){&1}5n<5)SMLK1{2&6k2PJD&ldZz>fLrq7lSa`+T|xDO+^uHy zxehB*(U!Di5iPzswS)dfzP-VaQun^M9G&E1mjbU(dcWiyGc`dh%K}EgQof5L1IYOy zq$W+_PTXZ*W7Xh!rk3cm4I-AJmI$SYr9x{diS_q{ zna}Ul@yDD0l046S?pf|R-*fIc_mP@h#LT*d^E&Cy=S}q_RFD7EZ|~xv|qt*`4!#=TqT+%t5TtHd;`xzLciZ}*0$sx5If^% z_KMcs2Et^xUn@-V_=79bt-rFhevnnVWjt)*nWTh|yKP6%)JWqBf4 z-5-Mxg-|hOh?4(W(Fw^fc))J+9pKUxCRF1>PcNgt)sGKbrYk`1&ikuiVjFLpn|GRi zbOK9xBwNf$QVyB@4NkQ_xzA6TibMRxW?{`7gk$poi zB9ribUq9WJR{lfm&MIQb%Q}G8Y^gziK5B&|r;jFf3YbII9^%vQQhKmZ*9QO-p2Y+2 zW{r_QE}%CA^9ZFGR8XP7q0=rDlUo(t5BIB6DyS%OvJ*-{oh)edd_8($-)8>U(8ZI7 zvC|HcE8B9BKeDg=*6N+dgSY4SL%WtIix+!^k)rU@Xzo&_QytT=BFl$x0W6Wu?iJ2r7EKL)i$UBzV(45~+;~ND zV8Aq@Y-yBH4kdlf!+|eu;f!8CO0xGX{jYYHa`1t^G(nKuHI2l*O6XzsPCO?;AUxFkziUT&kea8^lfVeo@+o9<$* z^vFfZoCxjB?h+bKMd*#s?pErT3rw8V{Z0XXWVGPzl{y7=zqlS5VIvX@?>Zs!^<9D! 
zidGCh7i4C9*e@?vgE^j0Dt2*G452+CY6dPKB92(76(qyf=*yMjS#%Aqk*8xZkM4dt z__3@lNXC3e-hBJA7MtrfR%BPKK4$y6H@>ma@Ss$@_ZC=U4`(AYgHSSNF|F0;g%n5B zx=hAc%E9}h%VnkBjiLHQOU)mye6sDo0TDd=weRI|H31V1*N8I2Q+&RT99kpRIb^fe zk{f8UluwU?@7C^^*A=I7#4M8DTTD4#tl8IOS+}0UZp?;idXGA!qXeqIwQayPMU||X zUNox>Woqygh2LHY8xNozIDPe)Qnk1h(fU4xQ14dQ&BLQqwVFx!`2_wfrLyeq6e}{N z=-DA9rcDDO0v*u(Rrds#K0 z6zFhFw28b^ZB;3LmK;4AU%vWYnV&C(7h2sbe7#+M4X7U^h;I?&5%%!^me9 z<3+a!7O9{sGfElCSmk&rl%A7M$*pmvjm#~_I;O|w)Ulhn<5OxYPdEY9e-|4A)mcH6 zN@gvAb-_jT^S)~Yexn8lSHOf4GOff+T%(EizhTJIj(~eYnuF$4MN8_wn5v1 zXG)yMwsTx#b~wBaC}S%KeQWs0gl_VD2A2w#Jm>nu55nH2h8JvFie6Fz=HK;rC@xMF zHDd6MHlb0fi~hHLLlv{(Ch-pr$(qE!GSS$brZyhIoO=I}McfFkzSB)bxA*}slqp`y zv-dzR9&6^;5jXfosbdty-I}MZ4+TBmjuKOjHc$R#D_3UslXpJ9Y_tPhdmt{KWg>&R;x;qg-ec%7GJyi(I~2RNIF0q;!tyMYnW;%=D9pL-PGwcec8< zP8Mv$Z?Vi>EVh@tSoC?Q|3X{NatF0{>&zE7e$xi7gj@wEUAS?=|4Z5TtA#_; zQErD)*{GLwv ztM<+pA;S^){x6Yl4=#m|4ry`~iG)2n^`F|3z+uqG+h5Xwn@yf@bdHA@q5hrDZR^B~ zpSTns6tfZ!olFA71?4rL3sVUC@2a>LdO+fx8JDQCI~VYG*_=3ti!J=Yw^Mp0uv3Qm zt!qr$Vjz55yuDA(LmNN{RUUJ9Z;qg@9t6fI-EY+Ny{p@v#Z4ZbQgu*Tw4ybCbVNd` z!FJtF9CsKJt(Ua#+!KHbK4sv3$cu=NdVYJzJtOXXw#2@enx__UrS#MaL>%SKRXNqUId@BroOoGH92$t*lb{)Kh{`G;bBW>GP zYaR>3uQS7?%H$=_IkOxUyx4=1;yZsWM*&i zMoUH_b2^}COZOoqedL}0ADhpA?ywhXVOoh%XQFm>^5uo&hXVwoDhIcdp*I?M5M)!v zM|9TgciHHFq$1ACmChqjAF|?CG~1q~_h^_#o9>^ggm3a~wuKCdX@P@6r|q(V7GX&{ zthgmHZd)eWoX9S)a&4e5w11<(8Ckd11wLo{(_eHt{n4)i*^0c9NE?%FdA*DO)lp%- zX)h}h^V(f1A+-CKTMtQ5GI@$oZi*zKU*~S?VY6fOLiJ%Tm2uB2fyH#=I2Zj^%l+kR zDsjcXTnyuODls?2k}KLdcIkyMH}Ykba24Xa0{tE0MtF|i_&LEi%&_ip8w&kfnFW8u z(#%7b55M@4q_wO$2I+O=`H@eH4u_OXw5xA9==R#4J|2TijAZu%CMqD9l$LpTtVM!GnywE|~<7t2@9N%YgSRKUE@lGnf z|JPfF>I(6Uu6L(U#JS~=p`nm@*7HR>^TN9QBb=HDyO!04t%!!fbE{t;r64Lhx7f9z zAM4kP%Hf@6>%t9R-Bwn%R*UQiZN|**t*;H6Gs@R+E6{cW#+Y_|tbWO2Ka;V>I;6jH z^-e`-L>HrSrN*bt`)A|J7q}JC=pdhM=B^pMRx(?T$f*BEk9I8#aa}IO-%nr4mU$|Y zhc0B_+w6Z2Cg@29IcR;1^SCzz0-HgX-ksY4tUq#Z_E7aB18MbhiGg~TWu85{A!xAh z%7P$xap2vykipz_d2Y@xXp}u%QOxU4(yzyF*b62-PKvqsy!8l-CQEiy=o%XHc8eDWu0?~uE{L7 
z;dx9vQooZaJAJf2D!S-u#0=n={x)5^C`J6tRdYRppmw2Ix_ac^J0T1dLeJ8>b3o!= zFU1%gMcI7g@l*Mz*=3E;1;YX)W4}@F0F3Md|6FbJ)`}wA&1&($K_(SK*d8Y!DqpOq z`5wvxHp-L~E1wL8^`@wmE*rbA8;>YTD|T1|)%F_y1~=b2iCmWFja3WGCV*Nt;}=?7 z%^RX%nt6ewE(5RGKkc*cA&ORM&hv%Lr#@phln{oMWy}KVSS0p{jm7fPBn#v~F`R5{ z4wzRAdLMxwsQO%1ikz|g>t*nQ{6X@=KJBmD#waZO)QBL^S@3>yZ++aiPj25?FEduF zdDBy=00sV7-?x@xn!dT*&Y}U#4+74pVnpTjc2muTwqTw13#@126QjQ=Au$dH-l^>>ili8_4q3q5M>u2kc(aWz5~YltaeWrM~|ApgP_{yCZwfr##W zhrgnK_PL9ayPh#`bSyJxyr^upM?+F&? z;!01G(clkHZxw)_O0<=}B7ZTbCt>A8O zH$$Xk!Fs;Xt(NRC5gwOXfg|+IdpD&MQeuPhlB_xr(xiei{E~fKOzMjK4B2D2)toOY zsd^s1KgALvFlhLR9(&4^7|{{C;R(0*|HxS{H!pGUx5Dqs+m!CoUCzHeJf}g$rj`o) z(W_@`<&Gp$j6t4+L?}d4G3mV50+od8d+3%b*u+X+OggN~IOCxdLUwk?`kp66*|8v3LxAF+7YzPBwYFj6hr}N7ZR-AW8=!`Tp9I3e3-2q-_kcViVFrc|Oy592X=}~6 zt2Xo8S#shjKWkp|J*5}))w{nqQeMacHJ$6r``Kd%gX*Og6##Kd$2_aMaG@0{S|n2b zwJ4UpOz;0mCnch>T}?wuS-%)rdA--eNiA{KeRx2<9b-kLx<+D21zjGxf|47+_J33d zaW?HUl~_;)T{#o)JomOcSWjr-n2O~FUh{wp2^)p;{Zz!1?SPiVyfiC)ragN%@D?F! z^oiNQ0;#@25_IK#YKJ;IvPII&U~b#+T-D-$pnq4X?C9zXckVQi4^O?aWekm-lj9U0 zpH_>886_c1yb0ES8Qu>PJqV3A$7{+u$bl`D*6r;E#Z4fX)RkCtZ*@&+1l}hq3>u%z zus>K{0W^VCcscY&M|3^WlBKFW?I5IfXyS(3c&(*7tPap@{d0GCH=d)Cy^d=v+E%QY# zV{oe4bpi^p7>gvTuP*q2bkoOG%AuIlrsAZvEpN$GGDcWagWN{44si2zbkktJ@eo_B zx{})TTxe~pq+i9>kkTVOWT9GLyl%*8-gFIv=~5i@DU*mq*W`@^MsJ){7Sd4G*%+>* zNFNjiD`x0KohTdX8}vuL>`0pYJz_oQ9#s>Ewa5q`^*p?TfNG9@jY_&LHO&b>{w)LE zWBcTI&fcS(zW3%5e@t^lF4#fPYAKtW0zU+3sz}s1*^=IpE!!9!zSw7Vm^DANPBxNW zS9t^(2f`!HFz|4ef&ZzztBSVo$)qIt%YcC%RBANtUiG_wO)1EJ*e z__6nR@0R)kYhv>?y>F9D*(OU19U_}jm_jW-VSej}lpp zkA*?(6(nkeou-Q)y2Eof_?=KE8{?f39||*@+~101NT&Ll3uOWFQIQ!kyM?lQ9-}j60)O zbtSTASuiwCJq#pi;r<60j|f*@Iiv5<&?>*yF&~vNcEj8!Q02{pRh&`}=5W?{p1s`g zkmvGs_u8oIJ_dN=@P^;wWn||St58cVS$vX8bYhdHSn=eoUEVKyj2x5(N)MCwVw&z8_5EwuOvGNo72P z)?_+M_-i#V>5aznG{;sZIK9}&hajdq4o$DL^G*j9 z`uAh|f%eo0yThzZ?A`GyP_1$wRA7uHy~V`dPNG`sD%2?3IYSA)O@^wtV#H*Sbui%( z(e;$|TP!idqIX6QgGveajKia3g|jN{5XGeOq)+BBwwdbj`_m&m38t!e9bHB^Y`H7r 
zf1#I~2WIiu9mj$#J}nq;P7_J7d{(^ydWD#n!tfD#oq{iI)IcU998oN$75=eP!0K1; zg%lWy#vmcDUC+QW3tfmLf-gg77<58?7JIi?uH7gwGc$hNK6T?$Wt*RNvrxkzkP?-b z_#<`YN7{$G3&~b<(RMJld7KqhDGt$t2SA65XC`xh{Bl5L?1(+~UcqA+e`;RWtT7y%KI>pl?;r-&Aq^`W9^s?03?0kVxB2=1DMw_mU8*|t zMWy5OFNK_NZAY6yWxM~hu6kKyMa5@;^xdpUZ#34%)5;+94zdqJr%@b?e^tT2&SIk7 z%8qPYkJT5WDnVvMST?F{JEh&r3C?E>yDRnt z{^I;xu!IRy6N@zWS8fMQ)tWM*E2EB(2>e;mSYLQ$l+|+bhIkuMG{1n=6`a9pD+yds zqs(JWQfE`_4tMrCvrqX4W-nfjDebr=U>+P&5JmrW^cP}Zn)R}d5X3^H*{Mm6ZYGZ_ z9>2666eH_DJ3wMytNG*lOc80_2vDzN^9H*bvpTru8>IIjyap6P-Ja!)A+RlQJ8$~x zp4J?wK$BDKNkg`xD7jQQjmja6?@Y#;b)B1aa;vDhru7)bScC3eL<68 zs%P|!W2TNyMN0Alg^24QekDW{y(HS+-*Tpb?yOd6W<%gs&cLr&C!63dpUf-Svz=vK zPYOmK26Spu*Bu;URigYSu;lb^i&!c8EAg}LGfEAqEfTM8roe2VzRm%jy}+@xu=%oC z0x78FOj9y~7mM)O0j?NM>JZ}@qP%D7uqoz&oSNx=^m9fejNJ^i3MLXXvk?wD8ysd( z2+k8gn*BvCsIQC0=&%`ttJcPLHu|i+%T6+VUh$HT;Oj-imLcIKq~dWE+fvPT0JJGB zW6FaFcA=_%sbk3*qgLb=G|i)7eNg0HCgMz!$J#4Lfi`iekuFt{}aEyf|>@OCU zb>LFi6bp;D91F{g9Se)fOBNQv2WjwYYQP`S!_S<-ddlv z<;Qr7G4ru9i-`o_W7~tv7M?6D+jld6SXo}B?E_}BxMytcWv+ik#oo@HoyL%)9c=y zpDVd~e%}_bK@jsVAbD9i(B|C0qiW2nDuy2S9DtRX^J~bfeY^8t*S@c#24XJ$3o-xf z^xIWns~X&DpiQ=EaPPMn>unqrGGVCW&acQ#w@YOUH4CIlc{EZO?hcr)ZINjxBuLcJxBL(9pO8`zw5IY z|IxGzzNV@R@nYx6b_YIkmUl05`s+BiphH5S! 
z{=ZKr#sWT~6*hu-0ZX{T{(nsg%$NCpkoW)3_rFN+|110(8Gn(oZ8!LLh}sdk()?GZ zVL|Y@S4D6qLtDjUS>0c;+!w>rSW2Ag-1<9=8Zx$}`OgNCwHQl7FsiL3o1O2VM=*K7 zt*$x{XI;~Q+j;RfXn<(Wkb-rI)YuGL2d)>&o810| zKJ5g6$43HgrU?9ILPpB?-mb2+a$To&*VGmwAPLxP-g)Ym>~yx4Vgr_y*k0-J5ipG5DlQ#^?N0Rj)@HS z4(49V=XSv$-+?Qg6)d$MO{(KDGlrQnMSnnmX&AoPi_mLIaFp}8LM5j zB*at@n{#-68bdpGcCHYa81bY(sJ1x^I6njVw&`h%th14@=UQjKX|0PjYnkDEr#840 zgY!lSs1n_umAuoQqgu@>pgbtVu|1V(3O_~Du*Bs|Gb+SKKXmvFioh1jWWZF^;v;+2 zN>}9ctAY>}%Re+QzqWL*>@iKR6n@gKXS)>&6)LWtl>Ibo+roXp+hh z?NPFCfUCKI?_faiEb@BU6-3j7&H{V2Cw}6c_cZ=%re!+7voa$Sml@9#WB}W zuxr=)9rtpFvX1bC(oZvP9iGy2Jb0pImqrj&mAukB>0URjSmXAY@zK^9=a4(bfo(9G zd!s~TqkjCzTT(o^WbHdtE54SCvvyMrEmzy^9H4_o8fr|qYRQJWBHan@(GhGMKbZ;6 z<$FoDy|1^QE&CUAZVt2fJ3oU%*YYiWFMs-)oo-bsd+trz9g%21-^#HQWOobJGXDtM zVUuvP0m-D;n>!eq>vou};~J+FBx2M~d3$WG^PmiXiHNk@rQhwsSvy`fjE$!WISErA zr1xCMx>~PT|Maj|0L0o9bV6H9m)xFh=~AUssvc~zpOI@Bqieh%%~|g?elt+*b@NM- zbD2+?RdRYJ{bB5#&sSYD1Cx9cVH>X%AaPzYMnkJ2&#wOzqxN&A`ukl=`8}8zDdWG| zNzfl&?3xR-S+0=L-X36Jm}t+5^;(X`RJ~5|p6=Ma5yz@tSrlyWlFfLs>vN<%S`qDD?e(Hyot!sl4q z1`J`~;v6Vpx!u&Xx#O&5f{9rpr)xKw=S&=s>>m_F+I~n=Sc#3jo)^3Kz3nD7KO98E zp<@qliV~4yEPEeN-p6Xx;!|$SBPZi=SR~oWS5Z$y{fwK6FCFG@qGgB{LCg#0ZLtby zPRv{XzF8qrf~8as+@d1Q~q5 zOA?$_?$siL9zJZ!q6>bL&SuCR7rg$rM9s>ufmrGiX^^r#;#CXvJwLlsH%|OG^w}4U znxc~VQ+aH5ySNAuH_6F%m`Od0bEQg>etBnKM%=u$!1Sj__dGqc01X? z@%^^*)ND?8>_$^cfh#xv^qUXfR~Fk#Q=xMsT?XZVoxi}|==IkaZD%IVLw%3=%I8>> zu4{ax{2! 
z@rrYpd7b)d9s40y)^)7yAk0@}jhIzp-<+|wpNk_GLrC5o0 zpu4gWasu)7@+Mt&>0D*YPU(=X$W1A3dLpIgVc{kL{*Etp{o}-;W_538mj>WLe~&z4 zlXK}>en-y{#*p*o->S2j&IB^v8(!)xsvcg&K_KaO>@_Ad04o8fLRH;5UTayJmKlPG zOV<|&fUz5>tDp5kX&<=C`_}6M=jVqMmUhT+y!P5NYR+;+H9mxn3zc_p+Lb6TT8Ts0)~7iJ z4wYiP%Pt}+R|fRrVtohKH&U$UgPPV(w%zeL^B2zm z&#Ix`bHeGN#dIT#9@?2YZM!y=H2GTOo#fX|XQ~~>G<^?trLb)(+7B3w9Ovug1&M(A z@-i8ps5d@e4W(MvOZtTIgu8Ha2hiGMkt!MD(C7iEzHj1~Kues@#e0!o@{WEoIVVS4 zZEL(BqFN9pzHM`R#c{w&CS8uszgZa;_N{E(Z>EmFSDLH{|s_HR6-~^fR zx;+Y_xiQ9&TZ}6+roG7@KR+X?#oV$!r^&IW`bfJDtDJgiH2bEQOfQZAS)r-onXx}a zjV$1c{AYk@(Oyh!BQNzDMvyXG^+kA_HyP({)X zCE~bI=Zl{U@V;tjjX3yx@psMV3u~z0vI<-uhh%Pn3fT@VgAHA;g%WFAgH8Jda(Vdf za%OY^vGJAh0oGL>P`!Hr=E&BsKVzm->}OWXSkLd>WY{k_c($t(QhG8rBlCXOGQ?{| zw1EFA0Jzb#p~a$58=p~Yuf$(q#0A(%&hV6Tf1vQpEW5@^{!W*acg%Q4pg(W>5WUw; zN?SjJcZW*Ib=8KS8ag5dL?J4d2cB_k@^*8=hQE(K2=$NY3(gGiikf!|_(Zh4ohl22 z$tE}^uD3iQ-`~?4d$iqa z<)#LGL7g0Q|NRrW;2)3iGEo$f^T~YEl>7{mgjLua4+g@iSN(^~Qf;UuYJ?((ID&8e zC|=71G2yq035(byNbeXB0Jnz8@BG8=MvrCLQwMS!Z{XAQT~tZ3Yqu}e`_9wser9i+ z=Yf0%Y*Ti=1Hb*QRsP|gIbxb^NNraHe=O8cPAREJssvNmIm8R9zd zL|SiB!Uu#@i|2kQ@lWmma5~9#{K2wH22nAhqvO^hVi|whXGlGNL%$RjJQ;I$6K~*P zzLCE7!426bezOV8=^9qqk$=z9J>c_3v9QZOd?E{b(j|Z?U7fe~{RzQmM@$?hle=j< zHAfHs;?oZRsL<1jy17aHkH~}RPr=E!^>Dwr1kJT+faWs{gJ@dvZIIwjA4NmTx|J0GoYcvTG{p%FpF zqed5@sg6OoAfW2U%+lxXn0sxO9U27KgahAlY-9T)1z;TiH%pRgv)pB%3fmaaAJH45 zty$VsV{d-`nZa5D@$x`8JL{kKZNpW&-XZAgi`>Yo^CAK%R!QI-BA<2ohaGQtA%1p& z@X_YRu|Jem03FU~h-Hin`MBX|kIaN?E1z+F$4tYEqrh{QZA_2-!7Q?{n`(_r2pRe^ zO%CFK6$1Gs_Q!%O>;gb-5p($Xtu23slGuWJDXgyhWW$8c>ERr^{Rrg5&4;`{cDKAs zY)w&AqH5E2{EPP!N3$9*%#eJtD}-26L2#12di;k`kvk2jif8uuU4LHIhE1hyp(A;? 
zss!#6T=&r07OU|3$E{%8ZUD!DcUFFd)X#-qMl@LZ_R+k_1m(F0ywBeJz<-$@0A!zS zJG}oaUJBQsJAs5_iualWalLC!?q}9zehMHq>ey5DKR*f%i&6Y?vSDCll|f3i-1T#T zGCp8Qf$&{NHsSg&yJye7XDz_ebK`HHpt|B&?jt{V!vg-lH1&9&H&DAdFyotyv-_E> zl}R&I^`Zdc&&zEA*Q_Gy_i!O7&Hq-sQ(${D-5XWcW;Y4=w<*dJUkD^?bOvLKY_2R6 zacGHrml^mWGvBzHw(k#V^1n99XD-efqi!W0-2CXLoc=65^Wy-kw>LeJ|0An^p$lxn zTwHhk8P6{l|2QnW%9O&>-S&TG%P&`p04;J#T-h)5M^OHUWD{2`G?h8OyC$jMAfGrEFjtAKaA|K7ya&Q|`MCrqIjh13 zR^uynN}Zq#I3Qe63f~g)7;dmVzWFr8_MNtaY5VWC;=zY)e&;N@z5MeTQT5h=HqxJ_ zYR}BbtJxHFLnlMJ(oJgAV>R{ANK3SmI>fWpvtp3elhkq-WdXvrI*?yTV{6=8eX%by z9NIi9qNP1y)rk0DvQfrlVoTX4)0SZ0E^q2fk(&ObC27xo7&g%{RHo+2dT(oRO?MRU zv>ZoRCJx6JOE9?VT zOI7zWee5;$WURYqDm9Loo}IbEtQ6%HJuLkNM&E{IfkS1qjKM~%>_EGgm*RTU#?Qah| z2YaRxC#{Hc$@r3XpW-pNM{AnT(zO89erR29ulaCs(Q5ik*R?pCRV@V!&eZ}s9iog5 zfR1XoR(^t7lX{c90J18G78CG+G!IKCVHJvgiS&{&^KY*i9GV$dB7ahmQtcNdxm&nE zN9jzS1y(Ng4!z9qZXS5fG$ZDYfYS9+W=4d5_=-&iW$`VrP43%Kql}KqvagUALd7}Q zCMh<*CeHOv;$}bt!zO(w&knrF+E@9V13RPh$LC+tUY^#x8#+mHdJz!m9)lqH*Q`g4 ztD<_PGo{Tkx_wB4dFTL$qtLe_gyJKB1tIvEEd0#zez<_n-TN%9&;;_-o!fx;A(iEM`s#Y>h+#YB(@ zD!c5xrPImE)ShaQBiIL-rbz^tGtC0#on8}AGUz?o>qdxk>|Y#fiO8LtSHc^(dRoIs z(kPMz-U1#+4S+ge5>-DrX%eDIIGw!J-qLan`!RDj(+ujH*{a=yL3Hq-CRwCAJ|t-} zL{?7%dyo1CnK|y>?%iu%lG&TuqlubabW5T^OGya`X^})v4;&0T03WwkAjC5fX)eKJ z{Pg{i?Y}c#Vx*~S$5E8AE1_2;-l@^!APms_N)b@_fGecNVR{@{G2FZS($zBqYn9Q{ z+lF2Dq2(6c4z=bTO>^sSHcz&on`4N|W~s_WgE6CU(g2+L>=SD2vZFNKC)pFSghwSi zuHUZ5IXXX+x9Ih?fL&MJiNXo!PJg0_d-&(8d1p!mNTZz!%L~lSZc)>y#BnXGtZGt0 zTDOlqUWrt`v(niC0ZSvfoWxcT2PZT-TuMtUA%iX*o~fwHfgApo!;pHf#Gn$Z|AC(7 z8H;1;5}eO*5G&BhGOKlqIr=ygb-jr|FzQ{Wu=ZPUB&63671OpHo{zGCG+pL6nBajJ zwZKFrC8?s9tb7nl7E8sI$xG4*`z85)HT%NMxc;|>&}5XW?Hy$1V)J@TYtksAcI>iY zYx^XzMlcgw?rdLUA2V$^F$hh!%1N5 zkanjPhr|RW;p8bVoip5f3=8n_MEsJP61k}IpV@DSk}US=%@idp1Esn1u+U1Iq-#tM zT4n%Pos#aJ2EqS&$v!_e;^7iPo2NyVbfuL7$j5TR%EcfK{}5Gtgkr8vz{YqGI_{(4 zm2LfxP+cxA@RCG5GnY6*qVJ&s83}?#Z?BBoGI8ZjwFShgHN+9-a1iZMSVJq0UoOtX zOU-{dmhKbOI#89KZi?|HySq}%T}AK)ODOMTPuaS`xezFR-8{qlhLDTpguQityGIYM 
zcYP|rH5qyRV395q>6mUcR0ym45S)0zfgw{FRGdM2f0l6vXF;^^HC4twuz;87*Hn8< z7wUV|tJaY^FP$-vRYiNW*4|A)b+vj=kJDi?1nAOWo=2+lX))UsrXrr&^Qzy=2%Sk6g^^z=ZDJVFWh0H^mZ%5J5J zSZ71nZQTmYfU&iXNp8FxJi6Vw;?P@a@RQ{h&n_NP;SWdSX_f=+V;$3ciAAD zX4r!N{>h5&N&ITE6{c>gxf`ds)?7Me6Fix$-S8PKgO^q1n}#nhUVFhTc$tfUasg56#xwtBaIs}l8s8`ipcF^wmKeh)C6`B*^*JGgz6M{;Xr zY~fx7-Gs&EWP~<4`3N7XqtuNtQQ9XKaOIy`m%zCZ?_nQH-_fqbIZs11jQaYQhjInO z;tVAUxy7DRgZk&dh;Tm4ekTU6JHq)J7NuE1QU}5W1QU+}w$L`IXl^MUEkekEU(HOk zR~4m}+D^@_wGj|Pkotkc-w&UF95gnFK0UWlDTq*YMT8U*A7avGZHz;-_KYu8HV7|x zKzLzl4q?oi19&fxeN`V`*s=Y$uvZQ&yx~!%r@#qKQ|-&Owy{`UX@aOy2e+jj`JSpu zKWosl@he2k`IERVa5N;OsZQa8ZbsbV0z%&gjcpHRM>SZxegyhI=Px@B94JakqCuQ> zM^RvqsIT7~8-pBj4a1`P&A-d(k9ZndP03EH;o??QzifpYC#NxUO`x#8P#=T&lUs6@ z2G>4#;}Np$=?qYh#%Coj#nL{F;oNY`({qaw88~yFEB{mxle9)sJobELP+KyI8XJzW zJ6L5I8vYl0biUd6=!RBrPp;5kLhtXxRNXA=vny=b5jKU0*$qt``7W6eJoCf=O%A}O z*1MvXZzv^-FMmr+z_eYR^KEXSBW~|M?zUy%aZU_s@($KAA2No1>Gf$Q&zpAJO=7;L z-1?D>{$vf1(Q?+-KA#HU4FWcS8X)De^hIQb2J9oeGTMkH{HXZMMm31kSZl5@+TR=U zs>X@0xFvWnGfl8JGx-oH<<|Ib7S_*Myv#MiG0Lqxn4Yne8x(mW0Mb;8N&=yzcP zbMk=R223$1EtyX|=p_dZe5?AHYcb8_!t0Trne2C+a6ZP$2|E=Cl9Tc+LlGe-B)+f7 z8y*(wnJ_v0CSqTzJd=O3Nr3Cb@oS0xmJyYSvE5Mn;vUI+zu(!$uBu(-{_?wQQnA)| zbu~W*R+_>e38Yy<+sn(DIhx&Rz-bX53BWf4LBKG4qX*2vzi{hs!JU=UwGC5ZWL_|6h2MtopW9UDmlHllLBOLG3YFK4zbTUvdQ*zcz}cAwb?hO?l^sl_tR|4aUz=l|Yf*-=ShrgtkI z%0I<_3T#oYoTO^I^0tpzWhfH_Aia7%TH^IWAz*ou;g8!cM&W z4*1z8VWIVD^Q~Oz-$d{dkQ~RBCgI;%Rl_dKlDfCk13{cgvD2+wDbfA`=sn=_5mUNI z8~$SS6A>my0>vMAl!{C<_x1zH--ivixW+l&LuUhQyo{AGf@2(}%vGHYM`-oSmbey; z^~|lgp)}9>GYOKy!EAK(SBgd<$#-6y1C}Zd2S_uy>Uib1NzWe6;&@LK*xS>Ktfihv z`I`$Y!=9l=y+B^9r7hmnF*;E{y|txV!U|EWe7)hLoo&5+O0mnRl- zSzhjL1jYBI4c1?LRo=$yS5Q12VC?seujiQo16StXzk6u^L-O4V=U+SAxO)5A=Tjdi z#TQ0*4t3L_GS?)GX6jdmhUbxZ2;-)%&(%cHkY!)r(B66BfYuV*ep;j^9mJ`)a=n>C z8}@f2qvkhKV9rq#-_YTmUT)GLe&9@kmBEW|NS=7-_T^^X>op#5wqBTHhzQOJ)&5__ z9O2_zIDd;!i0|mp5a4s7UW+_?Y+=qe)C_WoVm7g(*LnJ}oh|#G6(g}F(GcqJe3kt( zcJTX4VvMbLyX5f@5vcMgh+XV~*S4}{!jz5Wo!<(qkzr>MFYhMH_`1S8Kq@iS4(e^G 
z`XghZv}Bw_TamuX)@vXbUY~Xzy%C#{W)-QeJ3@#Y2KpCNhHne**Eu_2B6Aa_VVv5bDlrTS|a@M{uV3PCx^+;N6 zpTED{fbNZ(uSuA6lAQ8})+Wr+Mnq>N*Thrn8LIUr8CX)wO>MVHw9=ee_HNTQ`GK9 zVGP@mdG#Sh0RmR5@k{Ic3-j?F^(%onVK1@0m_TM9pHeiiRei+QcERu7Wm7$SpN4I* zu!ji>;vdK&<}{oX52aabXXCzp&-X=lY(iFCxHk9F@dE3{=l-!8Uv@p!TzVb-@`y(bd+mms*(gH@z1e zn!|py$uN_L15_R8k}q~^&d`6EYpN!4IP()?u6-X{WOT!LR`2@>W%}<&$T3{3oKPqg zMe+IDT@`mE{NcUXh%lcW_#6@Sft18Y;{BQSDq3|#uRa`@c9a3aSr63Fv~k5dgw3LZgYH=5h7a$5wz4HInO4PP1eSr zX-pd!x$bIY;lp?@%19o~wckm8HPC=MeL5tzKNzLC{-OZZdTFS-clBD9BO=|ggs$dI z3_&dCp4P9UkJd1Jap)xLd;NlxS1qoSMbM!1=H?`Wq5j?u4C3tQ(KMUt|1(ZK!VZ{i!QFL_@4`G&uLhp*6arXq*VkkE<$EQ3 zRur28_p4{Qno*A7`ZsJWVos~QIfYhh4r)aVenH|o#6>V~pXRU_QHv|=6Ko*0j% z>M!233;qJBM!azrN=Y@U?<=C^gKwwmG}IgVuWlT4H~^Ga!QNubECs@*@mtJui5bKT z@VA6p_=DX|{f{)tPp7c2T#=C=j^B$ocR$cRVC%~nqWqIMNqo9)VGUTIH7Ri*s|N^9 zlrUFa&YqYLxz8iL{N}w{QpSnZHvXLJoqF2ap|1QRnUc$SMa?n8-?AyR1><(oJz$-|!y)+n4^h z$$Ui5gX4VF?vL;QNOH^vr!Asg;>4J+aLeVninb)Ior$aEg%l?Q*VDev`OiQ-H*z>D z>2YZ44drhqd__!7?ESKT_kaD2*j277u*|tuR=J)Qf!&rO(=Wt2HL~?Cgiz8oRk@FG zS{nEA7AV~{^4+c2y&e|}}; zbSTZt>Y0@)d_uo|sUve5nLB%1 zi12_cM#Vps$Aqqa)=VM1Y_&i;7|H3k$YjW{>g5_Hj7+GtB3MhXHk_!|)A3=Up?7Gj zRrO}6I!`QstOTfxIEIv<5*vP#O5Yhaf7Z`YPal<8r+N=)B*nWO1jjN+wm!>qbNjQi z8%z;|NyTB(Bgvi(weo^T#uP!#MML<8u0BEvs;x9cZPe^54-K5n&Zz8}do%F) zD#JTN7cv^hEOi@BpvkGI&q)UBL4X))@K)zK(+1FIU9)Tb**6fU1jaqE6W4Aw1MwJ~ zAIRn5Hv2ciH>+dc6Vhn0k&7S7AHJ7Q$!d>tW0yOotUID!PlrfS01wZZ$9BjlIvBrO zTCy0o1C2V+r!qg(3T^s6K%VqGP+vF5-p?!`$}LUW9oeUMY?+1(x=z&oSC`_G!Hmgm+lDpt%C3Z zsQjOwB>p9X-u|qsytXGF31b&N%FoInLnonWUt{F<1g$TmgM?P95QXYD79@u0ZV1sp z!cJZlDTeV2d_&S~EkYR9r(ruQdb)An{sPtb8P5-chR}f{0=ZNpH-@I}Spp(5n|j+8 zy>k7zwx@9%4+nn8Iy}^WGSw&8K~&5C)3DbkB!g$TJfHGZTh*2W;=yt)Xph(z?^gKw z?A;bK`i^ahYz{}cZnIHpF4t>?D6=GaOMa{MDgq!cLb5&kttCoG|7_GF-aE>t`9Q}j z8txt*2hP*0K7up-V+)0_E;)ABc`r<#LDnRQT;O(yyS;MS@NpZ12LbcBI2v+LJ5e|4SMr={! 
zF+mWAP!s0f5T%3LxNg}j^d#H;p}KtZli1uZp3NC*fj+3}?!HP$ChhR>6*hYGlA95o zHgM%|`+;wVV!*2->7~5L-z9YHxgcN5vBHO!2ybxPV_p1oCr!mYXC!MDaOZVr?yLJ4|4)nk{>Rr<%Xxee^Xr`ycsFYfhn>aH0i z5zFoGNiX!yKnTKv?_4%{%hMm>2Lp;1tL#wO_w(pLb$=mgo96{b;)axEo9j?h0#$M<*ZP+Ixr}1peQoe#+>ylG1v_4j1%r(VvYMK6lpJnqZaPC2%8vjfodb^bG^t_Ehk81+BA>LcvJ zwqB19uto;?qX=EOdg(UI3W-gj?b`>3W~6*E!ye--Pa-1%U!@V^o*20brPh+LP-)+O z{~`h$ICLM2@Jjp-8;L%Xc5DUs*aLl=|Z{x-Nt&ARSym z**nH|pd+PdZZKkzZUX^Yb`qW1lBOt%{WxZsXNCAATx`^-Z|^2pmf|3R9dvX12?>IQ)P&&8ylvf4F4>vjFTr<*b&{~2{TdVK zYp%ZiAyDs6EGZ}!AmTd+ zXqxu?D}^Kf^`d7kuvcB3X)u%3%gah$7H09J$Zf4Zr7a;7S6E02n7oftDGZeFSsHM= zr44wpd#NRs5eJ|JwWLEecn>Hnxx(chq;lO-q9(Srd7$KU61pqds&%7fy#rLldJ20$ zsC!JmIgK5sWE13%q%LXj8SQ*13!B66=6ZBc-Y*-PmaeCvvnP;w21uSxbdkmV?qoGb~0W zftrCavmAb{uA+VWfBkH4##x)oJ+J-PD+5aFj;kqclaYmB*~Wb60U0Ieq~zRD1$|-| z7XDVv!W80Cv8yp+>e}=UpC~Yjd*9{Rha_Xu^7VqC>%D8c84~Asr&R%ZBaY7GFCJUp zt$P1N^%Ot;s;RfyNB(8e&x=9#pWpuKWG-cY;^MGI4U*WprB(QL0a91_KpWPpLPnkBMNqIoV)49H#e#CR zi3MdNQFcZgcUflgx*3An^7i@_Oy0V}sGVw0YF7>WgjEcb{t3DjdlNcP-oAcN%R4a} zqPz6oh4#!?#4Ee2{$Ac{i%*18oa4bF3LRsYw}EQ)rbuKaZndradPoWtiZ1fD^q|GO zU?x+583GRTJ;WD&YdKMhIcu{=#PVu_G^*~wD~F=6yhWnk!O$XGBRz}*>swpl-ox$| z7fOl-yc>}Nk_x`pY_6|#zH^@BQ=hHvXpL z%_tTf4th_YD!ol_TR;9E?0cZjIU&ZuuXjvTy8!?dfsFy+OEH41&fu@2OP}z z`f_*X>ZEYb{nA$nS%x%ovjj4xaOMjgw->MYGV35|{R_ol{9^v(?)V+c2MpC`%>4(S z1Xm~T2`?0!$p}5wgRZBlmg00XC2@y&%#J2Mx?<=xc=}RdBCv9?!sOBVe2vm3L5`|~M+Ab$k`XQM`tTjbmpsBQ*~@nk{g1o?R9~3j4-+*fdnh*f zna|d}@N1^|WoA;`S)YlpF`t51 z9Z0x!u4bUDL?c$3(#QGvoo#eKr|Y|xg+nCJMJnWV+pccO_XeFS8b=WhZkB7(!$(>n zT_diNdo0R?vi784Imb-+=J7HDWl`4`-WrdPe8x^5NPKOKSQ#rYk{K9nt)9WZ(DyD3 z4*IlbnM+T@2qh9Qu6FQ2!cl{){Dr9Prt9pXPMHEYdJ& zRZn^C%-hj|fRH92YtTKL#Ss25X$tSER5!WZow=7sxk{(HULdX?={0Cru|#=kCE;EA z^s9%ohH;lHeh9rFG4UDutYX%<5??3q?n<@E#d9i8SKZeU=BZohS{P*375n3@+Q~&Z zND1D>{k>}02v2Wo8d-DVc;2fEDX0bIH$5uJbQLSv;e4*VWOylfPf)s4sSehz=!ON# zm-5Cgd2%dBCcQ}#I2o`94pRpD^w1yvTN$(y**1RhLZY?XviE|uhCeF?w(%-1aPh`= zoXv+Aj<0WaL^Tt`OSf$8-aZm<(V`Hf_!*mC)#I*a)0W#s-X@AVAGq4%avd8tP;Lc! 
z8WDXMg_jiSo%fuUycaT{m@D=qw$)SfB+pjbdHBF}u3Il-sUL5FRg7JH`EtU2@3T|8 z-1ZqF_wI%mmLFUHbP`IKVnKyRfwA(>M$^JE-zPF!a_AiU~c%GeL2(Qg)>tzKT5bJeJ3l zOSaz6i4Ctriaf~4udW{))`(su67TH@?trc&p7reYg=NOJ#r4;JX z{D{7WUPR|Z{%6~lD|)UNNy+=Pa@yWLkyZ|VW!QQ{6;cC(uPUHVd!SM4-CF()i?x?s zw7|`cjG;>uV%v zWm#Wo&w!PDs_ZzIM2r+rvp#5=fWK_9Bl@C`%u%P{nsJ*qP^4f>HpE9%wGZ@I8`Fqn z+g`^bRE%mdQY5+0L=puZP*Y71HaU{Txf@r^x?a2VKiD1BC4XzU&bD3VwPflj`lw&l zrJBg5Qy8h_;frTJ1}=ZriyB{Yx+^;x;qk%xbGmdKb)P)t&Di|yH!&9dsN|uw&cfB1 zdJB|m>bvXvub?vz1WnPDW&MTYlZju-9a@gu&w)=_PhP8?wUo$)VRAi!GMitxQ1ThK zFDsC@BZ4Cjy~UYl$j zXB;C<4WKHm1G9W$jn+vb1k#*~BF1L95ZSl-q2@_W&Hmn|y!Ow#pVgZ04>zke6G6i@SoU5HzW;YB47T$Zk+=n@g85XS{8e28R(dwj?8m>Ly?F^tS3{ z3EdEcA<1ljkY14~>%=QtT0`~&4u_oi0T-6)Rh{y$*A|r0!UO_9Ny%N6Xm!Eu^>G(z z?>t{b8ZeUAr4ZK9=tPYdRX2J{?>!80%U0@Oge-k{GIzlrpA(RTUvC|YhZdEp?8OV#>NP=gH1j_IFptu;+=ni}x!Az45z6s79Kq)h;)> zJu8fZNoWk#!CGbUg?Q}#MnlDfJpn4RpQ9s8)3u3aTV8uZF4}0Q8mwPveHuU8gO0yl z*pmSBD{HHTM&1c4&g?~6_PwGz8yCe-#_LqZWRv@X&5s??;6AH59Vm3Jp!EXk3;X1Q zAO?XXTe&S+--Og=OI$yFR4{nwtkC(o1cKvvT*;XgtJL#BrQM|K>Jt`0KK?FgO}hZ= zI>xlFYq2aWzaC~i@Y{A~A?Iw;79(_$Sdxuy+{4;p9Sf*IrHNQaZ zYW0j`c0pd|HufpiA=DT;=rd^RpJ0#Vmbx+#=k0K$rXXVrZ*R5DK{O58EkD1I>z4|2 zDu$L=V-z7q2elPQJSiIc90CeYe425hrD;d50DZkg2ZZdTTKk8{Q~RYa_CFc9L#Yjz zExu{tQsUXBFg@Y^rT18MvSv0H^?p>t>zKE*n(=Wu+Bz=KNhc!o>eO(>&R(;4)SSJ# zwG&5tN%o-2q1oYzEBLeI1xMRL!+(XSAg((gRy#F@dSx2~HteU5E%g#@}r0w+&jLh40v(bKl~uYBE>&6Q^sy6{nd7ts%(c4Qxv zp}oA}*J~9ZQaveOiG)|%TbjA_RvWe@j%PZkuywL()gWx4xgm{~bW*`;No6&}gIseb zDaA8r;sCDm#B#4}njZI=1P@ll_RnJ|NXyj>Lhgaqidik$BHIjv3M{tmK6zhZ%e5zP z*}@gfrHkSO6aL-M>S`nw8vN14yLf53CrD2r^?34Wbl{uVk7`h_v{R;ial>_J`SZ5O zQyhtFim)&V%F_fWDMa&Ae2TE|Nt-2(Im^aBkhE~_NR)nA(f%q3YlDuhQo9-zP_ zICeKvl_y8!{ASOdU~RfQ*({MGa*O-Xv^S~MXcu4iT)C5;-4*3~ZtP31|CSzqsFg^% z@4a;Sa|VZ=G&c65eWOuC&qG6w>a?#gEn4_p;U-g4Z;3_-3it4JK;Ci9YXXllr6?yI z+N|`IKzj4s*pk9AIZuQe@WxBMP^74JsKmgYFGzqsh zJR{o`+dCX@p2F_+=?=%yYSme< zZQ<6-wcYN|zD~#O-jh22>h4XW)~F{Po^FH-Sw+%nD$&z$m(TtOPqf`&Aqx6r@7ey^ z`=D9d$>8((SM*hdAy$RdcSl33gvqO|{Ii9D0Ehx>uI%)rF$wE3Y 
zEJpH~*)}hAYny@Arp3SXE+Rd~;J~ZaB;(qOlgh_eH=IhN8un=)zjHo5D>+BGA?M)* zpdAJ-bJ?d%XG}lbO$rJwt%wr}v6ThO=ALl4Y1ZhLOVt}8ya-9gY)8GGPtH{75AHXS zfKO81ly!c4Tejizkp<&ze$%7|%2JCap1x)#*tV+!KNQl^8jq(B4KwsYiU%cv-N<#J zxVz)Sd83zG=)U)HK~187qo~{_eByNRO1;%-p{ zdN#B=Q7Y6HC3_~z|L}y7O1f1m2yfnu26+ulHKr=Hn%jx4q^O z5{me6E7E*hxa9FXkz2wkXJB`4YZ( zOV(S>2@a6NaVH&WFcA{>M+ozp3@=}6{~;dc8vuZHzq(+Huw|g)ClP3y+Ckid+3V!j z{8vD=rcI?NXr&nT5M()y7;5t4iYr347kAZeuan!zqBDKI2$fa8OF zdU1WR2d~e?oT44TzrZ*IYLw9&(^KH6JG_?LV7Bd9!A566(R)SW+Tn%j;-bHRmu5%g zc6HWWC4Vqfr#TCQ{j~Z;0%tDp1X41JhQn5U?Qj#v8MCE9mcF5wXi+*q7fSp=6=V3l zNB_-#^=xX#oaObo=gYf|t?2NGV@o>atKd5h(ZscP1lvw)Tc5`wi8&9?b39I)^v82t zM77*@wKUa`c9lAl?_v|&1p)OOnd%YsKF;{s!->?e74XWGp{-bfVpL8ke`{B@9^75O zFxWJwCkXP6wL*RgHc>tRolPYg2y0$j{6MSTn&4$?UDFU38ar!dIeFlJtmcFF@^{9h zg47o;_^wXp!zFY(mf@y;^7{fT*)*iCHOh&^`&n-b!)2W{?-%iz`BLCQg?MHQZpUW} z@YOw@S}zn-s|+<#t<`OiWwUiS2fa{#i<~@DPS9&Pn1spI6GXI>B9ARNMQ-`$g^;{$ zL6m~{T^j)#Kx1!_K|*Q)UF`LSeBhh>f5_ru3%mtYiRi<5kF4!~z{)OMWin!_gLsFWeIzvY_(Rc7e=+CYuXk*W z+wmMDY3RSgt%AE6tsrf-GG7W!SV8KOCpa6b2zpP(c(-hEV`@hpR}j;%818M2AJDR# z%ZJxX-P0zP^Rq19e0u^mA1Tr8wsfsD%_G=)qn*xt8$c-;XhZl^x7z;mUHh|k5nOx= z16fgZU?UPlP9Fy@dtY&+efz`mseaCz@|q zV>}o@TgQR7PrkVopnnad!Sydaf5*PEqf}diq3q))GV0X+C2!mK*}mUlv9i}?;S4@}(ceOby#}sWul~Ws%lkN~=O5t}2*0x`{cFI0*MS2n_TwLh;sVxM0nzTJ7aGZrg^P=#(0chhz$mxnEA^u77x z9*lAPy{EvJIek6PzVQ;O(yaz}aN~@tYBB3Pj}J!`}WukBw8{ zZE>diq4*xB_3I6mnGlW)*F4LBxi?1el=Z+8#lxj6XIL8LVjo8NtUWAXXgANzXM7oV z)xAZdgyh1QTDS%GVw#&8a9JMrH!{39ptp~ORWS_-&I>U>*VM&cZs$n3=KKtw{#8v> z-`S0FfMI-c;KyHXjYZ<;b}U$8R$LVm?A>(tN7dt%4Ts;y|eY#BkhIi*~EOWB7YREZ9)ux@S7%0VUvc z0rD?M4xC|C_&DB~a;flxu@T4nQfaBAnV?P)Q4oj1FxnZ_*01+-hm`NgLk;2cGd|DA z$$Qyx#l#TXuuh!p29k6Pyq_p;Lx$KDIxI(^3nh83`HQ_SuFwa2ahJNJ3tEH02S5|` z0c6j=5HHKT7fOL@pf)xo(a5H)h?D+D&t|zXzOA zH*iiuSI%dtzQ|9}_|MCtBUm*O>aPKTf?W+@x2!^{6J9Z9NLPZ?U;W~PY zkvwLSHJ@X?OeP&(mWgq{%}lh(sxd$D1hY}b6S*Yqt?~_c=2cduvKE%zG8dL1wae{< zY?uxhwLGNws&3<$$gP(xT8s+a?u=Raq5xQLPLP)2?&HLX)cL2H_5xcEGRXfVQY-(v!m!@ 
z3(y(%$=hD-dl+f{z52~|&AQf%y6B9CU@0UH0@a^hj<*L&q(Eqo?(Ohpw_`h4!i8P(){Zt!r;0qHgy>PiG&} z=wWwMv8YRu9KyF(b*7t=Y7?>k8(K6_d(^48lA$s|)u*>Hj#Y(S!T~D9!x{BarQOm> zdP^>ehIk@;bFm1_;*9EFhgh=)YIvm{-GUSu0 zFYAwf&mxT8kvdMg{?+SMO|sm50>AVsI&~r|c;4Yl0aoCH^BsnpC=?(G5P8H6`@Tt~WNCJ~TL+C_VQJ+?Y=2{Nwj`)+ga= zCj#?Gj-r*^F>Uj2Ro1glt&8!MK3Jc&L~m4#mn62=m^**BPTbHdNTPyP3VwRibGOBY z)Ro<9JhQOKc@r$0QXI`wK_!zTDw9Ap+`YWM*?C5h8_he(>bmP!WS%W(dm$8&B$gA< z+{G%tOYvd^|Cp_sKBmT`$?^5=S0j`~oSJ5frn2zKG*L^}jsDM&fr8MBFEi$mR>~CO z9%J+4_KE!jUn`dxwSMPVxQg6SHqD0)2t^Bx4+lpNC)l!`G*~pQbqv#gj~Q~wG_y__ zkH@nWT~M_ZL;apVuA`{=_AZSm=gpJB?C|j7v%QLO{;$-oJ(=Q$1}sK5$37_^0@&`o_D^d67e|3O0}KLj(!N{oWuZBH-c32A z^hg?;Bcye7SY*^^=}N`D%zH}PT^qD1Xlx{Lx@4k{G|hkev0utZP$N@^Goqpbt0?G1 zo$Yt5iCR6|_?;P@M`eY%a}hOEDte?-DFzzyA3rMb5~MmXLa zV_T>5Is2(NXH0Wa+R5D$W?GgKFT=Z0=8Zj-dxw%&LML|e>iR2+XR)u7NN6x9E&{CX zt(i9@r^Dwd-YaQ*GTVfXDaLD<;0FoM}{GR8kxuSNB z%4}Y^yVT+r?30WA^w*}O1}bHV} z-%ddnCgpk}TBVY!@0*~(e5GaSwjuTosyTIrcGR^LTqWag-=MS<+bpNl*-kBL5zeEQ zl{;AvAQX*=ym)KxwOz&C`V04SU84gZRygaL+6M(XeO_?LHd#+mnQo2OM~|Cw{mV9x z<+A=O%cUUjSevpm>n8Ot<7;?Yhw1-PVnKel>k{JL)YQ0DSVS`PDk4~KaiA9`!$|cF zqVfbdod}{&*)1<4e#_aF!bp1bvAc>Ja$ss`Py^L8?1eCsHM+O0I|^O?iudZ|&?T^f ziMlbbdY&9nzvb2~k2y!OBLr!Qw-0$d_zGRO@=tM2O|{^quIXlb1`oM>vn7KqUOe+9 zx!8y>W%9Z1YZjtKowzVS65hyct$vnukQ#(7P?%3*jLk$%>P6CMMFba*2?pBas=H9N zXp28=Uzf<7GnHsIX!b6^eM28O;{KTkB5HLKfzo8zlm(CP`rj=($~Pmi`8A;rKl}0K zWSXVx@%|Fr+}33m^~eQOlUp<RVg;~SkxQatsTUhKtVCIwq!f;Oz^a<^tfo{AD?&j-JXhYAJiZAJ zx36Ykoe>GMQSM7PUCN?BiBYp7HS6CcI`KU@;Xs2nUFh7p^KPTnBZs3gMIO9Kvc?j$ zY3)WwXjKyjFPG?~pD~HkC)rA9bd|w#jTU~=7hjXs&THuo(a(b6XBbhlGrXz5hDWIVp#!C+D#;ur9FW$WSJhAodWeiv7qx zgl4Dtf*mZxakdM0;3TVQY;LOC6p6_~1&-+GPw2nXHJdnjYl+Tw)0LQrPDniL|{NiqS+f%k}D3{u_f@7tQ&5E*Dlkz9=Ko&qWI-6yz^8NBH(nVzuTp)swdw zR5eiJ+iy^sZw_C&ZYpZ$Lc7QOD!ue}k#k`xcypgEB&y7*@AM3sZT5b|^P5!}fl z&r1j7G+rXsw+FqFmdBvjoDS(l@NS+RX|0z*Cp1b4wl2r$n<+}B@%(qXT?-Ay+a>L4 z;j?D-YukqCi1k$I1ly1#qhIiB=k&8|r`_#WEf*r>?YTlKclu{7pWRbpwXTE@4=#5g z%RYiKNtBGO*QVT#AL7D5SGV-6I_pbk!pvAcOobbgQ$a5p#%&UB8|S3s|Ctvj0K79h 
z=5YC6g${lXR@KM-&Ti^mYV^pm?rUJAN{1IyB~5h3=LUrVwH$n_6cE%+oGVJbK=do) z`c+3n)TFQ0@0?1nohtI1h1D(O65{krf6HC%9g6MkF_N7Z{}eWuuIO6P_)JX~r_wb} z#GAw7d^eqcB0;%o>?IcA#T1DrU-sN{9#TyW4my$zoCI#V^txb+dj`jm`zo8-WUS?y zSMsBtC&j17vhT3l0x|9he$};mQ{kgtQZsdXb5q=x%OK`lx+Axy2z*rxlmZJ&yoSei zFQ!+%&JLbJxzUW6% z;je90_QNZ6TqXLlm2`FM+F>5;aR@kjEJjndu(`11`cq#Jp2eVJAFro)y6Mk{;8OKCeE?`K4$>QJa$h|@7FWGqfu2#!eZO#JC#;!u}R&M?4VZp#vPCZDwq*&i)PhaVKLG-MvLf^z` zq`%Z7yYKYr&*ZVIV$s8M-g_d?+JWj5Lbva}+UPZk;}ALr_QY@m)wpdf zOLyveQGmCz`{m5cY)nqo&-0$!Abyn2Rm{L)78n1J2D;*Zvht ztgaQ8DjNbr1C}2F*0Lk2ta}x1r6x6uB`U-5ic>Xr&0uOL5oBh)Qg4W06PyqYw}hd@ z+pwk>O=aXJG0U)*by|#MUU#pUW_MBCzT%u>W<|Y1|CNl%*b{>d3<_gg|a}I z=q{7a9Bgu~id@_BqtaK|x>cpKpf9{+q!Bj7d)|}Bp?o)EKCz?Ip~ z^VgH5%PWxbcHa?=_jYU01IwiezEdE8-l=u%rU6}Y=5_f1{MkB_ z$sJrf+^7Gf#K@-mhDv8QAL02ny#{RWGIj3OVDEwuqZnPSu}e1avbS-W>8W?+aDffZ z0VFZN7*fgT8gR^X*v+j_-yyH5+6>J>!)`gHL(}g(f77S6bC|t{%LI z)I%6FDDY8(AJ{vV-qKYX24UoCi|iE^n5=$4;O(OOeQ}@?h)THM*R&H-t>LUfEF3KzpQn zzZqHJoYEaQ+|_@ORuzH0YNT~KpQK$cIEIq?)NtE)TY?UO>ddOko9d3= z#+F^shyPGH$8Sz3awL37_}1=B~bnFqgU_+@Lu<{S4@ug>T9WisYMmekUEk<6+-(SpEmD z-oiRnYW-2>^Q7$thFrwrO0@Ld6iy?>L@ouA;C+wsBz-(?X*35rBlZb*P+;1bSdVk< zj!9a@s`kXF9~Q+cfj1aHC<4{TfTYM zqoPCBw8a};;8T5nkm$1F4=yTSGEbUYqxR+4k({%v4{x&0*T!`!#4M;8omtrl zy3ofaT!OifcY_K%U!{DAZI2vY!^am>F1dMmTq_6u!x~e>2wbR9^mTahuLK8}8pXMz zr!%n~#xi8jfy0dJ-!3PypeugVzp#!T$T8$T#w=N>PwUp{sGBme54nPHVjSI;n}5k z+-M!iusZ;Y_P@&XF3n<^12JQyX$~@7MsRB{Yaa}@5DaOrE>G4?<+&~jtT6x+mBzd` z$C(DUhl>BQoQY6Bk_Q_>`wg&0`Jdn+LW@F$;2w=QUcu6Fx% zHg_O?&M6kWDV9+@F+;`8M?K5=t&(b4Or4*szp!@XQks0*fwTzdlct0WDYAc@vEb}U z_I(S~V<3HzG2aK?thXj;0z5WHI&;qhO#qppvB!izUKaX0LN*|w+eLjEY3zui-y!zp zK6+yg(Yw2RDKy({wq-6~0NRdna$u=;U9NAF|FQ}j1&N4zMgu>U38TdgMsb4l@i*>^ z=r!kElyV74OB8TCU3<=4gppocJ?pIJPIWh2dbsjAyTg&F?Y_Ggg;YxrDW&;#_jhr< zNnB-u{gM0xDsaV_;YH{70V1Ls!qHU8FD>8>v?T8Y+*io~w2j6t{QQ1V78rb;WmH zF|C``XjS)+Wmx^H!{fGCcX~|U!SH3_@b!o? 
z55QJaE{L^6J-&xEUhH#^tR7Z^6@CevDTZ0~?^;C|GjT;BET@3L(6{AH>JstU6ON(GX$i}Kux#I_1NW+>Th6i<@7+4 zL<=j!0h2ExHbu@V0hVQ z1NzSJ3N5SDMYU7V4wt8e?lP6|JjjL!yz~kDyG*O@q>u~OA9Vf%D%($TopaMnZvDly z2#I$Yu4);sZzvQ&G@@aOT%A7Jo=!c;Ma}c%?rKo;dJ(i~HUiz4_}^(eCP2mipI^fN zDA^jZyFM(=D|MR(h+p_v8Qxp78_}Ssk~`wC)9;Jxo$Vqi92!#~kbu|DtmUuO9jQ;| zVdN*!#Mz^Hp|`69iq#fBPCd?R$HtCgcsdG*=mkibk z>Az1f)OmcP1*s->L_B}5eS1m&Edi7nS|4@8 zisSD}>*!3C)_U*n&}I7!17tg3x?fLtIf7fR4?bK%j1_2nABzRDaU8&GkAMyK8&Lr8 zC+Ag-BHib&yzVC1TKSLi8&c~ZQzq;_pRk{vsp!U())Ssk3s9D-e;#=CwU-TNknc(h z*W%}hsvH~ji?2Gz%FTrf6V2D+kUdP>P1kXEsE>YGvR+9F&7sswW4zlzgT|r(0t9sdvh!*`paprfYm9C*mki0F;Zu4uIR%|kV zE9{S#pMxV;Z(d+97-L2H5pZM57|UZ1#EW?BXN5HIS`NFZc(*CdB`i7f0KehEcAUm( zJ8uiZ;T8P3?%pD=vE~wX*0x#9pZh}N3*?Qu$S3At>B0^*{&tdrCC0`M?s2DQeUVJm zILedEwaFMY5k)Z;57izM!kkaq*uwidfkouRNFYeh+w`ghEA&fLdiMfmi~6pT79L4{ zh~hbC$4!C3U|N*Xb5Cv-=qBB|0x#Y7`1$vGTd_H){a+IfF9wf3^*yK`cK0RkJ=f2? zszOXBzz4Yd*?`9z_;9Ob_k=X_`ejKAwf>cx|}(Su#>MHJSMEUqSSQTbgf=dgGMqPr#Ip*#5V>% zg=jR|B#N4QXXK@K!&C!y@FgH(&lG`x?D%67>y7#sx%xX)b$ z_EK~BG5-?006li>(@7hDtY!C5`=;L)2v!wnJWWuY_N@{JED9`IShM3bnpbqG{8RE^ zNy#DaG`@Cs3=IwL@T_*5V;xNudTx5*ykt0S3T{LM$uu{qJrU?q7*U#R@MZi|w7kJR_?YHt>TOIM^8*e@#fXUp#fB9%}^^aQqV>hkX#kl@fy#cxm7l+!^ z8+EilfakcIiIqw$uubkSu}J$B&Ug#&^Kohw&4Be$eXY2Yy4+Em=7$WOL-$^KzD6IX zMrQMNX(|=VxN2#K5xup?16*3u0_vRVT|My_qfO2TvqZz>*=oQ>Q$r-O+7d`(V5Spj zw=4n4HohlB5S=TAhzcLuEP~h1#V(@>voDph&^^o19anhzmVMLJ1>z##FtYU<9rv(?Q9`-K zh=W?Dg?C?fxhQLfbk8f}IBzCaoS3S)#|A+=rW_wj#U@r#k|j7;YYg0-bqVAz*R;FB zuItqEx+176PIBXr&t3pB75I#OOanbFJcLBm|D0ucM;zofY{-wlQ^xk-#jt8*ct`7rt{E0>6Sen7)}?5ZXU3u4aeOt)!^a`(*MaILFjMuD`?ahymrxdo4JkTQ4K)Q3{b-xF`{rU$hH3X8MhE9*=UnAMur&A z(B50kHtZpGX;o8m%jG_EPLvK+08!3d7x*pbVzqgwO}5Tye?|jwp70QSvR^KaxATvg zyrjJ@?6ef;_p9_xUa%ezeGPeuFx)D>FUFT5N# zl{UBRbkTSpGr;;24n)reW?Cxt8lBz{yyFMeuqTIw2A^`7vp?Ojnr;u>oYcb7G&6G; zNB(&5a@#O^uaUqA@jcA6s}`5%^hd)@Z0TYEeAe;;=G}ib$A7{zHTLCX3|a?SQwu9W z^=4l7B9t7wMnzt8b;>9P{@ypoH^w(%DQ#fuEziX=VUaPuizP7UdJ*R(4WW77jV>DAd^`iG1)Z0sxQZXt)FOReM3Z28PrJm}A@6d=J+h-x=!j#n3S)Xnfl}@f 
zE#3KE^pFp!(#9|mu8$JsBROKbUF~qAcL~6Q_dDGF+XOHexB;Hn=BKgXMBV%4{3fxF zwJ)71K4cN%q+td;6!uiAFBNkd56}6IQhK@gi`TNdr_@zoj;{lp2`2PAk~=f8gg_cq zo-V2McADs4jUc(p1y8{zISw1p-|O@7Bnu};;t3)6(0$Fa-p}uy>J^si7S)&-r8@-& zc`_yv9ckk986%bnqMTo2lS_r+dH&!HWCc6=*_GS7r&UrAf?E&$!H_)#72op-+q=Lc z({&eAGnlg@!y@J;CMNp)d+IJ0WmeShl0d9cKIL1+GkY!kr8TfEvfYJ0zU8og?|)g@ z9~-!N+{XFKwHJB0y4c+Dx3@EB{cJtBsX4o){T~VwVCg6ZpYw#>j{@U{-rD2qDZR&JM&F^n103`y%8f@d1i_n%0$Up zUCrSeOa4UfYNOxnfr-?2tg{}&uD*_oFicx1@~p|#iIIrQksNEiUsfP|I6Ljxvneae z?BF_UM1)Cmc>+#5C4-oCco&81i>Mn*XO{xpRNt3cv%gjDpA+bZU8Z}Vo9RC8 z1Yk!$efsQdMSKhyyTxbriqP3Ot_7pKoC@@Txu zg$=LcdV05&2X_ggCv%IJ;$r(-?WV+z<19fI;K?Oq)uix5{?jf(_50Bt->aFibbgc} zP6+aw6W){&q~QX75`$r}oBBfZpQQZ1E?_OF6^hNx^l_cIy>i#_Wk?aK?>5W^yMn#6 zj$S2HGV-AARVM!S@2PwlNH$LP_P{6JRkU-UDn6_0PbMqS7cD|H`K}2!=9f>>-E0YV zUv~w!G(uNSd4hGXE(?gGlY`&z8-hb=NbG zUiaqL+0E2f6DfDa4rf}%(J?e=X4l44hx%&N6cRntyuiPYY~CNF&|7*$wf6fOWrPg2 ztv#APm7TxVpA()x1IEEs(BFOI$c$5P8n37-sWZ->n}XgUL(a+4zb7?-)mC%Jw)qQT zzC-h;wxtW=X3Ta=BMb~P6{1g0v2LuV6%&8Ix9pCeb;AE88aEb}8qr#erISkgZblYE zkigW-fZk?=;W1r1ZP)8?svu!ez1uAHv^CV??p(@Fq8TwDk=HR?lqKeSoMJ8a&L8ro zESwWsy~487M^E4_k4QGlg|qI;dM7p-QDNzvL6PE~HmBE9pi2Yll8a{T%kis) zvt!l!pYC29)Kl?9CpF_y-E-ri$l_#asF35Y;q_km-5)^Kus1KRvh+Z89*J;w>v_wH zuM#VBsWn-J$9lL|bt=m;LPNEcc4X=Y%L(aDs z@V76*ei@Gnp%(+pI;J)~R<>=x7rUa&QH7~TFTYGtOwLR{+o?6{x3+xc>T4Qc+A`=< z)%6)IIols@7oW*bU9G7yQaf;Chvl|duc4ezoKU>~OVfbV=@5J1r5~%H1nj;AAT-@t zxr$=CF0K}lULNd9_Od$f$6o}W+XOByiJdZP;~P$u9!hUck%)ioDy~ZCrlay8 z{ha2aXCux51f5T#IiEnN#YOt87J*Y@M7)QEq5SRVmV1}yjC)rTcuj1RjMPDza=BrQ zV_)x3zM1HP&rs|XkrP>282)-lY_g!@Vn&B8L5Ma=D^&NKul3fCR~v3VIv4#FfNM_7 z@{LQ=2sP3Y7b8!(<+Su*sep4e5x($I&ac|*dyVgTiFkSkeBeSdevXu-ZhpU}7VO+7 z?lZa&;X3CdFUr1yhara6-8yYu_uZb<3YRhCse9>cRqjH37~w&$ka!LZu`hZpA#dRgo7J?*a8Gg3s)^5C7o;aMH$? 
z7oS620$Y7Meub2!`OJr#7#pMksw&S{9m`Ej=mLaFmEWL)b(;UMweeOpB~Tix?UbHZ1)7h*co z^LjqD3p}yl3o$)6$`5TfrJ{_n7NCH^@Whipx$pn&|5j=OZj2h)R_6Tmy|CQNxcfQM z@1zUjg(|c{9>T((DCwVPm`}5#k=-J@4YN4>TTrm>1ZV00!1YFr8 zLD5qL-nBBaj}%tU(n^~{?Ub5T)S83X%sz~0W9{uqG z{(aBGh|r!Q@VVJsv1cu&WLFL$D7QBCQH+v|UyNv`?*kh2>^%>hF|;4dUL4{Oux8qJD%#vH z+pU>t81`P{tv4qyb?{z(^1Y1a{sNH*NySiiz{>B>)e^~J_-42rx)YM7s?Nn?&3WV7 z-FNc_xrCd6`&tgE!S9!>1(eK*o!`GwHpF(8w2RZBfKw*uNgvHz9D{TdBEFH*e!0bSjle#zZoFh++^WDeI*4dn9?`3$#dsKCaeEv8L> zBak(oyzbKv-t7jRY_unzwcTZDrfp>=?qQ(jJ7a%j;;NEHmbzL2%)&cuMO5dngq2|& zW6zf&eoOybLW-ie1!1js-tYw6&O=s`z8Zen7d5L5raHJ0&!rz#tGvnW*Yw z#A@9uYThbp*gMbI>rJ3cc+a6vi5c|Nrnho4uaAT@h?C|H)@9kO`@FQ;Yevc1dn`6? z)-p=Q*YIBX30_B_A(p^oG{q0iGcX|tt z#xtKs`Qj0c?^)JbMF^-lp}T&t2+QQnRYcuq*=Jq%*{?3wTCNrvhkX&>Vc+-A{xn-g zmv+7|VP7%oocpYrXWyXSbdQv2_Uq)f_Iumgjuj$xKL@(^;fFLM7lUTwcyjhood?}S zxGF}EXncD1jGPp#N6fCL7cQ1wR3z&*JMO_?!ltQiOU2sZcfE$2&`(ecPe&cRs)g(z zwX$3_UiN!HKsmMWmV(zr;zS6pZUU*v+fXdt1dsq@F*5UU_xPsY$;VKqmq zvtkph2@-!e4Bwf@arqeMoqKaSxR|cKWV@Q3 zc8(YI?khDFX>#AZdVSK-B$YVdN-s%Wnc8DZ>NXnlVtAGazOmyDBK{PAW`M0z1hoHw z-2T8G|EYgzu(+hX;}})5B~H9WX2OBz?0@-1>9TqkcIF^{t?=|2_%LhE>JWA|n__Ps zv~Z#_Pq6|YJ|4Y3ErM1BTz)vqj$A66n!aNhFOLCcBrxceI+uMPnLs#mvO*EA#44hL zReM){g}InvJg}7_{#DOg&NK=jo1W+Xb_Jv>d*;idnU~99SW}aM!1$&4GAXnq)(GEG zWavOWMx0XY^g#W>tiuMtT)fdgM_@z@+6Z3x!T%7Lf(0ysSaDK@%wOvf(kKC8@+j5g~-IVF7P zwqaNV*ra=3?;Ppwd1}}HZ+n7hXc0;lc%jv)J|4tXm z-f#{l`+nYOu6PF{{b#oO5ofKNgU8Ga=>8xIdXPRR3vCo(i#s=1AYv(fV{{ST*notg z($P71R#Hz*e02m|PoiI3p({o$7my8^WE#SrbFOCSIiX>kLmY9T!BbzXxYHGV`>+LO zR94t1RFL}M>c|kHRBvxDMzwyX z=Z+ahJb$u-7g@brm6CqzCJ#L(J2>gnZsAh)M(ennG>UAMD-j{R)tMzWIctt57h}Hi zF@637U1hpG{V7&yoD%FENj@43b6iqjBv1^YwU#fQTf{#P8lr*Prd=+*_-=Fs5cd>x z3dGQOuSOVstOV{Xxb%4VdzTdjqx~u1I^K2b;CUnlWOpyrhqkBC6575y$J_k-2~TEj z`fdMi`DesE)i>ZNQoH%cLKFje&&f1O!-4ZW}L0;Ur!^j)FloAj5FWE%4HUk&B90WaP z!h7{$x4(h~Xu*{`V;$?xJJ9Z4%A49e01Xi)yVHHYepm}>l*FX zBdlEG9AE5WKs};=P>wD9{e@$#5SZw{YE7Wy1U+Q&tppF=qz0wC$lC$IkMG=;nB~I^ 
zA@fH=Gx$_aLSM1vH6532-HGa&xr*5H_R3Q{^c?Fr7G3bw{@k!FYqT(Tk|RDJYrxbj z!m5VLVt%NxZ9Kv~;o0&0*WIv1<^rW>kjlN6K^BppUd?B`8)`!{+#(;3ms}uf8NCzs z+{Q|t-u97-s>%8zapWmu*4v7(Yj@u>_itUTIYdZ88kS)wy|UlnvwP{j`@`4L8Lv~B zw6-}&{0}#~Wv9yfa^A*8_nca4GXM5KKNaBW z(8g7re|eFc#HVSEZ*FG(w8ma*$;OU!sTDAf$;X%qs_L~J!x@n~*^!3lWQD!!5t;7& zj_>TzFPaW$+{CZMuLA_;szrICBeT-v-+9NYGHdH^y)`aX+QF-!vr4nqhsTL<&FVA- zUuI8KE}pG5OiG#Vj5@L5w8`IbwgLq$Of0E=U_*8~xVz46xVJEd1bX$l`?#mIc`0uB zULeFLR0%aY_O+Lgt<&8krw^0@65ggEmf}Kgas*@xOK{)w4>%tXQ%(u}wL@B{&H3DW|)a%a6YkQC}t+!p%# zro#CsbVW&e-mj{KA|pz5VNMlPG#v>SnjuQFqlQ;c@y}*qz>4nddUJ&<5MTeVeEX*3 z{zd8CMOi}QOFmcpSuTURpXs8I8sQsQ)j|C7cR>U7O^Gc^aCS-x%NO=MG}Y*>-RK82>$Kiw{eu560xst2bW6I+?;~v_ZEKBZva!MC8r;o* zG!)cTFbocLi%QWUzaG=*E68+c{=G75HilGLO5sVaqo3@EtP8V5y&UTx(MdWRYyRo8 zX$4MlnuE5FyTlCN1vIZCiW!r(fVkTE{e5n{0_1%%;YP@(uiyQBZSQ+`O-%#mN6<98 zY3mhQTbV@_BtUXNB|c9TbEaV1eR0aRY_qpi0+;pfAS3-sN?K89L>z1vdDCVy#tQVi z`V??c`Y+o8B&S1&so)0cO`p3-&uOuq$=45mwIX~Gmk4|FX-x|2XYSgbyU5e}kJDY< zQ-v2Hb=wU|`#alIc|T5{{5bYx0B5!Tj&k<9jbN9$n>tqD%(1GlbtdLO{?-8a-vZ{? 
zEBvufBsoQU*#l6hKmNyLthDiWgwL}?UQ-m7VZF0aanYM2{AQJymP;&@9~EeE_`zuO5y`Tstyf!-@g|p;fEyy zAtwUf!v$d;Daf%|2^0rKo<6*=(_h9ZI02#P67J?PRAyF^y#m#Ln`ZvF17a+slChx{ zUNX+#(}dmke>y>x%mLWoVm|%Py@#%fejVUxBca>*;bn6+VIU;@<3gV$cdaV1sV>V7 zNrTQ4AW|&;q&Gg?{y8sn50J+{asm!!OcJOPN}ZU1W0qfFriR~Q5#W{dymjl_APoY~ zF4JvB+z%k~p{w23Na%RF^j%Oq$whwpRERNI`t$0BsnJD;&6z?9f##2#w>A2;#Q_JG z2}-aky&Lm!d`0k+I;p>i<1tFd{t=zQnNh<%gA*T#x$f#fQCB=V#MzLI*?a`m_Mh6G zBn&9@rL%2j{>g;?vqWQ3tgd*9#tqLNz}ub3NXWN%N>5GO%t)acqaL{;M^)mBm<=G{ zM`dr!rDMm`Q`4Ao3?2`l4n()pUO!wedRvaeN*bLsJM@tV`)j&FRpLo<&rO-|BuIj8 zr#~JgY9EB1IQQl{))*g7v8$a>z9rv4ykrUg<6P+gN_Go-a_Nd)l?*3Zs}`uY zlbUssS#Ma}X{ZdVLc6GLa3wX zYTC%S8rP}hFk`9~Sp4>ls7{(d(S831E!}vd+xWC+gz7k0kEd9=EZB5#xpz3 z3>z3T_;f4YZC8{rqU6UQ8NuZXM7Ltz{%!?)@x(p0H?zAPGA9b5NHz_F;ZGEF&9Zo>kS`jU-@#8BfL_y5{Nt9h zq_X!h;#X%(C(+q$aIs}_v;Jr2e0RHNg}tMb&Sqkq1{Vgqs(4mLyG|eXJL1+5ga030 zZygoqvUCpzLV^T>ySsZJxVyVEc<>M)SYRNyGq{rp?(XjHZUKTj1b6rb&b_~T&ROsG zAF~!S&(q!2)w`jYM~G8pKg)!e9weX)ekTW-6@GqEm(KMb|k$w=L|3TC-s zX#eyheoO0M!XDx|+M*OEs^zS&%LVx7eRg$0U{mQDL2_{*WX9c#ebb>f2u9M zg1guFLZ^a6`!SFU4qjjW!&uA8;b_J)zL|K>b#@rscVIiu0vw0s*jq4Q=7^uRpPHg$ z6i3}=DtVaV3?62m8r-j=B&VE!ekh5zcbe=a_SZMWR9Lx`=;PK-=3i^gtHYS#TMkA1 z-qgotX($rJ1sM0cZe~;2O{ckpZ~GY7s~LC82Gg1Z&$&2!Ngs`U(<@2w3{r|K|w8fBKFY%3yn~<1UjEEl~v*q<7SvCANJ`3(Riq z?;5Tiw(Q=kFMWiDl@Nk&d%(jR$t~S5;th~Hox!en`g&z>nX$e6*wq3Yd;91T`^P0} z8*fGxK3{oS#=}1>8Dsyw?imV=#Pl4+4>(@}Hceui_il3*u`Goh_1WCv zwUHZt$w#yyJX3(6#||g{?snSXzQ2NhO^FoNw~YGz`ZgRHXtCC{__;p2ah-c~SDHn^ z8+bVDF)Tym&BZG84uk;>d&9u4NfC4T`T|P2CN3{d;TCp#FNYB5cJmH(BYtgQ&i#1V#hvCC#F+;OMK8x~ z4m8IelCv1P>(;8&)SyODbbQ8yTp+>vBr57zy&`Grj>)=^3-O*3hG>wzPL*z)^!(u^ zzw&Z4Ye7THd}3~&?y0{XYQAy?_4b#wUanzZS$plpfSc%VoVqbJDTD)-a z*`4G@PKjOo@p%5;L4Gq6d@XjHf!rQ{=4)d;NH_}oT zfu&v&a2n-b+CFM+Q!e=557Go2dAw^UMu)2s=DqH2i{2wn8AM zN_~f=$=~MWdA6KkqzKNXBj$6p9VK9%J+?`5t*$*t3>H(FbRkxnntntW{aZjwtc~(pG^!|@U-v!={uhg9FlzJ*dSkM zRtW-^r+)BPFM?TyO%-e3G7}^{CU;RGumL61^h!#c2u5%h-h##5sW0$+vAp>_;D_KDIGLVjr!U`$S~d21K#=&XMA4XF}z;QA!UG}=JWWy^Pf4w2;s1Q 
z14llIsi$>YE32#BLkH>2`kDxV927;WGy?*?kj5x&b1IPTIp;yJ~I zW|z1^OA}??3%d1=kM=ujA6qCg0oeC!G_5ZV6&i9lc;dhO`Txe_jY;~OqTSegTTE5= ze6B+yAR>sk->r#+y3mz^#BV;_{xoz_D^j5$i?>MdBG+rdel08pz(=IKX+r$udNo(? zxR-O-f*y{&Y~PD>l8uAGPCY?VzQBR{kyR$QI=4olqJKGyLmJy144Mn1-uiI#jT+h` zpdJF3M$L83%=Bxfh-Xe|Zy-0G9A)o?**4xgXxu8V;rP%#Zb36=1S%*HG-@d*ZtxUF7}Ie95THQM08Rper3 zSWchBHLE_&r3c$jd(XGN?0e|vegEA!>RmO5e<{|*(&GJ0fOlUE)y7`=rrbf8_Iwo^;@GkweCCi zDyqWdx8oGRI`3w>Z#-b%uju~S&A%mq6b^3gL8a9PL}tv4?tRzSbna%zOm}pL)ucOg zG+kLhtwZ%Svy~Sk|NjBFLEmHE+`-~^?3j6-*&VUGI+==R2Hp%V!9I0*S%}XE>{t@B z!%3ev5iz%0~2rz}{l(<9_cad9s!)A;gbMt|pJ~wFk)3pZvIzF1xv) zIExKSDbIBH6|#gUcKx&-Ip=!?W(w-~UqgFDTG{smTYf$Sde1%3-FVADbsAr8=D%l26q5^OB%!pGX$C$RJ9(zp?+1<6OJA2W_2fuAj8gc; zX*Jw!1~4EXr<-jnyS4CAP@#jFS@$k4)Mq6x!xqKlGrHoD z$}!@ePet*CN#3;gs@y{Y%I|}%jD-SMekGf8^KUAL7K4`h$~7R$eK?x_+;6!(Hqn-6 zQOH-Yw+%VNDZ~5O)uv#*qBW&qn~5}BS+n}AQog*~9j3}y9BVdCAJX^61FlwI#J6Jm z*p5SixRB7jEq=`IoekW&dxwmPb?O;1Y?V3E)#9&Cyd`x`$K%;nIxRDj%a;gy?hOQK zExaK>{#S%8PWF@H?L}~5$=*mRf1{L(0Q#x#RvJKUZOX`o3<(MdNT2oYw z2}V-A!fGra`}7;>*m1VUeYLRAnwqgk+&Ep$FR;U``u9ze!@Sx78p$V|jBrg<%O}m1 zzsMYMaMxh&Vl?30Kg1Jrt}s$)UzWb?P4$?R6TfohTlsUVa{oB}98xu2)|Qa80bF|t z@9fJ>Myggu7t$@-qazKne+51v3gDUAgqWxRj(d(ze?KRUdMQ%wfRn0mj$?IxYu<<-}F)B@*#~ zSs;grk?bT^_Ji{-l)N)d(Yb!Mz=Ey&sg3MQ25C^Vjk+~Hu$up0YefhbN%0#4IyX`P z!cXAsVVo0$PR;OvnY8=Lf6=da#D$qNsTBE%r#bT*mK};^D4nK+2UFl{@05~OPlkS% zNIx)I5dSI$%GeA?KWBx8*K*#4KqvB>4r_OJ-bLvTQOps|^nNB)s^HAD9`S5{x8~Tn z=tOc^J@oE3DM2_6^k)l{<-_pRq4wz#2lkJJ$Qwtcd<(Tp5bX0Cj;sx~)pKKh#=)!4 zj7{nCm*+7{YdtRWb5vjVT+U7Zb_GP(+wv$vqF_}zj4{1rj$dZa)w*3gce4Rb@yY!sG^nDAT31M=_uFQxsWj8*&Dus@qkcr|HVKqBZIk~ZK6QT0}%@@^YbfE zEjb=H(}!$XG%Ov8VRxbDwyS-Hh`Wy=#X(RZZ~5aw=}q!IQhBiN9g0ZX363}_xOT}w z)}@vNFVtRU6de^{5n^!LQ7$dkFKmqR2V}1)IPl^sH!Z|hGCojPG{ZN zLq~p~VGUa$$|MWd++`E^rg+{tPs4-a)}?WdlP6(a*~L;3SXVt?D@zjAGu<z6y3livWx0#yLbzF%fwyNO%edg= zD9Aj}mn;wL+rWZ_vFcx(!z9b(Cwm8=vF|InXIDRlheF}$mMR~NT^3)V3D~GT-?LUA ziW#FlX55lAw;>FhasB2_lh49ApYy@NV~B~oLfKZbHIeuhLH_O90Isr~Eg^GmRt4ed 
z4zb|KO5qwikykFyh&f@JMWq4ga9+#OW*m@^;hwf%j6&IfN`4#%XzBVMxWXDD%#mJ1 zS3}wHf`v;~Kl}E&XrQ zz({r58)HW_#0fYu=`M7T_W;+kK9{$nUh3~3A!h!Pr3`N9KMNZQaE<#HyeXaPi|Z|l2( z>%Vo>EMQjOoM3YKx^zFSIlE~^%qX-p8Q4^EPH|vNR$`$%Bm_rAMC)5PQqElSB8)`eJaFj!fKA8!^ufU@*dBJs-lL62ul+~J5y#S2vY>&BNYY*%jo$e+j`}1x4?Mf zci```JkNizmC^`$8Ygzj_0`HSL^dv>9B?p9NFW`*FyLRuU79DUH?2(L3HI}4ur?6_ zt6DMFtw%Upp*bBU_q<(7;)T}iGQY?_sS^c$5(m`ww>9&al}Ir!@Q*nrk?_pX4GwG?IBpA;B0)TsOSX!~#pO)KO4FFN&^5(2U;edg)Y))AxR-$TlDTK^X{%}El0aBSj zxstOJu)pUJ)a2Ks*Xp#r#vV%Oc6&7rVd^Vv!ie4o3xESRlYypYrsO8k53YO z9q(7Q^gp)-o-8YQ6^*!k%a1r=r3uznAV3@|UJW0GX8(gs`x=_k7TGuo%0^j5uwZPs zE=rx)tgf$6r+ld=bub89;XT#S@I65%LiSe)a1zRegsPFbfwA$v?K(Y`am3AuNL(vn z+{2Dk_L|bN>cvC;oGh0(H^4n*ev49O#5^yRuc$)>bK6Ykf*lVe6qF5$s__YNMmHY` zPCR{>->GdK?_Rj0hi6i7_Gbu6ET(86?HF6FT&ar6wg%j7nl`=#EEIm*1a#TWxAIA2 zNrB9JGLHoaL4qHkYVO>j*>>Vq?wT9vr-r~2rpQF-#G79Yy~#GhW7)=1m!{}B4T8O$6M329w;_COH>vh5~+ z&s_Ec2acO!W5!;=FY5cJ^vp8tvu=v9frfVk<;zlawOxp+IGI(aWVp@otheIs(M%N? z<2#WsrRk)E!@iNhbL+_aV707woQ=(aSs%|q^5OaN{KLd1^#VV*rR5B9ys}nmRI6J^ za(~Y5snJ^&6r8WGJ0|K^=~9@=I)9LC87NZrIA6@T36TZvW-aOE*!=lqO-_?+MkM+M z{cAvtN=!b^uu8_~ivgt*|0axd-9}|go9#X+Nyho(?LYOqf_f7f0zc9Kr6sC!ELvm= z)nTL$AFcPugAZo2`^ zzac2z2FEQN)ZF9!*$Jq2 z>1S2Tv zuKiNe%(+biTBIEV`OJ`fhsyREc5lR1eJdm2R8M-)0_D z8s{Kg{zX1;5`HGD{(|p?sP_H`3(D*v%3Km@A?P$yo-S<2Jq)m`PGoRprJe(-4T8wg z>ozE~ko?~F-uDh{A?R9sM_+28l`q;!qPZ-c?q~6QjCQrYN78cb;UMe* zp(T$p%?gL01s9Z0*8~E?^96#AwYLW}sZ8(0+=i&!iWq=kDJBi}?$o4T+RX@)38bo|_&E``x#;YmJSCMRO0L}dTo!O%#fzY2|i0$sjLn=n(r@I(n8BJePktuNbPTvn2Ni7;ClLskOFS zsPi6@@5AH1-TOos0we&Y`WHodWFnxU5?6ox=@5Cz}{=`tX7r5WKGmXXnzMYx;i+fZB*q#-4WAy1mkVz5ar()(%o7 zMgf5MFSV|AGC|HD7<uBEJiirp+c{sWvC%MC57IHp8#h| z_)7Jj@&l*Rm{xoUiS~!Xq`b6(=r?^r5ML7&k+3u)G=m~e!M^lrRyTIEI#>S=FY8b(DVYCbN=K4xuluU<4*i5O%Ok$}aNtd}lzFG6!Y~ee5 zsqLIgeZof#fJxDg2%)cZs(8n3yA~(Ff&M|`TsAo?%-?TOS8Uxj)@dJC6W^^+(Lmhc zw%C&w@Id)!l;@nSY@?XWJ297^dA-rfL8lAp=3bG{dXcZIZS9gAv5}msfUkhJP=W{g#@d^8z#SpuEmH@J1fd$ zlsQ|fiLU<9-a@BzG3>Bx^nTUVj-+R!aax$bLBQ7}d)%`3!e#7d`2-P+<^odoeGEvz 
zZ-b7yHF2>CuN|y>^XQtt_X=k#UnE&a=@*npED&asOhI z7`Q`h=bhF9JN<)a{`E`VkdhoiU6g(M>*$|rD-`-2?M*5f>3Bg_?ZT8|%SjI(2MqjH zFGvOWF@JcIv=#uu M$+y-% zkiYkk$`d7u;-dmcl}6`@F9Y1H7!+;%0fhK(!j>uk{X5lz^hCIX((CWVTZiax&B9cT z%;YkM$Vb>lcxu=`XNiv_E=*am$A}QGh%>_tGIXz5*qDM=9Y9gxYqVz)rOBZh+KV(J zQ5u$(*&&#g60!TFR7Yakfkkmio9bs(oK^~7y7guKQ7gXg-?8-!(Dlv+1RRWg4&5O4 zGW$Rjf!%`OTroeA8J_C;N5N)}&djy6&^1BkF`G>Hl)o#_6%9)y^LjejRQ95T)5nk& z4ARp4MZOSwQn!(-h1a4Pa2a?dZmq(nga2*wefSahs|-J0ARJj0S$^hMvaNg36f9sw zyIG2n!jrn&ly;eFN_QTQnd@}v8^vzC9sgH|vln9OEnaj#I~wAO@f7b~sDp|3r|y48 zXU~Wq3YvnGq>W2H0|tZb!FmC|q`8I{Rr;El|Jv|sGmseI8${^y+(F>W!hZ0q0obon zAcm}&q@ARbr1jcnq_QelbY48?L1-OMv_!VTZFW~sL3$d|CiysKp2CcsoL`10rjGF> zdTa#ayh$|K0=czQM4;wAM|0{VuzI8OPYnW;Gs9d7#p&3(V$qv$IZnp~MkGTg(`ExE zYf#jhuTwnRM3Ij$C2;L$mY|h!P`_^Slf(;Fk z4sQq*^4pC@Oxc-P#F;rJ-?ku5Q*k#?Z25^HSu|d%aaVkql;bqu*knu+7F-cZ^u{C5 z!DyuJyk-qGF+Z0Zk<(0)Z#Xd-7oU*P$GI!Y$e*EiU(10bOj4N-*9zYyjJ8Ej(h3AH zWj+LT&C9Sm39U+bWwBJ0?&8W0;h!Zq|74bwjVTUh^V1&SPf!>UevhT*BviBU^Efb~ zX3q+3XEZ%<{^aVm;aJQvFT@-{IzpSEdM)&X(Tz+mbv9k79-Qj|N2R-imI_2(E( zUApFsa+lKk`T!om*AjwTtYC}5+k&(yS)+xh6*drjx2J_zrR7IYKa-P`So}Rp5FVJW zj{Nbuv`ZN)rnEy`AXrtv9g%ekpTSy2xvxrKnAy5rla#^HcyAS`C1cp+a5BJ$P)ocR zixwy$(|Rs(w%J;%9w3q=fq`2nP?p5X2KY|vq#Q9Dxvhnmp|Y)M&6=g5C5?G5d5C6! 
zH+U7d%q$h)s2cB3o$)>e=n(|@apP2BSv;pcSzI@wB}s`OMLOIsNvgA~SO()%V`KRZ zP7ZrU)LnH1ov3HD1kH|XenGS`x+Au8-92RZC!4Qi4jd`Pe0bJiFa0w7)B~s zRN}?O;cgV7)_fyub_Bj?-}|5rLSISyRF;4)=k78Bl-BUB1Q{iAqztZ#47I{JaK;$Q z6LcKi$k77-&|GE8ZG6Z$*s_(Xe1m)>e)8r7u!iD7zZi}F)F(MEuR8thTf14aOEq3- zZC6R%fQ@lZY`G6R>$qkUxkreB0zlflSBupk3~TU)r$IBbUOfMD(Z5${c{a?*^veN2 z@(Xtco=Fva15yE(_Ab3Wgs)k(6SQLn)%PymqPQgXYSr|qEAd2;sAlS=@bdLp$wKJB z`vQ{w7Z*>Wl32c)7srh@=#w51{hr3_Qqo9EH0%t~E-i_I_$K(jK&YoXCLsBb2dI!# zOwaRTitw%dN#>gGHV=I_QOJMtX0_WtY?NrWNj8Nhi(ihR2H2wuq!fv3`r%?Thu;(X zuqRc%X?IGnuIz0_k{2^XOJ5I|#@2nq$u*2Z7g5D5kpgqt%~u^jGA5FMrtZdox0WcY z!rxAjE-3Tiii^E=#Dt9?RxBvSBM9$(X0sv=z;HVW3g$c`d70(zAkbW1jwIPoNPCcLq)jh^SM~{6zi8AZ(+#$MHsgI4rIH1UBUt zyG(F#m1EVi7sWRN4BedoPIN7q3+;PNJddh6r;PymE=mI%T!&a z8BH;%>w)>Iz(5zrY`B744`B|+@RPn}RpoSFM7R9RT`r!CrpY0hHdu3J{56gd<|~N` zOo@eFOx7w}5xnNn)3-$|GJnyuFkrTxKDvZ2-=d5iIFU<0ivx8uHJL-oud zrN4u}>`P)_dM)fH98eFj6c$`>=`3t8eGv0Q4mL-Eg7@wj*Zf+0HU(nb2Lc@vagRl_h_mN@QeBl*VB{5x$(1btcBkpz6qTb}8Cdkjxn8|a$6-Je zm6L9i3sNxdlo~0-d)*j`VHAn0T=$Nn28DLHiZ-r5v+7+AZ3y<#J1d(_u)s(L5PsmdU-u_h zPOMSiravElJil{}z?AFtIK$0nRdU>P7i>0aynmg#KWntPlyUqV7h5LJl8R zZd09@Pnw`tOCu**V5;#d>SqIwKka<*TasiYpk_B z${6d6w!UlTljWHVUp~GqDi$~87WRFT0U475PH_#4#c@iyakYiFtS`C zeWrN!C4xOJXLY;P+uLAJA+Hiq(q}WQKeDS_$mppD zR-rAZINTTE}^sD-+Y7{5Eu&0}7=GWJz zX2KD?+)HpeCe;y~%wZ;&GG3DQ4TqY;x#`v0V{EINU_UPbhw=e-s@?2_uOx8RYO7(c zK_Am%gH+Ik7ns;~(5s?9<{eMtSrLmSw?!Lw$lfrE4QFDQ=(%Pg8C#gvmz+llZ)gpK z_*SjlQORvIkABaVlHf$oZ7b|iDT+9suwfUx6W7}6YJzt9MmK}1L zE;JOah$!4=+HTIOeLGIuAU;=n!XW{t?xfCfxh8%m#UUq`u6eJief|y#e}ad}z~oB! 
zGQSW+5V&DAL&=fmOlOGM#GxVghlnow+Ph`M(L(*bXVBiAuajoc0NtKxfu7R%c5_x; zr9$<%&LAO;>h0Xvu6iP}bin>v3fDPNoN>Wf#$6zi3=MlJXQhM&ezgD$CMosS@OGs^ zuftkv?oCAf#V!6se%Uk-pg?g6A>BZV;rNg$$z?!7^*^d(VQng?N)FyIe72N-AZNN~ zR9WDw^^a|NQR4w{>mP7HqI^!n982q@?i&a59uyf~_%`{qV=yxRehSaFyrk> zRMgdcuO1q%d}CN3gO6`}gSWSpmdErpbZlO^L&7dlI2P03D?BT}tv>?`{Ki8xBlhYQ zIm{@5rCIGz38(mn9I*H25fdg>HT;TL>WN5$jI0(aX%`fbO@h&pS(*~{)g-2`addMF zljAsb#VX&_0;@Q)Hv8-8mLRkk&-WHcm??E<>c89%x38}KKQHIeN0!cIKDs=eCsB$LX4X z(`P`a>QHbr^+Lu!oKyO!gOOLo(F`aO;aH|Ax=RrG;h=)b*o9;5$%e${-Zr^$8U~{P z_)gCPtV<6gdfn5=&_eMl>L|lV>&3pE+(f+xGYZh(HWj1sbhF4UzrPEOK=s0idJCM; zOR|XubHMFlJ*28hYUtB;zz45Wxe2lkr0UEhUS&z&Vr7tEK)){LcM?$>au|@P3NQ{5 zAcG5-^0lEcX^L1{VNvNx%)G3NIG23XelsO`2G>d^!!3_+>?rd?CNqjOvM)yzw?{uK zWSgutP(4nvWuM2iP!4{Pl;-;sRj^|hDag9+ZH`d}h~!~RwzFB_8;*+H`Z~AH{n${x zljm0qcdO~SAklJ+cR`6mvAWehz!#HxEhxp=7zGo&?W7mFrq??+^8nx# zfWY5)Uq~Tg^S8~jbmG7672_`ggbFrwlhj5iM6B6%JxpfxKm=Jj1)ua$y&x6^ z|C1EMR5)Hjkfg_ltv@>)*yLqLaDRvdaF5Rl*Lj>SrXRM2!SAf1^vP;S8Yz7LOwcl$ zCf61?#cZhgXt5&R^;N^hX6?}A7jcIc#d$Ec`eXiJBb5cR68lTp=uy^0BN&+SIaV%F z%~0bJQf~M+9*Hv4fyW0`pt11w@YPWsBt8h(eFcT$(kg&6Zwe#Kxkzvr& zUy3&yzxE#1?&Ei*?yWn9=Jm0G!Tk?=RCMzij1TtonSjENR*LwWubI*)O(T{G z_qTUHch*L3=>_)vsS}sZrCHC)%R8bbC>Nd2VW3>u`JB9YSODl3asmAcVm<6mDY)HvS!WNpjI?si94W}sOYzsn8& zWWAtUo}Q{m|AjnA>N)UP8WDJV$?f`&61KdAP%v!tHHDbe2K|j9TAH7rn5L{p@co1( z3miMsdA?!~EglhZAirrUf-ed$dTz%^&!#afDMZxdUH&-JH!38JKG&VL@f^!{amf{X=S zs2n1O`hOKw$QB^rgo$&7teLtY)L_c1Au{y;pv5S!|wnQAH}tv`Rn>R*i3sd8(r1+m-ompDHP zucvK$%yez5bJPR|cWZE-4>yU&&Xjm!7j)rXQF!jtSX`{tL_4o5!<=FI%=C~MSIIw5 zbepmW$<^bWfMaR#Q`^s~!B)z#t2Le?ZWCK*%e&<}e8cB5fnoNuu$%V&S)up$GQn2p zD|lF@XcP+RJn36e6(q=b0HS1w`lz3(aC653VY8Hjo`E9B*`hf<3aj0 z{(j36df%co)_{cGJDFqs=_WhcsiE{OJTw4VGX?S9AkhU6mVoRnAFW1rum=tPC5NWt z7C;ga71G(@gZ%WJX=zj^RLjo!w-*u4^9!k>_8sTbK7AqS%^A@W{~dYkncH zIYZ=h*3?w-|5yqfZ&A?qXp||vm>`xuhGgBndtVettu(#TjpLYC;j2C-b$!WiQ%$|YJO7hQ{(>ty2PQa1t0=&^xI;B{lbEX| zz&hrN{=tc9jD2^wvUW<#o z@XRPg-O48s+<~}v^}j!nzofmI$=a|6ITt*grkg$WFQ>*m4O%95={cv}^=mF(5Lc)W 
zWH4k()h~wkKRYVRfs2e)GWNj}fpQ2*-gpC}p)0D!%ET6CUp+p_^o0;~h1fdyrd=GYs_zC}h^8x6c$H`slwX6SElyk? zL~o_vq==KLkPTB|N6!wZw#!aLl~|YZD~`&aYg@VV?Y3OW*>=Em6hRr z3`Jue*V6HD)GrEuF(Hk>S5IR<8S6{Q^4zVCnu|LS7DBxFe}q1k0fe=jEnl#uH{_@z zjMVxEJb7sjOUX=vLPNnsf<=-+LYPO&nYSn_hRtzG3Pn}l$6^(7D-*B8PHa%6sP8Dh zcOn9YCJh)_H+n579i}ew9@m5kR`UB*+esZ)bGQ2|<99y$Y^K7_<96?k(=H#_ZLA4& z87(<%LVrd7xW!du$99Lfhl-V+uUd<@d^DcTSG&l)ewAPIsr#QjH2aThJ~~qTS~Atj zzSVk+(uE0-w|}qmShZWrbLr184aM*Y(&Vh+Dxq&+d9a7~mT^}6_|CmutUPadQ13tT z&+xYEr*?*glz;VkhCs0zI2wD>XebepU|T6FNP#AFC?h_#hRGC9m63lQXXe_X6jIIK zF;QZ8HCICnTESe;!l@xbHV$KzsyLS(veyqPm*ezFTDyk!)KGzaq+4xvTQd>8Q zVqiP&?yX~rURO|Tf7_s=-#_6zw^CfnYGo4ym$`JS+!%hASEa3c_v7>ILQNn~r`uVN zXiH2;#GPWk`f4PK$U(+FuNmehP(M{GFX>ms?2QO)>riNmYizEBc z0JB_Q-QKD!phCex$3h zfM1-Z9pIzvNXLon4>@}wWPTT}@QXo27sMdZv4>ZYFGFad) z_R!J2#GTdKGm08b*Lzsin~!Xo&J5S%ncD(Lmo&slV<$14Td-5H$vQ`c4p}Yo)hL~r zN^+_V23VahI^+ErRD)T#id`73+niE+czD0VpR~aYMqjSEv0rs$|MrXKssvGG8pf6e z`^K}`{m1hvw!v)>f!*!6Z>kG`)ejL#Sg##`cGn{>OFHxBSxDeOO%V9kfByGyfXLb- zcPwH^WV%Eh@<^Q4TRKB5{aWajN@-VpExyx(=Zelv8OTOx+n>ow3Q#P$4PZTDeHSOu_q%D z{>zL9`gbRA8_#&UMRV%2lf%*1$vYxtIZKi?mGt>L;$O2-8o%r?B%$S9UPJ zKOg^%U?x*5#X?;X>*$D{MLhmB;T-N_uZ=MF9GnH6Lyg=x%f~ONEeJSL$hg1|<3WWc z7D;Tj`N%LGN8S0dVZBu*-n~rvqyr?g!@3@d!G2F}6llYNJ0mT>YO7}IaqGoSH`v3g z#|b6UO4kIvpk5+}2Kp7&o13|Q2g@U2lRq#0z1ng}+FI%f@2ro&*!wG(a{y~cLi;g5 z=`q>GEUQMZDP$vVkwS)Zc2uuv^G{ec0}VwfCANU*t3vjs9^y%5mj@PDrTl6jRH58% znh@a!jGlKq4P(kF$r;Jcyd|vtJE~flQsK_c#YDDDaniu8HQlov#pKD6w5b`7@chrf zNr@62Pu1TE^`v9Vp_jmE9*fOR_vt;xjIuA6xFbW*7#h-EYZs11OD6{sA zOr-KJfEXoiSOt!6b*%Do(b~)iw5P@DSC>8dOP(V0OdkBV+~N5wp@XpJvu$h9byS3G zzgiZ8T5iLF{i5}%x2k!DtnYSIbQZq6TZfs#$v$`pT_OS&`epv+vSA zuHvju%2+!lM{K%F3SEg@pln}y%9v{%FYPaXLU}E)nJ~{6ProvZVlqBZZiBsOR#f$u zK%51-l2plMM(NXN_)_)5VQKpc>8DCAgW^l~SA*vlF7VTx$K^K?hSo2DWgg)sGXScPRa+lcs@u^Cj%0=D@ru zZL&tU@pI{0Z|P%XvT@)97qA}-msWsjMw0bY3b~=JWpuf;Z@wb`$VE*hq*v6-qoJVd zTR-0ATwXh`5X)&>CS0Gor=uCtBlV2D>{oP16vqLD5&93b4f(*u14rXFRgxE1eW)R6 
z`$$=d0H3GPDlc_d9X>n*Sd15(l@L0npZ@k#{QyPC@p0@AsbKN=c)#B%(dXU@La5Cq z@i#S{>2F?Mp%e7|(q;M}v#NehVcOtAX7L(hu%Ro*^{?1b$nf0DzNhn_0nCe+bQTwa zC#I#P756H&9k#U1Km1mSB9*a&Ci{8CK*emC!7;lP+yETS6(s7;7Nq!upBZV4iyYX( zbpwgZUdH1t`txz!dk!$TLSI!HfXT1%#qRAkq@{Z)KVU`peELVLr0&a#KpNY|7~FLZ zSN?kzUVQryL}ea$IE_b@J_o>4Jym`XpFshHAPe$WQ%2KUuMwac42Ic7S=ZXWL^%~G zIEU4nS=bMA9*$3&tV8=1r6D^ieCN{uER9Q(=l80^fFuW*{3Cn+?J!@S#()Ba7Qk4~ zAxBqJGOqv0ryP|1LjWgA*Z*Ry;{A7!yBkrCpX4IQ7sMg7$>{Xf#qFOR|T<`DXBnt>Kr5NJfYjh0WrKP~#;cD~6@ zu&rD=Q3|h3fTFsdSM<8wf`7Oy<#`z)sWEt%s=|oN6;z1MRxyF4vdjp$od*a^#H!K4 zf94*HLHbVZkFAI>|IcCm^{nU6^gTT^M6yvOM&`h@TM6jLs=gG6|F&ls-ReMOgdv)|!OwEJWM;4X14Qv$vL&Vti=M|NXZ%Ff$&ri z_r*r^+pN3byvxezy36@<%Ab1QmZlU+82dr_mp_4O7^*F9q4Y1`uGRXNZ7DA-YbFl)L5Jse`d>u# z-w%eQaFJqD(>y{FxKVPMF|YbK6x>m>eOO zFmtjO7V`}c+h~jY&ePUJp2tP1HibPsj}YV55`6aeEBy9if0^EYpYvi_Wu%Y*Ag@8c z#?-b#na!$tk^9VM{UdSnG!7-{;@z;AE#>OBnNVDNNHcf9f7phGR4}K+7*oK?mGyZA zT`C(Y*-+JqN5!-g2^(7vJOe$Lniu;@B!oDEN)xkx#f|@Yx}*5_tn}!h7{uCOvEAz6 z9%;udI`n*X=~9Z$XsAK}Nn1!60!h=)W|9${FaJqJX`_r0fl7k1wYL~mC07J*k2}X3FVKS=V8DIDLy3Se{&R7xy`nBN6aXn8Hoxh3gpb z#=Usti%0Dvho=6FzT@cLN98wx{KGOSLk& z;ypnp+oMw##6~8glSKyJ*prkJ|A`vUz8YCI> z>#xSBkhTR?x>U9bV^sUq96Z|7p0IKpRbAdrq2|G|g6Ab96-1ub<$P(BDiCcyGXMRx zw}KwwH^Wa_LO)DXSUHeL|39+cG9c=1ix&olloCN08bmq=5QauU5NVa}Mrn}l?oe8~ zRcZ)_h9QRzsiC_Yr0WjH=Q-!S@BJ_z|FhR#`P*x)y|&9U?23}W$AQlSNr<&vwm-OE zO(6$)RuUqua}Q9hRV1m8lu(|WdHC!Vw}BfAEkiBYvAMeML|PQe`O;8;>1?TF%*&8) zs_X5v@_|v4$0m9txOR{9sZPt+AGUDPkuK$=?$)nCdksX)8?7W( zl!6iczcmNX|CLsRq-~XTC4cUpYp?EYrAGDZPxrSfZPcAQ*JbLFD9l-dqE~KTwVu_` zJgVIB5{=;LNgEHT0nMMFPfNv)pDLO6ONTgU`oX4*v` z`$cgCttvU<9q)XIGty;#CaX8i)M?&1w>nw!My*LAGG6Dgj&3>=TP%xi|AF=%h`*Q{ z8?}2t*=N1a{TG+^yIo4#22fZ<@eOt-DcwJq;(sn91qX>lo)agkm_Fgv`&!KHy*049 zn*H|63I`5C6eVddAL`Lo9JL`#*97E0cUBn_L%!DHkN?W=)=8S#;fWjj`LyNpN4vK> zDie$`&M@OC(DR{Sd2(n<8QuZE5b9&h2Vnw$T8SN3pM-Rt{U@m5 zzBEk#Kw*Sg z)rDl_kobvYi(42(l-k86owVUB!qQHF-u$?@`Nf~jw);QK=kV~d(U)$C3+vkpLbgDE zaw0#LwRfz{9c6o9YV|PCeM}1EVb0V46OXOS!Ps^5HWkjvg>Ghf!%bDH<6V^n2-HRV4z0W0< 
ziS;;nY!Ar&msmj=j~hU5Dqcm|OfV1&Cf$5e-jn=rh%JRIeqp2cdfKEvUNHqqYtnbv z?%rG*MM}>9-!o~WU{?&1Z4kz9(`N;_1~QePl^$S?m1ET%`#tLr{|DX#_uDOqJZHzs zU%A9#Cy3Putzz6GL*>KER`VfEUK#_RoZ|n`H8kI>$zH>!9P0Zzu{qHE&w>#58r1f@ zc+aACx+u%{>AGV+O|cyB?BC24zIt^CexQ&Iold9jPc8C|y7J~r)AI$dx%m2-yuE)g z|9@2hW=Y87Rbtt#-BH|-Iej~b)Klp9w*2iH)f|X8sDVO{uU}QxVcARL`LK3# z(U^cmQ=O!fHq5A=q5tu8@X*brNxWEP)*}u=jE7n>CKH@$TECM$E6+=>y}XsED{m0V z^t{ZjNkf zK-pbw;sz<(a2SwhkTo_pwcHP&NZAsK?Jo|x*D{_7j_9X?}^UFJ@!_`Q{m_d9w zq5`)ASb0LU5PZ_T{AlXoCPUZRfMB|=X4GFy4erOjxh_v1JT$Q9P(omS`RujhR^xo* zcGDhK7I@R!;wp>v_2c;3#7?X$oI!wVdFh$>c*o?OZ`;omy#imQ*;&_xt>+DQzGw63 zvz7U5#L=F7C+2G^i7!yJFw`*KVOYM$ix4jjIDhGym+ew~dnArdYxBcKl_jkALjT+o zZ5m38jrR5Se0*hY&Wi@8RFN`zaj1QypAPv^=}>UfyjOsvPs=5%+1Dc2N(?ZT#0x_O(P4J@ z0UrbMcwymf$F#(dbceE*pB&}oe0#OlPh4z%RehmXX9i=k!_jSS!s0VA1QOW)eg%W! zeOC7|I~$AQ=x4QdFfzcJ0F%GeZ<&s;rtl(f<8tINrakv~Kn+ zP4Snnt?d%ZNelnxl-IMTjihwzzpx!BA&NnM5lTr(>g{^2eM@RDqay@R0ha<9)UpU$ zr=WolosR-4XBRn()cOba9i_o0*`X;Bov=b9I@cg09qAHmsgi%V_g_}Rc6nhv(oxx; zo_vXqh!WcBzIiUQAH!kb@wi4J-=yzWKP6p}KnRWoPwsj3?&V+pW17Gett-IM07i|GMCe%_U^QGU zJGI?~{_@n>NpD|soHr=I+UW2 zp2249JEZ!1D>~!?kvX6CMXvgGuj_skk@CVy ziCm}D!!(J`Qu>DM)}Ud0D>uK{%1Q5fF@8M7qY`#xqq z`2i7+@P<<{WG8Z|u`T+;g8`^!uiby1M_#C0s8r;pO=XIij&qGkxO8&OgapoiILPVs76{E za={NgaWl>c?P54|O#VoTBBTq^E*Ed3zIIeP+!$agw0tIY^3-x&WtR-{ z3Xi>u)I|o4*spTA+^!H?2d=0`-A)&m2?9P-jeH62k+K%-_NrrKCW~}2^T#~dBD|g( zF{6Cuk`&<1Ya3_=EA?`^JM|45d;9BwSuh2Q8!RlKwdleRj13J+(or3GXxIblpqsAEw&>xc+ayCwtN^-8G26CeEms!lMmtG)ZXPd1=^sD`6&t5+MrbuYIr zEys}6tJkC9PuC{t^c$xAtaQU?V2TKeXhSasQqX{QqPFFsBqV(JWtcS_hlnQ zR%~0;DL`?M@kL$c+rYsp#PVT1;XDcC?Q5i~_Fo5H@Huh$&ff*B6=mJl9*V_!TC&@w zf1@nrkNj^OjWliwe!%Eu$Ftv}ER)hv;$c<;P{Ti$2m*fiV`hs|S9!zVE@n2rPNIk`9Mb%l zGXCSO6?^<GlH=}fKa6%xG!9)y`84M!D$Gw_@;3CCcx+>FxE%0xQXH>pg>cmw>gFmYN>>r?uRj?PEPZqDmclyLQX%L3I0!SU!}8osIsAcc~d z&+nDFw;I~2PFA#!cH&T7ww6Bj|2gSwRUcOD2?yRCS)G6UrSU?H?I|Fd2Zi~B?Cvh} z5a>yUFyRtuaX_BpHzsr1&$x04=f1mozBuOl`VIsE0S@k0OP5XzL`d{+8xO_=!YD@> zasJ(m&R2iq@tD%hwx3+qX5Z;BzZqsPyy*ZwO@FMKb859^Ct 
zL_~K*n>>w{4g7>j9>NTgyTh+AW0sl}d!~ZNg&r9jYU3TwIm~i>(P7N=9Bf9hK$zpE zOYXkl5>BIow^UlBwZjKV1_nheEWXe%qt3mCY`EP>K}>bM4GGF)>>{0eSl4-9TQUAc z4TdV1mL5jxoC6YEi!V?<)$g8Ud%)xR%0bDQ4I=u0W2+8eebERByR!Is&ADY-`^UI) z@lMGuSOwM)qR!3rqWQqRRtQE~`R(c}v8zw(5+hv0+aQ}i+~0vboA2tEh6LlTgxz`V z_L%-(`$n#s0>XqOE-mHBwpYZ%+f^e^o6E?r(P|#++9qOK0HJcTRs@ifMeHh@CuF4G z+zm6IH(hp^xA@S*0gfr?x6wQI&6dX@8v0uKR`Qy(nwP&KTs>0X_4oWH4-w}P!ljyn zuQFTuKiE1_E&K3b%|lDU2JYRvR{Ekp{@67(IYql4luOvWn-lmw)Ds`+==GwlBq73o z1Q<5B+0cIObI#C~DR#Orq&#-%e0V4))GTvN{XG$AnSd$`n|Iqg4{mDM7UYi(NOt^J z0gVkUF&7W(4-NrKkfsLt;)akyWqfTIN_w?yzg8pwwYZJOasD!`i%2HIQ>eqsag0fd z$fHLE4tB_)jIY=nsu9=SbMz7lDIu;#4x?1}osz)#``5rI)S5k}Z3joc)|kOMv1s90 zQo_PAq94okan%e%HjgQ{AWQ|Y@KS<`Ui69wq_Ag%va!10g}nA&&CU&#uq>X`Q!9tk zq_@Hr-4>W_N7O^b!0#%6DFOh^MEAK++ehVI`|;+>U1rl>KYah)PE{95I>ve5(hDnt zv_pb!6pi4QUP;tOZniRut4t zu|Gfwgejh3aPsZR;{luMymjnAn*#5Q_3wQ#T>1>tex(^@++p=G`J76T_ zutyMX-!;PvE>A4Yslh0KmGNMC%gwUbQoO;{1Xy!i9cd>aPz zor-Gh?TH@$(ybGK+StC6DTP(tW+m@6L-8EgO>MNXKSM_2xPQ4M8fihlXhh4-xAnm| zU70O6YE3Z~=eDe@tc}vbTiY6Sw6fUVaVaa2thM(sQ*h~~#((?^jLC?u>U(FHp+yrF zbm48ot}XqGx(<$MH^b(geUqhqPnvWp)onQ>bvfi>JY(Cc?S3*zc$~8Q1zyI6PCR~U`fl96=|%n!ln)$b zd?LvMJpkcpk=8+|YoM6deK6`46jNMccvDDHVqO;CdF*?qZz1#T}S=W*!nky`9#CJ;*P_UMW;5l)tM&+zvy znIxann-`=qC3%wx0(U+3sIdTa{==D0l>Q`mX+G^Paw0rsRTwQwppp)bP6DM)WP0`)|T!Z40&A)aB# zv6ml%%KK8J8PzF?A4_4V~>pT3CZmh_JB2LZue7u7@Z?X5AB$5F%-PClq4 zR0iOE@MmMHs82|j01G0oJwTARyhJoNsS<;hRDX#4Yqx2k{_#ah!6N&0I_ymXLmEH7 zC?H2ig}cq@xlVtAv@gwY)qP$*KtJhGO3P-)O4xAz%%8a8*dTne+efWayaVzu=*Tp- zTREu~Tirot(R<*BzhbZCAJ!Rg*$?>#E-l_pt2Z_+e2p7m188ELt(D)IAnxNruPU63jGjrgRypmj8W*L1 z;Fxr9te~pWo2xjt8#==lV&YJu3vtX!DcV!hNK*SJK0X6LDxhDok=Mh1nfBM;cHSuH zyqd8QtsQIptmDJkMvoa{f#URA#AO#av$c?Nf-4n}qou9=$m)#D*l8+~B9yjF`$13S zFx53CypY{%MpXYh8^b=6!%l(3Cp9zMBqi@ukrtLMgh?>(Y6o`QwX9vul?^&M+p{Xm zQ{WqPaOF+hZ*rH&ROUUtOLr(NJbQi>JwvnHq~C*T&I&h)#ktFmQ~$2DbY&6B|4~N- z@{Q!HD}cZ*!;T$u+zV~lHNYr4YC&p*U~S4(qnnwFH<78XS*%5B+nN44TWOUw5a&<` zpt=b0nN(3%NFaXktK$xliBRdg(}a_{3qhnj8I#87Cbdf26F+yUjKl~VyUNYWggZ=O 
zQv=?HCmvrk=kkNYk5!Z#<4*R36ftHFuMKIAIix7)%Oe>pqCR%*hPrpHIWC)v{h{mz z^X3!1qv`p1XCkzEBT*Two7i}cdh}-|$SWq$#`v5FVAilUTsBJfhYljv>^E35yw$$L z2l<#FzWzPuL~HZy>y8i6MJ6W0gHq@z+AHS)Fd7rvByrq*o<{_L_DUf&N3|&L2tc1N z^QG%Jrgc6>t^I9VMRg;Ei?v{s##u~iSsh_s)3ZlU+C@g}! z?DAGd*Gk%ZcYYrptmF^5rVdW0BHEzYKM>%-4c6n$x(|tI@4c8Smu#{FVJ@0BU#8Ve zHNd#=2p3uwn5on9QHVoEdPqE%(4Va|9yn^npYPP!X04vVJrpF%g_-xG zFqdznk9FmfaoTrYRvF6cyyX{vHatvxD{Qc2cUi`B6p~CI+8~F`P@Zmd-fD>0|YFw5qz{TQs?TYdc?~af1XrQt! zIY2Hmlco~E=c56(*Xvxp{PAAi2tFlJgfD;l;6La9ee&!+cgMOk5Um1DhC%K>(hMv5 zXM8T8IaWf?HNosx4;WsZRO6Pa%RS1kR#A|bGhZR@F9esw<8Om)b@DBpi5!eIG*K97 zyzPW_;mta2`jw)IJmK;k%cJxq1Mb(HTpvV_boO*H9gO~HYlA0v^!n`h6ZN(1UW0Y*2DF&~Uf zT>3x4Rl)eEwA)~77psjv_AGz_RU86jIB1d%=6cs%NR=B##z`bx@AY_I`M1e5E=^(;r0Z(%%diF=`jR4AlKV za&0IEH$0u^?&Z_vGQV8b;c;i_ek;22x?+}E|GV5bX_A(TC{2AEu z^L?UZeR}iTN0JzIU-M8Xbn7jXkISAMtF}peDby|OyFPW$qLnkECv@hrVhj_LBz#TZ z<0xNANb(ZXee+E1C94bd5yOP6*4c_Jc+zC841S?-#qvIi!TG?0ZueG8g!0jh_RnY8 zIT+h(=YD6T!na!D;f2`0Drm`e6NCk#s<2B;biX?)MSxUj4oBI9?2I&TI9PaOUP>5z z>G#FGvDK8(8;b__NQYWty$=Q8;p!F<&<9jIQLN-XJ#Da5{e*waFZ@fA8nm(PrOSG7 zyOMVi%}hN^&7PNk^5yH+`RW`h?UP;RAKPuqycISZeC@rU`d0^t&e=2Ry$Y?iVduNN z=6AlfZ#5AOrPLgcf{~IGBZt*Rz?^D|vf2JM*)V?@AnXN#+MuTA zXdJus9UG>Mq2I=8jl@+Y1Y**y_`yWLk0iPI@Z^%Sy@UJ zIa^r?YulbDTQSq(65SYXdh16Cc6#&y7T+vVw)m1Uq5;x{S|=N|f^O}KqP{4rGu$?A zdU*iwmZJW_;v+Qgw>Cnw67$-LgM>iF&U$Z;*1qEk9URTT78Bq!5x&xyFLcF^745Vb zla!C4S^D*g!~b5@r_=tqF3A6-md$owaa<7`XL-!btZdZ8N&_rr?5qCndv4$WnSi@H z6{YF0M4WLjB}JTk=n~qQf0V4c-N7riuC0QJ`E9*tzs?VsmsSl|ctmbx=9k^RQp7GF zbmZr-pQ;^xv3H~KmeEwAr`Txuffji5E{gThrJEjUw(%gGqC%cUsHVB%9#LHTafOsqAvx-o2OiYQZk-_tlb{m?;oePLnB%h-82W zY+@bj8`8Q@C(5KE#KUv2>%oRtvp-Irei_4}j;o6E#i|faf{VYOW}G?lcQTouw+r;6 zzdGphamCA2HZhrczWmb)57Iy6sxG8}t5betGLDUAPcLeJ@d0r^ES5PDij?)98Op{d z8aILsv-W(XKA;8}Bs`%gc|HZ@v5_DD?}YDwG8HfK$cugj%LTMV_xC)qGhTk%Cc8FIqU-H!S0 zv6;y^jb$nmB6d1;C5vk?f!J7d>t#)-Le{g z$uXykOM4;wTd&E|e>1HdxU>D_FuHG29&2?^hIj~7qX0iNe8QPashe*D&vX%Bx{ 
zAhbf!8Wk0HtR_HsOcS;F`D8H5F(tK47rFgQ8LGDVojma6x++aa_r>7>`dYzKbRixJS(iVOipxjpcKEkb{T}BgcxSQ z2;s$v8-s-(SDdZMvDe8h)QFYQ0JUfq9f66@JKpOZ-7BKX_5uF50y0?QOmFDPPum zqN*EYWiN|jxk9VXY?1*bJH;Pl@D!3!XW^L@PEOxIAl{g?LJjt|VpLqY;M*kd;(tC&#e;AWc2u=ZfxlsY6?c|r|8`Fty^$tF=n(d=U9 za{Q^zQSc=Ktg-uzTIw6WImqFqA41m+jn-_CikON1H#%=R@g1!hDNibGe9H6a_^Tgm zrf|@M@K1d#@VHB`WXQ(?1^l z){`>-m=++Zy{MVZ4j)^pXCP)2b~i><68@^odiB|R&Lq}3V+i3osFC2x^|k5|IM*kp zeR>9*YZ^78k>hIdWQ1kImqI$-iS7YIfGo!Bkvnp>3 zQ|g$B@2?e;P&NudZ zk?tVQX%}gR@}z}U_u1F2H}DVhg~bF57@(ZC<8BYosfBwO-Jk8`HJUUkP%b_kb8cRF zn`|prqPuf@Q?&At9aicG)y)cCA2HRJSY+_XG6*rV2kd-ptq|T2qBQ1NI66^K8nD)_ zfyqA@xV2RKWAa2!Li!80$)K7_;ZHlbEt>uYwY0ZoU}Y|MvD3m2_A34vO$G8;RlA&G zZm8mw9-Xr1%bRSj)i|y$HOBbQB)3W6A!pUBVo+UF^DDl!;%zS&6S>AjDqsKI(QG7p z`-KjV3M{cP;Zlg4{V+9M?l}&14V$!1FzD)fT|2lGz|THSeh+BJa)&#gT*{eA991Kam%$$dO}F;g*XYToP4?%w*DML+nGIg5%- z9jVo}8l(1#doh0mDpbDa$}EbED0@XFGBclcipV%?mN}Rd?^l=n#3nFKRC=G?bW8!* zV5j6GDuFnuGZYU-8z>XzSxqT4!{;KYp~vu(rgFlWtM3$^TZP$?6SZT$OpP*r52s7*_D5){M}O;E(w$&*3K zG)C|o90f#u4I2#A1wIGgrIsrC`BRMvY1{31RN@|8P+?L1t&3NafA{S_hUn!+%Rkz| zu-(eH;f6Gly6ld7!aN-E9YHG00K~feQR^C{lEm%;bEIj+T{MiK2T965olN6+hELI$ zhKD#ljcdG+LY7+hOcyVDh?_qVZyZ8xR&Lk8wA&5~1N^dcJNi_PYRA?ZI(X%z>9Rdv z##7^>hluz8@Djn}sf_h8z4o6J5i!MngHVySzo-^ZFweROf z?}6I{tcM{e?$646=EUq*tLFmCgF^KoItQpbvI*NAl8Sh+Bd^1G;-#^z3xbhvV`m>m zQ%)kg$V1HYAdQv>U~oN^OZp2k=mEt+j!|Co2X0I#FF$zFWC@W{?rJraOLOIUZ%v4C>7K5U1q2`(w0F4Uc`T6Pw5JE4Ux8BPGF{dg|{V z19&+>Af6dKm{^Px4e3CjO(6Dr)z$Y${0uc~I3(PxF;l$NHKQFG`)Mh_8Nd_)6pFAYnj;z5?h>N>w76hjcGpI{2YlG9`gCdhU~n4 zp^j83qVQhshvcGptbaiPqpOdRPJzRI))e$pW!cXdGsBXfilqw1l|TXk;jiCC#kA>; z>!$hN?^NnPv7zcri)fOlDcYnjd%;(uzRcKlMqVQn)Xwheqa}=B_ai|rgt);=NoCnJ ztET*zL$(`=PirbnAo4Jg6S$zZ&A2Aa{13xJ}w5fck! 
z|Jy!6!{F~ue_b|GzzVXJ&2!H4p4GrisI1IrMsWQmg6zq>Jplo(5>I2O<+T>{>GHmf z`6iYfyBu;m_2hn|xa>_KQ_B#YK*xt7z<;bk@;%<74>>zw?cUPz@;gn<;Xs%7Uh?I@ z?`i(;iD{uX9rm&twsH+ClG20+)pEu(B+uQ8DrBfq zW%T65|8846>(!<95hKJ@S#_aSk5jl*sdhlwjir@$wJ16^1@?6J27>yqug&x(+@+fn zmyhfnMmPzLz;2-5=+0exk?j2Pu#It{U4ki62`E-oqiLWP?G?8rmk{qF_nlu)tP>72 zY@jRvXW=FpvCtZpQwr$6?$Don@s^i%kXF>;nJ&bBAGXJcEIJ}<0pP0Y%|ULZvS^Qm zD3>wS7tavbRBI|~>PbHshRXb_iihx?#8Z>AZ}~S6Cv%&b!rV@O90)l0~BjRE(TRYWdVF@pQ@-D zeYr!WJsIYYL5~dF^5S83Bw%<9Hzt*RK!(#njAbUH{o`3=by;Ww7k8DGI!?tv_vbeI{0rBGrh0oL`D4pmD;O_iS^- zWGUce_2%16O*Ty@%o1-gy;CUaBzfbQg>chyhI>_*wU4*!c6#z1Rup5(CS~!@$6>gm zRV?Se(5LyF6+a8EqZ19$NP&AW(~{1Zo|W2hBi&Q8wSANofw1_tnj`Gz$nHkT6Fiu7 z3bSaxjl2Nb8QWB(r850vYT*Z`;mn^rEey>va!@v$u`{66QQ_i zHYpUbK*VRfMP2X3!!K=Hfv7Pa2T?A!Ot^65aP(xa<&#Upeq267pV}a5CGN#e3He%y zh~@Z*CST#T;UDd9l-{z(vbQWgjx26r3YNb`3Y^Cq6SB8O#Y8-pG=I9FU3^tB{!OA0u|b_z!p#rlgM7|209mEblOZwo-RWOa zI?rNWUAMw#>7tM_P_(iN*_Y2HM;@w4W zX1$fnIgvK;nI{fIwzo^#(IoQYQ9Q#PY~@~?;83E+Ctia)Sc*trNP#t$y_ifk*>=1f z!Kne|By#QFnllW1!d#bXhC;x7v#&9T2+z9vR?*WsJj_28`Wfk!{s9Nnn2@ilvrdCJ z*Jl}{tREg)kkmo;Eg3~%4e~B;mQWf6QsdX})Fi2|Stiffxcj2N9eB`$IXw-4RPSE^I3})YGL|#2%)ehhkxX& zb__bbZdMZv#8a4FN_-!5;zk9C-+6}wC;x0-a|q=i_2iq~ytC`YnV!YR>-Kb#f=)?! 
zAj~Bct5VIt4~fVrW;CV(qyH$c>d!G7jG7iIlIAHpabBXA`f|FNA4=aD5j2R-s0f$6 zl{(0nUzX-*PQIASF4R0Sb5tn5`Uz2L2%&ph`k)fVerGu*e6uPDE46JsYX0rGTMx;= z?ZGQcr@4uC(wgTe%sfXF2}L?+4!JzY4QyTHil8HNcF7FZxx2+xh2$yK?U5@v-Y+u+ zLY>+r<) zSAX+Ep_9i3Gq=+(I%PqCP@}fVfL*A`ka?QtBeuw=drJv&z)5vE>djdgL+ngYESQ{_ z`9u+}76WPWbV-r2mQBgByU0cSSHYy{8d#4~iB?ETv0HAT>2gX0WLg+{#w z|I32vFaG-R80n_Srir3hr>42so>a~nt@txCtP1Ffgw}FyTz29ek**X3E3CMgHur zK{>wG(P8I78NZy;MA{4e<&a$!Y?zSxoqVANrHmNa#Qyv~w|$p=PFbm8BoG$tj*<=3 zuww^5RZm*ZoOe1o%jBWFy=iNNJ3%?rUg9Z4_`fLG)`nu0bpn+=oi)a5jZBvVt<*|; z|7As(e?4$9+FOg3d4~43TfUuZ22=Hc_AQ>n#UtV+f|;1=-HT@wN>?BW>;CKgpB5WF z^+M#st70M*MuR{lM!02a+YtslV;rU%{{;~Da2ws}>>5nvz% zjoK;O|L)LqnfM8wHdPx|-WD&^3hCC~8!bH(c|f%(9DvaM28mhkp&Ef*k_}#F4ndNA zdK79iETHpmb)Jw%N|-$RIW9NGzS$Mw8d-kPUqsrhpFVv>F%N3 zjzU_KhK@m}4=e{|Dhd#8L4z&toeoE3Qw#))VhaON21AIU1yls+;C2@-!`GT{@xKI6{)xe8&9ZyAX^ zEpzivyQFj&p-Wbu#4vpaD`~%$ZwIvZQI%AgtGsda3R?r@s{4N-`=_mm4s+X-PN~DV zzQ7l3e;Bvd;3KSibBKFN1G)R~Lmh9d4TWlGp?X{&ZZodY5+`x6zU5=%F@DsV30gh& zbX+|VUQNT>>;o`qd*>n?)mNt)O^W;AG$2`21%5&DZo(^+`7m6bT|F(>et&SHkL zn7n?ghdU+6+XQr(r$9PuQgxrsuP^vMjkJu>6h+T?NOdAWGcM6Kk4EOZc1X%mqh|c# zHN&d#Q?86)v-BDvV)oD9wvmUJv=Z1PB4WkHzo6d4Alu2k)FyL<>XfDhLiz+G#3Jb> zT;0WEd1JaV4D$Rw$)}B*d1A#=XT(t%yEL`Bgln=EJ~gIxfoeZGmqRo^X0L-7+^|7G zyH`#9;tX{;skbAR=>Hx+rh8!72i;=GY^@kc76GAmH^!2Y%R?jAFdp)(iV=F!`*xNw z-+;T(Kqks$EYt})aGGO~v;ky^Pk!-bU9*-VTJu<9--+YnQ?#t~ymb0+tD?L#gFOB= z@NZ4ba1r=B9^CI}Nt@*wri#cn{l(Tq(EZ^`38DZClRPr;gl z!xM5VK2hxTi<>`@8|0+~ySm@SnCKuIyrKa=2MnS_I#3`y;_OC)Nrf`9K;yz+HpXc! 
za3hG38eOyNE>TOpF(^HkuiANu$7H}va7QRTR`uh*K*Yw#k8qQ6sr3_yb*oFc;-G=Y z5kuRQ=ehNm^eK{qc-<)Td(P)I1PHdi_W0MyzIP5r>olS`{$$V^qipkO+4lBV3Djk7N4O~c;{A{R4?mW+Rg zBtg3y#agB&4`l*{3*^>0Lo{2)2oCmER;qutfcCv30IXbt@@)l`$zDsBc^ahz6YsOV zeS`G()uiyxQ9Z2cfDz7j6{fgcq&72cods$Jot+Dkcknly0Xpm?nJv|zJQif6!r(y% zKZ0m|YUd;lv`Izaq`vRx(sn|KkEEMAX>ytq@PhkErrZ5m0{b#_&-_dsKD$lXAu{(G z%fwWeLxMtrk2-|aW&hPBC^n$BYZ)eX0-4kSx7s$JLhbL|H?A6Bq@<)!Y%7XF;P)(4 za1#NQaP<;t=&i^4_IDp%%JC7JgtgwYoOHYGx-_@L)d-u`eoY~M?oj5Qji{=P=?}C= zMaMTpb)t~z6Nj(FDxg#w-^xwm>7cliiI)S@)2(lE9=wtX%tlK1GB4weEw{P0U(K}A zFBDA-^m$~u6#wU@JPYh-LSUmZ3>!3O_`f9{ly$1Ql%*V0I&a^cLltIUtHr9}r9A)=8%ihMdUG-9U#qdq4_D;7n z^_cO~xof@@?~9}$&8Eb39XE+8J2gx9;CVdj0?Lqij_)%6p7d|=kn`uWHi6J&ECN~_ z@FKd%ed9^>6KhR0#+Cx|@9kTOS@1Wp9^!d=iGb4WRFB{18+X+~;nLR|Zof~6e}A-i zc~4DG3owrVHKe5V^QTaRnCi!yK(sr?>a`p987sxd7kTi(XZ#-?aw+nlMJFkr{T0Gk z>rHdmtZjR9Yv!+@=*%}L^^ht~R9uQyJzR$`IRYjOU4_QNo!JrwtX+4IcX&=CBlPAm z6{}Q{W)(4&5waF23+)lqnoYqegPi$GyWhrL9is=k4>{p=aNDuV;I#8nYg2(Mw#aiS z&X>PwSu2?wlqYRNH5=V!wIJs)OVi7fWZ(f6XfT?FNvRQKTlKBY(iks|$~x9>a|IF^ z6Thz48%LHmKN68Le9$BRm*hZK-ZUjh6BPehx*=H#-+$oBC(Lbo57m?GgWWu@7U218 zHAdsTd9y)7;dVsxXb?M-=~2{BQSqii1aTJ3j}z16jS_F43X|FxO@%BA*I}Xluq)QP zgbrZDaihatcfJa89Vp1}vdWVAcTa5h`ouJ;Vcrcz+~Jj17CB!TZ0*!&x;7k;{%pW) zy0-T=xg3A(sx&=x1tVv{e8{Z@m~QgoBe)Z)i?3Is`l5Gmq-;VwW zc1&;>C%tFET6+}8CzzEoGT6g(&c4sdHg!C>>EDcUG@^~T&}$)$jzC8_U+bpIFv4=5yzQn}-ZJUP!2CFKJ$!7=<6Gj~rRUYE7)Ir-O$ zGHS>ibd(S4ed5sCFF>vV@%9ZMjwz&b=M`~%O@va(TMBctC+TLNqx>1bgO62W{M>hJ z&V=cLbP3#rf5(}#^g+DhQrOFSE0LRD;>!aQ6BZjzB^UxV^T}zbTmaQ*p`R?mhZW)1->N7dE=uUpD zgbh&3k2H)?6)K#h-;V52sOPuhnEN{s>4`diZ25XNQxf= zRcprSMc~n@bS&@aB|2}75+ny^ayIaPn0m{wsNU~;7(@ny5l{q%22rG8=mrHrrKKf? 
zp^@$`k&;r8P)bC)Yv_&vhHe;A7`nUpA3ne5dtJ{9-kghT=FENHXYaN4-fJD1cN8*- zJoQ&p>Ww#wkRIvTmK$wDLeNLfbVrDTg>KgG`Mfek#kbJGguo*2+Q#pj?7+}RIy+U; z@ob*#-9hGSrl#t&MY8++FAkf3!=CHnvoi+E$IuZ9m)x@{F1$Zd*Ye9qR}|fjMPmpQ zUJg$e$7*`4Gob$(YoeV!6EE$L@XinWr)nP64{bt8^`J`|q)14)c9Qhs-mACZWPS6- zoyi(rFg~s#>Q8+H@}3k4+4s1mJnD?2aA4P;f<#wxv2evcgl=;^iH?190v2>H{7hzU z^FjE~8VqF!Nip+FhEL4KpQJ%H$GLrhJLMmc_M?j#(aY#BMo_gl;;ZZZFX{f@&rqen zsn0D|fJsT{JUqdQZj)cezbm_&e-E@0t(VrJjrG?&`TdT&=fLSN;k8ZOOy<&$G1>0M<6a z*29suKR^f+;DB@kymF8ZWT^qE!u$F4e^lP`1y0iys)=UqAinDJMB_>2qCl}{Uo%6t zV_~7w=WVgoWQUVE2?Vjfm(^A?)7Re+pfIR2BL?y*~+!PxTK8xpz~=Po_+d;`cCoUGXc+ zwgw(OUdjICfV1k_oS?J|e}8`4E30<-qK0tiUpQG$JCu0$a;YUB+tsXh{)uQ<-69;Q zS($z+wcbu4b-4Zq)!;^3fA!_X6VK;@>^M`N#3KvLt;|}b9SmEq;1kSJe)92ceBwL! zNl&w)bedg1?cThqMfKZ_dfYpc~&K!V2f#f z{=u2*&`96}O6vN)a9AW8e-z=NFX|4Gb2nGc_JUgDrbIFsv4oKVyFq;(_ zaQa9*bouEF12OqotAinBOFe5t-D zZ@bz|>NKL5O{2bz#%R|AZQmhIqaawLGux!HNWT5+or9OKG9>$fExT;0tG+L}qv+aa ze_luu)g{M92U~vG65-O>+U1;BMBAekN)v|ZaA3ggGL|XaEz-^PgN$<>Mi3mG1b(f} z%%1k@yMFp6TjRk6YcqT8eFVaYRhspV8a0Cl8za5xglZ267pjk37rwo{OUGI~(o3&Q zMhH~z0sr$~H|Tc<*r9_W#jtl(xzupab9P%(s?f>ix%((0zAX?CJ-cs3rjm8W{KSXgWI#qq*Vy zP*yst5cg+uRs>&;f_cM(FY;=0-PPL*vY8Qfg@V=7GW19sKs1B=DO)?-{W zFM?sPABUj$C5ws361lW1zHQqGx#4FSaPK`F*W@OkZYKTo;UWhKTO<(rTp+&#heT!; zWm*3h52>Njp%cG}cnmE(XJ_BsR%|XtHs>YzU>+vJC!ab_6d6pjIZnd{BadR{hkkZE zpKm4UFIqyzguX@%e9YMt520PK#&Jpt;J@q=d8NyhFfkS{LQh}CQTif;6I=1T2;gbNQOvGlj^+n}~jhk$A+=K2%6Tul#>T?EILT1eVO`N~EOi3jbihuea4VS`e?@k7956YJV}U@m4>VM3@AUkQWC$8ox2% zy_sMMN@NyL5un1;K-Ex)LrXFtgEjS;jdl+K&D~wu^rMjx1aGoh=un`URu30oiTI&0 zPihxP^mKIg#^4T-R>3+i`{EUr?>M5U>xRZvgUw^+1H41WP+5aP^|=0R+}$C{7_@uS z$r_GFk?%UYxL=*EZ`fYZysxuP2SEu%qv`BDZbcE}IY)s;PZTEUvLt%&Cv75s^b9sO zjeeLT1B3nX$w0`Hau%&bd>jlX@xEVSOk;8R%;UE*x>p6oTsm}qWmMg!{z)` zszGFV>ZK1knd7+utl;b@&Hvd|yG3fRP4(V)g*JQN1^*}K;(<4kM!qKCnx%)lmZ3NP z2bkgmmpLvK|BH~A&zpXjd5YIf7nlB9G}7EeYh&JB1YRE%Wmiw1ZLOC4hBvKM!^)+2 zn#FpqV(@1yALbY8p7Z+m^4TFyqbhOa^-63awH^b94Ukx$C(RQlY~u9+75jU_ls{iK 
zfx5#3PoS!yfmZW5pWplsRhmK|qu8CBoJ-xmts5ys+p7(pdI3z+A*2P$oUL;>-}cPA z`5l)qf;=JKhHT=GJxn-4kAetAndVt)Y@KXjw6ShMsEYLJ)Mpjs*m;Q&*$;sDHtbOt z?m*`D#q6EPnr^-AkFUli>Z?^Lk&kH=9pnTkpGt0G&qAb9c2tefw8sDLW6^-#HwTYd zYU(k*QcY(C_2|(V+D4}bxz8;|vmc!Yy6e-c3cd+gdZ3))w@UyN2#|I2XCP-IU&|EI zyseT~O@rud+5DU9Rw!=6Y#HscL(%M>B+c#v?WTX4f<@ZyY~`c-8S-^B7D!vvM1sVs zJb^N=+H-55O3tdiy3$6l(q~w~IB3^u{MyF6YOnbDS@WY}c}$#TyFd4EnSmO~^~PgWxys1j90Xp}gY#U9oHubb?nVm?lVNDfw8i}ug& z{V^AU;btNQ)t{DFi5=r6Vzfk?ooZf=?D~yt&h878)QfZzO%oJpH(+e310m{7w-$fzz|R&78sRLHfW0mP>~)~Old3btQsy%6 z(r&{H{LD{?b)RL+>0y#qIIKY9-S0z&TVr3sAaC2NZQmD5%|4u1w z!zQoeLVyo#mMc;7uRn98QI#q>GJ%c6T72S0M|#hL_mZuP1muS|BP6A+hXC?@)%u0I zo<0(rh`Rvdy+M4Vx^KrJJT$kbz|BencL-n{-~gukvrhr(B^3 z;r1`Z;ttjwsN(9@OZj#!S~`eD`+F{WMLoXv zBJt*4U$ubMW$wVT?ajFs_;>Y`g)&L4iM)~&&}#>*I(w5m2>(BR4k&CV09;}lG|DDk z9WS||7DG@q?^n0l}@N4vQnE z-|-fVzG@cyEG!;n`(S&|g$)MHrbyN&Xye<<5^7-GDw&@-*+$E|wI5s!nAll_K`@?? zF6a1S1C(?=^W-iBEuK#j)G=p?jcbo6LhWaL6Rf>X#ftAXNqs_xZj<#Fadg|+OaJ!e z0d8k4@(3n;l}oTwPlh!7J-C^&9SccBBZoiKeK=}-JRv`SnY#CH;#&0eM2Ca1??Q?_ zch6#b-t;g@K5}kDpk()F`Eib&^Pv~^~ZA%>^S%nQPf^HKe~P9b*xuZ zUMgO!0P-kM4W(vZ@x>=Ls_}zP~wcXs~2WJ(lmIqSP8)bFM&%>jRlrKro!W^e{Mw@_DZ-XwlmGF$W zRrY04JNk8(EI(oA1evD+;aR^=GKebhZesu)vb0723Y}R)298CH&oWaRq2b;qFEV>I z+lZCr7N})Fc_b0EB5L{9 zoEiO6<`^%JO4gQ<(pF{KPE)F4i9U(E;aDQ=GPL~4anijL(jp*s^mB+Plq^{ptdkxWnk{)58gUVY zknE*}4F;0I90D|Pg=0#_7U<20^7U*wCK=FdTI+3j+>ya8+CFhKumYJY8)uhUr;(_Y zg1Wf~sNllxsUcq^rlV;ZeV(wX?d1Uy4yBsqTfeG$RckGrAkueEY}Aa6M@rIC;WL63LnR*ZGddJZr;Dwn?B^nte%l2~tB8JL4=wSdJ z99#=YH<*tY{69w5yaJadUWvWs%E;hwk3Tll6!di2aEylxN*r9+=O>=FNAg<4O+<{n z|71ZXTxR2T|5oCJYkv`!_1)pj)+id9QLP4Q{2|gfI8>fWt?gT?UXa|X6$17*-5vEh z(7X+rDmqh0+ZG;DZ6Px?c%{o+)Ow++#q0H0k9Q?KjWohZr{(o3MNi|=PnBRJqB=u( z#{SXA?1L7FLoAjv{8n`L`jewW{t&^7x3c%@aFG(FW&H6b6|s#cSDonFMuziK!)uAu zm1}n6gI~4eOTqVs%_^0g2c-HkBa>F%grOP3$Lcn z2rt{DuG);zGH;qz1d1PM!+c$J3=K~n1VstFc?CdtLEgQCgr=D)iT3f(b66{J1ld5} zdEiOcDw>uTHg`aRp}%bX_6N1wM1ZatBm1u2J&Hlo7fpR(ySm)NyHzONX7uuDY4TR{ 
zSSi1C)u}AxFBGNAv1D-=9&5(_?HodBve4$^R+^QOC>BjRYq{#0XrN{JBapa0-#e-G zpeO%eJk(>COzW+t?PWsKCgE=TFH>QM)s)xn&EwV`x{Rg|x(xg}NWZxM*iSL6QB2*b z!WeU9A60{0`n@NyCshk*Uh5-eH>=k~w@H-~Ic>Zx;UCV3=*5<yQB}Y2oS;-4aHkk{C-!y2rTTuq7flzpllW!I&0*9 z6UZ$L_AD;2wzc2UV7hbFONM!$Y&{Tj-*I^{$+GHvjHg}7Qe0<@HM9+<4&MvysuFv%!V8VC_lmgZVoQDU9LqE- z(&$F|mB@5%`}I1~Uvd_~uHpH278vooSZH}MVWRePM=JYK1lL#UQQ4eJ99L04S*T;`psG)3WJ+bLfGS? z#+44i@Z>DW9{%D@j(OU1eX)AI^`Fl#%5BDUof^rY_cI*sucUG-u<`dupVJxuKzvFB z2r}AUz5qij&x>>1!MA=c9hhf|b<#57^YmM=W#+qJ-Rt()M_ry#^*oT#F!d*80@6005$QRPDtH9{N?ytJmg0`nMo>1INhA9Ap7|J>IN{pi}Pn`og zo*P_QFZP?-2yrDDD3kUo$)I~Kq18O1(?qBa=53xTk{Z-pMxTuMqH%f zuhG93&q_^#Eq4~!3E~nVIN}&|V;Dg{*JOTc!u^HrA4+|lT0RQ(%o-Y%e{#0ZKD_)c z4QvgcD$;YN`Y<0uLy!t&4Z!qHCMe?)?ieJO_rKp#(m@Fs-N*gq5LLL;=@+41v=|YX z%hT+fu(~gN+FQR0gaXWkh;%&WxFDZzC9z(rmt4;Sxm({&d~(PKaA)j(ZkrCss`*lR zkcIGw;v&e{nE#KF;!B-h&!UBS!fDc9=!dj8-qZV~XonTAL51UB6qh2)Ld|b9HaW)a zubFU$(biw|Qu8=eg5|n=brr~g260jo*K>&%@vGM!T`zSHCe5#W$) zZ|cMOzQ6pO_{NNb%slLI*k63>WyY1`hc zWf6d|n{lolvL)VsjsaQZ`&&^pK@wksIt6&kh0WprTMR%;*CyQ9xh`)D46Y_h;#6zf z{rj09n%<;MUts+U_Xr0$jP}2b@+)cbKRs-E;x`_>Y;X{)kysto;<5jDh5Ac|1#15r zpBr_RP}_@F%a2&y!x+;CS^ldUbPnHU4)}aDxO`!7Tv1eZfcRn%7vIuTen&gLEaB|d zH*)Yg+*l3Vq;pNhG~6~Aoagn@H>^K2=os+YIjlYCQH;k<f5qi){p1}Sh@rBB=-6w;5b|6@NG!9tZtrmRE-t_U} z)n7TVjUZ@)mp3VjB1-;$ohm6%h^x=OdFsThU#;f<0dgG!gYETXDv&KVwJPF~naF4Y zDrDWANq0?-exm94;o`;oP2a1lUlg}aXOzx9h0|ZSMeBvF6c~}3T&89Q(7h@5fB=!r zu`!}wl}4EOsOPi^6tnPU1Q*NjNS}0L&t|Iv*TZ5j6uK6d&57P);&^eypRVeitT&Ac zf=nBHSDD-c7#_^VH;kX=-5`ue$JKSV_$st$%tGq?K-)chd$y{WeV+SJ1E`ME7nxrc zue?F~cRybgnW)bxo2s*oKvK_5IAZ`*%BtRR&G1TsUmEC(vyZP@2jEznpT@NGsm!>Xn zKI{D7sxc2Vmqc?jvyDizYUs^*LbBIYx$9nTYy9i6xE z4gqKYnYPR0m(OWrYe@59LN0zUR{+%D=C_!ld&gsUMqf!F=zSLX(iY8BY7eXs4;RS{ zPgtFQ!^bqEv7@tjL2IaarV}-1ErXjg_ExhP|9hj}UFhb(&^S{tK!^c|wTPE`dDV)2 z`dY6vMAN?JkoW+@Ns}wHZ%Vl~IcUmV;C&jp=+`@}?jGW*|B6_j_HHx0;3-PJ2#t{M@>6YtPwfS*RG zhL3=~u1A?kc-XFHztcfxTI&%Jk8t`0Zb2>EX8M)0;n|t%2q#1R2y;aQ1&QPlLWNv)UU=FVoF$gkIySf)(OiuSW*Y4cE>dWp%YqB;FI 
z4_v>DTF&1%EE6}_VH(ix){IM6s+l3&Vwym@=*D9()d~8=2lW!BBt2%HjXN%Luv9rI zpyj0s@>@^XniV1k64*=fi4WKP&7VI(5`95(4emu{*es}Hj1{kDJlu13pU`jRgSO|k z0_>5YB_2-l8^5^TqBs_&@jS;;TXkshbYz5|>VmLIlu7xsQgWwvk$qUnYBhXo-6yGB z!rwp-Yhv+hd)RY+uWH)+CW)nkhQe>=0giRl9#Hrsv?Vmoq8LGD6jTw|TYTL@f7w&< zvgBQsF6e5jiZ!}b&}_XoQh?AHXS?LBa9AlytJ20V1S0ZRa7x{n)#RDh-Ly|d8uqkI z>OjU*I~PnsWN!{EVmdreXB$Oo}U6vd77jc6jjZHAZw? z2JfCpyf>Wfgt=9fjDEbPrda{|rZIiaz-CU+ z_LGP}p{DabY-9;`Q|48bfQ|Qmy(S{i!RBk)u-`F=^nxVJTK4+p5c};ejJWu$=_UiN zd%OuPYOb#1b#Y9QbyJE?3}pNFkxFli)$bl@$yDuw%g9rImN0#6&R6yRdw=7e2HJ5C&8A+dX&div!HalJFgDuFH8FAD|tG-xcc>wDj2Zk1jY~-Y^DeTh0Gi5BE8~bsTI4N zhj}MD_?N$~ywvR+4Z09t_WXMo?!P>g>YHJpn_A0~fCAQB)R|*YeOxo%;kt?OW7E#^g?HR4 zZ$=iO&AM00W(!;KSV6t>bH&rj9P7yjjRiiV>L+>$@VUqAJI7_OP-3B4xguIz z=09b~!WDtl0T?Ddl`e{Bdoj6{tw8i1&4=^6@+g--yM+E%!|BsMTw8}iW#FH{&CF-s z6~ItLj!8W;{$F`*9%b^Y+V9xTV&)*bBHTm%=;v^b0cwok6yBfz3@Dq!z1wMJ)eCa8Kc???ND_yicggx=c8HIUCza))_;7_;B8NQ7{9qi$piA=Lz=?7*C(W}* zv!g~)QFrr&)C!9m)rM#4nT&yiP{)3-VISFttvN3HnV3dAouWC>N2PY*N>n zD~4oMNqdxJ9`G3V#GwYc(Y$?$WnD~t2m0E@qCJ2!n;GaVX>m7SzqtM`ZM8_B|KcI| zXsDOx0>si7Lrlg|lN$ArK$ZD~^esi^`ee^)l$e!5=H`Z6EiyOegtUCZ>M8ntetyc4 z*KGF#^QX`;6*ph&xI6Gr2X@&a?Y}%jHpW(D1o7Y{X$k+p;tR~m+=zse*!Fc{nN&2W zHNoi1R&&g~XqBwdjq^it=~_Tki)@p=OP!EtocI4>P&%BXZShWz9wBKAbj9;;{(&du z5i~7(zcB8wp{ECfaqD6#p&$4m*<<@|EIM=Rj12__1|+ldY)A~+x{m%CO*XO)R1*MR zy4u~~_aP@>%&r~DY=db|RRO7Q9GYh!(F5YFptPKSq(%gF(YOwAn)R%mabvo!5xd4-pLy$9(M@Daa;T36Cg?WR$1;0r;=`f%n`;RpeZOTki$oR9t8L zHq*OPaw1)yHz7)B4kjdb_c7*`%Tsv&W~RtiO-WN-8e}M6ST;DLo@uLd>g4@ccxUZS zP<=+8g0$v!vsknHX0rKL;nl3t4`2q?s2oZXB?o1j6qw8{^X0tHoa_*BAN|wbdo58- z=UUz)HXspZ;#71s{dGn0U~}Xd&%Xe&t?ejSxkIf~U)hw5e>pg7X?tUI&P+B;PVF`ygImz`CX^ zl5i3)nc>{y&?k-#AtU}q0{Y{UyQ;7!0UdAFqr;+N(^dYkiM!L27OfWV*|~*xj46A$ z!Jyv3-jmBc(Z_Gng@I2xRd80pn{rI3cs=L%$mnZmrr*04y--QeT(+G=aH#idcb9U+ z3`P5Dudf|ntIN-U%!DUHTscs*VJs#KXNlVXpBHvgEcW!}6&26co1yp#&d$-74J;P0 z0u!I9=c_!r-iqRh3_I1$#zHBgb;vgoNCigbkJh1O%J5a9`Mg2L+EqLY$AOSPUB#Gq zg=%9|m}|-hGT^k9JfLjWrQg<}(znYn66<9+Dl3is+qTV-TDZr}_ukX%yT_omUli+> 
zUn@wTBZoiNMO$$Bf0zEM<|q!U)_*pNa1cy%K5<6z|3w zgR)xI1?l&$3Mm!HCnuaz$;W|FkOcG}8q#f7&`C#if!HoFFW}qPapqOYx+$Gn7K!5Z z6d875`8q%&UsRmTwOPodVuMNaWm-9v`sk0@$9Q$g8wx7_TIzBwiF7m!Jz{})t+Q=% zt)hG}UI+Z>ILzUJ;k*L3#E)G~mDw=oRdWAqmR0v|NG*vICHG(`WHJEzhE9k?{mx{X z(;RDcKj$QE;%O%a9)G+d$fS~h9{aW%oU0(zNtFFPxL@CC_1ENcJ!epZ$3QhmEV@1@ zjL{%f9hce4vB^X)wXcwQ)RQ_B;ER(5;OYh*oB(fcVjenk2IC(zFm*cdl3ogBZe!l% zp=2dW1NYPFo39FzcrTx-xL2tv-yvbZgcBHCg|jHlhpD7;0p6PkBAleft3G6aODa+* zT->gg$1!EX@oC*m2yY~+2^3H4R6d~)?*YIF&2^`F8e-$FgyYq7!*#Y4O*1paAFQWI zsFztY+{liThJR&r8$D=sK$T*EIpAd;cjnm2;Lz6WM}#@kc#0;lqT)6a2QIm)x;GD8 z;%K1i+)Fg{Bay6aiI`~RfOBpiZ1&}jJJW)GNrqmo?`mrc13z5buJtj zx#8g*ph*}@yvB!AWpGe6Bwc((V76|U^;P|@rl^F%;*jP}(w&*8lSrb&9SZ5Fi4ZAQ zaR!`Pp{wib>7Uo{CqbFrBeTQVcatE8ymu=Pe4C{@37-VP1)x*o>9K|OW_-;M1k|N` z%}ooG$Y#Y@sfB)=L!hq^m%+{*6z!Cx&plED-l98b*rM+d`PqLGHbt~P`W9lPCA?hJevqz@{ zIVNeE^Ng;`+lPQ%QL4F^ardU+yNte`o_CBP;?ZnAPmKK|b)yqKJn2fnqA2V#d`(vt z6Wu&OqKNBN*PLwSHRQLLat^&6z$nn{{6KgTZ~2hlABQ8qH9*vOdyF1MW^W7X zX^JiE3z6dnna5%$BQe(RMwnV$LFsR)TcP{Ew1n%1y;zNTNTPAWRLRp{Tj($@JwEpL z7|8WsSTm|0`sa@8a zY-o_2NzQV*sMFBfUrvl3x}uZFJU2(htS%!SyROu5$KvkeRCq6~0fwfUcwLU59K>A$ z>ulyot7|C9NOtw4BAvj9>!4oDH_Yae!nae=t?eTdVY!XuLhMyf{<7VhhLhcX-qzp)M-&3gn|?~2KHZO z<%uV#d2R9p%-@SC172Nt$sZGE!$+~{N|%K5|^ zKosk4tF{%uaIWp_4GVj5j?{z9M9?dp)8@Q?zwBKVltuY|f`%dGf-^K}*o z-GSxDeSVys|DF=c1Hu`oKIr*#EP+QHQatZv{Z0FiA;<|GeJCWBrgC~maLYU%+zKa6GYW8TRG-+#Z6#I7M1f|`-xB5+%^)tBSLNuPn|lW+uBfx$>(#V-zeju#U7iy z&2M)O>`ga$c_+K*o#s5@aQKq-u5T!DXvb?Y|0*MoEwcpIs1Nl*go9F$Gt#XTYSg^y`FQJsr(Fn{swhm;y@D$*g_aU2*RAz@g*RgLJ+(()vh z&XztBL=%4hPiw>o`cB&%)>n4X;LT53ZIO0Ez&t}eELr%=APA=y$2iNRTSF@Fo3JD2 zD=O$7pOj`m?%`Yg7ib=3IN*Jo66m<~4iH*XCidNwkDLjL;naVF&z$VX+Y%j|V_$jF zk%EI`QKmmnAo+EYH;p`}xx|+xtGz7(w`>072m>e2M6|CPI+sN9t{!OKOgnSQ#S?mp z3%~gzU+X5^8sBt6yo0Y-IcAg%yHtl?^7(OaTGl`r^JO3QNu2*hc5Z=>R}X0##DS-NwdwYBZ{a~9!wdQ( z&XsfVp6Y{58C{hz*9;E6cWG@^;U@&!Mzu6SeI3kvE3sWge0?KNCBi+Y?YJCn(@I+S zECSE|Fo75TWDxylB(9f)U2yX`Dh$EKXQ=)cSJ0MgsmOaZc6K#^%=Y5Bw3y_o6_1NKPk+2w>_@TvjxU|_aXwbLbG 
z1T|eW=g(u~Jz>3_VtD$B!^V5YJ;XM~uU^Ytg!=Ad0>HgLgRGJ6H^q(ulPUA~>?`0p>`T3M#jiTcLhd!s2puS?_nkXZYCDBV7-Gn~MOY%>Nk(nwF98>9aJ#RyX zQsPVtRF-{Tp(DO-ic_>(b8Go!uHwdIL=tYu+~v$Z=?V(rTTi2E9vUqZXKWVRU|Rv< z=!{lz0mlYx!nP1g6_b2~Q4jL#5(*Uu6uXVwAUP<6%RtYlupd?dp=ngf-#f`IBV@r- zTsAg0EZ+9v0ltWzXpL&BSmW1DZ?dcE!+4%sv0^7A@Y*F#@oQwOo3o#lo=iy=FySIi zhU4(5MVUkVu%?mMWgBoGKC*8TEtqpvDE;#T!4i`jaq`8pLoWhbxn#|q=i=0_6Cuzp zR@j3Kdj4Vp(AEH@f$Q}Q_rLMRzwvN{{XMx(Nhqs@t8hK32okxGIPCOlU$09=IyXqW z*8R|{=G)`w-A47Fs9*X%QV;b6Ns)DaYtgI=O6rrZDXVxlyFLx9Ju|5@AAFk+;JZTO z->9EYv{hRkH~TD~z&8>D@ep7)0zn~>)vRcCoUgFcL@pRN?|$wx!kw()I?=o7Ons08 z(F9zZO7FY_dmQ_F?Onv<5dnGNw0Ss55KoZWf{n8RDU~_z|A-rL0p2graWPhB{}6q9 zSR7n4^1{Zu4f)wt+ow3$u+5k3J2{TtFKpo!@>HNqvrc~_T0nPZE_^+%9h}1L>x+*t zVABrRU*U|Y;Us zJS+}NvzissH83wO(YtW4d2iPOwJkqpFwW;BsnqzGes2L*zZ5dwKkC@NQI7E3?=m1B zA2e{X`Y!y>F*0$&kne}pW6B5eJBPCmmW*iM+-*IjMRUsA!xA$5vo>5kViF0((j45h zlD|WN#7K$--Tsr8_qG0>>qC`tq94>}2KZD|IsOqf#L*GEhMfH;MpOcAl)L~^pXHT# ztYJEE#IV47a#`Cpdm$j`h=UV!fS=Y-e;b1#rXdi!Syl>OvaYi=yE{C9smQNDT0%JT z_JF(XfZ|rPU}itcp;Id$<#=PWbw-BUNwY-g9EW|2bSDl%Jex*kLk04fo#w~+*ez|> zbl6{F$=-G|cIxjRxVCtCdg|)O$-z+_1a_iG_HV2TA}V-xc~MveT9fq{?V!LDo`6Um zy88p(FT{9BRYDeebVXM?yIiKKL#SaV3L=0k{QW=y#=A#j((JgT-9*)BjAVdkl(xV_p$n>z)Ii4p zc*W{*`i!?>G=H??CHv_RH!S#JKhynA5DP})3q7fWPkKh!{RawDw`*swaxKK&8dZPb zayw6sBuV!#u-uJ!+=nbo86wiVE}KkI%vhQJ6z0V<}Fl_KVu zn-&aBZezs;RL-Ea-#w_WA#D$ka@g!yH^+Qwl#OQW#T!Hpy)T8d&5lQiDGQzjmD@0d zjxqIFC5UZ!tz*uFh9~l`wYce#A%~*UM9v0pmRL}#`|6QfmLzM&GKHrL%0w0(( zd66ZQ{Q0}&^Gd!Ox_TB~W2uh!{aBR1u0ZN2h9t!)u}7{s`&v!E*CSOR^W{sOJB_tc zZ-5?tl~PF(59JB+zIA+n?;AnX_QVsB-`yNGNr4%CznBqK788lL9^0fBhOt?1(rWKA z2`N-stjhwUW^0tu&;4mew%(fM4#i5h{SykV7PHY^6L=8dB#f|RaJHZr);|w^yPlF( z*CJoJlt(`**7)J%njjjIXay?4{dam5QF7Kz9E?%%0rK#_I5=`Cm4?1dfq0@8_G{g~ zR&cL=WjssuOEPwWWe-jjXD^CD21$%6ZD7#Sf-6W1_x4wDf=j6gm0!^P;6zT+J!0=K z874$C@%o$)!tgEP;}czHUw1yfgUDP+3MdI5AmIs*i+NBT_>H$lGOOl$)J0yWtnS)n zJe<2Y(WK$u9Qde~dwceedfQ=|D8&644>$F02WkS`Gv~Bgs$Y-bKhBuftQTVsHU@B@ 
zu&3iq{nwt=edQDo&i)i`eEC#A*&cjT=IXTDw4a4Shr#|Qjk8+zXCNu*1!7m_cgr=? zR4`(NqPfeh4!b2=`iD-$R#>t6ilNP_!5P};PZr!<0i9>&>y6y;WT;&m( zqMN{rq&P`G1M`x82}V<#fbdcbo4fco4>VycdS?q_zAR|^(-g#kwLNv{bHzIr!h_>{ z$%T*ZZ7lHFrf(gVzj148iy z@m=UuD}8E!d#~nNJ)R)Gh$^{gHh3o}hKj>)#?(32_NoMX4)cNVP4^r>svXatCI7V1d8`0qDlXB-cvI(R17Y{xE|EwmUmFMPMenG}pVi zf-^Eb&8pqShp`hINr95^9k2zwpKiOW#FCqqp{{MQcaXY0fUg9Y{v^(?ntP&Uf8(Tc z-kH$m^Whq|t{w07dtB-THos*J8f9D?gl4fRSyQEnk5rM73b(tm(4V} zU%W)1G!|jANU{`zxA{x^?*Q&!o|w_`qinl_=l#5lz$=Rdf{7t~MnM$_exF%=q;ps8 z4pB$>=Z@wAKGj4fXH>#YopX|CA6!&c$YLm~r2>x2*#n;^>6tWHKavc`qj_rJ*ZC8~ zyP$eUYELMC`%+pw1K`01mT>>V=v|k$Zve}_#KXA_!X#h2bw7j8W_8|YwEJh>!mGlI z=_@JHC{?gx3^w}4VUET~=eqI$GZ3;Jz}Bfh)V~+@E;~{K8f{kZmru{G8}0umGX*VW#wCaye?q*Ny~{s?`&zqA8+c zK#v%GT&YaZ0h2xzee$PCO}d*+ZriToa~5gkP2(aKTBp;R0@mXNy0?{Xg_WEBdGT+G z#S7hi)er-PQVtB02RyGBNbv99Taq<;T~m7$zH6v1zP}eFDVZzL$r7w4KH06+PKXzH zbyMk3Z>W=6_I>@KMVf&$?^s*l6anUZcUOX3%)p3?K841LPUi6xL zG{)9byVb1lguumYs2Pdw-wTXqY-W4EVjud@MDLs|XtpGqc?0j>_DiFWQf1!K4E|xRZ{mEE zeHaH0|N7Ro!6e^vRGsd(*EN3DZx57a;NiLtv52QV`C+`iXHymYie+r`sAuABjT))iSJy|KLDI^p8{Xn+@i{2IynizxI}gjd z4-V8a>!#hlckQS@KkVrFtei#)&2kfSid8gX{&^zW$aTnBb(SHdfAZE;;rG(+6`P|W zV&=d*R^Lya2G~diEbP^KRABeh5Po`gpJbK?FYc>@sbH9pRa<_Oqo9Ctn+LKqZ++8MT8XOP3-$~(vx>h&jHD-Qv z;O_fvRmX3FGg?%SP7e4&V2>W-bHKdAgxbbKBV|5cRAXAtbe!Pe@P7w9)%VllG?A1F8` z`PgKr*tWj#d2oOF3)a9-AAhoeiGk9>E2{C*b2pZaDEdHkW#I~8leUY0>GE{!f(E{N z`+G&m&UwoU9|Hj0IE~c!nq;ii$_-G+0BJFC+f^cA;+`E^J?F~;u#%7o6CV^J4 zmB5p~Kj$d3JihKo|BYfV;J@p2%4hI)fB0kd8^$8uOJ4_Ci#=bQhs z65g-HA1y8L68etVycAa%BN3NKX?r@dPdZdtP~vLjH=ilx$6s(&yS^IJJFN2r54gWi za?PQdk~YbEDw6tZvj>iqH1hg9!Ni^1)S2I5bo=mVhAPhHdhtU~0QXh_`t4NS&txpZj$^Yt| zdA|9aF@`?5q~700O=j8swfLrKbxjMinSi8U z<4eCU5*tMfay7NJc@*rYUA{~$LKh?`XgeDCYj~!)_eDajW-t+$A!z!FrNN&|>eowz z%5^nTlHuEuqY?;5Ez^XkO4L@&zvf7H|3aO`)R=*WlDo%iTQ(rQtuvUir86WKWbHp- z=&c!Ki~2CRXi^wefYSOKRo5voM?8GW@5h|{X4CI03a^xpHKltdf_W^-@&$>GD88i) zB?4)})Wl!SLN`8?BPDfTt9ZIf_$FtnY7l-rzKwS4pYEdM)|t<2V=EcM+oH~Is#Oi| z{Yzrx(~X2+Hla6fG 
zjLiblgOw#WVN75A(X|3(ovUw1NNUDR!T-X?j0gt2R`?=zeRjQj1Uprvc7**ybxan; zKa)7rMeU7#DbBv>DSmS4dwV&yQNbKLAiCfFDnFW~X+K`?i)i*LumcMnO<<$>JMiE8OzytvDrhR04nUF6B?Y@^i8N3)E!`IwTw>eh@0sD0O0mQ6H6 z(um%8w4H4bN!zKJm(JQJ>;mt9i*;F|N1@a>9x=GVu5b8oa(9~(Hxe7^q7m|NEVK4o6Suqkh|$1a~K8!^LFyrfDE>VoIZOlj)LIx5s=ZZj!z zcv1c~0iE#pu;IjFlh@GBbm$9){R>7=*;#e|P)qq&Mikt0J*f!$yxTp@`PFa51FKVP zSaXA|--Co1sqUwDu{k)fIj6Ix^c##2r?X@CdAoMwdus%pea&{`zhAp{?2QOdy^M{M zOLy1qs`=YDBrt<8jv_El6kYA~czG>s*|kd>bFQYqu*==VrR}E~JCsM_$T0Z+{+!o2 zOfxmb?I66Pa|EW3VJej-Vd*YzsG}?DWb1r(ue4L|+C!hAfj~$;0Bbch$hm1i^72V2 zIxo%JHggzebUu>-G5^ zehGRNvek88dF-%eS9AI>&qjO}1I(p;;43)GQ&mwM5j-OI0hdZsy<^<-j#I5?lRR`h zM0>q?-ZFLyr}*7nq)!wi5yEw_Oz-%rxiNjoDE->dpk57?XTow78W-<)7lUxzngn27 zm++-+ry~yLCS-Jj_a50l)bMhlM=|EX+^T8E;W4obO3RWNHX~KD`H7FV zG-~5>riE&JRt%g!=M~r_BOIOA7EXm{6bB-!>@@A{LetDBXuq z#$D;MVN>oi;m|hfn$4a(jKSCGV`xiZ zT|p2OB;-p=BS?ozcQ?{8I;Ew1v~+iilrTC*50vg^baxE~qvM^w_aE42pWWv^yXV|< z&aJxZ%k6d6zT)=XBL?+~TzLYl_Crn1sS|KNI~=8oK4fa#=w2tcGvKB$P~i$Y2u2U) zA33&KQunLyU+r)HoCUp_GPGC7uIi`)DT)K#|I~itjX1#2&G~o{EJEe}>nL<9o&#t@pKiaal`Y3NP`8Y6saJfJq@g+r5>QLO zE(39L)5)52`d_of2|avg?6`qwCs&DN3q0?Bej;%FxS#j)9M#*1y`#1-%dL_uNJzdX z;XZualGOY57+C(ZT<}f`BYQ@dQ57&EPx1#5dTR+1%eWY`A@M8nT_w*C!+5?l$_>s4 za0DOAty0A><(q4kl-X653?9CtD{m%vGEFU81F^eIn}5;&J7edHVuC(i@!$RSO+rVC zRMwDsYLWKhr~icV-+$N2lq0R*0a~6kchpg&D2gUQ{IekU3Yo-=&%YCwJUWLVrrk6N zR38~p8d*&S#&GDdPrN(oe(bDxl*hI4O@5+bKb3pRqocsjI|zJb zmQQ3Y?P$Dm-X>}GR_OwJ?*9+J~d^1n%5A!s5;{S{?yQ z_mx+YU+_D(kJ19rMvDe(?LH@>8VO z_s)Ro<8>P_sTlt0g6>w=d;bj_1X9Ty9R4kx7of+{RxaO3>B*!8#hY5>|2W)wX_qqLQg_Qp;VIL4L;Ol1+c9lIBAAy|_iXER4N9ugaJ%~Z0Sjys?3=erBaFOCQL zD-9Yi&iSi8$?rm}fN@xzp6y!{y_QI>+;9CQ)yLK6r|WBbnWsz5h~R(YEE_7BG2BaJ zEH3w9s!ND!@7LHqelwiPi3MV{M8*^5YIJ*Pw#B0QJId%{$~+nban`Z7)1|3vct=~c zbVV`uv$S4qTyacNWjaJ9S;?K&Q4|$$x~$)hfU63=Mx^2z>%OPh*0@*I_nMCoo7OEl zPvb5oX9@qVM$^@@JMfw_B0-C%1(>5;w+0*+?lrGPwcmHe(L?N1`&UvszA=XEW^F=a ze{cE&yaVlrUNa8V1bmr#lcc`%-|n4v)&(Zlm+ZX5hn`i@A5;~``pNiq;frs;C4nNL 
zdZB~VFV1^CT@dA3y+f%ik7R*2Q{|(keGV%G|CuXZqZVtGtdH9l{B!*F+BRTT*7tT=|OwGgVuI*P06LTrR|#x znL`n;JML(O-BM(DD=90Wg;G%BJ=Wsn|FGnZJqB1RNitEn|Jp<3tMjYKheUj4%kI^n ziaQ4iBO~6TXDK3k;w>d2zp?#O8Sh)d7N(Uw;QGukPs$0MrKtObgw82>cXcdpUuM5*7h7=L&FB}m(%)#i&N%*Vimw-0zt^s4${hf&)YmuP_Fsd` z&F!GaIzoFXrOm3M^Kh!(F$^k))Bvnm(pb-KV0ym-@a@sEgfKtWER3ZZ(&z=J!MI8L zaT%0qQkqv+70oJnl?Kh#bWijQMfiwjEa^5no!1dpG*$kGdw%jRQMlpgeA60?da?un z>$t0eD~T2q)J&6ary3-bckoqTp-}2CJ_+}UQt&e;&2AYmPMt`un=H+pZEwY8Zp_b} z2u8I9%z1Y$*dFj2xOkse4vT!2U!P6kRb#`MRek_~^6Os4cx^7pY5 z2tvtQ7d>bdw-ql`*L+ke$_BKNQV4CK=>msT#F(KIEm2%y;b|Fvp^Z${{)bH3w1d=e zr;bEp?C;__09dFFUnQ{MSdXPkY2gXF{E2hh@dPF^Q>M>Msz4Xw*9`t{)awy+O0R;n zt~v~y02Fo11e?*YGE7de$otv(Tv6X65pmq3D_7xEVVGWcJUaG6#zNlU(4Zd2McN5u z&fN)AlG?wiv-11?&k#F0g9+QH^+6hNW$i#`-qm5OhT~^SF!Yn9 z+-vO}YJlTim8A`DMn8iKsWZd1MddxyQfBkmILEA|V6G}~t$D`JBVD~@=0arBq(m|i z%Cw~X$WB)GPv2dq>fOOO!HYnPA)izJX?OEBAedJfU#%*@A% zv3j>iHWO5pG@llyREMF88r#AO4HyTQGdQ;#x47FfMUz#d|6`-H^Pax@jtEst_04V$ zAxpkpC zSY6;t@`@p@%QGfnNd9;0ON^?;{=o`^)#!)K6;s&5iZ$_3%-7*=f#I6b%`zUZ+QsAG z;ZBYBCRfx4$vv`%vVI33c#{aY++p zDqfU*+4l&kD{HOqO|X1ET3YjH%;B_y7p7M}UktJM5`G!lo647Y8LjIzu-MqEfg?QA z#4sAkfm!o_Opy%^JP)+i>#YObhnj5{xqo}oy(^3%4X>cdFTQFZy>7Nop=HRME(J=) zOw&1)AH56+eaj}8YVsP)yNnfNxzircd~R+ZuC2nEeX&Zc9k5W;QtGhj&41UhjuR%%{`h(F=0>KAgl72uS^`pTAmF{qQqr0VYjK?#b+A@0yNhW~7jaog zcVM=@Gs-hIPsu*z4iL@n-Te)@hO872m%xPO^O|zeLa?`}C@$tlOU-G~IsN58I&aocXCp0@IfQeK#_YG8Go=E(xTwA&8#sa!fTmKR} zSPm$TbTz9mj0oqgr+G2xo=}NByoP_ZGx|7KZ7lR4S9`E{6wTrE1pH&mh03D>_6+Z^ z?RI6^#Lhi(Y%g}%pmXvmJfiu!kR}ML8j=_5p|F4%tIgk#bFi!UYVR4cClMf%@YrN# z{5zXh7rPiyfi(_vz^_hK06w$5+S!E-Ui$n%&2Bvl5pQ0!dMuFC|dSI&eKb&N!a2jA^v9!nxLT3j;!ViPg2 z)EC{wYw~?uY18`b6TU6|R}r`e)=)-(rycu!wXa`?sR+)S0F=y7^9L37glJX$6)d-3 zQ>?_b1C_^mer-u7KVJ>cI&38y=DwvYg%qGUXSd6>%!H493HyxhuM;M7SnyqG74*D< zn3dHV^4Atve`rn$ApLafBlge>D`=0m1@*Td3Nil3)LKo$#Rfl(+Zbp^2X9Aor zq-+0_?x1LE*O4oDf;$m+{1XLd3p+utGkw&iesoi(x}&gGUT)AQ!U6XvVuS-`?yBdW zQEO$to&GB8)}6yv84Cr>KhjMBPCqp0Zk5E%5cd=R0 
zYpH^u)DY?lp1dbBLPRXa!IE^fXj}7yz05Z%)LXs}FgTY-28Q55wATib*z7vwKtN+d2+n$L#Kb{7h{Rx8(p7G)7Qe(cavr=|$cF<_a#~*8lV%0^P zr*^=iGhn}0uT$gKx;;1ojI>%EJM*Z(qYH=Ad47{ppFY*i(!1Z1|6#OMG#`|`KjAE1 z!-Z!Wb0ip?s_Mfn-=(XovjP$sI{e!k&!P7=7i*W5Z$w&_%Cxl7gMqYMPN&)$O_{Re zFTZYcBl%i)i>a-OJU7GA6goN+zz%zTyFtP-p>k)%)Q;*-JIm-p!rR_&BXV?wldVo4 z)5kR3zyC^_t>Q{fm!8SN-|i$%aY!6f(oWAaH_|?H`IJ+o%5c(l^qa{aM0$aMDK~IvL|*3pM+4@T9l8gurEC!O}7o z(3F+2oUOKyU;Z=yU-=!kNv*klyq$Xopzp{V@YzIMq}bptG{r^Mf{F_qSvM3TtRERZ zshI+~9!Rd*9^}zh+Elr@7OiKDwduXY&8OZ40kyp}7FPm@m2L~+u5W;sv|X#$kZb<9 zn_ct!|NO{hlM}cZh0TjAlk~Shg-Gq!-qyDgcb#NP4e5?YD^y--m@11~FHR{OGdv<%RNB^A*?)cPs0x@l;o0 zh`Own8IGv!(uS=VE}x7^ zFO6$}i~a6__!4G^2Thl4nb0;%waaaBe#=l~wZUA9mGm47JEVcHZ=h5SL)+XG0IIuN zki|%f@ys5T(X9<;Wjyp*=-95;@t(B5@|nfGwZNb7;#trxYy{JE2`j;#Xe)j77r~5` z*mNDpJGTo_CyduCIznbex}ivrNb(l7eaS92(Fo+=PQ3O-jve2)r{d1+NHT)MpF?)q zC2M3PUkmZrFXFPoSR~nEQ3x*>j9aYl8%f-b{&GUlFTxMF+1BfvYm1EBMzT{4%L#55MBvQ+cB~!oh%9SMp;2jijtiGDRk6E$%g2eM^o{ zange};Wguh23iuQW{d;dcH-OqcvOOcwPnvJxR`N!)xAcVSN?v8+*^YWZ`l)ac3P-a z31K>aV??yBKGy$iWlH~kJfrB5{I+rO4`t48)q(NXB)`LxK>1O#uiRI{r-)okl`?nU zgGl%2r#&lK^Mz2KxAXnvjNJVY9>47g#bkVL_9Z~LR)?KUbOA!imtPYDY_s3#)OI4u z?&t!BxbI3zl3cXbGnz-$X>M%?S~q5@EFAmZq+I*R*P4GVs}mhk`BXxDp#LX&DuCck zx6K_7VCmLfH2d`9a5g5VU(3yAbO-6}mm!{hn`lp?lV*++qU0vmn*z`=DZTQRWN zp!=;Gi!cMw^btklslK7QeT?lD$MEPIyGskN?%am6x8d14eWI$7o=A(yhOFv2zcNK^ zCg(4>-b>BWokAIhUWd#pW}WjZqmeXaNX6n|Vxm;Opc`*2c*)Pwl_F}j(z=P_D>~Bw zes^5kPo;y1c!^vA$6<>3Pyi>yRea8cLzaVYHYa$(Z=*Q;G4|$_jNMPsg{pVQ>%1X4 zWQ9g^(7T8?)5|WFN{~AX2ZS0SfHo1o#Ukf`9Xs=~FXWw$RljiTOA(YY%{t}1wzU%L z9%x;|&D>?o$AbBxBtpduiZ#8syGAkm2cktS-!yaIUS?fMl{+~T_m64VJLi!XX04IQEIad>f<>>HI+t@5+^61=60;z(A%@}yd2CgQp^hcVtB2!YI&G!M}6LP z%TNTC|CqZ7hjDR3U8IX2GT-Fu6K?;$wpL;||DCVhIyKyV4RWdI0IKSys)m|_pON*zggc)NiM9OlT>Nqo- z3*_49G`UBe?3v{7JG>BW(9-inXdLKiukoBK0EE-3Z*0ru0M*_H&h>Y*n!FZ@xIqsa z%rO6I4DCVhvjZ*OCV~SG+I6W(Xq!CSK+zpfP=ao{UrF}OrlEm~rX_M9C|~uM%T&QJ 
z)Ss@Vd!g5B6*@mQSiQD*W+$u@BiQw@|~}47<-{eu)Ac3QvxyqxAC8>s*0WlTqon-;v;pR_gw2J;D2+@`@Ra>J@Vz>^0Y^q6hPuQ4N0GF+Mck%i$vpgUq_tg@%?2UeSCs=-sA>Ih%s}S4A@$K{MId=4D@1MO0G7V?zzkd5%o#E=^9>M~IUVFbwp>T879{ybX8=aDY*zGaFHRjWC>BcsO$Qg^&u#WHo<(w% z_5cW7)KzEX^Pj9^#m^Poa1R8A^omnHgkn9G-hx@bKrRMt0bHq86MJ1r4-6f%h8 zP`!RQf$`$S|7QWn=qJN^+G4jiy^wzx58=YFmxA+hHQ|)rMPBP0;nBGsQvYN`R%p5h z?iv2p4IKq?Y~arpjl^8CkVUMiX!D#PnEzvo_VufkPZ6nU90-A=c-lS>%g!8O2D7^M zEG6){&G=SDn!;3(({zJIRvE(TEh8#KvpJ1>dsyl8I0Esvbd-(70E7Vl;cn6&a0@|hVw`Ar>C;;USS50$awAJl0@(~uh~ zL=tA~S)6o_M`I0MQ28mX5o%5_C5O7cd^PjJ6uf=ub|A_xG%?gLl+$GQ!6YalIex^= zCOZI)NJXcqdT~9YdgWdEyk)c=-u>CXWfXFMtZ`; zw>Qn!2itAM_3dW^;|07MPd?Vnf``^Nvv1W@*ZenNc&P|2Mtrvx*BcFTmX4MghCMM4 z8{9RD?wOx$V!*HkG1auN$fiAgxAfMsrS-!(=;?Uz%EkGOqv zRL_FRE~dvP%RuO3ul$EwY4Zd|ej4|wPp!rWHCwCU(a?^&OhPqOl$rmi`^NkZYEzA> z=#pZu^~FmZkN~&-oleD9^J0DB&kN;8GZ*&P8P{0q)jD0Wi?c+DEk>(67Wfvluw!-I z7GqDrB#n5_N~S#9vyh!@m2@ zY^4CnwTF$SWfiXgC{M{Ugv@<=Fi)hA#Evzk?cHq&_vXVl#-Rcz9=&3(O9rmIXWjAy&ga<#xK z>ID@+euwD@+={HNeQC(xcm!i~U|(^&R;*nvRkOEEQ93QE0YR27^{xp#chFmoi{Y52 zllqVD>nkK3`JbJ3H9L+$W4YHqeHjI=&^pT>GsT`_FN9S75$4$=e-EY^4n)weGlIWi zk%h6n;iSN=OtdPH1EFO+fEjfGM&vAMrX9r!#VU%p8q^z9{Y;uMY>IcGG~OKS<@>6V zHT{iOWmdCgIILI|lFjb?g}>VM>re;G;1vY{$u(H%P@LexkvCTYdS0}o_@d5*L3s(l zMX;dM2k_21jlz2}d8lGi+tz&3-Qi1!=FJ4rDB$b} z0MQ&VV7?e18nqZ^X=Npq_y07ir0w2!Ulcc(aK>2)M}4haro-?P8_`NP}J#*eLy zzMlGx6=O3xO!6w&=ZOTy-gf=Ip{fCkmO$K( z{bzT&BE)Nb62Q^kpov^o)QY;r)w4}03xH!=b#wRpMN|6~y=T<5XRayzJe0?Yye%0Q z7Im!|L$S@zvo89rVvO_U$ouT~kpjSBWE$E?{NKBt;2~NgK1@Hj;`6b^td9K`tz5K4 z^{TvIy!LPBoe(KLF|pHmh>^|KY#{yQS7#MJ({D2X9Lf&ItMCEyyyeOZu>@nu+-YZ> zv3|X&7{eC(<&*G754x_#1S)-hiIGs@o^-PaPE1aRNUxcV@OyulINQExCHHym1cMBg}GP&Jus$zYW=9=?GOXu><-B(?_^R#b2n%{;g z=hOWVJ4-NQs8~?1p~U0TnH1hJfRef9)lO%vFS~DP`hgMvX^at`|W9Tikd8D zm2z#%QElHce^jr|kZMZbSzYR3$RsPrqd=XD>nSMZU59HDZ(ZFlocQ}Iw2Ou?H`@zhK5 z98OBDOCzlpI!WCXU>Q{iNOpG0FRDUV^peXQ?<@q+s?YRV+m;`Qm(oXH+JR5cw`<>VNyyIQ4|VI*OFL~*Tr}#Tz|sN zTH;fbq~SKzKG1Xdr_(c)lck|(=^YOIt?$iO{4mTR1{aES%e{%lq21y=e|c!my*Pwr 
z-Sx!4EmhWvi}q{h8zL7fXw^dX@Z(Cnhc>%Aq3)|BX50#e^6H1+z~KJ|UxM1^Hxo`S zsVBY>m_>dTFIq$b=xOXt%npI`-l2vi7^m;`3H&ZWpr6Y3yUmilN85pb>*F_C2(ZSo z?K(tc>yyKjapjwkh+i<;5D(G2=1!+r7cimbyvKM5c+(-gPFi$?rC5KnVoj(Z`o_$+ zCp*9|zj`smFXX-X7aeH1$OUX9ol?i|Am0(BXE$b+ECQ?H2ij9QM(fYBtMA3?Cjgt) zZpEOsf2{0I611=P;H%tJ)xi&0xVF*FV_Iv?^?dixd|MFY9l;|v;4Kd^oQ1Jg7afd5 z?il)ysnsNao-_9Os`H8YlCJeqy6d-k!rMDaA3q<=97zFvFl)>XIL32ZlxbMfy~2rL z-DZ{~Adr#sd9ifv3rW?DefO2&#}7_X@mwoMj<(F1ZLHTpi-ktc$ko7p23t()=)z(& zd;`zKV5q-*qeA+mH|hm6f?*?dE{af&L%ZS|I*H4wa~BG!$3zeYk)4C|d|}CT4Zewe zBnj*lph8FC5(|yG9Y<(AaMPsi$`v&l_5;G( zrfvUcwhhY%5pmt!;{3&3jd!dmw{>v@i?GJ!RYLMd2R^lrZ6z_nnZFO2#SP{v7c1?xl7>9A zDyNw4({AJAI9;)(w2pUDi~kT7;&Wj9kF6wvlw`AUDC^C&BWdg zt`PkYa3nxzn^{$j!?G6kL8n+Y`_9N+AmtXYmk2^{!!>~Cg0=mdhJSf)$=Z$>LyjY$DK1Pt=irPKsc^%1@7E^DU#_WR6(4Rvq zmAV)hYqrka7x4{OzD^XGOUR9MD-`>nM|Y6p zZJ^{G$t+_`7g9{BwGBOQ^z<6t9YQLViEe#6e~(AiEuex%XdAGC9`3U#$(F8bBI56` z^rql?&J>eI@K&Sk(&!f~;4D;7v#vDauc%M8(-sDbM`WU@P7ZG}t6e!&o!Cz{%uBN9 zMS(dmdea#l&CkJC414^a6RRVA9YW-=Z>2|2UjKMfSmHjH2cH1&Or53Kc+kA!p=5OJ z)V?|?J%nq-NO&h7jpRIux6>h#+tNM-qJvzS_o(9yJ&(Zsqo)F|<-M;b1H7&ijOEhg z_&96NrE7SqwA)0d(xT~6UypSB;{0ZPV)P%*>K^1Q@1>@OMd_0o?2^JKjV z1-76YFqg8m{l3oASxGP!-V_6AfREN!u0ls1&>64F9z^DLwpnY7*GNyk3MJ z5f&P~XJukR_OzBdEHJ224s#L7@t5a6#xLJxMGDGV7Nq)Dm`(s``lqf;)rZYhSc-yR zX%bK}JmFy5Ocmr19X5zfG27G(578XT5O^JYOS8hBNvH+NITRSY4*hg6k|vg<>{pbj zP^#3LcbLj8=$V@kQ6H)o7w_3M+wczW`3pmNZm6P7XL(Y7y?&EoK+;lKr%@%uwgRhq zyAmv>q*A;v^sc)Ccwaoixk~-w)?iNsH%7IvcAa$o) zEoD3bWfXM=V?s2wS;h3bwTCP1wJ24VQff^XGrR7KInOVJ*S7Ajhq0xmbvm78&s)EM z34eAp*=M}qr_ZmE6m{8TX?kmv5Y+xsF^JHmTvP{k?m&7l3WLGVV*7+iCcPt7pYF^uZuXAB!}IJa&K6-}DW+vA0C+4*Y2w zTE?Th8=842BV>qnX)by0)^XsNi-OnbvX(NpK5t0p5{-@m{wTD^tHL63j{AIUxaf9* zxO%FNojqjM7P8ryU0KSeI?Jlbh<4O?Oh89<6&KsmK{D!N7p}F$t;jX1AxBS^*F=#B zfReifcSBl#J6$^%|5Ku}FurMs=gx-9xqqE70X#eT8k0whe$XKZgK)pmQyg) zv<2VFe&xRdx-qfEU;|mUZ3fAsi!<0)*?)cvCc=Uht#rs71E>!wPMcjm|}QZckL>03^Kb*FWiHx?euvp8kN7a36s zD-{ZHfE_cfJi`!e-N@Ov&2qDAcjzCrsOV1~*l*vDy#!1 
zJmjXY@%sy|z3|Nok+H3h18pT2bK|S@-${$!#xjy!f8NDb=Y_t8v&hm-fj5r=X^q2r z6{=c7otC<;h2R0aY zzpVBAChHvx)9&R-5l^t{2f&A?TpgiG1>EF>lNpGx`6oawWh22UL(LaDzN{=RQFWfY zK?3S?(tq?#*E0SY7q2yjA~&`oS^A!%do$Uysz?0o7Ozlu?1thB)k=Q^Bk5re^nUI6 zX;T>Hty*iRzk?6y^{Y77i+Uwj{Z$S2mt_ZG{th&nb>VPMup%K_l?EhIZqe z?CgN{w%Swb$CZ})wo?h}&VBo7zG+MeD${GiC0SBLgDUel-GqQFL4{##yZy(ke z>#tx2brs>wuT`zRL^>9ILeDl=#0>UY=iED@&uDkEbd>bNOQhtH4ab;YGzzuzqQ$M2xTpichR^(e(%-wxy;k1qO12N z3!Raew)+n*desMp+sre@QM4EI#7Jz5Z_>le_~XTJy#MH{P!f-;d6d_Ef8KBnaJtc< zp?1@DxevdQ!sGyRSpZZz+7zK*1a4_0qIEGW(x;-bF{{C`jW@{w>8}t@wAXJhMqj@em3yE_0zT+)RbykJ5J=u@AALP*2g``C>wd@Fl3b7P(Bdd8th92c&|qpm z`_0=3t5C&fHkxChe_9mW3_Y9iu=BqJz?kOY>_2HRfxT+-^a zd(_}}DD4+SA>uqByFu#Ai)tU%JbWA(rd^>5s!cGSCcC-w{Gwnj`2sb{)58HA zqGjCuL)HH&yqDto;TK;wSYu?3j-jmq?5p+p=&pVRSsyphQ9uPP=efRDB2lWap%j2 z;Vx6vK7XqDZyESQ)8yDJbaJkD&9)9~!bng2c7I(&=@|Ky%^31lN(nAnHZZyhBr0%Ay*Rp8;ge?){hjjKt5R0NFs8;Oi5B?ddg8$sRy$R2Q7qwVLrV?dJ z=MWgB&}}7%)Y>x{eUm#PP18Ln5t-<87Fjb}>ukH(uxp9AOrx_zr3f3+A!vb7IFw2K z^N%8XY~m#fl`<)SR8m8;vQ-NrJ6jg<qp@K)NG zCjge`?fKe;A;1%QuIfJ}2Brq>N~C-t9hGmtyNXu6OHK)Lro<7ocXz%w#NGbY_u0qW z#BQLN=z+g3=bEAfW=_+IG%1^Pk(XR37O?kBu{@no$X{a`$u)>=odyIBT8IjyJT0;gi$nTvZU{zVL0T z)E9VtRw7_e6%%TqM(SmULGktdZ* z70lDkQxkDRLvVyRl>^gH4mwQt>n+U^%;*G1HF$fjGVl!U#ibV9KO{vT#lMt@`q^$~ znA{^E^^HjtyQMe--mY{-2%>QMj^e*Ax<-XQ^_NK%l*0!d-zii886LAO**#fW5P|U1 z&cj#XEkaOub4L-9=0M<+$^N)-CI_%@|CB$7)j-xwnhHbE`pA`|{W~W+&;z_BZ zm~r*%!Fwr>NHc)D<|798!bETuPcxD|7r{iMc}y`k1VCAP?aldyp3_y%KlS({ z??E~ow7#$x(D&OYLB2(&1^ki4Zj={eocPAHy(x@i)AK1x4q_(nbyZS8%5EMDLp6-7 zs}AQ|GBLHb$FJ|}t7bBam;KMI0 z@4OO&);gLc+juXUS9gCrgTw@I{d5uzd{fes$1c1()QV?4>a0&vHR?}Q7b5bXt6ojk zo%f2iy`lZ*X8hNMSSWp$cT{gJn$Uy=5#=IGEvJeN>7|UbPgIYSRD|h<9(t4UH(5W7 zlMkPNFSgvy`9{AT7cJNKcHcedCqjyU;-ic}F%v6i?SC2jJ@gZ&ibig3=g2*>gFP1fv_eOy19ZF#=#w0A|1vcd)7Nea88`pX`CUl`{9pHX zJ8VpQzn#~qvx49gR*3o1$1$SACE8U8+X<;4pyMMI73v-SN+0Y@aP4+%6F$tr@lTaU zNNntPj*YkdsTNoP68)4S$?1+p`LOx?g@=lIkk3msBZ4u43yBfJI)$#4(fg^zc`zoW zFo7kUoaw2#HU 
zi906#Sf|a!Fy!q)+o>)5Y>_%eTX|TE$(Cp^1Vy+AHpt$Zy)l5H`OD7m4yHOqVNBYT z^Zt(U^tU{k6gt_HBxvw>%QtE1|J#r0{*(DT&j}exj{o)WhX}Dg*Wy|tY!Z`U)V@_7 zw1b_%8aB`W?v>F3l$YlkG6h(^Xw$R!|;zsWQRmmyh>lFrke$C{2vo{ z8JFKm^9lVhSw(g1r3x~0!b}3!;_=hMVkUrsc<3y|17{%$uOcB4tQ3@^WuWrek}m|m z6=6r4t``0>)2vuahLu~9i#vk7cV=+df-YYENOxpU)qlHHJ3+~Nvt1}Xo@M2{WI8y2 z*g+=EM%N#gsq(?0|G~b0Lk=0IAx2#jh$m-mbDK>=u6l9qp}#T+6Q9ixWwX9(h;~Bj z_18>y!|xfoE1c5|CG5dM!v`f(`%>G?QSx1M7*rSpXOx8_3D;8mZ;aY#I7K*Mz6s-& zwWuhxU+l2gOON{mslb)hpSRV(tvItS)A90SK9noLahLnc5I+@WQ{n)T>)u_%z~9Nq zUgV=`sEDWCA@Y>mFEY=H&zDYd(J3_De0PY>{D8Xp4D*1wk-W&gB`DINe`~^Ly!UGr zIkUnQwvk-H6`KfBu-aQHa^2lWXtLjN;hDcOJBc&`TI+~#PmBK?#l>WG*h4Qx!#5!| z^4f?ACle;I6|!~BFL6yPoJG#*8APaL`QP$u4TPON$|C!K@Q576A=bbTt_iEk8Cw z7SikIL)P}WB7bqi^#bh+$NYi`q;>;@#gv$8?iA?hmvGd>T?+|aSy*G0+$W^tA^Y!{ zW0fto%L)TysQr7V1}_Hq67ZpeEbx>Tsk z$LEPFm5u%60?qID>veidINzA&8UlR~-LIMKvYVM^>4R89@a1)$N zriVLQpUg(~#yesnFDR_7ezlQIMu{l@7xd4kd0zMSDB3-|=#SM34aR@4d#7<0$0jyB znm*mPub4a@?ivZ@88DFRjmkuN4ey?0Nik8)J;COf>1pJ9x8sTPBkFNC)lPB~13oenhB|bxIrIUn@kTC96;l zYpW#DB(tSL&Haj0D1R3QLL!K#-gxVC_j;4sEDj(K2Ick1RIy}F4R09`w*lR41z*Cr1^+7Fgfudr@77GN)J=!+%qw0FeLPTYO zmAn30{LaEL1jty_?40zv6Sp77%M#zU;KkmnhkLZ}|CI|nqN4H%H zlZ?3^)!A#wDRcEhXLI58fuip}oxLAB{#7}uz`mN>h}0im(RVmLo%^f!jLHmr9UJ&!)U%-XQ;VjFz1gq6@@DZXzKI)pU38&N zAj$pAb9;-9^8jN^Af(MFF!TFGJr5)5aR}AJPtgejm%$H|l7Ti;)BzJVcA{1%-##KI zg|1)HVq2`WQ_9?UN&k!jpdFMO1`idL`cWq5 zRkl~f``;;7Al3hC)(5*jhz59%xBrxT+mM~h>-(Z3M8crxkd#Z=+y5i#tHYXpzxQbn zl`g3f0xBJXG$CMLP(osKm$Y<_W+2@qIl6mvY{d74zCXX;U%Redd&Qn} zo^$R~_hYiTDW&Ywo~g9|$lZ(Oy`XzQs82dG>{k5T0f^pEjU>`KOQ5J#6%y_=h! 
z-ISwgyo0)TX_>_Wk3%~N?V1akFylG*y(@l+JG%&M9vmqj`^hfNvA3^E#qFpL*Hmai zYo^5=(Hx9bN3NxfCEUc#BH}!)2_Bf&a(ef#@0)@zZwGVvzFhJ^NrWrMG?)&<*|CEd z!s~GGXbG$>oJiX#3zLq}dj4=PPU;|YoGly@yob%8swtfP>tH=lvi0+x_g$tNuOI|U zI5z{itu?yYUWQSq3ZhZZz`$#I2r&QbQ%i*Q@NN_oae*vFpycca+0*q>%9rxFNQ&n1 zO{Reyn|ix*pp(f=G{`yQk%r3raoYKX3BERngsbV~!_Zy%O!qBx{FaN2`3}3i!SVG} zS{4QjpFW>~j6CIYj0gs|#ZP8NGB85NJ)w?2{t=m>nigwnZ1JzW55)_j4GR3V{Ll^my zK|~*kl(X=iDy%*_n^p2}d-qSw>-6P)@6)+XiEHkp4C6gMH7;7E^jCTya4h~jBx@d6 zI?MKn;*MX?*^^aLf~P>gPbsk>E*TuLpeAh5lQ9UnS9RT?pgMi#tEir9D!w5MgqvT8p;kZ&V5U(59~CbIQ@&5?cgTHYy0 z_QhI-w?(8{9p`A2s)n0NjHzOwj=gS)XL}Rza5YB2WpzIFGYvgnq|>aLU;!1%Qvb$D zce;)+`_inOTa`yPt3-ZHaq8aR_^XGZ<;An4zOIiu!p7yJHt4^vP5SLD7Sm<~`#<5U z)so{?5uk^0w|NHwv(dgu^fB6j1}_BOV>MT<<(+YrrYlzsm|MTVy4Z)Fq6z~gE?o{g zC>u(xy}C0lHo!L<67~n-E{$3DzZv58a~Vhedn6tf738R#1C)MrLiAQp6i9q6)GCez|m(k0T(yHa`NClSdKAg2vAEoP| zzSvw~Kr%kG(J^-LcysM2m+z|2DMo{a7(#%)RQkG})+#4>hcUis_xsLH$E%GU8@QTi z8>XC%_}!wX$T0W=s^WCUET$JDw6Sf6}-SVccADGl87>|^`&q_aXWXsk?Nu+w)^KG#2Tj%|c)CFl`#PRVYfj~;ZjP01d)>X_XTsN)X+vYr3i|{)-<3GdOCa6! 
zoP&~I&Ur^GcdrMcZAfqUC1kh?&gPs~P+s?5Q6p3f>*Wr*N+=zAtHmvzPO!X~{_gWG zPWyM??DpOMG5#yDwk2s`A9*y<7r|X&z<$`vG4TB(m9o70bG-KOSHtYjk=yr%CtAVx6 zSna1d8p|fDEJ%}QaUK0M@H5S9ywKPyqhYJDmKud2Lq~QZ%@07Efa(=vBxud_U79AL zXcVxJe{Y!U>yri%5RBX?z1A{)Bhp!5krHdpPeetFE*|n{W!A4ZVr?t8aeK|ZX>>Hq z)t)H>^>V)9>Qna`>D%~kw9Q{&MkLNddT_5+Hj{zql(Nb$`;#Oj-~05e^1rvIV8Rge zK2q^5|5L*y=$7oa|Mth4Hcc1U=9&WpgiUv3Tt1!AVrh_#yWso@z59m4r%YuA9xtq$ z3&?Hll*@uwh}YS47YMpd_oiV<>_yFV(c8h|Y+YqiTh|V8|FISg(*&JT1K)ML+ zeM#)^_8>WWSYEq(L|;w3+U3X8&`uyrcIW0<1AIEY#Np3MJ!EcX3tx-%W(zsXTlA$h z{cM=cGhM^y(T-h|*odxRAzqKJDf-k6j4AA!3H(*z77TeQvUo`KMEqK4X>)I!q)@^o z?(?zzl6{qW(3XUt4a|Ijw|_lsW>NLx?t*A}hVIfHV|q>;jQbeA$3v2^7Y8kvA=l>% z3-`*PFNy=%uY7lVA%x(IwvLHn?Kw4i$N9@a7mAHEj}lUSykCWZd>TLg$voG`$D7Pj z%$P~Ff+{Qb%gfB3f07IJ*~dK%7Fn(5ezHCtpCJDZ_mSkCI%&=T zV0_dy=W0}r#r=*_2v~LoEJrXydY+oZzUDbg@v@Prp9xb#BM0+Ngi70_y)p6GU)fNx zVSKujrP&(7>n;DY&NoBmNSF^fj;d+Z08UGIcUY@4hxs#{!cg7ze$yD*!Apmp6qJJLqrfJlyJ%!=fI?}oIO*Ix1Fn*&&^fl z`iIqr=J)+Wo&e)rfBzj2nT&&|FNl)Wy)GB6jIC+>)=iSu)ssxd4(d3)m9h{cpBc&$ zh=yJHmC`$*NB8sOmKJ$ZL+XKr;Q?cvlnw86on3K4a#_9Yr9ADQG@|AZTwFkqGqm13 z1`5~76#?AGKi&P`$C!-NP{BonWa;`AvXaBJyYta(b4_GWsdCz{sS2;O%AYO$AhX{e z*u+v%lFE{8&Ubo`5klenHNmdWwE#tM^T(@TBhN{BX1S{h3EIQK2th~hqz*TP-KEDU zFo#D1r3`*CXW%9aE@k*4UV)JBS{Xq=8|;&KCGSFI-IAlbxwXGH{ojWmQ^tmd_>l6b z{7lh|lQ(-G&E#`t^Xhog&*~z2wzhJ||6H&EBlDthN!*mB7-!d~ee8OWQYB(2X>@eoms19PizdJJPYPSbaL%eFq?aa367#QLL6fZt8IFe+5*q<_-MGyb^f@kG+)tBA*u2660bdQ_ml{}KzspZOg@zQ_6gA@4i_bNoWI zHqqX4dp;Y5WwW+r4;80`A31P2y%Yl8aK_&!%`5QGyl7S90bg56Qa%ly5+!=^4DANt z_T|~Z-Ov)~mPjArHsn#?FAygBwxUGwKh&v+w@T=cZQPhN8XWZb-^=r1P~W1&JqnfT zYn7-kpD!TKx2e=L{)A7^4Ttx&JI3y5>@9^{I{-8Y;?wz#OvL}T^f0Nfwd?a#0VHxt za!v9{H-h(KMIyFo_(f^0*#@f)1DH&u_R^dSc-sIO29VVeuUX0GF63aTA#Jji{7XsT zd$|xaLOdHUg^Q^!3H2{h!&`LvZipT44deGs9Ho?7e|Hb7uN2Ha=&(Tl^6^!O;D4Jt z*B@;KwvSm)88MV@dRSEAznf%d$9!}E;JhDE%)wrK6P}d;(cIa6i@*_=;{B^f@$M0Z z#Q$Bq?jt4KPIBmN@Gy%SwP9dkQgA80eh%LG6{|Z}GpiE4J+JmuiT9_+2$^nBF0eou z*mo$wax6p0wrG;hU8;OPLd`4rZ~q3+qIfnQ{97vHEnd~Bp){f9?}GI7Hx{%c^uT_L 
zerYgPJxMt^=laHeriYD!Mg@GBouqrPCpEK!%e0~)PPa1QC)qLbPxt^}S&=voxJndM zF%A`Tua}RA4>xhJbR_7VTClnyo|^aqvFXUE5?-}PPX*$AmPmjodmmn1PkF-2JCZpI${|r=YsbG>zOApE@?I$M-41lC7!iUGIREyN7O$+O)tfcMj( z2bBW#Z;R4efO6zVu_XMx&INL^e{Pth=qGPTiPbEV`lllnytj_RfMQ_yvIP62X(aDs z0<$8?XdkX~DIWm6%GHTdeR(kH%_5VT!E8SDmK9mknC@53e*3P6$ zopa6_z{l*DZ6;?N)5+ugWPK}*JnKK>Z}6U}McgL?g~=PZS}(5qs$)7|1r=4d1T_lMmkdoQvTJPz`YB+mp4|M=CWT#)hfuoCHjg{oO-?j9+@~ddLP!k@dM>Zpxv${a!u2WY_eqn(gF21a^U^I3GJ4>rEjoY))D!|} zp??u_r6aEfnyU@3vrF^!EHRjNe>g16 z0*9g_aE+tWqRINUz6;3pZ2Zs12gpmUNZOZr_&WyJ*`!hQ!ou1JuMZ!^)$d!yZdq5q zWC@Y^C)k!EyeZAT+JZSoO@;fUol~NxD5>>E&sL2bax+WSHxq~)RfosacMimjIm4{q zGbORG91!da63l`AvBLvq$fbIqwaZ1^7^(`c!*rv7i-e3odX1uuu9Vgt90b9Q^&77+V|sEe>QBn=(#j90VXRTvy)nmzsFfB z-lJY9t~Y);7?)15P{%l3_|0^gtPs|!AhwK2s1QE_bZ3({4sn3vr$xc%(`rCX@H(@0 zo5n@$Qf%Z8cW<)w`f1YWg(D98cAB$-8b@fe&4uScnH_$ijp@9$o=eoJ*+PlY?%Fk) z{DF?*&$om;i{X-2i|d95!EC$SN}MXg8Jmw#Qe0XkSx3GnLC`PCUYV z&ZEMj?oAOQ*A!6){i${%wCS~ zv_yQGB<%JFe0RkpGI)YQ1cTdcWCUO(v}pF|YJ!kSmI)u>-6!=0NEq&uM)kN|{~Ed- z#>;=M%rH`*kgT`bKg}%;eF3!majVChM2qQ~-J55`na0*?r}#l+9ASc=T+&?41XK2& z`KfC<+S9{l0!Mh5;9O$Ab{b??<{91BR4ZI|3M@I2IN+*eFKz;8NG8SyDVQ&ky19eu zkB2T*_>tcYr^OuFO35^+^qR_E*9S{sb-8!*VzjAv?FW7jdbvekYnjdkW(iE_FUMNR zdFg$K*4Flh24wI;@*=kDf<688AV7^6NhV zm~xi=_K1L1FyEHnatxr5BVP5Dncw@muK&d>JQ`T;VYjo~7slMZPB5vsyp5R&9%K*X zgf<*>D&)y8ZM971s;&arX%l#KXR847%LgRUXW%|wp&Jmdo>I-`->CFk?*vwci|oal zIPYbBZO=Q$0ju-;l}%Vv_bKv46SC3ce5HdR88duXdhsGp;+JNFbIyHe@${or4cw&Q z9J$lEheuXMc|~mmkdLk-X6A5QgofTno+?gye=CSxJv+@^B*Q{gFRjJx{Uzi)^)1!HIIcQ$m?qU zHTZ#e&-UXW^(#fV5$1L&`oE^+_|Db@co;aZg`+`UC_-Ia#hUl)jYVQ zuC95ZWJ_esZP4cVWh6>?z>x;bpiBxe7yY~ca(?)vsXFPu)9K2(@zmx9y`dKj=^6am z%x|yFf-dhbW~TA*_<`Q%{Qu{*4Y2CFyw`a`(bzS-4P}n0F==T1GdH1;IH=Fv|3Bt>PL zC5QT}a+}3pKoO1~wU@s;x|FKtPl(K~8p66m+e%b0w-Z>!V#Su(-JI|@76##0jLb}W z5=IBzc&etmM#Uf9+iQep+|OTrnezT+-%UQVXGZ7;ad7N#vrCh{=_cU4_WR}=vqNqk z*xYnoqd)Hw^5$gzOV1iPeVzDHsER>KbWR=pWXsM#0A7BhlHd!CrgkRUW3y>(>{YZ~aDPeB^l`3>adT3#KuiSr-%1HT&3Y<10N!STbf2=*gjNzA{=> 
z9`Y{*qFIFZMRK^I>O|*_{xJ$`>uQMO5B2}J0P^D@)HE{%sNc}QE9T8fZ*f8HRI)zJ z^7joCXMbS{fd4*%g)$`hcv_DHa=8FXE1QQ%FQD((5oR5&VsrX^+>lLtg+~%-i=$_5 z-M1(#T$&_#X+ZWj9vsBReEaPN&l=!H0NQO>;Uu;&J8d-!W19}j>_PI|%H({%4Br=i zkBwtBVP$JjRPn{la*tfa+A1aDqW5*pEKc@&FVR+rZe7Pb{~4k#De|o(r=Zu`1OQQN zzA%*(mc{vU^z{hL`{SVJwLn8ob__iT6X(O^=&qzn$_~kk_;!lqFw-G3KSZ$SKjFL% zvLkYDge##JL3PrsrJ3}O=WLX7frrV^9_3Ka+|ccx^QeLzAA|PquX#?qPAe95K5_CQ zTE1Vw$`*wLrok@9h0^u!IvqdjVh5y3@?>r*G7o&dpDyrq*PZ!`X4ANg&)R9tjddWY zcwN!chrdoHAmI96<^f9A^~Epg<@fhZcHvMQVM!gD&6li#*u5=$`g;@E zfl8Cpn1loKLwbUpEM~i-RO4s%&N@`z=KcnQ7`_A;v!pm~>al5#07 zx{iAST=DeX0yQw57XCGsp=Db^)|(oSXK>YcK*x(p^zykzU|gm|Va!p> zvy$b8*BDzdq@r*HI6GvjXD4!9SDQ$NDP()?cp&|Hr^{LGdv*b-{s$kahQ*@+UAS-o z2gI#Qo!-4T->+(U&it)!^Lk<3=Qdowh;jShxJ<>$xzDRB$xPi7EQ}W%76kClj-vy4 z=-lSKon}6$r!Pt(586EHaehVy8cz(&9ucI996HVP44v4Z^sM1~(j3FAh0RmSD!w8M z&AYF}HGF;s+7RpG!WD~!;%=Op8QHc|qe=bv8D-?rWd5PPNyPNm6J#8~slGD{a;w zDR08=bTyXJ1vdAduKkKQm)M8WU+TK|P4Ve&`=)0;(QW-JDFDToP&BZnl!3Wv<3@U? zI`onCt1qyFVzGl?-ldlQ=}WJKFhoJKa}HJ#>RwH&vho@TTz|BN%Q(7nY$Ks>k;yz5 z)W$&@H1*Sgm!G!YtN1%WmWkr=%9fuH0!*+qy|!h=_l^(BVelw3nlT~+CKqb*^(Q9U z(rl}}?nFuv3fCA|Qr_$0FAJ-346p8?rCPf+W;Gmw#m^EIJz9F49r1Z>HV@ zSh)%d`@8jx;S%c?)@6G{$FU~-&p6)u;>1C{AZwI)vhd$0qOuq1ple6C=22>WuKBB`b>$7($ZNe@Jt!{rshWg#ICz^O$UNl2G#c zy{R}3o5CY(b>;E`cEaCVey|%4HoGU7Y{+<0cOqV4t+8)}PHUjTdKt3u*KDi8iERx{ z5=SVSU;Dl;4TiX(3^8~G(Z*M61Gn3zo?qVT+RrB!cyiOE6xKDui}(^)$_A?6v*GSc z(x@?D?CeQ@o2Uufbt(}CTR%)(iLD#0H<5r=57vG8R|=CdD8k#+s-9PfPC;RR_1mZG zo!o&irPpXov$L6KVNX0KZv{Ch{94ay|6k_!MBZR%Uc5u1Z%iz40e5%k!O@}K){T0n z{wLV^ruPWNtrQvvzB+U<)O)23kYHQftn`mT3PZ)Z*0W)k#e*P}I<}VIc`zM#_b+b` zz`9fO`wcMvEmOWSFaHCMxT*ssx(}_+JV%kUrMlL_d-rx{>)o?l7uxJm*?LksKMvFoct1zof`q^@t4z>xhBD3{FFPiT z>=kzRiu1RrUuQ@7gSfXyj!W`(>>4}fU1HUJmPln3NiVbAFYpe>-r!<9potK{Cwiav zh;fwrs%g5vCs~K`>#vaf3N@t&aMQ~s{XgHQwmyB2U6M)v>i5fba!**0&|_qR?%a9l zF^S?SatV{SQ?x*a$dCMb%%h=uV<~|Rlw?+TB79a-oARY_vXJcc$wkO+4ZN<`#&JB+ zEWn0#r#)V2eCW;CDRgUQu=mCGX~Vn)ZswjW)XB7YMrH3U{{+={LTWP>-+;O5w4OAGP%S$Coi1+er9d>sGFo_}QJ1S; 
z^6(Av-xL~PXeiM;XV=ygOV{EGUS85jpNA+){FH%tgzl5p@^Rj>Vzp)3t)De`&yROi zTb&_#Hxk=yxK3HPC31bPEb*;#x-BcJW_IPh?xo(f`wpvzIB8G$>b%-;(6@#hVcV^*i<4+SaI(RaMMl&oo60KdxoUh>41z&K z;Y5MNBF8Lk2EZX%Z!q<4q$EPKLh5W&b(VEC@GpO`&uc$7KX7qL<;iyjH+jNk1w5K4 zi)>Aan(oFd^Z+8hM>8G+Zy=}s=QcKVr_C5l#ld)m#_~;d0V0nCCy^mqlDG8ibGbLm zsuG=lK=mf?#_AFS6ob6Wc^W>8J#F8;@R+vBe<-a|R-^<)D$}1c z3uU|fjI>NIQ+v&1qM7)rp3)G9@lVLb@AY^X$}92gf=PQoP}@hKZhv_Y8ea5`X-JnMYYVYLm@IhC^m$JGus?)J@qp6;|y!;C9 zp|=f4l?Bddej{jjqlX8wk88J*NySECx+hCQFk2|zRq!ZI&HVBd$N92uNTWzv@r+sc z@P1J(>yEbUV8@32=0q1)^~id;q69Dv3E&2x^5l+nv@tzfzfbArR=NRSaO)U5Dqw!= zfl_MRf4NB!ZXv zHn^Icc}wsnEwVJL(}Z`%KYzLjb!fTM7R%0$A-Kcyct8twS*6I$o)r1*&t;Rtjp|_z zxV|`YcuU@Mt+VVeeK{L?+zyoYp|8vO8%Xl_bEMmZUD_gCRj?`DMjbOmL3-uh!jfTU zJdiND8lT|_=gf3?Zly7$&O=Bf(S`$M)p>%Bu^aB;{XWHz9lBjO&hXncj}n)C@D76d z_SAeIy@etZ3HfYJ$|o^HSE<24$Br~8zGM~a%SpSn=xzRy1&HRa(&cH(Ra_Hw7(w~c&t60}OE)Kd}#zXaWsjtt!XHA~ozWI#&iN-ieh`A-5zxw!ta_t{` zB&SFEgy$=OiDd;yri!tzpF*Lt_Ac?8duJs)#VMGgL3BqEj*TdGwvhT~pA&0RPme5} z9vP`8*-PX%lzVDu=SoDaGHm3e6Kt@+=v5^46MV0zPm*Z6r-EVxSbKCN=ARW;{4XsR zhl4RoLzc9U?WCm(Q`J_NqZ~WaLwPOS3*LR%j0@Xih*SB+&RX*|y>{#Y`eCgI(Uv8c3DU_CbmBZFFxHSqGn0XfIhi z?Pm%#Q#+9(!dFz5hl*^UpB?_xE9h8AoD;;Wo7j%`+{|~(i>T;vF$oTJuh21 zZs!hTejArcj)r&wS}M9{i-+G#7eg4{UUxSK9%T`_rGOezmP>drBx@Etx>^W$96RXr zvF7LF(=c*%Rd&~xBLlF$DGVpWc7JT!fSor!NV1+v9@KVJMXq^mJ^jp^D&~~?l!=BE z^7(423o#_xL`B)q)1tH`%Uzp`tkws^el$c4_QFDMh9a$!5iv)RTK-n1yDUKyQQ@hkc&57A&;FGX~iyc^LBg zuqGKc`a&tphnNRL&cF(<-AZj8jUZ|rb3N!nL1w(Lrh?;!;|Ua?6W2;BOsb$Thz@mT zWhlK2z1gsUb99=~$+h57`6@FGALm_YW4G|>b`06VoWe{E`)0F@wFIrZyxqDql(5Wt z9gizZsqi8W&0*(P17^7G`>(C08?!=MC`20+uqIsDChTr)Z(oZZrfPH_)WR-oxlf2R z8}{I76Id;-?MoHW)+V0#6eP8zuqufCGaq@nuZOypV%rh+e#D~f3f;%u@;p<;!48^} zF}yG>G1d?xYxTl4HaQ=IAq}zXWx1QdAu*n^%HWnzKfPVQ`u!hS<6RQR@0cYy9CYzX z2A+8>f>5n597z(8sWo7uB4nwn99YNry9b8YvqK2%9qw-f;)D9Vj>$ZE#)*)H&>bt= z=Ekkt!R7Z6SOg2H-Ykm(;utR|NWwo;v+*~v>``ZWMMV@pv%^(1SbGIKs)V=xNt}6M z5n-L1O3@n*68`X|Udy^|`*h)$&MlADx4>ooZ~^xL+hQ&Qg>>0uUY98p`@pq}qE(IF 
z?E-h}`YW$2UDb8l$$pU_U3u7X@~ey!L*0|gWSYpF{{*Lp$0Kh({{%IBH3EP9PJJkw zwzM3++bu|RO|9Uh6+(fj#zlqBY=9LiC5j;qhDG`5=GR((?x)$7`qC?&P*RlTEx$K&Exmf!?SnwIt=u5a7mCh0y0hcQEp5@a&jRF$o^_z9_N(ge&wofRe&_dICH=tOYgB)R zSpqS%jOH>A?+CAO$&u2WWyFTe*?=#>SEvbHH!RqTY)tXZMcx?2KVH`=jkfs&Jp;){ z7efiXsiEa1{R^;QgzU64y{RXOP~|73KLeS_XLgPqLeDY&?yS@hpY?B0;<(8Q`?R1- zD711h=TV5b{-yqWSVYVO%Z1yR#@IvX#M#8hqc9)-yGziSdC1T`-B%`~{X5dv*<^;9 zCLWHL)#6X%cMNLkrI5FIkc6M62{!nHH23S| zxW~;le|i1$p_S|96~gqdE8m}Xt4h)zq*cw+jvd?9i69)^4}Pjfrml^D&M-1NO zBXgm6ts}Grb2Q^vtv|gFhm#5IaSo`RZmjLW~rfO+XHs6N_Sbj&)0B-r=0uM zV#5%=`mZ+knu*1R{*P>mi}%G9LvJOk#fD;9K0auEO|Pi?fQTfs|?jkZg#^G0G1+-Sv;XdK?ZVGc_rF)X#%iQHTV}zpd zOo5lFC~~R%d!^ED_XmatO}oft`v80cUUgTo?Jt_ooQMQaZh19FKHqwj2a3!YNA)Y(Q;M!cu@`qHh3<&(Z4 zfT>m$C(ZkU zd?-Kdrv13G(OI!PWH-5a`yQrmMP&?mXhR5|XhSc(_wGtDI;Y>4)9?yp{CTLqt`+QN z0VP(~F}WFtVf^ z{jr}KzR1sSF40d9UeUGEvRJF7x?=sFGCcc(xL(R#Ae_B?swULX*uKcmiRoP>f9?|w z0e^ZM_pTvZ&RMfesq3SaUlqDzC=fM3DYiADh#~mvc^v2C)8jigaIE>KNTY zTIpG##02MZJ#;^vWwKe9qm=|{8D~^E@Fk`@N6>%ZqlM+apT8pNZq}P810{VpIlWlb zEMbW|uNjCFoX_M8b=b&6&6ivsK9Qhr=tAZ9rYcg8l^b8TkT-64ncm8`M?I47GoV7C z84D$07*w%I?s@Ittd#1Y;kR8GosnG9GuiZjiLHf@ZmniNw44`;z{_4$DWz_-E@( zy#6fo!S4~42$`$<9t}9py~m;1(Ht#Qp45Bo_A2_}M=V2$7wgw1SfBBHt}puFE_lRc zt~LdbE!^fqD`WdHHw;y*Q!)c&-GYbq%sr^JXH>hegC_kxPR0!!<|J(W7=Lww1-ma8tB*&P`liT`Ojs-jwEB z$(ak=eqPYlJ9lG%73c3$V*i{wsF!-3aDb7w6j?V+JFA%FAXZ4=`77O8zn*I-wtKB( z=7g;(vgp@8>l7-NaX#kji$>Yp>|8vD=Vc6yFjqHvx64%rTe3?QAcaw_l&-=C;@XnX#6?2sWDVGo_$y5Pic|UM z2cn6+G6tK=dNBziTdTx=D8wqemZX;L4&{}|#+#onVaiufVV&sSo<$1X^@4#&m0Qao)(-nuiZAJDv_79m9Yvo*(5yAf1p9fL)dO;BvBg%jN4A zS!qF6EFC3oAa}`(Rjl|1|L441@lH|7zvR>j!-wZl_0UqeXycU-(n90 z=6Nvkt*Z5gkNCvhY4p(;VPloueNR7!P}SELGPL>3-Rd~ay$tle1yoZ;p*gytiT0i4 zU5oS$jhW*3nsRPiL4rrFJ*I5U>{zU^Wu*PRLn65M#>~|B4^$ht-d(a>KAw-nAL{w1 z0nXeQNu-Kcy_bw}&(mXUK7ab`y__U+9UXQ8U)WJYTozE;cBsg3Vfs>U#QZ?BeGtuWwS9d$cra|EPIUHFHtLxmAF`39Aw9MpDccJ z1KPJ39|-67GpM;;2WpUwJJH1u^v28S_zg6zLO+||CMj60R6FpS-NCCD?W2wu2He;6 
zJ8z(U+A4k7L@Y_u+^zITd*~7aY{m-PP1tLJdF6&y+veK^TCe+tDKC7(N*do;1wc|Nr4QL z(clzw;UcqK`k5ae+o-gMA2DsDhEcdRJUr`zZ4@73#C?PAc6b+`$vkWBEtdPf$_hu< zH@(hNe@?{jXZA1SGRV80q~ImRLY6T77hDGlpJ9YmNMB@x06`)|BwKRV`z`UeS7*<+ zjziFetdVtOEi#nIz9B;KoVTzD=MY0kRA*-`+>t?JCKDQxEB1i|{(Z7`$o(y{C zVQWjby6Tqt-z875EbV$(KY1%efL(_5A+34NkDnh)m*s_=^V>M%M7wyMdAWE>AYP6# z^Q{xz?`>!ED*llZ3oF9u3wLw(`TY{k1C~0iWf=o}>*keeJE}4>s`Z-rlhtj? z+f#P!LqoPlt&JC#zPzHfy)r)K56%2^N>x@M(gHICV~6#RMf-*dsvP)Dcb~w_EcLx7 zJclZFZ)XU_6rt<_$ea=syu{$M=s#FWj6Q&#NuAaEj^sGt=-I{(;cE*d0=s1sN0p37 zv=Ck^7-GPY&SD+#&+1n(whYdQ7d8btEy;WMa$kbi{Y+Ws+JcNi)PLmZgq{nzTf{zN~fv7JAti&kmK@u}dNvHts_N>Ps8e zXRj_~t8b^+_N}%TpGwIL@8#&b__u#}w>e8IK;rM>tLrL#z0cvNfASj5Mzel@#vxE` z;5&HfGBCGkmPN ze0TZE1}EYYPOB8W!e;(;tXe~3H=pSpp9K@oA>|gG(yk0LOwwyZDg&2pbB)&@D-|zp zK&0u$qnR0)a(EvOj_6clMc#&&Ha|a?e9dDP_u?tq847XTuub{Fr{uvO19oX{`aPlj z_Yor{aMD53B)|%eOc4uii+*`ifvd#UT2N(}wq^<*`pgU63~^)(L`2;t$$};f{~vV8eKqy3+~o`$ z@D{ZvCsom`Rk+dOz)HiZ9^W@niT;|Bev>)h#C0owy}9V7@Z&n9`-lZea~oqRmge*m zJfB6GlbSOTos~ltgan3VhBu(#8v&}e#^K@sZ+XeaLul+=2>A{KedA%l59!jP(p*_M~X#~`9?$sjfX0@`ASw1{l-<&r8 zY<~`(7=lyBL4n)tP*hyJ)`8)=DtsVKd!U>yF&DF>dlmrd^#NUUBKGMI-c$t9Da`zo zefNWoKN>4f?}}7%PRugGr#;Yn%?wwL==1|*UN*Nj+JfS`>(Ck}56asmPp4d1SQR1# z|E&7Ucofm{8I>65S=F@X;5`GoTH20wnM-c4MWqe4V37j&XGPj7d~0ev+R(4&$*`N z$dcAEKaR%gQ^$vcPOgYgI%i+@Qg?>_oV3O3C3=8w>Nb=NzbF4Sz0O=1b}+<}SnWn} zQJ#k%Qo`^#c5|(yrtU9H1NJ4{-aeV--jzbPFg$o|eDFE%fx@AL;UlPJ>SlS@sA|P% z{Ymot)i2Y8kzfhfDcJP`>8>H0+5?FmDL?-1r{&DMXAAYl1};(CGbYp59a(tBOx-7q zrHz&blaz>Brm5VI+rK5|&&gdx=|t-#r_WZe@MNluA9O!EY_32w?>)lwB93A1&ZaZy zCMxn+e@jW&{WFVmGoabHi(r)tubWXdH;B^-aS@`Db2!Ie=JC)w*{fTwKLWipZ*~ei z=^jj3@2rYEi`R{v%ps%~dof;ra!|JVV7PtSjn@Q6>eF{XA0A>Hv1fja*m$S+k?-i$ zd#?K^75^4>KbE&E#EEWObR%n0)@DM7CN$GHW$#>a z7QEUzPC0~gTs)-(ja>NQc@=9*&Ai}t=Lvm?r|R%xCW~yC$4{-baMM@pB&NW=SC#1# z`Y)cY32DpK>{`v7dCv}Um5RkbyqZdP3#GgNI_VV6?%LA8q4N%9l|99B*Cs#6V#oJ6 zoe&qyF~j+7A6~Z|cVmo5U|wztSr2{WC>)DFpq;7u6pkHsZ~y9A@{ec>l`UORUA@ye zSp`FY2rTwf1zT!l;*i9jsc?Ef4O&I~(dy~I*@2;E4oGy@%cK0x 
z1S;cTjiQBzZkN%X#ljsv8$jsxo9iy0mGKHsX~+DtFj)!+_g=$=n#JlbuCu2o_;zN> zF@OjiY0#Zrn^?pJKWwpVm4~fh=(~K=4XbL{{yoi!*Bd@Cx)~kAz4h7Y)BHco3FXFE zdtR`PC6~Q3+dZbR6nvWuzJM5MR6g+T383v(Y-rwur0NUjk7G6AfWHL{ydCdP=O248 z_UJD%3726&X~<2y7PV^6R=oiXcj9&*0NKenQV6V@78EH(~$Rd~?|7@k%_(FQu!SVMz{q=9V+KE9@>AtNFsRQ&Q#*Cu#z z#`oGIDI>*XIES~jPdbNelieo%>hmUQ|H9MTdgZZiwIC`WjFblMoMh7pO=Y0D?h{x^ z)IMO>Cp)Le&*L4o0g}6#5*WHo1`)dT2h;&Ew7Hv|zOTl6d%6n3#_|ecy-SZUw`&$!`79f_ayKm^z7euMG{jd9=kvCbox@CEN zf@PvxUZ7elh9@VPlsaR`1es20w9wSoLXxN>-;Bi0{5fsJ8 zR8-P4zRuWndiu^8mEK(Vh{guSxJRKa_X%9(9S(MymkpEr4PHvlIJ9T%D;x6q58ECQ z8PEq!RwaSTo=kLD!Od+N2g9zkRrweC_a@X&zC9Bm*sdfvxfkw z&HY)+FvQO%{+RsuF3IPnFAQJcT0ox(y^q3u+-8EOUHLmpZEK{@1MKBjdSLnx#pT{=DtrWX+dne-@aSC znBJ@jcGuHAywbCIWT*Ikz4X?RxVKiPr1Iv*q+#mmac=w*qC;8?5+b|!IBIVu_a>*k zHwx`xg+Ak>w$9x*P6nW#I(7mMY*R0mOMbF!PauxZp=>p2!cb;rI@V4OVz@6=u&ww~ zRFB?h@~rd^*OU*zsKpyuD?Lnm!ew*}nNxWfeKeEENwzum>fdU?p=P-xju zROPq3<~+$VDO#*k6*KfG)()cMqxLrh6LHsPY8nS2RsDAhIKZX#Z_(Kony|yKjO-l} zn)R?jq*r-aj8fe*xlxfgs{xqLHP=lYO%||^+Y@A-K6~)>i|5(dzGf<0K&n9{vo14b zgRN=RZ@O6UVt!H2B6!c9VHwD01KI@}R5QNBOD}}yE^5i! 
zU9nCG|JAA+ZUqQ{Iz+M}AgKDGH1ns%s_Nb7$Vo`Ff?&4I4g*?SVfr@+117y6aH01B zu^G)HCVqa|tIIAW>-;FS`$9GJZmL^voN-(waEtPv-w*!spb~jIhFM;>dolXqK%(@f zomFP{@Jr=JNL#k3Bc=o&jBClir?BWE5wA3l51+DNV)s(NI&!{!84S8tH!wEYc#%AM z9&g|KAOa+K-uW$ga-Brlm>au9c6Zln)BmI#skU)x)Ofym3v{hMIF_`#G*((yP|r-& z_j5Zta?yg1`c2!>D{ujvah_)}{u!NkcEw*MYl|Iwro{21L=n^z&XM$J^WNK%pQ6HT zF2$nCdiU68w2v)*r~gm5i<_?g8I9`|L4j04RjYD|&ywTqOt*|`q|$z;KTyEBl8Iks z+z{Y&TyEUSmv2}duz?N5T0;huX|~YQN4#)J!S77Vi{)%K%zNmb=ybKzgN3U`-Rnbe zp(wvtrR@&tAKaf&)O1=w%?!52Ty&hK*tj3ciMDFUH&e6gs|i{-3iUjZ&UeOP;%yzJ zD(nY|zr_DWR~BLG^(HXH5*4xHwC8x0p?Cmh=7&1fuo;L)JBc?Llk8s5F2>q;LDNE< z;hl#6^b5C|?{jGf0io7#Hy~|cYwpRRj{~aCN;C^k##uSV+oTzf5BA9;fbI>x=WTntAVcx zlViI1ZbSyLTyIe@=w`%3%=Uiwu>Et%!(bZ2v5>+#3D?ZM-oj9y{=&;5%N-WxtOazK z5N=$cUeg;bQw#i05K}K_gGa`gT7=~~E!)i}ng(s1 zZe#)5|6r_@U-WBkr|Iq4=(g7g(bNKRmGK%KXMf8g@$&6(nBC!FEWbJwj(2> zS-A=3E|z{GiX=OAQ{y7Vgue-~Oi_(;>*=A{IyR15B3BZRiT#x;v4zc4>) zYC^pSrM;Q;`S@wzl|iL*Gm~}$u%HJo-`gltjU93U*W1-gsA!ro3#7&}5o}!tw|I*| ziJ=Mwc?09#-3}U3?xj~6?#PhM!k`v}r%q>L%axk=m9lXX@Oe&NED=TYp;0sM=JfX! 
ziUMRKv;q{hv|hQ`3(lV(|3eSmNU@SgVJht$rQXb&WD_OhmGFwg%A@DPuSaCoS!PF} zPBP7;L=*|0O<(sUOT1_vtQL=Evh_=mo;x<}16aBo|QuBP#9~{K+>;7sD$AvYmMJ^h20GOSf^)# z<|*KRI(bvvST?)f7Dp@;Yli9?MwfO9!&D33Fnp3ho0%&g>C+PxSjAzdUsINnP06Qg%!oQSF;hs zHrx}|Cu>k{sseC=n6Qc3n?Z$Nxe_o!&Kv{=#MMa z)z#*hlFhhc5vJMlX?!+8;p)eJ2Q4M+Udx+>)k?lR{$$=Qh>0CPrkZ{r@eIdr?SaRTR`YAD2-=EB2l&CE?N(dV6!Z6Gh zjoeqb%FU%ID5CtL+~~fTO4gJIo#{U7qmNGcTideWE-FP64P1d*3fS;OuQb&MGZh58 zE&WWWhY8NdBarHbB-d9j2sF%6)K&SFFKCm$urCiU;{VpoaYp)129T+~nN zYU?NBJ|#y#)iYg`!(>0D%kyAmXKHb~xIpeWTzy4mULRk>FgdBUq%mwE3x4^3dhel9 z7z^b`ZO586=~tH3j(cya{bsLKi__d4kFroInRc)}m2m){fPe#J&Q}u780x`|;Wek2 zX^58H+4KVQ7g-AFO?AQVL`6_kA`@wZUOL`pc#7ay;q%S1G1EyT&h5g3KPF`zqARQ+qp-dWd~NFeVkEq9UsM#IXu3ud86jn%s7Dx0^q^!LQG#APtw^;bJs{zm38mfl({C z=#PI^PP=)Smn(~CP6 zka8{vIKFi~!!gt=&S{PJAsj_WnLd=Cl&c%{|Lam(v>AdQ+H7_{iv)~zsm(Pq0gu(w z^R*=88=Jp~^Rte-%6|QYHQMl7(fx!K-GBMjTilct_h{$N*tAYijxnas065b{XYer| zr)eg8W*8A~*%!deQzeWw#UShvqS#+`$fxdB+5JN})rsqZ)}Q!j)cB=M6W@c9_4&aL z2b0eSgC_)@QB0oH%N2e*AK?ZIS6T4#ma}$b%Joq=QeJo5P$q&?PxN}!dMIqDNE1Ij zFz=?J-#{4<1_l*Oh+js)XMik;`#y5;=Y=bL=Ab28ce`mBZ=wCEIML-}RLn}HIxq(N z*D+tx4Bpm{P@Ujqd9yyF=>)lK92vc!PbMCElm%+x?OGPcUs_H_%8Fs+mX9yGEr@{b&n-9Ty+;IDW(%zU5 z_Z_u6TEB~Z?w>ws753I8?HD0(i*iZEKcWR&cT1^7c<-&r4E5s49)nW|m zI14neyRRd&iv!$ml=aF77fK<<+wI|JbH?vi&9L1#u>`-no|#o&Es_?7d5as_J*x-! 
za}r^x!R0rk{5>x_{eze-)4dlbl=!F2`%IgyI8qB^OfJ9G5@T$)$5{Qh?|DT=Oy=z; z@eY!6i7SisWZpLTv|Alg_RD4 zyIU51)a>vK4rk_fV;_IyZhix|WD%|UfkyS~R1Tl$ftTygvFgh;8rp>VoEL4Y4rAgb@uzT!w2PX=}S|rIYzUV*+`lM+aG%gAj;2jzvH=>jwYSII06tG zZa5BpnBGkiPJTEXCN38AKGsL++da3Z4eq=1tl%#!3<&rg(Pyx#Uk1hmKHKh^1kdK1 zmHfX6?IlKNo{!m=!J{KIyvOjr#70M$d)~y1u?4L}hK=d9J zQ+)wGb*~tN_w?TOmg+D|+&=x@iUmSX0@}uwXYGQ%35Hp9@ORePIZ#()f2HAX5Du55IUh#w@7LWDqh0{{ zjW_gTb_?DW=HXcJq@B)3$3te;n*qF-62y%Wv#6v?*4vXCbf~;_-O?e+l|e+((MYGcQT9lOY$X=j_jkAFvon~PUPH*Id(@~4UVF#J z#w+gAv*^Nt3Bv3$DKE6c?Zy=^;~*RA-bmAC)^4GV82sLsv$l_v(Cvs$hcUexyJ$kC zqmdjoDQ^?sX+x*TQju+j4MwC^bBC%BozdB>?HB4iWT%==6{0ZgL>XPUKdgC?vT+$X z7$qz16Fa-kM2?IxgTf-EsbGb6wz(ozB6~!Xd#3O3(qPI+dKU3Owrnwvih#zyV?bMq z*gT}4KfLUZhrol`v3Sw3YZa7BNU7@4l=#qM1saelelR+BLwULAPY_f+{nEtb_k!xg zz{mb_^MkUz&36gI8s@`X?jt9H@hK{^l;5Liwj`K7(2L-UesfnEaR3WoT*E*wIAA;Y zJtfpq3;fiAY&&`&(0(^(aCK{VAUA-Y{n$LV>ukOyUoH}VP-v;wzcu+Bt|hhAE6D?G zy}2rpfhGzHH1CU*pP>Dc!-z!^ylQ=3M3(#I@KlVJV{Y}2ZNR$#puQiF7GYqpF&B84 zX=Ko7(aJFJ*hdyor=;ulNONkeU#5=_b_uY^GJ5PB(I7w_of-Z76JP@YiZmI}+o8dy zie&s9xYNwtJyP+gd-e~kgj(ZRw;vxyv2PG*v}<>TYsbm=a`Z*90*~>nscfgBBR*kbq>V@*hC;xt1bk z!mT6v^C%)M^%CO^(L^nDDtt*~5V{sN5M=gHD$NX{HqI|wQbaVU0;*Rh(Ts6zUCt=- zp$J29fza*o4;=3g{-?<+aqv=*Kacgv$OvWhY*z^R3Pdf5%DD`R*7{*^*$q1K8z0+@ zU`h1(Cdgj(8>mnM6)8Z!(>HVvRZnRQ(k4)@MN@%PULmF=Dj~3vB$G9o-^!NFT#SZ z7&l(bEK^@uCoL2BL!_iJK+xTebd>$u zfm;S%YovV4og!C8#|i~T44pNW3*2^&3jnCw;$vO}558UD&;1$))UI;U)4jGUrj8VJ z!)(|tV(xtfk~Vw1Q!=WCimGkQ>_5H)6MG`*3(H^97alE*eEP?{WTq2~AwJij@`1}qyVA!N9ah2R?#M>j8YEqmx{T4TYQ*I@p7qsOigqXSq$u4mn37$xKOdV%~eL*9UOh5cf-79VA(~m3NXZyLJ3_VwK7$@6&=o$b@BZO z(ljHPZNy#f&>P#Rt?6wxBB|4uklTFU(s2-4nk+o?MXSy>n&O){GSI2N@IQqe2&*3_ zX=p}T$u~ylu3CwI&qy`B^lx=J@7l`RVKq~^1gW<0vFc$p~S;zjBa<&6N~{nEPdCS?rb zuG7k!W)!u?c_@=)XWfdWykJ@&IeY{RMciWV>tT|bQ&A)hjmlDNmmn#{Ofko89^A%1|l=>M|_wBdpU>tUvyrXz1oVnnvA-X zWu2tuH!2)Y{w-x0TReTg+wUh@2M<#>UemgIAW6Gd>7QD;01C34)z9FYO4uefl~Lyl zt#FmesnffW8s5Jj++gD=25RBam&2N&P87d0aTV?&S&#wQQu@TiL=u#{H&|8aiyYnr 
z;PQ;;i%L;tK{a|ZBfZ^MJY8>vy5R&ile|}Dj`x>qr#W;+_!0+DhX$Y1dj+Vn&T_;5 zX;v*^CD_bWU`N{7z9OKE4t85vBFy1$t(MK5a&gAvi=yyh_D?oP~p21ulc* zwDdHqF>j_CE+ehZJv`Ndl*(sRVOP@=?yahh_bG~=cd}g%b{b`QG8?v>zu@RJ9`E*= z!i0Ldg3_}*TqM7W?!{QTV+EwiHfB(pBXxMnAz2>5EFLOrM0u;?*)-jA!xGl37&~<; zbcojIl%{Ki)(W34>#@%(mbM*Yoosy{~7Qc?b?T*yp9+-YDh( z`ObKZdFo;5ixbuHomVbG@*l8nSJUeh(=`7vwlJ#c!TF{rR9_hf#W)I_P_3Q_0qo27 zQsN52!Z5@JsHOE}3n6dq0jo>!Kb#6*b8MVaZT@yob;INh-einxUKs4`^|-XU50jT= zu2A0L>pH%xXWNLGV zmn@F(7d2W&cH5aLs4N6bXd|Z>Nvq^0NoV9+t+JtFpc5{Xb>9>Drb^|J!{`;fBuuu{ zrKY!Pe~HYbIfe~g7^;6^A8)EsR|R!K^n%}j)VZUgLt%DD1>r}EH+=lYy=bvkC{^O< z@%*(@&CDg?;IyHrmj<-&I*0Yy%=FHh;5FIqfo-x6mVAE3I}QH%%FM$aF`jFWjH9X( zgVUv%mhk3y`!PX|v>jL3^Nw__mI+{4T6ZKQn$2_<_$L=9cTSi@Wa-MB35TUZDF@vs zn2$^wR4JA8@8|l(u%bul?nMyyA>d#gMlqf{^W&w+%F(-5c^o4ZGQC=L-yjXzT?}li zU@^}PLXZxAn8F29ArOwzPQ#C&Z&B8uP19xtM)WiESJZsd<%Y6I?;C=Zyq11&6GBEM z9wB8DTv<*GdQJgr&I(1@d1c|lH(zMzY=2z}4#Ck7MsYorv#u;3CxVz5j8DczRB%PG zLZZrGALGO!4J47DKC*AvWZ9vui|Q-HHzHI13h!i?PcsAqH<<=(#v>yj1N4z#L!XW0 z#~R|9p5b!Pie6X%J2L-d3pDbq{SHF^cZEk5z((_B84wtccEBT?94#&6n`Z4IYhMSZ z)L^jYsND*{7iVu_rna9XSY62bS37hq#|31*&~&&_%lgSNQyc21Q=Q$N)k+~ zk{YB3)?=$?^c>`QGa)~0%K!Uy&x_Dkc439eDLdk8C@r_l{|glC*F)rz4b8Ik)VEC=v`G(CG?$4T^!=kKq~P)E0y@Yn zstU>NuTi~;dtmMWxG|9@EFM)(c@PDhBVSEu)Mg)c*S7##|BE@+GxjT&5iFu0`rkQQ zvH(8pgjY-Yk0FbNicT@H5fzn#Yht+l`=#M7E9|4837|fBy6EQVW6vAy7^r8bo?xM> zH*)jnc7{bwat8|E6}9he|X% za**70`I;l-6X(IDv|x92KMnCZ8_EaBWw~zf4zY2*nJg)vnJ%9g6=AbCQPKBBrVAR+ zCYP@l<57{hN7+rI{PLPY@U)6E*3C@$X(iTpUloS0N=~B&lU3KTp=V)sommh>cg4Fn zH#7*X3f6H|(QH&Tv>PuKV~K^cBcqyBa>j{PVuQkMUil-P$zsLce$da?yKHqRBvslr zA*YrRf(h+MepLh`x`j}(+3c}Wkj~gor3ep zM%(rQ-&+e(J3ZD%I|Bf{_oR96FjB5@>HNS?!RDtX&WhWOOWg_ZI1r);>zn*w^qgam zXK=kTQ@)5)97XmR;wHLL?q6=`i7)|KGy1VYb8GC}=+ttUlu~D^*#_wfy*5;gr7qr> z8luWu60}5P+@5V{w(R8va?Anfax(grubg8Acmh&xjq>f8bu~}8T*TIEaBo(qNDwND z8T|$*>ful&@lh4IasO;3w>*8nmholYMRp( zb6*ww;w4qS0)tlnuLu$&eoe0fC&MoT(g^TV6AG}M-uX{aoXVon--@W|!!HpQ5-_Or zd<70`^4@VhfiA*_r}qT1{H!7lP!}@1an~ukYUP)WZqlp7eP=*5Fg~ZH0n^p{g1Id{ 
zH`2SpIAFEC1=O&QMyy~Qw&o%g9bz$0d{tXH9UoMC6uXFAYESo3J2GoQngJFo#8lW) zBjf>zdH@mc$!A}Mx8xzm#ZdWlQlb?~K)l4n`y?yEJNZ-;y)=Ld9{_YK8@(EizA4Jt z>Wv7I-kSYPdpNWpygUTl+(9Hr-y5qgL~CzN)J-&-ABIrRb^11*FQ$qb&1yCkURh4) zh^B^dl~x?eijAs@{bzHJv@sc0lKA0r%3G(E6zPU2{-SNolcWPD5OxC!>veIkv9<}P zPKDA}UB-0a5p~nWF6(?Zc_Y9e9HW`{+|b7wRg7wHeRRaY$M+(mj(8Ydv~Nlh%N6eOuWi&GmCefI5sLysl^xtB$A3%Ol2$LoiP@Cd{)s z`scHiDI8QqlLB~JE9R-ugdD%CN-2VtfSev_O695A8!v(;Oz+V$T9WJ}3Txz65}bLo zX?m{WRQ}+UTPEU6gGEa3fxI$oc1>k+DPL6zlctv=^*4aIyST?`dEI%#fVfDy#%j{t>Qr(C$Kx#Qb znt(D-!yB1>>&dB#CU<0R_)^Q%R^d2TSmiau_N+>sI0j% zi7#fBS~)7fp=^9qOEN)-PAwgDCZq{5bFya%4nZ%0(+(y2_+ma1pzXE~eYKjlV%bkr zf)E*8`KcaAJw7M_+cs059d~)tO-K1RJas+II0X}X4%j))6PAvXLSXe)FqPe|GcQez zp|4iW20F!q-~2_*83N@hH)d$KGQ)MJ{Qpogz$d4N7>^Tu*%W!Qh0-ly@y9|s#xnjD z#b0-|cRuWP-?0sGyX~BMBmXATWH@^k`ayu*Ud2zU70PN8rH#AkGNAv=xv|%Vk+RO5 z_}u*`y7^%bQ0L|P$jDtjuib#Uj?&G4uRy$}=XgFF1g9h2Way2zW9s$;GAr`sC-sR~ z?ihNy)1PI>V3eIx#nT$78`Bi+lNFQn@|L%r(<&2k?)gd(%l{^-$vpjz)slZ zS>pnA3f}eWW$CjPCYY@lG<(JUA-Im2&zHe(KtZDueTO^j=g-helN%3g9R#$->!;$B zpPkz)z(399C>*BUM*X$6kyw1drWjjN>>R-eK-xid?zo-an^vTI$A^{DB0!qjJtmM; zS?wS`n73aGuimfWw%#DL8ynGNOtaK$u<~qoAy^(;5JB>UtJKr0&2Y+4^nS)4dwiNa zAXl3=(~T+X8C*=c0sA-Z9x@|f7>No)$aqA-6s-WL3>VVYG5=Yw<8{H2J zv-~@zV$1s$pl)p?BL1#z4`**~JMQ7I;QaxyNW5XTZ&z|wHNzh2M3iZvjf*E&Dw!of zqddbDSKo7_ZIz*3j+BEcuy^%Oiu_ISZvd-~wzc{sZ*$*cK~(FNw#S=mv^eHC4EZU*yR(HHVox~~zUMqChKZzy!8@D9hN67!`@;>T&y2hP8z4$Hy_ z7lZC_L0Xc?hD%Pyx2~fVrs$GopLCan`c$o5IG@|-_my?tjC+*{V&hZIjA56$vO6fg zGV4*L+_wPW=MP2w7vspWevWqo=UIUI z>?T+D21VU61kKI0l~i_GY7g)2J464{#;LzXnIVE3f{swS>2JHj&aDsR z^$DyiOj73$iq_ZG?|yyJu(-Vc5Xc+Si$ANX5Bt(xsvOztwWsE@D$xHT%%2O-Xuy59 z(C=ES+Bgz%sw%zhcX6JUo%13aDXce9$&rwF9-_2rEwsCRRS<7WQS#z%RV#;oF#}N+ag_RJ!pa zKW}MJ3!65Vd-swT%T#u`4rriF&Y~2Yj)8v&m0V0>IbL4Oa9Re1D8!GsY_J8&)S!nRt3(7EMsb50gA3dk6Hi~*h5YyOPgUb~-0n(9=cQR+ zofiwL#(mE#UQc!5sNcjiQnd=eB`q^U-`XC?|srX*JEZ5-sEA_d5Aw>Ga#EW?AH z%}u|b0n-)I#(cQ%?ETO$hI$7f3(dZ#KdIbQUL;c&cm7E#|L?00joaQuNwuTS8d--P 
z3{-uYTIk6(Fl2*%#_lCkGV-uz&?&#UFXLR=FKdAall^_y=A7t$B523Alt8BVMyVOK zqmOJw3YF{wj_uWj@PNzgm_ERgE|{2u1BIn|Lpl@LkF}B}a{GZY|Lu%uh5z+c$TF~H zvvAjo5VNe#b@t;BjE^k9E~qt_t`P3-J5Y}NK!o$0GBo9WxVbK4<#uaX+yAV68}52i z@<)Xg$V>e#bupk?o9X|MsdbqxpA}_0W)T&f&mmRD8haMXwEDL#SVfZ|_y*rlSrB3d|%F<32nA-qLVA zOu2*y+`>tE&%3r$)$Cxe!gPPEgoEIO=wMpJi@=qM>u%ld! zx3-w#ZBwfiCjQKy<>M~$uNWmSP^N4f3iJA$daY)94uX@Zc-$QKvg{}7w0BL4H+z0H z!PQEPdoWYs$mO|reAcRUF>&y*O=Fy2VohZxb&#z_34zkPO_z30pEGS zR%7H-0t)yJCm7rgZDoD<{VY@T@fxjgvj9HhzF-IxgAHUl=#1!PX4ndz=0I? z`J{HyA$TD@VNrHA;tx2E=Vm*z{i<1>a!@gNzX^1+`*!=Mx}KK8Skuk05TL}-W1(vA z181bHo5thFSjDpv8mV7ERDN~Nkl2hCxjL2|NnvdDD(M&EgY=E!Ja;Am%l^|U2%b>m ztjG6pw6|7Bl0X-wN!Pfiy;-*8Bo5^A^Ys-432fsjsl$)`=48>~!qkQHr)|*(Jni)W z0$55e_Bs4C^4*T7?u~RA9BL0D;8*;#UhnQ({oXw8zvDK63+tY3a2#HcZfllaZ;VS9 zLuxiY#k$%htpr9<%RynSY1n%$4!G;{#6M&fC^px-Z;SAg#Mf6O35bI92MMCrldI)Z zaE28U(OGXAj2Ua*lIDAEy*+EIoyhqmzTGtj&lRsV^xfI1V~-e%y=n8^#v(N;Gc<6W z)+OQ}{d9jKFPPr(3Q5VHWM3d~TuBiW6y0_g)+WL1EOJd=2=8P(Qun`T?k>Qg-Y{Zv zuqGzCnaDH24hQMv8=j@K!+GP!_1@g|`elqTtE!G^t^V6d{Qq1Im3n`UsQK3U8m>lC zs^F(7ayIqmc<);ZhY=zc8C%v^e%G%>miF)18z*+a)RxfrnA+*l&3dv~=`fSfc~ zr{j=7yO`~2wSATptCZ-y2UY1EDa)g#;=@NFdZXddr89Z#G7%C!yFp5t{+E$zSI=+i zP)6+LtxuP5aqpP`p7KeK^YJWv(>BD}F=c~bQucazRycALMz1oRI?r^Z1apK$8j&NK*$L7K31Bzo0p$JQD`FK-F;%=gd11%*D4zCGoG-PtOa2}-PrXGHqa%f_gU|5JiHc1`B4M{iAvmPNgPPmENj-=aVYI4C5A#z zivmUi@0|K$C<|OX+dbe^<#Xi)agj~!Q;Fft3cMk8@X&NmIcOqnI`nH`Mm$P4exgo@ zrHJIwMMxJn2Fb#T85x=49T>1JVmVPszc|ftJAe}VytP|ND;=d+hMRRDZTt)iyUE@c z8GyVSztu-dm!g;AE|0=)c*lD0eVZCaJqxJ{9nPBU|742xhl@h`yHA5LOKLU`zE6F} zB`XW+*a<{Gm|rMf)V$0RgdaagUE-ADLYKAwN!@Axt-`21?D2EG1?uDDl#ZoxV8k*C z8&cBj6nwr~{vCRkqME(9apGGwdsouo#@H5yUg4nlvAXN7Zy}s?XJI+j*x*9KPR)Xl zX5H25o)?w4?)lyABa;WZHlre|Um7Q}1^ETdQ%Rm&{3!1TY_l}J=O<<1yxO0Llq=E8 zPxBkN0=B}X=8NqGtHG(#1zybPzG1q}``Ix}#P8=mZg+b8?|6G~&pC)(azBL{tT|L6klWBN3 zF=?at{F$OS+zrk)USqVG@(0gDUr88NADzkw3F@4Vt)F*qBtkBI-vaxsI(`*-P4n47 ze0X#EP0?R@t%{{86_0?Yl<`x%(2ifFl3Gg_&=8HG4m|mIu2sk}-!hziSbTrXF0^&@ 
zVUKVVyKZ54cB)~D5wLn%MWQyZKBY-?o$3AlHU)EuN=W@a?34)$GjRj z0){e?Npf0+z)vN+RTgRNUJU0~5pBt%--Ch$Hg^NjcCWDUcL2-zwl^dD9Nsk4A5E$_ z4brD-Ik9tLFmwmrSM2)RZ~qT%3s|9P^)1KJ)s|>xlVM=T|9o6;bk>m5J9N?GoP1H0 z*cF$ifaE~36&KHTe9?|^`@7?c;pKaf6!9kS$hFcpQ|XtE;htkkb5YgSV%l*EWA`5< zFV|O*EL3ZDCGl)pZ(%Ic;JTR*;GNmPAy#j%oi`jmW4B+t>97*JT|38&yQMSNBTUaB zv@0*33oQ2b13{sDu$f;#~gI_tr4?y)CaxE8@9kSib%p6OmH9NhrufMC80IGj5nk!G=O2XDfjZc=@Pc>|VkBmV!3bXXRn#GZ^ zn^r|~HqLi2IVWZDJNMwc z5y`uHWm7B=Uqw<}ZIy@BuiGAeWorKEaY!5xEhK_Z?#PJ=JxRHrv1p0wqrzx$%Xc)k z5FVzrRk-8tQGAA&vp~@iUfP!}5xF^npGvfDe(F2c$d54RrBPMXBY@UG;!d zC{JWd=|r!Jrmm&es-z0|r8dC!EnTlC#dTtn!ajdGRdoXaS)QxtWZ@jJC&zD1cG6FgbaY6z|Ohi2~gI)#WgCR_MN?+GN3vU9-(8Rd2 zMsx9Q|M))N#ywxRAX30A?^YshQBLNs2c|x2-ec_U)nB1RHRe}h-S|`|5PgC_>b}Yd zq=B5comhn;EafQv_J#gmoTv}RfPNOr{J8g~UWe*j8N8OM0U^PT25qU^@Pg_aj?8Q56ed)G-rW-~^q z8_zIQ&r{)jT#~CmX=NeL@$hJoj^(ucGxm>(2~kGHbMm^&3&KSz!|sS;Eigf;2IC`$ zOya>?NH%ECBvof~C}j3sxMiyD=0wPDhqKKU9og1bwKk4K1_^{Eb{>}g%-v7XDpdxG z;6VnQAK}HCYiPHBex-=}4tb#?{e=wXT#cH9or8L>vVeQ`M;fx->Nhx7LaS}oA95fHf2GDRP}9%2?{Pa=f+T# z28SSGI9^4Q_kG(puGno!8Z-SeKRZm^6UGK#F7D1>lEO=`)+V(;m%})PLDW7OUK7qO z6<)oyO(GxzP`=&n8Kc?xUY)0w9V#myC<=MYW)`9&M<;pL zQnGp{hb95bs7NkaD-qBk6b}5#0&(~Y@P>q=%e~fr#U3KlMB%0prCwT7HRqaJwt>@g zu7`VGu8Q|Os6=y*_*41{3%w7HjW+wa7CQyo2q$x@*#`u(OsB=ZMz@uoxMDF0s=r@V zejWLqG=MYjgHd*6u{a1?pYe(sfSzJq%=-V|l#HE|PwQPJf z)yd=$=l4kX$F6-!Be`A63MXGR2g#h8W5#?OzQWq+-zv+@rUQtU*7exX{V7R_;AQ9Z zQ*|`V{{R_K&;HofJ6`O;Ecbn2$iVNF^7+}!E`GGp9ZWOJG z#$||IxT^)pxsHW7+mm}7ciLid!|MzVUwi423tRK?S*mFe z>~ByLdm4nFBca8y=z7>AeU>(f$2R-yQY=WQ7r=tAi zmmakw3g_8i>5*m|lVX-qurFvCxa?TtyK!}p6^$NKJnzQL?sre^ee8)+Mk4-k3IDu9 zb0R|gKir7r;i9ge{?fdL6GG;BgiXWBnW1=knV=Gc9QD`LViUi|ejT zf~`CDJW9_&gk|q>_iN=X9sY$V_YYEjq^O!Eyz~!t3D}q-ITr;v*caALhtWJnpF>wE zRfOn4eRy1E0jVAPkru@|n{K-96RY=&m2ARvF}YWjij~Gb8MmLFk9z+YH0X0?iFoaU zhzvVyE~{eM8mSWADt|1_;24m;uecM^O-3y%{E&c3w^Z?)mK2p(%m;(uZudmqwb)pp z;p~cuq_0I%bhlQ7OW(&&WLJMGu33E3EN7*M-9l@Or7VdV5VoxR4)2Nkhu*p#Ocf4K z%Wap5Lb?s;-am4qbQF&wI+h7{>#ybKA4_wFQkejVON@nG4^+Xu5uA-D*b$sRqhXgu 
zKlUzTU$siEe=${&kc+mIq0er2|Ex>mkmuq8p^Ap^DhK)-TCbv~`cDi;oFHrt*a`x!ss~!B>&lvU^&7#61 z9X%0}^7y_=d|9qGjnOH5sVV>F3+%5Bh{!P_{J z9R}gI(@@HS)F(ogN`{I!RPhF7Dh5u5UWIzHF*4~b4#i%3DsG=s^LLv4>8R$P#sy4& z^sma=%p_v5(wa>3DQlNw6#LQ>xh~AIMgx{i+%3+d41pxIci1hT*6c(7YPWBJibi$o#(y?4q<;@Dh)4vlY7ZIuc>wk=$NEAw*tU6 z$PDdI7T}AwjFX++WB4V`ngymos#iaW*JW|qIHH;ZoiONQvmVB_zef^&TKZ+2N$H92 z(&gudT}J)Se^a}gBQErJo&Cv6FE#A{QI!@M(dLg3xkPkqE5Z0W%{YeSgO8bEbZ-j% z1a0=xfr4ekwEHF<86$AJ;1RYW-LkV{;CIDdIk;y0(CFg$3%*+uH3h~Jp2A>r?xlRz z=itZ8^#bAF)~XD>-#NH$XQSzeHb7#MKd8P=`_|nc%qvtoz{35zycYaZ`v4vdQTQCZ$t)vL!-w|&z%@FDur#? z%02&C%2(TZqBkJiJtew2P6gaBT)1w61%-2@0dEVmVV;WRJ3E{o`dIDLg3M|$m)EiB zp@+}BP^Vy@adS~A-DMa07QIX1iB5enyEPP!=O=Y>`p%_wt(~q{rDsIBYg%GO{dHt; zIBxSyWJG;TP}I@4?fl8Z8jAUZLt{KUboSfYsopVWGEMAGYr}hK*+RJ+gfw{{E59>s z&kH3m?Jf$XebB=>o)oTEq%3C4o{Dbf7y6aY;wf+k{z?#`j!-rsr@Hq+MpNi(PgFK{sQq$zoDD1RTSemvOeCdwVp$Hj%ofShO7+;jb%HJ5qEGJUhRI}H;Rk)q{ME@ zJ7n8_DV8MVg1R9KNSyi0FuDeSd-kzW`x%a*NC`UMJwI=HtxhYdfs7@ zDJIU%wf)4jc4~u|SlF`w*2eV#f(@IPXxR6Kx;b=Jd=we^6JfFzsIf4ABFojd^YSTk(iTf-d-to?K1}!!5?eKUln_= z?s9x+jk4K2qnV?wh|)P~qVjb-L@xduzLvB$K2Hfd`#KSX{`~saT_f;)w>GHFXiOik zRdryLdYnDeU067mDx03eUQg&ABTk26vE~-7uo8v9g0$vXSus|HZ~KSHQSN!UQ$^?aK958{!&p|gh|9~<;%EX#q+{2nDPpj z1|$z=$AhXc-X++k_Nc-SO_jyusuD?n$vC?DXO2%|zIal^gIQ&G!0;u9o0zc{PGvdc z_!u3@Xq>IyH_k{Jxf6Jf6rvXH5p<{7o-qlU2qy0TarKHhp<4W-Z68q-H(D7U*5|ww zoe}jB_VlySovOvY#26GLS(W25n9=*!pS|lhD`k(%na<3W`}d93iHEI0aX~xut}QZ& zx7qPgB2Sp(3Qgl3iVp~jGaVMz=Rlt6bme z35>iTJ1eJaX)iMQwd_cLlgOeQCMgSu^IDw^RRG*6Ff$!xJ;ux_VVs>uD_I`T93P!i z4+i?r!@R125yt8L*vT|xsu4m)R|)o!{<04F`m!M6yXGeYU4rOTIc|qpSBf>=Pz(FV zrFK+;hM@H)l1`aD8Cfye)Xm`)tYU6V1R5_5kKadfc&_Ka*x-UPE7!!i#gua@wBCQ5 zOO(zhXdH)(&U9%~#u>B(!?r`_%@mY*_W zMu@0a6sYN(smB!^zJ;3gfW;vHDcphG(X0H~sCtQi&l*X%k2!bnOUc!Ux!@c|wxzlU z6*@Tpj!o(=&6W5+y1qIf%C31^aFLV{BqbCn>F)0C?ye=Iq+vxw=|&o91eOMAlm_W; zm6BaL1q8l(@%8;Z@qYiIyL+EAGuK=*XU?2+6WK>?KBf^w_pl~A3UCqqsg2qRyF>0I 
z;FV9a_=|UW`v4i1+M4JIA8=9JbY2kT=1~W*NK5CH@m<}91EqrQWk5><4}K=XQ26{fmL49P@Mah#RZm7o<&6+;KIEgjANBrhSi+@yt`lSMT^$(nyUuE+${tDixQ&K7sQD<<(YUfTrd zHUbV;2t;VG415svR_IH3RrK>eyIg-(u81xd9?^bAC0{D*6ZWgbPxF-m z_pk)p<#L*bosjG*2wg8ymMTwx<>C zpOuAj2L?6T>=ZA5(&KY9OH0vjpJMcXB`kzJkUP*@&10A78BZcZgF_?zLC8s7ZB8XK z+DQM`duSrFRN7vOl{Ag4pSP2|=jd$Ap#ybhe8}es16H>xm2>*eCbz_irfRFYT`W^i zc1pNCHA!7e^>=a0Y*Ja9(`z}}LodbUPZj8*_5t!z=TG8}3#R+;n|fMPkYFDq$&OSC zSOKe{I9YOe&o!C~0q=Zz$*d|l8Ed=LdGB$e12vPK!Rnv6lki|!6EC%mSG1pEfcE64 zFuPt`agoD*Ss&n#Ms=&(1MXjGs1UA+5gXeWzTAlluwBs^?w2UNJ(#%71R4jku~D6yyc?s#L1V6>YOmm~ zrAe!1Y?^){1|K^}6U&BpKOZ8>!Y(J%j6Tm@yP_Zu7C<`ePQ136nZtGDH zNzMbOvaaXfZG2aRWzv^mk?H#7;TDcHtd~4q({GJh^;e(cgnX-2mAH@kJbm{s`Yf0i zwdcNLM7Hnw`Zy7;r@-_4v}*6wL%zEnlKe7tc`6+0F2D&WwJlbs0u`ZtF+~xMDiL0V zpnSuVXOf@jH0Bjme3Gb>*R)wNR5|wNqs>ofvPGy>c@niOA$$Kh$@CY+yrV&Gn)%j7 zO?x1{00a71&cV7(p$lIFOj}jZLadt~h^{QxEv=r+*lt*-o&4T7CqVwGL*ozLv2}D# znJMH7svFWJjoI49!(GmWK}PbJ#R~Uu6D1w{YVlo&rF9tUqYlSIz=>fMZI#-dwc2Fr z-p_^N)mwr4+75I36rx+=y+_{_Wu2IBzx==gB`H;wpW>;1>jPtuZGBc6U5)MibZyn! 
z0ea(1h=khC6EO=q^w+n1Hh2(Wn6+J-mz6JY_+8vHkt03@3?_nWvCh-drRQO}DSVft z!PM5*5}Hqe47PdacGLTg5WPOn1B=5Tb`*M4Y@c->e1Da8a2;Qwy z5Z>3O8@PJ=^Lx^cje2yGr3h>1sk5JZmf`Gj0Jxqpw2ywHNuSt)9HB~}{Yk8qbe7ES z^s2wEFlcV+HFvGG#QVlO9~z5|LJSK7cAI6Wpq4%UzYyqLY zisEt#+b4dlUz4a;F4edHO^;U`(fOKuLcOd?eUgBq^7VfX;QR$0z{PP*E}!DP+o=aV zM5zsyb>*G2xKOM0^5plfJ`h>LQPLlsSv==i3@WgARusz0@uztV4|%Tp^cU^2`$Jg_ z{G0IC%W0%@)-*$shv(0SaI#=2*4dZjQNbdV>hg~F4Q7ZAnxr@|Y{qm>;#ors9H{vR z26}noOo4-e<;U;ax+Pyf4K#+Og!O-#dCxP>898u0tKt*IQ13KB<}=KC56}T&$Wfu+ zx3#w=z)9&$f1pWLrHwvMNT9!=f7HZnoMxHc@F|&fVv0x-4dXKrkHj(* zuZ`Q&k$)?~5QYZtKK~Kp_jn2CeqMaYM!nTW>}V!Y^GDIC)>VflQ$y-s-R+4FMD@pJ zGlu%kJSNxUZ+-))1BCYVXyWcn^;2iwqWSLAkHRLXZ#JG418Ltrhe6DcZvRxyk~EMmn8Hb3AnAu|@@9I*LR z^KjsrhRA-d0VY#Nu3dCmxFpu`slwSGJF_x(r^{(o+H&=Dqfa-`$glrD4J?25OAw1M zFHU49OunXU4f6Fq>WZdD%amy@__d#4+oZWk!N3-h$(2@EFC;~X&!VH;V3{rZR^L=2}cIVh4Z6$kEyA7Ti6qNW_&--xqPtRY^o zLh~)iv~SHs2On7MB*4DlxJC5|Vb!W2X5N7fw0g9_vL9NVF^FblTuc1Ae(Ku8|AyZI z?`Z6)m;UWl8KbX*Lrk5D-`)0!{cButG%n1HlJ<|W#Vs$*-btGD3Jp!<+UyGMU6XDz zAK^zl)3MjDHD1wkLH0@=Tft?Uou&D$Gd@hneC9NSB~T0Zs?vW~KPtK7O+9_ZxdZyf zN5$L8sew+h%bz3eUi}Mk{rN{F7pS&ak2FVGW{mPGX3tqFa6>k0VXbIr^yUKSUY9hj z6K=hu>k>xP!tHc`@rh-Qr~JP2?|V#`(Y{K|iAR{OhoQ{vL8~X~uOZ?qIIM(me7!AO zMb7HAB(EfM&8@Guz^~R9EmwL{=NzLpNfJWE^;R!_qT-FoZ{-@pEW$1 z7k-(WD04i59IP%!z}@!Nvh}4zRR29eL@ODbX{!8e1M<2(AL|p#gTO56VIubSr$>wB z2w8K)0W;sj_*cj8t>jFokmuq|%=Ax+Oy^P}qHAjXJ17Fbtt>2clQPv;3ck;V(G&dw z+a_<#2>i!h_)n;2<}VL|{mXwjo>t+1NK4Tp0>7J?EkpFZWrhN_6@(uh+x>BozeG$k zvL|d?78Z?n1aR}yW{^ySV1Wi zxiGH;UZb{5)3aN#?9Bd$P=NDPYwnyo-R+1gFCA=m_BSv~;-X-YhZ2FW-eW7@`bIi} zm6U_6)qg|n4`yy14bk*277=bBNANH^@Y{2#<(6&rpwTAL&qhAkL}f^1#o~oksU1X0 z|5+XZYoTAwr!xaV5`KZE*Pa~KM{(nqZTSIlM{yTiw_+=W13FB2V$J_Q2XmJK1_N6D zdQeoozmJS1mNn^5KAoM8u&N?~TpvAf`p(O6AQw?xzBO}u=V1r7IF!6cMg`=NS@Sh#9*mPUNJ^ZV}P; zz*WBXta^av45$aV-e=Ri%=34qcLzD`Zf;CyN(@(9pXs!#wdFQt^ZZZsp4-zf9UQo> z`;H{V&8q)XB6P?jrf|Vx12qOBp+Psrgc5xwE9~P5Oe-2Mm2J>qL?%C*&QYujp;&^E zOZ(@^dOZjN)n+qPvHnh{dEa{PGM1JCx09`!rl84W5LL?Z-2Zt)GrtMFwHzKmtWQY) 
zb9KZ&Xikp``T0St^XlhZLj1aq^x+wY>J-;Z2^V~Zwyn?7FKNHhKuoM6JBaPQ{kRcb z%7_FG33Plnq^FA3w5e@)jg6|lOyU^+1u3nl(l+a8thxst2#h+rjK=q~u2odj@E?tE zw}2HygM6%%Gl_^W(3+IU8&D6NW-DghD)@2;?~yThGsIOkT+Oy%h&H6Lq*pZLbrEwb ztRPivfb-2J=*sNDbsaAyli#B>KdC(7ql_2}(ci++BCIJNSN)%UqQ6@dLP9-SA1p_- z&tLnFFLHP`1IZJXkfWm)Lmq1`&O^6z@p8j+9+#L5{4E|a9HO;6C;A1-4e3S*yE2i+ z(67-ic&!v+L$;XgeVMn9HULaoib7EHzkLuHi4bRj5UZ}3_^#P3^<4b$;Pj#@7PxL! z?KE@Mv~2OueNkO#Yv)vZ6+>jLFfUc^qVf`T&`bL)b90F<3)eS3_6{|0T6=7Em!dGzmj)4sTjOO-+*Ch>; z)B|y>9NFx+!FND9(qCU{qf20esO})4i2*+W{_j73Hlz1_<4?`sv@4kjk(V|Ze$!@` z(0Pz;dh;T0-rhujvuNZ%Lk#>~fP%BAtmj7oUy4UUrw;3b+n+OucZ8Vmn4W5h`Vz1D zJca(b3t+U8r`__qAHCa7HB+g|OJFRvd&r>n9@mGXNPit0F%6!y;74J2;BbFYsI%9plREZ3ts+0$P-1$ap>~xCpPqFi!QrTMMyMmLQV48k& zgxG!WW;{&>HDZ--+<+q)rsAyJ#jNcu?M%HOw_An1Q|;I;)Tl0RR@EGuojor-Ugjhe z_FbVz1|~zT8vJ!8uKgAGZ;C|-H=Qa*{>ou4KO?r44##yp!BpP}+sEhYptjmR6zywC zOUiO9Jn{}=0NKcn=%s5Oa8sbxSDsq71Iy}?aT-G?}nI@~#kA`9) z;PPq!Gaks*>E?dc%1hDDFp?K~t#s3yq=XNZ+mp8`k#UJpkzSgve#Evz`lsH=6vT#y zUA-~H`Ec@Dw=t#0h)$qK8ey(23a>BCkM7zTv7-DaGiF8UmY9+E!BE1Nznx?y9b~+1 zp!l_L-OC`>$rE8+edWz}9{NMkamYw$kE8)?M1R+owEYJ(arMZMIU%$QQ-Lbo#1%pV z0iR~)IT|3y`r*zG``6R_Y52iO3dTC`4dVv{5uiAsVso^q2^fjG3}B56?NA?cXKER} zi;Vjt<<6&y!1HQ>KbZfcI59I8B$T&nOkhmHB&TPx1_kb8*79D_uA!vR8iGT7lL>s& z5>r16r~bzXxB2=NRBb%vg7bNw!ar`=Hd4P~nKuS7p4l-7Eimf(!F#HIKEI3+f@Z}K zD+w;_dB<5{alD=;;F(B-bB*Ev zw!d<<*8Zn8f;%KYpY@4{JKZ!4(=R%=h}{SIGn0is6tL;MYW(~-9kxtd(EQ5a0p=`ZC38d4H!cM0=_1b%= zvp^;w-Da>Q7XnC8DGEq&$!P)lr%l9ic#+*64ZBmw`0c~fpi}}@r8}9#bLLjp29mR%MppKwd2atTO3op2B&C09+s-T z0v>b+qSWD3oUVHHCP!{AOT`I1*6=-d;n(Yc7@m>`E4qw2n9mU^GC(&PomZg#mt1#{ zTk&J_Tscy|Byo~yrC~E>=G-hSTxU0}BFG%>MuZY!?-QR@@MWBnOl5wH*fACTwdZ?u zM?^T#3qm$OYN(GNTAHtR$E+@@_nez8$uuU9I27D?VR9wfWVXd%oH$U;yMGUG1T&xs zBrs}CBIn*eCjq8_qy}pYo_01CLy5_p;#LAKAjzrZJPgQ$c+i$ID^=PO_TzXzotFojA2gxn{zxID(qiF4XuhB;L3 zgZ=*r%UM0-Z$erW>kni=$piJ;$&Q)hIKPT=wRtlYWs6>#e7GbDO%9&(;Gntvq1}Vg z5p*lrB!`7knYoF$d;?b9!Lop9=EJz+x1pT26c;h9&Qe zO>6Hu@@7EK_r=}34NNC#G%x4^zW9_zuXXMykWWJp2s{Oi%d>K30VFFslr?-AsK*^! 
zNKt)$0NjNpXf|c=7ze>Yt9V5F?g#G`H~o`>fS}`qtv>i zPa*rg7xNgd`pwzL1K6WaCk&1s)f_z1(IZ4TLa@}aN=Jy?Ij9`jghDQ!dK_4 zof$@)n7#Ku+uryeOcA34jI?%`g;2hvm{p8MagwXzTgi3Re0%~qO3!MeugF&GI znymWz>Y%x54@{D8u~Xkd4$E{&R>ptUQZ8Y})tC+*+KK@7G6z-PPg+VszkliY+LMb) zqR8jmjzAg@0UN-@iGuF^bC5d_2wERR?_R0`_ePzg5-3fhvLs{5nBOB|wQgD(d(K}| z(uMQ6I*x7eXUjCn--$~Io@;YyHt|Q2I^L^js+)H?s(wLi%o8uv_-&P%l6=(xM&U^Z zn0~13dYj^(OfMsrgU#}=Not=VBRtMg4wUwUwb0X0sTU{yu8!MIL4qmXzJjl+G;QXD zSQIsaeSR8Rqt>j6hD-zl)R{HJKHT4?z> z>WAJkv5Y9m9+!-v85$jo7Z=OAePyby*0$TwyT<2h20t|Kbv_Xz3|PDUd8H_%vOQ}v z!L+pc>e4Yq=2i4Kqeq1Q7&nkj0ElxzAddFS#XpC-0})2+d#fjKzq4c(Jo8|zq1P?b zBUF-eEtG{Hdy}JuRkl;Zn+4fp+BnPThZHs8t#2)Z;+2*Cn4Fv%;g$W?3u@L|xieNA z&mZ>WFQ_TSVj`hMMgU5DZj4il`1)Tx3v&M{+lYPfDF@rMRE@5w`?feN-f-a5kletq z>A<`yzF|n@#(LQ#T%X`J=;wrr*^4?}W(8T{iYnNp)*lv*mQOOw>p-!P5ublSgfVR5 zwA=6gaiLq^D18D9GYfW^R+P0*X?p!gv_pFl&Ne^M*&e=X%#W7r(YN#bQJG@59wx#} zX%C8bN_Q$Mp?Ql={%7X>h{ zl)#x`4+@fA_}*;&6cGjj(Onlh|7V`|RSX2{NEmVuz}JqccNL(pJ3v7V!@}R-lqk|t zj3{OfZLm@G-C3J3#|*b1zO^?2D86-QT1Q65tNjP1WYx%CHv2XXf-+910E-Djshj_m zcu*@)AuDRMV+*XeCI&cYw9X}mohGCTi8{p^S8&=k9#&i_JEkzV3h5-@wYj42`hZZ$ zbb{Ed?hS>jA1P9$xr%?Cy2;j=YC3W)LU6u20JozCcdPv~Vg-w3gRsBrC59YRt>P4% zWvt!{#1B52ut$6 zDRVV_15nCz0)X?A7U(v{Kgl8*7gA59!$&gcR9p^b9bEs&GGOn7X~R&to@!N;wE8;C zVV^1m_45`I(c6o@n{8rn-oYB|Lw)MDct7U@(=|csvcRB z)1mZsCBJA(WmW%fdsaAF*yipDT)(;5?v`aBI>;1<)x1}(R7&MI86~wU#`>1$nfNP` z1^dH-G)zyM=4hn>e{S31$j9XMryU ze4mz~eMv19?|cK{f3}REG$sYAZQpkW*}97cu4A4Tq%)@hTvsgbuEp|L&&vQm7W+yC zXp;ODq2<5j62^@^V#~-XaIXThb~}Liq1*5eOeB8Y6+&$^!iA5kOYAVCoya@a*-#|V z5&U!w`kdwLq+h~!P=}G;-tN^b`}6Eo8{>8X!TD}nEy6&0!bsPu9w%So(EbJt#7FNQ zs|jE&47j~~OEprPthIz;gZ$Id9$oeHe{8@hS5ERHS8A`eUhyF=ADlj`JuW(M~oogU9ROdDr{?J z##oPk1MnR5xsA>+yLX*lZ+7*(Mt26^m&eI}T#y2{Q6L}|kx7|RKrlBvN3s89<90FQ?IQDj_cIloE(Z`&m&?9k&1GmD_R;3(E`Akk2B$KfbNam>BlCgIXCJ*q~s z_&4tcf~;hRmMIn#Hi@iWbn@)+)|Fm2JX~SFfnCte3@XHoT5tG$p1)n8_lN=V5<`&% z{`lITklE4m;N2!yjd%ZoUkOU<)2Kj{>;mA(AN;QdB#69 z*2v25ShSOP3k@?DPeT*C*qS?~3OcoTJ#B#OShw1oYW_k|ckM4oSX`5ec3c8DM-TEjvW|^~be- 
z|A{3Ck@P4Fstc8U!lZ{@`=tDO z6S)J+`CSM0!lPH2gYAQzOx^t^DE89YU%ob=OP~pmDC*PAL4JZz6_XMGwa>-G{|+lZ zQi317`DPT^vZbYU+0w>RmmZgXS08_ZRpZB3Pkvi?g2@Csd-Csc*(q%$XX@mqeBb=- z58KY>e(*LVj019R%NtDLjXEO9+cLTy31A~e$tbFS0wTDB36gq8a~EUNA`*55U zh=)sj7lPT71Nt8=L`c1fbMpV&bOt1Vi8q2zz_Wuo_lQa{Ie=UQ$S7x)Sr zAQ)zz1=T;XCzfM~>_+vPll?GOO@JspfL&%swFNd_?3Fgt^bqn9H%HT?3B}@v_R0ZU zChC!)2ZNZ*?@*0`P9RDf8x^&-f@0$m<8ZCi@fWdrHO4Ps* zn}RJ9_4ah*CkyKTX%}eGbdmx2IFdH(IS- z4sA4n%KI#W)|9Y{Y}Vda35u6LQ0Xa_4O;Q}kta6I1xn6Wxx^54+G*V}mDr4dno`u6sXq4&2HQ#FKsq$wKzrk{>+(3AaqF&$7LMI}|T ziGoAvf+4c5B4gzmGDep)8$fqn%DT8r$hL8eW`%d=&^XxtkE3_y`P@m788|a+*F@dn{od0O!p^eTZ-;*^FS75od<&bx6T+9-64Q zAU8B`?)|o(&{65H=x2j@>V(n&UZMa_cC=&_Cc90X{zJ?iKhf!nPs(d-uRL@>bp-nY z=?fPakD{`_T$6?`<@VpT;MXfmu)>JnDyY7o@EVhjVCnuCjlR7uw^&+X$sNyG|FOax z)Kb^_mFVbq)%X`YpgDn6l-Jkrffzmgu+l<=c;N{omc zTPh<4fy;<_>C&|i5Y+Q+UyXWcA4-Px6}TiyuDIdcu2Hb00Zvp~i$f&&e_|Kp{*xT4 znE*l~e!m-i9505^0xBXTXx@=*jwAP;@0Q1`G$BY+Oi8>tf{BjatJ0A%%$$#RFg2n+ z$-RkwFpfmP$LB$B+u_`+Uye5#I;5<0#dX7Ts{C0Tx9C=E&)elKKe zdn!?ZTr1j;5-KC0fx!QxNMIova$v#rS8t%I#6emr95p5Uagp3gu=f4*)uou=_ob$` zvn>AA3O7b;we9!_RbR76L;B=bz-u6B=yH*}fNh3t0qiON5G{)sC}{XL4pbL*VdmZn>8YUM!xR z_%11`WF2c-V7%I@oux(XT9eD@E3q#bdS6^$E&S5l+VFn;ssyuQQQMOy8qqRj6CKq? 
zlpjh^lc%?a;S)kaiT=E+LLe7kuB}0_KFRT;*#k@X#5lj&ZHw|m{Gk*dLJAHMUxe$HmyyCo)=6;6%y;5rebiwdHR03WPw;VrRb> z(R2MDeyyU;&ESDrEpQGbv8M8vGV>m-_8}q^7NYPqs(a`Bucjc54OV%v6adrmoPTLt zwC(kJvB$L|0`fx`{U^HArJy#OQiHsZJyPZ1L$sQl*|+#~rx}79D*Tw8_On<#q05*2 z1S^-2k={|*`ABOM%L|=-O`{3h7l>}W-j+j@m86&M`f)$6pS2IZ9>q&V%yJ$L_m@MS1R0>_|8l|f zyI^FK7fV+|FlS7!Ew9&4(H?OlV~j6biW{~jdOX{X(=~ej^sYIu35Mxw?=rTDMCT)E zI8Nts;kz{Nxj(pmS#lR&{ay{h%7=oD33SrjDuscD9X z4iw>H85k35irZ4N)U2Wc5H@~nc(A(VnRW9(sh4m47GcqYCl%z?1-qcO|I}ufR64ec z7A}%WO0Ti))XjA@{duK(KiqYHoPx*z{#TCOk%l|U+2Y)Mve9OvLpID2hZ(Ma*4;I4~vAsX}oRVy$@wI_n_wGHcK4txJq>R5~e6!{-!4x z&{}iyTXt5YQrei@h2N{$>PngKiB0J8z!M) zdA9oad*v9Poh!Rfw?#@q1o5$}-0trFfl0>?bk_mL;}kR$)F=BLjqX!-H`}&GnkM8c zWq;Ja@$&0p&qrgEPpM`CDpHD@wk7WAcdNJea*eX3Dko5zrr!Umdj3EhuE^VtVliyI z={i50bssK4K8>()Oh1V%j@aK2PqmlU{$yP`?0hVu}C)JY&0nWCrv zZ}0-J`_=9NaODzL1(^Yk(x#pPbNcP9yxYEC_sq%{4*cxl| zt6Mj)Fcy8AlVG$Z_i`(5fl5Y>iQgJ-_U?6Ko=)dZtsc8GztcNJ_%^#j|7e{QT^&0Chu0-W3g^qhHkV}MJCoBGS z`Eq;C?6heodZDz}e<6tijV#pSKF#O%#T)dpW*^vVCmi|??UKt|IOQ9J6Yuf>@5)bE5;`s7AUa_gkqsvTH%zdnkca&K2I%nLO$?X{bUQcHyONruDlup)SFdjTYY z^&9uo04v0j;lGZ93u^N`1)r=}1k^R1eMy!4VuOKL>5VeuweKgfUNYm|G?!9rj-WDL z>S{Z?6Z4QqI9clCW~-3?ScF{ZW^Ei&Wn^TYs)|=}aKo)Bfjw2=;j_wOJfeX{Ur)8U z9m(9ogLQxKc?C$S0+TS~1gK>`68?qdFUk9KaL4)14y=B`rDFGI&&-S&FBl1V^+}GV zSE@lc2PGO%tDFw%(^D3uu8!|~eEIfm!bsQauK@RY79Nz}Du_gM^8L*2@Y z2uo&Cq2#{ZEqxP0L#j-gmu1e%sUoy+!&9HRSCk3<@R_u@$rKG3mh;$dwj(9mmEfC` zT1wVUl)OjhkyvD_7Pj8Wj>E8m#^!1HsMHeO@wD#OT)om(J|+$IndF%oQLPTUV+w)f zwGAzy9>xzucb+TR&#b5&oUzjikD&l|F#@6@&)Iy9`>mO7F#w^K5E~Bl+iR}4$jww4 zV|WNOVU1Ek1a~idJx8`Qr?gkD$)`G5TPHKCujCW(y|ZQK`x|6n@DMJ2^iY?P#eF7ip(|_h?3au($4wxPe9d`aJ0; zAkXT#B-K~gHg&X%cIN#MIFjI>k{4(jH+js>Ei&>#-{5mf|79e%zo_QDPkNV2&okqd zD#AnzPZt@&vZpiR@=myh&*GA*&289PhGx`-PoHKAHM*4$xLHIR)U;$~zHE=tG0oLy zU00swsZk*nx@zfNd%kHd8#C8dM~QP#plJ8T-Q3C$t@R0Is&DaKV6z&r(kAminEOks zQO6jSE8MB!`8?dt=vl21J~9SJ(6yrJzAZ#}V=TrbwNpJ><%L&0AD?28h5jynjrVH^-ZKTT| z7J=PLx#0o^$^l4HCm94%ayt$gD1}tL$sQk2HzlxN=7&xt%TDfiNe(P>C8a{+=Ie5s 
zV&wH%pZ4Q#6{n!`PMY@oaY^T&0bNUD*AL1lmt_w}IvOm*z zAk>=HipEo4=6=jN`g1#U0EmlI(zAcTIV_~8@^XElL_Ph-rHX2C>sCIX;oQ&}4h$|VnjWDi^)fw)k88S}LC7-PVM2x0(sq zmHV2EmtSi-tCaQ@Xm`ojUz`c#d6II)pLvigi|%}?Np!q9AW^;~Rx#fqR+5aU7O4$< zkAGX_0^)Y}3esP*C>e#X5O*9c(~l=Qf+yX;D+hevsyPm!FM4h61Y&H%=!8CHp?I0I zHs6J?D83#HZFIv}OXTpB%+LK@9Gf&4e9=Yx}ofL#Hz+&98z7KBlv+Fi)tS!=& zJT~J-Nt~X4myBSj;Xo2!f)@8aEr%zgRHb?mjx^-L?!r zfwfad9X#~7I!S>h%5I{?&phSDIq_8*yvXamBP@WImHHiw3|WW2Gv%y>-kWgb`681m7tK4#>{)~b9z&N_+I z6izGFnK2#yWz@hFRd2LyT!~W$bo^=`?tA32oJj=@7+a*7hj455SI8?3mp6GD<##Q} zUg&8)d0vW|pY-cA5b9T!aqr_(mvC-v`I!m|hI@_7$=bV3-JWlvHO}Y-@9d20C@B_9 zO*(s~c&?jvog-iK8LeU(`7OjILlYXbF|K-og`1j^BaiYebCM|YokxqHxKvpXDGGj zr>vmJUbCG1mYvEZ>N(%s!?OSzHplpGa!uarIkf^+31q0-wI=p7Cdvd+q?_FRCdLG8 zPDlVTQpQ_d?RP4CYkD(x^uChkyPw>rHqrc@b}5krDanyg-i!JTZkFy{57X)3?0f^g zdz9hEMc2EteX1y1?E9I7v~fP`X0bHsBw;Z%;}Nuf>lyo8V+Kuf-F!-!#N1enS50Xe zv|n4q3A-?Bv$c2zK0dZNTPCE=I5{$3bzQqz8z=Dd+l0;LM=7$3*w`*R`}TqyD!ZnN z>=VUIk?y&^rbx4C?fVa{$tx%d84!g^@r=d*m8g8YK~YZZ)- z-XX2$8urlR{W#Je^5bP=8hz!S+U{6K+#0bxs0%p+#kFj)+%wtCl(y7HYV#2jFMQ;q zyGpd+>`ud;@;~qpNhSACCbDkN|T8FG&8{E4{ZOeXAnhL2p-;O zIrhydW`pZ@N2wW*HWd{zM)1|n5U=h{=hRKWNo<%8($XX-RG#r@uAkHB zH0;63i=n*S*#HWa8DDGuXic%lkE-Qs@@;=?`^v})H}t-oojZg&M-zwsj8BcJj?OnF z{RRmm20P{_anbH@W*pj+^JjDO9exy4Z-j6%+fSPf|4 z{<8$SgY2fW3BU}d?wOkAhyc(v-lo;IJvs>VKXHeE1+{r)QNMmoYp%z z;}rVx)D)GEY4knPX*T$UTQ!}2pt4bp&0O$ypehoUX-QzIpC`F+aKiLxu$jm~hwKNs z%=NXIk9^$^2ODMT#!e-LkpqqEzz?t!2+I|BQqz+@t>6(^TyhnGnliwabI_Z1ld4z9Gh^gS7kl~xi{Ah*) zTKCpo8HRJ;i1kKLMRJPQSo5=g2=u*77x!p)%CJ3nTWsc8!DO!6yfShTNqzpZR&@YF zB^_-OcP=P{`2)x2E|khU3Y?eDfbAS5iJx4vO_Eh4O(vh@9RYX^m*Z zU*9$rDFb#+lPdn}QrD~-w8k_Fhq?sTT=WRPsL&Jm4tRG6E}!Q`ZIBKnP!-4obs#o% z`0&%CyESA_7(#dGFPoSYqqC{{d7e$HV8e>#aTrqs`ZIU4=C|INC>DyjCU9uT_ABbB zFJ+(QFJ|mPE9wdpWPNI55}2oz8W)GmSzLm4r%yGqFV5uVUu4zv((fH|A5nEXWavL0 zA-~bVnTtzVowu4?oz(wCiL1#v?e?TyWdqbIt*f_shNu3m5t9&4NX#d zv6Z;3E~Lpw%&2QgwVj>_@_5~V zA0f-W4HhYl`S{E2#n+VRcI(p|&nHy4jnj@$(`d(2X6kk)= zriVclXQK_BZGAymgzafzfVV1(?5HALx$``OOUU%(!o@&g_2NCIc|vlc)ui5xzC2dg 
z(IN577t5CUg#l~BFHZH`aCRtA>vdYUixAr2fjh+1_u-7!&om9S>9wbhkP+>~^|ll@ ztRM9x?x;t%G!D0%63=Y2!masx7-LRtOrjZjndjSPGOjMtuHGB=IZsdRQXUKCu=I*? zORaK!_3A$;hc&bK%kkipm~t)-+cVTqOVZ8AP7i>SP*0T}&#{nCPvGD^!R4vV^6QV; zi)!r|MYOb`?4e|=4)JFN(Q6mmI@3{=%b|0znQ> za4?)ss+B*bS5q7{eOdns9#BiqB1)Y1#IITti(Xu3qmN?OJLOCk*3RqdSkFaBD`A zoTb03slyNy4kqYIqCTHrC8f<5BfyR(Bb_m)lTR(P7 z!P=c~A$-p5`6*2L#R(C3VA2JY@V(Hgtxyq!QURwS3PSyhdU1*0h-J+41zoqGv{Mkv zVRiu+2z$Jr6W~l)yFdwZZvi_9;0*fm2~FwT>(*lFWiFLm_!VpWQaCr!YJjfvtIpxj zw51K9-0J1@h;(uhwo)10mZN-AH#%!w{+)LypF9o~k=QQ3Apw?~#X|0n^wH}Fix0RL z`*YnP*h{;9*P&4Da%%dnevE!T_szD`<(c@PSsEfiE_(y)(mjO8{H+OT9<8gtqFw|z z!2)@XKUy+`X2H68Jh8~3MsT918Q^3+#;|#!plCApWa>gMy*GP+Dlx_|ikj=VIJ48O z!$!AC#lLYu$yS0$KXf4!(W4Y)`b$m;W=$vffwr_H5(B(U;L3ShfL9^MY5||0RJ>5G= zniF^`5v0a;?`*``Kl#OdJ~2r?+(CwKN?PdyaRY9(CrJ$g9Lf**BOPzRk6WBQ1DvF= zp7nSbGfBURKdO?z@YiM-wLU@fRz5n)_yxPLsH6bND2bguj#leeDHc)RC0|uCOfbvhV< z3}3BXE=E)XN`{UnN?1j!QnGH6czS`OZI$RC8*rM_N35(E>l?$~_|j=7L&=hP3f}p! z;(LQXRWyJT5wFc&0!^i6{yh!&3AG%(K9kp~t8KPb+;Vhxgr_*+So)ZQ)p%B&OSLd3Ph&sRc zF>{lqevQ(g;YWbRz;vOZR|uE1ffhg3{e7okIvCUD72vG$_r`T?4;^@ArI< z@Ados<${ZI&R%=1weNl3_ZpG=!?m*lihNKrYR_L6`+#o8iS!3zd~?hV(cw(q9}q6R z#DxFT@+8%Lahb8@_X{7L0+1kXyc|paNPgoTgE^_ZPY>jM=v$W~sHN<+MI3A|Y(-8c z&Z$LQ;u6cq%%T2Jo^L&gaytJtwohJtTHj@*f9B2Tg9_QDa`JQ?097SrK&i&7((P4D zqU2&YN#WC4;3>TPaLCHcNU6jKv#lyCsVwzI!J>Qy*I7?-owMcez1!1TN`QFw;VY_t zzbMe2{#0JFb)VA}yD9Fjbsq)frC~upS&W`Rl^lV~G%sE{NrKN!VUu4oeCIV?J*Qu$ z+6h^}b~4pAui4Bm8a2v3ewv{Dl*? z+qmZ~ex^j&m$h5%sOa(AD8OQCvh^*7k2%$*z86ASZeJfz9=(vbMk68jN)UdO2P(S% z2i9i*P)G)xKiTsiKtzWYMaWuVy;3Vbk1e*_P(mbC`5)ikOSy}xa>2uBMT2r#5-Sc2xQOf;sfoe=hwSNvNbx6;H8PX?) z^Q+~t-}SQ;wqDDye&5zmVlgu`utaY;9zJsMuG}Sj#{j%hBU=0OJYzLVtr*{ZOOh1! 
z=tCZ+`wYpIua#1ZPit#Q7cGsz-LLjpR{+xPGSPn+p+V1E?eT*DznJ-z#g5{$$qHIM z4Q6=U_m~tPWqx=6^!Iq8Mr<{7r`kugCe=0$R|ao9DX>;a%XvX+b(hb`wqf8|7QMuQ zVqe7XpVG@nz88fBA;;BuKr5tIAjNU!iEs-RA*QbeQvVVAWAD+hI}#=Slc;nxNle!-+_>n|yn}|p>4_9vz z;#}iQ9WJ*Y8f<4|OTJv^7=G_#QBb+|DZ_V@*gMvzsj5X#vr%1mWvDQi@!*N#iwnncb&RN#!%G+5Nn4m8 z&-~_QhN(~l5mBma^vlJb)T_;$U6}BAyQ?1uA}+nFMp0WGrNRYUQRuExQJ^w!m-QX>a=isqHHbn3=HtJF2099;PGsMN zaNDQBr`t%|I!AtV|D2ag3op)ggbyZacB@c=KRmZlPDO5LNZ>gzm&ZL**qo<|b{?2} zuq!x}F}?jL+_#XgFMUg$jkx|h)wE`snX+q2Re0^TM#6Bz`z7LjHQ>KZMoC_L!1K0{ z&BEm|R~OhADmZ27Jha^QuJf0wQAC0M#1RqR@01h;axx>Z$-IAO6a0NV?Z8*Pw_Sa^ zb2I@b8zv=FrlXezcJrJkT%dS(@$gd}jCQAQQ@}C(JqLzsc&Y$@*Kv$MfO8CYZ$ZA= zYi7)@cJH)hMI~gzBzx$a{^GBRm^CrWI?eA6VyG7E&BcybwQUOA;a-(MD&kT`b&%eP z<@8?a-Il2`sv;T@G~ghTZ`eE)C$u}pDKL+rSd__YmPUN^xm#N$R1#kD!oSa0_ker5 z_YLSRE~wj;k#$wVAAbM%9w$wXRZ5G3p3!m~57n~V9)}xQ+uGW8y{&^!1s+eJ00SFj z>+G8`UA>N{YKXdg1+#h{l>Ye z`rc;l?6dsO+Jg0_0)|#?fY=gP=csK>e5IJ)iekNx$+p^M;T{8BT1zd(6d7NxRXwy| zWMX7&5uCFPEWx;Xdr6+3`=drG z*~gSVw{Plh6d&l%49m~=s;`%1tTF^w9uo)mOf?9; zprsf)hpA~b5PSq|mrc0%!02hOzP+VV=ApMK{Z=+0ldWh2>azWQrzPUAE>SMjPdUfu zq!y|P5?H@prn;h>U|{4F*CUtI#|lgG%h)-YfX+J&Y=;ajX8fV|9<^z0w!@e`<>|bcTSR-@HD>wBiN$n zdNWMWZZ;@@%`o@96-#8*BzynS3LB5b!ko;E`?kw0@5lIdT`HG{+Lhd@w_&?NOcOA` zRydI`3YWn7oqh(-Vg~5h7-PAn7$b8N7HSvp7`Cq^VbWr5bB|;NEE)-3=CSa+HG)12 z8HuM8#`i3g$(5VBuH0}b4Cex}4bYpQwM^bTsh0XTxA}7uL>(d;B`{@blU$tQ`hAPvFoZYW`n#g2Xs2hRh3lHVQMsJ;u3cyQ?r8o zJeSK5*VSCE%-WXjN1eGkFVMS^zs$0}mvZupn_fR9(D0L2UK6E!e??^sX!bfgPKpEg zV*b8&7wVXg=l_a;$D|_uSOMwZW@%WkPO%;a0URKIH&jE6rWLd>?S^lidaIIi!$m@I zv2VfJ?rvTtZW7r?W-%F&{N^Ts?7J&R76mS#{(^z6PaSW!KX85+EMPpmhu�?1hRB zr0!U((jOd@PYMUL?B#ZZLSOWT>*m;7wA!#-a@ge8)~=Cm-91XsogfZttLLv=>nDka zHZ-*}dt8r<5Gi5Zar>NTaLwiLW$M0Vf~XhTT^$qI(e?B)%IGu9$C77XzJ6$FDSB2? 
z1UiNZ*(XNN#*LNQ+9xtwTH^9BzzQ-mM~82mug0Iw6o@M!Ew;)7HW7FieT>f!{j7Zy z14lPGSMS=MBP{yr8g+a7&W@I@4oPC0L>$d~5Cul9!RBx|O?d9La>yZRUBn~uEg%wD z>4As-RDqBFcPIYb-%vSX%};v#M6rT%@scRhbJ{#nOIs8O&klA*DdRCL7tcOhPPbpj zv$g}ze0S&4WNm!p>)fn4-<4iwB9FT{s;>RJ#+{t~Shv6;QfEJBoNx>kkOoPZxw!PF zl7WU96zyId$)2C$S=rVL8n6W_5!_VN6wAgdgHKX(n_W>cdHOhi&k?4}e^?A$L5d3_h0FM!-IxReE0Vcc!CKikXy z*jG#P}fKge4afhCC7(*Y5=yz}q4Ooz7-nz5AVsTQuvF9+=rJ5Hlo6A4Jf3nkZ{p~(RA z&1V^h17-HbXGKi5K!FG(NCIycG3iJ9iRl{T^4-9T zY|zddwAgv)_zDL6AyjfH011K(&*tyo))DeAx(|~W5ym-HYT9iMmeheH>3Y(jmOS!^ z*MB6p$dmaisifw2ZScr*UGq9-k(I<9!-N`e4<1q;Rw-ItgUf=*Tm>t(VJL5dZkcUY zq+mtg#*5v(dp+5j)Y#L%s`Bx9ia3_g?vqgaKwOV39g07tHy&4Qd2%*4mqY9KwW)7) z*bi6ofEWNBt<)=<;5;ykT@vZf1Q3J9H>VtSoly;mxF#7dGb=Z=oIZ#t_?a{Q>7VKD zwgegp3ei8^Gq5HR;{KqURZ#iDalU{i)O#LL3~xTI!JJVQE7EL zikO{bs}xnI{V9TyzER(;%$spB3A;8kU1V%Lo%vanxtIiPP3+4He9_mJT2xjhWRdLE z1MXkMY?jpkC00Z>?Li|-&*6SjD{n%p*gQAwhK9Z)9xCQ@5;PtK z@R z0@LXOb#=i54Dzy{YrmSor37o_S|ZyDJD!{-ln zY;D?_r@1p@_@BCV5A{AiQ=lQXU(?MA6r@Ao{)AJse$n5Ot#E&0wpq*fCV^7}f=q5H za<~{xED_nelm*!7XJ@QQ6SO~RvRZ_YJDZNGkkHH2Mj(5#BEusX>P&e=P!k`#>c!aR zYq6&LS`K*GaE;7JTq(5ZaUxG4&eA8R0*g^whot(D7j+%AVX}OfE;^3#B+|%OJ?sGH zyffL>{*qH!J!XHrmNUb$L-Er#348kjd{o0HQHHNGFAuAs>esQiK40w@=TJu zwH-KJ#WgAMjH#)pr;{AMJLB4;FQ#GBBOMccdpT;MF^<#9e^sqi8NoI-pFM{W{%}UDT#xb*XG^^z!te=e?+t`?7XJvIaBeb#244A&Bd?c@;{p6b2I*hx0y zSYFxT&|8(Q)$mRX7CEmCVrZYMX3ObxNoC}D+q8Jy_vT`kue5!3JaKG*kVy6;Gq;~8@s z3-yUl?DsUkFa@&b$t3(bw0;u~*;^F4UsjShxhiDFmqCTiOt{N0B~^fr)}r{}g^K!p z2J$tDD$+oQP9iL=XgRp?sRSX-U32~u$e1N4A6fp*5ckj$Xe+Glu~CpIb+_?U1t*md z2zzO|FRF)XOK$)8LI4(4^ygOoJYmIOu9~r-mW9}!uHM`;S9<*_N^a;)Jl>IKgixi{ zczg-F(Mw{+!EushFF9u2{O*&MbJp_y*@bIal8XjMBs^ZZ5jlRsnObbT95?4_+d2A0 zgmT=FIVt&_Gci%e7uz_D&#CciIGNShgZ%`+r`(qzCaqtd1P)H}B`te90$Miu^vwIY zSe%f}naZVn_shpnjAT7O89SZeWh#Xf;SuWy~Yn8a_WWZ6cU?8_t=+E$g;Tb4T znpg&VIo#UgZzgPOV$VSbb~(TMpM$ZRY*HJSeC|Nd#~XL{AI%T`C?Ef)BWUfy*P*}n zT+Sc$K4OFEv!4z?hlkwz(Ekd(gOHStsn&@4Vfu=iF=8$dZ-%+t8GO~AnMd|&?JM)eT+(OJjDV(!ieJN# 
zOL(4KTg)mDS;~zOQuGp+cQ!4@A?wKdElTg4)07RE_ciie`ViFLFk1UQgeEL{b~t)0 z+5S2prf=v_e&4jEb%eO(|T7HXtunoSUa`+&TQfT5~Nn3qF$l5uCRTEV~;;Ics0vTkf(vxDXML} zO^bK1l!R(VlHt*_qENO~w`PqUepz zXRu|S$;oW=My*db_LSdxN_?&6wGax0Nsc;>qe`4+pY4%fkAC44qh^aiH4^4_n|Fp3 zg835ytQ(R-`>lQUl^JDmq04mM@lZl$~V4LXP!)~cz zmN4)IR>+%?!(1Jva(yNiu6*Mp%r2+*iuTwM%#7?RlJH;Gr#}L9S*{*%Jam|7E9dig z4nK^$G^DkAIrf2cM`@Ex016dyORi6=TpRk|X z0a?Da4!Ujm@zby_Kxfapdv?nWy>iCEx^T?Y@pdCqlhbqZ1>#;(fZA$r6)-2XZ!#HK z^0tDx2L8+fd#}-ifB$4lplRatFd&&brVOrL-ZXUIVWcQ4`Ls-IFP@NQtnA4dISfvA zJV={`2jQ`r=I^bV{sNsV)%Bfa>z(4<491Y>(Rb*`m){d`Wk8x}&+63Fl)Ym9c-Q17 z-T4bpaPj_y$gs&Soj@VTclQNc;wVs5JlYA0Il%{^$~36&pq_)ajU0>tM!9aNH=^8o z^-y2Gnv4pXqySSY`syW}ijyfuCKl``>?9CwpQEqS01J|c=4%v1EH5yNaD_EfIPq?jx1|F zoq&2=r0;o@+)^UXC44I}>SA?Gy;Uqs88|I<4PqM13q*zx$)FmtqO8i?iN~s~@=^t_ zg|jvD5#aq`#0}EHR|li{TadC3^S#LgLebGOjN1wv1<1Z*8gA<*>{Yfgz<#8Dt*^)e1mxpgpF{L12*^AZS90`@=BTH~G7$>L=JIujQlw1xONI~tT?!%q)F;0n_v`>e!~f~+*c502 zg!;>?-yC&rA4EcYP6#9p2-Zxa$ILw}yWXxrh5|&r*i#7ELj97~nBK3|p>xMIIzxc_fHr9&cN&BYcl0quUf2tx zT*V46#NQ(K=5>5$4Uk7)0?d*oerGgbor{ zg)W{Qq8}k2gxiSPIBR^jmLYF7i7QeQPiKc4J-<=MNmk*D_uB=gV)7Mn?cYZB=6}M8 z4HvF8mp7m9I=l_7c7u;Qiy#OuATxnC%shbU)J8f`ffn%T{yB{PrwWl0_!Ba*2AUtK z#D~9eba_gRB1G45K(cyZUH2}04r;fnAIVjw>gGm<_y%|L2$l9J3pM1tT9*)f53+Zf z>f$=webBh)Zt18#CF(iRc#@srou{s556mdeJjt_sSH?3BMwHbEZu0N_&=u>!CUZaG zQz_4UOFVqc(jB+jewXdikxP$w9WGX0BBV>Lr&iH+QOP({&bl)({?BTmsY2 zY}`cDghYCG#TzA1Wh+?kS-O;4zP($0u15fT*zsQO-xfu16isl)wW9wF6S9waVvomn zR0H+3Klw`1R45F&x!$7ufvKJjsM}kq(2OqnwB5k7(({w5Z*YICmEp}gcou%3zF85g zNf-v#XPA$h^xT$K z`rasxQTEXYL=s|GR7*uL)OksvjhmV$o#d#ufi7znEdiyZV5+)eM(t=anA&T&p=7=3 z{1Q;**Q5)%7AKVXO@7^6$s5y;VJU=ZdmWq3I{x|&r?@0$%u;CxSnlh1=Fz+JL%Aki z#fP)`-5H%BRIkqZeJ=x1TmCM?Uej-HiTw+&R*AkgPFT~V9!4846Y9GYH0--gf^mnd zNS=TGS0gDG?-wAD+ljT2Y<1AcIrz8%i|RLxP8e+t4VM-wHaK0>v#Nf-{UNdr6nvrR zP4lKLm!fsVo~@rodu~$gBJi}4AAPVtv1`F;Inm~o#MPlK;*jUl&+en2_U|l5T41qRTX!H1d1G|ZNvOqhDS=QYnENiwvkgS^^E-+dK1l51?k_(cDTs`SI}4>ssHQ?m`++WNH? 
zxf|ZXEpfWI$oocw&<_E|y_%}SBQGM}-wn!-(sMQo445xNe*H1?cm&wJ!HqZY|FTc0 z*hM2e50Okf)x&6Re^ashXo?yDbtO;$o!=KhN0+Iv(Mr;zwL#r{u}Qb!N_P5-1q;UJ zg@eh(V(?knZN1S=X7dV3G2~qvCwjAiL~0zxfEP$ACdq|q5W5!zfvux|+C3>>2v-SS zHF3rKH1xn{JFt4Vcl4<7WtxsA6Wkhh?>UKabIbngoW~g`u zfTLPyWZeHGMxZ98${0giwWA8tdzJR=9^G~IvMQ39%N%Nyc08Dtg7_W1I_1=EbX{@2 zzHz?pPW3b53|Bjj7iC+Nry<8<0BW(t2)aI+s7-$zTf*;Ql`;6)q-jmIbVkUcY83K9 z{M{J|39bW?tsC8k=V#cKS)D|!HW**VYpq6$DoL6SHD7cq0 z$iVCxkz&jndH2#B*1s?A;HAQR=Nx^4x2ncPQ7!Jt-c}Bu$|o;|aHoz`AdhNmi%)03 zDU$Jd+H*Wr+xqe1|FK;m@h@u<-n@J3f4QQ#%Er;H+M6^?{g{bej{xlPHR|Ox$I;VC z1Hj+RDz_@sha-xj1JG6_^|YUs^(nE}0*KJ;!^+`lfA5jN=C1 zs@f1SUj-S)VYc^S)L0<%3U1X~ifYMr49dd2ulBtcYsNDqP7Ye8_-=s)xl2(Wa3gt< zt~JtIeTU0z!LJrRm^$~gpIBZm;SGi%fjiJPSP5${lVI%r)z%T;5?J#Z z+(J1Mxxxw(d$GM@o;WyuR%~F|#@YL2%~Li5zFDzR4CyI}tJLEv68#y!3> zA&JxW$x7bxAU`(00z0bXi$&ptAvDD+v+gau0Uh0i3WJkdRy~sYoOw}~cvXfw_7)xCB*ypScmjzz2%+$rB)DQB<;?rhJ@v-p z^w~OxtIaF0?(^H)O~tJYKV<`%sMjT|k{TG5r!k~$oZ%Paj23|)FN?`g@Tw-&xLS*Y z#CeRR*w}1_)6R=+{*Q2vS&+^>rGj#KNp#67J6~g`FvU5Xz<-?y_I96*Tj|rX_mrMA zHBMF4qfcgoaOl$T%>TYJHm~I~6F59qCew=y($l!p{cS%3>H*0UvVT6&T_t`09u=(_ zcPQ-XW5F%7`uEqL2)GZ8pSZ5lGCI{T%TPIA_Bi^)qBnyZ9s$?XUSH5#0Qu{18=a8Z z4#};^ER_m&`m;Uhf@Ho9T`_KDw?kvgSx3910DbYFI_5?;$B$H@dDUok)>0jc#giOM3W4Tb6>wXqw<#xB&OD1r)Y|h|c6{Y( z!9I9}2ON{iu5uScw~v#QU1df$%MU+le@m=ed%sme&!ebI?d^LLe>~zoMysB(<8P-m zr9;g-p1MgVmYab1MkLJGAc0M}Uhx#CL+Ek@r~`IXC_}jp3w)N{e`QRu#~$+tBW>vA z47|CP1>q{EKuh&xVN?d#hg?pm_)EHzPd6imu1jv#5=?h`QjxxJNVl@Dj8|{Jk*=X5 zbP6`38R6wLSj^`gtGUfR$n_@U9SP+D*Oyc7g(%9Nay(et4x);QR1+gG!)AC>?XmTU z2F90fT1RN{lb#cfEuDd?p{2|hcW0Tw2`QMG=x4C7=c$bdN^K8C_(fvN?cTN7%(Hr) zd=P5Gj=IgrRpIZY{3yF(yTn;&sfXbn$yj?ygToL9tJSKNz6U0wrq}# ztok!=?w@UY8n4F%G)aE5Jmrr!XZ{B~%op*U-NcW_gFCqh7^{#coUQ`NDQC z#!f`b{xp~6-b2nM>y1mFz%gZy1=Po1bjxd{C~QUgLDnqA&xsBC3W_gdz4qf>-89`a zhZRK~Bv972uNL{xb&q{&lpg>3M9^g`vK9-X*b@}@qt(ENY&)XnJ zM@MbT;f%7~hP~#kNQui4z9}Mozg7$U^Jq)4lKS)Cp(VlM$JrC;BnDiJkG}~JOa~n1 zEva1qU1;Mh_VmLh4aj~QCZcK!>XnL$#@zHv8yfz5w65XdO9|(;=S_;B&F?QhP(Ig% 
z?&u9;D#?xU!6nX;T(;IRIV9@7i4US1NZ7`SJK23VO3v)7=gV=jOgs^S+_c#|Lq4C4 zcWFUzN1mBO>#j`(SBrr2sIBBu^HJ>9!?OjD*nr(ML4>_F00qM=ttrpu0{MxpW=1!E z3R?5EX52y>hQE*B7R7)`lEC^C37gC{0NYuS1Hnge?_OmnYh5cU8R=fo0Uvo_W;$^gwV-3Oa|6TaF|zVZ6g zYzkW-rgP|`>=MUg%H*|Zw|lS^C!0lICh~G2CBE9}=Kd7gl3vNqfnbt;kPq9%kQk1#-$8ac`vxPKh9`!fB=G8q(m$?pzl89Wtj~28rzuQfVxUoXqs}4vd3#@fg~C%0B?2U2dF$GFZ}oH zO0-O>h}9bSK|&Su_fz9>MQVQvvr=h*9m{lh0ops@G0V{UqGj!C_J0W>kWX6Q{^SH{ z1xqfX{eDsE^Rys$ty|A~lVbUu-O8Ps6w4g$eF} zwH_|w%NB3~4X%Kj5JeOj`@W@) z37B==e6q-@%_{`h)Og7+e0&sg<$c-~p{Buu7xV^_GqNT&<>92dN ziy^1LFxDU6%bngVL|;>gKDw|0&a95HK29(1hsOOj&Ns;R{^RASBaM3V!EuUS0H=z2 za&s6-*4BK|fyw|gJ=$q9?4xYi<{A zbERs*)m@mwS-V{fIFM6ny|H}qjhSlIK|lF~iK>n&n}eM`@dmg9>n--+yWwkEZx~7y zdM_3I@<;)@7}iMdP;PDd;GTcwMj?{dHM-2cmeOo7?9RdsV3-Ghd{xc=7y|v1joDt` zlWPmeBEHi+-xTl^*=AnZP_I`-I)*5P+g2QU zBJ4p9R69KETV4rI4$6wCgr%c%%tE~RTz54@?i_);WLxcvr|(Ty&Jf4RPhynY85mC6 zmm?S($nbq*Gg}x__#IqR1`{>FB;jvcE>gzsl+LKY%GQMunz??uvnlDYx}V$awTjBigr&B^;SU91gF*?J zWn*>XY=9YESgfQ8YK);0juN>@;h`F!+4K5T7H?!?8$8Z^9AZ!(w@T|OeaBVuk7-3W z|HSj}GXalRA&e@Bt<~)nOD}dbV~f~X07Oj_>kf5!#X{ z5@>h3LGT^0izzD~2G#!)b$5W}lo@Jd^ow&p~QK8Xh+0>64M7xkSSQr z9sqhFC=7k&C+A5$q+UkOlhPMFz_UF@QEg}FjeQZFXj_jc`_lZy4Lm3_Ih{NNSLc9W3{s9w|HBUO1IZ`YzM}o|w(U z#Q|PqAB}LO*uG%9eA~Y$J~WU;XZE?ZNpWu%bH&Oj>-J<8newOL^1eJv-OPxue{5iN zP!C=No771D4QI}Kk;50%QR}*mxFv0_B=G(JA7PGht|I7 zPKsJ}-)kpvtyjlv7i_yX;i%PDuYe>R?Yqr|iS{PsE-H!>5FeEfE!re=OfAG<@BYB; zu8i#6AzKp}nbmOR1Kjrg`52chT5%-#vHIHi&93-(QDN)(rc7QufqCpYjJr2?Dd{o#MO@#CV0JDOIlsAkW9LxcR zEE4NiEiGP)nK#zXOs-fiFI&@N*RqZ!`)RdGDbvDfbOiBpS`02Ls`~g&Ss5=5aa9bV z!gRFScEYUL94GNZx)b(8+q||2&N{a@I4v0E;U!VZ1q_tZHyZFIaxuV!zO|m9C+Owu zBXz!*)YcoQm#W_l!rk3UPvL60MAMIwu zaC_af%d?=t$Bk8UtcrLJ$sdK2RUYd1jZJ*Rh9u zEN`+6x*HzDhGx?rr4)d9R2^eQTU>ODSzFhR*r&AeZCm!PwzzHJdZ6n*?LJyp z-J_Ztm_A2(TyC&(z3=W?YRT+wW3{lUFeSZpIwNHG>^$WIi?S_?-+6D#4dOo3{qb}v zmlmVoyj0zf2dION3QAY1lbVwyz814K!B+j=!WJ)|ivX^YP6Nd21&!=N_OUO) zp?QU((%FLhV);@cs>Z04RIy?2v8tM}qSxH<^srWDE&Q2vOB1tkf!KzOFa?TlLWAO} 
z7eH+?1+cBDj&Y*bA?r^js|bbqW|i!jf36HNU<%1JbdJ`4N?ywBRgeTBoUX)Hug52lC76&PF86yMZ7Lmv*FbuEM!YvWsyDRgMRnE_Q8wSHY3|yf zPYy7@LNBFqlxpBn6>yxNx&dUgSs?A1pOKAEX+w3DiGV|%|+7x7^l@{4}57tN9LSaX(F z*Yiv(f8H=*j@zz)Klf`%6dJl{-{!g_<3u?e}(l{MUQ7Mp3x(eR}MMGVe-~Q1X zFU9A?_e3)88&{c5^2dQ!-3RbQFP^@rZ3*3L<>r#Z{L^d#j>*G_zViR=ga8H`7K#v0 zkdp-k{wqu^b;hrR9;2IuuMg(R3*{Cb`5?>Cp*N1`#fSEf_>|jd&^vf*0U7_7YryO2 z>^Fm3YCF$)-o1DS*Qo@;pQAgE=-$93DrZ?)IAf1h^@}=cRJ><0+BgT{X}9M$XRzJP ztIV6@t%gvVY!}*dO|``qicFBuy`09|(rJ8dZO`NsM3ICMce&8>nT9??x=qc(=gD3- zi3s}u8~4hf3&9$SjP7g~cOs?yn`Ces55)|#fT8O%yO^%RyWs)+!G3mux7Ce`#i#ZP z3ZCBad70aAty-00Ij7XwpJJC<@e0BDRK={}pMDkV+=UD2-7u)-7FnymOoS`)(cV8{ zb2lO4^77R*OsW@q% z4^eo^T|%-^&Qpl?S`^Mt5qds<)pKB0NmG;d|51O(tR4G!X1CX6b8T{=^oRYf2 zBu9D7dn3H3w|_9Hsg2vGY{!q1-n3MXOY!S4X3}-K%Z~q$;jU+Hbc{!K)+J*8-HGM`>Tkv?IhyuaRqY z;ODm8%3g04BIZN-1NCYkfHTu-zHzxSpPj{!H^FO3HB#%&Es<6A zx9S;`eJLt;aS-aa?Z+rUZy^?k;Ct_M#X^a#+1`v>s>g10`Gb|LJKuCe2Ix)f`2DRt zz|}$(e{tUcj5p1X0{w_$mQ~wA34M!x>>=b>jukyQWz+1#OGT+H8HY32#`pYE&l81* zl)SRTb6Y&=$<8I6YF$Qcl};3^Zm3F_##eFH}_hI|{6o>!>)EHOD_a+{qB*GYOmV$mvhjlPWA$EHJUT zSdR3^I-IDzMjLJc4h`Rgro(1lKWfoWEZQ zfRq)u#v8Q4B@9}^*F+lQUG5M3znQ>dqJ+>?R;+_?4yLff*b6+@1m+aqqogoJ_5M8q!JKA(0iNL z+S)NU2Oj%oQxnH6nwX9e`2sa#@XpQfs9Z?o-#g03q!0Yxcgd&sb2h~-!D-7C5IYy! 
z-^K*CpXz`bj)u0Vkw5V+j!Fsdc>`Hp3vw78N%698JI-@>IE7Nie*OWIF=f#;o80Wz zx;5FTB9~`f*E;&97_KaR>suMR=IVyy6e>gVR~1saa8hdTO;PB{2T&zv=AK(pfKk5W ze(IY3T2_3>n$AMs!>NJP-5ag-tx6gI1Li=jp;YyoQ}!#`WuE<2FOFK1r)Pacl%~M% zhSq0JByVKyDN6SB0dO4Lyqi;*e~VrL`KPM`UbGDui-aE$E(h?}12-$n16)J4oMCfn zj(gmZV3#~~ljy!y-L&6dnwy-^M=#|`h%ZkV0$m$u_j|6B<2xLa*TJ7&yx5j1N}r6l zsWM#MNQEy_K$5(A_Upz3qvyK9pCjT5CS%sdJz2b(5@9bBN-Pg^^*5GmEGWnGXKC{1 zl)>MZ$rE35!avE-a;c$Cn+;g4KX1Q**s&GWfq$|e7S%u3k=at zyVl!7^zZ&vbO32BEcgLqcTV~Db`_=%#f7sE^9hX2N6T=Rr;V^9WPbKLMp_LovfO{S z3N+8=SydNdob%<;ykJypHKU6{xeNA;JWMJZnON z^VSp#E2Exi_mvH|cpTZ?vGohanKbvglK4X2VtX+d-86ENLo2;prcL*5y!fV*&&wz# z^7Vx;d->;QxEbfc176$lx$$t?F1EB`CbFz=wWl}EBGnP4ELE>V zo-xO~Q>Zm*vzhM^AhetE{nY^WYu)$jb6w&f$gBbRd_Up zLirn`1@BW^<#@sU&IEOG+RyEU_4!E(@pcyOS^7LQYh|YwO@{%Xt;9B>a zx%UU@4mWz)Gd}-z$12H2ay5@WzM*gCc`S~5%!oJU2qPG@dio+!;&=X1E$A!Fq^u!h zPI>7;EkZad6Jg%etaam8*ZF7YF}Y0ZAe8*mZ0~ZipPzcXcRlI2>iF57g_AZHMXp0##mDz-c5$KU z!V*#|f`HY7&-QNS4R4#OZV|(`qAhC&Pd0a)fN`+e)2b6q3Z(WohR<{P+g4{yT@BaG zy~pCjtoYwu8Ha*9zgm;e?m%Ne>Z<59pK?!oUxaVC-MbL`gFB(1lc^!Z1;$U zbVYnWa4$lk#9$7DOSY2M#n?Blsiv)Fx=gVn9uXYqN(;w>h`RyRH{1+?F7xQ4snxuY zVOSoeiJ8g(zlJ_RxEbw(OrhE}F-0h~7~NPes7`?on}vKjN6+T%`|AMEilXV)sq;s% zz9JWUPs0SykG(#V9PI|^GbnL5eU!S~2M3DnwTX5<#VNBQ)--J(18t}#gfEoHu;OMX zoMEbUe!cW|G;d0W8-;p|%obS<%2NtsAGPC#nzlKXP2Nq9KBGhtXxY6YS~~f!;q-IZ zsCE=qN7o}cau=L&EyOAi6ZB z`0(3|mlKWi*2d#kH29>`#YDe6H^=c;!8%OS*gPR5I$0-pHV%NOp1s;yUCo~StwtZW zWn4TDRd|2cq@Re0d?&=Hv~glmUMsOK6JXVYQ-jCjIeLxt`7EwgRUuteCNXTa(uFjH z?WESSg$B}-Ix}O?D)^UP&mUjkaJ^$eA9`BSH=rTv|Ez&<{9(eS$PfAPw8U2P)K0!? 
zS8gH)bXIn4AD#%ipW!qiDt&^P4-j8S&QKpPe@L$f$JwGTYQ`xd>5ss35vfRZ`=tpmTeWC%g-## zm>HEWm}Cdk@~4v3`nXyq@bkDel$-W`c!s)Cc4Sde7Bkw8{}{g;5C8rZ*7pn7SBxEb z|20{Bjv8%>sW7}?=2aF-*TK9a*!e8;_BW?nri8B-r1ksn_)*En*-|Ww%t_F?d2}!?SaHD_-Qx;#(`DXiL5TmkVi7PA>;}M@jeqwF$kG9 zgr&Ua?f1f&I*OZ-xLi1^x8>iuOgW9qFc3m@>=L^igo0hqy2u?@kbSa<65eK}s55_0 zE4sJBZ<`#gz&(%%_h})|Z_L-*7uAnqS0iE)g*#ZzY^8OpxFp|uStqia&#E+ZWQV(t z`zUpI@FF_zPdwSH{E+c8Q#4YR_^R8w&zmcpZAfbRg-ksAPXk`Pw<^Y%{>=*J#>!vEUp(r8TCp4O{B~(xb0(ry@&VAa1n6f2 z8rl(pwH)QhUXGS6ZOtMxyA1A$+w_%(lHUXQn?&6XLk*%ZrWR z%=3*#ojbZBk(s-u=M!K<5f#P+i+B_LB=axc;8Cgdw{;Jrb+v5`ozsjSQe}h6kAy8> zSH}Aofk5grYpzaSWg7_xG_{OJgXYm@*G&j^jTJLVYddoW8=pKo-+I;-Y;Nr0vvj^$oL?Q96qm)GT9O%ax8VXcT@zf=%(QONmE zi9n$LuhP&XVusODzOzt}RBJ>+>4-+Aq7*j&)$re9v0pxa5NGosc zL6Jmg(4=S-(!yuL_?Ap6gLruffh7SsG`le5x>$4k)JdRY8GhRZ*RGW%8hn9^b68B3 zqP=(vyLCI0xQ&H8b6qRIKw38C*YJkk$fY^Uu&X1+FyIV{@TeM zyhoVHVJeO=zi;l+^e)lDH-3;sC^Hr>F1!ARbi-iw+|+? z^{j4h`>gm^U@HYw*xKf$rt=KU1tHuIjY4q$a4+`h5te|u77o*gWDD{nT$d(QMRPh& zD-;Ovj}E&iFg}eVwY)GM>(;FuEjz_qwtbBtPK@y8uX2j)_)!QIq)_qy6n3^^;}MK= zD~Y0S7K;4r?>m;)O zUDf||0kVGKsQlJeek13d_#wHa$K$1$;bc6#7Ag5M#XZ@7}2O z1J|HBe5!yN-f-=G-)x6XZ-7x zL$0qHABBR?)zmZYqX}vVvL$fGtDTNd+6P*)m=q_@)e!zJ&DsCi2$#ufZ<^qJ^gwDm z!y5|LFwuA(&ozAiW;3jLm8wd^(%+wWi1{87)+w#@9lgkcSTlN9jWRo#? 
z%|05HFzJO^ax7kc=~HqlGx=1)w^PFXCCt9T9iLOHe`RI`r7?CN0&>R-EWVdRQ!E6w zX5%$e)tD0U)5n_0o0Ob%joq*V=2Xd#!!%^#JI{@RVp+ zJ$815l>9~Dg{*s_I;Nc^OP55)wZBd$13RUKAcS=Wa%DwT=7d$BTwj3I-3Y>ZJpQT$ zfF~X-5234Evl-8|^ysEa*CoK01ed~j5QQcQA9jZ^%KS|-4s8CxMf}?WAoj}qo5xL; zr)P84hv|ZD@u)yg_jBT<>D!jkb!U=r+{gq`Z!qb4A6{EIGl?ko zdAKL|B!FBN+!6vm%7@LVbNyzS{hf8`f4dY67RU*nBsSAHHAH4gw(mrkh2|OvN~|D& zYxxHmk8f*(jgBW2;%SLGl+WoqcJnPCsoQySNa!YE=)R_ena`2``?=S(&Yd>Ao?rQL z#!bm1%dl@>f46Xd_X+A5NP#sk4|1a3C(&O*bsi!bW$Ar^`g9VFV9Ck3ol@tNE9h${ z2ia8!Z5J-J^ZuFSFmOEqaPAT|X@S)aWFUy6&Rb-NaRSo_Q`E=T@LV$st=IY^H3aVdyn$(l*kP0IR)@ zKfdFFNvLFZROF^s{8QN2Z6H$ugUeO-lR1On9?ub$&9y0l%n-vc-3^{=`#O?nf332X zxdCOztjf_FrlMNC*cRdZ&dLE`^(!KfMweH z)%|KGzX6zKgY0p0VI%bHY$-b!&O`2D|6+1s zrTfj|?_cw*r+Ca)&z+pVg@B*=dA+Vy92#*1W(X|68gzl#-PN1<4fk8bOMMsj%G zZC3AtGX;Vht0S1_jti!b`Wuw&_90+Y4)yRLH)zgiAD0E!apldD`~ z(`;Eqirvz8%+i9-x)0qcz~>v4ld-y$`4r00Xo0d_A6J;0Zpej;X~*y7-S-(zkGM-| zvCPIG5biUYMdrBLoZI1qteVzx;mcI+p4i{*U+ir-3?AHdEj7x`I)oYN@aNxTEb$0s z`i-taJnhG6F=J6do~oVzWH+id^EgE*>+tcQ_q~E{B#@5Y0)jEHN2@(D92jU{uL78bLSg|pRn3cKZ%vxf!}MlY?kKIQ>HE_mVfd`lO&vNLAJ$Ap8Tc^K^&?$B zu^Ea~O8CiUR2)C?muT?(pa|t>e)Zb<^z=05Y~8@;SekU$K#S0%QI}FjK23CHzl-q9 zHU?9}^GSF`XW>pATo3UdrzE?Fhv84LvEBG!c9JIe+`Y z8Nu=fKV9V;ZIPz^D~Y@Iq{>zDCt>+e#bo>Bls{hE>J=h}vwG#R1^B!A`aB{=wQz@X zn|qidBVBEja&>!N!7~m$uV6h3gN+DWk!0L}MgeIcCnI7g2J^lM{@VE}M${$o=zCv8 z9#O@ov@Hg;n02)9)Nk?o%_V~jFWBM5#0{(h+&vkZZk@wL3C@`cDl{_i;?=K&+-Rf> ztIuL{v$@NPH6?}aBr=(?+&do$eK_S+Rc79Z4z)OC1LKLy5rt)+-Jar0G%6eGOL!n^ z#(ihyj(Gbtu`slqgN)WAtm%Sc;7XFY{;gpeH28oFG_a)q!nmoX7n^cdkGiiYK6f@K z14_ir>bkxrK=sv7@isGa5nT-}x@xz(WbgA@oK5B9VNofDoUhLb^(E*yEXomO2(>}1 z_e!*qsf3i!378E17Pah2qG2)Ly5LwCMLoBPVXy<5^@n&?u#!ia!V5L@Rf}*cJRLhz zQZxRr6Z=HNkh0mRr8C`N2RYy1dbs+ARCOD))ZcirM8zHe@{s=Js z%fAu+56BkqhbPfp7#Li~teJg)CSnG7o~E zH7afw>Lw3#AHDeoQ6Vk5bl^^6d`=tyr^#A;QBOkHBWuCN;xeFL_d#3$#lF_x<ezvi z`jAs~VAhW!)m$BDypXwpWr^q~8iTzJf{LkA29u3m3Ehj`RJy|hkx8Z&CcqvCfW0s7 zbbLN+W*G-Gk^DJKA+=gLZ1CMDZ9BC5uWyV~L(v6qf;8-E%jE?-NH|Z3+#oAK{+K|C 
z8M}5Doi7$YadPes`rzu(oJjhHN%Y0gCrRa0LA#Tu{zASc!_lCX6l+|rb_s!lgJXu3 zp!cJaT$K`O-W6Zj2D+D{CkjE|YurF5N0D9>piVE+lAG%J`K-4Ied_&u*sxW?@7@V%t2D;1g+qD* z1M$rtxM$u<;>M3*M;yn#RchKM-18k@rn@rjoK%PDFpW#V`iYk&YXDI*W2(&@>swF&Y{&9Tq)a94O)+k;Ox_|Q@NBnx~ns&G(QUSQ^vOiVfO z4b2yDvOIgr`Xokk($6O=ugT(vt*O9W7y{!(^iyf2IhFF!fk@da)OuVOHZDm*PD9G- zjasgh%j>af7rQ^rT`4oQgL3@JoRzgwf0?f<$MXxT14mskt(wyNV|25HOgq&K0?Dbn z3+C5S{k5tKrd;#`^sW*-E(2)Fb|!w=sV9PMD066}T!5bki%@;ni0Yp14xT3#Fuz99 z4ArFFdp+>lG3)!6^RI6`L*w|WAFz}zIl?fX!Hx$115+UcJWur4;_dcl9E+cogd=U^ zI_BPj2&T}vo&eHW*ZJa{$H{=sfhSo{{@M=PWimp^zY_RZxONHV&^FcnGPQ!8X1n%F z13f9ca9fZAWS92dk=(8(H|u@O4tB9pkHmHbMlF=IIOh$hnKu=g?Wy$`m(8YJo;grdztR6=adP)ygwu4;0pg~OOy#0wj z2}ntN)EA7T{b?Hk>P5?!XnQNk{mI+d1Lv^KR1wW&gGxa3YA}xbZ3&g2Hc=!#VBBth zW5&k3C>XUnr*d!d(0k0=bVqoYXVMLqqE zx8r9eg)UzPR$M6yQkU920!BO$ebNz0vK<|s8jT@aCJYKPOH{(>5XytLa$^JpsiGKD zNq6=GvDt>;gZ2GIL`1f2H0v`*@`2AgGHL1PZBd5i4U+5IAbih!Hzhx4QEs2xqPIj? z>gQF5nX5@Ny)h}FUSoO^Ud?i~NL`UEvT} z+_oJqgKh`I184jN=4R9t_sJ>c>*F=-rxS->J3HX9iJD6XMMsH2rgFftQT1~5uNlE2 z6&}%0LuQ-uvm6vP!wSL?uDgPjC`Z|3z8?DZdNouV0}0JN(93V~r~zu^F-OwuvY}^j z&J-3ZlHI+uY6VWy`38}KQ%ayhZhABaMb7Y9b<&BK60W<&L`I?#Z)aFs#*h-ivY~lX zQJIxY+P&i{(IL^UZ5mIEA1RYO4VJyY43p>S-)ghfNwg=z^S>1 zp4^&w(<1iggZILFMT8+gU-w@c(|M%#R9M+Ne*-B;T~?I1u6MY%3Higat~mKwS2*n{ z)y;zF(PK7*&^qLcZS!re5;XkHzrBpabykWCQfoP=lJ6`R!hF0bV3y$UZG)FKxWnc< z!O=r0c_QTI-MD|JO9dX%r2<>+asL}SJ=k%dCrE&oywmQG?swnAT`|)i`;I+Yqc2C? zHP~PDI$yZi1vIzs_rM&21fFez#Wde=+eP9mFJ9Vm1)05U!8yk`e|?T6Yg~}5{W)1H zYy0#k#KH2wY^~gyW>;~?!`lVoq+J6MFI*Al3Yz|~3A`J|cZ2jMsd}zx(pD#*_O;^;OU<;KG2?fOZ=#squhI7Xa@%*_9=^Z{r5+!K{!c` zvs$lq6*~C%$v8=LezP)m9E0kmfUtCZ8+yZW5A%9Oo#!@Dd+`y2mv+LIutvx3%?GirYTVRTy}^C-B)$GQ`NO6 z{o{ivmNyfk*tq|Ws3n6Qr9Ah5rx_EZoVvdCry&?(6@1bit2=!!VX+M7>SJ9qE00{2 zD%ZK&RgtXaPcTF&!$%U_crw}4dD|wKbNw-#MgD-9JIyTFMsw>2%Yjk>KyZbd%3zV_ zLqoU)IJt<%lEuWGhGc|MV^aQf3gFmh_p83t;G=a^Sh6dI<+xym;#>huLc&nytRL2I z5PfAhAEYys*JS32h%}FwZ#XpC4o-q`n-%JE+%$~y;xhDN_ZOOKjNYpaBiY$y6?0Sd z8lp3MX30wXC&@B3!6Gu1W^5;up4-XYfwG%JkGLqg22h@-KvA5u#?7jgpqk@B8K>B? 
ziOkVzPGZF^N%_N(y9Bj^Hn63rl{oH*Mt>B-snZ;d<`Ry$s?<4(UWQqIrOD>}obNwj z+H5yl(}G*yfM^No35^PXt`{1majMWdgD+K{!{us{+=){zHqWGAP4%na;i%N=CqwKL z*JC~8HOlM?Nt@WrY}$d&CEN5iGt#U((DzDknaEtng_fc~zG~Z6opY1C;|Is6i^B2^ zfrar!`T+mHO|NluMSX@)0P8cgfsk%NVBzmauW~PQee>Iz34ryD_x`MVLRgpiqmQm# zxH^Qw!Llc8kDc(4h_nv7+74F)iOye~hb3#9TiIa2P6Gz(yndL6=z)hCq1{r%!CC9j z8-WD56Mm=yCY_{}f7SV*?k~Q@lunu|nGZttp*IAH1!jv>TLKwmB@XEEFTHGUr!k)Z zyC!l_{DDE;)g1zL&tJru8yh#x!9nM5{!U zQ<^4Ia^dP062*B+b=#x)z4(vyRO_TwqzPr)3JN!~%|AXnTgMf@(PUtq-eG4v)@|*5 zTT7$jHtDTS@LlWE*KdTP&AoJf6wP}tJ3dBL<<=2aMTL(^dVbA~7*i^Ua8Nh7=qh_F zwwL*WfwWrXR!QNw+(xUE@+P481{?Q-sN}}PcU7Yqr8TseC2grEIJzx30*^t0A{BZ} ziD8M9xXFga2dg*}nstyXlXsUU)6}ebD&(t^d{)y_tMUs;6iTB)&h?J~(N*6mEk(*y8cl3sB6WDc%~H^@dyLV+6mLsT$;z9XVip~*o{k{qkk6-H{2^X2_i#g zsXzQ>t``fd4*;;mev=m1IYAr@O-Dx1PNq*vxGfb7@nefgVNc#}t8uN93(X2*-v2^v zhA6=&yTb99LKy>2ClYs}SgvM&%9+JtY*qM>bNc(lX~pJ>4B|&y;1-N_wREZ9)LXDh zR87bJa*Zc3nuCjt7Fx>B@~dY*FD~DARM#jx;~t=10PVUP%L3!dKNp_u%B*-|9)l?w z%HSHjV&yfjyRa=qe3eS#ShAjkkJnFvl{rTOxhVXW!V1fcQo(j4Y0+xUC{}F^vRmgC zRq?l-m0F;ZZ&Vq#(kVW#9YfV_wpum1!9IHlBfIRg=ZaA8Xa|-psd~!niGq>mpL~V$ zMn))o2R0DC>FbLJNA=>zyW9tK`oEdJTBgSqT;&z5ZN`E?rwSLKZ)mu?$CI~;7;HrP z(|X8VkrLAhS@K6zn;Bx1VPMG|oNfr%g}}!^(i?hzYwFl^J^b$SofB4%VB2lm=>nGE zPHtYprn&2m=eJcaVo~BC3zDw!u=dhE}?YETP_n!%LvPKHB*>4gJV`s$zGD2xY!OeEsj+|OeWOb` zvsfRW9#g3;gyqtxW*A7uzPO?@1+=dg;}E)2ulB%BgEGY7gF2BHFKN5-AQ7z%FvGQX zHb;I5o*kiX?}r$Pvvd%u5||aYbGtK8Ot|E zzP}8Q`xP(GQ$Q9yn9ouw<>CC_$ipj3cotI@gY&L>tedK?VX^t6H?9WU+=vsmhd96C z{_FxSw!(Nhz2_`Un(^eu7r6;5bn&U-E$8N(j=RaBM~m0fqomHQqCbh-WmUvM4pep6 z5BfLRvOP|C|2_qstLo%uD3LDLFXvh4XJi>yI#l)xkMi?H51z}dieQ)&SGy-(3WTej zOj2~P18pAnv31XQ@Zs<6kO2H47QqE^Pwx8KJ2O*}x`SwFAItXqUJUcuOzI41etx3= z44(Qcvyguh7fKu+a>9By*@EA2oh^K`*sg=u;mFjoFJ@4@OByYj=z#A-c*{m2vlh^4GuqF zy7h`ESJxtk2pim8WZe7S1me3MZdFMM;0?l|#r@HL(j!}x%bSo(f|>Te2VEqBkIldB zn7!tHGQGFeho7u_`-4KFLmKFJDnUSuKbRSrm2}kR%z!=vUS-5J^AjQOSpiE4TuHB0 z(?vn!j3IM2rOo&{Miuhs!OGn*anTNRykL}DDrm6ut{u6mP{MKz z*fX>=F=Y{h 
zugyR^($Kj-RXLr2SRV^ku%G0pG|u&uEGj7A9A=|?yk1|A*B&ks$w2z0m^}W+0m>n8 z3WWY6(?4VmW_>*CtQ(tw@f>iM$%SmHeJUCbUc~jy&wQeUj9x@LV9L@+4h+DF^?gdv zV?wgB2L9yk0ZvOPN(I4(YFS*0Lc#Ml9|UG{X)9~wB^z)Klnf>nk0u=Jam2 z3ny)Xxm(x9xJ@@&-LP|)re9FO#F0t^f}-M23-_9nTS~w90+O|S5&CkD|3GCqtUK@j zvk(5#_ygX@7DA@T-(+6_3T9G`?Y;#}X^h%?kD(}ZNQbknn|l{h1cVdmg}|*kH=Gg) zIyZh0o^jon`Hf=3U+FvDe0XQzqUj_sBpxe`3JEQf*2>P(%s%?8RdvK~YIQ1VEAP&W zitl7K z9m&}DJifzsy5CUA*q^@1eRj5(9lLPt`ErjRm}!6@<9tOy6NtDExJntx*f=EjJULNQ zJ{fWb=3L^D+rsV&H(wv_tU?77&efWF$lAVCqAVV|$*xBKJMet+5O{9L)MEZ;;E8B6 z9L+RaGvy`M3sYU2qv{~8YWsq|E=`@tf;?47pU(GiEKGgQi+ZuX@|EJ3!jTelv8F*D z2y!R3=mG4C@<@U!*VL*BXDfXZ7X$3Hx(!KlR-Q{yTy;a1ZcJYvi`9w|g}tH%z|%zw z-nx-?YiYN!f7RME zVgo{2cKE9i!hz{QGymGko#kYN5*D+htThaJu%p`ump3`eX8kXl`X8IZ11=Vh0$?!Z z_#N~uaQ7K2T{!Eyc|;*aUY!LD8mNy%58z^pvN6>iiPal;!@!{)Ja^Zdz89#EbV7MXc-C&& zgdP>I;2rv2YLjskEUF-Vt6K~dk3{-W!tA=CJjsF&*{+r+7Bxf zBun)u83EZR0CrweMKeyaqNfR_)F!#L{!ZfI(c4ipXi>I zUB6`W2Ch_tx7kKwVF_kmZshvFN0J8JQQRs#X$~f1GBuqP3%S0+%+lfvphZt z&1p(bpN~A3{&72MU@`U9ZgRr5SA|9cm$+M-6vsD{dl=vV&g-*f&hZ#LTMcvgfGh3z zdB$8z17djIVM3Na#cQi4IiJ#a9~YX;w@Q6?oP;W^5IR!*!@apnr9J&;0RFw|BXa$( z%fp3IVftm6=a4Yg73MK_Yf{L3#e{w6SKf!5qhwg?l^2_&W{&iqv)%tlNGxCB=ptuZ z>zB`DVPGb|LI;%dnHp!lVr8f!(lrmWyrfV--@s8SJAcc{(JiAu!0I0N78n+-fKf3h zU7ERd9+WN~c6~-g2sCDQ<_Ps|a~U)?SQZg|K|IWf!HS`Utnt&jHPZwWZ^T__V);WE zO~PJ-T*VKrCV~)*Nr5D{qbzQ_g;0|j_)XrAUZPb2Lr+UbJ8j-An{2^QFn4Ew=vK^Z z?fu0i(fXwj_FR4Baviwi+{NAUtwxhxlw@h7!&E;n+?N^Yl}w7jr9HtH{jTLA8nTLF ztDFg5edA@kCB1rl%}SYftA=$r?k&O!{ziLYDTNI3aiYgWnK~eXSaB7&<};huPd!mW zvRY$j3E~vG*~zN~d7lv<4G|FJ@%>nr5U4We5BN}z+dXhpIL%F-rKw;!jFrzsb-_OL z3UDF*oi9mA_E_@EmL`-CGWx`m zxd~(5B&M_JZn5KaRAqWnnDzQy{_E$x@`oOZfh9z`YqwG7eF9VY>2MDY!^tF zhmD&Km~YQ)F(va1C{BeG&lf8u*7^Uy&Idv$Khfsjd?@09Dz>hIXi5ayjC&?MlF_S_ z%%`uzX@@2siK31~rz~dAh#P#eyL2G$O2DvtY9Fd1tCfH`9(uyn&pS;e_H^X49XcQJ znW3y_30{h(!uItt{pKRmehBZydnDsT45MuRX*Jkc(!;DC3pncPar2 z@Noqm9y5~>+Q8&FKV6Pe#AZ0t1@2AEe_>i*P`>N!6737#Yet`%8^M64Z}qkY_{b?% 
z;%bhE4ol%i5jvM%F;Gk%O?hWarOUrH>YXcUh{#6}i4;E}zul>cTJe!2m3>sp?syIrojyM9|%B z{aRnGSne*XmKdcLI-A0#|I=(xX35m4QHP(a0cBd)bsdA`)iz05M0%YAX@qpy*j|@t zPDp@jRFSckb#Mn@_);BfY|=}Ih4km-prV0G3fc%-Hh=I()s-sE4th-SadxTe-#hiO zR9l)5m?ZxPP7dQu@T{T2cTlQN#CFA|0DiI9Y)pv~U8MP9CMeyRhL- zN;JpE?wwkrEK29mt`c_L((0CXDaprbF{lHcT@8Wygdt6J1}jsNKwt=5llrE{d;TNEOFp z+I+Yll4v>zvy`&BvzVHxI-fY567ofK(5@~{)mRG8ZqB4#aLo`#DT((3F46h~Qo*&O z7#efls0D#y17S&KeLB-0m+U^)Ol5_Mk2<-TQJY|9U@TPxl*Qgs6oC|&tHbcOI7ZG2 z=Zm@d2uMS5nOILf!G%*%@cL(Rmj@87M7<@u{kalI^4_NFdWuWWfV(CACT!vURL-gW zl`BjVXbU6cS$sAH<`08MAOC2y-h`0qGBHNz7tl23neL-^(zjID|Zw*KG}lJ|(@ z4sxri#os|!SnDNPfsc>T5?Urwdq5DtvQu;`AbvBeb9IMp=50dRK&R95rL9i=G7*vh zLwF-pbuPYnjt;ZZ#a`ey4tOp!A?+q!(e7HNxF z_R^6)GwX+*Jg;to7Ub8_w&^+=_%A}$SRxZwD-K0#qr7ygB4^UEVr`ntn8#w@DXrO3 zk;1dk7^UNph_A1q8CDlnqf@I>c-TONy8XYc&KW^!ofP- z#Wx|e@4m6PQ{^y=iHA^xkSh6~#xRP1j2DNv#F8+i?Z&dG2;ZF9iN&kb-$Smxr<|8v z4P5Jc96XJ6_pD}vMrR}eq4-79 zz1B#S0!Y?cX>QeXkZ*AxhycuH0ycvKw_w!)6)7n|H+g92#i#ccP*1SVp{?+)}H~llel;XB{tT~MP|dl z;!H&IRHX_lkG~CHJpDB9(5sG_Gy$75zl4t+6jNfA!ih>`GWaWAHEPKs5dV6Vn-TamVTwZiB}#zvK_n)V)^&QlvR z8uloMy3MRgpR=cm=LtjcWJwlAk`{dkwzRfgUyUjM&xC{VPGi)^x9TXx5ea2oEOV80 zR%~vb=KUh2mKd7vT$Y}A{BT?MJAB{Ca6ovIC6Y&j4;0!bTNQ1rJqy&bx(f&$3&VBG zdCPG8DL5zDkWzB9as2lPA0yyQ^K2TM^(-^CZ+gwM7yf0{S!>_Uwk<)sShKZm*yW?d zoj7ft;)mj?ce}X)%Q@>VQISpw>G;hfCK*EHY)o13%_P<^Ila>J+Gq`=Eq;58oh((F1N0A&0V~2h2?DQ@20t0wV+5x$(88UO#O|)u2{5z{M~q zRdZYbo@sD1@uB)MOLh|~-gSr>t-D2+Np5sJT@n=vLHYzMj&d2X!RjM#x%y7|nZ7bz z9^0{9vckCNhI5FwLzVxMqb1@xqgL(c!c# zTNi4>htf)B;c!A%KV{!sA3od@NZvT9RTigGn&bzHBHdzFq*Q4B5IYOZY7P-wLJY}_hTR9!qt;=o8+ z4L{vF%+79Z`TiXy;JGQ7BcDuG#i&_#< zYNtywSIy+ls7mH$n72Fj<^J$J<36>*aQXS#LjRVJ12a`Uqzim5$)9Vs={InN+;I_0 zP%}N?;BP?|%?V_rC{C>Wth4*QhBu&9W*MaeESo^naxK@Tulqu zbLST%8JORsq5Ok%v&?IO${3bGFE98Th7SDCV*i-Fx6A;Xb5C;EWdEU z!KISgY0&veEtGNYSW`zy0TMbwP;6GF0{v(+kxya46?kShwXqsUYq@3=P7|6{WTT@F zQv&vYd%7eYS{VZ^#9%GGw?Opn7}gtmqm3_+}cJ|sC1$V6(U2?3P!~CJi*=>H}z6K(+2(ts2N3c7d7=3jVvrG=zW=Z 
z>LipmJ&<2joP3^PRE7-sXU75~vk59yn1(VJjHgEG)t2lSlp~pw%pvYIJ8(WpNK%V7 z%GD*!c&6RxNIt`CjFgj>;^}6d)jYENku7<7o3tIYEi`Jw+TA~WNPBP3_b zG{CKxsOe@Mv7tbzfVmeO1!OX!46hXlu6bReoVA!?IhfvQ<$fswOQFHW&iD~+{PkNc^C|k{7Ju49 zqaN}nqxrw7y0BI)@%O}>C1CjA%+EuEDW|jx7lpvOfSXB0gT+fLMshkwEuImhaS!bP z6)n~^gSiw*Z_p1NRL-R>jV&&43Ng#|@9KW7XyRQ9S2!(^PLZ%#$thLx*fhJzTs@5e zQpFar&QZ;UX_HwBP^K3QXavfU8D%f8_y!faeR1o-%V2F!;(HweDxu{Msx}0y>$n|r3g!14-z)PIDwJ$)@b?dIKvu1=&LW4E4Wqc zew45=VZK{E3T!zRWz;HqO0$6n?f~3k8w+Sp!Ra&=1a}!4+PT03PkJqkK+G#`!Ha`5g$MmyVDR#=Osj&v-Mcvx6ly<{oY>Y{v_eVBdA zXHL)o<-UaG`hZKnJ>%1D)!AfZ+B=Cw8}hA2y+k2-DKNg5#3HSv>;2*6^e_&z?5LxwR0U*Eliq{Q&$@oE#Fu<-f zb6D?)LrgB4y1@JeluL!O&=Zwqm6^RVq{u89BST)=XpN|V-b$W)K`f`yahGkTAD4}jTp&HB#c-87{OUu!BNV^|=OdYYF4p&s$x?Y9vgp6e%-e5e~;`wxWPpcR4cl zA)nV+4P5y!4&08kM`DC$6E8L9DHs}iV(ASM$jI$!4LmNCERvscD`I`8ZI@VnGZBOl zuLssTwjEG8fx7ktxL)+(U%esA{zT&UqVlASn+a-eqxR}J@E9YKT8MM5PyVT-~D*@eZ>41HdiWqnStbCWs|s!ax&%~A0bdx2>uwJf$e6O z53lGU&)^q-(5!>$cb{gY8E1ncfM*{;Nu>9GoW^Htl{e+w^`a9kZt} zmHHD>S3SDf`BmHknw{U}okgH=&E%uP}${@*- zJ1jcYIjet^Y+{-kS@cdx@;(dx0uMNf(9!g49lsX{SiHRW25}V9#5bv3fbg<^14)P< zjR$ChDdE4G$Sh~g*Iv!?6jU`=P!w%;_elvzbr@d}n2W7AB)dGQ~*Q( zQVAFPR7ax7sERJUJ6wxG>^cNzyojm@$dtYgN`aC>`}m!MUD99Za}ZvIC5)RN$C>TC z+L8y(GEVpQjBC_AoqEl38Ry)_T8aKT}m&3zf9 z#C)&Tu(V&$v`h(DxN(Ulu0v^);Kq^cQ$%4gVVD)aabk*)$mN?h>*K5J$LF@-Ml6GTG)2;7JSVrLgb4|4EHdB`NCyr1FwzxWmLoaDWCxIzTHyRndWWCY$K=K-eX`PL)h zEHuhBrA*wp*2fI^miHOX!&|N$%4-b3eb5txr`!T_5>sJXl-~>Lv3&B?7LB z>Ty?nL3?vsbwTc8ahJB9?Gdz^Q3=SNT;}E-US%SHUcBtNi2yHu_4LNR&eM~iuf66! 
z0eFC-k6OJPlCef){f3qoY`B!F<<6~;4{u|a3S!}(p+(hs?~L22Jq?wlso16{8~@=Yoz4l6t9u?v*q9nn;+jl2qi{4MG2}E z8`Yf!v6pq&W=gE8T)3i#`_cDbsBwDss=fkt$ur8`h;#2|7jnxKx0m=VcaZ21zAw!E zpz?$1DLS7?ZoxI{O`QPj8Gd-BD3#uMTMd_4pm`!SA-BoCt&VJf^-70|oq2dWD+niE z&55^4`wsDC#2HHj1F!o_=z3lE*g)B~YUC6SyY}fFcz*39Ecs2+TY@ykV>{DDVuvDm zsLR5(X3Nx7eQC$(15w=x2SGtONY?=WgYo|77)l!%vH}L-KX}XA@2?pc^tE60Vk%8>$H!ba& z(y%7uJR5%~tRWC@ah~%Y)ld;qMHxzyxwNmMRE=_~O?y0rwV^t+C<;JbsAJABTDik= zZ_ym)5G~J6jTLxGlxjZx&N{TLxtjw-qkV%T{7Dtx=SN&Bg+%JlQptE;qz(Sv9Ev>S z;YEkZ{g-xnyz^4{0Fo=B9&@(+xi|+R*1a#H0~M)|g5r}b?uC-@pb++~!!*jBZwHsf zF;NQlw&^dE6HTTQnVj1{13~TaO0$L*THsDHP}Fr6mPZJD&Bq!t4<_Uta2DT1bkv`!NwMF>Nb*GsNCmR*T|l01qB^ef1(K~y^!1Rl@X{Z za*NL%|9wd8AiQJT!bj#9)iHEW*Zs_3HF$wfc!S9TG;;3;hj8%r*LoM)E41pM~gLC%>?R&+c@j zwZVl-jy!y6krB`Z&B8x`8yfUb?qE&LZs@@X{c++Cgsgx4_)3f5j+JdsW&JD+!hB~5 zu$mz&E`iLuluCj8$i_x&OOhj2tnU+!_vp|zxg=Pddf$h6t15@hO3Lda$i z-3Pk6Vs`ly$z2XPI_WJ!rYd4JPgqJOL5S%rpGqzt2^YKGLN@XKIZFg?@y6jbNBQYO z$8Sx-kst%$6x@BHUCvu7Z}0ey>d;XGpiy~%961!fU}T8M4Gcq4RqB4NSIJaRWo*SP zQ8g724TsvL-_bza=^zf1&RRBDr3Ud!ik=%dO){h!%^H=Z(;ANWai`82LRwz<7niqT ze9!~L$j1MS%<5tPF7QTb`~T0n{~6IAAkNJ%Ef#VvF_ll%Lzll zECk`opzoJQ9Z(!({t5j5)#UOjnkgl$ga(3O){O{XCwSFUUHzfZr6BlFf5v-MgmyBL zD15hd|E8l5DQ|>gla|)AdsV5TrUkJ^Dxuy>ab>T8lK*wCM_5yuO^fW3H zJ{cSQHH-u??-4o-?&d@%?cYicEsq`%eUTCqQFK-K16?gCmuXw9uF-iP`uJ;||5}83 z5X!X7M0aMvqfrS;TTJdALs|y6Kkq$g3&h`B$W9E*7`gU`s8;qEN%+zMkB&d!zqCZ) zBmL$#54Kx&ao{c>O@*>#E}7>qbz4B&hJQM#hx0W*ytvHqAP>Tcy??M){=JINv31wV zd46t2ZS4$?jZ_Hy)yfeA+BT<2UZ=nzd{624K^_Rv5}W#-DUd|M7EQDbX%k60K2;Y5#jm1V{iZzG>CH zdspq0klC*jsUOpXNoI)tl7Ncuql|ZfLT*ves$KI+zK}oDoO~d2P99bL`md{{BJrL$ zK!Z4R)^zVD9Gp6j^p{BfrN*tKq7PxFjy9Yy3NV_zcgOE zgl0O9&5%AMYWXJQ+n4)N)|vNSYmC+`Z|=IVyef{2{~BrI^T!#UXBIa1vk8lG&0Z|b z{kl<5yOM!_XRTi#Y7*p~tk%T51OL|*xDNBaOVB)-sWPc^xwyH6lcK^eov%)I4*Ftg zLvNOD5d=fw+r82S;hS6V;i4zVYXLHMEx zydm7VOm1-ED`OrKtS##(`A3?XWQhDZOJthhKM4_k^r7+PV{=y#hXUASip~*Nlc`@? 
z)<~B8*#Wp~IJ;$jBTg?B^Iv-Z8|Pw%RcP*ZS@_lgpZebOrexjC`hchL#D!|jg{7|WVKmDCoTCJv zC7P>Y%YPjG{_&r>Z{90Nw_N&vObPyd*&6R<#u{=PIwCy0^vn%5buHs`BR9};wRZpR z60Uu>jn(3m-j{cmaXTQq@I8apzS+_*Yzx5BtX*RRJFN^41t(Z)vbZ+gG_~ArEWBGg zr))Xe?vev~3}uKKj&6PDIIiOw$l?4KPmb%c=P@)7$4ThF>{zzPuO44Dft%}sg42&b zV1o0T40|*ybAZA#g3cs5J{PMq2iIQ59rU63f9cS$#Pa=3n~pd#wCkXQC*!dB!qEd0 zc9}gw=~CQB9nuJ|{CBsnAG)oHa?<{<-THU~UTiJNh?IGWkK|DT{VCRTS=_k)UwhXc z&1Satr>p(iIcp4^DT9`@sVYNBQ6aAFq%Ivs^>b-85*-q0EBFW@1Y^w5(z;)&K?ZfL z%ZN%EG0{#LB27}12qn|F#9bwBDcbMFbj>+)oVCvP{eALZ*2>yD@3Wu%Jip(5p8b1y z`z8_q=Y`m8_RUAMhEt1`c>#p0u!P$(rT+oc0`3p+W7|s0Je@ zGM)M*Ns;a6M4N3+t#*8Ah6{20;wwY4tn=cEOoOozT?0#UQc6e7$~87Eoty zy+0OGN{Ja4$s$lI*zsIz+Aq1p)I#x7{qiwsP7tlHW$0T@y0Z}mF}QhI*cI%Mq$H?Cx6z}gorzksVbt&J{dz#t+mJM?b!jcal3T36#rL4z zWZaEx&q$lT5yTD>MjVklLHXLYDS92a-hkQWBKU?z0;B)TOw_4G1Qgj%lxvg>7`r1R z8C-aST^RtSQg}q=^;4~0W_*?Cas;7M?N@aaA!e>qz4PUgM{TEygH2C3jDUd4AM|>= z8uVV5O|Ij);;lElS+IC-}Jdyf!wnrDEU4R_UgHQ?T8X9eZid@Dw@EX~6e zImLeAkt(zg(DpxSowNqqkFUMazkEeZ$2Hf;q_Fc1m`WONwJ8jAWZJG@$+YD~$P$t6 zcIuLNImB@!a0l`PeZEy?{5()%{7)zWNJ&4(e?(49f|?j%;h9g%S&@Nfz8X&}s`&L* zS>(-q*If#DCbq%4vGgp3u7PA?u1iByL9H5fCXC}i4T|t+@rA3u{F23|Pv-?&xIlVt z6PnPjiM|1bh`!^4fv;7t6ZsTV)XB8aTPu$d)Vd@Qb;>x>-lufpMG&V&q@`)LIB+`Z zRc1{+-dU(`zY0*4l^(u3^aY46LmuF}7ToXCH~uX*n7%+F7QJsC*HX_eZm}NpvJftt zBaG03soXU5@O*V-d!lir8;KI!2N8u8pdK8WyUG4Zp}pUmR&BSGZV;Qy-rOVll4Sex zquwmkz%vu&PpTQyPVqk5>CR#6mYPH8aqr!*&nusOk3u^AeMN2iKI01S&5|5Gu{C8V z?;RRB(0d(5Yf-e4efGVkbRugV&fvK}EleF*`U=X&BtbTu*;+)s zP2cT^^f>^$EHV$h-jP1|5LW+yaPN*k`K=hL*B+sV9>_+AA!c8WeP2ROn@>=r7;@N4 zJ%R2i&i;jCh1K@#3AId$1AW6kf1)IrUCu0AV{ z`Z>$RM&=zVyjeGJKcZT#q{m1w`Hp%h=k7#FCl;jM(5&Hgv?p9IW_7rSqc|9l>uqyC z86!pQjxv>s%(e+eFSt0@N_goL0hYG1or4_wfp!1l}FNrzEu(^U}moA?FR zfYBd47baV`m1M$@z<)H!4qaI|crD8fz>m0>Zpgv!UbY0^s@{`WGlS96JG)+B6_a}R zrum7>8oE&Y9b)S6DYAqb&og2MO0Y*K4i6Yjm^zB4G#Y1qhg4JM3=z*X9F}oxZE_Xq zJbkHCO;d~QPdrRhZ4W3xGh7(^=}V2y3>weuX%rF$-F-C+$M7tx}21O)x5?SQWXKCYxNu zYRXckv6d5e7&K)U$S_?tjB_#+IxoV@!A0v?PYd7_eX*%(j!5Hx+^%2Sc87p`qxI(4 
z$b&H{Gygx2R{na|BKAw#0ww$y2^uH!7&ggs>sSAgSEq}BPD4JI8!LS06IVwP1Wi5bDOLYPG#; z`fbYc_lK;>Sv^iXZ*s2K4ofy%up-<~`Fg*{h)damF-a1Sf?BWO1QG-x^UDK9>X|7? zpq?s4oEmOlg1&o5bPKgw;AO*S)~*Ga$ObAweJRd_l1|={ftBvja5?y0jd!C9;IU}l zJv^T<;CM3Gc@%?QTOvtSJf6EKO)0V$?K+MLatqNVH#8uPl(A~YPy~wL%I23MfS07M z6NY5C7pvj=o@rq9Fwk0;522B3C8B0I!xX=Dtbtqf6_e^mH|tGZnfs~ovuSt0C>hWr z=sLyRRvxndU2TixsPVqvqWs%V{=o{{Urrx;^2{Bi4Gg#;?v?N*TRSZw_qqOYJYc!<<;!L(~NrlMkq^Rf@Up zQM&M%1;$u!ASTHrG;++M9!XbONJlN6Z}3|pN}QtKHVJyyKq&C2JHj8jg-`yX4X>^QBbiq}FA@VbKaWwJxPO{2J)M$m)B7)|vPt zkY8&?Q@%DoN>zC`sj?Rpi&Ww>UR<*|+QV5m=%KLV*bQ6PFB%Fz6YNH0R}aUtZv3tO zQ`fUoG6NRJe_h=j*OdR|P~*h#dn-|Gq%;qbU0jeEVYvv%z*+gs@~HWEb}X-ygoKFl z2?(K+y|*x;P#RuHI+3sfKH3fo7};!&NgECzkFPn#>3Ze`%OAe0HP1Y^K9JnIYyycM zv!={8zdMy525|!9(l%VJgtvVKM0p^aN43*&=1Z+P^XVeb5rZKEuT{G^)VnjcRBP>I zGE##j$NwSCAYb|=suhKS3J*4DR5uG?zk&_BNRaL;~- zrj=h&-pl%}1&<%yVko09)w;G_7|NZqK&x5g3M2B8M m_!P&bT6Nogob-d~uIN67Uo;Ws*)Lb9RW=XO%C9P%!*=~uT?;AaLg}Aoa7Y*3re9$0v8?QG4TA zi?=KJO+*-nHYV*$^rvu8#+A_XK#!HIr}5mN**(nOx#bHs9K+)aO&k=gj66zU+-#>S z36GICnVK7(cZ5DrM5u2%++wjy-)B}W#*nq1+s7*0_$hg4PLUmL(A9uRac+7N;CNC5 zGb11ca?oB>lWzGE!l*1kuN+OKj5>ZG zcK{wB75T0<-G%^7VZA>$?mNJb`~<#XY0EE5Fb=Epx8wqNyFOCaq29^Dcj-@Lo%%1X z^8FDi4*-YJXwht2n9~Sm7)2%@`0oc-pd|=e4_A4p5uL1kgo_4ah1krXm~>|lk1)E=d|ajFhOn$r!C?38Z2?A=;ybkK<{U5 zQuo(Br#;&V^oJ+K-w1;UMO-_m5e9#YC+7_+&n*{V%sS8`a~@7~rb3)<>huW-_{cbU z8R(&az#)hB9=*$$hcBW$>(&ZE$=!jpwRwSDNN}QSXHoj=f4YwbJ&SE*6<2-Z zg0m3CoKT%>Zrt!^)-rsP31qL|3L2XB=p(v8SfC5O5;wsZLo8V#rmz+rU-2hrGB46| zoRx=jgI5{4_4i5|MPNBDVq~Mnd{Rv_3AB*D0A`S)Ul}0S=f?$8;KDIi`&Rmyi+0gM z>5b`xnhlylGayAkL-q$+wqo!_tab0~%g){NW}JH_v^phW@`^d4W%fMAgs&|skqE!G z?S*|2W$%+mYOY0>Owve9J~)=;!f%{u(xf(Ri^KlU>{f`I#da?5a%leDXOVx~rx=*} zb)D;=Lf=WOz=@6-ntRZyTR7Sk4CHJ}4oK7HPVM377GA|OZJ!DA7)Ab1xrPo+K~5(9P%PqLr*AWStuL)yZIrakmDop zr%#dS6yod|We5EAH2kf+UzU=EqZj_~^X|`2obGwG6UJ4yPC*TeJnGrI<*vU(b)er? 
zIX)}8IS4ZJZ~fZ-cg5p?zi3)jp)z;#m+hVK_jAna1KeR)V6qVYp#-rjruqP|{xke8 pfb&1b*)_lZqow{Ip7Y*T{b&hB>&h?b9|4~Wb}r}ZzPk4P{{mD_86W@v literal 0 HcmV?d00001 diff --git a/doc/source/data/images/air_batch_prediction.png b/doc/source/data/images/air_batch_prediction.png new file mode 100644 index 0000000000000000000000000000000000000000..7741431af463b5d527320b2a634eef2a7990f621 GIT binary patch literal 144664 zcmdSBcT`j9_CE{}EMP$eQKXHIjTW)cn`1$xiAWIwK{BXxLI^E{=5>dyDA_c{7yh%ga77@z&D#aNHM^R(5*8jX9NTYIMJmW!oX*F#Ko&70s=vY1q8zW zBOtH{d=)k>Ab>tDAi!`G5YS2x5RmjvE;rW!e)x~G&83?rCISk;`^^Hvg1ZDZ0Ph5W zzXF0%0^n@GJAuoB`+vT_DtPp-F%SWPdoBV(e~qyPUco;Xfq&pNKVCPa3;r`>I^^%s z!tc^I{QZ7|KX}=t%#A4EWsCR4>%gn%Zt$O=Kw|P6 zRtfO%2Cov(3D5%GdN}*ukPq;1_e5z0=pI-dp#{7LzlI)=UmfD>rhCA~*~1+e_alI(min7*VkJM3ibE*SMfio;^lJ-s;a4}2|a!S zdg6pKFhUs>=;?bSK-m*@@W&*7&vVWhh469l_I2^{ln3X%;pBz()je*zqi#s6zF zKTf*(EpSvl5gq7HvgwKJbNI4JK)^uY(z!F&0t6?iZh6kPu=Isd3w7&Q`vw*7h0DQp zrYObS2Zvy$?T-E3Qc3EE&mFz0DbbZK0zng<`m=1SWhM)+WjT7i2F)mK3)Sac$4nQuc}y2qGIc;#VOt0|CJ`|I4&-^o%TsHafe`?_vh&yW|sEc0eGT<--Kl z_^-B0RIE95(eQ)tfi-^zt`TbBwR$_($=*P4;;?8N}z5sC-fXlehuRM zAXuGy$j0><;1K4>>C!)-BDOYz#0jZl~n^AB+8a7Y4_Z($ zAUsD>K0%j_yDxffJr)7%Z(DEfu>b$FKb-A`zI(*9MUrIJibF3NmAD@*@f~{l#8{M*gL+H0wCz6C+qUqVt^&~NTpLoJ2-FIodE3Pxo3v{nb>je93{6zeH z-SL@_llz)`Ok6WKl5&g4PF{xozs2LnA#1ma7WN8w_(j4E=_X_IY?^rqK}zA$^Cj%j)pbXlPts_e;TbL@YrzJV)dv-KNCT z0gnAbQ`cX4;p+C7-zLLga^`W{#otHti8$()PiOo_>mVdvgexr68V*@fu5B!dWi9xx z5F2Uw)jakN>w8iKQzjkHME}gf`YwG#v~3Tb$f)kU%DwR0dUBgBnULdgfz_H<+ryYF zf2~`rzd#-;i1wUe!!aS{P$ovD~b>z7$m(g)Y$ zzIeT{@H>APA#nJ)%P3mq*du%FrQeD9A2lxJX38z`LZ$lH)fJw5$6tj7*cVb#y79`U zPW|P9XOVOd>w1O8bwy)e_;Zn)xA6KE^kN9b1k#tv{fvc{+a$R;mqfI?1UaqL6}_1b z@VaTqNhzH=N>5x&aad&C*{}}t9hHS{Dm+OP-OMzS?tHELw$rkm^@?BYKSeR8_)e9K z41ckx-_3(7>u+-7VUkh8Z;Q3Gsd2wgKi%}Es+0;H6T$1x%sCcf%)UFvBey!Qvx-3Y z`S9mkZYu6w3fOntw39krqmr+{Ssot^FVDAA6HnvOh9(APgGe0Z?is8<^;+=Ei>$~S zJTKA~eIIi4hB|lQyyDo4o@P^WapY9bdoM$X9u^~kN7tpj(q?1JusV_;kad;d;UXN 
ze{_+Rt)f6m=Be5|4y;{UCac!VllQ174acQmj0h`k_kwLk%Q?HVS~*j9@}Vz{3tWKMD-?2f#ZD5aZwO6D2(21T8zUzs#m4V}%h(6k2 zT11`5??W&aCMA>dX3}1};BtjCZd!@kA*+665*qPv;9hnm_fvdR`h0ZNLUFubB8P%< zvn^(j0L!Da(SL?jvsJi6V%W-YVBn?LSC;z7=Cu{r%Fr-k+F zFkdOucPOg+c>aB|+|EHvlZ0iM-5Jka>sX}14v2+Bb9Dt}C5U?$>bt~vZ?FD(`qZ*1(d$c;e8JHoDeX)^kaHY4gL2zERo^~?hCHO(xL(g~ zH_Sh0ZNMw`Y4aKtX>U^9)~Z;RpMPx*>By_zG59fczzA#b&~Qyd7On@{Pm*G%$W4#W zFNV>^BHp31vvQG^*ioaBt%Fy!42QzJzs;fUz5g6=-s9Mljqi$W?Q%=f0?PG2PE}=* zk^-kNE_RaGkyM&3k^g7-!TKFd0?Q{qHO}ts?U9ofFC(LVUC+@ONhH`YgcLRPO4W<3 zHY(b{;+=-Ou|;p@XPPBd%k3BHe1yO22YcOskK5?K$xHVrzX}`h)tTq6bd~b&kA03D zy>4c!3#I8-t&}iuBJh={9pY*8;}wmTgM7si_9h|zmoF#bn740J*u1gDwZlJ058+I$ zgcMQnDCQSZs`7^VJMg(|wftXuIC3tFJDk0*pP7PP{k9ObgCF218VwtQQDvT2y)2%( zU@XEZLB_tVEEciP9j#f)(H5D)$Ucqems_D!i`Ma%PDZV-;hNz&+ix->^!bLR+k~}!Gq>guuv;D8Xll=+&psx9xq6N3F!Kn zSbLF5N}i!{-H}t!;M?-q{M(~T@aq}WxO*+wHG|2*Mu>%6b5F@0;&4&KD>UYYTZVV} z)Lr{xCtEvs@KOQCGVNZY#K1(nRQYrn5z<#NUFLV2tIHkKDylMx4!TK9FG<#4$XHP; zBn2d~QZQ%M7N^*q35$*;o_2K;lt#LtEJh{wWi@=c3*R5+=Xd3L4iuGqYLvs^!1^7| z2@c{9Z7SBcXBAA*vn0O51IBY6V-oyZB6;W6p8E^rC8M=T7@Pi|UVF8dbyz3CUGRZklO>qA}J zMHc#i)Avj%_U1wN8+R5$?_4vmgBxeZ)}aStj&9C}s=zMG50K{u?eN}&cxI)2FSNq{ z{pq_DImzmDC{-u;?MdsoylbHn9bT{t#(yZk&AKuX^(pB+K?t zRRx7}cEM-k+hXSni|3#8%R1BZ2S}CWp1z$TQ#DJp0Sx9gbH`R&d$aWn(jQxV6%ur} zI^}p^LVX<_BelH-K7WFSNQYfim@~XvwyB(>d#mEH(1WvUJM8h@@(EOuYF{{nfYaC9 z+dPPx&VOps8*bjefr@$H#*7Cf0>8n(MS1OHuJ$#MOdW|8rOCDmIZ0%9$4R{o3I{~6 z!)@tH=cXopa~mh-*qYJ}1XJc=j(%MUg-ganxL+bmg*$q9o8*;ReSGV3YqjJbJJKcA zc0gK+K!WoC7ve>%y0}=6(=PE+@f$jG93$P&W0k)kPt7z})2c3rtj*YV7a)5x3SPnm zbNj;gv3SEK8_?Ij3KyNgFvqr8zFIz7sQ<1}SO1CaI^=o+BzK2&JcJ!OE&CEZ$sXEq z$K6W*P<;H|ePx%G$Hmw!@LaJ-C6#A^XCkR21|8JHkEUAh zUQ~yGsK-Fisq6ZE~uygQ2rg0MCz z)K}^2M8A^2*x6UdB_IyZ^P>X4NV=asA+h6b)SBtFV+ds&mzVKGLV! 
zI&+I!Yn%d7=M z=$Ly3?3wRPvZ#Sbou%G)^*t@3`l$tn6pN=Tm<=PD%#!>uXOzm?YuX?H0d-pvDEC{W z?~qmolaj}!{PAkZcUU0Rk_n&8+Zwy3XcyUqd0gn{2WPQZtQu~Q6NyvLCnPxxr~2q@ zSxX`Xh02c@9IwE%1-DGkcXEf+KA*mA>9_RqExOMw&Vs%UQ$gJt* zZ>IqZadbPMmmTzw^ck{Oe~8ZSN7!LJp03T+^a_Ac6}(CEQY(pn;-fBC?4!f%y?Ws_ zg{80o7)+iQ^ySaJs@{4v_bs~~w+ar66W0cKa(#SK9D?F8o z^h=&vIV*lNSPX@+Y#??-;P20+(>QD*JNOVEx>gP-tDsEYfCw> zfN0^Zl0zTYyc%3S(+Y&a^rTd_uCbNX?bQNoR6Y~49ujrD82CMl^V$2xn(6|I#vb_u zk1amy?Jzt?0w8`d?hny*vCu-m>KHv*Q|>{dW}FX*PY(7JZ139oBp((AV4Zdm((4`3 z(G|o(r(S*eb8Wc?=%8e9d|=ntb&qHPI!jl%ZIXYiM<+*sQ*rWZ=NiDi{$DsL)?6Z_ zx!w_LyF>syxZm*o-Sx7H%{GozqHd3C7RHjQIvspJpSRTPVN0Do!}q~581LI3pKp6S zc5sv0_%QMGIp;%EGuu`wqC0TQ%6+=5S~O2nu1DP?$@k${P6gc@72|4FfTYkUm2WKG zRP>mm=wrsDuZv$99mt89UlARe!8)!g#flj!NSG34FahUkJyfoTW+Ru)oq~caM)XIB_e@4ZTJNz<1`f--3S_VXyOLk^ce+V)HPj0j&lq@g7lo+V zsY=u%mixkjbUr2cZkNL)8cki!F_M;SN>v|w)!$u;H?zkkhhFPLyBCF(Z!_7%F8s6d4@kmfAVTkrJ(juH)lFJ3jMX(v#7U`}Vv;TRqp!7A6ZYB;VHE}V zo2Z?m8Ex??FIeIsb)+*BMuQ}x<`VQykr`gEr+zj^dcaAO+(v#8LddJuv(i+@k-qlp z9BzHwGun`W*1JbiASb(2wb6pwUM;Hz=JmIw>BCP4KIGNYZ2T~Zxn_yMspjlzPwp%X z89e#4B30vXD@}7GpqR8wNZ!vZr@*TB&{_L&x;6d5G6X7u@f}G2QW{7>v9ZBa73w$h47)z zCcu&!DyVcLrVp*7T*J`ah}p^W9t!Y`ohtAPSutGk>}~V(v#Lr9M{%P`3fh70?*4Z6 zsm!F@&Uu@Hw@(TNM&G<$y2sPhovr6+8XxlAZ%vr{*8x0XB7 z^0+l$joVgCQsYMRM}i5`yf@MoT3G`fL2dLP?(9M*qMP(}hG05c1c>EqS&dH`naJq) zT5^L(ny;LgDP*=+UM85aV5uitu&oKXW}br@B`j%Vs&5P+Xt8m+_n6o`%CXki5?}#q zRtP>`124{(b{NUv|8@6g-aGr%`J{QT-VHHs5Xngr8qK6GjRnIF(XHx4_!PUv9+VON_ zRNBcg#?vJAn0#%+r|2Jv$^aJN$5x~<88Ng#Kn3YNgvQIMBpM8U zx@YMbeika@~)q(%UWP8=5w%2tv^yumel)D*%j57;JD`v!M)fI zB{mM9c-h*3)o0lz&b36#S(U<2P_oW@{OOW>&cX;05vYtu^lBpPlt^x^u}Q#?w7Bvc z+?@kXfrkpJSUPAn(aohpT|LBx>eEsMo84N`(_7L{kD|wn7MI1`pg;5=NEc{YdxAr3 z8Gld>-J?chD>>z}UEOqi#-$<3{0;Q#PEStSA_iR3psCt%go0*i{4|C);3#imPl`<>7^cNeNkSHXd9hzVQ~PvQPy@fM%RjX zHHfNr4^_(3_@C3eEtl)D?Ba1!S$_e$(LgPI)JJ5&z&1RHa{WTTvD}hyns}VdkjX5ZagWAIB;)N zmZoQ<$*~!|Jb~s(w^CU&4LuK(+$b>wkE)r|=*i7nU@n$<+3UF`_ow)9Rb4Ad3UWPU 
z`lpolh*%QG9KmQH>ZLT1s!i#Pp;OUP`wMiZm})5&WN{o;hA>E;Dmfe&R-iY0D2HkG1TNc1HhC5_-D9JZ zW_`Dlwao^wd4AZD5P#QM6jGDp4`en2e~iAupG9=)O%QnVI(jvW#}>)~^YAIQTirjk ze6h$YXcE~N8*HLi%3w5n&|_ukSWlOmqelBrXIbFw(U-Lh-TgJk2x(1?DI+j^Zj-K8 zKWkAVp6LW9Yqk~Fmz$58&wlWWPazQz_DUo@;`hR8PgH6Q;7*IEBv^^FMeWo!0J&(R{s-2U+~id-UZuxKW@|MVg>hw*3F(ek_K#-KDcBAKnJ3+dYEzk_>%AYFO+YS|70ez+#C4DnDZC|<@0UoK zv<`&*Sv;QAe}QdT@g&Wjs`H9L(lld9ziNoq^6KQ6(@~g2rVf5Tx)sUWmUJ@Zpl%ET zsoAKQ0k7WV3F!%>bdp<>{VSfPf7)Cx zTkqa?$~z+-v8`}H=Vr1Vi&^<8A!CkhL)Qy^8r(scD4Vk(Kg!LmC^5%&~|E)&j6s}PZub7)= zwdF&|8;eN9+%6o}2g<%{&R-e%wwFsLG!d8e3V2(Y(`xio=os3A;NW~@Mc9hOqEWCu z3mVuF4X!`Fq})e3UJ)qyaz7KEC{eBAG0hr8`h6@`r8}cdg$(DzreWyIXjG2!U{nn0 z5^+IC#osgSEH)SlLBrs%%TrE(r{_mBb-5r+^)K;sWjM@~rzF422qX(Lc9Vy?ha>Mgb@SMz1#0KFLOC z+R{>B9;#*f>q2m{c4^xSJ(tk#Ks|00aR7yOMO8JirV*tiW~4KFf2slilN6woe znDxiC7I4&-mIti3Sw78{9IVy0FBtA?A7wf$;I*KwkH42 zdW)2{u(8?c%z@Zho>%c*iW;7cy3Zy|h7h%iM;Z2X7(ZAxmidJD0qHVy!@~tG!=oSI zqK>Z~Q|}%_Cp52fAvu`U+r}L)=^dgya|?mLBb8CfPkXXx6#aO5!y7n|slVsSf-MlG z^+-Z8CkoXNLVP=|U*e({vLboTN3Z1az>>5RR*4+F-)efMwVO?4(GZ*IhaIsGdy$T+);5lLHo;Fn^ZEJWMiRMEDPDupx z9)2X2|GnzdTUaDOMj+1e9>H@ttPlA;M;)yzHX0nK8IVGKi{i6 zy*FFVlEB>rmBfSwI9@9a=isO)6q1L*O9MZ=z;x=WnWXVQ)F`RcOpau}kJMioI3k7n z(l`1k!0(RA(%`nl>_3Jo&PWbEzjJtr=h=Sl^wT9Ci+wOF3aUF7NceNAeKY%Qa$_H`;^DPce6_63i;aiFC;?L$fGe-gjnek2@R3hQ*;tIun}4u@ zvheO|6aba^8aj@6TyRR)@~M|EhV)*>M+thwo3%*Re^DP)atL5wo9Qy!QJ=F`V@m$c zfGM`R2(~rW-03Wi^7I>Hg9_3)pgl@Usc?Rq&^9rK!^`cq z+T>>nir!cjh3LTg&dh3+yl3ii-rsy_aLQV}3{t*E`R(1YLVzey3n;HWn{cVew#z`R z7h*pV#Odz>Z^2FjaaZA6$Zn65FC}xI!r!)9PFs>2`R15BeZS13F>PE@2Rm(ny2SCQ zA01E&Tn!NZ2lP#sfD}~vz12W3PS(cRdmXdB4G)}Xa5W1msI_9@JO|nNVZ(<3Trzx% zaGY{NzCrPmJ3ZT-DjaeNY;)eH@3;|i5>F*)Y5q+vQ zGA$jFxx8ck1_PrSF{|nH|KQEFFT)T@gCl5y^NX*XqErNUttfb{L#*=NDjk~Wwb4`F zQ+jwVT%tmtNp}r-HxZULLN?bE_R8c=(lTc|}6QRe0E^S*FMe9~253 z+Ni-=qxsOrQ;>9}0}S(g(w;7Wa(jK}ZE>I_ftluz>5{aePc=6wY45*l7wNGORjZNtPZSDlNVZSQ%E!ARR*WJJ?C1Dz!!N%d>Z|ep_(+h&kmzY- zYx=WbvAMyV$vyz=UTkh_vqt0fLqBz&Z8g~aZ_Q}858sdPy>iq$9|s{bd-9pFG9Ud0 
z?GLelSF#Jy$@ka2iBWsc`J4xrjfr!`R3_n4ZoKf zQTk7t--Du<9t|q=dx535(OBtWD=>q8wpM{}kC3$fC7RNNssm_~B!#7f-?2x!jx0O3 zL$A~A&wWk(_eJ5cV%}m&WlfjIPaJ`4ZcYT1$+Hz|wyA`Fm5uDzbCZWb|1GkGJ&>3J z4S$MrenHeM7!tW1E3BUQBvZ-JsJWI>;Q%BT3k&f$PB|r*U*hn5je8o}&xh|-4CL9V z$jM((x`31*S#8w15)RPM_gp#>2(DUZ>22JAj$yxo7fSQaxmxjZOB}Nx`{l7VKX#+| z8f;P#`C#+OewtquX@C?b!8ZTyKa&-KxgZdpY(Vq4;PbRhg)jOdhCgDNX2aRLR&6r; zO!%qR6TNq&K!V#rqVM#HTqMQ2C}q_#fLNY-V9aw3>dgpu%s0*-oOl3AvK_ELzc5%3QN^$3cgR^7%!SU;a(*cH>$g z{E#v+&UpSy)lP%-Do`9|&b#iaxe`nAlTjp^5}Ra=)~eVR9v#+baxyV*ZaFP7^x*|4 zHQ`oUP6h4H6m&ECAdgn!3Fm3W>;AD;rPT3CEg^QV5i(`A&V1EiPDuhvzUQ99YnMng zVNx=W0W&47>hS-z1GGcMXo>{NP}IS{XbA`}q{Gu_))VPd`aeDu)Z-4}`vpqWNBaQ6 zW5J`DSlN(jq!xuKdU~uix@B!b1WA@1HV>KcD-WB&yD^Mw0I~I>PHEVrU|zgwVY95& zcd-AxjsQdyuy$fbU$Q_qW+R?(G(q78Geko$WY(RX{ zQ0HEAE+q?UK^8T4#xBKwDDsL=CtCl@iGT1tGd2KmP;4OFZ~YKwhHw#ojoTLl15msfj!)M%y!IqhIi zGYof^Nz?YnbIqxJ?{4KHMrUVdv84JEHKjz`0UyFh|{<5tsd(*-pJywP6{z~TVv9}uz5%6m*_B&gPAfLafPZfUME zpvIjzCcu&p=x-1uko8exTol;?KgSOu4;0;QHFdej>=kIlQi#z z#o?Fq-y62CE(Hgdf_B_E<65klJ+=H&S}l{`wU$LVZd{iK%7%85sZ#Zwta>xDzKeWtm+sN*so~NMBO+nB!Ixs& zB(x6C>t251_fzA5So0g?Og(&TY*8{$WBW-(?w@E-ikgmr(d2H$qf_y3fI^=bwC{#i z5g@SELgHb|?8ooA87`Z0iftFkhONpj=Y1uY5{0I71^o9dS>a;`Eg#v1v)<;FPkCLp z;ZudxSvvul{n5JPs3zAwMV2VR7f2E(3o z4~T7S@X~L!{dEk7Z$To~S4&%lxEA?hD=mLEa$agM8PzbwGRX*vXdP=R{#d)NwQ#w_ zx5NJ>jfHU#+vutJNGAn{YuRo-d|L-OI=EcVRZ#plbcN(FHqQNKg0q+N@}Az|z>;At zQ<)fNutBp{Y4cH=pzN)2vI^GM<&;Z>G#xxwUUYYxSnX*06I-$%#m?K4N_H$9$So~} zmsIy!;m57;-4r3>z|!uzs-JssgNh*zt#IC}&qee^0{Ncx#ht!D0+5Sz^<+msTm<0kjbDUC^w`?xI~GTfwH$J=FS5RIv;!{%RTnHV+}RHV zR6k-{>?Fn=NO6aD-v1}5seK~kly?q@kTl0$E}rUn!Wjii+_&EW#=XQLHN>SsGA9g{ z7I)A=>u`zr4@wv)qTvhdV2Ls-yz<%2u!7Ou&+#adJolu&U1f>epcS5?V(nE^OnLJ) zU(yN*ExGz%R^D*}5}EM<8vc;*Jfp33D@ZnXFrZ}iFd!WhP1i`v&3m?31E7V6R9}GC> zMbK^J*ZMuuvX9OWIz*r7L#z}MOSmBDW%9kbZ7|!) 
zMKK+F2Qn~P@@u?jN>YQ-SmHmPsK>y7ARgK?JeRx+STf-<5DL3f!yR6~kLV$)ku}pU zZ&48eOuMOpAoEseNO?amv73C7J6|?@)jy{VhH_!>&?jZ3v0RytR`40mckO61YD5>zbw7Cp0fIn_*aHcwInRkg(Bm2iW zyIdhBM_z-Qns{lPVu@GfO++s1ane5x>K{dcc%;D+mL_*z8MydItXD3X#Z2Md&847Q z2ih6|k5}un`L;v}N215jc^7~zezO8-_dM6ieP|h4Qdr|>9lE#d|K@ujGH|Ew?bWdU zI@f{v^kRn7aKFdIUfdy&>n{az3tw$?7TO~mlcED(nr&;lmNvAUA3eIVr_sjA{o zUa4BQf2Qz1GILYN30Ch+vJ*#`KeuUW)-hEP7Zi%SzUs!V+8eS)C(3)<$dhVvt8mIL z`TH$xf{>@80bWyCAwh4-_$f)E5&ci?PJlA8YGD&LhVRuzUWW@!d_M&{NCr8P7Yi5w zKOosEh?uymeLuf?ao&c1Z5hB#FW&P8Qb-vIjkzRL{f<=4x_B+WxdqfflVZB^MGk-d z+)UdF%@;}LsoXG|8Xa~1nH)Nv7iKseQsun#dDxUUF02aTPscMLlCjYDgc?ib>T_U+ z)ax`9y;Wj4Sr=21kd{^uW68btuf_Bq`A+5T;1JPddd>pZ*CZ!?kZqbke~LdDh((;nT1bPC$d8ngaPV&^t33`UgqzB89ihXqJA1Ur2V0N;?)} z43qlHw#r|bidNoOZ!V7bCH2o8!oar>*dp*Wh9A!YC_D-{{VBJ~RRh%7n6n{8$fR3(0OYlFlT7-%%cE$k7RTOAeAwG^; z^_Mqv6XZfK85e6@NasU@-VUrhB);zx_cq|o>4g5C*mC5f4_@I1X}-qE(=3OPVri!6rI05dsHPfK&5w;-)I z7o8ifhquPpWM$d;s8crnO|W)TK@vQMt@UfS%`$@L5z?UdYQbjV z+oiP@`nuh${a;yPw;u@9xUiH4`0g$`}1;qR8)vt#K? 
z#cEX5zC9c+r%8_4|3yV?{Jy{d1pW04D(SD|9q$I~V)b@ZryR~y#e5RkN~-5T0$al%uWHx%ev8n@P}mp zE`7e|K0%yxO*-`X#D3ZcXyvznpb(0ljd`$mH`gzgxi~uZdOP5Et%tkzAGyUHA^Heu z9X%czr*DA%MnENr=TUY|&+qWd^AoI@pH|76K>N=t68j*-&jWSV4>fDf^HJG{QC9m4{7mH6i;$4G@ zZ(62UpaMPg5%F)Qjhva{3Q||V)qHp_F9;0ZY&+} zE}(1xw$*+O028&V&!G>xb2$luXTIRh_>Whj| zD}W-asu$k3>q>GxQi6_8%Gdtl3KD8`H^{W6lZOFFpT#MHLSaph4gx+LL5dM~SMOM+ zH_Nn^)Xse8KC|q5$KOf}`pX{QE)5$|h|D;ui9=6SV^{!f#XgDxQ6(nw)bV^osPqTG z&X=twX6>HgWCz}%f#@RMPLw}jm8Z_nLyz8^7xbC6@8tch<2S}ZhlkVs~(5Y2Y_P0YzVh(zFj)o=VSfu zn1MV-Ep5N`-w_j4+bXU?jKcl}s6jkcFzk0S!5;ZcIqJT@SSw&7z$0 zkE|6JsYEh_lx#dy6YtEm&}h+~Q< zZ~On+b5OT3z>I-aIO|AmR(C5|if-eG6 zD`bIy7H+z%5zvF9S<&~_Wre4{U* z4EI1~hyl4Z!UyQ=3p zJo38mcV5bdKNL7KxmCpSC;J|XYB=WajRIf@S=_B9HDWNJPw52E{k1+4wc$B4B;6}_ zku9OrUIWlwZiZ}k^-?L7A->+~s#pfxw-a6oOjs#e{P%RHKmUyn!VHY^D(8oA4X{2}}I7cqw2O4dU8<3KS43ZMiiv4J|q zDSvn^sA3tLlQoc)?YH=g+wPY9a>HvXD~QHJIm0WmUu9EMPd zdG*1_LS_YiBod4Z@LPZ^r1h5tXXbG6VzxBXzBM`vw4pYU23y_TWIfKmb!PTBbv8uh z4s))K-BbdQp7d^V)}V&vwZQBDjh=`JOIw?>-VVszy2SjExE8J2I%tdZ<;4ThB;d{l zBN^bNg%n7Z`0+}yqg{={!llr`N@8#DOq>%fGb*0v+kDR;zxH{j*Rwk2Q|E7s*VreY zdv;oCKic@0u?Ki#DjIM`UNzQqWv}-vP@4pi5rEe5+3@U7Fzh`7uCa=Mtq|3cG^7D=bU>BSBk8*xF64~ked>Ad6s>A*HprO2n z1`qS|n)fAuckU)AgJx3rZRd)G?8VVPU~pPqgzV;IrA+ymm&MS|Pc~ z?{hd%JOMI5V6n-02)eIsYVLgQZq55L1ItTZY2)_q!LE=Wr5zBcI+JULbe~ZWA9kn- z!4^l@VZ7WhyR5vSi@rIpL&<^l#SrqX)?|nIz=>TDE{Jvhkb*)dFl?F%UBOfZM2;w| zn#Ug=Wt)X#f1L|Gs#7_R(uNJL?7g!Kxac?)A}Yvw+5hr`HADHbnrHtt;qom$c9}8@ zeLl{`knY)AZQ{4ufX5f~ik@*>`BEa||H=Lzd25aU5L(sre62mb&3|@+`T{^OVBvmO zw`iAQ0UD{0+xm&mh_PP{;+vm@MI;Ws{MDiK4y93Ie6TR&?7qqBZ+(To_+?Z!KtycP z^g~&{wFgyG6kcz&RSdKD+y{J$mfM~M+LRwLSoVmb(|uJ*8SO_?0S{MEPeA{{vfP30 zmCyd|w#24CqDU?V1K1xa_!fnZvQyKBp{FO zy-fKn-Bysf@J6j7p8KwC?GF5_Ck_r;j}xt`0e z8Sg}l@%6g!z9TvHw8&ULz?xBrH>z`*HPw3nt(Jb&*mZP2sNz zx^cRxtJMw1D~{D$`xz%auYMZ{_l!1|uWW3J9l9(MQlNNeNObgWSOD7lg$V+YXh$A8 zQWC9zIq%!wR(Gr08r#_2ahcvo3BTnr`{H$X=bPER5uFmED?fK2Hl7{^hL!Cxqo=la zo;lzNm^Ll2L^~zZn2YKpUhRP5ESbF!QIRa zqvkxod6&OfW34tcB<|GSdE 
zZCuhCr#vin#l3ze)~4qgKF!hg57363gswVg(@n*(Sy1byG6yVWXuWz@%nOawUARpc zi#q@{e9rYE>22<%f`zFtC17>VD2?ruDRn3c2FmzB|1ETv_PAhZ-#`_Jr z3>K;XcSXQW8X{!}R{QPj#lbp89Z(q&mSlD!9LnvNK9BsnXTUKB;$nQL!6{Blod!lH z*LMSbG{U^|3>fOG)lCw|CiQFbLl6M+b030DmN_+DxxYJWEDE^mUZy|GB$}20JWcJ5 zM!@**NuEjn{qU6ZVu)(UOPeVD>Knzo+e)2X<8z~C zRP)J7XN(sEvrG+T#a6%`J#bI1{+^!|MQTdB@eex;N`>yH`7#4hK&`snc-3}x%}f4S zZjpg)kg-;m1B$_61{Q+C8`*vK8*6L6wc|wdds#tmA6U6gW517%JSL?wwYG47Rs?_| zK-ZR|nImPFn=KEw>!NW{qCmWA38DfVqaqaz8D?M|{`=FfzMFlo4Bz$9eoVXcXz@wQ z{>UW!Ku_lt0WKF(|p#t0RW#-Kz(x<+rKK`Es}7>p7S5XNXWn%{ln^Z5Aw9-n`H z|8Q^&cJI1galY!juBEz~$KwRrUId*yhNJRcftRJ>q3f?YA6UMRNtZOV^FF*bZCpPk zGoQ9V{*lk#F_GkCZnoZKyu9H=MH;-ANc-q0)*Qbkr@9i*!LVwgHJ20^{XI|4PWs#wBO=7TpPKMui5k`$bY+buof4qLxf4zo_WX!iR~ z00YBTa=m4=F0cKnoY2`L;5MMO9UVKqq~q4(7x2$Iu!ZBV#uD)%hsQzID?CC^i|QC& zPa2-vW}u$->tc{mB2KovDTQb|=<>@wHz$29b!}#oih$gq;|>+8WHOYp))o56Zne=1 z>c8r6GJEeaLtmntiua=<`Z&C3)KTV}k~P45Eq?5oXdi!U#q8|znz22{9drc}Rl@F_ z#$*r(t5kq$b?P{wt0*N>!7-7=)*dzKDFFzkd6zKqB(-zba^F8!=qU+2wW!jxmRD{U zo4ze8d7AX|amB6XrAoy<&qg~~>XjMJRiSl5dSvhuZ&Pv$hdRfr_jNpDKIrn*SW<6G zO+yNNj^`cv3+GA$9fPL5(66`>WuYg%PR$b7qF|ha>_JG#aGQP8qnHlp1CPU@%~;x& zv^RaFXO7OYK~eJTjK-qw}+&A7ZmPfCs>L3PFRKLn*FmvVX=$e2oZ^s#5Nx^e(s90t1c<{aV zI?V2<<%a3u64-nb@VmoSBE$hh2gA1~vvkzEwD8w2V;`H`Ngx=n&Boqi6*|D~xz#W{ z`h_2Mm6?=Kq*SN0>Ns}T8sT^+eXXZJPWASZY}Ks3r-LDNstSH}F+ZIEHt`sL4JA#U z*1^fvv-3qLCL!c#XE^9mMooQC-NMuLgP^^M#wpqPgCL*$!?`a*+NLl@-$4gon20a) zVyx)uUj(I_2ZScldQ6tdM^Zm2L0DP zy!Jr~sn&Xz+DCY|Fk^v&CU0*>ROTp5lRk7;N5FBNwYX^A?UbHoCjLK)P7&^A4 zs;lO$c|J&)#{1QO%R4bay`#`GkHA!WS+*)#NtJTy_)$&gopTG8Y>9ZeyaOqtoygDj zST*BB;QFFyXoxFuZIhmC*0$gBrK1+S3IFjXUDFW76;zMk5Ik{5Y$MXi1U~vZ9u=pvkV%TK- zuTIb^IknQ#YUN@tjXb+h*-wD2HYE8BCVdX!2|cx=1Vi}{C0KRL4W^sJOtkIWY@IUc z4D#Jv#8-ymYia_?eO7^}WActLk@7W;SBQSlpSW9B)c)~ClFNC8DH;6sxU|74Q`)%- z86Es#yvWUz-bGx_&!tW`nHRU>uzT9+F6py~q=POQW8eA-jCA5*PV|0q|KeML$e@$q z%E-RQ1Qg+HkR;>yLTC3v^^W%yCfL#yw;>= 
zW&c(zU)rz_fa%-BB9n6GhP(rNzy)rmS_BL;Jt>t6LA;V@R!;$+^30{D65`JpR4ySojyDaE0mNm#cCQO_s6$#5;>0JO{|q%`Z;|2HY}x)0`<=kl+;#mrHNCs>+-Y~N;O zih#WHlyds#xEZRtb8eOGehF>J^n_X(A9{SzOHW{+_ionwJTvKQG>#P7n&en7xHB9C z9~u!j8aKK<%8DH77mFreX~&EN4!M_Zj*e7aX=>KAG^H>-f5)lBYu>x06j!8^N~S-Fbv}99c%!%HukX2WGp`X?Kg>r&d|N zdiOlZcM9?B(5;6_vGzoHUc$Hz=ITHanMi9TW3rmwV05D)X700R!J7rvyeJK)m$`Y3Hl# zkcl@b2^^4I43#Qj1?Y7EqX1@yD;)D`S|B<8F=>4hxJ+V%)xGzkoz7AGyg1EV_N`_E zi3cIdP?~H!Y9zumTKnir;)y_GAj31$NP2u*!u%GKPF|kEND;GB*qf4ElTK(((GP|I z{1&UY>pREw#!>X$`=%_j2ifi}fi;H^j`z zR9Jot-Bm6G>1kf~&4fRQEU;ig{f-a!&{O^co6lkMp8)2g&O`a4vLR^PPFrN`_hNr_ z-8tSesounm$pGUMhXr%wJK!2ZOt@hbPm-j1u+OL;xBa+riR9`y)4^C_+{t|vu{|{KS;o29rVkr)!-8b?No-fP4@TJ9Zpc4 zSS^dE;|9_l&o2B`j-s#s*i}Anhg=h^0okJ9gmOG;SLmhW{X>-u%u%Ly3T9X?!CMvN zcO+=a^V!ZtU}E3%Iba&C9v{eeH2Nm+d0_VUnc*GVX%N?)nXeNBKE4yc^{EZ=1(@R* zh){L4L(QsaL#$Hcfz$L}x}00KZ?X0_^43^o$m*qOo9{*rC*~Eg)oF|Q@f+IgwTCUO zWmQ`Ve1$@e-pt=JBCAR&U#Ly6pOvMKFoZ_(}B-hNf|&KUHXl1SD_e5f0S6FF~Lx z`$eC!zDpk{8TV;jcDi#MqQF9*|2Vy-MCtQtl1X)0UiC7)U91a*Y+c0`PyY#U`!!1* zZ-*?(xA=uu>^g9x_))(c`*~G)kXGr046Hu2&3`cK4y}Q zdeFw*=dhx>BN0E>u&*N#?!?h^Ti*|B1#>36?c}!2 zA-;SXrE>nyF!Ym&Y54T_1h&qlnR^V`v5)%rN@#Uc4!5n}k+%bfc00E0riwcKX8U6I z?S~=E8oCXu+KU^qllv`UI-XKXhZP+E#d?5Zs#nY@$oxGuuLC>uBL)L=({BNs3;J4z>@sYG%=jJU#^mspoUT21$SkpTQjzexfC|FMt_zhtFXE3WGR}^?kYPMl&JWz+!noHQ zpAv+-1Tr#adLIk8-2WsK&2};n!YiMWrb2*y43J6hnwmYctx>yQyJ>K6&9V4-VOArj zNUC86d0B2z=#Y?6ceG)NBykdurE8Z+${I-Pmqd*0M5WO8AMY;Wv>~#(hEYZXO&0nJ zPB4$-ULes_|M*b|M^91oUa6;44j6vgL)|TEtslA+uk3u>j`DzbV$yI4eGiK!f%1gh zu4z5;8XOfjWw=_amG@(Nimky2;`~7gx1W5)x)gG_YkFjf?X#E^)A+9+(a#!kYbw*y zf*0%8I6($b`qY3EOgXzY?{0iIHocwkm>;vdGFI7Eo8}o$ydGK(-)MFYAgB-VGz~Qb4hvRDs!$m{iJPx zH%@lRh&fND!nM)I&^01Q8vo3h+^Wgo`23B2xt=B0V<*%tQ&!ZUyVSgte;@@-O^HiU zFhkw$o*sryhVBvIW+@`1)Xuff0D{U>+f|8-yRYpI7V}}QEU(4J3?rZXG-V32)mQ6s7AYCL&2AoTtaQ}HrQkwnxeB^J+@qZ=f zD`2i>e}$`Ezn0^(F^JMH!1^lfi;ylGUtfdXaNkbv+(j@$ls6>aZi{tqa>B+#*ZT_) zQ-XMaGxjh30rWHWA9~7BrNzzmNvE_;veVTl9$npG$raLq7pbQw4bF`|I2|UYG^arr 
z@tq?LV6|j7F5n^e-*W(YbQ9JjMb6#mP27903QoJbqrh&*s;W@mu1EA|$Hd{s)ul3V>95^*tT9nB;I8!^sG_@&^yGiX#QzI0}29DNntZ=l5)4FWj3Zcc#b!sskJZPy$bAFJ2L_YLQ>CF z5dUN3`*SThy8Ew!1^)XkVfX0$ct+n<}mjW;!bwGdss1Cwv`Tla0JtCQ!cN za}pf`KDT?6$a()Qg5N;}WHboBhE>E4N9w6GF(?5#mI(>hu~Ew1eX0!&D6d_S?L3KP ztDm^sc;H*Q7XN=;^J`2$df9^OWMOK0nTDriHWoNrQMxUu0WLw38)UvG=9*^ucr+Vy zY~(B_Iu&b9H^@o<_(c49g_RiWA_4#R*No4vdiMys!p#yl)s1wZGW@j30Eqe--xcZ_Tq2E8w}bwx?GtKZ6I8eih0C)KtYm z3bEWc=!vHjT^Ppx$6J>_=|?n z4|&8K?~aT;*rToG09<6;s~_$6@miyqNWO?Z8j_)|f!xcA$gm=zk`a&7a46=9QO`@A-;LYc1}%4;xBo@f9$XnK+#BTaR+o z&4OI=k)!l!@_*K-k#R6A(tOm_Vp|wL<>)*7@h|rX8K!Yly>IJj#5Drz)plb9{Km{5 z{MenrVu*Y0jx4LVjXzH{E%^El@s;Y4s$&=(mXX*+Fdwyb--edFPfA^Si5UVmChX^6&hx|&IhKC+B>=mAsl(b{+&wpAFn3tn3uf@Y6ov5t{Zcq}Pqkyt6SU3|>c9hvz zg}pr=LBI6Iwsmc3MisA$-&XQYJAw3S?%(B4=Wo10rb9#Z*N)j{_eofi zLJG7&DqFtEVi;_Z0pqc8u%d~l3eZ#q-=5(>#97>%Qi=)M9sRW4ntb-j>-+Un({-E} zXX8F;@7t6CW5w{3{SM>{i-u=|vJs@{ z$==ge!Wrch<9$9EEZxeNz98t<(P&R>4=&eI{YUU#LqhNoRp7CFRWs`rR>uL7JP%n& z`&psye6mxNTn3#qx&7#`&k2@iA!Rp0%?Or1AeA>uB2tbVTSA5`-G#7}3A#W&;;vl) zc4Bsg?CafK-E`UK!NzY=AnmJRg2c$tgle__d5|{Q!+QfJi`n~f6q^i6GHMD#8E597)}t> ziR~2nKz#SN{6UGR@`q^kKlqw9YI;_jl7RFl(LQbE(KFj~C$MyTc_rEY=%;LtSH z#qwaEoVn}Sgsl$&0qM3k}GKoQ%ulPC7aP5=$yUdR`w((l9{T}&4Sh4npI{zjJ*OXmH7+M$KvT+O1})*x?8`t`2vjLFs; zY(kwrnOsMFb7`uKgP@g77FXl+iTfnq!Sla-gp$CXr{H*YKf;kOt+Y2np9~O7$P5h+ zmyM%-+r7$XN&BBt5CXVMQ@A8D&=@D_+&DZPi^Aie_}h+R&l{`zZ7(vS;kC~EjDH01 z{X?)L4=c)Fl%ExRi!2@RG?je82vlIT|DJ4V?98bkpUX>QRJ)H!Y#DjWSs8_|5XznO z6C5xi?cX^EF}1qc*fRbr0ANLrIDhO%bo9yxy{kSqXMHLKtMUmYHBWtONg&fg(u%+V8)GI_cc7YO>j@)Upod=*G!si@J%=H zU`~L$q=<+6RK@b7Ijv4|(#B4_hexGYppsv3Vsn%3&saW{fxx8{j zm4<0jeJo!0*GC7^?*3SK3k@$fzxUyOV<464;P0pXqg=BlGI}^IN`qd4d8~=jJC$qL z1P5V>A^{({Or(4S@niWIn6cj;5thFwqdva(8S`7L%kuKHHqs}m-GOUIjy9&I?n(jw!aQb+^J=U;Kmp~zwU(-_5On49uw18;w9W? zUH|iv|JeU;p90`Z<%y}iFzaAdA`OTPNt}eB*fbap0I*f~kQ+&@jVfv!O_M%VA@x_q z?q3eAjPQzDW)sNKT3LTd?*%j}{uf1Lx=9VQOH$+;{#I8}3z0o33)4lboB!Dw0qN}! 
z6LdI6WGVq%YjZZ}3>50f7n<{e9(V@{``|GlBKDhAHE_=kfwmY#M1&;1)Lj{kt>PJNN;4A!w20u z6K6-Ym^NT{bP8tUml#>1tB?XSTS}$%%u^ywofQjQ-xGsZ(^s=zuCT}scybulsRXet z*BR;nfd03z0O{mk4@nm3QV(6ONS`OjI^Hep^<<7r+aGiX9(eH<5KSXGFGoN134dY2I|r)ZB5%l3 z3q-3O;DXw<8d&%11GR?JURMUe_Z2>nmwlT9^s@xAV;&$%weJ3!PD!^8&!mONUXi@G zu&{uTxT<@Ju%a~(f8@BK^19*m=!VpVBSB6bbo|(Pije;>>Y@~3Nzt#G@|PMeifTbTi~y9c!9+OTrg9^BlGqvh?Y?tPY)U+C!?NSc zo0hQmTw1{02Acwq^0^)!i?uPas7ViK0ZshM6tam>%8Kl-{mv=M??>>L4`{b9l~gd1 z%Kva71+oUQHxwg1HkD$fiE#2wpTN=FS)YFpXPNG+s(Ynp@yE9}P}gd$K0!zM;WP*( zPb8O_&~&Xeezq1mRyH+#7123)aLoOQ=|Ar|LsSvOfDXTR9h;*mbsA%$L_-EG5G@a> zo-h^3=v>Yjs!#L_vH&&geU-ZDVR^n9>&I0@S%DPwr`@;^Z)$rF|!pRJH)m(zFE$NV;KY9P0$lgF07;9Y2#u%tA) zt^lML-(%&u%~FQ4>rDK4ogB-E>8hyMB#$p zlMHkWf64AZSzc0Qze33?9@^Gr27sj+*?Y!H_+ys*)K*89;>ph8*cZC=K&~c_C9Qsi z-H>&W|58+fU$K5k?(yhq2xLZ{+e76?T3Xsv$0dx+dkT~z24imREygb-gkO`iD&<*e z?C3+>Xb=lL(?kR@Cc&N}1YnPVU2sPPJM#N8ddqOBJ*Gb|P60Ru`}I>K(kgFcQ8i7Q z_hv>>>F2EwB4_1x2d~#BodVJY(ud!=+q%1#i9rdn6Zu9y7y6&%-E4b-A061Mx^tR;4qrD@wh6Gi^5#hi(S&tF zYk!GFQc6nn$i$QN-utUHV`Dr%D!XaPE?S)xhLEvQrw30AuEIB;l=a>Rg6Y~8SM>ec zXTM&vqB_HS@2}+Vr54{KZ+ev8;O%2)LQ&9?=#+uvTi^})1zUESuCaec?I>)sDjj+K z6<{tJRD=WkQi5VqT@SMDd_&+ijNP^LPaRh+Jj17G>%DJ0{1sD9HqFZU9DkYfA!NPk z6+X7ODZry*2fa~QH?nw0;F#m;mA>|xDBqJ!-O4a7-0JNe=wF%c(@$KB_)xVOXSNAh^ z5B0mL99g<&TRP8-2KO@bG{j0kl{IhG6ztCed!V2OAS``fY#qf z1Mj2ec?9SXR_^_*)-1JmKFq;jq`=tVF<3Lm3y}WM&jfNQw@6iDr@%|+I+EY~+2a|a zkHMH5ZKIfQ@Fi7V5Ns$1wW`&rMyjNV(J*P}d#BG}AREQ|?lTIZt>#4QB7E&GIw}*d zXpOSAPjmyxJ-7g1W9&DVh;W&8s*=}mK(?Yv2|lcUSD~Zxv?DAH9RQLZR_`u`LBnSr$BK*5tp!s3oV|$5gI$cVh#pQnS`JhvG z%hZPz4gPyRA;Cchk9n8iqa>Gf<52JD7G~g(9ZT9Bh`7uA7knd=plO4_r1Sj?hUt#` zmRD^G>vVL^{k2*hqNmPWYO%xIW9Pv>WeSU0P0Xh@M*YDDJRc;KREVzP=ZnU0|AhzX6A*-%_5@xy7lrKO}$ zC{eORSn?U~#2w89#~JS&wC?`_r{>!W8F>_O7OHW)@K=u-z(fqL^uw<+^tz8=m~{gp5Z{kcA~p8Ocf8?ZG#P3fRCYD(xKLC4bbaNxn< zlxcO;Un1>~4_3r#jq2RnPAsX;2O&6E-d@-+%*H>?A3gV8pFUWGn4OXDWIo9I)9}wC z)4<$#ZqR-*B8a>Y_m7^czkRS`K8v2UsNUY}lDheAt*Vaj4Z}`oB$IE|lMuzVxdm~> 
zkrCgYX{`ZTb&j;iW1X7F8N9wB9MAr_UQ?=6mmkTI#~M&JFRN(T;F{R?4uO5Y3U61( zhCQ{x%N(%u3pKwEB70R~&i!{zAs#ZVQuWp~VE&!JXLP7zEL`v?`t3qkgU-SW(E5}K zbJ|uW84c;-GU;S@E9|a;=AKVSKqusRBqG&Uc-`iBJzm$q0&$R`PR1rqvp>`)Jw%(? zu=+9lM``26gK9g%nuGbktyJGHNRwwA;(J_0z8eO_v2vRa>))%7-XYsX(NAzw`CSljy{8{eP$2s6Ra+*?>KOgTK_)%JY&reo^4@NUy*t2K7f*m(z0 z38~9noEqtpeuG8(5n>&S=}>XFo{a;d*PTrA8vsBI5ed=y>T-+u$zui0CcflFV)&tmJk=6A>(x14OTwCF^l`nnFk^7o}tD>iP+1Sz$$_% zS4L31A7+HrQUWneLypkLXTC$1+bMRHf2fPqSdURDyuL!uhi#Phjd8wCL#r86a$-WF zubwz0nIGAZx*tzg(K#Yb&RqJd)X-YUq?%gRd)3J4nu0-zTEr(!Q}|G=QSr(8($F=U z${mWhN$-~{v(VG>?q3wWO(@Xv;j`N3uOO&8D4ll33Odlvyibda_AKKXcj|>Y2pr3?aHwbh86~f@84-MTX z+lKZHTg4I`Ui7jwlS9d1SMl==(iQMw%wx@DnZ4U)L_TN@X zFzGK>DwN*G*NZe&sieYy8yA3mk^*)ti9OB7&LN*R|IOdxZGNem>S&1L{yNU5X}q4+?2WmMyW1so z+1%$iSit+1sfAW54F&{_f0l9ae2A9s!1T5H^By-pUSa@XgDK>}*!=QMET>*wFxK7w zmqUz2<4S$iX_#XsT#&tyKKgUT?L;{I!!UWps1IiOJ2tH8<&z?;^>Dz|@)E(iN-xUr zAB{OJf89AXIzh3`fw~ zdI#nZ;h*72&W8`Uqu3r*#10N&a*ZqvtF<- zwvBs?2#lAqWWXvXm(FPG=Xt=24E@p_b1#FhbI{tgk|@bYi>r+q8PqaRvKf$8Mn{(+ z>=$ZvWQ*B_@4(%3OoaU2zYgGJDhxVozYLrIO1Mu_T2G4^#z~|@1P#=i_V+OZ*w+)! 
zAKI-l7+uu8tnbz(geY!i=-M=#-NTy(-f0Ye>KSl6W}8L4YvEFUWDdDw&70s`Nx24p z_2hdGTDM_HbZBM-8-KgOsCzV>I!lk-^G^f;aQY?zN|V0J_Ou1ooJ|MhK6%sB{sz=C z#!)O~iICq7L>W>=wrlPM9#Za>_BXCbR#WHZT{h3HxilH=B$gXi5iiPp9<$8IX`-U1 z{-V_~e1!Hhm$=;V*J~TRh)2YWRqC1WcI_at1^>_4&pR7;zEB7Yd^WOlJP+U7uJJ6F zKH|V&DVT&F&`1lE8u)jvE>%q~F0~g!X)b@6C6QR!a^?*ZHVswxb!{7T`zImC@bv=Yc-o`l3ecfNo&`QV&qVO*0t?#ountur>Io-jn4%4JbEy5cPu$mfG+b1*$0v_BhJzIKDIYEi99hX?V_PH@I> zE_-B-wtQX}Ow<%GsxR8t8yGrN#<4kS=*H5jnm5AZ5W0%X<|BtzDj$PTBYb!f&Dv62 zWWU|6?G;8eR5+LJ4}Ji5q7fG8yj|u!M!vRas-?V33|bc%I}q$w+gqvhN`2j>diah= zO!r?hoi@Vq2|~I>3~h}p{kgll#aqzJ4sjXKlkmAGyAvPLQH@ndaS63OaXu`R5#Drs zxaHgP=q^$)e87`?q*MF#LG8$ohK0C({-Wi{yY7^5UyfQ$Q?m71ABu(%gyFO?W}X7t zMdV0Vl$V!>J?>S+UtJS2q4HfV4z1bSTZw%Id$qmtGcwv(DHlv8|AbhaYDRv1b#CV9 zWMuVMRqyen%jcw;3r?y}~_{SY-z%}-1ZT#^~qzQ;|(zUSZ}kYXtB+MaYAH}5V-!M&Vq z2@!j*g&kMAXZ`(c#D7h7k{{T*Ht`Kh8rTBO1QAE+&9d25QGI)$W)@T6&fJ;F%ga>x()E66jk-_f4vae+IxYn zs`PNB$E|N5Yk_ayta$tu#>x0ef5LDnTr|{c8V2tOUDd$Tz{=F3rqESV1wxm9UY4qC z9f2AfG%~oL#hf#7#m2PAtR$Ea_lgcW`pU9L^7%iBmAqL6FcEs>%`oswOMLA8s0?7^ z_NTpnMc~Sjdm9=%9c&$7B{ESmJ?k_}e+I|WA;sXs~GgXUE5IH0gNDk#<~sePehTgk~+ZXR7Sry5s&UlUe%` zOBY!2ODdO%DBz@Vs9^YLa$IT+(*QNmpST11Du#@=I)|g5jE>m3TSbxqsw(vS`4PIoTQSB&9o@|mWj)(adGO5B z{to|iEz6oq)t6qN9G1(&MhFFVGrgbWpvTrV%ANnuEZNI!fk??SBI>}r=10v8LgPhI zFag9GhvmI=WU5LHW2+E!fZPED3UKM@>?o29YnrU&7OiHAstoNURL{rfLys1oj*eEd zQl`WsO3rSPh&+)ZA2VDi95bP}w_QqTQ`>VEe1G^=kd3+Qg8e1ZQ;PNEF-W`U@k8Es=ZCmO{VlTI0Y6}J&w0;eG+V|(dian~M zKq)yMIdVsWOJThAv5cPhiqpsP!&iIV2{~)2@EtdZweSdl{0~^~1n^wB9Y;2PIR%L0 z3XArK9{og5!^b~3s(90?4L7$(Zy7XZH5ZE0(TIGOBu71cH@B#P*jS-lG8vVL3x*wB zrEv1(5|I&Q(a~KSg;uj90NRm_-jh)06rl@7%M#PR@tu$~e+(@6pOxW?DH58Fyc7-C0bRL6Cp;qSC5Ey=I8i&E%swsZcPzsp#lbC%4fRX4I;4a9f?~l}S7_#@aKV?SK1#YRxCy z{nbmJg@fdUcrX4RB;eM_u7X<&Nj?&uPJZg2k!uf+flB#i(Sr>DF0x;K!Vw)1J!9!D zRrXW_f>wpgNdl{&If52+=?cRwr;1h5n?zeza+k5*h(PvBGNcs#>A1Fz=#?XC+rTfw`wSK+yXnLBqZ1QoWa z)B0H1zHWILrHN_sGx5JE8r<(ApA3CSpRM*?XF7gZ8$!0IT=m7o(6q1BVNRBh!fEoI zQFHIUZNj#2gA!xog`EwrMx(2LqrwVbh)7?-fu!&Yu!98zs6h*#Qkh2@uL)JA@(pWI 
z7r&prP8*_H{r*f!_}8~u5Mr5Uysdf(O?x%@TN<<5QJWPb3H_<2$h8SFq>fIeDfSm( ziz0S!tj4Xm7`eD$GpDx^5KoLWbTu?Mx$!JDukvnq+eXk6qA4HNMTTURXs7eoF3s|w z&3jiXq&4|gCZKhw_iv|~2MyBq{lE6}q{kzHF9*~sC_p1wrv!vPqr0?98HZQGOou+cbR%R%PB!lv8ds0>zi-G?xZybWwXx1yx~~-u^{m>(Qs`)S)7(; zSJxUmAfhz6Z(}D~>dCIDD3W7Sfi*AMxj4*ypzE@2RP z;_|>W_dl0amIuDS^wrMd=*pfn#AXU}vFfyQ;&K|E6?Q)tiads_U?q39Bu|69 zHg}w3Iii(kEWxBOJ&d)>x3X7pZ$xL})`m7m#DrgJr)6sR*lLMYn~Bv$Vz-@gk5GHh zN)8k!v*A^wMPHY5coX##XD*A56aAn*6~&nO^47wv^kx36O&*a&Z~OFYp56j@`w>~f zg;KBXqECtrN46Ho5iF{`-G+XGYpOKGky>A`NVOcF_z=5vXWX=V(fUGqi$y$Nez#=R zf(wje1b32kB-MX%ar>{SNwB;e(RMNHM_>I>l)`DvHFx*4JhyY-vag|FHd(m)Jd$RN z#^_4OY3=1e8z=q&(~fAaOT0BC`a}v>`Q%z%zB4Z(yxzwEol~i`m`jAc$X8m0(qD0& z7d7}y>?#)~7SnWibG2DqOw>~FcCj;eafizjqCTRP+ujo;igrSpMb5U>))hxi$;5E7 z{;`|plf{LWhBf!!JacB8?fG?;%d}2AH?y*RJ;1>pCY%`xN2jc>eIEBO_5hBFoh3s& zp7w43?kgELb`b=bKeeh9Dg8;z=s++bXwP)uu3#;iUtV@tWPADN^G?!g6w9g=9bt4` zcq3*8hLmI;ox<;1B2|(Y+mF2!3XhTkT{;J^_FB4c!v(I2yV5@uh{^O;fAri(XR<_h z`k6HYcE5gNoXae&N$Ok*#TU5Am?QzkRmW?Ng z2t%%Rg7?>A<>J`(jjjW!#oJ0EF@~9The^GQQJ&{F!-O6heseqTA>I!5^#8F&V6Ii5 z+}nkb3`)7fxEnE4jQ9H){F1N&McXM*w7SoAojAHgV@n>V!D0KBQV6~ zUnK}ntl*9LvV4Z=*Bs>IGn6c3d{I|JLxks*DOr6>i zzslhAb(gkPqrjm)@VTe!dqngU_EWcM^Bd07hftw!%H_u!`gfr1-F$NDMz;;WgqnLO zd#77{JZ~SqU}R$-7t5#rsP-Z=!NK_!L5N*d@_(!#RcQj$IO_)PNJSp0*GN++%_m=+@y}wz5eaK8$=$ zS@69a<`2qc`07mq;w{g`KHs)-1cFh}w1##n06$q)Uq1miZeq9|!$QFZnGGK`-df4~ zAuS=n6mvzAzC)+BR^jB!!=f#`oLQdnB&6+hTz#s|=4Ui<32(_q?fB-rgT^iZ^+*~p z1*l8OfwPr0zT7SwR=HNUvxWOjKTt8NDtXK5|05*L&T6LK>Mkx+B{8#XveNxqrypH1 zThR8vsN>QI49ZZcB50l6vzKL_y_Va_B`48zDnvanZ<674alVjIeRWneIj29H1d^iy(RdBd34BAk@w@6BWT)}@ zrap9G0}0teD%!8XqUOS#ogXg*@Ept-Xk9>Bm2;j~shE$0Tye!OYb(8X!c>2dF1T5d z?%~e+?+@KFKb84`io6rj*tW9mhE=Q_LWb7uEZw3!HKGTP*c;b{c*l~{WIEwqDFR3df-BG0yWlqQI#lYCtBjmeY&rcLim)mdOEOQQ~pgzzabLE`|R+I z(rk_wT7Az2_F zsVtH?qH`!wA;tmP4UXZ>-V^OJRvx*jF`3er4T&q~?NzazVu?1_kHrLj&q%6w+WMaR zn~HmI^%sf@LehAns&=+l*u;My~wBy~5 ztRF|SDne}m8)3^b*%-~_k!Pd)jmB1ZpL>zm1e;aarZvU3V;PYK+cOw5efu 
z5+}A}`XRJ(B9P}R^M3u_n9t`sjNEdZtX)shFhrfcZn_wUf#r?E=Hk{SSDjJXDu&E9 zZ$Yo1?}|&vsX!@%7ag5nVl7O=M?F4e?l`j4R)u{c>8=Qi`AGI>f(KBKe&xnj$H~}B zj{=k<;5!V!pg@~#+>IM0cA7uh6j58YFFlwo4ay!j;5lu;s{sT^@Amr#69YF={Kv8= zx(K=6=rVS#o;AE$K`~sIcyZ#=p=v-Cf8rB{>WJk4(f;dzN zvI?+-hz_*Y2SL`drQ8NVfm8Zx#y*j1eOhO$DkSR4k_Xp{4_zvk)k!rwZTen+m;oj} zW8%hFsvQ6ZzSwr(txI=Xm7 z3pi)X6REay9Ds8pIIYo3)&uBvoum|Zd(Uda2Afp{02M2o2k92x?MLbJcd>M6K9;6w z+)G`nGI`Haf>jpW6}#aa90#WijV?_8KFz-DxgVAIA; zDPAzAx&Kis5Z=pP1UvHCe_ZC1lGFf`7_++|yT~|0eQei!*_)MVJlpS-M76BQ4him_ z;GZ-h1IH9PLH|F-6qhgKh}->1D;IpE19Lu0RA@I@7gPPg&?pvyCH~q$z&$6q$EZQ$ zlA*O*DK{=k4&wVo+*ub48^tg{fUqRhM|TRQvIGT*dr|}+dfYZg@6IRiM>-=*)F%UX zRW8DRnCSFer^PH+q^K;$<;%LvOl47gs;Qy*;Ik&0zBGrphx;q$YgiK&^uEn4s<#;d zd$MM6Ag~&!@+#*oWtOFmT=1ZaI^|2p5zXGqnuu}|YZ++qG_M^%4cz(4^vzk_3)wv{2^e;3BaDldVz|dUl zxqKGKA}vlxV`VWRd@Lm;WhR*Hqc(XMwk_`ZYJ-H}Bf)zK9@E2rjUIHyj-R(#7z&ga_#0y6 zp+y(DQ8|ATS6a}?eBvAh&+qB*{*Fnv07A}5C{ML*Q(d+R(j!+`t6v8jie3UbNW-h; zO}o#uip)*^a>7TP>=*6_Rt#lSXoWq(JOoWBiRiRD{~=Y;J{~Wr0U1^!w8N1CarB+M z&s1_n@=4d*%MyuSK81$f6t&c;v1oTJBT^i!wC^q+2243SrY^$03C19mvR*2u(KebUHd=b9W}VM1LkX3a=P zToitUb_E6t_T7$=kqWD_CThhM4XONjT#_oSKILN=qeGpQRk`nt7mv5wm@KC?x$XK= z>|h#B*9VNgAlo0oI=C#lO2HKA`AG7)=otuKOd?`jC8Y{#YJb3=A|fHO`A$Yk^m5xC zzQi~--CfhiG!L=jDd2>RSaM{Q{A2QAo46m5l0uTzPH-x$nnue|E_gwsdfoHC}g3CHeSZ*ii#g?)P;@e+lc@{{;jzxFb`Zfn8EvWEz3Q=ge3 zU=Cqx0U-LgMCn~WOvPdwXkB3rVAw9wdp0iw=p-9JQU2{JMMkl!^kN*-HpUS+oyJ+PU8j3#=Y6As!X^ z50NBCGH+{{A7Sb*oOWp+T?GhQy^5&_F+?PqaKXYGpu;CG4|&X=(m9v!vXdu^bc<6= z+S64ouFQB~dsEkYA!}hIkEhs(u{+T_W##2Gp#uvex$g%;^c|)zNRW*dyoO3xdSj%b z?$q`)EN&{crd1>cEUS=*kK)W30&zGg;euOvF(~TiRk4$KL@G z_fXzIxiF7MK&@>~hVtGi?amUUY#DRLv6k4UQlMD#yt7YxwUNOuRb>7@*1kF{s;+zc z5kVysX;JBtR*{gDM!LJZyHgZIM5J3lx_f5mlJ2e=}<$-m1&L#0~!BHm%)-Z8lV{yhrb2V9ffnQ@TW=XNs_ zv(X*yt3EoDb9Z2(4P&)$O4uLt{{drjA)&beYqyz`)B_58ZMS#lK4iMNau|z1Sbpjp zYxRX2n731i>hi1nX1TIhfvU`>yVo0%%Md+Dr$)L7U48*Z-)RQqd^7y2rZydp;3^84 za+dct@DCJiFX1j6)ZR39$HoAFh+i|&GsWG_{)*Uzw4w=cFXtws!$+f&eK1DDh0j>y}3>D 
ziARvY)Y$^n&BSCX)cSu!Zo&`?C6K!j3|C#qQUyyt9wE2&8Wnj70zKzvyr!SFj`w>E zK)GFnsHs#fmX^X(%{f0Z1FEXAB8H9XgHLrGC4W&1I+%|=S0hZ{per}3rC3K}60@_DUV;*Oou(^^Qm-M1oa*a*?<($CV76I#4x zXSVbM($nz`b`|5Q8me1pw!lpd67C&7ls9y~I0tG~(^WOa-$~Um^s(I$+nMg;U6B^s zF>Ke#1kZ-93l2y%43t6g$5b5YYt9(HR+X%9)c3tJIecfgmQ`P7`NK-fVkps*>4w|Dg=Mpc-AMan zfuo_n;NurX@RtOtYqGX+%=qA_Cq*u58;qya6`+^1kZUcLMfJCZp&+GUuv4Xr$CscT z+vXVwcpguCxP`4Ni5s@>g1sFpTV=~t`0Og*~LIc;oVMEeM<-lYB#2zwI0X1BO3ByP*Yc;?ILM0`!7 zVX0ImxI7fLUs57C@Wj&MlFrb=rYF8Chxgn-HFRm9-_T)ISJ!Aug%uH(wX@$d=BpPTz_awHNyreskCun`hC}wQ=ngHO)Q@rzqz}8dYSm{7d~XL)L6KXU^bh?||1TPu z7?5^H%vpRRI_IIG0SPUQZ|=T<**%4uWlc&r@xz5AGONx-FLoYePN$Y>J}hoGE$Pbi z{H*hwFG0=9K)Le@KQ7MU_Cl3(+`N8~yH>%}sfE~=-pQPo#=NB6UEQ`BtPC1V6?*Df zpu`Sv`=)L`#QkA_+mZbAQ~ND*I2)dhl|}JM%Qq3`g@-aG)>Oe>pm%!jN(7dp;gU~& z=O5gEKz~EvcHU5b+n1DD$$x}JjJm*r|&$?~vBRHSx8<2|nY9&NWv zMC32URsOtL%?Tl*>XVXD6SQ>W~%d75Bxc5LYWo z&4Xn@_p8;di}t{inx*&z=_YdZDvy!Nl&0Cv{SZGfcv~)NY zrfe$a{Xy-U>&MWbfPqICj$1{VRHYrGGrlwO%ls9PSzHT-G^fRtx-lgF7@4M{qJ9qM z$w32er-io9DjhNjNOVPlGxb|OQU~tOP+V!1y_mCkyos8lkXPrA!_aIguw{li88M+s z_c<##N%XQ>?3N{jp_>;OYNs#!U@Xz1HDma-YI!6NdPzWOHfB8~PahqqtU_oH@JgN= zh<_-K*O49bzt1H2=%Z^h_;Z#>lB1z5k~2Z)y#Ca!f<{2IvEM4k%?kPpprD%G!sYVA zPpdRN+i#{Qj`71Gc=9`y^agafAA;|-) zrM9BgI4K+I^0&RpN)s?p?}3~2YzZR zg=2m630a={a}@yY!-k8!fOZY2&`gfgv5B~Ly2NeHo0>ihMCmdy6 zB8$n=+FY$4rg?#Mq7~I4coFvYBSrqlG0_Q_SCX-U!czwb0KOehe3NQSqt%$6xA9}- zQbK=t?_xlF-Of)h5#nF)Anh}ZGxhL@i+8L#+<{2`2@x3n^O&uKC!VnPk|VQ*V)w^U z8ijc=oZ!z5h}w%!anQWU*#s@}hOXN*Z@M7oD<4Jw!p7+kf8r&@K%;3+w%(lOI7)m$OxyP1%#<0e>2UK|JMGYOrjNKIIA z(SaP@-#I9-tyav1+nc~;8tq=_s2uHFHf=`;|Mn?Nk}c_>M_)>0TKo7*XEU`>)eEZo zNXn{!|A$-~nLglK%9#!h6a2S&K+LF=B#xd^&z+gXX6&G3G;F7mM<_&Xjxja<^uGS; z62#3`%b>b9dT+FHiC@qhZfwVNeKXVD#-jB%Cj_P(e$D(E9Q#5q>~4h=a)dXL=Z^UN z5xIUVOmby^7SE=d9Z3ZWR^ne!SS_@@{xA`9)faZD;&iF_E_+3F?2Ki&(}6Dz2uqs2;9 z0-L|Yo(n7kP+`AZB=RBp&u_Mv#92H081CJ9gp3M?=+au|9yGv-DDS44n?Cv2+^r0h z?b2a_Vr6www_V13=}2@_#8dxjGwlA=W=OWFhV7)gOz2V;{ZLnZ$-;hcb5&x zAwHuTPDq>^s(!n2aepg#GbZu7-`e%SsP5>3MO#UlR?-n#sh}<2Ly^rU>z% 
z485sAjVmJ-OyE&M@fYr>(>+Ow@Wy!iIjF|7`X9;?)4pQd{y}`^)_?DRU<3q?jda$G zp@Cl$BK(7kk$$1&C(;@Fh4S*dai++ze{|YYRGY`JkEnK|M3IiZp)ZlHZavzocA6?R zSfEl*f1aUZd<+l{+bjf+3Vsea1n}jP8C~C*?$X2|-(Beb^jkF=P+39O6?LK!2D4bR z&H1PxUuejWc?dvzp332>3Yn6{+?!VIM*IT8x=}}_+9f4h<)yLuxvJOw-E~m--*p{6 zJwLAr6Mps<@$>@+60Y*6Z+vs7#t~l8t4t>0g)9RHzNQ(jPK1BeZFk>;_7R;^rI0sR z2>^S|@Zt{3<3vp6qDLl>ii^H_Jgk|u=PU9^y-_-khqO4yG9DYztf}-JR9$c(Vx&LekpjpU8NK` z`A6jr!mYbbXK^DUfFEm~z3EW7pCllEUbIdy&MG`)Uy%|byCnjZR5%R;7GKKT2Tet_R2`_O)8=1fYP&E^-_7PXg( zk(`HK2UYO5E-bSCKp8Cuq+{n%RVrvB0_s#g3DgP=SST*1vt$+v+tOXX$`2iNXuNa> z-HM0{rs!w_u&pBETT{HQR=8Prc8uxIZHv~cANsw08JsxB@Urgjqk3TLeMbTD3hb%5 z(RW%mvXB6~e73-*c3ZnHT#bd~{f))V6<*KFpK4!ZMD*o=X7(r2Kn4mv@~fp$9TP~A zEnk*k^I@eVNj#YPe?z#)!mG-1w-efSkyk%kCQkF+Zm+Qmy+lSYRDv0(P2hM}JmshB zxXQMdszi{uK<;G9FIfv|BsJ{(#-@`~(^AGJb#+$<0#L39+@t=PuVo&QwXnT3!}YPA z-l#fdorUVW3nS{JKSXd`I~RRLUf3UK|J4x+nh--TgE(-7W;Z7X7$C(3Nce4udn$e^ zx;;1s3<;Y_y$JMpXtU6l`N1yh^CFBQVu6F=@LKD)KFceCJF1Jj(cY29ae$51Airtr zdu-&KEtkjBy*lGy=oU3Fm>A*jG3lO98-9*XRLm5o8>y^5^|H$%T{V|*Nz>yvrUZCv zGo434r`fo{TA*^MjpH%WH)x|i^S)d& zCJT8fQrKmRFEwBfyzBG)qBTKqL^=kT`_7OShM_d9fW6D=u2u0!|I(g;_M;S?#lU46 z{3;R>QHCuGOsK)hwnf$i;w1t2nm?c?^SpN+&dp5p$-@R#)RaOwSEMkp6|VvL20c?x zOq!5BGPBipOXZD*!&eu$p)*=b;0{awP|(2|bcbQHV$;6Jby2(Q zk6A2-1}y;&JuTaMH@8hho58pI)T2sk!6;1)kF4Pm{#4P-vzlOvh9*3r`pSj32n!g` z_Cbt7I#!qL@b6j;pVD3_F*^aBG<|;f-nzyB7Sbi)q@(05WFx@3LWX`Dg(^v19nPp; z%(C1okr~gEr2&I83pR;uLu`kOV$%60bpc@?J3oOdk=G#ig;9s3GHYBUePd39>5YfY zSc$GkU5I{WjsC@YLUoyUgKs_Qd`+EwnePim*iNI!*{R0S(Xn=Vaplpq((mTyhPggd zHFjX36jHEr4;7dLFOhue9eQOOtwI^x@fYx;^JZTO0JKsl`yE*;JbB*DROVC-29uM^ zjoFI{^92bt*F27QaM+o2vpo8%Z!*Y`KqXVVyO~aZ1*S7E(tGxpyT~@6)wrTGao2Q? 
zm15pLbHgL1>A{}&#fW5BoBH#z^YgrbTGLH8#Iud&+Xy9%b4sdnnd=`t(A{@; z|Jl<{^k3H$HkeZ9HCBgmmm2~QsOre0z)`^}H$cy%P&h2p&58Qn3^m^V{lv#;&PG2C zE*L288k~1VST|awn#^cTme=4-a_6>PAB=|o>}&Sq;rCpa9Y)YU)0!M0kDZbsr=zCm z3Ere=-g~bhIYlGnxe}*Y25CN>by?h>C|aDDGKjF3{6Q=x;!^hgTMe}Pt&V?%8AhpQ z`~v<^^Ywn;9!Bi1M)svQ`zkSv0+OJTYwX34zR$yQd`pk8bUr&%vl+uOyGgR#pwF##m1*%F~U9(0XxdEaDE?E*11ZScId2DGI5xZ-Nt#_bceTSAoCl zVSe%3ygFH$BDq>%u7{Lpzw?|=de68uTfj;IVdk$$MNTN2#tzhds$b&;_}uE4!2I}EY$9u!=V9UhzupI*QN z-(3qhec@ke<8!RZ^H(MigOr{Ko3!$deo(G~T;MrWbGUrxMIumI{$41Tyc4^N!g;iK z;`0RTES5hNA_Fc^snM;!yUAi>FPEf-aJ_@i`wnx4xL@L%zkIMxk+Y1G0Kcd4APX2b zMCEv>>+JGu4?sCLZ)MQBZbd&304Sk)!8K(zcjs;HVGdLl&a)D(Df*CP@*TzfQD##; zY-3k{G*_ZejaX=?B%JqO5c91cs%(fb&=l*lC8#>^-8-Ltwuic(A3cBvx`x`)aTXI;qU#lo^epjeG)6$wqj0D zgMce2Jowv1C-( zU=V{A;|F9p^#qamh@60Z(-Y|RX%_Sj)R~ovwaRFR9`dmEE2)CQL@sUxd|}Kj z3yK$qh{2zjr}*H-6%6{vCg(AQN5HT(kFGiO5t?y)-+2F??pIy_a7me}{^HIdGRJwU zk1k!rTxsYMRc9Gq;*1dymOqN<;02hFNM}%k7`p0<2qDq0dw8|9qywI=!G=~2#$9i3 zFcR?Ett6?Hv3O6lt*(0CNeM6KoB+8va?EPa;0V<^Gc+KMnHPse+I+3qgK*P%%XjUi ztZz7qg}L51XK=@jw|Cgw5yyksYhGkgYD4>=f+S_I3P1kMu#cV_E_Py%)?KtOnLi9& z?6)np_-;g;3R-0U2f!FL9)Ke#eHq6*BH=Kh_7j9Xj$bY?XE!q^wqp-x1ajxjgqF9V zw$~>bLqQ=sLWEbBG8)!KIjNY?=yJ0H8}&wx+Uc1W>`-&C-N$^)Yz_dFV6+=D3#xP@ z5QfYBSamo@Y^tBYrW`tK?A?2Hqx#prA#{0s%g2@Yo0F@rSLrhc3Lep(vJ|qMEXQqL1BY?w5*D&yA?1Wj^yvmu(o-{Z{kA z&Z)t}nU1>87IWIJf^S}Lta6Li)FfPQi6kWV?+oheE%c>>;xwzQDDk_*{rIQ?Ss=1= zwKYNol>zOju9=bJ8X3p}?Edi&Z)QLH?oA2d`*-NlT$sN7LE;VNAz|HYRiyyV{EleDOZ||n# zhLGeAOd)Iu>sDNR>>+7QDKz=@7Zs3s9GPmafz2~{O?|uMl_P7yQl0oIdM(&(Rl@AN zOam7WJZfRs&TGP|sv10l!$hDLRi5=dwX5KR9tqX?nmjp9`Z+i~!T@WI&b$uUb3>gC zfO^8GR}5qrPW8szp)0_Oi6RO2J0M;mT>-jH=`)69j0+GYz3W<7T{ znY++S$?+j99vdQQYKtF8}A+|01)qdLVX%25w4F>^;&aisRq<&i83?VLd zb=_L`VcMC|`D(UJn0V&59vMJ(dSXpCs_05&aJ3GVR&psg-q8Z+W`6`WB59)MfprOt z7m;%^I#6Vu<%qW9{bMO&9y`JR#D0Yp>^GPDAvrr4Gk=Ue+MZLEeBx$)ZFyw*+?T=P zN>&0^h}tB6)qpl%;{DR+MD^&KjXGf; zD@5*(^r1f9@Mj;MV~%@lM?Wb61zC3})u?-ykd`jYc0;?H0Ydg;BHae!k56QaXtfP2 z)TfnNAkX*0xA`z2a_O3{CKi>c>Xq&EnS 
z%a6yeGvTRyE5=Cm)lorEcQ?qQ9mTfWgpSuzz~c?zy5Hl!vkKSa9atyS(uVrEMt4?J z?YRxw+sX&y#>>)H@0pG?()_ULK{wp8SgUAv(9tDqs*0-Aci~Vmniq45d#x_NHBQZ{ z!!<=w(bCHi#cMMseNMTV&ndR2NxDnCF@1j~AXmGKtJln~W3+dPr?P!8$cwqZL{;|d zyt!TKirf`shfUwY&ve2#T}r)_Y`sw9ReR~wr7`#~14phUa=7K!C=}M)AOeyizt;tp zK-IcD4pC=J@vDD$NFDhRT4qp7y1D943rj)Q>ZwX@tF+aSd+5+iu&Uh)bHNkJ)R`BXeSf`iL#d8k#zNT^80ErJ_xCz)Jr=Cef;2_(* zz91{iStgCyrykV!;H+u8EK_^&eeXV-&$ekiqVtk0(cu%6p&#_#rL?0w6>6w0Z|;E7 zQ(y$HOg2-|$jau^d87_zJyoc&j4<8zNVr6pGti5`xe*Gr*sT@n#^~vwXd*!tCl&`h z9O6ig+wJrw;&QD8Q^Coe`l|-_w6)#alw#u(?M`Kj)p_x18zWdCbOOv}2&V>Gh;wT5 zJa@{V_Dp*+-=IlFPovf&&hn4N{1C;^6`M9Yj#-NFiUGMSrssJMicqNMb|8FNnCm!+?B@GQRwkypu*ZN*+> zeX+igwPTM$qY6_*GpcMV2rLoFv;2q?B3NEsvD!XqEBEPj*VHlG22> zln^_GSJWOy`P$lqqSDESf7QMV-Vja3s z>DU3?sCI$&<#FARM49L(vN{=qBeI19gWsoV??tCJvKi`>fNj#pv;to8&djqB1aPK6 zS1YKo?OyXtY70U?ckSB}f=9H{!4aa&SqDP*z7u_fZJcNs&rCDzZX3I*CxF!&S9>;D zxHMP=C2bQF9`*6hCFiLrh6VXY1T0o8FH+1Tya4no{a#EN2e9D(9h6>u6+-jA@?fRA zVjcEA*>T|aMy_=}U!Zcpv%C}I8(oC&nOz5OqHo7rfJSX|^X14(?7-Yn9sV@MbP~@|7q0nVn)nun+AgT-!u!K-a!-h6QBIGkTED(;Bxm5 z*yOr zM2)Wei$WF`@gdmFy?0YL#(T}E&T)QznoOLs zk|Pt5xib_W#X4|K3$Hq&!{C&bZt>r&FsZ>S7!fcj_}g6?s-o-akWO5n0PTQ)5z=?q zWz&gizB?S2>#IM?LH1W#{0SVnX&)W~9wQ|H?$J-yqdcJhxR#HI!~^!wN%#fC*43cW zBwu%XP~5icb0gi8qV!us7<>85T@BjWIfeV#*b@~6;5`?EqceU*eLWKww}COO*N7?= zEp3A-kUuWP>9D|J)GVvK-f?RkG706d#vvIdOyCy%|FRBPIJnP^{m)?Jy3LH?FcCJQ|V8wbXLg!{fmk;zxZmw zlf5}5gu(F4t5IuNq^fKSa6>U5$??rKD&n3NNyS*(L?7XGSeak*1pGJMtU`3%*{irb zg$}yjHAKqp9F{{<4CbsulZTJ(g}ufRIgX89W!+@h!w8DhFm}6G&jFIMw#*^^zt#x2 zK?@Vmpyh7b?;-x6s?JssUU-D{9i z1|jDLFrA0O5;34g$i-|g!igyb`b<2vO8p}ibFvHWzbS(~8?1WZUxAXyL;WBi!J`|n z=c)X%+yOa$c@zS`jY-p{e5n=sdL7Xu_={`rL={=b;Dl`thszx_HyZob&^ZSpzsdKqYRpc*8PiM5ae6x3rwu=QXjX97-i9X7TGt9JJ?k6T1!Odp9MZUtCGh?2T9} zv`d9|8rNPCmeFal_bZrK%>_UDT?}=CK5~K*3F`U4?nOJYCrt#b^%^_260JHF=(~sF&r&<{ZIW0D=~QJR;}uIZ zMULJ&g?)3=f3Fi+H*YkFeDR;@*)Z;nDgCnznP2SFSI9Y>oH%1(1EfehzQ(k`Lb2Tm_xVjC(^GgWhSLzUzFwc$KFGEjB(`?b zWek>Fd%xt{%~~MC){(I=n~_wcS5I;DGpti9gZy?pKx%y5uwvZNZ+DA(z1ljDbUPoA 
zAS{dx)o%CL!ZG3c+S-}DmB`0U@*h#kqE~p4Zk23W9RC3#0NN*%l;Is1c2mZkCKlUB zn}2nU{?M%MOt?^xSJ5r!cHusoGc&vu=DEFs;)?xU)c|Jnm#gE$B;-s;WqpEl1W&z? zAP9iZ!J^BiU9SkJ*c3p$bl7Q-^Z)CA|84ca+^ow|PAKcDIL+m_;7TJ$@QE8>$GI~Q z&fQZIa#SmGt#(iCL`BU1dR&$lkRRRyg+XHm-(Ldr9D~I1!f#uAw5}F)tv+*QLD4n- z?{6ALV@gRnv5HsNJk#guv7P2a8Yu9K-d}m^mNJsbOA8OYe^EOBTE|s*M>t^%*wfqHqd`_4^^5h(%Qt7pOp zp+X{=4S_v}NFu}7cw3Lxy6O6r7`FMx?@g88-f&1yDVO7Uv?I)loc}#!`@;PhAo@4N zN6&ujTQT2<$}WC;;Lm@$<2%~XO{fA*nL+(n9;o$l!KjTJeAiy^xCt5cX)+W08ypQsbl8^slYBxwgd{&H1D9yn{ zRitL)+&%fYyoHC}^NVLj+g_+Tku9T8nrC<;iY7GI2Q~+eRl%d`4VcaDQ3&ldw3V(kHX{&~O zfvTcT105z{*)f<;H0o!skVi>g)v7pEMkc^t$F>8khgI zt?8p)6~_6l$KcwFe&_9jh&)FQhCcR{fiVPohLIz5(!XF1qq3JwEox6RT7z_XJ z;r{b7*(XRie7J7EJ6^pdEli|9=jeF@|~ zHH=0!C&EH3u?jU4R>xeSA^#!l^0&dX$fMcc|Bzc5bC(X7^dI;DT5F4PZi{L&q;!}` zFL(FH{5|*IP6Yor!sloO5HtdmL%5RxRNkrT0ZrH?NFGt1odB-mzd4FO0_V|{;dFAx zJL3Or_{kf|SYri-spO?;2uK=5z?XkF%2_g`LfWsG^%cc`x5v-b`+}$u(B)f!H-xXj zSdr}Se_0b(>fzUtN~CBW4BD~4{{Y$qK$Kp%ho$%o{tY~pbN{8S%0m>{||YYlei?dNSa0%KC$5UuZo_BJH0|{A=a@3~%qLSv!shL&oJi_PkL}by@Z+Ct0lb^YWzDBTI{CUT`P-J>Ep;^| z(`6tb6#VeN>?M%v#*~__%bq6dsB8UgMx+_XhyoTf2mh|>Rn6N~tJ7Hh@2d?Qk1Zdp ziyUXW7+wF`NdQ-YBv=3Twtmo#zKO~|GN*qnG19|?1B9P&2^cY+u+OMJc45WBQMkFL z!NR97EiEnFN6*r037hP^f7OemTT6Sb{MrkEHuQhmsFwcEJ2EE>6{l_Tm`i~WY**F$Ul+% z$_^0o@&Z)yH)7eyOPg@m6<|JO$m-mG&FCAXca=)qWGwA^LN9ykMraPVLn+jap}5#S z@lE4AemTRtSC<}u_FN{|W17)jreDx{2~rw-lf&y_RQ;45J5b-JD&!I1zl@wPWi#~g ztFSA>Kq?&kUsnYfQrxS;vTW~fA0I=Pq#H(4 zbE%K_Ot4q$#6-B+WZLrqR~VAFbofRt(5JRbpXA*0J@Sn3Un#*W!$&|)pqlTRyf7Il zTY}lG>@gxHi#dhQVdfO>sJEq4R~DuqzSCTPA#Afv&_(im3r=G`;Ad)2jj;nr_N~`S z0SLuRMY~C8$5$@@pSkcbc=vDZoZp56!mRnc_6TRk4_*6S%A5AB zjt&^>F`hGt$eR|*JK4muWh(^T*N`APy{-s~$Mp%a@x)af9UWE?YLh+B`AUut(|wN_ ztjXI*0xoINX#drY30?!Y3S8AMcsF#s(hf7wf8vKB46{;YXfj^{&%P&QwJ53*QGd>e zKD$s?&)_sgi~EJ&6z#YbY69%c9f*yPEO5_@*9=h6o??>dx6h#bO~w5$n`#NdmjXu@ z6hz2B3HG+rA+FeMsCwLC)6q*nsVI(ig%Q?{WR28Zw%iQKnt#%cc_lgQN6Us0?&spp@%yi+iCCy^GP6Dy(+Q(0aBx@o3~2j4pzPzA5plxy>PE z*If?_A3||en+3*)$atY2i-KT_NgpTYpyP1*s^93vzfAMNB?C^_ 
zcfIk$Gb)`m+IAV?u-WfPJpe*3B~nRzy#B-#(jsoPrtU8C*HF>#o;qWyW-m&+dP)cB+4w>PdIFQLm7!w$J41eqOP*<+*qL$Mj+^$lbr0bAt zvuv0-eJ~C@=zo^jOEh&~I2f@j31ug9uxKtet4lM6uE})mJx}L`FT0TRXA3 ztA~%rnI}C3)kTkI_B;GQo${Nb{__TAsc-#tJ|6nY8l)V{tf?oh#UY$N`i*f}ud({+ zkz2UU_zM6hx@S<`N-ja4idjj^scPxyX7v~Tb@vqVDvnjQxAwf_7#jsluLGv_CGG+D zGf-|N+g_ZUCthQn?QZE?AQsH7uY->doJPIN4tPpbpy!^AmqH!U@atwGnCG?oIpTU@ zw9;6pY)e;g`}*4AxOFOehjC@`NZY4K?&6bWx=BZz4<7{snF);NUb`_EK3=L)>v4b( zOz#bIv3H2MtRw=aq{E4CWvsSAzanD8s-we6O&tf;S+&xkV30ayRW}{VkIp1QdD*xN z$~N|UmX6p!Tql4OlI72t;BFl?(`*AiFi>$K;yR+R1lzKM%ZJpWTF~Gsy&2zrz6S4V zj=LG}cxgw17Zci10qV4jx-8=;vR?@}Prsb!de)H6X`N<&`Eez6CtYyM?Mob{V5u?2 z+OCObV{eL_pS`CXWK!M_%6cYqs(tI3djNBCvA+Kh*hV4m*tF6T$Guw92sfTRYbv3o zx7WMoj6;t*%K;t|u=61lc3ZM8qp8Ezz-^E62v;{^Vh~qvS7pJo{*Of;?j8<%D;pVy z(t$B1tCgydsN%*=fij6;LtHkklHNG$FdRXH49Oplbk=YMto$asIzb_1tgM~Z_Y-F3 zBknF0^)koQ`L@(F*JRtu!BeLl5wsJ@%=VNz^>yA}T(b4LF6LX7#{<~LV_ZW#hFtR7 zrHD>m3Z~ELoQ&I4gQY|#*Ohdhk`rR)r>}ZbioQHpiph9j>LzF)D96>j6AsmZA+D|i z5cBAYkWAj=g=2k1Jkrdi50s>jquLYSJbad!|Ln%~YhOH4Rkp0>Jm^Gt6!v`@nU>o4}{1Z zRwK-MEg$DE)a?YcZlmNnu=aHil*{<6c7w z?}4*0tS9gFJFQIl(7MvXDVa&HZpxX5wGaK%ZYg|_Eq=Kckk<$Mve&yRae|@NJ+^)> zk=Amrw%*8kC0cekcpJoD?o#@Lf#ejgXASC`$_QN|0q$UFIp4==G#7Z5!x&I1n17+a z0`((x^Y#6p-axdQ9r|oa%R$a65CP2}^W1Ek*8Ji2!^r7l z1zCfX&Debs_W=LR4NHjifCW2Z>A59fBMv22ViD~6yxL;EU)QoObemmcTA8cZ8)|nv$QVx`lys{C`TU zS_*-Nd&E=6=d_xC5H*%7)-D}K{4u?9va)zpOV~Se|D=O``$<%KJfKF@mj8nYJ?M{H zc7onkwCtZBzkf8H7`|^h+Igg${Vo?bYw_LqjO?~t!G%OzPhHY@N{>MP0Z$kwhx?hz zq2~P$+#sy7r|%z#-IU}D`I6F@yID#V7cQH>xWF?sGWqTvb420%9}Y5#_nn1u_(C&`E~uw}9u_i&EAC{nc8f(< zS2PvaZ(51S)jrwb9B;rUFv)#%P{X8;<)z&C(p*|bwx2-ZAa!0w$2(eZ??auc^os&n z{XnT%Nw!5ivt+KKxB~JY5$Gg(zRsmGirCY0JJc_b*MFmpx_SM3j zX`VguZl1Ae^~G38)yrp ztq?Nb=ZUj}sQ78SOo8vB&#ud=A8{@Bb5FLZa&KYXYHFt7p_CfOBGij4BR$iO>An@0 zpSTZy(pJ(y%KUq=2uCuuh)4E_seU-%RYhgUV=a2(28qEK?qs!Yd8i61ZC4i0OW# z5+>d2-rLn(T_t~hXOrKPr>KQNVK=`kjPo+kvRi&b%0oED_6MhN=a(ECx`q@8_tdlf zw|N>`O~gTOo)WKO8>?e^P%63`d$cG~V+01?AD)2D+D* 
zWSry=FQEcwtQcKAlCe3TXR7hpcvVInMLv#=Q>TmX=q>zGbNM>p|z6tmd+7xyBeN(n{*nFKI`A=CkY7)-mSXxKYEXTQV z-te<8_vpv1oC5y9cg`1T8MDI^R-VczDsvlv!o`VQjGWD?3ou&*ezF-W^7 zJ#wa8P?&sgU$b3&Zv4A)QbdIWh7^Nst(6+PS7wQmC>}avR~R$_+tLc$&)D3=_ol$_ ztzVD;6%k!u-?vovJpx5XdZA{#ee@6b>_)~;l~Sd!k}cDVPr+*wD<4| zw|bk?^)Og_U5aG^BarQmrSA(r{LEp#moHrpk8(YjH2j9cFm7la_NfcmpP7ED;#qz2 z?XJa%m(c^U1@}}-BQ@~_ccaR^GJf_pjh~9=U-;vw5-z+SOo`H&?zv7Znq>xga$$(# zgwLtXd_A?R-GAjdjEz}QQF zIgy{;B=-BMw~3Mn3~;%ys_kiv>0V~uqWN@7sm{HQe!i3Trj;B;{|`B5S_qZsK|d$*2xYpfr$GGsZH12wAIb$TH{Wko~LYg z-n`;!s6)$iQaSD`j@H8dq?`4*To(t+vw~k((dTT-k;PrlpJmJa5aB zm~DDyOyPg1lTYh8=^6P0I_qnpnow{PG)2$6Sj`9)dMa;*Z@?b&Z9bYyAx>_CY-V14 zIiYvz$Or#9y=}vNBkk06%rH{7?=14k`pxs?Z6_#z9 zQ}NW%;zf|HSG=;OB2%YnDbM&%Tx(+`%6;xLYFb3*j5Q@OwwE?F$-L47{|dvO9aJl` ze{{EDOa$AqkZX;L>8aYi4rifP?iPQAeLsuSKih}dx;Ey5dnQ+!{*v-wjb%EnECWwW}HK4!`^c#|JH-~CwSeQAWnx1rILx~#XB5vEuRMpYu; z##UrG#fnrPRnn!o(J6|Bo_zMyk64rO%o}Vn3_tMA4whkgA~096?Ctq}#jcR+vi`EkqCEqf|ud{GC_SAz-7TxB4kcOkKzv= zbT-;FhjpTYJs2d1F^{k-PleiFXoLH(Y0?v(8XP1VFvi?q%!=(>K9bN=71lxXLBk@r z<4-Hi5RKnJ9gF2-4Z?eWs1X-zaJXQ8s&OTBGfjQ0Hkv-cB* zK{Syb>BF@n7W+>hRD?~W;@;go!K-!{sbsUWM1U=A&w1E7Bo7u`YD~H6z|&Y z`CflMcYD6d1l8%XC%)2m80d0ly_=d< zVHRbW2yP^I(62V@^N=RDG(V$_Q{Nl5P>R@ktFG0(x2bDp%r7!0xq{Fb*t{@Lfg?^@k2r7)%*gw zj$9J*J~g;FSFN)!%XYW=<<5+lYUntX5E13OGxea?Rgry6Y25>PHCOlVtAEdP44}hS zdvrs8`y?j)>YKs#^dR%z+gHMdMt90p&MV9G)Zn0H$OJ)pO$jNO+u%XlOKDu&{y~O} zFV&;3``R)|DOUQj*$Uc*U}Hh=?m$1kSW@rLbvh0`4F?VF5}OMf%6qPNvMCF69dI`K z@Z#y{G8JRo<$RjaihWaW_DA8o#H~rJVX^w;e}7B>?cYg zd=Way8rICuo;_cHed+E#PQAgX*CVmwn>F|y?lq|3`^d%q!D`lv8c#3Jl*fdDTAdwj zZ1pQK<^70v0V1u-u z5-*tLG*-s$P(7PVJ9+FSJ2M7*g5(2DZEKI{?o+YS5$Rh@_axrGJL20rXpfzU9y|4V zIEz-TDlq%p<<=#=dywZX1N~Bcun&e4M=Y()ib{fkNAD=@#g7L0%_01E^XoNZAa^?{ z2~vxcUd~H8J={JqWh^oBH?$;iPt-5o0-Ar%^jR3)M0P(0m0+I^`3o>hgyMY84DRfE zRGaW_@+y=2z0&CN0^o%k+%BdzycDL#6RG^(bvUhwT5O=^Omx{+_SH#*G@=Nn=}$8>6v}#&+YR+1NH3 z+iGlVY;J73zs=|SJbyqoulL-Ux#ygjxpNmPu*YSaKpDHolx`Sb$lLGPcEvkAx+=mu zkJAE0&la3;KIK`n>PUfFR4LL&-`Mw+hVj{qLIAv)(wANirO_@-OA 
z-9e4`iK(KOrP1x+E$P+#ox8QO1CyG!fNP3HsI5vrZXgKfcKWE@e1{HHPCAVefD0E_ zQ1gr_czR&SHn!swaFU%#tKL}$uecOlf6nR9pS#W!B`wUf|f z?A>+g-u!?F`kG>Y6X+xl{3%62QurpVQhkJ8B${7`!oq zm=ISd$YPKczG4={6?DUT4y&TNCa}_CK1ad7^bg4r`SJh^U+?$JjIJ6M` ztatwE6;b`PL$UeNg{7p+#vfw`I-Kf}dqGgj+EZLyj8w)!;lz39K)(;Z=A?nbw?I2i z|B8}>Q-{4M3?cJU|+Q)p{*;LOIqUsuj@K-Ht)ab<9^prB2ecwUO>`hiiPrX ze1Vci;_FCT(E-OO&fDR$TwQu&ng<0#&On^vBUgko`_ZKIuwtUVfKGk!2!CVhK*){U zZ1;f8Msm@B*u^p&F>nZa|k3%mo0Z*H~+HA5Ej zc0=iBpA5D}x_Nl25{(S$H6(Okqs;3`8^WGdw(?lUKL&KIf(tm5&8Tq%ZVaklFO)8P zrtuUE#7y*N(l%?8hX8b?~a7-tk#dW`8rW}uOeUErK| zi?(A`WDrv7PFjj2RW5DaFeN%i3#v&8_%-vz`8k)3>8<0g)Zz`cvd9{+rUD!r|7`RfcM$p@Bi+{HeL=d#end6tajK-1? z-3?ZV`x5`4d4#f0I;iwmby2}p#7f+GQARtSY1^k`QgQRCo;~Lqn|`nH{v*vi;hpJc zCMH>Y#ycOap@dWQ&wPY82p%IXa-9S}U!|;fETVQo1%b3U1+2&9){bzjirm%&|E^|j z9{b_iwhSA-CYwfN4rA^7Q7dDgKLf#!p*O7$jyP1RJ)n=&iK!#a)oHgy8nU&?U2cg5 zTw1Ceib0~JinLr-r6n6prmxa}*YVUf9`hH{J-xPu;<`zv4|QL09c zp-#0wyZJ?9ScakZ4u;*HCW3C_CV>n_S8(il>qqkdO!#hN`aQ4F(^q&(7~}UQKg_IU z6-Gn!JKqJ{Tc!6S@4`jTY+Nh4g#6g~Xya{MgRE{Yhc3yie_!K)tgi*%4nmLzmxjLK zH^a7KX+wzT8^OB%HXk9Oj|dEh!;hCZ+HQYmsS8yc`qj)y z0P2P1wm#dFSpD<$8ndjEy-r;Ib_MNXZ$NGw_`hOd~fAF#|StYsTY z!6!-}l{wk9P%`mlu18?^s8HYiHCqv{3ftcFi3cCjiA zYn!CS?HUK;i?ScBrW1Ko-3fP<;+v$;aD7`T6L?);lE_i+yr7vX4hI<=ydlG#-;wQi zPss`!{7i=R_Y5I)5QSZ0!|Et~4qHph2D;sWg!R*TsBCjX;M^Mg>sDB^nnp%IC0KGoOH7IQ?E~3Eu27}l<2KFN z;iQc-o;Fk$WyquG5}|UI=CTLdT`KEZ?(#chEg}GE3MzZuD8Ej!Q1Z{@8cvc&0?wG| z1ve+IB-|f2M;@d@CzDVbE)cXlmHAOkokUvmg!Z6o9`Qj8AqWv?-==Ztu9d=L|InDgM7(?;ipURJeAy% znCuI~v3ogxb^7xoHnLpxw}IReknQncW3cyEH7)A5SC5kQdZm#X!ae&XJh9 z3c#wAVQRI^mV9Y6D=O%$`O>Xcw)z+rsyNB{TY?~w*lJqB_T7e}U36$(prX=_w5Gw* zipHQC&Q}Vdn<;*y3UaqQ8LDay`9g%=6woF^!|l>&P2T7@TA+Kll$y}8;t+?#XFc;H z0=sXXs)nnzAlx_i>A*IYpJFGnAEd)$BtP!KD;$ed&0{VwL6DKle_n z*BsO~2Vc|FGL^yNNb|gvwm4q}>oi2yxW1epYh*;Sptv_A(cf&^Y=cQ{9M1n)IWmU* zFp?e|vb{CZIk_>@w!IH5R{dwfo6EB{)o?AhsOq;Md7qF?0pS!RCV~ZCxZ(fBd`Yym;Uf`uE^3 zR>!}`vp3AG5k`1Lonu4C%*!S1%GpmhWjeMe@!K-y$RR1Ex~>s*&@IPgDY#OT2T0Ar 
z^-cDh`2pyrdolK!?64nEfQ~>gaAyZ0XTOc(&*a}HrMwlL6j|FFZS?S8zWF}GxMro? zaCCd{gDAeY)v;66g4fhdi4GQbmzRb?0SZaDJ$SNR?<`O?ETUP77x)&6GEa269cl0m zoi15KW60dVn})O^+0iyvR5cA;U;;d`gy47;{=uLXM{x0x8C+5)E8 zQ=rr?DHbnM-@Qc;(ZZO;+wvx*rHZXK`B@u{6rMTK?}{VDyOA zQ7w;j>eA`yQTCx~EF=;Flg{WZ=eI0ONDuL} z$pIPi$nO}l{nRs46(kA_CgL>2y#M+~>sMS4K+j0rY8(8{`+uBDEWci>B^@8rXoLQs zJbv#Rns=T2OQN0h1GoEcIi`2Xgy+7|??%%vomKYjOv9^H1SnSVf9N||RW>Pv59s}_ zM;;u=N5|sADCm!WhTH%El4h4Ty{>{0Y1psXc7?6g`H$ae0I~yud)p0T%)lrcqGlG9 zog4xz)cBbB-~ru$e zhqlA&oj+tj3i;(S+%qr3Ck~voMP8dI>2s1s@eY+YsSOA@}GM1|!_)Zx<7 z$ME!l_Dswf@qCCqE2}4S(?cC8yCSeV6vX;WNh5+qV>B@=RRuqH{&>kB zPhHvn5&Gx;Z*1uCuX(@FUO2Zm(Iu9m>CF=tmRc4sNyv{-4<`Dn4&4leJCF-6H81nb zXbEzahVIe2&!`OcJY*K|3hSZ9NK_b&(W)iWy|BOe(1~~*Xia4EV-a;Ct2U{D+hH;H-*-ATlOi*Qr z7i5~M@Y?eWXjc`FjlTA6zD3!Kdn}I`%qY?D$_$T%i(I?!$?R94c&@5ZABbwId^Hfr zQgEWo=IuOKO*yn7?5USsmAJU0()yw96LE0eFzfs*5L1)6SL(YR2d@@pGD{W)dh}=0 zZSGrO8iiLy)qxzCHz+MuNKhJZhjCkS`#~kfCHb@0Py+WL6$l>04G>{9o%&Ts`2{94 zx(8e5r@G)vb8|KE^Ynlx>t4?gAeW<+?gDf?pqZXHWa0F?2x3GM>7As51E6clKL6LS!|sCJu=&L z%}qPUl-xd#E%6#Qz)G@5Qv12JJ1_Q&^Wov(7T3gMV$+m(u9&^fzbkM2eK(4M+^mxu z=Cu?eJhp^+0oBq!87Ijb3K{ep`>VxMH=J-IQ#aR6XRiV=Y`@2Fa@=%}YznKvC&)@{4rd(6O)zO&Ba88r5T`SH>^MUJec;t$6wxjI!t^(uW6d226NtO(HS*yQhp$K zfG{6K%uz$<^@#!6`4WM)c7%qsNOtC{;?b9pK6Xkxx4?<9_;rcxV)&rojIRu-j1KFu zXID#BID_YwJwXP3(=|FO$IM`##zqKuVdCB7ePU)g<%#F94wmq#4tXd4p}Pctg=#%d z=}A8@zOS**px^O8M`AB2BwF!YhP${Q`l=7s4McNMVa{LfH$P#?a;gq>O(pH-F*KSm zF2YrRV|#-4$dPVKS!iQ24~bDQ|2|aY+C?n%m6vVI{ewFy!TfAk-a;ZmVK$Iqhe(K1 zZwd-oU_SzRLX;4#Xge!-Wx!dHJihMzd(9_gWP`U1X(Fuwz#C3??YwX=$c5cbJc&Id%V@G~ zfJO_C3!(yCFvX92$*5Ha?+q`e?HswU#P)>X!J7 z1hxAc0z6G|N@$L5Fv1Ek)W|DwE+N@-SA7MbfF#(zd(N&3>{7_l++F3{T)59G(D2N& z`1uxF>;NunBSJxwUA{Fw@O8#WF)VSNud!1!{e-!OGZvug-=g-pe2NY2Qh81kVYOiY z3wkD4Q0HFrILK@W$t6`n!zG5|UazZaJ8$DJJ8Rx-dv6@^DYV)coR+f62?i*9Z_pD* zR6kPbdi+}6F>~w8Yg>FL!Ib@0+y1$)@y!*tPV3T6_je9iH1#)$vcMh=a6F{)-t|^e4|2!FeR$9{&d=-U({|FpL$9u0$^UHxlUuNhH32 z_()+Y`2N>q0C_{&I@`Wy8$lgw;?kOEv&>B>@-o3$$ne0&K_H2dF4zFTZ=SO*0Q3N3 
zg9e%?MYk;!TfX8^HJ<~?2%wuvGV~X!Enh_oj?7$~4{f|u9bp3kd-BP8w8LTJS_>=R zCfgNcJtNgOVc4u!4OXrYemWV*=cDHJRd>FB=GQm%H84X|2D>+lE;v@*eeX88H$mSl ziE-8P1EQ8YzYfDHX)2;z$$+1|SxqfGzJCfHMNxTnNG6WlC}be$>;JU+R~dl8X-B1m zmMQ{sOkiN6!_ZR@7c92vIzxsTE@lNJgfH~$d^PP~=u;dR>tKaTrfMghhJyk$ zG@u43h23zL#EeAmVWhFath|ZbwDQT-Pb%8@oUnKCd;7A6%V}xQP3RfJ7*`a1kMA8! z@D+K-h-76wEo!GO7DciQuW5POZ^!8=`oaiK4#@-o8TR@bwwgVks=*&1$Kfo`l^gcC) zBSDQLoR4d`Pp@fQj8ey-I)@Q7+dde0!t)`OQGq2l8Yr$q9}C8ysiAy0@}E~&^lH9>u19|os372cJ1V(VaymI^e#86p&+T%xU7Y{SEu4vFLUyCajQ8r zbi-5Dg>*;DUNn+?xdQ#W@7|>4p2!A#jHoeQ1j%AT$VRyJ7i}*Apwg;2WpI)M4{twg zH2)p4`$=mE5=tc)PGvOvD)IeCXYZb9wPHr{V`=Lo0SO-E+^1~HAodCRFW_K_3=*j%AUE9Ie7vT!^Flvtd0#Wc{W6Yn9&*)|MxROp^!dAH6dL~|6mX9? zssXk~ESmJ741pJbRd;AwPHiAV0p_)fs&ZLten?PeRzpOBqs>5+`^5rM*gH$1LAD$T z%9#R9wrCpIi1HUl$vGuU67e?PA)c8~*PV{7sOy$%=!@YvQ`JYX3&Y^Hj~S0o=55Q6 zF=-UG_D)z~O0$S<3)^yW2>8ecbE>HSGsN zTj*GTRd$Sm0*OEFj**`$-m`c7C9N$TJTgImCOvCYuU{<5OVCgJ`o3i6FDE&9L!;2Y zqQ}HHjr87np6i;@8BF%wH$MdOO^1-O~_X(lz;_j5vy*wgD3p^~pT zShiyQUL|{bWiekbK4b7`4FH-lJZ}%{SpOG*(2(5eEne_%xqPc_3P%GuKVgg&Wcll! 
z;wt3fJ*UEJif9TNr|=|2dHr!mVO0K8Wes6`_Ld!bX|>t~bZL^oSzn}DL!BS z9@9xsLHqf!Gb-e2^?`RbT+hl`)Q}Z@I(0WdrOR!yPly_)9>{t?B~Vy!y}#E2I-13n z)UdI*7ko%Gkt{xYPPvcfhB_g!^s5A96R1^#mEPh&yChn@u0-a`x=5WP3WR3JFcYWw z=4v-V{68yiCiMDK`S4a|e@}A3vDza}vGvTb{^}(r#^}Ma=!%K*%}N(sdq6Fw_{>0K z9`_h6L%t8MvIAKl78?sWJ_}t8=UAM2Y+_adn>IsE&ISq<~7mV3~gu4#`#|_|5MlST-)x} z3Jb%pX{vFk1U0vlqPAo8*6Rv@7KQ$(M?2Spm5nbj)P!IpP&HMNru}P?kxS4p&9woh z7<_!cg0Uu;{}s!#rc6Jl(imso;*t9ftQp_Pr*Rh-BqXXcp+c2zC9!T?6TE}|bj-2+ zhM#*N8J}uI3Tj-qqAVEVrWAue>R1!ph*0&b((h9Td#l8^U$u?aRQq8bJ%x81CW=z2 zOe#N2#;BW*lz-rEwG71RmYyaFbLjAGtvg3NYX3 zzJ7tl>UXgFiOak|{Ao`C@rPAw{ttX1m*&b&W<~s{nw22dQEIJB#m~e=m47-LvgB@v z9DJq}p7yXbz&|2K3YCR=@%Ur#H&%Im)*-5kl%r~hh&QoZLXhVb&Q0hY%07zFJ}u!8Ywe~;a*<$Brb$5fV5RwvdZmiJ-+S}Z~Ba|O2pl<8xAow09M zF?}XX2%CO`@of2m{r@{jJV+w;;ET{xU6a&z7FeE<31-`qz+d~K6OQR{;j;ujUD90- z(Hs$5+j~+KJ~-f9c_S*4zNq1d?;$}l1veT&zuUI-99XVbu6XGBdnwP^(*rJA2g8zs^%LXf1CCvl6@4^Z zGnj=9SA$M^Z!)LJvD9@|$fA`Z-5UvG9Y|*J=^}=cIt#416O9QHp$lrH;1!!OlahMV zQJnwIhY1Sc;2&-l`F+vR(K{1=jbV}oCjeN72cNpJ2({u1Us1qf%R=>d8@K=IIl36Z zCx<Y1uxmDA^DQv9%!|6M{%!b=`U^UB=KugEAg}Zu;|EW6Y^^~v% zg9#oP4-$EKadk=8zDu`i@!GNCb@|m3ufCmCJX|+cU@gio5qJ%<9=64lw9PcWTjiZ0 z_qJ`ZZ1hfC`C&@;fzTizzkGToJ7W^HKrZu05h?3+N<-gD=e{A#f+`4KFI>*q2k;5k z3GzPwrkZmeXG792qlcHng#y3FBB*vtHsB=B{nk)_c!1Ep^2QyOG6Fi#FNjed;Q8i) z#Z|ZEt$$2E4O3almJ+;eEQU)yI2mwo%k)PWBH|M~B2IV1%F!SG z2Z`<_aT3qf4C`e635^ou)MK3Q72_l3)*LRdgzC8+$QNg)lU!Vw0 zVQeDFg6>U7-fE8jqShGzu|I+l88*QC+&6dUF~>)$2_}wd>@Mqi{~C_XTs1DS6!#$M zJK8n;$!wIm6~%5w2QGnwodap?CJe~vdJWch!wv*C9hedj!tjL(qZ2#>{N_IG3NHpF z%pL>dm(hl+`QLh`jBT+Dzh7wu5}=J`%*1HlQfYtu+t6Pz%*@6GV}n}Wf`hoGKG@tv z@GTh8`WQDHS!BrHNb@;d~`@`rSTlY1Mx7P*Nb#_(jif(hNqjcVBB+??{h3n#mXimy7HM#WrM17>QW z4ey8WS-y#x+dPLnOHUT1!Ofb+TO>Pt5HU0;06B>TaxX_=I;*5Uo&_`{pJ>0|S?l|~ zvapgnJaqcARK>1@n3VDSW3Ns)8OFTQk^&UelNaxTG=S5k zIcnC6j1z%T@0IJ9Mzq0UFEHtwm!^X!n!vVFqeAIHM4AUs6$oS|Rs2ID z_TxL_&!BVmtzd;uxUQWSWh+=I&-$SFg) z%bY}QXksii3L@QMPnLFoN{tw2qV%cKDxWG_hfIJ`zM1vxc`sjYHH%Oi`Fq31J>d+a 
zX}P$&iM0j3}gk*83k%wN8_#A+@o^^MvIGt3kYrfxF2salHayaSO)yq!_ z%v7*bc(>80p8wDM9WW&bd39U1m> zBdVhCL&CTb`Bh(Q|1=;t59Ss8-=ED`4fMETSu?&XeYr+x#tPfXSA|ZjdZc}lLjv!? z8ggp3vyf{d@+_E(C71o+td#J%-5lo#)whk5&$-UZt&~YP#RU-%H`|i+hO_M2dVgv5 zT}5LHhW?oa7@P zJ1mQk^QT8r$-@iH^@ALjUgRJ?hB&jFf=v~DB`ZzK*VfUdTHLk_IQc7?lw1!4$RnnF zpcQ9mvZNoe;z03ehW*Pfd~kbNCCPp6@wQv?{eDIgXP=sDCU8kY0)pb6*{;30?O@~X zW{3a(nbo25>q&AI8*Td2*9=cGHfgdPxyGfa0mcLy+-l4Effl+IOGR9U@7Zg^O)^mb zyQf&7=+!H39b<%UxXm-jYuqjB>s&V0T{Z*h8IdLNA@!WOy_W2RJ0apdDS+{hc8pgM z6gGU&yn%uqN+;yl;Tb%yc ztG{$?Gv4zSX9-gHT}dgFGqEpw^B%tp*Vxv=p_M ze2&^c`Mhq{eOQn3rChzT41xc$T(yLmRFC;hkQU13qOE%;NZI?Ol32~n8WqtnFUJ}N z+q!@9@2uwJ>^>r@>G)(?o|5yTdZ}&5Hy9NLoNTZ*haO^{YRe+7q*`9RN z+SzSG7PzkBjaTRHbv)@e4lj-y>xk>8==o-i`tS^lwn_?!-u=Es)0}mNKc&Yk#?_Gv zVz}fXF67;S+2!|n;Jcj8oCi4A4WBPiV`-Z3_x`bA^2^V$+#K9O^xMb%x1T}%KyV&m zch?hzNqtCt#I~uHuro@MdKHWXUVLel3|P?-Fs(OT^1J-DlvD>B5Fu&E8npy5fSVm7 z1RkQP4AEyU%JcD|)1TStzNnnSYek}eqSUm=BwE>aQSFaPnCRReZ7m6q1NVc=!9fXK zn_TJynY_L%FVD`r0!NVpeDn`%)cEDYc-zy&e(D!)J_^u%!kfZi0OQ5|AQ5D0$*dDg z5)0O^8iQ#{H%8i{0A8rB{?t&o3Egi+{uS)^ z9s@!|7CH%k@qlMU%k5o{4`o58KW(Gw;`){VGX#AO)$8liPNs0oMg4DA)j|utU2f|) zW_QYzuEy9~9tl2DT#^qaQv%&$pN8YGc=sloQ?$wLnaZ!7x(S3=#66=)Dapff zRVKh9*eh;A!uS{33zu@5t#&)INdZc;k+dJYGY8&st$N9nuQxi0g=(uUcIOb9k>$ly zW0)EqvKZQqnalcI+V+Nbx{ozNoJ6MoDAY92kX9b75Wzv(9s9x*XZUa%N__{6Z-@V| zCSrnmTG4-?s47Y5RoAvWXd?4=xzI``n3olH+(3rrWPANpzyq|i-obTdRcc;`z3|kK z0_vBeR(?Kqn)@`kQ=s^iix<^=!9wl+CEEg~fK4iOc!abIq^?8%=bl#^#>C6-^J5W- z@RaF$eR?s`;Xn*m=i4~at0krp>_z$e&1k^nuH7Pcl_N<)Ua@bZc&2dYW^P51MR#3o zD8mEcx)Kz@_AH~nkX7a%-`S}}&lyINHqXW=BGEdK7zqc7dXb!{VFkM8soqi~!e9qr zIsNC}0aP4yN>~zCnUFI*=2xcP>@6e3+v;Ll(IncNz0j864i*WL^!{h4B9 zv7MJgV7gv95Dsb_hOnbZXh)(&raq?RiBME`{8&p8Vc?@8wIA^-hI$Ymdfd)(nGyUb zeB4BCP=1S*xX1(9<2x%_YCE@`C9$A3hi+vKE4FAVuwte(S@$M!VUB50GOZgK>^`jy`TkSk&|p;S&vu} z?|GrpxlKvY83?%3;wamG#{)ZtUsK~(U^vbGBeX%}Jq|lW>j0U|mpjART!KWKJQ8=9 zITEGe`=GA|O*B(e$9hH=E@7b>5Zg8rBxg+u^T6HR=pmI+|1p%sRl9wDbKLOi_o>O7 
z26x}9v1g1R6G0~m4}%bMGGiRib2s0SQzygR&E1%Xk;rlUjY+_5ed{ZO{o(blf;l~R zoF{;uAe>F)d8uA>`25ssT*0*OV3BqRjwpE{syDo)_hwf>t0)->$<0PMRv)+Z$(%mY z$h)p>X0F-16=jKc3+U*nExi3vB1!!o(EzKQ=_6Fo!j%Y^;W9NHlEf|J17&%t?maD3 z{U_FrNRTeXR&>ovLeUFqOLnuQF&W&1PWi4`7Vdkm@rCRQ@I}o6eT{p=JFZDx+W6$G z&f>{?rj0Y79>;n;SK<+sQe!xvY7wZ!3l$F&5}nrFrQL_xy5!D3OEeQLx^K2NwXH;t z_H2#U;t_6(rq);7)E8A-cM>`+3L4-)PF{7MWOpA}$WMtbxO4IMgO-Rk&OFdTQ$tg$ zYn1|nS3Af7rbzG%y-Z5jz_$m0wlHtvNELb3yl_TDf$0Vu!Nk9@`5wE_voPD%R=gJO zX1G%`D6Og!rQ;6nxXzW@nAIB&E+*R>g8FWanOrmPR)ww+C{Rfm;R zDTl)~mQ6`@i1kv~Q@8kq;$SDAw7#h!1nEZ-cK6vh#zz=Y!v#dUBE)%Yaq^-tFs5-s zt>rF*b<7i^m74CT{^e^6Bf?EdbEbWyiI~DK9~6}VA^_JI-*7M-TpeqNbzqVhB7|J5 z@2V>CuE~yZ*K-&Tr#E$JUa@p(iy&tlLmyCk_U^n#6DBUy*=5DZsrg6mQ^MLFg^<&< zKNe}naIWqyLjV@;H2|W6*e7Iz2Z#Vaz{JnjD1)(N1!I@^Ta3m%o>o7dwz8p6=jf4| z;7OF#;T~nm%=))jxN4`}` zz~T*fmkj>E^z4}~$cPNIsS@Zvm=XckdSDxj<3MWkt1m6soH-U%Jk>FQ25|zkg)JT! zZsDQ>%k&m3uDiaqFd1N`h!8-!QIeQ}U%Kqe_0_u1mna%Du#k$o-CcUpTMH&P45B}U zpAkB*e$(fVnnkd^>&T!IgfVZmHQPX@X&~i6$WgW=5AFRA=D$sym-%^t`>3P1*MF?L z4RwN{eKiT})o*0gU5Y5}||ze;p}W%Ilo#NE+Yni9mKu z^^Gr%&8f;hta=?itR7%Y>ewgRGn9xzKbUq3E1;|0Xw9xj0E)8j6RoIM-=@x4vszR= zmNJa%9*TM_<%xEhMxu<1ppYhhC3X90uP*2kIl<7O#hYxZ#h75oyK*Tv$f`J(@nC|FFJ1szFg`*-?QZHGC19GC*(tdpjJxwISPeD&B05 zdM^i{Ch_~DF+dI*%8uCR)j5UWUoq-Tz=FaofsDMIdV#FXzPQMZ6g#sxy8QSCd%Uu^ zl?z@{Mt$Hnr4Oh14Pi1hl!H>i5?X`zckUWoS>5L#6%lr@fXpd>qo>kpS?I%#_QraS zdeb^M^5IE){9>;FV>dEc;`dxNb(Z^YGUffx%1MtHKd?=r>jvb1oGftB0M?;0kIP8( zk3g(#&RyJ+kIT9*R8g6YdTU_)CTG!|z$LN(2aHp2IfbaAbf7OEGh<~H{ zxsU{w4LO=`XM15)j2y7)Lu{Jk#4$pvTQrjr;CI{ejN)fH{AS*uZ_)ip)1H}@$o=3( zw9Wp!gb_ODEe~-8v5{zsJ|!N!s5eTVhtc;0%R%rUy_9vH3!J#cxFNO*7B|R0%G}d9 zP*z1}ZIiv{_0-I}-OP{FQMAS3EWgB8*I;%;ugMMk@|z^w1!EHZFl&-7z(qMUL-9&E zlRq;9_`b^ae~&QK+_kEm9_13}lNddo_J9Nx=hpSIR4 z0q9S_n4Hy&G?0TF0&}9ST=Pk!e|6Bhg^-W7R0ol0=wheg;m}8dT{Vi`AjOKIE3vTqA_Pgc?@W?`{jSZWiQ68S*#euVpnJ z%lem%x%SvOMG&p5a#rvB@|vJfx*G&@O|*UHggf^n{rNR=B6nT>)5;u|zI%bqqVkS2 z1?j|TBA_~Pi*MzV@yxulN%-ekM}}Y|b7vE-sxP=LPY&V{nbEXHI1lC-mA86ppj3=J 
z_IDv>jLywygw}rnQFfW?B&oM>c!#x5$8}&iyGUU5)0zIa+wUY3OmL~vgDJ&&PjSId z_{ikl6>ccu;9DIt=GPpQUsvjQLtnp%5`HRKkGRo|$at6wqB+b2N`HN8fXqq)hrOxK z_nHqSlp9J2`K?w3&?)csevJ6C+_vB#Av{H}GD26X5Ua+vlKO&SwQF9lQ`khtq0bDHQGBL z&A?M<&ke`NB=03zjYO~FvoV`oN+0)J+1Y7PBG2TN&|%g^)pCX3LC%joS^bfoTAJyd z*j@UZHy3%al@I9dF=OT!83axaXHc{kd!`yn9giqbM)sCF!ND3)W)}Bz10BLty40Ol zwPNmXfRu-kVMhO(2@Um(j9Tyd3H4UtqZWYb3yY?S*nrZTh!3Rk7ASi7aHf0>I4WD!-17J(7@ z>@SFdZoGfMpgfx5ytkq#UMQiGM&t!gKX%9d*BW%~961gHQ%qT@zZf6Yg=dLhjkpvM zu1nuvA{({Xn|}M9`cwh)z&83}nU2wMx-?9YOKY_TcX<-JG=U=p$ihIk(~fU2 zXx)ojJ@iBy`&7W@J`w_02Ln$Gsow=UYxYcaBhtT+oO31-V=GzvZz&OOo4{i=h zGN6DCv*}3-$h%!IPYX0yIme=-bj$=cQZh8;qM6-vlX8v=VVBFA)3DMpE+dS~o3GvB zSM9aJxFRizXJw6@c%FQCUFTiqvRUTmI$8I*F6J^fpEU|8HQ*6OWMjtU_;^);bZX6$ zP$#ffN7DXb;;?fRuob+E$x&aE*23T>x4W=9hK-dRm+k^=DE9)!K<9PJZab~3Poem# zbHuXhM2cQfZ%mMhS1E~0t&CXw)6v`Wwf9vZBDU^X5e2u=8%(NaNY`|YIqy(27g9#$ z)a|U;qWu03k;!2|njjiyLeGwGwEmVz z{IfrB~Um1WF`IG(gR-iD&eHOIs5rY`&YsJYW>O!Y`5|dt7-S`rE0m2-D_*f z+g-Z7y0EshQ{$QXguJ=DF4S1$uS$Af*r6?BcZ1@{RhtD6R>d6GcldrnI}9zHWmEpb zL-xGxnfbixyu2xo?%KJwaavAOJX6nSyQ$jx!UL`yA?ug3H|dzH>?UVL3O60yMWztqK5xqOIRd*%iLq5{LpVF~wS#Zy6O55K$q?mygMZI()kYvEU&HMjx8>AoR9VoQb7dEjoXx2H)Z$Z5ZisjbxJ{Vk)J zVZwOvl9WG;)o*M%>KAUWW8p4~U$r*Yjg3NyK-SxN|NVTivX^j|n|QMotH>CLgMe#E zFryOI!J56jL?+>HdW~g&!J}*t1OaNpzA*#p&L=i-mkX9mURhle#>nnhiWTJ>DuXi$ z(l`FTx=#zh*FieSGfKSm(#FbRRWw|ffU)XVW!G(C|ALi$BONuV^EaS%m{ihw*cQ(2 zCS3Oktjn)*_*;m75^%`C3F^S)r$E0Mfu-9jL&x;6TJcGYTq-Iyou_<)?3lRw^qh6R zatK^IcTeDbJrH2Cf976Q-u1ptvbfXjb#bs@1hx2jiqlAz@kk`;f27f^K{TCDQQZQP z5V5JhT(<0K5&uMx(ZsVL>y@HWrm{*)s#JYcsd#yyko}NgPQ&FXR4r!15%nHzJbPQ2 zLdEnWgOyr%6nW*WzVYUnMI7}SC^30!zIaC(Z6ftJJ~RPWAiBa%k~Wwpr(-ZwT0f(X zJyWkQwEZcrS3Bsd5PcBB6b$rb>}1a6eze@p+GCas`)bt%paKR7N)J7jDZ%dK`y7=c zsYz4nnEg!xMx2UrSFozjoX^M@CZ+$MB81e1_O9IdbAE$0uTbB4nAyg?&fC?0>Uz(W z=I(EFVW%3l^`O0T1^#;z9Ci0Ktm8B{;D(Ir;>PRE@k5MotKG)Kzmt_?5(U1hXhq*& zd6O*%yEgVwzCIz0n&3$ZG-u*Pp*NM1bJ7L*L9-W9EMc3P+^XV%20y7ewLz42m(BhzlwT+~)&+r);OTt=o_*nq2Vq%O^< 
zk_OyXa?u*}$qL$w@=e)anCkvE?8?&ob>P|bdpVnb=z8cphGq8nn+dgdU&0%KB>F*l zoZ$DKe=2Q%xT7wj7*-=a4O4nkHCw?xMTzNIY}7!t?+TM*e&$7s4Q7<)YjpeKkJGjR zLmAvK$9p{fGo`bx8`bCJ`xgy89lgh-F`MVz`?tObE_444z{BF&7hy(=lxs?Ydgj+r z?&Fj1zuujq?ox=x&VLS3twTF2o|25mp>Ihp_xfR;7J_UL(JtR(C)|%o>XKP-to;BS z6YMA;p5Fe?=0Yi}2fjn6>{wE^ug?kDN<{vT8}l+2)mQfzOcv%bDi;!P83M?R4_Kmu z0&*QKN1~*z@cZT&1&aJf!N;m3!p(ep!{FIquQvh#dCf=du7!gMU%++b2c2Q~3^h?e zK1gG|`pR*i&N;g?$BCe8niCtH9?W$&8)_kRlQuuEVW1~?6`@$c)PBnL(4Iu^TTJkk zsuH}w4?cIZaQ73@ESN_P=wd-(8f9J!u$db)o z&2ebC4OuNZ_?R#(QXQnpjzN7rk2~Re<^qaopPKwh_~}gWeyN~DREG!P?H&30F;)N} zSX>Jb4%Y{FTcX@_K>a{`^yKNrZDsM~DZxu(DgH^VmfU4Gt$?)6qBGPyW-ZznGRI({ zmcG+o{os2MX3l9WtLbK4PoFZyKOXLpiH8}PAq3-*>)1bLpq=G7g0mL>=5_*?;1fJf zs=@OIHhD`O0Lvqkvr~)}F#R$^a!5kVMk2;_{>m=Ace7)ID##W@JAr+yq%}zk+wI`y z4(<9P5x!D1F+%}vhYpSURV}8+$c?m}Uliv1BUDw`=Jq+{n~W%l+b!$-GKtVUEZ;=| z$tC+dsvIie5kLF6$@c@i$pQLC1pz1kr-4-SU~0+Dm$H`6Fw+*jlJ{^vzI@>{l-B{M zKDpYRUX9J>OhH`Y?6(iWTT1E5XsKW=hZ*LqdHutk)DRmpc9hb;RO}>9^zCqJcgg2A z-2mTxgm7NZfU*bVAsXE(aFEMmU>DiGHVvfmWZe~0UcvGZ#|VFx3F7^a1+ zz&Vwq#9U)OPwLIe_5A3<9hr&eq|c7U^X_kT^9C@RX60%o%ywu)t6G7U(uoUI{lh_cwy*c{kNFoBL9MQ=)zmq>W83K;ti%g0ed3@HJI1zdp z{#9qaQ2vwsx(LitdpIk`lg~@*_lMu1;LNs*NeV-`Gu?-{^ih zaVAaxR(9HUfl*AIChEk)Rf_dP4ZV#{$#TFj{iz^rS2#hsOVN%IS?&wUbFhu}>`Ykq zHe;#v>{Lms=0QOY<2L6}07b*G;`D!AeGITrR_mq}c>Ylvl6u^dt3{=rYn}?2LU6<$ zxdv5!C@aAAUB9URdAp&|t_S#b`Z`Tsi_&y0V?R5Gsop*f-!HbXP=$=$cyBRV!R%+l z`hPTibyO4n`@MvK(xTD~Ma80H^x%zxii8D{1L>5Iju8SPEz&WNlALriLKxjWw$U|U zc=DW9qw^vv9(xY@wb4hySq(tVG&}38dNZn zA^sBaw94byp~3t^o?9FKC#C3%>-_!%c0`NYU0A7)TZceZu=9g7VWR@L~c=~#0@ zua8VDqe}7H)jzDt>Q(Yc_Wdcwk$we-djaLVQo&6N+p#jyAm=>XOt|ZBo5ABwC-ow- zoa!ZAcD#%9L~){m^ZR>ck$nbo{Jrq}f-cu}Vx`&)=?As*M}|I^%wP;(&886KZ0 zcRPFtUix`p^W;s}sz!Em)RL6cb%=p}yuOb0RY^O}^`3Dls)eKJ{qr0R}Ln;=_?bOWo4{BX` zPmlU>?tt$CH{u*mtQ9P~YjVLxBqAVwCx-n~2lR;i>!7}~e_eX9%-~cG;_!4+kd$5i zAu=U$@75sKa&EHIkJ?D%69IfcL}fTmkXHv=&&qY#No(DAZ_hjUnhXO#NyOk%hHPs* z8ybntJX4`vfek5kGCm;idVRdIvieq?OSNZa(U7}pkWMk(7ikME5*F*+cPRJw>ouz8 
zXo1y`%`@h}@SJlp9NO(hIi0;qSUg)5@hbRnd&OKCvT0U0q%V@_l^%UN1aA~}7Vxzr zP7`&;bTr%N|Ca#`mgtnW&Kj;6&$L%84s)2>5Qq>Do1K}fX9boI7!psK38tr9sOjuCs>{;t_E?|?!yKl6hHo;-x zx%|3M_7%w7`KT1wp{jU!A|&YSMbl7KW<8bV2!7#Q8QW>}!OtA-D%E7$=5(Ff(8^4cKpUfnGe_ z{`LgBcwNs9aF;URbH}cOUPDCzBBh(&rvah@Qa#jL515GkI(Jf203Dq_%Gv)gs`{+? zy)ny1v~r8qmdagu)!6N#*B-_{YZMSY$J~u}9ViEy9bV7*QA1`eS!p(=m$oJ z<48wsx>&phh?lvXu7= z3kUp+GTc!L6Cn`f6<%w1P?ZnX7Fv7Qc{y9s-gBXgY$0*VNAeUds|v3VY;X4@&U${K;%~>tC0H@nyKJ)_kOhswKXHT0<@m*31eKCTm_JVaA$>S$d1$y7ie2vtnUGGZ! zyFUk93z04Z%`nUjEH)zpo3*n(GxKPIaEDu}YWM)l^Kt{*{k%JEaz&%l8+L~DF7GmT zyLz9LtC&hFjDW|Nd^oG&2Z^xv+Fl)OzqAuuC4^8p)ovXu&c3g>;NJC6EAqn;=CF zhJUL__hk`Ot&K^@)*2JtTU&P)0K?qs#kA@eXL$rWTzEJ_AN7Z$5FD8ZiQn0IGQ3);Hl(s5vSl{(wO=wAP42Okn*JhPFT3s#^k@fMBsEAQLAgdU zVXEDqoe2sEhdht52Mi-f1&99f${O&|OT!Zl^`CnSfwdh80?I2kPrM6LO6lFtj;+~& ze!j*KkBxPA*J?&)mYGoE=r%hAPjqKZGWPh&v$Ot1iFtpOB6oAdDVXWnE8g5F<@xZg z&cX9;0nI+zG?jGOHMZJA=98jWC{~{G(&CazQArrQJ=A40rxcqiQDN=Ylv0tNtcG{R zRb=+La+ry2;l-0aZ77a;$U(p0@0(Do?&afy)uZNRECxJ3pNMbCRrs5=wV+J4PNjVt zmjB~%t;Vi)CAZAW4`xSIoa5zjE;ojF5!vVxRLki@MMPXRXSXkF8uNqo46kg!%U>(8 z0bBiUTveB2L(|=HifLNkXY;BmbYoS%Mz_nRePS<4x=C>!1jCJXO)*zb>0R=-p8v@; z0zl@;0##zdI@EDrb5e=6)XC{e()+%SW`0pix>IKhfp1q`!*|Ou?s3*Xonx-}XvjU) zhK4xW4H$ zPX8^ZEWz_P2Q9|i(uKx;4DT}v41unNFL+*x%duToms?>9S#Xv0xLe0q&tc<pvpn+t`6V#y40ol6jr(&hI*U;pDT4Z*>e@nO|Rl^ zO4R(hrdVP3Aw0k^O742>{&VFu$w6*!4HLH>R?>|gxDxKQHt7iLKfw4vX+rGRHhJCWzq1fEIB5Jm|TGW&0gE$bb zSfU|9FquvkUTe0ZUq=KfBbYX38OC|s56ZMNMY;Q159c^OWkX8_7Y5W_BM}a(R!uSf z^i9-X0UC&BHZ8rvAh758`MX67h-?^MJVj`vcoX@${zi*9~7EMm=g(=!F}37W(2IxS#(AYH@Nz zZM2Zg(^JNucI|I2P5*s&W5xsNSc3cvM;ZDaPwMH31nfta^ozio4J=PA|L5u*4{cNK z-gW|RwoU4m2ebGjwukTPW&NqCRPXmZb7;3uC|P^s^!>FR(Bbg8!rgCtKy{9qwCY8v zM?kx~p&sYLHP5!QowzGM2-3V<>5HD+zA?m@$f55xE4GJU3g1siethujC8s6GcHLpU zdmFE1I}|>|IQxSU5Q1f`H76WmoIHf;R{J+^siE~O)ttwgI18R*20=@!BB^FQw|RFI zu#Dll)99l}pSl(g`Slrparve+-dV@omea*|Onhe=O0%1-gh2Jt6@?Jo_B24%y;T(s z@I$&s{pT0fPOW-PE)W8a7qHy;`X;Wf=MN9vz433uZWHW>nZv%dRU&KfXs4%(J*;aH 
z>L9$!q$a3oG?XwPI1CFos|A;#Nwn}s7x@AV9wf!}oA67^x)xR&L$A~=-l+{1sAxf%LKUTk3f-Ax2 zcHhL7^EH4W%=q)7!jYTQ1-wC!%jwVZ)Ld>2A`aCB4`B7h=V}V(Pjm)?+oYzA-Whu> zhECd>9H#02ex>`5&MT%{@c}OeSn`rh|Dh)7-b$P*-(;ta`kgV);3caJT_`q{kReV?2v#a@vv-C;x)DULgS+hYyLREa9YCuLQy>rAj zkT*9cvw3g%1=9}MTQ8(;;B@i9XV*4$GIic&2HUMXxdHPrvHo7Pv;UghUh}Mm!Kp*2fUDAdBbJZC z)L#HUFwoyltE|I^iNYW_KxdX5Mo*R(t1B;;afe&YcdgYqNs@WK5&9rwk6B9-^_xda zdSm~LbE~R>4Q8!Q{~G%(Db7{4cXZr_oBw^uAW!96kqM6* z-HPu_>37;!F@mqBx>3`m{MHoz@7*-eKxWYLeUcx~V*KHviZX#Kq80!t$HKxHhVI~ab?=o&f4{lvRYC6;PLyL?kCjk^BUm6A!(-TCh%w3 zl)~}pbM|26b&+TKOKz|Ev4oknPwLShPoYzOLou8UN>j(WC?%NE@44msA|>TUFDCOH z{s|j`+SG^P-?;7MnLbH?B)A88)MGw=gTw$TB&$7Kg4)PAhhx#z8Tb2}AAlEaJ0r}W zqc7Hg(}<`M(&3Oi%m;m#rH68#HXf%@xBaYQdY8p!YC8mI&2KKfUBpo6wnq0P`cKB! zgR2512v(1VQLdNr`CTm9cfq6h6z5Lm&(VO0q#DoZm_DYNS|i6l2UV?NbDy5_P4v#B z!zEey=*7l75-47~87+c--|^LH5T&6PsA1!4;Q6~Q{#Z4{9sueL3GC{AWS3To3jtrM zLByhuxaggX{!UJfj$HjwaHhdZ2s3IZHfZ?H9e_61$xkQN?|YUh+{ISA{Pi@l zRKtmb!j-hT?Z=0?rJW@g3hans$4&$2KLP&76;(vp-UAP?QysUL7slp$$SY02JL z`nKj9w^IrrQGDorJ&yc6mYn$1cOd>;8ojLWGY{(y_k0WYAerh2fS z7%cF9O?=AJ9wlvvhYJW74o1(SPLrzhRTub?L_~0US1z~ajN+$A=beYW$BSbx-^j$? 
zpj3j-&aX>+)lKIyRPe5gdz zv|r7kWonZ`5nMpOQA4bGqWU80xbXRsig}BHn?Ubg-V-TidWyl`jZMGkScO*-RUYo` zWSR~7hncC7s0T1<*_xycuRzw2bGHANzrf0}SJ6{n3Bi9Yea)@v)n-BE%xagSaC^-1 zVDi6Bu`WRrBCZ9&=llNDm7@|&n48wc?&RDnkB;B?pXe%zoZceoPs7LWWD^ax&wP{N8EIn6TQuU*a$C7)#Hx)6c z=^xKif4tl`A^CZJIavY`#G^bpM=mOkpu=R@@`i29x};JSIR)nB;Oy|6KT5hMi&P|X z+>T?vbu9e12HZHKEzcys9~|>|Sa$ThQ^AN^b%+N<`qRQYZL%%1EDuW0aI1x4)*}Uc z_jz<1ksyQOWU2XwFxl|X$?D0HfCYOW`({61hMmR3;R~HET6M_mWp-x?(Lqm|nP0JN zU^7OQoQ(PO0i_bj!MM8P_Lu|ZAECRgZ!E;HDbGwcM5HHrG{479e(2u<2 zVNV7SQJXPs6}I3lF?hY>@*mwxB<%}M7r_X74U^K$zBV<}PH_Fg!CT*wP58vwQvvsX za6`o|`fEIw+`>FSh|s+oz4JAID=Xgj=l|oENU5i|Z<%}eW&1_Rvj{CwO5Q_$e^qUn zr!OX?Z3%uo<#6p3F~cE9M?jk$rlqXf_E>gqDX;}cy(*>&$(WaDZLhFen`QNvFW4~O`a$4*D+!B8K`%PC{9FJ0q8tEf zN!k~~jN)18>VE#!dQtmGwwTA^(Hno?QugVcKRo*p&E8&rY+(OUFYd?BXCBL|?|-&{Es2;LO;B+t}WYoRhae4JI#5ZZkkzFi^vDcXzq4Q-KpyRZOg* zvOVyE)2!RfDoz?NJ4~*CM-}8D_yU1R>tAy|ngvY9nUE zw*&W#-c0P8%ST8I{lxOU!l|P>Zf*cq^2`x-+VgLmy(MKeutubDZSAgmky4%bf9B>K zy84{p{Hmhu^LmHKxW6ev=ROZQWQE7<8$d-V$M!w9KP7w*nFAJ+@Uh5ZpSgFnC0&GJ z0I|7|)LrAwvwVDcdYNV5*fJTSxTEJWSKGJ;G1f= zr}Wi_VY6xsWoMj29Ac1l&C78QJ~C64DAr6~lJ3p({J^OH(gN!hzdZ1}iKod!jJzuA z1E(<(k=e75h0{e(_0>iX6X1K^ykWjk&4ktyRbCqVIqmZjgH4WZg5#n5w#-i+>y^%r z8Qk5WZa{#KGnwUQnEpyjj|h0m_u{uh9oA)vn|X6a_7bgqnF(Ic037S68%oEpQ8>XfaDTec087e0NK#FDi?HEm0kYlEIHG;cFGf*yh%>kL`*Zte zCIqIma_p{D6=`Gt>k*SF9Vv>l+8tk$ZAvQ!tz}IzmlZ{?HN}6Rxy-a$al-w4z^mk_ zD&5o+3-T*?!*i};4D<1=p;Gk#hA12d_wtiPx1yK$=Yj)mI*v}}bc$iy(rY2L-ZMvH zOFDfIuF>3}{zMzB)Oq{Q1n52Wr`;IY^KEA6R+z)!I&I4?fC3WG0PR3Q3+^O&a>4eQmQu!O=-;S#@;BGBzV#F`ex})#y_XzV5jl zK;3aEbcb*B!quq)3y=my!8wACZE?-$-FW6Q#@U|hM32}R*O^b#$L2=EsPU|~9@gt# zPxWTjY0?2R42r+CxV@~U6OW7YGKJf({K~8|S~z;AEs%b@e4eiBnHk8)r8BWE{cr7Z z*vqatZ`JQrxjyMab)`e~EA%zg9N|csCAq3wZO-R0dhVD-y zhq+j5m3>;S=r{8f$EG@@@QPEnwa2fjcLtU+VMzzAzs`C5oue7F9!>Kt4p+N`xi_`XXK3a7~ zW^?WPqH~rpchYG^U3s1#GUGN|n@l7%aEPTQ{Vx(5JIl)u%a-3pKC-VEb0d`Jvs`Xt z8vyGXK=oUz%_*zE<*yINSrGN{GT^FUa8E=R7{L)?u=4Y|R< z!%@LJgKhBV^cH&x_km%G(0Ed0iAH1d0;@^$LopA_ON-GF-Ii+eK|MIIlO>iSAX%`g 
zL}4qd?32m$-v0X_?@eIIlK_SB-x%R1ROBHuu?b*Zsl$N0P#5D9cpUwttV=Xy@7L4u z;Zod3_${C@oh0_$2FS%_Uc0M7o;+!iKQ94~Ld=1eYKJW_8z0zlO~Ox@n99Pg{Isqo z#zp=3_s(*%qWN9?#J7k$$mucZAxU&zPGsvYnsLtUqddB0!mF@S!f-d3UikN#xJ9H# z1JU|&3STSH0HS&*pETQW0R?5*j(>D&NG@ru@kE1ii^<=C0*+#ACY9m5oIi{cn6u6@ zv$kXRdTs@HTvK=XiSObamF?it46}- z*A1gu(1>@tHr^cH#L%us&wwFTK--*4dijETprK#fsJD4$Mv8cIXl1UHFl^R*#*)?@ zX_luryP}Yd82S{IyVuV+x3^J)O!hFpg0tfIJe`RUg!C-;j9%%-&`*(lBb?Vo0**tM>F#DXFokdmx9WW%eMLpx-|p)+T>^gQ%%@df z!Dg^Am!3Wi>wM8~trTM}BD%12z&pzk=;^gB;IsR8_XGQdu+lLd@L?zObo4Y?G?%xP zg_CB!+O*#fmYfv=X&VlB{E_#K=5$s*z7_;N{C}*{SU3WMVKxxMuCQwuCIX4imZ#Js z?)7c$)(mRTc%sV=?>7N4&c^NoXkR(T?(Khe+zFa%zn8(L1hz`Iq_Z*n{N@@Aw*t{`PbR1z@-AFI@+FcxuED z`5$%gLLji7KNf<~A*Z?mKJOTRR`;6!82zI?1>5H2>JfMNXE74oV5hPp6uKUM#oVj) z-zZQz@kLNya?eiv7@}RT$tEQI9yxap??-E#@}$F|y_y^NI(JTuA=9tg@Wd5KocG`e zgId$PixK1uV>LW5jq9?BWavR^Oc4iI{PKF`lEsoPo(wAvdLN5#n9ulUO(Np&9J)`0 zEM^F(S}1g9N8iajGNEzY`pYpq%HKc%cI8hmhmQH2H8b1@QD~l7%;K*3Kofw_&+KgO z{*j*^YXtg(ea@})84>kI!`~vaC`hONw(@<`ziaR2zuaxBsSD?p@o6tHaeHyR373P( zZ5)3-g8unzs$P4-yhi`#{-qyj{f`(gzywV1A4Z-rx+z zBX$h{pYR{v9|W^x)^{Am)-@lZCevf(N26!BrKQ*1f7NXQU6_@}Ti7j``CmS9y2(<% zpol9jzrX{HZ$?d8``5T3P@A89-#Ko#o}FVKZ~Rl>tE(K{RsBzy8Q(O^xMn|)a@)CG z9s2S!IodyK#*=L_nTr?gIZCco!;!b$Vx|@Z*sUc%zt7X*&;~PY$%GX*n&aPDs?lbd z63vm^U~00AQbT`b-K=CYEpTQ1`g7Y~ydMCvV$!7_+qJXFRG1b+vR^FX6Yy;!?wP;u%ympPSW0kpAvkm2wsSiZR#H{OJ*SqE| zX@)kiW%6X*;?TyhddrvP%Dy*`xgS#+L7-PV{ww=QhsBL zvXlOgz}e4fSk}8hp*nQqocnO{2W<%6tyo<#pfP5y$J256+QeNJ9n2X{HfOCGN%khq zO)7N_244vga%&BEF!b>?_EB_altXby$5kk^2ggv-Rl%jJun}BtpsEO!)VdXpSP3qP zo}0@@4W24aX0}KMJZ6oHNsra~Cnaorn^ye(TitS==ex(3KC42H-x8;W29es>zjvtb z`7T#|OSJzltK%8=r5N|P+R#g0b|4^RPjoJf)kDmaRYV%z_1B`pvXjGPevE%Xt|P38 z(RVy_4d!svHa79GKISoH%~YX;!#y={+l9)EQm@-tZu&FCKD9VG4ebJiM_;w6D=o$O z4a@rHG;KPuaIvzGS67+ai6;h%X>mqzNVL0L=Y5%26cM*@pVd8>&EAfuWGPC*^6s_k zkG};cT>CK2QTF}gRTbJ#AAXG(RNS!TXSeyK%pgR^h#E~O>r|6=GXbbqDi=v>WN2>=!`SqJTEu_4> zVE0FEWdYK)X5YYnMED*Ifg7qy7OGF_tUNupABwh0cDpwHOmNTPa&woPaaQ01$UFF@_j7UzZpQVKz2Gd169JnCA}#X#@0tmG^Lu 
zfPj}%)1*m3Cl?mG- z(53iWK=a49wwE*KKQIRrI1(1?z(TGle;UseUr*vO`!AS+e(f2IBChj^N$r{cPS!Y+ zL>M#AfLf}y)w0bKL5ziRDEzGKl2tn2@tPGf%R!aUt{%AUj6CZ{(89 zGBYq|JYe>^uy2iA)N1QBw(u&w5x{|`Z{s8>=d_yb z)0FqB;11Vojnd4H}6~n{(W>HvQyYoGVmA+yqpBQ zpXffQ%Gy6guQ6l%Fx{qjmAU<-I#T#8Fw4XwmT??EP;C_MDVokofd-*CrDisf70T# z0W%s}tKQZ9Ss12YCvEKxUwtX_w;6xAndZGph}&t|{Uaq^dtGDCBJT&g(VZJhFRnIh z)YkcTRxSoD2IYG53r?rp`c!(j_)~A-`XkA$bsiPjw6u2rB?V=GX=XNX1%{Y4;CGfjg0A`Tm?|td`99@#P@f;-Pi6VdSXZR9C5XM=dE- zE)hP=W>l5{R!EWwCN!!R#v$<3^jR_o(6=y}AX25Fj1s1-ISU-6}` zs5@b@kG;7ZBi0>Y0Oq;n<_0&uk~^cgU>&!ZyL;o-HuFW*&SvZI>vjid2FgMr$br0s z_C94B=6n!bA=YN#J*qyEZYrm;<9^K_vv+|6(mWdsDgAujEi%ihBTdSdrT8*eA^Z{k zGj#X!W$k(?nl!|`RlR=$a(|0D=-N%3U~lW?;uAginc<}^7{7>UnTvjXQT2$W1x#>2 zrUz`E0BMjXpSBva-|xBWh58nP+QWRomJvVPJAs_H zP8!2Wx>viwr@SKRe_@YMY(a<2Ex6s0!^K;;tu7ehc0}-kk0()r5@EkTGS^NWDfbiyT&+uH9KrbO zRm=Om_=T!d+_8-ZsUM?+Yda-BB;y7hU9@W-)q@cqAX(_$9IF$j=fY4a4Lk2AQBAMZ zyjtO}wwUvh5^A-YlRnkGruKxwK0iYe5P|7 z)$Eh^ATug}q!3p&{qCYE7F)x^tmO5ku&tuMb`t+ypY~C1a>9aCc7Kt5U5H-6JpM1Y zdvS_s>G*WOF?*TJ%N@26+<9hfm_KCRghD*?ErcMiWOI8JKgdw85n3xuHJed+*+zSL zFZc|4%eu9EC4#7N)#N0|{{&5F%c6kPai=z4|B7#eH-E0q6cI?mSt1~lO1rZ-BJy$! 
zC*praLFwKd-%Or6u(2)|^EJuj6Z^iPkXem(cgmvG`@F|x&@bj8C5lc8wEN0GcWgzs zMYs*SU^X`ygmhKh^Zwn~@y#GPlI29E}VQam9!kjoXzEjlFN=4lLPUYzIU%4o$2|0nc7T&2xMz&&!&8h zqW9h#T*pQm8AkA@yyl++Y4tGQLLcMn%Catn#^v+i1QCTgBE=#-ucWjUt;p%TYtF?=03h)f5dBGY7X^uE7l&5CUwR)4_- zo9Gkh^-}YDC^bKDI%44Xp6{zrz1BB;|Eg^iZ68CaqCT&6cs$TcE1LGU?~%c=M1)V| z?IVYpwe`24qSlXDF(z4eO==?-N~vZwGO#^fT|j^0){vLuHHrhxO+_53EZbLQq{Wpj zVgYxt3e?~`O21Lq2PfD=n+i)cFR%;LaVO(U*Z}nzZ=(o)xz>PVW@1~nvUH>C-@C=ulsC_QfA>w*>eq({MxhP(@oQql z@9DOX1%XzM+3siANz=-u^xt=_L{xg}njP1hat1?Q3sMd@t2feAMhjfFDMBY~jXVEr z+JfUi+jLRmEyqXr0_EDXYn8hvnX_y9$!t2tTf{+3+GUrp`=9Z8t;-n zc0f->x!kW@>(0jUfDWuZ&nD?{W%f(IA29WZLQ8-8J>=fa)8pyE;Rm0$s7NlaC~e)I=aShSyn33Ctdyrcj)=P?GN7^ z$^Dsjed)uqYg6SHPwUO9UCLaZ4O>kp7Tei}}zVq!+K_k+v zVdTz9j158)h-s|lgvwgf z0LZFgfcdh$c4^3!GikwCIFB!<#)!#p$CNf-BPTcc@+8Mogi%S##@c>-pzk zh4XyCY1XEQiO2ZROQYb5rhHFSB8<;KT{W;e%W7?s)3giVFA3)X@j?KhA8 zDj?3ND--nVKu*J^j)_fA4}D_N=;ocScmJo}cB%=lCc*TJGnOAuE6a=T_nSoUg9@`V z_u{pqxpfbj{r1>u8gpO&X+qbnzTEbC#cgFXx;_p2$nSdUV16(UNjcjlSNES25fhf{ zlPV*+xpoa>jv5Wf{!d(&2aDhEVWExB_~>_^(oy&+Jaz0BnkVNAw`_J#+QT6}xCLk< z(Tnud`U$tMUG&l;AeUb2(X?OpX>7uktgqURhw7ak`^x1M>%T-sc*R+Mpzfzu+x$)$ zw*Qj%UC-SpjC_xo7Ad-K9>M~CCg*UteZknrw9MX7@bURL&%6O>fDX6jRLTuaOl73^ zdt|M%OJ4}eppLmer|Vxvd9fcBth5QY&;2X@&z(=ECV!9yUHTen^mB-4_+>GlSFM8y zbUzG0h}UO1k69-+@Lhz)WbfMF4$`A+#}JlbX`?FrPc(P^+_R|bGTWHb=@|56n|)UE zK^Jfw-Ay(Hm|%a!G=(MFe(=fbJ;Z^#5SH?!HgUTCJ;2FMLaF(1G6B9Q+Y{@hgu9C2 zIN9gc1FAWb@Lu6(wfDO75O=u?NL&x2-_(vQ9m_h+dCvR+*#C2qdXy zdz^87(=>MAj4J(-k!SKiixqHh^28XlQ&czf5xcp>`RPrN6D$EipQL|WD>yG%g44rA zZC=+cj{V~Z6JZbyRTe*KXyvj1vH|>TPDIS`V-dz1NB#>MG0CKw%U7ak>>xXDf8dD| z-bg3|Z&z`fN0L`2r6rk?p{Vz`lk20U@n)rCMje0k6wyJo(U`Lb-R_dF$$Ow3M@we~ zAx!%ZUaRyCZ@ud)spCuae0^{qljtdbsgSx6Z`|C*A0 z(r(=@-Ikore1ySbsEBuJ09|FD;n;SaR$M(z!nM=Z3k1YfNWQh6fP$mIvlM^}ELxyd zw~ewV0(;KIzI#pS=+V)u80IsIS6VLed!!_~t;<9&=5x98z9yt^#}m}#V+nwLMwgQ9 zz_2eSbXSB>w(9GaC3Kyz+089n%6IC33IzLuSfW-A7i0b8&?U`;-eo%>&}sdn$#=6O zEImp#qD`$?%jej2@Vy=2srSUfVI{BcRVU!t@~d{$Ts7dk 
z(@{=Z$9J(Zu37b`4;OY8J8w4d#Y}ecah>h8nJJE_zhMwIA^hdQ{&@)1Yg_pmtqZ+D(^5>yWMUHul>6hvgS$b z20<&DkHR5ebiukZ-Z9Y3Bp9QZA4pbkzgb;KWKgUU&SlzY(iC43;6Y?=)siyc-b<|(d zGFjygq<0b@T6Khy_!S88z8R7l$oD6^NqA{Xnu_-0|6Cu<)$VYkmyagnq!gTW726cD z13ge*5T~>oXA7n0R@H`Pan7pcwFR04Adguqv}3EGD8U0&3~*-6@YI(mUw*g$p1vK+Cb5A>F%liKAV+E zBdIa>>fL0DrQE)BL(gii!V-JA9_9yA%BOF&620dw;q2*zlTvF1r{hv)rt#B}E z24TDF_yT(Qmq8uifpO0^m1YaOot-4YYkF}*%RgOkk=Fz9OK!)BAPo#=dpGN7VP--L z3|Q%;>ZB^{q)M|$r93@_RUGYUl)7F#B5c<86FzsvxSMy+@y$+ves%t_BJZZ=rN$u| zIhTJkkck|gE~@L_|IT~@6t)ys=Dy%*79626Nf4<6Q0JCQHP?hTq%6!fM6Q~BkbJr? zriTDAm1L|ui#tW$jcb9n9hwNnfbj94jdFetvhk4=Y=Zf`h-QX()%c+8Zuf+pa1I9y zwLW8cwocwWEOm$Q$>G@yc`$utu4Irlz6CI6c?AMeAep^5+?~WDm((BE$%_5zn^qV9 zU@q1D3$ZPYgh)A-98OPGn%fLod8U#6dk? zGt~@6AS@n%(?A~>BWcMJbpJyUzAXHNVeJ-iXvVW&Xu3qdA5bzsp{wXXW@_ zx&`0OA>vA+v>%;=xjcyBW$MwkNR#6+`UGNxn^gL}y_{!Vs&q_WK7Lq%xAbvRN(rO7 zTBT?vqn;?p+}D;I_TIoo>)8(hZnKM1u<{&*H;IO*(vAbb4N=mh$=XTDu}MFK3SPbxRZ2jFbpUIWY`xf zrzQkp$0<>E@tb$2-59_hlVwh%Yts^jFGeup`?-+eh!Y_kelF(zu*oqOH98WXeHt?3 z{jS=v=Dahcn1x7YLG8MeE3LAh7|50~U$c?t)eSp?c?`9x507z-bsOy#nZs_ncjx2h z^pGP$W4N>H2}bw}X}?6^D>siLC_TII@5^~o=rq~E3IL#gHF;eW%oFwr)*$gyGKX1b zy_X7zZeCqOd@C=522i{%H82rLR z6u>+f<4GXW7SAKq10H#Ewd-0nbFG{qj;=?waJ^7I{v9+yM%4OtPK}{~M?bxi@d4vR zXcleWNq-&V7sO(xqYdJXdza zHGW7oCnNtIDCFF3yeUy=_#%ApTJVeqsyma=GSiGU!VK)by#!2G_jt6ECVi z=NvcuTyX#l22TC4(uMgC<`Gpd7WpYHca3@m#T?hAXz2M0a`q}YcT4f{l#K3+vFj3?X@GGWcy(cvAukg2r77PcShWa&Zz>8=&PVQ zx91f@TP4Q+RgelVeuU9(?~qunB)=Pj(;SR1a_NqRy`tz~LqdfAoMo$%|2^wnUFNrg z$OW}6KK?5^rqZx;uN}C|MQnx;<{dGoEB1p&ekA4jiO*nnu%&L-E|(5zgiv_mF-!;3 z$v+=~G0w&ozw#wi;bxfor^ZCJ5&iyCbHKIDULowK`;(x7H`Go+A5QNrIwbBE-N zHUQpNVL38{pY=C2(Z#tgu`4ohv2m#Jsi1tg>Lqx8?UiHBY&J4Ul&joXm#=%vgS?M! 
z^b|x(5!c`sAqF43Iji$|t4~BJ94RmMlscBHx}u!5@nO3Z-M~ z)6J7Mn;gY9MHm*9@9{%qnp)_Vfrz)4fNiEuSKQZF?7!1{qd0;$6RZC8ac#jowxI8g zB{L~uJ4jA@oT>4vwdu~hKEjYn5ei+wn><3|zMXuAY^nwH zU%XgBC>@0RlIT{*bm!FLH62T>DuVhWoHq{TTN~nmzAW05Jk_j2`)qjV0dAxqVp`DF zq#qsuKfKto_y&zVT&6Gv@mj&;ivg+D%cQ@FeNm8ymmM)hj9o7gm4P} z_tpPD(E)NhJPVw6-r0%~@wae4frC8lsOB=x{R#4*`tyR%7h?f1y59#4C(E3_n^!BQ z2hkXYJN2H#U3K|bdg{_^t*(tT5%1uBbTLY&BVD`{4i>CEimwa`lLMauoT;(XHB^SE zp<;-!a~(~)x6n(*BVDxn>c1bq*>+OoPC3WK;KqDP*$#gAK44q^TUMWMoVZdkg(q#f z`*($1X1p-mYcJw7WJ6YX4tqDwACYWig8DKpRf7H1;k6q1|7iNkfGFGNYkC1G0i~A) zk*+0|5F{iO>6DP}T4G5lDW#SMNs(B(Q@XpmySv}@`TgH7`+4u{y6>4Y=bV{&)55fq z>52Dn4O==8s(GivG-2(bzbL93>&47Ezpde~^DH`%XzY0E(IB+_`#+(_06zlvdbg?O z-X;mirkJw&^C5=*ivhhe?&==FhjmMZP{s)Ff9Z}3Xo1SYBcl~nZt1{&ogFgD1yL>m zh~hV(bK72TJiR;e;h8^HS^RyTI{Ek9@WQqtujMx1#~6WmF_oWtYJ3%*8@EDyjAY8a z!F|?GN0j39QP^ik-j}Sf71U{uVTh7f)q3P}cXUznKdRFA$n$;`bx#CZ>*Y07<-w9& zy_pktyZ6y6SGd`DCH|Mkr{uHWGrK|&LVf2G*;zMeENz_piO|Q(X;!(bnUoTJXeUfh zN8kMr%W42wiP7Ocq2}>kFbphirsS72%+vE><@}#O!z$RM{K|jcH9zc?YE-6$5}N9N zv@|+ZeWN^Qek*mr?4@Q+~@`a}`{SR<7}a+uv8Y&c6IuRpy|r z@@SH7Cg^zzy?hr{lb-wa@y(h-Vaj@y09yGYZ6@^pYYhfeUr34I%!cbQ*Y+AZPF4#$ z7wxa8iO=C-L8{>ndNa4L9cY1_|A=C>HDuh=Gh)5#m52B)jbZ!z4WNxaZ{q#@F5Owt zWIA_zg|Di8c+lP0Hw3kV8;Hn#5ddSws$ZKgH&dA+>TWw0dRUErXDF~OK&6nY54Dc5Ydo~92e=3=z( zumW)aVGKdjMSWuL73d;1WHTLZT4s_X%>z6#rZ zbKiSeu^XROd}S?z8TE4DYCDf&?ULBg$EM(%F zOu;eFyxj>z@E($1G7Y;D?Jx;;6=jQu{?trPc>H4n<1g` zOQT5hSTNr0zvQQhNPcV9pjDo)Hm&eJ@GLM_N3xRq<`>(2C&%kQQcA4-9C4`p2#{DB z4&eJ<>%fX?$2xb5V8Zkwd= zIgk~;S!{W#o&+B^=pxUXdv{XBNIE2D9y>B#_#8(+=b&sS+y)Cmp_)5g$|sf4cb_;hsLa?(`Jcu@C_yC3P zI4RrspY}+E=NyNxy(4hSdGu&4!#-QBNfSHYwLzI}#Dn2C3%egV7jApcN)_Tik8V!N zKKBQuMacTSX(f{!T9=dk`QCxh=M%VB(eJW(ShNx(*qAbi=D&Pa{hX=R-9uj-+iA^r zMeZF~Q#0F;uhHhdL+xMc5_DDhy>cJn9p3{fuowx~H$ zUa`AU%RZiGKpZbG^7Zx%c@`n-k%ir&Til+Vm~g%pOp?oIvR^#O1sTNzjd>xEAmL89mmtLA0j2)``8lV;bR98tLQZ>MU7gFwE zeT6IZXL`p=5!7Q;U-(P9|Pt6V>h z$y*GlmyX_l*lsuUAUZ*%MlrbwmbmH+Cke_K+O|~jfw;C`;v<-5al;DFoz?LMfZ;W8 
zx7|%WBUp^>`QuGg1(j@>3DxoBMZd6DLaA-palJJs-0t~rc%iPn!TS|qL)JB-*$wYg zm$+-Q@gr1Usy!^^uYJmKNz!izYGdQAwc~pYX!11dEP8j-@CEoFej$LNOn~UiO3^$K zFpgKcU?7l8$qv|)J3;0TujI9P)Gv)uB3YFIkq=aFP+uY2Uu@`dY zQ>dQ6FHQ#I_>JTFA&SvF$gZ$c6yL8+HO;GN0fx5;%QNS$B&l*tNDqC6kC*zsT&JDO z*MW#bzbPax)rULg#tx%>`vorf#)ckBuiB`4*1NapLKK%@e6Qrq;)f5*pM*W^$bc&I z8Gjri=RqX-61n$k{kP7Otw^88%ieEg zt`K0+CNI;A+cRyH@6u@1)@movXBl{{A^v6)C8CKrPoGreKZyNObfn8y^uWr8HD5mA zeEDS@&J@ua@{!OZ%{_|D`C5S%UufEIT;exE!hZwaIf1R$h5uEw#Q0q4P7B;ih$V(^ z<9WqUK{5*;AH2#v`1fVRRhdI0=vcO`^ocT?i7u6Y09**`s5F;`^#cF6i`qW{_X2ga z%@%?1%NhH#-8HYJ>vTbu!LY4m126%6tM;d+-GcZ2-j%ty2uhW&>okqqZKAf!X zzGT?|y3=(0GFj7=&FmC4)V+gMNDj>l7N+Ss;pX_qLv3vm_)mJ=mA=w%IUQbqobVl) zKh4PpLoXNL&kDbuh3>5Y!j|pe<0dcHW7#JflS9z4-KG98v z?1)z#t(>e-Hm!ZAQ^H=ifq#lk;>T3<^81bqM^&GVpRyOsR1++m;#n=XEH4V?KQ;|y zP+DcD_$E6E%}ME6_IE0bia9}nA8%JfsH22!a}G%~4ez(8HKaJ-WlGLk(6;H-=r-pDi_T&0ZFRxmJb)Ow*PHQF`=Ua;-Tfp#{VU}#jYtEs zm;G@&wdc;SNmP8%(z}IivHamce6d=(sMxa9@IvvYj@wCDaktgaEuN@9gQVoM1_ry0PuG zg*ne}-c8Vf+Qepmbu(1;U(i$kiy=+GmD$AUGN%OACz@cWJN5IxE7xX`CIeFTec(!8y$-9o&#Zo0P4fL^b&;A)k;pDMPEhXq%0;63`ct|W2YWJ~Eu?A= z14O%o!Pz70`Y-rQcw|PoZB_Npc^!*ArwKDhVAx*x0pFk~wCohPCqHD-Qwz6Z7)MuC z^pghtgR}bJk?UV`E!@>J-JO(F)Q5)w4S4kg_Q;eBk z3gwwMq~zWb#fu)FLvWxU2R6cJ@hs*^RmxYCjmsa!Ce`M-pbACt0&~z@GK5$Y2KJ*H`_*C&1R zxYMqrYCH1ChPgT_5q9iVi`O%+V-j!m5Anivr8>MGt~ae3OcC=qNr76PevV69K{#K} zJC|-8%W_kOKBQe{`B>CU;S>X0;+T26{yd+?jgb{D=7_P=(mLlJ8`z)*pWxUlz28 zQ&b=jP>yfiZQzYiiH2Wi{Wz*;)}2n9M>=U;dYF2)7}=YDIwQG0n`9fX8HC!+kR#u7 zeII1Y%nADxE^~2NZ5sp6dz8BV7u@47{kmG10&;;{-(~{4O~GTcXMWS7I!e!(Lof;$ zr#@N?zL~)rt!F1jd1iF*qUC>HDU%DNlxjJfe$fk25D{;Qc`vHZkwc0zbq2o!tkT!s znctbG<%DwAsgd^s@)5UuenponWJM6%TKl~&8DY@N1ep-TxpVw+|NpZ9n2&?S!nGsg z8`t1I1y_kTwAV>>BeNGh-hZ)X6&WBl=U|+QkRJR1iGfWm*V7>vc*DcV(b_HZ$4LHiS_F}H-(JX) z0|hnkFTkpDlUk+`pT0jwDELIU=KQnQ4yE0fTOJFhI@oKiZjfkZV0?Q>e*`8PK@^Sa)D4;WF@@yP*X#1sVGG?A_^6&l5Vrd~_CraB%>+HD%^)sKo$Wa~s9tJdutZMuK`KaLz 
zL!s47e?sp=h1f6G_I)`clVZyMEU%=Zty+CVnEEp|2d+DBQs3i44zKhMlVC{6&3ui^+_EIY5eR+x)KkKlk0LigltZ8TRv91SW2_we>=uL5Lo4dXxgTOg38$r%+irEm zwNL|53iqYXG;byMI~w1Q?O{cGX=!*1E8F;<@sem{JnEs<98>!?d4=loYL)x|2tprc zQaEIgL;?`CsrX-tdT*2zr0QoKf5#P~SvH7c?4+iT2W1!)0#;bxm{%7q zs*&^Gqj=qRqmevc7)n~xx)j_mlmqRhh=Otdu?yV?{#3_ZS3@HCAtqCxjx;pJ<&L;L4s?=|4q zJ?^>lz2fKW|MV(7;E2{bk{`ej6X<+-v~>AkD*lnVu36-|^=K3MVJDMP(kerSQT4DD zWCo}ZsyfvCf#j}AVIY?3*2HKq?QnZWUntAbRd9H(i8z}8lzFsT#M=g`6m^jD?)`fY z$1+}aBJvITagI?IId_buC=#lVI)kStQmV8lY14(nQ?e!Mg3v~TJluh}zxAW>+(Ua_^CpS(I85f^n37X5*0XtL-@CN*CcUWSbt?uvkIpAW)8h{$jLV%c;ovSZejW|_)wfKFV^LiCS zew_*UyX|@vErM_Ma5X_IwTB+OkH0;Yx#OW$HZ)Jqhg&11_#CTE;~6zPi}c{)Quos% zTDm0KY*aE8vRPAkaCrwJU9w?~^QtwGGsX>t`W5rUua0zs)SvnU{-?b3ppxIaZkW%@ z(A*O*uA}mqE$WPB)0N?Q#%FdD>}^rAOuqoVdeV?yK{!qSbMSw?=LDILkHl7zJrSG% z`gc2A=?7p!ed)!_fj6;nd#dRNIG51594P$L<-E-;Q5~ zqQdizxR0(j?QhxNq#cJ_5>U;cq8)09WA6MkH@YK7FQA&Z3OgWQaFdu91XNKGAmZ#| z)k~X**!ebHB#CdfW$`3_q#!}wcrGYU}rl&h$4AGI}QvwFmaNID>U_GzF z+=Y3Is6q2Va=$!IHD-63d1RYCG3en+m1UweWk=WeJl{3to5aXf9O-Sl2ny+;ZVK6x zDn%%wk%nTPywJ58oo;6HT}G*`&0y*OJ%j2t|6wgN?w_+#NkQa>&)BZoiM6rU?J=H4?L5Kj#SG;@G|mT$)A1^jdB#Yb{zURs6MEYNBUIGT%u=i zPwx*VtV12&Kr4K7M4A^mab>QqbLa0-k~k|9=*<3`>%9%~Joa5V+iMFs2QZ4?&pHO& z_bqN7!bH<*queWGSCTamy|v;JAH0|5I0^_K-m{wshvr ziHuuSj5ab{Z}Ue{4TeRZt<^(IkVQ2^JAStyjn}_S?3pD_eC9(~Lo6XHqvx%B)Yp!h zPkWN;%3wRrtU8#Ea82a*DrFGrIW3_nySg{a5CH|gC6!_E7 z@;I9hrH(QT$Gh6D1=A)4RWW~`<3JCe@@8GEbm)hOp#C}egxjn~u2k?QO*p>z*!7IV z#=5;nPQ&?HuCwR+UepZzVxZ?YBzNz%c*a&);agzs(Hulc4(*SSBNV*3&$+8ki&g>l z-D);_s>cUpq!Hzmm;s7+Yw(N%5qk{Sh0c2A3#=O+JrRWg0G3i|lXQ25 z)yYswi%W~wSgO?^6I}%}uQY0dGLT)T4#J z*JKHvzamDJ~+3UH)R3OSd}f1$_+-vVgVzxc!r?s&+*eJAu5 zRd6in6vUpw&3dFLZbwco%xSc_Afk&I<9vE>2 z{_f(x4urYxuk`rq;S;yAm7-<77s&i;@4Uqtn^Rmu47JRJO4Je5kn{Re_g%QSyA zJUEJZbyx=<=1&pE|Ax&bJ>BaeytlpP;+9glc(vSe?937MpCJ1KXf*J%mS@RIKh$w{ z64h0Ie92jJ&wt@H{%^Rfult(V8STk$*S7?C!Xj`vcTI3f@&ls09EaGg_z>Y;(9CiUPiC2< z_g<$POQsJr4=jhVcAns%08@vm)Lda4`Uh#%FC%UKWl>^JzdHXM(Y|EXyLVlbqnOhC 
z9EcK8XW4>lUl!Oo1mF{v-xo9x8LDvWG6?qTtLxv^Y?tPR{+=usQbgx~erfnqgk8bknGwq7yk9SEh$i)HycWsg;mYl#Y0x$>;5Q_LoXdEyUTmDvk^`_t(t1>lCxRXdh&FstY3ydz`3 zlU>~eXZP3EljH_H%Y7*Qh$2q}Wx5&OfTZPlEnZaXRSZ7a{)WW1$|SqgP|qd;v%0Ui zHY01Ea)=mqSJ-lO@l*W;sZA-@fN=^dzKwc(;C)u4)ET$e^$G_6SJ-SL4+3bT07hHb zYn+NNC1ivffICpX%XZ`sQ$KG;%pZJgni7~ma%ILvLlqzEfB2RI(8wrlj8HfIKq-<_ zuTkFCfy|WMv%AaGzeS^LqN!RyUYdfjVhk#A8w0yf5(6X4X9^){>BYM)W&8-&Z)g2O@hMHbXIMp>~wjJM4HHACZPSQ(TQA$hxA9Y&JSu7brSTUmB9` z)YTPTC1Id20NE@i{YCk2&wPIyD!tF+x&2smX(TS|aENnpAX&cmFRx(h_@t6|rFctU zUer-F%Y5yuQ&j#VRIUT>1?<~~2L$yuX zmBFbNsGZk2EZ`xSoVR+!#fZMiwwoW=UvLuGBi?@iwxWXTZSyzyYVq7YYLqfcfV|D@Qx{Ubw2tYoXPi@< z0cnJ75QX2K7ZEDVaX*I0CA*)_$+ zb*s;n_~sC$0`s|=ds1rjameZx|B{e{7z3y=v*oJKQ}z>QF(>cVc$?DzPU0ATY6vY!efxS{Q8J=LiF=#erRR!Ia+sM%N<5X z#D7KechOAF(k$?R^}uU3^$k(B2$w0a1a;^NhMlT=sKWfg?UxwWfF_zU!8<$$dpwgD zeIJOR{r(3B`bDftiVl#-cam3)Sg$iadLuufh$j2dK)r*fe=5(a)kI+d?5nT z#-Ib~VzArD-d=2K0QldNj($%oXwwS04fxCRvWC8w1i7kx;q|xo+F8jhqK1KeOUMav z7;NunD&J+%2^c`xtiQS!H+1@O0Qhr|Fg5l98h}@FKGrGo%NbwVRii1AhJtQk-)!q; zdyGnCNQBV4@TpOzgcAImRbF)jGsS#?4A&PkFPmiWEdm|W2K)20uzdEff8So%cTxY5VTi?#Q2*qW5C$SZ)S=aN#yea>mZ;SU^t#}NBBAq-} z5M0^Z=^r9Tr_MUHcWQvY70o9JebtO+!isowPF))BP!{e!k=fT}0%%a@Ty`wE?BoN3 z$^HwgJBS?;e;2q64WmBx6Sz8oTl2zPTUljotk(RGKcSh$8(yFq%`c#v*?kG12R=dk zwuk394Zq_OtGMfY$|BxZe60q8!b^&E3dz}^u(WaL1mu3QaU&0=_!jwm%W|#%Dp73u zcA=7y?XqsxGNU`RUDDDSFLgxWf>!sHkdKtd*al3dlH!=*?@i8X^4Vg-w;XAC<&Ypf zQ&W-rsVws~2QOUOngu2dx%d6DVHGWq?O*eH8#Q0&d~R8dM%R)WE6vANo8q8YWrwNz zNe~C)zCp2yD4z}c%J>S$uaZI<&4GfS7E zR6m21WI;+C=|txzOCr@tCcR%)bzkL=Wyq*}IdwVcAJ4ph1TH-Y*~Qu$ja@VPZ~+C% zWk(0(!yUgydMfrxf6a!QQEyEKx|l%<8^6_MsHEe)RS!@Qx^&&_`@6B#c1Q|2fga5{i z^cb@JJYmL4gQi<@-NQe+kTWn|E6;C>*5NUWPZQ%@r{d}9nD?$sk4uW=hB4hnaxdHx zF3pe3Y+Wf<^J((!>!swF^*Y?hm6%6bAGX^SuEf|xZ79suHY^Z7$(GdhnMH|?vzev577V+q*K2j09Y;mrz4_Iub=$E|mIF0mZeA-c#vmIU;@3M17p1>? 
z4z;!V&|XTzOdOdmTDuP2o7bs3{mj-ZmLXNhI2jKkz?FhntyY+scaA?MsPF0g{SK?` z6x0k!F_v^>aa`J-qQ`x&y(=NhsNvH%hpolohglYS+l-662@fc9Q#`!C!OUN;rc7Fg zX+OO($)|j9B9EWB&M~enKTYl4Uxd*Y@t+q&Gm4O#s2UYXCe>7HEtazx0Twqb^T|OK`-PoAba4FI8R-rEpU$Kh6-YYyYb^ zmJ}zX{STH5FQUZ`7mKv*eR+Dl3EGGX=-Tss4pjnnP?IU*^I6NUWs;Dv(sC~wFq6=J zw3=f)JdhK11)*T_@I)ntZXHymX*YiPjjUDDhm(7T^tnmM zf(Cco3kQ_kGiYoi&hmGxkBJ{X-u3y`zv6{9xsvm6NJwqKq*Vg49!_ z2;R1E;7Va;RC4m;be#|WnSqOMp;+mo-fsnVE~cxvsM^uw7Oz<1t@cQ~3L865{SeMc zJ{$dl1md$yBh0Mcb};ydzW<_|vSyTOl3HoM@k$M{kP*vS@JH`Fsd}sHCohmRzkxCG zHBNY(@JXjK-g;{XHWSLcl9@J!#^Hw~OuX=^#2io{=HH2+Z;a8l&lzG9 zhv&LaqVN zG!&sySi0lmWE>u4RD8?j6rAMY8fGSk*)EXD969Bp2U7yF(o(v-hx}vpnBAMU;@JE7 zT2pbH@fHsKhs{<-RN-7J3`Q%~DJ+~G1NLO`_(CT?6!Sk6>ZV&NnnU6(>w=S}&SZS} z@F@|^kSW5;Y!-XC^O5Ea4%|1SemF=N+$ljf-SRk9%ub9LDlQK7f?u%m2&(^17~#9% zOwC+aas>(6I=@a4lt!njJh_-ync#E~#!Zfr)?hN?Q2UYk&fGLeQ=j)~CHrWk@uJhr zQZrs%{gTIRqry)@J;W5n)=3|3OE8ByURD3_wjg)0*zUFI#{@+iNtx_jZB#${kB1M% zF;IcGks9Bc(XyFlTh(u;=FPNAUPc;<;cDeqM4?o?b;?GUGCxIVTiTKgDMo8*1SiP6 zGaeERcs>5H*!x9ndi0eML#V0SO+6G?8)|#sh~!+*m7DSc_NrglqyH?luN1yO=dY3# zNPW+FGWXehD674j;+XVCsCr)HdFRF5%!^Nc!9RlmCw^WQo`KD4Vu^()%=dJ1f5WW1 zGN&+sH*}R$`{B*Rxv1TM=2YEJOay}@MudE>N1tKS+U_<@)OQ-+-W+1DxM&G`2pGJm zuuL8xeK|-;5swyN&6P!-+?tZ}JzGyLx8OuuDDfSLTP?;`t_-s$=7OL6` zE2%+Z&ky`$a@B*j7d>pFnXgOCmcPeU6idh8uU0?{p01|+y|smH-h6avVGvh`f0<+S zVg6Sw3)!spncr4Zsx(+?17eR#CHi>u!;j@iIx}L0wqeHVU z&GSxWQuEOU5^C%0Y3%PM>Zb~~lB*`h@j5p#c?F69CYY(Y7ux;eLE)vPg zSGE;8lBK2YJF{Og-IAx7nWl_2@3HojM@`tk9tx~Jm?=P zuF`5MQX!+?9NK>HkP?dw@jMbXDapqh(pvT0@FF&ZoqygE#-7LaBN)?*6Ctwg`g2BaZGpv} z=p%nAK*e=EOI6^kB4kUUf?HFhMtz(vEiFZuB19|H*_~^@)kcO;z}Aklf;Ou4{Zp?{ z;J2OhC)p%7Px|tTK^yEwtUlY%zwNoyIQN5N`&$Zf{1oP~`NCJ`N~CiqG6YvfyO3yQ8GwKQM=5!oE#<0F^Gz(<-DQ;ge**1OW9ND*lyPP}C8A9)$ zn+~U?&q!KM;nH?Z&Lss2P*eA`0+Y30ao0?_-TY4tDon0DxYX|w%Kc*&o;EU!daK3_ z#+|S8ywbcUr#(0od2idD)SNI5V&*ILHDWm%Pz4vo`yG~UCS`v#^m*2jr|fH`_Nd#r5J4RPimqWvuX(H~Z?!KZt~hnXBEv2Mx%ZAgYsqHK zr-ln9tNkU(T9XLNvkjP+%ymST5Jg%e+Qi0|Q*k9$6;u54=WWZ%=y6%Wm}<4bKHtE@ 
z{$5udc%o2mntlFaYW2?U_S2H^oK;Hy5bPpY1P*CWrI1bAaI@)ClCmS)hqT4M9UNnB zSdH=8t~&Fp8o-8^zWw!=B_)51bZeoO$^<>&2a;%aI8sa!0Dw|*>zSM($pkL)xBoj> zAl9D>9y^hMNIb?PSe7J%$DBvTlLduc2De7)lvWTk{1P22{}alvH#=6bc$w_UQ|G#$ zX3WKgPrm#-m~c{Z+-9ee82i!2CebZvI8iru)Wzq#C>uA?jVq6qc@m^5(3~JJdgl3K z@#g0$G0%tvHMt_kJqw!|a(GTl59UIId#qA#jJG83I!mogshW9gL+?~>XqBsJk#+d$ z(3g_U-+L0oD>oO-L}vMT03(BKCLI_&yP#BK%iza>z$7YD6S6>hi}BwNd)}lX3(SLy z2k_Y&i>D!ppEq+3{#|FI{0J+}jQCXziytou!X)583DmlRn>Fp7IG5iu)#{W~a%hU0 z^}`*;W9$Pc%zgIa3Kfl%yCyw1`4>Doi`V%VduqB-L-4-B_f7kwK3l$-zPqYLQqW4B z9jgXhkrH!9j7bNKx1xu?*XeE`?2K2NXHa&v2pp(U@w~R|TNgAqGD#MNboiGnyvg(p z2vAa-8^7qD;(dfw`|Z1Y>}dQ(_Li}pfw6NU)=3+wO|kb5r@*yeE~>I=uOg>_aq9J$ zg9J_9RbDVzw8aWSNXYccOXy&zG&l`oN;!(ILq8>@YufoRu;8X^CRz?NqnVY`C=_T~ zbw&J~y7H_pML&-2!J96;I=HXZs|OB?YkRT7e%NCc)u)Z69~2K`joD^2#)9}+`eJO$ z`GpwkZ$q6{?sM#$VvEvjYhR-iXEgsN)1G$1;3*Mzs2r7 zFhG`FyK5#~z?Hl|cL8~L8}sfA3cRQkUkX()I{C{fjP)U77KT{6E~bk(c4SxHJ4|$8 zhEoFI6`dB;UwkZzKY&wYKRI!8A!636^zG=gye;Lyn{2sy;Sf1#75PVgN&R5cczz`E zeaoDEUY2CltpZ`D1JiQ@mw0pSgyPDV6p~$L%U2vhZA&+dR#G0lU(-ie@h7fo8#f#m zZ}#Z>12F{K%8655&))(_uCamVLU5N>IdzqH8Bp3cslPu0L)0NcRbGm`9=S9=%h>&`Q#+(kQ z*!di8ono0PweBTrm+o4f++BBJ6sl%|!Z)u1@N43=Siv@x$`+oM`=zz6ya~bxB84rA zwx+h8hi$`;+7}Zs>>w-HkXGoh&|51Hf%pqUW&{|;b%x5DH=Z-5nUNWuKh9}*=!s#& z%_1K9{#UElCFC8twYQxn)t|)NznyLek#*}(pcdCK?v?Abg=#%Dt=fiHEisZc{-d~4 zzSxok#)&TX!YFz;GV-jr=#y{@<6N|^zuAP%vDufSfeiQhWD(_L;GLe>NF%Bq`|ICx z4eMKIeqK`#tCV*J2G}Uma~lhx?Hi=I)p!}D|IEAlrO7-)p{R4a1zy@(M;z1s=xXTV+3?hw! zbd|RC7~Xg1)qB6}p}H1;h?LySmW)2KpS-#9LWp#7@pDUgMPqE3x?n{98c5n>SN4==I|#4FcWoI9wT3>attY4{ zuSTU7)hFYv;8BwIOOnTMk13Be+7EUmLivG?<;W$`4@I7>Uf5L`PsACb&_f|1Pl9z? 
zyxfkjV#*z7^B6+R_i%I4i`JVB?(i+8;8ohWCvw=MLRfgNz{xm zIVM)DEV8HKW&T&n_TlSlAJ9^!t z>QDSoVl$DF69a6@-U~a}ex#S!NjpZV--RQ}jOlZnY68zH_y-4X`(ja-SRofdo zwM=X6i7Dzz!qL5xzSGbsr)`D0Fe~AX81Ex^8j$KL=)$)4v+iPo-|mvD-ia}NSP=Wl z-u)!VVT2@YCTFbr5%K+Rp%~IcHk^^Ui@hu)m3PF>w zlfPM6r$fk7XE8n#DsTWE@jzazMFqi=!%>vfB6Yh%N`F@xv8Gkp^xR-Te?r*t0Db3P z|E^5{A;a^XP&iLZ=uiogfvw#sNq*(~lotZTbbgZYJ=uED7h!&LqNeQd7fgPV^al2y zQ{9(V9P4&dc2W-fm=#ghd1k-Me5#}?DZ%3FwN;nfi^Jf9_l=w;Gu(BH%Hz8 zPF`RB0Mnjo;Tm3HsX4&-Ir2ucnT(BK(Ea)5QoMpLu}SfHgT#P>qW!$Er$0c_iZiSIaRzJ(5a}pGo<;;#OCVoJe>gVGAZt zbEm6Ro<+E^hdL6&HXTn!$>35T#h{nW2HtRZ2woJ^)|`6NX+>t7YV!(<;^u0Hi038i zepY5AbcFyqbllhFv{x_j=Q{&d-Y09GdRc^wl4)PdS>u!K$Plgg7@FyN!PBXLskaaJ z%sRG+bnwLh@?h>%FU6biSe^3bWPPCg8_U#9Kj8pMv~a#cFV|cPr}*R}FZfXxH1?C7 zYILapO2p0>jIv&^Zr;pNDY9Z`ud`20HeUMzv3Pfb-H6uffVRI1XV22QDyS^YkRuQt z=r$yyXcP-6Q^$*B;!p?o}Q&PcBf}>A|P}n>%pF@T7P)?|8nb;VAS%<3KE!>{$pV7YrtaoFlDu zugD)cM&N7#eRrEX!b)~dw2~r!2MzYnNUB|`f)qWokEB2!iKFxidilVmx%M$!@G~;o z;#Q54(p=mhHUa+gO3V=ED&_}FD0?96x$cWU{p7s@)f*67tP5hS_V)f+K$~}P0s9UX zw2PJ`?deDX_*UYybBpkIM#u zRx0M>fcom{a8sY25F&A^J?3UKhE#b~L*p_fb+TIxk0A4oyt?^2?1)aDgTmgbz82+c zVecUIq`1O}?}aC3bz^lA&?&Vq92DDbNQTVQPKwvzaIed~XP-yJDDgR{y4id_3 z&E->l&Wv5S(;wh2F47Udw|?{I$~eGMsSbvWPTv9=kz-vV+;8f&Qq2IZ!C$=^nmva3 zpNO=QO?5IgB4?XJ}Xrwm>odt91imi+56JL>&D+_F3Pu#l~i@kf0s= z23t4uUZ%(JVovUpU(+O6E;(14OyoEhr$bz~#7Q)HNS0TUgS5rGOt_*fNIJQ6DtlRW zUa>~?PIQi8*2~mkZ*(W_#9x`#h~Dzh%%?Xn(=)%)=7C$i9t#yNAp;)vFa==7pS>UJ z169=MiErd_W>h@(QuMq@wLW&;+z(&^qu-Lhg{%yL%BszYBGTvPXcni6+?V+UT`%TM zgFqAM3lmO9TnkQC%}Q{giW4Gz-=G|GtqWU2(^xCfe6k{gj@^Y)jR z0YZf=e0|kncFdEl-Wo!M2W@XuPuks`nX%f`3E3ZMRlW(a6yqDm&>}lJac3CPP%=>S7K5;ps zf61OcJ|Lb;JW%8*1$?~`is|9sKr{D-xH-zcI(PDxZCrvUuNpl)F72)` z;!kXOiy%X^73_U)=@1bEZ_8{5fxFLJ2ecNZ%%U;@g^as?aR`CHwLhtQrT8W-8BwV< zwhU?+-jQ~~{Mn05CLmLY%>++HEGd;1$)`~OLJ8eoTiGkNlv2X(qU(m$tp{pD}6 zoYScedb&Cz&+E*h6>-H9d`?-dJRZBfxw>;o9h%yWa!Zbkv@`ic)4(nYTQjX@01R1I zLg|B%=3sOd-a(LTct^gG$DGa6=2kZLPCbrv1}?9nkXuy{?~AbFO)_YqR=RVm9nofq 
zf&p?2WCeGcJJLR6u$RBZuDt(l%|KhM&gFB%)|lrZF8r}zlJjZ7+Vk;6`d#Iu?^92R z`g1_O^li3wa4_z*0$t|frGy!ko$f~c^Tn$}ISO2eMcE+Vmf>G)TNu7=_1jBb_{t#) zb-`L`wm{Cc;8dhAZbL}ypHPtifMlnzCZ@jl}&?!d|ewG>3S>^?n9ae1&?Z{6r#^o8>L5$W!45+#M zmwi)${XI9)zT1#uBEFwFQSZjvXXqr*+-u6hUX{(IAfXGg|L*UvQ=8t^+1pRnl& z0KLgYt)GNAD@%P*k9j$4`on;%_#I3;Y3edN(*u6D>q+ZVoDfh9q~jm^YfZ3<%}UE zz`a=SBeScNFr3~bbD|&ygN}j-ef^E_(5sL@shuPF=*b)o82S#NmayV@UIkV*Z0{q1 z_q}F_UWO!ql_AeNaKca@@F(&6yN^-Xzen2mzVgxOP@8b96I)+MUahd_vR${ga@(zC zbV=+tEHQ_HZW#U_d+#09{pMV2>dc+iP1MZ2d|zJXScS25rqGeGtpvcq6VX=>xXzx@{kD5VLU?UPQLuq= zHKZf0#=%j^bUo7{#V$0=x8!{0JC-mRdg`;lC*^~aAL_37J~e&k9*#Sk>yHUnM`264 zH^Od)F(Y0Nizgl*7q|+oWn=h~FZ#E_0{JEb6K`omq}CKR9d4lPD#PR}mG-wb)70&= z<7y{EY6-BUzu!=*!6AbAgK(jd#?ywq$2HIdlab-fZ{CdiII)xY za+m2>vzLa5gVz1)g%4j<1g9dd|nRf!3`83S^E;&~?P=;N9vrhI+lE-@d7tg8-j z&lZmk3-q<92%2%}e(Nc^?x9uj{d-Vp@XND_Ik@%bMz$PV44{xr)Xx?fa^j>XrFdZgLX;@qEhM08>;(6zGC)i5aa64 zgJt*uYPh&hdo#IIko}3T>DFR-5rsd$Pdz&Z_NwgpS<`VvhiM+4?57aBxRIrItdG%9 zTWZiu*|WZS{vx%-iRLjwVWr$az*p$DF-|gyzbkzEW!OXa?NLLfHQ%Q$D6RZSCN#meuKW0*Ki?g0RYAgj6+lmzqPGR~imx7B zKmFK#Tia6d&@<^H3d5IGo5$zs?t=c9f|>@O^XTE5knX4|EJ&Wi$iI9Y5--8^zDWLd zf?&^}r2_OGEotb*^A8d$m%K_PwcVEu9zmc_7r%Y@Gvy1TS-Sf3Ly5*KPOYQqp@$Q6 z|5&V1;W7%n{M6uE6n6(j^d5=r~`v`R zOIC?>LDbXFkBr_Sv^X1-KU?XF({^gT6P$1S;^uLlCqq0U@@6gkj91SM7j7X-O}QL* zVv2tmX(wd2=H&`y#%W&+!9>A^m%oMzUFMP$KZ$JupHR$P#tj6Hg9blxiKF6{{^)cX zBuET3<#N~G5A@F-S4OLm0E)Mw2O?H)`G_RIfL7SnQr1UU-osPNQD$<6us?Mkuj}fX z1WZ5ss-q9?ERj2P=JN0-!;7IjNB1v%6qToCn7&Ln4cPA!=JduyZ%uys!@QZU(*m>b z-k_$v>o^aqRy95)!9K!gy;puEf#=;Eh+b8cBKLzS6dSO{UMg@9)XB?o zcv4sH?Hi{GfgdjdoIjq@NJ9Nmp16Tu`*SBW>Fh_Bklga_71v=z{D9W+hegQXt4Gs2 zh>?-}Ta8}f4@l9*zHtQ|9=R-9uI|o#h;#PGM)XtB#u9mh){+59$)&G?$U||1SsOW; zOT-UZC88-xD~C6whT1hbeapg92I_V;<}y~SnpT2FFSqI z?N;_;7F_ZskIJ1^*bzarqNNbQd= z{YCXj?Um`Qb3JK+fAQVuvOMrJ)+`Zn9QiDs3feoayYkqN{SlZU5L+XOs&csEvr_c_ zp1DXs;j5N$pY73MGXIN-L;CSO{2wSEh$^s`mKP5#2cFlfScra7R#Y|$xwCyFJx>ej zBPe3?^_h?R`}ZRsvCt!L5R!dYmIp(=`%zFlx9$mA`;lZRwW#^NWBuQIf*u@i+%7mn 
z*z&mnDVlj0_+_f)+sgbqt;YhDCwe@`i*@+I!Je{akE^ow1)sB3-Rig#pBza#Fr0|- z8Zme>H=aMzorq>0+Futl>w_S;p8dc$nn6w;~zZ0Ir|I7BqgX_Ee+5iMkro%lo14ijr`$9qJOHbUP8nNEQ zn{jW9(n>;-ubtQIehXI;k4r00G{PyTb9<>D9bL}9K6xZEo&`5)cwgpMr1D9dw3y7h zqW|@FPxg~aZHj7hU#|5V>T^W}RUbq&#J_duU;|{ow>xB{O-l<~ov~@6`yAwWC7=EB zr@P6|u!TOS{J`dY<%0(WC%xt(R(G4NisS8TB@e@}mnYuI8U}t__e$OjBzxVLfS#0m zcL@@x{(yq(`+(j&v-^!))+lVb<60}Mg^^Uw`GIrVyPiOfrAZr~EL)dr`1$<}(b(vH zkHyiMH=DTQM-qN~?TLRGu*bGJ();mLMd+6!K8TToM?VeyfXlD^dSrba49=Zxf2!3c^eIaT88DLxF zJlM={Y2euC#Mr!cuDp{e*V&m z-z(Fqt&1E3?>sMLcMfJm`joq*{We1FLHfxxX+&}Bhr>sDHx~Qcaw=4xNxd;zfwBV2 zkKFpZOLsxTD{I@9y7ES#gcZd*9)08eNC#LSs98CDbY-OfW2RLIpomX@=659I32uJhyX%EGP#*s=QNq8yzUNc)Vyza z{N4DPuYIOC>BHtrshN7`XMdXOA=l0(r3*H3f7^lHHj{5GRPh~KbFMj6~&EPUk z*_If4KiQ@1t5=2PX11ESVkXLV-guCSvBy-WIOsde?kx1o?ZZMqlGaTp5k7vWY zxH|6_L8I-Ld0+)d+uGA2szy}}O;OL3*%%HZS z%hI?d{ntN|X6X3WUY<1g;abx-V7}eg$5$2{8NDeH-@c!E1Yg+1y|1}jV)kBI7Jl=1 zJ>b#ag5%!(AC7>-zHZG%ue{I96<~42IpClN-N2yt#i{{7@g>bGPq2+Qnjc(Vr2Q zLuvLHB#2yGi<4=!){}$&?Bi{GS!@6=y$8CdY;XK#>-!SRHY#>bTM{9sU&Etkqw-F@ zl2aT(gtaoPI$)D-5_>m0UvB#>nVVw*CixF(l`ITLEX7EhBcyK-6&_!pKAJT%CEhi) zXsT~B>^$^Zp6lV*)KXm3$DXv*3d86>FO#pZb6Y*k6+h;FnW!|z+4X7uHQ^tt<7(7< zT}H^}J6!|YrJke1uOed%9)zs_Y&w?Yf7kz%y_PZ6&yl)Y@I?8{0Mq*AQc01x0)%S% ztib!?vwDX+S4Xi&%tM^7-<+ZKSfBK0&MW?SZ*o280uifm{LQ)ZWs{c;iPD^LOa_ju zI|iP*dPuPGvVR!R@*B_}$vHLwl6VI2%eyEm>@7^r`WA6iUhLLd53p%*^wquF^8wzG zGXiJ6s)x^nC`<4DnI-q^q+r@;hU=9+XUUEme`xyMRr7ms=*sbj&}t(|<)j@T>RW-` z#8V%Tj=+$530HH%z~6t?#jy1nBcm(ZE;G9H`dBL0F4=8J7A$iqSiyg^Az)rf&7a9Z zuMsDWK6vT+Ky-`)kDsDn%poHiuiUfy`Pf_H*}c^7be7b$1=;ZtdY`Odu~dL&iiaH| zjx1WaGV*?T??$h*_~7?xOpEaYH=8HY22$V>i0z>Z8e@5pybnl+q%PX)0qb?&vG2+r ztK7X3T8YqF+PZkm7woU}Bq@bEWz6r@!1upvON7TIwZ86rmE7W*_^xSCauP%s=&{an=WTPUeOm zq#@W9dc&(=xu&PcdzA9%;)^urI&DMWByBawdSP?IODy)R=xbnuda#JSO6_5hTS>3Z zBdEuRU3srqdtQNl{QERM7W^oZi^4hKpxJs=pG!Hmai}g1MO=zd`*ShqhU{b35K$=N z-R+HvXqnQsv)3x<6;OArY8Vdx;S?d{vh$>Eg_ofo}K 
z_D7z9e*Rg%T4lR&@?babC2hA*$}d%T@yX_Hp1n_LGLjb5wCtfO%W^?1q+kn_s`aBbd%E>>gl~<_C{6jFYj50sk=$#)_pUO<^e`ntc=c_NGj%+wX5dG z|M7~2v8vOE_^CfFcN&O|*@O$;jgJLb8-X5}+#hKOyM+&Fyub@IcM}}`+ENK!9;ovH zH{AZ*>Z4Imu^{DzUHM+C_0qt*VcGDBV^C{4B$N9`dD(Q-)%Wk*zqo{bxsfron7W<1 z{Kg3At+IGGu=Ts3*pVv*3C|YH8#4U=e%DfN4&=UhS#~a|(d^dNC&7WgioCAeoFCVF zw`0iF-ZN0Bwb%c)MEq+a89Lbe*?Z$oT*@w;Z*SaWVF&y&Y<2se4lj{hgPB~sV*M*I z?yRDpg-&umzofAb#M_$|huzQFA#<(y>N_a!5lKN_4_9miO; z59$(E!UflUrbK+L6nweAvMPHr?rExV-L<<7)O9}T#O9ryg9>N#UJv~JHqCKQkaHaK zr+lTWYj+0T!$GWjC`9tLiC}Xob>8@yz6_sY*@OI#PuHWKzlm-AIJJ1>ywL7|PVAG+ z>j}*Z@B7-{1_Xq!wIN+XjaF_EvUFdy!aHll&Mn23FDmA|C`BFk-whea`p5Dx1Ty%~S#|dY`U`bj|xTsX`+v z$Q*_!i?qy5_c?u z&GtT=odlGwIO0}ML%NQar>gFJzKP^qC~c0H5lZ0&XPg`;^+fWujj z4TdVDWtrt&mUlPIe|=UNQ?ZWxnbE1p9o0;G3=-5KDjK;uE|h>aJ6xVbUlsi-J&`@` z_+$3l(prRptv9E~T}F!GgSszrk#$#Y;XT)L+{rik&R*j2>3LDwoYrcuB3|cit#UK1 zOlb-P(BZ<$R44f|i@)Z6wb&-$XP!6^gn&oNl>)rRd~eZ=Tq_JC6VgXNf9ZDDfnpe1n_=Oq_0i z7@9co`Te7Zonbwt?hlX74$s61huJwkx zdB zT-)C?_(L6;tIVX^`1lEpdw+bo{K)l;n~}!0+eicJNwrqec$df2)6tFWCWxoe5U{Dw zbz9q2a<7!Af{MQH^{w={oD&*m(1G7~< zkFI3JaPsecz@bm8g<`-l@Pd8AQj;F$=~Q-5 zaGby`=0Lb#b7r-m^4y_HQVG^|PAfaOQC2n>G}Jxr!Ls%2(r7x?u#irN1a|g#38?Lv zE%fwj%5U5?zDCptbs0!s4l6LtFu~QB2S#2D$vxD!Q@o&MFo=8__p~rfe@1QO7I2|B z#Gu~g<%^aE?rirq0LuIEXq3c%<%xJ=e5<_uRza*9bb$w|o3w&o-JzGVrC8tW4zLv}{CAnH!134{u?^@zhTGBhzw%V`yZs|a_ z$8d-`r{`6UUcT=z8d+f9YLiojG3dM%y}2xsta6qI#HHw9;*@dtN@s5Tk`p#iMk!J0 zYh#>mJ>`~Xsi3-=(}C+3unQg(7dhny3CmfkL-aTt(XhyBH(7nx9kCsJa0~W_n?ACb zhx#<7VrL(~2JDO-+#GOCt_~=Q>;-ObfG|cQ@2(<3Fd+v{d;2|*B&@%K?9LL;=zGjS zsDQ?LgsXJwrX?|>*|aKfqp4>^e0lx85$h(-QLF6{8dS8C+sI|}$*Gd)g7mf8xz)Jh zDxjb|6uwdpy!O|wqniI}wtMRIHS8HJ2l(nfVhV)``GjZGBV3K&M?d_R1~0rqU2L6k zNZ?YNR8&qy*m&@jjS9a-;Y#JJ1LK7I#H+_97yCM~Fj+OwTP^ug1OS$_5VTx)>E^1) z5JP;qqWs3;Q}>*;PL_}MPmgp{H}m+0Z(-AfC)lqvR*eX%%YQF(nwZOo0bdO6HYs8+ zsTeoLiie5Z>X{lSiyHtwFM4Xbmn^WGzHxk9|4R_7$~;SE`g#3CLT(2p&5@{F3Y?NZ zeX$6#LY$e>VR0i3yg_;1{vt}uGXPVTGU7cuw&#{gbEE1O*WW!}sl#ZMpYPK5l^q~p zvO^gME>|r{{-eRw7Orl3^p)5hNR?t`vu&Ci=RtXTL>6*FpJh8-eVWB2a 
z8Iwlpm8h!@_spx-GCntXYO18gt{Uca>H6B9b4!!T_?Wke%eIzLgbZdR%;>rX13xN~ z{N2F1)U;N6w<5a&vl(o1TAHz7#~STYg*L&z!H2;YI+?T$}8!&=Je z4^AVRR(!{-#&q(6gT^iduc`#oHsB4X`)?`Uq~}zdG+LOpH9-jPem}%zwKivtYk!HlEMUYudQP+|uA|}|#|Hf$GX2j#(*@qc zUFOI^Z+??(I%Wy>s0Dn=V6?qOt_MiuZ1p+S@3__5q51um0IA717%?)^`bjNpt~0JD%8MTQ zdA5x4u5mnZSx4LM8aI}=@3y+fN}tOm7g$OC8g z5TK3To=%AA-?^u818Nx>DV+D7v5#4<$Skxg0Y2QqO~_#Z6&Z})9I_9r-LPI%XmzP! zVyF2^qrG<2)$N@^uLX}6TTJn>ssMQ>Vrk-{Z$^wN=<=J363dW4EG z9X~%#Uc7}JvJb&EOiwL3HZng7j+$!ig>JTl{u=(9#K@bTw|Q6=f2%jGWQG{l7qR(# zb$`&ud*Jj!%f=9B2DD1uJlBam%y~E#BIYTfMw+TKQmmGt_Q#JihfD!J85`$vC zY^(A^NeQmHmAAit(9p1S39GreF5k3ZY^b*y8Acs34Fqg=4ADhWNB1`XTVYdq`-Ny5 z#b8ys&cVFWRFOoO-&o~`)aswHW$~5VBNkoA{n{A}xYtb_JvBgT3{+KG+g$OG!LUPu zl!QP^J|44qL8?j!_i@nb^oCqeQ0;0ck|e(1NMD((yOx!8_^GWvF;T)0QWhTYTb>gp z&L1_ZU$t?0x&fak+1gR+1hdi?5kHkNPEDgZb?exD*rhQUr{{OVej9}UZr%O1j3mn= z2Xu2M>dqyHps_8^9ReZb_n*+;a$)_`ooZ>nr?AM$@=#9|K*40nK+7K&N6O6$pPbhy<7z&biZ2qRj&a9qFF%)W$5B>S~N(tIYgNLqF_;2VHMYmpC(VS^wHBU6m z1Z~PqO)WSr4(3fXt;shfZ5UjH9p~9gHGk7n_&eY^f9ct$g9r9YSw~Nz6@w)%DQ?`? 
z$+FrVp7=a(el*OhYfb6NTTh7)$~~7(XtO|vM%??0qu^zN{>lRh;QDxoUG1QthEJu5T@1FgJRllSg#p$f zwDpOa8*D{zc#diI9^dRQf|>%I50R6tE(_=IGq%UGRFWeW8Xy-+ihdkNaxG&>+KK9j zQ;EA&SEqZkUH>o3D~{P?%&lHU$?r-tW6(lp>2Uri^dRCf{2qONRBg)@gKAmvxo5gO z|Gf}f)OBftZfK0QR7_{GL$t@nt5OF&+z=V58$3?bium=zW-3x&01cPO{oE%OAR9vj3SR`s_ zH?i|URK0S`Hc2Rntzck}(!U=a_QWVvk)^*b~Q$K|9aOwNs`p)F+!ZVHQ4+E>$-vE=2`g+Lw zK&@+P-56y^T)j*%&1XS5o;l8kL9|7!tR74sB%iTNIpgp4*&_E@to0)i5v^RUvmjZd zdSG}s0DL2$3_sJ0zYZy5mYRfPEt)uljQ*Dit*(Mrldm!)el)!it6mESyAMFZu$ES9h>MK zxu#mp(6$8?Z|QJl@XhSDy`Ddsk$oo~wf{rViIUhOrBYR)e`8+fc(xt@`JZA3ACX@qC35F(ehi1%msY*|z2LjP1adEkcOpKIl zeAuqF3`n7_B6jgsH3l1?I8L8#_c|ZzDw?9Z_v*S2h+JijrgG@74JB0IK95PBnwpP*$8Y zZ_jq(%{HN))-<*7P@Sr{rwa_~C}~mH2B6_LOX@`ppk7iyA& zTpEp$K7o)+g2eI(16;c!<^2*{I&r~S8Y+P8FbKrmkx@9-W>bbDK!DA})UB`K5IO9|;t;Z7RM~ zgaFXubU33<&6u((FK2zWltE(^iaa#B9R2M?8IlsCIs$g?p~4(vH@wgxuD1uHJpdSZ zrSz@%nDN}^kLtHwOpH~L2#GW1X0V47=fxgWYE-`B~Aq)?9>V zJl-USWFw`%zbNeLYreW4j)TNnNLHjEe4za$oC`z8CW)P)(Ok>~yrr;{-T*p$1LhlL z2A@_R)fce)S+!&L=qN89dd_A@Va#O%iP7<(;7;qw(T0}1Wf514Xo&l)ov1$;>rYp%#VP=1wY8)%7M&c2 z^#4GxpK6t98tZfFH*qLp+EEX~V zh4x3ic)-Vz-%BINc9oGZtvhN#!QyQ@mw0!2(&L4CsbU%kvh-y#AMeKKeU?g5HF^|9FB@A%;;rqTDC^FGj$jKrq=F||PH=5BTJBn&t>+}T=$>S){^hQuu@2TrTAOO1d57<0i8 zN=&DB&E$w_~vG@-&30(Wr_;+ zQgn=k7DKnN6S)M9!@Jkel95J6anNNbaE+t{5W$-4x-!5VO9gQrYNYA}*oiWu*2!3M zIu#v2Z6=wg%$srS(;^4eB3nzTocK+OB(>(6^x0AXspStp2!bQ8rlp>3zAD(R{6)f! 
zpyYH5yvvy&_w4l=fLq=8Q4(>QB8b#tvtpqxy0f0|C>Qw6*P1pBKlY1^u!F;GJmgb zwqi8WZF&VZ`zwkQKff}+=j=2=r@t*n}}QFfZsaZYX0f?7)bB__QeA z>?lH9I7II!sgt)u+Viq^xrwM@xSQTOl^GMhzq_Tib6+fgnp@cc^68HeEgPfMFfh!{ z+0rH|)X`NoXd;ke+czJ=E((%!UCTt$=cx*nDCF0{}mIe4>(?5icBc3Gwp3IwMa z5Y~~?Xe=h!oKI{TPlsEG!k z^z4MSJ=Us;Si)N5{ybWZ!>~(pcJEQojBQ8Kl_NzEW!SHCK4H*slmexFdL_yq5w%t7zOU&Ts*pL zfet9uQZJmcqeJPW<`QkK*~(gMX%oSwn)sLL^_{yu45gyZbiR;=q22hgZTwn8UVWoW zOuSdX>_yu7Ql~xYZ=}LR@KqQmy7)m3f3K*2!fjtL^OihfD2If(bRuL{ap+nhG|(h= zfRAdyNI-4#m7*VlFv8g-4k0PF&>M{QVr|siyWRZe-prW6`tFuqTUU%t;fhx9bf3O5 zFT=Ynd`6!3jSn3(k?3~Ve3EX;P>`gGI|o+Q#L0!5lb9FgadHuZa*HiuF<0b zZ2h~56mhp^uRXl`1%@zxj#cO`?_C#kuvfoep7!K(is?V_&db*3tnokZP$E)WKI;y( zc514KjB_qFHNOPdKmbJWomLQY|I1vQ4UQc3J31+#6qU=P0l6-f-2UE@LO<#x%}gXj znKY@uuNTFkeJy;(hO(8(BJ_{FBGs|E_C$S4JJb{vk2IK_^Bk?d+ZE-w*Hf}Oz+MlR ziOjsmN`w*CT#;h7$ohHSWgY;wyGJu3dy$|okwmITQe#@;@dS8mY4f6M9mcbgcN_ni zQAn}OIS;O>)4|MNm^cq^I{{BAH;2+&?h~!`s=QHg)0A2oZoj{zOD$S5O|9xTPo|oT z{A~7wva9Q8w)@6Bfms|ESFe+lu3%uY8;b)rlS-0FLqLye$ZZ8O6OPwY1&oH&S=!NW(GS z(f+|GuM==*#Thve;mIKiUkiTyV!T?wMRUk7Aax#zG8CVs7`Si_ON>-YG}AG;^1-3= z`oz7&DCvq$e5%=)WGD8B?;T0tD?g$ z^NH@b#($Vj$QBX=NMWNo8+TQ5t=)EA)jbg#98IX#7zqv7H6s8GsW;JTqg`pR-2i+P zU`9Lqpd>}PU<_19oh}^=x6V^j2481lBq4(dCesOMEYCkwCOK4wX5jDxTFC3o&Z?^B zq2lCC!q@M64*t>YX=(wbd>|ml8y-v_FY3io*-}4*w2c1IOtb8Iv$gx^rf|S|vxo>Y z=yP0KnkMyu6mXZl%h6okCRfttMMaiE&ZhZ%W6OcLhOaPco}t822)m3X7ij#0$Oaza z0()>tz_q)%{PuovdC|98hl=5dRy|$;jkRQa4j_hj|9k-05j%&*+QKQjZGvG>67u8^ zl?TVU%XtNu3|7&8MLD3fr!3<;uDhZY788YISefVj3dR&HkA}FIw@~KFSh^v_TLF8P zfS7j2F&QBM!#>&mAb+@1#v>QSbkA*rtx3ra?(4@v!Vqnyn#^=71E3n-#q@e`=x~OW zlNUGmroKQ#o;pezSViHt+QUDn-i}5qhP!mpv3dl-ydSIZk5xPSB*;|Aaw%Ut=BMp5 zam6mHWqUL`N^bzL?kf75uD5?QB4^0mB|+Ik*iLk&0tv8z=%HhO=|1F|&#Cd=KUsC-+^>4jvL$Dhq6%Ik@j09OrDS^$Q$E z5)=iDN&1HprYW)&@tID`9Jd$MRAKDR#PD&nChD5P*Z&E>R%poWJJP02KrV`}*`?%!U)t~3P_xVlJWT0ELu=g1jc7ZI&6+%;P{hV4NCSwB`= zS`HemBKKP^tR`bNpV61Et*Ux8ZYl=-2KW+MZ@&6rGWm@DUKvoeb#sAl``DVcrdyXK zK=t8iMt;s`_s)VByQ5%Q=L|U-m89;9=^IsxO@}E*hU(ygDN{!)aL^${S9<>D)}u 
z9DT~LDyipUOD2AdG_hiIl)oVxKc|9)?;=1@*0l-&x>0yq{obe*%8N%p$ znR+lMtbWs}H0E@bk^oTRnFH57{dqCGy|XfHMph+V-73ifbZwg8;ywa2b%*>=+5kNUv7L(wDvM5TQDhY_fq z=s0btS3tapm$WZQpgohsfrv18LgrenuX1f?C-8YV_&aA*5OYw~N-}xgv^tPcJlshL zpo$>6^o@7d%w&UUU1MaLrh*QW11U*ydPo$6-VX2ZLJ-@KFW=iReIZ*9_UUj7hCu?8bZ4FiHZ@IqCOKs8ABw<5D|i z&Gey}gQiv5FJYH+8e`S#dR=LsVlBu@r=#D&F{AfLU4GRm% zJwFR+Qi59a6Toh6%@-YwUC4 z3Y68DuD*;)VU8&(WEp=~u~GqvACee%V!Ffk_gBN5qK5dZNp-m?s+>8~CLt_()Gnnw z?K~a-4@7p)8Td-)sfQ;b^bq4!KOi&ovMbu(YhI7oNJTe`0JI_+m%KJ*X&nzY^^d{U zEI^Rtp~jRFbBLP>YEgKN5hd1)<5l2nBlg@JxwhzhPITdN!5j`igujd43P3`+{auo> z+8T)^w78#_B%I2MJof!8I5wJrnaoeGH)V<1#pxE^@FgX#6`DsJe0d4m)$w&@aYCr> zOTc|@O7}cB&-z?Uu67uw19kYQ?I==1J;^p>j2+md00`PmX3drE*anuZ62oS@y6RS7Ll${Z6F+^r6 zR4IifsJ|s_0$Ad9Cm1~+4qoNJXkCtT;IQ5K4y!$ktKoHqWgrDlfc+$H zPXsHQI{Z^vv8~@ME^`d{Jdw#4Q*OQhHQFUaO7s9OB7{X;G4q4~NcjGtHlYxd`J8P? zPBP~nUewu%bTsj?mRCk~OQsVuP<$3s9Y_&|-oPQM56tG&3UkIpNh9skF<=Ka~*0vi3Kx6=(OjPp`L`?32PLZc}us)^$z8Tqd+0 z0TjBaF@yWFMS%oJ=q}#XlVeZF_FkMUMqwu(s`hdVYHX#5qVLHc#gUw$m?Cg@{(DS0 zaL{b5)s&ZEdrh4s5(RbiBDz$DQ|2E;q<7bY=!i1Rym=l?8K+164uN1g6WrtpU1Y+! zrcO&OP9aly?rm1Kh_G@Hczj0?RM&3@B=ty4_=t9e-m4m8e>Kk|1@aN}xvRDds#|Ou zsLDVZKQafaH1xzDW1PK*6ak1IB$${?X{@oyd!tNzKyo8D@rS%73Z#I+hR>4pDU0{~Xqv%O<`le{zsn*#Axa$4(!00) zP^vBffKliIWVESwlSrN$7W$~tHu09T{ zR0OOdgEeV)1!BbBmIYY1#G(B+?#)7`6mm5E$h+X86KeP9i3rcucQ9#YK{6+^Q&WeR zmX8P!u!5L0jpWTgf7sw5D#eH=bA4Y~0B(!X-UI$z03xyazceB&*r%IKfI;%~j zG2Qu}ZKM7}RLw;3kshg{0gl^Eh`>u^yf|T$q5n``eG6q|lx1kNu4pami=~We>=~Fa zYjOQ+Qppa>8C_PJ?_hef#cB#0pyG)Cjf54ADbfn8gqx*l=6ZzYWIxB8-ro(-0@oywz(tN&Z1DPvmRUnbJ3hOg zHgm&Lhz=sLQ~@zi#!JzG;V~yLBq@j^ZKx4@NfwtWBw9$0T8DVM{ID4-Q6MFj3nJp! 
zL_MztS$ZPDDj+m*6j>fTyJq?>2Y@IEq`LUHuGI!omeo1Q=qSfc{qP3wxRU1W@Q(No z=V%w~K>lvCii*F*m82Q^|MT^~BCkf>W|63Pv0j4AKR{6XvG|V}IUl=#AGX>br+{u2 zTU^@e{xN5Q>Jjbb=(9DKz8&@RtH@}DvP)kh2kwdsD?YJ?J;$^z%i%zT3ZrdNIk5Qz zX!p=|j2~F*xf{6O)o#pL+)G)W=~8rK2f8qNCn-Xj55&Nk5C<>FCz-hx&gr1{IhZ^rElbZ+iY5;kY90EwkAEp zDwkdMArN)OjuQb5Xm^1g6?Q&|c|im(%s(Kjy7foc| zwG37wq!AEB^oA;Xj@yIXmbuqS=3W7O<*#zs{lZX9R(CxIdY9!x$U$nne(8FQW|z3L z7YaUe%22Wen<~aBZYK7$vW72KmnIXHn+eIP?_yA>S6TsAhj8AM{}PXQnRX~jSL?xtgi}^us1cE@qo5Pc;z#7#yCjZspnha}{x1HDxXqkh zPC&6PFGNw6FNBgJyMMLB0fJBEM9cnP*Z2s<3ZoZuv1Bzh3jsj`$6ZpCOBsqff4x+P zcP*n(%G_0AqW;7d3G7JiFqz87+Sv`*sHBP5XhdkT{3=hL_-sWl|8}2`@fY15M8*7hTzmP1jn|i6~8P_1-ZrfFz0!Gx3L z<52?>KK6D+IYOdSR$o0GH$KGMLc__X*!a{_^uwd5Xc z%b9dX*RGBHNs$B0!e_;TeaF9O^;*unE2T7`r7{}4_7 zhi_N|$VI$buD2}wxf>@BP`2Oxji#;>jKxqAcxKWy@0nyx3{EHuZ3)hBQtbb? z^70t({Zf z9opSg4tV96*FfI;)&)oJ&{wra_6CJ=mzsA83-yL+=!zM=so~47;G3gLQ&ugk{$dcX zDA|8)@!!`2-+p{#IwS0hkn>b3`T)l{-%OhcIQS(58qrl(NoyrQ#x`$Nt$uzH!l}z_ za`SudncFm}#b6o=?yc^&n;L%zdg8N!4P5wlJUeHdBK1#G${J?Jy(d2pf6UnXiMFTi zvp~T$JItZbfQLW&$CakW?dn%jCk6v+vq2*Pdt;HgY9oeSdi*b_*M^2zupcFcqO-qG!v9hRiKher84o8q? 
z_(atcd+kr_yYj*`C}>WQJxnO6F>c>R5Wm<+#9wXoqEFXI3D$-NT7$d4RXU}8tGWAu z_gHx)!k8QJ>W9`?C-Rcw%ZeA|9U(MIF?POsX~hf)$w>LLc$iTKJ zC)+Z5;wqX`&UI4G_$Q!%>A9Seayju3_g*j;WP)dl-e|WBIkM zlT@#nz~E2WpGfWg+%%2NGdZFZ_J+UMFz}bn{(Ib$KP2*O&goNMPY`%5Lz3n3Ry370 zmrh(p&JFqG4ZmF4KedaFt zw1%w2KwB(G3OmnMa&GEHwjazKh-4nj$(7Alm>U?{hf%5`a`lC!E2Cgg!H2-y^lAFTN55IY?i15}zu4dZps&OAk4F zAf^>z9_UQw=Nq)|Ufwi)Ws9kgvdJFw{R)`g<3VLy8NyGv<92ep`OJ4gk()LQbb7zM zIg<8|hy16S8q>gnLwcwIZ5{!Sd7A7Qa~_Xgtq8?cm(rDDY2j_4(%s~@Hf{i7Y`kA> zi_6>Y3wn=RPJ1X7Y88-aTiL@q*U?GwNz86Ybg{bP`&S!@#Y$(rLow^gYq}Rts?n?F z7j?N;*AH~sZJROL`5`QTeua;Kt9nKyQ}0_=aL%^}`U{?qjICGvVs%lPQu(*MRg9nY z2ec%Qwv?tzhm~vwn1Q^shj~MpJOf*R{MBPWC+R?31Ng5Enwp^ccbyJ_@Hx_5S*7e3 z7T9+)Fe{8p%zxAoDm8d{H==dBf8S)p_%;)ZoCS3VY*evbk%c`i!Dq) z_?OktXKIH#W0XcEyK#PbKko(07d@WG~;mbkFly zGWMQnNu?ebUhL4Jt!#LOV1+!}VThv}{bQe=(ZNpdo{ghbN*z@bupUyA2It3WM|EqA zU!I;H7J10`I2heyhhM!?L$KZ%VgaRR__I1#!?etU0wy2X_7`k^`hbM+XqWwShVdV0 z4&Tn6TeWJh<)QsQ{i;ChxD<%5iF9m0kNX;Nu4mXTy1y;9c*ZNh2sn&Nsxh$5*CWh% z=+o)E{r=~j{zOf7+4g_31w1iMqhq%+S{Da108hCWmpc}U2o_qkVr#0_UI!km`{$p3 zQ~&qX@pnXBD<^Hts>S%B-u%VB1EF`a?`)jh^Zr*ApxC(i3&0oG{z7}GkJo&Jfx$e} zZR^xFJOgH|-hW@2L?3Oqs)%u%UVdbU4jo!vy?hSU1#lMC_1FA&%B((o8(7^=|2Fq@ zf!)!ytO8M;_ipiVfI9@!cde)IFXxxvAzSnxP(rOkR)Hu@k2M zM4{S-F{|_k4|H0Q*7;~|1uk{jvcFcC{eVVK-iOiv zQ)3l4=pLMPC(S^VsAX=zDA_daN^g+fT?O>y@bljtAn(l`unh(Hu(&K5;vAfUbjI__ z03ZPdGQ2%{zcPpa@n;aa>+c2bp%tJn2`KEeozYR$vkuaE?`U^L6Xtu1SmPvc4KNQ} zptlEB5$IL-8-H<-(qd&dJ!3E84L|$^KtyvzP>o4wj_5x3)>+$$ueTg@kS5)%rn-qL z8kV(8ybdVE6kn+#a>+<5>myi4C|>JP)u}&jtcBifZ1WM_p;PQZQjj^Rf^;yPz_dhO zJ@kaob!czX@>+b z@Qk{I@B5-=^dS*H5+UKH*CA;H^UYb-*a>=DMaN1UsxG?V$@{AMn_&g$#f332`tDz@ zuGJ!v)<}7$R;EWx$=2R;-5XgY)!AI{2>I!BT<0Rm*tUx_mUNRvuYf}}o5wjhf*j)9 zGVe1S-#+4{i-+#6{N)#Rf=RCZEW%>EM{y zf@N10M?!9xN#iqChMz!Dqx_S~wT@WLeoQod+_5P@ixEz)Eez#6`J#nIFU5_TyYV}{ zcc56XFwBx|Y0VQU*?uLShJC_!S31%#cyq0Z^ztg28J6ueAdKNg>59XHS0ns^!mzGl z9yN0dqBs^uODPNERgHR)MW`$DQ1FO9wd3FbA zHi^7JrriN2anT1u{>#l9uPc^-HyXhKQX#*w>Vd7JD^b;`PYD2GO1Y_)e_|iD+Lm@< 
z_qUxsp;T}0ATmyYUtf?HTqpQkiT~yRE)j`QN{!zEtnWPlkKi6_)Ht7O`m`%q3umGB z&l>Up(+6isfzQm}p&5wMbh}{6c`EQ`a1gEb*ronsm|n@r&6ma=T?x6mPR@__8DfRe zuJOB?bEoViEgE?6nm6T;eQeaa}EzQL#tQRDkVXHF@Bx}=KRv2u4y6E16rCg zFhP>B+QiK8kRfM#$)bliJNnlI6n2ypeYjy#o}PQdPxKcVkvm~Kno#Ohma*wGrDy$a zHzq6ACjFD{AnF)RMu9byv9OYn`4(+U0|72dRN8(7V^&O^6WLrWva zS`M))WhM~JZk&Lq%j08({5-4JnKikCtIC6vr%k{g|2u^%KCVSW1aQK^xnT8y);C5O z#;1#^L-Nk%SomBG-kgQQpjMiU=3eTpoLRYWtHHV^n0#w4Ta7i;OsXf2zL=+N#o%zF zHc(=~iz^8(b8K;vdHeFQA&#h5i{Z3M!p4vgQ0)9{VIAIX?yo}~f~dFQUgUkDY)Qo{ z%hzkfq-W8dK8De?aLy7d5-lFz>0yW*ziDDgy}#GQ_UK44?E_}dMQhHjupQw&7~W8TiEv;>9%ldx0X7FTF(`w zn1(iRAJE(~hwWv_%?7ULbmKx%LtnaGsZV-I0NFGRf~WP*r>U~qD|PH6b|<#F zj@~M}j@G1FZcG%FS(O=Hw{CBqx^`%)a+9bk=;FgSG7w|_UKHhdqJD5bc(~s8?3CSh zy{`za{1g`J4Z&MrK!HGw7VrZtFz8gokTpBv7{uBKy3|VEljZdL`TVZ?&Edg@p&j<0 zW&4GQ?pZ(7TbZLtN#0qel(6cEK)1pN*|PC2{pou)@OsoPlcMyB^J9eh`wyX%V|a_X z?LFVD$i?+b1&(Olt~q)vF7nkuk(btNmyRlv{;RaGhtLNtuehX1XV`Q{$R_d2jzi0` zDc5F~3L+G(K64BkARV|pR`6h%#umocNd-h7pXF28c8b~_jN6j%X zLvceiwYp=fZ$SmQ)DOP9mZEzh1nE3nv))DPX|8s_dAOQ@ujg>oDM!kLmd_UUtC@EA z#l&x8IHF53{FcY%Zh?-ZQEi5`)aj4j**v|N^X_4FlJ2+iM{@emE#_HSaAbc3|8-E- zvQU)XB*DkbtmNHSdUpUopk_xvhsqv!e? z^e*0Z{OC7Gw0ebJ#zbhpp4KTb?X|F|LL5cAlu1>)Yu#iI@SfA(W>uS42W@GPC5qeA zPy#!j0&mU1$mIm(nIU-ip~mK0+J@8WCy@-j-K)DFINcmZYE@0ctigFCO=2WPbT&z3 z$#$sZm=OiF+>xBSA~&urCArmh{1hY7E<(prxEDg5Agt_r>XNial_#tRBy-(^>WNy! 
zd75?E(p>XJPa$L%iNx%x=iQ_2BA6XDDO;etof^W+U(pCW!@#sKcN`q8v$VbGc*Cu- z98Qw_=5}sNx~-AP;*0hR=Mt3@uG@`gznIwriT^3Y75U7S+b*=)jX{Q;3KfUyJTWCB z*;Q`~Wy_3E9b3iGln>ajMc`TyddZD3>E0TN6lM}D+*`X0n`7pX>0<3_c1C^ko*z%` zKyQ_`xJK%tb6%2O7j^KQJqY|m+3ZK8MLtiYw_fyR$o3D$eOEwqJPQ0Sk`K=&!abYK}?40Ll z(S-fVEVbh>XU}&-EJ1ic{vMfw0E-mA6kywUec^DwH%q^@4Zd|>ma=EBXEYoi0gw~p zi9{>Ob`&azgBSv>wrnn?tuCC+px3T$n3)}#?yk9zzu~?CivJ@4vz7E7)*9c_Ns+{1jRR zi#~*c#BQni#C~qzuAXcdBFpYaWl!&RU82d`vv7@|8tsN}pshvJs=;e|1ID$wfW?9};k#^i}Jfc2B;pG{`{JelTSKUT~`&W|# za|`#XGAXBYbw{$%J+rk={}> z)eRamo64b~Mw@H$n5{8oxVs0NQm}c^JjytX*-cJAY+{YqXl==J3{AUYIu;gh4#9tA zd2siXc2)gy>SVUCVR2Mo!|U!>9(N~Az;+SIPh;$9RSGWe8wut{SuAJV4MTBJ$$KgLJ0}j`Hf&Ns8$Yc5+^)NZuqjZB?zD=2M+KJNZY)!f;9d9d zxXqgM$~aqewP;pA`AUUR0L&FD)|ZR*&>u1#(U}8pTN15)&r)3O@*x5Og}T`)iWHKs z?};Olrx@L%(Yut&3})inrlr@?bqd zvTfJd4XOtI?-kK5<63vGo4P8!DP%}d6dcE%RQn1{%aWm08moGCD_1g5!)`D$WHjP) z6l@_G67SJ>Se1q^uT8>(^ko!=E#6s4Z7z6#EzSzwqgvc}#s1ee@J8^xfx7%EjvXpkFC9-*#&wG`h?8Onz&8ti{x17`f>|EiYb9OaS zt34+ZzGFcUg-1_PdD z!#v(Exc=K`Pr?=QEKOQVSng8p;cczztb-Bc*zf{ z9OdA!zaqcaBp7<9T2~|o6#c)7!p!J^I0l^iKSZ&>j93S+ar${{2U2t3peEC){Eq`_ zsf`SF1WH_G&Ng|n)T6hQ$nQ=?RxF*}sm2s{^Rv}R=o2CsKBi1MKkDA0dNU89NTMX4 zYIjMP$*xQ7h>I}B+Q{Z>VJyXZ!I%OW8Ic;h#I0C*eE8x+gV9-I`>=f1bN#1|vxm&n z6)8Wp+YY3>mfcwI`rueoFUr?_i;&esFOvGu4J`|o)q$gR>{PFXmuH0E zT_jaDI4m_p(XLc<*IF;xuO#EFvP%DCMuW-(ioEi`_WO59dNMRYylY~=io|%!fcZzc z86?ytZuS7Qm@y%W62!(}YM)3zO4t|Cs7)p6$tNtmy;K_AN>ToycyLxlI}0VG-D)U$Jk<}OukRQ$0*{;$*D%$Oe$v=>BWG8o;R?@BbX#&vdob$$I>#!l^k zxpwcPrtjUGvpg6k(>P+sz%GvBG43(bpH5s0E%MG;Bac{lmf?C2Mb%}$XwEczOEJI>kvCNjY?TVio-$m!B zV^fyb+0V41Yiot3q9M9@dHhs4RZJR9lIz zkSIAdr#&7&Buyi~kxQBqf-4JSuQj;YwWR;R#PS8m5#Va9}m!jPNknVIE|9 zmyIRFb3(qg*{i=7v!C_}hWMOm>m_(C2O$=&W)#6jXOfG5BDst4D~lkhoSKVBHmwOu z4^M2>1XWUVoWKJ$=zcCtd`X8cG>%ygOKHBjA2&=+ci|J7Dm$OHN zUGiIWjxDx?H_MPN81tE4a*>?=CY^9b5G#S;lM9q@J>G6~1WP?>b2?(dwIC$BAUyDl zN#P?>R9_19ph?z5KqfiTZNAz|Qe_+UR#>g9o^b4PNxPlUe{<9PtbT5$O+o(P zQL#gcqm&OmybIh6B{i`|S^yFu#Y2;oS1@)@1ChzrG3&gYQmT 
zeQMtkT~HWu%A!x(g^B7<=zub?y`(1PIPzN+{b6x=2E#DNq|6!3zM(XZ-#Ub5M=oEl z^tpe7X_*}xf}cL=*K2TT#MrC0&Sfz9RYdcAvTtRPa*X&yeSmcB(iOEg{qY-mC&K5! zHtc8Xf{M&sr`8Qw%YJJ%U+|d+J01(p8a?xJpu}C_FAN*dk?vl}m40vn%7!mJWcQV?{DPr)*Z|7|L> z*F`Ad(JQ=kE$?{D2S%l#$-O^f;KG2(>V~Dh6sJTGcr1>GVF5IqN%Ci73p-gkb!7 z8re-8e-52~ilq|FrUL;m_)L(hXM&oo0Amz!5R=e+~BX0&Z;p6J1LO;Mq*e0CNY`&Q?HSa zq-HA5)UKb?J1Y|`Rn;br6gOwnT9LH&dcJWYN3_HaCz(MCz#9;aKGVm$dPUyxfgdx zANhuARkBHMP9SIIMT;^dkq`Ye=fv~UAY?vYEq3_W-CtDjTVL4v2)L-}%;P@j>8yTc zYGUHj#jj}QQE&O2E6hljm*(XFo>`As7>bLRu{yKJ8o_FmMY*e@gxeRC@i ziJvwxE2K)vSohZ_mvgqNY@a-)NL&jvCjMYN-ozbzl_cZQr={k%nAsf_MLo}SE^3#) zFg26U4M|d4Vf(?x5K?OWL{R^Ic6d2jb4uvAjPU0q&BjZeQfyA@N`La!)tZYVC(G!@ zIP4>ipP}HXxYhfviXb~_z8Wr>n7GNU`d%Dnv6kRP?6(y&!&ncU)9VUzW9yp+*XpPoL5U&LQ zrppb88fM2675Jh+IK?g_K52UJSI~_J5U#@b$}PSM)}|oCht~2g26gwHK3i&Ycx}dY zw4=V{)A7$#77_G=|5U{Fck(>XdT4Q1&p<&f_R>zzQ!4lwXgYbL$j}yDXn$^Z<(ig* zorO!93}o3Ma}^~Mpd{ZzU<&DLD_hT2Cb*_Vr%*@PDnJI@BCKdQNb?TkHxfFygT3r- z*#A=2aDLarw9y(}Jjm3&*`=cLsE~lA(0ldH(5uqciV9V`Ac|!<`7*1+wf>?r&l2M+ zEZTg&`!RX{w+_lE^54W6qz-`2gnXOir*3m}EdnC-x9~i!N_iGh3R~vPCJ;KIPZ0sR zZwt=+-Dj?B{KgxK^27RoV=q99QoUo)YdNb>wX=e*aP-5n15ueYgnv(OQb3^Rr89UH z1#WO-oF$OjGbWBfEJOHLi*`E>cPSI$T^Q^J+n&+RMq{0Ox}Omexo??|Y`^^1(FJ9{ zX{=L31HohBvh;9|Y%0fw9j^PrR_2b;cfv^ZoAsgvmIJmJ$^XqbEUWfZe9!mLsl9)$yR_3vqma;;yIBW@n-)OJF-yZ7Im%bw8sX~$d`qyA;iunK?^w=l z7m$!qlcf#5CR#?XJGKYQrsf-dSWH66u6GR95S3%cfTERH`l9OFLx|Htt7vr8BSZj> zyM|*BU(;p(I1a9cYI?+cxK`pcK9A*bsqQP=COjL)gOZh%Re=Rozq7#h!pLk#bgJcC z3D#IzDQ50PocD?5OVWOCOWEwLJE#~vuz)0h$YbBQKJ~=7V7#H(ajm^MvtLx7pDdRuAZ?(Vc z$rAuRZF8xP(TMY59-<{7#}ET=&e^=AM_Q=j15aZ0$@u|#=H*@K(q9AYK&hLX#Gr<+ zjN_J@j)nNCDQ#EN|M6W{+|UNZNe+PrKsh8GofJ#cffMcPgf+<;Mhl5AIE8rBK<^PWW@W z^)u8w_N7n*HAz{d>J$!pm6M%#LpfeK$=Sx=cPzTI<(Wp93mn^Qah3Az)ZuwWzCKO^ zdxxOLluZYy+2Up*l`L>mJbui7fMt}x>u~b78(FDxw*jt)50Ap?w=Wp`&2@S=E~}U| z+x?1S!YuWQEQ_9Py2<)$Bb4=0I<#BECPCJq7#E@&{RMrm1A zW$ZHw(WKSxRGc&58hO>3Mjil%A2@~`f#+VlnRGTrH!QcL@~g=f*kXqOj>JGr_oSlX zsL2M4IVN3B7&xi3#X@^^iu(hu%>Iuv+&ZqT0$dsVNq$21?Cu7^IkC?&T}q_IjC!%I 
zZPf6?zqY)eTB3Yac=|oCGU|1z)v#SpF-3=N`pnpe%$$9U*Ll+gImH+H)OKO#%u7YC z)8viFt&A=3%^QMA(Gzott=S9@iPLQh z{os>pjlf|W5^o2bh^m*K5hs?`R7Nwjd7=v zD7*J+ixa#2PT28NVUKRwgG(?a;F52BGlnI4>hr!cfA#aXg>%Mf3*bT(yZF$S0|wWv zOCVe@9U#$xxj&P$zfqptG*0RRZd>@at}rCSL-YFET8OofS(XjjPUwh>oK&ZzvA_NR zuP>TI96Zwk>I&7`S#0=j}NqbAbNR>;WtDI0Y-Z zhVwR({(MK>^88DxX0M;D-wj+)Ub@h%o3{9jACcQY~1iSr<3w>9*NDPUVgh zZEwwz7EkmKuHdXO&juYo+yBv*k`+C#pvh|qCrfsI8a2DwA}`euBGq@la){$D=A3DI z<{vwHY&y~?vLi!CFst$&B{&Mzg_$d-^@qs(2K7S_>u62-S6^-TV(Y%kt3F!2cKtv9 zV+uNUQCkx!^@F``Fk-1`H8FkBw4B{E&KxQ+g!gB{O$62H@RI6C0U|(l3R5krlA}3% zW{jF}c~VkXu@vwq!^1I`181}7gSnz(NSRl=Nf2)Jno*k#H!h7eA(pMU<-79w z{R5N#$Fmhe3`c(qL=a{vVqE#{^$>wiB){9{Izy#cVbZ+tCrGnQq%tGZ9V- zVNt_Gd;LiE8*#Y#0GPO+L$LqLE2!T(6xVbnE1IZj+aq+RmIxcl1P=sdevcSZj3rmj zh|X9Tn&WML-_Yy7umDf8`zpo#e>%WFV$eR0^=jz4_HWPl=MV2b8UAITiS!E>tN(PJ ze}93g(b-N$@g0@S4*c5UOYq#aDQ?Q2D+cx#WD!5~jCWuE9Q;-7dA|CGvjLwpVn6EO zNhgBzKQt})B+dc+)#1lUp-6_GHQrz8(Yw_alPLV)?P~6?^AD-u& z^L}6WCHH1CJ3BLL*Uruos;nprMk7H3007|k@7{g_0DxHVbPNgz{u!hZ=mvivnMo)} z002Lu(C-ajz`s)&z5Ap90Jy&b0DJ=hfLnN$?=}G7!T|v68UO%-$p8SMeR|VJVfYV$ zCK~Te6%+tW@H7hG1;R@J5S~JS{{aw40MFduDZmE=(*LDDAu#@j1`z-Vu>c_bheiwj ze15%yzn{ze`vhhq{2#|`#Q#zQv9f{xO#@<{i`loF62l*;_U|;E004B{=Qjc%HJu2~ zP18bE!&yT?UeL(ShRx8}?u!YVn~nW*DS)t>AUtVf;%rFeW@ByZBj+Nm9V3+sosPj~Tx@oZX6$bS1O(VQIN3QlS>Y6{P7qsXLpN4iC)$6R{Fl#L6DK1_3wvh^J6oz} zzJ_1yT%1K{Xr3$j-|JsFoh?lNuO?fk|EL95kp1}=_BU)C?Em8pFDm?;E2!jXVFIuD znZM{8;eROqe|i6@N0|M&^8b%8|7!YAE?iYnG-3AtnN1XpSZ@^>01yYfe=DKthH%ho z6>sv{WuzUyYlBv~Eswj_%h?xDonvscfngUdACTT&_DdR3RN5b`(?*bRI8MF9 z`~XOR7b{I--B^RtR#*mQJ`^{YHP#AUh>8sTONUM%EuF+3bm>#+irSKzZsn|QyUVVR zWu}@(mh6mx%cZDU#hc3%t5Kdg9j8&R&1I*^jHzVGTVl@ z;CR|R1j6GF>#~U!GEz$ej!Wx3=5#5^M4`&gU8Swv5z`$e6vEc&)iz3Ua&U(H@0`xg zh)p6j^pc}ErgTbHtwj>fauiZ&9yp_FE7TyyXPeee{MNbF4)wy-7WuZG&Mds7q@@xk zB>HUHnVDbzPuy%SFe4EacC?WacDCpDqi~P9VpJNuZ;Vw`6I5aX#Jh;;DBKj*&PT!? 
z#}o9rLkFhzOw>A|4(~FvEk&^qB+5uw3chgT-}Y9#>gD(3hqQTN&FJTa|Hs{-=@LM&EBN zKM^-`l|90MZZRPtQZr;cJZuh>Ld`&4sDP#)=!^JIGHZ@FiRey=%%;K6Q2)e8*Z(Rc zF2Lh^CMLv18G!HtS%q3d17kRt(Ss=pgeT9TEus9=soyQVm8yzr`#2tMP?jfj6r^OH zWK;xyf8YxcOe8@TIHHU#&)As!hZDz)y*{|ZUlT*=pLXqlH}iG~2<2r#0b8&UTUu)) z5n^zVd&dM8q-Ffj(idhZi}H&9Redi+mt<+iW=z!5*iXJ1(ow$ z$8B!}G&B_Z?X$-6NZS(9?PI}Ld;fGG&}ZPAcPPW(lO?D$V85hEack1Kf= zWP*TZB+KjoWK*2KIEX@<+4Fno_7QP^h0qzo{p*9 z=|VH3YBaLDrp)zGLH7noctBiX4kr$;M^B8G1?=fjX>lUT zMDj-an??0@iv(t`QRtG(k9~8~7F_iHu|c*w$;4FN8oSOhanNo?+b6j);_o8VMMWNmgCDGiD&8K8S8?E*;wOKE8N7~VnCp1VySbkmdzJ#9bl5MGE z+MM93kx+v#PmealH810a^`VCi$@!=h$-4YbhdR7GPNG7D+3ZrgSNh#|cCtVgrv(C2 zT36h6fhnkZlzac37j7#N5QAxD$4vHhkD)=2Sy)L14wPd%Z`1qfJlOs-uzI&h7$t4Rz5rmot27R-p4JV3g~7v`{`DzU#M9JCH5 z8q2jf7Tx~HmmFk5kDh;yTX2A9qB=m6DZm|Va?E_IlUYiSqvpTe$b_eZr8`b3YcSJ& zx9rHUXkXL0v$@zWG<332_HQ5M{NnTjjtH1yzQO)}J#;_F{+c4#_E9ALvl>sw=Sqmx zbmzn8KCBrv7c#bne;X1_rcXaT+@vksWV7G}09WlK$`&JO zJY3L)*3IqfP515cyuh;d-W+{z9elYCulHOAc*nky|pRcqlZPBwU@5E#a!eWwB&$$=*IQ5aO=aEj+KcNo{Buh|`LI zo+^7tXwSWGD{Ad!W;eeHIEyzaR2(&4d0Zr{hC*L_4uM-~!U$Xyzmu4xw|v5^Cxk}P zNTsCT7%k$mo?>n5-PSMVcnKe^0KaZIOfrm0w_dTIH>9a=)CT zwk9j4D3f;P>(-&%pC+dt)In(t7wa~pGx^E~FNzy(Xx!zHMc$vHp&{c%*zZblm}x$BixOmexkZX^Ho){dNY_4l%%(eM0PrqYZdOZKAr*YbPQ)%k^E zLsFU#u?_DXi`t4ZwtuTXd{oy{=JqF0$~3+~WpJM<&dAtb8Eb6~ zIo}z^O(=-K`}1!yTTk(kyEqGGJ*XGZC^k2@su{ApX0LcLM)Z4-KS_h-ZBqukVGCB> zoCPOL-ziPToa(ug;cI+~t7BUaZBY5H%54NorCNP$2J7OzdtYon1C}hM^!3F#dt1l3 zPnBL<`9k5^`{CO0?)+;SlF4F<D#^=#+0~cse)>(kh#&NhvGU0WpHU@f*Spil@6S6-6qo z=}ZZlQ+c?Ehq%0Ei>l0jzc*u6Bl=rL z`F^UpH?BcA%seMgaj+hzLRaH=$Ti+im1n5#WVJ*$C^tO8jDM1EQ|OZW*{2@sv`RDru)M30wv`KLL^JB^&UsfpP)s7e_vf{9*x3&#SJC6(EQ3YuKdc_hA^wc=0x$r0(S!n7A-T z%PIPlNQ5yc!KlcTA$81M#iBC^=esMj`ED0R7BJ1g&pwG*jkO~DlhF+M8>albr|ZaB zNm?gkr4635DwXx(`Kpxlm?YYjmzL;wt@$dg+pH0zVPjpnyov4kIC}iUa*!ERv@GOa zC2W#YK}I8By&kuC+QQEGRMP9Hec4Dw514X6+~4X(kR!MKeUE(AO!2j}rALOpv)XY- z@7{XKG_<(J%uz#N2)^XQQNoylf(e6S7N-7>Wpe2c8Iha=p?TSF^)|Te%*fu-k{txe 
z<+!2e1_&d|UqGUH|CZjyD^Wz4tIVU?EF@MR2yNsGPNHhv%z(%s2{Y<$3$fOkkh_mATdY z2P)U@LXG6x!en0hV29^9kuyz?A%Ybf< zrpYn5#?$~a1Dyi%Dmj%+PV-jox~C!rUpRu#KLIuRJ~U>Sr0GBU%wDw{u+Inif7ctD zL|=}5g3_u$_j`NwoL7b6h|G`83ZU(Ec(bz)^COR`sSvB16GFlxq{qxO9%H{wB5yA9 zG20^94h3xzZq{%af)E#3wPS01b(-4d?>}9C5^+NWVGa!eMV?n_o<#|);)ueH>b(#3 z4_%??CWJE^PH=1<4$?yzPMY#?lsfJhz58<>Z}C_28K&%$@C&(A9J(dFR~O(DPm`4y zLSs{9-;%77H*$a@kcSChYpKiJLhpxY6A>>BG}LELXz;9*m?Y62vy5^Q|*LS4>xC=T~3!`a64S8g@qEIqGLDFTQ3Cbd@}g zzS@jq+D5zid2mMPX(EL`mNdLg5)ES0)A=siH!$$-p;JE{G67CcL2i3>@E^7WU0?w( zCS~=VU$fD8jEwfMg*+)m8O?r}<3ISdns6}-KHTzOYf}g;RSdhT>rl*z-;s3m!zdzH zow$*(9(yXiqy_V>vgJuSiZ;F_NmTqU{-{Yvf$kz9=^YW!0*AO&R#bJQ=hS33B`ijg zrqHXd+(q84u{f)2CX(bd{qI%{Ef<3Fxk`ItrOI*@v;6Ds3|J;LWUcD)CT-H&X1CVIw6p5twc$*Ag zfTvSqliOf+lE>{~R@om&lF{vSVn5P;O(8?~|7Lp1Xy7XY>62WJvoe_Hp<2YLy)`${ zPOQ&@3}UYHi>-A^iH(O`9gu9R0_Rz-jG>$dd^?DAZingZHpMEvf~{8BStK2%Z+3q- z-o1`vbvRnbr^eZQif&%bE1GR-2=w(2M?ZEG|(ALAv>KLI}u& zqfEb8EAe_JWT{}?S3iGk&;O`qlMxr`sG6uhX|XaO;-S(u+@G&!MgRPwm8zkY_k`}O zYh-A8gekn!#2Ji{1}YdEFHd!u;4zQmbQD#_RO0lynW}nF@N7|=?kF}WoM#$bP9EmO ztYDqBs4~&D3k3S(s(NYm{dH^v4gEma52Vw01wAz$Y!(%vY}48sPCBjK;GYwJ;_ z;`dv9BEVj4>V4)&5?gCBxtHw%@EcRcyCfGzZgH4B-Tda4Q!SIjo$8I-5vdZKSu&ra z&_ZF~clc{2bK+S%;;`t20*nQcuUWm}OWF{+yzF%q;(;7ulo=towlyGQO0_wOWq$il z!oEefDK0n2?}5^OK%?5LGdR0t*jakKuwei5v_P8YN0Zi5B=BxupGhjVhUym*J?Ta6 zYyVuAZ4x3G@BrKXe0jOHXUf^Au{xkLr05^S|1jOL-y8IYI7B#fmkSF*_?l!ub5%<9 zpH_ObFu1)BQscsm1>Mbj41PdF!=qDSDjX8m_4s6NA$;@?*GTTm-Efn4NLXIjyuyThA7*O*Rr_x#7jYu%5n+hk3kV0hn6V_KAAn9G|sV>cZU z-(M6;iHM9h0=W=9nu+$U&H()GPH^G4J1k5gA=@fuAM%!vzSFcEnmvfolN0dS)X2za z^j+&1e4uXogHZlJAe}cHdlQP5{gI2S(+a7ouXH^-3T653;Ja`?i3#8Rpr7-eYJIQn z2(9V)C}YyHvc_BwiN;-z{!D%Uv-dos^H};qgQv~Rx<$MD=8Cs|t}PfPz3opy4sTit zdkHVm>h@;w>&^21_kVSC&qFXJMVS|RFwkPS{6jiurSq%cU7{;F@+9izC(*3~lA>5p zi7oEIEm1mdW^ksk2&t%+P}0E)Uy6b?arsoSqoyt}e-_|Ds+gV{QRA6`Vwvsi4;Eibg(O8WM5pjI^whGguzy7E7_#vwPkm_vQzN4BX5DDzroHO$>Keuxr z@z*0ai}ok!kKK4m*e4GRnHVyy%!fh8`SQiVE&C+q`f%d$_G8IKxp)>c>joywMlwoW 
zyH6Sk4jK<0%RCe4-~j?-u3ahKJV$6w-%oXc8yX&DiJr&eBAbIlnvH1A>L~ z%PlskO?p1t3ZT$j?k^v3;S~En^Uw9WKmva$O>;^~M;oLgF1N0a6m}sfN(1Q~`Qnbf8sTsb=~D2UlAr5g2+>SAFVQ;paCe-Vgw?8oq!Tw#tlZDr5g-Wig`p zHc#r0`p;_B25Q3LUUy=nOxz38Wl#BFPM%|{6ZYv*&-K|^F#m0-N`qb~rfl{`WBkSd z`U2U`Y@xzjUWL{k`1s#%I@Tx%# zL*>_{HkYZDF}e+YGD~pPO-jVXKH;u2tuu44X1A^M;-0(l&q)8#>4sfDL3h@;v#(%P z1>k(Jf(5{EZzwgX{7SOzI(0CPE9PQ$aMIdLW2zR;4GD^_D>UwCmAzPlA)PnVD%@s+ zN|T9JKvStzw{Iu{v$V8C49M1N(wdh{p6azTNpX zi40GC5P>gg*q5gTUWb2F`#KADZ46&^UCGo6^62TifU!A_EsYY-Ts#t~XChoirklJI z@X3mNJiG?uK41Lamjt zF3ql%pkcPReJZ>Ne;7kr>;iOU`$uG0SnR4crJN|*pI z(keP3g$w~hBc__JYCe+85BPRQz8?!Dv)7&*Zz0Q24W<#0)>q9sEtb4l9)FN_wQZ`e zMTD$eM7C!gEQtPkHQ9bT+F#v%zqxi%&)&YHd>N(p^xgfJd`e+dMp>BC;r-C{-T<4~ zn8K(rvseA&_Q#~=BUB+PZqtm$_&f~yKK+ZO8;pkBZC^7@! zLNn_QU${w0;jfw9BMw#%=={YfC5dSh?Y(lhoaA*ooi!$Uo`YpVc`bfmQM-=!)-;X1 zo2ZGtyEW^wAS58p^7DKU3NIAjW@%$kp8u z)`A3u^#JeBDkV97CPG6h1BsDI`7x@#dzuT7M0?HOar0}BSUy>G z)r|7-I3(?IT1g-bsV0)R1{(>NEDo0zdOaV^KE(KP`a~+*@7GOluz_LT6?<*zmkORH z%!FCV^S)Q0UKXUzp8E1SBgZdt%|+I(!^4g6z) ztO2a9^u~6 zm6oQzd-v&w>&$?QY;l|Q`^iA2AtZ;{+EtQYD6d{g0t^FC^*2=uxYTtuuPbp_M)X>U z_=LuTJyQZ+SZGNE>^EY-=h$@phZ+5dFAD(wY)OQ`Rx0WSRx8f{(9y#_A>7@RDeB5} z_w`DwmW?MJEHV6R2RdN1bs5TY)EteSxWa>ch1P0oe&Z@N=7Rp)3{=c&M7a-%_PUT0 z_7Bp(tJ4o#Y9JLqmz!0*>+PH#>Dj5Nn}wU)X!ST>F~icCf9HmhmF2Gri|HBlyJXar zSDF~4x25UMB1{J(Qd}E=`&E%|Kz) zI9%t#Qypc7464IlTbYcT{i2sYiNvd1-HCA1vV6|lW;>-= zCo~}4pPHMRo`zS34jb>TxX8|0@XWX(MexkHm(L>kOew8Sm}WvKeR%Iqt{(Q(##-HQ z(;2Cnjt+g;oaNl?P;5~mzpww$j}*H(eM<7$j`WFdy~}c`|5KU|pnny8K$Uyy5AUGg zR}=x%kh|ArmA8oj1F;PBtdLIzs}e!nTtv|kS7(OM$@{K+{;=#PKxa1xo$lAa!4~+S z(E)trs`dBp$m1|02G*cies%Pb(Y84dSZii6?c7jxaE+BVaB zsMTJ_0hHm2(U+p0e{r+EZDig3$|^mYVVovv%11w0#V>b^Ew3rR6~0dDd_Bkf&gM>7 zYgLp4F;3Bx`Eh3JQTWc)ej=_eVm0**YUMAp*HNs{htLzH&(#r-P7CJzs|S|R1@>bZhV>7F630Od+koW;m(GOFt}gI&Ks3;hYmmiXAg`kml(aDz9;8}S z(qB2>uiutxS*Z7gee&qm6ie_&Fijjd&THR*sfZBxUtqK7(wuA(uzfgkpKJcGMjyP zpCDw}3B=@%Y)IdTe=zHMWicwK{PiyC!G0J1ufv7c~Vv(j8+n1ytDHi{Z4bokf+mIytV+gy1Xd@>08NU^Z^R-e*m 
zMM(yNVi3QedrE&>BDrU6#8+L)8Cl5aJAsZHO?)G=VFl<q#KE*{AXlC2mNEHxs_+)yaZn<9g)mN*Aha5?`tG zAmFvn`LH12AQ8S9K_m=2Ne@^6S;m3SxhH;f3RX51*%B z({`SNt0|!WHWfI^$9u+#aw3ge`yPl~O6`xrE3k6r!Ax46B2m@Z4L7N2+Ehp9Y|PX3 zoj1)$8TU}k)Q^jS6cA?p=)o%U!f!dskyzpT`I!CLG%U?(zv048#7mW6K34qKg6zKH zrh3V+=O!+8V^Hct<;ByO5b{Gt6%I8pI@{C|H``H7@*Wh<50ziIpTvENCkxRZoMK^h zs4QZ|nMzI}2NVMQTFiU84Uc#_TwdEIzsfVuy;{Pw#cC-ojBB{Swr0#0XGWsH3GN8M z+v{S!*@bX58!k%wA9Ga;=rCeL93O~_oG4jr><5I%j6Hm^KaKy4o$X$CVn8)xdNhQD zMJKS+x>wwge$;xkyx-vmgdQ|tU8Y`s1<7h)dQ8f$O+|I#zaY8{=ZG`JeW?9#gu0S; zy;K$WemE8;0TBO?;hBD|ZfCkcY)ZXmWke_$Yxqqw(@%H78m9cwM2x5AhutJwa+X446Z&RvQCcSOPk>^&U68D z&j7q-bn4$0>?+Dm$0XKP6PTB^~s=?v6yX^GF zQ&jXQZAHQoyR`1^IKYsU3VIFOZ)e*NZG4ie>j)67joF;Gu#i9^SZauKEE4@|L@t*6LuoTB)@&&IzlzS&E z`;ZSwii$wjdWc+~O*HF&zT%gW5Nc+5i5S=ueLZ^zEATRj}5s*f~H zFYY+<)h5l{-0{-WqqJb!C%LX_UAV$kQ4s+D zfR=roJBlHLJ@vwUhY4@w;})Ax*y@+-&B*_V`mRz4z8jk%iPSjO}bK#6zzxrCWqXl50Qs5HHvNX6ZfBE%R)n0b!P}Tld*%>{cCBzd%>NLc(-k zdwGrJbKQM1A%!5f2YI%hAW5_xb>n+N8fp9);_q_1aH)QfM2i$&d&mD8*p|$Dys55j zV*?+K2_21~P*^if6P4+-4y;tuZC_I$cNr!Pwf5I9{Q;A9w$48ZK{%vzQtHd;DJ6Xd`ju!LQ?9v`ssplX@%Cc} zFLM|5pQK7G(F;0J?o(f2d#)}VRvN@P3)URZ9=ofjU2BkXkw8L;!cl30<&#R0X^CDOGOskc(}F zU&L=fBoe9+o75)&6~s-AHe`zZVT!%?s%?u^jVVrr4&s68@B`9R*^0Isw(+67t(Zy+ zUCet+$=GL$yj@S$>%2`YbU4+6r<*tQX=^m`}f3_f^eGR1C((r6x6 z`=bIsH$JL)StMyJU9I|5>`#b8@qmfC7AsO>2@{Gi-7Uvn^`Jgo8u?62BzU=4 zNlAwx>PNJ}k!{S9!&Q5^%1gQ*ev++@0%tkc{4ICncJh-y??bXYKVLq>!R~Oxnio5< z3_P1N)gzYNasxL%kKYI~n{j3S?VXdlO3S7d$7; zXtz&gF0-BWTVR<3ekhCD7lLf4cEb3ityM?u|bRVL= zw`qnSs?8*XE``&}k4)O)IuHbcp=}-ziu49616rPJjU2;GiGc{bp1WsvXj7>l=lmPa zfuE}iAF>|L3u~GWo0w_cU_zM1wA5PJr%N**o*GX~snhLVDlMM9tbF}o;CA{HwDGyN zO^D?U(}zpu@rGqj7aYyL&&2|6sZS~F!to~;7ObzkaAetbS5lDJq%nxQ^RAwTCB3a@ zHxE880G2D#C-~s9R-z9lC*w;T=HjiuRJB_PlRn{3s)6kepbH>~cEjY_l&SgcM)`+& zYm=Xx+AUkpcC>`Mj7D6xs+HXw3=OH%($a0Lw;;ZGdRRCc_pNw3RVNT~DHOd$HSU7KcpriOWW=wS88yYD4ByI=K3Y5o zVcjkR>qqsFp^XLzVVOB_f54S#*6r`YR1l`~mI@_yaKG`KxFs^vm5}}R+oRD6gp6Yct!G*?5}2SW7KQUirVw+spl1O?K~*c9 
z3suG+GRTJLyVIl_MLf_(eN&JUkKyE@ZhDT-tKg7vzdFIji(lB#u$WP-E8_FXW{^7kPz?(@ zX*uNhrR0^JK;8DIU!dDU1SrQ`?(J6!#vg=*kD=Ql&uV3ucP!YZWFAu|vGJ2cB_s&W z0Bth1$>BXk6DaNaQCd5u9P8|c(2~ez-O7fI%^^O?2LQcdY{qC_xawov%K9AS-3#9*7=_OQdJ0_IMs# zmL$=v!)&S*x!xr#9{3j-3}-n3i#6JMO_h2V19=QJwEYFUCWs+Er%^r}i$A@D|JpX% ze!e_V zKGPC*Q3+AVc>1~4U(=uEmaBIuf5l?}Zod07*Z(mx0P#ELa8p|he2Nsm;*KUX9u7X; zZJKks5=fBHOJ5^jY~UvU^XJbO*T28zRZ>gT|5~p(GoPF<7kDKp%>t2+Cyo}I>!>@b z&Pg*El7Bu27#(&7s11NN_l6UBtH%AdcTbm^YkvOxnP?;c*_7uM)32~-Pn7%`3M(MB zHHWQtN(BfuX+?95ftxV+EIYYxBiX}{Qdz-oZ|`o*mSvO5=4@oQ-zJc&#L z;o%{ehehOuUJn627tR)mhv%Ps7!=2zBX1`6XT%)Uh>pIvmH4piL$+;>CkEa4u|_D~ zNJ-jXb@+Hx$89P$D}oJ%tJiQ$kmd##)3(eF?BLty2P=KjjO)9n`|+Byt??~ocsxUu zHyl<)#`FqedWIqDFcaG}Us&}Bz=u>N!u~tI0IIx_A&fw4y($*7o{Fm^d6t<~<&yGp z%HhZDC`x?GY-$gbp{6Kb@n8|6OkO=8uYL9v{~1*P!5_DuB};2M8i_solBvll^KCZT z%W9fA#Mk9daE^e%?? z^;<0(YWAI%hh#qvs@^@@m=Ga1l`J1EC`}iFvZ5x{jpL2##8^D@1Q33LIEj?w(n9O55!rC_T z%MD*?Mq>Y{eida^8s6({ckffj-?VFV{pvL$Td#xP{~N-tjU9 z*4hPjb=F_hvyo%BtlS6hIbI;LHu(|C<-ogdyY{sR`ef)6JZSUV)+;5~(j!yhRcYUw z-OsveSigJpu^;b|wN7vzWF(+YtSuO=>N{U*KYrL%7i%f_U0`GSI<7a)N7KFx3KPz3 z8+^^;_hlB*ER)K=6&H!%GD2nrW2|9$-}!im*$5DO+RzDdvCdQsKqm7v?ikchNaPE} z)u8HM5xq6P%&@P8hh9ebhP<(NE2#%HW|~I@kWuM6BL$iLxV+7~wY2KU{|YYC2hRJ~ z#a%eQRCfJrC?hjG7y`A)B}tlBRgF>)7@M@fJ>(IwvM<+k%Z&AS9)7A;WS z9X!GM)G%9ig@TY+!fDz0KR_n0lpd~!tC8Ez-+HEI&R*q)Pe&LnD}FzHm`*ao5Tu0b5Vmg<%Vb;>Y@Jf&iv&x;p+t6avI`L6%*&aii{Pv?~^FlB#Q z#<*q-8!}jamY5jy6@I3#4%SGkf}F=B4wvL{>Lv+J;wZ%?XlzcgKs)?Q7__{9Bg$v7VW%~$?k~rxf9e(H@Jnjhr-FA&rANE4YsI^@580nRixYl^w51bmP*Cb zW8;Eu!3mbLDSi$A-oriK*Pb`dK9v9>*h z_{L=xunv}~Rfe%Zm<2CyC96+~moX;Na7gq~PiRUaLSi3SDUsj}#X{j$#80m7_`^~O zA9c|&Xaj|)o>==ma*=FEENhff*&i6CM{B4D#Wv+yMTwtX)uZ5HB*QMR1vI8tg0^4N zEfM*|IE!8ciOi#G1XzQ>e<1iaO)`=1&042Ca5yc#?KZ;B*a;n-sbXP!G-SydMnz1= z#YKaJ;3_{55uRJj#Ya5)|UIE&u(U@Q7XVidkRlIg%^Ol8EkYcYon^`lM4P z+L+VVfA}q08Rp=(9jMZxovSH_*5AKYc$>H_%PF_~GIUlIFR^yuV_;PVR{tuDOD}u3^n-wmgW{9Uqm#XKtom@RI^&ts_Ry;_ z+1pFT+sE0jnOF!?8m9I4y$3|%_pPO$1@01)P9eeqcKcX{S>a>oMzjVsPslG`xah6A 
zG`i}CJ_3&?rx>YDp=Xtsq^J-gMxlyvVS5CoJ85&;gx?WuZZuJAW781&18rqZ4lo%7 z`qM}AI(a6l@Vx3Y2XX_R&eH;v?y(vA5 z>XEskh6Aufa5J;*A|lx@_{UnE{igvD;&fm#I)+Go39Qq>oc{M)$_BJr=xHbGVjV30 zB6A^KUnQ-NIYiZ0Sh1l556HlSfv2EkwmVz@)Isb8-FYMHbvxbogMy-p9zK^Xo4#X~aR7I#pP-1=yaa zGd68Sy74fx0BX7Za1@D1Q^vo`;g#Mqv2JZap!2uleM|l=g)`~|@QH4Rh?u_c^!0wR z@7Nz6{)18l%904LS*}3VCl~2agUz`Pad{N;AORq(56vNsq zYU}E)$OX)go|CtZmJ*={Ja2}`y!nShix#@)dsg`a`Xb>B2GoCaq!1+h z%88`7gAq(`tT{WqTCSvpDQzF97-st3T6;W61=y5nKlxEKL6wkhRjp#Ezml#&YEGP; zo65ttVRS3OWAdr$tmn-p^RT<)iacfUnVCh(>vyL{gpCU6GI zcn;vuIme>;ftP#NuyYp$s`bMBoB!Gvq+Tu}HI$#vr&!ZvSTrU~)P(|t>8bMTqkYd$ z_J9sfFh{EEe8`8xE$|t`PH!<5C)hek_`Og6;xfTXY`nLj`{F5SIFYcTv(T{1TeBqM za#@H_IL5KzxCGzy&9Qo+da~6Z=RGK}Yny1o@-!B7lBVlHMuvo4> zl2ESvuns&BOxtt0oOY;POXovbSo22&G+7Xg>A_ z%h(@3d2TMg@ipf1XsQj({+pO)wNh&^Te@WDd~TVJOAS*}`Ey3qy(w91)*rc_jEq&B zsg>xIG#6NS(4zM3bIBv+(=lb=#XS;E%Vg1Z*l$)LP9*6&WQA7V?{F23_=Mr>BiPXpL;h9)d{|yD#6F zvmm-b)~S+0P>91OSeierzHR#{n_ZJ|rlG44^Tz$AZnrY-)|tzp>CG={ z$@=BXjC|O(5cUhXlt-zbZeB88!t`R`UTFODp0w~F3dFWjSDItAuL)XK{^*bB=x_}2 z7<{iOfy7Tvk3A<1zwV+2oDZ^og^$!?%~;N?t?9^_`EVyw&AOIkLbzpFnaY}0)*3Cl zLWpfQTh~w%C3*f@9{8foW&*R_x$^B$gIWr^?P7e}`+?aDTDx|XOxr`Y*uqOtQGzso z2Fq8bztGp2XM-;VFcDFW&1iWbFZEe{)+ZOPgakKQ_1vv5Cj1sM$twit4E}H^$Pm6J z!y%LPDQVS9;Hx(Sb-hYtvakbo2Kfg!wiJ&>ba~(wPM#idXB|2ewM!SP$SAhwkaVbH z={QK&>RTt%0auhIV#n+W-VM<7`&A!zeXQKW;-8Va`*YY%nN`8Y$|^1FGMXq3t%p4L ztN5#PcOeAfmb;o2Rf)IOIPO{rz^hdrs@O_l_KQz5WSOA&?AWt$L-k zAuA(86X{DrR+U-lg70HPIfPD9{mGQaG|)oI5j7qBL%n6%`>@{ zrqx(C^0n5Ds}T~haSlyuRim*(UJxU@kVLs7F|;6}?8XwVHf zUTv5y=0h&%J+;QV8@k)SDdC7k=w2}GJ;`fAvPTPDESVWI3F>3Vr!JYys@m|WQxK-<%Gkru1Ik);<7W26))EF8!J9L&v-L|S+ z%d2<)+q)dQf41%QHwWy-A1id&PtHbeLSyWT?JfnqrR3%=7$SlK4yTeL+aDnInEQLW zR*@cd`cWUm^PUh+?Z-|L+nqx-nC@iauk^h&gUc7J67~f^SS1}*5Hv?=H=if?#fX}k z={ltXyTIsxKge?N=V+fIrftLfcKUFV*2Yd>wV`%?Dr~7`NT!sue~S_Qe)e1<@@YvXA8X&bu^1La59&Py zM|}?V`Xvo&PMbcr1%mBJQ6|}qOu~6Zu2-^t_9iQ3825F$&Ak-IR-RNJ6L7={q)zkd zQmT3p0Vc!li!@Bg)|EU)5FbRAbMDC`x>G%r%>7`d*avrTla+Ny-R+WaLU%+u&WQEgx{qQ6%ex57~tV` 
z{r9w!xpJfpr)?{nkE=7PqYFeb!7FExZNCQQGbWU(qRFhc(ZiOiGBokc)Qny1DzHnN zdIJ?4|Edo(>deMETqKMjN!#ei{<+3UzqdUQDlR&RhMDq$zByA>MOM#bH)lgx)U*pr zxDy?1<#p|W4TY!$^i*A!B2bZYk$0#RypijvmGl`@h(!5GuYH#mevL=iHv^V!H=A{h zKk28WqW5H#^O1CzeN>9}X1Yo7q+ZNzgPeaFc9XTIOkDIsn0u(SVqf``MhFw->P^LG z+|L+s-T(jv()0fUz|DiQ^0Aseo&6lHzM1!kZ?)EW)7i7jv=GuY!`|ybBZ|@BB#h{PN5PpbQp%5CUOqz(k9|^sF@Thes7=a z^ZgxezuRs9?e^a7{od>Oe!iZs=kxKn%X|vQIpbbnYz10rI4fZ@p85|F`{yjdUY7r7#r^~K zAAi3Jqx@Mvt)9I05`pra0#)?B=m8*R&M9Pw8F8X?HZ(e3U9{38^>92l?i0>$!Q$N0WpJA08e4ZHXY|Ya|KJ zv>kMZnsNkYn>qt9I@lveye-~f^^)7h0q3T0!ajX!VBkYFxNVh4D0(@~3}>MlZpT%k zGHJ1^8Cfg7*ka!WDk!%aa9JfMuDECFyKxPS zSPhm#Bqt?UlT%X+>I!4A_`PmVt$?z{S31NLi&yQb@)`wguF&p3jY&+E7!2#WqppVK z$8F_oeH6zNYL3omQBFb}ptGnjeOhY9tV>%D{qC>J3Xpx$&!Luq#I-!Rr6%+CjqWml`6x!O#4HMN;J>YafBSh_x_?#Dyx=+nPS@+JjZf>Qj|ZF`8>!Qg z*h(EQc@X?|@_3z3S_UfO$XoE5)srcE@80>0y@l5^g!(q`$+yNUIpd%>$JYwrE7|C? z=@jbQVRUdi=bJf`ZGQWguh4@zGU;dO-#5B#TL`P+X^;kod9a+`$W7o(RaS1>2?22r z4}>|nberkcZgttFUR_<9o;Teit3hlhm=g zLJnDT9lZl9d;v7yc+R=nQsQJIz(Ur5D`M26;ytXe2>#LYQf9^w9i*D=*%`(|R2CC~ z8OxFH%g9?`NVBmOIPDwdWIMOF$Z^%6!^q&1*-q{Yn_0V#g{X*8?SP=wH&%Ld+4!EB z_KD*?Gg<^YeZW^6ru*@!b+beeortP!r4Uhs*VKq$)pRvRRQ3Ai>|Jwwhp8UpkuyH7 zKGb+$w^qQuWd95iQ<&_Is0+WFOm`q%c_JCF5r+l8ksM*D{qfc(!|0`I;mga<3dZrv zd0g{v24=P^_eLDj3w!r<2VbL-9-z7Chou@ zT)adPM=zl9!(Rwt7GirW8`J*J^4d~5CUo5x4EH9^=XaN+p-P&ojH~C7Ttd5MDquM2 zY#vTxpzja`XK4Ro?sVYC2vzD^rfqa@+}C!N&t(Pd`_e%YtM~OEW^XSZ_POcpeOx`@ zr3hY$4*N8m%sKPUsMJk9$s9uP-#;95`{(0=Sn4CB-24FQ43}x7^w=%by3aVJI59Sf z95*!SE*QxVN5kQPHtd)0Znp;;VT&+ut2{V3Q1KdYn3m@jVt>A9sGA}ph=G98=A@5N z6L0B$-snzl(g21JEfEgw*&9#&QQRp59{tH(JJXV+-8*;gcraZ*VM+>5tmC_j0fQenS}Zd{>V#&UyQmqLk70mi|^_sUy{Oaw)x7w-OhY{oV;7G-n#_ z`aTW%>Xz*v;o7U!ZhIs5krX12aHFeh5-yz6QhZ$ux2T1kVH@Y00;=8A7^oq14;lk) z@4LVBN8;Bbb#IOHh^W7_(w$*v=kzFcOH2GdvMyV^3PabPK5%-{KDPVky`zh5bB&Yy zRJi1>sHS*&g#gFx`CqDQAuq%sJS6C^)B$Dym(I_T;KO{-!(Yp_0!yGC(^~!lg<5?Z zcGZnyOT!ZBA2-~lxNz+y+hTfbQ$MDeid^tFyL;(~zY2CgeM*OEbWr~UE+GrJ-ZLv$ 
zz_(Mi^(tRYiDP&K_>RBu01T=#A*6VmX8rJdPIAcvowq4;rQ3MJYA3~Pi>N>uBokUc0qk))V++_~K{>t&|z3+|C1wOYlal=~TBbVa}_2h>Yd)=c=dC zezS(uH4PCZl|Qu}&F#+wPZS!=d~M@yMo>Opt=Qd+anz=1RmFFHxs_0$m?v?qUn@|t z<*}F0sXF?D@h#K$V!4~j%|f|VP9?g!K}1?>&G7uBk=~+OspC#_OH))2HuIHzjH?gh z(GJDU#oZpa<7(J4{F&u7NUzqcYFaFhNr0V9QSO*a2(+A6Ap-mTo@pV>^b|aO!6A}= z;MDzAFFDPWjH>5EM(MVLsFwFLFJ$saKDW%fxxe|ZI)AYj!)r|0Lq^ z&1GPs8|nmkt9wP%ko$ZrE$-4y&Ghq3+K^Oti!Q^B_tm?LQi`j{8mW~voxaay7K8o| zj5nYs@3ihp87BIB{$m$I3}ZhJ9oiKw-gR^ynIBl%QgPuAp`WNGoJdu7e0bxoQ#79` zu<&ZbpG_h6a#r4qeO{C>xhp&}w#eFCQBxo4KALM4WjC91FXGrrDBMRWY=m+8PHC`z z>3olQICX9qUzWHGazuH6`&fZq{+0toH zTO+*KrCpnT|Mtmcx!?Rz{J#Xiu&C7XXNDuK}W{z<|-})7NGD zs6g}g@`2xS`5ttd(O|T85p*r}!}ZhG(_CO)e@8tZXgofUr+==?^<^hU#iaywCWa1B z4@RfI4{_$*OgvX7Gj|k7?mzx zF*(s*0KvH{K!=N z(L1`5T^Vd*7_&yoh~cQ``>+b7AFP~y;Eh+ic-MNdVSONbqJ+AvroL_-hF-KYT+b=1 z_3!#~R3cJs=Bwg9xZlt!cw2^JiYuPUDMr1X@j3E1qRZ_|yKgL+g@()CN+Op@=DbH( zu5?x+aA00pwgLIm3CqMzh@hr_8bekWdQn8F`w-3Z-@^F)a^>Sk3O=A8rSs~p5rtCaE&|-z2*nhJavsAMkUq62rd%M^a&P0 zGF_JyQ@Hx#>Oq2WZ@%peHrgqrFadN9pmz+Q4QmGrj%xYTUE#XH^z>~3?SqX>-am;N zW<^K}{I$4JVsohq5`XMGEoIAIQ714>ZkJ!_g<@ttUy$v8e<7B9)g3My%O|VNK(iGq z@TpgqeoWsE%pSb>w-VOUN~NzBUW#A1J3Bk-TB55Vo7zO2Y~?>kV5l107)iR`cUW}L zh=|&pO~ExBMwmuFu3OCgY?|HIJ$4t@P$PsI;D{0M#oIdZ{y~1WMLIl!ixw=1y%@tA3tx1j9aRom-sl59>E#6x;_PZbYb+4PF+s*L@M@{e+;i(nF`q#@r zrJqgtDHgxpm`7vBpAK^r>%Xhc(XXx|gHoLU+DM&B(4vFeYny2J*zjl9jkr8znF&YD z`shn?zNxd$f6qoC_EAOKe0O{E2>2~D+fufgzK(AGN+G=dwm#N;G@!t*(tJl2^>jkX z2y#Nfpk4>IjmCP*#BRemGVC*Y3$Dz725vyj3OT>> zrq%accg)L_vp6*WeP0l5Xvbja2YA2RLc89Nx8$WYqg2pE(;=Sr7UbCI=-o zeG(JyEPs{be(7va_xt%ke6%EHl<)2{2j|c_4}`+(^iB1L;-5E?9@Q9xZ={%?j?i{Mz1WUKJDpPJ`p~$bN}iobmrAl;UnXqmGyB@TG7Qx zYVjR3;1E1tAq2Yw;pSr)Cv=>7s~srRi^Mi#;spvk;;3}AfUJu;ynEiH|0Os}x|Oq= zab8LlzeOSv#`Qb!D!ax@Dwqo8LB5Gzb&^7XjZqSYdbK9!$${<%G3rQ zs=p2VesSu_IRp^waeh6{q3* zdlSdx8*qw^J#D1ddM;$0S&HT}ZaZxbfA!NBFF4W*L-0OM^H`%?9ZZ4U3S*8p|5y#C+ z&VPMU1H;XpzMSgPh zh`Y3E6Dd%L0&xcsUP+Vq_Tj~ol%=0_puD4^NXs@_JE49hYFVON|&8Li7MY?>;qhG&2w0~L^| 
z$i+ZvmuwCv38aJJZpyUNr%ua(`M|tjK5Ia(J4VYB;SxC^qIu$SW<>@j>p_TTgOC5L=vZ%`P0mZueE}tB^t_r$tNxu+qmEt9bKX)RbNwFjk@Igk6 z(1{%tMF#Q)0n_0#Hc2NuE0OYMr#`iGf{3n3NxHA#HNPVkw+^UO0AUNjP*l6`fxLi~ zS6p!I|4J5UW4F`|Rk&^sB}kbLC&=8Hc^S+dZW!eXq%Cp*6fWM|i%%QOINsN7TUpnv zMOd)T?9;GujsffKNbyM`?GI<_a#XoljKPU^Et;o`?zsM6jNj`&mRA3=ZjtI(V>-4#ofwiIX#{aO0H;yVoKo!PD=fMJ_Y z$x@IhBa=Wf6rEtX3;p>((Z1Zzk4geJ3wU+W@=ZM)<9@>)x}jxi zHwOAUPic5czrgw1qvZU_3X zVU?XEHbq6)XoLIvDMC2g19q2nUsGQoU-?rm1`EN#!Z})3tW*rqS>g1W&e40vtZ%Xu z8&b|lBJ9Zz|O8*l?O^d$@f>e`C&Ei-+Fh-vi+Xk3uK+~9C z{y}1^ZPHP+_2MWOsOvM^(%xGbE@Eb`krHNF>ikE&xIK%JtM0X(R?=5#UYE_hZ8!}E<> zoP7lzH8093p;v&w2}7M^_#sU_`rDHtn*HH*Ap>lC*aYaX*<%Qm!*-lA9v#V}hLe>T zK}7A(S^tHNa{Y*WHj0k?s`9SQJ$Xw>9M%6WQv+N#WS5jNdsju%Ku?UB(|-nw@pz6t zaM8VFA5y+vE1>}o#Zj92biMz_eIG7hJ zZpEDl<_q5BmSB-H5=TDUW_rdrOZpnrSwlu^NT<^^VBj?j(=#Bd?CkJ~gvKnL;{k!} z%;&rrpzQ1Y&lNnGQ#B5-x@AJ-7XM)jP03$5?!tuPCsh*ADN(S++`zoejEe+0JG8 z4eVt<|B*tvsB?Mg7>_XV70_UB=ru=FRFzP{Fis0HUP>+0-?NOudjl?RmlF8;!U@BN! zybbiC^UlP1FWSTtoE~}BRzsFGA`#2S>)AE%ky5Lx>kU26nv(OoY-mD~Y){Lr z49Q+HBxHYKF-?RMgwDGzswvlFzvV1qYj6#4YlHPvp;G}8oLo4<*7Qb}g%*!KG4+Ob@Vfp%qzTz#bxB#^R~-+ck7e(&4u-0G*w@s@*< z)}_iDl{n%6wg3x#s=~$mCay@R)?G<$cvybbe!Cgs})f2ZhxV zAJk)_>D}`=u#>eKuD~o-x2yI_s8I#}2Z~|#ZRK25oZ}0W3qT+nsEW=M}u*HL?&^^Gv34cW(hZKiq=T!$b)z^3;6<;S5QOo|_{z*6a^I66_e~8;}ot%S7vz zYX`e|g_UPoUcDm$Bf`S@RC*UAjP@Y%6&>yB*w%BWcWNf0P%3?v%R*Hgw|!olX#|l$ zi5H*w?Of}GhnmdHC$!pVGm$0Ky1_wy^N;?*_lyb&)&>J(?=_omJr*0z+v=;T>&dRh zg)f@c-w32!I924Ud1Bzyt0P4OZ2NM9nPo}5GPV3RkMLtimtAPyZA)x%ZeinrQSM;H zf&~LBp`rU#)|`{o#Fe_+O8ToRH5XrNu8(u7?YbRSIt9;lqfU>*l{$M-h^b zNCgghR6n(8eT-VwM6^;tY4+GTS}uy>Z(kp%38_UORPQy;*5b9hq&g?Ik;OXK*yYDQ zuGzfKk@9Rn*|%uLH?7;lfJ*(}dj{xLHJ~pJ%3s_U9HtE3&TTpf*L~T#b%Cj8U$5WX zMBU_!xVj7|qekYD_%L+VHn#ryCeuD?YHnZEuCGkrs&p4JU!hx`ILv7bEeB-*9U{-f zeQ|#e-}q$X^nAX(aKA(9!h9-_M}Z;qRFzO=tLnxTduWyk|AKIRtig;BMKmniNY)@F8M zeRdz)xk8xMeuJh&9_~Cmg|rf@1*7=;toAc@Cp7JaFZ>iE_~*=<%N+{!f1=A=oJfV9 zXP{7F%FXZM{TMF%`(gi9g$Ly<`LN*f9zkjS#nK}~6MBZO?o8v5t1Phdw{ddpJ{pG+ 
z*L%#hfgV=GDBt6t+&HE$z(bK`Gs{|c2~2?4_q!WQQ)nv)NyV@Gd4}W^9%DR{DPDQX znxSrAmM5IBDxu&|rapuJ5BR%7cBIYiM~H>z*LBHiYNDU3t2v&!aCBj`T3c0Ep;Q?=+9rf%m3l^ z{!c_7R4Z=dH2Ul3uX@L1{tNT{zrT#HIk;})>2Ci2cb&mVEKJzI=5bT#|M{=}jX8mG b=-%x*BkP1m;dZb|>sqxarb zb=9t`5~-vh2@it<0|W#FFD)ge0t5t-4Fm+t1_c4QW6JbA0QeDXEh?%cEh=Vy!8G+|JZeU;cwDuBhNC1A75uZo5!$ zw$yO85<}QNq|hH3jJ^<~#g1Hf)-At?TW1#c>zgyzaSyL2&`8ga);4s35?k}?yBtw_ z2ZmGk05l`tjj2kKI|qKO#At|xy~~Px0{U}k4O?+O=dN$TS~UY`Xysq;g^ zKj-9n)QAm^ea4S8?e#{}4~FgDt2SkvxfR?Kwu#M%Na9gwpn`-4DX6(s(AIZ$+Lib* zDJTay$X%2Y!-QYBb?dyklY?2v?x>2zPN44t=i2wvTAAS@7~Qr+WK9EhC*dD*TbmO_ zjrw47qUGj`^QU0h%Ua5YCqwLCDFzqq{k~aJ4AU!q!py3(PkkpZc#GyMv<`U4I67w7 zoe%1+2=w*5;T)uMoZdh-odnE@hPjrsg}giv4d5CI2sqpt2n=uq3^;HA2M`cw(pMlz zz!?Q_h!ueR`xGR*0QBE$V75Om3ag4rO9RfTrY`2@4z5;?ZW!}H#DJ=nt<|*LwB+S@ zOdahRjLjTP%o)7wo&FR7;`8DGT-uwv854Qg+c~)Mc=40`^#%{%`p<1f5~9CeakJ$o z(UMmp5_NPjC*ojWVqhW>fFU9x;&U;x;878i_@_AF7e9%Wo0}64BcrFMCxa&|gQJTj zBQrNQHzN}ZBMS>X;0=0LZwEJHFM0=8(!VSDw;nNbS5p^jCpT+H2ckdq8k;z}yYZ8d z{AuVve}DJW+{^m^T5@px=d%DG$oS_8BQpaNJb*R? 
zSedx^{(Ara_vHUt{I8N)|5uWOgY&;j{@0WLx1_qOxr?ZyJ)lW9f&ZJCe+vKi!+#3$ zG5-1T{~C$EhxxC&fN>Up;bZ*IoC(0l?$(6* zGV3c@88naPD=K8y`}&}r*K^z?@M9ajGboA(156qCR6gp#K-A~D`||hkCX+EL2$@;3 zU6Sjrg~jz|`>$zE-5%~s9V{s#5-2E;kRT$We@|>6AZf9)Wp32}-uUM+71$TMaa>^} zBrq^wVWGcI1#?J9jcZo3G9&+80~Ck|$07fJD*UVbtQ8d0njM=}iRr&(|E_{K`;Oqh zYZOAVVgoe9fzv$S@PB>!Q$>QjH{$=(Qyf4;xbQlsTL052q0!PV$bb5%g9lPlQX(ZS zE$rgLA|oSHIrTXdiK^A@!`|4`RJFA73!JfDQ%&tlV`JlM-n!Z4O0{;Vey0cfniop~ zkrOwd&s}(n2Y7$?IoLwf=+_8<93B@EVc`4iVa_($v0Mfda6I3gRB{Y?C=^I~bkNda9@R1QzoRZ631o|tPNyAW zy-?b}w1lp#tXyAT50pH>Y&vXowb|Zw*}73FAK8*58HX1fACLI{{=V4I*2Z;ka6rVu zg0j54Y(#$=3<{~Hucd{x-eOyJ_H?-u9tw$AY1(zU-crRQ$j^^28Bd`8cDeCAytg+{ z-QUE7WMFVGF)OR`=;ZLQB<_}wfRmP1_OZ!vM*s~2qmmozdqsJ?1kG@^Io-yJ~lb+4OB9FA5ZgM zvmphu$>s5_?e6V;n?COGilg)MFRoDYZ)sVoW2N(rAu%Vdk zay0`U%)jM^2_2+5F*+_z;=99E%1aKrhn_?%PMG~h>t42FPq0E+6-SOiouq|D>0S3C z0vZ|`5ho|+186ilt%lJ|9zW5~uY>{5z+%5s);7n-VLW&Izn|rg;^hVY_AaR*D=dHY z0d(@_^}KFydKw{K;I;e?k)GmHVOi?;?Fb>!UG(g%lD$;Rp|dt?W@2J6=e8Fp=Z;^~ zyvKB*vsa$p$Z;GVH;NdTI^jeb{on*+Z#7%Z=kH&=OD*+JPlBH>@dF_(jdaz>-B%sw z^`6%Td=Imt8TYq%4;O2we2OF$S{-f`>vtK3fgdgNJ+)fxDtiSy34$R)q$EUUP00PJ zwcvki4!Llk?4eAu#)|Bz<5DC`OUot9H@`e?SBPj#MhM6)Fi6OKz$a*$B};Gk|Jo{x zz{?(4o18XFfkhqs@&!!%sQD!ULp}2i5+0{mU44VWum_LTa(aliuB#Br`r&k8bY^Dc z{pDUbOO{Tn9;nG;YJf7|55(8kw_2w~CUNTH?UyMO@(!ZFGf+JL6P8qw&Jv@wb$?H_ z-TbjYRCE-x^*ouo{l(z80~np0g>*6uMT6ky1M2guI9TwogwUvkDJk6t?BCLx0S2tA ztLx?CLnA1&59PLPNjM5v9n|*bQ~g?RV_AaoF~3IQ@S_1UA@!m#oyU)>-{IEZG$T+o zn!9YUk(8-pO_aF`_(sntv^gqMKDay=| zR6^?fiOUSNrcZK0(?)zOB&EYpY!nogmKoRmfK|$r$zL`O7dt#=qevw+-9MvfZ!W*@ z&;-tX-wDuNx^>TTE%vv6me!vTD|k{Ao#?{IZ24Qm3eidgfm(A>MTB~<(oG&j!Di3A z{ko2aB=8bSQGn002Fy4vbQZ!l3A2Ik1JTWbU#i6R1`<(^t`TZj?FQM^rM;U^mI(9GgJ4ChDDmHguqVPIuyo~n z(Wbd1v*p(A93K(%-}IdLny}Tq{+FUS{lCdQ#XJS#~&gMz}vC_j7o1Z@E)+m{yOK4F}nwQNFa&|nHB4aBvw{w302s@b>?f*a}BNs3;d$(j!8vK76%wPpc z*Wrp!|5`1db=OsjSJi5p$|KyD@~5-pS@Y@tNMRFc-+96Hb~kRGKmb(SMdXDED2yzy zz5P+i{mzem{A>oUnbY_g2_vrq?~!A^PoT#6q?xK?7)VLW&HKqtCpV8OkN3sBEv@%c 
z+u8JV&8igxI08;|Qp%mu?&@`h-v{V9TsNLe=UKe0`F{Ani7#z6_6IGmMHM90y&v`V zjt-+U6}urMv#OeE!1*recK3{eL*D%W7O0$k(UxDC20R$zj~WjK8bAGK5T@dRY(ZFA zSn9oQO{(=fF%HG9Hc7*Y4k{UrDw&ybryMu#8_Ya$)3mlJ^63Cf(1_Hn|nwN3~p4*~_{k zXt}E(=5Dp$=h)L3p?}B6#qf^+pq`&s+D*GhR|vAQw~bX0d0kdWAQ%t8O9W3ii-|dO z|4s}WB#5`S_w!yjVZHGn1OPep%?~5g?se~NJ2T~M{IZRsZYgJGMSPEZe!Mp>rwYx@ z#RovVnA3%_!5=?L#P!4Fmi>^C8m)VNNmx+)U8xf^n2M_E?NPSlPk;c{H&Z07)ndMf zZ^GDO=F8?#hsPo_rKA!$FRY9T{Kg!QhTT0oiz>_WfdRm~;VCmiq&7Oh5;nlFb4mJV zv2qhJSzduu>%4+qbc-T$nLy-#@bo(c0};Ar)N#)QLjgyzm?kz`1G~&zgl1>sT1;jH z?+u1$-#42(&;tQGk6QtAN|#do(>VbUO4i5G*fxS-&CR$7T!SyBMkz>B_r{g3@X2nx zGfH0+13(P)B}wYa;UpD*2-U5uv?E$mK`nj_vf{866+in&%qDSypZWMPE@(yf+O;s7 z@*JbHb9(T8U_(dP5k~2DYz!C-MCxwRvz0+dTP);UeVr8lTOfCnpwQBQdkN?<3%#X_ zac2AGVLT49kkVo8WRWCxfXEKjME<~Z_Klg|Q-I@8(?4E-BDEb|_uC z`$Qrfy8HU6wtuOZ0(6puq@)l*%^I=yv>##|wu*Y&FpL{?4AjI`XtHUa&^?eOAwvFu z>D4wID(?PQ8j0%7#!y;V5_Cs^>s=DQ?8|$jns71>cpCZ_qa1S{wy|iO3jx5ud0V^- z`dtJ)-oi$^*@?vmJZ6y%&gqmUf)04 z{JmA6e0rTAjmH)Z9i1B9Rd+qKSn1d1VWS zT#~FtzmhJ(v*@6NsaFz;%YPJd7G7{8iw;M`(7<@<*hm^Mv?nY{c71~T{Na?7Hq@w1 zSvq)j4%xqg*h~-Bu?!I**&rNaM@b5}zM9Pgl6sF;9&fFOde z^SQ-wi5`W$)$AqX_>bm^4GKz4MW?H&9?PK>)QttI19+=!G$40g^W`@($N#)kDzpm_ zPKm-#FSXTU4Ceom_=>HSmNX*ql`4dz~mJ(3=Elz~ zG>_1iaDy7}T+me86SUUW`?Ct4YT(e(6n+G&2PMa{SJ%)Pi!yHF)F@r z*;$JIh>K#!yR&Av*^hWai^38~}zNzEVxz!|>nFz}I=HiE6 ztJ+p^61zH31V}oT4lisC{32z0JaSsaAiwgxd>q z>let=`dgf9@Xn!@&uo*%qLtt~~j zRMhu%v^Y+xDDJL7g~Uf^wp)0`PJ=QXGub8;iaL3&Q3UKvcCLR5SyaStc4-FGcrR6K zad`NBJ~Yx|p?X`I1^XX~8Wt;{3t*SnWN&5;XPW5w&af$RZ-ccq8Oyh?uiY{bOb6ODLT4hDEM}tW$U20R3kS@dd{)wWTK)nt1aJqk#q@~_TG4QP1(frJi0kWv<4m6Ao=5HMeqm4xkC_Q z=8*U_v5|B>^e?IESxKWG1MbfZYisJ4+)}||@(HZSC#U3lxrPF~dE2n`z(m)XoYpe(tE_G8pxrZ3y;S5;CfD0B$d&(~Y!tXjx%UbvDqF(|tAp0U2~`D|OxXQ!o=7COGLW7g0+ zW@5SeDk>_nDF6}6~PtEi|*-6s*?C!?RPHCZY?zrAtF4waSuSVS_Gw-5~*vp{+2`hlFe-R4X> zox@#JTLaiGLfcx`MmAqAxO4fECDHQDbo?bGNE*4IS;Xn4hHlZB1+^~8u7C-i*!%XT z=mTcn<4BvED0);A2bGD|XO{FA?1|OHkp;T4La?ZfY~oRigZJ_}a}jNM6-DYXID^DM 
zmL$f$sZD-MlHSaSb@;hge&L+5Fk&VemYKZrS^pup69Z$2zBsi@gc@NBIec9a<-GpD z$-2!vH*q#tX~bXlY$_ZGwRR`qO+GW*-`~H#^ca8~Oy`TGWn|a+Mw{9STmtd5!QV6Xi-g#OhW%LT~7J zQ@N75JkzIN+v22h*)xxGm4WduH;Pjm5CwX}-Nn zWNDpd#9Wny-qOTqTqECgW_v|^4oeD1kt(maBePNB#I};PK=rz#U2fZ&xxSL-4u`(MK zOB1d3q(=6Vr(f`Lv&9z-84u8vt2(K3D@-lAA<9db$*m#DZ6ytu@ix4Rh{BtbPE!Nc zTbgSP(62-k&(?ELc}Wbt_fLr&>jbj4Adgd#D`5h zwC3dC4@6>$!ekIWNsZt13@~_POZ>kVj!HpFQqIS<4GwOhZ(k{l!~M_+4hjM~>~(L& zT?3drAy!f(Ow2M3RAWMk#C2pNdrDiLHOLfyC2=rdz_$Bl=*g8nFd`x%a4`hP_Xqfq z)NckE?OwbAxI6e&)5WGwj=na{Ja6+$$d2)sA(DZs-)ImFRuJ>A;|G1Ko>sPd&=*^E z8pnF1FIMw2#Rat6xko8?s=Z`heb^{)rLA8^$EPHT8HGz+MKY&ROCWgFw75kav7=xr z7ZzYZePC4#^U=FYSi6_=QsC;xEd!xc29vdtc(z$efI6b^w9f7e7AyysS&%gjH;j~% zpm}|9+d8=hb~cdlLpG5wj$ttGqFqWNR6IE`AN$QOL;fsAHh7Tzk@%6LsciLPF55@0 z4&CUpBPd4(dUNx*3Go=hR&JU+SV5!X1y7vpg%{wQ-gt>PJnwe>3GW&-U|USbHXlt3 zF{|73lOrH5)o&%#q5Qc>>7u}#V+U{6_TVE(Gcr@fq~wzEGnI>45N-Vn^B~w2W?yGY zTIm)ggE(sx)YKq2b$sS0;;7K+v6z0w`xme?&~DTZFXX2v73avv63bf#FiS{2@731! zds%qG!6Y@#Js#&S9zS~mSsss~lU@8#fH*|9;WeY$;EJCEG}x7&MuHtNK+a<)UFwxN~psrR|wJx^1b;S!< z_e}1_#1=1&C=6|7k?033TlZ{yfh6xf~;)gEzzgh-K8Imu{ul1$PEOm zVz0W=!LE^GP&FcmHcWY?XhEv+taCtCvX79(QrU_>a$(Br6yA$6bdV%&0Fa2h4#_ed zFX&r-Kv;4ul0T1kfv>M37#IW*ADh-kpR}8mywPC`(JT{KhMMypl%0a^VsBvJ(l?BX zX|h;Y+usU#d=<@sa`VqkqhSj;w=Z;Eb%)K1234SbG0o&zt6CdWD>w?0Zp?eX)uM{- zT>GI8&5x(&rU6?v_e%!0*70p|Kj!9?kuBqxj@2Xq)=7OiB@M$_!2i{athM*Ba69SvN#OlB97;V|fKC4Aaq0^%?4hMziZc$94LnqcN* zspTagZTdhJGl5LfxIf;P&hRHgR`sL)NT$~|n(+qGm>z@so%aU87CN?Q4ijj<5qTY` z7xiTlrC9fuWSTvqn%2|T?KrILZi}N@S;eEJ(v(r=bi2k)T_v*3iVT)_w^FED@=jzc ztr#Fqe1{AkX#ck-bV8;34tJJpjR+uAX^t9K;~GPGV-n3KC)`@o`{q{1F7;H)Ppe~` zm8H!UD0}-yy^#^^j9r_zb?#x^r4yDGM6RKs)pa`u~=LVL{~36F*KX)@jtE^`%4eA z=h_v~@}3X!A5S$}=*JlcmIYh;6W?DL#5opzE~ssk2js5ftzD$zhVxGgi>*^>RRp6< z2=j@tX>0s2+m$8>CjzM~`!yS1rV?BY((LITgnJuZQtW@=)19{+Gz9zh++jqR-Il7M z8c!HUX$qIw5nIBVd7KPGeN&b}fF*tZywo9@8tzYRu1r6lzs#-3SomZ&P5E*ur*oK7 zZ6%Zh%0`$biVbemIH|K}VCXnJ0L`E1f2?$&I59ZWS*VWd`eyoM#+r(^`1#wH(}Pfc 
zsMYkx=-eCe&J#jP$4e$|8bx+|aGnz!Mr$#2i+IN%Rk0~q99pT-@#dh@+bb;Gg;8i; zF38+>MMiT7*0sL*M1XNxga(eCT&i$}br?!~!xOS*NJ<^i*@T@bEvyzp4yXysQ&~YP z8TmQ%>oW}1nv1_9ZXQ8@xizF&wL}ar?t&}CQTCK&G0IIY@>z2!Nkqw* zOUz((GoDY&IwU(IL8%VLcZ@1OGq3=`IGaUwdT8BV`>BdrO0n8FCGngfSN} z2#LQBM74thpP=j5ig2e)rQK+*t_WfjF8vb~uXEQx*Y^cC6p8Z+7caTPlruPyCtQ(F zc&&DF*}j>+wvO1aj0w(B!tLNG5bela0=G8aSH=TbE9IZ zQ-82(07mZx{m~h2Jlw2HPH3>OgNCh)Wc3I&nZu3h|N6k3eV)@@aZqUSFfe4w^|B9} z!jlM1*v^x%ydsokK`XPikG6d%^s92wqISrBs6dc;mszgS*=9gSj1Or-1H# zbRRB{7xHG1sa~Vsf~ihdtYg0f8y(I3SgC=z&E52Fk2e)Dn3^k~-ZP2mu+}EY6=?1y z%tCU9s%&N*svf`(obJ?dn2OWa5f?zmN;rdP6OMjx??qAult}n4jiGXGgUI%V_+zg` z$P=&KA1bqhzW%_BrT!;S8ggRJ;x^<+DZDs_HGLp=CX6Bi63HH0uGw3kz+IIeUG_hTXP1&W35ObSO80 zcz`jOZb|zn+^uaXwRS;Y52nJZyQH2R+p@qXWvx|ybiaS6(GdEej@*q1YcS;?U_)Gp zR$1-n+K(-nFw=dF63vQEbz!o`UgS0YLI!rEFG zAIBo$=W2#os?uf#-iN6c+V4WH%6nrr&yuya}lRV4eD*iPv*sY`N}59AU-G zczl-hy9!y=dMp*miWIgS_6GX3g^TP;0`IPZ0%TLft2w#yh=`V%J6o{d_Z&R${Ar8t zrCi4>>=O(hu@KJoq@0sDH3|B%Hxy=bB1X*Suq;uB5hBMLk9u@Os-UpNNA)B!E){RN&MJSC2ZKQl4vU1S)_=x53b=bH{BpX$8yC>{jkOA*EmKO$1kiz((NWOdspMJlD{a)Vv*exVnD5mkE`)8p; zXMMq}x1=Ptx$tZQ^rBA&nYkK1$?jZSm>+|wPZk+f@Oz(nq*uS2_pU3(%8LST>GC6I zFy_6;ETt-wu|Ni2ZnY79OYEi3;GGKrpXUpJSz$0w z*xA;G<+km~}((SS7JA8oNL9n9O9brJ#$XZIBtkR@7aHou2>W z($vq6AJ;G9(HM!rSpnA2Rn#F7J{fa4O0UI0l%#Tt2ZHHf}Q(nbfrrGiI zPlb`%m_Xh|p8?_CkWd@1Fm#TWTN*xm9=ARK8K+hS$=Z^Y`EXH!y(LjLjjkU+tf&Z3 z0x)1|*fn(p!bxXZ`rxhvd3bb|ZbNx7uj)z;fNDIP=`X_I$1gCC=4~iLIUd< zbM$gXcUbe;^6^tsZT7NvNVEO!eBiD%XxFNF+EcG{++g~M3Fk33RI>8FLWzHK>!?{GjpBFYRUNA5XPe=i~~7l%CbJ^_*}fy$Y@R7o=3jD`el4o3(Dp84m#Oo z4w5O*k?nzWDi7FvofajcqJjdjX4*;8^ZlQ&0nT-}`}wl*&!I@0mOM;DKV(Qm0yS@b z$IeR*KR^VUrq@zYXLj#6KTzYNL2>hJsj6RBf#6ta>N1Fy{p6Z-N2&zy-V5kxa%vkfGrk~o7LvkVYD3W`Ln6)_~#+7Vd%6>_(ipy zTply9K|mgt@aKBr&EO(2OFCzURjbF16)`p_e@0d^8}-@&RmxT{M~YlyEnFXUL(zMxb~F+JOS#53&0>uLRkdkRXvD)b9Hz&g#7be+8@=-~?0( zIi)N`gPL&7r%ZO+G?L^Tqi$6-%9a^?Clv4HsJpvFITh{?B`2r)%FpY-&nmxP8Rd=j zh#P+xBmiy(89DjhoFt+6@pMX37+WPtskqVTP%zY~RJ7f*FBJ&aW$$-VL0ZCqet~hb 
zfWbl4y$gd=7>v1YegoYnl-9b{$eb8HH?mlZk!|E+-3ql@xpqqupZK#df83|R@abzY zoN4^PD-kKO%&*csh$k_afnFG$%NG&?;xeA6gkc_A14|7aTN}p13kyg~-{*H!)mwuF zd0Yg&zEu(C@rs|e(dkneg@?FOLCJnW+P!&jxqGZ09U;@Fd4=!%QEg<+0zh8Jbv-Y{ z9L0|k9jcDwq}B%28`W1)k(8>&r$MMG*$+QndX04+us3}sVvjSg<@k!-D509Q!7{LH z#1fzno2?gS0h|sx{SLHnM8e_43RPy`C%eblU;U7TL9i1UbEZxU@|@JF@34$rK>+?* z5PKSc|1v0w$p2-8Fc2>@Gt&#O{1Ouql)@Mp^SJ|m`-w?Ns3`pGb>4Dc*(-_{Q29_} zPr^?oLR#x2ZL^y9M-msvN5`GlFmRqyx-3~=UtQN@6ZUlyBWkl^IXX&QuU+pza>kdg z0$V~eBq{6Rd|Rw`cvy)GPQ1Bu3a_a-C3!n%wWgh4=69cQ+eAW*U+R4Nc|667WT+OL z?12Vnq1cm6A@yL8|H9_G0dvPqKOLlNj6?GHa)P~t%sOlkjIg(NRY?a=G#@ugDU=8c)Q)Tj3m=;JuWg*Iln!TR8k{$V(6x-j;1JfZ6)c0OIr)qMqVce zz-OW&l&7e|bDo}%pUz~JY?&jO(=NhR5M=Q4Tdg;?St2!Msl=He6z&6q0*mdy1r2p8 z7I#EBQ#y}%X+mE5mP>=U?MBZf+du6O>GXwQypft+)vkg(4(+rdI<4+&UUME=MQu`# z?ra)MTe<)8SDIL>lC<{a;H;`7*P$froPpo8u-Bnd=Gf@bE@E1zb7R{e_yAIX!W!he zhzN7rv3f*IMETvZLv4kj-6nm?#`S zT#1wjGI!bMB8oeGAIWytodE<6dvM^Vy@$ugA43g#!{swDfq79CDJdx^g0$i|+L+U1 zWF*Ao=*$(?5wbGk4h95KFRbR*K`yksXu4}e`EqyGX{{-9VbD~lYK4+E<|ID$)y#*f zfebAs$f#m6$m-;F+Z;&V2aA`758jj{rv(dx>1X;8LOcmy=R2B_I!I_*mz`YPLh|D= zMg0Acq{!)J35kP&>q{ZEY-w#**VYE&ew_w4mq-;QPwfPLf>y)@nE*hg1?_l4+AS=r)(SU5l!c@hj`2K=Gb!9+e^1y>Jzy?GMs^yg5_Snl{`oQV%_#o_3lCJ?|N*|fB@@t9)EMX zx^AmOJs~^h?@0^*;bQNQN0%!@=^sUQ!`Z%qCh%VkjYgi6Tu*%0A|3*XUR; z^=f=@DOKQ&2VKa@(z492^W|I8M{9e=f@N*1ZRci(1bMdKLYh5aBv&V|Re(i#ZI9lf z&bv1Tm(F~?pViKf2}Zt!pLG%pLP+JXAOK$Npsr(gNr>IE64l7yyC3OMfi7Mv!Cp_vt&hZH2&N@_ zJBTP~wAN^@t|LYfWG7)+0|6@;+r%9QUEZKkETH-0=&DFEo`xPef8aJv~?<+oqy;kww2a8Y4d zdSBt&TlzJp1hO^7*(&oY`(0BICJ}=rNV6|Eokt#gokp+eRcu=EdO_|x#bY^1tD^My z_}w5Ky7oGTam9Qvz_O5o`R=BCjxsIP3Sv3nDJ-wM?iJA67o;{y<+J@rB)Y2OL?d!mP-LHv zpwnhUd#w%r0NMpOleKD?P*I{h$3!vK5T9;XGximhQRM0(3u{iMoHP;>JP2UlqQCd| zBV);yS<KJME{tMSilXMR|1U3~tJ-EC!79F+XJ@@x7 zuzQq`fJn8G02CX}+5zJ-oWG`v9!q|7YNncGP;b2}3>A531ktPvvQZ6%q#x_4nagmg zi-NEjjVg08j{$x}wA4OHAFwQ?#wYxY0qQQ68$+hgejH8r9}X-vfUge#6_*-^FH|jw zi#(-m%CQTOjE?GyT>7$gf1+D`>qG6X$CYjr3uTIs*)#gom!mV&iFT4 
z?FRvZ2N(D`eobsJHp0cy#cuFjFZr)f=h0W8BAj;pJI;m2<-CW*%^<7j`v#_z{!+Sj zdV>x#=g@ zRl4|2^dBK~kKRsGAD|^3mpZQ{qwnCsG+P{2Dq z$w)@Ybu2_|<)qy0Q-J!irbs#w>S^Rd^`a{5FinP-g#oe#8Z~&VuK%0eMi{5U=(!0mggjPL}t<caCbc@Va-?-PG= z;~#2V*<)I>cDTGHq0VB%C>w2n$GvQ12U?`0=qN@u!>2T7HL9t8@$Q>SsLQijitac|uQOswvN%2x# zf9(^!a}Hi>a}}@)S;aweM}Cx#JfSf2dj{nZ9XbBPsj7n1(uWG`*##L|2ptE-Vt3NM zJu8f5j(VrEB`24Aqfy2nh8!r$nt%l)Z9qcF>X+XqMe#6mc2EfxehiGa6{J5b+K1S( zZuOHYpg~$lXUhe$kH0nx)$SV|w8aD)p5bY!OOf2VuCh(#`QJKwl}(9ey~&F`0X%{2 zUF-#xfJ4dnLOyD2^KgVB5^m`Ti%?;EZhWk9nNwg=baTnL0upLu^kzWxjD01k)rGGkalZ8vIEc0OavocSTx#C+u&eUiM~&LHSr zIWL*soU5VF6S|o6n8i~k{Ci%OvEp&sJC*29eBO!=?B|N#*N2~IZ~?E*ZS1o+b6!m; zc~i*26!Ug@oew72PQ~7vz#BinUfPULrORrw^M9e9}eI6k@u=26oZH7@`j07lnK2T73Tv9RsW2#1 zVp?;7XdG%g&FDgh_Jd%jY?prtX`U2@t6rKM4GJEmkcpZymSKTp6$j@>W=SqH)djDqC2xVdC**bM*?J?RAZvoGlu-F!0@9Qu<`c1Ug5PnMLz>u~^K73}sT;7;$}@ z?XC@P$-W7$CbqD>D49J(vrkA?7*iJV8TWV)-}-84@uCs>X-$d8tGg2}S$ft;OKqnY z^bB>C=l$Zl(1BjeF?b$C8fQW+B6IWJ&*8+;ph$G&UHxg=o z3QNWiOo`qAjo=1qh)fVU8 zm+i+LMVqir#-wLAmn>!CMIp?--b0VNW6OWZ1)-Z1i`J$k`$5hWQKwilD@FctpN6tW zN8NqGn*n_^o=SVWVi>r0T9*GcK3=g&WaOTj)ZUJpUaU`-0sVlX5WV)8LxR`XjLO$u99SbQJS8EdGT$ph%8PC*t0&4dL+EUEZ`aRI?Yf;60sub4Ei?b!w2p@J~MZ10QQKKtgI+R=8fvu zMZRK{fiqiIXecNbBpD>tTB1f8Y_gCpZ{z=cvj=SYU>UxIAP<&+h?lAZ}{I_qT94HX5FLPGjztk-J+M$?kUvCBx_s-8@ z;{+^4EMbgmOc^q-GsP^-&54siR=p<3d3kk_1r+WFieshqoSc|C+|MBZ89(YaHbnBw zWnUlqR|d&4U}y5{_5sLz6~L-;adAQ8K00^y^sLk9>gt-j^T7!T%dyn~tH#$iR#$*! 
z&NykZTwjxRJc&@Px|kU^iuwJw<{Ta+%_OZ=p*mPJ)E*-32v_7NG>QWu~D z>u;`!18E8K;7qj{j*@MJ#^X^o580kZI-0vX|J#oKq0xLjqz@d~2;_0gbBCCN1B0>a zCPaE3uYtp9@GJakhkMn$#Mkg}QZ_cV#IdK#4cb^TTUz$118_=93&@Oj7V4vcbvaXG zytOYgI7-V2cWKb40KeAh^}$-56ObvAif5=IjZI4V79(_`j#!kQ4@}U!Sp64IR|jg1 z&r@2iRi-^bf_0{$S4P9Z&?m_c^VcA^BR(7tB|6@qK!LfLL2bd3%Xm%stgY*vW%D9t zUk8cq;XpIZOi&!jm^*q{JrmmOyZ)#qd(ynHJcWz&c@seG+2aKRQq)X8ULV_jzn;(4 z>UTnX>p!}7U(~dbfe{fAVRAi^E4j2)Dx*Izo7!2(5Ne9-2Ba^Y!K{)6rfB?n1tei1 zBOwI=oRxv??Vd2bT)CzOO_Ab-wdN{2Ovy@}oxoHN@ddOC0VqHyz#Jcc@~8mcG#6`om%TGk^<+&ilsB^2KM3FQPd3D|9|WsColZznxUoE^Uw&P+@Ud{H(kkZ}=D4N_cIMhd{C zFLgH08L$kV+uoEY>sv8NQD3L1{8vy;CU94!2TNz^EOk$+cej3JSzzlzpo8gPuXWwP zvuo)jzij1CUjN2)Ad>_b{=RCY)gJ#&8P|uis=9%hnHe)zTuz4^7yysv9pE0OfLFZ6KrLd z(SEmyX|uJm?P`uTSXd6zJ!wmriVh)z#Dx#$5Cvr73jdXfivteKvDfo<)!Fv6Zd>IC zBhQWWmg|1Gu8vM&f)`t>f~(BN13BRoCr3|6VXM^+kIIIj;5bwf&2^awn(0}?wS-*4 z`g7oSf?gXu?Du^i`Db_iP{*j~=q0>nSMGA@Y*4~|ZkIhDhC{S>RjGjVoZk_lfo*YN zBW8`0E0c*sdTnt<^W0RGjypXxCq*TwHt>{d90(vkteK@s9?ZT?s2no*K$~0z@%#+>$RVHI%sG`7_BKr28rON7jgPbf(;fpi3 za2E4sUTBpgy6~LQIup!|W4v^hQEcILA3!wgQh$38vMBg~shlXDak8K&;hGqe@Ngh#Y;;u; z%b&^t(9dV@+QU*NTFaZ^ydPdHI)%cvgH&{Y>r&SBr0h4ueJX8>~Lo%fe2Z3=Z$CHfYnSR3y@`QTF#K!OJiGtPE?d?r6p z6Vv6R^HbD`V&PDvf1pQ{B`H-)%7H77z?g){!HlJ=qr;Yp=5(gtYpV9u_UDSQTNz3M z7y;!dEBZ7e?48wEG4COdhpEi?hidsEk9=Z&)Q_Uk99Sw;i=k&;!qG0Z3xnBlkn36` zebXqx9vU|dO+ncuzz7Vr%}k+;>)~L;R8&&@c;C@%zaWRW-IDxqI4HqfW(3wca@bRs zPsyy+D8W`0U4KNO+ibZi>;mjF6Uc>VmMaf3Po0t$?{eO81Jw(eiYI-T;x%>sBV0&H z>;bt$&2^q1Nt$*F>64rt-ty&o{3T>OQ5g|o#Zw;FH`3T1_(XBtMl~&*IXnt)1oKML<0lL^sS!{h^r*p#)Xd){fZWXjPLPozl<`T<@VeiR8}_BfW(F@)o(@%1 zWTckJPXGO102Ud%Juhi;H0W9Z*WKwh3?t{kLd1z))3;n!vqUij{2c_AtyiUX9-jc5 zq-LOmuxlON2na3W>9)Xp+y9N|pgd?B)+dxdmZRF4fZL7vWYB+e!U95&d+JX{yS*5t zDO){RfZouf^Nz|v`Tr62PQjUVU$l28opjW3C+XO>({abPZQDl2wr$(CZQHhW_WQs1 zs=kX;=lZF7F4o?At-0nL<2MZSRdHef8*e@F!9T~GE2}0YDN~E|U;z&Nph`E0Elxk2 zvs>}&fzr9|Q2t|G^}sSu3)?;an`6=ax%B*mEp+>TYKODQknMF=*t5svOJSMkqa(Pa zejc`gLPC1@1>&|W(33jImX=0AO^cO1o5{&)BP4Gxze(v2xpNt3WrgzP`yC*E#lbJE 
z*&k7C<$J&H;X1&C-6n zZGCkboDx66dhW*v(>a~1pipb6XGO?90q*8v#YBv}tJgWdx&%3+92JQl!5>3=rk&E;vWO|Fv>W@)B;9kLv3k3L_GV^ckKT5?co3sy^P^Ty6=-0fKt9LWKa_sCfAd~< z<_UB@_1`d_+}4q+ZEajGw^(nm!4K?s!ukA*ky|liY;%F8tut3L_Z&LjI3zDGE1_+m z@wBJ)IUE}$Ek$EQt(HHTa{Gd|>;cR&&ApTTy?DW0{J;+BjCfQJJ}C*!o|gtqNA|Z@=&n{!Dvsua5RZg0h4e_ zJqA;U*yKbbMdO7kirH1^C8O0q2KH=^jE2e29hsntuQtD*W>$M&T~IYm`1m|ih*2Z8 z7i?4~MJJk<_|FGh@I^HaeIqgViOp^)M2r)0qKxN7#gu#PN5L;NDufSq&q--J&eKs7OaV0$VC(aAnVDkj@nD@5+vmayI$VBoAel zYuiS~QP?v@wnt4{I?OQ}c+Pe-XeDT%wlCn%pW7fB;;9^3J%jPJ00IFpGb#yA!QpVK z6d>)!^Aw8gZ-(HQ0Y><*k`r!#M?g`10)}|_`SG>~Sh@Swth*|PNohZpJKUYi2^g<| zHg>@8*<9SvdM0uaAeNg+J++A#r7QZ%HF(;BxHZ}BavT`^#Mn$(>C>19(MoU%sZIIas~VMkM>J%}T4QtX z+2r_bbpg>`pvgn@b>qw>oOpQn@0+)d(3A%o8De`|PlrU=F^$8+ga&K;Tc!ZW~#;0DsM)A*UQ-*7m3zCYid~ z#OJ@JjG6in_~u0lY>OYf9Sv4odM6NHuwreDe!+d4{sQeaM$A$&Jjyw-7OgW;9|Qzh zJpwe^`96tJ!|`1~N(G3us$Cpe(gI(3dBO-H`Cev(uci#L>HuRPb=^%3lPh+eYMBZf z2S=+53bR2{7&1@1l4*L4rXe?kH}U>La&WK?Smdja$o!~f@gmm#TZ4?hYAidK6$YWFD+x7RV%}3r3j19K`)!liXu0}2SWk4jy z0~Q$@$mWX_6C_X5x^&W^|F?xul6;VIry4yn;7;#0Hlchblg&0)%);xB{w=hC1gh#@ z?+cUa*f7QjI1Fm>CEIey2p&vYC*ZR$dhF}%RiKdoI$~r}<)8h?(#0AnSsu3`ujApl ziXLoagCimJti}OxRsXR8PEoXB-hyw{uJ~9eL27Lu1t7r@{si6oOm3?BO&F}@9~N()fqVyI1Y&m`*eah$^1uvsN zeccH@ol+|=jPZ$Kq^A=RmYp)<#eUxDIX(Xl1~JJ7zSI1JAKdgIxmXcFRzE`9mPKPM zZd>BenUaA~d79O$qyX;V{$gTG;ix2D*deqKu|Qv}*s{UI$*e4Q+*AY2Zf%q}49)Yo zL3YZr!7CZEZras%p4->sL(zSfyY{+vk)3@j!APJU9ZtboRR5NabIOBY@q~SLnW3lA zR^Xo>DN;FEdS*lO>hDt0yD$A8SHJHdN*bo3l`(V~3BSIv3b+iQp-LHbB;@vey`Iq) z?Cjq&rzgAr{>Wx2w86;r)^;txAV`n3=aKkKTpPBYJFxJP!2*Yc<>c_4%Wr5;Hu)=v zF7c1kT)M88C=LdO^wLmw79V)bZnLfXP1ABQTN&K2Xua+TrrDAu=tl{@&A-P0+ab!x zz5@PWkM^MDX|4AFit_eMWh^wo+ zZYPn>EPrg_L1h)QtAd)!h~qLJ*=u-hY;PclYf;~y7V^{i|Lc{19!!aZ7;CU;AF7VzP@}yjDt%3TmK;2+uqdY4Z=IxlSu+KGw!-^qo>#vP`r0Jlk-ju4 zJdRG3g?vZk69a`8`?*C)fnaX+8#2oI^V>1Eawz!oOj0rNd==tHpu(d-7xC$lz?);q zqIsAnzCqoybjqg~A=d{_NmM@2JB+t@=bHj%Sg? 
z`Sh<>)?@S?U*F0JdD^d>EQa{=E#0i%pUsX&!vgVj1hs3Gk;!4sra`ok58t)g<7y$R zQL7rqK_0Lp@;X)9Mhv1Ff~hP{4;Ml(C?rA~6y4aT-~Xs3dJ6m@$na!9lapBm#2Wcb z#aIXiR34V^=(-maSI#H1-~h}B!s%)yqDDDV@hq{fz(?{fjzm(fsIf8&SD%TdgGZ
    From 51ed3945a9a30d93645d046a839d18ab52ea8a79 Mon Sep 17 00:00:00 2001 From: Jiajun Yao Date: Wed, 3 May 2023 21:53:33 -0700 Subject: [PATCH 222/424] Print out the address of the connected gcs (#35028) Signed-off-by: Jiajun Yao --- src/ray/gcs/gcs_client/gcs_client.cc | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/ray/gcs/gcs_client/gcs_client.cc b/src/ray/gcs/gcs_client/gcs_client.cc index 9e7d3504f882..7d54c770da44 100644 --- a/src/ray/gcs/gcs_client/gcs_client.cc +++ b/src/ray/gcs/gcs_client/gcs_client.cc @@ -128,7 +128,8 @@ Status GcsClient::Connect(instrumented_io_context &io_service) { internal_kv_accessor_ = std::make_unique(this); task_accessor_ = std::make_unique(this); - RAY_LOG(DEBUG) << "GcsClient connected."; + RAY_LOG(INFO) << "GcsClient connected " << options_.gcs_address_ << ":" + << options_.gcs_port_; return Status::OK(); } From 5bc6b9029285b79e92055c6d6888d5991d5c9a01 Mon Sep 17 00:00:00 2001 From: Chen Shen Date: Wed, 3 May 2023 22:12:20 -0700 Subject: [PATCH 223/424] [autoscaler v2][3/n] introducing instance storage (#34979) Why are these changes needed? this is the stack of PRs to introduce new node_provider for autoscaler v2. Stack of PRs #34976 #34977 #34979 <- this PR #34983 #34985 This PR introduces instance storage, which is a wrapper around the storage that allows us to store and update the state of instances; it also allows users to subscribe to instance state change, which enables the followup reconciler to reconcile the instance state with cloud provider. 
--- BUILD.bazel | 1 + python/ray/autoscaler/v2/BUILD | 8 + .../v2/instance_manager/instance_storage.py | 199 ++++++++++++ .../v2/tests/test_instance_storage.py | 290 ++++++++++++++++++ 4 files changed, 498 insertions(+) create mode 100644 python/ray/autoscaler/v2/instance_manager/instance_storage.py create mode 100644 python/ray/autoscaler/v2/tests/test_instance_storage.py diff --git a/BUILD.bazel b/BUILD.bazel index b7cb26e07bd0..32a453b9a086 100644 --- a/BUILD.bazel +++ b/BUILD.bazel @@ -2883,6 +2883,7 @@ filegroup( "//src/ray/protobuf:event_py_proto", "//src/ray/protobuf:gcs_py_proto", "//src/ray/protobuf:gcs_service_py_proto", + "//src/ray/protobuf:instance_manager_py_proto", "//src/ray/protobuf:job_agent_py_proto", "//src/ray/protobuf:monitor_py_proto", "//src/ray/protobuf:node_manager_py_proto", diff --git a/python/ray/autoscaler/v2/BUILD b/python/ray/autoscaler/v2/BUILD index bef608e8d148..723d585ebc70 100644 --- a/python/ray/autoscaler/v2/BUILD +++ b/python/ray/autoscaler/v2/BUILD @@ -5,6 +5,14 @@ # -------------------------------------------------------------------- load("//bazel:python.bzl", "py_test_module_list") +py_test( + name = "test_instance_storage", + size = "small", + srcs = ["tests/test_instance_storage.py"], + tags = ["team:core"], + deps = ["//:ray_lib",], +) + py_test( name = "test_storage", size = "small", diff --git a/python/ray/autoscaler/v2/instance_manager/instance_storage.py b/python/ray/autoscaler/v2/instance_manager/instance_storage.py new file mode 100644 index 000000000000..990cb1f00733 --- /dev/null +++ b/python/ray/autoscaler/v2/instance_manager/instance_storage.py @@ -0,0 +1,199 @@ +import logging +from abc import ABCMeta, abstractmethod +from dataclasses import dataclass +from typing import Dict, List, Optional, Set, Tuple + +from ray.autoscaler.v2.instance_manager.storage import Storage, StoreStatus +from ray.core.generated.instance_manager_pb2 import Instance + +logger = logging.getLogger(__name__) + + +@dataclass +class 
InstanceUpdateEvent: + """Notifies the status change of an instance.""" + + instance_id: str + new_status: int + + +class InstanceUpdatedSuscriber(metaclass=ABCMeta): + """Subscribers to instance status changes.""" + + @abstractmethod + def notify(self, events: List[InstanceUpdateEvent]) -> None: + pass + + +class InstanceStorage: + """Instance storage stores the states of instances in the storage. It also + allows users to subscribe to instance status changes to trigger reconciliation + with cloud provider.""" + + def __init__( + self, + cluster_id: str, + storage: Storage, + status_change_subscriber: Optional[InstanceUpdatedSuscriber] = None, + ) -> None: + self._storage = storage + self._cluster_id = cluster_id + self._table_name = f"instance_table@{cluster_id}" + self._status_change_subscriber = status_change_subscriber + + def batch_upsert_instances( + self, + updates: List[Instance], + expected_storage_version: Optional[int] = None, + ) -> StoreStatus: + """Upsert instances into the storage. If the instance already exists, + it will be updated. Otherwise, it will be inserted. If the + expected_storage_version is specified, the update will fail if the + current storage version does not match the expected version. + + Note the version of the upserted instances will be set to the current + storage version. + + Args: + updates: A list of instances to be upserted. + expected_storage_version: The expected storage version. + + Returns: + StoreStatus: A tuple of (success, storage_version). 
+ """ + mutations = {} + version = self._storage.get_version() + # handle version mismatch + if expected_storage_version and expected_storage_version != version: + return StoreStatus(False, version) + + for instance in updates: + # the instance version is set to 0, it will be + # populated by the storage entry's verion on read + instance.version = 0 + mutations[instance.instance_id] = instance.SerializeToString() + + result, version = self._storage.batch_update( + self._table_name, mutations, {}, expected_storage_version + ) + + if result and self._status_change_subscriber: + self._status_change_subscriber.notify( + [ + InstanceUpdateEvent( + instance_id=instance.instance_id, + new_status=instance.status, + ) + for instance in updates + ], + ) + + return StoreStatus(result, version) + + def upsert_instance( + self, + instance: Instance, + expected_instance_version: Optional[int] = None, + expected_storage_verison: Optional[int] = None, + ) -> StoreStatus: + """Upsert an instance in the storage. + If the expected_instance_version is specified, the update will fail + if the current instance version does not match the expected version. + Similarly, if the expected_storage_version is + specified, the update will fail if the current storage version does not + match the expected version. + + Note the version of the upserted instances will be set to the current + storage version. + + Args: + instance: The instance to be updated. + expected_instance_version: The expected instance version. + expected_storage_version: The expected storage version. + + Returns: + StoreStatus: A tuple of (success, storage_version). 
+ """ + # the instance version is set to 0, it will be + # populated by the storage entry's verion on read + instance.version = 0 + result, version = self._storage.update( + self._table_name, + key=instance.instance_id, + value=instance.SerializeToString(), + expected_entry_version=expected_instance_version, + expected_storage_version=expected_storage_verison, + insert_only=False, + ) + + if result and self._status_change_subscriber: + self._status_change_subscriber.notify( + [ + InstanceUpdateEvent( + instance_id=instance.instance_id, + new_status=instance.status, + ) + ], + ) + + return StoreStatus(result, version) + + def get_instances( + self, instance_ids: List[str] = None, status_filter: Set[int] = None + ) -> Tuple[Dict[str, Instance], int]: + """Get instances from the storage. + + Args: + instance_ids: A list of instance ids to be retrieved. If empty, all + instances will be retrieved. + + Returns: + Tuple[Dict[str, Instance], int]: A tuple of (instances, version). + The instances is a dictionary of (instance_id, instance) pairs. + """ + instance_ids = instance_ids or [] + status_filter = status_filter or set() + pairs, version = self._storage.get(self._table_name, instance_ids) + instances = {} + for instance_id, (instance_data, entry_version) in pairs.items(): + instance = Instance() + instance.ParseFromString(instance_data) + instance.version = entry_version + if status_filter and instance.status not in status_filter: + continue + instances[instance_id] = instance + return instances, version + + def batch_delete_instances( + self, instance_ids: List[str], expected_storage_version: Optional[int] = None + ) -> StoreStatus: + """Delete instances from the storage. If the expected_version is + specified, the update will fail if the current storage version does not + match the expected version. + + Args: + to_delete: A list of instances to be deleted. + expected_version: The expected storage version. 
+ + Returns: + StoreStatus: A tuple of (success, storage_version). + """ + version = self._storage.get_version() + if expected_storage_version and expected_storage_version != version: + return StoreStatus(False, version) + + result = self._storage.batch_update( + self._table_name, {}, instance_ids, expected_storage_version + ) + + if result[0] and self._status_change_subscriber: + self._status_change_subscriber.notify( + [ + InstanceUpdateEvent( + instance_id=instance_id, + new_status=Instance.GARAGE_COLLECTED, + ) + for instance_id in instance_ids + ], + ) + return result diff --git a/python/ray/autoscaler/v2/tests/test_instance_storage.py b/python/ray/autoscaler/v2/tests/test_instance_storage.py new file mode 100644 index 000000000000..881520ea6cf4 --- /dev/null +++ b/python/ray/autoscaler/v2/tests/test_instance_storage.py @@ -0,0 +1,290 @@ +# coding: utf-8 +import copy +import os +import sys + +import pytest # noqa + +from ray.autoscaler.v2.instance_manager.instance_storage import ( + InstanceStorage, + InstanceUpdatedSuscriber, + InstanceUpdateEvent, +) +from ray.autoscaler.v2.instance_manager.storage import InMemoryStorage +from ray.core.generated.instance_manager_pb2 import Instance + + +class DummySubscriber(InstanceUpdatedSuscriber): + def __init__(self): + self.events = [] + + def notify(self, events): + self.events.extend(events) + + +def create_instance( + instance_id, status=Instance.INSTANCE_STATUS_UNSPECIFIED, version=0 +): + return Instance(instance_id=instance_id, status=status, version=version) + + +def test_upsert(): + subscriber = DummySubscriber() + + storage = InstanceStorage( + cluster_id="test_cluster", + storage=InMemoryStorage(), + status_change_subscriber=subscriber, + ) + instance1 = create_instance("instance1") + instance2 = create_instance("instance2") + instance3 = create_instance("instance3") + + assert (True, 1) == storage.batch_upsert_instances( + [instance1, instance2], + expected_storage_version=None, + ) + + assert 
subscriber.events == [ + InstanceUpdateEvent("instance1", Instance.INSTANCE_STATUS_UNSPECIFIED), + InstanceUpdateEvent("instance2", Instance.INSTANCE_STATUS_UNSPECIFIED), + ] + + instance1.version = 1 + instance2.version = 1 + entries, storage_version = storage.get_instances() + + assert storage_version == 1 + assert entries == { + "instance1": instance1, + "instance2": instance2, + } + + assert (False, 1) == storage.batch_upsert_instances( + [create_instance("instance1"), create_instance("instance2")], + expected_storage_version=0, + ) + + assert subscriber.events == [ + InstanceUpdateEvent("instance1", Instance.INSTANCE_STATUS_UNSPECIFIED), + InstanceUpdateEvent("instance2", Instance.INSTANCE_STATUS_UNSPECIFIED), + ] + + instance2.status = Instance.IDLE + assert (True, 2) == storage.batch_upsert_instances( + [instance3, instance2], + expected_storage_version=1, + ) + + instance1.version = 1 + instance2.version = 2 + instance3.version = 2 + entries, storage_version = storage.get_instances() + + assert storage_version == 2 + assert entries == { + "instance1": instance1, + "instance2": instance2, + "instance3": instance3, + } + + assert subscriber.events == [ + InstanceUpdateEvent("instance1", Instance.INSTANCE_STATUS_UNSPECIFIED), + InstanceUpdateEvent("instance2", Instance.INSTANCE_STATUS_UNSPECIFIED), + InstanceUpdateEvent("instance3", Instance.INSTANCE_STATUS_UNSPECIFIED), + InstanceUpdateEvent("instance2", Instance.IDLE), + ] + + +def test_update(): + subscriber = DummySubscriber() + + storage = InstanceStorage( + cluster_id="test_cluster", + storage=InMemoryStorage(), + status_change_subscriber=subscriber, + ) + instance1 = create_instance("instance1") + instance2 = create_instance("instance2") + + assert (True, 1) == storage.upsert_instance(instance=instance1) + assert subscriber.events == [ + InstanceUpdateEvent("instance1", Instance.INSTANCE_STATUS_UNSPECIFIED), + ] + assert (True, 2) == storage.upsert_instance(instance=instance2) + + assert 
subscriber.events == [ + InstanceUpdateEvent("instance1", Instance.INSTANCE_STATUS_UNSPECIFIED), + InstanceUpdateEvent("instance2", Instance.INSTANCE_STATUS_UNSPECIFIED), + ] + + assert ( + { + "instance1": create_instance("instance1", version=1), + "instance2": create_instance("instance2", version=2), + }, + 2, + ) == storage.get_instances() + + # failed because instance version is not correct + assert (False, 2) == storage.upsert_instance( + instance=instance1, + expected_instance_version=0, + ) + + # failed because storage version is not correct + assert (False, 2) == storage.upsert_instance( + instance=instance1, + expected_storage_verison=0, + ) + + assert subscriber.events == [ + InstanceUpdateEvent("instance1", Instance.INSTANCE_STATUS_UNSPECIFIED), + InstanceUpdateEvent("instance2", Instance.INSTANCE_STATUS_UNSPECIFIED), + ] + + assert (True, 3) == storage.upsert_instance( + instance=instance2, + expected_storage_verison=2, + ) + + assert ( + { + "instance1": create_instance("instance1", version=1), + "instance2": create_instance("instance2", version=3), + }, + 3, + ) == storage.get_instances() + + assert subscriber.events == [ + InstanceUpdateEvent("instance1", Instance.INSTANCE_STATUS_UNSPECIFIED), + InstanceUpdateEvent("instance2", Instance.INSTANCE_STATUS_UNSPECIFIED), + InstanceUpdateEvent("instance2", Instance.INSTANCE_STATUS_UNSPECIFIED), + ] + + assert (True, 4) == storage.upsert_instance( + instance=instance1, + expected_instance_version=1, + ) + + assert ( + { + "instance1": create_instance("instance1", version=4), + "instance2": create_instance("instance2", version=3), + }, + 4, + ) == storage.get_instances() + + assert subscriber.events == [ + InstanceUpdateEvent("instance1", Instance.INSTANCE_STATUS_UNSPECIFIED), + InstanceUpdateEvent("instance2", Instance.INSTANCE_STATUS_UNSPECIFIED), + InstanceUpdateEvent("instance2", Instance.INSTANCE_STATUS_UNSPECIFIED), + InstanceUpdateEvent("instance1", Instance.INSTANCE_STATUS_UNSPECIFIED), + ] + + +def 
test_delete(): + subscriber = DummySubscriber() + + storage = InstanceStorage( + cluster_id="test_cluster", + storage=InMemoryStorage(), + status_change_subscriber=subscriber, + ) + instance1 = create_instance("instance1") + instance2 = create_instance("instance2") + instance3 = create_instance("instance3") + + assert (True, 1) == storage.batch_upsert_instances( + [instance1, instance2, instance3], + expected_storage_version=None, + ) + + assert (False, 1) == storage.batch_delete_instances( + instance_ids=["instance1"], expected_storage_version=0 + ) + assert (True, 2) == storage.batch_delete_instances(instance_ids=["instance1"]) + + assert subscriber.events == [ + InstanceUpdateEvent("instance1", Instance.INSTANCE_STATUS_UNSPECIFIED), + InstanceUpdateEvent("instance2", Instance.INSTANCE_STATUS_UNSPECIFIED), + InstanceUpdateEvent("instance3", Instance.INSTANCE_STATUS_UNSPECIFIED), + InstanceUpdateEvent("instance1", Instance.GARAGE_COLLECTED), + ] + + assert ( + { + "instance2": create_instance("instance2", version=1), + "instance3": create_instance("instance3", version=1), + }, + 2, + ) == storage.get_instances() + + assert (True, 3) == storage.batch_delete_instances( + instance_ids=["instance2"], expected_storage_version=2 + ) + + assert ( + { + "instance3": create_instance("instance3", version=1), + }, + 3, + ) == storage.get_instances() + + assert subscriber.events == [ + InstanceUpdateEvent("instance1", Instance.INSTANCE_STATUS_UNSPECIFIED), + InstanceUpdateEvent("instance2", Instance.INSTANCE_STATUS_UNSPECIFIED), + InstanceUpdateEvent("instance3", Instance.INSTANCE_STATUS_UNSPECIFIED), + InstanceUpdateEvent("instance1", Instance.GARAGE_COLLECTED), + InstanceUpdateEvent("instance2", Instance.GARAGE_COLLECTED), + ] + + +def test_get_instances(): + storage = InstanceStorage( + cluster_id="test_cluster", + storage=InMemoryStorage(), + ) + instance1 = create_instance("instance1", version=1) + instance2 = create_instance("instance2", status=Instance.RUNNING, 
version=1) + instance3 = create_instance("instance3", status=Instance.IDLE, version=1) + + assert (True, 1) == storage.batch_upsert_instances( + [copy.deepcopy(instance1), copy.deepcopy(instance2), copy.deepcopy(instance3)], + expected_storage_version=None, + ) + + assert ( + { + "instance1": instance1, + "instance2": instance2, + "instance3": instance3, + }, + 1, + ) == storage.get_instances() + + assert ( + { + "instance1": instance1, + "instance2": instance2, + }, + 1, + ) == storage.get_instances(instance_ids=["instance1", "instance2"]) + + assert ({"instance2": instance2}, 1) == storage.get_instances( + instance_ids=["instance1", "instance2"], status_filter={Instance.RUNNING} + ) + + assert ( + { + "instance2": instance2, + }, + 1, + ) == storage.get_instances(status_filter={Instance.RUNNING}) + + +if __name__ == "__main__": + if os.environ.get("PARALLEL_CI"): + sys.exit(pytest.main(["-n", "auto", "--boxed", "-vs", __file__])) + else: + sys.exit(pytest.main(["-sv", __file__])) From d8fdc9399a5c2dd66957298df1819f818952a5d8 Mon Sep 17 00:00:00 2001 From: Max Pumperla Date: Thu, 4 May 2023 08:12:35 +0200 Subject: [PATCH 224/424] [docs] sphinx design migration 6/N (#35002) --- doc/source/data/batch_inference.rst | 110 +++++---- doc/source/data/data.rst | 214 ++++++++++-------- doc/source/rllib/index.rst | 81 ++++--- doc/source/rllib/user-guides.rst | 177 +++++++-------- doc/source/serve/index.md | 205 ++++++++++------- doc/source/serve/model_composition.md | 20 +- .../serve/production-guide/fault-tolerance.md | 40 ++-- .../tutorials/gradio-dag-visualization.md | 20 +- doc/source/serve/tutorials/serve-ml-models.md | 9 +- doc/source/train/examples.rst | 184 +++++++-------- doc/source/train/user-guides.rst | 76 +++---- doc/source/tune/examples/hpo-frameworks.rst | 210 ++++++++--------- doc/source/tune/examples/ml-frameworks.rst | 193 ++++++++-------- doc/source/tune/index.rst | 180 ++++++++------- doc/source/tune/tutorials/overview.rst | 193 ++++++++-------- 15 files 
changed, 977 insertions(+), 935 deletions(-) diff --git a/doc/source/data/batch_inference.rst b/doc/source/data/batch_inference.rst index 0c605eeaf96b..1e7b0d3ca17d 100644 --- a/doc/source/data/batch_inference.rst +++ b/doc/source/data/batch_inference.rst @@ -244,34 +244,33 @@ to illustrate advanced concepts of batch processing with Ray. If you want to dive right into example use cases next, consider reading the following tutorials next: - .. panels:: - :container: container pb-3 - :column: col-md-3 px-1 py-1 - :img-top-cls: p-2 w-75 d-block mx-auto fixed-height-img + .. grid:: 1 2 3 4 + :gutter: 1 + :class-container: container pb-3 - --- - :img-top: /images/ray_logo.png + .. grid-item-card:: + :img-top: /images/ray_logo.png + :class-img-top: pt-2 w-75 d-block mx-auto fixed-height-img - .. link-button:: /data/examples/ocr_example - :type: ref - :text: Batch OCR processing using Ray Data - :classes: btn-link btn-block stretched-link + .. button-ref:: /data/examples/ocr_example - --- - :img-top: /images/ray_logo.png + Batch OCR processing using Ray Data - .. link-button:: /data/examples/torch_detection - :type: ref - :text: Fine-tuning an Object Detection Model and using it for Batch Inference - :classes: btn-link btn-block stretched-link + .. grid-item-card:: + :img-top: /images/ray_logo.png + :class-img-top: pt-2 w-75 d-block mx-auto fixed-height-img - --- - :img-top: /images/ray_logo.png + .. button-ref:: /data/examples/torch_detection + + Fine-tuning an Object Detection Model and using it for Batch Inference - .. link-button:: /data/examples/torch_image_example - :type: ref - :text: Training an Image Classifier and using it for Batch Inference - :classes: btn-link btn-block stretched-link + .. grid-item-card:: + :img-top: /images/ray_logo.png + :class-img-top: pt-2 w-75 d-block mx-auto fixed-height-img + + .. 
button-ref:: /data/examples/torch_image_example + + Training an Image Classifier and using it for Batch Inference Loading data with Ray Data @@ -650,47 +649,46 @@ a fraction of what Ray can do. To learn more about Ray and batch inference, check out the following tutorials and examples: -.. panels:: - :container: container pb-3 - :column: col-md-3 px-1 py-1 - :img-top-cls: p-2 w-75 d-block mx-auto fixed-height-img +.. grid:: 1 2 3 4 + :gutter: 1 + :class-container: container pb-3 - --- - :img-top: /images/ray_logo.png + .. grid-item-card:: + :img-top: /images/ray_logo.png + :class-img-top: pt-2 w-75 d-block mx-auto fixed-height-img - .. link-button:: https://github.com/ray-project/ray-educational-materials/blob/main/Computer_vision_workloads/Semantic_segmentation/Scaling_batch_inference.ipynb - :type: url - :text: Scalable Batch Inference with Ray for Semantic Segmentation - :classes: btn-link btn-block stretched-link + .. button-link:: https://github.com/ray-project/ray-educational-materials/blob/main/Computer_vision_workloads/Semantic_segmentation/Scaling_batch_inference.ipynb - --- - :img-top: /images/ray_logo.png + Scalable Batch Inference with Ray for Semantic Segmentation - .. link-button:: /data/examples/nyc_taxi_basic_processing - :type: ref - :text: Batch Inference on NYC taxi data using Ray Data - :classes: btn-link btn-block stretched-link + .. grid-item-card:: + :img-top: /images/ray_logo.png + :class-img-top: pt-2 w-75 d-block mx-auto fixed-height-img - --- - :img-top: /images/ray_logo.png + .. button-ref:: /data/examples/nyc_taxi_basic_processing - .. link-button:: /data/examples/ocr_example - :type: ref - :text: Batch OCR processing using Ray Data - :classes: btn-link btn-block stretched-link + Batch Inference on NYC taxi data using Ray Data - --- - :img-top: /images/ray_logo.png + .. grid-item-card:: + :img-top: /images/ray_logo.png + :class-img-top: pt-2 w-75 d-block mx-auto fixed-height-img + + .. 
button-ref:: /data/examples/ocr_example + + Batch OCR processing using Ray Data + + .. grid-item-card:: + :img-top: /images/ray_logo.png + :class-img-top: pt-2 w-75 d-block mx-auto fixed-height-img - .. link-button:: /data/examples/torch_detection - :type: ref - :text: Fine-tuning an Object Detection Model and using it for Batch Inference - :classes: btn-link btn-block stretched-link + .. button-ref:: /data/examples/torch_detection + + Fine-tuning an Object Detection Model and using it for Batch Inference + + .. grid-item-card:: + :img-top: /images/ray_logo.png + :class-img-top: pt-2 w-75 d-block mx-auto fixed-height-img - --- - :img-top: /images/ray_logo.png + .. button-ref:: /data/examples/torch_image_example - .. link-button:: /data/examples/torch_image_example - :type: ref - :text: Training an Image Classifier and using it for Batch Inference - :classes: btn-link btn-block stretched-link + Training an Image Classifier and using it for Batch Inference diff --git a/doc/source/data/data.rst b/doc/source/data/data.rst index 0d266b9dd92a..0b588cacaafb 100644 --- a/doc/source/data/data.rst +++ b/doc/source/data/data.rst @@ -72,102 +72,124 @@ If you've run your first examples already, you might want to dive into Ray Data' :ref:`key concepts ` or our :ref:`User Guide ` instead. Advanced users can refer directly to the Ray Data :ref:`API reference ` for their projects. -.. panels:: - :container: text-center - :column: col-lg-6 px-2 py-2 - :card: - - **Getting Started** - ^^^ - - Start with our quick start tutorials for working with Data. - These concrete examples will give you an idea of how to use Ray Data. - - +++ - .. link-button:: data_getting_started - :type: ref - :text: Get Started with Ray Data - :classes: btn-outline-info btn-block - --- - - **Key Concepts** - ^^^ - - Understand the key concepts behind Ray Data. - Learn what :ref:`Datastreams ` are and how they are executed in Ray - Data. - - +++ - .. 
link-button:: data_key_concepts - :type: ref - :text: Learn Key Concepts - :classes: btn-outline-info btn-block - --- - - **User Guides** - ^^^ - - Learn how to :ref:`load data `, :ref:`save - data `, :ref:`transform data `, - :ref:`access and exchange data `, or - :ref:`work with tensor data `. - - +++ - .. link-button:: data_user_guide - :type: ref - :text: Start Using Ray Data - :classes: btn-outline-info btn-block - --- - - **Examples** - ^^^ - - Find both simple and scaling-out examples of using Ray Data for data - processing and ML ingest. - - +++ - .. link-button:: data-recipes - :type: ref - :text: Ray Data Examples - :classes: btn-outline-info btn-block - --- - - **Ray Data FAQ** - ^^^ - - Find answers to commonly asked questions in our detailed FAQ. - - +++ - .. link-button:: data_faq - :type: ref - :text: Ray Data FAQ - :classes: btn-outline-info btn-block - --- - - **API** - ^^^ - - Get more in-depth information about the Ray Data API. - - +++ - .. link-button:: data-api - :type: ref - :text: Read the API Reference - :classes: btn-outline-info btn-block - --- - - **Other Data Processing Solutions** - ^^^ - - For running ETL pipelines, check out :ref:`Spark-on-Ray `. For scaling - up your data science workloads, check out :ref:`Dask-on-Ray `, - :ref:`Modin `, and :ref:`Mars-on-Ray `. - - +++ - .. link-button:: integrations - :type: ref - :text: Check Out Other Data Processing Options - :classes: btn-outline-info btn-block +.. grid:: 1 2 2 2 + :gutter: 1 + :class-container: container pb-6 + + .. grid-item-card:: + + **Getting Started** + ^^^ + + Start with our quick start tutorials for working with Data. + These concrete examples will give you an idea of how to use Ray Data. + + +++ + .. button-ref:: data_getting_started + :color: primary + :outline: + :expand: + + Get Started with Ray Data + + .. grid-item-card:: + + **Key Concepts** + ^^^ + + Understand the key concepts behind Ray Data. 
+ Learn what :ref:`Datastreams ` are and how they are executed in Ray + Data. + + +++ + .. button-ref:: data_key_concepts + :color: primary + :outline: + :expand: + + Learn Key Concepts + + .. grid-item-card:: + + **User Guides** + ^^^ + + Learn how to :ref:`load data `, :ref:`save + data `, :ref:`transform data `, + :ref:`access and exchange data `, or + :ref:`work with tensor data `. + + +++ + .. button-ref:: data_user_guide + :color: primary + :outline: + :expand: + + Start Using Ray Data + + .. grid-item-card:: + + **Examples** + ^^^ + + Find both simple and scaling-out examples of using Ray Data for data + processing and ML ingest. + + +++ + .. button-ref:: data-recipes + :color: primary + :outline: + :expand: + + Ray Data Examples + + .. grid-item-card:: + + **Ray Data FAQ** + ^^^ + + Find answers to commonly asked questions in our detailed FAQ. + + +++ + .. button-ref:: data_faq + :color: primary + :outline: + :expand: + + Ray Data FAQ + + .. grid-item-card:: + + **API** + ^^^ + + Get more in-depth information about the Ray Data API. + + +++ + .. button-ref:: data-api + :color: primary + :outline: + :expand: + + Read the API Reference + + .. grid-item-card:: + + **Other Data Processing Solutions** + ^^^ + + For running ETL pipelines, check out :ref:`Spark-on-Ray `. For scaling + up your data science workloads, check out :ref:`Dask-on-Ray `, + :ref:`Modin `, and :ref:`Mars-on-Ray `. + + +++ + .. button-ref:: integrations + :color: primary + :outline: + :expand: + + Check Out Other Data Processing Options + ------------------------ Datasource Compatibility diff --git a/doc/source/rllib/index.rst b/doc/source/rllib/index.rst index 5fcbd85444bd..2b8c4cc80fae 100644 --- a/doc/source/rllib/index.rst +++ b/doc/source/rllib/index.rst @@ -171,42 +171,51 @@ click on the dropdowns below: Feature Overview ---------------- -.. 
panels:: - :container: text-center - :column: col-lg-4 px-2 py-2 - :card: - - **RLlib Key Concepts** - ^^^ - Learn more about the core concepts of RLlib, such as environments, algorithms and - policies. - +++ - .. link-button:: rllib-core-concepts - :type: ref - :text: Key Concepts - :classes: btn-outline-info btn-block - --- - - **RLlib Algorithms** - ^^^ - Check out the many available RL algorithms of RLlib for model-free and model-based - RL, on-policy and off-policy training, multi-agent RL, and more. - +++ - .. link-button:: rllib-algorithms-doc - :type: ref - :text: Algorithms - :classes: btn-outline-info btn-block - --- - - **RLlib Environments** - ^^^ - Get started with environments supported by RLlib, such as Farama foundation's Gymnasium, Petting Zoo, - and many custom formats for vectorized and multi-agent environments. - +++ - .. link-button:: rllib-environments-doc - :type: ref - :text: Environments - :classes: btn-outline-info btn-block +.. grid:: 1 2 3 3 + :gutter: 1 + :class-container: container pb-4 + + .. grid-item-card:: + + **RLlib Key Concepts** + ^^^ + Learn more about the core concepts of RLlib, such as environments, algorithms and + policies. + +++ + .. button-ref:: rllib-core-concepts + :color: primary + :outline: + :expand: + + Key Concepts + + .. grid-item-card:: + + **RLlib Algorithms** + ^^^ + Check out the many available RL algorithms of RLlib for model-free and model-based + RL, on-policy and off-policy training, multi-agent RL, and more. + +++ + .. button-ref:: rllib-algorithms-doc + :color: primary + :outline: + :expand: + + Algorithms + + .. grid-item-card:: + + **RLlib Environments** + ^^^ + Get started with environments supported by RLlib, such as Farama foundation's Gymnasium, Petting Zoo, + and many custom formats for vectorized and multi-agent environments. + +++ + .. button-ref:: rllib-environments-doc + :color: primary + :outline: + :expand: + + Environments The following is a summary of RLlib's most striking features. 
diff --git a/doc/source/rllib/user-guides.rst b/doc/source/rllib/user-guides.rst index fed9cc8352f8..4b325deb2f27 100644 --- a/doc/source/rllib/user-guides.rst +++ b/doc/source/rllib/user-guides.rst @@ -13,97 +13,86 @@ User Guides RLlib Feature Guides -------------------- -.. panels:: - :container: container pb-4 full-width - :column: col-md-4 px-2 py-2 - :img-top-cls: pt-5 w-75 d-block mx-auto - - --- - :img-top: /rllib/images/rllib-logo.svg - - +++ - .. link-button:: rllib-advanced-api-doc - :type: ref - :text: Advanced Feautures of the RLlib Python API - :classes: btn-link btn-block stretched-link - - --- - :img-top: /rllib/images/rllib-logo.svg - - +++ - .. link-button:: rllib-models - :type: ref - :text: Working With Models, Preprocessors and Action Distributions - :classes: btn-link btn-block stretched-link - - --- - :img-top: /rllib/images/rllib-logo.svg - - +++ - .. link-button:: rllib-saving-and-loading-algos-and-policies - :type: ref - :text: Checkpointing your Algorithms and Policies, and Exporting your NN Models - :classes: btn-link btn-block stretched-link - - --- - :img-top: /rllib/images/rllib-logo.svg - - +++ - .. link-button:: rllib-concepts - :type: ref - :text: How To Customize Your Policies? - :classes: btn-link btn-block stretched-link - - --- - :img-top: /rllib/images/rllib-logo.svg - - +++ - .. link-button:: rllib-sample-collection - :type: ref - :text: How To Use Sample Collections and Trajectory Views? - :classes: btn-link btn-block stretched-link - - --- - :img-top: /rllib/images/rllib-logo.svg - - +++ - .. link-button:: rllib-offline - :type: ref - :text: Working With Offline Data - :classes: btn-link btn-block stretched-link - - --- - :img-top: /rllib/images/rllib-logo.svg - - +++ - .. link-button:: rllib-replay-buffers - :type: ref - :text: Working with ReplayBuffers - :classes: btn-link btn-block stretched-link - - --- - :img-top: /rllib/images/rllib-logo.svg - - +++ - .. 
link-button:: rllib-dev - :type: ref - :text: How To Contribute To RLlib? - :classes: btn-link btn-block stretched-link - - --- - :img-top: /rllib/images/rllib-logo.svg - - +++ - .. link-button:: rllib-cli - :type: ref - :text: How To Work With the RLlib CLI? - :classes: btn-link btn-block stretched-link - - --- - :img-top: /rllib/images/rllib-logo.svg - - +++ - .. link-button:: rllib-catalogs - :type: ref - :text: How To Use the RLlib Catalogs - :classes: btn-link btn-block stretched-link +.. grid:: 1 2 3 4 + :gutter: 1 + :class-container: container pb-3 + + .. grid-item-card:: + :img-top: /rllib/images/rllib-logo.svg + :class-img-top: pt-2 w-75 d-block mx-auto fixed-height-img + + .. button-ref:: rllib-advanced-api-doc + + Advanced Features of the RLlib Python API + + .. grid-item-card:: + :img-top: /rllib/images/rllib-logo.svg + :class-img-top: pt-2 w-75 d-block mx-auto fixed-height-img + + .. button-ref:: rllib-models + + Working With Models, Preprocessors and Action Distributions + + .. grid-item-card:: + :img-top: /rllib/images/rllib-logo.svg + :class-img-top: pt-2 w-75 d-block mx-auto fixed-height-img + + .. button-ref:: rllib-saving-and-loading-algos-and-policies + + Checkpointing your Algorithms and Policies, and Exporting your Models + + .. grid-item-card:: + :img-top: /rllib/images/rllib-logo.svg + :class-img-top: pt-2 w-75 d-block mx-auto fixed-height-img + + .. button-ref:: rllib-concepts + + How To Customize Your Policies? + + .. grid-item-card:: + :img-top: /rllib/images/rllib-logo.svg + :class-img-top: pt-2 w-75 d-block mx-auto fixed-height-img + + .. button-ref:: rllib-sample-collection + + How To Use Sample Collections and Trajectory Views? + + .. grid-item-card:: + :img-top: /rllib/images/rllib-logo.svg + :class-img-top: pt-2 w-75 d-block mx-auto fixed-height-img + + .. button-ref:: rllib-offline + + Working With Offline Data + + .. 
grid-item-card:: + :img-top: /rllib/images/rllib-logo.svg + :class-img-top: pt-2 w-75 d-block mx-auto fixed-height-img + + .. button-ref:: rllib-replay-buffers + + Working with ReplayBuffers + + .. grid-item-card:: + :img-top: /rllib/images/rllib-logo.svg + :class-img-top: pt-2 w-75 d-block mx-auto fixed-height-img + + .. button-ref:: rllib-dev + + How To Contribute To RLlib? + + .. grid-item-card:: + :img-top: /rllib/images/rllib-logo.svg + :class-img-top: pt-2 w-75 d-block mx-auto fixed-height-img + + .. button-ref:: rllib-cli + + How To Work With the RLlib CLI? + + .. grid-item-card:: + :img-top: /rllib/images/rllib-logo.svg + :class-img-top: pt-2 w-75 d-block mx-auto fixed-height-img + + .. button-ref:: rllib-catalogs + + How To Use the RLlib Catalogs diff --git a/doc/source/serve/index.md b/doc/source/serve/index.md index ac86abc27c31..3c81829c6d03 100644 --- a/doc/source/serve/index.md +++ b/doc/source/serve/index.md @@ -19,7 +19,7 @@ (rayserve-overview)= Ray Serve is a scalable model serving library for building online inference APIs. -Serve is framework agnostic, so you can use a single toolkit to serve everything from deep learning models built with frameworks like PyTorch, Tensorflow, and Keras, to Scikit-Learn models, to arbitrary Python business logic. +Serve is framework-agnostic, so you can use a single toolkit to serve everything from deep learning models built with frameworks like PyTorch, Tensorflow, and Keras, to Scikit-Learn models, to arbitrary Python business logic. Serve is particularly well suited for [model composition](serve-model-composition), enabling you to build a complex inference service consisting of multiple ML models and business logic all in Python code. @@ -38,13 +38,18 @@ Define a simple "hello world" application, run it locally, and query it over HTT :language: python ``` -:::{tab-set} +::::{tab-set} + +:::{tab-item} More examples -::::::{tab-item} More examples For more examples, select from the tabs. 
-:::::: -:::{tab-set} +::: + +:::: + +::::{tab-set} + :::{tab-item} Model composition Use Serve's model composition API to combine multiple deployments into a single application. @@ -52,18 +57,20 @@ Use Serve's model composition API to combine multiple deployments into a single ```{literalinclude} doc_code/quickstart_composed.py :language: python ``` + ::: -::::::{tab-item} FastAPI integration +:::{tab-item} FastAPI integration Use Serve's [FastAPI](https://fastapi.tiangolo.com/) integration to elegantly handle HTTP parsing and validation. ```{literalinclude} doc_code/fastapi_example.py :language: python ``` -:::::: -::::::{tab-item} Hugging Face Transformers model +::: + +:::{tab-item} Hugging Face Transformers model To run this example, install the following: ``pip install transformers`` @@ -73,10 +80,11 @@ The model we'll use is a sentiment analysis model: it will take a text string as ```{literalinclude} doc_code/transformers_example.py :language: python ``` -:::::: ::: +:::: + ## Why choose Serve? :::{dropdown} Build end-to-end ML-powered applications @@ -158,7 +166,7 @@ Serve supports arbitrary Python code and therefore integrates well with the MLOp :::{dropdown} TFServing, TorchServe, ONNXRuntime :animate: fade-in-slide-down -Ray Serve is *framework agnostic*, so you can use it alongside any other Python framework or library. +Ray Serve is *framework-agnostic*, so you can use it alongside any other Python framework or library. We believe data scientists should not be bound to a particular machine learning framework. They should be empowered to use the best tool available for the job. @@ -214,83 +222,106 @@ or head over to the {doc}`tutorials/index` to get started building your Ray Serv ```{eval-rst} -.. 
panels:: - :container: text-center - :column: col-lg-6 px-2 py-2 - :card: - - **Getting Started** - ^^^ - - Start with our quick start tutorials for :ref:`deploying a single model locally ` and how to :ref:`convert an existing model into a Ray Serve deployment ` . - - +++ - .. link-button:: getting-started - :type: ref - :text: Get Started with Ray Serve - :classes: btn-outline-info btn-block - --- - - **Key Concepts** - ^^^ - - Understand the key concepts behind Ray Serve. - Learn about :ref:`Deployments `, :ref:`how to query them `, and the :ref:`Deployment Graph ` API for composing models into a graph structure. - - +++ - .. link-button:: serve-key-concepts - :type: ref - :text: Learn Key Concepts - :classes: btn-outline-info btn-block - --- - - **User Guides** - ^^^ - Learn best practices for common patterns like :ref:`scaling and resource allocation ` and :ref:`model composition `. - Learn how to :ref:`develop Serve applications locally ` and :ref:`go to production `. - - +++ - .. link-button:: serve-user-guides - :type: ref - :text: Start Using Ray Serve - :classes: btn-outline-info btn-block - --- - - **Examples** - ^^^ - - Follow the tutorials to learn how to integrate Ray Serve with :ref:`TensorFlow `, :ref:`Scikit-Learn `, and :ref:`RLlib `. - - +++ - .. link-button:: serve-examples - :type: ref - :text: Serve Examples - :classes: btn-outline-info btn-block - --- - - **API Reference** - ^^^ - - Get more in-depth information about the Ray Serve API. - - +++ - .. link-button:: serve-api - :type: ref - :text: Read the API Reference - :classes: btn-outline-info btn-block - - --- - - **Serve Architecture** - ^^^ - - Understand how each component in Ray Serve works. - - +++ - .. link-button:: serve-architecture - :type: ref - :text: Understand Serve Architecture - :classes: btn-outline-info btn-block +.. grid:: 1 2 2 2 + :gutter: 1 + :class-container: container pb-3 + + .. 
grid-item-card:: + :class-img-top: pt-2 w-75 d-block mx-auto fixed-height-img + + **Getting Started** + ^^^ + + Start with our quick start tutorials for :ref:`deploying a single model locally ` and how to :ref:`convert an existing model into a Ray Serve deployment ` . + + +++ + .. button-ref:: getting-started + :color: primary + :outline: + :expand: + + Get Started with Ray Serve + + .. grid-item-card:: + :class-img-top: pt-2 w-75 d-block mx-auto fixed-height-img + + **Key Concepts** + ^^^ + + Understand the key concepts behind Ray Serve. + Learn about :ref:`Deployments `, :ref:`how to query them `, and the :ref:`Deployment Graph ` API for composing models into a graph structure. + + +++ + .. button-ref:: serve-key-concepts + :color: primary + :outline: + :expand: + + Learn Key Concepts + + .. grid-item-card:: + :class-img-top: pt-2 w-75 d-block mx-auto fixed-height-img + + **User Guides** + ^^^ + Learn best practices for common patterns like :ref:`scaling and resource allocation ` and :ref:`model composition `. + Learn how to :ref:`develop Serve applications locally ` and :ref:`go to production `. + + +++ + .. button-ref:: serve-user-guides + :color: primary + :outline: + :expand: + + Start Using Ray Serve + + .. grid-item-card:: + :class-img-top: pt-2 w-75 d-block mx-auto fixed-height-img + + **Examples** + ^^^ + + Follow the tutorials to learn how to integrate Ray Serve with :ref:`TensorFlow `, :ref:`Scikit-Learn `, and :ref:`RLlib `. + + +++ + .. button-ref:: serve-examples + :color: primary + :outline: + :expand: + + Serve Examples + + .. grid-item-card:: + :class-img-top: pt-2 w-75 d-block mx-auto fixed-height-img + + **API Reference** + ^^^ + + Get more in-depth information about the Ray Serve API. + + +++ + .. button-ref:: serve-api + :color: primary + :outline: + :expand: + + Read the API Reference + + .. 
grid-item-card:: + :class-img-top: pt-2 w-75 d-block mx-auto fixed-height-img + + **Serve Architecture** + ^^^ + + Understand how each component in Ray Serve works. + + +++ + .. button-ref:: serve-architecture + :color: primary + :outline: + :expand: + + Understand Serve Architecture ``` For more, see the following blog posts about Ray Serve: diff --git a/doc/source/serve/model_composition.md b/doc/source/serve/model_composition.md index 93242ea66140..f7d9c542b936 100644 --- a/doc/source/serve/model_composition.md +++ b/doc/source/serve/model_composition.md @@ -331,28 +331,34 @@ You can render an illustration of your deployment graph to see its nodes and the Make sure you have `pydot` and `graphviz` to follow this section: -:::{tab-set} +::::{tab-set} + +:::{tab-item} MacOS -::::::{tab-item} MacOS ``` pip install -U pydot && brew install graphviz ``` -:::::: -::::{tab-item} Windows +::: + +:::{tab-item} Windows + ``` pip install -U pydot && winget install graphviz ``` -:::::: -::::::{tab-item} Linux +::: + +:::{tab-item} Linux + ``` pip install -U pydot && sudo apt-get install -y graphviz ``` -:::::: ::: +:::: + Here's an example graph: ```{literalinclude} doc_code/model_composition/deployment_graph_viz.py diff --git a/doc/source/serve/production-guide/fault-tolerance.md b/doc/source/serve/production-guide/fault-tolerance.md index 5058c40da5aa..ed28518240f8 100644 --- a/doc/source/serve/production-guide/fault-tolerance.md +++ b/doc/source/serve/production-guide/fault-tolerance.md @@ -60,7 +60,7 @@ See Serve's [Kubernetes production guide](serve-in-production-kubernetes) to lea In this section, you'll learn how to add fault tolerance to Ray's Global Control Store (GCS), which allows your Serve application to serve traffic even when the head node crashes. -By default the Ray head node is a single point of failure: if it crashes, the entire Ray cluster crashes and must be restarted. 
When running on Kubernetes, the `RayService` controller health-checks the Ray cluster and restarts it if this occurs, but this introduces some downtime. +By default, the Ray head node is a single point of failure: if it crashes, the entire Ray cluster crashes and must be restarted. When running on Kubernetes, the `RayService` controller health-checks the Ray cluster and restarts it if this occurs, but this introduces some downtime. In Ray 2.0, KubeRay added **experimental support** for [Global Control Store (GCS) fault tolerance](https://ray-project.github.io/kuberay/guidance/gcs-ft/#ray-gcs-fault-tolerancegcs-ft-experimental), preventing the Ray cluster from crashing if the head node goes down. While the head node is recovering, Serve applications can still handle traffic via worker nodes but cannot be updated or recover from other failures (e.g. actors or worker nodes crashing). @@ -149,9 +149,9 @@ After adding the Redis objects, you also need to modify the `RayService` configu First, you need to update your `RayService` metadata's annotations: -:::{tab-set} +::::{tab-set} -::::{tab-item} Vanilla Config +:::{tab-item} Vanilla Config ```yaml ... apiVersion: ray.io/v1alpha1 @@ -161,9 +161,9 @@ metadata: spec: ... ``` -:::: +::: -::::{tab-item} Fault Tolerant Config +:::{tab-item} Fault Tolerant Config :selected: ```yaml ... @@ -177,17 +177,20 @@ metadata: spec: ... ``` -:::: ::: +:::: + The annotations are: * `ray.io/ft-enabled` (REQUIRED): Enables GCS fault tolerance when true * `ray.io/external-storage-namespace` (OPTIONAL): Sets the [external storage namespace] Next, you need to add the `RAY_REDIS_ADDRESS` environment variable to the `headGroupSpec`: -:::{tab-set} -::::{tab-item} Vanilla Config +::::{tab-set} + +:::{tab-item} Vanilla Config + ```yaml apiVersion: ray.io/v1alpha1 kind: RayService @@ -205,10 +208,12 @@ spec: env: ... 
``` -:::: -::::{tab-item} Fault Tolerant Config +::: + +:::{tab-item} Fault Tolerant Config :selected: + ```yaml apiVersion: ray.io/v1alpha1 kind: RayService @@ -228,9 +233,10 @@ spec: - name: RAY_REDIS_ADDRESS value: redis:6379 ``` -:::: ::: +:::: + `RAY_REDIS_ADDRESS`'s value should be your Redis database's `redis://` address. It should contain your Redis database's host and port. An [example Redis address](https://www.iana.org/assignments/uri-schemes/prov/rediss) is `redis://user:secret@localhost:6379/0?foo=bar&qux=baz`. In the example above, the Redis deployment name (`redis`) is the host within the Kubernetes cluster, and the Redis port is `6379`. The example is compatible with the previous section's [example config](one-node-redis-example). @@ -246,22 +252,24 @@ Check out the KubeRay guide on [GCS fault tolerance](https://ray-project.github. This section explains how Serve recovers from system failures. It uses the following Serve application and config as a working example. -:::{tab-set} -::::{tab-item} Python Code +::::{tab-set} + +:::{tab-item} Python Code ```{literalinclude} ../doc_code/fault_tolerance/sleepy_pid.py :start-after: __start__ :end-before: __end__ :language: python ``` -:::: +::: -::::{tab-item} Kubernetes Config +:::{tab-item} Kubernetes Config ```{literalinclude} ../doc_code/fault_tolerance/k8s_config.yaml :language: yaml ``` -:::: ::: +:::: + Follow the [KubeRay quickstart guide](kuberay-quickstart) to: * Install `kubectl` and `Helm` * Prepare a Kubernetes cluster diff --git a/doc/source/serve/tutorials/gradio-dag-visualization.md b/doc/source/serve/tutorials/gradio-dag-visualization.md index d417a2b4d2da..516581cfaba8 100644 --- a/doc/source/serve/tutorials/gradio-dag-visualization.md +++ b/doc/source/serve/tutorials/gradio-dag-visualization.md @@ -13,28 +13,34 @@ pip install gradio Additionally, you can optionally install `pydot` and `graphviz`. 
This will allow this tool to incorporate the complementary [graphical illustration](pydot-visualize-dag) of the nodes and edges. -:::{tab-set} +::::{tab-set} + +:::{tab-item} MacOS -::::{tab-item} MacOS ``` pip install -U pydot && brew install graphviz ``` -:::: -::::{tab-item} Windows +::: + +:::{tab-item} Windows + ``` pip install -U pydot && winget install graphviz ``` -:::: -::::{tab-item} Linux +::: + +:::{tab-item} Linux + ``` pip install -U pydot && sudo apt-get install -y graphviz ``` -:::: ::: +:::: + Also, for the [quickstart example](gradio-vis-quickstart), install the `transformers` module to pull models through [HuggingFace's Pipelines](https://huggingface.co/docs/transformers/main_classes/pipelines). ```console pip install transformers diff --git a/doc/source/serve/tutorials/serve-ml-models.md b/doc/source/serve/tutorials/serve-ml-models.md index 65c8144631ae..93432e46f6f8 100644 --- a/doc/source/serve/tutorials/serve-ml-models.md +++ b/doc/source/serve/tutorials/serve-ml-models.md @@ -7,7 +7,7 @@ In this guide, we will show you how to train models from various machine learnin Please see the [Key Concepts](serve-key-concepts) to learn more general information about Ray Serve. -:::{tab-set} +:::::{tab-set} ::::{tab-item} Keras and Tensorflow @@ -38,7 +38,7 @@ Next, let's train a simple MNIST model using Keras. :end-before: __doc_train_model_end__ ``` -Next, we define a class `TFMnistModel` that will accept HTTP requests and run the MNIST model that we trained. It is decorated with `@serve.deployment` to make it a deployment object so it can be deployed onto Ray Serve. Note that the Serve deployment is exposed over an HTTP route, and by default the `__call__` method is invoked when a request is sent to your deployment over HTTP. +Next, we define a class `TFMnistModel` that will accept HTTP requests and run the MNIST model that we trained. It is decorated with `@serve.deployment` to make it a deployment object, so it can be deployed onto Ray Serve. 
Note that the Serve deployment is exposed over an HTTP route, and by default the `__call__` method is invoked when a request is sent to your deployment over HTTP. ```{literalinclude} ../doc_code/tutorial_tensorflow.py :start-after: __doc_define_servable_begin__ @@ -193,7 +193,7 @@ In particular, we will show: - How to load the Scikit-Learn model from file system in your Ray Serve definition. - How to parse the JSON request and make a prediction. -Ray Serve is framework agnostic. You can use any version of sklearn. We will also need `requests` to send HTTP requests to your model deployment. If you haven't already, please install scikit-learn and requests by running: +Ray Serve is framework-agnostic. You can use any version of sklearn. We will also need `requests` to send HTTP requests to your model deployment. If you haven't already, please install scikit-learn and requests by running: ```console $ pip install scikit-learn requests @@ -282,6 +282,7 @@ You should get an output like the following (the exact prediction may vary): ```python {"result": "versicolor"} ``` + :::: -::: \ No newline at end of file +::::: diff --git a/doc/source/train/examples.rst b/doc/source/train/examples.rst index c1ad510ebaa5..121d9caf3a96 100644 --- a/doc/source/train/examples.rst +++ b/doc/source/train/examples.rst @@ -31,109 +31,98 @@ and use cases. You can filter these examples by the following categories: Distributed Training Examples using Ray Train --------------------------------------------- -.. panels:: - :container: container pb-4 full-width - :column: col-md-4 px-2 py-2 - :img-top-cls: pt-5 w-75 d-block mx-auto - - --- - :img-top: /images/pytorch_logo.png - - +++ - .. link-button:: torch_fashion_mnist_ex - :type: ref - :text: PyTorch Fashion MNIST Training Example - :classes: btn-link btn-block stretched-link trainTorchFashionMnist - - --- - :img-top: /images/hugging.png - - +++ - .. 
link-button:: train_transformers_example - :type: ref - :text: Transformers with PyTorch Training Example - :classes: btn-link btn-block stretched-link trainTransformers - - --- - :img-top: /images/tf_logo.png - - +++ - .. link-button:: tensorflow_mnist_example - :type: ref - :text: TensorFlow MNIST Training Example - :classes: btn-link btn-block stretched-link trainTensorflowMnist - - --- - :img-top: /images/horovod.png - - +++ - .. link-button:: horovod_example - :type: ref - :text: End-to-end Horovod Training Example - :classes: btn-link btn-block stretched-link trainHorovod - - --- - :img-top: /images/pytorch_lightning_small.png - - +++ - .. link-button:: lightning_mnist_example - :type: ref - :text: End-to-end PyTorch Lightning Training Example - :classes: btn-link btn-block stretched-link trainLightning - - --- - :img-top: /images/pytorch_lightning_small.png - - +++ - .. link-button:: lightning_advanced_example - :type: ref - :text: Use LightningTrainer with Ray Data and Batch Predictor - :classes: btn-link btn-block stretched-link trainLightning +.. grid:: 1 2 3 3 + :gutter: 1 + :class-container: container pb-4 + + .. grid-item-card:: + :img-top: /images/pytorch_logo.png + :class-img-top: pt-2 w-75 d-block mx-auto fixed-height-img + + .. button-ref:: torch_fashion_mnist_ex + + PyTorch Fashion MNIST Training Example + + .. grid-item-card:: + :img-top: images/hugging.png + :class-img-top: pt-2 w-75 d-block mx-auto fixed-height-img + + .. button-ref:: train_transformers_example + + Transformers with PyTorch Training Example + + .. grid-item-card:: + :img-top: /images/tf_logo.png + :class-img-top: pt-2 w-75 d-block mx-auto fixed-height-img + + .. button-ref:: tensorflow_mnist_example + + TensorFlow MNIST Training Example + + .. grid-item-card:: + :img-top: /images/horovod.png + :class-img-top: pt-2 w-75 d-block mx-auto fixed-height-img + + .. button-ref:: horovod_example + + End-to-end Horovod Training Example + + .. 
grid-item-card:: + :img-top: /images/pytorch_lightning_small.png + :class-img-top: pt-2 w-75 d-block mx-auto fixed-height-img + + .. button-ref:: lightning_mnist_example + + End-to-end PyTorch Lightning Training Example + + .. grid-item-card:: + :img-top: /images/pytorch_lightning_small.png + :class-img-top: pt-2 w-75 d-block mx-auto fixed-height-img + + .. button-ref:: lightning_advanced_example + + Use LightningTrainer with Ray Data and Batch Predictor Ray Train Examples Using Loggers & Callbacks -------------------------------------------- -.. panels:: - :container: container pb-4 full-width - :column: col-md-4 px-2 py-2 - :img-top-cls: pt-5 w-75 d-block mx-auto - --- - :img-top: /images/mlflow.png +.. grid:: 1 2 3 4 + :gutter: 1 + :class-container: container pb-3 - +++ - .. link-button:: train_mlflow_example - :type: ref - :text: Logging Training Runs with MLflow - :classes: btn-link btn-block stretched-link trainMlflow + .. grid-item-card:: + :img-top: /images/mlflow.png + :class-img-top: pt-2 w-75 d-block mx-auto fixed-height-img + + .. button-ref:: train_mlflow_example + + Logging Training Runs with MLflow Ray Train & Tune Integration Examples ------------------------------------- -.. panels:: - :container: container pb-4 full-width - :column: col-md-4 px-2 py-2 - :img-top-cls: pt-5 w-75 d-block mx-auto +.. grid:: 1 2 3 4 + :gutter: 1 + :class-container: container pb-3 + + .. grid-item-card:: + :img-top: /images/tune.png + :class-img-top: pt-2 w-75 d-block mx-auto fixed-height-img - --- - :img-top: /images/tune.png + .. button-ref:: tune_train_tf_example - +++ - .. link-button:: tune_train_tf_example - :type: ref - :text: End-to-end Example for Tuning a TensorFlow Model - :classes: btn-link btn-block stretched-link trainTuneTensorflow + End-to-end Example for Tuning a TensorFlow Model - --- - :img-top: /images/tune.png + .. grid-item-card:: + :img-top: /images/tune.png + :class-img-top: pt-2 w-75 d-block mx-auto fixed-height-img - +++ - .. 
link-button:: tune_train_torch_example - :type: ref - :text: End-to-end Example for Tuning a PyTorch Model with PBT - :classes: btn-link btn-block stretched-link trainTunePyTorch + .. button-ref:: tune_train_torch_example + + End-to-end Example for Tuning a PyTorch Model with PBT .. TODO implement these examples! @@ -152,16 +141,15 @@ Ray Train & Tune Integration Examples Ray Train Benchmarks -------------------- -.. panels:: - :container: container pb-4 full-width - :column: col-md-4 px-2 py-2 - :img-top-cls: pt-5 w-75 d-block mx-auto - --- - :img-top: /ray-overview/images/ray_svg_logo.svg +.. grid:: 1 2 3 4 + :gutter: 1 + :class-container: container pb-3 + + .. grid-item-card:: + :img-top: /ray-overview/images/ray_svg_logo.svg + :class-img-top: pt-2 w-75 d-block mx-auto fixed-height-img + + .. button-ref:: train_benchmark - +++ - .. link-button:: train_benchmark - :type: ref - :text: Benchmark example for the PyTorch data transfer auto pipeline - :classes: btn-link btn-block stretched-link trainBenchmark + Benchmark example for the PyTorch data transfer auto pipeline diff --git a/doc/source/train/user-guides.rst b/doc/source/train/user-guides.rst index 67d6cca88930..be20df04242a 100644 --- a/doc/source/train/user-guides.rst +++ b/doc/source/train/user-guides.rst @@ -3,45 +3,39 @@ Ray Train User Guides ===================== -.. panels:: - :container: container pb-4 full-width - :column: col-md-4 px-2 py-2 - :img-top-cls: pt-5 w-75 d-block mx-auto - - --- - :img-top: /ray-overview/images/ray_svg_logo.svg - - +++ - .. link-button:: config_guide - :type: ref - :text: Configurations User Guide - :classes: btn-link btn-block stretched-link - - --- - :img-top: /ray-overview/images/ray_svg_logo.svg - - +++ - .. link-button:: dl_guide - :type: ref - :text: Deep Learning User Guide - :classes: btn-link btn-block stretched-link - - - --- - :img-top: /ray-overview/images/ray_svg_logo.svg - - +++ - .. 
link-button:: gbdt - :type: ref - :text: XGBoost / LightGBM User Guide - :classes: btn-link btn-block stretched-link - - --- - :img-top: /ray-overview/images/ray_svg_logo.svg - - +++ - .. link-button:: architecture - :type: ref - :text: Ray Train Architecture - :classes: btn-link btn-block stretched-link +.. grid:: 1 2 3 4 + :gutter: 1 + :class-container: container pb-3 + + .. grid-item-card:: + :img-top: /ray-overview/images/ray_svg_logo.svg + :class-img-top: pt-2 w-75 d-block mx-auto fixed-height-img + + .. button-ref:: config_guide + + Configurations User Guide + + .. grid-item-card:: + :img-top: /ray-overview/images/ray_svg_logo.svg + :class-img-top: pt-2 w-75 d-block mx-auto fixed-height-img + + .. button-ref:: dl_guide + + Deep Learning User Guide + + .. grid-item-card:: + :img-top: /ray-overview/images/ray_svg_logo.svg + :class-img-top: pt-2 w-75 d-block mx-auto fixed-height-img + + .. button-ref:: gbdt + + XGBoost / LightGBM User Guide + + .. grid-item-card:: + :img-top: /ray-overview/images/ray_svg_logo.svg + :class-img-top: pt-2 w-75 d-block mx-auto fixed-height-img + + .. button-ref:: architecture + + Ray Train Architecture diff --git a/doc/source/tune/examples/hpo-frameworks.rst b/doc/source/tune/examples/hpo-frameworks.rst index 6541964d2b83..cd66e61d01b2 100644 --- a/doc/source/tune/examples/hpo-frameworks.rst +++ b/doc/source/tune/examples/hpo-frameworks.rst @@ -5,116 +5,102 @@ Tune integrates with a wide variety of hyperparameter optimization frameworks and their respective search algorithms. Here you can find detailed examples on each of our integrations: -.. panels:: - :container: container pb-4 - :column: col-md-4 px-2 py-2 - :img-top-cls: pt-5 w-75 d-block mx-auto - - --- - :img-top: ../images/ax.png - - +++ - .. link-button:: ax_example - :type: ref - :text: How To Use Tune With Ax - :classes: btn-link btn-block stretched-link - - --- - :img-top: ../images/dragonfly.png - - +++ - .. 
link-button:: dragonfly_example - :type: ref - :text: How To Use Tune With Dragonfly - :classes: btn-link btn-block stretched-link - - --- - :img-top: ../images/skopt.png - - +++ - .. link-button:: skopt_example - :type: ref - :text: How To Use Tune With Scikit-Optimize - :classes: btn-link btn-block stretched-link - - --- - :img-top: ../images/hyperopt.png - - +++ - .. link-button:: hyperopt_example - :type: ref - :text: How To Use Tune With HyperOpt - :classes: btn-link btn-block stretched-link - - --- - :img-top: ../images/bayesopt.png - - +++ - .. link-button:: bayesopt_example - :type: ref - :text: How To Use Tune With BayesOpt - :classes: btn-link btn-block stretched-link - - --- - :img-top: ../images/flaml.png - - +++ - .. link-button:: flaml_example - :type: ref - :text: How To Use Tune With BlendSearch and CFO - :classes: btn-link btn-block stretched-link - - --- - :img-top: ../images/bohb.png - - +++ - .. link-button:: bohb_example - :type: ref - :text: How To Use Tune With TuneBOHB - :classes: btn-link btn-block stretched-link - - --- - :img-top: ../images/nevergrad.png - - +++ - .. link-button:: nevergrad_example - :type: ref - :text: How To Use Tune With Nevergrad - :classes: btn-link btn-block stretched-link - - --- - :img-top: ../images/optuna.png - - +++ - .. link-button:: optuna_example - :type: ref - :text: How To Use Tune With Optuna - :classes: btn-link btn-block stretched-link - - --- - :img-top: ../images/zoopt.png - - +++ - .. link-button:: zoopt_example - :type: ref - :text: How To Use Tune With ZOOpt - :classes: btn-link btn-block stretched-link - - --- - :img-top: ../images/sigopt.png - - +++ - .. link-button:: sigopt_example - :type: ref - :text: How To Use Tune With SigOpt - :classes: btn-link btn-block stretched-link - - --- - :img-top: ../images/hebo.png - - +++ - .. link-button:: hebo_example - :type: ref - :text: How To Use Tune With HEBO - :classes: btn-link btn-block stretched-link +.. 
grid:: 1 2 3 4 + :gutter: 1 + :class-container: container pb-3 + .. grid-item-card:: + :img-top: ../images/ax.png + :class-img-top: pt-2 w-75 d-block mx-auto fixed-height-img + + .. button-ref:: ax_example + + How To Use Tune With Ax + + .. grid-item-card:: + :img-top: ../images/dragonfly.png + :class-img-top: pt-2 w-75 d-block mx-auto fixed-height-img + + .. button-ref:: dragonfly_example + + How To Use Tune With Dragonfly + + .. grid-item-card:: + :img-top: ../images/skopt.png + :class-img-top: pt-2 w-75 d-block mx-auto fixed-height-img + + .. button-ref:: skopt_example + + How To Use Tune With Scikit-Optimize + + .. grid-item-card:: + :img-top: ../images/hyperopt.png + :class-img-top: pt-2 w-75 d-block mx-auto fixed-height-img + + .. button-ref:: hyperopt_example + + How To Use Tune With HyperOpt + + .. grid-item-card:: + :img-top: ../images/bayesopt.png + :class-img-top: pt-2 w-75 d-block mx-auto fixed-height-img + + .. button-ref:: bayesopt_example + + How To Use Tune With BayesOpt + + .. grid-item-card:: + :img-top: ../images/flaml.png + :class-img-top: pt-2 w-75 d-block mx-auto fixed-height-img + + .. button-ref:: flaml_example + + How To Use Tune With BlendSearch and CFO + + .. grid-item-card:: + :img-top: ../images/bohb.png + :class-img-top: pt-2 w-75 d-block mx-auto fixed-height-img + + .. button-ref:: bohb_example + + How To Use Tune With TuneBOHB + + .. grid-item-card:: + :img-top: ../images/nevergrad.png + :class-img-top: pt-2 w-75 d-block mx-auto fixed-height-img + + .. button-ref:: nevergrad_example + + How To Use Tune With Nevergrad + + .. grid-item-card:: + :img-top: ../images/optuna.png + :class-img-top: pt-2 w-75 d-block mx-auto fixed-height-img + + .. button-ref:: optuna_example + + How To Use Tune With Optuna + + .. grid-item-card:: + :img-top: ../images/zoopt.png + :class-img-top: pt-2 w-75 d-block mx-auto fixed-height-img + + .. button-ref:: zoopt_example + + How To Use Tune With ZOOpt + + .. 
grid-item-card:: + :img-top: ../images/sigopt.png + :class-img-top: pt-2 w-75 d-block mx-auto fixed-height-img + + .. button-ref:: sigopt_example + + How To Use Tune With SigOpt + + .. grid-item-card:: + :img-top: ../images/hebo.png + :class-img-top: pt-2 w-75 d-block mx-auto fixed-height-img + + .. button-ref:: hebo_example + + How To Use Tune With HEBO diff --git a/doc/source/tune/examples/ml-frameworks.rst b/doc/source/tune/examples/ml-frameworks.rst index a284be504916..441c8286693f 100644 --- a/doc/source/tune/examples/ml-frameworks.rst +++ b/doc/source/tune/examples/ml-frameworks.rst @@ -5,107 +5,94 @@ Ray Tune integrates with many popular machine learning frameworks. Here you find a few practical examples showing you how to tune your models. At the end of these guides you will often find links to even more examples. -.. panels:: - :container: container pb-4 - :column: col-md-4 px-2 py-2 - :img-top-cls: pt-5 w-75 d-block mx-auto - - --- - :img-top: /images/tune-sklearn.png - - +++ - .. link-button:: tune-sklearn - :type: ref - :text: How To Use Tune's Scikit-Learn Adapters? - :classes: btn-link btn-block stretched-link - - --- - :img-top: /images/keras.png - - +++ - .. link-button:: tune-mnist-keras - :type: ref - :text: How To Use Tune With Keras & TF Models - :classes: btn-link btn-block stretched-link - - --- - :img-top: /images/pytorch_logo.png - - +++ - .. link-button:: tune-pytorch-cifar-ref - :type: ref - :text: How To Use Tune With PyTorch Models - :classes: btn-link btn-block stretched-link - - --- - :img-top: /images/pytorch_lightning_small.png - - +++ - .. link-button:: tune-pytorch-lightning-ref - :type: ref - :text: How To Tune PyTorch Lightning Models - :classes: btn-link btn-block stretched-link - - --- - :img-top: /images/mxnet_logo.png - - +++ - .. link-button:: tune-mxnet-example - :type: ref - :text: How To Tune MXNet Models - :classes: btn-link btn-block stretched-link - - --- - :img-top: /images/serve.svg - - +++ - .. 
link-button:: tune-serve-integration-mnist - :type: ref - :text: Model Selection & Serving With Ray Serve - :classes: btn-link btn-block stretched-link - - --- - :img-top: /rllib/images/rllib-logo.png - - +++ - .. link-button:: tune-rllib-example - :type: ref - :text: Tuning RL Experiments With Ray Tune & Ray Serve - :classes: btn-link btn-block stretched-link - - --- - :img-top: /images/xgboost_logo.png - - +++ - .. link-button:: tune-xgboost-ref - :type: ref - :text: A Guide To Tuning XGBoost Parameters With Tune - :classes: btn-link btn-block stretched-link - - --- - :img-top: /images/lightgbm_logo.png - - +++ - .. link-button:: tune-lightgbm-example - :type: ref - :text: A Guide To Tuning LightGBM Parameters With Tune - :classes: btn-link btn-block stretched-link - - --- - :img-top: /images/horovod.png - - +++ - .. link-button:: tune-horovod-example - :type: ref - :text: A Guide To Tuning Horovod Parameters With Tune - :classes: btn-link btn-block stretched-link - - --- - :img-top: /images/hugging.png - - +++ - .. link-button:: tune-huggingface-example - :type: ref - :text: A Guide To Tuning Huggingface Transformers With Tune - :classes: btn-link btn-block stretched-link +.. grid:: 1 2 3 4 + :gutter: 1 + :class-container: container pb-3 + .. grid-item-card:: + :img-top: /images/tune-sklearn.png + :class-img-top: pt-2 w-75 d-block mx-auto fixed-height-img + + .. button-ref:: tune-sklearn + + How To Use Tune's Scikit-Learn Adapters? + + .. grid-item-card:: + :img-top: /images/keras.png + :class-img-top: pt-2 w-75 d-block mx-auto fixed-height-img + + .. button-ref:: tune-mnist-keras + + How To Use Tune With Keras & TF Models + + .. grid-item-card:: + :img-top: /images/pytorch_logo.png + :class-img-top: pt-2 w-75 d-block mx-auto fixed-height-img + + .. button-ref:: tune-pytorch-cifar-ref + + How To Use Tune With PyTorch Models + + .. grid-item-card:: + :img-top: /images/pytorch_lightning_small.png + :class-img-top: pt-2 w-75 d-block mx-auto fixed-height-img + + .. 
button-ref:: tune-pytorch-lightning-ref + + How To Tune PyTorch Lightning Models + + .. grid-item-card:: + :img-top: /images/mxnet_logo.png + :class-img-top: pt-2 w-75 d-block mx-auto fixed-height-img + + .. button-ref:: tune-mxnet-example + + How To Tune MXNet Models + + .. grid-item-card:: + :img-top: /images/serve.svg + :class-img-top: pt-2 w-75 d-block mx-auto fixed-height-img + + .. button-ref:: tune-serve-integration-mnist + + Model Selection & Serving With Ray Serve + + .. grid-item-card:: + :img-top: /rllib/images/rllib-logo.png + :class-img-top: pt-2 w-75 d-block mx-auto fixed-height-img + + .. button-ref:: tune-rllib-example + + Tuning RL Experiments With Ray Tune & Ray Serve + + .. grid-item-card:: + :img-top: /images/xgboost_logo.png + :class-img-top: pt-2 w-75 d-block mx-auto fixed-height-img + + .. button-ref:: tune-xgboost-ref + + A Guide To Tuning XGBoost Parameters With Tune + + .. grid-item-card:: + :img-top: /images/lightgbm_logo.png + :class-img-top: pt-2 w-75 d-block mx-auto fixed-height-img + + .. button-ref:: tune-lightgbm-example + + A Guide To Tuning LightGBM Parameters With Tune + + .. grid-item-card:: + :img-top: /images/horovod.png + :class-img-top: pt-2 w-75 d-block mx-auto fixed-height-img + + .. button-ref:: tune-horovod-example + + A Guide To Tuning Horovod Parameters With Tune + + .. grid-item-card:: + :img-top: /images/hugging.png + :class-img-top: pt-2 w-75 d-block mx-auto fixed-height-img + + .. button-ref:: tune-huggingface-example + + A Guide To Tuning Huggingface Transformers With Tune diff --git a/doc/source/tune/index.rst b/doc/source/tune/index.rst index 59381e199ee8..74c5ea165615 100644 --- a/doc/source/tune/index.rst +++ b/doc/source/tune/index.rst @@ -95,87 +95,105 @@ And you can move your models from training to serving on the same infrastructure .. _`Ray Serve`: ../serve/index.html -.. 
panels:: - :container: text-center - :column: col-md-4 px-2 py-2 - :card: - - **Getting Started** - ^^^ - - In our getting started tutorial you will learn how to tune a PyTorch model - effectively with Tune. - - +++ - .. link-button:: tune-tutorial - :type: ref - :text: Get Started with Tune - :classes: btn-outline-info btn-block - --- - - **Key Concepts** - ^^^ - - Understand the key concepts behind Ray Tune. - Learn about tune runs, search algorithms, schedulers and other features. - - +++ - .. link-button:: tune-60-seconds - :type: ref - :text: Tune's Key Concepts - :classes: btn-outline-info btn-block - --- - - **User Guides** - ^^^ - - Our guides teach you about key features of Tune, - such as distributed training or early stopping. - - - +++ - .. link-button:: tune-guides - :type: ref - :text: Learn How To Use Tune - :classes: btn-outline-info btn-block - --- - - **Examples** - ^^^ - - In our examples you can find practical tutorials for using frameworks such as - scikit-learn, Keras, TensorFlow, PyTorch, and mlflow, and state of the art search algorithm integrations. - - +++ - .. link-button:: tune-examples-ref - :type: ref - :text: Ray Tune Examples - :classes: btn-outline-info btn-block - --- - - **Ray Tune FAQ** - ^^^ - - Find answers to commonly asked questions in our detailed FAQ. - - +++ - .. link-button:: tune-faq - :type: ref - :text: Ray Tune FAQ - :classes: btn-outline-info btn-block - --- - - **Ray Tune API** - ^^^ - - Get more in-depth information about the Ray Tune API, including all about search spaces, - algorithms and training configurations. - - +++ - .. link-button:: tune-api-ref - :type: ref - :text: Read the API Reference - :classes: btn-outline-info btn-block +.. grid:: 1 2 3 4 + :gutter: 1 + :class-container: container pb-3 + + .. grid-item-card:: + + **Getting Started** + ^^^ + + In our getting started tutorial you will learn how to tune a PyTorch model + effectively with Tune. + + +++ + .. 
button-ref:: tune-tutorial + :color: primary + :outline: + :expand: + + Get Started with Tune + + .. grid-item-card:: + + **Key Concepts** + ^^^ + + Understand the key concepts behind Ray Tune. + Learn about tune runs, search algorithms, schedulers and other features. + + +++ + .. button-ref:: tune-60-seconds + :color: primary + :outline: + :expand: + + Tune's Key Concepts + + .. grid-item-card:: + + **User Guides** + ^^^ + + Our guides teach you about key features of Tune, + such as distributed training or early stopping. + + + +++ + .. button-ref:: tune-guides + :color: primary + :outline: + :expand: + + Learn How To Use Tune + + .. grid-item-card:: + + **Examples** + ^^^ + + In our examples you can find practical tutorials for using frameworks such as + scikit-learn, Keras, TensorFlow, PyTorch, and mlflow, and state of the art search algorithm integrations. + + +++ + .. button-ref:: tune-examples-ref + :color: primary + :outline: + :expand: + + Ray Tune Examples + + .. grid-item-card:: + + **Ray Tune FAQ** + ^^^ + + Find answers to commonly asked questions in our detailed FAQ. + + +++ + .. button-ref:: tune-faq + :color: primary + :outline: + :expand: + + Ray Tune FAQ + + .. grid-item-card:: + + **Ray Tune API** + ^^^ + + Get more in-depth information about the Ray Tune API, including all about search spaces, + algorithms and training configurations. + + +++ + .. button-ref:: tune-api-ref + :color: primary + :outline: + :expand: + + Read the API Reference Why choose Tune? diff --git a/doc/source/tune/tutorials/overview.rst b/doc/source/tune/tutorials/overview.rst index ef3dc851fbf6..6936d0ca38dc 100644 --- a/doc/source/tune/tutorials/overview.rst +++ b/doc/source/tune/tutorials/overview.rst @@ -15,128 +15,127 @@ You can follow our :ref:`Tune Feature Guides `, but can als Tune Feature Guides ------------------- -.. 
panels:: - :container: container pb-4 - :column: col-md-4 px-2 py-2 - :img-top-cls: pt-5 w-50 d-block mx-auto - --- - :img-top: /images/tune.png - - .. link-button:: tune-run - :type: ref - :text: Running Basic Experiments - :classes: btn-link btn-block stretched-link +.. grid:: 1 2 3 4 + :gutter: 1 + :class-container: container pb-3 - --- - :img-top: /images/tune.png - - .. link-button:: tune-output - :type: ref - :text: Logging Tune Runs - :classes: btn-link btn-block stretched-link + .. grid-item-card:: + :img-top: /images/tune.png + :class-img-top: pt-2 w-75 d-block mx-auto fixed-height-img - --- - :img-top: /images/tune.png - - .. link-button:: tune-resources - :type: ref - :text: Setting Trial Resources - :classes: btn-link btn-block stretched-link + .. button-ref:: tune-run - --- - :img-top: /images/tune.png + Running Basic Experiments - .. link-button:: tune-search-space-tutorial - :type: ref - :text: Using Search Spaces - :classes: btn-link btn-block stretched-link + .. grid-item-card:: + :img-top: /images/tune.png + :class-img-top: pt-2 w-75 d-block mx-auto fixed-height-img - --- - :img-top: /images/tune.png + .. button-ref:: tune-output - .. link-button:: tune-stopping - :type: ref - :text: How to Define Stopping Criteria for a Ray Tune Experiment - :classes: btn-link btn-block stretched-link + Logging Tune Runs - --- - :img-top: /images/tune.png + .. grid-item-card:: + :img-top: /images/tune.png + :class-img-top: pt-2 w-75 d-block mx-auto fixed-height-img - .. link-button:: tune-trial-checkpoints - :type: ref - :text: How to Save and Load Trial Checkpoints - :classes: btn-link btn-block stretched-link + .. button-ref:: tune-resources - --- - :img-top: /images/tune.png + Setting Trial Resources - .. link-button:: tune-storage - :type: ref - :text: How to Configure Storage Options for a Distributed Tune Experiment - :classes: btn-link btn-block stretched-link + .. 
grid-item-card:: + :img-top: /images/tune.png + :class-img-top: pt-2 w-75 d-block mx-auto fixed-height-img - --- - :img-top: /images/tune.png + .. button-ref:: tune-search-space-tutorial - .. link-button:: tune-fault-tolerance - :type: ref - :text: How to Enable Fault Tolerance in Ray Tune - :classes: btn-link btn-block stretched-link + Using Search Spaces - --- - :img-top: /images/tune.png + .. grid-item-card:: + :img-top: /images/tune.png + :class-img-top: pt-2 w-75 d-block mx-auto fixed-height-img - .. link-button:: tune-metrics - :type: ref - :text: Using Callbacks and Metrics - :classes: btn-link btn-block stretched-link + .. button-ref:: tune-stopping - --- - :img-top: /images/tune.png + How to Define Stopping Criteria for a Ray Tune Experiment - .. link-button:: ../tutorials/tune_get_data_in_and_out - :type: ref - :text: Getting Data in and out of Tune - :classes: btn-link btn-block stretched-link + .. grid-item-card:: + :img-top: /images/tune.png + :class-img-top: pt-2 w-75 d-block mx-auto fixed-height-img - --- - :img-top: /images/tune.png + .. button-ref:: tune-trial-checkpoints - .. link-button:: ../examples/tune_analyze_results - :type: ref - :text: Analyzing Tune Experiment Results - :classes: btn-link btn-block stretched-link + How to Save and Load Trial Checkpoints - --- - :img-top: /images/tune.png + .. grid-item-card:: + :img-top: /images/tune.png + :class-img-top: pt-2 w-75 d-block mx-auto fixed-height-img - .. link-button:: ../examples/pbt_guide - :type: ref - :text: A Guide to Population-Based Training - :classes: btn-link btn-block stretched-link + .. button-ref:: tune-storage - --- - :img-top: /images/tune.png + How to Configure Storage Options for a Distributed Tune Experiment - .. link-button:: tune-distributed - :type: ref - :text: Deploying Tune in the Cloud - :classes: btn-link btn-block stretched-link + .. 
grid-item-card:: + :img-top: /images/tune.png + :class-img-top: pt-2 w-75 d-block mx-auto fixed-height-img - --- - :img-top: /images/tune.png + .. button-ref:: tune-fault-tolerance - .. link-button:: tune-lifecycle - :type: ref - :text: Tune Architecture - :classes: btn-link btn-block stretched-link + How to Enable Fault Tolerance in Ray Tune - --- - :img-top: /images/tune.png + .. grid-item-card:: + :img-top: /images/tune.png + :class-img-top: pt-2 w-75 d-block mx-auto fixed-height-img - .. link-button:: tune-scalability - :type: ref - :text: Scalability Benchmarks - :classes: btn-link btn-block stretched-link + .. button-ref:: tune-metrics + Using Callbacks and Metrics + + .. grid-item-card:: + :img-top: /images/tune.png + :class-img-top: pt-2 w-75 d-block mx-auto fixed-height-img + + .. button-ref:: ../tutorials/tune_get_data_in_and_out + + Getting Data in and out of Tune + + .. grid-item-card:: + :img-top: /images/tune.png + :class-img-top: pt-2 w-75 d-block mx-auto fixed-height-img + + .. button-ref:: ../examples/tune_analyze_results + + Analyzing Tune Experiment Results + + .. grid-item-card:: + :img-top: /images/tune.png + :class-img-top: pt-2 w-75 d-block mx-auto fixed-height-img + + .. button-ref:: ../examples/pbt_guide + + A Guide to Population-Based Training + + .. grid-item-card:: + :img-top: /images/tune.png + :class-img-top: pt-2 w-75 d-block mx-auto fixed-height-img + + .. button-ref:: tune-distributed + + Deploying Tune in the Cloud + + .. grid-item-card:: + :img-top: /images/tune.png + :class-img-top: pt-2 w-75 d-block mx-auto fixed-height-img + + .. button-ref:: tune-lifecycle + + Tune Architecture + + .. grid-item-card:: + :img-top: /images/tune.png + :class-img-top: pt-2 w-75 d-block mx-auto fixed-height-img + + .. 
button-ref:: tune-scalability + + Scalability Benchmarks From c54e2458f5d000a4ff3e18e45a45dbfa3744539e Mon Sep 17 00:00:00 2001 From: Philipp Moritz Date: Wed, 3 May 2023 23:38:14 -0700 Subject: [PATCH 225/424] Add debugging instructions for test_gcs_ha_e2e.py (#35016) Add some debugging instructions for the `test_gcs_ha_e2e.py` tests. --- python/ray/tests/test_gcs_ha_e2e.py | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/python/ray/tests/test_gcs_ha_e2e.py b/python/ray/tests/test_gcs_ha_e2e.py index 8b12e7a91e5d..bbd1b22eae72 100644 --- a/python/ray/tests/test_gcs_ha_e2e.py +++ b/python/ray/tests/test_gcs_ha_e2e.py @@ -8,6 +8,29 @@ from http.client import HTTPConnection +# If you need to debug these tests, comment in the volume +# mounts in the head node and worker node containers below and use +# the repro-ci.py script to spin up an instance. The test +# setup is a little intricate, as it uses docker-in-docker. +# You need to ssh into the host machine, find the +# docker-in-docker container with +# +# docker ps +# +# Log into the container with +# +# docker exec -it sh +# +# And run +# +# mkdir -p /tmp/ray +# chmod 777 /tmp/ray +# +# Now you can re-run the test and the logs will show +# up in /tmp/ray in the docker-in-docker container. +# Good luck! 
+ + class Container(wrappers.Container): def ready(self): self._container.reload() @@ -68,6 +91,9 @@ def client(self): ports={ "8000/tcp": None, }, + # volumes={ + # "/tmp/ray/": {"bind": "/tmp/ray/", "mode": "rw"} + # }, ) worker_node = container( @@ -89,6 +115,9 @@ def client(self): ports={ "8000/tcp": None, }, + # volumes={ + # "/tmp/ray/": {"bind": "/tmp/ray/", "mode": "rw"} + # }, ) From ca6856481d3e0e4bf00bc2798d5b2aff7ba2113a Mon Sep 17 00:00:00 2001 From: Kai Fricke Date: Thu, 4 May 2023 11:17:23 +0100 Subject: [PATCH 226/424] [air/output] Print experiment information at experiment start (#34952) Includes #34788 This PR prints experiment information at the start of the experiment. Signed-off-by: Kai Fricke --- python/ray/tune/experimental/output.py | 59 +++++++++++++++++++++++++- python/ray/tune/tune.py | 15 +++++++ 2 files changed, 73 insertions(+), 1 deletion(-) diff --git a/python/ray/tune/experimental/output.py b/python/ray/tune/experimental/output.py index fe649658269d..111999778b16 100644 --- a/python/ray/tune/experimental/output.py +++ b/python/ray/tune/experimental/output.py @@ -1,3 +1,4 @@ +import sys from typing import List, Dict, Optional, Tuple, Any, TYPE_CHECKING, Collection import contextlib @@ -463,6 +464,26 @@ def __init__(self, verbosity: AirVerbosity): self._start_time = time.time() self._last_heartbeat_time = 0 + def experiment_started( + self, + experiment_name: str, + experiment_path: str, + searcher_str: str, + scheduler_str: str, + total_num_samples: int, + tensorboard_path: Optional[str] = None, + **kwargs, + ): + print(f"\nView detailed results here: {experiment_path}") + + if tensorboard_path: + print( + f"To visualize your results with TensorBoard, run: " + f"`tensorboard --logdir {tensorboard_path}`" + ) + + print("") + @property def _time_heartbeat_str(self): current_time_str, running_for_str = _get_time_str(self._start_time, time.time()) @@ -557,6 +578,42 @@ def _print_heartbeat(self, trials, *sys_args): class 
TuneTerminalReporter(TuneReporterBase): + def experiment_started( + self, + experiment_name: str, + experiment_path: str, + searcher_str: str, + scheduler_str: str, + total_num_samples: int, + tensorboard_path: Optional[str] = None, + **kwargs, + ): + if total_num_samples > sys.maxsize: + total_num_samples_str = "infinite" + else: + total_num_samples_str = str(total_num_samples) + + print( + tabulate( + [ + ["Search algorithm", searcher_str], + ["Scheduler", scheduler_str], + ["Number of trials", total_num_samples_str], + ], + headers=["Configuration for experiment", experiment_name], + tablefmt=AIR_TABULATE_TABLEFMT, + ) + ) + super().experiment_started( + experiment_name=experiment_name, + experiment_path=experiment_path, + searcher_str=searcher_str, + scheduler_str=scheduler_str, + total_num_samples=total_num_samples, + tensorboard_path=tensorboard_path, + **kwargs, + ) + def _print_heartbeat(self, trials, *sys_args): if self._verbosity < self._heartbeat_threshold: return @@ -575,7 +632,7 @@ def _print_heartbeat(self, trials, *sys_args): tabulate( all_infos, headers=header, - tablefmt="simple", + tablefmt=AIR_TABULATE_TABLEFMT, showindex=False, ) ) diff --git a/python/ray/tune/tune.py b/python/ray/tune/tune.py index 9feff6028e29..f7ba58d6e923 100644 --- a/python/ray/tune/tune.py +++ b/python/ray/tune/tune.py @@ -37,6 +37,7 @@ ) from ray.tune.impl.placeholder import create_resolvers_map, inject_placeholders +from ray.tune.logger import TBXLoggerCallback from ray.tune.progress_reporter import ( ProgressReporter, _detect_reporter, @@ -957,10 +958,24 @@ class and registered trainables. 
with contextlib.ExitStack() as stack: from ray.tune.experimental.output import TuneRichReporter + if any(isinstance(cb, TBXLoggerCallback) for cb in callbacks): + tensorboard_path = runner._local_experiment_path + else: + tensorboard_path = None + if air_progress_reporter and isinstance( air_progress_reporter, TuneRichReporter ): stack.enter_context(air_progress_reporter.with_live()) + elif air_progress_reporter: + air_progress_reporter.experiment_started( + experiment_name=runner._experiment_dir_name, + experiment_path=runner.experiment_path, + searcher_str=search_alg.__class__.__name__, + scheduler_str=scheduler.__class__.__name__, + total_num_samples=search_alg.total_samples, + tensorboard_path=tensorboard_path, + ) try: while ( From f1706c4ffc75b3c4d97dfdf40ee76bf3d86c24d2 Mon Sep 17 00:00:00 2001 From: Kai Fricke Date: Thu, 4 May 2023 12:51:39 +0100 Subject: [PATCH 227/424] Revert "[Core] Print out the address of the connected gcs" (#35036) Reverts ray-project/ray#35028 broke master --- src/ray/gcs/gcs_client/gcs_client.cc | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/ray/gcs/gcs_client/gcs_client.cc b/src/ray/gcs/gcs_client/gcs_client.cc index 7d54c770da44..9e7d3504f882 100644 --- a/src/ray/gcs/gcs_client/gcs_client.cc +++ b/src/ray/gcs/gcs_client/gcs_client.cc @@ -128,8 +128,7 @@ Status GcsClient::Connect(instrumented_io_context &io_service) { internal_kv_accessor_ = std::make_unique(this); task_accessor_ = std::make_unique(this); - RAY_LOG(INFO) << "GcsClient connected " << options_.gcs_address_ << ":" - << options_.gcs_port_; + RAY_LOG(DEBUG) << "GcsClient connected."; return Status::OK(); } From c7a3e681736edea9014ad0e0b3aacb609f456cb2 Mon Sep 17 00:00:00 2001 From: Kai Fricke Date: Thu, 4 May 2023 13:51:18 +0100 Subject: [PATCH 228/424] [dependencies] Install dependencies in one go in CI (#34741) This PR updates the way we install our dependencies in the CI in one go. 
This resolves a number of incompatible dependencies and matches the process we employ in our docker images. Previously, in CI, we installed a number of dependencies _sequentially_. For instance, we would e.g. first install `requirements_test.txt`, then `requirements_tune.txt`, and then `requirements_train.txt`. In all cases, we would pass our `python/requirements.txt` as a constraints file. However, because of conflicting downstream dependencies, this can lead to an incompatible state. With this PR, we update the dependency installation to collect the files and packages that should be installed, and install them in a single command. This ensures that common subdependencies are resolved at the same time, and conflicts are resolved (or errors are thrown). This is also the way we install dependencies in our Docker images. For this, we refactor parts of `install-dependencies.sh` and pin some dependencies/subdependencies that were incompatible. Please note that this does not ensure we end up with the same python libraries in docker - in Docker, we don't install `requirements_test.txt`, so there are fewer constraints on the dependencies. However, this PR can be used to e.g. use `pip-compile` to compile a list of dependency constraints, and use these in the docker image build to achieve the same dependencies. 
Signed-off-by: Kai Fricke --- .buildkite/pipeline.build.yml | 2 + ci/build/build-docker-images.py | 5 +- ci/env/env_info.sh | 8 + ci/env/install-dependencies.sh | 230 +++++++++++------- doc/requirements-doc.txt | 4 +- docker/ray-ml/Dockerfile | 12 +- .../data_processing/requirements.txt | 8 +- python/requirements/ml/requirements_dl.txt | 3 +- .../requirements/ml/requirements_no_deps.txt | 6 + python/requirements/ml/requirements_rllib.txt | 1 - python/requirements/ml/requirements_train.txt | 4 +- python/requirements/ml/requirements_tune.txt | 6 +- .../requirements/ml/requirements_upstream.txt | 2 +- python/requirements_test.txt | 27 +- 14 files changed, 208 insertions(+), 110 deletions(-) create mode 100644 python/requirements/ml/requirements_no_deps.txt diff --git a/.buildkite/pipeline.build.yml b/.buildkite/pipeline.build.yml index fb10e7c3523c..81debe8a17bf 100644 --- a/.buildkite/pipeline.build.yml +++ b/.buildkite/pipeline.build.yml @@ -493,6 +493,8 @@ - cleanup() { if [ "${BUILDKITE_PULL_REQUEST}" = "false" ]; then ./ci/build/upload_build_info.sh; fi }; trap cleanup EXIT - pip uninstall -y ray - RAY_DEBUG_BUILD=debug ./ci/ci.sh build + # Install latest pyspark. 
We cannot move this to the requirements file as subdependencies conflict + - pip install -U https://ml-team-public-read.s3.us-west-2.amazonaws.com/spark-pkgs/pyspark-3.4.0.dev0-0cb0fa313979e1b82ddd711a05d8c4e78cf6c9f5.tar.gz - ./ci/env/env_info.sh - bazel test --config=ci-debug $(./ci/run/bazel_export_options) --test_env=RAY_ON_SPARK_BACKGROUND_JOB_STARTUP_WAIT=1 diff --git a/ci/build/build-docker-images.py b/ci/build/build-docker-images.py index fdd0d3979d48..6b5b35f45055 100644 --- a/ci/build/build-docker-images.py +++ b/ci/build/build-docker-images.py @@ -433,7 +433,9 @@ def build_or_pull_base_images( def prep_ray_ml(): root_dir = _get_root_dir() - requirements_files = ["python/requirements.txt"] + requirements_files = [ + "python/requirements.txt", + ] ml_requirements_files = [ "python/requirements/ml/requirements_ml_docker.txt", "python/requirements/ml/requirements_dl.txt", @@ -441,6 +443,7 @@ def prep_ray_ml(): "python/requirements/ml/requirements_rllib.txt", "python/requirements/ml/requirements_train.txt", "python/requirements/ml/requirements_upstream.txt", + "python/requirements/ml/requirements_no_deps.txt", ] # We don't need these in the ml docker image ignore_requirements = [ diff --git a/ci/env/env_info.sh b/ci/env/env_info.sh index f39376fcc548..e84666a524ef 100755 --- a/ci/env/env_info.sh +++ b/ci/env/env_info.sh @@ -8,6 +8,10 @@ echo "Installed pip packages:" python -m pip freeze 2>/dev/null || echo 'Pip not installed' echo "----------------------------" +if [ -n "${BUILDKITE-}" ] && [ -d "/artifact-mount" ]; then + python -m pip freeze > /artifact-mount/pip_freeze.txt +fi + echo "GPU information" echo "----------------------------" GPUCMD="nvidia-smi" @@ -17,5 +21,9 @@ then else eval "${GPUCMD}" python -c "import torch; print('Torch cuda available:', torch.cuda.is_available())" + + if [ -n "${BUILDKITE-}" ] && [ -d "/artifact-mount" ]; then + eval "${GPUCMD}" > /artifact-mount/nvidia_smi.txt + fi fi echo "----------------------------" diff --git 
a/ci/env/install-dependencies.sh b/ci/env/install-dependencies.sh index 92630a1e56e1..a6a19977e0e8 100755 --- a/ci/env/install-dependencies.sh +++ b/ci/env/install-dependencies.sh @@ -297,101 +297,84 @@ download_mnist() { unzip "${HOME}/data/mnist.zip" -d "${HOME}/data" } -install_pip_packages() { +retry_pip_install() { + local pip_command=$1 + local status="0" + local errmsg="" + + # Try n times; we often encounter OpenSSL.SSL.WantReadError (or others) + # that break the entire CI job: Simply retry installation in this case + # after n seconds. + for _ in {1..3}; do + errmsg=$(eval "${pip_command}" 2>&1) && break + status=$errmsg && echo "'pip install ...' failed, will retry after n seconds!" && sleep 30 + done + if [ "$status" != "0" ]; then + echo "${status}" && return 1 + fi +} +install_pip_packages() { # Install modules needed in all jobs. # shellcheck disable=SC2262 alias pip="python -m pip" - if [ "${MINIMAL_INSTALL-}" != 1 ]; then - # Some architectures will build dm-tree from source. - # Move bazelrc to a different location temporarily to disable --config=ci settings - mv "$HOME/.bazelrc" "$HOME/._brc" || true - pip install --no-clean dm-tree==0.1.5 # --no-clean is due to: https://github.com/deepmind/tree/issues/5 - mv "$HOME/._brc" "$HOME/.bazelrc" || true - fi - - if { [ -n "${PYTHON-}" ] || [ "${DL-}" = "1" ]; } && [ "${MINIMAL_INSTALL-}" != 1 ]; then - # Remove this entire section once Serve dependencies are fixed. - if { [ -z "${BUILDKITE-}" ] || [ "${DL-}" = "1" ]; } && [ "${DOC_TESTING-}" != 1 ] && [ "${TRAIN_TESTING-}" != 1 ] && [ "${TUNE_TESTING-}" != 1 ] && [ "${RLLIB_TESTING-}" != 1 ]; then - # We want to install the CPU version only. - pip install -U -c "${WORKSPACE_DIR}"/python/requirements.txt -r "${WORKSPACE_DIR}"/python/requirements/ml/requirements_dl.txt - fi - - # Try n times; we often encounter OpenSSL.SSL.WantReadError (or others) - # that break the entire CI job: Simply retry installation in this case - # after n seconds. 
- local status="0"; - local errmsg=""; - for _ in {1..3}; do - errmsg=$(CC=gcc pip install -Ur "${WORKSPACE_DIR}"/python/requirements.txt 2>&1) && break; - status=$errmsg && echo "'pip install ...' failed, will retry after n seconds!" && sleep 30; - done - if [ "$status" != "0" ]; then - echo "${status}" && return 1 - fi + # Array to hold all requirements files to install later + requirements_files=() + # Single packages to install in sync with files + requirements_packages=() + # Packages to install _after_ previous files have been installed + # (e.g. to install a custom pyarrow or torch version). This + # would otherwise conflict with pinned dependencies in our requirements + # files. + delayed_packages=() - # Repeat for requirements_test.txt - local status="0"; - local errmsg=""; - for _ in {1..3}; do - errmsg=$(CC=gcc pip install -U -c "${WORKSPACE_DIR}"/python/requirements.txt -r "${WORKSPACE_DIR}"/python/requirements_test.txt 2>&1) && break; - status=$errmsg && echo "'pip install ...' failed, will retry after n seconds!" && sleep 30; - done - if [ "$status" != "0" ]; then - echo "${status}" && return 1 - fi - - fi + requirements_files+=("${WORKSPACE_DIR}/python/requirements_test.txt") if [ "${LINT-}" = 1 ]; then install_linters - # readthedocs has an antiquated build env. - # This is a best effort to reproduce it locally to avoid doc build failures and hidden errors. - local python_version - python_version="$(python -s -c "import sys; print('%s.%s' % sys.version_info[:2])")" - if [ "${OSTYPE}" = msys ] && [ "${python_version}" = "3.8" ]; then - { echo "WARNING: Pillow binaries not available on Windows; cannot build docs"; } 2> /dev/null - else - pip install --use-deprecated=legacy-resolver -r "${WORKSPACE_DIR}"/doc/requirements-doc.txt - fi + + requirements_files+=("${WORKSPACE_DIR}/doc/requirements-doc.txt") fi # Additional default doc testing dependencies. 
if [ "${DOC_TESTING-}" = 1 ]; then - # For Ray Core and Ray Serve DAG visualization docs test - sudo apt-get install -y graphviz - pip install -U pydot # For DAG visualization - # For the dataset examples - sudo apt-get install -y tesseract-ocr - pip install -U pytesseract "spacy>=3" spacy_langdetect - python -m spacy download en_core_web_sm + # For Ray Core and Ray Serve DAG visualization docs test + dataset examples + sudo apt-get install -y graphviz tesseract-ocr + + # For DAG visualization + requirements_packages+=("pydot") + requirements_packages+=("pytesseract") + requirements_packages+=("spacy>=3") + requirements_packages+=("spacy_langdetect") fi # Additional RLlib test dependencies. if [ "${RLLIB_TESTING-}" = 1 ] || [ "${DOC_TESTING-}" = 1 ]; then - pip install -U -c "${WORKSPACE_DIR}"/python/requirements.txt -r "${WORKSPACE_DIR}"/python/requirements/ml/requirements_rllib.txt + requirements_files+=("${WORKSPACE_DIR}/python/requirements/ml/requirements_rllib.txt") #TODO(amogkam): Add this back to requirements_rllib.txt once mlagents no longer pins torch<1.9.0 version. pip install --no-dependencies mlagents==0.28.0 fi - SITE_PACKAGES=$(python -c 'from distutils.sysconfig import get_python_lib; print(get_python_lib())') + # Some Ray Train dependencies have to be installed with --no-deps, + # as sub-dependencies conflict. The packages still work for our workflows. + # Todo(krfricke): Try to remove once we move to Python 3.8 in CI. + local install_ml_no_deps=0 # Additional Train test dependencies. if [ "${TRAIN_TESTING-}" = 1 ] || [ "${DOC_TESTING-}" = 1 ]; then - pip install -U -c "${WORKSPACE_DIR}"/python/requirements.txt -r "${WORKSPACE_DIR}"/python/requirements/ml/requirements_train.txt + requirements_files+=("${WORKSPACE_DIR}/python/requirements/ml/requirements_train.txt") + install_ml_no_deps=1 fi - # Additional Tune/Doc test dependencies. 
if [ "${TUNE_TESTING-}" = 1 ] || [ "${DOC_TESTING-}" = 1 ]; then - pip install -U -c "${WORKSPACE_DIR}"/python/requirements.txt -r "${WORKSPACE_DIR}"/python/requirements/ml/requirements_tune.txt - download_mnist + requirements_files+=("${WORKSPACE_DIR}/python/requirements/ml/requirements_tune.txt") fi # For Tune, install upstream dependencies. if [ "${TUNE_TESTING-}" = 1 ] || [ "${DOC_TESTING-}" = 1 ]; then - pip install -U -c "${WORKSPACE_DIR}"/python/requirements.txt -r "${WORKSPACE_DIR}"/python/requirements/ml/requirements_upstream.txt + requirements_files+=("${WORKSPACE_DIR}/python/requirements/ml/requirements_upstream.txt") fi # Additional dependency for Ludwig. @@ -399,51 +382,86 @@ install_pip_packages() { # dependencies with Modin. if [ "${INSTALL_LUDWIG-}" = 1 ]; then # TODO: eventually pin this to master. - pip install -U "ludwig[test]>=0.4" "jsonschema>=4" + requirements_packages+=("ludwig[test]>=0.4") + requirements_packages+=("jsonschema>=4") fi # Additional dependency for time series libraries. # This cannot be included in requirements_tune.txt as it has conflicting # dependencies. if [ "${INSTALL_TIMESERIES_LIBS-}" = 1 ]; then - pip install -U "statsforecast==1.5.0" "prophet==1.1.1" + requirements_packages+=("statsforecast==1.5.0") + requirements_packages+=("prophet==1.1.1") fi # Data processing test dependencies. 
if [ "${DATA_PROCESSING_TESTING-}" = 1 ] || [ "${DOC_TESTING-}" = 1 ]; then - pip install -U -c "${WORKSPACE_DIR}"/python/requirements.txt -r "${WORKSPACE_DIR}"/python/requirements/data_processing/requirements.txt + requirements_files+=("${WORKSPACE_DIR}/python/requirements/data_processing/requirements.txt") fi if [ "${DATA_PROCESSING_TESTING-}" = 1 ]; then - pip install -U -c "${WORKSPACE_DIR}"/python/requirements.txt -r "${WORKSPACE_DIR}"/python/requirements/data_processing/requirements_dataset.txt + requirements_files+=("${WORKSPACE_DIR}/python/requirements/data_processing/requirements_dataset.txt") if [ -n "${ARROW_VERSION-}" ]; then if [ "${ARROW_VERSION-}" = nightly ]; then - pip install --extra-index-url https://pypi.fury.io/arrow-nightlies/ --prefer-binary --pre pyarrow + delayed_packages+=("--extra-index-url") + delayed_packages+=("https://pypi.fury.io/arrow-nightlies/") + delayed_packages+=("--prefer-binary") + delayed_packages+=("--pre") + delayed_packages+=("pyarrow") else - pip install -U pyarrow=="${ARROW_VERSION}" + delayed_packages+=("pyarrow==${ARROW_VERSION}") fi fi if [ -n "${ARROW_MONGO_VERSION-}" ]; then - pip install -U pymongoarrow=="${ARROW_MONGO_VERSION}" + delayed_packages+=("pymongoarrow==${ARROW_MONGO_VERSION}") fi fi - # Remove this entire section once Serve dependencies are fixed. - if [ "${MINIMAL_INSTALL-}" != 1 ] && [ "${DOC_TESTING-}" != 1 ] && [ "${TRAIN_TESTING-}" != 1 ] && [ "${TUNE_TESTING-}" != 1 ] && [ "${RLLIB_TESTING-}" != 1 ]; then - # If CI has deemed that a different version of Torch - # should be installed, then upgrade/downgrade to that specific version. 
- if [ -n "${TORCH_VERSION-}" ]; then - case "${TORCH_VERSION-1.9.0}" in - 1.9.0) TORCHVISION_VERSION=0.10.0;; - 1.8.1) TORCHVISION_VERSION=0.9.1;; - 1.5) TORCHVISION_VERSION=0.6.0;; - *) TORCHVISION_VERSION=0.5.0;; - esac - pip install --use-deprecated=legacy-resolver --upgrade torch=="${TORCH_VERSION-1.9.0}" torchvision=="${TORCHVISION_VERSION}" - fi + if [ "${install_ml_no_deps}" = 1 ]; then + # Install these requirements first. Their dependencies may be overwritten later + # by the main install. + pip install -r "${WORKSPACE_DIR}/python/requirements/ml/requirements_no_deps.txt" + fi + + retry_pip_install "CC=gcc pip install -Ur ${WORKSPACE_DIR}/python/requirements.txt" + + # Install deeplearning libraries (Torch + TensorFlow) + if [ -n "${TORCH_VERSION-}" ] || [ "${DL-}" = "1" ] || [ "${RLLIB_TESTING-}" = 1 ] || [ "${TRAIN_TESTING-}" = 1 ] || [ "${TUNE_TESTING-}" = 1 ]; then + # If we require a custom torch version, use that + if [ -n "${TORCH_VERSION-}" ]; then + case "${TORCH_VERSION-1.9.0}" in + 1.9.0) TORCHVISION_VERSION=0.10.0;; + 1.8.1) TORCHVISION_VERSION=0.9.1;; + 1.6) TORCHVISION_VERSION=0.7.0;; + 1.5) TORCHVISION_VERSION=0.6.0;; + *) TORCHVISION_VERSION=0.5.0;; + esac + # Install right away, as some dependencies (e.g. torch-spline-conv) need + # torch to be installed for their own install. + pip install -U "torch==${TORCH_VERSION-1.9.0}" "torchvision==${TORCHVISION_VERSION}" + # We won't add requirements_dl.txt as it would otherwise overwrite our custom + # torch. Thus we have also have to install tensorflow manually. + TF_PACKAGE=$(grep "tensorflow==" "${WORKSPACE_DIR}/python/requirements/ml/requirements_dl.txt") + TFPROB_PACKAGE=$(grep "tensorflow-probability==" "${WORKSPACE_DIR}/python/requirements/ml/requirements_dl.txt") + + # %%;* deletes everything after ; to get rid of e.g. python version specifiers + pip install -U "${TF_PACKAGE%%;*}" "${TFPROB_PACKAGE%%;*}" + else + # Otherwise, use pinned default torch version. 
+ # Again, install right away, as some dependencies (e.g. torch-spline-conv) need + # torch to be installed for their own install. + TORCH_PACKAGE=$(grep "torch==" "${WORKSPACE_DIR}/python/requirements/ml/requirements_dl.txt") + TORCHVISION_PACKAGE=$(grep "torchvision==" "${WORKSPACE_DIR}/python/requirements/ml/requirements_dl.txt") + + # %%;* deletes everything after ; to get rid of e.g. python version specifiers + pip install "${TORCH_PACKAGE%%;*}" "${TORCHVISION_PACKAGE%%;*}" + requirements_files+=("${WORKSPACE_DIR}/python/requirements/ml/requirements_dl.txt") + fi fi # Inject our own mirror for the CIFAR10 dataset if [ "${TRAIN_TESTING-}" = 1 ] || [ "${TUNE_TESTING-}" = 1 ] || [ "${DOC_TESTING-}" = 1 ]; then + SITE_PACKAGES=$(python -c 'from distutils.sysconfig import get_python_lib; print(get_python_lib())') + TF_CIFAR="${SITE_PACKAGES}/tensorflow/python/keras/datasets/cifar10.py" TORCH_CIFAR="${SITE_PACKAGES}/torchvision/datasets/cifar.py" @@ -453,17 +471,46 @@ install_pip_packages() { "$TORCH_CIFAR" fi + # Generate the pip command with collected requirements files + pip_cmd="pip install -U -c ${WORKSPACE_DIR}/python/requirements.txt" + for file in "${requirements_files[@]}"; do + pip_cmd+=" -r ${file}" + done + + # Expand single requirements + if [ "${#requirements_packages[@]}" -gt 0 ]; then + pip_cmd+=" ${requirements_packages[*]}" + fi + + # Install + eval "${pip_cmd}" + + # Install delayed packages + if [ "${#delayed_packages[@]}" -gt 0 ]; then + pip install -U -c "${WORKSPACE_DIR}/python/requirements.txt" "${delayed_packages[@]}" + fi + # Additional Tune dependency for Horovod. # This must be run last (i.e., torch cannot be re-installed after this) if [ "${INSTALL_HOROVOD-}" = 1 ]; then "${SCRIPT_DIR}"/install-horovod.sh fi - # install hdfs if needed. 
- if [ "${INSTALL_HDFS-}" = 1 ]; then - "${SCRIPT_DIR}"/install-hdfs.sh + if [ "${TUNE_TESTING-}" = 1 ] || [ "${DOC_TESTING-}" = 1 ]; then + download_mnist + fi + + if [ "${DOC_TESTING-}" = 1 ]; then + # Todo: This downgrades spacy and related dependencies because + # `en_core_web_sm` is only compatible with spacy < 3.6. + # We should move to a model that does not depend on a stale version. + python -m spacy download en_core_web_sm fi +} +install_thirdparty_packages() { + # shellcheck disable=SC2262 + alias pip="python -m pip" CC=gcc pip install psutil setproctitle==1.2.2 colorama --target="${WORKSPACE_DIR}/python/ray/thirdparty_files" } @@ -490,7 +537,16 @@ install_dependencies() { fi fi - install_pip_packages + # install hdfs if needed. + if [ "${INSTALL_HDFS-}" = 1 ]; then + "${SCRIPT_DIR}"/install-hdfs.sh + fi + + if [ "${MINIMAL_INSTALL-}" != "1" ]; then + install_pip_packages + fi + + install_thirdparty_packages } install_dependencies "$@" diff --git a/doc/requirements-doc.txt b/doc/requirements-doc.txt index d475b3d9e606..3212c1ef2c9b 100644 --- a/doc/requirements-doc.txt +++ b/doc/requirements-doc.txt @@ -46,7 +46,7 @@ git+https://github.com/ray-project/lightgbm_ray@main#lightgbm_ray git+https://github.com/ray-project/ray_lightning@main#ray_lightning # Syntax highlighting -Pygments==2.11.2 +Pygments==2.13.0 # Sphinx sphinx==4.3.2 @@ -57,7 +57,7 @@ sphinx-jsonschema==1.17.2 sphinx-panels==0.6.0 sphinx-version-warning==1.1.2 sphinx-book-theme==0.3.3 -sphinx-external-toc==0.2.3 +sphinx-external-toc==0.2.4 sphinx-sitemap==2.2.0 sphinxcontrib-redoc==1.6.0 sphinx-tabs==3.4.0 diff --git a/docker/ray-ml/Dockerfile b/docker/ray-ml/Dockerfile index e1bbcd8e4243..72949a1a2374 100644 --- a/docker/ray-ml/Dockerfile +++ b/docker/ray-ml/Dockerfile @@ -4,6 +4,7 @@ ARG PYTHON_MINOR_VERSION=7 # We have to uninstall wrapt this way for Tensorflow compatibility COPY requirements.txt ./ +COPY requirements_no_deps.txt ./ COPY requirements_dl.txt ./ COPY requirements_ml_docker.txt 
./ COPY requirements_rllib.txt ./ @@ -19,11 +20,15 @@ RUN sudo apt-get update \ libgl1-mesa-dev \ unzip \ unrar \ - && $HOME/anaconda3/bin/pip --no-cache-dir install -U pip \ - # First, install requirements + && $HOME/anaconda3/bin/pip --no-cache-dir install -U pip pip-tools \ + # Install no-deps requirements. Their dependencies may be overwritten + # in subsequent steps + && $HOME/anaconda3/bin/pip --no-cache-dir install -U \ + -r requirements_no_deps.txt \ + # Then, install requirements && $HOME/anaconda3/bin/pip --no-cache-dir install -U \ -r requirements.txt \ - # Then, keep requirements bounds as constraints and install remaining test dependencies + # Install other requirements. Keep pinned requirements bounds as constraints && $HOME/anaconda3/bin/pip --no-cache-dir install -U \ -c requirements.txt \ -r requirements_rllib.txt \ @@ -32,6 +37,7 @@ RUN sudo apt-get update \ -r requirements_upstream.txt \ # explicitly install (overwrite) pytorch with CUDA support && $HOME/anaconda3/bin/pip --no-cache-dir install -U \ + -c requirements.txt \ -r requirements_ml_docker.txt \ # Remove dataclasses & typing because they are included in Python > 3.6 && if [ $(python -c 'import sys; print(sys.version_info.minor)') != "6" ]; then \ diff --git a/python/requirements/data_processing/requirements.txt b/python/requirements/data_processing/requirements.txt index e33537d03db4..4ec222774ace 100644 --- a/python/requirements/data_processing/requirements.txt +++ b/python/requirements/data_processing/requirements.txt @@ -3,12 +3,12 @@ dask[complete]==2022.2.0; python_version < '3.8' dask[complete]==2022.10.1; python_version >= '3.8' -aioboto3==8.3.0 +aioboto3==11.0.1 crc32c==2.3 flask_cors -s3fs -modin>=0.8.3; python_version < '3.7' -modin>=0.11.0; python_version >= '3.7' +s3fs==2023.1.0 +modin==0.12.1; python_version < '3.8' +modin==0.18.1; python_version >= '3.8' pytest-repeat raydp>=0.0.dev0 responses==0.13.4 diff --git a/python/requirements/ml/requirements_dl.txt 
b/python/requirements/ml/requirements_dl.txt index 5549414a9f69..baab31deaaf9 100644 --- a/python/requirements/ml/requirements_dl.txt +++ b/python/requirements/ml/requirements_dl.txt @@ -5,7 +5,8 @@ tensorflow==2.11.0; sys_platform != 'darwin' or platform_machine != 'arm64' tensorflow-macos==2.11.0; sys_platform == 'darwin' and platform_machine == 'arm64' tensorflow-probability==0.19.0 -# If you make changes below this line, please also make the corresponding changes to `requirements_ml_docker.txt`! +# If you make changes below this line, please also make the corresponding changes to `requirements_ml_docker.txt` +# and to `install-dependencies.sh`! --extra-index-url https://download.pytorch.org/whl/cpu # for CPU versions of torch, torchvision --find-links https://data.pyg.org/whl/torch-1.13.0+cpu.html # for CPU versions of torch-scatter, torch-sparse, torch-cluster, torch-spline-conv diff --git a/python/requirements/ml/requirements_no_deps.txt b/python/requirements/ml/requirements_no_deps.txt new file mode 100644 index 000000000000..6d2754211cd2 --- /dev/null +++ b/python/requirements/ml/requirements_no_deps.txt @@ -0,0 +1,6 @@ +# These requirements have outdated or incompatible downstream dependencies. +# Thus we install them on a best effort basis before any other packages +# (without constraints), but their dependencies may be overwritten afterwards. 
+ +# mosaicml requires importlib-metadata>5, but flake8 is not compatible with it +mosaicml==0.12.1 diff --git a/python/requirements/ml/requirements_rllib.txt b/python/requirements/ml/requirements_rllib.txt index c976b3f0889a..2d1b6da4b695 100644 --- a/python/requirements/ml/requirements_rllib.txt +++ b/python/requirements/ml/requirements_rllib.txt @@ -43,7 +43,6 @@ imageio-ffmpeg==0.4.5 onnx==1.12.0; sys_platform != 'darwin' or platform_machine != 'arm64' onnxruntime==1.14.1; sys_platform != 'darwin' or platform_machine != 'arm64' tf2onnx==1.13.0; sys_platform != 'darwin' or platform_machine != 'arm64' -typer==0.6.1 rich==12.0.1 # Msgpack checkpoint stuff. msgpack diff --git a/python/requirements/ml/requirements_train.txt b/python/requirements/ml/requirements_train.txt index cbec6de14581..5f999b3648f4 100644 --- a/python/requirements/ml/requirements_train.txt +++ b/python/requirements/ml/requirements_train.txt @@ -2,8 +2,8 @@ -r requirements_dl.txt -mosaicml==0.12.1 -mlflow==1.30.0 +mlflow==1.30.0; python_version <= '3.7' +mlflow==2.2.2; python_version > '3.7' tensorboardX==2.4.1 # Dependencies for Hugging Face examples & tests: diff --git a/python/requirements/ml/requirements_tune.txt b/python/requirements/ml/requirements_tune.txt index 44f4704454c8..f85c4d9299f5 100644 --- a/python/requirements/ml/requirements_tune.txt +++ b/python/requirements/ml/requirements_tune.txt @@ -11,7 +11,7 @@ dragonfly-opt==0.1.6 flaml==1.1.1 freezegun==1.1.0 # Requires decord which is unavailable for arm64 -gluoncv==0.10.1.post0; platform_machine != "arm64" +gluoncv==0.10.5.post0; platform_machine != "arm64" gpy==1.10.0 # Requires libtorrent which is unavailable for arm64 autorom[accept-rom-license]; platform_machine != "arm64" @@ -22,7 +22,8 @@ hyperopt==0.2.5 jupyterlab==3.6.1 lightgbm==3.3.5 matplotlib!=3.4.3 -mlflow==1.30.0 +mlflow==1.30.0; python_version <= '3.7' +mlflow==2.2.2; python_version > '3.7' # Unavailable for arm64 in more recent versions mxnet==1.8.0.post0; 
platform_machine != "arm64" nevergrad==0.4.3.post7 @@ -31,7 +32,6 @@ optuna==2.10.0 pymoo==0.5.0 pytest-remotedata==0.3.2 lightning-bolts==0.4.0 -protobuf==3.19.6 pytorch-lightning==1.6.5 fairscale==0.4.6 s3fs==2023.1.0 diff --git a/python/requirements/ml/requirements_upstream.txt b/python/requirements/ml/requirements_upstream.txt index a5da3cce16ed..7c0b3abbeaee 100644 --- a/python/requirements/ml/requirements_upstream.txt +++ b/python/requirements/ml/requirements_upstream.txt @@ -6,5 +6,5 @@ ray_lightning==0.3.0 tune-sklearn==0.4.4 xgboost_ray==0.1.15 lightgbm_ray==0.1.8 -modin==0.18.1; python_version >= '3.8' modin==0.12.1; python_version < '3.8' +modin==0.18.1; python_version >= '3.8' diff --git a/python/requirements_test.txt b/python/requirements_test.txt index be87fb41babb..2a6e27c02ece 100644 --- a/python/requirements_test.txt +++ b/python/requirements_test.txt @@ -12,7 +12,7 @@ azure-mgmt-network==19.0.0 azure-mgmt-resource==20.0.0 msrestazure==0.6.4 beautifulsoup4==4.11.1 -boto3==1.23.10 +boto3==1.24.59 # Todo: investigate if we can get rid of this and exchange for ray.cloudpickle cloudpickle==2.2.0 # Keep in sync with `ci/build/upload_build_info.sh` @@ -41,7 +41,7 @@ openpyxl==3.0.10 opentelemetry-api==1.1.0 opentelemetry-sdk==1.1.0 opentelemetry-exporter-otlp==1.1.0 -opentelemetry-exporter-opencensus +opentelemetry-exporter-opencensus==0.20b0 pexpect==4.8.0 Pillow==9.2.0; platform_system != "Windows" proxy.py==2.4.3 @@ -54,8 +54,9 @@ PyOpenSSL==22.1.0 pygame==2.1.2; python_version < '3.11' Pygments==2.13.0 pymongo==4.3.2 -# TODO: Replace this with pyspark==3.4 once it is released. 
-https://ml-team-public-read.s3.us-west-2.amazonaws.com/spark-pkgs/pyspark-3.4.0.dev0-0cb0fa313979e1b82ddd711a05d8c4e78cf6c9f5.tar.gz +# TODO: Upgrade to pyspark 3.4.0 once raydp supports it +# https://ml-team-public-read.s3.us-west-2.amazonaws.com/spark-pkgs/pyspark-3.4.0.dev0-0cb0fa313979e1b82ddd711a05d8c4e78cf6c9f5.tar.gz +pyspark==3.3.1 pytest==7.0.1 pytest-asyncio==0.16.0 pytest-rerunfailures==10.2 @@ -75,13 +76,20 @@ memray; platform_system != "Windows" and sys_platform != "darwin" # For doc tests myst-parser==0.15.2 myst-nb==0.13.1 +sphinx==4.3.2 jupytext==1.13.6 +jinja2==3.0.3 pytest-docker-tools==3.1.3 pytest-forked==1.4.0 # For dataset tests polars==0.14.21 +# ale-py requires 4.10.0. It's also compatible with flake8. +# It's not compatible with mosaicml though. Try to remove once +# we are at Python 3.8 +importlib-metadata==4.10.0 + # Some packages have downstream dependencies that we have to specify here to resolve conflicts. # Feel free to add (or remove!) packages here liberally. tensorboardX==2.4.1 @@ -89,7 +97,16 @@ starlette==0.17.1 h11==0.12.0 markdown-it-py==1.1.0 attrs==21.4.0 -importlib-metadata==4.13.0 +pytz==2022.7.1 +# Compatibility with spacy 3.5 (model en_core_web_sm) +typing-extensions==4.5.0 +networkx==2.6.3; python_version <= '3.7' +# Aim requires segment-analytics-python, which requires backoff~=2.10, +# which conflicts with the opentelemetry-api 1.1.0. 
+segment-analytics-python==2.2.0 +httpcore==0.15.0 +backoff==1.10 +sympy==1.10.1; python_version <= '3.7' # For test_basic.py::test_omp_threads_set threadpoolctl==3.1.0 From c2f4965b4691d5272e18d1f1c7936315f693473b Mon Sep 17 00:00:00 2001 From: Kai Fricke Date: Thu, 4 May 2023 15:52:18 +0100 Subject: [PATCH 229/424] [air/output] Use flat metrics in results report, use Trainable._progress_metrics (#35035) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We already have a `Trainable._progress_metrics` property that can be used to define default metrics to be displayed in the results table. With this PR, we port this functionality to the new output. For rllib, an example output can look like this: ``` Trial PPO_CartPole-v1_cdf47_00000 finished iteration 4 at 2023-05-04 11:03:06 (running for 00:00:21.57). ╭─────────────────────────────────────────────────────╮ │ Trial PPO_CartPole-v1_cdf47_00000 result │ ├─────────────────────────────────────────────────────┤ │ episodes_total 423 │ │ num_env_steps_sampled 28000 │ │ num_env_steps_trained 28000 │ │ sampler_results/episode_len_mean 153.15 │ │ sampler_results/episode_reward_mean 153.15 │ ╰─────────────────────────────────────────────────────╯ ``` Signed-off-by: Kai Fricke --- python/ray/tune/experimental/output.py | 44 +++++++++++---------- python/ray/tune/progress_reporter.py | 4 +- python/ray/tune/tests/output/test_output.py | 14 +++---- python/ray/tune/utils/callback.py | 8 ++-- rllib/algorithms/algorithm.py | 10 +++-- 5 files changed, 42 insertions(+), 38 deletions(-) diff --git a/python/ray/tune/experimental/output.py b/python/ray/tune/experimental/output.py index 111999778b16..1fd861b433d1 100644 --- a/python/ray/tune/experimental/output.py +++ b/python/ray/tune/experimental/output.py @@ -1,5 +1,5 @@ import sys -from typing import List, Dict, Optional, Tuple, Any, TYPE_CHECKING, Collection +from typing import Any, Collection, Dict, Iterable, List, Optional, Tuple, 
TYPE_CHECKING import contextlib import collections @@ -23,7 +23,7 @@ rich = None import ray -from ray._private.dict import unflattened_lookup +from ray._private.dict import unflattened_lookup, flatten_dict from ray._private.thirdparty.tabulate.tabulate import ( tabulate, TableFormat, @@ -361,36 +361,36 @@ def _best_trial_str( ) -def _render_table_item(key: str, item: Any, prefix: str = ""): +def _render_table_item( + key: str, item: Any, prefix: str = "" +) -> Iterable[Tuple[str, str]]: key = prefix + key if isinstance(item, float): # tabulate does not work well with mixed-type columns, so we format # numbers ourselves. yield key, f"{item:.5f}".rstrip("0") - elif isinstance(item, list): - yield key, None - for sv in item: - yield from _render_table_item("", sv, prefix=prefix + "-") - elif isinstance(item, Dict): - yield key, None - for sk, sv in item.items(): - yield from _render_table_item(str(sk), sv, prefix=prefix + "/") else: - yield key, item + yield key, _max_len(item, 20) def _get_dict_as_table_data( data: Dict, + include: Optional[Collection] = None, exclude: Optional[Collection] = None, upper_keys: Optional[Collection] = None, ): + include = include or set() exclude = exclude or set() upper_keys = upper_keys or set() upper = [] lower = [] - for key, value in sorted(data.items()): + flattened = flatten_dict(data) + + for key, value in sorted(flattened.items()): + if include and key not in include: + continue if key in exclude: continue @@ -424,11 +424,12 @@ def _get_dict_as_table_data( def _print_dict_as_table( data: Dict, header: Optional[str] = None, - exclude: Optional[Collection] = None, - division: Optional[Collection] = None, + include: Optional[Collection[str]] = None, + exclude: Optional[Collection[str]] = None, + division: Optional[Collection[str]] = None, ): table_data = _get_dict_as_table_data( - data=data, exclude=exclude, upper_keys=division + data=data, include=include, exclude=exclude, upper_keys=division ) headers = [header, ""] if header 
else [] @@ -758,9 +759,10 @@ def _print_heartbeat(self, trials, *args): class AirResultCallbackWrapper(Callback): # This is only to bypass the issue that by the time default callbacks # are added, there is no information on `num_samples` yet. - def __init__(self, verbosity): + def __init__(self, verbosity: AirVerbosity, metrics: Collection[str] = ()): self._verbosity = verbosity self._callback = None + self._metrics = metrics def setup( self, @@ -770,9 +772,9 @@ def setup( **info, ): self._callback = ( - TuneResultProgressCallback(self._verbosity) + TuneResultProgressCallback(self._verbosity, metrics=self._metrics) if total_num_samples > 1 - else TrainResultProgressCallback(self._verbosity) + else TrainResultProgressCallback(self._verbosity, metrics=self._metrics) ) # everything ELSE is just passing through.. @@ -794,10 +796,11 @@ class AirResultProgressCallback(Callback): _intermediate_result_verbosity = None _addressing_tmpl = None - def __init__(self, verbosity): + def __init__(self, verbosity: AirVerbosity, metrics: Collection[str] = ()): self._verbosity = verbosity self._start_time = time.time() self._trial_last_printed_results = {} + self._metrics = metrics def _print_result(self, trial, result: Optional[Dict] = None, force: bool = False): """Only print result if a different result has been reported, or force=True""" @@ -810,6 +813,7 @@ def _print_result(self, trial, result: Optional[Dict] = None, force: bool = Fals _print_dict_as_table( result, header=f"{self._addressing_tmpl.format(trial)} result", + include=self._metrics, exclude=BLACKLISTED_KEYS, division=AUTO_RESULT_KEYS, ) diff --git a/python/ray/tune/progress_reporter.py b/python/ray/tune/progress_reporter.py index 59fd63187cd9..d829793ce0b7 100644 --- a/python/ray/tune/progress_reporter.py +++ b/python/ray/tune/progress_reporter.py @@ -9,7 +9,7 @@ import textwrap import time import warnings -from typing import Any, Callable, Dict, List, Optional, Tuple, Union +from typing import Any, Callable, 
Collection, Dict, List, Optional, Tuple, Union import numpy as np import pandas as pd @@ -1517,7 +1517,7 @@ def _detect_reporter(**kwargs) -> TuneReporterBase: def _detect_progress_metrics( trainable: Optional[Union["Trainable", Callable]] -) -> Optional[List[str]]: +) -> Optional[Collection[str]]: """Detect progress metrics to report.""" if not trainable: return None diff --git a/python/ray/tune/tests/output/test_output.py b/python/ray/tune/tests/output/test_output.py index 7b45a8078446..dbff7c1da510 100644 --- a/python/ray/tune/tests/output/test_output.py +++ b/python/ray/tune/tests/output/test_output.py @@ -185,10 +185,8 @@ def test_result_table_no_divison(): ["c", 5], ["x", "19.12312"], ["y", 20], - ["z", None], - ["/m", 4], - ["/n", None], - ["//o", "p"], + ["z/m", 4], + ["z/n/o", "p"], ] @@ -204,16 +202,14 @@ def test_result_table_divison(): "z": {"m": 4, "n": {"o": "p"}}, }, exclude={"ignore"}, - upper_keys={"x", "y", "z"}, + upper_keys={"x", "y", "z", "z/m", "z/n/o"}, ) assert data == [ ["x", "19.12312"], ["y", 20], - ["z", None], - ["/m", 4], - ["/n", None], - ["//o", "p"], + ["z/m", 4], + ["z/n/o", "p"], ["a", 8], ["b", 6], ["c", 5], diff --git a/python/ray/tune/utils/callback.py b/python/ray/tune/utils/callback.py index 448084712652..00a049c048f3 100644 --- a/python/ray/tune/utils/callback.py +++ b/python/ray/tune/utils/callback.py @@ -1,6 +1,6 @@ import logging import os -from typing import List, Optional, Type, Union, TYPE_CHECKING +from typing import Collection, List, Optional, Type, Union, TYPE_CHECKING from ray.tune.callback import Callback, CallbackList @@ -45,7 +45,7 @@ def _create_default_callbacks( sync_config: SyncConfig, air_verbosity: Optional["AirVerbosity"] = None, metric: Optional[str] = None, - progress_metrics: Optional[List[str]] = None, + progress_metrics: Optional[Collection[str]] = None, ): """Create default callbacks for `Tuner.fit()`. 
@@ -93,7 +93,9 @@ def _create_default_callbacks( if air_verbosity is not None: # new flow from ray.tune.experimental.output import AirResultCallbackWrapper - callbacks.append(AirResultCallbackWrapper(air_verbosity)) + callbacks.append( + AirResultCallbackWrapper(air_verbosity, metrics=progress_metrics) + ) elif not has_trial_progress_callback: # old flow trial_progress_callback = TrialProgressCallback( metric=metric, progress_metrics=progress_metrics diff --git a/rllib/algorithms/algorithm.py b/rllib/algorithms/algorithm.py index 7473054b1d82..d1c12aca3467 100644 --- a/rllib/algorithms/algorithm.py +++ b/rllib/algorithms/algorithm.py @@ -210,12 +210,14 @@ class Algorithm(Trainable): # List of keys that are always fully overridden if present in any dict or sub-dict _override_all_key_list = ["off_policy_estimation_methods", "policies"] - _progress_metrics = [ - "sampler_results/episode_reward_mean", - "evaluation/sampler_results/episode_reward_mean", + _progress_metrics = ( "num_env_steps_sampled", "num_env_steps_trained", - ] + "episodes_total", + "sampler_results/episode_len_mean", + "sampler_results/episode_reward_mean", + "evaluation/sampler_results/episode_reward_mean", + ) @staticmethod def from_checkpoint( From 38e0a6c6d9483f22526f318e75828a4b7c597be6 Mon Sep 17 00:00:00 2001 From: Kai Fricke Date: Thu, 4 May 2023 17:51:38 +0100 Subject: [PATCH 230/424] [tune] Set config on trial restore (#35000) This PR fixes a bug in the Ray Tune trial restoration that comes up when a trial is restored, but not continued in training. In that case, an empty `config` dict is not restored. It also fixes an oversight in trial restoration when updating the last trial result: The metrics are stored flattened in the checkpoint, but should be unflattened for `trial.last_result`. 
Signed-off-by: Kai Fricke --- python/ray/tune/experiment/trial.py | 8 ++++++-- python/ray/tune/tests/test_tune_restore.py | 22 ++++++++++++++++++++++ 2 files changed, 28 insertions(+), 2 deletions(-) diff --git a/python/ray/tune/experiment/trial.py b/python/ray/tune/experiment/trial.py index 453aa9b06ab9..c67bb045be42 100644 --- a/python/ray/tune/experiment/trial.py +++ b/python/ray/tune/experiment/trial.py @@ -16,6 +16,7 @@ import uuid import ray +from ray._private.dict import unflatten_dict from ray.air import CheckpointConfig from ray.air._internal.uri_utils import URI from ray.air._internal.checkpoint_manager import _TrackedCheckpoint, CheckpointStorage @@ -626,6 +627,7 @@ def last_result(self) -> dict: @last_result.setter def last_result(self, val: dict): self._last_result = val + self.invalidate_json_state() def get_runner_ip(self) -> Optional[str]: if self.location.hostname: @@ -987,7 +989,8 @@ def on_checkpoint(self, checkpoint: _TrackedCheckpoint): def on_restore(self): """Handles restoration completion.""" assert self.is_restoring - self.last_result = self.restoring_from.metrics + self.last_result = unflatten_dict(self.restoring_from.metrics) + self.last_result.setdefault("config", self.config) self.restoring_from = None self.num_restore_failures = 0 self.invalidate_json_state() @@ -1058,7 +1061,8 @@ def update_last_result(self, result): self.metric_analysis[metric][key] = sum( self.metric_n_steps[metric][str(n)] ) / len(self.metric_n_steps[metric][str(n)]) - self.invalidate_json_state() + + # json state is invalidated in last_result.setter def get_trainable_cls(self): if self.stub: diff --git a/python/ray/tune/tests/test_tune_restore.py b/python/ray/tune/tests/test_tune_restore.py index e97162db3f4c..96295af8dc93 100644 --- a/python/ray/tune/tests/test_tune_restore.py +++ b/python/ray/tune/tests/test_tune_restore.py @@ -17,6 +17,7 @@ import ray from ray import tune from ray._private.test_utils import recursive_fnmatch, run_string_as_driver +from 
ray.air._internal.checkpoint_manager import _TrackedCheckpoint, CheckpointStorage from ray.exceptions import RayTaskError from ray.rllib import _register_all from ray.tune import TuneError @@ -643,6 +644,27 @@ def training_func(config): tune.run(training_func) +@pytest.mark.parametrize( + "trial_config", [{}, {"attr": 4}, {"nested": {"key": "value"}}] +) +def test_trial_last_result_restore(trial_config): + metrics = {"metric1": 4, "nested2": {"metric3": 6}} + metrics["config"] = trial_config + + trial = Trial(trainable_name="stub", config=trial_config, stub=True) + trial.update_last_result(metrics) + + checkpoint = _TrackedCheckpoint( + dir_or_data="no_data", + storage_mode=CheckpointStorage.PERSISTENT, + metrics=metrics, + ) + + trial.restoring_from = checkpoint + trial.on_restore() + assert trial.last_result == metrics + + def test_stacktrace(): """Test proper stacktrace is printed for RayTaskError.""" CMD = """ From 93c314a393c559295fc8265d8002659b754cd3f4 Mon Sep 17 00:00:00 2001 From: Kai Fricke Date: Thu, 4 May 2023 17:56:39 +0100 Subject: [PATCH 231/424] [air/output] Improve passed time display (#34951) This changes the formatting of the total running time like this: ``` Trial easy_objective_b12ae_00003 completed training after 5 iterations at 2023-05-02 14:46:11. Total running time: 4d 3hr 20min 2s Trial easy_objective_d3d85_00008 completed training after 5 iterations at 2023-05-02 14:47:09. Total running time: 4d 0hr 20min 2s Trial easy_objective_cbd93_00007 finished iteration 5 at 2023-05-02 14:46:56. Total running time: 20min 2s Trial easy_objective_14e6b_00001 completed training after 5 iterations at 2023-05-02 14:48:58. Total running time: 1hr 0min 2s ``` It does not turn metrics (e.g. `time_this_iter` or `total_time_s`) into written sentences, as this will make it harder to identify these metrics for configuration purposes. 
Signed-off-by: Kai Fricke --- python/ray/tune/experimental/output.py | 37 +++++++++++++-------- python/ray/tune/tests/output/test_output.py | 25 ++++++++++++-- 2 files changed, 45 insertions(+), 17 deletions(-) diff --git a/python/ray/tune/experimental/output.py b/python/ray/tune/experimental/output.py index 1fd861b433d1..0cdbd152a766 100644 --- a/python/ray/tune/experimental/output.py +++ b/python/ray/tune/experimental/output.py @@ -111,22 +111,27 @@ def _get_time_str(start_time: float, current_time: float) -> Tuple[str, str]: delta: datetime.timedelta = current_time_dt - start_time_dt rest = delta.total_seconds() - days = rest // (60 * 60 * 24) + days = int(rest // (60 * 60 * 24)) rest -= days * (60 * 60 * 24) - hours = rest // (60 * 60) + hours = int(rest // (60 * 60)) rest -= hours * (60 * 60) - minutes = rest // 60 + minutes = int(rest // 60) - seconds = rest - minutes * 60 + seconds = int(rest - minutes * 60) + running_for_str = "" if days > 0: - running_for_str = f"{days:.0f} days, " - else: - running_for_str = "" + running_for_str += f"{days:d}d " + + if hours > 0 or running_for_str: + running_for_str += f"{hours:d}hr " + + if minutes > 0 or running_for_str: + running_for_str += f"{minutes:d}min " - running_for_str += f"{hours:02.0f}:{minutes:02.0f}:{seconds:05.2f}" + running_for_str += f"{seconds:d}s" return f"{current_time_dt:%Y-%m-%d %H:%M:%S}", running_for_str @@ -487,8 +492,12 @@ def experiment_started( @property def _time_heartbeat_str(self): - current_time_str, running_for_str = _get_time_str(self._start_time, time.time()) - return f"Current time: {current_time_str} " f"(running for {running_for_str})" + current_time_str, running_time_str = _get_time_str( + self._start_time, time.time() + ) + return ( + f"Current time: {current_time_str}. 
Total running time: " + running_time_str + ) def print_heartbeat(self, trials, *args, force: bool = False): if self._verbosity < self._heartbeat_threshold: @@ -834,11 +843,11 @@ def on_trial_result( ): if self._verbosity < self._intermediate_result_verbosity: return - curr_time, running_time = _get_time_str(self._start_time, time.time()) + curr_time_str, running_time_str = _get_time_str(self._start_time, time.time()) print( f"{self._addressing_tmpl.format(trial)} " f"finished iteration {result[TRAINING_ITERATION]} " - f"at {curr_time} (running for {running_time})." + f"at {curr_time_str}. Total running time: " + running_time_str ) self._print_result(trial, result) @@ -847,14 +856,14 @@ def on_trial_complete( ): if self._verbosity < self._start_end_verbosity: return - curr_time, running_time = _get_time_str(self._start_time, time.time()) + curr_time_str, running_time_str = _get_time_str(self._start_time, time.time()) finished_iter = 0 if trial.last_result and TRAINING_ITERATION in trial.last_result: finished_iter = trial.last_result[TRAINING_ITERATION] print( f"{self._addressing_tmpl.format(trial)} " f"completed training after {finished_iter} iterations " - f"at {curr_time} (running for {running_time})." + f"at {curr_time_str}. 
Total running time: " + running_time_str ) self._print_result(trial) diff --git a/python/ray/tune/tests/output/test_output.py b/python/ray/tune/tests/output/test_output.py index dbff7c1da510..30bd73ed16c5 100644 --- a/python/ray/tune/tests/output/test_output.py +++ b/python/ray/tune/tests/output/test_output.py @@ -1,7 +1,6 @@ import pytest import sys -import time from freezegun import freeze_time from ray.tune.experimental.output import ( @@ -55,8 +54,28 @@ @freeze_time("Mar 27th, 2023", auto_tick_seconds=15) def test_get_time_str(): - result = _get_time_str(time.time(), time.time()) - assert result == ("2023-03-27 00:00:15", "00:00:15.00") + base = 1679875200 # 2023-03-27 00:00:00 + + assert _get_time_str(base, base) == ("2023-03-27 00:00:00", "0s") + assert _get_time_str(base, base + 15) == ("2023-03-27 00:00:15", "15s") + assert _get_time_str(base, base + 60) == ("2023-03-27 00:01:00", "1min 0s") + assert _get_time_str(base, base + 65) == ("2023-03-27 00:01:05", "1min 5s") + assert _get_time_str(base, base + 3600) == ( + "2023-03-27 01:00:00", + "1hr 0min 0s", + ) + assert _get_time_str(base, base + 3605) == ( + "2023-03-27 01:00:05", + "1hr 0min 5s", + ) + assert _get_time_str(base, base + 3660) == ( + "2023-03-27 01:01:00", + "1hr 1min 0s", + ) + assert _get_time_str(base, base + 86400) == ( + "2023-03-28 00:00:00", + "1d 0hr 0min 0s", + ) def test_get_trials_by_state(): From 6761dbe0a446724bb954cb8cafcc7cd59e898062 Mon Sep 17 00:00:00 2001 From: Yunxuan Xiao Date: Thu, 4 May 2023 11:41:39 -0700 Subject: [PATCH 232/424] Unpinning the "packaging" library with specific python versions in requirements.txt (#34807) --- dashboard/http_server_agent.py | 5 +---- dashboard/http_server_head.py | 5 +---- python/ray/autoscaler/_private/aws/config.py | 4 ++-- python/ray/data/tests/test_csv.py | 4 ++-- .../ray/train/huggingface/accelerate/_accelerate_utils.py | 8 +------- .../train/huggingface/accelerate/accelerate_trainer.py | 5 +---- 
python/ray/train/huggingface/huggingface_trainer.py | 6 +----- python/ray/train/torch/train_loop_utils.py | 4 ++-- python/ray/widgets/util.py | 8 ++------ python/requirements.txt | 2 +- python/setup.py | 2 +- rllib/examples/export/onnx_torch.py | 6 +----- rllib/utils/debug/deterministic.py | 5 +---- 13 files changed, 17 insertions(+), 47 deletions(-) diff --git a/dashboard/http_server_agent.py b/dashboard/http_server_agent.py index 590da4cab2a2..9547d899a7ee 100644 --- a/dashboard/http_server_agent.py +++ b/dashboard/http_server_agent.py @@ -1,10 +1,7 @@ import logging from ray._private.utils import get_or_create_event_loop -try: - from packaging.version import Version -except ImportError: - from distutils.version import LooseVersion as Version +from packaging.version import Version import ray.dashboard.optional_utils as dashboard_optional_utils diff --git a/dashboard/http_server_head.py b/dashboard/http_server_head.py index 8583ff57aed5..d66de2d68746 100644 --- a/dashboard/http_server_head.py +++ b/dashboard/http_server_head.py @@ -9,10 +9,7 @@ from ray._private.utils import get_or_create_event_loop from ray._private.usage.usage_lib import TagKey, record_extra_usage_tag -try: - from packaging.version import Version -except ImportError: - from distutils.version import LooseVersion as Version +from packaging.version import Version import ray.dashboard.optional_utils as dashboard_optional_utils import ray.dashboard.utils as dashboard_utils diff --git a/python/ray/autoscaler/_private/aws/config.py b/python/ray/autoscaler/_private/aws/config.py index 76a03b99daa8..3390ea11af0c 100644 --- a/python/ray/autoscaler/_private/aws/config.py +++ b/python/ray/autoscaler/_private/aws/config.py @@ -4,12 +4,12 @@ import logging import os import time -from distutils.version import StrictVersion from functools import lru_cache, partial from typing import Any, Dict, List, Optional, Set, Tuple import boto3 import botocore +from packaging.version import Version from 
ray.autoscaler._private.aws.cloudwatch.cloudwatch_helper import ( CloudwatchHelper as cwh, @@ -58,7 +58,7 @@ # todo: cli_logger should handle this assert properly # this should probably also happens somewhere else -assert StrictVersion(boto3.__version__) >= StrictVersion( +assert Version(boto3.__version__) >= Version( "1.4.8" ), "Boto3 version >= 1.4.8 required, try `pip install -U boto3`" diff --git a/python/ray/data/tests/test_csv.py b/python/ray/data/tests/test_csv.py index 69c571630d92..9c9852a50315 100644 --- a/python/ray/data/tests/test_csv.py +++ b/python/ray/data/tests/test_csv.py @@ -2,7 +2,7 @@ import os import shutil from functools import partial -from distutils.version import LooseVersion +from packaging.version import Version import pandas as pd import pyarrow as pa @@ -895,7 +895,7 @@ def test_csv_read_filter_non_csv_file(shutdown_only, tmp_path): @pytest.mark.skipif( - LooseVersion(pa.__version__) < LooseVersion("7.0.0"), + Version(pa.__version__) < Version("7.0.0"), reason="invalid_row_handler was added in pyarrow 7.0.0", ) def test_csv_invalid_file_handler(shutdown_only, tmp_path): diff --git a/python/ray/train/huggingface/accelerate/_accelerate_utils.py b/python/ray/train/huggingface/accelerate/_accelerate_utils.py index 64a362bc3867..497878df2d78 100644 --- a/python/ray/train/huggingface/accelerate/_accelerate_utils.py +++ b/python/ray/train/huggingface/accelerate/_accelerate_utils.py @@ -20,16 +20,10 @@ from typing import Optional, Tuple, Union import tempfile from pathlib import Path - +from packaging.version import Version from contextlib import nullcontext - -try: - from packaging.version import Version -except ImportError: - from distutils.version import LooseVersion as Version - import accelerate if Version(accelerate.__version__) < Version("0.17.0.dev0"): diff --git a/python/ray/train/huggingface/accelerate/accelerate_trainer.py b/python/ray/train/huggingface/accelerate/accelerate_trainer.py index 726bc10569f2..aab3f5121dc9 100644 --- 
a/python/ray/train/huggingface/accelerate/accelerate_trainer.py +++ b/python/ray/train/huggingface/accelerate/accelerate_trainer.py @@ -4,10 +4,7 @@ from pathlib import Path from typing import TYPE_CHECKING, Callable, Dict, Optional, Type, Tuple, Union -try: - from packaging.version import Version -except ImportError: - from distutils.version import LooseVersion as Version +from packaging.version import Version import accelerate diff --git a/python/ray/train/huggingface/huggingface_trainer.py b/python/ray/train/huggingface/huggingface_trainer.py index 99bd1ad96ea4..5c22aea79912 100644 --- a/python/ray/train/huggingface/huggingface_trainer.py +++ b/python/ray/train/huggingface/huggingface_trainer.py @@ -4,11 +4,7 @@ import sys import warnings from typing import TYPE_CHECKING, Any, Callable, Dict, Optional, Type - -try: - from packaging.version import Version -except ImportError: - from distutils.version import LooseVersion as Version +from packaging.version import Version import transformers diff --git a/python/ray/train/torch/train_loop_utils.py b/python/ray/train/torch/train_loop_utils.py index 952e97fb3944..c41177f73a80 100644 --- a/python/ray/train/torch/train_loop_utils.py +++ b/python/ray/train/torch/train_loop_utils.py @@ -3,7 +3,7 @@ import random import types import collections -from distutils.version import LooseVersion +from packaging.version import Version from typing import Any, Dict, List, Optional, Callable, Union @@ -19,7 +19,7 @@ from torch.cuda.amp import autocast, GradScaler from torch.nn.parallel import DistributedDataParallel -if LooseVersion(torch.__version__) < LooseVersion("1.11.0"): +if Version(torch.__version__) < Version("1.11.0"): FullyShardedDataParallel = None else: from torch.distributed.fsdp import FullyShardedDataParallel diff --git a/python/ray/widgets/util.py b/python/ray/widgets/util.py index e43467c6ace9..ebf97f66b014 100644 --- a/python/ray/widgets/util.py +++ b/python/ray/widgets/util.py @@ -5,16 +5,12 @@ from functools import 
wraps from typing import Any, Callable, Iterable, Optional, TypeVar, Union +from packaging.version import Version + from ray._private.thirdparty.tabulate.tabulate import tabulate from ray.util.annotations import DeveloperAPI from ray.widgets import Template -try: - from packaging.version import Version -except ImportError: - from distutils.version import LooseVersion as Version - - logger = logging.getLogger(__name__) F = TypeVar("F", bound=Callable[..., Any]) diff --git a/python/requirements.txt b/python/requirements.txt index 932f8f277907..e6bfd5a7e466 100644 --- a/python/requirements.txt +++ b/python/requirements.txt @@ -18,6 +18,7 @@ pyyaml aiosignal frozenlist requests +packaging # Python version-specific requirements dataclasses; python_version < '3.7' @@ -29,7 +30,6 @@ grpcio >= 1.32.0, <= 1.51.3; python_version < '3.10' and sys_platform != 'darwin grpcio >= 1.42.0, <= 1.51.3; python_version >= '3.10' and sys_platform != 'darwin' numpy>=1.16; python_version < '3.9' numpy>=1.19.3; python_version >= '3.9' -packaging; python_version >= '3.10' typing_extensions; python_version < '3.8' # ray[all] diff --git a/python/setup.py b/python/setup.py index ca811d7395fc..11b725ed85a6 100644 --- a/python/setup.py +++ b/python/setup.py @@ -322,7 +322,7 @@ def get_packages(self): "msgpack >= 1.0.0, < 2.0.0", "numpy >= 1.16; python_version < '3.9'", "numpy >= 1.19.3; python_version >= '3.9'", - "packaging; python_version >= '3.10'", + "packaging", "protobuf >= 3.15.3, != 3.19.5", "pyyaml", "aiosignal", diff --git a/rllib/examples/export/onnx_torch.py b/rllib/examples/export/onnx_torch.py index b8196f36e8d6..3438d51840b7 100644 --- a/rllib/examples/export/onnx_torch.py +++ b/rllib/examples/export/onnx_torch.py @@ -1,8 +1,4 @@ -try: - from packaging.version import Version -except ImportError: - from distutils.version import LooseVersion as Version - +from packaging.version import Version import numpy as np import ray import ray.rllib.algorithms.ppo as ppo diff --git 
a/rllib/utils/debug/deterministic.py b/rllib/utils/debug/deterministic.py index f41fdabf323b..d3696c92b54d 100644 --- a/rllib/utils/debug/deterministic.py +++ b/rllib/utils/debug/deterministic.py @@ -37,10 +37,7 @@ def update_global_seed_if_necessary( if cuda_version is not None and float(torch.version.cuda) >= 10.2: os.environ["CUBLAS_WORKSPACE_CONFIG"] = "4096:8" else: - try: - from packaging.version import Version - except ImportError: - from distutils.version import LooseVersion as Version + from packaging.version import Version if Version(torch.__version__) >= Version("1.8.0"): # Not all Operations support this. From 3139e843978ead08f1b54370282574c0cd0f3554 Mon Sep 17 00:00:00 2001 From: Cuong Nguyen <128072568+can-anyscale@users.noreply.github.com> Date: Thu, 4 May 2023 11:56:28 -0700 Subject: [PATCH 233/424] Jailed 4 core tests that have been failing for weeks (#35045) Jail the following 4 tests: - ray-data-bulk-ingest-file-size-benchmark - ray-data-bulk-ingest-heterogeneity-benchmark - ray-data-bulk-ingest-out-of-core-benchmark - tune_air_oom Signed-off-by: Cuong Nguyen --- release/release_tests.yaml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/release/release_tests.yaml b/release/release_tests.yaml index 73aa336e7cb4..b5e8093a090b 100644 --- a/release/release_tests.yaml +++ b/release/release_tests.yaml @@ -701,6 +701,7 @@ working_dir: air_tests/air_benchmarks/mlperf-train stable: false + jailed: true frequency: nightly team: core @@ -727,6 +728,7 @@ working_dir: air_tests/air_benchmarks/mlperf-train stable: false + jailed: true frequency: nightly team: core @@ -753,6 +755,7 @@ working_dir: air_tests/air_benchmarks/mlperf-train stable: false + jailed: true frequency: nightly team: core @@ -4799,7 +4802,9 @@ - name: tune_air_oom group: core-daily-test working_dir: air_tests + stable: false + jailed: true frequency: nightly team: core From 26f3c35c48e66c3bd84f25183a0b05f3d5e955a0 Mon Sep 17 00:00:00 2001 From: Cindy Zhang Date: Thu, 4 May 2023 
11:59:16 -0700 Subject: [PATCH 234/424] [serve] Better surfacing of errors in serve status (#34773) 1. Surface tracebacks from constructor failures. Output from `serve status`: ``` name: default app_status: status: DEPLOY_FAILED message: |- Deploying app 'default' failed: ray::deploy_serve_application() (pid=15878, ip=192.168.1.14) File "/Users/cindyz/ray/python/ray/serve/controller.py", line 947, in deploy_serve_application serve.run(app, name=name, route_prefix=route_prefix) File "/Users/cindyz/ray/python/ray/serve/api.py", line 539, in run client.deploy_application( File "/Users/cindyz/ray/python/ray/serve/_private/client.py", line 43, in check return f(self, *args, **kwargs) File "/Users/cindyz/ray/python/ray/serve/_private/client.py", line 299, in deploy_application self._wait_for_deployment_healthy(deployment_name) File "/Users/cindyz/ray/python/ray/serve/_private/client.py", line 183, in _wait_for_deployment_healthy raise RuntimeError( RuntimeError: Deployment default_Fail is UNHEALTHY: The Deployment failed to start 3 times in a row. This may be due to a problem with the deployment constructor or the initial health check failing. See controller logs for details. Retrying after 1 seconds. 
Error: ray::ServeReplica:default_Fail.is_initialized() (pid=15919, ip=192.168.1.14, repr=) File "/Users/cindyz/miniforge3/envs/ray/lib/python3.8/concurrent/futures/_base.py", line 437, in result return self.__get_result() File "/Users/cindyz/miniforge3/envs/ray/lib/python3.8/concurrent/futures/_base.py", line 389, in __get_result raise self._exception File "/Users/cindyz/ray/python/ray/serve/_private/replica.py", line 234, in is_initialized await self._initialize_replica() File "/Users/cindyz/ray/python/ray/serve/_private/replica.py", line 150, in initialize_replica await sync_to_async(_callable.__init__)(*init_args, **init_kwargs) File "/Users/cindyz/Desktop/constructor_fail.py", line 16, in __init__ raise Exception("I need to know about this!") Exception: I need to know about this! deployment_timestamp: 1682476137.8513532 deployment_statuses: - name: default_Fail status: UNHEALTHY message: |- The Deployment failed to start 3 times in a row. This may be due to a problem with the deployment constructor or the initial health check failing. See controller logs for details. Retrying after 1 seconds. Error: ray::ServeReplica:default_Fail.is_initialized() (pid=15919, ip=192.168.1.14, repr=) File "/Users/cindyz/miniforge3/envs/ray/lib/python3.8/concurrent/futures/_base.py", line 437, in result return self.__get_result() File "/Users/cindyz/miniforge3/envs/ray/lib/python3.8/concurrent/futures/_base.py", line 389, in __get_result raise self._exception File "/Users/cindyz/ray/python/ray/serve/_private/replica.py", line 234, in is_initialized await self._initialize_replica() File "/Users/cindyz/ray/python/ray/serve/_private/replica.py", line 150, in initialize_replica await sync_to_async(_callable.__init__)(*init_args, **init_kwargs) File "/Users/cindyz/Desktop/constructor_fail.py", line 16, in __init__ raise Exception("I need to know about this!") Exception: I need to know about this! ``` 2. 
Serializes exceptions from the replica actor, so that they are displayed properly when surfaced through the controller. --- python/ray/serve/_private/deployment_state.py | 42 ++++++++---- python/ray/serve/_private/replica.py | 28 ++++---- python/ray/serve/scripts.py | 2 + python/ray/serve/tests/test_cli.py | 66 ++++++++++++++++--- python/ray/serve/tests/test_cluster.py | 6 +- .../test_config_files/deployment_fail.yaml | 2 +- .../ray/serve/tests/test_config_files/fail.py | 11 +++- .../tests/test_config_files/sqlalchemy.py | 15 +++++ .../tests/test_config_files/sqlalchemy.yaml | 13 ++++ .../ray/serve/tests/test_deployment_state.py | 2 +- 10 files changed, 149 insertions(+), 38 deletions(-) create mode 100644 python/ray/serve/tests/test_config_files/sqlalchemy.py create mode 100644 python/ray/serve/tests/test_config_files/sqlalchemy.yaml diff --git a/python/ray/serve/_private/deployment_state.py b/python/ray/serve/_private/deployment_state.py index 1c18c6891b9e..c4cc6090360e 100644 --- a/python/ray/serve/_private/deployment_state.py +++ b/python/ray/serve/_private/deployment_state.py @@ -19,7 +19,7 @@ record_extra_usage_tag, ) from ray.actor import ActorHandle -from ray.exceptions import RayActorError, RayError +from ray.exceptions import RayActorError, RayError, RayTaskError from ray.serve._private.autoscaling_metrics import InMemoryMetricsStore from ray.serve._private.common import ( @@ -482,7 +482,7 @@ def recover(self): else: self._ready_obj_ref = self._actor_handle.get_metadata.remote() - def check_ready(self) -> ReplicaStartupStatus: + def check_ready(self) -> Tuple[ReplicaStartupStatus, Optional[str]]: """ Check if current replica has started by making ray API calls on relevant actor / object ref. @@ -499,23 +499,28 @@ def check_ready(self) -> ReplicaStartupStatus: - replica initialization failed. SUCCEEDED: - replica initialization succeeded. 
+ error_msg: + None: + - for PENDING_ALLOCATION, PENDING_INITIALIZATION or SUCCEEDED states + str: + - for FAILED state """ # Check whether the replica has been allocated. if not self._check_obj_ref_ready(self._allocated_obj_ref): - return ReplicaStartupStatus.PENDING_ALLOCATION + return ReplicaStartupStatus.PENDING_ALLOCATION, None # Check whether relica initialization has completed. replica_ready = self._check_obj_ref_ready(self._ready_obj_ref) # In case of deployment constructor failure, ray.get will help to # surface exception to each update() cycle. if not replica_ready: - return ReplicaStartupStatus.PENDING_INITIALIZATION + return ReplicaStartupStatus.PENDING_INITIALIZATION, None else: try: # TODO(simon): fully implement reconfigure for Java replicas. if self._is_cross_language: - return ReplicaStartupStatus.SUCCEEDED + return ReplicaStartupStatus.SUCCEEDED, None # todo: The replica's userconfig whitch java client created # is different from the controller's userconfig @@ -525,14 +530,23 @@ def check_ready(self) -> ReplicaStartupStatus: self._pid, self._actor_id, self._node_id, self._node_ip = ray.get( self._allocated_obj_ref ) - except Exception: + except RayTaskError as e: logger.exception( f"Exception in replica '{self._replica_tag}', " "the replica will be stopped." ) - return ReplicaStartupStatus.FAILED + # NOTE(zcin): we should use str(e) instead of traceback.format_exc() + # here because the full details of the error is not displayed properly + # with traceback.format_exc(). + return ReplicaStartupStatus.FAILED, str(e.as_instanceof_cause()) + except Exception as e: + logger.exception( + f"Exception in replica '{self._replica_tag}', " + "the replica will be stopped." 
+ ) + return ReplicaStartupStatus.FAILED, repr(e) - return ReplicaStartupStatus.SUCCEEDED + return ReplicaStartupStatus.SUCCEEDED, None @property def actor_resources(self) -> Optional[Dict[str, float]]: @@ -804,7 +818,7 @@ def recover(self): # Replica version is fetched from recovered replica dynamically in # check_started() below - def check_started(self) -> ReplicaStartupStatus: + def check_started(self) -> Tuple[ReplicaStartupStatus, Optional[str]]: """Check if the replica has started. If so, transition to RUNNING. Should handle the case where the replica has already stopped. @@ -1046,6 +1060,7 @@ def __init__( self._last_retry: float = 0.0 self._backoff_time_s: int = 1 self._replica_constructor_retry_counter: int = 0 + self._replica_constructor_error_msg: Optional[str] = None self._replicas: ReplicaStateContainer = ReplicaStateContainer() self._curr_status_info: DeploymentStatusInfo = DeploymentStatusInfo( self._name, DeploymentStatus.UPDATING @@ -1543,8 +1558,10 @@ def _check_curr_status(self) -> Tuple[bool, bool]: message=( f"The Deployment failed to start {failed_to_start_count} times " "in a row. This may be due to a problem with the deployment " - "constructor or the initial health check failing. See logs for " - f"details. Retrying after {self._backoff_time_s} seconds." + "constructor or the initial health check failing. See " + "controller logs for details. Retrying after " + f"{self._backoff_time_s} seconds. Error:\n" + f"{self._replica_constructor_error_msg}" ), ) return False, any_replicas_recovering @@ -1589,7 +1606,7 @@ def _check_startup_replicas( transitioned_to_running = False replicas_failed = False for replica in self._replicas.pop(states=[original_state]): - start_status = replica.check_started() + start_status, error_msg = replica.check_started() if start_status == ReplicaStartupStatus.SUCCEEDED: # This replica should be now be added to handle's replica # set. 
@@ -1604,6 +1621,7 @@ def _check_startup_replicas( if self._replica_constructor_retry_counter >= 0: # Increase startup failure counter if we're tracking it self._replica_constructor_retry_counter += 1 + self._replica_constructor_error_msg = error_msg replicas_failed = True self._stop_replica(replica) diff --git a/python/ray/serve/_private/replica.py b/python/ray/serve/_private/replica.py index ba7aadaba628..32ba7155190b 100644 --- a/python/ray/serve/_private/replica.py +++ b/python/ray/serve/_private/replica.py @@ -7,6 +7,7 @@ import pickle import time from typing import Any, Callable, Optional, Tuple, Dict +import traceback import starlette.responses @@ -236,21 +237,26 @@ async def is_initialized( ): # Unused `_after` argument is for scheduling: passing an ObjectRef # allows delaying reconfiguration until after this call has returned. - await self._initialize_replica() - - metadata = await self.reconfigure(deployment_config) - - # A new replica should not be considered healthy until it passes an - # initial health check. If an initial health check fails, consider - # it an initialization failure. - await self.check_health() - return metadata + try: + await self._initialize_replica() + metadata = await self.reconfigure(deployment_config) + + # A new replica should not be considered healthy until it passes an + # initial health check. If an initial health check fails, consider + # it an initialization failure. 
+ await self.check_health() + return metadata + except Exception: + raise RuntimeError(traceback.format_exc()) from None async def reconfigure( self, deployment_config: DeploymentConfig ) -> Tuple[DeploymentConfig, DeploymentVersion]: - await self.replica.reconfigure(deployment_config) - return await self.get_metadata() + try: + await self.replica.reconfigure(deployment_config) + return await self.get_metadata() + except Exception: + raise RuntimeError(traceback.format_exc()) from None async def get_metadata( self, diff --git a/python/ray/serve/scripts.py b/python/ray/serve/scripts.py index 949ed8059c90..dbb8e8b7aa1d 100644 --- a/python/ray/serve/scripts.py +++ b/python/ray/serve/scripts.py @@ -99,6 +99,8 @@ def process_dict_for_yaml_dump(data): for k, v in data.items(): if isinstance(v, dict): data[k] = process_dict_for_yaml_dump(v) + if isinstance(v, list): + data[k] = [process_dict_for_yaml_dump(item) for item in v] elif isinstance(v, str): data[k] = remove_ansi_escape_sequences(v) diff --git a/python/ray/serve/tests/test_cli.py b/python/ray/serve/tests/test_cli.py index d68a5608f096..14ecb989eadf 100644 --- a/python/ray/serve/tests/test_cli.py +++ b/python/ray/serve/tests/test_cli.py @@ -517,22 +517,20 @@ def test_status_error_msg_format(ray_start_stop): subprocess.check_output(["serve", "deploy", config_file_name]) - status_response = subprocess.check_output( - ["serve", "status", "-a", "http://localhost:52365/"] - ) - serve_status = yaml.safe_load(status_response) - print("serve_status", serve_status) - def check_for_failed_deployment(): + serve_status = yaml.safe_load( + subprocess.check_output( + ["serve", "status", "-a", "http://localhost:52365/"] + ) + ) app_status = ServeSubmissionClient("http://localhost:52365").get_status() return ( - len(serve_status["deployment_statuses"]) == 0 - and serve_status["app_status"]["status"] == "DEPLOY_FAILED" + serve_status["app_status"]["status"] == "DEPLOY_FAILED" and 
remove_ansi_escape_sequences(app_status["app_status"]["message"]) in serve_status["app_status"]["message"] ) - wait_for_condition(check_for_failed_deployment, timeout=2) + wait_for_condition(check_for_failed_deployment) @@ -579,6 +577,56 @@ def check_for_failed_deployment(): wait_for_condition(check_for_failed_deployment) +@pytest.mark.skipif(sys.platform == "win32", reason="File path incorrect on Windows.") +def test_status_constructor_error(ray_start_stop): + """Deploys Serve deployment that errors out in constructor, checks that the + traceback is surfaced. + """ + + config_file_name = os.path.join( + os.path.dirname(__file__), "test_config_files", "deployment_fail.yaml" + ) + + subprocess.check_output(["serve", "deploy", config_file_name]) + + def check_for_failed_deployment(): + status_response = subprocess.check_output( + ["serve", "status", "-a", "http://localhost:52365/"] + ) + serve_status = yaml.safe_load(status_response) + return ( + serve_status["app_status"]["status"] == "DEPLOY_FAILED" + and "ZeroDivisionError" in serve_status["deployment_statuses"][0]["message"] + ) + + wait_for_condition(check_for_failed_deployment) + + +@pytest.mark.skipif(sys.platform == "win32", reason="File path incorrect on Windows.") +def test_status_package_unavailable_in_controller(ray_start_stop): + """Test that exceptions raised from packages that are installed on deployment actors + but not on controller are serialized and surfaced properly.
+ """ + + config_file_name = os.path.join( + os.path.dirname(__file__), "test_config_files", "sqlalchemy.yaml" + ) + + subprocess.check_output(["serve", "deploy", config_file_name]) + + def check_for_failed_deployment(): + status_response = subprocess.check_output( + ["serve", "status", "-a", "http://localhost:52365/"] + ) + serve_status = yaml.safe_load(status_response) + return ( + serve_status["app_status"]["status"] == "DEPLOY_FAILED" + and "some_wrong_url" in serve_status["deployment_statuses"][0]["message"] + ) + + wait_for_condition(check_for_failed_deployment, timeout=15) + + @pytest.mark.skipif(sys.platform == "win32", reason="File path incorrect on Windows.") def test_status_multi_app(ray_start_stop): """Deploys a multi-app config file and checks their status.""" diff --git a/python/ray/serve/tests/test_cluster.py b/python/ray/serve/tests/test_cluster.py index 97d8927894d1..209e6da71c76 100644 --- a/python/ray/serve/tests/test_cluster.py +++ b/python/ray/serve/tests/test_cluster.py @@ -152,7 +152,7 @@ def get_replicas(replica_state): replica = get_replicas(ReplicaState.STARTING)[0] # currently there are no resources to allocate the replica - assert replica.check_started() == ReplicaStartupStatus.PENDING_ALLOCATION + assert replica.check_started()[0] == ReplicaStartupStatus.PENDING_ALLOCATION # add the necessary resources to allocate the replica cluster.add_node(num_cpus=4) @@ -160,7 +160,7 @@ def get_replicas(replica_state): wait_for_condition(lambda: (ray.available_resources().get("CPU", 0) >= 2)) def is_replica_pending_initialization(): - status = replica.check_started() + status, _ = replica.check_started() print(status) return status == ReplicaStartupStatus.PENDING_INITIALIZATION @@ -169,7 +169,7 @@ def is_replica_pending_initialization(): # send signal to complete replica intialization signal.send.remote() wait_for_condition( - lambda: replica.check_started() == ReplicaStartupStatus.SUCCEEDED + lambda: replica.check_started()[0] == 
ReplicaStartupStatus.SUCCEEDED ) diff --git a/python/ray/serve/tests/test_config_files/deployment_fail.yaml b/python/ray/serve/tests/test_config_files/deployment_fail.yaml index 25db75ea389d..a7d87c39243c 100644 --- a/python/ray/serve/tests/test_config_files/deployment_fail.yaml +++ b/python/ray/serve/tests/test_config_files/deployment_fail.yaml @@ -1 +1 @@ -import_path: fail.node +import_path: ray.serve.tests.test_config_files.fail.node diff --git a/python/ray/serve/tests/test_config_files/fail.py b/python/ray/serve/tests/test_config_files/fail.py index 72dca4d5e478..4b69ed6aed89 100644 --- a/python/ray/serve/tests/test_config_files/fail.py +++ b/python/ray/serve/tests/test_config_files/fail.py @@ -1 +1,10 @@ -1 / 0 +from ray import serve + + +@serve.deployment +class A: + def __init__(self): + 1 / 0 + + +node = A.bind() diff --git a/python/ray/serve/tests/test_config_files/sqlalchemy.py b/python/ray/serve/tests/test_config_files/sqlalchemy.py new file mode 100644 index 000000000000..6ec900f40f4d --- /dev/null +++ b/python/ray/serve/tests/test_config_files/sqlalchemy.py @@ -0,0 +1,15 @@ +from ray import serve + + +@serve.deployment +class TestDeployment: + def __init__(self): + from sqlalchemy import create_engine + import pymysql + + pymysql.install_as_MySQLdb() + + create_engine("mysql://some_wrong_url:3306").connect() + + +app = TestDeployment.bind() diff --git a/python/ray/serve/tests/test_config_files/sqlalchemy.yaml b/python/ray/serve/tests/test_config_files/sqlalchemy.yaml new file mode 100644 index 000000000000..ebc54442a148 --- /dev/null +++ b/python/ray/serve/tests/test_config_files/sqlalchemy.yaml @@ -0,0 +1,13 @@ +import_path: ray.serve.tests.test_config_files.sqlalchemy.app + +host: 127.0.0.1 +port: 8000 + +deployments: + - name: TestDeployment + num_replicas: 1 + ray_actor_options: + runtime_env: + pip: + - PyMySQL + - sqlalchemy==1.3.19 \ No newline at end of file diff --git a/python/ray/serve/tests/test_deployment_state.py 
b/python/ray/serve/tests/test_deployment_state.py index 646a676fc3ba..c10a405390ee 100644 --- a/python/ray/serve/tests/test_deployment_state.py +++ b/python/ray/serve/tests/test_deployment_state.py @@ -146,7 +146,7 @@ def check_ready(self) -> ReplicaStartupStatus: self.recovering = False self.started = True self.version = self.starting_version - return ready + return ready, None def resource_requirements(self) -> Tuple[str, str]: assert self.started From 674e4950b675b37b943a372f998bf5eeae074864 Mon Sep 17 00:00:00 2001 From: Cindy Zhang Date: Thu, 4 May 2023 14:12:17 -0700 Subject: [PATCH 235/424] [serve] Make applications required in multi-app config (#34401) Make applications required in multi-app config. --- python/ray/serve/schema.py | 4 ++-- python/ray/serve/tests/test_schema.py | 12 ++++++++++++ python/ray/serve/tests/test_standalone2.py | 21 +++++++++++++++++++++ 3 files changed, 35 insertions(+), 2 deletions(-) diff --git a/python/ray/serve/schema.py b/python/ray/serve/schema.py index ee61b8c5f0e2..a72d622d49c1 100644 --- a/python/ray/serve/schema.py +++ b/python/ray/serve/schema.py @@ -319,6 +319,7 @@ class ServeApplicationSchema(BaseModel, extra=Extra.forbid): ), ) import_path: str = Field( + ..., description=( "An import path to a bound deployment node. Should be of the " 'form "module.submodule_1...submodule_n.' @@ -535,8 +536,7 @@ class ServeDeploySchema(BaseModel, extra=Extra.forbid): default=HTTPOptionsSchema(), description="Options to start the HTTP Proxy with." 
) applications: List[ServeApplicationSchema] = Field( - default=[], - description=("The set of Serve applications to run on the Ray cluster."), + ..., description=("The set of Serve applications to run on the Ray cluster.") ) @validator("applications") diff --git a/python/ray/serve/tests/test_schema.py b/python/ray/serve/tests/test_schema.py index cfed7f823973..5c69af93080a 100644 --- a/python/ray/serve/tests/test_schema.py +++ b/python/ray/serve/tests/test_schema.py @@ -659,6 +659,18 @@ def test_deploy_empty_name(self): # Error message should be descriptive, mention name must be nonempty assert "name" in str(e.value) and "empty" in str(e.value) + def test_deploy_no_applications(self): + """Applications must be specified.""" + + deploy_config_dict = { + "http_options": { + "host": "127.0.0.1", + "port": 8000, + }, + } + with pytest.raises(ValidationError): + ServeDeploySchema.parse_obj(deploy_config_dict) + class TestServeStatusSchema: def get_valid_serve_status_schema(self): diff --git a/python/ray/serve/tests/test_standalone2.py b/python/ray/serve/tests/test_standalone2.py index 6f627f36daac..285fd0f4df3f 100644 --- a/python/ray/serve/tests/test_standalone2.py +++ b/python/ray/serve/tests/test_standalone2.py @@ -1640,6 +1640,27 @@ def check_app_message(): wait_for_condition(check_app_message) + def test_deploy_with_no_applications(self, client: ServeControllerClient): + """Deploy an empty list of applications, serve should just be started.""" + + config = ServeDeploySchema.parse_obj({"applications": []}) + client.deploy_apps(config) + + def serve_running(): + ServeInstanceDetails.parse_obj( + ray.get(client._controller.get_serve_instance_details.remote()) + ) + actors = list_actors( + filters=[ + ("ray_namespace", "=", SERVE_NAMESPACE), + ("state", "=", "ALIVE"), + ] + ) + actor_names = [actor["class_name"] for actor in actors] + return "ServeController" in actor_names and "HTTPProxyActor" in actor_names + + wait_for_condition(serve_running) + def 
test_deployments_not_listed_in_config(self, client: ServeControllerClient): """Apply a config without the app's deployments listed. The deployments should not redeploy. From 07c3241c8b50602b7d314b4c568ee22fcfe24404 Mon Sep 17 00:00:00 2001 From: Cuong Nguyen <128072568+can-anyscale@users.noreply.github.com> Date: Thu, 4 May 2023 14:21:27 -0700 Subject: [PATCH 236/424] [ci/bazel][1] bazelize some unit-tests (#35030) Bazelize some of the tests I created. Without this it does not run on CI at all. Signed-off-by: Cuong Nguyen --- release/BUILD | 22 +++++++++++++++++++ release/ray_release/tests/test_bisect.py | 6 +++++ .../ray_release/tests/test_log_aggregator.py | 6 +++++ 3 files changed, 34 insertions(+) diff --git a/release/BUILD b/release/BUILD index fd2326c4a15c..42918e52ba2a 100644 --- a/release/BUILD +++ b/release/BUILD @@ -371,6 +371,17 @@ py_test( ], ) +py_test( + name = "test_bisect", + size = "small", + srcs = ["ray_release/tests/test_bisect.py"], + tags = [ + "release_unit", + "team:ci", + ], + deps = ["//:ray_lib"], +) + py_test( name = "test_buildkite", size = "small", @@ -422,6 +433,17 @@ py_test( ], ) +py_test( + name = "test_log_aggregator", + size = "small", + srcs = ["ray_release/tests/test_log_aggregator.py"], + tags = [ + "release_unit", + "team:ci", + ], + deps = ["//:ray_lib"], +) + py_test( name = "test_run_script", size = "small", diff --git a/release/ray_release/tests/test_bisect.py b/release/ray_release/tests/test_bisect.py index 5e2e89f748bb..8c75abf1bc9c 100644 --- a/release/ray_release/tests/test_bisect.py +++ b/release/ray_release/tests/test_bisect.py @@ -1,3 +1,5 @@ +import sys +import pytest from unittest import mock from typing import List, Dict from ray_release.scripts.ray_bisect import _bisect, _obtain_test_result, _sanity_check @@ -84,3 +86,7 @@ def _mock_run_test( ): for concurreny in range(1, 4): assert _bisect({}, list(input.keys()), concurreny, 1) == output + + +if __name__ == "__main__": + sys.exit(pytest.main(["-v", __file__])) 
diff --git a/release/ray_release/tests/test_log_aggregator.py b/release/ray_release/tests/test_log_aggregator.py index 5bc88db568d2..08293ca0dab6 100644 --- a/release/ray_release/tests/test_log_aggregator.py +++ b/release/ray_release/tests/test_log_aggregator.py @@ -1,3 +1,5 @@ +import sys +import pytest from ray_release.log_aggregator import LogAggregator @@ -66,3 +68,7 @@ def test_compute_stack_trace(): ) == error_trace ) + + +if __name__ == "__main__": + sys.exit(pytest.main(["-v", __file__])) From 747772232330030a40fcec2d3b6863e7ccffa9a1 Mon Sep 17 00:00:00 2001 From: Justin Yu Date: Thu, 4 May 2023 14:40:46 -0700 Subject: [PATCH 237/424] [Tune] Make `Tuner.restore(trainable=...) a required argument (#34982) **Hard deprecation**: This PR makes `Tuner.restore(..., trainable)` a required argument. This removes the special case of `tune.with_parameters` throwing a very complicated error message if you try to restore without re-specifying the trainable. Users will have access to the `trainable` on restore anyways, and the restore API now mirrors the regular `Tuner` constructor, where `trainable` is required and `param_space` is optional. This PR also simplifies validation logic on restore -- and also adding loose validation that the `param_space` has at least the same keys as before (preventing users from trying to add new hyperparameters after restoration). 
Signed-off-by: Justin Yu --- python/ray/train/tests/test_tune.py | 16 +- python/ray/tune/impl/tuner_internal.py | 385 ++++++++++++-------- python/ray/tune/tests/test_tuner_restore.py | 57 ++- python/ray/tune/trainable/util.py | 32 -- python/ray/tune/tuner.py | 28 +- 5 files changed, 298 insertions(+), 220 deletions(-) diff --git a/python/ray/train/tests/test_tune.py b/python/ray/train/tests/test_tune.py index 3745313dd98d..900d036d19c6 100644 --- a/python/ray/train/tests/test_tune.py +++ b/python/ray/train/tests/test_tune.py @@ -185,7 +185,7 @@ def train_func(config): tuner = Tuner( trainer, param_space={"train_loop_config": {"max_iter": 10}}, - ).restore(trial.local_dir) + ).restore(trial.local_dir, trainable=trainer) analysis = tuner.fit()._experiment_analysis trial_dfs = list(analysis.trial_dataframes.values()) assert len(trial_dfs[0]["training_iteration"]) == 5 @@ -261,14 +261,12 @@ def train_func(config): ) caplog.clear() with caplog.at_level(logging.WARNING, logger="ray.tune.impl.tuner_internal"): - with pytest.warns() as warn_record: - tuner = Tuner.restore( - str(tmpdir / "restore_new_trainer"), - trainable=trainer, - resume_errored=True, - ) - # Should warn about the RunConfig being ignored - assert any("RunConfig" in str(record.message) for record in warn_record) + tuner = Tuner.restore( + str(tmpdir / "restore_new_trainer"), + trainable=trainer, + resume_errored=True, + ) + assert "they will be ignored in the resumed run" in caplog.text results = tuner.fit() assert not results.errors diff --git a/python/ray/tune/impl/tuner_internal.py b/python/ray/tune/impl/tuner_internal.py index 13e64c0d8ada..b92dcab2da18 100644 --- a/python/ray/tune/impl/tuner_internal.py +++ b/python/ray/tune/impl/tuner_internal.py @@ -7,7 +7,17 @@ import shutil import tempfile from pathlib import Path -from typing import Any, Callable, Dict, Optional, Type, Union, TYPE_CHECKING, Tuple +from typing import ( + Any, + Callable, + Dict, + List, + Optional, + Type, + Union, + 
TYPE_CHECKING, + Tuple, +) import ray import ray.cloudpickle as pickle @@ -23,13 +33,13 @@ from ray.tune.trainable import Trainable from ray.tune.tune import run from ray.tune.tune_config import TuneConfig +from ray.tune.utils import flatten_dict if TYPE_CHECKING: from ray.train.trainer import BaseTrainer from ray.util.queue import Queue -_TRAINABLE_PKL = "trainable.pkl" _TUNER_PKL = "tuner.pkl" _TRAINABLE_KEY = "_trainable" _CONVERTED_TRAINABLE_KEY = "_converted_trainable" @@ -90,29 +100,16 @@ def __init__( param_space=param_space, ) - self.trainable = trainable - param_space = param_space or {} - if isinstance(param_space, _Config): - param_space = param_space.to_dict() - if not isinstance(param_space, dict): - raise ValueError( - "The `param_space` passed to the Tuner` must be a dict. " - f"Got '{type(param_space)}' instead." - ) - self.param_space = param_space - self._tune_config = tune_config or TuneConfig() self._run_config = run_config or RunConfig() - self._missing_params_error_message = None - # Restore from Tuner checkpoint. if restore_path: self._restore_from_path_or_uri( path_or_uri=restore_path, - resume_config=resume_config, - overwrite_trainable=trainable, + trainable=trainable, overwrite_param_space=param_space, + resume_config=resume_config, ) return @@ -120,14 +117,18 @@ def __init__( if not trainable: raise TuneError("You need to provide a trainable to tune.") - self._is_restored = False - self._resume_config = None + self.trainable = trainable + assert self.converted_trainable + self._validate_trainable(self.converted_trainable) + + self.param_space = param_space + self._resume_config = None + self._is_restored = False self._tuner_kwargs = copy.deepcopy(_tuner_kwargs) or {} self._experiment_checkpoint_dir = self.setup_create_experiment_checkpoint_dir( self.converted_trainable, self._run_config ) - self._experiment_analysis = None # This needs to happen before `tune.run()` is kicked in. @@ -138,22 +139,7 @@ def __init__( # to restore from. 
experiment_checkpoint_path = Path(self._experiment_checkpoint_dir) with open(experiment_checkpoint_path / _TUNER_PKL, "wb") as fp: - pickle.dump(self, fp) - - try: - with open(experiment_checkpoint_path / _TRAINABLE_PKL, "wb") as fp: - pickle.dump(self.trainable, fp) - except TypeError as e: - sio = io.StringIO() - inspect_serializability(self.trainable, print_file=sio) - msg = ( - "The provided trainable is not serializable, which is a requirement " - "since the trainable is serialized and deserialized when transferred " - "to remote workers. See below for a trace of the non-serializable " - "objects that were found in your trainable:\n" - f"{sio.getvalue()}" - ) - raise TypeError(msg) from e + pickle.dump(self.__getstate__(), fp) self._maybe_warn_resource_contention() @@ -225,129 +211,211 @@ def _maybe_warn_resource_contention(self): stacklevel=4, ) - def _validate_overwrite_trainable( - self, - original_trainable: TrainableTypeOrTrainer, - overwrite_trainable: Optional[TrainableTypeOrTrainer], + def _validate_trainable( + self, trainable: TrainableType, required_trainable_name: Optional[str] = None ): - """Determines whether the new `overwrite_trainable` is compatible - with the restored experiment with some basic sanity checks - (ensuring same type and name as the original trainable). - """ + """Determines whether or not the trainable is valid. - # TODO(ml-team): Remove (https://github.com/ray-project/ray/issues/33546) - # Check if the trainable was wrapped with `tune.with_parameters`, - # Set the Tuner to fail on fit if the trainable is not re-specified. - trainable_wrapped_params = getattr( - original_trainable, "_attached_param_names", None - ) - if trainable_wrapped_params and not overwrite_trainable: - self._missing_params_error_message = ( - "The original trainable cannot be used to resume training, since " - "`tune.with_parameters` attached references to objects " - "in the Ray object store that may not exist anymore. 
" - "You must re-supply the trainable with the same parameters " - f"{trainable_wrapped_params} attached:\n\n" - "from ray import tune\n\n" - "# Reconstruct the trainable with the same parameters\n" - "trainable_with_params = tune.with_parameters(trainable, ...)\n" - "tuner = tune.Tuner.restore(\n" - " ..., trainable=trainable_with_params\n" - ")\n\nSee https://docs.ray.io/en/latest/tune/api/doc/" - "ray.tune.with_parameters.html for more details." + This includes checks on the serializability of the trainable, as well + asserting that the trainable name is as expected on restoration. + + This trainable name validation is needed due to an implementation detail + where the trainable name (which is differently generated depending on + the trainable type) is saved in the Trial metadata and needs to match + upon restoration. This does not affect the typical path, since `Tuner.restore` + expects the exact same trainable (which will have the same name). + + Raises: + ValueError: if the trainable name does not match or if the trainable + is not serializable. + """ + try: + pickle.dumps(trainable) + except TypeError as e: + sio = io.StringIO() + inspect_serializability(trainable, print_file=sio) + msg = ( + "The provided trainable is not serializable, which is a requirement " + "since the trainable is serialized and deserialized when transferred " + "to remote workers. See below for a trace of the non-serializable " + "objects that were found in your trainable:\n" + f"{sio.getvalue()}" ) - if not overwrite_trainable: + raise TypeError(msg) from e + + if not required_trainable_name: return - error_message = ( - "Invalid trainable input. To avoid errors, pass in the same trainable " - "that was used to initialize the Tuner." 
- ) + trainable_name = Experiment.get_trainable_name(trainable) - if type(original_trainable) != type(overwrite_trainable): + if trainable_name != required_trainable_name: raise ValueError( - f"{error_message}\n" - f"Got new trainable of type {type(overwrite_trainable)} " - f"but expected {type(original_trainable)}." + "Invalid `trainable` input to `Tuner.restore()`. To fix this error, " + "pass in the same trainable that was used to initialize the Tuner. " + "Got a trainable with identifier " + f"'{trainable_name}' but expected '{required_trainable_name}'." ) - from ray.train.trainer import BaseTrainer + def _set_trainable_on_restore( + self, trainable: TrainableType, old_trainable_name: Optional[str] + ): + from ray.train.base_trainer import BaseTrainer - if isinstance(overwrite_trainable, BaseTrainer): - if overwrite_trainable.run_config != original_trainable.run_config: - warnings.warn( - "Overwriting the AIR Trainer with a new `RunConfig` is not " - "supported - the restored experiment will continue with the old " - "config. To avoid this warning, revert changes made to `RunConfig`." - ) - overwrite_trainable.run_config = original_trainable.run_config - else: - original_name = Experiment.get_trainable_name(original_trainable) - overwrite_name = Experiment.get_trainable_name(overwrite_trainable) - if original_name != overwrite_name: - raise ValueError( - f"{error_message}\nGot new trainable with identifier " - f"{overwrite_name} but expected {original_name}." 
+ self.trainable = trainable + assert self.converted_trainable + self._validate_trainable( + trainable=self.converted_trainable, + required_trainable_name=old_trainable_name, + ) + + if isinstance(self.trainable, BaseTrainer): + # Log a warning in case the user tries to modify the + # `RunConfig` from the Trainer + trainer: BaseTrainer = self.trainable + + # Only log if the Trainer has a non-default RunConfig + if trainer.run_config != RunConfig(): + logger.warning( + "The Tune experiment will restore using the original run's " + "`RunConfig`. If you made any changes to the `RunConfig` " + "within the Trainer you passed into `Tuner.restore`, " + "they will be ignored in the resumed run." ) - def _restore_from_path_or_uri( + trainer.run_config = self._run_config + + def _validate_param_space_on_restore( self, - path_or_uri: str, - resume_config: Optional[_ResumeConfig], - overwrite_trainable: Optional[TrainableTypeOrTrainer], - overwrite_param_space: Optional[Dict[str, Any]], + new_param_space: Dict[str, Any], + flattened_param_space_keys: Optional[List[str]], ): - # Sync down from cloud storage if needed - synced, experiment_checkpoint_dir = self._maybe_sync_down_tuner_state( - path_or_uri - ) - experiment_checkpoint_path = Path(experiment_checkpoint_dir) + """Determines whether the (optionally) re-specified `param_space` is valid. - if ( - not (experiment_checkpoint_path / _TRAINABLE_PKL).exists() - or not (experiment_checkpoint_path / _TUNER_PKL).exists() - ): + This method performs very loose validation on the new param_space to + prevent users from trying to specify new hyperparameters to tune over. + + Raises: + ValueError: if not all keys match the original param_space. + """ + if flattened_param_space_keys is None: + # Backwards compatibility: skip validation + return + + keys = sorted(flatten_dict(new_param_space).keys()) + if keys != flattened_param_space_keys: + raise ValueError( + "Invalid `param_space` input to `Tuner.restore()`. 
To fix this error, " + "pass in the same `param_space` that was used to initialize the Tuner. " + "Only re-specify the `param_space` to refresh Ray object references " + "that no longer exist due to restoring from a new Ray cluster session. " + "It should not be used to introduce new hyperparameters to tune." + f"\n\nGot: {keys}\nExpected: {flattened_param_space_keys}" + ) + + def _set_param_space_on_restore( + self, + param_space: Optional[Dict[str, Any]], + flattened_param_space_keys: Optional[List[str]], + ): + self.param_space = param_space + + if self.param_space is not None: + # param_space = None -> use the original param_space + self._validate_param_space_on_restore( + new_param_space=self.param_space, + flattened_param_space_keys=flattened_param_space_keys, + ) + + def _load_tuner_state( + self, tuner_pkl_path: Path + ) -> Tuple[Optional[str], Optional[List[str]]]: + """Loads Tuner state from the previously saved `tuner.pkl`. + + Args: + tuner_pkl_path: pathlib.Path of the `tuner.pkl` file saved during the + original Tuner initialization. + + Returns: + tuple: of `(old_trainable_name, flattened_param_space_keys)` used for + validating the re-specified `trainable` and `param_space`. + """ + if not tuner_pkl_path.exists(): raise RuntimeError( f"Could not find Tuner state in restore directory. Did you pass" - f"the correct path (including experiment directory?) Got: " - f"{path_or_uri}" + f"the correct path (the top-level experiment directory?) Got: " + f"{tuner_pkl_path.parent}" ) - # Load trainable and tuner state - with open(experiment_checkpoint_path / _TRAINABLE_PKL, "rb") as fp: - trainable = pickle.load(fp) + with open(tuner_pkl_path, "rb") as fp: + tuner_state = pickle.load(fp) - with open(experiment_checkpoint_path / _TUNER_PKL, "rb") as fp: - tuner = pickle.load(fp) - self.__dict__.update(tuner.__dict__) + if isinstance(tuner_state, TunerInternal): + # TODO(ml-team): Remove in 2.7. 
+ # Backwards compatibility: ray<=2.4 pickles the full Tuner object + # within `tuner.pkl`. ray>=2.5 pickles the object state as a dict. + tuner: TunerInternal = tuner_state + self.__setstate__(tuner.__getstate__()) - self._validate_overwrite_trainable(trainable, overwrite_trainable) - if overwrite_trainable: - trainable = overwrite_trainable + logger.warning( + "You are restoring a Tune experiment that was run with an older " + "version of Ray. Note that backwards compatibility of restoring " + "this experiment will only be guaranteed until Ray 2.7." + ) - self._is_restored = True - self.trainable = trainable - if overwrite_param_space: - self.param_space = overwrite_param_space - self._resume_config = resume_config + old_trainable_name, flattened_param_space_keys = None, None + else: + # NOTE: These are magic keys used for validating restore args. + old_trainable_name = tuner_state.pop("__trainable_name", None) + flattened_param_space_keys = tuner_state.pop( + "__flattened_param_space_keys", None + ) - if not synced: - # If we didn't sync, use the restore_path local dir - self._experiment_checkpoint_dir = os.path.abspath( - os.path.expanduser(path_or_uri) - ) + self.__setstate__(tuner_state) - # Update local_dir to use the parent of the experiment path - # provided to `Tuner.restore` - experiment_path = Path(self._experiment_checkpoint_dir) - self._run_config.storage_path = str(experiment_path.parent) - self._run_config.name = experiment_path.name - else: - # Set the experiment `name` and `storage_path` according to the URI - uri = URI(path_or_uri) - self._run_config.name = uri.name - self._run_config.storage_path = str(uri.parent) + return old_trainable_name, flattened_param_space_keys + def _restore_from_path_or_uri( + self, + path_or_uri: str, + trainable: TrainableTypeOrTrainer, + overwrite_param_space: Optional[Dict[str, Any]], + resume_config: _ResumeConfig, + ): + # Sync down from cloud storage if needed + ( + restoring_from_cloud, + 
local_experiment_checkpoint_dir, + ) = self._maybe_sync_down_tuner_state(path_or_uri) + experiment_checkpoint_path = Path(local_experiment_checkpoint_dir) + + old_trainable_name, flattened_param_space_keys = self._load_tuner_state( + experiment_checkpoint_path / _TUNER_PKL + ) + + # Perform validation and set the re-specified `trainable` and `param_space` + self._set_trainable_on_restore( + trainable=trainable, old_trainable_name=old_trainable_name + ) + self._set_param_space_on_restore( + param_space=overwrite_param_space, + flattened_param_space_keys=flattened_param_space_keys, + ) + + # Update RunConfig to reflect changes in the experiment directory + path_or_uri_obj: Union[Path, URI] = ( + URI(path_or_uri) if restoring_from_cloud else experiment_checkpoint_path + ) + # Infer the `storage_path` and run `name` of the restored run using the + # experiment directory. + # Ex: ~/ray_results/exp_name -> ~/ray_results, exp_name + # Ex: s3://bucket/exp_name -> s3://bucket, exp_name + self._run_config.name = path_or_uri_obj.name + self._run_config.storage_path = str(path_or_uri_obj.parent) + + # Set the experiment directory + if not restoring_from_cloud: + self._experiment_checkpoint_dir = local_experiment_checkpoint_dir + else: # If we synced, `experiment_checkpoint_dir` will contain a temporary # directory. Create an experiment checkpoint dir instead and move # our data there. @@ -361,6 +429,7 @@ def _restore_from_path_or_uri( shutil.rmtree(experiment_checkpoint_path) self._experiment_checkpoint_dir = str(new_exp_path) + # Load the experiment results at the point where it left off. try: self._experiment_analysis = ExperimentAnalysis( experiment_checkpoint_path=path_or_uri, @@ -370,6 +439,9 @@ def _restore_from_path_or_uri( except Exception: self._experiment_analysis = None + self._resume_config = resume_config + self._is_restored = True + def _maybe_sync_down_tuner_state(self, restore_path: str) -> Tuple[bool, str]: """Sync down trainable state from remote storage. 
@@ -377,14 +449,11 @@ def _maybe_sync_down_tuner_state(self, restore_path: str) -> Tuple[bool, str]: Tuple of (downloaded from remote, local_dir) """ if not is_non_local_path_uri(restore_path): - return False, os.path.expanduser(restore_path) + return False, os.path.abspath(os.path.expanduser(restore_path)) tempdir = Path(tempfile.mkdtemp("tmp_experiment_dir")) restore_uri = URI(restore_path) - download_from_uri( - str(restore_uri / _TRAINABLE_PKL), str(tempdir / _TRAINABLE_PKL) - ) download_from_uri(str(restore_uri / _TUNER_PKL), str(tempdir / _TUNER_PKL)) return True, str(tempdir) @@ -476,13 +545,26 @@ def trainable(self, trainable: TrainableTypeOrTrainer): self._converted_trainable = self._convert_trainable(trainable) @property - def param_space(self) -> Dict[str, Any]: + def param_space(self) -> Optional[Dict[str, Any]]: return self._param_space @param_space.setter - def param_space(self, param_space: Dict[str, Any]): + def param_space(self, param_space: Optional[Dict[str, Any]]): + # Handle any configs that adhere to the `to_dict` interface. + # Ex: AlgorithmConfig from RLlib + if isinstance(param_space, _Config): + param_space = param_space.to_dict() + + if not isinstance(param_space, dict) and param_space is not None: + raise ValueError( + "The `param_space` passed to the `Tuner` must be a dict. " + f"Got '{type(param_space)}' instead." 
+ ) + self._param_space = param_space - self._process_scaling_config() + + if param_space: + self._process_scaling_config() def _convert_trainable(self, trainable: TrainableTypeOrTrainer) -> TrainableType: """Converts an AIR Trainer to a Tune trainable and saves the converted @@ -628,9 +710,6 @@ def _fit_resume( self, trainable: TrainableType, param_space: Optional[Dict[str, Any]] ) -> ExperimentAnalysis: """Fitting for a restored Tuner.""" - if self._missing_params_error_message: - raise ValueError(self._missing_params_error_message) - resume = "AUTO" if self._resume_config: @@ -665,10 +744,24 @@ def __getstate__(self): state["_tuner_kwargs"] = state["_tuner_kwargs"].copy() state["_tuner_kwargs"].pop("_remote_string_queue", None) state.pop(_TRAINABLE_KEY, None) - state.pop(_CONVERTED_TRAINABLE_KEY, None) - state.pop(_PARAM_SPACE_KEY, None) + trainable = state.pop(_CONVERTED_TRAINABLE_KEY, None) + param_space = state.pop(_PARAM_SPACE_KEY, None) state.pop(_EXPERIMENT_ANALYSIS_KEY, None) + + state["__trainable_name"] = ( + Experiment.get_trainable_name(trainable) if trainable else None + ) + state["__flattened_param_space_keys"] = ( + sorted(flatten_dict(param_space).keys()) + if param_space is not None + else None + ) + return state def __setstate__(self, state): + # Make sure the magic metadata gets removed first. 
+ state.pop("__flattened_param_space_keys", None) + state.pop("__trainable_name", None) + self.__dict__.update(state) diff --git a/python/ray/tune/tests/test_tuner_restore.py b/python/ray/tune/tests/test_tuner_restore.py index 034dfe916c05..fcc9fb566618 100644 --- a/python/ray/tune/tests/test_tuner_restore.py +++ b/python/ray/tune/tests/test_tuner_restore.py @@ -1,4 +1,5 @@ import json +import logging import os from pathlib import Path import shutil @@ -8,6 +9,7 @@ import pytest import ray +import ray.cloudpickle as ray_pickle from ray import tune from ray.air import ( Checkpoint, @@ -416,7 +418,6 @@ def _test_tuner_restore_from_cloud(tmpdir, configure_storage_path, storage_path) remote_contents = os.listdir(check_path / "exp_dir") assert "tuner.pkl" in remote_contents - assert "trainable.pkl" in remote_contents prev_cp = _find_newest_experiment_checkpoint(str(check_path / "exp_dir")) prev_lstat = os.lstat(prev_cp) @@ -429,7 +430,6 @@ def _test_tuner_restore_from_cloud(tmpdir, configure_storage_path, storage_path) assert results[0].metrics["_metric"] == 1 local_contents = os.listdir(tmpdir / "ray_results" / "exp_dir") assert "tuner.pkl" in local_contents - assert "trainable.pkl" in local_contents after_cp = _find_newest_experiment_checkpoint( str(tmpdir / "ray_results" / "exp_dir") @@ -595,7 +595,7 @@ def load_checkpoint(self, checkpoint_path): assert result.metrics["score"] == 2 -def test_restore_overwrite_trainable(ray_start_2_cpus, tmpdir, caplog): +def test_restore_overwrite_trainable(ray_start_2_cpus, tmpdir): """Test validation for trainable compatibility, when re-specifying a trainable on restore.""" @@ -633,7 +633,7 @@ def train_func_2(config): resume_errored=True, ) - # Can still change trainable code, but logs a warning + # Can technically change trainable code (not recommended!) 
def train_func_1(config): checkpoint = session.get_checkpoint() assert checkpoint and checkpoint.to_dict()["data"] == config["data"] @@ -707,8 +707,8 @@ def create_trainable_with_params(): fail_marker.unlink() tuner = Tuner.restore( str(tmp_path / exp_name), - resume_errored=True, trainable=create_trainable_with_params(), + resume_errored=True, ) results = tuner.fit() assert not results.errors @@ -1053,7 +1053,40 @@ def test_tuner_can_restore(tmp_path, upload_dir): assert not Tuner.can_restore(tmp_path / "new_exp") -def testParamSpaceOverwrite(tmp_path, monkeypatch): +def testParamSpaceOverwriteValidation(ray_start_4_cpus, tmp_path): + """Check that validation on restore fails if we try adding or removing + hyperparameters to the param_space.""" + name = "test_param_space_valid" + param_space = {"a": 1, "b": {"c": tune.choice([0, 1])}, "d": tune.uniform(0, 1)} + tuner = Tuner( + _train_fn_sometimes_failing, + param_space=param_space, + run_config=RunConfig(storage_path=str(tmp_path), name=name), + ) + tuner.fit() + + bad_param_spaces = [ + {}, + {"a": 1, "b": {}, "d": 2}, + {"a": 1, "b": {"c": 2, "e": 3}, "d": 4}, + ] + for bad_param_space in bad_param_spaces: + with pytest.raises(ValueError): + Tuner.restore( + str(tmp_path / name), + trainable=_train_fn_sometimes_failing, + param_space=bad_param_space, + ) + + # Should work with the original param space + Tuner.restore( + str(tmp_path / name), + trainable=_train_fn_sometimes_failing, + param_space=param_space, + ) + + +def testParamSpaceOverwrite(ray_start_4_cpus, tmp_path, monkeypatch): """Test that overwriting param space on restore propagates new refs to existing trials and newly generated trials.""" @@ -1133,6 +1166,18 @@ def train_fn(config): assert r.config["test2"].name in ["11", "12", "13", "14"] +def test_tuner_pkl_backwards_compatibility(tmp_path, caplog): + tuner_internal = Tuner( + _train_fn_sometimes_failing, param_space={"a": 1} + )._local_tuner + with open(tmp_path / "tuner.pkl", "wb") as f: + 
ray_pickle.dump(tuner_internal, f) + + with caplog.at_level(logging.WARNING, "ray.tune.impl.tuner_internal"): + tuner_internal._load_tuner_state(tmp_path / "tuner.pkl") + assert "run with an older version of Ray" in caplog.text + + if __name__ == "__main__": import sys diff --git a/python/ray/tune/trainable/util.py b/python/ray/tune/trainable/util.py index cac898afb89c..ff26376e220f 100644 --- a/python/ray/tune/trainable/util.py +++ b/python/ray/tune/trainable/util.py @@ -338,35 +338,6 @@ def step(self): tune.with_parameters(MyTrainable, data=data), # ... ) - - .. note:: - When restoring a Tune experiment, you need to re-specify the trainable - wrapped with ``tune.with_parameters``. - The reasoning behind this is as follows: - - 1. ``tune.with_parameters`` stores parameters in the object store and - attaches object references to the trainable, but the objects they point to - may not exist anymore upon restoring in a new Ray cluster. - - 2. The attached objects could be arbitrarily large, so Tune does not save the - object data along with the trainable. - - To restore, Tune allows the trainable to be re-specified in - :meth:`Tuner.restore(path, trainable=...) `. - Continuing from the previous examples, here's an example of restoration: - - .. code-block:: python - - from ray.tune import Tuner - - data = HugeDataset(download=True) - - tuner = Tuner.restore( - "/path/to/experiment/", - trainable=tune.with_parameters(MyTrainable, data=data), - # ... 
- ) - """ from ray.tune.trainable import Trainable @@ -437,9 +408,6 @@ def _inner(config): trainable_with_params._resources = trainable._resources trainable_with_params.__name__ = trainable_name - - # Mark this trainable as being wrapped by saving the attached parameter names - trainable_with_params._attached_param_names = keys return trainable_with_params diff --git a/python/ray/tune/tuner.py b/python/ray/tune/tuner.py index 42b1c63235a1..daa88006f745 100644 --- a/python/ray/tune/tuner.py +++ b/python/ray/tune/tuner.py @@ -1,7 +1,6 @@ import logging from pathlib import Path from typing import Any, Callable, Dict, Optional, Type, Union, TYPE_CHECKING -import warnings import ray @@ -175,16 +174,10 @@ def __init__( def restore( cls, path: str, - trainable: Optional[ - Union[str, Callable, Type[Trainable], "BaseTrainer"] - ] = None, + trainable: Union[str, Callable, Type[Trainable], "BaseTrainer"], resume_unfinished: bool = True, resume_errored: bool = False, restart_errored: bool = False, - # Deprecated - overwrite_trainable: Optional[ - Union[str, Callable, Type[Trainable], "BaseTrainer"] - ] = None, param_space: Optional[Dict[str, Any]] = None, ) -> "Tuner": """Restores Tuner after a previously failed run. @@ -215,7 +208,6 @@ def restore( trainable: The trainable to use upon resuming the experiment. This should be the same trainable that was used to initialize the original Tuner. - NOTE: Starting in 2.5, this will be a required parameter. param_space: The same `param_space` that was passed to the original Tuner. This can be optionally re-specified due to the `param_space` potentially containing Ray object @@ -230,30 +222,12 @@ def restore( restore from their latest checkpoints. restart_errored: If True, will re-schedule errored trials but force restarting them from scratch (no checkpoint will be loaded). - overwrite_trainable: Deprecated. Use the `trainable` argument instead. 
""" # TODO(xwjiang): Add some comments to clarify the config behavior across # retored runs. # For example, is callbacks supposed to be automatically applied # when a Tuner is restored and fit again? - if overwrite_trainable: - if not trainable: - trainable = overwrite_trainable - warning_message = ( - "`overwrite_trainable` has been renamed to `trainable`. " - "The old argument will be removed starting from version 2.5." - ) - warnings.warn(warning_message, DeprecationWarning) - - if not trainable: - warning_message = ( - "Passing in the experiment's `trainable` will be a required argument " - "to `Tuner.restore` starting from version 2.5. " - "Please specify the trainable to avoid this warning." - ) - warnings.warn(warning_message) - resume_config = _ResumeConfig( resume_unfinished=resume_unfinished, resume_errored=resume_errored, From e06c2c5df45a756d28144d9576f87c63061cc639 Mon Sep 17 00:00:00 2001 From: Eric Liang Date: Thu, 4 May 2023 15:06:56 -0700 Subject: [PATCH 238/424] [docs] [data] Fix some data docs nits (#35049) --- doc/source/data/doc_code/transforming_data.py | 28 +++++++++---------- .../examples/nyc_taxi_basic_processing.ipynb | 4 +-- doc/source/data/examples/ocr_example.ipynb | 4 +-- doc/source/data/transforming-data.rst | 10 ++++--- 4 files changed, 24 insertions(+), 22 deletions(-) diff --git a/doc/source/data/doc_code/transforming_data.py b/doc/source/data/doc_code/transforming_data.py index 6a348ade11f3..da79efe09486 100644 --- a/doc/source/data/doc_code/transforming_data.py +++ b/doc/source/data/doc_code/transforming_data.py @@ -48,11 +48,11 @@ def to_lowercase(row: Dict[str, Any]) -> Dict[str, Any]: ds = ray.data.read_csv("example://iris.csv") -def numpy_transform(arr: Dict[str, np.ndarray]) -> Dict[str, np.ndarray]: - new_col = arr["sepal.length"] / np.max(arr["sepal.length"]) - arr["normalized.sepal.length"] = new_col - del arr["sepal.length"] - return arr +def numpy_transform(batch: Dict[str, np.ndarray]) -> Dict[str, np.ndarray]: + 
new_col = batch["sepal.length"] / np.max(batch["sepal.length"]) + batch["normalized.sepal.length"] = new_col + del batch["sepal.length"] + return batch ds.map_batches(numpy_transform, batch_format="numpy").show(2) # -> {'sepal.width': 3.2, 'petal.length': 4.7, 'petal.width': 1.4, @@ -102,19 +102,19 @@ def pyarrow_transform(batch: pa.Table) -> pa.Table: # __datastream_compute_strategy_begin__ import ray -import pandas -import numpy +import pandas as pd +import numpy as np from ray.data import ActorPoolStrategy # Dummy model to predict Iris variety. -def predict_iris(df: pandas.DataFrame) -> pandas.DataFrame: +def predict_iris(df: pd.DataFrame) -> pd.DataFrame: conditions = [ (df["sepal.length"] < 5.0), (df["sepal.length"] >= 5.0) & (df["sepal.length"] < 6.0), (df["sepal.length"] >= 6.0) ] values = ["Setosa", "Versicolor", "Virginica"] - return pandas.DataFrame({"predicted_variety": numpy.select(conditions, values)}) + return pd.DataFrame({"predicted_variety": np.select(conditions, values)}) class IrisInferModel: # Do any expensive model setup in the __init__ function. @@ -122,7 +122,7 @@ def __init__(self): self._model = predict_iris # This method is called repeatedly by Ray Data to process batches. - def __call__(self, batch: pandas.DataFrame) -> pandas.DataFrame: + def __call__(self, batch: pd.DataFrame) -> pd.DataFrame: return self._model(batch) ds = ray.data.read_csv("example://iris.csv").repartition(10) @@ -166,8 +166,8 @@ def repeat_dataframe(df: pd.DataFrame) -> Iterator[pd.DataFrame]: # Repartition the data into 200 blocks, and force a full data shuffle. 
# This operation will be more expensive -ds = ds.repartition(200, shuffle=True) -# -> MaterializedDatastream(num_blocks=50, num_rows=10000, schema={id: int64}) +ds = ds.repartition(200, shuffle=True).materialize() +# -> MaterializedDatastream(num_blocks=200, num_rows=10000, schema={id: int64}) # __shuffle_end__ # __map_groups_begin__ @@ -181,7 +181,7 @@ def repeat_dataframe(df: pd.DataFrame) -> Iterator[pd.DataFrame]: # The user function signature for `map_groups` is the same as that of `map_batches`. # It takes in a batch representing the grouped data, and must return a batch of # zero or more records as the result. -def process_group(group: Dict[str, np.ndarray]) -> Dict[str, np.ndarray]: +def custom_count(group: Dict[str, np.ndarray]) -> Dict[str, np.ndarray]: # Since we are grouping by variety, all elements in this batch are equal. variety = group["variety"][0] count = len(group["variety"]) @@ -191,7 +191,7 @@ def process_group(group: Dict[str, np.ndarray]) -> Dict[str, np.ndarray]: "count": np.array([count]), } -ds = ds.groupby("variety").map_groups(process_group) +ds = ds.groupby("variety").map_groups(custom_count) ds.show() # -> {'variety': 'Setosa', 'count': 50} # {'variety': 'Versicolor', 'count': 50} diff --git a/doc/source/data/examples/nyc_taxi_basic_processing.ipynb b/doc/source/data/examples/nyc_taxi_basic_processing.ipynb index 797bfae5b7b4..a8cbf4348e2b 100644 --- a/doc/source/data/examples/nyc_taxi_basic_processing.ipynb +++ b/doc/source/data/examples/nyc_taxi_basic_processing.ipynb @@ -912,7 +912,7 @@ } ], "source": [ - "ds.map_batches(BatchInferModel, batch_size=2048, compute=\"actors\").take()" + "ds.map_batches(BatchInferModel, batch_size=2048, compute=ray.data.ActorPoolStrategy()).take()" ] }, { @@ -974,7 +974,7 @@ " BatchInferModel,\n", " batch_size=256,\n", " #num_gpus=1, # Uncomment this to run this on GPUs!\n", - " compute=\"actors\",\n", + " compute=ray.data.ActorPoolStrategy(),\n", ").take()" ] }, diff --git 
a/doc/source/data/examples/ocr_example.ipynb b/doc/source/data/examples/ocr_example.ipynb index 230501b6c399..6f12fb052583 100644 --- a/doc/source/data/examples/ocr_example.ipynb +++ b/doc/source/data/examples/ocr_example.ipynb @@ -251,7 +251,7 @@ " df[\"score\"] = [doc._.language[\"score\"] for doc in docs]\n", " return df\n", "\n", - "results.limit(10).map_batches(SpacyBatchInference, compute=\"actors\")" + "results.limit(10).map_batches(SpacyBatchInference, compute=ray.data.ActorPoolStrategy())" ] }, { @@ -269,7 +269,7 @@ "metadata": {}, "outputs": [], "source": [ - "languages = results.map_batches(SpacyBatchInference, compute=\"actors\")\n", + "languages = results.map_batches(SpacyBatchInference, compute=ray.data.ActorPoolStrategy())\n", "languages.groupby(\"language\").count().show()" ] }, diff --git a/doc/source/data/transforming-data.rst b/doc/source/data/transforming-data.rst index a3af026c5f7e..d3b3a5bfda62 100644 --- a/doc/source/data/transforming-data.rst +++ b/doc/source/data/transforming-data.rst @@ -74,12 +74,14 @@ Use ``map_batches`` to efficiently transform records in batches, or ``map`` to t :start-after: __map_begin__ :end-before: __map_end__ -Configuring resources -===================== +Configuring CPUs and GPUs +========================= By default, each task used for (e.g., `map` or `map_batches`) requests 1 CPU from Ray. To increase the resources reserved per task, you can increase the CPU request by specifying -``.map_batches(..., num_cpus=)``, which will instead reserve ``N`` CPUs per task: +``.map_batches(..., num_cpus=)``, which will instead reserve ``N`` CPUs per task. +Increasing the CPUs per task can help with avoiding out of memory (OOM) errors +for resource intensive tasks. .. code-block:: python @@ -273,7 +275,7 @@ Note that Ray Data currently only supports grouping by a single column. In order Map Groups ========== -Arbitrary processing can be applied to each group of records using :meth:`ds.groupby().map_groups() `. 
For example, this could be used to implement custom aggregations, train a model per group, etc. +Custom processing can be applied to each group of records using :meth:`ds.groupby().map_groups() `. For example, this could be used to implement custom aggregations, train a model per group, etc. .. literalinclude:: ./doc_code/transforming_data.py :language: python From 71240481a3d87e16a06d85d44d4186683833309a Mon Sep 17 00:00:00 2001 From: Ricky Xu Date: Fri, 5 May 2023 06:35:50 +0800 Subject: [PATCH 239/424] [core][dashboard] [no_early_kickoff] Make dashboard address connectable from remote nodes when not set to 127.0.0.1 (localhost) (#35027) Why are these changes needed? This PR changes how dashboard host ip in the cluster is made available through internal kv. This PR sets the dashboard url to node's ip address if it's not using localhost. This is needed such that state api works across clusters. Signed-off-by: rickyyx --- dashboard/head.py | 11 ++++++++- .../modules/job/tests/test_http_job_server.py | 1 - dashboard/tests/test_dashboard.py | 24 +++++++++++++++---- python/ray/_private/services.py | 4 +++- python/ray/scripts/scripts.py | 4 ++-- 5 files changed, 35 insertions(+), 9 deletions(-) diff --git a/dashboard/head.py b/dashboard/head.py index acf70961f969..85de8854639e 100644 --- a/dashboard/head.py +++ b/dashboard/head.py @@ -304,12 +304,21 @@ async def _async_notify(): logger.info("Initialize the http server.") self.http_server = await self._configure_http_server(modules) http_host, http_port = self.http_server.get_address() + logger.info(f"http server initialized at {http_host}:{http_port}") else: logger.info("http server disabled.") + + # We need to expose dashboard's node's ip for other worker nodes + # if it's listening to all interfaces. 
+ dashboard_http_host = ( + self.ip + if self.http_host != ray_constants.DEFAULT_DASHBOARD_IP + else http_host + ) await asyncio.gather( self.gcs_aio_client.internal_kv_put( ray_constants.DASHBOARD_ADDRESS.encode(), - f"{http_host}:{http_port}".encode(), + f"{dashboard_http_host}:{http_port}".encode(), True, namespace=ray_constants.KV_NAMESPACE_DASHBOARD, ), diff --git a/dashboard/modules/job/tests/test_http_job_server.py b/dashboard/modules/job/tests/test_http_job_server.py index 4b4c1b5378a5..0351a9f7773d 100644 --- a/dashboard/modules/job/tests/test_http_job_server.py +++ b/dashboard/modules/job/tests/test_http_job_server.py @@ -610,7 +610,6 @@ def test_version_endpoint(job_sdk_client): def test_request_headers(job_sdk_client): client = job_sdk_client - with patch("requests.request") as mock_request: _ = client._do_request( "POST", diff --git a/dashboard/tests/test_dashboard.py b/dashboard/tests/test_dashboard.py index bb0a3b2e4dce..3b8937fd7956 100644 --- a/dashboard/tests/test_dashboard.py +++ b/dashboard/tests/test_dashboard.py @@ -277,22 +277,38 @@ def test_agent_report_unexpected_raylet_death_large_file(shutdown_only): "ray_start_with_dashboard", [ {"dashboard_host": "127.0.0.1"}, + {"dashboard_host": "localhost"}, + ], + indirect=True, +) +def test_dashboard_address_local(ray_start_with_dashboard): + webui_url = ray_start_with_dashboard["webui_url"] + if os.environ.get("RAY_MINIMAL") == "1": + # In the minimal installation, webui url shouldn't be configured. 
+ assert webui_url == "" + else: + webui_ip = webui_url.split(":")[0] + assert not ipaddress.ip_address(webui_ip).is_unspecified + assert webui_ip == "127.0.0.1" + + +@pytest.mark.parametrize( + "ray_start_with_dashboard", + [ {"dashboard_host": "0.0.0.0"}, {"dashboard_host": "::"}, ], indirect=True, ) -def test_dashboard_address(ray_start_with_dashboard): +def test_dashboard_address_global(ray_start_with_dashboard): webui_url = ray_start_with_dashboard["webui_url"] if os.environ.get("RAY_MINIMAL") == "1": # In the minimal installation, webui url shouldn't be configured. assert webui_url == "" else: webui_ip = webui_url.split(":")[0] - print(ipaddress.ip_address(webui_ip)) - print(webui_ip) assert not ipaddress.ip_address(webui_ip).is_unspecified - assert webui_ip in ["127.0.0.1", ray_start_with_dashboard["node_ip_address"]] + assert webui_ip == ray_start_with_dashboard["node_ip_address"] @pytest.mark.skipif( diff --git a/python/ray/_private/services.py b/python/ray/_private/services.py index cc7d620f44cd..42cf2c3837ca 100644 --- a/python/ray/_private/services.py +++ b/python/ray/_private/services.py @@ -1084,7 +1084,9 @@ def start_api_server( no redirection should happen, then this should be None. Returns: - ProcessInfo for the process that was started. + A tuple of : + - Dashboard URL if dashboard enabled and started. + - ProcessInfo for the process that was started. """ try: # Make sure port is available. diff --git a/python/ray/scripts/scripts.py b/python/ray/scripts/scripts.py index d183522d1809..85bfea133c2b 100644 --- a/python/ray/scripts/scripts.py +++ b/python/ray/scripts/scripts.py @@ -404,10 +404,10 @@ def debug(address): @click.option( "--dashboard-host", required=False, - default="localhost", + default=ray_constants.DEFAULT_DASHBOARD_IP, help="the host to bind the dashboard server to, either localhost " "(127.0.0.1) or 0.0.0.0 (available from all interfaces). 
By default, this " - "is localhost.", + "is 127.0.0.1", ) @click.option( "--dashboard-port", From 25e4efc517d08b628aee2dde32f9019e72b3bc49 Mon Sep 17 00:00:00 2001 From: pomcho555 Date: Fri, 5 May 2023 07:59:59 +0900 Subject: [PATCH 240/424] [docs][aws] Add VolumeType to aws example (#34690) Why are these changes needed? I wanted to use different VolumeType of ec2 instance, but I didn't find any descripition inside Cluster YAML Configuration Options. That would be helpful for users like me. Related issue number #34570 Signed-off-by: pomcho555 --- python/ray/autoscaler/aws/example-full.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/python/ray/autoscaler/aws/example-full.yaml b/python/ray/autoscaler/aws/example-full.yaml index 49df110fc64c..18d7b88ef5a7 100644 --- a/python/ray/autoscaler/aws/example-full.yaml +++ b/python/ray/autoscaler/aws/example-full.yaml @@ -80,6 +80,7 @@ available_node_types: - DeviceName: /dev/sda1 Ebs: VolumeSize: 140 + VolumeType: gp3 # Additional options in the boto docs. ray.worker.default: # The minimum number of worker nodes of this type to launch. From ec1f808d010c62fc724e0277e0be12734f659c25 Mon Sep 17 00:00:00 2001 From: Cheng Su Date: Thu, 4 May 2023 16:33:51 -0700 Subject: [PATCH 241/424] Update data team code owners (#35058) Signed-off-by: Cheng Su --- .github/CODEOWNERS | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 8bc22a8aadfb..0036840885d1 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -76,8 +76,8 @@ # ==== Libraries and frameworks ==== # Ray data. -/python/ray/data/ @ericl @scv119 @c21 @amogkam @scottjlee @bveeramani -/doc/source/data/ @ericl @scv119 @c21 @amogkam @scottjlee @bveeramani @maxpumperla @ray-project/ray-docs +/python/ray/data/ @ericl @scv119 @c21 @amogkam @scottjlee @bveeramani @raulchen +/doc/source/data/ @ericl @scv119 @c21 @amogkam @scottjlee @bveeramani @raulchen @maxpumperla @ray-project/ray-docs # Ray workflows. 
/python/ray/workflow/ @ericl @iycheng @stephanie-wang @suquark From cc9ddaa5a4ff7e0c0cca2af5f210956baf37beea Mon Sep 17 00:00:00 2001 From: Cuong Nguyen <128072568+can-anyscale@users.noreply.github.com> Date: Thu, 4 May 2023 17:01:19 -0700 Subject: [PATCH 242/424] [ci/bisect] allow sanity check to run multiple times per commit (#35043) Signed-off-by: Cuong Nguyen --- release/ray_release/scripts/ray_bisect.py | 19 +++++++++++-------- release/ray_release/tests/test_bisect.py | 21 +++++++++++++-------- 2 files changed, 24 insertions(+), 16 deletions(-) diff --git a/release/ray_release/scripts/ray_bisect.py b/release/ray_release/scripts/ray_bisect.py index 63e45f3eb09a..907cc57b24f2 100644 --- a/release/ray_release/scripts/ray_bisect.py +++ b/release/ray_release/scripts/ray_bisect.py @@ -48,7 +48,9 @@ def main( f"Concurrency input need to be a positive number, received: {concurrency}" ) test = _get_test(test_name) - pre_sanity_check = _sanity_check(test, passing_commit, failing_commit) + pre_sanity_check = _sanity_check( + test, passing_commit, failing_commit, run_per_commit + ) if not pre_sanity_check: logger.info( "Failed pre-saniy check, the test might be flaky or fail due to" @@ -93,7 +95,9 @@ def _bisect( return commit_list[-1] -def _sanity_check(test: Test, passing_revision: str, failing_revision: str) -> bool: +def _sanity_check( + test: Test, passing_revision: str, failing_revision: str, run_per_commit: int +) -> bool: """ Sanity check that the test indeed passes on the passing revision, and fails on the failing revision @@ -102,15 +106,14 @@ def _sanity_check(test: Test, passing_revision: str, failing_revision: str) -> b f"Sanity check passing revision: {passing_revision}" f" and failing revision: {failing_revision}" ) - outcomes = _run_test(test, [passing_revision, failing_revision]) - return ( - outcomes[passing_revision][0] == "passed" - and outcomes[failing_revision][0] != "passed" - ) + outcomes = _run_test(test, [passing_revision, failing_revision], 
run_per_commit) + if any(map(lambda x: x != "passed", outcomes[passing_revision].values())): + return False + return any(map(lambda x: x != "passed", outcomes[failing_revision].values())) def _run_test( - test: Test, commits: Set[str], run_per_commit: int = 1 + test: Test, commits: Set[str], run_per_commit: int ) -> Dict[str, Dict[int, str]]: logger.info(f'Running test {test["name"]} on commits {commits}') for commit in commits: diff --git a/release/ray_release/tests/test_bisect.py b/release/ray_release/tests/test_bisect.py index 8c75abf1bc9c..a00ab10a5cb9 100644 --- a/release/ray_release/tests/test_bisect.py +++ b/release/ray_release/tests/test_bisect.py @@ -1,26 +1,31 @@ import sys import pytest from unittest import mock -from typing import List, Dict +from typing import List, Set, Dict from ray_release.scripts.ray_bisect import _bisect, _obtain_test_result, _sanity_check from ray_release.config import Test def test_sanity_check(): - def _mock_run_test(test: Test, commit: List[str]) -> Dict[str, Dict[int, str]]: + def _mock_run_test( + test: Test, commit: Set[str], run_per_commit: int + ) -> Dict[str, Dict[int, str]]: return { - "passing_revision": {0: "passed"}, - "failing_revision": {0: "failed"}, + "passing_revision": {0: "passed", 1: "passed"}, + "failing_revision": {0: "failed", 1: "failed"}, + "flaky_revision": {0: "failed", 1: "passed"}, } with mock.patch( "ray_release.scripts.ray_bisect._run_test", side_effect=_mock_run_test, ): - assert _sanity_check({}, "passing_revision", "failing_revision") - assert not _sanity_check({}, "failing_revision", "passing_revision") - assert not _sanity_check({}, "passing_revision", "passing_revision") - assert not _sanity_check({}, "failing_revision", "failing_revision") + assert _sanity_check({}, "passing_revision", "failing_revision", 2) + assert _sanity_check({}, "passing_revision", "flaky_revision", 2) + assert not _sanity_check({}, "failing_revision", "passing_revision", 2) + assert not _sanity_check({}, 
"passing_revision", "passing_revision", 2) + assert not _sanity_check({}, "failing_revision", "failing_revision", 2) + assert not _sanity_check({}, "flaky_revision", "failing_revision", 2) def test_obtain_test_result(): From 191d43570f3149a6f13df4ef131a22d547e40843 Mon Sep 17 00:00:00 2001 From: Jiajun Yao Date: Thu, 4 May 2023 21:17:18 -0700 Subject: [PATCH 243/424] Add actor_id as an attribute of RayActorError when the actor constructor fails (#34958) When the actor constructor fails, RayActorError is raised and we should set the actor_id attribute so that people know which actor fails. Signed-off-by: Jiajun Yao --- python/ray/_raylet.pyx | 26 +++++++++++++++++++------ python/ray/exceptions.py | 7 ++++++- python/ray/tests/test_actor_advanced.py | 3 ++- python/ray/tests/test_actor_failures.py | 4 ++++ python/ray/tests/test_traceback.py | 14 ++++++++----- 5 files changed, 41 insertions(+), 13 deletions(-) diff --git a/python/ray/_raylet.pyx b/python/ray/_raylet.pyx index 867d0a0dba7c..6a09c94859e0 100644 --- a/python/ray/_raylet.pyx +++ b/python/ray/_raylet.pyx @@ -591,6 +591,7 @@ cdef store_task_errors( exc, task_exception, actor, + actor_id, function_name, CTaskType task_type, proctitle, @@ -611,15 +612,22 @@ cdef store_task_errors( # Generate the actor repr from the actor class. actor_repr = repr(actor) if actor else None + if actor_id is None or actor_id.is_nil(): + actor_id = None + else: + actor_id = actor_id.hex() + if isinstance(exc, RayTaskError): # Avoid recursive nesting of RayTaskError. failure_object = RayTaskError(function_name, backtrace, exc.cause, proctitle=proctitle, - actor_repr=actor_repr) + actor_repr=actor_repr, + actor_id=actor_id) else: failure_object = RayTaskError(function_name, backtrace, exc, proctitle=proctitle, - actor_repr=actor_repr) + actor_repr=actor_repr, + actor_id=actor_id) # Pass the failure object back to the CoreWorker. 
# We also cap the size of the error message to the last @@ -703,6 +711,7 @@ cdef execute_dynamic_generator_and_store_task_outputs( worker, error, False, # task_exception None, # actor + None, # actor id function_name, task_type, title, dynamic_returns, application_error) if num_errors_stored == 0: @@ -745,6 +754,7 @@ cdef void execute_task( worker = ray._private.worker.global_worker manager = worker.function_actor_manager actor = None + actor_id = None cdef: CoreWorker core_worker = worker.core_worker JobID job_id = core_worker.get_current_job_id() @@ -785,7 +795,8 @@ cdef void execute_task( print(task_name_magic_token, end="") print(task_name_magic_token, file=sys.stderr, end="") else: - actor = worker.actors[core_worker.get_actor_id()] + actor_id = core_worker.get_actor_id() + actor = worker.actors[actor_id] class_name = actor.__class__.__name__ next_title = f"ray::{class_name}" @@ -871,7 +882,8 @@ cdef void execute_task( args, kwargs = ray._private.signature.recover_args(args) if (task_type == TASK_TYPE_ACTOR_CREATION_TASK): - actor = worker.actors[core_worker.get_actor_id()] + actor_id = core_worker.get_actor_id() + actor = worker.actors[actor_id] class_name = actor.__class__.__name__ actor_title = f"{class_name}({args!r}, {kwargs!r})" core_worker.set_actor_title(actor_title.encode("utf-8")) @@ -1010,7 +1022,7 @@ cdef void execute_task( returns) except Exception as e: num_errors_stored = store_task_errors( - worker, e, task_exception, actor, function_name, + worker, e, task_exception, actor, actor_id, function_name, task_type, title, returns, application_error) if returns[0].size() > 0 and num_errors_stored == 0: logger.exception( @@ -1144,7 +1156,9 @@ cdef execute_task_with_cancellation_handler( # Task cancellation can happen anytime so we don't really need # to differentiate between mid-task or not. 
False, # task_exception - actor, execution_info.function_name, + actor, + actor_id, + execution_info.function_name, task_type, title, returns, # application_error: we are passing NULL since we don't want the # cancel tasks to fail. diff --git a/python/ray/exceptions.py b/python/ray/exceptions.py index 12f895af4f40..276acfd372c6 100644 --- a/python/ray/exceptions.py +++ b/python/ray/exceptions.py @@ -102,6 +102,7 @@ def __init__( pid=None, ip=None, actor_repr=None, + actor_id=None, ): """Initialize a RayTaskError.""" import ray @@ -119,6 +120,7 @@ def __init__( self.function_name = function_name self.traceback_str = traceback_str self.actor_repr = actor_repr + self._actor_id = actor_id # TODO(edoakes): should we handle non-serializable exception objects? self.cause = cause assert traceback_str is not None @@ -183,7 +185,9 @@ def __str__(self): f"(pid={self.pid}, ip={self.ip}" ) if self.actor_repr: - traceback_line += f", repr={self.actor_repr})" + traceback_line += ( + f", actor_id={self._actor_id}, repr={self.actor_repr})" + ) else: traceback_line += ")" code_from_internal_file = False @@ -273,6 +277,7 @@ def __init__(self, cause: Union[RayTaskError, ActorDiedErrorContext] = None): self.error_msg = self.base_error_msg elif isinstance(cause, RayTaskError): self._actor_init_failed = True + self.actor_id = cause._actor_id self.error_msg = ( "The actor died because of an error" " raised in its creation task, " diff --git a/python/ray/tests/test_actor_advanced.py b/python/ray/tests/test_actor_advanced.py index 5f18194be475..d90008242b82 100644 --- a/python/ray/tests/test_actor_advanced.py +++ b/python/ray/tests/test_actor_advanced.py @@ -768,8 +768,9 @@ def f(self): # Verify an exception is thrown. 
a = Actor.remote() - with pytest.raises(ray.exceptions.RayActorError): + with pytest.raises(ray.exceptions.RayActorError) as excinfo: ray.get(a.f.remote()) + assert excinfo.value.actor_id == a._actor_id.hex() # Test an actor can be restarted successfully # afte it dies in its constructor. diff --git a/python/ray/tests/test_actor_failures.py b/python/ray/tests/test_actor_failures.py index 7e82a4975156..fe499b2181d7 100644 --- a/python/ray/tests/test_actor_failures.py +++ b/python/ray/tests/test_actor_failures.py @@ -712,6 +712,7 @@ def create_actor(self): ray.exceptions.RayActorError, match="it was killed by `ray.kill" ) as exc_info: ray.get(a.check_alive.remote()) + assert exc_info.value.actor_id == a._actor_id.hex() print(exc_info._excinfo[1]) # Test actor killed because of worker failure. @@ -723,6 +724,7 @@ def create_actor(self): match=("The actor is dead because its worker process has died"), ) as exc_info: ray.get(a.check_alive.remote()) + assert exc_info.value.actor_id == a._actor_id.hex() print(exc_info._excinfo[1]) # Test acator killed because of owner failure. @@ -734,6 +736,7 @@ def create_actor(self): match="The actor is dead because its owner has died", ) as exc_info: ray.get(a.check_alive.remote()) + assert exc_info.value.actor_id == a._actor_id.hex() print(exc_info._excinfo[1]) # Test actor killed because the node is dead. 
@@ -746,6 +749,7 @@ def create_actor(self): match="The actor is dead because its node has died.", ) as exc_info: ray.get(a.check_alive.remote()) + assert exc_info.value.actor_id == a._actor_id.hex() print(exc_info._excinfo[1]) diff --git a/python/ray/tests/test_traceback.py b/python/ray/tests/test_traceback.py index f5383782b4e4..be9d309351ea 100644 --- a/python/ray/tests/test_traceback.py +++ b/python/ray/tests/test_traceback.py @@ -67,7 +67,7 @@ def clean_noqa(ex): ) def test_actor_creation_stacktrace(ray_start_regular): """Test the actor creation task stacktrace.""" - expected_output = """The actor died because of an error raised in its creation task, ray::A.__init__() (pid=XXX, ip=YYY, repr=ZZZ) # noqa + expected_output = """The actor died because of an error raised in its creation task, ray::A.__init__() (pid=XXX, ip=YYY, actor_id={actor_id}, repr=ZZZ) # noqa File "FILE", line ZZ, in __init__ g(3) File "FILE", line ZZ, in g @@ -85,12 +85,14 @@ def __init__(self): def ping(self): pass + a = A.remote() try: - a = A.remote() ray.get(a.ping.remote()) except RayActorError as ex: print(ex) - assert clean_noqa(expected_output) == scrub_traceback(str(ex)) + assert clean_noqa( + expected_output.format(actor_id=a._actor_id.hex()) + ) == scrub_traceback(str(ex)) @pytest.mark.skipif( @@ -128,7 +130,7 @@ def f(): ) def test_actor_task_stacktrace(ray_start_regular): """Test the actor task stacktrace.""" - expected_output = """ray::A.f() (pid=XXX, ip=YYY, repr=ZZZ) # noqa + expected_output = """ray::A.f() (pid=XXX, ip=YYY, actor_id={actor_id}, repr=ZZZ) # noqa File "FILE", line ZZ, in f return g(c) File "FILE", line ZZ, in g @@ -151,7 +153,9 @@ def f(self): ray.get(a.f.remote()) except ValueError as ex: print(ex) - assert clean_noqa(expected_output) == scrub_traceback(str(ex)) + assert clean_noqa( + expected_output.format(actor_id=a._actor_id.hex()) + ) == scrub_traceback(str(ex)) @pytest.mark.skipif( From a9eadca241e8ae7da95b2a813b173d2add48bf95 Mon Sep 17 00:00:00 2001 
From: Yi Cheng <74173148+iycheng@users.noreply.github.com> Date: Thu, 4 May 2023 21:58:56 -0700 Subject: [PATCH 244/424] [core] Start the synchronization connection after receiving all nodes info. (#34645) There is a race condition between resource broadcasting and node registration. When the node is registered, it'll start the sync connection immediately. If the first message from the GCS is received before the Raylet got all node infos, it's going to drop the message from the node which it doesn't know. The newly started raylet will only see its local resource which might cause some problem if it's going to schedule some task around. This PR move the connection to the place after getting all nodes infos. --- src/ray/common/ray_syncer/ray_syncer.cc | 14 +++++ src/ray/common/ray_syncer/ray_syncer.h | 14 +++++ src/ray/common/test/ray_syncer_test.cc | 8 ++- src/ray/raylet/node_manager.cc | 82 ++++++++++++++----------- 4 files changed, 82 insertions(+), 36 deletions(-) diff --git a/src/ray/common/ray_syncer/ray_syncer.cc b/src/ray/common/ray_syncer/ray_syncer.cc index 1ebbb1793e23..8a76175e58f1 100644 --- a/src/ray/common/ray_syncer/ray_syncer.cc +++ b/src/ray/common/ray_syncer/ray_syncer.cc @@ -180,6 +180,20 @@ RaySyncer::~RaySyncer() { ""); } +std::shared_ptr RaySyncer::GetSyncMessage( + const std::string &node_id, MessageType message_type) const { + auto task = std::packaged_task()>( + [this, &node_id, message_type]() -> std::shared_ptr { + auto &view = node_state_->GetClusterView(); + if (auto iter = view.find(node_id); iter != view.end()) { + return iter->second[message_type]; + } + return nullptr; + }); + + return boost::asio::dispatch(io_context_.get_executor(), std::move(task)).get(); +} + std::vector RaySyncer::GetAllConnectedNodeIDs() const { std::promise> promise; io_context_.dispatch( diff --git a/src/ray/common/ray_syncer/ray_syncer.h b/src/ray/common/ray_syncer/ray_syncer.h index abe835c60e0a..0674ba704fcd 100644 --- 
a/src/ray/common/ray_syncer/ray_syncer.h +++ b/src/ray/common/ray_syncer/ray_syncer.h @@ -102,6 +102,16 @@ class RaySyncer { void Disconnect(const std::string &node_id); + /// Get the latest sync message sent from a specific node. + /// + /// \param node_id The node id where the message comes from. + /// \param message_type The message type of the component. + /// + /// \return The latest sync message sent from the node. If the node doesn't + /// have one, nullptr will be returned. + std::shared_ptr GetSyncMessage(const std::string &node_id, + MessageType message_type) const; + /// Register the components to the syncer module. Syncer will make sure eventually /// it'll have a global view of the cluster. /// @@ -129,6 +139,10 @@ class RaySyncer { /// version of message, false will be returned. bool OnDemandBroadcasting(MessageType message_type); + /// WARNING: DON'T USE THIS METHOD. It breaks the abstraction of the syncer. + /// Instead, register the component to the syncer and call + /// OnDemandBroadcasting. + /// /// Request trigger a broadcasting for a constructed message immediately instead of /// waiting for ray syncer to poll the message. 
/// diff --git a/src/ray/common/test/ray_syncer_test.cc b/src/ray/common/test/ray_syncer_test.cc index ae9391f1a6dc..0f9dc4643a36 100644 --- a/src/ray/common/test/ray_syncer_test.cc +++ b/src/ray/common/test/ray_syncer_test.cc @@ -593,7 +593,6 @@ TEST_F(SyncerTest, Broadcast) { // Change the resource in s2 and make sure s1 && s3 are correct s2.local_versions[0] = 1; - ASSERT_TRUE(s1.WaitUntil( [&s1, node_id = s2.syncer->GetLocalNodeID()]() mutable { return s1.received_versions[node_id][0] == 1; @@ -605,6 +604,13 @@ TEST_F(SyncerTest, Broadcast) { return s3.received_versions[node_id][0] == 1; }, 5)); + ASSERT_EQ( + 0, + s1.syncer->GetSyncMessage(s1.syncer->GetLocalNodeID(), MessageType::RESOURCE_VIEW) + ->version()); + ASSERT_EQ(nullptr, + s1.syncer->GetSyncMessage(NodeID::FromRandom().Binary(), + MessageType::RESOURCE_VIEW)); } bool CompareViews(const std::vector &servers, diff --git a/src/ray/raylet/node_manager.cc b/src/ray/raylet/node_manager.cc index 27176b467b87..4fd965c5b940 100644 --- a/src/ray/raylet/node_manager.cc +++ b/src/ray/raylet/node_manager.cc @@ -425,10 +425,44 @@ ray::Status NodeManager::RegisterGcs() { // If the node resource message is received first and then the node message is received, // ForwardTask will throw exception, because it can't get node info. - auto on_done = [](Status status) { RAY_CHECK_OK(status); }; + auto on_node_change_subscribe_done = [this](Status status) { + RAY_CHECK_OK(status); + + if (RayConfig::instance().use_ray_syncer()) { + // Register resource manager and scheduler + ray_syncer_.Register( + /* message_type */ syncer::MessageType::RESOURCE_VIEW, + /* reporter */ &cluster_resource_scheduler_->GetLocalResourceManager(), + /* receiver */ this, + /* pull_from_reporter_interval_ms */ + RayConfig::instance().raylet_report_resources_period_milliseconds()); + + // Register a commands channel. + // It's only used for GC right now. 
+ ray_syncer_.Register( + /* message_type */ syncer::MessageType::COMMANDS, + /* reporter */ this, + /* receiver */ this, + /* pull_from_reporter_interval_ms */ 0); + + auto gcs_channel = gcs_client_->GetGcsRpcClient().GetChannel(); + ray_syncer_.Connect(kGCSNodeID.Binary(), gcs_channel); + periodical_runner_.RunFnPeriodically( + [this] { + auto triggered_by_global_gc = TryLocalGC(); + // If plasma store is under high pressure, we should try to schedule a global + // gc. + if (triggered_by_global_gc) { + ray_syncer_.OnDemandBroadcasting(syncer::MessageType::COMMANDS); + } + }, + RayConfig::instance().raylet_check_gc_period_milliseconds(), + "NodeManager.CheckGC"); + } + }; // Register a callback to monitor new nodes and a callback to monitor removed nodes. - RAY_RETURN_NOT_OK( - gcs_client_->Nodes().AsyncSubscribeToNodeChange(on_node_change, on_done)); + RAY_RETURN_NOT_OK(gcs_client_->Nodes().AsyncSubscribeToNodeChange( + on_node_change, on_node_change_subscribe_done)); // Subscribe to all unexpected failure notifications from the local and // remote raylets. Note that this does not include workers that failed due to @@ -493,38 +527,6 @@ ray::Status NodeManager::RegisterGcs() { event_stats_print_interval_ms, "NodeManager.deadline_timer.print_event_loop_stats"); } - - if (RayConfig::instance().use_ray_syncer()) { - // Register resource manager and scheduler - ray_syncer_.Register( - /* message_type */ syncer::MessageType::RESOURCE_VIEW, - /* reporter */ &cluster_resource_scheduler_->GetLocalResourceManager(), - /* receiver */ this, - /* pull_from_reporter_interval_ms */ - RayConfig::instance().raylet_report_resources_period_milliseconds()); - - // Register a commands channel. - // It's only used for GC right now. 
- ray_syncer_.Register( - /* message_type */ syncer::MessageType::COMMANDS, - /* reporter */ this, - /* receiver */ this, - /* pull_from_reporter_interval_ms */ 0); - - auto gcs_channel = gcs_client_->GetGcsRpcClient().GetChannel(); - ray_syncer_.Connect(kGCSNodeID.Binary(), gcs_channel); - periodical_runner_.RunFnPeriodically( - [this] { - auto triggered_by_global_gc = TryLocalGC(); - // If plasma store is under high pressure, we should try to schedule a global - // gc. - if (triggered_by_global_gc) { - ray_syncer_.OnDemandBroadcasting(syncer::MessageType::COMMANDS); - } - }, - RayConfig::instance().raylet_check_gc_period_milliseconds(), - "NodeManager.CheckGC"); - } // Raylet periodically check whether it's alive in GCS. // For failure cases, GCS might think this raylet dead, but this // raylet still think it's alive. This could happen when the cluster setup is wrong, @@ -991,6 +993,7 @@ void NodeManager::NodeAdded(const GcsNodeInfo &node_info) { [this, node_id]( Status status, const boost::optional &data) { + // TODO: Always use the message from ray syncer. if (data) { ResourceRequest resources; for (auto &resource_entry : *data) { @@ -1001,6 +1004,15 @@ void NodeManager::NodeAdded(const GcsNodeInfo &node_info) { cluster_task_manager_->ScheduleAndDispatchTasks(); } } + // Update the resource view if a new message has been sent. + if (RayConfig::instance().use_ray_syncer()) { + if (auto sync_msg = ray_syncer_.GetSyncMessage( + node_id.Binary(), syncer::MessageType::RESOURCE_VIEW)) { + if (sync_msg) { + ConsumeSyncMessage(sync_msg); + } + } + } })); } From bbb309da6837b9f29c7b956f4b57e8b898eaa35c Mon Sep 17 00:00:00 2001 From: Yi Cheng <74173148+iycheng@users.noreply.github.com> Date: Thu, 4 May 2023 22:01:16 -0700 Subject: [PATCH 245/424] Deflakey test advanced 9 (#34883) Previously a bug was fixed in [PR](https://github.com/ray-project/ray/pull/33311) where pubsub causes the leak. Somehow the fix has race conditions and got triggered later when code changes. 
The test is flakey because there is a race condition between raylet sending node failure and core worker exit itself. When disconnect is sent to Raylet, Raylet will start to report worker failure. But the worker still continue to run. GCS uses worker failure to close the connection. But if the worker is still alive, the worker might send another request the GCS which will lead to the FD leak. This PR did two improvements: - Move the heavy workload before sending disconnect request - Raylet will report worker failure if the socket is closed. --- .../serve/tests/test_controller_recovery.py | 6 ++- python/ray/tests/test_advanced_9.py | 2 +- python/ray/tests/test_failure_3.py | 13 ++++-- src/ray/common/client_connection.h | 6 +++ src/ray/core_worker/core_worker.cc | 8 ++-- src/ray/gcs/gcs_client/accessor.cc | 4 +- .../gcs/gcs_client/test/gcs_client_test.cc | 40 ------------------- src/ray/gcs/gcs_server/gcs_actor_manager.cc | 19 +++++---- src/ray/gcs/gcs_server/pubsub_handler.cc | 1 - src/ray/gcs/pb_util.h | 2 + src/ray/raylet/node_manager.cc | 24 ++++++++--- 11 files changed, 60 insertions(+), 65 deletions(-) diff --git a/python/ray/serve/tests/test_controller_recovery.py b/python/ray/serve/tests/test_controller_recovery.py index 77d262c26ee2..e3471c9b3ba5 100644 --- a/python/ray/serve/tests/test_controller_recovery.py +++ b/python/ray/serve/tests/test_controller_recovery.py @@ -238,8 +238,10 @@ def get_actor_info(name: str): _, controller1_pid = get_actor_info(SERVE_CONTROLLER_NAME) ray.kill(serve.context._global_client._controller, no_restart=False) # wait for controller is alive again - wait_for_condition(get_actor_info, name=SERVE_CONTROLLER_NAME) - assert controller1_pid != get_actor_info(SERVE_CONTROLLER_NAME)[1] + wait_for_condition( + lambda: get_actor_info(SERVE_CONTROLLER_NAME) is not None + and get_actor_info(SERVE_CONTROLLER_NAME)[1] != controller1_pid + ) # Let the actor proceed initialization ray.get(signal.send.remote()) diff --git 
a/python/ray/tests/test_advanced_9.py b/python/ray/tests/test_advanced_9.py index b61e5aac9216..accddc1b3164 100644 --- a/python/ray/tests/test_advanced_9.py +++ b/python/ray/tests/test_advanced_9.py @@ -258,7 +258,7 @@ def ready(self): run_string_as_driver(script.format(address=call_ray_start_2, val=2)) -@pytest.mark.skipif(sys.platform != "linux", reason="Only works on linux.") +@pytest.mark.skipif(sys.platform == "win32", reason="Not valid on win32.") def test_gcs_connection_no_leak(ray_start_cluster): cluster = ray_start_cluster head_node = cluster.add_node() diff --git a/python/ray/tests/test_failure_3.py b/python/ray/tests/test_failure_3.py index 926b7c76ec01..b666a9157e72 100644 --- a/python/ray/tests/test_failure_3.py +++ b/python/ray/tests/test_failure_3.py @@ -365,6 +365,8 @@ def test_no_worker_child_process_leaks(ray_start_cluster, tmp_path): the list of PIDs that are children of the Ray worker processes. """ + ray_start_cluster.add_node() + ray_start_cluster.wait_for_nodes() output_file_path = tmp_path / "leaked_pids.json" driver_script = f""" @@ -374,7 +376,7 @@ def test_no_worker_child_process_leaks(ray_start_cluster, tmp_path): import shutil import time import os - +ray.init("{ray_start_cluster.address}") @ray.remote class Actor: def create_leaked_child_process(self, num_to_leak): @@ -424,7 +426,6 @@ def task(): print(os.getpid()) time.sleep(1) """ - driver_proc = run_string_as_driver_nonblocking(driver_script) # Wait for the json file containing the child PIDS @@ -443,9 +444,15 @@ def task(): assert all([proc.status() == psutil.STATUS_SLEEPING for proc in processes]) # Valdiate children of worker process die after SIGINT. 
+ def check(): + for proc in processes: + if proc.is_running(): + print(proc) + return all([not proc.is_running() for proc in processes]) + driver_proc.send_signal(signal.SIGINT) wait_for_condition( - condition_predictor=lambda: all([not proc.is_running() for proc in processes]), + condition_predictor=check, timeout=30, ) diff --git a/src/ray/common/client_connection.h b/src/ray/common/client_connection.h index 89d30fbbcdbc..9a86ffb808e8 100644 --- a/src/ray/common/client_connection.h +++ b/src/ray/common/client_connection.h @@ -125,6 +125,12 @@ class ServerConnection : public std::enable_shared_from_this { std::string DebugString() const; + void AsyncWaitTerminated(std::function callback) { + // Async wait until the connection is disconnected. + socket_.async_wait(local_stream_socket::wait_type::wait_error, + [callback = std::move(callback)](auto) { callback(); }); + } + protected: /// A private constructor for a server connection. ServerConnection(local_stream_socket &&socket); diff --git a/src/ray/core_worker/core_worker.cc b/src/ray/core_worker/core_worker.cc index af09bb383ed7..34f1ba1fc6cb 100644 --- a/src/ray/core_worker/core_worker.cc +++ b/src/ray/core_worker/core_worker.cc @@ -785,8 +785,9 @@ void CoreWorker::Exit( detail = std::move(detail), creation_task_exception_pb_bytes]() { rpc::DrainServerCallExecutor(); - Disconnect(exit_type, detail, creation_task_exception_pb_bytes); KillChildProcs(); + // Disconnect here after KillChildProcs to make the Raylet async wait shorter. + Disconnect(exit_type, detail, creation_task_exception_pb_bytes); Shutdown(); }, "CoreWorker.Shutdown"); @@ -830,10 +831,11 @@ void CoreWorker::ForceExit(const rpc::WorkerExitType exit_type, const std::string &detail) { RAY_LOG(WARNING) << "Force exit the process. " << " Details: " << detail; - Disconnect(exit_type, detail); - KillChildProcs(); + // Disconnect here before KillChildProcs to make the Raylet async wait shorter. 
+ Disconnect(exit_type, detail); + // NOTE(hchen): Use `QuickExit()` to force-exit this process without doing cleanup. // `exit()` will destruct static objects in an incorrect order, which will lead to // core dumps. diff --git a/src/ray/gcs/gcs_client/accessor.cc b/src/ray/gcs/gcs_client/accessor.cc index ccb225a62931..358b3940e6dc 100644 --- a/src/ray/gcs/gcs_client/accessor.cc +++ b/src/ray/gcs/gcs_client/accessor.cc @@ -852,7 +852,9 @@ Status WorkerInfoAccessor::AsyncReportWorkerFailure( const std::shared_ptr &data_ptr, const StatusCallback &callback) { rpc::Address worker_address = data_ptr->worker_address(); - RAY_LOG(DEBUG) << "Reporting worker failure, " << worker_address.DebugString(); + RAY_LOG(DEBUG) << "Reporting worker failure, " << worker_address.DebugString() + << " WorkerID=" << WorkerID::FromBinary(worker_address.worker_id()) + << " NodeID=" << NodeID::FromBinary(worker_address.raylet_id()); rpc::ReportWorkerFailureRequest request; request.mutable_worker_failure()->CopyFrom(*data_ptr); client_impl_->GetGcsRpcClient().ReportWorkerFailure( diff --git a/src/ray/gcs/gcs_client/test/gcs_client_test.cc b/src/ray/gcs/gcs_client/test/gcs_client_test.cc index d3baeeb964d0..10325d448f5e 100644 --- a/src/ray/gcs/gcs_client/test/gcs_client_test.cc +++ b/src/ray/gcs/gcs_client/test/gcs_client_test.cc @@ -947,46 +947,6 @@ TEST_P(GcsClientTest, DISABLED_TestGetActorPerf) { << actor_count << " actors."; } -TEST_P(GcsClientTest, TestEvictExpiredDestroyedActors) { - // Restart doesn't work with in memory storage - if (RayConfig::instance().gcs_storage() == "memory") { - return; - } - // Register actors and the actors will be destroyed. 
- JobID job_id = JobID::FromInt(1); - AddJob(job_id); - absl::flat_hash_set actor_ids; - int actor_count = RayConfig::instance().maximum_gcs_destroyed_actor_cached_count(); - for (int index = 0; index < actor_count; ++index) { - auto actor_table_data = Mocker::GenActorTableData(job_id); - RegisterActor(actor_table_data, false); - actor_ids.insert(ActorID::FromBinary(actor_table_data->actor_id())); - } - - // Restart GCS. - RestartGcsServer(); - - for (int index = 0; index < actor_count; ++index) { - auto actor_table_data = Mocker::GenActorTableData(job_id); - RegisterActor(actor_table_data, false); - actor_ids.insert(ActorID::FromBinary(actor_table_data->actor_id())); - } - - // NOTE: GCS will not reply when actor registration fails, so when GCS restarts, gcs - // client will register the actor again and the status of the actor may be - // `DEPENDENCIES_UNREADY` or `DEAD`. We should get all dead actors. - auto condition = [this]() { - return GetAllActors(true).size() == - RayConfig::instance().maximum_gcs_destroyed_actor_cached_count(); - }; - EXPECT_TRUE(WaitForCondition(condition, timeout_ms_.count())); - - auto actors = GetAllActors(true); - for (const auto &actor : actors) { - EXPECT_TRUE(actor_ids.contains(ActorID::FromBinary(actor.actor_id()))); - } -} - TEST_P(GcsClientTest, TestEvictExpiredDeadNodes) { // Restart GCS. 
RestartGcsServer(); diff --git a/src/ray/gcs/gcs_server/gcs_actor_manager.cc b/src/ray/gcs/gcs_server/gcs_actor_manager.cc index ee328510ea82..e6a347ddebe1 100644 --- a/src/ray/gcs/gcs_server/gcs_actor_manager.cc +++ b/src/ray/gcs/gcs_server/gcs_actor_manager.cc @@ -761,7 +761,8 @@ void GcsActorManager::PollOwnerForActorOutOfScope( auto it = workers.find(owner_id); if (it == workers.end()) { RAY_LOG(DEBUG) << "Adding owner " << owner_id << " of actor " << actor_id - << ", job id = " << actor_id.JobId(); + << ", job id = " << actor_id.JobId() + << " owner node id = " << owner_node_id; std::shared_ptr client = worker_client_factory_(actor->GetOwnerAddress()); it = workers.emplace(owner_id, Owner(std::move(client))).first; @@ -776,14 +777,15 @@ void GcsActorManager::PollOwnerForActorOutOfScope( [this, owner_node_id, owner_id, actor_id]( Status status, const rpc::WaitForActorOutOfScopeReply &reply) { if (!status.ok()) { - RAY_LOG(INFO) << "Worker " << owner_id - << " failed, destroying actor child, job id = " - << actor_id.JobId(); - } else { - RAY_LOG(INFO) << "Actor " << actor_id - << " is out of scope, destroying actor, job id = " - << actor_id.JobId(); + RAY_LOG(WARNING) << "Failed to wait for actor " << actor_id + << " out of scope, job id = " << actor_id.JobId() + << ", error: " << status.ToString(); + // TODO(iycheng): Retry it in other PR. + return; } + RAY_LOG(INFO) << "Actor " << actor_id + << " is out of scope, destroying actor, job id = " + << actor_id.JobId(); auto node_it = owners_.find(owner_node_id); if (node_it != owners_.end() && node_it->second.count(owner_id)) { @@ -957,6 +959,7 @@ void GcsActorManager::OnWorkerDead(const ray::NodeID &node_id, bool need_reconstruct = disconnect_type != rpc::WorkerExitType::INTENDED_USER_EXIT && disconnect_type != rpc::WorkerExitType::USER_ERROR; + // Destroy all actors that are owned by this worker. 
const auto it = owners_.find(node_id); if (it != owners_.end() && it->second.count(worker_id)) { diff --git a/src/ray/gcs/gcs_server/pubsub_handler.cc b/src/ray/gcs/gcs_server/pubsub_handler.cc index cf34b4f1e8a6..cf1417b35220 100644 --- a/src/ray/gcs/gcs_server/pubsub_handler.cc +++ b/src/ray/gcs/gcs_server/pubsub_handler.cc @@ -104,7 +104,6 @@ void InternalPubSubHandler::HandleGcsSubscriberCommandBatch( if (sender_id.empty()) { sender_id = request.subscriber_id(); } - auto iter = sender_to_subscribers_.find(sender_id); if (iter == sender_to_subscribers_.end()) { iter = sender_to_subscribers_.insert({sender_id, {}}).first; diff --git a/src/ray/gcs/pb_util.h b/src/ray/gcs/pb_util.h index 7f99aa35924d..7aa91e6538da 100644 --- a/src/ray/gcs/pb_util.h +++ b/src/ray/gcs/pb_util.h @@ -107,6 +107,7 @@ inline std::shared_ptr CreateActorTableData( /// Helper function to produce worker failure data. inline std::shared_ptr CreateWorkerFailureData( + const NodeID &node_id, const WorkerID &worker_id, int64_t timestamp, rpc::WorkerExitType disconnect_type, @@ -117,6 +118,7 @@ inline std::shared_ptr CreateWorkerFailureData( // Only report the worker id + delta (new data upon worker failures). // GCS will merge the data with original worker data. worker_failure_info_ptr->mutable_worker_address()->set_worker_id(worker_id.Binary()); + worker_failure_info_ptr->mutable_worker_address()->set_raylet_id(node_id.Binary()); worker_failure_info_ptr->set_timestamp(timestamp); worker_failure_info_ptr->set_exit_type(disconnect_type); worker_failure_info_ptr->set_exit_detail(disconnect_detail); diff --git a/src/ray/raylet/node_manager.cc b/src/ray/raylet/node_manager.cc index 4fd965c5b940..4895222e52c8 100644 --- a/src/ray/raylet/node_manager.cc +++ b/src/ray/raylet/node_manager.cc @@ -1480,15 +1480,13 @@ void NodeManager::DisconnectClient(const std::shared_ptr &clie } // Publish the worker failure. 
auto worker_failure_data_ptr = - gcs::CreateWorkerFailureData(worker->WorkerId(), + gcs::CreateWorkerFailureData(self_node_id_, + worker->WorkerId(), time(nullptr), disconnect_type, disconnect_detail, worker->GetProcess().GetId(), creation_task_exception); - RAY_CHECK_OK( - gcs_client_->Workers().AsyncReportWorkerFailure(worker_failure_data_ptr, nullptr)); - if (is_worker) { const ActorID &actor_id = worker->GetActorId(); const TaskID &task_id = worker->GetAssignedTaskId(); @@ -1563,9 +1561,23 @@ void NodeManager::DisconnectClient(const std::shared_ptr &clie local_task_manager_->ClearWorkerBacklog(worker->WorkerId()); cluster_task_manager_->CancelTaskForOwner(worker->GetAssignedTaskId()); - +#ifdef _WIN32 + // On Windows, when the worker is killed, client async wait won't get notified + // somehow. + RAY_CHECK_OK( + gcs_client_->Workers().AsyncReportWorkerFailure(worker_failure_data_ptr, nullptr)); client->Close(); - +#else + // ReportWorkerFailure should happen after the worker exit completely. + // A better way is to monitor the pid exit. But that needs Process.h + // support async operation. + // Here we monitor the socket to achieve similar result. + // When the worker exited, the pid will be disconnected (local stream socket). + client->AsyncWaitTerminated([client, worker_failure_data_ptr, this] { + RAY_CHECK_OK(gcs_client_->Workers().AsyncReportWorkerFailure(worker_failure_data_ptr, + nullptr)); + }); +#endif // TODO(rkn): Tell the object manager that this client has disconnected so // that it can clean up the wait requests for this client. Currently I think // these can be leaked. 
From a4eaa7cd435a636082b60c12118c60f22e02a85c Mon Sep 17 00:00:00 2001 From: Jiajun Yao Date: Thu, 4 May 2023 23:52:56 -0700 Subject: [PATCH 246/424] [release-test] Stop using spot instance for chaos tests (#35063) Signed-off-by: Jiajun Yao --- release/nightly_tests/chaos_test/compute_template.yaml | 2 +- release/nightly_tests/chaos_test/compute_template_gce.yaml | 2 +- .../dask_on_ray/chaos_dask_on_ray_stress_compute.yaml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/release/nightly_tests/chaos_test/compute_template.yaml b/release/nightly_tests/chaos_test/compute_template.yaml index c1319d3c5660..925f482cdf12 100644 --- a/release/nightly_tests/chaos_test/compute_template.yaml +++ b/release/nightly_tests/chaos_test/compute_template.yaml @@ -15,7 +15,7 @@ worker_node_types: instance_type: m5.4xlarge min_workers: 9 max_workers: 9 - use_spot: true + use_spot: false resources: custom_resources: worker: 1 diff --git a/release/nightly_tests/chaos_test/compute_template_gce.yaml b/release/nightly_tests/chaos_test/compute_template_gce.yaml index af329090a4d3..55d4fd840415 100644 --- a/release/nightly_tests/chaos_test/compute_template_gce.yaml +++ b/release/nightly_tests/chaos_test/compute_template_gce.yaml @@ -16,7 +16,7 @@ worker_node_types: instance_type: n2-standard-16 min_workers: 9 max_workers: 9 - use_spot: true + use_spot: false resources: custom_resources: worker: 1 diff --git a/release/nightly_tests/dask_on_ray/chaos_dask_on_ray_stress_compute.yaml b/release/nightly_tests/dask_on_ray/chaos_dask_on_ray_stress_compute.yaml index fad7750e2f41..e249486f0377 100644 --- a/release/nightly_tests/dask_on_ray/chaos_dask_on_ray_stress_compute.yaml +++ b/release/nightly_tests/dask_on_ray/chaos_dask_on_ray_stress_compute.yaml @@ -17,4 +17,4 @@ worker_node_types: instance_type: m6i.8xlarge min_workers: 20 max_workers: 20 - use_spot: true + use_spot: false From 04994dca347a5070326dde048b386674282035ec Mon Sep 17 00:00:00 2001 From: Max Pumperla Date: Fri, 5 
May 2023 09:43:22 +0200 Subject: [PATCH 247/424] [docs] ssl bug fix (#35067) --- doc/requirements-doc.txt | 3 +++ 1 file changed, 3 insertions(+) diff --git a/doc/requirements-doc.txt b/doc/requirements-doc.txt index 3212c1ef2c9b..d1792bb49b20 100644 --- a/doc/requirements-doc.txt +++ b/doc/requirements-doc.txt @@ -71,3 +71,6 @@ myst-nb==0.13.1 # Jupyter conversion jupytext==1.13.6 + +# Pin urllib to avoid downstream ssl incompatibility issues +urllib3 < 1.27 \ No newline at end of file From 2baa63f61c9aa6150b4fed1694114567f4c4ea26 Mon Sep 17 00:00:00 2001 From: Ricky Xu Date: Fri, 5 May 2023 20:22:32 +0800 Subject: [PATCH 248/424] [core][state] Push down filtering to GCS for listing/getting task from state api (#34433) Similar to #34348 This pushes down the below filters to GCS (source-side) filtering. actor_id task id task name job id --- dashboard/state_aggregator.py | 8 +- .../ray/experimental/state/state_manager.py | 40 +++++++-- python/ray/tests/test_state_api.py | 10 ++- src/ray/gcs/gcs_server/gcs_task_manager.cc | 40 ++++++--- .../gcs_server/test/gcs_task_manager_test.cc | 84 +++++++++++++++++-- src/ray/protobuf/gcs_service.proto | 21 +++-- 6 files changed, 163 insertions(+), 40 deletions(-) diff --git a/dashboard/state_aggregator.py b/dashboard/state_aggregator.py index e4e38c9f323b..b7cfd20b5c9c 100644 --- a/dashboard/state_aggregator.py +++ b/dashboard/state_aggregator.py @@ -377,16 +377,10 @@ async def list_tasks(self, *, option: ListApiOptions) -> ListApiResponse: {task_id -> task_data_in_dict} task_data_in_dict's schema is in TaskState """ - job_id = None - for filter in option.filters: - if filter[0] == "job_id" and filter[1] == "=": - # Filtering by job_id == xxxx, pass it to source side filtering. 
- # tuple consists of (job_id, predicate, value) - job_id = filter[2] try: reply = await self._client.get_all_task_info( timeout=option.timeout, - job_id=job_id, + filters=option.filters, exclude_driver=option.exclude_driver, ) except DataSourceUnavailable: diff --git a/python/ray/experimental/state/state_manager.py b/python/ray/experimental/state/state_manager.py index 11ea98b89c4c..19e1fa318e38 100644 --- a/python/ray/experimental/state/state_manager.py +++ b/python/ray/experimental/state/state_manager.py @@ -12,7 +12,7 @@ from ray._private import ray_constants from ray._private.gcs_utils import GcsAioClient from ray._private.utils import hex_to_binary -from ray._raylet import ActorID, JobID +from ray._raylet import ActorID, JobID, TaskID from ray.core.generated import gcs_service_pb2_grpc from ray.core.generated.gcs_pb2 import ActorTableData from ray.core.generated.gcs_service_pb2 import ( @@ -262,16 +262,40 @@ async def get_all_task_info( self, timeout: int = None, limit: int = None, - job_id: Optional[str] = None, - exclude_driver: bool = True, + filters: Optional[List[Tuple[str, PredicateType, SupportedFilterType]]] = None, + exclude_driver: bool = False, ) -> Optional[GetTaskEventsReply]: if not limit: limit = RAY_MAX_LIMIT_FROM_DATA_SOURCE - if job_id: - job_id = JobID(hex_to_binary(job_id)).binary() - request = GetTaskEventsRequest( - limit=limit, exclude_driver=exclude_driver, job_id=job_id - ) + + if filters is None: + filters = [] + + req_filters = GetTaskEventsRequest.Filters() + for filter in filters: + key, predicate, value = filter + if predicate != "=": + # We only support EQUAL predicate for source side filtering. 
+ continue + + if key == "actor_id": + req_filters.actor_id = ActorID(hex_to_binary(value)).binary() + elif key == "job_id": + req_filters.job_id = JobID(hex_to_binary(value)).binary() + elif key == "name": + req_filters.name = value + elif key == "task_id": + req_filters.task_ids.append(TaskID(hex_to_binary(value)).binary()) + else: + continue + + # Remove the filter from the list so that we don't have to + # filter it again later. + filters.remove(filter) + + req_filters.exclude_driver = exclude_driver + + request = GetTaskEventsRequest(limit=limit, filters=req_filters) reply = await self._gcs_task_info_stub.GetTaskEvents(request, timeout=timeout) return reply diff --git a/python/ray/tests/test_state_api.py b/python/ray/tests/test_state_api.py index 64882421c856..38d097ae5d9c 100644 --- a/python/ray/tests/test_state_api.py +++ b/python/ray/tests/test_state_api.py @@ -2282,7 +2282,7 @@ def g(dep): def impossible(): pass - out = [f.remote() for _ in range(2)] # noqa + out = [f.options(name=f"f_{i}").remote() for i in range(2)] # noqa g_out = g.remote(f.remote()) # noqa im = impossible.remote() # noqa @@ -2350,6 +2350,9 @@ def verify(): for task in tasks: assert task["job_id"] == job_id + tasks = list_tasks(filters=[("name", "=", "f_0")]) + assert len(tasks) == 1 + return True wait_for_condition(verify) @@ -2540,7 +2543,6 @@ def verify(): for task in tasks: assert task["job_id"] == job_id for task in tasks: - print(task) assert task["actor_id"] == actor_id # Actor.__init__: 1 finished # Actor.call: 1 running, 9 waiting for execution (queued). @@ -2590,6 +2592,10 @@ def verify(): == 1 ) + # Filters with actor id. 
+ assert len(list_tasks(filters=[("actor_id", "=", actor_id)])) == 11 + assert len(list_tasks(filters=[("actor_id", "!=", actor_id)])) == 0 + return True wait_for_condition(verify) diff --git a/src/ray/gcs/gcs_server/gcs_task_manager.cc b/src/ray/gcs/gcs_server/gcs_task_manager.cc index 6771e042bb24..e733856b8ee5 100644 --- a/src/ray/gcs/gcs_server/gcs_task_manager.cc +++ b/src/ray/gcs/gcs_server/gcs_task_manager.cc @@ -313,16 +313,17 @@ void GcsTaskManager::HandleGetTaskEvents(rpc::GetTaskEventsRequest request, rpc::SendReplyCallback send_reply_callback) { RAY_LOG(DEBUG) << "Getting task status:" << request.ShortDebugString(); - // Select candidate events by indexing. + // Select candidate events by indexing if possible. std::vector task_events; - if (request.has_task_ids()) { + const auto &filters = request.filters(); + if (filters.task_ids_size() > 0) { absl::flat_hash_set task_ids; - for (const auto &task_id_str : request.task_ids().vals()) { + for (const auto &task_id_str : filters.task_ids()) { task_ids.insert(TaskID::FromBinary(task_id_str)); } task_events = task_event_storage_->GetTaskEvents(task_ids); - } else if (request.has_job_id()) { - task_events = task_event_storage_->GetTaskEvents(JobID::FromBinary(request.job_id())); + } else if (filters.has_job_id()) { + task_events = task_event_storage_->GetTaskEvents(JobID::FromBinary(filters.job_id())); } else { task_events = task_event_storage_->GetTaskEvents(); } @@ -334,15 +335,34 @@ void GcsTaskManager::HandleGetTaskEvents(rpc::GetTaskEventsRequest request, int32_t num_profile_event_limit = 0; int32_t num_status_event_limit = 0; - for (auto itr = task_events.rbegin(); itr != task_events.rend(); ++itr) { - auto &task_event = *itr; + // A lambda filter fn, where it returns true for task events to be included in the + // result. Task ids and job ids are already filtered by the storage with indexing above. 
+ auto filter_fn = [&filters](const rpc::TaskEvents &task_event) { if (!task_event.has_task_info()) { // Skip task events w/o task info. - continue; + return false; } - - if (request.exclude_driver() && + if (filters.exclude_driver() && task_event.task_info().type() == rpc::TaskType::DRIVER_TASK) { + return false; + } + + if (filters.has_actor_id() && task_event.task_info().has_actor_id() && + ActorID::FromBinary(task_event.task_info().actor_id()) != + ActorID::FromBinary(filters.actor_id())) { + return false; + } + + if (filters.has_name() && task_event.task_info().name() != filters.name()) { + return false; + } + + return true; + }; + + for (auto itr = task_events.rbegin(); itr != task_events.rend(); ++itr) { + auto &task_event = *itr; + if (!filter_fn(task_event)) { continue; } diff --git a/src/ray/gcs/gcs_server/test/gcs_task_manager_test.cc b/src/ray/gcs/gcs_server/test/gcs_task_manager_test.cc index 91070fe1cf35..d60ea97f100f 100644 --- a/src/ray/gcs/gcs_server/test/gcs_task_manager_test.cc +++ b/src/ray/gcs/gcs_server/test/gcs_task_manager_test.cc @@ -115,26 +115,36 @@ class GcsTaskManagerTest : public ::testing::Test { rpc::GetTaskEventsReply SyncGetTaskEvents(absl::flat_hash_set task_ids, absl::optional job_id = absl::nullopt, int64_t limit = -1, - bool exclude_driver = true) { + bool exclude_driver = true, + const std::string &name = "", + const ActorID &actor_id = ActorID::Nil()) { rpc::GetTaskEventsRequest request; rpc::GetTaskEventsReply reply; std::promise promise; if (!task_ids.empty()) { for (const auto &task_id : task_ids) { - request.mutable_task_ids()->add_vals(task_id.Binary()); + request.mutable_filters()->add_task_ids(task_id.Binary()); } } + if (!name.empty()) { + request.mutable_filters()->set_name(name); + } + + if (!actor_id.IsNil()) { + request.mutable_filters()->set_actor_id(actor_id.Binary()); + } + if (job_id) { - request.set_job_id(job_id->Binary()); + request.mutable_filters()->set_job_id(job_id->Binary()); } if (limit >= 0) { 
request.set_limit(limit); } - request.set_exclude_driver(exclude_driver); + request.mutable_filters()->set_exclude_driver(exclude_driver); task_manager->GetIoContext().dispatch( [this, &promise, &request, &reply]() { task_manager->HandleGetTaskEvents( @@ -155,11 +165,15 @@ class GcsTaskManagerTest : public ::testing::Test { static rpc::TaskInfoEntry GenTaskInfo( JobID job_id, TaskID parent_task_id = TaskID::Nil(), - rpc::TaskType task_type = rpc::TaskType::NORMAL_TASK) { + rpc::TaskType task_type = rpc::TaskType::NORMAL_TASK, + const ActorID actor_id = ActorID::Nil(), + const std::string name = "") { rpc::TaskInfoEntry task_info; task_info.set_job_id(job_id.Binary()); task_info.set_parent_task_id(parent_task_id.Binary()); task_info.set_type(task_type); + task_info.set_actor_id(actor_id.Binary()); + task_info.set_name(name); return task_info; } @@ -490,6 +504,66 @@ TEST_F(GcsTaskManagerTest, TestGetTaskEventsByJob) { reply_job2.mutable_events_by_task()); } +TEST_F(GcsTaskManagerTest, TestGetTaskEventsFilters) { + // Generate task events + + // A task event with actor id + ActorID actor_id = ActorID::Of(JobID::FromInt(1), TaskID::Nil(), 1); + { + auto task_ids = GenTaskIDs(1); + auto task_info_actor_id = + GenTaskInfo(JobID::FromInt(1), TaskID::Nil(), rpc::ACTOR_TASK, actor_id); + auto events = GenTaskEvents(task_ids, + /* attempt_number */ + 0, + /* job_id */ 1, + absl::nullopt, + absl::nullopt, + task_info_actor_id); + auto data = Mocker::GenTaskEventsData(events); + SyncAddTaskEventData(data); + } + + // A task event with name. 
+ { + auto task_ids = GenTaskIDs(1); + auto task_info_name = GenTaskInfo( + JobID::FromInt(1), TaskID::Nil(), rpc::NORMAL_TASK, ActorID::Nil(), "task_name"); + auto events = GenTaskEvents(task_ids, + /* attempt_number */ + 0, + /* job_id */ 1, + absl::nullopt, + absl::nullopt, + task_info_name); + auto data = Mocker::GenTaskEventsData(events); + SyncAddTaskEventData(data); + } + + auto reply_name = SyncGetTaskEvents({}, + /* job_id */ absl::nullopt, + /* limit */ -1, + /* exclude_driver */ false, + "task_name"); + EXPECT_EQ(reply_name.events_by_task_size(), 1); + + auto reply_actor_id = SyncGetTaskEvents({}, + /* job_id */ absl::nullopt, + /* limit */ -1, + /* exclude_driver */ false, + /* name */ "", + actor_id); + EXPECT_EQ(reply_name.events_by_task_size(), 1); + + auto reply_both_and = SyncGetTaskEvents({}, + /* job_id */ absl::nullopt, + /* limit */ -1, + /* exclude_driver */ false, + "task_name", + actor_id); + EXPECT_EQ(reply_both_and.events_by_task_size(), 0); +} + TEST_F(GcsTaskManagerTest, TestMarkTaskAttemptFailedIfNeeded) { auto tasks = GenTaskIDs(3); auto tasks_running = tasks[0]; diff --git a/src/ray/protobuf/gcs_service.proto b/src/ray/protobuf/gcs_service.proto index 38280e48d3f6..7bc382bc0842 100644 --- a/src/ray/protobuf/gcs_service.proto +++ b/src/ray/protobuf/gcs_service.proto @@ -644,22 +644,27 @@ message AddTaskEventDataReply { } message GetTaskEventsRequest { - message TaskIDs { - repeated string vals = 1; - } - oneof select_by { + // Filter object where predicates are AND together. + message Filters { // Get task events from a job. - string job_id = 1; + optional bytes job_id = 1; // Get task events from a set of tasks. - TaskIDs task_ids = 2; + repeated bytes task_ids = 2; + // Get the task events with an actor id. + optional bytes actor_id = 3; + // Get the task events of task with names. + optional string name = 4; + // True if task events from driver (only profiling events) should be excluded. 
+ optional bool exclude_driver = 5; } // Maximum number of TaskEvents to return. // If set, the exact `limit` TaskEvents returned do not have any ordering or selection // guarantee. optional int64 limit = 3; - // True if task events from driver (only profiling events) should be excluded. - bool exclude_driver = 4; + + // Filters to apply to the get query. + optional Filters filters = 4; } message GetTaskEventsReply { From ebb2d2215b34bc7182aca03350bddb01914490c5 Mon Sep 17 00:00:00 2001 From: Ricky Xu Date: Fri, 5 May 2023 20:33:21 +0800 Subject: [PATCH 249/424] [core][state] Task log - Improve log tailing from log_client and support tailing from offsets [2/4] (#28188) With verbose logging, the log file size might grow significantly. This PR prevents the grpc buffer overflow when tailing with large number of lines specified: Instead of reading last X lines into memory, it looks for the start of the last X lines, and read afterwards. Always stream log data in chunks --- dashboard/modules/log/log_agent.py | 296 ++++++++++++++---- dashboard/utils.py | 4 +- python/ray/tests/conftest.py | 20 ++ python/ray/tests/test_state_api_log.py | 258 +++++++++++++-- .../stress_tests/test_state_api_scale.py | 2 +- 5 files changed, 481 insertions(+), 99 deletions(-) diff --git a/dashboard/modules/log/log_agent.py b/dashboard/modules/log/log_agent.py index a3abeeb54cdf..5ae03d4cf357 100644 --- a/dashboard/modules/log/log_agent.py +++ b/dashboard/modules/log/log_agent.py @@ -5,9 +5,11 @@ import ray.dashboard.utils as dashboard_utils import ray.dashboard.optional_utils as dashboard_optional_utils import asyncio +import grpc import io import os + from pathlib import Path from ray.core.generated import reporter_pb2 @@ -16,6 +18,193 @@ logger = logging.getLogger(__name__) routes = dashboard_optional_utils.ClassMethodRouteTable +# 64 KB +BLOCK_SIZE = 1 << 16 + +# Keep-alive interval for reading the file +DEFAULT_KEEP_ALIVE_INTERVAL_SEC = 1 + + +def find_end_offset_file(file: 
io.BufferedIOBase) -> int: + """ + Find the offset of the end of a file without changing the file pointer. + + Args: + file: File object + + Returns: + Offset of the end of a file. + """ + old_pos = file.tell() # store old position + file.seek(0, io.SEEK_END) # move file pointer to end of file + end = file.tell() # return end of file offset + file.seek(old_pos, io.SEEK_SET) + return end + + +def find_end_offset_next_n_lines_from_offset( + file: io.BufferedIOBase, start_offset: int, n: int +) -> int: + """ + Find the offsets of next n lines from a start offset. + + Args: + file: File object + start_offset: Start offset to read from, inclusive. + n: Number of lines to find. + + Returns: + Offset of the end of the next n line (exclusive) + """ + file.seek(start_offset) # move file pointer to start offset + end_offset = None + for _ in range(n): # loop until we find n lines or reach end of file + line = file.readline() # read a line and consume new line character + if not line: # end of file + break + end_offset = file.tell() # end offset. + + logger.debug(f"Found next {n} lines from {start_offset} offset") + return ( + end_offset if end_offset is not None else file.seek(0, io.SEEK_END) + ) # return last line offset or end of file offset if no lines found + + +def find_start_offset_last_n_lines_from_offset( + file: io.BufferedIOBase, offset: int, n: int, block_size: int = BLOCK_SIZE +) -> int: + """ + Find the offset of the beginning of the line of the last X lines from an offset. + + Args: + file: File object + offset: Start offset from which to find last X lines, -1 means end of file. + The offset is exclusive, i.e. data at the offset is not included + in the result. + n: Number of lines to find + block_size: Block size to read from file + + Returns: + Offset of the beginning of the line of the last X lines from a start offset. 
+ """ + logger.debug(f"Finding last {n} lines from {offset} offset") + if offset == -1: + offset = file.seek(0, io.SEEK_END) # move file pointer to end of file + else: + file.seek(offset, io.SEEK_SET) # move file pointer to start offset + + if n == 0: + return offset + nbytes_from_end = ( + 0 # Number of bytes that should be tailed from the end of the file + ) + # Non new line terminating offset, adjust the line count and treat the non-newline + # terminated line as the last line. e.g. line 1\nline 2 + file.seek(max(0, offset - 1), os.SEEK_SET) + if file.read(1) != b"\n": + n -= 1 + + # Remaining number of lines to tail + lines_more = n + read_offset = max(0, offset - block_size) + # So that we know how much to read on the last block (the block 0) + prev_offset = offset + + while lines_more >= 0 and read_offset >= 0: + # Seek to the current block start + file.seek(read_offset, 0) + # Read the current block (or less than block) data + block_data = file.read(min(block_size, prev_offset - read_offset)) + num_lines = block_data.count(b"\n") + if num_lines > lines_more: + # This is the last block to read. + # Need to find the offset of exact number of lines to tail + # in the block. + # Use `split` here to split away the extra lines, i.e. + # first `num_lines - lines_more` lines. + lines = block_data.split(b"\n", num_lines - lines_more) + # Added the len of those lines that at the end of the block. + nbytes_from_end += len(lines[-1]) + break + + # Need to read more blocks. 
+ lines_more -= num_lines + nbytes_from_end += len(block_data) + + if read_offset == 0: + # We have read all blocks (since the start) + break + # Continuing with the previous block + prev_offset = read_offset + read_offset = max(0, read_offset - block_size) + + offset_read_start = offset - nbytes_from_end + assert ( + offset_read_start >= 0 + ), f"Read start offset({offset_read_start}) should be non-negative" + return offset_read_start + + +async def _stream_log_in_chunk( + context: grpc.aio.ServicerContext, + file: io.BufferedIOBase, + start_offset: int, + end_offset: int = -1, + keep_alive_interval_sec: int = -1, + block_size: int = BLOCK_SIZE, +): + """Streaming log in chunk from start to end offset. + + Stream binary file content in chunks from start offset to an end + offset if provided, else to the end of the file. + + Args: + context: gRPC server side context + file: Binary file to stream + start_offset: File offset where streaming starts + end_offset: If -1, implying streaming til the EOF. + keep_alive_interval_sec: Duration for which streaming will be + retried when reaching the file end, -1 means no retry. + block_size: Number of bytes per chunk, exposed for testing + + Return: + Async generator of StreamReply + """ + assert "b" in file.mode, "Only binary file is supported." 
+ assert not ( + keep_alive_interval_sec >= 0 and end_offset is not -1 + ), "Keep-alive is not allowed when specifying an end offset" + + file.seek(start_offset, 0) + cur_offset = start_offset + + # Until gRPC is done + while not context.done(): + # Read in block + if end_offset != -1: + to_read = min(end_offset - cur_offset, block_size) + else: + to_read = block_size + + bytes = file.read(to_read) + + if bytes == b"": + # Stop reading + if keep_alive_interval_sec >= 0: + await asyncio.sleep(keep_alive_interval_sec) + # Try reading again + continue + + # Have read the entire file, done + break + logger.debug(f"Sending {len(bytes)} bytes at {cur_offset}") + yield reporter_pb2.StreamLogReply(data=bytes) + + # Have read the requested section [start_offset, end_offset), done + cur_offset += len(bytes) + if end_offset != -1 and cur_offset >= end_offset: + break + class LogAgent(dashboard_utils.DashboardAgentModule): def __init__(self, dashboard_agent): @@ -31,13 +220,7 @@ def is_minimal_module(): return False -# 64 KB -BLOCK_SIZE = 1 << 16 - - -class LogAgentV1Grpc( - dashboard_utils.DashboardAgentModule, reporter_pb2_grpc.ReporterServiceServicer -): +class LogAgentV1Grpc(dashboard_utils.DashboardAgentModule): def __init__(self, dashboard_agent): super().__init__(dashboard_agent) @@ -45,15 +228,17 @@ async def run(self, server): if server: reporter_pb2_grpc.add_LogServiceServicer_to_server(self, server) - # TODO: should this return True @staticmethod def is_minimal_module(): + # Dashboard is only available with non-minimal install now. return False async def ListLogs(self, request, context): """ Lists all files in the active Ray logs directory. + Part of `LogService` gRPC. + NOTE: These RPCs are used by state_head.py, not log_head.py """ path = Path(self._dashboard_agent.log_dir) @@ -73,6 +258,8 @@ async def StreamLog(self, request, context): the end of the file if `request.keep_alive == True`. 
Else, it terminates the stream once there are no more bytes to read from the log file. + Part of `LogService` gRPC. + NOTE: These RPCs are used by state_head.py, not log_head.py """ # NOTE: If the client side connection is closed, this handler will @@ -80,75 +267,48 @@ async def StreamLog(self, request, context): lines = request.lines if request.lines else 1000 filepath = f"{self._dashboard_agent.log_dir}/{request.log_file_name}" - if "/" in request.log_file_name or not os.path.isfile(filepath): + if not os.path.isfile(filepath): await context.send_initial_metadata( [[log_consts.LOG_GRPC_ERROR, log_consts.FILE_NOT_FOUND]] ) else: with open(filepath, "rb") as f: await context.send_initial_metadata([]) - # If requesting the whole file, we stream it since it may be large. - if lines == -1: - while not context.done(): - bytes = f.read(BLOCK_SIZE) - if bytes == b"": - end = f.tell() - break - yield reporter_pb2.StreamLogReply(data=bytes) - else: - bytes, end = tail(f, lines) - yield reporter_pb2.StreamLogReply(data=bytes + b"\n") - if request.keep_alive: - interval = request.interval if request.interval else 1 - f.seek(end) - while not context.done(): - await asyncio.sleep(interval) - bytes = f.read() - if bytes != b"": - yield reporter_pb2.StreamLogReply(data=bytes) + # Default stream entire file + start_offset = 0 + end_offset = find_end_offset_file(f) -def tail(f: io.TextIOBase, lines: int): - """Tails the given file (in 'rb' mode) + if lines != -1: + # If specified tail line number, + # look for the file offset with the line count + start_offset = find_start_offset_last_n_lines_from_offset( + f, offset=end_offset, n=lines + ) - We assume that any "lines" parameter is not significant (<100,000 lines) - and will result in a buffer with a small memory profile (<1MB) - - Taken from: https://stackoverflow.com/a/136368/8299684 - - Examples: - Args: - f: text file in 'rb' mode - lines: The number of lines to read from the end of the file. 
- Returns: - string containing the lines of the file, - the position of the last byte read in units of bytes - """ - - total_lines_wanted = lines + # If keep alive: following the log every 'interval' + keep_alive_interval_sec = -1 + if request.keep_alive: + keep_alive_interval_sec = ( + request.interval + if request.interval + else DEFAULT_KEEP_ALIVE_INTERVAL_SEC + ) - # Seek to the end of the file - f.seek(0, 2) - block_end_byte = f.tell() + # When following (keep_alive), it will read beyond the end + end_offset = -1 - last_byte_read = block_end_byte - lines_to_go = total_lines_wanted - block_number = -1 - blocks = [] + logger.info( + f"Tailing logs from {start_offset} to {end_offset} for {lines}, " + f"with keep_alive={keep_alive_interval_sec}" + ) - # Read blocks into memory until we have seen at least - # `total_lines_wanted` number of lines. Then, return a string - # containing the last `total_lines_wanted` number of lines - while lines_to_go > 0 and block_end_byte > 0: - if block_end_byte - BLOCK_SIZE > 0: - f.seek(block_number * BLOCK_SIZE, 2) - blocks.append(f.read(BLOCK_SIZE)) - else: - f.seek(0, 0) - blocks.append(f.read(block_end_byte)) - lines_found = blocks[-1].count(b"\n") - lines_to_go -= lines_found - block_end_byte -= BLOCK_SIZE - block_number -= 1 - all_read_text = b"".join(reversed(blocks)) - return b"\n".join(all_read_text.splitlines()[-total_lines_wanted:]), last_byte_read + # Read and send the file data in chunk + async for chunk_res in _stream_log_in_chunk( + context=context, + file=f, + start_offset=start_offset, + end_offset=end_offset, + keep_alive_interval_sec=keep_alive_interval_sec, + ): + yield chunk_res diff --git a/dashboard/utils.py b/dashboard/utils.py index 9ef0ad986ed4..6434ce5c1b60 100644 --- a/dashboard/utils.py +++ b/dashboard/utils.py @@ -58,7 +58,7 @@ async def run(self, server): def is_minimal_module(): """ Return True if the module is minimal, meaning it - should work with `pip install ray` that doesn't requires additonal 
+ should work with `pip install ray` that doesn't requires additional dependencies. """ @@ -87,7 +87,7 @@ async def run(self, server): def is_minimal_module(): """ Return True if the module is minimal, meaning it - should work with `pip install ray` that doesn't requires additonal + should work with `pip install ray` that doesn't requires additional dependencies. """ diff --git a/python/ray/tests/conftest.py b/python/ray/tests/conftest.py index cdc049ca860f..475b6c8b7ede 100644 --- a/python/ray/tests/conftest.py +++ b/python/ray/tests/conftest.py @@ -1143,3 +1143,23 @@ def enable_syncer_test(request, monkeypatch): yield monkeypatch.delenv("RAY_use_ray_syncer") ray._raylet.Config.initialize("") + + +@pytest.fixture(scope="function") +def temp_file(request): + with tempfile.NamedTemporaryFile("r+b") as fp: + yield fp + + +@pytest.fixture(scope="module") +def random_ascii_file(request): + import random + import string + + file_size = getattr(request, "param", 1 << 10) + + with tempfile.NamedTemporaryFile(mode="r+b") as fp: + fp.write("".join(random.choices(string.ascii_letters, k=file_size)).encode()) + fp.flush() + + yield fp diff --git a/python/ray/tests/test_state_api_log.py b/python/ray/tests/test_state_api_log.py index d821799982f2..a5f0ed9bdfbe 100644 --- a/python/ray/tests/test_state_api_log.py +++ b/python/ray/tests/test_state_api_log.py @@ -8,6 +8,7 @@ from ray.experimental.state.state_cli import logs_state_cli_group import requests from click.testing import CliRunner +import grpc import ray from ray._private.test_utils import ( @@ -20,7 +21,13 @@ from ray.core.generated.gcs_pb2 import ActorTableData from ray.core.generated.reporter_pb2 import ListLogsReply, StreamLogReply from ray.dashboard.modules.actor.actor_head import actor_table_data_to_dict -from ray.dashboard.modules.log.log_agent import tail as tail_file +from ray.dashboard.modules.log.log_agent import ( + find_end_offset_file, + find_end_offset_next_n_lines_from_offset, + 
find_start_offset_last_n_lines_from_offset, +) +from ray.dashboard.modules.log.log_agent import _stream_log_in_chunk + from ray.dashboard.modules.log.log_manager import LogsManager from ray.dashboard.tests.conftest import * # noqa from ray.experimental.state.api import get_log, list_logs, list_nodes, list_workers @@ -57,34 +64,228 @@ def generate_actor_data(id, node_id, worker_id): # Unit Tests (Log Agent) - - -@pytest.mark.skipif(sys.platform == "win32", reason="File path incorrect on Windows.") -def test_logs_tail(): +def _read_file(fp, start, end): + """Help func to read a file with offsets""" + fp.seek(start, 0) + if end == -1: + return fp.read() + return fp.read(end - start) + + +async def _stream_log(context, fp, start, end): + """Help func to stream a log with offsets""" + result = bytearray() + async for chunk_res in _stream_log_in_chunk( + context=context, + file=fp, + start_offset=start, + end_offset=end, + keep_alive_interval_sec=-1, + ): + result += chunk_res.data + return result + + +def _write_lines_and_get_offset_at_index( + f, num_lines, start_offset=0, trailing_new_line=True +): """ - Unit test for tail + Write multiple lines into a file, and record offsets + + Args: + f: a binary file object that's writable + num_lines: Number of lines to write + start_offset: The offset to start writing + trailing_new_line: True if a '\n' is added at the end of the + lines. + + Return: + offsets: A list of offsets of the lines. + offset_end: The offset of the end of file. 
""" - TOTAL_LINES = 1000 - FILE_NAME = "test_file.txt" - try: - with open(FILE_NAME, "w") as f: - for i in range(TOTAL_LINES): - # Check this works with unicode - f.write(f"Message 日志 {i:4}\n") - file = open(FILE_NAME, "rb") - text, byte_pos = tail_file(file, 100) - assert byte_pos == TOTAL_LINES * len( - "Message 日志 1000\n".encode(encoding="utf-8") - ) - lines = text.decode("utf-8").split("\n") - assert len(lines) == 100 - assert lines[0] == "Message 日志 900" - assert lines[99] == "Message 日志 999" - except Exception as e: - raise e - finally: - if os.path.exists(FILE_NAME): - os.remove(FILE_NAME) + f.seek(start_offset, 0) + + offsets = [] + for i in range(num_lines): + offsets.append(f.tell()) + if i == num_lines - 1 and not trailing_new_line: + # Last line no newline + line = f"{i}-test-line" + else: + line = f"{i}-test-line\n" + f.write(line.encode("utf-8")) + + f.flush() + f.seek(0, 2) + offset_end = f.tell() + + return offsets, offset_end + + +@pytest.mark.parametrize("new_line", [True, False]) +@pytest.mark.parametrize("block_size", [4, 16, 256]) +def test_find_start_offset_last_n_lines_from_offset(new_line, temp_file, block_size): + file = temp_file + o, end_file = _write_lines_and_get_offset_at_index( + file, num_lines=50, start_offset=0, trailing_new_line=new_line + ) + # Test the function with different offsets and number of lines to find + assert find_start_offset_last_n_lines_from_offset(file, o[3], 1, block_size) == o[2] + assert ( + find_start_offset_last_n_lines_from_offset(file, o[10], 10, block_size) == o[0] + ) + + # Test end of file last 1 line + assert find_start_offset_last_n_lines_from_offset(file, -1, 1, block_size) == o[-1] + + # Test end of file no line + assert ( + find_start_offset_last_n_lines_from_offset(file, -1, 0, block_size) == end_file + ) + + # Test no line from middle of file + assert ( + find_start_offset_last_n_lines_from_offset(file, o[30], 0, block_size) == o[30] + ) + + # Test more lines than file + assert ( + 
find_start_offset_last_n_lines_from_offset(file, o[30], 100, block_size) == o[0] + ) + + # Test offsets in the middle of a line + assert ( + find_start_offset_last_n_lines_from_offset(file, o[2] + 1, 1, block_size) + == o[2] + ) + assert ( + find_start_offset_last_n_lines_from_offset(file, o[2] - 1, 1, block_size) + == o[1] + ) + + +def test_find_end_offset_next_n_lines_from_offset(temp_file): + file = temp_file + o, end_file = _write_lines_and_get_offset_at_index( + file, num_lines=10, start_offset=0 + ) + # Test the function with different offsets and number of lines to find + assert find_end_offset_next_n_lines_from_offset(file, o[3], 1) == o[4] + assert find_end_offset_next_n_lines_from_offset(file, o[3], 2) == o[5] + assert find_end_offset_next_n_lines_from_offset(file, 0, 1) == o[1] + + # Test end of file + assert find_end_offset_next_n_lines_from_offset(file, o[3], 999) == end_file + + # Test offset diff + assert find_end_offset_next_n_lines_from_offset(file, 1, 1) == o[1] + assert find_end_offset_next_n_lines_from_offset(file, o[1] - 1, 1) == o[1] + + +@pytest.mark.asyncio +@pytest.mark.parametrize("random_ascii_file", [1 << 20], indirect=True) +@pytest.mark.parametrize( + "start_offset,end_offset", + [ + (0, 1 << 20), + (1 << 20, 1 << 20), + (0, 0), + (0, 1), + (1 << 16, 1 << 20), + (1024, 2042), + ], +) +async def test_stream_log_in_chunk(random_ascii_file, start_offset, end_offset): + """Test streaming of a file from different offsets""" + test_file = random_ascii_file + context = MagicMock(grpc.aio.ServicerContext) + context.done.return_value = False + + expected_file_content = _read_file(test_file, start_offset, end_offset) + actual_log_content = await _stream_log(context, test_file, start_offset, end_offset) + + assert ( + expected_file_content == actual_log_content + ), "Non-matching content from log streamed" + + +@pytest.mark.asyncio +@pytest.mark.parametrize( + "lines_to_tail,total_lines", + [(0, 100), (100, 100), (10, 100), (1, 100), (99, 100)], 
+) +@pytest.mark.parametrize("trailing_new_line", [True, False]) +async def test_log_tails(lines_to_tail, total_lines, trailing_new_line, temp_file): + """Test tailing a file works""" + _write_lines_and_get_offset_at_index( + temp_file, + total_lines, + trailing_new_line=trailing_new_line, + ) + test_file = temp_file + context = MagicMock(grpc.aio.ServicerContext) + context.done.return_value = False + start_offset = find_start_offset_last_n_lines_from_offset( + test_file, offset=-1, n=lines_to_tail + ) + + actual_data = await _stream_log(context, test_file, start_offset, -1) + expected_data = _read_file(test_file, start_offset, -1) + + assert actual_data == expected_data, "Non-matching data from stream log" + + all_lines = actual_data.decode("utf-8") + assert all_lines.count("\n") == ( + lines_to_tail if trailing_new_line or lines_to_tail == 0 else lines_to_tail - 1 + ), "Non-matching number of lines tailed" + + +@pytest.mark.asyncio +@pytest.mark.parametrize( + "lines_to_tail,total_lines", + [(0, 5), (5, 5), (2, 5), (1, 5), (4, 5)], +) +async def test_log_tails_with_appends(lines_to_tail, total_lines, temp_file): + """Test tailing a log file that grows at the same time""" + _write_lines_and_get_offset_at_index(temp_file, total_lines) + test_file = temp_file + context = MagicMock(grpc.aio.ServicerContext) + context.done.return_value = False + start_offset = find_start_offset_last_n_lines_from_offset( + test_file, offset=-1, n=lines_to_tail + ) + + actual_data = await _stream_log(context, test_file, start_offset, -1) + + end_offset = find_end_offset_file(test_file) + expected_data = _read_file(test_file, start_offset, end_offset) + assert actual_data == expected_data, "Non-matching data from stream log" + + all_lines = actual_data.decode("utf-8") + assert all_lines.count("\n") == lines_to_tail, "Non-matching number of lines tailed" + + # Modify the file with append here + num_new_lines = 2 + _write_lines_and_get_offset_at_index( + temp_file, num_new_lines, 
start_offset=end_offset + ) + + # Tail again should read the new lines written + start_offset = find_start_offset_last_n_lines_from_offset( + test_file, offset=-1, n=lines_to_tail + num_new_lines + ) + + expected_data = _read_file(test_file, start_offset, -1) + actual_data = await _stream_log(context, test_file, start_offset, -1) + + assert ( + actual_data == expected_data + ), "Non-matching data from stream log after append" + + all_lines = actual_data.decode("utf-8") + assert ( + all_lines.count("\n") == lines_to_tail + num_new_lines + ), "Non-matching number of lines tailed after append" # Unit Tests (LogsManager) @@ -566,7 +767,8 @@ def verify_basic(): lines = [] for line in stream_response.iter_lines(): lines.append(line.decode("utf-8")) - return len(lines) == 5 or len(lines) == 6 + assert len(lines) == 5 or len(lines) == 6 + return True wait_for_condition(verify_basic) diff --git a/release/nightly_tests/stress_tests/test_state_api_scale.py b/release/nightly_tests/stress_tests/test_state_api_scale.py index a7442e05cc8d..0c43c19a0e81 100644 --- a/release/nightly_tests/stress_tests/test_state_api_scale.py +++ b/release/nightly_tests/stress_tests/test_state_api_scale.py @@ -276,7 +276,7 @@ def write_log(self, log_file_size_byte: int): time_taken = 0 t_start = time.perf_counter() - for s in get_log(actor_id=actor._actor_id.hex(), tail=-1): + for s in get_log(actor_id=actor._actor_id.hex(), tail=1000000000): t_end = time.perf_counter() time_taken += t_end - t_start # Not including this time From fdf4c5aeb8b933392bd4e33a0b1d8c24c7009b14 Mon Sep 17 00:00:00 2001 From: Jun Gong Date: Fri, 5 May 2023 08:23:46 -0700 Subject: [PATCH 250/424] [Doc] [Example] Add an example for OPT DeepSpeed batch inference. 
(#34361) Signed-off-by: Jun Gong --- doc/source/_toc.yml | 1 + doc/source/ray-air/examples/BUILD | 1 + doc/source/ray-air/examples/index.rst | 1 + .../opt_deepspeed_batch_inference.ipynb | 911 ++++++++++++++++++ .../30b_deepspeed_compute.yaml | 15 + .../30b_deepspeed_env.yaml | 18 + .../opt_deepspeed_batch_inference.ipynb | 1 + .../test_myst_doc.py | 1 + release/release_tests.yaml | 19 + 9 files changed, 968 insertions(+) create mode 100644 doc/source/ray-air/examples/opt_deepspeed_batch_inference.ipynb create mode 100644 release/air_examples/opt_deepspeed_batch_inference/30b_deepspeed_compute.yaml create mode 100644 release/air_examples/opt_deepspeed_batch_inference/30b_deepspeed_env.yaml create mode 120000 release/air_examples/opt_deepspeed_batch_inference/opt_deepspeed_batch_inference.ipynb create mode 120000 release/air_examples/opt_deepspeed_batch_inference/test_myst_doc.py diff --git a/doc/source/_toc.yml b/doc/source/_toc.yml index 0ab8509935d2..88247c248ba6 100644 --- a/doc/source/_toc.yml +++ b/doc/source/_toc.yml @@ -55,6 +55,7 @@ parts: - file: ray-air/deployment - file: ray-air/examples/index sections: + - file: ray-air/examples/opt_deepspeed_batch_inference - file: ray-air/examples/torch_image_example - file: ray-air/examples/torch_detection - file: ray-air/examples/convert_existing_pytorch_code_to_ray_air diff --git a/doc/source/ray-air/examples/BUILD b/doc/source/ray-air/examples/BUILD index ff2f4930cdb5..5b6ce7351c76 100644 --- a/doc/source/ray-air/examples/BUILD +++ b/doc/source/ray-air/examples/BUILD @@ -51,6 +51,7 @@ py_test_run_all_notebooks( "gptj_serving.ipynb", # Requires GPUs "stablediffusion_batch_prediction.ipynb", # Requires GPUs "gptj_deepspeed_fine_tuning.ipynb", # Requires release test + "opt_deepspeed_batch_inference.ipynb", # Requires release test ], data = ["//doc/source/ray-air/examples:air_examples"], tags = ["exclusive", "team:ml", "ray_air"], diff --git a/doc/source/ray-air/examples/index.rst 
b/doc/source/ray-air/examples/index.rst index 252364456b63..48c2227a072d 100644 --- a/doc/source/ray-air/examples/index.rst +++ b/doc/source/ray-air/examples/index.rst @@ -29,6 +29,7 @@ Text/NLP - :doc:`/ray-air/examples/gptj_batch_prediction`: How to use Ray AIR to do batch prediction with the Hugging Face Transformers GPT-J model. - :doc:`/ray-air/examples/gptj_serving`: How to use Ray AIR to do online serving with the Hugging Face Transformers GPT-J model. - :doc:`/ray-air/examples/dreambooth_finetuning`: How to fine-tune a DreamBooth text-to-image model with your own images. +- :doc:`/ray-air/examples/opt_deepspeed_batch_inference`: How to run batch inference on a dataset of texts with a 30B OPT model. Image/CV -------- diff --git a/doc/source/ray-air/examples/opt_deepspeed_batch_inference.ipynb b/doc/source/ray-air/examples/opt_deepspeed_batch_inference.ipynb new file mode 100644 index 000000000000..465ff50bf2df --- /dev/null +++ b/doc/source/ray-air/examples/opt_deepspeed_batch_inference.ipynb @@ -0,0 +1,911 @@ +{ + "cells": [ + { + "attachments": {}, + "cell_type": "markdown", + "id": "dfdf1047", + "metadata": {}, + "source": [ + "# Batch Inference with OPT 30B and Ray Dataset\n", + "\n", + "This notebook was tested on a single p3.16xlarge instance with 8 V100 GPUs.\n", + "\n", + "## Set Up\n", + "Initialize Ray and a runtime environment to ensure that all dependent packages are available." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "36bb842b-b6b6-4cbc-a4f9-a3a65ec069ce", + "metadata": { + "tags": [] + }, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2023-04-22 11:12:15,071\tINFO worker.py:1314 -- Using address localhost:9031 set in the environment variable RAY_ADDRESS\n", + "fatal: not a git repository (or any parent up to mount point /home/ray)\n", + "Stopping at filesystem boundary (GIT_DISCOVERY_ACROSS_FILESYSTEM not set).\n", + "2023-04-22 11:12:15,676\tINFO worker.py:1432 -- Connecting to existing Ray cluster at address: 172.31.244.129:9031...\n", + "2023-04-22 11:12:15,724\tINFO worker.py:1607 -- Connected to Ray cluster. View the dashboard at https://console.anyscale.com/api/v2/sessions/ses_jgkdnu2723aleytwqqhebr12vs/services?redirect_to=dashboard \n", + "2023-04-22 11:12:15,732\tINFO packaging.py:347 -- Pushing file package 'gcs://_ray_pkg_7ad665e3661cefc8f8037daeb0b5ba6e.zip' (0.03MiB) to Ray cluster...\n", + "2023-04-22 11:12:15,733\tINFO packaging.py:360 -- Successfully pushed file package 'gcs://_ray_pkg_7ad665e3661cefc8f8037daeb0b5ba6e.zip'.\n" + ] + }, + { + "data": { + "text/html": [ + "
    \n", + "
    \n", + "

    Ray

    \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "\n", + "\n", + "
    Python version:3.9.15
    Ray version: 3.0.0.dev0
    Dashboard:http://console.anyscale.com/api/v2/sessions/ses_jgkdnu2723aleytwqqhebr12vs/services?redirect_to=dashboard
    \n", + "
    \n", + "
    \n" + ], + "text/plain": [ + "RayContext(dashboard_url='console.anyscale.com/api/v2/sessions/ses_jgkdnu2723aleytwqqhebr12vs/services?redirect_to=dashboard', python_version='3.9.15', ray_version='3.0.0.dev0', ray_commit='17df2ef17983406bb178c251044c9dc654b378c0', address_info={'node_ip_address': '172.31.244.129', 'raylet_ip_address': '172.31.244.129', 'redis_address': None, 'object_store_address': '/tmp/ray/session_2023-04-22_11-09-11_790337_150/sockets/plasma_store', 'raylet_socket_name': '/tmp/ray/session_2023-04-22_11-09-11_790337_150/sockets/raylet', 'webui_url': 'console.anyscale.com/api/v2/sessions/ses_jgkdnu2723aleytwqqhebr12vs/services?redirect_to=dashboard', 'session_dir': '/tmp/ray/session_2023-04-22_11-09-11_790337_150', 'metrics_export_port': 61073, 'gcs_address': '172.31.244.129:9031', 'address': '172.31.244.129:9031', 'dashboard_agent_listen_port': 52365, 'node_id': 'e6e9dfeda4469dd816c080bec2cf1cd12abdd978ae74b87e869164eb'})" + ] + }, + "execution_count": 1, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "import ray\n", + "\n", + "ray.init(\n", + " runtime_env={\n", + " \"pip\": [\n", + " \"numpy==1.23\",\n", + " \"protobuf==3.20.0\",\n", + " \"transformers==4.27.2\",\n", + " \"accelerate==0.17.1\",\n", + " \"deepspeed==0.8.3\",\n", + " ],\n", + " \"env_vars\": {\n", + " \"HF_HUB_DISABLE_PROGRESS_BARS\": \"1\",\n", + " }\n", + " }\n", + ")" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "b619a878", + "metadata": {}, + "source": [ + "## Define Hyperparameters\n", + "\n", + "Define a list of hyperparameters as a global dataclass.\n", + "\n", + "Refer to https://deepspeed.readthedocs.io/en/stable/inference-init.html#deepspeed.inference.config.DeepSpeedInferenceConfig for more details about the configurations of a DeepSpeed inference job." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "613df744", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "from dataclasses import dataclass\n", + "from typing import Optional\n", + "\n", + "\n", + "@dataclass\n", + "class Config:\n", + " model_name: str = \"facebook/opt-30b\"\n", + " # Path to HuggingFace cache directory. Default is ~/.cache/huggingface/.\n", + " cache_dir: Optional[str] = None\n", + " # Path to the directory that actually holds model files.\n", + " # e.g., ~/.cache/huggingface/models--facebook--opt-30b/snapshots/xxx/\n", + " # If this path is not None, we skip download models from HuggingFace.\n", + " repo_root: Optional[str] = None\n", + " # This is how many DeepSpeed-inference replicas to run for\n", + " # this batch inference job.\n", + " num_worker_groups: int = 1\n", + " # Number of DeepSpeed workers per group.\n", + " num_workers_per_group: int = 8\n", + "\n", + " batch_size: int = 1\n", + " dtype: str = \"float16\"\n", + " # Maximum number of tokens DeepSpeed inference-engine can work with,\n", + " # including the input and output tokens.\n", + " max_tokens: int = 1024\n", + " # Use meta tensors to initialize model.\n", + " use_meta_tensor: bool = True\n", + " # Use cache for generation.\n", + " use_cache: bool = True\n", + " # The path for which we want to save the loaded model with a checkpoint.\n", + " save_mp_checkpoint_path: Optional[str] = None\n", + "\n", + "\n", + "config = Config()" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "28df05bf", + "metadata": {}, + "source": [ + "## Download and Cache Model\n", + "\n", + "Next, we will download and cache model files on all instances of the cluster before we run the job.\n", + "\n", + "Notice that when we download model snapshots from HuggingFace, we skip files that end with safetensors, msgpack, and h5 extensions. These are Tensorflow and JAX weight files. 
We only need PyTorch weights for this example.\n", + "\n", + "We execute the ``download_model()`` function on every node of the cluster by using a ``NodeAffinitySchedulingStrategy`` from Ray Core." + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "63b8a84d-57a6-4430-8fe8-9811760b8b7c", + "metadata": { + "tags": [] + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Caching model locally ...\n", + "Done. Model saved in /home/ray/.cache/huggingface/hub/models--facebook--opt-30b/snapshots/ceea0a90ac0f6fae7c2c34bcb40477438c152546\n" + ] + } + ], + "source": [ + "\n", + "from huggingface_hub import snapshot_download\n", + "import ray\n", + "from ray.util.scheduling_strategies import NodeAffinitySchedulingStrategy\n", + "\n", + "\n", + "@ray.remote\n", + "def download_model(config: Config):\n", + " # This function downloads the specified HF model into a local directory.\n", + " # This can also download models from cloud storages like S3.\n", + " return snapshot_download(\n", + " repo_id=config.model_name,\n", + " cache_dir=config.cache_dir,\n", + " allow_patterns=[\"*\"],\n", + " # Skip downloading TF and FLAX weight files.\n", + " ignore_patterns=[\"*.safetensors\", \"*.msgpack\", \"*.h5\"],\n", + " revision=None,\n", + " )\n", + "\n", + "if config.repo_root is None:\n", + " # Download model files to all GPU nodes, and set correct repo_root.\n", + " refs = []\n", + " for node in ray.nodes():\n", + " if node[\"Alive\"] and node[\"Resources\"].get(\"GPU\", None):\n", + " node_id = node[\"NodeID\"]\n", + " scheduling_strategy = NodeAffinitySchedulingStrategy(\n", + " node_id=node_id, soft=False\n", + " )\n", + " options = {\"scheduling_strategy\": scheduling_strategy}\n", + " refs.append(\n", + " download_model.options(scheduling_strategy=scheduling_strategy).remote(config)\n", + " )\n", + "\n", + " print(\"Caching model locally ...\")\n", + "\n", + " # Wait for models to finish downloading.\n", + " config.repo_root = 
ray.get(refs)[0]\n", + "\n", + " print(f\"Done. Model saved in {config.repo_root}\")\n", + "else:\n", + " print(f\"Using existing model saved in {config.repo_root}\")" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "6b14b7d9", + "metadata": {}, + "source": [ + "## Define DeepSpeed Utility Classes\n", + "\n", + "Next, we define a few utility classes and functions that are useful for setting up and running the DeepSpeed inference job.\n", + "\n", + "Note that the Pipeline is modeled after https://github.com/microsoft/DeepSpeedExamples/tree/efacebb3ddbea86bb20c3af30fd060be0fa41ac8/inference/huggingface/text-generation." + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "f9aad2a9", + "metadata": { + "tags": [] + }, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/ray/anaconda3/lib/python3.9/site-packages/xgboost/compat.py:31: FutureWarning: pandas.Int64Index is deprecated and will be removed from pandas in a future version. 
Use pandas.Index with the appropriate dtype instead.\n", + " from pandas import MultiIndex, Int64Index\n" + ] + } + ], + "source": [ + "import gc\n", + "import io\n", + "import json\n", + "import math\n", + "import os\n", + "from pathlib import Path\n", + "from typing import List\n", + "\n", + "import deepspeed\n", + "import torch\n", + "from deepspeed.runtime.utils import see_memory_usage\n", + "from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer\n", + "\n", + "\n", + "class DSPipeline:\n", + " \"\"\"\n", + " Example helper class for comprehending DeepSpeed Meta Tensors, meant to mimic HF pipelines.\n", + " The DSPipeline can run with and without meta tensors.\n", + " \"\"\"\n", + "\n", + " def __init__(\n", + " self,\n", + " model_name,\n", + " dtype=torch.float16,\n", + " is_meta=True,\n", + " device=-1,\n", + " repo_root=None,\n", + " ):\n", + " self.model_name = model_name\n", + " self.dtype = dtype\n", + "\n", + " if isinstance(device, torch.device):\n", + " self.device = device\n", + " elif isinstance(device, str):\n", + " self.device = torch.device(device)\n", + " elif device < 0:\n", + " self.device = torch.device(\"cpu\")\n", + " else:\n", + " self.device = torch.device(f\"cuda:{device}\")\n", + "\n", + " self.tokenizer = AutoTokenizer.from_pretrained(model_name, padding_side=\"right\")\n", + " self.tokenizer.pad_token = self.tokenizer.eos_token\n", + "\n", + " if is_meta:\n", + " # When meta tensors enabled, use checkpoints\n", + " self.config = AutoConfig.from_pretrained(self.model_name)\n", + " self.checkpoints_json = self._generate_json(repo_root)\n", + "\n", + " with deepspeed.OnDevice(dtype=dtype, device=\"meta\"):\n", + " self.model = AutoModelForCausalLM.from_config(self.config)\n", + " else:\n", + " self.model = AutoModelForCausalLM.from_pretrained(self.model_name)\n", + "\n", + " self.model.eval()\n", + "\n", + " def __call__(self, inputs, **kwargs):\n", + " input_list = [inputs] if isinstance(inputs, str) else inputs\n", + 
" outputs = self.generate_outputs(input_list, **kwargs)\n", + " return outputs\n", + "\n", + " def _generate_json(self, repo_root):\n", + " if os.path.exists(os.path.join(repo_root, \"ds_inference_config.json\")):\n", + " # Simply use the available inference config.\n", + " return os.path.join(repo_root, \"ds_inference_config.json\")\n", + "\n", + " # Write a checkpoints config file in local directory.\n", + " checkpoints_json = \"checkpoints.json\"\n", + "\n", + " with io.open(checkpoints_json, \"w\", encoding=\"utf-8\") as f:\n", + " file_list = [\n", + " str(entry).split(\"/\")[-1]\n", + " for entry in Path(repo_root).rglob(\"*.[bp][it][n]\")\n", + " if entry.is_file()\n", + " ]\n", + " data = {\n", + " # Hardcode bloom for now.\n", + " # Possible choices are \"bloom\", \"ds_model\", \"Megatron\".\n", + " \"type\": \"bloom\",\n", + " \"checkpoints\": file_list,\n", + " \"version\": 1.0\n", + " }\n", + " json.dump(data, f)\n", + "\n", + " return checkpoints_json\n", + "\n", + " def generate_outputs(self, inputs, **generate_kwargs):\n", + " input_tokens = self.tokenizer.batch_encode_plus(\n", + " inputs, return_tensors=\"pt\", padding=True\n", + " )\n", + " for t in input_tokens:\n", + " if torch.is_tensor(input_tokens[t]):\n", + " input_tokens[t] = input_tokens[t].to(self.device)\n", + "\n", + " self.model.cuda().to(self.device)\n", + "\n", + " outputs = self.model.generate(**input_tokens, **generate_kwargs)\n", + " outputs = self.tokenizer.batch_decode(outputs, skip_special_tokens=True)\n", + "\n", + " return outputs\n", + "\n", + "\n", + "def _memory_usage(gpu_id: int, msg: str):\n", + " \"\"\"Print memory usage.\"\"\"\n", + " if gpu_id != 0:\n", + " return\n", + " see_memory_usage(msg, True)\n", + "\n", + "\n", + "def init_model(config: Config, world_size: int, gpu_id: int) -> DSPipeline:\n", + " \"\"\"Initialize the deepspeed model.\"\"\"\n", + " data_type = getattr(torch, config.dtype)\n", + "\n", + " _memory_usage(gpu_id, \"before init\")\n", + " pipe = 
DSPipeline(\n", + " model_name=config.model_name,\n", + " dtype=data_type,\n", + " is_meta=config.use_meta_tensor,\n", + " device=gpu_id,\n", + " repo_root=config.repo_root,\n", + " )\n", + " _memory_usage(gpu_id, \"after init\")\n", + "\n", + " if config.use_meta_tensor:\n", + " ds_kwargs = dict(\n", + " base_dir=config.repo_root, checkpoint=pipe.checkpoints_json\n", + " )\n", + " else:\n", + " ds_kwargs = dict()\n", + "\n", + " gc.collect()\n", + "\n", + " pipe.model = deepspeed.init_inference(\n", + " pipe.model,\n", + " dtype=data_type,\n", + " mp_size=world_size,\n", + " replace_with_kernel_inject=True,\n", + " replace_method=True,\n", + " max_tokens=config.max_tokens,\n", + " save_mp_checkpoint_path=config.save_mp_checkpoint_path,\n", + " **ds_kwargs,\n", + " )\n", + " _memory_usage(gpu_id, \"after init_inference\")\n", + "\n", + " return pipe\n", + "\n", + "\n", + "def generate(\n", + " input_sentences: List[str], pipe: DSPipeline, batch_size: int, **generate_kwargs\n", + ") -> List[str]:\n", + " \"\"\"Generate predictions using a DSPipeline.\"\"\"\n", + " if batch_size > len(input_sentences):\n", + " # Dynamically extend to support larger bs by repetition.\n", + " input_sentences *= math.ceil(batch_size / len(input_sentences))\n", + "\n", + " inputs = input_sentences[:batch_size]\n", + " outputs = pipe(inputs, **generate_kwargs)\n", + " return outputs" + ] + }, + { + "cell_type": "markdown", + "id": "bd20d4d9", + "metadata": {}, + "source": [] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "62eee91d", + "metadata": {}, + "source": [ + "## Define a DeepSpeed Predictor\n", + "\n", + "Define an AIR Predictor to be instantiated by the Dataset pipeline below.\n", + "\n", + "Each DeepSpeedPredictor is a stateful Ray actor that understands how to process the input prompt using a group of DeepSpeed inference workers.\n", + "\n", + "More specifically, each DeepSpeedPredictor sets up a proper PyTorch DDP process group before spinning up multiple 
PredictionWorkers. Since the model is loaded using the DeepSpeed inference framework, each PredictionWorker handles a shard of the entire DeepSpeed inference model.\n" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "516a200d-14e4-4b52-a615-e09778ba4117", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "from typing import List\n", + "\n", + "import pandas as pd\n", + "import ray\n", + "import ray.util\n", + "from ray.air import Checkpoint, ScalingConfig\n", + "from ray.air.util.torch_dist import (\n", + " TorchDistributedWorker,\n", + " init_torch_dist_process_group,\n", + " shutdown_torch_dist_process_group,\n", + ")\n", + "from ray.train.predictor import Predictor\n", + "from ray.util.scheduling_strategies import PlacementGroupSchedulingStrategy\n", + "\n", + "\n", + "@ray.remote\n", + "class PredictionWorker(TorchDistributedWorker):\n", + " \"\"\"A PredictionWorker is a Ray remote actor that runs a single shard of a DeepSpeed job.\n", + " \n", + " Multiple PredictionWorkers of the same WorkerGroup form a PyTorch DDP process\n", + " group and work together under the orchestration of DeepSpeed.\n", + " \"\"\"\n", + " def __init__(self, config: Config, world_size: int):\n", + " self.config = config\n", + " self.world_size = world_size\n", + "\n", + " def init_model(self, local_rank: int):\n", + " \"\"\"Initialize model for inference.\"\"\"\n", + " # Note: We have to provide the local_rank that was used to initiate\n", + " # the DDP process group here. 
e.g., a PredictionWorker may be the\n", + " # rank 0 worker of a group, but occupies gpu 7.\n", + " self.generator = init_model(self.config, self.world_size, local_rank)\n", + "\n", + " def generate(self, data: pd.DataFrame, column: str, **kwargs) -> List[str]:\n", + " return generate(\n", + " list(data[column]), self.generator, self.config.batch_size, **kwargs\n", + " )\n", + "\n", + "\n", + "# TODO: This Predictor should be part of Ray AIR.\n", + "class DeepSpeedPredictor(Predictor):\n", + " def __init__(self, checkpoint: Checkpoint, scaling_config: ScalingConfig) -> None:\n", + " self.checkpoint = checkpoint\n", + " self.scaling_config = scaling_config\n", + " self.init_worker_group(scaling_config)\n", + "\n", + " def __del__(self):\n", + " shutdown_torch_dist_process_group(self.prediction_workers)\n", + "\n", + " def init_worker_group(self, scaling_config: ScalingConfig):\n", + " \"\"\"Create the worker group.\n", + "\n", + " Each worker in the group communicates with other workers through the\n", + " torch distributed backend. The worker group is inelastic (a failure of\n", + " one worker destroys the entire group). 
Each worker in the group\n", + " recieves the same input data and outputs the same generated text.\n", + " \"\"\"\n", + " config = self.checkpoint.to_dict()[\"config\"]\n", + "\n", + " # Start a placement group for the workers.\n", + " self.pg = scaling_config.as_placement_group_factory().to_placement_group()\n", + " prediction_worker_cls = PredictionWorker.options(\n", + " num_cpus=scaling_config.num_cpus_per_worker,\n", + " num_gpus=scaling_config.num_gpus_per_worker,\n", + " resources=scaling_config.additional_resources_per_worker,\n", + " scheduling_strategy=PlacementGroupSchedulingStrategy(\n", + " placement_group=self.pg, placement_group_capture_child_tasks=True\n", + " ),\n", + " )\n", + " # Create the prediction workers.\n", + " self.prediction_workers = [\n", + " prediction_worker_cls.remote(config, scaling_config.num_workers)\n", + " for i in range(scaling_config.num_workers)\n", + " ]\n", + "\n", + " # Initialize torch distributed process group for the workers.\n", + " local_ranks = init_torch_dist_process_group(self.prediction_workers, backend=\"nccl\")\n", + "\n", + " # Initialize the model on each worker.\n", + " ray.get([\n", + " worker.init_model.remote(local_rank)\n", + " for worker, local_rank in zip(self.prediction_workers, local_ranks)\n", + " ])\n", + "\n", + " def _predict_pandas(\n", + " self,\n", + " data: pd.DataFrame,\n", + " input_column: str = \"prompt\",\n", + " output_column: str = \"output\",\n", + " **kwargs\n", + " ) -> pd.DataFrame:\n", + " data_ref = ray.put(data)\n", + " prediction = ray.get(\n", + " [\n", + " worker.generate.remote(data_ref, column=input_column, **kwargs)\n", + " for worker in self.prediction_workers\n", + " ]\n", + " )[0]\n", + "\n", + " return pd.DataFrame(prediction, columns=[output_column])\n", + "\n", + " @classmethod\n", + " def from_checkpoint(cls, checkpoint: Checkpoint, **kwargs) -> \"Predictor\":\n", + " return cls(checkpoint=checkpoint, **kwargs)\n" + ] + }, + { + "attachments": {}, + "cell_type": 
"markdown", + "id": "ca57e150", + "metadata": {}, + "source": [ + "## Create a Ray Dataset Pipeline\n", + "\n", + "Finally, we connect all these pieces together, and use a BatchPredictor to run multiple copies of the DeepSpeedPredictor actors.\n", + "\n", + "This step helps parallelize our batch inference job and utilize all available resources in the cluster." + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "48bf4a4f-0ac4-4e77-a05a-710d42e0dc4e", + "metadata": { + "tags": [] + }, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2023-04-22 11:14:12,074\tWARNING datastream.py:4124 -- Deprecation warning: use Datastream.materialize() instead of fully_executed().\n", + "2023-04-22 11:14:12,079\tINFO streaming_executor.py:87 -- Executing DAG InputDataBuffer[Input] -> AllToAllOperator[Repartition] -> AllToAllOperator[RandomShuffle]\n", + "2023-04-22 11:14:12,081\tINFO streaming_executor.py:88 -- Execution config: ExecutionOptions(resource_limits=ExecutionResources(cpu=None, gpu=None, object_store_memory=None), locality_with_output=False, preserve_order=False, actor_locality_enabled=True, verbose_progress=False)\n", + "2023-04-22 11:14:12,082\tINFO streaming_executor.py:90 -- Tip: To enable per-operator progress reporting, set RAY_DATA_VERBOSE_PROGRESS=1.\n" + ] + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "- Repartition 1: 0%| | 0/16 [00:00 ActorPoolMapOperator[MapBatches(ScoringWrapper)]\n", + "2023-04-22 11:14:12,682\tINFO streaming_executor.py:88 -- Execution config: ExecutionOptions(resource_limits=ExecutionResources(cpu=None, gpu=None, object_store_memory=None), locality_with_output=False, preserve_order=False, actor_locality_enabled=True, verbose_progress=False)\n", + "2023-04-22 11:14:12,683\tINFO streaming_executor.py:90 -- Tip: To enable per-operator progress reporting, set 
RAY_DATA_VERBOSE_PROGRESS=1.\n", + "2023-04-22 11:14:12,785\tINFO actor_pool_map_operator.py:114 -- MapBatches(ScoringWrapper): Waiting for 1 pool actors to start...\n", + "(_MapWorker pid=7005) The cache for model files in Transformers v4.22.0 has been updated. Migrating your old cache. This is a one-time only operation. You can interrupt this and resume the migration later on by calling `transformers.utils.move_cache()`.\n", + "0it [00:00, ?it/s]05) \n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "(PredictionWorker pid=10038) [2023-04-22 11:14:30,762] [INFO] [utils.py:829:see_memory_usage] before init\n", + "(PredictionWorker pid=10038) [2023-04-22 11:14:30,762] [INFO] [utils.py:830:see_memory_usage] MA 0.0 GB Max_MA 0.0 GB CA 0.0 GB Max_CA 0 GB \n", + "(PredictionWorker pid=10038) [2023-04-22 11:14:30,762] [INFO] [utils.py:838:see_memory_usage] CPU Virtual Memory: used = 11.63 GB, percent = 2.4%\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "(PredictionWorker pid=10040) --------------------------------------------------------------------------\n", + "(PredictionWorker pid=10040) Aim collects anonymous usage analytics. \n", + "(PredictionWorker pid=10040) Read how to opt-out here: \n", + "(PredictionWorker pid=10040) https://aimstack.readthedocs.io/en/latest/community/telemetry.html \n", + "(PredictionWorker pid=10040) --------------------------------------------------------------------------\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "(PredictionWorker pid=10045) [2023-04-22 11:14:33,061] [INFO] [logging.py:93:log_dist] [Rank -1] DeepSpeed info: version=0.8.3, git-hash=unknown, git-branch=unknown\n", + "(PredictionWorker pid=10045) [2023-04-22 11:14:33,062] [WARNING] [config_utils.py:75:_process_deprecated_field] Config parameter replace_method is deprecated. 
This parameter is no longer needed, please remove from your call to DeepSpeed-inference\n", + "(PredictionWorker pid=10045) [2023-04-22 11:14:33,062] [WARNING] [config_utils.py:75:_process_deprecated_field] Config parameter mp_size is deprecated use tensor_parallel.tp_size instead\n", + "(PredictionWorker pid=10045) [2023-04-22 11:14:33,062] [INFO] [logging.py:93:log_dist] [Rank -1] quantize_bits = 8 mlp_extra_grouping = False, quantize_groups = 1\n", + "(PredictionWorker pid=10038) [2023-04-22 11:14:33,074] [INFO] [utils.py:829:see_memory_usage] after init\n", + "(PredictionWorker pid=10038) [2023-04-22 11:14:33,075] [INFO] [utils.py:830:see_memory_usage] MA 0.0 GB Max_MA 0.0 GB CA 0.0 GB Max_CA 0 GB \n", + "(PredictionWorker pid=10038) [2023-04-22 11:14:33,075] [INFO] [utils.py:838:see_memory_usage] CPU Virtual Memory: used = 12.25 GB, percent = 2.6%\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "(PredictionWorker pid=10040) Using /home/ray/.cache/torch_extensions/py39_cu116 as PyTorch extensions root...\n", + "(PredictionWorker pid=10038) Creating extension directory /home/ray/.cache/torch_extensions/py39_cu116/transformer_inference...\n", + "(PredictionWorker pid=10038) Detected CUDA files, patching ldflags\n", + "(PredictionWorker pid=10038) Emitting ninja build file /home/ray/.cache/torch_extensions/py39_cu116/transformer_inference/build.ninja...\n", + "(PredictionWorker pid=10038) Building extension module transformer_inference...\n", + "(PredictionWorker pid=10038) Allowing ninja to set a default number of workers... 
(overridable by setting the environment variable MAX_JOBS=N)\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "(PredictionWorker pid=10038) [1/9] /usr/local/cuda/bin/nvcc -DTORCH_EXTENSION_NAME=transformer_inference -DTORCH_API_INCLUDE_EXTENSION_H -DPYBIND11_COMPILER_TYPE=\\\"_gcc\\\" -DPYBIND11_STDLIB=\\\"_libstdcpp\\\" -DPYBIND11_BUILD_ABI=\\\"_cxxabi1011\\\" -I/home/ray/anaconda3/lib/python3.9/site-packages/deepspeed/ops/csrc/transformer/inference/includes -I/home/ray/anaconda3/lib/python3.9/site-packages/deepspeed/ops/csrc/includes -isystem /home/ray/anaconda3/lib/python3.9/site-packages/torch/include -isystem /home/ray/anaconda3/lib/python3.9/site-packages/torch/include/torch/csrc/api/include -isystem /home/ray/anaconda3/lib/python3.9/site-packages/torch/include/TH -isystem /home/ray/anaconda3/lib/python3.9/site-packages/torch/include/THC -isystem /usr/local/cuda/include -isystem /home/ray/anaconda3/include/python3.9 -D_GLIBCXX_USE_CXX11_ABI=0 -D__CUDA_NO_HALF_OPERATORS__ -D__CUDA_NO_HALF_CONVERSIONS__ -D__CUDA_NO_BFLOAT16_CONVERSIONS__ -D__CUDA_NO_HALF2_OPERATORS__ --expt-relaxed-constexpr -gencode=arch=compute_70,code=compute_70 -gencode=arch=compute_70,code=sm_70 --compiler-options '-fPIC' -O3 --use_fast_math -std=c++14 -U__CUDA_NO_HALF_OPERATORS__ -U__CUDA_NO_HALF_CONVERSIONS__ -U__CUDA_NO_HALF2_OPERATORS__ -gencode=arch=compute_70,code=sm_70 -gencode=arch=compute_70,code=compute_70 -c /home/ray/anaconda3/lib/python3.9/site-packages/deepspeed/ops/csrc/transformer/inference/csrc/dequantize.cu -o dequantize.cuda.o \n", + "(PredictionWorker pid=10038) [2/9] /usr/local/cuda/bin/nvcc -DTORCH_EXTENSION_NAME=transformer_inference -DTORCH_API_INCLUDE_EXTENSION_H -DPYBIND11_COMPILER_TYPE=\\\"_gcc\\\" -DPYBIND11_STDLIB=\\\"_libstdcpp\\\" -DPYBIND11_BUILD_ABI=\\\"_cxxabi1011\\\" -I/home/ray/anaconda3/lib/python3.9/site-packages/deepspeed/ops/csrc/transformer/inference/includes 
-I/home/ray/anaconda3/lib/python3.9/site-packages/deepspeed/ops/csrc/includes -isystem /home/ray/anaconda3/lib/python3.9/site-packages/torch/include -isystem /home/ray/anaconda3/lib/python3.9/site-packages/torch/include/torch/csrc/api/include -isystem /home/ray/anaconda3/lib/python3.9/site-packages/torch/include/TH -isystem /home/ray/anaconda3/lib/python3.9/site-packages/torch/include/THC -isystem /usr/local/cuda/include -isystem /home/ray/anaconda3/include/python3.9 -D_GLIBCXX_USE_CXX11_ABI=0 -D__CUDA_NO_HALF_OPERATORS__ -D__CUDA_NO_HALF_CONVERSIONS__ -D__CUDA_NO_BFLOAT16_CONVERSIONS__ -D__CUDA_NO_HALF2_OPERATORS__ --expt-relaxed-constexpr -gencode=arch=compute_70,code=compute_70 -gencode=arch=compute_70,code=sm_70 --compiler-options '-fPIC' -O3 --use_fast_math -std=c++14 -U__CUDA_NO_HALF_OPERATORS__ -U__CUDA_NO_HALF_CONVERSIONS__ -U__CUDA_NO_HALF2_OPERATORS__ -gencode=arch=compute_70,code=sm_70 -gencode=arch=compute_70,code=compute_70 -c /home/ray/anaconda3/lib/python3.9/site-packages/deepspeed/ops/csrc/transformer/inference/csrc/relu.cu -o relu.cuda.o \n", + "(PredictionWorker pid=10038) [3/9] /usr/local/cuda/bin/nvcc -DTORCH_EXTENSION_NAME=transformer_inference -DTORCH_API_INCLUDE_EXTENSION_H -DPYBIND11_COMPILER_TYPE=\\\"_gcc\\\" -DPYBIND11_STDLIB=\\\"_libstdcpp\\\" -DPYBIND11_BUILD_ABI=\\\"_cxxabi1011\\\" -I/home/ray/anaconda3/lib/python3.9/site-packages/deepspeed/ops/csrc/transformer/inference/includes -I/home/ray/anaconda3/lib/python3.9/site-packages/deepspeed/ops/csrc/includes -isystem /home/ray/anaconda3/lib/python3.9/site-packages/torch/include -isystem /home/ray/anaconda3/lib/python3.9/site-packages/torch/include/torch/csrc/api/include -isystem /home/ray/anaconda3/lib/python3.9/site-packages/torch/include/TH -isystem /home/ray/anaconda3/lib/python3.9/site-packages/torch/include/THC -isystem /usr/local/cuda/include -isystem /home/ray/anaconda3/include/python3.9 -D_GLIBCXX_USE_CXX11_ABI=0 -D__CUDA_NO_HALF_OPERATORS__ -D__CUDA_NO_HALF_CONVERSIONS__ 
-D__CUDA_NO_BFLOAT16_CONVERSIONS__ -D__CUDA_NO_HALF2_OPERATORS__ --expt-relaxed-constexpr -gencode=arch=compute_70,code=compute_70 -gencode=arch=compute_70,code=sm_70 --compiler-options '-fPIC' -O3 --use_fast_math -std=c++14 -U__CUDA_NO_HALF_OPERATORS__ -U__CUDA_NO_HALF_CONVERSIONS__ -U__CUDA_NO_HALF2_OPERATORS__ -gencode=arch=compute_70,code=sm_70 -gencode=arch=compute_70,code=compute_70 -c /home/ray/anaconda3/lib/python3.9/site-packages/deepspeed/ops/csrc/transformer/inference/csrc/apply_rotary_pos_emb.cu -o apply_rotary_pos_emb.cuda.o \n", + "(PredictionWorker pid=10038) [4/9] /usr/local/cuda/bin/nvcc -DTORCH_EXTENSION_NAME=transformer_inference -DTORCH_API_INCLUDE_EXTENSION_H -DPYBIND11_COMPILER_TYPE=\\\"_gcc\\\" -DPYBIND11_STDLIB=\\\"_libstdcpp\\\" -DPYBIND11_BUILD_ABI=\\\"_cxxabi1011\\\" -I/home/ray/anaconda3/lib/python3.9/site-packages/deepspeed/ops/csrc/transformer/inference/includes -I/home/ray/anaconda3/lib/python3.9/site-packages/deepspeed/ops/csrc/includes -isystem /home/ray/anaconda3/lib/python3.9/site-packages/torch/include -isystem /home/ray/anaconda3/lib/python3.9/site-packages/torch/include/torch/csrc/api/include -isystem /home/ray/anaconda3/lib/python3.9/site-packages/torch/include/TH -isystem /home/ray/anaconda3/lib/python3.9/site-packages/torch/include/THC -isystem /usr/local/cuda/include -isystem /home/ray/anaconda3/include/python3.9 -D_GLIBCXX_USE_CXX11_ABI=0 -D__CUDA_NO_HALF_OPERATORS__ -D__CUDA_NO_HALF_CONVERSIONS__ -D__CUDA_NO_BFLOAT16_CONVERSIONS__ -D__CUDA_NO_HALF2_OPERATORS__ --expt-relaxed-constexpr -gencode=arch=compute_70,code=compute_70 -gencode=arch=compute_70,code=sm_70 --compiler-options '-fPIC' -O3 --use_fast_math -std=c++14 -U__CUDA_NO_HALF_OPERATORS__ -U__CUDA_NO_HALF_CONVERSIONS__ -U__CUDA_NO_HALF2_OPERATORS__ -gencode=arch=compute_70,code=sm_70 -gencode=arch=compute_70,code=compute_70 -c /home/ray/anaconda3/lib/python3.9/site-packages/deepspeed/ops/csrc/transformer/inference/csrc/transform.cu -o transform.cuda.o \n", + 
"(PredictionWorker pid=10038) /home/ray/anaconda3/lib/python3.9/site-packages/deepspeed/ops/csrc/transformer/inference/csrc/transform.cu(56): warning #177-D: variable \"lane\" was declared but never referenced\n", + "(PredictionWorker pid=10038) \n", + "(PredictionWorker pid=10038) /home/ray/anaconda3/lib/python3.9/site-packages/deepspeed/ops/csrc/transformer/inference/csrc/transform.cu(93): warning #177-D: variable \"half_dim\" was declared but never referenced\n", + "(PredictionWorker pid=10038) \n", + "(PredictionWorker pid=10038) /home/ray/anaconda3/lib/python3.9/site-packages/deepspeed/ops/csrc/transformer/inference/csrc/transform.cu(110): warning #177-D: variable \"vals_half\" was declared but never referenced\n", + "(PredictionWorker pid=10038) \n", + "(PredictionWorker pid=10038) /home/ray/anaconda3/lib/python3.9/site-packages/deepspeed/ops/csrc/transformer/inference/csrc/transform.cu(111): warning #177-D: variable \"output_half\" was declared but never referenced\n", + "(PredictionWorker pid=10038) \n", + "(PredictionWorker pid=10038) /home/ray/anaconda3/lib/python3.9/site-packages/deepspeed/ops/csrc/transformer/inference/csrc/transform.cu(128): warning #177-D: variable \"lane\" was declared but never referenced\n", + "(PredictionWorker pid=10038) \n", + "(PredictionWorker pid=10038) [5/9] /usr/local/cuda/bin/nvcc -DTORCH_EXTENSION_NAME=transformer_inference -DTORCH_API_INCLUDE_EXTENSION_H -DPYBIND11_COMPILER_TYPE=\\\"_gcc\\\" -DPYBIND11_STDLIB=\\\"_libstdcpp\\\" -DPYBIND11_BUILD_ABI=\\\"_cxxabi1011\\\" -I/home/ray/anaconda3/lib/python3.9/site-packages/deepspeed/ops/csrc/transformer/inference/includes -I/home/ray/anaconda3/lib/python3.9/site-packages/deepspeed/ops/csrc/includes -isystem /home/ray/anaconda3/lib/python3.9/site-packages/torch/include -isystem /home/ray/anaconda3/lib/python3.9/site-packages/torch/include/torch/csrc/api/include -isystem /home/ray/anaconda3/lib/python3.9/site-packages/torch/include/TH -isystem 
/home/ray/anaconda3/lib/python3.9/site-packages/torch/include/THC -isystem /usr/local/cuda/include -isystem /home/ray/anaconda3/include/python3.9 -D_GLIBCXX_USE_CXX11_ABI=0 -D__CUDA_NO_HALF_OPERATORS__ -D__CUDA_NO_HALF_CONVERSIONS__ -D__CUDA_NO_BFLOAT16_CONVERSIONS__ -D__CUDA_NO_HALF2_OPERATORS__ --expt-relaxed-constexpr -gencode=arch=compute_70,code=compute_70 -gencode=arch=compute_70,code=sm_70 --compiler-options '-fPIC' -O3 --use_fast_math -std=c++14 -U__CUDA_NO_HALF_OPERATORS__ -U__CUDA_NO_HALF_CONVERSIONS__ -U__CUDA_NO_HALF2_OPERATORS__ -gencode=arch=compute_70,code=sm_70 -gencode=arch=compute_70,code=compute_70 -c /home/ray/anaconda3/lib/python3.9/site-packages/deepspeed/ops/csrc/transformer/inference/csrc/softmax.cu -o softmax.cuda.o \n", + "(PredictionWorker pid=10038) /home/ray/anaconda3/lib/python3.9/site-packages/deepspeed/ops/csrc/transformer/inference/csrc/softmax.cu(272): warning #177-D: variable \"alibi_offset\" was declared but never referenced\n", + "(PredictionWorker pid=10038) \n", + "(PredictionWorker pid=10038) /home/ray/anaconda3/lib/python3.9/site-packages/deepspeed/ops/csrc/transformer/inference/csrc/softmax.cu(427): warning #177-D: variable \"warp_num\" was declared but never referenced\n", + "(PredictionWorker pid=10038) \n", + "(PredictionWorker pid=10038) [6/9] /usr/local/cuda/bin/nvcc -DTORCH_EXTENSION_NAME=transformer_inference -DTORCH_API_INCLUDE_EXTENSION_H -DPYBIND11_COMPILER_TYPE=\\\"_gcc\\\" -DPYBIND11_STDLIB=\\\"_libstdcpp\\\" -DPYBIND11_BUILD_ABI=\\\"_cxxabi1011\\\" -I/home/ray/anaconda3/lib/python3.9/site-packages/deepspeed/ops/csrc/transformer/inference/includes -I/home/ray/anaconda3/lib/python3.9/site-packages/deepspeed/ops/csrc/includes -isystem /home/ray/anaconda3/lib/python3.9/site-packages/torch/include -isystem /home/ray/anaconda3/lib/python3.9/site-packages/torch/include/torch/csrc/api/include -isystem /home/ray/anaconda3/lib/python3.9/site-packages/torch/include/TH -isystem 
/home/ray/anaconda3/lib/python3.9/site-packages/torch/include/THC -isystem /usr/local/cuda/include -isystem /home/ray/anaconda3/include/python3.9 -D_GLIBCXX_USE_CXX11_ABI=0 -D__CUDA_NO_HALF_OPERATORS__ -D__CUDA_NO_HALF_CONVERSIONS__ -D__CUDA_NO_BFLOAT16_CONVERSIONS__ -D__CUDA_NO_HALF2_OPERATORS__ --expt-relaxed-constexpr -gencode=arch=compute_70,code=compute_70 -gencode=arch=compute_70,code=sm_70 --compiler-options '-fPIC' -O3 --use_fast_math -std=c++14 -U__CUDA_NO_HALF_OPERATORS__ -U__CUDA_NO_HALF_CONVERSIONS__ -U__CUDA_NO_HALF2_OPERATORS__ -gencode=arch=compute_70,code=sm_70 -gencode=arch=compute_70,code=compute_70 -c /home/ray/anaconda3/lib/python3.9/site-packages/deepspeed/ops/csrc/transformer/inference/csrc/gelu.cu -o gelu.cuda.o \n", + "(PredictionWorker pid=10038) [2023-04-22 11:14:33,250] [INFO] [logging.py:93:log_dist] [Rank -1] DeepSpeed info: version=0.8.3, git-hash=unknown, git-branch=unknown [repeated 7x across cluster] (Ray deduplicates logs by default. Set RAY_DEDUP_LOGS=0 to disable log deduplication, or see https://docs.ray.io/en/master/ray-observability/ray-logging.html#log-deduplication for more options.)\n", + "(PredictionWorker pid=10038) [2023-04-22 11:14:33,251] [WARNING] [config_utils.py:75:_process_deprecated_field] Config parameter replace_method is deprecated. 
This parameter is no longer needed, please remove from your call to DeepSpeed-inference [repeated 7x across cluster]\n", + "(PredictionWorker pid=10038) [2023-04-22 11:14:33,251] [WARNING] [config_utils.py:75:_process_deprecated_field] Config parameter mp_size is deprecated use tensor_parallel.tp_size instead [repeated 7x across cluster]\n", + "(PredictionWorker pid=10038) [2023-04-22 11:14:33,251] [INFO] [logging.py:93:log_dist] [Rank -1] quantize_bits = 8 mlp_extra_grouping = False, quantize_groups = 1 [repeated 7x across cluster]\n", + "(PredictionWorker pid=10038) [7/9] /usr/local/cuda/bin/nvcc -DTORCH_EXTENSION_NAME=transformer_inference -DTORCH_API_INCLUDE_EXTENSION_H -DPYBIND11_COMPILER_TYPE=\\\"_gcc\\\" -DPYBIND11_STDLIB=\\\"_libstdcpp\\\" -DPYBIND11_BUILD_ABI=\\\"_cxxabi1011\\\" -I/home/ray/anaconda3/lib/python3.9/site-packages/deepspeed/ops/csrc/transformer/inference/includes -I/home/ray/anaconda3/lib/python3.9/site-packages/deepspeed/ops/csrc/includes -isystem /home/ray/anaconda3/lib/python3.9/site-packages/torch/include -isystem /home/ray/anaconda3/lib/python3.9/site-packages/torch/include/torch/csrc/api/include -isystem /home/ray/anaconda3/lib/python3.9/site-packages/torch/include/TH -isystem /home/ray/anaconda3/lib/python3.9/site-packages/torch/include/THC -isystem /usr/local/cuda/include -isystem /home/ray/anaconda3/include/python3.9 -D_GLIBCXX_USE_CXX11_ABI=0 -D__CUDA_NO_HALF_OPERATORS__ -D__CUDA_NO_HALF_CONVERSIONS__ -D__CUDA_NO_BFLOAT16_CONVERSIONS__ -D__CUDA_NO_HALF2_OPERATORS__ --expt-relaxed-constexpr -gencode=arch=compute_70,code=compute_70 -gencode=arch=compute_70,code=sm_70 --compiler-options '-fPIC' -O3 --use_fast_math -std=c++14 -U__CUDA_NO_HALF_OPERATORS__ -U__CUDA_NO_HALF_CONVERSIONS__ -U__CUDA_NO_HALF2_OPERATORS__ -gencode=arch=compute_70,code=sm_70 -gencode=arch=compute_70,code=compute_70 -c /home/ray/anaconda3/lib/python3.9/site-packages/deepspeed/ops/csrc/transformer/inference/csrc/layer_norm.cu -o layer_norm.cuda.o \n", + 
"(PredictionWorker pid=10038) [8/9] c++ -MMD -MF pt_binding.o.d -DTORCH_EXTENSION_NAME=transformer_inference -DTORCH_API_INCLUDE_EXTENSION_H -DPYBIND11_COMPILER_TYPE=\\\"_gcc\\\" -DPYBIND11_STDLIB=\\\"_libstdcpp\\\" -DPYBIND11_BUILD_ABI=\\\"_cxxabi1011\\\" -I/home/ray/anaconda3/lib/python3.9/site-packages/deepspeed/ops/csrc/transformer/inference/includes -I/home/ray/anaconda3/lib/python3.9/site-packages/deepspeed/ops/csrc/includes -isystem /home/ray/anaconda3/lib/python3.9/site-packages/torch/include -isystem /home/ray/anaconda3/lib/python3.9/site-packages/torch/include/torch/csrc/api/include -isystem /home/ray/anaconda3/lib/python3.9/site-packages/torch/include/TH -isystem /home/ray/anaconda3/lib/python3.9/site-packages/torch/include/THC -isystem /usr/local/cuda/include -isystem /home/ray/anaconda3/include/python3.9 -D_GLIBCXX_USE_CXX11_ABI=0 -fPIC -std=c++14 -O3 -std=c++14 -g -Wno-reorder -c /home/ray/anaconda3/lib/python3.9/site-packages/deepspeed/ops/csrc/transformer/inference/csrc/pt_binding.cpp -o pt_binding.o \n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "(PredictionWorker pid=10038) Loading extension module transformer_inference...\n", + "(PredictionWorker pid=10041) -------------------------------------------------------------------------- [repeated 14x across cluster]\n", + "(PredictionWorker pid=10041) Aim collects anonymous usage analytics. [repeated 7x across cluster]\n", + "(PredictionWorker pid=10041) Read how to opt-out here: [repeated 7x across cluster]\n", + "(PredictionWorker pid=10041) https://aimstack.readthedocs.io/en/latest/community/telemetry.html [repeated 7x across cluster]\n", + "(PredictionWorker pid=10041) Using /home/ray/.cache/torch_extensions/py39_cu116 as PyTorch extensions root... 
[repeated 7x across cluster]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "(PredictionWorker pid=10038) [9/9] c++ pt_binding.o gelu.cuda.o relu.cuda.o layer_norm.cuda.o softmax.cuda.o dequantize.cuda.o apply_rotary_pos_emb.cuda.o transform.cuda.o -shared -lcurand -L/home/ray/anaconda3/lib/python3.9/site-packages/torch/lib -lc10 -lc10_cuda -ltorch_cpu -ltorch_cuda_cu -ltorch_cuda_cpp -ltorch -ltorch_python -L/usr/local/cuda/lib64 -lcudart -o transformer_inference.so\n", + "(PredictionWorker pid=10038) Time to load transformer_inference op: 46.834928035736084 seconds\n", + "(PredictionWorker pid=10038) [2023-04-22 11:15:21,799] [INFO] [logging.py:93:log_dist] [Rank 0] DeepSpeed-Inference config: {'layer_id': 0, 'hidden_size': 7168, 'intermediate_size': 28672, 'heads': 56, 'num_hidden_layers': -1, 'fp16': True, 'pre_layer_norm': True, 'local_rank': -1, 'stochastic_mode': False, 'epsilon': 1e-12, 'mp_size': 8, 'q_int8': False, 'scale_attention': True, 'triangular_masking': True, 'local_attention': False, 'window_size': 1, 'rotary_dim': -1, 'rotate_half': False, 'rotate_every_two': True, 'return_tuple': True, 'mlp_after_attn': True, 'mlp_act_func_type': , 'specialized_mode': False, 'training_mp_size': 1, 'bigscience_bloom': False, 'max_out_tokens': 1024, 'scale_attn_by_inverse_layer_idx': False, 'enable_qkv_quantization': False, 'use_mup': False, 'return_single_tuple': False}\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "(PredictionWorker pid=10040) No modifications detected for re-loaded extension module transformer_inference, skipping build step...\n", + "Loading 7 checkpoint shards: 0%| | 0/7 [00:00 Date: Fri, 5 May 2023 08:36:29 -0700 Subject: [PATCH 251/424] Add debug logs to show UpdateResourceUsage rpc source (#35062) Signed-off-by: Jiajun Yao --- src/ray/gcs/gcs_client/gcs_client.cc | 3 ++- src/ray/raylet_client/raylet_client.cc | 1 - src/ray/rpc/node_manager/node_manager_client_pool.cc | 3 ++- 
src/ray/rpc/server_call.h | 8 ++++++++ 4 files changed, 12 insertions(+), 3 deletions(-) diff --git a/src/ray/gcs/gcs_client/gcs_client.cc b/src/ray/gcs/gcs_client/gcs_client.cc index 9e7d3504f882..fb721893d7ea 100644 --- a/src/ray/gcs/gcs_client/gcs_client.cc +++ b/src/ray/gcs/gcs_client/gcs_client.cc @@ -128,7 +128,8 @@ Status GcsClient::Connect(instrumented_io_context &io_service) { internal_kv_accessor_ = std::make_unique(this); task_accessor_ = std::make_unique(this); - RAY_LOG(DEBUG) << "GcsClient connected."; + RAY_LOG(DEBUG) << "GcsClient connected " << options_.gcs_address_ << ":" + << options_.gcs_port_; return Status::OK(); } diff --git a/src/ray/raylet_client/raylet_client.cc b/src/ray/raylet_client/raylet_client.cc index a2cc2dd45924..cf7f89cc9ab5 100644 --- a/src/ray/raylet_client/raylet_client.cc +++ b/src/ray/raylet_client/raylet_client.cc @@ -519,7 +519,6 @@ void raylet::RayletClient::GlobalGC( } void raylet::RayletClient::UpdateResourceUsage( - std::string &serialized_resource_usage_batch, const rpc::ClientCallback &callback) { rpc::UpdateResourceUsageRequest request; diff --git a/src/ray/rpc/node_manager/node_manager_client_pool.cc b/src/ray/rpc/node_manager/node_manager_client_pool.cc index 17d8afddb9b9..34911a465e67 100644 --- a/src/ray/rpc/node_manager/node_manager_client_pool.cc +++ b/src/ray/rpc/node_manager/node_manager_client_pool.cc @@ -30,7 +30,8 @@ shared_ptr NodeManagerClientPool::GetOrConnectByAddr auto connection = client_factory_(address); client_map_[raylet_id] = connection; - RAY_LOG(DEBUG) << "Connected to " << address.ip_address() << ":" << address.port(); + RAY_LOG(INFO) << "Connected to raylet " << raylet_id << " at " << address.ip_address() + << ":" << address.port(); RAY_CHECK(connection != nullptr); return connection; } diff --git a/src/ray/rpc/server_call.h b/src/ray/rpc/server_call.h index 8242c6b69fe8..efab149087ba 100644 --- a/src/ray/rpc/server_call.h +++ b/src/ray/rpc/server_call.h @@ -204,6 +204,14 @@ class 
ServerCallImpl : public ServerCall { // a new request comes in. factory.CreateCall(); } + // TODO(jjyao) Remove after debugging is done. + if (call_name_ == "NodeManagerService.grpc_server.UpdateResourceUsage") { + static std::string gcs_address = ""; + if (gcs_address == "" || gcs_address != context_.peer()) { + gcs_address = context_.peer(); + RAY_LOG(INFO) << "Handle " << call_name_ << " request from " << context_.peer(); + } + } (service_handler_.*handle_request_function_)( std::move(request_), reply_, From 004bd6bfc7794c8cd0e398344dc8a3cbcab9925f Mon Sep 17 00:00:00 2001 From: Max Pumperla Date: Fri, 5 May 2023 18:00:02 +0200 Subject: [PATCH 252/424] [docs] crisp chat for kapa.ai integration (#34782) --- doc/source/_static/css/custom.css | 4 +++- doc/source/_templates/layout.html | 12 +++++++++++- 2 files changed, 14 insertions(+), 2 deletions(-) diff --git a/doc/source/_static/css/custom.css b/doc/source/_static/css/custom.css index 7586bdfae7ce..4eaafef3043d 100644 --- a/doc/source/_static/css/custom.css +++ b/doc/source/_static/css/custom.css @@ -638,4 +638,6 @@ padding: 20px; font-weight: 500; } - +.ratd-widget { + right: 100px !important; +} diff --git a/doc/source/_templates/layout.html b/doc/source/_templates/layout.html index f9a687d27080..106de816972c 100644 --- a/doc/source/_templates/layout.html +++ b/doc/source/_templates/layout.html @@ -15,6 +15,17 @@ gtag('config', 'UA-110413294-1'); + + - {% endblock %} From 9570285475f2e0ea3cabc52499ff6a1dd3b51f7b Mon Sep 17 00:00:00 2001 From: Ethan Brooks Date: Fri, 5 May 2023 18:43:43 +0200 Subject: [PATCH 253/424] Update ray-logging.rst (#34998) Broken image link Signed-off-by: Ethan Brooks --- doc/source/ray-observability/ray-logging.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/ray-observability/ray-logging.rst b/doc/source/ray-observability/ray-logging.rst index 58663de30d79..3efae19e2d2f 100644 --- a/doc/source/ray-observability/ray-logging.rst +++ 
b/doc/source/ray-observability/ray-logging.rst @@ -115,7 +115,7 @@ By default Ray prints Actor logs prefixes in light blue: Users may instead activate multi-color prefixes by setting the environment variable ``RAY_COLOR_PREFIX=1``. This will index into an array of colors modulo the PID of each process. -.. image:: images/images/coloring-actor-log-prefixes.png +.. image:: images/coloring-actor-log-prefixes.png :align: center Distributed progress bars (tqdm) From 32c34fc6359c7447dc6161755eeddd05d7d22024 Mon Sep 17 00:00:00 2001 From: Philipp Moritz Date: Fri, 5 May 2023 09:45:03 -0700 Subject: [PATCH 254/424] [Core] Port GcsPublisher to Cython (#34393) Next step in https://github.com/ray-project/ray/pull/33769 --- .buildkite/pipeline.build.yml | 6 ++ dashboard/agent.py | 6 +- dashboard/dashboard.py | 3 +- python/ray/_private/gcs_pubsub.py | 45 ---------- python/ray/_private/log_monitor.py | 8 +- python/ray/_private/utils.py | 29 +------ python/ray/_private/worker.py | 3 +- python/ray/_raylet.pyx | 63 ++++++++++++++ python/ray/autoscaler/_private/monitor.py | 3 +- python/ray/includes/common.pxd | 38 +++++++++ python/ray/includes/common.pxi | 1 + python/ray/tests/test_failure.py | 3 +- python/ray/tests/test_gcs_fault_tolerance.py | 14 ++-- python/ray/tests/test_gcs_pubsub.py | 26 +++--- src/ray/gcs/gcs_client/gcs_client.cc | 5 +- src/ray/gcs/pubsub/gcs_pub_sub.cc | 87 ++++++++++++++++++++ src/ray/gcs/pubsub/gcs_pub_sub.h | 37 +++++++++ 17 files changed, 268 insertions(+), 109 deletions(-) diff --git a/.buildkite/pipeline.build.yml b/.buildkite/pipeline.build.yml index 81debe8a17bf..8bdd31723559 100644 --- a/.buildkite/pipeline.build.yml +++ b/.buildkite/pipeline.build.yml @@ -368,6 +368,9 @@ - DL=1 ./ci/env/install-dependencies.sh - bash ./ci/ci.sh prepare_docker - ./ci/env/env_info.sh + # This is needed or else the Ray Client tests run into a gRPC forking problem + # similar to https://github.com/grpc/grpc/issues/31885 + - pip install pip install grpcio==1.50.0 - 
bazel test --config=ci $(./ci/run/bazel_export_options) --test_tag_filters=client_tests,small_size_python_tests -- python/ray/tests/... @@ -418,6 +421,9 @@ - cleanup() { if [ "${BUILDKITE_PULL_REQUEST}" = "false" ]; then ./ci/build/upload_build_info.sh; fi }; trap cleanup EXIT - DL=1 ./ci/env/install-dependencies.sh - ./ci/env/env_info.sh + # This is needed or else the Ray Client tests run into a gRPC forking problem + # similar to https://github.com/grpc/grpc/issues/31885 + - pip install pip install grpcio==1.50.0 - bazel test --config=ci $(./scripts/bazel_export_options) --test_tag_filters=client_tests,small_size_python_tests --test_env=TEST_EXTERNAL_REDIS=1 diff --git a/dashboard/agent.py b/dashboard/agent.py index 345099ff7c25..df57590ff0b6 100644 --- a/dashboard/agent.py +++ b/dashboard/agent.py @@ -15,7 +15,7 @@ import ray.dashboard.consts as dashboard_consts import ray.dashboard.utils as dashboard_utils from ray.dashboard.consts import _PARENT_DEATH_THREASHOLD -from ray._private.gcs_pubsub import GcsAioPublisher, GcsPublisher +from ray._private.gcs_pubsub import GcsAioPublisher from ray._raylet import GcsClient from ray._private.gcs_utils import GcsAioClient from ray._private.ray_logging import setup_component_logger @@ -263,7 +263,9 @@ async def _check_parent(): ray._private.utils.publish_error_to_driver( ray_constants.RAYLET_DIED_ERROR, msg, - gcs_publisher=GcsPublisher(address=self.gcs_address), + gcs_publisher=ray._raylet.GcsPublisher( + address=self.gcs_address + ), ) else: logger.info(msg) diff --git a/dashboard/dashboard.py b/dashboard/dashboard.py index 4732e96d23ee..273fbc4c904d 100644 --- a/dashboard/dashboard.py +++ b/dashboard/dashboard.py @@ -13,7 +13,6 @@ import ray.dashboard.consts as dashboard_consts import ray.dashboard.head as dashboard_head import ray.dashboard.utils as dashboard_utils -from ray._private.gcs_pubsub import GcsPublisher from ray._private.ray_logging import setup_component_logger from typing import Optional, Set @@ -261,7 
+260,7 @@ def sigterm_handler(): raise e # Something went wrong, so push an error to all drivers. - gcs_publisher = GcsPublisher(address=args.gcs_address) + gcs_publisher = ray._raylet.GcsPublisher(address=args.gcs_address) ray._private.utils.publish_error_to_driver( ray_constants.DASHBOARD_DIED_ERROR, message, diff --git a/python/ray/_private/gcs_pubsub.py b/python/ray/_private/gcs_pubsub.py index c1d39e728b15..2168b9dfed9d 100644 --- a/python/ray/_private/gcs_pubsub.py +++ b/python/ray/_private/gcs_pubsub.py @@ -4,10 +4,8 @@ import random import threading from typing import Optional, Tuple, List -import time import grpc -from grpc._channel import _InactiveRpcError from ray._private.utils import get_or_create_event_loop try: @@ -160,49 +158,6 @@ def _pop_actors(queue, batch_size=100): return msgs -class GcsPublisher(_PublisherBase): - """Publisher to GCS.""" - - def __init__(self, address: str): - channel = gcs_utils.create_gcs_channel(address) - self._stub = gcs_service_pb2_grpc.InternalPubSubGcsServiceStub(channel) - - def publish_error( - self, key_id: bytes, error_info: ErrorTableData, num_retries=None - ) -> None: - """Publishes error info to GCS.""" - msg = pubsub_pb2.PubMessage( - channel_type=pubsub_pb2.RAY_ERROR_INFO_CHANNEL, - key_id=key_id, - error_info_message=error_info, - ) - req = gcs_service_pb2.GcsPublishRequest(pub_messages=[msg]) - self._gcs_publish(req, num_retries, timeout=1) - - def publish_logs(self, log_batch: dict) -> None: - """Publishes logs to GCS.""" - req = self._create_log_request(log_batch) - self._gcs_publish(req) - - def publish_function_key(self, key: bytes) -> None: - """Publishes function key to GCS.""" - req = self._create_function_key_request(key) - self._gcs_publish(req) - - def _gcs_publish(self, req, num_retries=None, timeout=None) -> None: - count = num_retries or MAX_GCS_PUBLISH_RETRIES - while count > 0: - try: - self._stub.GcsPublish(req, timeout=timeout) - return - except _InactiveRpcError: - pass - count -= 1 - if 
count > 0: - time.sleep(1) - raise TimeoutError(f"Failed to publish after retries: {req}") - - class _SyncSubscriber(_SubscriberBase): def __init__( self, diff --git a/python/ray/_private/log_monitor.py b/python/ray/_private/log_monitor.py index 7f06343625ae..444ac5b34bec 100644 --- a/python/ray/_private/log_monitor.py +++ b/python/ray/_private/log_monitor.py @@ -11,11 +11,9 @@ import traceback from typing import Callable, List, Set -import ray._private.gcs_pubsub as gcs_pubsub import ray._private.ray_constants as ray_constants import ray._private.services as services import ray._private.utils -from ray._private.gcs_pubsub import GcsPublisher from ray._private.ray_logging import setup_component_logger # Logger for this module. It should be configured at the entry point @@ -135,7 +133,7 @@ class LogMonitor: def __init__( self, logs_dir, - gcs_publisher: gcs_pubsub.GcsPublisher, + gcs_publisher, is_proc_alive_fn: Callable[[int], bool], max_files_open: int = ray_constants.LOG_MONITOR_MAX_OPEN_FILES, ): @@ -525,14 +523,14 @@ def is_proc_alive(pid): ) log_monitor = LogMonitor( - args.logs_dir, gcs_pubsub.GcsPublisher(address=args.gcs_address), is_proc_alive + args.logs_dir, ray._raylet.GcsPublisher(address=args.gcs_address), is_proc_alive ) try: log_monitor.run() except Exception as e: # Something went wrong, so push an error to all drivers. 
- gcs_publisher = GcsPublisher(address=args.gcs_address) + gcs_publisher = ray._raylet.GcsPublisher(address=args.gcs_address) traceback_str = ray._private.utils.format_error_message(traceback.format_exc()) message = ( f"The log monitor on node {platform.node()} " diff --git a/python/ray/_private/utils.py b/python/ray/_private/utils.py index 6174890cd8ea..8d1793114ac9 100644 --- a/python/ray/_private/utils.py +++ b/python/ray/_private/utils.py @@ -44,7 +44,6 @@ import ray import ray._private.ray_constants as ray_constants from ray._private.tls_utils import load_certs_from_env -from ray.core.generated.gcs_pb2 import ErrorTableData from ray.core.generated.runtime_env_common_pb2 import ( RuntimeEnvInfo as ProtoRuntimeEnvInfo, ) @@ -182,27 +181,6 @@ def push_error_to_driver( worker.core_worker.push_error(job_id, error_type, message, time.time()) -def construct_error_message(job_id, error_type, message, timestamp): - """Construct an ErrorTableData object. - - Args: - job_id: The ID of the job that the error should go to. If this is - nil, then the error will go to all drivers. - error_type: The type of the error. - message: The error message. - timestamp: The time of the error. - - Returns: - The ErrorTableData object. 
- """ - data = ErrorTableData() - data.job_id = job_id.binary() - data.type = error_type - data.error_message = message - data.timestamp = timestamp - return data - - def publish_error_to_driver( error_type: str, message: str, @@ -228,11 +206,12 @@ def publish_error_to_driver( if job_id is None: job_id = ray.JobID.nil() assert isinstance(job_id, ray.JobID) - error_data = construct_error_message(job_id, error_type, message, time.time()) try: - gcs_publisher.publish_error(job_id.hex().encode(), error_data, num_retries) + gcs_publisher.publish_error( + job_id.hex().encode(), error_type, message, job_id, num_retries + ) except Exception: - logger.exception(f"Failed to publish error {error_data}") + logger.exception(f"Failed to publish error: {message} [type {error_type}]") def decode(byte_str: str, allow_none: bool = False, encode_type: str = "utf-8"): diff --git a/python/ray/_private/worker.py b/python/ray/_private/worker.py index 81ff80881719..5976e90bd607 100644 --- a/python/ray/_private/worker.py +++ b/python/ray/_private/worker.py @@ -68,7 +68,6 @@ GcsErrorSubscriber, GcsFunctionKeySubscriber, GcsLogSubscriber, - GcsPublisher, ) from ray._private.inspect_util import is_cython from ray._private.ray_logging import ( @@ -2074,7 +2073,7 @@ def connect( ray._private.state.state._initialize_global_state( ray._raylet.GcsClientOptions.from_gcs_address(node.gcs_address) ) - worker.gcs_publisher = GcsPublisher(address=worker.gcs_client.address) + worker.gcs_publisher = ray._raylet.GcsPublisher(address=worker.gcs_client.address) # Initialize some fields. if mode in (WORKER_MODE, RESTORE_WORKER_MODE, SPILL_WORKER_MODE): # We should not specify the job_id if it's `WORKER_MODE`. 
diff --git a/python/ray/_raylet.pyx b/python/ray/_raylet.pyx index 6a09c94859e0..6aa8bf792221 100644 --- a/python/ray/_raylet.pyx +++ b/python/ray/_raylet.pyx @@ -61,14 +61,17 @@ from ray.includes.common cimport ( CObjectReference, CRayObject, CRayStatus, + CErrorTableData, CGcsClientOptions, CGcsNodeInfo, CJobTableData, + CLogBatch, CTaskArg, CTaskArgByReference, CTaskArgByValue, CTaskType, CPlacementStrategy, + CPythonFunction, CSchedulingStrategy, CPlacementGroupSchedulingStrategy, CNodeAffinitySchedulingStrategy, @@ -1746,6 +1749,66 @@ cdef class GcsClient: } return result +cdef class GcsPublisher: + """Cython wrapper class of C++ `ray::gcs::PythonGcsPublisher`.""" + cdef: + shared_ptr[CPythonGcsPublisher] inner + + def __cinit__(self, address): + self.inner.reset(new CPythonGcsPublisher(address)) + check_status(self.inner.get().Connect()) + + def publish_error(self, key_id: bytes, error_type: str, message: str, + job_id=None, num_retries=None): + cdef: + CErrorTableData error_info + int64_t c_num_retries = num_retries if num_retries else -1 + c_string c_key_id = key_id + + if job_id is None: + job_id = ray.JobID.nil() + assert isinstance(job_id, ray.JobID) + error_info.set_job_id(job_id.binary()) + error_info.set_type(error_type) + error_info.set_error_message(message) + error_info.set_timestamp(time.time()) + + with nogil: + check_status( + self.inner.get().PublishError(c_key_id, error_info, c_num_retries)) + + def publish_logs(self, log_json: dict): + cdef: + CLogBatch log_batch + c_string c_job_id + + job_id = log_json.get("job") + log_batch.set_ip(log_json.get("ip") if log_json.get("ip") else b"") + log_batch.set_pid( + str(log_json.get("pid")).encode() if log_json.get("pid") else b"") + log_batch.set_job_id(job_id.encode() if job_id else b"") + log_batch.set_is_error(bool(log_json.get("is_err"))) + for line in log_json.get("lines", []): + log_batch.add_lines(line) + actor_name = log_json.get("actor_name") + log_batch.set_actor_name(actor_name.encode() if 
actor_name else b"") + task_name = log_json.get("task_name") + log_batch.set_task_name(task_name.encode() if task_name else b"") + + c_job_id = job_id.encode() if job_id else b"" + with nogil: + check_status(self.inner.get().PublishLogs(c_job_id, log_batch)) + + def publish_function_key(self, key: bytes): + cdef: + CPythonFunction python_function + + python_function.set_key(key) + + with nogil: + check_status(self.inner.get().PublishFunctionKey(python_function)) + + cdef class CoreWorker: def __cinit__(self, worker_type, store_socket, raylet_socket, diff --git a/python/ray/autoscaler/_private/monitor.py b/python/ray/autoscaler/_private/monitor.py index 14faf14fa8e9..f15e109fc9d4 100644 --- a/python/ray/autoscaler/_private/monitor.py +++ b/python/ray/autoscaler/_private/monitor.py @@ -16,7 +16,6 @@ import ray._private.ray_constants as ray_constants import ray._private.utils from ray._private.event.event_logger import get_event_logger -from ray._private.gcs_pubsub import GcsPublisher from ray._private.ray_logging import setup_component_logger from ray._raylet import GcsClient from ray.autoscaler._private.autoscaler import StandardAutoscaler @@ -560,7 +559,7 @@ def _handle_failure(self, error): _internal_kv_put( ray_constants.DEBUG_AUTOSCALING_ERROR, message, overwrite=True ) - gcs_publisher = GcsPublisher(address=self.gcs_address) + gcs_publisher = ray._raylet.GcsPublisher(address=self.gcs_address) from ray._private.utils import publish_error_to_driver publish_error_to_driver( diff --git a/python/ray/includes/common.pxd b/python/ray/includes/common.pxd index e0f8b8ee9712..4250470f3013 100644 --- a/python/ray/includes/common.pxd +++ b/python/ray/includes/common.pxd @@ -346,6 +346,21 @@ cdef extern from "ray/gcs/gcs_client/gcs_client.h" namespace "ray::gcs" nogil: unordered_map[c_string, double] PythonGetResourcesTotal( const CGcsNodeInfo& node_info) +cdef extern from "ray/gcs/pubsub/gcs_pub_sub.h" nogil: + + cdef cppclass CPythonGcsPublisher 
"ray::gcs::PythonGcsPublisher": + + CPythonGcsPublisher(const c_string& gcs_address) + + CRayStatus Connect() + + CRayStatus PublishError( + const c_string &key_id, const CErrorTableData &data, int64_t num_retries) + + CRayStatus PublishLogs(const c_string &key_id, const CLogBatch &data) + + CRayStatus PublishFunctionKey(const CPythonFunction& python_function) + cdef extern from "src/ray/protobuf/gcs.pb.h" nogil: cdef cppclass CJobConfig "ray::rpc::JobConfig": c_string ray_namespace() const @@ -372,6 +387,29 @@ cdef extern from "src/ray/protobuf/gcs.pb.h" nogil: c_bool is_dead() const CJobConfig config() const + cdef cppclass CPythonFunction "ray::rpc::PythonFunction": + void set_key(const c_string &key) + + cdef cppclass CErrorTableData "ray::rpc::ErrorTableData": + c_string job_id() const + c_string type() const + c_string error_message() const + double timestamp() const + + void set_job_id(const c_string &job_id) + void set_type(const c_string &type) + void set_error_message(const c_string &error_message) + void set_timestamp(double timestamp) + + cdef cppclass CLogBatch "ray::rpc::LogBatch": + void set_ip(const c_string &ip) + void set_pid(const c_string &pid) + void set_job_id(const c_string &job_id) + void set_is_error(c_bool is_error) + void add_lines(const c_string &line) + void set_actor_name(const c_string &actor_name) + void set_task_name(const c_string &task_name) + cdef extern from "ray/common/task/task_spec.h" nogil: cdef cppclass CConcurrencyGroup "ray::ConcurrencyGroup": diff --git a/python/ray/includes/common.pxi b/python/ray/includes/common.pxi index 89983ff8808c..d7c3c121bc69 100644 --- a/python/ray/includes/common.pxi +++ b/python/ray/includes/common.pxi @@ -6,6 +6,7 @@ from ray.includes.common cimport ( CObjectLocation, CGcsClientOptions, CPythonGcsClient, + CPythonGcsPublisher, ) diff --git a/python/ray/tests/test_failure.py b/python/ray/tests/test_failure.py index 71bb7a98dd9a..93f1c734ee0a 100644 --- a/python/ray/tests/test_failure.py +++ 
b/python/ray/tests/test_failure.py @@ -10,7 +10,6 @@ import ray._private.gcs_utils as gcs_utils import ray._private.ray_constants as ray_constants import ray._private.utils -from ray._private.gcs_pubsub import GcsPublisher from ray._private.test_utils import ( SignalActor, convert_actor_state, @@ -69,7 +68,7 @@ def interceptor(e): def test_publish_error_to_driver(ray_start_regular, error_pubsub): address_info = ray_start_regular - gcs_publisher = GcsPublisher(address=address_info["gcs_address"]) + gcs_publisher = ray._raylet.GcsPublisher(address=address_info["gcs_address"]) error_message = "Test error message" ray._private.utils.publish_error_to_driver( diff --git a/python/ray/tests/test_gcs_fault_tolerance.py b/python/ray/tests/test_gcs_fault_tolerance.py index fedd531d6cb8..72caad2f0f6e 100644 --- a/python/ray/tests/test_gcs_fault_tolerance.py +++ b/python/ray/tests/test_gcs_fault_tolerance.py @@ -18,10 +18,8 @@ run_string_as_driver, ) from ray._private.gcs_pubsub import ( - GcsPublisher, GcsErrorSubscriber, ) -from ray.core.generated.gcs_pb2 import ErrorTableData import psutil @@ -675,20 +673,20 @@ def test_publish_and_subscribe_error_info(ray_start_regular_with_external_redis) subscriber = GcsErrorSubscriber(address=gcs_server_addr) subscriber.subscribe() - publisher = GcsPublisher(address=gcs_server_addr) - err1 = ErrorTableData(error_message="test error message 1") - err2 = ErrorTableData(error_message="test error message 2") + publisher = ray._raylet.GcsPublisher(address=gcs_server_addr) print("sending error message 1") - publisher.publish_error(b"aaa_id", err1) + publisher.publish_error(b"aaa_id", "", "test error message 1") ray._private.worker._global_node.kill_gcs_server() ray._private.worker._global_node.start_gcs_server() print("sending error message 2") - publisher.publish_error(b"bbb_id", err2) + publisher.publish_error(b"bbb_id", "", "test error message 2") print("done") - assert subscriber.poll() == (b"bbb_id", err2) + (key_id, err) = 
subscriber.poll() + assert key_id == b"bbb_id" + assert err.error_message == "test error message 2" subscriber.close() diff --git a/python/ray/tests/test_gcs_pubsub.py b/python/ray/tests/test_gcs_pubsub.py index b9a4eddee7a4..71d4ae802f26 100644 --- a/python/ray/tests/test_gcs_pubsub.py +++ b/python/ray/tests/test_gcs_pubsub.py @@ -3,8 +3,8 @@ import threading import re +import ray from ray._private.gcs_pubsub import ( - GcsPublisher, GcsErrorSubscriber, GcsLogSubscriber, GcsFunctionKeySubscriber, @@ -24,14 +24,16 @@ def test_publish_and_subscribe_error_info(ray_start_regular): subscriber = GcsErrorSubscriber(address=gcs_server_addr) subscriber.subscribe() - publisher = GcsPublisher(address=gcs_server_addr) - err1 = ErrorTableData(error_message="test error message 1") - err2 = ErrorTableData(error_message="test error message 2") - publisher.publish_error(b"aaa_id", err1) - publisher.publish_error(b"bbb_id", err2) + publisher = ray._raylet.GcsPublisher(address=gcs_server_addr) + publisher.publish_error(b"aaa_id", "", "test error message 1") + publisher.publish_error(b"bbb_id", "", "test error message 2") - assert subscriber.poll() == (b"aaa_id", err1) - assert subscriber.poll() == (b"bbb_id", err2) + (key_id1, err1) = subscriber.poll() + assert key_id1 == b"aaa_id" + assert err1.error_message == "test error message 1" + (key_id2, err2) = subscriber.poll() + assert key_id2 == b"bbb_id" + assert err2.error_message == "test error message 2" subscriber.close() @@ -63,7 +65,7 @@ def test_publish_and_subscribe_logs(ray_start_regular): subscriber = GcsLogSubscriber(address=gcs_server_addr) subscriber.subscribe() - publisher = GcsPublisher(address=gcs_server_addr) + publisher = ray._raylet.GcsPublisher(address=gcs_server_addr) log_batch = { "ip": "127.0.0.1", "pid": 1234, @@ -114,7 +116,7 @@ def test_publish_and_subscribe_function_keys(ray_start_regular): subscriber = GcsFunctionKeySubscriber(address=gcs_server_addr) subscriber.subscribe() - publisher = 
GcsPublisher(address=gcs_server_addr) + publisher = ray._raylet.GcsPublisher(address=gcs_server_addr) publisher.publish_function_key(b"111") publisher.publish_function_key(b"222") @@ -196,9 +198,9 @@ def receive_logs(): t2 = threading.Thread(target=receive_logs) t2.start() - publisher = GcsPublisher(address=gcs_server_addr) + publisher = ray._raylet.GcsPublisher(address=gcs_server_addr) for i in range(0, num_messages): - publisher.publish_error(b"msg_id", ErrorTableData(error_message=f"error {i}")) + publisher.publish_error(b"msg_id", "", f"error {i}") publisher.publish_logs( { "ip": "127.0.0.1", diff --git a/src/ray/gcs/gcs_client/gcs_client.cc b/src/ray/gcs/gcs_client/gcs_client.cc index fb721893d7ea..ae342b05eec0 100644 --- a/src/ray/gcs/gcs_client/gcs_client.cc +++ b/src/ray/gcs/gcs_client/gcs_client.cc @@ -146,10 +146,7 @@ std::pair GcsClient::GetGcsServerAddress() const { PythonGcsClient::PythonGcsClient(const GcsClientOptions &options) : options_(options) {} Status PythonGcsClient::Connect() { - grpc::ChannelArguments arguments; - arguments.SetInt(GRPC_ARG_MAX_MESSAGE_LENGTH, 512 * 1024 * 1024); - arguments.SetInt(GRPC_ARG_KEEPALIVE_TIME_MS, 60 * 1000); - arguments.SetInt(GRPC_ARG_KEEPALIVE_TIMEOUT_MS, 60 * 1000); + auto arguments = PythonGrpcChannelArguments(); channel_ = rpc::BuildChannel(options_.gcs_address_, options_.gcs_port_, arguments); kv_stub_ = rpc::InternalKVGcsService::NewStub(channel_); runtime_env_stub_ = rpc::RuntimeEnvGcsService::NewStub(channel_); diff --git a/src/ray/gcs/pubsub/gcs_pub_sub.cc b/src/ray/gcs/pubsub/gcs_pub_sub.cc index 32c0e9f41367..b03a9157da46 100644 --- a/src/ray/gcs/pubsub/gcs_pub_sub.cc +++ b/src/ray/gcs/pubsub/gcs_pub_sub.cc @@ -15,6 +15,7 @@ #include "ray/gcs/pubsub/gcs_pub_sub.h" #include "absl/strings/str_cat.h" +#include "ray/rpc/grpc_client.h" namespace ray { namespace gcs { @@ -212,5 +213,91 @@ Status GcsSubscriber::SubscribeAllWorkerFailures( return Status::OK(); } +grpc::ChannelArguments 
PythonGrpcChannelArguments() { + grpc::ChannelArguments arguments; + arguments.SetInt(GRPC_ARG_MAX_MESSAGE_LENGTH, 512 * 1024 * 1024); + arguments.SetInt(GRPC_ARG_KEEPALIVE_TIME_MS, 60 * 1000); + arguments.SetInt(GRPC_ARG_KEEPALIVE_TIMEOUT_MS, 60 * 1000); + return arguments; +} + +PythonGcsPublisher::PythonGcsPublisher(const std::string &gcs_address) { + std::vector address = absl::StrSplit(gcs_address, ':'); + RAY_LOG(DEBUG) << "Connect to gcs server via address: " << gcs_address; + RAY_CHECK(address.size() == 2); + gcs_address_ = address[0]; + gcs_port_ = std::stoi(address[1]); +} + +Status PythonGcsPublisher::Connect() { + auto arguments = PythonGrpcChannelArguments(); + channel_ = rpc::BuildChannel(gcs_address_, gcs_port_, arguments); + pubsub_stub_ = rpc::InternalPubSubGcsService::NewStub(channel_); + return Status::OK(); +} + +constexpr int MAX_GCS_PUBLISH_RETRIES = 60; + +Status PythonGcsPublisher::DoPublishWithRetries(const rpc::GcsPublishRequest &request, + int64_t num_retries, + int64_t timeout_ms) { + int count = num_retries == -1 ? 
MAX_GCS_PUBLISH_RETRIES : num_retries; + rpc::GcsPublishReply reply; + grpc::Status status; + while (count > 0) { + grpc::ClientContext context; + if (timeout_ms != -1) { + context.set_deadline(std::chrono::system_clock::now() + + std::chrono::milliseconds(timeout_ms)); + } + status = pubsub_stub_->GcsPublish(&context, request, &reply); + if (status.error_code() == grpc::StatusCode::OK) { + if (reply.status().code() != static_cast(StatusCode::OK)) { + return Status::Invalid(reply.status().message()); + } + return Status::OK(); + } else if (status.error_code() == grpc::StatusCode::UNAVAILABLE || + status.error_code() == grpc::StatusCode::UNKNOWN) { + // This is the case in which we will retry + count -= 1; + std::this_thread::sleep_for(std::chrono::seconds(1)); + continue; + } else { + return Status::Invalid(status.error_message()); + } + } + return Status::TimedOut("Failed to publish after retries: " + status.error_message()); +} + +Status PythonGcsPublisher::PublishError(const std::string &key_id, + const rpc::ErrorTableData &error_info, + int64_t num_retries) { + rpc::GcsPublishRequest request; + auto *message = request.add_pub_messages(); + message->set_channel_type(rpc::RAY_ERROR_INFO_CHANNEL); + message->set_key_id(key_id); + message->mutable_error_info_message()->MergeFrom(error_info); + return DoPublishWithRetries(request, num_retries, 1000); +} + +Status PythonGcsPublisher::PublishLogs(const std::string &key_id, + const rpc::LogBatch &log_batch) { + rpc::GcsPublishRequest request; + auto *message = request.add_pub_messages(); + message->set_channel_type(rpc::RAY_LOG_CHANNEL); + message->set_key_id(key_id); + message->mutable_log_batch_message()->MergeFrom(log_batch); + return DoPublishWithRetries(request, -1, -1); +} + +Status PythonGcsPublisher::PublishFunctionKey( + const rpc::PythonFunction &python_function) { + rpc::GcsPublishRequest request; + auto *message = request.add_pub_messages(); + message->set_channel_type(rpc::RAY_PYTHON_FUNCTION_CHANNEL); + 
message->mutable_python_function_message()->MergeFrom(python_function); + return DoPublishWithRetries(request, -1, -1); +} + } // namespace gcs } // namespace ray diff --git a/src/ray/gcs/pubsub/gcs_pub_sub.h b/src/ray/gcs/pubsub/gcs_pub_sub.h index ffd79a6adfab..db621938dc98 100644 --- a/src/ray/gcs/pubsub/gcs_pub_sub.h +++ b/src/ray/gcs/pubsub/gcs_pub_sub.h @@ -25,6 +25,7 @@ #include "ray/pubsub/publisher.h" #include "ray/pubsub/subscriber.h" #include "src/ray/protobuf/gcs.pb.h" +#include "src/ray/protobuf/gcs_service.grpc.pb.h" #include "src/ray/protobuf/gcs_service.pb.h" namespace ray { @@ -132,5 +133,41 @@ class GcsSubscriber { const std::unique_ptr subscriber_; }; +// This client is only supposed to be used from Cython / Python +class RAY_EXPORT PythonGcsPublisher { + public: + explicit PythonGcsPublisher(const std::string &gcs_address); + + /// Connect to the publisher service of the GCS. + /// This function must be called before calling other functions. + /// + /// \return Status + Status Connect(); + + /// Publish error information to GCS. + Status PublishError(const std::string &key_id, + const rpc::ErrorTableData &data, + int64_t num_retries); + + /// Publish logs to GCS. + Status PublishLogs(const std::string &key_id, const rpc::LogBatch &log_batch); + + /// Publish a function key to GCS. 
+ Status PublishFunctionKey(const rpc::PythonFunction &python_function); + + private: + Status DoPublishWithRetries(const rpc::GcsPublishRequest &request, + int64_t num_retries, + int64_t timeout_ms); + std::unique_ptr pubsub_stub_; + std::shared_ptr channel_; + std::string gcs_address_; + int gcs_port_; +}; + +/// Construct the arguments for synchronous gRPC clients +/// (the ones wrapped in Python) +grpc::ChannelArguments PythonGrpcChannelArguments(); + } // namespace gcs } // namespace ray From 9df7ae0f2514c4caa4445ef800319eaa7701f3e0 Mon Sep 17 00:00:00 2001 From: Max Pumperla Date: Fri, 5 May 2023 19:10:28 +0200 Subject: [PATCH 255/424] Revert "[docs] crisp chat for kapa.ai integration (#34782)" (#35080) This reverts commit 4714fcd8b6ced28f70e4678052a05466f0b8320a. --- doc/source/_static/css/custom.css | 4 +--- doc/source/_templates/layout.html | 12 +----------- 2 files changed, 2 insertions(+), 14 deletions(-) diff --git a/doc/source/_static/css/custom.css b/doc/source/_static/css/custom.css index 4eaafef3043d..7586bdfae7ce 100644 --- a/doc/source/_static/css/custom.css +++ b/doc/source/_static/css/custom.css @@ -638,6 +638,4 @@ padding: 20px; font-weight: 500; } -.ratd-widget { - right: 100px !important; -} + diff --git a/doc/source/_templates/layout.html b/doc/source/_templates/layout.html index 106de816972c..f9a687d27080 100644 --- a/doc/source/_templates/layout.html +++ b/doc/source/_templates/layout.html @@ -15,17 +15,6 @@ gtag('config', 'UA-110413294-1'); - - + {% endblock %} From 198373aa15bb1b13335e570961ca1909e83276e7 Mon Sep 17 00:00:00 2001 From: Jiajun Yao Date: Fri, 5 May 2023 10:42:42 -0700 Subject: [PATCH 256/424] [Doc] Make doc code snippet testable (#35057) Change code snippet from ..code-block:: to ..testcode:: Signed-off-by: Jiajun Yao --- doc/source/ray-core/actors.rst | 51 ++++++++++++----- python/ray/_private/worker.py | 102 +++++++++++++++++---------------- 2 files changed, 92 insertions(+), 61 deletions(-) diff --git 
a/doc/source/ray-core/actors.rst b/doc/source/ray-core/actors.rst index bce5d1487e77..201b21d79d26 100644 --- a/doc/source/ray-core/actors.rst +++ b/doc/source/ray-core/actors.rst @@ -15,10 +15,12 @@ that specific worker and can access and mutate the state of that worker. The ``ray.remote`` decorator indicates that instances of the ``Counter`` class will be actors. Each actor runs in its own Python process. - .. code-block:: python + .. testcode:: + + import ray @ray.remote - class Counter(object): + class Counter: def __init__(self): self.value = 0 @@ -98,11 +100,11 @@ You can specify resource requirements in actors too (see :ref:`resource-requirem .. tab-item:: Python - .. code-block:: python + .. testcode:: # Specify required resources for an actor. @ray.remote(num_cpus=2, num_gpus=0.5) - class Actor(object): + class Actor: pass .. tab-item:: Java @@ -131,11 +133,15 @@ value. .. tab-item:: Python - .. code-block:: python + .. testcode:: # Call the actor. obj_ref = counter.increment.remote() - assert ray.get(obj_ref) == 1 + print(ray.get(obj_ref)) + + .. testoutput:: + + 1 .. tab-item:: Java @@ -159,7 +165,7 @@ Methods called on different actors can execute in parallel, and methods called o .. tab-item:: Python - .. code-block:: python + .. testcode:: # Create ten Counter actors. counters = [Counter.remote() for _ in range(10)] @@ -167,12 +173,17 @@ Methods called on different actors can execute in parallel, and methods called o # Increment each Counter once and get the results. These tasks all happen in # parallel. results = ray.get([c.increment.remote() for c in counters]) - print(results) # prints [1, 1, 1, 1, 1, 1, 1, 1, 1, 1] + print(results) # Increment the first Counter five times. These tasks are executed serially # and share state. results = ray.get([counters[0].increment.remote() for _ in range(5)]) - print(results) # prints [2, 3, 4, 5, 6] + print(results) + + .. testoutput:: + + [1, 1, 1, 1, 1, 1, 1, 1, 1, 1] + [2, 3, 4, 5, 6] .. 
tab-item:: Java @@ -245,13 +256,13 @@ Actor handles can be passed into other tasks. We can define remote functions (or .. tab-item:: Python - .. code-block:: python + .. testcode:: import time @ray.remote def f(counter): - for _ in range(1000): + for _ in range(10): time.sleep(0.1) counter.increment.remote() @@ -286,7 +297,7 @@ If we instantiate an actor, we can pass the handle around to various tasks. .. tab-item:: Python - .. code-block:: python + .. testcode:: counter = Counter.remote() @@ -295,9 +306,23 @@ If we instantiate an actor, we can pass the handle around to various tasks. # Print the counter value. for _ in range(10): - time.sleep(1) + time.sleep(0.1) print(ray.get(counter.get_counter.remote())) + .. testoutput:: + :options: +SKIP + + 0 + 3 + 8 + 10 + 15 + 18 + 20 + 25 + 30 + 30 + .. tab-item:: Java .. code-block:: java diff --git a/python/ray/_private/worker.py b/python/ray/_private/worker.py index 5976e90bd607..ace1fdbdefb4 100644 --- a/python/ray/_private/worker.py +++ b/python/ray/_private/worker.py @@ -3054,62 +3054,68 @@ def remote( This function can be used as a decorator with no arguments to define a remote function or actor as follows: - >>> import ray - >>> - >>> @ray.remote - ... def f(a, b, c): - ... return a + b + c - >>> - >>> object_ref = f.remote(1, 2, 3) - >>> result = ray.get(object_ref) - >>> assert result == (1 + 2 + 3) - >>> - >>> @ray.remote - ... class Foo: - ... def __init__(self, arg): - ... self.x = arg - ... - ... def method(self, a): - ... return self.x + a - >>> - >>> actor_handle = Foo.remote(123) - >>> object_ref = actor_handle.method.remote(321) - >>> result = ray.get(object_ref) - >>> assert result == (123 + 321) + .. 
testcode:: + + import ray + + @ray.remote + def f(a, b, c): + return a + b + c + + object_ref = f.remote(1, 2, 3) + result = ray.get(object_ref) + assert result == (1 + 2 + 3) + + @ray.remote + class Foo: + def __init__(self, arg): + self.x = arg + + def method(self, a): + return self.x + a + + actor_handle = Foo.remote(123) + object_ref = actor_handle.method.remote(321) + result = ray.get(object_ref) + assert result == (123 + 321) Equivalently, use a function call to create a remote function or actor. - >>> def g(a, b, c): - ... return a + b + c - >>> - >>> remote_g = ray.remote(g) - >>> object_ref = remote_g.remote(1, 2, 3) - >>> assert ray.get(object_ref) == (1 + 2 + 3) + .. testcode:: - >>> class Bar: - ... def __init__(self, arg): - ... self.x = arg - ... - ... def method(self, a): - ... return self.x + a - >>> - >>> RemoteBar = ray.remote(Bar) - >>> actor_handle = RemoteBar.remote(123) - >>> object_ref = actor_handle.method.remote(321) - >>> result = ray.get(object_ref) - >>> assert result == (123 + 321) + def g(a, b, c): + return a + b + c + + remote_g = ray.remote(g) + object_ref = remote_g.remote(1, 2, 3) + assert ray.get(object_ref) == (1 + 2 + 3) + + class Bar: + def __init__(self, arg): + self.x = arg + + def method(self, a): + return self.x + a + + RemoteBar = ray.remote(Bar) + actor_handle = RemoteBar.remote(123) + object_ref = actor_handle.method.remote(321) + result = ray.get(object_ref) + assert result == (123 + 321) It can also be used with specific keyword arguments as follows: - >>> @ray.remote(num_gpus=1, max_calls=1, num_returns=2) - ... def f(): - ... return 1, 2 - >>> - >>> @ray.remote(num_cpus=2, resources={"CustomResource": 1}) - ... class Foo: - ... def method(self): - ... return 1 + .. 
testcode:: + + @ray.remote(num_gpus=1, max_calls=1, num_returns=2) + def f(): + return 1, 2 + + @ray.remote(num_cpus=2, resources={"CustomResource": 1}) + class Foo: + def method(self): + return 1 Remote task and actor objects returned by @ray.remote can also be dynamically modified with the same arguments as above using From cbddb5e479c13d412d1e1d45b1392ab8141d9569 Mon Sep 17 00:00:00 2001 From: Balaji Veeramani Date: Fri, 5 May 2023 12:24:00 -0700 Subject: [PATCH 257/424] [Data] Fix examples in batch inference guide (#35083) This PR fixes broken links and adds "Stable Diffusion Batch Prediction with Ray AIR" to the list of examples. --- doc/source/data/batch_inference.rst | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/doc/source/data/batch_inference.rst b/doc/source/data/batch_inference.rst index 1e7b0d3ca17d..aeede1a1f182 100644 --- a/doc/source/data/batch_inference.rst +++ b/doc/source/data/batch_inference.rst @@ -681,7 +681,7 @@ tutorials and examples: :img-top: /images/ray_logo.png :class-img-top: pt-2 w-75 d-block mx-auto fixed-height-img - .. button-ref:: /data/examples/torch_detection + .. button-ref:: /ray-air/examples/torch_detection Fine-tuning an Object Detection Model and using it for Batch Inference @@ -689,6 +689,14 @@ tutorials and examples: :img-top: /images/ray_logo.png :class-img-top: pt-2 w-75 d-block mx-auto fixed-height-img - .. button-ref:: /data/examples/torch_image_example + .. button-ref:: /ray-air/examples/torch_image_example Training an Image Classifier and using it for Batch Inference + + .. grid-item-card:: + :img-top: /images/ray_logo.png + :class-img-top: pt-2 w-75 d-block mx-auto fixed-height-img + + .. 
button-ref:: /ray-air/examples/stablediffusion_batch_prediction + + Stable Diffusion Batch Prediction with Ray AIR From 0dd98d885c267ad238ceafa63dc641f8cba16b67 Mon Sep 17 00:00:00 2001 From: Balaji Veeramani Date: Fri, 5 May 2023 12:59:47 -0700 Subject: [PATCH 258/424] [Data][Docs] Revise "ML Tensor Support" (#34999) Users are confused by the inconsistent use of "tensor" and "array" in the Ray Data documentation. This PR clarifies that tensor data is represented as ndarrays. In addition, this PR revises the "ML Tensor Support" user guide to remove redundant or outdated information. Co-authored-by: angelinalg <122562471+angelinalg@users.noreply.github.com> Co-authored-by: Eric Liang --- doc/source/data/data-tensor-support.rst | 192 ----------------------- doc/source/data/data.rst | 8 +- doc/source/data/doc_code/loading_data.py | 36 +---- doc/source/data/key-concepts.rst | 5 +- doc/source/data/loading-data.rst | 37 +++-- doc/source/data/performance-tips.rst | 4 +- doc/source/data/user-guide.rst | 2 +- doc/source/data/working-with-tensors.rst | 128 +++++++++++++++ python/ray/data/read_api.py | 2 +- 9 files changed, 165 insertions(+), 249 deletions(-) delete mode 100644 doc/source/data/data-tensor-support.rst create mode 100644 doc/source/data/working-with-tensors.rst diff --git a/doc/source/data/data-tensor-support.rst b/doc/source/data/data-tensor-support.rst deleted file mode 100644 index bb326f6bd0f0..000000000000 --- a/doc/source/data/data-tensor-support.rst +++ /dev/null @@ -1,192 +0,0 @@ -.. _data_tensor_support: - -ML Tensor Support -================= - -Tensor (multi-dimensional array) data is ubiquitous in ML workloads. However, popular data formats such as Pandas, Parquet, and Arrow don't natively support tensor data types. To bridge this gap, Ray Data provides tensor extension types that integrate with Pandas and Arrow. - -* For Pandas, Ray Data will transparently convert ``List[np.ndarray]`` columns to and from the :class:`TensorDtype ` extension type. 
-* For Parquet, Ray Data has an Arrow extension :class:`ArrowTensorType ` that allows tensors to be loaded from and stored in the Parquet format. - -Ray Data automatically converts between the extension types/arrays above. This means you can think of a ``Tensor`` as a first-class data type in Ray Data. - -Loading Tensor Data -------------------- - -This section shows how to create datastreams that include tensor data. - -.. tab-set:: - - .. tab-item:: Synthetic Data - - Create synthetic tensor data from a range of integers. - - .. literalinclude:: ./doc_code/tensor.py - :language: python - :start-after: __create_range_begin__ - :end-before: __create_range_end__ - - .. tab-item:: Images - - Load image data stored as individual files using :func:`~ray.data.read_images`: - - .. literalinclude:: ./doc_code/tensor.py - :language: python - :start-after: __create_images_begin__ - :end-before: __create_images_end__ - - .. tab-item:: Pandas UDF - - Create tensor columns by returning ``List[np.ndarray]`` columns from a Pandas - :ref:`user-defined function `. - - .. literalinclude:: ./doc_code/tensor.py - :language: python - :start-after: __create_pandas_2_begin__ - :end-before: __create_pandas_2_end__ - - .. tab-item:: NumPy - - Create from in-memory NumPy data or previously saved NumPy (.npy) files. - - .. literalinclude:: ./doc_code/tensor.py - :language: python - :start-after: __create_numpy_begin__ - :end-before: __create_numpy_end__ - - .. tab-item:: Parquet - - There are two ways to construct a Parquet tensor datastream: (1) loading a - previously-saved tensor datastream, or (2) casting non-tensor Parquet columns to tensor - type. When casting data, a tensor schema or deserialization - :ref:`user-defined function ` must be provided. The - following are examples for each method. - - **Previously-saved tensor datastreams**: - - .. 
literalinclude:: ./doc_code/tensor.py - :language: python - :start-after: __create_parquet_1_begin__ - :end-before: __create_parquet_1_end__ - - **Cast from data stored in C-contiguous format**: - - For tensors stored as raw NumPy ndarray bytes in C-contiguous order (e.g., via - `ndarray.tobytes() `__), all you need to specify is the tensor column schema. The following is an end-to-end example: - - .. literalinclude:: ./doc_code/tensor.py - :language: python - :start-after: __create_parquet_2_begin__ - :end-before: __create_parquet_2_end__ - - **Cast from data stored in custom formats**: - - For tensors stored in other formats (e.g., pickled), you can specify a deserializer - :ref:`user-defined function ` that returns - :class:`~ray.data.extensions.tensor_extension.TensorArray` columns: - - .. literalinclude:: ./doc_code/tensor.py - :language: python - :start-after: __create_parquet_3_begin__ - :end-before: __create_parquet_3_end__ - -Processing Tensor Data ----------------------- - -Like any other Datastream, Datastreams with tensor columns can be processed in batches via :meth:`ds.iter_batches ` and :meth:`ds.map_batches ` APIs. This section shows the available batch formats and their behavior: - -.. tab-set:: - - .. tab-item:: "numpy" (default) - - .. literalinclude:: ./doc_code/tensor.py - :language: python - :start-after: __consume_numpy_2_begin__ - :end-before: __consume_numpy_2_end__ - - .. tab-item:: "pandas" - - .. literalinclude:: ./doc_code/tensor.py - :language: python - :start-after: __consume_pandas_2_begin__ - :end-before: __consume_pandas_2_end__ - - .. tab-item:: "pyarrow" - - .. literalinclude:: ./doc_code/tensor.py - :language: python - :start-after: __consume_pyarrow_2_begin__ - :end-before: __consume_pyarrow_2_end__ - -Saving Tensor Data ------------------- - -Because tensor data relies on Datastream-specific extension types, they can only be -saved in formats that preserve Arrow metadata (currently only Parquet). 
In addition, -single-column tensor datastreams can be saved in NumPy format. - -.. tab-set:: - - .. tab-item:: Parquet - - .. literalinclude:: ./doc_code/tensor.py - :language: python - :start-after: __write_1_begin_ - :end-before: __write_1_end__ - - .. tab-item:: NumPy - - .. literalinclude:: ./doc_code/tensor.py - :language: python - :start-after: __write_2_begin_ - :end-before: __write_2_end__ - -.. _ragged_tensor_support: - -Ragged Tensor Support ---------------------- - -`Ragged tensors `__, i.e. tensors with non-uniform dimensions, pop up in NLP -(`textual sentences/documents of different lengths `__, -`N-grams `__), -computer vision (images of differing resolution, -`ssd300_vgg16 detection outputs `__), -and audio ML (differing durations). Datastreams has basic support for ragged tensors, -namely tensors that are a collection (batch) of variably-shaped subtensors, e.g. a batch -of images of differing sizes or a batch of sentences of differing lengths. - -.. literalinclude:: ./doc_code/tensor.py - :language: python - :start-after: __create_variable_shaped_tensors_begin__ - :end-before: __create_variable_shaped_tensors_end__ - -These variable-shaped tensors can be exchanged with popular training frameworks that support ragged tensors, such as `TensorFlow `__. - -.. literalinclude:: ./doc_code/tensor.py - :language: python - :start-after: __tf_variable_shaped_tensors_begin__ - :end-before: __tf_variable_shaped_tensors_end__ - -.. _disable_tensor_extension_casting: - -Disabling Tensor Extension Casting ----------------------------------- - -To disable automatic casting of Pandas and Arrow arrays to -:class:`~ray.data.extensions.tensor_extension.TensorArray`, run the code -below. - -.. code-block:: - - from ray.data import DataContext - - ctx = DataContext.get_current() - ctx.enable_tensor_extension_casting = False - - -Limitations ------------ - -The following are current limitations of tensor datastreams. 
- -* Arbitrarily `nested/ragged tensors `__ are not supported. Only tensors with all uniform dimensions (i.e. a fully well-defined shape) and tensors representing a collection of variable-shaped tensor elements (e.g. a collection of images with different shapes) are supported; arbitrary raggedness and nested ragged tensors is not supported. diff --git a/doc/source/data/data.rst b/doc/source/data/data.rst index 0b588cacaafb..08744ed5c90e 100644 --- a/doc/source/data/data.rst +++ b/doc/source/data/data.rst @@ -29,7 +29,7 @@ Streaming Batch Inference ------------------------- Ray Data simplifies general purpose parallel GPU and CPU compute in Ray through its -powerful :ref:`Datastream ` primitive. Datastreams enable workloads such as +powerful :ref:`Datastream ` primitive. Datastreams enable workloads such as :ref:`GPU batch inference ` to run efficiently on large datasets, maximizing resource utilization by keeping the working data fitting into Ray object store memory. @@ -44,7 +44,7 @@ As part of the Ray ecosystem, Ray Data can leverage the full functionality of Ra e.g., using actors for optimizing setup time and GPU scheduling, and supports data throughputs of 100GiB/s or more for common inference workloads. -To learn more about the features Ray Data supports, read the +To learn more about the features Ray Data supports, read the :ref:`Data User Guide `. --------------------------------------- @@ -52,8 +52,8 @@ Streaming Preprocessing for ML Training --------------------------------------- Use Ray Data to load and preprocess data for distributed :ref:`ML training pipelines ` in a streaming fashion. -Ray Data is intended to serve as a last-mile bridge from storage or ETL pipeline outputs to distributed -applications and libraries in Ray. Don't use it as a replacement for more general data +Ray Data serves as a last-mile bridge from storage or ETL pipeline outputs to distributed +applications and libraries in Ray. 
Don't use it as a replacement for more general data processing systems. .. image:: images/datastream-loading-1.png diff --git a/doc/source/data/doc_code/loading_data.py b/doc/source/data/doc_code/loading_data.py index ff34b4ec4377..c161737f8484 100644 --- a/doc/source/data/doc_code/loading_data.py +++ b/doc/source/data/doc_code/loading_data.py @@ -20,36 +20,6 @@ # __gen_synth_tabular_range_end__ # fmt: on -# fmt: off -# __gen_synth_tensor_range_begin__ -# Create a Datastream of tensors. -ds = ray.data.range_tensor(100 * 64 * 64, shape=(64, 64)) -# -> Datastream( -# num_blocks=200, -# num_rows=409600, -# schema={data: numpy.ndarray(shape=(64, 64), dtype=int64)} -# ) - -ds.take_batch(5) -# -> {'data': array( -# [[[0, 0, 0, ..., 0, 0, 0], -# [0, 0, 0, ..., 0, 0, 0], -# [0, 0, 0, ..., 0, 0, 0], -# ..., -# [0, 0, 0, ..., 0, 0, 0], -# [0, 0, 0, ..., 0, 0, 0], -# [0, 0, 0, ..., 0, 0, 0]], -# ... -# [[4, 4, 4, ..., 4, 4, 4], -# [4, 4, 4, ..., 4, 4, 4], -# [4, 4, 4, ..., 4, 4, 4], -# ..., -# [4, 4, 4, ..., 4, 4, 4], -# [4, 4, 4, ..., 4, 4, 4], -# [4, 4, 4, ..., 4, 4, 4]]])} -# __gen_synth_tensor_range_end__ -# fmt: on - # fmt: off # __from_items_begin__ # Create a Datastream from python dicts. 
@@ -131,7 +101,7 @@ # fmt: off # __read_images_begin__ ds = ray.data.read_images("example://image-datasets/simple") -# -> Datastream(num_blocks=3, num_rows=3, +# -> Datastream(num_blocks=3, num_rows=3, # schema={image: numpy.ndarray(shape=(32, 32, 3), dtype=uint8)}) ds.take(1) @@ -436,7 +406,7 @@ # 'passenger_count': 1, # 'trip_distance': 1.5, # 'rate_code_id': '1', -# 'store_and_fwd_flag': 'N', +# 'store_and_fwd_flag': 'N', # ..., # } # { @@ -446,7 +416,7 @@ # 'passenger_count': 1, # 'trip_distance': 2.5999999046325684, # 'rate_code_id': '1', -# 'store_and_fwd_flag': 'N', +# 'store_and_fwd_flag': 'N', # ..., # } # __read_parquet_s3_end__ diff --git a/doc/source/data/key-concepts.rst b/doc/source/data/key-concepts.rst index 6b8fec5003e8..6469021530e9 100644 --- a/doc/source/data/key-concepts.rst +++ b/doc/source/data/key-concepts.rst @@ -15,7 +15,8 @@ Each block holds a set of records in an `Arrow table `_. Having multiple blocks in a datastream allows for parallel transformation and ingest. -For ML use cases, Datastream also natively supports mixing :ref:`Tensors ` and tabular data. +For ML use cases, Datastream natively supports mixing tensors with tabular data. To +learn more, read :ref:`Working with tensor data `. The following figure visualizes a datastream with three blocks, each holding 1000 rows. Note that certain blocks may not be computed yet. Normally, callers iterate over datastream blocks in a streaming fashion, so that not all @@ -100,7 +101,7 @@ Fault tolerance Datastream performs *lineage reconstruction* to recover data. If an application error or system failure occurs, Datastream recreates lost blocks by re-executing tasks. If ``compute=ActorPoolStrategy(size=n)`` is used, then Ray -will restart the actor used for computing the block prior to re-executing the task. +restarts the actor used for computing the block prior to re-executing the task. Fault tolerance is not supported if the original worker process that created the Datastream dies. 
This is because the creator stores the metadata for the :ref:`objects ` that comprise the Datastream. diff --git a/doc/source/data/loading-data.rst b/doc/source/data/loading-data.rst index 0d4a8cea08fc..e82d6301850b 100644 --- a/doc/source/data/loading-data.rst +++ b/doc/source/data/loading-data.rst @@ -30,12 +30,22 @@ Generating Synthetic Data .. tab-item:: Tensor Range Create a datastream from a range of integers, packing this integer range into - tensors of the provided shape. - - .. literalinclude:: ./doc_code/loading_data.py - :language: python - :start-after: __gen_synth_tensor_range_begin__ - :end-before: __gen_synth_tensor_range_end__ + ndarrays of the provided shape. + + .. doctest:: + + >>> import ray + >>> ds = ray.data.range_tensor(100 * 64 * 64, shape=(64, 64)) + >>> ds.schema() + Schema({'data': numpy.ndarray(shape=(64, 64), dtype=int64)}) + >>> ds.show(1) + {'data': array([[0, 0, 0, ..., 0, 0, 0], + [0, 0, 0, ..., 0, 0, 0], + [0, 0, 0, ..., 0, 0, 0], + ..., + [0, 0, 0, ..., 0, 0, 0], + [0, 0, 0, ..., 0, 0, 0], + [0, 0, 0, ..., 0, 0, 0]])} .. _datastream_reading_from_storage: @@ -105,10 +115,10 @@ Common File Formats .. tab-item:: NumPy - Read NumPy files and directories. The NumPy data will be represented via the Ray Data - :class:`tensor extension type `. - Refer to the :ref:`tensor data guide ` for more information on working - with tensors. + Read NumPy files and directories. + + This function represents NumPy data as ndarrays. To learn more, read + :ref:`Working with tensor data `. .. literalinclude:: ./doc_code/loading_data.py :language: python @@ -132,9 +142,8 @@ Common File Formats Call :func:`~ray.data.read_images` to read images. - This function represents image data using the Ray Data - :class:`tensor extension type `. - For more information on working with tensors, refer to the :ref:`tensor data guide `. + This function represents images as ndarrays. To learn more, read + :ref:`Working with tensor data `. .. 
literalinclude:: ./doc_code/loading_data.py :language: python @@ -588,7 +597,7 @@ the collection. The execution results are then used to create a Datastream. Reading From SQL Databases -------------------------- -Call :func:`~ray.data.read_sql` to read data from a database that provides a +Call :func:`~ray.data.read_sql` to read data from a database that provides a `Python DB API2-compliant `_ connector. .. tab-set:: diff --git a/doc/source/data/performance-tips.rst b/doc/source/data/performance-tips.rst index 6c56149431f5..edc33b25267c 100644 --- a/doc/source/data/performance-tips.rst +++ b/doc/source/data/performance-tips.rst @@ -29,7 +29,7 @@ These stats can be used to understand the performance of your Datastream workloa .. code-block:: - Stage 1 ReadRange->Map->Map: 16/16 blocks executed in 0.37s + Stage 1 ReadRange->Map->Map: 16/16 blocks executed in 0.37s * Remote wall time: 101.55ms min, 331.39ms max, 135.24ms mean, 2.16s total * Remote cpu time: 7.42ms min, 15.88ms max, 11.01ms mean, 176.15ms total * Peak heap memory usage (MiB): 157.18 min, 157.73 max, 157 mean @@ -89,7 +89,7 @@ may incur data copies; which conversions cause data copying is given in the belo .. note:: \* No copies occur when converting between Arrow, Pandas, and NumPy formats for columns - represented in the Ray Data tensor extension type (except for bool arrays). + represented as ndarrays (except for bool arrays). Parquet Column Pruning diff --git a/doc/source/data/user-guide.rst b/doc/source/data/user-guide.rst index e9fe208432f3..029cda45dac5 100644 --- a/doc/source/data/user-guide.rst +++ b/doc/source/data/user-guide.rst @@ -16,7 +16,7 @@ show you how achieve several tasks. 
transforming-data consuming-data batch_inference - data-tensor-support + working-with-tensors custom-datasource data-internals performance-tips diff --git a/doc/source/data/working-with-tensors.rst b/doc/source/data/working-with-tensors.rst new file mode 100644 index 000000000000..c30a62d592d8 --- /dev/null +++ b/doc/source/data/working-with-tensors.rst @@ -0,0 +1,128 @@ +.. _working_with_tensors: + +Working with Tensors +==================== + +N-dimensional arrays (i.e., tensors) are ubiquitous in ML workloads. This guide +describes the limitations and best practices of working with such data. + +Tensor data representation +-------------------------- + +Ray Data represents tensors as +`NumPy ndarrays `__. + +.. testcode:: + + import ray + + ds = ray.data.read_images("s3://anonymous@air-example-data/digits") + print(ds) + +.. testoutput:: + :options: +ELLIPSIS + + Datastream( + num_blocks=..., + num_rows=100, + schema={image: numpy.ndarray(shape=(28, 28), dtype=uint8)} + ) + +Batches of fixed-shape tensors +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +If your tensors have a fixed shape, Ray Data represents batches as regular ndarrays. + +.. doctest:: + + >>> import ray + >>> ds = ray.data.read_images("s3://anonymous@air-example-data/digits") + >>> batch = ds.take_batch(batch_size=32) + >>> batch["image"].shape + (32, 28, 28) + >>> batch["image"].dtype + dtype('uint8') + +Batches of variable-shape tensors +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +If your tensors vary in shape, Ray Data represents batches as ragged arrays. + +.. doctest:: + + >>> import ray + >>> ds = ray.data.read_images("s3://anonymous@air-example-data/AnimalDetection") + >>> batch = ds.take_batch(batch_size=32) + >>> batch["image"].shape + (32,) + >>> batch["image"].dtype + dtype('O') + +Elements of ragged arrays are regular ndarrays. + +.. 
doctest:: + + >>> batch["image"][0].dtype + dtype('uint8') + >>> batch["image"][0].shape # doctest: +SKIP + (375, 500, 3) + >>> batch["image"][3].shape # doctest: +SKIP + (333, 465, 3) + + +Saving tensor data +------------------ + +Save tensor data in Parquet or Numpy files. Other formats aren't supported. + +.. tab-set:: + + .. tab-item:: Parquet + + Call :meth:`~ray.data.Dataset.write_parquet` to save data in Parquet files. + + .. testcode:: + + import ray + + ds = ray.data.read_images("example://image-datasets/simple") + ds.write_parquet("/tmp/simple") + + + .. tab-item:: NumPy + + Call :meth:`~ray.data.Dataset.write_numpy` to save an ndarray column in a NumPy + file. + + .. testcode:: + + import ray + + ds = ray.data.read_images("example://image-datasets/simple") + ds.write_numpy("/tmp/simple.npy", column="image") + +For more information on saving data, read :ref:`Saving data `. + +Transforming variable-shape tensor data +--------------------------------------- + +Call :meth:`~ray.data.Dataset.map` to transform variable-shape tensor data. Don't use +:meth:`~ray.data.Dataset.map_batches`. + +.. testcode:: + + from typing import Any, Dict + + import ray + import numpy as np + + ds = ray.data.read_images("s3://anonymous@air-example-data/AnimalDetection") + + def increase_brightness(row: Dict[str, Any]) -> Dict[str, Any]: + row["image"] = np.clip(row["image"] + 4, 0, 255) + return row + + ds.map(increase_brightness) + +For more information on transforming data, read +:ref:`Transforming data `. diff --git a/python/ray/data/read_api.py b/python/ray/data/read_api.py index 5170a3dad2f7..bfdd635ca61e 100644 --- a/python/ray/data/read_api.py +++ b/python/ray/data/read_api.py @@ -702,7 +702,7 @@ def read_images( Returns: A :class:`~ray.data.Datastream` producing tensors that represent the images at the specified paths. For information on working with tensors, read the - :ref:`tensor data guide `. + :ref:`tensor data guide `. 
Raises: ValueError: if ``size`` contains non-positive numbers. From 33a332669c1b95f548f98ae8e352aa073f03daf7 Mon Sep 17 00:00:00 2001 From: Balaji Veeramani Date: Fri, 5 May 2023 13:02:19 -0700 Subject: [PATCH 259/424] [Docs] Revise "How to write code snippets" (#35066) This PR adds instructions on how to illustrate outputs when your example is non-deterministic. It also makes some minor revisions. --- .../ray-contribute/writing-code-snippets.rst | 61 ++++++++++++++----- 1 file changed, 46 insertions(+), 15 deletions(-) diff --git a/doc/source/ray-contribute/writing-code-snippets.rst b/doc/source/ray-contribute/writing-code-snippets.rst index dd579e7f3434..84cc98fc15eb 100644 --- a/doc/source/ray-contribute/writing-code-snippets.rst +++ b/doc/source/ray-contribute/writing-code-snippets.rst @@ -132,9 +132,9 @@ want to print intermediate objects, use *doctest-style*. :: >>> import ray >>> ds = ray.data.range(100) >>> ds.schema() - + Schema({'id': DataType(int64)}) >>> ds.take(5) - [0, 1, 2, 3, 4] + [{'id': 0}, {'id': 1}, {'id': 2}, {'id': 3}, {'id': 4}] When to use *code-block-style* ============================== @@ -219,24 +219,37 @@ If your Python code is non-deterministic, or if your output is excessively long, Ignoring *doctest-style* outputs ================================ -To ignore parts of a *doctest-style* output, append `# doctest: +ELLIPSIS` to your Python code and replace problematic sections with ellipsis. :: +To ignore parts of a *doctest-style* output, add `:options: +ELLIPSIS` to +the `doctest` directive and replace problematic sections with ellipsis. :: .. 
doctest:: + :options: +ELLIPSIS >>> import ray - >>> ray.data.read_images("s3://anonymous@air-example-data-2/imagenet-sample-images") # doctest: +ELLIPSIS + >>> ray.data.read_images("s3://anonymous@air-example-data-2/imagenet-sample-images") Datastream( - num_blocks=..., - num_rows=..., - schema={image: numpy.ndarray(shape=..., dtype=uint8)} + num_blocks=..., + num_rows=..., + schema={image: numpy.ndarray(shape=..., dtype=uint8)} ) +If you omit the `doctest` directive, append `# doctest: +ELLIPSIS` to your code instead. + + >>> import ray + >>> ray.data.read_images("s3://anonymous@air-example-data-2/imagenet-sample-images") # doctest: +ELLIPSIS + Datastream( + num_blocks=..., + num_rows=..., + schema={image: numpy.ndarray(shape=..., dtype=uint8)} + ) + To ignore an output altogether, write a *code-block-style* snippet. Don't use `# doctest: +SKIP`. Ignoring *code-block-style* outputs =================================== -To ignore parts of a *code-block-style* output, add `:options: +ELLIPSIS` to the `testoutput` block and replace problematic sections with ellipsis. :: +If parts of your output are long or non-deterministic, add `:options: +ELLIPSIS` to +the `testoutput` directive and replace problematic sections with ellipsis. :: .. testcode:: @@ -248,18 +261,36 @@ To ignore parts of a *code-block-style* output, add `:options: +ELLIPSIS` to the :options: +ELLIPSIS Datastream( - num_blocks=..., - num_rows=..., - schema={image: numpy.ndarray(shape=..., dtype=uint8)} + num_blocks=..., + num_rows=..., + schema={image: numpy.ndarray(shape=..., dtype=uint8)} ) -To ignore an output altogether, replace the output with a single elipsis. :: +If your output is nondeterministic and you want to display a sample output, add +`:options: +SKIP`. :: + + .. testcode:: + + import random + print(random.random()) + + .. 
testoutput:: + :options: +SKIP + + 0.969461416250246 + +If your output is hard to test and you don't want to display a sample output, add +`:options: +SKIP` and `:hide:`. :: + + .. testcode:: + + print("This output is hidden and untested") .. testoutput:: :hide: - :options: +ELLIPSIS + :options: +SKIP - ... + ... # Add ellipsis. Otherwise, Sphinx can't parse the block. -------------------- How to test examples @@ -291,4 +322,4 @@ To test all code snippets, run RAY_MOCK_MODULES=0 make doctest -in the `ray/doc` directory. \ No newline at end of file +in the `ray/doc` directory. From 634094e36155243752202d2a83a78a7a79437e8d Mon Sep 17 00:00:00 2001 From: Balaji Veeramani Date: Fri, 5 May 2023 13:02:37 -0700 Subject: [PATCH 260/424] [Data][Docs] Remove next steps from "Getting Started" page (#34836) --- doc/source/data/getting-started.rst | 5 ----- doc/source/data/performance-tips.rst | 6 ++++++ 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/doc/source/data/getting-started.rst b/doc/source/data/getting-started.rst index 8ddc797c0bb2..7be122b1eeb4 100644 --- a/doc/source/data/getting-started.rst +++ b/doc/source/data/getting-started.rst @@ -164,8 +164,3 @@ or remote filesystems. To learn more about saving datastream contents, read :ref:`Saving data `. - -Next Steps ----------- - -* To check how your application is doing, you can use the :ref:`Ray dashboard`. diff --git a/doc/source/data/performance-tips.rst b/doc/source/data/performance-tips.rst index edc33b25267c..41bcf691a1b6 100644 --- a/doc/source/data/performance-tips.rst +++ b/doc/source/data/performance-tips.rst @@ -3,6 +3,12 @@ Performance Tips and Tuning =========================== +Monitoring your application +~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +View the Ray dashboard to monitor your application and troubleshoot issues. To learn +more about the Ray dashboard, read :ref:`Ray Dashboard `. 
+ Debugging Statistics ~~~~~~~~~~~~~~~~~~~~ From 93a96d093829a9a6c486abd6ba2e91131986f289 Mon Sep 17 00:00:00 2001 From: Sven Mika Date: Fri, 5 May 2023 22:03:40 +0200 Subject: [PATCH 261/424] [RLlib] APPO+new-stack (Atari benchmark) - Preparatory PR 03 - PyTorch. (#34779) --- rllib/BUILD | 5 +- rllib/algorithms/algorithm.py | 8 +- rllib/algorithms/algorithm_config.py | 14 +- rllib/algorithms/alpha_star/alpha_star.py | 1 - rllib/algorithms/appo/appo.py | 91 ++++---- rllib/algorithms/appo/appo_learner.py | 70 ++++-- .../appo/tests/{tf => }/__init__.py | 0 .../appo/tests/{tf => }/test_appo_learner.py | 40 ++-- rllib/algorithms/appo/tf/appo_tf_learner.py | 83 +++---- .../algorithms/appo/tf/appo_tf_policy_rlm.py | 10 +- rllib/algorithms/appo/tf/appo_tf_rl_module.py | 17 +- rllib/algorithms/appo/torch/__init__.py | 0 .../appo/torch/appo_torch_learner.py | 206 +++++++++++++++++ .../appo/torch/appo_torch_policy_rlm.py | 213 ++++++++++++++++++ .../appo/torch/appo_torch_rl_module.py | 51 +++++ rllib/algorithms/impala/impala.py | 107 ++++++--- rllib/algorithms/impala/impala_learner.py | 26 ++- .../impala/tests/test_impala_learner.py | 26 +-- .../algorithms/impala/tf/impala_tf_learner.py | 44 ++-- .../impala/tf/impala_tf_policy_rlm.py | 10 +- rllib/algorithms/impala/tf/vtrace_tf_v2.py | 50 ++-- .../impala/torch/impala_torch_learner.py | 41 ++-- .../impala/torch/impala_torch_policy_rlm.py | 10 +- .../impala/torch/vtrace_torch_v2.py | 36 +-- rllib/algorithms/ppo/ppo.py | 21 +- rllib/algorithms/ppo/ppo_learner.py | 111 ++++----- .../algorithms/ppo/tests/test_ppo_learner.py | 13 +- .../ppo/tests/test_ppo_rl_module.py | 14 +- .../ppo/tests/test_ppo_with_rl_module.py | 78 +++---- rllib/algorithms/ppo/tf/ppo_tf_learner.py | 66 ++++-- rllib/algorithms/ppo/tf/ppo_tf_policy_rlm.py | 4 +- rllib/algorithms/ppo/tf/ppo_tf_rl_module.py | 53 +++-- .../algorithms/ppo/torch/ppo_torch_learner.py | 73 +++--- rllib/core/learner/learner.py | 53 ++++- rllib/core/learner/learner_group.py | 19 +- 
rllib/core/learner/tf/tf_learner.py | 52 ++++- rllib/core/learner/torch/torch_learner.py | 77 +++++-- rllib/core/models/base.py | 3 + rllib/core/models/catalog.py | 3 + rllib/core/models/tests/test_catalog.py | 13 +- rllib/core/models/tf/encoder.py | 5 +- rllib/core/rl_module/torch/torch_rl_module.py | 17 +- rllib/evaluation/postprocessing.py | 26 ++- rllib/policy/eager_tf_policy_v2.py | 128 ++++++----- rllib/policy/torch_policy_v2.py | 1 - .../multi-agent-cartpole-alpha-star.yaml | 1 - .../appo/cartpole-appo-vtrace-fake-gpus.yaml | 1 - .../appo/cartpole-appo-vtrace.yaml | 1 - .../appo/frozenlake-appo-vtrace.yaml | 1 - .../appo/multi-agent-cartpole-appo.yaml | 1 - ...ulti-agent-cartpole-w-100-policies-appo.py | 1 - .../pong-appo-w-rl-modules-and-learner.yaml | 1 - .../impala/cartpole-impala-fake-gpus.yaml | 2 - .../impala/cartpole-impala.yaml | 1 - .../impala/multi-agent-cartpole-impala.yaml | 1 - rllib/utils/torch_utils.py | 26 ++- 56 files changed, 1371 insertions(+), 655 deletions(-) rename rllib/algorithms/appo/tests/{tf => }/__init__.py (100%) rename rllib/algorithms/appo/tests/{tf => }/test_appo_learner.py (79%) create mode 100644 rllib/algorithms/appo/torch/__init__.py create mode 100644 rllib/algorithms/appo/torch/appo_torch_learner.py create mode 100644 rllib/algorithms/appo/torch/appo_torch_policy_rlm.py create mode 100644 rllib/algorithms/appo/torch/appo_torch_rl_module.py diff --git a/rllib/BUILD b/rllib/BUILD index 6312a7675c34..30df6d9413f6 100644 --- a/rllib/BUILD +++ b/rllib/BUILD @@ -174,7 +174,7 @@ py_test( py_test( name = "learning_tests_cartpole_appo_w_rl_modules_and_learner", main = "tests/run_regression_tests.py", - tags = ["team:rllib", "exclusive", "learning_tests", "learning_tests_cartpole", "learning_tests_discrete", "tf_only", "no_tf_static_graph"], + tags = ["team:rllib", "exclusive", "learning_tests", "learning_tests_cartpole", "learning_tests_discrete", "no_tf_static_graph"], size = "medium", # bazel may complain about it being too 
long sometimes - medium is on purpose as some frameworks take longer srcs = ["tests/run_regression_tests.py"], data = ["tuned_examples/appo/cartpole-appo-w-rl-modules-and-learner.yaml"], @@ -599,7 +599,6 @@ py_test( args = ["--dir=tuned_examples/ppo"] ) -# TODO (Sven): Enable tf2 for this test. py_test( name = "learning_tests_pendulum_ppo_with_rl_module", main = "tests/run_regression_tests.py", @@ -930,7 +929,7 @@ py_test( name = "test_appo_learner", tags = ["team:rllib", "algorithms_dir"], size = "medium", - srcs = ["algorithms/appo/tests/tf/test_appo_learner.py"] + srcs = ["algorithms/appo/tests/test_appo_learner.py"] ) # ARS diff --git a/rllib/algorithms/algorithm.py b/rllib/algorithms/algorithm.py index d1c12aca3467..d47704ae7373 100644 --- a/rllib/algorithms/algorithm.py +++ b/rllib/algorithms/algorithm.py @@ -723,10 +723,10 @@ def setup(self, config: AlgorithmConfig) -> None: self.learner_group = None if self.config._enable_learner_api: # TODO (Kourosh): This is an interim solution where policies and modules - # co-exist. In this world we have both policy_map and MARLModule that need - # to be consistent with one another. To make a consistent parity between - # the two we need to loop through the policy modules and create a simple - # MARLModule from the RLModule within each policy. + # co-exist. In this world we have both policy_map and MARLModule that need + # to be consistent with one another. To make a consistent parity between + # the two we need to loop through the policy modules and create a simple + # MARLModule from the RLModule within each policy. 
local_worker = self.workers.local_worker() module_spec = local_worker.marl_module_spec learner_group_config = self.config.get_learner_group_config(module_spec) diff --git a/rllib/algorithms/algorithm_config.py b/rllib/algorithms/algorithm_config.py index 6d20f874cdef..f24b2affeb7c 100644 --- a/rllib/algorithms/algorithm_config.py +++ b/rllib/algorithms/algorithm_config.py @@ -7,6 +7,7 @@ Callable, Container, Dict, + List, Mapping, Optional, Tuple, @@ -315,6 +316,7 @@ def __init__(self, algo_class=None): # `self.training()` self.gamma = 0.99 self.lr = 0.001 + self.lr_schedule = None self.grad_clip = None self.grad_clip_by = "global_norm" self.train_batch_size = 32 @@ -1588,6 +1590,7 @@ def training( *, gamma: Optional[float] = NotProvided, lr: Optional[float] = NotProvided, + lr_schedule: Optional[List[List[Union[int, float]]]] = NotProvided, grad_clip: Optional[float] = NotProvided, grad_clip_by: Optional[str] = NotProvided, train_batch_size: Optional[int] = NotProvided, @@ -1602,6 +1605,10 @@ def training( Args: gamma: Float specifying the discount factor of the Markov Decision process. lr: The default learning rate. + lr_schedule: Learning rate schedule. In the format of + [[timestep, lr-value], [timestep, lr-value], ...] + Intermediary timesteps will be assigned to interpolated learning rate + values. A schedule should normally start from timestep 0. grad_clip: The value to use for gradient clipping. 
Depending on the `grad_clip_by` setting, gradients will either be clipped by value, norm, or global_norm (see docstring on `grad_clip_by` below for more @@ -1653,6 +1660,8 @@ def training( self.gamma = gamma if lr is not NotProvided: self.lr = lr + if lr_schedule is not NotProvided: + self.lr_schedule = lr_schedule if grad_clip is not NotProvided: self.grad_clip = grad_clip if grad_clip_by is not NotProvided: @@ -3129,6 +3138,9 @@ def get_learner_group_config(self, module_spec: ModuleSpec) -> LearnerGroupConfi .learner( learner_class=self.learner_class, # TODO (Kourosh): optimizer config can now be more complicated. + # TODO (Sven): Shouldn't optimizer config be part of learner HPs? + # E.g. if we have a lr schedule, this will have to be managed by + # the learner, NOT the optimizer directly. optimizer_config={ "lr": self.lr, "grad_clip": self.grad_clip, @@ -3159,7 +3171,7 @@ def get_learner_hyperparameters(self) -> LearnerHyperparameters: Note that LearnerHyperparameters should always be derived directly from a AlgorithmConfig object's own settings and considered frozen/read-only. """ - return LearnerHyperparameters() + return LearnerHyperparameters(lr_schedule=self.lr_schedule) def __setattr__(self, key, value): """Gatekeeper in case we are in frozen state and need to error.""" diff --git a/rllib/algorithms/alpha_star/alpha_star.py b/rllib/algorithms/alpha_star/alpha_star.py index 02d05fcc4324..09a4d6039289 100644 --- a/rllib/algorithms/alpha_star/alpha_star.py +++ b/rllib/algorithms/alpha_star/alpha_star.py @@ -138,7 +138,6 @@ def __init__(self, algo_class=None): # Override some of APPOConfig's default values with AlphaStar-specific # values. 
- self.vtrace_drop_last_ts = False self.min_time_s_per_iteration = 2 self.policies = None self.simple_optimizer = True diff --git a/rllib/algorithms/appo/appo.py b/rllib/algorithms/appo/appo.py index 44cb0733563e..5b503f239192 100644 --- a/rllib/algorithms/appo/appo.py +++ b/rllib/algorithms/appo/appo.py @@ -28,8 +28,6 @@ NUM_AGENT_STEPS_SAMPLED, NUM_ENV_STEPS_SAMPLED, NUM_TARGET_UPDATES, - NUM_ENV_STEPS_TRAINED, - NUM_AGENT_STEPS_TRAINED, ) from ray.rllib.utils.metrics import ALL_MODULES, LEARNER_STATS_KEY from ray.rllib.utils.typing import ( @@ -213,7 +211,13 @@ def training( @override(ImpalaConfig) def get_default_learner_class(self): - if self.framework_str == "tf2": + if self.framework_str == "torch": + from ray.rllib.algorithms.appo.torch.appo_torch_learner import ( + APPOTorchLearner, + ) + + return APPOTorchLearner + elif self.framework_str == "tf2": from ray.rllib.algorithms.appo.tf.appo_tf_learner import APPOTfLearner return APPOTfLearner @@ -222,16 +226,21 @@ def get_default_learner_class(self): @override(ImpalaConfig) def get_default_rl_module_spec(self) -> SingleAgentRLModuleSpec: - if self.framework_str == "tf2": - from ray.rllib.algorithms.appo.appo_catalog import APPOCatalog - from ray.rllib.algorithms.appo.tf.appo_tf_rl_module import APPOTfRLModule - - return SingleAgentRLModuleSpec( - module_class=APPOTfRLModule, catalog_class=APPOCatalog + if self.framework_str == "torch": + from ray.rllib.algorithms.appo.torch.appo_torch_rl_module import ( + APPOTorchRLModule as RLModule, + ) + elif self.framework_str == "tf2": + from ray.rllib.algorithms.appo.tf.appo_tf_rl_module import ( + APPOTfRLModule as RLModule, ) else: raise ValueError(f"The framework {self.framework_str} is not supported.") + from ray.rllib.algorithms.appo.appo_catalog import APPOCatalog + + return SingleAgentRLModuleSpec(module_class=RLModule, catalog_class=APPOCatalog) + @override(ImpalaConfig) def get_learner_hyperparameters(self) -> AppoHyperparameters: base_hps = 
super().get_learner_hyperparameters() @@ -241,6 +250,9 @@ def get_learner_hyperparameters(self) -> AppoHyperparameters: kl_coeff=self.kl_coeff, clip_param=self.clip_param, tau=self.tau, + target_update_frequency_ts=( + self.train_batch_size * self.num_sgd_iter * self.target_update_frequency + ), **dataclasses.asdict(base_hps), ) @@ -289,43 +301,14 @@ def after_train_step(self, train_results: ResultDict) -> None: training step. """ - last_update = self._counters[LAST_TARGET_UPDATE_TS] - - if self.config._enable_learner_api and train_results: - # using steps trained here instead of sampled ... I'm not sure why the - # other implemenetation uses sampled. - # to be quite frank, im not sure if I understand how their target update - # freq would work. The difference in steps sampled/trained is pretty - # much always going to be larger than self.config.num_sgd_iter * - # self.config.minibatch_buffer_size unless the number of steps collected - # is really small. The thing is that the default rollout fragment length - # is 50, so the minibatch buffer size * num_sgd_iter is going to be - # have to be 50 to even meet the threshold of having delayed target - # updates. - # we should instead have the target / kl threshold update be based off - # of the train_batch_size * some target update frequency * num_sgd_iter. 
- cur_ts = self._counters[ - NUM_ENV_STEPS_TRAINED - if self.config.count_steps_by == "env_steps" - else NUM_AGENT_STEPS_TRAINED - ] - target_update_steps_freq = ( - self.config.train_batch_size - * self.config.num_sgd_iter - * self.config.target_update_frequency - ) - if (cur_ts - last_update) >= target_update_steps_freq: - kls_to_update = {} - for module_id, module_results in train_results.items(): - if module_id != ALL_MODULES: - kls_to_update[module_id] = module_results[LEARNER_STATS_KEY][ - LEARNER_RESULTS_KL_KEY - ] - self._counters[NUM_TARGET_UPDATES] += 1 - self._counters[LAST_TARGET_UPDATE_TS] = cur_ts - self.learner_group.additional_update(sampled_kls=kls_to_update) - + if self.config._enable_learner_api: + if NUM_TARGET_UPDATES in train_results: + self._counters[NUM_TARGET_UPDATES] += train_results[NUM_TARGET_UPDATES] + self._counters[LAST_TARGET_UPDATE_TS] = train_results[ + LAST_TARGET_UPDATE_TS + ] else: + last_update = self._counters[LAST_TARGET_UPDATE_TS] cur_ts = self._counters[ NUM_AGENT_STEPS_SAMPLED if self.config.count_steps_by == "agent_steps" @@ -367,6 +350,17 @@ def update(pi, pi_id): # Worker. self.workers.local_worker().foreach_policy_to_train(update) + @override(Impala) + def _get_additional_update_kwargs(self, train_results) -> dict: + return dict( + last_update=self._counters[LAST_TARGET_UPDATE_TS], + mean_kl_loss_per_module={ + mid: r[LEARNER_STATS_KEY][LEARNER_RESULTS_KL_KEY] + for mid, r in train_results.items() + if mid != ALL_MODULES + }, + ) + @override(Impala) def training_step(self) -> ResultDict: train_results = super().training_step() @@ -388,10 +382,11 @@ def get_default_policy_class( ) -> Optional[Type[Policy]]: if config["framework"] == "torch": if config._enable_rl_module_api: - raise ValueError( - "APPO with the torch backend is not yet supported by " - " the RLModule and Learner API." 
+ from ray.rllib.algorithms.appo.torch.appo_torch_policy_rlm import ( + APPOTorchPolicyWithRLModule, ) + + return APPOTorchPolicyWithRLModule else: from ray.rllib.algorithms.appo.appo_torch_policy import APPOTorchPolicy diff --git a/rllib/algorithms/appo/appo_learner.py b/rllib/algorithms/appo/appo_learner.py index c92ac50ad687..d067fdb25587 100644 --- a/rllib/algorithms/appo/appo_learner.py +++ b/rllib/algorithms/appo/appo_learner.py @@ -3,15 +3,13 @@ from dataclasses import dataclass from typing import Any, Dict, Mapping -import numpy as np - from ray.rllib.algorithms.impala.impala_learner import ( ImpalaLearner, ImpalaHyperparameters, ) from ray.rllib.core.rl_module.marl_module import ModuleID from ray.rllib.utils.annotations import override -from ray.rllib.utils.framework import get_variable +from ray.rllib.utils.metrics import LAST_TARGET_UPDATE_TS, NUM_TARGET_UPDATES LEARNER_RESULTS_KL_KEY = "mean_kl_loss" @@ -35,6 +33,7 @@ class to configure your algorithm. kl_target: float = None clip_param: float = None tau: float = None + target_update_frequency_ts: int = None class AppoLearner(ImpalaLearner): @@ -46,34 +45,63 @@ class AppoLearner(ImpalaLearner): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) - # Create framework-specific variables (simple python vars for torch). - self.kl_coeffs = defaultdict( - lambda: get_variable( - self._hps.kl_coeff, - framework=self.framework, - trainable=False, - dtype=np.float32, - ) + + # We need to make sure kl_coeff are available as framework tensors that are + # registered as part of the graph so that upon update the graph can be updated + # (e.g. in TF with eager tracing). 
+ self.curr_kl_coeffs_per_module = defaultdict( + lambda: self._get_tensor_variable(self.hps.kl_coeff) ) @override(ImpalaLearner) def remove_module(self, module_id: str): super().remove_module(module_id) - self.kl_coeffs.pop(module_id) + self.curr_kl_coeffs_per_module.pop(module_id) @override(ImpalaLearner) def additional_update_per_module( - self, module_id: ModuleID, sampled_kls: Dict[ModuleID, float], **kwargs + self, + module_id: ModuleID, + *, + last_update: int, + mean_kl_loss_per_module: dict, + timestep: int, + **kwargs, ) -> Mapping[str, Any]: """Updates the target networks and KL loss coefficients (per module). Args: module_id: """ - self._update_module_target_networks(module_id) - if self._hps.use_kl_loss: - self._update_module_kl_coeff(module_id, sampled_kls) - return {} + # TODO (avnish) Using steps trained here instead of sampled ... I'm not sure + # why the other implementation uses sampled. + # The difference in steps sampled/trained is pretty + # much always going to be larger than self.config.num_sgd_iter * + # self.config.minibatch_buffer_size unless the number of steps collected + # is really small. The thing is that the default rollout fragment length + # is 50, so the minibatch buffer size * num_sgd_iter is going to be + # have to be 50 to even meet the threshold of having delayed target + # updates. + # We should instead have the target / kl threshold update be based off + # of the train_batch_size * some target update frequency * num_sgd_iter. 
+ results = super().additional_update_per_module(module_id, timestep=timestep) + + if (timestep - last_update) >= self.hps.target_update_frequency_ts: + self._update_module_target_networks(module_id) + results[NUM_TARGET_UPDATES] = 1 + results[LAST_TARGET_UPDATE_TS] = timestep + else: + results[NUM_TARGET_UPDATES] = 0 + results[LAST_TARGET_UPDATE_TS] = last_update + + if self.hps.use_kl_loss and module_id in mean_kl_loss_per_module: + results.update( + self._update_module_kl_coeff( + module_id, mean_kl_loss_per_module[module_id] + ) + ) + + return results @abc.abstractmethod def _update_module_target_networks(self, module_id: ModuleID) -> None: @@ -88,7 +116,7 @@ def _update_module_target_networks(self, module_id: ModuleID) -> None: @abc.abstractmethod def _update_module_kl_coeff( self, module_id: ModuleID, sampled_kls: Dict[ModuleID, float] - ) -> None: + ) -> Mapping[str, Any]: """Dynamically update the KL loss coefficients of each module with. The update is completed using the mean KL divergence between the action @@ -97,7 +125,7 @@ def _update_module_kl_coeff( Args: module_id: The module whose KL loss coefficient to update. - sampled_kls: The KL divergence between the action distributions of - the current policy and old policy of each module. - + sampled_kls: Mapping from Module ID to this module's KL divergence between + the action distributions of the current (most recently updated) module + and the old module version. 
""" diff --git a/rllib/algorithms/appo/tests/tf/__init__.py b/rllib/algorithms/appo/tests/__init__.py similarity index 100% rename from rllib/algorithms/appo/tests/tf/__init__.py rename to rllib/algorithms/appo/tests/__init__.py diff --git a/rllib/algorithms/appo/tests/tf/test_appo_learner.py b/rllib/algorithms/appo/tests/test_appo_learner.py similarity index 79% rename from rllib/algorithms/appo/tests/tf/test_appo_learner.py rename to rllib/algorithms/appo/tests/test_appo_learner.py index 8c3978fc3055..af954bf701e7 100644 --- a/rllib/algorithms/appo/tests/tf/test_appo_learner.py +++ b/rllib/algorithms/appo/tests/test_appo_learner.py @@ -1,6 +1,8 @@ import unittest import numpy as np +import tree # pip install dm_tree + import ray import ray.rllib.algorithms.appo as appo from ray.rllib.algorithms.appo.tf.appo_tf_learner import ( @@ -8,10 +10,10 @@ ) from ray.rllib.core.rl_module.rl_module import SingleAgentRLModuleSpec from ray.rllib.policy.sample_batch import SampleBatch, DEFAULT_POLICY_ID -from ray.rllib.utils.metrics import ALL_MODULES -from ray.rllib.utils.metrics.learner_info import LEARNER_INFO, LEARNER_STATS_KEY +from ray.rllib.utils.metrics.learner_info import LEARNER_INFO from ray.rllib.utils.framework import try_import_tf -from ray.rllib.utils.test_utils import check, framework_iterator +from ray.rllib.utils.test_utils import framework_iterator +from ray.rllib.utils.torch_utils import convert_to_torch_tensor tf1, tf, _ = try_import_tf() @@ -66,26 +68,28 @@ def test_appo_loss(self): fcnet_activation="linear", vf_share_layers=False, ), + _enable_learner_api=True, ) .rl_module( _enable_rl_module_api=True, ) ) # We have to set exploration_config here manually because setting it through - # config.exploration() only deepupdates it + # config.exploration() only deep-updates it config.exploration_config = {} - for fw in framework_iterator(config, ("tf2")): - trainer = config.build() - policy = trainer.get_policy() + for fw in framework_iterator(config, 
frameworks=("torch", "tf2")): + algo = config.build() + policy = algo.get_policy() if fw == "tf2": train_batch = SampleBatch( - tf.nest.map_structure(lambda x: tf.convert_to_tensor(x), FAKE_BATCH) + tree.map_structure(lambda x: tf.convert_to_tensor(x), FAKE_BATCH) ) else: - train_batch = SampleBatch(FAKE_BATCH) - policy_loss = policy.loss(policy.model, policy.dist_class, train_batch) + train_batch = SampleBatch( + tree.map_structure(lambda x: convert_to_torch_tensor(x), FAKE_BATCH) + ) algo_config = config.copy(copy_frozen=False) algo_config.training(_enable_learner_api=True) @@ -103,11 +107,10 @@ def test_appo_loss(self): ) learner_group_config.num_learner_workers = 0 learner_group = learner_group_config.build() - learner_group.set_weights(trainer.get_weights()) - results = learner_group.update(train_batch.as_multi_agent()) - learner_group_loss = results[ALL_MODULES]["total_loss"] + learner_group.set_weights(algo.get_weights()) + learner_group.update(train_batch.as_multi_agent()) - check(learner_group_loss, policy_loss) + algo.stop() def test_kl_coeff_changes(self): initial_kl_coeff = 0.01 @@ -115,6 +118,8 @@ def test_kl_coeff_changes(self): appo.APPOConfig() .environment("CartPole-v1") .framework(eager_tracing=True) + # Asynchronous Algo, make sure we have some results after 1 iteration. + .reporting(min_time_s_per_iteration=10) .rollouts( num_rollout_workers=0, rollout_fragment_length=frag_length, @@ -128,6 +133,7 @@ def test_kl_coeff_changes(self): vf_share_layers=False, ), _enable_learner_api=True, + use_kl_loss=True, kl_coeff=initial_kl_coeff, ) .rl_module( @@ -135,7 +141,7 @@ def test_kl_coeff_changes(self): ) .exploration(exploration_config={}) ) - for _ in framework_iterator(config, frameworks="tf2"): + for _ in framework_iterator(config, frameworks=("torch", "tf2")): algo = config.build() # Call train while results aren't returned because this is # a asynchronous trainer and results are returned asynchronously. 
@@ -144,8 +150,8 @@ def test_kl_coeff_changes(self): if results.get("info", {}).get(LEARNER_INFO, {}).get(DEFAULT_POLICY_ID): break curr_kl_coeff = results["info"][LEARNER_INFO][DEFAULT_POLICY_ID][ - LEARNER_STATS_KEY - ][LEARNER_RESULTS_CURR_KL_COEFF_KEY] + LEARNER_RESULTS_CURR_KL_COEFF_KEY + ] self.assertNotEqual(curr_kl_coeff, initial_kl_coeff) diff --git a/rllib/algorithms/appo/tf/appo_tf_learner.py b/rllib/algorithms/appo/tf/appo_tf_learner.py index 1df6505ef182..cdf8ee9f8361 100644 --- a/rllib/algorithms/appo/tf/appo_tf_learner.py +++ b/rllib/algorithms/appo/tf/appo_tf_learner.py @@ -1,4 +1,4 @@ -from typing import Dict, Mapping +from typing import Any, Dict, Mapping from ray.rllib.policy.sample_batch import SampleBatch from ray.rllib.algorithms.appo.appo_learner import ( @@ -18,20 +18,18 @@ _, tf, _ = try_import_tf() -class APPOTfLearner(TfLearner, AppoLearner): +class APPOTfLearner(AppoLearner, TfLearner): """Implements APPO loss / update logic on top of ImpalaTfLearner.""" - def __init__(self, *args, **kwargs): - TfLearner.__init__(self, *args, **kwargs) - AppoLearner.__init__(self, *args, **kwargs) - @override(TfLearner) def compute_loss_per_module( self, module_id: str, batch: SampleBatch, fwd_out: Mapping[str, TensorType] ) -> TensorType: values = fwd_out[SampleBatch.VF_PREDS] + target_policy_dist = fwd_out[SampleBatch.ACTION_DIST] old_target_policy_dist = fwd_out[OLD_ACTION_DIST_KEY] + old_target_policy_actions_logp = old_target_policy_dist.logp( batch[SampleBatch.ACTIONS] ) @@ -42,32 +40,27 @@ def compute_loss_per_module( behaviour_actions_logp, trajectory_len=self.hps.rollout_frag_or_episode_len, recurrent_seq_len=self.hps.recurrent_seq_len, - drop_last=self.hps.vtrace_drop_last_ts, ) target_actions_logp_time_major = make_time_major( target_actions_logp, trajectory_len=self.hps.rollout_frag_or_episode_len, recurrent_seq_len=self.hps.recurrent_seq_len, - drop_last=self.hps.vtrace_drop_last_ts, ) old_actions_logp_time_major = make_time_major( 
old_target_policy_actions_logp, trajectory_len=self.hps.rollout_frag_or_episode_len, recurrent_seq_len=self.hps.recurrent_seq_len, - drop_last=self.hps.vtrace_drop_last_ts, ) values_time_major = make_time_major( values, trajectory_len=self.hps.rollout_frag_or_episode_len, recurrent_seq_len=self.hps.recurrent_seq_len, - drop_last=self.hps.vtrace_drop_last_ts, ) bootstrap_value = values_time_major[-1] rewards_time_major = make_time_major( batch[SampleBatch.REWARDS], trajectory_len=self.hps.rollout_frag_or_episode_len, recurrent_seq_len=self.hps.recurrent_seq_len, - drop_last=self.hps.vtrace_drop_last_ts, ) # the discount factor that is used should be gamma except for timesteps where @@ -79,20 +72,21 @@ def compute_loss_per_module( batch[SampleBatch.TERMINATEDS], trajectory_len=self.hps.rollout_frag_or_episode_len, recurrent_seq_len=self.hps.recurrent_seq_len, - drop_last=self.hps.vtrace_drop_last_ts, ), dtype=tf.float32, ) ) * self.hps.discount_factor + + # Note that vtrace will compute the main loop on the CPU for better performance. vtrace_adjusted_target_values, pg_advantages = vtrace_tf2( target_action_log_probs=old_actions_logp_time_major, behaviour_action_log_probs=behaviour_actions_logp_time_major, + discounts=discounts_time_major, rewards=rewards_time_major, values=values_time_major, bootstrap_value=bootstrap_value, clip_pg_rho_threshold=self.hps.vtrace_clip_pg_rho_threshold, clip_rho_threshold=self.hps.vtrace_clip_rho_threshold, - discounts=discounts_time_major, ) # The policy gradients loss. @@ -117,8 +111,11 @@ def compute_loss_per_module( ), ) - action_kl = old_target_policy_dist.kl(target_policy_dist) - mean_kl_loss = tf.math.reduce_mean(action_kl) + if self.hps.use_kl_loss: + action_kl = old_target_policy_dist.kl(target_policy_dist) + mean_kl_loss = tf.math.reduce_mean(action_kl) + else: + mean_kl_loss = 0.0 mean_pi_loss = -tf.math.reduce_mean(surrogate_loss) # The baseline loss. 
@@ -126,35 +123,29 @@ def compute_loss_per_module( mean_vf_loss = 0.5 * tf.math.reduce_mean(delta**2) # The entropy loss. - mean_entropy_loss = -tf.math.reduce_mean(target_actions_logp_time_major) + mean_entropy_loss = -tf.math.reduce_mean(target_policy_dist.entropy()) # The summed weighted loss. total_loss = ( mean_pi_loss + (mean_vf_loss * self.hps.vf_loss_coeff) + (mean_entropy_loss * self.hps.entropy_coeff) - + (mean_kl_loss * self.kl_coeffs[module_id]) + + (mean_kl_loss * self.curr_kl_coeffs_per_module[module_id]) ) return { self.TOTAL_LOSS_KEY: total_loss, POLICY_LOSS_KEY: mean_pi_loss, VF_LOSS_KEY: mean_vf_loss, - ENTROPY_KEY: mean_entropy_loss, + ENTROPY_KEY: -mean_entropy_loss, LEARNER_RESULTS_KL_KEY: mean_kl_loss, - LEARNER_RESULTS_CURR_KL_COEFF_KEY: self.kl_coeffs[module_id], + LEARNER_RESULTS_CURR_KL_COEFF_KEY: ( + self.curr_kl_coeffs_per_module[module_id] + ), } @override(AppoLearner) def _update_module_target_networks(self, module_id: ModuleID): - """Update the target policy of each module with the current policy. - - Do that update via polyak averaging. - - Args: - module_id: The module whose target networks need to be updated. - - """ module = self.module[module_id] target_current_network_pairs = module.get_target_network_pairs() @@ -167,27 +158,19 @@ def _update_module_target_networks(self, module_id: ModuleID): ) old_var.assign(updated_var) + @override(AppoLearner) def _update_module_kl_coeff( - self, module_id: ModuleID, sampled_kls: Dict[ModuleID, float] - ): - """Dynamically update the KL loss coefficients of each module with. - - The update is completed using the mean KL divergence between the action - distributions current policy and old policy of each module. That action - distribution is computed during the most recent update/call to `compute_loss`. - - Args: - module_id: The module whose KL loss coefficient to update. - sampled_kls: The KL divergence between the action distributions of - the current policy and old policy of each module. 
- - """ - if module_id in sampled_kls: - sampled_kl = sampled_kls[module_id] - # Update the current KL value based on the recently measured value. - # Increase. - if sampled_kl > 2.0 * self.hps.kl_target: - self.kl_coeffs[module_id].assign(self.kl_coeffs[module_id] * 1.5) - # Decrease. - elif sampled_kl < 0.5 * self.hps.kl_target: - self.kl_coeffs[module_id].assign(self.kl_coeffs[module_id] * 0.5) + self, module_id: ModuleID, sampled_kl: float + ) -> Dict[str, Any]: + # Update the current KL value based on the recently measured value. + # Increase. + kl_coeff_var = self.curr_kl_coeffs_per_module[module_id] + + if sampled_kl > 2.0 * self.hps.kl_target: + # TODO (Kourosh) why not *2.0? + kl_coeff_var.assign(kl_coeff_var * 1.5) + # Decrease. + elif sampled_kl < 0.5 * self.hps.kl_target: + kl_coeff_var.assign(kl_coeff_var * 0.5) + + return {LEARNER_RESULTS_CURR_KL_COEFF_KEY: kl_coeff_var.numpy()} diff --git a/rllib/algorithms/appo/tf/appo_tf_policy_rlm.py b/rllib/algorithms/appo/tf/appo_tf_policy_rlm.py index f01235834d85..24a4ec9cb649 100644 --- a/rllib/algorithms/appo/tf/appo_tf_policy_rlm.py +++ b/rllib/algorithms/appo/tf/appo_tf_policy_rlm.py @@ -11,12 +11,10 @@ GradStatsMixin, TargetNetworkMixin, ) - from ray.rllib.algorithms.impala.impala_tf_policy import ( VTraceClipGradients, VTraceOptimizer, ) - from ray.rllib.policy.eager_tf_policy_v2 import EagerTFPolicyV2 from ray.rllib.utils.annotations import override from ray.rllib.utils.deprecation import Deprecated @@ -61,7 +59,7 @@ def __init__(self, observation_space, action_space, config): KLCoeffMixin.__init__(self, config) GradStatsMixin.__init__(self) EagerTFPolicyV2.__init__(self, observation_space, action_space, config) - # construct the target model and make its weights the same as the model + # Construct the target model and make its weights the same as the model. 
self.target_model = self.make_rl_module() self.target_model.set_weights(self.model.get_weights()) @@ -146,15 +144,17 @@ def loss( dtype=tf.float32, ) ) * self.config["gamma"] + + # Note that vtrace will compute the main loop on the CPU for better performance. vtrace_adjusted_target_values, pg_advantages = vtrace_tf2( target_action_log_probs=old_target_actions_logp_time_major, behaviour_action_log_probs=behaviour_actions_logp_time_major, + discounts=discounts_time_major, rewards=rewards_time_major, values=values_time_major, bootstrap_value=bootstrap_value, clip_pg_rho_threshold=self.config["vtrace_clip_pg_rho_threshold"], clip_rho_threshold=self.config["vtrace_clip_rho_threshold"], - discounts=discounts_time_major, ) is_ratio = tf.clip_by_value( @@ -185,7 +185,7 @@ def loss( mean_vf_loss = 0.5 * tf.math.reduce_mean(delta**2) # The entropy loss. - mean_entropy_loss = -tf.math.reduce_mean(target_actions_logp_time_major) + mean_entropy_loss = -tf.math.reduce_mean(target_policy_dist.entropy()) # The summed weighted loss. 
total_loss = ( diff --git a/rllib/algorithms/appo/tf/appo_tf_rl_module.py b/rllib/algorithms/appo/tf/appo_tf_rl_module.py index 44f41581759b..48287375a1f1 100644 --- a/rllib/algorithms/appo/tf/appo_tf_rl_module.py +++ b/rllib/algorithms/appo/tf/appo_tf_rl_module.py @@ -1,6 +1,9 @@ from typing import List - +from ray.rllib.algorithms.appo.appo_learner import ( + OLD_ACTION_DIST_LOGITS_KEY, + OLD_ACTION_DIST_KEY, +) from ray.rllib.algorithms.ppo.tf.ppo_tf_rl_module import PPOTfRLModule from ray.rllib.core.models.base import ACTOR from ray.rllib.core.models.tf.encoder import ENCODER_OUT @@ -14,14 +17,8 @@ _, tf, _ = try_import_tf() -OLD_ACTION_DIST_KEY = "old_action_dist" -OLD_ACTION_DIST_LOGITS_KEY = "old_action_dist_logits" - class APPOTfRLModule(PPOTfRLModule, RLModuleWithTargetNetworksInterface): - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - def setup(self): super().setup() catalog = self.config.get_catalog() @@ -41,16 +38,16 @@ def get_target_network_pairs(self): @override(PPOTfRLModule) def output_specs_train(self) -> List[str]: return [ - SampleBatch.ACTION_DIST, + SampleBatch.ACTION_DIST_INPUTS, SampleBatch.VF_PREDS, - OLD_ACTION_DIST_KEY, + OLD_ACTION_DIST_LOGITS_KEY, ] @override(PPOTfRLModule) def _forward_train(self, batch: NestedDict): outs = super()._forward_train(batch) old_pi_inputs_encoded = self.old_encoder(batch)[ENCODER_OUT][ACTOR] - old_action_dist_logits = self.old_pi(old_pi_inputs_encoded) + old_action_dist_logits = tf.stop_gradient(self.old_pi(old_pi_inputs_encoded)) old_action_dist = self.action_dist_cls.from_logits(old_action_dist_logits) outs[OLD_ACTION_DIST_KEY] = old_action_dist outs[OLD_ACTION_DIST_LOGITS_KEY] = old_action_dist_logits diff --git a/rllib/algorithms/appo/torch/__init__.py b/rllib/algorithms/appo/torch/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/rllib/algorithms/appo/torch/appo_torch_learner.py b/rllib/algorithms/appo/torch/appo_torch_learner.py new file mode 
100644 index 000000000000..56f48a0fe0ed --- /dev/null +++ b/rllib/algorithms/appo/torch/appo_torch_learner.py @@ -0,0 +1,206 @@ +from typing import Any, Dict, Mapping + +from ray.rllib.policy.sample_batch import SampleBatch +from ray.rllib.algorithms.appo.appo_learner import ( + AppoLearner, + LEARNER_RESULTS_CURR_KL_COEFF_KEY, + LEARNER_RESULTS_KL_KEY, + OLD_ACTION_DIST_KEY, +) +from ray.rllib.algorithms.impala.torch.vtrace_torch_v2 import ( + make_time_major, + vtrace_torch, +) +from ray.rllib.core.learner.learner import POLICY_LOSS_KEY, VF_LOSS_KEY, ENTROPY_KEY +from ray.rllib.core.learner.torch.torch_learner import TorchLearner +from ray.rllib.core.rl_module.marl_module import ModuleID, MultiAgentRLModule +from ray.rllib.core.rl_module.torch.torch_rl_module import ( + TorchDDPRLModuleWithTargetNetworksInterface, + TorchRLModule, +) +from ray.rllib.core.rl_module.rl_module_with_target_networks_interface import ( + RLModuleWithTargetNetworksInterface, +) +from ray.rllib.utils.annotations import override +from ray.rllib.utils.framework import try_import_torch +from ray.rllib.utils.typing import TensorType + +torch, nn = try_import_torch() + + +class APPOTorchLearner(AppoLearner, TorchLearner): + """Implements APPO loss / update logic on top of ImpalaTorchLearner.""" + + @override(TorchLearner) + def compute_loss_per_module( + self, module_id: str, batch: SampleBatch, fwd_out: Mapping[str, TensorType] + ) -> TensorType: + + values = fwd_out[SampleBatch.VF_PREDS] + target_policy_dist = fwd_out[SampleBatch.ACTION_DIST] + old_target_policy_dist = fwd_out[OLD_ACTION_DIST_KEY] + old_target_policy_actions_logp = old_target_policy_dist.logp( + batch[SampleBatch.ACTIONS] + ) + behaviour_actions_logp = batch[SampleBatch.ACTION_LOGP] + target_actions_logp = target_policy_dist.logp(batch[SampleBatch.ACTIONS]) + + behaviour_actions_logp_time_major = make_time_major( + behaviour_actions_logp, + trajectory_len=self.hps.rollout_frag_or_episode_len, + 
recurrent_seq_len=self.hps.recurrent_seq_len, + ) + target_actions_logp_time_major = make_time_major( + target_actions_logp, + trajectory_len=self.hps.rollout_frag_or_episode_len, + recurrent_seq_len=self.hps.recurrent_seq_len, + ) + old_actions_logp_time_major = make_time_major( + old_target_policy_actions_logp, + trajectory_len=self.hps.rollout_frag_or_episode_len, + recurrent_seq_len=self.hps.recurrent_seq_len, + ) + values_time_major = make_time_major( + values, + trajectory_len=self.hps.rollout_frag_or_episode_len, + recurrent_seq_len=self.hps.recurrent_seq_len, + ) + bootstrap_value = values_time_major[-1] + rewards_time_major = make_time_major( + batch[SampleBatch.REWARDS], + trajectory_len=self.hps.rollout_frag_or_episode_len, + recurrent_seq_len=self.hps.recurrent_seq_len, + ) + + # the discount factor that is used should be gamma except for timesteps where + # the episode is terminated. In that case, the discount factor should be 0. + discounts_time_major = ( + 1.0 + - make_time_major( + batch[SampleBatch.TERMINATEDS], + trajectory_len=self.hps.rollout_frag_or_episode_len, + recurrent_seq_len=self.hps.recurrent_seq_len, + ).float() + ) * self.hps.discount_factor + + # Note that vtrace will compute the main loop on the CPU for better performance. + vtrace_adjusted_target_values, pg_advantages = vtrace_torch( + target_action_log_probs=old_actions_logp_time_major, + behaviour_action_log_probs=behaviour_actions_logp_time_major, + discounts=discounts_time_major, + rewards=rewards_time_major, + values=values_time_major, + bootstrap_value=bootstrap_value, + clip_pg_rho_threshold=self.hps.vtrace_clip_pg_rho_threshold, + clip_rho_threshold=self.hps.vtrace_clip_rho_threshold, + ) + + # The policy gradients loss. 
+ is_ratio = torch.clip( + torch.exp(behaviour_actions_logp_time_major - old_actions_logp_time_major), + 0.0, + 2.0, + ) + logp_ratio = is_ratio * torch.exp( + target_actions_logp_time_major - behaviour_actions_logp_time_major + ) + + surrogate_loss = torch.minimum( + pg_advantages * logp_ratio, + pg_advantages + * torch.clip(logp_ratio, 1 - self.hps.clip_param, 1 + self.hps.clip_param), + ) + + if self.hps.use_kl_loss: + action_kl = old_target_policy_dist.kl(target_policy_dist) + mean_kl_loss = torch.mean(action_kl) + else: + mean_kl_loss = 0.0 + mean_pi_loss = -torch.mean(surrogate_loss) + + # The baseline loss. + delta = values_time_major - vtrace_adjusted_target_values + mean_vf_loss = 0.5 * torch.mean(delta**2) + + # The entropy loss. + mean_entropy_loss = -torch.mean(target_policy_dist.entropy()) + + # The summed weighted loss. + total_loss = ( + mean_pi_loss + + (mean_vf_loss * self.hps.vf_loss_coeff) + + (mean_entropy_loss * self.hps.entropy_coeff) + + (mean_kl_loss * self.curr_kl_coeffs_per_module[module_id]) + ) + + return { + self.TOTAL_LOSS_KEY: total_loss, + POLICY_LOSS_KEY: mean_pi_loss, + VF_LOSS_KEY: mean_vf_loss, + ENTROPY_KEY: -mean_entropy_loss, + LEARNER_RESULTS_KL_KEY: mean_kl_loss, + } + + @override(TorchLearner) + def _make_modules_ddp_if_necessary(self) -> None: + """Logic for (maybe) making all Modules within self._module DDP. + + This implementation differs from the super's default one in using the special + TorchDDPRLModuleWithTargetNetworksInterface wrapper, instead of the default + TorchDDPRLModule one. + """ + + # If the module is a MultiAgentRLModule and nn.Module we can simply assume + # all the submodules are registered. Otherwise, we need to loop through + # each submodule and move it to the correct device. + # TODO (Kourosh): This can result in missing modules if the user does not + # register them in the MultiAgentRLModule. We should find a better way to + # handle this. 
+ if self._distributed: + # Single agent module: Convert to + # `TorchDDPRLModuleWithTargetNetworksInterface`. + if isinstance(self._module, RLModuleWithTargetNetworksInterface): + self._module = TorchDDPRLModuleWithTargetNetworksInterface(self._module) + # Multi agent module: Convert each submodule to + # `TorchDDPRLModuleWithTargetNetworksInterface`. + else: + assert isinstance(self._module, MultiAgentRLModule) + for key in self._module.keys(): + sub_module = self._module[key] + if isinstance(sub_module, TorchRLModule): + # Wrap and override the module ID key in self._module. + self._module.add_module( + key, + TorchDDPRLModuleWithTargetNetworksInterface(sub_module), + override=True, + ) + + @override(AppoLearner) + def _update_module_target_networks(self, module_id: ModuleID): + module = self.module[module_id] + + target_current_network_pairs = module.get_target_network_pairs() + for target_network, current_network in target_current_network_pairs: + current_state_dict = current_network.state_dict() + new_state_dict = { + k: self.hps.tau * current_state_dict[k] + (1 - self.hps.tau) * v + for k, v in target_network.state_dict().items() + } + target_network.load_state_dict(new_state_dict) + + @override(AppoLearner) + def _update_module_kl_coeff( + self, module_id: ModuleID, sampled_kl: float + ) -> Dict[str, Any]: + # Update the current KL value based on the recently measured value. + # Increase. + kl_coeff_var = self.curr_kl_coeffs_per_module[module_id] + + if sampled_kl > 2.0 * self.hps.kl_target: + # TODO (Kourosh) why not *2.0? + kl_coeff_var.data *= 1.5 + # Decrease. 
+ elif sampled_kl < 0.5 * self.hps.kl_target: + kl_coeff_var.data *= 0.5 + + return {LEARNER_RESULTS_CURR_KL_COEFF_KEY: kl_coeff_var.item()} diff --git a/rllib/algorithms/appo/torch/appo_torch_policy_rlm.py b/rllib/algorithms/appo/torch/appo_torch_policy_rlm.py new file mode 100644 index 000000000000..81bc072eca43 --- /dev/null +++ b/rllib/algorithms/appo/torch/appo_torch_policy_rlm.py @@ -0,0 +1,213 @@ +import logging + +from ray.rllib.algorithms.impala.torch.vtrace_torch_v2 import ( + make_time_major, + vtrace_torch, +) +from ray.rllib.policy.torch_mixins import ( + EntropyCoeffSchedule, + LearningRateSchedule, + KLCoeffMixin, + TargetNetworkMixin, +) +from ray.rllib.algorithms.impala.impala_torch_policy import ( + VTraceOptimizer, +) +from ray.rllib.algorithms.ppo.ppo_torch_policy import validate_config +from ray.rllib.policy.sample_batch import SampleBatch +from ray.rllib.policy.torch_policy_v2 import TorchPolicyV2 +from ray.rllib.utils.annotations import override +from ray.rllib.utils.framework import try_import_torch +from ray.rllib.utils.torch_utils import ( + convert_to_torch_tensor, + explained_variance, + global_norm, +) + +torch, _ = try_import_torch() + +logger = logging.getLogger(__name__) + + +# TODO: Remove once we have a RLModule capable sampler class that can replace +# `Policy.compute_actions_from_input_dict()`. +class APPOTorchPolicyWithRLModule( + VTraceOptimizer, + LearningRateSchedule, + KLCoeffMixin, + EntropyCoeffSchedule, + TargetNetworkMixin, + TorchPolicyV2, +): + def __init__(self, observation_space, action_space, config): + validate_config(config) + # Initialize MixIns before super().__init__ because base class will call + # self.loss, which requires these MixIns to be initialized. 
+ LearningRateSchedule.__init__(self, config["lr"], config["lr_schedule"]) + EntropyCoeffSchedule.__init__( + self, config["entropy_coeff"], config["entropy_coeff_schedule"] + ) + # Although this is a no-op, we call __init__ here to make it clear + # that base.__init__ will use the make_model() call. + # VTraceClipGradients.__init__(self) + VTraceOptimizer.__init__(self) + self.framework = "tf2" + KLCoeffMixin.__init__(self, config) + # GradStatsMixin.__init__(self) + TorchPolicyV2.__init__(self, observation_space, action_space, config) + # Construct the target model and make its weights the same as the model. + self.target_model = self.make_rl_module() + self.target_model.load_state_dict(self.model.state_dict()) + + # Initiate TargetNetwork ops after loss initialization. + self._initialize_loss_from_dummy_batch() + TargetNetworkMixin.__init__(self) + + @override(TorchPolicyV2) + def loss(self, model, dist_class, train_batch): + train_batch[SampleBatch.ACTION_LOGP] + train_batch[SampleBatch.ACTIONS] + train_batch[SampleBatch.REWARDS] + train_batch[SampleBatch.TERMINATEDS] + + seqs_len = train_batch.get(SampleBatch.SEQ_LENS) + rollout_frag_or_episode_len = ( + self.config["rollout_fragment_length"] if not seqs_len else None + ) + drop_last = self.config["vtrace_drop_last_ts"] + + target_policy_fwd_out = model.forward_train(train_batch) + values = target_policy_fwd_out[SampleBatch.VF_PREDS] + target_policy_dist = target_policy_fwd_out[SampleBatch.ACTION_DIST] + + old_target_policy_fwd_out = self.target_model.forward_train(train_batch) + old_target_policy_dist = old_target_policy_fwd_out[SampleBatch.ACTION_DIST] + + behaviour_actions_logp = train_batch[SampleBatch.ACTION_LOGP] + target_actions_logp = target_policy_dist.logp(train_batch[SampleBatch.ACTIONS]) + old_target_actions_logp = old_target_policy_dist.logp( + train_batch[SampleBatch.ACTIONS] + ) + behaviour_actions_logp_time_major = make_time_major( + behaviour_actions_logp, + 
trajectory_len=rollout_frag_or_episode_len, + recurrent_seq_len=seqs_len, + drop_last=drop_last, + ) + target_actions_logp_time_major = make_time_major( + target_actions_logp, + trajectory_len=rollout_frag_or_episode_len, + recurrent_seq_len=seqs_len, + drop_last=drop_last, + ) + old_target_actions_logp_time_major = make_time_major( + old_target_actions_logp, + trajectory_len=rollout_frag_or_episode_len, + recurrent_seq_len=seqs_len, + drop_last=drop_last, + ) + values_time_major = make_time_major( + values, + trajectory_len=rollout_frag_or_episode_len, + recurrent_seq_len=seqs_len, + drop_last=drop_last, + ) + bootstrap_value = values_time_major[-1] + rewards_time_major = make_time_major( + train_batch[SampleBatch.REWARDS], + trajectory_len=rollout_frag_or_episode_len, + recurrent_seq_len=seqs_len, + drop_last=drop_last, + ) + + # how to compute discouts? + # should they be pre computed? + discounts_time_major = ( + 1.0 + - make_time_major( + train_batch[SampleBatch.TERMINATEDS], + trajectory_len=rollout_frag_or_episode_len, + recurrent_seq_len=seqs_len, + drop_last=drop_last, + ).float() + ) * self.config["gamma"] + + # Note that vtrace will compute the main loop on the CPU for better performance. 
+ vtrace_adjusted_target_values, pg_advantages = vtrace_torch( + target_action_log_probs=old_target_actions_logp_time_major, + behaviour_action_log_probs=behaviour_actions_logp_time_major, + discounts=discounts_time_major, + rewards=rewards_time_major, + values=values_time_major, + bootstrap_value=bootstrap_value, + clip_pg_rho_threshold=self.config["vtrace_clip_pg_rho_threshold"], + clip_rho_threshold=self.config["vtrace_clip_rho_threshold"], + ) + + is_ratio = torch.clip( + torch.exp( + behaviour_actions_logp_time_major - target_actions_logp_time_major + ), + 0.0, + 2.0, + ) + logp_ratio = is_ratio * torch.exp( + target_actions_logp_time_major - behaviour_actions_logp_time_major + ) + + clip_param = self.config["clip_param"] + surrogate_loss = torch.minimum( + pg_advantages * logp_ratio, + (pg_advantages * torch.clip(logp_ratio, 1 - clip_param, 1 + clip_param)), + ) + action_kl = old_target_policy_dist.kl(target_policy_dist) + mean_kl_loss = torch.mean(action_kl) + mean_pi_loss = -torch.mean(surrogate_loss) + + # The baseline loss. + delta = values_time_major - vtrace_adjusted_target_values + mean_vf_loss = 0.5 * torch.mean(delta**2) + + # The entropy loss. + mean_entropy_loss = -torch.mean(target_policy_dist.entropy()) + + # The summed weighted loss. 
+ total_loss = ( + mean_pi_loss + + (mean_vf_loss * self.config["vf_loss_coeff"]) + + (mean_entropy_loss * self.entropy_coeff) + + (mean_kl_loss * self.kl_coeff) + ) + + self.stats = { + "total_loss": total_loss, + "policy_loss": mean_pi_loss, + "vf_loss": mean_vf_loss, + "values": values_time_major, + "entropy_loss": mean_entropy_loss, + "vtrace_adjusted_target_values": vtrace_adjusted_target_values, + "mean_kl": mean_kl_loss, + } + return total_loss + + @override(TorchPolicyV2) + def stats_fn(self, train_batch: SampleBatch): + return { + "cur_lr": convert_to_torch_tensor(self.cur_lr).type(torch.float64), + "policy_loss": self.stats["policy_loss"], + "entropy": self.stats["entropy_loss"], + "entropy_coeff": convert_to_torch_tensor(self.entropy_coeff).type( + torch.float64 + ), + "var_gnorm": global_norm(self.model.parameters()), + "vf_loss": self.stats["vf_loss"], + "vf_explained_var": explained_variance( + torch.reshape(self.stats["vtrace_adjusted_target_values"], [-1]), + torch.reshape(self.stats["values"], [-1]), + ), + "mean_kl": self.stats["mean_kl"], + } + + @override(TorchPolicyV2) + def get_batch_divisibility_req(self) -> int: + return self.config["rollout_fragment_length"] diff --git a/rllib/algorithms/appo/torch/appo_torch_rl_module.py b/rllib/algorithms/appo/torch/appo_torch_rl_module.py new file mode 100644 index 000000000000..805ca11c3352 --- /dev/null +++ b/rllib/algorithms/appo/torch/appo_torch_rl_module.py @@ -0,0 +1,51 @@ +from typing import List + +from ray.rllib.algorithms.appo.appo_learner import ( + OLD_ACTION_DIST_KEY, + OLD_ACTION_DIST_LOGITS_KEY, +) +from ray.rllib.algorithms.ppo.torch.ppo_torch_rl_module import PPOTorchRLModule +from ray.rllib.core.models.base import ACTOR +from ray.rllib.core.models.tf.encoder import ENCODER_OUT +from ray.rllib.core.rl_module.rl_module_with_target_networks_interface import ( + RLModuleWithTargetNetworksInterface, +) +from ray.rllib.policy.sample_batch import SampleBatch +from ray.rllib.utils.annotations 
import override +from ray.rllib.utils.nested_dict import NestedDict + + +class APPOTorchRLModule(PPOTorchRLModule, RLModuleWithTargetNetworksInterface): + def setup(self): + super().setup() + catalog = self.config.get_catalog() + # Old pi and old encoder are the "target networks" that are used for + # the stabilization of the updates of the current pi and encoder. + self.old_pi = catalog.build_pi_head(framework=self.framework) + self.old_encoder = catalog.build_actor_critic_encoder(framework=self.framework) + self.old_pi.load_state_dict(self.pi.state_dict()) + self.old_encoder.load_state_dict(self.encoder.state_dict()) + self.old_pi.trainable = False + self.old_encoder.trainable = False + + @override(RLModuleWithTargetNetworksInterface) + def get_target_network_pairs(self): + return [(self.old_pi, self.pi), (self.old_encoder, self.encoder)] + + @override(PPOTorchRLModule) + def output_specs_train(self) -> List[str]: + return [ + SampleBatch.ACTION_DIST, + SampleBatch.VF_PREDS, + OLD_ACTION_DIST_KEY, + ] + + @override(PPOTorchRLModule) + def _forward_train(self, batch: NestedDict): + outs = super()._forward_train(batch) + old_pi_inputs_encoded = self.old_encoder(batch)[ENCODER_OUT][ACTOR] + old_action_dist_logits = self.old_pi(old_pi_inputs_encoded) + old_action_dist = self.action_dist_cls.from_logits(old_action_dist_logits) + outs[OLD_ACTION_DIST_KEY] = old_action_dist + outs[OLD_ACTION_DIST_LOGITS_KEY] = old_action_dist_logits + return outs diff --git a/rllib/algorithms/impala/impala.py b/rllib/algorithms/impala/impala.py index 93f0990e74bd..57c47e3ec940 100644 --- a/rllib/algorithms/impala/impala.py +++ b/rllib/algorithms/impala/impala.py @@ -109,7 +109,11 @@ def __init__(self, algo_class=None): self.vtrace = True self.vtrace_clip_rho_threshold = 1.0 self.vtrace_clip_pg_rho_threshold = 1.0 - self.vtrace_drop_last_ts = True + # TODO (sven): Deprecate this setting. It makes no sense to drop the last ts. 
+ # It's actually dangerous if there are important rewards "hiding" in that ts. + # This setting is already ignored (always False) on the new Learner API + # (if _enable_learner_api=True). + self.vtrace_drop_last_ts = False self.num_multi_gpu_tower_stacks = 1 self.minibatch_buffer_size = 1 self.num_sgd_iter = 1 @@ -144,7 +148,7 @@ def __init__(self, algo_class=None): # Override some of AlgorithmConfig's default values with IMPALA-specific values. self.rollout_fragment_length = 50 self.train_batch_size = 500 - self.minibatch_size = self.train_batch_size + self._minibatch_size = "auto" self.num_rollout_workers = 2 self.num_gpus = 1 self.lr = 0.0005 @@ -176,7 +180,7 @@ def training( gamma: Optional[float] = NotProvided, num_multi_gpu_tower_stacks: Optional[int] = NotProvided, minibatch_buffer_size: Optional[int] = NotProvided, - minibatch_size: Optional[int] = NotProvided, + minibatch_size: Optional[Union[int, str]] = NotProvided, num_sgd_iter: Optional[int] = NotProvided, replay_proportion: Optional[float] = NotProvided, replay_buffer_num_slots: Optional[int] = NotProvided, @@ -230,10 +234,11 @@ def training( minibatch_buffer_size: How many train batches should be retained for minibatching. This conf only has an effect if `num_sgd_iter > 1`. minibatch_size: The size of minibatches that are trained over during - each SGD iteration. Note this only has an effect if - `_enable_learner_api` == True. - Note: minibatch_size must be a multiple of rollout_fragment_length or - sequence_length and smaller than or equal to train_batch_size. + each SGD iteration. If "auto", will use the same value as + `train_batch_size`. + Note that this setting only has an effect if `_enable_learner_api=True` + and it must be a multiple of `rollout_fragment_length` or + `sequence_length` and smaller than or equal to `train_batch_size`. num_sgd_iter: Number of passes to make over each train batch. replay_proportion: Set >0 to enable experience replay. 
Saved samples will be replayed with a p:1 proportion to new data samples. @@ -349,7 +354,7 @@ def training( if gamma is not NotProvided: self.gamma = gamma if minibatch_size is not NotProvided: - self.minibatch_size = minibatch_size + self._minibatch_size = minibatch_size return self @@ -405,11 +410,10 @@ def validate(self) -> None: and self.minibatch_size <= self.train_batch_size ): raise ValueError( - "minibatch_size must be a multiple of rollout_fragment_length and " - "must be smaller than or equal to train_batch_size. Got" - f" minibatch_size={self.minibatch_size}, train_batch_size=" - f"{self.train_batch_size}, and rollout_fragment_length=" - f"{self.get_rollout_fragment_length()}" + f"`minibatch_size` ({self._minibatch_size}) must either be 'auto' " + "or a multiple of `rollout_fragment_length` " + f"({self.rollout_fragment_length}) while at the same time smaller " + f"than or equal to `train_batch_size` ({self.train_batch_size})!" ) @override(AlgorithmConfig) @@ -420,9 +424,8 @@ def get_learner_hyperparameters(self) -> ImpalaHyperparameters: discount_factor=self.gamma, entropy_coeff=self.entropy_coeff, vf_loss_coeff=self.vf_loss_coeff, - vtrace_drop_last_ts=self.vtrace_drop_last_ts, vtrace_clip_rho_threshold=self.vtrace_clip_rho_threshold, - vtrace_clip_pg_rho_threshold=(self.vtrace_clip_pg_rho_threshold), + vtrace_clip_pg_rho_threshold=self.vtrace_clip_pg_rho_threshold, **dataclasses.asdict(base_hps), ) # TODO: We currently do not use the `recurrent_seq_len` property anyways. @@ -437,6 +440,7 @@ def get_learner_hyperparameters(self) -> ImpalaHyperparameters: ) return learner_hps + # TODO (sven): Make these get_... methods all read-only @properties instead. def get_replay_ratio(self) -> float: """Returns replay ratio (between 0.0 and 1.0) based off self.replay_proportion. 
@@ -444,6 +448,16 @@ def get_replay_ratio(self) -> float: """ return (1 / self.replay_proportion) if self.replay_proportion > 0 else 0.0 + @property + def minibatch_size(self): + # If 'auto', use the train_batch_size (meaning each SGD iter is a single pass + # through the entire train batch). Otherwise, use user provided setting. + return ( + self.train_batch_size + if self._minibatch_size == "auto" + else self._minibatch_size + ) + @override(AlgorithmConfig) def get_default_learner_class(self): if self.framework_str == "torch": @@ -709,8 +723,27 @@ def training_step(self) -> ResultDict: self._counters[NUM_AGENT_STEPS_SAMPLED] += batch.agent_steps() # Concatenate single batches into batches of size `train_batch_size`. self.concatenate_batches_and_pre_queue(batches) + # Using the Learner API. Call `update()` on our LearnerGroup object with + # all collected batches. if self.config._enable_learner_api: train_results = self.learn_on_processed_samples() + additional_results = self.learner_group.additional_update( + module_ids_to_update=set(train_results.keys()) - {ALL_MODULES}, + timestep=self._counters[ + NUM_ENV_STEPS_TRAINED + if self.config.count_steps_by == "env_steps" + else NUM_AGENT_STEPS_TRAINED + ], + # TODO (sven): Feels hacked, but solves the problem of algos inheriting + # from IMPALA (like APPO). In the old stack, we didn't have this + # problem b/c IMPALA didn't need to call any additional update methods + # as the entropy- and lr-schedules were handled by + # `Policy.on_global_var_update()`. + **self._get_additional_update_kwargs(train_results), + ) + for key, res in additional_results.items(): + if key in train_results: + train_results[key].update(res) else: # Move train batches (of size `train_batch_size`) onto learner queue. 
self.place_processed_samples_on_learner_thread_queue() @@ -722,12 +755,10 @@ def training_step(self) -> ResultDict: if self.config._enable_learner_api: if train_results: pids = list(set(train_results.keys()) - {ALL_MODULES}) - else: - pids = [] - self.update_workers_from_learner_group( - workers_that_need_updates=workers_that_need_updates, - policy_ids=pids, - ) + self.update_workers_from_learner_group( + workers_that_need_updates=workers_that_need_updates, + policy_ids=pids, + ) else: pids = list(train_results.keys()) self.update_workers_if_necessary( @@ -747,7 +778,7 @@ def training_step(self) -> ResultDict: if self.config._enable_learner_api: if train_results: - # store the most recent result and return it if no new result is + # Store the most recent result and return it if no new result is # available. This keeps backwards compatibility with the old # training stack / results reporting stack. This is necessary # any time we develop an asynchronous algorithm. @@ -920,6 +951,7 @@ def learn_on_processed_samples(self) -> ResultDict: """ result = {} + # There are batches on the queue -> Send them to the learner group. if self.batches_to_place_on_learner: batch = self.batches_to_place_on_learner.pop(0) # If there are no learner workers and learning is directly on the driver @@ -932,18 +964,18 @@ def learn_on_processed_samples(self) -> ResultDict: num_iters=self.config.num_sgd_iter, minibatch_size=self.config.minibatch_size, ) + # Nothing on the queue -> Don't send requests to learner group. 
else: lg_results = None if lg_results: - self._counters[NUM_ENV_STEPS_TRAINED] += lg_results[ALL_MODULES][ + self._counters[NUM_ENV_STEPS_TRAINED] += lg_results[ALL_MODULES].pop( NUM_ENV_STEPS_TRAINED - ] - self._counters[NUM_AGENT_STEPS_TRAINED] += lg_results[ALL_MODULES][ + ) + self._counters[NUM_AGENT_STEPS_TRAINED] += lg_results[ALL_MODULES].pop( NUM_AGENT_STEPS_TRAINED - ] - del lg_results[ALL_MODULES][NUM_ENV_STEPS_TRAINED] - del lg_results[ALL_MODULES][NUM_AGENT_STEPS_TRAINED] + ) + self._counters.update(self.learner_group.get_in_queue_stats()) result = lg_results return result @@ -1025,10 +1057,9 @@ def process_experiences_directly( Batches that have been processed by the mixin buffer. """ - processed_batches = [] batches = [b for _, b in worker_to_sample_batches] - if not batches: - return processed_batches + processed_batches = [] + for batch in batches: assert not isinstance( batch, ObjectRef @@ -1191,14 +1222,18 @@ def update_workers_if_necessary( timeout_seconds=0, # Don't wait for the workers to finish. ) + def _get_additional_update_kwargs(self, train_results: dict) -> dict: + """Returns the kwargs to `LearnerGroup.additional_update()`. + + Should be overridden by subclasses to specify wanted/needed kwargs for + their own implementation of `Learner.additional_update_per_module()`. 
+ """ + return {} + @override(Algorithm) def _compile_iteration_results(self, *args, **kwargs): result = super()._compile_iteration_results(*args, **kwargs) - if self.config._enable_learner_api: - result["custom_metrics"] = { - "learner_group_queue_size": self.learner_group.in_queue_size - } - else: + if not self.config._enable_learner_api: result = self._learner_thread.add_learner_metrics( result, overwrite_learner_info=False ) diff --git a/rllib/algorithms/impala/impala_learner.py b/rllib/algorithms/impala/impala_learner.py index 4687ce0e9f9b..db0d28e1f9a9 100644 --- a/rllib/algorithms/impala/impala_learner.py +++ b/rllib/algorithms/impala/impala_learner.py @@ -1,5 +1,6 @@ +from collections import defaultdict from dataclasses import dataclass -from typing import Any, List, Mapping +from typing import Any, List, Mapping, Optional, Union import numpy as np import tree # pip install dm_tree @@ -12,6 +13,7 @@ NUM_AGENT_STEPS_TRAINED, NUM_ENV_STEPS_TRAINED, ) +from ray.rllib.utils.schedules.piecewise_schedule import PiecewiseSchedule from ray.rllib.utils.typing import ResultDict @@ -36,12 +38,32 @@ class to configure your algorithm. discount_factor: float = None vtrace_clip_rho_threshold: float = None vtrace_clip_pg_rho_threshold: float = None - vtrace_drop_last_ts: bool = None vf_loss_coeff: float = None entropy_coeff: float = None + entropy_coeff_schedule: Optional[List[List[Union[int, float]]]] = None class ImpalaLearner(Learner): + @override(Learner) + def build(self) -> None: + super().build() + + # Build entropy coeff scheduling tools. + self.entropy_coeff_scheduler = None + if self.hps.entropy_coeff_schedule: + # Custom schedule, based on list of + # ([ts], [value to be reached by ts])-tuples. 
+ self.entropy_coeff_schedule_per_module = defaultdict( + lambda: PiecewiseSchedule( + self.hps.entropy_coeff_schedule, + outside_value=self.hps.entropy_coeff_schedule[-1][-1], + framework=None, + ) + ) + self.curr_entropy_coeffs_per_module = defaultdict( + lambda: self._get_tensor_variable(self.hps.entropy_coeff) + ) + @override(Learner) def compile_results( self, diff --git a/rllib/algorithms/impala/tests/test_impala_learner.py b/rllib/algorithms/impala/tests/test_impala_learner.py index 5358816b8195..6adc936a45ba 100644 --- a/rllib/algorithms/impala/tests/test_impala_learner.py +++ b/rllib/algorithms/impala/tests/test_impala_learner.py @@ -7,10 +7,7 @@ from ray.rllib.core.rl_module.rl_module import SingleAgentRLModuleSpec from ray.rllib.policy.sample_batch import SampleBatch from ray.rllib.utils.framework import try_import_torch, try_import_tf -from ray.rllib.utils.metrics import ALL_MODULES -from ray.rllib.utils.test_utils import check from ray.rllib.utils.test_utils import framework_iterator -from ray.rllib.utils.torch_utils import convert_to_torch_tensor torch, nn = try_import_torch() tf1, tf, _ = try_import_tf() @@ -80,18 +77,9 @@ def test_impala_loss(self): # Deprecate the current default and set it to {}. 
config.exploration_config = {} - for fw in framework_iterator(config, frameworks=["tf2", "torch"]): - trainer = config.build() - policy = trainer.get_policy() - - if fw == "tf2": - train_batch = tf.nest.map_structure( - lambda x: tf.convert_to_tensor(x), FAKE_BATCH - ) - elif fw == "torch": - train_batch = convert_to_torch_tensor(SampleBatch(FAKE_BATCH)) - - policy_loss = policy.loss(policy.model, policy.dist_class, train_batch) + for _ in framework_iterator(config, frameworks=["tf2", "torch"]): + algo = config.build() + policy = algo.get_policy() train_batch = SampleBatch(FAKE_BATCH) algo_config = config.copy(copy_frozen=False) @@ -109,12 +97,10 @@ def test_impala_loss(self): ) learner_group_config.num_learner_workers = 0 learner_group = learner_group_config.build() - learner_group.set_weights(trainer.get_weights()) - results = learner_group.update(train_batch.as_multi_agent()) - - learner_group_loss = results[ALL_MODULES]["total_loss"] + learner_group.set_weights(algo.get_weights()) + learner_group.update(train_batch.as_multi_agent()) - check(learner_group_loss, policy_loss) + algo.stop() if __name__ == "__main__": diff --git a/rllib/algorithms/impala/tf/impala_tf_learner.py b/rllib/algorithms/impala/tf/impala_tf_learner.py index 24de96064be7..fa2481b5bbd5 100644 --- a/rllib/algorithms/impala/tf/impala_tf_learner.py +++ b/rllib/algorithms/impala/tf/impala_tf_learner.py @@ -1,8 +1,11 @@ -from typing import Mapping +from typing import Any, Dict, Mapping from ray.rllib.algorithms.impala.impala_learner import ImpalaLearner from ray.rllib.algorithms.impala.tf.vtrace_tf_v2 import make_time_major, vtrace_tf2 +from ray.rllib.algorithms.ppo.ppo_learner import LEARNER_RESULTS_CURR_ENTROPY_COEFF_KEY +from ray.rllib.core.learner.learner import ENTROPY_KEY from ray.rllib.core.learner.tf.tf_learner import TfLearner +from ray.rllib.core.rl_module.rl_module import ModuleID from ray.rllib.policy.sample_batch import SampleBatch from ray.rllib.utils.annotations import override from 
ray.rllib.utils.framework import try_import_tf @@ -11,13 +14,9 @@ _, tf, _ = try_import_tf() -class ImpalaTfLearner(TfLearner, ImpalaLearner): +class ImpalaTfLearner(ImpalaLearner, TfLearner): """Implements the IMPALA loss function in tensorflow.""" - def __init__(self, *args, **kwargs): - TfLearner.__init__(self, *args, **kwargs) - ImpalaLearner.__init__(self, *args, **kwargs) - @override(TfLearner) def compute_loss_per_module( self, module_id: str, batch: SampleBatch, fwd_out: Mapping[str, TensorType] @@ -32,26 +31,22 @@ def compute_loss_per_module( behaviour_actions_logp, trajectory_len=self.hps.rollout_frag_or_episode_len, recurrent_seq_len=self.hps.recurrent_seq_len, - drop_last=self.hps.vtrace_drop_last_ts, ) target_actions_logp_time_major = make_time_major( target_actions_logp, trajectory_len=self.hps.rollout_frag_or_episode_len, recurrent_seq_len=self.hps.recurrent_seq_len, - drop_last=self.hps.vtrace_drop_last_ts, ) values_time_major = make_time_major( values, trajectory_len=self.hps.rollout_frag_or_episode_len, recurrent_seq_len=self.hps.recurrent_seq_len, - drop_last=self.hps.vtrace_drop_last_ts, ) bootstrap_value = values_time_major[-1] rewards_time_major = make_time_major( batch[SampleBatch.REWARDS], trajectory_len=self.hps.rollout_frag_or_episode_len, recurrent_seq_len=self.hps.recurrent_seq_len, - drop_last=self.hps.vtrace_drop_last_ts, ) # the discount factor that is used should be gamma except for timesteps where @@ -63,21 +58,21 @@ def compute_loss_per_module( batch[SampleBatch.TERMINATEDS], trajectory_len=self.hps.rollout_frag_or_episode_len, recurrent_seq_len=self.hps.recurrent_seq_len, - drop_last=self.hps.vtrace_drop_last_ts, ), dtype=tf.float32, ) ) * self.hps.discount_factor - # TODO(Artur): See if we should compute v-trace corrected targets on CPU + + # Note that vtrace will compute the main loop on the CPU for better performance. 
vtrace_adjusted_target_values, pg_advantages = vtrace_tf2( target_action_log_probs=target_actions_logp_time_major, behaviour_action_log_probs=behaviour_actions_logp_time_major, + discounts=discounts_time_major, rewards=rewards_time_major, values=values_time_major, bootstrap_value=bootstrap_value, clip_pg_rho_threshold=self.hps.vtrace_clip_pg_rho_threshold, clip_rho_threshold=self.hps.vtrace_clip_rho_threshold, - discounts=discounts_time_major, ) # Sample size is T x B, where T is the trajectory length and B is the batch size @@ -93,16 +88,35 @@ def compute_loss_per_module( mean_vf_loss = vf_loss / batch_size # The entropy loss. - entropy_loss = -tf.reduce_sum(target_actions_logp_time_major) + mean_entropy_loss = -tf.reduce_mean(target_policy_dist.entropy()) # The summed weighted loss. total_loss = ( pi_loss + vf_loss * self.hps.vf_loss_coeff - + entropy_loss * self.hps.entropy_coeff + + mean_entropy_loss * self.hps.entropy_coeff ) return { self.TOTAL_LOSS_KEY: total_loss, "pi_loss": mean_pi_loss, "vf_loss": mean_vf_loss, + ENTROPY_KEY: -mean_entropy_loss, } + + @override(ImpalaLearner) + def additional_update_per_module( + self, module_id: ModuleID, timestep: int + ) -> Dict[str, Any]: + results = super().additional_update_per_module( + module_id, + timestep=timestep, + ) + + # Update entropy coefficient. 
+ value = self.hps.entropy_coeff + if self.hps.entropy_coeff_schedule is not None: + value = self.entropy_coeff_schedule_per_module[module_id].value(t=timestep) + self.curr_entropy_coeffs_per_module[module_id].assign(value) + results.update({LEARNER_RESULTS_CURR_ENTROPY_COEFF_KEY: value}) + + return results diff --git a/rllib/algorithms/impala/tf/impala_tf_policy_rlm.py b/rllib/algorithms/impala/tf/impala_tf_policy_rlm.py index 0244a96c0ac2..f24c6e88c9a1 100644 --- a/rllib/algorithms/impala/tf/impala_tf_policy_rlm.py +++ b/rllib/algorithms/impala/tf/impala_tf_policy_rlm.py @@ -104,15 +104,17 @@ def loss( dtype=tf.float32, ) ) * self.config["gamma"] + + # Note that vtrace will compute the main loop on the CPU for better performance. vtrace_adjusted_target_values, pg_advantages = vtrace_tf2( target_action_log_probs=target_actions_logp_time_major, behaviour_action_log_probs=behaviour_actions_logp_time_major, + discounts=discounts_time_major, rewards=rewards_time_major, values=values_time_major, bootstrap_value=bootstrap_value, clip_pg_rho_threshold=self.config["vtrace_clip_pg_rho_threshold"], clip_rho_threshold=self.config["vtrace_clip_rho_threshold"], - discounts=discounts_time_major, ) # The policy gradients loss. @@ -125,20 +127,20 @@ def loss( mean_vf_loss = 0.5 * tf.reduce_mean(tf.math.pow(delta, 2.0)) # The entropy loss. - entropy_loss = -tf.reduce_sum(target_actions_logp_time_major) + mean_entropy_loss = -tf.reduce_mean(target_policy_dist.entropy()) # The summed weighted loss. 
total_loss = ( pi_loss + vf_loss * self.config["vf_loss_coeff"] - + entropy_loss * self.entropy_coeff + + mean_entropy_loss * self.entropy_coeff ) self.stats = { "total_loss": total_loss, "pi_loss": mean_pi_loss, "vf_loss": mean_vf_loss, "values": values_time_major, - "entropy_loss": entropy_loss, + "entropy_loss": mean_entropy_loss, "vtrace_adjusted_target_values": vtrace_adjusted_target_values, } return total_loss diff --git a/rllib/algorithms/impala/tf/vtrace_tf_v2.py b/rllib/algorithms/impala/tf/vtrace_tf_v2.py index 5712a191d811..5f878ddbc1c1 100644 --- a/rllib/algorithms/impala/tf/vtrace_tf_v2.py +++ b/rllib/algorithms/impala/tf/vtrace_tf_v2.py @@ -1,11 +1,8 @@ -from typing import List, Union, TYPE_CHECKING +from typing import List, Union from ray.rllib.utils.framework import try_import_tf _, tf, _ = try_import_tf() -if TYPE_CHECKING: - _, tf, _ = try_import_tf() - def make_time_major( tensor: Union["tf.Tensor", List["tf.Tensor"]], @@ -115,28 +112,6 @@ def vtrace_tf2( """ log_rhos = target_action_log_probs - behaviour_action_log_probs - discounts = tf.convert_to_tensor(discounts, dtype=tf.float32) - rewards = tf.convert_to_tensor(rewards, dtype=tf.float32) - values = tf.convert_to_tensor(values, dtype=tf.float32) - bootstrap_value = tf.convert_to_tensor(bootstrap_value, dtype=tf.float32) - if clip_rho_threshold is not None: - clip_rho_threshold = tf.convert_to_tensor(clip_rho_threshold, dtype=tf.float32) - if clip_pg_rho_threshold is not None: - clip_pg_rho_threshold = tf.convert_to_tensor( - clip_pg_rho_threshold, dtype=tf.float32 - ) - - # Make sure tensor ranks are consistent. - rho_rank = log_rhos.shape.ndims # Usually 2. 
- values.shape.assert_has_rank(rho_rank) - bootstrap_value.shape.assert_has_rank(rho_rank - 1) - discounts.shape.assert_has_rank(rho_rank) - rewards.shape.assert_has_rank(rho_rank) - if clip_rho_threshold is not None: - clip_rho_threshold.shape.assert_has_rank(0) - if clip_pg_rho_threshold is not None: - clip_pg_rho_threshold.shape.assert_has_rank(0) - rhos = tf.math.exp(log_rhos) if clip_rho_threshold is not None: clipped_rhos = tf.minimum(clip_rho_threshold, rhos, name="clipped_rhos") @@ -164,17 +139,18 @@ def scanfunc(acc, sequence_item): discount_t, c_t, delta_t = sequence_item return delta_t + discount_t * c_t * acc - initial_values = tf.zeros_like(bootstrap_value) - vs_minus_v_xs = tf.nest.map_structure( - tf.stop_gradient, - tf.scan( - fn=scanfunc, - elems=sequences, - initializer=initial_values, - parallel_iterations=1, - name="scan", - ), - ) + with tf.device("/cpu:0"): + initial_values = tf.zeros_like(bootstrap_value) + vs_minus_v_xs = tf.nest.map_structure( + tf.stop_gradient, + tf.scan( + fn=scanfunc, + elems=sequences, + initializer=initial_values, + parallel_iterations=1, + name="scan", + ), + ) # Reverse the results back to original order. 
vs_minus_v_xs = tf.reverse(vs_minus_v_xs, [0]) diff --git a/rllib/algorithms/impala/torch/impala_torch_learner.py b/rllib/algorithms/impala/torch/impala_torch_learner.py index 9160659e8f5b..907c2d4e3261 100644 --- a/rllib/algorithms/impala/torch/impala_torch_learner.py +++ b/rllib/algorithms/impala/torch/impala_torch_learner.py @@ -1,11 +1,14 @@ -from typing import Mapping +from typing import Any, Dict, Mapping from ray.rllib.algorithms.impala.impala_learner import ImpalaLearner from ray.rllib.algorithms.impala.torch.vtrace_torch_v2 import ( vtrace_torch, make_time_major, ) +from ray.rllib.algorithms.ppo.ppo_learner import LEARNER_RESULTS_CURR_ENTROPY_COEFF_KEY +from ray.rllib.core.learner.learner import ENTROPY_KEY from ray.rllib.core.learner.torch.torch_learner import TorchLearner +from ray.rllib.core.rl_module.rl_module import ModuleID from ray.rllib.policy.sample_batch import SampleBatch from ray.rllib.utils.annotations import override from ray.rllib.utils.framework import try_import_torch @@ -15,13 +18,9 @@ torch, nn = try_import_torch() -class ImpalaTorchLearner(TorchLearner, ImpalaLearner): +class ImpalaTorchLearner(ImpalaLearner, TorchLearner): """Implements the IMPALA loss function in torch.""" - def __init__(self, *args, **kwargs): - TorchLearner.__init__(self, *args, **kwargs) - ImpalaLearner.__init__(self, *args, **kwargs) - @override(TorchLearner) def compute_loss_per_module( self, module_id: str, batch: SampleBatch, fwd_out: Mapping[str, TensorType] @@ -40,26 +39,22 @@ def compute_loss_per_module( target_actions_logp, trajectory_len=self.hps.rollout_frag_or_episode_len, recurrent_seq_len=self.hps.recurrent_seq_len, - drop_last=self.hps.vtrace_drop_last_ts, ) behaviour_actions_logp_time_major = make_time_major( behaviour_actions_logp, trajectory_len=self.hps.rollout_frag_or_episode_len, recurrent_seq_len=self.hps.recurrent_seq_len, - drop_last=self.hps.vtrace_drop_last_ts, ) values_time_major = make_time_major( values, 
trajectory_len=self.hps.rollout_frag_or_episode_len, recurrent_seq_len=self.hps.recurrent_seq_len, - drop_last=self.hps.vtrace_drop_last_ts, ) bootstrap_value = values_time_major[-1] rewards_time_major = make_time_major( batch[SampleBatch.REWARDS], trajectory_len=self.hps.rollout_frag_or_episode_len, recurrent_seq_len=self.hps.recurrent_seq_len, - drop_last=self.hps.vtrace_drop_last_ts, ) # the discount factor that is used should be gamma except for timesteps where @@ -70,7 +65,6 @@ def compute_loss_per_module( batch[SampleBatch.TERMINATEDS], trajectory_len=self.hps.rollout_frag_or_episode_len, recurrent_seq_len=self.hps.recurrent_seq_len, - drop_last=self.hps.vtrace_drop_last_ts, ).type(dtype=torch.float32) ) * self.hps.discount_factor @@ -78,7 +72,7 @@ def compute_loss_per_module( # dist_class` in the old code torch impala policy? device = behaviour_actions_logp_time_major[0].device - # TODO(Artur): See if we should compute v-trace corrected targets on CPU + # Note that vtrace will compute the main loop on the CPU for better performance. vtrace_adjusted_target_values, pg_advantages = vtrace_torch( target_action_log_probs=target_actions_logp_time_major, behaviour_action_log_probs=behaviour_actions_logp_time_major, @@ -110,16 +104,35 @@ def compute_loss_per_module( mean_vf_loss = vf_loss / batch_size # The entropy loss. - entropy_loss = -torch.sum(target_actions_logp_time_major) + mean_entropy_loss = -torch.mean(target_policy_dist.entropy()) # The summed weighted loss. 
total_loss = ( pi_loss + vf_loss * self.hps.vf_loss_coeff - + entropy_loss * self.hps.entropy_coeff + + mean_entropy_loss * self.hps.entropy_coeff ) return { self.TOTAL_LOSS_KEY: total_loss, "pi_loss": mean_pi_loss, "vf_loss": mean_vf_loss, + ENTROPY_KEY: -mean_entropy_loss, } + + @override(ImpalaLearner) + def additional_update_per_module( + self, module_id: ModuleID, timestep: int + ) -> Dict[str, Any]: + results = super().additional_update_per_module( + module_id, + timestep=timestep, + ) + + # Update entropy coefficient. + value = self.hps.entropy_coeff + if self.hps.entropy_coeff_schedule is not None: + value = self.entropy_coeff_schedule_per_module[module_id].value(t=timestep) + self.curr_entropy_coeffs_per_module[module_id].data = torch.tensor(value) + results.update({LEARNER_RESULTS_CURR_ENTROPY_COEFF_KEY: value}) + + return results diff --git a/rllib/algorithms/impala/torch/impala_torch_policy_rlm.py b/rllib/algorithms/impala/torch/impala_torch_policy_rlm.py index 751b441098a8..6e10a0a23839 100644 --- a/rllib/algorithms/impala/torch/impala_torch_policy_rlm.py +++ b/rllib/algorithms/impala/torch/impala_torch_policy_rlm.py @@ -104,15 +104,17 @@ def loss( drop_last=drop_last, ).type(dtype=torch.float32) ) * self.config["gamma"] + + # Note that vtrace will compute the main loop on the CPU for better performance. vtrace_adjusted_target_values, pg_advantages = vtrace_torch( target_action_log_probs=target_actions_logp_time_major, behaviour_action_log_probs=behaviour_actions_logp_time_major, + discounts=discounts_time_major, rewards=rewards_time_major, values=values_time_major, bootstrap_value=bootstrap_value, clip_pg_rho_threshold=self.config["vtrace_clip_pg_rho_threshold"], clip_rho_threshold=self.config["vtrace_clip_rho_threshold"], - discounts=discounts_time_major, ) # The policy gradients loss. @@ -125,20 +127,20 @@ def loss( mean_vf_loss = 0.5 * torch.mean(torch.pow(delta, 2.0)) # The entropy loss. 
- entropy_loss = -torch.sum(target_actions_logp_time_major) + mean_entropy_loss = -torch.mean(target_policy_dist.entropy()) # The summed weighted loss. total_loss = ( pi_loss + vf_loss * self.config["vf_loss_coeff"] - + entropy_loss * self.entropy_coeff + + mean_entropy_loss * self.entropy_coeff ) self.stats = { "total_loss": total_loss, "pi_loss": mean_pi_loss, "vf_loss": mean_vf_loss, "values": values_time_major, - "entropy_loss": entropy_loss, + "entropy_loss": mean_entropy_loss, "vtrace_adjusted_target_values": vtrace_adjusted_target_values, } return total_loss diff --git a/rllib/algorithms/impala/torch/vtrace_torch_v2.py b/rllib/algorithms/impala/torch/vtrace_torch_v2.py index 5b67c5772a18..1f4b6f9411fa 100644 --- a/rllib/algorithms/impala/torch/vtrace_torch_v2.py +++ b/rllib/algorithms/impala/torch/vtrace_torch_v2.py @@ -1,6 +1,5 @@ from typing import List, Union from ray.rllib.utils.framework import try_import_torch -from ray.rllib.utils.torch_utils import convert_to_torch_tensor torch, nn = try_import_torch() @@ -113,25 +112,6 @@ def vtrace_torch( on rho_s in \rho_s \delta log \pi(a|x) (r + \gamma v_{s+1} - V(x_s)). """ log_rhos = target_action_log_probs - behaviour_action_log_probs - discounts = convert_to_torch_tensor(discounts) - rewards = convert_to_torch_tensor(rewards) - values = convert_to_torch_tensor(values) - bootstrap_value = convert_to_torch_tensor(bootstrap_value) - if clip_rho_threshold is not None: - clip_rho_threshold = convert_to_torch_tensor(clip_rho_threshold) - if clip_pg_rho_threshold is not None: - clip_pg_rho_threshold = convert_to_torch_tensor(clip_pg_rho_threshold) - - # Make sure tensor ranks are consistent. - rho_rank = log_rhos.dim() # Usually 2. 
- assert values.dim() == rho_rank - assert bootstrap_value.dim() == rho_rank - 1 - assert discounts.dim() == rho_rank - assert rewards.dim() == rho_rank - if clip_rho_threshold is not None: - assert clip_rho_threshold.dim() == 0 - if clip_pg_rho_threshold is not None: - assert clip_pg_rho_threshold.dim() == 0 rhos = torch.exp(log_rhos) if clip_rho_threshold is not None: @@ -147,11 +127,17 @@ def vtrace_torch( deltas = clipped_rhos * (rewards + discounts * values_t_plus_1 - values) - vs_minus_v_xs = [torch.zeros_like(bootstrap_value)] - for i in reversed(range(len(discounts))): - discount_t, c_t, delta_t = discounts[i], cs[i], deltas[i] - vs_minus_v_xs.append(delta_t + discount_t * c_t * vs_minus_v_xs[-1]) - vs_minus_v_xs = torch.stack(vs_minus_v_xs[1:]) + # Only move the for-loop to CPU. + discounts_cpu = discounts.to("cpu") + cs_cpu = cs.to("cpu") + deltas_cpu = deltas.to("cpu") + vs_minus_v_xs_cpu = [torch.zeros_like(bootstrap_value, device="cpu")] + for i in reversed(range(len(discounts_cpu))): + discount_t, c_t, delta_t = discounts_cpu[i], cs_cpu[i], deltas_cpu[i] + vs_minus_v_xs_cpu.append(delta_t + discount_t * c_t * vs_minus_v_xs_cpu[-1]) + vs_minus_v_xs_cpu = torch.stack(vs_minus_v_xs_cpu[1:]) + # Move results back to GPU - if applicable. + vs_minus_v_xs = vs_minus_v_xs_cpu.to(deltas.device) # Reverse the results back to original order. 
vs_minus_v_xs = torch.flip(vs_minus_v_xs, dims=[0]) diff --git a/rllib/algorithms/ppo/ppo.py b/rllib/algorithms/ppo/ppo.py index 034c224362e5..89481a835257 100644 --- a/rllib/algorithms/ppo/ppo.py +++ b/rllib/algorithms/ppo/ppo.py @@ -18,7 +18,10 @@ from ray.rllib.algorithms.algorithm_config import AlgorithmConfig, NotProvided from ray.rllib.algorithms.pg import PGConfig from ray.rllib.algorithms.ppo.ppo_catalog import PPOCatalog -from ray.rllib.algorithms.ppo.ppo_learner import PPOLearnerHyperparameters +from ray.rllib.algorithms.ppo.ppo_learner import ( + PPOLearnerHyperparameters, + LEARNER_RESULTS_KL_KEY, +) from ray.rllib.core.rl_module.rl_module import SingleAgentRLModuleSpec from ray.rllib.execution.rollout_ops import ( standardize_fields, @@ -247,6 +250,7 @@ def training( # Pass kwargs onto super's `training()` method. super().training(**kwargs) + # TODO (sven): Move to generic AlgorithmConfig. if lr_schedule is not NotProvided: self.lr_schedule = lr_schedule if use_critic is not NotProvided: @@ -436,12 +440,12 @@ def training_step(self) -> ResultDict: train_results = multi_gpu_train_one_step(self, train_batch) if self.config._enable_learner_api: - # the train results's loss keys are pids to their loss values. But we also + # The train results's loss keys are pids to their loss values. But we also # return a total_loss key at the same level as the pid keys. So we need to # subtract that to get the total set of pids to update. # TODO (Kourosh): We should also not be using train_results as a message - # passing medium to infer whcih policies to update. We could use - # policies_to_train variable that is given by the user to infer this. + # passing medium to infer which policies to update. We could use + # policies_to_train variable that is given by the user to infer this. 
policies_to_update = set(train_results.keys()) - {ALL_MODULES} else: policies_to_update = list(train_results.keys()) @@ -475,18 +479,17 @@ def training_step(self) -> ResultDict: if self.config._enable_learner_api: kl_dict = { - # TODO (Kourosh): Train results don't match the old format. The thing - # that used to be under `kl` is now under `mean_kl_loss`. Fix this. Do - # we need get here? - pid: train_results[pid][LEARNER_STATS_KEY].get("kl") + pid: train_results[pid][LEARNER_STATS_KEY][LEARNER_RESULTS_KL_KEY] for pid in policies_to_update } # triggers a special update method on RLOptimizer to update the KL values. - self.learner_group.additional_update( + additional_results = self.learner_group.additional_update( module_ids_to_update=policies_to_update, sampled_kl_values=kl_dict, timestep=self._counters[NUM_AGENT_STEPS_SAMPLED], ) + for pid, res in additional_results.items(): + train_results[pid].update(res) return train_results diff --git a/rllib/algorithms/ppo/ppo_learner.py b/rllib/algorithms/ppo/ppo_learner.py index dd1972e8aa6a..576471b285ed 100644 --- a/rllib/algorithms/ppo/ppo_learner.py +++ b/rllib/algorithms/ppo/ppo_learner.py @@ -1,12 +1,18 @@ +from collections import defaultdict from dataclasses import dataclass -from typing import Any, Mapping, List, Optional, Union +from typing import List, Optional, Union -import abc from ray.rllib.core.learner.learner import LearnerHyperparameters -from ray.rllib.core.rl_module.rl_module import ModuleID from ray.rllib.core.learner.learner import Learner from ray.rllib.utils.annotations import override -from ray.rllib.utils.typing import TensorType +from ray.rllib.utils.schedules.piecewise_schedule import PiecewiseSchedule + + +LEARNER_RESULTS_VF_LOSS_UNCLIPPED_KEY = "vf_loss_unclipped" +LEARNER_RESULTS_VF_EXPLAINED_VAR_KEY = "vf_explained_var" +LEARNER_RESULTS_KL_KEY = "mean_kl_loss" +LEARNER_RESULTS_CURR_KL_COEFF_KEY = "curr_kl_coeff" +LEARNER_RESULTS_CURR_ENTROPY_COEFF_KEY = "curr_entropy_coeff" @dataclass @@ 
-25,82 +31,43 @@ class to configure your algorithm. clip_param: float = None vf_clip_param: float = None entropy_coeff: float = None + entropy_coeff_schedule: Optional[List[List[Union[int, float]]]] = None vf_loss_coeff: float = None - # Experimental placeholder for things that could be part of the base - # LearnerHyperparameters. + # TODO: Move to base LearnerHyperparameter class (and handling of this setting + # into base Learners). lr_schedule: Optional[List[List[Union[int, float]]]] = None - entropy_coeff_schedule: Optional[List[List[Union[int, float]]]] = None class PPOLearner(Learner): + @override(Learner) def build(self) -> None: super().build() - # TODO (Kourosh): Move these failures to config.validate() or support them. + # Build entropy coeff scheduling tools. self.entropy_coeff_scheduler = None if self.hps.entropy_coeff_schedule: - raise ValueError("entropy_coeff_schedule is not supported in Learner yet") - - # TODO (Kourosh): This needs to be native tensor variable to be traced. - # self.entropy_coeff = self.hps.entropy_coeff - - # TODO (Kourosh): Create a way on the base class for users to define arbitrary - # schedulers for learning rates. - self.lr_scheduler = None - if self.hps.lr_schedule: - raise ValueError("lr_schedule is not supported in Learner yet") - - # We need to make sure that the kl_coeff is a framework tensor that is - # registered as part of the graph so that upon update the graph can be updated - # (e.g. in TF with eager tracing). - self.curr_kl_coeff_val = self.hps.kl_coeff - self.curr_kl_coeff = self._get_kl_variable(self.hps.kl_coeff) - - @override(Learner) - def additional_update_per_module( - self, module_id: ModuleID, sampled_kl_values: dict, timestep: int - ) -> Mapping[str, Any]: - assert sampled_kl_values, "Sampled KL values are empty." - - sampled_kl = sampled_kl_values[module_id] - if sampled_kl > 2.0 * self.hps.kl_target: - # TODO (Kourosh) why not 2? 
- self.curr_kl_coeff_val *= 1.5 - elif sampled_kl < 0.5 * self.hps.kl_target: - self.curr_kl_coeff_val *= 0.5 - - self._set_kl_coeff(self.curr_kl_coeff_val) - results = {"kl_coeff": self.curr_kl_coeff_val} - - # TODO (Kourosh): We may want to index into the schedulers to get the right one - # for this module. - if self.entropy_coeff_scheduler is not None: - self.entropy_coeff_scheduler.update(timestep) - - if self.lr_scheduler is not None: - self.lr_scheduler.update(timestep) - - return results - - @abc.abstractmethod - def _get_kl_variable(self, value: float) -> TensorType: - """Returns the kl_coeff (framework specific) tensor variable. - - This is a framework specific method that should be implemented by the - framework specific sub-class. - - Args: - value: The initial value for the kl_coeff variable. - """ - - @abc.abstractmethod - def _set_kl_coeff(self, value: float) -> None: - """Sets the value of the kl_coeff variable. - - This is a framework specific method that should be implemented by the - framework specific sub-class. - - Args: - value: The new value for the kl_coeff variable. - """ + # Custom schedule, based on list of + # ([ts], [value to be reached by ts])-tuples. + self.entropy_coeff_schedule_per_module = defaultdict( + lambda: PiecewiseSchedule( + self.hps.entropy_coeff_schedule, + outside_value=self.hps.entropy_coeff_schedule[-1][-1], + framework=None, + ) + ) + self.curr_entropy_coeffs_per_module = defaultdict( + lambda: self._get_tensor_variable(self.hps.entropy_coeff) + ) + # If no schedule, pin entropy coeff to its given (fixed) value. + else: + self.curr_entropy_coeffs_per_module = defaultdict( + lambda: self.hps.entropy_coeff + ) + + # Set up KL coefficient variables (per module). + # Note that the KL coeff is not controlled by a schedul, but seeks + # to stay close to a given kl_target value. 
+ self.curr_kl_coeffs_per_module = defaultdict( + lambda: self._get_tensor_variable(self.hps.kl_coeff) + ) diff --git a/rllib/algorithms/ppo/tests/test_ppo_learner.py b/rllib/algorithms/ppo/tests/test_ppo_learner.py index 40aa98389539..ceb726e7deac 100644 --- a/rllib/algorithms/ppo/tests/test_ppo_learner.py +++ b/rllib/algorithms/ppo/tests/test_ppo_learner.py @@ -12,7 +12,6 @@ from ray.rllib.core.rl_module.rl_module import SingleAgentRLModuleSpec from ray.rllib.policy.sample_batch import SampleBatch from ray.rllib.utils.test_utils import check, framework_iterator -from ray.rllib.utils.metrics import ALL_MODULES from ray.rllib.evaluation.postprocessing import ( compute_gae_for_sample_batch, @@ -92,8 +91,6 @@ def test_loss(self): lambda x: tf.convert_to_tensor(x), train_batch ) - policy_loss = policy.loss(policy.model, policy.dist_class, train_batch) - algo_config = config.copy(copy_frozen=False) algo_config.training(_enable_learner_api=True) algo_config.validate() @@ -110,13 +107,11 @@ def test_loss(self): ) learner_group = learner_group_config.build() - # load the algo weights onto the learner_group + # Load the algo weights onto the learner_group. 
learner_group.set_weights(algo.get_weights()) - results = learner_group.update(train_batch.as_multi_agent()) - - learner_group_loss = results[ALL_MODULES]["total_loss"] + learner_group.update(train_batch.as_multi_agent()) - check(learner_group_loss, policy_loss) + algo.stop() def test_save_load_state(self): """Tests saving and loading the state of the PPO Learner Group.""" @@ -142,7 +137,7 @@ def test_save_load_state(self): algo = config.build() policy = algo.get_policy() - for fw in framework_iterator(config, ("tf2", "torch"), with_eager_tracing=True): + for _ in framework_iterator(config, ("tf2", "torch"), with_eager_tracing=True): algo_config = config.copy(copy_frozen=False) algo_config.validate() algo_config.freeze() diff --git a/rllib/algorithms/ppo/tests/test_ppo_rl_module.py b/rllib/algorithms/ppo/tests/test_ppo_rl_module.py index 3c861d6aa8ed..678cc02b8af8 100644 --- a/rllib/algorithms/ppo/tests/test_ppo_rl_module.py +++ b/rllib/algorithms/ppo/tests/test_ppo_rl_module.py @@ -15,6 +15,7 @@ from ray.rllib.algorithms.ppo.torch.ppo_torch_rl_module import ( PPOTorchRLModule, ) +from ray.rllib.core.models.base import STATE_IN from ray.rllib.core.rl_module.rl_module import RLModuleConfig from ray.rllib.models.preprocessors import get_preprocessor from ray.rllib.utils.numpy import convert_to_numpy @@ -105,9 +106,13 @@ def _get_input_batch_from_obs(framework, obs): if framework == "torch": batch = { SampleBatch.OBS: convert_to_torch_tensor(obs)[None], + STATE_IN: None, } else: - batch = {SampleBatch.OBS: tf.convert_to_tensor([obs])} + batch = { + SampleBatch.OBS: tf.convert_to_tensor([obs]), + STATE_IN: None, + } return batch @@ -133,9 +138,6 @@ def test_rollouts(self): if lstm and fw == "tf2": # LSTM not implemented in TF2 yet continue - if env_name == "ALE/Breakout-v5" and fw == "tf2": - # TODO(Artur): Implement CNN in TF2. 
- continue print(f"[FW={fw} | [ENV={env_name}] | [FWD={fwd_fn}] | LSTM" f"={lstm}") if env_name.startswith("ALE/"): env = gym.make("GymV26Environment-v0", env_id=env_name) @@ -181,9 +183,6 @@ def test_forward_train(self): if lstm and fw == "tf2": # LSTM not implemented in TF2 yet continue - if env_name == "ALE/Breakout-v5" and fw == "tf2": - # TODO(Artur): Implement CNN in TF2. - continue print(f"[FW={fw} | [ENV={env_name}] | LSTM={lstm}") # TODO(Artur): Figure out why this is needed and fix it. if env_name.startswith("ALE/"): @@ -233,6 +232,7 @@ def test_forward_train(self): SampleBatch.REWARDS: np.array(reward), SampleBatch.TERMINATEDS: np.array(terminated), SampleBatch.TRUNCATEDS: np.array(truncated), + STATE_IN: None, } # TODO (Artur): Un-uncomment once Policy supports RNN diff --git a/rllib/algorithms/ppo/tests/test_ppo_with_rl_module.py b/rllib/algorithms/ppo/tests/test_ppo_with_rl_module.py index 62853861b65a..3f2fc1d007b1 100644 --- a/rllib/algorithms/ppo/tests/test_ppo_with_rl_module.py +++ b/rllib/algorithms/ppo/tests/test_ppo_with_rl_module.py @@ -4,13 +4,19 @@ import ray import ray.rllib.algorithms.ppo as ppo +from ray.rllib.algorithms.ppo.ppo_learner import ( + LEARNER_RESULTS_CURR_ENTROPY_COEFF_KEY, +) from ray.rllib.algorithms.callbacks import DefaultCallbacks from ray.rllib.algorithms.ppo.tests.test_ppo import PENDULUM_FAKE_BATCH +from ray.rllib.core.learner.learner import ( + LEARNER_RESULTS_CURR_LR_KEY, +) from ray.rllib.evaluation.postprocessing import ( compute_gae_for_sample_batch, ) from ray.rllib.policy.sample_batch import DEFAULT_POLICY_ID -from ray.rllib.utils.metrics.learner_info import LEARNER_INFO, LEARNER_STATS_KEY +from ray.rllib.utils.metrics.learner_info import LEARNER_INFO from ray.rllib.utils.test_utils import ( check, check_compute_single_action, @@ -47,36 +53,27 @@ def get_model_config(framework, lstm=False): class MyCallbacks(DefaultCallbacks): - @staticmethod - def _check_lr_torch(policy, policy_id): - for j, opt in 
enumerate(policy._optimizers): - for p in opt.param_groups: - assert p["lr"] == policy.cur_lr, "LR scheduling error!" - - @staticmethod - def _check_lr_tf(policy, policy_id): - lr = policy.cur_lr - sess = policy.get_session() - if sess: - lr = sess.run(lr) - optim_lr = sess.run(policy._optimizer._lr) - else: - lr = lr.numpy() - optim_lr = policy._optimizer.lr.numpy() - assert lr == optim_lr, "LR scheduling error!" - def on_train_result(self, *, algorithm, result: dict, **kwargs): - stats = result["info"][LEARNER_INFO][DEFAULT_POLICY_ID][LEARNER_STATS_KEY] - # Learning rate should go to 0 after 1 iter. - check(stats["cur_lr"], 5e-5 if algorithm.iteration == 1 else 0.0) + stats = result["info"][LEARNER_INFO][DEFAULT_POLICY_ID] # Entropy coeff goes to 0.05, then 0.0 (per iter). - check(stats["entropy_coeff"], 0.1 if algorithm.iteration == 1 else 0.05) + check( + stats[LEARNER_RESULTS_CURR_ENTROPY_COEFF_KEY], + 0.05 if algorithm.iteration == 1 else 0.0, + ) - algorithm.workers.foreach_policy( - self._check_lr_torch + # Learning rate should decrease by 0.0001 per iteration. + check( + stats[LEARNER_RESULTS_CURR_LR_KEY], + 0.0003 if algorithm.iteration == 1 else 0.0002, + ) + # Compare reported curr lr vs the actual lr found in the optimizer object. + optim = algorithm.learner_group._learner._named_optimizers[DEFAULT_POLICY_ID] + actual_optimizer_lr = ( + optim.param_groups[0]["lr"] if algorithm.config.framework_str == "torch" - else self._check_lr_tf + else optim.lr ) + check(stats[LEARNER_RESULTS_CURR_LR_KEY], actual_optimizer_lr) class TestPPO(unittest.TestCase): @@ -96,26 +93,24 @@ def test_ppo_compilation_and_schedule_mixins(self): ppo.PPOConfig() .training( num_sgd_iter=2, - # Setup lr schedule for testing. - lr_schedule=[[0, 5e-5], [128, 0.0]], + # Setup lr schedule for testing lr-scheduling correctness. 
+ lr_schedule=[[0, 0.0004], [512, 0.0]], # 512=4x128 # Set entropy_coeff to a faulty value to proof that it'll get # overridden by the schedule below (which is expected). entropy_coeff=100.0, - entropy_coeff_schedule=[[0, 0.1], [256, 0.0]], + entropy_coeff_schedule=[[0, 0.1], [256, 0.0]], # 256=2x128 train_batch_size=128, - # TODO (Kourosh): Enable when the scheduler is supported in the new - # Learner API stack. - _enable_learner_api=False, + _enable_learner_api=True, ) .rollouts( num_rollout_workers=1, # Test with compression. - compress_observations=True, + # compress_observations=True, enable_connectors=True, ) .callbacks(MyCallbacks) .rl_module(_enable_rl_module_api=True) - ) # For checking lr-schedule correctness. + ) num_iterations = 2 @@ -124,9 +119,6 @@ def test_ppo_compilation_and_schedule_mixins(self): ): # TODO (Kourosh) Bring back "FrozenLake-v1" for env in ["CartPole-v1", "Pendulum-v1", "ALE/Breakout-v5"]: - if env == "ALE/Breakout-v5" and fw == "tf2": - # TODO(Artur): Implement CNN in TF2. - continue print("Env={}".format(env)) # TODO (Kourosh, Avnishn): for now just do lstm=False for lstm in [False]: @@ -134,10 +126,13 @@ def test_ppo_compilation_and_schedule_mixins(self): config.training(model=get_model_config(fw, lstm=lstm)) algo = config.build(env=env) - policy = algo.get_policy() + optim = algo.learner_group._learner._named_optimizers[ + DEFAULT_POLICY_ID + ] entropy_coeff = algo.get_policy().entropy_coeff - lr = policy.cur_lr + lr = optim.param_groups[0]["lr"] if fw == "torch" else optim.lr check(entropy_coeff, 0.1) + # Check initial LR directly set in optimizer. check(lr, config.lr) for i in range(num_iterations): @@ -167,7 +162,7 @@ def test_ppo_exploration_setup(self): ) obs = np.array(0) - for fw in framework_iterator( + for _ in framework_iterator( config, frameworks=("torch", "tf2"), with_eager_tracing=True ): # Default Agent should be setup with StochasticSampling. 
@@ -220,8 +215,7 @@ def test_ppo_free_log_std_with_rl_modules(self): .training(_enable_learner_api=True) ) - # TODO(Artur): Enable this test for tf2 once we support CNNs - for fw in framework_iterator(config, frameworks=["tf2", "torch"]): + for fw in framework_iterator(config, frameworks=("torch", "tf2")): trainer = config.build() policy = trainer.get_policy() diff --git a/rllib/algorithms/ppo/tf/ppo_tf_learner.py b/rllib/algorithms/ppo/tf/ppo_tf_learner.py index cd6382ec750b..e65f65868825 100644 --- a/rllib/algorithms/ppo/tf/ppo_tf_learner.py +++ b/rllib/algorithms/ppo/tf/ppo_tf_learner.py @@ -1,8 +1,17 @@ import logging -from typing import Any, Mapping - -from ray.rllib.algorithms.ppo.ppo_learner import PPOLearner +from typing import Any, Dict, Mapping + +from ray.rllib.algorithms.ppo.ppo_learner import ( + LEARNER_RESULTS_KL_KEY, + LEARNER_RESULTS_CURR_ENTROPY_COEFF_KEY, + LEARNER_RESULTS_CURR_KL_COEFF_KEY, + LEARNER_RESULTS_VF_EXPLAINED_VAR_KEY, + LEARNER_RESULTS_VF_LOSS_UNCLIPPED_KEY, + PPOLearner, +) +from ray.rllib.core.learner.learner import POLICY_LOSS_KEY, VF_LOSS_KEY, ENTROPY_KEY from ray.rllib.core.learner.tf.tf_learner import TfLearner +from ray.rllib.core.rl_module.rl_module import ModuleID from ray.rllib.evaluation.postprocessing import Postprocessing from ray.rllib.policy.sample_batch import SampleBatch from ray.rllib.utils.framework import try_import_tf @@ -88,32 +97,53 @@ def compute_loss_per_module( total_loss = tf.reduce_mean( -surrogate_loss + self.hps.vf_loss_coeff * vf_loss_clipped - - self.hps.entropy_coeff * curr_entropy + - self.curr_entropy_coeffs_per_module[module_id] * curr_entropy ) # Add mean_kl_loss (already processed through `reduce_mean_valid`), # if necessary. 
if self.hps.kl_coeff > 0.0: - total_loss += self.curr_kl_coeff * mean_kl_loss + total_loss += self.curr_kl_coeffs_per_module[module_id] * mean_kl_loss return { self.TOTAL_LOSS_KEY: total_loss, - "policy_loss": -tf.reduce_mean(surrogate_loss), - "vf_loss": mean_vf_loss, - "unclipped_vf_loss": mean_vf_unclipped_loss, - "vf_explained_var": explained_variance( + POLICY_LOSS_KEY: -tf.reduce_mean(surrogate_loss), + VF_LOSS_KEY: mean_vf_loss, + LEARNER_RESULTS_VF_LOSS_UNCLIPPED_KEY: mean_vf_unclipped_loss, + LEARNER_RESULTS_VF_EXPLAINED_VAR_KEY: explained_variance( batch[Postprocessing.VALUE_TARGETS], value_fn_out ), - "entropy": mean_entropy, - "kl": mean_kl_loss, - "entropy_coeff": self.hps.entropy_coeff, - "cur_kl_coeff": self.curr_kl_coeff, + ENTROPY_KEY: mean_entropy, + LEARNER_RESULTS_KL_KEY: mean_kl_loss, } @override(PPOLearner) - def _get_kl_variable(self, value: float) -> Any: - return tf.Variable(value, trainable=False, dtype=tf.float32) + def additional_update_per_module( + self, module_id: ModuleID, sampled_kl_values: dict, timestep: int + ) -> Dict[str, Any]: + assert sampled_kl_values, "Sampled KL values are empty." + + results = super().additional_update_per_module( + module_id, + sampled_kl_values=sampled_kl_values, + timestep=timestep, + ) - @override(PPOLearner) - def _set_kl_coeff(self, value: float) -> None: - self.curr_kl_coeff.assign(value) + # Update KL coefficient. + sampled_kl = sampled_kl_values[module_id] + curr_var = self.curr_kl_coeffs_per_module[module_id] + if sampled_kl > 2.0 * self.hps.kl_target: + # TODO (Kourosh) why not 2? + curr_var.assign(curr_var * 1.5) + elif sampled_kl < 0.5 * self.hps.kl_target: + curr_var.assign(curr_var * 0.5) + results.update({LEARNER_RESULTS_CURR_KL_COEFF_KEY: curr_var.numpy()}) + + # Update entropy coefficient. 
+ value = self.hps.entropy_coeff + if self.hps.entropy_coeff_schedule is not None: + value = self.entropy_coeff_schedule_per_module[module_id].value(t=timestep) + self.curr_entropy_coeffs_per_module[module_id].assign(value) + results.update({LEARNER_RESULTS_CURR_ENTROPY_COEFF_KEY: value}) + + return results diff --git a/rllib/algorithms/ppo/tf/ppo_tf_policy_rlm.py b/rllib/algorithms/ppo/tf/ppo_tf_policy_rlm.py index 66aa0e408d3a..c99a17ad840a 100644 --- a/rllib/algorithms/ppo/tf/ppo_tf_policy_rlm.py +++ b/rllib/algorithms/ppo/tf/ppo_tf_policy_rlm.py @@ -16,7 +16,7 @@ from ray.rllib.policy.eager_tf_policy_v2 import EagerTFPolicyV2 from ray.rllib.utils.annotations import override from ray.rllib.utils.framework import try_import_tf - +from ray.rllib.utils.nested_dict import NestedDict from ray.rllib.utils.tf_utils import ( explained_variance, warn_if_infinite_kl_divergence, @@ -81,6 +81,8 @@ def loss( train_batch: SampleBatch, ) -> Union[TensorType, List[TensorType]]: + if not isinstance(train_batch, NestedDict): + train_batch = NestedDict(train_batch) fwd_out = model.forward_train(train_batch) curr_action_dist = fwd_out[SampleBatch.ACTION_DIST] diff --git a/rllib/algorithms/ppo/tf/ppo_tf_rl_module.py b/rllib/algorithms/ppo/tf/ppo_tf_rl_module.py index 5538a9450825..b70aeb5a3ebb 100644 --- a/rllib/algorithms/ppo/tf/ppo_tf_rl_module.py +++ b/rllib/algorithms/ppo/tf/ppo_tf_rl_module.py @@ -1,7 +1,7 @@ from typing import Mapping, Any from ray.rllib.algorithms.ppo.ppo_base_rl_module import PPORLModuleBase -from ray.rllib.core.models.base import ACTOR, CRITIC +from ray.rllib.core.models.base import ACTOR, CRITIC, STATE_IN from ray.rllib.core.models.tf.encoder import ENCODER_OUT from ray.rllib.core.rl_module.rl_module import RLModule from ray.rllib.core.rl_module.tf.tf_rl_module import TfRLModule @@ -33,14 +33,15 @@ def _forward_inference(self, batch: NestedDict) -> Mapping[str, Any]: output = {} # TODO (Artur): Remove this once Policy supports RNN - # if 
self.encoder.config.shared: - # batch[STATE_IN] = None - # else: - # batch[STATE_IN] = { - # ACTOR: None, - # CRITIC: None, - # } - # batch[SampleBatch.SEQ_LENS] = None + batch = batch.copy() + if self.encoder.config.shared: + batch[STATE_IN] = None + else: + batch[STATE_IN] = { + ACTOR: None, + CRITIC: None, + } + batch[SampleBatch.SEQ_LENS] = None encoder_outs = self.encoder(batch) # TODO (Artur): Un-uncomment once Policy supports RNN @@ -64,14 +65,15 @@ def _forward_exploration(self, batch: NestedDict) -> Mapping[str, Any]: output = {} # TODO (Artur): Remove this once Policy supports RNN - # if self.encoder.config.shared: - # batch[STATE_IN] = None - # else: - # batch[STATE_IN] = { - # ACTOR: None, - # CRITIC: None, - # } - # batch[SampleBatch.SEQ_LENS] = None + batch = batch.copy() + if self.encoder.config.shared: + batch[STATE_IN] = None + else: + batch[STATE_IN] = { + ACTOR: None, + CRITIC: None, + } + batch[SampleBatch.SEQ_LENS] = None # Shared encoder encoder_outs = self.encoder(batch) @@ -97,14 +99,15 @@ def _forward_train(self, batch: NestedDict): output = {} # TODO (Artur): Remove this once Policy supports RNN - # if self.encoder.config.shared: - # batch[STATE_IN] = None - # else: - # batch[STATE_IN] = { - # ACTOR: None, - # CRITIC: None, - # } - # batch[SampleBatch.SEQ_LENS] = None + batch = batch.copy() + if self.encoder.config.shared: + batch[STATE_IN] = None + else: + batch[STATE_IN] = { + ACTOR: None, + CRITIC: None, + } + batch[SampleBatch.SEQ_LENS] = None # Shared encoder encoder_outs = self.encoder(batch) diff --git a/rllib/algorithms/ppo/torch/ppo_torch_learner.py b/rllib/algorithms/ppo/torch/ppo_torch_learner.py index 9851e8f65a89..675539f50f5c 100644 --- a/rllib/algorithms/ppo/torch/ppo_torch_learner.py +++ b/rllib/algorithms/ppo/torch/ppo_torch_learner.py @@ -1,8 +1,17 @@ import logging -from typing import Any, Mapping - -from ray.rllib.algorithms.ppo.ppo_learner import PPOLearner +from typing import Any, Dict, Mapping + +from 
ray.rllib.algorithms.ppo.ppo_learner import ( + LEARNER_RESULTS_KL_KEY, + LEARNER_RESULTS_CURR_ENTROPY_COEFF_KEY, + LEARNER_RESULTS_CURR_KL_COEFF_KEY, + LEARNER_RESULTS_VF_EXPLAINED_VAR_KEY, + LEARNER_RESULTS_VF_LOSS_UNCLIPPED_KEY, + PPOLearner, +) +from ray.rllib.core.learner.learner import POLICY_LOSS_KEY, VF_LOSS_KEY, ENTROPY_KEY from ray.rllib.core.learner.torch.torch_learner import TorchLearner +from ray.rllib.core.rl_module.rl_module import ModuleID from ray.rllib.evaluation.postprocessing import Postprocessing from ray.rllib.policy.sample_batch import SampleBatch from ray.rllib.utils.framework import try_import_torch @@ -84,41 +93,53 @@ def compute_loss_per_module( total_loss = torch.mean( -surrogate_loss + self.hps.vf_loss_coeff * vf_loss_clipped - - self.hps.entropy_coeff * curr_entropy + - self.curr_entropy_coeffs_per_module[module_id] * curr_entropy ) # Add mean_kl_loss (already processed through `reduce_mean_valid`), # if necessary. if self.hps.kl_coeff > 0.0: - total_loss += self.curr_kl_coeff * mean_kl_loss + total_loss += self.curr_kl_coeffs_per_module[module_id] * mean_kl_loss return { self.TOTAL_LOSS_KEY: total_loss, - "policy_loss": -torch.mean(surrogate_loss), - "vf_loss": mean_vf_loss, - "unclipped_vf_loss": mean_vf_unclipped_loss, - "vf_explained_var": explained_variance( + POLICY_LOSS_KEY: -torch.mean(surrogate_loss), + VF_LOSS_KEY: mean_vf_loss, + LEARNER_RESULTS_VF_LOSS_UNCLIPPED_KEY: mean_vf_unclipped_loss, + LEARNER_RESULTS_VF_EXPLAINED_VAR_KEY: explained_variance( batch[Postprocessing.VALUE_TARGETS], value_fn_out ), - "entropy": mean_entropy, - "kl": mean_kl_loss, - "entropy_coeff": self.hps.entropy_coeff, - "cur_kl_coeff": self.curr_kl_coeff, + ENTROPY_KEY: mean_entropy, + LEARNER_RESULTS_KL_KEY: mean_kl_loss, } @override(PPOLearner) - def _get_kl_variable(self, value: float) -> Any: - return torch.tensor( - value, - requires_grad=False, - device=self._device, - dtype=torch.float32, + def additional_update_per_module( + self, module_id: 
ModuleID, sampled_kl_values: dict, timestep: int + ) -> Dict[str, Any]: + assert sampled_kl_values, "Sampled KL values are empty." + + results = super().additional_update_per_module( + module_id, + sampled_kl_values=sampled_kl_values, + timestep=timestep, ) - @override(PPOLearner) - def _set_kl_coeff(self, value: float): - self.curr_kl_coeff.data = torch.tensor( - value, - dtype=torch.float32, - device=self.curr_kl_coeff.device, - ) + # Update KL coefficient. + sampled_kl = sampled_kl_values[module_id] + curr_var = self.curr_kl_coeffs_per_module[module_id] + if sampled_kl > 2.0 * self.hps.kl_target: + # TODO (Kourosh) why not 2? + curr_var.data *= 1.5 + elif sampled_kl < 0.5 * self.hps.kl_target: + curr_var.data *= 0.5 + results.update({LEARNER_RESULTS_CURR_KL_COEFF_KEY: curr_var.item()}) + + # Update entropy coefficient. + value = self.hps.entropy_coeff + if self.hps.entropy_coeff_schedule is not None: + value = self.entropy_coeff_schedule_per_module[module_id].value(t=timestep) + self.curr_entropy_coeffs_per_module[module_id].data = torch.tensor(value) + results.update({LEARNER_RESULTS_CURR_ENTROPY_COEFF_KEY: value}) + + return results diff --git a/rllib/core/learner/learner.py b/rllib/core/learner/learner.py index 1d8ac7659c3a..688d1574d034 100644 --- a/rllib/core/learner/learner.py +++ b/rllib/core/learner/learner.py @@ -47,6 +47,7 @@ OverrideToImplementCustomLogic, OverrideToImplementCustomLogic_CallToSuperRecommended, ) +from ray.rllib.utils.schedules.piecewise_schedule import PiecewiseSchedule torch, _ = try_import_torch() tf1, tf, tfv = try_import_tf() @@ -66,6 +67,9 @@ VF_LOSS_KEY = "vf_loss" ENTROPY_KEY = "entropy" +# Additional update keys +LEARNER_RESULTS_CURR_LR_KEY = "curr_lr" + @dataclass class FrameworkHyperparameters: @@ -98,7 +102,10 @@ class LearnerHyperparameters: respective AlgorithmConfig class. """ - pass + # TODO (Sven): Move lr from - currently - optimizer config to only exist here. 
+ # lr: float = None + + lr_schedule: Optional[List[List[Union[int, float]]]] = None class Learner: @@ -614,6 +621,26 @@ def build(self) -> None: logger.debug("Learner already built. Skipping build.") return self._is_built = True + + # Generic LR scheduling tools. + self.lr_scheduler = None + if self.hps.lr_schedule is not None: + # Custom schedule, based on list of + # ([ts], [value to be reached by ts])-tuples. + self.lr_schedule_per_module = defaultdict( + lambda: PiecewiseSchedule( + self.hps.lr_schedule, + outside_value=self.hps.lr_schedule[-1][-1], + framework=None, + ) + ) + self.curr_lr_per_module = defaultdict( + lambda: self._get_tensor_variable(self._optimizer_config["lr"]) + ) + # If no schedule, pin learning rate to its given (fixed) value. + else: + self.curr_lr_per_module = defaultdict(lambda: self._optimizer_config["lr"]) + self._module = self._make_module() for param_seq, optimizer in self.configure_optimizers(): self._optimizer_parameters[optimizer] = [] @@ -749,7 +776,7 @@ def additional_update_per_module(self, module_id: ModuleID, tau: float): @OverrideToImplementCustomLogic def additional_update_per_module( self, module_id: ModuleID, **kwargs - ) -> Mapping[str, Any]: + ) -> Dict[str, Any]: """Apply additional non-gradient based updates for a single module. See `additional_update` for more details. @@ -761,7 +788,7 @@ def additional_update_per_module( Returns: A dictionary of results from the update """ - raise NotImplementedError + return {} @OverrideToImplementCustomLogic def postprocess_gradients( @@ -1101,6 +1128,26 @@ def _reset(self): def apply(self, func, *_args, **_kwargs): return func(self, *_args, **_kwargs) + @abc.abstractmethod + def _get_tensor_variable( + self, + value: Any, + dtype: Any = None, + trainable: bool = False, + ) -> TensorType: + """Returns a framework-specific tensor variable with the initial given value. + + This is a framework specific method that should be implemented by the + framework specific sub-class. 
+ + Args: + value: The initial value for the tensor variable variable. + + Returns: + The framework specific tensor variable of the given initial value, + dtype and trainable/requires_grad property. + """ + @dataclass class LearnerSpec: diff --git a/rllib/core/learner/learner_group.py b/rllib/core/learner/learner_group.py index ed605b894566..cbb6870a72fd 100644 --- a/rllib/core/learner/learner_group.py +++ b/rllib/core/learner/learner_group.py @@ -101,6 +101,9 @@ def __init__( self._is_module_trainable = _is_module_trainable + # How many timesteps had to be dropped due to a full input queue? + self._in_queue_ts_dropped = 0 + if self._is_local: self._learner = learner_class(**learner_spec.get_params_dict()) self._learner.build() @@ -133,14 +136,12 @@ def __init__( ) self._in_queue = deque(maxlen=max_queue_len) - @property - def in_queue_size(self) -> int: - """Returns the number of batches currently in the in queue to be processed. - - If the queue is reaching its max size, then this learner group likely needs - more workers to process incoming batches. 
- """ - return len(self._in_queue) + def get_in_queue_stats(self) -> Mapping[str, Any]: + """Returns the current stats for the input queue for this learner group.""" + return { + "learner_group_queue_size": len(self._in_queue), + "learner_group_queue_ts_dropped": self._in_queue_ts_dropped, + } @property def is_local(self) -> bool: @@ -303,7 +304,7 @@ def additional_update( return self._learner.additional_update(**kwargs) else: results = self._worker_manager.foreach_actor( - [lambda w: w.additional_update(**kwargs) for worker in self._workers] + [lambda w: w.additional_update(**kwargs) for _ in self._workers] ) results = self._get_results(results) if reduce_fn is None: diff --git a/rllib/core/learner/tf/tf_learner.py b/rllib/core/learner/tf/tf_learner.py index 55a5eb05abaf..45f98d4f7520 100644 --- a/rllib/core/learner/tf/tf_learner.py +++ b/rllib/core/learner/tf/tf_learner.py @@ -16,6 +16,7 @@ from ray.rllib.core.learner.learner import ( FrameworkHyperparameters, Learner, + LEARNER_RESULTS_CURR_LR_KEY, ParamOptimizerPair, NamedParamOptimizerPairs, ParamType, @@ -28,7 +29,10 @@ ) from ray.rllib.core.rl_module.tf.tf_rl_module import TfRLModule from ray.rllib.policy.sample_batch import MultiAgentBatch -from ray.rllib.utils.annotations import override +from ray.rllib.utils.annotations import ( + override, + OverrideToImplementCustomLogic_CallToSuperRecommended, +) from ray.rllib.utils.framework import try_import_tf from ray.rllib.utils.tf_utils import clip_gradients from ray.rllib.utils.typing import TensorType, ResultDict @@ -84,13 +88,15 @@ def configure_optimizer_per_module( self, module_id: ModuleID ) -> Union[ParamOptimizerPair, NamedParamOptimizerPairs]: module = self._module[module_id] - lr = self._optimizer_config["lr"] + # TODO (sven): Move lr from optimizer config to Learner HPs? + # We might not need optimizer config. 
+ lr = self.curr_lr_per_module[module_id] optim = tf.keras.optimizers.Adam(learning_rate=lr) pair: ParamOptimizerPair = ( self.get_parameters(module), optim, ) - # this isn't strictly necessary, but makes it so that if a checkpoint is + # This isn't strictly necessary, but makes it so that if a checkpoint is # computed before training actually starts, then it will be the same in # shape / size as a checkpoint after training starts. optim.build(module.trainable_variables) @@ -475,7 +481,11 @@ def update( return results return reduce_fn(results) - def _do_update_fn(self, batch: MultiAgentBatch) -> Mapping[str, Any]: + def _do_update_fn( + self, + batch: MultiAgentBatch, + _ray_trace_ctx=None, + ) -> Mapping[str, Any]: # TODO (Avnish): Match this base class's implementation. def helper(_batch): # TODO (Kourosh): We need to go back to NestedDict because that's the @@ -512,3 +522,37 @@ def filter_fwd_out(x): } return self._strategy.run(helper, args=(batch,)) + + @OverrideToImplementCustomLogic_CallToSuperRecommended + @override(Learner) + def additional_update_per_module( + self, module_id: ModuleID, *, timestep: int, **kwargs + ) -> Mapping[str, Any]: + # Handle lr scheduling updates and apply new learning rates to the optimizers. + if self.hps.lr_schedule is not None: + value = self.lr_schedule_per_module[module_id].value(t=timestep) + self.curr_lr_per_module[module_id].assign(value) + # Not sure why we need to do this here besides setting the original + # tf Variable `self.curr_lr_per_module[module_id]`. When tf creates the + # optimizer, maybe it detaches its lr value from the given variable? 
+ self._named_optimizers[module_id].lr = value + return { + LEARNER_RESULTS_CURR_LR_KEY: self._named_optimizers[module_id].lr.numpy() + } + + @override(Learner) + def _get_tensor_variable(self, value, dtype=None, trainable=False) -> "tf.Tensor": + return tf.Variable( + value, + trainable=trainable, + dtype=( + dtype + or ( + tf.float32 + if isinstance(value, float) + else tf.int32 + if isinstance(value, int) + else None + ) + ), + ) diff --git a/rllib/core/learner/torch/torch_learner.py b/rllib/core/learner/torch/torch_learner.py index c076b91123ba..43eb0ac9910b 100644 --- a/rllib/core/learner/torch/torch_learner.py +++ b/rllib/core/learner/torch/torch_learner.py @@ -19,6 +19,7 @@ from ray.rllib.core.learner.learner import ( FrameworkHyperparameters, Learner, + LEARNER_RESULTS_CURR_LR_KEY, ParamOptimizerPair, NamedParamOptimizerPairs, ParamType, @@ -26,7 +27,11 @@ ) from ray.rllib.core.rl_module.torch.torch_rl_module import TorchDDPRLModule from ray.rllib.policy.sample_batch import MultiAgentBatch -from ray.rllib.utils.annotations import override +from ray.rllib.utils.annotations import ( + override, + OverrideToImplementCustomLogic, + OverrideToImplementCustomLogic_CallToSuperRecommended, +) from ray.rllib.utils.typing import TensorType from ray.rllib.utils.nested_dict import NestedDict from ray.rllib.utils.torch_utils import ( @@ -69,7 +74,9 @@ def configure_optimizer_per_module( self, module_id: ModuleID ) -> Union[ParamOptimizerPair, NamedParamOptimizerPairs]: module = self._module[module_id] - lr = self._optimizer_config["lr"] + # TODO (sven): Move lr from optimizer config to Learner HPs? + # We might not need optimizer config. 
+ lr = self.curr_lr_per_module[module_id] pair: ParamOptimizerPair = ( self.get_parameters(module), torch.optim.Adam(self.get_parameters(module), lr=lr), @@ -88,6 +95,18 @@ def compute_gradients( return grads + @OverrideToImplementCustomLogic_CallToSuperRecommended + @override(Learner) + def additional_update_per_module( + self, module_id: ModuleID, *, timestep: int, **kwargs + ) -> Mapping[str, Any]: + # Handle lr scheduling updates and apply new learning rates to the optimizers. + value = self._optimizer_config["lr"] + if self.hps.lr_schedule is not None: + value = self.lr_schedule_per_module[module_id].value(t=timestep) + self.curr_lr_per_module[module_id].data = torch.tensor(value) + return {LEARNER_RESULTS_CURR_LR_KEY: value} + @override(Learner) def postprocess_gradients( self, @@ -205,14 +224,14 @@ def build(self) -> None: """Builds the TorchLearner. This method is specific to TorchLearner. Before running super() it will - initialzed the device properly based on use_gpu and distributed flags, so that - _make_module() can place the created module on the correct device. After - running super() it will wrap the module in a TorchDDPRLModule if distributed is - set. + initialze the device properly based on the `_use_gpu` and `_distributed` + flags, so that `_make_module()` can place the created module on the correct + device. After running super() it will wrap the module in a TorchDDPRLModule + if `_distributed` is True. """ - # TODO (Kourosh): How do we handle model parallism? + # TODO (Kourosh): How do we handle model parallelism? # TODO (Kourosh): Instead of using _TorchAccelerator, we should use the public - # api in ray.train but allow for session to be None without any errors raised. + # API in ray.train but allow for session to be None without any errors raised. if self._use_gpu: # get_device() returns the 0th device if # it is called from outside of a Ray Train session. 
Its necessary to give @@ -234,20 +253,32 @@ def build(self) -> None: self._device = torch.device("cpu") super().build() - # if the module is a MultiAgentRLModule and nn.Module we can simply assume + + self._make_modules_ddp_if_necessary() + + @OverrideToImplementCustomLogic + def _make_modules_ddp_if_necessary(self) -> None: + """Default logic for (maybe) making all Modules within self._module DDP.""" + + # If the module is a MultiAgentRLModule and nn.Module we can simply assume # all the submodules are registered. Otherwise, we need to loop through # each submodule and move it to the correct device. # TODO (Kourosh): This can result in missing modules if the user does not - # register them in the MultiAgentRLModule. We should find a better way to - # handle this. + # register them in the MultiAgentRLModule. We should find a better way to + # handle this. if self._distributed: + # Single agent module: Convert to `TorchDDPRLModule`. if isinstance(self._module, TorchRLModule): self._module = TorchDDPRLModule(self._module) + # Multi agent module: Convert each submodule to `TorchDDPRLModule`. else: + assert isinstance(self._module, MultiAgentRLModule) for key in self._module.keys(): - if isinstance(self._module[key], TorchRLModule): + sub_module = self._module[key] + if isinstance(sub_module, TorchRLModule): + # Wrap and override the module ID key in self._module. 
self._module.add_module( - key, TorchDDPRLModule(self._module[key]), override=True + key, TorchDDPRLModule(sub_module), override=True ) def _is_module_compatible_with_learner(self, module: RLModule) -> bool: @@ -284,3 +315,23 @@ def _map_module_to_device(self, module: MultiAgentRLModule) -> None: for key in module.keys(): if isinstance(module[key], torch.nn.Module): module[key].to(self._device) + + @override(Learner) + def _get_tensor_variable( + self, value, dtype=None, trainable=False + ) -> "torch.Tensor": + return torch.tensor( + value, + requires_grad=trainable, + device=self._device, + dtype=( + dtype + or ( + torch.float32 + if isinstance(value, float) + else torch.int32 + if isinstance(value, int) + else None + ) + ), + ) diff --git a/rllib/core/models/base.py b/rllib/core/models/base.py index d977a99a37c2..da8b199e229f 100644 --- a/rllib/core/models/base.py +++ b/rllib/core/models/base.py @@ -58,6 +58,7 @@ def build(self, framework: str): raise NotImplementedError +@ExperimentalAPI class Model(abc.ABC): """Framework-agnostic base class for RLlib models. @@ -225,6 +226,7 @@ def _set_to_dummy_weights(self, value_sequence=(-0.02, -0.01, 0.01, 0.02)) -> No """ +@ExperimentalAPI class Encoder(Model, abc.ABC): """The framework-agnostic base class for all RLlib encoders. @@ -333,6 +335,7 @@ def _forward(self, input_dict: dict, **kwargs) -> dict: raise NotImplementedError +@ExperimentalAPI class ActorCriticEncoder(Encoder): """An encoder that potentially holds two encoders. 
diff --git a/rllib/core/models/catalog.py b/rllib/core/models/catalog.py index b8dde4673ad0..8494f6babadb 100644 --- a/rllib/core/models/catalog.py +++ b/rllib/core/models/catalog.py @@ -314,6 +314,9 @@ def get_encoder_config( activation = model_config_dict["fcnet_activation"] output_activation = model_config_dict["fcnet_activation"] fcnet_hiddens = model_config_dict["fcnet_hiddens"] + # TODO (sven): Move to a new ModelConfig object (dataclass) asap, instead of + # "linking" into the old ModelConfig (dict)! This just causes confusion as to + # which old keys now mean what for the new RLModules-based default models. encoder_latent_dim = ( model_config_dict["encoder_latent_dim"] or fcnet_hiddens[-1] ) diff --git a/rllib/core/models/tests/test_catalog.py b/rllib/core/models/tests/test_catalog.py index da22c2b1864f..bb3bb52c7110 100644 --- a/rllib/core/models/tests/test_catalog.py +++ b/rllib/core/models/tests/test_catalog.py @@ -83,12 +83,13 @@ def _check_model_outputs(self, model, framework, model_config_dict, input_space) outputs = model(inputs) self.assertEqual(outputs[ENCODER_OUT].shape, (32, latent_dim)) - tree.map_structure_with_path( - lambda p, v: ( - self.assertEqual(v.shape, states[p].shape) if v is not None else True - ), - outputs[STATE_OUT], - ) + if STATE_OUT in outputs: + tree.map_structure_with_path( + lambda p, v: ( + True if v is None else self.assertEqual(v.shape, states[p].shape) + ), + outputs[STATE_OUT], + ) def test_get_encoder_config(self): """Tests if we can create a bunch of encoders from the base catalog class.""" diff --git a/rllib/core/models/tf/encoder.py b/rllib/core/models/tf/encoder.py index f83b51e1b49e..71f308436b18 100644 --- a/rllib/core/models/tf/encoder.py +++ b/rllib/core/models/tf/encoder.py @@ -47,7 +47,10 @@ def __init__(self, config: CNNEncoderConfig) -> None: TfModel.__init__(self, config) Encoder.__init__(self, config) - layers = [] + # Add an input layer for the Sequential, created below. 
This is really + # important to be able to derive the model's trainable_variables early on + # (inside our Learners). + layers = [tf.keras.layers.Input(shape=config.input_dims)] # The bare-bones CNN (no flatten, no succeeding dense). cnn = TfCNN( input_dims=config.input_dims, diff --git a/rllib/core/rl_module/torch/torch_rl_module.py b/rllib/core/rl_module/torch/torch_rl_module.py index ce332ce6cf03..a31c6e758137 100644 --- a/rllib/core/rl_module/torch/torch_rl_module.py +++ b/rllib/core/rl_module/torch/torch_rl_module.py @@ -1,9 +1,13 @@ import pathlib -from typing import Any, Mapping, Union +from typing import Any, List, Mapping, Tuple, Union -from ray.rllib.core.rl_module import RLModule +from ray.rllib.core.rl_module.rl_module import RLModule +from ray.rllib.core.rl_module.rl_module_with_target_networks_interface import ( + RLModuleWithTargetNetworksInterface, +) from ray.rllib.utils.annotations import override from ray.rllib.utils.framework import try_import_torch +from ray.rllib.utils.typing import NetworkType torch, nn = try_import_torch() @@ -93,3 +97,12 @@ def _module_metadata(self, *args, **kwargs): @override(RLModule) def unwrapped(self) -> "RLModule": return self.module + + +class TorchDDPRLModuleWithTargetNetworksInterface( + TorchDDPRLModule, + RLModuleWithTargetNetworksInterface, +): + @override(RLModuleWithTargetNetworksInterface) + def get_target_network_pairs(self) -> List[Tuple[NetworkType, NetworkType]]: + return self.module.get_target_network_pairs() diff --git a/rllib/evaluation/postprocessing.py b/rllib/evaluation/postprocessing.py index 0b54c85bcb15..cf5653585d6b 100644 --- a/rllib/evaluation/postprocessing.py +++ b/rllib/evaluation/postprocessing.py @@ -2,12 +2,15 @@ import scipy.signal from typing import Dict, Optional +from ray.rllib.core.models.base import STATE_IN from ray.rllib.evaluation.episode import Episode from ray.rllib.policy.policy import Policy from ray.rllib.policy.sample_batch import SampleBatch from 
ray.rllib.utils.annotations import DeveloperAPI -from ray.rllib.utils.typing import AgentID +from ray.rllib.utils.nested_dict import NestedDict from ray.rllib.utils.numpy import convert_to_numpy +from ray.rllib.utils.torch_utils import convert_to_torch_tensor +from ray.rllib.utils.typing import AgentID @DeveloperAPI @@ -197,15 +200,18 @@ def compute_gae_for_sample_batch( # correct? Does this mean that I need to preserve the trajectory # information during training and compute the advantages inside the loss # function? - # TODO (Kourosh) - # Another thing I need to figure out is which end point to call here? - # forward_exploration? what if this method is getting called inside the - # learner loop? or via another abstraction like - # RLSampler.postprocess_trajectory() which is non-batched cpu/gpu task - # running across different processes for different trajectories? - # This implementation right now will compute even the action_dist which - # will not be needed but takes time to compute. - input_dict = policy._lazy_tensor_dict(input_dict) + # TODO (Kourosh): Another thing we need to figure out is which end point + # to call here (why forward_exploration)? What if this method is getting + # called inside the learner loop or via another abstraction like + # RLSampler.postprocess_trajectory() which is non-batched cpu/gpu task + # running across different processes for different trajectories? + # This implementation right now will compute even the action_dist which + # will not be needed but takes time to compute. + if policy.framework == "torch": + input_dict = convert_to_torch_tensor(input_dict) + # TODO (sven): Fix this once we support RNNs on the new stack. 
+ input_dict[STATE_IN] = input_dict[SampleBatch.SEQ_LENS] = None + input_dict = NestedDict(input_dict) fwd_out = policy.model.forward_exploration(input_dict) last_r = fwd_out[SampleBatch.VF_PREDS][-1] else: diff --git a/rllib/policy/eager_tf_policy_v2.py b/rllib/policy/eager_tf_policy_v2.py index 0fd14c72fd08..7e1e543b08ab 100644 --- a/rllib/policy/eager_tf_policy_v2.py +++ b/rllib/policy/eager_tf_policy_v2.py @@ -10,6 +10,7 @@ import tree # pip install dm_tree from typing import Dict, List, Optional, Tuple, Type, Union +from ray.rllib.core.models.base import STATE_IN from ray.rllib.evaluation.episode import Episode from ray.rllib.models.catalog import ModelCatalog from ray.rllib.models.modelv2 import ModelV2 @@ -39,6 +40,7 @@ NUM_GRAD_UPDATES_LIFETIME, ) from ray.rllib.utils.metrics.learner_info import LEARNER_STATS_KEY +from ray.rllib.utils.nested_dict import NestedDict from ray.rllib.utils.numpy import convert_to_numpy from ray.rllib.utils.spaces.space_utils import normalize_action from ray.rllib.utils.tf_utils import get_gpu_devices @@ -839,75 +841,81 @@ def _compute_actions_helper( # Add default and custom fetches. extra_fetches = {} - # Use Exploration object. 
- with tf.variable_creator_scope(_disallow_var_creation): - if self.config.get("_enable_rl_module_api", False): + if self.config.get("_enable_rl_module_api", False) is False: + scope = tf.variable_creator_scope(_disallow_var_creation) + scope.__enter__() - if explore: - fwd_out = self.model.forward_exploration(input_dict) - else: - fwd_out = self.model.forward_inference(input_dict) + if self.config.get("_enable_rl_module_api", False): + input_dict = NestedDict(input_dict) + input_dict[STATE_IN] = state_batches + input_dict[SampleBatch.SEQ_LENS] = seq_lens - action_dist = fwd_out[SampleBatch.ACTION_DIST] - if explore: - actions = action_dist.sample() - logp = action_dist.logp(actions) - else: - actions = action_dist.sample() - logp = None - state_out = fwd_out.get("state_out", {}) - - # anything but action_dist and state_out is an extra fetch - for k, v in fwd_out.items(): - if k not in [SampleBatch.ACTION_DIST, "state_out"]: - extra_fetches[k] = v - dist_inputs = None - - elif is_overridden(self.action_sampler_fn): - actions, logp, dist_inputs, state_out = self.action_sampler_fn( + if explore: + fwd_out = self.model.forward_exploration(input_dict) + else: + fwd_out = self.model.forward_inference(input_dict) + + action_dist = fwd_out[SampleBatch.ACTION_DIST] + if explore: + actions = action_dist.sample() + logp = action_dist.logp(actions) + else: + actions = action_dist.sample() + logp = None + state_out = fwd_out.get("state_out", {}) + + # anything but action_dist and state_out is an extra fetch + for k, v in fwd_out.items(): + if k not in [SampleBatch.ACTION_DIST, "state_out"]: + extra_fetches[k] = v + dist_inputs = None + + elif is_overridden(self.action_sampler_fn): + actions, logp, dist_inputs, state_out = self.action_sampler_fn( + self.model, + input_dict[SampleBatch.OBS], + explore=explore, + timestep=timestep, + episodes=episodes, + ) + else: + if is_overridden(self.action_distribution_fn): + # Try new action_distribution_fn signature, supporting + # 
state_batches and seq_lens. + ( + dist_inputs, + self.dist_class, + state_out, + ) = self.action_distribution_fn( self.model, - input_dict[SampleBatch.OBS], + obs_batch=input_dict[SampleBatch.OBS], + state_batches=state_batches, + seq_lens=seq_lens, explore=explore, timestep=timestep, - episodes=episodes, + is_training=False, ) + elif isinstance(self.model, tf.keras.Model): + input_dict = SampleBatch(input_dict, seq_lens=seq_lens) + if state_batches and "state_in_0" not in input_dict: + for i, s in enumerate(state_batches): + input_dict[f"state_in_{i}"] = s + self._lazy_tensor_dict(input_dict) + dist_inputs, state_out, extra_fetches = self.model(input_dict) else: - if is_overridden(self.action_distribution_fn): - # Try new action_distribution_fn signature, supporting - # state_batches and seq_lens. - ( - dist_inputs, - self.dist_class, - state_out, - ) = self.action_distribution_fn( - self.model, - obs_batch=input_dict[SampleBatch.OBS], - state_batches=state_batches, - seq_lens=seq_lens, - explore=explore, - timestep=timestep, - is_training=False, - ) - elif isinstance(self.model, tf.keras.Model): - input_dict = SampleBatch(input_dict, seq_lens=seq_lens) - if state_batches and "state_in_0" not in input_dict: - for i, s in enumerate(state_batches): - input_dict[f"state_in_{i}"] = s - self._lazy_tensor_dict(input_dict) - dist_inputs, state_out, extra_fetches = self.model(input_dict) - else: - dist_inputs, state_out = self.model( - input_dict, state_batches, seq_lens - ) + dist_inputs, state_out = self.model(input_dict, state_batches, seq_lens) - action_dist = self.dist_class(dist_inputs, self.model) + action_dist = self.dist_class(dist_inputs, self.model) - # Get the exploration action from the forward results. - actions, logp = self.exploration.get_exploration_action( - action_distribution=action_dist, - timestep=timestep, - explore=explore, - ) + # Get the exploration action from the forward results. 
+ actions, logp = self.exploration.get_exploration_action( + action_distribution=action_dist, + timestep=timestep, + explore=explore, + ) + + if self.config.get("_enable_rl_module_api", False) is False: + scope.__exit__(None, None, None) # Action-logp and action-prob. if logp is not None: diff --git a/rllib/policy/torch_policy_v2.py b/rllib/policy/torch_policy_v2.py index 79546b0623ba..ea648fd912ad 100644 --- a/rllib/policy/torch_policy_v2.py +++ b/rllib/policy/torch_policy_v2.py @@ -1179,7 +1179,6 @@ def _compute_action_helper( return convert_to_numpy((actions, state_out, extra_fetches)) def _lazy_tensor_dict(self, postprocessed_batch: SampleBatch, device=None): - # TODO: (sven): Keep for a while to ensure backward compatibility. if not isinstance(postprocessed_batch, SampleBatch): postprocessed_batch = SampleBatch(postprocessed_batch) postprocessed_batch.set_get_interceptor( diff --git a/rllib/tuned_examples/alpha_star/multi-agent-cartpole-alpha-star.yaml b/rllib/tuned_examples/alpha_star/multi-agent-cartpole-alpha-star.yaml index 6a5db5c0d3a4..daade938b441 100644 --- a/rllib/tuned_examples/alpha_star/multi-agent-cartpole-alpha-star.yaml +++ b/rllib/tuned_examples/alpha_star/multi-agent-cartpole-alpha-star.yaml @@ -22,7 +22,6 @@ multi-agent-cartpole-alpha-star: num_sgd_iter: 1 vf_loss_coeff: 0.005 vtrace: true - vtrace_drop_last_ts: false model: fcnet_hiddens: [32] fcnet_activation: linear diff --git a/rllib/tuned_examples/appo/cartpole-appo-vtrace-fake-gpus.yaml b/rllib/tuned_examples/appo/cartpole-appo-vtrace-fake-gpus.yaml index fe69576c232f..73581ac2b267 100644 --- a/rllib/tuned_examples/appo/cartpole-appo-vtrace-fake-gpus.yaml +++ b/rllib/tuned_examples/appo/cartpole-appo-vtrace-fake-gpus.yaml @@ -13,7 +13,6 @@ cartpole-appo-vtrace-fake-gpus: num_sgd_iter: 6 vf_loss_coeff: 0.01 vtrace: true - vtrace_drop_last_ts: false # Double batch size (2 GPUs). 
train_batch_size: 1000 diff --git a/rllib/tuned_examples/appo/cartpole-appo-vtrace.yaml b/rllib/tuned_examples/appo/cartpole-appo-vtrace.yaml index c7a36a550b8b..1c4a9755a214 100644 --- a/rllib/tuned_examples/appo/cartpole-appo-vtrace.yaml +++ b/rllib/tuned_examples/appo/cartpole-appo-vtrace.yaml @@ -14,7 +14,6 @@ cartpole-appo-vtrace: num_sgd_iter: 1 vf_loss_coeff: 0.01 vtrace: true - vtrace_drop_last_ts: false model: fcnet_hiddens: [32] fcnet_activation: linear diff --git a/rllib/tuned_examples/appo/frozenlake-appo-vtrace.yaml b/rllib/tuned_examples/appo/frozenlake-appo-vtrace.yaml index 9e51375b1151..52587329d163 100644 --- a/rllib/tuned_examples/appo/frozenlake-appo-vtrace.yaml +++ b/rllib/tuned_examples/appo/frozenlake-appo-vtrace.yaml @@ -24,7 +24,6 @@ frozenlake-appo-vtrace: rollout_fragment_length: 10 batch_mode: complete_episodes vtrace: true - vtrace_drop_last_ts: false num_envs_per_worker: 5 num_workers: 4 diff --git a/rllib/tuned_examples/appo/multi-agent-cartpole-appo.yaml b/rllib/tuned_examples/appo/multi-agent-cartpole-appo.yaml index dfee4a821091..71e521aefe20 100644 --- a/rllib/tuned_examples/appo/multi-agent-cartpole-appo.yaml +++ b/rllib/tuned_examples/appo/multi-agent-cartpole-appo.yaml @@ -22,7 +22,6 @@ multi-agent-cartpole-appo: num_sgd_iter: 1 vf_loss_coeff: 0.005 vtrace: true - vtrace_drop_last_ts: false model: fcnet_hiddens: [32] fcnet_activation: linear diff --git a/rllib/tuned_examples/appo/multi-agent-cartpole-w-100-policies-appo.py b/rllib/tuned_examples/appo/multi-agent-cartpole-w-100-policies-appo.py index 551c1e578562..0af819627e9b 100644 --- a/rllib/tuned_examples/appo/multi-agent-cartpole-w-100-policies-appo.py +++ b/rllib/tuned_examples/appo/multi-agent-cartpole-w-100-policies-appo.py @@ -32,7 +32,6 @@ num_sgd_iter=1, vf_loss_coeff=0.005, vtrace=True, - vtrace_drop_last_ts=False, ) .multi_agent( # 2 agents per sub-env. 
diff --git a/rllib/tuned_examples/appo/pong-appo-w-rl-modules-and-learner.yaml b/rllib/tuned_examples/appo/pong-appo-w-rl-modules-and-learner.yaml index 97f73eb10a9c..79a6fbf2aef5 100644 --- a/rllib/tuned_examples/appo/pong-appo-w-rl-modules-and-learner.yaml +++ b/rllib/tuned_examples/appo/pong-appo-w-rl-modules-and-learner.yaml @@ -15,7 +15,6 @@ pong-appo: full_action_space: false repeat_action_probability: 0.0 # deterministic vtrace: true - #vtrace_drop_last_ts: false use_kl_loss: false rollout_fragment_length: 50 train_batch_size: 4000 diff --git a/rllib/tuned_examples/impala/cartpole-impala-fake-gpus.yaml b/rllib/tuned_examples/impala/cartpole-impala-fake-gpus.yaml index 4dbb8794612c..1281e75e3f94 100644 --- a/rllib/tuned_examples/impala/cartpole-impala-fake-gpus.yaml +++ b/rllib/tuned_examples/impala/cartpole-impala-fake-gpus.yaml @@ -17,5 +17,3 @@ cartpole-impala-fake-gpus: # Fake 2 GPUs. num_gpus: 2 _fake_gpus: true - - vtrace_drop_last_ts: false diff --git a/rllib/tuned_examples/impala/cartpole-impala.yaml b/rllib/tuned_examples/impala/cartpole-impala.yaml index 63f3d7b322da..46c37c52ea69 100644 --- a/rllib/tuned_examples/impala/cartpole-impala.yaml +++ b/rllib/tuned_examples/impala/cartpole-impala.yaml @@ -8,7 +8,6 @@ cartpole-impala: # Works for both torch and tf. 
framework: tf2 num_gpus: 0 - vtrace_drop_last_ts: false _enable_rl_module_api: True _enable_learner_api: True grad_clip: 40 diff --git a/rllib/tuned_examples/impala/multi-agent-cartpole-impala.yaml b/rllib/tuned_examples/impala/multi-agent-cartpole-impala.yaml index e47be62be3f4..56f2ac106207 100644 --- a/rllib/tuned_examples/impala/multi-agent-cartpole-impala.yaml +++ b/rllib/tuned_examples/impala/multi-agent-cartpole-impala.yaml @@ -22,7 +22,6 @@ multi-agent-cartpole-impala: num_sgd_iter: 1 vf_loss_coeff: 0.005 vtrace: true - vtrace_drop_last_ts: false model: fcnet_hiddens: [32] fcnet_activation: linear diff --git a/rllib/utils/torch_utils.py b/rllib/utils/torch_utils.py index 85907ba9d1f5..6349273732e9 100644 --- a/rllib/utils/torch_utils.py +++ b/rllib/utils/torch_utils.py @@ -114,32 +114,40 @@ def clip_gradients( # Clip by value (each gradient individually). if grad_clip_by == "value": for k, v in gradients_dict.copy().items(): - gradients_dict[k] = torch.clip(v, -grad_clip, grad_clip) + gradients_dict[k] = ( + None if v is None else torch.clip(v, -grad_clip, grad_clip) + ) # Clip by L2-norm (per gradient tensor). elif grad_clip_by == "norm": for k, v in gradients_dict.copy().items(): - gradients_dict[k] = nn.utils.clip_grad_norm_(v, grad_clip) + gradients_dict[k] = ( + None if v is None else nn.utils.clip_grad_norm_(v, grad_clip) + ) # Clip by global L2-norm (across all gradient tensors). else: - assert grad_clip_by == "global_norm" + assert ( + grad_clip_by == "global_norm" + ), f"`grad_clip_by` ({grad_clip_by}) must be one of [value|norm|global_norm]!" # Compute the global L2-norm of all the gradient tensors. - grad_tensors = gradients_dict.values() - total_l2_norm = 0.0 - for tensor in grad_tensors: + total_l2_norm = sum( # `.norm()` is the square root of the sum of all squares. # We need to "undo" the square root b/c we want to compute the global # norm afterwards -> `** 2`. 
- total_l2_norm += tensor.norm(2) ** 2 + t.norm(2) ** 2 + for t in gradients_dict.values() + if t is not None + ) # Now we do the square root. total_l2_norm = torch.sqrt(total_l2_norm) # Clip all the gradients. if total_l2_norm > grad_clip: - for tensor in grad_tensors: - tensor.mul_(grad_clip / total_l2_norm) + for tensor in gradients_dict.values(): + if tensor is not None: + tensor.mul_(grad_clip / total_l2_norm) @PublicAPI From 8064fea1f1aa9a6ffc9b97d7a0898a1c63c7fded Mon Sep 17 00:00:00 2001 From: Yi Cheng <74173148+iycheng@users.noreply.github.com> Date: Fri, 5 May 2023 13:11:09 -0700 Subject: [PATCH 262/424] Revert "Deflakey test advanced 9 (#34883)" (#35090) This reverts commit d60e73ed5657aa0c991bd2afdaadab502d993327. --- .../serve/tests/test_controller_recovery.py | 6 +-- python/ray/tests/test_advanced_9.py | 2 +- python/ray/tests/test_failure_3.py | 13 ++---- src/ray/common/client_connection.h | 6 --- src/ray/core_worker/core_worker.cc | 8 ++-- src/ray/gcs/gcs_client/accessor.cc | 4 +- .../gcs/gcs_client/test/gcs_client_test.cc | 40 +++++++++++++++++++ src/ray/gcs/gcs_server/gcs_actor_manager.cc | 19 ++++----- src/ray/gcs/gcs_server/pubsub_handler.cc | 1 + src/ray/gcs/pb_util.h | 2 - src/ray/raylet/node_manager.cc | 24 +++-------- 11 files changed, 65 insertions(+), 60 deletions(-) diff --git a/python/ray/serve/tests/test_controller_recovery.py b/python/ray/serve/tests/test_controller_recovery.py index e3471c9b3ba5..77d262c26ee2 100644 --- a/python/ray/serve/tests/test_controller_recovery.py +++ b/python/ray/serve/tests/test_controller_recovery.py @@ -238,10 +238,8 @@ def get_actor_info(name: str): _, controller1_pid = get_actor_info(SERVE_CONTROLLER_NAME) ray.kill(serve.context._global_client._controller, no_restart=False) # wait for controller is alive again - wait_for_condition( - lambda: get_actor_info(SERVE_CONTROLLER_NAME) is not None - and get_actor_info(SERVE_CONTROLLER_NAME)[1] != controller1_pid - ) + wait_for_condition(get_actor_info, 
name=SERVE_CONTROLLER_NAME) + assert controller1_pid != get_actor_info(SERVE_CONTROLLER_NAME)[1] # Let the actor proceed initialization ray.get(signal.send.remote()) diff --git a/python/ray/tests/test_advanced_9.py b/python/ray/tests/test_advanced_9.py index accddc1b3164..b61e5aac9216 100644 --- a/python/ray/tests/test_advanced_9.py +++ b/python/ray/tests/test_advanced_9.py @@ -258,7 +258,7 @@ def ready(self): run_string_as_driver(script.format(address=call_ray_start_2, val=2)) -@pytest.mark.skipif(sys.platform == "win32", reason="Not valid on win32.") +@pytest.mark.skipif(sys.platform != "linux", reason="Only works on linux.") def test_gcs_connection_no_leak(ray_start_cluster): cluster = ray_start_cluster head_node = cluster.add_node() diff --git a/python/ray/tests/test_failure_3.py b/python/ray/tests/test_failure_3.py index b666a9157e72..926b7c76ec01 100644 --- a/python/ray/tests/test_failure_3.py +++ b/python/ray/tests/test_failure_3.py @@ -365,8 +365,6 @@ def test_no_worker_child_process_leaks(ray_start_cluster, tmp_path): the list of PIDs that are children of the Ray worker processes. """ - ray_start_cluster.add_node() - ray_start_cluster.wait_for_nodes() output_file_path = tmp_path / "leaked_pids.json" driver_script = f""" @@ -376,7 +374,7 @@ def test_no_worker_child_process_leaks(ray_start_cluster, tmp_path): import shutil import time import os -ray.init("{ray_start_cluster.address}") + @ray.remote class Actor: def create_leaked_child_process(self, num_to_leak): @@ -426,6 +424,7 @@ def task(): print(os.getpid()) time.sleep(1) """ + driver_proc = run_string_as_driver_nonblocking(driver_script) # Wait for the json file containing the child PIDS @@ -444,15 +443,9 @@ def task(): assert all([proc.status() == psutil.STATUS_SLEEPING for proc in processes]) # Valdiate children of worker process die after SIGINT. 
- def check(): - for proc in processes: - if proc.is_running(): - print(proc) - return all([not proc.is_running() for proc in processes]) - driver_proc.send_signal(signal.SIGINT) wait_for_condition( - condition_predictor=check, + condition_predictor=lambda: all([not proc.is_running() for proc in processes]), timeout=30, ) diff --git a/src/ray/common/client_connection.h b/src/ray/common/client_connection.h index 9a86ffb808e8..89d30fbbcdbc 100644 --- a/src/ray/common/client_connection.h +++ b/src/ray/common/client_connection.h @@ -125,12 +125,6 @@ class ServerConnection : public std::enable_shared_from_this { std::string DebugString() const; - void AsyncWaitTerminated(std::function callback) { - // Async wait until the connection is disconnected. - socket_.async_wait(local_stream_socket::wait_type::wait_error, - [callback = std::move(callback)](auto) { callback(); }); - } - protected: /// A private constructor for a server connection. ServerConnection(local_stream_socket &&socket); diff --git a/src/ray/core_worker/core_worker.cc b/src/ray/core_worker/core_worker.cc index 34f1ba1fc6cb..af09bb383ed7 100644 --- a/src/ray/core_worker/core_worker.cc +++ b/src/ray/core_worker/core_worker.cc @@ -785,9 +785,8 @@ void CoreWorker::Exit( detail = std::move(detail), creation_task_exception_pb_bytes]() { rpc::DrainServerCallExecutor(); - KillChildProcs(); - // Disconnect here after KillChildProcs to make the Raylet async wait shorter. Disconnect(exit_type, detail, creation_task_exception_pb_bytes); + KillChildProcs(); Shutdown(); }, "CoreWorker.Shutdown"); @@ -831,11 +830,10 @@ void CoreWorker::ForceExit(const rpc::WorkerExitType exit_type, const std::string &detail) { RAY_LOG(WARNING) << "Force exit the process. " << " Details: " << detail; - KillChildProcs(); - - // Disconnect here before KillChildProcs to make the Raylet async wait shorter. Disconnect(exit_type, detail); + KillChildProcs(); + // NOTE(hchen): Use `QuickExit()` to force-exit this process without doing cleanup. 
// `exit()` will destruct static objects in an incorrect order, which will lead to // core dumps. diff --git a/src/ray/gcs/gcs_client/accessor.cc b/src/ray/gcs/gcs_client/accessor.cc index 358b3940e6dc..ccb225a62931 100644 --- a/src/ray/gcs/gcs_client/accessor.cc +++ b/src/ray/gcs/gcs_client/accessor.cc @@ -852,9 +852,7 @@ Status WorkerInfoAccessor::AsyncReportWorkerFailure( const std::shared_ptr &data_ptr, const StatusCallback &callback) { rpc::Address worker_address = data_ptr->worker_address(); - RAY_LOG(DEBUG) << "Reporting worker failure, " << worker_address.DebugString() - << " WorkerID=" << WorkerID::FromBinary(worker_address.worker_id()) - << " NodeID=" << NodeID::FromBinary(worker_address.raylet_id()); + RAY_LOG(DEBUG) << "Reporting worker failure, " << worker_address.DebugString(); rpc::ReportWorkerFailureRequest request; request.mutable_worker_failure()->CopyFrom(*data_ptr); client_impl_->GetGcsRpcClient().ReportWorkerFailure( diff --git a/src/ray/gcs/gcs_client/test/gcs_client_test.cc b/src/ray/gcs/gcs_client/test/gcs_client_test.cc index 10325d448f5e..d3baeeb964d0 100644 --- a/src/ray/gcs/gcs_client/test/gcs_client_test.cc +++ b/src/ray/gcs/gcs_client/test/gcs_client_test.cc @@ -947,6 +947,46 @@ TEST_P(GcsClientTest, DISABLED_TestGetActorPerf) { << actor_count << " actors."; } +TEST_P(GcsClientTest, TestEvictExpiredDestroyedActors) { + // Restart doesn't work with in memory storage + if (RayConfig::instance().gcs_storage() == "memory") { + return; + } + // Register actors and the actors will be destroyed. + JobID job_id = JobID::FromInt(1); + AddJob(job_id); + absl::flat_hash_set actor_ids; + int actor_count = RayConfig::instance().maximum_gcs_destroyed_actor_cached_count(); + for (int index = 0; index < actor_count; ++index) { + auto actor_table_data = Mocker::GenActorTableData(job_id); + RegisterActor(actor_table_data, false); + actor_ids.insert(ActorID::FromBinary(actor_table_data->actor_id())); + } + + // Restart GCS. 
+ RestartGcsServer(); + + for (int index = 0; index < actor_count; ++index) { + auto actor_table_data = Mocker::GenActorTableData(job_id); + RegisterActor(actor_table_data, false); + actor_ids.insert(ActorID::FromBinary(actor_table_data->actor_id())); + } + + // NOTE: GCS will not reply when actor registration fails, so when GCS restarts, gcs + // client will register the actor again and the status of the actor may be + // `DEPENDENCIES_UNREADY` or `DEAD`. We should get all dead actors. + auto condition = [this]() { + return GetAllActors(true).size() == + RayConfig::instance().maximum_gcs_destroyed_actor_cached_count(); + }; + EXPECT_TRUE(WaitForCondition(condition, timeout_ms_.count())); + + auto actors = GetAllActors(true); + for (const auto &actor : actors) { + EXPECT_TRUE(actor_ids.contains(ActorID::FromBinary(actor.actor_id()))); + } +} + TEST_P(GcsClientTest, TestEvictExpiredDeadNodes) { // Restart GCS. RestartGcsServer(); diff --git a/src/ray/gcs/gcs_server/gcs_actor_manager.cc b/src/ray/gcs/gcs_server/gcs_actor_manager.cc index e6a347ddebe1..ee328510ea82 100644 --- a/src/ray/gcs/gcs_server/gcs_actor_manager.cc +++ b/src/ray/gcs/gcs_server/gcs_actor_manager.cc @@ -761,8 +761,7 @@ void GcsActorManager::PollOwnerForActorOutOfScope( auto it = workers.find(owner_id); if (it == workers.end()) { RAY_LOG(DEBUG) << "Adding owner " << owner_id << " of actor " << actor_id - << ", job id = " << actor_id.JobId() - << " owner node id = " << owner_node_id; + << ", job id = " << actor_id.JobId(); std::shared_ptr client = worker_client_factory_(actor->GetOwnerAddress()); it = workers.emplace(owner_id, Owner(std::move(client))).first; @@ -777,15 +776,14 @@ void GcsActorManager::PollOwnerForActorOutOfScope( [this, owner_node_id, owner_id, actor_id]( Status status, const rpc::WaitForActorOutOfScopeReply &reply) { if (!status.ok()) { - RAY_LOG(WARNING) << "Failed to wait for actor " << actor_id - << " out of scope, job id = " << actor_id.JobId() - << ", error: " << 
status.ToString(); - // TODO(iycheng): Retry it in other PR. - return; + RAY_LOG(INFO) << "Worker " << owner_id + << " failed, destroying actor child, job id = " + << actor_id.JobId(); + } else { + RAY_LOG(INFO) << "Actor " << actor_id + << " is out of scope, destroying actor, job id = " + << actor_id.JobId(); } - RAY_LOG(INFO) << "Actor " << actor_id - << " is out of scope, destroying actor, job id = " - << actor_id.JobId(); auto node_it = owners_.find(owner_node_id); if (node_it != owners_.end() && node_it->second.count(owner_id)) { @@ -959,7 +957,6 @@ void GcsActorManager::OnWorkerDead(const ray::NodeID &node_id, bool need_reconstruct = disconnect_type != rpc::WorkerExitType::INTENDED_USER_EXIT && disconnect_type != rpc::WorkerExitType::USER_ERROR; - // Destroy all actors that are owned by this worker. const auto it = owners_.find(node_id); if (it != owners_.end() && it->second.count(worker_id)) { diff --git a/src/ray/gcs/gcs_server/pubsub_handler.cc b/src/ray/gcs/gcs_server/pubsub_handler.cc index cf1417b35220..cf34b4f1e8a6 100644 --- a/src/ray/gcs/gcs_server/pubsub_handler.cc +++ b/src/ray/gcs/gcs_server/pubsub_handler.cc @@ -104,6 +104,7 @@ void InternalPubSubHandler::HandleGcsSubscriberCommandBatch( if (sender_id.empty()) { sender_id = request.subscriber_id(); } + auto iter = sender_to_subscribers_.find(sender_id); if (iter == sender_to_subscribers_.end()) { iter = sender_to_subscribers_.insert({sender_id, {}}).first; diff --git a/src/ray/gcs/pb_util.h b/src/ray/gcs/pb_util.h index 7aa91e6538da..7f99aa35924d 100644 --- a/src/ray/gcs/pb_util.h +++ b/src/ray/gcs/pb_util.h @@ -107,7 +107,6 @@ inline std::shared_ptr CreateActorTableData( /// Helper function to produce worker failure data. 
inline std::shared_ptr CreateWorkerFailureData( - const NodeID &node_id, const WorkerID &worker_id, int64_t timestamp, rpc::WorkerExitType disconnect_type, @@ -118,7 +117,6 @@ inline std::shared_ptr CreateWorkerFailureData( // Only report the worker id + delta (new data upon worker failures). // GCS will merge the data with original worker data. worker_failure_info_ptr->mutable_worker_address()->set_worker_id(worker_id.Binary()); - worker_failure_info_ptr->mutable_worker_address()->set_raylet_id(node_id.Binary()); worker_failure_info_ptr->set_timestamp(timestamp); worker_failure_info_ptr->set_exit_type(disconnect_type); worker_failure_info_ptr->set_exit_detail(disconnect_detail); diff --git a/src/ray/raylet/node_manager.cc b/src/ray/raylet/node_manager.cc index 4895222e52c8..4fd965c5b940 100644 --- a/src/ray/raylet/node_manager.cc +++ b/src/ray/raylet/node_manager.cc @@ -1480,13 +1480,15 @@ void NodeManager::DisconnectClient(const std::shared_ptr &clie } // Publish the worker failure. auto worker_failure_data_ptr = - gcs::CreateWorkerFailureData(self_node_id_, - worker->WorkerId(), + gcs::CreateWorkerFailureData(worker->WorkerId(), time(nullptr), disconnect_type, disconnect_detail, worker->GetProcess().GetId(), creation_task_exception); + RAY_CHECK_OK( + gcs_client_->Workers().AsyncReportWorkerFailure(worker_failure_data_ptr, nullptr)); + if (is_worker) { const ActorID &actor_id = worker->GetActorId(); const TaskID &task_id = worker->GetAssignedTaskId(); @@ -1561,23 +1563,9 @@ void NodeManager::DisconnectClient(const std::shared_ptr &clie local_task_manager_->ClearWorkerBacklog(worker->WorkerId()); cluster_task_manager_->CancelTaskForOwner(worker->GetAssignedTaskId()); -#ifdef _WIN32 - // On Windows, when the worker is killed, client async wait won't get notified - // somehow. 
- RAY_CHECK_OK( - gcs_client_->Workers().AsyncReportWorkerFailure(worker_failure_data_ptr, nullptr)); + client->Close(); -#else - // ReportWorkerFailure should happen after the worker exit completely. - // A better way is to monitor the pid exit. But that needs Process.h - // support async operation. - // Here we monitor the socket to achieve similar result. - // When the worker exited, the pid will be disconnected (local stream socket). - client->AsyncWaitTerminated([client, worker_failure_data_ptr, this] { - RAY_CHECK_OK(gcs_client_->Workers().AsyncReportWorkerFailure(worker_failure_data_ptr, - nullptr)); - }); -#endif + // TODO(rkn): Tell the object manager that this client has disconnected so // that it can clean up the wait requests for this client. Currently I think // these can be leaked. From b2e4e0dc87970dcf59b267046082dcab0d603c30 Mon Sep 17 00:00:00 2001 From: Edward Oakes Date: Fri, 5 May 2023 15:21:56 -0500 Subject: [PATCH 263/424] [serve] Clean up API reference and various docstrings (#34711) Cleans up the API reference page formatting, adds/updates docstrings where applicable (corrects typing + verbiage, adds inline examples). 
Co-authored-by: Cindy Zhang Co-authored-by: angelinalg <122562471+angelinalg@users.noreply.github.com> --- .../autosummary/class_without_init_args.rst | 6 + doc/source/conf.py | 5 + doc/source/serve/api/index.md | 393 +++++++++++++++++- doc/source/serve/api/python_api.md | 53 --- doc/source/serve/api/rest_api.md | 317 -------------- doc/source/serve/api/serve_cli.md | 9 - python/ray/serve/__init__.py | 2 + python/ray/serve/api.py | 216 +++++----- python/ray/serve/batching.py | 38 +- python/ray/serve/built_application.py | 6 +- python/ray/serve/deployment.py | 88 ++-- python/ray/serve/handle.py | 11 +- python/ray/serve/scripts.py | 18 +- 13 files changed, 611 insertions(+), 551 deletions(-) create mode 100644 doc/source/_templates/autosummary/class_without_init_args.rst delete mode 100644 doc/source/serve/api/python_api.md delete mode 100644 doc/source/serve/api/rest_api.md delete mode 100644 doc/source/serve/api/serve_cli.md diff --git a/doc/source/_templates/autosummary/class_without_init_args.rst b/doc/source/_templates/autosummary/class_without_init_args.rst new file mode 100644 index 000000000000..643b939c2eb4 --- /dev/null +++ b/doc/source/_templates/autosummary/class_without_init_args.rst @@ -0,0 +1,6 @@ +{{ fullname | escape | underline}} + +.. currentmodule:: {{ module }} + +.. 
autoclass:: {{ objname }}() + :members: diff --git a/doc/source/conf.py b/doc/source/conf.py index 49624d214126..f54836737bb7 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -432,3 +432,8 @@ def setup(app): ] redoc_uri = "https://cdn.redoc.ly/redoc/latest/bundles/redoc.standalone.js" + +autosummary_filename_map = { + "ray.serve.deployment": "ray.serve.deployment_decorator", + "ray.serve.Deployment": "ray.serve.Deployment", +} diff --git a/doc/source/serve/api/index.md b/doc/source/serve/api/index.md index c124fa6b2bf5..9b6a9dbc024c 100644 --- a/doc/source/serve/api/index.md +++ b/doc/source/serve/api/index.md @@ -1,12 +1,395 @@ (serve-api)= # Ray Serve API -```{toctree} -:maxdepth: '-1' +## Python API -python_api -rest_api -serve_cli +(core-apis)= + +```{eval-rst} +.. module:: ray +``` + +### Writing Applications + + + +#### Deployment Decorators + +```{eval-rst} +.. autosummary:: + :nosignatures: + :toctree: doc/ + + serve.deployment + :noindex: + serve.ingress + serve.batch +``` + +#### Object Types + +```{eval-rst} +.. autosummary:: + :nosignatures: + :toctree: doc/ + :template: autosummary/class_without_init_args.rst + + serve.Deployment + serve.Application + serve.handle.RayServeHandle + serve.handle.RayServeSyncHandle +``` + +#### Advanced APIs + +```{eval-rst} +.. autosummary:: + :nosignatures: + :toctree: doc/ + + serve.get_replica_context +``` + +### Running Applications + +```{eval-rst} +.. autosummary:: + :nosignatures: + :toctree: doc/ + + serve.run + serve.delete + serve.start + serve.shutdown +``` + +(serve-cli)= + +## Command Line Interface (CLI) + +```{eval-rst} +.. click:: ray.serve.scripts:cli + :prog: serve + :nested: full +``` + +(serve-rest-api)= + +## Serve REST API + +### V1 REST API (Single-application) + +#### `PUT "/api/serve/deployments/"` + +Declaratively deploys the Serve application. Starts Serve on the Ray cluster if it's not already running. 
See [single-app config schema](serve-rest-api-config-schema) for the request's JSON schema. + +**Example Request**: + +```http +PUT /api/serve/deployments/ HTTP/1.1 +Host: http://localhost:52365/ +Accept: application/json +Content-Type: application/json + +{ + "import_path": "fruit.deployment_graph", + "runtime_env": { + "working_dir": "https://github.com/ray-project/serve_config_examples/archive/HEAD.zip" + }, + "deployments": [ + {"name": "MangoStand", "user_config": {"price": 1}}, + {"name": "OrangeStand", "user_config": {"price": 2}}, + {"name": "PearStand", "user_config": {"price": 3}} + ] +} +``` + +**Example Response** + + +```http +HTTP/1.1 200 OK +Content-Type: application/json +``` + +#### `GET "/api/serve/deployments/"` + +Gets the config for the application currently deployed on the Ray cluster. This config represents the current goal state for the Serve application. See [single-app config schema](serve-rest-api-config-schema) for the response's JSON schema. + +**Example Request**: +```http +GET /api/serve/deployments/ HTTP/1.1 +Host: http://localhost:52365/ +Accept: application/json +``` + +**Example Response**: + +```http +HTTP/1.1 200 OK +Content-Type: application/json + +{ + "import_path": "fruit.deployment_graph", + "runtime_env": { + "working_dir": "https://github.com/ray-project/serve_config_examples/archive/HEAD.zip" + }, + "deployments": [ + {"name": "MangoStand", "user_config": {"price": 1}}, + {"name": "OrangeStand", "user_config": {"price": 2}}, + {"name": "PearStand", "user_config": {"price": 3}} + ] +} +``` + + +#### `GET "/api/serve/deployments/status"` + +Gets the Serve application's current status, including all the deployment statuses. See [status schema](serve-rest-api-response-schema) for the response's JSON schema. 
+ +**Example Request**: + +```http +GET /api/serve/deployments/ HTTP/1.1 +Host: http://localhost:52365/ +Accept: application/json +``` + +**Example Response** + +```http +HTTP/1.1 200 OK +Content-Type: application/json + +{ + "app_status": { + "status": "RUNNING", + "message": "", + "deployment_timestamp": 1855994527.146304 + }, + "deployment_statuses": [ + { + "name": "MangoStand", + "status": "HEALTHY", + "message": "" + }, + { + "name": "OrangeStand", + "status": "HEALTHY", + "message": "" + }, + { + "name": "PearStand", + "status": "HEALTHY", + "message": "" + }, + { + "name": "FruitMarket", + "status": "HEALTHY", + "message": "" + }, + { + "name": "DAGDriver", + "status": "HEALTHY", + "message": "" + } + ] +} +``` + +#### `DELETE "/api/serve/deployments/"` + +Shuts down Serve and the Serve application running on the Ray cluster. Has no effect if Serve is not running on the Ray cluster. + +**Example Request**: + +```http +DELETE /api/serve/deployments/ HTTP/1.1 +Host: http://localhost:52365/ +Accept: application/json +``` + +**Example Response** + +```http +HTTP/1.1 200 OK +Content-Type: application/json +``` + +### V2 REST API (Multi-application) + +#### `PUT "/api/serve/applications/"` + +Declaratively deploys a list of Serve applications. If Serve is already running on the Ray cluster, removes all applications not listed in the new config. If Serve is not running on the Ray cluster, starts Serve. See [multi-app config schema](serve-rest-api-config-schema) for the request's JSON schema. 
+ +**Example Request**: + +```http +PUT /api/serve/applications/ HTTP/1.1 +Host: http://localhost:52365/ +Accept: application/json +Content-Type: application/json + +{ + "applications": [ + { + "name": "fruit_stand", + "route_prefix": "/fruit", + "import_path": "fruit.deployment_graph", + "runtime_env": { + "working_dir": "https://github.com/ray-project/serve_config_examples/archive/HEAD.zip" + }, + "deployments": [ + {"name": "MangoStand", "user_config": {"price": 1}}, + {"name": "OrangeStand", "user_config": {"price": 2}}, + {"name": "PearStand", "user_config": {"price": 3}} + ] + }, + { + "name": "calculator", + "route_prefix": "/math", + "import_path": "conditional_dag.serve_dag", + "runtime_env": { + "working_dir": "https://github.com/ray-project/test_dag/archive/HEAD.zip" + }, + "deployments": [ + {"name": "Multiplier", "ray_actor_options": {"num_cpus": 0.5}}, + { + "name": "Adder", + "ray_actor_options": {"env_vars": {"override_increment": "5"}} + }, + ] + } + ] +} ``` + +**Example Response** + + +```http +HTTP/1.1 200 OK +Content-Type: application/json +``` + +#### `GET "/api/serve/applications/"` + +Gets cluster-level info and comprehensive details on all Serve applications deployed on the Ray cluster. See [metadata schema](serve-rest-api-response-schema) for the response's JSON schema. 
+ +```http +GET /api/serve/applications/ HTTP/1.1 +Host: http://localhost:52365/ +Accept: application/json +``` + +**Example Response (abridged JSON)**: + +```http +HTTP/1.1 200 OK +Content-Type: application/json + +{ + "proxy_location": "HeadOnly", + "http_options": { + "host": "127.0.0.1", + "port": 8000 + }, + "deploy_mode": "MULTI_APP", + "applications": { + "fruit_stand": { + "name": "fruit_stand", + "route_prefix": "/fruit", + "docs_path": null, + "status": "RUNNING", + "message": "", + "last_deployed_time_s": 1679952253.748111, + "deployed_app_config": "...", + "deployments": { + "fruit_app_MangoStand": { + "name": "fruit_app_MangoStand", + "status": "HEALTHY", + "message": "", + "deployment_config": "...", + "replicas": [ + { + "replica_id": "fruit_app_MangoStand#bSkrHK", + "state": "RUNNING", + "pid": 59350, + "actor_name": "...", + "actor_id": "...", + "node_id": "...", + "node_ip": "...", + "start_time_s": 1679952254.3458009 + } + ] + }, + } + }, + } +} +``` + +#### `DELETE "/api/serve/applications/"` + +Shuts down Serve and all applications running on the Ray cluster. Has no effect if Serve is not running on the Ray cluster. + +**Example Request**: + +```http +DELETE /api/serve/applications/ HTTP/1.1 +Host: http://localhost:52365/ +Accept: application/json +``` + +**Example Response** + +```http +HTTP/1.1 200 OK +Content-Type: application/json +``` + +(serve-rest-api-config-schema)= +## Config Schemas + +```{eval-rst} +.. currentmodule:: ray.serve +``` + + +```{eval-rst} +.. autosummary:: + :toctree: doc/ + + schema.ServeDeploySchema + schema.HTTPOptionsSchema + schema.ServeApplicationSchema + schema.DeploymentSchema + schema.RayActorOptionsSchema +``` + +(serve-rest-api-response-schema)= +## Response Schemas + +#### V1 REST API +```{eval-rst} +.. autosummary:: + :toctree: doc/ + + schema.ServeStatusSchema +``` + +#### V2 REST API +```{eval-rst} +.. 
autosummary:: + :toctree: doc/ + + schema.ServeInstanceDetails + schema.ApplicationDetails + schema.DeploymentDetails + schema.ReplicaDetails +``` diff --git a/doc/source/serve/api/python_api.md b/doc/source/serve/api/python_api.md deleted file mode 100644 index b3ee7719a7d3..000000000000 --- a/doc/source/serve/api/python_api.md +++ /dev/null @@ -1,53 +0,0 @@ -# Ray Serve Python API - -(core-apis)= - -```{eval-rst} -.. currentmodule:: ray -``` - -## Core APIs - -```{eval-rst} -.. autosummary:: - :toctree: doc/ - - serve.run - serve.start - serve.shutdown - serve.delete -``` - -(servehandle-api)= -## ServeHandle API - -```{eval-rst} -.. autosummary:: - :toctree: doc/ - - serve.handle.RayServeHandle - -.. autosummary:: - :toctree: doc/ - - serve.handle.RayServeHandle.remote - serve.handle.RayServeHandle.options -``` - -## Batching Requests - -```{eval-rst} -.. autosummary:: - :toctree: doc/ - - serve.batch -``` - -## Deployment Graph APIs - -```{eval-rst} -.. autosummary:: - :toctree: doc/ - - serve.api.build -``` \ No newline at end of file diff --git a/doc/source/serve/api/rest_api.md b/doc/source/serve/api/rest_api.md deleted file mode 100644 index d0de6fa069c9..000000000000 --- a/doc/source/serve/api/rest_api.md +++ /dev/null @@ -1,317 +0,0 @@ -(serve-rest-api)= - -# Serve REST API - -## V1 REST API (Single-application) - -### `PUT "/api/serve/deployments/"` - -Declaratively deploys the Serve application. Starts Serve on the Ray cluster if it's not already running. See [single-app config schema](serve-rest-api-config-schema) for the request's JSON schema. 
- -**Example Request**: - -```http -PUT /api/serve/deployments/ HTTP/1.1 -Host: http://localhost:52365/ -Accept: application/json -Content-Type: application/json - -{ - "import_path": "fruit.deployment_graph", - "runtime_env": { - "working_dir": "https://github.com/ray-project/serve_config_examples/archive/HEAD.zip" - }, - "deployments": [ - {"name": "MangoStand", "user_config": {"price": 1}}, - {"name": "OrangeStand", "user_config": {"price": 2}}, - {"name": "PearStand", "user_config": {"price": 3}} - ] -} -``` - -**Example Response** - - -```http -HTTP/1.1 200 OK -Content-Type: application/json -``` - -### `GET "/api/serve/deployments/"` - -Gets the config for the application currently deployed on the Ray cluster. This config represents the current goal state for the Serve application. See [single-app config schema](serve-rest-api-config-schema) for the response's JSON schema. - -**Example Request**: -```http -GET /api/serve/deployments/ HTTP/1.1 -Host: http://localhost:52365/ -Accept: application/json -``` - -**Example Response**: - -```http -HTTP/1.1 200 OK -Content-Type: application/json - -{ - "import_path": "fruit.deployment_graph", - "runtime_env": { - "working_dir": "https://github.com/ray-project/serve_config_examples/archive/HEAD.zip" - }, - "deployments": [ - {"name": "MangoStand", "user_config": {"price": 1}}, - {"name": "OrangeStand", "user_config": {"price": 2}}, - {"name": "PearStand", "user_config": {"price": 3}} - ] -} -``` - - -### `GET "/api/serve/deployments/status"` - -Gets the Serve application's current status, including all the deployment statuses. See [status schema](serve-rest-api-response-schema) for the response's JSON schema. 
- -**Example Request**: - -```http -GET /api/serve/deployments/ HTTP/1.1 -Host: http://localhost:52365/ -Accept: application/json -``` - -**Example Response** - -```http -HTTP/1.1 200 OK -Content-Type: application/json - -{ - "app_status": { - "status": "RUNNING", - "message": "", - "deployment_timestamp": 1855994527.146304 - }, - "deployment_statuses": [ - { - "name": "MangoStand", - "status": "HEALTHY", - "message": "" - }, - { - "name": "OrangeStand", - "status": "HEALTHY", - "message": "" - }, - { - "name": "PearStand", - "status": "HEALTHY", - "message": "" - }, - { - "name": "FruitMarket", - "status": "HEALTHY", - "message": "" - }, - { - "name": "DAGDriver", - "status": "HEALTHY", - "message": "" - } - ] -} -``` - -### `DELETE "/api/serve/deployments/"` - -Shuts down Serve and the Serve application running on the Ray cluster. Has no effect if Serve is not running on the Ray cluster. - -**Example Request**: - -```http -DELETE /api/serve/deployments/ HTTP/1.1 -Host: http://localhost:52365/ -Accept: application/json -``` - -**Example Response** - -```http -HTTP/1.1 200 OK -Content-Type: application/json -``` - -## V2 REST API (Multi-application) - -### `PUT "/api/serve/applications/"` - -Declaratively deploys a list of Serve applications. If Serve is already running on the Ray cluster, removes all applications not listed in the new config. If Serve is not running on the Ray cluster, starts Serve. See [multi-app config schema](serve-rest-api-config-schema) for the request's JSON schema. 
- -**Example Request**: - -```http -PUT /api/serve/applications/ HTTP/1.1 -Host: http://localhost:52365/ -Accept: application/json -Content-Type: application/json - -{ - "applications": [ - { - "name": "fruit_stand", - "route_prefix": "/fruit", - "import_path": "fruit.deployment_graph", - "runtime_env": { - "working_dir": "https://github.com/ray-project/serve_config_examples/archive/HEAD.zip" - }, - "deployments": [ - {"name": "MangoStand", "user_config": {"price": 1}}, - {"name": "OrangeStand", "user_config": {"price": 2}}, - {"name": "PearStand", "user_config": {"price": 3}} - ] - }, - { - "name": "calculator", - "route_prefix": "/math", - "import_path": "conditional_dag.serve_dag", - "runtime_env": { - "working_dir": "https://github.com/ray-project/test_dag/archive/HEAD.zip" - }, - "deployments": [ - {"name": "Multiplier", "ray_actor_options": {"num_cpus": 0.5}}, - { - "name": "Adder", - "ray_actor_options": {"env_vars": {"override_increment": "5"}} - }, - ] - } - ] -} -``` - - - -**Example Response** - - -```http -HTTP/1.1 200 OK -Content-Type: application/json -``` - -### `GET "/api/serve/applications/"` - -Gets cluster-level info and comprehensive details on all Serve applications deployed on the Ray cluster. See [metadata schema](serve-rest-api-response-schema) for the response's JSON schema. 
- -```http -GET /api/serve/applications/ HTTP/1.1 -Host: http://localhost:52365/ -Accept: application/json -``` - -**Example Response (abridged JSON)**: - -```http -HTTP/1.1 200 OK -Content-Type: application/json - -{ - "proxy_location": "HeadOnly", - "http_options": { - "host": "127.0.0.1", - "port": 8000 - }, - "deploy_mode": "MULTI_APP", - "applications": { - "fruit_stand": { - "name": "fruit_stand", - "route_prefix": "/fruit", - "docs_path": null, - "status": "RUNNING", - "message": "", - "last_deployed_time_s": 1679952253.748111, - "deployed_app_config": "...", - "deployments": { - "fruit_app_MangoStand": { - "name": "fruit_app_MangoStand", - "status": "HEALTHY", - "message": "", - "deployment_config": "...", - "replicas": [ - { - "replica_id": "fruit_app_MangoStand#bSkrHK", - "state": "RUNNING", - "pid": 59350, - "actor_name": "...", - "actor_id": "...", - "node_id": "...", - "node_ip": "...", - "start_time_s": 1679952254.3458009 - } - ] - }, - } - }, - } -} -``` - -### `DELETE "/api/serve/applications/"` - -Shuts down Serve and all applications running on the Ray cluster. Has no effect if Serve is not running on the Ray cluster. - -**Example Request**: - -```http -DELETE /api/serve/applications/ HTTP/1.1 -Host: http://localhost:52365/ -Accept: application/json -``` - -**Example Response** - -```http -HTTP/1.1 200 OK -Content-Type: application/json -``` - -(serve-rest-api-config-schema)= -## Serve Config Schema - -```{eval-rst} -.. currentmodule:: ray.serve -``` - - -```{eval-rst} -.. autosummary:: - :toctree: doc/ - - schema.ServeDeploySchema - schema.HTTPOptionsSchema - schema.ServeApplicationSchema - schema.DeploymentSchema - schema.RayActorOptionsSchema -``` - -(serve-rest-api-response-schema)= -## Serve Response Schemas - -### V1 REST API -```{eval-rst} -.. autosummary:: - :toctree: doc/ - - schema.ServeStatusSchema -``` - -### V2 REST API -```{eval-rst} -.. 
autosummary:: - :toctree: doc/ - - schema.ServeInstanceDetails - schema.ApplicationDetails - schema.DeploymentDetails - schema.ReplicaDetails -``` \ No newline at end of file diff --git a/doc/source/serve/api/serve_cli.md b/doc/source/serve/api/serve_cli.md deleted file mode 100644 index 7f3f2705c7c7..000000000000 --- a/doc/source/serve/api/serve_cli.md +++ /dev/null @@ -1,9 +0,0 @@ -(serve-cli)= - -# Serve CLI - -```{eval-rst} -.. click:: ray.serve.scripts:cli - :prog: serve - :show-nested: -``` \ No newline at end of file diff --git a/python/ray/serve/__init__.py b/python/ray/serve/__init__.py index 12d3c360092d..0d188e673355 100644 --- a/python/ray/serve/__init__.py +++ b/python/ray/serve/__init__.py @@ -2,6 +2,7 @@ try: from ray.serve.api import ( + build, deployment, get_deployment, get_replica_context, @@ -32,6 +33,7 @@ __all__ = [ "batch", + "build", "start", "HTTPOptions", "get_replica_context", diff --git a/python/ray/serve/api.py b/python/ray/serve/api.py index 99f4bb9cd81d..1785dda9ed2d 100644 --- a/python/ray/serve/api.py +++ b/python/ray/serve/api.py @@ -1,7 +1,7 @@ import collections import inspect import logging -from typing import Any, Callable, Dict, Optional, Tuple, Union, overload +from typing import Any, Callable, Dict, Optional, Tuple, Union from fastapi import APIRouter, FastAPI from ray._private.usage.usage_lib import TagKey, record_extra_usage_tag @@ -60,7 +60,7 @@ def start( dedicated_cpu: bool = False, **kwargs, ) -> ServeControllerClient: - """Initialize a serve instance. + """Start Serve on the cluster. By default, the instance will be scoped to the lifetime of the returned Client object (or when the script exits). If detached is set to True, the @@ -72,7 +72,7 @@ def start( detached: Whether not the instance should be detached from this script. If set, the instance will live on the Ray cluster until it is explicitly stopped with serve.shutdown(). 
- http_options (Optional[Dict, serve.HTTPOptions]): Configuration options + http_options: Configuration options for HTTP proxy. You can pass in a dictionary or HTTPOptions object with fields: @@ -108,10 +108,9 @@ def start( @PublicAPI(stability="stable") def shutdown() -> None: - """Completely shut down the connected Serve instance. + """Completely shut down Serve on the cluster. - Shuts down all processes and deletes all state associated with the - instance. + Deletes all applications and shuts down Serve system actors. """ try: @@ -129,21 +128,28 @@ def shutdown() -> None: @PublicAPI(stability="beta") def get_replica_context() -> ReplicaContext: - """If called from a deployment, returns the deployment and replica tag. + """Returns the deployment and replica tag from within a replica at runtime. A replica tag uniquely identifies a single replica for a Ray Serve - deployment at runtime. Replica tags are of the form - `#`. + deployment. Raises: RayServeException: if not called from within a Ray Serve deployment. Example: - >>> from ray import serve - >>> # deployment_name - >>> serve.get_replica_context().deployment # doctest: +SKIP - >>> # deployment_name#krcwoa - >>> serve.get_replica_context().replica_tag # doctest: +SKIP + + .. code-block:: python + + from ray import serve + @serve.deployment + class MyDeployment: + def __init__(self): + # Prints "MyDeployment" + print(serve.get_replica_context().deployment) + + # Prints "MyDeployment#" + print(serve.get_replica_context().replica_tag) + """ internal_replica_context = get_internal_replica_context() if internal_replica_context is None: @@ -156,24 +162,30 @@ def get_replica_context() -> ReplicaContext: @PublicAPI(stability="beta") -def ingress(app: Union["FastAPI", "APIRouter", Callable]): - """Mark an ASGI application ingress for Serve. - - Args: - app (FastAPI,APIRouter,Starlette,etc): the app or router object serve - as ingress for this deployment. It can be any ASGI compatible - object. 
+def ingress(app: Union["FastAPI", "APIRouter", Callable]) -> Callable: + """Wrap a deployment class with a FastAPI application for HTTP request parsing. Example: - >>> from fastapi import FastAPI - >>> from ray import serve - >>> app = FastAPI() # doctest: +SKIP - >>> app = FastAPI() # doctest: +SKIP - >>> @serve.deployment # doctest: +SKIP - ... @serve.ingress(app) # doctest: +SKIP - ... class App: # doctest: +SKIP - ... pass # doctest: +SKIP - >>> App.deploy() # doctest: +SKIP + + .. code-block:: python + + from ray import serve + from fastapi import FastAPI + + app = FastAPI() + + @serve.deployment + @serve.ingress(app) + class MyFastAPIDeployment: + @app.get("/hi") + def say_hi(self) -> str: + return "Hello world!" + + app = MyFastAPIDeployment.bind() + + Args: + app: the FastAPI app or router object to wrap this class with. + Can be any ASGI-compatible callable. """ def decorator(cls): @@ -251,32 +263,6 @@ async def __del__(self): return decorator -@overload -def deployment(func_or_class: Callable) -> Deployment: - pass - - -@overload -def deployment( - name: Default[str] = DEFAULT.VALUE, - version: Default[str] = DEFAULT.VALUE, - num_replicas: Default[int] = DEFAULT.VALUE, - init_args: Default[Tuple[Any]] = DEFAULT.VALUE, - init_kwargs: Default[Dict[Any, Any]] = DEFAULT.VALUE, - route_prefix: Default[Union[str, None]] = DEFAULT.VALUE, - ray_actor_options: Default[Dict] = DEFAULT.VALUE, - user_config: Default[Any] = DEFAULT.VALUE, - max_concurrent_queries: Default[int] = DEFAULT.VALUE, - autoscaling_config: Default[Union[Dict, AutoscalingConfig]] = DEFAULT.VALUE, - graceful_shutdown_wait_loop_s: Default[float] = DEFAULT.VALUE, - graceful_shutdown_timeout_s: Default[float] = DEFAULT.VALUE, - health_check_period_s: Default[float] = DEFAULT.VALUE, - health_check_timeout_s: Default[float] = DEFAULT.VALUE, - is_driver_deployment: Optional[bool] = DEFAULT.VALUE, -) -> Callable[[Callable], Deployment]: - pass - - @PublicAPI(stability="beta") def deployment( 
_func_or_class: Optional[Callable] = None, @@ -296,62 +282,56 @@ def deployment( health_check_timeout_s: Default[float] = DEFAULT.VALUE, is_driver_deployment: Optional[bool] = DEFAULT.VALUE, ) -> Callable[[Callable], Deployment]: - """Define a Serve deployment. - - Args: - name (Default[str]): Globally-unique name identifying this - deployment. If not provided, the name of the class or function will - be used. - version [DEPRECATED] (Default[str]): Version of the deployment. - This is used to indicate a code change for the deployment; when it - is re-deployed with a version change, a rolling update of the - replicas will be performed. If not provided, every deployment will - be treated as a new version. - num_replicas (Default[Optional[int]]): The number of processes to start up that - will handle requests to this deployment. Defaults to 1. - init_args (Default[Tuple[Any]]): Positional args to be passed to the - class constructor when starting up deployment replicas. These can - also be passed when you call `.deploy()` on the returned Deployment. - init_kwargs (Default[Dict[Any, Any]]): Keyword args to be passed to the - class constructor when starting up deployment replicas. These can - also be passed when you call `.deploy()` on the returned Deployment. - route_prefix (Default[Union[str, None]]): Requests to paths under this - HTTP path prefix will be routed to this deployment. Defaults to - '/{name}'. When set to 'None', no HTTP endpoint will be created. - Routing is done based on longest-prefix match, so if you have - deployment A with a prefix of '/a' and deployment B with a prefix - of '/a/b', requests to '/a', '/a/', and '/a/c' go to A and requests - to '/a/b', '/a/b/', and '/a/b/c' go to B. Routes must not end with - a '/' unless they're the root (just '/'), which acts as a - catch-all. - ray_actor_options (Default[Dict]): Options to be passed to the Ray - actor constructor such as resource requirements. 
Valid options are - `accelerator_type`, `memory`, `num_cpus`, `num_gpus`, - `object_store_memory`, `resources`, and `runtime_env`. - user_config (Default[Optional[Any]]): Config to pass to the - reconfigure method of the deployment. This can be updated - dynamically without changing the version of the deployment and - restarting its replicas. The user_config must be json-serializable - to keep track of updates, so it must only contain json-serializable - types, or json-serializable types nested in lists and dictionaries. - max_concurrent_queries (Default[int]): The maximum number of queries - that will be sent to a replica of this deployment without receiving - a response. Defaults to 100. - is_driver_deployment (Optional[bool]): [Experiment] when set it as True, serve - will deploy exact one deployment to every node. + """Decorator that converts a Python class to a `Deployment`. Example: - >>> from ray import serve - >>> @serve.deployment(name="deployment1") # doctest: +SKIP - ... class MyDeployment: # doctest: +SKIP - ... pass # doctest: +SKIP - >>> MyDeployment.bind(*init_args) # doctest: +SKIP - >>> MyDeployment.options( # doctest: +SKIP - ... num_replicas=2, init_args=init_args).bind() + .. code-block:: python + + from ray import serve + + @serve.deployment(num_replicas=2) + class MyDeployment: + pass + + app = MyDeployment.bind() + + Args: + name: Name uniquely identifying this deployment within the application. + If not provided, the name of the class or function is used. + num_replicas: The number of replicas to run that handle requests to + this deployment. Defaults to 1. + autoscaling_config: Parameters to configure autoscaling behavior. If this + is set, `num_replicas` cannot be set. + init_args: [DEPRECATED] These should be passed to `.bind()` instead. + init_kwargs: [DEPRECATED] These should be passed to `.bind()` instead. + route_prefix: Requests to paths under this HTTP path prefix are routed + to this deployment. Defaults to '/{name}'. 
This can only be set for the + ingress (top-level) deployment of an application. + ray_actor_options: Options to be passed to the Ray actor decorator, such as + resource requirements. Valid options are `accelerator_type`, `memory`, + `num_cpus`, `num_gpus`, `object_store_memory`, `resources`, + and `runtime_env`. + user_config: Config to pass to the reconfigure method of the deployment. This + can be updated dynamically without restarting the replicas of the + deployment. The user_config must be fully JSON-serializable. + max_concurrent_queries: The maximum number of queries that are sent to a + replica of this deployment without receiving a response. Defaults to 100. + health_check_period_s: How often the health check is called on the replica. + Defaults to 10s. The health check is by default a no-op actor call to the + replica, but you can define your own as a "check_health" method that raises + an exception when unhealthy. + health_check_timeout_s: How long to wait for a health check method to return + before considering it failed. Defaults to 30s. + graceful_shutdown_wait_loop_s: Duration that replicas wait until there is + no more work to be done before shutting down. + graceful_shutdown_timeout_s: Duration that a replica can be gracefully shutting + down before being forcefully killed. + is_driver_deployment: [EXPERIMENTAL] when set, exactly one replica of this + deployment runs on every node (like a daemon set). Returns: - Deployment + `Deployment` """ # NOTE: The user_configured_option_names should be the first thing that's @@ -457,7 +437,7 @@ def list_deployments() -> Dict[str, Deployment]: @PublicAPI(stability="beta") def run( - target: Union[Application, BuiltApplication], + target: Application, _blocking: bool = True, host: str = DEFAULT_HTTP_HOST, port: int = DEFAULT_HTTP_PORT, @@ -466,12 +446,16 @@ def run( ) -> Optional[RayServeSyncHandle]: """Run an application and return a handle to its ingress deployment. 
- The application is returned by `Deployment.bind()` or `serve.build`. + The application is returned by `Deployment.bind()`. Example: + + .. code-block:: python + + handle = serve.run(MyDeployment.bind()) + ray.get(handle.remote()) Args: - target (Union[Application, BuiltApplication]): - A Serve application returned from `Deployment.bind()` or a built application - returned from `serve.build()`. + target: + A Serve application returned by `Deployment.bind()`. host: Host for HTTP servers to listen on. Defaults to "127.0.0.1". To expose Serve publicly, you probably want to set this to "0.0.0.0". @@ -481,6 +465,9 @@ def run( route_prefix: Route prefix for HTTP requests. If not provided, it will use route_prefix of the ingress deployment. If specified neither as an argument nor in the ingress deployment, the route prefix will default to '/'. + + Returns: + RayServeSyncHandle: A handle that can be used to call the application. """ client = _private_api.serve_start( detached=True, @@ -578,12 +565,9 @@ def build(target: Application, name: str = None) -> BuiltApplication: @PublicAPI(stability="alpha") def delete(name: str, _blocking: bool = True): - """Delete an app by its name + """Delete an application by its name. Deletes the app with all corresponding deployments. - - Args: - name: the name of app to delete. """ client = get_global_client() client.delete_apps([name], blocking=_blocking) diff --git a/python/ray/serve/batching.py b/python/ray/serve/batching.py index 43a354b56769..a44f918bc079 100644 --- a/python/ray/serve/batching.py +++ b/python/ray/serve/batching.py @@ -210,13 +210,17 @@ def batch(func: F) -> G: # "Decorator factory" use case (called with arguments). 
@overload def batch( - max_batch_size: Optional[int] = 10, batch_wait_timeout_s: Optional[float] = 0.0 + max_batch_size: int = 10, batch_wait_timeout_s: float = 0.0 ) -> Callable[[F], G]: pass @PublicAPI(stability="beta") -def batch(_func=None, max_batch_size=10, batch_wait_timeout_s=0.0): +def batch( + _func: Optional[Callable] = None, + max_batch_size: int = 10, + batch_wait_timeout_s: float = 0.0, +): """Converts a function to asynchronously handle batches. The function can be a standalone function or a class method. In both @@ -228,19 +232,33 @@ def batch(_func=None, max_batch_size=10, batch_wait_timeout_s=0.0): or `batch_wait_timeout_s` has elapsed, whichever occurs first. Example: - >>> from ray import serve - >>> @serve.batch(max_batch_size=50, batch_wait_timeout_s=0.5) # doctest: +SKIP - ... async def handle_batch(batch: List[str]): # doctest: +SKIP - ... return [s.lower() for s in batch] # doctest: +SKIP - >>> async def handle_single(s: str): # doctest: +SKIP - ... # Returns s.lower(). - ... return await handle_batch(s) # doctest: +SKIP + + .. code-block:: python + + from ray import serve + from starlette.requests import Request + + @serve.deployment + class BatchedDeployment: + @serve.batch(max_batch_size=10, batch_wait_timeout_s=0.1) + async def batch_handler(self, requests: List[Request]) -> List[str]: + response_batch = [] + for r in requests: + name = (await requests.json())["name"] + response_batch.append(f"Hello {name}!") + + return response_batch + + async def __call__(self, request: Request): + return await self.batch_handler(request) + + app = BatchedDeployment.bind() Arguments: max_batch_size: the maximum batch size that will be executed in one call to the underlying function. batch_wait_timeout_s: the maximum duration to wait for - `max_batch_size` elements before running the underlying function. + `max_batch_size` elements before running the current batch. """ # `_func` will be None in the case when the decorator is parametrized. 
# See the comment at the end of this function for a detailed explanation. diff --git a/python/ray/serve/built_application.py b/python/ray/serve/built_application.py index 195ad35019b2..0f450d8094ec 100644 --- a/python/ray/serve/built_application.py +++ b/python/ray/serve/built_application.py @@ -5,10 +5,10 @@ ) from ray.serve.deployment import Deployment -from ray.util.annotations import DeveloperAPI +from ray.util.annotations import PublicAPI -@DeveloperAPI +@PublicAPI(stability="alpha") class ImmutableDeploymentDict(dict): def __init__(self, deployments: Dict[str, Deployment]): super().__init__() @@ -22,7 +22,7 @@ def __setitem__(self, *args): ) -@DeveloperAPI +@PublicAPI(stability="alpha") class BuiltApplication: """A static, pre-built Serve application. diff --git a/python/ray/serve/deployment.py b/python/ray/serve/deployment.py index 460066982844..bd776e28afd3 100644 --- a/python/ray/serve/deployment.py +++ b/python/ray/serve/deployment.py @@ -32,18 +32,46 @@ logger = logging.getLogger(SERVE_LOGGER_NAME) -@PublicAPI +@PublicAPI(stability="beta") class Application(DAGNodeBase): - """Returned from `Deployment.bind()`. + """One or more deployments bound with arguments that can be deployed together. Can be passed into another `Deployment.bind()` to compose multiple deployments in a single application, passed to `serve.run`, or deployed via a Serve config file. + + For example, to define an Application and run it in Python: + + .. code-block:: python + + from ray import serve + from ray.serve import Application + + @serve.deployment + class MyDeployment: + pass + + app: Application = MyDeployment.bind(OtherDeployment.bind()) + serve.run(app) + + To run the same app using the command line interface (CLI): + + .. code-block:: bash + + serve run python_file:app + + To deploy the same app via a config file: + + .. 
code-block:: yaml + + applications: + my_app: + import_path: python_file:app + """ def __init__( self, *, _internal_dag_node: Optional[Union[ClassNode, FunctionNode]] = None ): - """This class should not be constructed directly.""" if _internal_dag_node is None: raise RuntimeError("This class should not be constructed directly.") @@ -67,6 +95,32 @@ def __getattr__(self, name: str) -> Any: @PublicAPI class Deployment: + """Class (or function) decorated with the `@serve.deployment` decorator. + + This is run on a number of replica actors. Requests to those replicas call + this class. + + One or more deployments can be composed together into an `Application` which is + then run via `serve.run` or a config file. + + Example: + + .. code-block:: python + + @serve.deployment + class MyDeployment: + def __init__(self, name: str): + self._name = name + + def __call__(self, request): + return "Hello world!" + + app = MyDeployment.bind() + # Run via `serve.run` or the `serve run` CLI command. + serve.run(app) + + """ + def __init__( self, func_or_class: Union[Callable, str], @@ -80,13 +134,6 @@ def __init__( is_driver_deployment: Optional[bool] = False, _internal=False, ) -> None: - """Construct a Deployment. CONSTRUCTOR SHOULDN'T BE USED DIRECTLY. - - Deployments should be created, retrieved, and updated using - `@serve.deployment`, `serve.get_deployment`, and `Deployment.options`, - respectively. - """ - if not _internal: raise RuntimeError( "The Deployment constructor should not be called " @@ -154,10 +201,6 @@ def name(self) -> str: @property def version(self) -> Optional[str]: - """Version of this deployment. - - If None, will be redeployed every time `.deploy()` is called. 
- """ return self._version @property @@ -194,17 +237,14 @@ def ray_actor_options(self) -> Optional[Dict]: @property def init_args(self) -> Tuple[Any]: - """Positional args passed to the underlying class's constructor.""" return self._init_args @property def init_kwargs(self) -> Tuple[Any]: - """Keyword args passed to the underlying class's constructor.""" return self._init_kwargs @property def url(self) -> Optional[str]: - """Full HTTP url for this deployment.""" if self._route_prefix is None or self._is_driver_deployment: # this deployment is not exposed over HTTP return None @@ -378,14 +418,7 @@ def options( Only those options passed in will be updated, all others will remain unchanged from the existing deployment. - Args: - Refer to @serve.deployment decorator docstring for all non-private - arguments. - - _internal: If True, this function won't log deprecation warnings - and won't update this deployment's config's - user_configured_option_names. It should only be True when used - internally by Serve. It should be False when called by users. + Refer to the `@serve.deployment` decorator docs for available arguments. """ # NOTE: The user_configured_option_names should be the first thing that's @@ -505,10 +538,13 @@ def set_options( is_driver_deployment: bool = DEFAULT.VALUE, _internal: bool = False, ) -> None: - """Overwrite this deployment's options. Mutates the deployment. + """Overwrite this deployment's options in-place. Only those options passed in will be updated, all others will remain unchanged. + + Refer to the @serve.deployment decorator docstring for all non-private + arguments. """ validated = self.options( diff --git a/python/ray/serve/handle.py b/python/ray/serve/handle.py index 953413a92ea3..e5187a9d069e 100644 --- a/python/ray/serve/handle.py +++ b/python/ray/serve/handle.py @@ -78,8 +78,11 @@ class HandleOptions: class RayServeHandle: """A handle used to make requests from one deployment to another. 
- This is used to compose multiple deployments in a single application by binding - them together when building the application. For example: + This is used to compose multiple deployments into a single application. After + building the application, this handle is substituted at runtime for deployments + passed as arguments via `.bind()`. + + Example: .. code-block:: python @@ -254,7 +257,9 @@ async def remote(self, *args, **kwargs) -> asyncio.Task: Returns an `asyncio.Task` whose underlying result is a Ray ObjectRef that points to the final result of the request. - The final result can be retrieved by `await`ing the ObjectRef. Example: + The final result can be retrieved by awaiting the ObjectRef. + + Example: .. code-block:: python diff --git a/python/ray/serve/scripts.py b/python/ray/serve/scripts.py index dbb8e8b7aa1d..87d2f207705a 100644 --- a/python/ray/serve/scripts.py +++ b/python/ray/serve/scripts.py @@ -122,12 +122,12 @@ def convert_args_to_dict(args: Tuple[str]) -> Dict[str, str]: return args_dict -@click.group(help="CLI for managing Serve instances on a Ray cluster.") +@click.group(help="CLI for managing Serve applications on a Ray cluster.") def cli(): pass -@cli.command(help="Start a detached Serve instance on the Ray cluster.") +@cli.command(help="Start Serve on the Ray cluster.") @click.option( "--address", "-a", @@ -141,21 +141,21 @@ def cli(): default=DEFAULT_HTTP_HOST, required=False, type=str, - help="Host for HTTP servers to listen on. " f"Defaults to {DEFAULT_HTTP_HOST}.", + help="Host for HTTP proxies to listen on. " f"Defaults to {DEFAULT_HTTP_HOST}.", ) @click.option( "--http-port", default=DEFAULT_HTTP_PORT, required=False, type=int, - help="Port for HTTP servers to listen on. " f"Defaults to {DEFAULT_HTTP_PORT}.", + help="Port for HTTP proxies to listen on. 
" f"Defaults to {DEFAULT_HTTP_PORT}.", ) @click.option( "--http-location", default=DeploymentMode.HeadOnly, required=False, type=click.Choice(list(DeploymentMode)), - help="Location of the HTTP servers. Defaults to HeadOnly.", + help="Location of the HTTP proxies. Defaults to HeadOnly.", ) def start(address, http_host, http_port, http_location): ray.init( @@ -300,7 +300,7 @@ def deploy(config_file_name: str, address: str): "-p", required=False, type=int, - help=f"Port for HTTP servers to listen on. Defaults to {DEFAULT_HTTP_PORT}.", + help=f"Port for HTTP proxies to listen on. Defaults to {DEFAULT_HTTP_PORT}.", ) @click.option( "--blocking/--non-blocking", @@ -505,7 +505,7 @@ def config(address: str, name: Optional[str]): @cli.command( - short_help="Get the current status of all live Serve applications and deployments.", + short_help="Get the current status of all Serve applications on the cluster.", help=( "Prints status information about all applications on the cluster.\n\n" "An application may be:\n\n" @@ -584,7 +584,7 @@ def status(address: str, name: Optional[str]): @cli.command( - help="Deletes the Serve app.", + help="Shuts down Serve on the cluster, deleting all applications.", ) @click.option( "--address", @@ -612,7 +612,7 @@ def shutdown(address: str, yes: bool): @cli.command( - short_help="Writes a Serve Deployment Graph's config file.", + short_help="Generate a config file for the specified application(s).", help=( "Imports the Application at IMPORT_PATH(S) and generates a " "structured config for it. If the flag --multi-app is set, accepts multiple " From c2260bafbee2f043f435332f4b54eed1fa2d4788 Mon Sep 17 00:00:00 2001 From: Scott Lee Date: Fri, 5 May 2023 14:02:12 -0700 Subject: [PATCH 264/424] [Data] Allow fusing MapOperator -> AllToAllOperator (#34847) Currently, we only support operator fusion for MapOperator. We should also support fusing MapOperator with AllToAllOperator; this PR allows for one direction, MapOperator -> AllToAllOperator. 
In a future PR, we can work on the other direction, AllToAllOperator -> MapOperator. Signed-off-by: Scott Lee --- .../data/_internal/execution/interfaces.py | 4 + .../logical/operators/all_to_all_operator.py | 3 +- .../logical/rules/operator_fusion.py | 180 ++++++++++++++---- .../planner/exchange/shuffle_task_spec.py | 12 +- .../data/_internal/planner/random_shuffle.py | 16 +- .../data/tests/test_execution_optimizer.py | 141 ++++++++++++++ 6 files changed, 316 insertions(+), 40 deletions(-) diff --git a/python/ray/data/_internal/execution/interfaces.py b/python/ray/data/_internal/execution/interfaces.py index 21b9ecb7e5eb..734384232c45 100644 --- a/python/ray/data/_internal/execution/interfaces.py +++ b/python/ray/data/_internal/execution/interfaces.py @@ -233,6 +233,10 @@ class TaskContext: # TODO(chengsu): clean it up from TaskContext with new optimizer framework. sub_progress_bar_dict: Optional[Dict[str, ProgressBar]] = None + # The underlying function called in a MapOperator; this is used when fusing + # an AllToAllOperator with an upstream MapOperator. + upstream_map_transform_fn: Optional["MapTransformFn"] = None + # Block transform function applied by task and actor pools in MapOperator. 
MapTransformFn = Callable[[Iterable[Block], TaskContext], Iterable[Block]] diff --git a/python/ray/data/_internal/logical/operators/all_to_all_operator.py b/python/ray/data/_internal/logical/operators/all_to_all_operator.py index 95cd231065e2..d39a42e6e561 100644 --- a/python/ray/data/_internal/logical/operators/all_to_all_operator.py +++ b/python/ray/data/_internal/logical/operators/all_to_all_operator.py @@ -52,12 +52,13 @@ class RandomShuffle(AbstractAllToAll): def __init__( self, input_op: LogicalOperator, + name: str = "RandomShuffle", seed: Optional[int] = None, num_outputs: Optional[int] = None, ray_remote_args: Optional[Dict[str, Any]] = None, ): super().__init__( - "RandomShuffle", + name, input_op, num_outputs=num_outputs, ray_remote_args=ray_remote_args, diff --git a/python/ray/data/_internal/logical/rules/operator_fusion.py b/python/ray/data/_internal/logical/rules/operator_fusion.py index ea8e91dc6b59..eb5d0b2e0820 100644 --- a/python/ray/data/_internal/logical/rules/operator_fusion.py +++ b/python/ray/data/_internal/logical/rules/operator_fusion.py @@ -1,11 +1,23 @@ -from typing import Iterator +from typing import Iterator, List, Tuple +from ray.data._internal.execution.operators.map_operator import MapOperator +from ray.data._internal.logical.operators.all_to_all_operator import ( + AbstractAllToAll, + RandomShuffle, +) +from ray.data._internal.stats import StatsDict from ray.data.block import Block # TODO(Clark): Remove compute dependency once we delete the legacy compute. 
from ray.data._internal.compute import is_task_compute, CallableClass, get_compute -from ray.data._internal.execution.interfaces import PhysicalOperator, TaskContext +from ray.data._internal.execution.interfaces import ( + PhysicalOperator, + RefBundle, + TaskContext, +) from ray.data._internal.logical.interfaces import Rule, PhysicalPlan +from ray.data._internal.execution.operators.all_to_all_operator import AllToAllOperator +from ray.data._internal.logical.operators.map_operator import AbstractUDFMap # Scheduling strategy can be inherited from upstream operator if not specified. @@ -17,35 +29,72 @@ class OperatorFusionRule(Rule): def apply(self, plan: PhysicalPlan) -> PhysicalPlan: self._op_map = plan.op_map.copy() - # Do DFS fusion. - root = self._apply(plan.dag) - return PhysicalPlan(root, self._op_map) + # Do DFS fusion on compatible pairwise operators in two passes. + # In the first pass, only fuse back-to-back map operators together. + fused_dag = self._fuse_map_operators_in_dag(plan.dag) - def _apply(self, op: PhysicalOperator) -> PhysicalOperator: - """Performs DFS fusion of linear chains of physical map operators, provided that - they are pairwise-compatible. + # Now that we have fused together all back-to-back map operators, + # we fuse together MapOperator -> AllToAllOperator pairs. + fused_dag = self._fuse_all_to_all_operators_in_dag(fused_dag) - Args: - op: The op that we're trying to fuse with its input. + return PhysicalPlan(fused_dag, self._op_map) + + def _fuse_map_operators_in_dag(self, dag: PhysicalOperator) -> MapOperator: + """Starting at the given operator, traverses up the DAG of operators + and recursively fuses compatible MapOperator -> MapOperator pairs. + Returns the current (root) operator after completing upstream operator fusions. """ - upstream_ops = op.input_dependencies - # Fuse with upstream ops while possible. 
- while len(upstream_ops) == 1 and self._can_fuse(op, upstream_ops[0]): + upstream_ops = dag.input_dependencies + while ( + len(upstream_ops) == 1 + and isinstance(dag, MapOperator) + and isinstance(upstream_ops[0], MapOperator) + and self._can_fuse(dag, upstream_ops[0]) + ): # Fuse operator with its upstream op. - op = self._fuse(op, upstream_ops[0]) - upstream_ops = op.input_dependencies - # Can no longer fuse with upstream ops, proceed up the DAG. - op._input_dependencies = [ - self._apply(upstream_op) for upstream_op in upstream_ops + dag = self._get_fused_map_operator(dag, upstream_ops[0]) + upstream_ops = dag.input_dependencies + + # Done fusing back-to-back map operators together here, + # move up the DAG to find the next map operators to fuse. + dag._input_dependencies = [ + self._fuse_map_operators_in_dag(upstream_op) for upstream_op in upstream_ops ] - return op + return dag + + def _fuse_all_to_all_operators_in_dag( + self, dag: AllToAllOperator + ) -> AllToAllOperator: + """Starting at the given operator, traverses up the DAG of operators + and recursively fuses compatible MapOperator -> AllToAllOperator pairs. + Returns the current (root) operator after completing upstream operator fusions. + """ + upstream_ops = dag.input_dependencies + while ( + len(upstream_ops) == 1 + and isinstance(dag, AllToAllOperator) + and isinstance(upstream_ops[0], MapOperator) + and self._can_fuse(dag, upstream_ops[0]) + ): + # Fuse operator with its upstream op. + dag = self._get_fused_all_to_all_operator(dag, upstream_ops[0]) + upstream_ops = dag.input_dependencies + + # Done fusing MapOperator -> AllToAllOperator together here, + # move up the DAG to find the next pair of operators to fuse. 
+ dag._input_dependencies = [ + self._fuse_all_to_all_operators_in_dag(upstream_op) + for upstream_op in upstream_ops + ] + return dag def _can_fuse(self, down_op: PhysicalOperator, up_op: PhysicalOperator) -> bool: """Returns whether the provided downstream operator can be fused with the given upstream operator. We currently support fusing two operators if the following are all true: - * They are both MapOperators. + * We are fusing either MapOperator -> MapOperator or + MapOperator -> AllToAllOperator. * They either use the same compute configuration, or the upstream operator uses a task pool while the downstream operator uses an actor pool. * If both operators involve callable classes, the callable classes are @@ -56,8 +105,13 @@ def _can_fuse(self, down_op: PhysicalOperator, up_op: PhysicalOperator) -> bool: from ray.data._internal.logical.operators.map_operator import AbstractMap from ray.data._internal.logical.operators.map_operator import AbstractUDFMap - # We only support fusing MapOperators. - if not isinstance(down_op, MapOperator) or not isinstance(up_op, MapOperator): + # We currently only support fusing for the following cases: + # - MapOperator -> MapOperator + # - MapOperator -> AllToAllOperator (only RandomShuffle + # LogicalOperator is currently supported) + if not isinstance(down_op, (MapOperator, AllToAllOperator)) or not isinstance( + up_op, MapOperator + ): return False down_logical_op = self._op_map[down_op] @@ -68,17 +122,20 @@ def _can_fuse(self, down_op: PhysicalOperator, up_op: PhysicalOperator) -> bool: if not down_logical_op._input_dependencies: return False - # We only support fusing AbstractMap -> AbstractMap operators. 
- if not isinstance(down_logical_op, AbstractMap) or not isinstance( - up_logical_op, AbstractMap - ): + # We currently only support fusing for the following cases: + # - AbstractMap -> AbstractMap + # - AbstractMap -> RandomShuffle + if not isinstance( + down_logical_op, (AbstractMap, RandomShuffle) + ) or not isinstance(up_logical_op, AbstractMap): return False # Allow fusing tasks->actors if the resources are compatible (read->map), but # not the other way around. The latter (downstream op) will be used as the # compute if fused. if ( - is_task_compute(down_logical_op._compute) + isinstance(down_logical_op, AbstractUDFMap) + and is_task_compute(down_logical_op._compute) and isinstance(up_logical_op, AbstractUDFMap) and get_compute(up_logical_op._compute) != get_compute(down_logical_op._compute) @@ -116,12 +173,13 @@ def _can_fuse(self, down_op: PhysicalOperator, up_op: PhysicalOperator) -> bool: # Otherwise, ops are compatible for fusion. return True - def _fuse(self, down_op: PhysicalOperator, up_op: PhysicalOperator): - """Fuse the downstream operator with its upstream operator.""" - from ray.data._internal.execution.operators.map_operator import MapOperator - from ray.data._internal.logical.operators.map_operator import AbstractUDFMap - - assert self._can_fuse(down_op, up_op) + def _get_fused_map_operator( + self, down_op: MapOperator, up_op: MapOperator + ) -> MapOperator: + assert self._can_fuse(down_op, up_op), ( + "Current rule supports fusing MapOperator->MapOperator, but received: " + f"{type(up_op).__name__} -> {type(down_op).__name__}" + ) # Fuse operator names. 
name = up_op.name + "->" + down_op.name @@ -147,9 +205,11 @@ def _fuse(self, down_op: PhysicalOperator, up_op: PhysicalOperator): down_transform_fn = down_op.get_transformation_fn() up_transform_fn = up_op.get_transformation_fn() - def transform_fn(blocks: Iterator[Block], ctx: TaskContext) -> Iterator[Block]: + def fused_map_transform_fn( + blocks: Iterator[Block], ctx: TaskContext + ) -> Iterator[Block]: blocks = up_transform_fn(blocks, ctx) - # TODO(Clark): Add zero-copy batching between transform functions. + # TODO(Scott): Add zero-copy batching between transform functions. return down_transform_fn(blocks, ctx) # We take the downstream op's compute in case we're fusing upstream tasks with a @@ -163,7 +223,7 @@ def transform_fn(blocks: Iterator[Block], ctx: TaskContext) -> Iterator[Block]: # Fused physical map operator. op = MapOperator.create( - transform_fn, + fused_map_transform_fn, input_op, name=name, compute_strategy=compute, @@ -172,7 +232,7 @@ def transform_fn(blocks: Iterator[Block], ctx: TaskContext) -> Iterator[Block]: ) # Build a map logical operator to be used as a reference for further fusion. - # TODO(Clark): This is hacky, remove this once we push fusion to be purely based + # TODO(Scott): This is hacky, remove this once we push fusion to be purely based # on a lower-level operator spec. if isinstance(up_logical_op, AbstractUDFMap): input_op = up_logical_op.input_dependencies[0] @@ -205,6 +265,52 @@ def transform_fn(blocks: Iterator[Block], ctx: TaskContext) -> Iterator[Block]: # Return the fused physical operator. return op + def _get_fused_all_to_all_operator( + self, down_op: AllToAllOperator, up_op: MapOperator + ) -> AllToAllOperator: + assert self._can_fuse(down_op, up_op), ( + "Current rule supports fusing MapOperator -> AllToAllOperator" + f", but received: {type(up_op).__name__} -> {type(down_op).__name__}" + ) + + # Fuse operator names. 
+ name = up_op.name + "->" + down_op.name + + down_logical_op: AbstractAllToAll = self._op_map.pop(down_op) + up_logical_op: AbstractUDFMap = self._op_map.pop(up_op) + + # Fuse transformation functions. + down_transform_fn = down_op.get_transformation_fn() + up_transform_fn = up_op.get_transformation_fn() + + def fused_all_to_all_transform_fn( + blocks: List[RefBundle], ctx: TaskContext + ) -> Tuple[List[RefBundle], StatsDict]: + """To fuse MapOperator->AllToAllOperator, we store the map function + in the TaskContext so that it may be used by the downstream + AllToAllOperator's transform function.""" + ctx.upstream_map_transform_fn = up_transform_fn + return down_transform_fn(blocks, ctx) + + ray_remote_args = down_logical_op._ray_remote_args + # Make the upstream operator's inputs the new, fused operator's inputs. + input_deps = up_op.input_dependencies + assert len(input_deps) == 1 + input_op = input_deps[0] + + op = AllToAllOperator( + fused_all_to_all_transform_fn, + input_op, + name=name, + ) + # Bottom out at the source logical op (e.g. Read()). + input_op = up_logical_op + + logical_op = RandomShuffle(input_op, name=name, ray_remote_args=ray_remote_args) + self._op_map[op] = logical_op + # Return the fused physical operator. 
+ return op + def _are_remote_args_compatible(up_args, down_args): """Check if Ray remote arguments are compatible for merging.""" diff --git a/python/ray/data/_internal/planner/exchange/shuffle_task_spec.py b/python/ray/data/_internal/planner/exchange/shuffle_task_spec.py index 474d69b03279..9611041a299e 100644 --- a/python/ray/data/_internal/planner/exchange/shuffle_task_spec.py +++ b/python/ray/data/_internal/planner/exchange/shuffle_task_spec.py @@ -4,6 +4,7 @@ import numpy as np from ray.data._internal.delegating_block_builder import DelegatingBlockBuilder +from ray.data._internal.execution.interfaces import MapTransformFn from ray.data._internal.planner.exchange.interfaces import ExchangeTaskSpec from ray.data.block import Block, BlockAccessor, BlockExecStats, BlockMetadata @@ -19,9 +20,10 @@ def __init__( self, random_shuffle: bool = False, random_seed: Optional[int] = None, + upstream_map_fn: Optional[MapTransformFn] = None, ): super().__init__( - map_args=[random_shuffle, random_seed], + map_args=[upstream_map_fn, random_shuffle, random_seed], reduce_args=[random_shuffle, random_seed], ) @@ -30,11 +32,19 @@ def map( idx: int, block: Block, output_num_blocks: int, + upstream_map_fn: Optional[MapTransformFn], random_shuffle: bool, random_seed: Optional[int], ) -> List[Union[BlockMetadata, Block]]: # TODO: Support fusion with other upstream operators. stats = BlockExecStats.builder() + if upstream_map_fn: + mapped_blocks = list(upstream_map_fn([block])) + assert len(mapped_blocks) == 1, ( + "Expected upstream_map_fn to return one block, but instead" + f" returned {len(mapped_blocks)} blocks" + ) + block = mapped_blocks[0] block = BlockAccessor.for_block(block) # Randomize the distribution of records to blocks. 
diff --git a/python/ray/data/_internal/planner/random_shuffle.py b/python/ray/data/_internal/planner/random_shuffle.py index 8f22741aa93c..5827c34802d4 100644 --- a/python/ray/data/_internal/planner/random_shuffle.py +++ b/python/ray/data/_internal/planner/random_shuffle.py @@ -2,6 +2,7 @@ from ray.data._internal.execution.interfaces import ( AllToAllTransformFn, + MapTransformFn, RefBundle, TaskContext, ) @@ -28,7 +29,20 @@ def fn( ctx: TaskContext, ) -> Tuple[List[RefBundle], StatsDict]: num_input_blocks = sum(len(r.blocks) for r in refs) - shuffle_spec = ShuffleTaskSpec(random_shuffle=True, random_seed=seed) + + # If map_transform_fn is specified (e.g. from fusing + # MapOperator->AllToAllOperator), we pass a map function which + # is applied to each block before shuffling. + map_transform_fn: Optional[MapTransformFn] = ctx.upstream_map_transform_fn + upstream_map_fn = None + if map_transform_fn: + upstream_map_fn = lambda block: map_transform_fn(block, ctx) # noqa: E731 + + shuffle_spec = ShuffleTaskSpec( + random_shuffle=True, + random_seed=seed, + upstream_map_fn=upstream_map_fn, + ) if DataContext.get_current().use_push_based_shuffle: if num_outputs is not None: diff --git a/python/ray/data/tests/test_execution_optimizer.py b/python/ray/data/tests/test_execution_optimizer.py index 6e2b18423a09..b2b4450d70a5 100644 --- a/python/ray/data/tests/test_execution_optimizer.py +++ b/python/ray/data/tests/test_execution_optimizer.py @@ -607,6 +607,147 @@ def __call__(self, x): assert isinstance(physical_op.input_dependencies[0], InputDataBuffer) +def test_read_map_batches_operator_fusion_with_randomize_blocks_operator( + ray_start_regular_shared, enable_optimizer +): + # Note: We currently do not fuse MapBatches->RandomizeBlocks. + # This test is to ensure that we don't accidentally fuse them. 
+ # There is also an additional optimization rule, under ReorderRandomizeBlocksRule, + # which collapses RandomizeBlocks operators, so we should not be fusing them + # to begin with. + def fn(batch): + return {"id": [x + 1 for x in batch["id"]]} + + n = 10 + ds = ray.data.range(n) + ds = ds.randomize_block_order() + ds = ds.map_batches(fn, batch_size=None) + assert set(extract_values("id", ds.take_all())) == set(range(1, n + 1)) + assert "RandomizeBlocks" not in ds.stats() + assert "DoRead->MapBatches->RandomizeBlocks" not in ds.stats() + assert "DoRead->MapBatches" in ds.stats() + _check_usage_record(["ReadRange", "MapBatches", "RandomizeBlocks"]) + + +def test_read_map_batches_operator_fusion_with_random_shuffle_operator( + ray_start_regular_shared, enable_optimizer +): + # Note: we currently only support fusing MapOperator->AllToAllOperator. + def fn(batch): + return {"id": [x + 1 for x in batch["id"]]} + + n = 10 + ds = ray.data.range(n) + ds = ds.map_batches(fn, batch_size=None) + ds = ds.random_shuffle() + assert set(extract_values("id", ds.take_all())) == set(range(1, n + 1)) + assert "DoRead->MapBatches->RandomShuffle" in ds.stats() + _check_usage_record(["ReadRange", "MapBatches", "RandomShuffle"]) + + ds = ray.data.range(n) + ds = ds.random_shuffle() + ds = ds.map_batches(fn, batch_size=None) + assert set(extract_values("id", ds.take_all())) == set(range(1, n + 1)) + # TODO(Scott): Update below assertion after supporting fusion in + # the other direction (AllToAllOperator->MapOperator) + assert "DoRead->RandomShuffle->MapBatches" not in ds.stats() + assert all(op in ds.stats() for op in ("DoRead", "RandomShuffle", "MapBatches")) + _check_usage_record(["ReadRange", "RandomShuffle", "MapBatches"]) + + # Test fusing multiple `map_batches` with multiple `random_shuffle` operations. 
+ ds = ray.data.range(n) + for _ in range(5): + ds = ds.map_batches(fn, batch_size=None) + ds = ds.random_shuffle() + assert set(extract_values("id", ds.take_all())) == set(range(5, n + 5)) + assert f"DoRead->{'MapBatches->' * 5}RandomShuffle" in ds.stats() + + # For interweaved map_batches and random_shuffle operations, we expect to fuse the + # two pairs of MapBatches->RandomShuffle, but not the resulting + # RandomShuffle operators. + ds = ray.data.range(n) + ds = ds.map_batches(fn, batch_size=None) + ds = ds.random_shuffle() + ds = ds.map_batches(fn, batch_size=None) + ds = ds.random_shuffle() + assert set(extract_values("id", ds.take_all())) == set(range(2, n + 2)) + assert "Stage 1 DoRead->MapBatches->RandomShuffle" in ds.stats() + assert "Stage 2 MapBatches->RandomShuffle" + _check_usage_record(["ReadRange", "RandomShuffle", "MapBatches"]) + + +def test_read_map_batches_operator_fusion_with_repartition_operator( + ray_start_regular_shared, enable_optimizer +): + # Note: We currently do not fuse MapBatches->Repartition. + # This test is to ensure that we don't accidentally fuse them, until + # we implement it later. + def fn(batch): + return {"id": [x + 1 for x in batch["id"]]} + + n = 10 + ds = ray.data.range(n) + ds = ds.map_batches(fn, batch_size=None) + ds = ds.repartition(2) + assert set(extract_values("id", ds.take_all())) == set(range(1, n + 1)) + # TODO(Scott): update the below assertions after we support fusion. + assert "DoRead->MapBatches->Repartition" not in ds.stats() + assert "DoRead->MapBatches" in ds.stats() + assert "Repartition" in ds.stats() + _check_usage_record(["ReadRange", "MapBatches", "Repartition"]) + + +def test_read_map_batches_operator_fusion_with_sort_operator( + ray_start_regular_shared, enable_optimizer +): + # Note: We currently do not fuse MapBatches->Sort. + # This test is to ensure that we don't accidentally fuse them, until + # we implement it later. 
+ def fn(batch): + return {"id": [x + 1 for x in batch["id"]]} + + n = 10 + ds = ray.data.range(n) + ds = ds.map_batches(fn, batch_size=None) + ds = ds.sort("id") + assert extract_values("id", ds.take_all()) == list(range(1, n + 1)) + # TODO(Scott): update the below assertions after we support fusion. + assert "DoRead->MapBatches->Sort" not in ds.stats() + assert "DoRead->MapBatches" in ds.stats() + assert "Sort" in ds.stats() + _check_usage_record(["ReadRange", "MapBatches", "Sort"]) + + +def test_read_map_batches_operator_fusion_with_aggregate_operator( + ray_start_regular_shared, enable_optimizer +): + from ray.data.aggregate import AggregateFn + + # Note: We currently do not fuse MapBatches->Aggregate. + # This test is to ensure that we don't accidentally fuse them, until + # we implement it later. + def fn(batch): + return {"id": [x % 2 for x in batch["id"]]} + + n = 100 + grouped_ds = ray.data.range(n).map_batches(fn, batch_size=None).groupby("id") + agg_ds = grouped_ds.aggregate( + AggregateFn( + init=lambda k: [0, 0], + accumulate_row=lambda a, r: [a[0] + r["id"], a[1] + 1], + merge=lambda a1, a2: [a1[0] + a2[0], a1[1] + a2[1]], + finalize=lambda a: a[0] / a[1], + name="foo", + ), + ) + agg_ds.take_all() == [{"id": 0, "foo": 0.0}, {"id": 1, "foo": 1.0}] + # TODO(Scott): update the below assertions after we support fusion. 
+ assert "DoRead->MapBatches->Aggregate" not in agg_ds.stats() + assert "DoRead->MapBatches" in agg_ds.stats() + assert "Aggregate" in agg_ds.stats() + _check_usage_record(["ReadRange", "MapBatches", "Aggregate"]) + + def test_read_map_chain_operator_fusion_e2e(ray_start_regular_shared, enable_optimizer): ds = ray.data.range(10, parallelism=2) ds = ds.filter(lambda x: x["id"] % 2 == 0) From bb5d4e6ae545c475a30e47f93b9f75e5e2f86639 Mon Sep 17 00:00:00 2001 From: Balaji Veeramani Date: Fri, 5 May 2023 14:03:24 -0700 Subject: [PATCH 265/424] [Data] Fix `map_batches_benchmark_single_node` (#35072) map_batches no longer supports "tasks" and "actors" as arguments to compute. This PR fixes the map_batches benchmark accordingly. Signed-off-by: Balaji Veeramani --- release/nightly_tests/dataset/map_batches_benchmark.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/release/nightly_tests/dataset/map_batches_benchmark.py b/release/nightly_tests/dataset/map_batches_benchmark.py index a106e7b8686c..971810b4a09c 100644 --- a/release/nightly_tests/dataset/map_batches_benchmark.py +++ b/release/nightly_tests/dataset/map_batches_benchmark.py @@ -130,8 +130,13 @@ def run_map_batches_benchmark(benchmark: Benchmark): ).materialize() for batch_format in batch_formats: - for compute in ["tasks", "actors"]: - test_name = f"map-batches-{batch_format}-{compute}-multi-files" + for compute in [None, ActorPoolStrategy(min_size=1, max_size=float("inf"))]: + if compute is None: + compute_strategy = "tasks" + else: + compute_strategy = "actors" + test_name = f"map-batches-{batch_format}-{compute_strategy}-multi-files" + benchmark.run( test_name, map_batches, From 01d9a7913cfe905933b724d03e0cd9aee709a2e0 Mon Sep 17 00:00:00 2001 From: Yunxuan Xiao Date: Fri, 5 May 2023 15:46:55 -0700 Subject: [PATCH 266/424] [Train] LightningTrainer enable checkpoint full dict with FSDP strategy (#34967) Signed-off-by: woshiyyya --- .../ray/train/lightning/_lightning_utils.py | 45 
++++++++++++++++--- .../train/tests/test_lightning_checkpoint.py | 44 +++++++++++++++++- 2 files changed, 81 insertions(+), 8 deletions(-) diff --git a/python/ray/train/lightning/_lightning_utils.py b/python/ray/train/lightning/_lightning_utils.py index 4d2c90987b3d..1495fdcf21dc 100644 --- a/python/ray/train/lightning/_lightning_utils.py +++ b/python/ray/train/lightning/_lightning_utils.py @@ -1,26 +1,38 @@ +import ray +from ray.air import session +from ray.air.constants import MODEL_KEY +from ray.data.datastream import DataIterator +from ray.train.lightning.lightning_checkpoint import LightningCheckpoint + import logging import shutil import torch import tempfile from packaging.version import Version from typing import Any, Dict, Optional +from torch.utils.data import IterableDataset, DataLoader import pytorch_lightning as pl from pytorch_lightning.callbacks import ModelCheckpoint from pytorch_lightning.plugins.environments import LightningEnvironment from pytorch_lightning.strategies import DDPStrategy -if Version(pl.__version__) >= Version("2.0.0"): +_LIGHTNING_GREATER_EQUAL_2_0 = Version(pl.__version__) >= Version("2.0.0") +_TORCH_GREATER_EQUAL_1_12 = Version(torch.__version__) >= Version("1.12.0") +_TORCH_FSDP_AVAILABLE = _TORCH_GREATER_EQUAL_1_12 and torch.distributed.is_available() + +if _LIGHTNING_GREATER_EQUAL_2_0: from pytorch_lightning.strategies import FSDPStrategy else: from pytorch_lightning.strategies import DDPFullyShardedStrategy as FSDPStrategy -import ray -from ray.air import session -from ray.air.constants import MODEL_KEY -from ray.train.lightning.lightning_checkpoint import LightningCheckpoint -from torch.utils.data import IterableDataset, DataLoader -from ray.data.datastream import DataIterator +if _TORCH_FSDP_AVAILABLE: + from torch.distributed.fsdp import ( + FullStateDictConfig, + FullyShardedDataParallel, + StateDictType, + ) + logger = logging.getLogger(__name__) @@ -65,6 +77,25 @@ def distributed_sampler_kwargs(self) -> Dict[str, Any]: 
rank=self.global_rank, ) + def lightning_module_state_dict(self) -> Dict[str, Any]: + """Gathers the full state dict to rank 0 on CPU.""" + assert self.model is not None, "Failed to get the state dict for a None model!" + + if _LIGHTNING_GREATER_EQUAL_2_0 and _TORCH_FSDP_AVAILABLE: + with FullyShardedDataParallel.state_dict_type( + module=self.model, + state_dict_type=StateDictType.FULL_STATE_DICT, + state_dict_config=FullStateDictConfig( + offload_to_cpu=True, rank0_only=True + ), + ): + state_dict = self.model.state_dict() + prefix_len = len("_forward_module.") + return {k[prefix_len:]: v for k, v in state_dict.items()} + else: + # Otherwise Lightning uses Fairscale FSDP, no need to unshard by ourself. + return super().lightning_module_state_dict() + class RayEnvironment(LightningEnvironment): """Setup Lightning DDP training environment for Ray cluster.""" diff --git a/python/ray/train/tests/test_lightning_checkpoint.py b/python/ray/train/tests/test_lightning_checkpoint.py index 64bcd40b32be..5109fb0a051b 100644 --- a/python/ray/train/tests/test_lightning_checkpoint.py +++ b/python/ray/train/tests/test_lightning_checkpoint.py @@ -4,9 +4,15 @@ import torch.nn as nn import tempfile -from ray.train.lightning import LightningCheckpoint +import ray from ray.air.constants import MODEL_KEY from torch.utils.data import DataLoader +from ray.train.tests.lightning_test_utils import LinearModule, DummyDataModule +from ray.train.lightning import ( + LightningCheckpoint, + LightningConfigBuilder, + LightningTrainer, +) class Net(pl.LightningModule): @@ -100,6 +106,42 @@ def test_from_directory(): assert torch.equal(output, checkpoint_output) +def test_fsdp_checkpoint(): + num_epochs = 1 + batch_size = 8 + input_dim = 32 + output_dim = 4 + dataset_size = 256 + + datamodule = DummyDataModule(batch_size, dataset_size) + + config_builder = ( + LightningConfigBuilder() + .module( + LinearModule, input_dim=input_dim, output_dim=output_dim, strategy="fsdp" + ) + 
.trainer(max_epochs=num_epochs, accelerator="gpu") + .strategy("fsdp") + .checkpointing(save_last=True) + .fit_params(datamodule=datamodule) + ) + + scaling_config = ray.air.ScalingConfig(num_workers=2, use_gpu=True) + + trainer = LightningTrainer( + lightning_config=config_builder.build(), scaling_config=scaling_config + ) + + results = trainer.fit() + + with results.checkpoint.as_directory() as checkpoint_dir: + checkpoint = torch.load(f"{checkpoint_dir}/{MODEL_KEY}") + model = LinearModule(input_dim=input_dim, output_dim=output_dim) + + for key in model.state_dict().keys(): + assert key in checkpoint["state_dict"] + + if __name__ == "__main__": import sys From deb787468b998a59e96ad380cc13b5139a36ac0d Mon Sep 17 00:00:00 2001 From: Yi Cheng <74173148+iycheng@users.noreply.github.com> Date: Fri, 5 May 2023 21:38:25 -0700 Subject: [PATCH 267/424] Revert "[Core] Port GcsPublisher to Cython (#34393)" (#35106) This reverts commit fb29282a9e40e9a1fd53cec8a03783a6fdb64d31. --- .buildkite/pipeline.build.yml | 6 -- dashboard/agent.py | 6 +- dashboard/dashboard.py | 3 +- python/ray/_private/gcs_pubsub.py | 45 ++++++++++ python/ray/_private/log_monitor.py | 8 +- python/ray/_private/utils.py | 29 ++++++- python/ray/_private/worker.py | 3 +- python/ray/_raylet.pyx | 63 -------------- python/ray/autoscaler/_private/monitor.py | 3 +- python/ray/includes/common.pxd | 38 --------- python/ray/includes/common.pxi | 1 - python/ray/tests/test_failure.py | 3 +- python/ray/tests/test_gcs_fault_tolerance.py | 14 ++-- python/ray/tests/test_gcs_pubsub.py | 26 +++--- src/ray/gcs/gcs_client/gcs_client.cc | 5 +- src/ray/gcs/pubsub/gcs_pub_sub.cc | 87 -------------------- src/ray/gcs/pubsub/gcs_pub_sub.h | 37 --------- 17 files changed, 109 insertions(+), 268 deletions(-) diff --git a/.buildkite/pipeline.build.yml b/.buildkite/pipeline.build.yml index 8bdd31723559..81debe8a17bf 100644 --- a/.buildkite/pipeline.build.yml +++ b/.buildkite/pipeline.build.yml @@ -368,9 +368,6 @@ - DL=1 
./ci/env/install-dependencies.sh - bash ./ci/ci.sh prepare_docker - ./ci/env/env_info.sh - # This is needed or else the Ray Client tests run into a gRPC forking problem - # similar to https://github.com/grpc/grpc/issues/31885 - - pip install pip install grpcio==1.50.0 - bazel test --config=ci $(./ci/run/bazel_export_options) --test_tag_filters=client_tests,small_size_python_tests -- python/ray/tests/... @@ -421,9 +418,6 @@ - cleanup() { if [ "${BUILDKITE_PULL_REQUEST}" = "false" ]; then ./ci/build/upload_build_info.sh; fi }; trap cleanup EXIT - DL=1 ./ci/env/install-dependencies.sh - ./ci/env/env_info.sh - # This is needed or else the Ray Client tests run into a gRPC forking problem - # similar to https://github.com/grpc/grpc/issues/31885 - - pip install pip install grpcio==1.50.0 - bazel test --config=ci $(./scripts/bazel_export_options) --test_tag_filters=client_tests,small_size_python_tests --test_env=TEST_EXTERNAL_REDIS=1 diff --git a/dashboard/agent.py b/dashboard/agent.py index df57590ff0b6..345099ff7c25 100644 --- a/dashboard/agent.py +++ b/dashboard/agent.py @@ -15,7 +15,7 @@ import ray.dashboard.consts as dashboard_consts import ray.dashboard.utils as dashboard_utils from ray.dashboard.consts import _PARENT_DEATH_THREASHOLD -from ray._private.gcs_pubsub import GcsAioPublisher +from ray._private.gcs_pubsub import GcsAioPublisher, GcsPublisher from ray._raylet import GcsClient from ray._private.gcs_utils import GcsAioClient from ray._private.ray_logging import setup_component_logger @@ -263,9 +263,7 @@ async def _check_parent(): ray._private.utils.publish_error_to_driver( ray_constants.RAYLET_DIED_ERROR, msg, - gcs_publisher=ray._raylet.GcsPublisher( - address=self.gcs_address - ), + gcs_publisher=GcsPublisher(address=self.gcs_address), ) else: logger.info(msg) diff --git a/dashboard/dashboard.py b/dashboard/dashboard.py index 273fbc4c904d..4732e96d23ee 100644 --- a/dashboard/dashboard.py +++ b/dashboard/dashboard.py @@ -13,6 +13,7 @@ import 
ray.dashboard.consts as dashboard_consts import ray.dashboard.head as dashboard_head import ray.dashboard.utils as dashboard_utils +from ray._private.gcs_pubsub import GcsPublisher from ray._private.ray_logging import setup_component_logger from typing import Optional, Set @@ -260,7 +261,7 @@ def sigterm_handler(): raise e # Something went wrong, so push an error to all drivers. - gcs_publisher = ray._raylet.GcsPublisher(address=args.gcs_address) + gcs_publisher = GcsPublisher(address=args.gcs_address) ray._private.utils.publish_error_to_driver( ray_constants.DASHBOARD_DIED_ERROR, message, diff --git a/python/ray/_private/gcs_pubsub.py b/python/ray/_private/gcs_pubsub.py index 2168b9dfed9d..c1d39e728b15 100644 --- a/python/ray/_private/gcs_pubsub.py +++ b/python/ray/_private/gcs_pubsub.py @@ -4,8 +4,10 @@ import random import threading from typing import Optional, Tuple, List +import time import grpc +from grpc._channel import _InactiveRpcError from ray._private.utils import get_or_create_event_loop try: @@ -158,6 +160,49 @@ def _pop_actors(queue, batch_size=100): return msgs +class GcsPublisher(_PublisherBase): + """Publisher to GCS.""" + + def __init__(self, address: str): + channel = gcs_utils.create_gcs_channel(address) + self._stub = gcs_service_pb2_grpc.InternalPubSubGcsServiceStub(channel) + + def publish_error( + self, key_id: bytes, error_info: ErrorTableData, num_retries=None + ) -> None: + """Publishes error info to GCS.""" + msg = pubsub_pb2.PubMessage( + channel_type=pubsub_pb2.RAY_ERROR_INFO_CHANNEL, + key_id=key_id, + error_info_message=error_info, + ) + req = gcs_service_pb2.GcsPublishRequest(pub_messages=[msg]) + self._gcs_publish(req, num_retries, timeout=1) + + def publish_logs(self, log_batch: dict) -> None: + """Publishes logs to GCS.""" + req = self._create_log_request(log_batch) + self._gcs_publish(req) + + def publish_function_key(self, key: bytes) -> None: + """Publishes function key to GCS.""" + req = self._create_function_key_request(key) 
+ self._gcs_publish(req) + + def _gcs_publish(self, req, num_retries=None, timeout=None) -> None: + count = num_retries or MAX_GCS_PUBLISH_RETRIES + while count > 0: + try: + self._stub.GcsPublish(req, timeout=timeout) + return + except _InactiveRpcError: + pass + count -= 1 + if count > 0: + time.sleep(1) + raise TimeoutError(f"Failed to publish after retries: {req}") + + class _SyncSubscriber(_SubscriberBase): def __init__( self, diff --git a/python/ray/_private/log_monitor.py b/python/ray/_private/log_monitor.py index 444ac5b34bec..7f06343625ae 100644 --- a/python/ray/_private/log_monitor.py +++ b/python/ray/_private/log_monitor.py @@ -11,9 +11,11 @@ import traceback from typing import Callable, List, Set +import ray._private.gcs_pubsub as gcs_pubsub import ray._private.ray_constants as ray_constants import ray._private.services as services import ray._private.utils +from ray._private.gcs_pubsub import GcsPublisher from ray._private.ray_logging import setup_component_logger # Logger for this module. It should be configured at the entry point @@ -133,7 +135,7 @@ class LogMonitor: def __init__( self, logs_dir, - gcs_publisher, + gcs_publisher: gcs_pubsub.GcsPublisher, is_proc_alive_fn: Callable[[int], bool], max_files_open: int = ray_constants.LOG_MONITOR_MAX_OPEN_FILES, ): @@ -523,14 +525,14 @@ def is_proc_alive(pid): ) log_monitor = LogMonitor( - args.logs_dir, ray._raylet.GcsPublisher(address=args.gcs_address), is_proc_alive + args.logs_dir, gcs_pubsub.GcsPublisher(address=args.gcs_address), is_proc_alive ) try: log_monitor.run() except Exception as e: # Something went wrong, so push an error to all drivers. 
- gcs_publisher = ray._raylet.GcsPublisher(address=args.gcs_address) + gcs_publisher = GcsPublisher(address=args.gcs_address) traceback_str = ray._private.utils.format_error_message(traceback.format_exc()) message = ( f"The log monitor on node {platform.node()} " diff --git a/python/ray/_private/utils.py b/python/ray/_private/utils.py index 8d1793114ac9..6174890cd8ea 100644 --- a/python/ray/_private/utils.py +++ b/python/ray/_private/utils.py @@ -44,6 +44,7 @@ import ray import ray._private.ray_constants as ray_constants from ray._private.tls_utils import load_certs_from_env +from ray.core.generated.gcs_pb2 import ErrorTableData from ray.core.generated.runtime_env_common_pb2 import ( RuntimeEnvInfo as ProtoRuntimeEnvInfo, ) @@ -181,6 +182,27 @@ def push_error_to_driver( worker.core_worker.push_error(job_id, error_type, message, time.time()) +def construct_error_message(job_id, error_type, message, timestamp): + """Construct an ErrorTableData object. + + Args: + job_id: The ID of the job that the error should go to. If this is + nil, then the error will go to all drivers. + error_type: The type of the error. + message: The error message. + timestamp: The time of the error. + + Returns: + The ErrorTableData object. 
+ """ + data = ErrorTableData() + data.job_id = job_id.binary() + data.type = error_type + data.error_message = message + data.timestamp = timestamp + return data + + def publish_error_to_driver( error_type: str, message: str, @@ -206,12 +228,11 @@ def publish_error_to_driver( if job_id is None: job_id = ray.JobID.nil() assert isinstance(job_id, ray.JobID) + error_data = construct_error_message(job_id, error_type, message, time.time()) try: - gcs_publisher.publish_error( - job_id.hex().encode(), error_type, message, job_id, num_retries - ) + gcs_publisher.publish_error(job_id.hex().encode(), error_data, num_retries) except Exception: - logger.exception(f"Failed to publish error: {message} [type {error_type}]") + logger.exception(f"Failed to publish error {error_data}") def decode(byte_str: str, allow_none: bool = False, encode_type: str = "utf-8"): diff --git a/python/ray/_private/worker.py b/python/ray/_private/worker.py index ace1fdbdefb4..9598ef02aa52 100644 --- a/python/ray/_private/worker.py +++ b/python/ray/_private/worker.py @@ -68,6 +68,7 @@ GcsErrorSubscriber, GcsFunctionKeySubscriber, GcsLogSubscriber, + GcsPublisher, ) from ray._private.inspect_util import is_cython from ray._private.ray_logging import ( @@ -2073,7 +2074,7 @@ def connect( ray._private.state.state._initialize_global_state( ray._raylet.GcsClientOptions.from_gcs_address(node.gcs_address) ) - worker.gcs_publisher = ray._raylet.GcsPublisher(address=worker.gcs_client.address) + worker.gcs_publisher = GcsPublisher(address=worker.gcs_client.address) # Initialize some fields. if mode in (WORKER_MODE, RESTORE_WORKER_MODE, SPILL_WORKER_MODE): # We should not specify the job_id if it's `WORKER_MODE`. 
diff --git a/python/ray/_raylet.pyx b/python/ray/_raylet.pyx index 6aa8bf792221..6a09c94859e0 100644 --- a/python/ray/_raylet.pyx +++ b/python/ray/_raylet.pyx @@ -61,17 +61,14 @@ from ray.includes.common cimport ( CObjectReference, CRayObject, CRayStatus, - CErrorTableData, CGcsClientOptions, CGcsNodeInfo, CJobTableData, - CLogBatch, CTaskArg, CTaskArgByReference, CTaskArgByValue, CTaskType, CPlacementStrategy, - CPythonFunction, CSchedulingStrategy, CPlacementGroupSchedulingStrategy, CNodeAffinitySchedulingStrategy, @@ -1749,66 +1746,6 @@ cdef class GcsClient: } return result -cdef class GcsPublisher: - """Cython wrapper class of C++ `ray::gcs::PythonGcsPublisher`.""" - cdef: - shared_ptr[CPythonGcsPublisher] inner - - def __cinit__(self, address): - self.inner.reset(new CPythonGcsPublisher(address)) - check_status(self.inner.get().Connect()) - - def publish_error(self, key_id: bytes, error_type: str, message: str, - job_id=None, num_retries=None): - cdef: - CErrorTableData error_info - int64_t c_num_retries = num_retries if num_retries else -1 - c_string c_key_id = key_id - - if job_id is None: - job_id = ray.JobID.nil() - assert isinstance(job_id, ray.JobID) - error_info.set_job_id(job_id.binary()) - error_info.set_type(error_type) - error_info.set_error_message(message) - error_info.set_timestamp(time.time()) - - with nogil: - check_status( - self.inner.get().PublishError(c_key_id, error_info, c_num_retries)) - - def publish_logs(self, log_json: dict): - cdef: - CLogBatch log_batch - c_string c_job_id - - job_id = log_json.get("job") - log_batch.set_ip(log_json.get("ip") if log_json.get("ip") else b"") - log_batch.set_pid( - str(log_json.get("pid")).encode() if log_json.get("pid") else b"") - log_batch.set_job_id(job_id.encode() if job_id else b"") - log_batch.set_is_error(bool(log_json.get("is_err"))) - for line in log_json.get("lines", []): - log_batch.add_lines(line) - actor_name = log_json.get("actor_name") - log_batch.set_actor_name(actor_name.encode() if 
actor_name else b"") - task_name = log_json.get("task_name") - log_batch.set_task_name(task_name.encode() if task_name else b"") - - c_job_id = job_id.encode() if job_id else b"" - with nogil: - check_status(self.inner.get().PublishLogs(c_job_id, log_batch)) - - def publish_function_key(self, key: bytes): - cdef: - CPythonFunction python_function - - python_function.set_key(key) - - with nogil: - check_status(self.inner.get().PublishFunctionKey(python_function)) - - cdef class CoreWorker: def __cinit__(self, worker_type, store_socket, raylet_socket, diff --git a/python/ray/autoscaler/_private/monitor.py b/python/ray/autoscaler/_private/monitor.py index f15e109fc9d4..14faf14fa8e9 100644 --- a/python/ray/autoscaler/_private/monitor.py +++ b/python/ray/autoscaler/_private/monitor.py @@ -16,6 +16,7 @@ import ray._private.ray_constants as ray_constants import ray._private.utils from ray._private.event.event_logger import get_event_logger +from ray._private.gcs_pubsub import GcsPublisher from ray._private.ray_logging import setup_component_logger from ray._raylet import GcsClient from ray.autoscaler._private.autoscaler import StandardAutoscaler @@ -559,7 +560,7 @@ def _handle_failure(self, error): _internal_kv_put( ray_constants.DEBUG_AUTOSCALING_ERROR, message, overwrite=True ) - gcs_publisher = ray._raylet.GcsPublisher(address=self.gcs_address) + gcs_publisher = GcsPublisher(address=self.gcs_address) from ray._private.utils import publish_error_to_driver publish_error_to_driver( diff --git a/python/ray/includes/common.pxd b/python/ray/includes/common.pxd index 4250470f3013..e0f8b8ee9712 100644 --- a/python/ray/includes/common.pxd +++ b/python/ray/includes/common.pxd @@ -346,21 +346,6 @@ cdef extern from "ray/gcs/gcs_client/gcs_client.h" namespace "ray::gcs" nogil: unordered_map[c_string, double] PythonGetResourcesTotal( const CGcsNodeInfo& node_info) -cdef extern from "ray/gcs/pubsub/gcs_pub_sub.h" nogil: - - cdef cppclass CPythonGcsPublisher 
"ray::gcs::PythonGcsPublisher": - - CPythonGcsPublisher(const c_string& gcs_address) - - CRayStatus Connect() - - CRayStatus PublishError( - const c_string &key_id, const CErrorTableData &data, int64_t num_retries) - - CRayStatus PublishLogs(const c_string &key_id, const CLogBatch &data) - - CRayStatus PublishFunctionKey(const CPythonFunction& python_function) - cdef extern from "src/ray/protobuf/gcs.pb.h" nogil: cdef cppclass CJobConfig "ray::rpc::JobConfig": c_string ray_namespace() const @@ -387,29 +372,6 @@ cdef extern from "src/ray/protobuf/gcs.pb.h" nogil: c_bool is_dead() const CJobConfig config() const - cdef cppclass CPythonFunction "ray::rpc::PythonFunction": - void set_key(const c_string &key) - - cdef cppclass CErrorTableData "ray::rpc::ErrorTableData": - c_string job_id() const - c_string type() const - c_string error_message() const - double timestamp() const - - void set_job_id(const c_string &job_id) - void set_type(const c_string &type) - void set_error_message(const c_string &error_message) - void set_timestamp(double timestamp) - - cdef cppclass CLogBatch "ray::rpc::LogBatch": - void set_ip(const c_string &ip) - void set_pid(const c_string &pid) - void set_job_id(const c_string &job_id) - void set_is_error(c_bool is_error) - void add_lines(const c_string &line) - void set_actor_name(const c_string &actor_name) - void set_task_name(const c_string &task_name) - cdef extern from "ray/common/task/task_spec.h" nogil: cdef cppclass CConcurrencyGroup "ray::ConcurrencyGroup": diff --git a/python/ray/includes/common.pxi b/python/ray/includes/common.pxi index d7c3c121bc69..89983ff8808c 100644 --- a/python/ray/includes/common.pxi +++ b/python/ray/includes/common.pxi @@ -6,7 +6,6 @@ from ray.includes.common cimport ( CObjectLocation, CGcsClientOptions, CPythonGcsClient, - CPythonGcsPublisher, ) diff --git a/python/ray/tests/test_failure.py b/python/ray/tests/test_failure.py index 93f1c734ee0a..71bb7a98dd9a 100644 --- a/python/ray/tests/test_failure.py +++ 
b/python/ray/tests/test_failure.py @@ -10,6 +10,7 @@ import ray._private.gcs_utils as gcs_utils import ray._private.ray_constants as ray_constants import ray._private.utils +from ray._private.gcs_pubsub import GcsPublisher from ray._private.test_utils import ( SignalActor, convert_actor_state, @@ -68,7 +69,7 @@ def interceptor(e): def test_publish_error_to_driver(ray_start_regular, error_pubsub): address_info = ray_start_regular - gcs_publisher = ray._raylet.GcsPublisher(address=address_info["gcs_address"]) + gcs_publisher = GcsPublisher(address=address_info["gcs_address"]) error_message = "Test error message" ray._private.utils.publish_error_to_driver( diff --git a/python/ray/tests/test_gcs_fault_tolerance.py b/python/ray/tests/test_gcs_fault_tolerance.py index 72caad2f0f6e..fedd531d6cb8 100644 --- a/python/ray/tests/test_gcs_fault_tolerance.py +++ b/python/ray/tests/test_gcs_fault_tolerance.py @@ -18,8 +18,10 @@ run_string_as_driver, ) from ray._private.gcs_pubsub import ( + GcsPublisher, GcsErrorSubscriber, ) +from ray.core.generated.gcs_pb2 import ErrorTableData import psutil @@ -673,20 +675,20 @@ def test_publish_and_subscribe_error_info(ray_start_regular_with_external_redis) subscriber = GcsErrorSubscriber(address=gcs_server_addr) subscriber.subscribe() - publisher = ray._raylet.GcsPublisher(address=gcs_server_addr) + publisher = GcsPublisher(address=gcs_server_addr) + err1 = ErrorTableData(error_message="test error message 1") + err2 = ErrorTableData(error_message="test error message 2") print("sending error message 1") - publisher.publish_error(b"aaa_id", "", "test error message 1") + publisher.publish_error(b"aaa_id", err1) ray._private.worker._global_node.kill_gcs_server() ray._private.worker._global_node.start_gcs_server() print("sending error message 2") - publisher.publish_error(b"bbb_id", "", "test error message 2") + publisher.publish_error(b"bbb_id", err2) print("done") - (key_id, err) = subscriber.poll() - assert key_id == b"bbb_id" - assert 
err.error_message == "test error message 2" + assert subscriber.poll() == (b"bbb_id", err2) subscriber.close() diff --git a/python/ray/tests/test_gcs_pubsub.py b/python/ray/tests/test_gcs_pubsub.py index 71d4ae802f26..b9a4eddee7a4 100644 --- a/python/ray/tests/test_gcs_pubsub.py +++ b/python/ray/tests/test_gcs_pubsub.py @@ -3,8 +3,8 @@ import threading import re -import ray from ray._private.gcs_pubsub import ( + GcsPublisher, GcsErrorSubscriber, GcsLogSubscriber, GcsFunctionKeySubscriber, @@ -24,16 +24,14 @@ def test_publish_and_subscribe_error_info(ray_start_regular): subscriber = GcsErrorSubscriber(address=gcs_server_addr) subscriber.subscribe() - publisher = ray._raylet.GcsPublisher(address=gcs_server_addr) - publisher.publish_error(b"aaa_id", "", "test error message 1") - publisher.publish_error(b"bbb_id", "", "test error message 2") + publisher = GcsPublisher(address=gcs_server_addr) + err1 = ErrorTableData(error_message="test error message 1") + err2 = ErrorTableData(error_message="test error message 2") + publisher.publish_error(b"aaa_id", err1) + publisher.publish_error(b"bbb_id", err2) - (key_id1, err1) = subscriber.poll() - assert key_id1 == b"aaa_id" - assert err1.error_message == "test error message 1" - (key_id2, err2) = subscriber.poll() - assert key_id2 == b"bbb_id" - assert err2.error_message == "test error message 2" + assert subscriber.poll() == (b"aaa_id", err1) + assert subscriber.poll() == (b"bbb_id", err2) subscriber.close() @@ -65,7 +63,7 @@ def test_publish_and_subscribe_logs(ray_start_regular): subscriber = GcsLogSubscriber(address=gcs_server_addr) subscriber.subscribe() - publisher = ray._raylet.GcsPublisher(address=gcs_server_addr) + publisher = GcsPublisher(address=gcs_server_addr) log_batch = { "ip": "127.0.0.1", "pid": 1234, @@ -116,7 +114,7 @@ def test_publish_and_subscribe_function_keys(ray_start_regular): subscriber = GcsFunctionKeySubscriber(address=gcs_server_addr) subscriber.subscribe() - publisher = 
ray._raylet.GcsPublisher(address=gcs_server_addr) + publisher = GcsPublisher(address=gcs_server_addr) publisher.publish_function_key(b"111") publisher.publish_function_key(b"222") @@ -198,9 +196,9 @@ def receive_logs(): t2 = threading.Thread(target=receive_logs) t2.start() - publisher = ray._raylet.GcsPublisher(address=gcs_server_addr) + publisher = GcsPublisher(address=gcs_server_addr) for i in range(0, num_messages): - publisher.publish_error(b"msg_id", "", f"error {i}") + publisher.publish_error(b"msg_id", ErrorTableData(error_message=f"error {i}")) publisher.publish_logs( { "ip": "127.0.0.1", diff --git a/src/ray/gcs/gcs_client/gcs_client.cc b/src/ray/gcs/gcs_client/gcs_client.cc index ae342b05eec0..fb721893d7ea 100644 --- a/src/ray/gcs/gcs_client/gcs_client.cc +++ b/src/ray/gcs/gcs_client/gcs_client.cc @@ -146,7 +146,10 @@ std::pair GcsClient::GetGcsServerAddress() const { PythonGcsClient::PythonGcsClient(const GcsClientOptions &options) : options_(options) {} Status PythonGcsClient::Connect() { - auto arguments = PythonGrpcChannelArguments(); + grpc::ChannelArguments arguments; + arguments.SetInt(GRPC_ARG_MAX_MESSAGE_LENGTH, 512 * 1024 * 1024); + arguments.SetInt(GRPC_ARG_KEEPALIVE_TIME_MS, 60 * 1000); + arguments.SetInt(GRPC_ARG_KEEPALIVE_TIMEOUT_MS, 60 * 1000); channel_ = rpc::BuildChannel(options_.gcs_address_, options_.gcs_port_, arguments); kv_stub_ = rpc::InternalKVGcsService::NewStub(channel_); runtime_env_stub_ = rpc::RuntimeEnvGcsService::NewStub(channel_); diff --git a/src/ray/gcs/pubsub/gcs_pub_sub.cc b/src/ray/gcs/pubsub/gcs_pub_sub.cc index b03a9157da46..32c0e9f41367 100644 --- a/src/ray/gcs/pubsub/gcs_pub_sub.cc +++ b/src/ray/gcs/pubsub/gcs_pub_sub.cc @@ -15,7 +15,6 @@ #include "ray/gcs/pubsub/gcs_pub_sub.h" #include "absl/strings/str_cat.h" -#include "ray/rpc/grpc_client.h" namespace ray { namespace gcs { @@ -213,91 +212,5 @@ Status GcsSubscriber::SubscribeAllWorkerFailures( return Status::OK(); } -grpc::ChannelArguments 
PythonGrpcChannelArguments() { - grpc::ChannelArguments arguments; - arguments.SetInt(GRPC_ARG_MAX_MESSAGE_LENGTH, 512 * 1024 * 1024); - arguments.SetInt(GRPC_ARG_KEEPALIVE_TIME_MS, 60 * 1000); - arguments.SetInt(GRPC_ARG_KEEPALIVE_TIMEOUT_MS, 60 * 1000); - return arguments; -} - -PythonGcsPublisher::PythonGcsPublisher(const std::string &gcs_address) { - std::vector address = absl::StrSplit(gcs_address, ':'); - RAY_LOG(DEBUG) << "Connect to gcs server via address: " << gcs_address; - RAY_CHECK(address.size() == 2); - gcs_address_ = address[0]; - gcs_port_ = std::stoi(address[1]); -} - -Status PythonGcsPublisher::Connect() { - auto arguments = PythonGrpcChannelArguments(); - channel_ = rpc::BuildChannel(gcs_address_, gcs_port_, arguments); - pubsub_stub_ = rpc::InternalPubSubGcsService::NewStub(channel_); - return Status::OK(); -} - -constexpr int MAX_GCS_PUBLISH_RETRIES = 60; - -Status PythonGcsPublisher::DoPublishWithRetries(const rpc::GcsPublishRequest &request, - int64_t num_retries, - int64_t timeout_ms) { - int count = num_retries == -1 ? 
MAX_GCS_PUBLISH_RETRIES : num_retries; - rpc::GcsPublishReply reply; - grpc::Status status; - while (count > 0) { - grpc::ClientContext context; - if (timeout_ms != -1) { - context.set_deadline(std::chrono::system_clock::now() + - std::chrono::milliseconds(timeout_ms)); - } - status = pubsub_stub_->GcsPublish(&context, request, &reply); - if (status.error_code() == grpc::StatusCode::OK) { - if (reply.status().code() != static_cast(StatusCode::OK)) { - return Status::Invalid(reply.status().message()); - } - return Status::OK(); - } else if (status.error_code() == grpc::StatusCode::UNAVAILABLE || - status.error_code() == grpc::StatusCode::UNKNOWN) { - // This is the case in which we will retry - count -= 1; - std::this_thread::sleep_for(std::chrono::seconds(1)); - continue; - } else { - return Status::Invalid(status.error_message()); - } - } - return Status::TimedOut("Failed to publish after retries: " + status.error_message()); -} - -Status PythonGcsPublisher::PublishError(const std::string &key_id, - const rpc::ErrorTableData &error_info, - int64_t num_retries) { - rpc::GcsPublishRequest request; - auto *message = request.add_pub_messages(); - message->set_channel_type(rpc::RAY_ERROR_INFO_CHANNEL); - message->set_key_id(key_id); - message->mutable_error_info_message()->MergeFrom(error_info); - return DoPublishWithRetries(request, num_retries, 1000); -} - -Status PythonGcsPublisher::PublishLogs(const std::string &key_id, - const rpc::LogBatch &log_batch) { - rpc::GcsPublishRequest request; - auto *message = request.add_pub_messages(); - message->set_channel_type(rpc::RAY_LOG_CHANNEL); - message->set_key_id(key_id); - message->mutable_log_batch_message()->MergeFrom(log_batch); - return DoPublishWithRetries(request, -1, -1); -} - -Status PythonGcsPublisher::PublishFunctionKey( - const rpc::PythonFunction &python_function) { - rpc::GcsPublishRequest request; - auto *message = request.add_pub_messages(); - message->set_channel_type(rpc::RAY_PYTHON_FUNCTION_CHANNEL); - 
message->mutable_python_function_message()->MergeFrom(python_function); - return DoPublishWithRetries(request, -1, -1); -} - } // namespace gcs } // namespace ray diff --git a/src/ray/gcs/pubsub/gcs_pub_sub.h b/src/ray/gcs/pubsub/gcs_pub_sub.h index db621938dc98..ffd79a6adfab 100644 --- a/src/ray/gcs/pubsub/gcs_pub_sub.h +++ b/src/ray/gcs/pubsub/gcs_pub_sub.h @@ -25,7 +25,6 @@ #include "ray/pubsub/publisher.h" #include "ray/pubsub/subscriber.h" #include "src/ray/protobuf/gcs.pb.h" -#include "src/ray/protobuf/gcs_service.grpc.pb.h" #include "src/ray/protobuf/gcs_service.pb.h" namespace ray { @@ -133,41 +132,5 @@ class GcsSubscriber { const std::unique_ptr subscriber_; }; -// This client is only supposed to be used from Cython / Python -class RAY_EXPORT PythonGcsPublisher { - public: - explicit PythonGcsPublisher(const std::string &gcs_address); - - /// Connect to the publisher service of the GCS. - /// This function must be called before calling other functions. - /// - /// \return Status - Status Connect(); - - /// Publish error information to GCS. - Status PublishError(const std::string &key_id, - const rpc::ErrorTableData &data, - int64_t num_retries); - - /// Publish logs to GCS. - Status PublishLogs(const std::string &key_id, const rpc::LogBatch &log_batch); - - /// Publish a function key to GCS. 
- Status PublishFunctionKey(const rpc::PythonFunction &python_function); - - private: - Status DoPublishWithRetries(const rpc::GcsPublishRequest &request, - int64_t num_retries, - int64_t timeout_ms); - std::unique_ptr pubsub_stub_; - std::shared_ptr channel_; - std::string gcs_address_; - int gcs_port_; -}; - -/// Construct the arguments for synchronous gRPC clients -/// (the ones wrapped in Python) -grpc::ChannelArguments PythonGrpcChannelArguments(); - } // namespace gcs } // namespace ray From 082be41dfa45af81e5271d2edc3c308b177bd2e0 Mon Sep 17 00:00:00 2001 From: Yi Cheng <74173148+iycheng@users.noreply.github.com> Date: Fri, 5 May 2023 21:38:34 -0700 Subject: [PATCH 268/424] Revert "[core][state] Push down filtering to GCS for listing/getting task from state api (#34433)" (#35107) This reverts commit d8321a799e56739167a33a3ea8aefe940e4b581f. --- dashboard/state_aggregator.py | 8 +- .../ray/experimental/state/state_manager.py | 40 ++------- python/ray/tests/test_state_api.py | 10 +-- src/ray/gcs/gcs_server/gcs_task_manager.cc | 40 +++------ .../gcs_server/test/gcs_task_manager_test.cc | 84 ++----------------- src/ray/protobuf/gcs_service.proto | 21 ++--- 6 files changed, 40 insertions(+), 163 deletions(-) diff --git a/dashboard/state_aggregator.py b/dashboard/state_aggregator.py index b7cfd20b5c9c..e4e38c9f323b 100644 --- a/dashboard/state_aggregator.py +++ b/dashboard/state_aggregator.py @@ -377,10 +377,16 @@ async def list_tasks(self, *, option: ListApiOptions) -> ListApiResponse: {task_id -> task_data_in_dict} task_data_in_dict's schema is in TaskState """ + job_id = None + for filter in option.filters: + if filter[0] == "job_id" and filter[1] == "=": + # Filtering by job_id == xxxx, pass it to source side filtering. 
+ # tuple consists of (job_id, predicate, value) + job_id = filter[2] try: reply = await self._client.get_all_task_info( timeout=option.timeout, - filters=option.filters, + job_id=job_id, exclude_driver=option.exclude_driver, ) except DataSourceUnavailable: diff --git a/python/ray/experimental/state/state_manager.py b/python/ray/experimental/state/state_manager.py index 19e1fa318e38..11ea98b89c4c 100644 --- a/python/ray/experimental/state/state_manager.py +++ b/python/ray/experimental/state/state_manager.py @@ -12,7 +12,7 @@ from ray._private import ray_constants from ray._private.gcs_utils import GcsAioClient from ray._private.utils import hex_to_binary -from ray._raylet import ActorID, JobID, TaskID +from ray._raylet import ActorID, JobID from ray.core.generated import gcs_service_pb2_grpc from ray.core.generated.gcs_pb2 import ActorTableData from ray.core.generated.gcs_service_pb2 import ( @@ -262,40 +262,16 @@ async def get_all_task_info( self, timeout: int = None, limit: int = None, - filters: Optional[List[Tuple[str, PredicateType, SupportedFilterType]]] = None, - exclude_driver: bool = False, + job_id: Optional[str] = None, + exclude_driver: bool = True, ) -> Optional[GetTaskEventsReply]: if not limit: limit = RAY_MAX_LIMIT_FROM_DATA_SOURCE - - if filters is None: - filters = [] - - req_filters = GetTaskEventsRequest.Filters() - for filter in filters: - key, predicate, value = filter - if predicate != "=": - # We only support EQUAL predicate for source side filtering. - continue - - if key == "actor_id": - req_filters.actor_id = ActorID(hex_to_binary(value)).binary() - elif key == "job_id": - req_filters.job_id = JobID(hex_to_binary(value)).binary() - elif key == "name": - req_filters.name = value - elif key == "task_id": - req_filters.task_ids.append(TaskID(hex_to_binary(value)).binary()) - else: - continue - - # Remove the filter from the list so that we don't have to - # filter it again later. 
- filters.remove(filter) - - req_filters.exclude_driver = exclude_driver - - request = GetTaskEventsRequest(limit=limit, filters=req_filters) + if job_id: + job_id = JobID(hex_to_binary(job_id)).binary() + request = GetTaskEventsRequest( + limit=limit, exclude_driver=exclude_driver, job_id=job_id + ) reply = await self._gcs_task_info_stub.GetTaskEvents(request, timeout=timeout) return reply diff --git a/python/ray/tests/test_state_api.py b/python/ray/tests/test_state_api.py index 38d097ae5d9c..64882421c856 100644 --- a/python/ray/tests/test_state_api.py +++ b/python/ray/tests/test_state_api.py @@ -2282,7 +2282,7 @@ def g(dep): def impossible(): pass - out = [f.options(name=f"f_{i}").remote() for i in range(2)] # noqa + out = [f.remote() for _ in range(2)] # noqa g_out = g.remote(f.remote()) # noqa im = impossible.remote() # noqa @@ -2350,9 +2350,6 @@ def verify(): for task in tasks: assert task["job_id"] == job_id - tasks = list_tasks(filters=[("name", "=", "f_0")]) - assert len(tasks) == 1 - return True wait_for_condition(verify) @@ -2543,6 +2540,7 @@ def verify(): for task in tasks: assert task["job_id"] == job_id for task in tasks: + print(task) assert task["actor_id"] == actor_id # Actor.__init__: 1 finished # Actor.call: 1 running, 9 waiting for execution (queued). @@ -2592,10 +2590,6 @@ def verify(): == 1 ) - # Filters with actor id. 
- assert len(list_tasks(filters=[("actor_id", "=", actor_id)])) == 11 - assert len(list_tasks(filters=[("actor_id", "!=", actor_id)])) == 0 - return True wait_for_condition(verify) diff --git a/src/ray/gcs/gcs_server/gcs_task_manager.cc b/src/ray/gcs/gcs_server/gcs_task_manager.cc index e733856b8ee5..6771e042bb24 100644 --- a/src/ray/gcs/gcs_server/gcs_task_manager.cc +++ b/src/ray/gcs/gcs_server/gcs_task_manager.cc @@ -313,17 +313,16 @@ void GcsTaskManager::HandleGetTaskEvents(rpc::GetTaskEventsRequest request, rpc::SendReplyCallback send_reply_callback) { RAY_LOG(DEBUG) << "Getting task status:" << request.ShortDebugString(); - // Select candidate events by indexing if possible. + // Select candidate events by indexing. std::vector task_events; - const auto &filters = request.filters(); - if (filters.task_ids_size() > 0) { + if (request.has_task_ids()) { absl::flat_hash_set task_ids; - for (const auto &task_id_str : filters.task_ids()) { + for (const auto &task_id_str : request.task_ids().vals()) { task_ids.insert(TaskID::FromBinary(task_id_str)); } task_events = task_event_storage_->GetTaskEvents(task_ids); - } else if (filters.has_job_id()) { - task_events = task_event_storage_->GetTaskEvents(JobID::FromBinary(filters.job_id())); + } else if (request.has_job_id()) { + task_events = task_event_storage_->GetTaskEvents(JobID::FromBinary(request.job_id())); } else { task_events = task_event_storage_->GetTaskEvents(); } @@ -335,34 +334,15 @@ void GcsTaskManager::HandleGetTaskEvents(rpc::GetTaskEventsRequest request, int32_t num_profile_event_limit = 0; int32_t num_status_event_limit = 0; - // A lambda filter fn, where it returns true for task events to be included in the - // result. Task ids and job ids are already filtered by the storage with indexing above. 
- auto filter_fn = [&filters](const rpc::TaskEvents &task_event) { + for (auto itr = task_events.rbegin(); itr != task_events.rend(); ++itr) { + auto &task_event = *itr; if (!task_event.has_task_info()) { // Skip task events w/o task info. - return false; - } - if (filters.exclude_driver() && - task_event.task_info().type() == rpc::TaskType::DRIVER_TASK) { - return false; - } - - if (filters.has_actor_id() && task_event.task_info().has_actor_id() && - ActorID::FromBinary(task_event.task_info().actor_id()) != - ActorID::FromBinary(filters.actor_id())) { - return false; - } - - if (filters.has_name() && task_event.task_info().name() != filters.name()) { - return false; + continue; } - return true; - }; - - for (auto itr = task_events.rbegin(); itr != task_events.rend(); ++itr) { - auto &task_event = *itr; - if (!filter_fn(task_event)) { + if (request.exclude_driver() && + task_event.task_info().type() == rpc::TaskType::DRIVER_TASK) { continue; } diff --git a/src/ray/gcs/gcs_server/test/gcs_task_manager_test.cc b/src/ray/gcs/gcs_server/test/gcs_task_manager_test.cc index d60ea97f100f..91070fe1cf35 100644 --- a/src/ray/gcs/gcs_server/test/gcs_task_manager_test.cc +++ b/src/ray/gcs/gcs_server/test/gcs_task_manager_test.cc @@ -115,36 +115,26 @@ class GcsTaskManagerTest : public ::testing::Test { rpc::GetTaskEventsReply SyncGetTaskEvents(absl::flat_hash_set task_ids, absl::optional job_id = absl::nullopt, int64_t limit = -1, - bool exclude_driver = true, - const std::string &name = "", - const ActorID &actor_id = ActorID::Nil()) { + bool exclude_driver = true) { rpc::GetTaskEventsRequest request; rpc::GetTaskEventsReply reply; std::promise promise; if (!task_ids.empty()) { for (const auto &task_id : task_ids) { - request.mutable_filters()->add_task_ids(task_id.Binary()); + request.mutable_task_ids()->add_vals(task_id.Binary()); } } - if (!name.empty()) { - request.mutable_filters()->set_name(name); - } - - if (!actor_id.IsNil()) { - 
request.mutable_filters()->set_actor_id(actor_id.Binary()); - } - if (job_id) { - request.mutable_filters()->set_job_id(job_id->Binary()); + request.set_job_id(job_id->Binary()); } if (limit >= 0) { request.set_limit(limit); } - request.mutable_filters()->set_exclude_driver(exclude_driver); + request.set_exclude_driver(exclude_driver); task_manager->GetIoContext().dispatch( [this, &promise, &request, &reply]() { task_manager->HandleGetTaskEvents( @@ -165,15 +155,11 @@ class GcsTaskManagerTest : public ::testing::Test { static rpc::TaskInfoEntry GenTaskInfo( JobID job_id, TaskID parent_task_id = TaskID::Nil(), - rpc::TaskType task_type = rpc::TaskType::NORMAL_TASK, - const ActorID actor_id = ActorID::Nil(), - const std::string name = "") { + rpc::TaskType task_type = rpc::TaskType::NORMAL_TASK) { rpc::TaskInfoEntry task_info; task_info.set_job_id(job_id.Binary()); task_info.set_parent_task_id(parent_task_id.Binary()); task_info.set_type(task_type); - task_info.set_actor_id(actor_id.Binary()); - task_info.set_name(name); return task_info; } @@ -504,66 +490,6 @@ TEST_F(GcsTaskManagerTest, TestGetTaskEventsByJob) { reply_job2.mutable_events_by_task()); } -TEST_F(GcsTaskManagerTest, TestGetTaskEventsFilters) { - // Generate task events - - // A task event with actor id - ActorID actor_id = ActorID::Of(JobID::FromInt(1), TaskID::Nil(), 1); - { - auto task_ids = GenTaskIDs(1); - auto task_info_actor_id = - GenTaskInfo(JobID::FromInt(1), TaskID::Nil(), rpc::ACTOR_TASK, actor_id); - auto events = GenTaskEvents(task_ids, - /* attempt_number */ - 0, - /* job_id */ 1, - absl::nullopt, - absl::nullopt, - task_info_actor_id); - auto data = Mocker::GenTaskEventsData(events); - SyncAddTaskEventData(data); - } - - // A task event with name. 
- { - auto task_ids = GenTaskIDs(1); - auto task_info_name = GenTaskInfo( - JobID::FromInt(1), TaskID::Nil(), rpc::NORMAL_TASK, ActorID::Nil(), "task_name"); - auto events = GenTaskEvents(task_ids, - /* attempt_number */ - 0, - /* job_id */ 1, - absl::nullopt, - absl::nullopt, - task_info_name); - auto data = Mocker::GenTaskEventsData(events); - SyncAddTaskEventData(data); - } - - auto reply_name = SyncGetTaskEvents({}, - /* job_id */ absl::nullopt, - /* limit */ -1, - /* exclude_driver */ false, - "task_name"); - EXPECT_EQ(reply_name.events_by_task_size(), 1); - - auto reply_actor_id = SyncGetTaskEvents({}, - /* job_id */ absl::nullopt, - /* limit */ -1, - /* exclude_driver */ false, - /* name */ "", - actor_id); - EXPECT_EQ(reply_name.events_by_task_size(), 1); - - auto reply_both_and = SyncGetTaskEvents({}, - /* job_id */ absl::nullopt, - /* limit */ -1, - /* exclude_driver */ false, - "task_name", - actor_id); - EXPECT_EQ(reply_both_and.events_by_task_size(), 0); -} - TEST_F(GcsTaskManagerTest, TestMarkTaskAttemptFailedIfNeeded) { auto tasks = GenTaskIDs(3); auto tasks_running = tasks[0]; diff --git a/src/ray/protobuf/gcs_service.proto b/src/ray/protobuf/gcs_service.proto index 7bc382bc0842..38280e48d3f6 100644 --- a/src/ray/protobuf/gcs_service.proto +++ b/src/ray/protobuf/gcs_service.proto @@ -644,27 +644,22 @@ message AddTaskEventDataReply { } message GetTaskEventsRequest { - // Filter object where predicates are AND together. - message Filters { + message TaskIDs { + repeated string vals = 1; + } + oneof select_by { // Get task events from a job. - optional bytes job_id = 1; + string job_id = 1; // Get task events from a set of tasks. - repeated bytes task_ids = 2; - // Get the task events with an actor id. - optional bytes actor_id = 3; - // Get the task events of task with names. - optional string name = 4; - // True if task events from driver (only profiling events) should be excluded. 
- optional bool exclude_driver = 5; + TaskIDs task_ids = 2; } // Maximum number of TaskEvents to return. // If set, the exact `limit` TaskEvents returned do not have any ordering or selection // guarantee. optional int64 limit = 3; - - // Filters to apply to the get query. - optional Filters filters = 4; + // True if task events from driver (only profiling events) should be excluded. + bool exclude_driver = 4; } message GetTaskEventsReply { From ad1b12cccba37bac3832facce16808ae211dfcf2 Mon Sep 17 00:00:00 2001 From: Artur Niederfahrenhorst Date: Sat, 6 May 2023 19:34:59 +0200 Subject: [PATCH 269/424] [RLlib][RLModule] Fix test_rllib_train_and_evaluate for rlmodules (#34392) Signed-off-by: Artur Niederfahrenhorst --- rllib/algorithms/algorithm_config.py | 21 --------------------- 1 file changed, 21 deletions(-) diff --git a/rllib/algorithms/algorithm_config.py b/rllib/algorithms/algorithm_config.py index f24b2affeb7c..d83b5705a071 100644 --- a/rllib/algorithms/algorithm_config.py +++ b/rllib/algorithms/algorithm_config.py @@ -868,27 +868,6 @@ def validate(self) -> None: self.rl_module(_enable_rl_module_api=True) self.enable_connectors = True - # Explore parameter cannot be False with RLModule API enabled. - # The reason is that `explore` is not just a parameter that will get passed - # down to the policy.compute_actions() anymore. It is a phase in which RLModule. - # forward_exploration() will get called during sampling. If user needs to - # really disable the stochasticity during this phase, they need to override the - # RLModule.forward_exploration() method or setup model parameters such that it - # will disable the stochasticity of this method (e.g. by setting the std to 0 - # or setting temperature to 0 for the Categorical distribution). - if self._enable_rl_module_api and not self.explore: - raise ValueError( - "When RLModule API is enabled, explore parameter cannot be False. 
" - "Please set explore=None or disable RLModule API via " - "`config.rl_module(_enable_rl_module_api=False)`." - "If you want to disable the stochasticity during the exploration " - "phase, you can customize your RLModule and override the RLModule." - "forward_exploration() method " - "or setup model parameters such that it will disable the " - "stochasticity of this method (e.g. by setting the std to 0 or " - "setting temperature to 0 for the Categorical distribution)." - ) - # Validate grad clipping settings. if self.grad_clip_by not in ["value", "norm", "global_norm"]: raise ValueError( From 05c15505b7df7fe375b43a443a9f2f537dbe884f Mon Sep 17 00:00:00 2001 From: Matti Picus Date: Sun, 7 May 2023 10:52:38 +0300 Subject: [PATCH 270/424] Add include for newer MSVC (#34968) Signed-off-by: mattip --- cpp/include/ray/api/metric.h | 1 + 1 file changed, 1 insertion(+) diff --git a/cpp/include/ray/api/metric.h b/cpp/include/ray/api/metric.h index d3d87df60698..10cb95257dd5 100644 --- a/cpp/include/ray/api/metric.h +++ b/cpp/include/ray/api/metric.h @@ -15,6 +15,7 @@ #pragma once #include +#include #include #include From ba8edfef2a56447321f3129212ab12d697c2be2c Mon Sep 17 00:00:00 2001 From: Kai Fricke Date: Sun, 7 May 2023 14:07:56 +0100 Subject: [PATCH 271/424] [air/mlflow] Flatten config and metrics before passing to mlflow (#35074) Metrics and parameters are passed as-is to mlflow e.g. in the MlFlowCallback. However, mlflow can't deal with nested dicts. Instead, we should flatten these dicts before passing them over. 
Signed-off-by: Kai Fricke --- python/ray/air/_internal/mlflow.py | 4 +++ .../ray/air/tests/test_integration_mlflow.py | 31 ++++++++++++------- 2 files changed, 23 insertions(+), 12 deletions(-) diff --git a/python/ray/air/_internal/mlflow.py b/python/ray/air/_internal/mlflow.py index d0fd1168dd60..a7b553100933 100644 --- a/python/ray/air/_internal/mlflow.py +++ b/python/ray/air/_internal/mlflow.py @@ -4,6 +4,8 @@ from copy import deepcopy from typing import TYPE_CHECKING, Dict, Optional +from ray._private.dict import flatten_dict + if TYPE_CHECKING: from mlflow.entities import Run from mlflow.tracking import MlflowClient @@ -262,6 +264,7 @@ def log_params(self, params_to_log: Dict, run_id: Optional[str] = None): params_to_log: Dictionary of parameters to log. run_id (Optional[str]): The ID of the run to log to. """ + params_to_log = flatten_dict(params_to_log) if run_id and self._run_exists(run_id): client = self._get_client() @@ -284,6 +287,7 @@ def log_metrics(self, step, metrics_to_log: Dict, run_id: Optional[str] = None): metrics_to_log: Dictionary of metrics to log. run_id (Optional[str]): The ID of the run to log to. 
""" + metrics_to_log = flatten_dict(metrics_to_log) metrics_to_log = self._parse_dict(metrics_to_log) if run_id and self._run_exists(run_id): diff --git a/python/ray/air/tests/test_integration_mlflow.py b/python/ray/air/tests/test_integration_mlflow.py index 7b6ea45c0642..85cab89080e1 100644 --- a/python/ray/air/tests/test_integration_mlflow.py +++ b/python/ray/air/tests/test_integration_mlflow.py @@ -7,6 +7,7 @@ from mlflow.tracking import MlflowClient +from ray._private.dict import flatten_dict from ray.train._internal.session import init_session from ray.tune.trainable import wrap_function from ray.tune.trainable.session import _shutdown as tune_session_shutdown @@ -367,7 +368,7 @@ def test_setup_fail(self): ) def test_log_params(self): - params = {"a": "a"} + params = {"a": "a", "x": {"y": "z"}} self.mlflow_util.setup_mlflow( tracking_uri=self.tracking_uri, experiment_name="new_experiment" ) @@ -376,21 +377,23 @@ def test_log_params(self): self.mlflow_util.log_params(params_to_log=params, run_id=run_id) run = self.mlflow_util._mlflow.get_run(run_id=run_id) - assert run.data.params == params + assert run.data.params == flatten_dict(params) params2 = {"b": "b"} self.mlflow_util.start_run(set_active=True) self.mlflow_util.log_params(params_to_log=params2, run_id=run_id) run = self.mlflow_util._mlflow.get_run(run_id=run_id) - assert run.data.params == { - **params, - **params2, - } + assert run.data.params == flatten_dict( + { + **params, + **params2, + } + ) self.mlflow_util.end_run() def test_log_metrics(self): - metrics = {"a": 1.0} + metrics = {"a": 1.0, "x": {"y": 2.0}} self.mlflow_util.setup_mlflow( tracking_uri=self.tracking_uri, experiment_name="new_experiment" ) @@ -399,15 +402,19 @@ def test_log_metrics(self): self.mlflow_util.log_metrics(metrics_to_log=metrics, run_id=run_id, step=0) run = self.mlflow_util._mlflow.get_run(run_id=run_id) - assert run.data.metrics == metrics + assert run.data.metrics == flatten_dict(metrics) metrics2 = {"b": 1.0} 
self.mlflow_util.start_run(set_active=True) self.mlflow_util.log_metrics(metrics_to_log=metrics2, run_id=run_id, step=0) - assert self.mlflow_util._mlflow.get_run(run_id=run_id).data.metrics == { - **metrics, - **metrics2, - } + assert self.mlflow_util._mlflow.get_run( + run_id=run_id + ).data.metrics == flatten_dict( + { + **metrics, + **metrics2, + } + ) self.mlflow_util.end_run() From d0a27c283998edd1326996ab7f1c69ab5264e802 Mon Sep 17 00:00:00 2001 From: Max Pumperla Date: Sun, 7 May 2023 23:39:00 +0200 Subject: [PATCH 272/424] [docs] sphinx design 7/N (fin!) (#35037) last PR for this migration, next up would be a sphinx + theme upgrade Signed-off-by: Max Pumperla Signed-off-by: Philipp Moritz Co-authored-by: angelinalg <122562471+angelinalg@users.noreply.github.com> Co-authored-by: Philipp Moritz --- doc/requirements-doc.txt | 1 - doc/source/cluster/getting-started.rst | 113 +++++----- doc/source/cluster/kubernetes/index.md | 111 +++++----- doc/source/cluster/vms/index.md | 113 +++++----- doc/source/conf.py | 2 +- doc/source/custom_directives.py | 42 ++-- doc/source/data/batch_inference.rst | 51 ++--- .../examples/gptj_deepspeed_fine_tuning.ipynb | 4 +- doc/source/ray-contribute/development.rst | 18 +- .../ray-contribute/writing-code-snippets.rst | 2 +- doc/source/ray-core/examples/overview.rst | 196 +++++++++--------- doc/source/ray-core/namespaces.rst | 6 +- doc/source/ray-core/objects.rst | 2 +- doc/source/ray-core/objects/serialization.rst | 2 +- doc/source/ray-core/walkthrough.rst | 56 ++--- doc/source/ray-observability/ray-logging.rst | 2 +- doc/source/ray-overview/eco-gallery.yml | 11 +- doc/source/ray-overview/getting-started.md | 187 ++++++++++------- doc/source/ray-overview/index.md | 116 ++++++----- doc/source/ray-overview/use-cases.rst | 2 +- doc/source/train/examples.rst | 2 +- doc/source/train/gbdt.rst | 26 +-- .../tune/examples/experiment-tracking.rst | 75 ++++--- 23 files changed, 603 insertions(+), 537 deletions(-) diff --git 
a/doc/requirements-doc.txt b/doc/requirements-doc.txt index d1792bb49b20..05156ea26c2a 100644 --- a/doc/requirements-doc.txt +++ b/doc/requirements-doc.txt @@ -54,7 +54,6 @@ sphinx-click==3.0.2 sphinx-copybutton==0.4.0 sphinxemoji==0.2.0 sphinx-jsonschema==1.17.2 -sphinx-panels==0.6.0 sphinx-version-warning==1.1.2 sphinx-book-theme==0.3.3 sphinx-external-toc==0.2.4 diff --git a/doc/source/cluster/getting-started.rst b/doc/source/cluster/getting-started.rst index 8024ef751a33..e0054beea1b8 100644 --- a/doc/source/cluster/getting-started.rst +++ b/doc/source/cluster/getting-started.rst @@ -31,57 +31,66 @@ or onto :ref:`platforms not listed here `. What's next? ------------ -.. panels:: - :container: text-center - :column: col-lg-6 px-3 py-2 - :card: - - **I want to learn key Ray cluster concepts** - ^^^ - Understand the key concepts and main ways of interacting with a Ray cluster. - - +++ - .. link-button:: cluster-key-concepts - :type: ref - :text: Learn Key Concepts - :classes: btn-outline-info btn-block - - --- - - **I want to run Ray on Kubernetes** - ^^^ - Deploy a Ray application to a Kubernetes cluster. You can run the tutorial on a - Kubernetes cluster or on your laptop via KinD. - - +++ - .. link-button:: kuberay-quickstart - :type: ref - :text: Get Started with Ray on Kubernetes - :classes: btn-outline-info btn-block - - --- - - **I want to run Ray on a cloud provider** - ^^^ - Take a sample application designed to run on a laptop and scale it up in the - cloud. Access to an AWS or GCP account is required. - - +++ - .. link-button:: vm-cluster-quick-start - :type: ref - :text: Get Started with Ray on VMs - :classes: btn-outline-info btn-block - - --- - - **I want to run my application on an existing Ray cluster** - ^^^ - Guide to submitting applications as Jobs to existing Ray clusters. - - +++ - .. link-button:: jobs-quickstart - :type: ref - :text: Job Submission - :classes: btn-outline-info btn-block +.. 
grid:: 1 2 2 2 + :gutter: 1 + :class-container: container pb-3 + + .. grid-item-card:: + + **I want to learn key Ray cluster concepts** + ^^^ + Understand the key concepts and main ways of interacting with a Ray cluster. + + +++ + .. button-ref:: cluster-key-concepts + :color: primary + :outline: + :expand: + + Learn Key Concepts + + .. grid-item-card:: + + **I want to run Ray on Kubernetes** + ^^^ + Deploy a Ray application to a Kubernetes cluster. You can run the tutorial on a + Kubernetes cluster or on your laptop via KinD. + + +++ + .. button-ref:: kuberay-quickstart + :color: primary + :outline: + :expand: + + Get Started with Ray on Kubernetes + + .. grid-item-card:: + + **I want to run Ray on a cloud provider** + ^^^ + Take a sample application designed to run on a laptop and scale it up in the + cloud. Access to an AWS or GCP account is required. + + +++ + .. button-ref:: vm-cluster-quick-start + :color: primary + :outline: + :expand: + + Get Started with Ray on VMs + + .. grid-item-card:: + + **I want to run my application on an existing Ray cluster** + ^^^ + Guide to submitting applications as Jobs to existing Ray clusters. + + +++ + .. button-ref:: jobs-quickstart + :color: primary + :outline: + :expand: + + Job Submission .. include:: /_includes/clusters/announcement_bottom.rst diff --git a/doc/source/cluster/kubernetes/index.md b/doc/source/cluster/kubernetes/index.md index 2d5b5d98d96e..614a69521030 100644 --- a/doc/source/cluster/kubernetes/index.md +++ b/doc/source/cluster/kubernetes/index.md @@ -31,54 +31,69 @@ Concretely, you will learn how to: The Ray docs present all the information you need to start running Ray workloads on Kubernetes. ```{eval-rst} -.. panels:: - :container: text-center - :column: col-lg-6 px-2 py-2 - :card: - - **Getting Started** - ^^^ - - Learn how to start a Ray cluster and deploy Ray applications on Kubernetes. - - +++ - .. 
link-button:: kuberay-quickstart - :type: ref - :text: Get Started with Ray on Kubernetes - :classes: btn-outline-info btn-block - --- - **Examples** - ^^^ - - Try example Ray workloads on Kubernetes. - - +++ - .. link-button:: kuberay-examples - :type: ref - :text: Try example workloads - :classes: btn-outline-info btn-block - --- - **User Guides** - ^^^ - - Learn best practices for configuring Ray clusters on Kubernetes. - - +++ - .. link-button:: kuberay-guides - :type: ref - :text: Read the User Guides - :classes: btn-outline-info btn-block - --- - **API Reference** - ^^^ - - Find API references on RayCluster configuration. - - +++ - .. link-button:: kuberay-api-reference - :type: ref - :text: Check API references - :classes: btn-outline-info btn-block +.. grid:: 1 2 2 2 + :gutter: 1 + :class-container: container pb-3 + + .. grid-item-card:: + + **Getting Started** + ^^^ + + Learn how to start a Ray cluster and deploy Ray applications on Kubernetes. + + +++ + .. button-ref:: kuberay-quickstart + :color: primary + :outline: + :expand: + + Get Started with Ray on Kubernetes + + .. grid-item-card:: + + **Examples** + ^^^ + + Try example Ray workloads on Kubernetes. + + +++ + .. button-ref:: kuberay-examples + :color: primary + :outline: + :expand: + + Try example workloads + + .. grid-item-card:: + + **User Guides** + ^^^ + + Learn best practices for configuring Ray clusters on Kubernetes. + + +++ + .. button-ref:: kuberay-guides + :color: primary + :outline: + :expand: + + Read the User Guides + + .. grid-item-card:: + + **API Reference** + ^^^ + + Find API references on RayCluster configuration. + + +++ + .. 
button-ref:: kuberay-api-reference + :color: primary + :outline: + :expand: + + Check API references ``` ## About KubeRay diff --git a/doc/source/cluster/vms/index.md b/doc/source/cluster/vms/index.md index 2ef1bcd2f898..b61a894ba311 100644 --- a/doc/source/cluster/vms/index.md +++ b/doc/source/cluster/vms/index.md @@ -8,7 +8,7 @@ for launching AWS and GCP clusters, and also has community-maintained integratio Each Ray cluster consists of a head node and a collection of worker nodes. Optional [autoscaling](vms-autoscaling) support allows the Ray cluster to be sized according to the requirements of your Ray workload, adding and removing worker nodes as needed. Ray supports -clusters composed of multiple heterogenous compute nodes (including GPU nodes). +clusters composed of multiple heterogeneous compute nodes (including GPU nodes). Concretely, you will learn how to: @@ -20,52 +20,67 @@ Concretely, you will learn how to: The Ray docs present all the information you need to start running Ray workloads on VMs. ```{eval-rst} -.. panels:: - :container: text-center - :column: col-lg-6 px-2 py-2 - :card: - - **Getting Started** - ^^^ - - Learn how to start a Ray cluster and deploy Ray applications in the cloud. - - +++ - .. link-button:: vm-cluster-quick-start - :type: ref - :text: Get Started with Ray on Cloud VMs - :classes: btn-outline-info btn-block - --- - **Examples** - ^^^ - - Try example Ray workloads in the Cloud - - +++ - .. link-button:: vm-cluster-examples - :type: ref - :text: Try example workloads - :classes: btn-outline-info btn-block - --- - **User Guides** - ^^^ - - Learn best practices for configuring cloud clusters - - +++ - .. link-button:: vm-cluster-guides - :type: ref - :text: Read the User Guides - :classes: btn-outline-info btn-block - --- - **API Reference** - ^^^ - - Find API references for cloud clusters - - +++ - .. link-button:: vm-cluster-api-references - :type: ref - :text: Check API references - :classes: btn-outline-info btn-block +.. 
grid:: 1 2 2 2 + :gutter: 1 + :class-container: container pb-3 + + .. grid-item-card:: + + **Getting Started** + ^^^ + + Learn how to start a Ray cluster and deploy Ray applications in the cloud. + + +++ + .. button-ref:: vm-cluster-quick-start + :color: primary + :outline: + :expand: + + Get Started with Ray on Cloud VMs + + .. grid-item-card:: + + **Examples** + ^^^ + + Try example Ray workloads in the Cloud + + +++ + .. button-ref:: vm-cluster-examples + :color: primary + :outline: + :expand: + + Try example workloads + + .. grid-item-card:: + + **User Guides** + ^^^ + + Learn best practices for configuring cloud clusters + + +++ + .. button-ref:: vm-cluster-guides + :color: primary + :outline: + :expand: + + Read the User Guides + + .. grid-item-card:: + + **API Reference** + ^^^ + + Find API references for cloud clusters + + +++ + .. button-ref:: vm-cluster-api-references + :color: primary + :outline: + :expand: + + Check API references ``` diff --git a/doc/source/conf.py b/doc/source/conf.py index f54836737bb7..7aa0c31db9f9 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -56,7 +56,6 @@ "sphinxcontrib.redoc", "sphinx_tabs.tabs", "sphinx_remove_toctrees", - "sphinx_panels", "sphinx_design", ] @@ -236,6 +235,7 @@ "https://www.datanami.com/2018/02/01/rays-new-library-targets-high-speed-reinforcement-learning/", # 403 Client Error: Forbidden for url. # They ratelimit bots. + "https://www.researchgate.net/publication/222573328_Stochastic_Gradient_Boosting", "https://www.datanami.com/2019/11/05/why-every-python-developer-will-love-ray/", "https://dev.mysql.com/doc/connector-python/en/", # Returning 522s intermittently. 
diff --git a/doc/source/custom_directives.py b/doc/source/custom_directives.py index 39636421e51a..6f9e057fd38c 100644 --- a/doc/source/custom_directives.py +++ b/doc/source/custom_directives.py @@ -286,16 +286,15 @@ def build_gallery(app): source = yaml.safe_load((Path(app.srcdir) / gallery).read_text()) meta = source["meta"] - is_titled = True if meta.get("section-titles") else False - meta.pop("section-titles") + grid = meta.pop("grid") projects = source["projects"] - buttons = source["buttons"] + classes = source["classes"] for item in projects: - ref = ":type: url" + ref = "button-link" website = item["website"] if "://" not in website: # if it has no http/s protocol, it's a "ref" - ref = ref.replace("url", "ref") + ref = ref.replace("link", "ref") if not item.get("image"): item["image"] = "https://docs.ray.io/_images/ray_logo.png" @@ -308,40 +307,37 @@ def build_gallery(app): gh_stars = ( f".. image:: https://img.shields.io/github/" f"stars/{org}/{repo}?style=social)]\n" - f"\t\t:target: {item['repo']}" + f"\t\t\t:target: {item['repo']}" ) except Exception: pass item = f""" - --- + .. grid-item-card:: :img-top: {item["image"]} + :class-img-top: {classes["class-img-top"]} {gh_stars} {item["description"]} +++ - .. link-button:: {item["website"]} - {ref} - :text: {item["name"]} - :classes: {buttons["classes"]} + .. {ref}:: {item["website"]} + :color: primary + :outline: + :expand: + + {item["name"]} """ + panel_items.append(item) - panel_header = ".. panels::\n" + panel_header = f".. 
grid:: {grid}\n" for k, v in meta.items(): - panel_header += f"\t:{k}: {v}\n" - - if is_titled: - panels = "" - for item, panel in zip(projects, panel_items): - title = item["section_title"] - underline_title = "-" * len(title) - panels += f"{title}\n{underline_title}\n\n{panel_header}{panel}\n\n" - else: - panel_items = "\n".join(panel_items) - panels = panel_header + panel_items + panel_header += f" :{k}: {v}\n" + + panel_items = "\n".join(panel_items) + panels = panel_header + panel_items gallery_out = gallery.replace(".yml", ".txt") (Path(app.srcdir) / gallery_out).write_text(panels) diff --git a/doc/source/data/batch_inference.rst b/doc/source/data/batch_inference.rst index aeede1a1f182..af5b0de3390e 100644 --- a/doc/source/data/batch_inference.rst +++ b/doc/source/data/batch_inference.rst @@ -236,7 +236,7 @@ We cover resource allocation in more detail in :ref:`the configuration section o Advanced batch inference guide ------------------------------ - Let's use batch inference on a pre-trained PyTorch model for image classification +Let's use batch inference on a pre-trained PyTorch model for image classification to illustrate advanced concepts of batch processing with Ray. .. important:: @@ -260,7 +260,7 @@ to illustrate advanced concepts of batch processing with Ray. :img-top: /images/ray_logo.png :class-img-top: pt-2 w-75 d-block mx-auto fixed-height-img - .. button-ref:: /data/examples/torch_detection + .. button-ref:: /ray-air/examples/torch_detection Fine-tuning an Object Detection Model and using it for Batch Inference @@ -268,7 +268,7 @@ to illustrate advanced concepts of batch processing with Ray. :img-top: /images/ray_logo.png :class-img-top: pt-2 w-75 d-block mx-auto fixed-height-img - .. button-ref:: /data/examples/torch_image_example + .. 
button-ref:: /ray-air/examples/torch_image_example Training an Image Classifier and using it for Batch Inference @@ -456,6 +456,7 @@ In many standard cases, the input batch format is the same as the output batch f but it's good to be aware of the differences. .. margin:: + We refer to batch formats by name in Ray Data (using strings). For instance, the batch format used to represent Pandas dataframes is called ``"pandas"``. We often use batch format names and the libraries they represent interchangeably. @@ -465,35 +466,37 @@ namely NumPy, Pandas and Arrow, and how they're used in Ray Data. By default, the batch format will be ``"numpy"``, but you can specify other formats as you see fit. -.. tabbed:: NumPy (default) +.. tab-set:: - The ``"numpy"`` batch format presents batches as dictionary of - `numpy.ndarray `__ (``Dict[str, np.ndarray]``), with each key-value pair representing one column. + .. tab-item:: NumPy (default) - .. literalinclude:: ./doc_code/batch_formats.py - :language: python - :start-after: __simple_numpy_start__ - :end-before: __simple_numpy_end__ + The ``"numpy"`` batch format presents batches as dictionary of + `numpy.ndarray `__ (``Dict[str, np.ndarray]``), with each key-value pair representing one column. -.. tabbed:: Pandas + .. literalinclude:: ./doc_code/batch_formats.py + :language: python + :start-after: __simple_numpy_start__ + :end-before: __simple_numpy_end__ - The ``"pandas"`` batch format presents batches in - `pandas.DataFrame `__ - format. + .. tab-item:: Pandas - .. literalinclude:: ./doc_code/batch_formats.py - :language: python - :start-after: __simple_pandas_start__ - :end-before: __simple_pandas_end__ + The ``"pandas"`` batch format presents batches in + `pandas.DataFrame `__ + format. -.. tabbed:: Arrow + .. literalinclude:: ./doc_code/batch_formats.py + :language: python + :start-after: __simple_pandas_start__ + :end-before: __simple_pandas_end__ - The ``"pyarrow"`` batch format presents batches in ``pyarrow.Table`` format. 
+ .. tab-item:: Arrow - .. literalinclude:: ./doc_code/batch_formats.py - :language: python - :start-after: __simple_pyarrow_start__ - :end-before: __simple_pyarrow_end__ + The ``"pyarrow"`` batch format presents batches in ``pyarrow.Table`` format. + + .. literalinclude:: ./doc_code/batch_formats.py + :language: python + :start-after: __simple_pyarrow_start__ + :end-before: __simple_pyarrow_end__ When defining the return value of your function, you can choose between dictionaries of NumPy arrays (``Dict[str, np.ndarray]``), Pandas dataframes diff --git a/doc/source/ray-air/examples/gptj_deepspeed_fine_tuning.ipynb b/doc/source/ray-air/examples/gptj_deepspeed_fine_tuning.ipynb index 2063a7cd4b86..86fcf7aa2b0f 100644 --- a/doc/source/ray-air/examples/gptj_deepspeed_fine_tuning.ipynb +++ b/doc/source/ray-air/examples/gptj_deepspeed_fine_tuning.ipynb @@ -11,12 +11,12 @@ "\n", "We will use Ray AIR (with the 🤗 Transformers integration) and a pretrained model from Hugging Face hub. Note that you can easily adapt this example to use other similar models.\n", "\n", - "This example focuses more on the performance and distributed computing aspects of Ray AIR. If you are looking for a more beginner friendly introduction to Ray AIR 🤗 Transformers integration, see {doc}`this example `.\n", + "This example focuses more on the performance and distributed computing aspects of Ray AIR. If you are looking for a more beginner-friendly introduction to Ray AIR 🤗 Transformers integration, see {doc}`this example `.\n", "\n", "It is highly recommended to read [Ray AIR Key Concepts](air-key-concepts) and [Ray Data Key Concepts](data_key_concepts) before starting this example.\n", "\n", "```{note}\n", - "In order to run this example, make sure your Ray cluster has access to at least one GPU with 16 or more GBs of memory. The amount of memory needed will depend on the model. This notebook is being tested with 16 g4dn.4xlarge instances (including the head node). 
If you wish to use a CPU head node, turn on [cloud checkpointing](train-config-sync>) to avoid OOM errors that may happen due to the default behavior of syncing the checkpoint files to head node.\n", + "To run this example, make sure your Ray cluster has access to at least one GPU with 16 or more GBs of memory. The required amount of memory depends on the model. This notebook is tested with 16 g4dn.4xlarge instances (including the head node). If you wish to use a CPU head node, turn on [cloud checkpointing](tune-cloud-checkpointing) to avoid OOM errors that may happen due to the default behavior of syncing the checkpoint files to the head node.\n", "```\n", "\n", "In this notebook, we will:\n", diff --git a/doc/source/ray-contribute/development.rst b/doc/source/ray-contribute/development.rst index 4e4b7179aa4e..e77d80c335cc 100644 --- a/doc/source/ray-contribute/development.rst +++ b/doc/source/ray-contribute/development.rst @@ -19,21 +19,23 @@ Clone the repository To build Ray locally you will need to have the Git repository, so first, fork it on GitHub. Then you can clone it to your machine: -.. tab-item:: Git SSH +.. tab-set:: + + .. tab-item:: Git SSH - To clone the repository using Git with SSH (the default) run: + To clone the repository using Git with SSH (the default) run: - .. code-block:: shell + .. code-block:: shell - git clone git@github.com:[your username]/ray.git + git clone git@github.com:[your username]/ray.git -.. tab-item:: Git HTTPS + .. tab-item:: Git HTTPS - To clone the repository using Git with HTTPS run: + To clone the repository using Git with HTTPS run: - .. code-block:: shell + .. 
code-block:: shell - git clone https://github.com/[your username]/ray.git + git clone https://github.com/[your username]/ray.git Then you can enter into the Ray git repository directory: diff --git a/doc/source/ray-contribute/writing-code-snippets.rst b/doc/source/ray-contribute/writing-code-snippets.rst index 84cc98fc15eb..0e7a7f5570c7 100644 --- a/doc/source/ray-contribute/writing-code-snippets.rst +++ b/doc/source/ray-contribute/writing-code-snippets.rst @@ -1,4 +1,4 @@ -.. _writing-code-snippets: +.. _writing-code-snippets_ref: ========================== How to write code snippets diff --git a/doc/source/ray-core/examples/overview.rst b/doc/source/ray-core/examples/overview.rst index 00753e8b2090..19d746baae7c 100644 --- a/doc/source/ray-core/examples/overview.rst +++ b/doc/source/ray-core/examples/overview.rst @@ -7,60 +7,57 @@ Ray Tutorials and Examples Machine Learning Examples ------------------------- -.. panels:: - :container: container pb-4 - :column: col-md-4 px-2 py-2 - :img-top-cls: pt-5 w-75 d-block mx-auto - - --- - :img-top: /images/timeseries.png - - +++ - .. link-button:: automl_for_time_series - :type: ref - :text: Build Simple AutoML for Time Series Using Ray - :classes: btn-link btn-block stretched-link - --- - :img-top: /ray-overview/images/ray_svg_logo.svg - - +++ - .. link-button:: batch_prediction - :type: ref - :text: Build Batch Prediction Using Ray - :classes: btn-link btn-block stretched-link - - --- - :img-top: /ray-overview/images/ray_svg_logo.svg - - +++ - .. link-button:: batch_training - :type: ref - :text: Build Batch Training Using Ray - :classes: btn-link btn-block stretched-link - --- - :img-top: images/param_actor.png - - +++ - .. link-button:: plot_parameter_server - :type: ref - :text: Build a Simple Parameter Server Using Ray - :classes: btn-link btn-block stretched-link - --- - :img-top: images/hyperparameter.png - - +++ - .. 
link-button:: plot_hyperparameter - :type: ref - :text: Simple Parallel Model Selection - :classes: btn-link btn-block stretched-link - --- - :img-top: /ray-overview/images/ray_svg_logo.svg - - +++ - .. link-button:: plot_example-lm - :type: ref - :text: Fault-Tolerant Fairseq Training - :classes: btn-link btn-block stretched-link +.. grid:: 1 2 3 4 + :gutter: 1 + :class-container: container pb-3 + + .. grid-item-card:: + :img-top: /images/timeseries.png + :class-img-top: pt-2 w-75 d-block mx-auto fixed-height-img + + .. button-ref:: automl_for_time_series + + Build Simple AutoML for Time Series Using Ray + + .. grid-item-card:: + :img-top: /ray-overview/images/ray_svg_logo.svg + :class-img-top: pt-2 w-75 d-block mx-auto fixed-height-img + + .. button-ref:: batch_prediction + + Build Batch Prediction Using Ray + + .. grid-item-card:: + :img-top: /ray-overview/images/ray_svg_logo.svg + :class-img-top: pt-2 w-75 d-block mx-auto fixed-height-img + + .. button-ref:: batch_training + + Build Batch Training Using Ray + + .. grid-item-card:: + :img-top: images/param_actor.png + :class-img-top: pt-2 w-75 d-block mx-auto fixed-height-img + + .. button-ref:: plot_parameter_server + + Build a Simple Parameter Server Using Ray + + .. grid-item-card:: + :img-top: images/hyperparameter.png + :class-img-top: pt-2 w-75 d-block mx-auto fixed-height-img + + .. button-ref:: plot_hyperparameter + + Simple Parallel Model Selection + + .. grid-item-card:: + :img-top: /ray-overview/images/ray_svg_logo.svg + :class-img-top: pt-2 w-75 d-block mx-auto fixed-height-img + + .. button-ref:: plot_example-lm + + Fault-Tolerant Fairseq Training Reinforcement Learning Examples @@ -70,61 +67,54 @@ These are simple examples that show you how to leverage Ray Core. For Ray's production-grade reinforcement learning library, see `RLlib `__. -.. panels:: - :container: container pb-4 - :column: col-md-4 px-2 py-2 - :img-top-cls: pt-5 w-75 d-block mx-auto +.. 
grid:: 1 2 3 4 + :gutter: 1 + :class-container: container pb-3 + + .. grid-item-card:: + :img-top: images/pong.png + :class-img-top: pt-2 w-75 d-block mx-auto fixed-height-img - --- - :img-top: images/pong.png + .. button-ref:: plot_pong_example - +++ - .. link-button:: plot_pong_example - :type: ref - :text: Learning to Play Pong - :classes: btn-link btn-block stretched-link + Learning to Play Pong - --- - :img-top: images/a3c.png + .. grid-item-card:: + :img-top: images/a3c.png + :class-img-top: pt-2 w-75 d-block mx-auto fixed-height-img - +++ - .. link-button:: plot_example-a3c - :type: ref - :text: Asynchronous Advantage Actor Critic (A3C) - :classes: btn-link btn-block stretched-link + .. button-ref:: plot_example-a3c + + Asynchronous Advantage Actor Critic (A3C) Basic Examples -------------- -.. panels:: - :container: container pb-4 - :column: col-md-4 px-2 py-2 - :img-top-cls: pt-5 w-75 d-block mx-auto - - --- - :img-top: /ray-overview/images/ray_svg_logo.svg - - +++ - .. link-button:: gentle_walkthrough - :type: ref - :text: A Gentle Introduction to Ray Core by Example - :classes: btn-link btn-block stretched-link - - --- - :img-top: /ray-overview/images/ray_svg_logo.svg - - +++ - .. link-button:: highly_parallel - :type: ref - :text: Using Ray for Highly Parallelizable Tasks - :classes: btn-link btn-block stretched-link - - --- - :img-top: /ray-overview/images/ray_svg_logo.svg - - +++ - .. link-button:: map_reduce - :type: ref - :text: Running a Simple MapReduce Example with Ray Core - :classes: btn-link btn-block stretched-link +.. grid:: 1 2 3 4 + :gutter: 1 + :class-container: container pb-3 + + .. grid-item-card:: + :img-top: /ray-overview/images/ray_svg_logo.svg + :class-img-top: pt-2 w-75 d-block mx-auto fixed-height-img + + .. button-ref:: gentle_walkthrough + + A Gentle Introduction to Ray Core by Example + + .. grid-item-card:: + :img-top: /ray-overview/images/ray_svg_logo.svg + :class-img-top: pt-2 w-75 d-block mx-auto fixed-height-img + + .. 
button-ref:: highly_parallel + + Using Ray for Highly Parallelizable Tasks + + .. grid-item-card:: + :img-top: /ray-overview/images/ray_svg_logo.svg + :class-img-top: pt-2 w-75 d-block mx-auto fixed-height-img + + .. button-ref:: map_reduce + + Running a Simple MapReduce Example with Ray Core diff --git a/doc/source/ray-core/namespaces.rst b/doc/source/ray-core/namespaces.rst index 012bc8d3f653..c74c263ebecb 100644 --- a/doc/source/ray-core/namespaces.rst +++ b/doc/source/ray-core/namespaces.rst @@ -110,7 +110,7 @@ Named actors are only accessible within their namespaces. ray::Init(config); // This fails because "orange" was defined in the "colors" namespace. ray::GetActor("orange"); // return nullptr; - // This succceeds because the name "orange" is unused in this namespace. + // This succeeds because the name "orange" is unused in this namespace. ray::Actor(RAY_FUNC(Counter::FactoryCreate)).SetName("orange").Remote(); ray::Actor(RAY_FUNC(Counter::FactoryCreate)).SetName("watermelon").Remote(); ray::Shutdown(); @@ -161,10 +161,9 @@ the specified namespace, no matter what namespace of the current job is. .. tab-item:: C++ - .. code-block:: c++ + .. code-block:: // `ray start --head` has been run to launch a local cluster. - ray::RayConfig config; ray::Init(config); // Create an actor with specified namespace. @@ -173,6 +172,7 @@ the specified namespace, no matter what namespace of the current job is. ray::GetActor("orange"); ray::Shutdown();` + Anonymous namespaces -------------------- diff --git a/doc/source/ray-core/objects.rst b/doc/source/ray-core/objects.rst index e6ba74a5a61c..e168c6ebd220 100644 --- a/doc/source/ray-core/objects.rst +++ b/doc/source/ray-core/objects.rst @@ -98,7 +98,7 @@ If the current node's object store does not contain the object, the object is do // Get the values of multiple object refs in parallel. 
List> objectRefs = new ArrayList<>(); for (int i = 0; i < 3; i++) { - objectRefs.add(Ray.put(i)); + objectRefs.add(Ray.put(i)); } List results = Ray.get(objectRefs); Assert.assertEquals(results, ImmutableList.of(0, 1, 2)); diff --git a/doc/source/ray-core/objects/serialization.rst b/doc/source/ray-core/objects/serialization.rst index b73447c8d3d1..af5a236267db 100644 --- a/doc/source/ray-core/objects/serialization.rst +++ b/doc/source/ray-core/objects/serialization.rst @@ -3,7 +3,7 @@ Serialization ============= -Since Ray processes do not share memory space, data transferred between workers and nodes will need to **serialized** and **deserialized**. Ray uses the `Plasma object store `_ to efficiently transfer objects across different processes and different nodes. Numpy arrays in the object store are shared between workers on the same node (zero-copy deserialization). +Since Ray processes do not share memory space, data transferred between workers and nodes will need to **serialized** and **deserialized**. Ray uses the `Plasma object store `_ to efficiently transfer objects across different processes and different nodes. Numpy arrays in the object store are shared between workers on the same node (zero-copy deserialization). Overview -------- diff --git a/doc/source/ray-core/walkthrough.rst b/doc/source/ray-core/walkthrough.rst index 80b42f8ac665..e4f721a7d59e 100644 --- a/doc/source/ray-core/walkthrough.rst +++ b/doc/source/ray-core/walkthrough.rst @@ -65,34 +65,34 @@ Next Steps Ray's key primitives are simple, but can be composed together to express almost any kind of distributed computation. Learn more about Ray's :ref:`key concepts ` with the following user guides: -.. panels:: - :container: container pb-4 - :column: col-md-4 px-2 py-2 - :img-top-cls: pt-5 w-50 d-block mx-auto - - --- - :img-top: /images/tasks.png - - .. 
link-button:: ray-remote-functions - :type: ref - :text: Using remote functions (Tasks) - :classes: btn-link btn-block stretched-link - - --- - :img-top: /images/actors.png - - .. link-button:: ray-remote-classes - :type: ref - :text: Using remote classes (Actors) - :classes: btn-link btn-block stretched-link - - --- - :img-top: /images/objects.png - - .. link-button:: objects-in-ray - :type: ref - :text: Working with Ray Objects - :classes: btn-link btn-block stretched-link +.. grid:: 1 2 3 3 + :gutter: 1 + :class-container: container pb-3 + + + .. grid-item-card:: + :img-top: /images/tasks.png + :class-img-top: pt-2 w-75 d-block mx-auto fixed-height-img + + .. button-ref:: ray-remote-functions + + Using remote functions (Tasks) + + .. grid-item-card:: + :img-top: /images/actors.png + :class-img-top: pt-2 w-75 d-block mx-auto fixed-height-img + + .. button-ref:: ray-remote-classes + + Using remote classes (Actors) + + .. grid-item-card:: + :img-top: /images/objects.png + :class-img-top: pt-2 w-75 d-block mx-auto fixed-height-img + + .. button-ref:: objects-in-ray + + Working with Ray Objects .. include:: /_includes/core/announcement_bottom.rst diff --git a/doc/source/ray-observability/ray-logging.rst b/doc/source/ray-observability/ray-logging.rst index 3efae19e2d2f..ac238cc4a715 100644 --- a/doc/source/ray-observability/ray-logging.rst +++ b/doc/source/ray-observability/ray-logging.rst @@ -115,7 +115,7 @@ By default Ray prints Actor logs prefixes in light blue: Users may instead activate multi-color prefixes by setting the environment variable ``RAY_COLOR_PREFIX=1``. This will index into an array of colors modulo the PID of each process. -.. image:: images/coloring-actor-log-prefixes.png +.. 
image:: ./images/coloring-actor-log-prefixes.png :align: center Distributed progress bars (tqdm) diff --git a/doc/source/ray-overview/eco-gallery.yml b/doc/source/ray-overview/eco-gallery.yml index 0cc423dd913b..05e2006ea660 100644 --- a/doc/source/ray-overview/eco-gallery.yml +++ b/doc/source/ray-overview/eco-gallery.yml @@ -1,11 +1,10 @@ meta: - section-titles: false - container: container pb-4 - column: col-md-4 px-1 py-1 - img-top-cls: p-2 w-75 d-block mx-auto fixed-height-img + grid: 1 2 2 3 + gutter: 1 + class-container: container pb-3 -buttons: - classes: btn-outline-info btn-block +classes: + class-img-top: pt-2 w-75 d-block mx-auto fixed-height-img projects: - name: Classy Vision Integration diff --git a/doc/source/ray-overview/getting-started.md b/doc/source/ray-overview/getting-started.md index d35575cc11e1..9f6d879b977d 100644 --- a/doc/source/ray-overview/getting-started.md +++ b/doc/source/ray-overview/getting-started.md @@ -9,25 +9,27 @@ This guide gives a quick tour of Ray's features. ## Starting a local Ray cluster To get started, install, import, and initialize Ray. Most of the examples in this guide are based on Python, and some examples use Ray Core in Java. -````{panels} -:container: text-center -:column: col-lg-6 px-2 py-2 -:card: - -Python -^^^ -To use Ray in Python, install it with -``` -pip install ray -``` - ---- - -Java -^^^ +````{eval-rst} +.. grid:: 1 2 2 2 + :gutter: 1 + :class-container: container pb-3 + + .. grid-item-card:: + + Python + ^^^ + To use Ray in Python, install it with + ``` + pip install ray + ``` + + .. grid-item-card:: -To use Ray in Java, first add the [ray-api](https://mvnrepository.com/artifact/io.ray/ray-api) and -[ray-runtime](https://mvnrepository.com/artifact/io.ray/ray-runtime) dependencies in your project. 
+ Java + ^^^ + + To use Ray in Java, first add the [ray-api](https://mvnrepository.com/artifact/io.ray/ray-api) and + [ray-runtime](https://mvnrepository.com/artifact/io.ray/ray-runtime) dependencies in your project. ```` @@ -125,14 +127,16 @@ Use the trained model for batch prediction with a ``BatchPredictor``. :start-after: __air_xgb_batchpred_start__ :end-before: __air_xgb_batchpred_end__ ``` -````` +```{button-ref} air +:color: primary +:outline: +:expand: -```{link-button} air -:type: ref -:text: Learn more about Ray AIR -:classes: btn-outline-primary btn-block +Learn more about Ray AIR ``` +````` + ## Ray Libraries Quick Start @@ -182,20 +186,22 @@ Datastreams also supports ``.filter()`` and ``.flat_map()``. :end-before: __data_transform_end__ ``` -```{link-button} ../data/data -:type: ref -:text: Learn more about Ray Data -:classes: btn-outline-primary btn-block +```{button-ref} ../data/data +:color: primary +:outline: +:expand: + +Learn more about Ray Data ``` ````` -`````{dropdown} ray Train: Distributed Model Training +``````{dropdown} ray Train: Distributed Model Training :animate: fade-in-slide-down Ray Train abstracts away the complexity of setting up a distributed training system. Let's take following simple examples: -````{tab-set} +`````{tab-set} ````{tab-item} PyTorch @@ -304,16 +310,21 @@ with 4 workers, and use it to run the new training function! 
:end-before: __tf_trainer_end__ :dedent: 0 ``` -```` -```` -```{link-button} ../train/train -:type: ref -:text: Learn more about Ray Train -:classes: btn-outline-primary btn-block +```{button-ref} ../train/train +:color: primary +:outline: +:expand: + +Learn more about Ray Train ``` + +```` + ````` +`````` + `````{dropdown} ray Tune: Hyperparameter Tuning at Scale :animate: fade-in-slide-down @@ -343,10 +354,12 @@ If TensorBoard is installed, automatically visualize all trial results: tensorboard --logdir ~/ray_results ``` -```{link-button} ../tune/index -:type: ref -:text: Learn more about Ray Tune -:classes: btn-outline-primary btn-block +```{button-ref} ../tune/index +:color: primary +:outline: +:expand: + +Learn more about Ray Tune ``` ````` @@ -374,11 +387,14 @@ This example runs serves a scikit-learn gradient boosting classifier. As a result you will see `{"result": "versicolor"}`. -```{link-button} ../serve/index -:type: ref -:text: Learn more about Ray Serve -:classes: btn-outline-primary btn-block +```{button-ref} ../serve/index +:color: primary +:outline: +:expand: + +Learn more about Ray Serve ``` + ````` @@ -401,10 +417,12 @@ pip install "ray[rllib]" tensorflow # or torch :start-after: __quick_start_begin__ ``` -```{link-button} ../rllib/index -:type: ref -:text: Learn more about Ray RLlib -:classes: btn-outline-primary btn-block +```{button-ref} ../rllib/index +:color: primary +:outline: +:expand: + +Learn more about Ray RLlib ``` ````` @@ -415,10 +433,10 @@ Ray Core provides simple primitives for building and running distributed applica Below you find examples that show you how to turn your functions and classes easily into Ray tasks and actors, for both Python and Java. 
-`````{dropdown} ray Core: Parallelizing Functions with Ray Tasks +``````{dropdown} ray Core: Parallelizing Functions with Ray Tasks :animate: fade-in-slide-down -````{tab-set} +`````{tab-set} ````{tab-item} Python @@ -438,8 +456,8 @@ def f(x): futures = [f.remote(i) for i in range(4)] print(ray.get(futures)) # [0, 1, 4, 9] - ``` + ```` ````{tab-item} Java @@ -475,22 +493,26 @@ public class RayDemo { System.out.println(Ray.get(objectRefList)); // [0, 1, 4, 9] } } -```` - -```` +``` In the above code block we defined some Ray Tasks. While these are great for stateless operations, sometimes you must maintain the state of your application. You can do that with Ray Actors. -```{link-button} ../ray-core/walkthrough -:type: ref -:text: Learn more about Ray Core -:classes: btn-outline-primary btn-block +```{button-ref} ../ray-core/walkthrough +:color: primary +:outline: +:expand: + +Learn more about Ray Core ``` +```` + ````` -`````{dropdown} ray Core: Parallelizing Classes with Ray Actors +`````` + +``````{dropdown} ray Core: Parallelizing Classes with Ray Actors :animate: fade-in-slide-down Ray provides actors to allow you to parallelize an instance of a class in Python or Java. @@ -498,7 +520,7 @@ When you instantiate a class that is a Ray actor, Ray will start a remote instan of that class in the cluster. This actor can then execute remote method calls and maintain its own internal state. -````{tab-set} +`````{tab-set} ````{tab-item} Python @@ -572,19 +594,22 @@ public class RayDemo { System.out.println(Ray.get(objectRefList)); // [1, 1, 1, 1] } } +``` -```` +```{button-ref} ../ray-core/walkthrough +:color: primary +:outline: +:expand: -```` - -```{link-button} ../ray-core/walkthrough -:type: ref -:text: Learn more about Ray Core -:classes: btn-outline-primary btn-block +Learn more about Ray Core ``` +```` + ````` +`````` + ## Ray Cluster Quick Start You can deploy your applications on Ray clusters, often with minimal code changes to your existing code. 
@@ -623,10 +648,12 @@ Assuming you have stored this configuration in a file called `cluster.yaml`, you ray submit cluster.yaml example.py --start ``` -```{link-button} cluster-index -:type: ref -:text: Learn more about launching Ray Clusters -:classes: btn-outline-primary btn-block +```{button-ref} cluster-index +:color: primary +:outline: +:expand: + +Learn more about launching Ray Clusters ``` ````` @@ -652,10 +679,12 @@ pip install "ray[default]" ``` ```` -```{link-button} ../ray-core/ray-dashboard -:type: ref -:text: Learn more about Ray Dashboard. -:classes: btn-outline-primary btn-block +```{button-ref} ../ray-core/ray-dashboard +:color: primary +:outline: +:expand: + +Learn more about Ray Dashboard ``` ````` @@ -728,10 +757,12 @@ See the summarized statistics of Ray tasks using ``ray summary tasks``. ``` -```{link-button} ../ray-observability/state/state-api -:type: ref -:text: Learn more about Ray State APIs -:classes: btn-outline-primary btn-block +```{button-ref} ../ray-observability/state/state-api +:color: primary +:outline: +:expand: + +Learn more about Ray State APIs ``` ````` diff --git a/doc/source/ray-overview/index.md b/doc/source/ray-overview/index.md index 38e3e7baa7f2..865a2d63b540 100644 --- a/doc/source/ray-overview/index.md +++ b/doc/source/ray-overview/index.md @@ -38,63 +38,71 @@ These are some common ML workloads that individuals, organizations, and companie |:--:| |Stack of Ray libraries - unified toolkit for ML workloads.| -Ray's unified compute framework comprises of three layers: +Ray's unified compute framework consists of three layers: 1. **Ray AI Runtime**--An open-source, Python, domain-specific set of libraries that equip ML engineers, data scientists, and researchers with a scalable and unified toolkit for ML applications. -1. **Ray Core**--An open-source, Python, general purpose, distributed computing library that enables ML engineers and Python developers to scale Python applications and accelerate machine learning workloads. 
-1. **Ray cluster**--A set of worker nodes connected to a common Ray head node. Ray clusters can be fixed-size, or they can autoscale up and down according to the resources requested by applications running on the cluster. - -````{panels} -:container: text-left -:column: col-lg-4 px-2 py-2 -:card: - -**Scale machine learning workloads** -^^^ -Build ML applications with a toolkit of libraries for distributed -[data processing](../data/data.rst), -[model training](../train/train.rst), -[tuning](tune/../index.rst), -[reinforcement learning](../rllib/index.rst), -[model serving](../serve/index.rst), -and [more](../ray-more-libs/index.rst). -+++ -```{link-button} ../ray-air/getting-started -:type: ref -:text: Ray AIR -:classes: btn-outline-info btn-block +2. **Ray Core**--An open-source, Python, general purpose, distributed computing library that enables ML engineers and Python developers to scale Python applications and accelerate machine learning workloads. +3. **Ray cluster**--A set of worker nodes connected to a common Ray head node. Ray clusters can be fixed-size, or they can autoscale up and down according to the resources requested by applications running on the cluster. + +```{eval-rst} +.. grid:: 1 2 3 3 + :gutter: 1 + :class-container: container pb-3 + + .. grid-item-card:: + + **Scale machine learning workloads** + ^^^ + Build ML applications with a toolkit of libraries for distributed + :doc:`data processing <../data/data>`, + :doc:`model training <../train/train>`, + :doc:`tuning <../tune/index>`, + :doc:`reinforcement learning <../rllib/index>`, + :doc:`model serving <../serve/index>`, + and :doc:`more <../ray-more-libs/index>`. + +++ + .. button-ref:: ../ray-air/getting-started + :color: primary + :outline: + :expand: + + Ray AIR + + .. grid-item-card:: + + **Build distributed applications** + ^^^ + Build and run distributed applications with a + :doc:`simple and flexible API <../ray-core/walkthrough>`. 
+ :doc:`Parallelize <../ray-core/walkthrough>` single machine code with + little to zero code changes. + + +++ + .. button-ref:: ../ray-core/walkthrough + :color: primary + :outline: + :expand: + + Ray Core + + .. grid-item-card:: + + **Deploy large-scale workloads** + ^^^ + Deploy workloads on :doc:`AWS, GCP, Azure <../cluster/getting-started>` or + :doc:`on premise <../cluster/vms/user-guides/launching-clusters/on-premises>`. + Use Ray cluster managers to run Ray on existing + :doc:`Kubernetes <../cluster/kubernetes/index>`, + :doc:`YARN <../cluster/vms/user-guides/community/yarn>`, + or :doc:`Slurm <../cluster/vms/user-guides/community/slurm>` clusters. + +++ + .. button-ref:: ../cluster/getting-started + :color: primary + :outline: + :expand: + + Ray Clusters ``` ---- - -**Build distributed applications** -^^^ -Build and run distributed applications with a [simple and flexible API](../ray-core/walkthrough.rst). -[Parallelize](../ray-core/walkthrough.rst) single machine code with little to zero code changes. - -+++ -```{link-button} ../ray-core/walkthrough -:type: ref -:text: Ray Core -:classes: btn-outline-info btn-block -``` ---- - -**Deploy large-scale workloads** -^^^ -Deploy workloads on [AWS, GCP, Azure](../cluster/getting-started) or -[on premise](../cluster/vms/user-guides/launching-clusters/on-premises). -Use Ray cluster managers to run Ray on existing -[Kubernetes](../cluster/kubernetes/index), -[YARN](../cluster/vms/user-guides/community/yarn), -or [Slurm](../cluster/vms/user-guides/community/slurm) clusters. -+++ - -```{link-button} ../cluster/getting-started -:type: ref -:text: Ray Clusters -:classes: btn-outline-info btn-block -``` -```` Each of [Ray AIR's](../ray-air/getting-started) five native libraries distributes a specific ML task: - [Data](../data/data): Scalable, framework-agnostic data loading and transformation across training, tuning, and prediction. 
diff --git a/doc/source/ray-overview/use-cases.rst b/doc/source/ray-overview/use-cases.rst index 95f4a02b1b38..06e3d292b4b1 100644 --- a/doc/source/ray-overview/use-cases.rst +++ b/doc/source/ray-overview/use-cases.rst @@ -107,7 +107,7 @@ It scales from single machines to large clusters with minimal code changes. As a Python-first framework, you can easily express and interactively develop your inference workloads in Ray. To learn more about running batch inference with Ray, see the :ref:`batch inference guide`. -.. figure:: batch_inference/images/batch_inference.png +.. figure:: ../data/images/batch_inference.png .. grid:: 1 2 3 4 diff --git a/doc/source/train/examples.rst b/doc/source/train/examples.rst index 121d9caf3a96..8639b9345211 100644 --- a/doc/source/train/examples.rst +++ b/doc/source/train/examples.rst @@ -44,7 +44,7 @@ Distributed Training Examples using Ray Train PyTorch Fashion MNIST Training Example .. grid-item-card:: - :img-top: images/hugging.png + :img-top: /images/hugging.png :class-img-top: pt-2 w-75 d-block mx-auto fixed-height-img .. button-ref:: train_transformers_example diff --git a/doc/source/train/gbdt.rst b/doc/source/train/gbdt.rst index dcd9dba6110f..d470ff4c49af 100644 --- a/doc/source/train/gbdt.rst +++ b/doc/source/train/gbdt.rst @@ -12,23 +12,25 @@ Just as in the original `xgboost.train() `__ functions, the training parameters are passed as the ``params`` dictionary. -.. tab-item:: XGBoost +.. tab-set:: + + .. tab-item:: XGBoost - Run ``pip install -U xgboost_ray``. + Run ``pip install -U xgboost_ray``. - .. literalinclude:: doc_code/gbdt_user_guide.py - :language: python - :start-after: __xgboost_start__ - :end-before: __xgboost_end__ + .. literalinclude:: doc_code/gbdt_user_guide.py + :language: python + :start-after: __xgboost_start__ + :end-before: __xgboost_end__ -.. tab-item:: LightGBM + .. tab-item:: LightGBM - Run ``pip install -U lightgbm_ray``. + Run ``pip install -U lightgbm_ray``. - .. 
literalinclude:: doc_code/gbdt_user_guide.py - :language: python - :start-after: __lightgbm_start__ - :end-before: __lightgbm_end__ + .. literalinclude:: doc_code/gbdt_user_guide.py + :language: python + :start-after: __lightgbm_start__ + :end-before: __lightgbm_end__ Ray-specific params are passed in through the trainer constructors. diff --git a/doc/source/tune/examples/experiment-tracking.rst b/doc/source/tune/examples/experiment-tracking.rst index 3fbbb8157cbf..2a14d75b2301 100644 --- a/doc/source/tune/examples/experiment-tracking.rst +++ b/doc/source/tune/examples/experiment-tracking.rst @@ -6,42 +6,39 @@ such as CometML, or Weights & Biases. If you're interested in learning how to use Ray Tune with Tensorboard, you can find more information in our :ref:`Guide to logging and outputs `. -.. panels:: - :container: container pb-4 - :column: col-md-4 px-2 py-2 - :img-top-cls: pt-5 w-75 d-block mx-auto - - --- - :img-top: /images/aim_logo.png - - +++ - .. link-button:: tune-aim-ref - :type: ref - :text: Using Aim with Ray Tune For Experiment Management - :classes: btn-link btn-block stretched-link - --- - :img-top: /images/comet_logo_full.png - - +++ - .. link-button:: tune-comet-ref - :type: ref - :text: Using Comet with Ray Tune For Experiment Management - :classes: btn-link btn-block stretched-link - - --- - :img-top: /images/wandb_logo.png - - +++ - .. link-button:: tune-wandb-ref - :type: ref - :text: Tracking Your Experiment Process Weights & Biases - :classes: btn-link btn-block stretched-link - - --- - :img-top: /images/mlflow.png - - +++ - .. link-button:: tune-mlflow-ref - :type: ref - :text: Using MLflow Tracking & AutoLogging with Tune - :classes: btn-link btn-block stretched-link \ No newline at end of file +.. grid:: 1 2 3 4 + :gutter: 1 + :class-container: container pb-3 + + + .. grid-item-card:: + :img-top: /images/aim_logo.png + :class-img-top: pt-2 w-75 d-block mx-auto fixed-height-img + + .. 
button-ref:: tune-aim-ref + + Using Aim with Ray Tune For Experiment Management + + .. grid-item-card:: + :img-top: /images/comet_logo_full.png + :class-img-top: pt-2 w-75 d-block mx-auto fixed-height-img + + .. button-ref:: tune-comet-ref + + Using Comet with Ray Tune For Experiment Management + + .. grid-item-card:: + :img-top: /images/wandb_logo.png + :class-img-top: pt-2 w-75 d-block mx-auto fixed-height-img + + .. button-ref:: tune-wandb-ref + + Tracking Your Experiment Process Weights & Biases + + .. grid-item-card:: + :img-top: /images/mlflow.png + :class-img-top: pt-2 w-75 d-block mx-auto fixed-height-img + + .. button-ref:: tune-mlflow-ref + + Using MLflow Tracking & AutoLogging with Tune From 4ce6be8e8096077bc33b92b3e924e91f45111f2c Mon Sep 17 00:00:00 2001 From: kourosh hakhamaneshi <31483498+kouroshHakha@users.noreply.github.com> Date: Sun, 7 May 2023 17:07:58 -0700 Subject: [PATCH 273/424] [RLlib] Fixed bug in restoring a gpu trained algorithm (#35024) Signed-off-by: Kourosh Hakhamaneshi --- rllib/BUILD | 29 ++++++++++++++ rllib/policy/torch_policy.py | 8 +++- rllib/policy/torch_policy_v2.py | 8 +++- .../test_algorithm_checkpoint_restore.py | 40 +++++++++++++++---- 4 files changed, 76 insertions(+), 9 deletions(-) diff --git a/rllib/BUILD b/rllib/BUILD index 30df6d9413f6..20d81511ac16 100644 --- a/rllib/BUILD +++ b/rllib/BUILD @@ -2458,6 +2458,16 @@ py_test( args = ["TestCheckpointRestorePG"] ) + +py_test( + name = "tests/test_checkpoint_restore_pg_gpu", + main = "tests/test_algorithm_checkpoint_restore.py", + tags = ["team:rllib", "tests_dir", "gpu"], + size = "large", + srcs = ["tests/test_algorithm_checkpoint_restore.py"], + args = ["TestCheckpointRestorePG"] +) + py_test( name = "tests/test_checkpoint_restore_off_policy", main = "tests/test_algorithm_checkpoint_restore.py", @@ -2467,6 +2477,16 @@ py_test( args = ["TestCheckpointRestoreOffPolicy"] ) + +py_test( + name = "tests/test_checkpoint_restore_off_policy_gpu", + main = 
"tests/test_algorithm_checkpoint_restore.py", + tags = ["team:rllib", "tests_dir", "gpu"], + size = "large", + srcs = ["tests/test_algorithm_checkpoint_restore.py"], + args = ["TestCheckpointRestoreOffPolicy"] +) + py_test( name = "tests/test_checkpoint_restore_evolution_algos", main = "tests/test_algorithm_checkpoint_restore.py", @@ -2476,6 +2496,15 @@ py_test( args = ["TestCheckpointRestoreEvolutionAlgos"] ) +py_test( + name = "tests/test_checkpoint_restore_evolution_algos_gpu", + main = "tests/test_algorithm_checkpoint_restore.py", + tags = ["team:rllib", "tests_dir", "gpu"], + size = "medium", + srcs = ["tests/test_algorithm_checkpoint_restore.py"], + args = ["TestCheckpointRestoreEvolutionAlgos"] +) + py_test( name = "policy/tests/test_policy_checkpoint_restore", main = "policy/tests/test_policy_checkpoint_restore.py", diff --git a/rllib/policy/torch_policy.py b/rllib/policy/torch_policy.py index f78b13858390..9c2d573635e3 100644 --- a/rllib/policy/torch_policy.py +++ b/rllib/policy/torch_policy.py @@ -775,7 +775,13 @@ def set_state(self, state: PolicyState) -> None: if optimizer_vars: assert len(optimizer_vars) == len(self._optimizers) for o, s in zip(self._optimizers, optimizer_vars): - optim_state_dict = convert_to_torch_tensor(s, device=self.device) + # Torch optimizer param_groups include things like beta, etc. These + # parameters should be left as scalar and not converted to tensors. + # otherwise, torch.optim.step() will start to complain. + optim_state_dict = {"param_groups": s["param_groups"]} + optim_state_dict["state"] = convert_to_torch_tensor( + s["state"], device=self.device + ) o.load_state_dict(optim_state_dict) # Set exploration's state. 
if hasattr(self, "exploration") and "_exploration_state" in state: diff --git a/rllib/policy/torch_policy_v2.py b/rllib/policy/torch_policy_v2.py index ea648fd912ad..0d58dbc55c2b 100644 --- a/rllib/policy/torch_policy_v2.py +++ b/rllib/policy/torch_policy_v2.py @@ -993,7 +993,13 @@ def set_state(self, state: PolicyState) -> None: if optimizer_vars: assert len(optimizer_vars) == len(self._optimizers) for o, s in zip(self._optimizers, optimizer_vars): - optim_state_dict = convert_to_torch_tensor(s, device=self.device) + # Torch optimizer param_groups include things like beta, etc. These + # parameters should be left as scalar and not converted to tensors. + # otherwise, torch.optim.step() will start to complain. + optim_state_dict = {"param_groups": s["param_groups"]} + optim_state_dict["state"] = convert_to_torch_tensor( + s["state"], device=self.device + ) o.load_state_dict(optim_state_dict) # Set exploration's state. if hasattr(self, "exploration") and "_exploration_state" in state: diff --git a/rllib/tests/test_algorithm_checkpoint_restore.py b/rllib/tests/test_algorithm_checkpoint_restore.py index 2c60ec2a9c58..aa19ac0de77a 100644 --- a/rllib/tests/test_algorithm_checkpoint_restore.py +++ b/rllib/tests/test_algorithm_checkpoint_restore.py @@ -16,6 +16,7 @@ from ray.rllib.algorithms.ars import ARSConfig from ray.rllib.algorithms.a3c import A3CConfig from ray.tune.registry import get_trainable_cls +import os def get_mean_action(alg, obs): @@ -32,7 +33,12 @@ def get_mean_action(alg, obs): # explore=None if we compare the mean of the distribution of actions for the # same observation to be the same. 
algorithms_and_configs = { - "A3C": (A3CConfig().exploration(explore=False).rollouts(num_rollout_workers=1)), + "A3C": ( + A3CConfig() + .exploration(explore=False) + .rollouts(num_rollout_workers=1) + .resources(num_gpus=int(os.environ.get("RLLIB_NUM_GPUS", "0"))) + ), "APEX_DDPG": ( ApexDDPGConfig() .exploration(explore=False) @@ -42,29 +48,34 @@ def get_mean_action(alg, obs): optimizer={"num_replay_buffer_shards": 1}, num_steps_sampled_before_learning_starts=0, ) + .resources(num_gpus=int(os.environ.get("RLLIB_NUM_GPUS", "0"))) ), "ARS": ( ARSConfig() .exploration(explore=False) .rollouts(num_rollout_workers=2, observation_filter="MeanStdFilter") .training(num_rollouts=10, noise_size=2500000) + .resources(num_gpus=int(os.environ.get("RLLIB_NUM_GPUS", "0"))) ), "DDPG": ( DDPGConfig() .exploration(explore=False) .reporting(min_sample_timesteps_per_iteration=100) .training(num_steps_sampled_before_learning_starts=0) + .resources(num_gpus=int(os.environ.get("RLLIB_NUM_GPUS", "0"))) ), "DQN": ( DQNConfig() .exploration(explore=False) .training(num_steps_sampled_before_learning_starts=0) + .resources(num_gpus=int(os.environ.get("RLLIB_NUM_GPUS", "0"))) ), "ES": ( ESConfig() .exploration(explore=False) .training(episodes_per_batch=10, train_batch_size=100, noise_size=2500000) .rollouts(observation_filter="MeanStdFilter", num_rollout_workers=2) + .resources(num_gpus=int(os.environ.get("RLLIB_NUM_GPUS", "0"))) ), "PPO": ( # See the comment before the `algorithms_and_configs` dict. 
@@ -72,21 +83,30 @@ def get_mean_action(alg, obs): PPOConfig() .training(num_sgd_iter=5, train_batch_size=1000) .rollouts(num_rollout_workers=2) + .resources(num_gpus=int(os.environ.get("RLLIB_NUM_GPUS", "0"))) ), "SimpleQ": ( SimpleQConfig() .exploration(explore=False) .training(num_steps_sampled_before_learning_starts=0) + .resources(num_gpus=int(os.environ.get("RLLIB_NUM_GPUS", "0"))) ), "SAC": ( SACConfig() .exploration(explore=False) .training(num_steps_sampled_before_learning_starts=0) + .resources(num_gpus=int(os.environ.get("RLLIB_NUM_GPUS", "0"))) ), } -def ckpt_restore_test(algo_name, tf2=False, object_store=False, replay_buffer=False): +def ckpt_restore_test( + algo_name, + tf2=False, + object_store=False, + replay_buffer=False, + run_restored_algorithm=True, +): config = algorithms_and_configs[algo_name].to_dict() # If required, store replay buffer data in checkpoints as well. if replay_buffer: @@ -172,22 +192,28 @@ def ckpt_restore_test(algo_name, tf2=False, object_store=False, replay_buffer=Fa raise AssertionError( "algo={} [a1={} a2={}]".format(algo_name, a1, a2) ) - # Stop both algos. + # Stop algo 1. alg1.stop() + + if run_restored_algorithm: + # Check that algo 2 can still run. + print("Starting second run on Algo 2...") + alg2.train() alg2.stop() class TestCheckpointRestorePG(unittest.TestCase): @classmethod def setUpClass(cls): - ray.init(num_cpus=5) + ray.init() @classmethod def tearDownClass(cls): ray.shutdown() def test_a3c_checkpoint_restore(self): - ckpt_restore_test("A3C") + # TODO(Kourosh) A3C cannot run a restored algorithm for some reason. 
+ ckpt_restore_test("A3C", run_restored_algorithm=False) def test_ppo_checkpoint_restore(self): ckpt_restore_test("PPO", object_store=True) @@ -196,7 +222,7 @@ def test_ppo_checkpoint_restore(self): class TestCheckpointRestoreOffPolicy(unittest.TestCase): @classmethod def setUpClass(cls): - ray.init(num_cpus=5) + ray.init() @classmethod def tearDownClass(cls): @@ -221,7 +247,7 @@ def test_simpleq_checkpoint_restore(self): class TestCheckpointRestoreEvolutionAlgos(unittest.TestCase): @classmethod def setUpClass(cls): - ray.init(num_cpus=5) + ray.init() @classmethod def tearDownClass(cls): From 9edf062557ee5efcbfb7d461d5ad80e1a6017bdf Mon Sep 17 00:00:00 2001 From: Kai Fricke Date: Mon, 8 May 2023 10:52:20 +0100 Subject: [PATCH 274/424] [air/output] Improve leaked mentions of Tune concepts (#35003) Ray Tune is the execution backend for Ray Train. This means that sometimes error/warning messages use Tune concepts, that don't make sense in a single-trial run, such as with Ray Train trainers. This PR improves three such occurrences: 1. The insufficient resources warnings message has been adjusted in the case where only one trial is run 2. Calculation of `max_pending_trials` now uses `search_alg.total_samples` as the minimum, which was an oversight before. 3. 
On interrupt of a training run, a `Tuner.restore()` message was suggested, but it should be `Trainer.restore()` Signed-off-by: Kai Fricke --- python/ray/train/base_trainer.py | 5 +- .../insufficient_resources_manager.py | 130 +++++++++++------- python/ray/tune/execution/trial_runner.py | 20 ++- python/ray/tune/execution/tune_controller.py | 2 + python/ray/tune/impl/tuner_internal.py | 3 + .../ray/tune/tests/test_ray_trial_executor.py | 15 +- python/ray/tune/tune.py | 36 +++-- python/ray/tune/tuner.py | 1 + 8 files changed, 143 insertions(+), 69 deletions(-) diff --git a/python/ray/train/base_trainer.py b/python/ray/train/base_trainer.py index bd9f47da83f2..7f40d0a1ac01 100644 --- a/python/ray/train/base_trainer.py +++ b/python/ray/train/base_trainer.py @@ -578,7 +578,10 @@ def fit(self) -> Result: ) else: tuner = Tuner( - trainable=trainable, param_space=param_space, run_config=self.run_config + trainable=trainable, + param_space=param_space, + run_config=self.run_config, + _trainer_api=True, ) experiment_path = Path( diff --git a/python/ray/tune/execution/insufficient_resources_manager.py b/python/ray/tune/execution/insufficient_resources_manager.py index cc255ed9b267..c0755914c8ac 100644 --- a/python/ray/tune/execution/insufficient_resources_manager.py +++ b/python/ray/tune/execution/insufficient_resources_manager.py @@ -3,7 +3,7 @@ import os import ray import time -from typing import Dict +from typing import Dict, Optional, Tuple from ray.tune.execution.cluster_info import _is_ray_cluster from ray.tune.experiment import Trial @@ -18,10 +18,10 @@ def _get_cluster_resources_no_autoscaler() -> Dict: return ray.cluster_resources() -def _get_trial_cpu_and_gpu(trial: Trial) -> Dict: +def _get_trial_cpu_and_gpu(trial: Trial) -> Tuple[int, int]: cpu = trial.placement_group_factory.required_resources.get("CPU", 0) gpu = trial.placement_group_factory.required_resources.get("GPU", 0) - return {"CPU": cpu, "GPU": gpu} + return cpu, gpu def _can_fulfill_no_autoscaler(trial: 
Trial) -> bool: @@ -30,11 +30,11 @@ def _can_fulfill_no_autoscaler(trial: Trial) -> bool: For no autoscaler case. """ assert trial.status == Trial.PENDING - trial_cpu_gpu = _get_trial_cpu_and_gpu(trial) + asked_cpus, asked_gpus = _get_trial_cpu_and_gpu(trial) - return trial_cpu_gpu["CPU"] <= _get_cluster_resources_no_autoscaler().get( + return asked_cpus <= _get_cluster_resources_no_autoscaler().get( "CPU", 0 - ) and trial_cpu_gpu["GPU"] <= _get_cluster_resources_no_autoscaler().get("GPU", 0) + ) and asked_gpus <= _get_cluster_resources_no_autoscaler().get("GPU", 0) @lru_cache() @@ -52,38 +52,68 @@ def _get_insufficient_resources_warning_threshold() -> float: return float(os.environ.get("TUNE_WARN_INSUFFICENT_RESOURCE_THRESHOLD_S", "60")) +MSG_TRAIN_START = ( + "Training has not started in the last {wait_time:.0f} seconds. " + "This could be due to the cluster not having enough resources available. " +) +MSG_TRAIN_INSUFFICIENT = ( + "You asked for {asked_cpus} CPUs and {asked_gpus} GPUs, but the cluster only " + "has {cluster_cpus} CPUs and {cluster_gpus} GPUs available. " +) +MSG_TRAIN_END = ( + "Stop the training and adjust the required resources (e.g. via the " + "`ScalingConfig` or `resources_per_trial`, or `num_workers` for rllib), " + "or add more resources to your cluster." +) + +MSG_TUNE_START = ( + "No trial is running and no new trial has been started within " + "the last {wait_time:.0f} seconds. " + "This could be due to the cluster not having enough resources available. " +) +MSG_TUNE_INSUFFICIENT = ( + "You asked for {asked_cpus} CPUs and {asked_gpus} GPUs per trial, " + "but the cluster only has {cluster_cpus} CPUs and {cluster_gpus} GPUs available. " +) +MSG_TUNE_END = ( + "Stop the tuning and adjust the required resources (e.g. via the " + "`ScalingConfig` or `resources_per_trial`, or `num_workers` for rllib), " + "or add more resources to your cluster." +) + + # TODO(xwjiang): Consider having a help page with more detailed instructions. 
@lru_cache() -def _get_insufficient_resources_warning_msg() -> str: - msg = ( - f"No trial is running and no new trial has been started within" - f" at least the last " - f"{_get_insufficient_resources_warning_threshold()} seconds. " - f"This could be due to the cluster not having enough " - f"resources available to start the next trial. " - f"Stop the tuning job and adjust the resources requested per trial " - f"(possibly via `resources_per_trial` or via `num_workers` for rllib) " - f"and/or add more resources to your Ray runtime." - ) - if _is_ray_cluster(): - return "Ignore this message if the cluster is autoscaling. " + msg +def _get_insufficient_resources_warning_msg( + for_train: bool = False, trial: Optional[Trial] = None +) -> str: + msg = "Ignore this message if the cluster is autoscaling. " + + if for_train: + start = MSG_TRAIN_START + insufficient = MSG_TRAIN_INSUFFICIENT + end = MSG_TRAIN_END else: - return msg + start = MSG_TUNE_START + insufficient = MSG_TUNE_INSUFFICIENT + end = MSG_TUNE_END + + msg += start.format(wait_time=_get_insufficient_resources_warning_threshold()) + + if trial: + asked_cpus, asked_gpus = _get_trial_cpu_and_gpu(trial) + cluster_resources = _get_cluster_resources_no_autoscaler() + + msg += insufficient.format( + asked_cpus=asked_cpus, + asked_gpus=asked_gpus, + cluster_cpus=cluster_resources.get("CPU", 0), + cluster_gpus=cluster_resources.get("GPU", 0), + ) + msg += end -# A beefed up version when Tune Error is raised. -def _get_insufficient_resources_error_msg(trial: Trial) -> str: - trial_cpu_gpu = _get_trial_cpu_and_gpu(trial) - return ( - f"Ignore this message if the cluster is autoscaling. " - f"You asked for {trial_cpu_gpu['CPU']} cpu and " - f"{trial_cpu_gpu['GPU']} gpu per trial, but the cluster only has " - f"{_get_cluster_resources_no_autoscaler().get('CPU', 0)} cpu and " - f"{_get_cluster_resources_no_autoscaler().get('GPU', 0)} gpu. 
" - f"Stop the tuning job and adjust the resources requested per trial " - f"(possibly via `resources_per_trial` or via `num_workers` for rllib) " - f"and/or add more resources to your Ray runtime." - ) + return msg class _InsufficientResourcesManager: @@ -94,10 +124,11 @@ class _InsufficientResourcesManager: act upon. """ - def __init__(self): + def __init__(self, for_train: bool = False): # The information tracked across the life time of Tune loop. self._no_running_trials_since = -1 self._last_trial_num = -1 + self._for_train = for_train def on_no_available_trials(self, all_trials): """Tracks information across the life of Tune loop and makes guesses @@ -115,22 +146,21 @@ def on_no_available_trials(self, all_trials): time.monotonic() - self._no_running_trials_since > _get_insufficient_resources_warning_threshold() ): - if not _is_ray_cluster(): # autoscaler not enabled - # If any of the pending trial cannot be fulfilled, - # that's a good enough hint of trial resources not enough. - for trial in all_trials: - if ( - trial.status is Trial.PENDING - and not _can_fulfill_no_autoscaler(trial) - ): - # TODO(xwjiang): - # Raise an Error once #18608 is resolved. - logger.warning(_get_insufficient_resources_error_msg(trial)) - break - else: - # TODO(xwjiang): #17799. - # Output a more helpful msg for autoscaler. 
- logger.warning(_get_insufficient_resources_warning_msg()) + can_fulfill_any = any( + trial.status == Trial.PENDING and _can_fulfill_no_autoscaler(trial) + for trial in all_trials + ) + + if can_fulfill_any: + # If one trial can be fulfilled, it will be fulfilled eventually + self._no_running_trials_since = -1 + return + + # Otherwise, can fulfill none + msg = _get_insufficient_resources_warning_msg( + for_train=self._for_train, trial=all_trials[0] + ) + logger.warning(msg) self._no_running_trials_since = time.monotonic() else: self._no_running_trials_since = -1 diff --git a/python/ray/tune/execution/trial_runner.py b/python/ray/tune/execution/trial_runner.py index f912fdf6f0c6..196291a21a53 100644 --- a/python/ray/tune/execution/trial_runner.py +++ b/python/ray/tune/execution/trial_runner.py @@ -134,12 +134,15 @@ def __init__( callbacks: Optional[List[Callback]] = None, metric: Optional[str] = None, trial_checkpoint_config: Optional[CheckpointConfig] = None, + _trainer_api: bool = False, ): self._search_alg = search_alg or BasicVariantGenerator() self._placeholder_resolvers = placeholder_resolvers self._scheduler_alg = scheduler or FIFOScheduler() self._callbacks = CallbackList(callbacks or []) - self._insufficient_resources_manager = _InsufficientResourcesManager() + self._insufficient_resources_manager = _InsufficientResourcesManager( + for_train=_trainer_api + ) self._pending_trial_queue_times = {} self._max_pending_trials = _get_max_pending_trials(self._search_alg) @@ -519,6 +522,11 @@ def resume( trial_to_add.status = Trial.TERMINATED self.add_trial(trial_to_add) + def update_max_pending_trials(self, max_pending_trials: Optional[int] = None): + self._max_pending_trials = max_pending_trials or _get_max_pending_trials( + self._search_alg + ) + def update_pending_trial_resources( self, resources: Union[dict, PlacementGroupFactory] ): @@ -1252,6 +1260,7 @@ def __init__( callbacks: Optional[List[Callback]] = None, metric: Optional[str] = None, 
trial_checkpoint_config: Optional[CheckpointConfig] = None, + _trainer_api: bool = False, # Deprecated local_checkpoint_dir: Optional[str] = None, ): @@ -1287,6 +1296,7 @@ def __init__( callbacks=callbacks, metric=metric, trial_checkpoint_config=trial_checkpoint_config, + _trainer_api=_trainer_api, ) self.trial_executor.setup( @@ -1308,6 +1318,10 @@ def _wrapped(self): executor_whitelist_attr={"has_resources_for_trial", "pause_trial", "save"}, ) + def update_max_pending_trials(self, max_pending_trials: Optional[int] = None): + super().update_max_pending_trials(max_pending_trials=max_pending_trials) + self.trial_executor._max_staged_actors = self._max_pending_trials + def _used_resources_string(self) -> str: return self.trial_executor.debug_string() @@ -1604,7 +1618,9 @@ def _get_max_pending_trials(search_alg: SearchAlgorithm) -> int: # Use a minimum of 16 to trigger fast autoscaling # Scale up to at most the number of available cluster CPUs cluster_cpus = ray.cluster_resources().get("CPU", 1.0) - max_pending_trials = max(16, int(cluster_cpus * 1.1)) + max_pending_trials = min( + max(search_alg.total_samples, 16), max(16, int(cluster_cpus * 1.1)) + ) if max_pending_trials > 128: logger.warning( diff --git a/python/ray/tune/execution/tune_controller.py b/python/ray/tune/execution/tune_controller.py index b2b710dae9ee..de77eeecf80b 100644 --- a/python/ray/tune/execution/tune_controller.py +++ b/python/ray/tune/execution/tune_controller.py @@ -66,6 +66,7 @@ def __init__( chdir_to_trial_dir: bool = False, reuse_actors: bool = False, resource_manager_factory: Optional[Callable[[], ResourceManager]] = None, + _trainer_api: bool = False, ): if resource_manager_factory: self._resource_manager = resource_manager_factory() @@ -144,6 +145,7 @@ def __init__( callbacks=callbacks, metric=metric, trial_checkpoint_config=trial_checkpoint_config, + _trainer_api=_trainer_api, ) def _wrapped(self): diff --git a/python/ray/tune/impl/tuner_internal.py 
b/python/ray/tune/impl/tuner_internal.py index b92dcab2da18..3985c7ed58b0 100644 --- a/python/ray/tune/impl/tuner_internal.py +++ b/python/ray/tune/impl/tuner_internal.py @@ -90,6 +90,7 @@ def __init__( tune_config: Optional[TuneConfig] = None, run_config: Optional[RunConfig] = None, _tuner_kwargs: Optional[Dict] = None, + _trainer_api: bool = False, ): from ray.train.trainer import BaseTrainer @@ -102,6 +103,7 @@ def __init__( self._tune_config = tune_config or TuneConfig() self._run_config = run_config or RunConfig() + self._trainer_api = _trainer_api # Restore from Tuner checkpoint. if restore_path: @@ -681,6 +683,7 @@ def _get_tune_run_arguments(self, trainable: TrainableType) -> Dict[str, Any]: trial_dirname_creator=self._tune_config.trial_dirname_creator, chdir_to_trial_dir=self._tune_config.chdir_to_trial_dir, _tuner_api=True, + _trainer_api=self._trainer_api, ) def _fit_internal( diff --git a/python/ray/tune/tests/test_ray_trial_executor.py b/python/ray/tune/tests/test_ray_trial_executor.py index ad8a99d09b47..b289bcd2bd49 100644 --- a/python/ray/tune/tests/test_ray_trial_executor.py +++ b/python/ray/tune/tests/test_ray_trial_executor.py @@ -99,13 +99,14 @@ def train(config): ) msg = ( "Ignore this message if the cluster is autoscaling. " - "You asked for 5.0 cpu and 3.0 gpu per trial, " - "but the cluster only has 4.0 cpu and 2.0 gpu. " - "Stop the tuning job and " - "adjust the resources requested per trial " - "(possibly via `resources_per_trial` " - "or via `num_workers` for rllib) " - "and/or add more resources to your Ray runtime." + "No trial is running and no new trial has been started " + "within the last 0 seconds. This could be due to the cluster not having " + "enough resources available. You asked for 5.0 CPUs and 3.0 GPUs per " + "trial, but the cluster only has 4.0 CPUs and 2.0 GPUs available. " + "Stop the tuning and adjust the required resources " + "(e.g. 
via the `ScalingConfig` or `resources_per_trial`, " + "or `num_workers` for rllib), " + "or add more resources to your cluster." ) mocked_warn.assert_called_once_with(msg) diff --git a/python/ray/tune/tune.py b/python/ray/tune/tune.py index f7ba58d6e923..97ad0b53f957 100644 --- a/python/ray/tune/tune.py +++ b/python/ray/tune/tune.py @@ -278,7 +278,10 @@ def run( _remote: Optional[bool] = None, # Passed by the Tuner. _remote_string_queue: Optional[Queue] = None, + # Todo (krfricke): Find a better way to pass entrypoint information, e.g. + # a context object or similar. _tuner_api: bool = False, + _trainer_api: bool = False, ) -> ExperimentAnalysis: """Executes training. @@ -487,19 +490,24 @@ class and registered trainables. remote_run_kwargs = locals().copy() remote_run_kwargs.pop("_remote") - error_message_map = ( - { + if _tuner_api and _trainer_api: + error_message_map = { + "entrypoint": "Trainer(...)", + "search_space_arg": "param_space", + "restore_entrypoint": 'Trainer.restore(path="{path}", ...)', + } + elif _tuner_api and not _trainer_api: + error_message_map = { "entrypoint": "Tuner(...)", "search_space_arg": "param_space", "restore_entrypoint": 'Tuner.restore(path="{path}", trainable=...)', } - if _tuner_api - else { + else: + error_message_map = { "entrypoint": "tune.run(...)", "search_space_arg": "config", "restore_entrypoint": "tune.run(..., resume=True)", } - ) _ray_auto_init(entrypoint=error_message_map["entrypoint"]) if _remote is None: @@ -908,6 +916,7 @@ class and registered trainables. callbacks=callbacks, metric=metric, trial_checkpoint_config=experiments[0].checkpoint_config, + _trainer_api=_trainer_api, ) if bool(int(os.environ.get("TUNE_NEW_EXECUTION", "1"))): @@ -923,6 +932,9 @@ class and registered trainables. 
if not runner.resumed: for exp in experiments: search_alg.add_configurations([exp]) + # search_alg.total_samples has been updated, so we should + # update the number of pending trials + runner.update_max_pending_trials() else: logger.debug( "You have resumed the Tune run, which means that any newly specified " @@ -1031,10 +1043,16 @@ class and registered trainables. restore_entrypoint = error_message_map["restore_entrypoint"].format( path=runner.experiment_path, ) - logger.warning( - "Experiment has been interrupted, but the most recent state was saved.\n" - f"Continue running this experiment with: {restore_entrypoint}" - ) + if _trainer_api: + logger.warning( + f"Training has been interrupted, but the most recent state was saved.\n" + f"Resume training with: {restore_entrypoint}" + ) + else: + logger.warning( + f"Experiment has been interrupted, but the most recent state was " + f"saved.\nResume experiment with: {restore_entrypoint}" + ) ea = ExperimentAnalysis( experiment_checkpoint, trials=all_trials, diff --git a/python/ray/tune/tuner.py b/python/ray/tune/tuner.py index daa88006f745..edcdbe802beb 100644 --- a/python/ray/tune/tuner.py +++ b/python/ray/tune/tuner.py @@ -145,6 +145,7 @@ def __init__( # TODO(xwjiang): Remove this later. _tuner_kwargs: Optional[Dict] = None, _tuner_internal: Optional[TunerInternal] = None, + _trainer_api: bool = False, ): """Configure and construct a tune run.""" kwargs = locals().copy() From 53790584a04384c1174b800059549d24f70ca452 Mon Sep 17 00:00:00 2001 From: Cuong Nguyen <128072568+can-anyscale@users.noreply.github.com> Date: Mon, 8 May 2023 02:53:55 -0700 Subject: [PATCH 275/424] [ci/release-tests] remove anyscale run type (#35042) There is only one remaining run type for release tests, which is anyscale_job, so we don't need to specify these any more. 
Remove the instructions as well so people do not bother specify a different type Signed-off-by: Cuong Nguyen Signed-off-by: Kai Fricke Co-authored-by: Kai Fricke --- release/release_tests.yaml | 14 -------------- 1 file changed, 14 deletions(-) diff --git a/release/release_tests.yaml b/release/release_tests.yaml index 3d5e08e3eaa9..f8c6fed0958f 100644 --- a/release/release_tests.yaml +++ b/release/release_tests.yaml @@ -43,11 +43,6 @@ # # # Run configuration for the test # run: -# # Type of test. Can be [anyscale_job] -# # Uses either Ray jobs, anyscale jobs or anyscale SDK commands -# # run the actual release test. -# type: anyscale_job -# # # If you want to wait for nodes to be ready, you can specify this here: # wait_for_nodes: # # Number of nodes @@ -952,8 +947,6 @@ # wait_for_nodes: # num_nodes: 4 -# type: anyscale_job - # alert: xgboost_tests - name: xgboost_train_moderate @@ -1215,8 +1208,6 @@ # wait_for_nodes: # num_nodes: 4 -# type: anyscale_job - # alert: default - name: lightgbm_train_moderate @@ -2111,7 +2102,6 @@ run: timeout: 600 script: python workloads/test_result_throughput_single_node.py - type: anyscale_job - name: tune_scalability_xgboost_sweep group: Tune scalability tests @@ -2943,8 +2933,6 @@ # wait_for_nodes: # num_nodes: 1 -# type: anyscale_job - # alert: default @@ -4898,8 +4886,6 @@ # wait_for_nodes: # num_nodes: 251 # -# type: anyscale_job -# file_manager: sdk - name: pg_autoscaling_regression_test group: core-daily-test From 57ba66e038b6a4796ee61b2432d4c7c092b34dd2 Mon Sep 17 00:00:00 2001 From: Sven Mika Date: Mon, 8 May 2023 17:55:32 +0200 Subject: [PATCH 276/424] [RLlib] Learner API: Policies using RLModules (for sampler only) do not need loss/stats/mixins. 
(#34445) --- rllib/BUILD | 2 +- rllib/algorithms/algorithm_config.py | 25 +- rllib/algorithms/appo/appo.py | 17 +- rllib/algorithms/appo/appo_tf_policy.py | 17 +- rllib/algorithms/appo/appo_torch_policy.py | 13 +- .../appo/tests/test_appo_learner.py | 1 - .../algorithms/appo/tf/appo_tf_policy_rlm.py | 227 ---------------- .../appo/torch/appo_torch_policy_rlm.py | 213 --------------- rllib/algorithms/impala/impala.py | 78 +++--- rllib/algorithms/impala/impala_learner.py | 43 +-- rllib/algorithms/impala/impala_tf_policy.py | 19 +- .../algorithms/impala/impala_torch_policy.py | 20 +- .../impala/tests/test_impala_learner.py | 12 +- .../tests/test_impala_off_policyness.py | 40 +-- .../algorithms/impala/tf/impala_tf_learner.py | 22 +- .../impala/tf/impala_tf_policy_rlm.py | 165 ------------ .../impala/torch/impala_torch_learner.py | 22 +- .../impala/torch/impala_torch_policy_rlm.py | 167 ------------ rllib/algorithms/ppo/ppo.py | 40 ++- rllib/algorithms/ppo/ppo_learner.py | 52 ++-- rllib/algorithms/ppo/ppo_tf_policy.py | 2 +- rllib/algorithms/ppo/ppo_torch_policy.py | 3 - .../algorithms/ppo/tests/test_ppo_learner.py | 3 +- .../ppo/tests/test_ppo_with_rl_module.py | 49 ++-- rllib/algorithms/ppo/tf/ppo_tf_learner.py | 11 +- rllib/algorithms/ppo/tf/ppo_tf_policy_rlm.py | 185 ------------- .../algorithms/ppo/torch/ppo_torch_learner.py | 10 +- .../ppo/torch/ppo_torch_policy_rlm.py | 246 ------------------ .../algorithms/tests/test_algorithm_config.py | 85 +++--- rllib/core/learner/learner.py | 32 +-- rllib/core/learner/tf/tf_learner.py | 28 +- rllib/core/learner/torch/torch_learner.py | 15 +- rllib/core/models/tests/test_catalog.py | 1 + .../tests/test_trajectory_view_api.py | 8 +- rllib/examples/self_play_with_open_spiel.py | 5 +- rllib/models/tests/test_preprocessors.py | 17 +- rllib/policy/eager_tf_policy_v2.py | 21 +- rllib/policy/policy.py | 52 ++-- rllib/policy/tf_mixins.py | 62 ++--- rllib/policy/torch_mixins.py | 12 +- rllib/policy/torch_policy_v2.py | 53 ++-- 
.../impala/cartpole-impala.yaml | 4 +- .../ppo/cartpole-ppo-with-rl-module.yaml | 2 +- .../ppo/pendulum-ppo-with-rl-module.yaml | 1 + rllib/utils/schedules/scheduler.py | 157 +++++++++++ 45 files changed, 596 insertions(+), 1663 deletions(-) delete mode 100644 rllib/algorithms/appo/tf/appo_tf_policy_rlm.py delete mode 100644 rllib/algorithms/appo/torch/appo_torch_policy_rlm.py delete mode 100644 rllib/algorithms/impala/tf/impala_tf_policy_rlm.py delete mode 100644 rllib/algorithms/impala/torch/impala_torch_policy_rlm.py delete mode 100644 rllib/algorithms/ppo/tf/ppo_tf_policy_rlm.py delete mode 100644 rllib/algorithms/ppo/torch/ppo_torch_policy_rlm.py create mode 100644 rllib/utils/schedules/scheduler.py diff --git a/rllib/BUILD b/rllib/BUILD index 20d81511ac16..a57156320112 100644 --- a/rllib/BUILD +++ b/rllib/BUILD @@ -805,7 +805,7 @@ py_test( py_test( name = "test_algorithm_config", tags = ["team:rllib", "algorithms_dir", "algorithms_dir_generic"], - size = "small", + size = "medium", srcs = ["algorithms/tests/test_algorithm_config.py"], ) diff --git a/rllib/algorithms/algorithm_config.py b/rllib/algorithms/algorithm_config.py index d83b5705a071..241645f632a7 100644 --- a/rllib/algorithms/algorithm_config.py +++ b/rllib/algorithms/algorithm_config.py @@ -52,6 +52,7 @@ try_import_gymnasium_and_gym, ) from ray.rllib.utils.policy import validate_policy_id +from ray.rllib.utils.schedules.scheduler import Scheduler from ray.rllib.utils.serialization import ( deserialize_type, NOT_SERIALIZABLE, @@ -846,7 +847,7 @@ def validate(self) -> None: error=True, ) - # RLModule API only works with connectors. + # RLModule API only works with connectors and with Learner API. if not self.enable_connectors and self._enable_rl_module_api: raise ValueError( "RLModule API only works with connectors. " @@ -855,19 +856,26 @@ def validate(self) -> None: ) # Learner API requires RLModule API. 
- if self._enable_learner_api and not self._enable_rl_module_api: + if self._enable_learner_api is not self._enable_rl_module_api: raise ValueError( - "Learner API requires RLModule API. " - "Please enable RLModule API via " - "`config.training(_enable_rl_module_api=True)`." + "Learner API requires RLModule API and vice-versa! " + "Enable RLModule API via " + "`config.rl_module(_enable_rl_module_api=True)` and the Learner API " + "via `config.training(_enable_learner_api=True)` (or set both to " + "False)." ) if bool(os.environ.get("RLLIB_ENABLE_RL_MODULE", False)): - # enable RLModule API and connectors if env variable is set + # Enable RLModule API and connectors if env variable is set # (to be used in unittesting) self.rl_module(_enable_rl_module_api=True) + self.training(_enable_learner_api=True) self.enable_connectors = True + # LR-schedule checking. + if self._enable_learner_api: + Scheduler.validate(self.lr_schedule, "lr_schedule", "learning rate") + # Validate grad clipping settings. if self.grad_clip_by not in ["value", "norm", "global_norm"]: raise ValueError( @@ -1587,7 +1595,8 @@ def training( lr_schedule: Learning rate schedule. In the format of [[timestep, lr-value], [timestep, lr-value], ...] Intermediary timesteps will be assigned to interpolated learning rate - values. A schedule should normally start from timestep 0. + values. A schedule config's first entry must start with timestep 0, + i.e.: [[0, initial_value], [...]]. grad_clip: The value to use for gradient clipping. Depending on the `grad_clip_by` setting, gradients will either be clipped by value, norm, or global_norm (see docstring on `grad_clip_by` below for more @@ -1664,7 +1673,7 @@ def training( deprecation_warning( old="AlgorithmConfig.training(_use_default_native_models=True)", help="_use_default_native_models is not supported " - "anymore. To get rid of this error, set `experimental(" + "anymore. To get rid of this error, set `rl_module(" "_enable_rl_module_api` to True. 
Native models will " "be better supported by the upcoming RLModule API.", # Error out if user tries to enable this diff --git a/rllib/algorithms/appo/appo.py b/rllib/algorithms/appo/appo.py index 5b503f239192..8b27ef5100cc 100644 --- a/rllib/algorithms/appo/appo.py +++ b/rllib/algorithms/appo/appo.py @@ -381,16 +381,9 @@ def get_default_policy_class( cls, config: AlgorithmConfig ) -> Optional[Type[Policy]]: if config["framework"] == "torch": - if config._enable_rl_module_api: - from ray.rllib.algorithms.appo.torch.appo_torch_policy_rlm import ( - APPOTorchPolicyWithRLModule, - ) - - return APPOTorchPolicyWithRLModule - else: - from ray.rllib.algorithms.appo.appo_torch_policy import APPOTorchPolicy + from ray.rllib.algorithms.appo.appo_torch_policy import APPOTorchPolicy - return APPOTorchPolicy + return APPOTorchPolicy elif config["framework"] == "tf": if config._enable_rl_module_api: raise ValueError( @@ -402,12 +395,6 @@ def get_default_policy_class( return APPOTF1Policy else: - if config._enable_rl_module_api: - from ray.rllib.algorithms.appo.tf.appo_tf_policy_rlm import ( - APPOTfPolicyWithRLModule, - ) - - return APPOTfPolicyWithRLModule from ray.rllib.algorithms.appo.appo_tf_policy import APPOTF2Policy return APPOTF2Policy diff --git a/rllib/algorithms/appo/appo_tf_policy.py b/rllib/algorithms/appo/appo_tf_policy.py index d91b4516bfd7..8441f8032ede 100644 --- a/rllib/algorithms/appo/appo_tf_policy.py +++ b/rllib/algorithms/appo/appo_tf_policy.py @@ -81,10 +81,15 @@ def __init__( # First thing first, enable eager execution if necessary. base.enable_eager_execution_if_necessary() - # Although this is a no-op, we call __init__ here to make it clear - # that base.__init__ will use the make_model() call. - VTraceClipGradients.__init__(self) - VTraceOptimizer.__init__(self) + # If Learner API is used, we don't need any loss-specific mixins. 
+ # However, we also would like to avoid creating special Policy-subclasses + # for this as the entire Policy concept will soon not be used anymore with + # the new Learner- and RLModule APIs. + if not config.get("_enable_learner_api", False): + # Although this is a no-op, we call __init__ here to make it clear + # that base.__init__ will use the make_model() call. + VTraceClipGradients.__init__(self) + VTraceOptimizer.__init__(self) # Initialize base class. base.__init__( @@ -104,7 +109,9 @@ def __init__( ) ValueNetworkMixin.__init__(self, config) KLCoeffMixin.__init__(self, config) - GradStatsMixin.__init__(self) + + if not config.get("_enable_learner_api", False): + GradStatsMixin.__init__(self) # Note: this is a bit ugly, but loss and optimizer initialization must # happen after all the MixIns are initialized. diff --git a/rllib/algorithms/appo/appo_torch_policy.py b/rllib/algorithms/appo/appo_torch_policy.py index b92b7c32fd51..4a7754830f32 100644 --- a/rllib/algorithms/appo/appo_torch_policy.py +++ b/rllib/algorithms/appo/appo_torch_policy.py @@ -69,9 +69,15 @@ class APPOTorchPolicy( def __init__(self, observation_space, action_space, config): config = dict(ray.rllib.algorithms.appo.appo.APPOConfig().to_dict(), **config) - # Although this is a no-op, we call __init__ here to make it clear - # that base.__init__ will use the make_model() call. - VTraceOptimizer.__init__(self) + # If Learner API is used, we don't need any loss-specific mixins. + # However, we also would like to avoid creating special Policy-subclasses + # for this as the entire Policy concept will soon not be used anymore with + # the new Learner- and RLModule APIs. + if not config.get("_enable_learner_api", False): + # Although this is a no-op, we call __init__ here to make it clear + # that base.__init__ will use the make_model() call. 
+ VTraceOptimizer.__init__(self) + LearningRateSchedule.__init__(self, config["lr"], config["lr_schedule"]) TorchPolicyV2.__init__( @@ -88,7 +94,6 @@ def __init__(self, observation_space, action_space, config): ValueNetworkMixin.__init__(self, config) KLCoeffMixin.__init__(self, config) - # TODO: Don't require users to call this manually. self._initialize_loss_from_dummy_batch() # Initiate TargetNetwork ops after loss initialization. diff --git a/rllib/algorithms/appo/tests/test_appo_learner.py b/rllib/algorithms/appo/tests/test_appo_learner.py index af954bf701e7..1bc1bd1b0a08 100644 --- a/rllib/algorithms/appo/tests/test_appo_learner.py +++ b/rllib/algorithms/appo/tests/test_appo_learner.py @@ -92,7 +92,6 @@ def test_appo_loss(self): ) algo_config = config.copy(copy_frozen=False) - algo_config.training(_enable_learner_api=True) algo_config.validate() algo_config.freeze() diff --git a/rllib/algorithms/appo/tf/appo_tf_policy_rlm.py b/rllib/algorithms/appo/tf/appo_tf_policy_rlm.py deleted file mode 100644 index 24a4ec9cb649..000000000000 --- a/rllib/algorithms/appo/tf/appo_tf_policy_rlm.py +++ /dev/null @@ -1,227 +0,0 @@ -import logging -from typing import Dict, List, Union - -from ray.rllib.algorithms.ppo.ppo_tf_policy import validate_config -from ray.rllib.models.modelv2 import ModelV2 -from ray.rllib.policy.sample_batch import SampleBatch -from ray.rllib.policy.tf_mixins import ( - EntropyCoeffSchedule, - LearningRateSchedule, - KLCoeffMixin, - GradStatsMixin, - TargetNetworkMixin, -) -from ray.rllib.algorithms.impala.impala_tf_policy import ( - VTraceClipGradients, - VTraceOptimizer, -) -from ray.rllib.policy.eager_tf_policy_v2 import EagerTFPolicyV2 -from ray.rllib.utils.annotations import override -from ray.rllib.utils.deprecation import Deprecated -from ray.rllib.utils.framework import try_import_tf -from ray.rllib.utils.tf_utils import ( - explained_variance, -) - - -from ray.rllib.algorithms.impala.tf.vtrace_tf_v2 import make_time_major, vtrace_tf2 -from 
ray.rllib.utils.typing import TensorType - -tf1, tf, tfv = try_import_tf() - -logger = logging.getLogger(__name__) - - -class APPOTfPolicyWithRLModule( - VTraceClipGradients, - VTraceOptimizer, - LearningRateSchedule, - KLCoeffMixin, - EntropyCoeffSchedule, - TargetNetworkMixin, - GradStatsMixin, - EagerTFPolicyV2, -): - def __init__(self, observation_space, action_space, config): - validate_config(config) - EagerTFPolicyV2.enable_eager_execution_if_necessary() - # Initialize MixIns before super().__init__ because base class will call - # self.loss, which requires these MixIns to be initialized. - LearningRateSchedule.__init__(self, config["lr"], config["lr_schedule"]) - EntropyCoeffSchedule.__init__( - self, config["entropy_coeff"], config["entropy_coeff_schedule"] - ) - # Although this is a no-op, we call __init__ here to make it clear - # that base.__init__ will use the make_model() call. - VTraceClipGradients.__init__(self) - VTraceOptimizer.__init__(self) - self.framework = "tf2" - KLCoeffMixin.__init__(self, config) - GradStatsMixin.__init__(self) - EagerTFPolicyV2.__init__(self, observation_space, action_space, config) - # Construct the target model and make its weights the same as the model. - self.target_model = self.make_rl_module() - self.target_model.set_weights(self.model.get_weights()) - - # Initiate TargetNetwork ops after loss initialization. 
- self.maybe_initialize_optimizer_and_loss() - TargetNetworkMixin.__init__(self) - - @Deprecated(new="APPOTfLearner.compute_loss_per_module()", error=False) - @override(EagerTFPolicyV2) - def loss( - self, - model: Union[ModelV2, "tf.keras.Model"], - dist_class, - train_batch: SampleBatch, - ) -> Union[TensorType, List[TensorType]]: - train_batch[SampleBatch.ACTIONS] - train_batch[SampleBatch.ACTION_LOGP] - train_batch[SampleBatch.REWARDS] - train_batch[SampleBatch.TERMINATEDS] - - seqs_len = train_batch.get(SampleBatch.SEQ_LENS) - rollout_frag_or_episode_len = ( - self.config["rollout_fragment_length"] if not seqs_len else None - ) - drop_last = self.config["vtrace_drop_last_ts"] - - target_policy_fwd_out = model.forward_train(train_batch) - values = target_policy_fwd_out[SampleBatch.VF_PREDS] - target_policy_dist = target_policy_fwd_out[SampleBatch.ACTION_DIST] - - old_target_policy_fwd_out = self.target_model.forward_train(train_batch) - old_target_policy_dist = old_target_policy_fwd_out[SampleBatch.ACTION_DIST] - - behaviour_actions_logp = train_batch[SampleBatch.ACTION_LOGP] - target_actions_logp = target_policy_dist.logp(train_batch[SampleBatch.ACTIONS]) - old_target_actions_logp = old_target_policy_dist.logp( - train_batch[SampleBatch.ACTIONS] - ) - behaviour_actions_logp_time_major = make_time_major( - behaviour_actions_logp, - trajectory_len=rollout_frag_or_episode_len, - recurrent_seq_len=seqs_len, - drop_last=drop_last, - ) - target_actions_logp_time_major = make_time_major( - target_actions_logp, - trajectory_len=rollout_frag_or_episode_len, - recurrent_seq_len=seqs_len, - drop_last=drop_last, - ) - old_target_actions_logp_time_major = make_time_major( - old_target_actions_logp, - trajectory_len=rollout_frag_or_episode_len, - recurrent_seq_len=seqs_len, - drop_last=drop_last, - ) - values_time_major = make_time_major( - values, - trajectory_len=rollout_frag_or_episode_len, - recurrent_seq_len=seqs_len, - drop_last=drop_last, - ) - bootstrap_value = 
values_time_major[-1] - rewards_time_major = make_time_major( - train_batch[SampleBatch.REWARDS], - trajectory_len=rollout_frag_or_episode_len, - recurrent_seq_len=seqs_len, - drop_last=drop_last, - ) - - # how to compute discouts? - # should they be pre computed? - discounts_time_major = ( - 1.0 - - tf.cast( - make_time_major( - train_batch[SampleBatch.TERMINATEDS], - trajectory_len=rollout_frag_or_episode_len, - recurrent_seq_len=seqs_len, - drop_last=drop_last, - ), - dtype=tf.float32, - ) - ) * self.config["gamma"] - - # Note that vtrace will compute the main loop on the CPU for better performance. - vtrace_adjusted_target_values, pg_advantages = vtrace_tf2( - target_action_log_probs=old_target_actions_logp_time_major, - behaviour_action_log_probs=behaviour_actions_logp_time_major, - discounts=discounts_time_major, - rewards=rewards_time_major, - values=values_time_major, - bootstrap_value=bootstrap_value, - clip_pg_rho_threshold=self.config["vtrace_clip_pg_rho_threshold"], - clip_rho_threshold=self.config["vtrace_clip_rho_threshold"], - ) - - is_ratio = tf.clip_by_value( - tf.math.exp( - behaviour_actions_logp_time_major - target_actions_logp_time_major - ), - 0.0, - 2.0, - ) - logp_ratio = is_ratio * tf.math.exp( - target_actions_logp_time_major - behaviour_actions_logp_time_major - ) - - clip_param = self.config["clip_param"] - surrogate_loss = tf.math.minimum( - pg_advantages * logp_ratio, - ( - pg_advantages - * tf.clip_by_value(logp_ratio, 1 - clip_param, 1 + clip_param) - ), - ) - action_kl = old_target_policy_dist.kl(target_policy_dist) - mean_kl_loss = tf.math.reduce_mean(action_kl) - mean_pi_loss = -tf.math.reduce_mean(surrogate_loss) - - # The baseline loss. - delta = values_time_major - vtrace_adjusted_target_values - mean_vf_loss = 0.5 * tf.math.reduce_mean(delta**2) - - # The entropy loss. - mean_entropy_loss = -tf.math.reduce_mean(target_policy_dist.entropy()) - - # The summed weighted loss. 
- total_loss = ( - mean_pi_loss - + (mean_vf_loss * self.config["vf_loss_coeff"]) - + (mean_entropy_loss * self.entropy_coeff) - + (mean_kl_loss * self.kl_coeff) - ) - - self.stats = { - "total_loss": total_loss, - "policy_loss": mean_pi_loss, - "vf_loss": mean_vf_loss, - "values": values_time_major, - "entropy_loss": mean_entropy_loss, - "vtrace_adjusted_target_values": vtrace_adjusted_target_values, - "mean_kl": mean_kl_loss, - } - return total_loss - - @override(EagerTFPolicyV2) - def stats_fn(self, train_batch: SampleBatch) -> Dict[str, TensorType]: - return { - "cur_lr": tf.cast(self.cur_lr, tf.float64), - "policy_loss": self.stats["policy_loss"], - "entropy": self.stats["entropy_loss"], - "entropy_coeff": tf.cast(self.entropy_coeff, tf.float64), - "var_gnorm": tf.linalg.global_norm(self.model.trainable_variables), - "vf_loss": self.stats["vf_loss"], - "vf_explained_var": explained_variance( - tf.reshape(self.stats["vtrace_adjusted_target_values"], [-1]), - tf.reshape(self.stats["values"], [-1]), - ), - "mean_kl": self.stats["mean_kl"], - } - - @override(EagerTFPolicyV2) - def get_batch_divisibility_req(self) -> int: - return self.config["rollout_fragment_length"] diff --git a/rllib/algorithms/appo/torch/appo_torch_policy_rlm.py b/rllib/algorithms/appo/torch/appo_torch_policy_rlm.py deleted file mode 100644 index 81bc072eca43..000000000000 --- a/rllib/algorithms/appo/torch/appo_torch_policy_rlm.py +++ /dev/null @@ -1,213 +0,0 @@ -import logging - -from ray.rllib.algorithms.impala.torch.vtrace_torch_v2 import ( - make_time_major, - vtrace_torch, -) -from ray.rllib.policy.torch_mixins import ( - EntropyCoeffSchedule, - LearningRateSchedule, - KLCoeffMixin, - TargetNetworkMixin, -) -from ray.rllib.algorithms.impala.impala_torch_policy import ( - VTraceOptimizer, -) -from ray.rllib.algorithms.ppo.ppo_torch_policy import validate_config -from ray.rllib.policy.sample_batch import SampleBatch -from ray.rllib.policy.torch_policy_v2 import TorchPolicyV2 -from 
ray.rllib.utils.annotations import override -from ray.rllib.utils.framework import try_import_torch -from ray.rllib.utils.torch_utils import ( - convert_to_torch_tensor, - explained_variance, - global_norm, -) - -torch, _ = try_import_torch() - -logger = logging.getLogger(__name__) - - -# TODO: Remove once we have a RLModule capable sampler class that can replace -# `Policy.compute_actions_from_input_dict()`. -class APPOTorchPolicyWithRLModule( - VTraceOptimizer, - LearningRateSchedule, - KLCoeffMixin, - EntropyCoeffSchedule, - TargetNetworkMixin, - TorchPolicyV2, -): - def __init__(self, observation_space, action_space, config): - validate_config(config) - # Initialize MixIns before super().__init__ because base class will call - # self.loss, which requires these MixIns to be initialized. - LearningRateSchedule.__init__(self, config["lr"], config["lr_schedule"]) - EntropyCoeffSchedule.__init__( - self, config["entropy_coeff"], config["entropy_coeff_schedule"] - ) - # Although this is a no-op, we call __init__ here to make it clear - # that base.__init__ will use the make_model() call. - # VTraceClipGradients.__init__(self) - VTraceOptimizer.__init__(self) - self.framework = "tf2" - KLCoeffMixin.__init__(self, config) - # GradStatsMixin.__init__(self) - TorchPolicyV2.__init__(self, observation_space, action_space, config) - # Construct the target model and make its weights the same as the model. - self.target_model = self.make_rl_module() - self.target_model.load_state_dict(self.model.state_dict()) - - # Initiate TargetNetwork ops after loss initialization. 
- self._initialize_loss_from_dummy_batch() - TargetNetworkMixin.__init__(self) - - @override(TorchPolicyV2) - def loss(self, model, dist_class, train_batch): - train_batch[SampleBatch.ACTION_LOGP] - train_batch[SampleBatch.ACTIONS] - train_batch[SampleBatch.REWARDS] - train_batch[SampleBatch.TERMINATEDS] - - seqs_len = train_batch.get(SampleBatch.SEQ_LENS) - rollout_frag_or_episode_len = ( - self.config["rollout_fragment_length"] if not seqs_len else None - ) - drop_last = self.config["vtrace_drop_last_ts"] - - target_policy_fwd_out = model.forward_train(train_batch) - values = target_policy_fwd_out[SampleBatch.VF_PREDS] - target_policy_dist = target_policy_fwd_out[SampleBatch.ACTION_DIST] - - old_target_policy_fwd_out = self.target_model.forward_train(train_batch) - old_target_policy_dist = old_target_policy_fwd_out[SampleBatch.ACTION_DIST] - - behaviour_actions_logp = train_batch[SampleBatch.ACTION_LOGP] - target_actions_logp = target_policy_dist.logp(train_batch[SampleBatch.ACTIONS]) - old_target_actions_logp = old_target_policy_dist.logp( - train_batch[SampleBatch.ACTIONS] - ) - behaviour_actions_logp_time_major = make_time_major( - behaviour_actions_logp, - trajectory_len=rollout_frag_or_episode_len, - recurrent_seq_len=seqs_len, - drop_last=drop_last, - ) - target_actions_logp_time_major = make_time_major( - target_actions_logp, - trajectory_len=rollout_frag_or_episode_len, - recurrent_seq_len=seqs_len, - drop_last=drop_last, - ) - old_target_actions_logp_time_major = make_time_major( - old_target_actions_logp, - trajectory_len=rollout_frag_or_episode_len, - recurrent_seq_len=seqs_len, - drop_last=drop_last, - ) - values_time_major = make_time_major( - values, - trajectory_len=rollout_frag_or_episode_len, - recurrent_seq_len=seqs_len, - drop_last=drop_last, - ) - bootstrap_value = values_time_major[-1] - rewards_time_major = make_time_major( - train_batch[SampleBatch.REWARDS], - trajectory_len=rollout_frag_or_episode_len, - recurrent_seq_len=seqs_len, - 
drop_last=drop_last, - ) - - # how to compute discouts? - # should they be pre computed? - discounts_time_major = ( - 1.0 - - make_time_major( - train_batch[SampleBatch.TERMINATEDS], - trajectory_len=rollout_frag_or_episode_len, - recurrent_seq_len=seqs_len, - drop_last=drop_last, - ).float() - ) * self.config["gamma"] - - # Note that vtrace will compute the main loop on the CPU for better performance. - vtrace_adjusted_target_values, pg_advantages = vtrace_torch( - target_action_log_probs=old_target_actions_logp_time_major, - behaviour_action_log_probs=behaviour_actions_logp_time_major, - discounts=discounts_time_major, - rewards=rewards_time_major, - values=values_time_major, - bootstrap_value=bootstrap_value, - clip_pg_rho_threshold=self.config["vtrace_clip_pg_rho_threshold"], - clip_rho_threshold=self.config["vtrace_clip_rho_threshold"], - ) - - is_ratio = torch.clip( - torch.exp( - behaviour_actions_logp_time_major - target_actions_logp_time_major - ), - 0.0, - 2.0, - ) - logp_ratio = is_ratio * torch.exp( - target_actions_logp_time_major - behaviour_actions_logp_time_major - ) - - clip_param = self.config["clip_param"] - surrogate_loss = torch.minimum( - pg_advantages * logp_ratio, - (pg_advantages * torch.clip(logp_ratio, 1 - clip_param, 1 + clip_param)), - ) - action_kl = old_target_policy_dist.kl(target_policy_dist) - mean_kl_loss = torch.mean(action_kl) - mean_pi_loss = -torch.mean(surrogate_loss) - - # The baseline loss. - delta = values_time_major - vtrace_adjusted_target_values - mean_vf_loss = 0.5 * torch.mean(delta**2) - - # The entropy loss. - mean_entropy_loss = -torch.mean(target_policy_dist.entropy()) - - # The summed weighted loss. 
- total_loss = ( - mean_pi_loss - + (mean_vf_loss * self.config["vf_loss_coeff"]) - + (mean_entropy_loss * self.entropy_coeff) - + (mean_kl_loss * self.kl_coeff) - ) - - self.stats = { - "total_loss": total_loss, - "policy_loss": mean_pi_loss, - "vf_loss": mean_vf_loss, - "values": values_time_major, - "entropy_loss": mean_entropy_loss, - "vtrace_adjusted_target_values": vtrace_adjusted_target_values, - "mean_kl": mean_kl_loss, - } - return total_loss - - @override(TorchPolicyV2) - def stats_fn(self, train_batch: SampleBatch): - return { - "cur_lr": convert_to_torch_tensor(self.cur_lr).type(torch.float64), - "policy_loss": self.stats["policy_loss"], - "entropy": self.stats["entropy_loss"], - "entropy_coeff": convert_to_torch_tensor(self.entropy_coeff).type( - torch.float64 - ), - "var_gnorm": global_norm(self.model.parameters()), - "vf_loss": self.stats["vf_loss"], - "vf_explained_var": explained_variance( - torch.reshape(self.stats["vtrace_adjusted_target_values"], [-1]), - torch.reshape(self.stats["values"], [-1]), - ), - "mean_kl": self.stats["mean_kl"], - } - - @override(TorchPolicyV2) - def get_batch_divisibility_req(self) -> int: - return self.config["rollout_fragment_length"] diff --git a/rllib/algorithms/impala/impala.py b/rllib/algorithms/impala/impala.py index 57c47e3ec940..d1ecfda9d6e4 100644 --- a/rllib/algorithms/impala/impala.py +++ b/rllib/algorithms/impala/impala.py @@ -46,10 +46,10 @@ SYNCH_WORKER_WEIGHTS_TIMER, SAMPLE_TIMER, ) +from ray.rllib.utils.metrics.learner_info import LearnerInfoBuilder from ray.rllib.utils.replay_buffers.multi_agent_replay_buffer import ReplayMode from ray.rllib.utils.replay_buffers.replay_buffer import _ALL_POLICIES - -from ray.rllib.utils.metrics.learner_info import LearnerInfoBuilder +from ray.rllib.utils.schedules.scheduler import Scheduler from ray.rllib.utils.typing import ( PartialAlgorithmConfigDict, PolicyID, @@ -371,6 +371,13 @@ def validate(self) -> None: # Check `entropy_coeff` for correctness. 
if self.entropy_coeff < 0.0: raise ValueError("`entropy_coeff` must be >= 0.0!") + # Entropy coeff schedule checking. + if self._enable_learner_api: + Scheduler.validate( + self.entropy_coeff_schedule, + "entropy_coeff_schedule", + "entropy coefficient", + ) # Check whether worker to aggregation-worker ratio makes sense. if self.num_aggregation_workers > self.num_rollout_workers: @@ -561,58 +568,39 @@ def get_default_policy_class( if not config["vtrace"]: raise ValueError("IMPALA with the learner API does not support non-VTrace ") - if config._enable_rl_module_api: - if config["framework"] == "tf2": - from ray.rllib.algorithms.impala.tf.impala_tf_policy_rlm import ( - ImpalaTfPolicyWithRLModule, + if config["framework"] == "torch": + if config["vtrace"]: + from ray.rllib.algorithms.impala.impala_torch_policy import ( + ImpalaTorchPolicy, ) - return ImpalaTfPolicyWithRLModule - if config["framework"] == "torch": - from ray.rllib.algorithms.impala.torch.impala_torch_policy_rlm import ( - ImpalaTorchPolicyWithRLModule, - ) - - return ImpalaTorchPolicyWithRLModule + return ImpalaTorchPolicy else: - raise ValueError( - f"IMPALA with the learner API does not support framework " - f"{config['framework']} " - ) - else: - if config["framework"] == "torch": - if config["vtrace"]: - from ray.rllib.algorithms.impala.impala_torch_policy import ( - ImpalaTorchPolicy, - ) + from ray.rllib.algorithms.a3c.a3c_torch_policy import A3CTorchPolicy - return ImpalaTorchPolicy - else: - from ray.rllib.algorithms.a3c.a3c_torch_policy import A3CTorchPolicy + return A3CTorchPolicy + elif config["framework"] == "tf": + if config["vtrace"]: + from ray.rllib.algorithms.impala.impala_tf_policy import ( + ImpalaTF1Policy, + ) - return A3CTorchPolicy - elif config["framework"] == "tf": - if config["vtrace"]: - from ray.rllib.algorithms.impala.impala_tf_policy import ( - ImpalaTF1Policy, - ) + return ImpalaTF1Policy + else: + from ray.rllib.algorithms.a3c.a3c_tf_policy import A3CTFPolicy - return 
ImpalaTF1Policy - else: - from ray.rllib.algorithms.a3c.a3c_tf_policy import A3CTFPolicy + return A3CTFPolicy + else: + if config["vtrace"]: + from ray.rllib.algorithms.impala.impala_tf_policy import ( + ImpalaTF2Policy, + ) - return A3CTFPolicy + return ImpalaTF2Policy else: - if config["vtrace"]: - from ray.rllib.algorithms.impala.impala_tf_policy import ( - ImpalaTF2Policy, - ) - - return ImpalaTF2Policy - else: - from ray.rllib.algorithms.a3c.a3c_tf_policy import A3CTFPolicy + from ray.rllib.algorithms.a3c.a3c_tf_policy import A3CTFPolicy - return A3CTFPolicy + return A3CTFPolicy @override(Algorithm) def setup(self, config: AlgorithmConfig): diff --git a/rllib/algorithms/impala/impala_learner.py b/rllib/algorithms/impala/impala_learner.py index db0d28e1f9a9..7e1153f4acc4 100644 --- a/rllib/algorithms/impala/impala_learner.py +++ b/rllib/algorithms/impala/impala_learner.py @@ -1,11 +1,11 @@ -from collections import defaultdict from dataclasses import dataclass -from typing import Any, List, Mapping, Optional, Union +from typing import Any, Dict, List, Mapping, Optional, Union import numpy as np import tree # pip install dm_tree from ray.rllib.core.learner.learner import Learner, LearnerHyperparameters +from ray.rllib.core.rl_module.rl_module import ModuleID from ray.rllib.policy.sample_batch import MultiAgentBatch from ray.rllib.utils.annotations import override from ray.rllib.utils.metrics import ( @@ -13,10 +13,13 @@ NUM_AGENT_STEPS_TRAINED, NUM_ENV_STEPS_TRAINED, ) -from ray.rllib.utils.schedules.piecewise_schedule import PiecewiseSchedule +from ray.rllib.utils.schedules.scheduler import Scheduler from ray.rllib.utils.typing import ResultDict +LEARNER_RESULTS_CURR_ENTROPY_COEFF_KEY = "curr_entropy_coeff" + + @dataclass class ImpalaHyperparameters(LearnerHyperparameters): """Hyperparameters for the ImpalaLearner sub-classes (framework specific). @@ -49,20 +52,26 @@ def build(self) -> None: super().build() # Build entropy coeff scheduling tools. 
- self.entropy_coeff_scheduler = None - if self.hps.entropy_coeff_schedule: - # Custom schedule, based on list of - # ([ts], [value to be reached by ts])-tuples. - self.entropy_coeff_schedule_per_module = defaultdict( - lambda: PiecewiseSchedule( - self.hps.entropy_coeff_schedule, - outside_value=self.hps.entropy_coeff_schedule[-1][-1], - framework=None, - ) - ) - self.curr_entropy_coeffs_per_module = defaultdict( - lambda: self._get_tensor_variable(self.hps.entropy_coeff) - ) + self.entropy_coeff_scheduler = Scheduler( + fixed_value=self.hps.entropy_coeff, + schedule=self.hps.entropy_coeff_schedule, + framework=self.framework, + device=self._device, + ) + + @override(Learner) + def additional_update_per_module( + self, module_id: ModuleID, timestep: int + ) -> Dict[str, Any]: + results = super().additional_update_per_module(module_id, timestep=timestep) + + # Update entropy coefficient via our Scheduler. + new_entropy_coeff = self.entropy_coeff_scheduler.update( + module_id, timestep=timestep + ) + results.update({LEARNER_RESULTS_CURR_ENTROPY_COEFF_KEY: new_entropy_coeff}) + + return results @override(Learner) def compile_results( diff --git a/rllib/algorithms/impala/impala_tf_policy.py b/rllib/algorithms/impala/impala_tf_policy.py index e1b66f533212..d8b830ef7653 100644 --- a/rllib/algorithms/impala/impala_tf_policy.py +++ b/rllib/algorithms/impala/impala_tf_policy.py @@ -297,13 +297,18 @@ def __init__( existing_model=existing_model, ) - GradStatsMixin.__init__(self) - VTraceClipGradients.__init__(self) - VTraceOptimizer.__init__(self) - LearningRateSchedule.__init__(self, config["lr"], config["lr_schedule"]) - EntropyCoeffSchedule.__init__( - self, config["entropy_coeff"], config["entropy_coeff_schedule"] - ) + # If Learner API is used, we don't need any loss-specific mixins. 
+ # However, we also would like to avoid creating special Policy-subclasses + # for this as the entire Policy concept will soon not be used anymore with + # the new Learner- and RLModule APIs. + if not self.config.get("_enable_learner_api"): + GradStatsMixin.__init__(self) + VTraceClipGradients.__init__(self) + VTraceOptimizer.__init__(self) + LearningRateSchedule.__init__(self, config["lr"], config["lr_schedule"]) + EntropyCoeffSchedule.__init__( + self, config["entropy_coeff"], config["entropy_coeff_schedule"] + ) # Note: this is a bit ugly, but loss and optimizer initialization must # happen after all the MixIns are initialized. diff --git a/rllib/algorithms/impala/impala_torch_policy.py b/rllib/algorithms/impala/impala_torch_policy.py index 73d4b3c7bd12..71aed0320601 100644 --- a/rllib/algorithms/impala/impala_torch_policy.py +++ b/rllib/algorithms/impala/impala_torch_policy.py @@ -201,13 +201,18 @@ def __init__(self, observation_space, action_space, config): ray.rllib.algorithms.impala.impala.ImpalaConfig().to_dict(), **config ) - VTraceOptimizer.__init__(self) - # Need to initialize learning rate variable before calling - # TorchPolicyV2.__init__. - LearningRateSchedule.__init__(self, config["lr"], config["lr_schedule"]) - EntropyCoeffSchedule.__init__( - self, config["entropy_coeff"], config["entropy_coeff_schedule"] - ) + # If Learner API is used, we don't need any loss-specific mixins. + # However, we also would like to avoid creating special Policy-subclasses + # for this as the entire Policy concept will soon not be used anymore with + # the new Learner- and RLModule APIs. + if not config.get("_enable_learner_api"): + VTraceOptimizer.__init__(self) + # Need to initialize learning rate variable before calling + # TorchPolicyV2.__init__. 
+ LearningRateSchedule.__init__(self, config["lr"], config["lr_schedule"]) + EntropyCoeffSchedule.__init__( + self, config["entropy_coeff"], config["entropy_coeff_schedule"] + ) TorchPolicyV2.__init__( self, @@ -217,7 +222,6 @@ def __init__(self, observation_space, action_space, config): max_seq_len=config["model"]["max_seq_len"], ) - # TODO: Don't require users to call this manually. self._initialize_loss_from_dummy_batch() @override(TorchPolicyV2) diff --git a/rllib/algorithms/impala/tests/test_impala_learner.py b/rllib/algorithms/impala/tests/test_impala_learner.py index 6adc936a45ba..8725434b9751 100644 --- a/rllib/algorithms/impala/tests/test_impala_learner.py +++ b/rllib/algorithms/impala/tests/test_impala_learner.py @@ -1,6 +1,7 @@ import unittest import numpy as np +import tree # pip install dm_tree import ray from ray.rllib.algorithms.impala import ImpalaConfig @@ -8,6 +9,7 @@ from ray.rllib.policy.sample_batch import SampleBatch from ray.rllib.utils.framework import try_import_torch, try_import_tf from ray.rllib.utils.test_utils import framework_iterator +from ray.rllib.utils.torch_utils import convert_to_torch_tensor torch, nn = try_import_torch() tf1, tf, _ = try_import_tf() @@ -77,11 +79,17 @@ def test_impala_loss(self): # Deprecate the current default and set it to {}. 
config.exploration_config = {} - for _ in framework_iterator(config, frameworks=["tf2", "torch"]): + for fw in framework_iterator(config, frameworks=["tf2", "torch"]): algo = config.build() policy = algo.get_policy() - train_batch = SampleBatch(FAKE_BATCH) + if fw == "tf2": + train_batch = SampleBatch( + tree.map_structure(lambda x: tf.convert_to_tensor(x), FAKE_BATCH) + ) + elif fw == "torch": + train_batch = convert_to_torch_tensor(SampleBatch(FAKE_BATCH)) + algo_config = config.copy(copy_frozen=False) algo_config.validate() algo_config.freeze() diff --git a/rllib/algorithms/impala/tests/test_impala_off_policyness.py b/rllib/algorithms/impala/tests/test_impala_off_policyness.py index 09600ff3f046..82a92916172f 100644 --- a/rllib/algorithms/impala/tests/test_impala_off_policyness.py +++ b/rllib/algorithms/impala/tests/test_impala_off_policyness.py @@ -1,4 +1,3 @@ -import itertools import unittest import ray @@ -6,7 +5,6 @@ from ray.rllib.utils.framework import try_import_tf from ray.rllib.utils.test_utils import ( check_compute_single_action, - check_off_policyness, framework_iterator, ) @@ -28,43 +26,29 @@ def test_impala_off_policyness(self): .environment("CartPole-v1") .resources(num_gpus=0) .rollouts(num_rollout_workers=4) + .training(_enable_learner_api=True) + .rl_module(_enable_rl_module_api=True) ) num_iterations = 3 num_aggregation_workers_options = [0, 1] - enable_rlm_learner_group_options = [True, False] - - default_exploration_config = config.exploration_config.copy() - - for permutation in itertools.product( - num_aggregation_workers_options, enable_rlm_learner_group_options - ): - num_aggregation_workers, enable_learner_api = permutation - for fw in framework_iterator( - config, with_eager_tracing=True, frameworks=["tf2"] + for num_aggregation_workers in num_aggregation_workers_options: + for _ in framework_iterator( + config, frameworks=("tf2", "torch"), with_eager_tracing=True ): - # TODO(avnishn): Enable this for torch when we merge the torch 
learner. - if enable_learner_api and fw != "tf2": - continue - config.training(_enable_learner_api=enable_learner_api) - config.rl_module(_enable_rl_module_api=enable_learner_api) - if enable_learner_api: - # We have to set exploration_config here manually because setting - # it through config.exploration() only deepupdates it - config.exploration_config = {} - else: - config.exploration_config = default_exploration_config + # We have to set exploration_config here manually because setting + # it through config.exploration() only deepupdates it + config.exploration_config = {} config.num_aggregation_workers = num_aggregation_workers print("aggregation-workers={}".format(config.num_aggregation_workers)) algo = config.build() for i in range(num_iterations): - results = algo.train() + algo.train() # TODO (Avnish): Add off-policiness check when the metrics are - # added back to the IMPALA Learner - if not enable_learner_api: - off_policy_ness = check_off_policyness(results, upper_limit=2.0) - print(f"off-policy'ness={off_policy_ness}") + # added back to the IMPALA Learner. 
+ # off_policy_ness = check_off_policyness(results, upper_limit=2.0) + # print(f"off-policy'ness={off_policy_ness}") check_compute_single_action( algo, diff --git a/rllib/algorithms/impala/tf/impala_tf_learner.py b/rllib/algorithms/impala/tf/impala_tf_learner.py index fa2481b5bbd5..fd04a6bf8d23 100644 --- a/rllib/algorithms/impala/tf/impala_tf_learner.py +++ b/rllib/algorithms/impala/tf/impala_tf_learner.py @@ -1,11 +1,9 @@ -from typing import Any, Dict, Mapping +from typing import Mapping from ray.rllib.algorithms.impala.impala_learner import ImpalaLearner from ray.rllib.algorithms.impala.tf.vtrace_tf_v2 import make_time_major, vtrace_tf2 -from ray.rllib.algorithms.ppo.ppo_learner import LEARNER_RESULTS_CURR_ENTROPY_COEFF_KEY from ray.rllib.core.learner.learner import ENTROPY_KEY from ray.rllib.core.learner.tf.tf_learner import TfLearner -from ray.rllib.core.rl_module.rl_module import ModuleID from ray.rllib.policy.sample_batch import SampleBatch from ray.rllib.utils.annotations import override from ray.rllib.utils.framework import try_import_tf @@ -102,21 +100,3 @@ def compute_loss_per_module( "vf_loss": mean_vf_loss, ENTROPY_KEY: -mean_entropy_loss, } - - @override(ImpalaLearner) - def additional_update_per_module( - self, module_id: ModuleID, timestep: int - ) -> Dict[str, Any]: - results = super().additional_update_per_module( - module_id, - timestep=timestep, - ) - - # Update entropy coefficient. 
- value = self.hps.entropy_coeff - if self.hps.entropy_coeff_schedule is not None: - value = self.entropy_coeff_schedule_per_module[module_id].value(t=timestep) - self.curr_entropy_coeffs_per_module[module_id].assign(value) - results.update({LEARNER_RESULTS_CURR_ENTROPY_COEFF_KEY: value}) - - return results diff --git a/rllib/algorithms/impala/tf/impala_tf_policy_rlm.py b/rllib/algorithms/impala/tf/impala_tf_policy_rlm.py deleted file mode 100644 index f24c6e88c9a1..000000000000 --- a/rllib/algorithms/impala/tf/impala_tf_policy_rlm.py +++ /dev/null @@ -1,165 +0,0 @@ -import logging -from typing import Dict, List, Union - -from ray.rllib.algorithms.ppo.ppo_tf_policy import validate_config -from ray.rllib.policy.sample_batch import SampleBatch -from ray.rllib.policy.tf_mixins import ( - EntropyCoeffSchedule, - LearningRateSchedule, -) -from ray.rllib.policy.eager_tf_policy_v2 import EagerTFPolicyV2 -from ray.rllib.utils.annotations import override -from ray.rllib.utils.deprecation import Deprecated -from ray.rllib.algorithms.ppo.tf.ppo_tf_rl_module import PPOTfRLModule -from ray.rllib.utils.framework import try_import_tf -from ray.rllib.utils.tf_utils import ( - explained_variance, -) -from ray.rllib.algorithms.impala.tf.vtrace_tf_v2 import make_time_major, vtrace_tf2 -from ray.rllib.utils.typing import TensorType - -tf1, tf, tfv = try_import_tf() - -logger = logging.getLogger(__name__) - - -class ImpalaTfPolicyWithRLModule( - LearningRateSchedule, - EntropyCoeffSchedule, - EagerTFPolicyV2, -): - def __init__(self, observation_space, action_space, config): - validate_config(config) - EagerTFPolicyV2.enable_eager_execution_if_necessary() - # Initialize MixIns before super().__init__ because base class will call - # self.loss, which requires these MixIns to be initialized. 
- LearningRateSchedule.__init__(self, config["lr"], config["lr_schedule"]) - EntropyCoeffSchedule.__init__( - self, config["entropy_coeff"], config["entropy_coeff_schedule"] - ) - EagerTFPolicyV2.__init__(self, observation_space, action_space, config) - - self.maybe_initialize_optimizer_and_loss() - - @Deprecated(new="ImpalaTfLearner.compute_loss_per_module()", error=False) - @override(EagerTFPolicyV2) - def loss( - self, - model: PPOTfRLModule, - dist_class, - train_batch: SampleBatch, - ) -> Union[TensorType, List[TensorType]]: - seq_len = train_batch.get(SampleBatch.SEQ_LENS) - rollout_frag_or_episode_len = ( - self.config["rollout_fragment_length"] if not seq_len else None - ) - drop_last = self.config["vtrace_drop_last_ts"] - - fwd_out = model.forward_train(train_batch) - - values = fwd_out[SampleBatch.VF_PREDS] - target_policy_dist = fwd_out[SampleBatch.ACTION_DIST] - - # this is probably a horribly inefficient way to do this. I should be able to - # compute this in a batch fashion - behaviour_actions_logp = train_batch[SampleBatch.ACTION_LOGP] - target_actions_logp = target_policy_dist.logp(train_batch[SampleBatch.ACTIONS]) - behaviour_actions_logp_time_major = make_time_major( - behaviour_actions_logp, - trajectory_len=rollout_frag_or_episode_len, - recurrent_seq_len=seq_len, - drop_last=drop_last, - ) - target_actions_logp_time_major = make_time_major( - target_actions_logp, - trajectory_len=rollout_frag_or_episode_len, - recurrent_seq_len=seq_len, - drop_last=drop_last, - ) - values_time_major = make_time_major( - values, - trajectory_len=rollout_frag_or_episode_len, - recurrent_seq_len=seq_len, - drop_last=drop_last, - ) - bootstrap_value = values_time_major[-1] - rewards_time_major = make_time_major( - train_batch[SampleBatch.REWARDS], - trajectory_len=rollout_frag_or_episode_len, - recurrent_seq_len=seq_len, - drop_last=drop_last, - ) - - # how to compute discouts? - # should they be pre computed? 
- discounts_time_major = ( - 1.0 - - tf.cast( - make_time_major( - train_batch[SampleBatch.TERMINATEDS], - trajectory_len=rollout_frag_or_episode_len, - recurrent_seq_len=seq_len, - drop_last=drop_last, - ), - dtype=tf.float32, - ) - ) * self.config["gamma"] - - # Note that vtrace will compute the main loop on the CPU for better performance. - vtrace_adjusted_target_values, pg_advantages = vtrace_tf2( - target_action_log_probs=target_actions_logp_time_major, - behaviour_action_log_probs=behaviour_actions_logp_time_major, - discounts=discounts_time_major, - rewards=rewards_time_major, - values=values_time_major, - bootstrap_value=bootstrap_value, - clip_pg_rho_threshold=self.config["vtrace_clip_pg_rho_threshold"], - clip_rho_threshold=self.config["vtrace_clip_rho_threshold"], - ) - - # The policy gradients loss. - pi_loss = -tf.reduce_sum(target_actions_logp_time_major * pg_advantages) - mean_pi_loss = -tf.reduce_mean(target_actions_logp_time_major * pg_advantages) - - # The baseline loss. - delta = values_time_major - vtrace_adjusted_target_values - vf_loss = 0.5 * tf.reduce_sum(tf.math.pow(delta, 2.0)) - mean_vf_loss = 0.5 * tf.reduce_mean(tf.math.pow(delta, 2.0)) - - # The entropy loss. - mean_entropy_loss = -tf.reduce_mean(target_policy_dist.entropy()) - - # The summed weighted loss. 
- total_loss = ( - pi_loss - + vf_loss * self.config["vf_loss_coeff"] - + mean_entropy_loss * self.entropy_coeff - ) - self.stats = { - "total_loss": total_loss, - "pi_loss": mean_pi_loss, - "vf_loss": mean_vf_loss, - "values": values_time_major, - "entropy_loss": mean_entropy_loss, - "vtrace_adjusted_target_values": vtrace_adjusted_target_values, - } - return total_loss - - @override(EagerTFPolicyV2) - def stats_fn(self, train_batch: SampleBatch) -> Dict[str, TensorType]: - return { - "cur_lr": tf.cast(self.cur_lr, tf.float64), - "policy_loss": self.stats["pi_loss"], - "entropy": self.stats["entropy_loss"], - "entropy_coeff": tf.cast(self.entropy_coeff, tf.float64), - "var_gnorm": tf.linalg.global_norm(self.model.trainable_variables), - "vf_loss": self.stats["vf_loss"], - "vf_explained_var": explained_variance( - tf.reshape(self.stats["vtrace_adjusted_target_values"], [-1]), - tf.reshape(self.stats["values"], [-1]), - ), - } - - @override(EagerTFPolicyV2) - def get_batch_divisibility_req(self) -> int: - return self.config["rollout_fragment_length"] diff --git a/rllib/algorithms/impala/torch/impala_torch_learner.py b/rllib/algorithms/impala/torch/impala_torch_learner.py index 907c2d4e3261..50dd9911823f 100644 --- a/rllib/algorithms/impala/torch/impala_torch_learner.py +++ b/rllib/algorithms/impala/torch/impala_torch_learner.py @@ -1,14 +1,12 @@ -from typing import Any, Dict, Mapping +from typing import Mapping from ray.rllib.algorithms.impala.impala_learner import ImpalaLearner from ray.rllib.algorithms.impala.torch.vtrace_torch_v2 import ( vtrace_torch, make_time_major, ) -from ray.rllib.algorithms.ppo.ppo_learner import LEARNER_RESULTS_CURR_ENTROPY_COEFF_KEY from ray.rllib.core.learner.learner import ENTROPY_KEY from ray.rllib.core.learner.torch.torch_learner import TorchLearner -from ray.rllib.core.rl_module.rl_module import ModuleID from ray.rllib.policy.sample_batch import SampleBatch from ray.rllib.utils.annotations import override from 
ray.rllib.utils.framework import try_import_torch @@ -118,21 +116,3 @@ def compute_loss_per_module( "vf_loss": mean_vf_loss, ENTROPY_KEY: -mean_entropy_loss, } - - @override(ImpalaLearner) - def additional_update_per_module( - self, module_id: ModuleID, timestep: int - ) -> Dict[str, Any]: - results = super().additional_update_per_module( - module_id, - timestep=timestep, - ) - - # Update entropy coefficient. - value = self.hps.entropy_coeff - if self.hps.entropy_coeff_schedule is not None: - value = self.entropy_coeff_schedule_per_module[module_id].value(t=timestep) - self.curr_entropy_coeffs_per_module[module_id].data = torch.tensor(value) - results.update({LEARNER_RESULTS_CURR_ENTROPY_COEFF_KEY: value}) - - return results diff --git a/rllib/algorithms/impala/torch/impala_torch_policy_rlm.py b/rllib/algorithms/impala/torch/impala_torch_policy_rlm.py deleted file mode 100644 index 6e10a0a23839..000000000000 --- a/rllib/algorithms/impala/torch/impala_torch_policy_rlm.py +++ /dev/null @@ -1,167 +0,0 @@ -import logging -from typing import Dict, List, Union - -from ray.rllib.algorithms.impala.torch.vtrace_torch_v2 import ( - make_time_major, - vtrace_torch, -) -from ray.rllib.algorithms.ppo.ppo_torch_policy import validate_config -from ray.rllib.algorithms.ppo.torch.ppo_torch_rl_module import PPOTorchRLModule -from ray.rllib.policy.sample_batch import SampleBatch -from ray.rllib.utils.torch_utils import convert_to_torch_tensor -from ray.rllib.policy.torch_mixins import ( - EntropyCoeffSchedule, - LearningRateSchedule, -) -from ray.rllib.policy.torch_policy_v2 import TorchPolicyV2 -from ray.rllib.utils.annotations import override, Deprecated -from ray.rllib.utils.framework import try_import_torch -from ray.rllib.utils.torch_utils import ( - explained_variance, - global_norm, -) -from ray.rllib.utils.typing import TensorType - -torch, nn = try_import_torch() - -logger = logging.getLogger(__name__) - - -class ImpalaTorchPolicyWithRLModule( - LearningRateSchedule, - 
EntropyCoeffSchedule, - TorchPolicyV2, -): - def __init__(self, observation_space, action_space, config): - validate_config(config) - TorchPolicyV2.__init__(self, observation_space, action_space, config) - # Initialize MixIns. - LearningRateSchedule.__init__(self, config["lr"], config["lr_schedule"]) - EntropyCoeffSchedule.__init__( - self, config["entropy_coeff"], config["entropy_coeff_schedule"] - ) - - # TODO: Don't require users to call this manually. - self._initialize_loss_from_dummy_batch() - - @Deprecated(new="ImpalaTorchLearner.compute_loss_per_module()", error=False) - @override(TorchPolicyV2) - def loss( - self, - model: PPOTorchRLModule, - dist_class, - train_batch: SampleBatch, - ) -> Union[TensorType, List[TensorType]]: - seq_len = train_batch.get(SampleBatch.SEQ_LENS) - rollout_frag_or_episode_len = ( - self.config["rollout_fragment_length"] if not seq_len else None - ) - drop_last = self.config["vtrace_drop_last_ts"] - - fwd_out = model.forward_train(train_batch) - - values = fwd_out[SampleBatch.VF_PREDS] - target_policy_dist = fwd_out[SampleBatch.ACTION_DIST] - - # this is probably a horribly inefficient way to do this. 
I should be able to - # compute this in a batch fashion - behaviour_actions_logp = train_batch[SampleBatch.ACTION_LOGP] - target_actions_logp = target_policy_dist.logp(train_batch[SampleBatch.ACTIONS]) - behaviour_actions_logp_time_major = make_time_major( - behaviour_actions_logp, - trajectory_len=rollout_frag_or_episode_len, - recurrent_seq_len=seq_len, - drop_last=drop_last, - ) - target_actions_logp_time_major = make_time_major( - target_actions_logp, - trajectory_len=rollout_frag_or_episode_len, - recurrent_seq_len=seq_len, - drop_last=drop_last, - ) - values_time_major = make_time_major( - values, - trajectory_len=rollout_frag_or_episode_len, - recurrent_seq_len=seq_len, - drop_last=drop_last, - ) - bootstrap_value = values_time_major[-1] - rewards_time_major = make_time_major( - train_batch[SampleBatch.REWARDS], - trajectory_len=rollout_frag_or_episode_len, - recurrent_seq_len=seq_len, - drop_last=drop_last, - ) - - # how to compute discouts? - # should they be pre computed? - discounts_time_major = ( - 1.0 - - make_time_major( - train_batch[SampleBatch.TERMINATEDS], - trajectory_len=rollout_frag_or_episode_len, - recurrent_seq_len=seq_len, - drop_last=drop_last, - ).type(dtype=torch.float32) - ) * self.config["gamma"] - - # Note that vtrace will compute the main loop on the CPU for better performance. - vtrace_adjusted_target_values, pg_advantages = vtrace_torch( - target_action_log_probs=target_actions_logp_time_major, - behaviour_action_log_probs=behaviour_actions_logp_time_major, - discounts=discounts_time_major, - rewards=rewards_time_major, - values=values_time_major, - bootstrap_value=bootstrap_value, - clip_pg_rho_threshold=self.config["vtrace_clip_pg_rho_threshold"], - clip_rho_threshold=self.config["vtrace_clip_rho_threshold"], - ) - - # The policy gradients loss. - pi_loss = -torch.sum(target_actions_logp_time_major * pg_advantages) - mean_pi_loss = -torch.mean(target_actions_logp_time_major * pg_advantages) - - # The baseline loss. 
- delta = values_time_major - vtrace_adjusted_target_values - vf_loss = 0.5 * torch.sum(torch.pow(delta, 2.0)) - mean_vf_loss = 0.5 * torch.mean(torch.pow(delta, 2.0)) - - # The entropy loss. - mean_entropy_loss = -torch.mean(target_policy_dist.entropy()) - - # The summed weighted loss. - total_loss = ( - pi_loss - + vf_loss * self.config["vf_loss_coeff"] - + mean_entropy_loss * self.entropy_coeff - ) - self.stats = { - "total_loss": total_loss, - "pi_loss": mean_pi_loss, - "vf_loss": mean_vf_loss, - "values": values_time_major, - "entropy_loss": mean_entropy_loss, - "vtrace_adjusted_target_values": vtrace_adjusted_target_values, - } - return total_loss - - @override(TorchPolicyV2) - def stats_fn(self, train_batch: SampleBatch) -> Dict[str, TensorType]: - return { - "cur_lr": convert_to_torch_tensor(self.cur_lr).type(torch.float64), - "policy_loss": self.stats["pi_loss"], - "entropy": self.stats["entropy_loss"], - "entropy_coeff": convert_to_torch_tensor(self.entropy_coeff).type( - torch.float64 - ), - "var_gnorm": global_norm(self.model.parameters()), - "vf_loss": self.stats["vf_loss"], - "vf_explained_var": explained_variance( - torch.reshape(self.stats["vtrace_adjusted_target_values"], [-1]), - torch.reshape(self.stats["values"], [-1]), - ), - } - - @override(TorchPolicyV2) - def get_batch_divisibility_req(self) -> int: - return self.config["rollout_fragment_length"] diff --git a/rllib/algorithms/ppo/ppo.py b/rllib/algorithms/ppo/ppo.py index 89481a835257..05f531117901 100644 --- a/rllib/algorithms/ppo/ppo.py +++ b/rllib/algorithms/ppo/ppo.py @@ -13,7 +13,6 @@ import logging from typing import List, Optional, Type, Union, TYPE_CHECKING -from ray.util.debug import log_once from ray.rllib.algorithms.algorithm import Algorithm from ray.rllib.algorithms.algorithm_config import AlgorithmConfig, NotProvided from ray.rllib.algorithms.pg import PGConfig @@ -25,21 +24,19 @@ from ray.rllib.core.rl_module.rl_module import SingleAgentRLModuleSpec from 
ray.rllib.execution.rollout_ops import ( standardize_fields, + synchronous_parallel_sample, ) from ray.rllib.execution.train_ops import ( train_one_step, multi_gpu_train_one_step, ) -from ray.rllib.utils.annotations import ExperimentalAPI from ray.rllib.policy.policy import Policy -from ray.rllib.utils.annotations import override +from ray.rllib.utils.annotations import ExperimentalAPI, override from ray.rllib.utils.deprecation import ( DEPRECATED_VALUE, deprecation_warning, ) from ray.rllib.utils.metrics.learner_info import LEARNER_STATS_KEY -from ray.rllib.utils.typing import ResultDict -from ray.rllib.execution.rollout_ops import synchronous_parallel_sample from ray.rllib.utils.metrics import ( NUM_AGENT_STEPS_SAMPLED, NUM_ENV_STEPS_SAMPLED, @@ -47,6 +44,9 @@ SAMPLE_TIMER, ALL_MODULES, ) +from ray.rllib.utils.schedules.scheduler import Scheduler +from ray.rllib.utils.typing import ResultDict +from ray.util.debug import log_once if TYPE_CHECKING: from ray.rllib.core.learner.learner import Learner @@ -325,6 +325,13 @@ def validate(self) -> None: # Check `entropy_coeff` for correctness. if self.entropy_coeff < 0.0: raise ValueError("`entropy_coeff` must be >= 0.0") + # Entropy coeff schedule checking. 
+ if self._enable_learner_api: + Scheduler.validate( + self.entropy_coeff_schedule, + "entropy_coeff_schedule", + "entropy coefficient", + ) class UpdateKL: @@ -371,32 +378,17 @@ def get_default_policy_class( ) -> Optional[Type[Policy]]: if config["framework"] == "torch": - if config._enable_rl_module_api: - from ray.rllib.algorithms.ppo.torch.ppo_torch_policy_rlm import ( - PPOTorchPolicyWithRLModule, - ) - - return PPOTorchPolicyWithRLModule - else: - from ray.rllib.algorithms.ppo.ppo_torch_policy import PPOTorchPolicy + from ray.rllib.algorithms.ppo.ppo_torch_policy import PPOTorchPolicy - return PPOTorchPolicy + return PPOTorchPolicy elif config["framework"] == "tf": from ray.rllib.algorithms.ppo.ppo_tf_policy import PPOTF1Policy return PPOTF1Policy else: - if config._enable_rl_module_api: - from ray.rllib.algorithms.ppo.tf.ppo_tf_policy_rlm import ( - PPOTfPolicyWithRLModule, - ) - - return PPOTfPolicyWithRLModule - else: - - from ray.rllib.algorithms.ppo.ppo_tf_policy import PPOTF2Policy + from ray.rllib.algorithms.ppo.ppo_tf_policy import PPOTF2Policy - return PPOTF2Policy + return PPOTF2Policy @ExperimentalAPI def training_step(self) -> ResultDict: diff --git a/rllib/algorithms/ppo/ppo_learner.py b/rllib/algorithms/ppo/ppo_learner.py index 576471b285ed..be16bbb53112 100644 --- a/rllib/algorithms/ppo/ppo_learner.py +++ b/rllib/algorithms/ppo/ppo_learner.py @@ -1,11 +1,12 @@ from collections import defaultdict from dataclasses import dataclass -from typing import List, Optional, Union +from typing import Any, Dict, List, Optional, Union from ray.rllib.core.learner.learner import LearnerHyperparameters from ray.rllib.core.learner.learner import Learner +from ray.rllib.core.rl_module.rl_module import ModuleID from ray.rllib.utils.annotations import override -from ray.rllib.utils.schedules.piecewise_schedule import PiecewiseSchedule +from ray.rllib.utils.schedules.scheduler import Scheduler LEARNER_RESULTS_VF_LOSS_UNCLIPPED_KEY = "vf_loss_unclipped" @@ -34,10 
+35,6 @@ class to configure your algorithm. entropy_coeff_schedule: Optional[List[List[Union[int, float]]]] = None vf_loss_coeff: float = None - # TODO: Move to base LearnerHyperparameter class (and handling of this setting - # into base Learners). - lr_schedule: Optional[List[List[Union[int, float]]]] = None - class PPOLearner(Learner): @override(Learner) @@ -45,25 +42,12 @@ def build(self) -> None: super().build() # Build entropy coeff scheduling tools. - self.entropy_coeff_scheduler = None - if self.hps.entropy_coeff_schedule: - # Custom schedule, based on list of - # ([ts], [value to be reached by ts])-tuples. - self.entropy_coeff_schedule_per_module = defaultdict( - lambda: PiecewiseSchedule( - self.hps.entropy_coeff_schedule, - outside_value=self.hps.entropy_coeff_schedule[-1][-1], - framework=None, - ) - ) - self.curr_entropy_coeffs_per_module = defaultdict( - lambda: self._get_tensor_variable(self.hps.entropy_coeff) - ) - # If no schedule, pin entropy coeff to its given (fixed) value. - else: - self.curr_entropy_coeffs_per_module = defaultdict( - lambda: self.hps.entropy_coeff - ) + self.entropy_coeff_scheduler = Scheduler( + fixed_value=self.hps.entropy_coeff, + schedule=self.hps.entropy_coeff_schedule, + framework=self.framework, + device=self._device, + ) # Set up KL coefficient variables (per module). # Note that the KL coeff is not controlled by a schedul, but seeks @@ -71,3 +55,21 @@ def build(self) -> None: self.curr_kl_coeffs_per_module = defaultdict( lambda: self._get_tensor_variable(self.hps.kl_coeff) ) + + @override(Learner) + def additional_update_per_module( + self, module_id: ModuleID, sampled_kl_values: dict, timestep: int + ) -> Dict[str, Any]: + results = super().additional_update_per_module( + module_id, + sampled_kl_values=sampled_kl_values, + timestep=timestep, + ) + + # Update entropy coefficient via our Scheduler. 
+ new_entropy_coeff = self.entropy_coeff_scheduler.update( + module_id, timestep=timestep + ) + results.update({LEARNER_RESULTS_CURR_ENTROPY_COEFF_KEY: new_entropy_coeff}) + + return results diff --git a/rllib/algorithms/ppo/ppo_tf_policy.py b/rllib/algorithms/ppo/ppo_tf_policy.py index a00f8c037eb6..76e8d0161689 100644 --- a/rllib/algorithms/ppo/ppo_tf_policy.py +++ b/rllib/algorithms/ppo/ppo_tf_policy.py @@ -89,11 +89,11 @@ def __init__( # Initialize MixIns. ValueNetworkMixin.__init__(self, config) - KLCoeffMixin.__init__(self, config) EntropyCoeffSchedule.__init__( self, config["entropy_coeff"], config["entropy_coeff_schedule"] ) LearningRateSchedule.__init__(self, config["lr"], config["lr_schedule"]) + KLCoeffMixin.__init__(self, config) # Note: this is a bit ugly, but loss and optimizer initialization must # happen after all the MixIns are initialized. diff --git a/rllib/algorithms/ppo/ppo_torch_policy.py b/rllib/algorithms/ppo/ppo_torch_policy.py index df45eefeb14c..26a52dbe4d2b 100644 --- a/rllib/algorithms/ppo/ppo_torch_policy.py +++ b/rllib/algorithms/ppo/ppo_torch_policy.py @@ -44,8 +44,6 @@ class PPOTorchPolicy( def __init__(self, observation_space, action_space, config): config = dict(ray.rllib.algorithms.ppo.ppo.PPOConfig().to_dict(), **config) - # TODO: Move into Policy API, if needed at all here. Why not move this into - # `PPOConfig`?. validate_config(config) TorchPolicyV2.__init__( @@ -63,7 +61,6 @@ def __init__(self, observation_space, action_space, config): ) KLCoeffMixin.__init__(self, config) - # TODO: Don't require users to call this manually. 
self._initialize_loss_from_dummy_batch() @override(TorchPolicyV2) diff --git a/rllib/algorithms/ppo/tests/test_ppo_learner.py b/rllib/algorithms/ppo/tests/test_ppo_learner.py index ceb726e7deac..e16ea35641e6 100644 --- a/rllib/algorithms/ppo/tests/test_ppo_learner.py +++ b/rllib/algorithms/ppo/tests/test_ppo_learner.py @@ -67,6 +67,7 @@ def test_loss(self): fcnet_activation="linear", vf_share_layers=False, ), + _enable_learner_api=True, ) .rl_module( _enable_rl_module_api=True, @@ -86,13 +87,11 @@ def test_loss(self): lambda x: torch.as_tensor(x).float(), train_batch ) else: - # tf train_batch = tree.map_structure( lambda x: tf.convert_to_tensor(x), train_batch ) algo_config = config.copy(copy_frozen=False) - algo_config.training(_enable_learner_api=True) algo_config.validate() algo_config.freeze() diff --git a/rllib/algorithms/ppo/tests/test_ppo_with_rl_module.py b/rllib/algorithms/ppo/tests/test_ppo_with_rl_module.py index 3f2fc1d007b1..c700ff7ab16e 100644 --- a/rllib/algorithms/ppo/tests/test_ppo_with_rl_module.py +++ b/rllib/algorithms/ppo/tests/test_ppo_with_rl_module.py @@ -126,14 +126,22 @@ def test_ppo_compilation_and_schedule_mixins(self): config.training(model=get_model_config(fw, lstm=lstm)) algo = config.build(env=env) + # TODO: Maybe add an API to get the Learner(s) instances within + # a learner group, remote or not. + learner = algo.learner_group._learner optim = algo.learner_group._learner._named_optimizers[ DEFAULT_POLICY_ID ] - entropy_coeff = algo.get_policy().entropy_coeff + # Check initial LR directly set in optimizer vs the first (ts=0) + # value from the schedule. lr = optim.param_groups[0]["lr"] if fw == "torch" else optim.lr + check(lr, config.lr_schedule[0][1]) + + # Check current entropy coeff value using the respective Scheduler. + entropy_coeff = learner.entropy_coeff_scheduler.get_current_value( + DEFAULT_POLICY_ID + ) check(entropy_coeff, 0.1) - # Check initial LR directly set in optimizer. 
- check(lr, config.lr) for i in range(num_iterations): results = algo.train() @@ -159,6 +167,7 @@ def test_ppo_exploration_setup(self): enable_connectors=True, ) .rl_module(_enable_rl_module_api=True) + .training(_enable_learner_api=True) ) obs = np.array(0) @@ -166,14 +175,14 @@ def test_ppo_exploration_setup(self): config, frameworks=("torch", "tf2"), with_eager_tracing=True ): # Default Agent should be setup with StochasticSampling. - trainer = config.build() + algo = config.build() # explore=False, always expect the same (deterministic) action. - a_ = trainer.compute_single_action( + a_ = algo.compute_single_action( obs, explore=False, prev_action=np.array(2), prev_reward=np.array(1.0) ) for _ in range(50): - a = trainer.compute_single_action( + a = algo.compute_single_action( obs, explore=False, prev_action=np.array(2), @@ -185,12 +194,12 @@ def test_ppo_exploration_setup(self): actions = [] for _ in range(300): actions.append( - trainer.compute_single_action( + algo.compute_single_action( obs, prev_action=np.array(2), prev_reward=np.array(1.0) ) ) check(np.mean(actions), 1.5, atol=0.2) - trainer.stop() + algo.stop() def test_ppo_free_log_std_with_rl_modules(self): """Tests the free log std option works.""" @@ -216,25 +225,22 @@ def test_ppo_free_log_std_with_rl_modules(self): ) for fw in framework_iterator(config, frameworks=("torch", "tf2")): - trainer = config.build() - policy = trainer.get_policy() + algo = config.build() + policy = algo.get_policy() + learner = algo.learner_group._learner + module = learner.module[DEFAULT_POLICY_ID] # Check the free log std var is created. 
if fw == "torch": - matching = [ - v for (n, v) in policy.model.named_parameters() if "log_std" in n - ] + matching = [v for (n, v) in module.named_parameters() if "log_std" in n] else: matching = [ - v for v in policy.model.trainable_variables if "log_std" in str(v) + v for v in module.trainable_variables if "log_std" in str(v) ] assert len(matching) == 1, matching log_std_var = matching[0] - # linter yells at you if you don't pass in the parameters. - # reason: https://docs.python-guide.org/writing/gotchas/ - # #late-binding-closures - def get_value(fw=fw, policy=policy, log_std_var=log_std_var): + def get_value(): if fw == "torch": return log_std_var.detach().cpu().numpy()[0] else: @@ -244,14 +250,13 @@ def get_value(fw=fw, policy=policy, log_std_var=log_std_var): init_std = get_value() assert init_std == 0.0, init_std batch = compute_gae_for_sample_batch(policy, PENDULUM_FAKE_BATCH.copy()) - if fw == "torch": - batch = policy._lazy_tensor_dict(batch) - policy.learn_on_batch(batch) + batch = policy._lazy_tensor_dict(batch) + algo.learner_group.update(batch.as_multi_agent()) # Check the variable is updated. 
post_std = get_value() assert post_std != 0.0, post_std - trainer.stop() + algo.stop() if __name__ == "__main__": diff --git a/rllib/algorithms/ppo/tf/ppo_tf_learner.py b/rllib/algorithms/ppo/tf/ppo_tf_learner.py index e65f65868825..6ab8960a326a 100644 --- a/rllib/algorithms/ppo/tf/ppo_tf_learner.py +++ b/rllib/algorithms/ppo/tf/ppo_tf_learner.py @@ -3,7 +3,6 @@ from ray.rllib.algorithms.ppo.ppo_learner import ( LEARNER_RESULTS_KL_KEY, - LEARNER_RESULTS_CURR_ENTROPY_COEFF_KEY, LEARNER_RESULTS_CURR_KL_COEFF_KEY, LEARNER_RESULTS_VF_EXPLAINED_VAR_KEY, LEARNER_RESULTS_VF_LOSS_UNCLIPPED_KEY, @@ -97,7 +96,8 @@ def compute_loss_per_module( total_loss = tf.reduce_mean( -surrogate_loss + self.hps.vf_loss_coeff * vf_loss_clipped - - self.curr_entropy_coeffs_per_module[module_id] * curr_entropy + - self.entropy_coeff_scheduler.get_current_value(module_id) * curr_entropy + # - self.curr_entropy_coeffs_per_module[module_id] * curr_entropy ) # Add mean_kl_loss (already processed through `reduce_mean_valid`), @@ -139,11 +139,4 @@ def additional_update_per_module( curr_var.assign(curr_var * 0.5) results.update({LEARNER_RESULTS_CURR_KL_COEFF_KEY: curr_var.numpy()}) - # Update entropy coefficient. 
- value = self.hps.entropy_coeff - if self.hps.entropy_coeff_schedule is not None: - value = self.entropy_coeff_schedule_per_module[module_id].value(t=timestep) - self.curr_entropy_coeffs_per_module[module_id].assign(value) - results.update({LEARNER_RESULTS_CURR_ENTROPY_COEFF_KEY: value}) - return results diff --git a/rllib/algorithms/ppo/tf/ppo_tf_policy_rlm.py b/rllib/algorithms/ppo/tf/ppo_tf_policy_rlm.py deleted file mode 100644 index c99a17ad840a..000000000000 --- a/rllib/algorithms/ppo/tf/ppo_tf_policy_rlm.py +++ /dev/null @@ -1,185 +0,0 @@ -import logging -from typing import Dict, List, Union - -from ray.rllib.algorithms.ppo.ppo_tf_policy import validate_config -from ray.rllib.evaluation.postprocessing import ( - Postprocessing, - compute_gae_for_sample_batch, -) -from ray.rllib.models.modelv2 import ModelV2 -from ray.rllib.policy.sample_batch import SampleBatch -from ray.rllib.policy.tf_mixins import ( - EntropyCoeffSchedule, - KLCoeffMixin, - LearningRateSchedule, -) -from ray.rllib.policy.eager_tf_policy_v2 import EagerTFPolicyV2 -from ray.rllib.utils.annotations import override -from ray.rllib.utils.framework import try_import_tf -from ray.rllib.utils.nested_dict import NestedDict -from ray.rllib.utils.tf_utils import ( - explained_variance, - warn_if_infinite_kl_divergence, -) - -from ray.rllib.utils.typing import TensorType - -tf1, tf, tfv = try_import_tf() - -logger = logging.getLogger(__name__) - - -class PPOTfPolicyWithRLModule( - LearningRateSchedule, - EntropyCoeffSchedule, - KLCoeffMixin, - EagerTFPolicyV2, -): - """PyTorch policy class used with PPO. - - This class is copied from PPOTFPolicy and is modified to support RLModules. - Some subtle differences: - - if config._enable_rl_module api is true make_rl_module should be implemented by - the policy the policy is assumed to be compatible with rl_modules (i.e. self.model - would be an RLModule) - - Tower stats no longer belongs to the model (i.e. 
RLModule) instead it belongs to - the policy itself. - - Connectors should be enabled to use this policy - - So far it only works for vectorized obs and action spaces (Fully connected neural - networks). we need model catalog to work for other obs and action spaces. - - # TODO: In the future we will deprecate doing all phases of training, exploration, - # and inference via one policy abstraction. Instead, we will use separate - # abstractions for each phase. For training (i.e. gradient updates, given the - # sample that have been collected) we will use Learner which will own one or - # possibly many RLModules, and RLOptimizer. For exploration, we will use RLSampler - # which will own RLModule, and RLTrajectoryProcessor. The exploration and inference - # phase details are TBD but the whole point is to make rllib extremely modular. - """ - - def __init__(self, observation_space, action_space, config): - # TODO: Move into Policy API, if needed at all here. Why not move this into - # `PPOConfig`?. - self.framework = "tf2" - EagerTFPolicyV2.enable_eager_execution_if_necessary() - validate_config(config) - # Initialize MixIns. 
- LearningRateSchedule.__init__(self, config["lr"], config["lr_schedule"]) - EntropyCoeffSchedule.__init__( - self, config["entropy_coeff"], config["entropy_coeff_schedule"] - ) - KLCoeffMixin.__init__(self, config) - EagerTFPolicyV2.__init__(self, observation_space, action_space, config) - - self.maybe_initialize_optimizer_and_loss() - - @override(EagerTFPolicyV2) - def loss( - self, - model: Union[ModelV2, "tf.keras.Model"], - dist_class, - train_batch: SampleBatch, - ) -> Union[TensorType, List[TensorType]]: - - if not isinstance(train_batch, NestedDict): - train_batch = NestedDict(train_batch) - fwd_out = model.forward_train(train_batch) - curr_action_dist = fwd_out[SampleBatch.ACTION_DIST] - - action_dist_class = type(fwd_out[SampleBatch.ACTION_DIST]) - prev_action_dist = action_dist_class.from_logits( - train_batch[SampleBatch.ACTION_DIST_INPUTS] - ) - - logp_ratio = tf.exp( - fwd_out[SampleBatch.ACTION_LOGP] - train_batch[SampleBatch.ACTION_LOGP] - ) - - # Only calculate kl loss if necessary (kl-coeff > 0.0). - if self.config["kl_coeff"] > 0.0: - action_kl = prev_action_dist.kl(curr_action_dist) - mean_kl_loss = tf.reduce_mean(action_kl) - warn_if_infinite_kl_divergence(self, mean_kl_loss) - else: - mean_kl_loss = tf.constant(0.0) - - curr_entropy = fwd_out["entropy"] - mean_entropy = tf.reduce_mean(curr_entropy) - - surrogate_loss = tf.minimum( - train_batch[Postprocessing.ADVANTAGES] * logp_ratio, - train_batch[Postprocessing.ADVANTAGES] - * tf.clip_by_value( - logp_ratio, - 1 - self.config["clip_param"], - 1 + self.config["clip_param"], - ), - ) - - # Compute a value function loss. 
- if self.config["use_critic"]: - value_fn_out = fwd_out[SampleBatch.VF_PREDS] - vf_loss = tf.math.square( - value_fn_out - train_batch[Postprocessing.VALUE_TARGETS] - ) - vf_loss_clipped = tf.clip_by_value( - vf_loss, - 0, - self.config["vf_clip_param"], - ) - mean_vf_loss = tf.reduce_mean(vf_loss_clipped) - mean_vf_unclipped_loss = tf.reduce_mean(vf_loss) - # Ignore the value function. - else: - mean_vf_unclipped_loss = tf.constant(0.0) - value_fn_out = vf_loss_clipped = mean_vf_loss = tf.constant(0.0) - - total_loss = tf.reduce_mean( - -surrogate_loss - + self.config["vf_loss_coeff"] * vf_loss_clipped - - self.entropy_coeff * curr_entropy - ) - # Add mean_kl_loss (already processed through `reduce_mean_valid`), - # if necessary. - if self.config["kl_coeff"] > 0.0: - total_loss += self.kl_coeff * mean_kl_loss - - # Store stats in policy for stats_fn. - self._total_loss = total_loss - self._mean_policy_loss = tf.reduce_mean(-surrogate_loss) - self._mean_vf_loss = mean_vf_loss - self._unclipped_mean_vf_loss = mean_vf_unclipped_loss - self._mean_entropy = mean_entropy - # Backward compatibility: Deprecate self._mean_kl. 
- self._mean_kl_loss = self._mean_kl = mean_kl_loss - self._value_fn_out = value_fn_out - self._value_mean = tf.reduce_mean(value_fn_out) - - return total_loss - - @override(EagerTFPolicyV2) - def stats_fn(self, train_batch: SampleBatch) -> Dict[str, TensorType]: - return { - "cur_kl_coeff": tf.cast(self.kl_coeff, tf.float64), - "cur_lr": tf.cast(self.cur_lr, tf.float64), - "total_loss": self._total_loss, - "policy_loss": self._mean_policy_loss, - "vf_loss": self._mean_vf_loss, - "unclipped_vf_loss": self._unclipped_mean_vf_loss, - "vf_explained_var": explained_variance( - train_batch[Postprocessing.VALUE_TARGETS], self._value_fn_out - ), - "kl": self._mean_kl_loss, - "entropy": self._mean_entropy, - "entropy_coeff": tf.cast(self.entropy_coeff, tf.float64), - "value_mean": tf.cast(self._value_mean, tf.float64), - } - - @override(EagerTFPolicyV2) - def postprocess_trajectory( - self, sample_batch, other_agent_batches=None, episode=None - ): - sample_batch = super().postprocess_trajectory(sample_batch) - return compute_gae_for_sample_batch( - self, sample_batch, other_agent_batches, episode - ) diff --git a/rllib/algorithms/ppo/torch/ppo_torch_learner.py b/rllib/algorithms/ppo/torch/ppo_torch_learner.py index 675539f50f5c..aa32a956d978 100644 --- a/rllib/algorithms/ppo/torch/ppo_torch_learner.py +++ b/rllib/algorithms/ppo/torch/ppo_torch_learner.py @@ -3,7 +3,6 @@ from ray.rllib.algorithms.ppo.ppo_learner import ( LEARNER_RESULTS_KL_KEY, - LEARNER_RESULTS_CURR_ENTROPY_COEFF_KEY, LEARNER_RESULTS_CURR_KL_COEFF_KEY, LEARNER_RESULTS_VF_EXPLAINED_VAR_KEY, LEARNER_RESULTS_VF_LOSS_UNCLIPPED_KEY, @@ -93,7 +92,7 @@ def compute_loss_per_module( total_loss = torch.mean( -surrogate_loss + self.hps.vf_loss_coeff * vf_loss_clipped - - self.curr_entropy_coeffs_per_module[module_id] * curr_entropy + - self.entropy_coeff_scheduler.get_current_value(module_id) * curr_entropy ) # Add mean_kl_loss (already processed through `reduce_mean_valid`), @@ -135,11 +134,4 @@ def 
additional_update_per_module( curr_var.data *= 0.5 results.update({LEARNER_RESULTS_CURR_KL_COEFF_KEY: curr_var.item()}) - # Update entropy coefficient. - value = self.hps.entropy_coeff - if self.hps.entropy_coeff_schedule is not None: - value = self.entropy_coeff_schedule_per_module[module_id].value(t=timestep) - self.curr_entropy_coeffs_per_module[module_id].data = torch.tensor(value) - results.update({LEARNER_RESULTS_CURR_ENTROPY_COEFF_KEY: value}) - return results diff --git a/rllib/algorithms/ppo/torch/ppo_torch_policy_rlm.py b/rllib/algorithms/ppo/torch/ppo_torch_policy_rlm.py deleted file mode 100644 index 04ac92fc2ba5..000000000000 --- a/rllib/algorithms/ppo/torch/ppo_torch_policy_rlm.py +++ /dev/null @@ -1,246 +0,0 @@ -import logging -from typing import Dict, List, Type, Union - -from ray.rllib.algorithms.ppo.ppo_tf_policy import validate_config -from ray.rllib.evaluation.postprocessing import ( - Postprocessing, - compute_gae_for_sample_batch, -) -from ray.rllib.models.action_dist import ActionDistribution -from ray.rllib.models.modelv2 import ModelV2 -from ray.rllib.policy.sample_batch import SampleBatch -from ray.rllib.policy.torch_mixins import ( - EntropyCoeffSchedule, - KLCoeffMixin, - LearningRateSchedule, -) -from ray.rllib.policy.torch_policy_v2 import TorchPolicyV2 -from ray.rllib.utils.annotations import override -from ray.rllib.utils.framework import try_import_torch -from ray.rllib.utils.numpy import convert_to_numpy -from ray.rllib.utils.torch_utils import ( - apply_grad_clipping, - explained_variance, - sequence_mask, - warn_if_infinite_kl_divergence, -) -from ray.rllib.utils.typing import TensorType - -torch, nn = try_import_torch() - -logger = logging.getLogger(__name__) - - -class PPOTorchPolicyWithRLModule( - LearningRateSchedule, - EntropyCoeffSchedule, - KLCoeffMixin, - TorchPolicyV2, -): - """PyTorch policy class used with PPO. - - This class is copied from PPOTorchPolicyV2 and is modified to support RLModules. 
- Some subtle differences: - - if config._enable_rl_module api is true make_rl_module should be implemented by - the policy the policy is assumed to be compatible with rl_modules (i.e. self.model - would be an RLModule) - - Tower stats no longer belongs to the model (i.e. RLModule) instead it belongs to - the policy itself. - - Connectors should be enabled to use this policy - - So far it only works for vectorized obs and action spaces (Fully connected neural - networks). we need model catalog to work for other obs and action spaces. - - # TODO: In the future we will deprecate doing all phases of training, exploration, - # and inference via one policy abstraction. Instead, we will use separate - # abstractions for each phase. For training (i.e. gradient updates, given the - # sample that have been collected) we will use Learner which will own one or - # possibly many RLModules, and RLOptimizer. For exploration, we will use RLSampler - # which will own RLModule, and RLTrajectoryProcessor. The exploration and inference - # phase details are TBD but the whole point is to make rllib extremely modular. - """ - - def __init__(self, observation_space, action_space, config): - # TODO: Move into Policy API, if needed at all here. Why not move this into - # `PPOConfig`?. - validate_config(config) - - TorchPolicyV2.__init__( - self, - observation_space, - action_space, - config, - max_seq_len=config["model"]["max_seq_len"], - ) - - LearningRateSchedule.__init__(self, config["lr"], config["lr_schedule"]) - EntropyCoeffSchedule.__init__( - self, config["entropy_coeff"], config["entropy_coeff_schedule"] - ) - KLCoeffMixin.__init__(self, config) - - # TODO: Don't require users to call this manually. - self._initialize_loss_from_dummy_batch() - - @override(TorchPolicyV2) - def loss( - self, - model: ModelV2, - dist_class: Type[ActionDistribution], - train_batch: SampleBatch, - ) -> Union[TensorType, List[TensorType]]: - """Compute loss for Proximal Policy Objective. 
- - Args: - model: The Model to calculate the loss for. - dist_class: The action distr. class. - train_batch: The training data. - - Returns: - The PPO loss tensor given the input batch. - """ - - fwd_out = model.forward_train(train_batch) - curr_action_dist = fwd_out[SampleBatch.ACTION_DIST] - state = fwd_out.get("state_out", {}) - - # TODO (Kourosh): come back to RNNs later - # RNN case: Mask away 0-padded chunks at end of time axis. - if state: - B = len(train_batch[SampleBatch.SEQ_LENS]) - max_seq_len = train_batch[SampleBatch.OBS].shape[0] // B - mask = sequence_mask( - train_batch[SampleBatch.SEQ_LENS], - max_seq_len, - time_major=self.config["model"]["_time_major"], - ) - mask = torch.reshape(mask, [-1]) - num_valid = torch.sum(mask) - - def reduce_mean_valid(t): - return torch.sum(t[mask]) / num_valid - - # non-RNN case: No masking. - else: - mask = None - reduce_mean_valid = torch.mean - - action_dist_class = type(fwd_out[SampleBatch.ACTION_DIST]) - prev_action_dist = action_dist_class.from_logits( - train_batch[SampleBatch.ACTION_DIST_INPUTS] - ) - - logp_ratio = torch.exp( - fwd_out[SampleBatch.ACTION_LOGP] - train_batch[SampleBatch.ACTION_LOGP] - ) - - # Only calculate kl loss if necessary (kl-coeff > 0.0). - if self.config["kl_coeff"] > 0.0: - action_kl = prev_action_dist.kl(curr_action_dist) - mean_kl_loss = reduce_mean_valid(action_kl) - # TODO smorad: should we do anything besides warn? Could discard KL term - # for this update - warn_if_infinite_kl_divergence(self, mean_kl_loss) - else: - mean_kl_loss = torch.tensor(0.0, device=logp_ratio.device) - - curr_entropy = fwd_out["entropy"] - mean_entropy = reduce_mean_valid(curr_entropy) - - surrogate_loss = torch.min( - train_batch[Postprocessing.ADVANTAGES] * logp_ratio, - train_batch[Postprocessing.ADVANTAGES] - * torch.clamp( - logp_ratio, 1 - self.config["clip_param"], 1 + self.config["clip_param"] - ), - ) - - # Compute a value function loss. 
- if self.config["use_critic"]: - value_fn_out = fwd_out[SampleBatch.VF_PREDS] - vf_loss = torch.pow( - value_fn_out - train_batch[Postprocessing.VALUE_TARGETS], 2.0 - ) - vf_loss_clipped = torch.clamp(vf_loss, 0, self.config["vf_clip_param"]) - mean_vf_loss = reduce_mean_valid(vf_loss_clipped) - mean_vf_unclipped_loss = reduce_mean_valid(vf_loss) - # Ignore the value function. - else: - value_fn_out = torch.tensor(0.0).to(surrogate_loss.device) - mean_vf_unclipped_loss = vf_loss_clipped = mean_vf_loss = torch.tensor( - 0.0 - ).to(surrogate_loss.device) - - total_loss = reduce_mean_valid( - -surrogate_loss - + self.config["vf_loss_coeff"] * vf_loss_clipped - - self.entropy_coeff * curr_entropy - ) - - # Add mean_kl_loss (already processed through `reduce_mean_valid`), - # if necessary. - if self.config["kl_coeff"] > 0.0: - total_loss += self.kl_coeff * mean_kl_loss - - # TODO (Kourosh) Where would tower_stats go? How should stats_fn be implemented - # here? - # Store values for stats function in model (tower), such that for - # multi-GPU, we do not override them during the parallel loss phase. - self.tower_stats[model]["total_loss"] = total_loss - self.tower_stats[model]["mean_policy_loss"] = reduce_mean_valid(-surrogate_loss) - self.tower_stats[model]["mean_vf_loss"] = mean_vf_loss - self.tower_stats[model]["unclipped_vf_loss"] = mean_vf_unclipped_loss - self.tower_stats[model]["vf_explained_var"] = explained_variance( - train_batch[Postprocessing.VALUE_TARGETS], value_fn_out - ) - self.tower_stats[model]["mean_entropy"] = mean_entropy - self.tower_stats[model]["mean_kl_loss"] = mean_kl_loss - - return total_loss - - # TODO: Make this an event-style subscription (e.g.: - # "after_gradients_computed"). 
- @override(TorchPolicyV2) - def extra_grad_process(self, local_optimizer, loss): - return apply_grad_clipping(self, local_optimizer, loss) - - @override(TorchPolicyV2) - def stats_fn(self, train_batch: SampleBatch) -> Dict[str, TensorType]: - return convert_to_numpy( - { - "cur_kl_coeff": self.kl_coeff, - "cur_lr": self.cur_lr, - "total_loss": torch.mean( - torch.stack(self.get_tower_stats("total_loss")) - ), - "policy_loss": torch.mean( - torch.stack(self.get_tower_stats("mean_policy_loss")) - ), - "vf_loss": torch.mean( - torch.stack(self.get_tower_stats("mean_vf_loss")) - ), - "vf_explained_var": torch.mean( - torch.stack(self.get_tower_stats("vf_explained_var")) - ), - "kl": torch.mean(torch.stack(self.get_tower_stats("mean_kl_loss"))), - "entropy": torch.mean( - torch.stack(self.get_tower_stats("mean_entropy")) - ), - "entropy_coeff": self.entropy_coeff, - "unclipped_vf_loss": torch.mean( - torch.stack(self.get_tower_stats("unclipped_vf_loss")) - ), - } - ) - - @override(TorchPolicyV2) - def postprocess_trajectory( - self, sample_batch, other_agent_batches=None, episode=None - ): - # Do all post-processing always with no_grad(). - # Not using this here will introduce a memory leak - # in torch (issue #6962). - # TODO: no_grad still necessary? 
- with torch.no_grad(): - return compute_gae_for_sample_batch( - self, sample_batch, other_agent_batches, episode - ) diff --git a/rllib/algorithms/tests/test_algorithm_config.py b/rllib/algorithms/tests/test_algorithm_config.py index cc949c45f0e5..7b6804ea92cc 100644 --- a/rllib/algorithms/tests/test_algorithm_config.py +++ b/rllib/algorithms/tests/test_algorithm_config.py @@ -179,6 +179,7 @@ def test_rl_module_api(self): .framework("torch") .rollouts(enable_connectors=True) .rl_module(_enable_rl_module_api=True) + .training(_enable_learner_api=True) ) config.validate() @@ -328,7 +329,11 @@ def get_default_rl_module_spec(self): ######################################## # This is the simplest case where we have to construct the marl module based on # the default specs only. - config = SingleAgentAlgoConfig().rl_module(_enable_rl_module_api=True) + config = ( + SingleAgentAlgoConfig() + .rl_module(_enable_rl_module_api=True) + .training(_enable_learner_api=True) + ) config.validate() spec, expected = self._get_expected_marl_spec(config, DiscreteBCTorchModule) @@ -343,14 +348,18 @@ def get_default_rl_module_spec(self): ######################################## # This is the case where we pass in a multi-agent RLModuleSpec that asks the # algorithm to assign a specific type of RLModule class to certain module_ids. 
- config = SingleAgentAlgoConfig().rl_module( - _enable_rl_module_api=True, - rl_module_spec=MultiAgentRLModuleSpec( - module_specs={ - "p1": SingleAgentRLModuleSpec(module_class=CustomRLModule1), - "p2": SingleAgentRLModuleSpec(module_class=CustomRLModule1), - } - ), + config = ( + SingleAgentAlgoConfig() + .rl_module( + _enable_rl_module_api=True, + rl_module_spec=MultiAgentRLModuleSpec( + module_specs={ + "p1": SingleAgentRLModuleSpec(module_class=CustomRLModule1), + "p2": SingleAgentRLModuleSpec(module_class=CustomRLModule1), + }, + ), + ) + .training(_enable_learner_api=True) ) config.validate() @@ -360,9 +369,13 @@ def get_default_rl_module_spec(self): ######################################## # This is the case where we ask the algorithm to assign a specific type of # RLModule class to ALL module_ids. - config = SingleAgentAlgoConfig().rl_module( - _enable_rl_module_api=True, - rl_module_spec=SingleAgentRLModuleSpec(module_class=CustomRLModule1), + config = ( + SingleAgentAlgoConfig() + .rl_module( + _enable_rl_module_api=True, + rl_module_spec=SingleAgentRLModuleSpec(module_class=CustomRLModule1), + ) + .training(_enable_learner_api=True) ) config.validate() @@ -377,11 +390,15 @@ def get_default_rl_module_spec(self): ######################################## # This is an alternative way to ask the algorithm to assign a specific type of # RLModule class to ALL module_ids. 
- config = SingleAgentAlgoConfig().rl_module( - _enable_rl_module_api=True, - rl_module_spec=MultiAgentRLModuleSpec( - module_specs=SingleAgentRLModuleSpec(module_class=CustomRLModule1) - ), + config = ( + SingleAgentAlgoConfig() + .rl_module( + _enable_rl_module_api=True, + rl_module_spec=MultiAgentRLModuleSpec( + module_specs=SingleAgentRLModuleSpec(module_class=CustomRLModule1) + ), + ) + .training(_enable_learner_api=True) ) config.validate() @@ -398,15 +415,19 @@ def get_default_rl_module_spec(self): # This is not only assigning a specific type of RLModule class to EACH # module_id, but also defining a new custom MultiAgentRLModule class to be used # in the multi-agent scenario. - config = SingleAgentAlgoConfig().rl_module( - _enable_rl_module_api=True, - rl_module_spec=MultiAgentRLModuleSpec( - marl_module_class=CustomMARLModule1, - module_specs={ - "p1": SingleAgentRLModuleSpec(module_class=CustomRLModule1), - "p2": SingleAgentRLModuleSpec(module_class=CustomRLModule1), - }, - ), + config = ( + SingleAgentAlgoConfig() + .rl_module( + _enable_rl_module_api=True, + rl_module_spec=MultiAgentRLModuleSpec( + marl_module_class=CustomMARLModule1, + module_specs={ + "p1": SingleAgentRLModuleSpec(module_class=CustomRLModule1), + "p2": SingleAgentRLModuleSpec(module_class=CustomRLModule1), + }, + ), + ) + .training(_enable_learner_api=True) ) config.validate() @@ -435,8 +456,10 @@ def get_default_rl_module_spec(self): # This is the case where we ask the algorithm to use its default # MultiAgentRLModuleSpec, but the MultiAgentRLModuleSpec has not defined its # SingleAgentRLmoduleSpecs. 
- config = MultiAgentAlgoConfigWithNoSingleAgentSpec().rl_module( - _enable_rl_module_api=True + config = ( + MultiAgentAlgoConfigWithNoSingleAgentSpec() + .rl_module(_enable_rl_module_api=True) + .training(_enable_learner_api=True) ) self.assertRaisesRegex( @@ -449,7 +472,11 @@ def get_default_rl_module_spec(self): # This is the case where we ask the algorithm to use its default # MultiAgentRLModuleSpec, and the MultiAgentRLModuleSpec has defined its # SingleAgentRLmoduleSpecs. - config = MultiAgentAlgoConfig().rl_module(_enable_rl_module_api=True) + config = ( + MultiAgentAlgoConfig() + .rl_module(_enable_rl_module_api=True) + .training(_enable_learner_api=True) + ) config.validate() spec, expected = self._get_expected_marl_spec( diff --git a/rllib/core/learner/learner.py b/rllib/core/learner/learner.py index 688d1574d034..ef2e3fc01217 100644 --- a/rllib/core/learner/learner.py +++ b/rllib/core/learner/learner.py @@ -47,7 +47,7 @@ OverrideToImplementCustomLogic, OverrideToImplementCustomLogic_CallToSuperRecommended, ) -from ray.rllib.utils.schedules.piecewise_schedule import PiecewiseSchedule +from ray.rllib.utils.schedules.scheduler import Scheduler torch, _ = try_import_torch() tf1, tf, tfv = try_import_tf() @@ -245,6 +245,7 @@ def __init__( self._module_obj = module self._optimizer_config = optimizer_config self._hps = learner_hyperparameters or LearnerHyperparameters() + self._device = None # pick the configs that we need for the learner from scaling config self._learner_group_scaling_config = ( @@ -622,24 +623,15 @@ def build(self) -> None: return self._is_built = True - # Generic LR scheduling tools. - self.lr_scheduler = None - if self.hps.lr_schedule is not None: - # Custom schedule, based on list of - # ([ts], [value to be reached by ts])-tuples. 
- self.lr_schedule_per_module = defaultdict( - lambda: PiecewiseSchedule( - self.hps.lr_schedule, - outside_value=self.hps.lr_schedule[-1][-1], - framework=None, - ) - ) - self.curr_lr_per_module = defaultdict( - lambda: self._get_tensor_variable(self._optimizer_config["lr"]) - ) - # If no schedule, pin learning rate to its given (fixed) value. - else: - self.curr_lr_per_module = defaultdict(lambda: self._optimizer_config["lr"]) + # Build learning rate scheduling tools. + # TODO (sven): Move lr from optimizer config to Learner HPs? + # We might not need optimizer config. + self.lr_scheduler = Scheduler( + fixed_value=self._optimizer_config["lr"], + schedule=self.hps.lr_schedule, + framework=self.framework, + device=self._device, + ) self._module = self._make_module() for param_seq, optimizer in self.configure_optimizers(): @@ -773,7 +765,7 @@ def additional_update_per_module(self, module_id: ModuleID, tau: float): return results_all_modules - @OverrideToImplementCustomLogic + @OverrideToImplementCustomLogic_CallToSuperRecommended def additional_update_per_module( self, module_id: ModuleID, **kwargs ) -> Dict[str, Any]: diff --git a/rllib/core/learner/tf/tf_learner.py b/rllib/core/learner/tf/tf_learner.py index 45f98d4f7520..bfe03311cadc 100644 --- a/rllib/core/learner/tf/tf_learner.py +++ b/rllib/core/learner/tf/tf_learner.py @@ -88,9 +88,7 @@ def configure_optimizer_per_module( self, module_id: ModuleID ) -> Union[ParamOptimizerPair, NamedParamOptimizerPairs]: module = self._module[module_id] - # TODO (sven): Move lr from optimizer config to Learner HPs? - # We might not need optimizer config. 
- lr = self.curr_lr_per_module[module_id] + lr = self.lr_scheduler.get_current_value(module_id) optim = tf.keras.optimizers.Adam(learning_rate=lr) pair: ParamOptimizerPair = ( self.get_parameters(module), @@ -528,17 +526,23 @@ def filter_fwd_out(x): def additional_update_per_module( self, module_id: ModuleID, *, timestep: int, **kwargs ) -> Mapping[str, Any]: + + results = super().additional_update_per_module(module_id, timestep=timestep) + # Handle lr scheduling updates and apply new learning rates to the optimizers. + new_lr = self.lr_scheduler.update(module_id=module_id, timestep=timestep) + + # Not sure why we need to do this here besides setting the original + # tf Variable `self.curr_lr_per_module[module_id]`. But when tf creates the + # optimizer, it seems to detach its lr value from the given variable. + # Updating this variable is NOT sufficient to update the actual optimizer's + # learning rate, so we have to explicitly set it here. if self.hps.lr_schedule is not None: - value = self.lr_schedule_per_module[module_id].value(t=timestep) - self.curr_lr_per_module[module_id].assign(value) - # Not sure why we need to do this here besides setting the original - # tf Variable `self.curr_lr_per_module[module_id]`. When tf creates the - # optimizer, maybe it detaches its lr value from the given variable? 
- self._named_optimizers[module_id].lr = value - return { - LEARNER_RESULTS_CURR_LR_KEY: self._named_optimizers[module_id].lr.numpy() - } + self._named_optimizers[module_id].lr = new_lr + + results.update({LEARNER_RESULTS_CURR_LR_KEY: new_lr}) + + return results @override(Learner) def _get_tensor_variable(self, value, dtype=None, trainable=False) -> "tf.Tensor": diff --git a/rllib/core/learner/torch/torch_learner.py b/rllib/core/learner/torch/torch_learner.py index 43eb0ac9910b..29c4a2e4fc45 100644 --- a/rllib/core/learner/torch/torch_learner.py +++ b/rllib/core/learner/torch/torch_learner.py @@ -74,9 +74,7 @@ def configure_optimizer_per_module( self, module_id: ModuleID ) -> Union[ParamOptimizerPair, NamedParamOptimizerPairs]: module = self._module[module_id] - # TODO (sven): Move lr from optimizer config to Learner HPs? - # We might not need optimizer config. - lr = self.curr_lr_per_module[module_id] + lr = self.lr_scheduler.get_current_value(module_id) pair: ParamOptimizerPair = ( self.get_parameters(module), torch.optim.Adam(self.get_parameters(module), lr=lr), @@ -100,12 +98,13 @@ def compute_gradients( def additional_update_per_module( self, module_id: ModuleID, *, timestep: int, **kwargs ) -> Mapping[str, Any]: + results = super().additional_update_per_module(module_id, timestep=timestep) + # Handle lr scheduling updates and apply new learning rates to the optimizers. 
- value = self._optimizer_config["lr"] - if self.hps.lr_schedule is not None: - value = self.lr_schedule_per_module[module_id].value(t=timestep) - self.curr_lr_per_module[module_id].data = torch.tensor(value) - return {LEARNER_RESULTS_CURR_LR_KEY: value} + new_lr = self.lr_scheduler.update(module_id=module_id, timestep=timestep) + results.update({LEARNER_RESULTS_CURR_LR_KEY: new_lr}) + + return results @override(Learner) def postprocess_gradients( diff --git a/rllib/core/models/tests/test_catalog.py b/rllib/core/models/tests/test_catalog.py index bb3bb52c7110..9d17766ee8c4 100644 --- a/rllib/core/models/tests/test_catalog.py +++ b/rllib/core/models/tests/test_catalog.py @@ -385,6 +385,7 @@ def build_vf_head(self, framework): _enable_rl_module_api=True, rl_module_spec=SingleAgentRLModuleSpec(catalog_class=MyCatalog), ) + .training(_enable_learner_api=True) .framework("torch") ) diff --git a/rllib/evaluation/tests/test_trajectory_view_api.py b/rllib/evaluation/tests/test_trajectory_view_api.py index 0b81dc45bd3e..6bcf6225444a 100644 --- a/rllib/evaluation/tests/test_trajectory_view_api.py +++ b/rllib/evaluation/tests/test_trajectory_view_api.py @@ -7,9 +7,6 @@ from ray.rllib.algorithms.callbacks import DefaultCallbacks import ray.rllib.algorithms.dqn as dqn import ray.rllib.algorithms.ppo as ppo -from ray.rllib.algorithms.ppo.torch.ppo_torch_policy_rlm import ( - PPOTorchPolicyWithRLModule, -) from ray.rllib.examples.env.debug_counter_env import MultiAgentDebugCounterEnv from ray.rllib.examples.env.multi_agent import MultiAgentPendulum from ray.rllib.evaluation.rollout_worker import RolloutWorker @@ -236,12 +233,9 @@ def test_traj_view_next_action(self): .rollouts(rollout_fragment_length=200, num_rollout_workers=0) ) config.validate() - enable_rl_module_api = config._enable_rl_module_api rollout_worker_w_api = RolloutWorker( env_creator=lambda _: gym.make("CartPole-v1"), - default_policy_class=PPOTorchPolicyWithRLModule - if enable_rl_module_api - else 
ppo.PPOTorchPolicy, + default_policy_class=ppo.PPOTorchPolicy, config=config, ) # Add the next action (a') and 2nd next action (a'') to the view diff --git a/rllib/examples/self_play_with_open_spiel.py b/rllib/examples/self_play_with_open_spiel.py index 3c5360de15de..f611cac7d155 100644 --- a/rllib/examples/self_play_with_open_spiel.py +++ b/rllib/examples/self_play_with_open_spiel.py @@ -274,6 +274,7 @@ def policy_mapping_fn(agent_id, episode, worker, **kwargs): # Train the "main" policy to play really well using self-play. results = None if not args.from_checkpoint: + create_checkpoints = not bool(os.environ.get("RLLIB_ENABLE_RL_MODULE", False)) results = tune.Tuner( "PPO", param_space=config, @@ -294,8 +295,8 @@ def policy_mapping_fn(agent_id, episode, worker, **kwargs): sort_by_metric=True, ), checkpoint_config=air.CheckpointConfig( - checkpoint_at_end=True, - checkpoint_frequency=10, + checkpoint_at_end=create_checkpoints, + checkpoint_frequency=10 if create_checkpoints else 0, ), ), ).fit() diff --git a/rllib/models/tests/test_preprocessors.py b/rllib/models/tests/test_preprocessors.py index d26ba8b028ba..409326063458 100644 --- a/rllib/models/tests/test_preprocessors.py +++ b/rllib/models/tests/test_preprocessors.py @@ -39,6 +39,7 @@ def tearDownClass(cls) -> None: def test_rlms_and_preprocessing(self): config = ( ppo.PPOConfig() + .framework("tf2") .environment( env="ray.rllib.examples.env.random_env.RandomEnv", env_config={ @@ -48,18 +49,18 @@ def test_rlms_and_preprocessing(self): }, ) # Run this very quickly locally. - .rollouts(rollout_fragment_length=10) - .rollouts(num_rollout_workers=0) - .training(train_batch_size=10, sgd_minibatch_size=1, num_sgd_iter=1) + .rollouts(num_rollout_workers=0, rollout_fragment_length=10) + .training( + train_batch_size=10, + sgd_minibatch_size=1, + num_sgd_iter=1, + _enable_learner_api=True, + ) + .rl_module(_enable_rl_module_api=True) # Set this to True to enforce no preprocessors being used. 
.experimental(_disable_preprocessor_api=True) - .framework("tf2") ) - # TODO (Artur): No need to manually enable RLModules here since we have not - # fully migrated. Clear this up after migration. - config.rl_module(_enable_rl_module_api=True) - for _ in framework_iterator(config, frameworks=("torch", "tf2")): algo = config.build() results = algo.train() diff --git a/rllib/policy/eager_tf_policy_v2.py b/rllib/policy/eager_tf_policy_v2.py index 7e1e543b08ab..161a4bea0a22 100644 --- a/rllib/policy/eager_tf_policy_v2.py +++ b/rllib/policy/eager_tf_policy_v2.py @@ -432,16 +432,17 @@ def _init_view_requirements(self): self.view_requirements[SampleBatch.INFOS].used_for_training = False def maybe_initialize_optimizer_and_loss(self): - optimizers = force_list(self.optimizer()) - if self.exploration: - # Policies with RLModules don't have an exploration object. - optimizers = self.exploration.get_exploration_optimizer(optimizers) - - # The list of local (tf) optimizers (one per loss term). - self._optimizers: List[LocalOptimizer] = optimizers - # Backward compatibility: A user's policy may only support a single - # loss term and optimizer (no lists). - self._optimizer: LocalOptimizer = optimizers[0] if optimizers else None + if not self.config.get("_enable_learner_api", False): + optimizers = force_list(self.optimizer()) + if self.exploration: + # Policies with RLModules don't have an exploration object. + optimizers = self.exploration.get_exploration_optimizer(optimizers) + + # The list of local (tf) optimizers (one per loss term). + self._optimizers: List[LocalOptimizer] = optimizers + # Backward compatibility: A user's policy may only support a single + # loss term and optimizer (no lists). 
+ self._optimizer: LocalOptimizer = optimizers[0] if optimizers else None self._initialize_loss_from_dummy_batch( auto_remove_unneeded_view_reqs=True, diff --git a/rllib/policy/policy.py b/rllib/policy/policy.py index bb534497e3d2..c1ca60b83904 100644 --- a/rllib/policy/policy.py +++ b/rllib/policy/policy.py @@ -1470,35 +1470,39 @@ def _initialize_loss_from_dummy_batch( seq_len = sample_batch_size // B seq_lens = np.array([seq_len for _ in range(B)], dtype=np.int32) postprocessed_batch[SampleBatch.SEQ_LENS] = seq_lens - # Switch on lazy to-tensor conversion on `postprocessed_batch`. - train_batch = self._lazy_tensor_dict(postprocessed_batch) - # Calling loss, so set `is_training` to True. - train_batch.set_training(True) - if seq_lens is not None: - train_batch[SampleBatch.SEQ_LENS] = seq_lens - train_batch.count = self._dummy_batch.count - # Call the loss function, if it exists. - # TODO(jungong) : clean up after all agents get migrated. - # We should simply do self.loss(...) here. - if self._loss is not None: - self._loss(self, self.model, self.dist_class, train_batch) - elif ( - is_overridden(self.loss) or self.config.get("_enable_rl_module_api", False) - ) and not self.config["in_evaluation"]: - self.loss(self.model, self.dist_class, train_batch) - # Call the stats fn, if given. - # TODO(jungong) : clean up after all agents get migrated. - # We should simply do self.stats_fn(train_batch) here. - if stats_fn is not None: - stats_fn(self, train_batch) - if hasattr(self, "stats_fn") and not self.config["in_evaluation"]: - self.stats_fn(train_batch) + + if not self.config.get("_enable_learner_api"): + # Switch on lazy to-tensor conversion on `postprocessed_batch`. + train_batch = self._lazy_tensor_dict(postprocessed_batch) + # Calling loss, so set `is_training` to True. + train_batch.set_training(True) + if seq_lens is not None: + train_batch[SampleBatch.SEQ_LENS] = seq_lens + train_batch.count = self._dummy_batch.count + + # Call the loss function, if it exists. 
+ # TODO(jungong) : clean up after all agents get migrated. + # We should simply do self.loss(...) here. + if self._loss is not None: + self._loss(self, self.model, self.dist_class, train_batch) + elif is_overridden(self.loss) and not self.config["in_evaluation"]: + self.loss(self.model, self.dist_class, train_batch) + # Call the stats fn, if given. + # TODO(jungong) : clean up after all agents get migrated. + # We should simply do self.stats_fn(train_batch) here. + if stats_fn is not None: + stats_fn(self, train_batch) + if hasattr(self, "stats_fn") and not self.config["in_evaluation"]: + self.stats_fn(train_batch) # Re-enable tracing. self._no_tracing = False # Add new columns automatically to view-reqs. - if auto_remove_unneeded_view_reqs: + if ( + not self.config.get("_enable_learner_api") + and auto_remove_unneeded_view_reqs + ): # Add those needed for postprocessing and training. all_accessed_keys = ( train_batch.accessed_keys diff --git a/rllib/policy/tf_mixins.py b/rllib/policy/tf_mixins.py index 8ce18df5e979..fe5e23a330e8 100644 --- a/rllib/policy/tf_mixins.py +++ b/rllib/policy/tf_mixins.py @@ -33,7 +33,9 @@ class LearningRateSchedule: @DeveloperAPI def __init__(self, lr, lr_schedule): self._lr_schedule = None - if lr_schedule is None: + # Disable any scheduling behavior related to learning if Learner API is active. + # Schedules are handled by Learner class. + if lr_schedule is None or self.config.get("_enable_learner_api", False): self.cur_lr = tf1.get_variable("lr", initializer=lr, trainable=False) else: self._lr_schedule = PiecewiseSchedule( @@ -78,7 +80,11 @@ class EntropyCoeffSchedule: @DeveloperAPI def __init__(self, entropy_coeff, entropy_coeff_schedule): self._entropy_coeff_schedule = None - if entropy_coeff_schedule is None: + # Disable any scheduling behavior related to learning if Learner API is active. + # Schedules are handled by Learner class. 
+ if entropy_coeff_schedule is None or ( + self.config.get("_enable_learner_api", False) + ): self.entropy_coeff = get_variable( entropy_coeff, framework="tf", tf_name="entropy_coeff", trainable=False ) @@ -208,37 +214,32 @@ class TargetNetworkMixin: """ def __init__(self): - if self.config.get("_enable_rl_module_api", False): - # In order to access the variables for rl modules, we need to - # use the underlying keras api model.trainable_variables. - model_vars = self.model.trainable_variables - target_model_vars = self.target_model.trainable_variables - else: + if not self.config.get("_enable_rl_module_api", False): model_vars = self.model.trainable_variables() target_model_vars = self.target_model.trainable_variables() - @make_tf_callable(self.get_session()) - def update_target_fn(tau): - tau = tf.convert_to_tensor(tau, dtype=tf.float32) - update_target_expr = [] - assert len(model_vars) == len(target_model_vars), ( - model_vars, - target_model_vars, - ) - for var, var_target in zip(model_vars, target_model_vars): - update_target_expr.append( - var_target.assign(tau * var + (1.0 - tau) * var_target) + @make_tf_callable(self.get_session()) + def update_target_fn(tau): + tau = tf.convert_to_tensor(tau, dtype=tf.float32) + update_target_expr = [] + assert len(model_vars) == len(target_model_vars), ( + model_vars, + target_model_vars, ) - logger.debug("Update target op {}".format(var_target)) - return tf.group(*update_target_expr) - - # Hard initial update. - self._do_update = update_target_fn - # TODO: The previous SAC implementation does an update(1.0) here. - # If this is changed to tau != 1.0 the sac_loss_function test fails. Why? - # Also the test is not very maintainable, we need to change that unittest - # anyway. 
- self.update_target(tau=1.0) # self.config.get("tau", 1.0)) + for var, var_target in zip(model_vars, target_model_vars): + update_target_expr.append( + var_target.assign(tau * var + (1.0 - tau) * var_target) + ) + logger.debug("Update target op {}".format(var_target)) + return tf.group(*update_target_expr) + + # Hard initial update. + self._do_update = update_target_fn + # TODO: The previous SAC implementation does an update(1.0) here. + # If this is changed to tau != 1.0 the sac_loss_function test fails. Why? + # Also the test is not very maintainable, we need to change that unittest + # anyway. + self.update_target(tau=1.0) # self.config.get("tau", 1.0)) @property def q_func_vars(self): @@ -276,7 +277,8 @@ def set_weights(self, weights): EagerTFPolicyV2.set_weights(self, weights) elif isinstance(self, EagerTFPolicy): # Handle TF2 policies. EagerTFPolicy.set_weights(self, weights) - self.update_target(self.config.get("tau", 1.0)) + if not self.config.get("_enable_rl_module_api", False): + self.update_target(self.config.get("tau", 1.0)) class ValueNetworkMixin: diff --git a/rllib/policy/torch_mixins.py b/rllib/policy/torch_mixins.py index d6c4b03a935d..b258c1d74560 100644 --- a/rllib/policy/torch_mixins.py +++ b/rllib/policy/torch_mixins.py @@ -8,8 +8,6 @@ torch, nn = try_import_torch() -# TODO: (sven) Unify hyperparam annealing procedures across RLlib (tf/torch) -# and for all possible hyperparams, not just lr. @DeveloperAPI class LearningRateSchedule: """Mixin for TorchPolicy that adds a learning rate schedule.""" @@ -17,6 +15,8 @@ class LearningRateSchedule: @DeveloperAPI def __init__(self, lr, lr_schedule): self._lr_schedule = None + # Disable any scheduling behavior related to learning if Learner API is active. + # Schedules are handled by Learner class. 
if lr_schedule is None: self.cur_lr = lr else: @@ -28,7 +28,7 @@ def __init__(self, lr, lr_schedule): @override(Policy) def on_global_var_update(self, global_vars): super().on_global_var_update(global_vars) - if self._lr_schedule: + if self._lr_schedule and not self.config.get("_enable_learner_api", False): self.cur_lr = self._lr_schedule.value(global_vars["timestep"]) for opt in self._optimizers: for p in opt.param_groups: @@ -42,7 +42,11 @@ class EntropyCoeffSchedule: @DeveloperAPI def __init__(self, entropy_coeff, entropy_coeff_schedule): self._entropy_coeff_schedule = None - if entropy_coeff_schedule is None: + # Disable any scheduling behavior related to learning if Learner API is active. + # Schedules are handled by Learner class. + if entropy_coeff_schedule is None or ( + self.config.get("_enable_learner_api", False) + ): self.entropy_coeff = entropy_coeff else: # Allows for custom schedule similar to lr_schedule format diff --git a/rllib/policy/torch_policy_v2.py b/rllib/policy/torch_policy_v2.py index 0d58dbc55c2b..1962a04adc60 100644 --- a/rllib/policy/torch_policy_v2.py +++ b/rllib/policy/torch_policy_v2.py @@ -182,29 +182,32 @@ def __init__( self.exploration = None else: self.exploration = self._create_exploration() - self._optimizers = force_list(self.optimizer()) - - # Backward compatibility workaround so Policy will call self.loss() directly. - # TODO(jungong): clean up after all policies are migrated to new sub-class - # implementation. - self._loss = None - - # Store, which params (by index within the model's list of - # parameters) should be updated per optimizer. - # Maps optimizer idx to set or param indices. 
- self.multi_gpu_param_groups: List[Set[int]] = [] - main_params = {p: i for i, p in enumerate(self.model.parameters())} - for o in self._optimizers: - param_indices = [] - for pg_idx, pg in enumerate(o.param_groups): - for p in pg["params"]: - param_indices.append(main_params[p]) - self.multi_gpu_param_groups.append(set(param_indices)) - - # Create n sample-batch buffers (num_multi_gpu_tower_stacks), each - # one with m towers (num_gpus). - num_buffers = self.config.get("num_multi_gpu_tower_stacks", 1) - self._loaded_batches = [[] for _ in range(num_buffers)] + + if not self.config.get("_enable_learner_api", False): + self._optimizers = force_list(self.optimizer()) + + # Backward compatibility workaround so Policy will call self.loss() + # directly. + # TODO (jungong): clean up after all policies are migrated to new sub-class + # implementation. + self._loss = None + + # Store, which params (by index within the model's list of + # parameters) should be updated per optimizer. + # Maps optimizer idx to set or param indices. + self.multi_gpu_param_groups: List[Set[int]] = [] + main_params = {p: i for i, p in enumerate(self.model.parameters())} + for o in self._optimizers: + param_indices = [] + for pg_idx, pg in enumerate(o.param_groups): + for p in pg["params"]: + param_indices.append(main_params[p]) + self.multi_gpu_param_groups.append(set(param_indices)) + + # Create n sample-batch buffers (num_multi_gpu_tower_stacks), each + # one with m towers (num_gpus). + num_buffers = self.config.get("num_multi_gpu_tower_stacks", 1) + self._loaded_batches = [[] for _ in range(num_buffers)] # If set, means we are using distributed allreduce during learning. 
self.distributed_world_size = None @@ -1104,7 +1107,7 @@ def _compute_action_helper( if self.model: self.model.eval() - extra_fetches = {} + extra_fetches = None if isinstance(self.model, RLModule): if explore: fwd_out = self.model.forward_exploration(input_dict) @@ -1166,7 +1169,7 @@ def _compute_action_helper( ) # Add default and custom fetches. - if not extra_fetches: + if extra_fetches is None: extra_fetches = self.extra_action_out( input_dict, state_batches, self.model, action_dist ) diff --git a/rllib/tuned_examples/impala/cartpole-impala.yaml b/rllib/tuned_examples/impala/cartpole-impala.yaml index 46c37c52ea69..1df02c4313cb 100644 --- a/rllib/tuned_examples/impala/cartpole-impala.yaml +++ b/rllib/tuned_examples/impala/cartpole-impala.yaml @@ -8,8 +8,8 @@ cartpole-impala: # Works for both torch and tf. framework: tf2 num_gpus: 0 - _enable_rl_module_api: True - _enable_learner_api: True + _enable_rl_module_api: true + _enable_learner_api: true grad_clip: 40 num_workers: 2 num_learner_workers: 1 diff --git a/rllib/tuned_examples/ppo/cartpole-ppo-with-rl-module.yaml b/rllib/tuned_examples/ppo/cartpole-ppo-with-rl-module.yaml index fbfb6905b4ac..2f6afebd53ef 100644 --- a/rllib/tuned_examples/ppo/cartpole-ppo-with-rl-module.yaml +++ b/rllib/tuned_examples/ppo/cartpole-ppo-with-rl-module.yaml @@ -19,5 +19,5 @@ cartpole-ppo: vf_share_layers: true enable_connectors: true _enable_rl_module_api: true - _enable_learner_api: false + _enable_learner_api: true eager_tracing: false \ No newline at end of file diff --git a/rllib/tuned_examples/ppo/pendulum-ppo-with-rl-module.yaml b/rllib/tuned_examples/ppo/pendulum-ppo-with-rl-module.yaml index 5b2888d709f7..98da67a36fdb 100644 --- a/rllib/tuned_examples/ppo/pendulum-ppo-with-rl-module.yaml +++ b/rllib/tuned_examples/ppo/pendulum-ppo-with-rl-module.yaml @@ -21,6 +21,7 @@ pendulum-ppo: enable_connectors: true model: fcnet_activation: relu + _enable_learner_api: true _enable_rl_module_api: true # Need to unset this b/c we are 
using the RLModule API, which # provides exploration control via the RLModule's `forward_exploration` method. diff --git a/rllib/utils/schedules/scheduler.py b/rllib/utils/schedules/scheduler.py new file mode 100644 index 000000000000..7d349329791b --- /dev/null +++ b/rllib/utils/schedules/scheduler.py @@ -0,0 +1,157 @@ +from collections import defaultdict +from typing import List, Optional, Tuple + +from ray.rllib.core.rl_module.rl_module import ModuleID +from ray.rllib.utils.framework import try_import_tf, try_import_torch +from ray.rllib.utils.schedules.piecewise_schedule import PiecewiseSchedule +from ray.rllib.utils.typing import TensorType + + +_, tf, _ = try_import_tf() +torch, _ = try_import_torch() + + +class Scheduler: + """Class to manage a scheduled (framework-dependent) tensor variable. + + Uses the PiecewiseSchedule (for maximum configuration flexibility) + """ + + def __init__( + self, + *, + fixed_value: Optional[float] = None, + schedule: Optional[List[Tuple[int, float]]] = None, + framework: str = "torch", + device: Optional[str] = None, + ): + """Initializes a Scheduler instance. + + Args: + fixed_value: A fixed, constant value (in case no schedule should be used). + Set `schedule` to None to always just use this fixed value. + If `fixed_value` is None, `schedule` must be provided. + schedule: The schedule configuration to use. In the format of + [[timestep, value], [timestep, value], ...] + Intermediary timesteps will be assigned to interpolated values (linear + interpolation will be used). A schedule config's first entry must + start with timestep 0, i.e.: [[0, initial_value], [...]]. + framework: The framework string, for which to create the tensor variable + that hold the current value. This is the variable that can be used in + the graph, e.g. in a loss function. + device: Optional device (for torch) to place the tensor variable on. 
+        """
+        self.use_schedule = schedule is not None
+        self.framework = framework
+        self.device = device
+
+        if self.use_schedule:
+            # Custom schedule, based on list of
+            # ([ts], [value to be reached by ts])-tuples.
+            self.schedule_per_module = defaultdict(
+                lambda: PiecewiseSchedule(
+                    schedule,
+                    outside_value=schedule[-1][-1],
+                    framework=None,
+                )
+            )
+            # As initial tensor value, use the first timestep's (must be 0) value.
+            self.curr_value_per_module = defaultdict(
+                lambda: self._create_tensor_variable(initial_value=schedule[0][1])
+            )
+        # If no schedule, pin (fix) given value.
+        else:
+            self.curr_value_per_module = defaultdict(lambda: fixed_value)
+
+    @staticmethod
+    def validate(
+        schedule: Optional[List[Tuple[int, float]]],
+        schedule_name: str,
+        value_name: str,
+    ) -> None:
+        """Performs checking of a certain schedule configuration.
+
+        The first entry in `schedule` must have a timestep of 0.
+
+        Args:
+            schedule: The schedule configuration to check. In the format of
+                [[timestep, value], [timestep, value], ...]
+                Intermediary timesteps will be assigned to interpolated values (linear
+                interpolation will be used). A schedule config's first entry must
+                start with timestep 0, i.e.: [[0, initial_value], [...]].
+            schedule_name: The name of the schedule, e.g. `lr_schedule`.
+            value_name: A full text description of the variable that's being scheduled,
+                e.g. `learning rate`.
+
+        Raises:
+            ValueError: In case errors are found in the schedule's format.
+        """
+        if schedule is not None:
+            if not isinstance(schedule, (list, tuple)) or (len(schedule) < 2):
+                raise ValueError(
+                    f"Invalid `{schedule_name}` ({schedule}) specified! Must be a "
+                    "list of at least 2 tuples, each of the form "
+                    f"(`timestep`, `{value_name} to reach`), e.g. "
+                    "`[(0, 0.001), (1e6, 0.0001), (2e6, 0.00005)]`."
+                )
+            elif schedule[0][0] != 0:
+                raise ValueError(
+                    f"When providing a `{schedule_name}`, the first timestep must be 0 "
+                    f"and the corresponding lr value is the initial {value_name}! You "
+                    f"provided ts={schedule[0][0]} {value_name}={schedule[0][1]}."
+                )
+
+    def get_current_value(self, module_id: ModuleID) -> TensorType:
+        """Returns the current value (as a tensor variable), given a ModuleID.
+
+        Args:
+            module_id: The module ID, for which to retrieve the current tensor value.
+
+        Returns:
+            The tensor variable (holding the current value to be used).
+        """
+        return self.curr_value_per_module[module_id]
+
+    def update(self, module_id: ModuleID, timestep: int) -> float:
+        """Updates the underlying (framework specific) tensor variable.
+
+        Args:
+            module_id: The module ID, for which to update the tensor variable.
+            timestep: The current timestep.
+
+        Returns:
+            The current value of the tensor variable as a python float.
+        """
+        if self.use_schedule:
+            python_value = self.schedule_per_module[module_id].value(t=timestep)
+            if self.framework == "torch":
+                self.curr_value_per_module[module_id].data = torch.tensor(python_value)
+            else:
+                self.curr_value_per_module[module_id].assign(python_value)
+        else:
+            python_value = self.curr_value_per_module[module_id]
+
+        return python_value
+
+    def _create_tensor_variable(self, initial_value: float) -> TensorType:
+        """Creates a framework-specific tensor variable to be scheduled.
+
+        Args:
+            initial_value: The initial (float) value for the variable to hold.
+
+        Returns:
+            The created framework-specific tensor variable.
+ """ + if self.framework == "torch": + return torch.tensor( + initial_value, + requires_grad=False, + dtype=torch.float32, + device=self.device, + ) + else: + return tf.Variable( + initial_value, + trainable=False, + dtype=tf.float32, + ) From edc04c2f3987fb59e69824843966e15837082a73 Mon Sep 17 00:00:00 2001 From: Lonnie Liu <95255098+aslonnie@users.noreply.github.com> Date: Mon, 8 May 2023 09:14:40 -0700 Subject: [PATCH 277/424] Properly bazelize all release test infra scripts (#34689) --- .buildkite/pipeline.test.yml | 1 - WORKSPACE | 28 ++++ release/BUILD | 144 +++++++++++++++++- release/ray_release/bazel.py | 22 +++ release/ray_release/buildkite/concurrency.py | 11 +- release/ray_release/config.py | 5 +- release/ray_release/env.py | 8 +- release/ray_release/scripts/ray_bisect.py | 1 + release/ray_release/template.py | 18 +-- .../tests/test_anyscale_job_manager.py | 1 + .../tests/test_anyscale_job_wrapper.py | 1 + release/ray_release/tests/test_bisect.py | 1 + release/ray_release/tests/test_buildkite.py | 18 ++- release/ray_release/tests/test_config.py | 9 +- release/ray_release/tests/test_wheels.py | 5 +- release/requirements_buildkite.in | 1 + release/requirements_buildkite.txt | 3 + release/run_release_test.sh | 2 +- 18 files changed, 237 insertions(+), 42 deletions(-) create mode 100644 release/ray_release/bazel.py diff --git a/.buildkite/pipeline.test.yml b/.buildkite/pipeline.test.yml index 947055b58dd3..3de2ecf6d92d 100644 --- a/.buildkite/pipeline.test.yml +++ b/.buildkite/pipeline.test.yml @@ -12,7 +12,6 @@ instance_size: small commands: - cleanup() { if [ "${BUILDKITE_PULL_REQUEST}" = "false" ]; then ./ci/build/upload_build_info.sh; fi }; trap cleanup EXIT - - pip install -e release/ - ./ci/env/env_info.sh - bazel test --config=ci $(./ci/run/bazel_export_options) --build_tests_only diff --git a/WORKSPACE b/WORKSPACE index 6faea0d927f9..eb6aeba907e3 100644 --- a/WORKSPACE +++ b/WORKSPACE @@ -38,6 +38,34 @@ http_archive( url = 
"https://github.com/bazelbuild/rules_python/releases/download/0.21.0/rules_python-0.21.0.tar.gz", ) +load("@rules_python//python:repositories.bzl", "python_register_toolchains") + +python_register_toolchains( + name = "python3_9", + python_version = "3.9", + register_toolchains = False, +) + +load("@python3_9//:defs.bzl", bk_python = "interpreter") load("@rules_python//python/pip_install:repositories.bzl", "pip_install_dependencies") pip_install_dependencies() + +load("@rules_python//python:pip.bzl", "pip_parse") + +pip_parse( + name = "py_deps_buildkite", + python_interpreter_target = bk_python, + requirements_lock = "//release:requirements_buildkite.txt", +) + +load("@py_deps_buildkite//:requirements.bzl", install_py_deps_buildkite = "install_deps") + +install_py_deps_buildkite() + +register_toolchains("//release:python_toolchain") + +register_execution_platforms( + "@local_config_platform//:host", + "//release:hermetic_python_platform", +) diff --git a/release/BUILD b/release/BUILD index 42918e52ba2a..b15f22324acb 100644 --- a/release/BUILD +++ b/release/BUILD @@ -1,5 +1,7 @@ -load("@rules_python//python:defs.bzl", "py_test") +load("@rules_python//python:defs.bzl", "py_library", "py_runtime", "py_runtime_pair", "py_test") load("@rules_python//python:pip.bzl", "compile_pip_requirements") +load("@py_deps_buildkite//:requirements.bzl", bk_require = "requirement") +load("@python3_9//:defs.bzl", bk_python = "interpreter") compile_pip_requirements( name = "requirements_buildkite", @@ -351,97 +353,216 @@ py_test( # RELEASE TEST INFRA unit tests #### +py_runtime( + name = "python3_runtime", + interpreter = bk_python, + python_version = "PY3", + visibility = ["//visibility:private"], +) + +py_runtime_pair( + name = "python_runtime_pair", + py2_runtime = None, + py3_runtime = ":python3_runtime", + visibility = ["//visibility:private"], +) + +constraint_setting(name = "hermetic") + +constraint_value( + name = "hermetic_python", + constraint_setting = ":hermetic", + 
visibility = ["//visibility:private"], +) + +toolchain( + name = "python_toolchain", + exec_compatible_with = [":hermetic_python"], + toolchain = ":python_runtime_pair", + toolchain_type = "@bazel_tools//tools/python:toolchain_type", +) + +platform( + name = "hermetic_python_platform", + constraint_values = [":hermetic_python"], + parents = ["@local_config_platform//:host"], + visibility = ["//visibility:private"], +) + +py_library( + name = "ray_release", + srcs = glob( + ["ray_release/**/*.py"], + exclude = ["ray_release/tests/*.py"], + ), + data = glob(["ray_release/environments/*.env"]) + [ + "ray_release/buildkite/aws_instance_types.csv", + "ray_release/schema.json", + ], + imports = ["."], + visibility = ["//visibility:public"], + deps = [ + bk_require("anyscale"), + bk_require("bazel-runfiles"), + bk_require("boto3"), + bk_require("botocore"), + bk_require("click"), + bk_require("google-cloud-storage"), + bk_require("jinja2"), + bk_require("retry"), + ], +) + +py_library( + name = "test_utils", + srcs = ["ray_release/tests/utils.py"], + imports = ["."], + visibility = ["//visibility:private"], +) + py_test( name = "test_alerts", size = "small", srcs = ["ray_release/tests/test_alerts.py"], + exec_compatible_with = [":hermetic_python"], tags = [ "release_unit", "team:ci", ], + deps = [ + ":ray_release", + bk_require("pytest"), + ], ) py_test( name = "test_anyscale_job_wrapper", size = "small", srcs = ["ray_release/tests/test_anyscale_job_wrapper.py"], + exec_compatible_with = [":hermetic_python"], tags = [ "release_unit", "team:ci", ], + deps = [ + ":ray_release", + bk_require("pytest"), + ], ) py_test( name = "test_bisect", size = "small", srcs = ["ray_release/tests/test_bisect.py"], + exec_compatible_with = [":hermetic_python"], tags = [ "release_unit", "team:ci", ], - deps = ["//:ray_lib"], + deps = [ + ":ray_release", + bk_require("pytest"), + ], ) py_test( name = "test_buildkite", size = "small", srcs = ["ray_release/tests/test_buildkite.py"], + 
exec_compatible_with = [":hermetic_python"], tags = [ "release_unit", "team:ci", ], + deps = [ + ":ray_release", + bk_require("pyyaml"), + bk_require("pytest"), + ], ) py_test( name = "test_cluster_manager", size = "small", srcs = ["ray_release/tests/test_cluster_manager.py"], + exec_compatible_with = [":hermetic_python"], tags = [ "release_unit", "team:ci", ], + deps = [ + ":ray_release", + ":test_utils", + bk_require("freezegun"), + bk_require("pytest"), + ], ) py_test( name = "test_config", size = "small", srcs = ["ray_release/tests/test_config.py"], - data = ["release_tests.yaml"], + data = glob( + ["**/*.yaml"], + exclude = ["ray_release/**/*.yaml"], + ) + [ + "//python/ray/autoscaler/aws:test_configs", + ], + exec_compatible_with = [":hermetic_python"], tags = [ "release_unit", "team:ci", ], + deps = [ + ":ray_release", + bk_require("pytest"), + ], ) py_test( name = "test_env", size = "small", srcs = ["ray_release/tests/test_env.py"], + exec_compatible_with = [":hermetic_python"], tags = [ "release_unit", "team:ci", ], + deps = [ + ":ray_release", + bk_require("pytest"), + ], ) py_test( name = "test_glue", size = "small", srcs = ["ray_release/tests/test_glue.py"], + exec_compatible_with = [":hermetic_python"], tags = [ "release_unit", "team:ci", ], + deps = [ + ":ray_release", + ":test_utils", + bk_require("pytest"), + ], ) py_test( name = "test_log_aggregator", size = "small", srcs = ["ray_release/tests/test_log_aggregator.py"], + exec_compatible_with = [":hermetic_python"], tags = [ "release_unit", "team:ci", ], - deps = ["//:ray_lib"], + deps = [ + ":ray_release", + bk_require("pytest"), + ], ) py_test( @@ -453,19 +574,32 @@ py_test( "ray_release/tests/_test_run_release_test_sh.py", "run_release_test.sh", ], + exec_compatible_with = [":hermetic_python"], tags = [ "release_unit", "team:ci", ], + deps = [ + ":ray_release", + bk_require("pytest"), + ], ) py_test( name = "test_wheels", size = "small", srcs = ["ray_release/tests/test_wheels.py"], + data = [ + 
"//:python_sources", + ], + exec_compatible_with = [":hermetic_python"], tags = [ "release_unit", "team:ci", ], - deps = ["//:ray_lib"], + deps = [ + ":ray_release", + bk_require("freezegun"), + bk_require("pytest"), + ], ) diff --git a/release/ray_release/bazel.py b/release/ray_release/bazel.py new file mode 100644 index 000000000000..ba68ba0c82e1 --- /dev/null +++ b/release/ray_release/bazel.py @@ -0,0 +1,22 @@ +import os + +import runfiles + +REPO_NAME = "com_github_ray_project_ray" +_LEGACY_REPO_ROOT = os.path.abspath( + os.path.join(os.path.dirname(__file__), "../.."), +) + +the_runfiles = runfiles.Create() + + +def _norm_path_join(*args): + return os.path.normpath(os.path.join(*args)) + + +def bazel_runfile(*args): + """Return the path to a runfile in the release directory.""" + p = _norm_path_join(*args) + if the_runfiles: + return the_runfiles.Rlocation(os.path.join(REPO_NAME, p)) + return os.path.join(_LEGACY_REPO_ROOT, p) diff --git a/release/ray_release/buildkite/concurrency.py b/release/ray_release/buildkite/concurrency.py index 5f7e21b446b4..1e201f9628dd 100644 --- a/release/ray_release/buildkite/concurrency.py +++ b/release/ray_release/buildkite/concurrency.py @@ -1,9 +1,9 @@ import csv -import os from collections import namedtuple from typing import Tuple, Optional, Dict -from ray_release.config import Test, RELEASE_PACKAGE_DIR +from ray_release.bazel import bazel_runfile +from ray_release.config import Test from ray_release.template import load_test_cluster_compute from ray_release.logger import logger @@ -67,9 +67,10 @@ def load_instance_types(path: Optional[str] = None) -> Dict[str, Tuple[int, int]]: - path = path or os.path.join( - RELEASE_PACKAGE_DIR, "ray_release", "buildkite", "aws_instance_types.csv" - ) + if not path: + path = bazel_runfile( + "release/ray_release/buildkite/aws_instance_types.csv", + ) instance_to_resources = {} with open(path, "rt") as fp: diff --git a/release/ray_release/config.py b/release/ray_release/config.py index 
02a0422d148a..cc72baf69e8a 100644 --- a/release/ray_release/config.py +++ b/release/ray_release/config.py @@ -7,6 +7,7 @@ import jsonschema import yaml from ray_release.anyscale_util import find_cloud_by_name +from ray_release.bazel import bazel_runfile from ray_release.exception import ReleaseTestCLIError, ReleaseTestConfigError from ray_release.logger import logger from ray_release.util import DeferredEnvVar, deep_update @@ -50,9 +51,7 @@ class TestDefinition(dict): RELEASE_PACKAGE_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), "..")) -RELEASE_TEST_SCHEMA_FILE = os.path.join( - RELEASE_PACKAGE_DIR, "ray_release", "schema.json" -) +RELEASE_TEST_SCHEMA_FILE = bazel_runfile("release/ray_release/schema.json") def read_and_validate_release_test_collection( diff --git a/release/ray_release/env.py b/release/ray_release/env.py index 0ba8a906122c..475db22648e0 100644 --- a/release/ray_release/env.py +++ b/release/ray_release/env.py @@ -1,16 +1,16 @@ import os from typing import Dict +from ray_release.bazel import bazel_runfile from ray_release.exception import ReleaseTestConfigError DEFAULT_ENVIRONMENT = "aws" def load_environment(environment_name: str) -> Dict[str, str]: - this_dir = os.path.dirname(__file__) - env_file = os.path.join(this_dir, "environments", f"{environment_name}.env") - - if not os.path.exists(env_file): + file_base = f"{environment_name}.env" + env_file = bazel_runfile("release/ray_release/environments", file_base) + if not env_file or not os.path.isfile(env_file): raise ReleaseTestConfigError( f"Unknown environment with name: {environment_name}" ) diff --git a/release/ray_release/scripts/ray_bisect.py b/release/ray_release/scripts/ray_bisect.py index 907cc57b24f2..d46dffe167c4 100644 --- a/release/ray_release/scripts/ray_bisect.py +++ b/release/ray_release/scripts/ray_bisect.py @@ -4,6 +4,7 @@ import json import time from typing import Dict, List, Set + from ray_release.logger import logger from ray_release.buildkite.step import 
get_step from ray_release.config import ( diff --git a/release/ray_release/template.py b/release/ray_release/template.py index 8444d4970d7f..1f8cf74a86ce 100644 --- a/release/ray_release/template.py +++ b/release/ray_release/template.py @@ -7,8 +7,8 @@ import jinja2 import yaml +from ray_release.bazel import bazel_runfile from ray_release.config import ( - RELEASE_PACKAGE_DIR, parse_python_version, DEFAULT_PYTHON_VERSION, get_test_cloud_id, @@ -73,7 +73,7 @@ def load_and_render_yaml_template( if not template_path: return None - if not os.path.exists(template_path): + if not os.path.isfile(template_path): raise ReleaseTestConfigError( f"Cannot load yaml template from {template_path}: Path not found." ) @@ -99,11 +99,9 @@ def render_yaml_template(template: str, env: Optional[Dict] = None): def get_cluster_env_path(test: "Test") -> str: + working_dir = test.get("working_dir", "") cluster_env_file = test["cluster"]["cluster_env"] - cluster_env_path = os.path.join( - RELEASE_PACKAGE_DIR, test.get("working_dir", ""), cluster_env_file - ) - return cluster_env_path + return bazel_runfile("release", working_dir, cluster_env_file) def load_test_cluster_env(test: "Test", ray_wheels_url: str) -> Optional[Dict]: @@ -144,12 +142,10 @@ def populate_cluster_env_variables(test: "Test", ray_wheels_url: str) -> Dict: def load_test_cluster_compute(test: "Test") -> Optional[Dict]: cluster_compute_file = test["cluster"]["cluster_compute"] - cluster_compute_path = os.path.join( - RELEASE_PACKAGE_DIR, test.get("working_dir", ""), cluster_compute_file - ) + working_dir = test.get("working_dir", "") + f = bazel_runfile("release", working_dir, cluster_compute_file) env = populate_cluster_compute_variables(test) - - return load_and_render_yaml_template(cluster_compute_path, env=env) + return load_and_render_yaml_template(f, env=env) def populate_cluster_compute_variables(test: "Test") -> Dict: diff --git a/release/ray_release/tests/test_anyscale_job_manager.py 
b/release/ray_release/tests/test_anyscale_job_manager.py index 61d89d7db546..5985aa5105f9 100644 --- a/release/ray_release/tests/test_anyscale_job_manager.py +++ b/release/ray_release/tests/test_anyscale_job_manager.py @@ -1,5 +1,6 @@ import tempfile import os + from ray_release.util import ERROR_LOG_PATTERNS from ray_release.job_manager.anyscale_job_manager import AnyscaleJobManager diff --git a/release/ray_release/tests/test_anyscale_job_wrapper.py b/release/ray_release/tests/test_anyscale_job_wrapper.py index 7f17df27270b..f7ba2053bef2 100644 --- a/release/ray_release/tests/test_anyscale_job_wrapper.py +++ b/release/ray_release/tests/test_anyscale_job_wrapper.py @@ -1,6 +1,7 @@ import pytest import sys import json + from ray_release.command_runner._anyscale_job_wrapper import ( main, run_bash_command, diff --git a/release/ray_release/tests/test_bisect.py b/release/ray_release/tests/test_bisect.py index a00ab10a5cb9..3e64f19cbf29 100644 --- a/release/ray_release/tests/test_bisect.py +++ b/release/ray_release/tests/test_bisect.py @@ -2,6 +2,7 @@ import pytest from unittest import mock from typing import List, Set, Dict + from ray_release.scripts.ray_bisect import _bisect, _obtain_test_result, _sanity_check from ray_release.config import Test diff --git a/release/ray_release/tests/test_buildkite.py b/release/ray_release/tests/test_buildkite.py index 8d45079db243..253928ecd859 100644 --- a/release/ray_release/tests/test_buildkite.py +++ b/release/ray_release/tests/test_buildkite.py @@ -2,7 +2,7 @@ import sys import tempfile import unittest -from typing import Dict +from typing import Dict, Callable from unittest.mock import patch import yaml @@ -29,7 +29,6 @@ ) from ray_release.config import Test from ray_release.exception import ReleaseTestConfigError -from ray_release.tests.test_glue import MockReturn from ray_release.wheels import ( DEFAULT_BRANCH, ) @@ -43,6 +42,20 @@ def __call__(self, key: str): return self.return_dict.get(key, None) +class MockReturn: + 
return_dict = {} + + def __getattribute__(self, item): + return_dict = object.__getattribute__(self, "return_dict") + if item in return_dict: + mocked = return_dict[item] + if isinstance(mocked, Callable): + return mocked() + else: + return lambda *a, **kw: mocked + return object.__getattribute__(self, item) + + class MockBuildkitePythonAPI(MockReturn): def builds(self): return self @@ -271,7 +284,6 @@ def testSettingsOverrideBuildkite(self): "ray_release.buildkite.settings.get_buildkite_prompt_value", self.buildkite_mock, ): - # With no buildkite variables, default settings shouldn't be updated updated_settings = settings.copy() update_settings_from_buildkite(updated_settings) diff --git a/release/ray_release/tests/test_config.py b/release/ray_release/tests/test_config.py index a544c68f3c56..c09900c495e8 100644 --- a/release/ray_release/tests/test_config.py +++ b/release/ray_release/tests/test_config.py @@ -1,8 +1,8 @@ -import os import sys import yaml import pytest +from ray_release.bazel import bazel_runfile from ray_release.config import ( read_and_validate_release_test_collection, Test, @@ -13,10 +13,7 @@ ) from ray_release.exception import ReleaseTestConfigError -TEST_COLLECTION_FILE = os.path.join( - os.path.dirname(__file__), "..", "..", "release_tests.yaml" -) - +_TEST_COLLECTION_FILE = bazel_runfile("release/release_tests.yaml") VALID_TEST = Test( **{ @@ -219,7 +216,7 @@ def test_compute_config_invalid_ebs(): def test_load_and_validate_test_collection_file(): - read_and_validate_release_test_collection(TEST_COLLECTION_FILE) + read_and_validate_release_test_collection(_TEST_COLLECTION_FILE) if __name__ == "__main__": diff --git a/release/ray_release/tests/test_wheels.py b/release/ray_release/tests/test_wheels.py index fb62f8ba2093..b55a0e95e99c 100644 --- a/release/ray_release/tests/test_wheels.py +++ b/release/ray_release/tests/test_wheels.py @@ -6,6 +6,7 @@ from freezegun import freeze_time +from ray_release.bazel import bazel_runfile from 
ray_release.config import Test from ray_release.template import load_test_cluster_env from ray_release.exception import RayWheelsNotFoundError, RayWheelsTimeoutError @@ -31,9 +32,7 @@ def remove_buildkite_env(): def test_get_ray_version(remove_buildkite_env): - init_file = os.path.join( - os.path.dirname(__file__), "..", "..", "..", "python", "ray", "__init__.py" - ) + init_file = bazel_runfile("python/ray/__init__.py") with open(init_file, "rt") as fp: content = [line.encode() for line in fp.readlines()] diff --git a/release/requirements_buildkite.in b/release/requirements_buildkite.in index 9006223cd6f8..02f90acb2c3b 100644 --- a/release/requirements_buildkite.in +++ b/release/requirements_buildkite.in @@ -1,6 +1,7 @@ # Requirements to run release tests from buildkite (client dependencies will be installed separately) # Copy anyscale pin to requirements.txt and util.py anyscale +bazel-runfiles boto3 click freezegun diff --git a/release/requirements_buildkite.txt b/release/requirements_buildkite.txt index b27545b692be..e4e8394185f4 100644 --- a/release/requirements_buildkite.txt +++ b/release/requirements_buildkite.txt @@ -161,6 +161,9 @@ backports-zoneinfo==0.2.1 \ # via # pytz-deprecation-shim # tzlocal +bazel-runfiles==0.21.0 \ + --hash=sha256:3e430dd9a5aba90a90bc2493fdcfce02a3ece47fb574db0f4ac898261e6b068d + # via -r release/requirements_buildkite.in boto3==1.26.118 \ --hash=sha256:1ff703152553f4d5fc9774071d114dbf06ec661eb1b29b6051f6b1f9d0c24873 \ --hash=sha256:d0ed43228952b55c9f44d1c733f74656418c39c55dbe36bc37feeef6aa583ded diff --git a/release/run_release_test.sh b/release/run_release_test.sh index 52b157a80c8f..8507284cb5ae 100755 --- a/release/run_release_test.sh +++ b/release/run_release_test.sh @@ -44,7 +44,7 @@ export RAY_TEST_REPO RAY_TEST_BRANCH RELEASE_RESULTS_DIR BUILDKITE_MAX_RETRIES B if [ -z "${NO_INSTALL}" ]; then pip install --use-deprecated=legacy-resolver -q -r requirements.txt - pip install -q -U boto3 botocore + pip install -q -U boto3 
botocore bazel-runfiles if [ "${INSTALL_MATCHING_RAY-false}" == "true" ]; then # Find ray-wheels parameter and install locally From bb9faf425c0d9378ce8fa59fd97ad4b25c5882ef Mon Sep 17 00:00:00 2001 From: Yunxuan Xiao Date: Mon, 8 May 2023 10:53:40 -0700 Subject: [PATCH 278/424] [CI] Fix stable_diffusion_batch_prediction by switching to strict_mode (#35100) Fix this failed air example due to the recent "strict_mode" change in Ray Data. Previously, we could return a list in the batch mapper function, but now we must return a dictionary. Signed-off-by: woshiyyya --- .../examples/stablediffusion_batch_prediction.ipynb | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/doc/source/ray-air/examples/stablediffusion_batch_prediction.ipynb b/doc/source/ray-air/examples/stablediffusion_batch_prediction.ipynb index 387bdea5d80b..d86def70d50e 100644 --- a/doc/source/ray-air/examples/stablediffusion_batch_prediction.ipynb +++ b/doc/source/ray-air/examples/stablediffusion_batch_prediction.ipynb @@ -121,12 +121,14 @@ "\n", " def __call__(self, batch: pd.DataFrame) -> pd.DataFrame:\n", " import torch\n", + " import numpy as np\n", "\n", " # Set a different seed for every image in batch\n", " self.pipe.generator = [\n", " torch.Generator(device=\"cuda\").manual_seed(i) for i in range(len(batch))\n", " ]\n", - " return self.pipe(list(batch[\"prompt\"])).images" + " images = self.pipe(list(batch[\"prompt\"])).images\n", + " return {\"images\": np.array(images, dtype=object)}" ] }, { @@ -164,7 +166,7 @@ " batch_format=\"pandas\",\n", " num_gpus=1,\n", ")\n", - "images = preds.take_all()" + "results = preds.take_all()" ] }, { @@ -193,7 +195,7 @@ } ], "source": [ - "images[0]" + "results[0][\"images\"]" ] }, { @@ -214,7 +216,7 @@ } ], "source": [ - "images[1]" + "results[1][\"images\"]" ] }, { From 78444ef98700b16a85ece877376fd9a028b2f862 Mon Sep 17 00:00:00 2001 From: Justin Yu Date: Mon, 8 May 2023 10:55:09 -0700 Subject: [PATCH 279/424] [AIR][Telemetry] 
Environment variable usage (#34921) This PR adds telemetry for **environment variable usage** by adding 1 usage tag: - `AIR_ENV_VARS`: JSON string representation of a list of string environment variable keys. This will help us "deprecate" / remove environment variables, since there is currently no way of doing phasing these out, so environment variables keep accumulating. Also, if certain environment variables are always set or never set, we can consider changing the default behavior within the library. Signed-off-by: Justin Yu --- python/ray/air/BUILD | 8 +++ python/ray/air/_internal/usage.py | 34 +++++++++++++ python/ray/air/constants.py | 20 ++++++-- python/ray/air/tests/test_air_usage.py | 67 ++++++++++++++++++++++++++ python/ray/train/constants.py | 14 +++++- python/ray/tune/constants.py | 40 +++++++++++++++ python/ray/tune/tune.py | 7 +++ src/ray/protobuf/usage.proto | 6 +++ 8 files changed, 191 insertions(+), 5 deletions(-) create mode 100644 python/ray/air/tests/test_air_usage.py create mode 100644 python/ray/tune/constants.py diff --git a/python/ray/air/BUILD b/python/ray/air/BUILD index 8b92eefb4274..fc0a1a9def1d 100644 --- a/python/ray/air/BUILD +++ b/python/ray/air/BUILD @@ -34,6 +34,14 @@ py_test( deps = [":ml_lib"] ) +py_test( + name = "test_air_usage", + size = "small", + srcs = ["tests/test_air_usage.py"], + tags = ["team:ml", "exclusive"], + deps = [":ml_lib"] +) + py_test( name = "test_checkpoints", size = "small", diff --git a/python/ray/air/_internal/usage.py b/python/ray/air/_internal/usage.py index fb9492dfd46e..b248f82fb501 100644 --- a/python/ray/air/_internal/usage.py +++ b/python/ray/air/_internal/usage.py @@ -1,3 +1,5 @@ +import json +import os from typing import TYPE_CHECKING, Set, Union from ray._private.usage.usage_lib import TagKey, record_extra_usage_tag @@ -115,3 +117,35 @@ def tag_scheduler(scheduler: "TrialScheduler"): assert isinstance(scheduler, TrialScheduler) scheduler_name = _find_class_name(scheduler, "ray.tune.schedulers", 
TUNE_SCHEDULERS) record_extra_usage_tag(TagKey.TUNE_SCHEDULER, scheduler_name) + + +def tag_ray_air_env_vars() -> bool: + """Records usage of environment variables exposed by the Ray AIR libraries. + + NOTE: This does not track the values of the environment variables, nor + does this track environment variables not explicitly included in the + `all_ray_air_env_vars` allow-list. + + Returns: + bool: True if at least one environment var is supplied by the user. + """ + from ray.air.constants import AIR_ENV_VARS + from ray.tune.constants import TUNE_ENV_VARS + from ray.train.constants import TRAIN_ENV_VARS + + all_ray_air_env_vars = sorted( + set().union(AIR_ENV_VARS, TUNE_ENV_VARS, TRAIN_ENV_VARS) + ) + + user_supplied_env_vars = [] + + for env_var in all_ray_air_env_vars: + if env_var in os.environ: + user_supplied_env_vars.append(env_var) + + if user_supplied_env_vars: + env_vars_str = json.dumps(user_supplied_env_vars) + record_extra_usage_tag(TagKey.AIR_ENV_VARS, env_vars_str) + return True + + return False diff --git a/python/ray/air/constants.py b/python/ray/air/constants.py index 1accc998eebd..ddde5372d4bf 100644 --- a/python/ray/air/constants.py +++ b/python/ray/air/constants.py @@ -40,6 +40,15 @@ # training with Ray Train CHECKPOINT_ID_ATTR = "_current_checkpoint_id" +# Name of the marker dropped by the Trainable. If a worker detects +# the presence of the marker in the trial dir, it will use lazy +# checkpointing. +LAZY_CHECKPOINT_MARKER_FILE = ".lazy_checkpoint_marker" + +# ================================================== +# Environment Variables +# ================================================== + # Integer value which if set will copy files in reported AIR directory # checkpoints instead of moving them (if worker is on the same node as Trainable) COPY_DIRECTORY_CHECKPOINTS_INSTEAD_OF_MOVING_ENV = ( @@ -51,7 +60,10 @@ # as Trainable) DISABLE_LAZY_CHECKPOINTING_ENV = "TRAIN_DISABLE_LAZY_CHECKPOINTING" -# Name of the marker dropped by the Trainable. 
If a worker detects -# the presence of the marker in the trial dir, it will use lazy -# checkpointing. -LAZY_CHECKPOINT_MARKER_FILE = ".lazy_checkpoint_marker" + +# NOTE: When adding a new environment variable, please track it in this list. +# TODO(ml-team): Most env var constants should get moved here. +AIR_ENV_VARS = { + COPY_DIRECTORY_CHECKPOINTS_INSTEAD_OF_MOVING_ENV, + DISABLE_LAZY_CHECKPOINTING_ENV, +} diff --git a/python/ray/air/tests/test_air_usage.py b/python/ray/air/tests/test_air_usage.py new file mode 100644 index 000000000000..8fa29c833a31 --- /dev/null +++ b/python/ray/air/tests/test_air_usage.py @@ -0,0 +1,67 @@ +"""Unit tests for AIR telemetry.""" + +import json +import os + +import pytest +from unittest import mock + +import ray +from ray import air, tune +from ray._private.usage.usage_lib import TagKey +from ray.air import session + + +@pytest.fixture +def mock_record(monkeypatch): + import ray.air._internal.usage + + recorded = {} + + def mock_record_extra_usage_tag(key: TagKey, value: str): + recorded[key] = value + + monkeypatch.setattr( + ray.air._internal.usage, + "record_extra_usage_tag", + mock_record_extra_usage_tag, + ) + yield recorded + + +@pytest.fixture(scope="module") +def ray_start_2_cpus(): + address_info = ray.init(num_cpus=2) + yield address_info + ray.shutdown() + + +def train_fn(config): + session.report({"score": 1}) + + +@pytest.fixture +def tuner(tmp_path): + yield tune.Tuner(train_fn, run_config=air.RunConfig(storage_path=str(tmp_path))) + + +def test_tag_env_vars(ray_start_2_cpus, mock_record, tuner): + """Test that env vars are recorded properly, and arbitrary user environment + variables are ignored.""" + env_vars_to_record = { + "RAY_AIR_LOCAL_CACHE_DIR": "~/ray_results", + "TUNE_DISABLE_AUTO_CALLBACK_SYNCER": "1", + } + untracked_env_vars = {"RANDOM_USER_ENV_VAR": "asdf"} + + with mock.patch.dict(os.environ, {**env_vars_to_record, **untracked_env_vars}): + tuner.fit() + + recorded_env_vars = 
json.loads(mock_record[TagKey.AIR_ENV_VARS]) + assert sorted(env_vars_to_record) == sorted(recorded_env_vars) + + +if __name__ == "__main__": + import sys + + sys.exit(pytest.main(["-v", "-x", __file__])) diff --git a/python/ray/train/constants.py b/python/ray/train/constants.py index 4b6b1ac61bce..2d7d198c48cf 100644 --- a/python/ray/train/constants.py +++ b/python/ray/train/constants.py @@ -49,7 +49,11 @@ # is restarted, the checkpoint_id can continue to increment. TUNE_CHECKPOINT_ID = "_current_checkpoint_id" -# Env var name + +# ================================================== +# Environment Variables +# ================================================== + ENABLE_DETAILED_AUTOFILLED_METRICS_ENV = ( "TRAIN_RESULT_ENABLE_DETAILED_AUTOFILLED_METRICS" ) @@ -67,6 +71,14 @@ TRAIN_ENABLE_WORKER_SPREAD_ENV = "TRAIN_ENABLE_WORKER_SPREAD" +# NOTE: When adding a new environment variable, please track it in this list. +TRAIN_ENV_VARS = { + ENABLE_DETAILED_AUTOFILLED_METRICS_ENV, + ENABLE_SHARE_CUDA_VISIBLE_DEVICES_ENV, + TRAIN_PLACEMENT_GROUP_TIMEOUT_S_ENV, + TRAIN_ENABLE_WORKER_SPREAD_ENV, +} + # Blacklist virtualized networking. DEFAULT_NCCL_SOCKET_IFNAME = "^lo,docker,veth" diff --git a/python/ray/tune/constants.py b/python/ray/tune/constants.py new file mode 100644 index 000000000000..6d3c84dc4c7f --- /dev/null +++ b/python/ray/tune/constants.py @@ -0,0 +1,40 @@ +# ================================================== +# Environment Variables +# ================================================== + +# NOTE: When adding a new environment variable, please track it in this list. 
+TUNE_ENV_VARS = { + "RAY_AIR_LOCAL_CACHE_DIR", + "RAY_AIR_FULL_TRACEBACKS", + "TUNE_DISABLE_AUTO_CALLBACK_LOGGERS", + "TUNE_DISABLE_AUTO_CALLBACK_SYNCER", + "TUNE_DISABLE_AUTO_INIT", + "TUNE_DISABLE_DATED_SUBDIR", + "TUNE_DISABLE_STRICT_METRIC_CHECKING", + "TUNE_DISABLE_SIGINT_HANDLER", + "TUNE_FALLBACK_TO_LATEST_CHECKPOINT", + "TUNE_FORCE_TRIAL_CLEANUP_S", + "TUNE_GET_EXECUTOR_EVENT_WAIT_S", + "TUNE_FUNCTION_THREAD_TIMEOUT_S", + "TUNE_GLOBAL_CHECKPOINT_S", + "TUNE_MAX_LEN_IDENTIFIER", + "TUNE_MAX_PENDING_TRIALS_PG", + "TUNE_NODE_SYNCING_MIN_ITER_THRESHOLD", + "TUNE_NODE_SYNCING_MIN_TIME_S_THRESHOLD", + "TUNE_PLACEMENT_GROUP_PREFIX", + "TUNE_PLACEMENT_GROUP_RECON_INTERVAL", + "TUNE_PRINT_ALL_TRIAL_ERRORS", + "TUNE_RESULT_DIR", + "TUNE_RESULT_BUFFER_LENGTH", + "TUNE_RESULT_DELIM", + "TUNE_RESULT_BUFFER_MAX_TIME_S", + "TUNE_RESULT_BUFFER_MIN_TIME_S", + "TUNE_WARN_THRESHOLD_S", + "TUNE_WARN_INSUFFICENT_RESOURCE_THRESHOLD_S", + "TUNE_WARN_INSUFFICENT_RESOURCE_THRESHOLD_S_AUTOSCALER", + "TUNE_WARN_EXCESSIVE_EXPERIMENT_CHECKPOINT_SYNC_THRESHOLD_S", + "TUNE_STATE_REFRESH_PERIOD", + "TUNE_RESTORE_RETRY_NUM", + "TUNE_CHECKPOINT_CLOUD_RETRY_NUM", + "TUNE_CHECKPOINT_CLOUD_RETRY_WAIT_TIME_S", +} diff --git a/python/ray/tune/tune.py b/python/ray/tune/tune.py index 97ad0b53f957..1dab032bd425 100644 --- a/python/ray/tune/tune.py +++ b/python/ray/tune/tune.py @@ -24,6 +24,7 @@ import ray from ray._private.storage import _get_storage_uri from ray.air import CheckpointConfig +from ray.air._internal import usage as air_usage from ray.air.util.node import _force_on_current_node from ray.tune.analysis import ExperimentAnalysis from ray.tune.callback import Callback @@ -565,6 +566,12 @@ class and registered trainables. ray._private.usage.usage_lib.record_library_usage("tune") + # Track environment variable usage here will also catch: + # 1.) Tuner.fit() usage + # 2.) Trainer.fit() usage + # 3.) 
Ray client usage (env variables are inherited by the Ray runtime env) + air_usage.tag_ray_air_env_vars() + all_start = time.time() if mode and mode not in ["min", "max"]: diff --git a/src/ray/protobuf/usage.proto b/src/ray/protobuf/usage.proto index 0f5197cf1529..6773166ec92e 100644 --- a/src/ray/protobuf/usage.proto +++ b/src/ray/protobuf/usage.proto @@ -130,4 +130,10 @@ enum TagKey { // Name of Tune scheduler algorithm or "Custom" if user-defined. // Example: "FIFOScheduler" TUNE_SCHEDULER = 502; + + // Ray AIR environment variable usage stored in JSON list format + // This lists which of the environment variables exposed by the AIR libraries + // are provided by the user. + // Ex: ["RAY_AIR_LOCAL_CACHE_DIR", "TUNE_FALLBACK_TO_LATEST_CHECKPOINT"] + AIR_ENV_VARS = 503; } From 27eaa8d5be02b6dbe5f8b97a0ce9abffe07aec6d Mon Sep 17 00:00:00 2001 From: matthewdeng Date: Mon, 8 May 2023 12:01:16 -0700 Subject: [PATCH 280/424] [docs] Improve documentation for installing extras (#33617) Signed-off-by: Matthew Deng --- doc/source/ray-overview/installation.rst | 67 +++++++++++++++++++----- 1 file changed, 54 insertions(+), 13 deletions(-) diff --git a/doc/source/ray-overview/installation.rst b/doc/source/ray-overview/installation.rst index eb5c002cc031..78f7881d6750 100644 --- a/doc/source/ray-overview/installation.rst +++ b/doc/source/ray-overview/installation.rst @@ -11,25 +11,66 @@ Official Releases From Wheels ~~~~~~~~~~~ -You can install the latest official version of Ray from PyPI on Linux, Windows -and macOS as follows: +You can install the latest official version of Ray from PyPI on Linux, Windows, +and macOS by choosing the option that best matches your use case. -.. code-block:: bash +.. tab-set:: - # Install Ray with support for the dashboard + cluster launcher - pip install -U "ray[default]" + .. tab-item:: Recommended - # Install Ray with minimal dependencies - # pip install -U ray + **For machine learning applications** -To install Ray libraries: + .. 
code-block:: shell -.. code-block:: bash + pip install -U "ray[air]" + + # For reinforcement learning support, install RLlib instead. + # pip install -U "ray[rllib]" - pip install -U "ray[air]" # installs Ray + dependencies for Ray AI Runtime - pip install -U "ray[tune]" # installs Ray + dependencies for Ray Tune - pip install -U "ray[rllib]" # installs Ray + dependencies for Ray RLlib - pip install -U "ray[serve]" # installs Ray + dependencies for Ray Serve + **For general Python applications** + + .. code-block:: shell + + pip install -U "ray[default]" + + # If you don't want Ray Dashboard or Cluster Launcher, install Ray with minimal dependencies instead. + # pip install -U "ray" + + .. tab-item:: Advanced + + .. list-table:: + :widths: 2 3 + :header-rows: 1 + + * - Command + - Installed components + * - `pip install -U "ray"` + - Core + * - `pip install -U "ray[default]"` + - Core, Dashboard, Cluster Launcher + * - `pip install -U "ray[data]"` + - Core, Data + * - `pip install -U "ray[train]"` + - Core, Train + * - `pip install -U "ray[tune]"` + - Core, Tune + * - `pip install -U "ray[serve]"` + - Core, Dashboard, Cluster Launcher, Serve + * - `pip install -U "ray[rllib]"` + - Core, Tune, RLlib + * - `pip install -U "ray[air]"` + - Core, Dashboard, Cluster Launcher, Data, Train, Tune, Serve + * - `pip install -U "ray[all]"` + - Core, Dashboard, Cluster Launcher, Data, Train, Tune, Serve, RLlib + + .. tip:: + + You can combine installation extras. + For example, to install Ray with Dashboard, Cluster Launcher, and Train support, you can run: + + .. code-block:: shell + + pip install -U "ray[default,train]" .. 
_install-nightlies: From 7c2469cedcacc2399122d355ec215d99bc507890 Mon Sep 17 00:00:00 2001 From: Antoni Baum Date: Mon, 8 May 2023 12:58:41 -0700 Subject: [PATCH 281/424] [no_early_kickoff][Train] `ray.train.huggingface` restructure (#33278) Renames HuggingFaceTrainer/Predictor/Checkpoint to TransformersTrainer/Predictor/Checkpoint for clarity and moves them to ray.train.transformers. A soft deprecation is introduced so that the old location and names still work. --------- Signed-off-by: Antoni Baum --- doc/source/ray-air/api/predictor.rst | 2 +- .../ray-air/doc_code/accelerate_trainer.py | 2 +- doc/source/ray-air/doc_code/hf_trainer.py | 4 +- .../examples/gptj_deepspeed_fine_tuning.ipynb | 26 +- .../huggingface_text_classification.ipynb | 74 +-- doc/source/ray-air/trainers.rst | 20 +- doc/source/train/api/api.rst | 22 +- doc/source/train/config_guide.rst | 2 +- doc/source/train/key-concepts.rst | 2 +- doc/source/train/train.rst | 6 +- python/ray/air/_internal/usage.py | 2 +- python/ray/train/BUILD | 80 +-- ...ingface_basic_language_modeling_example.py | 11 +- .../train/examples/transformers/cluster.yaml | 2 +- .../transformers/transformers_example.py | 2 +- python/ray/train/hf_accelerate/__init__.py | 5 + .../_accelerate_utils.py | 0 .../accelerate_trainer.py | 4 +- python/ray/train/hf_transformers/__init__.py | 15 + .../_transformers_utils.py} | 8 +- .../transformers_checkpoint.py | 104 ++++ .../hf_transformers/transformers_predictor.py | 243 +++++++++ .../hf_transformers/transformers_trainer.py | 469 +++++++++++++++++ python/ray/train/huggingface/__init__.py | 8 +- .../ray/train/huggingface/_deprecation_msg.py | 8 + python/ray/train/huggingface/accelerate.py | 10 + .../train/huggingface/accelerate/__init__.py | 5 - .../huggingface/huggingface_checkpoint.py | 114 +---- .../huggingface/huggingface_predictor.py | 251 +-------- .../train/huggingface/huggingface_trainer.py | 476 +----------------- .../tests/test_accelerate_trainer_gpu.py | 2 +- 
python/ray/train/tests/test_checkpoints.py | 4 +- .../ray/train/tests/test_trainer_restore.py | 8 +- ...int.py => test_transformers_checkpoint.py} | 13 +- ...ngface_gpu.py => test_transformers_gpu.py} | 13 +- ...ctor.py => test_transformers_predictor.py} | 23 +- ...rainer.py => test_transformers_trainer.py} | 61 ++- ....py => test_transformers_trainer_steps.py} | 11 +- 38 files changed, 1136 insertions(+), 976 deletions(-) create mode 100644 python/ray/train/hf_accelerate/__init__.py rename python/ray/train/{huggingface/accelerate => hf_accelerate}/_accelerate_utils.py (100%) rename python/ray/train/{huggingface/accelerate => hf_accelerate}/accelerate_trainer.py (99%) create mode 100644 python/ray/train/hf_transformers/__init__.py rename python/ray/train/{huggingface/_huggingface_utils.py => hf_transformers/_transformers_utils.py} (96%) create mode 100644 python/ray/train/hf_transformers/transformers_checkpoint.py create mode 100644 python/ray/train/hf_transformers/transformers_predictor.py create mode 100644 python/ray/train/hf_transformers/transformers_trainer.py create mode 100644 python/ray/train/huggingface/_deprecation_msg.py create mode 100644 python/ray/train/huggingface/accelerate.py delete mode 100644 python/ray/train/huggingface/accelerate/__init__.py rename python/ray/train/tests/{test_huggingface_checkpoint.py => test_transformers_checkpoint.py} (85%) rename python/ray/train/tests/{test_huggingface_gpu.py => test_transformers_gpu.py} (83%) rename python/ray/train/tests/{test_huggingface_predictor.py => test_transformers_predictor.py} (85%) rename python/ray/train/tests/{test_huggingface_trainer.py => test_transformers_trainer.py} (85%) rename python/ray/train/tests/{test_huggingface_trainer_steps.py => test_transformers_trainer_steps.py} (95%) diff --git a/doc/source/ray-air/api/predictor.rst b/doc/source/ray-air/api/predictor.rst index 92a4c818f720..34ab11a484a2 100644 --- a/doc/source/ray-air/api/predictor.rst +++ 
b/doc/source/ray-air/api/predictor.rst @@ -92,6 +92,6 @@ Built-in Predictors for Library Integrations ~lightgbm.LightGBMPredictor ~tensorflow.TensorflowPredictor ~torch.TorchPredictor - ~huggingface.HuggingFacePredictor + ~hf_transformers.TransformersPredictor ~sklearn.SklearnPredictor ~rl.RLPredictor diff --git a/doc/source/ray-air/doc_code/accelerate_trainer.py b/doc/source/ray-air/doc_code/accelerate_trainer.py index ccefa8776ae8..ebd4c148817b 100644 --- a/doc/source/ray-air/doc_code/accelerate_trainer.py +++ b/doc/source/ray-air/doc_code/accelerate_trainer.py @@ -5,7 +5,7 @@ import ray from ray.air import session, Checkpoint -from ray.train.huggingface.accelerate import AccelerateTrainer +from ray.train.hf_accelerate import AccelerateTrainer from ray.air.config import ScalingConfig diff --git a/doc/source/ray-air/doc_code/hf_trainer.py b/doc/source/ray-air/doc_code/hf_trainer.py index 1d81d36dc35c..36b3b23164f1 100644 --- a/doc/source/ray-air/doc_code/hf_trainer.py +++ b/doc/source/ray-air/doc_code/hf_trainer.py @@ -9,7 +9,7 @@ from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer import ray -from ray.train.huggingface import HuggingFaceTrainer +from ray.train.hf_transformers import TransformersTrainer from ray.air.config import ScalingConfig @@ -81,7 +81,7 @@ def trainer_init_per_worker(train_dataset, eval_dataset, **config): scaling_config = ScalingConfig(num_workers=3, use_gpu=use_gpu) -trainer = HuggingFaceTrainer( +trainer = TransformersTrainer( trainer_init_per_worker=trainer_init_per_worker, scaling_config=scaling_config, datasets={"train": ray_train_ds, "evaluation": ray_evaluation_ds}, diff --git a/doc/source/ray-air/examples/gptj_deepspeed_fine_tuning.ipynb b/doc/source/ray-air/examples/gptj_deepspeed_fine_tuning.ipynb index 86fcf7aa2b0f..fa941d84c640 100644 --- a/doc/source/ray-air/examples/gptj_deepspeed_fine_tuning.ipynb +++ b/doc/source/ray-air/examples/gptj_deepspeed_fine_tuning.ipynb @@ -402,16 +402,17 @@ ] }, { + 
"attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ "### Fine-tuning the model with Ray AIR \n", "\n", - "We can now configure Ray AIR's {class}`~ray.train.huggingface.huggingface_trainer.HuggingFaceTrainer` to perform distributed fine-tuning of the model. In order to do that, we specify a `trainer_init_per_worker` function, which creates a 🤗 Transformers `Trainer` that will be distributed by Ray using Distributed Data Parallelism (using PyTorch Distributed backend internally). This means that each worker will have its own copy of the model, but operate on different data, At the end of each step, all the workers will sync gradients.\n", + "We can now configure Ray AIR's {class}`~ray.train.hf_transformers.TransformersTrainer` to perform distributed fine-tuning of the model. In order to do that, we specify a `trainer_init_per_worker` function, which creates a 🤗 Transformers `Trainer` that will be distributed by Ray using Distributed Data Parallelism (using PyTorch Distributed backend internally). This means that each worker will have its own copy of the model, but operate on different data, At the end of each step, all the workers will sync gradients.\n", "\n", "Because GPT-J is a relatively large model, it may not be possible to fit it on smaller GPU types (<=16 GB GRAM). To deal with that issue, we can use [DeepSpeed](https://github.com/microsoft/DeepSpeed), a library to optimize the training process and allow us to (among other things) offload and partition optimizer and parameter states, reducing GRAM usage. Furthermore, DeepSpeed ZeRO Stage 3 allows us to load large models without running out of memory.\n", "\n", - "🤗 Transformers and Ray AIR's integration ({class}`~ray.train.huggingface.huggingface_trainer.HuggingFaceTrainer`) allow you to easily configure and use DDP and DeepSpeed. 
All you need to do is specify the DeepSpeed configuration in the [`TrainingArguments`](https://huggingface.co/docs/transformers/en/main_classes/trainer#transformers.TrainingArguments) object.\n", + "🤗 Transformers and Ray AIR's integration ({class}`~ray.train.hf_transformers.TransformersTrainer`) allow you to easily configure and use DDP and DeepSpeed. All you need to do is specify the DeepSpeed configuration in the [`TrainingArguments`](https://huggingface.co/docs/transformers/en/main_classes/trainer#transformers.TrainingArguments) object.\n", "\n", "```{tip}\n", "There are many DeepSpeed settings that allow you to trade-off speed for memory usage. The settings used below are tailored to the cluster setup used (16 g4dn.4xlarge nodes) and per device batch size of 16. Some things to keep in mind:\n", @@ -564,7 +565,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "With our `trainer_init_per_worker` complete, we can now instantiate the {class}`~ray.train.huggingface.huggingface_trainer.HuggingFaceTrainer`. Aside from the function, we set the `scaling_config`, controlling the amount of workers and resources used, and the `datasets` we will use for training and evaluation.\n", + "With our `trainer_init_per_worker` complete, we can now instantiate the {class}`~ray.train.hf_transformers.TransformersTrainer`. Aside from the function, we set the `scaling_config`, controlling the amount of workers and resources used, and the `datasets` we will use for training and evaluation.\n", "\n", "We pass the preprocessors we have defined earlier as an argument, wrapped in a {class}`~ray.data.preprocessors.chain.Chain`. 
The preprocessor will be included with the returned {class}`~ray.air.checkpoint.Checkpoint`, meaning it will also be applied during inference.\n", "\n", @@ -579,12 +580,12 @@ "metadata": {}, "outputs": [], "source": [ - "from ray.train.huggingface import HuggingFaceTrainer\n", + "from ray.train.hf_transformers import TransformersTrainer\n", "from ray.air.config import ScalingConfig\n", "from ray.data.preprocessors import Chain\n", "\n", "\n", - "trainer = HuggingFaceTrainer(\n", + "trainer = TransformersTrainer(\n", " trainer_init_per_worker=trainer_init_per_worker,\n", " trainer_init_config={\n", " \"batch_size\": 16, # per device\n", @@ -601,10 +602,11 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ - "Finally, we call the {meth}`~ray.train.huggingface.huggingface_trainer.HuggingFaceTrainer.fit` method to start training with Ray AIR. We will save the {class}`~ray.air.Result` object to a variable so we can access metrics and checkpoints." + "Finally, we call the {meth}`~ray.train.hf_transformers.TransformersTrainer.fit` method to start training with Ray AIR. We will save the {class}`~ray.air.Result` object to a variable so we can access metrics and checkpoints." ] }, { @@ -642,7 +644,7 @@ "Trial name status loc iter total time (s) loss learning_rate epoch\n", "\n", "\n", - "HuggingFaceTrainer_f623d_00000TERMINATED10.0.30.196:30861 85 2579.30.0715 4.70588e-07 1\n", + "TransformersTrainer_f623d_00000TERMINATED10.0.30.196:30861 85 2579.30.0715 4.70588e-07 1\n", "\n", "\n", "

    C`C*cb(gi`(DR!@Tyq>0+xwF4 zuYJ^R)_w@j!TW#=t~k8fP&pp=r7J-XCe!uxp8!3OxH?u_&76s`PC|=5^V!3gz@O$!Fk_lq2 zaapmLQMX2=n!;({OV|xGyL0X(Ox4cecJN;b+0NOx9#~R3=4RZNTQ2`3h#YX@)GouW z%A9C2?)EJCZOQV!O?`C2yOZ%8eqf4RiD@cNEJUs*A#ZS+lgq2AT?QSQi{FvNmL^i~ zrkL1M7TgTOmJ^3YPb9i5NpKo37IJ<*@4;R^02~&1;2jS3=Hwc-u<3(k)p=L7D?hACS~g-8(L%LmcIxi9qwi%nEK~tUr@249vO_M8ehf0vzVF)*TN@2URU@4`%C5XO`RDd3*Prd&H+etpgxL zSUL=s(h~wSW%c^Mr@}T*@K+NY=X7HYHPHAfHo#$avOgI;mo;~+{-j!L%s z;!t@UQT2)cLb1Lwh{aAGqP`6M)1}y_0V)rp;mqa&2(2ZD;(~o}5)5}&E#~1BJAc1I6iS_tjzu!s?p-J%gY)-6Ft^=SI4=dC z9AB&Cb~~O;44irTQ-@MOVcD48I zQ=-19l_gH>Rv|(qGut3U#zDKbG;+gHfbBf-P#T2&Mz_@^h z)bp*(k}`FoI!x7c4xd*ZmHS@fQ@G7vDN zZ&^jM{wZdMxx-5uQz(d6t1L{F)dtF6g$@cK?0&xC@a-bEHztJ-EFhp0<3C9_;Z~RM z))xfmD!9%=Z-YvW4vJS+6Oz6)c7*2HBOV>qH^!`t9~{Q+R9;V{2Kh#%b1U9=YszQ@ z&7KIC-XAy^eQ@I3pD1Zw8~+fOVC5~%K3AG{@n$C#oYCm5;<93*$_2mZD_hGy1TbOB z^9+Bb@iS$a3n&*15Y#zg5px z?|=*1v1Vu~2x>NH+TJ7$@5)YjYUU#0@X#SYvpjAF0LXFoF8Lb zr>p9-S@zEP#1m?VW{HsD=!`!!Qu*2C=U)o)zneA#`a428-Ze#aZ?(g)uo{Q#Z+O6- zB9IeT6=jvZSVhHMSsh?LEi7S0-K4Nu zXWpEUtQ`TDeHVs+GT#vrLm})3jLq*nUeAn9RV#kkk$>ao=-{H--|#x!ygE}2|6kH_ zvZ$+hh{A07uF;?X^Q$y}Pj!mBp2ESx93KWOo6i3KwB;GvLL>@KEz51TJbZ+HiWxV! 
zQ##Je%j%fQifd8-QgG_C&s zq~=_t_4bgndQ(qT@B|U-14NYc5Sbr^TID0eQHEZ3Y<3L0bcZ)V@ho4lq{+=7ZMjqg zte9#LaO6%_%TkEPHbjQsiaupQ{vN@z6?D+DTt*P16r=Eo_<2wiA;jb#P#tY!H%i68 z8$fYsScXsa_jzaZBS;J1KRON6CjXQZQPU}If=NVL5%5WK)3gM0`=%rB!j`g<0Oqc1 z7tN7~_&49NycAS8VoEgkfOsgtWj6}DD1nyDTm{}}UzhBmD%_BXc!0&V4_cO>p7p&E zS2)DRi@lobah>N!$EK#};RCeQjGn--QCN#2gY4hz3;b>ju1We4|7gis34gYZXxn&a zPgz~P5wAq7feG$7i$)_$(zK=)?Z{grzA}`@Dhmqd=vOUq&$T)jNb?~!8)?dOYYhIf z+WvvTl5eC=5Om-rRafgY3tA~Mlw`@H+43i5Gz*>dQESUkA zc7g&+PRn(sw2v0c!bT3><-~HMAO`<8bvv`+myR}D3tLQ=F}Eo<2Zi=_fVEeY#qGiw zeY<*vLEY)TQm6ea`L`es5p|0*wIa%n&OF@K4G!oGu8e3Y5WcbDsoN0xjHwK9$0%#g zP-l_2;dLvGW;D3Cu^xzgL{GOdTMu_dXNzMoknl<;istwAkb2>x$=5r)Oh(KfD}{|e8hBXL?9_aH;K>kLKKKpIeXK9ev}j_=&^xI}@2pv6aNW4d-HFYRnf$vjbM^qcYEN zvdw&9qR|lfr7G(qaJ*jPzIpfkW&P$#{bmL^s}v9S@oaozvJX!zh3T~rvAUsdaCvP| z=LUjjrvhS2K6MiB_H4d?U=F-as~tIuiwli*>jlFheu+L!i7$;)AGH0Q)ArwIgW?|n z*<(lI)bJTyMtiL11z9SAd?^hwYAs=Z;n2r`HV z4*@Wz%@c)RHV&?8I0R@P1-T1PSfN3rF)Lo1lOnm0PtPu=Q3{)8QoqXcCraSo3{m6d zO8v0FylZfjqYv-oQ}0TCGb~VE7>~I@0LUBFCvl@SF+{eQ#7K8k>V5<0ayNe^on2Ck z7<))M5WPBf;RNr$7*bM+lV?HF8SILP-S*UHT|=IAKNQ7v73v@si|IiQZrHXA_2dX1 zFE`3W$dK7554-ynP5DgEc&k#t)?}kGgyZb}*^zlkTvM>)}LwPfAu{kUi2NdSteQ{JV5Gi={ru!}>|K-j)vA zYZ@r}GldNSAKP!|6femtF(lM`j~a?;EQabs<-!H`z>Ei}Wx zZIWMj*2L)?`>#O3?Ht!ZX2>yDDcYmuGCVGO7vM5=es6)nk4gA+&wz-Yx`m(8TT0y+ zo+9T>`jj%ghHrs$P|xy(AJGHjW<{VQc_D)fdMiLoY5jCtozhW!gzZiH68fj(hE zA{B5l+-}6RG+}_niXuD4C${dJIFXP@PzX#4`#`OK7${u{%dP-S(wxzek(o1C`m!t@ zu?Gwivnm9#CCVTEK*5u+_ zTblN%i)S`8FX}5?Smrq^LU(5{G|5)@)7*>R!NniS8jSU)%vUxW14`eaBO!sG2G=>| zfYJnwppwuuAv<+fe1HDw@bqse^V+aEUd-(8gJ|gjkvjm}SNC$|I}WJiv7{yCd8#|( zXM>;qG#NpJ{%$J5)&9)kNzrQ5&_kq28J(G zRz-&cq3H|}Wcu5IMeL3N!ryFLgyxI0m-ugfpvax!-dCgF11@UITeC!S|3IC5PY4$hUbm3crL^gSu3v&rQR|VqERv}Fr zIlfh6jY^zLvPY{%2pzZJNR@lcqB#UHn78KzM#Co+F3GnuP&dfM1wD(=A zRn{^Qe4EwF!?Y{a!y@FCu#K}KzB)Y8WtYCs6{4dqLKJ$EpwIlfPBaK<5%E93NMYY1 zaJw?S-e zY=gHoDSf-c6xiG?ex-KmWhix>mX`g5fAB=&)Le2fYZY(6s{@6 zg3ARV*%8Xt1})C;ww=lNt{tqwip+B90t(|a@!NgI4dzj<@SHmR#&hiV%UU-!TFsV} 
zI0?T}Xd}=G$sPTwHNdPca^)YVG?`YDZ*kW-VAgM2zn+~`0`VPsAeM&%Q?9jP#Jt04`4P*~MX=@0iq%sCOrf0lX`t!!W$u~bOZ z-Q=zplC?^c{x4~z*F)q5%jrAaYxb1YZbvso z*1RZgR`@QwR6b^~^AGWZbF~_IrW6hPAv*TzgGl>0YUQ=4(;Xy?{EN1$hL#46+$ar4x9}f?N4YhtN(7e*g+`8eQPM_vE7Acvb25De zwki7GF%5BQHGkqw`jK_}P21KD1QH9PseeK!UrV zl{Svo@%qfq<~_+twcZM$U38?|C2tU}xVQ>2a%CodExYvm00(<+h1Qp%*}ji$_L|3Sx7?R7WGs6T6-R}z|Nj8~dK96{)8pMS} zX)bSGVaiJk9I8c%?FH*mI!!>P73F($SQ6=JZZ2@DeouKjBrPEcf(aqY%XGv zrnckawDAS~suOC(a!Ktr)TohPt)U}TtmUjylE{<=p+a2tJCByl+4iL8ar1W4UU$r^ zp=_QCdW z?Fo{`aslOLY{^zhnbMw;s`=|jVZ`pXeSJ}#?VkR7a9qhpbheJI#VDVaX^QMntLNLm zzpRjGzuo)aN#Ol7PxY;8e;ma;E5fS^+vlrAsX8UM0iTVq(h;VNZEA?TTDoHgAZoo{ z6c!fT9jr;(723%b@&(YzyJdfP^hRp6Wys8z8Ts~VD9EHTyA5Gx$N8*Ms$ta%f znW=iG;Lj~~jiyec4Y-mDlm{n?Z=QvOdG!V$ll(U4O0wFD1i4t-3$16e6{y90`Vf6= zh~wQxXRzE9zQm;5Jf)nSqgTa>;?}NHyHwRtQR85#(uPFV{6W{KjPc23jWg-~!K|-& z^h5Qn1Wt++y1-rn!K<}SzDOz=FY^?cm}44Qn$uU@prju~SwpwK?|zP^PMrwB2F&46 z7?%R-=Doxn@=aADP8DMw9angCA9y`q5GghF!bmBFH;DkGF}1iC_yN}WTNkiK&$ ziiAp zaR}6nkcH0EczP3jr*`NBTnH-F$J&m9Qq@#x_a+wFY7^VLLEcsZ{fA$)=6UY~a%c_h zQb1j+%FQG84gpJt=kju=KB5WD<}Q3l-FO*6@BHa=GTTx3pS&RYi}cm@mYZJ|3))x^ zV`E90fmRFyuai?@Ikq&`+-FprtZwx+KjfrS+_yYk?!##5?B_`O#~faTn>HfF-4uov z+zroF?h&kyoxMI+RCrv|*uHZSM)I*w*!z`#hmFzJsiJ;H3>(PvNLN|;BtI(a{*WtY z^!_|l?@L9;pL_kHOtJaVWF7RNmvTc>#B1m2Ax=WY;G?&ZQ8Wg-`@TXE~Imv+9 zRQP1hKLCa}M3A^}%a-&K?I=(4HmuQ2Z=;zi+q*wxdTBoKKTo7O2b}6BrN|mFM>$Z=Enyr$k zaN_E^`>a~WCV-{%3k}^{-t5x3t7-V^E80)I+Ckeuz#q|GajPjF!KtSafe>BL|9r^+ zDb?j~U7Tqm!-XFYYFu&-{Hif#qdTBB=-E=%T&-s18rkUmC+TAC!KNmVcJnHs=?tMk zk~pqn`@8k-0%^q9y4M(9Xm_pv<#~&Cv}U`7tbBc^``H<;Y)n;JQ0!A4P3TklM%3&s zst@YVS5LbrJd3DtI)`(vS>KQ4CJWQ+*VbVBa5w)d*Y}$rT}@3J)e_?mU+El6!zz#2 zDpm{s%5YA(zCNA%+`R(&GwuiU&Xy4uFE@0p)*An&RdwyMjC}WlmYR6B2LcZG?!4Xi zaL0aJG~;fb2LzxLaYbAPsic;RpD=0%H5l+{c|3~p<1woaln)<4Jr@|(6_1CcP!cOD zfLGIR_@2NE>#3x|b_JukP2|py_kB3iWAXYfG@0~O7?0U2*}!RIQ~8iUbbrF>0=R7|p+q{qm&Xasjb?iwV>8+oj_ zG4R14sa1Dev(Sdc*3~9isLnsos~SMB2M&vNMpiUq9&6V8WR!wqvrM#XO86_osWN4w 
z2(k1!RsEFDN7>-Qq{v^uqh9Y1H)7QDAp`bZw8Vn;LQ&IFecdK(%vGS5Fh<#Ne1Z{X zD(M&%6Xbg;vw3z2P1=|p80r;i_Mfc_c1KfKzmgcQC>I{^#%M5)I3Mp2$rKt&gffG% z-;6jd&Fl&ih6ym*<;DHdU8^$y;S60V9pIrY9)v!6?6uj|$Jz^rF4+4A&mF9lRjXqK zm{FbGO~^jXDz){88X#R+=-V~~T9|a-er!0iZf~@IDPi=UM4_cyTWoHvve(O?%BJ!0 z>v#yC+Ps!z=9bU|iQfCtAQhgXgdo{WaXoE+zFiHi)E*|zofe5Tm{z8-*%ByCqz1$z z?jg=Oip(jDs)_i3Vfg!Khe*O|-zSxvD%dVV<%@x~9ig;@o8E?7kXm`k^ATW&o5d66 zMB`*?5VVv??&iRqPucIDkrLoXno~vab`#|U;-!hsvb5ILVdnhUoK=Q8cIG>*7qn60 zA!2dj0RX2Frj8LqB`*ui3;%KV|5R))Z@YRU;raRyZ7WBXog&01T*WeZP4cT3dMrL9 ztKBp6x6j`7zn1KvX8lI{M~f4IM5`Sp(F2+U|C29Ph3Ckc)6UZPVd34i6-=UbahmD` z&2o3ZlKt1)xx>}ynP-NN7ur2X&KB%PB$J{n?Xr$6qnt`VoQPxP| zDe4(eI^uy;5pFB%)p0vGVtiz*ppcc>>{KZS>dtZ@y6o1Vf+k?0$#ezp%j)x*RFFrKE7$yldf*kYjz>+pBY4jzhH|XBj z^CClrng!x1j87gAW(az%I^iE=P z5V!F4vHxUNHCCA+N2Ckkj}{atRQ>l`-QXkNiQ2iGbgbV>SAI8fYJh9VZF--2Dci;q_>gLITaklR_cVafe zAW9)pRGwikDkiKf3Z+57r-_jiI&cML-MFT904p(*oPOdn88UY>4`|lA#n78QW>e+h`B2H`8WBMsMVeb7 z$Lv$LqLN!_)2<`8K=&_|D7%Y|AN! z+>~JMux2rAgsBHqTci5OI=qq;ewI2!IMFGXSog*_bBY03Pvb=7i31G!e|TlxhE#uk zqUH&&9UAR*;4Iy(4|q!OTtz$Po}yGe5DTg2(=f)SrA&)0hF>Nz=Lt70dZK-KD$eLC z?%@CF)MRL|vYW0wiORc(b<_%MYJpdU912I*-&b9{Z^J(bsSJ<8p2 zkszUb@<2H7Ber`6>|M&j$?Ff5 z3I3igUK>ugqo*AcOxh)k-T4srP0GKK{h#kc5exL-Rk6|H zsUs%+U%znh<)UzCc(w=$`uF+y`lp2RdfJMy==7o!~a3SOHd5-$!WK21SnfW z1<6Y!SK*`=W_9EsNsH3C(I|4hpd}hd`;bB&|Ee6^Q8-&Ax=~Vw$kiB;p5g=b7070? 
z=XkVS>s~+hWjcnPR|f7{vU+6a6nxf^!A(yCxi1&c95^Vp$21t5CcLF6VND7KI=NqFfeNWZ-<**TE(B#sI6?jc%^{IAC7%@eStJvBmqjQ)m*eAP_f+UPdet?TJ2-)r&iKf1A4K`~m*&Xi96TxYF zY=H0kif~X4?wWJ!=UR|j9#=%C7T^7)HusKEdY8#Od_vR39jFjWu-Gp%yLf1Odi zuwz4zIy!&=#}5vJl}Ak`#10CMN33w83g=h2r7GO}NuQkms6%_A*9BEA&0Te+_Pu(7 zt=EI>ClLtJH8CiV;xY*6Ps5~nK7M_@AMw~u{yEwn#I%lYzL$(Mn@duusrYZ>&IWrI zvnYL#k(Y(LFd8TdlibDI?s4s6-T+P**)4AXPzSk~hi!vaocIY;GDe?-;kQX-)m+c>3NM z#&`1wzkqW{L8JXvx^6?p{`3!D~e1lh%YPsi(Y{gZAms+8sw2(r_ey|yI z6v5>DB`IGv8mihQyFZB)(;!@&?7S!sTs|jwdiPKzE~Kp#$n0mHihs^Z3dNHWt=`^r z7K1ymJxYoL6X|Y)DvoY3dj`@8=&aV+isKN+ zPLQXWa@CC|_(p?of^(KRJ+s@tLowll03EW`f$8VodxrBw8(~jFNdsRM$}g%jN^1@t zur!9!ifC<`nlM5sa++9jYkJP@zN}GLbAxz)tGcoKW;LO#*XEx|k4^Z?6gU^PS3LF< zGI&aJJ6smU2$V-{cNF0bIX!&ReFUDZ#j^=QX*9$dsF)_34<8pM}>(ytRqd_ z&EgL=tj0-uT|7#-b1xqdqzOBxbqAdUoDE$69h;r}yx_)wBOmrC2Z$qELdZyCvFruR zZ`JNkR()|D0G^?94! z>eW;VF8Q6F47CYel{6{NsJ9(As6{%KzXlt7k?g-i+>vk{r;MsnLOmmb1oa02X<8*U zk#%Cbl_yn7np-7j?C2yX_YiKq{!qyWjTTPWcCW1a*r$>@ck#k9nQ@U#n{F_G2lJsb z6Ag^U*AHp<>sztltqY&Ld)dDmm)si{CIzz0V+<}QPWqC!DCcvR2U{MfXrC*D>62R| z(U#KOxsRV&nVFG*J7(6Iy(bZPlcRYF5w6v1Rv^n=#DNM7iH+uej(mWv&X7kSoyZ(o zxlvxs#?T@V(dQu!%FT~QQ9!zyWlO@xm&`D4{9{5KL#*b6_mWOOC+7!T;7Fna(_9QXp+S{Xyle z5HH%>_@|)#PZN@3k7_Nei<9!EergKL5_5oe^?E^B`|na_qv_la zQ&ZF9xgz*^wbBPSH6Orgh&^F1FN;5!^}nw|&_{2sX)vsU7-;E&ftp>s0e8dyVh;h{ z6^8mh2_9|71;y!-V`Nn<%$z@xZezv8WcShC_pPR~s1&^R_VzVEP7#pLg03C}hy9bX zVhv+0Kf=He_};iBfqyOvii!Z1-UJf#3UG_;?(Wh7aX^918PZ$)0S%Sl3l&DZuf*bs z+dz!k5Foj^ockyn-tg>~1I@aW;KHzuC7bS3(LGn)yj{Ky(ibTQZOla`ksHL#N6fn_pk5H?}A97jaEm-^kmKjRYDan#R7wu zhT+;kv!POXQ*oJK6wwW}%+UgF&xuOr6YdQ!mdz$88H?A)=b8s#onU!+iz5Hau>(E< z3{deUax)JJ&At+x<5n-(J%dw)HmlQ)ik$!luA<7DdB>&kbA*-3s-e}M&<{T!_V1^w z1|K}xRhM6Auh|7D-S;_V61f}-t7ZEtIWwk&;yMKa9?_8SLzI7WbgNY?2X$W>K4PevlaDFvwHS)T4y03r$^O)xofdo*7P#7Z+*C5ga26qq=qpKM-=2S@SmI%S z9lA0QysCIupn4sW9jH|!cAR@_<0mR%mf1Qh3pMntatsOz!UR=aGA!Mb?(+HwcS+xT z50Y9*GAinm)zYYY^NU8zTNMkC*(kLzk`m6A-%f0m6eeT z++{!v@5w2y1@F;?5$Ys|gczr|Ct8XSr#hl>WiB&`pTMv$hD#P?S&_oaDBV|>ki004 
z%rVzUCcV>mbhAVX>Y#8U$vk=MENQ#IAC;<`?l_Fv!1!jto@MewRmTiss6_OE&?th!)ufh8@LGvY}Ca-~>Q0{j>lM zz?)MZrcTaAi#BIOvKUqWfidNsp$K(){XHiO>3B9gQuT-}98PqSQ#?_8kWh1BFfDU~9nre|rwh zf&HyvNsEz+`ZCgl_a0zF?j3wK~6E?F$9yK4af0bChawrXg}7%u%4fT zxY*oRSzUH2(`>A^HnsQ*5if?d=7d@N|5rg|_*A030fp0l(Uy9^EeWvZQ<6xf2LsdJ zWgCXlm2aBLg=duJXeg<`8C`2@d7H2}+||=RXW&85L0W(U z58?~KgeE13>A59zlIP<@TFC1bP(bt?XG04;oJ%D*DxJP?8;<_k`_OdW2#Df&=|@O) z%T;3m!X$My@-k})g(I0J<&84|BH}ybObOo|a2N^#-|$SUqxpX*n?XmQ=K?EWQKi-}3Td9J>O9}}G0r&EzuDn# zJ0pmOo}lTwQC8kIe=YuB7A#<_v|Us+M#s5I(sWAMamnwQZ?aqq z6pbT6-;q^tFk1oa=7_l>ezQ|4j6(=%0J*GpI)^`E@AJ0rISmPmWNc-gzxi^2SgS{n zLy3^Hgpw*}!F==7i+Spv1vFsSLOwbAheAj~7XdukvMe=Q2AqFrlQUWRFmp(K&FHJ7 zxy^%7qH>tUsiqYhjH`N8E>sBDuAb;>HPXTqn^ zpo4%>w8s!^nDr#R)oF`n1PKfCrv}q(jNfJE8MZk$K8WXS2z1pJ?IrSNG=!)RSU7VU z>xM(zU%sM6T-SZXdh0D5T#suI1_p-zH6T-rGwCDGI)iZBhF!9NIAnbOz~6fE0ej$R z8a{xje-5(r^ZjlL)0}64{1_+_xO!{zZ|EFbwWv!0(IlN+`WK*!ec-5ZU9)V$ubX=) zE_S+QOHghrr@gH55ARX3rRArqUg{HEZR!19-BA4$=^tpC8?B$}4mMxkr3 zfUnkX5R3(bWNuk}PFoX@JB<G3ZEQS3kT0W8#dJ$}*=Kd$~KpR1Nv@^S4g_;rukeW~&pIe>T8% z7<3JCns>|FmE~HGi!B8Y(E8f;gU2v#*|Rj`x_nNiU}F}kms(;~T4}b;Lo~0ix%AF` zJG5-<)0$fOo20oLF7}gO*zgYn>N{Ga4rZdHcF5!R)jZW&3D!mZjjeF!&fZ#eMimpRmIeKm;nhbIoH`3QX+%5j>0;VOxN>oQullI8Ul+ag;l6$6#- zkO_qpn$6N&N$P|lN`lF3br%?sZajSshtk1Tg&gDH4znKTe)l+kjM-^^wRWn<){|wcq{XQP;DNHh{d$ZTNq}x zFIW}5p>AF7=lE^m?e`o?RAH9MQ~XSe{|Zl7A>QxQWr}iKGmfts6_LOeLqEy=W}(mS z5f{wdAY-Q5=5!(Zch#|r|7%!u#=UqzLLbUZ79t0LHlzElC>FcrdOR#Bt8{sFkd_Lm zdL|Ukh=sVSnJ%}5OFu$$I8}-bRW7(s6EK5pOkVwKW)Ge0ZPz>BiVH!pSJpk>Na{sZ z;9^uzQwXgh^$Obkr^h7C{m!BIbYvLDm~4;(1q<`nW6UU*yi_pQ3*wgHPDV|ExR&s*v8z8PW23la;^R zicq`?6s}iV7O&lK18c>{JGa$w-3v2_y9c?LK6n zI4Q878Lv8o3feC{Rpk)y+y#@QQjzfAC++_tq!Ha^Djg_^G`yPh4BW|iyH-*L$xgh>amYbG2WhY9H( z?tb;cC^&@6X`~SI{xJhoEEU+2miofe$Eub zZ(zXKuQ%HU?do!0cYha}bZqb`HF-Zhs|wdRTRoD^6GGpKn~yEotp&OS)4x;UdEV)~ z;;ThY6_^00{`*aoH`8U+LrdNFmXroDGsArrXK$-x<&4rN1*jEOkU3E!I`byUIrY@} zRARn<3?z`S5>RXiM_#e3MaZZaD#jw2*i~ZP)2_n2tCx-eIn$odKa?}=hMW$PMQt9} 
zPw+M7V^$h=X9LG>Mra-%lTFWti%9v8=RWvJS$#-#yrchSNE0>67@+?Pw_NhQ))CRFZ<%_$u_4klQ)EYgdH7FLdsp?ver zX*SHE+8ob}G~&|I%*!W$7(Z73vOr%~Ado!bt{%=g<#DB+-<)mZ?R`gasaOg1ppQ7m zS|WC;wNfy^NZ`y$1hAm-sXGZ>{yUnnG&yS3@*2N;{VCa}{Plurj#|Ng7CEw9qsvk0 zd5`gb%KHkpsGj$2zy&F3knR?wkrv4X1ObU9B$k#|T3Wi6kP;-7T%|)=TDnVV5b5sl zcNXyL=lvUAuDSN&+MP4!nfr;EGtYfyJSFCp7B8AprEDOT`1O4%EoP046QtSEZ@5~{ zm?(g^JBz?u6PRChzE)psbvs;b2a%2^EuZhf&J6@+ENo`O7n!o3Zd7UBrXP0l(%4SyMpcCW{647(w%=&nCfBrKugqL{@zv~&h=hIv z`d$D2UBg(3A-?c3A_deq$Bu2TozA#?m=TokPK2*6lV%;&VF#R=@?Z)$Ml{YKGfU%< zYrZX;hLYLl`ACCY(de0zvc;zQxpxIDea&JyJ5@~6X>)x|r!1NmDh935!YZ#RZ=<9P z`?$ZI*PZVyk^95<%gOG(EM#w*_d>`Ee?gBaqu;$AC$u+Au{OZRsq4V)cjopxkK)3S z!UCs^I{4HBVfM2fX(`i<`D2_~X-`$t&-w82-b8^EyU>X@4(}nVFzVN~ZY(8ZX?YhH zgX;Vg-rJ!n!mA}iY%tPd9x*O^<2IN018Bd0t*HV^G zK;AJ)nZU_|5?2_=PK;6k^Tmb1&^B0M@ma0_a84VzF~B_3(K05LHKG5o^JzbCCr4tw zOYPI}Dn;AChxMX#9AZRe5^3R{CH`7nSKG{eA~MA~>SF$kTF$2WjMN^N5@Njb$c(Ov z2^DEiev7E)gx#s$z=~{1{Qh=Vx;6$f`b%s#Yu!UX(`t+M^qZMDhx)=`X1I)#LvW$u zJAs-_Q{fN@YYg5CWj+z2%i*cc1Aodwou${Xt?vXbzjxG|4!lmQ?Pr*EGG6F<<#2S6 zjwpeO&Zj0E#GK9Fh)otz4O9v%@?tsk*F`RT_Z}!<+x-1gd23Q-YDG|M>dtY|${nVuJ9Yup?;|(NoV(o0zOu=Vkq{jD;pj3W^-GtdAyb`JU|J`~ zlcIbweJ9am4~u4D)%q$dBnfP4ZklDASQ!-e4B|wJhP@3tHRNcDTTCM{wt6c&wpbIc z%f;2;%xGSB&Ph#m$uc2hYQE9gLi(Ky_(qUZGg=Iq~ zt%Zb`ZxcGIL4$E5^Meg+Zg8Kw!dihHBzEn6B!eeK9H)AfjYwXyp5CSIG-;>i0$-@# zg=zP12Q8OAK1qF89QHgpL6Dx^!e9pXN#mxpZeqg8MixI@1UT>7^iN8mt8sgujZ*}c zd|?Ql*l^jsG@UAn^vJEqCC+5-+}8P~LkpA0)LC{hQ25cW`6HN5j9^8ZAzBV}FwHXn zH93ZyFzVy+FJL#<3y&4|6Dvbx=8A}R>q3{s*~f6csd=TxI6k!$Uq|0x&f_k&9b8Rf zKfU^8yAfGWtP;7_KlB^K5k>FB>T#dzYL(=pizS7sj}Qgb!v{N9DD2DvCjCu*cOvx{ z*VG!IZJ0g47H6WaTLFN$EPWzuY;2apIgv5Mom?UhKMWjZ0&_TC!OcPu7Au-J$6Jx! 
zmuF;NqqDPVsZz%zW7Bt?i5EkO)g09bCZ>ZPm{OS=Xd$cDRPvzIq>H+6FSHiqFZbYI z4(4kYpbxpX3n$JxW1Q&^XrnY}>%r|#yYE5qH^bX_iPJ|Kh|faH263UQ8$*pA;;Wq$ zTryL_;ym_KQbfOq7xs2dTIuo=9k;bM^`J`?ErU5_kPe6n*yx1|{#HsQWe9q6Wo^*> z&Jv;IUw#LnlB{eJ3<570c3LDV@cC;_M zEm3f#hCBQ1haGX@-Yt!zLu}BU)BskSskA^E*A*ja6^t~(LCi=OhvKKyKgnopro!*$ z6yYhQqKaN|6Wr;~qwc6@A|@4ZE`f`qdS70iRmWL6?k|DrHlc#4S(>R0i{V zUo_?#q&&5GdG|1J)LGjg=!mU*_h3#g0~zqO~nzXUIVt-C$o0x6kX(sJjm$& zfu{JtAvN8CEj2y8^5eq$U$<-Gl?E7fiRS;!FDF~8ukA4QrFWtd8lwwn8Nwss9Jjfe8;dw&X3 z>oo#`IA7I5)B1elK-)FJk*Wr+Dl^TZI1T|iY3TdP#MM`vZ^F76XR~eSv;w-wd-Ti- zJTpRMIcdU3ZAeUcc)xIUv&A-5Er7}&(P-ZN(@4RxiW>?erzgv<3~d_`vUv88q$@|3 z!AaA~!HvPKuB7`UhLX%bpWZaGBIQ-1_)yN4BG%2Rpk&R#q1Fe8IM}d#v}^k=2#+C4Bgbo%*mW zOwLO)m`)rP2vSLu>|*k1Z8CKY&)`v+r;ByN>5huWnnGPdBOlaSNLMbmrHK z#<(A=S{i=vJ;jSUF5F|EN}`p6mv7&g>Ag>5PyiL|Je_r^w*G_U%?@Jl#dnPwE5RNn39WCP>9si0gX?N>45~H8-ZVJbvGhc*x`5xHb-MH& zEW4GDmn5PcY{dk4^Y;1#KK)}Tp2r|TIZ24$?Q)Wu9V6e!gFEATAFT)yT(sgD8mI^F z#G(mfFWi3UE54xgI}`Vb=48l&PHq{#qyXEE{mXr|NIf8*`xtbD0Ry?g~96G7* z`9qa4={%HA6TmBy>}GWewo@4Fg-#rzdVsC%qR;~OpUDP{zOP3PK zT82#PV)i7#nV*hv9)UK94U2Y#GxKLrLc?GFegpGi(E2j%`{+s><42(f1B+)dMC35h z`ExEhtZGP#xnFfI!&sRbQv zhSKSL`QeGQN)tz__n2uu)asaTn+C*dP?CJ^YG*5qMht~O zF`l9@>+tf5bp=kkUJ&Q_g#&Tlg5@A&{e?X0PlYyJ0 z5d?Y`8p+wM3pxx(8k^xLb7jDDR$&=lOnk~fUn$XaZF$G%af7fE6Obzo66FRxko`o$ zcQZC_vkLF!q>;ePt>^X4({vp*y(gEuZXmMh{br~%KXNwF8Hua$P=aJ70a}uu40+LP zMp&DyGcHfPIw7bvCglAEM~Z-_i7UJWTe9`j^VG1^c(BuwkttZ%?&~X#po7g#%@;?y zC965_3{^uof4CTJ^*2HX3v5|mwP&+>(ssR7|CtMFD#W6TUkw5FVkXtY>5F&Me|zpK z%YtD-#ISn3@Cl~HL+1~ezy_&Aj&=;yrwpkS$hWb12!tp+JuYFwp?aBT6? 
zqkW@nV{v7_TEe%#<^LRC2eJt&oVukXWh*@$2B9A+3OP_wbBHu_7w5S*+>+4fm)?3Yq8 z7AjzkAwhmfGYx)7jsXQu&6ub*qo`Eem{>@)d8cIAc1PvGkydhve5I{QFNhIkgt5rZ zI!{+-Iz=s17u;six+5_@PEPWCT6=$=heC7H&FOb^kR-4E?4j&Tq3jT2%A3Xpa=O?N zMK+2JOS4xQ@v^)s3VHbn3(-W&tny$dYyzfV_OK|b0bzkr(mRn)=ds$X$85%k$L9Fn zNL4jwshch@(aSf7dNw+6YT2JCm+EU|@R##-`PX)ish~Y?)M!wXVt1v5dlE`@Re?8j zaznaQNF80YTU+nRNtG$J)>eq%B-$SYeFpN`Kr)@yFNY~Zjdo~gD5Cd8SmZFa*8ycErF8SBlZo@GlK}MWm>*?>|KFP6KSoyGz?22XM`p z7QUz-X~xHgm=k?-i+~ba7qm}0Cx#TOf)w;M;{!uuV>rMI7VWYMBVhwZtuivSBD|D? zu@xM0N2qs$o9I_wdd{d$^1n;AV+n6r|I)2z=%r#)^F&Ue;rX=s59Y-l?@=A%y*;d* z#aQKuN@qgMSHI}%xl!(RwWyiGn{14p4{oIVq;65#q8&IS{V?~BA zxTYTdkjTAnC7*FNS11UC@R5maYf9V_eEY+CO^m}ML)*<5&`?T+m%nF2(p0*Zt=jGG zhv$Nn4C-9&kJ(HG^z__e?dkQ)hO2@si8h{u(o425wOf3A+SZZTMZ!tXuOE;PKNf-< zh^^2m?|n_J-998qF4#&hXa+S8NshP_$x6uPT;jgF@l`L3HZBW7cgf-@etX%z)nx8)U5XP)M{89j{`K{|> zCV#lkSM!~HU0Qh*3ns9fDjd*_&P2!@b6E+uNm_g{8#>MLH2yx;A_Gi5cz>J#{fc^DqUqTX*EX> zKY-%ffg$7k`|Z*@qJXiT@*}tG38Zvd$&yvVmR7mHRV*;d#nyE#(K7Q`74P;(1#FM@ zSvk#8@Kq)u?uZiQJD+3~P_Q3KJBIv7a)TQq)fb88nF>;H@QtW-Z<~PBbMf>PS!Jur z%ph&0KOkQph*fYsB8=tc9&TZhK=uf~w(%Z2PSB$x4N(Xe5STA99LVdhdfZU2CB2bX zk_t$WkKq#m58gMHB+2YxG&+8=y#Af_p+xEfv1s?=ZD$tA5*=49F@V8z4Him*}k@1ob3u7c0smv@sG_>wg4*q^3@n*qfd+u=AO(8og4m3OL_OxNT%017rY!_+hp?*{kLp z)=x^70CNH0C4`2bD3_kPf_E+{nyvtTEy8|)BFR7iAR$v-Cvf}C2^O-v4xM87-kg)j zLc|rtK^aJpqTYY#YH*F=`&xMbt%h=g{;c|UzdyItquxr{?m>}{Yhbh}r zQ&mRZN(|i5GZm?R(908P()0FOOH35H-dH?*8^2z~)d8Zq%X}PflM5^U$@_mRFru*C z7e`mtCL50!%8bD~-?SBu+^Ul9j*9soc)lxuJ$ra#Pu$pqUmPA-QBlfi{Y!CRTiVi7 zYJhdJBZyKS_~u7QvG|R8(ihkoH7m=XO*$p)YvYJ?$toAackh3S>`XeAXh=UH*xTQyC?T1d zqCwD67W06kB5%wKfin{VLsME{&GveHTO@>(Kv9o4udtVeYhgYFFrYOzCIG)8N)o}a zYy2{p{QxQt?d8fdwX}-tUv>!2)ebTOc5bi@Fa`UI#tA!cnIV;MWy*hG_XSu_xYz_K z|A}J_ioRq4p3X}Xc7KtULMi!hm#%#h)!5ir8Se?L<&Gcx({Ra7S(U1W#uDzdG0ppEAT$v+n_th>5oi~J_5}Y# zL^k%wSy|;5x%h1!;Q=7w`dL)msKl8?RYwQI!i;PVUT`jp2zgBp&;gTb<1WU(slh_e z?Y?IU_gJ@EvOisx$=xLd!v1~0XAhd67{}d2$SB5)1m$4k$&aFEF{US>pt^Jb^X5RAFovJFJq)k~9BlD}g`2SyU7)^BrYmnsYvSR{bZlWpsS!R-Y* 
zNPZu&wj@_bMWIlrn5t;uff=Bu1~CsC(Z71a_bYD20V`J0vW5(0hDM(&3#7^&r(~MS zU+cjK1K1_BiG&;@j;Yf}^m|&2Z#n_@*N-PwvlE;*_jpXd)7*@y!MztFFYlBA5tRQl zRJuC&WanF#)#SmqCnj;X?toC5HUQLF7r5lZt#!{mK{;B`Ct5cDw(8_jObnm8k1+p* zyprDA3InYm7oR`m5v02r zB%H|lOkfb8GXfC#@lS0$_~$76Jjh|a%xHaUH`+sd(nbTFK^w9xE4q~xFdjhM-;pz| zO*&M6?Ty~AIDkkd@UD`bQx5iF+wQ(;3Y(y~Lb~&b#uHp4t#4m<$w9_SKues$q_z>b1C%w-g(3*DzV#1{zc#;lLYv(b7*)1m( z%n!+a54e+Op8Pkh0%aQVhecKe-fl}EuB-p7vS_$X8!1_Bpwqy8k+5S zk`fO?;9+dFkfFl`p>Ihf0`8RF>isCu=%l2O<`34PJM|xz&Q}POVY5zs;!d8%3xn;8HRBg`;9FHG0c&q%C@zEsXZtb`bftd9^uZ3 z>X`MFm9_Fy{TM}~Sb2QsBxS(iMec-+P(mHywGm4a5~yw;0QLnvO8si-)#nWPU@J{b z$elifE~L0;i~)O+6p%`u7_EzL@9~=a*hM70tj-L`>Bi9DP#~XQ4=Al^2 zr4ePNtQ2@FBuZeRo-sTidR3!%PG{n$mlqJ=yb-2c4$SME4*q%XHDlB-IsMaCit4&& zDE^kc2jA=)T_5SB86!AJ*S>p(m}BsylkK&!Ueq0$#(laYyBD}h4y;cTXp?$K zLK4FV)$}0zy0B{P0LxZ*C?R7$|a>Ld(XvDBS~@;y^#za3CTHLoKRC(bS|bm4R4=hS~-d*1pFlF%!+IsI3y+ zDeBoo^dM7Nir-FAbdOy8YViu{>F&RN))8-H^m%=Q7^m)Z(_o2mr3TLejplpCq zZ|UVD(HWsW0S|+7t=_1~cAZ*G?gwuYV=%yP%j5-MvQknO2W!(5Ey4Gs!C-Lj%M*a} z7J&pzWwDv;uZL{LIc3m>i)dF@m#Uf?a&U`rh_dTB{~=UL3Wfijdat^UPAD7>e_tEr zR^-0{*<}uIQ($}i_)o}dYwv-K-@rUUz?H1|7)V(Rh`=<KR$0>Zur!y@Xx1|?CP0hiX%Ru_Z)n=^qkW&n#{kBqqgzo`}Q&j|3SVE#g3SM0BZ e|KA9C@%UNFmm-mCC6=#DX79hnFT6}UnMzLAhAkPxE*S4fJ;l>b~m zMrQkc4+;`es0|WA0>nLfzz^a}0r)|@{`DI*1NnbO%s~0i-57)!sQXKU{);vvrP^9~W<8u2y{1KrPCT%L(D=qRhv$vQwR=>)lXxp^5Ru<7XN#2^+= zL>}Lh`~5iZN}R#k#l=yChsWLBo!gzC+W}(5a|a9t^YHTV@bPg0cW^m-+Pj!~aM?RE z{u<;zz+HFI!v5ocgPO!Uv!FF9Rop8U^D_RhbT1uT#U@fV&u z+`K&hj14?0hIm&*6=Guv%#0Xc;*QwQJOB0G?|H;{5R?BQ%&(RHd>2@%1hyE@KWdY} zCO2BZL_(56dT>u#!vlGv=|-HPyytcu^9PF$M%5O!C-lUUyr%wd?(b0B%79{o-KDks z#cswtn?q_d0o9d5P1~tKcvy9dvuNfD3cS!DCSJ5BZa*YAAh0^X~zNqhe6ddN=!j*CG{8&)42zv?VZBf{5eUVT0HR3QU2)eC3Tm*}&h< znoJwT)=Hp6HNolnN<&`w`0C`L+$Fr-GI7Iz zfr0ft>lC~1n!~oyV{I5J6-cQp=9ebT<&aq0--xS)pk4zTnH&m)&UT{8<&$*0v(!b) z0%J{ZM`9wyR$(WLRX_?Z{#JTEJNK?%MKI%crJ7AH3q9rn3#0vw8+u`{@qe3%5=EG> zbLkNu!L_K;>PRf?wPHANNv6GQ3 zJ5$q8I)+`&qFwBe&(LqAIWd;)b0B>elF8B*OY##^ 
zn^fZrt}qgg=?`aWhlBlo?qSfDJhfM~-&U7{N%4*Z;`59lBsvc2rnPlBF0gBTh{x;9>u;Yinv%+B*6yC%veFj6`k{OuB4vHo zKLt;(Y2blcG6X43n=9{<`RfuaOe2~KC%sT7-_bIk$A)st;bOh8iQ+?H3-?x6{z|%{}%Gi)npnUvxGN11-7oyU#0$%2EVwfh^ zx=Z!;EqNBN&CAH^0F#L6vkY6)IP(qiUlXuyQh542KPEyR+c} zr(#aF^xKYKw*uq!i8aBLJzj8ym60&V6J{%PtG3hkoAG8>9bg0&E*@!-rXg@VI!eiH=F?*UztlNTa*5-u2yu$ALxhP zZSzoHrg^f;fO8S^x2r)12#k*OxB&V!yuZ;vNfs*Z2N1m1u@<|5r`-gJ+h1>7=ywM$m^JL?BB=P+y) z^vJ;|95DIxS7y@Bzsb-Ad# z-wJUUakqLYOjZ40O9tWn+{ zQPl$O?aQp=sJdRjfSUxo%zbWX6w8u)j`rMGc7^<@4LZP{S6aWJ6T`nyq2%_& z^SKjS)i*hbpH^~_Gg)ncrH;yCUzZRWq(L+wbXbawahfb~Qv4obho3DQX1Q2`eQQg^ z^+9#S3SUf3!FOztmW<&&0t0!GQEkfI`0c|2ys+kq_S+_gshz8DL879?BxiSNJKcE? zRSOBLHirV+>gMCI^4Ud6B?yjtGxyr9j0vc*DL9aSNie_#M1vV3S(@ttK`pTw*;2qe z`7I6J-8Cn!cx}vIwL}T4x}Hzum4-}?I>}2A?H>{q;OUGBojNr5K7TC&rW$D(#h%3I ziHhsx0T-@>R6E8xOGgEkhDva#1g78!`7A!0ke5uR1r?9pvUQImV!-{0)?PxLBT9Ns z)_dT;1SJAqWb%?G*QmYfmHEYaVTHiMRi1Q?3@)$ghYp|99NTR2tzSv(tdwtv9*I4k z=ExLz*8W`Zoe*gVC78VDqC5g;YZK2(Yr?Ae_`=rSuT8UJ#yeVIrckPoU~=^~d#R&Y z$LWWK47l>EKGMX^m&+;}lXc98GT$8s_=nemd1k7Gik`f2=pNer_zPyp#5(I}cm5`4 z-fRDeOU;n0=;`wr>(3=m*Y7XvzzZ@&cv>#^7K(yr2FXe)Q7s6rJ{5aAZDI;H-!-Y8?d$B;73-TsGXJ$IUp`!z>ZjIl3jl`GQs?89AEaGoI;FMC`4xx4CNaOvzV z?b~tip)|wLBUS4TgiVWe0*G?R8S)j`@{DxzD5a^ zBkfzgcsi}s6FkVu%8n(g;F!q%#-FcP>>1u@&|46 zzPi1n7|9)`Fl~veZyUZ2ep+)Om$?RwSrwF$KlTTOZ)8^Naq+=PP6CCltcUprb%{4q zM7T6w{lYu3lu2JV0gFir&h<4RK?zwh#Lu^*Rwz$nSNxq2lEdFH&d z7aQ)0d7gunJUX#L_T-aKpCU-Kb6eOuy4-fSDwv<6^MKzPV?H|S5Ft7n1WuglVk#PS%xx~Nr-**MTm-u8jiZtI6Uq0V=GaG`? 
zWt&?e9`^eAa0bH3+Wk-iT?@_rt+hn8n+<1)ILespszaV2OQe2KAurX|v@e?|AOE)g z?**tK7O>yOZ+mS4t=YuKD^@KHXl?QR$wid}JxJU*R*d6eK<$V7EVoo#?dHNS-_8=z zIuaC_xbU~a1at@Ytj;lJ=W!?$cja}&N+|d2BPzj*gQ+7+ZG-A(u}-{hh>3|+0Z#q# z3LEKP%l~N*UQe{Dt1D8~E@7jCMYzI=wVO9*{brP}Cd02y)rv-FBASKeHI74iZ5IOx z0jIo_3L03K_~l=A%(!v=g( zn_naYSf(Sx!&!c-{{(}7*&~AsTB(xG-LpL%NSx^}jhLeqji@z@7mA-s{}-Q)1a$*s za((*9$Kpwe!t39QU<4Min+9h5&0v4S^tTG^ux#I^vG*KpCEp%m`uU#8E|6osX|#g9 z_{)~}m(mCJ{8y`bs;DntL?7V%1RyaYH3|}U47qU~k4R`D^Z-%%_B5zN-byq-0OzNJ zjG%{pdXC%a=!k>$H$_Qh2%$_4SvzDsttXPUVxp3$_fTn1Q0^Gdp9?-xFb(13!T zsg9_8%ioNNgoYtM&nPsP=DWqw5RQR|iP`q*qqG`H)dQbnq4ea<={qxW0SrHtO=gM6 z8+%lvoByg&3DEf$XIOM0_TiRj((2k6bm!f&fj?>{{-s7RfC^r0zhGfe;zy)R zkkDK_b~XE3YFYabQF-wm-B0zmdH^AaceX!q@GlVp30jS|2m-i#bv2)6u(rhZk0O~o z6j>TTbI>~oV}7Aw>$slq00Ykuc^veMmlv|@1v`GSY^W~?5Q zoZuAq0d{M2Aff(MdVNEMaAM24{Ez>&X@2{XGES(F!KhbGTYOu$)bE{u$QCQogt8EZ zyH?`0!&>=*Y6y3P@9{h#l_AIenTfIZ16siH`24rHiVSj*m0G6B4&{6np2fi}T1rX! z`KL2%eM10Pmjyr8^uILyON@F@9KLSzuIEA#8cZhd29Fm)mIx9+af}DGE1Z83jX;MS z1rbBO(f6LP2N`k(Ey>2Gha~VIw|#kDqH{dq&luYdB#+O1B)QIyJ%AjAKpbsnhvw@3%?$rhb88s_@f)fy zF8_x3fBc447@%)W6xGT<#=mw6HDv`1Ii5~%aXmTyL*qaNAO%#2miFXte(WCvbf}m- z9T!#j+Y+r+=NG$QPbA%a#`h@@5ANntI$zy;w53{?wznSTZI@eMq?vZQ?$hNOk(pZJ zW(luOd~K&W%RYCS$WfE>-5ze(@1%Jlz@yv{^$otOSu$V`(~$>ji7($Sp7-v8sc(QY zs{$;3QgOLgtw}pFl;_4|SqdQ%*ZojPgj7THPCjAfwB4vu@34!mV3TTO+F0f;M5I_< zrRG_o!B~m-x+`^8kNf0-x(at;L8(~pdx)jD&D#SuH@^lQ2wae9TZH?yL4(POy`_d~ zrlF{2ed@A~NQzrgqd?qV?@?@VkCLk}7a0Ah!N&poDIewrNpLk>wb!muOx(+6+)Q=_ zOU65U_kL;`(N$rEBaE^}=TDr8_j2VYtp70~0xLK#{Bh$S^DvrwIdoViCAxLMd!8|y zcjWZW$R%yN=&FKSe0V&U>v*DymEiD3NK=6~99)Y#$5E~F2`}G3Z%aY}X43_$j4JRN z(;wB+HS&54URRz{H!7%P-}H)_iP}n4F^A`Rxs=9Kue<1$q@;f}IA3+U>74MMDa*uA zbCr$2PiO2XYS<5$s=op*@u9x-DaYt-UmN|+{aT|!i8xo?5~{?5xZXNMWG@>+>;?g+ z$?I@(k;<^0-fzN9(pvDD$;E&P%vEp@CY}ks5F8e?bu=vg@&3EgS^QG;V=tYqfqv&L z_lGlM?#2hx!{Ge0D$Y^;`1GZGZiGW^eM$%<9eRs4x30NUHR%jn9GL516)>)5ctl3WP>q_f+?&TBf+2m1Lb)aV6@{mct8A z&8ZVMne21=PYvy7npMq@^!a*@e3MkpzSY~%*dX>Y#{*zvD-ONqy0)=f`Os`5;&e3Y 
zQ?#3F+RE9DJq0xspC^s^vbMQSN3GY2CyKTh8Zhg}vr^kM4`I`&Cb>z*C5{dH3S}C& z*9mfM*G_BIHJ?pz&++A#Xgt>wiocyJn*X{d&+nvJ(`Zs_GF@P^KH;FYV6yBB#3|kn z&c(?oaL|-4I33DPZ7&(`GzDy*`j^8ZnyUq3>x#W8@(3nkk^_Vho!diI*Q-X>vaKPb z8XrCRR78YJ*|pLlBTKT*1m*!+K#amAn4ju8rjBX#CoUu)1$*w&9#pp@>c0(Cy&@H!enBJh%v~^yRRh zM*LCovLCowVoubk!L2x)L4TR!u$FoLT;>fL;M}34OuQcg>;_5x;^;}yY|k|wfj7NB zH5V~7)ugoG8oDuLyC(1}C~E6=p7x%M6^;u_4F)rsxFW2<`HL{cP!RXp%yE>|1G&Kud)QHI&ER9p<{ErhlI-@|5yF^++jB<=?z8Rf z-5WQqPE8I+9k%`6%7^ zV#M|=pE5C?9^YN^iSYEJ+JbkMdzWA5 zE`}=jG*Pt0%(dMISydX|+*ii{c2EX4z=bt#J=p&vjQ+l?XEKrZFZ@uiN{#78 zk?kpOrTXM7I`zwzmkCS5!PHq%6p-?enW5m*2q&zH5T1I#!1|Tc`eRnPDy7ll30YFk z(;Osmou0fO@n4rn1DP0S+AQA^s-?$YqB{d!R)%bnR@&Akr`pE)$1vhM#p7Quvu;c@ zlNVcjuCOxls@5a|f+VBN{7b8p#J*JFx0~X^$(y2L4HlPY7creG(Vq!cfzCVcH?(n> z^+#H|ng34Upo7dNvKeN^6p_@R9%sFBDopOL7g?3hDmt7`E?3Lx&Vi=LEpa#fYTv2b zFa`kiAu+AK_%wY_qWXCzv^J`g)hFJXUOt6Gxl84CnnK%q&|cF4cVQ?4u*$&7qWz1L zr*ozIOvaY$&yD`cjV^AuK9S#a>0{Eyz(zfGp9MgQvQ*|%HghfXg_iUwPs-pUVg9J5 zBk8;sp{>@4RCmzJX#eJa58LT4wI(&-xWT42Y1uQJLzfFp#Z;kv`<>A@r~@AaWLcOr zZ~K-gIimMfcDysGq8A3pi2$_(p&f^tgJ`i0|C=>JyXj*5lE*`2KDp7n2uQ)`soayY ztnoXOIxJ7#{(?s(CPAJ-#*D`DqP4v2)8N(Fr){g}d;Ld3O*dFT$g%*L3;~J)F$bYr z*YU%WAT{xdE)<%It+<|)p5A}?qdK(mYWq<3$9dp0qtk3=?*dg$DGWR)A!`P|vc!)G z(Qj;c5+@NGFRxBgm?qI|`|Z zA_P5ALB5z)TxoWbtw*Rg$e0j_;1+SvzZ2knC`K{t$I7OHiWd}z0fak~7=Q%e#B$eP zXN%NC3b?^4-UrYcK8VkDU>A0=)%LVT_5)y%S;{77`MD&h9y_h&wWoO-Vr5c=K8eH# z2A3BOP}zEgch~zw1I);xH*9f=UdAtLV!1K6OyGP22m%?w&Y3ArNIL-#A_x&_-bWg; zng8bJ&_NaiZXM*`xg>3Sm);v_qQ-iO1w3s*u+u>2D`~yhoQ{Ky$k}x4fb2-?3~S(5 zr{E8RJfuPf7cqM$8z}dQ7Y^hpCz6>ZwWY`I8_r@M@oM*)K?}Tn)99U{&Tm!i%4_=O(z_DWGx1q{>lv2|8^0)nP?kSjwxo2Y1+(X zMD#hb6e!=7Tlb911I%-W&whJ&RDhhCBB?#Pem21RzSJDe;n;mka!RyK-T~he_OFPN z)*DU~Kh=k0dE{Kc(v#4}f^Mm9^%(RS4G3Nf)Fv{ZV{?x+VAmzWB;KT_M<2a(z3pIc z&)r#W+dDxl6K*PFQh{~edYErJQ|~%4`Qe#4Cua7Ln;bg}!VY8th?OPro?eFNLLruya&8veKH%QD z>m&t$i>~Y&f@KRsD6ZE$7x9pt^7sfuBwZ;KR6=<;qJb3RAqI5BGsDI6G8g_@I_MF= 
zH43Ex=lWDFSNVF(rbH3MnY3Q1wUx5XLX>q(a6mwgNtMp(8d6(U86MJk&5a!O}Kn2Ql8?MDbfwf36V4DZY`kpbr`%Jk6QYZaPK{1AQMGndF8*cQvm9`bvp7 z<-0wC%B9f^!^3rNCm=9)-*1B~fI6zuX`B8XU@`8^17Ul@rpaIF4g|ill8;*oA5e zN%?dBgZ;-aG*PAvlb8vi&ioXpH)CCdyJW)pfvF9}~)amCbq&FpeWp44B@P-tC z00E5wH#+NmKY0Ls$I3stvn%+r5@hEGGQT^y`lq7mQ9G^-PSn23_)3bzF?=>^$6Qjh zGC)t?)$ZcR`B7N-=AvzkbDK)2-IQR-mI+I(YYK??e1}}*PdPio{8Lcnt)4*Sr8~fM zxor;_9n>(6olMx=QbSgBtc9toDx_f;hKhx@!}~6|1uS|h7_B38)?_x#)VSPA-%;D% zz{>zqqkQt|0G2!k3)KT#BJ5pueWZxv~gP!wzk<0Z@MGK|DRSArt zjhyG0&FZ%0EqTFq>05XxHlCj`dIQ3pLUD;Z@)QG1w&+8L69PoMHplxMf67Y-oDV8x zh{%B?I8e@H zu;fv$mC&^J{`qbDtEpse9z=l}I>;IW+ip4EwCY5jXj_TLTx~Ghmv4kH##GJ^WOaQj z2zYD0H}Pe#teviirS(x6q(5UWVZUg1`NO^gA(*flJvX3aHB#Nv>zN~W5I+^@0!YgT zXeFQiyXJD27#L20_i;bf4OqYqt|u60k+b!Y8vhD%L3n@hr!|aXtv7fuG%#SWH+Vm? z!M7~VE6p?5ReF1)yLaI}TY#hJX2@`d&XNif>q#ETh6Ys934^NMN$C@=noVVnHFjp+ z>vx~KeCv_({Xyyt(dy6^>2_?4YB@zLAXy756JrFH`bR|ticI(d7BEjIOhJWFzsPAa zm5JO&L-fI`Ihsa-s81Qzkw7a`t9&%Qx;rbcI(`tCQ(>SYiN9l4Uv8fbrDc~ZckNB6eJYKIUTj1r z)D?i^k7tRRxlOIlUHl!Rfe~o+_ z%!E4M-Y(x*l^kY1|E9qLY3c0 z$Z&OwFyZ94OoD$YLRl&*Fcg2IA1|m9SI3TbqUA=Gh!?nl21|v<5L+*2=gp?5)@UYO zR(dMBC9269lfv+tx1Q<|&?7K8$MTpJRq2;eGxCze7I38c*{5kD+F?f@HFjZ(=)y)% z0X1D}sr^35_-n_RD@w_I^%chJ++XG6EPYH6X6nfH`O&;=!iCF*SPXPv74mjF4A|g5 zSTfxG=TKi>7&%TJb*M;NQBC`GUDg^7mL0`kN@w7q5l&$v*ZH8=&)`CM@fjB5H zF8r)2LI6nM{LsXfFJLdv75HUmsFc(csW`!5M1Np0(tUaeBkv?VLzqfxwkw-+)so*_Z> zb6MGY`rH1y-kCzhfrY_)CXTj)VwY$>ZtdJlX`AwBQ$nQFo319Yy5dK;F zGWiC&x7;@CINSEBgILH70|#0UvM$VWZWoll5FtLlV^Ds0m&bVW$-}Sb=RWY93frkV zSKJ%o1UipUDrACrz7mTtInL%ktIW^Ox80ol@I$Z0-q3}8Wfq3sMO{#&yaCXMiJIm; zf>7qKawPW&K7Z>8Qu4E29!PzQIQFmd=c*&_K(qMyCyutK0{2WJ((lOXd!p#QbzCUv zI`nxG0G!988Gn^A4nzIK{+d{NwXUwN{cVOf96y{``l1?n*ya-+_nvQM1ZQ`hE&KAR zP1?FUaYM2FT2-tv?wnaJ{<@cZ+7-da1?v`0yAhEN}!`mZTC?R~Ni-D0=h>^|f{^x}CI1 zEXZI7HP>=Ey+vG@IK-ThJ_Yo8f2vF8kMxKo#2@cLGf$D&I3?agL=qK9U$%G0%A&qX zCNr51H7DR(fhMR&RhwP|OoRcyxAM_rQc>;%n{Pv})6>Q;soFM=bnULg`ba^^8VBcC zaeY5EN&Z94Nza$0(@&DN#mMb$-Q2x{(cL(BTv^ApULmm#3Er1bm49%nOE}(!J+HRB 
z=zGPBT~aZ;Iepkkv+8#@k=}zIFLiO1T-TzhgCV|N(dwdYQti=a%&aq>Z`a^@H}{6D z>c(X%FVBzEbKqVH`bD}9iolTs{6-@B$eHTzUiu|t?~B8m4-YW;sR@*T77B)viI!agwps5G+6aFEjW%(yxNdV4QXW~j6q#! ztnBZrPWO7MJ(EEHIFSnNrMgvsHl;APD2bLCa{z7*Mch!h+~u1r+Q=A66XsY-KIe(5 z=H0BQJXWC=(b7EP!n=zUNm<1;sHh$tEL6oEp2mC4i@uJMO7)b!v0g_rzHZYi`J@8E zBsF1oL`X`DHjw#Jw%~mPfd;tr%EC-jHw^z9J>5YQ&8`HAi`2^-}nX`3j z%;59acu+JH<@RpA8WXMqXsd=#um20p@3F~%4*HdxN z%5Y0*^`Zqq=O*0L@V7tSeyZQHu)VY0)&1IZRE|-SHX3^-q_fh6H292|nUa%&9rI?6 zN{|b4z-g=z>bjI{xrfiXD8AK^&-N>6po1<3yr(2r}vzg3L(%Mg9WhgN= zzfNxardu4+QnWkUO;-|Hv_)vPlm1d+tzfIIFEH`-jmP!}#Y+X;=wSS~gbpd2E(i&m zoqRxz0A#kicv*v^lwSLyJ2Ho_dZ}~d{)aGnQH!OH&s{?VFphkgFR) zur2F7I;e^XVE>&PPRqtIb~-JlVddop59HJgSFzD@;ZkZMhS2${RBTrVD~l}KrzD0O z&b%dZ9Yr`j3sX~kRf0IMmj}k2?&woizNI<`i8rbz^_6N}|m?d^qe9cOdMdZa-`n%upGAvFEJTe42LNEbqF+oEmAuVzW`<_q`w<^t7vx zDBJib5L-SP_Qd#d`E0z|TFrm(29z6Q?VZ(oIRl<}uRLRyHGZF%>%J31)jipNm0jcl z{j0f7YZ|sp&fX1r80l?`g({gvB)JhIH(*@y;K1Br_OyQmGN5c>s!x~G^-ULg%8SNi-%S>uJ^dv zx}XntJkxoiJuHS6+xCTKs)356rITjeu0=$j(nx5q*_`i#ZqJON`+{Syx*}osBQx#k zm#atow)|iCYU!oE)Z6p1`8;*|nM>)A&uI4<5kAu7sBiS28ZF`8mZ4KR5zhI+ufg8YxsSkO>Z12!DhO0KQ(kN? z`*yo7O71O_j?I&3#=STxjs=aEtYj5tNsUNF!wkQ}^Ixn(S*&FW= z8ocYWEnNDRvVPC!!a#s05zdspHrjOHnv`H543>RFIOI08KRIu^lD+*n?nrFA!DYoA z)(^wYsZY(V)oj*Pl=F3>e3I>jV4wy3%KFPm^Lc7&O%*hA-m|shy`AOV4>AsKHK={5 zBbKhQc<0xDe@2kTj#J&wks_PkST~mK;f;oe4ZfO6!&>To^1?0Y$Fnn-mEuR@lhN2~ z09!|8DLZOoe7m$(;#~8|U3T;=6UGgELiaUG{U2(iD?>rENbxvsy|kTU#MH{Tn*;@ltGN4NAx8Ke&JDgZyY-_6S3Lq z*mH>4I5}bZt9?Ziq&DZ?^5g;EQ8L3>J5Jw8nawECle2gFZl7gM?t-jK(!60~dUL47 zolxRTQu5tDC_JL=xQ}d%k$Fbz;|TJWPXy8~KIj?kWb&r)X4N;|k*aA7=`Qv{K2U*J zU$h)d;}Pv@)uwebuH9`;5quf;MbCKXtmF>~1s}&F}4}5=6GkGLoAzrYY)Mj)Z|_oEDj=DANRtfUTi)TEpn8? 
zUC!V8Sk$qm+a_s<=CXojqT{7@iZ323oc&yKf^y_s??|KU?5F|&R# zf3CHS-Rn;LjI=$_G@JzZ!~m}d+M$GpOZOaE;EFg0A%-+*>c0p-5;8gc0rJ%=d3;oo;cM9Mwjx06^KaJJJ0#rK)$5002OVg(3UaKah=`!HD9< z!_^AZw%zpO4eDTP9+jYWXp9h%>!+g#YS1o6(DaJH3mbCsJyOtVr|TCUFDVq1*AJwe zSyHq3zTNHLnXS#x^C_&RipbZL3HvO|99^pUI~P!0;lq7rpufEj`b z_VEhVQ`KlNrpV*N9Djdq@#iTvC_NLMY_K*1ReCCQJIxR?kTsA(>rU#90SRlV_Dv-( zS{+!Sd%mS!`3AbGHAh3wW+b+y&EbQ$a=J@%~Bg?6Hdgyr{qAe}&Zbk@%VxBYu=SLr^rZyuw zs6TCmIIn_^?8{b$ygSG!ucv_cEDR1K^t*HAr<V*v z>I5=mfH_z*T2>3)#M0xdQQJdPa(>0z zUE#zgZ73hF_kcqGdFAZDlVjCF0vU5BIJ8FKvFM=nq$B(OB=-RwuOJ%5a+r0==fU@N z#IiP9hoVM(gZqXizOGI9Jzj^jZ=8q`JyQbW9RsfZ1!))Z>$Ze`&%UgVE^&P5a(X>3)YUOE6r&+F-2LSHNBHH0P9_JOu+nyU0O2bji+2Q$f|U zs1VZ@hXj29^ngZ9mU)&`-sFAA%1^oiWDvtEl^YOBj;4b5Vt{|t_Zoo9VuI`7?Z zR8~AVUzSCV;!8LnD2E@Xg)@OosTrNWMiA2XP9zRD6NLJI^A&oIzo zwr}wYEz720h$0VrrX(CCcZLn{NU#0b2Qz6!)AJ=8kS?y7bH$GeLf(@XN2_P1!;~4F zu3~I0Uvf**617R&j!$gHG{~Lu%h|0US5!Tgv~P=D?{AT$o_YrA>WgGD*w1|RL^O+p zbrp-5z$zk7YHgp!R_%uza*>hCqO;rJiB2d? zLw@wEGb-qNCF|oyz^BYxv5DV+1-0u?q-O>i+V+H+6EOyaQb4HwFBSi_*&fhc@Xq6d z^8F~etezAc$thbt05loMxU;gr7|%gap!ab3Lyve3b;Gn1yLwJ(a${p;=URw~N0~jm zP`m-Z@sw_9YM3A>3r8ZzK88`05N%OeM?|fEHHUd-InFg_J9O&<*I3M~a6jdC;HOwI zX8HD`BPwj~vi`X6TA)I%<=+Yo>{hvv{m-11TdaoQiSCcCa)O~m+3|`RlvY|lcH+@$W1f6Z0?TC}Q(t7RC z&)7bO4Pc5D{Hwk`4qBnpE>;+Kh3QU&;8KPZd+%eSn9EDyiBUuRgo|WW1V8gSeD~24 z>Z>uOTh^D0g@o-Xx>F6Wnr6IO_23L+ok-IZ!Ha*iyBKu0}#kX-1mDcp){_4Lbj|OP2|M?Igmo^MCO=JkVk{X zMfnX7?I~Wgs#B>bQEEuD!N4?EzwvSo?Pa_@se!T6vwW=&iAxP!V33*dSV$nbDrX6A z3{=CiO@zKiChO|7-jxkFLrF#T7q6qF z=c4>Pt{%GW4E(TNCQq(dja(l#17Iyhi{^6JCVTeeTS2=0$Z_iT+T{Aw&)@E?1QGH5 z6I_8ft8*Xaly_hi)7xErE^2Iy9Uq->+1x!f*>0v{;y&Q+rP2}Oq@B@%3jIXpXs7qa zopt3JYJ=788{0Bzyjs8A?@Fz$vB{=%ki36CT56y4SiPJ^)O!@1!{CXi?Ws42oA!1 zJZxNaQF9;se3i7}XX_(@tsaB}K5S1O*)Rtz>4|JNP}j{751AV};e~kS%{ST~jNN{| zvSa&R2^M6hFI2{tsZzT_;f-=3jE~c`jX8(U&>6-HD)?8ESt=53zb#{~?5dUVEjO6h z&2|pf6XXYd{QMngWI>FL@>I`vzNW2=Hp`q9zw1<$J7_cfU1&y~)`oo2D6bBC;fi;M zPSVR)A*~+?W1P9Do~zxZ?%DI3o)~}2+vHTRpeAfN9s1>%Zh#*ul7b2EJb;mF0E~*Q 
zV&%`%L_nyrHQ10ZPQ}u1Ka_*8q|l^b15nLKlA?y8K2upPt)^nb`61nkaiO8L3TfVZ z$9?07AlDZw(n41!j36>qI`IqsSZVDJHIo|_oEQl3)0sEHpB-;41*c-1Oh3?K-N)*6 zhIyT7)MVG6HDzaXMF7V&oRktSqN#e_QmXGHry2EY$%|KHPkIpQfUag~ggrRDnNnEa zxfs%4B){WMwGx0$e^XOS`)0*%zIWxPy{+!efhz;bsQJm(qf?r1dJ#fFgpKej1keM)}Y_@HleqDRBJy|&;F`; z&yit2k^uc*E&+@Zcn-L;`OJrjT1>WB9K^z+rtmZlu3|i7FdiO_2uc5qeCJW=!a{qHkPIl_}e1zBGFz3e^2_ zqwuA@#Hy}pzmen2=qK^8ASoN}#l5^cxdA$<&ee+AgGNI+Mz1wby+=n&(zIB=+{H69 zlKgNpR7O-ME|Llw??RRBDDWIEMP>n@TcLghIF)WAmDZ|6Oj}AE3Hb4X ziKy)5(^?fp@gAGNPYUrbU`n_iFlWQLMDxTWic(!-*b^OLLe5laS%aXAOU^~SU=15H zHm=j#Cs_N;)4Q3`ITrPb#``0zR<|y9@Le^nJ_O-Ux71jl>Nr*t7GQk@PnglLHN4>& z9TkOXimsj?HOM+q+hx~_ZfW2KXyOHPC4a^2YCkRGQo2!gw<;VPG;&{x0|$>bwGZg3 z%Zh2QgE43JA34`Xxl1<)0$qSC+25a;{FMcTylBmkq|(6s(39!7R&6&i=E%F3k)Wh~ z5GV%PR|G}^N*1nDhQWbbV!5 zn_IUnRtPQDQi@xlg%)>r3T@F++&#Fv1cy?*E$)TlS|}6^Qrz7M?hrgcNOH5kbME=} z{?5IB;0Jl~@UAu2oNJCT$9Vgyf%J$eV#{izn)7fKa(hlu>qKpN!DxttMt0I_G^6k) zh4}hJzP}0?k>vVZd6MrKSRv@%Hv&F#u(#~h`}oyt?83x-%(ka_vemfHT%v8Zy1@8A zJyFupx6u8mV9rlYQL+OYY+90%uwZR!8bVcUdIsaDQBjXQBkq2_$Fs8&?>zo~Ge>J# zNqrE9CiH|D&Oz`cu`0?G7_2>a<$A;!h`yP6iwA2%j?f1u4Y22Z zj&!m44N|;nxkIe@|3tuvr~TC3syAoERl^8F|LSuLS8yd8_XW44)o(hWU^e(BY=vYB z$+%hK$KGOdr1(1q_v0fHInyqDdBx0^pLV^nxMS-6TvTu>Z{IGxcWw-_sGqFUzvUg5 zYz=z36o^_s8ih)HS!sm1c&)9~9^D+>+j&NNv3v^#?OwJNLu3$~2i|TdroQi_W@(Aj zh(=johYERC(*Ksw|94=r$*=WNJI8V$dY$hTVynT(yTOzx;s>78Os}brxG!9|rw=s7 z%fdO#JiAgxZhzVOSw!iRgh``t;zwy7MUh8B6phxs-2`-~>#5`7g1)#m+YxV`i*rF5 z?F+v09$vI;3(U2@@<;iDgtQTxQok7;Ts?T zXv9Fzk)yP^618>)PYJ2tX92pe<@bpHeOS?16g>@6PLkrzO!1!0B4kL1EoEXBkFT8A z?X@zVqU?dDDZF6DPjqvp3N*LHpC;nPPWM@ybh7(^pY^L6q38QzSjJKgBwh>5Zjt>L zq@-$%*2HXn_F%!^*QA~ZdAu=qe~@V|Ce#LlnO_kj*IT*~ve)PK$dAA0=E^Sexb%mV zso*(VEdft9*o>Rd-c4&ryv!_s8-wP6=V0INV`b-sFOJL2O;dIp+wl)r`i9rLYg;+9 zIjA#hLOrUU?Jwt&yz{B@jCqj_K@D-;n`XIB((f8uAUTWcGQm8BEqf@p&rx}Xk;cw1 z>QG6oa&E8p7D+f8q^Uxq__2(}p%crk$fC3{>#({t8(jo1JrR;_X3hIg-VQH%um&-b z2l-oCW>V{j4|Afy=VgeMP5dBaRGKF_RLb4PV>mU%GuwnN;q^>;kb=!Rch`uJEZnFt 
zMi0T|1cPxr5ap#KgK{6xA~+8F^~XE3hDr02=b?PfXbD5>$IIMNx$(noY;dmF6Uj?O zXf)!sN2iVR^S0;F%^qrL=FRRqc`R?l&|IB?2Cd!ZO|(Rq|E+i@3h$T!B+I|Z`q+<-<^p^;y#GYbhGpj`3W z8{yp_AK}^FW&c`R|LL%Ho+>s%bfl{MzvfLEzG(j30-?aX$Tf^qsbl>@#(oX zcS#e4rKhLg!>opRY(R&v@hBg9CdXk`ZVg?Dfm1R0ol#og^Skj?*&>d(r^qb?G;TBw zbu&2k3{5A#SX9<}(U|@I+c*;@B^-4pM{ln`kXSbP8jp4-;1Gy>=|5ml9Y+-T^CLsD z&RzT3Cm+<|>f~1JTPsauG{jGB8lC||c>N%Ko?QLD+=M=;FHXfVi#m%`O0QOSen@8FQ*;EI}5(K{NjSRs~c`>J^oVG_7P`XbFVL zm953iX~!j~8<;Y(4L+4Wycc6&>0%B)hY)ziQ?kfr^H2QU3oL{Vzz$HSm|IOT6+ zw^ue_oA+PemG#h#d(DtL{p{1PpVCC33r$U@GC_h)O8oB)o?P8MJt9#@*qA!7e&JCk zdb<0=&1opI+sz1FZTlLwq-~B?Yph2WkF=`&2OU%~uCJB+Jn{QKz4f&=e$EtV5-t%>+-+rGo)*?ao!Kvat{KZU8-8$X81|n=o>c+ z;iHE#T??N5yAojF*s+Ws$RU?juN&r5B-hl=qXNXpahQyn*6{KxleSC~KQf9^P^sc> z4-Dj`jZ0xlMBS1;ns}AxVfcY03nyaAlwgc~FIT@Re=x12bvlIM-Po1(x#y)~a{*jh zdrToSe|x@R5VZemPGNH|_kP0P;#LuTTx*YhfNus}=wl1xob~%;K^`j7ke$>{DMB4X z#3Lml%XV&IG7ls4)i3He%&k~ezc?$!IBMVJWLPbAlqJz!mJN6Hwob=@UHEnTL@lWd zkfAW1Sd1jafbj#rU=jX^o(S zZfHOB!HcxOF3;^&E>1(yowX z;k)t51La(J3u`CaUpiWaX%n&9_%j;uR31lmkxj+jlXm(t05S9Zv7ml)EgR6)yHo#N z-y>qg9!;K2@l~k^6i0k3T!t)@HO(~BBMlmAup>taKsLp{?6}5;67f#vLTz0X; zEcZIRlO@s%sk>sKi2I169-a2*s}}{{Sg6x8H6g9p^#PrF5w$hj{D84z??!R4{C9e8 z1sqYW4?7n`EA*o1$7n_&$zr3TQ*RtCDzk3PC!zZ_8nQ&u@4Oi(^wUyYSA9TikPDHK zx`66hPuqCWmC`EhAdA**4(I6C%`Fd92CzXM9Bi+Q zze9tS9EQ$qedii zQBvQkg+nNdj|p6w4%eQtPSGqQ2!)o%FFTFt&krAZ-zPNlb5!pJp-$<62_hF2M}0Z) zuRV>Gsnrqk=^cnp#zb64vwFkv_Yu{Rl~I%KgoB==?Kh!Eqcsn{gl6GDK?}bYhJtp3 zhDJg65h~xu2$pC(N!xE>Cbs$_A=r$?#T;OV-%1wA2kmq3-1?*-nL;EW!EEFLjLThf)<;&rvmG@68D0UIp1}(`}#lW@0~)f7~fq?zzGfEG0l;(8rs$8u^kj zf-FEFR^h*eE@(4XWW(a`c<1TfD+BPA!9vq+PG;L_1Q*S(ssxW9_b9mo51Y0@vqHT0 zyE@^)lJmp|)S+y=7($cXLU~5X!Yrn8;FD2&T%>CINcC}aJ?#hm#^H~D9YI`2=gX5+ zBl64K^!?TdrCikVZwZYi;}-w@n;WqVViVASK~+qkt&a1PLCMRg5kp*_9SCL3S9NHi zck3qYg2--GH=3vQ3Em6ucu4YA=Lf+76#dV$oihA$kPz|YwDB_ytUTck4I&Rd@!6z? 
zQ0V+rR#72CYY&C+$l#M@Oh2Y{_s>0tbcml}|K6b7CEKrhVkZO{Vug&dE+00`kFwVL zUldE}IRKegL=A77P{`s9nIWTJ5iUwtZbTkbTLd=bnxK{3HkOusPv=)eYALNWn2n$v z=Zzo7aXI%DLmQZ**}`0ds*fBaF8|Q``?WZ!3(DdgEo-LUQd@-6(HKRb;cc|YqpQP~ z=zln@*yotqFDmXo!cU{53>|=lvz{-zl68#JX+^q()^(7!^Hs?Lh93Xx?w6`%ay2+-iCVX+{!D~B=S}Gf!|^Bz9uu4o zLaoM=*}Rb{UbmIHT2@ZY!u3WT&hcg2pN@c0ZWC3Pdo`9Kw48y+ghtJB$HbEsMAi44 zgFS9*+H3Ls^;BbjNB*5^gBDKobZMB&=IsNw7yP3;y%`DujH;1H`90$aRb^ndmI4~%AHF8^7ry5e$K)3M* zOLVD2CO)KvDvD98`gMw-ua-=*p@`ZNxx_06lgo`pX!1!6!va}grsZzB6i_=1&0NUAi zC<^JOr_~s+XW?j7KQhh|>I#`7mqPBpzXi5mK3MoM@H$kLhLk*V24J<@UF>{s>V@)# zmN{6M^L7`W8P48+7#CrR@r@g9Eve4UsryVec;+^^gosNzFi<5%yX$ngJQ?@;6h?ry zCi_niD+*^Z@OLUt7cM?p9`J?$tu%U$C3xpc$0gMG!sJCkE4dWZH875S=CE2lXblEQ z4qPL4V!dqN5CCVi9ch8HveTp{xOErbf9A^%p09 zgqq3^Qu%fu`_b2|*DZwyk`I$X^>FHL(F(9;vH^N`Z~&LhZF)j-7B6Il>J$HtI-^x( z?67s?UMy--ym9frCS1s#k6ZpGcJy;99x3K6{eT9R%Dnf#jUk>_f~p;sX%W=cp3GpYoT> z#sA=`=z(t8yeVRs-7@_t-sIzNXi^ND1+B!Y_K*}CQ;_NVQpq+s;#~W6D-eHoU)eQ`GaC@f6H)s#nWMN6|JecCkh<<8=v4ybZF>Xk)G< zBl>b^>DlQajJD3sN0c4Mr?9H>PDc{yuRf-sBJyJP=?S}FVI<-cAaXa$Uw4|i2VA_* zhcrL9UO&~)Ys4wA2o<)U_`N#tyX{S41N(dMIu{d4 zgxn;}=g!fYxW-xOwRaJ9h%P-ri*OnWRbV zE9T}P_LPhh9SIyYBRs^>$Vumu$Bp@NSyQKTW&~USHnNu-TM9S5CEfSB1zPw$t^2u= zyJh^ZDu*q1;*7EdGc1u?_XNKhnagFeu>3Twqd_kDF7oog-V zi@{&!2NM`2&IEQKHdU?>?-leWGU zpWGR|M);Sdx=LQxd!A+I!y+#azNq$1>dh#i5nJ@o7rR)%jm!0!+RBoWP$_z>s^WP_ zYU^I>hI1@hb2aEK3tmAdrT^^L>F`_OE6`C7&nIOBIN8&3xadr4gp~t}7kEOAg3~5@ z44RR!6KXWeXD{jLf4{=NE4%vg+EN;=2J2r)QF&0%&Jtdvpc|2QTURuq_ncNGO0?W6=kd7O)-3OY|${(F@efhtj70kz{cwv~Oiho&AgyM)If}y@pT_eqv%XX;K zP{_CoppXa~HnH?Eujp!>!0* zta)Dd#_wpcyr|wf8XN1oG055KH}+lsMLjVlDklj8yV`dBvn`Y%g!?q?JFeMr`H#@c zC4#l@Z$^^c_rJ)_|4JgvI1U)>)a0H}r43bsRo~B3;wv!HX;pO!9kM?j>MspS8m*FA zG*XWXe3vFfsjM`BawJUBrf&E)-&y~>B6B)N#WZD+Un8e3_-68)Y1m3CMq#pxf0?;p zV7)?nWV|9oQ{DegR}`{XrI?H=#AZ7CXcL_A(BK^>(IBgvi$FUZP0_Z-Sc_7%DUJ*S zL9qr#MOnamrDDhYxZxKFVM-#C_-7hg_D|vkZZJclnbn1-^@pQ9Bw>$A(#qX1BGpq* zL|0!S;^WX)eQ79;c+{aEi{=#$i!2%0`QK6{1GhZRWe7uCWQo1U11yrgMCseGK>kG` 
zf)I^$w{5sv`)-h5wFj87LC-8Uw*2vX)r6u3OYks)SY=`PFILcJ)oxi!5+`}-_Z(Yh zecZh2rZd(L_{_0n@{)@r+CQ9|3I{zVCy`LBfc>ED@9Jd?9F6Yh{C`y|X48UP$s@M5 znmlKiP}FOS;6o9os)?m+@~86Ir4RU@{`ooH_ndjahI2+DJ{*fUdVo2vX;Xyqi?3*| z_(bX2Ptaarq)cP+JTy^su|!T$%A(L|7cKc&NtC$yQfCnn)OL(K8ZW5J)-lE$U_q`~ z{6wpJhrFO7-vSRIbjBgiY`bvo(buXKp8t)yV*lkFhC5GChA#e?k9D1hmbi7DBrC}Y zKUswspIBX0++CY@`O#F*YPkBA z<~{MC*oqz@)t*Ev1^S+-H}dBrmk1lYmfqLlC^QTIU^mryr7puLqoe^*dEPOxn+{3$k?>zwQ2x z3mxSe3rdDurb5_7eMXdWH4DYw@lz6PzXd?j<4-~a^3`M-9r9FoKghDNFjIz-3O|t# z;~v1-60y0brUh#JPPq7$1Z`U8`0fEhUE6cMDpP9oEZUdN>yy8wohry^FCTc@?s(YS z;1^GyJ4UzYU0*p|@@#OAbF(F^eIz%MkFKuNg@M)N9GKPKG%%4kSnZ*&8pkhwhM&+G zortBEUk`cgDHKJT@AQ4|A9`gBzANo)8uS~fyBbP#yc5#r$G^}7LE zw_!>EUmW>8CWC)n-lprrp^B*Y$K4R>Gn%=!xUf-o2fZHe)g=eR_d_X420`Dx*490C zYXQ2jWD)GBi#(+23>b;lxZK_c-TL5X$#HZCN+CVuI$aHQYhkIEAr|9M0nOE8!CwySnaQ;%+>AnCpp|N|m zIkI#?*zC+9#@qP-&CIuv4pO*2|Ms5~z)WkzICtvY1oY=o1ZqPkhG$1Wpn;IHlU~lV zqet=K)o4Dl{9NU}*vRc&(a^dD3IPTt(1~mqZ16Cy`BtpoV7F)CZ~Gqe_Eqh|>p>73 zs$SeHrUufdi&7*;fa_=cTMkT~%ieJGR5{x6Nj$~d5nU#Gn151pX190}G**m3KY=`y zqO{+}X`lz58rY4^PzbL3O<1J0Qmzbdz}|YnLMqBv&1(THCKT%}88C!ucX`=mnhjWq zRUDHke^+lVtC|Bw-mk1^DmJKLfyjkVKCXN@K)a1IFk=5tn~zSB-HOr*VO{Dx0V~wE zblG&;9O7`wu}{7CD7bs>5D~qk__>8S2KqzvN3@GSrhaM~Lot)Jk`1adg|A=kO=&B* zk`_dIGeE{|C{-E~IhhoAaC*~&cM`MhJ_`s5K31Y ztGtzaa@Rq~8SgBC+1gPGidlEut?_Y6!tfbJbuaGOl}A|s zPtrH^f4`=o#V$lR5oYjUso;Df`R%hBqagEIwCa3J-l?T7v2(n!7ijInNygC%Kh%HD0umnLDaqnfr;26=LWR(%(?)%^9>mC@90 zpNY?UnS-O|f(&n8XfR<2Rcv->C9ZbPVY~NonX%CAn;=UJqndR-Z`wc38hD#PpZ%vx z)B^jxA>-j;nFB9Kb}bG4O}!0F^9y&9tEgnleW4W~_Ne*UFnD`QE}3rw zEltgu@#v`0W<7dnKr7-i#3VwK<2Nw+dwZ0;{if|`)Ss^Q?ML>e7|wY*4~G%e1(Jr+ zhwC>-(f^X5XM5A{-Ld2Df4&=CSWK;=hbQ$}7iWFRbpO>hff`$F>9I>Ogax+1F9WUp z{7$;l*R8C5$gPx~am0Kh(FkQ0j(=bf`CRT3CX=s=X$P_IC&I0sCJf(Ch>a);nP7&< z5QJx<4Oqo`>HUhSNq;Ib?qXWJ+A6(i4HX=+1n5_fEClA?f!S|06UZwNEHTr$Ap?y+* z5Ow1r+zr+c&{5cjB~0L5L-t*lmU(eDaL~*BL^anq+J3upTBXh;v0FnUrkNgZEXMbN z*yEjuift-PCR3c);K$rp3LO9nOePmr5}y-1ss6ZR)IAqsG7l<8EM8Me8bX-q%HQEV 
zw&D$x5Cnq6Qe@2C%`Klup7t}*sy>W>payj!RkEV)*6T~wKjz+Vbzsx#jY*&atSBez z6YgR16)J@jVd>-$(B1wC`u$X^XOqhPw`U0;ek;2aTwd2d;Ol5ghNpu+1X1A%1MPf}QRGqVk=Od@ttgTVIWj>;C>C#EJ(_2TdGQ6g z;EaHO(&>B7%Oyz$x+v8-Usj%K+h|Kywp_QlOIMQ=7j|Ga;W=-@ZTP~3M0~pN)kHdt zHLHj{wvH>=ru@eyg@`o0N)q@&+Q?kE18&ax@EWSaAJc`2ujk{@D=Yp{pJyqX!hDj+o{$ZoxSPPVKylii<_(aYO`y!AT9ypctlZcj0~H(h0~ouD&<+aFz|+c!`FI(_(k9gm~`6B zqse!P;$~^eQ_Q20HW>OlY*1Ay!Rp7lyg28QohB~gCj^w|d<)lHY1{0W66Bwf%6B{1 zmxLXPcQ?dHSwh>NLQ05*Nw|^D&$wGrizYYok&(yxsn;%oYkxY@xjDvHxls27)KuTK z@btjvKq-$HMw4x^I9J2*1Y6ZFwd{*XH4uVyWmgiV3^J19%vGe@6vMqc4xfffAUa_WZ9FVu4u?5|BBEP&m6~yw!X(x56Lae;$S! zQ$XRmYH##|DM!7Tr2L>|DFPUc!GrcSRIy<;%@Co z6fEkZKvZ1tOF7c{g>%7Okr0k02D1(xV%*-?$MH|~dG;SW>?rBM9>C`?2Eac3UN+9R ztAQ&~tDX|&BU7AsUaTu;J{V^m_69wS5ccilSDQ3Hh;I6xeW}C^_8khE-5zp#kOE#{ z*Tlmbny}H!&<7q#ud&@!BPU{N4ab%SBD;grG1$Qc!8}Go zRFj`42T&p})uS9y`qS%6k&(5erkrRU!8CD-HTsme-;zlvxI~Nv+#zWB@Gg5$q$VK7 zOaEUcb~e0ho9?I`O05Ci`3fedFZtCLcvyAsb3i+BF5eADGaU5_{qcZALl5&IY}JN~ zlM~-TOkZC_?Gaz~?Z≪!r_bFhN>UqYLPKHt7Ot!ezyi!}?h~FwhlvzmDAQNU!-h znF7i1yg-Y0s6Gtw{dYe4@8skz%|IB)2;(L?-^J(~esdlXNT?rQGP#z7Y2=YYxSV?tkPrE_e$L{}!0ANj5bClFYFMez3eYGfH<-9<0SC@M?Es7WFkvKY> z*g>fd4`1D5&5`kWXJZ|)rE>UnJ_%MZ9w;6rw;kW85L_Kac~qPa+7d2-wVe1;0un3E zD-F^|Ln<6n%?XGf=PQv!0SOewH%~i1%5fKS=A$Yjk|%7LTyRJ}`A1TJg4?dd1{Vqy ztQvn9M^piW&okmM0;?Hw@wSnW@>WF2n1&E9#nh^gNRF+j=^k*lgWq^d3mrQYX*|!_ zq~&_2#@guk2T`NpAqv6G1i_)@>)N(X}k{G4wQu5 z0_UQO=N>VjD?NP^P`bqSQFf^%&-*z>>DDWU; zP*i&PxWGgLRR%WUft|Mg^){6n6X4V_x`O`Q|=4ACxP`dxAsv z7C7-#Sk2Dv&t$L2hWlMHa{Lqk2EXtF4PWM1MWbTC$f>1+heS98SD6tk?U(nFJrqTU zb8z;Fx@7V8Z`Hkfee^Bk$>`9Z(<3q<691n9zLXLxsR9O{~{q zmbh~){^T~FolQS$c=?kYOBLh6h@d)1|GtSUz>LcG_3?~LUM!GbQz6CvZuMe}c!+tO z8FMPmT4lOf6&883!H+>oiO~dh7s;n%J^$q}d-Kj~uxEaOn(0DBE{BAtbcfw$HB=d5 zV4xh+p{_^(}NZDCpc9?nTH&HZ-J^BN6RDCZ$Ip2thH#I*|^UfWvAI|4~-^|ER7U$Ni2TnS9x=1k95d5`L@rB)aSFBL3w;W<~COP!Y zLcp2vraHEjA44%>BLF7uz6+@Uu~{_VuLS>$PyNdI=>a>Aj|;=mbaV%=^uuDcKIO_M z4fYPK12=NC#<7(29uk1+diGufjaJjLFx zZI`&wiLdsUDclfvvy21DY!!8lpf|) 
zIVy&{`xR;&ZFuQXwf*(WRq0gm%iguC7fVBNgcttFjcwYEeFCca zKYxy?cDyWbpOj0!|9x#sv$qRtT8@!E6h+|9J+CAqb_8zN`zANsqZ{f zJxGI@lKh5sMHo=cB4R@l5hRur;i~F6^*;7`o8LWPGUwlG8;U-B>Jm@?dD|(R(Rb!z z+2G|=wGw-|N?@e&B`B_AT@j-g6ZW|7vE>ncits^8>NZn1O4Y9l{!Q1h1Qe+HsbNZ& z_frz8Bao$;ua;&kxjeI)13cyREFG0R$65U`!4tZ$=hJ9*#O-rG(bDk3xiz&*bqVa_ z+IF#<6g9>*eTUfdZubsO5DHl>sUF4}*HAb;vZO4Gvpo4WdTY7QPDJx?Z!~hK5T72^ zAGjiv>&F38nJ+zRJI@tyukgR={OV6uS9$04+~LtnzW->idZdFfC2`*o^dkoY8+OAf z@dawalddH@1{ii_IQ{EK2qU2B=HgY3N6Zqd*qFdC-Qo2<=Kt3VV1ucn?QSg)=E-*U zAc(JDt^KLIKS7vie>YP>{$P2M8vjNc&ab572+}ZLy!74(k{q1Lbd*hCr&y zPyJhG^8meFp12E%oWfUP?V%U3OeZV9cXYH>EhVAbd3CMiGP{g?_rX@*cCRK(SmYMy zNPW33c?;nj$Iu$W0eK?4K+t6y+X@(U^148b8SHCq1WHLq|FK|m zC=km8x@l-O+h@Jb64ZJYv%3#8(Q)JCA;J>Dv%$zdJi?=S2)Ta-cdOWG$Jf9D;!*QL zy?u@XSuPOYCCXcC&q15e>%RdZ=)=#z1dT2Y0mAQ z%k_Jm>YUMQzZgK`0}UfC`arb5-2IUEeju3T&YGuHY#pLJ`#bRbwgTV6mb{^R*h2b* zFy<&E@y>#wr-8z#=`m{Wu@b!%Rb@0i1w`b_IzU(p#%X+Mz-`#K`3KkqXfrmfVML zUFc!wGnUsX?9Z{V_BEt>tX2pI6F=PGG5pY(byPkUe+CLM_S81eq}=~9hhtf_Qc`~L z^>Qou`C~;ozmxQ&G^L~@a|sg-CW&u?j~4_qDevZEYOXIpfBkM4b`*?F=h$0$AEX1Tlh3LT4T(QbteY&G!j@yq{xmNuRvJB_Rt8w>> zvW_IyS>#{Lbdykdgk6Pz&xodv$=fBz)KX^LXP&>rsgyhkbr}k(&v5DZ=L%9@n>MF znc|15!MZ$=50s64uU0y5d8>Lwq=O=?J91aa&yECoYx55=^s?$~>oj_m6pw$ctW1NY z@_hF~NPWlRr8MW`S}PBHUUq2%nwI}U(*E5Cv6Em?FjKHD2fUlg-k%{MV<6$a!_`e5U^r2EBPf8gztBNh!JhUHl{__~+q;S_;%Y72{bzK1+Qdu4I^td-J%pLJO z$ocn&*!SUuAB2#-+@=^2%);yi}9{7$!pT0aOaP&@C`(VfH3ja4@zoHZi%wE2AG zP=9FBvFF0Wo-*FXH|ocInZBW%(p7K zx_fUdk-aCU7de@o!X9n|o4uIwuy<7Q>hIZqvY@>!hT+i+;oew3lnb*p=i zu%A0Flz*Zrlrie{a*pyv<;cqY=?UV#Rvwb6v0Y~6k z($3|SL^<52k+-o7$nyV+Isg{I36F7>xGyi>O@id5I&1`(v#(ukVnRYloJ&QqajSv) zD0TJm(3-GDD)d?|Pp7aPlYS8>h1kE*zvw&}s1B_3FZSMHAjXsP6z*uzqYxvIyu8_5 zNp^Ym*1A$`Bt2v*{{Ta(?xiGYrraF&$wv-%ziU9&L#Ac9+SsHM9o$<|3-QRoE8O0= z*wAFvW~wK^e9sx|f>(8qbYJ4iv04e^$)0i3S)^^VxnDa2+F6+jeT4_>*a!6{S%0Rh z;rjTsMUzb@yr)iGkIkdhAN1nBzy2-5wV|OiQ_U-6r|5BT6i#nUFfjsH(U;er{9wmi z?W#Q5=##G$H48fY;U_j-09%;D_mW%q=2aeW_A19zaH3Jicbwj+L(Aj9y834&ejpRg 
zwD#dwHx4?#v@Qd&WVvQGZ9OXFNBsSibY!bQw^VDXcTRc*=8ry#qn9v)9tm1NU{Jn2 zm^h2!-h#>1nr*9XsV^)n-}|*Xm(_Re!V6{BYj;XoZ*5 z^*Y;%P`hV^L7U)E4to)M7Dg`oo^Pf4eu0?yp?Uv~&-O2+cVa>CQqRK+xx%Ap788mC z){p{>0wI$rw$o%WJca%8a{ESMqQnTyKrS==7K$zr*6w$_mGpaMpxNtU%9-+uknM!B zkha{bU(D<|-;F=XaE( z*bK}GT8$6qaOo{U&LIa)7DCI(E_#qUT|C@Gjsc3{i=HnOfpZ!V+fAUkOY$}bnZ9_MNKAm2EVOrWXi)Tnrr~I7}e>g;?2yifb zGXHggr||0B-~99whi89-fytK^s7@6kVY!_~WKU(~$lrjGHNrY4RG7PvIH0ZLn+(rg z$Jft{Ed!q+?X(p7>e&wL1}e2S`R~mgT)9Z~_`5s{tiJvHaQTRC;J8kYTmn$41L;$+ zoqQ>JNh|H=B|7Lz6K)aQ@ryW{DOLOcg^OpFrzpc!|BL)ZU>{Y&VzyQ+=cNdIrTE7t z`=azSCaUOMGZ}72^Ayr* zv8dc_10gBe=8rTd9!bYIULYOdPkcY8-muM6s95zc1ceG5(bE;Qq1&|J% zN>Vu}!Kx*H7ioxBWVBO?owZmKv~bH}FBM@mm6ZU}l)p=H1kJ zKWm^Ki^jQv_<;daDqZ|`_xRCHVG*vGjG2mJ(f*I$)!E{OLX%Cq)CZ~JKuh7dHz#@K z`BnnkeLNB0XFSxXPoo1G(vOk`QNoy8Zvj9oE8)i*Y$hwz>AQ;wiKBuHMNX%$_CVU8 zXFPs$*y(jPioC8~=Q*xB!|4HiYZ9B+OHSbvZolBye<2`jm1A{#L*EBGl1>N9Q!j=P zigd={fhrHD$Sa-3D=YC{-;{&(!$51sSnQv=p`X%TL|T*Cq;kOwTF9MT^-Z(J^qvMT zW&VOUjY(18wPn}w@`su7Au9)6n7I#`;57bp%6y}ZFT#zLb}=LG5JbXHTn=gqCl&5$ z`O&N=axzl2Q1386aXBWT4Qjpo^;qut$;UQ}Q<@Bza=67=&8r|4>lKNAG6ljF00Qiz zy|P-rZk>bgB&ZSKB;F;UJ2b>TsFGe8$s%fuO?#Y3P9u z{IegDd|0~YK(P=m)PiBt==(gnQEWjoWZ1kvh9XXU$N|j_1a&I~aRG;A&5$Tjt`n4? 
z#doCn2a(vA3sAI}!E+SnY)!l)Nv8EfNgZE1;&N(*9Jxj;Xs&XndRJGP_ku&59Q#hu z9KXOFIOf&Z6H7vLU1XYl)C=b>MavY5-r53R3R)O?fUaFRbpGU1ZeA1Lr* zTO}8XJ}n#h9JM$&XmB6vn-tewa**U0PUF*epBb&FT{&(-RQVntn-Z#<%u-b^lk(6{ zVW0v5jrj>mx8&(31%)Y7eO87mQsKANN)otR>g{}TPsOWO{#aQ}rkv3ueeA)Wm?q-` z&31hDJvRK$7pc9cen$d($er{sclgT9lFzGE4q>0M;OqN@cu%fYamb60*NX{ybHHbz z)`X_>?Kh(bRdK!j6BUtdgNwFOz=eeDhBmUGo(INRxOnTr^KZxAj6*~GXPaRX0ZiGr ztsk?FA$+4fv5M!yBtD#{%4uldSTo~({i?rJH6jtLs~7x)N>Qy+LwW7EHx#6<&uv`P zlb$1)DNwN{d+&GZ%TMEYACKT$C34x|V&> zPnk3CMJWS8>U>W$;_rUt9K07g*Q>7ndMl$|PQ9DTbtE$OIl^&gQqLIGK#zI=k}?W;z@K@Pfh6`~~d_Y`BndXjiWwkO-MMfv6#f7?8C z%dRVp!!O){wvCX9lp^peywy1E^gr1j4S#&`YwrEN+^d?t8q`ah{l6(Anj3p`;;=`4SA1Fc6Gn7K zBO~eS(4?;i=}t-~VLmfr_uW?0LshDzTru z-g$|`u+lc5Pc~iOyKK4lJ>HkpikmMAyU*X6*mTZcM$*~l)J}4o5og?Jrv5fhMMMg} zm~Njj-|J$In@JkGHr2>XcbJU2RuJxNSJj?=G1hs93{$2k4u4H9;p8^1kOlkm@_dr- z3_2(GUccYKWOf(srKzBu_C-50-6@>C8qwcycR@;~?@O$WIL}UnYo&lAiExuBYf&4aX?x@%mIa)Aj5VSJ#KnF$3;i{9#q_;fc8vHoA%NISm z&Vy7eyr13APuDbj^ub9OyYd5DLME9es#t>f)-Mf%PU;k5&|Bs1A`0zxU8e$$=^Rh8 zl?a=F=>~llxdj2Y`{L5t%-Gz{V zD-)?ll67kjQlrkY%&7G9q?D@L=^qA;Cla$hq>le5JID{rmS8^WH|9XX)f*g)s&7w! 
zM{o8*_p{b>G$4Oy5CjW!eWgf7?3@oJTDGzqfgvZlqz{4+{+{$0GDax^i4pRx6mKWx z`BZnw9q1cx$L?RC(iHO+<-*4Q2p!&*0Yc5^Ul7GVe4+a0?6Jnkb^bD*`imH^8IEBT z8A?r6iiFyHWmlsSJE`LsVjiZ{V<6Lm#Zd05Dn zzK`-9Uu$gAr+=MpuI)93nMQ~nekqAnL`aO{ZFLhZ!iv8a19>RFxWI09wGYh?%HS6G zM6%N}O?Lb23+pZMGmVVt5uU`4SVnsFJ$bB}8p2z$I>*Y^5*5di3!Qnc-;35PnRb0t zm!defm=m#ME5qbMUZ8e8Dom)~Wrbgel<@4FU;XA&u_{YfjT^AX0G;{#P(0zG*A54g zlcN^%Uc6Ul_a22R%oVtgnN6jx_d#Sm&lf&&agv8)m1Xg<|uu_qlt|o#V(Y)(sivOvfrwY8sJ7%L!{EsnxVBUi$ z6u-{ravJ`k0A+LPyL+g_)Ak_Xd#Z|6Sxmt9-c~zsT4@Wf`|TB|(y-KK8a>OQe=EHB zWuAA%Vpy`KMHX#Dpu!7sD(W%pqzN%4Ka;e4Y{9x_FBQW>=4P2e-Ts5Gvi>bZ_ES zwPpy$w~Lip&`UmJwKikHI``T@fiVeT|MKaB{1F%baEdnBB#sPjpdhZ7k+OJGW)&h@ zuUsO8j6wNrP@d6GGxc;H@7JE=gh_MQ%-?pdH)JT0$$d+w``+xwkYe4`vY+4QoY0&2 zTF`p{+l;{lSEl5Hyqv_+8FktTzu{;TuAKv)-TV8<$J?}l zABDQ(+$|02JSA!yGp?wqmg{^~KRIq`=3Ph5XUQ$9Jw) zTuoQFT$j4vB#RrvBwLt}Q&kU78tp12n6qiGx*78O#Z;k1c7r*NQ^>_0HzmA|C18}V z$69(WKhVR4r0B{FP(#&Yc2N>2S#p=oH$11iN6m|VYWW`to8R$eiz^Yehc;`KY6Vqd zCVjvEE$}Ftket>nj^vF_dfWHRj#rPU)Tw7{e$uA6TB`Wqohv=@w)gNLL@T2jGTK7n-f zUmZyjn-y-VPBBcj2~p0%?MM!vxl2gB;pb)Ao-m=6?7}zelT>0<^)`tp4Rp!;e%Bc$ zmBUv3DzY)|_9vcLKTL7;%=)WAFGi(}o(+a1>97NSi+KNZH!^F7Vk(33RNqd)P0d%B z=5U9E5NxvAODWfpz^Q{2`4t5>;}@jQPe%th!dp2g(i0IkZ6i0W=dI>794L-hGJwS# zvcXvrJiHArtHw5>Hc{aHdnh~@0Tx7xLJKizr1@>g)=rV8g=A%ARo6I{QVz?cgpiCx z@&o`1(uUNV&}Otwcc6|~3Gs@<3=12FQD{h%B5Abw}VF$8}5VvcQDT{NBW1BEt`5J@0j0jw+Zi5nekqX;=-#7GSNAH(? 
zS*0a3W)-R42eVl9NH3x zSy%4Nk(&fvyKIFz#rw1iYu#Ip89xFfl<8aiP+>uz3T zMxWC4`(S>1FmasAY9oq6=5vPt)eTa&QG9poWAXabwSIKlxbuXt+t^L+IW_Vay0`~t z&VBWk?bqjlu7zIDlaC{h-u(J(K0iniaRG75gFz-0L_AMz6955CJbzn!D*bM2uuB$J zT4><4A#r)p&{IEUg}NkGhAp)WrYdu|GwTWJ8(idb&*a-uSEoJP16GRc$S1=U`Jnoy z*%xEhfo_l%a(XAq12t8a{KvK62u8Qb0D+uLsx|N;#W)=`qW ztPEk8{&X@+68utJ8?U@@H%{iUl(zi(TVj7nHLFRm&zQukXRqXf!z%I+!jW4{2q!Z| zv(yM-x zMN}ypsXa!dl$b$N+=>~Z&cA2yBr1*@VNev%EfhTbbgs~8pB{uY$wdCs>D0;r^P`Y`{pzP6U-0M_;b^j7u|=d*KUTZd4M!R3vV z=3Gb_CPyyu|HfwspyEJc6mF-S@!7iJa$B2dfLJr=veU~FX+->T@taVS+xDAF^}Up% z;f;ad>`nCv@E{uO5WnFP`~fv+k7#~ZUi2LrAEuM7J$ak*s8DU983CDds|IQ`x1Q9$Bl1VYeTm$<{q$xg> zcam8DU~ECxXUL9&&aXg!Dx1+E5s7qA#dGE;tcd=m5G{Mx6Ld%b!FD=IrWuk(CV}7_ z)a^>S&ZNvV0Q@x{XPYM-$|!$%Hh39QYuBUgdKp-KAaOP9M#i!0r+J7zFW^z5QpB-f ze8l<+@YFGfKRW*>HT~ZKHO^O}@Sdm_grDpz3^Q3l)jsUQ2;&f(Swy)! z){It^XNc5gMc`$&TA>p6xU2CZc+f7%4P2CGa8)x3P3i4t0{sPBfwmBS1VSC&LS$C2 zRkekooxzF^+MXP`a=feTX=kx4#f|6Bw2K*2Ezc~ijpFcx5MWI{N1>Te*6?T-UA))X z4dWy}1IiofW%Zfy(T_WE7UqKEDCeRNJ_l#krlwAno`k4THpUGjWYNGV=lkx_*Mk(9Hn=}KJ~O2<3pIy8ce%YYG@0C zk2|SnOg2NAtW&O)c1jbSFiiCf^pr+JKt0GPAUn444@+FT*?JQm6dUO@6uEn#(R|U6 zYjyi8BUHM4XXegj0ylnt^0m%qs}%O)IO~{PG}A3HrsnT6nk6S|wbR8FJgfdC{GSOkE409 zmEs!dYQ^jF_+i0TaDH)2(oYEOK+rd#p(mo6@N*-pU8IA-*1{`t zYK%}2`DN6iyi+7IR)q@{)A-LamVZ@O{3j-C7zBRX@exFevP@WI$M$Ik-~Z+fMiLs~ zv*7^BSgq%KiAOej#jOs~5FC0}Hn`9Tm`hmB4_v#;5-8(HMENt&2GA+dEMt%~BE09$ z0e3G3B#zpj&By%BX%7hnS!GXcGBghf9att^QO6k> zdFw09ZMsnccMER1TqB%qo4gwvoO1YZFA7C$dcYq7VJcApFT&^7(5$Ezhhv!OaV9u0dE29gLs0^wC=g^D-kC4Ar+a6$ zHG!@>j|MXA_(7r&9B_Pl#*xcjmL2zclnXE`AOAd&J}x>`^AZX=>?9$YoY4fq9DH*} zIbyF5NA+7r`YfB}I5fkx@nkzOvBJ$ia@##dT2|jtjBin=xY_Kah#iuwUt7Q;`X?hb z`cHBRCKZU1vv$rbxGuKcN_)+hO*kHfr|P|Jqe&UIU_h{ki!1H3*8u-I z$y%k<8Fil9Egs+%v-vs;VVgnMm3!#*n6-$eIFmx!zo1flV$SdDCZ1lL7tTX!RDRrp zjQPY-jQ@mbV*yq#*32EU$az)*r4~CR%#2*(RZ9M(g+^z5m@5!HmHZrnH7iY-@;pA$ z{W@!-8TF#rl}Xv@;sePFq!z=II5~7im-%%7mUgbpA)-f~O3@{Fu{c&;f%FQJd*8HF zqjXR~DS+vMlw9}^ho$4QABcrRh9kkK%-?uffxz(Gl!E>GSTqfwgS@%vvp~cXspvg$ 
z?)@ym;+g!o>3jsxCaP8nlDxL<+C>`?*AjsV-{W9i)SbE?btSR!#-av@@9jz(`wPUi zE1XJcjJicQVC0ytG{WlYp*JY;ZWQR;BM&4R z$~8A?gj!nVuIkUCGyYZ$p}qy+?4^mi z;BGN~MMmONep`U>aiDCySI)l_u}pOhez8R;HCTyvzuWN9jP7j@bdmRyq=tgtxEdPB za5}%m{Zxu%-+TQ8;QDr|f1qG-;mw;R=&n3d$25hRCt^A4x#58U$toH~37Wy!z= zOPgosy7MHx;3>C1);N-A0LDs_l>f&^+RBF{-q5Q)QP)UqO(tBplj3Z8gsec zSRMA(COHCPPApMmPK{lvNI_1#*2DH|9vxWz zTgGLDTCa0Aegc-jthV82QCbmpJ0?1xKM{iRZqI%unA)v&-n5*~i`S!toC~LG9-?|g z`juRU+A9!xb$*dlP-#Tj0^T0Qe9ZfwR*ZxnEV@4>XtC3pVlj@IqFAhPd@5Q6@g_z! zDA`i!*;dWvei>6ZT;pDWpE$G$FQHD9%pEP595$}$-AgJn96Rswc-RR4SOPC>f&fQr z3%23&e5)^-{?4egA3Dp(Y33?y z&Q!FqgnwnnqKnk?iYt16Qm?6-DF~vj)2QTbj)ns}On!@#9Io|jFE)Dkqa9B2XOYkk z9!0H74{U2a0;0DRu=NLYB?99f8WifekS*j|PN+onsMoXb^$`>>W`(OVoot&h$+cZFu*t(HH1R>x{XmIk=pTP>(_F zRwH}n@tI6iwBRl=7-?7~*yLw?QzD>E$a5u0CV%10*Zy`~Gs`+$tH&8_0_^avU2+`; z?ooVkJ=Jg1Y!RVJCFXM=#0BJf-W_joB4EILJN2jNMp&EiSw#vpQgms948-<&>Y9!wfP6QYEh>{vEb=IlSC@f)1t{@HK0wRQAV>-^fb`p zsALwJJL=pEY4<*J73m~DVFo&1zBq|`Oj8F23SzpB@TdlDlaAdoPlS`k9`4pJ;*jTd z1a@pr=15e@AcSG;%}=7FIU#-6JLzQn!Ju=Z)?I$;AoYRR+@g~LW{c;UFY)dv-fgP@ z9kPP^8N;2roOFLT0zsOAaK8iJuAO}d>y1%_J7|Zm*D+amC*5E#T6GEgz4jr=Xw39& z;5(Bkr{YMj{tV1deM|P9S#;%DciQq7Ek|XK?eC*zrrXELi0mPF<82u314{ z?!Aa?++oPoixpopl%>fj)E|veMif6((L|+#=g{P^WPUeJV%6q5X~wgRK#{!8*a(57 zjk3)l=GWD+J0*jNG|dCA3TEIeKU#3B3t2cDqNn)(pf7YEKbnR%_ZNoEp;>N6g*>|( z<9?}^vkGD5FW-5a^rXH?m=Sn9rIaNh*8cpbW7wPs2xy|Aoz`C;y0XQhzu5-FZqz?rsm#WZ@y^BykO;-g||ce2@TU#7?4X z3!sDl2mD&YFbqc}M@1u@NG)c(NTVVV_xn34U)N7-B-Q@NgYG0mkfBhFoWW}yi_s|z ztSPh>_!Sv$m9W2xd|17rkWseS{ z-KmuSwF?29hvRxR?sG=gg+9i~svG{7xbemS6h*+n3BpHsj5=bFoVoh7V0)-fw>)}u zV}%Yc*|=Ge5kWP5e|1p{132XGBoqak_Hq0OgTeOUrSJj?4MU#o8vp29mb!0U#K-D< zavkzX#&j#w{5ORGMcYcS>xHCKX>ps$pX00CdJf@6Uv8=p9w(?@cb|0MYv>gC`A$;; zMMwOQWcI%cs+1ufI1JIqSyTscWt3Z!cAApk9d%MoDpe)^)%UxjXne(OB2`~)vL-cj zGE})PWnF(0QPl62tJxWx)-_k z+DZJL(86e_cKoKf+rxl@7GuU0?Kr}+f7cs9IOm0`GcM3xC3YgUUuO626?JP~x=*;} z>I-%6sh?q>kpu(ld23G>5DpoDYw>uyzsSK$`(5o`^jxc8 zKYgnuSBO@xrlm$V9)r1@g(k+e@pbm`ZPIu(%ZE|GR(M(JKCbf&)Ytn~_c{YJN_8V* 
z`@J;BCq{J3zI@AJ^C93yV#4PPdX{5j%3RwFNo7@k=Rmp=eNpU|F%1w z9%smI2vx=sQb9GAvu(vKE7|7?xQgywnb_imvEV)IhR zokh_VO*@VG81VHP*I!}_VF&Usv--p`t12ux32)?3RO1>8mEc#_%78% zro@J;(oZ~w!X{#aIZl>y2Aqfv6UIcX`T9ew(a!=*C32H=DANYp$cIh z{JV1rI1+%_oeqjs!-Ln1crS<=!U@yhO`QtkbV99-EwJZJ?VxRC9&}9DoEgTwWGzpG2H*gT%TtGm^x zFwB+n#1N1?LpUQ*F}N4R!$QQ490`6%%^a##AlgaZ=0i8g35l#dp=Yg&c_||`G;^6K zM^{|`CC32(ebxH#UB$ve{)zs4k#ON~S|JivyrP3TpnHSnFJ&6R$1U%D!jP#FnW7q%=Yhhb_+E9qn?T>enk8XyK zPl0mrxhKCDO$6@z;bD)?$7KuSvY8|Ve(aAo+$v0A=S4ne_w>}iRgC=WLNu~<7F#4$N%RR5eRS`2>sxE!4!?8 z1Qqx9!pQf=z{*66`I$pg5kE&NY%aWh8XgI(04CZUj^^K@BLBVscKBQ4^ss=>Z3yNc zPaK#?)QaY9sqJDgqx_>cVQ=#xe+GMj4cy`a+q&leJ3SN_tr#--5wL_vl7++YV>srf zkHEj|)-jd|Rno}i$oVMdjev1n;6B|1*M|2W84Uj-Pz)wj!mXqJEL74kbm5bx)}u>ROT=`;@1Y(DBt zuC@yTz&qeQy#wC)pe8GD@ptA2;G*OZa{1uSB+FOrdkV5Vh>yc|A%FN8iuBvG+Qml? zS|(}M1>~pO5JIs-i;|riaG|cHxfh%_&zuIO*zt4JV^ae}fmec9gZEaT`vFkY|3FUGOn8rEhp_Pc*4LZ2uYd zV)sE<1nkuQe}tw?0^AGE*9;QvidE7M5+92PKX>f{U+QN7sm->hNm z_xeAZ3<@s}1D_EVK>wN)(SepE!NKOqRDlO}7%|#gOkSI(ogxnGE}>M1WEj>SEHTKl zb(*WiP<%)C^b~@&%lz)>(qVTup5Zn#Sh^1HZB0l#T97n(Begl_vSI7k^8a1`SylwD zUw#+gv;3Tf4&#LZ-kbp1B_TR2E<%iVdZ)=qZTA+VY{TiBc|BnPYf$BKil+Y78wg5* z(UDU3FC3#E@KSz`(A_nk$(l^A?^E6AukaeJHU!a324gXSh-_5vU3dhO5L&XiC>YM$XH|{SWS!Nh9p|z3na$`t0QVuPq5iI8*3)Et*tI z20>kyAP~T^;}EUHZj1kd2_XV6OSHQCO^C7|S~$@9z4A5}Aj+?e2SjI&7f$*;UzV$} zxbgJRk>H+AQGY^!WXcx9*|v*?Oxh!5 zScjRf1Sdp{GgGl4)z$=LWiF>Mon>zhP)B|uT(xEWs zQ>f$L?+_XxN}BM#tbU7MN95_dRwGf-k+<(6>gnKy5%xnx2lc@d?^w;92!^Yix!^pd zMF$8iMG(3g^U6ChfwkLjl9c}gr1PI!GhC^To?XSKdKBDtJKA=0NU}OFjx0}q{m{Qk z*;<5<{9J?-?OB{i{bT#jF4b|Oe-T`*(bDN?GNwoTC+P$*e+Gbp5p@dldJb7^IABMV zqF4wMDBnf#s{vC3p%u4BJ-VRqnB7$HqUJS5szTuh1T=QI;BlZ^gJjxc2YKM>@`moK z-xzDQQ{kdie&Q>ezN?f1I9@{knogkR7qE(6rJylBDM~C#iXJ93gmDKN3p}{;WJCv= zsF@-J+oDDZeE)`b2tPy(1+if_p=eEoFBf0Mg#PhE8>E&;_d;4x{6`*FkX=ajZr(Pm zQcMRu+LQ z%(5jgQzP~Y@krC+{crr#<1qjGW{&wqD5VPNb0*hLlxOe$yIlW)xj1;>MA5CGSuqND zqwrYu!^&okKW?c9f}0Y?PNk?NA9frWg6rrX$TOr&0W)cw2`GED+_t1%rCvfHqXQk` 
z%u-hup&@Q`7_Ae1(&1k)NB9uFY|D>}1k%r$PexRZ%qpT^x=pTDPvK`2a z1dij#0M#4@5q3^AZ53vgRR}$k6gXit|Z2mp9 za1I~3tLjM=Rf6bA;ieq$)6n*%)N&bRx)A0pBGjDy(0mRX+8kAS_#wF1cObJg84iMA zvfM+MewNV8bib%1Vn^FcNr?!LhB&@CEUDFL?%$&nyr1rFCx{GmBpvq8Gw#deMMFcX z0i>M}ZcyRsI?b*2b}!8<(trT%XI*9e(~?0u0920ZpLN!Bs!5f`hE0Z#XE9J(D@=wb zc*lBKwQCyJIwoPk)1=h%>d?v141O4dHcn7$BjD-6gUVcv!-x5v2@#IVI-LnP422IX z-Q9TOMKffco0{#F;hqG5T|Kat0ALcUxw=S!moeOK_Y3)SM2BI`5Ouvgxy^C*TtKa1 zlR8A~5Ii7#o^s8OY57BI9dJO+^{IJWZfS5aHR6uM(V%~aOP%JDq=a8igy!#0gnzAu zSqL!txezva6K1O#u%TdVmZ%aOA2?lr2@P&gv$cSNuA`3A91#&dA}EV&T@VIwT=y3Q zb6tj_`?X367|5w?8%^=O%J0~EAcj&y{gT0JTHwaKz6?7WzXV4*dpO?J|vc) ziDeHac8NtJdyFVm=ucz_J`nQf|D)8em)Xb>&^kko_Cjr4AD3OzP6qw2lU3z~GQn(| zD?s-eWw((4YLABn>pwsx_+&|v?mu;!i_vX*w&=^g63TKr`7{*qem zvu)%!ddNxhWJ`+nt-!46Ni(d|V<;c=?H5DDT8sHZI`R@cq~qcsFb zE`3g7K9amO;leVwig2*o-TxusPPzF=!6rcvd4n17AfQNaOo$gVkBg5J_t$Ce2AAab zgSYz%38HH;|23-1TEWXqy+8y!ezjF=*)$mMDqu*Id-(9zVdGb6T#XcD*LfFDpooO| zcwC0y-i``mT_N8szbH;sy1RYpPGgC5c3*-Z-L4(X^bGKdxmr9A|Df*n!=d5PxD0Mq@%Zg;3B&EomPg+Q`^%GW z@ipUp8&4`47P{oVdbI5*|TJQaVhM(-W%ePs%Dcpd&p$}dzx+Q-&CjYIE)bfzn6I2@edn)yRJ98R_ z!3>l)5?4N7Y`lsyCkSh^_mKvNnKN5-!&Tx!>=;A^!l1_$?OZ+3{!eQ9DX7)+mVo%h z;X^$h57+C;i#{1*n8mfpv$pIKf(AUkToxFtX(1<-+qpvUll z9@jLlG@%FlFs3dW0p9_t=Aij*a$sTUW_0mJI!NEgA%T3$*qumJJ@}c`pnC|B8Iqz|y4MEjR zwYURHi7tQL>j)xLrTy#w=<~ih0Fp7MwmXhnuqXv+?1?5!Y*%PfmkA)){a%r|vVGj4 zbL=oh6?UN;s{YGP%lZuDtt`6j79XtUxYpNDUHE{c&*e1le5{aYlAkjOP6 zl7&9`k?25hH6Orv?eOUHsg*dxDJh`^-js$3?jxG*TehB{_E*Wej7A6ya1gAAZ@yQt z2Zs;UA`=TP7wbo>uicXY=JG#uL@NO-31VsUvB#y{ce zEED;#U;JLCSXKf0jh)JGt0VvoX5-U+PXZ4;kg_^lx^$MP{b2^e?(dgw)(&hRHbfG= z7q?8;dtp{1{;Eh(#Uvm>WdG%Nk4h5O%0Zc~_53Wv8yJ%3BkA+0BMo!vx6;tXDO37M z{;gQ^#Np6eplw^L6T;5vN2j4vuhGhP9{XKcbM|F!o1H{MKtAUi(bWn*9JAjN%aj*^ zB3>D{?d10sF13qwKVI3u7g}4kOOvu{ zfk&vqgs8D}I!IsY*9yc7%|SouRdNK;8p2E#At0#|y63D=de5Q+?R!;o+zO; z^Hw8wma?8U*XCOf7L6)T8r=yn2^qq7@s+k&k(FraVOQW3=BryS>wqJLWUyeFTgLSe zt8nFOfv05FGEwl|M6HI%DIM9K`aAF0W8_}A0!y{Oq|RH^FGpXK_hAFl*wZEqb3luS 
z2jw;XjoPz4%LyfXjxKa$n7diPX1r~Wp;+GUZhP6#FlzS!A%YCfgBIhINu{w@hHZDb zZgX)`t$a<9pplxbC~Tq)GAbz@+r ze$%(tp8Znksg+%Oft?NJ+h1>jg#<7Ypsh&-1`pGPIy`0|VA}Bzp{ye3a+iJRO9CkB zc+#9!f3@`+1jsD(-ytQYti*617=|;35)SL^W)#fednr}IKWdkNQC@Bm5F=SNnRd+G zKaq`&2(^#Z*<-(x5jEa#wzU}@#-RiK(Xb#@u5iR0tPZo4*wEn>3AgKF`OtagU4Y$c z1w(Cp9*2zfr!3)PFdHiW+HsW9t+%8kC)gI}=s2*}-FL4B7mX6B&FJ@DER(carn3$4 zQ6T!et*iKH+%f#2Sx>0b&D*IWmi7fTA9hEClF8xb1Xg~PoA(BbxVa9iBp6sV#5dB> z!J)J0dzBcpEP6kh0=ZefM1XkhiNKZWHzr*kgkaM-*aqCsv@-uC*0)e#I^!D-n~ua; zb*jh>8^Uv>0h=uKzU|V{r9CUM(RRj@_QN^?h8~*#^R3#wc;R~)W|uke9wC(NxN|RF zD74&X|V$Ru>UCAj$proCCM}__p7Mt?*B{@8W}8)?C(NL9z`GxP|EPflj&up>|N=( zym(uWxCX)f#(u~JfYOJ6eFB6Y!Irbi!v)K)h{G*<)ZE^&69_r7g^%1#TERT4zdo^O zP4wrDbt*TeI2;$+GV9w`Ey+cggc@8x?}<}gm95hVmNT?nSl;wO=v9`md9CDP#%fIV&H!K^KxKF|XRqUgJc z@%P4OTYtCac`_&e#WWLx%bdm>M|?$ig7p?aHCn*ev&?XiZNN5Azg)Z&oLq3;3|#n3 zh*I+llVUAHq3l;dB|cd-9U*tAQ}{}u)PAq28cLTjHTO9!tT0d&J@-agEyAtGI8H7e zLYSjA?PO75Y5R)K)hb=r8heIEhgi|}Sf-t;uDWFWdtSm#cB}?d-8zz5>2zMU(!TPP zJ&uzi{+G%)m=mEK~*&CRyo zq}~7QEfmb<`#VI|#?hv;By;jX40WJU(l;u<%7ca_PnJtLx2*?6knxgPtVwzi``wfZ zq-5ltnJlMAu(FrBVTY2m@I$r1fP}v?`OD~TO1~V6>@Tu~_9t;pR?6RGc{?m(!%5n+ z$r2$R9iDN4dQ`GnRg0P=bxc`rCIX32ZB3?sB(Zqwv{463sk)9M?$r2v`HYw8eP(&` zadKDMd&lgP1a&Z!I8K|VjLU30Fg?R(I+~4yS*DKr?cGaj?|e>!^dF5ksk5(!RoG?2 z#iguj`pGT{Q@d$N>72iXlY6~7q^S9VS6He@_MD648;j7etc|(yzR95~o4j=m526GW zrQWA)x}u}DulHm?GbSWDhaFqS*Y0s9Qni>P0w1R0j=t59^_p)}p?rIah3%okL3UH` zTv^xkk;reBg>EOdK*f7=A?b)Od_~<17ix)o!AdpNKY1($OrFpb46UBBdkDlLB`&o7)ug-~m}`2vTuzp)E`dGWv$j5vK~xgEE+trcO!bAo9a zmzf%4b|qU4+taPN@dk>)&30x+YI5<4y4!+zO#FVXnP`8P3-kCF7Nt)2fxcYnhol03 zoE(3j5F=GOi>hC)VC~)(dt+vL#Z4UYb*}H&bgm<;_XYT8vKG!Sb6K@G&R$sbeEi}r zS$tq;gAr39t8g}~e|Da3zD-ec{?4LQEyrb7&)D`PeOZ+L#@zmKBI(oM&@!^#?)len zvvbOxt~%ybGr%77;`n!it3|VR@^M-p|mgi?y}LGL+f9@5vkeA`^eX|JFL0{ zs^VufQc}#Dl{f0W(=M7iXaY1*g()#K9Mf=O1K+@>=C&ZsYpXSmcdi;68XQaZu5(7i z)`jlc(@>K_r%%3UfDfy zYO$Dk?0DGBk1J~Q+h!&Q%r&o52dIOq@%SIFWnBU=X=p<&vyQPCg^swMz3NG2MKG!g 
zj(oI~p$#e0&m+e4zYT}762C_^Px*0Y#g>f{vu4n*@a)?c_~JPEO}2<#-HwH@32gT* z<$FAii7#Wqlmqk!&Wc5COecgD;zw;0kMv4G4~W{VoG<&bDtc9DwJ-Dtoox22t*XWd zrLjO^oV=Lq+1YgMbW{aDYh=+o=*gsBq)@+m*x+OFC@x*xtWeI8*km9@EHIp4x&}^U z=+%I=M*T@aHgK9iK{HT3JE>VKPEa;7&Mquu4|;7~X|y!=#sj4IfdLK?=+=6Z5zjh@ z^8%j;dQIIb^;_9%`})A#=O!B7o(9+-DvxekZSMQy+O9hnvDaC@@t)ur8*aYvxd-LX zQhbc+Cu-~8)=vT|oJNWMcJuijrwhd(NJ*X@&Ql7;e6hxkM({#&SY~!hNXPg+cfNh~ zyD_WA=%W70UryGp7Kk0CC{x}<*Bup{vj(17Oy_(N$1nHvfFYoY9oZiaN4ILurX@6X@?nBbsOH-9*7n?NXYzB~E%Qc}+pA zhk+wjZ<@PltsTQdA4AfmmGM|mCCq&H$+T9wTNJ%(<~*w(T;5K7-Qic_QI%jyi9%#Y z3lgG@Ro+M4SFJ<1sf%cdLG8<&v17UQX|3!I3mc^-MH0{PL-xk;c(5L8ebz-CwjnpO zGJheM+(-K@G~*DPquprSM1lk@kSLu|rsTvk{DlsCy?s9E*yY@C)r|{rZCaJQ8JGsW!7Vr#reuY%z+VaTSrDe z<_>R8kb)GmnzbiY^zA2F`YkGqK5&wb3bBFl>P1^+zsyMXe$Apj-!}Jb?PwD9ebe{g zMF)yOz4vo;5pz(jX+|A?@9M(LEelBMpZ5HfVxkcNkIUvuGbxaAU!E&KAD@kmiz~1d zH3J>U>UL}6xv?lB!du}eTYCg&Sc+XK26qFEq4n1PbxN2>;TruN8;{22Q=w%}<};+) zcnGt0*LCC%=>R!GKEz^uu~^IzUdp|gn`v>{p7TSfD%2W>+SE3`b5B9PzRIf<^Gu;V zvfbXq`%6<>+zFe@`#Oqd9@}iQw?UPCG7h(GY`<9fCeFqov;o2M(6k>O(!tp%_h6;5 zHwDdjRk}QM&g3+TCb_UW`)!Kf!b|r{M`y&Ot9>l4%G^8>k z*ixGQ`BJaBK_&=Q!im$HK_)%2Q%qobBuoaRc^j9HtdyucJ#Anz1X-{9DvbHnKr8W) zfnq6ydzM|D9{?60RBC7YFgByV=d+_MRAt-0KNr#}&Pbk`g-A0}&1kE4I6Uv(9ueRz z5N^{8{tyoIyDGG|UDUK<75|qCCA@r| zEtbrs7wX|fIPyixKZC@d=@pgXmVU!e0gUG<914{PZn0#WyiB3r$BU=c?k~9Af7KG) zEn9OEE!T3(1kI^>F>V59w?XSC##i6oy8Ef?AEZ6b`|!F74eQ!D7(!{?0{0`qPXbq)jX?lr)q`hl)Iv=jDCd)i)V03#xEl zl*lR>@8nia@~!F;*$H)RpV`@+@6hm%TkUtme%3i#n^0iim?jy0_Z`o)k61xQU8saL zr|@neH~NJ;H?w=C4SLVx>R<F^f z89ePF+!lQrRVhLhHxEm_jc30fY7+J<0L}VccG?zq?}0Oh%TM)DuzRXD76qrWOY2)x z%)GCehu_XJG#zq6SKpk+ol-1(ah;qIe0)1RK+E=iSAcsV^OfW0*PL*itRFGG9WM}P zl$l0e)Roahhlc~2TT4_v&Daj`Zvnfr2`Fp-(sDn-!DKs z9et)i0uhFxZvgL-M_~ga3DVv(ijJi!S9?P834%pe7~!ecX=dkWB5_|qYaCLoxKSS6 z8XJU{ril?1j~mxZU*rJbiNLx!luli*D38_-XNNv!4mxfX9_lJz{&Gq9|1tH}aZR`1|2T{ZLs~?-6;NpqMt3MJ(k-oYH$zIK zMFvQNf=YvohEb!tJ4QF7`#0|Q_r5>Be+Q54bbT#q`K6Xwo1QKTUtd3hMKC9wbf!CO-$vG=pfIfzxx& 
zFG01j4jfaoawx=^xOd{NDypc`*l#o4AR_NRyp)3K&xh--a%PH?_=aWP99m76$8Yy- z3)Ur6`DTXnIk}{eB$ENyP5xe5SSAh`xa9VJzD0)qdA`J)3JM&e} zUhh@cMdt+(+vX&Q1{c`qeps`^dX@t<>Tb_*;gzNicU(7~=g_V7TpvRGd}*&1Gq<(o zb6?bO0ZJMIyiKTPD@n6IRHc(AZx`?^MyMF6WeAO@gGKI)Rtf@Cse^JuFe-?Koykq` z?FF5PTzioZEb4@l2mS+YlQcp6??Nz%WLCz;3R3y%M$w&&Q0Ybe*{az(R`fl*h>Jm0 zdCBLsL)AXHp2zny53RAiiMv)J!p&Wl@k(-Y%uhCLCvb20JocbN2s7WK_=Q_9^E%;_ z0T7J<&~0E*65jko(1Y&ec7B6yzOV$%9pef_GFL^7*?J1P2!7PGbTl~@2`fc6=+FS? zs`7fW7){0TW+<;8+Nu+*`>5mKk?$xmPb*QgJ@$>TRp@Vwyr~IEX9LlEbkbGwNyS_i zInmy_cLT&uhy3XOy7}l445Ds6qEsmVA1a0{(20pVLGg;I^Bg?xPUSyccbTx@W%;X? znpK*1q(H@HIu>)f@0|O&HT`i9`7;6^3MI!vZ=u{JAlTW0WRXq?_6f(mu0#dJ|HrK`4J3glhm*LtkEOoY=R!I3fp!Rj(DUJyhE(he z-Y_VMI!{q27`%UW?2c+>peznbvxN|TGo$GPZw#ld59o`Bw?q(^1*1c)(JWqq?=y$* z9`uPsH3$y#w;8i~QThSJW7SM?pWXc~$5_>$ezc+yW0cS|3$`%ME2|}aM z2_X^-5KYH1nskp8-QTaYcSjiXm5 zCj}lX_&PzP@}>^~6;5>T%4T~i>N5-M^8cgG2Td_SNMgBy7JRms?H(B1U$N#Y2l0go z1M(@cf|7tDb5E|bT4QFC3H96ZUK9l`r+P5Wzm>B)IBxa^L znXn$?`LfKqrOap`@fVe_J&;fkaCkdp)Koui6ni6^tSwrQF!nD7&)j&K}A#dWjiU|q`xo?U@o-);h$N}vI-)_#l*p@r}G+fM>5om zUwze&8>+bD4;;WGy2BeQxYR2wzrdEjC#2|~irg>Ui*)e~VCqF?CsK1aHH<@rYbjFI zihMj;8Vv~?ES87|3xj(~+UXCSiu;Cx)I~&M$zxwE+z;x|+5bL|34lhMrX+keSEoRg zkI^5meUWU?vGht}RKNIxVd*|)ib6V{{q@m8Xxmm2(O)$`da8K=)35^*lt+~*y0UY# z9J1PTZXyVW>k={Cx%84#^)E~h`(c--dWY_A!R@KMmjviPd8)}U|I`=Jx#V!wuo_5xi|-v}JNLqkd& zHl7JHUU|^E6qRD|JPoeB=BgAz7jh)O_?c0CUuQ4fu>V(wsrFgK3Y&JS%hO&emqxpQ zv*BC6Inj@E9O&>!2IOpXbziVzgluM{qo*JUF3}^fUa#ulJgx^@FP2%4s3H&%6TF?Y zF1T%~FLA=eThz<{e>&zN9u%p1{};VxN21p<`iYFpM>AlQq#VRD%6QfSLA<|Mo4I}E zrK00qp=vGC>jG;2z~}!E%-%uG-kO^N(jcVwzUSFc0bDxT=b8z|_Us+LQQ~A!(zwHh zqf#l_8qT|a8jjmAfYILyZ()G*%9k`aTbi6q-M4CzVn$0 zf@L<{pw}_%6;TQPEz(D%FW(Sxy!`?p`IGe*ZEx0(BM_FYQWXJoLdzWi zqLBi+Wwu509E;nov^oeBWUWip-A((mP5H>PNfeyRvm2eZ(l^*Ev=m}g(lB!~1DzN| z>Ce_qKA-WSTO07;NSs7E)2rWZ_DY1IhA6i2=%&l@le98;(j5ALdOY#s_j(UojuMSi zaP?*Xj^LYll|Xm;{k-Ds|I7bZ;TZdK`i)tz+W4|^)b-s?18$b3oI9uC*?b0TR$Vr= zP4?!KcX_~e5b#r!_-onMelLQ6%nXDPap&v52d>p_(wBQQo5{&=u1{`=!}Gbmym=%U 
zCz+)M!Y0<(!7iix9=4Zf>(f|ucTzOHAih01w?50aDMcU0JnYg)9PKUUv{iVt!?| zr*UOrQnH~bHEtojMD53S&+rLiLrFH54x-GW!~Lu;1maHq^f8qx(7|%+WGE8!ct49& zK6k0``FYJRzxeYT#&lH zSq=$JK5UACLzp0C{NIS8>}niO*g-t5!>h>O1W|?fKT;-6dBQ{}HWViVyXv`J5sEkl zE<@-aQzr_tpYeRa&L=R%NBm3kxhnACHjF}kW^NM4%DVkGH~U%L(x!}=EFqXo*so+) zK+!OhkP+hf5lnzsc}qPd{h_46B@#U^ zD0S-!FbfnCMgIZ5!gR2rsTQt4?^|z`>vtxv+)8ijVL0Q)VOFS9;Kw99Q|cnwASSs^cL)0Cw#uWcLv|^6k3>L9SHHx zGRU6n@}*kDqP1-Lm3Z>JH|Ah8tSDhr-z&4obx7hs!A zv}N(zF#DO-ax7>VC7;#fGH@UI-M*2^vph7qf57S+4D*-72a#%p_Af*fX_^h`Am{C8Eh(?mG0 znsk?60%kDo{Vn$qdTsCdqf-1wR-ETip$~l&ph7)ZMY?WtB|{fAM~#Eq0G&FCp`~yN;7Hh-$E82$n+GG#&dF|K1JQ)y26}# z^^|sQt;@5EI>siiYq~x;BbHwL(RmMr{YC&NOqivDXjJ;Ut8Mn3GZ32!la}wG#8N8V zo}ndsP@auyf##~39-qkEGI|mlDe(Q{pDkAd&(L&{OafnyWXjX2o+Nd@vI99l&3kjP--$mich~I_9ApDq^Z?}ih{UpLpTB!GJ zvv-H@;pDG2C(f>Yh_UY1Ul9&Z%1>v`Id*=K{^yQ#3J^q_RmRM@FnTvsAvyE9R1YEk z(R$ym^J7DQg)CoU_&iRm_-#n<<gk2fh zJop5qpIEYV8+wdt{nGO`Uc^-}RVRU;H+#WL2cKFU{(31zvQ+KxBtsvWdWQ3IA}`fG zUn{_m_GUE99yQbwQ);mwWOiD(S@Z?vgLbAk21ow=C-gP0 zQ&{gB4&h=-7|W2XR7>jp~3^?x@O0s&!1V*<8;GvWwVjlEbWhG#$C6Hux zsgGAO4rqw}M81_tR$)*L+Te$yWOz)p4LCdQ1MR;C{aaEpItq6W=X~tkaSKAOr)o2vdS0=R!o->rxVsP#)6ZM)(O*y zummcCDzHvXy|e8ihGx;HxPtR=hOxvGvM&F86eSM?wvneJJ__akFWTO}Nabe$zksSg zSn0S&7T(NY(kBs`&B5r;A2X}NjxdS_ZDJ|Hd~b{8DG{DS6xOr{-zX1!HNTIh;KUUO zsNG=ckFw_deH{4Iz}ZLk)15mIn;et2iC5n(s%4!8Q#F)5C3d|WQJQEzQnvd0PRY7u zM{k5TyruWIPWLM(VC~}|V`n4 zt73zx&*2YKn3-HGxI%yPNgF?^IFl`ZMS~30To6v6;=bl%QyZ7C9KgS=Su*K9Zhz(a zSeLH*pc)-;c3fyY^ByE^{h#5^eStsGgFe6!2sNs;XyOjsOIuW;b>E(HX<#A6wEG%G zkWiMXSV7Gqm}||vzjR$R2Xf@I*YR4CgAto$_OJ^Zk{tko+HV;bgP#o1MEHga0(w6BNG(wmK=Lkvb3^Aq1eaP0cSlwGrg z9oC%F#qVizI)gA$Mx)50tJ=vxDlHM(AA!O>m^b{60XGx&`x-dJ@SZ+~#z8lXH{_fD zO5sL>N%gGT0p!afib}0~;Zf4?q%-!Wp>_U|uM-fPAV{@WSdr&1l45^JfXMjDA*TE< zji?;|puxlQ?|7=1ZF-+gGA~aNoEA-hpJs+Z zY3<*iFA?qIeTx(wdG?vh2zaWjdp4?_DB{_FT$;r0&hObDH9X@S6eC&1JovGK@XLW+ zPXvUTY(juW{4rk#gx8O_og`2eqqz5i`zawsu|CtoHAosB}@Lq@F2~oIDF+VSJDItf7G;f 
zhm5u?nwb6M0r5wZByrhK8BBWzvmwsI5vKcC0&@|t?r#^f<3^Yr$F?&T13ZffH3lf0Krj^n|R(%u-C$Wk=|GmvOo7B zYooLUeZ?GRxLw;0VJKnn?lLQFP?rbI584%7Y}!pX<$UjhQo^3U#xJV7>g}mjv4E4J zFF8YhnKYstm@(f*Z>eyXf0;sfVdgY}+D_y0w!4SHa} z^+Eqk1|6ju~RL= z`k+My?so$+Ue`)OZuIGfcVj0mfvD#;!L@7e| zff-T;X?X4)#UM?$qh%aAeIlZTN(#J4T0-@35$y-wUA11#pEat!8iCF-g}2n#jI@_= zi){pC*}p#Pj*(m^K_92!Fz^hspYc$tFIYbnvcb-VrGpw-rTLshN|*=zK^r}RXTMje zg#JxXSV3^1^)yntfOE5(_`st+!SDt2Qj@){0E2MA<8;;-Ao#J>!1tf3p~c$siPUUB z?ite_*Qudr*y@d*Bd;OJ{Jt)u7%7zW_7}1zg;5d1-|%Eu4FOIJ41TwP0fz2ABL-Zd z&;E9Ux}U{_=>DpDNCA-$^eb~`m9E~H&2VPIhyP};j1+cQQj9wcD(gPf|G*rVF(+8) zmkQf$4uW8?0;H>iAH5U0FsFT(W!A;Klgs^a?Xxgh=Xx14_ZY|&)~xMJ<%D(kd)&Hw z+HopEeVgT#-WOjHbow0>2T*i~PA9EB`q!t>_Min6u%CXcDzy4-5HAKe)4{<#YK5)e z`v#(EHb3jtnvM+I=QXaDl_DQ!p7UVxKWDerBJ-1f&dC4;qM~|g{ z!;y6BUXA+FG;&OSdrLYLe*pEARRGN5Q<~V}IpE-yeNetKL3=kb@ZgFnMktEpHTopY zq%Hl^s6fBoND7<|7Yz|7D!)Z6`ayZ$_IOj0^8Y;tW=Go8NJ}pzM=GB^DLU6vNt<@` z&uOU!2TU$xWyi28bHoZaR-YrfadR@lFM{0d9}vtKAqWpYe#6BH(RD|!14CT2AIeTJk3_7 zedk1UBP3%*`onCdVjy&;F~DBc__@-PBCo@E?+jda>y_*~;?H}Zz~bER!otH`>8U%M zMH<$sgYjd;+60WAX2t{?dps>Q;huIew;Ei0mn?+E_xOeC1C!g&Hr}&@fa>u@{>eu3 zcO^BnA_`YN5jj?+AEqK8sbr2}r(xsD$o_3~(ASKF_#YbvUM!%YV z@#M~U_}QMZG_Jgnh$d!j>u<=IOgVft5*hFVzO;78+&Bm`!_Y-E?yUJ7$gwlGv)Vn>B2VKOU zm@(yd^pQ2`>(z;R{I}a(d#nnd8la{dtIDASsqb;om&{ppE-5kS{S`IB!!%T=^FX$S z%%L0@4^QXJs_d?Jvnhh0=^$bStRD^8WP-?d_bEjl_{CVVRzq!~>tCF0bs<&(ED=f&~T z!SwIxe!E;3snpV*DF+_A-t{jZWGES@ZIF8%Wj1Nr>JEdJlym~#SSw$5 zg*uMa!#J=tfmA>t~J+-QL8lLu7P9NEZ%X#Cub{=4h9=nxGH; z$aVK#YmPZ!-*RcEhx1R)iEEg-7yIWWXZ#poR6aO|TOO=Rfj)#xyO8NWp@h`!TiL8m zbj!^)p~bbmJ8TCNQlqU+z+Tsy9bN{pjPpZ$lCyCd_6S2?WG6S#vzRrcuQubtbYK-R zc@OxG=>T5whlex1>wT|0{$EOw07AJO?c@PI^Ns@!!cR&cKf+3q{-lZ4s3#|J%I#;K z3Jk(e>2)4mNr58ndO5~=Uw=jM!9AT2a^_TjnQH9=k!!R9GjH_?Qirvpa`~2)tL_qE z(a)lZ8?P^#CeTaVr&4oOrv?J4-WYtCe7OpOPX^v-nG}8RiX(pmsGuoLIZ9u+lk&P* zF#JOpHu~K3B0fNwhD4|Fra`M8)Dw(y^gDQ%cm-7ptRBQQyNDbf+}@_It^X5^)h^_0 z)vO$yb9p08{isALEtH|mz0{I2${bB9^Z?T?)SKw?^mWocR`}G8V@vg=>>t5$f}7Lv 
zWb-86+YL5oLYb#)J}O(29Z;cps4~7Fba@OXh*a?X;AM}t!sLfQmP<2UGGXu3e_I#u zEGF)FX0y1A8961!rP?f4DRVGIo|+CT77YQPwKDyvQvS;;0tL)|^F4Or@Sr?CADtIs z5pVQ@vs7drP|-?RFP>NNLo2BYdVBRWY+Ymh)EZa~&tW~Xv1LwU*2U_FCKoOFts9dz z;QeSv#A6`a@30Z=nNuG!py^JyMqmy`u9z1Ob+$3ag3kQO&DzfqDVeU{0IT1}#Qn`4m79Tt`|Levigf+s_MnE&kMDJcIbW>?tj z#)rZgi_bDmk??r_RA!v=p%eRH7q|bVQ!-}-MV<=v&?L&o=K~2O7o!KzB30T0#$%dG ze~>OL!XZL(@U=NNB~?3Lc-iG1s(1srJ^Zf)VpAvntE_}WB{>vYR0QW9$B6PebP9E% z?TZ}JlwJcr3i=tpXbRDkqQy^1z~l#068qZFW2S}A<(h9jmejX?{5gTalj^uR?oiKqjC7#9e;@Nxwi9D5P=dEqoMy;;`hMT%eh=EreRWBlJLA8av2Ez4JGe4+m=idqd40sa6q|joUpZ>(5{t)3b zX%;TmnqIWHNrO4(DCL^SWq;Jd&LfkC>%{x3LMjbkK>mA2R-YZb7Zb?N6(TdD>!3)e zVtuUPOou+fmF*1_Ka5b@s1&rH=0|TRK`*@;KG1r0^f^Vr(CvDw*xKcpxbH=gckLl? zI(&q|)o$_1mU!hZxmS7;%m0VnCT33yX>5^J*~E2Ire?-O`k8#+RpvMeeBzq>d!~83}z0kgGlf)f0jCG(DzTYuXJLR{~S9bv>t*XGC zAX8d;HwI;XnNRj727Y7KoaA?yP~Jyp2Cy>T=$ojQ{tmO-rv;hUEG4I9v2%H+g7#Ru z7C02>W>Edxbf6ddKq%ck^jpm_U-IAgHA&Yu24V@cz=YLJ?;e0DbIchd+o!mhCa`S| zKa0Az9UGyyD=_RpRiaYzTru1R!IAJJS^(1=rM?H#VqPv6#V{9QZc1*V8=Z7MFha^c zcDQhe=l|E*>$?WK|Ls;VMIxdxV?t&e>Nwbq>#MJ^u@#q9d7wxee)y(j2Ve`1nOz2E zApXm4px=U8wd~pnw`G2>tZ4J%SCB*+W?cF329dCr%?dpkGxvxac4?aX?DM#sshaB! 
ziQB^%+`F+@5f-1c#lUmadh4?P86Ioq=@(~9RU4WhgQfs`GClz{@!b9Z09QOpf!^=e z+B~jSi!!znq4N_!*JL4OD!t>(BidrTq`i)uS{^IaS*zm=4!&oLaTM=x0Pxza3Q`e| ztxwaEOAD$k=V8p4F`;;z)mfO)X+NI;&fheqaIG%6Wd&pME9U+9puo}9Q*^G*cX_+F>;Y-6BdJUNZWDRLR_`TU$k@Ip{maG6;9DeCj zPGE5jGj}y;kfF`XDgGeiKTXcX!t+#8TV72VlFlE-c)B%}`~NU=1MB9??CKHvkJ|6Q z!6DwrkM*?Zm%e@AhLc4FijR$yfn&3q@oT`9L+dOy8S6@<^sVn?h;R!mai0RgxZ>W; zKbN8duS{BR!;5~s7t??WePFrXF$sM4%C&_S)R#3uUH|v!Bn)UBLaC0dlXiIx7gbZ0<-9$V(+}0>G4skSGdJO4x4&QK} zt6ccH8b#OFp5fcvzVw$bP@>-@g1&RjGQ$R4>|bgiF?bFHQLiMRtSv6GFo$WTQ_0d8 zY<9=vE<8@si>n7aRZcs4`CBMO+Xf%+&hyLgKY}I!H@;k&Mq6XlA-s0dgjW=2a&Z=G zrmf{NvJ7f*+-82QyNRn&#=cwVg`*TYJuocrDpC4i$xj$co zV^e1l_;4j&zJAR$cI8XMNjdV!jSqLn7s4$pKt&6xJr3DmxZttH*={hvY4Hv`T{{Sq zXwaQbniA@pkxSLE&+=NLL39ymG zNR1H3bz(zDRjy+F9JbYh#C79`mEAZIgyd4dV_u@==C8d7`# zG@x-a2qj2hA@7gz96HopQUt-Qm=?pvdi(ywIv*Dc0mPyqvB(T@|Kj@pm1UCjjOsl> zkQC54iXuopSbPiS*-k=10X(Os+Fz(dl&@lPZ`Sz~`Sg8KteK|EBU`*vcVjojZqds8 zZAWpHPu{?hc9y5)<{@3N3l8J~Olm@WsqMV{jCeFL$@m^p5pZ4HXkrn93A7jd9pR1G zXke`6T^ASJ_jSn)d|Ej2;Y&NOVysv-OG&u!WE5qp7?BU$FvD(P<^>yfV_0vwrBswAfb#aRp_bl&=PnUR^x3cHU`4TCD zCkUlmCPSyg8LjYY+|;~(?*hvMypoA95#p_Eq$T>{2SETdfM7@ylto1IZ;0N1f`0ZLCWqtIT;IQ*D*v_( zf^MaI|pdq;tHhX zzTtCf@WcmZ>m4L83=5>HJjb2hFJ4cZ(&#F`%hvL$map>d$3&UnQEnC_qenSoS%R72%HMJ9*F)zB zf5;ggHM7y<*&y|hFC+ItXaUjS>D#J?y_R#yIvTg`GII~bLQf5xaQ~J1pFy%~zYwe4 z^xxNdP&8z7XuMd1RI?3!?yUaYVS8`54WPjd90FVXzx)M4o1dMzovwpn1J@}}tLS84 zVBX^E(S=oB&VVbEP`m{XJqp^yzS$-A&QE?iMqN3^avbGevrhr%<#%N!V&AA&G-$8r z{E)$kDn&(|q2+>@_$Mb&`cJ)e#dXh}Dy+J%`mORv3-q?&wq}wf zG@&p#o;xlEC?aoB9kojt83P=&Of&7BCWxb!^Ui$Gj+~h%^>SRid75~0d)6eu+OY4x zk0+2x?$vVNqzJ+$Irt4E0Ib@@NJ=ollL!$vlO0x9A>^e#I4ff>)q1uLCtfP8j`pZM z?SRg>iKX;?bC+_cRQjT%9zu8hEvtqn)58hBCpqP7`+a?X17zti|*VUae^ z8$RcM)*elgxA|SjuSS{0%sl{pdsD*9{R+gtB?6(zuw;TbN39c?|FB;K zvl4Z`rRy=#zKmTsw<0@PYx1(LJ|DPy@kL7kGmSzIrmuC#SWTXde_b|JX2kRhsthLP z94fgvD`?Mg>vh5~VXnX{O&i|+d1GZwoJC;ww|=l!G>bVq(emq-$1n1S6=gw(1xUb! 
z-J(-r)eONr4uv4oet8e4rhq2{|0n3t{X}0D^jDO$xp7u`^m7tAD@vX*L6YLEjAd)9 zSo=PXD>iktCP}OfQQ1cQ4(dzDo$qIJfaaNQgy}6u#?MX1S zt?9SuI3v+2uVYW-)>p2vR@B#U=V^KUB)`vHN_2=HGZfcsIMG4g zZkIDAr*)BR2m^Qc5(ZveDa(9!V~e#W$yDgfO+zjWYm9$T7h*aqxmycfO5eYMzyt+^ z!-R0uCEKkuCKDQjn-%}mZu-aYcl({Dm!uUF(KgNr&` zx=SzKVSBUT@OUd$oJ4kWMAv6&u>K%PwoZ+PnOn-g9t$io?EcF$BX74^*x>>*-()Kw zpCs>T6_Bj3?P=wou?Wb4jiCPecQa>34~219r$$A(70H>?BJ9Gw5!y{Tu;-aP+l(wt zKQ-ts?D567;!%2(a<(&1EOou*+}WdjAXb)o84=xY=L5P8l)W4-e+M>MUi;uM28z> zVi%x0)^Gr?aIaz7358fjhJ}Td$R!eTU8!%3H(FIs3KuI3e7c$WAo8K#y;KnEY3H; zZ$55Gcm0Dg+VrS$7#b@YrS-1aoneMEHAHm&=0kNrIAtFf#(<+9HzEXo$qH)`pNu3Q2Q1mbsc zTso0Q5{X)EYUy9Lvc*r7EPr~x47ZHZvpN#!b7`|7!dz9>#NmjO_`&CB`O|Xc@ILAP zgIkh8n4&UHgXbOVzx`ncDZ$CMFDFp?J{6mWu)nRZA9%?kE^svTl{?m1rsZhu>D`%X zrmWHzNH(~D-~evAa-MASm(<%7NXuwF44qem6`~vWsz4`Yet^9YiIe$Ck6J#N`vux? z)x`l9MBykowUjn}|2Lc0&De4&EV0okYmxLvwm28ONyBLQxceqiWIvn3A*Tkv%`Njz zo~NR^4BB)?15p};-irC6)cN(cl$H>O)k*$sUHunHUWYV-OtCJJ?_ur9sLr0#51VR@ z{4kVV%5Lnc*>~q6;jG|r3FBYn=Fk~G4*fS=A4FDvC#h{S$Hr~tv^}|1Z%j1!rdq~G z*-r4i@!t9=ZuQ)o56(l${19+DjyM< z^H4z}W(?5rOb>Qo)YR2m4Id_|(&x*V#!y1>Fn!DCVtu&*98}#OD#Oe9N53X?D_$(( zuMsIi8Dac?+x@0FQ7hIr`l7!%pVP1E`ot>_5;MfE%DETO{GeXV&iIzzoz0G#av3!f z;x-pwtvvSG)twTE78YfNiLZW5+GRq-qrR#Jv_Zq&1}8Iq@}zfLHU1*(rr0@Apjy@! 
zR_Cou2#jJOB;&HhCOGLa+E-cUITu-m7f5lM){0D*}n9+0mX2*9(DfchTgD*h`p;(tF z1=*4fhZE(iX5Qfuiq)+63@V}5RW42?N*B7uT=HU_=4q%rmv{vm7{|+Sd$OS~(;c-i z{K6j5WhQ-&+Eq*8=%O9fyeS6_YBm|gfmb#sTT>_>zmx95sGL)cc{{}!8M^HG$_X3l z-*)3zbH@2-Wbj#&Qozx&R8>rGW?k{GNyvjy-^T{^e6*tIKk4eMgKWEAe7w zS2d}9wer2`vje=s4uo3DqLOW48EdZTC3{2xUK2T9JT0_Qu0v(tLq(CDY1$W z&nFOt3EhmbiE_qJM)^{EZWZqp%Q<~#4soR_+n3>PZ6NLN99{!*TZ35d0tp*!7H>3V z2W#dp|BvShWgM)1TL3D;?h4CaO0|i|e-gSE0qT-n6V~H`mZVDxW8k5<#NhiO*U)38 zXfJxU7ax=I&Y(N)8YkT}z&H6$hxEU07Za>=${%0N>fIMq`y_DUK^|tL!5Wjz_jK%r z5BOzSp~>Z5^k%8VTH4eczgD%-;l~t0Y||3~rzS&Uw)E9)fsP*lP8F27JbpoMe;hrC zOfP=r-G#L_@>tzX z5y2+xM6~HFM*lco0mOmh(KU$Mw~nK$uj0oo3}B9n*$zK6pB%xdg1qzeV(njOo`&OP#il7q;@-eO#N+;$SwM&Nah~)Aj(I0x@wkvu# z{7UQOCT!NGLffU{@1gM%XqoYWPDO`Q7KUMlNlR$lpAas2=~rq#1r2gI8YOl!2tW3C zFFqyf!pY)@H6XU}HPOjKx|kXd^*sa4T_M7xEzyOu`Ensjhj`x4CmivNFp1J6e7-OM zgZJzI>au}k#FX%rK+niZiy%1@+B63q4>1AG%5~rfn^vPH_ zHJm(}lrb9^2zcBQ#wz3WZde*`W_1I}J+0uM+S~YdSo@{Bu)9y~>m=2aGL0IUigom< zn%9z|*Vnl%2iXeYBti!=(Psp=imf-FY`gbYuby8mmi@2y0bgN&pQ(x~C0lQ29qD1| zUq(skZDz+PKGDdp_kUc?oPDgh1n|W}OSBx)TD=&~N<&VAv^@R1tD8cT3SHa0l{0O= zrd&$SJ)0n$_a8u-9aV_b{iz6VGIiGVSo~CaI@GsxYa%+PRa?Wm%ZQdYyCJUuhW>5k zR{*amcu?FeIOPL+N0Xy_m~(89Ep+6dBVasQXMvm+N1~=?pBB6AACLUSkk#L!FtOp8 zY~EBW7!r6JzM&4jJm zdwz6(wSzwMFK4frw*X0-r;dLjDN{fs8K)zS(6jH^0BU@!ow0Z$kojy7QIG3nor|YB%>tJ~M!6 zr}?mV#Ozo>aEk|R7p+l8cQ5d{zD_+%k!nX&=W`Re4g1|Ou8DoO1A6%{O``_EfXK5c z*4|{EL%!?Z(bTQ|>2oK~OppzBu}7teFAIRxq}dN+-ty6ORECt-+oUBRs-aO<8*!HV zY&Mxy6hD^4+3dJ{2?a)=N5X&`G%qXHZxQZ@v7UN80Oek-cBs0eiHz#`I#X|inH^N1WX}|Auw6tR=u^Cv16H>5c z(0*a!NaAVCrzNuR@c4Bx3H-hkiNT2wPqfNqfukDL6HLxDoDW;C=nIMA*`-(Ho&_>y zJ340}yZeE;{cne(gIQ+55aP#KnZd6i}Xm94r zp2Szg8BY{i8YJ#6zWGJmea($UFMVpfjJ>1C@GovkdTU{V^L&l3r7>`(Ec#3jdpJdb zR@_8V<|6Q-PDGdH_R2)Z>f1^i?jvm?3bC^&Evx4;;)F9+k8!Gk-Nlp#Ka-09upk(Q z4#LJ3=9UD!vZtmc4R(m3b7y`YHV?nR-BwoZXgLu9d!P5e=LVc-%X(InuD_5pWFXF^ z=}oqXhQA8QDUsU;cqoV^PjiYKeIwIaUqKDrR_FE7io6)$;da-r`u@dBO5r}Hn+m#Z 
ze)ixTMJG++FBrz)^WmZHNEL-dr4Si4>{QOnGP!WTGJlcx)VAqLd3={M89`-&CVsw72EV1?jEV3j>6sFp?Y+t7H zsPQ4Az;3B9V3%QVcVN@wa-74RysI!YDbwi_)lri#GTN-?$;Bc(m|2|{;3rr)fTtwm z%J!1#$PdSAHs*t$xJK-k!t;LGbw-fZLNnj@jdoMxzSpQp5(-`53s)a*?d9{;$rn^T zMHpHiXOo^ak)`L=_LpNl0g74N+!);MP8e1EP-Au0yBi%Z-C~UmLmK;J9eY;!t={SC zP;NQXai;?LSQb}mZu7#0al?BuEP&H4Ory0CDlYW(LTJyk!NP2GI|!Gh&S7bIu`l)c z0*S{1SkA#pWr_7Wyc_Z-;zpWYDyc;(JyEGWozJ=lst*B9EXu(Zhu+~~Mpbv3lNDx0 zlL75@ErpSH1{6XY7d5$%v#0YqQ%%P8O>aG8>u@lQO$ON-5VZ^dP+*tY1w z(Z$i)s>KfzXH?gc!9r{W*%3eu40y%>IgC8)<>^ay6Tqyc-CmKY`I*o(mAtFLX?~OA zGpmeM*eA^@WbkJ^_FXJ(P>>9RYq7K8d-kE<@MqE8ZNS}07Y+#b85Cv0f(zzR2(oVoD`hB%<*b>Ir~M z@w=&|;~2Ks*zJDj-c_Q&|QA%^goA-YNk>-HL7MTs45 zm)>YDD|&S-gqi7~DWRzr%K_cKWcx*(d~=p_F79Z4m9b2HqmSZAu{s%H$o`y@Og3jv z<{5egs*>?n5mN4#OQmEl^DJz0r5~hEl7pP^nGwxpQT+2JDO_9Z+fNcJ-xLf>kvk_v z;G|$lnGfMfF?+`_TEuTjgehCPe1cxlE0_+VFa7WAU=3^IPIGwkjtx~V5LD)JZft97 z|7CxAtC9xmit{_E@kzIgt2My-*<&HvT9|8bGV`E0=-aX^MF_P{Y`Tul1c?m@Q^ezf zX;{7exz#ceJ2i73y~*b#a4AnCO~x$|{Qxa!#3JI+|Hf4O(Ga_G@f%N7ygGC+r0`#9 z-ca;9vzCq{NFs4EX>SNuqB$9EsI+CTZ~4ItU!VUWD2hQKex16of#?o&LQr};66u!z zC?>_E7HA#9Xq3Yv{~%`9e_hBP9MTuTh~89DZi%V-Fl=VO_T@sGPzjeR9(9bw>0}y? 
zTc?lLu71zg{2}p`eJ&os_%OO;@57$4f>v`i@u}lsxp$HTxu!oIPOD_K` z8A#x|tI6YFYB367%UDtvR#v?HqHn$x_6m`JHsY;NiT?@S5gonj@$eslFdiAR6n6JE z__NqWC-BK)y+7{{ibO5w<9Q@{(|B?$MLc4{`cgeZ#)$0ge7LYX z-+JS;iBT9Nx})~#9$WQCtr@aAx}Axsz1vK;(%;2_Dz`+%TBbC9^IiM>GI^BZ>g}L6 z)_ki;wWF~^kIVb7kbp^?Rv4DuV3qt9sm>P;;Q_Xf5B5{|6jt>0{?t03;;ooI|1C8W zi$wulmR*d-1Bl%p|Ji4Nl=bAMR<4O5wz}P@TD_`#&m49MX>ong$v7_%eWb`{6u4C??BDP|7?oodnBwDQt9s0(9A*WBX?0jUEidczr1yech2?mg*+n7l%aQKd)djjTxmP1E zllBwNymznrUZsQjj*)eD$LJ{(0-NF_k{jZR2&;^B=&#`ZJL6&i<+H@?6h9i3dFYIZ zR~p359)H}oe{HyYzW1%0k$&?`qSSamC4TS8z7H=WUQ8Y`MvU8YQeE7|1m36N+z%yxAg{>SJN~N zi_xr&{JKQ==GZUd$B*hFb{d*Mw{g98!Vp>BV|Xw2hL{FeH;q2}n4p|0UQoj~Slgka z_!aQ0DwX+SD=)&*JkNg2SDcJef-j)JddjCpR3R&g_*c}x274*;Pp|VQ4<~gNq;bn1 zJ0n5Isrbm1q-7z-*FOkDe&|+k*EgM_iz5Dv{A(Mcer$@bA0L3J8627B)NlXXx!Hi% zs-WYK#vaqgL>}E9#S@$%>nCV`S73hs@Pok0G)(U)>Cz__xa)kN_#d+yh8#01F{Fy6AXrQ^9l;4TO{yE5)&vfU!43pCd!S0 zyU3eoPsQM5K;Qlnx8zui7?jmIk6LllR^fPnBN@6bOKlp)gWv33|x+xnaq9MYwWC2*=z`X`qyGw1^ zeM2bG>jfZ|u@rmNNByPd6P#8Z_MjUSGPcA;RUKyw9|y-uVp3enaYHe~u=@2NkF>X( zKagU*3Oj5Re{!LdrZ?V!f-{T~~>9iv> z13w5Np1*ola7|71Wnssq@F(&&z9d5`vK-&Lfep(9%4d3!>Vt{xkIk;oYH6z|lSXxY zNOnDUcNoGxaa34tHjZTlFukC~_A82km9@na2$L>;llg-`REyuwl`*~==&?gVhIN6T zJ|;P57g$(mH5?a_LgWE`U`1ITfY(m4C|Bt4x3Tvxr z+a)B$9g4dYZP7w-C|(!4h4$4ySuvvcPVbg-Q8v9{r+p8<|J3vn#??N>mP4< z6KPZxQxk2UnN$X+5ZRA4a^hxo2&SC+s>4TLiA*^AIB5n=VsQ86;FuUeuk%&wJT?P? 
z2>~3>s44392(QAgH|;s+wy*`yHZX$eN^t3QZRZ+ES$f()I`|-jXdUX{+h=;HeF0&mJDb>eIn#AW&O>+R@S4N^{W4ZPxb%6QNJG; z_{)}x_%Y9;2h3-QO9%^rF(OeD_o5r(qZT3=511*9n{NM2S;PP)WDroGbKY#D6K;%M zjs&3#gBHSo|5CU62LhkRla90@`$!*rX(Zq06uv)tB{Kg9Mno`?g#pPHML&fS?%;`E zgA=g^bqdH)2IP#Y+!qz{ntc>p&{10~ijxOZv_rsJ*mfcO69MbuY5x$qi6Y zIJ|;*IyHg<*xhMHB^b|*5Y!LD!-Mk=ahVi}0P2yWb~`~pVS-&!pJt6&8fNjfqH}1+ z8QyyBYve|-U~&=(V0}vaGN=>?KzUX7FAl-GDpw@oV`1qH8G|7-hMs96VCXFF_ME&6 zllPSvh%!jh#>5Q&`OE0F@4$|5_-=IdnSgOT=2Rv0^tx|(+Y2rg%_B3vm$9?AMF-;d zMtK~vEm>0XAWo5iUQ6G7zu*2z{8eg;TQu#xB;b-PfBo z%XJEz=cCfkq(qIqkNPa34+p9uxYX9t3_O0n)%z?p!;)-d3IK+mHm!Q|)@>V}5I|5v2R}9ngRjJx zOG|Zjgvf!8kW7@tBH;jSi7=Yi_%X@ikK~g)VoYSFl8~it0_0YgUJ@5|T=@lI-}74f zumYp>6$qbE;u24O1Jw<^g8v}{#n1spd16%c%(FV3H@wnT1U5fwD0C< zl%%7Tl!itcsRIo)f@l3cpbXx^hIg&M?!#NwE}Hz&zZet%%=_sx zR(hK&HGBMR^`xgdsJD85eEc;2kos>zwRl%3TgE0bop4cO%^XFCqn5&uAqUxv-#rd&2iGJ9u)n1H?~knCQ`WQ#w|1Z>M1aP;xjHdoazyoWCaUo+t-y+W zQO^2vk6EUk^l8T7tU>hmUbUx%b$jXW^DkEcrCk{91%Pet@Q2?-WO$7%te74Y5JU)M z32qFpf8qTiSZoIx`LQJ=WyYZ{&a`lI++cZa61zbSC~Pp5U2M&a{BK}yoJ1@B zVn&u2s=K04bHeWOYqwTflI(Rr5&z*f8F{JB5#RH7qZoQWo=K=>CrN zZ=|Zl&#R~<#-S-$#AHqCxOE|nMTUoka^^jyWK|U!D82Qs?qt$8GSujLBU(l`BOj3+ zXkim4YH~1NHzd{FhSnYzP8+@RESP}CK$N|P@Qsatzt5@fURf2>f9suWvvd9s3vP4= z%{&}s0r2M>D9d&0{K|i`rlZ_JgZ_j~j}>50+s&g!`C`!RZBc#{J6CZmIjptgb7}lr z`VaAw#-MwakRte3%flM6Na{$M&0nY}C(%={w^mWAPfzuK*8xW?Kg^U<5N*$5lnp{& z6(2Loc-V2!6$#?~r$XM6;89W((2M5Ls@p))?SALp*Uh%8uWyOQZrN_81F+$T5lq`6 z0&On}yvxiiJVI!9)O74tB{hMu)_Iw6-B)g{_<+v3+<2pw6f@8soB54Wc;+4Uk5aGp~CwupqS@j8|qqJ16UJsbk4IeCCX0R?cSI0Mx3)l5e9UY7WYwx%DTZT6aHWb`agc%KCWs}5^c(eKbU#E1Ty*zBN!3Sjn{y%4=&1peL(Ps3I6 zAAOM}I4>|ipCcTACkLGPC^tH9Manm%mAcEATi2eFY|qiiDQ`ih_v4%Z7ryy`!iDT( zSXE~5V4wiUr~5nuw+Y0S+7}mg+5bFYTnHCS#Jqu*lyw*vV-rc>(CwmfG^2LaCJj5u z#5uU|LTm!f(Vt}>xu;MEmnE_ioUV_3EPm}8QSEAWPke(xCq@|O!K_BX=p9S3`@^_L z^p&T{@<#soul0URB+SN-$r6!Oe4_!-v1Yo4%{JSU=c_FG;R$>gZ90-8)4?t6M4*bG z#)`6Fd8SF>%o2{tNMkKj5F>=3$1_coYz_G@v;nGa(MNG0b}37ojbgpYuk(Fc;hSOi 
zdG&QhlqPWdAK|IbhJmkJ%@Q7pZE&-u(wdKXuH`CknCkw}h8=U!tOz>!8hC8Bsq683 zFw?X4@6iM_ET;J+($nj`{si-)dE+tIETiD~wKwGgI;5emw`~SB)|54L39b0+2+32h zXc7CM;7@9E{fo5Lns6jEdbuX3o%9bKi~p6&xBNSu|LPJoJj27dgc2^9Z>~sa@Z0>S zS}Px8QBGY_8la^+^t@()Wh*rHrGp{PBVHfl3XuEzu=gOnYs*)3qk655wg0B)I2eqS z3Hi?Oof=HTO{rd#zXS~ME18HvL$M#jwo=% z6=FX@VSs}`!!Hg$F~Lt$Vw*SHiNB#Hz~7dY9xQxK)(f)?I~6V{ewxgbB$m~}-JYAU zyn3OIb;j-KDZaJ2MmsTjbPLtIm?)Z1H7~KB93@M;s|9`nYT`ZLtEji$R+RW^5uCxl zWx1{6T1K)9m@qMq^HaINSY{;8x%bhpmRI~J)gVk$`io;vFnMzk?6$aabs8&I&pHY^ z7ucpS@>&iRHq2z~D#bVXylM*0B9TImQC9bYs(n8e&vJDns~y$oCfr>VT8Pn8CvEnK zw4O{{%*CXHA?|WsIem?nMrI?1TD6UE!w1XriRGA_i0mMmM6kknD)QG*U9eXX=--}} z_2j&k+fa|6)-nG-F8x~(B9Xc}*b}^qG_KLHP9Q^Z&wyU0-2aj1&h8un|j5OcV}LC7b+S zT`Qqf{hpMFq>1oHaQBGForR=sYZ6qN`fE*_Kfrj&54rW{tM@?@d74;~4XjRCRq_W& zK{$l{;{weDZQ?`^AQ;whFlbbgiuldntM?V1uj2U=odMRnet990s!Qo3)ab5IdXlD6 z#IrnUzdw;esyY&3kn6hyDqFD04Edb?2Gy(bB`2`B+&N}NDE$8JW3ud3LQjOcs)ljY zPbZDe^ARJ;%3IUq!5aZ70Gf(H>?b9pZ|=StZ9Uoe`SBI;p30kKfXrQ?VCAeW4kn6A z5jqVzAB1au(i^FI8~^n^_QcQ^CIcj94)OiPB`E-%_mEsx-OMm8<#?_7k^Y%c(AKB; zQUN2WCWIj3Q|xE0j9H%*so#e1Iu$bHnA-0B;2j@7?eU-S!A{22gnHTA+P>tf2;e6GZ#Z%wY8lHj^)P)3Njw1+9V*D=!{Rezg(E$X7ygA zN@IyCW&gJJm!>c>JSQ$*bPmJB@>2;~zC@uqE;qYZb~J`MGov)s$u`TU3AC%f&By|c zB+p61*sXg7@pfP8#I+Km9(J8#K`Vr@5ykiVaX$}5RJ?{2nUx!^t~g3_yzibvWOwXC zZ$J?g7T7!OU))eCipy0=lyu^_-z@xwwI0PRq}k^NXod&zQ6#CK zH5@sC%MEQ&s#oy!>_=`vtPH+$=%@G>y8^JCYrzg5>I9B{Lx*DBTzBYS|EQ)(2M!*P z0mOjII6Z*}<5aKWr4#%Q@4MSLfdq}kkwo$~E(fg)->o=PI{x5J`iBqoD=(%1vFBR% zyNh~J1vQCmv>O_uo&E*=L_cXo)%xeB5E73~3x(ZTDE6eWEzHRdZ>Ew;tUkd;xX0y` zHEkl)2SBtc9}IP0^Ye7^CwOoB_|E^33X?lf23k+<-qVzEpQ$|jJ?zFb{|MlNw}aTS zS})gEulM{05WNUc1(~3qQu!8y>EASULxU5xc~P!7HpplWi@2<&Xrir13h_oBx!)Fz zQmh%oiGDwGg~UILqFcd6z+}W!?v6pk$7u0MHUgV<($Ppiq5Fx|`V4!A;i`m3UJ*;K zn>3k3jP54rqWd2)+6r1X~dOlc@3)x%`4S$1iU`>X%g7^_awi5fCha^J?Ad<9) z3}#ap`Qe{d?qK3*9j+u3FT2bRA3PC>w6F|~SdRw=BQ&KHK`4?OyCfp+8`y^fN z4wx+WE~jl^BVmWLhM&vEIQdtcJ7R%zJ5{S}W-XK)SV>`tG>1x$6h0!_X=lNKXZOeT 
z(*&xbxiOfXwMH;JuTRE;1I4kQlh$mS<0B?&Mn{rG6eeavnFz@p7k}Q*Gwt}~4$wj6tYOT&|EAbG5fo|r z5M1rdU{Kxqn3^oE+*U~tR^8WusPy(X6!5=0_9iSsn%0vmTMp+-zqI^Exn_=Eyal3d ztOfC%+wrf^t9g!KY%D~DY>Flec)5djAi&<&GPU50T7j0}c?@3)(otNJ_t+%maYD!& z!YVo-Hl`8Ib4u5{2k}2-xHsc|6}!=Rf_Nw(pqqynqQ#t#79gTliL%nKA(YZ+6T=Ra zgD(R-VSvUd#`lAR2^matKC0DPH;SI5qZ53U0+ zH&1p0|2ayP;bxKUP4#H-N*(K1FPD_%pMPWfW7 zOuC#_^tt_S_CI9W6@v6*isQ{rk)vKl;@oW;V~FeuGLi|S(3?vG>hb0CV?UPJEnqp0 zEvrfZrfZ_+-LgqA1v=nBO8qKVPx6L$QW+QwIalznQP#@l$bZ z11&RKn+pA!R6BGF*1aZqoHA_>)1ff4f&ekTBCuBH8+_0syF7nBG-Hj59c7gh@?h1a z9Be?S*PKT}cx(_I%!fa7x^S*4S9ho)=q9sVQLgHLjj%LN;qPqVD^6zJyxS;Yr8-km zL;Nb_qh&`8sqt^+m=<9M8)c`xZjs61U8uvu#@Be^7nw`_$B2yF21x1$^0RNi7{GWR zqC}eBPi2>@8i6x6j8n!oe`_;$h+14=x_G5h4d)Tg;5xjFJ1)mG)fNXW320xToTWFS;vysDEI`@&#Msfi~>gVf6U%o7o!#Y^U#2VeDnz;arFBTw?5_Q?RYBc_v(O<*I zYOhp3kOsSxclw38%$Nw3#WwAD^1hfzyyjivT1*>-w_{kOi2XQXQ+5@Oa#9q3+%Y1# z*cW~ft1Is)&nU0%LHEDuJt)J{XX518W#ib%6HvrUl;Bx5c5~Vv7!D-nE?=8Pt;J9e zx})SW1dT+tUI`$!|!s-9s;=&6Y2I99m~ojoe5+ZP5AtT_JS8OyEVsFK{^_CfNu3GT~3HAc~##U_=5txf{^k5rcWO-^CcP+(D+gf_hZAOVYd1%|g>or*C!7U?owu5n6}0#Y3xWcLc0 z$qqgGb%L#Y*NV9`{Mglg@bmohhN?ZD^$+TEzjWDwD&O7$HpMk6i|NvR<_$L6>(Iu- zCb)N_FHY0Q?sh}%Md29zSjX(N=aJ=>5WX>Yaww`DWek`Taqy6i%=hogqQTzt-AKBQ ze$-PidK2_|;NK+Y+LA}RSo?m6aFO|;TXN;uDwVmz4sS6DVS}&-un*{T&iZPpx=15_ z++vu|x8n=!5sBj8iwwk?;0u3SZ@V0TIzK$mgNS-Y`!}Lq5v+Xwt`VMXDnGcezBvq2 zjHVTC;`{n#jp7vST9Bski=dBjKt4^PG;nId2@<1|eu7#GF;s_`a+cV{&7iJAqmObR zFyOQf#^SlWdRaq2TB21o1A4FZRD90=^YgI|Ip~L17cS+KZwmakXaXdA`JQt`Fxn3C zuYbtGt_H00CJ}5pGo6cO-;fU z<*bodb2ZTj61WtAoZL};iqm1QkzPh> z9qTOcZ1MbyP+=6<4pBXrgPalQr2Q z?Z{NCen=pbmRr-0j==A$D!*p9Dzcq&D7t6r3VCuVkE*+DNjH2ekLZLn#DVhKcaorQ zf03$D&Z}l5=C}v%H@PzW*?VBi1&dt29{*-h60#e_?b&QU)ZM z=(9*vM}l??sO3?2S+ut;oe5{V7k(DceM#c}zS|=-$zwtAWqRU20Xv)EpQK+>X-yvFiwP8oNN6$r!|s^Yb(-o6Xp0pWT>!FArU( zc$D0)(PZW9B7ZETS*OsV=1j3R5GzsAuvy%ZK9#lGK8iOAMlY6U{fEP6sKdSiJPP9m zRjuzx)mhBnHa}x5Y9%uA{Os@E#_=%z_jGYg%cuk*i@9kbYoUBu*(D<$JO)7n*D$N0 
zvn}I6v(F`|=_ubRf=nPx6$$>Oe)8I9Y5Yxy1JWx5ocA{qi#1}+2r%+4)~1_^I2AC@XrH!TtWB-N;TRaO0U@% z@(Hw|b)T-+(saZfcywZd5PlgnScg74+vxNt?mDxwYZy5Yarz%2iEn=${3nBMG7=r{ z$j9T>zN2U?XDSE9C}%0D7fN=v{wLK^mnme>v|;a|hLaI34}O+6jU$PJ{@DIHxela( z14a&1mR8frZ-yAi3~cmiH+&OEdyjvxcB*FfL@Ddj?T6C$q|CyvnCLi8JmhdZX(~cy zA>TB_OK#AjfS6Zs4uTJeReaEQrEE} zoxkJm93um{rC)}K2L5dmd2uGLl%5V~}re5Y* zCbI98+FMxP=xG=YdTN#g&?Njk3@#`+>vYkPrTwwQBNF9EUs1bKOj~6_+he>| zedGigO_kU{YZrTgVAY0VUr@sZ^eJqGnWN1^i z^*YV2Z|~rb;8VGP+U_Ap>PtikZ$1CLPtDp@608ae&~iBH@8OjKNJ=NK`BA zbMxQYS>eHaKRb5?@lV)JnA45IB07zK_+VE;TOJnX3nm)VEfQ#9ail2O8AH=EhHK{H(HqR_XXe+N)Wn4dczzmg&AM*LZJAFijlyY z$c>VZ!ZGcHVmwM*g1{J#YQlOXy#*8_jShPKuyUt?!({^{6;bbdqNI$y(#}| zF2pMLwkArWZNidE()e)B_ullW90|U+Xx-~avv4{bX)7`y+z8Ye?U>}ONOWL+n_roj zc_jN5fA)AL0W$$>Rc>{!@UT|;Bo}Cwxk3oN{xn{6sJMI>gJ#%;=NbR{EeJlNX#v9W zHLYo-m_F2my@A*JQcR$y`7uNE#?+$pe1a+XydJw(NOWD? zK%~ikpjLGJHqTGwd%TQBNa884u5gZ{A_P0XXdB&&WtEq4X;i3Xc55mg(u8?1#EldE zzo<}qkn5}Ze=&=HEri&MUr9MvGIAORmD5FdVG#E94+xDd12Kh>*xZIn*!{62CvQYr zkn;EAfsU=S7Qdkv>rwAVUHBM?dJ1UOA^AF_XoG; z5d++$sEYKu7)$(f8fkxPW;x-TIjMa3m<_v&&ZhW)v2U;z*yAEH6%EbYr4ST zC`&EuwY2q)J)<79&9OaaDLK+?P&+_Lwo5l7Vm}hh6HNtQB{+4uINsOzao73}&PZc@ zV<49TfnGx|g7;|Gt2_Cf6b!Z4Gl&C5g#wuv{^7}+;akcpoz{CvUqMW#Oo6w0CxW#0 z$MGO@(R((x|LnRYfupI~plZ(;R7+CpH@HUQzbDXi129!3oxsQamm^{g30yfQ+3KmOv}$YrX_Z3F!poYN^iK{QoR~ zMl0?|jQvxKPlox{dGA=|awD*W;Wtti{`f{{Nj1dDOPB=jfq4WjWnI-Q2uc8@)o0LvgymYixII{Sm z4`oOD`z%B*)A*HayWO{i!BZG`0My3T}U0Mi@)zK%Vf~%~6pi&~f+| z@cA?SO^tL^)lc&VFy)$4f9b{Q{75<6Nx_A+1{dl?fb?5&?cg^Ru}K+vSmHL5jzt+& zv!93KNhQP6y(@<(un+F*aJ<-%X~CztUuD4V>Cow9ii?qZZw}&BKh#uKUYJ^L>Fi>! 
zX$a5^c;}rwbP+{>I=?AQVDS=D<)tNsJuH3f)_DzOC;L4#-c_ypOaSH5%=PdLAvggbEuGjS5z!^J5rv*kU~u$xt^-fhgr{yuw`tMG-TL z98?{KY@`DxAVhtNk3s6Y*vlcDu#rr~ggC?zQ#G7NVcrk4P7J3KCE49Ae!oT(;d=RR z=6;Q_r_@v({A4OK7foeaqW(FRf~8e{fQI@P{Qv^-RyL;;-YFwD$eoakSbT1(70W2W zGZE(8srj-s(sUKWc;r{;s8~{ZERunHAW6zVaMv*{F+}eCgJOoik>o=*=ffdiF4R%S zH^X{fN!dGpfeU)Y*t#Wh+UF)*kZ>~WcTo6kHXn%;E9fXAO1h=UP1}M6Mr|&v3mwUt zp$_h(B*!W+B6XB9at4M{=LNG+M7~b>tA8e9A4z}xW6xC3P@fRjw31r<4&P3?CG zOTimccK%62@$5wU8-wDb<8UVd`u9TWv_y18Xdp)H)3FSG*?aM7M|pXESWnDNWQ=R2 z3W8e;QFPupqWe4>*PNfCzxGQaU`){$73#@gU0HzLa8`>i^L2WdEOCIy5D%z`EfKcI zbev^lf&IqF8S;nC2p~<{?K^o0X!)}?Tm5BR#S>K<|Fo_7tyYR;ki zMX!~^%JXjNVwQdK0ZZc5$ITU;Hv}dfKCGXWOP=@xz8QFfDf_>XctXh60kA99-PvUS zqA4)-vt!}P?b~*F#llZQHoLBDE2$7C)v-F)u0$V49EdBl#ZUvW2WzPhQfiT>n5Z0Q zQ{PoeEL@&Td5A$taYyjyfH<2~ElKQ0s}&H% zcFvodUJi0<0G06D$6KZEU#n5C%&l&|33LV33gl9*d`l_J1E0ptQ$!8aV0gweCs1b$ z$JBR_Ld>pfoVk4{k|yu$sT*l&Y+7Hw=?wbJ$@|@18G<(Wz-I;Uj{VbO zVuXz+qQ$MlxKhr`@f0ut{E_aP4(m@TV_2@uq|)QHj@Uog9_b!qb`DlyFz3>P>ccbUk5G%QrU>b~#U`Z@IHCcB(%0dkq@jRf;ek z2chY2rtia9V*|t{ecpXlV>xvf=G;#`CSjBj)_E}25ZiKV5hfcuwi&-UOiMUu5n3r? zO3L5F-4q@=wb+To5clF&9;C|IxT_)j+Zo)1Akx84CA32$8Hi>)13yfcf21t`%7H=$ z2Q-+*7Puw{!Nvo|y{2Gva)}jXCgpGCv{Ikvob7CC*`^9~`J!Tn4`$U`^3g`Ne|%@< z&y}O(VE$O#o^Ql|kcvEFEMa9YM2@eL6LpmTdG~fP%A$9%m)6ohL8Y<&BT=MXA(dRj zm5A?@sFpQ+IfPU7kC#xGIawX~1^TD~YxO%u!1q$hP+@YSK9dF5W(+I-{Etm2W%Zuu zBZ{T#Q(m{!!*M6w z{3@7Ptogkp`tP4eApq>1Ny!>A*11GDvM6^6G6rOCKtlIr!Y);Rz!#Nvc;QK{bDL6n znj?|6R_|S+Gi>lO#@7pYrgJTs@HfNcv@dyAzt?FCM>5IKu;qvxE!d+jB8^PrT^XW_ zc=(3CBcUz7mXM^26WRQ*|EPdCElKextRL87%Stsxz|j0mo3Ac&-Bs8We@_gT^99_KKh1 zCTs92Zd?OBWqi&o1xL$X@4G5>Z8(|D$%XmpxvFwi7x>_%3!adM^jQ1E*%_W>32J_F ztc$l*UKDrZ7V1h3{Sb*(+Kg(~hmClKPo6Jx9S(3+N4ZFCdI|-f@ng7cUGdJ6aHt^o zC^B1;V!BOnE=q|LZPg8Ga+X7~Y7?AXz! 
zL(jCDu}M#&q}%BV-*#82v^IBJ%BOI$%CD`oSU)vu3f>vu?=$Ab1-qJ#|7^MY7>W0S z4Mpp3T9rRRbnR^EY=+YA*cwwYS1O06g#1X>nFX(08sK)e^nK;{B-4(PCqPj1L>Cx) zLm8UM3x@4SxCy=XU52(%Y($i$5YZFeHrF9&KaM~5mU;{l-PM5p9x`hs5?xJ#Z8RWo z!|I{MDGD9&e;b1`Fx2mcMD8h~J3}19g4zd#$kGbE=~3NQqwwPG=)XH4fQWVhSd_voWqLU)UT zJZbvJ#T|lCW(s{=_()B;aE8sy6db5-Uo8KzZ6-h6-_J5|;Uh}AWbXlB5O8HntuGF1 zN=07CZqRu#mQbLEJGwBZzGu!j(l`8{qRbI1pA&As+zB}Q;u_QNxZ zHmW5}+}NxG)Sz<8lP1=YUYqob@zhlm1H;Sy&i-_&9ax~$*$Q66#Kt$nY;GSPx{%z} z(L>$!|0DBpXg46^ru@s%O!AnC&jXEsYf2DG1tU1Gw6Xt=9mwWLn}v&52Dcep+2TD# z5f?WKqDqCh#rx2Slbdq;h>MV-0s1rxS5vg}#c*LSV%uCR@e*-IoInAm(yH)0HQtqE zBa*>yS5XflD6o}k1&GYIbC!!2xq4zw-B}BB79T0A(WIQs{s3T9_BHU{_XlaaXY~vXIiYkX^))P@S9~q~_x!Gd0y!0BMv=JGQ zlIWy^GO3eNKN%a*KnM&T^f>{EC%ie{@aJmdb;B75*oK@cDDAYH65O!oDoO=WO)Fgp z8%smL1)}gQM{3C89|A;{z@8Le0>lsr35kS#A^O3F$MUi7i`0X-DFy$n4h|6bSU(c{ zfhEXReIG#bVf7kj8;RqpS}w2o@)5@S*=F>qF`c8Y;UKZ-gySj5*}y~0w=*>=MZ2h- z>}ULhqZrMLgbtGgP^4D$?;^7lel|^CS4cz+FRkf4R|pKdu`vY?7Q?MZ&vVXJ!mvSF zaVfPTQq3K$W$=K(_+ds6GvpP(%_|8@Cl{;#R?8Dl#b}OD4ShP)wx9W2?6bcy-;7S@J-ew$R|ZGh?Z$-qPJhewyqwdbtvL zh$Zzv(eHw>33~bOI+h;^x?m-)&wpEZ-x2YwzF2e5DyYwF2(X$k}nAjZd$kFvRfJJB!kH$SF3aSNf~^#0Dq&-9Z!-Vvs7#|ZOqiI^>=ll}O?-Fh0B1UbyF+Gc10dZ2QB^g8xOPR9?BO`e}bsWn| zdCXTqK^!ZlHYUExhd$1vzX9Sl*1U1X zR7bcL<`@-yt%`a>%0I*<3<}BUG_o2L-gbDz`|EWovMzJ@k8-Wkp8AN{lq2btbn_6N z|2We#R|K$08uD7TO^*)wy?`|f%^X>BaD=r&ifCqJP4Xl)*gD&7KG0q%VqF)9`tqTj zN(W^a8ycuaqCBV0PZF?ay6+}7r8s_0X*d`k%4Awg2tL8Kg$jv+$7EbplX!b-C7_GI zTMQ-=sC^Ah4N#nfR$1`SLZkn-2t6-xSEacsC^6t-8f|(TSN~MAEp6*l@~Kw+BR=Uw zD2^gP!~QVlelewkaIuSS)Vwmldg~W?=*QLx!BpG}8ySK&4U+p3fS|TAqzjdI#H82t zRz7qt#m=kgOS$0OCpiaD>j%JEZ@ECT{Kvnaql!rUYN44T0|n|}M!z71K3ihQA77eC z7IuK!m<$B4eJHcJq?L|zra*UG-kfeD48x9E9ls#31%@=(pieLeja`0&9x2(^5OD9} z&##O3YLOa@Wg$WKQyL3bNg0i&D`v?Bd9qsKBts%PUzy#tytSe%F1qKV3G-VdU>`B4 za2oSy5^9;!t|{fY%d7Zub<;~QvKAUq>rL<3Lf8TaQs7c#w52%}el?1uY!Jwo-TZk{ z#C402ADwQ$GF?D8rZ8PmmC#aqR^PbL=RfktzoLt?j+XPj@0mx~)z^tCA_au&kveP3H<&h@G0xVgYL~4k-w&?*f|_~w9p5Zyf6JQ@ 
z;psWdSl0^nxIHesJ-}wM1H3ybFC$u0H?6g`_8mc?m>XUn3!3#d$s-=OQstaLBO(w1 z#+aa*xKX@r1f)SgcwTx2l%GPIvQrQ4LD}q0T3%!au;XMV>_%1xf9nL8x&3UTmdJJl z>A$2g%Rx~SK6UcCR~B4X=E49})(l{}iGBtG6RqHJQy$YGT^4!?)3YwJZkoIsZ$?q2 z*y$o$+NWRNjWSsulEukVzLZX~4Qx<+$!9fv&21({AH~lTmx`kIX#v<0Pl71NaNWRZ1wytK_G5?Xr4kEZ%%otBAL z!IXq)y^YzFc$TN(zYr*@ZN)M`kqa5@qRi3+5@gKZwCD7`964#|4bH)Aa1_3FL$Z0V z6&|@iyF*&n!_^=Ky9gKXOgU@KOxoPENFXnYP9Yz2Mghsp4{Dfn_8;GNLn(qa#Mj^!0W91(Y$+_(I$17KOu&ObLbuL;{xFI3 zJ>fup>Ild)i(xpR4OB9c?5Z-tTLSiyI~mKti&V1=3<)og_!}Vfrt>T@F!fDg_ z)T+0a#vVY%mc*2f-Fe*`44Z*vj_$#n4!xeQZnDH!F=u (uZ6uf7(q8@l`CWQJQ? zPeJsQO&|IGy&^!k^?_!jzCrvSPvC~hV)$`blyY<*#u^s(>+p^)ziPtK24`T}-J;td zpS$&PJ7o||b=wTRG}cXykx)f69(#8DfjO7*_Bk5AUihWC?Vf*$vp_xVnibp~Ux$vd z(t)+1ke%mOhyB2--|0EWFW-3~F4SQSwbIK)T5oADez}D{K#!fqUKQW%(5f+yAWggp85q@SJR7^$*3X!; zuah8w0`GbJJzG8Lij9L*1f0&;ekS%D3(cmCzOD_x}(MATc}&bTGU%3>O5a zyJ%j@z*x;g5muE4jjDeSr)FK03qYO)2U7_BRb^S_a9Up`hMKPXe+@?O6bg>al;i7Z zPv@TZhEcX8SJiMTVE7F>2-_D&23u9w48-hh7-pk<1KZyeckgEBve}=@Mm*M4_~#TH zzvUQG&@K9_I;L<{5ap-ah6alP2%1BF*aFu15oQ}GVdF$qvUza>iJKi0_ojSR@9>>p`CQ~iX z9#0!^nN9cHW%*l0{3OUXl_3+|AUGPx`-#Az56&RqoKrnx>tO4h!+HZnXp&+OCiY$F zwQGQ*RbBj+BABFD=~z>$0dqwu)A9T0{4_{L-x52UfVnk~DxN&}%9na!tbB%C;$IWCFm@syEBExUl%CV+ z{Fc9T)oEKVp0>TUosT_!`_p5To&tOn_jF!le0N@CVglN0vxyB|_N(Rst#MD1B{+k47765E6bUQrUWK&8K zjtteqW!JvEA8nZ~YE7X{FL=Z<*(88BX0^ic*SW(OQNHh%vOKEP625OPG`ma?OkcE; zfOm^my}USV0ExxOcPzDx)2FZIr2C?LhF%AUA}Bn;xf6ESEPhMm8`nPPlLSTI+LZvv z=}0bbY$PrM8xrKF5o4)Huwaz=DlL^Y9ovg;OX*>w2I5l0n^RE*r#yB2lFy^vh}AKZ zyr`9O@X*pn!Y^+t$6;yScJi0GH_7=AnIGn)iNF}qeBXz2G(cC(w6aG!MZbS!k`2c0 zf3?s)lUCHW`zIgjrFZM0le2;MoF?yetJ|2ScYm}sa_rr{aVga9n*x-8yd7@vBe5!| zRfawG>3Of#eWwk&0mJPO3*D#8tSei&cfXLJ*I)l0FTyKI_sSfTUU}rD+FL&q@~~$c zI>*C19Uy1DhJm!4(r}`RTAI&0m7c0(AAR$f;rnckv$P;M<&znBT$0!rAl>&Hp-{o2q7VqhSfQBUrpPc>5gf8i(o-4Wfj;!%PY~`& zV)c(x9&*XFz03pLp|s=g#VDU6e|wz&OFUC=ZgON=bxS%|FMbkC{h-s{3vJoyj#_?w zXG0Dko5p{3resmz?&7c5lkRQNsm|?KIIn)wm4F!r<`Sm zpoKo11(?5IZF`SLv4#p2f-Bv^Tcn7TQMe{lR2-IYs4k~Ig~PlzfvD5c?CGi>8H#*~ 
zktMk?qlp(K@lKT!9sIz4t%%PRS_{)L_ZVou9Ti9|_P_nHV0g2o1<3g)NPUsddKWm< zfy+-iU7IqCPNn)9EH0H!y~}&UrFRB%##l;wj5Df z{%?C}MBH)cvzl4-xUpxtprii4k90VpClF7Jg1*v1&Z0!w5(fj{{p7?Lc6)p;bvfrv z{Wn@*r85JezDg_^k4@x><5N2!G2H2y%@CR04MZeL<{_l0=vkIGc#dqinI*O|RKWRf ziDvqYIE%B4CwvZwOgo#o4m#6s1b%5Hrz(ufpOwOY2`2H zwHBid$+BZWabarb*FX9eVbpMpQf+VDz*@dw56=dn-Pf zEmeE#w&mSlCY1KQr!xnV!9$yScZz{76bEu4c6Uaz+s+=>phmA-pZJ(vJkNsF*%7X4 zNy{0@+TjyJvS)2JPB+)H6<^Rp)=-pdg{t;~O@8Id^kJ$Tzx}6TxrE6XMC)#>|2K9FKl?a=akJfjKntZbC*#NCcV$L7U`L%$MBg~`2o?t! zLxilxxs3$Ct^oQfbR3CAdHYrHEWSa-*c?0ZFYBKw9bi((+^v!)D0bF8-*e_Az4r^` zt@=q=0oci&u+UOIpI@k3QSx3tEOmxG@%O=7pkY-Ci6A*&393zVIuhhVOeuS|_}1qc zDuTG{ZUb9bK4A-gvv^xtcnnEXO8Ao<`vnXIGPV5&RdU^R{yYi{3^^x;D(q|7b+!GV+pvgLO@Mo^s?;g5ALGs-%!gX zi4h$`Y>1z436ym$^1j%IsG09uJ%6K;$|cz*j`%)hUw|Ed!tC67?&8n$Ez>luF4*{R zt-O2u<&-7y?3*1B?EoimjW4oum*7xD3@R$+D6C)?79nmk^yTbYHE7L3ET!v^Z6&Il?0yi5t(EQh^tLqF{It(9|G! z*fz{Tfjh>p|Cn`QIa2+6Rf(Gjc*+5br8_DTJsW&JOmeFJ<#{^SFsa^8;Cpceg>~Fy zwTGwM51Hkvw`#?}&m~0-LC04g&KYi5CcCYw5%Frl^j_-q zpWat#qDXk^BmTnZXvuRlL#77ID{=*7z0#3J^m1$~UjzpVyIkd`zrk@SBb`Zpb$}_H zy|^!4QB7_;>bUFS(RCBUk>Y||NYve!u&+FCy{zYFdC$0wY_gLVhQi7~@a>RqzBNBp zdN;3u#d;p1qwk5Gxv$_SUI>vR`J@7|-ju;G^I zMkV8N(V{gKMH55Fv_0SUmEQ2JOzEjPGe4y`KIeJ6%Mc!fL_oKjbEx(I&Zb`H<3hYC zUTh-3zF0#)Iqfxz$<`iOx|cE1UEEe}s^0Z)9ji>D#j=6lZLjUYN-Fg|LB~H!j)a`% zTc~7@nup$m`2&aQXD3;qrEtAtEINJC$FEiiIUWrakI0zbc!J9tdi>S&=vX53RX{a1VOVJ>2ArzPsgtx)aM zrEX1WZH$!J&wA^OLV-Lf9(l8C{ReJmZVlb52hWis7wfIqwhRN#b?bQz zSvQr=Fk|X@4i2OZyD<4Q`-0oL8Jk&19{p}aN);Z2ewpQ${8N!2@8fdo^qi_uuWm6qJ(z`qI6;Z_uM2>3 z2v?tWv>-*hyF$eB!90=6fBIU+HkcfMivU5$M^9Yl+%PdlL-U6XhcWI41|?d@NSb&b znn(UVYgzh`43F)vG$AoQ=*tp6svytakTd+$(oa0lW;>q%=8Y}e?`CRp2ylBvw{4_$ z=Ne@UMu82YGwlz#tltL+#6}rtH&i|ERLR1aDdOO`Fg_oYscPfw| z5I6oM#ZN9}KSoi&=&HdLr(=tUfpD)BY;DKAU!F=^(b*4T|M}rtspOAP{mhieF(9;I zr#J>eLb&CABs=QD%-Ru+zCD4qOa7AryrnpR8O82oTx{DHw4x5+iO)tA+Rx~1!M3VM z=y9`?UNF_G86Z5zw^zTdPaxV~J8}LyX&z_fBfB4gj5au1hso-CM<8}Q=BM{AyA5}& zodcxkr6U`I%^iJD?w{u75GM0#49{>*RJM-mwbJMhZ+;;q 
z*>sySZq=rW#9Ot{zi;TIA>I9EGZ}o~so-S1MsAX$o9;3HM%>T@6S5}P)iV6N^R;D9 zJ&l&=5D_&uHb#2kC9<`PYaqJ5(b5Ob$YEf|o}?Y;^dwY8S0ovLbZn}n>o*tvPEv~a za*r>iTN;=fb~Qe-0ld(uJ-U{$^8_bL#639Mc4@3N)6*`nG<85IH&J{DxZrU0_KQB7 z$Wnz??#I5(G;KKE4yUv6YQ>jrn?pme#2VM%g0Tw7;R_sIx5Kog2O-#d*^l<5a@gG= zUrlZr+<&PrzsIjFgWazh8i|r58@R{y_IhE-kNy%JcFBN`Adzla^ZV(BQh8QIBsPsv zi^y7_c?$?`V^G&RfQ{*QMZ$r+b=;zMYYbS=S0x|HfQF1m@ICo?YCJ6rwBgV%T{&U^ z+C--yk zJp<*queCOranbeGBd=5XFNP=47l&*OG9oQU1ABYYTOn}*T$|m?W<7Ipxa zjyi^*+}Y<9E-13^UeoN7e(hmmq8mGz?%WHk{~~o8E_LyN&L^Qy|LGgU`gCB^`^O;4Ae>IZN#6~UJ~md5n#gajLQTQ2v27j|4x zlHHE?r#*Uv-+=&P1grVDy)Xi?G(N602Jv3-bw%G3zCmNjA#W(9lNs0B(`o2FsXh9` zIfgILP?d5c+&}lh$Cqvylgh8&4vB^(W4YBY7Y{{JOAz6^#`1D|%daU*;%Thd7kEAVWz00@Ks)H*g7jW#4!_=B>669PcmI!aJ z12TMb`IvHAO67)Qca)ZayJhiE@|GFP=3wC~OW2-8wrflV=jZn^p-!!L8Dwi!A8!(s zN}}pNoy7S%(N5qn`?wchRwdwu6={@5>(vw2d zx{ueo?wrlHR)~Y0NBq zY>AK8qdvCJd{pUat=5%DebUe@~3j$<#Qrr1O@=lkwUA+`|d^WxGoRt~LsQ zyvlOppnP}?^Nq~AnXsSB@kT-@b`)u^F+OXcWa6-dDzXRIWi?5kEsBR{epH(Sv`wb2 z!$|Mk`#bA0Z<6Zu_ulK@Zw9+O-xCsy!lAU`4h{}xr%4WI%X)fWxt21>){w1e$Vb1aDK}DxG0KIxIlA#fORBbrec;7`z zBq=`}`DaqnQ+_{f8KU4t`Y?0irG%d*h(sQ>Ki|WlB?}kSfirWDkijnXn+BPF@?Nh~ zhDf%USA-4(_s;ZM<8`cNhs~2nPvXZB&B{21dxllO{!-0riGw8kJc5XM!OSPS1JAGG z2L3KeZ9#AG^t?4r8aPtwkxtsk41?3T-kIaL*_?P70`5}QogE1u~arg1!WnCRx=&pV(6A_qE^@5e^uYm7)5MAO#( z+$QU^XP9#+$b3 z|NK+^z+;8TI@j56?lGmLXwnvxbf~MjhU;)d&ZGldx47!-n@oL$SNLPXBj{l}puf^& z$+f!CWtE&Z+Z)-bhTegZB;8uGt+Rg*l2TrHh?=)GUiwHZ{ISG!p?mf zz?!uBT1`1`_y$z4$kzXvtAL;%`SLR69Jf5!KDT^#E%R7w+-MTp6T3wc^NZY{He3qs z7gTNYwJV{dsweQDnW*xtJ6>2B?#I>LI9ZU^C#_}uvz+6lPs#C#iB{s5R-oTT*AqSq z$A&`wt%uL|h~o`LR$}Re8m3bkFlE5&0Hcr=n5X2Y`@5lTy{a`xEXm$cXLR}bg#EHX zyH10PL-9dDK#f2tt74+6_(%Cv1W~Q=;n?~84%U-jOaTML(^8hQAn(nD>7dKRhGBLrDlC9HD*)P3UcTILjLzUrn5&<>E+US_hf{28a7^4oJ>!KS`tW91Fby<@~LFe{KYMLbnJ zlN6WMU@;nCNinH_{4FYu84^4@sgz5k3CIUS4RNR3L8Q7Rd_&A`ckPVF%je<9cveay zUXat12P=p}GOLyZa(&dWCHypN2OOmYahG!9xesN{bE2Cv9f(B5&A(`^Zcb-7xruZ7&)gl{w7XdVUmtWnG_ zPURrmlSO^M-x;W6B71S)aB0art248CE_xeMB`e*)|J4b|-eJ5L`75M^623J^b=_Rw 
zF@}SNF=W}4uSxv3pF{OMqvsbPLGd4-m5Mz+$6Q50a zNA~rb5%h&04fHa5dc*-`6{)9)2U8nrs_vL?MSPcIBGGlAg%X^@Tf|HEN%X{VZ~okh z(D}12wA||TwSH-5h30gBJlz*sssdrO>=}jRRr@!pVxQ%PnYr*V2W7e(gOs&9U3Jz> z!!!9!+@?@SF-uZFV*XgkRbC3H2wNZ{wej*6+~XUK`I3HM1gFn|g{zMi#2-?(Q&Pxqy9(AQ^tz zuU$QtZw|)C(Mwg;d-^jXMcsR>QeTt$HQzA+oUCOn>Y+BuOfrY88BGu3gD>b9Q9Rzxa19igE`cXjusVt-W%&z87 z%2LVB`R|yph+KGtT5?EWgX76L^)dd=_O9!aZ$59^I@PZNJHEf#?`&!`SA-hRMjoYH zzg+SQ97Z0tn-r7Q{(QLOW#c~KnO96N?uJ5)?To49Mtt_x=$Fl*j;-0n2pGnDuf(L$ z$?7u1mP0v>hN4Px?F6y+gV)h)>6ig?yU$G9e(B;G-G54UCG^7hC5~LJ@t{+QCk;}=m(&^(=qWvH~EcwY$XAvAl`vbB!3{~R!;N>1AUoc#q)+$MXA`S(}L+0 zZzuNv5ZKPJ)??VP-0nH!T(QrvkBSRtsJ|bXxnE0W-28z39d=xsAM;8VuLQ?{P5owb zeG;|x-^RsE*j)^YQD7{RjZRlH%-2O)KXR%Tzt6@}R(eg}jO4=)=D1AVx*&nR|MAfPPlTXUe1>}?Oj zru;`C{7boU|Eh*i_-NuLo;4#X^$_}<6z(bC@U6im5_}YJsA$^hR=`tRh>%=ZNMDrB ziP}-i%K{-8w>7&m$bb1XHd?8}th{flwasqTB@%y?ZS!m|57_vy1* zR=Zk#^Cc(8qeQJ(Vu!f-XhX(#)S%~C z0z#mmZktcYobrI(p{-+S6-Cq~sD5=$%AG=`cpDwl>txg*CURKs4I;nP#J%&Wpgp>K!We-Lh1vfL0aoO-u%8vG6L}4u<_Vr19h+tu57?bH|rc~`p z-x}aOsMU0z1t1+_Pc(FW1s@ttory(G4H8?g|0>uV>Z!Rt!Z(*KUuR&U1Iz9YV(~{{ z^APj7-ouyG?w`!P%ILh2`ck#%giAm>M2(p08TRShG6qpFeu&UIOWkk?bJ9NG{_8LU z?=x854wx4E=Zr1r$nxLbb;_6PaGf(D; zJ$_nOC0yQ>OeOXAx$hx#x9aW}phH>sGjm1fyqf&o%iI7-z9T0YB)P817RerTCv zP}!!RKp2wdiOZymop3V62uu(B!bP*qeJ+s`iJ#t?F1392U4W`ZO>=&Og^r4|(((Dy zlX(*ZC@@^{r(vvCa2xRn246HX(hKuL-MnWJ@&W_yjRnpbBNsqi>%pB!KpXDuuul)X|>hHY6VT_YTC=YyzvsT)9 zcSQJ{m9so#Hy6JB2HN0galfkE=OOwXlQ;BnWp6_rQ96K-4 zQYy1(4_`*&erO(P*+0<#4r!@QOzVzd=$Us%C9i&U8_{l`Op*lv2l?GoFSFr$4~)I# zjjk8DZP&`ES8JqBn|i#kpBov4d5fje9+>FKbYRb^1dOu!)?{s*E@cki5qJEn*UCY6 z8I^NxJbdrUrWbqJXdkAPc(DR%h6T|@OQ2NmrxSrQw0ES?Z)}T~;VG}A(B;5sH(v0w z|H6<@SxQydCne2i~)spnboj#|Bl!hc3%GoAj^pZ)o1LQ@FhbA z4zA&hI(|1Q`72J1yXLr57nl}h$ITt#^)@QlWf!Y&^mKtU_cTV)_pwV)G6I*!Tll>- zp72R~g}G^O@P-NQXnHEsy0OvAK*cOGCt1T5cWFIjyDuir_%;P6S1&KN;aNE#c9v1K z46e#yAT#gXJ^Jyv2p=N)tF^w2CHSay578?xbINbluN*z4C__}#U05y%vxUckuD3UYyt z@DbO>%5LCU_i{)?T5)UCu>}~@$%;pOA8$2}C918Lu0=ej=*D&O%WNJIR-8fw^9kAy 
zb^;plt51_4r2KS}JG{_z`7w9LZ3PKg=RVE{$O(!M335r~7k;=svCmp`-31sMtZKYS zXNT;w1Z@^iC@0`9AF+;Q#<2*pr*ev4YyE<#zsoDQ482;WWN0~%9*Lri2@9QFoyakN z*kI5UvAc@DvvMk`Tzmydza&gvuTE(1de@Q+no(jPci~vK_08c+ndg9at%|MjdSo`n zaAK|zfPd_8ZO3{duPH=eA1tiW(scT2H7BE=u%@T~sNFTHW4HjEdO9^P)LG#Ez8B;d zZLH~r+sx&moiOn&qjpXEeRS$hGLLeu$iI_!}5qeUau?K%dfa5k}w+3OCg)b{f24p z?=7~iRe%~-Fv5PUZ5tu|UJWx8eDtazUNB z?G_0H)g!Q!OjdT7fZ@{MtV0_2&~B{ub=}rjmFt?+C>Zy_wfso%0m}Rv!QUEgX&<`& zi1qFfAXl&cLHWv;V*nBd>k6QZ;p`#dWHlWoxwdgkuohT~w z(6D=Jo~U9n>h^ldk@)^NrVdg;$b{|*P?4EUyg@ep)RB%fYFtabCbYV?@_ggkglEIj zmh|#c9KpRz{X8e>R71{f7Jov@HS`U#2%gK}MK^2pS>74}end>_b~FRmG51<@ru3>o zWYQB9JN(EHeV237fdE5m#ds(sakwvv9ykPin+k_l)68elO&;8l6mPB)ymQ{zEbRSv zkM2?u`Jbd`_{*=M!uv3}gCR_N*{CPauZsOL{J)wh040IRSxrLi4fyN>X=`#H6xS^S z+!$7p!!(jwJ=^Y1{|ce)ydp(RK!?P{#c^m81D6*qg3DMeL?}dE1(1RGV2>b{%vvfF zo_jg}_bh<@TKZ4?yJ&K{ZJ&kAzF8qO=9o2y-Pur-YU|99@v3y*Ae_`%R37gk~eJ{hNDQ(&+OKO{+P`Cz~JGcA!ExlZJbSPA(qP4*JS*97^rV5$etj zvD5a$LYI5zQ|2<8Ov9*)O#PA`q3PyA2R4ZQ_I4iaFPWuY2f;-4+|-?iw_V;Ei}eol zVTRCO`|qC1az<^Ruk$*d$WE#lw3hu!Y_;%n3-bI|Xa^`XaBwS5U(m6>bG&l(amPAf zF+N0Iz#zn_ww!NSq{J(+a+12Foz$e-0Lc8kbn@=k{XUK$cuaHq$w-zOC3jJ~ifkrsBue-FE$$`odcT_4$bhZ&w{ z#U`l^eubI_x)`nymR~#!*zlr0w-+l=YyLxOn8=mVeDWGd5bJiG4VMYac){{tI3o<& zOX5TS*r|^ker$s501U9@zt+Ui$0?tL2`)%qYNFv*-lt)k!}~{>(rh#KoV$6aeC!o= zd&AS)QEvI3Po}K7UIX}oN%$4(T_XU0@J!q6w7P%dVZjT(OLtvvcy1QB1%M`@bdY3w ztsf|6(Sj%>!aDCPu>>Zy=u`c%XDlcoKj8!De#p}D@Y05Yu$s++$OuaBR(%IiUeSFSXD|K#m{<#i1A`*RKo zHDN9@{6QXz%vFIg`ANIibywWv(^c`$Zh1EcUq9+G%vh}YSHt5aE1;}VZF91%wesKZ zE-4M{ONaP==V=n>>POt-&v$#z+w3|3Xel>0P$Ki?jJ+JUa^{%BSLECmv~)VG_QrU3 zb?3}lKH=~}8|)M)P@?YgwKNJBbun4jVRl42ZQPYgHHb|Ll0yb$IO0`&KsuI#vUpA< zo1G)dqa9AYd*}TZA)@yB zA0OyXM=g)*_|rlcus?4$q}xOxfccgR$Ne*{^^tLlya%Os2#^0>mz{bzRhfOrfv<`1 zHgTU(G3Z2y8kq;kAl|D>POG}@>8u^jH13XL|8(zL-2PIf!L_`Cabe0)mW@>LGHjW7 z*?E#)QfK5tzFjx&BgtN%O!ws|*voOoX3r0asM;51+;&`4=NM7okTQJXTNjt&WoW}u zqG#P@T5r<1^E=SZ?R1;-9K2r=91-SVl~g)_!kd)+ksuYxfL=&|Pr;4I5bN0Snqgd* zI}h)GBw0S#1H5q+ATh59^j|Zs?TAwNfD7bu%X6Wx->_)1|0|(#n325k2+@Z_TH7p? 
zDxs6fs*}5mx%ThKa7({l&#i}?jV_$dl3$)b zNRv`xryfafIs(_N=-$-RXW~ z1}FMSGG~}SZg%_aP#wsohZi~*5cF2XR=)2|Ty<@@adwQ4oVVqo85Rv@tf1-434K&=^|agE zC^yqgdWbnuZocpnGC00{dxdAT-!yPy#T#dps_VJmJI|lO;Qgil0|ZQqCQn?kUO?^6 ziAklqs!2>R=qwamglt!r%DY_QFI1h2;N-YQ?jqhMVakaPIV$+ZSh!nnY$K9C+9t@!Ft?`k15>y++Wj#I!OFNyqh{1@s1>tJ3-8)Jw4!8U zk%WXzM~C@VUYLH#k9uQrS(_=BDSZFtw~eTp*t3wOKfT$oW3?7W?Zy524|xuAaJ?Tl%TO|;c^ zew*6Oowt2yF)XkiTSdU+OLQ>$E-R@aUgTMJ%=Mkz6+S~>&wrqTv_MB%yCY_0|FvO# z+xfzcnD!kF=+D;@iH7SrrNwhV-1>lKeMV8Bt=hM$f&67R8t6a*4DoAfa=A0`>uqw; z#Pc0U(LFSH*Nsa~y9y{Y4lDlsvHJ2$O#fF&s>Izx*`w#1^)%h)twuYH;lgp+NYIih z)t8sTjX%I=41+jW4c7NbD2 zOFR-c{7LRBu^gOz54HSFST)SQjuU$a89wf}or{m>nJT42xfJV-Tt!{v-X~_lkzE}$^xRqnv`ImjYY&29b&;EHN!W%izBueE38vR~9!7}Yb3 zoTvH4)LBawuh%LufcaR%ieShP907~Q075POr$HcLr$7x^I6bUF^Xp@}F9gBBNEDxeC z8W*4R@Jb}k13eU{e4ttAiYWQzJ|@ey<-YfzA~y;~?sRFQTWO#x))TVQ17pn$J(U^= zsG98Oj$_ry{sfff=JM!AS}&fT)?WU5RI&~Z3lT8Ce9-h@Zc=V~Bx2kz1;t_TX=4tl zdIza4;Uv%@p1Rq%%&RU|m^L2Ae;_(_6=^xy8+mHs-er3wJA-yp&C90$V%7cxtNBC8 zuM#M^vfJL7kNq-n|ObasaFS9xF19T;hI~OV^`z{tSmRWk&>jQy< zZ+RP`eUcFMNLDmp757G4d=tMVX;>?k~#Txo#Jx_xqTk5)T>lY)JP(}fJk;^T<5btem> zjc#q9Yp)jFNe-#I_p;7*~m)$+4R{9mtfu0be_!PW_(3I|D;?s-~OEp|5(%@S*rM8CzWX+D#A5pc9CPkwQn_Kz+)xH6?=(Ci~nUzgl894ymlXmMy?hQ4O zU};$iK4>r62U(BR`1}8}hrvLr%>S)^RgvgyPvP;&*+5mRkDm+Wdh#XflKJzz0A>Kb zF~l`GX_Go(JbpPGgIJZ_ah*sw_DSdY8rQJQCi<35CwU|fMg2}++uY{v#5g`bNTb%h zGuX+gBzf$#>VoAE>jul^pSEiW(62|&ek(2Ca594Ico!G4(|nYj6Tig~8M@b3nCC5} zQD*ALmVF^{cxu5+L6;3P7#lhZ&Imm?rSq{EwmRK+K;K6n&0vPpzc4p2$$NEeE*FFd zP<`I|+7(FaJyCZ)6Rc}(ksU?=y5gBfy^`~9x&iPBeaDPOI2HS2?IbvJQfdcR zGH|?P-g4c0$=-f}9doedaRf2-m*@Fg9OCuVc^4;Dm^DghdFH9nqr%WyBinhB1}LZ9 zpl~$E(QS;3GlG@9<(~}EsA2)ERj|q}G-VP|;!%LB>YFXKrURgWB1Zy!wP9!khgc1h zj0i5rkI!~>^9q%A*mEk^rOn?g>xMKO$Ru+CquRBA_Ux#1?)lxXlxXtpqjkq|*w%Pv zpaW;k4L&sWw)z4htssm^6Jb0>n^#94{GIcVI4RPRMR;z{>QqDbpO4+(OABrG%z*c8 zTfJ08Su({Ng7!R-I3GoQF?0U;f5P-t-dHKY+WA0}vE6xVLPP3wx3wL>3>lp^i_30h z-;4u_^Q_#y{Wjhoifw0WOb)Q4?fa`eEKXq=>AsWp9i$I#_UQFTAKs&;Zkc6AF`!Fju_!`72VggoRRQmsDy%3j<- 
ztA%V>HjApZX&6%EW?2DL72EP|(28|!!rjJ0{bb@Wx1X?K=WPe?88eFQ1NujFQ{I)U zm87YRtRC>ui(>1^lKq8XE_nT$w?-6qYx6zWor{rP!Z%QZHkJR-8%*-I3LG|y#;ShX z;U0rGXZ}oAg5MAuh)a5U%cm13suuruYGQ|o%hUBcS`z207>oRU?`>*vK?qnU%&OwG zJd7k$#@{F|)-cH%@!k!X0PC0mfjI~iRWXp(=$2}#3-4mQm6XGKJn6Xo!6#VTPRz!1 zxX0ky?1*^pUeWhH(GZakIGuxaE2}2h$q{=pd${8^GPlU~wY4tpyu38j;Z;bb;Ih>J zSjXGsViD$Q@NGVCu;=1cQM;NhS4Tj4>lus-zTuvO&1?V^4Cz;(x}Mh6&GZ`F34GFb zI~)PGRPA5$Xqovba}_iGR+QhU#zOEPK3-F@k>Us2?XHJ{>WS0S99 zML}XO9>CxXbd6o*9MsD-_?N`0fP6Cxyqga!!ihv-4X|8**zu!!j zEmq3{>wZk(x-wR6bYw4h7_{lF-vF)7%=zZIZ*|$I47l(#Rr}pzkamJycai&_&mE7z z?RsbyR_Omw8x9nXz_{{qjaYv$M7I&QQtJ@5{dTSaB_ zsv^FtjhD77h2sFA5-UBK7a^Qo4V?ROi<~2a#x`GMH_5i_USlaH5Msc ztu;LhFYbH>!r*@9I-@)EPa>4ny28nJOoNbW7q-E+bRda3u#r|m4Yo^v;0 z#Q!1vdx<`E9{|B!FW#_<%#fJuxR6_H%7 z0VGTDrJRwv;`B$m{6Uyd(4(Dj+i{ou3R!HC$z*+_fb*<_PVxJ`nz@_h z#jXPbj)r;kao(8j&*Ye0(cE@|ccgQU*?n_U6iRoqfSsh6I`_%GP_Dr|j;iShE z=QZ#C6R%>%f#Zq-a@|7#BTJ=?e^|U!xmmYDw>FnM%0!|M%mN@Ekk-p5k7s0-0q`;( znk$KN4P>Z0K@2RKZ3k9HOAed#_idc#oIHy0gSf=7%odWfx~b+<%P13-L^k8g&^kU` zdliapfz4N0u7jrvtnywo;^Hd%^HO*5^+~DPf5!ma2T9iCco@bjG>opJ#()IOxPsau zOm3g!?K}dv(^LAaJsoKLv0nFW;4;4^xlXA}s!k{~lLxOk-gv_W;44+<$jit%V!Fj`ib(m{A#ik}Mq@@#fC6QAx3NT7=H*@-@EW>zG{l;qagTFb|IcufGrIEh+ z4GgneYT-sdbVr+xv@e#p1_j?^r>2)$tumQy^emhp;MjuA)Z9Nja;c*N@nz9Yxv@N< z|6=_Sq`>3z&{vWPLR*Y}r}OyGj;Q~-dd0GFE+P1DLYy=4qxFPMt;~Po*TvIx9D4xW zoc0tdcx+b}+Xf}5xy~GSu~&HjP^N(hIjKe*LC$iGVQJ6f6Dr8&AMVNP>h5yLF$Kni zSA>>=*Qr>(@vR7ay+=!!;K@06_B@?K%)n@C>b&!j=X*{-2;|6V08lKanN|`$Xsj*+ z> zmq1?qkc1+|L8VH-1Qi`oEPZ~zr?=Tixt0PYuAu$b-?*+6r3cw;@JEurGq>k~l4QEc zgdzT=i{#>SelAmdJh!gO`b!R+RZ^wyMG~45`0Bk+mS4YoaJ$1nq#pi-3?1E9 z=r1Q>9+s2qVfdT~;OZRhjx!V~2>SZ$0>&4h!6{l&7Z-~)SB-oq|H|!uHFw^$ogsbj zwSX7OOfG;Z#2bJJ>}l0^#`}eDBS}(!?H>f%A)zjsG}{!<4$^ge4*7*~a=7f3FS!B@ zZ?ipF4inxcf{EqqQ(=fxD%r6dC9aYa6A+WC4=Tfk*u1*R@0@(JZ_?$xorj;rW!xZ2RH zh_4K_Mf2@R*mZ8ftC*V*%yX3KWQke63lF~$+W#a>GBaKeVN9rssNO8R@iuif9AJSX zQ~U`=u-naSoGjp4`(>h@IG^SDyA@GYU0ghm#D6t&-$|aP@>@oJs7-QG$en9nsx$zU 
zIRRc&r){4i9Z*~9d(VL?k)n#RF&~KIum58#vhUA#0Bvh*I5_hY8mS`AtT7bP;hR=w zJzzy{*OlPZp4@l8_R?6v?_L*+P|_@Rou|4 z6(@s=Duno1dZu`OkW@>KBQRpO>NUpfkT|DvUwVBYmRr;Bo$bz}v(e@xkhEj?r5r~30s3+X6lgtC{%bwXkho?c`f%H~$_t1P=4jSW zM}KA<-c;2541Rs-e&jy4_uf(gh4d74JzaM%2QVuW(UczQe2x-o|4YP_>}&AkPX7x{ zrx+)k`U|_=OjYUpWlxQZ@x`!chsd=bP>eduMpl3EpN_5n1xr9Jsj%y9Wrr0N+>xRy zzVS%f@iHj@xXsY-)&3a#C=SaQ1 zIZN4_5C5%9*113VM{Yoh=T|`#@ADUS|6)#H0W}}+?xDs8?BgB0B*ZZ^)0+;J(Mu!^?9&08}RMzFN*Yl6^*i*KBJVnj+*T;HNKx zs##7eO)L}m;?-7)c+m3S_Ffh1WlL?4=0*Vn@gdGzhP<)1MCrNV&2QE5@C>O-C(to&&pHK%*AyGE|3n1-F=XhK>nays%9d)Qxel=i$N#nhe|I=(nG6`R;}jW z{;tBYv`;zev?L@}Tx*whx$dyRKU9b$0i!>}H5GV`U7JRF8HygRn6pT75u=CTMN;BC z-(N!U=P{=mA6M>;w8#8UEC4YmQw*h8a^l8b--!lG0^_f~8{AEV{<`KL&>>t4-fN6A z;(vHpl~*Z62}i{>YNT#}&{KDCb=BQR<89|ZTFHEtZ^dcXFBVX2RXx0&TKZhGf95Od z`eDP0OR$F~r69svp?JT7EjVUl+dp%fQy@0mhzC6R*I0X$6m==nkg;E^eO(~zY(4R2 zOWB3{5A^tLdjX2w@Y>1mWJKYM1iqicK5h!QfQ+Q?3FhJd93dcL>M);83M;M81vhkY zcO<{ygwNmgto*K&%KQJyd?&-^QrVBws?fcd(AWslWD9Q*^*=`fzHeLecA<0bpK`n3 z4eu8f%~z4cKJ9<<0x6vPkx#hhXsAP8d{n{nq=PR8MgBt-&rX3=uhXJP-VYZw=5i9v zuZb>{Qo?HJzmmw9(vn(Z)cJN$IUJEKao53P2>(UhEb36xCXUjX7zoMPFLUgC)6fmf zA@}A5rE7L>MsvQ63otx*(P2N-?u^B18i*RLnxtTn5=#{2*{;8o5!n57&gpnw&P`2& zWXEopK#t+i&`X`T0zo}(RVVc<0WN9CiuPn~Qg`=lxdc}!$2ea{=v_0u>?ilRHea8u zvjd_)`brA|V7d9Qm_n<#0<$6R7+5v2&PA`+o}^VlKnSLXsn7WOt86^!jtO8Cihw)- z7}k7i*`nJ^S7Qsv!XSgiev4m2JAwDFwmW3+TNH|eYU+qvD0ykZ#BU@H(v6j4@tgWq zlUp@a*d0a3cA#A&@C!0(o|$Kz*)xr_jLB}WRWMXETTF*OP}3|(t@d1Ct@^*pH0QlG?9rNEr)y>_4)7FU4PY`k>@Qtqhjg z@HEjDAFY{&cK%TiTJxaVD*mEuv;(B(sZ zJ0&knu@oZqbeRsEoOFTF7m-%R={NR5uK5 zuv_U30*0A+Sih}sfBC9k=lt5THLU$MCJ8c6r&;3+3WkYlDXG)zsM0X$DX zbPGNl;+kA>?uM3fO+0jQOoyIKf`L? 
zv-JGr<1qWa`)~dfh#fnrKN*bwu%hA(+C2VT&`noQ%EEx@tC^;6-Vu*jIWb;l5SWO zv10r44F$vB!^DLdTG0@Ds>!V9dpGB!Lj+k3>7dsoUyM(WjXaqcaOig zt0!Iao7rH*8WETP+A{O&wb9b)jS`;vX%&|x^&|H;zSz^UZkp5Uw`?JI|E3la8}`af zt8#zDS(yoG)8Tl(P(r&xK<(h^y%T|o*3@qLkX(gV$NVvTH*ot^*5Uq1&k6)XOJsA&ok9dddpAQZ_i5ZF-Tu_2xN3h_ zh2%brD^9|;e+6P%9&MPiU|0q_B$vtn_#xU)?g93Fc3qA8!1zIo)c=j4d7BA2!8J8Wa16-Bw`>2O=om0ckR!C-^U!Cy~pgAzr+y z^OL;_ksnHBzgi%ZlPF7~0E_!tif7R2cvT!iabPJkIy*msvfW(I<~Yb2FR7o(OSUy* zC*HErj>#X=K6vRIsSR?4!HtQw&9D2NNcYK?W1}|pfaKyC7wX2F z>(Z*_?T;0=7)cW=flW=S6kx*yJRCT4V}OlJNREfka!=l9qw-ikuHzz=PZHJt;p;7+ zqTIU2VHgG(5J@E@q$C6ZDQN~#N>WlvLIf0$PC*6~P`Z(pE-4A=p-bs*34x)crN1-Y z>wB->|My#K7K?hGv(N52``D4SC%stl2QxQvb&qaT^(mQZaE|8@lAno6~4c`LQJ@$0Kb`+p-U(j}r|hjXeX^l1lHa z=@t~;AO}UENLR|mMMoJAgDlGot;9-on(!N0d=DIn3fT+oNwbel1E`8k~)(Q4E zol>Ae6_dZ}x2xJooPIQvX{m0Oxb~QQ=SobVqqIJCvSm9<_RF_9}nf` z9z8Hsm~!Mu6LZ4Lt=6yfJjy=`yOCURe?r)daX>rz)3$oZb{{)eOdKy$)U|=-ZQ-5L z@e<(A0`0?%kcE$2(VvJ67U+x!bVXaN&&-J2%2fH=@eZO-!QWe$z-|%2u96>PuZ64% zAX=`cv|Ly!o(q_mvw4=)gA#cV9~&1FshQU&ZQ3DK$TR0P_$|Ft=G{bu=9s9G5THy- zj0fisPLxI+icV2Qd~1>!70DuJfUmI^Hq%|FKT)iwKjnH{To+3|pQ{xvIyXKUWVI-e zh>EFAM`>1OZwiEzXe_cOFInI(w1mcJ30bx?-lf^H##(eNSXf`MKt(E*6wqwrqV9_v zi*j*MO|)ALIhXlZS(A2=lCy6y-=yIwt;CJ-% zj+x!eZWj4k{8dfRiF z;{q3HWza&ZxqZZ=2tZ{*I zzC=p87Cl4k?^v1};RJP9?=4G@xam})G3I2NfHt~f&>x8SZugEA%0wAs@G~y@T)jdg z#*_@$=olRl`GX_7miE4g;6sK~Q2~P04Pk#r;x+-4dZ9zHQdwF}RY{4OggT2`R8W;( zp@2q6dZ&t?CDRajncO?`YXgpMLwWnglyj~#QQhkr^_|(zQF%;$T zf^q}$Sq*x#XuEVfRL7;5H9|iyZQw$d0FMT3b#{Pb#|632#Bq3*lRBaBVwxJ91ss4V zbjt{N;HV+z5UF7RhHi%X8CbD?;3;`yr8y+!>V3p-yweEj{HSr(RyMdN zurOEI@qgk_0Yw$CR1K>V3gA^0&|lF?0*N<}IO;_=xwWi*3twTL`*|)s4(*@{UnS>D z+xRqrE+r6PCbkWfe(#0cH}O#~=(IJ zMDMUteL};sNTaSZM~+_)`@w zM`Mgcos8d!qmxT`_CSI398|>Uy3lbm9_ZT;c@$xHwp7J-ca|P)^#akw=TBZ1qp_1T zKvZa93OjR**Qj6Fz-kNfXy&CH!++P~CuWAkU5D5b+WAn{R68 z+O@R;M~0}Zwp|P=P%7?~Vn?~6Q2`G;f*i9gEwxNj6CGrCUul=C4=$rD`|^TV(>yHi z3ur9%F4B)jWSz;&vs7oxj99A{@`M+2?8z(ZNeXmIk)+4axCsg5Zkxu$L>N3clr6iS 
zBWgKBOH#YdWSUmGHmH;sS=-rm>wUnXqrfum=(qK6i>5397}I8SDdwkh8U|aAOGC-Y zBo;hWWtrH%n)M3J-|D1>1hWYv4Tfpfxj35*XtMeulPaY5N3Y*BXVomrUMUui2L>?^ zB;u3XMuWm`f4HX|>2ODs$)pV>z?WuaklZN~KdMw{i&B-?E|1iV4_8$dy|B1=WG+>s zR=1|{C`nqme#b2`TD{WgL3wztmc`@1yn^fq#i&TJ2d1gPpTvE#we%fF?nUL~skseF zO{vPn39Bn>hzomne#zQDi(gu@@VKR_?^@Si%;m~nqDs>lBOc@GSzTqXn_-_Lkj?_a z;qXyb<^5l&Z0@HJja8U_UhcN^lmu{2n7MTzI?1g&5(Pst6I2yXz%TO96*qb%gSg%& zS5n)oAQAF(No^6rzAdPVZ~PsjN(Eg5qw*{{+hG2Xo^&-uh*ST4&Bc2(TQ?@m)nXoH zg_!QR7%*p=h(_lPfKfo7I^~DQ9Q(DR3j0-+3y+*kbB{8PuBRyuwzwoJ4zef3fVXL^ zs2h|}QH{yxDsZlDXIzU`uWrii*+bi5@Ii9pHaiNBaP$L}-()%Bh$ce_eCTlIONdq6 zZIXj_z?I>+yB;ie#Fu@B3)?l&`kF5mX@paVdTL+PAZ0W1H<|w?wlVGk!MdpaZEg`F zR67{4Fd*kuDY=8S^in~2$^WmJ0ce&+rf0NV+|F+MU^Vw+k_E`{RS3>TMakzrS3Q#_ zpuG4ObbrIhK$NZP_w1vJqH&gDlc=%)8s7uTbV=v7XZ}YkS+m6GnlQQ{1uC5X2Jdi} zaX9c9zBRgzN(gDAtt=4r^PqZs?YrJz(_j7q6ih^*Lsz#y5mV$1IIm|ja{ApVa7LG? zK%K2FbkKk2?_&Yn3h2;y^u^%ZQY_;L8O~a@Wvqw2J^&%$?kv)w;Ot0kUth(2iaZKi zTIn~qLG45V9`AW1{sC{|Lx+ZRIU4SdGAUEkx7_(ejlrgWP6va_yhGYBbLf`m3m%T_ zQ<=_8MFRtv3^j=D)<~>SZ*X1Xy6(~FaO-bH`M0|C!B~ABoG9WPoe`y}Ve3IVpFdR_ z#wriSy}3hQob^nbZ4k!;D~i8%w^y1;>D7{`>&zS*l@`B|-UhlZbqQf6Zks$eGEmtm z)OA?>;&A(c8~p>U&%o#W%QOA?m4XLT2$CA8csEOOko0*>%!DxdkiQr>_X%~Wg$4s~RFSfQB3x$(K9XMSJ$DdVU5kCI;YJJ{!@Gk2EQ37{Uf?=C@T z|Eh8RqHticmjJ}RV_ft?Mp}Ac=RMt@qj(~`y3YlLdoaU%(PuEy=(sPlKpx#a{(V!ZbZ&&%!yoU?3Q=z zis(CvpAO~DsUCmeIO`$*z%^Qmp6*5=f!Wc4$@_6);vV{+!N9~WoSeHCDK_pHN|E3+ z?ypY-fH5#ELL^SiGL3aD=cnh>k1S7nDkbA=9|GRNgi(98Z_jD4!p~YfIbbdV9Ox!D6fS zCLx*DN%pk+xP$CQGOof6#G{w}opUWcI%>j~KB@WdL@){yGdo&tRQx8`;Qy8d4e{;p zSqr{23W^I|34Po4;wSZ!;hr13S_@UbWE(W!0@?oZ>NO)GNRF910j9GXdJx}6*5Z2_ zb7kge$?vT@?I?e+ZIvQvEF0wbgZ_h>xljBp^SATO#p=wz#PNe+IwG%6#9zHTa{g}k zcc}SO4_uX|vpyjXL2UMF#C!K~wwgRd1MnmOKBRX`7b>cQ7E)r0Tv9~G{QyeE$5pkc z96v5L0x`pfwqNS4*w40nWu*DVD@-7~GP6Q5g&dyqD|v6r^0XqIVPE-rX<&jRYw+00 zm$-zZKW*V(x+ld5y6Yk2`7&Bbe?9WA!8w-1hSP?gp$Q-Q+|fbqJOi5H6U`v=v&GW3 z#Xh8n`LBN0lzJXN(mA=-V_UNlFc-iSB3#ax3wIRLs{5M<2ky}&?KGDyF$?LZ 
z?U|#AFK=iLQljn9*89=?Vf3z_iQ}W*KA$N6AZX*Eda`jb=E}%)>4#6-M^-IUp3SOi z=Ms~JxO84Sv*}qMIA~ukcW&!7$srqPV3V#$lh$1rb#-fy79ximB8|S>BWR~#yRKYuMc1=@%9%N``J@e6#mK!WsvVVpvlEQjeXBP9Zm`b}5>p`} z6dB!A*;Dr4;NV6W5x2iI($jig+SK~&Nch2UJV?3tFG(!b^_;I92ZE!-u~3MVBSSOh z$9Emgnbkz44Qyr*XQog;-mmXuZr%iwQ%pgB7)=K6*Qx~-^gz07W`K;l8=|sYir=W` z^aiGI1PNUp6Fbu`Z(#M0uIS|#3;`DV7g^BkHd}31NaZswny$wok}F<~_Heg|{FSg0 z&*KPN2N)~BAwli{C|L^_i5skV9}@rW@Cgf5ydcRZcBA9{b+CAQ8wdMK4Uhf-r}Q>z zvG5Ir(XTQzU*=wBwAGL!JTy7jl00_bZhJ5s=O;MBW<`-~eprYdTpVwL{^iA*9f%26 zy5?_&3^@vG4uZEVQnU{od4712(KhUQ+Y$gl2HA42T|yMUfLafNQ@qGjLE^iSazQ~# zWFnTH5=sxUDhj7(I)T9#QvKu~COZ2{>D`a>Qm*=s>? zNarRe^|l6sy56!}EC2Xu36Ti;VA9)Yrq-NU{PPwaNM>#C$%s91b$itQ)>xwo3t=xq zfZ0toW#kn|0A`Ao43P(|q%WufkLe#|As>?())pvA2Hmw_l6>a2(#IC^>1P(27&yD` z67;QRA+Kc6sFQ~<+A^5KIlM~gbruznVRS6sw6@J6q=E#a&IO|{r%L8x+X~HvE*Bh+ z5!44hw7JO=hY{?PcCA|U^d%RUX5sj}O@tJkFoAy;CQBP;H+J{U#77)Vf-}DgK)=ux zC+EfQu2<2vS3OsGiBZ*nS7y-wKpYs3u_KuvBRy6=0Xk7)Z$cQI8N6cTf_*M23rN_K zEO$dUl zjmt42MF@iBm3*2A<(z4lDurv8;7d@GuP=}YI?(^2%Cv;J6uiA)4u60nS)%ac=LFM# zFb8MEE%IF=cmbh8+-i29J#do<2i2;~c(~{}rG@BuN5IMo^VgqIa0F5oGz+CD*a?^7 zcDAxnr3n(dQ1S-9T2_bmG%y5U9s5LQiCiA$R3vzP#dJ?}d%1P@?A3?!$^mB(J_d&* zg0aswvx1{8?I$7xf;d0UM})i<&UKc&H~2!;lQslXL~YZ5^3Ol8As2`YFM%81%Q6ci zuD;tQ)UKDm(IpN$nID?r65rX{2}nL!4S9RKQJdw`z172Ra5#}gRwyqs;B!2^t8D)L ztYt?enf*fH6hHcv@J(oR`n|5kWRYEiHWm5aO9UHyy$$*>!Hc{8jNob-j+77r?717t ze%U_NB-V;buXP7#@G7QnX<62~FR2`%c@TXuTz=xa->)N*UwI0m4?oDaW zya{VLY74IE8R(zQirASQxOT3+Xfu6Oa6tatV}vrwRZ*pCQ}M@T=?TA}3bB$cHHRlxVNi5Oe z36gEdDq%~U$u8W)q4)Bc4PD?v+9K!Q2v8S+6Akwg>DmyS+j=LCTA0FO1+dx;VYss1 zMeL!kEe&+ZkzIdf4|ng-8Q5#z|9k2lzMt2(!5@iHn;)0=YPDvbEyDe05bJlB&+UJ7 z#?-02m<^ffEBEg4*veZ>#I*`)4PB`V!F0olizG#eVXhMvNzcP&{GXLya2V4EYB=@| z#Kb*8mFB+Ut2m5@cP*MU0$9nkIO1{%gh<*WYRQaO`^`tvV}@yMmroM$b`fo~R<-Uj zwryf^N5ry@*8=ts7~P1|VNSM999~j(!c8_XDK%@5@vLeeZRM!ITVemlHU}J*n@s zr89#^?ZCUCmXXT(cC^bc>+=VH1@^ym%oA^~y5nsRFOazr^=p8A7e^=|4w(3EecDt! 
zQJ0=z`Q|0a5?Jo|Gyd&@UR}N0E^(PzE?Cg06ExizE@+1~!IiMB4(gfT}y$VaMVy9&NmieZ%(B*-q zj>;7DeJNm4T!91AyN$?oR3#omXYNPQX4qC#O2@ZxK9q zw!@%vWWjVJ5F&_-PUl>6%(niIezGL#Wu{MKNy?f(cpZB?zVrgsUPRK-Zt4KnQL_EX z8z24>*^H@!dyt9mPle9;x#-4}dlTR~TfT%%Qy`5PYXc!&y6a!VuPWUE?ex8gDFYX6 zk6;8d>Cz)2c@ZUV)gA?QbrcB*k<%t8TzthMVckC+w}n5!NM%>OIXccG zcJ0VQS=!)Hfu?U(Rj8g;SpJv2g5f9DjkAZrDp%}0k_v8^=!+o+rX8iXLJHkYYb85f z7oXP`SeMlPD1n=-_l20Wd*}=Tn;(Xp20Ic1JXPus&l9i*}`T2 zZiqww{crn&GPDm|bG57%ODqLXC3~2Zg!$zm;*i%qAAIVaF;@ zVYWJ&G2spj?`TCzOSO~dj+vGkau@FUlPk|@Sge$F6!MXn zdF_tx#uSWxE9v<)3qbyxFv@>%xf;c&RQJouD3Bmr){QUC{}nWHQ$ym^4h>}WX4tRq zVcllJnUM$+EzsIBJ~iGPZ5I&la5$2t|w5TDNG4bTL)rykId{45k&TM{jrE2WHGW3+c5Gesfc&K&5Ei}cNnqU zIX_`LjN=Tr1a;L+UYGV@%{b9<`GI4MTGEE4K9kI%x=jfbs8({@Py>i zcHRj7;O)>E;?@$?cF(7z%0(sGCGv((aLS98uuev$P({utzd+Me8rww~574vID;L6< zc}1p`XwOxTT$Rbya+mb7On4hH4R)ti(pqYgn{gegMS6>84t?n0XQ+Q2>JVfixTPa; zy=jLV?b;ezLSxl-d^SU0Bo7?ipR1e)8h6`i{W!LGZ{`v@2^x(2io@}I{hm3O&+uN8 z^qlP%>2Z)82hFvLhN&}exLNU;#{~OYx!Rf!vEAy581*?vo$k*;cRBW9c1)o$6S;Mw z%15jwQ{e84W|O*YiVze$E5$Cn|Gu=?R{K4H@TXE1kF6iYeDfxsXyl|VUe7KJP)pvv zEc4BFB@~*$7T>1vh?C8avy$xSj{>~A7+-2CUn<9@3 zv_Pvcd6f$*VIn8P^5Zm@NLRg;s#`K*dnDS-L4xH4xQQcIsedcfp!|iIE!yDIK!lgxk(c^4 zEN3MD>JPykebbBO!MrRe_iQ^jpzcL+OqK(!suCl<*- zI$Q1U$sw(FBrdHtqh>6M>JvykYu&#h*gs&ViL*ytN>WY&8*U7kXD z(%STG;7lxp7zo(iP{|^T3@;Yl%4Sib9LhOLRTnqyYZK_zz#fy+97{>#45o;Tj?|AF zpA1>dn;Xp`%vClu@hHweVsQWK)Wtus6`B+Epu4BiJo{LQ5L=pl`j)Lccp50<8|K=}9&f0zQ{>MVd zBqJa&xEDf%xb;Ved3aAo6_c4yI$3mA^i5qG%)V5$y>_L&yOdmP;P=9$y(dIH zFX#H53mV9NN;w5s`ojF}{{F2w74pq8sjbvB5$bFaR1R;ZNrLDewueH@8``t%B-*#5 z{|<0}x>d}M2mt1c=%F?R)J)wAw-oN}kg_M&mGAi#rIii_||Tu9gwcrQ4FO#d854FSZD`_A@>@Q=m8kGxXw zU@yI=QYvmsXKIJH9rX`C!%b3%VpSn==PSH)wN7V& zsYatjUPvAi(Yd`uUjSg3F^mN~buplJkQKaFEiEnmuD#vwQ%*paxZR$2x}Mz@zS>(~BLF-5T|*6*9}6;7g`p6Ki~k^iI|@8*{4`wY#B zmf2S6_&q~!73L7XBTAULnZ(Ng3uFY#&bw&!;{GW^nbBT>o&qOB8S8rHI4QcW)lLgi zXg)&r1gN_&ZgI%I2?eYnEr-F<$hyKn_2=Vsy$G)$uc1#;5&f8bIRvJFzz#&RiP$Ul 
zjVmh@*VBN&wkBdIy|S&BS_YW-CAfdlSWPD7DaLMmYw+p?Ju5XWKT!a770hK?WSFTTB=XNwGj4@R=ABR4c5$OLJYBw z33-5R{h>deBm&rTjCw8FG^9t)n-Wcjuk?u5D?|XGnUjGWJ~~cqi&VlABiwzLoSb|j z$pZlZrUH(Q&7OE&^2f{QxUHZlYHTMC^|ss>9tG!<8znH7J8!(9&`fXeuEL|@#6`@` zr1KTJy%Y0;8rjt_w8(Me1ChJxOKU~{kB0c0m$8V<&35yy;^yt4s0pwflu5zj-Z1qg zk3BbUU+-Li=~qB8a=*V#Q1<7;hIY8^Fl~Nlr%_4`P&IajRjpwvb99(sT-B@@eD9s= z!HZZqI6mNO3L!K<$|&Icxf=|`2Go+Sw4wKA!shDJ%AKF0hkn{|V|CngK*&xS8<6HA znyu_uda?e4+GwvJpI{LL(tPv{;0kb3OOP=0oR;?vERYKQjFU_+&Y#a@Cc%du_IBS= z`EYyBkb(|MXG4NO*{4kH%%%H2Mstq2Kj0)^IKzcMpQP($M>t6%t4?w;i$}z>0nrt% zVM;K%a4O8yn%*U%FoOsZw;pb!Q2k~-0LDSeg$k4AS&ICsiPYCYf+Pgcp&q2!?wsj8 zA$8dOXpVu005!1-d;9di7-Gg9rCZi6iL!PMvVSjk)b5RZO!R#xCGjJ2G2+(MQlQS6 zqX5Kmwf4(EN&e3Jq;e_U6w9J*j#y3F+A^<2VSro3Ncj%loey{w19U?L<>EyT7!Y20cq^O=!;0ea zTfQmWFI_KpkA<(OT>w-8J&HTJJ@_8|L-M4e3hI6Uk^acs?}U-0r0HC_cm5G%4pK}H zImg-LKxRpNfMG)yh-W6s$~fanR7jH*;ebo?nLPeOD8xIK(5u){N|$`aufXH-M8Gp78C)U#Qz1M_@(O&(gD<>1enGO0 z^ZsgvMGPs>{~zxmdobHR^1Dd`AL{?#Y59gbiU<|H_!vK*7nbSxgq9kEj=KrkZzdU+ zWWe`DPDH{uNL8iuF=Wn zWw@?uUY}KweG>+JE8=W%au?dq!>4AYMA~v-EzNN8|A+m;gYdw{!)66vtHs1bN1%;g zGW>{Ic3?olP8#-q8onqme6OL~Z}LWRO{Cs;Ih-~~9J{?)M`?*x{%!q`e&L6w6VF{x z3xxi}V3(xIzybJwibaGpk^qrAozn!j7|P3YUrjvDQ>;Y8@(N?eUEZ3h$qRXz*ia;m zt?C^<_MdcWP4-H_aw1bNi)uoe=IkI{-wvESbuako=7EvGrQCd3oWEQmU8{sT=6mO_2QLZFo-rinpOB zbc~_F9PB2F0RW%TKEU<+%9t>4)Se2O=L=tC=lb$o%lXOITQ;!Ite6tl_+ChmKe>$E z1gIDN4j3-e3rf!a#rL3GXupm4g~muc3m7=jCUNn^GFG1?kQ$)+|DRP!AUqVA(o&QJ zR-z7`4-%OGwicn^zS#{VeydNy7RAXt@#PhT1+oF(z{Jf&<>H@=@&t4OTG08>OE--j ziy1Z(fm&kAJr-T?D7icb6Cjl-npBi@1K5A=7EX7a4l6~ALVago_370nqbS%25vVeI zTC2~Gr6SRYT4V-~R=ydy_TNW|WN9D*u&aTEtHw)}oM;tP3++El$`qU16`B(T5e0~h zNVTU*;S2xeE;?`qh6;lwPlNwvlM(+WTDU3*vVqs6K#fq^5%C~Xm_(|eLq~1Epv?Qj z?*;!{Ujg*I;p7rP>Matk;tiB`IVaYehWiC?05ysOSvgT!s*hWyOlgXaDlARLrD;}$ zqD`PV#L|q9?+)}&mc@X^(W~&|YhHS#;bY8O%K&7NLJQ*9gCW6kc}Ia_M8wa4D3%j| zlDX>1bM;Sl1BDsjD7QA|(%Uxr`5yrYu%IrSd>F*U;9F(1hxF=={C|1n8?60@*1?71 zAn)KI6O_8S5*8CcR0Q1XPy*K(mxdl_bQ>^WMeGUC=D$k__Y-JMV+u-Ews2imcE0q* 
zXWv)CK;y$;=4Z$AORcafNx(xG2y_hR&@2Tk;4e-4_iIvIAv}0ays&YjS#6!s_q{dJ zvcMl}edm>oH56;sC^*CK;Dg)+_JdpOARH~KzX5=R5}TWy+^4~}prIlhsKLF;iS|0A zNO~s^PMp`48bebp2mrEE&|wwLmI_d|f4spw$qWCb#{8qFmlX!S90cD5gb9cJc=**N z+(AYQ{2+{uB7%?GL16d$v!JJBRF$qQDjL!2DxY;hYB9@{R6uQjRa0w62@1n71?%g1C=7eg71TZXP z{sDUej9S7|RD?_%&o6|!!{LK+jkp_AYjL-6QcQH-cUf~aUsC$A8v*M#KXR}p->(sLV|ZMS)cTf1 z;3a-cso^Me?SdoRVL(pF_i(5(f}z!p`<18=)o^aUjo_@qvUEwk9p(809B*B%9k7k+ zBWoGpUxKMhJ?DcKvu5|=Zr4IHpFNfdx%osUM;kz}V@uH9*YYoSFxk_0#zx2^F_>8FWMvkvCI2ifVsOka7p@Vh@3L=q~642Fo{uFKO|!aH9;N@S<}UfmGUaye%VXXb672d-#=S?hLmbx5(zZtOMStT#NsZW~? zIGwi7r%TxibV8*h^T;!&ic=61VJQwz>D22aZZHB(1$yZni3Edqiz|+46S0k|-=7*$ zF$i7SHJW%oJ*xQh=G2VeK1Z{%uy225k*j{W$FXl?19DiFEzQ~@0X79wMQi1Mbos6# zQkP_X;@rmJs<{XQ^#u=;bV)iD{j%Bh&{7@AWVuN^IbUQhaZ!YAuOrP)08nS2$uN67 z+B^+DlfCXv8;W0X_ukSb@bRO?W2b83C-CY1{3M6Zk#q8F%44y&;`u~qp(d>R_09MZ zU%ueiBudx89g>h&_x}w7Jmn?C?&{hI8GlRs!g!oN211o6dxD@tm#!nH=z6{$#X(cN z70iLdXBLFfZ@Lbo!f$xn4~FpsN(5 z^g60PPq&`utiv_*DkMtF>~iTm()RXPBwE=^rA*Znx@#)*WXDJ|WHls-;dEI&Exk#M z3A*1y3$%YY^&z^o00on+CIq_IzJ3uTmfoWBQ~39`73eW*G3z$J`h7gbA$&~e7rXu@ z^Eq%o>4~Qc-gbICy@g9okYu}k!r&H1evB5XQTMgdqB&Z zf*DYoAY&y|%z?*Y!a|6hg(zpqtz)YTywQ6F%7B@iWqV9>C?GoxS;^;B?kGrFJP$k-grHrJRu-JfK3Ul zz51SpiR#isR0JVV__qMqkn|Wu?^#5)040C;GSX`zQrBqn`9Qugw>XP1F_NVN|5S(^VGZ7x7pv zUn?czk_pA+=7&}F@XZ8Yh#^LY5BK3cJ|KJ`XnT4lsMi11XUh4jAjOjVD{CLp?>DgJ zwCz|e@d;jc+-^`lgI%aEL}$1D$7#sVq`kR5?oD-D2Y!TR)SpiZn-s8mon(&kFPbr1 z5}2_hxDStY*?D@;Fqaee7F+vvQTOa#GX72!YYTDWc3YG@%8!Ne3D3_?%#4v%u*X3? z`iLP-B(gMkW_~-Q-6GHLI}N{!_q|eWyl%^oPvIsEB9wm??$5jQ1YX??F1_=$c&5T?u3SXt3av29lf4T(*8ea%V*juD zOR-p%9J^6hs$8|nvIqgRl;`WN)x;d)v*Pl+SuUZvW&*k#6KxR{DlVUOZ7F%QCJk33 z6sVg(A;iV(UB70l#Bx~xV)9FVAe z;oN)ae~gt+2OKSSSbakQAt&|cI_HBaOWi>_wY!pr_M_9wjXRwFODy>EP2E^R1pOK? 
zC)&|>p#^u4Z+a8U_jO?9LlNGBSuaPFa^V-Ct&woY>)xIrdwsdkb0raj9Qs*NZMN-Q z5k+X}P~Uu`fXqBf0XXN1^;UuZWDWRa=}zyUDxOeX4#_4a?WON<_TweoxQN3{3Uoyu zk>KhsZ^_3dGb`=)z0Z5?hX|l8IXiHr*TO7!;wnhkiPN*r1=3eM+olWZOf^kLoa+iE zGW4B81@XB8pNB?&I4X^xa|#GB&OUN|bk)zhvg$+I(&%Ar>-s{jMd$TVgLG+UixyOUq|X$Hdploc zi7)0Ci|}HyG%B24<lrN! z+JhW549ImBzwx6MIienVjteM~^GlU3Kvk2*D-FhF;+!TXzi-yR{$NrRSYs`|q0VxU zG3vN#T%o*Ym$Vv??4T-pY$MF|@F>R#kMmLZyl`dBNzVyMNJ;6Z%_{NA56>2hyJ)J_ zM+nxB`aZpQnV&boT8d^-`yvdrF@mHfK z>je{Fr@U$ZWJO|eAuHq$Jmi=IZ)1KC{&d>CX}jOxGQ(UNZwH|oDeQ4tui@6MchYYQ zL%l;SJ=N17ddgnT*uYNgu#OeAE^70T6kHp4p_gyQZJaKFFrT}Z?B!K&D}G;>cPF*l z{48nD45GDUx$XpC_06A+XjkIM_3g4Ba9F+`n_~a<#+#Qh@(6kfO?#@co{7ccf-A;d zHF=}M&!?;P%Yt^K2JY178n7(-xn_f(gUrth%kN33Ml<=X{;1X}RqKk)!h7%etg_Ue)q!I%LwIQ=qAk3%IF_h7V~&F9 zBT2fScwF9F8RBjUN6w^|hAiGacS0E@8@5=F3OY~RMyr;CMYKIs7907&G{o8BulcXq zHpT@?mK3;-=I<`QCUN}7LgACh*5h+ghCLx_hTAlKUk?w(Nr5$uRirG^VFg3r%ld_n(Pb=Rf%V4K0@5XH;tUU zc!kMiueOV7@LcqbRc>0`t5w#lA080|w{%BP3a3}BUd-gW7E4acHX8dZv-qdlxfU=| zrr{a9p)-?2!Bxghs7=qWjx-dZX4xOrhI}NHc8mzS2u{jdj@59w9}RSO3s|q6Q)kjP zJAd#deb3JDPxS*#qYU+=ABs_LbRiLR_@T1@W#V~Pu+3S6%QRhSoE&IS7u{&oA3Ji~ z3x-OlUFl*=`GmS#^pQ>}Po5@RZK^J_ovQrKTsPgWB|E|TXifA?X>WU<=fTp;05u_1 zn`;uNap4|Y4ZZ4pO5<8j>6>jCFtc0l$lM&T5(Gh0@gs7%|21sF7 zUq0F)lP~6WVUk{y>6L#ql&4i8NVK35+P%Uuk~Wq+mtz%ReRMKq`DlB8s6gdI+DG%%A;!$o;-yJGPAebL=c7rv-$ ztL(gUJI2~a%soM??~U=dHnsX*^Gpt#bQGp#r+WI|@pxYEz;N~SVp`E%nJGK5{PT{m0 zrfHuZ>TIVWgtQf|h&-~ODl^r*RPfroP0UvG_J?m5<{0!@EzMc^(^-8EDzej8HPN>$Ui4uUjH8%GEQgFg-8kxz(S_Yi zt4UgDyFHwiWZI^>S##c~P4o6`ShM{7`JRdF&qwa~IZ5G;dIXo)9`bb`-k;aBsEf9? 
zRlhEB5Y={+6J3x@+TQh9eA7nGYc)3-#qo3!kvyLJ*~H)U^?=Ob)qq{C+EL|O1XH(H zW{Wc!aXCxFKOEifG7ib}UO0)l-$I|xzfB#YG_~^FbS=xqrP-_O#KW;YWmy0+9IV|5_pT7=Dh>1#fHdh*R!H+mrgER9#i}1gHs0iT2KV}s;#`2 z15(=@gqUACUgG8Xa8~cuSgwyR@~(3|QtoOB1L?s32?rhb5cF|E?^1)9dx_uvdLpWr zM-ZkUjPScQc5MkoIZ-h}(9$Y-Wt>}wU)ZgC1doD2%`VSFPLD5v%vX!j&=g;@$TMS} z?(vd<;t(r-x9d>!(N?2`w@p{a$8D#fb&JQjQG=6#RO7GCFL$oi%*Sq)l-8ky>m#q= z*Gy}+6JQM4%dWC>MJ<&YucD5EKg3Vma55;XrDT4m;+;1pQ5k!C?k3R|?@$Xb8l80Sg`V^mfBj6PYY=`VBKIZ7$&%q| zet=2L@p$nL4g1W2f@@CAwod)-SFOGs`Vg$-<;(uO49~M3!>X=DQ>`|yX7GFjO8kkkoX%yZb@Av0?2p-N zonJ}Q8WFR0wXOx{bLTv9xppV@?aqGmkDlj>Tz1lWlq<*G{;i>G?Ul{g1J7q-Y}zRO zcDYr}MU>8B!TffSh5E&FZO+6dv7G4`*0g1M(>)e;@oXDVDaBZ<8qvt@70y_p>(3LxtAoYx?849vTc zuG=RcKv-(3uGd0TJ{V5)W$J~hZ1$Bz=vl2+o?dy|x0yGo0n&^eO392tcwDb?LPR#l zlczim7}JPCKt?2m9&#XN!#&|K!Z>!gWcB7{^;MDyO?;(S<>svaN>zIyU5I zt8ZJHI5^ZqN`Z*&_izMb zuDHp!Uyh@aHYEwl+%Q+T^#8H-)=^P*>;JF<(%szx(gM;A(kUsev~=fyw9+6U4bt5m zBOOC`Hw@jKzuU9c`y8L|dH-Yy&fIbBeeF+OcCnBwO8w4fA6Wg<5I5gn^!@7oziY7F z&&v{}M*_)^l6%LJRFn1}BSq3y2&|X1g_)A0mt+Y{s7}&!uT7vw zv$$ztPi|cW&Yj=c8LRpL9^#*B1IU=ZpY?Xnoix$mGQe?C9quaFCZf)I*?XL%iyN_M zWil(1C*s{qS_Uz^-mwtH-Dl>RsOODDHkOG;T3QIdwr%)1L9tT^5Dpo1{<|1Uz4!8d zH>wHqCpSzoLL)4dLM-Nmn$y5JV_I{ic$CQa?V~vu^f4|!uM+%O<9*bxyP`a3eq0D-bgYCdNuMV`)@UY zzmYNBFIKbZ`CeHEx>?tnI5V$2uyIWA2N1tz5s9$b3HKPgBlvINy^a z@@5i%JT(1(kF7rZfK{Ognv`LKHTnWq^J&ym6B3^>?be#Lu_M@5&r;>1N^py@$%}YD z(A!{7&CJS5mM-ADg7`-*#sHPm3mD}`97^2twk;jI*I?{D2OrIiHg`=Y$Fczk9vOPsC{>^gnMCEk!Ee5GOXH>jT}26OjemFxc|d(CQB$gYsNQ-HFiErsohmCv-w)(xYL;HojFx{U*!N zn*T$FS)}uJHO85!LmT(eus&?x#Y{98fMFiK#-#3XoQXx&4Z(oilh>Kox8Fj2 z1U`z4-d`3I0IvRdxJC*{vjsc#z_hY8l&7D|jFbMNEPg>j4=iHfq*R|V<%j^=1HR*Z zF3p7|Zp$=-302Jk=KQbzsjuXlx{G%tqn91YyLc~r1@-nNfXXzPWJA6G^u%jH9zpH? 
z#B+%n9hrXa=)Cvm-{_*gl78KVAZHW4R0>p8dH+Xl_0J9@B`RuoVcO-jIVLSlNfra# zngT>pVb7dJ#;%@jSE=7$8uxNtCN9|Tnldc*m^DST4Y)$T;EB60OCm`0ot#TcA={XC zxk}}|7lZ&*b5`T7M|@t&I!)5&C;m30i*7iZit3}(e4^xh z;V4HU!hw8ME;wBc1*OE56q9C%vVs~el59u|xyWf~i!L3>1-AXW${&H-U$}fYmfr>B zYa6Lxi?6TiW?#(PaZC*W!EuG-(z-ra(jvxre6?~u?=WxRLaMhiQg$nIL2#C8`%u6Np$mo z|KEO(OewGr-e0y$g5YKyEQr4uY^R|G?7jtaQ4PLg6YcGV-g{s2w&m~jveh{X~ zk#$kx$t6dDU(mFR-Cq)Ajsg1}bxthhU{7#E(dwW}_ZSg*= z&3H;4+PH8_w7TEK&$|DP^?p(}%(Dk*Qx1r~4f~60FSZrfQ+AzWMj}A{=dmc+H{j&* zby6|9o5&wqSsN_M5}a%hH4nGFn;*oQtIE8tSWy}MUH{ZI_KDuq;-$Iz60ivTA7&5O z+_=SeAYxzGZ;pb@U-SB3G5r>}Vaz1D!7RRowS1B(>&HzW(P#Q`18e`~Sl+gCLB z5B-KG@zXEAt+XAYlOp+xef>>{6%CtZYwP*ssRZ1)^9iaPkD^3_{7ZPn#Iwyo#B#-b z9odf}*+^UlxFf(c631*ir`3|bCN47dWu-3Xst8kq-ImS~BLl^<#?gV- znG&Jnh@B@YXqHOIsoz7fw>Sm=+rMg4H1i|O-REPrYnL7v5*9Yk!B=y^X!|LLq?Q1@ zGy+G?cVcnXioOu;LC$ogr+HAr???u{*DZyo|}cs`9c+Lv9e+QuFt*fzOHRDy>Tun z5;VPW{&I!jB)p-}CxBGuie7XDXuNxFr7H4_Zo+^3hRcCWsWN-uqr) zZq7U{DGf&$4rod|kQSd;CFbtUrcB54v6XsVGNmt8mfHr!ejI=h zP{qxd`mn1fSfWhI1!6jD!n}mR?2B+qrcahiFaET?Lp_|qQI4VQaxQT7HgM2(N!O<~ zoeT+@TKh(~-nC6<5D-&TD8S*PlSM3q=Q3d~x zB(>HN@+L};!c76y4~a7K7E_C&%Y)b~>;b{)wxn=)ODwpW^js0-JgyNLc)V0AvQXy& zs(Y+K3qTFv2C57b?n>C3*UNsSy$-u+Xsr(Io&mt<5t#oL)`}YCBHlniY(0S?*T*Bk zRSgXDg1lVKZZQ9Yw2$t=@*O$%dDLg}rTXX@ddxHSIW1NK5pRbREn*WaBiM5S*)&vc z)-q;OHZdd&XUlTN?`uC83@RfI;=Z(b))@Z&c{7=~cvBtm+-o+|V}-*I$8XB#$1lIw z!@J4~p?h>wKtM7wJlhs?7=ao@2tz|liLea95F~!m_btgc0LCX@*nLljn^N!it^ymx z{I9)*#?L4Gm8XD?Zm}R{cq&1vn{A!P#G4IXT4(>?lGvkj2yRPGcG{QKqt#E`!?~3c zPvdN#hSl`m!8DXWl?2pFt_t_Zld{r6O1ams{g_-AYl_n=~)=@QNJQRITjWPE2bu6qeCbC&sz=xPE=fL>P{ zMh8@3z9KFdWug61v8MO?DBOFKzxG5?qd>TaEuU!CO@*6bn|kvFr3K;41F&Q zL-byPbfrks=T@^8`RwfE2O!RVy~>w$i`xSKNjE#!W4(%QRYo(dWsmdM_cqU%tom$< z#p-CGmS=m{X(EzpwEdZj@o+$NC5i#(eANz%!P2E6BBah%+yzzfnDU097e7~s`CIwq zCxtXCLkZiFX^44su#~t2 zb`Br$C>X=`TWl96sTJOHdGqz|!lcJ@7UfXQi_eII>@RO;*GUcW^c^C8L?7(Q`Q@b} z_-tFe`ye7V9Jq8%+E-_-L-J%(>Bkaz=O?X?sE*j@aWc9rA!MhoZb{B2l(;_-JOmtF 
zn8oJ*)hqfV&L&m7v9_Z%@3>l3rv#qEC0w#Sc}}e9iF}05T~)bNujP_zzPv;L8O|9B zoc^}vkZEs+?-|zSU5}(aE3Q)Xdk_2*%J91pFq1<(&t%L`7R;kO96Q{zx zoE?p=M}$a|yS}6f({mk7x(nn;PfW!Bd zzywV~hzIwPm?tAIxw;`RvU>v}aPB|gacT7IZaftr+EBndP zvmYehDQ(OoA+0n!xs^EX73x;m=+>L*N+^8&U4mudIVDCWA^>CEa!mm0PKORVc_lXL z-Bz|Tp<+Rm{2oNN7zWmF9NZWa|ORsSCc9uCR-b#N<9=gU(c5bLrgdr?9hG2MB{aoLXtF zfllC^wTtsWPi^P792PjbSW(~ui!d*&PaYH}raU|KfY$5(ScEE2;p8^-cOZtCQgfx6 z^ao?~q+Q_>pW=S^qHlH!!ZF}YSlZ$>^rHOtdPWc1=SW>zvC zUkeE1z+;U{q)20s4_$n$_M}Qp!mJZmKL{i*P&JK3>T|GP9m_s#iKf#cE5XG90Hyco zK7VyQX6e5Z!4F#pI2kvRu3Pb1W&&WvF3=iYLos_&zEl?wmr z=InB$WO!JkXyGBIGraGq@6nzfeNV(B@$nNq%bD?I(u2ce2!b26Nx9cn7cZ9C*)}@6 z#ZZ_x>sG`&!;Qc(g+`kkuUIj?Zo zo#mvtekvjeCY%57wk(;h!fvJQz+=SAKshpS>&bt`vIN1TfCaTiCms+Ox<@=sk+GKWa`@^gyvFaH7*ejH5fI&br;=Tli2J5|;Ag z7SwKP-ix2GrF|w)DmjXRjO<9w;Os~%lPBy$*98xGpmAX+u(I*Ual)N0Pl-|rGSv(@ zq8v^5s!2kMssu|xxm>r-L2#3_(Xe~u@IPjbotUqh6n!3i6S(j7-dtl@zdQDk_X z@XwEzTk5lzq)3Hoeuq(>RI+Kda2L&V`RyH95~nJgLDA&W`C=}l>AmR?j@jB&k!Y@p z7?qEFFo+d>O&ZpxJH(vxcachJbc0bp&h7yn z4cPGRpjt;KZv+#sW?xz$EF*V{-4XGdt7?y}ulh7r8^Kgo=Opw8+)%G-0^+75_>_n(bu(sbRcj0hwXd*@ z^oV34=h6<|{AiXM5TpRr*jjKHEvCtu_em`?t+{L;9G6!La5QFD)yD%v^8_S%N4B?ym(>wDrPh)3VZQH{8Raez8YF5O>tL}2M8Q;y3 zs^&J3E?qXb(-NARq<>~SGQ#RHwlxR*6c0KBQ4F{FY=|`SdUf8fkex%qsi}y3pHG#| zczpMQ2bGX!=1ynBsvriZ>y+oW!^8|vcPo!-SN&EZi{n@QB4^~XmhnRdQ&|*;3d?* zUqQMg{*@ypDW4~owd9KB4d}TSC@Vixz2*|wbq?|)g?9n7{ZOEJD(Y9B- zlvR-%Gi+yXzhw2~%<*TG-X`bSM}JFcE7oZUdL3Bjm2b# zIcWI#B5X?j+|Go4-F9Fb2@vB0^yqP~2rq_fd2wO}`oxiP~bNUWLkou6^cye3cmdenJ(GWG?g+ZCIECBHo< z<0Ct7(lE=V_X+ql{|sphE`7)9fd$e(e1ci9=n}z9643)8s`E-4Xb#2T#Q&f=b z^@Z7;a__0im}hr(7y%peuaG79e9`#roig8RH(wrN=%rx%SXuu`ny>8#9rKURBN-2> zu6If1rX!6HE1Tt?+{%{gbq4g&E%^#|3Fo-x9i z?^l?S`Lx(mx;5Ae+$#{cgx5N*SASn7>5xO>qcxlM!?XZxK7t7}DK;0rS=jbQP18%R zbc}xqj5M-B28JCAKo(bS(?+Bkm&>{y3&fLINt|U1hKADwss)Z3Xr>>ARvv~dydUBX zZ{vG16!q1U3O*B6;NkOIl%5l8np9L%^|a5iQ>0?ZK{HyE5Z0{}_jXYAC@t`1 zdT)8)wb$4Q* z8v3CY-yruJsg(%PJ>F<6w$`I**Tn()kJk>LJe2JbbHsEBT~KriPl75gRXoeYYKQou 
zTG^Z5S^MPxv2@4!x6=nJPs3oyu*c$i77aF(1=35LwkNme$tp(}1aYMiXz)d6)k+h% z8da&o4Sc^e#|KUQ@wln{WC8}JOHgIjZ{B;T87LtL5YkGaBfi>V>f2=($tZV~-ES;u zdIr@h;f`-$l)os@Ok7)`Ds_5j&>Y@EcZm8}{3^~|+Hbtfn5Zk8CKEg~wep?LtMmIP zt>Pb^Xe_uiC4`Iky6vw)L>~8NB=z}gvw@(L9R*5$V9-~ZCLisTjCf|k?*c=W-``cj z(*E)z_$@ZTg~U^yY_a+Bc`cu(Ct8xw1f_ji@Y^p33k%#IR9C|ypqB*7T>xc=Dl4iK zIF!QE>x5M^6%Rg`;X{ffL-7(L&{caQOgx;Yl75c2a+P%)PuzN%EDv!mM-tgkzCjJf z;di6r#T+;%h%~2o0#YCyC|ma4X_=Sb%oYE#bq&0jr?{NQiFV4Ip)?2b?#`@)@7fL| z?4B;qPE4i}X;!zDA8j5iyt2c5HYlHN4$Mc571b0Xgp z?5daoOfv!Yoe11xbT+F2Lw3IXBQ&9o({pZ%psPMC`+$Kbamxznz)kq6KjT5RF)e0( zUm;Knd^hz#BEb&uETSxcjJsC5?_U!B0n7p?O7CsXjQ(Mv>&1hUwN4NTvZfjC1@cS8 zNvw)B1Jm1ur(4Ds`t^V*-sbKduAHrYv_gL@EDf#WxtU7$wh|dqv5n3XuQ#dBb`g)6 zPEZA5DzWOd1)x1Ub+l{d^mwwGpE z4@ErsIBAIIPHRR>JEW+(qb1|%H=hKO7sD{2I$&m7k-I30yI8!(0{E39Aa(F$-$m)C@6zPA zmm1!E$#X&j%A6Iq?(*d7aGc^LwwwE zVc%q0hK2~K!i9LShb!=5{yC;+Pj03idz08H+gEXu5jw`3Sq#L%kl8q0c zqf83f7nc;#_l9C6#V|0V+@n*sDg*SGQq^tT8-PLO_N8EyKMqg!LUHxa!3IC%_a((` z^}*grk1tSR(oZ#N4tX}gPkZwii7u{!R~-jc_M?vP^sYh^P27B;R}Hx_1D*~FdbrL% z&ewh%>&8C!k0##n(vzYJ6;loGvIqTI8!m=()^{hs?XKGK*iZYw4>qQFuQnnCxp3j> zUm`_}ohUA`1^w}`m7w}ve0+T^uXla#Fp06-qc!J> z7s5$78u44+@NV21Xw@p`|MV*(z?z!0u9P)gFk2}6=daxXG5C(Oacg)~eDt(~s(w1e zGh!-ZQ+pc0_gN%P(eU{k9A}BwfmF(^lBV)Yxh$3s0%Cq$xw~eAM3SyAP@z zuM4V1QWAicL@r>+ZW93UAlseEEXNxkTNPX`&g#Lrbzs*dwzh4Twal%B>gIrK2{Dkx z`wo)j=LJ=;>tUXdVHg}&{y2DIIh1{fBq6|5W8G@RG zNZ`qkn6ThqOAGwHx}eHtQXu$)S25;Z4*`!3k1pD$T^W3$1#K80EixUZpmSY`Jq5RI zLw^;_JU;-qEciAtAv{wos6cv6!9GRM?&q?l6%79Y?-F(2?UXipU~zWL)8H3XNUbJs zfdN}FN=@3ha>&!x&mH;#46>RQ7E5z!e=Wfa8ati61WrvP4_xt+)b9kaPZeV9!_Ac0 zGXVQ$aVD^GJIvflu7Z~g^fbnDlpxldk7G4OCI1u2GfLyf2HKUMlNPxfI~ZUp9|X=4 zbG7kpxH*Q{P28CgzB`R9v0U%wo1D|Ue&_QCP3hbyg2d)4lO zBFCb1Dsy6`^0oxE!$D42--q)hrM}dxxVrjEdDvm)u!N2CJeY2-`~gwq4V*plIB{|7 z(m-Zm0Y;s9iu725rS`nuYic|uhWGa=RSO*+C@B3zhyopAeacU3<|ZPf==ug3sgP)7 zx!&x~AI0q;?}w#QCekt#V|Q8YLUwtv6rb)!68KUN?8KwNZuL}=^yUB#hkHj>I`keT zj`GIsuvS9MMN@4$U0{n`0&O@ zPB(fvL2>yW$5CgIhYzkFr8YP7JA+?%vfBcOR?AjZat!D4TI=H(^nwpWpq)8E|{m#GL 
zSKD|QNO0QVl#uGUplReLMPCR>XL;i3$#~qlUZ1@Iymbobep)RT{s(btO8wL2h8d*> zt)4qjrKP^P;etqHkH4>$kOn5VGV0p5XQwO^8JMTId_ONG+HKi19|kG9ebj4K zt{>}1P_;b`%~~$e{C272d025N_zD6*FP~IgfQvgNa-xz|CNjp(?$~`OWkf|ip66AI)7&%=)p2ys2<3n`Qm|4 zDMTQfw^cN@Smf>XI(H)!?y&d6CXl30WlN*BZ>+C8WC!ohrb7qo*?Xsm*6z23jMZb%e{{(#IJwwL~pT=G6oh% zG@iZL#J6KCjsxU0%vhu0C#ezWJo;YfLC$LDwFlYjCVJyJNpbg!AP zm;uVJ6u8xt@~=EcKPtqVuyzJ8DQXtEBCuAFBf38&z(3?fK;q)_K&34gKe)&kEI&G8 z&hn@*^D>}iG(spJm`Tx!HRlxgRT8=UfzdY%+rjXf!xwvKme9hYb+vthupIMze2D8_WDCWHX4o)S2p091Y$+3FU|#zxa&2*Wx&$O z*`jXSNFu*R!*Cf+7C5*k2ggVLr$bI9=}}QaQN-}V)tePP@Hn2OE}j8Qf3_av7ZcG< zz$&f{aIyPO3x^Gs-TxcxWbyNB8|uzPU{>a*DNMkHd?A+krQm!~>3xpNINx#2dLAm= z)RXD`Bo*Y&cQ;85B*15w6uqZF3{$NjY95-3+aY+FDpY9wEgWFEilo#=;?(IZfu`sd z@y27r9apHFJV@nVYRnffLs*1c1peDKlGs3uXT(>uuk!Got$oZBAgc>kRg`WgEKvv^7Q!XjzG1Jjqe-7I*XBDh1jGG5|qs{~&F@ z2{BIy9^R_F!pr;i+lj5UhIDA@dMOd`A1@48VGm(PkBwwF3oj6AM`-j0f30b&`LMJv zQs1mzVMy;kpIAkZND9w?{u><@$qg)@r`kwZgPnDd3xhZX&*v-0VMo-Phr zoa#-Q0i8BzBL(AX)SpoiYjnlBm6TmXWF@W-pN|(y$KD=TLH9ouq=xv@tQ9}sl{1~uJxyfXO)5}?h&)2TgXA6;s)pFg zST9EmyYJi$@)wXBm6j^>w;RfoL_DU&Jagc?xX6YwWgAzTBGE&(t#cj=6!j6q0VBuO ziD4>4EblELtDWY$M6ESstnylBbyZu}&NJ~a}D>J`U&O$J)_9z10EJ0-Ne}?9ktUlX6#GW2&??{r;YUZxLx?)RfJ8`8~_^ z<~+uOlWtN726SPgv7^AnNu-MpNaK=vmk}ZY3KICQ^XpG-g{OW66Na~Bzd2mBpDmN| zdeRb!Qcr!A!ecY=G~|@dnTty}-GI4siP8pbHQp_++r1n$j)l##M+{UaD#_L&-{Buo z1M!g$6wy+$SHrERB05nG|Dd*Y_MtNoLSQkhoRF>ci&g9=DmZ^hl2&glf7JnD3(auZ zm#Rj1_*xWEG6#!6K4^RA-6j;RX|i~Ov4Ubw9X|bibi~N^A>`O>Tvbddec!gBHDh~{ zEcWY&4AGRg+3SvIo^RWKZ@Aj%FpZ~5Y^cVKB!TsHOn3YH<>fX_=MU%A@)=JT3g~dJ zO6A-a-xHMfO-t1x$MWI9afQ6|Ep}>HxGaD#_bFA|!>pJPecT>N8;FG^k_)TL9!G1$ z{K){*kNe$;PS4AB%c&q~EN28c*OXn|qAJRa%SK7gll*J$7ZeH*=~?UbjM?)gPCc(x zk#X8v_A)4HUse_;F(Q+M(WGw*5HTV_KVPW2}|mDT5NUK+qS-IzJI#EUrv^FOg{^xmrQakk;x-{%j3Jn1cETd zr=13(ebw0bh4$6*fM05x17iQg?rLu?zjeVjGOcaVT=X9JV(Sd{%o?2upQUI3=b=IL`+UXzbp4okj`QIn!Ba z_zSxzg}mwNi2ftu2J$2kUHShud;s&JKER46AL+xqYsMPombRQ#FOicuPi^BRoOQuG zYcjI7Lt4W$FZ~>?YB&s+R!WJ`x#4=qSPY^fTr-Wda3ZwXt^ewm`kZyrao%Oc^#LdBWY_P-%4~=`Bjk 
zTPj=Oy?WDWJO(zYj84+b*e)1;E>W zs0n9P%D7#lZ7I6uv?d?h{r6n3pR(899E%cM>3;$>a^>c~Ku^{-#^z3dRWp7!dYwe% ze(7LZ;40o_JTMiznN^UbZzgix3^mU4@5mcA@@U-WTx4#zZ)2p9q;WytE?6d4P;b?9Nh0_)Lk;NW!;PGS~~$^s2S}d7YqzvYEaF&Vs+MoodeD^{8yyo`q>Z zD*6)Ya}?5W)a2TjQd#ecqCxXjb+X~(=W@1(Q=<)-Z;<%osL&#(l^I3*a8NKb?7nia zfjDcFW;^IYHj8dQhu=ihC>U|_7L%*jdXWV8eQLh!#)YT$9Dd4x^KwHkhaMyoY%1SY zJ4TdmXQ9eRoF72bE@qC@_u@IHmyM{=CftpzTbH{$0?J$f(jR3+Bq}pCNgT9A5Xh(m zNoc=2Z5T&P8&ELa^$sxTC`-$aH(qY_@YSUk7umQeettSsh)m+@vp47!9ZuppXtVP5 zcq8GZlb#SC9~x)^J~>0NYAgtyl}V_Rxhv1OnZbmz8L4kIq@RNwAh4&)le_6>N3?2H zn--qiub~O(BoH^|+dHxyy537^GAr~bPjWzTb*V2Y{NHZo1spMTG%-F2_V=IuUzaPC z1L|qy&eeP2ouO6>l&!nvr9PYPv-q0S7A{}Iub&!h-%Zy^>%7d0N0`6rTcfaR;}h_{ zC}7JzIHRUb_u9>6L5JHnN~;wYAO7mBFs{qrq{KL4Szc1n()6~mY;#GCRQ*tJeHn3f zC!g(JEgmJi zW{en{q~c)WBzwc8mQPP;d5Gy)V0z~syUhI~LP9E{k6KF_DIccE!!fRjyYHN`U}fni z0h{8d3(dAvLg52m7A>8}))Nncx_6SzZ6>WZzCH)C!QP42BoK!zheU3Vgnxs z*Clh?FuNHoh_nOCMl^qdv}e6tMQa&9n(6&Dl2$vH5|UFXqX> zHYFA!?dC{b&^rH6uk738sxaa)U0u~nKa9WoVF{jb%FkKI&(=wM%LTr*Mql3pWT)?F zdda+OFlf4+9_Z+mFsi4aPVdIFlg|-Hf$pM6it+Xb1?JDUFv&dWDW8%z#ZFUGk#Ehl zX?MNd3=0QcN5TP-fpglTtxZpso?U`~#eIqrl?$bX0I{9?kGevB%nSU7#HX$03jGn9 z%4AY_9i+@dH&PQ+t(Ez81~iA)qTPO&_3gLY5)StiHl;cI#d->?YH{5pcR{gVk2#8LBF$0a^vggfGX4wZPcxV? 
zLlyq%O}k<@818OSfK1QX{j}v}i@{{hw_sIFLJ~e@=OU2KuWzz5Q{>mw6q`sqr6CP( zMifI9M^9A0TuEQT-cWF0SWIeClVJRNEh#Y&oY+10Y5-Ed)%d!nEL(el%5nwNZ$VwK z;!HZf3~d@&T&C99Pyy?_UgJbHLG=YT0)pY>zBD-EDDg#ni{20CwhKHgQzgn8y@mG< z6GLvb>Y6KtcYCev4w!WrjKkXBA_F0;y!|^X;}s1BKML>j&S{(@gO9^{=z10J?t$?a zTg%SpODwulw{D`2EvB)bcN3R=>kYrp0)p53p6{bvs(*|n|2!xoI5@}bl~^o0IDLiI zzM{@dexuf0-=zp1&FYQIVH6$k;>v8msh-rsd_a_y*G_t2HRdLq{mB8{81-9Kl!i@w+a`KDeXPq*Rd&wUYfN9pg!@_ zQ@T(#D4dqoA4A$|9@}oX!DYD<9#Csm49n?-gWWGduIO;J8YgRg8n9jl^Y_X`RmG3X zH;n=d$PK4XD=2o88Q5s6MYOPY3=wM@(h;QC8VLUJ@nQzU7`x`3ZQ+-9v+Zk5mW#4^ zJ_t2Ml&v%T-U2SvCAte=811eFGBwaGRhk`ukrHPf6LNH9Lcodr?;FpIc!S~>{_4ls zYhUKymxY1LCB9n#se7fhX7d0ftN3xhX2Lg7I5s7rn31oi*i*|**we)(Q_ti?N=8MP zx6X>Y8td7ye) z!_Pi-V%=e&DOlGtEYIn~N7#v`!~97N)xkzE6X1-d1?qD{CnFss$^um zrS(r_T?IdI)}|M(8ot;t(@S`+9Aw&W0ghq8DGlG{?fy$z=?n*CAja8NhX?Tc;Jp8r zu?sL+O2Lwl&XnsWlJPIzLe3D?e_F(F+|AgC)A58b5Gm2NfQyF!dyu$N{YgfueKJo#;NHV@sxlNj z5xbOg3b<-Hzdb>wq~Oo?TsaT#%830YJaza~&^)c$GNRMX?Ux(#>$nbG4FZ^o(@BW~ zf;sEzZO3MRO?OugR4>B*m*osba=*s`wTsw55)RT4$SoQ|b_uka50%{SpR(+w zehXc%WIRrW8Q@;uCvvVeCK@C^US7^%pye*z!bqS<{g%W6gsZ-mGQh#3ZJZk>r$;cK zOltwvvDC+8GhED7iAcmn|E*_{K?z9_uMk0aUTqYhqGCjf-BvzUV;pSsc+JlspTSc!Z0^IG}#Df!(aI)!#3QG>91PBz`R3*fC?TZ-d}1U}Zk>^SKn=S#$?3U*a!-YE0o9cX_8`?e?9Pe9$S-$n9+Qu+j3Al}}?04oqU$WO$JwO0`R zuRi)Mjghh6=T^KZoUA#LoccSk%Yw}o5&XQrOfl|*Ya1!1uv)tV8*xs^+v^YD41VF& zh79gWeIl4$Iebf_d?NuH!4>@@KyFAuSJ%$2jehC7OR3_sShnMC`{10h++m?HduF1c z+WYA1^h~CNEn$?0XwmJr-6xyXSIi<%=mBO1b%>nSi0CM{Lf|PqrTGY-c`6p#IX66G-OEuL`=oM#?n+QTvQR?p&R7!zl{;Pn zDay3ssU9y0RSKkDeXo+FNX*C+3HJ$Bk>d51x(Y*@^J!5M#japY^@x-9DLWO2JPp*7 zTww}1v2()69&xNMO((Gj4ph}XTCZ~K9*JSzyxsZv{CeCtWrze|g<$G4AW0%W{ zf&uvPa@xwSwkQN$3xoZL_O*+s`j+z-9_yqLq0z^MS|_Wz{a+IUQ-}#H^Z^$WdqMHY z=0SY>=A!(GDbon_;uDfF|9a|Jn4^iXtu_VP8RwlTXDf~8!&nzw&D4jq1vHefKAxgy ze_E8QD4rEPt=d5^0GH@?!XF5y!f`a|bJ)Z~2K&bWM|Yn(&R#7QM~M7F!tu>CVAc~S5sRDB8@ zpBBH{2@-e?Bk0aRxDD_%HQ-u1Uyg;*<7W%zstHM0E;7F1f87z~PR1Yiz&rd4obYy* znuNnN&V^*E(4fACiFKgz@`K23ah%3xBU|s(nfqbW_MCi1Zl++&fbxW8?N7r+nyq{V 
zy>eaMsdFJseJ96~`>YyqjW$!JGA1p9+CdvVvn0#5fui=ULP2QlB-(*c`p;vpvFykM zHX?_hi1m}PqBh}60y2s<*! zxREuK)Br``EA=OQFBD35!Q?f2w?7kd3@0$%4fTVbR&XrT0di3%d7u>N!KNK2a5XM5;ZK$_7%B;;`tTkkCmGn|7>7VWrKsEjZ!1d)e&a=~ zy!V_|iSU;ZJH!ud_!py8qsZeQcU^~3$+{j&BoX+igb7m71Gi`@9w*f{2B?NUg1AD2 zGHhPNaSu4jpfv)v!%{XcOPqE`d9q(rZ!ZYyI2{=x;y8>3z7el)w0lry?-gl;CAku3 zh{OoIcD|_+FTnOEMGigB#MX!WWQHv}C`V`6h_qQ`uM3@u!^sU19>4QQ^f16oL+E#f zg%)FonlBQf1eM>O8 zBr=uK-+a;MB?Gm1XvNraxK#X>3!cq!i8I%CbX_~@7Q7r+=)2dBjW=K_^_ghNO>WJd z)TD@jWQ4J5r-3#W)@<&GkT3)$G8+Q8ZZe>|Iu{J7U^71o3Ir z>}9LD&#Rr?l{o>cQ7Wk{GlE|l?4Brpelf+S%1sZC3u|wTyRCnGbhQKm2b){N$-|Tb zpo%BVKA`yDAz6TBCeaHdt6goT7|ym>NpmkX+xy&CdAg)k*S;1d8Haj0U$--!FVbo{ z^>tYK!~=0pdw23{dCMPQdM>hhWZ3p;K&uKih*SD((@2Kw|JC-MaZPR8+bAFkNEf6- z6p*46K?S6Rq5{%IiqaHBq=ce`p3qb_T|q#kOOqN}LMJFyRC-N-AVnY{bP`A?H+as! zY|p;$d%s-1E%>q4SaUq%8P6DVt~pss(31BB&m>I4uc&ZloK8tYl)re|v+@R;Lwa~) zGs8>Chp)u(?l)aU<41Vx=w$r0{_hr%RsjB`siX?9LO4^XxkygMdH1Q(LR+CL%=_99 ztH@{m%v~e%odNVirU9CR_fn@OJ^Q)Ns~+Ff5DODagz^jI({?i!1)0W$L&|b!}DGOfpIijNB6(VzvIx_Gkr8>POraM zJ-1YzoLc0Nyrm-}0?37FnNFUDn&dl-G>T*jvmAecN76*o^ z+`t;9(}7D{QbGqL%2XT7#3aB+YGLlVt>`_P zXe;D#YkZpa!A5XG;4AfD!6oNYhgMk&=$`?754I;EtQBXkj6Y0ck&CVJxGYkIsHh6F zrd!4uZl|k|$r-kBaW>KKu-lH|r`97GqW2d$tl>tT3$h=7^<%8VQxE zxaKa^{W^qIRG(PpVSX2k`}CMBn17g71Uq{?V{O}-e}NZj#3T%}=Kqh)uPJ8<&B~sZ zztbC))87bwoG`ZC_swIvgJ6=&Wb?wz-+S0cy?1{by$ zdf7!-8q_CR7+SsF{!2$kgN63Tk2u_T>NXs!`{rj~mx{|=?XSl0xmu$S@#p#WeyUGp zuhz=<_L{QA)qU1x+kLBf<9;H`L+{GV^(@V#N9I>+T^U6(lXA-AL^UrO%a-4gz<9{U zW+g6Gb)EScxuunG5!n9~X4}}&akKd7WJ3;&llQxZ4^b=uukU-2ZGh87HjO*V>|HQI zMN;Yl=pCZZB|t`8w&weQRu3mU7HxxFsF&?5KETRg1JYd2N2l&uZ~GLOjLPCwU{(1+ z51-nKQ*c3Nw@9x;m{^o_LjXEE7zBj|tK2P3<{9NQl zD6u_xk`kuEEU!+%UCL3)x2xrfQ=Fo3chbQSmONHbixI$xVnk7bV)hK`sic!QipGYDW@L1)9dz1 z{K)Jy6-aKq2aMtr$xORQ_gBQ8xuw@m(Ce;a%uK`X!xY0 z>czWX7)2!lG|#dI#O=|(k)=8Zpt>C{%jSRQtd_0>^(A_dw~@4eJ$V~Bt8q@oR3QCP z9lmv9+I$pTA|LXi9HTz}{b5ix`4Ct-Cgd28&e%rEIRCgwc!;*@F6dWnc?el6Ux~R5 zeL#(c|E%|}+rJc5$(*?yuE9o6qj`azHtYf$ZFt)y%}_n+q#!Z+VD#Z6LTTUBx)yrW 
zuw;=PwEbh$9=cmvny_{2OB9hS<_LKhgscmfNyZ$_aa$hOO7Cq4(ZVaA*>kVyT|V%( zJ|HG*kNPg?F;%v$yG7qQv|8%UTA?%M+YsqbF$C7eszpV<+tv)F{X#^%*m|`Tkg$CX zGOu(sN<-F$X`#8|pin65msUQAr^bvlLb}Hd48mCDFWL{Qx#9|%yXeWS-=oVm>)ygU zb9C&H6hO1xD|m0jm?MUzSD%w8$&LBKY}{` zuo7!gGQb<;Ha;Iyyw?R>`5Ll1A|~f*WHuFTYPs{8=LTrdBjmhUUatVrecD@e=6+?h zfqa(5&+`@z5r{4}oU)PoJ7a0TD9yh@bGWNQA8z2uRG(`j_^tssYS{B|pitIBPhZg% zU5N|M^Xsy&UirF<1)XCUo(P0I^J58dxF;uUC?deqiF zntI%Aq@S2Ax;46SiD9D&d7N4ra?}rfH2s*Xda`x%2W#VM0XG^$daWx$N2=vTksea*o@faLeiHwC30%M|PSvHhiHi0el@fTSW8ysQXa zGDk^wVi_^V_0!G<_EK{~5pJC0XJD+(gQ=87p$}@`y#H_P{TqlGXK4(W4pJ_}B(Yi# zJ@WIblbwL9C1u+jW+SYEVbZ!q0?gEQ;vI^+0m7k{NA6do_KrQQ-z=pcAJjjGT&4yl z(jCxm0}vRY3+&z#9aMU>t06fpy?;H?Fw~XWEfPQXusUKd+a313_$w{G>f&2!!V3u!Es}mSGt)3l z9?QP{4EO0!=UtNc^KmLFwGzEsn)j`BtnRgplD>y~hG27~H3;g87*a`UYsK9It^@^z zl!zTAp-0-;oghaQbGfcOqqJ^hJp4%=NXqQpjJ?z9JI@AFLXB~+LDZ>{XGN!P?p0(bofW`j@K5JI2xxX3Kg_Rvan=d@&F%uJ$-|9O zqxFe~8!K-S*VY)DN&yE)ba*1W$Mj#fXc&JXcU6vS)Oa^0A&i~rT!aToN9CCP?JQ0! z6t&cQGgsj-<(P$et(%PF!Rgbd!K?As3UHiTd{@2H0PY%=XFlY(#kPGokMB9LjR7+$W|zN8(|dOlkTW0x;%r87-rIo@PF z%1IyXXuk(k=p-fEAC4N9+}=0ss^OXCV+0b0^h*^n{(0g|20|J)Eg!sd42v)4>T}_u z9gi8k{eYY0ioJnRoKpjM`c7%31PaM)bc{kCMW?1*o%m^f;}EiAHsl!T=}bKR<5!2$ z-VzD~t6@@+Wg{vS_Kw*3xr84o*bb;S#wC9)1eexbLp?GI%6BZhG#m9(T7N{+L4A~< z9ybX>(N!fpe5Q>o@GWWWni1`O>JqCjkDAw`j=xH-gvE{Z1Y*8NLmabq;j$Z4#FzFQ z5t|bhb=^hDV1QfOgBj*utRvu%!J}F4r=sGE{?O}pv0D%r=+c9BP}&byym4#z8#5J9|$dZNMecvi9^5qS9P2rvmp4u2t`BS}wHW44=1 zB0Z~RrL{O>EZH;zItN_NqZZT2syEtLr4sZQxt7RgoG;L5)ZsMdxO5CjB%AT7YgYa^ zx*!-un?+i7^gsqwzWlaxHrJD|K+U+jWUDLsX5?q`-E>lSY4H&*gq$9ymp{CAZE21O z`DGeJldC2<(r2cSMDLub@kmMqA3lF$r?xxA|37$$nnpFOZQn9{lz9BZ-^MDawCz-LZ&G|*? 
zv*JFTlcTGL!$48&r*>=Nlum1)%ldgGC6Gg;OGQRbgEdt zF@#!kd}OneH;D)l%9{>WD|~>#HY>Xf8FsYp@e6TyOc;P1M|CD02NyDQXAkvT7;8~S z$+TK^ny>p;$LGJdZWNi$1O^2Dcr(t%ktlYlKpezqFo`|v^s{!zh$AyE*ze;qN9{u( zf$(8HbkpS{)mO7Ls*LD$$kOL3?zpIVGiLEP@cjd$7XmsU@&$4+xLQ0EjXKs+8Ypq>~4@>QUDUa^+Tom99WM<_VNt^y)^Z>Efd{g-e~PWIH`p zjd$;KYhCq`vb|${iA%wXFAowKtfoK|RDdUp3T1-l`M;^(w_lYc^=R)#6g?Uy?#DWF z_W?SD=K0L-s^Z_7BEdML1Vf2MI3At=@}gHn3$Yeij1dHiv~%s<_e=_3eqNwP+I{jA z_SJ#BVyBS`3X(8xC_`t>tB2(}sy}Y(a*{izYUuhw>DFj)&{HY#D!p?*MVC7j|1ClP?HU^@8iR%Xr`xra)x`D$sq6KR+gY2pqLX1QhXiZ{ zLHf%(ie5FMqlS`aIiv%q7HyuKv*(~USSyxQ3wy+>6rvXSApZq5D{rTAGs2t*3&DT; z6u6(3O$uLq>jKhQYJAg+{S@=w4EXSnP^(4uKT4j_QIRelU-nSTm}+y?4HyWxno~>lX>-|_{rjP|(oZ?~fOKH9Z?J(!oI*CC zA5kcg#0g!SdNNLTZTArj#2|@s@IHHXO@m`#w93t z^l!#RWRl&gUD?O*wUCKyt7j`YVz>fcuv$xo0N<;$5)+PFI|bZ5Pli-R$rHhUU9&Dr zq7}HVyU$G9`(zkP_zL~549XFgMB4PN+wY|Ur?kV~E`6+8>2~3o_r?peyG{o&q5|jP zk$CWBR?Fjc{yGcFH~&8ZFaNw=WmI;-i;X7RALO_D)p(TvNGGVb(|!q14|@bmy2TbX za8gYCcaUB1>SWtU*O9W2AM{I1$cVG#*F3f$4=#Eli8EL{V)49=rMq}hQ|ejj#51tE zj;R>Db!djC0XB0YNy+P;Cr#L^`iQfP5grrIWyVjN`*UyQ_Z-b#DI$W#Ao$71G{Wfz z+_vPv#oBvl;8H8%N?$D$P3pWX1xsA{R<+Ud%i*~Aqd0@9)36oss>xC~0p;03Gj-~s zU8+zW)|t6#ReveHS+=ZyuLKtr70_IZCYy9st(k4&96_6_OPOO_PHL?T)iQZ*wtahO zaXXMc+x>YFx*fu-)hQi1A?kT$YN5hA4tkCNgce_78 zCXUQ)6Xb~F6NK9trG~~DZ{N5Ud|Oh8ot#hg^|+yGHL{L*Q*?{~ZkMCJEhl*OBfJX| z?t9@5aYxHV8rRa!@jSX3MJ|mC)ZW{j7&9_b%*4#;FmCJ&LbIAjTj=dDUB1s^EcweV zSS_+4FZ^~l5k31zWMI*u5Sz2vT}pLV6h?)FY>W|A-H|%-YLGU_Yhy(cZMd<9GPhEk zFKe{-hTsoP_K&PIe@}t4T5ONx`xnCQanMWIkeHv%JpN8VU4fO^xxnlK>JIH_kZEtk zIb@!s77SjAZrheNC6d0$!jkAHo}1Ode0Y>q0UDMxm0z4JpLa7ApUl^w6p&D68w$b&jI zCMSClz|lsGEoF;_{m2zPdf~PGo!*07NOI!-?^ZLi%r zWZj|0dfi_6%k=H&yx}j(-r*XzLl-_5skb%-u5_*xizL#`xQ&R?$ophOW|iLPd$6e| zoN>N7ivNEHBO7fW54AW8lrgF?DVT$SxHgT8I&+TUKmu2aw;mDYXdQ;W*`FpjM!8E; z03jvPmPF@SyFmT!Ma+x1D3x>uTFIn91$UMY6E(}@+y?qJS^e|j7oLOQk{(4}@puy$ z?12NX-$UO_E}sW)@P{igRU2lOR!<DBYu!f}X6JUN>JZxy<)+qJ{>$$39tzsChtqAa2FkH==G*Ab4>( zX5|HP+&lfp6U@1(^5f>QsCpsR7d^!ZzR$S`JZ}C$cXJb*vBwVw`(TYBg_=zX^sidf 
zAK^UGyj;MQuJoqF`az+~3O}le?&WzNS6%8R$je9NC^YI(jWfijZ_nRI382UUpZ|&V&=)dT z8aF!>*gpC~O9NiNR;jX{DEcKpB#ZFBc=V)0BPlYwFca0v5iGWEpzwZqn?x;6KCqgb z=C-qH8L&fsp?v-40c<;%E)@Uz<{YhuGL6)`&4{n=8TuY#zcM;3@7|+UL+k+xy;{d! zwe?yaUykfvuZ{7>CxONg&)~9))zRNS3dfXWI_vqujQ1+!+I;M0?;vD7UaV{e4ak2y zV^0RBj#==#*+0s5yq=`I1IBE4R#}Y{&UPQWRPmf=9yEVBoihw?=0yw1DW9Qzc6xhP zJTAfe>5Zyb-<#jFwAcBut+R0@o!z-;Bj59m3uz7$H9%uMmY>kSV8Eh+t-p}*5FHW=2bnAp#avDRL^~d()f3 z@}=3wfmL?P(p_}>fPC59_>Oa_M;hGNt4FlU&Z`+Ax-Il+(vGr6^#|(+Ht5s zBRAK};(m3=K7v>v&BT$v8B?(D118oUy8H~>dao{+04j4FA~t09rQGp&+pJ;eo}Bb- zdR)kvumWN$9aV`3UF?CPq3g_N%J<&gPKbMy(r^0#+g=~Q_xSQK6uVjZnOjHq_`TOk z_=5>ZI=&V1S`H#JkNZ`gDam>@LGb5PYJBd2YL`s9<6r_9XQrhErQUCwNzzJK zdd&VYb;iA>6UorX>a&K+u*4Sbynq>(FRF8?J`GqZRH~F}HRbu@BV~l>Ay~>SY z9etMzT3tI_h=gdH?Tbqri+PtJE>4ftCBc;fH~_KvcI(aEv&ucGoKyAL{)>x*yD3f0 z9~M7j-R!fo4E#6Vc5k@sT$T%O8@)A4DDOV3yQMypHPbLGBsM$M2qCY4pU0D#*5sy| z*EgEyRaJ2>pd8hD!tPpPTYrd|)9jDrzx;C3eed)B)+6CBqL`v{vuUeI25ph!0xct1 zCjJBGm@yz@+2(OO@H>oAeK?(FWAHOte?xY?2ur)*DkQW(CNW?p7IXGi1(RL2wxTls z+;|8NV$_z+%ako}rc*r@f4Ro<|ng9RCqp*-n7!Q z3;C8&UjxlHtkcnY>>d30bE5C2Q-tTZPF2>!k)Z1Kl=6f+=_|u@_^MTPEF*r?8Uov) zF2gr@!ac)<>R3U9^zijp%b+?1evcMl2WI@vH9R=nV8V3?6Ct@E5E4IC)`a21**Rx8 z<+^%A`q%M2#rc;DUM=FI`5X;h5^LgVgIgK-dvTvh_i4V;u9BV<8oJtn^j?9ra@7hpH1(zOX~Rn6 zsf-2QDET*B`mQu~(>Mog8vCO2R@f2~XF%pa(|bP~gAHd*Uz@)~XNJDy`0i!;<^Dj4 zZ+xsUAl<^yAuHCu^#!$}yR@esj>YsFelj|)iqAfLUDt0rL&B_R9ap}{ZH_OM#`tEF z9-~bHcFgLEp3nxH!FJ&k#ovnWe2+r8W$z8YhZhLq<0eJ_8YiEIK2*w9ejH@Wa^^Iq z#p)5fPNO1Wgb+j%W(2Pu@3+GP)Mm;oNiW?8jOQh+SOVvEcwV%jDh@autCQ$bB}aDd z=3KHSOH6N@%bTJByS+r~?B=H`_e357OMO8__fQVUFVuFRpK+Juk@-}f!qmO{J5`oK z<#r%riK!-{*Y1&@A0_0`VBzn5vNTGm`+dCk~wVTJH_UbC7lf>zn=s{GonG&Bj#N)qGa#?&J76H#MJzA@~ z+#V^fc>}J3RUFl6IDBK|&B|Tl=Wz51QY?4J4nV#*+6UEz19r{r0_Myyy>6E1`-nJ; zpa4l|vL_rYu{Z>uW5_OP;DN4+)K__f_`yqq+UOmU9>tz99oYb!AX5|btqPO@mwzzy=&^fMvpPB6`!WZ77sbdC+-kU-59sZBf?-zUPs(=wz{+s? zEKF56*9$4lxd7Km-Jz<_ofdw)VWVP^D_dJGbi83SaFbQ3BC|eWgaJtK*vo$UAbLqS z7e5h#U6y&_tvX}~u^qaSWMS_#RD$jRbo!dbg0>7dCw0<&&~tIhj=DOB0ps{cANP=? 
zW|Vn)uVwlMoro82Wge@Cz_yKX!|Ug)pX-VZg{7wLy^O>9GgX>OmvkkXzj$6=t;IZT zga@BsW?gF5jBn@v;ENvEu(NfKOfUCmamc6?!$xD2}Xs*Brl{u2G zan(E2c2^BwVPQOxHiH_BL+2 zF&ad_CW0*#P+6B=n@YjDLUGaW-mk2?M+LZP&hf zf9dZyll4_y@3a`nh_INsG5(T9y`u?zj#0|PWZ?I~6libJI?(PwZ{i%p6$ zXStwb`hl2$_{Z^X;O2*+_UBgo%ezi`>za<-5ZMNkF+ZgR-1G>-N`#vgS*=`Kdb_Kgy zc>w5R9ihQi-%9T_({!`9s^Xy9 z1Xg;0k0heLeRORpn$7xLB=oj4Vh-n9v}p|NWW5GlC33R*S>hR9t?cTEa?Ng&^1f9Q zVZK0rwZRwt@bZ?Yxa6h;lv#YUuiJW-Bm6^aZCeYCwOVV-(yaO`N4pXMYyUp`y>C#V zgdlmnm+WsxD9@+=9~iVT-)f6uO5)>IN~5|N7OC9eJY+9U9&wWoQ|9;1-@E2Lmwb&A z%b9i0f5JV$A(yf{euq}~q8QX`b^j$b!>O$6{s_|rE`QwCy?D!bIdR;*MH7X1uoz+Q z=3DVrZxF6b5|pl#BaP((FPGW`6%&2~AcR(FeY#%8AN0oV4sCPakYdHxoot*ek!t3O z*RjdE<@J^jU%c9=*jRrp-les8Tx?vFNmuCV;4hvWrOzoPFNE#He=SdBw2e3V_AEr{ zP5Uap=mQ9CU$^d;4CuS)FR>KL=(^F{y1NGl!|Wj|X*U33lss3O&{l_OLJD_^&n4D3nI#jz%0MkaQ~ZBJ$xT2wip!TCLlnR}LZRVJ?TiiiPG z2D!{c zH&4@QG**1gCDqjo-&K!r0?339iAQ%Mc2V5mp^^yWxktA|cQ1HB*d~0SvbI-Jo}}_J zkiK<+KOmPlE9TX6L%+Wa8zGlVY!7D8xd44BOoKg3j>Y+~)5Cj}Mw4qSQfQL9`lf2_6fbQ(>$7*74YXUq! 
zLtX_N@{9zr2armEa*{)K4RZjBB-|8yzee}>Il*nM&?r-Bqa)l)!K|;UwOd9TZyO}8 zVvAKa?LQg}>B{6Zp%o;Ou1WA4OKAY!+<*4~_DkzvNtU-l>Wx#sLp2bdkuQP%}W0CByyEKRi+ zK6+lE)aOo%p4MgCu;%SAH%W9`rCFh`~0l$TINUAV=~G9u}BLGPv6nd?9QXeSs_7JP7??tKE9 zdf(BM`+}WnCJM=M_|{eSHfz5{fYVOz?#bA(g{ya z=~4BXfx@!Ne}o4)gpM8>Y|65|A)|96e3heJk^8s82q@Nh7!pql>CbSR5VNS$wgQ2s$o$X@WoYl_ zU=|@ZERKH*XGc*VYe7o$e> zq6n!(+&K5)Qg3-cY;2yfzjbD-9DCtLbgsH))_mLc{m)h#RXxM%-o;lUgo`9B5x0V} zKR^KbZv2-MS^__T=b!0YZ*cm!Z)G-U0p>TKj`IS#6C6u@H{cb<&rU!HpzaT;V78-c z7P3nn44QUSJ&>;SGW0oUCHAFXx~qhyDS($qEPzE`o8`lpMiZL zzy}dBKo;fs^^)emLtj~S@2_#W=Lv1ESAVHZYMculvX>JD0yPJY5{->HYKydW`6|DC z#>`V;345D9{TfnE$A_D=`>%Y`qf2GEuEYEsM`A2i^8%pU3Pj$3p$^8+n*Y33G zW&XIj>2OasY%I=_(==o1+Mv$5LnwIXo$@{aR7Mf$VMVd>i{|6ZXgZGXru`Fd^cHu= z{rJn(^?m79QiI>DIAd~BOn<5Kf+}@>-Dxq(NY)>$k{->>$s1;g7D&5){7~o4pk)>b zEU{MVEft$E|2{mf(_4FzaQ)#+%6bAXTXvrQkgkSSuJ0iU6}SmO?hp>_4bzz*TCDpF zo$lLF^6R%Sb?i=9Nowj4EU^4e9}b)qG$!d8fG z!)XZ9Vi3`2!4W{CC%hUVlKdY^f{|Jb#3|X#@jz^OEEgWnY@PhsvZ1+F=In*M`l~r{ zwO!_~TcB|1g?FybncgdTOm|HqTjN89)_%5iq!LoxYj30|B!3xi1c`_sz!G5mmGbBQ zVoRjQOZhFNHcKFQ)R?z6rOFOG9_5!Iq=MeqP#RiN{`sbgHta&BAT^BL>T?k22lz3o zA}sFS^;yc1YR46Rn&g}crf9i)YJLZE*{}G;+v!hYQ^EQ@Xd*VES2* zO#?KUkN<2T!Z%z^9&^;4U%M$k%bJN*%&pp1T zFi3gw-)H9f=#|n+In)|wzw8c)KIHW^>PXhqnY0==2JDL5hDqNi%eVtjrAhK)J|>RK zW=_ijKvf=zQYebeF^XFSQuSE7q6O?23wF#IH=cavWJ=3yMux3`7W9Q^x z;pFhz!_!62EOVclVE{umsq{BoTJ07K-Wy)J#v{qW39_PX1~ga=8F~f z9j~!HHhDY&S~6Tj2Lg;pnpZ$9zXMR_X~FQ+REu=e*DPuGXO!=&j%rSq7^U48fQkn%#qbIKiL%K&s$YaY)SpIl-nm*|fVKc|6ix zjyl(lJRZVYgW@6*6gJ?7xZaY4!!KRoOzC+k(Q&OkQ}4{D7E(WL4vg}kE+=^;mKH$* zo=J}tm~$U3&{ImJ=S&=bbX;h|^g~0F?i6F}YTUr`w&FjRb!?O6a>aio8pc6*S|BwL zbnaTmgUk_y{H{uw+iQge(NmN?HaXE)iuPEZ(3#iYtr7}UrENpe6poaZZJ}PBFrgZ| zmGW^5mwi9G{nV!vFF_}}kb)hA811J7W!%8qI=jQz$k>m75LN?SiF7GKU9o{H|E}kC z`QugzK=F}@Paah7b@}^Z%!1`23N?1__MJh`lEv+2HVRWUzcEtzyOyO;h6D|GNK#f% z<=0F8235pXUJH09vZ}ahH3DkxYy&E@EP=p_n-M6}$hrz?{5M=4s6fOX)D94$-W+#i z2m&VuX54?UM?rXnER79+v%1$&jfUGHDW0?y>)sJqHf+{fI}3mCP$$PAnkCC>_v@*} 
zQA?p9MJJNA;?weW$4DcghoCF=pjqj7{>wmY*|=!9{3j?y=Z<^6D}vrUvHJOHoHdc9{;MWu;*FocCD*aJ+JRQ+%xaTJxDEVw^w%mj&CEqI|?}uZDUD8 zuMmU>DS)iMOh4ilF@Bn4w`l1kHC~myMn~BUDt`Qka6la7Wx(Q`3wg(GpK+kb2~ z57>Lzld|7K1?la0AxU~OFVp+Zexm1-RDKG=Wr#<5>;D1qaQfJ!#R#mvwgBW#dc;m1 z1e;1nO`P8AJsb3Crg7x1bH|fNQVMT# z$VGqBSEE1^7Zs{Uv!c`s8jl@R*gYA;Ht!P~>U%n6|3vQZze-*(;ilV#(Mkl$?&4+gS|rh)It-GTkcCY*WI?fL2M^w{sk{1`qkQ) zf%zA%SAI|~|CTs^U(|2XD=|l^2kv~DuWD*S2Vgx|0qQmvBgdF0L-vP>mYBxlRc!b1 zPhwdXOy!Y^Wp-seN zXZjN-IM7n+kPa1b+dt}%-(aG6I*Rt(nGJVQx_mL#oMgqSO7wwmc2Hi+v4kv7eU^^y z9q=%F{j?EY(Z2EU7R_hM0^dvfwL^y8SLRQk>CWCW-8DIvzWn=cf4*Oq#-m`hD819n zgX5iA{%32y@grZXPs@DMIjJaE!&0-;pr^v#rb)c9Lk>Q@bz-dViLt9e5{Lg}tXm9K zOnBt9e?b&tqYePH!Uxw9MasfEJVk@?IDi#1#gDUz!W($v8mx6k+&<)^pC8&dt9fI` zC^e`rUH8@}Ca8giu>S9EaE1a+fcKn>$Or#_v>b6^_9K8XthJARMwr#lHm!0r5dc+S1873S)4_e?^=aO#o%i zV$h)MPu%<~)^CM&0<*+Eo#r*>(|Dl!(ytU8Tzk7U&?jVfFCwTEK-wuEZCO`GlE%n@ z@>T*t=Z4Qo-j?B|fwk<6V6S1er?5W~rtMk?BM~W5&7=Kp{l`#`e#o7ab0-W`FG1^z z`q48VUGm$THt)r1*?_Ox~#|^mK|Ks zBRL99G>`|q{cK$cYb;SrLFg+(QXMRSjxMl1(lQWHFmr^pFK#phfuf^qviV@B1O6qg5`^p7a*cnt8sgdf`vH-c+ z_@qbsnj=L^1NR3#%G~G#$?lM^biADqT9~_w)Qvlfn`jGHgTov|X)I5&eF-|=cc*48 zLD>$J+V!zdA%|oUgbs5ZPTQ)=7ddlSqTY46BGX((aCby;CjNXefpgScM>ez_ov$<`8DB~LRXw2l zd8|_#l;aN8^4)`__mPofaZqa=V_U#-VYOl1SMlF7{%zhTf=}sFr}%&P<9EcdKce=s zF)5D;-x_HA)Z-Ro<0Go4k$GCzVU!t|L_a?#VXK4GYjt^slHHg2s?b@ybqV-{6O~jq zbAG#f*&3G&Q%CQ$@5L&lV|yggS4$yseY#Z-63y)c@Cu{J+EoRARUO{4n7-;Zj!mL1T${{=(jpHWwANBwaF?zV^~+hxT_Se-DuE=J%omh@Bfjyn3X}ce<&gWB_6f(D za%Qke^wK4;0+w_Ca%vu(O31O^SYPc+5W9 z(nvEFSiXkc98Brd#lNwBQ~cudbG^btv6B8zFc6AG0WJ+0DK%rc(Yjg7iyK9dzKtuP zQNyiWrL8^q2nBQB5$I31DnVRwNs@(NaPl|Hk`{=LPBAzDv|GngE+0@FxXh!U`8>FG z*dBF$JJJ_bGHg*IP-5^9oQ%nbJr5|}%mqtr-B534IJG!YB^gOsf>ef9c&@*gd{U-T z=7i3jfc@Nb$3OC?7Zr`vxrZtKPv&2%RU0zU&bv5gc6OuAY+Xtt(3v_kA8M zRe|iYO(Si1@2e~hmGB3k2Kv<}DwOAAw_JhP5@UD`vd7(W(h2}75y~>epzbVoZ}MsL z?{<}tJ}30po2U{PXDb)THnuiCuG*C~ z!EiKg_fpW4Jsx{#Y~D0a1Zl#ke7qi|6$eCJ9!HG5jk_HpNkTnM#+ABrjCw?2Yw}fM z-II{dC95vm<9fPrM(2{SaFjpKL 
zYl@pNVZm+}TBR?*nfFl^yQR$t5!-4CcLI0bdApPJO8fN!&;n>dG#?H=T~ey50GE+< zIv$_54BF8T=Kdw}+gb*8iegIjD$3XYTSWfr!s{Blm-pyxT@w>A;gMwX#)XL1-ngH_ zVu9SsaRm^#DPjR;6y&bZd9R2S9N?1tIib_Ht4eZwM%@bN9*pmN)d*s5A7@Ov@W#Os zoukf0euk;FjE9Uq7>_4CD>Vw(t;6KSDmlurZ1L#_B)eSeQY=&-t)Pr_k^;8C9Olkn z4zg&v4Cua!8YSH$->`tc0whGBQdE66^ubjE;NDHc-!j;SlT!8AaBjK$@$JO#@D6XI zp#g6up1zH{XT#FY--r^vRv43L4Ei0!WuVw$e#J64r8a%rctwcDmea-+@cEyQu((tJ+BuPjgoIguI0ZjnV%A5jI+u zuu#Ti{X(>lK0#mJp?;vDHYX|pFYrXM3&Ux>|14yp@o1=h&*h3*k8>?Q4t>z+^7W}? zU!nn~q_ml2$Y~u7DfUU~7i+-ZEwxPs&2Q$ia)AnZ6SQ`du_|N9mVrF!uE4E5GlicI z=RK==9W8?I*#1ja#hl2h7%laGmcB}7Xk=`JI?Vyx61cfv2&K`!+XI(NTOIlH`Ve(r zL5}H$qn{HFs{qR>s9UY&v743CqSxZ_vA&MBK?|IR?-G2HfWBy-E?!DcS@OIm`5x#U zuV<8NLdBLoLciKjU4N^+H-!`~+bKu+u=fn!QO{3cGBE+7A&&X4ydrmk%DK#>ihp+O zSsL)*(Cywqq>MmbDVW=;w7*a(&vW;tlxGkj$lc8hQ(_zUxGB#bKK@Ihwrkip?f$-4 zN#>4{FX^sg=Y$vl(oIspBrUtDBrA%^$B^|^ib;mGD066%+7?CO;;a`%5||+tt#N#^ z|DvVj6B&0IU%qqWB<}wcX%t8QS4wWXDeQ$4K;Jd{@8A4wDaPL}+RmSN;Yw}I(CGj9 zrYHZk6h%=nOHsb4v;Xt3zuoPBF1%iy*n1XV=JWr--T>h@lttnnGZBA=&;NRse9x|NYf0=_#4FVxaG?B453IRSqj;gODX$OS;pGf3Db>acP%5!&_uf3gt8@}Vk{#|cE&PN zDMHx|W4L8ELzWq44F5-6s{8MLe;>ViIcA>cocF$+_c?dWP4zk0jc+|MolIe(!n0_pCscv)_Q&!kgugx#=&5nS*2G-+oUgyDu(F(joWcoB zHeq35b$^$us?hU#|GWynJwQuw1Z`oUgx|{BJ5MD)I`7@`{SGz!S0o zAwGeZugm%bh$DoE)4 z3wz?Sw&+;2KPX!9cLTl6gss28!=)T8$~EWLm0~hpZ=Gk9NxZ>uZ(KV`B-$Qy-&h-x zB=Io))wt?8&UZs$xdQ{fGwyL{6PZ(+&ybId_|NnjgS-{3BX_9R7>3;)*RWgL@f25if<8f5M zlE>r!fgu2>$Hf-@H&g@61%<>xD9 zqm$}c{vUGo4+kEiZzObEhMYG2=kNa;?i&dYDE}MDKo-1bRr~sXoEO{8e#ya+Xg$!s zm-Dc^^6qu#t zYDKBi;gH#@Y7=ovZhPCy-MTnaRK#07wqSW%|H*av17}xsot?@CIDaIZ*9r5d*-UYA z_5+EIg;BNJqBgYDwEgAEk6^F|H$$<`ccYQ$Jn7?tK*n5AVL_By72zkzm@C8M1N{%C zW9wO2B|F*^yUo6n0dvsc$V}Su6vmAdLol;AXnRsk(eC#P(@*G%PEGI`MOPk**v=2ey$cCgOg^E)XA^@K%^(-*(AvLJJWG{3xpMT;VHygwWxaoe_C zGm>wb%H(g4a6JAWB&3N6=Tta%7oXb!P8+rl`KXL|e*xjE;GKTp!5i(sHUs3*KW35< z*CpjGUf**mOZ*9>U0R+L1-NnCpyNAe52Lkp4fzCSSm@I2(GL!SXkrOZpdcf@Ajx*7 zw1dB-)_!3HFd!_35*|^zt6&A0q!KL0b2Ls(Sb}(X^CkZl4DayV+l;!ST>RUwXR(z6 
zCP9Uc_@wO20z1T{Xy2VpU-8zuzVxnHahMHhcld2SgwIhDiLx0D^9^ntDlZ>(rHlNM zQb<1T{m@5GZ$FjZW$j(*1mW(1rqv?S$5ugpMR9u?FuH-CeB|rG3+outu)N=qq}3^O$+r4d{TDWfWLt8a7w;Y#&YC z&R^JF`^Uk16j*Yn ze4mvpoNzI*ajCL|!^zHuRR)6Qcdoyfmc(gx_(`SglLCV^p?9Q@ZF>w^n`Rurh zlQxVCq-TQLlWCC(NU3x~K^CW6&r8Yg>+A{-FerTt_|o?E5&Y>L(Z>ohM<Lu4qFSL}GZKXzN7Z|tG1*-OW}vO@ z&id*ka?hc+`B$50w<8B#)Am2r-F@Jv!Llr1#0Xtx3%dZF6!shc`+|ss3HBln(@9ys zFO$<(2dr(FlTY3&)c7R~^W}grX+~j=?Fzs=t4myjl|I21UD)*A3A;E1~!LT5HbHkF3YN? z(Zt?5?3c@ou!9ELk|iaPa}K#7!`Y}F@O-LvE+Xu67=iy%=bsQ7iln;x?2m(oj{y>W zmZ?0yLwp5&5a0cYj|-8JaiPx(nOI$L)!}g79cXt5`HeIWw~<*7yBcb|^LD8|GWO*I z$!tZ%ySg{98?5S^y%yEE;YyFXH`LE)h<#U|ofoukXfpxPAJ?u5r$B_8V`5*GRaNz8 zL~gF5>VnT0!A6DX|#URDUN zX`7qKCN>D8v*{Tl-}jzv>Gy6;)&t> znKe6vnct36d?Pwf?tuAF3CYASHIjfmr+4)tU`!FnaO)DM2349s-??9N`G_PS`VU^4 z?tF{9z-ypgka1#mX`=mZXd&q?Ru|msR(Ug~@=|_!pwcePKMrhR0C|}sYJBePDAgDo z8J#Hg#4=mb)#Xr6Bc9}P7yBIiIRF>;%rfhf-CW)ek<|_|+vXBG2{@ zHAcJQN0+;_d~ww;OGOo~fYK(5^e%c}PUiP|*#0uh!_Kn<5$2{+_b$>%FnfiF6lx|q zr87%QKL6BZw~hmYh1wuUOIAmCg!DCu{0f`PQ@>1o?KQtc4~x>S$p)?%!(BOC>{4GV0W5Hdsz96bGJ_P4-8CEHv==V z_3Y1!iyK{-P~=Q*%en|Uwdg=H?n?iTwEakOVQrZ5wgX(Qo#x$H4ttPU?_Q2&s*(9j znp@_KoO9zPj0|}HpgvOCrr3+m99{fywn!BCcSL3x4DD_ zjd?2_JyYczXZ!p6>(od!*=qiu-@^L@0IcIdU{-s_uZmL*t!(l5jW2uXDTpCopqCb4VkYR~DoYyEONLb-$C zw;3KD2gHpp^hn*VlqevOMY*B;L94MO|1W>PmOK7ez*J{YYrK$COJP`G)J^ zV*6@(pjnrD8iUrOwi@OQAt=p<~R2k2#)e!o$Zq=iB2}n zRCjkl;l~OvE1*O+aRJTn>o674)b$x5CWc?)f9wVTB;yl|=nhq|&0rUFev*CMa3Y(u zE44a0ncE(Sq>jic6!<|a0Wn&&ulybW@0f~h6pnw(?HD*vdoLvO%Z^!A$5c)i|EL%`mj#Y#_&-!7=#b9it8 z2F3f`;#dwT?g|tr{|}-L@d8A}yJfrWLdq_xAXS<)GZrVH*1yeQB&oMZ2Cc0p+-O}^(BPHdv*jZ6!*(0^#-;RFh%gmobNORf#+KTEgk--M521zE&aeU z0ve%#rHy^HaR&y?kg+LA=FpUo4*TC8bU)WPK>wvj`jyQsSDl6MGZ+f-*rRQ38nGei z+_uinO}qwew52C|5ZNRL1^1-C5WD#OICzL)T!y^=$@U7cZA3*ba_#NWLaby9354jq z?>8TH`o!Z!29Gd8_X=m7onW+VT3aXe0Yep$nVh++ zBlFrY%o8Xx{~?XH2hbx|On3*nt*%S9A4N(Y&kd~%DiV6G{Zc<94lLxVkuBOsRjj|Q zKQn+B@V6A8?b?8!1~4)py;m2~rVs3rOORQaT?#RMPRM@k;#IhAs-ON#BT3?0qaYs> 
z-qa7!+}OfJX#bp5kZkK=gE5J&6WPaI8*~j$J33S)cQ8x&)r4^9|Fp!ey?~``uQlU7 z&Az+VD<(bWWNy~=sjZDHb}8@d#~i1FoxyAJi(4XmH}Otuhx8usc@D{V_Z0K&3HVS*XiI{-*!~<_p@^IZFSH@~@v` z_iY#du;)H7?<^$4D)Y&CWy>aV&Npal2s3(`eS-=_f}G5nEAg8jIX%&OJCJ^JU5E6s zH9U8hW$<7CqZGTbMMXk=*e`Wv7gOwW_Rm5`~p*j14fAOeWx*|JJYw-buNR7(L;Tl*OeA079XUA-6w{q?&EHQ<}LVN@-dV%V%24 z{J9xHEaHoWku=|6x2{Yg5D*LrsQ8~Zm>CniCH<4#!D@nKmMC!A`Dw*%927)Axnb$| zS)TIm8sMG-M>r+ucO$>Z*F}=MHirV`a;-p2oWv%^D8_iZ8%Q7`LF*fAM>`1t zJy-r={*TZzr8sPdY*|g19hs6z!kU60;4`deWqPj!D;Y_c>zva;iG{EEr2X{bH(-#p z+vpGL=Y8KNY5|=_IglgQT7*bAB3ioF9H;?K+C0(IEu7dGy_5M9i+5&(g#h}UGG&Lc z*cDLr4SLfSg@saMr!C2H>uBzl@^zZSXm`U_!ve9)lGXT}@a1#J7WcOvzpfnj@SIg` z$gSMX=C{+9&Auk8jib|Wax{N*a+Sr=g*D+PD~%Km(+HXa0f!GFU82l%A$;oJJiY+q zVay_!z<^5FznKP@C+xoMgln0$n)~m$i1Da$f7Nl(H}+b)Su`J4N>FuS--+oVwZuGs3-&yUZQKG66UiwWBpm7K+3Hzjy0*E&a2aHowHncgqsjoR(yi5ClI%nDoMJDXsEUGwC2gnwgl@>^` zga-wUNw0aYukjP6Bc>=3zMnZJ=?NE;me5NB5u~X)ru5CvU0d~VJCe4rhy>pScNTN> zFU=6_&9>K`M^5k7JFqKE++GhY&$@>;guj^Jp--!6Eyo4#%Qb61=wX$)pzjj8mQI*& zBp3Mx*%1swmib3NG_0?dEI83bwEPQ0J3ErR!0%!`#E(D1kZL|v1^6$~2((cLZmyE zxooNpkwZRU(`;icKG-+(6Tk00*eP^Tm5|CNEsNUgXTdWZXVwrgj$a@FhFs;E8sbl# zb#HjH&f9{fyz!~jv0;&T4dumm*G$|oZ7p<~$Upy*xQ8A);nA)ONgUTAZ3- zMTIW3`ga7)+Jk>@AR}4O??{x;0ndhooHlhtjCfEN?$^Qm2|z()Us?om?YaatZiAZ3 zPI|EQuSNB|(e+M;~Bae;)0A5iY{zQJs5C5?br@qkbBJ(unJuu#5D1m4qxvn4;W8NXT@B z)$}eK5tcW&13if4O*hZAbfIv689G@h2fPgveOd{})v4xP;;9Oq=E*w3l!S{2&*e%p zQOrr2n=F@kZfS;n@Gr^z7BDRz2lP^?%kXAAVpjOO@gkrel-y$q(UZk>Jvb0Ds&(Rq(#Y*-7w!bUpsa%Q8{H)v-SYTDHGDqV zRXzQth`Gmxay*uy4np;423zWbfyHYOHADnhT5QFVG2f7U+IynmzHlF0>@uo8m?sby zuQ-?WiLT(sv1256bn7el?-PO0L0v-@u~RD8KbWX}uO8a2aatLbNEkL7=-YcuF~eH# z0OW?4MW$HIgZsA>gB(oo$;9SUj^{);#B8A=hftXK6>k@hO5>*~aK#7aAT=v)z4cF< z*YAZU9`NH1f0uujOSvZ)@w>$SRlJEFBN|?9-~U!UC~L-{zh}XiKR8mU!;z%Knf>xC zpH6|m0f~Y4^oWM67#Wd`oX;DBxy13hC^p(!)}Xb4WNJtGwR4bk=A=e=fPLi@P1tc7IsTF)z-|yg6PII^-zz=(K;icN@8*)tNzoc;m-CO}xB7kY%8DjrvG)%Y-pW%% zSX|Q{lwJghdw&qq$(4$#5)92(X7SGTxoMW>Q3TPI1S_H}9inbPd7xCN(4|K%0B6%Q zLFNZO)V}S``2s=BwTHaHhQSLJX 
zepng3tIbbmrzcQ2f>6Y;Wgc+pi6 z=3W9JiRsjhyIHm7)gCl|lv`Z28*=>^%qZ9)TN1rZJ$JR3i8WSQLH+j*z+=4fW<*ut zqpf4|G~4r4)034gyZa5NMMXIK=FG$Vvih}_hq`=1vszlYC*Sq@c)ROIwQW9DI%bZh z{z0=9>Tg?m(%v>ZTO?;WD5JJlMD#Rc`Mi;BkzRBg+@UOTt>J^9P_PEFw90ukb9lbt zASF7NT>k#|K|iepLgkMj?#2NMX4@6Mm5P+M^^Cgo3nGlt36!ShNYh3RRyV~y`r>>h$EHn zr)vPFtT}%5}>-$!DO98I1`E~ zjWnzO_MN4ZBp91BY8s`0A$Gx6ONyR**|@fUYVjdXpLfW3s9*wuwqz@M2X*GeS0c-l|6lEnM%R9ZB9 zy9F<;eIcTaiPtlA9B`7vn5@gW75t+EnGSQrx5kye^6kkHNrg-A|GcCo8`Nn!SGX%R zyGrnN&<=QX;XtAO@F<;B zthZulK9fiI5~uUf{n_En->ndh!DTo=JcMbiH1L(r9u1(_wt}0!GFUKSKO-H?N#^@OrVf@EHgaO z{(Fe6Fbv`|aqowK&J*7n`bK=>XL*`XL{0l(@KSlzqvbf?`lPpSNPCe@*mKnP`q*UbmL({g1?VrwiW%!OW2UsD8Gqh9P+8>2qc+;~CKq z4@k*l+Ib~3blAGctv2yK0!Qs}>Oxf0P4mSyzj*2kL;jqjBm4?Ez$O^q+K88GwUY!| zpEKfJrP66fwZEXNQ(6DG(7yg4(2I(@T3IIq%dB4$$I>Y)C`O2yx8)X+Ue>||-M z5&d1(RtUy(>~ISikreix{#u>fBNI$nK>SDMGifn49S(OW+0lByNr@-;L2o6KBG=n7 zzK)IRPUkFCrN@(;N$12*zwAgSEXL?Ch#~JY{lU#yaHa1SlbPnGLz7i{bxPLy$dnJe zu&e0f`t5W{C!<2?nAG}vH?6R_4ST*!av`qZQ&SJ6mcyWLlYU8a<&N3NWJ;LP!$~?q z{O?4X*FsR~0#Jzh{4SAaZgQAF?UyUbNK)@&N>UCm+FIJ+mMMG?2H7v++NDacR zleV^(TfnCZ!8?}cr}%CEAj@`1X-}Eau4#N9qpi!asv5rLN13H9WZjE9-C$rKip-W7 ze(*tGuY->>+RIvNN)vbcBKN4f{^xg_u@XrGehHr`nyUq=m#T3?GRBFY%FV0%|FjD$ zu?lt#TGg%-Y*7u0?6yS>jg1apIJkzG;}TjsPI+~8O3ZKN#cjbl zZ_8pde-D}dur7p418}XD$#>-El!jwC#3K%H?AoYZaJL6oOQzVIQ@qyqcE5g))_!To z>t`@FzEWU;PcF1^Lu0@nT~4LPS}$AB-|A=UwHjtbJ8Ev-lHaFSJ~Y3V+i86@B09r( zF6CkxuGF;CuvAoJQM3jsB6H6}I$KO^Wu>aoY^KXkExU7Q0r8vRnt0F0*Tozka^!0V zm?Eo`(p{PYI_eeKtT&e4mo@Y}*qr2>T%X8{G19w0g%Ymf1R4qBb(I-Y8*@TiYq<+| z&^GNWlurnA_j1aWxV(6v@2zW$4m8$fsD5|s z&hz$9L*LoFvcjds>c-O#or73W)morD>Vd@3mAU~AJ(D&! zWkwX+F_IbArT?%)C$}>_@CEN(2ER|%*5w3CP9!IE7id0TDh8@t7%lozDwhlOdF{*e z=FY3sx~#3*EKFZHjmEHDhIv&)yeO?jj|(2D3Ol4HQCD7K*+u2p#y;H1XK!RCam{K0eNim{$5UeM%%SD;K6jwJ^8cFD;$s?I! zGckVS)_H`Hc=XC9rJ)3~+^ObEEKF8kpfu9(_+i3Q3hwk-%D8XDa(`9w(Ehh>Uoz-! 
zOF&1Q@4e8JF0Y%A(`H#~xb`&1Zkh;66|7wIlrG^FQ z4+}-qV0srUCiTZEOF~R2+hsZ<)yIn;KbdK&3}(&XxlNC9K0^RyJ5>?MhG>#@@JQe7 zy^(;unkUVthWNPAo^;RjiFk!7hnZU{rx4u zo}OnEnQTgmR|)ZMG%9us(v;Cna#)y~d@N;C()ds%AK_k7WI+oqQ-`nkX|*@sA4Usm z^e@a&Y~E2nrU%8Xw#JRB!>PUZ!e16xoH@x^7+TrXS^d}qh{g9r$nmN|#_~bu%`~gK zCz2l^t-f~g=a!6l_9q9E%q>&$XbP6Itfx&@f4lazwLirGHqhGC!-gU6+_|TRCVx{@t_LexlS1Fcj?pL0L@XZ(Zh6 z+DWAsi?|jD?Uj?nk?3TpMN7t9rblHFXsr~iMT)Ko%d*R@q}=(;#g8KK&c|XSEXT8B z11)K91e&k4;WIIlJikM1B}LA``OgGnzGlO4uX?iQybDHKymM(5hrG{ZIjdmMKwm5xy57)?1NlB5DuAg%r{VhA5!zPh7>0OnXF}iEg zzB=a>=9DjCn3=*l5HgC_;O+Ema-Yt0zOKPxs(E*)7BjFdI&>ia160o$a(!k*fR9M` zOJ%TlY0^dl>Z#W^frNlHR@6vB2Uh>}G- zXPie{Esy$`8l8G|qj~1*?zXWdjIU`PXvar!NcH7!^Y?~iD~~m`9q5t_+x+|WojT80 ztRnkJNUH#zKk~Ca)p>s5NNi?O!HD8bsMEG7_Jf*yIr*gG{lX#2uCD^}666H0?VAN+ z3dJ3>e1Qiz1@Q7#@@C(b$WR1BQlU-14|C!nqEqUZ2yQ6Au2*A0=&oZUwuhZ?vkidF z*PT+M8yJ9vCss2wlUCk#+`q49S?Sr#j9ct>5UkJRQph+JG;&ibpc+WnkgkVp`{mWR zEzvgj@_XaZ6?JvEiR3G3f+QB8^FV_dB@`%~d&6Ne{%#l{8whzy=JZYLFhUcvZ|{qU z?16x##GVAMOn=l7U_@y=Q`^l%zDEC()-b!K>hxij+)qZ*3alclJuh6(=R`kPQj{I` zZxjH)X(!EW{Wd9#b(yWH=TFhhNxGlVNPSx#rQK~Kq@s0%Ptu}BlBtT@04A9&=emMA z8suI`{niD74B2lxDX)e*kgVtd)rEw(TB1V5a*_t9RH|_AN=rmrX?^VTQ(Ox^h{wD@ z*PUSaY9S_!>b}OU3elF#78Q%N1T+X;9XfH{Ns`!=ZwCm$zc!R<$3JYEULsr}wgx#y zF(5mb6~%_jyPO1=9%P10dIVPz4jJG^+VAs&z%m?q4zqc&YQe2vA`t60jf{Aq5J1yJ z=`#bF^(sNbuT6#D#YY*k_Wo()8?s1}D;ZcF7qfV6qHFzii?TEe)Hpxwb1y$h7<%uC zWyAzc3B`=GVd3`dC9)79#0<4&cG}~fWg6J!(!_fSo%<+o4(3gfGVo6B} z(3BycykO5r<&Hj+Ihmg$@P`T7G|Y>H?r8JV=U|6Hr(K76%$$^UyX8hK@lH?lF4g66 z$>UiSWH&AUU>ATw7y$C%uV)A`C00f9=j#9+ZzY(mIGt@4Oo&yS;oGqu(ZMKIp^7?L+YEKx{{v^;y2R`0F9Sy^s^jfSSEq}bd_=JpSz)=t&6(h9~h zXHwaat~^JKXZqtG1rF2(L`mYg&$dZti}dySfJG`xR8wFUkO;v_t;Cd$5A@wo`KyIO zT0fx+3>iR*VuL`5JlAu%#9XOWY$r{MU1bjprfZuV2qrYRQ9q`bE)>1sI~zti2jP>Q zS#}P4Mr#x4TJ5RPxJ<3$yKXV_k__Url6;EFn%!drXyz!!)I0LQ(XX*3tPK#JBOD_! 
z0R27mZQ9_>DN@gqTssYG{Z~CGBe&S`pXio;T@(=+Nv1 zWJ!VrCajL{Jo6Z?ve7S0k$F>Fq$-*f4S>D?)))-1U{{EufQAv~(pi z>v}S_yI#v+yQ2sDn0>A6$}-Et4;LF5JoK&6rGPGjrCpEK%ohqp?!F2)(1?cspIp_+VdE3?OM*dDfUaUi3rru}WG*E%$cd(OE$&F*Zq4;u6~fr@ z2|dHkz|u5R6Vdv}TRRJzxa@!xnaRSi#(VbRlPevoH%$hGuDR zybU$Y+e6bqM(xzk`0edgPA6=tz_3;nn^fO4gckz1b0U1^NA(&HcG`Nx!%i6S{}B^7~Y&$ z8B_}l`j_;lgNO^Ax{&hq*cA^rbAP{bU29{cj)FT}=zgY&0n7KZv2*C%LM`?u{dC=4%3+_AQSNJ==!y@kCPan{HEwtT-JVz z&H@{pM_L(BmZaVRKU^X9X%0#v03geG*XFGmY!d4s13__mjJ|ym831|rv-Qutu-Fbr(<}$bRI+*4f0Un9bp-zQ|0E^zDnc67(iv= zC6DB9EqUl0dhCJNmDt>RSoR!daVrI#fs1V9k`=32xun(BJ)v2Xvwu(?sg!3%U=>{z zIf)-)NmRV&5TX3p>M0;MmE+m=U1b?Z{I=|NY1{9#Y{Wmvxx2HMeJiXK;Ac4Y>sEkcqZjU-^U$D!X*H#n7w{0vF!PB zxv?SM$fjhJRd&_ll_P1O-0XO%4U*~4&c~{K8ARvp19i=zx`yE66H_`3Qc0FQwG;KB zt`#}=l;!)H$00C`0GBM_JWMrCuO_J;d!6f)fZ<2@Ow%cv$j7aPw3s~eigWMCbr3BWnIkZ%X2VBCOtc^LhBu(XRM62&HHbxpE4$G&6?s;ggM z)BZfq$w{(LZ*>u>Ro6Whw7w*aT?rR*g0gyvB`|!v^Qo03TQox@AQyuX5}N^vP*E03 zLnSaM#w$wC&i9-_%bbeS>aa2{OWI+z>)(gzU;#)n#ZyGmoAmkZ5|~w7mY@{e8g>|< z=^2pG6nS*;i0rXz1~YnRXB_Bn4X0YC>nvFJ{|OwEYCq-o5;FRAr?=FG(HTB(wTPM$ zzmx<$o^9XH3IU5%&Z#uk)rCrzCdp^`tJ6Hw5G^H_K0$?S?Pt0=PW{0b^6pQgi*}t4 z7%uARAB}y`%krQsqRKh9_fNg``l)2%xJ_qQql|R8R@Ei3!__L>X4rCc-&Ib5=51<>6c8M6UFH zZ*emN0g&230LLvlA-r9my$hUi(X;*JOX7)}c3JcX*f`W|s5Qf*?e~%|)`Bne;`<3vGz%rrZGcHXL zlPfdL@SEqYxP>8xPb9>Kc~3wVCFXdQ(Zbz}aPm|=&Uvm6E<2ocJnHT`*U!#CSm6MY z983z|%HD5ljbuM;UDvdR>%l8^IXPN`C?>GKL40;I-o6bbY6$ACu^1Qk05JG*uFkiTVg||`rz)CJZ1NKu9!>oL8PD@X>rb&tuU^h=W;Grt=Fl|; zD>}{%0Fl4J{+Vk+3T@X6z$Yexs<=clrlSl1$DK9&+y<7P0zTBYIp6Mi)~h!r5vL8E z?T7alX2>i<3MS_NoZ0TB`ysEgRR=_Jfg0iVh14x@VDJ*mn;1%2_0{OnSb7_fZsQ-# zfYbojlPZIQxHPZZ1lI{&c7NpRtP5Gsny?YlAkfnZn;w+U0nmFr;ZJZU_aluNDTIcF z4!jwCutEFlI@mbo@Szu1(08o8&YkByi$)BY9+4bAT?Qq)HpE%M3TA;`)2STO%4F9# zpc(VI+t#8YV{YZ<(E+ML4^Yak_E&mUF`5-w#HBbvZ5w5lItG~Rn+CkPZJSEsX7D_! 
zjb`dxBhKM$R@%~i8cib zV2|Mk*k|nO%kPs(cb!AZFgUJ1p8-vGj@#IL(jqUBHff!vL|qFm$%)9MEiy%rq;Ezg z_4??x0Cn&~YgkNV1r6KP6tS3Uob5Si^wrop9Rxho-9s$3DOy@wP25sPBT%ON=IBqn zFf>s{y2g^`A8A*jT&5A;fg6x3&wNnF_lKm2yjaaC+mpS$3l4MD?v>WyI`%Ibdzr7W0x?XmYqbAnJRO^#e+kK%dG{4R^lG>afSM$TN-OnWTFqO>R;u zQ3Wwcz{%My^04y^kio1Earg0qj6@gfXbaPu@8s#{gBS35DbvMxKW$z{n48n zW=ca@-1^J^wY}MzM9>l%@UI3(oy;0k#V_ZoR+FtT#M5X~a$itc<^v+Ub|`ez!G?Aq z%|ByR^H^WjSyp9Kgq}OZFp}Hr=ySq86%J*8uWM>F7Vmp1&n$G+PD9KcMcSF}qW9^Z zKcmkLp?l@IRAeWqMm*CJrv&G4v7($HG`qG?rx$2C(Z76^O%TY1W_(jBo2sO*S+*ZD z(?XN{8gK-;!Y-BnU!C)+nQ&7pAP!fy#o+|orP{K7#Ei6AV3)dQO74a5Mv}WGQz!n9 zXJJ!nPAXq!2)T!#p?_~AlSH;i?76||@sw02xti@Wt#1h-Zove6-r)^X{MVFv{sItT zg?$`u3Ru2iiQu!EYiVbbv6{}u`%o&g<*fKsMq^USGAMe(db9nFs`4Pa64#=5Trt8u zyC{`j>3cmgBheMoH#|cf%SqpQS8Fw^T3LegPAwSjZ-1ehc?C?V0B$RobO#!Oj3_QTMr$rJ)W`LAfxi8W zpfH~6uN}oPTjLA$g<%(jY(2IVFxA!>Uypd)75%We)toA@yy`L+;%YYs=0){3%9NL+ zcuz~LdW7|UQ7yrF%=I_gPuh+efmOfSfygQgbjS07P1O{HT?u{2wBS?FbNrc0!G5}1 zSr=*LC6JH+G!aAs}nT1W`;M^^p?oA{dlG^h%QGK7LP21Y*41Yh= z!v7IZ=DXT}K!sv5u;viV`!8(Veg12Y;gS8r#0nel-cVVKlK$n!!etNV&cg2EfiLj1 z)r$L}Eeqo~=b$n#qdovRjsD^(Ro+8@C9}n7UJ@Uc0*Kazg&_W`}qq4l#RI>sYl`r-(L?l4-3^+5~QWP94=;GkPKE3|ZcGBMNTY z-mIKO`D*a6d>L+)jT1FwFT^X@ra~;7tpfVniR8X7b;-C+MN|s$3p_Gz3Nb8KNV-Z3 zs7I8zHQ+nHFx^CVpb7o$q5MbD0Wov^DlP4g)zChe$hGU<)$QY}UeZ6htL=F6Z}4Z# zEvXhlYHx%QXj#g9VMab;{SJwyRdcqa`YD0wkkeRWm|5_`+8rB`ikn{`6uLhCrC@7i z42VvI#I(6Go>!+et5vwLe#eGmQ@5BNMNE<_u>0m2VKGd&ElTyAw}Nu7DOz4?dZCG&LZ*HH%GUs#%I(A3A zWFN4j6|!2Z0UV6xhPH9V-Me>>^wcae?Dz}(LF5IVH45Er74I84;T<_%b&WC|@y)3t z@$CC;ltLpA7PWr^ce4OD00rVEDbW=9)F*qQMG7;(t^ zKK}A|C}$18J;j74pMJd?I*Q=KY;f=HG+L)UcwJnHwHy~^zES^ zuw?~KpgPmr%F7QxslA6IKWMCOwCQ5&z@WQw30<>7TN6C^uFW-il!NzHnb*e!JpG@O zPhmXyT4(mj>sQ*3r^{HxRC^M}#yYkvGjOTISOv?=xgLcYPIGqFO)+z>k(G|^Xy@Sq zivjoH=b_nv7n!ZAJXVb9BcGk}>dUs}$jwsl)@$I#>k}o?|NhpKpYHTKWfBpopi(jI0_y6CQqD&Bl|`7b<&Z;hxAZNWnU9C2SY< zd0(*;(6Z$un_&jiV(!!m_TI1XJaP>)>?t%#1bfq|%`X}oM-ddmXOW16Bb}|ZPEjW)j=~0m-mwA)b*iJD z%}eGa-Re!6dHSPFiXaxszYF 
zxa!Fr$4(ygS7VneeG&R8a&wgh$=u|)UBv!pre6$zgEgNQ#OD?y6A+V}1{)?}|PNEZXvNB1~)LoI`1Lox^zlTweV*;b2lA@6Ivm>fySD zt;J_gx-D}~Ltx=ZrX;~Rt@Js18DxTeiO)x?H7LArV(>Vs>f@|7G3+Uq4y#mhj2JTN z@Sw~SODzSDkVTEYUtg!RqZx-Bh54dlUv;NMA7@IoKQITB`L@sUXrRM&G+SFjo)SaD z12Jsr0>1L9XjA57)6qY2)B;yN`=<$4U%Yu9R$-|S5QIP7>1uVs8xwy2U#QeJXRkuXA~y(H!}K@ejo}W2&A*4c06@8f zNSbSe#zNW{A!5qA_1l$O$f!dG<}(;V(+7OOz&ny_aF*&csYVlt;yJsuQAwI{EAa{? zcz41uBOm-k0ByuwPOrjD~r#gt(}5IP7{}#wd2F3CO5RD6K3$%^2NdYp7Ir# zqF;Aiwt`ZDsI6(MH9m+CGOtS^ZpyO>MUp*&*V}M$T{d6Fm@`!(pN5fZ+0)^lBiB2$ z_>b{e;*#Eem8OTKSoqLQvVRw_iS6ispLY6E)`NeMeRFToy&zD}ego~rJ9%=!Ha7j$ z-kMnxZ5+hO01dc-q($3A=(G?k~pP1h8zMVFVf{m?a zFeky6_gop#RQc5rG33yp{78^CFA*F%FFgetwxpfp?T;+D_3oEd=bTsrcj@*{^1Tx;TgPyN(&X=ZbZu+b)* zljJ0*8lJBrBFQT;_b<4K>88gMQ3c z41o*Bq7xPUyPb03g+eE;TpIm%zW3FRy??6R$n$=?NS5o%Jprm7|OV`IH)&yW74|-WD!n3t08g%_@1Yv8(N;^o2e5pf!wpJg4rh)24#Cx9zVCBe3b_eHOUPrGZvN*n(3&2x+QLZ+x`4 zO^dt~etlr;Y=|c5lKgFErsS9lUfZgKtzafUkk4;)tf>IDfJ?h;)#CX7KepaFuIYCF z9|uH2X>UauBvnEIi6JFQmvo1MTM97|rMsrAuPKCM~tmupt|R-&^nd zobx@O-}#FeJg)b3U9Wh)>iIS=u@j~SW}d~C@?F~%dRKnZ;Mfh2S`SJ+7FEqkxWsWY z*d6sVh^nGrIY55n9Ig?W?DN_?#OvKoM%(|gYeO|kXX2m8G=ceUYa4pv67SskiCR2u zGxT&QG31DEVRZCBB2#sNJhkvssm^b|JENO>W>sU|28%N_+nnsrlfIxO2ru2a9$s+m z{X>}&lG}nn!4%n%yqc||Dc97ei*aZ!jjfOEnlwI1XH~-|rqZ22or{9eB2`M~bIJ`N zkUXw{@^hx8z>d3J;%^j5w5#Mk{I5A?yGvwPc{^2IO_@!AL1Aw9EOa6CZmypim;M2L zbP=~Am(HbI!@a)azblr)FO}rN9<;Ql2emZUD7>#h&#s#^2kn4nvp|p`Bd;J*$j%ok ztKHg*>p8ZYK#vnFXE~roHiA3$kcAUa!Tw^G`&rCVHXDGaJ<{>?Q;P>CODp_-Hr9Zwm~6za(?+amv&}8o!R=Zqv`%rS8UytuFweq{%Z)W^_KWc z+Y>=a_+OaveVkJJc^P90lYX<$s^;-WU?g#8K=YjbwH~L`HsyeKY_tMj+Md6oFzxf~c9;j+q?5b&7v{m2V_?`rB-Zdw%drq15{*NbSn6cqrIN{H& zYj0Pe5;9ib#4NOk%bYkX%7s5pfj_sxvA)zCJfrja&CK<_qEnd2;mAhPU#v+jvI@aZ ze&6}JFW-Bs@Nx=_LHU2i3ARX8dsGSvwyasP)N>TfyyP6bH1b1_MHx=I4WIrWCa73eP??WNDQ07yrXzJHhb2AZ&b($p4p5Q;z2C z%9K}Wy6mG=T4IrD+Vs-p&w|*2G&hr`MHBq)iw*Tt~i9mo6&vA4q(uX?3K51gHLOVqc{!{1^LkpuU$TaRf7{`()i9=D~-Sm@dEbCD{ z!3)*%{{vu}1&5!$2HJB5)itRF9LWoWYl)`?Y@v)vyKPs=S{NCOkg;h%5(21Qr7Ahn 
z95|%~h_LJnRKoe{{OZmnOjNBjy>3o?W$`Y#YLN}B=n62pCaJdBu37UccY7;z19;`u z)q5k4oH&9QtTY8ZiTbnGhlZiNRg>|petcp}1?Hh~Et|Hgvje`NpU#P=c-0Q=7vPaX zchoS^WDb9N>2_-L|Cn43KVj}W4hfj@^Q0Csc6KSqap882MtdeiV`f9RcC25ppUP=Uhj zY<*(;zPr*7aLkK%;jwizd@u!!&|JYCIJ+^WdzIWnEkrb)^69@oeS>*%Tyx09yV$yP{z{m``$#&;3aNRba=A{hLdk?+V(9aFDTJxNF7V4sAQ%d0sPh8=D0yb@Czs$vfK`i~O<^Ov|D zNla9)ree>cUWsQbLDOoF0ysy#n4Id*w(^ZNwIV&eY;`OO09BaM8k9hcY zX)?54q4h_3NS0wQ3drXf7s;Doi|XAgn zIgJGgNCPlJyw_EJ@YYlg(j(otV@}!?S{As%$p-nqL^+)ymwrA+zDh=#0>u~se(qw) zn;O4`)vk&BG{C`WRE{(wrOeVu5gFf_!eID1gJMu&$nWRc;>}rw;9sg)A|K2fRalmU z9HDJoCmU;luj6YJ39f(fMPxm6ICd=S$U#-qSF32c$S5kEGR(%kAgn}4pTS7xaUFbc z(!#pz#~&yL$v@(g?o7Z)KX_1NQBeA9JzFe+Qw}PB*@nx@&0jy&m0M;;{Vuf8EwpxQ z!IU10w49ltjTlN`hF~*YTRSmD^}>aglePyTsy%RP#9T@Mksan3HyxN|xjtEgo323v zg$#3utF5Ip21p{O;9g|aCa*qQuO~=bdZHFeb%V-BSn9<99_?a6>C>qmzVy{uxGW4) zKh4@ciZer{FJa-H<_lY`z|2Qo?JRz;`Z22BvlcV8?=Rb`&?*lM;>VYCn9r8ZKkG$e zrtl_%!xdn~Kk@Frb?kkeQb;;%!3l-cuvH5pCr|yGo!pK+9Pc|IvkqE+6#nj@_G4lQ zlWQr7p@l~0O@4x5l8TJa&z=QlI2LV2rUz!JnBYWtw5i@4sw)+K4S}f^lZh=>-6XP9J5~oRdY+BVhW5p~NmBoz32uu3X}7>g|!*u+amD zY@3klrb?f{)gkH)gOh_DPqa#WRYeR131yS_&_4j70e^(pq;*5kX1Io46c$v3MgV8o z1YRi9txtC3@SHxR%m8(y%XC}j5Lzf>BxpOJ z-Uh2Gg7WnZp1kL3D}oAy#9G7wr8F?hlx|!Dp>__QvAvXpzgYr|K90h1rdh%$pz$4O z{Xjcr7!a>3*boc_d(pnt=50&FDgW)!8co#kQs8WbZ)phBq)aa_4Weu9^ow;W?f+Qj z_bEytwy=fAf>VY5Q~+1VE!vow-$WweFaX;HK=s;<<$DFh?oi#kH|%jw)6j7ER(Lwq zq|=Mcg?qTa3`(<&D1quLFl@rGH#>*&WyPvA%$Z`mE3nN^TCA5MFPRx-`O9I|>XVzN zOe{N)#ZQer8?zpR;p;zJT^nE@v0I+U_0xTU1H~)Saji2R;D%{1Ap_fNf_jG3Ejf%# zhit=bS}NOA>yuX?9kZs`+H3=2v9IS=8#Hvg?5qwTpc;MG&)0?x5<4yilm3MidCA!@YJALUz{O@x%UMImjs9a`e1t3e&O zhHGHk$V&6|De?8`rrBu^`bty98g|hVIoHto496_R;0o|>5c_Xw<*s55KW(2qb2}am zQJfpRm#LKth^*$9Ka>aXD}6Z{=u!R?I)HiH?u;BQabI`zw|7#!gBuU`25qyGkqhu9 zjCv5vv$e}&76zUv9$Ji+o3{BkncICrKdiL&Y;X$|X-M+GILKE9A|S#jYWsH}gpD-D zxS_%vOe2PQA%a>lA7y@;_wvS6NE*-yl=C(f_&Cxq$alC!Amud*!*%;bz56EMM zo&fK_xN}Z+MpA{9a1U+tz)=?XhlWVe; z@h}Pf75H(=t#Tq;qBT44s%Xi1R2{_UK5?*J%FckGifxncu-3EeE{Jo=FxY<9f|m%^ 
z1y48CTVDeT&cT!INX6i+NtglAB^@F>xI~0xx$^dw)%Bu3)K{YG20z?WvK)L2t|R!^pbH(^z(qT4=-s+ucpK>Xi&>k7xcX_2<0(jOIhMT= zD{QEmr6{wg_+J8~1av}U364Pn#S2eSD+p?BO&|s_Ncznm(z$%;9lmy8c^)Rm3otFM z1ZM-nvnnQjm_B?Ho9-4fjM)J=nN-8^Cy28!@buH9?bcgf`Y3Rl1((g*>7~fYli7yk z8gt^3ohnjuZi@>tQjE{5Sjbi^&3e544k;b%0rp%2YWxD*1tFT|bW&0$dLue*aF3Bu zw-!Q0NUUq%uOFo?<}KZXvNJDKeMee%uNv6XqybU}f^~aYmec|9NBI!r=R9(AMqkSM&%)$36ki{VVtq0B{r`u9T>0(gy_Oi{yyb+LxS)8 zo~&`v$ml;EqW?ab6{^YI_uIRrD|IXE|1X*~QwTq8%C{MOe!TO(^9j|~>TwLSTuB@n@sxGbN z8a&FwXJL#NYUg)G{Zo-0bMil5XtFr9Jdo~Qr}Lbyk4&HH$U=CIaw!1i`>782jtOQv zU@8ZOp04&O+a6A9aq+E{@~>4S-uj4}_eZK`wM^i_ZJ>s67iYl&2ynoPlCx ziUneSa@1|I8opEPUq<_X+RLjll!zYY9YG!@qakF;*BFdG)D`1?Lw6Z6Y+K`Q8}oWb zbMoqynZaFV09@gGzOM#71orQKX)^t<+S!(fU1Nf;$BQ<2t^4!@2B{nN3E>tBUzy+G zf)qhop{fv%DNoE~pmmXaC3@Gve7g=>j%D31z<)=8Q57rol_Qn-SzCf@1{7i=QtO#i z&=!Imu1ARn&mlcV2l0(`zwwi+1Oe$*4?$w#x-AOm5OnL(Q$QfD!b`T794idLb(n5! zjWtx0IE<#-U2tuQo2qGBD~4|G4)Cu$L_A2!@p-!#+kOM8LxvBe7d$&5&Rt z)RZoIGBB;1IM~#^J?+ooR%wc%PBm&ns{sy{W6*zl5NMhJH-Hg=9H{aR@JC~5LgexK zOfdjG6^z?(W{5I}HmKNI>sA?lN*61;K6L`I0ov%Awu6KT7*>c)eb&>Pkoi6QqTP8; zX@_+i+AS#Sr~sc$4CsNhB1Z!N*oqG9#;Ss++typrww7Ykqy8puT&KIq=*p)d;j_AzW)+_aoQb$d`8;ueawnroaVdO5ZcVq(hX zYTej?9)J8)Y=x!E7o`%wfLEE32@<>z^1bxWi{jXKLXNifcd$-+ex4h)CoJFF4}5`1 zu#%9thj}5mN&FM)Cbs|bt-q0Ay!+suf6x#poX7w~wDgYC{}K}RcS?lk7IK?iz~$q= z2{0Q`ot|&CQ8!u{r%JW%BrMGj@rCm_@Fj5b@YHc@ID>hVA{ZXAMc7J2`w{NG?W2+6 z7P>@5*OxyRJM*$57xwt&jfk;v0rf>*ax&_K0D(ilT^*m~Q-VBXp}7nve~$=^BLrw&Zm^T&|YC*Q37@Xc=tBCvWZU z%!r(OVno%{#Y4?!B%al?kbPGpdH8ioq&l_#+V&5TQ5cG6j>CSlPhoDAivpLzLu*YI z`eiC@Lq%rv?HLs;Dl&}6j_Nwue}8$b*4cg@gJ&s?I|CSXK_j)X;|zDmX-HK%fiI;O zNQssKaNd!O{8y1%s{)pd!}AQ$*T_y};+w~3v>XF}9^76v*vi6aFP>FA-YuVx1>@{h4H1H%wUeu7{^vu zCNJokYwl~bp%pIYH{X+6gq-0-?U)T*5y6xX%@+9y4)5F7P zHx|CF?tck;@g4G`!?=Cz$m`F_AfPz>`$BC*3bo}1=$ zF1Ee*MJ8r9PaUu>ZE+ZXVGk-2QX8&>cTO+0-@_1_jSIS^c!(^Ap0NPx{^ZAU4>UBR zlot6V(pxi41v!0JZ%}*7jJ`nBbALpIWSw`brTvceI(FQm!k_$IvC+1v65-bgO}Im= zymWx@kO{_*z5b3)M)|X>%9Z{t-|u-g2x4sxQ5)_)%fyhPE3@=U=JA`8loDh(EtRHl 
z{I7Q`5=?J91tyveW;X+qW`QRZ^eYxg6ygnK^U_u!xtbRPLZFB;rroe_`+H$mK)WhDMFifyDMrkPEXw$pkK%5Do&X zvB56K6xFD34lpnmn)l@Yv6va$b`J}*^=~P%DHzW9<}=pi$0A&U){1WRIwjarPJ}>9 z&`*Md2kSf{nyvUzPHfsaz@VeJ&rfx}E77Xz6KPB!@(%hBxvzpJkH3k9-0h+ZB=0EC zi8FtVx*OlfSEh%7WP)9+Yh=JYkg>q5UYSGl$iPA!)eEF@V&7%x5tmhR8K@RB!gxO> z99C{L^Z4@3DtW*omHZeMq1r52*{OwsXAW0k2E9{BU-(RPUs}7lmAQLaw#uK_Yh~t2 z{s1pIlnJJ;otoc{RO06$>F|y|PFgSi`~sp4O4jcqU_Lqg%Y9G<~b@}mv@93cL7L03U7WoUYR`EuB8)soEGkl zNZ0<79RM0Rm{h)@RN4F2M2~`byr+shI0L~H0D`TQ40OeX?^o3zRZ*MO8$NX{y(Et)5WXM4f$>w&8=_-jYL<6NklT;R`F}Vi~XG z5B)M%OClbI*~H&0jD^;4E<^?*#bNvheiprksSz9zqIoNz9Pi|UM@odp9AaRx@_Q=W z?4-Q6Uav4gV7T)QH}Q^L%!It2%jiA<=brALa#s7e$fX;UQ8-C-${hF1fjGYjynfwj zuAgr!gYv=mobyws@P~WUYGc(lUx_4yD|+|u3Ps&VNI&@+{mRE(PiqEK7spGljPbjl zhMI4vx(|@8UNJ>G7mczV>WdxL#|5U?Lnlvg%~ocapyYoJd6rk{JV&P=a7ON5ZCs;Y zX{hAhXB z=Wid?ziKwO!4RAMK<1f52l?`k;M&ZQ>45fZT>IoUcU)KvRkMQPaG>*Ga9VgaG*=h3 zA%n5Y0>yi4&TwxuXQ*-TgP|-&iwbp|-h~;SHC0q&rc6$mg0`kJ*I-gA+=_tZOyLU2 zGX8Lu*&gX~6y>c*@A8XpfdXVZD#?dYX}TkSXx^bK7*k7Mo&zC3HiEO(F%mA6U~Vs( za?f9TO9x*ri;+14#OBkV}&J(c&KU6d~7_1v?a|J?b3YS`|6Fg*9c*ZQ9lQFv~{*}Jeo z{i4B*7l3QxuPvBQ7uJ$TO)~`qwhkQ*=4;im=T{^LrWGAw?OhNTGD_JGgZq6S_Dj_r z4ZbRvSXxQ{jwN^_4Mrskc5eGkcH02uaXL0|yd)X;=}m(0MKT%sjKz_|MWgvc_)WQU#w$ZqB* zlO-{|(d6c^H|A61&Vlzu&<^BrS}UTyi*qO-5uVd}6KR2kHTS8rDH;DQtOY44PCI; z$qF8Y9gFhqu8-q#eJxhZ?s`Bs<~t)F@~g84J}MZFyMMmu!k zzQ%ymtrf1c!TTlSq)y?<^<|jJ6>7O6WqPU{L!^1??v2EU+X_nfz(9fZD(}6Ip zXL;Kq*@|R%3;WFVIG4JYZb^ap0U^%abt6yN#m&bc0=1vArzw$7T0mmu^hn)?0?Y#r zWCe03G_>_Ab&?FD)8bbrGDy1v3sk{wrHJU0HbO%Rg^BBCnA1LUq)NF4nCsMQyV{l$ zArapA(89;*^5<99hgfZw3n2FU_T7>FwWA8R1#^OHge~F1GKIQXw}7|bTD?43{uZ+$ z)kmUCEhV(u!B&+UvW7nF3ARNmU}UxcX*5@R9~WSOC+gq9_r-44W59ZAFYBUj$RF-U7DJy5eczhfeZQ^rlevGqwzU&+^&%Ca@FL% z`JI00HR~BGK;peO_gF>{LHP$h-J&JEujV)AG8AwU9G*?Ng z%2n1+0A9$d6U{ER;3 zo6f^{8E{=3#D&9M#37!fns1~$8~`$`WRD+qMr(eTy^VAllQvY#iJyN^gnfs(|BDwK z)mr6r?}(}{zJO`s2<#pOPq2rG=RgXQLgv~<4r5ME1s`vw80x;clB#4e9?mDQp`?$W z$T>?<(jR~NEa#q7vnZf~=<$B-_47@%GOD%kg-vqm_g$;z(db&vlt-D$7<0vKmkVvS 
zkpTji<4HzUmfYa1fThfxq;g)!a6ttmruewrnTX5C0*rAAMY!uns!q?%yS%aeVt$ht zNk4T%$|nD58}s(t@L?3GbxX?H!@pG=LdCwJu8DcxFz}h9WtF%s0^Dk5P#5!EIy=8s zbXW~3+uwnO6T^iqGdiYG-a?lPC0LLy8(OHO$YD-ftw6%fRGe)zV)46MVx!2F7D8rL~dg#qK+<5QAZ9z$wskumsX?W+Ox`SKX%8O zYA3m7?Picn#RJ9jC_0W?p{FZct928)F;`NLW~v65$fz_&?;mQDDO2N(h;-e|j^h`z zFU&CV#wl8xuVF$qWe%YlY??pwbh1=4UDjOWGTDyOR|-Yki&fYI$?`)l7)E+6H;%QhR4*vLb0)cT(5{`}jW!(|bEm`oCk2B<$92%Fpg!@O}b@$lTZtLVNk<2K6&D8Hxd*UQ1|G>Xus;PVB4J2e-a}; zb{Dp?)gY>tm1ghw<)EHpnaVx=>GZ1KkKYSx01H2EI{$9FO#Q}N0RZg3YvG>A;q2;D02a6!30n_DM~D=L0TBH^F)oWkKw0W*&) z2bFs3;%XO~*&!B@snmnh{?WWu)^XP96W$%dgIxmVoY9(kip09i5d0?TV~y!30A_VZ z1kc!qgn?vD5E}h8+A88zG!w{3;t@o<_FY=Zf%7_QLvoqMew#Nw5b5&f{#vc5=9=<@ zq6JxfO245)%%n@qdOS(hT0nn?R9P2bSV0#^e`zhmKSRa?AiAA*v`)EJ0x;RmJYhji z7B3Z->^~&)sJCkG*|9Pu2X8Q*=knuLh88`n%PxZ0iU5`OVQ8bxv?gNX7!FfCFkgF9@Uf{wf=17TYw7}I@ilw+;79KoY2-(Ht1 zf_%sc@Gomv<{Mtbg@ebkU98grEP7?OXWP^2K25@_y5Hxj{LnLuuW4q-eCAyB+r!v@ zXsoi)><}K`U>@j-w7vd48*9g+t`^XA%&?2>Xc-pp$CE9uL zzIu;27a!qU2)E9U_q{m3tf9O(DS<<^u=mTs=0L_X{6K}|SoG9clm~G3ta%f;SNaPs znDqRXF=o>F-Yiticte~wA!_0B&+aCl@wlV$G!qx^!-B|W3(H6Sltbgcfu9$7GXg4tvU(kD!dyQ1D{` zKWIY~ymbrexk>b~;8LlCT7_7k(2W%m+Pl*#_k*DVj`mkf;y}395Epq!JMDx89^N+r z;&76kON*mCu?|$%84bT-oyG3=N-i#?xl665(tKvI_agb}uh%jiAHUR>u2k8xdSBjT zB#8uVNUkK{!`ecgKl^|shwI2WoTK?`&bK>cbM`;=-D38fzd61f2d_$JOs1y#`BuxU z>5lb-rx!IRC65?n7rDdB<^5VGV`Xn8G;*D2KVYYGlfBC(%ZBAWgn#nQ6EkE*5>3Kj3)$K=36W+ zERL&gCbMwgIk99OSaS>ILrsHIY$&nJ`Js(#{E7Ei{;Kdvj>K2^C{1UNj}?@$Bb}<) z5&q6KOxf{NnGuViqPV@+o9~^(gUBV@LD^6plP|yT{W9r%OUmpKuUHZj+pDMW-xszT zLt-oZLN9$+-QbvgW#ysxkS8N$tJ(e@}@{8PuX7CFTb;(Y$Wx1;qaNqATo+|^qt<;QWCwV|5vIJc}zmL21YJqyQqn0yu?RBeS0s$Ii&S zVMV)r57F!ApTTa9XKAV8PTeee6&R-EQw^fz^6(y?=W1+8tefnfrkiJU6x-Z?XPb=<{!aF5*={46r4$+e?y8O2B zKxyPxuhq^{tGcH17`u0X%^ifBiV`n_)aLj}>b2u6{)r=(>tZV@?=zRpzT*_824U_` ze*Yg$xB+)qY6@c419C2fVmsDxW^QK-d2_yZ^O0>i?LQZBpxk+$6y_p-{LNWuXEBr6 zWh96tejWNNZnB+dSz>-JTJutWV$9UF8JmLaX}_VX32vKg?0(6UAVx+wt_Lw$#i+Z* zY`GzqO;Fyw6&y95-?h3POE_mva)mZc9)a$>zr2rEZOPQB*S+Iebf2~h3NNnn@{?Nn*LU{ 
zHCM09ifPf>t%x1pFsUsmD@n$~jqk3gUek#0(yys-LGK zBw10iap)#9_x#itlHK(I4*Oqyr|Cjw%0;$y488Qy2{Qf{E!_}}uz7d>0xu&SH$Vel ztL9dVs=l>ag$F$iQ!I6x==Piqe%JS2^6#P*3EA0cwu?IZi?}h}b>-31##r9S$)WSu z$+mNMeG0yOWQa;Z;W4+G@CN4wko^~uBqN=r{VM%@5__G&sAC~94F|Sh!HNpIq;B+zQdqtxcNe1zd)?(_9n_=j z^VG>x>}IZ&4wSh>-nn>^mVG%oc1@a0zjTq8J7?;cM%|^JNI}6n-%<9+qPlq;E>g1_ z5ilN%xoB|SFHwNmT<;H2vzg%8LQbAs(~Bk9wOknR!ZKF-HSqvh=r5VocWg;Il;v|U z&b0g5+2=Ee?UQ?+gSOLRG$3lGaphQ|gPn@!rpa`{9gYSIns9c8&U!Oj)G*^}vqr~` zhnxkof74W($@a{Ky4LW)8RFhStgIe+*)6E+SG5AQ>76b}ykaCe=u12%NFd6?obfeD zRox%M7p0_d>@mW}4|>{}sz*6M?}4f2(Kn}Bv_TyEI>w3;on;bVP1=^<&{+W9wFKXe zAzo=HLxuFe{Nv{elyWPE=jyg%PxycFweod4B^DdK4KZW;Cl9(BL9%unNEefR7fJ{`?6n5ZMYRf178aK(dOCc)DFTD0dXc$8bH^}IxG!;9415&e3xQ z(%k-BE38_$Ztg{u_POa2F}~#s<$ik;ARuF-)Wpr8Y6SB5Vkhfyn&89ofDg~IMxQ&l zdeYHPtX@)7c<~)Z)-G`!)g@0D;J6WqtdEmd;K z7(2M;zOt&NCs4AY#lq*;f^0{X_$r>7{i*Hd{zWymAh{cR!t<;2 zPLA>0_W#^OILR7}=EXBF-OKXpO+a`$VA4Qy7SixUCw<+Mno5&9YLZ@2)s^0hkDphF z(IPqP6-kAfRScF_kb0T$JXi=e`r{TsN|>$!VuNZ$$d!C_M3wi%+se3dco%f!(j0lb z&O~xqGc%;PHs4d9-tW~Qj;XxMHG-R}?1SiPm?~l`uEaktbGz5*Gy-xtS%Qo=pGGXo zesiOrPH#VZJDoRBmeF{^JFx&g2}lEq=G3GNsezJGUt3NWMteA$Ru|7$i0S_1G8kjjH;HW3)47! 
z7A*A!-*T`ejVOD~5qPxMQD)7&WSzrn0vz6VV|nyxseZO5F>zGLCb%|gyT40~Q>xb{ z`}OkCW{xRwmHBXSC08w4W5u88qff%_a7Jp)$JjP}A1Nh$a*s`$A=12%hk>7bF=nsw z3HdLfo{WlXIh-VRT>oeQH5Ffw;`%A`i3|hxzBaWS&eP`BoK3r>D^5}aW|>v!zxW9l z+)_d^Jyr_)*@URSO!j$v$H^m6v}jJ$r?dFgRkJZusOQFuzj_#_6i5W2ou+p<)}(PT zD`9MWu%WAke=~TFLThgCuBz@^bbqXVhmr{zboR8yXCw!(PLN zCrt`urxjnOEs=n;zQq*|L~vn|VzOSHzabP*2r2H-AQ6mx(f?pf;hncSbnkWf{ZStX zE^wvvs?X>}Cr8O&XK}@nxS5;ct3#uM*9GNn=Z;Ee`&hM}xTIT)Oo+CDepjHCxGwTt zkNTN8#325+hbXooKk_9N;@;-UJbz*-FyhJMOn$1U5O2@ST%%q1VT~P?!#gGGHa}VSmvx}evdmqqR-w$q2Z5+smm^=y~~OV`Rcx5a;&gVB2s%fzeR+eOydI?J_yPdzNDt zgOC$B`bSH*J(ND}o8|N+cv#CQW2(n#%cw@ET4%TXr*k1!-y|P}3B+>%>O_-~1&C0; z?7zfbExRI=sfmf;Nnj(xzvgRT9jN__$Q&5o*wdw#zD@<3p5-R-87pVeBhME|cqLU~ zlc?iedyic6Z5Ynfc}6e2P6F}%pV-BLfhtHWY9v&5anP8P`+8XBs|(F-J25jmp+t|W zZ>3-uQpcZqUv3nT0 zo}FfQu|ULQb?QR%F$1qqLC+7L!ONspqh>7SO^}AuF~(H?9gjDoud(W0Vs|%WAOaF9 zBxiklgEY5m#mPTMldS(Wixa6#O&NPH8|Tg=9i+l-LA7@|#u;1M*tV2Ud3KqSLulg2 z4X<_Qh|1;iYK+p;-Ffk?JzhB)yKgTW3S3a|Mg_Csjh`zoufD5FO1!ATtw}Ye_;@_@ zQAs?4`5$aLq84^&hI{3rITDnJP@BHkK~JSQqgv*l0Ly)Ns+~qj1~lS+Ar4!J z!$GP{ZRrq;mY0D>JhP`tVIkxFfq0Gbmmcg%+dhbLJl7im`T5)-vpT1-s-Mb+J>-*f zX>^QAH)kVDjZ~WFkRlO8fr4 zWYc2YZWP^en{4e5O-(%tO4{teJsX#;Da*p_bH`F`RXh7BdpiQ4}5%U%A+DP2l-v;Q*cux4E%51}pIIfG($8CENJ`V++o&6AjhD zwM!VfMulplBj3e$p!Vk%-@h-+Z)fGd)A{Mb#1Ze&)gz7s?xe}K18x2CLA5hoSM;!_ zGmS|01u{SdhRA1$h=qLMy-5br_BDPim~0L3g5n{Ra$1l&TDHyr^5})))5Yj3y`>jE zzooqIKB;70X9sYca5f~3ufb5aFU8&-kyVXjQ_TO|WAwJGlgs%$Oe#Ycd%7@Lqhd2# zC$zDwMtA)d)+0vva->oLPZ(^W=ZH<%6Kp0b1`Dwipt;WT6i@d?Q4LFsXq2}+Y$2oR zINhrqk5qEHkxR6fBKDo2F8E%b?R19&4vOI(d2mt5M6|@>>!Am$ym!`SZoYiFRh_*0 zro|(+vi#Mrs`z)84-7sr@X8W3=Yt?7Kg!R$x*ZNOCLFhV1+>n2y7OHKQUM1emMbS$ z^iI_>WU!OmZR4=DqtnrI!w(IfvFp6z$FCLpdyH?dfD#G|7rZR*!+48B;iwu>69Krj zwaTu72e#PzGwS8=7um|MF}fXRQeC$UuMN=fxx( zbg-vZ$+WBs!II9+@N;!F$&GA1X`t6el|>8P8X)nvp`eb^~IcUZk6jOd%&NFOCoIV~w-+_{%qVyJhIhz4=FM(*~{EG`&8(Njbgb{b4 zGGZl*-D&$}_dKf-S$)?`hnkO{oap{d{;SIR#YkKt6Dgi|Af+eG1&EUpABLUXNgr9& zHW-iXBDMTG!xffX#BJ7G*gmOOf2?s}hI$65Wau>DQEw!*d^V@k&mCuiXy3< 
znyH=5c0@(onnI;Y6ez9zZczjpw?gCIo_Lsz`9~)^lbF{!W=whp>%9ce2hv}oCwbBy=^#~W%_N`e0@VVJc0m?rZ)zN8f zm_7|!1G!Fk08gC2;!=(0oaXHD*?kx2?r>P~*JI8)27g-S!qtJ2M+B*_O$RCV zrn2iYsKl`1qeSX%hhE&Z*`_vG)N*3&?l>&QZw@tOjl!Xf3IzqX&ZbA7|2za%3;b$& z51izv!*;=EJdA=c@w9N}qxH#+JB`&jlmBq zROB9u;?_AQeUpbO3%%s-8YtIcUnX?XY(|Zg(F@f+#rbpRsahmT&eLDX;0v~oVBC9o z#`Tf$#di!F7@FF*ptb;73I~4nRE7^yTP3gT{RXZt{!T8314>cpVZ;9Zcn4`$Cd4R{ zd^MH?4wdU;Os)Db-Wsblcg<_t5=biF>oD9U2jD>2CD4Y)W6}b87E{I{k z3S{T_8)F_a5!;apCKx!E~GH7deiF*SGDj=x`hf=)JRG;oRR<$z?-Qx+b zqgTltqXH5XOu+d1w(M*WQr2KPkEnp#wYTYQZy$r+D#8Z~pAyj1f$O;dXxG0q3`lgG+cny>aAG7Wi^Wmz%*Qi3#5&vQa0$%&jSpT|8!* zw)-;H+JThrg30birq{j+%@nxnTV1aJTs1QJh#8H48tE+&JB7yC#gm= zIvS`9{x-y=54bETd@u|=-l@Fofdl(h9bWTM{G=8TdO?M z%$4#=iEZyIUnX} z?FPN-f7yR*la;EJ!b+&V>4T$f;Acqjd&(`wAzu$Q7RYkiKX?!()^cQ=V1EW{Rds9w zWa4)}XCE`^eXgV6RA2KAne^8`HHA{JY|Ioo5o7jU48?rpO06n0{~u3p9n|LEH4h^x zR-96xSa5fDFD`{5El`S;Leb(Hw75f&;_mM5?(R_B-67$<`hD)-`&VW%nMo$sNA~R5 z-P7)&0(Mme2|63t!g9|3bX1^;UMC9V`>T-qOB{JqL@edL_B~Givdb3~JGco}TV7x) zXt){uba52RafQG!d8PFF@v=6a-isF(VB$2IC`_oE+ zEdr{!D){A|9Q$4_f*@8Nw0qD%%YNXZ)*;GB<^z*jl2>pGt-0H+u|Y(}uIMG!QdhvQ zNS@Sje#1GEd{OLwIMxQM^LNlOazo-Z+)*MB0qs|B3}esBUc3De`CrXR&x_!}nn5ax z=b*PIRonn+(0#&w zMBB$UYw~gpThTevYODnAM?9aUH7n_cQF5fw{UeR~BZGc`eyWToMgKn$2{%I9`V1_7 z+kHKD8~?6fPCO4tklaVF zO?xmY#{@I`O10tU-;?#RXp4kaPWwnk@?*C6Q;5rYwk7I5T*`h8sJ|hWQ%Le>F!K_{ zs}8f$V)hWSejPJ2(cRxWJxE!>#Qll!lWrax8N&64sB6=pe*om)VkuAo-7}xds)|r! 
zLJ|Gu42$c*HBhdZ@_qn&E>cd^TG8Z=DsJ9+i@hDocWJ+Wh)>&XVBq-D;|cNMz8)!F zoqgli=ao}(t(Y_2uRUECGu0Mv-CSSy16v(gtn{i#Kt&I1hjo>T6LVqF*WYK2@{tQ+ zNemnL5$z=jOXSL_v2+c)Y0lY@BNk}AfhQJnf3VMK3K^M*c#?BqBD(i|^ODErlj~-& zUm2Og4#GufiL^h^09a3`r12OP!G38N1vU$)Xh&3Kx4DK&MlV0xF|UL#S2DxAzs^Nt z0TwP%UIfxNE*Bk9UJ&g1x`I z%^T-x$a5s=)?l1G7#WH%T@q@(D`n8^XdI;v;8P?!&QByDi$mkP3)n8{1(GfFa8Ivx zwcQTP8TQrGmYgcw;MZ(o(m%8Thwvc!QeRgGqIW3fiBW&0hso>Dj8jlv_*#McY?YKw zB~V0FzyRJ28 z`IW89b?QoXO}fwG@R<^`BxruE)g5n&hsCO=oEJe}Z9x>cguvoehW5vz@9`Q~YteD7 z$XkBbmL+estk;6bo==ODD=RCB5oZ0pZ{Z(=Uv!tW)Qg z8*+|&{uvr8rxKF*P2gIoKXhKBSO5*B<)MoluZhFK@=41AqZLnEWvRhU#@DJ!`l_yU zutR5s(E+O$fNf!-m=e08tz*3?@;c&g3O1$Ur)LH52>j!f;L__YbnFqgJa5A^svvTH6(*h7`{*EB=)n)% z`SLl6vhstmedD+#(AyqAT0oxc0>-{bozh;Z&_*v7uvU=J&%CeBuxkJ|Zn^m5|> zSSv3WuU{dU%dbJ@%5t)wBGPU*sYDM16zT4$enJN=knsG#ND)dhS(Cphi=052iP}`r z15i=F3hn1Gh~0C3xNE7A=}s6v@hR&mXkCN^oG?k#RC9RN;AE6%z5L3Ftb+8se_Hu; z_TN!M{}y3zxA5^(C;xyv^&vsAm{$3}Qnu?9XTQkqRGOI`a_tEZ1&xuP$bHWVTWYL@ zeI!6=VZAAM@RBaPVkVOk-}=Une_PaWK2!PgF$YH?mrhAo$Iao4lFQ(e*1~&Ao#g_F z)|H09e)ro$-k_I#(r`CZ}E_VY_X=JrSr9Wk5Ky#Roy&Dp~-Ba^GwA8j3~l4 z9j#f~w8=kr9S$Jp6)txX$!^I5^X+d{+s_SNST)9>KNWnjwRGLaqm$5x`aCRursVj$ znTfiTE4BZvi&Jhnzk*;5C^3XHS6gCdKqLeIcHaYHJX0>hqXOVJ{Zk}JU{BcVjoHXE zPq=v|&jIfx?TOmlN;k)L#-z8U>^Iq`}ix!ply%rsQ5Hyu&+*|uoTPSkF%6oC>8QHwU& z>0nj~sj5=jhUiF3Ws~u}@^by)c$-(Q-etBd?s1|x%+KH<0knyCH6>pX5yz|~ayE=0 ziQj!ywl0N5P6yTu2*7{6_B)>>dQj4pDgzpKR=>YK2yN`VoM6&uG zPHI@@+c}nMyvqvW-gof*zE2tNP}e$qU=mNDjV_18@CWoS0<=PS@L!tu{UVM;CC=8* z1lyOkdh)AJ4-V3Wx%XHmfq#ZM;dx_cAXW1EsG|+z!t2D+JiMR92u1ppPlcHfl%Cqt zul$JF00)1ig#4E{Bgp0K{_IUlq@`#KTI;Rw=4@HzKe9HC3ZZdjxV8~^4PpA2+`V7Kpzg<V*!1)WEoTfL1fYmp z*Tr$4BOCrXm-d6a+ajYv-IdnONEH)l(c#Ja*@>r#fbHObGthgs(-NcZU|5m%?~Oc^ z^Ug@}rlZ3{P8O{>r$?Gl#Lmbk_q$V5PSNNYm08*ZJC`9|&S4xcTNB(0UtAOgy!^Cx z7NXbZR6CaSwePgP7%end1gvbK$fF?sLXXWO)y0#OFpliKwJfanc&(dza0?MkQU>`K zo(VtIySytFZoNA^2I0Qx59Z*$J*FalW5}@oXSqqm#*CnRgwoUp&I8AjPjG~v=BoCE zG1TW1-T#~bKPA1r3<&0^we|&GcB;EnkE-7Cff(`OR|sIwD0jZ)#WNR2c9R5ZvSL}D 
zhz>K;n7F)ck&_R=hXnC_EL!ucJ5%mlc}>yGo7bXU#kVgIwJ4{GaRL@IEZ<0d)OHD> z)SAm5lYzTy$MhJRW}669FSm*|8;67k^%j%DuhGFgW%OLcIIK7Y5C++mzI6Qr4rPGl z&bbOKd`#w&503ET)TH`t2AnyQ(r{zh$&#E9nbzMyTo_>vEy?V~eU(Pd+)!!AvJ&2z;>fUvg*QaJwocUDk66Ve)vfh zQ|h$v#oVjUM|-}4+RPrZS}lLviurNlD|l9}BB5r;?qOz|!TEi`+F*uH1?ZrS{EdXJ zXHCV0ytXRTU?>Y$?a$+x;TiTg8`U4h%rcFWA5xcx_v=+! zz!=s0>1;=t6xhGu&bBIA9GZYe<&xp{w2uR6jR$ac(32rFIN!9F+<5A@XbIJSi-^V} zKWUH3pKi(@F#c3p@8W>dp&%!gWbn+UIx+Uj`B^;A0McBdx1d7*PVjaB)TO}-HB4R1 z5PBG?euX@sM!a0-g6MPLP0@|_=+m}Yp_Ya+;hcFFbVNQsWG@Y9pZ;01(S0Pr8G99B zKL%Y{9TH!pINPyLbV(`M+8cGq+)o{e#M~-UoG&@1^3CqDx-S7$^A3y%5-%#!G`y1z zbte)nFqm9so=3W84Ll;dq66gB2R}&-@}Rq(XNte`)67{kcFcuB_aZtj!*#cP_KbD? z4VQWFT_QK@55r0mrrhmeYf;8Z-`a~QfZpLC7LOb5t z3hsL~IZ`30PhG15%rf1iR{cwkt|Q~(kVcIv&)cQ1!u#XX5hXm+M?zNg5-VLrTJ&4U zw^mKvn4ZJGuD9n+52mO(p4JkMQE!xfBr&QkYbtnb9XfK}^?s@{`fFsqR;Q6?;2eET z{!=X%eZ21j802EGzsKzGD3UF%{E6p32rK;#{$Ap--BRe^F@=$7coI~|_m5KF@qs%v zAlsSTdUhK`rM9@(T1r0<7F-0ztxd&8z-ckRvpr-)IiMY?BCAwuDE$!s{+Gpumx)r3JR z&3E}#9^JbcKK*9x6j$w@@z>_eqJJx0s2HtBTOuh=mX3pJ+nkeh2O&QNo?-+2V}9^E z$wj$YH*e^44oMv#1StTUM#=2Aqo|hf6cZawF`Nn5-$a@QJB0l$Zb8f}qKIV!<;SW= zZ`8xBBK5&TDSa?!Ytu%z32j~^7~vW@t|@fl@3~(c#_-+Au*hnSXa4i?uKRX4fw75U z1Tr-ZP*V{iBtq`ba8hDFc8lQslCKloluoj8t^P+1?t{f4iOP{bX&EXxZzGe^pDxtq13-7IWz9W{Vs z_a&{KAO3ww(Z^$NUU@Z1-QhVa%m`EwWP;{{s#5ZKQ3RSZkQjF2||ii{*m`Z1}(aChmhxCNqd`1(U5bR^}eEQM=l zU*$mcBXO@|P)5083o-}kte3M^`~G?m)cM%g2G*Il zn%i<|1b07DEXZHf$TU0mM?JD`wGVc{Y%2EwiZsnNmTu1j zyflT?@FV+MPzhx~<#)+9huNuRSpNA(ZS0kZKtpzi#SYr7WwT#Tt3zR(*@ELO)^WFa zw+?}H-EB@>d6$c~d>n^h2WYYCy+MAO;@pG|k>)qp6$u!a*#hyWaWY=qa)u01A) zu<0=qSx^uj$MJW=cfk2nU;+XjBK#ZvMFkl0dzy3?;y)A{RBBeSv`QY>;%bujW6o`O zEe`tjqdXEzFB|@Y|0eIbY~r(GcJXI)vfBa zC>yaG%I4_W6xWbCd`c+$5%@L}9a=K{sM6?^PBjGzarQ`}kKiQPhc2D9V-)DO$K}1Zn zpPO?*J~oJvL=5%y5a^}f?Ec`CuEI3wwzTb4!kQ(f{@JnSeP za_YmLduwMVY97HD9AMY7jp@gK4K3Z{{>xATp%0ynRlEJgMnYPS<@S2V&)W-@JrAa* zICr;;`c1e`w=YJYWPpz9J0E@N^X(T`>C^?E^61l&JtN`BjU()u(W>d-De>Vt}+RF;XG~-WhC6$ zUp>Lke-tU=GP_4>TjKxEj?qC731}m1TQBMsj)8;eGsgySdgS5jv 
zuXX-8<Icpt#BTV;&!aRe$mPLx@26i9In6>Um(^z`{&fS1)fy*H8SitRu_? zVtrb<_<73E59$NcLobQ<%(U3CiX(-x=H|ykq2H(L&72#bV$3-y{Tt$B@$j`j8k_|k zx#w*n{rM)|@esta zQ|{tRK7wfU>EyJOt>bBsS|#5&<~Nt_J2X5c*yG8`3SxosWYa!jYa7bss50Wd(fV*X zs9X3LsF`~DyhD8P$gKVar334{g9~YrV?4A^&cAHl=T)xrQv_Mbd=*_yGkDYUxrKu3C_eg9BDsE!j13;`3Ub*(HC2wX_FcTTl~${QWt3C(X0rN$LkcDPTaVXU zKg7sMaxEY8u2gYJ-cC_v?8$~{T&}WvvcwyAR!VI~3Pr6y&ei+EZ^|m%qgx^(i(vYz zkFx~|RgLxcN1!duk$3Xi5--(@CbL^scyDN+{snTjUj+t?;B+_+{-~}ln;mi2^-wHP z?;wYl_}B%6IK3}4f!qOPj(o5nc$efrOWg<#^#E1&ALY;TPK%=Jtz7^*7$XyE0)GM< zE!C0U#D0v>jL<>K$@*XskWa8z7O_WDjK>7_x$l{cixBeonv$_o{x)9T6U=BxEEsNH zRi{kO0@|QEK*px~kaUItyW89s@wBXYufDMMm4=42kBm!#j>Q^<8mOy2RLzpOw$rfa zy&{6OfTZS_7~!da-Q1C5jP-l1wdob@;}%Fc&V;sqpppk&;?ae<^P9Lhh)6R5k<+_)kkci`)-v{)}g1)eh+17>qtbY z=XQM;TcAL4GRWep6lFR#O$~~ntdj4nI!R+>U(~z9&P{o(Xu>JLUzG{bfaK{hz^({@#>j^{O zM1S!tTrG{IYO_c^yIy=BGV=Hk&4sr%BsHo%@Po9ma9t{y@(Ri<2kp~z6bvPih)=w< z1`fI zTsYhwWzDAj@0dr6I9Q9nIc_}A^PAy#CrAzDtvKR4Ez#K=o}g&;C$wB{D@K$zzb@!; zuA;&u$0hHlrDoWgs}9o7UUMC|01XI!3_;8xrFYzawoyoC z+9ggO4|BGW7|l|t7)RZ0vV~vj<2lcu;f(n;AFdxJmsP^}9v8^hV*6jty@aizzf-A| zvJsK+(Tq)n^Q{qj{0)JGQ?9t5wRU0&Bp7(PM)8Ufbo{49#iju5Cji|mGeJrHnzH}Y z004ai2Y(L{e*NtIz~bd1$J>5$GRzS^n^5NN@~!2_Oo@nYI*)`x*STf@+V1)38|;QO z$#x(6;esbUa(N%+n9Ti_$JJua<5L^&tsjk+OfoxAn%EC<*u~5kM~L5Cw%h{c+6~Q7 zp!L!6%S{UNldYm&fFP%p*q4D&y}JpGDn~QL44HJcr{i@x9buj$(xijFKHIzA=^WMZ zQ_O^i%d5RF3H72uQ0!n1Ea&AJW&8V7_aRyHG_9%^h}}X?5z{BJDPA>dM9+3xm^A6M zjD?{3tWn81PKx4XYU1{Lm@BfFIxfMG90s-D*hosV__r}nK5N(c>*=YoHit?oDGm>h z5jg>~p-Ckq&QGZmVxmtNu0ML0I?7a~oQXs;%2VuS{*oTb6->~yL1#jJ0Y{D7a6mvuMDYbX*=t#Xk zbk+Lf(H@pJHRE3_*JkIl-)xYFVTpu;v#g~Ty{7>DAWn=zIq@dU8C2a8laB@Ge}6C7 z3(Y?wHO0p{+%Nn^MvJiYB~y(&)yEAfQ>`3|<<+Zl3RL;r5{B)aJ~Ixv2f;c!AvX6z z+HJUD%nEHcgUt|TWg(+^@;akGUMIa%09zeXR0LG7z1;$3aVTKqj~CMSddUv!*mI=b(6x&HbRTYL!hHP-VkNZuUp-kak*` z6T)gk1=HO2IUc6?%2gDOFP4%29%eGxC>9V%lZ%;5!xqcNf+N`R-#*{w3#O1YUEcDy zotOeSGmriZn8Tq{F`-{vp6fgByB{O30_zO@?Ctw#_E1sG1`!72JO1qHFMkNUN-|^O9=pVbkuB-Eao4;I!~4^ed^0H~j6a_F_))4Q 
zvf7bs2L*vPzy-(e+LLhFg+hu%*9g4gyzVLS7^BF_k**y7Rq|% z;Ta(DwVUJrBm` z1~nK*Hc54G0xx|H$iT6V&y)<*D1We%^^KfI2~xj$TN=HH1(_WlWO)eCh5W8lj!K)T z{|H2VT}bK2vOZ*bi;}9mjlK))f(SnN06~^;i0`Xow)@LY6Sn%t{$vT-rvAmORfl56%o@9ZORBfO~%AbJpRvPV> zz17ScQw@SAr|{y}pgMJ=F=hJ6x`>1-whvaJJ=C}bNqq{|C_BIA%k)dF9n*RqUiEQQ zK(dz=nBTX;eKKDv3>++n)MqfFaOemeTo|-s5K2Xp`S)7tntI#05#4n*LfVL?HGoBvM37}S$T#KxLCl=J_m2F5kQ_XQ^uv**jB zmo@GtcOboROWjXKdcDbEYueA^ByV+G2a3)iQuup@lXmR>PJFl_Pmejs|G?`p&IZZb zaYRVjaLbSKR$d3M*IDW`ucF)tADi&w9zLFJSBLY3M-;UY-><7l?BSp0`_>_l8PfcNq%XwI25X&#K0c z$FlH`uQw#qGa)%3mHX%^Nqz+C*Pb571z&afO8h~jbxG63m_7ymrQ0yun}jZ=A<5!p zVdKreoL&nX8Qk?=-Dq;?1TWWooG(J!97G9~FlyYmM_#7{NZN5-jJBOSv=CmF*3EFjj8xPe^ z!`}V?xWtiyB0DOqfq}BP_rr6vuuLLj_siVd_;=MOq3x~9{g~wYa;m{r{U(5qf++hmh~;~aW=?Q; zyi*^E5QPsj_o7ul?`<}o$()AuGGFb^@}hNaMqe6-SY|z+RI84;TsbE9TSpc9O_X63~amGd`>69>or0AdeLaR-0 z&(gx{kvHc@0=?D@Yne(i{AUXs;1=*=PXAvPz?#N*XQL=J*fLXeA*v>TJIJt5?qgG% zWUl~SCiBPEcrC18`Q!qBS5%nJtK%UC=Fd$u2XjWT5du22r0%vJ{P`*T3S_R#&-Ji| zXc=@LP8l$Hdjgkyp;Bd0qV>2tBzwc<$S?foq*%B;6H~7W&1R^!HP)p%1}|8k@nn;h zWC>OkV6~O@w5;GdJR4fNRq=UR#;lg@gP8l;=OpNEQ^x0#-RmU0_wuT>S$LeZ=gg___%>=4ONeUoJ~6N3rt#IqfMR8XnFs8g|jRMJMD zx-or+T)7u%ty_zl9Z3B~VC&uv-c=6IhwOgT;I*W>B!H4QmD#_MPVAbd3yJDTm>)$f zD6lW!Vw%ukCpmOzxd|%$;Zz}OvvwQ2nD!i?_S#DY1WK-U>$YV*uK3eQGn%Np*TmOY zQ80IozQXn3lX0Pg@#j>tblL=Eqs5DYp7!m-3AN-RRt$*;fW=-nU$NwSOs`GSjO(pTS^Et<>!;Pfv z?!Yu5!cJc5mVn1G&!XCH+0a5k5D5Ak+zcTpdE4W_87wFPLtTcA*pM5`>yZ|TOioxm7-Pg(%>uro>FtwJXfk zgpMnN-(QgY)f2l|B}eXIs2TP0k1lXnM5THh=QmZvkzQA&=&U4Q6#@cofvs0^8zjG*c` zAM?dvc=B9pc85ymeU0_lQEyHHzWz>I@@V}rfmo3o7^6`Fzk301e>rv!wH(=AxV(7s8%AUkKd?iD1WDKD@Z)d zs*B{31DR<8l`nEcPle-};~7>t|Hys@z+Zqw>wYg0jwhx9qn|V+jhuli;rg#wieo_d z@ch?Lj1yZOmeJwtpv*6x@*ow686VGN3_C;(Nb^_k+KneIyje zCK0I?C4> zXy$y{^iH*;rnU=`s&||5W0p{rb#Ua%8DB{*u-OQuYdS;`B0qYy>&6#VBLdkUm$GnT z>X>B0I;>;uMIpt1N$OuEtEz2+>Cj2vUnY+JzEGgY&U819V(<#*VczrazRN>_ z^nJGT$48A|sLTD|z#Tl1@n|5@gH^p75huU|zug#I`;~{REXF@Rq~dcF?cBX-Os<4S zr-c&wcdqD+o|MF%TMHeEAf|VjR@c3Ba0lu-o%Ekl_v_caC?(|QK?t)+orjWumx3bp 
zR#otBBnK+xSzvscZuaWN5WhIy+QUtlkX+l?7w3HbL87w`YlH73=4!KtvfzW*$zId! z?*>8KOK)YysmaCpFVts^vlZR~l7zvG1N0NfyVNP};+O89KaM}3nEE7_6j*qV4n$J{ z9pc>125&l)*=|gcW~08wf#~eW?^FziYT(B2n-e(3#vocu7wE6lpL1rSxO6q%1$b}8 zzY6w3+s0USYA^=XWSavd#NFIX$A)S^R3-QR@#)XlS_w8=`IB@atWo7LudZ=U@`#b; zM8KXWOFKbTR++%|bSeOH?+A&hK1m>>^T?y-fTMc5E)h(Zk>#ZKiBXIVeHOrM3zF7j zMeNn$$ef?f)+$9yk6#Jj21fjGL`ZOOq_tv3AX<#!1}rQWuRrkP+1I7I?*g3loDDUZ z#Ab(rqM&!ibq7ZJzBu|6Z;-KOb}?!6L8G?T{ij#*e$uae)tkIIR&HhotHvr{12Jy@ z?WBJ%LC}tG+CKkvNK^g9Xrz&cfKkn$VJ<0I$7WrKwECxQB+UnrMYQeAAjhNq>YLb51abSAp3dhZytJG z2I7ZJmd&eaS2)sZ6<~z<)NmK^<34UeLQH zr~5z_lGBZ*AV_Y9UHolKAFP8>G}E~jfEId>Tn>|b=SE5$2!$2~l=5fQj(V6jevJo5 zxnYAP);hJ!%LNhev@yFF6?JInEtp=J>!dqT(_BXy!)ID#5Rb>Qc}9shb?J!nXOSMd z0OAqv#GE5UB-t;v;(CriTq#(28si?jowDbqUq^j@fr+CBsm_h1e<&ZmT z^(@@FOd+FV@wf7fpAu-%Xn4lJRDbNe>HYDu*&`%t(~CmE^ndlf-sC7D3@CvkOsB-K zbL3(aG@ZrQeiloc`)V>i(Qlh7su-mM%gx*f7ge!j#^eCU`xzWmn)R{a^~67S(LYDQ|BMCfAhe*4Ac+&PiU7^0-K<+kgU@+v z$VAU&g}_iL=XD+xDn2J5Aa1B1{Y;ORC@#o{9u&w72Z2u|m>s1e@LGCUmd zK8ZLNdHb?6BDQjHR_D;d_4N6;?#9^#*35qC0zOMUwBx-d=9r%i`l=gW94w8e;5 zKIyXFV3AMonQB8%rt7%(R9A?_n4i~Mo2Yb1&A(_IyEmB<4=xc`LPP>k^fCh- z4NYk*$rBj&H3Pox&HqhMmH}T5m+4gKd^;O2gDU@+dr+p$32b87R1YvTUarsgB@Gf6 z{LV>OH#NNdyniX!HhILPC9go>9gL|H%m7tKZSwCGBKCo%uKFO6sj6yS3aX)R zn;&_3k#{88h(wBPbXUv9daRLn7rp#(;?@%DSqYK*!bG!`!xD;_ec1ggV{58^I2Q{) z?e$8C)@c~)G+H$4N?3@*#-Lp_L=z-A9U20Q*z)z=SXX!_qYhZ!r^kOb8dq!ZCsb(1 zUuwJ1Z0*kCwQ811m}~2DE1Dyw!!w3uZ2#3Sb+8%B=PDY*o6_^8tju0#2Dh|m^KwA* zursBTm&)0Ej04vxi)@*c*Da7Aj*22SQ@T7uz5YCS1kVv9D2E;9bvG5GCK+3Yn6%&6 zUiu~~om<{~vZ&PO%Q7eCvV9AFT$(d=VIbrx;@EOYs@hwRXRDV6J_)s$^%I~r)7E#4 zzdl~nXmUOyi;54KSLe3KTki^eyBYNeClEaR)mSkXpJ_=OuREcVK;I!+IFA1=qOYTb z066pkgVa9x0{eF*xCdimiGe+2-Y26-`sDw@lfVK9A1>lkmb3E1jID>c_nPBU?qG-7 z?IAXINV=9cx8t$6?Z;@04Be3q)WogHSOZ*1zb*pbrsc$$F9*~dO;@{rnYFUht>?(; zPIih~P+&!)&TVRH`nm=x+*1p(he;Pc8JOGMgTi7VV-oM|9#Puxw#YXhQ|~W_*|1;@ zh`O&yarUR99${S}?^AIexoPhlhg;&~D_Hbt4e!0Zk`nP(kj^AM?*4iXyU!)OvS~qQ z>S+x}yyLD@gFlE`ZZ+axI6T?t&j0;);O7~qoS?Rt@N#K}gHqtEcNdBXn5Xl%bjFG< 
ztA~74w6z^z-n|P5Ml~klp?@amsb39|!)jnz@s`l(P|ygEi0ZZ4{_ck#XA4*n6K%bF z-o)D;mDaz{Wr{wL%>&qpursf_W>kAPFE&^=51WjWNAD>iaAyhMOc)C$eel>S>Jj8* zocXq0|I)QpX|+;Sm~F0m;k9s%`YF8C{o8acW}T>(SnJLjChx%;(QgOG)r0MTn#u2> zCN1*b9w)E5us+$c@`kzIy}RK;cc(&%$rzs|4B3Q)RGb#LG4xz_YG*rnt%9C9%dx0Q zqv8nVEPOGSJ4x+aECZ;q4I;?I=ociz>fnzKQFje-00dguoeA_69g;sa8R z`}Mu_<~~5VfqjDf>@xJDT*e-r8~8uS$2y50ck*-aDyuKm8EYfZF5dFh8GRJ0NAQ$* z31T_wZtUT8F!S6!cX{!~(vqLjeVqIrLRrXgJ+f~ z5Ec~sha}{4Gy)~u)P!JvlTd*bDyIUs4}J`Q-2Sw{0?YZI&J%F4)2v?xe*keV!Mm@b z7uuA|L;T~@PCbuDa~#EP1Mhq_<13nC12(}KbPD&9xCXZ$H8!Ymjdv6Hg0^Ii*W8!s z+U_}Trxe*R+7=|*{%;fo5Z~v)F;2d)QGMehYTo`^kv#jd+nv=D2!;LUGLJ*^p@*V$ z2{6;hBgNfE!}}wUCGH)P(^snRV{9e-MiczekG;il7i>d3ayDKs!<|0IK_TR` z-MA0NgVi8HI?rdzy?9*ccztnk3auCODQ?e>)w9K8aMuF47uDALTYf99*3;JoJ`vqn z0)2)Rr)4YD@fS9)i_ogFy}j&(ldL;b;PFL@F4pD#QB~<;l;K)+vRF_L*X_V>)ezu? z%fjuuoLaO`Z8Aexbj zvW87GSEKh&S@1FzO$DJUlC%4o>Gs9u)OB?S^JK1!6dCxP85Z8K<#p053Dkl6HqTJXYhm=TFN0BB7|^~8OI=Pi_z&6cIX_0w3)A}!RsDw@QFSn_1#BBzWTomwEb4tR;-+(Ho>kVd9 zIX3u%Y}L<+e9BJ&sr4Uk;t>**md@9QIT@t>07CBXZ+v3#lf%dm0@0Nn_{W>AmN-!t zLQ12kb0?*qdj9k}*@tT7LH50sG{0AUY{7dQ-0uDH^&1bIEtjM`yj}vnd&%E_hk&8u zrVrTnF6@pONd`tRB=obSNse4n0B7KX_*ZECAp%gM{{2UT!e&RrPAY`2hGiagOIvG=tQ0uM z=JH15U3PpWbj13x6j#V*(6FP06 zvVPoQgM0$4KW7In@9||T-hIdgVTakiVe}3%$>hY|xcUQ|g20U9MCCt0*v2m7ecoGV z6S8l2`1=}1lCt5c-oGrmIJ{C2fgu|u!9S0N3<}aEPao7pc?yL zh%~z|;saiI-IB#-l7KEFxR`SX#hN~uUm>+RZ^fLXfzndgs_#Sf;dNkm)%kEzZ1PpT%?npA;* zT2GcYt=1Q6?_T8dRk+;K-NugUI^UaL${Buoml}LFNyt16jEe6pjk479#m)M-Zr4wm z2aKTaXH7pp&pHu?exWxHuwSj_yR9Z(&iNp2A|Xv-oCUcv6LGX_|Ne+9$4`ibq8Jla z?RwPNu;r&3^`inKHO#-K4R@CMYQlgpiS7gBpr%-q_1*ZWF0MQBQp=9Fs(P1;t@WIZ zB^t?R$}3>FgSGBTxoSqaCCm1^#QpoM@+oOuRG~Y+%z`NsEGjj_ddKXqPkck?os$#` zn0#cWno6y%vTk>R%xotOE1NPuV-T+_BNJ$nRR|ECy?vhe*9}tmwJp8|abH!oh0%x} zbm>Fs5kkBvqcA}gUawm3U&ZHUi``_R{NJ1UC=F)?KcA4o;?5c+UU0(YGDk;8+mxL~ zOP=1_PaYTkww=Lo{M+2_G3Rl%)E=?;H~Zxwgxr%M&k+7&nt+w>Czyh)^tLSIPRveT zAv7IYt!g<$Eq&=Fk$k2&lSHDcdOP}XS$LQ z7mRs*dy6$HGu{1ycYkMNH4tCuXr4k&?y2|jNjBX8KOx7TUZK6-Bu^>y>C?cdnVZMr 
zgqRmf%iWqgG!y&|EsOqdpg6vaI1W8C=>B<^%Ruzy!>>IEa}4J)!CQ;T>Q@xzgLht= z1ZA#54W(xAF>&Z@oXqK! ze$2qA%Z^poO}G~`o0%i!kqQ;^+Dq`-%;m6VP*#pU={NmEJI(q~bnxA1tGLuzPKIH# zyK(C=dNV?C&eS;h$JZ=l#S@FFHOFY>H=AjdO!c5#)Y*71vp+w&P~YE#R}u)X2h*Y< zz^@I@JOC++Sya=BHWLYzDCTT0)#wz058bM5MmbmNpf^4%;^JlVoTl; zkFR*_cXRA^M+Ub>hl#0l)x5nOj;8WQrLI`fQ#cRV^zL20>Z`uFTncci|L_;Oinlco zAi6@1Ai>|VoBZRs^ka}I>xfK$0k9bkB*fS5o%@{7d++@F-@|0ZfOV?pEh|LcPvY#a zTWvRWW*v%Sq-zz4;6;k@TWvG>)2t>-raYUc(gs1VE_H(7?VjDepWkOq^7cYGPd4oK zXVB_*BDW+HM`(2W=O=|vm%G21{M_vEx&LvAzUSy)C)Cw-P{MQH1%nCRg|WjIqP8W1**J?>2Iedd_1EnF$OS70i zw(92s6M3n$@x5os;nG}5RMZ4t5vA?-}w$c{{ZHa~0WzGVsy7V|dJkon^H_8S*clB@$`TPLIm5gx`|ucF?j zk&h63B~`!{r9ctz9_IE=Nhqv#t(`PlXa4)L?hkxMjsDYU*+tD`k31?G#&;S+|P-Gf5n36fM70WUu(BOpQRZ2j<6|OP}HG z{D6EaVeAItyN6+tNb1viebWw}1V=hD3jK$x=}BzHEwjW@!_CIYiMOfv33%`*n_#+N zfe9b)3trU*uL&vjr!2th7z8rXl^7|^F9j256SbM!(!5ZQ5{lJV@+)wUK=b1PPtUO_ zEwJ|}HB%k@LSiBT)BY1&N8XN=_-am%k1U6YaX0HV1_b*SiPV2(C{P(OXD$D3xxRD< zT!X4RQx@q@AGbPbeltD8lgr7eypwT+CWe}=RTpEnSq-%{_~3s6`hu!CBGRc~_j|*2 z{}iC@QMUP#4)NWuvOG;l>SxiPVzOqwclp`IANVSPQ20hz{opAs4g=E@N+cs3 z{Y4Nc)|D3A9pn10%Q>l1!T+CKw1+@!c3eSYaxU4w;X5v;-RYkw5HbGG^wrpC`n@rYtY-PhtjXWbEF%MZfiV%@ZrBT@ZF{x;b z^vaxG52sjJYBq?Q&UKl$hwn@BLEp;qLIYucv|V;T`)G++K!`@#gz8Z`k(2^hS{Ej| zFWFesuxMh+L=5$B_%A={nqU5dqM&)CU8liNM3ndiFL6`0)$>^02k8XtW$&QjTOK)* zpVM?Nbwu6aK)Of&#<6Vi-%0sRzvuIV<~4iJT(7uEKPT7P?{DBcxqzwFWE-jNGR!kEW-hPp zt`@*lhh|K&i+>#gVZNTEwly9R_5DA?yC#C{&Vs9Yc3PvsK)VWtKBs07wKMk>DiIZX zx)n5Jf8QES1YqqEqmv3@NvaTPshap*PkJ6qKV>8Z2b}dqU=N@(^wGkS! 
z(!!ESA1CHwtSgIMR#T)|GIknxo5UA%?*rq0-DrQzlo4bj#NHj(zihvno|F+Xe zN0dR&a%yw&GDfdDK&uU4MBOs3KO3}D{Yplb!^7cYoTa5tz|~SVlqt&(75W2U8kbEA ztPKbFEfWj*EWfj-vPf3K&#>pXZ7&v3s01K@D`>6KDp0D&QVlP4p|BBRC+b6dVXm^( z)>lKfv3$3rjlaMd5I9p7VJ0WbgZF{Ef`_+V3xV!;*I89LoYR}<`eyrlck{>aAA$_6 zGV#hN(}*n4|3abRdYDHiXp)w15#I|k!T?>L2l$62;tbYDrTb*qu;q&%yUq3vmvU_2 z-|p=$(Ht)5i=jXn6g*}NGcvQt%=?4Xl&$8>9V!78ti$K6TIj|8X| zGf0l_Q96VSBSU1Bp0h7|%X6E?c{T=oRaRY_5S_~uA%-sS3b42vJj`Msf~Aw8$>W=h zM&o?foz-oGU7^tpd%M(o(h_?O-rL2>Ogd-L_Lg94cRnsUU|739qR)9}gLw-e8j(m3 zm$!0y{=@Aa(MZ*jl+2wDh*0=K+gnl^au3T1d;fEyp;M*l*+8xAi;L^#DbIN<#f;vM z3lD6Yj>e4AIQBF{fk9!aL^xCJ*Qcp6@<{>nl*p!^YcbY;iblSu#wL&0ZW5AYAb=pW z=@kwPJUSuSBEvhVb+tM)#_CWvpJKgVOMbpaPikAn`~%rT?dEzL-=Jxp^0+j*pMwPR zPTGOv+-F*%$r9ylZ7Z9f6iceBr}SPo-WBVcGCL5+yk-l%r*5P3yqku6-qRc&J*h;< z2#t$qCL5#sA33DmDX=!4=XoZ6w-i#+7LXta$FIB5Bz2!{ZXJPVh|t&$-~Aj{=ERnE zwJ>3R+ob^w0JcDDl#pt3$?x}2Jt!e%y8f-Jwrg9C6ypl-Jl1Ut``v zcnzHHzBnKAVpqOTsixqqzXmxF?aOe;QeJ`h-^MgFG?w8->yFo297ChdC(=_uulWA!!w0^pF9#%Fcc%< zm%d18it&9-wClKvO8T7y4dYeTJt`Nc*F=dIi0>fZCX0|3`;{DiM1EiwG=->*GE8-@ah%I>c_YbX&W zg5;BzT{y=F?=NHCnhXpK*PL59d(X+8)7~bw87ILu@JmPt!pKF$r=ibnpUky*9ta3- zhhNFC5?dc`cr!Akbe{Be8^+h(P}eeJNd#urHa|1PIj;4yB}?9(P($&VB_9P*Ji676;qyaXV}{*iq`H29`kwC7q)CZKM1B z>IJ;01;kUm<@;Qe#`9ngyZhin2#@FaO^+NYkr|N(B@r8{_+Po7FZ%C0h==nEJT7#o zb{mN&I-^~OKN|ddEbkU>{PVEoW0Q7?{mHbV9YM9y6uFzs-^%|Mm-bnAoqTLAS=mm+ zu`D1Vv`yV8?|e4nz>s}4B`qy$ZcZ)UvbT=5ke@>Y%i7%TL9Hm-E-2uak=xtxBAhe@ zl+%n@2N@XY%#aorMJ&|96C_&T=un5NJqWRa0E z$+F`_s32c>^;P@Hb?rU@zV(yVBLumsD3cZq`+z zE)8+da%p_B&}Hr*|0ovWDW(z@Y&bDCTa@i)-!@@M@wzOmXLolI@m~M!o3?dHo_9fp z_p73u*4RMgoRBGnif{h{U6R^QYWi}jI0ztZTeIUvl6j=JhW!75A3ccx8{r}5`E2A| zd=KEhGjyi45KGxg3Aif9At;$SDjfr#nzqdJkFQ(C`Ou@RdzNWTCifok0y-&~H9@Qs zJpl56Nf^BWg%Ex~@sQ|SL}l?D0VGE9R5oX|v!wUv+M008Zz2`EN6YH6e*2LSeShQq z4c3rsCq%uyu9!a4bMXw5RP|Cd_zHw#RLITu(6<+5qWtr?-?YjKE#t*6=I!qu!LY_Q zbM$}tI|ghnmn{GI?1L74OLpFNIF_rN4+|eh4pG}^=xO>80z@5uzZaYYItY(vc2dBLU+*(nj(OMCaE2r@|1-aIMSl%Pjr} 
zuXKe!V|H-3-?es~pGOV_mL$6^sraT-TD*66#s;}((H5Cs5y-G+Z_&Gn9Md*MNpDdqea}FM*XWTT@_v(QeR+#R4Jg;=38uwsR_Cw&u4n5==$!Sqk)-CS$Fdi zu9=vy)H@1zw{WV(q?0TxYPqO1NleRA&009U$RA3c>I3S1Lf`?Rm0J zyTKx5De=R?VBkg$U;m8swXA*l>Hd;i`&Ewy(YO;H>2(zz4J%lyHD4*pmUf&gCksKK z+2p6E(PQ*xpbJI4n>W=MmR)J=1wlQ2wV*+V3`nQ=z(YeCwlE(H`^TJUeu3BasB z13X(|%JTK1I!Z?P6d9pHqXN9q|8*OC7VIQVbmCSF%tEOiPBzV6{w1RYJBjnvgpvRa zn4lz^X)i3Be0@k+nWrf8)8^Q;cnSVhqq{S^2=;veaON1%h{LJ$1!9GvlKFQq3`dOH z-qXOtdR-s2jM^8LYTuxZ{ZIm+pn_Bhc816&O8k zIW&V8(kx?L)K1beM3~*ISFBCqy46TXusFf)6_~Tw-6AM2`!QpRrj(olUL2-rn|~&& zCIzjUqJb5-uZB){4@!4shSsub>rfZF03zZ&sqwjhk6|4~jQCipqvn>*|B4R?rPH;i ze439&OD8WlQ_hK&2oh50#sRuk;uGI8c@3Yu@yfr!utz~OyLW&_EjjqY^e%``6ZMiG zn8UdAQV*|*UC;ib!04dL$$Kq>d`Pd_`tJS8f9Zc0{9Yj@ZuBp2lXaOC9jbqJRQRcV zgS3WHuOoVR$MXGbt;qi3th`z_{=oAUf6Q`=^+zD|h5uVA59fHRMJ>~KqO=Zm4~B>5 zE;s0z>+&i2>EUSCL`wC^XiF3{FvjEiTqh2f$#yr=IC9E4Z#tU#wqYs2nso2g9HTz` zLEQ?5LjdJ`-|mSIR@MC|dM!U+&~Iuwm57?x8_Z+E1W?wh$r3|i#B)0cTDABQ9ZO>* zRf#I;^f=~8D@93XiwZW|iZ_zkQXeM$*glX^)OxV)nv8TD&VlIl@@vB&|kG$#@1 zEPB#XFIR3Ewdh38^PGub^>e#lVec`n)FqYeNvdY5Eh6GS1fvce+FPdC8a|p$yRv2Q z-qs_Fj4?vo6%QLE-rn9G{JwnQ&G1{~mdDuUw)-@7K++_OnLEb?n^4f2LxdpLafatO zicvGi6(@EBYFM*Oe|vq-N=m{~*U(707q)k93myt*^W&XgUH!gZ^y}B)l?cha@bCD9 zdkYl0=@Dw@XV}u`wXVRUf4vPgaB-$TV+$Mi3Oao2v1H`#g`YFQ-QWx#+_yJEr{_^E z&k?*5@yWS!9AK$+i+5D^rH<(*z8D3?n$x6N&i~*c5<#G?KA`R!XuE@H%emk&Zrokl z>d`*>CH=i^k-y0|UbrJcxo%2O6z1Z65Oe@0#;01OG>R=ryJ-^2ZmxB8C-5)c(4DWY zFt_}`DUBCrk{ZaHMe^xQxaazf9{y6bsckFzPT@01EIG)v^}q+I}~{0XM!QX)p)wcnv}{@f4rZ$ z&@vG@L*dIXGBawUd6vsjiKeE9eH5P1{zgw{_+tjb&*O*BDRFfcS?KFEv9WjFOMk)@ zt6nXe+uQjrJABsX(celoiR`t%ZrKtw$4O(#dpI*;_neF1@KS`ITdA@_QS*rwvN_?C4eHMHRHzaxwSutg}@G+@u}?) 
zxX>qI(TuT3czeyQ8hT+MthzA?=3?L1dm9$@6(sS(Jdjg6Qy#74{yB)om-B%ny2Hwl zH`JPLH|t*MXc*c$sVb;ms`UCEnE@J%vyzLbZFG>1g%^@h0NHm8d$^@;H(Ctl*>1Qw zg9>R;M^r@Xk8gh-DB83f-+nEryGyWC)8Xxn$z@+u5WZKpc*<6W=K_rfJxT9(=d4YR zZ834#7(66j;+vK=`%OR3rA;Oz_U(%5#`qrxyHg&I7OPuktfWnV#x!4cN^_Mba6w4e z6E^$xTj}RSoBy7bEexu+ShIgiealK_lKNy=2xUlQsJm|*2ZHWfW-ZlTn%vUoA7v)(?XeXzFkm&sM(eN#1LkC63fhLEL1Q+EH(^q8gHWCzJ zqcjBouCJdu$XQ*SaUSvQdZu;<4T%C)ik+=}$7t4FKL6*;_MoAB;6y-zY1SEO&gN~O z=Nl;!5TL-Q6{X>Umq^iCGo(16Opey)pQ!V}DE35SB;}+Ck^ZrfZ;^I~XDOtt?AUGI zvspY=irJP7$e`~we%f#0bE(@Ju1T(zNXIGcDH3LyPc|qcc_@Q}HlByYVf{q$QsoaO zXi>6<<9>~4n>o`jh`>r#9wQ;cY8Q7|c;0s57a2P?c>CPAcbc_T7k3dRElrd>C-Z-7 z>kpV#0})eHu=rBD#9A_c26NlD{~0>-SZtUk@ey12L}Il}5gv*Ri@Z>|y|`3H_QGRc z5FO~R3CigIRk3?fey0WcfnMqaCMQa576-}VdrEqUvk z27xJ}6gUV)DqVRZ%P5f-Q?&Re6sKyLj71r~agI^5dsi98;JO}a7cNQyH;6ysBAL0 z$EWPdKgSi@31e&rx8=C+r=3^n0p6Ie zbPCY386VQUUnlF4Sz;eYKrrkp#p*?qOIo&f&DeFUEEsvILN1zzj?p$~K)C86t)OnU zp8QhgynASPimfWG)S65i zxC7dM3H#aJ7)q7uIRJ#0lhq!!Jne^8?T%M*e=%_TX&r9GOJ^2@=&>g7HCA{1{4w9A zG2vpv4C0{cycw~7rfIelOwH`kLS4Td5{jNrY302$Q)Ftw6k;aa%gl7hsAxCBPs*a; zqJsbSh$76MPTHB^2LB@cIDV#vuhj<$JwghTq%Tl!xmO+^;(XvWBp^8Ew9t0jIR5QC z$x$RTk~$J2WvS9*=yT`Ewb4%OE^(rLx9{6AH^V%KlHw1y*>fH^Y-wW9A=4wQsa0*k z*e>D_hSW%+o#>nAGFGF^YLcscCJ2cNrHKgo^qHAo*x3Jwe*)m+JN$%Hk+%D1xpopD z^FL~q4Cy4?9E`uNRW)Jv8YJCaFedw7{;q0)xy}CHg5Z=7 zBodS~Kh`sKLZRB9eG+5Q#%+utYNK9V^_2itonI3Md{Si77~32{(@=ZnnYZ-<(_}Np zHxN+EJ9dll-S*vX$F#e5+_i-Rl(esVS&`D{v8Rp5aIF9jK&!n+P3!(+I(jA}5LO#M z>?_#gk2v?^In=CsRz3dvn#W;U0XG-|mH4w;@SYenoK?ea8io>My+-o0Rmd||tq72D z5$ESTOM7W}qL&>Urw!%_=AuPrbjqjSZ%isTk?>EHPy{khxJptmz|Ux3Va;@dOV@6FWFc_@*mXwhu_pfKu0Z+-!G4H~&x& zf(s>h^j{%o!$g!4F(ZwH9~)P|{FPe;bA>2X-;yz7-c#&G(r2s!L~gRQbGB1}vgb-EGzlV*RAQRuAy)bb3~@thJbOVw~Ec4}XXb7Sx~WTD$-0 z+spMry6jxT7mkM44OhPwc|a%QkB;VH`-nI# z7@u0ZEfjts87hX^IXW@S5z$XAM2Ob2!kc^d;UR~^_LpIvJA@GQvRKKiCC7(;d<@@l z3-x7_u$wFXU>-o5_T2@4a{nE~EB3|R_A>0zc!D`DVrIbZZT*cEknKP(0K7u%d#TGI zmeH!NvP~=J-0CQ66EYsh`Y42dRNeX|$c{)-&3z0+cD4J?VCZsG=Y!7zAB5 
z^g!o5f?F@snUe=SKzlC0*`@>9B7n_Q3WXhdOn!2Gg*G$Zf5IM8lXQidL3L}xF8nkX zD0G2)ka2PG)-Wi1FxdoUnv`Cf%{?HqYCbODLVPT~9i=^wie`I$C+zmz(z6T>yk#)# za{+T$9Cj0q2ZhO{pAs_JsXJ+A-|I|=I}deAhLX--uX7Jx881(50UdG5;Q!r@4%rr0 zh+mDXMY+cp$O%NZ2KrK}Pe+N^Q}lWx6r~-nXDc2^P2SQvQ_kdm6diG4_BzDs&pRa4 z=3sfWTNe#5N0?zAwpl{F37q%B8K<%`qstomGLl+bg^$dD87QQ&M161ru^v!!NXToL@disCM~WN&1S?HY9b28oB#}k+Vho3x6>96DE8^ecwvIYcW`9)HY1ei zffNl3s+y~HB{(rfJYc_E^5##pTZUUpl|ANU(H&(Q@hq6!<^DbmCe#?Ryg{j|VItew zR~rDAWrS>us)$3xFHFAM7j!#|4Dzjs5*cJZ+n;v%FQW~)(W4R6nRyZ3sOi6lQ3EFN zb&CQA;Dmquga$8sYH-xY8DeM0LlAPZHx{ZjC;V{~6#(NJ-Nl{nTcnl7^aC;Q0%2!TG~<_t4NkjI4_C0^2Qv7fy-FJ60fJl|)+Tpxe)T6PP!6W)dfNp|P%R!^Ny z$nK)V*)J16XP?ATz$aIlk9_?S0j5fq&XgBVy`(%4 z@lBjX6UGcB!IO#{dH^l0@$VFT2x`K&4=jpC^&0pYTT;a-yTHDl44u3%k?ITYfdQUr z=Y^pk-m)-?0Mt*CqLlr3KSK-XdEo2#bXObvBr+9Qno7zl~Ou3f|_wq zw^ZJbq}UP8RsOq+%E*31)u7^mes~x*Ubh(YMRoa?g9sOD^V)L`nIAa;J_D2oo;JwW zMiZWzek%^>!gtk3+zN#_&e0Tfkpxpb}9S>UIfXvPD=)oFYYm1=RwGHxI4A^deD8DiO{W5k1|P#jmUVh>Q!+uRo*5Qh-zD=CFcCUvJ@N{K zCQ1?G0CPTgdGrPFu$%sYfYi{`S&1L@3XVnHSC>_VW zf>S%3v-$G}`ra^MK{rW{@qxKk`6L^T_UUWHgRPg{&v@QdDB7jKd{`g7>(!%C^)9P# zH^<*G+-1)CA0?^i+;w^5_e_gOf2prKws+I!a5J8Go#H@_NjVo9`TU#WnnJC1mKY!7 z{tK2SD7=-TjW^hJ@{#_ZkGv2Usebifz7oV=t@^_OZAhG-4KL+ZSvGKE&ufCs#66%k zW{BysMYY&Kk-z}eT0;IhCNSs?m3gUZAQ|{dj+V|1M9HuqSc+L%S}JP_QE9?W%QR4o zEvfo7c*8K~8r)iT0SBYAcDXy}w8dMkO4^$1y2b;@?1cd%dEfj*Jg~qR zW?fQlD)=6KZW&7R?6+tltd3*N+0of!;Sw~9(AfNr?QNasH;a4DyJ@T zX)pj_}dUWP|XO*=iC+2(;LTh89 za-viPS`s8N1R*h6FdvvCGLsO2ldNq3OX51Q-d#F&!c8uCCOIM?g0e+s&0$#W8!uO_ z_0Ns|k(iP3cF{-05-;6GEZNMqIMWm`-<;$%@*=^MsI#Nfa?wgM529Q@{CrhA{9Sv0gSxam$Cyw~8+g!0uGggcHyd?sEb#l)yyBf4P0ed<#vz0!a zT9QbmE0x>Rp$TC0sueL2@&$KI$@%w2RSa1A9HEP3ja7vf5msB%-G$wr+A zHFgNDWijTmeM!P}7lwQQ9Bq%=)^HZ0GXpM?GeQ(v@gEwr}Nn-GIGo z3l%wFbb_3unK^ZlmP$$u%2Qb?PHtGM{SUnGjBB{VZTbI>D`5j+#9goC5e=CMU5G_1 zEQ5SQk8KQxi-grq`UEWbPddx3jZ?yufrFGlXqdC0q`&>{5N?x;m1^PJZR7VAS~B7q zWUY<^ZI7g~^wQq@!Y*ay_ z7~htk947X#C&x9sT$Es}x&m#N-t% zT%?>Og=2Pwo|0sim|vERYOF_UvAd@IeEgGyNm)tQKF9Rv5wW`49>Pt!2HQwrVAWSQ 
zIn`P^Ro(?03({egGnB=q(nIM>Zfn`?30>UQ#XnbjZ0+b+5dQ2OF8InP4GquS3M9wk z8I%KCG7@wkxcbHTEDZ+A67cJ| zZ%7H1b#@FD`hj!Pk35_`9wHC+0u*>zT|E?`3ce@Tv0ni^ukPI@tC=T>UsYkQ;UMEZ zEG_449wIlhJ%q{rJm+I<<*>R>5Wz1&Vf-W;fV#!lkz^W=Q^>X$19qBG(jT|~4gBTb zjv8p{3Qy5Zt8gp)ezoe#zt44+ivK^oZ@r7JL7C73>Um>F9lY~Uw_$6@+|R^Gtsqh^@z-Y(uONe z^MIm=NjVv%9ko@nwjGvIo4iB4^q|V|*sJj6Zy{<^TtC9*dy)>JpeK7Pj#6*BLe2y8 zf-I{1p>LAxn-1>emH3_+ z?;_#og05=0 zIU@XD@_^zzZ{@+;%Qh# zR`|E9<{hpSG0`WSvJM z>bhH+(8j8M`n|xdqh)7P5-u?%Qzy%HyhX2@%;ij$JmB>8hkP>q`TVp`rISSIG+t+E zlGE=YRfG{PhTmcwAC77fVcVBt@tLPy)B?XW?faqr*|)6!v~>wsP^VqQhpV6}4}n-!aM}csQ?dibchC z$WN_;kL2DT){aJG*LYkcs-1jow`$`QeFZn9e4Kn2I-lbV^CDp=z9iHvRYv%u z(gt}dPluo48@{Cve=%z>9sczSHq8$`vKcAlqpe8hWs5?GX99-6wM~zcI?qi_Cn7O_ zBO?fV7LyLjS|s1~oqJkd_m=j>JJ9}dis}+Er7qhI3}YOo&_Upo4W{Iml+J-DA+JyW zac;D!w{S7zVw8vurqxE>AP8u4V*Pe|DA=;<{g-Q033;FNz-QMwGJXJVA8Xjdk6qu; zK+UWqj_3?PHt!-i6kB%V+sE=^SrpR%60m%td#7VLUaR`^x#7*+|0^_8pgwgCMuH6# zP3jm3027NX7v|l1W_K3Bo~J2tY=#pE0pqrC36d*CBRmIQ=k9b3bUNfmhG3A{FCcQM zQGQ%MLYN2MV7`n%*JCC<{8+R6Z@Q9X&loe_U!JmCSyFnX*`kbKu2CuA63Ji+<62&| zWH_l4G%q^Npa4b^Yo6OHny)@8P_uU1?iaUxvq2&WqA39SI*J#Wj#|#unumSmS1Vpd zzkli+Yf_O;l$W)FGK@*BLEtIEXB3Ysn@iSK!MuH>LcuU01&4O;DWj#5(n5Q-aWbm$y6iaS&D2y(4;R85r#9}+A8*fPkFOld9k8f68 zcG)1a|J@M>QAkIwmPTtib&FHYZ&0BCY7c=N)8NBMxNiWPL49?Rq+5wn6K1GAm@f+A zDn#@>?!#9jJgW_)w)m@QDb>_<2hZYe6H-_<1rBg#h)`PCt#NAO1%qB-S4{AIiSvvV zz)j5hXB;kr;@L015Mhr4Dxxst$T)!`Trra3Y3z{kWJ6#~qaSnosi-y7w3sVOq?t0{ zIZe9WRPhltC>l$`!eUHFiE)@$^&p?R`-nL_L*uVpM> z5hc+~sGtQbmYvzy&(}AehA;JfUy3KX-e@vzM_8!{N0AbWQUzREKC?Slzc{&UCZ+ri z@!#2%5*1sZO3U9%tdh{hm^51`|GB(6xx)bKJS1Sgz(`<+G@G;MA@ey|Yc`xp9+kk_ z|7@a`OfDs+B*YOe=af*s7CI17%d%nQo;9vv21vJ)uLBOFV9m97z%)#Gq8q3;bbrS8 z>E@qZCY%DRGz0A?=gPrG(lUBZ1lMlQu->{={Ss7(U&gfQ2EoIgz;++yqn#4H8T}Y{C#idMHN>tBr>FHMtpu^zUA= z!~j?Q9X=E+gK1G$ouxpC%k#LTbvg3uS>Ac+63!z}%QY@}NlZF0$$9$%&}ScsIpLXU zTqQwp(=AMj`XDjR>Qw)TXk^a#w-S71TYip}>5d*bWZ04{1CeA^PsM)`<=FU|w`u@> zMK8R->h&JUa1XG+1*f@)J5CmY(24Ov3ag}Ba;Sd5^^DN z;_^gQ27iL6<5iuc84heLQ(71kA7>Xh+mQ-ZI8DDjkf 
z-M~e#`PkCHao6{24p1$%Z{imTpKavZPc_k)5$eorD9v=xh><~T#?jyapnNT(eeoC! zu-&48olO(DDEMJd2JtuZhi~r`3G&)ql>dW*I$ zUor7Qtz_>SfA+q6lEyqj0$;v`cGHT(LSQFR;f0|=a#_l8e&?acyT^TN=QIcsDu|=_g|6xMGdwM5c?Jbf zRY$yOo$wNb;y8#A+4U2dHq#I>91iCQ#zLHp+z)BG^aKL^rRL082 z;u|D4w8FCeZSfJyVG*uKhu{;$Fse1R8WP+>Z3wtdSD4FC?T{pcZLu0jh??F1h*F^b z3pjsSyD;X~NYe?Ln@n|~D!S=SlkX0M#*j8b&SCCLHFxg5xzAZI@OM{Y?awxuWW>$x zj}TZ)je!wM9e^{1*o-jS12-P-8UaGkWxifK{(0FR7W5|z3MAWqf<$Drx-b^PTYnE$ z5@LN*K&TUv3Y#lDs$4!DxK8n2 zZ3YbWJxAO=R`dE#5Lx>HdYz^Ko}_xh`Q`+L^oz>E<2G~Gn>zDLbNvTkW-JAxmuunW zB9GEe*fWtWINTFRjO0<(ejMQ_O=Ww4tPjdP5R^yNbMoED;NMU5MiEv~0#vX4v*l8q+p* z_godf+c1@3f70yX@SfXOooK! znd!ALndeB_!3rGDJDvt+`CKdl^N|D4PRNBX6I~ZH&|>n1P--K>Z+E=Gb~E2r&X+!& zhn@cgMjN>7Z%Nzf4n0+$Jhp6_T4X=duf6wV@n{OnkCHs)x&HIPZXDF5HPuZ}$`;Xp zVng;Y^rfAe+MVmkn&o~$kEjLr)ENUIC>SRj(%Y$MTY7S*ET8z~ zhkgDmQC907QBKg1yjGz9hhcC6&qN_k1%dv>!PgI}Elo{}M4Wl(Gj#C0k(VPQ07B<( z`n8_T4@_U>y~*PnraYQEdLn}p_ywWyVijZMpsI7MQL$^`rWwc^at~MSxC!m3x9`j4 zm~aXYgjy1X<{+-0VyfVO|NMqGfRYsF1Sc1w{CzoQ07R< zS2%diyKL2b@tenAt83E7(03`e=G%qaj&tCEx8_A1gOpVhrKK*Oy)wxn@?#6W#*)l8`Dc|_q8D}{f+Tm~H! zUSL%dz-z5DFxzF*5paKj4DrVuvv}V(_!-Ib`$ENlRKUYv^PJ8{KIaDG!J%qz8n?n-mg5C#!jP0Ga&oc(=A{4Bx%jv;_Si|4! 
zg4Gu%dE+FU&E9_kIsF<%2?^nsxW5-?Wo5m-#GlA_{hj=zvZ?=Pg13h&BHyg#F~~^1 zu8(qp9{VPg6W9_wkY86fHq^&`^Zk z*jx<(7MH3ZaIIBy*AS1V(YX}~fdeS$Uq!lj5Fy%DHIx2XDX&2hPgL%Joj5q%LnsLxz&*A&yyjdD`7aehg z5>=G&UO{s!$B96(nm|wZ$Gdnf^HuNDb~WH4^KTP>?QqP)?Y%-xiPXI1kt6td`^c2R zrVcChn^t!#CxGe>WN)x{>agFU>QCejq;z#ksft=pp5G8hF0wUXoI+ zuBo}drt`nzxitXQ|L*>`Zr1o_+tIX!ng-*q>^L5;sa>d|zy2>U|E3880DRjvm*2ET zxpADGV6)g<0dSB(3}V>|w%q)>-%Sco%3_}&lK$-fX?!EvfG6Swh60az>5K9GN`~of zob$1K)+ClU45EzIJ@c2rnkS(7>4dzjefc5JMW4i88G4w1fJk-?GvLFKk5J}K;~ zkJix7;sN)}!r5jkS95&CQ<$07E@blq!I{t^n@pxT-%ZuXo2Y_x-5(W;9hRxEVHq&< zj!yTx9r$exd=6)8!e2c4E5PhZ{UpYn9*XO-Jb)lIDQFFowHv`<0R)A z2!Cb(t{MBmoW+|&JndtqJf&t7HQ2LuF;c9F|I?_ZmTtdcWj0 z$|19m{7=_u0uWrdYnR9c(pLB6yL7%GRT=C^5IYkt%TG{_Z^{nn z*74_oQPjyN~)Q2;s-S z-e{cp48Yg>m;mPx_mTofl|3Ci4O+VusV1;l?7=}rq~9DM`(MNJ|FPH96g2;xhc0Kk z^`Nxi7A7wB4L;-6D&r+7m*jL+f=c5*IHKZ#5YqJrv_i&}OCI4tfe1$jUg8Ax#)$Ji zh+tj*X&>`vSl6?>x{F7R_2V|Gx`yCJdSOyOw6XVLQg%L{g2so48VeFN`KJT~2u0($mI z_Qk&UblS^zy#a)^)IM_j{!hv+uW3!v!Fpmxi;?;H9l zaLdPjd-MPu9+&|wjh5v)>CZ-sQq4V>x?KQ9j#yBr+(}m@(BPL7QEB(;*S0XT-t!Ec zp1JNeA_OJhe`TKJ$r?gJ`T8LY;lb?nI(lnRFXWI#mPLeC`LBE=0l@+hdP6Y?n@nhl zYF%x*;KOD@G9K;Slu^=bP_nqQBn|9d?>ur{fQ^J`rqX*m67#>vGSD@*H@GPVTSMi; zpmI@Esji7S4a_NOaj~+k7(K9DAx8CCmYdw=h;#sk8 zi!KLMLmDWzHFkQy);rHv=h_kRJ;DBVvAKajXE^W%1P)Y`NNgZ-B`(IQN)d{kZg?!W zH}h*In52m~R`1v%)(1sVe^FxXs)$qn_}k0<bjtJmS5)-wD5*!F#o*}B_8tFDige0eg zt;mQcsle<^i}x7J)7N7T1)k4e)FsBq1O=fHPJc~Rpn~0nkVWXGkI24xUIZ-vqak!G zAE4x_^EJzA zFFYQdAL30Gkq;xjsl338A}SR=bxTteJTvv-2+Uy~-b8X@ibiy$*qK)CRtPxf>04(% zYVlyLz!xYF$ANa-Q`jWei>Gh46o#A43~e35(^+n12rjUYwp+9@O&8m~AZHIu8vwPizR@U65wiee@ip^*!~R`q$k!1)SmKM{0A=lOg{^EyWVLBEH|u6krn#59ccMK^Q5W< z|8PlKMmk4Ng~3ri$a$@!!%m3oZ#JN0D-%tKUH?axgej}=BZfQvvo5}*N)k!XLkNQW zzX6g%@g6_MsC=rm24!ux)s(;N6~|Cw0r$c{_3B?Udj4Q8AN@qrzFXxM(b;K523-+D zXmi8Rw3&F-9Ly3UBN*p=uO>>4p4EAdSBkw8o_W5#xW;1Wm}>-Ei37XW{p4`k zf=!{@!v$`i_^Qklxq5(#T3v6)L9KWv>_tZHTiCX*mDlRlX7jaV==baY4V(ZIyh4Ed zf94GR%)6d}0)9V42=_(B`0by=xU0IxCi)Pa6*opWCT8yQI0wUh^@dqDTXrnfBdL2Y 
znIBxmI8Wps=CKZPU1lr^0=%SIbb%dC3CD|Dq=UZ#fis_1vC#mMJ zbA?A~cH{rHhS~PZ^$cg;=k8FUCX&_@n9&)Yi1w724g7yZeRnjQfB1H*TGe6KsJ$tw z_KrPb)+%b%-qaR?Dz%AKd&jOlqIPT6CU(@`BGe{?H{ajyeb0MN@<+}|p6BzNe8zoW z_jO+v<>Y!3UKL6FmtN(P9%F0YQF`tuyF+YBLT(#b;ssmkeqmFs!bBLUWb8ZB%9e8y z30HX7Qp$`bQG*I$@o>vC*4Q0A`??3Jjl~=f@)1N`|~zbMI|R=Ck!d?HSqtgk-RNXt|jGp92SH zm~s^-*BjyQ!g3nH0c49iLGDMLC4Z{V-$cu3F?_KHR=HGde?HaL^=h9Ajq8CQb} z(hqRp#bXWmd^g@@GE**WWX-pyPih7l#V&KgnM=QUoOr zapB-DMcNgYjPP(7iUs-?GVga6n`<6wM;-M}3>;82w;f!#1!5)g|HGqgZsxL<84~7u zXey7v3b}@6I``PBgmQkBTZY4HP^JSFUr*x)jf?6nz(vFBYKH`>e9%A4Q;->wyXV)Y zRMJJrc7lA=qp4p>3aqQjg(O19o~2RekA&@?n=iNW)earB9nh=iL10;TPItp8uAoIq zH9d!>h=m!k!g8eU>2-vRF)+g+*C*T|;}(BP_^x;2hH{Q5Y>xF*HnO+kc|C6m|25by z({Ee+No2`ccP8!ftYiA~!dERUtk|||F;=>Tr#{%&MS(cfY_T?C)M{U)s5q+gWuy#m z#-CXad6XyzrLUnL8$zbXHHMXJkmG&*GHq#xu4G|qBfhHI8~Q?tkoVs@^`2qbeB|$P z#&+(1E!%9@GYX6-D*gKqYY+_)7A!lUmvl24=g0224{X5(QEoYT_^6byzk`@?D zNqf@sydLiPqdBWD7(7k`Q}@6QXX+{0`tt`A>SH zlp0q5PaKXd)_*2m4R}lRIgj-A>#NO&;Nxh28Wt8Jq3Pt>FhSHrXOl4S@Tf!8xg{0m=N9y2=+q?f9w$nQRu!yEiIt+Qi8KlUTii2Mr<|u`+2n>-T|Qq zb{ET9r#;FDl2~q57*NvHYa2hS|Hyhz^nkIvDz5_)0pS~X^5P;AIG_~E*D;K~x#X+i zTOL2e3`^GV8SKzI?qw0zCDDQ0hU!f;tdOu6+xYWv>?RW|FmtE!&0dzwxlIYk!bIXc ztctFSDxG{wyx{o`y@a<(KlCreMm^khV4FI!7iQ{Rz{2XAoh}h?UEE$&la!M5m`4j$ zXZFXJd9&_Lr+b#bH>ZGh==hdioBx-|L`~OD+xD((Lyyu2tdGZ3ovm&_<)9<$AhFfV zQp|&=K#cz-U(e0UcaNNLR|iwm1q=PYa5g7?0d)-f5(mX6yk_1M;yof1^Ozmx*I>uG z6qiIbjRpQ_0EqSLL1GmoVuh%Qz6-la*nj9UvvNen_w&!R`ruY~u5S`BO^tm#-PENf z%W{?Za`a&E=b@jKX36)a?>T$jiw~DI3^WCH#v9GE`-l?KWUMp2Po8=r2@NuqZb)u^ zk@ggSz;Y_&^4}j>V8MV1|8S3X%E>3M`HPw{I!y*yvewDa)UxRs-?OwjQ*q+QvsX-N zcj+Ccr^}UUH-sFu3rH`4s9y=%&`~Tc)!_m)?<(BU2?y6&bS!iUoaFt*GJ-~S2iY?x zbxUlsvvFf<#{@qn=|q`2v87(dd`x=}Igde-p76)DM`Jf)Z15#x!T4q83s@M-f;(!u zDtLfHz3C7kNdV>?_D_|C(yFMR{f61E@L&1qitJjIz%ZaNej0c;P|as9)>Lp0XIE3hWtWZZh4`(By0AHXR6Z!UkN^*w z$4B^5hug)bf($68)c%jj#EmYM=f z%hQVpi1lG!j8L~WlbyPYjv~V{`b!gzuwgPM$Hs5|X()H%&iy<uf(P4ui%`mu-lqho=JU=|5;1!%C%w#R#??l!Il@%aY}&jlugcZCVz|z2O8P6KU>JTWML)tZWa(GT)Za8>Efc)S(3a&{Aj~yJ37# 
zf{)OEOx$s+RqDp17Z~>{P!T-b4U=gsCS^AO4eX2f7}mq%yPqi7<+8dP}))-KFpzVmoRn(at)Wa*aPe&`& z379lj=;@+vm3yNw%=eRngzeKF9nl-u3x~uqPI#S_i6LmNzjjr3F4HJ3(;XEox|d|{zYJABkPv>E6lrg?~|11@Vfl1= zpI(sx+lF|<9nsT%bMT2y`mM~rF}1G`k@?pLAuqdRyjIE+)4C-Qy6?jQ^B-n^hr1-$ zcJXTjAO_pB!sEE1^t%dSffMAN@3-U45p~jy?l;O3pOmR*bsOl>R>2&0nJ(H&5=F%m zA5s}dJsgdW(BDDx?y(cu(!&L&0VzO>fD05tfN=B?hT-L?V325n^t;|0#)no3onmb9 z5!Z4vy-MpLf(p5Kvm25>aU`no)#a^dEhHL4UY<6l1!A=@4%R|JB?XN2*&ri|@nuhv z9V{3@6^Qd_dhJt2qebTVALclpYrp$Pk7yF|1&>VaL0#RmJWw9Y#FI>>DQ?{yQJj-j z^>+P>k6*!F@UOp@ACzJ_49dFQ!fx|pFL3y$oTvdO@Z-A!G0Ki_&jNd$49rLv{iifZ zV)uC_%PXbobqeoR@g@tpiwZCSII~vITfCW~$%Z%f6qptJPNC;5*q&6K7B+^Lva|%1 zgO$pNSM&csV}gxGG=6TAY3&awl16k(ygR7jJd+81S@~qkB@(&4U7beA| zYzL71S2;Ch<2>I~XlSW9fp;!rjb52y?roPVcg`|8N&ku6U?pYD1yNUwPa>RlsO{y5 zcOT-m)<>TG`UhWryc0vAardA{^XlYv$)v5^Bp;R-YCAqjE>QWiW^d{au1^1-z8Ut! z$HP_$v9(6qm(gML+jy2ON){ScAaS%leYA`ut_lavS|l!0LY#`I$MD8Aau3Z$l#KxD z2Cbm1Ii>R~ajaQMvSrNVUfZY`G|rtjfvX4{PDs+nVH^@7OsCz`#{ z4)$iH^|20(imx++^2LOGO-3kMQ|F$JKk_1Stz@75BhRv|VpWyjFD6Tu8OZojHrK{X z_J$t)yaGz=+`S*VZ>aQAnx8g>kV_uKw2t0spG@_hK4U-Sr`6-dU|ICCEC6_#dO$cT z*ibXhG9wj~60xO?wZCJA$S-&Czoe#f9u_uZztDQ0>s`@9{Mo%nh_NQ-UuuhXC~C=C zu-LN%asmTq*+JiEe_{s8a}&bp#TH>X*3IvYUt8~{#8DG>NDaeGX{EU>7lo6OiOL6; zQpeo4><0Q6I9oOp0F(CGL3o)Qez<>|9v~m%ZmBbm3(k>NAIW;7Z}Hyb@Co66kqqHK@#>XjyqhAJiA)5)V z1ZR4luF`D>W;@TR>Zp>}5d{#%6>;Bfl*p`4)>AaUcE0r4vSYj6-WjbVXgq)EjB{tF zNqxBYY>`f9W!@5iSo!p!ddVf%+;rpnH_Y`)K!k%W$}dJk(NzE>kW>m>af5;zU_e43A)>KUUIcDYWi%3KMm@+o+*PAh6sa8 z7XfAnEHkGna70<-wFb}<+n$MCB=k5pnZ^e`ZUZBJH-G6D?()+9(ZS4$YwXVTcAtK6 zZgjg3;zbY-Dl@R5+<1`+|2VyV_1;Pqe)t;3PLd|Mq-lZ!!q?7}ajkeQsNy$U#FRjZ z16GnbRl_k-;spfMX9wEKAIL@T#B+3E8Sn_2GXhAwvJdh3TTsd7h%liEDVt{U>O?UP@@laE$(W z!j6y{iiD=1h5CT`i|WrL*oUIyIB6TwBzCnUe3 zpJV5K!lxJi@Dd#yVmzLr61FK5eOUiMxRV_ky-&|5^m6T~0#$exhXkGfwmVbk(T@Yl zUIx}zO{?f)k9xc#MSIXMHjZ(abA%-BPNMgLuD3JpW)q>BC6Knw+jaWR;vDZT)h#kV ztW1vOvBi_Y1Ya53q#||&yVt_Cy-HT%B&08Z(O(%OwaH?YU8Q`nB~0z}#^jGmUz4`f z+cv_2iSjKI>)!JY>b(^1p8BAcufwU1`!m<_LKcZT`!w(_qRZ|9J%z|8s{h7`Xk)^T 
z^WZPI5*9T-P;X&AU0ucPJCJB^hW?b939vRR5u+>5NIfF45A6xO{1xz~m#35v22H($ z%)P%aihaQM?E~;d$T{_;fB1jeUvBSu?lkYsYrv~Jp(HcRT%*0mq3Y*lcNg@W%yZym zOq}QV@)N9b?!%*}jytLPh$TABZ2oCI$bs9N8;XFLf4atto7byXe|@k5cdyWaQ+KZP zY7=l?Md8J(zzr>&p3 z$F9DYOr|rl#pG*wft$(O9oYxjtqRy0y)5CTHC__(q9;N@?vDqZv17OSNnSH94d-71 z%~ZaZUGpSktg;0`Kc(0{82BqNCgjS*K4a#Jt2G`D>6H&LmBg9gR*QZOVDUJuYC@SN zk^GUV9#WSfaSK!lSzZLVQ(^;z6E#^8nwWx78Ddoup z4}Wo>c;^G&-56E&|0u2wvRJO0PgBI_E-!_aOUBG2Q&&90ENp;fjaAKiYW+GPUPUgc zsK^>z?y~=MRdV&8_2r?AG6N6e2}z`y3SnI@D^YsBtgDVTE(^|W{dC~Kz-g#AJlElx zE|wqQcBMI@qFS~Ac~vP-1#ZwHAzlsjo;Vx4blP0vWcNbNxo3QyOwVuTQFeSf`>bx^KC)t;k02Jo~R=FjFKGe$}k%DGNf-OuntV%MJK-Y zAuB1!2zT2q$cX5FXz}{|1Ob)8=vrl$>F&wMtp9H8#lPep3gJBpo`t^fu6{XW-xEM5 zWyup#8<$oW{7KD10^8*ZvPUate&v4}2&8T%@bSO*z5V&74o8^CGF*^PG4rcGqBhq? z(rrZk^bY&oa*#BX5cb_+FKy#}&TE`sX*)bZgbOpzj9Y{ZJ&2q2ZV;^O*N?#Nqnd7Q zB3%q2y=t&gU~0C)^(=*LdK*Oto$3R*;3TrGi>TY%2I*}GVOZg{Fno%nP2V)(3)xmu z?$?7&QCXSR{i)Gq=TE4MPk!o8&|#qS$Wx^R#WzByDkaQNn-%B(s7 z_soC3RTW!o^%^@ObcS2=1rHc3oPM4$Po1$2TYsDtpq<6boolDFr{0Y5J{CmCb))4? z(}Po|@PxpOJcO}Xo$tqdi5z;IjAvU=yYi0Xb$3Ag5W^iQ)`-V&`vwMitD)~0H90%L zw8x7v^roTOoBgMUjB$0}XbJ@s3q?Apll=rvr?t&C*%opgt{&n)6V?K}Ix5%iU}cL7H}cWCOc3^+&wz(gHRju$5)GTR_ebhoj|C+b(>eKfAe&@i*fbqwXMIJ?4 zwH{)D;zGUVK<3E*BD;LGr;P~0sUzC>+tTey**@FImn%c(k@WEeEoSbtR-I}&P8;oV z4{*~1@d(l}7iE33&M4xfc6(`%(9~_t3!pXImtI`#@>dN-X*&SbCsfUbR-70UGmgb% zMt2?|3SWR`fx&(y`SBOiwQGcA92*jRF69;xg5aJfOgC1vf}L02QoH%;vR(G@#1qBQ zDJY}anZwq)xgih2%2ddRg0}Od7^AO3Rg_%wIE+u=H>nDO-!!C1;1eOmvloHVs-%%h zk@aUr0SvTS@5UNjHaSQtYG+JAK=acp0cvpGzlU(rNWG-|v&r8|p%+*fni9t_t?t9; zj@8y-{+7-Ssxkk-JLRqcBI)&5$#3I=8Ok4F79U}uaf{?OM7a^QKEyv(FPzn=ck_d= zoF4XJth^mmYIry1+As5>T1g_(nRnPQf}f^6==$y?LzcEQ^bxh6lf6Nt`Dm{fbY90_j%lrwM$8w&kE&$ zUr7d{QS%H8EID?}fWfN0ccnZ&W50Btsc0wVE0{agX=KNBhXt)VA&Pzjlo*0c9TqX0 z&B)>(U$st)xmH3}KlY`mZC-nD^8Hfd-2eP9lpI-L6zu5UurP~QoTf#(I6C(x*A)|! 
zf9~8UJN=K~cBr_@8>igivi#%%jXV=<$su`Hl%&aX;PouQ77<1ByMQ@2gHsD^l7m38 zSJL=ccymO2s#c#O&%|^7ET!GH_2<-I{+O^8w+1eLfR9U!TU-ce2B2g4;W>cfo`;$&2;&v5YLAFV27sv3QHmjfI$C z7-%`IQCW;2br|`0OY*W2%Db6SQp2FpmftGJ`@HAL*5P?7(h&1IDP?<%2C2-|4VK}+ zp<;gp+MRRD2q4d1S5}cLDhhC+!^vZK^q4q`D2JnLd}Wbm4@)giQNik{Iq!u(VkznI zK?+gDMPYv0aHO4-#bvV$L~&?{u6iz3e|*H0f_${m7BEQ-k1KfGeD8u(-@6akI>D(I4b-TC{Hc%BcN z&loA0b3lJRksI~E_CshfeX*D6yU5G5!iU+&3o>vrOew(L{5XYydxUKn_y;-E7UHXs{e#eQ?xs|h>DgQyXFL7A1y+{KTj%_| zQq_5}>#dk&uB49RMYHsul(Li~gTyf{;$1V`JNUdcflzaLh+@a6K6FUKXys4tpkeag zP^95yyh?{1Gfrgn=~}fZ9pj$7+I!hB7FHIUjf|m@bQX}qQuxa|1{p)S#^Q?Kq0ER9 zeXM>lfIYwX&!tKGqhWfgPcgpcDU5Nx7WSQ7J%kw9(}X*t@t}k>hxws)^@eyCYzKZS zBTAJC8#tX1#WH`$S)@3rcX=;0{Posj8BPf2_FW9Se#W&k-wmz@lu0+cZATH`6xm9h z@T0bu#IYZXl`^GYFwi1pf-CJlVwn(@%nxoEBn_r5s(vcfnjxSBPI%vt&rWNLYiZFt z{yqGT^FxA*I_x?-2nIsguDbpCi0OK47BM9o6eZxihv$&m=JFm7*eIE+HH&w?bMae} zuUpjmH3%EI#WiC2wBf1VBRzyJyQDreyu&y)9$+ME|_1tq)3|l`ZQ(8$lBNga? z515qR?%{3K4t#Lphm)YnyDpJJW`e9l$wgldHc4`W?nabqUMzO^CDpoj=f!3#&^Jb4 zOD8NiCKNxnb=9ad`akl_Q93}x1Mhr;Y}I1F6!(tznOkZYV;g&|1M9WWNhq6-7x>1o z=SYdlsxwmSQ2%&EM7@cbGMwPtqWc(*g{lzmMzI(`dGX6BkAqrWATLg|!-?Nmt;7@H z!>Io68Krv4#2K~43s`hjAp=#{RZz3?bB>vTQHKNS=z}GmdQyGUt$*XBwWUySP~E@kbS&aHfw+ylT$u|lcux#IS;!cQ1 z()S4hypuY%Bt}brtqleDN;gQY^lBs8hHl6PZ zr(e|LvQ|o&r2V9;3*P)6PS$nEgQx6~(Ty%QK5mP`aS18(J3^!lpP_I0*5U4{?P2oG z(Zjt)d&_`p=P%mw55?6^#r~EIoYTav15EJaVA=nAu7!h8Z zyTxIZ`P$~+tC-sd##39UH=6>lLB7t_JSRl+gP_T@N;~7!I$FWudHNxfkhtvU zE;cK!UCxip4?=?yBcv9gG!BSCL1g~ITXjko`Gn}s-e*T0B!w3<-=yRXI8@3n_6dGEMv3hp$=>HE! zD6$ubvXg&n2a|Znfkp3$iBp%XIHO1L7P08fcRCTZr*F!-_2} zUJ-xKVX&0mrB*06{OYgp5hoGoI>okF+29?+jrBaQ7I#wycF$0Po8wkJV1t< zFsBoK)G}nxb2_<}@3l6*O1*Ok<;O-`wZ6wVO5Be$)k!5BQ`Bx9o{xHl35EgYl~O@Z zP{RKWlz9}!kEK#iH@sCDu2V6$SHm8nQ~$ofh{lrdEJ6jtE~aH|$~+0@C#8|^pA_)! 
ztCZhqP~b>HBqR1tvLHb+@egHzN$rMT7FqSp%=WiKRTvWDkdcEM3V+^3!<*`2Hnq!u&6-R8IYg`pcxs^#{KW%g4HKWB`Kb)g@Q4k1vr zHb=_E50ac>8Asa7<*cRr`@+=%Yb*Wt(rvB4u2kc-oIqQXvAkXiYnL3$XV+RAnt=!4 zPTu|JLW7FXghQJ#qg+o>Dgn~}rv)%Z@2w8a(0{`;1ioJ$4|b(SLh5J;~Va;{|H4o5n|pE#>r## zB_D9&V{!p%;Hq3FX~39rZ8A{>fMp_II1=enU~fET7i>*>6c9R53w= zA~m9}W7?JWgvgh>{w2Jg)}?&OA6D$PdAvb{GJ1Qe-oPW|l2{r?^Bk40J?0d*TN$8iDp9d=Ns=pMtWN=Ok2>`{nh8|@qJum!Jh`(DO>@S>~p-hHj#G(Cg z0UxiBG>%lESSqy__bZl(xLNbt_40{%(r)8f@#qsQC1aDqg_M^4k8adz+OFU9TVLW| z;FwD zsK}Q;!|M4;NT~+O$B3;k`qXLeqr(ZfCFxT|UqLun7oEhhB6f0v6!e=*fu8pXIy%dL%L_$W~>AiIr<>pcvYK?XbgkWU>cZGqa=WiBlLJ z-JJ28Hxj2jF=hiuGw2Qr0SR!ONapzLzyDSE62_90?BPi+z7LjpP$Ww8mo477H;!5g z!TffshvvCE0T}Q;!s3_Rvpg=pC&vQPQ@4b6S6KU*3l1ouePrbg*1&9vVXzw6dFq4e z@O{+r$t^$2{tLsyND0}&zu{q=3uNVq;Tun*o_(|Zihf5=mIUCiWa|G&Ln?6hj)1e< zoTX3H84-=IeG!?`PtG~CZ5hSD8j>JG!X|6M{D4Hl|EM}}`Q8TG*k{_F_C4df(K-KV zaEQUqvger8<+Ay9Kub<%O@*nr_rHi^ch>46)g`~HOSSEdrkG)x#p_L&E81)0hdbhW zxCCdQ-u+zbfC71?w`VO+UwjtJ*zv61*~A4ZKUA{wK+)e>#X0FUR2A!_brdV}O07;& zE62VOl)bUh58|e$H8IyKoydc+U?$e&rTgU|=3O=bv$m^_k>V`C(v@Q_>efa##A@Lu zn*Aci8$3bGtlBqa#m*Vn*mqZ9CGWOCkK=8EJi}0VdqFuu%9c=nTTMY-8+>>9>m(~4 zl21RQ;(yFEJ(BW1XtXgnMoL=?r`0b0ZNMNw`u9F{Ho9wkF&L{Z!+}y&c51WZ|g1EMiF7Y{t#@!7#$x1ytu8DowuDfQWl!k72{ zF9-9lTzaL6N1Mfham%hvY9qbEg6=^Uy<+Bk&Ga>=*XdDjIevvv^hP4U^f zTAI?jj~(KrYMYKbmxvSr{oBL+qMS0ifZG|%j-6qlo<5Hp8OgM{rmQ(wN)Z|FK^CEq z8C~ml%4Sjt{R0NCOVk{B8wV|KavD>V`E^FX6scmjraEH5fMxA{ZgBCH0U0gZ>+kp? 
z%~Jx!fdM7rhi%JF@0!x}iilxJG)1Nvmc%ROcUK+STmEUdUJt1TM2z&^#EUm#8YQWs z=xu(kGJP|Kg|>ZwYiXDOgB#xEbk+%)veC8I*QQ@+Xp|uB?6NDTw@%@QZ0{;?w+%i_ zamA?Z^Eger`8qan>f&+LJuQXsdW@pu-1&wB5Cy>`XHZXpCcKzS170*3ZR_$BR5 zQ%Jen4^UPp2UJ>Fp^{kQ)*@b-4~h~5PW&)GNM6j4=Cd?D2ZQ@qa}Umv%~&rS96kmg z53>d)jEEd@Nd;O-p<9;49vU+B>3tJUWO7=Vp8f6lKtdQu0G^3g^p9Tu1aUvTICwR|x{710S_ zsY1`;TK?PB0Pk-t6A(ZpBHF%@UHuj=HaCr6+ImWR-HukVwRGDTv6Q-6kcC@_+`MM^iB6hpDkH)O_dr_ z(jS$3^4=7wR}VM1M`sk6mCnLz3QrA`gQ^Z$g2sWWuVbHs*dN6;Ku8k2!06{%EUfdJ z&7n8FTFc*q&YQbh!&REYJ^4aVY^x$TRU(z2Cny|-`&<_Fe*RJ2A9cA+iW>k5Ums-{ zYDl*`@hwSpq<6lP&I+pK&oJZkQJt&{@L)nsX|wQ*lDXT)EQ)K8V@%Y4;VPwlNlHkK zJ`#FMF&FV%ZFtWkF`ot4V&lVv`4ckeWqgdNL8?h^_IN8N$>m&HG}ajlCx0PH?7X?! z*aNb%ziM{J4#|Q!f2TILOI)vU7`~@lnq4dCOlZw#ZYC}0z5nMYO-H9n%^cgHVQoP& zp}CG)MAz*)QjZQIx~~-#=y`9S9DFb!s-CguT31D|M1;yk{( zDWwW6(~caT3k#FsD&jAk5dAT8mxav;UJ|LmVuPHWw*oOilEIN&!1j4UT9W(uykpB3 z{Yg9kw=HNl<%rl^Q;gA*pRYqt+4`-j`NGlp2089n;uR%33`&1)LXv!{zR&OK%jp>k(E<|ii>un)7bsxPY93~ioV#-&bWPMz|OYYWszUU91sAiJg z>vPdcn9RP@cemp}{W%+JUiu09L86}0epX@$C8ssVWZA{A%F-M?T$+TA^_*P1c z`8REZP{mzp-9L1>z6CcsefsY}zrdpSLmwj@7Ito|shf||1oEk(_wr(4axc5;?ZhBq z&b~jbu5pb4y58J-Z=K@4zPvYxFW0$F!xS?e0lU?+AMoQ*L-Jx0>Ij1-OT+;n_5P$@ zC3AE^8exzMn9=}kcmi{d9)R3WQe;`{OM|Jhg!zx;siVxl$0C2x6JW37luwsO2v?-G z>bu(Ag-E*`>^c||gv)Hw(07@Pi`5Jywc7S>um!&Y=1MpYq&Ko27j${OX^fhKSKk-Y z^I!btLg)>!Lk0~;w1duy=>s=I=}GcDzN|ye>NZ^#$lMIag13joPtJ!3cxO&=*wH&a z%>m56JaszyX2Ag)yJl?f4spGB*3gVVYEA(BG_wXNs!5~&;7PLpwjT7+?^z8l)^je`w%<8bSIRmS)14n< zx@IE?y%8$tiYoszsOg)UyWfxjMO1ltdD9Mq|N4YMka)#2KIDOZU?no{3*A)3KvT0l zbVdy*ONPoW>ScJf^!4EGGS5WKvYUO#b0<0{*41$CEa_d5al8I2)M}A@+w&!_o;$Do z&9ym}lXk4LYs&?{>rA>tfmM#G>YF*x;#$+r&M;d zbA>1Jr;gF-&hn2y>!7>uYA$Z)N?-Lf%5;{ZwA$__FaE7`(PZsyYT$`9wmIy6aHrtq ztzrd5?Ht{g9{G@VcTzok%aq|?lx5OJ1n&|0|OVgCm|J`hEY;2q@so9)d zC;xnuH{JA!FEAKXD9Sd~@Xdr{*(%R`>HBs}&wta~3x9+h+elwMiDoiQNldENHwi6VKDQ-$f6_mPb>)&^6WUn&#|;aSQ8yIV z{!4HepUghI*zM9YL03qK>nF9=^zCvGWPi(YEE~b=0^IkPV#ZGLG}nf1$`fX@>SP`) zH5vUWdBp0S^!f%ArrJ1=K99KGAhpULz5zlAIB6f#>iEwfDTkDzHgwrFztao%Clt~1 
z9m^IfZ{L9=#*Bq&N51VuTNXet*zMj5Jp7yZ4`@osar3G@e{6$G+UsQNI380zr5psG zG;sO8OOdC0)*~Gs^7f*XZs(h(4r{+&mgvaC92P~GeOhNl-dSv1O8i#>&W?Te(JMIG(s>4woz1^vO-qvDQ4$CZNYVu@z^JhXJKvb6P^DThw;n;dzdb}inxI@s8J^Tcyx=R zBI{Gi!h7~=`x9*0%VKG4 z6EdmsG6P@hz>g4kDdCiHN;I>pJC5 z=ti-JOR1|=qXj#s6ggb#VlyKrcRxOEyv6uj0&SbTF9wWy9>H=x`h?mB+p6rYi~hC_ zb4ihYc$?QrA#8!Fs(#TxMM7!x(8pl?MhRVR zUkZGU*QM2eeaF4MOLEUZmXmGKLKw}&FW--%$wFg(57rJ(+dV5m2m!b1?8d5DJsjBI zuzEJdur8Onnyl3AuWx;0>c1BKYdF^u8Ow!h+T#a}nWi>c{y8+3Wy;sB?7h$!pp#pm zWxoFBvSj_kXAuov3XHJ?Co#LKrRp3|YRvTREGBR(l&5Bcej}et!W5%xqs*Qr`8Mzs z7g}-Uud>T+`9|_Ur2qceZ;?DwWH;SSpZ@{7)LqMnOK+r8Wa}^^h%~RY{hBg&KrBNI|&{hz4OVf*1VSkpoUHL4b&5a$5rLlKTmN@TTgp_I=G0I(i z|HQz8Znx#bpLmFu2LvAocUS1x-;ao=RKYq??bR8((mzCHo{hQ%w@K>x@`bpw{?l`p z5T(7jL~*%2zDx}`e!8Q``td`*k05P0Hq zJ2_=rRVNKI*#?Y=K{75q1CY3%ss_Go2!yS?Df#tB*)Jw{O>cdU=rYcBvw;-ekaNCOo5HSyyoMrb18(r z-HU@OXzA!jhXGOe8l;yOQ5auW%$;Cv`I~?nhN@#9BWpG_zK=7PxZ5RnMn(H5G5uBv;_U>x^IVi$QWT`WJDX3g zsb1JrV}c|>0k;KHI^VaZt0IOs=@2_(yhTvukG_dINlE>Bg}SDbZ#5Q;;?3IH(pQ$7 z7{B9|i0N+cqz#=*o?=e-c7nIfruRyiEcS#d*#k z+F+n%VRFs7ijHNlm&&arjn@@}XWq~7za@Z{x0iO=CgXm8z;|^TwB>*bUCVjwW=lyhkrW#f$|P)jxn9}Qy-b=k^4@#iyC3tJa`yR%f#TreQo^L5B)FoH@U7jz zQ@^<)TKS_8YR%Q#Z_j}PP>6J{CWQo74#2yjoyVl7R4R39!@Jn=xJ3hX5U?F;KWvtYc66#(S15Dx+xB$Z|Kh| zrLDJm2^9nL)1F3K6dM*0%sS=#t3JAjd8z5Xm7t{1{6G@7(ADy_zR+aGaxT< z{TG{0G-0KC2X{1l%?&cZMVpxv=B+$FBDv9qukTs^VyaKDS*m zR%!?5XjiunO)*oqxBV+p-=|531;%rETr+&^HHtx)LAxJENkzSHP&(5`{1Uo(QB?*VrqN@kk49fqyRUb; zuEx&BgO1#US#b7dUAk_M&tFJ{T;I;+f?npWTobOScG2)3lOb0yrOH!z6rC*OoEf{& zqYfcV?~SJGyyD(eU%dPkk>Q}uHP80lHSe-Pza9=*bIH^#KpWdC-DN9cdc!7-`Nl3X zqCaS`UKCa#y7ERT_-@8dgasVcOmJPH5%K?-2;PJiQcllA^s=5ud9%`~MCh3YvNr3avwQ9d=Jv-2 zN}$YzcFJ^3i>E*-V_CP101whjq3V|4<#_iB=UoqAmq`um8HC4s&>p(P1HJO&^Pzh# zrQ@Rtz&q1+o#pM1^tRl@ocf&U(muJ;?<>+uxl%LGhlqn&sI<=8d+YS)ez)Wd=$hvY z*G=RNL{U2ac$+adsTjqJNqnv5tqRU9X!r}jQPuRX{aL(7_-xM-hixMF3(0q{zn^dY zGAfITSku;6iOgdfquXh{^=tHxn|@ZfX_gGJ??*-uWDG=|Mho92&R1-ky^_bAh!H)? 
z7>IWlr)9s0I<}NTzw@CB7PuFeKuem@2q6BYnWA?oT#6ZQe}}(dGlsdT2L8@+(DIPn?%LJgE;kn2%noH-qD{K6QU z^s~BzT^>AnhMmfRD64nO;uZMmU+|q-etC1GQFjQ#{>356ro7c3)AFNrYw8Q+*Nx9V zvKb$i`T+1O8BPIwn*Uvy&#Y3JzJF}PZQI;VId34wN{RZeUO6zQ>+DUP`SO@ciz&*` z#lN3!jP?Yjw`gEy%2*qoIOXm0XgiTbhEn&bRW=N;r@|!r?tVa2%2x+f? z7Bw=7H=|>ksckYHdxz&5%EGv)0{6piu=wXsu!HiP>{W6%MRqHIG^_R=xp!j5HhUnq z>Qm~SLS!X@>17*_6gwwh0`EzJ%AWb{V#Kfll^|km*ZjW!W|%hit;t7~n~M==6}F_6 zofzwkiZH6Z@BhxG`dsD7P;x~Z*FP5XXnA3B!^2kNxj(SD*r3c&>t$<`7zgp5K{cBk zr|rz`$(6AKm-fC$@THsZJGjff*JtWxWVBG<;;e&Y6%hX;NLwz6Bw6K!Zefxia@OY~ zWHwCzPJ=-GOo8z2yNsE?WrB7tg9A+jTfQ)DWZCblGDM#L(R8;l;17+bj0iemTnG_O zdZU8=KD4g2oJyBdq#FB818r!KMRHSs)Y}f+pqxI|R-e_UkiLjQCP)jTw^5^K^NWM6 z>H2{c;7MSGUKNFL<5bN7*`0zErouA(t<%iBuZEExqOX9E|Ki6FQbLJSB#Y8va&uHv zWrLcQ1sIW}kE!sg(`suSTWjAu{x=D>{vokBz(_xns14EF*se>DInzfDxQdD5EQ>jO zcBTcL;qTru{?=*^@vMjMN^kpj45@v8ds(m)<;D)^MJoA_P|#b893RI8mNOqOL?763 zk3vn`#f@`@3%8YdVh{;r2Q0-QP7Yb#0{&FR%hOo%9-ja(vBmSK^_hj%nH{HH zUn&3)uvEGe(AJ1b?({3m0k#fsVMj6y zJ(bp4q`KTyymTdkey!i{gio1*j0BPsTkeOczsBShl-aR?6HzvsaHC)r4)OoTRik+$d z%A~p}i6&fDJFmXi6dg%k82TRtUKV3+F5-SybSRL%d-#7}^8g$$o1&w2VEl-*o!kG$ z9oYE)e_z-v0KCVg^}Sg=rj3{rqykg`t$nazd1En7>M}96vdsNG>is2!ueXjIv7#q~ zJAO`4QvS(OdMy{eqtA8|_9x^{Np2apx-8aPp6r?;Ck1Yj2Fume$+%?|*E3bRp3_xF zCT2HnMy2ZihpzVkXR{64$4k?yw|!}8OKVqRwp7(&Sh4<(xAXh{-|sk{BgZ2op8LA5abD+j-S_RTuJwlP8I;AEd5@Jx z{bOU`F5dw%OwqNCXeA1s_=tjx@=z-|d`ecpws|iSZ;X~NNg0EIeQ0KrF}?+-@20Iz zWr5$9?bMg@!Y$4!nia&oXb4(=Dn{uNshspsH(D^LZq`QoHWYnq`b>nRcYltY#0}oGi5` z7-t8Y_`H#&Et3VG%u+N)?+EfTLGl&2UxVx3%g@+PXE8$kRWSV|x!(HQc=ZNG`-%z; zQuwu^3{ITNjAN8Kgj{qpr#g<9g4n1~z|6%@u1%H(2@GZ-0Vp6gXv8MzyYuSRT9GY& zuSf=YCZPz%C`MU^m^fS~YNVAfR|r?)#N)GX+O{3b>pOBMN7n;Q5Xmj3t?tF;T6?u0 zZvn4K`jo3)%6kXpeXf)38s+E^EY_IG3-k+VJq#!q0SYTsk^SnF{p#%6-E#bG^-mDbb4Bf3VQvTC@IoY6>BR8(gq zzkcVd36n1edxr-b_TJ2Vebp{)NT`#URW$p^>xJ84*Bw_oO|f>|%8TL($#m4p7*f_c zM}n!8ev2KyV-}N{u#{scUXO6jji0p`r9CoT2s&6k+)%SPJlMCewG~iPQ_F!sDta9~ zZ|X)XINzZ=G2j$RItOv`bf8uwSzu#Z#RK_aL3+WKXw5GM)HmT?;Qq6JVUVWfi!Uw_=N=`Xh 
zYUO3m^|&sZ;^EG(8{6dmuYUF6H-4FNI!98QBN^u8>EgXKT)HOVS;)Y^&~>=u$M7w| zBiMS63MVq2E$VxRu7spD?-C`mAAOh}w!WVFLnG#>XDk14J4t9u)JP<^2m`x5l3w{Z z-!*N#0_l%0|JW}2KHXNc-e2MdFHs3r3~A`_EV+Ovwm&(|Mo1HKM-Y-``d*eCPI249 zJvJBM+c{4*VT?5C0 z!mPfx_j=M=i)|O6-u1v{J0Aik#|*hA_Z-=*oyY*ekqK*X5^8TYafh=zXhlI>Dne zZn?^1(Y@@R%+Me6ssS0R__T!^kS@Xelx=Gpfdc9;({q0AgO9VO-9SMoc;Baww5nio zKbQbGI?u5BLtEa^#+h#>1i?h%vP)(vaOE@p3r`;G0zA#l7qWQ_Ax~PWWInZv1c}LY zPdfc;_(+jO1|1gKq8dO25H7~(s_vAKxmj**?mo|VCKgu@>Wb!qxFuFiuaX}&`iY|> z`a{aBgE#|Z>>o9X4FX3*hR{OcB%DdN&}*>M!d!^D(}jUL`5WZB(W&Wq%DE{lW|Jx= z zSdLHAs1}X}O-zX{S)5^SO}ukHlohD6Ses6nMYh(nEz~t_kZLt<w-j9SMnfMrhg4`na%2+LJ8h0WJI&zq>wY_OK6oThGVy*tWf_Qwl-#I8m z&pv+l>#ZNZI79855FSDL{F+wmE62T0X%>`RXHTuX@X~yX5HYSzS7`wGo;qRJ9j2r( zank<$3x!KNb`5btVfJU{I_^j|gwswcVo0RG{8l!$fvh(Rmtoje{mF3B6}AgA=XdMe zi8^~3olU*!g5?v!uLp5;Rhd~|#d=e`v%45f$afMmq5BSooeUqqJahjySuCa>gR z06#o>sqQWnlA>1DuU{m-m03Dr(jzu!in%pq)|`gOxLn;T)2bp};CQc3rmsuG{kw$S zWLs=#%RL*U+Zj?Rt^*i11S{Gjn++?gybef3_F%fHca*+RNl;;91 z6E2dZg>6h|?w9Fxtae_^MBPm*E14Hc9u+Vys29&>kq`yDH>Ef4jx%_*>%#X!OEoak zTSM77;(W1!*hJj;*-2sb2wk%qV(`}YeASGw4O=e~GY5*5-)`O06cGdgGTC$THWzZ4 zN)w8v$4<@KOPGMUZpm3Z3t|}nl|hmoeLmbKcqbfmP)Uy|U~R_-@PAD1A4<>OphUTj zOQCvEGz0>0tz*>ZuQ1jmg@-TGIjbFEk88*`Q4 zT%6CJ<0X zS`g&D9mwHReZzen{}gDMQOLEe$JPVmk-P0r_829WL-N2@vT3OEGXt#+p50X50LdH| zB3P*$6u0|8+j9Up74o>MSDc|f#Ku&q;kopjPTRgi`OrQji6!D-()*xoR4$|1faN== zNY!WL`@t^TF7Bq7MESHU%>zoVU)Q9nY@+Z^wnQs)h4U{A>HFl|%5L}*Db|Fy)JT=2 zX#6Jt%*{DFY>EL|<@*qvBnH!E=-UfqDNG55-e|ZxLD@u;uRSH-d3--~`++_alh6cX zrv3#__!*9X=T$hi)l2-!dy|Z7=7>(=J4^C$xXgynup5K-WXgC}>$w|3)vM0GW$9DL zKZP|)TBzis9(9j5#ys<1?=)K5Nzzu`IHzLTbg}WuMWN8O0IzhFPBu28&g_t-&D8gj z?eQ$e7LLtYHW~Pwy%E_o93bImLs+<4VA6H6+;EctWE}x=K+!+T=u!7PnvSo)O%c$>GitA72Fa2>m zvcjiUE)`z-w0iZor&mo%kUhY-Ri<47#Hx0srnK44dZZ$nJe`udXeWdQeCK(E=D}tU_2Zy7LKG8c`Pp_RnQZFb zvsu;nNpGLyCu^IZ0lR~RlBIfA!|TL)v9)XgQg$kB!>eIBVRpvJ0==v943Al5%;|^b z{JgXIrG@IHFOp_IHkRa{V~~M2$lgI1r(MgSYs3Olzr=vZ+9KnP+V-a9@J`EbCzI1c zOo{$*m2IU7n9<;NdY*L)wah@r-H@v8fETp;j~iiT4MNFzo<`0IKCEmkA<_q~ZVP)s 
zlE(pkfKuW;X5Nuz%|ftUY3ezM$$8J5BSrajJv}$}iCb&XK#cpZ_b4f;vT3IPoHkoMW*Qp`aVAV&$9EJ2VV6ceYo4r|SGG+Gb6Pr=-wc*&+SJgM%R z8RZGHdg)U2{qs~(_U$UB@^>(Ed^^k*#o6;hPgNR9??`QkCm`z-Cj9EETaDAw*zAFJ z6Vbv9mEP|xZ1u^(3fyqd9x;C1s-5i?4V+Gh*V<}|+CjdysR{8C_g0uUMo_fL)0(K$ z9lc%GYtX&=nO4dSt(hu%f^A}|sQ&}4me;*3q-8T3oaGRDsnOqQVro)>tzO5_D!f}x zW?*8kTQ2~0O-%(h;^09&i|Z0l7669yoqJ`&4kcWV$mER^_VH8y(K)ZLdRcf96Syo zK}EYJ<+YBl#&Fo|fjiE{6#+@?H$R254WWsoZFX#$QrdADurtulwl3Zgwbi^o%|zh# zSkRCPJ~uDKic1c%YBI}WY2*?>pdWZxn2!;kLqq#Ugaj6 zaiVnN7SWIJ=VuRfi;+8WyWoO}lG6KY+a0uQrs>~faUC+s zo~GFbd2>tzHnY+yDG(`UHqd>sCD{;O3XJz=qEBnjQTWG5+M6y84)5Lbc>UW&aeWj9 zk>2@Dv?Ydq1ggfr6$`kTWuQQr6Q|c38a$eiLMfSS!)3F!`hdJnp7l2|xcLCeDRcti zKS^OG(5`ebtB8)-G;!Dubc(bM)Td~eZfq)Y-@`5|FnH;N*i59D9n4;eMs2^(TdTe3 zl_Jn*79Ql8lHhI&u!z~~W?pM;yF5|<6g|z$oa~#^#fK0g_e`Z4z;_}_rHbyc9RR&2 zehb;}ppG?EydK$IxoDv$kqGF8dU-mgdzW*`WBS-;n-0G+FiJ?e>}gsso0fZIe;Rfb zkKsxpWTPVz+&2d>32zz!%b9&%qg`)`9xdDmflHs9U_Xdm+DWh3%ONy zE2WmrP*>G@+m>sx1ZS5$`J#oOofcvSl-^hC)yvg;cXWv<>Ki3Rj51_6+Yl~c$n;EK z99%Y}lm-HIWxzg>zLOA1-F?n=sh**$vs3Hc)j00H-VNO7pD5lCb}Z2la;r|x$&1xN z==X&Ca-AKMQlX!jxCJN-62^WQ-=Vgg;G$)E)thZ`z_?6@i_wz%CSaL=9?zZ9 zvyU5IIXZOl&%6DXD9OQ%%ev05D`6tOIduMsB4tt`#qp^HJx%4+{{y60Gf-6C>lIK| z);bz9qckPdWs+)a+|bwD`j3+sg2^6!Y&a}=vg_&r{`GHsm#DWZmA6`kYeBKMllt12 z43)L!oZ3Igu1f-HaUZmIvZf96-yi$hb0m3VQse`ap-26Oul;=JBE((x1s7iv?AKL) z>rKL6D2$2T>cU9k&)>&7E_j(#@4r}d$< z36kjnkjNY`WRFY~{XMiAb$U(o3p(bD7cV-y^RWVOE$K8Rm}i4LUL5#-!Sc`G@ zJ2Y%)t}Tp>>Y=snZ`vUJ$iA$TO?d)2QCTvm#U-H+W7s&rFV;MxZ{LzIggY=U^>}P@ zUB3)+m-6#a*O~}?`0oNx?YJHf5)TQhE&Zo$@TXcqIg|P%jMpO;uFJ^a@4-BPcz*M_ zPwPEQEz}$FCy>{G=BQUnx!n~;a4*8K7H+u6*>n3wK%s#BW4@;doER%rdmYi%@!yvw zJwJBqSt$~0zk8@ka{iFmB1e)~o3iV( z{MyZxO&+d~x|C3TL6a(@rRN{ki~DG63^9JYM}zxu)m%U8kKf5?{X87PV!40yW5pO4 z5@SHa;5%L4EAU`z#L=^aYiZy|iWC7NjlNuV4!vUr3|$%ov~brca-wE0u_28!sDn;Y zBvz-aio0L<;YKj$+IRI?Dh?18uDDOZn*i6)f0t5P-{pKY)$gdyc$<>k63%x2HPQt? 
zI0(1EmOR#5UHw`Qdi|!xGAyw`67N1S>bNT+FXE!n`$HItKFr4pw-b7;kMs&S15m6C z;|BE6+Sj;p5S=NHkYoQX${pH52{HPRUkW`W??YS3&!6M%l!3YDIaIv(aQW^LVFlH2 zO0NUg8c&uc{t|}|bh&oCV-7U_N3L@olxmlx_V# zwWN%*#)>u1cg|~b;U0c7Mq+~y zpAm;F?*rQ1Q(J%1bawyJ$X(!xp^!1hd~K~w;XmHc640O&`EnkBy?yfJLw448LNc4j zkacX_XynDfkqBV6ErWYS{d{cKKbS2q5lJ$Kb(rt(V$1>S1yYlCdWz93QuhGnNF4IA zr6)&bXf^;bou-2HcH9HB{tT_JtsrylryFhEKj*HOdPPyvVWcxN>^!cZAR=xUNP+wB zP@X=0x=@0rp0@IUx$G_N8^7P&8Y7g_JumeI7y@)Bj$M3~&ocap-+rk0RiGl}SI>FT z`|`yG8m?2+v!$ia-WN{x$&?-zA@^cOrq?~D7?w`_<|blfYpF?VEH;3y|7$)@Jd&VN zQuJw<-}}WUKQ-m;m87YYl}m*xt-8sb&4ShO0>%~n>7^bwxULI;1Q2C9aY>-#qt!(c z(W`}j@QtrMR}u)q($&>9t*$(%26uAS?%d5R#w5UH9QsrPEXMcpU34xJxK-u*a!0~R z!#X=`($B7VZI2X3w2i-3@9X+K((ArBj+(7co+d-YFKJMxmi_H8?!Iy$lwCIh>FM1+ zy#0+pdM0I?kn^9h)jXg!DtX3nO+eto{_fUpbKB=+XCyrjHILlNaH%64&}aS;)R+xJ zv*<6q`>&C2AvG0*9T*r`a}0J4>2Z1PPWxdQSj7^{^_Xk4fk{0{Xy}B42}F z4{9|dMg}oZNL!Ke=YXe=Z*MwqyZkg#4Q<6MqLQZCjzcaV9^oXBOU&< z<)fZkG>aG|nbq6@5=o_tn8$`t#a}~5MqPaKGVGP!745J)H~O2PWz^?E>Ih)H1*wXY zxYUQxUb4<6>0R>RoV`H1cN|?nhs|;c}E z2!hx41i_SY|H^g!;U_C&RRPEm5p80Fm3ilou{S%5^vCkO;hdqo^#Q{+ZI)k}Cij3J z-w}M0BJT^RAzBu2MBtmVl()S+h1{pPu2+D>{laR?>Dq>W`bkmlnq9rA8L3PuB|V^l zuIjpOC;Eu?5b;GpxX*cI5d`i3$}AqlpPH45ppztBdcCC$_A)AUfB>E$7s6-3eQ#%W z_x55oa(n7g8DYxGqDh^D3p$t&!_4(3Cy3uQg~4Rt=!}mXW}Zm2&_#O(7Mp?jIiV-q zPW2BZ_c}PR=8`@0dyljDNN@N3->U5!Fra{qq}aoGQ;lm*L!QL*e=IsKyZ~T z&_*XDDu0-C6s1;Pvwx2EKS~;7YM!3^YVzWam6P#X>L4oLZB$;@g{}D1^HhNw>QqGl zc93;EwaskXYBv7v2dX*~e9Xk(@+GQrlXvGi=*-ATb(8eDW^D#~Mi)wQhf3}~EO4@FBNhpx_nd3kR=LZPZub>1{7yR$WVly1B6^@^nR>tSx24ECqC z?J6KhDS~K}6E*)LneUE-1n$!5!?M!xkf422ypcw^tw3oNpET4nfS20Ey5Mmj)Kn=k z`e{~XN_dM3GcD7BWmc=)b96#9R7n%vea>$E&Q+Bk=&^%Cb0by=nzgR1`T%HHV9s@y zDH-?%U|!SfqpG%|p^Lwr!O_jfF3^NWKdM#nuyY&m+K-jI!ZI^)ATUYO=m~QIKs(m^>eFh%LOUa?7E1sK6u{&&J4HCq}PWI>TYxz ztWue%w!A2r6$^ipB0sPFoLPd0cfMm=9+4o(S(e;7(q@@WF%5G)_((;Q(M>Ng*}t_}V!W_WAbc4D$2HMdGfQ z2kZtwVz!6eb8odGq{<~2X1hx?JoqWg|3C_(E+yEz4h&gLu2WY=53*?cy%~KmEpxkW 
z`8K9D?Bx8;h-Edl64@fbl?%mkphH^Y6}#-B(F0IBkk=E{0g&u#?wRcsp9oenL{-*~)o!nPsN;E1dHeM!KQqPMJpa8Unv^TxA<&an9cwe(eQNWxeQ4BKzx7tCX?CA4H~Cy>t@t{XpMN zt21~FewcjlEs|ftfpt1)!ed}~rs$80J&KGZ)SDMmXDgSx2y9F@0>XW;v}Hd9Sc*pO=>f=r=EsO2V^Ur2oZ;hUC!i#AL2zy* zTD?t7Dh+(gRY3VC6Bk5FVY1aoz-=nS0)0k@GG^DGqg%^)fl&f2FM%7jCyVorhDqm7 z$i;&e7R<&weq_=`AwE^d-sbrZi8)xM#(V_7wWUsb3E6Lc+=Ea};sq5LqS0uu)6MsF zj`~j3el}lH{$nNY5&Hz3WvsK|IM)PtX7~5Pn`8h@ z{Kznm^vVptuF|;x@j9w`W44g$Lsv85BGc(cdH~4xD~)|f`NVX=7J#%zpQr(vPojac zwVQ)!#MmhZU+cr+(2}=IGOTBwey#HB z@!Xd#S~g&5{~2NdCpAcy?tZ9?*ciUJTLM5>z`U+kfhJru-A77hR*+?+ z7rk5WCspeC#eBy(K6-3T4enOEO1qRjzXk!whm)1NQ==qZZeFKvBPgzqlb|K(!n_LC zDYnZlo~8-EX8ug*v1J66xPpniRn>yEBY?hc6wyUS6o4~oi~Z;M*TT+xcnM~47r+^! zZ6(Bm>X#9It9RGqe=|fsfZfm6q=XtLSX!5`x>hFDBVdJfP|Yf5mz8kmR^bTBe8>@v zMqPg>3y1I`K6&$1I-o3UZs!6Dbu+wX-1KyjO9kq1@K*LcXOGpVGv6fnf2s|m7v%zq zUO)2@wyCM9a3rdGGgu|8eY<;-2JW7FRX`vr0G@LDK|kHUuvg`M5C1rz-<}A#*$2xe z_*T_QIwSeM0}x(+#;{mtksuxnOm@2orK4kc=XbGI+pogxF6muiK!xkddpRuzS{63f z=Fg-I!Kz_m|4~BQ&q29Wz0p4CmgT5VurFo^VTmpC11dU4%%ATy`Y|WFX5Y)_ed-{# z^l2Fun1zy55SO!Yr&V?H3Vpk}lKU_ugP+5qJKXio>K@C1d5ZpD?(*-a9XZ$TVlc%x zTi!fm`%d|Au{?0(1g;pcyCQmVw`Qc<(p-gZ2O!d@qgbfr9+o0kZugw*bo3qO+juOau3|M;a& z*0dB})hY7Tx^o^)Ou-sh2IKq6g4J^)5k-V^D?D87QofTg=SV_k>%CT!)@j>h8(w$- zaGtlV(!o?YE!KO77~T2qbn{3{Z51GqA=$C^ssPe~*rLhqIW2;ZW)ZwY<_tw@Z!c~S)TY<~9#SXuDr(Wt=5e+{&IipN!9*ZZR=aXgqG#LeR{`6mf$Fc6@)9t%FcfgG8{#X&A!KyKCwxL zO7HAAs_V?nbbQRCKI0G2#Hh{VyImc%j*p+C^TV9#0vml^I~)8o)~Dww=;X`1x`xvg zZ$7vI9zsGdNN3LmQe8ktKMn5&8)oYlVkcG3^psz7qP`8jz?+c34Zc_pg_^eeITt(P zythD}OU`XAQvbuN{A#07O|k%ySVF2e!EHW9FT6aWyXH;A5?Oba6z&@=2&}%hup)V? 
zJ~)5s{EiZFBQ2_{$}2}rv*6O3wKK?rwS83a{IC|9PKIW7;=MfmeIui*cK4o<+Lhm( z2LNeRXo7c7G2iT_FRq%J6j&2rrcG>I9x~+OS*2ui(ORRG+P|_W{=-OXZ~Fj;tiN04 zX$gOnaMvKP@8ddX@4$(`cD(KPlm5>epdO_}eQh+&K28aYow$G9~gJsSGQGlc%OUP}*C$yJ_{{;trne&l%e&bV>rJLZ3`lK`Q!r{zU z;r1;Vy9zTQsr?rbsFm9$T9+>uI%_n5V%5jC!y=K*|2981_HEUeW?UHflxEG}C`(mc zj4Y43me_G78_u&T$8bh~RdvQ|Ye~70R_#L@07EgAC2t0g9$ywjbFtWEcSXWEExfN7 z9flllSCnT6i5W>>k9KsG)wchwW?+iy1Ch0dtEs9q(DbR zn4J}qziZg|y-yHZSsvZFRRuJiRf`;SkU%isnii5}+2RZq`Rp*)G$nc9MlnD#O<#Jt zeg$&Tc3NWa8aFR>0ap@GDT$2`V7!}0MsRC|rQZxYnCsXf^@RvTIUg}0vogNL7s|OS zci(=hKfOtM%@U!crFDbj-CM&_SCaQk@UQajkbF&^_fYLXN=yy=sqH-$kZ$82qVF#g zzV3_RNLqFWcCerXDY~<)udfqhb5P{m1vO=Qtz`C8+el8;(z^+h2bj1$-|LH7hO_HacX*!ME!;4LCs#Y0hy_^7)**DZKCkivg{{|G`9r*eCkM{QO3 z(~5(x`fgt?$NbdKdUw;QZ>Cq9Gxf=1+kY&-JYhAex_04k`xJ zF5CI-gpr2-Z#epZlftVn1uqSWw3BW1MJ?-~d{b#e6>p&)VU-N-E1S2ak-oiay7Dk1 za;iWm(-4^PE1}%n_9)&qS(vK2mWQU>$>V?tQBJ;+59BO=bJ~wCs$UoG;O2G2}{~Q~ek$avz5!y!v$j(CElqjFCDU zs!?HJtHV5SBr?H3IRxdM5i~BtbzL3QF9O#Rta&8tAV8#ij|eY$nw|q?_;+V(H(5x1 zuTt*^Chp*@p&IHie5e12a#F5l*`{&Qev|M2oT`SnV|WT;vHJezC2uBOLl?U%)jk!+ zr!T2qbURr*QW3AGcd_f8BmHzMFlinx*j1G1Tf*Fchy|V%!|+Yy>GWKkBVbdh4*w!U zquFoy-)ILz)M8HiU(9+5Db($&muEP0?S`@JgrWiiTEZ?t$z}Ma_Ck7r)-X-36MxBa zWr^gei@_F@Z`qSxrViP~@Y(u`M>@9^6$x7-#JPI&FFGB}vhxN21BCEqJUb9EeVFj_ zrEohh`Stj?MT{&hXj^qaUjHYz^LG^banE>#l8n)&!c*}+yuxMhRQUX2D&W@U z26f?$-@YlAZLT;#J)dQKyoOzmzE7>f@P;MINcDjZHIESS0WPIKtTwCu16PibZ&T9- zSZ4Zc-5#aG8Snr|k-$h8O%nfC5*`rn=`37;{XY9yCG|H9XN%0cUQNXwKha)EPaF*w zCbqnqCUu`yHn`7cOHFtQVKk%>q=0aZ*iRzMcBhbT5@Iu{0|mU_>gpU~;|$gMSnu=LQMfF9$t8Uo$e4PGMM*0M7ixndVvtY0?Tho} z2ZaKdU-Tkkk-4*y9Br}^Tp79YPHShy(<01kc0|8!SR1100^5_tP3d0C&fex=EOQMl z0JAWhMQ9>9N@r>mpcN((-?LMN>uFT*{B%)Oy9*BQJ|Z96N4~$RmW2DyMvrPDK>1iX z%`R5ia^7>SJ`xJRlZ%gpJEN*SohZgDEEkfcgnnV$Qxug?pdIfyXkVv2q(soI>oaqFk48NYF8s#DxLsYIL#iI2Sxcl^X(uP$7_q;fh# zRR^jK9J)cR(#$`9_|*v%>P8LZ84ji*ELTOh2TX<=7TZ7fq&o92e~1*6{c?_Q((Q!x z^XIHG8$COaQpDq-@CbvLi{IX}1%Tvakre0H5j+#`K@fzhk`CIWRL#)dq^Wk7tBV#h 
zs9RLM^~HhQ`YDk3N?36X?Q)N@`0?>m@jT8dWu@Zk%yG)a%!YWb9H)*L0~Mea2T0?z zUux?)7NeVBx4U;lA7msv|E2ZlV*&85chozN7AW*b_~5r}-+ZO{zdaXFpi}z5-X~j) z;q#N#s?I~}Cm9`IJFTOSqJTAfi!2UQ#s(XG(#eOqNMCS4#;Qi=sk%mE!@M@n5WW-` zb{-fap?2&{uJ(Dw^a0su6k30Ua@oQq(J zc2=3#QFl1k=2qx73d=Z6B}$xBd|@zD$QlClQvBtLCDdx*uO;?RxqAN8i~nWlPft?p z>t*8AH@w2YZpRl2022pL^EH2)oyN&A-yWc0+opHUGR*H|MV68dU$9Cz(UT*xWXhCs zNb$jzQCNfym~)YzE*u<~=5p_TlcKg2{Equ5uodcw?#OsB6WZuFMvU)Gq4wiOVB*E8 z8PH(cS;{L54Yydb##o2Ie zaoiPjwfMFywNm4o)YM5o?oJ}phd^@fO|h=Q%WnyyZB}50GpPylW#Vda)3>B- zB&lTl1@TpgutfWAP>#9W>1b-zM_9Z{qhYn4%G@?6N zrdAFiZrV_772WvP?Z6?K)5%^_N?Cx>2;ANKzK8}W6;jy)2KFn*bLURlia<#%ViG<6 z^aA|t41V0p(o?T^WTMP;S$$|wA&@@yne(9qFM|Q?INR-KW@%Zd1iHuTeE&Z0gg|t2 zwI^1|h|k%eOb6HpG&i8lf95-!8ZIoHHBAFwh`K4!6`Ph&oiqGAqj1UdDbp)j zs+wO+?C;}xmr32Jc*V=nspk{<@NI>iG`x- zbW{k+rN`w9St?4BV){gA(0MFrp+G@^5-oWg&a%6i`=Qc9FVLOe0&a$=i0_>Eq5uW< zVNx|(3syN0=X7*X$fU*8I`*KDa?48tkR0`ZO497+;2NwaB zaPuoNNDF1jIr-`(d9Gn&CacBtqIMHw3^eX^UM4Yjtp^iW zZ-J(0L(P&1kN`NaI8C`xPO|ACzdT@!6vvq}s9@aH|8Ps8!QcU1f~FzExkufNzzVKz z@$hhdV$nns&pwe!j0*zqQ+~jQjuObVRx*w?iLeEl0aBZLsd;vkwyY%e7GB>J`;?_B ztF_i)eOCtc>dHAkD6l~AuFxR>aU5B%R?#)o?0JjH&MN{34WSxsNd?YL6U0J9?0 zfg1cc+&cQB!quG&89!3J#`ozZiA*->{`~qKBLVuDH+5KGyFp`W)mKIa@jMNxj}a0# zORX_Jw#^^9k3m>0Qd0Ejw8FSGXA9*4f%->8_qv4VynMecg-y^dXkrTmo3JbQH1W%t zrsbL5jn?5nNvA!uq;*1TU3kjMj2X*R$$Unk85elc2~6fOLkJ<0v}IU?M4?k}VWLah zU@zl{8srD@h!W(?7faqY9GXUzLfk?Wof8RxBfpt zew&5P@>*J*M*YlcK;oWq>8T?c}x21g^p+Y4zAz1mutu7ZssJ|LhshwSO zZbnY_`FHSpVTovL+=NsHnQq(Gb$S)hcajz%v24YrM z-_pnVHl50T0#G)y+bwr4bf9uIRx%AweH$UbDpbv$M?lpD;%+h&It6K60**RoWoG4p zOT8l{Wu@nW!d>&VLl{o(WPDdV9-uMWz*as8ooXzZeQIU9R=xSa-sIch-XVRQlikn4NRGsN$6clwT?GYa4?N?vac&C^!6zudXRW_YhV7~11R2Qx%zb@_!S zDsUFAmwBcLke{9z@MmaD6@Z5&{%V8-Dy%@~>Q#Ud@Ezw0^)|k|)Z@DOH$?mU9$194 zTuS~oCK>OiOR9`O;R(CE@HL9|aF|bobZ_B0TDiN)??A}8J42%&<7i*mtNjsDgp@SK+Jg>*tkGJY*`xGQrbbrGqo8c5>A2{|Vq z1ZM=NvO&r3xuYIQKGOfJ;k1Cub({2X^_~X@I``;y1N*ba^{6yvd9Ppv%`&d(S;C%d z!?n)8pjX-ur}AvCH|Tz>{TYVlY`DX1(jd>xi@-!&V>LQ!nwTyhA`l$jC&KodZ}FX> 
zZ?`G|WMA`)u6_4t3CjOU^?v=>uTJTi*0Nk1l!N~m`Rq!D{44xEWE~21_$PEWMMrX> zj+@n{A;<<9d)ocGq1V=eEwQ@_iYUEMw1J7eU(|d5k+vbE;VrP|=2Y793OL{bc2DPH z@qH#o&>C(fEC_es%nHRsAG{)8wAb60Eb4Zw1Ln>mBqod6PKNmfx%aHXOheYwGpz*p z`tRp->`GjOD?8CL(F$mx+>=*Qqg&>U7^j*Qk6}D@^~ax|v&m0^(Y?Si<|&7SOl0M{ zsoeNLIzQ1}n3AwTpC~H*Rr~`|7j>6mzD_pHAU?wFswX%lWUu>1tAQ87gS=(A{x)BRCOxRv#^ze>0evhyx0;?k$t_ zcQ#!o(u37E`lAv8k7+?6oEb^liJ4`{QWH{ru!glTLu#)q;vkdLtrLb(KR=^jvNy|p z$;pxc8JwSskMDPEf@k%2PuI$MTLT*dyIY&-4;v6nuT;Jv*CiJjWQ6=NP@pS+{?@YZ z-k`K+$aFxQo%+_OlfL@_u+RgKnm4pSp-^Li;;01!_r|zUo#ESV9A1m_CK~2#2`t+& ztVvQ7iRFts{l8j7{x;{M`;Yse?bd%XF0CMt#sn8s>Oy8$l#U4tj&*2T_zp=M@k;W9 zDcp}ux>M1^S|xAe`fF=G+}n@|obv)sut@LaTW?20YwhUN+|hy*ftiz<(SjTOu#I$l zwmS3l#q@NUrl)m^ArYh4SfB8S%+hQNx3xNMhCAxl!S3p|8}9|;_Qb{SPS#UkJlj=Z z_{=ivyCzVRo_e>t=AJtisM#Jp%q>Duzn+v}=QO#$A(uY>xw?(#PWf+J-$H{m-pr!@-T}U(G;(1~H;2E0)JAyH@g^dg?t}W*QAe?a8yfLNOW~YH44Kokz;?z{WBgj=FWVrO5>!bgt z8=FlyRt;Ct(m#*V2*G>k_H^jNiQ?z!*&L3yE3y$EH3C}zb;B?-qpf<6^}Tz8f}lOT z{VAyeY|bKDnB~2&waHL~Vo`b^?t!2T&~2o%=BzSbU3vByT1&8dkQAVy-eVSL3%%E& zk0}ARh|wxC2(cl~TMIqW#fZm71B-W4vEn9k+Yb5sC!wD09Jns3YNKx^z;OlzDz&U#Zwe(9)OF)$k*&g4JaXi?2y=~r#8ER)$ zn6$2E9I_8AmCJq{VLCTFz5b0Su`z`d<94)E_5Zvu^!|9adUpf})fJ7v$E~u6+oYP- zKZ20%Hb8ORAu&oj8R?jzcfv(c1hUD(e5~HTQmap!2Q6$Vs&f420~Ce0?6YS4-X27~ z;1~mAKR>f!5|J)7Ltxg5kULERm^YrKvtuu;-LelO^Q=2q=hs?>S`^6!(SkOqNIo|v zM9?+1!X~9uR# zllsp~~0 zEh4fJ8-Hvs^j3W4ayK@RTBG2S%;9Kof4_)1IozkNWP3{Jqslj`>h#9mT)_jUeVidp zFklEL8;k71hA`|#$ibbufM>TGPRn7`_dnJ%;znCXiSaA)4cobYBSa)j7TAyF9*!Pu zi@0caJZ-3qcZW+*w`|zFd?>by&3p6HG!A3ty~A$;hZ@By$|f@H4ZOb>5@7y6si{AM z0y;UJ@QKsp3dF{4HPWqPu{YV6T9fZ?TG0f(@ZeX+uU_4wmQ8&HJ+4c56G~u7w_sZX zZ1Sao<(KX-r?SlYdTCwfqYlg8S5b70%fZ2NcyPtA_QLJ+cE`8OP4-MDZRexf25jho zGFV{H_TxLYE<=Al^82$tZ<1Unb*m5ci8pUYR$+3#HK+~^tl*gc1^)Zxw~ZZHf5VDd zpJRunDaZB8&w~nfJNK8pmM;e$?Q`_*n26ylerHB3t*aQHkl=l=LuyRi9gfw={<`e< zW-496c&iM#GWNMM6%m#o=xk3exAU^P23Ib&m$2_rvqG%9I;|VbYwC>d=el6!cenB~ z&Zl&(_CX&-R3;)`G7DzysvB6^EQ)AyIJ2!Bu#G zl(OcVNgW8UV);zi@;|h&(BhCb=IbjizA{_pwyFQAPJ!dh3;7M1Z1IgXY~e 
z+I`v!#qG`E5y4h0zHNA^HDFuN@Ckw7UCX_To%94!yUH^5v1dPu(AbyIV6s#v z$;sIF7;BcY8;nwvZDcnLNz!0O_87zGcHUV`*mH<>$;xT^1APP z9Fbg>#qIy@Y&J;n)&KsI%>m#JpD2z9mwON;4f)^j?;;MQWclc|up*J6S(Z<|lBAbD z7pb@gwVJnB{(-#P=yLN?hw(Je5qM$M%^`yiqU!7>*N95L^=DMytp^^W;KDiCw(&ik zD;p^_@l9=M?(tWgXWFz)$VEOE7)?(5UgT{!XQ%C#tPcw~iPDGPt`ps1bZW`?x8Eb& zxdD=NzmKd_-SH(v)_We7GsecoT3KpzkgIFs*gzi<+l%2crodqNTc#<}Vqv2A0ZNNS zr8w&xK^A}MaUfoucX@qC@mI*=@KZ6DAK63i9T_hjSN9~Ty?Al3i#WY8_4bgpM79@K zKS{^a6Ggn??tBgSr6IIB!N>je-TJSEOL`R{Ty|Hque~jCeExXy-zerUXHH>_|1sQ~ zx=lP=3|b3P*Mq(YVLj^~bSCCkW5*3Yq`7O2V`Gszq-3>J?1Am|huSll-*}1FBp<)b z%9K-n;y&+Q&hdHfcQsVu31TXekN>!V=PK?x(`Vrc!tecAre#l|!$uIW7Vz>S2i&~D zoBpbp`=ZZ>aCKHYu=)p}e{B&1-11!w>$%e#eTcqh>lyaFFy-p+4_ zRBQIe2qjfNi;z!6zgD<>WKrdxoVSBPwQNGSTr!l)LTrcMHa%+)qjZVIpm``wuIs83 zU4zPCGs5jVA_xfwY#;9SV-0DXOs=I0E8^S2@DZ`jnWW)+Go>G@D56PogZFe+U3C1; z8}&Z$=BUa^^~-LNd~9TD3^Qk-UDgLh+xR+{!1|-G(P27%_sDYXJSGhI0yxh!~S8#P%&-uQ^B}4|# z7t7A@dE14oy(fJCfi;~6coJ6e_)g4K!-H9(r&wZ=hsA$AT7?YAJ4(&umkoYA#Kbw= zc4ubBv$XVV@oH!M+(Py6oIzvcKcV@{ssH}4lOIS;PCbodpLVj`y%;4+;2s8j506G; zLd7hXtG*BZbEacO$PEtO@sItysQW{*?lSHR?PqNXJ;j+ejK1?OcNv3%<>ddDtKPj1 zAOrg{u;Y=^tw)z}z|%E1b-YfsjjyFNa}F{8lArbO5BD(|C%XD+CQkp}kJ`l4k)QS! zyjq~I=NG}Flv>p@4&fZ1|9mqhZk|2pgj1}nnHM3FO+t0? 
znl49gi+&r9usC<|fQSA|Fwdy~kEA5#(c{H*!1AQyVN=uWfH*}r` znUPNYg7+c(jx$3Oedrgq9_ap5(l5Q)I7yI3l>~4TkgB zF8!z)iyP6I=S(@u3)28Vcpv7;MZQQtLhH%+Ss#avWP;D=gM}{xT$yb5zyBN~eW{0A z+H2&$7(~9|4ULA~_j?nX6h1Ts+kKD9#L-|5NBm5qculd_=ZHjk?q}xLcnX z@N#EK8l|;1viyyD(*-?5>fA7FeAeRwn~4_?6f=>W^4F>cSL#>lNF!tt$32dqmDQT- zD(cTEKh*F+(+;}V1|Gq1Z#uE*$U?}P!fKBJ9-+`vMvj!t_ajX=r``X(B*Hwl)w#9A zx`XnIkb@2@D4J5Z^r*GITxyvL^FQo957_~+Ch@@=rhh$Q#W#~t3%~ut`KXAA6X;B8hvGDX)Uh-aOXlbj zV0TAaf;NXRY*6+_H7wKx?L%ZACM+^9} z@|Qz#YdoEj(}%b|5>}-`?iI;h-D|wN!A|?`DiNlj>Da)d*1S2!e+`KiN0NBL5RtcG${05oC4M<2J#)G-3z{+d_83t(XC5 zV?}fc+ad9YNleyfw+1xWKo@T|c(_K-^U58fMr(sQefR;0SqG`Wn&Ra9y)B{Rvi=2> z)Bpp0;Ks10rK~K}USq8p3oi;-1$^#4W_{@E*WW8kN;--7(i~0oJQ2l!jn!(?srI7~ zqDiIhUJxP(Tf~&=I-x;T4AjS80#yDRIRecC3K-uN&- zd?9J{OccYFwuR9Mg5OGXF=j-5{o@5od(|3Kja)*rt|DH3<2~!i>1(W$Ls7P9MOLv% z-o3eQoX#GaIL9=zL_NL0Q_>gw!mUw`QW)RitNn{#;)1;Aub(ytQEe}gsL=}7(UFvE z-udP;aUu`qP;oUTR`7DoQ9q%;#)ah%bz*y|QOXVQ+7au|vb1b+6y(&Db>_BT<#Lq@ zqtS&6;*4})Zr%u_wx4dptGlyi0~hwPcS9M2DG7F~9y!TFJzbUCa1!(DgL2wfFf?E+ zR%GLN)l!MBO(G~XI-B6^XS^qi2%i*>?($$#qj`~m@K4HY5T>=_R-PFWbk>-)OULF7 z{gkxD(rv}UoIPdPhcIr~d{*0ALUR!DZh+>%(rt@;%O0NRx$j=(yFYuhuWuxP63twfF(LjsAqwzaCp5sGep+qyx(&%lf(yPyWyd zZg|hYa7w2w=O;G}o}CA#mBCS6HJ>b~2H+NdMt5Za(%b#(SZYE;?U5cWNFF72lHwY& zvKQ6a;6S0sY~oWtE1CkDEb6%#Qe)wo=e>?2o*e(lks7Z%ro2;T+FB*{`_G|(r>fO= zL6an)r`#xaeVL7VNh+-WQ81np_#?uqaVnCk@YY{@ibS18w!M_>52CM?+x-XsVMPOB zz-d2lVKZ5&Ecr@Tsmc~^u3OZJ_k?LxnJC+Go#gvZl(jwMs(^;7da2ySy}vfCB`W zRsCa01vo(R`M+dtpjldMBz9(Ms{0@0gL^uwXldy z1cfYi_2*YYH~y1BPE9wwkoiZw4@pMqEfmF<2y(u`4qYs zT@>)MGvf;5{rRPSG*G8bZ{-EOV!}(tA4rFdN7$f1$8Twz3d8aXGo_l%KH1l~G2?66 zy3MQs5Gs7TY}vNutq+2NT&YnSFS|b@yq)|Fe27?2StfM^YT6L~Z04NSS&|SI0QFtq zzFoyBLQ7Y>A&>!}j$ROv15=l1hzG(}>ew|+^jYBT2#VEjM>t$(PE@#BJL%dGxD;2a zNeO``GSd=2-2L>SBaFT+)ytO4VehE?v~}%uZ#fRywLj8_o>A4Qxz})VlQ>$_9JD@u z2j=Z&v$^qB7?Plo*U~&&ougSoaKI{BYdh_!Jm4QT2e(T0pPBx=)NcUZWhy%LPJzRF zdBU!_21g=)<3opqg}p5#p;tyoK=~Eod9r4O``cn%(BRo$3yRiLu@hebGi1rGC5;}9 
zjcmQAeHz{J6tiZu@>Nre@z@G!up7QugpMVd!(D(91l=}c$eWP|yb-SESX}QAd2Uma z>rD-@&m3)u_F?QmZ3V{w-EhUQox!;S_qGDSg0+Euv)X?< zXS~FI&06h_L%O;A6WuGLm_@y`Dl&=mr^5R(sK%)u5@~COaZz1v8ddy6KICKFBqWjSz1OOFI=H0k_Jj4% za9o#Yb?h=I1XrRPYfDbsoEO8-vGUZ7h}GpzD3Q81l{tEP|SKOV3-&RTKK+D|bsbbDNBI2xOBEr_iB#;-hIZx48`%AjnlHe zR&uaj<9qlE(ziSVO{U(CM6YzSgQ90I2k3L$y;YMt`01W1`1rFo4}Ps*_PAniuUBJg zrkVEi+1Ppu9hiOXZ1KI!m_6enS`VzWS-MgmQPuI-@Tv*$>wUnlUMXZv&Z65Eas5AG; z=UUvYHAaqcdd?0;Evp|se3yHjP^J(8c4h@YP%7!B6ULH9n?&V=?YnwM=C^6j) z9T$|HCZJoG6HSf-_ZbB}vkX&}*yYgFP`Y@l0fSHZfgK;CWd>D|s6k{~uB211CWqL5 zRGsulmDik5aeYmhVy)-eB4O-ev!|dN@OpI_xMwkyA9lC`U=hLzpin2xP7-Le+jlO? z4d~?i_E)^MbVV=q-SNFTB8=+m$Vch`J>#uD-|`S2`|RGeS2=YlZygJzVg`2 zJu6#)65@@4^1_kgk(xxclntJa4l;dlsPl+IAXdJ$X>-*RD{{6Hfs*6|yo)d#=Cjp8 z9Z9iTsa}?O1Hv!c`_&v_tnkIA9!c>bU8Y};6=Ut3*qd!Yz+&1;zM9+7f4rn^=zCSM zS3d~JvX(dh?#~POPyN=#jik-m#jdVEABD9)pIbRuN`EoVR^(+3vho>FjL>hh2Z> z=?1-Exe zQ}6=)e8FBBgNa+CBA5H&xxJ(UN*4%(6m4%e+8cs<00iwDbR-%Xi5u_V|LC;`lS}MA z{dcn9i0 z*X*Bo7jbF0LNYx?^Mv}x&e@e0s}{Sfa2`F>PrO5l9)X3Z*iPyMU%50n^FAN^zc1-vf&hcTb88iy-M2I0XZOL zIt|t4#A9Xq?_CFzYP+zJ$Y3b0@7Cq$&tieAt)%glVTt&y?D}7@oMy3+wW;5Y>0;POgb;(9nP{-tkUtilxuP^6IWqPZo zmcR`Bg;-73JzZSuMFSfJ^9}C%g7af3m75rt2N)3RcC&+}4(M)p{I_=L(HYK-$)J(P zL#I4L=r4Qfqi1ki#8lMKoy#d^o4Zx&gv;WwACWd){trzu{UiN2CWF@Tqu%{i0kExM z18k@QHexkOrt$3dT0q&BzlBh58n%(vYrHZ_xBRZH6IAFF-7lt`9!E}ymO}F&k=my* zN|Zo@O3d_};uG0DuNK_k52|>TQ(C|6zS&0U4}n|dXJ&yn4BfK+;K>@ViNI6Zj8rr{ zR|$2JR2QK#+E(c$aURGF7r45dX@XiOMDl$f zlVjNHDj)_6LnU>>%Ny5dmuX8>QaM&RgO_zKo`5s7l_`;v+Fh$s&cd>-hg)g+jrMim zz(#eeXZm*CpOI=pL^+seSnnLFz<0)U=%)X1?vgw@*|pKBpb-pa2uM3&_#URQ@7wy* zQVaug)zST%;VYMaT0p4XkGLh!$fGR$CSpQL8m%9EIncdCmm1rUeE7i+$}HEq{lZY; z=v*@NK8(8hE=9Epfr7Os@EVg2^?D_oh(u)piPyIkZ66%~ek=s|;bq3@)2h~VpeEsx zV(Y{?EO~UQuh2X$5WN(@xsX|PcGm@Inp=beSHAP5n%2kK)PPv@T4IBtbWDS!_0C5% zXNM|v`=8#wMdx|G#LhN@4BU8xuYe8323E33K3Ku~PTqLC=R`uuMrsSuaasQ(RGko4 z4)dfViTMRZ?Dvkql?bj1>$b|MD{BN*8w)xx(?+{B#mE<5-hWI50v6;ojUO#eKsGJ0 z4evF_-%pnb3k$x*r?Ug@eNk3Rjp+K}f0P@g$ 
zQk$y;;@!G&DVQI&Q8@?N+kA#VI?#;qq+!<31MZm-Vl&MDa(zl!kFY$1cJ?79*R0Gp zfb2DFd=)*rEGe#+FrU_?Sb4PY9WNy{XQ#KDQJR3jfw!pvsn$QEtRy)c;WPi9TS*rb#4j4JI1l!mRL<&`s`N} z-M%PQE=(1m;{qwuF0Yv`3xF8zy;6$3wf4@cu(EpeF5+A+vNB{8>@SYM%@|N_|>dKRnxIZ05TGUN zT-?-2{t>ycTp1gjw1APY+MEaZERAJ_03mA`g!c|{>)MK?_H;uD0p2pCXM*x4VMv?< zY47G+>md-VJP+uxG@}BP#AXOF6;K8SsfH3lcQ7eNdG8?2)&cEpkfgQV;RiU-!Lh6) zS5nbvxij-M$UdO^xI_4G4=li&jvS1}@t(>d!sY*^3>7}f4827fdrW0)n@D;VXiIED zG_C~7VF(EjVO=2u;~A5{6|^XVH*6mwhCaMe189b>=2?C%e5c`9Fc>X7SMerq?}@2a zCZLUE;1#SEH4sY9(Iw~$n5(GI)-H5#=OXo>_C^rByoZ`HaUzK9#h?OfPSMteln3L_ zlrfBcS3_9h&m-C%dbMgxdshzMxAO%ah8!xYoo{(Nqkbg>D(egd+AypAC;@-V=QJXw zLHE~zGc%y#ffu4QLoe_~ML|XO_lL(gsiHz-E)PK~Krvl8@3f0krJ0f z|B?GpjqOH$0#Y!`&|9_?Dnh^Fqe$+W-5NBU8O~E&x5cmc?ngKtR~ILQPZ!T_bTdjH zrPE-ObOONU;f;_yqXjYN(0NlkRQA9dyUi+8Q~^a1OGg+43P;=6XzP0N=-Hw`F@Po8 zOOS&|!B z9~R>MiEMz{oB9)1Fta*!b4}bmWjOhA*7)BLp?{22I-A7Z86N3>6kV~@%qE^+^H42M zD8f*Jf1;|x;d_>afrO}bi>&{#y%!%a1w!@N(v@penHr9QdtO0|Oc?U)^&plKLcCc> z%2an>wZdBzvaMthhuI8RX&Q3%jSWl6zz9qH6ig6t^o7B%3S_)Ie1iA3VQX3W{;b1% zM)-r#g2_l{17V`nfgp`qI_mYQq4F2@edmDfw`0PV+eQ2h?sJ*>tO_meQ&RDUZ&6O> z0wp~iC5CqE^-Llua^Cr}gV;BV-_2baN#jg(ch}`o5tQ~hw`w}@Y2xtfOmz0BOV!32DKSMBT{wB* z=9O-J`lrzT*lCaYcj_`7fgKK*j>)Pf4UuHvHuAQmZENpHMe%`vF$kOCBUDk8Z0JBN zf)I_ApZaKCQ?HDKSilVfiQZR?o3sNUDw-k0c4?90(#&(sA3!W;aDlx>A!aXFR9jeF zjto^YiE7@@tq6VMRG>vFaF$!Qfc<{JG!=`gibF3>$GyCq z#;cr>c+Sy*OT9*_ly}J+LjM>?DFggtQZNFUuCJobrUwZKNM3L(aQ1{hyHu5+xC4h> z(nJCFVSapl+jyBLa+UPk!zJbHppni6ZczF-kL2n6Iu*@4=iz+kb-AFTVzW3F8x||} zB@_JcT*ZpSxjT6kVS(XUz`%8r;i4tnyRwi;z(^yC-yCAjXKHvZbIJgbDTOp78=0i% zJ=CbKc&Fj5du30LWnebd2DNpTO)4XM9+= zd<<1O*{DfKI~_M-WifLCn|%6S4p}_tH{xlvF? 
zlXIvuCSP)=4s!vq94OOa+zc~c_OvX&qi7C!7Hy-^kyBJ3a*V0P2PhZ98h zR5(N|KUnKAm5HMuD+)of@4(uf z0hU<}^LJ|UKWxk@!eB!+LS#3+hZ>Fh)34?fRzw)RO?T3?RDLX`B8ct{P~m~@DY?Q2PB^o(=m~_ytZ6L zGV1SjM(ILQbS=rS02Pr&B3Ov=o#yFGjA*GOs-sPdkmP36k@KzI0XsWcrJ_4n{6Zn_ zTBboekD;AWij95@=&_Sb`P+@viH;L%clzdsPun?jkp@$_p_}yt1KSMt%&EhaZ&k|+ z9p!1)Q14W{EnsXHApvOXu}mlD=O(z~X_Ne+fC_TRp#|FJ{4UaEU_SLH5L1u28Mn8J zY%b7;hbUH?-~{jWkW9?Oo9q6?RgzJMMM(igQFyZ{<`O>ULWh@6Q($amo&xPko&4xt-WxbI=2@LhG@0y9 z?XrpCx8^831$v<){v0BJq_b zu^fhRvd*(2D)d%%n*;QM zRngpXHX=oqYzlshY)h_kf}z$LU25&f<>#Q!*}T#T_|?fGLYwt7`&F zJU5;nu{5A`;BpTuBd!;NK*4cB`atd+Xiev%sMj8CzGYVHPrYLV*ir)}+MQ5o{K}IJp5QvtL z==Rm+s^#8o0^gN-Y;eHlT{h*2f8aKe+WnTxt?M#K>UYNpW_71xH=HJ?a6Q!(4|QPy zvc|Z6^QjS{Hyb--$jvRc&N`rIt*`uw%h~>4G9A^270c~j^;z}jZ`U}xh_OK!pzZdr zCQCZo@?2Es&d8L?GI(yRoSrWwS3}%rNJc5dCCF@4S!xxSm=zYQ!6j` zUN*o@wz!ke$QbWZgjPfV)$Jxw#m`IA?mYlLd2%9`yVK2$pUH9(chWSkP+m;E$7x{C zf&d7(abV|~{dp;Rm*})W{KrAOIYPvf{M{<~AD?d%=uX=sUT`. -This user guide will help you navigate the Ray Data project and show you how achieve several tasks. +If you’re new to Ray Datasets, we recommend starting with the +:ref:`Ray Datasets Quick Start `. +This user guide will help you navigate the Ray Datasets project and +show you how achieve several tasks. .. toctree:: :maxdepth: 2 @@ -13,7 +15,8 @@ This user guide will help you navigate the Ray Data project and show you how ach creating-datastreams transforming-datastreams consuming-datastreams - data-tensor-support + batch_inference + dataset-tensor-support custom-datasource data-internals performance-tips diff --git a/doc/source/ray-air/api/predictor.rst b/doc/source/ray-air/api/predictor.rst index 1c438fbbd54c..92a4c818f720 100644 --- a/doc/source/ray-air/api/predictor.rst +++ b/doc/source/ray-air/api/predictor.rst @@ -80,6 +80,7 @@ Batch Prediction API batch_predictor.BatchPredictor.predict batch_predictor.BatchPredictor.predict_pipelined +.. 
_air_framework_predictors: Built-in Predictors for Library Integrations ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/doc/source/ray-overview/use-cases.rst b/doc/source/ray-overview/use-cases.rst index 5a9c22c40952..bbb73e0889d6 100644 --- a/doc/source/ray-overview/use-cases.rst +++ b/doc/source/ray-overview/use-cases.rst @@ -3,7 +3,9 @@ Ray Use Cases ============= -This page indexes common Ray use cases for scaling ML. It contains highlighted references to blogs, examples, and tutorials also located elsewhere in the Ray documentation. +This page indexes common Ray use cases for scaling ML. +It contains highlighted references to blogs, examples, and tutorials also located +elsewhere in the Ray documentation. .. _ref-use-cases-llm: @@ -98,15 +100,15 @@ Learn more about how Ray scales LLMs and generative AI with the following resour Batch Inference --------------- -Batch inference refers to generating model predictions over a set of input observations. The model could be a regression model, neural network, or simply a Python function. Ray can scale batch inference from single GPU machines to large clusters. +Batch inference is the process of generating model predictions on a large "batch" of input data. +Ray for batch inference works with any cloud provider and ML framework, +and is fast and cheap for modern deep learning applications. +It scales from single machines to large clusters with minimal code changes. +As a Python-first framework, you can easily express and interactively develop your inference workloads in Ray. +To learn more about running batch inference with Ray, see the :ref:`batch inference guide`. -Performing inference on incoming batches of data can be parallelized by exporting the architecture and weights of a trained model to the shared object store. Using these model replicas, Ray AIR's :ref:`Batch Predictor ` scales predictions on batches across workers. +.. figure:: batch_inference/images/batch_inference.png -.. 
figure:: /images/batch_inference.png - - Using Ray AIR's ``BatchPredictor`` for batch inference. - -Learn more about batch inference with the following resources. .. panels:: :container: container pb-3 @@ -116,24 +118,17 @@ Learn more about batch inference with the following resources. --- :img-top: /images/ray_logo.png - .. link-button:: https://github.com/ray-project/ray-educational-materials/blob/main/Computer_vision_workloads/Semantic_segmentation/Scaling_batch_inference.ipynb - :type: url - :text: [Tutorial] Architectures for Scalable Batch Inference with Ray - :classes: btn-link btn-block stretched-link scalableBatchInference + .. link-button:: /data/batch-inference + :type: ref + :text: [User Guide] Batch Inference with Ray Data + :classes: btn-link btn-block stretched-link --- :img-top: /images/ray_logo.png - .. link-button:: https://www.anyscale.com/blog/model-batch-inference-in-ray-actors-actorpool-and-datasets + .. link-button:: https://github.com/ray-project/ray-educational-materials/blob/main/Computer_vision_workloads/Semantic_segmentation/Scaling_batch_inference.ipynb :type: url - :text: [Blog] Batch Inference in Ray: Actors, ActorPool, and Datasets - :classes: btn-link btn-block stretched-link batchActorPool - --- - :img-top: /images/ray_logo.png - - .. link-button:: /ray-core/examples/batch_prediction - :type: ref - :text: [Example] Batch Prediction using Ray Core - :classes: btn-link btn-block stretched-link batchCore + :text: [Tutorial] Architectures for Scalable Batch Inference with Ray + :classes: btn-link btn-block stretched-link scalableBatchInference --- :img-top: /images/ray_logo.png @@ -150,6 +145,7 @@ Learn more about batch inference with the following resources. :text: [Example] Batch OCR processing using Ray Data :classes: btn-link btn-block stretched-link batchOcr + .. 
_ref-use-cases-mmt: Many Model Training From e17d7ec7d1f6ffc04a1e826bbf7591b9d9e70111 Mon Sep 17 00:00:00 2001 From: Kai Fricke Date: Tue, 2 May 2023 14:24:20 +0100 Subject: [PATCH 186/424] [tune] Turn on new execution path per default [no_early_kickoff] (#34840) We've introduced a new execution path for Ray Tune behind a feature flag. The unit tests have been stable for some time. To get better dogfooding results, we should enable it per default, so that all unit and release tests are run using the new engine. If we run into problems, we can easily revert this PR before we cut the 2.5 release branch. Signed-off-by: Kai Fricke --- .buildkite/pipeline.ml.yml | 20 +++++++------- python/ray/air/BUILD | 2 +- .../air/execution/_internal/actor_manager.py | 19 ++++++------- .../ray/air/tests/test_experiment_restore.py | 22 +++++++++------ python/ray/tune/execution/tune_controller.py | 27 ++++++++++++++++--- .../ray/tune/tests/test_cluster_searcher.py | 2 +- .../ray/tune/tests/test_progress_reporter.py | 14 +++++----- python/ray/tune/tests/test_trial_scheduler.py | 2 +- python/ray/tune/tune.py | 2 +- 9 files changed, 69 insertions(+), 41 deletions(-) diff --git a/.buildkite/pipeline.ml.yml b/.buildkite/pipeline.ml.yml index 79a7fb78a0a2..447a50224cc2 100644 --- a/.buildkite/pipeline.ml.yml +++ b/.buildkite/pipeline.ml.yml @@ -326,10 +326,10 @@ - bazel test --config=ci $(./ci/run/bazel_export_options) --build_tests_only python/ray/tests/horovod/... - bazel test --config=ci $(./ci/run/bazel_export_options) python/ray/tests/ray_lightning/... 
-### NEW EXECUTION PATH +### OLD EXECUTION PATH COMPAT -- label: ":octopus: :sunny: New execution path: Tune tests and examples (small)" +- label: ":octopus: :last_quarter_moon_with_face: Old execution path: Tune tests and examples (small)" conditions: ["NO_WHEELS_REQUIRED", "RAY_CI_TUNE_AFFECTED"] instance_size: small parallelism: 3 @@ -339,11 +339,11 @@ - ./ci/env/env_info.sh - ./ci/run/run_bazel_test_with_sharding.sh --config=ci $(./ci/run/bazel_export_options) --build_tests_only - --test_env=TUNE_NEW_EXECUTION=1 + --test_env=TUNE_NEW_EXECUTION=0 --test_tag_filters=-medium_instance,-py37,-soft_imports,-gpu_only,-rllib,-multinode,-exclude_new_execution python/ray/tune/... -- label: ":octopus: :sunny: New execution path:Tune tests and examples (medium)" +- label: ":octopus: :last_quarter_moon_with_face: Old execution path: Tune tests and examples (medium)" conditions: ["NO_WHEELS_REQUIRED", "RAY_CI_TUNE_AFFECTED"] instance_size: medium commands: @@ -351,11 +351,11 @@ - TUNE_TESTING=1 DATA_PROCESSING_TESTING=1 ./ci/env/install-dependencies.sh - ./ci/env/env_info.sh - bazel test --config=ci $(./ci/run/bazel_export_options) --build_tests_only - --test_env=TUNE_NEW_EXECUTION=1 + --test_env=TUNE_NEW_EXECUTION=0 --test_tag_filters=medium_instance,-py37,-soft_imports,-gpu_only,-rllib,-multinode,-exclude_new_execution python/ray/tune/... -- label: ":octopus: :brain: :sunny: New execution path: Tune tests and examples {using RLlib}" +- label: ":octopus: :brain: :last_quarter_moon_with_face: Old execution path: Tune tests and examples {using RLlib}" conditions: ["NO_WHEELS_REQUIRED", "RAY_CI_TUNE_AFFECTED", "RAY_CI_RLLIB_AFFECTED"] instance_size: large commands: @@ -363,10 +363,10 @@ - TUNE_TESTING=1 ./ci/env/install-dependencies.sh - ./ci/env/env_info.sh - bazel test --config=ci $(./ci/run/bazel_export_options) --build_tests_only - --test_env=TUNE_NEW_EXECUTION=1 + --test_env=TUNE_NEW_EXECUTION=0 --test_tag_filters=-gpu_only,rllib,-exclude_new_execution python/ray/tune/... 
-- label: ":octopus: :sunny: New execution path: Tune tests and examples. Python 3.7" +- label: ":octopus: :last_quarter_moon_with_face: Old execution path: Tune tests and examples. Python 3.7" conditions: ["NO_WHEELS_REQUIRED", "RAY_CI_TUNE_AFFECTED"] instance_size: small commands: @@ -374,10 +374,10 @@ - TUNE_TESTING=1 INSTALL_HOROVOD=1 ./ci/env/install-dependencies.sh - ./ci/env/env_info.sh - bazel test --config=ci $(./ci/run/bazel_export_options) --build_tests_only - --test_env=TUNE_NEW_EXECUTION=1 + --test_env=TUNE_NEW_EXECUTION=0 --test_tag_filters=py37,-client python/ray/tune/... -- label: ":octopus: :sunny: New execution path: ML library integrations tests and examples. Python 3.7" +- label: ":octopus: :last_quarter_moon_with_face: Old execution path: ML library integrations tests and examples. Python 3.7" conditions: ["NO_WHEELS_REQUIRED", "RAY_CI_TUNE_AFFECTED"] instance_size: small commands: diff --git a/python/ray/air/BUILD b/python/ray/air/BUILD index b3c22b8caa6e..8b92eefb4274 100644 --- a/python/ray/air/BUILD +++ b/python/ray/air/BUILD @@ -76,7 +76,7 @@ py_test( py_test( name = "test_experiment_restore", - size = "medium", + size = "large", srcs = [ "tests/test_experiment_restore.py", "tests/_test_experiment_restore_run.py" diff --git a/python/ray/air/execution/_internal/actor_manager.py b/python/ray/air/execution/_internal/actor_manager.py index e28c6dd72677..c2af238f0229 100644 --- a/python/ray/air/execution/_internal/actor_manager.py +++ b/python/ray/air/execution/_internal/actor_manager.py @@ -360,6 +360,13 @@ def _try_start_actors(self, max_actors: Optional[int] = None) -> int: # Start Ray actor actor = remote_actor_cls.remote(**kwargs) + # Track + self._live_actors_to_ray_actors_resources[tracked_actor] = ( + actor, + acquired_resources, + ) + self._live_resource_cache = None + # Schedule ready future future = actor.__ray_ready__.remote() @@ -392,12 +399,6 @@ def on_error(exception: Exception): on_error=on_error, ) - 
self._live_actors_to_ray_actors_resources[tracked_actor] = ( - actor, - acquired_resources, - ) - self._live_resource_cache = None - self._enqueue_cached_actor_tasks(tracked_actor=tracked_actor) return started_actors @@ -698,6 +699,9 @@ def schedule_actor_task( args = args or tuple() kwargs = kwargs or {} + if tracked_actor.actor_id in self._failed_actor_ids: + return + tracked_actor_task = TrackedActorTask( tracked_actor=tracked_actor, on_result=on_result, on_error=on_error ) @@ -874,6 +878,3 @@ def cleanup(self): self._resource_manager.clear() self.__init__(resource_manager=self._resource_manager) - - def __del__(self): - self.cleanup() diff --git a/python/ray/air/tests/test_experiment_restore.py b/python/ray/air/tests/test_experiment_restore.py index 54ef313c6ee2..c6b4506a6a7b 100644 --- a/python/ray/air/tests/test_experiment_restore.py +++ b/python/ray/air/tests/test_experiment_restore.py @@ -1,4 +1,6 @@ import json +import os + import numpy as np import pandas as pd from pathlib import Path @@ -58,7 +60,6 @@ def test_experiment_restore(tmp_path, runner_type): - The test will stop the script with a SIGINT at a random time between 4-8 iterations after each restore. - Requirements: - Req 1: Reasonable runtime - The experiment should finish within 2 * 16 = 32 seconds. @@ -112,11 +113,14 @@ def test_experiment_restore(tmp_path, runner_type): "NUM_TRIALS": str(num_trials), "MAX_CONCURRENT_TRIALS": str(max_concurrent), "CSV_DATA_FILE": csv_file, + "TUNE_NEW_EXECUTION": os.environ.get("TUNE_NEW_EXECUTION", "1"), } # Pass criteria no_interrupts_runtime = 16.0 - passing_factor = 2 + # Todo(krfricke): See if we can improve the actor startup/shutdown time + # to reduce the passing factor again. 
+ passing_factor = 2.5 passing_runtime = no_interrupts_runtime * passing_factor _print_message( "Experiment should finish with a total runtime of\n" @@ -197,17 +201,19 @@ def test_experiment_restore(tmp_path, runner_type): ) test_end_time = time.monotonic() + # Req 1: runtime + assert total_runtime <= passing_runtime, ( + f"Expected runtime to be <= {passing_runtime}, but ran for: {total_runtime}. " + f"This means the experiment did not finish (iterations still running). Are " + f"there any performance regressions or expensive failure recoveries??" + ) + # The script shouldn't have errored. (It should have finished by this point.) assert return_code == 0, ( f"The script errored with return code: {return_code}.\n" - f"Check the `{_RUN_SCRIPT_FILENAME}` script for any issues." + f"Check the `{_RUN_SCRIPT_FILENAME}` script for any issues. " ) - # Req 1: runtime - assert ( - total_runtime <= passing_runtime - ), f"Expected runtime to be <= {passing_runtime}, but ran for: {total_runtime}" - # Req 2: training progress persisted # Check that progress increases monotonically (we never go backwards/start from 0) assert np.all(np.diff(progress_history) >= 0), ( diff --git a/python/ray/tune/execution/tune_controller.py b/python/ray/tune/execution/tune_controller.py index 7ae7c6986d61..b2b710dae9ee 100644 --- a/python/ray/tune/execution/tune_controller.py +++ b/python/ray/tune/execution/tune_controller.py @@ -14,6 +14,7 @@ from ray.air._internal.checkpoint_manager import CheckpointStorage, _TrackedCheckpoint from ray.air.execution import ResourceManager, PlacementGroupResourceManager from ray.air.execution._internal import RayActorManager, TrackedActor +from ray.exceptions import RayActorError from ray.tune.error import _AbortTrialExecution from ray.tune.execution.ray_trial_executor import _class_cache from ray.tune.execution.trial_runner import _TuneControllerBase, TrialRunnerWrapper @@ -215,6 +216,11 @@ def _cleanup_stopping_actors(self, force_all: bool = False): continue _, 
tracked_actor = times.popleft() + + if tracked_actor not in self._stopping_actors: + # Actor stopping has been handled by the block above + continue + if self._actor_manager.is_actor_started(tracked_actor=tracked_actor): logger.debug(f"Forcefully killing actor: {tracked_actor}") self._actor_manager.remove_actor(tracked_actor=tracked_actor, kill=True) @@ -376,8 +382,6 @@ def _maybe_update_trial_queue(self): def _cleanup_trials(self): logger.debug("CLEANING UP all trials") - self._cleanup_cached_actors(force_all=True) - for tracked_actor in list(self._actor_to_trial): trial = self._actor_to_trial[tracked_actor] logger.debug( @@ -386,6 +390,9 @@ def _cleanup_trials(self): ) self._schedule_trial_stop(trial) + # Clean up cached actors now + self._cleanup_cached_actors(force_all=True) + start = time.monotonic() while time.monotonic() - start < 5 and self._actor_manager.num_total_actors: logger.debug("Waiting for actor manager to clean up final state") @@ -518,6 +525,7 @@ def _maybe_reuse_cached_actor(self, trial: Trial) -> bool: if trial in self._trial_to_actor: original_actor = self._trial_to_actor.pop(trial) self._actor_to_trial.pop(original_actor) + logger.debug(f"Removing ORIGINAL ACTOR for trial {trial}: {original_actor}") self._remove_actor(tracked_actor=original_actor) @@ -742,6 +750,14 @@ def _actor_failed(self, tracked_actor: TrackedActor, exception: Exception): self._unstage_trial_with_resources(trial) self._trial_task_failure(trial, exception=exception) + self._actor_manager.clear_actor_task_futures(tracked_actor) + + # Clean up actor + tracked_actor.set_on_stop(None) + tracked_actor.set_on_error(None) + self._actor_manager.remove_actor(tracked_actor, kill=False) + + # Trigger actor stopped callback self._actor_stopped(tracked_actor) def _schedule_trial_task( @@ -794,7 +810,12 @@ def _on_result(tracked_actor: TrackedActor, *args, **kwargs): if on_error: def _on_error(tracked_actor: TrackedActor, exception: Exception): - assert trial == 
self._actor_to_trial[tracked_actor] + # If the actor failed, it has already been cleaned up. + if tracked_actor not in self._actor_to_trial: + assert isinstance(exception, RayActorError), type(exception) + else: + assert trial == self._actor_to_trial[tracked_actor] + logger.debug( f"Future {method_name.upper()} FAILED for trial {trial}: " f"{exception}" diff --git a/python/ray/tune/tests/test_cluster_searcher.py b/python/ray/tune/tests/test_cluster_searcher.py index b10312d1ccc0..5a3095019792 100644 --- a/python/ray/tune/tests/test_cluster_searcher.py +++ b/python/ray/tune/tests/test_cluster_searcher.py @@ -40,7 +40,7 @@ def start_connected_cluster(): @pytest.mark.skipif( - os.environ.get("TUNE_NEW_EXECUTION") == "1", + os.environ.get("TUNE_NEW_EXECUTION") != "0", reason=( "This test uses the TrialRunner directly and needs to be rewritten " "for the new execution backend." diff --git a/python/ray/tune/tests/test_progress_reporter.py b/python/ray/tune/tests/test_progress_reporter.py index e04bb33d7181..2691a227e9ca 100644 --- a/python/ray/tune/tests/test_progress_reporter.py +++ b/python/ray/tune/tests/test_progress_reporter.py @@ -688,7 +688,7 @@ def testEndToEndReporting(self): output = run_string_as_driver(END_TO_END_COMMAND) try: # New execution path is too fast, trials are already terminated - if os.environ.get("TUNE_NEW_EXECUTION") != "1": + if os.environ.get("TUNE_NEW_EXECUTION") == "0": assert EXPECTED_END_TO_END_START in output assert EXPECTED_END_TO_END_END in output assert "(raylet)" not in output, "Unexpected raylet log messages" @@ -713,7 +713,7 @@ def testVerboseReporting(self): self.assertIsNone(re.search(VERBOSE_TRIAL_NORM_2_PATTERN, output)) self.assertNotIn(VERBOSE_TRIAL_NORM_3, output) self.assertNotIn(VERBOSE_TRIAL_NORM_4, output) - if os.environ.get("TUNE_NEW_EXECUTION") != "1": + if os.environ.get("TUNE_NEW_EXECUTION") == "0": self.assertNotIn(VERBOSE_TRIAL_DETAIL, output) except Exception: print("*** BEGIN OUTPUT ***") @@ -725,14 +725,14 @@ 
def testVerboseReporting(self): output = run_string_as_driver(verbose_1_cmd) try: # New execution path is too fast, trials are already terminated - if os.environ.get("TUNE_NEW_EXECUTION") != "1": + if os.environ.get("TUNE_NEW_EXECUTION") == "0": self.assertIn(VERBOSE_EXP_OUT_1, output) self.assertIn(VERBOSE_EXP_OUT_2, output) self.assertNotIn(VERBOSE_TRIAL_NORM_1, output) self.assertIsNone(re.search(VERBOSE_TRIAL_NORM_2_PATTERN, output)) self.assertNotIn(VERBOSE_TRIAL_NORM_3, output) self.assertNotIn(VERBOSE_TRIAL_NORM_4, output) - if os.environ.get("TUNE_NEW_EXECUTION") != "1": + if os.environ.get("TUNE_NEW_EXECUTION") == "0": self.assertNotIn(VERBOSE_TRIAL_DETAIL, output) except Exception: print("*** BEGIN OUTPUT ***") @@ -743,7 +743,7 @@ def testVerboseReporting(self): verbose_2_cmd = VERBOSE_CMD + "verbose=2)" output = run_string_as_driver(verbose_2_cmd) try: - if os.environ.get("TUNE_NEW_EXECUTION") != "1": + if os.environ.get("TUNE_NEW_EXECUTION") == "0": self.assertIn(VERBOSE_EXP_OUT_1, output) self.assertIn(VERBOSE_EXP_OUT_2, output) self.assertIn(VERBOSE_TRIAL_NORM_1, output) @@ -760,14 +760,14 @@ def testVerboseReporting(self): verbose_3_cmd = VERBOSE_CMD + "verbose=3)" output = run_string_as_driver(verbose_3_cmd) try: - if os.environ.get("TUNE_NEW_EXECUTION") != "1": + if os.environ.get("TUNE_NEW_EXECUTION") == "0": self.assertIn(VERBOSE_EXP_OUT_1, output) self.assertIn(VERBOSE_EXP_OUT_2, output) self.assertNotIn(VERBOSE_TRIAL_NORM_1, output) self.assertIsNone(re.search(VERBOSE_TRIAL_NORM_2_PATTERN, output)) self.assertNotIn(VERBOSE_TRIAL_NORM_3, output) self.assertNotIn(VERBOSE_TRIAL_NORM_4, output) - if os.environ.get("TUNE_NEW_EXECUTION") != "1": + if os.environ.get("TUNE_NEW_EXECUTION") == "0": self.assertIn(VERBOSE_TRIAL_DETAIL, output) # Check that we don't print duplicate results at the end self.assertTrue(output.count(VERBOSE_TRIAL_WITH_ONCE_RESULT) == 1) diff --git a/python/ray/tune/tests/test_trial_scheduler.py 
b/python/ray/tune/tests/test_trial_scheduler.py index 79e455144310..26fb21becc67 100644 --- a/python/ray/tune/tests/test_trial_scheduler.py +++ b/python/ray/tune/tests/test_trial_scheduler.py @@ -1899,7 +1899,7 @@ def testFastPerturb(self): shutil.rmtree(tmpdir) @pytest.mark.skipif( - os.environ.get("TUNE_NEW_EXECUTION") == "1", + os.environ.get("TUNE_NEW_EXECUTION") != "0", reason=( "This test is generally flaky: The print after writing `Cleanup` " "to the file is printed, but the data is not always written. " diff --git a/python/ray/tune/tune.py b/python/ray/tune/tune.py index 3f3a8d335b2c..9feff6028e29 100644 --- a/python/ray/tune/tune.py +++ b/python/ray/tune/tune.py @@ -909,7 +909,7 @@ class and registered trainables. trial_checkpoint_config=experiments[0].checkpoint_config, ) - if bool(int(os.environ.get("TUNE_NEW_EXECUTION", "0"))): + if bool(int(os.environ.get("TUNE_NEW_EXECUTION", "1"))): trial_runner_cls = TuneController runner_kwargs.pop("trial_executor") runner_kwargs["reuse_actors"] = reuse_actors From f634fc0612fea5ad7d736fc7d10e3637688b522a Mon Sep 17 00:00:00 2001 From: Jiajun Yao Date: Tue, 2 May 2023 09:24:09 -0700 Subject: [PATCH 187/424] [Core] Remove multiple core workers in one process 2/n (#34942) Remove some legacy code related to multiple core workers in one process since we now no longer support it. 
Signed-off-by: Jiajun Yao --- src/ray/raylet/worker_pool.cc | 200 +++++++++++----------------------- src/ray/raylet/worker_pool.h | 9 -- 2 files changed, 66 insertions(+), 143 deletions(-) diff --git a/src/ray/raylet/worker_pool.cc b/src/ray/raylet/worker_pool.cc index 2c1ef0ec049d..b9dc82862edc 100644 --- a/src/ray/raylet/worker_pool.cc +++ b/src/ray/raylet/worker_pool.cc @@ -216,7 +216,6 @@ void WorkerPool::AddWorkerProcess( const std::vector &dynamic_options) { state.worker_processes.emplace(worker_startup_token_counter_, WorkerProcessInfo{/*is_pending_registration=*/true, - {}, worker_type, proc, start, @@ -773,7 +772,6 @@ void WorkerPool::OnWorkerStarted(const std::shared_ptr &worker) auto it = state.worker_processes.find(worker_startup_token); if (it != state.worker_processes.end()) { it->second.is_pending_registration = false; - it->second.alive_started_workers.insert(worker); // We may have slots to start more workers now. TryStartIOWorkers(worker->GetLanguage()); } @@ -1063,126 +1061,76 @@ void WorkerPool::TryKillingIdleWorkers() { RAY_LOG(DEBUG) << "idle worker is already dead. Not going to kill worker " << idle_worker->WorkerId(); // This worker has already been killed. - // This is possible because a Java worker process may hold multiple workers. + // It will be removed from idle_of_all_languages_ later. + // This happens when ExitReply is received but the worker is not removed from + // idle_of_all_languages_ yet. continue; } - auto worker_startup_token = idle_worker->GetStartupToken(); - auto &worker_state = GetStateForLanguage(idle_worker->GetLanguage()); - auto it = worker_state.worker_processes.find(worker_startup_token); - if (it != worker_state.worker_processes.end() && it->second.is_pending_registration) { - // A Java worker process may hold multiple workers. - // Some workers of this process are pending registration. Skip killing this worker. 
+ // Skip killing the worker process if there's any inflight `Exit` RPC requests to + // this worker process. + if (pending_exit_idle_workers_.count(idle_worker->WorkerId())) { continue; } - // TODO(clarng): get rid of multiple workers per process code here, as that is - // not longer supported. - auto process = idle_worker->GetProcess(); - // Make sure all workers in this worker process are idle. - // This block of code is needed by Java workers. - auto workers_in_the_same_process = GetWorkersByProcess(process); - bool can_be_killed = true; - for (const auto &worker : workers_in_the_same_process) { - if (worker_state.idle.count(worker) == 0 || - now - idle_of_all_languages_map_[worker] < - RayConfig::instance().idle_worker_killing_time_threshold_ms()) { - // Another worker in this process isn't idle, or hasn't been idle for a while, so - // this process can't be killed. - can_be_killed = false; - break; - } - - // Skip killing the worker process if there's any inflight `Exit` RPC requests to - // this worker process. - if (pending_exit_idle_workers_.count(worker->WorkerId())) { - can_be_killed = false; - break; - } - } - if (!can_be_killed) { - continue; + RAY_LOG(DEBUG) << "The worker pool has " << running_size + << " registered workers which exceeds the soft limit of " + << num_workers_soft_limit_ << ", and worker " + << idle_worker->WorkerId() << " with pid " + << idle_worker->GetProcess().GetId() + << " has been idle for a a while. Kill it."; + // To avoid object lost issue caused by forcibly killing, send an RPC request to the + // worker to allow it to do cleanup before exiting. We kill it anyway if the driver + // is already exited. + RAY_LOG(DEBUG) << "Sending exit message to worker " << idle_worker->WorkerId(); + // Register the worker to pending exit so that we can correctly calculate the + // running_size. + // This also means that there's an inflight `Exit` RPC request to the worker. 
+ pending_exit_idle_workers_.emplace(idle_worker->WorkerId(), idle_worker); + auto rpc_client = idle_worker->rpc_client(); + RAY_CHECK(rpc_client); + RAY_CHECK(running_size > 0); + running_size--; + rpc::ExitRequest request; + if (finished_jobs_.contains(job_id) && + RayConfig::instance().kill_idle_workers_of_terminated_job()) { + RAY_LOG(INFO) << "Force exiting worker whose job has exited " + << idle_worker->WorkerId(); + request.set_force_exit(true); } + rpc_client->Exit( + request, [this, idle_worker](const ray::Status &status, const rpc::ExitReply &r) { + RAY_CHECK(pending_exit_idle_workers_.erase(idle_worker->WorkerId())); + if (!status.ok()) { + RAY_LOG(ERROR) << "Failed to send exit request: " << status.ToString(); + } - RAY_CHECK(running_size >= workers_in_the_same_process.size()); - if (running_size - workers_in_the_same_process.size() < - static_cast(num_workers_soft_limit_)) { - // A Java worker process may contain multiple workers. Killing more workers than we - // expect may slow the job. - if (!finished_jobs_.count(job_id)) { - // Ignore the soft limit for jobs that have already finished, as we - // should always clean up these workers. - return; - } - } - - for (const auto &worker : workers_in_the_same_process) { - RAY_LOG(DEBUG) << "The worker pool has " << running_size - << " registered workers which exceeds the soft limit of " - << num_workers_soft_limit_ << ", and worker " << worker->WorkerId() - << " with pid " << process.GetId() - << " has been idle for a a while. Kill it."; - // To avoid object lost issue caused by forcibly killing, send an RPC request to the - // worker to allow it to do cleanup before exiting. We kill it anyway if the driver - // is already exited. - if (!worker->IsDead()) { - RAY_LOG(DEBUG) << "Sending exit message to worker " << worker->WorkerId(); - // Register the worker to pending exit so that we can correctly calculate the - // running_size. 
- // This also means that there's an inflight `Exit` RPC request to the worker. - pending_exit_idle_workers_.emplace(worker->WorkerId(), worker); - auto rpc_client = worker->rpc_client(); - RAY_CHECK(rpc_client); - RAY_CHECK(running_size > 0); - running_size--; - rpc::ExitRequest request; - if (finished_jobs_.contains(job_id) && - RayConfig::instance().kill_idle_workers_of_terminated_job()) { - RAY_LOG(INFO) << "Force exiting worker whose job has exited " - << worker->WorkerId(); - request.set_force_exit(true); - } - rpc_client->Exit( - request, [this, worker](const ray::Status &status, const rpc::ExitReply &r) { - RAY_CHECK(pending_exit_idle_workers_.erase(worker->WorkerId())); - if (!status.ok()) { - RAY_LOG(ERROR) << "Failed to send exit request: " << status.ToString(); - } - - // In case of failed to send request, we remove it from pool as well - // TODO (iycheng): We should handle the grpc failure in better way. - if (!status.ok() || r.success()) { - RAY_LOG(DEBUG) << "Removed worker " << worker->WorkerId(); - auto &worker_state = GetStateForLanguage(worker->GetLanguage()); - // If we could kill the worker properly, we remove them from the idle - // pool. - RemoveWorker(worker_state.idle, worker); - // We always mark the worker as dead. - // If the worker is not idle at this moment, we'd want to mark it as dead - // so it won't be reused later. - if (!worker->IsDead()) { - worker->MarkDead(); - } - } else { - RAY_LOG(DEBUG) << "Failed to remove worker " << worker->WorkerId(); - // We re-insert the idle worker to the back of the queue if it fails to - // kill the worker (e.g., when the worker owns the object). Without this, - // if the first N workers own objects, it can't kill idle workers that are - // >= N+1. 
- const auto &idle_pair = idle_of_all_languages_.front(); - idle_of_all_languages_.push_back(idle_pair); - idle_of_all_languages_.pop_front(); - RAY_CHECK(idle_of_all_languages_.size() == - idle_of_all_languages_map_.size()); - } - }); - } else { - RAY_LOG(DEBUG) << "Removing dead worker " << worker->WorkerId(); - - // Even it's a dead worker, we still need to remove them from the pool. - RemoveWorker(worker_state.idle, worker); - } - } + // In case of failed to send request, we remove it from pool as well + // TODO (iycheng): We should handle the grpc failure in better way. + if (!status.ok() || r.success()) { + RAY_LOG(DEBUG) << "Removed worker " << idle_worker->WorkerId(); + auto &worker_state = GetStateForLanguage(idle_worker->GetLanguage()); + // If we could kill the worker properly, we remove them from the idle + // pool. + RemoveWorker(worker_state.idle, idle_worker); + // We always mark the worker as dead. + // If the worker is not idle at this moment, we'd want to mark it as dead + // so it won't be reused later. + if (!idle_worker->IsDead()) { + idle_worker->MarkDead(); + } + } else { + RAY_LOG(DEBUG) << "Failed to remove worker " << idle_worker->WorkerId(); + // We re-insert the idle worker to the back of the queue if it fails to + // kill the worker (e.g., when the worker owns the object). Without this, + // if the first N workers own objects, it can't kill idle workers that are + // >= N+1. 
+ const auto &idle_pair = idle_of_all_languages_.front(); + idle_of_all_languages_.push_back(idle_pair); + idle_of_all_languages_.pop_front(); + RAY_CHECK(idle_of_all_languages_.size() == idle_of_all_languages_map_.size()); + } + }); } std::list, int64_t>> @@ -1417,7 +1365,7 @@ void WorkerPool::DisconnectWorker(const std::shared_ptr &worker auto &state = GetStateForLanguage(worker->GetLanguage()); auto it = state.worker_processes.find(worker->GetStartupToken()); if (it != state.worker_processes.end()) { - if (!RemoveWorker(it->second.alive_started_workers, worker)) { + if (it->second.is_pending_registration) { // Worker is either starting or started, // if it's not started, we should remove it from starting. it->second.is_pending_registration = false; @@ -1425,11 +1373,9 @@ void WorkerPool::DisconnectWorker(const std::shared_ptr &worker TryPendingPopWorkerRequests(worker->GetLanguage()); } } - if (it->second.alive_started_workers.size() == 0 && - !it->second.is_pending_registration) { - DeleteRuntimeEnvIfPossible(it->second.runtime_env_info.serialized_runtime_env()); - RemoveWorkerProcess(state, worker->GetStartupToken()); - } + + DeleteRuntimeEnvIfPossible(it->second.runtime_env_info.serialized_runtime_env()); + RemoveWorkerProcess(state, worker->GetStartupToken()); } RAY_CHECK(RemoveWorker(state.registered_workers, worker)); @@ -1605,20 +1551,6 @@ void WorkerPool::TryStartIOWorkers(const Language &language, } } -std::unordered_set> WorkerPool::GetWorkersByProcess( - const Process &process) { - std::unordered_set> workers_of_process; - for (auto &entry : states_by_lang_) { - auto &worker_state = entry.second; - for (const auto &worker : worker_state.registered_workers) { - if (worker->GetProcess().GetId() == process.GetId()) { - workers_of_process.insert(worker); - } - } - } - return workers_of_process; -} - std::string WorkerPool::DebugString() const { std::stringstream result; result << "WorkerPool:"; diff --git a/src/ray/raylet/worker_pool.h 
b/src/ray/raylet/worker_pool.h index b0c03c3d4997..b3efd2839de9 100644 --- a/src/ray/raylet/worker_pool.h +++ b/src/ray/raylet/worker_pool.h @@ -471,8 +471,6 @@ class WorkerPool : public WorkerPoolInterface, public IOWorkerPoolInterface { struct WorkerProcessInfo { /// Whether this worker is pending registration or is started. bool is_pending_registration = true; - /// The started workers which is alive. - std::unordered_set> alive_started_workers; /// The type of the worker. rpc::WorkerType worker_type; /// The worker process instance. @@ -585,13 +583,6 @@ class WorkerPool : public WorkerPoolInterface, public IOWorkerPoolInterface { /// \param language The language of the PopWorker requests. void TryPendingPopWorkerRequests(const Language &language); - /// Get all workers of the given process. - /// - /// \param process The process of workers. - /// \return The workers of the given process. - std::unordered_set> GetWorkersByProcess( - const Process &process); - /// Get either restore or spill worker state from state based on worker_type. /// /// \param worker_type IO Worker Type. 
From efe658a8bcacd757db38f2cac5e16b8e8b0edc2d Mon Sep 17 00:00:00 2001 From: Cuong Nguyen <128072568+can-anyscale@users.noreply.github.com> Date: Tue, 2 May 2023 09:53:37 -0700 Subject: [PATCH 188/424] [CI] Make it visible on UI if we are running a jailed test (#34926) Signed-off-by: Cuong Nguyen Co-authored-by: matthewdeng --- release/ray_release/buildkite/step.py | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/release/ray_release/buildkite/step.py b/release/ray_release/buildkite/step.py index a13bde1575d8..7bd2f0a089dd 100644 --- a/release/ray_release/buildkite/step.py +++ b/release/ray_release/buildkite/step.py @@ -121,13 +121,16 @@ def get_step( if test.get("run", {}).get("type") == "client": step["agents"]["queue"] = str(RELEASE_QUEUE_CLIENT) - # If a test is not stable, allow to soft fail + # If a test is jailed or not stable, allow to soft fail stable = test.get("stable", True) - if not stable: + jailed = test.get("jailed", False) + full_label = "" + if jailed or not stable: step["soft_fail"] = True - full_label = "[unstable] " - else: - full_label = "" + if not stable: + full_label += "[unstable]" + if jailed: + full_label += "[jailed]" full_label += test["name"] if smoke_test: From 5cedcc9e0eb46921a9070f096fbe72a2df7e73cd Mon Sep 17 00:00:00 2001 From: Artur Niederfahrenhorst Date: Tue, 2 May 2023 18:55:05 +0200 Subject: [PATCH 189/424] [RLlib] Remove check specs from default Model forward code path to improve performance. 
(#34877) --- rllib/BUILD | 7 + rllib/core/models/base.py | 72 ++--- rllib/core/models/specs/checker.py | 70 ++++- .../models/specs/tests/test_check_specs.py | 49 ++-- .../core/models/specs/tests/test_spec_dict.py | 20 +- rllib/core/models/tests/test_base_models.py | 274 ++++++++++++++++++ rllib/core/models/tests/test_catalog.py | 6 +- rllib/core/models/tf/base.py | 45 ++- rllib/core/models/torch/base.py | 48 ++- rllib/core/models/torch/encoder.py | 29 +- rllib/models/catalog.py | 7 + 11 files changed, 509 insertions(+), 118 deletions(-) create mode 100644 rllib/core/models/tests/test_base_models.py diff --git a/rllib/BUILD b/rllib/BUILD index 78d38e9b65f3..6312a7675c34 100644 --- a/rllib/BUILD +++ b/rllib/BUILD @@ -1866,6 +1866,13 @@ py_test( ) # Default Models +py_test( + name = "test_base_models", + tags = ["team:rllib", "core"], + size = "small", + srcs = ["core/models/tests/test_base_models.py"] +) + py_test( name = "test_cnn_encoders", tags = ["team:rllib", "core", "models"], diff --git a/rllib/core/models/base.py b/rllib/core/models/base.py index 0f00f87d66e5..d977a99a37c2 100644 --- a/rllib/core/models/base.py +++ b/rllib/core/models/base.py @@ -8,7 +8,6 @@ from ray.rllib.policy.sample_batch import SampleBatch from ray.rllib.utils.annotations import ExperimentalAPI from ray.rllib.utils.annotations import override -from ray.rllib.utils.nested_dict import NestedDict from ray.rllib.utils.typing import TensorType # Top level keys that unify model i/o. @@ -20,13 +19,6 @@ CRITIC: str = "critic" -def _raise_not_decorated_exception(class_and_method, input_or_output): - raise ValueError( - f"`{class_and_method}()` not decorated with {input_or_output} specification. " - f"Decorate it with @check_{input_or_output}_specs() to define a specification." - ) - - @ExperimentalAPI @dataclass class ModelConfig(abc.ABC): @@ -45,10 +37,16 @@ class ModelConfig(abc.ABC): Attributes: input_dims: The input dimensions of the network output_dims: The output dimensions of the network. 
+ always_check_shapes: Whether to always check the inputs and outputs of the + model for the specifications. Input specifications are checked on failed + forward passes of the model regardless of this flag. If this flag is set + to `True`, inputs and outputs are checked on every call. This leads to + a slow-down and should only be used for debugging. """ input_dims: Union[List[int], Tuple[int]] = None output_dims: Union[List[int], Tuple[int]] = None + always_check_shapes: bool = False @abc.abstractmethod def build(self, framework: str): @@ -181,15 +179,15 @@ def output_specs(self, spec: Spec) -> None: "you want to override this behavior." ) - def get_initial_state(self) -> Union[NestedDict, List[TensorType]]: + def get_initial_state(self) -> Union[dict, List[TensorType]]: """Returns the initial state of the Model. It can be left empty if this Model is not stateful. """ - return NestedDict() + return dict() @abc.abstractmethod - def _forward(self, input_dict: NestedDict, **kwargs) -> NestedDict: + def _forward(self, input_dict: dict, **kwargs) -> dict: """Returns the output of this model for the given input. This method is called by the forwarding method of the respective framework @@ -200,7 +198,7 @@ def _forward(self, input_dict: NestedDict, **kwargs) -> NestedDict: **kwargs: Forward compatibility kwargs. Returns: - NestedDict: The output tensors. + dict: The output tensors. """ @abc.abstractmethod @@ -308,7 +306,7 @@ def get_output_specs(self) -> Optional[Spec]: return convert_to_canonical_format([ENCODER_OUT, STATE_OUT]) @abc.abstractmethod - def _forward(self, input_dict: NestedDict, **kwargs) -> NestedDict: + def _forward(self, input_dict: dict, **kwargs) -> dict: """Returns the latent of the encoder for the given inputs. This method is called by the forwarding method of the respective framework @@ -320,7 +318,7 @@ def _forward(self, input_dict: NestedDict, **kwargs) -> NestedDict: (None for stateless encoders). 
To establish an agreement between the encoder and RLModules, these values have the fixed keys `SampleBatch.OBS` and `STATE_IN` for the `input_dict`, - and `STATE_OUT` and `ENCODER_OUT` for the returned NestedDict. + and `STATE_OUT` and `ENCODER_OUT` for the returned dict. Args: input_dict: The input tensors. Must contain at a minimum the keys @@ -329,8 +327,8 @@ def _forward(self, input_dict: NestedDict, **kwargs) -> NestedDict: **kwargs: Forward compatibility kwargs. Returns: - NestedDict: The output tensors. Must contain at a minimum the keys - ENCODER_OUT and STATE_OUT (which might be None for stateless encoders). + The output tensors. Must contain at a minimum the keys ENCODER_OUT and + STATE_OUT (which might be None for stateless encoders). """ raise NotImplementedError @@ -410,32 +408,26 @@ def get_initial_state(self): } @override(Model) - def _forward(self, inputs: NestedDict, **kwargs) -> NestedDict: + def _forward(self, inputs: dict, **kwargs) -> dict: if self.config.shared: outs = self.encoder(inputs, **kwargs) - return NestedDict( - { - ENCODER_OUT: {ACTOR: outs[ENCODER_OUT], CRITIC: outs[ENCODER_OUT]}, - STATE_OUT: outs[STATE_OUT], - } - ) + return { + ENCODER_OUT: {ACTOR: outs[ENCODER_OUT], CRITIC: outs[ENCODER_OUT]}, + STATE_OUT: outs[STATE_OUT], + } else: - actor_inputs = NestedDict({**inputs}) - # , **{STATE_IN: inputs[STATE_IN][ACTOR]}}) - critic_inputs = NestedDict( - {**inputs} # , **{STATE_IN: inputs[STATE_IN][CRITIC]}} - ) + actor_inputs = inputs # , **{STATE_IN: inputs[STATE_IN][ACTOR]}}) + critic_inputs = inputs # , **{STATE_IN: inputs[STATE_IN][CRITIC]}} + actor_out = self.actor_encoder(actor_inputs, **kwargs) critic_out = self.critic_encoder(critic_inputs, **kwargs) - return NestedDict( - { - ENCODER_OUT: { - ACTOR: actor_out[ENCODER_OUT], - CRITIC: critic_out[ENCODER_OUT], - }, - STATE_OUT: { - ACTOR: actor_out[STATE_OUT], - CRITIC: critic_out[STATE_OUT], - }, - } - ) + return { + ENCODER_OUT: { + ACTOR: actor_out[ENCODER_OUT], + CRITIC: 
critic_out[ENCODER_OUT], + }, + STATE_OUT: { + ACTOR: actor_out[STATE_OUT], + CRITIC: critic_out[STATE_OUT], + }, + } diff --git a/rllib/core/models/specs/checker.py b/rllib/core/models/specs/checker.py index 50bc5102969c..da30e6ce5a52 100644 --- a/rllib/core/models/specs/checker.py +++ b/rllib/core/models/specs/checker.py @@ -1,13 +1,23 @@ -from collections import abc import functools +import logging +from collections import abc from typing import Union, Mapping, Any, Callable -from ray.util.annotations import DeveloperAPI - -from ray.rllib.utils.nested_dict import NestedDict from ray.rllib.core.models.specs.specs_base import Spec, TypeSpec from ray.rllib.core.models.specs.specs_dict import SpecDict from ray.rllib.core.models.specs.typing import SpecType +from ray.rllib.utils.nested_dict import NestedDict +from ray.util.annotations import DeveloperAPI + +logger = logging.getLogger(__name__) + + +@DeveloperAPI +class SpecCheckingError(Exception): + """Raised when there is an error in the spec checking. + + This Error is raised when inputs or outputs do match the defined specs. + """ @DeveloperAPI @@ -161,7 +171,7 @@ def _validate( try: spec.validate(data) except ValueError as e: - raise ValueError( + raise SpecCheckingError( f"{tag} spec validation failed on " f"{cls_instance.__class__.__name__}.{method.__name__}, {e}." ) @@ -173,6 +183,7 @@ def _validate( def check_input_specs( input_specs: str, *, + only_check_on_retry: bool = True, filter: bool = False, cache: bool = False, ): @@ -210,6 +221,9 @@ def check_input_specs( string in input_specs and returns the `SpecDict`, `Spec`, or simply the `Type` that the `input_data` should comply with. It can also be None or empty list / dict to enforce no input spec. + only_check_on_retry: If True, the spec will not be checked. Only if the + decorated method raises an Exception, we check the spec to provide a more + informative error message. 
filter: If True, and `input_data` is a nested dict the `input_data` will be filtered by its corresponding spec tree structure and then passed into the implemented function to make sure user is not confounded with unnecessary @@ -231,12 +245,36 @@ def decorator(func): def wrapper(self, input_data, **kwargs): if cache and not hasattr(self, "__checked_input_specs_cache__"): self.__checked_input_specs_cache__ = {} + if cache and func.__name__ not in self.__checked_input_specs_cache__: + self.__checked_input_specs_cache__[func.__name__] = True + initial_exception = None + if only_check_on_retry: + # Attempt to run the function without spec checking + try: + return func(self, input_data, **kwargs) + except SpecCheckingError as e: + raise e + except Exception as e: + # We store the initial exception to raise it later if the spec + # check fails. + initial_exception = e + logger.error( + f"Exception {e} raised on function call without checkin " + f"input specs. RLlib will now attempt to check the spec " + f"before calling the function again." + ) + + # If the function was not executed successfully yet, we check specs checked_data = input_data if input_specs: - spec = getattr(self, input_specs, "___NOT_FOUND___") - if spec == "___NOT_FOUND___": - raise ValueError(f"object {self} has no attribute {input_specs}.") + if hasattr(self, input_specs): + spec = getattr(self, input_specs) + else: + raise SpecCheckingError( + f"object {self} has no attribute {input_specs}." 
+ ) + if spec is not None: spec = convert_to_canonical_format(spec) checked_data = _validate( @@ -252,12 +290,12 @@ def wrapper(self, input_data, **kwargs): # filtering should happen regardless of cache checked_data = checked_data.filter(spec) - output_data = func(self, checked_data, **kwargs) - - if cache and func.__name__ not in self.__checked_input_specs_cache__: - self.__checked_input_specs_cache__[func.__name__] = True + # If we have encountered an exception from calling `func` already, + # we raise it again here and don't need to call func again. + if initial_exception: + raise initial_exception - return output_data + return func(self, checked_data, **kwargs) wrapper.__checked_input_specs__ = True return wrapper @@ -321,9 +359,11 @@ def wrapper(self, input_data, **kwargs): output_data = func(self, input_data, **kwargs) if output_specs: - spec = getattr(self, output_specs, "___NOT_FOUND___") - if spec == "___NOT_FOUND___": + if hasattr(self, output_specs): + spec = getattr(self, output_specs) + else: raise ValueError(f"object {self} has no attribute {output_specs}.") + if spec is not None: spec = convert_to_canonical_format(spec) _validate( diff --git a/rllib/core/models/specs/tests/test_check_specs.py b/rllib/core/models/specs/tests/test_check_specs.py index 1122ec05079d..fecf5b39b3d2 100644 --- a/rllib/core/models/specs/tests/test_check_specs.py +++ b/rllib/core/models/specs/tests/test_check_specs.py @@ -1,19 +1,21 @@ import abc -import numpy as np import time -import torch -from typing import Dict, Any, Type import unittest +from typing import Dict, Any, Type -from ray.rllib.core.models.specs.specs_base import TensorSpec, TypeSpec -from ray.rllib.core.models.specs.specs_dict import SpecDict -from ray.rllib.utils.annotations import override -from ray.rllib.utils.nested_dict import NestedDict +import numpy as np +import torch + +from ray.rllib.core.models.specs.checker import SpecCheckingError from ray.rllib.core.models.specs.checker import ( 
convert_to_canonical_format, check_input_specs, check_output_specs, ) +from ray.rllib.core.models.specs.specs_base import TensorSpec, TypeSpec +from ray.rllib.core.models.specs.specs_dict import SpecDict +from ray.rllib.utils.annotations import override +from ray.rllib.utils.nested_dict import NestedDict ONLY_ONE_KEY_ALLOWED = "Only one key is allowed in the data dict." @@ -32,7 +34,9 @@ def input_specs(self) -> SpecDict: def output_specs(self) -> SpecDict: pass - @check_input_specs("input_specs", filter=True, cache=False) + @check_input_specs( + "input_specs", filter=True, cache=False, only_check_on_retry=False + ) @check_output_specs("output_specs", cache=False) def check_input_and_output(self, input_dict: Dict[str, Any]) -> Dict[str, Any]: return self._check_input_and_output(input_dict) @@ -41,7 +45,9 @@ def check_input_and_output(self, input_dict: Dict[str, Any]) -> Dict[str, Any]: def _check_input_and_output(self, input_dict: Dict[str, Any]) -> Dict[str, Any]: pass - @check_input_specs("input_specs", filter=True, cache=False) + @check_input_specs( + "input_specs", filter=True, cache=False, only_check_on_retry=False + ) def check_only_input(self, input_dict: Dict[str, Any]) -> Dict[str, Any]: """should not override this method""" return self._check_only_input(input_dict) @@ -59,7 +65,9 @@ def check_only_output(self, input_dict: Dict[str, Any]) -> Dict[str, Any]: def _check_only_output(self, input_dict: Dict[str, Any]) -> Dict[str, Any]: pass - @check_input_specs("input_specs", filter=True, cache=True) + @check_input_specs( + "input_specs", filter=True, cache=True, only_check_on_retry=False + ) @check_output_specs("output_specs", cache=True) def check_input_and_output_with_cache( self, input_dict: Dict[str, Any] @@ -67,7 +75,9 @@ def check_input_and_output_with_cache( """should not override this method""" return self._check_input_and_output(input_dict) - @check_input_specs("input_specs", filter=False, cache=False) + @check_input_specs( + "input_specs", 
filter=False, cache=False, only_check_on_retry=False + ) @check_output_specs("output_specs", cache=False) def check_input_and_output_wo_filter(self, input_dict) -> Dict[str, Any]: """should not override this method""" @@ -138,7 +148,8 @@ def test_check_input_and_output(self): # This should raise an error saying that the `input` key is missing. self.assertRaises( - ValueError, lambda: correct_module.check_input_and_output({"not_input": 2}) + SpecCheckingError, + lambda: correct_module.check_input_and_output({"not_input": 2}), ) def test_check_only_input(self): @@ -164,7 +175,8 @@ def test_incorrect_implementation(self): # this should raise an error saying that the output does not match the # `output_specs`. self.assertRaises( - ValueError, lambda: incorrect_module.check_input_and_output({"input": 2}) + SpecCheckingError, + lambda: incorrect_module.check_input_and_output({"input": 2}), ) # this should not raise an error because output is not forced to be checked @@ -172,7 +184,8 @@ def test_incorrect_implementation(self): # This should raise an error because output does not match the `output_specs`. 
self.assertRaises( - ValueError, lambda: incorrect_module.check_only_output({"not_input": 2}) + SpecCheckingError, + lambda: incorrect_module.check_only_output({"not_input": 2}), ) def test_filter(self): @@ -240,13 +253,13 @@ class ClassWithTensorSpec: def input_spec1(self) -> TensorSpec: return TensorSpec("b, h", h=4, framework="torch") - @check_input_specs("input_spec1", cache=False) + @check_input_specs("input_spec1", cache=False, only_check_on_retry=False) def forward(self, input_data) -> Any: return input_data module = ClassWithTensorSpec() module.forward(torch.rand(2, 4)) - self.assertRaises(ValueError, lambda: module.forward(torch.rand(2, 3))) + self.assertRaises(SpecCheckingError, lambda: module.forward(torch.rand(2, 3))) def test_type_specs(self): class SpecialOutputType: @@ -271,7 +284,9 @@ def forward_fail(self, input_data) -> Any: module = ClassWithTypeSpec() output = module.forward_pass(torch.rand(2, 4)) self.assertIsInstance(output, SpecialOutputType) - self.assertRaises(ValueError, lambda: module.forward_fail(torch.rand(2, 3))) + self.assertRaises( + SpecCheckingError, lambda: module.forward_fail(torch.rand(2, 3)) + ) def test_convert_to_canonical_format(self): diff --git a/rllib/core/models/specs/tests/test_spec_dict.py b/rllib/core/models/specs/tests/test_spec_dict.py index 0838f515d15b..52f269a69357 100644 --- a/rllib/core/models/specs/tests/test_spec_dict.py +++ b/rllib/core/models/specs/tests/test_spec_dict.py @@ -7,6 +7,7 @@ check_input_specs, convert_to_canonical_format, ) +from ray.rllib.core.models.specs.checker import SpecCheckingError class TypeClass1: @@ -162,15 +163,19 @@ def dict_key_spec_with_none_leaves(self): def spec_with_type_and_tensor_leaves(self): return {"a": TypeClass1, "b": TensorSpec("b, h", h=3, framework="np")} - @check_input_specs("nested_key_spec") + @check_input_specs("nested_key_spec", only_check_on_retry=False) def forward_nested_key(self, input_dict): return input_dict - 
@check_input_specs("dict_key_spec_with_none_leaves") + @check_input_specs( + "dict_key_spec_with_none_leaves", only_check_on_retry=False + ) def forward_dict_key_with_none_leaves(self, input_dict): return input_dict - @check_input_specs("spec_with_type_and_tensor_leaves") + @check_input_specs( + "spec_with_type_and_tensor_leaves", only_check_on_retry=False + ) def forward_spec_with_type_and_tensor_leaves(self, input_dict): return input_dict @@ -207,10 +212,13 @@ def forward_spec_with_type_and_tensor_leaves(self, input_dict): }, } - self.assertRaises(ValueError, lambda: model.forward_nested_key(input_dict_2)) + self.assertRaises( + SpecCheckingError, lambda: model.forward_nested_key(input_dict_2) + ) self.assertRaises( - ValueError, lambda: model.forward_dict_key_with_none_leaves(input_dict_2) + SpecCheckingError, + lambda: model.forward_dict_key_with_none_leaves(input_dict_2), ) input_dict_3 = { @@ -220,7 +228,7 @@ def forward_spec_with_type_and_tensor_leaves(self, input_dict): # should raise shape mismatch self.assertRaises( - ValueError, + SpecCheckingError, lambda: model.forward_spec_with_type_and_tensor_leaves(input_dict_3), ) diff --git a/rllib/core/models/tests/test_base_models.py b/rllib/core/models/tests/test_base_models.py new file mode 100644 index 000000000000..8a5ea238072a --- /dev/null +++ b/rllib/core/models/tests/test_base_models.py @@ -0,0 +1,274 @@ +import unittest +from dataclasses import dataclass + +from ray.rllib.core.models.base import ModelConfig +from ray.rllib.core.models.specs.checker import SpecCheckingError +from ray.rllib.core.models.specs.specs_base import TensorSpec +from ray.rllib.core.models.specs.specs_dict import SpecDict +from ray.rllib.core.models.tf.base import TfModel +from ray.rllib.core.models.torch.base import TorchModel +from ray.rllib.utils.framework import try_import_tf, try_import_torch + +_, tf, _ = try_import_tf() +torch, nn = try_import_torch() + + +def _dynamo_is_available(): + # This only works if torch._dynamo is 
available + try: + # TODO(Artur): Remove this once torch._dynamo is available on CI + import torch._dynamo as dynamo # noqa: F401 + + return True + except ImportError: + return False + + +class TestModelBase(unittest.TestCase): + def test_model_input_spec_checking(self): + """Tests if model input spec checking works correctly. + + This test is centered around the `always_check_shapes` flag of the + ModelConfig class. If this flag is set to True, the model will always + check if the inputs conform to the specs. If this flag is set to False, + the model will only check the input if we encounter an error in side + the forward call. + """ + + for fw in ["torch", "tf2"]: + + class CatModel: + """Simple model that concatenates parts of its input.""" + + def __init__(self, config): + super().__init__(config) + + def get_output_specs(self): + return SpecDict( + { + "out_1": TensorSpec("b, h", h=1, framework=fw), + # out_2 is simply 2x stacked in_1 + "out_2": TensorSpec("b, h", h=4, framework=fw), + } + ) + + def get_input_specs(self): + return SpecDict( + { + "in_1": TensorSpec("b, h", h=1, framework=fw), + "in_2": TensorSpec("b, h", h=2, framework=fw), + } + ) + + if fw == "tf2": + + class TestModel(CatModel, TfModel): + def _forward(self, input_dict): + out_2 = tf.concat( + [input_dict["in_2"], input_dict["in_2"]], axis=1 + ) + return {"out_1": input_dict["in_1"], "out_2": out_2} + + else: + + class TestModel(CatModel, TorchModel): + def _forward(self, input_dict): + out_2 = torch.cat( + [input_dict["in_2"], input_dict["in_2"]], dim=1 + ) + return {"out_1": input_dict["in_1"], "out_2": out_2} + + @dataclass + class CatModelConfig(ModelConfig): + def build(self, framework: str): + # Since we define the correct model above anyway, we don't need + # to distinguish between frameworks here. + return TestModel(self) + + # 1) Check if model behaves correctly with always_check_shapes=True first + # We expect model to raise an error if the input shapes are not correct. 
+ # This is the behaviour we use for debugging with model specs. + + config = CatModelConfig(always_check_shapes=True) + + model = config.build(framework="spam") + + # We want to raise an input spec validation error here since the input + # consists of lists and not torch Tensors + with self.assertRaisesRegex( + SpecCheckingError, "input spec validation failed" + ): + model({"in_1": [1], "in_2": [1, 2]}) + + # We don't want to raise an input spec validation error here since the + # input consists of valid tensors + if fw == "torch": + model({"in_1": torch.Tensor([[1]]), "in_2": torch.Tensor([[1, 2]])}) + else: + model({"in_1": tf.constant([[1]]), "in_2": tf.constant([[1, 2]])}) + + # 2) Check if model behaves correctly with always_check_shapes=False. + # We don't expect model to raise an error if the input shapes are not + # correct. + # This is the more performant default behaviour + + config = CatModelConfig(always_check_shapes=False) + + model = config.build(framework="spam") + + # This should not raise an error since the specs are correct and the + # model does not raise an error either. + if fw == "torch": + model({"in_1": torch.Tensor([[1]]), "in_2": torch.Tensor([[1, 2]])}) + else: + model({"in_1": tf.constant([[1]]), "in_2": tf.constant([[1, 2]])}) + + # This should not raise an error since specs would be violated, but they + # are not checked and the model does not raise an error. + if fw == "torch": + model( + {"in_1": torch.Tensor([[1]]), "in_2": torch.Tensor([[1, 2, 3, 4]])} + ) + else: + model({"in_1": tf.constant([[1]]), "in_2": tf.constant([[1, 2, 3, 4]])}) + + # We want to raise an input spec validation error here since the model + # raises an exception that stems from inputs that could have been caught + # with input spec checking. 
+ with self.assertRaisesRegex( + SpecCheckingError, "input spec validation failed" + ): + model({"in_1": [1], "in_2": [1, 2]}) + + def test_model_output_spec_checking(self): + """Tests if model output spec checking works correctly. + + This test is centered around the `always_check_shapes` flag of the + ModelConfig class. If this flag is set to True, the model will always + check if the outputs conform to the specs. If this flag is set to False, + the model will never check the outputs. + """ + + for fw in ["torch", "tf2"]: + + class BadModel: + """Simple model that produces bad outputs.""" + + def get_output_specs(self): + return SpecDict( + { + "out": TensorSpec("b, h", h=1), + } + ) + + def get_input_specs(self): + return SpecDict( + { + "in": TensorSpec("b, h", h=1), + } + ) + + if fw == "tf2": + + class TestModel(BadModel, TfModel): + def _forward(self, input_dict): + return {"out": torch.Tensor([[1, 2]])} + + else: + + class TestModel(BadModel, TfModel): + def _forward(self, input_dict): + return {"out": tf.constant([[1, 2]])} + + @dataclass + class CatModelConfig(ModelConfig): + def build(self, framework: str): + # Since we define the correct model above anyway, we don't need + # to distinguish between frameworks here. + return TestModel(self) + + # 1) Check if model behaves correctly with always_check_shapes=True first. + # We expect model to raise an error if the output shapes are not correct. + # This is the behaviour we use for debugging with model specs. + + config = CatModelConfig(always_check_shapes=True) + + model = config.build(framework="spam") + + # We want to raise an output spec validation error here since the output + # has the wrong shape + with self.assertRaisesRegex( + SpecCheckingError, "output spec validation failed" + ): + model({"in": torch.Tensor([[1]])}) + + # 2) Check if model behaves correctly with always_check_shapes=False. + # We don't expect model to raise an error. 
+ # This is the more performant default behaviour + + config = CatModelConfig(always_check_shapes=False) + + model = config.build(framework="spam") + + model({"in_1": [[1]]}) + + @unittest.skipIf(not _dynamo_is_available(), "torch._dynamo not available") + def test_torch_compile_with_model(self): + """Tests if torch.compile() does not encounter any breaks. + + torch.compile() should not encounter any breaks when model is on its + code path by default. This test checks if this is the case. + """ + + class SomeTorchModel(TorchModel): + """Simple model that produces bad outputs.""" + + def __init__(self, config): + super().__init__(config) + self._model = torch.nn.Linear(1, 1) + + def get_output_specs(self): + return SpecDict( + { + "out": TensorSpec("b, h", h=1, framework="torch"), + } + ) + + def get_input_specs(self): + return SpecDict( + { + "in": TensorSpec("b, h", h=1, framework="torch"), + } + ) + + def _forward(self, input_dict): + return {"out": self._model(input_dict["in"])} + + @dataclass + class SomeTorchModelConfig(ModelConfig): + def build(self, framework: str): + return SomeTorchModel(self) + + config = SomeTorchModelConfig() + + model = config.build(framework="spam") + + # This could be the forward method of an RL Module that we torch compile + def compile_me(input_dict): + return model(input_dict) + + import torch._dynamo as dynamo + + dynamo_explanation = dynamo.explain(compile_me, {"in": torch.Tensor([[1]])}) + + # There should be only one break reason - `return_value` - since inputs and + # outputs are not checked + break_reasons_list = dynamo_explanation[4] + self.assertEquals(len(break_reasons_list), 1) + + +if __name__ == "__main__": + import pytest + import sys + + sys.exit(pytest.main(["-v", __file__])) diff --git a/rllib/core/models/tests/test_catalog.py b/rllib/core/models/tests/test_catalog.py index c2454320d791..da22c2b1864f 100644 --- a/rllib/core/models/tests/test_catalog.py +++ b/rllib/core/models/tests/test_catalog.py @@ -424,11 
+424,11 @@ def test_post_init_overwrite(self): class MyCostumTorchEncoderConfig(ModelConfig): def build(self, framework): - return MyCostumTorchEncoder() + return MyCostumTorchEncoder(self) class MyCostumTorchEncoder(TorchModel, Encoder): - def __init__(self): - super().__init__({}) + def __init__(self, config): + super().__init__(config) self.net = torch.nn.Linear(env.observation_space.shape[0], 10) def _forward(self, input_dict, **kwargs): diff --git a/rllib/core/models/tf/base.py b/rllib/core/models/tf/base.py index e99630ab4fad..dc88a005ce1f 100644 --- a/rllib/core/models/tf/base.py +++ b/rllib/core/models/tf/base.py @@ -1,4 +1,5 @@ import abc +import logging from typing import Tuple import numpy as np @@ -6,18 +7,18 @@ from ray.rllib.core.models.base import ( Model, ModelConfig, - _raise_not_decorated_exception, ) from ray.rllib.core.models.specs.checker import ( check_input_specs, - check_output_specs, is_input_decorated, is_output_decorated, + check_output_specs, ) from ray.rllib.utils.annotations import override from ray.rllib.utils.framework import try_import_tf -from ray.rllib.utils.nested_dict import NestedDict +from ray.util import log_once +logger = logging.getLogger(__name__) _, tf, _ = try_import_tf() @@ -33,15 +34,26 @@ def __init__(self, config: ModelConfig): tf.keras.Model.__init__(self) Model.__init__(self, config) - # Raise errors if forward method is not decorated to check specs. + # Raise errors if forward method is not decorated to check input specs. if not is_input_decorated(self.call): - _raise_not_decorated_exception(type(self).__name__ + ".call()", "input") - if not is_output_decorated(self.call): - _raise_not_decorated_exception(type(self).__name__ + ".call()", "output") + raise ValueError( + f"`{type(self).__name__}.call()` not decorated with input " + f"specification. Decorate it with @check_input_specs() to define a " + f"specification and resolve this Error. If you don't want to check " + f"anything, you can use an empty spec." 
+ ) + + if is_output_decorated(self.call): + if log_once("tf_model_forward_output_decorated"): + logger.warning( + f"`{type(self).__name__}.call()` decorated with output " + f"specification. This is not recommended because it can lead to " + f"slower execution. Remove @check_output_specs() from the " + f"forward method to resolve this." + ) @check_input_specs("input_specs") - @check_output_specs("output_specs") - def call(self, input_dict: NestedDict, **kwargs) -> NestedDict: + def call(self, input_dict: dict, **kwargs) -> dict: """Returns the output of this model for the given input. This method only makes sure that we have a spec-checked _forward() method. @@ -51,8 +63,21 @@ def call(self, input_dict: NestedDict, **kwargs) -> NestedDict: **kwargs: Forward compatibility kwargs. Returns: - NestedDict: The output tensors. + dict: The output tensors. """ + + # When `always_check_shapes` is set, we always check input and output specs. + # Note that we check the input specs twice because we need the following + # check to always check the input specs. 
+ if self.config.always_check_shapes: + + @check_input_specs("input_specs", only_check_on_retry=False) + @check_output_specs("output_specs") + def checked_forward(self, input_data, **kwargs): + return self._forward(input_data, **kwargs) + + return checked_forward(self, input_dict, **kwargs) + return self._forward(input_dict, **kwargs) @override(Model) diff --git a/rllib/core/models/torch/base.py b/rllib/core/models/torch/base.py index eefcf8ad2b42..775bc6116288 100644 --- a/rllib/core/models/torch/base.py +++ b/rllib/core/models/torch/base.py @@ -1,4 +1,5 @@ import abc +import logging from typing import Tuple, Union import numpy as np @@ -6,7 +7,6 @@ from ray.rllib.core.models.base import ( Model, ModelConfig, - _raise_not_decorated_exception, ) from ray.rllib.core.models.specs.checker import ( is_input_decorated, @@ -16,11 +16,13 @@ ) from ray.rllib.utils.annotations import override from ray.rllib.utils.framework import try_import_torch -from ray.rllib.utils.nested_dict import NestedDict from ray.rllib.utils.typing import TensorType +from ray.util import log_once torch, nn = try_import_torch() +logger = logging.getLogger(__name__) + class TorchModel(nn.Module, Model, abc.ABC): """Base class for RLlib's PyTorch models. @@ -69,17 +71,30 @@ def __init__(self, config: ModelConfig): nn.Module.__init__(self) Model.__init__(self, config) - # Raise errors if forward method is not decorated to check specs. + # Raise errors if forward method is not decorated to check input specs. if not is_input_decorated(self.forward): - _raise_not_decorated_exception(type(self).__name__ + ".forward()", "input") - if not is_output_decorated(self.forward): - _raise_not_decorated_exception(type(self).__name__ + ".forward()", "output") + raise ValueError( + f"`{type(self).__name__}.forward()` not decorated with input " + f"specification. Decorate it with @check_input_specs() to define a " + f"specification and resolve this Error. 
If you don't want to check " + f"anything, you can use an empty spec." + ) + + if is_output_decorated(self.forward): + if log_once("torch_model_forward_output_decorated"): + logger.warning( + f"`{type(self).__name__}.forward()` decorated with output " + f"specification. This is not recommended for torch models " + f"that are used with torch.compile() because it breaks " + f"torch dynamo's graph. This can lead lead to slower execution." + f"Remove @check_output_specs() from the forward() method to " + f"resolve this." + ) @check_input_specs("input_specs") - @check_output_specs("output_specs") def forward( - self, inputs: Union[NestedDict, TensorType], **kwargs - ) -> Union[NestedDict, TensorType]: + self, inputs: Union[dict, TensorType], **kwargs + ) -> Union[dict, TensorType]: """Returns the output of this model for the given input. This method only makes sure that we have a spec-checked _forward() method. @@ -89,8 +104,21 @@ def forward( **kwargs: Forward compatibility kwargs. Returns: - NestedDict: The output tensors. + dict: The output tensors. """ + + # When `always_check_shapes` is set, we always check input and output specs. + # Note that we check the input specs twice because we need the following + # check to always check the input specs. 
+ if self.config.always_check_shapes: + + @check_input_specs("input_specs", only_check_on_retry=False) + @check_output_specs("output_specs") + def checked_forward(self, input_data, **kwargs): + return self._forward(input_data, **kwargs) + + return checked_forward(self, inputs, **kwargs) + return self._forward(inputs, **kwargs) @override(Model) diff --git a/rllib/core/models/torch/encoder.py b/rllib/core/models/torch/encoder.py index 8f6b930524f3..05ff2785cd80 100644 --- a/rllib/core/models/torch/encoder.py +++ b/rllib/core/models/torch/encoder.py @@ -25,7 +25,6 @@ from ray.rllib.policy.sample_batch import SampleBatch from ray.rllib.utils.annotations import override from ray.rllib.utils.framework import try_import_torch -from ray.rllib.utils.nested_dict import NestedDict torch, nn = try_import_torch() @@ -80,13 +79,11 @@ def get_output_specs(self) -> Optional[Spec]: ) @override(Model) - def _forward(self, inputs: NestedDict, **kwargs) -> NestedDict: - return NestedDict( - { - ENCODER_OUT: self.net(inputs[SampleBatch.OBS]), - STATE_OUT: inputs[STATE_IN], - } - ) + def _forward(self, inputs: dict, **kwargs) -> dict: + return { + ENCODER_OUT: self.net(inputs[SampleBatch.OBS]), + STATE_OUT: inputs[STATE_IN], + } class TorchCNNEncoder(TorchModel, Encoder): @@ -153,13 +150,11 @@ def get_output_specs(self) -> Optional[Spec]: ) @override(Model) - def _forward(self, inputs: NestedDict, **kwargs) -> NestedDict: - return NestedDict( - { - ENCODER_OUT: self.net(inputs[SampleBatch.OBS]), - STATE_OUT: inputs[STATE_IN], - } - ) + def _forward(self, inputs: dict, **kwargs) -> dict: + return { + ENCODER_OUT: self.net(inputs[SampleBatch.OBS]), + STATE_OUT: inputs[STATE_IN], + } class TorchGRUEncoder(TorchModel, Encoder): @@ -229,7 +224,7 @@ def get_initial_state(self): } @override(Model) - def _forward(self, inputs: NestedDict, **kwargs) -> NestedDict: + def _forward(self, inputs: dict, **kwargs) -> dict: out = inputs[SampleBatch.OBS].float() # States are batch-first when coming in. 
Make them layers-first. @@ -326,7 +321,7 @@ def get_initial_state(self): } @override(Model) - def _forward(self, inputs: NestedDict, **kwargs) -> NestedDict: + def _forward(self, inputs: dict, **kwargs) -> dict: out = inputs[SampleBatch.OBS].float() # States are batch-first when coming in. Make them layers-first. diff --git a/rllib/models/catalog.py b/rllib/models/catalog.py index 133d9ef9fb72..83c942d1a277 100644 --- a/rllib/models/catalog.py +++ b/rllib/models/catalog.py @@ -181,6 +181,13 @@ # backward compatibility to old configs. This yields different models than past # versions of RLlib. "encoder_latent_dim": None, + # Whether to always check the inputs and outputs of RLlib's default models for + # their specifications. Input specifications are checked on failed forward passes + # of the models regardless of this flag. If this flag is set to `True`, inputs and + # outputs are checked on every call. This leads to a slow-down and should only be + # used for debugging. Note that this flag is only relevant for instances of + # RLlib's Model class. These are commonly generated from ModelConfigs in RLModules. + "always_check_shapes": False, # Deprecated keys: # Use `lstm_use_prev_action` or `lstm_use_prev_reward` instead. 
From 5a1386f91f337a54ff10f97b27c67de5cf132b92 Mon Sep 17 00:00:00 2001 From: Eric Liang Date: Tue, 2 May 2023 11:30:03 -0700 Subject: [PATCH 190/424] [data] [docs] Generalize fix for converting lists to np.ndarray in UDFs (#34930) --- python/ray/data/_internal/arrow_block.py | 16 ++++++++-- .../ray/data/_internal/planner/map_batches.py | 12 -------- python/ray/data/tests/test_strict_mode.py | 29 +++++++++++++++---- 3 files changed, 38 insertions(+), 19 deletions(-) diff --git a/python/ray/data/_internal/arrow_block.py b/python/ray/data/_internal/arrow_block.py index cb97eb0d67a4..839712bda053 100644 --- a/python/ray/data/_internal/arrow_block.py +++ b/python/ray/data/_internal/arrow_block.py @@ -153,7 +153,7 @@ def from_bytes(cls, data: bytes) -> "ArrowBlockAccessor": @staticmethod def numpy_to_block( - batch: Union[np.ndarray, Dict[str, np.ndarray]], + batch: Union[np.ndarray, Dict[str, np.ndarray], Dict[str, list]], passthrough_arrow_not_implemented_errors: bool = False, ) -> "pyarrow.Table": import pyarrow as pa @@ -163,7 +163,7 @@ def numpy_to_block( if isinstance(batch, np.ndarray): batch = {TENSOR_COLUMN_NAME: batch} elif not isinstance(batch, collections.abc.Mapping) or any( - not isinstance(col, np.ndarray) for col in batch.values() + not isinstance(col, (list, np.ndarray)) for col in batch.values() ): raise ValueError( "Batch must be an ndarray or dictionary of ndarrays when converting " @@ -172,6 +172,18 @@ def numpy_to_block( ) new_batch = {} for col_name, col in batch.items(): + if isinstance(col, list): + # Try to convert list values into an numpy array via + # np.array(), so users don't need to manually cast. + # NOTE: we don't cast generic iterables, since types like + # `str` are also Iterable. + try: + col = np.array(col) + except Exception: + raise ValueError( + "Failed to convert column values to numpy array: " + f"({_truncated_repr(col)})." + ) # Use Arrow's native *List types for 1-dimensional ndarrays. 
if col.dtype.type is np.object_ or col.ndim > 1: try: diff --git a/python/ray/data/_internal/planner/map_batches.py b/python/ray/data/_internal/planner/map_batches.py index 27597c550bdb..d1a8c09896bf 100644 --- a/python/ray/data/_internal/planner/map_batches.py +++ b/python/ray/data/_internal/planner/map_batches.py @@ -62,18 +62,6 @@ def validate_batch(batch: Block) -> None: f"{type(value)}. To fix this issue, convert " f"the {type(value)} to a `np.ndarray`." ) - if isinstance(value, list): - # Try to convert list values into an numpy array via - # np.array(), so users don't need to manually cast. - # NOTE: we don't cast generic iterables, since types like - # `str` are also Iterable. - try: - batch[key] = np.array(value) - except Exception: - raise ValueError( - "Failed to convert column values to numpy array: " - f"({_truncated_repr(value)})." - ) def process_next_batch(batch: DataBatch) -> Iterator[Block]: # Apply UDF. diff --git a/python/ray/data/tests/test_strict_mode.py b/python/ray/data/tests/test_strict_mode.py index bbbf3c6b1b23..30cc5966ee30 100644 --- a/python/ray/data/tests/test_strict_mode.py +++ b/python/ray/data/tests/test_strict_mode.py @@ -53,8 +53,8 @@ def test_strict_map_output(ray_start_regular_shared, enable_strict_mode): with pytest.raises(StrictModeError): ds.map_batches(lambda x: np.array([0]), max_retries=0).materialize() - ds.map_batches(lambda x: {"id": np.array([0])}).materialize() - ds.map_batches(lambda x: UserDict({"id": np.array([0])})).materialize() + ds.map_batches(lambda x: {"id": [0]}).materialize() + ds.map_batches(lambda x: UserDict({"id": [0]})).materialize() with pytest.raises(StrictModeError): ds.map(lambda x: np.ones(10), max_retries=0).materialize() @@ -71,8 +71,8 @@ def test_strict_map_output(ray_start_regular_shared, enable_strict_mode): ds.map_batches(lambda x: object(), max_retries=0).materialize() with pytest.raises(ValueError): ds.map_batches(lambda x: {"x": object()}, max_retries=0).materialize() - 
ds.map_batches(lambda x: {"x": np.array([object()])}).materialize() - ds.map_batches(lambda x: UserDict({"x": np.array([object()])})).materialize() + ds.map_batches(lambda x: {"x": [object()]}).materialize() + ds.map_batches(lambda x: UserDict({"x": [object()]})).materialize() with pytest.raises(StrictModeError): ds.map(lambda x: object(), max_retries=0).materialize() @@ -86,7 +86,9 @@ def test_strict_convert_map_output(ray_start_regular_shared, enable_strict_mode) with pytest.raises(ValueError): # Strings not converted into array. - ray.data.range(1).map_batches(lambda x: {"id": "string"}).materialize() + ray.data.range(1).map_batches( + lambda x: {"id": "string"}, max_retries=0 + ).materialize() class UserObj: def __eq__(self, other): @@ -100,6 +102,23 @@ def __eq__(self, other): assert ds.take_batch()["id"].tolist() == [0, 1, 2, UserObj()] +def test_strict_convert_map_groups(ray_start_regular_shared, enable_strict_mode): + ds = ray.data.read_csv("example://iris.csv") + + def process_group(group): + variety = group["variety"][0] + count = len(group["variety"]) + + # Test implicit list->array conversion here. 
+ return { + "variety": [variety], + "count": [count], + } + + ds = ds.groupby("variety").map_groups(process_group) + ds.show() + + def test_strict_default_batch_format(ray_start_regular_shared, enable_strict_mode): ds = ray.data.range(1) From b09e0cbebc460e41345c0ab832343344a978fcdc Mon Sep 17 00:00:00 2001 From: Larry <554538252@qq.com> Date: Wed, 3 May 2023 03:12:41 +0800 Subject: [PATCH 191/424] [Runtime Env]Add usage of expansion of certain PATH variables in env_vars (#34856) Add usage of expansion of certain PATH variables in env vars to documentation Signed-off-by: LarryLian <554538252@qq.com> --- doc/source/ray-core/handling-dependencies.rst | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/doc/source/ray-core/handling-dependencies.rst b/doc/source/ray-core/handling-dependencies.rst index ba8600599e89..0d42ef2faabc 100644 --- a/doc/source/ray-core/handling-dependencies.rst +++ b/doc/source/ray-core/handling-dependencies.rst @@ -371,10 +371,15 @@ The ``runtime_env`` is a Python dictionary or a Python class :class:`ray.runtime Furthermore, referencing local files `within` a `environment.yml` file is not supported. - ``env_vars`` (Dict[str, str]): Environment variables to set. Environment variables already set on the cluster will still be visible to the Ray workers; so there is - no need to include ``os.environ`` or similar in the ``env_vars`` field. + no need to include ``os.environ`` or similar in the ``env_vars`` field. + By default, these environment variables override the same name environment variables on the cluster. + You can also reference existing environment variables using ${ENV_VAR} to achieve the appending behavior. + Only PATH, LD_LIBRARY_PATH, DYLD_LIBRARY_PATH, and LD_PRELOAD are supported. 
See below for an example: - Example: ``{"OMP_NUM_THREADS": "32", "TF_WARNINGS": "none"}`` + - Example: ``{"LD_LIBRARY_PATH": "${LD_LIBRARY_PATH}:/home/admin/my_lib"}`` + - ``container`` (dict): Require a given (Docker) image, and the worker process will run in a container with this image. The `worker_path` is the default_worker.py path. It is required only if ray installation directory in the container is different from raylet host. The `run_options` list spec is `here `__. From 18bf7fff11559f87cc3d17a4318d7c513b1a0bfc Mon Sep 17 00:00:00 2001 From: Kai Fricke Date: Tue, 2 May 2023 20:43:45 +0100 Subject: [PATCH 192/424] [air output] Print single trial config + results as table (#34788) This PR makes a number of improvements to tackle issues uncovered in dogfooding: 1. Instead of just a print, we render a table for configs and results at the start of training (closes https://github.com/ray-project/ray/issues/34784) 2. We round float results to 5 significant numbers after the decimal point (closes https://github.com/ray-project/ray/issues/34785) 3. We track the last printed result and only print the result at the end of training if it hasn't been printed before (closes https://github.com/ray-project/ray/issues/34786) 4. 
We divide the results by "automatic" results and trainer-specific results (closes https://github.com/ray-project/ray/issues/34787) Signed-off-by: Kai Fricke --- python/ray/air/execution/resources/request.py | 3 +- python/ray/tune/experimental/output.py | 166 ++++++++++++++---- python/ray/tune/tests/output/test_output.py | 56 ++++++ 3 files changed, 188 insertions(+), 37 deletions(-) diff --git a/python/ray/air/execution/resources/request.py b/python/ray/air/execution/resources/request.py index 87455a6b3ad8..40dbd3858d85 100644 --- a/python/ray/air/execution/resources/request.py +++ b/python/ray/air/execution/resources/request.py @@ -154,7 +154,8 @@ def to_placement_group(self): def __eq__(self, other: "ResourceRequest"): return ( - self._bound == other._bound + isinstance(other, ResourceRequest) + and self._bound == other._bound and self.head_bundle_is_empty == other.head_bundle_is_empty ) diff --git a/python/ray/tune/experimental/output.py b/python/ray/tune/experimental/output.py index 6970d2e505e0..fe649658269d 100644 --- a/python/ray/tune/experimental/output.py +++ b/python/ray/tune/experimental/output.py @@ -1,4 +1,4 @@ -from typing import List, Dict, Optional, Tuple, Any, TYPE_CHECKING +from typing import List, Dict, Optional, Tuple, Any, TYPE_CHECKING, Collection import contextlib import collections @@ -11,7 +11,6 @@ import numpy as np import os import pandas as pd -from ray._private.thirdparty.tabulate.tabulate import tabulate import textwrap import time @@ -24,9 +23,14 @@ import ray from ray._private.dict import unflattened_lookup +from ray._private.thirdparty.tabulate.tabulate import ( + tabulate, + TableFormat, + Line, + DataRow, +) from ray.air._internal.checkpoint_manager import _TrackedCheckpoint from ray.tune.callback import Callback -from ray.tune.logger import pretty_print from ray.tune.result import ( AUTO_RESULT_KEYS, EPISODE_REWARD_MEAN, @@ -356,6 +360,91 @@ def _best_trial_str( ) +def _render_table_item(key: str, item: Any, prefix: str = ""): + 
key = prefix + key + if isinstance(item, float): + # tabulate does not work well with mixed-type columns, so we format + # numbers ourselves. + yield key, f"{item:.5f}".rstrip("0") + elif isinstance(item, list): + yield key, None + for sv in item: + yield from _render_table_item("", sv, prefix=prefix + "-") + elif isinstance(item, Dict): + yield key, None + for sk, sv in item.items(): + yield from _render_table_item(str(sk), sv, prefix=prefix + "/") + else: + yield key, item + + +def _get_dict_as_table_data( + data: Dict, + exclude: Optional[Collection] = None, + upper_keys: Optional[Collection] = None, +): + exclude = exclude or set() + upper_keys = upper_keys or set() + + upper = [] + lower = [] + + for key, value in sorted(data.items()): + if key in exclude: + continue + + for k, v in _render_table_item(str(key), value): + if key in upper_keys: + upper.append([k, v]) + else: + lower.append([k, v]) + + if not upper: + return lower + elif not lower: + return upper + else: + return upper + lower + + +# Copied/adjusted from tabulate +AIR_TABULATE_TABLEFMT = TableFormat( + lineabove=Line("╭", "─", "─", "╮"), + linebelowheader=Line("├", "─", "─", "┤"), + linebetweenrows=None, + linebelow=Line("╰", "─", "─", "╯"), + headerrow=DataRow("│", " ", "│"), + datarow=DataRow("│", " ", "│"), + padding=1, + with_header_hide=None, +) + + +def _print_dict_as_table( + data: Dict, + header: Optional[str] = None, + exclude: Optional[Collection] = None, + division: Optional[Collection] = None, +): + table_data = _get_dict_as_table_data( + data=data, exclude=exclude, upper_keys=division + ) + + headers = [header, ""] if header else [] + + if not table_data: + return + + print( + tabulate( + table_data, + headers=headers, + colalign=("left", "right"), + tablefmt=AIR_TABULATE_TABLEFMT, + ) + ) + + class ProgressReporter: """Periodically prints out status update.""" @@ -594,6 +683,7 @@ def _print_heartbeat(self, trials, *args): # These keys are blacklisted for printing out training/tuning 
intermediate/final result! BLACKLISTED_KEYS = { + "config", "date", "done", "hostname", @@ -650,12 +740,28 @@ class AirResultProgressCallback(Callback): def __init__(self, verbosity): self._verbosity = verbosity self._start_time = time.time() + self._trial_last_printed_results = {} + + def _print_result(self, trial, result: Optional[Dict] = None, force: bool = False): + """Only print result if a different result has been reported, or force=True""" + result = result or trial.last_result - def _print_result(self, trial, result=None): - print(pretty_print(result or trial.last_result, BLACKLISTED_KEYS)) + last_result_iter = self._trial_last_printed_results.get(trial.trial_id, -1) + this_iter = result.get(TRAINING_ITERATION, 0) + + if this_iter != last_result_iter or force: + _print_dict_as_table( + result, + header=f"{self._addressing_tmpl.format(trial)} result", + exclude=BLACKLISTED_KEYS, + division=AUTO_RESULT_KEYS, + ) + self._trial_last_printed_results[trial.trial_id] = this_iter def _print_config(self, trial): - print(pretty_print(trial.config)) + _print_dict_as_table( + trial.config, header=f"{self._addressing_tmpl.format(trial)} config" + ) def on_trial_result( self, @@ -669,13 +775,9 @@ def on_trial_result( return curr_time, running_time = _get_time_str(self._start_time, time.time()) print( - " ".join( - [ - self._addressing_tmpl.format(trial), - f"finished iter {result[TRAINING_ITERATION]} " - f"at {curr_time} (running for {running_time})", - ] - ) + f"{self._addressing_tmpl.format(trial)} " + f"finished iteration {result[TRAINING_ITERATION]} " + f"at {curr_time} (running for {running_time})." 
) self._print_result(trial, result) @@ -689,13 +791,9 @@ def on_trial_complete( if trial.last_result and TRAINING_ITERATION in trial.last_result: finished_iter = trial.last_result[TRAINING_ITERATION] print( - " ".join( - [ - self._addressing_tmpl.format(trial), - f"({finished_iter} iters) " - f"finished at {curr_time} (running for {running_time})", - ] - ) + f"{self._addressing_tmpl.format(trial)} " + f"completed training after {finished_iter} iterations " + f"at {curr_time} (running for {running_time})." ) self._print_result(trial) @@ -714,30 +812,26 @@ def on_checkpoint( if trial.last_result and TRAINING_ITERATION in trial.last_result: saved_iter = trial.last_result[TRAINING_ITERATION] print( - " ".join( - [ - self._addressing_tmpl.format(trial), - f"saved checkpoint for iter {saved_iter}" - f" at {checkpoint.dir_or_data}", - ] - ) + f"{self._addressing_tmpl.format(trial)} " + f"saved a checkpoint for iteration {saved_iter} " + f"at: {checkpoint.dir_or_data}" ) - print() def on_trial_start(self, iteration: int, trials: List[Trial], trial: Trial, **info): if self._verbosity < self._start_end_verbosity: return has_config = bool(trial.config) - print( - " ".join( - [ - self._addressing_tmpl.format(trial), - "started with configuration:" if has_config else "started.", - ] - ) - ) + if has_config: + print( + f"{self._addressing_tmpl.format(trial)} " f"started with configuration:" + ) self._print_config(trial) + else: + print( + f"{self._addressing_tmpl.format(trial)} " + f"started without custom configuration." 
+ ) class TuneResultProgressCallback(AirResultProgressCallback): diff --git a/python/ray/tune/tests/output/test_output.py b/python/ray/tune/tests/output/test_output.py index c13248b79068..7b45a8078446 100644 --- a/python/ray/tune/tests/output/test_output.py +++ b/python/ray/tune/tests/output/test_output.py @@ -13,6 +13,7 @@ _current_best_trial, _best_trial_str, _get_trial_table_data, + _get_dict_as_table_data, ) from ray.tune.experiment.trial import Trial @@ -164,5 +165,60 @@ def test_get_trial_table_data_more_than_20(): assert table_data[2].more_info == "... and 5 more PENDING ..." +def test_result_table_no_divison(): + data = _get_dict_as_table_data( + { + "b": 6, + "a": 8, + "x": 19.123123123, + "c": 5, + "ignore": 9, + "y": 20, + "z": {"m": 4, "n": {"o": "p"}}, + }, + exclude={"ignore"}, + ) + + assert data == [ + ["a", 8], + ["b", 6], + ["c", 5], + ["x", "19.12312"], + ["y", 20], + ["z", None], + ["/m", 4], + ["/n", None], + ["//o", "p"], + ] + + +def test_result_table_divison(): + data = _get_dict_as_table_data( + { + "b": 6, + "a": 8, + "x": 19.123123123, + "c": 5, + "ignore": 9, + "y": 20, + "z": {"m": 4, "n": {"o": "p"}}, + }, + exclude={"ignore"}, + upper_keys={"x", "y", "z"}, + ) + + assert data == [ + ["x", "19.12312"], + ["y", 20], + ["z", None], + ["/m", 4], + ["/n", None], + ["//o", "p"], + ["a", 8], + ["b", 6], + ["c", 5], + ] + + if __name__ == "__main__": sys.exit(pytest.main(["-v", __file__])) From 8b8ba49c01837dfb8d61a9fce0e64bb0b4e7680e Mon Sep 17 00:00:00 2001 From: Yi Cheng <74173148+iycheng@users.noreply.github.com> Date: Tue, 2 May 2023 12:48:58 -0700 Subject: [PATCH 193/424] [doc] Update the owner of data tests in doc (#34964) The team flag is wrong for these tests. Update it to data. 
--- doc/BUILD | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/BUILD b/doc/BUILD index a2bb7a8bd79d..be989a0d7b07 100644 --- a/doc/BUILD +++ b/doc/BUILD @@ -226,7 +226,7 @@ py_test_run_all_subdirectory( "source/data/doc_code/creating_datastreams_untested.py" ], extra_srcs = [], - tags = ["exclusive", "team:core"], + tags = ["exclusive", "team:data"], ) # -------------------------------------------------------------------- From 66d1e48386498f8660eaad35c6fd01e8c543c602 Mon Sep 17 00:00:00 2001 From: Cuong Nguyen <128072568+can-anyscale@users.noreply.github.com> Date: Tue, 2 May 2023 12:49:52 -0700 Subject: [PATCH 194/424] [CI] Fix python/ray/tests:test_autoscaler_aws (#34936) #34804 broke python/ray/tests:test_autoscaler_aws because that unit-test does not allow to specify a key-value pair in the aws provider. Release tests need to specify that key though. Fix release tests to use the same file to we don't have to specify separated keys. Signed-off-by: Cuong Nguyen --- python/ray/autoscaler/aws/example-full.yaml | 2 -- python/ray/autoscaler/aws/example-minimal.yaml | 2 -- python/ray/autoscaler/aws/tests/aws_cluster.yaml | 2 -- .../ray/autoscaler/aws/tests/aws_launch_and_verify_cluster.py | 2 +- 4 files changed, 1 insertion(+), 7 deletions(-) diff --git a/python/ray/autoscaler/aws/example-full.yaml b/python/ray/autoscaler/aws/example-full.yaml index 75ce0b3e99b7..49df110fc64c 100644 --- a/python/ray/autoscaler/aws/example-full.yaml +++ b/python/ray/autoscaler/aws/example-full.yaml @@ -45,8 +45,6 @@ provider: # Whether to allow node reuse. If set to False, nodes will be terminated # instead of stopped. cache_stopped_nodes: True # If not present, the default is True. - key_pair: - key_name: aws-cluster-launcher-test # How Ray will authenticate with newly launched nodes. 
auth: diff --git a/python/ray/autoscaler/aws/example-minimal.yaml b/python/ray/autoscaler/aws/example-minimal.yaml index 0a9e908bcc73..09a2727d1311 100644 --- a/python/ray/autoscaler/aws/example-minimal.yaml +++ b/python/ray/autoscaler/aws/example-minimal.yaml @@ -5,5 +5,3 @@ cluster_name: aws-example-minimal provider: type: aws region: us-west-2 - key_pair: - key_name: aws-cluster-launcher-test diff --git a/python/ray/autoscaler/aws/tests/aws_cluster.yaml b/python/ray/autoscaler/aws/tests/aws_cluster.yaml index 311c1e90673a..b226c723129c 100644 --- a/python/ray/autoscaler/aws/tests/aws_cluster.yaml +++ b/python/ray/autoscaler/aws/tests/aws_cluster.yaml @@ -8,8 +8,6 @@ provider: type: aws region: us-west-2 cache_stopped_nodes: False - key_pair: - key_name: aws-cluster-launcher-test available_node_types: ray.head.default: diff --git a/python/ray/autoscaler/aws/tests/aws_launch_and_verify_cluster.py b/python/ray/autoscaler/aws/tests/aws_launch_and_verify_cluster.py index 41cd8eadc03a..67fb63add3a6 100644 --- a/python/ray/autoscaler/aws/tests/aws_launch_and_verify_cluster.py +++ b/python/ray/autoscaler/aws/tests/aws_launch_and_verify_cluster.py @@ -66,7 +66,7 @@ def download_ssh_key(): # Set the name of the S3 bucket and the key to download bucket_name = "aws-cluster-launcher-test" - key_name = "aws-cluster-launcher-test.pem" + key_name = "ray-autoscaler_59_us-west-2.pem" # Download the key from the S3 bucket to a local file local_key_path = os.path.expanduser(f"~/.ssh/{key_name}") From de6b14c73b223429d258c1031b7b38f44ee08d07 Mon Sep 17 00:00:00 2001 From: Antoni Baum Date: Tue, 2 May 2023 14:22:42 -0700 Subject: [PATCH 195/424] [Docs] Tip on avoiding OOMs with GPT-J finetune (#33953) Signed-off-by: Antoni Baum --- doc/source/ray-air/examples/gptj_deepspeed_fine_tuning.ipynb | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/doc/source/ray-air/examples/gptj_deepspeed_fine_tuning.ipynb b/doc/source/ray-air/examples/gptj_deepspeed_fine_tuning.ipynb 
index 1a70fa3a4f0e..2063a7cd4b86 100644 --- a/doc/source/ray-air/examples/gptj_deepspeed_fine_tuning.ipynb +++ b/doc/source/ray-air/examples/gptj_deepspeed_fine_tuning.ipynb @@ -1,6 +1,7 @@ { "cells": [ { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -15,7 +16,7 @@ "It is highly recommended to read [Ray AIR Key Concepts](air-key-concepts) and [Ray Data Key Concepts](data_key_concepts) before starting this example.\n", "\n", "```{note}\n", - "In order to run this example, make sure your Ray cluster has access to at least one GPU with 16 or more GBs of memory. The amount of memory needed will depend on the model. This notebook is being tested with 16 g4dn.4xlarge instances.\n", + "In order to run this example, make sure your Ray cluster has access to at least one GPU with 16 or more GBs of memory. The amount of memory needed will depend on the model. This notebook is being tested with 16 g4dn.4xlarge instances (including the head node). If you wish to use a CPU head node, turn on [cloud checkpointing](train-config-sync>) to avoid OOM errors that may happen due to the default behavior of syncing the checkpoint files to head node.\n", "```\n", "\n", "In this notebook, we will:\n", From 1089c4a6d7ac52ca05bf64f3260947955448172f Mon Sep 17 00:00:00 2001 From: Max Pumperla Date: Tue, 2 May 2023 23:38:55 +0200 Subject: [PATCH 196/424] [docs] batch prediction guide, strict mode (#34969) Signed-off-by: Max Pumperla Signed-off-by: amogkam Co-authored-by: amogkam --- doc/source/data/batch_inference.rst | 102 +++++------------- doc/source/data/doc_code/batch_formats.py | 27 +++-- doc/source/data/doc_code/hf_quick_start.py | 7 +- .../data/doc_code/pytorch_quick_start.py | 10 +- doc/source/data/doc_code/tf_quick_start.py | 10 +- 5 files changed, 67 insertions(+), 89 deletions(-) diff --git a/doc/source/data/batch_inference.rst b/doc/source/data/batch_inference.rst index 5ace6a8bb45f..585520aa4d7f 100644 --- a/doc/source/data/batch_inference.rst +++ 
b/doc/source/data/batch_inference.rst @@ -199,6 +199,15 @@ Once you have your Ray Datastream ``ds`` and your predictor class, you can use In the example below, we use two CPUs to run inference in parallel and then print the results. We cover resource allocation in more detail in :ref:`the configuration section of this guide `. +.. note:: + + Defining your :meth:`ds.map_batches() ` function requires + you to write a Python function that takes a batch of data and returns a batch of predictions. + An easy way to do this and validate it is to use ``ds.take_batch(N)`` to get a batch of data + first, and then locally test your predictor function on that batch, without using Ray. + Once you are happy with the results, you can use the same function in ``map_batches`` + on the full dataset. The examples below show you how. + .. tabs:: .. group-tab:: HuggingFace @@ -446,7 +455,7 @@ Working with batch formats -------------------------- Now that you've seen examples of batch inference with Ray, let's have a closer look -at how to deal with different data formats. +at how to deal with different data formats for batches. First of all, you need to distinguish between two types of batch formats: - Input batch formats: This is the format of the input to your UDFs. You will often have to @@ -462,47 +471,34 @@ but it's good to be aware of the differences. We often use batch format names and the libraries they represent interchangeably. Let's focus on the three available input batch formats first, -namely Pandas, NumPy, and Arrow, and how they're used in Ray Data: +namely NumPy, Pandas and Arrow, and how they're used in Ray Data. +By default, the batch format will be ``"numpy"``, but you can specify other formats +as you see fit. -.. tabbed:: Pandas +.. tabbed:: NumPy (default) - The ``"pandas"`` batch format presents batches in - `pandas.DataFrame `__ - format. 
If converting a simple dataset to Pandas DataFrame batches, a single-column - dataframe with the column ``"__value__"`` will be created. + The ``"numpy"`` batch format presents batches as dictionary of + `numpy.ndarray `__ (``Dict[str, np.ndarray]``), with each key-value pair representing one column. .. literalinclude:: ./doc_code/batch_formats.py :language: python - :start-after: __simple_pandas_start__ - :end-before: __simple_pandas_end__ - -.. tabbed:: NumPy - - The ``"numpy"`` batch format presents batches in - `numpy.ndarray `__ - format as follows: - - * **Tabular datasets**: Each batch will be a dictionary of NumPy - ndarrays (``Dict[str, np.ndarray]``), with each key-value pair representing a column - in the table. + :start-after: __simple_numpy_start__ + :end-before: __simple_numpy_end__ - * **Tensor datasets** (single-column): Each batch will be a single - `numpy.ndarray `__ - containing the single tensor column for this batch. +.. tabbed:: Pandas - * **Simple datasets**: Each batch will be a single NumPy ndarray, where Ray Data will - attempt to convert each list-batch to an ndarray. + The ``"pandas"`` batch format presents batches in + `pandas.DataFrame `__ + format. .. literalinclude:: ./doc_code/batch_formats.py :language: python - :start-after: __simple_numpy_start__ - :end-before: __simple_numpy_end__ + :start-after: __simple_pandas_start__ + :end-before: __simple_pandas_end__ .. tabbed:: Arrow The ``"pyarrow"`` batch format presents batches in ``pyarrow.Table`` format. - If converting a simple dataset to Arrow Table batches, a single-column table - with the column ``"__value__"`` will be created. .. 
literalinclude:: ./doc_code/batch_formats.py :language: python @@ -510,9 +506,8 @@ namely Pandas, NumPy, and Arrow, and how they're used in Ray Data: :end-before: __simple_pyarrow_end__ When defining the return value of your UDF, you can choose between -Pandas dataframes (``pandas.DataFrame``), NumPy arrays (``numpy.ndarray``), Arrow tables -(``pyarrow.Table``), dictionaries of NumPy arrays (``Dict[str, np.ndarray]``) or simple -Python lists (``list``). +Pandas dataframes (``pandas.DataFrame``), Arrow tables +(``pyarrow.Table``), or dictionaries of NumPy arrays (``Dict[str, np.ndarray]``). You can learn more about output formats in :ref:`the output format guide`. .. important:: @@ -522,50 +517,6 @@ You can learn more about output formats in :ref:`the output format guide`, -the batch format will be ``"numpy"``, but it's not always that easy. - -In any case, Ray Data has a ``"default"`` batch format that is computed per data type -as follows: - -.. tabbed:: Tabular data - - Each batch will be a - `pandas.DataFrame `__. - This may incur a conversion cost if the underlying Datastream block is not - zero-copy convertible from an Arrow table. - - .. literalinclude:: ./doc_code/transforming_datastreams.py - :language: python - :start-after: __writing_default_udfs_tabular_begin__ - :end-before: __writing_default_udfs_tabular_end__ - -.. tabbed:: Tensor data (single-column) - - Each batch will be a single - `numpy.ndarray `__ - containing the single tensor column for this batch. - - .. literalinclude:: ./doc_code/transforming_datastreams.py - :language: python - :start-after: __writing_default_udfs_tensor_begin__ - :end-before: __writing_default_udfs_tensor_end__ - -.. tabbed:: Simple data - - Each batch will be a Python list. - - .. literalinclude:: ./doc_code/transforming_datastreams.py - :language: python - :start-after: __writing_default_udfs_list_begin__ - :end-before: __writing_default_udfs_list_end__ - - .. 
seealso:: As we've discussed in this guide, using :meth:`ds.map_batches() ` @@ -585,7 +536,6 @@ as follows: :start-after: __hf_quickstart_air_start__ :end-before: __hf_quickstart_air_end__ - .. _batch_inference_config: Configuration & Troubleshooting ------------------------------- diff --git a/doc/source/data/doc_code/batch_formats.py b/doc/source/data/doc_code/batch_formats.py index 8dc1136e6124..e7bb21dbebcd 100644 --- a/doc/source/data/doc_code/batch_formats.py +++ b/doc/source/data/doc_code/batch_formats.py @@ -10,6 +10,9 @@ def map_function(data): return data[data["sepal.length"] < 5] +batch = ds.take_batch(10) +mapped_batch = map_function(batch) + transformed = ds.map_batches(map_function, batch_size=10) # __simple_map_function_end__ @@ -21,7 +24,6 @@ def map_function(data): ds.show(1) # -> {'sepal.length': 5.1, ..., 'petal.width': 0.2, 'variety': 'Setosa'} -ds.default_batch_format() # pandas.core.frame.DataFrame def transform_pandas(df_batch: pd.DataFrame) -> pd.DataFrame: @@ -30,20 +32,27 @@ def transform_pandas(df_batch: pd.DataFrame) -> pd.DataFrame: df_batch = df_batch.drop(columns=["sepal.length"]) return df_batch -ds.map_batches(transform_pandas).show(1) +ds.map_batches(transform_pandas, batch_format="pandas").show(1) # -> {..., 'variety': 'Versicolor', 'normalized.sepal.length': 1.0} # __simple_pandas_end__ # __simple_numpy_start__ +from typing import Dict + import ray import numpy as np + ds = ray.data.range_tensor(1000, shape=(2, 2)) -ds.default_batch_format() -# 'numpy.ndarray' -def transform_numpy(arr: np.ndarray) -> np.ndarray: - return arr * 2 +def transform_numpy(arr: Dict[str, np.ndarray]) -> Dict[str, np.ndarray]: + arr["data"] = arr["data"] * 2 + return arr + + +# test map function on a batch +batch = ds.take_batch(1) +mapped_batch = transform_numpy(batch) ds.map_batches(transform_numpy) # __simple_numpy_end__ @@ -56,10 +65,16 @@ def transform_numpy(arr: np.ndarray) -> np.ndarray: ds = ray.data.read_csv("example://iris.csv") + def 
transform_pyarrow(batch: pa.Table) -> pa.Table: batch = batch.filter(pac.equal(batch["variety"], "Versicolor")) return batch.drop(["sepal.length"]) + +# test map function on a batch +batch = ds.take_batch(1) +mapped_batch = transform_pyarrow(batch) + ds.map_batches(transform_pyarrow, batch_format="pyarrow").show(1) # -> {'sepal.width': 3.2, ..., 'variety': 'Versicolor'} # __simple_pyarrow_end__ diff --git a/doc/source/data/doc_code/hf_quick_start.py b/doc/source/data/doc_code/hf_quick_start.py index c9de271ad4ec..b6b6b8b59381 100644 --- a/doc/source/data/doc_code/hf_quick_start.py +++ b/doc/source/data/doc_code/hf_quick_start.py @@ -19,12 +19,17 @@ def __init__(self): # <1> self.model = pipeline("text-generation", model="gpt2") def __call__(self, batch): # <2> + # TODO make this run with "numpy" format return self.model(list(batch["text"]), max_length=20) # __hf_quickstart_model_end__ # __hf_quickstart_prediction_start__ -scale = ray.data.ActorPoolStrategy(2) +hfp = HuggingFacePredictor() +batch = ds.take_batch(10) +test = hfp(batch) + +scale = ray.data.ActorPoolStrategy(size=2) predictions = ds.map_batches(HuggingFacePredictor, compute=scale) predictions.show(limit=1) diff --git a/doc/source/data/doc_code/pytorch_quick_start.py b/doc/source/data/doc_code/pytorch_quick_start.py index 39bcdc4f9bdc..5b71962ace22 100644 --- a/doc/source/data/doc_code/pytorch_quick_start.py +++ b/doc/source/data/doc_code/pytorch_quick_start.py @@ -7,7 +7,7 @@ import numpy as np -dataset = ray.data.from_numpy(np.ones((1, 100))) +ds = ray.data.from_numpy(np.ones((1, 100))) # __pt_quickstart_load_end__ @@ -32,8 +32,12 @@ def __call__(self, batch): # <2> # __pt_quickstart_prediction_start__ -scale = ray.data.ActorPoolStrategy(2) -predictions = dataset.map_batches(TorchPredictor, compute=scale) +tp = TorchPredictor() +batch = ds.take_batch(10) +test = tp(batch) + +scale = ray.data.ActorPoolStrategy(size=2) +predictions = ds.map_batches(TorchPredictor, compute=scale) 
predictions.show(limit=1) # [0.45092654] # __pt_quickstart_prediction_end__ diff --git a/doc/source/data/doc_code/tf_quick_start.py b/doc/source/data/doc_code/tf_quick_start.py index 92885b619a89..d923034d72c0 100644 --- a/doc/source/data/doc_code/tf_quick_start.py +++ b/doc/source/data/doc_code/tf_quick_start.py @@ -7,7 +7,7 @@ import numpy as np -dataset = ray.data.from_numpy(np.ones((1, 100))) +ds = ray.data.from_numpy(np.ones((1, 100))) # __tf_quickstart_load_end__ @@ -26,9 +26,13 @@ def __call__(self, batch: np.ndarray): # <2> # __tf_quickstart_prediction_start__ -scale = ray.data.ActorPoolStrategy(2) +tfp = TFPredictor() +batch = ds.take_batch(10) +test = tfp(batch) -predicted_probabilities = dataset.map_batches(TFPredictor, compute=scale) +scale = ray.data.ActorPoolStrategy(size=2) + +predicted_probabilities = ds.map_batches(TFPredictor, compute=scale) predicted_probabilities.show(limit=1) # [0.45119727] # __tf_quickstart_prediction_end__ From 01f019208108759e1faaaedbda7cb4ea6690dc0f Mon Sep 17 00:00:00 2001 From: Yiqing Wang Date: Tue, 2 May 2023 14:51:35 -0700 Subject: [PATCH 197/424] [Doc] add doc and example for Ray TLS (#33737) This PR gives a detailed doc and a step-by-step guide on how to configure the TLS Auth for the Ray cluster. 
Related issue number Closes ray-project/kuberay#889 --- .../configs/static-ray-cluster.tls.yaml | 383 ++++++++++++++++++ doc/source/ray-core/configure.rst | 98 ++++- 2 files changed, 470 insertions(+), 11 deletions(-) create mode 100644 doc/source/cluster/kubernetes/configs/static-ray-cluster.tls.yaml diff --git a/doc/source/cluster/kubernetes/configs/static-ray-cluster.tls.yaml b/doc/source/cluster/kubernetes/configs/static-ray-cluster.tls.yaml new file mode 100644 index 000000000000..70e437f508cc --- /dev/null +++ b/doc/source/cluster/kubernetes/configs/static-ray-cluster.tls.yaml @@ -0,0 +1,383 @@ +apiVersion: v1 +kind: Secret +metadata: + name: ca-tls +data: + # output from cat ca.crt | base64 + ca.crt: | + LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM3RENDQWRRQ0NRQ05Yck8zQTAwbWRqQU5CZ2txaGtpRzl3MEJBUXNGQURBNE1SRXdEd1lEVlFRRERBZ3EKTG5KaGVTNXBiekVMTUFrR0ExVUVCaE1DVlZNeEZqQVVCZ05WQkFjTURWTmhiaUJHY21GdVkybHpZMjh3SGhjTgpNak13TXpJM01EZ3dNVFF4V2hjTk16TXdNekkwTURnd01UUXhXakE0TVJFd0R3WURWUVFEREFncUxuSmhlUzVwCmJ6RUxNQWtHQTFVRUJoTUNWVk14RmpBVUJnTlZCQWNNRFZOaGJpQkdjbUZ1WTJselkyOHdnZ0VpTUEwR0NTcUcKU0liM0RRRUJBUVVBQTRJQkR3QXdnZ0VLQW9JQkFRQ3ZJbGNGSmZxaFNidWowQ3ZpalA0c2xXN3I3Qk1kYVJOeAp5aDhJMGNaSU5QcjQ5Rjg1dXNrY0pxbnFHNC9LeThBYnlacURBUUxsalFUa0Exb3FxVHhGdTZMSm5LOGJHN012Cm90dStjVlZLWW5SeDlLWVoyWi90THRPdzhjZHFzOURuNXVERVh0L0loZzBRc0tVRDNJN3U3QjF5bVpxTjQwWEgKWDVMRUJkN1llSm5XZExqOStLOTl6ZVR0aHlUMWtsRGsySVp2ZjVsa2xjT2hHRzA5RmNtZlF5REFlM2VvTm1IWQpVaUhVU0NORGtnWTV3U3A4V3R6RXEydHBhZEQ2eTVCNVRMS2kvV1l4ZTJLM2tXbTZnUytwQTIvdkZIaU93RHNaClNqb1ZncUtMZ0lNSnZMOGR0bitaWjNLbDlMRkZNY0JiMWJ1NCtKN2U1bno3RTRVSG4wN0pBZ01CQUFFd0RRWUoKS29aSWh2Y05BUUVMQlFBRGdnRUJBQWhSY3g2NzVJbjJVaERhMzArTkZ0UlNTcUJwK1E2WTl3VGNTL0NqM1J3MgpLSnkzUVhBU0xJUW1ESWdrVlBJeEY0V1VYUFdGdmxUL0taQ2JRejRvN2M3ck9DWEVEWnVhbExUSHRrTHVSZFNWClVHSTVSWTJXNUx6UXM2MnNtUG13OWVQYnNLek5kOEpjWkwvNndHZnNsZVQyY1RLTjliZVE2ZWdiQmdEcy91d0sKeVdOREtnaE4vaE16YmRSaFh2SFNiTW8rUkgvRG1Va1VhTXZZc3NNbzFYQkwzRXZwbmpnZXI1ZWQ5ZDVjQWYvUQpuU0VCMk13Z08rWHEwKy9sWmpiUFN
WOVdWQnY1YjZlc1ZPcnZrV2o2TUFKcjUwb3BwT09KUy9TbTNEU3F5aDRBClR5c1BOblQxYStxWDRVZXljZ05VbXRoOXdONFBnc3B6ZEpORWtVdTVSSmM9Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K + # output from cat ca.key | base64 + ca.key: | + LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JSUV2Z0lCQURBTkJna3Foa2lHOXcwQkFRRUZBQVNDQktnd2dnU2tBZ0VBQW9JQkFRQ3ZJbGNGSmZxaFNidWoKMEN2aWpQNHNsVzdyN0JNZGFSTnh5aDhJMGNaSU5QcjQ5Rjg1dXNrY0pxbnFHNC9LeThBYnlacURBUUxsalFUawpBMW9xcVR4RnU2TEpuSzhiRzdNdm90dStjVlZLWW5SeDlLWVoyWi90THRPdzhjZHFzOURuNXVERVh0L0loZzBRCnNLVUQzSTd1N0IxeW1acU40MFhIWDVMRUJkN1llSm5XZExqOStLOTl6ZVR0aHlUMWtsRGsySVp2ZjVsa2xjT2gKR0cwOUZjbWZReURBZTNlb05tSFlVaUhVU0NORGtnWTV3U3A4V3R6RXEydHBhZEQ2eTVCNVRMS2kvV1l4ZTJLMwprV202Z1MrcEEyL3ZGSGlPd0RzWlNqb1ZncUtMZ0lNSnZMOGR0bitaWjNLbDlMRkZNY0JiMWJ1NCtKN2U1bno3CkU0VUhuMDdKQWdNQkFBRUNnZ0VCQUpYbG9XK2hveE83UlNRZmdBQkhSeUdud1NtaWhIWE93cnJKRWFqOXkyVncKRzBOTC9ka3Vld1ZpUGxwR3Z0c0hhMlVkTitkYXpUem1aMEkxY0U1RlRYWXQ5RlgxaXBaOExmRGV4cEFJOXNSVQo0bS9Ld3dRckZVdnZvWGE0YWtOMHBxQm1Kd2xNWHVPRmdOZEJLZXZWTW0xaW9JMisxTjhPb0dIVjlvdGFydks5ClUzY09CbmVBSjZmamF6ODd4RG1NY0dBcG82ZWdMOG0xaWJ1NUNwcFo2L2J2YVZYbHhFdXRtUjZYR2VKczdBRzMKVEtFYVhzTU1qdFdaM3ZXUDArMFJIMGpzRVI2a0ZMeEI3KzRHRWdPSk1WblZqbjlzT3FhVW41KzJ1REkrdkFkbAo0K2Fya3dwQnpzbGlaUVJLVW95aGwvMTRRZW9pcXpwVk9oVVpheFJOTnpVQ2dZRUE0RC83TmRudGFxa0JSdEdiClZUQTE0clA3Vy90THZSQnpBNWtLc3Q4V3crWFVGeTcvVEY2NkxpMVVtKzhhK2ttY1pMTm9mamNZc1pTMExkVXMKMlR4dk1IRWplcmdNUm5oWmUrOGJBZlZkd1RTcCtpdUpMKzRZWW1JRUZWUWlsSXhtRURzMkZQSnVZMHRDZW9ETAprVEFSeUNtMENPYUR1VXdLZjlMY3h3SFR4M3NDZ1lFQXgrNGpyOHV3aXh3WmwwUFBJb1Z1by9wSHZMT0ZxNXNBCmIrVEZnMEhFTVdIK1JKclhLRjA1YTRGNS9zc3pLZ09ZMGFZVUxlWnp3V1dJZElId0pzQnhGWktOdHRYTkhRbS8KOEFlVGRENnZ1OXlmN0tFZjhRNnFmaDRPRExvVDg0UTFWbGs4ek5ZN0FNUWZwN2p5RnpFOStvSm9tdlM0Snc1SApCZUNLZGZGR1RZc0NnWUVBaitkL0JhZTd1MTZJK3pFM1JRdVRDTkFHMVpnRm1tWWI2SXNsV25QZTRBZDBld3dsCnVKUnhWWUN4Y3YrVmlGZ0VqSHEwNjRuZnh0VnVhcHNLRkwyN2ZKS2QrZnB4cGlkRkJVc0RRZFo3TzZqWUN6bzAKNXhVYmdNYjFaOXA5OW1YQ2VWZ0Y5SnMrUzJuWVYxU2ZUYVJUUk9lK0tKZ0VuN3cwWUtLb0d1MEpRbEVDZ1lCZApZdXJnYm5Ca1NoZmFCQjU0cllM
a3JUOWM4UzM2M2tmeC9CWVdIVjRiQXY3VjVNMmpXUWc5SXhsczNsVmp4cEpYCk94QXA4SDhaVXVmT0kvT2M1ajdzS0t4eFBxUzBiNTFyN04zL2FsaURrNlpQeldNeUlmdVpOVWl5d1NnWWt5U20KMU1BRm5mdXBlL0tkVVZJamF5amNIcFhsNjNFcExRNFh2SzV3TU9iNXlRS0JnR0kzSTAwSTlnbURzS1JrOFkxdQpId1l0dVdrNjFvWEhUTHorR3d6RUNCQ0VnNkZxMjZVeDZmVzBySlVwV3pOVURCNkRRRGxCTGx3S1M4Z1R3eGtGCkRkY3VrbzFHekdlQWYvazEwWktTZmFXNVcwVlloVGVjSDhyZXpReWxwUk5YT3ZNZkFwWUplcnhBZ09yK3hFajUKK2wwalU0MDBTMUx0cWhLVzZMK3kxRVd5Ci0tLS0tRU5EIFBSSVZBVEUgS0VZLS0tLS0K +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: tls +data: + gencert_head.sh: | + #!/bin/sh + ## Create tls.key + openssl genrsa -out /etc/ray/tls/tls.key 2048 + + ## Write CSR Config + cat > /etc/ray/tls/csr.conf < /etc/ray/tls/cert.conf < /etc/ray/tls/csr.conf < /etc/ray/tls/cert.conf < --from-file=ca.key= + +Step 2: Generate individual private keys and self-signed certificates for the Ray head and workers +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The `YAML file +`__, has a ConfigMap named `tls` that +includes two shell scripts: `gencert_head.sh` and `gencert_worker.sh`. These scripts produce the private key +and self-signed certificate files (`tls.key` and `tls.crt`) for both head and worker Pods in the initContainer +of each deployment. By using the initContainer, we can dynamically retrieve the `POD_IP` to the `[alt_names]` section. + +The scripts perform the following steps: first, a 2048-bit RSA private key is generated and saved as +`/etc/ray/tls/tls.key`. Then, a Certificate Signing Request (CSR) is generated using the `tls.key` file +and the `csr.conf` configuration file. Finally, a self-signed certificate (`tls.crt`) is created using +the Certificate Authority's (`ca.key and ca.crt`) keypair and the CSR (`ca.csr`). 
+ +Step 3: Set the environment variables for both Ray head and worker to enable TLS +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ TLS is enabled by setting environment variables. - ``RAY_USE_TLS``: Either 1 or 0 to use/not-use TLS. If this is set to 1 then all of the environment variables below must be set. Default: 0. -- ``RAY_TLS_SERVER_CERT``: Location of a `certificate file` which is presented to other endpoints so as to achieve mutual authentication. -- ``RAY_TLS_SERVER_KEY``: Location of a `private key file` which is the cryptographic means to prove to other endpoints that you are the authorized user of a given certificate. -- ``RAY_TLS_CA_CERT``: Location of a `CA certificate file` which allows TLS to decide whether an endpoint's certificate has been signed by the correct authority. +- ``RAY_TLS_SERVER_CERT``: Location of a `certificate file (tls.crt)`, which is presented to other endpoints to achieve mutual authentication. +- ``RAY_TLS_SERVER_KEY``: Location of a `private key file (tls.key)`, which is the cryptographic means to prove to other endpoints that you are the authorized user of a given certificate. +- ``RAY_TLS_CA_CERT``: Location of a `CA certificate file (ca.crt)`, which allows TLS to decide whether an endpoint's certificate has been signed by the correct authority. + +Step 4: Verify TLS authentication +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. code-block:: bash + # Log in to the worker Pod + kubectl exec -it ${WORKER_POD} -- bash + + # Since the head Pod has the certificate of the full qualified DNS resolution for the Ray head service, the connection to the worker Pods + # is established successfully + ray health-check --address service-ray-head.default.svc.cluster.local:6379 + + # Since service-ray-head hasn't added to the alt_names section in the certificate, the connection fails and an error + # message similar to the following is displayed: "Peer name service-ray-head is not in peer certificate". 
+ ray health-check --address service-ray-head:6379 + + # After you add `DNS.3 = service-ray-head` to the alt_names sections and deploy the YAML again, the connection is able to work. + + +Enabling TLS causes a performance hit due to the extra overhead of mutual +authentication and encryption. +Testing has shown that this overhead is large for small workloads and becomes +relatively smaller for large workloads. +The exact overhead depends on the nature of your workload. Java Applications ----------------- From 610e963d5aef2a20691f9b849c5330c902a1d463 Mon Sep 17 00:00:00 2001 From: Lonnie Liu <95255098+aslonnie@users.noreply.github.com> Date: Tue, 2 May 2023 15:15:28 -0700 Subject: [PATCH 198/424] [CI] Skip invalid python files. (#34981) Signed-off-by: Lonnie Liu --- ci/pipeline/py_dep_analysis.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/ci/pipeline/py_dep_analysis.py b/ci/pipeline/py_dep_analysis.py index c9aa6a701b55..4c5720475487 100644 --- a/ci/pipeline/py_dep_analysis.py +++ b/ci/pipeline/py_dep_analysis.py @@ -168,6 +168,9 @@ def build_dep_graph() -> DepGraph: continue full = _full_module_path(module, f) + if full.startswith("ray.serve.tests.test_config_files."): + # Skip ray serve test files; can contain invalid python code. + continue if full not in graph.ids: graph.ids[full] = len(graph.ids) From c29248519a89176acfb57cb86139b0d59883def5 Mon Sep 17 00:00:00 2001 From: Lonnie Liu <95255098+aslonnie@users.noreply.github.com> Date: Tue, 2 May 2023 15:20:14 -0700 Subject: [PATCH 199/424] Use check_call for docker login. (#34901) So that failure will not be missed. 
Signed-off-by: Lonnie Liu --- .buildkite/copy_files.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.buildkite/copy_files.py b/.buildkite/copy_files.py index 29302f0e3136..7180bf0ed84f 100644 --- a/.buildkite/copy_files.py +++ b/.buildkite/copy_files.py @@ -46,7 +46,7 @@ def perform_auth(): def handle_docker_login(resp): pwd = resp.json()["docker_password"] - subprocess.call( + subprocess.check_call( ["docker", "login", "--username", "raytravisbot", "--password", pwd] ) From 584bee43567c345cf24250bd8d11505008bfa6d5 Mon Sep 17 00:00:00 2001 From: Lonnie Liu <95255098+aslonnie@users.noreply.github.com> Date: Tue, 2 May 2023 15:20:49 -0700 Subject: [PATCH 200/424] Change release tests owner to ci team. (#34899) And also apply buildifer formatting to the `release/BUILD` file. --- release/BUILD | 138 +++++++++++++++++++++++++++++++++++--------------- 1 file changed, 97 insertions(+), 41 deletions(-) diff --git a/release/BUILD b/release/BUILD index 9ef8f23f3ac8..fd2326c4a15c 100644 --- a/release/BUILD +++ b/release/BUILD @@ -5,10 +5,10 @@ compile_pip_requirements( name = "requirements_buildkite", requirements_in = "requirements_buildkite.in", requirements_txt = "requirements_buildkite.txt", - visibility = ["//visibility:private"], tags = [ - "team:core", - ], + "team:ci", + ], + visibility = ["//visibility:private"], ) test_srcs = glob(["**/*.py"]) @@ -197,16 +197,15 @@ py_test( ], ) - #### # AIR smoke tests #### - py_test( name = "air_benchmark_xgboost_smoke_test", size = "small", srcs = test_srcs, + args = ["--smoke-test"], main = "air_tests/air_benchmarks/workloads/xgboost_benchmark.py", tags = [ "exclusive", @@ -216,13 +215,16 @@ py_test( "//:ray_lib", "//python/ray/air:ml_lib", ], - args = ["--smoke-test"] ) py_test( name = "air_benchmark_data_smoke_test", size = "small", srcs = test_srcs, + args = [ + "--dataset-size-gb=1", + "--num-workers=1", + ], main = "air_tests/air_benchmarks/workloads/data_benchmark.py", tags = [ "exclusive", @@ -232,29 
+234,35 @@ py_test( "//:ray_lib", "//python/ray/air:ml_lib", ], - args = ["--dataset-size-gb=1", "--num-workers=1"] ) py_test( - name = "air_benchmark_gpu_batch_prediction_smoke_test", - size = "small", - srcs = test_srcs, - main = "air_tests/air_benchmarks/workloads/gpu_batch_prediction.py", - tags = [ - "exclusive", - "team:ml", - ], - deps = [ - "//:ray_lib", - "//python/ray/air:ml_lib", - ], - args = ["--data-size-gb=1", "--smoke-test"] + name = "air_benchmark_gpu_batch_prediction_smoke_test", + size = "small", + srcs = test_srcs, + args = [ + "--data-size-gb=1", + "--smoke-test", + ], + main = "air_tests/air_benchmarks/workloads/gpu_batch_prediction.py", + tags = [ + "exclusive", + "team:ml", + ], + deps = [ + "//:ray_lib", + "//python/ray/air:ml_lib", + ], ) py_test( name = "air_benchmark_pytorch_training_e2e_smoke_test", size = "small", srcs = test_srcs, + args = [ + "--data-size-gb=1", + "--smoke-test", + ], main = "air_tests/air_benchmarks/workloads/pytorch_training_e2e.py", tags = [ "exclusive", @@ -264,13 +272,22 @@ py_test( "//:ray_lib", "//python/ray/air:ml_lib", ], - args = ["--data-size-gb=1", "--smoke-test"] ) py_test( name = "air_benchmark_tensorflow_smoke_test", size = "large", srcs = test_srcs, + args = [ + "run", + "--num-runs=1", + "--num-epochs=1", + "--num-workers=1", + "--cpus-per-worker=1", + "--batch-size=1", + "--smoke-test", + "--local", + ], main = "air_tests/air_benchmarks/workloads/tensorflow_benchmark.py", tags = [ "exclusive", @@ -280,13 +297,22 @@ py_test( "//:ray_lib", "//python/ray/air:ml_lib", ], - args = ["run", "--num-runs=1", "--num-epochs=1", "--num-workers=1", "--cpus-per-worker=1", "--batch-size=1", "--smoke-test", "--local"] ) py_test( name = "air_benchmark_torch_smoke_test", size = "large", srcs = test_srcs, + args = [ + "run", + "--num-runs=1", + "--num-epochs=1", + "--num-workers=1", + "--cpus-per-worker=1", + "--batch-size=1", + "--smoke-test", + "--local", + ], main = 
"air_tests/air_benchmarks/workloads/torch_benchmark.py", tags = [ "exclusive", @@ -296,7 +322,6 @@ py_test( "//:ray_lib", "//python/ray/air:ml_lib", ], - args = ["run", "--num-runs=1", "--num-epochs=1", "--num-workers=1", "--cpus-per-worker=1", "--batch-size=1", "--smoke-test", "--local"] ) py_test( @@ -305,6 +330,12 @@ py_test( # (runtime is shorter when air_benchmark_torch_smoke_test is executed first) size = "medium", srcs = test_srcs, + args = [ + "--num-runs=1", + "--num-trials=1", + "--num-workers=1", + "--smoke-test", + ], main = "air_tests/air_benchmarks/workloads/tune_torch_benchmark.py", tags = [ "exclusive", @@ -314,80 +345,105 @@ py_test( "//:ray_lib", "//python/ray/air:ml_lib", ], - args = ["--num-runs=1", "--num-trials=1", "--num-workers=1", "--smoke-test"] ) - #### # RELEASE TEST INFRA unit tests #### py_test( name = "test_alerts", - tags = ["team:ci", "release_unit"], size = "small", - srcs = ["ray_release/tests/test_alerts.py"] + srcs = ["ray_release/tests/test_alerts.py"], + tags = [ + "release_unit", + "team:ci", + ], ) py_test( name = "test_anyscale_job_wrapper", - tags = ["team:ci", "release_unit"], size = "small", - srcs = ["ray_release/tests/test_anyscale_job_wrapper.py"] + srcs = ["ray_release/tests/test_anyscale_job_wrapper.py"], + tags = [ + "release_unit", + "team:ci", + ], ) py_test( name = "test_buildkite", - tags = ["team:ci", "release_unit"], size = "small", - srcs = ["ray_release/tests/test_buildkite.py"] + srcs = ["ray_release/tests/test_buildkite.py"], + tags = [ + "release_unit", + "team:ci", + ], ) py_test( name = "test_cluster_manager", - tags = ["team:ci", "release_unit"], size = "small", - srcs = ["ray_release/tests/test_cluster_manager.py"] + srcs = ["ray_release/tests/test_cluster_manager.py"], + tags = [ + "release_unit", + "team:ci", + ], ) py_test( name = "test_config", - tags = ["team:ci", "release_unit"], size = "small", srcs = ["ray_release/tests/test_config.py"], data = ["release_tests.yaml"], + tags = [ + 
"release_unit", + "team:ci", + ], ) py_test( name = "test_env", - tags = ["team:ci", "release_unit"], size = "small", - srcs = ["ray_release/tests/test_env.py"] + srcs = ["ray_release/tests/test_env.py"], + tags = [ + "release_unit", + "team:ci", + ], ) py_test( name = "test_glue", - tags = ["team:ci", "release_unit"], size = "small", - srcs = ["ray_release/tests/test_glue.py"] + srcs = ["ray_release/tests/test_glue.py"], + tags = [ + "release_unit", + "team:ci", + ], ) py_test( name = "test_run_script", - tags = ["team:ci", "release_unit"], size = "small", srcs = ["ray_release/tests/test_run_script.py"], data = [ - "run_release_test.sh", "ray_release/tests/_test_catch_args.py", "ray_release/tests/_test_run_release_test_sh.py", + "run_release_test.sh", + ], + tags = [ + "release_unit", + "team:ci", ], ) py_test( name = "test_wheels", - tags = ["team:ci", "release_unit"], size = "small", srcs = ["ray_release/tests/test_wheels.py"], + tags = [ + "release_unit", + "team:ci", + ], deps = ["//:ray_lib"], ) From f13a38ac21e50263e531516cb7ed18997c519d7b Mon Sep 17 00:00:00 2001 From: Cuong Nguyen <128072568+can-anyscale@users.noreply.github.com> Date: Tue, 2 May 2023 15:40:21 -0700 Subject: [PATCH 201/424] [CI] Deprecate sdk_runner and v1 environments (#34864) --- .../ray_release/command_runner/sdk_runner.py | 206 ------------------ release/ray_release/env.py | 2 +- .../environments/{staging_v2.env => aws.env} | 0 release/ray_release/environments/prod_v1.env | 5 - .../ray_release/environments/staging_v1.env | 5 - release/ray_release/glue.py | 4 +- release/ray_release/tests/test_env.py | 2 +- release/release_tests.yaml | 2 +- 8 files changed, 4 insertions(+), 222 deletions(-) delete mode 100644 release/ray_release/command_runner/sdk_runner.py rename release/ray_release/environments/{staging_v2.env => aws.env} (100%) delete mode 100644 release/ray_release/environments/prod_v1.env delete mode 100644 release/ray_release/environments/staging_v1.env diff --git 
a/release/ray_release/command_runner/sdk_runner.py b/release/ray_release/command_runner/sdk_runner.py deleted file mode 100644 index 7bc088595308..000000000000 --- a/release/ray_release/command_runner/sdk_runner.py +++ /dev/null @@ -1,206 +0,0 @@ -import json -import os -import tempfile -import time -from typing import TYPE_CHECKING, Any, Dict, Optional - -from ray_release.anyscale_util import LAST_LOGS_LENGTH -from ray_release.cluster_manager.cluster_manager import ClusterManager -from ray_release.command_runner.command_runner import CommandRunner -from ray_release.exception import ( - ClusterNodesWaitTimeout, - CommandError, - CommandTimeout, - LogsError, - RemoteEnvSetupError, - FetchResultError, -) -from ray_release.file_manager.file_manager import FileManager -from ray_release.logger import logger -from ray_release.util import ( - exponential_backoff_retry, - format_link, - get_anyscale_sdk, - ANYSCALE_HOST, -) - -if TYPE_CHECKING: - from anyscale.sdk.anyscale_client.sdk import AnyscaleSDK - - -class SDKRunner(CommandRunner): - def __init__( - self, - cluster_manager: ClusterManager, - file_manager: FileManager, - working_dir: str, - sdk: Optional["AnyscaleSDK"] = None, - artifact_path: Optional[str] = None, - ): - super(SDKRunner, self).__init__( - cluster_manager=cluster_manager, - file_manager=file_manager, - working_dir=working_dir, - ) - self.sdk = sdk or get_anyscale_sdk() - - self.last_command_scd_id = None - - def prepare_local_env(self, ray_wheels_url: Optional[str] = None): - pass - - def prepare_remote_env(self): - # Copy wait script to working dir - wait_script = os.path.join(os.path.dirname(__file__), "_wait_cluster.py") - # Copy wait script to working dir - if os.path.exists("wait_cluster.py"): - os.unlink("wait_cluster.py") - os.link(wait_script, "wait_cluster.py") - - # Copy prometheus metrics script to working dir - metrics_script = os.path.join( - os.path.dirname(__file__), "_prometheus_metrics.py" - ) - # Copy wait script to working dir - if 
os.path.exists("prometheus_metrics.py"): - os.unlink("prometheus_metrics.py") - os.link(metrics_script, "prometheus_metrics.py") - - try: - self.file_manager.upload() - except Exception as e: - raise RemoteEnvSetupError( - f"Error setting up remote environment: {e}" - ) from e - - def wait_for_nodes(self, num_nodes: int, timeout: float = 900): - # Wait script should be uploaded already. Kick off command - try: - # Give 30 seconds more to acount for communication - self.run_prepare_command( - f"python wait_cluster.py {num_nodes} {timeout}", timeout=timeout + 30 - ) - except (CommandError, CommandTimeout) as e: - raise ClusterNodesWaitTimeout( - f"Not all {num_nodes} nodes came up within {timeout} seconds." - ) from e - - def save_metrics(self, start_time: float, timeout: float = 900): - self.run_prepare_command( - f"python prometheus_metrics.py {start_time}", timeout=timeout - ) - - def run_command( - self, - command: str, - env: Optional[Dict] = None, - timeout: float = 3600.0, - raise_on_timeout: bool = True, - ) -> float: - full_env = self.get_full_command_env(env) - - if full_env: - env_str = " ".join(f"{k}={v}" for k, v in full_env.items()) + " " - else: - env_str = "" - - full_command = f"{env_str}{command}" - logger.info( - f"Running command in cluster {self.cluster_manager.cluster_name}: " - f"{full_command}" - ) - - logger.info( - f"Link to cluster: " - f"{format_link(self.cluster_manager.get_cluster_url())}" - ) - - result = self.sdk.create_session_command( - dict(session_id=self.cluster_manager.cluster_id, shell_command=full_command) - ) - - scd_id = result.result.id - self.last_command_scd_id = scd_id - - completed = result.result.finished_at is not None - - start_time = time.monotonic() - timeout_at = start_time + timeout - next_status = start_time + 30 - - while not completed: - now = time.monotonic() - if now >= timeout_at: - raise CommandTimeout( - f"Cluster command timed out after {timeout} seconds." 
- ) - - if now >= next_status: - logger.info( - f"... command still running ..." - f"({int(now - start_time)} seconds) ..." - ) - next_status += 30 - - # Sleep 1 sec before next check. - time.sleep(1) - - result = exponential_backoff_retry( - lambda: self.sdk.get_session_command(session_command_id=scd_id), - retry_exceptions=Exception, - initial_retry_delay_s=10, - max_retries=3, - ) - completed = result.result.finished_at - - status_code = result.result.status_code - time_taken = time.monotonic() - start_time - - if status_code != 0: - raise CommandError(f"Command returned non-success status: {status_code}") - - return time_taken - - def get_last_logs_ex(self, scd_id: Optional[str] = None): - scd_id = scd_id or self.last_command_scd_id - if not scd_id: - raise LogsError( - "Must specify scd_id to fetch command logs. Did " - "you already kick off a command?" - ) - - # Todo: It would be nice to get an actual SDK API here - result, _, _ = self.sdk.api_client.call_api( - "/api/v2/session_commands/{session_command_id}/execution_logs", - "GET", - path_params={"session_command_id": scd_id}, - query_params={"start_line": -LAST_LOGS_LENGTH, "end_line": 0}, - header_params={}, - response_type=object, - _host=str(ANYSCALE_HOST), - _preload_content=True, - _return_http_data_only=False, - ) - return result["result"]["lines"] - - def _fetch_json(self, path: str) -> Dict[str, Any]: - try: - tmpfile = tempfile.mktemp() - self.file_manager.download(path, tmpfile) - - with open(tmpfile, "rt") as f: - data = json.load(f) - - os.unlink(tmpfile) - return data - except Exception as e: - raise FetchResultError(f"Could not fetch results from session: {e}") from e - - def fetch_results(self) -> Dict[str, Any]: - return self._fetch_json(self._RESULT_OUTPUT_JSON) - - def fetch_metrics(self) -> Dict[str, Any]: - return self._fetch_json(self._METRICS_OUTPUT_JSON) - - def fetch_artifact(self): - raise NotImplementedError diff --git a/release/ray_release/env.py b/release/ray_release/env.py 
index 98e3f329e154..0ba8a906122c 100644 --- a/release/ray_release/env.py +++ b/release/ray_release/env.py @@ -3,7 +3,7 @@ from ray_release.exception import ReleaseTestConfigError -DEFAULT_ENVIRONMENT = "staging_v2" +DEFAULT_ENVIRONMENT = "aws" def load_environment(environment_name: str) -> Dict[str, str]: diff --git a/release/ray_release/environments/staging_v2.env b/release/ray_release/environments/aws.env similarity index 100% rename from release/ray_release/environments/staging_v2.env rename to release/ray_release/environments/aws.env diff --git a/release/ray_release/environments/prod_v1.env b/release/ray_release/environments/prod_v1.env deleted file mode 100644 index 5548d11add98..000000000000 --- a/release/ray_release/environments/prod_v1.env +++ /dev/null @@ -1,5 +0,0 @@ -ANYSCALE_HOST=https://console.anyscale.com -RELEASE_AWS_ANYSCALE_SECRET_ARN="arn:aws:secretsmanager:us-west-2:029272617770:secret:release-automation/anyscale-token20210505220406333800000001-BcUuKB" -RELEASE_DEFAULT_CLOUD_ID="cld_4F7k8814aZzGG8TNUGPKnc" -RELEASE_DEFAULT_PROJECT="prj_FKRmeV5pA6X72aVscFALNC32" -ANYSCALE_PROJECT="prj_FKRmeV5pA6X72aVscFALNC32" \ No newline at end of file diff --git a/release/ray_release/environments/staging_v1.env b/release/ray_release/environments/staging_v1.env deleted file mode 100644 index 111c6ffeede2..000000000000 --- a/release/ray_release/environments/staging_v1.env +++ /dev/null @@ -1,5 +0,0 @@ -ANYSCALE_HOST=https://console.anyscale-staging.com -RELEASE_AWS_ANYSCALE_SECRET_ARN="arn:aws:secretsmanager:us-west-2:029272617770:secret:release-automation/anyscale-staging-token20221014164754935800000001-pfQunc" -RELEASE_DEFAULT_CLOUD_ID="cld_401TPoxgB8MM6A0NNQauOV" -RELEASE_DEFAULT_PROJECT="prj_qC3ZfndQWYYjx2cz8KWGNUL4" -ANYSCALE_PROJECT="prj_qC3ZfndQWYYjx2cz8KWGNUL4" \ No newline at end of file diff --git a/release/ray_release/glue.py b/release/ray_release/glue.py index 016659626ec0..521bcc73d38b 100644 --- a/release/ray_release/glue.py +++ 
b/release/ray_release/glue.py @@ -12,7 +12,6 @@ from ray_release.command_runner.job_runner import JobRunner from ray_release.command_runner.command_runner import CommandRunner from ray_release.command_runner.anyscale_job_runner import AnyscaleJobRunner -from ray_release.command_runner.sdk_runner import SDKRunner from ray_release.config import ( Test, DEFAULT_BUILD_TIMEOUT, @@ -52,12 +51,11 @@ ) type_str_to_command_runner = { - "sdk_command": SDKRunner, + "job": JobRunner, "anyscale_job": AnyscaleJobRunner, } command_runner_to_cluster_manager = { - SDKRunner: FullClusterManager, JobRunner: FullClusterManager, AnyscaleJobRunner: MinimalClusterManager, } diff --git a/release/ray_release/tests/test_env.py b/release/ray_release/tests/test_env.py index 95eeafc81c90..a87ae0071759 100644 --- a/release/ray_release/tests/test_env.py +++ b/release/ray_release/tests/test_env.py @@ -28,7 +28,7 @@ def test_load_env_invalid(): def test_load_env_changes(): old_val = str(DEFAULT_ANYSCALE_PROJECT) - env_dict = load_environment("staging_v2") + env_dict = load_environment("aws") populate_os_env(env_dict) new_val = str(DEFAULT_ANYSCALE_PROJECT) diff --git a/release/release_tests.yaml b/release/release_tests.yaml index 01cc62e251a0..73aa336e7cb4 100644 --- a/release/release_tests.yaml +++ b/release/release_tests.yaml @@ -43,7 +43,7 @@ # # # Run configuration for the test # run: -# # Type of test. Can be [anyscale_job, sdk_command]. +# # Type of test. Can be [anyscale_job] # # Uses either Ray jobs, anyscale jobs or anyscale SDK commands # # run the actual release test. 
# type: anyscale_job From e36c0fddc590f49415dd2f70837de55b758ce0ad Mon Sep 17 00:00:00 2001 From: Cuong Nguyen <128072568+can-anyscale@users.noreply.github.com> Date: Tue, 2 May 2023 15:40:39 -0700 Subject: [PATCH 202/424] [CI] Remove hash when computing log signature (#34973) --- release/ray_release/log_aggregator.py | 5 ++++- release/ray_release/tests/test_log_aggregator.py | 4 ++-- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/release/ray_release/log_aggregator.py b/release/ray_release/log_aggregator.py index 4617b63bfb8b..28bc5e08a322 100644 --- a/release/ray_release/log_aggregator.py +++ b/release/ray_release/log_aggregator.py @@ -23,7 +23,10 @@ def _compute_signature(stack_trace: List[str]) -> str: """ massaged_trace = [] for line in stack_trace: - line = re.sub(r"\d", "", line.strip()) + # remove any hashes that are more than 10 characters + line = re.sub(r"[a-z0-9]{10,}", "", line.strip()) + # remove any numbers + line = re.sub(r"\d", "", line) if line == "Traceback (most recent call last):": continue file_line = re.search(r'File "(.*)", (.*)', line) diff --git a/release/ray_release/tests/test_log_aggregator.py b/release/ray_release/tests/test_log_aggregator.py index 6cc92e74f3b9..5bc88db568d2 100644 --- a/release/ray_release/tests/test_log_aggregator.py +++ b/release/ray_release/tests/test_log_aggregator.py @@ -24,10 +24,10 @@ def test_compute_signature(): [ "Traceback (most recent call last):", ' File "/tmp/something", line 584', - "Exception: yaya45", + ' File "/tmp/another", deedeebeeaacfa-abc' "Exception: yaya45", ] ) - == "somethingline Exception: yaya" + == "somethingline another-abcException: yaya" ) From b61d4f601762e84638bec492f6797434db0f660a Mon Sep 17 00:00:00 2001 From: Cuong Nguyen <128072568+can-anyscale@users.noreply.github.com> Date: Tue, 2 May 2023 16:31:07 -0700 Subject: [PATCH 203/424] [CI] Fix some core shuffle tests (#34932) Three core shuffle release tests are failing because i3 machine is deprecated. 
The i3 machine is deprecated. Change machines type to m6i. The difference between the two type machine is that i3 uses locally SSD while m6i uses EPS through network bandwidth. EPS seems to work just fine. Signed-off-by: Cuong Nguyen --- release/nightly_tests/dask_on_ray/1tb_sort_compute.yaml | 4 ++-- .../dask_on_ray/chaos_dask_on_ray_stress_compute.yaml | 4 ++-- .../dask_on_ray/dask_on_ray_sort_compute_template.yaml | 2 +- .../dask_on_ray/dask_on_ray_stress_compute.yaml | 4 ++-- .../nightly_tests/dataset/pipelined_ingestion_compute.yaml | 6 +++--- .../nightly_tests/shuffle/shuffle_compute_autoscaling.yaml | 4 ++-- release/nightly_tests/shuffle/shuffle_compute_multi.yaml | 4 ++-- release/nightly_tests/shuffle/shuffle_compute_single.yaml | 2 +- 8 files changed, 15 insertions(+), 15 deletions(-) diff --git a/release/nightly_tests/dask_on_ray/1tb_sort_compute.yaml b/release/nightly_tests/dask_on_ray/1tb_sort_compute.yaml index 1eb827083779..6c098b2f2167 100644 --- a/release/nightly_tests/dask_on_ray/1tb_sort_compute.yaml +++ b/release/nightly_tests/dask_on_ray/1tb_sort_compute.yaml @@ -10,13 +10,13 @@ aws: head_node_type: name: head_node - instance_type: i3.8xlarge + instance_type: m6i.8xlarge resources: cpu: 8 worker_node_types: - name: worker_node - instance_type: i3.8xlarge + instance_type: m6i.8xlarge min_workers: 32 max_workers: 32 use_spot: false diff --git a/release/nightly_tests/dask_on_ray/chaos_dask_on_ray_stress_compute.yaml b/release/nightly_tests/dask_on_ray/chaos_dask_on_ray_stress_compute.yaml index 1368f33934ea..fad7750e2f41 100644 --- a/release/nightly_tests/dask_on_ray/chaos_dask_on_ray_stress_compute.yaml +++ b/release/nightly_tests/dask_on_ray/chaos_dask_on_ray_stress_compute.yaml @@ -10,11 +10,11 @@ aws: head_node_type: name: head_node - instance_type: i3.8xlarge + instance_type: m6i.8xlarge worker_node_types: - name: worker_node - instance_type: i3.8xlarge + instance_type: m6i.8xlarge min_workers: 20 max_workers: 20 use_spot: true diff --git 
a/release/nightly_tests/dask_on_ray/dask_on_ray_sort_compute_template.yaml b/release/nightly_tests/dask_on_ray/dask_on_ray_sort_compute_template.yaml index 4495d182e64a..da67eec060c4 100644 --- a/release/nightly_tests/dask_on_ray/dask_on_ray_sort_compute_template.yaml +++ b/release/nightly_tests/dask_on_ray/dask_on_ray_sort_compute_template.yaml @@ -10,6 +10,6 @@ aws: head_node_type: name: head_node - instance_type: i3.8xlarge + instance_type: m6i.8xlarge worker_node_types: [] diff --git a/release/nightly_tests/dask_on_ray/dask_on_ray_stress_compute.yaml b/release/nightly_tests/dask_on_ray/dask_on_ray_stress_compute.yaml index 988a2298a0fb..e249486f0377 100644 --- a/release/nightly_tests/dask_on_ray/dask_on_ray_stress_compute.yaml +++ b/release/nightly_tests/dask_on_ray/dask_on_ray_stress_compute.yaml @@ -10,11 +10,11 @@ aws: head_node_type: name: head_node - instance_type: i3.8xlarge + instance_type: m6i.8xlarge worker_node_types: - name: worker_node - instance_type: i3.8xlarge + instance_type: m6i.8xlarge min_workers: 20 max_workers: 20 use_spot: false diff --git a/release/nightly_tests/dataset/pipelined_ingestion_compute.yaml b/release/nightly_tests/dataset/pipelined_ingestion_compute.yaml index 65d377d83643..b8b25b2def6c 100644 --- a/release/nightly_tests/dataset/pipelined_ingestion_compute.yaml +++ b/release/nightly_tests/dataset/pipelined_ingestion_compute.yaml @@ -12,16 +12,16 @@ aws: head_node_type: name: head_node - instance_type: i3.8xlarge + instance_type: m6i.8xlarge worker_node_types: - name: memory_node - instance_type: i3.8xlarge + instance_type: m6i.8xlarge min_workers: 20 max_workers: 20 use_spot: false - name: gpu_node - instance_type: i3.8xlarge + instance_type: m6i.8xlarge min_workers: 4 max_workers: 4 use_spot: false diff --git a/release/nightly_tests/shuffle/shuffle_compute_autoscaling.yaml b/release/nightly_tests/shuffle/shuffle_compute_autoscaling.yaml index 4257f215b5d8..38091a3f12b6 100644 --- 
a/release/nightly_tests/shuffle/shuffle_compute_autoscaling.yaml +++ b/release/nightly_tests/shuffle/shuffle_compute_autoscaling.yaml @@ -10,11 +10,11 @@ aws: head_node_type: name: head_node - instance_type: i3.4xlarge + instance_type: m6i.4xlarge worker_node_types: - name: worker_node - instance_type: i3.4xlarge + instance_type: m6i.4xlarge min_workers: 0 max_workers: 19 use_spot: false diff --git a/release/nightly_tests/shuffle/shuffle_compute_multi.yaml b/release/nightly_tests/shuffle/shuffle_compute_multi.yaml index 38af0ffe31c2..a726988aeda0 100644 --- a/release/nightly_tests/shuffle/shuffle_compute_multi.yaml +++ b/release/nightly_tests/shuffle/shuffle_compute_multi.yaml @@ -12,12 +12,12 @@ aws: head_node_type: name: head_node - instance_type: i3.4xlarge + instance_type: m6i.4xlarge resources: {"object_store_memory": 21474836480} worker_node_types: - name: worker_node2 - instance_type: i3.4xlarge + instance_type: m6i.4xlarge min_workers: 3 max_workers: 3 use_spot: false diff --git a/release/nightly_tests/shuffle/shuffle_compute_single.yaml b/release/nightly_tests/shuffle/shuffle_compute_single.yaml index 7ec607996cd8..df8d84edc81f 100644 --- a/release/nightly_tests/shuffle/shuffle_compute_single.yaml +++ b/release/nightly_tests/shuffle/shuffle_compute_single.yaml @@ -12,7 +12,7 @@ aws: head_node_type: name: head_node - instance_type: i3.4xlarge + instance_type: m6i.4xlarge resources: {"object_store_memory": 21474836480} worker_node_types: [] From d2914fcd127f7a610f3c7efd7dd8e02c2f151431 Mon Sep 17 00:00:00 2001 From: Yunxuan Xiao Date: Tue, 2 May 2023 18:44:41 -0700 Subject: [PATCH 204/424] [CI] Add Lightning 2.0 compatibility test pipeline again with bugfix. 
(#34960) --- .buildkite/pipeline.gpu_large.yml | 13 +++++++ ci/ci.sh | 2 + ci/env/install-minimal.sh | 7 ++++ python/ray/train/BUILD | 8 ++-- .../ray/train/tests/lightning_test_utils.py | 37 ++++++++++++++----- .../train/tests/test_lightning_checkpoint.py | 10 ++++- .../train/tests/test_lightning_predictor.py | 2 +- .../ray/train/tests/test_lightning_trainer.py | 4 +- 8 files changed, 64 insertions(+), 19 deletions(-) diff --git a/.buildkite/pipeline.gpu_large.yml b/.buildkite/pipeline.gpu_large.yml index 2f993cd96546..e15ee57050ea 100644 --- a/.buildkite/pipeline.gpu_large.yml +++ b/.buildkite/pipeline.gpu_large.yml @@ -49,3 +49,16 @@ - pip install -Ur ./python/requirements/ml/requirements_ml_docker.txt - ./ci/env/env_info.sh - bazel test --config=ci $(./ci/run/bazel_export_options) --build_tests_only --test_tag_filters=gpu,-timeseries_libs,-py37,-post_wheel_build doc/... + +- label: ":zap: :python: Lightning 2.0 Train GPU tests" + conditions: + ["NO_WHEELS_REQUIRED", "RAY_CI_TRAIN_AFFECTED"] + commands: + - cleanup() { if [ "${BUILDKITE_PULL_REQUEST}" = "false" ]; then ./ci/build/upload_build_info.sh; fi }; trap cleanup EXIT + - NO_DASHBOARD=1 ./ci/env/install-minimal.sh 3.8 + - PYTHON=3.8 DOC_TESTING=1 TRAIN_TESTING=1 TUNE_TESTING=1 ./ci/env/install-dependencies.sh + - pip install -Ur ./python/requirements/ml/requirements_ml_docker.txt + - pip uninstall -y pytorch-lightning + - pip install lightning==2.0.0 + - ./ci/env/env_info.sh + - bazel test --config=ci $(./scripts/bazel_export_options) --test_tag_filters=ptl_v2 python/ray/train/... 
\ No newline at end of file diff --git a/ci/ci.sh b/ci/ci.sh index 5a3703d161f8..5bf5ae3ec4d3 100755 --- a/ci/ci.sh +++ b/ci/ci.sh @@ -287,6 +287,8 @@ install_npm_project() { build_dashboard_front_end() { if [ "${OSTYPE}" = msys ]; then { echo "WARNING: Skipping dashboard due to NPM incompatibilities with Windows"; } 2> /dev/null + elif [ "${NO_DASHBOARD-}" = "1" ]; then + echo "Skipping dashboard build" else ( cd ray/dashboard/client diff --git a/ci/env/install-minimal.sh b/ci/env/install-minimal.sh index e99e453ea11e..045239badc48 100755 --- a/ci/env/install-minimal.sh +++ b/ci/env/install-minimal.sh @@ -1,5 +1,12 @@ #!/usr/bin/env bash +if [ "$1" == "3.11" ]; then + # TODO: fix build wheels unsupported tags in the future + echo "'set -xe' not working for Python 3.11" +else + set -xe +fi + # Python version can be specified as 3.7, 3.8, 3.9, etc.. if [ -z "$1" ]; then PYTHON_VERSION=${PYTHON-3.7} diff --git a/python/ray/train/BUILD b/python/ray/train/BUILD index e2a244455ace..1d932ca912a8 100644 --- a/python/ray/train/BUILD +++ b/python/ray/train/BUILD @@ -412,7 +412,7 @@ py_test( name = "test_lightning_checkpoint", size = "medium", srcs = ["tests/test_lightning_checkpoint.py"], - tags = ["team:ml", "exclusive", "ray_air", "gpu"], + tags = ["team:ml", "exclusive", "ray_air", "gpu", "ptl_v2"], deps = [":train_lib"] ) @@ -420,7 +420,7 @@ py_test( name = "test_lightning_trainer_restore", size = "medium", srcs = ["tests/test_lightning_trainer_restore.py"], - tags = ["team:ml", "exclusive", "ray_air", "gpu"], + tags = ["team:ml", "exclusive", "ray_air", "gpu", "ptl_v2"], deps = [":train_lib"] ) @@ -428,7 +428,7 @@ py_test( name = "test_lightning_trainer", size = "large", srcs = ["tests/test_lightning_trainer.py"], - tags = ["team:ml", "exclusive", "ray_air", "gpu"], + tags = ["team:ml", "exclusive", "ray_air", "gpu", "ptl_v2"], deps = [":train_lib"] ) @@ -436,7 +436,7 @@ py_test( name = "test_lightning_predictor", size = "medium", srcs = 
["tests/test_lightning_predictor.py"], - tags = ["team:ml", "exclusive", "ray_air", "gpu"], + tags = ["team:ml", "exclusive", "ray_air", "gpu", "ptl_v2"], deps = [":train_lib"] ) diff --git a/python/ray/train/tests/lightning_test_utils.py b/python/ray/train/tests/lightning_test_utils.py index 68b925098d00..36288308cf59 100644 --- a/python/ray/train/tests/lightning_test_utils.py +++ b/python/ray/train/tests/lightning_test_utils.py @@ -7,9 +7,11 @@ class LinearModule(pl.LightningModule): - def __init__(self, input_dim, output_dim) -> None: + def __init__(self, input_dim, output_dim, strategy="ddp") -> None: super().__init__() self.linear = nn.Linear(input_dim, output_dim) + self.loss = [] + self.strategy = strategy def forward(self, input): # Backwards compat for Ray data strict mode. @@ -25,17 +27,23 @@ def training_step(self, batch): def validation_step(self, val_batch, batch_idx): loss = self.forward(val_batch) + self.loss.append(loss) return {"val_loss": loss} - def validation_epoch_end(self, outputs) -> None: - avg_loss = torch.stack([x["val_loss"] for x in outputs]).mean() + def on_validation_epoch_end(self) -> None: + avg_loss = torch.stack(self.loss).mean() self.log("val_loss", avg_loss) + self.loss.clear() def predict_step(self, batch, batch_idx): return self.forward(batch) def configure_optimizers(self): - return torch.optim.SGD(self.parameters(), lr=0.1) + if self.strategy == "fsdp": + # Feed FSDP wrapped model parameters to optimizer + return torch.optim.SGD(self.trainer.model.parameters(), lr=0.1) + else: + return torch.optim.SGD(self.parameters(), lr=0.1) class DoubleLinearModule(pl.LightningModule): @@ -43,6 +51,7 @@ def __init__(self, input_dim_1, input_dim_2, output_dim) -> None: super().__init__() self.linear_1 = nn.Linear(input_dim_1, output_dim) self.linear_2 = nn.Linear(input_dim_2, output_dim) + self.loss = [] def forward(self, batch): input_1 = batch["input_1"] @@ -57,12 +66,14 @@ def training_step(self, batch): def validation_step(self, 
val_batch, batch_idx): loss = self.forward(val_batch) + self.loss.append(loss) return {"val_loss": loss} - def validation_epoch_end(self, outputs) -> None: + def on_validation_epoch_end(self) -> None: print("Validation Epoch:", self.current_epoch) - avg_loss = torch.stack([x["val_loss"] for x in outputs]).mean() + avg_loss = torch.stack(self.loss).mean() self.log("val_loss", avg_loss) + self.loss.clear() def predict_step(self, batch, batch_idx): return self.forward(batch) @@ -94,7 +105,9 @@ def __init__(self, lr: float, layer_1: int, layer_2: int): self.layer_1 = torch.nn.Linear(28 * 28, layer_1) self.layer_2 = torch.nn.Linear(layer_1, layer_2) self.layer_3 = torch.nn.Linear(layer_2, 10) - self.accuracy = Accuracy() + self.accuracy = Accuracy(task="multiclass", num_classes=10) + self.val_acc_list = [] + self.val_loss_list = [] def forward(self, x): batch_size, channels, width, height = x.size() @@ -124,13 +137,17 @@ def validation_step(self, val_batch, batch_idx): logits = self.forward(x) loss = F.nll_loss(logits, y) acc = self.accuracy(logits, y) + self.val_acc_list.append(acc) + self.val_loss_list.append(loss) return {"val_loss": loss, "val_accuracy": acc} - def validation_epoch_end(self, outputs): - avg_loss = torch.stack([x["val_loss"] for x in outputs]).mean() - avg_acc = torch.stack([x["val_accuracy"] for x in outputs]).mean() + def on_validation_epoch_end(self): + avg_loss = torch.stack(self.val_loss_list).mean() + avg_acc = torch.stack(self.val_acc_list).mean() self.log("ptl/val_loss", avg_loss) self.log("ptl/val_accuracy", avg_acc) + self.val_acc_list.clear() + self.val_loss_list.clear() def predict_step(self, batch, batch_idx, dataloader_idx=None): x = batch diff --git a/python/ray/train/tests/test_lightning_checkpoint.py b/python/ray/train/tests/test_lightning_checkpoint.py index e253bb2a8b85..64bcd40b32be 100644 --- a/python/ray/train/tests/test_lightning_checkpoint.py +++ b/python/ray/train/tests/test_lightning_checkpoint.py @@ -38,7 +38,10 @@ def 
test_load_from_path(): # Train one epoch and save a checkpoint trainer = pl.Trainer( - max_epochs=1, enable_progress_bar=False, enable_checkpointing=False + max_epochs=1, + accelerator="cpu", + enable_progress_bar=False, + enable_checkpointing=False, ) trainer.fit(model=model, train_dataloaders=dataloader) ckpt_path = f"{tmpdir}/random_checkpoint_name.ckpt" @@ -75,7 +78,10 @@ def test_from_directory(): # Train one epoch and save a checkpoint trainer = pl.Trainer( - max_epochs=1, enable_progress_bar=False, enable_checkpointing=False + max_epochs=1, + accelerator="cpu", + enable_progress_bar=False, + enable_checkpointing=False, ) trainer.fit(model=model, train_dataloaders=dataloader) trainer.save_checkpoint(f"{tmpdir}/{MODEL_KEY}") diff --git a/python/ray/train/tests/test_lightning_predictor.py b/python/ray/train/tests/test_lightning_predictor.py index 49ee42073b16..2c34b5dcc984 100644 --- a/python/ray/train/tests/test_lightning_predictor.py +++ b/python/ray/train/tests/test_lightning_predictor.py @@ -28,7 +28,7 @@ def test_repr(): def save_checkpoint(model: pl.LightningModule, ckpt_path: str): - trainer = pl.Trainer(max_epochs=0) + trainer = pl.Trainer(max_epochs=0, accelerator="cpu") trainer.fit(model, train_dataloaders=DataLoader(torch.randn(1))) trainer.save_checkpoint(ckpt_path) diff --git a/python/ray/train/tests/test_lightning_trainer.py b/python/ray/train/tests/test_lightning_trainer.py index a35f37ac54e9..aab21fb4a6d1 100644 --- a/python/ray/train/tests/test_lightning_trainer.py +++ b/python/ray/train/tests/test_lightning_trainer.py @@ -74,7 +74,7 @@ def test_trainer_with_native_dataloader( config_builder = ( LightningConfigBuilder() - .module(LinearModule, input_dim=32, output_dim=4) + .module(LinearModule, input_dim=32, output_dim=4, strategy=strategy) .trainer(max_epochs=num_epochs, accelerator=accelerator) .strategy(strategy) ) @@ -124,7 +124,7 @@ def test_trainer_with_ray_data(ray_start_6_cpus_2_gpus, strategy, accelerator): lightning_config = ( 
LightningConfigBuilder() - .module(cls=LinearModule, input_dim=32, output_dim=4) + .module(cls=LinearModule, input_dim=32, output_dim=4, strategy=strategy) .trainer(max_epochs=num_epochs, accelerator=accelerator) .strategy(strategy) .build() From 9b2c37ebe70ffcaafa55a25709ebf18f68b2f63f Mon Sep 17 00:00:00 2001 From: Balaji Veeramani Date: Tue, 2 May 2023 19:52:08 -0700 Subject: [PATCH 205/424] [Data] Add cross-references to batch inference guide (#34988) Signed-off-by: Balaji Veeramani --- doc/source/data/batch_inference.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/source/data/batch_inference.rst b/doc/source/data/batch_inference.rst index 585520aa4d7f..069786a3a3cc 100644 --- a/doc/source/data/batch_inference.rst +++ b/doc/source/data/batch_inference.rst @@ -203,7 +203,7 @@ We cover resource allocation in more detail in :ref:`the configuration section o Defining your :meth:`ds.map_batches() ` function requires you to write a Python function that takes a batch of data and returns a batch of predictions. - An easy way to do this and validate it is to use ``ds.take_batch(N)`` to get a batch of data + An easy way to do this and validate it is to use :meth:`ds.take_batch(N) ` to get a batch of data first, and then locally test your predictor function on that batch, without using Ray. Once you are happy with the results, you can use the same function in ``map_batches`` on the full dataset. The examples below show you how. @@ -309,7 +309,7 @@ to learn more about loading data with Ray Data, but we'll cover the basics here, .. annotations:: <1> We use one gigabyte of image data from the Imagenet dataset from S3. - <2> We use ``read_images`` from Ray Data and limit the number of images to 1000. + <2> We use :func:`read_images ` from Ray Data and limit the number of images to 1000. The process of loading data with Ray Data is as diverse as the data you have. 
For instance, in the example above we didn't load the text labels for our images, From f30aa499d42fa504a99517958aae30b3eababb18 Mon Sep 17 00:00:00 2001 From: Eric Liang Date: Tue, 2 May 2023 22:51:42 -0700 Subject: [PATCH 206/424] [data] [docs] Update dataset docs to reflect strict mode simplifications (#34880) Update documentation to reflect new APIs. Revamp transforming data page to address feedback of verbosity. Minor rewording / refactoring of other copy. --- doc/BUILD | 2 +- doc/source/data/batch_inference.rst | 127 ++-- ...ing-datastreams.rst => consuming-data.rst} | 35 +- doc/source/data/custom-datasource.rst | 2 +- doc/source/data/data-internals.rst | 33 +- doc/source/data/data-tensor-support.rst | 110 +-- doc/source/data/data.rst | 19 +- doc/source/data/doc_code/batch_formats.py | 9 +- ...uming_datastreams.py => consuming_data.py} | 23 +- doc/source/data/doc_code/hf_quick_start.py | 8 +- ...reating_datastreams.py => loading_data.py} | 137 ++-- ...s_untested.py => loading_data_untested.py} | 0 .../data/doc_code/pytorch_quick_start.py | 9 +- .../{saving_datastreams.py => saving_data.py} | 18 +- doc/source/data/doc_code/tensor.py | 261 +------ doc/source/data/doc_code/tf_quick_start.py | 7 +- .../doc_code/torch_image_batch_trained.py | 19 +- doc/source/data/doc_code/transforming_data.py | 201 ++++++ .../data/doc_code/transforming_datastreams.py | 679 ------------------ doc/source/data/examples/batch_training.ipynb | 2 +- .../examples/nyc_taxi_basic_processing.ipynb | 2 +- doc/source/data/examples/ocr_example.ipynb | 2 +- doc/source/data/faq.rst | 3 +- doc/source/data/getting-started.rst | 76 +- doc/source/data/glossary.rst | 76 +- doc/source/data/key-concepts.rst | 8 +- ...ating-datastreams.rst => loading-data.rst} | 228 ++---- doc/source/data/performance-tips.rst | 52 +- doc/source/data/transforming-data.rst | 284 ++++++++ doc/source/data/transforming-datastreams.rst | 565 --------------- doc/source/data/user-guide.rst | 14 +- 
doc/source/ray-air/computer-vision.rst | 4 +- .../examples/gptj_batch_prediction.ipynb | 2 +- .../stablediffusion_batch_prediction.ipynb | 2 +- doc/source/ray-overview/use-cases.rst | 2 +- .../templates/01_batch_inference/README.md | 6 +- .../01_batch_inference/batch_inference.ipynb | 6 +- .../lightning/lightning_cola_advanced.ipynb | 6 +- python/ray/data/datastream.py | 6 +- 39 files changed, 926 insertions(+), 2119 deletions(-) rename doc/source/data/{consuming-datastreams.rst => consuming-data.rst} (75%) rename doc/source/data/doc_code/{consuming_datastreams.py => consuming_data.py} (90%) rename doc/source/data/doc_code/{creating_datastreams.py => loading_data.py} (73%) rename doc/source/data/doc_code/{creating_datastreams_untested.py => loading_data_untested.py} (100%) rename doc/source/data/doc_code/{saving_datastreams.py => saving_data.py} (89%) create mode 100644 doc/source/data/doc_code/transforming_data.py delete mode 100644 doc/source/data/doc_code/transforming_datastreams.py rename doc/source/data/{creating-datastreams.rst => loading-data.rst} (71%) create mode 100644 doc/source/data/transforming-data.rst delete mode 100644 doc/source/data/transforming-datastreams.rst diff --git a/doc/BUILD b/doc/BUILD index be989a0d7b07..61958193ca1e 100644 --- a/doc/BUILD +++ b/doc/BUILD @@ -223,7 +223,7 @@ py_test_run_all_subdirectory( include = ["source/data/doc_code/*.py"], exclude = [ "source/ray-air/doc_code/predictors.py", - "source/data/doc_code/creating_datastreams_untested.py" + "source/data/doc_code/loading_data_untested.py" ], extra_srcs = [], tags = ["exclusive", "team:data"], diff --git a/doc/source/data/batch_inference.rst b/doc/source/data/batch_inference.rst index 069786a3a3cc..0c605eeaf96b 100644 --- a/doc/source/data/batch_inference.rst +++ b/doc/source/data/batch_inference.rst @@ -1,12 +1,12 @@ .. _batch_inference_home: -Running Batch Inference with Ray -================================ +Running Batch Inference +======================= .. 
note:: In this tutorial you'll learn what batch inference is, why you might want to use - Ray for it, and how to use Ray effectively for this task. + Ray for it, and how to use Ray Data effectively for this task. If you are familiar with the basics of inference tasks, jump straight to code in the :ref:`quickstart section ` or the :ref:`advanced guide`. @@ -42,8 +42,8 @@ Here's a realistic view of batch inference for modern AI applications: Evaluating a batch of input data with a model to get predictions. -Why use Ray for batch inference? ---------------------------------- +Why use Ray Data for batch inference? +------------------------------------- There are reasons to use Ray for batch inference, even if your current use case does not require scaling yet: @@ -74,9 +74,9 @@ Install Ray with the data processing library, Ray Data: Running batch inference is conceptually easy and requires three steps: -1. Load your data into a Ray dataset and optionally apply any preprocessing you need. +1. Load your data and optionally apply any preprocessing you need. 2. Define your model for inference. -3. Run inference on your data by using the :meth:`ds.map_batches() ` +3. Run inference on your data by using the :meth:`ds.map_batches() ` method from Ray Data. The last step also defines how your batch processing job gets distributed across your (local) cluster. @@ -92,9 +92,9 @@ We start with very simple use cases here and build up to more complex ones in ot 1. Loading and preprocessing data ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -For this quick start guide we use very small, in-memory data sets by +For this quick start guide we use very small, in-memory datasets by leveraging common Python libraries like NumPy and Pandas. -In general, once you load your datasets using Ray Data, you also want to apply some preprocessing steps. +In general, once you load your data using Ray Data, you also want to apply some preprocessing steps. We skip this step here for simplicity. 
In any case, the result of this step is a Ray Datastream ``ds`` that we can use to run inference on. @@ -128,12 +128,12 @@ In any case, the result of this step is a Ray Datastream ``ds`` that we can use .. group-tab:: TensorFlow Create a NumPy array with 100 - entries, which represents the input to a feed-forward neural network. + entries, which represents the input to a feed-forward neural network. - .. literalinclude:: ./doc_code/tf_quick_start.py - :language: python - :start-after: __tf_quickstart_load_start__ - :end-before: __tf_quickstart_load_end__ + .. literalinclude:: ./doc_code/tf_quick_start.py + :language: python + :start-after: __tf_quickstart_load_start__ + :end-before: __tf_quickstart_load_end__ 2. Setting up your model ~~~~~~~~~~~~~~~~~~~~~~~~ @@ -193,7 +193,7 @@ Below you find examples for PyTorch, TensorFlow, and HuggingFace. ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Once you have your Ray Datastream ``ds`` and your predictor class, you can use -:meth:`ds.map_batches() ` to get predictions. +:meth:`ds.map_batches() ` to get predictions. ``map_batches`` takes your predictor class as an argument and allows you to specify ``compute`` resources by defining the :class:`ActorPoolStrategy `. In the example below, we use two CPUs to run inference in parallel and then print the results. @@ -280,23 +280,23 @@ Loading data with Ray Data In the quick start guide we glossed over the details of loading data with Ray Data. Your data might be stored in a variety of formats, and you might want to load it from different sources. Ray Data supports multiple formats and sources out of the box. -The :ref:`guide to creating datasets ` is the ultimate resource +The :ref:`guide to loading data ` is the ultimate resource to learn more about loading data with Ray Data, but we'll cover the basics here, too. .. 
hint:: - With Ray Data, you can :ref:`create synthetic data in Python`, - :ref:`load data from various storage solutions` such as S3, + With Ray Data, you can :ref:`create synthetic data in Python `, + :ref:`load data from various storage solutions ` such as S3, HDFS, or GCS, using common formats such as CSV, JSON, Text, Images, Binary, TFRecords, Parquet, and more. Ray Data also supports reading from common SQL and NoSQL databases, and allows you to define your own, custom data sources. - You can also read :ref:`common Python library formats ` + You can also read :ref:`common Python library formats ` such as Pandas, NumPy, Arrow, or plain Python objects, as well as from - :ref:`distributed data processing frameworks ` + :ref:`distributed data processing frameworks ` such as Spark, Dask, Modin, or Mars. - Of course, Ray Data also supports :ref:`reading data from common ML frameworks ` + Of course, Ray Data also supports :ref:`reading data from common ML frameworks ` like PyTorch, TensorFlow or HuggingFace. .. callout:: @@ -315,7 +315,7 @@ The process of loading data with Ray Data is as diverse as the data you have. For instance, in the example above we didn't load the text labels for our images, which would require a different data source and loading function. For any advanced use cases, we recommend you read the -:ref:`guide to creating datasets `. +:ref:`guide to loading data `. Preprocessing with Ray Data ~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -323,22 +323,22 @@ Preprocessing with Ray Data After loading your data, it often needs to be preprocessed prior to inference. This may include cropping or resizing images, or tokenizing raw text. -To introduce common terminology, with :ref:`Ray Data ` you can define -:term:`user-defined functions` (UDFs) that transform batches of your data. -As you've seen before, applying these UDFs via -:meth:`ds.map_batches() ` outputs a new, transformed dataset. 
+To introduce common terminology, with :ref:`Ray Data ` you can define +user-defined functions that transform batches of your data. +As you've seen before, applying these functions via +:meth:`ds.map_batches() ` outputs a new, transformed datastream. .. note:: The way we do preprocessing here is conceptually close to how we do batch - inference, and we use the same :meth:`ds.map_batches() ` + inference, and we use the same :meth:`ds.map_batches() ` call from Ray Data to run this task. The main difference is that we don't use a machine learning model to transform our data, which has some practical consequences. For instance, in the example below we simply define a map function that we pass into ``map_batches``, and not a class. To transform our raw images loaded from S3 in the last step, we use functionality from -the ``torchvision`` package to define a UDF called ``preprocess_images``. +the ``torchvision`` package to define a function called ``preprocess_images``. .. callout:: @@ -350,26 +350,17 @@ the ``torchvision`` package to define a UDF called ``preprocess_images``. .. annotations:: <1> We compose PyTorch tensor creation with image preprocessing, so that our processed images "fit" into a ``ResNet18`` PyTorch model. - <2> We then define a simple UDF to transform batches of raw data accordingly. Note that these batches come as dictionaries of NumPy images stored in the ``"images"`` key. + <2> We then define a simple function to transform batches of raw data accordingly. Note that these batches come as dictionaries of NumPy images stored in the ``"images"`` key. - <3> Finally, we apply the UDF to our dataset using ``map_batches``. + <3> Finally, we apply the function to our datastream using ``map_batches``. .. tip:: For the full suite of transformations available in Ray Data, read - :ref:`the data transformation guide `. - -.. 
caution:: - - Depending on how you load your data and what input data format you use, the dataset - loaded with :ref:`Ray Data ` will have different *batch formats*. - For instance, image data might be naturally stored in NumPy format, while tabular - data makes much more sense as a Pandas DataFrame. - What (default) batch format your data has and how to deal with it is explained in - detail in :ref:`the batch format section `. + :ref:`the data transformation guide `. -Defining predictors as stateful UDFs -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Defining predictors as stateful classes +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ One of the key value adds of Ray over other distributed systems is the support for distributed stateful operations. These stateful operations are especially useful @@ -378,12 +369,12 @@ for inference since the model only needs to be initialized once, instead of per .. margin:: In short, running model inference means applying - :meth:`ds.map_batches() ` - to a dataset with a trained model as a UDF. + :meth:`ds.map_batches() ` + to a datastream with a trained model as a class. You've already seen how to do this in the quickstart section of this guide, but now that you're equipped with more knowledge, let's have a look at how to define a -stateful UDF with Ray for our pretrained ResNet model: +stateful class with Ray for our pretrained ResNet model: .. callout:: @@ -397,7 +388,7 @@ stateful UDF with Ray for our pretrained ResNet model: <2> The ``__call__`` method is used to apply the model to a batch of data. - <3> We're free to use any custom code in a stateful UDF, and here we prepare the data to run on GPUs. + <3> We're free to use any custom code in a stateful class, and here we prepare the data to run on GPUs. <4> Finally, we return the ``"class"`` key of the model predictions as Numpy array. 
@@ -405,7 +396,7 @@ stateful UDF with Ray for our pretrained ResNet model: Scalable inference with Ray Data ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -To get predictions, we call :meth:`ds.map_batches() `, +To get predictions, we call :meth:`ds.map_batches() `, by making sure to specify a :class:`ActorPoolStrategy ` which defines how many workers to use for inference. @@ -417,19 +408,19 @@ which defines how many workers to use for inference. :end-before: __pt_prediction_end__ .. annotations:: - <1> In this example we use a total of four Ray Actors to run inference on our dataset. + <1> In this example we use a total of four Ray Actors to run inference on our datastream. <2> Each actor should use one GPU. -To summarize, mapping a UDF over batches is the simplest transform for Ray Datastreams. -The UDF defines the logic for transforming individual batches of data of the dataset +To summarize, mapping a function over batches is the simplest transform for Ray Datastreams. +The function defines the logic for transforming individual batches of data of the datastream Performing operations over batches of data is more performant than single element operations as it can leverage the underlying vectorization capabilities of Pandas or NumPy. .. note:: - You can use :meth:`ds.map_batches() ` on functions, too. + You can use :meth:`ds.map_batches() ` on functions, too. This is mostly useful for quick transformations of your data that doesn't require an ML model or other stateful objects. To handle state, using classes like we did above is the recommended way. @@ -458,9 +449,9 @@ Now that you've seen examples of batch inference with Ray, let's have a closer l at how to deal with different data formats for batches. First of all, you need to distinguish between two types of batch formats: -- Input batch formats: This is the format of the input to your UDFs. You will often have to +- Input batch formats: This is the format of the input to your transformation function. 
You will often have to
refer to the right format name to run batch inference on your data.
-- Output batch formats: This is the format your UDFs return.
+- Output batch formats: This is the format your function returns.

In many standard cases, the input batch format is the same as the output batch format,
but it's good to be aware of the differences.
@@ -505,10 +496,11 @@ as you see fit.
   :start-after: __simple_pyarrow_start__
   :end-before: __simple_pyarrow_end__

-When defining the return value of your UDF, you can choose between
-Pandas dataframes (``pandas.DataFrame``), Arrow tables
-(``pyarrow.Table``), or dictionaries of NumPy arrays (``Dict[str, np.ndarray]``).
-You can learn more about output formats in :ref:`the output format guide`.
+When defining the return value of your function, you can choose between
+dictionaries of NumPy arrays (``Dict[str, np.ndarray]``), Pandas dataframes
+(``pandas.DataFrame``), and Arrow tables (``pyarrow.Table``).
+
+You can learn more about output formats in :ref:`the transforming data guide `.

.. important::

@@ -519,7 +511,7 @@ You can learn more about output formats in :ref:`the output format guide`
+
As we've discussed in this guide, using :meth:`ds.map_batches() `
on a class
defining your model should be your default choice for running inference with Ray.
For instance, if you're already using the Ray AIR framework for running your ML workflows,
@@ -537,14 +529,15 @@ You can learn more about output formats in :ref:`the output format guide`

-is ``batch_size``, which controls the size of the batches provided to the UDF.
+An important parameter to set for :meth:`ds.map_batches() `
+is ``batch_size``, which controls the size of the batches provided to the function.
Here's a simple example of loading the IRIS dataset (which has Pandas format by default) and processing it with a batch size of `10`: @@ -581,8 +574,8 @@ Here's a quick example for a PyTorch model: + self.model = self.model.cuda() self.model.eval() - def __call__(self, batch: List[torch.Tensor]): - torch_batch = torch.stack(batch) + def __call__(self, batch: Dict[str, np.ndarray]): + torch_batch = torch.stack(batch["data"]) + torch_batch = torch_batch.cuda() with torch.inference_mode(): prediction = self.model(torch_batch) @@ -590,12 +583,12 @@ Here's a quick example for a PyTorch model: + return {"class": prediction.argmax(dim=1).detach().cpu().numpy()} -Next, specify ``num_gpus=N`` in :meth:`ds.map_batches() ` +Next, specify ``num_gpus=N`` in :meth:`ds.map_batches() ` to indicate that each inference worker should use ``N`` GPUs. .. code-block:: diff - predictions = dataset.map_batches( + predictions = ds.map_batches( TorchModel, compute=ray.data.ActorPoolStrategy(size=2), + num_gpus=1 @@ -638,8 +631,10 @@ Let's suppose our machine has 16GiB of RAM and 8 GPUs. To tell Ray to construct .. code-block:: python # Require 5 CPUs per actor (so at most 3 can fit per 16 CPU node). - ds = ds.map_batches(MyFn, - compute=ActorPoolStrategy(size=16), num_cpus=5) + ds = ds.map_batches( + MyFn, + compute=ActorPoolStrategy(size=16), + num_cpus=5) Learn more ---------- diff --git a/doc/source/data/consuming-datastreams.rst b/doc/source/data/consuming-data.rst similarity index 75% rename from doc/source/data/consuming-datastreams.rst rename to doc/source/data/consuming-data.rst index ea80d784558e..7933a6c1af62 100644 --- a/doc/source/data/consuming-datastreams.rst +++ b/doc/source/data/consuming-data.rst @@ -1,4 +1,4 @@ -.. _consuming_datastreams: +.. 
_consuming_data: ===================== Consuming Data @@ -16,12 +16,9 @@ Retrieving a limited set of rows A limited set of rows can be retrieved from a ``Datastream`` via the :meth:`ds.take() ` or :meth:`ds.take_batch() ` APIs, and :meth:`ds.show() `, for printing a limited set of rows. These -methods are convenient for quickly inspecting a subset (prefix) of rows. They have the -benefit that, if used right after reading, they will only trigger more files to be -read if needed to retrieve rows from that file; if inspecting a small prefix of rows, -often only the first file will need to be read. +methods are convenient for quickly inspecting a subset (prefix) of rows. -.. literalinclude:: ./doc_code/consuming_datastreams.py +.. literalinclude:: ./doc_code/consuming_data.py :language: python :start-after: __take_begin__ :end-before: __take_end__ @@ -32,27 +29,25 @@ Iterating over Datastreams Datastreams can be consumed a row at a time using the :meth:`ds.iter_rows() ` API -.. literalinclude:: ./doc_code/consuming_datastreams.py +.. literalinclude:: ./doc_code/consuming_data.py :language: python :start-after: __iter_rows_begin__ :end-before: __iter_rows_end__ or a batch at a time using the :meth:`ds.iter_batches() ` API, where you can specify -batch size as well as the desired batch format. By default, the batch format is -``"default"``. For tabular data, the default format is a Pandas DataFrame; for Python -objects, it's a list. +batch size as well as the desired batch format. By default, the batches have type +``Dict[str, np.ndarray]`` (NumPy format). -.. literalinclude:: ./doc_code/consuming_datastreams.py +.. literalinclude:: ./doc_code/consuming_data.py :language: python :start-after: __iter_batches_begin__ :end-before: __iter_batches_end__ - Datastreams can be passed to Ray tasks or actors and accessed by these iteration methods. This does not incur a copy, since the blocks of the Datastream are passed by reference as Ray objects: -.. 
literalinclude:: ./doc_code/consuming_datastreams.py +.. literalinclude:: ./doc_code/consuming_data.py :language: python :start-after: __remote_iterators_begin__ :end-before: __remote_iterators_end__ @@ -69,12 +64,12 @@ This is a common pattern useful for loading and sharding data between distribute If using :ref:`Ray Train ` for distributed training, you do not need to split the datastream; Ray Train will automatically do locality-aware splitting into per-trainer shards for you. -.. literalinclude:: ./doc_code/consuming_datastreams.py +.. literalinclude:: ./doc_code/consuming_data.py :language: python :start-after: __split_begin__ :end-before: __split_end__ -.. _saving_datastreams: +.. _saving_data: Saving Data ================== @@ -88,35 +83,35 @@ to repartition the Datastream before writing out. .. tab-item:: Parquet - .. literalinclude:: ./doc_code/saving_datastreams.py + .. literalinclude:: ./doc_code/saving_data.py :language: python :start-after: __write_parquet_begin__ :end-before: __write_parquet_end__ .. tab-item:: CSV - .. literalinclude:: ./doc_code/saving_datastreams.py + .. literalinclude:: ./doc_code/saving_data.py :language: python :start-after: __write_csv_begin__ :end-before: __write_csv_end__ .. tab-item:: JSON - .. literalinclude:: ./doc_code/saving_datastreams.py + .. literalinclude:: ./doc_code/saving_data.py :language: python :start-after: __write_json_begin__ :end-before: __write_json_end__ .. tab-item:: NumPy - .. literalinclude:: ./doc_code/saving_datastreams.py + .. literalinclude:: ./doc_code/saving_data.py :language: python :start-after: __write_numpy_begin__ :end-before: __write_numpy_end__ .. tab-item:: TFRecords - .. literalinclude:: ./doc_code/saving_datastreams.py + .. 
literalinclude:: ./doc_code/saving_data.py :language: python :start-after: __write_tfrecords_begin__ :end-before: __write_tfrecords_end__ diff --git a/doc/source/data/custom-datasource.rst b/doc/source/data/custom-datasource.rst index 010f08e19385..a9fc7ca29051 100644 --- a/doc/source/data/custom-datasource.rst +++ b/doc/source/data/custom-datasource.rst @@ -9,7 +9,7 @@ Custom Datasources This MongoDatasource guide below is for education only. For production use of MongoDB in Ray Data, see :ref:`Creating Datastream from MongoDB `. -Ray Data supports multiple ways to :ref:`create a datastream `, +Ray Data supports multiple ways to :ref:`create a datastream `, allowing you to easily ingest data of common formats from popular sources. However, if the datasource you want to read from is not in the built-in list, don't worry, you can implement a custom one for your use case. In this guide, we will walk you through how to build diff --git a/doc/source/data/data-internals.rst b/doc/source/data/data-internals.rst index 7fd74a9f4d43..7bcec8ddfafd 100644 --- a/doc/source/data/data-internals.rst +++ b/doc/source/data/data-internals.rst @@ -20,35 +20,12 @@ Ray Data and Tune When using Ray Data in conjunction with :ref:`Ray Tune `, it is important to ensure there are enough free CPUs for Ray Data to run on. By default, Tune will try to fully utilize cluster CPUs. This can prevent Ray Data from scheduling tasks, reducing performance or causing workloads to hang. -As an example, the following shows two ways to use Ray Data together with Tune: +To ensure CPU resources are always available for Ray Data execution, limit the number of concurrent Tune trials. This can be done using the ``max_concurrent_trials`` Tune option. -.. tab-set:: - - .. tab-item:: Limiting Tune Concurrency - - By limiting the number of concurrent Tune trials, we ensure CPU resources are always available for Ray Data execution. - This can be done using the ``max_concurrent_trials`` Tune option. - - .. 
literalinclude:: ./doc_code/key_concepts.py - :language: python - :start-after: __resource_allocation_1_begin__ - :end-before: __resource_allocation_1_end__ - - .. tab-item:: Reserving CPUs (Experimental) - - Alternatively, we can tell Tune to set aside CPU resources for other libraries. - This can be done by setting ``_max_cpu_fraction_per_node=0.8``, which reserves - 20% of node CPUs for Datastream execution. - - .. literalinclude:: ./doc_code/key_concepts.py - :language: python - :start-after: __resource_allocation_2_begin__ - :end-before: __resource_allocation_2_end__ - - .. warning:: - - This option is experimental and not currently recommended for use with - autoscaling clusters (scale-up will not trigger properly). +.. literalinclude:: ./doc_code/key_concepts.py + :language: python + :start-after: __resource_allocation_1_begin__ + :end-before: __resource_allocation_1_end__ .. _datastreams_pg: diff --git a/doc/source/data/data-tensor-support.rst b/doc/source/data/data-tensor-support.rst index 282b0c931fca..bb326f6bd0f0 100644 --- a/doc/source/data/data-tensor-support.rst +++ b/doc/source/data/data-tensor-support.rst @@ -3,45 +3,42 @@ ML Tensor Support ================= -Tensor (multi-dimensional array) data is ubiquitous in ML workloads. However, popular data formats such as Pandas, Parquet, and Arrow don't natively support tensor data types. To bridge this gap, Ray Data provides a unified tensor data type that can be used to represent, transform, and store tensor data: +Tensor (multi-dimensional array) data is ubiquitous in ML workloads. However, popular data formats such as Pandas, Parquet, and Arrow don't natively support tensor data types. To bridge this gap, Ray Data provides tensor extension types that integrate with Pandas and Arrow. * For Pandas, Ray Data will transparently convert ``List[np.ndarray]`` columns to and from the :class:`TensorDtype ` extension type. 
* For Parquet, Ray Data has an Arrow extension :class:`ArrowTensorType ` that allows tensors to be loaded from and stored in the Parquet format. -* In addition, single-column tensor datastreams can be created from NumPy (.npy) files. Ray Data automatically converts between the extension types/arrays above. This means you can think of a ``Tensor`` as a first-class data type in Ray Data. -Creating Tensor Datastreams ---------------------------- +Loading Tensor Data +------------------- -This section shows how to create single and multi-column tensor datastreams. +This section shows how to create datastreams that include tensor data. .. tab-set:: .. tab-item:: Synthetic Data - Create a synthetic tensor datastream from a range of integers. - - **Single-column only**: + Create synthetic tensor data from a range of integers. .. literalinclude:: ./doc_code/tensor.py :language: python :start-after: __create_range_begin__ :end-before: __create_range_end__ - .. tab-item:: Pandas UDF - - Create tensor datastreams by returning ``List[np.ndarray]`` columns from a Pandas - :ref:`user-defined function `. + .. tab-item:: Images - **Single-column**: + Load image data stored as individual files using :func:`~ray.data.read_images`: .. literalinclude:: ./doc_code/tensor.py :language: python - :start-after: __create_pandas_begin__ - :end-before: __create_pandas_end__ + :start-after: __create_images_begin__ + :end-before: __create_images_end__ + + .. tab-item:: Pandas UDF - **Multi-column**: + Create tensor columns by returning ``List[np.ndarray]`` columns from a Pandas + :ref:`user-defined function `. .. literalinclude:: ./doc_code/tensor.py :language: python @@ -50,9 +47,7 @@ This section shows how to create single and multi-column tensor datastreams. .. tab-item:: NumPy - Create from in-memory NumPy data or from previously saved NumPy (.npy) files. - - **Single-column only**: + Create from in-memory NumPy data or previously saved NumPy (.npy) files. .. 
literalinclude:: ./doc_code/tensor.py :language: python @@ -95,56 +90,22 @@ This section shows how to create single and multi-column tensor datastreams. :start-after: __create_parquet_3_begin__ :end-before: __create_parquet_3_end__ - .. tab-item:: Images - - Load image data stored as individual files using :func:`~ray.data.read_images`: - - **Image and label columns**: +Processing Tensor Data +---------------------- - .. literalinclude:: ./doc_code/tensor.py - :language: python - :start-after: __create_images_begin__ - :end-before: __create_images_end__ - -.. note:: - - By convention, single-column tensor datastreams are represented with a single ``__value__`` column. - This kind of datastream will be converted automatically to/from NumPy ndarray format in all transformation and consumption APIs. - -Transforming / Consuming Tensor Data ------------------------------------- - -Like any other Datastream, Datastreams with tensor columns can be consumed / transformed in batches via the :meth:`ds.iter_batches(batch_format=\) ` and :meth:`ds.map_batches(fn, batch_format=\) ` APIs. This section shows the available batch formats and their behavior: +Like any other Datastream, Datastreams with tensor columns can be processed in batches via :meth:`ds.iter_batches ` and :meth:`ds.map_batches ` APIs. This section shows the available batch formats and their behavior: .. tab-set:: - .. tab-item:: "default" - - **Single-column**: - - .. literalinclude:: ./doc_code/tensor.py - :language: python - :start-after: __consume_native_begin__ - :end-before: __consume_native_end__ - - **Multi-column**: + .. tab-item:: "numpy" (default) .. literalinclude:: ./doc_code/tensor.py :language: python - :start-after: __consume_native_2_begin__ - :end-before: __consume_native_2_end__ + :start-after: __consume_numpy_2_begin__ + :end-before: __consume_numpy_2_end__ .. tab-item:: "pandas" - **Single-column**: - - .. 
literalinclude:: ./doc_code/tensor.py - :language: python - :start-after: __consume_pandas_begin__ - :end-before: __consume_pandas_end__ - - **Multi-column**: - .. literalinclude:: ./doc_code/tensor.py :language: python :start-after: __consume_pandas_2_begin__ @@ -152,40 +113,15 @@ Like any other Datastream, Datastreams with tensor columns can be consumed / tra .. tab-item:: "pyarrow" - **Single-column**: - - .. literalinclude:: ./doc_code/tensor.py - :language: python - :start-after: __consume_pyarrow_begin__ - :end-before: __consume_pyarrow_end__ - - **Multi-column**: - .. literalinclude:: ./doc_code/tensor.py :language: python :start-after: __consume_pyarrow_2_begin__ :end-before: __consume_pyarrow_2_end__ - .. tab-item:: "numpy" - - **Single-column**: - - .. literalinclude:: ./doc_code/tensor.py - :language: python - :start-after: __consume_numpy_begin__ - :end-before: __consume_numpy_end__ - - **Multi-column**: - - .. literalinclude:: ./doc_code/tensor.py - :language: python - :start-after: __consume_numpy_2_begin__ - :end-before: __consume_numpy_2_end__ - -Saving Tensor Datastreams -------------------------- +Saving Tensor Data +------------------ -Because tensor datastreams rely on Datastreams-specific extension types, they can only be +Because tensor data relies on Datastream-specific extension types, they can only be saved in formats that preserve Arrow metadata (currently only Parquet). In addition, single-column tensor datastreams can be saved in NumPy format. diff --git a/doc/source/data/data.rst b/doc/source/data/data.rst index 2d009240e588..0d266b9dd92a 100644 --- a/doc/source/data/data.rst +++ b/doc/source/data/data.rst @@ -17,11 +17,7 @@ shuffling operations (:meth:`random_shuffle :meth:`repartition `), and is compatible with a variety of file formats, data sources, and distributed frameworks. -Here's an overview of the integrations with other processing frameworks, file formats, and supported operations, -as well as a glimpse at the Ray Data API. 
- -Check the :ref:`Input/Output reference ` to see if your favorite format -is already supported. +Read on for an overview of the main use cases and operations supported by Ray Data. .. image:: images/datastream.svg @@ -33,10 +29,9 @@ Streaming Batch Inference ------------------------- Ray Data simplifies general purpose parallel GPU and CPU compute in Ray through its -powerful :ref:`Datastream ` primitive. Datastreams enables workloads such as -:ref:`GPU batch inference ` efficiently on large datasets. Ray Data manages -the pipelined processing of data in the cluster, maximizing resource utilization -by keeping the working data fitting into Ray object store memory. +powerful :ref:`Datastream ` primitive. Datastreams enable workloads such as +:ref:`GPU batch inference ` to run efficiently on large datasets, +maximizing resource utilization by keeping the working data fitting into Ray object store memory. .. image:: images/stream-example.png :width: 650px @@ -112,9 +107,9 @@ Advanced users can refer directly to the Ray Data :ref:`API reference **User Guides** ^^^ - Learn how to :ref:`create datastreams `, :ref:`save - datastreams `, :ref:`transform datastreams `, - :ref:`access and exchange datastreams `, or + Learn how to :ref:`load data `, :ref:`save + data `, :ref:`transform data `, + :ref:`access and exchange data `, or :ref:`work with tensor data `. 
+++ diff --git a/doc/source/data/doc_code/batch_formats.py b/doc/source/data/doc_code/batch_formats.py index e7bb21dbebcd..2099f70e9bb8 100644 --- a/doc/source/data/doc_code/batch_formats.py +++ b/doc/source/data/doc_code/batch_formats.py @@ -10,10 +10,10 @@ def map_function(data): return data[data["sepal.length"] < 5] -batch = ds.take_batch(10) +batch = ds.take_batch(10, batch_format="pandas") mapped_batch = map_function(batch) -transformed = ds.map_batches(map_function, batch_size=10) +transformed = ds.map_batches(map_function, batch_format="pandas", batch_size=10) # __simple_map_function_end__ # __simple_pandas_start__ @@ -24,8 +24,6 @@ def map_function(data): ds.show(1) # -> {'sepal.length': 5.1, ..., 'petal.width': 0.2, 'variety': 'Setosa'} -# pandas.core.frame.DataFrame - def transform_pandas(df_batch: pd.DataFrame) -> pd.DataFrame: df_batch = df_batch[df_batch["variety"] == "Versicolor"] df_batch.loc[:, "normalized.sepal.length"] = df_batch["sepal.length"] / df_batch["sepal.length"].max() @@ -41,6 +39,7 @@ def transform_pandas(df_batch: pd.DataFrame) -> pd.DataFrame: import ray import numpy as np +from typing import Dict ds = ray.data.range_tensor(1000, shape=(2, 2)) @@ -72,7 +71,7 @@ def transform_pyarrow(batch: pa.Table) -> pa.Table: # test map function on a batch -batch = ds.take_batch(1) +batch = ds.take_batch(1, batch_format="pyarrow") mapped_batch = transform_pyarrow(batch) ds.map_batches(transform_pyarrow, batch_format="pyarrow").show(1) diff --git a/doc/source/data/doc_code/consuming_datastreams.py b/doc/source/data/doc_code/consuming_data.py similarity index 90% rename from doc/source/data/doc_code/consuming_datastreams.py rename to doc/source/data/doc_code/consuming_data.py index 860de83d53dd..63a3d05514bc 100644 --- a/doc/source/data/doc_code/consuming_datastreams.py +++ b/doc/source/data/doc_code/consuming_data.py @@ -8,26 +8,21 @@ # Take up to five records as a batch. 
print(ds.take(5)) -# -> [0, 1, 2, 3, 4] +# -> [{'id': 0}, {'id': 1}, {'id': 2}, {'id': 3}, {'id': 4}] -# Similar to above but returning in a batch format (like iter_batches / map_batches). -print(ds.take_batch(5, batch_format="pandas")) -# -> value -# 0 0 -# 1 1 -# 2 2 -# 3 3 -# 4 4 +# Similar to above but returning in a batch format. +print(ds.take_batch(5)) +# -> {'id': array([0, 1, 2, 3, 4])} # Warning: This will print all of the rows! print(ds.take_all()) ds.show(5) -# -> 0 -# 1 -# 2 -# 3 -# 4 +# -> {'id': 0} +# {'id': 1} +# {'id': 2} +# {'id': 3} +# {'id': 4} # __take_end__ # fmt: on diff --git a/doc/source/data/doc_code/hf_quick_start.py b/doc/source/data/doc_code/hf_quick_start.py index b6b6b8b59381..1675f38aa4db 100644 --- a/doc/source/data/doc_code/hf_quick_start.py +++ b/doc/source/data/doc_code/hf_quick_start.py @@ -4,7 +4,9 @@ # __hf_quickstart_load_start__ import ray +import numpy as np import pandas as pd +from typing import Dict prompts = pd.DataFrame(["Complete these sentences", "for me"], columns=["text"]) @@ -18,9 +20,9 @@ def __init__(self): # <1> from transformers import pipeline self.model = pipeline("text-generation", model="gpt2") - def __call__(self, batch): # <2> - # TODO make this run with "numpy" format - return self.model(list(batch["text"]), max_length=20) + def __call__(self, batch: Dict[str, np.ndarray]): # <2> + model_out = self.model(list(batch["text"]), max_length=20) + return pd.DataFrame({"output": model_out}) # __hf_quickstart_model_end__ diff --git a/doc/source/data/doc_code/creating_datastreams.py b/doc/source/data/doc_code/loading_data.py similarity index 73% rename from doc/source/data/doc_code/creating_datastreams.py rename to doc/source/data/doc_code/loading_data.py index b43e843027a5..ff34b4ec4377 100644 --- a/doc/source/data/doc_code/creating_datastreams.py +++ b/doc/source/data/doc_code/loading_data.py @@ -9,25 +9,14 @@ # For tfrecords ray.init(runtime_env={"pip": ["tensorflow_metadata"]}) -# fmt: off -# 
__gen_synth_int_range_begin__ -# Create a Datastream of Python objects. -ds = ray.data.range(10000) -# -> Datastream(num_blocks=200, num_rows=10000, schema=) - -ds.take(5) -# -> [0, 1, 2, 3, 4] -# __gen_synth_int_range_end__ -# fmt: on - # fmt: off # __gen_synth_tabular_range_begin__ -# Create a Datastream of Arrow records. +# Create a Datastream of integers. ds = ray.data.range(10000) # -> Datastream(num_blocks=200, num_rows=10000, schema={id: int64}) -ds.take(5) -# -> [{'value': 0}, {'value': 1}, {'value': 2}, {'value': 3}, {'value': 4}] +ds.take_batch(5) +# -> {'id': array([0, 1, 2, 3, 4])} # __gen_synth_tabular_range_end__ # fmt: on @@ -38,30 +27,32 @@ # -> Datastream( # num_blocks=200, # num_rows=409600, -# schema={__value__: numpy.ndarray(shape=(64, 64), dtype=int64)} +# schema={data: numpy.ndarray(shape=(64, 64), dtype=int64)} # ) -ds.take(2) -# -> [array([[0, 0, 0, ..., 0, 0, 0], +ds.take_batch(5) +# -> {'data': array( +# [[[0, 0, 0, ..., 0, 0, 0], # [0, 0, 0, ..., 0, 0, 0], # [0, 0, 0, ..., 0, 0, 0], # ..., # [0, 0, 0, ..., 0, 0, 0], # [0, 0, 0, ..., 0, 0, 0], -# [0, 0, 0, ..., 0, 0, 0]]), -# array([[1, 1, 1, ..., 1, 1, 1], -# [1, 1, 1, ..., 1, 1, 1], -# [1, 1, 1, ..., 1, 1, 1], +# [0, 0, 0, ..., 0, 0, 0]], +# ... +# [[4, 4, 4, ..., 4, 4, 4], +# [4, 4, 4, ..., 4, 4, 4], +# [4, 4, 4, ..., 4, 4, 4], # ..., -# [1, 1, 1, ..., 1, 1, 1], -# [1, 1, 1, ..., 1, 1, 1], -# [1, 1, 1, ..., 1, 1, 1]])] +# [4, 4, 4, ..., 4, 4, 4], +# [4, 4, 4, ..., 4, 4, 4], +# [4, 4, 4, ..., 4, 4, 4]]])} # __gen_synth_tensor_range_end__ # fmt: on # fmt: off # __from_items_begin__ -# Create a Datastream of tabular (Arrow) records. +# Create a Datastream from python dicts. ds = ray.data.from_items([{"col1": i, "col2": str(i)} for i in range(10000)]) # -> MaterializedDatastream(num_blocks=200, num_rows=10000, schema={col1: int64, col2: string}) @@ -76,7 +67,7 @@ # __from_pandas_begin__ import pandas as pd -# Create a tabular Datastream from a Pandas DataFrame. 
+# Create a Datastream from a Pandas DataFrame. df = pd.DataFrame({"col1": list(range(10000)), "col2": list(map(str, range(10000)))}) ds = ray.data.from_pandas(df) # -> MaterializedDatastream(num_blocks=1, num_rows=10000, schema={col1: int64, col2: object}) @@ -100,7 +91,7 @@ pd.DataFrame({"col1": list(chunk), "col2": list(map(str, chunk))}) for chunk in chunks ] -# Create a tabular Datastream from multiple Pandas DataFrames. +# Create a Datastream from multiple Pandas DataFrames. ds = ray.data.from_pandas(dfs) # -> MaterializedDatastream(num_blocks=10, num_rows=10000, schema={col1: int64, col2: object}) @@ -115,22 +106,22 @@ # __from_numpy_begin__ import numpy as np -# Create a tensor Datastream from a 3D NumPy ndarray. +# Create a Datastream from a 3D NumPy ndarray. arr = np.ones((3, 4, 4)) # The outer dimension is treated as the row dimension. ds = ray.data.from_numpy(arr) # -> MaterializedDatastream( # num_blocks=1, # num_rows=3, -# schema={__value__: numpy.ndarray(shape=(4, 4), dtype=double)} +# schema={data: numpy.ndarray(shape=(4, 4), dtype=double)} # ) ds.show(2) -# -> {'value': array([[1., 1., 1., 1.], +# -> {'data': array([[1., 1., 1., 1.], # [1., 1., 1., 1.], # [1., 1., 1., 1.], # [1., 1., 1., 1.]])} -# -> {'value': array([[1., 1., 1., 1.], +# -> {'data': array([[1., 1., 1., 1.], # [1., 1., 1., 1.], # [1., 1., 1., 1.], # [1., 1., 1., 1.]])} @@ -144,13 +135,7 @@ # schema={image: numpy.ndarray(shape=(32, 32, 3), dtype=uint8)}) ds.take(1) -# -> [array([[[ 88, 70, 68], -# [103, 88, 85], -# [112, 96, 97], -# ..., -# [168, 151, 81], -# [167, 149, 83], -# [166, 148, 82]]], dtype=uint8)] +# -> [{'image': array([[[ 88, 70, 68], ...]]), dtype=uint8)}] # __read_images_end__ # fmt: on @@ -158,22 +143,22 @@ # __from_numpy_mult_begin__ import numpy as np -# Create a tensor Datastream from multiple 3D NumPy ndarray. +# Create a Datastream from multiple 3D NumPy ndarray. 
arrs = [np.random.rand(2, 4, 4) for _ in range(4)] # The outer dimension is treated as the row dimension. ds = ray.data.from_numpy(arrs) # -> MaterializedDatastream( # num_blocks=4, # num_rows=8, -# schema={__value__: numpy.ndarray(shape=(4, 4), dtype=double)} +# schema={data: numpy.ndarray(shape=(4, 4), dtype=double)} # ) ds.show(2) -# -> {'value': array([[0.06587483, 0.67808656, 0.76461924, 0.83428549], +# -> {'data': array([[0.06587483, 0.67808656, 0.76461924, 0.83428549], # [0.04932103, 0.25112165, 0.26476714, 0.24599738], # [0.67624391, 0.58689537, 0.12594709, 0.94663371], # [0.32435665, 0.97719096, 0.03234169, 0.71563231]])} -# -> {'value': array([[0.98570318, 0.65956399, 0.82168898, 0.09798336], +# -> {'data': array([[0.98570318, 0.65956399, 0.82168898, 0.09798336], # [0.22426704, 0.34209978, 0.02605247, 0.48200137], # [0.17312096, 0.38789983, 0.42663678, 0.92652456], # [0.80787394, 0.92437162, 0.11185822, 0.3319638 ]])} @@ -184,7 +169,7 @@ # __from_arrow_begin__ import pyarrow as pa -# Create a tabular Datastream from an Arrow Table. +# Create a Datastream from an Arrow Table. t = pa.table({"col1": list(range(10000)), "col2": list(map(str, range(10000)))}) ds = ray.data.from_arrow(t) # -> MaterializedDatastream(num_blocks=1, num_rows=10000, schema={col1: int64, col2: string}) @@ -208,7 +193,7 @@ pa.table({"col1": list(chunk), "col2": list(map(str, chunk))}) for chunk in chunks ] -# Create a tabular Datastream from multiple Arrow Tables. +# Create a Datastream from multiple Arrow Tables. ds = ray.data.from_arrow(ts) # -> MaterializedDatastream(num_blocks=10, num_rows=10000, schema={col1: int64, col2: string}) @@ -226,7 +211,7 @@ df = pd.DataFrame({"col1": list(range(10000)), "col2": list(map(str, range(10000)))}) ddf = dd.from_pandas(df, npartitions=4) -# Create a tabular Datastream from a Dask DataFrame. +# Create a Datastream from a Dask DataFrame. 
ds = ray.data.from_dask(ddf) # -> MaterializedDatastream(num_blocks=10, num_rows=10000, schema={col1: int64, col2: object}) @@ -243,7 +228,7 @@ df = pd.DataFrame({"col1": list(range(10000)), "col2": list(map(str, range(10000)))}) mdf = md.DataFrame(df) -# Create a tabular Datastream from a Modin DataFrame. +# Create a Datastream from a Modin DataFrame. ds = ray.data.from_modin(mdf) # -> MaterializedDatastream(num_blocks=8, num_rows=10000, schema={col1: int64, col2: object}) @@ -256,7 +241,7 @@ # fmt: off # __read_parquet_begin__ -# Create a tabular Datastream by reading a Parquet file. +# Create a Datastream by reading a Parquet file. ds = ray.data.read_parquet("example://iris.parquet") # -> Datastream( # num_blocks=1, @@ -292,7 +277,7 @@ # __read_parquet_pushdown_begin__ import pyarrow as pa -# Create a tabular Datastream by reading a Parquet file, pushing column selection and row +# Create a Datastream by reading a Parquet file, pushing column selection and row # filtering down to the file scan. ds = ray.data.read_parquet( "example://iris.parquet", @@ -309,7 +294,7 @@ # fmt: off # __read_csv_begin__ -# Create a tabular Datastream by reading a CSV file. +# Create a Datastream by reading a CSV file. ds = ray.data.read_csv("example://iris.csv") # -> Datastream( # num_blocks=1, @@ -343,7 +328,7 @@ # fmt: off # __read_json_begin__ -# Create a tabular Datastream by reading a JSON file. +# Create a Datastream by reading a JSON file. ds = ray.data.read_json("example://iris.json") # -> Datastream( # num_blocks=1, @@ -377,32 +362,29 @@ # fmt: off # __read_numpy_begin__ -# Create a tensor Datastream by reading a NumPy file. +# Create a Datastream by reading a NumPy file. 
ds = ray.data.read_numpy("example://mnist_subset.npy") # -> Datastream( # num_blocks=1, # num_rows=3, -# schema={__value__: numpy.ndarray(shape=(28, 28), dtype=uint8)} +# schema={data: numpy.ndarray(shape=(28, 28), dtype=uint8)} # ) ds.show(2) -# [array([[0, ...]]), array([[0, ...]])] +# -> {'data': array([[0, ...]], dtype=uint8)} +# {'data': array([[0, ...]], dtype=uint8)} # __read_numpy_end__ # fmt: on # fmt: off # __read_text_begin__ -# Create a tabular Datastream by reading a text file. +# Create a Datastream by reading a text file. ds = ray.data.read_text("example://sms_spam_collection_subset.txt") -# -> Datastream(num_blocks=1, num_rows=10, schema=) +# -> Datastream(num_blocks=1, num_rows=10, schema={text: string}) -ds.show(3) -# -> ham Go until jurong point, crazy.. Available only in bugis n great world la e -# buffet... Cine there got amore wat... -# ham Ok lar... Joking wif u oni... -# spam Free entry in 2 a wkly comp to win FA Cup final tkts 21st May 2005. Text FA -# to 87121 to receive entry question(std txt rate)T&C's apply -# 08452810075over18's +ds.show(2) +# -> {'text': 'ham\tGo until jurong point, crazy.. Available only in bugis n great world la e buffet... Cine there got amore wat...'} +# {'text': 'ham\tOk lar... Joking wif u oni...'} # __read_text_end__ # fmt: on @@ -411,30 +393,25 @@ from io import BytesIO import PIL.Image -# Create a tabular Datastream by reading a binary file. +# Create a Datastream by reading a binary file. 
ds = ray.data.read_binary_files("example://mnist_subset_partitioned/0/1.png") -# -> Datastream(num_blocks=1, num_rows=1, schema=) +# -> Datastream(num_blocks=1, num_rows=1, schema={bytes: string}) -ds = ds.map(lambda bytes_: {"images": np.asarray(PIL.Image.open(BytesIO(bytes_["bytes"])).convert("L"))}) +ds = ds.map(lambda row: {"image": np.asarray(PIL.Image.open(BytesIO(row["bytes"])).convert("L"))}) # -> Datastream( # num_blocks=1, # num_rows=1, -# schema={__value__: numpy.ndarray(shape=(28, 28), dtype=uint8)} +# schema={image: numpy.ndarray(shape=(28, 28), dtype=uint8)} # ) -ds.show(3) -# -> ham Go until jurong point, crazy.. Available only in bugis n great world la e -# buffet... Cine there got amore wat... -# ham Ok lar... Joking wif u oni... -# spam Free entry in 2 a wkly comp to win FA Cup final tkts 21st May 2005. Text FA -# to 87121 to receive entry question(std txt rate)T&C's apply -# 08452810075over18's +ds.take(1) +# -> [{'image': array([[[ 88, 70, 68], ...]]), dtype=uint8)}] # __read_binary_end__ # fmt: on # fmt: off # __read_parquet_s3_begin__ -# Create a tabular Datastream by reading a Parquet file from S3. +# Create a Datastream by reading a Parquet file from S3. ds = ray.data.read_parquet("s3://anonymous@air-example-data/ursa-labs-taxi-data/by_year/2019/01/data.parquet") # -> Datastream( # num_blocks=1, @@ -487,7 +464,7 @@ # fmt: off # __read_tfrecords_begin__ -# Create a tabular Datastream by reading a TFRecord file. +# Create a Datastream by reading a TFRecord file. 
ds = ray.data.read_tfrecords("example://iris.tfrecords") # Datastream( # num_blocks=1, @@ -502,11 +479,11 @@ # ) ds.show(1) # { -# "sepal.length": 5.099999904632568, -# "sepal.width": 3.5, -# "petal.length": 1.399999976158142, -# "petal.width": 0.20000000298023224, -# "label": b"Setosa", +# 'sepal.length': 5.099999904632568, +# 'sepal.width': 3.5, +# 'petal.length': 1.399999976158142, +# 'petal.width': 0.20000000298023224, +# 'label': b'Setosa', # } # __read_tfrecords_end__ # fmt: on diff --git a/doc/source/data/doc_code/creating_datastreams_untested.py b/doc/source/data/doc_code/loading_data_untested.py similarity index 100% rename from doc/source/data/doc_code/creating_datastreams_untested.py rename to doc/source/data/doc_code/loading_data_untested.py diff --git a/doc/source/data/doc_code/pytorch_quick_start.py b/doc/source/data/doc_code/pytorch_quick_start.py index 5b71962ace22..9e2d8d2e9bd6 100644 --- a/doc/source/data/doc_code/pytorch_quick_start.py +++ b/doc/source/data/doc_code/pytorch_quick_start.py @@ -5,6 +5,7 @@ # __pt_quickstart_load_start__ import ray import numpy as np +from typing import Dict ds = ray.data.from_numpy(np.ones((1, 100))) @@ -24,10 +25,10 @@ def __init__(self): # <1> ) self.model.eval() - def __call__(self, batch): # <2> - tensor = torch.as_tensor(batch, dtype=torch.float32) + def __call__(self, batch: Dict[str, np.ndarray]) -> Dict: # <2> + tensor = torch.as_tensor(batch["data"], dtype=torch.float32) with torch.inference_mode(): - return self.model(tensor).detach().numpy() + return {"output": self.model(tensor).detach().numpy()} # __pt_quickstart_model_end__ @@ -39,6 +40,6 @@ def __call__(self, batch): # <2> scale = ray.data.ActorPoolStrategy(size=2) predictions = ds.map_batches(TorchPredictor, compute=scale) predictions.show(limit=1) -# [0.45092654] +# {'output': array([0.45092654])} # __pt_quickstart_prediction_end__ # fmt: on diff --git a/doc/source/data/doc_code/saving_datastreams.py b/doc/source/data/doc_code/saving_data.py 
similarity index 89% rename from doc/source/data/doc_code/saving_datastreams.py rename to doc/source/data/doc_code/saving_data.py index 30740923f1d6..3ef318ccc963 100644 --- a/doc/source/data/doc_code/saving_datastreams.py +++ b/doc/source/data/doc_code/saving_data.py @@ -10,9 +10,7 @@ import ray ds = ray.data.range(1000) -# -> Datastream(num_blocks=200, num_rows=1000, schema=) -ds.take(5) -# -> [0, 1, 2, 3, 4] +# -> Datastream(num_blocks=200, num_rows=1000, schema={id: int64}) # Write out just one file. ds.repartition(1).write_parquet("/tmp/one_parquet") @@ -31,9 +29,7 @@ import ray ds = ray.data.range(1000) -# -> Datastream(num_blocks=200, num_rows=1000, schema=) -ds.take(5) -# -> [0, 1, 2, 3, 4] +# -> Datastream(num_blocks=200, num_rows=1000, schema={id: int64}) # Write out just one file. ds.repartition(1).write_csv("/tmp/one_csv") @@ -52,9 +48,7 @@ import ray ds = ray.data.range(1000) -# -> Datastream(num_blocks=200, num_rows=1000, schema=) -ds.take(5) -# -> [0, 1, 2, 3, 4] +# -> Datastream(num_blocks=200, num_rows=1000, schema={id: int64}) # Write out just one file. ds.repartition(1).write_json("/tmp/one_json") @@ -77,11 +71,11 @@ # -> Datastream( # num_blocks=1, # num_rows=1000, -# schema={value: }, +# schema={data: }, # ) ds.show(2) -# -> {'value': array(0)} -# -> {'value': array(1)} +# -> {'data': array(0)} +# -> {'data': array(1)} # Write out just one file. ds.repartition(1).write_numpy("/tmp/one_numpy", column="data") diff --git a/doc/source/data/doc_code/tensor.py b/doc/source/data/doc_code/tensor.py index 80e7cfcc75b0..d813d6a3e871 100644 --- a/doc/source/data/doc_code/tensor.py +++ b/doc/source/data/doc_code/tensor.py @@ -1,65 +1,31 @@ # flake8: noqa +import ray from typing import Dict, Any # fmt: off # __create_range_begin__ -import ray - # Create a Datastream of tensors. 
ds = ray.data.range_tensor(10000, shape=(64, 64)) # -> Datastream(num_blocks=200, num_rows=10000, -# schema={data: numpy.ndarray(shape=(64, 64), dtype=int64)}) +# schema={data: numpy.ndarray(shape=(64, 64), dtype=int64)}) -ds.take(2) -# -> [array([[0, 0, 0, ..., 0, 0, 0], -# [0, 0, 0, ..., 0, 0, 0], -# [0, 0, 0, ..., 0, 0, 0], -# ..., -# [0, 0, 0, ..., 0, 0, 0], -# [0, 0, 0, ..., 0, 0, 0], -# [0, 0, 0, ..., 0, 0, 0]]), -# array([[1, 1, 1, ..., 1, 1, 1], -# [1, 1, 1, ..., 1, 1, 1], -# [1, 1, 1, ..., 1, 1, 1], -# ..., -# [1, 1, 1, ..., 1, 1, 1], -# [1, 1, 1, ..., 1, 1, 1], -# [1, 1, 1, ..., 1, 1, 1]])] +ds.take(1) +# -> {'data': array([[0, 0, 0, ..., 0, 0, 0], +# [0, 0, 0, ..., 0, 0, 0], +# [0, 0, 0, ..., 0, 0, 0], +# ..., +# [0, 0, 0, ..., 0, 0, 0], +# [0, 0, 0, ..., 0, 0, 0], +# [0, 0, 0, ..., 0, 0, 0]])} # __create_range_end__ -# __create_pandas_begin__ -import ray - +# __create_pandas_2_begin__ import pandas as pd import numpy as np -# Start with a tabular base datastream. -ds = ray.data.range(1000) - -# Create a single TensorArray column. -def single_col_udf(batch: pd.DataFrame) -> pd.DataFrame: - bs = len(batch) - - # Lists of ndarrays are automatically cast to TensorArray. - arr = [np.zeros((128, 128, 3)) for _ in range(bs)] - return pd.DataFrame({"data": arr}) - - ## Alternatively, manually construct a TensorArray from a single ndarray. - # from ray.data.extensions.tensor_extension import TensorArray - # arr = TensorArray(np.zeros((bs, 128, 128, 3), dtype=np.int64)) - # return pd.DataFrame({"data": arr}) - - -ds.map_batches(single_col_udf, batch_format="pandas") -ds.materialize() -# -> Datastream(num_blocks=17, num_rows=1000, -# schema={data: numpy.ndarray(shape=(128, 128, 3), dtype=int64)}) -# __create_pandas_end__ - -# __create_pandas_2_begin__ # Create multiple TensorArray columns. 
-def multi_col_udf(batch: pd.DataFrame) -> pd.DataFrame: +def gen_image_and_embed(batch: pd.DataFrame) -> pd.DataFrame: bs = len(batch) # Lists of ndarrays are automatically cast to TensorArray. @@ -72,36 +38,31 @@ def multi_col_udf(batch: pd.DataFrame) -> pd.DataFrame: # embed = TensorArray(np.zeros((bs, 256,), dtype=np.uint8)) # return pd.DataFrame({"image": image, "embed": embed}) - -ds.map_batches(multi_col_udf, batch_format="pandas") +ds.map_batches(gen_image_and_embed, batch_format="pandas") ds.materialize() # -> Datastream(num_blocks=17, num_rows=1000, -# schema={image: numpy.ndarray(shape=(128, 128, 3), dtype=int64), -# embed: numpy.ndarray(shape=(256,), dtype=uint8)}) +# schema={image: numpy.ndarray(shape=(128, 128, 3), dtype=int64), +# embed: numpy.ndarray(shape=(256,), dtype=uint8)}) # __create_pandas_2_end__ # __create_numpy_begin__ -import ray - # From in-memory numpy data. ray.data.from_numpy(np.zeros((1000, 128, 128, 3), dtype=np.int64)) # -> Datastream(num_blocks=1, num_rows=1000, -# schema={data: numpy.ndarray(shape=(128, 128, 3), dtype=int64)}) +# schema={data: numpy.ndarray(shape=(128, 128, 3), dtype=int64)}) # From saved numpy files. ray.data.read_numpy("example://mnist_subset.npy") # -> Datastream(num_blocks=1, num_rows=3, -# schema={data: numpy.ndarray(shape=(28, 28), dtype=uint8)}) +# schema={data: numpy.ndarray(shape=(28, 28), dtype=uint8)}) # __create_numpy_end__ # __create_parquet_1_begin__ -import ray - # Reading previously saved Tensor data works out of the box. 
ds = ray.data.read_parquet("example://parquet_images_mini") # -> Datastream(num_blocks=3, num_rows=3, -# schema={image: numpy.ndarray(shape=(128, 128, 3), dtype=uint8), -# label: string}) +# schema={image: numpy.ndarray(shape=(128, 128, 3), dtype=uint8), +# label: string}) ds.take(1) # -> [{'image': @@ -126,7 +87,6 @@ def multi_col_udf(batch: pd.DataFrame) -> pd.DataFrame: shutil.rmtree("/tmp/some_path", ignore_errors=True) # __create_parquet_2_begin__ -import ray import numpy as np import pandas as pd @@ -198,7 +158,7 @@ def cast_udf(block: pa.Table) -> pa.Table: # __create_images_begin__ ds = ray.data.read_images("example://image-datasets/simple") # -> Datastream(num_blocks=3, num_rows=3, -# schema={data: numpy.ndarray(shape=(32, 32, 3), dtype=uint8)}) +# schema={data: numpy.ndarray(shape=(32, 32, 3), dtype=uint8)}) ds.take(1) # -> [array([[[ 88, 70, 68], @@ -210,95 +170,11 @@ def cast_udf(block: pa.Table) -> pa.Table: # [166, 148, 82]]], dtype=uint8)] # __create_images_end__ - -# __consume_native_begin__ -import ray -from typing import Dict - -# Read a single-column example datastream. -ds = ray.data.read_numpy("example://mnist_subset.npy") -# -> Datastream(num_blocks=1, num_rows=3, -# schema={data: numpy.ndarray(shape=(28, 28), dtype=uint8)}) - -def add_one(batch: Dict[str, np.ndarray]) -> Dict[str, np.ndarray]: - batch["data"] += 1 - return batch - -# This processes batches in numpy.ndarray format. -ds = ds.map_batches(add_one) - -# This returns batches in numpy.ndarray format. -next(ds.iter_batches()) -# -> array([[[1, 1, 1, ..., 1, 1, 1], -# [1, 1, 1, ..., 1, 1, 1], -# ..., -# [1, 1, 1, ..., 1, 1, 1], -# [1, 1, 1, ..., 1, 1, 1]], -# -# ..., -# -# [[1, 1, 1, ..., 1, 1, 1], -# [1, 1, 1, ..., 1, 1, 1], -# ..., -# [1, 1, 1, ..., 1, 1, 1], -# [1, 1, 1, ..., 1, 1, 1]]], dtype=uint8) -# __consume_native_end__ - -# __consume_native_2_begin__ -import ray - -# Read a multi-column example datastream. 
-ds = ray.data.read_parquet("example://parquet_images_mini") -# -> Datastream(num_blocks=3, num_rows=3, -# schema={image: numpy.ndarray(shape=(128, 128, 3), dtype=uint8), -# label: string}) - -def add_one(batch: pd.DataFrame) -> pd.DataFrame: - batch["image"] += 1 - return batch - -# This processes batches in pd.DataFrame format. -ds = ds.map_batches(add_one, batch_format="pandas") - -# This returns pandas batches with List[np.ndarray] columns. -next(ds.iter_batches()) -# -> image label -# 0 [[[ 96, 76, 61], [ 92, 72, 57], [ 92, 72,... cat -# 1 [[[ 38, 38, 39], [ 39, 39, 40], [ 39, 39,... cat -# 2 [[[ 47, 39, 33], [ 43, 36, 29], [ 43, 36,... dog -# __consume_native_2_end__ - -# __consume_pandas_begin__ -import ray - -# Read a single-column example datastream. -ds = ray.data.read_numpy("example://mnist_subset.npy") -# -> Datastream(num_blocks=1, num_rows=3, -# schema={data: numpy.ndarray(shape=(28, 28), dtype=uint8)}) - -def add_one(batch: pd.DataFrame) -> pd.DataFrame: - batch["data"] += 1 - return batch - -# This processes batches in pd.DataFrame format. -ds = ds.map_batches(add_one, batch_format="pandas") - -# This returns pandas batches with List[np.ndarray] columns. -next(ds.iter_batches(batch_format="pandas")) -# -> data -# 0 [[ 1, 1, 1, 1, 1, 1, 1, 1, 1,... -# 1 [[ 1, 1, 1, 1, 1, 1, 1, 1, 1,... -# 2 [[ 1, 1, 1, 1, 1, 1, 1, 1, 1,... -# __consume_pandas_end__ - # __consume_pandas_2_begin__ -import ray - -# Read a multi-column example datastream. ds = ray.data.read_parquet("example://parquet_images_mini") # -> Datastream(num_blocks=3, num_rows=3, -# schema={image: numpy.ndarray(shape=(128, 128, 3), dtype=uint8), -# label: string}) +# schema={image: numpy.ndarray(shape=(128, 128, 3), dtype=uint8), +# label: string}) def add_one(batch: pd.DataFrame) -> pd.DataFrame: batch["image"] += 1 @@ -315,55 +191,15 @@ def add_one(batch: pd.DataFrame) -> pd.DataFrame: # 2 [[[ 47, 39, 33], [ 43, 36, 29], [ 43, 36,... 
dog # __consume_pandas_2_end__ -# __consume_pyarrow_begin__ -import ray +# __consume_pyarrow_2_begin__ from ray.data.extensions.tensor_extension import ArrowTensorArray -import pyarrow - -# Read a single-column example datastream. -ds = ray.data.read_numpy("example://mnist_subset.npy") -# -> Datastream(num_blocks=1, num_rows=3, -# schema={data: numpy.ndarray(shape=(28, 28), dtype=uint8)}) - -def add_one(batch: pyarrow.Table) -> pyarrow.Table: - - def to_numpy(buf): - if not isinstance(buf, np.ndarray): - buf = buf.as_py() - return buf - - np_col = np.array( - [ - to_numpy(buf) for buf in batch.column("data") - ] - ) - np_col += 1 - - return batch.set_column( - batch._ensure_integer_index("data"), - "data", - ArrowTensorArray.from_numpy(np_col), - ) - -# This processes batches in pyarrow.Table format. -ds = ds.map_batches(add_one, batch_format="pyarrow") - -# This returns batches in pyarrow.Table format. -next(ds.iter_batches(batch_format="pyarrow")) -# pyarrow.Table -# data: extension> -# ---- -# data: [[[1,1,1,1,1,1,1,1,1,1,...],...,[1,1,1,1,1,1,1,1,1,1,...]]] -# __consume_pyarrow_end__ - -# __consume_pyarrow_2_begin__ -# Read a multi-column example datastream. ds = ray.data.read_parquet("example://parquet_images_mini") # -> Datastream(num_blocks=3, num_rows=3, -# schema={image: numpy.ndarray(shape=(128, 128, 3), dtype=uint8), label: object}) +# schema={image: numpy.ndarray(shape=(128, 128, 3), dtype=uint8), +# label: object}) -def add_one(batch: pyarrow.Table) -> pyarrow.Table: +def add_one(batch: pa.Table) -> pa.Table: def to_numpy(buf): if not isinstance(buf, np.ndarray): @@ -396,43 +232,11 @@ def to_numpy(buf): # label: [["cat"]] # __consume_pyarrow_2_end__ -# __consume_numpy_begin__ -import ray - -# Read a single-column example datastream. 
-ds = ray.data.read_numpy("example://mnist_subset.npy") -# -> Datastream(num_blocks=1, num_rows=3, -# schema={data: numpy.ndarray(shape=(28, 28), dtype=uint8)}) - -def add_one(batch: Dict[str, np.ndarray]) -> Dict[str, np.ndarray]: - batch["data"] += 1 - return batch - -# This processes batches in np.ndarray format. -ds = ds.map_batches(add_one, batch_format="numpy") - -# This returns batches in np.ndarray format. -next(ds.iter_batches(batch_format="numpy")) -# -> array([[[1, 1, 1, ..., 1, 1, 1], -# [1, 1, 1, ..., 1, 1, 1], -# ..., -# [1, 1, 1, ..., 1, 1, 1], -# [1, 1, 1, ..., 1, 1, 1]], -# -# ..., -# -# [[1, 1, 1, ..., 1, 1, 1], -# [1, 1, 1, ..., 1, 1, 1], -# ..., -# [1, 1, 1, ..., 1, 1, 1], -# [1, 1, 1, ..., 1, 1, 1]]], dtype=uint8) -# __consume_numpy_end__ - # __consume_numpy_2_begin__ -# Read a multi-column example datastream. ds = ray.data.read_parquet("example://parquet_images_mini") # -> Datastream(num_blocks=3, num_rows=3, -# schema={image: numpy.ndarray(shape=(128, 128, 3), dtype=uint8), label: object}) +# schema={image: numpy.ndarray(shape=(128, 128, 3), dtype=uint8), +# label: object}) def add_one(batch: Dict[str, Any]) -> Dict[str, Any]: assert isinstance(batch, dict) @@ -468,7 +272,8 @@ def add_one(batch: Dict[str, Any]) -> Dict[str, Any]: # Read a multi-column example datastream. ds = ray.data.read_parquet("example://parquet_images_mini") # -> Datastream(num_blocks=3, num_rows=3, -# schema={image: numpy.ndarray(shape=(128, 128, 3), dtype=uint8), label: object}) +# schema={image: numpy.ndarray(shape=(128, 128, 3), dtype=uint8), +# label: object}) # You can write the datastream to Parquet. ds.write_parquet("/tmp/some_path") @@ -487,7 +292,7 @@ def add_one(batch: Dict[str, Any]) -> Dict[str, Any]: # Read a single-column example datastream. 
ds = ray.data.read_numpy("example://mnist_subset.npy") # -> Datastream(num_blocks=1, num_rows=3, -# schema={data: numpy.ndarray(shape=(28, 28), dtype=uint8)}) +# schema={data: numpy.ndarray(shape=(28, 28), dtype=uint8)}) # You can write the datastream to Parquet. ds.write_numpy("/tmp/some_path", column="data") @@ -505,8 +310,8 @@ def add_one(batch: Dict[str, Any]) -> Dict[str, Any]: df = pd.DataFrame({"feature": ragged_array, "label": [1, 1]}) ds = ray.data.from_pandas([df, df]) # -> Datastream(num_blocks=2, num_rows=4, -# schema={feature: numpy.ndarray(shape=(None, None), dtype=float64), -# label: int64}) +# schema={feature: numpy.ndarray(shape=(None, None), dtype=float64), +# label: int64}) ds.take(2) # -> [{'feature': array([[1., 1.], diff --git a/doc/source/data/doc_code/tf_quick_start.py b/doc/source/data/doc_code/tf_quick_start.py index d923034d72c0..be9bb41e14cd 100644 --- a/doc/source/data/doc_code/tf_quick_start.py +++ b/doc/source/data/doc_code/tf_quick_start.py @@ -5,6 +5,7 @@ # __tf_quickstart_load_start__ import ray import numpy as np +from typing import Dict ds = ray.data.from_numpy(np.ones((1, 100))) @@ -20,8 +21,8 @@ def __init__(self): # <1> output_layer = keras.layers.Dense(1, activation="sigmoid") self.model = keras.Sequential([input_layer, output_layer]) - def __call__(self, batch: np.ndarray): # <2> - return self.model(batch).numpy() + def __call__(self, batch: Dict[str, np.ndarray]) -> Dict: # <2> + return {"output": self.model(batch["data"]).numpy()} # __tf_quickstart_model_end__ @@ -34,6 +35,6 @@ def __call__(self, batch: np.ndarray): # <2> predicted_probabilities = ds.map_batches(TFPredictor, compute=scale) predicted_probabilities.show(limit=1) -# [0.45119727] +# {'output': array([0.45119727])} # __tf_quickstart_prediction_end__ # fmt: on diff --git a/doc/source/data/doc_code/torch_image_batch_trained.py b/doc/source/data/doc_code/torch_image_batch_trained.py index feb99e0f5d5a..00f06a72c509 100644 --- 
a/doc/source/data/doc_code/torch_image_batch_trained.py +++ b/doc/source/data/doc_code/torch_image_batch_trained.py @@ -2,11 +2,13 @@ # isort: skip_file # fmt: off +ray.init(num_gpus=4) + # __pt_load_start__ import ray data_url = "s3://anonymous@air-example-data-2/1G-image-data-synthetic-raw" # <1> -dataset = ray.data.read_images(data_url).limit(1000) # <2> +ds = ray.data.read_images(data_url).limit(1000) # <2> # __pt_load_end__ # __pt_preprocess_start__ @@ -18,11 +20,11 @@ resnet_transforms = ResNet18_Weights.DEFAULT.transforms transform = transforms.Compose([transforms.ToTensor(), resnet_transforms()]) # <1> -def preprocess_images(batch: Dict[str, np.ndarray]): # <2> +def preprocess_images(batch: Dict[str, np.ndarray]) -> Dict[str, np.ndarray]: # <2> transformed_images = [transform(image) for image in batch["image"]] - return transformed_images + return {"preprocessed": transformed_images} -dataset = dataset.map_batches(preprocess_images) # <3> +ds = ds.map_batches(preprocess_images) # <3> # __pt_preprocess_end__ @@ -37,8 +39,8 @@ def __init__(self): # <1> self.model = resnet18(pretrained=True).cuda() self.model.eval() - def __call__(self, batch: List[torch.Tensor]): # <2> - torch_batch = torch.stack(batch).cuda() # <3> + def __call__(self, batch: Dict[str, np.ndarray]) -> Dict[str, np.ndarray]: # <2> + torch_batch = torch.stack(batch["preprocessed"]).cuda() # <3> with torch.inference_mode(): prediction = self.model(torch_batch) return {"class": prediction.argmax(dim=1).detach().cpu().numpy()} # <4> @@ -46,10 +48,11 @@ def __call__(self, batch: List[torch.Tensor]): # <2> # __pt_prediction_start__ -predictions = dataset.map_batches( +predictions = ds.map_batches( TorchPredictor, - compute=ray.data.ActorPoolStrategy(4), # <1> + compute=ray.data.ActorPoolStrategy(size=4), # <1> num_gpus=1, # <2> + batch_size=8, ) predictions.show(limit=1) diff --git a/doc/source/data/doc_code/transforming_data.py b/doc/source/data/doc_code/transforming_data.py new file mode 100644 
index 000000000000..6a348ade11f3 --- /dev/null +++ b/doc/source/data/doc_code/transforming_data.py @@ -0,0 +1,201 @@ +# flake8: noqa +# fmt: off + +# __map_batches_begin__ +import ray +import numpy as np +from typing import Dict + +# Load data. +ds = ray.data.from_items(["Test", "String", "Test String"]) +# -> Datastream(num_blocks=1, num_rows=3, schema={item: string}) + +# Define the transform function. +def to_lowercase(batch: Dict[str, np.ndarray]) -> Dict[str, np.ndarray]: + lowercase_batch = [b.lower() for b in batch["item"]] + return {"text": lowercase_batch} + +ds.map_batches(to_lowercase).show() +# -> {'text': 'test'} +# -> {'text': 'string'} +# -> {'text': 'test string'} +# __map_batches_end__ + + +# __map_begin__ +import ray +from typing import Dict, Any + +# Load data. +ds = ray.data.from_items(["Test", "String", "Test String"]) +# -> Datastream(num_blocks=1, num_rows=3, schema={item: string}) + +# Define the transform function. +def to_lowercase(row: Dict[str, Any]) -> Dict[str, Any]: + lowercase = row["item"].lower() + return {"text": lowercase} + +ds.map(to_lowercase).show() +# -> {'text': 'test'} +# -> {'text': 'string'} +# -> {'text': 'test string'} +# __map_end__ + +# __writing_numpy_udfs_begin__ +import ray +import numpy as np +from typing import Dict + +ds = ray.data.read_csv("example://iris.csv") + +def numpy_transform(arr: Dict[str, np.ndarray]) -> Dict[str, np.ndarray]: + new_col = arr["sepal.length"] / np.max(arr["sepal.length"]) + arr["normalized.sepal.length"] = new_col + del arr["sepal.length"] + return arr + +ds.map_batches(numpy_transform, batch_format="numpy").show(2) +# -> {'sepal.width': 3.2, 'petal.length': 4.7, 'petal.width': 1.4, +# 'variety': 'Versicolor', 'normalized.sepal.length': 1.0} +# -> {'sepal.width': 3.2, 'petal.length': 4.5, 'petal.width': 1.5, +# 'variety': 'Versicolor', 'normalized.sepal.length': 0.9142857142857144} +# __writing_numpy_udfs_end__ + +# __writing_pandas_udfs_begin__ +import ray +import pandas as pd + +ds 
= ray.data.read_csv("example://iris.csv") + +def pandas_transform(df: pd.DataFrame) -> pd.DataFrame: + df.loc[:, "normalized.sepal.length"] = df["sepal.length"] / df["sepal.length"].max() + df = df.drop(columns=["sepal.length"]) + return df + +ds.map_batches(pandas_transform, batch_format="pandas").show(2) +# -> {'sepal.width': 3.2, 'petal.length': 4.7, 'petal.width': 1.4, +# 'variety': 'Versicolor', 'normalized.sepal.length': 1.0} +# -> {'sepal.width': 3.2, 'petal.length': 4.5, 'petal.width': 1.5, +# 'variety': 'Versicolor', 'normalized.sepal.length': 0.9142857142857144} +# __writing_pandas_udfs_end__ + +# __writing_arrow_udfs_begin__ +import ray +import pyarrow as pa +import pyarrow.compute as pac + +ds = ray.data.read_csv("example://iris.csv") + +def pyarrow_transform(batch: pa.Table) -> pa.Table: + batch = batch.append_column( + "normalized.sepal.length", + pac.divide(batch["sepal.length"], pac.max(batch["sepal.length"])), + ) + return batch.drop(["sepal.length"]) + +ds.map_batches(pyarrow_transform, batch_format="pyarrow").show(2) +# -> {'sepal.width': 3.2, 'petal.length': 4.7, 'petal.width': 1.4, +# 'variety': 'Versicolor', 'normalized.sepal.length': 1.0} +# -> {'sepal.width': 3.2, 'petal.length': 4.5, 'petal.width': 1.5, +# 'variety': 'Versicolor', 'normalized.sepal.length': 0.9142857142857144} +# __writing_arrow_udfs_end__ + +# __datastream_compute_strategy_begin__ +import ray +import pandas +import numpy +from ray.data import ActorPoolStrategy + +# Dummy model to predict Iris variety. +def predict_iris(df: pandas.DataFrame) -> pandas.DataFrame: + conditions = [ + (df["sepal.length"] < 5.0), + (df["sepal.length"] >= 5.0) & (df["sepal.length"] < 6.0), + (df["sepal.length"] >= 6.0) + ] + values = ["Setosa", "Versicolor", "Virginica"] + return pandas.DataFrame({"predicted_variety": numpy.select(conditions, values)}) + +class IrisInferModel: + # Do any expensive model setup in the __init__ function. 
+ def __init__(self): + self._model = predict_iris + + # This method is called repeatedly by Ray Data to process batches. + def __call__(self, batch: pandas.DataFrame) -> pandas.DataFrame: + return self._model(batch) + +ds = ray.data.read_csv("example://iris.csv").repartition(10) + +# Batch inference processing with Ray tasks (the default compute strategy). +predicted = ds.map_batches(predict_iris, batch_format="pandas") + +# Batch inference processing with Ray actors (pool of size 5). +predicted = ds.map_batches( + IrisInferModel, compute=ActorPoolStrategy(size=5), batch_size=10) +# __datastream_compute_strategy_end__ + +# __writing_generator_udfs_begin__ +import ray +from typing import Iterator + +# Load iris data. +ds = ray.data.read_csv("example://iris.csv") + +# UDF to repeat the dataframe 100 times, in chunks of 20. +def repeat_dataframe(df: pd.DataFrame) -> Iterator[pd.DataFrame]: + for _ in range(5): + yield pd.concat([df]*20) + +ds.map_batches(repeat_dataframe, batch_format="pandas").show(2) +# -> {'sepal.length': 5.1, 'sepal.width': 3.5, 'petal.length': 1.4, 'petal.width': 0.2, 'variety': 'Setosa'} +# -> {'sepal.length': 4.9, 'sepal.width': 3.0, 'petal.length': 1.4, 'petal.width': 0.2, 'variety': 'Setosa'} +# __writing_generator_udfs_end__ + +# __shuffle_begin__ +import ray + +# The datastream starts off with 1000 blocks. +ds = ray.data.range(10000, parallelism=1000) +# -> Datastream(num_blocks=1000, num_rows=10000, schema={id: int64}) + +# Repartition the data into 100 blocks. Since shuffle=False, Ray Data will minimize +# data movement during this operation by merging adjacent blocks. +ds = ds.repartition(100, shuffle=False).materialize() +# -> MaterializedDatastream(num_blocks=100, num_rows=10000, schema={id: int64}) + +# Repartition the data into 200 blocks, and force a full data shuffle. 
+# This operation is more expensive than a non-shuffling repartition.
+ds = ds.repartition(200, shuffle=True)
+# -> MaterializedDatastream(num_blocks=200, num_rows=10000, schema={id: int64})
+# __shuffle_end__
+
+# __map_groups_begin__
+import ray
+import numpy as np
+from typing import Dict
+
+# Load iris data.
+ds = ray.data.read_csv("example://iris.csv")
+
+# The user function signature for `map_groups` is the same as that of `map_batches`.
+# It takes in a batch representing the grouped data, and must return a batch of
+# zero or more records as the result.
+def process_group(group: Dict[str, np.ndarray]) -> Dict[str, np.ndarray]:
+    # Since we are grouping by variety, all elements in this batch are equal.
+    variety = group["variety"][0]
+    count = len(group["variety"])
+    # Here we return a batch of a single record for the group (array of len 1).
+    return {
+        "variety": np.array([variety]),
+        "count": np.array([count]),
+    }
+
+ds = ds.groupby("variety").map_groups(process_group)
+ds.show()
+# -> {'variety': 'Setosa', 'count': 50}
+# {'variety': 'Versicolor', 'count': 50}
+# {'variety': 'Virginica', 'count': 50}
+# __map_groups_end__
+
+# fmt: on
diff --git a/doc/source/data/doc_code/transforming_datastreams.py b/doc/source/data/doc_code/transforming_datastreams.py
deleted file mode 100644
index 45cb1e7a8a79..000000000000
--- a/doc/source/data/doc_code/transforming_datastreams.py
+++ /dev/null
@@ -1,679 +0,0 @@
-# flake8: noqa
-
-# fmt: off
-# __datastream_transformation_begin__
-import ray
-import pandas
-
-# Create a datastream from file with Iris data.
-# Tip: "example://" is a convenient protocol to access the
-# python/ray/data/examples/data directory.
-ds = ray.data.read_csv("example://iris.csv") -# Datastream(num_blocks=1, num_rows=150, -# schema={sepal.length: float64, sepal.width: float64, -# petal.length: float64, petal.width: float64, variety: object}) -ds.show(3) -# -> {'sepal.length': 5.1, 'sepal.width': 3.5, -# 'petal.length': 1.4, 'petal.width': 0.2, 'variety': 'Setosa'} -# -> {'sepal.length': 4.9, 'sepal.width': 3.0, -# 'petal.length': 1.4, 'petal.width': 0.2, 'variety': 'Setosa'} -# -> {'sepal.length': 4.7, 'sepal.width': 3.2, -# 'petal.length': 1.3, 'petal.width': 0.2, 'variety': 'Setosa'} - -# Repartition the datastream to 5 blocks. -ds = ds.repartition(5) -# -> Repartition -# +- Datastream(num_blocks=1, num_rows=150, -# schema={sepal.length: float64, sepal.width: float64, -# petal.length: float64, petal.width: float64, variety: object}) - -# Find rows with sepal.length < 5.5 and petal.length > 3.5. -def transform_batch(df: pandas.DataFrame) -> pandas.DataFrame: - return df[(df["sepal.length"] < 5.5) & (df["petal.length"] > 3.5)] - -# Map processing the datastream. -ds.map_batches(transform_batch, batch_format="pandas").show() -# -> {'sepal.length': 5.2, 'sepal.width': 2.7, -# 'petal.length': 3.9, 'petal.width': 1.4, 'variety': 'Versicolor'} -# -> {'sepal.length': 5.4, 'sepal.width': 3.0, -# 'petal.length': 4.5, 'petal.width': 1.5, 'variety': 'Versicolor'} -# -> {'sepal.length': 4.9, 'sepal.width': 2.5, -# 'petal.length': 4.5, 'petal.width': 1.7, 'variety': 'Virginica'} - -# Split the datastream into 2 disjoint iterators. -ds.streaming_split(2) -# -> [, -# ] - -# Sort the datastream by sepal.length. 
-ds = ds.sort("sepal.length") -ds.show(3) -# -> {'sepal.length': 4.3, 'sepal.width': 3.0, -# 'petal.length': 1.1, 'petal.width': 0.1, 'variety': 'Setosa'} -# -> {'sepal.length': 4.4, 'sepal.width': 2.9, -# 'petal.length': 1.4, 'petal.width': 0.2, 'variety': 'Setosa'} -# -> {'sepal.length': 4.4, 'sepal.width': 3.0, -# 'petal.length': 1.3, 'petal.width': 0.2, 'variety': 'Setosa'} - -# Shuffle the datastream. -ds = ds.random_shuffle() -ds.show(3) -# -> {'sepal.length': 6.7, 'sepal.width': 3.1, -# 'petal.length': 4.4, 'petal.width': 1.4, 'variety': 'Versicolor'} -# -> {'sepal.length': 6.7, 'sepal.width': 3.3, -# 'petal.length': 5.7, 'petal.width': 2.1, 'variety': 'Virginica'} -# -> {'sepal.length': 4.5, 'sepal.width': 2.3, -# 'petal.length': 1.3, 'petal.width': 0.3, 'variety': 'Setosa'} - -# Group by the variety. -ds.groupby("variety").count().show() -# -> {'variety': 'Setosa', 'count()': 50} -# -> {'variety': 'Versicolor', 'count()': 50} -# -> {'variety': 'Virginica', 'count()': 50} -# __datastream_transformation_end__ -# fmt: on - -# fmt: off -# __writing_default_udfs_tabular_begin__ -import ray -import pandas as pd - -# Load datastream. -ds = ray.data.read_csv("example://iris.csv") - -# UDF as a function on Pandas DataFrame batches. -def pandas_transform(df_batch: pd.DataFrame) -> pd.DataFrame: - # Filter rows. - df_batch = df_batch[df_batch["variety"] == "Versicolor"] - # Add derived column. - # Notice here that `df["sepal.length"].max()` is only the max value of the column - # within a given batch (instead of globally)!! - df_batch.loc[:, "normalized.sepal.length"] = df_batch["sepal.length"] / df_batch["sepal.length"].max() - # Drop column. 
- df_batch = df_batch.drop(columns=["sepal.length"]) - return df_batch - -ds.map_batches(pandas_transform, batch_format="pandas").show(2) -# -> {'sepal.width': 3.2, 'petal.length': 4.7, 'petal.width': 1.4, -# 'variety': 'Versicolor', 'normalized.sepal.length': 1.0} -# -> {'sepal.width': 3.2, 'petal.length': 4.5, 'petal.width': 1.5, -# 'variety': 'Versicolor', 'normalized.sepal.length': 0.9142857142857144} -# __writing_default_udfs_tabular_end__ -# fmt: on - -# fmt: off -# __writing_default_udfs_tensor_begin__ -import ray -import numpy as np -from typing import Dict - -# Load datastream. -ds = ray.data.range_tensor(1000, shape=(2, 2)) - -# UDF as a function on NumPy ndarray batches. -def tensor_transform(arr: Dict[str, np.ndarray]) -> Dict[str, np.ndarray]: - # Notice here that the ndarray is of shape (batch_size, 2, 2) - # Multiply each element in the ndarray by a factor of 2 - arr["data"] *= 2 - return arr - -ds.map_batches(tensor_transform, batch_format="numpy").show(2) -# [array([[0, 0], -# [0, 0]]), -# array([[2, 2], -# [2, 2]])] - -# __writing_default_udfs_tensor_end__ -# fmt: on - -# fmt: off -# __writing_default_udfs_list_begin__ -import ray -from typing import Any - -# Load datastream. -ds = ray.data.range(1000) - -def list_transform(batch: Dict[str, Any]) -> Dict[str, Any]: - # Notice here that the list is of length batch_size - # Multiply each element in the list by a factor of 2 - return {"id": [x * 2 for x in batch["id"]]} - -ds.map_batches(list_transform).show(2) -# {"id": 0} -# {"id": 2} - -# __writing_default_udfs_list_end__ -# fmt: on - -# fmt: off -# __writing_pandas_udfs_begin__ -import ray -import pandas as pd - -# Load datastream. -ds = ray.data.read_csv("example://iris.csv") - -# UDF as a function on Pandas DataFrame batches. -def pandas_transform(df: pd.DataFrame) -> pd.DataFrame: - # Filter rows. - df = df[df["variety"] == "Versicolor"] - # Add derived column. 
- df.loc[:, "normalized.sepal.length"] = df["sepal.length"] / df["sepal.length"].max() - # Drop column. - df = df.drop(columns=["sepal.length"]) - return df - -ds.map_batches(pandas_transform, batch_format="pandas").show(2) -# -> {'sepal.width': 3.2, 'petal.length': 4.7, 'petal.width': 1.4, -# 'variety': 'Versicolor', 'normalized.sepal.length': 1.0} -# -> {'sepal.width': 3.2, 'petal.length': 4.5, 'petal.width': 1.5, -# 'variety': 'Versicolor', 'normalized.sepal.length': 0.9142857142857144} -# __writing_pandas_udfs_end__ -# fmt: on - -# fmt: off -# __writing_arrow_udfs_begin__ -import ray -import pyarrow as pa -import pyarrow.compute as pac - -# Load datastream. -ds = ray.data.read_csv("example://iris.csv") - -# UDF as a function on Arrow Table batches. -def pyarrow_transform(batch: pa.Table) -> pa.Table: - batch = batch.filter(pac.equal(batch["variety"], "Versicolor")) - batch = batch.append_column( - "normalized.sepal.length", - pac.divide(batch["sepal.length"], pac.max(batch["sepal.length"])), - ) - return batch.drop(["sepal.length"]) - -ds.map_batches(pyarrow_transform, batch_format="pyarrow").show(2) -# -> {'sepal.width': 3.2, 'petal.length': 4.7, 'petal.width': 1.4, -# 'variety': 'Versicolor', 'normalized.sepal.length': 1.0} -# -> {'sepal.width': 3.2, 'petal.length': 4.5, 'petal.width': 1.5, -# 'variety': 'Versicolor', 'normalized.sepal.length': 0.9142857142857144} -# __writing_arrow_udfs_end__ -# fmt: on - -# fmt: off -# __writing_numpy_udfs_begin__ -import ray -import numpy as np - -# Load datastream. -ds = ray.data.read_numpy("example://mnist_subset.npy") - -# UDF as a function on NumPy ndarray batches. -def normalize(arr: Dict[str, np.ndarray]) -> Dict[str, np.ndarray]: - arr = arr["data"] - # Normalizes each image to [0, 1] range. 
- mins = arr.min((1, 2))[:, np.newaxis, np.newaxis] - maxes = arr.max((1, 2))[:, np.newaxis, np.newaxis] - range_ = maxes - mins - idx = np.where(range_ == 0) - mins[idx] = 0 - range_[idx] = 1 - return {"data": (arr - mins) / range_} - -ds = ds.map_batches(normalize, batch_format="numpy") -# -> MapBatches(normalize) -# +- Datastream(num_blocks=1, -# num_rows=3, -# schema={__value__: numpy.ndarray(shape=(28, 28), dtype=uint8)} -# ) -# __writing_numpy_udfs_end__ -# fmt: on - -# fmt: off -# __writing_callable_classes_udfs_begin__ -import ray - -# Load datastream. -ds = ray.data.read_csv("example://iris.csv") - -# UDF as a function on Pandas DataFrame batches. -class ModelUDF: - def __init__(self): - self.model = lambda df: df["sepal.length"] > 0.65 - - def __call__(self, df: pd.DataFrame) -> pd.DataFrame: - # Filter rows. - df = df[df["variety"] == "Versicolor"] - # Apply model. - df["output"] = self.model(df) - return df - -ds.map_batches(ModelUDF, batch_format="pandas", compute=ray.data.ActorPoolStrategy(size=2)).show(2) -# -> {'sepal.length': 7.0, 'sepal.width': 3.2, 'petal.length': 4.7, 'petal.width': 1.4, -# 'variety': 'Versicolor', 'output': True} -# -> {'sepal.length': 6.4, 'sepal.width': 3.2, 'petal.length': 4.5, 'petal.width': 1.5, -# 'variety': 'Versicolor', 'output': False}` -# __writing_callable_classes_udfs_end__ -# fmt: on - -# fmt: off -# __writing_generator_udfs_begin__ -import ray -from typing import Iterator - -# Load datastream. -ds = ray.data.read_csv("example://iris.csv") - -# UDF to repeat the dataframe 100 times, in chunks of 20. 
-def repeat_dataframe(df: pd.DataFrame) -> Iterator[pd.DataFrame]: - for _ in range(5): - yield pd.concat([df]*20) - -ds.map_batches(repeat_dataframe, batch_format="pandas", ).show(2) -# -> {'sepal.length': 5.1, 'sepal.width': 3.5, 'petal.length': 1.4, 'petal.width': 0.2, 'variety': 'Setosa'} -# -> {'sepal.length': 4.9, 'sepal.width': 3.0, 'petal.length': 1.4, 'petal.width': 0.2, 'variety': 'Setosa'} -# __writing_generator_udfs_end__ -# fmt: on - -# fmt: off -# __writing_pandas_out_udfs_begin__ -import ray -import pandas as pd -from typing import List - -# Load datastream. -ds = ray.data.from_items(["test", "string", "teststring"]) -# -> Datastream(num_blocks=1, num_rows=3, schema={item: string}) - -# Convert column name. -def convert_pandas(batch: pd.DataFrame) -> pd.DataFrame: - return pd.DataFrame({"text": batch["item"]}, dtype="string") - -ds = ds.map_batches(convert_pandas, batch_format="pandas") -# -> MapBatches(convert_pandas) -# +- Datastream(num_blocks=3, num_rows=3, schema={item: tsring}) - -ds.show(2) -# -> {'text': 'test'} -# -> {'text': 'string'} - -print(ds) -# -> Datastream(num_blocks=3, num_rows=3, schema={text: string}) -# __writing_pandas_out_udfs_end__ -# fmt: on - -# fmt: off -# __writing_arrow_out_udfs_begin__ -import ray -import pyarrow as pa -from typing import List - -# Load datastream. -ds = ray.data.from_items(["test", "string", "teststring"]) -# -> Datastream(num_blocks=1, num_rows=3, schema={item: string}) - -# Convert to Arrow. 
-def convert_to_arrow(batch: Dict[str, np.ndarray]) -> pa.Table: - return pa.table({"text": batch["item"]}) - -ds = ds.map_batches(convert_to_arrow) -# -> MapBatches(convert_to_arrow) -# +- Datastream(num_blocks=1, num_rows=3, schema={text: string}) - -ds.show(2) -# -> {'text': 'test'} -# -> {'text': 'string'} - -print(ds) -# -> Datastream(num_blocks=3, num_rows=3, schema={text: string}) -# __writing_arrow_out_udfs_end__ -# fmt: on - -# fmt: off -# __writing_numpy_out_udfs_begin__ -import ray -import pandas as pd -import numpy as np -from typing import Dict - -# Load datastream. -ds = ray.data.read_csv("example://iris.csv") -# -> Datastream( -# num_blocks=1, -# num_rows=150, -# schema={ -# sepal.length: double, -# sepal.width: double, -# petal.length: double, -# petal.width: double, -# variety: string, -# }, -# ) - -# Convert to NumPy. -def convert_to_numpy(df: pd.DataFrame) -> Dict[str, np.ndarray]: - return {"data": df[["sepal.length", "sepal.width"]].to_numpy()} - -ds = ds.map_batches(convert_to_numpy, batch_format="pandas") -# -> MapBatches(convert_to_numpy) -# +- Datastream( -# num_blocks=1, -# num_rows=150, -# schema={ -# sepal.length: double, -# sepal.width: double, -# petal.length: double, -# petal.width: double, -# variety: string, -# }, -# ) - -ds.show(2) -# -> {'data': [5.1 3.5]} -# {'data': [4.9 3. ]} -# __writing_numpy_out_udfs_end__ -# fmt: on - -# fmt: off -# __writing_numpy_dict_out_udfs_begin__ -import ray -import pandas as pd -import numpy as np -from typing import Dict - -# Load datastream. -ds = ray.data.read_csv("example://iris.csv") -# -> Datastream( -# num_blocks=1, -# num_rows=150, -# schema={ -# sepal.length: double, -# sepal.width: double, -# petal.length: double, -# petal.width: double, -# variety: string, -# }, -# ) - -# Convert to dict of NumPy ndarrays. 
-def convert_to_numpy(df: pd.DataFrame) -> Dict[str, np.ndarray]: - return { - "sepal_len_and_width": df[["sepal.length", "sepal.width"]].to_numpy(), - "petal_len": df["petal.length"].to_numpy(), - "petal_width": df["petal.width"].to_numpy(), - } - -ds = ds.map_batches(convert_to_numpy, batch_format="pandas") -# -> MapBatches(convert_to_numpy) -# +- Datastream( -# num_blocks=1, -# num_rows=150, -# schema={ -# sepal.length: double, -# sepal.width: double, -# petal.length: double, -# petal.width: double, -# variety: string, -# }, -# ) - -ds.show(2) -# -> {'sepal_len_and_width': array([5.1, 3.5]), 'petal_len': 1.4, 'petal_width': 0.2} -# -> {'sepal_len_and_width': array([4.9, 3. ]), 'petal_len': 1.4, 'petal_width': 0.2} -# __writing_numpy_dict_out_udfs_end__ -# fmt: on - -# fmt: off -# __writing_simple_out_udfs_begin__ -import ray -import pandas as pd -from typing import List - -# Load datastream. -ds = ray.data.read_csv("example://iris.csv") -# -> Datastream( -# num_blocks=1, -# num_rows=150, -# schema={ -# sepal.length: double, -# sepal.width: double, -# petal.length: double, -# petal.width: double, -# variety: string, -# }, -# ) - -# Convert to list of dicts. -def convert_to_list(df: pd.DataFrame) -> pd.DataFrame: - return df - -ds = ds.map_batches(convert_to_list, batch_format="pandas") -# -> MapBatches(convert_to_list) -# +- Datastream( -# num_blocks=1, -# num_rows=150, -# schema={ -# sepal.length: double, -# sepal.width: double, -# petal.length: double, -# petal.width: double, -# variety: string, -# }, -# ) - -ds.show(2) -# -> {'sepal.length': 5.1, 'sepal.width': 3.5, 'petal.length': 1.4, 'petal.width': 0.2, -# 'variety': 'Setosa'} -# -> {'sepal.length': 4.9, 'sepal.width': 3.0, 'petal.length': 1.4, 'petal.width': 0.2, -# 'variety': 'Setosa'} -# __writing_simple_out_udfs_end__ -# fmt: on - -# fmt: off -# __writing_dict_out_row_udfs_begin__ -import ray -import pandas as pd -from typing import Dict - -# Load datastream. 
-ds = ray.data.range(10) -# -> Datastream(num_blocks=10, num_rows=10, schema=) - -# Convert row to dict. -def row_to_dict(row: int) -> Dict[str, int]: - return {"foo": row} - -ds = ds.map(row_to_dict) -# -> Map -# +- Datastream(num_blocks=10, num_rows=10, schema=) - -ds.show(2) -# -> {'foo': 0} -# -> {'foo': 1} -# __writing_dict_out_row_udfs_end__ -# fmt: on - -# fmt: off -# __writing_table_row_out_row_udfs_begin__ -import ray -import pandas as pd -from typing import Dict - -# Load datastream. -ds = ray.data.read_csv("example://iris.csv") -# -> Datastream( -# num_blocks=1, -# num_rows=150, -# schema={ -# sepal.length: double, -# sepal.width: double, -# petal.length: double, -# petal.width: double, -# variety: string, -# }, -# ) - -# Treat row as dict. -def map_row(row: Dict[str, Any]) -> Dict[str, Any]: - row["sepal.area"] = row["sepal.length"] * row["sepal.width"] - return row - -ds = ds.map(map_row) -# -> Map -# +- Datastream( -# num_blocks=1, -# num_rows=150, -# schema={ -# sepal.length: double, -# sepal.width: double, -# petal.length: double, -# petal.width: double, -# variety: string, -# }, -# ) - -ds.show(2) -# -> {'sepal.length': 5.1, 'sepal.width': 3.5, 'petal.length': 1.4, 'petal.width': 0.2, -# 'variety': 'Setosa', 'sepal.area': 17.849999999999998} -# -> {'sepal.length': 4.9, 'sepal.width': 3.0, 'petal.length': 1.4, 'petal.width': 0.2, -# 'variety': 'Setosa', 'sepal.area': 14.700000000000001} -# __writing_table_row_out_row_udfs_end__ -# fmt: on - -# fmt: off -# __writing_numpy_out_row_udfs_begin__ -import ray -import numpy as np -from typing import Dict - -# Load datastream. -ds = ray.data.range(10) -# -> Datastream(num_blocks=10, num_rows=10, schema={id: int64}) - -# Convert row to NumPy ndarray. 
-def row_to_numpy(row: Dict[str, Any]) -> Dict[str, np.ndarray]: - return {"data": np.full(shape=(2, 2), fill_value=row["id"])} - -ds = ds.map(row_to_numpy) -# -> Map -# +- Datastream(num_blocks=10, num_rows=10, schema={data: np.ndarray(shape=(2, 2))}) - -ds.show(2) -# -> {'data': [[0 0], [0 0]]]} -# {'data': [[1 1], [1 1]]]} -# __writing_numpy_out_row_udfs_end__ -# fmt: on - -# fmt: off -# __writing_simple_out_row_udfs_begin__ -import ray -from typing import List - -# Load datastream. -ds = ray.data.read_csv("example://iris.csv") -# -> Datastream( -# num_blocks=1, -# num_rows=150, -# schema={ -# sepal.length: double, -# sepal.width: double, -# petal.length: double, -# petal.width: double, -# variety: string, -# }, -# ) - -# Convert row to simple (opaque) row. -def map_row(row: Dict[str, Any]) -> Dict[str, Any]: - row["petal.random_property"] = random.random() - return row - -ds = ds.map(map_row) -# -> Map -# +- Datastream( -# num_blocks=1, -# num_rows=150, -# schema={ -# sepal.length: double, -# sepal.width: double, -# petal.length: double, -# petal.width: double, -# variety: string, -# }, -# ) -# __writing_simple_out_row_udfs_end__ -# fmt: on - -# fmt: off -# __configuring_batch_size_begin__ -import ray -import pandas as pd - -# Load datastream. -ds = ray.data.read_csv("example://iris.csv") - -# UDF as a function on Pandas DataFrame batches. -def pandas_transform(df: pd.DataFrame) -> pd.DataFrame: - # Filter rows. - df = df[df["variety"] == "Versicolor"] - # Add derived column. - df.loc[:, "normalized.sepal.length"] = df["sepal.length"] / df["sepal.length"].max() - # Drop column. - df = df.drop(columns=["sepal.length"]) - return df - -# Have each batch that pandas_transform receives contain 10 rows. 
-ds = ds.map_batches(pandas_transform, batch_format="pandas", batch_size=10) -# -> MapBatches(pandas_transform) -# +- Datastream( -# num_blocks=1, -# num_rows=150, -# schema={ -# sepal.length: double, -# sepal.width: double, -# petal.length: double, -# petal.width: double, -# variety: string, -# }, -# ) - -ds.show(2) -# -> {'sepal.width': 3.2, 'petal.length': 4.7, 'petal.width': 1.4, -# 'variety': 'Versicolor', 'normalized.sepal.length': 1.0} -# -> {'sepal.width': 3.2, 'petal.length': 4.5, 'petal.width': 1.5, -# 'variety': 'Versicolor', 'normalized.sepal.length': 0.9142857142857144} -# __configuring_batch_size_end__ -# fmt: on - -# fmt: off -# __datastream_compute_strategy_begin__ -import ray -import pandas -import numpy -from ray.data import ActorPoolStrategy - -# Dummy model to predict Iris variety. -def predict_iris(df: pandas.DataFrame) -> pandas.DataFrame: - conditions = [ - (df["sepal.length"] < 5.0), - (df["sepal.length"] >= 5.0) & (df["sepal.length"] < 6.0), - (df["sepal.length"] >= 6.0) - ] - values = ["Setosa", "Versicolor", "Virginica"] - return pandas.DataFrame({"predicted_variety": numpy.select(conditions, values)}) - -class IrisInferModel: - def __init__(self): - self._model = predict_iris - - def __call__(self, batch: pandas.DataFrame) -> pandas.DataFrame: - return self._model(batch) - -ds = ray.data.read_csv("example://iris.csv").repartition(10) - -# Batch inference processing with Ray tasks (the default compute strategy). -predicted = ds.map_batches(predict_iris) - -# Batch inference processing with Ray actors (pool of size 5). 
-predicted = ds.map_batches( - IrisInferModel, compute=ActorPoolStrategy(size=5), batch_size=10) -# __datastream_compute_strategy_end__ -# fmt: on diff --git a/doc/source/data/examples/batch_training.ipynb b/doc/source/data/examples/batch_training.ipynb index 2f9742e6c26c..ad56bb3a39d2 100644 --- a/doc/source/data/examples/batch_training.ipynb +++ b/doc/source/data/examples/batch_training.ipynb @@ -458,7 +458,7 @@ "source": [ "### Transforming a Datastream in parallel using custom functions \n", "\n", - "Ray Data allows you to specify custom data transform functions. These [user defined functions (UDFs)](transforming_datastreams) can be called using `Datastream.map_batches(my_function)`. The transformation will be conducted in parallel for each data batch.\n", + "Ray Data allows you to specify custom data transform functions. These [user defined functions (UDFs)](transforming_data) can be called using `Datastream.map_batches(my_function)`. The transformation will be conducted in parallel for each data batch.\n", "\n", "```{tip}\n", "You may need to call `Datastream.repartition(n)` first to split the Datastream into more blocks internally. By default, each block corresponds to one file. The upper bound of parallelism is the number of blocks.\n", diff --git a/doc/source/data/examples/nyc_taxi_basic_processing.ipynb b/doc/source/data/examples/nyc_taxi_basic_processing.ipynb index 874f5fccf17e..797bfae5b7b4 100644 --- a/doc/source/data/examples/nyc_taxi_basic_processing.ipynb +++ b/doc/source/data/examples/nyc_taxi_basic_processing.ipynb @@ -591,7 +591,7 @@ "id": "0ade2a72", "metadata": {}, "source": [ - "See {ref}`Transforming Data ` for more information on how we can process our data with Ray Data." + "See {ref}`Transforming Data ` for more information on how we can process our data with Ray Data." 
] }, { diff --git a/doc/source/data/examples/ocr_example.ipynb b/doc/source/data/examples/ocr_example.ipynb index 2c1cb0b988ef..230501b6c399 100644 --- a/doc/source/data/examples/ocr_example.ipynb +++ b/doc/source/data/examples/ocr_example.ipynb @@ -78,7 +78,7 @@ "\n", "### Running the OCR software on the data\n", "\n", - "We can now use the {meth}`ray.data.read_binary_files ` function to read all the images from S3. We set the `include_paths=True` option to create a datastream of the S3 paths and image contents. We then run the {meth}`ds.map ` function on this datastream to execute the actual OCR process on each file and convert the screen shots into text. This will create a tabular datastream with columns `path` and `text`, see also [](transform_datastreams_row_output_types).\n", + "We can now use the {meth}`ray.data.read_binary_files ` function to read all the images from S3. We set the `include_paths=True` option to create a datastream of the S3 paths and image contents. We then run the {meth}`ds.map ` function on this datastream to execute the actual OCR process on each file and convert the screen shots into text. This will create a tabular datastream with columns `path` and `text`, see also [](transforming_data).\n", "\n", "````{note}\n", "If you want to load the data from a private bucket, you have to run\n", diff --git a/doc/source/data/faq.rst b/doc/source/data/faq.rst index 3d33d74817c7..a7e38715e381 100644 --- a/doc/source/data/faq.rst +++ b/doc/source/data/faq.rst @@ -219,8 +219,7 @@ Ray Data supports creating a ``Datastream`` from local and distributed in-memory via integrations with common data libraries, as well as from local and remote storage systems via our support for many common file formats and storage backends. -Check out our :ref:`feature guide for creating datastreams ` for -details. +For more details, read :ref:`Loading Data `. When should I use global per-epoch shuffling? 
============================================= diff --git a/doc/source/data/getting-started.rst b/doc/source/data/getting-started.rst index 7c3e06c4d606..8ddc797c0bb2 100644 --- a/doc/source/data/getting-started.rst +++ b/doc/source/data/getting-started.rst @@ -30,16 +30,15 @@ Create a datastream ------------------- Create datastreams from on-disk files, Python objects, and cloud storage services like S3. -Ray reads from any `filesystem supported by Arrow +Ray Data can read from any `filesystem supported by Arrow `__. .. testcode:: import ray - datastream = ray.data.read_csv("s3://anonymous@air-example-data/iris.csv") - - datastream.show(limit=1) + ds = ray.data.read_csv("s3://anonymous@air-example-data/iris.csv") + ds.show(limit=1) .. testoutput:: @@ -47,7 +46,7 @@ Ray reads from any `filesystem supported by Arrow To learn more about creating datastreams, read -:ref:`Loading data `. +:ref:`Loading data `. Transform the datastream ------------------------ @@ -57,39 +56,42 @@ transform datastreams. Ray executes transformations in parallel for performance. .. testcode:: - import pandas as pd + from typing import Dict + import numpy as np - # Find rows with sepal length < 5.5 and petal length > 3.5. - def transform_batch(df: pd.DataFrame) -> pd.DataFrame: - return df[(df["sepal length (cm)"] < 5.5) & (df["petal length (cm)"] > 3.5)] + # Compute a "petal area" attribute. + def transform_batch(batch: Dict[str, np.ndarray]) -> Dict[str, np.ndarray]: + vec_a = batch["petal length (cm)"] + vec_b = batch["petal width (cm)"] + batch["petal area (cm^2)"] = vec_a * vec_b + return batch - transformed_ds = datastream.map_batches(transform_batch, batch_format="pandas") - print(transformed_ds) + transformed_ds = ds.map_batches(transform_batch) + print(transformed_ds.materialize()) .. 
testoutput:: - MapBatches(transform_batch) - +- Datastream( - num_blocks=1, - num_rows=150, - schema={ - sepal length (cm): double, - sepal width (cm): double, - petal length (cm): double, - petal width (cm): double, - target: int64 - } - ) - + MaterializedDatastream( + num_blocks=1, + num_rows=150, + schema={ + sepal length (cm): double, + sepal width (cm): double, + petal length (cm): double, + petal width (cm): double, + target: int64, + petal area (cm^2): double + } + ) To learn more about transforming datastreams, read -:ref:`Transforming data `. +:ref:`Transforming data `. Consume the datastream ---------------------- Pass datastreams to Ray tasks or actors, and access records with methods like -:meth:`~ray.data.Datastream.iter_batches`. +:meth:`~ray.data.Datastream.take_batch` and :meth:`~ray.data.Datastream.iter_batches`. .. tab-set:: @@ -97,17 +99,17 @@ Pass datastreams to Ray tasks or actors, and access records with methods like .. testcode:: - batches = transformed_ds.iter_batches(batch_size=8) - print(next(iter(batches))) + print(transformed_ds.take_batch(batch_size=3)) .. testoutput:: :options: +NORMALIZE_WHITESPACE - {'sepal length (cm)': array([5.2, 5.4, 4.9]), - 'sepal width (cm)': array([2.7, 3. , 2.5]), - 'petal length (cm)': array([3.9, 4.5, 4.5]), - 'petal width (cm)': array([1.4, 1.5, 1.7]), - 'target': array([1, 1, 2])} + {'sepal length (cm)': array([5.1, 4.9, 4.7]), + 'sepal width (cm)': array([3.5, 3. , 3.2]), + 'petal length (cm)': array([1.4, 1.4, 1.3]), + 'petal width (cm)': array([0.2, 0.2, 0.2]), + 'target': array([0, 0, 0]), + 'petal area (cm^2)': array([0.28, 0.28, 0.26])} .. tab-item:: Tasks @@ -139,7 +141,7 @@ Pass datastreams to Ray tasks or actors, and access records with methods like To learn more about consuming datastreams, read -:ref:`Consuming data `. +:ref:`Consuming data `. Save the datastream ------------------- @@ -151,9 +153,9 @@ or remote filesystems. 
import os - transformed_ds.write_parquet("iris") + transformed_ds.write_parquet("/tmp/iris") - print(os.listdir("iris")) + print(os.listdir("/tmp/iris")) .. testoutput:: :options: +ELLIPSIS @@ -161,7 +163,7 @@ or remote filesystems. ['..._000000.parquet'] -To learn more about saving datastream contents, read :ref:`Saving data `. +To learn more about saving datastream contents, read :ref:`Saving data `. Next Steps ---------- diff --git a/doc/source/data/glossary.rst b/doc/source/data/glossary.rst index 2ef928403554..35c781e8c132 100644 --- a/doc/source/data/glossary.rst +++ b/doc/source/data/glossary.rst @@ -32,7 +32,7 @@ Ray Data Glossary 4 4 To learn more about batch formats, read - :ref:`UDF Input Batch Formats `. + :ref:`Configuring batch formats `. Block A processing unit of data. A :class:`~ray.data.Datastream` consists of a @@ -47,10 +47,9 @@ Ray Data Glossary Block format The way :term:`blocks ` are represented. - Blocks are represented as - `Arrow tables `_, - `pandas DataFrames `_, - and Python lists. + Blocks are internally represented as + `Arrow tables `_ or + `pandas DataFrames `_. Ray Data (library) A library for distributed data processing. @@ -81,73 +80,10 @@ Ray Data Glossary To learn more about Datasources, read :ref:`Creating a Custom Datasource `. Record - A single data item. - - If your datastream is :term:`tabular `, then records are :class:`TableRows `. - If your datastream is :term:`simple `, then records are arbitrary Python objects. - If your datastream is :term:`tensor `, then records are `NumPy ndarrays `_. + A single data item, which is always a ``Dict[str, Any]``. Schema - The data type of a datastream. - - If your datastream is :term:`tabular `, then the schema describes - the column names and data types. If your datastream is :term:`simple `, - then the schema describes the Python object type. If your datastream is - :term:`tensor `, then the schema describes the per-element - tensor shape and data type. 
+ The name and type of the datastream fields. To determine a datastream's schema, call :meth:`Datastream.schema() `. - - Simple Datastream - A Datastream that represents a collection of arbitrary Python objects. - - .. doctest:: - - >>> import ray - >>> ray.data.from_items(["spam", "ham", "eggs"]) - MaterializedDatastream(num_blocks=3, num_rows=3, schema={item: string}) - - Tensor Datastream - A Datastream that represents a collection of ndarrays. - - :term:`Tabular datastreams ` that contain tensor columns aren’t tensor datastreams. - - .. doctest:: - - >>> import numpy as np - >>> import ray - >>> ray.data.from_numpy(np.zeros((100, 32, 32, 3))) - MaterializedDatastream( - num_blocks=1, - num_rows=100, - schema={data: numpy.ndarray(shape=(32, 32, 3), dtype=double)} - ) - - Tabular Datastream - A Datastream that represents columnar data. - - .. doctest:: - - >>> import ray - >>> ray.data.read_csv("s3://anonymous@air-example-data/iris.csv") - Datastream( - num_blocks=1, - num_rows=150, - schema={ - sepal length (cm): double, - sepal width (cm): double, - petal length (cm): double, - petal width (cm): double, - target: int64 - } - ) - - User-defined function (UDF) - A callable that transforms batches or :term:`records ` of data. UDFs let you arbitrarily transform datastreams. - - Call :meth:`Datastream.map_batches() `, - :meth:`Datastream.map() `, or - :meth:`Datastream.flat_map() ` to apply UDFs. - - To learn more about UDFs, read :ref:`Writing User-Defined Functions `. diff --git a/doc/source/data/key-concepts.rst b/doc/source/data/key-concepts.rst index 625e88d08e19..6b8fec5003e8 100644 --- a/doc/source/data/key-concepts.rst +++ b/doc/source/data/key-concepts.rst @@ -11,8 +11,8 @@ Datastream ---------- A :term:`Datastream ` operates over a sequence of Ray object references to :term:`blocks `. -Each block holds a set of items in an `Arrow table `_, -`pandas DataFrame `_, or Python list. 
+Each block holds a set of records in an `Arrow table `_ or +`pandas DataFrame `_. Having multiple blocks in a datastream allows for parallel transformation and ingest. For ML use cases, Datastream also natively supports mixing :ref:`Tensors ` and tabular data. @@ -39,7 +39,7 @@ Datastream uses Ray tasks to read data from remote storage in parallel. Each rea You can manually specify the number of read tasks, but the final parallelism is always capped by the number of files in the underlying datastream. -For an in-depth guide on creating datastreams, read :ref:`Loading Data `. +For an in-depth guide on creating datastreams, read :ref:`Loading Data `. Transforming Data ================= @@ -56,7 +56,7 @@ pool of Ray actors. This allows you to cache expensive state initialization .. https://docs.google.com/drawings/d/12STHGV0meGWfdWyBlJMUgw7a-JcFPu9BwSOn5BjRw9k/edit -For an in-depth guide on transforming datastreams, read :ref:`Transforming Data `. +For an in-depth guide on transforming datastreams, read :ref:`Transforming Data `. Shuffling Data ============== diff --git a/doc/source/data/creating-datastreams.rst b/doc/source/data/loading-data.rst similarity index 71% rename from doc/source/data/creating-datastreams.rst rename to doc/source/data/loading-data.rst index c9eb670e1ede..0d4a8cea08fc 100644 --- a/doc/source/data/creating-datastreams.rst +++ b/doc/source/data/loading-data.rst @@ -1,4 +1,4 @@ -.. _creating_datastreams: +.. _loading_data: ==================== Loading Data @@ -10,12 +10,6 @@ Loading Data * local and distributed in-memory data, and * local and external storage systems (local disk, cloud storage, HDFS, etc.). -This guide surveys the many ways to create a ``Datastream``. If none of these meet your -needs, please reach out to us on `Discourse `__ or open a feature -request on the `Ray GitHub repo `__, and check out -our :ref:`guide for implementing a custom datasource ` -if you're interested in rolling your own integration! - .. 
_datastream_generate_data: ------------------------- @@ -26,29 +20,19 @@ Generating Synthetic Data .. tab-item:: Int Range - Create a ``Datastream`` from a range of integers. - - .. literalinclude:: ./doc_code/creating_datastreams.py - :language: python - :start-after: __gen_synth_int_range_begin__ - :end-before: __gen_synth_int_range_end__ - - .. tab-item:: Tabular Range - - Create an Arrow (tabular) ``Datastream`` from a range of integers, - with a single column containing this integer range. + Create a ``Datastream`` from a range of integers, with a single column containing this integer range. - .. literalinclude:: ./doc_code/creating_datastreams.py + .. literalinclude:: ./doc_code/loading_data.py :language: python :start-after: __gen_synth_tabular_range_begin__ :end-before: __gen_synth_tabular_range_end__ .. tab-item:: Tensor Range - Create a tensor datastream from a range of integers, packing this integer range into + Create a datastream from a range of integers, packing this integer range into tensors of the provided shape. - .. literalinclude:: ./doc_code/creating_datastreams.py + .. literalinclude:: ./doc_code/loading_data.py :language: python :start-after: __gen_synth_tensor_range_begin__ :end-before: __gen_synth_tensor_range_end__ @@ -59,7 +43,7 @@ Generating Synthetic Data Reading Files From Storage -------------------------- -Using the ``ray.data.read_*()`` APIs, Datastreams can be created from files on local disk +Using the ``ray.data.read_*()`` APIs, data can be loaded from files on local disk or remote storage system such as S3, GCS, Azure Blob Storage, or HDFS. Any filesystem `supported by pyarrow `__ can be used to specify file locations, and many common file formats are supported: @@ -71,29 +55,24 @@ will be read in parallel. .. _datastream_supported_file_formats: -Supported File Formats -====================== +Common File Formats +=================== .. tab-set:: .. tab-item:: Parquet - Read Parquet files into a tabular ``Datastream``. 
The Parquet data will be read into - `Arrow Table `__ - blocks. Although this simple example demonstrates reading a single file, note that - Datastreams can also read directories of Parquet files. We also support reading partitioned - Parquet datasets with partition column values pulled from the file paths. + Read Parquet files and directories. Partitioned parquet read support is also available. - .. literalinclude:: ./doc_code/creating_datastreams.py + .. literalinclude:: ./doc_code/loading_data.py :language: python :start-after: __read_parquet_begin__ :end-before: __read_parquet_end__ - Datastreams' Parquet reader also supports projection and filter pushdown, allowing column - selection and row filtering to be pushed down to the file scan. For column selection, - unselected columns will never be read from the file. + The Parquet reader also supports projection and filter pushdown, allowing column + selection and row filtering to be pushed down to the file scan. - .. literalinclude:: ./doc_code/creating_datastreams.py + .. literalinclude:: ./doc_code/loading_data.py :language: python :start-after: __read_parquet_pushdown_begin__ :end-before: __read_parquet_pushdown_end__ @@ -102,13 +81,9 @@ Supported File Formats .. tab-item:: CSV - Read CSV files into a tabular ``Datastream``. The CSV data will be read into - `Arrow Table `__ - blocks. Although this simple example demonstrates reading a single file, note that - Datastreams can also read directories of CSV files, with one tabular block created - per file. + Read CSV files and directories. - .. literalinclude:: ./doc_code/creating_datastreams.py + .. literalinclude:: ./doc_code/loading_data.py :language: python :start-after: __read_csv_begin__ :end-before: __read_csv_end__ @@ -117,15 +92,11 @@ Supported File Formats .. tab-item:: JSON - Read JSON files into a tabular ``Datastream``. The JSON data will be read into - `Arrow Table `__ - blocks. 
Although this simple example demonstrates reading a single file, note that - Datastreams can also read directories of JSON files, with one tabular block created - per file. + Read JSON files and directories. Currently, only newline-delimited JSON (NDJSON) is supported. - .. literalinclude:: ./doc_code/creating_datastreams.py + .. literalinclude:: ./doc_code/loading_data.py :language: python :start-after: __read_json_begin__ :end-before: __read_json_end__ @@ -134,18 +105,12 @@ Supported File Formats .. tab-item:: NumPy - Read NumPy files into a tensor ``Datastream``. The NumPy ndarray data will be read into - single-column - `Arrow Table `__ - blocks using our - :class:`tensor extension type `, - treating the outermost ndarray dimension as the row dimension. See our - :ref:`tensor data guide ` for more information on working - with tensors in Datastreams. Although this simple example demonstrates reading a single - file, note that Datastreams can also read directories of NumPy files, with one tensor - block created per file. - - .. literalinclude:: ./doc_code/creating_datastreams.py + Read NumPy files and directories. The NumPy data will be represented via the Ray Data + :class:`tensor extension type `. + Refer to the :ref:`tensor data guide ` for more information on working + with tensors. + + .. literalinclude:: ./doc_code/loading_data.py :language: python :start-after: __read_numpy_begin__ :end-before: __read_numpy_end__ @@ -154,11 +119,9 @@ Supported File Formats .. tab-item:: Text - Read text files into a ``Datastream``. Each line in each text file will be treated as a - row in the datastream, resulting in a list-of-strings block being created for each text - file. + Read text files and directories. Each line in each text file will be treated as a row in the datastream. - .. literalinclude:: ./doc_code/creating_datastreams.py + .. 
literalinclude:: ./doc_code/loading_data.py :language: python :start-after: __read_text_begin__ :end-before: __read_text_end__ @@ -167,31 +130,25 @@ Supported File Formats .. tab-item:: Images - Call :func:`~ray.data.read_images` to read images into a :class:`~ray.data.Datastream`. + Call :func:`~ray.data.read_images` to read images. - This function stores image data in single-column - `Arrow Table `__ - blocks using the + This function represents image data using the Ray Data :class:`tensor extension type `. - For more information on working with tensors in Datastreams, read the - :ref:`tensor data guide `. + For more information on working with tensors, refer to the :ref:`tensor data guide `. - .. literalinclude:: ./doc_code/creating_datastreams.py + .. literalinclude:: ./doc_code/loading_data.py :language: python :start-after: __read_images_begin__ :end-before: __read_images_end__ .. tab-item:: Binary - Read binary files into a ``Datastream``. Each binary file will be treated as a single row - of opaque bytes. These bytes can be decoded into tensor, tabular, text, or any other + Read binary files and directories. Each binary file will be converted to a record + containing opaque bytes. These bytes can be decoded into tensor, tabular, text, or any other kind of data using :meth:`~ray.data.Datastream.map_batches` to apply a per-row decoding :ref:`user-defined function `. - Although this simple example demonstrates reading a single file, note that Datastreams - can also read directories of binary files, with one bytes block created per file. - - .. literalinclude:: ./doc_code/creating_datastreams.py + .. literalinclude:: ./doc_code/loading_data.py :language: python :start-after: __read_binary_begin__ :end-before: __read_binary_end__ @@ -200,14 +157,14 @@ Supported File Formats .. 
tab-item:: TFRecords - Call :func:`~ray.data.read_tfrecords` to read TFRecord files into a tabular + Call :func:`~ray.data.read_tfrecords` to read TFRecord files into a :class:`~ray.data.Datastream`. .. warning:: Only `tf.train.Example `_ records are supported. - .. literalinclude:: ./doc_code/creating_datastreams.py + .. literalinclude:: ./doc_code/loading_data.py :language: python :start-after: __read_tfrecords_begin__ :end-before: __read_tfrecords_end__ @@ -244,7 +201,7 @@ are supported for each of these storage systems. configuration such as S3 credentials being pulled from the machine's environment (e.g. the ``AWS_ACCESS_KEY_ID`` and ``AWS_SECRET_ACCESS_KEY`` environment variables). - .. literalinclude:: ./doc_code/creating_datastreams.py + .. literalinclude:: ./doc_code/loading_data.py :language: python :start-after: __read_parquet_s3_begin__ :end-before: __read_parquet_s3_end__ @@ -254,7 +211,7 @@ are supported for each of these storage systems. `S3FileSystem `__ instance to :func:`read_parquet() `. - .. literalinclude:: ./doc_code/creating_datastreams_untested.py + .. literalinclude:: ./doc_code/loading_data_untested.py :language: python :start-after: __read_parquet_s3_with_fs_begin__ :end-before: __read_parquet_s3_with_fs_end__ @@ -269,7 +226,7 @@ are supported for each of these storage systems. This example is not runnable as-is; you'll need to point it at your HDFS cluster/data. - .. literalinclude:: ./doc_code/creating_datastreams_untested.py + .. literalinclude:: ./doc_code/loading_data_untested.py :language: python :start-after: __read_parquet_hdfs_begin__ :end-before: __read_parquet_hdfs_end__ @@ -279,7 +236,7 @@ are supported for each of these storage systems. `__ instance to :func:`read_parquet() `. - .. literalinclude:: ./doc_code/creating_datastreams_untested.py + .. 
literalinclude:: ./doc_code/loading_data_untested.py :language: python :start-after: __read_parquet_hdfs_with_fs_begin__ :end-before: __read_parquet_hdfs_with_fs_end__ @@ -294,7 +251,7 @@ are supported for each of these storage systems. This example is not runnable as-is; you'll need to point it at your GCS bucket and configure your GCP project and credentials. - .. literalinclude:: ./doc_code/creating_datastreams_untested.py + .. literalinclude:: ./doc_code/loading_data_untested.py :language: python :start-after: __read_parquet_gcs_begin__ :end-before: __read_parquet_gcs_end__ @@ -303,7 +260,7 @@ are supported for each of these storage systems. To verify that your GCP project and credentials are set up, validate that the GCS `filesystem` has permissions to read the input `path`. - .. literalinclude:: ./doc_code/creating_datastreams_untested.py + .. literalinclude:: ./doc_code/loading_data_untested.py :language: python :start-after: __validate_parquet_gcs_begin__ :end-before: __validate_parquet_gcs_end__ @@ -316,7 +273,7 @@ are supported for each of these storage systems. `adlfs AzureBlobFileSystem `__, where the appropriate account name and account key can be specified. - .. literalinclude:: ./doc_code/creating_datastreams_untested.py + .. literalinclude:: ./doc_code/loading_data_untested.py :language: python :start-after: __read_parquet_az_begin__ :end-before: __read_parquet_az_end__ @@ -328,14 +285,10 @@ In Ray Data, users often read from remote storage systems as described above. In some use cases, users may want to read from local storage. There are three ways to read from a local filesystem: -* **Providing a local filesystem path**: For example, in ``ray.data.read_csv("my_file.csv")``, - the given path will be resolved as a local filesystem path. - -.. note:: - - If the file exists only on the local node and you run this read operation in - distributed cluster, this will fail as it cannot access the file from remote node. 
- +* **Providing a raw filesystem path**: For example, in ``ray.data.read_csv("my_file.csv")``, + the given path will be resolved as a local filesystem path. If the file exists only on the + local node and you run this read operation in distributed cluster, this will fail as it + cannot access the file from remote nodes. * **Using ``local://`` custom URI scheme**: Similarly, this will be resolved to local filesystem, e.g. ``ray.data.read_csv("local://my_file.csv")`` will read the same file as the approach above. The difference is that this scheme will ensure @@ -353,7 +306,7 @@ Ray Data supports reading compressed files using the ``arrow_open_stream_args`` (bz2, brotli, gzip, lz4 or zstd) are compatible with Ray Data. For example: -.. literalinclude:: ./doc_code/creating_datastreams.py +.. literalinclude:: ./doc_code/loading_data.py :language: python :start-after: __read_compressed_begin__ :end-before: __read_compressed_end__ @@ -383,9 +336,9 @@ In this section, we demonstrate creating a ``Datastream`` from single-node in-me .. tab-item:: Pandas Create a ``Datastream`` from a Pandas DataFrame. This constructs a ``Datastream`` - backed by a single Pandas DataFrame block. + backed by a single block. - .. literalinclude:: ./doc_code/creating_datastreams.py + .. literalinclude:: ./doc_code/loading_data.py :language: python :start-after: __from_pandas_begin__ :end-before: __from_pandas_end__ @@ -393,7 +346,7 @@ In this section, we demonstrate creating a ``Datastream`` from single-node in-me We can also build a ``Datastream`` from more than one Pandas DataFrame, where each said DataFrame will become a block in the ``Datastream``. - .. literalinclude:: ./doc_code/creating_datastreams.py + .. literalinclude:: ./doc_code/loading_data.py :language: python :start-after: __from_pandas_mult_begin__ :end-before: __from_pandas_mult_end__ @@ -401,18 +354,18 @@ In this section, we demonstrate creating a ``Datastream`` from single-node in-me .. 
tab-item:: NumPy Create a ``Datastream`` from a NumPy ndarray. This constructs a ``Datastream`` - backed by a single-column Arrow table block; the outer dimension of the ndarray - will be treated as the row dimension, and the column will have name ``"__value__"``. + backed by a single block; the outer dimension of the ndarray + will be treated as the row dimension, and the column will have name ``"data"``. - .. literalinclude:: ./doc_code/creating_datastreams.py + .. literalinclude:: ./doc_code/loading_data.py :language: python :start-after: __from_numpy_begin__ :end-before: __from_numpy_end__ We can also build a ``Datastream`` from more than one NumPy ndarray, where each said - ndarray will become a single-column Arrow table block in the ``Datastream``. + ndarray will become a block in the ``Datastream``. - .. literalinclude:: ./doc_code/creating_datastreams.py + .. literalinclude:: ./doc_code/loading_data.py :language: python :start-after: __from_numpy_mult_begin__ :end-before: __from_numpy_mult_end__ @@ -421,9 +374,9 @@ In this section, we demonstrate creating a ``Datastream`` from single-node in-me Create a ``Datastream`` from an `Arrow Table `__. - This constructs a ``Datastream`` backed by a single Arrow ``Table`` block. + This constructs a ``Datastream`` backed by a single block. - .. literalinclude:: ./doc_code/creating_datastreams.py + .. literalinclude:: ./doc_code/loading_data.py :language: python :start-after: __from_arrow_begin__ :end-before: __from_arrow_end__ @@ -431,18 +384,17 @@ In this section, we demonstrate creating a ``Datastream`` from single-node in-me We can also build a ``Datastream`` from more than one Arrow Table, where each said ``Table`` will become a block in the ``Datastream``. - .. literalinclude:: ./doc_code/creating_datastreams.py + .. literalinclude:: ./doc_code/loading_data.py :language: python :start-after: __from_arrow_mult_begin__ :end-before: __from_arrow_mult_end__ .. 
tab-item:: Python Objects - Create a ``Datastream`` from a list of Python objects; since each object in this - particular list is a dictionary, Datastreams will treat this list as a list of tabular - records, and will construct an Arrow ``Datastream``. + Create a ``Datastream`` from a list of Python objects; which are interpreted as dict records. + If the object is not a dict, it will be wrapped as ``{"item": item}``. - .. literalinclude:: ./doc_code/creating_datastreams.py + .. literalinclude:: ./doc_code/loading_data.py :language: python :start-after: __from_items_begin__ :end-before: __from_items_end__ @@ -457,16 +409,9 @@ distributed (multi-node) in-memory data, interoperating with popular distributed data processing frameworks such as :ref:`Dask `, :ref:`Spark `, :ref:`Modin `, and :ref:`Mars `. -These conversions work by running Ray tasks converting each Dask/Spark/Modin/Mars -data partition to a block format supported by Datastreams (copying data if needed), and using the -futures representing the return value of those conversion tasks as the ``Datastream`` block -futures. - -.. note:: - - These data processing frameworks must be running on Ray in order for these Datastreams - integrations to work. See how these frameworks can be run on Ray in our - :ref:`data processing integrations docs `. +Note that these data processing frameworks must be running on Ray in order for these +integrations to work. See how these frameworks can be run on Ray in our +:ref:`data processing integrations docs `. .. tab-set:: @@ -477,10 +422,7 @@ futures. ``Datastream`` backed by the distributed Pandas DataFrame partitions that underly the Dask DataFrame. - This conversion has near-zero overhead, since Datastreams simply reinterprets existing - Dask-in-Ray partition objects as Datastream blocks. - - .. literalinclude:: ./doc_code/creating_datastreams.py + .. 
literalinclude:: ./doc_code/loading_data.py :language: python :start-after: __from_dask_begin__ :end-before: __from_dask_end__ @@ -494,7 +436,7 @@ futures. will save the Spark DataFrame partitions to Ray's object store in the Arrow format, which Datastreams will then interpret as its blocks. - .. literalinclude:: ./doc_code/creating_datastreams_untested.py + .. literalinclude:: ./doc_code/loading_data_untested.py :language: python :start-after: __from_spark_begin__ :end-before: __from_spark_end__ @@ -504,10 +446,7 @@ futures. Create a ``MaterializedDatastream`` from a Modin DataFrame. This constructs a ``Datastream`` backed by the distributed Pandas DataFrame partitions that underly the Modin DataFrame. - This conversion has near-zero overhead, since Datastreams simply reinterprets existing - Modin partition objects as Datastream blocks. - - .. literalinclude:: ./doc_code/creating_datastreams.py + .. literalinclude:: ./doc_code/loading_data.py :language: python :start-after: __from_modin_begin__ :end-before: __from_modin_end__ @@ -517,10 +456,7 @@ futures. Create a ``MaterializedDatastream`` from a Mars DataFrame. This constructs a ``Datastream`` backed by the distributed Pandas DataFrame partitions that underly the Mars DataFrame. - This conversion has near-zero overhead, since Datastreams simply reinterprets existing - Mars partition objects as Datastream blocks. - - .. literalinclude:: ./doc_code/creating_datastreams_untested.py + .. literalinclude:: ./doc_code/loading_data_untested.py :language: python :start-after: __from_mars_begin__ :end-before: __from_mars_end__ @@ -551,7 +487,7 @@ From Torch and TensorFlow torch_ds = torchvision.datasets.MNIST("data", download=True) datastream = ray.data.from_torch(torch_ds) datastream.take(1) - # (, 5) + # {"item": (, 5)} .. tab-item:: TensorFlow @@ -863,35 +799,11 @@ For more details, check out :ref:`guide for implementing a custom datasource ` for tips on how to tune this read -parallelism. - -.. 
_datastream_deferred_reading: - -Deferred Read Task Execution -============================ - -Datastreams created via the ``ray.data.read_*()`` APIs are lazy: no read tasks are -executed until a downstream consumption operation triggers execution. Metadata -inspection functions like :meth:`ds.schema() ` and -:meth:`ds.show() ` will trigger execution of only one or some -tasks, instead of all tasks. This allows metadata to be inspected right away. Execution -of all read tasks can be triggered manually using the -:meth:`ds.materialize() ` API. +:ref:`performance guide ` for more information on how to tune this read parallelism. diff --git a/doc/source/data/performance-tips.rst b/doc/source/data/performance-tips.rst index ede2c8e6db55..6c56149431f5 100644 --- a/doc/source/data/performance-tips.rst +++ b/doc/source/data/performance-tips.rst @@ -57,6 +57,41 @@ Mapping individual records using :meth:`.map(fn) ` can Instead, consider using :meth:`.map_batches(batch_fn, batch_format="pandas") ` and writing your ``batch_fn`` to perform vectorized pandas operations. +.. _data_format_overheads: + +Format Overheads +~~~~~~~~~~~~~~~~ + +Converting between the internal block types (Arrow, Pandas) +and the requested batch format (``"numpy"``, ``"pandas"``, ``"pyarrow"``) +may incur data copies; which conversions cause data copying is given in the below table: + + +.. list-table:: Data Format Conversion Costs + :header-rows: 1 + :stub-columns: 1 + + * - Block Type x Batch Format + - ``"pandas"`` + - ``"numpy"`` + - ``"pyarrow"`` + - ``None`` + * - Pandas Block + - Zero-copy + - Copy* + - Copy* + - Zero-copy + * - Arrow Block + - Copy* + - Zero-copy* + - Zero-copy + - Zero-copy + +.. note:: + \* No copies occur when converting between Arrow, Pandas, and NumPy formats for columns + represented in the Ray Data tensor extension type (except for bool arrays). 
+ + Parquet Column Pruning ~~~~~~~~~~~~~~~~~~~~~~ @@ -81,14 +116,23 @@ This can be used in conjunction with column pruning when appropriate to get the Tuning Read Parallelism ~~~~~~~~~~~~~~~~~~~~~~~ +By default, Ray Data automatically selects the read ``parallelism`` according to the following procedure: + +1. The number of available CPUs is estimated. If in a placement group, the number of CPUs in the cluster is scaled by the size of the placement group compared to the cluster size. If not in a placement group, this is the number of CPUs in the cluster. +2. The parallelism is set to the estimated number of CPUs multiplied by 2. If the parallelism is less than 8, it is set to 8. +3. The in-memory data size is estimated. If the parallelism would create in-memory blocks that are larger on average than the target block size (512MiB), the parallelism is increased until the blocks are < 512MiB in size. +4. The parallelism is truncated to ``min(num_files, parallelism)``. + +Occasionally, it is advantageous to manually tune the parallelism to optimize the application. This can be done when loading data via the ``parallelism`` parameter. +For example, use ``ray.data.read_parquet(path, parallelism=1000)`` to force up to 1000 read tasks to be created. + +Tuning Read Resources +~~~~~~~~~~~~~~~~~~~~~ + By default, Ray requests 1 CPU per read task, which means one read tasks per CPU can execute concurrently. For data sources that can benefit from higher degress of I/O parallelism, you can specify a lower ``num_cpus`` value for the read function via the ``ray_remote_args`` parameter. For example, use ``ray.data.read_parquet(path, ray_remote_args={"num_cpus": 0.25})`` to allow up to four read tasks per CPU. -By default, Ray Data automatically selects the read parallelism based on the current cluster size and datastream size. -However, the number of read tasks can also be increased manually via the ``parallelism`` parameter. 
-For example, use ``ray.data.read_parquet(path, parallelism=1000)`` to force up to 1000 read tasks to be created. - .. _shuffle_performance_tips: Enabling Push-Based Shuffle diff --git a/doc/source/data/transforming-data.rst b/doc/source/data/transforming-data.rst new file mode 100644 index 000000000000..a3af026c5f7e --- /dev/null +++ b/doc/source/data/transforming-data.rst @@ -0,0 +1,284 @@ +.. _transforming_data: + +================= +Transforming Data +================= + +Datastream transforms take in datastreams and produce new datastreams. For example, *map_batches* +is a transform that applies a +:ref:`user-defined function ` on each data record +and returns a new datastream as the result. Datastream transforms can be composed to +express a chain of computations. + +-------- +Overview +-------- + +There are two main types of supported transforms: + +* One-to-one: each input block will contribute to only one output + block, such as :meth:`ds.map_batches() `. +* All-to-all: input blocks can contribute to multiple output blocks, + such as :meth:`ds.random_shuffle() `. + +.. list-table:: Common Ray Data transforms. + :header-rows: 1 + + * - Transform + - Type + - Description + * - :meth:`ds.map() ` + - One-to-one + - Apply a given function to individual data records. + * - :meth:`ds.map_batches() ` + - One-to-one + - Apply a given function to batches of records. + * - :meth:`ds.repartition() ` + - All-to-all + - | Repartition the datastream into N blocks. + * - :meth:`ds.random_shuffle() ` + - All-to-all + - | Randomly shuffle the datastream. + * - :meth:`ds.groupby().\() ` + - All-to-all + - | Group data by column and aggregate each group. + * - :meth:`ds.groupby().map_groups() ` + - All-to-all + - | Group data by column and transform each group. + +.. _transform_datastreams_writing_udfs: + +-------------- +Map transforms +-------------- + +Use ``map_batches`` to efficiently transform records in batches, or ``map`` to transform records individually: + +.. 
tab-set:: + + .. tab-item:: Map Batches + + Call `map_batches`` to transform batches of records. Each batch has type``Dict[str, np.ndarray]``. The below example shows how to use ``map_batches`` to convert text records to lowercase: + + .. literalinclude:: ./doc_code/transforming_data.py + :language: python + :start-after: __map_batches_begin__ + :end-before: __map_batches_end__ + + .. tab-item:: Map + + Records can also be transformed one at a time using the ``map`` function, which takes records encoded as ``Dict[str, Any]]``. The below example shows how to convert text records to lowercase: + + .. literalinclude:: ./doc_code/transforming_data.py + :language: python + :start-after: __map_begin__ + :end-before: __map_end__ + +Configuring resources +===================== + +By default, each task used for (e.g., `map` or `map_batches`) requests 1 CPU from Ray. +To increase the resources reserved per task, you can increase the CPU request by specifying +``.map_batches(..., num_cpus=)``, which will instead reserve ``N`` CPUs per task: + +.. code-block:: python + + # Run each function with 1 CPU each (default). + ds.map_batches(func) + + # Run each function with 4 CPUs each. + ds.map_batches(func, num_cpus=4) + +To request tasks be run on a GPU, use ``.map_batches(..., num_gpus=1)``, etc. In addition to +``num_cpus`` and ``num_gpus``, any kwarg from ``@ray.remote`` can be passed to customize +the resource scheduling of tasks: + +.. code-block:: python + + # Run each function with 1 GPU each. + ds.map_batches(func, num_gpus=1) + + # Can also customize other ray remote args such as `max_retries`. + ds.map_batches(func, num_gpus=1, max_retries=10) + +Configuring batch size +====================== + +An important parameter to set for :meth:`ds.map_batches() ` +is ``batch_size``, which controls the size of the batches provided to the your transform function. The default +batch size is `4096` for CPU tasks. For GPU tasks, an explicit batch size is always required: + +.. 
code-block:: python + + # Each batch sent to `func` will have up to 4096 records (default). + ds.map_batches(func) + + # Reduce the batch size to 64 records per batch. + ds.map_batches(func, batch_size=64) + +Increasing ``batch_size`` can improve performance for transforms that take advantage of vectorization, but will also result in higher memory utilization, which can lead to out-of-memory (OOM) errors. If encountering OOMs, decreasing your ``batch_size`` may help. Note also that if the ``batch_size`` becomes larger than the number of records per block, multiple blocks will be bundled together into a single batch, potentially reducing the parallelism available. + +.. _transform_datastreams_batch_formats: + +Configuring batch format +======================== + +Customize the format of data batches using the ``batch_format`` argument to :meth:`ds.map_batches() `. The following are examples in each available batch format. + +Transform functions do not have to return data in the same format as the input batch. For example, you could return a ``pd.DataFrame`` even if the input was in NumPy format. + +.. tab-set:: + + .. tab-item:: NumPy (default) + + The ``"numpy"`` option presents batches as ``Dict[str, np.ndarray]``, where the + `numpy.ndarray `__ + values represent a batch of record field values. + + .. literalinclude:: ./doc_code/transforming_data.py + :language: python + :start-after: __writing_numpy_udfs_begin__ + :end-before: __writing_numpy_udfs_end__ + + .. tab-item:: Pandas + + The ``"pandas"`` batch format presents batches in + `pandas.DataFrame `__ + format. + + .. literalinclude:: ./doc_code/transforming_data.py + :language: python + :start-after: __writing_pandas_udfs_begin__ + :end-before: __writing_pandas_udfs_end__ + + .. tab-item:: PyArrow + + The ``"pyarrow"`` batch format presents batches in + `pyarrow.Table `__ + format. + + .. 
literalinclude:: ./doc_code/transforming_data.py + :language: python + :start-after: __writing_arrow_udfs_begin__ + :end-before: __writing_arrow_udfs_end__ + +.. _transforming_data_actors: + +Reduce setup overheads using actors +=================================== + +Data transforms can be executed by either :ref:`Ray tasks ` +or :ref:`Ray actors `. By default, ``map_batches`` uses tasks. +For transforms that require expensive setup, +it's preferrable to use actors, which are stateful and allow setup to be reused +for efficiency. For a fixed-size actor pool, specify ``compute=ActorPoolStrategy(size=n)``. +For an autoscaling actor pool, use ``compute=ray.data.ActorPoolStrategy(min_size=m, max_size=n)``. + +When using actors, you must also specify your transform as a callable class type instead of a plain function. The following is an example of using actors for batch inference: + +.. literalinclude:: ./doc_code/transforming_data.py + :language: python + :start-after: __datastream_compute_strategy_begin__ + :end-before: __datastream_compute_strategy_end__ + +Reduce memory usage using generators +==================================== + +Transform functions can also be written as Python generators, yielding multiple outputs for a batch or row instead of a single item. Generator UDFs are useful when returning large objects. Instead of returning a very large output batch, ``fn`` can instead yield the output batch in chunks to avoid excessive heap memory usage. + +.. literalinclude:: ./doc_code/transforming_data.py + :language: python + :start-after: __writing_generator_udfs_begin__ + :end-before: __writing_generator_udfs_end__ + +------------------ +Shuffle transforms +------------------ + +Shuffle transforms change the organization of the data, e.g., increasing the number of blocks, or the order of records in each block, without changing the record contents. 
+ +Repartitioning data +=================== + +Call :meth:`Datastream.repartition() ` to change the +number of blocks of the datastream. This may be useful to break up your dataset into small +pieces to enable more fine-grained parallelization, or to reduce the number of files +produced as output of a write operation. + +.. literalinclude:: ./doc_code/transforming_data.py + :language: python + :start-after: __shuffle_begin__ + :end-before: __shuffle_end__ + +Random shuffle +============== + +Call :meth:`Datastream.random_shuffle() ` to +globally shuffle the order of data records. + +.. doctest:: + + >>> import ray + >>> datastream = ray.data.range(10) + >>> datastream.random_shuffle().take_batch() # doctest: +SKIP + {'id': array([7, 0, 9, 3, 5, 1, 4, 2, 8, 6])} + +For reduced overhead during training ingest, use local shuffles. Read +:ref:`Shuffling Data ` in the AIR user guide to learn more. + +.. _data-groupbys: + +------------------ +Grouped transforms +------------------ + +Ray Data supports grouping data by column and applying aggregations to each group. This is supported via the :meth:`ds.groupby() ` call. + +Aggregations +============ + +Aggregations can be performed per group: + +.. code-block:: python + + ds = ray.data.from_items([ + {"A": x % 3, "B": 2 * x, "C": 3 * x} + for x in range(10) + ]) + + # Group by the A column and calculate the per-group mean for B and C columns. + ds.groupby("A").mean(["B", "C"]).to_pandas() + # -> + # A mean(B) mean(C) + # 0 0 9.0 13.5 + # 1 1 8.0 12.0 + # 2 2 10.0 15.0 + +Aggregations can also be applied globally: + +.. code-block:: python + + from ray.data.aggregate import Mean, Std + + # Global mean on B and C columns. + ds.mean(["B", "C"]) + # -> {'mean(B)': 9.0, 'mean(C)': 13.5} + + # Multiple global aggregations on multiple columns. 
+ ds.aggregate(Mean("B"), Std("B", ddof=0), Mean("C"), Std("C", ddof=0)) + # -> {'mean(A)': 0.9, 'std(A)': 0.8306623862918076, 'mean(B)': 9.0, 'std(B)': 5.744562646538029} + +Note that Ray Data currently only supports grouping by a single column. In order to group by multiple columns, you can first compute the grouping key using ``map_batches`` prior to calling ``groupby``. + +Map Groups +========== + +Arbitrary processing can be applied to each group of records using :meth:`ds.groupby().map_groups() `. For example, this could be used to implement custom aggregations, train a model per group, etc. + +.. literalinclude:: ./doc_code/transforming_data.py + :language: python + :start-after: __map_groups_begin__ + :end-before: __map_groups_end__ + +Note that when using ``map_groups``, all records of the same group will be gathered into the same batch, +which may lead to out-of-memory errors if the group size exceeds the capacity of a single machine. diff --git a/doc/source/data/transforming-datastreams.rst b/doc/source/data/transforming-datastreams.rst deleted file mode 100644 index 750becbf808d..000000000000 --- a/doc/source/data/transforming-datastreams.rst +++ /dev/null @@ -1,565 +0,0 @@ -.. _transforming_datastreams: - -======================== -Transforming Data -======================== - -Datastreams transformations take in datastreams and produce new datastreams. For example, *map_batches* -is a transformation that applies a -:ref:`user-defined function ` on each data record -and returns a new datastream as the result. Datastreams transformations can be composed to -express a chain of computations. - -.. _transform_datastreams_transformations: - ---------------- -Transformations ---------------- - -There are two main types of transformations: - -* One-to-one: each input block will contribute to only one output - block, such as :meth:`ds.map_batches() `. -* All-to-all: input blocks can contribute to multiple output blocks, - such as :meth:`ds.random_shuffle() `. 
- -Here is a table listing some common transformations supported by Ray Data. - -.. list-table:: Common Ray Data transformations. - :header-rows: 1 - - * - Transformation - - Type - - Description - * - :meth:`ds.map_batches() ` - - One-to-one - - Apply a given function to batches of records of this datastream. - * - :meth:`ds.add_column() ` - - One-to-one - - Apply a given function to batches of records to create a new column. - * - :meth:`ds.drop_columns() ` - - One-to-one - - Drop the given columns from the datastream. - * - :meth:`ds.streaming_split() ` - - One-to-one - - | Split the datastream into N disjoint iterators. - * - :meth:`ds.repartition(shuffle=False) ` - - One-to-one - - | Repartition the datastream into N blocks, without shuffling the data. - * - :meth:`ds.repartition(shuffle=True) ` - - All-to-all - - | Repartition the datastream into N blocks, shuffling the data during repartition. - * - :meth:`ds.random_shuffle() ` - - All-to-all - - | Randomly shuffle the elements of this datastream. - * - :meth:`ds.sort() ` - - All-to-all - - | Sort the datastream by a sortkey. - * - :meth:`ds.groupby() ` - - All-to-all - - | Group the datastream by a groupkey. - -.. tip:: - - Datastreams also provides the convenience transformation methods :meth:`ds.map() `, - :meth:`ds.flat_map() `, and :meth:`ds.filter() `, - which are not vectorized (slower than :meth:`ds.map_batches() `), but - may be useful for development. - -The following is an example to make use of those transformation APIs for processing -the Iris datastream. - -.. literalinclude:: ./doc_code/transforming_datastreams.py - :language: python - :start-after: __datastream_transformation_begin__ - :end-before: __datastream_transformation_end__ - -.. _transform_datastreams_writing_udfs: - -------------------------------------- -Writing User-defined Functions (UDFs) -------------------------------------- - -User-defined functions (UDFs) are routines that apply on one row (e.g. 
-:meth:`.map() `) or a batch of rows (e.g. -:meth:`.map_batches() `) of a datastream. UDFs let you -express your customized business logic in transformations. Here we will focus on -:meth:`.map_batches() ` as it's the primary mapping -API in Datastreams. - -Here are the basics that you need to know about UDFs: - -* A UDF can be either a function, a generator, or if using the :ref:`actor compute strategy `, a :ref:`callable class `. -* Select the UDF input :ref:`batch format ` using the ``batch_format`` argument. -* The UDF output type determines the Datastream schema of the transformation result. - -.. _transform_datastreams_callable_classes: - -Types of UDFs -============= -There are three types of UDFs that you can use with Ray Data: Function UDFs, Callable Class UDFs, and Generator UDFs. - -.. tab-set:: - - .. tab-item:: "Function UDFs" - - The most basic UDFs are functions that take in a batch or row as input, and returns a batch or row as output. See :ref:`transform_datastreams_batch_formats` for the supported batch formats. - - .. literalinclude:: ./doc_code/transforming_datastreams.py - :language: python - :start-after: __writing_default_udfs_tabular_begin__ - :end-before: __writing_default_udfs_tabular_end__ - - .. tab-item:: "Callable Class UDFs" - - With the actor compute strategy, you can use per-row and per-batch UDFs - *callable classes*, i.e., classes that implement the ``__call__`` magic method. You - can use the constructor of the class for stateful setup, and it is only invoked once - per worker actor. - - Callable classes are useful if you need to load expensive state (such as a model) for the UDF. By using an actor class, you only need to load the state once in the beginning, rather than for each batch. - - .. note:: - These transformation APIs take the uninstantiated callable class as an argument, - not an instance of the class. - - .. 
literalinclude:: ./doc_code/transforming_datastreams.py - :language: python - :start-after: __writing_callable_classes_udfs_begin__ - :end-before: __writing_callable_classes_udfs_end__ - - .. tab-item:: "Generator UDFs" - - UDFs can also be written as Python generators, yielding multiple outputs for a batch or row instead of a single item. Generator UDFs are useful when returning large objects. Instead of returning a very large output batch, ``fn`` can instead yield the output batch in chunks to avoid excessive heap memory usage. - - .. warning:: - When applying a generator UDF on individual rows, make sure to use the :meth:`.flat_map() ` API and not the :meth:`.map() ` API. - - .. literalinclude:: ./doc_code/transforming_datastreams.py - :language: python - :start-after: __writing_generator_udfs_begin__ - :end-before: __writing_generator_udfs_end__ - - -.. _transform_datastreams_batch_formats: - -UDF Input Batch Format -====================== - -Choose the *batch format* of the data given to UDFs -by setting the ``batch_format`` option of :meth:`.map_batches() `. -Here is an overview of the available batch formats: - -.. tab-set:: - - .. tab-item:: "default" - - The "default" batch format presents data as follows for each Datastream type: - - * **Tabular Datastreams**: Each batch will be a - `pandas.DataFrame `__. - This may incur a conversion cost if the underlying Datastream block is not - zero-copy convertible from an Arrow table. - - .. literalinclude:: ./doc_code/transforming_datastreams.py - :language: python - :start-after: __writing_default_udfs_tabular_begin__ - :end-before: __writing_default_udfs_tabular_end__ - - * **Tensor Datastreams** (single-column): Each batch will be a single - `numpy.ndarray `__ - containing the single tensor column for this batch. - - .. 
literalinclude:: ./doc_code/transforming_datastreams.py - :language: python - :start-after: __writing_default_udfs_tensor_begin__ - :end-before: __writing_default_udfs_tensor_end__ - - * **Simple Datastreams**: Each batch will be a Python list. - - .. literalinclude:: ./doc_code/transforming_datastreams.py - :language: python - :start-after: __writing_default_udfs_list_begin__ - :end-before: __writing_default_udfs_list_end__ - - .. tab-item:: "pandas" - - The ``"pandas"`` batch format presents batches in - `pandas.DataFrame `__ - format. If converting a simple datastream to Pandas DataFrame batches, a single-column - dataframe with the column ``"__value__"`` will be created. - - .. literalinclude:: ./doc_code/transforming_datastreams.py - :language: python - :start-after: __writing_pandas_udfs_begin__ - :end-before: __writing_pandas_udfs_end__ - - .. tab-item:: "pyarrow" - - The ``"pyarrow"`` batch format presents batches in - `pyarrow.Table `__ - format. If converting a simple datastream to Arrow Table batches, a single-column table - with the column ``"__value__"`` will be created. - - .. literalinclude:: ./doc_code/transforming_datastreams.py - :language: python - :start-after: __writing_arrow_udfs_begin__ - :end-before: __writing_arrow_udfs_end__ - - .. tab-item:: "numpy" - - The ``"numpy"`` batch format presents batches in - `numpy.ndarray `__ - format as follows: - - * **Tabular Datastreams**: Each batch will be a dictionary of NumPy - ndarrays (``Dict[str, np.ndarray]``), with each key-value pair representing a column - in the table. - - * **Tensor Datastreams** (single-column): Each batch will be a single - `numpy.ndarray `__ - containing the single tensor column for this batch. - - * **Simple Datastreams**: Each batch will be a single NumPy ndarray, where Datastreams will - attempt to convert each list-batch to an ndarray. - - .. 
literalinclude:: ./doc_code/transforming_datastreams.py - :language: python - :start-after: __writing_numpy_udfs_begin__ - :end-before: __writing_numpy_udfs_end__ - -Converting between the underlying Datastreams data representations (Arrow, Pandas, and -Python lists) and the requested batch format (``"default"``, ``"pandas"``, -``"pyarrow"``, ``"numpy"``) may incur data copies; which conversions cause data copying -is given in the below table: - - -.. list-table:: Data Format Conversion Costs - :header-rows: 1 - :stub-columns: 1 - - * - Datastream Format x Batch Format - - ``"default"`` - - ``"pandas"`` - - ``"numpy"`` - - ``"pyarrow"`` - - ``None`` - * - ``"pandas"`` - - Zero-copy - - Zero-copy - - Copy* - - Copy* - - Zero-copy - * - ``"arrow"`` - - Copy* - - Copy* - - Zero-copy* - - Zero-copy - - Zero-copy - * - ``"simple"`` - - Copy - - Copy - - Copy - - Copy - - Copy - -.. note:: - \* No copies occur when converting between Arrow, Pandas, and NumPy formats for columns - represented in our tensor extension type (unless data is boolean). Copies **always** - occur when converting boolean data from/to Arrow to/from Pandas/NumPy, since Arrow - bitpacks boolean data while Pandas/NumPy does not. - -.. tip:: - - Prefer using vectorized operations on the ``pandas.DataFrame``, - ``pyarrow.Table``, and ``numpy.ndarray`` types for better performance. For - example, suppose you want to compute the sum of a column in ``pandas.DataFrame``: - instead of iterating over each row of a batch and summing up values of that column, - use ``df_batch["col_foo"].sum()``. - -.. tip:: - - If the UDF for :meth:`ds.map_batches() ` does **not** - mutate its input, we can prevent an unnecessary data batch copy by specifying - ``zero_copy_batch=True``, which will provide the UDF with zero-copy, read-only - batches. See the :meth:`ds.map_batches() ` docstring for - more information. - -.. 
_transform_datastreams_batch_output_types: - -Batch UDF Output Types -====================== - -The following output types are allowed for batch UDFs (e.g., -:meth:`ds.map_batches() `). The following describes -how they are interpreted to create the transformation result: - -.. tab-set:: - - .. tab-item:: pd.DataFrame - - Returning ``pd.DataFrame`` creates a Tabular datastream as the transformation result: - - .. literalinclude:: ./doc_code/transforming_datastreams.py - :language: python - :start-after: __writing_pandas_out_udfs_begin__ - :end-before: __writing_pandas_out_udfs_end__ - - .. tab-item:: pa.Table - - Returning ``pa.Table`` creates a Tabular datastream as the transformation result: - - .. literalinclude:: ./doc_code/transforming_datastreams.py - :language: python - :start-after: __writing_arrow_out_udfs_begin__ - :end-before: __writing_arrow_out_udfs_end__ - - .. tab-item:: np.ndarray - - Returning ``np.ndarray`` creates a single-column Tensor datastream as the transformation result: - - .. literalinclude:: ./doc_code/transforming_datastreams.py - :language: python - :start-after: __writing_numpy_out_udfs_begin__ - :end-before: __writing_numpy_out_udfs_end__ - - .. tab-item:: Dict[str, np.ndarray] - - Returning ``Dict[str, np.ndarray]`` creates a multi-column Tensor datastream as the transformation result. - - If a column tensor is 1-dimensional, then the native Arrow 1D list - type is used; if a column tensor has 2 or more dimensions, then the Datastream - :ref:`tensor extension type ` to embed these - n-dimensional tensors in the Arrow table. - - .. literalinclude:: ./doc_code/transforming_datastreams.py - :language: python - :start-after: __writing_numpy_dict_out_udfs_begin__ - :end-before: __writing_numpy_dict_out_udfs_end__ - - .. tab-item:: list - - Returning ``list`` creates a simple Python object datastream as the transformation result: - - .. 
literalinclude:: ./doc_code/transforming_datastreams.py - :language: python - :start-after: __writing_simple_out_udfs_begin__ - :end-before: __writing_simple_out_udfs_end__ - -.. _transform_datastreams_row_output_types: - -Row UDF Output Types -==================== - -The following output types are allowed for per-row UDFs (e.g., -:meth:`ds.map() `): - -.. tab-set:: - - .. tab-item:: dict - - Returning a ``dict`` of Arrow-compatible data types creates a Tabular datastream - as the transformation result. If any dict values are not Arrow-compatible, then - a simple Python object datastream will be created: - - .. literalinclude:: ./doc_code/transforming_datastreams.py - :language: python - :start-after: __writing_dict_out_row_udfs_begin__ - :end-before: __writing_dict_out_row_udfs_end__ - - .. tab-item:: np.ndarray - - Returning ``np.ndarray`` creates a single-column Tensor datastream as the transformation result: - - .. literalinclude:: ./doc_code/transforming_datastreams.py - :language: python - :start-after: __writing_numpy_out_row_udfs_begin__ - :end-before: __writing_numpy_out_row_udfs_end__ - - .. tab-item:: object - - Other return row types will create a simple Python object datastream as the transformation result: - - .. literalinclude:: ./doc_code/transforming_datastreams.py - :language: python - :start-after: __writing_simple_out_row_udfs_begin__ - :end-before: __writing_simple_out_row_udfs_end__ - -.. _transform_datastreams_configuring_batch_size: - ----------------------- -Configuring Batch Size ----------------------- - -:meth:`ds.map_batches() ` is the canonical parallel -transformation API for Datastreams: it launches parallel tasks over the underlying Datastreams -blocks and maps UDFs over data batches within those tasks, allowing the UDF to -implement vectorized operations on batches. An important parameter to -set is ``batch_size``, which controls the size of the batches provided to the UDF. - -.. 
literalinclude:: ./doc_code/transforming_datastreams.py - :language: python - :start-after: __configuring_batch_size_begin__ - :end-before: __configuring_batch_size_end__ - -Increasing ``batch_size`` can result in faster execution by better leveraging vectorized -operations and hardware, reducing batch slicing and concatenation overhead, and overall -saturation of CPUs/GPUs, but will also result in higher memory utilization, which can -lead to out-of-memory failures. If encountering OOMs, decreasing your ``batch_size`` may -help. - -.. note:: - The default ``batch_size`` of ``4096`` may be too large for datastreams with large rows - (e.g. tables with many columns or a collection of large images). - -If you specify a ``batch_size`` that's larger than your ``Datastream`` blocks, Datastreams -will bundle multiple blocks together for a single task in order to better satisfy -``batch_size``. If ``batch_size`` is a lot larger than your ``Datastream`` blocks (e.g. if -your datastream was created with too large of a ``parallelism`` and/or the ``batch_size`` -is set to too large of a value for your datastream), the number of parallel tasks -may be less than expected. - -If your ``Datastream`` blocks are smaller than your ``batch_size`` and you want to increase -:meth:`ds.map_batches() ` parallelism, decrease your -``batch_size`` to prevent this block bundling. If you think that your ``Datastream`` blocks -are too small, try decreasing ``parallelism`` during the read to create larger blocks. - -.. note:: - The size of the batches provided to the UDF may be smaller than the provided - ``batch_size`` if ``batch_size`` doesn't evenly divide the block(s) sent to a given - task. - -.. note:: - Block bundling (processing multiple blocks in a single task) will not occur if - ``batch_size`` is not set; instead, each task will receive a single block. 
If a block - is smaller than the default ``batch_size`` (4096), then the batch provided to the UDF - in that task will the same size as the block, and will therefore be smaller than the - default ``batch_size``. - -.. _transform_datastreams_compute_strategy: - ----------------- -Compute Strategy ----------------- - -Datastreams transformations are executed by either :ref:`Ray tasks ` -or :ref:`Ray actors ` across a Ray cluster. By default, Ray tasks are -used. For transformations that require expensive setup, -it's preferrable to use Ray actors, which are stateful and allow setup to be reused -for efficiency. For a fixed-size actor pool, specify ``compute=ActorPoolStrategy(size=n)``. -For an autoscaling actor pool, use ``compute=ray.data.ActorPoolStrategy(min_size=m, max_size=n)``. - -The following is an example of using the Ray tasks and actors compute strategy -for batch inference: - -.. literalinclude:: ./doc_code/transforming_datastreams.py - :language: python - :start-after: __datastream_compute_strategy_begin__ - :end-before: __datastream_compute_strategy_end__ - -.. _data-groupbys: - --------------------------- -Group-bys and aggregations --------------------------- - -Unlike mapping operations, groupbys and aggregations are global. Grouped aggregations -are executed lazily. Global aggregations are executed *eagerly* and block until the -aggregation has been computed. - -.. code-block:: python - - ds: ray.data.Datastream = ray.data.from_items([ - {"A": x % 3, "B": 2 * x, "C": 3 * x} - for x in range(10)]) - - # Group by the A column and calculate the per-group mean for B and C columns. 
- agg_ds: ray.data.Datastream = ds.groupby("A").mean(["B", "C"]).materialize() - # -> Sort Sample: 100%|███████████████████████████████████████| 10/10 [00:01<00:00, 9.04it/s] - # -> GroupBy Map: 100%|███████████████████████████████████████| 10/10 [00:00<00:00, 23.66it/s] - # -> GroupBy Reduce: 100%|████████████████████████████████████| 10/10 [00:00<00:00, 937.21it/s] - # -> Datastream(num_blocks=10, num_rows=3, schema={}) - agg_ds.to_pandas() - # -> - # A mean(B) mean(C) - # 0 0 9.0 13.5 - # 1 1 8.0 12.0 - # 2 2 10.0 15.0 - - # Global mean on B column. - ds.mean("B") - # -> GroupBy Map: 100%|███████████████████████████████████████| 10/10 [00:00<00:00, 2851.91it/s] - # -> GroupBy Reduce: 100%|████████████████████████████████████| 1/1 [00:00<00:00, 319.69it/s] - # -> 9.0 - - # Global mean on multiple columns. - ds.mean(["B", "C"]) - # -> GroupBy Map: 100%|███████████████████████████████████████| 10/10 [00:00<00:00, 1730.32it/s] - # -> GroupBy Reduce: 100%|████████████████████████████████████| 1/1 [00:00<00:00, 231.41it/s] - # -> {'mean(B)': 9.0, 'mean(C)': 13.5} - - # Multiple global aggregations on multiple columns. - from ray.data.aggregate import Mean, Std - ds.aggregate(Mean("B"), Std("B", ddof=0), Mean("C"), Std("C", ddof=0)) - # -> GroupBy Map: 100%|███████████████████████████████████████| 10/10 [00:00<00:00, 1568.73it/s] - # -> GroupBy Reduce: 100%|████████████████████████████████████| 1/1 [00:00<00:00, 133.51it/s] - # -> {'mean(A)': 0.9, 'std(A)': 0.8306623862918076, 'mean(B)': 9.0, 'std(B)': 5.744562646538029} - -Combine aggreations with batch mapping to transform datastreams using computed statistics. -For example, you can efficiently standardize feature columns and impute missing values -with calculated column means. - -.. code-block:: python - - # Impute missing values with the column mean. 
- b_mean = ds.mean("B") - # -> GroupBy Map: 100%|███████████████████████████████████████| 10/10 [00:00<00:00, 4054.03it/s] - # -> GroupBy Reduce: 100%|████████████████████████████████████| 1/1 [00:00<00:00, 359.22it/s] - # -> 9.0 - - def impute_b(df: pd.DataFrame): - df["B"].fillna(b_mean) - return df - - ds = ds.map_batches(impute_b, batch_format="pandas") - # -> MapBatches(impute_b) - # +- Datastream(num_blocks=10, num_rows=10, schema={A: int64, B: int64, C: int64}) - - # Standard scaling of all feature columns. - stats = ds.aggregate(Mean("B"), Std("B"), Mean("C"), Std("C")) - # -> MapBatches(impute_b): 100%|██████████████████████████████| 10/10 [00:01<00:00, 7.16it/s] - # -> GroupBy Map: 100%|███████████████████████████████████████| 10/10 [00:00<00:00, 1260.99it/s] - # -> GroupBy Reduce: 100%|████████████████████████████████████| 1/1 [00:00<00:00, 128.77it/s] - # -> {'mean(B)': 9.0, 'std(B)': 6.0553007081949835, 'mean(C)': 13.5, 'std(C)': 9.082951062292475} - - def batch_standard_scaler(df: pd.DataFrame): - def column_standard_scaler(s: pd.Series): - s_mean = stats[f"mean({s.name})"] - s_std = stats[f"std({s.name})"] - return (s - s_mean) / s_std - - cols = df.columns.difference(["A"]) - df.loc[:, cols] = df.loc[:, cols].transform(column_standard_scaler) - return df - - ds = ds.map_batches(batch_standard_scaler, batch_format="pandas") - ds.materialize() - # -> Map Progress: 100%|██████████████████████████████████████| 10/10 [00:00<00:00, 144.79it/s] - # -> Datastream(num_blocks=10, num_rows=10, schema={A: int64, B: double, C: double}) - --------------- -Shuffling data --------------- - -Call :meth:`Datastream.random_shuffle() ` to -perform a global shuffle. - -.. doctest:: - - >>> import ray - >>> datastream = ray.data.range(10) - >>> datastream.random_shuffle().take_all() # doctest: +SKIP - [7, 0, 9, 3, 5, 1, 4, 2, 8, 6] - -For better performance, perform a local shuffle. Read -:ref:`Shuffling Data ` in the AIR user guide to learn more. 
diff --git a/doc/source/data/user-guide.rst b/doc/source/data/user-guide.rst index 7acb35968480..e9fe208432f3 100644 --- a/doc/source/data/user-guide.rst +++ b/doc/source/data/user-guide.rst @@ -4,19 +4,19 @@ User Guides =========== -If you’re new to Ray Datasets, we recommend starting with the -:ref:`Ray Datasets Quick Start `. -This user guide will help you navigate the Ray Datasets project and +If you’re new to Ray Data, we recommend starting with the +:ref:`Ray Data Quick Start `. +This user guide will help you navigate the Ray Data project and -show you how achieve several tasks. +show you how to achieve several tasks. .. toctree:: :maxdepth: 2 - creating-datastreams - transforming-datastreams - consuming-datastreams + loading-data + transforming-data + consuming-data batch_inference - dataset-tensor-support + data-tensor-support custom-datasource data-internals performance-tips diff --git a/doc/source/ray-air/computer-vision.rst b/doc/source/ray-air/computer-vision.rst index e6418c233583..430a20de05f7 100644 --- a/doc/source/ray-air/computer-vision.rst +++ b/doc/source/ray-air/computer-vision.rst @@ -116,7 +116,7 @@ Reading image data :dedent: -For more information on creating datastreams, see :ref:`Loading Data `. +For more information on creating datastreams, see :ref:`Loading Data `. Transforming images @@ -157,7 +157,7 @@ standard way to preprocess data with Ray. For more information on transforming data, see :ref:`Using Preprocessors ` and -:ref:`Transforming Data `. +:ref:`Transforming Data `. 
Training vision models ---------------------- diff --git a/doc/source/ray-air/examples/gptj_batch_prediction.ipynb b/doc/source/ray-air/examples/gptj_batch_prediction.ipynb index 5dafa354f6ea..d748f029b392 100644 --- a/doc/source/ray-air/examples/gptj_batch_prediction.ipynb +++ b/doc/source/ray-air/examples/gptj_batch_prediction.ipynb @@ -95,7 +95,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Since we will be using a pretrained model from Hugging Face hub, the simplest way is to use {meth}`map_batches ` with a [callable class UDF](transform_datastreams_callable_classes). This will allow us to save time by initializing a model just once and then feed it multiple batches of data." + "Since we will be using a pretrained model from Hugging Face hub, the simplest way is to use {meth}`map_batches ` with a [callable class UDF](transforming_data_actors). This will allow us to save time by initializing a model just once and then feed it multiple batches of data." ] }, { diff --git a/doc/source/ray-air/examples/stablediffusion_batch_prediction.ipynb b/doc/source/ray-air/examples/stablediffusion_batch_prediction.ipynb index 9068a6f8c8c4..444f02bebb70 100644 --- a/doc/source/ray-air/examples/stablediffusion_batch_prediction.ipynb +++ b/doc/source/ray-air/examples/stablediffusion_batch_prediction.ipynb @@ -89,7 +89,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Since we will be using a pretrained model from Hugging Face hub, the simplest way is to use {meth}`map_batches ` with a [callable class UDF](transform_datastreams_callable_classes). This will allow us to save time by initializing a model just once and then feed it multiple batches of data." + "Since we will be using a pretrained model from Hugging Face hub, the simplest way is to use {meth}`map_batches ` with a [callable class UDF](transforming_data_actors). This will allow us to save time by initializing a model just once and then feed it multiple batches of data." 
] }, { diff --git a/doc/source/ray-overview/use-cases.rst b/doc/source/ray-overview/use-cases.rst index bbb73e0889d6..41b49eadad4c 100644 --- a/doc/source/ray-overview/use-cases.rst +++ b/doc/source/ray-overview/use-cases.rst @@ -118,7 +118,7 @@ To learn more about running batch inference with Ray, see the :ref:`batch infere --- :img-top: /images/ray_logo.png - .. link-button:: /data/batch-inference + .. link-button:: /data/batch_inference :type: ref :text: [User Guide] Batch Inference with Ray Data :classes: btn-link btn-block stretched-link diff --git a/doc/source/templates/01_batch_inference/README.md b/doc/source/templates/01_batch_inference/README.md index af76ebcf9ec1..4ed2de358d19 100644 --- a/doc/source/templates/01_batch_inference/README.md +++ b/doc/source/templates/01_batch_inference/README.md @@ -16,12 +16,12 @@ to help you build your own application! At a high level, this template will: 1. [Load your dataset using Ray - Data.](https://docs.ray.io/en/latest/data/creating-datastreams.html) + Data.](https://docs.ray.io/en/latest/data/loading-data.html) 2. [Preprocess your dataset before feeding it to your - model.](https://docs.ray.io/en/latest/data/transforming-datastreams.html) + model.](https://docs.ray.io/en/latest/data/transforming-data.html) 3. [Initialize your model and perform inference on a shard of your dataset with a remote - actor.](https://docs.ray.io/en/latest/data/transforming-datastreams.html#callable-class-udfs) + actor.](https://docs.ray.io/en/latest/data/transforming-data.html#callable-class-udfs) 4. 
[Save your prediction results.](https://docs.ray.io/en/latest/data/api/input_output.html) diff --git a/doc/source/templates/01_batch_inference/batch_inference.ipynb b/doc/source/templates/01_batch_inference/batch_inference.ipynb index 14b109020e87..a6b8f7222a1a 100644 --- a/doc/source/templates/01_batch_inference/batch_inference.ipynb +++ b/doc/source/templates/01_batch_inference/batch_inference.ipynb @@ -13,9 +13,9 @@ "This template walks through GPU batch prediction on an image dataset using a PyTorch model, but the framework and data format are there just to help you build your own application!\n", "\n", "At a high level, this template will:\n", - "1. [Load your dataset using Ray Data.](https://docs.ray.io/en/latest/data/creating-datasets.html)\n", - "2. [Preprocess your dataset before feeding it to your model.](https://docs.ray.io/en/latest/data/transforming-datasets.html)\n", - "3. [Initialize your model and perform inference on a shard of your dataset with a remote actor.](https://docs.ray.io/en/latest/data/transforming-datasets.html#writing-user-defined-functions-udfs)\n", + "1. [Load your dataset using Ray Data.](https://docs.ray.io/en/latest/data/loading-data.html)\n", + "2. [Preprocess your dataset before feeding it to your model.](https://docs.ray.io/en/latest/data/transforming-data.html)\n", + "3. [Initialize your model and perform inference on a shard of your dataset with a remote actor.](https://docs.ray.io/en/latest/data/transforming-data.html#reduce-setup-overheads-using-actors)\n", "4. [Save your prediction results.](https://docs.ray.io/en/latest/data/api/input_output.html)\n", "\n", "> Slot in your code below wherever you see the ✂️ icon to build a many model training Ray application off of this template!" 
diff --git a/doc/source/train/examples/lightning/lightning_cola_advanced.ipynb b/doc/source/train/examples/lightning/lightning_cola_advanced.ipynb index 363a3d164aeb..de527847f449 100644 --- a/doc/source/train/examples/lightning/lightning_cola_advanced.ipynb +++ b/doc/source/train/examples/lightning/lightning_cola_advanced.ipynb @@ -309,7 +309,7 @@ "\n", "To feed data into LightningTrainer, we need to configure the following arguments:\n", "\n", - "- `datasets`: A dictionary of the input Ray datasets, with special keys \"train\" and \"val\".\n", + "- `datasets`: A dictionary of the input Ray datastreams, with special keys \"train\" and \"val\".\n", "- `datasets_iter_config`: The argument list of {meth}`iter_torch_batches() `. It defines the way we iterate dataset shards for each worker.\n", "- `preprocessor`: The preprocessor that will be applied to the input dataset.\n", "\n", @@ -1338,7 +1338,7 @@ "metadata": {}, "source": [ ":::{note}\n", - "Note that we are using Ray Dataset for data ingestion for faster preprocessing here, but you can also continue to use the native `PyTorch DataLoader` or `LightningDataModule`. See {ref}`this example `. \n", + "Note that we are using Ray Data for data ingestion for faster preprocessing here, but you can also continue to use the native `PyTorch DataLoader` or `LightningDataModule`. See {ref}`this example `. \n", "\n", ":::" ] @@ -1430,7 +1430,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "We obtained a Ray dataset containing predictions from `batch_predictor.predict()`. Now we can easily evaluate the results with just a few lines of code:" + "We obtained a Ray datastream containing predictions from `batch_predictor.predict()`. 
Now we can easily evaluate the results with just a few lines of code:" ] }, { diff --git a/python/ray/data/datastream.py b/python/ray/data/datastream.py index 8b039dca36c0..44c8847b8b48 100644 --- a/python/ray/data/datastream.py +++ b/python/ray/data/datastream.py @@ -436,7 +436,7 @@ def map_batches( Here ``fn`` returns the same batch type as the input, but your ``fn`` can also return a different batch type (e.g., pd.DataFrame). Read more about - :ref:`user-defined function output types `. + :ref:`Transforming Data `. >>> from typing import Dict >>> def map_fn(batch: Dict[str, np.ndarray]) -> Dict[str, np.ndarray]: @@ -512,9 +512,7 @@ def map_batches( will be writable, which will require an extra copy to guarantee. If ``fn`` mutates its input, this will need to be ``False`` in order to avoid "assignment destination is read-only" or "buffer source array is - read-only" errors. Default is ``False``. See - :ref:`batch format docs ` for details - on which format conversion always require a copy. + read-only" errors. Default is ``False``. fn_args: Positional arguments to pass to ``fn`` after the first argument. These arguments are top-level arguments to the underlying Ray task. fn_kwargs: Keyword arguments to pass to ``fn``. These arguments are From 3bf09f4b14856dc0e664348166b50bd1e06bc032 Mon Sep 17 00:00:00 2001 From: Max Pumperla Date: Wed, 3 May 2023 12:17:02 +0200 Subject: [PATCH 207/424] [docs] sphinx design 5/N (#34949) Signed-off-by: Max Pumperla --- doc/source/data/examples/index.rst | 24 +- doc/source/ray-air/user-guides.rst | 120 +-- doc/source/ray-overview/use-cases.rst | 1007 +++++++++++++------------ 3 files changed, 595 insertions(+), 556 deletions(-) diff --git a/doc/source/data/examples/index.rst b/doc/source/data/examples/index.rst index 0f815bd224f7..a1dcea2982e5 100644 --- a/doc/source/data/examples/index.rst +++ b/doc/source/data/examples/index.rst @@ -17,7 +17,7 @@ modalities and types. 
Here you will find a few end-to-end examples of some basic processing with Ray Data on tabular data, text (coming soon!), and imagery (coming soon!). -.. grid:: 3 +.. grid:: 1 2 3 3 :gutter: 2 :class-container: container pb-4 @@ -25,12 +25,7 @@ soon!). :img-top: /images/taxi.png :class-img-top: pt-5 w-75 d-block mx-auto - +++ .. button-ref:: nyc_taxi_basic_processing - :ref-type: doc - :color: primary - :outline: - :expand: Processing the NYC taxi dataset @@ -38,12 +33,7 @@ soon!). :img-top: /images/taxi.png :class-img-top: pt-5 w-75 d-block mx-auto - +++ .. button-ref:: batch_training - :ref-type: doc - :color: primary - :outline: - :expand: Batch Training with Ray Data @@ -51,12 +41,7 @@ soon!). :img-top: /images/ocr.jpg :class-img-top: pt-5 w-75 d-block mx-auto - +++ .. button-ref:: ocr_example - :ref-type: doc - :color: primary - :outline: - :expand: Scaling OCR with Ray Data @@ -66,7 +51,7 @@ Other Examples -------------- -.. grid:: 3 +.. grid:: 1 2 3 3 :gutter: 2 :class-container: container pb-4 @@ -74,11 +59,6 @@ Other Examples :img-top: ../images/datastream-arch.svg :class-img-top: pt-5 w-75 d-block mx-auto - +++ .. button-ref:: random-access - :ref-type: doc - :color: primary - :outline: - :expand: Random Data Access (Experimental) diff --git a/doc/source/ray-air/user-guides.rst b/doc/source/ray-air/user-guides.rst index 0650378739b8..2c5056689160 100644 --- a/doc/source/ray-air/user-guides.rst +++ b/doc/source/ray-air/user-guides.rst @@ -9,80 +9,94 @@ User Guides AIR User Guides --------------- -.. panels:: - :container: text-center - :column: col-md-4 px-2 py-2 - :img-top-cls: pt-5 w-75 d-block mx-auto fixed-height-img +.. grid:: 3 + :gutter: 2 + :class-container: container pb-4 - --- - :img-top: /ray-air/images/preprocessors.svg + .. grid-item-card:: + :img-top: /ray-air/images/preprocessors.svg + :class-img-top: pt-5 w-75 d-block mx-auto fixed-height-img - .. 
https://docs.google.com/drawings/d/1ZIbsXv5vvwTVIEr2aooKxuYJ_VL7-8VMNlRinAiPaTI/edit + +++ + .. button-ref:: /ray-air/preprocessors + :color: primary + :outline: + :expand: - .. link-button:: /ray-air/preprocessors - :type: ref - :text: Using Preprocessors - :classes: btn-link btn-block stretched-link + Using Preprocessors - --- - :img-top: /ray-air/images/train-icon.svg + .. grid-item-card:: + :img-top: /ray-air/images/train-icon.svg + :class-img-top: pt-5 w-75 d-block mx-auto fixed-height-img - .. https://docs.google.com/drawings/d/15SXGHbKPWdrzx3aTAIFcO2uh_s6Q7jLU03UMuwKSzzM/edit + +++ + .. button-ref:: trainers + :color: primary + :outline: + :expand: - .. link-button:: trainers - :type: ref - :text: Using Trainers - :classes: btn-link btn-block stretched-link + Using Trainers - --- - :img-top: /ray-air/images/ingest-icon.svg + .. grid-item-card:: + :img-top: /ray-air/images/ingest-icon.svg + :class-img-top: pt-5 w-75 d-block mx-auto fixed-height-img - .. https://docs.google.com/drawings/d/10GZE_6s6ss8PSxLYyzcbj6yEalWO4N7MS7ao8KO7ne0/edit + +++ + .. button-ref:: air-ingest + :color: primary + :outline: + :expand: - .. link-button:: air-ingest - :type: ref - :text: Configuring Training Datasets - :classes: btn-link btn-block stretched-link + Configuring Training Datasets - --- - :img-top: /ray-air/images/tuner.svg + .. grid-item-card:: + :img-top: /ray-air/images/tuner.svg + :class-img-top: pt-5 w-75 d-block mx-auto fixed-height-img - .. https://docs.google.com/drawings/d/1yMd12iMkyo6DGrFoET1TIlKfFnXX9dfh2u3GSdTz6W4/edit + +++ + .. button-ref:: /ray-air/tuner + :color: primary + :outline: + :expand: - .. link-button:: /ray-air/tuner - :type: ref - :text: Configuring Hyperparameter Tuning - :classes: btn-link btn-block stretched-link + Configuring Hyperparameter Tuning - --- - :img-top: /ray-air/images/predictors.png + .. grid-item-card:: + :img-top: /ray-air/images/predictors.png + :class-img-top: pt-5 w-75 d-block mx-auto fixed-height-img - .. 
https://docs.google.com/presentation/d/1jfkQk0tGqgkLgl10vp4-xjcbYG9EEtlZV_Vnve_NenQ/edit#slide=id.g131c21f5e88_0_549 + +++ + .. button-ref:: predictors + :color: primary + :outline: + :expand: - .. link-button:: predictors - :type: ref - :text: Using Predictors for Inference - :classes: btn-link btn-block stretched-link + Using Predictors for Inference - --- - :img-top: /ray-air/images/serve-icon.svg + .. grid-item-card:: + :img-top: /ray-air/images/serve-icon.svg + :class-img-top: pt-5 w-75 d-block mx-auto fixed-height-img - .. https://docs.google.com/drawings/d/1-rg77bV-vEMURXZw5_mIOUFM3FObIIYbFOiYzFJW_68/edit + +++ + .. button-ref:: /ray-air/examples/serving_guide + :color: primary + :outline: + :expand: - .. link-button:: /ray-air/examples/serving_guide - :type: ref - :text: Deploying Predictors with Serve - :classes: btn-link btn-block stretched-link + Deploying Predictors with Serve - --- - :img-top: /ray-air/images/air-deploy.svg + .. grid-item-card:: + :img-top: /ray-air/images/air-deploy.svg + :class-img-top: pt-5 w-75 d-block mx-auto fixed-height-img - .. https://docs.google.com/drawings/d/1ja1RfNCEFn50B9FHWSemUzwhtPAmVyoak1JqEJUmxs4/edit + +++ + .. button-ref:: air-deployment + :color: primary + :outline: + :expand: + + How to Deploy AIR - .. link-button:: air-deployment - :type: ref - :text: How to Deploy AIR - :classes: btn-link btn-block stretched-link .. _air-env-vars: diff --git a/doc/source/ray-overview/use-cases.rst b/doc/source/ray-overview/use-cases.rst index 41b49eadad4c..51b45393a9b3 100644 --- a/doc/source/ray-overview/use-cases.rst +++ b/doc/source/ray-overview/use-cases.rst @@ -18,82 +18,82 @@ Large language models (LLMs) and generative AI are rapidly changing industries, Learn more about how Ray scales LLMs and generative AI with the following resources. -.. panels:: - :container: container pb-3 - :column: col-md-3 px-1 py-1 - :img-top-cls: p-2 w-75 d-block mx-auto fixed-height-img - - --- - :img-top: /images/ray_logo.png - - .. 
link-button:: https://www.anyscale.com/blog/ray-common-production-challenges-for-generative-ai-infrastructure - :type: url - :text: [Blog] How Ray solves common production challenges for generative AI infrastructure - :classes: btn-link btn-block stretched-link webCrawler - - --- - :img-top: /images/ray_logo.png - - .. link-button:: https://www.anyscale.com/blog/training-175b-parameter-language-models-at-1000-gpu-scale-with-alpa-and-ray - :type: url - :text: [Blog] Training 175B Parameter Language Models at 1000 GPU scale with Alpa and Ray - :classes: btn-link btn-block stretched-link webCrawler - - --- - :img-top: /images/ray_logo.png - - .. link-button:: https://www.anyscale.com/blog/faster-stable-diffusion-fine-tuning-with-ray-air - :type: url - :text: [Blog] Faster stable diffusion fine-tuning with Ray AIR - :classes: btn-link btn-block stretched-link webCrawler - - --- - :img-top: /images/ray_logo.png - - .. link-button:: https://www.anyscale.com/blog/how-to-fine-tune-and-serve-llms-simply-quickly-and-cost-effectively-using - :type: url - :text: [Blog] How to fine tune and serve LLMs simply, quickly and cost effectively using Ray + DeepSpeed + HuggingFace - :classes: btn-link btn-block stretched-link webCrawler - - --- - :img-top: /images/ray_logo.png - - .. link-button:: https://www.businessinsider.com/openai-chatgpt-trained-on-anyscale-ray-generative-lifelike-ai-models-2022-12 - :type: url - :text: [Blog] How OpenAI Uses Ray to Train Tools like ChatGPT - :classes: btn-link btn-block stretched-link chatgpt - - --- - :img-top: /images/ray_logo.png - - .. link-button:: /ray-air/examples/gptj_deepspeed_fine_tuning - :type: ref - :text: [Example] GPT-J-6B Fine-Tuning with Ray AIR and DeepSpeed - :classes: btn-link btn-block stretched-link antServing - - --- - :img-top: /images/ray_logo.png - - .. 
link-button:: /ray-air/examples/dreambooth_finetuning - :type: ref - :text: [Example] Fine-tuning DreamBooth with Ray AIR - :classes: btn-link btn-block stretched-link rayForward - - --- - :img-top: /images/ray_logo.png - - .. link-button:: /ray-air/examples/stablediffusion_batch_prediction - :type: ref - :text: [Example] Stable Diffusion Batch Prediction with Ray AIR - :classes: btn-link btn-block stretched-link rayForward - - --- - :img-top: /images/ray_logo.png - - .. link-button:: /ray-air/examples/gptj_serving - :type: ref - :text: [Example] GPT-J-6B Serving with Ray AIR - :classes: btn-link btn-block stretched-link webCrawler +.. grid:: 1 2 3 4 + :gutter: 1 + :class-container: container pb-3 + + .. grid-item-card:: + :img-top: /images/ray_logo.png + :class-img-top: pt-2 w-75 d-block mx-auto fixed-height-img + + .. button-link:: https://www.anyscale.com/blog/ray-common-production-challenges-for-generative-ai-infrastructure + + [Blog] How Ray solves common production challenges for generative AI infrastructure + + .. grid-item-card:: + :img-top: /images/ray_logo.png + :class-img-top: pt-2 w-75 d-block mx-auto fixed-height-img + + .. button-link:: https://www.anyscale.com/blog/training-175b-parameter-language-models-at-1000-gpu-scale-with-alpa-and-ray + + [Blog] Training 175B Parameter Language Models at 1000 GPU scale with Alpa and Ray + + .. grid-item-card:: + :img-top: /images/ray_logo.png + :class-img-top: pt-2 w-75 d-block mx-auto fixed-height-img + + .. button-link:: https://www.anyscale.com/blog/faster-stable-diffusion-fine-tuning-with-ray-air + + [Blog] Faster stable diffusion fine-tuning with Ray AIR + + .. grid-item-card:: + :img-top: /images/ray_logo.png + :class-img-top: pt-2 w-75 d-block mx-auto fixed-height-img + + .. 
button-link:: https://www.anyscale.com/blog/how-to-fine-tune-and-serve-llms-simply-quickly-and-cost-effectively-using + + [Blog] How to fine tune and serve LLMs simply, quickly and cost effectively using Ray + DeepSpeed + HuggingFace + + .. grid-item-card:: + :img-top: /images/ray_logo.png + :class-img-top: pt-2 w-75 d-block mx-auto fixed-height-img + + .. button-link:: https://www.businessinsider.com/openai-chatgpt-trained-on-anyscale-ray-generative-lifelike-ai-models-2022-12 + + [Blog] How OpenAI Uses Ray to Train Tools like ChatGPT + + .. grid-item-card:: + :img-top: /images/ray_logo.png + :class-img-top: pt-2 w-75 d-block mx-auto fixed-height-img + + .. button-ref:: /ray-air/examples/gptj_deepspeed_fine_tuning + + [Example] GPT-J-6B Fine-Tuning with Ray AIR and DeepSpeed + + .. grid-item-card:: + :img-top: /images/ray_logo.png + :class-img-top: pt-2 w-75 d-block mx-auto fixed-height-img + + .. button-ref:: /ray-air/examples/dreambooth_finetuning + + [Example] Fine-tuning DreamBooth with Ray AIR + + .. grid-item-card:: + :img-top: /images/ray_logo.png + :class-img-top: pt-2 w-75 d-block mx-auto fixed-height-img + + .. button-ref:: /ray-air/examples/stablediffusion_batch_prediction + + [Example] Stable Diffusion Batch Prediction with Ray AIR + + .. grid-item-card:: + :img-top: /images/ray_logo.png + :class-img-top: pt-2 w-75 d-block mx-auto fixed-height-img + + .. button-ref:: /ray-air/examples/gptj_serving + + [Example] GPT-J-6B Serving with Ray AIR + .. _ref-use-cases-batch-infer: @@ -110,40 +110,50 @@ To learn more about running batch inference with Ray, see the :ref:`batch infere .. figure:: batch_inference/images/batch_inference.png -.. panels:: - :container: container pb-3 - :column: col-md-3 px-1 py-1 - :img-top-cls: p-2 w-75 d-block mx-auto fixed-height-img +.. grid:: 1 2 3 4 + :gutter: 1 + :class-container: container pb-3 + + .. grid-item-card:: + :img-top: /images/ray_logo.png + :class-img-top: pt-2 w-75 d-block mx-auto fixed-height-img + + .. 
button-link:: https://github.com/ray-project/ray-educational-materials/blob/main/Computer_vision_workloads/Semantic_segmentation/Scaling_batch_inference.ipynb + + [Tutorial] Architectures for Scalable Batch Inference with Ray + + .. grid-item-card:: + :img-top: /images/ray_logo.png + :class-img-top: pt-2 w-75 d-block mx-auto fixed-height-img + + .. button-link:: https://www.anyscale.com/blog/model-batch-inference-in-ray-actors-actorpool-and-datasets + + [Blog] Batch Inference in Ray: Actors, ActorPool, and Datasets + + .. grid-item-card:: + :img-top: /images/ray_logo.png + :class-img-top: pt-2 w-75 d-block mx-auto fixed-height-img - --- - :img-top: /images/ray_logo.png + .. button-ref:: /ray-core/examples/batch_prediction - .. link-button:: /data/batch_inference - :type: ref - :text: [User Guide] Batch Inference with Ray Data - :classes: btn-link btn-block stretched-link - --- - :img-top: /images/ray_logo.png + [Example] Batch Prediction using Ray Core - .. link-button:: https://github.com/ray-project/ray-educational-materials/blob/main/Computer_vision_workloads/Semantic_segmentation/Scaling_batch_inference.ipynb - :type: url - :text: [Tutorial] Architectures for Scalable Batch Inference with Ray - :classes: btn-link btn-block stretched-link scalableBatchInference - --- - :img-top: /images/ray_logo.png + .. grid-item-card:: + :img-top: /images/ray_logo.png + :class-img-top: pt-2 w-75 d-block mx-auto fixed-height-img - .. link-button:: /data/examples/nyc_taxi_basic_processing - :type: ref - :text: [Example] Batch Inference on NYC taxi data using Ray Data - :classes: btn-link btn-block stretched-link nycTaxiData + .. button-ref:: /data/examples/nyc_taxi_basic_processing - --- - :img-top: /images/ray_logo.png + [Example] Batch Inference on NYC taxi data using Ray Data + + .. grid-item-card:: + :img-top: /images/ray_logo.png + :class-img-top: pt-2 w-75 d-block mx-auto fixed-height-img + + .. 
button-ref:: /data/examples/ocr_example + + [Example] Batch OCR processing using Ray Data - .. link-button:: /data/examples/ocr_example - :type: ref - :text: [Example] Batch OCR processing using Ray Data - :classes: btn-link btn-block stretched-link batchOcr .. _ref-use-cases-mmt: @@ -157,7 +167,7 @@ The focus is on training many models on subsets of a dataset. This is in contras When any given model you want to train can fit on a single GPU, Ray can assign each training run to a separate Ray Task. In this way, all available workers are utilized to run independent remote training rather than one worker running jobs sequentially. .. figure:: /images/training_small_models.png - + Data parallelism pattern for distributed training on large datasets. How do I do many model training on Ray? @@ -165,71 +175,77 @@ How do I do many model training on Ray? To train multiple independent models, use the Ray Tune (:ref:`Tutorial `) library. This is the recommended library for most cases. -You can use Tune with your current data preprocessing pipeline if your data source fits into the memory of a single machine (node). +You can use Tune with your current data preprocessing pipeline if your data source fits into the memory of a single machine (node). If you need to scale your data, or you want to plan for future scaling, use the :ref:`Ray Data ` library. -Your data must be a :ref:`supported format `, to use Ray Data. +Your data must be a :ref:`supported format `, to use Ray Data. -Alternative solutions exist for less common cases: +Alternative solutions exist for less common cases: #. If your data is not in a supported format, use Ray Core (:ref:`Tutorial `) for custom applications. This is an advanced option and requires and understanding of :ref:`design patterns and anti-patterns `. -#. If you have a large preprocessing pipeline, you can use the Ray Data library to train multiple models (:ref:`Tutorial `). +#. 
If you have a large preprocessing pipeline, you can use the Ray Data library to train multiple models (:ref:`Tutorial `). Learn more about many model training with the following resources. -.. panels:: - :container: container pb-3 - :column: col-md-3 px-1 py-1 - :img-top-cls: p-2 w-75 d-block mx-auto fixed-height-img - - --- - :img-top: /images/ray_logo.png - - .. link-button:: https://www.anyscale.com/blog/training-one-million-machine-learning-models-in-record-time-with-ray - :type: url - :text: [Blog] Training One Million ML Models in Record Time with Ray - :classes: btn-link btn-block stretched-link millionModels - --- - :img-top: /images/ray_logo.png - - .. link-button:: https://www.anyscale.com/blog/many-models-batch-training-at-scale-with-ray-core - :type: url - :text: [Blog] Many Models Batch Training at Scale with Ray Core - :classes: btn-link btn-block stretched-link manyModels - --- - :img-top: /images/ray_logo.png - - .. link-button:: /ray-core/examples/batch_training - :type: ref - :text: [Example] Batch Training with Ray Core - :classes: btn-link btn-block stretched-link batchTrainingCore - --- - :img-top: /images/ray_logo.png - - .. link-button:: /data/examples/batch_training - :type: ref - :text: [Example] Batch Training with Ray Data - :classes: btn-link btn-block stretched-link batchTrainingDatasets - --- - :img-top: /images/tune.png - - .. link-button:: /tune/tutorials/tune-run - :type: ref - :text: [Guide] Tune Basic Parallel Experiments - :classes: btn-link btn-block stretched-link tuneBasicParallel - --- - :img-top: /images/tune.png - - .. link-button:: /ray-air/examples/batch_tuning - :type: ref - :text: [Example] Batch Training and Tuning using Ray Tune - :classes: btn-link btn-block stretched-link tuneBatch - --- - :img-top: /images/carrot.png - - .. 
link-button:: https://www.youtube.com/watch?v=3t26ucTy0Rs - :type: url - :text: [Talk] Scaling Instacart fulfillment ML on Ray - :classes: btn-link btn-block stretched-link instacartFulfillment +.. grid:: 1 2 3 4 + :gutter: 1 + :class-container: container pb-3 + + .. grid-item-card:: + :img-top: /images/ray_logo.png + :class-img-top: pt-2 w-75 d-block mx-auto fixed-height-img + + .. button-link:: https://www.anyscale.com/blog/training-one-million-machine-learning-models-in-record-time-with-ray + + [Blog] Training One Million ML Models in Record Time with Ray + + .. grid-item-card:: + :img-top: /images/ray_logo.png + :class-img-top: pt-2 w-75 d-block mx-auto fixed-height-img + + .. button-link:: https://www.anyscale.com/blog/many-models-batch-training-at-scale-with-ray-core + + [Blog] Many Models Batch Training at Scale with Ray Core + + .. grid-item-card:: + :img-top: /images/ray_logo.png + :class-img-top: pt-2 w-75 d-block mx-auto fixed-height-img + + .. button-ref:: /ray-core/examples/batch_training + + [Example] Batch Training with Ray Core + + .. grid-item-card:: + :img-top: /images/ray_logo.png + :class-img-top: pt-2 w-75 d-block mx-auto fixed-height-img + + .. button-ref:: /data/examples/batch_training + + [Example] Batch Training with Ray Data + + .. grid-item-card:: + :img-top: /images/tune.png + :class-img-top: pt-2 w-75 d-block mx-auto fixed-height-img + + .. button-ref:: /tune/tutorials/tune-run + + [Guide] Tune Basic Parallel Experiments + + .. grid-item-card:: + :img-top: /images/tune.png + :class-img-top: pt-2 w-75 d-block mx-auto fixed-height-img + + .. button-ref:: /ray-air/examples/batch_tuning + + [Example] Batch Training and Tuning using Ray Tune + + .. grid-item-card:: + :img-top: /images/carrot.png + :class-img-top: pt-2 w-75 d-block mx-auto fixed-height-img + + .. 
button-link:: https://www.youtube.com/watch?v=3t26ucTy0Rs + + [Talk] Scaling Instacart fulfillment ML on Ray + Model Serving ------------- @@ -244,53 +260,58 @@ It supports complex `model deployment patterns Date: Wed, 3 May 2023 09:15:41 -0700 Subject: [PATCH 208/424] Enable TLS on gRPCIngress if RAY_USE_TLS is on (#34403) Signed-off-by: Abin Shahab --- python/ray/serve/drivers.py | 18 ++++- python/ray/serve/tests/test_grpc.py | 107 +++++++++++++++++++++++++--- 2 files changed, 111 insertions(+), 14 deletions(-) diff --git a/python/ray/serve/drivers.py b/python/ray/serve/drivers.py index dc2e8f0d71e7..25019a713103 100644 --- a/python/ray/serve/drivers.py +++ b/python/ray/serve/drivers.py @@ -3,6 +3,7 @@ from typing import Any, Callable, Optional, Union, Dict import ray from ray._private.utils import get_or_create_event_loop +from ray._private.tls_utils import add_port_to_grpc_server from ray.serve._private.utils import install_serve_encoders_to_fastapi, record_serve_tag from ray.util.annotations import PublicAPI @@ -50,7 +51,6 @@ def __init__( install_serve_encoders_to_fastapi() http_adapter = load_http_adapter(http_adapter) - self.app = FastAPI() if isinstance(dags, dict): @@ -146,13 +146,25 @@ def __init__(self, port: int = DEFAULT_GRPC_PORT): async def run(self): """Start gRPC Server""" - logger.info( "Starting gRPC server with on node:{} " "listening on port {}".format(ray.util.get_node_ip_address(), self.port) ) + address = "[::]:{}".format(self.port) + try: + # Depending on whether RAY_USE_TLS is on, `add_port_to_grpc_server` + # can create a secure or insecure channel + self.grpc_port = add_port_to_grpc_server(self.server, address) + except Exception: + # TODO(SongGuyang): Catch the exception here because there is + # port conflict issue which brought from static port. We should + # remove this after we find better port resolution. + logger.exception( + "Failed to add port to grpc server. 
GRPC service will be disabled" + ) + self.server = None + self.grpc_port = None - self.server.add_insecure_port("[::]:{}".format(self.port)) self.setup_complete.set() await self.server.start() await self.server.wait_for_termination() diff --git a/python/ray/serve/tests/test_grpc.py b/python/ray/serve/tests/test_grpc.py index 69f00886a683..2a78cf78c319 100644 --- a/python/ray/serve/tests/test_grpc.py +++ b/python/ray/serve/tests/test_grpc.py @@ -1,13 +1,13 @@ +# coding: utf-8 import pytest - +import sys +import os from ray.serve.drivers import DefaultgRPCDriver, gRPCIngress import ray from ray import serve -from ray.serve.generated import serve_pb2, serve_pb2_grpc -import grpc from ray.cluster_utils import Cluster from ray.serve._private.constants import SERVE_NAMESPACE -from ray._private.test_utils import wait_for_condition +from ray._private.test_utils import wait_for_condition, run_string_as_driver from ray.serve.exceptions import RayServeException from ray.serve._private.constants import ( @@ -16,9 +16,10 @@ ) from unittest.mock import patch - - -pytestmark = pytest.mark.asyncio +from ray._private.test_utils import ( + setup_tls, + teardown_tls, +) @pytest.fixture @@ -38,8 +39,41 @@ def ray_cluster(): cluster.shutdown() -@patch("ray.serve._private.api.FLAG_DISABLE_HTTP_PROXY", True) -async def test_deploy_basic(serve_start_shutdown): +@pytest.fixture +def use_tls(request): + if request.param: + key_filepath, cert_filepath, temp_dir = setup_tls() + yield request.param + if request.param: + teardown_tls(key_filepath, cert_filepath, temp_dir) + + +def tls_enabled(): + return os.environ.get("RAY_USE_TLS", "0").lower() in ("1", "true") + + +@pytest.mark.skipif( + sys.platform == "darwin", + reason=("Cryptography (TLS dependency) doesn't install in Mac build pipeline"), +) +@pytest.mark.parametrize("use_tls", [True], indirect=True) +def test_deploy_basic(use_tls): + if use_tls: + run_string_as_driver( + """ +# coding: utf-8 +import os +from ray.serve.drivers import 
DefaultgRPCDriver, gRPCIngress +import ray +from ray import serve +from ray.serve.generated import serve_pb2, serve_pb2_grpc +import grpc +from ray.serve.exceptions import RayServeException +from ray._private.tls_utils import load_certs_from_env +import logging +import asyncio +try: + ray.init() @serve.deployment class D1: def __call__(self, input): @@ -48,16 +82,67 @@ def __call__(self, input): serve.run(DefaultgRPCDriver.bind(D1.bind())) async def send_request(): - async with grpc.aio.insecure_channel("localhost:9000") as channel: + server_cert_chain, private_key, ca_cert = load_certs_from_env() + credentials = grpc.ssl_channel_credentials( + certificate_chain=server_cert_chain, + private_key=private_key, + root_certificates=ca_cert, + ) + + async with grpc.aio.secure_channel("localhost:9000", credentials) as channel: stub = serve_pb2_grpc.PredictAPIsServiceStub(channel) response = await stub.Predict( serve_pb2.PredictRequest(input={"a": bytes("123", "utf-8")}) ) return response - resp = await send_request() + resp = asyncio.run(send_request()) + assert resp.prediction == b"123" +finally: + serve.shutdown() + ray.shutdown() + """, + env=os.environ.copy(), + ) + else: + run_string_as_driver( + """ +# coding: utf-8 +import os +from ray.serve.drivers import DefaultgRPCDriver, gRPCIngress +import ray +from ray import serve +from ray.serve.generated import serve_pb2, serve_pb2_grpc +import grpc +from ray.serve.exceptions import RayServeException +from ray._private.tls_utils import load_certs_from_env +import logging +import asyncio +try: + ray.init() + @serve.deployment + class D1: + def __call__(self, input): + return input["a"] + serve.run(DefaultgRPCDriver.bind(D1.bind())) + + async def send_request(): + async with grpc.aio.insecure_channel("localhost:9000") as channel: + stub = serve_pb2_grpc.PredictAPIsServiceStub(channel) + response = await stub.Predict( + serve_pb2.PredictRequest(input={"a": bytes("123", "utf-8")}) + ) + return response + + resp = 
asyncio.run(send_request()) assert resp.prediction == b"123" +finally: + serve.shutdown() + ray.shutdown() + """, + env=os.environ.copy(), + ) @patch("ray.serve._private.api.FLAG_DISABLE_HTTP_PROXY", True) From a429c0fadf3ae328c97eac0e70ef80daf145c6cd Mon Sep 17 00:00:00 2001 From: "ZhengYu, Xu" Date: Thu, 4 May 2023 00:36:06 +0800 Subject: [PATCH 209/424] Avoid copying data multiple times during chunking. (#34772) When transferring large objects, Ray performs chunking, but accessing the data field of protobuf during each chunking operation will result in copying Every Time. Signed-off-by: ZhengYu, Xu --- python/ray/util/client/dataclient.py | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/python/ray/util/client/dataclient.py b/python/ray/util/client/dataclient.py index 449c76534381..5ce08117087d 100644 --- a/python/ray/util/client/dataclient.py +++ b/python/ray/util/client/dataclient.py @@ -39,7 +39,11 @@ def chunk_put(req: ray_client_pb2.DataRequest): into the result_queue, we would effectively double the memory needed on the client to handle the put. """ - total_size = len(req.put.data) + # When accessing a protobuf field, deserialization is performed, which will + # generate a copy. 
So we need to avoid accessing the `data` field multiple + # times in the loop + request_data = req.put.data + total_size = len(request_data) assert total_size > 0, "Cannot chunk object with missing data" if total_size >= OBJECT_TRANSFER_WARNING_SIZE and log_once( "client_object_put_size_warning" @@ -60,7 +64,7 @@ def chunk_put(req: ray_client_pb2.DataRequest): end = min(total_size, (chunk_id + 1) * OBJECT_TRANSFER_CHUNK_SIZE) chunk = ray_client_pb2.PutRequest( client_ref_id=req.put.client_ref_id, - data=req.put.data[start:end], + data=request_data[start:end], chunk_id=chunk_id, total_chunks=total_chunks, total_size=total_size, @@ -77,7 +81,11 @@ def chunk_task(req: ray_client_pb2.DataRequest): into the result_queue, we would effectively double the memory needed on the client to handle the task. """ - total_size = len(req.task.data) + # When accessing a protobuf field, deserialization is performed, which will + # generate a copy. So we need to avoid accessing the `data` field multiple + # times in the loop + request_data = req.task.data + total_size = len(request_data) assert total_size > 0, "Cannot chunk object with missing data" total_chunks = math.ceil(total_size / OBJECT_TRANSFER_CHUNK_SIZE) for chunk_id in range(0, total_chunks): @@ -91,7 +99,7 @@ def chunk_task(req: ray_client_pb2.DataRequest): options=req.task.options, baseline_options=req.task.baseline_options, namespace=req.task.namespace, - data=req.task.data[start:end], + data=request_data[start:end], chunk_id=chunk_id, total_chunks=total_chunks, ) From 43fc4297a7f41eaae5b102b715da44da4de69894 Mon Sep 17 00:00:00 2001 From: Hao Chen Date: Wed, 3 May 2023 10:03:39 -0700 Subject: [PATCH 210/424] Make sure all public APIs will auto-init ray (#34730) - Fix the issue that `get_runtime_context` won't auto-init ray. - Make sure all public APIs will auto-init ray, by decorating all functions in `__all__` of `ray/__init__.py`. - Decouple auto-init with client mode. 
--- python/ray/__init__.py | 61 ++++++++++++++++--- python/ray/_private/auto_init_hook.py | 28 +++++++++ python/ray/_private/client_mode_hook.py | 34 +++-------- python/ray/_private/state.py | 8 +-- python/ray/_private/storage.py | 7 ++- python/ray/_private/worker.py | 20 +++--- python/ray/actor.py | 6 +- python/ray/data/read_api.py | 2 + python/ray/experimental/internal_kv.py | 14 ++--- python/ray/remote_function.py | 4 +- python/ray/runtime_context.py | 2 +- python/ray/tests/test_client.py | 12 ++-- .../tests/test_client_library_integration.py | 8 +-- python/ray/util/__init__.py | 4 +- python/ray/util/placement_group.py | 5 +- python/ray/workflow/api.py | 2 +- python/ray/workflow/tests/utils.py | 2 +- .../ray/workflow/workflow_state_from_dag.py | 2 +- 18 files changed, 145 insertions(+), 76 deletions(-) create mode 100644 python/ray/_private/auto_init_hook.py diff --git a/python/ray/__init__.py b/python/ray/__init__.py index 126b48d14d59..08fafe10cd5b 100644 --- a/python/ray/__init__.py +++ b/python/ray/__init__.py @@ -192,11 +192,10 @@ def __getattr__(self, attr): state = _DeprecationWrapper("state", ray._private.state) -__all__ = [ +RAY_APIS = { "__version__", "_config", "get_runtime_context", - "actor", "autoscaler", "available_resources", "cancel", @@ -207,7 +206,6 @@ def __getattr__(self, attr): "get_actor", "get_gpu_ids", "init", - "internal", "is_initialized", "java_actor_class", "java_function", @@ -221,19 +219,68 @@ def __getattr__(self, attr): "shutdown", "show_in_dashboard", "timeline", - "util", "wait", - "widgets", "LOCAL_MODE", "SCRIPT_MODE", "WORKER_MODE", -] +} + +# Public APIs that should automatically trigger ray.init(). +AUTO_INIT_APIS = { + "cancel", + "get", + "get_actor", + "get_gpu_ids", + "kill", + "put", + "wait", +} + +# Public APIs that should not automatically trigger ray.init(). 
+NON_AUTO_INIT_APIS = { + "ClientBuilder", + "LOCAL_MODE", + "Language", + "SCRIPT_MODE", + "WORKER_MODE", + "__version__", + "_config", + "autoscaler", + "available_resources", + "client", + "cluster_resources", + "cpp_function", + "get_runtime_context", + "init", + "is_initialized", + "java_actor_class", + "java_function", + "method", + "nodes", + "remote", + "show_in_dashboard", + "shutdown", + "timeline", +} + +assert RAY_APIS == AUTO_INIT_APIS | NON_AUTO_INIT_APIS +from ray._private.auto_init_hook import wrap_auto_init_for_all_apis # noqa: E402 + +wrap_auto_init_for_all_apis(AUTO_INIT_APIS) +del wrap_auto_init_for_all_apis + + +__all__ = list(RAY_APIS) # Subpackages __all__ += [ + "actor", + "autoscaler", "data", + "internal", + "util", + "widgets", "workflow", - "autoscaler", ] # ID types diff --git a/python/ray/_private/auto_init_hook.py b/python/ray/_private/auto_init_hook.py new file mode 100644 index 000000000000..0c4184f3873e --- /dev/null +++ b/python/ray/_private/auto_init_hook.py @@ -0,0 +1,28 @@ +import ray +import os +from functools import wraps + + +def auto_init_ray(): + if ( + os.environ.get("RAY_ENABLE_AUTO_CONNECT", "") != "0" + and not ray.is_initialized() + ): + ray.init() + + +def wrap_auto_init(fn): + @wraps(fn) + def auto_init_wrapper(*args, **kwargs): + auto_init_ray() + return fn(*args, **kwargs) + + return auto_init_wrapper + + +def wrap_auto_init_for_all_apis(api_names): + """Wrap public APIs with automatic ray.init.""" + for api_name in api_names: + api = getattr(ray, api_name, None) + assert api is not None, api_name + setattr(ray, api_name, wrap_auto_init(api)) diff --git a/python/ray/_private/client_mode_hook.py b/python/ray/_private/client_mode_hook.py index 2d58726ab169..cbc0944514ea 100644 --- a/python/ray/_private/client_mode_hook.py +++ b/python/ray/_private/client_mode_hook.py @@ -1,7 +1,8 @@ import os import threading from contextlib import contextmanager -from functools import partial, wraps +from functools import wraps 
+from ray._private.auto_init_hook import auto_init_ray # Attr set on func defs to mark they have been converted to client mode. RAY_CLIENT_MODE_ATTR = "__ray_client_mode_key__" @@ -77,27 +78,20 @@ def enable_client_mode(): _explicitly_disable_client_mode() -def client_mode_hook(func: callable = None, *, auto_init: bool): +def client_mode_hook(func: callable): """Decorator for whether to use the 'regular' ray version of a function, or the Ray Client version of that function. Args: func: This function. This is set when this function is used as a decorator. - auto_init: Whether `ray.init()` should be transparently called when - the wrapped function is called. This should be `True` for functions - that are *NOT* part of the initialization path (e.g. `init` or - `is_initialized`) or for functions that do not require Ray to be - initialized (e.g., KV operations, `shutdown`). """ - if func is None: - return partial(client_mode_hook, auto_init=auto_init) @wraps(func) def wrapper(*args, **kwargs): from ray.util.client import ray - if client_mode_should_convert(auto_init=auto_init): + if client_mode_should_convert(): # Legacy code # we only convert init function if RAY_CLIENT_MODE=1 if func.__name__ != "init" or is_client_mode_enabled_by_default: @@ -107,21 +101,8 @@ def wrapper(*args, **kwargs): return wrapper -def client_mode_should_convert(*, auto_init: bool): - """Determines if functions should be converted to client mode & if - Ray should be auto-initialized. - - NOTE: `auto_init` must happen before we branch into regular ray or client - code because the initialization may result in either mode. - """ - if auto_init: - import ray - - if ( - os.environ.get("RAY_ENABLE_AUTO_CONNECT", "") != "0" - and not ray.is_initialized() - ): - ray.init() +def client_mode_should_convert(): + """Determines if functions should be converted to client mode.""" # `is_client_mode_enabled_by_default` is used for testing with # `RAY_CLIENT_MODE=1`. 
This flag means all tests run with client mode. @@ -146,9 +127,10 @@ def client_mode_wrap(func): def wrapper(*args, **kwargs): from ray.util.client import ray + auto_init_ray() # Directly pass this through since `client_mode_wrap` is for # Placement Group APIs - if client_mode_should_convert(auto_init=True): + if client_mode_should_convert(): f = ray.remote(num_cpus=0)(func) ref = f.remote(*args, **kwargs) return ray.get(ref) diff --git a/python/ray/_private/state.py b/python/ray/_private/state.py index 40e94e9c5db5..c2d1fc594f17 100644 --- a/python/ray/_private/state.py +++ b/python/ray/_private/state.py @@ -757,7 +757,7 @@ def next_job_id(): @DeveloperAPI -@client_mode_hook(auto_init=False) +@client_mode_hook def nodes(): """Get a list of the nodes in the cluster (for debugging only). @@ -821,7 +821,7 @@ def actors(actor_id=None): @DeveloperAPI -@client_mode_hook(auto_init=False) +@client_mode_hook def timeline(filename=None): """Return a list of profiling events that can viewed as a timeline. @@ -864,7 +864,7 @@ def object_transfer_timeline(filename=None): @DeveloperAPI -@client_mode_hook(auto_init=False) +@client_mode_hook def cluster_resources(): """Get the current total cluster resources. @@ -879,7 +879,7 @@ def cluster_resources(): @DeveloperAPI -@client_mode_hook(auto_init=False) +@client_mode_hook def available_resources(): """Get the current available cluster resources. 
diff --git a/python/ray/_private/storage.py b/python/ray/_private/storage.py index ea5743a525f1..90d3e54d7e40 100644 --- a/python/ray/_private/storage.py +++ b/python/ray/_private/storage.py @@ -7,6 +7,7 @@ from ray._private.client_mode_hook import client_mode_hook from ray._private.utils import _add_creatable_buckets_param_if_s3_uri +from ray._private.auto_init_hook import wrap_auto_init if TYPE_CHECKING: import pyarrow.fs @@ -25,7 +26,8 @@ _filesystem = None -@client_mode_hook(auto_init=True) +@wrap_auto_init +@client_mode_hook def get_filesystem() -> ("pyarrow.fs.FileSystem", str): """Initialize and get the configured storage filesystem, if possible. @@ -51,7 +53,8 @@ def get_filesystem() -> ("pyarrow.fs.FileSystem", str): # TODO(suquark): There is no implementation of 'get_client' in client hook. -@client_mode_hook(auto_init=True) +@wrap_auto_init +@client_mode_hook def get_client(prefix: str) -> "KVClient": """Returns a KV-client (convenience wrapper around underlying filesystem). diff --git a/python/ray/_private/worker.py b/python/ray/_private/worker.py index 6a66667e4a3f..81ff80881719 100644 --- a/python/ray/_private/worker.py +++ b/python/ray/_private/worker.py @@ -898,7 +898,7 @@ def print_logs(self): @PublicAPI -@client_mode_hook(auto_init=True) +@client_mode_hook def get_gpu_ids(): """Get the IDs of the GPUs that are available to the worker. @@ -1111,7 +1111,7 @@ def _repr_html_(self): @PublicAPI -@client_mode_hook(auto_init=False) +@client_mode_hook def init( address: Optional[str] = None, *, @@ -1655,7 +1655,7 @@ def init( @PublicAPI -@client_mode_hook(auto_init=False) +@client_mode_hook def shutdown(_exiting_interpreter: bool = False): """Disconnect the worker, and terminate processes started by ray.init(). @@ -2002,7 +2002,7 @@ def listen_error_messages(worker, threads_stopped): @PublicAPI -@client_mode_hook(auto_init=False) +@client_mode_hook def is_initialized() -> bool: """Check if ray.init has been called yet. 
@@ -2429,7 +2429,7 @@ def get(object_refs: "ObjectRef[R]", *, timeout: Optional[float] = None) -> R: @PublicAPI -@client_mode_hook(auto_init=True) +@client_mode_hook def get( object_refs: Union[ray.ObjectRef, Sequence[ray.ObjectRef]], *, @@ -2556,7 +2556,7 @@ def get( @PublicAPI -@client_mode_hook(auto_init=True) +@client_mode_hook def put( value: Any, *, _owner: Optional["ray.actor.ActorHandle"] = None ) -> "ray.ObjectRef": @@ -2618,7 +2618,7 @@ def put( @PublicAPI -@client_mode_hook(auto_init=True) +@client_mode_hook def wait( object_refs: List["ray.ObjectRef"], *, @@ -2740,7 +2740,7 @@ def wait( @PublicAPI -@client_mode_hook(auto_init=True) +@client_mode_hook def get_actor(name: str, namespace: Optional[str] = None) -> "ray.actor.ActorHandle": """Get a handle to a named actor. @@ -2775,7 +2775,7 @@ def get_actor(name: str, namespace: Optional[str] = None) -> "ray.actor.ActorHan @PublicAPI -@client_mode_hook(auto_init=True) +@client_mode_hook def kill(actor: "ray.actor.ActorHandle", *, no_restart: bool = True): """Kill an actor forcefully. @@ -2805,7 +2805,7 @@ def kill(actor: "ray.actor.ActorHandle", *, no_restart: bool = True): @PublicAPI -@client_mode_hook(auto_init=True) +@client_mode_hook def cancel(object_ref: "ray.ObjectRef", *, force: bool = False, recursive: bool = True): """Cancels a task according to the following conditions. diff --git a/python/ray/actor.py b/python/ray/actor.py index 6bf93281fc83..de0d134d6210 100644 --- a/python/ray/actor.py +++ b/python/ray/actor.py @@ -9,6 +9,7 @@ import ray._raylet from ray import ActorClassID, Language, cross_language from ray._private import ray_option_utils +from ray._private.auto_init_hook import auto_init_ray from ray._private.client_mode_hook import ( client_mode_convert_actor, client_mode_hook, @@ -42,7 +43,7 @@ @PublicAPI -@client_mode_hook(auto_init=False) +@client_mode_hook def method(*args, **kwargs): """Annotate an actor method. 
@@ -763,7 +764,8 @@ def _remote(self, args=None, kwargs=None, **actor_options): if actor_options.get("max_concurrency") is None: actor_options["max_concurrency"] = 1000 if is_asyncio else 1 - if client_mode_should_convert(auto_init=True): + auto_init_ray() + if client_mode_should_convert(): return client_mode_convert_actor(self, args, kwargs, **actor_options) # fill actor required options diff --git a/python/ray/data/read_api.py b/python/ray/data/read_api.py index af3d096e52c7..5170a3dad2f7 100644 --- a/python/ray/data/read_api.py +++ b/python/ray/data/read_api.py @@ -83,6 +83,7 @@ from ray.util.annotations import Deprecated, DeveloperAPI, PublicAPI from ray.util.placement_group import PlacementGroup from ray.util.scheduling_strategies import NodeAffinitySchedulingStrategy +from ray._private.auto_init_hook import wrap_auto_init if TYPE_CHECKING: import dask @@ -303,6 +304,7 @@ def range_tensor(n: int, *, shape: Tuple = (1,), parallelism: int = -1) -> Datas @PublicAPI +@wrap_auto_init def read_datasource( datasource: Datasource, *, diff --git a/python/ray/experimental/internal_kv.py b/python/ray/experimental/internal_kv.py index 5cb1ade991ec..862ff3bacc89 100644 --- a/python/ray/experimental/internal_kv.py +++ b/python/ray/experimental/internal_kv.py @@ -25,12 +25,12 @@ def _initialize_internal_kv(gcs_client: GcsClient): _initialized = True -@client_mode_hook(auto_init=False) +@client_mode_hook def _internal_kv_initialized(): return global_gcs_client is not None -@client_mode_hook(auto_init=False) +@client_mode_hook def _internal_kv_get( key: Union[str, bytes], *, namespace: Optional[Union[str, bytes]] = None ) -> bytes: @@ -44,7 +44,7 @@ def _internal_kv_get( return global_gcs_client.internal_kv_get(key, namespace) -@client_mode_hook(auto_init=False) +@client_mode_hook def _internal_kv_exists( key: Union[str, bytes], *, namespace: Optional[Union[str, bytes]] = None ) -> bool: @@ -58,13 +58,13 @@ def _internal_kv_exists( return 
global_gcs_client.internal_kv_exists(key, namespace) -@client_mode_hook(auto_init=False) +@client_mode_hook def _pin_runtime_env_uri(uri: str, *, expiration_s: int) -> None: """Pin a runtime_env URI for expiration_s.""" return global_gcs_client.pin_runtime_env_uri(uri, expiration_s) -@client_mode_hook(auto_init=False) +@client_mode_hook def _internal_kv_put( key: Union[str, bytes], value: Union[str, bytes], @@ -94,7 +94,7 @@ def _internal_kv_put( return global_gcs_client.internal_kv_put(key, value, overwrite, namespace) == 0 -@client_mode_hook(auto_init=False) +@client_mode_hook def _internal_kv_del( key: Union[str, bytes], *, @@ -109,7 +109,7 @@ def _internal_kv_del( return global_gcs_client.internal_kv_del(key, del_by_prefix, namespace) -@client_mode_hook(auto_init=False) +@client_mode_hook def _internal_kv_list( prefix: Union[str, bytes], *, namespace: Optional[Union[str, bytes]] = None ) -> List[bytes]: diff --git a/python/ray/remote_function.py b/python/ray/remote_function.py index 04dde9fbd4fc..2d1162b6ce33 100644 --- a/python/ray/remote_function.py +++ b/python/ray/remote_function.py @@ -7,6 +7,7 @@ import ray._private.signature from ray import Language, cross_language from ray._private import ray_option_utils +from ray._private.auto_init_hook import auto_init_ray from ray._private.client_mode_hook import ( client_mode_convert_function, client_mode_should_convert, @@ -242,7 +243,8 @@ def _remote(self, args=None, kwargs=None, **task_options): # We pop the "max_calls" coming from "@ray.remote" here. We no longer need # it in "_remote()". 
task_options.pop("max_calls", None) - if client_mode_should_convert(auto_init=True): + auto_init_ray() + if client_mode_should_convert(): return client_mode_convert_function(self, args, kwargs, **task_options) worker = ray._private.worker.global_worker diff --git a/python/ray/runtime_context.py b/python/ray/runtime_context.py index 16623af6f0a4..bf4ac88db39a 100644 --- a/python/ray/runtime_context.py +++ b/python/ray/runtime_context.py @@ -359,7 +359,7 @@ def _get_actor_call_stats(self): @PublicAPI -@client_mode_hook(auto_init=False) +@client_mode_hook def get_runtime_context(): """Get the runtime context of the current driver/worker. diff --git a/python/ray/tests/test_client.py b/python/ray/tests/test_client.py index 1589c8d1a44d..4ee54ed1eac6 100644 --- a/python/ray/tests/test_client.py +++ b/python/ray/tests/test_client.py @@ -65,11 +65,11 @@ def test_client_context_manager(call_ray_start_shared, connect_to_client): call_ray_start_shared ), enable_client_mode(): # Client mode is on. - assert client_mode_should_convert(auto_init=True) + assert client_mode_should_convert() # We're connected to Ray client. 
assert ray.util.client.ray.is_connected() else: - assert not client_mode_should_convert(auto_init=True) + assert not client_mode_should_convert() assert not ray.util.client.ray.is_connected() @@ -108,20 +108,20 @@ def run(self): def test_client_mode_hook_thread_safe(call_ray_start_shared): with ray_start_client_server_for_address(call_ray_start_shared): with enable_client_mode(): - assert client_mode_should_convert(auto_init=True) + assert client_mode_should_convert() lock = threading.Lock() lock.acquire() q = queue.Queue() def disable(): with disable_client_hook(): - q.put(client_mode_should_convert(auto_init=True)) + q.put(client_mode_should_convert()) lock.acquire() - q.put(client_mode_should_convert(auto_init=True)) + q.put(client_mode_should_convert()) t = threading.Thread(target=disable) t.start() - assert client_mode_should_convert(auto_init=True) + assert client_mode_should_convert() lock.release() t.join() assert q.get() is False, "Threaded disable_client_hook failed to disable" diff --git a/python/ray/tests/test_client_library_integration.py b/python/ray/tests/test_client_library_integration.py index 133a84b956de..b0e6fa602b0a 100644 --- a/python/ray/tests/test_client_library_integration.py +++ b/python/ray/tests/test_client_library_integration.py @@ -14,11 +14,11 @@ def test_rllib_integration(ray_start_regular): # Confirming the behavior of this context manager. # (Client mode hook not yet enabled.) - assert not client_mode_should_convert(auto_init=True) + assert not client_mode_should_convert() # Need to enable this for client APIs to be used. with enable_client_mode(): # Confirming mode hook is enabled. - assert client_mode_should_convert(auto_init=True) + assert client_mode_should_convert() config = dqn.SIMPLE_Q_DEFAULT_CONFIG.copy() # Run locally. @@ -38,11 +38,11 @@ def test_rllib_integration_tune(ray_start_regular): with ray_start_client_server(): # Confirming the behavior of this context manager. # (Client mode hook not yet enabled.) 
- assert not client_mode_should_convert(auto_init=True) + assert not client_mode_should_convert() # Need to enable this for client APIs to be used. with enable_client_mode(): # Confirming mode hook is enabled. - assert client_mode_should_convert(auto_init=True) + assert client_mode_should_convert() tune.run( "DQN", config={"env": "CartPole-v1"}, stop={"training_iteration": 2} ) diff --git a/python/ray/util/__init__.py b/python/ray/util/__init__.py index 15c0101c5e80..69388db4ffe9 100644 --- a/python/ray/util/__init__.py +++ b/python/ray/util/__init__.py @@ -2,6 +2,7 @@ import ray from ray._private.client_mode_hook import client_mode_hook +from ray._private.auto_init_hook import wrap_auto_init from ray._private.services import get_node_ip_address from ray.util import iter from ray.util import rpdb as pdb @@ -21,7 +22,8 @@ @PublicAPI(stability="beta") -@client_mode_hook(auto_init=True) +@wrap_auto_init +@client_mode_hook def list_named_actors(all_namespaces: bool = False) -> List[str]: """List all named actors in the system. diff --git a/python/ray/util/placement_group.py b/python/ray/util/placement_group.py index 62d373ecd136..065db97af66d 100644 --- a/python/ray/util/placement_group.py +++ b/python/ray/util/placement_group.py @@ -2,6 +2,7 @@ from typing import Dict, List, Optional, Union import ray +from ray._private.auto_init_hook import auto_init_ray from ray._private.client_mode_hook import client_mode_should_convert, client_mode_wrap from ray._private.utils import hex_to_binary, get_ray_doc_version from ray._raylet import PlacementGroupID @@ -321,7 +322,8 @@ def get_current_placement_group() -> Optional[PlacementGroup]: None if the current task or actor wasn't created with any placement group. """ - if client_mode_should_convert(auto_init=True): + auto_init_ray() + if client_mode_should_convert(): # Client mode is only a driver. 
return None worker = ray._private.worker.global_worker @@ -374,7 +376,6 @@ def _valid_resource_shape(resources, bundle_specs): def _validate_resource_shape( placement_group, resources, placement_resources, task_or_actor_repr ): - bundles = placement_group.bundle_specs resources_valid = _valid_resource_shape(resources, bundles) placement_resources_valid = _valid_resource_shape(placement_resources, bundles) diff --git a/python/ray/workflow/api.py b/python/ray/workflow/api.py index 2e3e1d20cdee..6a651dbb0fd2 100644 --- a/python/ray/workflow/api.py +++ b/python/ray/workflow/api.py @@ -107,7 +107,7 @@ def wrapper(*args, **kwargs): # `is_client_mode_enabled_by_default` is used for testing with # `RAY_CLIENT_MODE=1`. This flag means all tests run with client mode. - if client_mode_should_convert(auto_init=False): + if client_mode_should_convert(): f = ray.remote(num_cpus=0)(func) ref = f.remote(*args, **kwargs) return ray.get(ref) diff --git a/python/ray/workflow/tests/utils.py b/python/ray/workflow/tests/utils.py index c558d5f4ae18..b96bc7a68c39 100644 --- a/python/ray/workflow/tests/utils.py +++ b/python/ray/workflow/tests/utils.py @@ -55,5 +55,5 @@ def skip_client_mode_test(): import pytest from ray._private.client_mode_hook import client_mode_should_convert - if client_mode_should_convert(auto_init=False): + if client_mode_should_convert(): pytest.skip("Not for Ray client test") diff --git a/python/ray/workflow/workflow_state_from_dag.py b/python/ray/workflow/workflow_state_from_dag.py index 9fd44a9448e3..b7f39ad6dc9d 100644 --- a/python/ray/workflow/workflow_state_from_dag.py +++ b/python/ray/workflow/workflow_state_from_dag.py @@ -156,7 +156,7 @@ def _node_visitor(node: Any) -> Any: # so it won't be mutated later. This guarantees correct # semantics. See "tests/test_variable_mutable.py" as # an example. - if client_mode_should_convert(auto_init=False): + if client_mode_should_convert(): # Handle client mode. 
The Ray client would serialize and # then deserialize objects in the Ray client server. When # the object is being deserialized, the serialization context From 51089d23b9a686ebf6fac9b09eba0c464db16a48 Mon Sep 17 00:00:00 2001 From: Max Pumperla Date: Wed, 3 May 2023 20:07:59 +0200 Subject: [PATCH 211/424] [docs] pendo integration (#34946) Signed-off-by: Max Pumperla --- doc/source/_templates/layout.html | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/doc/source/_templates/layout.html b/doc/source/_templates/layout.html index 50de03599bde..f9a687d27080 100644 --- a/doc/source/_templates/layout.html +++ b/doc/source/_templates/layout.html @@ -15,5 +15,25 @@ gtag('config', 'UA-110413294-1'); + + + {% endblock %} From ee206e560144f04574e616991ef60780fddf8f7d Mon Sep 17 00:00:00 2001 From: Chen Shen Date: Wed, 3 May 2023 11:17:27 -0700 Subject: [PATCH 212/424] [autoscaler-v2] add core as autoscaler owner (#35012) Why are these changes needed? this will help speed up core-review/merge process. 
--- .github/CODEOWNERS | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 066ff295d991..8bc22a8aadfb 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -23,7 +23,7 @@ /python/ray/autoscaler/_private/monitor.py @wuisawesome @DmitriGekhtman # Autoscaler -/python/ray/autoscaler/ @wuisawesome @DmitriGekhtman @ericl +/python/ray/autoscaler/ @wuisawesome @DmitriGekhtman @ericl @ray-project/ray-core # Metrics /src/ray/stats/metric_defs.h @ray-project/ray-core From 56e61d58be4743eb5cba17bc2f42050880a74851 Mon Sep 17 00:00:00 2001 From: Max Pumperla Date: Wed, 3 May 2023 21:22:41 +0200 Subject: [PATCH 213/424] [docs] fix build (#35007) Signed-off-by: Max Pumperla --- doc/requirements-doc.txt | 1 - doc/source/conf.py | 1 - doc/source/ray-overview/use-cases.rst | 2 +- 3 files changed, 1 insertion(+), 3 deletions(-) diff --git a/doc/requirements-doc.txt b/doc/requirements-doc.txt index 4ec5ab31d03d..d475b3d9e606 100644 --- a/doc/requirements-doc.txt +++ b/doc/requirements-doc.txt @@ -58,7 +58,6 @@ sphinx-panels==0.6.0 sphinx-version-warning==1.1.2 sphinx-book-theme==0.3.3 sphinx-external-toc==0.2.3 -sphinxcontrib.yt==0.2.2 sphinx-sitemap==2.2.0 sphinxcontrib-redoc==1.6.0 sphinx-tabs==3.4.0 diff --git a/doc/source/conf.py b/doc/source/conf.py index 1dbaeea85714..49624d214126 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -45,7 +45,6 @@ "sphinx-jsonschema", "sphinxemoji.sphinxemoji", "sphinx_copybutton", - "sphinxcontrib.yt", "versionwarning.extension", "sphinx_sitemap", "myst_nb", diff --git a/doc/source/ray-overview/use-cases.rst b/doc/source/ray-overview/use-cases.rst index 51b45393a9b3..95f4a02b1b38 100644 --- a/doc/source/ray-overview/use-cases.rst +++ b/doc/source/ray-overview/use-cases.rst @@ -576,7 +576,7 @@ The following highlights feature companies leveraging Ray's unified API to build :img-top: /images/ray_logo.png :class-img-top: pt-2 w-75 d-block mx-auto fixed-height-img - .. 
button-ref:: https://www.youtube.com/watch?v=_L0lsShbKaY + .. button-link:: https://www.youtube.com/watch?v=_L0lsShbKaY [Talk] Ray Summit Panel - ML Platform on Ray From b410a4e4b19f28eb60515051261b0a558701462c Mon Sep 17 00:00:00 2001 From: Max Pumperla Date: Wed, 3 May 2023 21:23:40 +0200 Subject: [PATCH 214/424] [docs] fix book link check (#35009) Signed-off-by: Max Pumperla --- doc/source/data/mars-on-ray.rst | 4 +- doc/source/tune/examples/tune-xgboost.ipynb | 2506 +++++++++---------- 2 files changed, 1255 insertions(+), 1255 deletions(-) diff --git a/doc/source/data/mars-on-ray.rst b/doc/source/data/mars-on-ray.rst index 423f42580c6e..bac664086fc8 100644 --- a/doc/source/data/mars-on-ray.rst +++ b/doc/source/data/mars-on-ray.rst @@ -13,7 +13,7 @@ all mars scheduler optimizations. If ray tasks mode is used, all tasks will be s pipeline capabilities provided by ray futures. -.. _`Mars`: https://docs.pymars.org +.. _`Mars`: https://mars-project.readthedocs.io/en/latest/ Installation @@ -75,4 +75,4 @@ Interact with Datastream: df2 = ds.to_mars() print(df2.head(5).execute()) -Refer to _`Mars on Ray`: https://docs.pymars.org/en/latest/installation/ray.html for more information. +Refer to _`Mars on Ray`: https://mars-project.readthedocs.io/en/latest/installation/ray.html#mars-ray for more information. diff --git a/doc/source/tune/examples/tune-xgboost.ipynb b/doc/source/tune/examples/tune-xgboost.ipynb index cde9b8ce92e0..08f98ef1933e 100644 --- a/doc/source/tune/examples/tune-xgboost.ipynb +++ b/doc/source/tune/examples/tune-xgboost.ipynb @@ -1,1256 +1,1256 @@ { - "cells": [ - { - "cell_type": "markdown", - "id": "edce67b9", - "metadata": {}, - "source": [ - "# Tuning XGBoost hyperparameters with Ray Tune\n", - "\n", - "(tune-xgboost-ref)=\n", - "\n", - "XGBoost is currently one of the most popular machine learning algorithms. 
It performs\n", - "very well on a large selection of tasks, and was the key to success in many Kaggle\n", - "competitions.\n", - "\n", - "```{image} /images/xgboost_logo.png\n", - ":align: center\n", - ":alt: XGBoost\n", - ":target: https://xgboost.readthedocs.io/en/latest/\n", - ":width: 200px\n", - "```\n", - "\n", - "This tutorial will give you a quick introduction to XGBoost, show you how\n", - "to train an XGBoost model, and then guide you on how to optimize XGBoost\n", - "parameters using Tune to get the best performance. We tackle the following topics:\n", - "\n", - "```{contents}\n", - ":depth: 2\n", - "```\n", - "\n", - ":::{note}\n", - "To run this tutorial, you will need to install the following:\n", - "\n", - "```bash\n", - "$ pip install xgboost\n", - "```\n", - ":::\n", - "\n", - "## What is XGBoost\n", - "\n", - "XGBoost is an acronym for e**X**treme **G**radient **Boost**ing. Internally,\n", - "XGBoost uses [decision trees](https://en.wikipedia.org/wiki/Decision_tree). Instead\n", - "of training just one large decision tree, XGBoost and other related algorithms train\n", - "many small decision trees. The intuition behind this is that even though single\n", - "decision trees can be inaccurate and suffer from high variance,\n", - "combining the output of a large number of these weak learners can actually lead to\n", - "strong learner, resulting in better predictions and less variance.\n", - "\n", - ":::{figure} /images/tune-xgboost-ensemble.svg\n", - ":alt: Single vs. ensemble learning\n", - "\n", - "A single decision tree (left) might be able to get to an accuracy of 70%\n", - "for a binary classification task. By combining the output of several small\n", - "decision trees, an ensemble learner (right) might end up with a higher accuracy\n", - "of 90%.\n", - ":::\n", - "\n", - "Boosting algorithms start with a single small decision tree and evaluate how well\n", - "it predicts the given examples. 
When building the next tree, those samples that have\n", - "been misclassified before have a higher chance of being used to generate the tree.\n", - "This is useful because it avoids overfitting to samples that can be easily classified\n", - "and instead tries to come up with models that are able to classify hard examples, too.\n", - "Please see [here for a more thorough introduction to bagging and boosting algorithms](https://towardsdatascience.com/ensemble-methods-bagging-boosting-and-stacking-c9214a10a205).\n", - "\n", - "There are many boosting algorithms. In their core, they are all very similar. XGBoost\n", - "uses second-level derivatives to find splits that maximize the *gain* (the inverse of\n", - "the *loss*) - hence the name. In practice, there really is no drawback in using\n", - "XGBoost over other boosting algorithms - in fact, it usually shows the best performance.\n", - "\n", - "## Training a simple XGBoost classifier\n", - "\n", - "Let's first see how a simple XGBoost classifier can be trained. We'll use the\n", - "`breast_cancer`-Dataset included in the `sklearn` dataset collection. This is\n", - "a binary classification dataset. 
Given 30 different input features, our task is to\n", - "learn to identify subjects with breast cancer and those without.\n", - "\n", - "Here is the full code to train a simple XGBoost model:" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "id": "77b3c71c", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Accuracy: 0.9650\n" - ] - } - ], - "source": [ - "import sklearn.datasets\n", - "import sklearn.metrics\n", - "from sklearn.model_selection import train_test_split\n", - "import xgboost as xgb\n", - "\n", - "\n", - "def train_breast_cancer(config):\n", - " # Load dataset\n", - " data, labels = sklearn.datasets.load_breast_cancer(return_X_y=True)\n", - " # Split into train and test set\n", - " train_x, test_x, train_y, test_y = train_test_split(data, labels, test_size=0.25)\n", - " # Build input matrices for XGBoost\n", - " train_set = xgb.DMatrix(train_x, label=train_y)\n", - " test_set = xgb.DMatrix(test_x, label=test_y)\n", - " # Train the classifier\n", - " results = {}\n", - " bst = xgb.train(\n", - " config,\n", - " train_set,\n", - " evals=[(test_set, \"eval\")],\n", - " evals_result=results,\n", - " verbose_eval=False,\n", - " )\n", - " return results\n", - "\n", - "\n", - "if __name__ == \"__main__\":\n", - " results = train_breast_cancer(\n", - " {\"objective\": \"binary:logistic\", \"eval_metric\": [\"logloss\", \"error\"]}\n", - " )\n", - " accuracy = 1.0 - results[\"eval\"][\"error\"][-1]\n", - " print(f\"Accuracy: {accuracy:.4f}\")\n" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "id": "ec2a13f8", - "metadata": {}, - "source": [ - "As you can see, the code is quite simple. First, the dataset is loaded and split\n", - "into a `test` and `train` set. The XGBoost model is trained with `xgb.train()`.\n", - "XGBoost automatically evaluates metrics we specified on the test set. 
In our case\n", - "it calculates the *logloss* and the prediction *error*, which is the percentage of\n", - "misclassified examples. To calculate the accuracy, we just have to subtract the error\n", - "from `1.0`. Even in this simple example, most runs result\n", - "in a good accuracy of over `0.90`.\n", - "\n", - "Maybe you have noticed the `config` parameter we pass to the XGBoost algorithm. This\n", - "is a {class}`dict` in which you can specify parameters for the XGBoost algorithm. In this\n", - "simple example, the only parameters we passed are the `objective` and `eval_metric` parameters.\n", - "The value `binary:logistic` tells XGBoost that we aim to train a logistic regression model for\n", - "a binary classification task. You can find an overview over all valid objectives\n", - "[here in the XGBoost documentation](https://xgboost.readthedocs.io/en/latest/parameter.html#learning-task-parameters).\n", - "\n", - "## XGBoost Hyperparameters\n", - "\n", - "Even with the default settings, XGBoost was able to get to a good accuracy on the\n", - "breast cancer dataset. However, as in many machine learning algorithms, there are\n", - "many knobs to tune which might lead to even better performance. Let's explore some of\n", - "them below.\n", - "\n", - "### Maximum tree depth\n", - "\n", - "Remember that XGBoost internally uses many decision tree models to come up with\n", - "predictions. When training a decision tree, we need to tell the algorithm how\n", - "large the tree may get. The parameter for this is called the tree *depth*.\n", - "\n", - ":::{figure} /images/tune-xgboost-depth.svg\n", - ":align: center\n", - ":alt: Decision tree depth\n", - "\n", - "In this image, the left tree has a depth of 2, and the right tree a depth of 3.\n", - "Note that with each level, $2^{(d-1)}$ splits are added, where *d* is the depth\n", - "of the tree.\n", - ":::\n", - "\n", - "Tree depth is a property that concerns the model complexity. 
If you only allow short\n", - "trees, the models are likely not very precise - they underfit the data. If you allow\n", - "very large trees, the single models are likely to overfit to the data. In practice,\n", - "a number between `2` and `6` is often a good starting point for this parameter.\n", - "\n", - "XGBoost's default value is `3`.\n", - "\n", - "### Minimum child weight\n", - "\n", - "When a decision tree creates new leaves, it splits up the remaining data at one node\n", - "into two groups. If there are only few samples in one of these groups, it often\n", - "doesn't make sense to split it further. One of the reasons for this is that the\n", - "model is harder to train when we have fewer samples.\n", - "\n", - ":::{figure} /images/tune-xgboost-weight.svg\n", - ":align: center\n", - ":alt: Minimum child weight\n", - "\n", - "In this example, we start with 100 examples. At the first node, they are split\n", - "into 4 and 96 samples, respectively. In the next step, our model might find\n", - "that it doesn't make sense to split the 4 examples more. It thus only continues\n", - "to add leaves on the right side.\n", - ":::\n", - "\n", - "The parameter used by the model to decide if it makes sense to split a node is called\n", - "the *minimum child weight*. In the case of linear regression, this is just the absolute\n", - "number of nodes requried in each child. In other objectives, this value is determined\n", - "using the weights of the examples, hence the name.\n", - "\n", - "The larger the value, the more constrained the trees are and the less deep they will be.\n", - "This parameter thus also affects the model complexity. Values can range between 0\n", - "and infinity and are dependent on the sample size. For our ca. 
500 examples in the\n", - "breast cancer dataset, values between `0` and `10` should be sensible.\n", - "\n", - "XGBoost's default value is `1`.\n", - "\n", - "### Subsample size\n", - "\n", - "Each decision tree we add is trained on a subsample of the total training dataset.\n", - "The probabilities for the samples are weighted according to the XGBoost algorithm,\n", - "but we can decide on which fraction of the samples we want to train each decision\n", - "tree on.\n", - "\n", - "Setting this value to `0.7` would mean that we randomly sample `70%` of the\n", - "training dataset before each training iteration.\n", - "\n", - "XGBoost's default value is `1`.\n", - "\n", - "### Learning rate / Eta\n", - "\n", - "Remember that XGBoost sequentially trains many decision trees, and that later trees\n", - "are more likely trained on data that has been misclassified by prior trees. In effect\n", - "this means that earlier trees make decisions for easy samples (i.e. those samples that\n", - "can easily be classified) and later trees make decisions for harder samples. It is then\n", - "sensible to assume that the later trees are less accurate than earlier trees.\n", - "\n", - "To address this fact, XGBoost uses a parameter called *Eta*, which is sometimes called\n", - "the *learning rate*. 
Don't confuse this with learning rates from gradient descent!\n", - "The original [paper on stochastic gradient boosting](https://jerryfriedman.su.domains/ftp/stobst.pdf)\n", - "introduces this parameter like so:\n", - "\n", - "$$\n", - "F_m(x) = F_{m-1}(x) + \\eta \\cdot \\gamma_{lm} \\textbf{1}(x \\in R_{lm})\n", - "$$\n", - "\n", - "This is just a complicated way to say that when we train we new decision tree,\n", - "represented by $\\gamma_{lm} \\textbf{1}(x \\in R_{lm})$, we want to dampen\n", - "its effect on the previous prediction $F_{m-1}(x)$ with a factor\n", - "$\\eta$.\n", - "\n", - "Typical values for this parameter are between `0.01` and `` 0.3` ``.\n", - "\n", - "XGBoost's default value is `0.3`.\n", - "\n", - "### Number of boost rounds\n", - "\n", - "Lastly, we can decide on how many boosting rounds we perform, which means how\n", - "many decision trees we ultimately train. When we do heavy subsampling or use small\n", - "learning rate, it might make sense to increase the number of boosting rounds.\n", - "\n", - "XGBoost's default value is `10`.\n", - "\n", - "### Putting it together\n", - "\n", - "Let's see how this looks like in code! We just need to adjust our `config` dict:" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": "35073e88", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Accuracy: 0.9790\n" - ] - } - ], - "source": [ - "if __name__ == \"__main__\":\n", - " config = {\n", - " \"objective\": \"binary:logistic\",\n", - " \"eval_metric\": [\"logloss\", \"error\"],\n", - " \"max_depth\": 2,\n", - " \"min_child_weight\": 0,\n", - " \"subsample\": 0.8,\n", - " \"eta\": 0.2,\n", - " }\n", - " results = train_breast_cancer(config)\n", - " accuracy = 1.0 - results[\"eval\"][\"error\"][-1]\n", - " print(f\"Accuracy: {accuracy:.4f}\")\n" - ] - }, - { - "cell_type": "markdown", - "id": "69cf0c13", - "metadata": {}, - "source": [ - "The rest stays the same. 
Please note that we do not adjust the `num_boost_rounds` here.\n", - "The result should also show a high accuracy of over 90%.\n", - "\n", - "## Tuning the configuration parameters\n", - "\n", - "XGBoosts default parameters already lead to a good accuracy, and even our guesses in the\n", - "last section should result in accuracies well above 90%. However, our guesses were\n", - "just that: guesses. Often we do not know what combination of parameters would actually\n", - "lead to the best results on a machine learning task.\n", - "\n", - "Unfortunately, there are infinitely many combinations of hyperparameters we could try\n", - "out. Should we combine `max_depth=3` with `subsample=0.8` or with `subsample=0.9`?\n", - "What about the other parameters?\n", - "\n", - "This is where hyperparameter tuning comes into play. By using tuning libraries such as\n", - "Ray Tune we can try out combinations of hyperparameters. Using sophisticated search\n", - "strategies, these parameters can be selected so that they are likely to lead to good\n", - "results (avoiding an expensive *exhaustive search*). Also, trials that do not perform\n", - "well can be preemptively stopped to reduce waste of computing resources. Lastly, Ray Tune\n", - "also takes care of training these runs in parallel, greatly increasing search speed.\n", - "\n", - "Let's start with a basic example on how to use Tune for this. We just need to make\n", - "a few changes to our code-block:" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "ff856a82", - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2022-07-22 15:52:52,004\tINFO services.py:1483 -- View the Ray dashboard at \u001b[1m\u001b[32mhttp://127.0.0.1:8268\u001b[39m\u001b[22m\n", - "2022-07-22 15:52:55,858\tWARNING function_trainable.py:619 -- Function checkpointing is disabled. This may result in unexpected behavior when using checkpointing features or certain schedulers. 
To enable, set the train function arguments to be `func(config, checkpoint_dir=None)`.\n" - ] - }, - { - "data": { - "text/html": [ - "== Status ==
    Current time: 2022-07-22 15:53:04 (running for 00:00:07.77)
    Memory usage on this node: 10.5/16.0 GiB
    Using FIFO scheduling algorithm.
    Resources requested: 0/16 CPUs, 0/0 GPUs, 0.0/4.57 GiB heap, 0.0/2.0 GiB objects
    Result logdir: /Users/kai/ray_results/train_breast_cancer_2022-07-22_15-52-48
    Number of trials: 10/10 (10 TERMINATED)
    \n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "
    Trial name status loc eta max_depth min_child_weight subsample acc iter total time (s)
    train_breast_cancer_f8669_00000TERMINATED127.0.0.1:488520.0069356 5 3 0.8235040.944056 1 0.0316169
    train_breast_cancer_f8669_00001TERMINATED127.0.0.1:488570.00145619 6 3 0.8329470.958042 1 0.0328588
    train_breast_cancer_f8669_00002TERMINATED127.0.0.1:488580.00108208 7 3 0.9873190.944056 1 0.0319381
    train_breast_cancer_f8669_00003TERMINATED127.0.0.1:488590.00530429 8 2 0.6156910.923077 1 0.028388
    train_breast_cancer_f8669_00004TERMINATED127.0.0.1:488600.000721843 8 1 0.6509730.958042 1 0.0299618
    train_breast_cancer_f8669_00005TERMINATED127.0.0.1:488610.0074509 1 1 0.7383410.874126 1 0.0193682
    train_breast_cancer_f8669_00006TERMINATED127.0.0.1:488620.0879882 8 2 0.6715760.944056 1 0.0267372
    train_breast_cancer_f8669_00007TERMINATED127.0.0.1:488630.0765404 7 2 0.7081570.965035 1 0.0276129
    train_breast_cancer_f8669_00008TERMINATED127.0.0.1:488640.000627649 6 1 0.81121 0.951049 1 0.0310998
    train_breast_cancer_f8669_00009TERMINATED127.0.0.1:488650.000383711 2 3 0.9905790.93007 1 0.0274954


    " - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2022-07-22 15:52:57,385\tINFO plugin_schema_manager.py:52 -- Loading the default runtime env schemas: ['/Users/kai/coding/ray/python/ray/_private/runtime_env/../../runtime_env/schemas/working_dir_schema.json', '/Users/kai/coding/ray/python/ray/_private/runtime_env/../../runtime_env/schemas/pip_schema.json'].\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Result for train_breast_cancer_f8669_00000:\n", - " date: 2022-07-22_15-53-00\n", - " done: true\n", - " experiment_id: 07d10c5f31e74133b53272b7ccf9c528\n", - " hostname: Kais-MacBook-Pro.local\n", - " iterations_since_restore: 1\n", - " mean_accuracy: 0.9440559440559441\n", - " node_ip: 127.0.0.1\n", - " pid: 48852\n", - " time_since_restore: 0.031616926193237305\n", - " time_this_iter_s: 0.031616926193237305\n", - " time_total_s: 0.031616926193237305\n", - " timestamp: 1658501580\n", - " timesteps_since_restore: 0\n", - " training_iteration: 1\n", - " trial_id: f8669_00000\n", - " warmup_time: 0.0027849674224853516\n", - " \n", - "Result for train_breast_cancer_f8669_00009:\n", - " date: 2022-07-22_15-53-04\n", - " done: true\n", - " experiment_id: bc0d5dd2d079432b859faac8a18928f0\n", - " hostname: Kais-MacBook-Pro.local\n", - " iterations_since_restore: 1\n", - " mean_accuracy: 0.9300699300699301\n", - " node_ip: 127.0.0.1\n", - " pid: 48865\n", - " time_since_restore: 0.027495384216308594\n", - " time_this_iter_s: 0.027495384216308594\n", - " time_total_s: 0.027495384216308594\n", - " timestamp: 1658501584\n", - " timesteps_since_restore: 0\n", - " training_iteration: 1\n", - " trial_id: f8669_00009\n", - " warmup_time: 0.005235910415649414\n", - " \n", - "Result for train_breast_cancer_f8669_00001:\n", - " date: 2022-07-22_15-53-04\n", - " done: true\n", - " experiment_id: 4b10d350d4374a0d9e7d0c3b1d4e3203\n", - " 
hostname: Kais-MacBook-Pro.local\n", - " iterations_since_restore: 1\n", - " mean_accuracy: 0.958041958041958\n", - " node_ip: 127.0.0.1\n", - " pid: 48857\n", - " time_since_restore: 0.032858848571777344\n", - " time_this_iter_s: 0.032858848571777344\n", - " time_total_s: 0.032858848571777344\n", - " timestamp: 1658501584\n", - " timesteps_since_restore: 0\n", - " training_iteration: 1\n", - " trial_id: f8669_00001\n", - " warmup_time: 0.004731178283691406\n", - " \n", - "Result for train_breast_cancer_f8669_00008:\n", - " date: 2022-07-22_15-53-04\n", - " done: true\n", - " experiment_id: 91c25cbbeb6f409d93e1d6537cb8e1ee\n", - " hostname: Kais-MacBook-Pro.local\n", - " iterations_since_restore: 1\n", - " mean_accuracy: 0.951048951048951\n", - " node_ip: 127.0.0.1\n", - " pid: 48864\n", - " time_since_restore: 0.031099796295166016\n", - " time_this_iter_s: 0.031099796295166016\n", - " time_total_s: 0.031099796295166016\n", - " timestamp: 1658501584\n", - " timesteps_since_restore: 0\n", - " training_iteration: 1\n", - " trial_id: f8669_00008\n", - " warmup_time: 0.003270864486694336\n", - " \n", - "Result for train_breast_cancer_f8669_00005:\n", - " date: 2022-07-22_15-53-04\n", - " done: true\n", - " experiment_id: d225b0fb59e14da7adba952456ccf1d5\n", - " hostname: Kais-MacBook-Pro.local\n", - " iterations_since_restore: 1\n", - " mean_accuracy: 0.8741258741258742\n", - " node_ip: 127.0.0.1\n", - " pid: 48861\n", - " time_since_restore: 0.01936817169189453\n", - " time_this_iter_s: 0.01936817169189453\n", - " time_total_s: 0.01936817169189453\n", - " timestamp: 1658501584\n", - " timesteps_since_restore: 0\n", - " training_iteration: 1\n", - " trial_id: f8669_00005\n", - " warmup_time: 0.003901958465576172\n", - " \n", - "Result for train_breast_cancer_f8669_00004:\n", - " date: 2022-07-22_15-53-04\n", - " done: true\n", - " experiment_id: 322484af6ea5422f8aaf8ff6a91af4f7\n", - " hostname: Kais-MacBook-Pro.local\n", - " iterations_since_restore: 1\n", - " 
mean_accuracy: 0.958041958041958\n", - " node_ip: 127.0.0.1\n", - " pid: 48860\n", - " time_since_restore: 0.029961824417114258\n", - " time_this_iter_s: 0.029961824417114258\n", - " time_total_s: 0.029961824417114258\n", - " timestamp: 1658501584\n", - " timesteps_since_restore: 0\n", - " training_iteration: 1\n", - " trial_id: f8669_00004\n", - " warmup_time: 0.003547191619873047\n", - " \n", - "Result for train_breast_cancer_f8669_00002:\n", - " date: 2022-07-22_15-53-04\n", - " done: true\n", - " experiment_id: 3f588954160b42ce8ce200f68127ebcd\n", - " hostname: Kais-MacBook-Pro.local\n", - " iterations_since_restore: 1\n", - " mean_accuracy: 0.9440559440559441\n", - " node_ip: 127.0.0.1\n", - " pid: 48858\n", - " time_since_restore: 0.03193807601928711\n", - " time_this_iter_s: 0.03193807601928711\n", - " time_total_s: 0.03193807601928711\n", - " timestamp: 1658501584\n", - " timesteps_since_restore: 0\n", - " training_iteration: 1\n", - " trial_id: f8669_00002\n", - " warmup_time: 0.003523111343383789\n", - " \n", - "Result for train_breast_cancer_f8669_00003:\n", - " date: 2022-07-22_15-53-04\n", - " done: true\n", - " experiment_id: a39ea777ce2d4ebca51b3d7a4179dae5\n", - " hostname: Kais-MacBook-Pro.local\n", - " iterations_since_restore: 1\n", - " mean_accuracy: 0.9230769230769231\n", - " node_ip: 127.0.0.1\n", - " pid: 48859\n", - " time_since_restore: 0.028388023376464844\n", - " time_this_iter_s: 0.028388023376464844\n", - " time_total_s: 0.028388023376464844\n", - " timestamp: 1658501584\n", - " timesteps_since_restore: 0\n", - " training_iteration: 1\n", - " trial_id: f8669_00003\n", - " warmup_time: 0.0035560131072998047\n", - " \n", - "Result for train_breast_cancer_f8669_00006:\n", - " date: 2022-07-22_15-53-04\n", - " done: true\n", - " experiment_id: f97c6b9674854f8d89ec26ba58cc1618\n", - " hostname: Kais-MacBook-Pro.local\n", - " iterations_since_restore: 1\n", - " mean_accuracy: 0.9440559440559441\n", - " node_ip: 127.0.0.1\n", - " pid: 
48862\n", - " time_since_restore: 0.026737213134765625\n", - " time_this_iter_s: 0.026737213134765625\n", - " time_total_s: 0.026737213134765625\n", - " timestamp: 1658501584\n", - " timesteps_since_restore: 0\n", - " training_iteration: 1\n", - " trial_id: f8669_00006\n", - " warmup_time: 0.003425121307373047\n", - " \n", - "Result for train_breast_cancer_f8669_00007:\n", - " date: 2022-07-22_15-53-04\n", - " done: true\n", - " experiment_id: ff172037065a4d55998ed72f51bdc5df\n", - " hostname: Kais-MacBook-Pro.local\n", - " iterations_since_restore: 1\n", - " mean_accuracy: 0.965034965034965\n", - " node_ip: 127.0.0.1\n", - " pid: 48863\n", - " time_since_restore: 0.027612924575805664\n", - " time_this_iter_s: 0.027612924575805664\n", - " time_total_s: 0.027612924575805664\n", - " timestamp: 1658501584\n", - " timesteps_since_restore: 0\n", - " training_iteration: 1\n", - " trial_id: f8669_00007\n", - " warmup_time: 0.0031311511993408203\n", - " \n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2022-07-22 15:53:04,846\tINFO tune.py:738 -- Total run time: 8.99 seconds (7.74 seconds for the tuning loop).\n" - ] - } - ], - "source": [ - "import sklearn.datasets\n", - "import sklearn.metrics\n", - "\n", - "from ray import air, tune\n", - "from ray.air import session\n", - "\n", - "\n", - "def train_breast_cancer(config):\n", - " # Load dataset\n", - " data, labels = sklearn.datasets.load_breast_cancer(return_X_y=True)\n", - " # Split into train and test set\n", - " train_x, test_x, train_y, test_y = train_test_split(data, labels, test_size=0.25)\n", - " # Build input matrices for XGBoost\n", - " train_set = xgb.DMatrix(train_x, label=train_y)\n", - " test_set = xgb.DMatrix(test_x, label=test_y)\n", - " # Train the classifier\n", - " results = {}\n", - " xgb.train(\n", - " config,\n", - " train_set,\n", - " evals=[(test_set, \"eval\")],\n", - " evals_result=results,\n", - " verbose_eval=False,\n", - " )\n", - " # Return prediction 
accuracy\n", - " accuracy = 1.0 - results[\"eval\"][\"error\"][-1]\n", - " session.report({\"mean_accuracy\": accuracy, \"done\": True})\n", - "\n", - "\n", - "if __name__ == \"__main__\":\n", - " config = {\n", - " \"objective\": \"binary:logistic\",\n", - " \"eval_metric\": [\"logloss\", \"error\"],\n", - " \"max_depth\": tune.randint(1, 9),\n", - " \"min_child_weight\": tune.choice([1, 2, 3]),\n", - " \"subsample\": tune.uniform(0.5, 1.0),\n", - " \"eta\": tune.loguniform(1e-4, 1e-1),\n", - " }\n", - " tuner = tune.Tuner(\n", - " train_breast_cancer,\n", - " tune_config=tune.TuneConfig(\n", - " num_samples=10,\n", - " ),\n", - " param_space=config,\n", - " )\n", - " results = tuner.fit()\n" - ] - }, - { - "cell_type": "markdown", - "id": "4999e858", - "metadata": {}, - "source": [ - "As you can see, the changes in the actual training function are minimal. Instead of\n", - "returning the accuracy value, we report it back to Tune using `session.report()`.\n", - "Our `config` dictionary only changed slightly. Instead of passing hard-coded\n", - "parameters, we tell Tune to choose values from a range of valid options. There are\n", - "a number of options we have here, all of which are explained in\n", - "{ref}`the Tune docs `.\n", - "\n", - "For a brief explanation, this is what they do:\n", - "\n", - "- `tune.randint(min, max)` chooses a random integer value between *min* and *max*.\n", - " Note that *max* is exclusive, so it will not be sampled.\n", - "- `tune.choice([a, b, c])` chooses one of the items of the list at random. Each item\n", - " has the same chance to be sampled.\n", - "- `tune.uniform(min, max)` samples a floating point number between *min* and *max*.\n", - " Note that *max* is exclusive here, too.\n", - "- `tune.loguniform(min, max, base=10)` samples a floating point number between *min* and *max*,\n", - " but applies a logarithmic transformation to these boundaries first. 
Thus, this makes\n", - " it easy to sample values from different orders of magnitude.\n", - "\n", - "The `num_samples=10` option we pass to the `TuneConfig()` means that we sample 10 different\n", - "hyperparameter configurations from this search space.\n", - "\n", - "The output of our training run coud look like this:\n", - "\n", - "```{code-block} bash\n", - ":emphasize-lines: 14\n", - "\n", - " Number of trials: 10/10 (10 TERMINATED)\n", - " +---------------------------------+------------+-------+-------------+-------------+--------------------+-------------+----------+--------+------------------+\n", - " | Trial name | status | loc | eta | max_depth | min_child_weight | subsample | acc | iter | total time (s) |\n", - " |---------------------------------+------------+-------+-------------+-------------+--------------------+-------------+----------+--------+------------------|\n", - " | train_breast_cancer_b63aa_00000 | TERMINATED | | 0.000117625 | 2 | 2 | 0.616347 | 0.916084 | 1 | 0.0306492 |\n", - " | train_breast_cancer_b63aa_00001 | TERMINATED | | 0.0382954 | 8 | 2 | 0.581549 | 0.937063 | 1 | 0.0357082 |\n", - " | train_breast_cancer_b63aa_00002 | TERMINATED | | 0.000217926 | 1 | 3 | 0.528428 | 0.874126 | 1 | 0.0264609 |\n", - " | train_breast_cancer_b63aa_00003 | TERMINATED | | 0.000120929 | 8 | 1 | 0.634508 | 0.958042 | 1 | 0.036406 |\n", - " | train_breast_cancer_b63aa_00004 | TERMINATED | | 0.00839715 | 5 | 1 | 0.730624 | 0.958042 | 1 | 0.0389378 |\n", - " | train_breast_cancer_b63aa_00005 | TERMINATED | | 0.000732948 | 8 | 2 | 0.915863 | 0.958042 | 1 | 0.0382841 |\n", - " | train_breast_cancer_b63aa_00006 | TERMINATED | | 0.000856226 | 4 | 1 | 0.645209 | 0.916084 | 1 | 0.0357089 |\n", - " | train_breast_cancer_b63aa_00007 | TERMINATED | | 0.00769908 | 7 | 1 | 0.729443 | 0.909091 | 1 | 0.0390737 |\n", - " | train_breast_cancer_b63aa_00008 | TERMINATED | | 0.00186339 | 5 | 3 | 0.595744 | 0.944056 | 1 | 0.0343912 |\n", - " | train_breast_cancer_b63aa_00009 
| TERMINATED | | 0.000950272 | 3 | 2 | 0.835504 | 0.965035 | 1 | 0.0348201 |\n", - " +---------------------------------+------------+-------+-------------+-------------+--------------------+-------------+----------+--------+------------------+\n", - "```\n", - "\n", - "The best configuration we found used `eta=0.000950272`, `max_depth=3`,\n", - "`min_child_weight=2`, `subsample=0.835504` and reached an accuracy of\n", - "`0.965035`.\n", - "\n", - "## Early stopping\n", - "\n", - "Currently, Tune samples 10 different hyperparameter configurations and trains a full\n", - "XGBoost on all of them. In our small example, training is very fast. However,\n", - "if training takes longer, a significant amount of computer resources is spent on trials\n", - "that will eventually show a bad performance, e.g. a low accuracy. It would be good\n", - "if we could identify these trials early and stop them, so we don't waste any resources.\n", - "\n", - "This is where Tune's *Schedulers* shine. A Tune `TrialScheduler` is responsible\n", - "for starting and stopping trials. Tune implements a number of different schedulers, each\n", - "described {ref}`in the Tune documentation `.\n", - "For our example, we will use the `AsyncHyperBandScheduler` or `ASHAScheduler`.\n", - "\n", - "The basic idea of this scheduler: We sample a number of hyperparameter configurations.\n", - "Each of these configurations is trained for a specific number of iterations.\n", - "After these iterations, only the best performing hyperparameters are retained. These\n", - "are selected according to some loss metric, usually an evaluation loss. This cycle is\n", - "repeated until we end up with the best configuration.\n", - "\n", - "The `ASHAScheduler` needs to know three things:\n", - "\n", - "1. Which metric should be used to identify badly performing trials?\n", - "2. Should this metric be maximized or minimized?\n", - "3. 
How many iterations does each trial train for?\n", - "\n", - "There are more parameters, which are explained in the\n", - "{ref}`documentation `.\n", - "\n", - "Lastly, we have to report the loss metric to Tune. We do this with a `Callback` that\n", - "XGBoost accepts and calls after each evaluation round. Ray Tune comes\n", - "with {ref}`two XGBoost callbacks `\n", - "we can use for this. The `TuneReportCallback` just reports the evaluation\n", - "metrics back to Tune. The `TuneReportCheckpointCallback` also saves\n", - "checkpoints after each evaluation round. We will just use the latter in this\n", - "example so that we can retrieve the saved model later.\n", - "\n", - "These parameters from the `eval_metrics` configuration setting are then automatically\n", - "reported to Tune via the callback. Here, the raw error will be reported, not the accuracy.\n", - "To display the best reached accuracy, we will inverse it later.\n", - "\n", - "We will also load the best checkpointed model so that we can use it for predictions.\n", - "The best model is selected with respect to the `metric` and `mode` parameters we\n", - "pass to the `TunerConfig()`." - ] - }, - { - "cell_type": "code", - "execution_count": 9, - "id": "d08b5b0a", - "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "== Status ==
    Current time: 2022-07-22 16:56:01 (running for 00:00:10.38)
    Memory usage on this node: 10.3/16.0 GiB
    Using AsyncHyperBand: num_stopped=10\n", - "Bracket: Iter 8.000: -0.5107275277792991 | Iter 4.000: -0.5876629346317344 | Iter 2.000: -0.6544494184997531 | Iter 1.000: -0.6859214191253369
    Resources requested: 0/16 CPUs, 0/0 GPUs, 0.0/4.57 GiB heap, 0.0/2.0 GiB objects
    Current best trial: c28a3_00003 with eval-logloss=0.38665050018083796 and parameters={'objective': 'binary:logistic', 'eval_metric': ['logloss', 'error'], 'max_depth': 2, 'min_child_weight': 3, 'subsample': 0.782626252548841, 'eta': 0.06385952388342125}
    Result logdir: /Users/kai/ray_results/train_breast_cancer_2022-07-22_16-55-50
    Number of trials: 10/10 (10 TERMINATED)
    \n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "
    Trial name status loc eta max_depth min_child_weight subsample iter total time (s) eval-logloss eval-error
    train_breast_cancer_c28a3_00000TERMINATED127.0.0.1:544160.0186954 2 2 0.516916 10 0.22218 0.571496 0.0629371
    train_breast_cancer_c28a3_00001TERMINATED127.0.0.1:544400.0304404 8 2 0.745969 2 0.135674 0.650353 0.0629371
    train_breast_cancer_c28a3_00002TERMINATED127.0.0.1:544410.0217157 8 3 0.764138 2 0.173076 0.658545 0.041958
    train_breast_cancer_c28a3_00003TERMINATED127.0.0.1:544420.0638595 2 3 0.782626 10 0.281865 0.386651 0.041958
    train_breast_cancer_c28a3_00004TERMINATED127.0.0.1:544430.00442794 7 2 0.792359 1 0.0270212 0.689577 0.0699301
    train_breast_cancer_c28a3_00005TERMINATED127.0.0.1:544440.00222624 3 1 0.536331 1 0.0238512 0.691446 0.0839161
    train_breast_cancer_c28a3_00006TERMINATED127.0.0.1:544450.000825129 1 1 0.82472 1 0.015312 0.692624 0.118881
    train_breast_cancer_c28a3_00007TERMINATED127.0.0.1:544460.000770826 7 2 0.947268 1 0.0175898 0.692598 0.132867
    train_breast_cancer_c28a3_00008TERMINATED127.0.0.1:544470.000429759 7 1 0.88524 1 0.0193739 0.692785 0.0559441
    train_breast_cancer_c28a3_00009TERMINATED127.0.0.1:544480.0149863 2 1 0.722738 1 0.0165932 0.682266 0.111888


    " - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Result for train_breast_cancer_c28a3_00000:\n", - " date: 2022-07-22_16-55-55\n", - " done: false\n", - " eval-error: 0.08391608391608392\n", - " eval-logloss: 0.6790360066440556\n", - " experiment_id: 2a3189442db341519836a07fb2d65dd9\n", - " hostname: Kais-MacBook-Pro.local\n", - " iterations_since_restore: 1\n", - " node_ip: 127.0.0.1\n", - " pid: 54416\n", - " time_since_restore: 0.01624011993408203\n", - " time_this_iter_s: 0.01624011993408203\n", - " time_total_s: 0.01624011993408203\n", - " timestamp: 1658505355\n", - " timesteps_since_restore: 0\n", - " training_iteration: 1\n", - " trial_id: c28a3_00000\n", - " warmup_time: 0.0035409927368164062\n", - " \n", - "Result for train_breast_cancer_c28a3_00000:\n", - " date: 2022-07-22_16-55-56\n", - " done: true\n", - " eval-error: 0.06293706293706294\n", - " eval-logloss: 0.5714958122560194\n", - " experiment_id: 2a3189442db341519836a07fb2d65dd9\n", - " hostname: Kais-MacBook-Pro.local\n", - " iterations_since_restore: 10\n", - " node_ip: 127.0.0.1\n", - " pid: 54416\n", - " time_since_restore: 0.22218012809753418\n", - " time_this_iter_s: 0.007044076919555664\n", - " time_total_s: 0.22218012809753418\n", - " timestamp: 1658505356\n", - " timesteps_since_restore: 0\n", - " training_iteration: 10\n", - " trial_id: c28a3_00000\n", - " warmup_time: 0.0035409927368164062\n", - " \n", - "Result for train_breast_cancer_c28a3_00003:\n", - " date: 2022-07-22_16-56-01\n", - " done: false\n", - " eval-error: 0.08391608391608392\n", - " eval-logloss: 0.6472820101918041\n", - " experiment_id: 7ff6133237404b4ea4755b9f8cd114f2\n", - " hostname: Kais-MacBook-Pro.local\n", - " iterations_since_restore: 1\n", - " node_ip: 127.0.0.1\n", - " pid: 54442\n", - " time_since_restore: 0.023206233978271484\n", - " time_this_iter_s: 0.023206233978271484\n", - " time_total_s: 
0.023206233978271484\n", - " timestamp: 1658505361\n", - " timesteps_since_restore: 0\n", - " training_iteration: 1\n", - " trial_id: c28a3_00003\n", - " warmup_time: 0.006722211837768555\n", - " \n", - "Result for train_breast_cancer_c28a3_00005:\n", - " date: 2022-07-22_16-56-01\n", - " done: true\n", - " eval-error: 0.08391608391608392\n", - " eval-logloss: 0.6914464114429234\n", - " experiment_id: 344762ab6d574b63a9374e19526d0510\n", - " hostname: Kais-MacBook-Pro.local\n", - " iterations_since_restore: 1\n", - " node_ip: 127.0.0.1\n", - " pid: 54444\n", - " time_since_restore: 0.02385115623474121\n", - " time_this_iter_s: 0.02385115623474121\n", - " time_total_s: 0.02385115623474121\n", - " timestamp: 1658505361\n", - " timesteps_since_restore: 0\n", - " training_iteration: 1\n", - " trial_id: c28a3_00005\n", - " warmup_time: 0.008936882019042969\n", - " \n", - "Result for train_breast_cancer_c28a3_00009:\n", - " date: 2022-07-22_16-56-01\n", - " done: true\n", - " eval-error: 0.11188811188811189\n", - " eval-logloss: 0.6822656309688008\n", - " experiment_id: 133901655fa64bf79f2dcc4e8e5e41b1\n", - " hostname: Kais-MacBook-Pro.local\n", - " iterations_since_restore: 1\n", - " node_ip: 127.0.0.1\n", - " pid: 54448\n", - " time_since_restore: 0.016593217849731445\n", - " time_this_iter_s: 0.016593217849731445\n", - " time_total_s: 0.016593217849731445\n", - " timestamp: 1658505361\n", - " timesteps_since_restore: 0\n", - " training_iteration: 1\n", - " trial_id: c28a3_00009\n", - " warmup_time: 0.004940032958984375\n", - " \n", - "Result for train_breast_cancer_c28a3_00007:\n", - " date: 2022-07-22_16-56-01\n", - " done: true\n", - " eval-error: 0.13286713286713286\n", - " eval-logloss: 0.6925980357023386\n", - " experiment_id: b4331027cbaf442ab905b2e51797dbbd\n", - " hostname: Kais-MacBook-Pro.local\n", - " iterations_since_restore: 1\n", - " node_ip: 127.0.0.1\n", - " pid: 54446\n", - " time_since_restore: 0.017589807510375977\n", - " time_this_iter_s: 
0.017589807510375977\n", - " time_total_s: 0.017589807510375977\n", - " timestamp: 1658505361\n", - " timesteps_since_restore: 0\n", - " training_iteration: 1\n", - " trial_id: c28a3_00007\n", - " warmup_time: 0.003782033920288086\n", - " \n", - "Result for train_breast_cancer_c28a3_00006:\n", - " date: 2022-07-22_16-56-01\n", - " done: true\n", - " eval-error: 0.11888111888111888\n", - " eval-logloss: 0.6926244418104212\n", - " experiment_id: d3906de5943a4e05a4cc782382f67d24\n", - " hostname: Kais-MacBook-Pro.local\n", - " iterations_since_restore: 1\n", - " node_ip: 127.0.0.1\n", - " pid: 54445\n", - " time_since_restore: 0.015311956405639648\n", - " time_this_iter_s: 0.015311956405639648\n", - " time_total_s: 0.015311956405639648\n", - " timestamp: 1658505361\n", - " timesteps_since_restore: 0\n", - " training_iteration: 1\n", - " trial_id: c28a3_00006\n", - " warmup_time: 0.005506038665771484\n", - " \n", - "Result for train_breast_cancer_c28a3_00002:\n", - " date: 2022-07-22_16-56-01\n", - " done: false\n", - " eval-error: 0.04895104895104895\n", - " eval-logloss: 0.6752762102580571\n", - " experiment_id: a3645fc2d43145d88a1f5b7cc94df703\n", - " hostname: Kais-MacBook-Pro.local\n", - " iterations_since_restore: 1\n", - " node_ip: 127.0.0.1\n", - " pid: 54441\n", - " time_since_restore: 0.027367830276489258\n", - " time_this_iter_s: 0.027367830276489258\n", - " time_total_s: 0.027367830276489258\n", - " timestamp: 1658505361\n", - " timesteps_since_restore: 0\n", - " training_iteration: 1\n", - " trial_id: c28a3_00002\n", - " warmup_time: 0.0062830448150634766\n", - " \n", - "Result for train_breast_cancer_c28a3_00001:\n", - " date: 2022-07-22_16-56-01\n", - " done: false\n", - " eval-error: 0.07692307692307693\n", - " eval-logloss: 0.6698804135089154\n", - " experiment_id: 85766fe4d9fa482a91e396a8fd509a19\n", - " hostname: Kais-MacBook-Pro.local\n", - " iterations_since_restore: 1\n", - " node_ip: 127.0.0.1\n", - " pid: 54440\n", - " time_since_restore: 
0.017169952392578125\n", - " time_this_iter_s: 0.017169952392578125\n", - " time_total_s: 0.017169952392578125\n", - " timestamp: 1658505361\n", - " timesteps_since_restore: 0\n", - " training_iteration: 1\n", - " trial_id: c28a3_00001\n", - " warmup_time: 0.006204843521118164\n", - " \n", - "Result for train_breast_cancer_c28a3_00008:\n", - " date: 2022-07-22_16-56-01\n", - " done: true\n", - " eval-error: 0.05594405594405594\n", - " eval-logloss: 0.692784742458717\n", - " experiment_id: 2c7d8bc38ad04536b1dec76819a2b3bf\n", - " hostname: Kais-MacBook-Pro.local\n", - " iterations_since_restore: 1\n", - " node_ip: 127.0.0.1\n", - " pid: 54447\n", - " time_since_restore: 0.01937389373779297\n", - " time_this_iter_s: 0.01937389373779297\n", - " time_total_s: 0.01937389373779297\n", - " timestamp: 1658505361\n", - " timesteps_since_restore: 0\n", - " training_iteration: 1\n", - " trial_id: c28a3_00008\n", - " warmup_time: 0.004342079162597656\n", - " \n", - "Result for train_breast_cancer_c28a3_00001:\n", - " date: 2022-07-22_16-56-01\n", - " done: true\n", - " eval-error: 0.06293706293706294\n", - " eval-logloss: 0.6503534216980834\n", - " experiment_id: 85766fe4d9fa482a91e396a8fd509a19\n", - " hostname: Kais-MacBook-Pro.local\n", - " iterations_since_restore: 2\n", - " node_ip: 127.0.0.1\n", - " pid: 54440\n", - " time_since_restore: 0.13567376136779785\n", - " time_this_iter_s: 0.11850380897521973\n", - " time_total_s: 0.13567376136779785\n", - " timestamp: 1658505361\n", - " timesteps_since_restore: 0\n", - " training_iteration: 2\n", - " trial_id: c28a3_00001\n", - " warmup_time: 0.006204843521118164\n", - " \n", - "Result for train_breast_cancer_c28a3_00004:\n", - " date: 2022-07-22_16-56-01\n", - " done: true\n", - " eval-error: 0.06993006993006994\n", - " eval-logloss: 0.689577207281873\n", - " experiment_id: ef4fdc645c444112985b4957ab8a84e9\n", - " hostname: Kais-MacBook-Pro.local\n", - " iterations_since_restore: 1\n", - " node_ip: 127.0.0.1\n", - " pid: 
54443\n", - " time_since_restore: 0.027021169662475586\n", - " time_this_iter_s: 0.027021169662475586\n", - " time_total_s: 0.027021169662475586\n", - " timestamp: 1658505361\n", - " timesteps_since_restore: 0\n", - " training_iteration: 1\n", - " trial_id: c28a3_00004\n", - " warmup_time: 0.0063669681549072266\n", - " \n", - "Result for train_breast_cancer_c28a3_00002:\n", - " date: 2022-07-22_16-56-01\n", - " done: true\n", - " eval-error: 0.04195804195804196\n", - " eval-logloss: 0.658545415301423\n", - " experiment_id: a3645fc2d43145d88a1f5b7cc94df703\n", - " hostname: Kais-MacBook-Pro.local\n", - " iterations_since_restore: 2\n", - " node_ip: 127.0.0.1\n", - " pid: 54441\n", - " time_since_restore: 0.17307591438293457\n", - " time_this_iter_s: 0.1457080841064453\n", - " time_total_s: 0.17307591438293457\n", - " timestamp: 1658505361\n", - " timesteps_since_restore: 0\n", - " training_iteration: 2\n", - " trial_id: c28a3_00002\n", - " warmup_time: 0.0062830448150634766\n", - " \n", - "Result for train_breast_cancer_c28a3_00003:\n", - " date: 2022-07-22_16-56-01\n", - " done: true\n", - " eval-error: 0.04195804195804196\n", - " eval-logloss: 0.38665050018083796\n", - " experiment_id: 7ff6133237404b4ea4755b9f8cd114f2\n", - " hostname: Kais-MacBook-Pro.local\n", - " iterations_since_restore: 10\n", - " node_ip: 127.0.0.1\n", - " pid: 54442\n", - " time_since_restore: 0.28186488151550293\n", - " time_this_iter_s: 0.03063178062438965\n", - " time_total_s: 0.28186488151550293\n", - " timestamp: 1658505361\n", - " timesteps_since_restore: 0\n", - " training_iteration: 10\n", - " trial_id: c28a3_00003\n", - " warmup_time: 0.006722211837768555\n", - " \n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "2022-07-22 16:56:01,498\tINFO tune.py:738 -- Total run time: 10.53 seconds (10.37 seconds for the tuning loop).\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Best model parameters: {'objective': 'binary:logistic', 
'eval_metric': ['logloss', 'error'], 'max_depth': 2, 'min_child_weight': 3, 'subsample': 0.782626252548841, 'eta': 0.06385952388342125}\n", - "Best model total accuracy: 0.9580\n" - ] - } - ], - "source": [ - "import sklearn.datasets\n", - "import sklearn.metrics\n", - "import os\n", - "from ray.tune.schedulers import ASHAScheduler\n", - "from sklearn.model_selection import train_test_split\n", - "import xgboost as xgb\n", - "\n", - "from ray import air, tune\n", - "from ray.air import session\n", - "from ray.tune.integration.xgboost import TuneReportCheckpointCallback\n", - "\n", - "\n", - "def train_breast_cancer(config: dict):\n", - " # This is a simple training function to be passed into Tune\n", - " # Load dataset\n", - " data, labels = sklearn.datasets.load_breast_cancer(return_X_y=True)\n", - " # Split into train and test set\n", - " train_x, test_x, train_y, test_y = train_test_split(data, labels, test_size=0.25)\n", - " # Build input matrices for XGBoost\n", - " train_set = xgb.DMatrix(train_x, label=train_y)\n", - " test_set = xgb.DMatrix(test_x, label=test_y)\n", - " # Train the classifier, using the Tune callback\n", - " xgb.train(\n", - " config,\n", - " train_set,\n", - " evals=[(test_set, \"eval\")],\n", - " verbose_eval=False,\n", - " callbacks=[TuneReportCheckpointCallback(filename=\"model.xgb\")],\n", - " )\n", - "\n", - "\n", - "def get_best_model_checkpoint(results):\n", - " best_bst = xgb.Booster()\n", - " best_result = results.get_best_result()\n", - "\n", - " with best_result.checkpoint.as_directory() as best_checkpoint_dir:\n", - " best_bst.load_model(os.path.join(best_checkpoint_dir, \"model.xgb\"))\n", - " accuracy = 1.0 - best_result.metrics[\"eval-error\"]\n", - " print(f\"Best model parameters: {best_result.config}\")\n", - " print(f\"Best model total accuracy: {accuracy:.4f}\")\n", - " return best_bst\n", - "\n", - "\n", - "def tune_xgboost(smoke_test=False):\n", - " search_space = {\n", - " # You can mix constants with search space 
objects.\n", - " \"objective\": \"binary:logistic\",\n", - " \"eval_metric\": [\"logloss\", \"error\"],\n", - " \"max_depth\": tune.randint(1, 9),\n", - " \"min_child_weight\": tune.choice([1, 2, 3]),\n", - " \"subsample\": tune.uniform(0.5, 1.0),\n", - " \"eta\": tune.loguniform(1e-4, 1e-1),\n", - " }\n", - " # This will enable aggressive early stopping of bad trials.\n", - " scheduler = ASHAScheduler(\n", - " max_t=10, grace_period=1, reduction_factor=2 # 10 training iterations\n", - " )\n", - "\n", - " tuner = tune.Tuner(\n", - " train_breast_cancer,\n", - " tune_config=tune.TuneConfig(\n", - " metric=\"eval-logloss\",\n", - " mode=\"min\",\n", - " scheduler=scheduler,\n", - " num_samples=1 if smoke_test else 10,\n", - " ),\n", - " param_space=search_space,\n", - " )\n", - " results = tuner.fit()\n", - "\n", - " return results\n", - "\n", - "\n", - "if __name__ == \"__main__\":\n", - " import argparse\n", - "\n", - " parser = argparse.ArgumentParser()\n", - " parser.add_argument(\n", - " \"--smoke-test\", action=\"store_true\", help=\"Finish quickly for testing\"\n", - " )\n", - " args, _ = parser.parse_known_args()\n", - "\n", - " results = tune_xgboost(smoke_test=args.smoke_test)\n", - "\n", - " # Load the best model checkpoint.\n", - " best_bst = get_best_model_checkpoint(results)\n", - "\n", - " # You could now do further predictions with\n", - " # best_bst.predict(...)\n" - ] - }, - { - "cell_type": "markdown", - "id": "20732fe4", - "metadata": {}, - "source": [ - "The output of our run could look like this:\n", - "\n", - "```{code-block} bash\n", - ":emphasize-lines: 7\n", - "\n", - " Number of trials: 10/10 (10 TERMINATED)\n", - " +---------------------------------+------------+-------+-------------+-------------+--------------------+-------------+--------+------------------+----------------+--------------+\n", - " | Trial name | status | loc | eta | max_depth | min_child_weight | subsample | iter | total time (s) | eval-logloss | eval-error |\n", - " 
|---------------------------------+------------+-------+-------------+-------------+--------------------+-------------+--------+------------------+----------------+--------------|\n", - " | train_breast_cancer_ba275_00000 | TERMINATED | | 0.00205087 | 2 | 1 | 0.898391 | 10 | 0.380619 | 0.678039 | 0.090909 |\n", - " | train_breast_cancer_ba275_00001 | TERMINATED | | 0.000183834 | 4 | 3 | 0.924939 | 1 | 0.0228798 | 0.693009 | 0.111888 |\n", - " | train_breast_cancer_ba275_00002 | TERMINATED | | 0.0242721 | 7 | 2 | 0.501551 | 10 | 0.376154 | 0.54472 | 0.06993 |\n", - " | train_breast_cancer_ba275_00003 | TERMINATED | | 0.000449692 | 5 | 3 | 0.890212 | 1 | 0.0234981 | 0.692811 | 0.090909 |\n", - " | train_breast_cancer_ba275_00004 | TERMINATED | | 0.000376393 | 7 | 2 | 0.883609 | 1 | 0.0231569 | 0.692847 | 0.062937 |\n", - " | train_breast_cancer_ba275_00005 | TERMINATED | | 0.00231942 | 3 | 3 | 0.877464 | 2 | 0.104867 | 0.689541 | 0.083916 |\n", - " | train_breast_cancer_ba275_00006 | TERMINATED | | 0.000542326 | 1 | 2 | 0.578584 | 1 | 0.0213971 | 0.692765 | 0.083916 |\n", - " | train_breast_cancer_ba275_00007 | TERMINATED | | 0.0016801 | 1 | 2 | 0.975302 | 1 | 0.02226 | 0.691999 | 0.083916 |\n", - " | train_breast_cancer_ba275_00008 | TERMINATED | | 0.000595756 | 8 | 3 | 0.58429 | 1 | 0.0221152 | 0.692657 | 0.06993 |\n", - " | train_breast_cancer_ba275_00009 | TERMINATED | | 0.000357845 | 8 | 1 | 0.637776 | 1 | 0.022635 | 0.692859 | 0.090909 |\n", - " +---------------------------------+------------+-------+-------------+-------------+--------------------+-------------+--------+------------------+----------------+--------------+\n", - "\n", - "\n", - " Best model parameters: {'objective': 'binary:logistic', 'eval_metric': ['logloss', 'error'], 'max_depth': 7, 'min_child_weight': 2, 'subsample': 0.5015513240240503, 'eta': 0.024272050872920895}\n", - " Best model total accuracy: 0.9301\n", - "```\n", - "\n", - "As you can see, most trials have been stopped only after a 
few iterations. Only the\n", - "two most promising trials were run for the full 10 iterations.\n", - "\n", - "You can also ensure that all available resources are being used as the scheduler\n", - "terminates trials, freeing them up. This can be done through the\n", - "`ResourceChangingScheduler`. An example of this can be found here:\n", - "{doc}`/tune/examples/includes/xgboost_dynamic_resources_example`.\n", - "\n", - "## Using fractional GPUs\n", - "\n", - "You can often accelerate your training by using GPUs in addition to CPUs. However,\n", - "you usually don't have as many GPUs as you have trials to run. For instance, if you\n", - "run 10 Tune trials in parallel, you usually don't have access to 10 separate GPUs.\n", - "\n", - "Tune supports *fractional GPUs*. This means that each task is assigned a fraction\n", - "of the GPU memory for training. For 10 tasks, this could look like this:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "7d1b20a3", - "metadata": {}, - "outputs": [], - "source": [ - "config = {\n", - " \"objective\": \"binary:logistic\",\n", - " \"eval_metric\": [\"logloss\", \"error\"],\n", - " \"tree_method\": \"gpu_hist\",\n", - " \"max_depth\": tune.randint(1, 9),\n", - " \"min_child_weight\": tune.choice([1, 2, 3]),\n", - " \"subsample\": tune.uniform(0.5, 1.0),\n", - " \"eta\": tune.loguniform(1e-4, 1e-1),\n", - "}\n", - "\n", - "tuner = tune.Tuner(\n", - " tune.with_resources(train_breast_cancer, resources={\"cpu\": 1, \"gpu\": 0.1}),\n", - " tune_config=tune.TuneConfig(\n", - " num_samples=10,\n", - " ),\n", - " param_space=config,\n", - ")\n", - "results = tuner.fit()\n" - ] - }, - { - "cell_type": "markdown", - "id": "ee131861", - "metadata": {}, - "source": [ - "Each task thus works with 10% of the available GPU memory. 
You also have to tell\n", - "XGBoost to use the `gpu_hist` tree method, so it knows it should use the GPU.\n", - "\n", - "## Conclusion\n", - "\n", - "You should now have a basic understanding on how to train XGBoost models and on how\n", - "to tune the hyperparameters to yield the best results. In our simple example,\n", - "Tuning the parameters didn't make a huge difference for the accuracy.\n", - "But in larger applications, intelligent hyperparameter tuning can make the\n", - "difference between a model that doesn't seem to learn at all, and a model\n", - "that outperforms all the other ones.\n", - "\n", - "## More XGBoost Examples\n", - "\n", - "- {doc}`/tune/examples/includes/xgboost_dynamic_resources_example`:\n", - " Trains a basic XGBoost model with Tune with the class-based API and a ResourceChangingScheduler, ensuring all resources are being used at all time.\n", - "\n", - "## Learn More\n", - "\n", - "- [XGBoost Hyperparameter Tuning - A Visual Guide](https://kevinvecmanis.io/machine%20learning/hyperparameter%20tuning/dataviz/python/2019/05/11/XGBoost-Tuning-Visual-Guide.html)\n", - "- [Notes on XGBoost Parameter Tuning](https://xgboost.readthedocs.io/en/latest/tutorials/param_tuning.html)\n", - "- [Doing XGBoost Hyperparameter Tuning the smart way](https://towardsdatascience.com/doing-xgboost-hyper-parameter-tuning-the-smart-way-part-1-of-2-f6d255a45dde)" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "ray_dev_py38", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.8.13 | packaged by conda-forge | (default, Mar 25 2022, 06:05:16) \n[Clang 12.0.1 ]" - }, - "orphan": true, - "vscode": { - "interpreter": { - "hash": "265d195fda5292fe8f69c6e37c435a5634a1ed3b6799724e66a975f68fa21517" - } - } + 
"cells": [ + { + "cell_type": "markdown", + "id": "edce67b9", + "metadata": {}, + "source": [ + "# Tuning XGBoost hyperparameters with Ray Tune\n", + "\n", + "(tune-xgboost-ref)=\n", + "\n", + "XGBoost is currently one of the most popular machine learning algorithms. It performs\n", + "very well on a large selection of tasks, and was the key to success in many Kaggle\n", + "competitions.\n", + "\n", + "```{image} /images/xgboost_logo.png\n", + ":align: center\n", + ":alt: XGBoost\n", + ":target: https://xgboost.readthedocs.io/en/latest/\n", + ":width: 200px\n", + "```\n", + "\n", + "This tutorial will give you a quick introduction to XGBoost, show you how\n", + "to train an XGBoost model, and then guide you on how to optimize XGBoost\n", + "parameters using Tune to get the best performance. We tackle the following topics:\n", + "\n", + "```{contents}\n", + ":depth: 2\n", + "```\n", + "\n", + ":::{note}\n", + "To run this tutorial, you will need to install the following:\n", + "\n", + "```bash\n", + "$ pip install xgboost\n", + "```\n", + ":::\n", + "\n", + "## What is XGBoost\n", + "\n", + "XGBoost is an acronym for e**X**treme **G**radient **Boost**ing. Internally,\n", + "XGBoost uses [decision trees](https://en.wikipedia.org/wiki/Decision_tree). Instead\n", + "of training just one large decision tree, XGBoost and other related algorithms train\n", + "many small decision trees. The intuition behind this is that even though single\n", + "decision trees can be inaccurate and suffer from high variance,\n", + "combining the output of a large number of these weak learners can actually lead to\n", + "strong learner, resulting in better predictions and less variance.\n", + "\n", + ":::{figure} /images/tune-xgboost-ensemble.svg\n", + ":alt: Single vs. ensemble learning\n", + "\n", + "A single decision tree (left) might be able to get to an accuracy of 70%\n", + "for a binary classification task. 
By combining the output of several small\n", + "decision trees, an ensemble learner (right) might end up with a higher accuracy\n", + "of 90%.\n", + ":::\n", + "\n", + "Boosting algorithms start with a single small decision tree and evaluate how well\n", + "it predicts the given examples. When building the next tree, those samples that have\n", + "been misclassified before have a higher chance of being used to generate the tree.\n", + "This is useful because it avoids overfitting to samples that can be easily classified\n", + "and instead tries to come up with models that are able to classify hard examples, too.\n", + "Please see [here for a more thorough introduction to bagging and boosting algorithms](https://towardsdatascience.com/ensemble-methods-bagging-boosting-and-stacking-c9214a10a205).\n", + "\n", + "There are many boosting algorithms. At their core, they are all very similar. XGBoost\n", + "uses second-level derivatives to find splits that maximize the *gain* (the inverse of\n", + "the *loss*) - hence the name. In practice, there really is no drawback in using\n", + "XGBoost over other boosting algorithms - in fact, it usually shows the best performance.\n", + "\n", + "## Training a simple XGBoost classifier\n", + "\n", + "Let's first see how a simple XGBoost classifier can be trained. We'll use the\n", + "`breast_cancer` dataset included in the `sklearn` dataset collection. This is\n", + "a binary classification dataset. 
Given 30 different input features, our task is to\n", + "learn to identify subjects with breast cancer and those without.\n", + "\n", + "Here is the full code to train a simple XGBoost model:" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "77b3c71c", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Accuracy: 0.9650\n" + ] + } + ], + "source": [ + "import sklearn.datasets\n", + "import sklearn.metrics\n", + "from sklearn.model_selection import train_test_split\n", + "import xgboost as xgb\n", + "\n", + "\n", + "def train_breast_cancer(config):\n", + " # Load dataset\n", + " data, labels = sklearn.datasets.load_breast_cancer(return_X_y=True)\n", + " # Split into train and test set\n", + " train_x, test_x, train_y, test_y = train_test_split(data, labels, test_size=0.25)\n", + " # Build input matrices for XGBoost\n", + " train_set = xgb.DMatrix(train_x, label=train_y)\n", + " test_set = xgb.DMatrix(test_x, label=test_y)\n", + " # Train the classifier\n", + " results = {}\n", + " bst = xgb.train(\n", + " config,\n", + " train_set,\n", + " evals=[(test_set, \"eval\")],\n", + " evals_result=results,\n", + " verbose_eval=False,\n", + " )\n", + " return results\n", + "\n", + "\n", + "if __name__ == \"__main__\":\n", + " results = train_breast_cancer(\n", + " {\"objective\": \"binary:logistic\", \"eval_metric\": [\"logloss\", \"error\"]}\n", + " )\n", + " accuracy = 1.0 - results[\"eval\"][\"error\"][-1]\n", + " print(f\"Accuracy: {accuracy:.4f}\")\n" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "ec2a13f8", + "metadata": {}, + "source": [ + "As you can see, the code is quite simple. First, the dataset is loaded and split\n", + "into a `test` and `train` set. The XGBoost model is trained with `xgb.train()`.\n", + "XGBoost automatically evaluates metrics we specified on the test set. 
In our case\n", + "it calculates the *logloss* and the prediction *error*, which is the percentage of\n", + "misclassified examples. To calculate the accuracy, we just have to subtract the error\n", + "from `1.0`. Even in this simple example, most runs result\n", + "in a good accuracy of over `0.90`.\n", + "\n", + "Maybe you have noticed the `config` parameter we pass to the XGBoost algorithm. This\n", + "is a {class}`dict` in which you can specify parameters for the XGBoost algorithm. In this\n", + "simple example, the only parameters we passed are the `objective` and `eval_metric` parameters.\n", + "The value `binary:logistic` tells XGBoost that we aim to train a logistic regression model for\n", + "a binary classification task. You can find an overview over all valid objectives\n", + "[here in the XGBoost documentation](https://xgboost.readthedocs.io/en/latest/parameter.html#learning-task-parameters).\n", + "\n", + "## XGBoost Hyperparameters\n", + "\n", + "Even with the default settings, XGBoost was able to get to a good accuracy on the\n", + "breast cancer dataset. However, as in many machine learning algorithms, there are\n", + "many knobs to tune which might lead to even better performance. Let's explore some of\n", + "them below.\n", + "\n", + "### Maximum tree depth\n", + "\n", + "Remember that XGBoost internally uses many decision tree models to come up with\n", + "predictions. When training a decision tree, we need to tell the algorithm how\n", + "large the tree may get. The parameter for this is called the tree *depth*.\n", + "\n", + ":::{figure} /images/tune-xgboost-depth.svg\n", + ":align: center\n", + ":alt: Decision tree depth\n", + "\n", + "In this image, the left tree has a depth of 2, and the right tree a depth of 3.\n", + "Note that with each level, $2^{(d-1)}$ splits are added, where *d* is the depth\n", + "of the tree.\n", + ":::\n", + "\n", + "Tree depth is a property that concerns the model complexity. 
If you only allow short\n", + "trees, the models are likely not very precise - they underfit the data. If you allow\n", + "very large trees, the single models are likely to overfit to the data. In practice,\n", + "a number between `2` and `6` is often a good starting point for this parameter.\n", + "\n", + "XGBoost's default value is `3`.\n", + "\n", + "### Minimum child weight\n", + "\n", + "When a decision tree creates new leaves, it splits up the remaining data at one node\n", + "into two groups. If there are only a few samples in one of these groups, it often\n", + "doesn't make sense to split it further. One of the reasons for this is that the\n", + "model is harder to train when we have fewer samples.\n", + "\n", + ":::{figure} /images/tune-xgboost-weight.svg\n", + ":align: center\n", + ":alt: Minimum child weight\n", + "\n", + "In this example, we start with 100 examples. At the first node, they are split\n", + "into 4 and 96 samples, respectively. In the next step, our model might find\n", + "that it doesn't make sense to split the 4 examples more. It thus only continues\n", + "to add leaves on the right side.\n", + ":::\n", + "\n", + "The parameter used by the model to decide if it makes sense to split a node is called\n", + "the *minimum child weight*. In the case of linear regression, this is just the absolute\n", + "number of nodes required in each child. In other objectives, this value is determined\n", + "using the weights of the examples, hence the name.\n", + "\n", + "The larger the value, the more constrained the trees are and the less deep they will be.\n", + "This parameter thus also affects the model complexity. Values can range between 0\n", + "and infinity and are dependent on the sample size. For our ca. 
500 examples in the\n", + "breast cancer dataset, values between `0` and `10` should be sensible.\n", + "\n", + "XGBoost's default value is `1`.\n", + "\n", + "### Subsample size\n", + "\n", + "Each decision tree we add is trained on a subsample of the total training dataset.\n", + "The probabilities for the samples are weighted according to the XGBoost algorithm,\n", + "but we can decide on which fraction of the samples we want to train each decision\n", + "tree on.\n", + "\n", + "Setting this value to `0.7` would mean that we randomly sample `70%` of the\n", + "training dataset before each training iteration.\n", + "\n", + "XGBoost's default value is `1`.\n", + "\n", + "### Learning rate / Eta\n", + "\n", + "Remember that XGBoost sequentially trains many decision trees, and that later trees\n", + "are more likely trained on data that has been misclassified by prior trees. In effect\n", + "this means that earlier trees make decisions for easy samples (i.e. those samples that\n", + "can easily be classified) and later trees make decisions for harder samples. It is then\n", + "sensible to assume that the later trees are less accurate than earlier trees.\n", + "\n", + "To address this fact, XGBoost uses a parameter called *Eta*, which is sometimes called\n", + "the *learning rate*. 
Don't confuse this with learning rates from gradient descent!\n", + "The original [paper on stochastic gradient boosting](https://www.researchgate.net/publication/222573328_Stochastic_Gradient_Boosting)\n", + "introduces this parameter like so:\n", + "\n", + "$$\n", + "F_m(x) = F_{m-1}(x) + \\eta \\cdot \\gamma_{lm} \\textbf{1}(x \\in R_{lm})\n", + "$$\n", + "\n", + "This is just a complicated way to say that when we train a new decision tree,\n", + "represented by $\\gamma_{lm} \\textbf{1}(x \\in R_{lm})$, we want to dampen\n", + "its effect on the previous prediction $F_{m-1}(x)$ with a factor\n", + "$\\eta$.\n", + "\n", + "Typical values for this parameter are between `0.01` and `0.3`.\n", + "\n", + "XGBoost's default value is `0.3`.\n", + "\n", + "### Number of boost rounds\n", + "\n", + "Lastly, we can decide on how many boosting rounds we perform, which means how\n", + "many decision trees we ultimately train. When we do heavy subsampling or use a small\n", + "learning rate, it might make sense to increase the number of boosting rounds.\n", + "\n", + "XGBoost's default value is `10`.\n", + "\n", + "### Putting it together\n", + "\n", + "Let's see what this looks like in code! 
We just need to adjust our `config` dict:" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "35073e88", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Accuracy: 0.9790\n" + ] + } + ], + "source": [ + "if __name__ == \"__main__\":\n", + " config = {\n", + " \"objective\": \"binary:logistic\",\n", + " \"eval_metric\": [\"logloss\", \"error\"],\n", + " \"max_depth\": 2,\n", + " \"min_child_weight\": 0,\n", + " \"subsample\": 0.8,\n", + " \"eta\": 0.2,\n", + " }\n", + " results = train_breast_cancer(config)\n", + " accuracy = 1.0 - results[\"eval\"][\"error\"][-1]\n", + " print(f\"Accuracy: {accuracy:.4f}\")\n" + ] + }, + { + "cell_type": "markdown", + "id": "69cf0c13", + "metadata": {}, + "source": [ + "The rest stays the same. Please note that we do not adjust the `num_boost_rounds` here.\n", + "The result should also show a high accuracy of over 90%.\n", + "\n", + "## Tuning the configuration parameters\n", + "\n", + "XGBoosts default parameters already lead to a good accuracy, and even our guesses in the\n", + "last section should result in accuracies well above 90%. However, our guesses were\n", + "just that: guesses. Often we do not know what combination of parameters would actually\n", + "lead to the best results on a machine learning task.\n", + "\n", + "Unfortunately, there are infinitely many combinations of hyperparameters we could try\n", + "out. Should we combine `max_depth=3` with `subsample=0.8` or with `subsample=0.9`?\n", + "What about the other parameters?\n", + "\n", + "This is where hyperparameter tuning comes into play. By using tuning libraries such as\n", + "Ray Tune we can try out combinations of hyperparameters. Using sophisticated search\n", + "strategies, these parameters can be selected so that they are likely to lead to good\n", + "results (avoiding an expensive *exhaustive search*). 
Also, trials that do not perform\n", + "well can be preemptively stopped to reduce waste of computing resources. Lastly, Ray Tune\n", + "also takes care of training these runs in parallel, greatly increasing search speed.\n", + "\n", + "Let's start with a basic example on how to use Tune for this. We just need to make\n", + "a few changes to our code-block:" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "ff856a82", + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2022-07-22 15:52:52,004\tINFO services.py:1483 -- View the Ray dashboard at \u001B[1m\u001B[32mhttp://127.0.0.1:8268\u001B[39m\u001B[22m\n", + "2022-07-22 15:52:55,858\tWARNING function_trainable.py:619 -- Function checkpointing is disabled. This may result in unexpected behavior when using checkpointing features or certain schedulers. To enable, set the train function arguments to be `func(config, checkpoint_dir=None)`.\n" + ] }, - "nbformat": 4, - "nbformat_minor": 5 + { + "data": { + "text/html": [ + "== Status ==
    Current time: 2022-07-22 15:53:04 (running for 00:00:07.77)
    Memory usage on this node: 10.5/16.0 GiB
    Using FIFO scheduling algorithm.
    Resources requested: 0/16 CPUs, 0/0 GPUs, 0.0/4.57 GiB heap, 0.0/2.0 GiB objects
    Result logdir: /Users/kai/ray_results/train_breast_cancer_2022-07-22_15-52-48
    Number of trials: 10/10 (10 TERMINATED)
    \n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "
    Trial name status loc eta max_depth min_child_weight subsample acc iter total time (s)
    train_breast_cancer_f8669_00000TERMINATED127.0.0.1:488520.0069356 5 3 0.8235040.944056 1 0.0316169
    train_breast_cancer_f8669_00001TERMINATED127.0.0.1:488570.00145619 6 3 0.8329470.958042 1 0.0328588
    train_breast_cancer_f8669_00002TERMINATED127.0.0.1:488580.00108208 7 3 0.9873190.944056 1 0.0319381
    train_breast_cancer_f8669_00003TERMINATED127.0.0.1:488590.00530429 8 2 0.6156910.923077 1 0.028388
    train_breast_cancer_f8669_00004TERMINATED127.0.0.1:488600.000721843 8 1 0.6509730.958042 1 0.0299618
    train_breast_cancer_f8669_00005TERMINATED127.0.0.1:488610.0074509 1 1 0.7383410.874126 1 0.0193682
    train_breast_cancer_f8669_00006TERMINATED127.0.0.1:488620.0879882 8 2 0.6715760.944056 1 0.0267372
    train_breast_cancer_f8669_00007TERMINATED127.0.0.1:488630.0765404 7 2 0.7081570.965035 1 0.0276129
    train_breast_cancer_f8669_00008TERMINATED127.0.0.1:488640.000627649 6 1 0.81121 0.951049 1 0.0310998
    train_breast_cancer_f8669_00009TERMINATED127.0.0.1:488650.000383711 2 3 0.9905790.93007 1 0.0274954


    " + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2022-07-22 15:52:57,385\tINFO plugin_schema_manager.py:52 -- Loading the default runtime env schemas: ['/Users/kai/coding/ray/python/ray/_private/runtime_env/../../runtime_env/schemas/working_dir_schema.json', '/Users/kai/coding/ray/python/ray/_private/runtime_env/../../runtime_env/schemas/pip_schema.json'].\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Result for train_breast_cancer_f8669_00000:\n", + " date: 2022-07-22_15-53-00\n", + " done: true\n", + " experiment_id: 07d10c5f31e74133b53272b7ccf9c528\n", + " hostname: Kais-MacBook-Pro.local\n", + " iterations_since_restore: 1\n", + " mean_accuracy: 0.9440559440559441\n", + " node_ip: 127.0.0.1\n", + " pid: 48852\n", + " time_since_restore: 0.031616926193237305\n", + " time_this_iter_s: 0.031616926193237305\n", + " time_total_s: 0.031616926193237305\n", + " timestamp: 1658501580\n", + " timesteps_since_restore: 0\n", + " training_iteration: 1\n", + " trial_id: f8669_00000\n", + " warmup_time: 0.0027849674224853516\n", + " \n", + "Result for train_breast_cancer_f8669_00009:\n", + " date: 2022-07-22_15-53-04\n", + " done: true\n", + " experiment_id: bc0d5dd2d079432b859faac8a18928f0\n", + " hostname: Kais-MacBook-Pro.local\n", + " iterations_since_restore: 1\n", + " mean_accuracy: 0.9300699300699301\n", + " node_ip: 127.0.0.1\n", + " pid: 48865\n", + " time_since_restore: 0.027495384216308594\n", + " time_this_iter_s: 0.027495384216308594\n", + " time_total_s: 0.027495384216308594\n", + " timestamp: 1658501584\n", + " timesteps_since_restore: 0\n", + " training_iteration: 1\n", + " trial_id: f8669_00009\n", + " warmup_time: 0.005235910415649414\n", + " \n", + "Result for train_breast_cancer_f8669_00001:\n", + " date: 2022-07-22_15-53-04\n", + " done: true\n", + " experiment_id: 4b10d350d4374a0d9e7d0c3b1d4e3203\n", + " 
hostname: Kais-MacBook-Pro.local\n", + " iterations_since_restore: 1\n", + " mean_accuracy: 0.958041958041958\n", + " node_ip: 127.0.0.1\n", + " pid: 48857\n", + " time_since_restore: 0.032858848571777344\n", + " time_this_iter_s: 0.032858848571777344\n", + " time_total_s: 0.032858848571777344\n", + " timestamp: 1658501584\n", + " timesteps_since_restore: 0\n", + " training_iteration: 1\n", + " trial_id: f8669_00001\n", + " warmup_time: 0.004731178283691406\n", + " \n", + "Result for train_breast_cancer_f8669_00008:\n", + " date: 2022-07-22_15-53-04\n", + " done: true\n", + " experiment_id: 91c25cbbeb6f409d93e1d6537cb8e1ee\n", + " hostname: Kais-MacBook-Pro.local\n", + " iterations_since_restore: 1\n", + " mean_accuracy: 0.951048951048951\n", + " node_ip: 127.0.0.1\n", + " pid: 48864\n", + " time_since_restore: 0.031099796295166016\n", + " time_this_iter_s: 0.031099796295166016\n", + " time_total_s: 0.031099796295166016\n", + " timestamp: 1658501584\n", + " timesteps_since_restore: 0\n", + " training_iteration: 1\n", + " trial_id: f8669_00008\n", + " warmup_time: 0.003270864486694336\n", + " \n", + "Result for train_breast_cancer_f8669_00005:\n", + " date: 2022-07-22_15-53-04\n", + " done: true\n", + " experiment_id: d225b0fb59e14da7adba952456ccf1d5\n", + " hostname: Kais-MacBook-Pro.local\n", + " iterations_since_restore: 1\n", + " mean_accuracy: 0.8741258741258742\n", + " node_ip: 127.0.0.1\n", + " pid: 48861\n", + " time_since_restore: 0.01936817169189453\n", + " time_this_iter_s: 0.01936817169189453\n", + " time_total_s: 0.01936817169189453\n", + " timestamp: 1658501584\n", + " timesteps_since_restore: 0\n", + " training_iteration: 1\n", + " trial_id: f8669_00005\n", + " warmup_time: 0.003901958465576172\n", + " \n", + "Result for train_breast_cancer_f8669_00004:\n", + " date: 2022-07-22_15-53-04\n", + " done: true\n", + " experiment_id: 322484af6ea5422f8aaf8ff6a91af4f7\n", + " hostname: Kais-MacBook-Pro.local\n", + " iterations_since_restore: 1\n", + " 
mean_accuracy: 0.958041958041958\n", + " node_ip: 127.0.0.1\n", + " pid: 48860\n", + " time_since_restore: 0.029961824417114258\n", + " time_this_iter_s: 0.029961824417114258\n", + " time_total_s: 0.029961824417114258\n", + " timestamp: 1658501584\n", + " timesteps_since_restore: 0\n", + " training_iteration: 1\n", + " trial_id: f8669_00004\n", + " warmup_time: 0.003547191619873047\n", + " \n", + "Result for train_breast_cancer_f8669_00002:\n", + " date: 2022-07-22_15-53-04\n", + " done: true\n", + " experiment_id: 3f588954160b42ce8ce200f68127ebcd\n", + " hostname: Kais-MacBook-Pro.local\n", + " iterations_since_restore: 1\n", + " mean_accuracy: 0.9440559440559441\n", + " node_ip: 127.0.0.1\n", + " pid: 48858\n", + " time_since_restore: 0.03193807601928711\n", + " time_this_iter_s: 0.03193807601928711\n", + " time_total_s: 0.03193807601928711\n", + " timestamp: 1658501584\n", + " timesteps_since_restore: 0\n", + " training_iteration: 1\n", + " trial_id: f8669_00002\n", + " warmup_time: 0.003523111343383789\n", + " \n", + "Result for train_breast_cancer_f8669_00003:\n", + " date: 2022-07-22_15-53-04\n", + " done: true\n", + " experiment_id: a39ea777ce2d4ebca51b3d7a4179dae5\n", + " hostname: Kais-MacBook-Pro.local\n", + " iterations_since_restore: 1\n", + " mean_accuracy: 0.9230769230769231\n", + " node_ip: 127.0.0.1\n", + " pid: 48859\n", + " time_since_restore: 0.028388023376464844\n", + " time_this_iter_s: 0.028388023376464844\n", + " time_total_s: 0.028388023376464844\n", + " timestamp: 1658501584\n", + " timesteps_since_restore: 0\n", + " training_iteration: 1\n", + " trial_id: f8669_00003\n", + " warmup_time: 0.0035560131072998047\n", + " \n", + "Result for train_breast_cancer_f8669_00006:\n", + " date: 2022-07-22_15-53-04\n", + " done: true\n", + " experiment_id: f97c6b9674854f8d89ec26ba58cc1618\n", + " hostname: Kais-MacBook-Pro.local\n", + " iterations_since_restore: 1\n", + " mean_accuracy: 0.9440559440559441\n", + " node_ip: 127.0.0.1\n", + " pid: 
48862\n", + " time_since_restore: 0.026737213134765625\n", + " time_this_iter_s: 0.026737213134765625\n", + " time_total_s: 0.026737213134765625\n", + " timestamp: 1658501584\n", + " timesteps_since_restore: 0\n", + " training_iteration: 1\n", + " trial_id: f8669_00006\n", + " warmup_time: 0.003425121307373047\n", + " \n", + "Result for train_breast_cancer_f8669_00007:\n", + " date: 2022-07-22_15-53-04\n", + " done: true\n", + " experiment_id: ff172037065a4d55998ed72f51bdc5df\n", + " hostname: Kais-MacBook-Pro.local\n", + " iterations_since_restore: 1\n", + " mean_accuracy: 0.965034965034965\n", + " node_ip: 127.0.0.1\n", + " pid: 48863\n", + " time_since_restore: 0.027612924575805664\n", + " time_this_iter_s: 0.027612924575805664\n", + " time_total_s: 0.027612924575805664\n", + " timestamp: 1658501584\n", + " timesteps_since_restore: 0\n", + " training_iteration: 1\n", + " trial_id: f8669_00007\n", + " warmup_time: 0.0031311511993408203\n", + " \n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2022-07-22 15:53:04,846\tINFO tune.py:738 -- Total run time: 8.99 seconds (7.74 seconds for the tuning loop).\n" + ] + } + ], + "source": [ + "import sklearn.datasets\n", + "import sklearn.metrics\n", + "\n", + "from ray import air, tune\n", + "from ray.air import session\n", + "\n", + "\n", + "def train_breast_cancer(config):\n", + " # Load dataset\n", + " data, labels = sklearn.datasets.load_breast_cancer(return_X_y=True)\n", + " # Split into train and test set\n", + " train_x, test_x, train_y, test_y = train_test_split(data, labels, test_size=0.25)\n", + " # Build input matrices for XGBoost\n", + " train_set = xgb.DMatrix(train_x, label=train_y)\n", + " test_set = xgb.DMatrix(test_x, label=test_y)\n", + " # Train the classifier\n", + " results = {}\n", + " xgb.train(\n", + " config,\n", + " train_set,\n", + " evals=[(test_set, \"eval\")],\n", + " evals_result=results,\n", + " verbose_eval=False,\n", + " )\n", + " # Return prediction 
accuracy\n", + " accuracy = 1.0 - results[\"eval\"][\"error\"][-1]\n", + " session.report({\"mean_accuracy\": accuracy, \"done\": True})\n", + "\n", + "\n", + "if __name__ == \"__main__\":\n", + " config = {\n", + " \"objective\": \"binary:logistic\",\n", + " \"eval_metric\": [\"logloss\", \"error\"],\n", + " \"max_depth\": tune.randint(1, 9),\n", + " \"min_child_weight\": tune.choice([1, 2, 3]),\n", + " \"subsample\": tune.uniform(0.5, 1.0),\n", + " \"eta\": tune.loguniform(1e-4, 1e-1),\n", + " }\n", + " tuner = tune.Tuner(\n", + " train_breast_cancer,\n", + " tune_config=tune.TuneConfig(\n", + " num_samples=10,\n", + " ),\n", + " param_space=config,\n", + " )\n", + " results = tuner.fit()\n" + ] + }, + { + "cell_type": "markdown", + "id": "4999e858", + "metadata": {}, + "source": [ + "As you can see, the changes in the actual training function are minimal. Instead of\n", + "returning the accuracy value, we report it back to Tune using `session.report()`.\n", + "Our `config` dictionary only changed slightly. Instead of passing hard-coded\n", + "parameters, we tell Tune to choose values from a range of valid options. There are\n", + "a number of options we have here, all of which are explained in\n", + "{ref}`the Tune docs `.\n", + "\n", + "For a brief explanation, this is what they do:\n", + "\n", + "- `tune.randint(min, max)` chooses a random integer value between *min* and *max*.\n", + " Note that *max* is exclusive, so it will not be sampled.\n", + "- `tune.choice([a, b, c])` chooses one of the items of the list at random. Each item\n", + " has the same chance to be sampled.\n", + "- `tune.uniform(min, max)` samples a floating point number between *min* and *max*.\n", + " Note that *max* is exclusive here, too.\n", + "- `tune.loguniform(min, max, base=10)` samples a floating point number between *min* and *max*,\n", + " but applies a logarithmic transformation to these boundaries first. 
Thus, this makes\n", + " it easy to sample values from different orders of magnitude.\n", + "\n", + "The `num_samples=10` option we pass to the `TuneConfig()` means that we sample 10 different\n", + "hyperparameter configurations from this search space.\n", + "\n", + "The output of our training run coud look like this:\n", + "\n", + "```{code-block} bash\n", + ":emphasize-lines: 14\n", + "\n", + " Number of trials: 10/10 (10 TERMINATED)\n", + " +---------------------------------+------------+-------+-------------+-------------+--------------------+-------------+----------+--------+------------------+\n", + " | Trial name | status | loc | eta | max_depth | min_child_weight | subsample | acc | iter | total time (s) |\n", + " |---------------------------------+------------+-------+-------------+-------------+--------------------+-------------+----------+--------+------------------|\n", + " | train_breast_cancer_b63aa_00000 | TERMINATED | | 0.000117625 | 2 | 2 | 0.616347 | 0.916084 | 1 | 0.0306492 |\n", + " | train_breast_cancer_b63aa_00001 | TERMINATED | | 0.0382954 | 8 | 2 | 0.581549 | 0.937063 | 1 | 0.0357082 |\n", + " | train_breast_cancer_b63aa_00002 | TERMINATED | | 0.000217926 | 1 | 3 | 0.528428 | 0.874126 | 1 | 0.0264609 |\n", + " | train_breast_cancer_b63aa_00003 | TERMINATED | | 0.000120929 | 8 | 1 | 0.634508 | 0.958042 | 1 | 0.036406 |\n", + " | train_breast_cancer_b63aa_00004 | TERMINATED | | 0.00839715 | 5 | 1 | 0.730624 | 0.958042 | 1 | 0.0389378 |\n", + " | train_breast_cancer_b63aa_00005 | TERMINATED | | 0.000732948 | 8 | 2 | 0.915863 | 0.958042 | 1 | 0.0382841 |\n", + " | train_breast_cancer_b63aa_00006 | TERMINATED | | 0.000856226 | 4 | 1 | 0.645209 | 0.916084 | 1 | 0.0357089 |\n", + " | train_breast_cancer_b63aa_00007 | TERMINATED | | 0.00769908 | 7 | 1 | 0.729443 | 0.909091 | 1 | 0.0390737 |\n", + " | train_breast_cancer_b63aa_00008 | TERMINATED | | 0.00186339 | 5 | 3 | 0.595744 | 0.944056 | 1 | 0.0343912 |\n", + " | train_breast_cancer_b63aa_00009 
| TERMINATED | | 0.000950272 | 3 | 2 | 0.835504 | 0.965035 | 1 | 0.0348201 |\n", + " +---------------------------------+------------+-------+-------------+-------------+--------------------+-------------+----------+--------+------------------+\n", + "```\n", + "\n", + "The best configuration we found used `eta=0.000950272`, `max_depth=3`,\n", + "`min_child_weight=2`, `subsample=0.835504` and reached an accuracy of\n", + "`0.965035`.\n", + "\n", + "## Early stopping\n", + "\n", + "Currently, Tune samples 10 different hyperparameter configurations and trains a full\n", + "XGBoost on all of them. In our small example, training is very fast. However,\n", + "if training takes longer, a significant amount of computer resources is spent on trials\n", + "that will eventually show a bad performance, e.g. a low accuracy. It would be good\n", + "if we could identify these trials early and stop them, so we don't waste any resources.\n", + "\n", + "This is where Tune's *Schedulers* shine. A Tune `TrialScheduler` is responsible\n", + "for starting and stopping trials. Tune implements a number of different schedulers, each\n", + "described {ref}`in the Tune documentation `.\n", + "For our example, we will use the `AsyncHyperBandScheduler` or `ASHAScheduler`.\n", + "\n", + "The basic idea of this scheduler: We sample a number of hyperparameter configurations.\n", + "Each of these configurations is trained for a specific number of iterations.\n", + "After these iterations, only the best performing hyperparameters are retained. These\n", + "are selected according to some loss metric, usually an evaluation loss. This cycle is\n", + "repeated until we end up with the best configuration.\n", + "\n", + "The `ASHAScheduler` needs to know three things:\n", + "\n", + "1. Which metric should be used to identify badly performing trials?\n", + "2. Should this metric be maximized or minimized?\n", + "3. 
How many iterations does each trial train for?\n", + "\n", + "There are more parameters, which are explained in the\n", + "{ref}`documentation `.\n", + "\n", + "Lastly, we have to report the loss metric to Tune. We do this with a `Callback` that\n", + "XGBoost accepts and calls after each evaluation round. Ray Tune comes\n", + "with {ref}`two XGBoost callbacks `\n", + "we can use for this. The `TuneReportCallback` just reports the evaluation\n", + "metrics back to Tune. The `TuneReportCheckpointCallback` also saves\n", + "checkpoints after each evaluation round. We will just use the latter in this\n", + "example so that we can retrieve the saved model later.\n", + "\n", + "These parameters from the `eval_metrics` configuration setting are then automatically\n", + "reported to Tune via the callback. Here, the raw error will be reported, not the accuracy.\n", + "To display the best reached accuracy, we will inverse it later.\n", + "\n", + "We will also load the best checkpointed model so that we can use it for predictions.\n", + "The best model is selected with respect to the `metric` and `mode` parameters we\n", + "pass to the `TunerConfig()`." + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "id": "d08b5b0a", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "== Status ==
    Current time: 2022-07-22 16:56:01 (running for 00:00:10.38)
    Memory usage on this node: 10.3/16.0 GiB
    Using AsyncHyperBand: num_stopped=10\n", + "Bracket: Iter 8.000: -0.5107275277792991 | Iter 4.000: -0.5876629346317344 | Iter 2.000: -0.6544494184997531 | Iter 1.000: -0.6859214191253369
    Resources requested: 0/16 CPUs, 0/0 GPUs, 0.0/4.57 GiB heap, 0.0/2.0 GiB objects
    Current best trial: c28a3_00003 with eval-logloss=0.38665050018083796 and parameters={'objective': 'binary:logistic', 'eval_metric': ['logloss', 'error'], 'max_depth': 2, 'min_child_weight': 3, 'subsample': 0.782626252548841, 'eta': 0.06385952388342125}
    Result logdir: /Users/kai/ray_results/train_breast_cancer_2022-07-22_16-55-50
    Number of trials: 10/10 (10 TERMINATED)
    \n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "
    Trial name status loc eta max_depth min_child_weight subsample iter total time (s) eval-logloss eval-error
    train_breast_cancer_c28a3_00000TERMINATED127.0.0.1:544160.0186954 2 2 0.516916 10 0.22218 0.571496 0.0629371
    train_breast_cancer_c28a3_00001TERMINATED127.0.0.1:544400.0304404 8 2 0.745969 2 0.135674 0.650353 0.0629371
    train_breast_cancer_c28a3_00002TERMINATED127.0.0.1:544410.0217157 8 3 0.764138 2 0.173076 0.658545 0.041958
    train_breast_cancer_c28a3_00003TERMINATED127.0.0.1:544420.0638595 2 3 0.782626 10 0.281865 0.386651 0.041958
    train_breast_cancer_c28a3_00004TERMINATED127.0.0.1:544430.00442794 7 2 0.792359 1 0.0270212 0.689577 0.0699301
    train_breast_cancer_c28a3_00005TERMINATED127.0.0.1:544440.00222624 3 1 0.536331 1 0.0238512 0.691446 0.0839161
    train_breast_cancer_c28a3_00006TERMINATED127.0.0.1:544450.000825129 1 1 0.82472 1 0.015312 0.692624 0.118881
    train_breast_cancer_c28a3_00007TERMINATED127.0.0.1:544460.000770826 7 2 0.947268 1 0.0175898 0.692598 0.132867
    train_breast_cancer_c28a3_00008TERMINATED127.0.0.1:544470.000429759 7 1 0.88524 1 0.0193739 0.692785 0.0559441
    train_breast_cancer_c28a3_00009TERMINATED127.0.0.1:544480.0149863 2 1 0.722738 1 0.0165932 0.682266 0.111888


    " + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Result for train_breast_cancer_c28a3_00000:\n", + " date: 2022-07-22_16-55-55\n", + " done: false\n", + " eval-error: 0.08391608391608392\n", + " eval-logloss: 0.6790360066440556\n", + " experiment_id: 2a3189442db341519836a07fb2d65dd9\n", + " hostname: Kais-MacBook-Pro.local\n", + " iterations_since_restore: 1\n", + " node_ip: 127.0.0.1\n", + " pid: 54416\n", + " time_since_restore: 0.01624011993408203\n", + " time_this_iter_s: 0.01624011993408203\n", + " time_total_s: 0.01624011993408203\n", + " timestamp: 1658505355\n", + " timesteps_since_restore: 0\n", + " training_iteration: 1\n", + " trial_id: c28a3_00000\n", + " warmup_time: 0.0035409927368164062\n", + " \n", + "Result for train_breast_cancer_c28a3_00000:\n", + " date: 2022-07-22_16-55-56\n", + " done: true\n", + " eval-error: 0.06293706293706294\n", + " eval-logloss: 0.5714958122560194\n", + " experiment_id: 2a3189442db341519836a07fb2d65dd9\n", + " hostname: Kais-MacBook-Pro.local\n", + " iterations_since_restore: 10\n", + " node_ip: 127.0.0.1\n", + " pid: 54416\n", + " time_since_restore: 0.22218012809753418\n", + " time_this_iter_s: 0.007044076919555664\n", + " time_total_s: 0.22218012809753418\n", + " timestamp: 1658505356\n", + " timesteps_since_restore: 0\n", + " training_iteration: 10\n", + " trial_id: c28a3_00000\n", + " warmup_time: 0.0035409927368164062\n", + " \n", + "Result for train_breast_cancer_c28a3_00003:\n", + " date: 2022-07-22_16-56-01\n", + " done: false\n", + " eval-error: 0.08391608391608392\n", + " eval-logloss: 0.6472820101918041\n", + " experiment_id: 7ff6133237404b4ea4755b9f8cd114f2\n", + " hostname: Kais-MacBook-Pro.local\n", + " iterations_since_restore: 1\n", + " node_ip: 127.0.0.1\n", + " pid: 54442\n", + " time_since_restore: 0.023206233978271484\n", + " time_this_iter_s: 0.023206233978271484\n", + " time_total_s: 
0.023206233978271484\n", + " timestamp: 1658505361\n", + " timesteps_since_restore: 0\n", + " training_iteration: 1\n", + " trial_id: c28a3_00003\n", + " warmup_time: 0.006722211837768555\n", + " \n", + "Result for train_breast_cancer_c28a3_00005:\n", + " date: 2022-07-22_16-56-01\n", + " done: true\n", + " eval-error: 0.08391608391608392\n", + " eval-logloss: 0.6914464114429234\n", + " experiment_id: 344762ab6d574b63a9374e19526d0510\n", + " hostname: Kais-MacBook-Pro.local\n", + " iterations_since_restore: 1\n", + " node_ip: 127.0.0.1\n", + " pid: 54444\n", + " time_since_restore: 0.02385115623474121\n", + " time_this_iter_s: 0.02385115623474121\n", + " time_total_s: 0.02385115623474121\n", + " timestamp: 1658505361\n", + " timesteps_since_restore: 0\n", + " training_iteration: 1\n", + " trial_id: c28a3_00005\n", + " warmup_time: 0.008936882019042969\n", + " \n", + "Result for train_breast_cancer_c28a3_00009:\n", + " date: 2022-07-22_16-56-01\n", + " done: true\n", + " eval-error: 0.11188811188811189\n", + " eval-logloss: 0.6822656309688008\n", + " experiment_id: 133901655fa64bf79f2dcc4e8e5e41b1\n", + " hostname: Kais-MacBook-Pro.local\n", + " iterations_since_restore: 1\n", + " node_ip: 127.0.0.1\n", + " pid: 54448\n", + " time_since_restore: 0.016593217849731445\n", + " time_this_iter_s: 0.016593217849731445\n", + " time_total_s: 0.016593217849731445\n", + " timestamp: 1658505361\n", + " timesteps_since_restore: 0\n", + " training_iteration: 1\n", + " trial_id: c28a3_00009\n", + " warmup_time: 0.004940032958984375\n", + " \n", + "Result for train_breast_cancer_c28a3_00007:\n", + " date: 2022-07-22_16-56-01\n", + " done: true\n", + " eval-error: 0.13286713286713286\n", + " eval-logloss: 0.6925980357023386\n", + " experiment_id: b4331027cbaf442ab905b2e51797dbbd\n", + " hostname: Kais-MacBook-Pro.local\n", + " iterations_since_restore: 1\n", + " node_ip: 127.0.0.1\n", + " pid: 54446\n", + " time_since_restore: 0.017589807510375977\n", + " time_this_iter_s: 
0.017589807510375977\n", + " time_total_s: 0.017589807510375977\n", + " timestamp: 1658505361\n", + " timesteps_since_restore: 0\n", + " training_iteration: 1\n", + " trial_id: c28a3_00007\n", + " warmup_time: 0.003782033920288086\n", + " \n", + "Result for train_breast_cancer_c28a3_00006:\n", + " date: 2022-07-22_16-56-01\n", + " done: true\n", + " eval-error: 0.11888111888111888\n", + " eval-logloss: 0.6926244418104212\n", + " experiment_id: d3906de5943a4e05a4cc782382f67d24\n", + " hostname: Kais-MacBook-Pro.local\n", + " iterations_since_restore: 1\n", + " node_ip: 127.0.0.1\n", + " pid: 54445\n", + " time_since_restore: 0.015311956405639648\n", + " time_this_iter_s: 0.015311956405639648\n", + " time_total_s: 0.015311956405639648\n", + " timestamp: 1658505361\n", + " timesteps_since_restore: 0\n", + " training_iteration: 1\n", + " trial_id: c28a3_00006\n", + " warmup_time: 0.005506038665771484\n", + " \n", + "Result for train_breast_cancer_c28a3_00002:\n", + " date: 2022-07-22_16-56-01\n", + " done: false\n", + " eval-error: 0.04895104895104895\n", + " eval-logloss: 0.6752762102580571\n", + " experiment_id: a3645fc2d43145d88a1f5b7cc94df703\n", + " hostname: Kais-MacBook-Pro.local\n", + " iterations_since_restore: 1\n", + " node_ip: 127.0.0.1\n", + " pid: 54441\n", + " time_since_restore: 0.027367830276489258\n", + " time_this_iter_s: 0.027367830276489258\n", + " time_total_s: 0.027367830276489258\n", + " timestamp: 1658505361\n", + " timesteps_since_restore: 0\n", + " training_iteration: 1\n", + " trial_id: c28a3_00002\n", + " warmup_time: 0.0062830448150634766\n", + " \n", + "Result for train_breast_cancer_c28a3_00001:\n", + " date: 2022-07-22_16-56-01\n", + " done: false\n", + " eval-error: 0.07692307692307693\n", + " eval-logloss: 0.6698804135089154\n", + " experiment_id: 85766fe4d9fa482a91e396a8fd509a19\n", + " hostname: Kais-MacBook-Pro.local\n", + " iterations_since_restore: 1\n", + " node_ip: 127.0.0.1\n", + " pid: 54440\n", + " time_since_restore: 
0.017169952392578125\n", + " time_this_iter_s: 0.017169952392578125\n", + " time_total_s: 0.017169952392578125\n", + " timestamp: 1658505361\n", + " timesteps_since_restore: 0\n", + " training_iteration: 1\n", + " trial_id: c28a3_00001\n", + " warmup_time: 0.006204843521118164\n", + " \n", + "Result for train_breast_cancer_c28a3_00008:\n", + " date: 2022-07-22_16-56-01\n", + " done: true\n", + " eval-error: 0.05594405594405594\n", + " eval-logloss: 0.692784742458717\n", + " experiment_id: 2c7d8bc38ad04536b1dec76819a2b3bf\n", + " hostname: Kais-MacBook-Pro.local\n", + " iterations_since_restore: 1\n", + " node_ip: 127.0.0.1\n", + " pid: 54447\n", + " time_since_restore: 0.01937389373779297\n", + " time_this_iter_s: 0.01937389373779297\n", + " time_total_s: 0.01937389373779297\n", + " timestamp: 1658505361\n", + " timesteps_since_restore: 0\n", + " training_iteration: 1\n", + " trial_id: c28a3_00008\n", + " warmup_time: 0.004342079162597656\n", + " \n", + "Result for train_breast_cancer_c28a3_00001:\n", + " date: 2022-07-22_16-56-01\n", + " done: true\n", + " eval-error: 0.06293706293706294\n", + " eval-logloss: 0.6503534216980834\n", + " experiment_id: 85766fe4d9fa482a91e396a8fd509a19\n", + " hostname: Kais-MacBook-Pro.local\n", + " iterations_since_restore: 2\n", + " node_ip: 127.0.0.1\n", + " pid: 54440\n", + " time_since_restore: 0.13567376136779785\n", + " time_this_iter_s: 0.11850380897521973\n", + " time_total_s: 0.13567376136779785\n", + " timestamp: 1658505361\n", + " timesteps_since_restore: 0\n", + " training_iteration: 2\n", + " trial_id: c28a3_00001\n", + " warmup_time: 0.006204843521118164\n", + " \n", + "Result for train_breast_cancer_c28a3_00004:\n", + " date: 2022-07-22_16-56-01\n", + " done: true\n", + " eval-error: 0.06993006993006994\n", + " eval-logloss: 0.689577207281873\n", + " experiment_id: ef4fdc645c444112985b4957ab8a84e9\n", + " hostname: Kais-MacBook-Pro.local\n", + " iterations_since_restore: 1\n", + " node_ip: 127.0.0.1\n", + " pid: 
54443\n", + " time_since_restore: 0.027021169662475586\n", + " time_this_iter_s: 0.027021169662475586\n", + " time_total_s: 0.027021169662475586\n", + " timestamp: 1658505361\n", + " timesteps_since_restore: 0\n", + " training_iteration: 1\n", + " trial_id: c28a3_00004\n", + " warmup_time: 0.0063669681549072266\n", + " \n", + "Result for train_breast_cancer_c28a3_00002:\n", + " date: 2022-07-22_16-56-01\n", + " done: true\n", + " eval-error: 0.04195804195804196\n", + " eval-logloss: 0.658545415301423\n", + " experiment_id: a3645fc2d43145d88a1f5b7cc94df703\n", + " hostname: Kais-MacBook-Pro.local\n", + " iterations_since_restore: 2\n", + " node_ip: 127.0.0.1\n", + " pid: 54441\n", + " time_since_restore: 0.17307591438293457\n", + " time_this_iter_s: 0.1457080841064453\n", + " time_total_s: 0.17307591438293457\n", + " timestamp: 1658505361\n", + " timesteps_since_restore: 0\n", + " training_iteration: 2\n", + " trial_id: c28a3_00002\n", + " warmup_time: 0.0062830448150634766\n", + " \n", + "Result for train_breast_cancer_c28a3_00003:\n", + " date: 2022-07-22_16-56-01\n", + " done: true\n", + " eval-error: 0.04195804195804196\n", + " eval-logloss: 0.38665050018083796\n", + " experiment_id: 7ff6133237404b4ea4755b9f8cd114f2\n", + " hostname: Kais-MacBook-Pro.local\n", + " iterations_since_restore: 10\n", + " node_ip: 127.0.0.1\n", + " pid: 54442\n", + " time_since_restore: 0.28186488151550293\n", + " time_this_iter_s: 0.03063178062438965\n", + " time_total_s: 0.28186488151550293\n", + " timestamp: 1658505361\n", + " timesteps_since_restore: 0\n", + " training_iteration: 10\n", + " trial_id: c28a3_00003\n", + " warmup_time: 0.006722211837768555\n", + " \n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2022-07-22 16:56:01,498\tINFO tune.py:738 -- Total run time: 10.53 seconds (10.37 seconds for the tuning loop).\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Best model parameters: {'objective': 'binary:logistic', 
'eval_metric': ['logloss', 'error'], 'max_depth': 2, 'min_child_weight': 3, 'subsample': 0.782626252548841, 'eta': 0.06385952388342125}\n", + "Best model total accuracy: 0.9580\n" + ] + } + ], + "source": [ + "import sklearn.datasets\n", + "import sklearn.metrics\n", + "import os\n", + "from ray.tune.schedulers import ASHAScheduler\n", + "from sklearn.model_selection import train_test_split\n", + "import xgboost as xgb\n", + "\n", + "from ray import air, tune\n", + "from ray.air import session\n", + "from ray.tune.integration.xgboost import TuneReportCheckpointCallback\n", + "\n", + "\n", + "def train_breast_cancer(config: dict):\n", + " # This is a simple training function to be passed into Tune\n", + " # Load dataset\n", + " data, labels = sklearn.datasets.load_breast_cancer(return_X_y=True)\n", + " # Split into train and test set\n", + " train_x, test_x, train_y, test_y = train_test_split(data, labels, test_size=0.25)\n", + " # Build input matrices for XGBoost\n", + " train_set = xgb.DMatrix(train_x, label=train_y)\n", + " test_set = xgb.DMatrix(test_x, label=test_y)\n", + " # Train the classifier, using the Tune callback\n", + " xgb.train(\n", + " config,\n", + " train_set,\n", + " evals=[(test_set, \"eval\")],\n", + " verbose_eval=False,\n", + " callbacks=[TuneReportCheckpointCallback(filename=\"model.xgb\")],\n", + " )\n", + "\n", + "\n", + "def get_best_model_checkpoint(results):\n", + " best_bst = xgb.Booster()\n", + " best_result = results.get_best_result()\n", + "\n", + " with best_result.checkpoint.as_directory() as best_checkpoint_dir:\n", + " best_bst.load_model(os.path.join(best_checkpoint_dir, \"model.xgb\"))\n", + " accuracy = 1.0 - best_result.metrics[\"eval-error\"]\n", + " print(f\"Best model parameters: {best_result.config}\")\n", + " print(f\"Best model total accuracy: {accuracy:.4f}\")\n", + " return best_bst\n", + "\n", + "\n", + "def tune_xgboost(smoke_test=False):\n", + " search_space = {\n", + " # You can mix constants with search space 
objects.\n", + " \"objective\": \"binary:logistic\",\n", + " \"eval_metric\": [\"logloss\", \"error\"],\n", + " \"max_depth\": tune.randint(1, 9),\n", + " \"min_child_weight\": tune.choice([1, 2, 3]),\n", + " \"subsample\": tune.uniform(0.5, 1.0),\n", + " \"eta\": tune.loguniform(1e-4, 1e-1),\n", + " }\n", + " # This will enable aggressive early stopping of bad trials.\n", + " scheduler = ASHAScheduler(\n", + " max_t=10, grace_period=1, reduction_factor=2 # 10 training iterations\n", + " )\n", + "\n", + " tuner = tune.Tuner(\n", + " train_breast_cancer,\n", + " tune_config=tune.TuneConfig(\n", + " metric=\"eval-logloss\",\n", + " mode=\"min\",\n", + " scheduler=scheduler,\n", + " num_samples=1 if smoke_test else 10,\n", + " ),\n", + " param_space=search_space,\n", + " )\n", + " results = tuner.fit()\n", + "\n", + " return results\n", + "\n", + "\n", + "if __name__ == \"__main__\":\n", + " import argparse\n", + "\n", + " parser = argparse.ArgumentParser()\n", + " parser.add_argument(\n", + " \"--smoke-test\", action=\"store_true\", help=\"Finish quickly for testing\"\n", + " )\n", + " args, _ = parser.parse_known_args()\n", + "\n", + " results = tune_xgboost(smoke_test=args.smoke_test)\n", + "\n", + " # Load the best model checkpoint.\n", + " best_bst = get_best_model_checkpoint(results)\n", + "\n", + " # You could now do further predictions with\n", + " # best_bst.predict(...)\n" + ] + }, + { + "cell_type": "markdown", + "id": "20732fe4", + "metadata": {}, + "source": [ + "The output of our run could look like this:\n", + "\n", + "```{code-block} bash\n", + ":emphasize-lines: 7\n", + "\n", + " Number of trials: 10/10 (10 TERMINATED)\n", + " +---------------------------------+------------+-------+-------------+-------------+--------------------+-------------+--------+------------------+----------------+--------------+\n", + " | Trial name | status | loc | eta | max_depth | min_child_weight | subsample | iter | total time (s) | eval-logloss | eval-error |\n", + " 
|---------------------------------+------------+-------+-------------+-------------+--------------------+-------------+--------+------------------+----------------+--------------|\n", + " | train_breast_cancer_ba275_00000 | TERMINATED | | 0.00205087 | 2 | 1 | 0.898391 | 10 | 0.380619 | 0.678039 | 0.090909 |\n", + " | train_breast_cancer_ba275_00001 | TERMINATED | | 0.000183834 | 4 | 3 | 0.924939 | 1 | 0.0228798 | 0.693009 | 0.111888 |\n", + " | train_breast_cancer_ba275_00002 | TERMINATED | | 0.0242721 | 7 | 2 | 0.501551 | 10 | 0.376154 | 0.54472 | 0.06993 |\n", + " | train_breast_cancer_ba275_00003 | TERMINATED | | 0.000449692 | 5 | 3 | 0.890212 | 1 | 0.0234981 | 0.692811 | 0.090909 |\n", + " | train_breast_cancer_ba275_00004 | TERMINATED | | 0.000376393 | 7 | 2 | 0.883609 | 1 | 0.0231569 | 0.692847 | 0.062937 |\n", + " | train_breast_cancer_ba275_00005 | TERMINATED | | 0.00231942 | 3 | 3 | 0.877464 | 2 | 0.104867 | 0.689541 | 0.083916 |\n", + " | train_breast_cancer_ba275_00006 | TERMINATED | | 0.000542326 | 1 | 2 | 0.578584 | 1 | 0.0213971 | 0.692765 | 0.083916 |\n", + " | train_breast_cancer_ba275_00007 | TERMINATED | | 0.0016801 | 1 | 2 | 0.975302 | 1 | 0.02226 | 0.691999 | 0.083916 |\n", + " | train_breast_cancer_ba275_00008 | TERMINATED | | 0.000595756 | 8 | 3 | 0.58429 | 1 | 0.0221152 | 0.692657 | 0.06993 |\n", + " | train_breast_cancer_ba275_00009 | TERMINATED | | 0.000357845 | 8 | 1 | 0.637776 | 1 | 0.022635 | 0.692859 | 0.090909 |\n", + " +---------------------------------+------------+-------+-------------+-------------+--------------------+-------------+--------+------------------+----------------+--------------+\n", + "\n", + "\n", + " Best model parameters: {'objective': 'binary:logistic', 'eval_metric': ['logloss', 'error'], 'max_depth': 7, 'min_child_weight': 2, 'subsample': 0.5015513240240503, 'eta': 0.024272050872920895}\n", + " Best model total accuracy: 0.9301\n", + "```\n", + "\n", + "As you can see, most trials have been stopped only after a 
few iterations. Only the\n", + "two most promising trials were run for the full 10 iterations.\n", + "\n", + "You can also ensure that all available resources are being used as the scheduler\n", + "terminates trials, freeing them up. This can be done through the\n", + "`ResourceChangingScheduler`. An example of this can be found here:\n", + "{doc}`/tune/examples/includes/xgboost_dynamic_resources_example`.\n", + "\n", + "## Using fractional GPUs\n", + "\n", + "You can often accelerate your training by using GPUs in addition to CPUs. However,\n", + "you usually don't have as many GPUs as you have trials to run. For instance, if you\n", + "run 10 Tune trials in parallel, you usually don't have access to 10 separate GPUs.\n", + "\n", + "Tune supports *fractional GPUs*. This means that each task is assigned a fraction\n", + "of the GPU memory for training. For 10 tasks, this could look like this:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7d1b20a3", + "metadata": {}, + "outputs": [], + "source": [ + "config = {\n", + " \"objective\": \"binary:logistic\",\n", + " \"eval_metric\": [\"logloss\", \"error\"],\n", + " \"tree_method\": \"gpu_hist\",\n", + " \"max_depth\": tune.randint(1, 9),\n", + " \"min_child_weight\": tune.choice([1, 2, 3]),\n", + " \"subsample\": tune.uniform(0.5, 1.0),\n", + " \"eta\": tune.loguniform(1e-4, 1e-1),\n", + "}\n", + "\n", + "tuner = tune.Tuner(\n", + " tune.with_resources(train_breast_cancer, resources={\"cpu\": 1, \"gpu\": 0.1}),\n", + " tune_config=tune.TuneConfig(\n", + " num_samples=10,\n", + " ),\n", + " param_space=config,\n", + ")\n", + "results = tuner.fit()\n" + ] + }, + { + "cell_type": "markdown", + "id": "ee131861", + "metadata": {}, + "source": [ + "Each task thus works with 10% of the available GPU memory. 
You also have to tell\n", + "XGBoost to use the `gpu_hist` tree method, so it knows it should use the GPU.\n", + "\n", + "## Conclusion\n", + "\n", + "You should now have a basic understanding on how to train XGBoost models and on how\n", + "to tune the hyperparameters to yield the best results. In our simple example,\n", + "Tuning the parameters didn't make a huge difference for the accuracy.\n", + "But in larger applications, intelligent hyperparameter tuning can make the\n", + "difference between a model that doesn't seem to learn at all, and a model\n", + "that outperforms all the other ones.\n", + "\n", + "## More XGBoost Examples\n", + "\n", + "- {doc}`/tune/examples/includes/xgboost_dynamic_resources_example`:\n", + " Trains a basic XGBoost model with Tune with the class-based API and a ResourceChangingScheduler, ensuring all resources are being used at all time.\n", + "\n", + "## Learn More\n", + "\n", + "- [XGBoost Hyperparameter Tuning - A Visual Guide](https://kevinvecmanis.io/machine%20learning/hyperparameter%20tuning/dataviz/python/2019/05/11/XGBoost-Tuning-Visual-Guide.html)\n", + "- [Notes on XGBoost Parameter Tuning](https://xgboost.readthedocs.io/en/latest/tutorials/param_tuning.html)\n", + "- [Doing XGBoost Hyperparameter Tuning the smart way](https://towardsdatascience.com/doing-xgboost-hyper-parameter-tuning-the-smart-way-part-1-of-2-f6d255a45dde)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "ray_dev_py38", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.13 | packaged by conda-forge | (default, Mar 25 2022, 06:05:16) \n[Clang 12.0.1 ]" + }, + "orphan": true, + "vscode": { + "interpreter": { + "hash": "265d195fda5292fe8f69c6e37c435a5634a1ed3b6799724e66a975f68fa21517" + } + } + }, + 
"nbformat": 4, + "nbformat_minor": 5 } From c7b60d55f0f54d407876033cd41e89da9cf5fb9f Mon Sep 17 00:00:00 2001 From: Chen Shen Date: Wed, 3 May 2023 12:44:07 -0700 Subject: [PATCH 215/424] [autoscaler v2][1/n] introducing storage interface for instance provider (#34976) Why are these changes needed? this is the stack of PRs to introduce new node_provider for autoscaler v2. For overall design please refer to #34009 Stack of PRs #34976 <- this PR #34977 #34979 #34983 #34985 introduce a versioned storage interface for node_provider in autoscaler v2. Interface for a storage backend that stores the state of nodes in the cluster. The storage is versioned, which means that each successful stage change to the storage will bump the version number. The version number can be used to implement optimistic concurrency control. Each entry in the storage table is also versioned. The version number of an entry is the last version number when the entry is updated. The storage will be used to store autoscaler's scaling decisions (instance to be started), as well the instance state from the cloud provider. --- .buildkite/pipeline.build.yml | 2 + python/ray/autoscaler/v2/BUILD | 14 ++ .../autoscaler/v2/instance_manager/storage.py | 180 ++++++++++++++++++ .../ray/autoscaler/v2/tests/test_storage.py | 87 +++++++++ 4 files changed, 283 insertions(+) create mode 100644 python/ray/autoscaler/v2/BUILD create mode 100644 python/ray/autoscaler/v2/instance_manager/storage.py create mode 100644 python/ray/autoscaler/v2/tests/test_storage.py diff --git a/.buildkite/pipeline.build.yml b/.buildkite/pipeline.build.yml index cb227306cf39..80ba0820cb91 100644 --- a/.buildkite/pipeline.build.yml +++ b/.buildkite/pipeline.build.yml @@ -378,6 +378,8 @@ --test_env=DOCKER_CERT_PATH=/certs/client --test_env=DOCKER_TLS_CERTDIR=/certs -- python/ray/tests/... + - bazel test --config=ci $(./ci/run/bazel_export_options) + -- python/ray/autoscaler/v2/... 
- label: ":python: (Large)" conditions: ["RAY_CI_PYTHON_AFFECTED"] diff --git a/python/ray/autoscaler/v2/BUILD b/python/ray/autoscaler/v2/BUILD new file mode 100644 index 000000000000..bef608e8d148 --- /dev/null +++ b/python/ray/autoscaler/v2/BUILD @@ -0,0 +1,14 @@ +# -------------------------------------------------------------------- +# Tests from the python/ray/autoscaler/v2/tests directory. +# Covers all tests starting with `test_`. +# Please keep these sorted alphabetically. +# -------------------------------------------------------------------- +load("//bazel:python.bzl", "py_test_module_list") + +py_test( + name = "test_storage", + size = "small", + srcs = ["tests/test_storage.py"], + tags = ["team:core"], + deps = ["//:ray_lib",], +) \ No newline at end of file diff --git a/python/ray/autoscaler/v2/instance_manager/storage.py b/python/ray/autoscaler/v2/instance_manager/storage.py new file mode 100644 index 000000000000..40447bf79696 --- /dev/null +++ b/python/ray/autoscaler/v2/instance_manager/storage.py @@ -0,0 +1,180 @@ +import copy +from abc import ABCMeta, abstractmethod +from collections import defaultdict, namedtuple +from threading import Lock +from typing import Dict, List, Optional, Tuple + +StoreStatus = namedtuple("StoreStatus", ["success", "version"]) +VersionedValue = namedtuple("VersionedValue", ["value", "version"]) + + +class Storage(metaclass=ABCMeta): + """Interface for a storage backend that stores the state of nodes in the cluster. + + The storage is thread-safe. + + The storage is versioned, which means that each successful stage change to the + storage will bump the version number. The version number can be used to + implement optimistic concurrency control. + + Each entry in the storage table is also versioned. The version number of an entry + is the last version number when the entry is updated. 
+ """ + + @abstractmethod + def batch_update( + self, + table: str, + mutation: Optional[Dict[str, str]] = None, + deletion: Optional[List[str]] = None, + expected_storage_version: Optional[int] = None, + ) -> StoreStatus: + """Batch update the storage table. This method is atomic. + + Args: + table: The name of the table. + mutation: A dictionary of key-value pairs to be updated. + deletion: A list of keys to be deleted. + expected_storage_version: The expected storage version. The + update will fail if the version does not match the + current storage version. + + Returns: + StoreStatus: A tuple of (success, version). If the update is + successful, returns (True, new_version). + Otherwise, returns (False, current_version). + """ + raise NotImplementedError("batch_update() has to be implemented") + + @abstractmethod + def update( + self, + table: str, + key: str, + value: str, + expected_entry_version: Optional[int] = None, + insert_only: bool = False, + ) -> StoreStatus: + """Update a single entry in the storage table. + + Args: + table: The name of the table. + key: The key of the entry. + value: The value of the entry. + expected_entry_version: The expected version of the entry. + The update will fail if the version does not match the current + version of the entry. + insert_only: If True, the update will + fail if the entry already exists. + Returns: + StoreStatus: A tuple of (success, version). If the update is + successful, returns (True, new_version). Otherwise, + returns (False, current_version). + """ + raise NotImplementedError("update() has to be implemented") + + @abstractmethod + def get_all(self, table: str) -> Tuple[Dict[str, Tuple[str, int]], int]: + raise NotImplementedError("get_all() has to be implemented") + + @abstractmethod + def get( + self, table: str, keys: List[str] + ) -> Tuple[Dict[str, Tuple[str, int]], int]: + """Get a list of entries from the storage table. + + Args: + table: The name of the table. 
+ keys: A list of keys to be retrieved. If the list is empty, + all entries in the table will be returned. + + Returns: + Tuple[Dict[str, VersionedValue], int]: A tuple of + (entries, storage_version). The entries is a dictionary of + (key, (value, entry_version)) pairs. The entry_version is the + version of the entry when it was last updated. The + storage_version is the current storage version. + """ + raise NotImplementedError("get() has to be implemented") + + @abstractmethod + def get_version(self) -> int: + """Get the current storage version. + + Returns: + int: The current storage version. + """ + raise NotImplementedError("get_version() has to be implemented") + + +class InMemoryStorage(Storage): + """An in-memory implementation of the Storage interface. This implementation + is not durable""" + + def __init__(self): + self._version = 0 + self._tables = defaultdict(dict) + self._lock = Lock() + + def batch_update( + self, + table: str, + mutation: Dict[str, str] = None, + deletion: List[str] = None, + expected_version: Optional[int] = None, + ) -> StoreStatus: + mutation = mutation if mutation else {} + deletion = deletion if deletion else [] + with self._lock: + if expected_version is not None and expected_version != self._version: + return StoreStatus(False, self._version) + self._version += 1 + key_value_pairs_with_version = { + key: VersionedValue(value, self._version) + for key, value in mutation.items() + } + self._tables[table].update(key_value_pairs_with_version) + for deleted_key in deletion: + self._tables[table].pop(deleted_key, None) + return StoreStatus(True, self._version) + + def update( + self, + table: str, + key: str, + value: str, + expected_entry_version: Optional[int] = None, + expected_storage_version: Optional[int] = None, + insert_only: bool = False, + ) -> StoreStatus: + with self._lock: + if ( + expected_storage_version is not None + and expected_storage_version != self._version + ): + return StoreStatus(False, self._version) + if 
insert_only and key in self._tables[table]: + return StoreStatus(False, self._version) + _, version = self._tables[table].get(key, (None, -1)) + if expected_entry_version is not None and expected_entry_version != version: + return StoreStatus(False, self._version) + self._version += 1 + self._tables[table][key] = VersionedValue(value, self._version) + return StoreStatus(True, self._version) + + def get_all(self, table: str) -> Tuple[Dict[str, VersionedValue], int]: + with self._lock: + return (copy.deepcopy(self._tables[table]), self._version) + + def get(self, table: str, keys: List[str]) -> Tuple[Dict[str, VersionedValue], int]: + if not keys: + return self.get_all(table) + with self._lock: + result = {} + for key in keys: + if key in self._tables.get(table, {}): + result[key] = self._tables[table][key] + return StoreStatus(result, self._version) + + def get_version(self) -> int: + return self._version diff --git a/python/ray/autoscaler/v2/tests/test_storage.py b/python/ray/autoscaler/v2/tests/test_storage.py new file mode 100644 index 000000000000..92231c89402f --- /dev/null +++ b/python/ray/autoscaler/v2/tests/test_storage.py @@ -0,0 +1,87 @@ +# coding: utf-8 +import os +import sys + +import pytest # noqa + +from ray.autoscaler.v2.instance_manager.storage import ( + InMemoryStorage, + StoreStatus, + VersionedValue, +) + + +@pytest.mark.parametrize("storage", [InMemoryStorage()]) +def test_storage(storage): + assert storage.get_version() == 0 + assert storage.get_all(table="test_table") == ({}, 0) + assert storage.get(table="test_table", keys=[]) == ({}, 0) + assert storage.get(table="test_table", keys=["key1"]) == ({}, 0) + + assert storage.batch_update( + table="test_table", mutation={"key1": "value1"} + ) == StoreStatus( + True, + 1, + ) + + assert storage.get_version() == 1 + + assert storage.get_all(table="test_table") == ( + {"key1": VersionedValue("value1", 1)}, + 1, + ) + assert storage.get(table="test_table", keys=[]) == ( + {"key1": 
VersionedValue("value1", 1)}, + 1, + ) + + assert storage.batch_update( + table="test_table", mutation={"key1": "value2"}, expected_version=0 + ) == StoreStatus(False, 1) + + assert storage.batch_update( + table="test_table", mutation={"key1": "value2"}, expected_version=1 + ) == StoreStatus(True, 2) + + assert storage.get_all(table="test_table") == ( + {"key1": VersionedValue("value2", 2)}, + 2, + ) + + assert storage.batch_update( + table="test_table", + mutation={"key2": "value3", "key3": "value4"}, + deletion=["key1"], + expected_version=2, + ) == StoreStatus(True, 3) + + assert storage.get_all(table="test_table") == ( + {"key2": VersionedValue("value3", 3), "key3": VersionedValue("value4", 3)}, + 3, + ) + + assert storage.get(table="test_table", keys=["key2", "key1"]) == ( + {"key2": VersionedValue("value3", 3)}, + 3, + ) + + assert storage.update( + table="test_table", key="key2", value="value5" + ) == StoreStatus(True, 4) + assert storage.update( + table="test_table", key="key2", value="value5", insert_only=True + ) == StoreStatus(False, 4) + assert storage.update( + table="test_table", key="key2", value="value5", expected_entry_version=3 + ) == StoreStatus(False, 4) + assert storage.update( + table="test_table", key="key2", value="value6", expected_entry_version=4 + ) == StoreStatus(True, 5) + + +if __name__ == "__main__": + if os.environ.get("PARALLEL_CI"): + sys.exit(pytest.main(["-n", "auto", "--boxed", "-vs", __file__])) + else: + sys.exit(pytest.main(["-sv", __file__])) From 639addfed348782f6f3c11b50bfa4ec6a5ff2946 Mon Sep 17 00:00:00 2001 From: Lonnie Liu <95255098+aslonnie@users.noreply.github.com> Date: Wed, 3 May 2023 12:47:38 -0700 Subject: [PATCH 216/424] Update bazelisk version. (#34729) It has fixes for python toolchain setup. 
Signed-off-by: Lonnie Liu --- .bazeliskrc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.bazeliskrc b/.bazeliskrc index f5f1a08e94b8..bd4a53557e49 100644 --- a/.bazeliskrc +++ b/.bazeliskrc @@ -1 +1 @@ -USE_BAZEL_VERSION=5.4.0 +USE_BAZEL_VERSION=5.4.1 From 5d1b3a6b34eb6ec80291a3d55a265d05234f127a Mon Sep 17 00:00:00 2001 From: Lonnie Liu <95255098+aslonnie@users.noreply.github.com> Date: Wed, 3 May 2023 12:53:36 -0700 Subject: [PATCH 217/424] [CI] Build only one wheel for ray containers (#34963) Signed-off-by: Lonnie Liu --- .buildkite/pipeline.arm64.yml | 16 +++---- .buildkite/pipeline.build.yml | 16 +++---- ci/ci.sh | 1 + python/build-wheel-manylinux2014.sh | 68 ++++++++++++++++------------- 4 files changed, 55 insertions(+), 46 deletions(-) diff --git a/.buildkite/pipeline.arm64.yml b/.buildkite/pipeline.arm64.yml index 6ac4232a5f79..61744b193471 100644 --- a/.buildkite/pipeline.arm64.yml +++ b/.buildkite/pipeline.arm64.yml @@ -60,7 +60,7 @@ conditions: ["RAY_CI_PYTHON_DEPENDENCIES_AFFECTED", "RAY_CI_DOCKER_AFFECTED", "RAY_CI_CORE_CPP_AFFECTED"] instance_size: arm64-medium commands: - - LINUX_WHEELS=1 ./ci/ci.sh build + - LINUX_WHEELS=1 BUILD_ONE_PYTHON_ONLY=3.7 ./ci/ci.sh build - pip install -q docker aws_requests_auth boto3 - ./ci/env/env_info.sh - if [ "${BUILDKITE_PULL_REQUEST}" = "false" ]; then python .buildkite/copy_files.py --destination docker_login; fi @@ -70,7 +70,7 @@ conditions: ["RAY_CI_PYTHON_DEPENDENCIES_AFFECTED", "RAY_CI_DOCKER_AFFECTED", "RAY_CI_CORE_CPP_AFFECTED"] instance_size: arm64-medium commands: - - LINUX_WHEELS=1 ./ci/ci.sh build + - LINUX_WHEELS=1 BUILD_ONE_PYTHON_ONLY=3.7 ./ci/ci.sh build - pip install -q docker aws_requests_auth boto3 - ./ci/env/env_info.sh - if [ "${BUILDKITE_PULL_REQUEST}" = "false" ]; then python .buildkite/copy_files.py --destination docker_login; fi @@ -80,7 +80,7 @@ conditions: ["RAY_CI_PYTHON_DEPENDENCIES_AFFECTED", "RAY_CI_DOCKER_AFFECTED", "RAY_CI_CORE_CPP_AFFECTED"] instance_size: 
arm64-medium commands: - - LINUX_WHEELS=1 ./ci/ci.sh build + - LINUX_WHEELS=1 BUILD_ONE_PYTHON_ONLY=3.8 ./ci/ci.sh build - pip install -q docker aws_requests_auth boto3 - ./ci/env/env_info.sh - if [ "${BUILDKITE_PULL_REQUEST}" = "false" ]; then python .buildkite/copy_files.py --destination docker_login; fi @@ -90,7 +90,7 @@ conditions: ["RAY_CI_PYTHON_DEPENDENCIES_AFFECTED", "RAY_CI_DOCKER_AFFECTED", "RAY_CI_CORE_CPP_AFFECTED"] instance_size: arm64-medium commands: - - LINUX_WHEELS=1 ./ci/ci.sh build + - LINUX_WHEELS=1 BUILD_ONE_PYTHON_ONLY=3.8 ./ci/ci.sh build - pip install -q docker aws_requests_auth boto3 - ./ci/env/env_info.sh - if [ "${BUILDKITE_PULL_REQUEST}" = "false" ]; then python .buildkite/copy_files.py --destination docker_login; fi @@ -100,7 +100,7 @@ conditions: ["RAY_CI_PYTHON_DEPENDENCIES_AFFECTED", "RAY_CI_DOCKER_AFFECTED", "RAY_CI_CORE_CPP_AFFECTED"] instance_size: arm64-medium commands: - - LINUX_WHEELS=1 ./ci/ci.sh build + - LINUX_WHEELS=1 BUILD_ONE_PYTHON_ONLY=3.9 ./ci/ci.sh build - pip install -q docker aws_requests_auth boto3 - ./ci/env/env_info.sh - if [ "${BUILDKITE_PULL_REQUEST}" = "false" ]; then python .buildkite/copy_files.py --destination docker_login; fi @@ -110,7 +110,7 @@ conditions: ["RAY_CI_PYTHON_DEPENDENCIES_AFFECTED", "RAY_CI_DOCKER_AFFECTED", "RAY_CI_CORE_CPP_AFFECTED"] instance_size: arm64-medium commands: - - LINUX_WHEELS=1 ./ci/ci.sh build + - LINUX_WHEELS=1 BUILD_ONE_PYTHON_ONLY=3.9 ./ci/ci.sh build - pip install -q docker aws_requests_auth boto3 - ./ci/env/env_info.sh - if [ "${BUILDKITE_PULL_REQUEST}" = "false" ]; then python .buildkite/copy_files.py --destination docker_login; fi @@ -120,7 +120,7 @@ conditions: ["RAY_CI_PYTHON_DEPENDENCIES_AFFECTED", "RAY_CI_DOCKER_AFFECTED", "RAY_CI_CORE_CPP_AFFECTED"] instance_size: arm64-medium commands: - - LINUX_WHEELS=1 ./ci/ci.sh build + - LINUX_WHEELS=1 BUILD_ONE_PYTHON_ONLY=3.10 ./ci/ci.sh build - pip install -q docker aws_requests_auth boto3 - ./ci/env/env_info.sh - if [ 
"${BUILDKITE_PULL_REQUEST}" = "false" ]; then python .buildkite/copy_files.py --destination docker_login; fi @@ -130,7 +130,7 @@ conditions: ["RAY_CI_PYTHON_DEPENDENCIES_AFFECTED", "RAY_CI_DOCKER_AFFECTED", "RAY_CI_CORE_CPP_AFFECTED"] instance_size: arm64-medium commands: - - LINUX_WHEELS=1 ./ci/ci.sh build + - LINUX_WHEELS=1 BUILD_ONE_PYTHON_ONLY=3.10 ./ci/ci.sh build - pip install -q docker aws_requests_auth boto3 - if [ "${BUILDKITE_PULL_REQUEST}" = "false" ]; then python .buildkite/copy_files.py --destination docker_login; fi - python ./ci/build/build-docker-images.py --py-versions py310 -T cu113 -T cu116 -T cu118 --build-type BUILDKITE --build-base --suffix aarch64 diff --git a/.buildkite/pipeline.build.yml b/.buildkite/pipeline.build.yml index 80ba0820cb91..fb10e7c3523c 100644 --- a/.buildkite/pipeline.build.yml +++ b/.buildkite/pipeline.build.yml @@ -88,7 +88,7 @@ conditions: ["RAY_CI_PYTHON_DEPENDENCIES_AFFECTED", "RAY_CI_DOCKER_AFFECTED", "RAY_CI_CORE_CPP_AFFECTED"] instance_size: medium commands: - - LINUX_WHEELS=1 ./ci/ci.sh build + - LINUX_WHEELS=1 BUILD_ONE_PYTHON_ONLY=3.7 ./ci/ci.sh build - pip install -q docker aws_requests_auth boto3 - ./ci/env/env_info.sh - if [ "${BUILDKITE_PULL_REQUEST}" = "false" ]; then python .buildkite/copy_files.py --destination docker_login; fi @@ -98,7 +98,7 @@ conditions: ["RAY_CI_PYTHON_DEPENDENCIES_AFFECTED", "RAY_CI_DOCKER_AFFECTED", "RAY_CI_CORE_CPP_AFFECTED"] instance_size: medium commands: - - LINUX_WHEELS=1 ./ci/ci.sh build + - LINUX_WHEELS=1 BUILD_ONE_PYTHON_ONLY=3.7 ./ci/ci.sh build - pip install -q docker aws_requests_auth boto3 - ./ci/env/env_info.sh - if [ "${BUILDKITE_PULL_REQUEST}" = "false" ]; then python .buildkite/copy_files.py --destination docker_login; fi @@ -108,7 +108,7 @@ conditions: ["RAY_CI_PYTHON_DEPENDENCIES_AFFECTED", "RAY_CI_DOCKER_AFFECTED", "RAY_CI_CORE_CPP_AFFECTED"] instance_size: medium commands: - - LINUX_WHEELS=1 ./ci/ci.sh build + - LINUX_WHEELS=1 BUILD_ONE_PYTHON_ONLY=3.8 ./ci/ci.sh 
build - pip install -q docker aws_requests_auth boto3 - ./ci/env/env_info.sh - if [ "${BUILDKITE_PULL_REQUEST}" = "false" ]; then python .buildkite/copy_files.py --destination docker_login; fi @@ -118,7 +118,7 @@ conditions: ["RAY_CI_PYTHON_DEPENDENCIES_AFFECTED", "RAY_CI_DOCKER_AFFECTED", "RAY_CI_CORE_CPP_AFFECTED"] instance_size: medium commands: - - LINUX_WHEELS=1 ./ci/ci.sh build + - LINUX_WHEELS=1 BUILD_ONE_PYTHON_ONLY=3.8 ./ci/ci.sh build - pip install -q docker aws_requests_auth boto3 - ./ci/env/env_info.sh - if [ "${BUILDKITE_PULL_REQUEST}" = "false" ]; then python .buildkite/copy_files.py --destination docker_login; fi @@ -128,7 +128,7 @@ conditions: ["RAY_CI_PYTHON_DEPENDENCIES_AFFECTED", "RAY_CI_DOCKER_AFFECTED", "RAY_CI_CORE_CPP_AFFECTED"] instance_size: medium commands: - - LINUX_WHEELS=1 ./ci/ci.sh build + - LINUX_WHEELS=1 BUILD_ONE_PYTHON_ONLY=3.9 ./ci/ci.sh build - pip install -q docker aws_requests_auth boto3 - ./ci/env/env_info.sh - if [ "${BUILDKITE_PULL_REQUEST}" = "false" ]; then python .buildkite/copy_files.py --destination docker_login; fi @@ -138,7 +138,7 @@ conditions: ["RAY_CI_PYTHON_DEPENDENCIES_AFFECTED", "RAY_CI_DOCKER_AFFECTED", "RAY_CI_CORE_CPP_AFFECTED"] instance_size: medium commands: - - LINUX_WHEELS=1 ./ci/ci.sh build + - LINUX_WHEELS=1 BUILD_ONE_PYTHON_ONLY=3.9 ./ci/ci.sh build - pip install -q docker aws_requests_auth boto3 - ./ci/env/env_info.sh - if [ "${BUILDKITE_PULL_REQUEST}" = "false" ]; then python .buildkite/copy_files.py --destination docker_login; fi @@ -148,7 +148,7 @@ conditions: ["RAY_CI_PYTHON_DEPENDENCIES_AFFECTED", "RAY_CI_DOCKER_AFFECTED", "RAY_CI_CORE_CPP_AFFECTED"] instance_size: medium commands: - - LINUX_WHEELS=1 ./ci/ci.sh build + - LINUX_WHEELS=1 BUILD_ONE_PYTHON_ONLY=3.10 ./ci/ci.sh build - pip install -q docker aws_requests_auth boto3 - ./ci/env/env_info.sh - if [ "${BUILDKITE_PULL_REQUEST}" = "false" ]; then python .buildkite/copy_files.py --destination docker_login; fi @@ -158,7 +158,7 @@ conditions: 
["RAY_CI_PYTHON_DEPENDENCIES_AFFECTED", "RAY_CI_DOCKER_AFFECTED", "RAY_CI_CORE_CPP_AFFECTED"] instance_size: medium commands: - - LINUX_WHEELS=1 ./ci/ci.sh build + - LINUX_WHEELS=1 BUILD_ONE_PYTHON_ONLY=3.10 ./ci/ci.sh build - pip install -q docker aws_requests_auth boto3 - if [ "${BUILDKITE_PULL_REQUEST}" = "false" ]; then python .buildkite/copy_files.py --destination docker_login; fi - python ./ci/build/build-docker-images.py --py-versions py310 -T cu111 -T cu112 -T cu113 -T cu116 -T cu118 --build-type BUILDKITE --build-base diff --git a/ci/ci.sh b/ci/ci.sh index 5bf5ae3ec4d3..7b93708078b9 100755 --- a/ci/ci.sh +++ b/ci/ci.sh @@ -459,6 +459,7 @@ build_wheels() { -e "BUILDKITE_PULL_REQUEST=${BUILDKITE_PULL_REQUEST:-}" -e "BUILDKITE_BAZEL_CACHE_URL=${BUILDKITE_BAZEL_CACHE_URL:-}" -e "RAY_DEBUG_BUILD=${RAY_DEBUG_BUILD:-}" + -e "BUILD_ONE_PYTHON_ONLY=${BUILD_ONE_PYTHON_ONLY:-}" ) IMAGE_NAME="quay.io/pypa/manylinux2014_${HOSTTYPE}" diff --git a/python/build-wheel-manylinux2014.sh b/python/build-wheel-manylinux2014.sh index df488e0d47f7..9e843fd2c406 100755 --- a/python/build-wheel-manylinux2014.sh +++ b/python/build-wheel-manylinux2014.sh @@ -12,33 +12,30 @@ EOF chmod +x /usr/bin/nproc NODE_VERSION="14" -PYTHONS=("cp37-cp37m" - "cp38-cp38" - "cp39-cp39" - "cp310-cp310" - "cp311-cp311") - -NUMPY_VERSIONS=("1.14.5" - "1.14.5" - "1.19.3" - "1.22.0" - "1.22.0") + +# Python version key, interpreter version code, numpy tuples. 
+PYTHON_NUMPYS=( + "3.7 cp37-cp37m 1.14.5" + "3.8 cp38-cp38 1.14.5" + "3.9 cp39-cp39 1.19.3" + "3.10 cp310-cp310 1.22.0" + "3.11 cp311-cp311 1.22.0" +) yum -y install unzip zip sudo yum -y install java-1.8.0-openjdk java-1.8.0-openjdk-devel xz yum -y install openssl -if [ "${HOSTTYPE-}" = "x86_64" ]; then +if [[ "${HOSTTYPE-}" == "x86_64" ]]; then yum install "libasan-4.8.5-44.el7.${HOSTTYPE}" -y yum install "libubsan-7.3.1-5.10.el7.${HOSTTYPE}" -y yum install "devtoolset-8-libasan-devel.${HOSTTYPE}" -y fi java -version -java_bin=$(readlink -f "$(command -v java)") -echo "java_bin path $java_bin" -java_home=${java_bin%jre/bin/java} -export JAVA_HOME="$java_home" +JAVA_BIN="$(readlink -f "$(command -v java)")" +echo "java_bin path ${JAVA_BIN}" +export JAVA_HOME="${JAVA_BIN%jre/bin/java}" /ray/ci/env/install-bazel.sh # Put bazel into the PATH if building Bazel from source @@ -63,10 +60,11 @@ nvm use "$NODE_VERSION" # Build the dashboard so its static assets can be included in the wheel. # TODO(mfitton): switch this back when deleting old dashboard code. -pushd python/ray/dashboard/client +( + cd python/ray/dashboard/client npm ci npm run build -popd +) set -x # Add the repo folder to the safe.dictory global variable to avoid the failure @@ -75,9 +73,16 @@ set -x git config --global --add safe.directory /ray mkdir -p .whl -for ((i=0; i<${#PYTHONS[@]}; ++i)); do - PYTHON=${PYTHONS[i]} - NUMPY_VERSION=${NUMPY_VERSIONS[i]} +for PYTHON_NUMPY in "${PYTHON_NUMPYS[@]}" ; do + PYTHON_VERSION_KEY="$(echo "${PYTHON_NUMPY}" | cut -d' ' -f1)" + if [[ "${BUILD_ONE_PYTHON_ONLY:-}" != "" && "${PYTHON_VERSION_KEY}" != "${BUILD_ONE_PYTHON_ONLY}" ]]; then + continue + fi + + PYTHON="$(echo "${PYTHON_NUMPY}" | cut -d' ' -f2)" + NUMPY_VERSION="$(echo "${PYTHON_NUMPY}" | cut -d' ' -f3)" + + echo "---- Build wheel for ${PYTHON}, numpy=${NUMPY_VERSION}" # The -f flag is passed twice to also run git clean in the arrow subdirectory. # The -d flag removes directories. 
The -x flag ignores the .gitignore file, @@ -85,12 +90,13 @@ for ((i=0; i<${#PYTHONS[@]}; ++i)); do # dashboard directory and jars directory. git clean -f -f -x -d -e .whl -e python/ray/dashboard/client -e dashboard/client -e python/ray/jars - pushd python + ( + cd python # Fix the numpy version because this will be the oldest numpy version we can # support. /opt/python/"${PYTHON}"/bin/pip install -q numpy=="${NUMPY_VERSION}" cython==0.29.32 # Set the commit SHA in __init__.py. - if [ -n "$TRAVIS_COMMIT" ]; then + if [[ -n "$TRAVIS_COMMIT" ]]; then sed -i.bak "s/{{RAY_COMMIT_SHA}}/$TRAVIS_COMMIT/g" ray/__init__.py && rm ray/__init__.py.bak else echo "TRAVIS_COMMIT variable not set - required to populated ray.__commit__." @@ -98,22 +104,24 @@ for ((i=0; i<${#PYTHONS[@]}; ++i)); do fi # build ray wheel - PATH=/opt/python/${PYTHON}/bin:/root/bazel-3.2.0/output:$PATH \ - /opt/python/"${PYTHON}"/bin/python setup.py bdist_wheel + PATH="/opt/python/${PYTHON}/bin:/root/bazel-3.2.0/output:$PATH" \ + "/opt/python/${PYTHON}/bin/python" setup.py bdist_wheel + # build ray-cpp wheel - PATH=/opt/python/${PYTHON}/bin:/root/bazel-3.2.0/output:$PATH \ - RAY_INSTALL_CPP=1 /opt/python/"${PYTHON}"/bin/python setup.py bdist_wheel + PATH="/opt/python/${PYTHON}/bin:/root/bazel-3.2.0/output:$PATH" \ + RAY_INSTALL_CPP=1 "/opt/python/${PYTHON}/bin/python" setup.py bdist_wheel + # In the future, run auditwheel here. mv dist/*.whl ../.whl/ - popd + ) done # Rename the wheels so that they can be uploaded to PyPI. TODO(rkn): This is a # hack, we should use auditwheel instead. 
for path in .whl/*.whl; do - if [ -f "${path}" ]; then + if [[ -f "${path}" ]]; then out="${path//-linux/-manylinux2014}" - if [ "$out" != "$path" ]; then + if [[ "$out" != "$path" ]]; then mv "${path}" "${out}" fi fi From 3fcb5081f861852e1c6232e780ed8fedfaad6a7f Mon Sep 17 00:00:00 2001 From: Chen Shen Date: Wed, 3 May 2023 13:16:34 -0700 Subject: [PATCH 218/424] [autoscaler v2][2/n] introduce instance_manager protobuf (#34977) Why are these changes needed? this is the stack of PRs to introduce new node_provider for autoscaler v2. Stack of PRs #34976 #34977 <- this PR #34979 #34983 #34985 This PR introduces the instance_manager interface that autoscaler used to requests for new nodes. InstanceManagerSerivce allows Autoscaler to get current launched/launching nodes belongs to this cluster. To do so it provides 3 APIs GetInstanceManagerState: Returns both launching and launched nodes belongs to this cluster. UpdateInstanceManagerState: Launching or killing nodes, conditioned on Instance manager's version id, or number of successfully applied adjustments. GetAvailableInstanceTypes: Get the list of available instance types. 
The InstanceManager is expected to be strongly consistent and durable (or fate share with the cluster at least) --- src/ray/protobuf/BUILD | 13 ++ .../protobuf/experimental/autoscaler.proto | 48 +----- .../experimental/instance_manager.proto | 151 ++++++++++++++++++ 3 files changed, 167 insertions(+), 45 deletions(-) create mode 100644 src/ray/protobuf/experimental/instance_manager.proto diff --git a/src/ray/protobuf/BUILD b/src/ray/protobuf/BUILD index 3ab65258b760..0bd29efab106 100644 --- a/src/ray/protobuf/BUILD +++ b/src/ray/protobuf/BUILD @@ -46,6 +46,16 @@ proto_library( ], ) +proto_library( + name = "instance_manager_proto", + srcs = ["experimental/instance_manager.proto"], +) + +python_grpc_compile( + name = "instance_manager_py_proto", + deps = [":instance_manager_proto"], +) + proto_library( name = "runtime_env_common_proto", srcs = ["runtime_env_common.proto"], @@ -351,6 +361,9 @@ cc_proto_library( proto_library( name = "autoscaler_proto", srcs = ["experimental/autoscaler.proto"], + deps = [ + ":instance_manager_proto", + ], ) python_grpc_compile( diff --git a/src/ray/protobuf/experimental/autoscaler.proto b/src/ray/protobuf/experimental/autoscaler.proto index 93bbf98f72df..6cd8c0f7fbd0 100644 --- a/src/ray/protobuf/experimental/autoscaler.proto +++ b/src/ray/protobuf/experimental/autoscaler.proto @@ -15,7 +15,9 @@ syntax = "proto3"; option cc_enable_arenas = true; -package ray.rpc; +package ray.autoscaler; + +import "src/ray/protobuf/experimental/instance_manager.proto"; // ============= Cluster Resources ==================== // @@ -151,50 +153,6 @@ message GetClusterResourceStateReply { repeated ClusterResourceConstraint cluster_resource_constraints = 6; } -message Instance { - enum InstanceStatus { - // The unspecified state - most likey it is queued. - INSTANCE_STATUS_UNSPECIFIED = 0; - // Instance is starting. The first state update received from the - // instance. 
- STARTING = 1; - // The instance is running - one of two states of a healthy instance. - RUNNING = 2; - // The instance is idle - one of two states of a healthy instance. - IDLE = 3; - // The instance is stopping - usually follows from the RUNNING, IDLE, - // PREEMPT_REQUEST or DRAIN_REQUEST state. - STOPPING = 4; - // The instance is stopped - follows from the STOPPING state. - STOPPED = 5; - // The instance is in a bad state - but it is still able to send updates. - FAILING = 6; - // The subscribe service moves instances to this state if they - // have been idle for too long. This allows the cluster manager to - // make a final decision on whether or not to commence a drain - // sequence for this instance. - DRAIN_CONFIRMATION_PENDING = 7; - // The instance should be drained, Ray should start draining process - // but could reject if failed to drain. - DRAIN_REQUEST = 8; - // The instance will be reempted by the instance manager, regardless - // of whether it is drainable or not. - PREEMPT_REQUEST = 9; - } - // an unique id for the instance that's generated by the - // instance manager. This may be optional if - // the instance hasn't be started yet. - string instance_id = 11; - // the status of the instance. - InstanceStatus status = 12; - // the node id of the instance. - string node_type = 13; - // The corresponding total resources on the node. - map total_resources = 14; - // timestamp of the last state changed. - int64 timestamp_since_last_state_change = 15; -} - message ReportAutoscalingStateRequest { int64 last_seen_cluster_resource_state_version = 1; // A monotonically increasing version identifies diff --git a/src/ray/protobuf/experimental/instance_manager.proto b/src/ray/protobuf/experimental/instance_manager.proto new file mode 100644 index 000000000000..7278899a3b63 --- /dev/null +++ b/src/ray/protobuf/experimental/instance_manager.proto @@ -0,0 +1,151 @@ +// Copyright 2017 The Ray Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; +option cc_enable_arenas = true; + +package ray.autoscaler; + +enum RayNodeKind { + UNKNOWN = 0; + HEAD = 1; + WORKER = 2; +} + +// A node type that's available for the cluster. +message InstanceType { + // the name of the instance type. e.g. "m4.large" + string type_name = 1; + RayNodeKind node_kind = 2; + // available resources on the node. such as {"CPU": 4, "GPU": 1} + map resources = 3; +} + +message GetAvailableInstanceTypesRequest {} + +message GetAvailableInstanceTypesResponse { + repeated InstanceType instance_types = 1; + // number of instances that are available for starting. + // this can change if the cloud provider has a limit on + // number of instances that can be started. + int64 available_instances = 2; +} + +// Represents the state of a launched instance. +// An instance is considered launched as long as +// it has a unique instance_id associated with it. +// +// Note a launched instance may be DEAD. In this case, +// the state will be garbage collected after some timeout +// period (by default 30 minutes). +message Instance { + enum InstanceStatus { + // The unspecified state - most likely it is queued. + INSTANCE_STATUS_UNSPECIFIED = 0; + // Instance is starting. The first state update received from the + // instance. + STARTING = 1; + // The instance is running - one of two states of a healthy instance.
+ RUNNING = 2; + // The instance is idle - one of two states of a healthy instance. + IDLE = 3; + // The instance is stopping - usually follows from the RUNNING, IDLE, + // PREEMPT_REQUEST or DRAIN_REQUEST state. + STOPPING = 4; + // The instance is stopped - follows from the STOPPING state. + STOPPED = 5; + // The instance is in a bad state - but it is still able to send updates. + FAILING = 6; + // The subscribe service moves instances to this state if they + // have been idle for too long. This allows the cluster manager to + // make a final decision on whether or not to commence a drain + // sequence for this instance. + DRAIN_CONFIRMATION_PENDING = 7; + // The instance should be drained, Ray should start draining process + // but could reject if failed to drain. + DRAIN_REQUEST = 8; + // The instance will be preempted by the instance manager, regardless + // of whether it is drainable or not. + PREEMPT_REQUEST = 9; + // An optional state that can be used to indicate that the instance + // is allocated from cloud provider, but ray hasn't been installed yet. + INSTANCE_ALLOCATED = 10; + // An optional state that can be used to indicate that the instance + // is currently installing Ray. + INSTALLING_RAY = 11; + // An optional state that can be used to indicate that the instance + // failed to allocate from cloud provider. + ALLOCATION_FAILED = 12; + // Node is deleted. + GARAGE_COLLECTED = 13; + } + // a unique id for the instance that's generated by the + // instance manager. This may be optional if + // the instance hasn't been started yet. + string instance_id = 11; + // the status of the instance. + InstanceStatus status = 12; + // the node type of the instance. + string node_type = 13; + // The corresponding total resources on the node. + map total_resources = 14; + // timestamp of the last state changed. + int64 timestamp_since_last_state_change = 15; + // the external id of the instance that's generated by + // the cloud provider like AWS, GCP, etc.
+ // Note this id can be reused by different instances. + string cloud_instance_id = 16; + // internal ip address of the instance. + string internal_ip = 17; + // external ip address of the instance. + string external_ip = 18; + // the monotonically increasing version number of the instance. + int64 version = 19; +} + +message UpdateInstanceManagerStateRequest { + int64 expected_version = 1; + repeated InstanceType new_nodes_to_start = 2; + repeated string instance_ids_to_terminate = 3; +} + +message UpdateInstanceManagerStateReply { + bool success = 1; + string error_message = 2; + int64 version = 3; +} + +message InstanceManagerState { + // a monotonically increasing version number. + // the version number is incremented whenever + // the state is updated (either by successful adjusting request, + // or instance state change). + int64 version = 1; + repeated Instance instances = 2; +} + +message GetInstanceManagerStateRequest {} + +message GetInstanceManagerStateReply { + InstanceManagerState state = 1; +} + +service InstanceManagerService { + rpc GetInstanceManagerState(GetInstanceManagerStateRequest) + returns (GetInstanceManagerStateReply); + rpc UpdateInstanceManagerState(UpdateInstanceManagerStateRequest) + returns (UpdateInstanceManagerStateReply); + rpc GetAvailableInstanceTypes(GetAvailableInstanceTypesRequest) + returns (GetAvailableInstanceTypesResponse); +} From 47a0c6c9eaa6bed82806b0e33ab699c4d485fbfe Mon Sep 17 00:00:00 2001 From: Eric Liang Date: Wed, 3 May 2023 14:42:39 -0700 Subject: [PATCH 219/424] [doc] [data] Fix a couple stray broken tests still (#35011) --- doc/BUILD | 3 ++- doc/source/data/doc_code/torch_image_batch_trained.py | 4 ++-- doc/source/ray-air/examples/gptj_batch_prediction.ipynb | 2 +- .../ray-air/examples/stablediffusion_batch_prediction.ipynb | 2 +- python/ray/data/datastream.py | 2 ++ 5 files changed, 8 insertions(+), 5 deletions(-) diff --git a/doc/BUILD b/doc/BUILD index 61958193ca1e..13f8beed62aa 100644 --- a/doc/BUILD +++ 
b/doc/BUILD @@ -223,7 +223,8 @@ py_test_run_all_subdirectory( include = ["source/data/doc_code/*.py"], exclude = [ "source/ray-air/doc_code/predictors.py", - "source/data/doc_code/loading_data_untested.py" + "source/data/doc_code/loading_data_untested.py", + "source/data/doc_code/torch_image_batch_trained.py" ], extra_srcs = [], tags = ["exclusive", "team:data"], diff --git a/doc/source/data/doc_code/torch_image_batch_trained.py b/doc/source/data/doc_code/torch_image_batch_trained.py index 00f06a72c509..5b5c42c7d0f8 100644 --- a/doc/source/data/doc_code/torch_image_batch_trained.py +++ b/doc/source/data/doc_code/torch_image_batch_trained.py @@ -2,11 +2,11 @@ # isort: skip_file # fmt: off -ray.init(num_gpus=4) - # __pt_load_start__ import ray +ray.init(num_gpus=4) + data_url = "s3://anonymous@air-example-data-2/1G-image-data-synthetic-raw" # <1> ds = ray.data.read_images(data_url).limit(1000) # <2> # __pt_load_end__ diff --git a/doc/source/ray-air/examples/gptj_batch_prediction.ipynb b/doc/source/ray-air/examples/gptj_batch_prediction.ipynb index d748f029b392..3b40100f5d0a 100644 --- a/doc/source/ray-air/examples/gptj_batch_prediction.ipynb +++ b/doc/source/ray-air/examples/gptj_batch_prediction.ipynb @@ -168,7 +168,7 @@ " batch_size=4,\n", " fn_constructor_kwargs=dict(model_id=model_id, revision=revision),\n", " batch_format=\"pandas\",\n", - " compute=\"actors\",\n", + " compute=ray.data.ActorPoolStrategy(),\n", " num_gpus=1,\n", " )\n", ")" diff --git a/doc/source/ray-air/examples/stablediffusion_batch_prediction.ipynb b/doc/source/ray-air/examples/stablediffusion_batch_prediction.ipynb index 444f02bebb70..387bdea5d80b 100644 --- a/doc/source/ray-air/examples/stablediffusion_batch_prediction.ipynb +++ b/doc/source/ray-air/examples/stablediffusion_batch_prediction.ipynb @@ -160,7 +160,7 @@ " PredictCallable,\n", " batch_size=1,\n", " fn_constructor_kwargs=dict(model_id=model_id),\n", - " compute=\"actors\",\n", + " compute=ray.data.ActorPoolStrategy(),\n", " 
batch_format=\"pandas\",\n", " num_gpus=1,\n", ")\n", diff --git a/python/ray/data/datastream.py b/python/ray/data/datastream.py index 44c8847b8b48..240781801942 100644 --- a/python/ray/data/datastream.py +++ b/python/ray/data/datastream.py @@ -4377,6 +4377,8 @@ def __setstate__(self, state): self._current_executor = None def __del__(self): + if sys.meta_path is None: + return if self._current_executor and ray is not None and ray.is_initialized(): self._current_executor.shutdown() From 07390f12ed64489086f2c3abefc56d5768218a19 Mon Sep 17 00:00:00 2001 From: Kai Fricke Date: Wed, 3 May 2023 23:14:12 +0100 Subject: [PATCH 220/424] [air] Fix `test_tune_torch_get_device_gpu` race condition (#35004) The `test_tune_torch_get_device_gpu` test is flaky. Recently, the flakiness has been increased after switching to the new execution backend (presumably because of speedups in experiment start). Due to the way the test is constructed, it keeps a Ray cluster alive. This then leads later tests in the same test suite to fail, as they try to re-initialize a Ray cluster. This PR fixes the underlying cause of the race condition and implements a mitigation. 
Signed-off-by: Kai Fricke --- python/ray/train/tests/test_torch_trainer.py | 25 ++++++++++++++------ python/ray/tune/execution/trial_runner.py | 5 +++- 2 files changed, 22 insertions(+), 8 deletions(-) diff --git a/python/ray/train/tests/test_torch_trainer.py b/python/ray/train/tests/test_torch_trainer.py index cecbbd2c8baf..a2e2dc306f1a 100644 --- a/python/ray/train/tests/test_torch_trainer.py +++ b/python/ray/train/tests/test_torch_trainer.py @@ -1,4 +1,6 @@ import contextlib +import uuid + import pytest import time import torch @@ -11,7 +13,7 @@ from ray.train.batch_predictor import BatchPredictor from ray.train.constants import DISABLE_LAZY_CHECKPOINTING_ENV from ray.train.torch import TorchPredictor, TorchTrainer -from ray.air.config import ScalingConfig +from ray.air.config import RunConfig, ScalingConfig from ray.train.torch import TorchConfig from ray.train.trainer import TrainingFailedError import ray.train as train @@ -258,7 +260,6 @@ def test_tune_torch_get_device_gpu(num_gpus_per_worker): (for example when used with Tune). """ from ray.air.config import ScalingConfig - import time num_samples = 2 num_workers = 2 @@ -269,6 +270,7 @@ def test_tune_torch_get_device_gpu(num_gpus_per_worker): # Divide by two because of a 2 node cluster. gpus_per_node = total_gpus_required // 2 + exception = None # Use the same number of cpus per node as gpus per node. 
with ray_start_2_node_cluster( num_cpus_per_node=gpus_per_node, num_gpus_per_node=gpus_per_node @@ -290,12 +292,14 @@ def train_fn(): @ray.remote(num_cpus=0) class TrialActor: def __init__(self, warmup_steps): - # adding warmup_steps to the config - # to avoid the error of checkpoint name conflict - time.sleep(2 * warmup_steps) self.trainer = TorchTrainer( train_fn, torch_config=TorchConfig(backend="gloo"), + run_config=RunConfig( + # Use a unique name to avoid using the same + # experiment directory + name=f"test_tune_torch_get_device_gpu_{uuid.uuid4()}" + ), scaling_config=ScalingConfig( num_workers=num_workers, use_gpu=True, @@ -313,8 +317,15 @@ def __init__(self, warmup_steps): def run(self): return self.trainer.fit() - actors = [TrialActor.remote(1) for _ in range(num_samples)] - ray.get([actor.run.remote() for actor in actors]) + try: + actors = [TrialActor.remote(1) for _ in range(num_samples)] + ray.get([actor.run.remote() for actor in actors]) + except Exception as exc: + exception = exc + + # Raise exception after Ray cluster has been shutdown to avoid corrupted state + if exception: + raise exception def test_torch_auto_unwrap(ray_start_4_cpus): diff --git a/python/ray/tune/execution/trial_runner.py b/python/ray/tune/execution/trial_runner.py index c3cc20abe0e5..f912fdf6f0c6 100644 --- a/python/ray/tune/execution/trial_runner.py +++ b/python/ray/tune/execution/trial_runner.py @@ -1,3 +1,4 @@ +import uuid from typing import Any, Dict, List, Optional, Union, Tuple, Set from datetime import datetime @@ -364,7 +365,9 @@ def save_to_dir(self, experiment_dir: Optional[str] = None): }, } - tmp_file_name = os.path.join(experiment_dir, ".tmp_experiment_state") + tmp_file_name = os.path.join( + experiment_dir, f".tmp_experiment_state_{uuid.uuid4()}" + ) with open(tmp_file_name, "w") as f: json.dump(runner_state, f, indent=2, cls=TuneFunctionEncoder) From f2b2a52998bf98b76de4da0108d86cf5c8732df5 Mon Sep 17 00:00:00 2001 From: Chao Wang 
<125417081+chaowanggg@users.noreply.github.com> Date: Wed, 3 May 2023 17:54:20 -0700 Subject: [PATCH 221/424] [UI] Change text for job detail page (#35023) There is an inconsistency of the naming in job detail page. Add "table" to the name --- dashboard/client/src/pages/job/JobDetail.tsx | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/dashboard/client/src/pages/job/JobDetail.tsx b/dashboard/client/src/pages/job/JobDetail.tsx index 3e630c5e1c6f..1861158bd2e9 100644 --- a/dashboard/client/src/pages/job/JobDetail.tsx +++ b/dashboard/client/src/pages/job/JobDetail.tsx @@ -257,7 +257,7 @@ export const JobDetailChartsPage = () => { { setActorTableExpanded(!actorTableExpanded); @@ -274,7 +274,10 @@ export const JobDetailChartsPage = () => { - +

    \n", @@ -979,7 +981,7 @@ { "data": { "text/plain": [ - "HuggingFaceCheckpoint(local_path=/home/ray/ray_results/HuggingFaceTrainer_2023-03-06_16-35-29/HuggingFaceTrainer_f623d_00000_0_2023-03-06_16-35-30/checkpoint_000000)" + "TransformersCheckpoint(local_path=/home/ray/ray_results/TransformersTrainer_2023-03-06_16-35-29/TransformersTrainer_f623d_00000_0_2023-03-06_16-35-30/checkpoint_000000)" ] }, "execution_count": 18, @@ -998,13 +1000,13 @@ "source": [ "### Generate text from prompt\n", "\n", - "We can use the {class}`~ray.train.huggingface.huggingface_predictor.HuggingFacePredictor` to generate predictions from our fine-tuned model.\n", + "We can use the {class}`~ray.train.hf_transformers.huggingface_predictor.TransformersPredictor` to generate predictions from our fine-tuned model.\n", "\n", "```{tip}\n", "For large scale batch inference, consider configuring cloud checkpointing and then pass the cloud-backed {class}`~ray.air.checkpoint.Checkpoint` to {class}`~ray.train.batch_predictor.BatchPredictor`. More information [here](air-predictors).\n", "```\n", "\n", - "Because the {class}`~ray.train.huggingface.huggingface_predictor.HuggingFacePredictor` uses a 🤗 Transformers [`pipeline`](https://huggingface.co/docs/transformers/en/main_classes/pipelines) under the hood, we disable the tokenizer AIR Preprocessor we have used for training and let the `pipeline` to tokenize the data itself." + "Because the {class}`~ray.train.hf_transformers.huggingface_predictor.TransformersPredictor` uses a 🤗 Transformers [`pipeline`](https://huggingface.co/docs/transformers/en/main_classes/pipelines) under the hood, we disable the tokenizer AIR Preprocessor we have used for training and let the `pipeline` to tokenize the data itself." 
] }, { @@ -1030,13 +1032,13 @@ "metadata": {}, "outputs": [], "source": [ - "from ray.train.huggingface import HuggingFacePredictor\n", + "from ray.train.hf_transformers import TransformersPredictor\n", "import pandas as pd\n", "\n", "prompts = pd.DataFrame([\"Romeo and Juliet\", \"Romeo\", \"Juliet\"], columns=[\"text\"])\n", "\n", "# Predict on the head node.\n", - "predictor = HuggingFacePredictor.from_checkpoint(\n", + "predictor = TransformersPredictor.from_checkpoint(\n", " checkpoint=checkpoint,\n", " task=\"text-generation\",\n", " torch_dtype=torch.float16 if use_gpu else None,\n", diff --git a/doc/source/ray-air/examples/huggingface_text_classification.ipynb b/doc/source/ray-air/examples/huggingface_text_classification.ipynb index b5db41eafc52..3f3e70612446 100644 --- a/doc/source/ray-air/examples/huggingface_text_classification.ipynb +++ b/doc/source/ray-air/examples/huggingface_text_classification.ipynb @@ -522,13 +522,13 @@ "\n", "We will not go into details about each specific component of the training (see the [original notebook](https://github.com/huggingface/notebooks/blob/6ca682955173cc9d36ffa431ddda505a048cbe80/examples/text_classification.ipynb) for that). The tokenizer is the same as we have used to encoded the dataset before.\n", "\n", - "The main difference when using the Ray AIR is that we need to create our 🤗 Transformers `Trainer` inside a function (`trainer_init_per_worker`) and return it. That function will be passed to the `HuggingFaceTrainer` and will run on every Ray worker. The training will then proceed by the means of PyTorch DDP.\n", + "The main difference when using the Ray AIR is that we need to create our 🤗 Transformers `Trainer` inside a function (`trainer_init_per_worker`) and return it. That function will be passed to the `TransformersTrainer` and will run on every Ray worker. 
The training will then proceed by the means of PyTorch DDP.\n", "\n", "Make sure that you initialize the model, metric, and tokenizer inside that function. Otherwise, you may run into serialization errors.\n", "\n", "Furthermore, `push_to_hub=True` is not yet supported. Ray will, however, checkpoint the model at every epoch, allowing you to push it to hub manually. We will do that after the training.\n", "\n", - "If you wish to use thrid party logging libraries, such as MLflow or Weights&Biases, do not set them in `TrainingArguments` (they will be automatically disabled) - instead, you should pass Ray AIR callbacks to `HuggingFaceTrainer`'s `run_config`. In this example, we will use MLflow." + "If you wish to use thrid party logging libraries, such as MLflow or Weights&Biases, do not set them in `TrainingArguments` (they will be automatically disabled) - instead, you should pass Ray AIR callbacks to `TransformersTrainer`'s `run_config`. In this example, we will use MLflow." ] }, { @@ -596,7 +596,7 @@ "id": "CdzABDVcIrJg" }, "source": [ - "With our `trainer_init_per_worker` complete, we can now instantiate the `HuggingFaceTrainer`. Aside from the function, we set the `scaling_config`, controlling the amount of workers and resources used, and the `datasets` we will use for training and evaluation.\n", + "With our `trainer_init_per_worker` complete, we can now instantiate the `TransformersTrainer`. Aside from the function, we set the `scaling_config`, controlling the amount of workers and resources used, and the `datasets` we will use for training and evaluation.\n", "\n", "We specify the `MLflowLoggerCallback` inside the `run_config`, and pass the preprocessor we have defined earlier as an argument. The preprocessor will be included with the returned `Checkpoint`, meaning it will also be applied during inference." 
] @@ -609,11 +609,11 @@ }, "outputs": [], "source": [ - "from ray.train.huggingface import HuggingFaceTrainer\n", + "from ray.train.hf_transformers import TransformersTrainer\n", "from ray.air.config import RunConfig, ScalingConfig, CheckpointConfig\n", "from ray.air.integrations.mlflow import MLflowLoggerCallback\n", "\n", - "trainer = HuggingFaceTrainer(\n", + "trainer = TransformersTrainer(\n", " trainer_init_per_worker=trainer_init_per_worker,\n", " scaling_config=ScalingConfig(num_workers=num_workers, use_gpu=use_gpu),\n", " datasets={\n", @@ -656,12 +656,12 @@ { "data": { "text/html": [ - "== Status ==
    Current time: 2022-08-25 10:14:09 (running for 00:04:06.45)
    Memory usage on this node: 4.3/62.0 GiB
    Using FIFO scheduling algorithm.
    Resources requested: 0/208 CPUs, 0/16 GPUs, 0.0/574.34 GiB heap, 0.0/241.51 GiB objects (0.0/4.0 accelerator_type:T4)
    Result logdir: /home/ray/ray_results/HuggingFaceTrainer_2022-08-25_10-10-02
    Number of trials: 1/1 (1 TERMINATED)
    \n", + "== Status ==
    Current time: 2022-08-25 10:14:09 (running for 00:04:06.45)
    Memory usage on this node: 4.3/62.0 GiB
    Using FIFO scheduling algorithm.
    Resources requested: 0/208 CPUs, 0/16 GPUs, 0.0/574.34 GiB heap, 0.0/241.51 GiB objects (0.0/4.0 accelerator_type:T4)
    Result logdir: /home/ray/ray_results/TransformersTrainer_2022-08-25_10-10-02
    Number of trials: 1/1 (1 TERMINATED)
    \n", "\n", "\n", "\n", "\n", - "\n", + "\n", "\n", "
    Trial name status loc iter total time (s) loss learning_rate epoch
    HuggingFaceTrainer_c1ff5_00000TERMINATED172.31.90.137:947 2 200.2170.3886 0 2
    TransformersTrainer_c1ff5_00000TERMINATED172.31.90.137:947 2 200.2170.3886 0 2


    " ], @@ -823,7 +823,7 @@ "name": "stdout", "output_type": "stream", "text": [ - "Result for HuggingFaceTrainer_c1ff5_00000:\n", + "Result for TransformersTrainer_c1ff5_00000:\n", " _time_this_iter_s: 90.87123560905457\n", " _timestamp: 1661447540\n", " _training_iteration: 1\n", @@ -923,7 +923,7 @@ "name": "stdout", "output_type": "stream", "text": [ - "Result for HuggingFaceTrainer_c1ff5_00000:\n", + "Result for TransformersTrainer_c1ff5_00000:\n", " _time_this_iter_s: 96.96447467803955\n", " _timestamp: 1661447637\n", " _training_iteration: 2\n", @@ -957,7 +957,7 @@ " trial_id: c1ff5_00000\n", " warmup_time: 0.003858327865600586\n", " \n", - "Result for HuggingFaceTrainer_c1ff5_00000:\n", + "Result for TransformersTrainer_c1ff5_00000:\n", " _time_this_iter_s: 96.96447467803955\n", " _timestamp: 1661447637\n", " _training_iteration: 2\n", @@ -1029,7 +1029,7 @@ { "data": { "text/plain": [ - "Result(metrics={'loss': 0.3886, 'learning_rate': 0.0, 'epoch': 2.0, 'step': 1070, 'eval_loss': 0.6215357184410095, 'eval_matthews_correlation': 0.42957017514952434, 'eval_runtime': 0.9956, 'eval_samples_per_second': 273.204, 'eval_steps_per_second': 5.022, 'train_runtime': 174.4696, 'train_samples_per_second': 98.023, 'train_steps_per_second': 6.133, 'train_loss': 0.4661755713346963, '_timestamp': 1661447637, '_time_this_iter_s': 96.96447467803955, '_training_iteration': 2, 'should_checkpoint': True, 'done': True, 'trial_id': 'c1ff5_00000', 'experiment_tag': '0'}, error=None, log_dir=PosixPath('/home/ray/ray_results/HuggingFaceTrainer_2022-08-25_10-10-02/HuggingFaceTrainer_c1ff5_00000_0_2022-08-25_10-10-04'))" + "Result(metrics={'loss': 0.3886, 'learning_rate': 0.0, 'epoch': 2.0, 'step': 1070, 'eval_loss': 0.6215357184410095, 'eval_matthews_correlation': 0.42957017514952434, 'eval_runtime': 0.9956, 'eval_samples_per_second': 273.204, 'eval_steps_per_second': 5.022, 'train_runtime': 174.4696, 'train_samples_per_second': 98.023, 'train_steps_per_second': 6.133, 'train_loss': 
0.4661755713346963, '_timestamp': 1661447637, '_time_this_iter_s': 96.96447467803955, '_training_iteration': 2, 'should_checkpoint': True, 'done': True, 'trial_id': 'c1ff5_00000', 'experiment_tag': '0'}, error=None, log_dir=PosixPath('/home/ray/ray_results/TransformersTrainer_2022-08-25_10-10-02/TransformersTrainer_c1ff5_00000_0_2022-08-25_10-10-04'))" ] }, "execution_count": 16, @@ -1052,7 +1052,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "If we would like to tune any hyperparameters of the model, we can do so by simply passing our `HuggingFaceTrainer` into a `Tuner` and defining the search space.\n", + "If we would like to tune any hyperparameters of the model, we can do so by simply passing our `TransformersTrainer` into a `Tuner` and defining the search space.\n", "\n", "We can also take advantage of the advanced search algorithms and schedulers provided by Ray Tune. In this example, we will use an `ASHAScheduler` to aggresively terminate underperforming trials." ] @@ -1099,15 +1099,15 @@ "data": { "text/html": [ "== Status ==
    Current time: 2022-08-25 10:20:13 (running for 00:06:01.75)
    Memory usage on this node: 4.4/62.0 GiB
    Using AsyncHyperBand: num_stopped=4\n", - "Bracket: Iter 4.000: -0.8064090609550476 | Iter 1.000: -0.6378736793994904
    Resources requested: 0/208 CPUs, 0/16 GPUs, 0.0/574.34 GiB heap, 0.0/241.51 GiB objects (0.0/4.0 accelerator_type:T4)
    Current best trial: 5654d_00001 with eval_loss=0.6492420434951782 and parameters={'trainer_init_config': {'learning_rate': 0.0002, 'epochs': 4}}
    Result logdir: /home/ray/ray_results/HuggingFaceTrainer_2022-08-25_10-14-11
    Number of trials: 4/4 (4 TERMINATED)
    \n", + "Bracket: Iter 4.000: -0.8064090609550476 | Iter 1.000: -0.6378736793994904
    Resources requested: 0/208 CPUs, 0/16 GPUs, 0.0/574.34 GiB heap, 0.0/241.51 GiB objects (0.0/4.0 accelerator_type:T4)
    Current best trial: 5654d_00001 with eval_loss=0.6492420434951782 and parameters={'trainer_init_config': {'learning_rate': 0.0002, 'epochs': 4}}
    Result logdir: /home/ray/ray_results/TransformersTrainer_2022-08-25_10-14-11
    Number of trials: 4/4 (4 TERMINATED)
    \n", "\n", "\n", "\n", "\n", - "\n", - "\n", - "\n", - "\n", + "\n", + "\n", + "\n", + "\n", "\n", "
    Trial name status loc trainer_init_conf... iter total time (s) loss learning_rate epoch
    HuggingFaceTrainer_5654d_00000TERMINATED172.31.90.137:1729 2e-05 4 347.171 0.1958 0 4
    HuggingFaceTrainer_5654d_00001TERMINATED172.31.76.237:1805 0.0002 1 95.24920.6225 0.00015 1
    HuggingFaceTrainer_5654d_00002TERMINATED172.31.85.32:1322 0.002 1 93.76130.6463 0.0015 1
    HuggingFaceTrainer_5654d_00003TERMINATED172.31.85.193:1060 0.02 1 99.36770.926 0.015 1
    TransformersTrainer_5654d_00000TERMINATED172.31.90.137:1729 2e-05 4 347.171 0.1958 0 4
    TransformersTrainer_5654d_00001TERMINATED172.31.76.237:1805 0.0002 1 95.24920.6225 0.00015 1
    TransformersTrainer_5654d_00002TERMINATED172.31.85.32:1322 0.002 1 93.76130.6463 0.0015 1
    TransformersTrainer_5654d_00003TERMINATED172.31.85.193:1060 0.02 1 99.36770.926 0.015 1


    " ], @@ -1354,7 +1354,7 @@ "name": "stdout", "output_type": "stream", "text": [ - "Result for HuggingFaceTrainer_5654d_00000:\n", + "Result for TransformersTrainer_5654d_00000:\n", " _time_this_iter_s: 85.01727724075317\n", " _timestamp: 1661447753\n", " _training_iteration: 1\n", @@ -1419,7 +1419,7 @@ "name": "stdout", "output_type": "stream", "text": [ - "Result for HuggingFaceTrainer_5654d_00001:\n", + "Result for TransformersTrainer_5654d_00001:\n", " _time_this_iter_s: 84.79700112342834\n", " _timestamp: 1661447759\n", " _training_iteration: 1\n", @@ -1484,7 +1484,7 @@ "name": "stdout", "output_type": "stream", "text": [ - "Result for HuggingFaceTrainer_5654d_00002:\n", + "Result for TransformersTrainer_5654d_00002:\n", " _time_this_iter_s: 84.01720070838928\n", " _timestamp: 1661447764\n", " _training_iteration: 1\n", @@ -1549,7 +1549,7 @@ "name": "stdout", "output_type": "stream", "text": [ - "Result for HuggingFaceTrainer_5654d_00003:\n", + "Result for TransformersTrainer_5654d_00003:\n", " _time_this_iter_s: 89.4301290512085\n", " _timestamp: 1661447782\n", " _training_iteration: 1\n", @@ -1614,7 +1614,7 @@ "name": "stdout", "output_type": "stream", "text": [ - "Result for HuggingFaceTrainer_5654d_00000:\n", + "Result for TransformersTrainer_5654d_00000:\n", " _time_this_iter_s: 76.82565689086914\n", " _timestamp: 1661447830\n", " _training_iteration: 2\n", @@ -1679,7 +1679,7 @@ "name": "stdout", "output_type": "stream", "text": [ - "Result for HuggingFaceTrainer_5654d_00000:\n", + "Result for TransformersTrainer_5654d_00000:\n", " _time_this_iter_s: 76.47252488136292\n", " _timestamp: 1661447906\n", " _training_iteration: 3\n", @@ -1767,7 +1767,7 @@ "output_type": "stream", "text": [ "(RayTrainWorker pid=1789, ip=172.31.90.137) {'train_runtime': 329.1948, 'train_samples_per_second': 103.902, 'train_steps_per_second': 6.501, 'train_loss': 0.34860724689804506, 'epoch': 4.0}\n", - "Result for HuggingFaceTrainer_5654d_00000:\n", + "Result for 
TransformersTrainer_5654d_00000:\n", " _time_this_iter_s: 98.92064905166626\n", " _timestamp: 1661448005\n", " _training_iteration: 4\n", @@ -1894,7 +1894,7 @@ " 0.003661\n", " 4\n", " 0.00020\n", - " /home/ray/ray_results/HuggingFaceTrainer_2022-...\n", + " /home/ray/ray_results/TransformersTrainer_2022-...\n", " \n", " \n", " 3\n", @@ -1918,7 +1918,7 @@ " 0.004133\n", " 4\n", " 0.02000\n", - " /home/ray/ray_results/HuggingFaceTrainer_2022-...\n", + " /home/ray/ray_results/TransformersTrainer_2022-...\n", " \n", " \n", " 2\n", @@ -1942,7 +1942,7 @@ " 0.004533\n", " 4\n", " 0.00200\n", - " /home/ray/ray_results/HuggingFaceTrainer_2022-...\n", + " /home/ray/ray_results/TransformersTrainer_2022-...\n", " \n", " \n", " 0\n", @@ -1966,7 +1966,7 @@ " 0.003702\n", " 4\n", " 0.00002\n", - " /home/ray/ray_results/HuggingFaceTrainer_2022-...\n", + " /home/ray/ray_results/TransformersTrainer_2022-...\n", " \n", " \n", "\n", @@ -2005,10 +2005,10 @@ "0 4 0.00002 \n", "\n", " logdir \n", - "1 /home/ray/ray_results/HuggingFaceTrainer_2022-... \n", - "3 /home/ray/ray_results/HuggingFaceTrainer_2022-... \n", - "2 /home/ray/ray_results/HuggingFaceTrainer_2022-... \n", - "0 /home/ray/ray_results/HuggingFaceTrainer_2022-... \n", + "1 /home/ray/ray_results/TransformersTrainer_2022-... \n", + "3 /home/ray/ray_results/TransformersTrainer_2022-... \n", + "2 /home/ray/ray_results/TransformersTrainer_2022-... \n", + "0 /home/ray/ray_results/TransformersTrainer_2022-... \n", "\n", "[4 rows x 33 columns]" ] @@ -2044,7 +2044,7 @@ "id": "Tfoyu1q7hYbb" }, "source": [ - "You can now use the checkpoint to run prediction with `HuggingFacePredictor`, which wraps around [🤗 Pipelines](https://huggingface.co/docs/transformers/main_classes/pipelines). In order to distribute prediction, we use `BatchPredictor`. While this is not necessary for the very small example we are using (you could use `HuggingFacePredictor` directly), it will scale well to a large dataset." 
+ "You can now use the checkpoint to run prediction with `TransformersPredictor`, which wraps around [🤗 Pipelines](https://huggingface.co/docs/transformers/main_classes/pipelines). In order to distribute prediction, we use `BatchPredictor`. While this is not necessary for the very small example we are using (you could use `TransformersPredictor` directly), it will scale well to a large dataset." ] }, { @@ -2096,13 +2096,13 @@ } ], "source": [ - "from ray.train.huggingface import HuggingFacePredictor\n", + "from ray.train.hf_transformers import TransformersPredictor\n", "from ray.train.batch_predictor import BatchPredictor\n", "import pandas as pd\n", "\n", "predictor = BatchPredictor.from_checkpoint(\n", " checkpoint=best_result.checkpoint,\n", - " predictor_cls=HuggingFacePredictor,\n", + " predictor_cls=TransformersPredictor,\n", " task=\"text-classification\",\n", " device=0 if use_gpu else -1, # -1 is CPU, otherwise device index\n", ")\n", @@ -2189,9 +2189,9 @@ }, "outputs": [], "source": [ - "from ray.train.huggingface import HuggingFaceCheckpoint\n", + "from ray.train.hf_transformers import TransformersCheckpoint\n", "\n", - "checkpoint = HuggingFaceCheckpoint.from_checkpoint(result.checkpoint)\n", + "checkpoint = TransformersCheckpoint.from_checkpoint(result.checkpoint)\n", "hf_trainer = checkpoint.get_model(model=AutoModelForSequenceClassification)" ] }, diff --git a/doc/source/ray-air/trainers.rst b/doc/source/ray-air/trainers.rst index 9022ed097ad4..d7a35637e20f 100644 --- a/doc/source/ray-air/trainers.rst +++ b/doc/source/ray-air/trainers.rst @@ -145,17 +145,17 @@ Other Trainers Hugging Face ~~~~~~~~~~~~ -Transformers -************ +TransformersTrainer +******************* -:class:`HuggingFaceTrainer ` further extends :class:`TorchTrainer `, built +:class:`TransformersTrainer ` further extends :class:`TorchTrainer `, built for interoperability with the HuggingFace Transformers library. 
Users are required to provide a ``trainer_init_per_worker`` function which returns a ``transformers.Trainer`` object. The ``trainer_init_per_worker`` function will have access to preprocessed train and evaluation datasets. -Upon calling `HuggingFaceTrainer.fit()`, multiple workers (ray actors) will be spawned, +Upon calling `TransformersTrainer.fit()`, multiple workers (ray actors) will be spawned, and each worker will create its own copy of a ``transformers.Trainer``. Each worker will then invoke ``transformers.Trainer.train()``, which will perform distributed @@ -169,20 +169,20 @@ training via Pytorch DDP. :start-after: __hf_trainer_start__ :end-before: __hf_trainer_end__ -Accelerate -********** +AccelerateTrainer +***************** -If you prefer a more fine-grained Hugging Face API than what Transformers provides, you can use :class:`AccelerateTrainer ` -to run training functions making use of Hugging Face Accelerate. Similarly to :class:`HuggingFaceTrainer `, :class:`AccelerateTrainer ` +If you prefer a more fine-grained Hugging Face API than what Transformers provides, you can use :class:`AccelerateTrainer ` +to run training functions making use of Hugging Face Accelerate. Similarly to :class:`TransformersTrainer `, :class:`AccelerateTrainer ` is also an extension of :class:`TorchTrainer `. -:class:`AccelerateTrainer ` allows you to pass an Accelerate configuration file generated with ``accelerate config`` to be applied on all training workers. +:class:`AccelerateTrainer ` allows you to pass an Accelerate configuration file generated with ``accelerate config`` to be applied on all training workers. This ensures that the worker environments are set up correctly for Accelerate, allowing you to take advantage of Accelerate APIs and integrations such as DeepSpeed and FSDP just as you would if you were running Accelerate without Ray. .. 
note:: ``AccelerateTrainer`` will override some settings set with ``accelerate config``, mainly related to - the topology and networking. See the :class:`AccelerateTrainer ` + the topology and networking. See the :class:`AccelerateTrainer ` API reference for more details. Aside from Accelerate support, the usage is identical to :class:`TorchTrainer `, meaning you define your own training function diff --git a/doc/source/train/api/api.rst b/doc/source/train/api/api.rst index 52e106c632f1..36bb7a31ca2c 100644 --- a/doc/source/train/api/api.rst +++ b/doc/source/train/api/api.rst @@ -150,15 +150,25 @@ LightGBM ~train.lightgbm.LightGBMCheckpoint -HuggingFace -~~~~~~~~~~~ +Hugging Face +~~~~~~~~~~~~ + +Transformers +************ + +.. autosummary:: + :toctree: doc/ + + ~train.hf_transformers.TransformersTrainer + ~train.hf_transformers.TransformersCheckpoint + +Accelerate +********** .. autosummary:: :toctree: doc/ - ~train.huggingface.HuggingFaceTrainer - ~train.huggingface.HuggingFaceCheckpoint - ~train.huggingface.accelerate.AccelerateTrainer + ~train.hf_accelerate.AccelerateTrainer Scikit-Learn ~~~~~~~~~~~~ @@ -219,7 +229,7 @@ Restoration API for Built-in Trainers .. autosummary:: - train.huggingface.HuggingFaceTrainer.restore + train.hf_transformers.TransformersTrainer.restore .. note:: diff --git a/doc/source/train/config_guide.rst b/doc/source/train/config_guide.rst index 3ccfd2fc0279..e419949ace34 100644 --- a/doc/source/train/config_guide.rst +++ b/doc/source/train/config_guide.rst @@ -86,7 +86,7 @@ are :ref:`not tunable `. :end-before: __checkpoint_config_end__ Trainers of certain frameworks including :class:`~ray.train.xgboost.XGBoostTrainer`, -:class:`~ray.train.lightgbm.LightGBMTrainer`, and :class:`~ray.train.huggingface.HuggingFaceTrainer` +:class:`~ray.train.lightgbm.LightGBMTrainer`, and :class:`~ray.train.hf_transformers.TransformersTrainer` implement checkpointing out of the box. 
For these trainers, checkpointing can be enabled by setting the checkpoint frequency within the :class:`~ray.air.CheckpointConfig`. diff --git a/doc/source/train/key-concepts.rst b/doc/source/train/key-concepts.rst index 3e99c4d048d6..509c5c46167a 100644 --- a/doc/source/train/key-concepts.rst +++ b/doc/source/train/key-concepts.rst @@ -63,7 +63,7 @@ There are three categories of built-in Trainers: Some trainers don't fit into the other two categories, such as: - - :class:`HuggingFaceTrainer ` for NLP + - :class:`TransformersTrainer ` for NLP - :class:`RLTrainer ` for reinforcement learning - :class:`SklearnTrainer ` for (non-distributed) training of sklearn models. diff --git a/doc/source/train/train.rst b/doc/source/train/train.rst index e60033f329ad..c2d7e1b11223 100644 --- a/doc/source/train/train.rst +++ b/doc/source/train/train.rst @@ -109,9 +109,9 @@ classes that ship out of the box with Train: * - :class:`SklearnTrainer ` - :class:`SklearnCheckpoint ` - :class:`SklearnPredictor ` - * - :class:`HuggingFaceTrainer ` - - :class:`HuggingFaceCheckpoint ` - - :class:`HuggingFacePredictor ` + * - :class:`TransformersTrainer ` + - :class:`TransformersCheckpoint ` + - :class:`TransformersPredictor ` * - :class:`RLTrainer ` - :class:`RLCheckpoint ` - :class:`RLPredictor ` diff --git a/python/ray/air/_internal/usage.py b/python/ray/air/_internal/usage.py index b248f82fb501..1d5fac1689cb 100644 --- a/python/ray/air/_internal/usage.py +++ b/python/ray/air/_internal/usage.py @@ -12,7 +12,7 @@ AIR_TRAINERS = { "AccelerateTrainer", "HorovodTrainer", - "HuggingFaceTrainer", + "TransformersTrainer", "LightGBMTrainer", "LightningTrainer", "MosaicTrainer", diff --git a/python/ray/train/BUILD b/python/ray/train/BUILD index 1d932ca912a8..2d51df828f8b 100644 --- a/python/ray/train/BUILD +++ b/python/ray/train/BUILD @@ -344,46 +344,6 @@ py_test( deps = [":train_lib"] ) -py_test( - name = "test_huggingface_checkpoint", - size = "small", - srcs = 
["tests/test_huggingface_checkpoint.py"], - tags = ["team:ml", "exclusive", "ray_air"], - deps = [":train_lib", ":conftest"] -) - -py_test( - name = "test_huggingface_gpu", - size = "medium", - srcs = ["tests/test_huggingface_gpu.py"], - tags = ["team:ml", "exclusive", "gpu_only"], - deps = [":train_lib", ":conftest"] -) - -py_test( - name = "test_huggingface_predictor", - size = "medium", - srcs = ["tests/test_huggingface_predictor.py"], - tags = ["team:ml", "exclusive", "ray_air"], - deps = [":train_lib", ":conftest"] -) - -py_test( - name = "test_huggingface_trainer", - size = "large", - srcs = ["tests/test_huggingface_trainer.py"], - tags = ["team:ml", "exclusive", "ray_air"], - deps = [":train_lib"] -) - -py_test( - name = "test_huggingface_trainer_steps", - size = "large", - srcs = ["tests/test_huggingface_trainer_steps.py"], - tags = ["team:ml", "exclusive", "ray_air"], - deps = [":train_lib"] -) - py_test( name = "test_mosaic_trainer", size = "medium", @@ -568,6 +528,46 @@ py_test( deps = [":train_lib"] ) +py_test( + name = "test_transformers_checkpoint", + size = "small", + srcs = ["tests/test_transformers_checkpoint.py"], + tags = ["team:ml", "exclusive", "ray_air"], + deps = [":train_lib", ":conftest"] +) + +py_test( + name = "test_transformers_gpu", + size = "medium", + srcs = ["tests/test_transformers_gpu.py"], + tags = ["team:ml", "exclusive", "gpu_only"], + deps = [":train_lib", ":conftest"] +) + +py_test( + name = "test_transformers_predictor", + size = "medium", + srcs = ["tests/test_transformers_predictor.py"], + tags = ["team:ml", "exclusive", "ray_air"], + deps = [":train_lib", ":conftest"] +) + +py_test( + name = "test_transformers_trainer_steps", + size = "large", + srcs = ["tests/test_transformers_trainer_steps.py"], + tags = ["team:ml", "exclusive", "ray_air"], + deps = [":train_lib"] +) + +py_test( + name = "test_transformers_trainer", + size = "large", + srcs = ["tests/test_transformers_trainer.py"], + tags = ["team:ml", "exclusive", 
"ray_air"], + deps = [":train_lib"] +) + py_test( name = "test_tune", size = "large", diff --git a/python/ray/train/examples/huggingface/huggingface_basic_language_modeling_example.py b/python/ray/train/examples/huggingface/huggingface_basic_language_modeling_example.py index fa0817e68abc..3ab907253faa 100644 --- a/python/ray/train/examples/huggingface/huggingface_basic_language_modeling_example.py +++ b/python/ray/train/examples/huggingface/huggingface_basic_language_modeling_example.py @@ -20,7 +20,10 @@ import ray import ray.data from ray.train.batch_predictor import BatchPredictor -from ray.train.huggingface import HuggingFacePredictor, HuggingFaceTrainer +from ray.train.hf_transformers import ( + TransformersPredictor, + TransformersTrainer, +) from ray.air.config import ScalingConfig @@ -113,7 +116,7 @@ def train_function(train_dataset, eval_dataset=None, **config): ray_train = ray_train.limit(16) ray_validation = ray_validation.limit(8) - trainer = HuggingFaceTrainer( + trainer = TransformersTrainer( trainer_init_per_worker=train_function, scaling_config=ScalingConfig(num_workers=num_workers, use_gpu=use_gpu), datasets={"train": ray_train, "evaluation": ray_validation}, @@ -125,7 +128,7 @@ def train_function(train_dataset, eval_dataset=None, **config): prompt = ["My text: Complete me..."] predictor = BatchPredictor.from_checkpoint( results.checkpoint, - HuggingFacePredictor, + TransformersPredictor, task="text-generation", tokenizer=tokenizer, ) @@ -138,7 +141,7 @@ def train_function(train_dataset, eval_dataset=None, **config): if __name__ == "__main__": # Training settings parser = argparse.ArgumentParser( - description="Language modelling from scratch with HuggingFaceTrainer Example", + description="Language modelling from scratch with TransformersTrainer Example", formatter_class=argparse.ArgumentDefaultsHelpFormatter, ) parser.add_argument( diff --git a/python/ray/train/examples/transformers/cluster.yaml 
b/python/ray/train/examples/transformers/cluster.yaml index 4559964fb035..72e8676e0198 100644 --- a/python/ray/train/examples/transformers/cluster.yaml +++ b/python/ray/train/examples/transformers/cluster.yaml @@ -51,7 +51,7 @@ setup_commands: - pip install ray[tune] - pip install -U https://s3-us-west-2.amazonaws.com/ray-wheels/latest/ray-3.0.0.dev0-cp37-cp37m-manylinux2014_x86_64.whl - # Install HuggingFace + # Install Transformers - git clone https://github.com/huggingface/transformers || true - cd transformers && pip install -U . && diff --git a/python/ray/train/examples/transformers/transformers_example.py b/python/ray/train/examples/transformers/transformers_example.py index f0e24b24f88a..ba5718bacaa2 100644 --- a/python/ray/train/examples/transformers/transformers_example.py +++ b/python/ray/train/examples/transformers/transformers_example.py @@ -43,7 +43,7 @@ from transformers.utils.versions import require_version import ray -from ray.train.huggingface.accelerate import AccelerateTrainer +from ray.train.hf_accelerate import AccelerateTrainer from ray.air.config import ScalingConfig logger = logging.getLogger(__name__) diff --git a/python/ray/train/hf_accelerate/__init__.py b/python/ray/train/hf_accelerate/__init__.py new file mode 100644 index 000000000000..b84e46c0910f --- /dev/null +++ b/python/ray/train/hf_accelerate/__init__.py @@ -0,0 +1,5 @@ +from ray.train.hf_accelerate.accelerate_trainer import AccelerateTrainer + +__all__ = [ + "AccelerateTrainer", +] diff --git a/python/ray/train/huggingface/accelerate/_accelerate_utils.py b/python/ray/train/hf_accelerate/_accelerate_utils.py similarity index 100% rename from python/ray/train/huggingface/accelerate/_accelerate_utils.py rename to python/ray/train/hf_accelerate/_accelerate_utils.py diff --git a/python/ray/train/huggingface/accelerate/accelerate_trainer.py b/python/ray/train/hf_accelerate/accelerate_trainer.py similarity index 99% rename from 
python/ray/train/huggingface/accelerate/accelerate_trainer.py rename to python/ray/train/hf_accelerate/accelerate_trainer.py index aab3f5121dc9..f8ddf1b9ffdd 100644 --- a/python/ray/train/huggingface/accelerate/accelerate_trainer.py +++ b/python/ray/train/hf_accelerate/accelerate_trainer.py @@ -22,7 +22,7 @@ from ray.train.torch.config import _set_torch_distributed_env_vars try: - from ray.train.huggingface.accelerate._accelerate_utils import ( + from ray.train.hf_accelerate._accelerate_utils import ( launch_command, AccelerateDefaultNamespace, AccelerateConfigWrapper, @@ -122,7 +122,7 @@ def train_loop_per_worker(): import ray from ray.air import session, Checkpoint - from ray.train.huggingface.accelerate import AccelerateTrainer + from ray.train.hf_accelerate import AccelerateTrainer from ray.air.config import ScalingConfig from ray.air.config import RunConfig from ray.air.config import CheckpointConfig diff --git a/python/ray/train/hf_transformers/__init__.py b/python/ray/train/hf_transformers/__init__.py new file mode 100644 index 000000000000..9ecd347fe0ed --- /dev/null +++ b/python/ray/train/hf_transformers/__init__.py @@ -0,0 +1,15 @@ +from ray.train.hf_transformers.transformers_checkpoint import ( + TransformersCheckpoint, +) +from ray.train.hf_transformers.transformers_predictor import ( + TransformersPredictor, +) +from ray.train.hf_transformers.transformers_trainer import ( + TransformersTrainer, +) + +__all__ = [ + "TransformersCheckpoint", + "TransformersPredictor", + "TransformersTrainer", +] diff --git a/python/ray/train/huggingface/_huggingface_utils.py b/python/ray/train/hf_transformers/_transformers_utils.py similarity index 96% rename from python/ray/train/huggingface/_huggingface_utils.py rename to python/ray/train/hf_transformers/_transformers_utils.py index 838ed9f398bf..289037c04a47 100644 --- a/python/ray/train/huggingface/_huggingface_utils.py +++ b/python/ray/train/hf_transformers/_transformers_utils.py @@ -8,7 +8,9 @@ from ray.air import 
session from ray.data import DataIterator -from ray.train.huggingface.huggingface_checkpoint import HuggingFaceCheckpoint +from ray.train.hf_transformers.transformers_checkpoint import ( + TransformersCheckpoint, +) if TYPE_CHECKING: from torch.utils.data import IterableDataset @@ -157,8 +159,8 @@ def on_save(self, args, state, control, **kwargs): transformers.trainer.get_last_checkpoint(args.output_dir) ).absolute() if checkpoint_path: - # Use HuggingFaceCheckpoint here to avoid a warning in _TrainSession - self.delayed_report["checkpoint"] = HuggingFaceCheckpoint.from_directory( + # Use TransformersCheckpoint here to avoid a warning in _TrainSession + self.delayed_report["checkpoint"] = TransformersCheckpoint.from_directory( str(checkpoint_path) ) diff --git a/python/ray/train/hf_transformers/transformers_checkpoint.py b/python/ray/train/hf_transformers/transformers_checkpoint.py new file mode 100644 index 000000000000..3128797c4652 --- /dev/null +++ b/python/ray/train/hf_transformers/transformers_checkpoint.py @@ -0,0 +1,104 @@ +import os +from typing import TYPE_CHECKING, Type, Optional, Union + +import torch +import transformers +import transformers.modeling_utils +import transformers.trainer +import transformers.training_args +from transformers.trainer import TRAINING_ARGS_NAME, WEIGHTS_NAME + +from ray.air._internal.checkpointing import save_preprocessor_to_dir +from ray.air._internal.torch_utils import load_torch_model +from ray.air.checkpoint import Checkpoint +from ray.util.annotations import PublicAPI + +if TYPE_CHECKING: + from ray.data.preprocessor import Preprocessor + + +@PublicAPI(stability="alpha") +class TransformersCheckpoint(Checkpoint): + """A :py:class:`~ray.air.checkpoint.Checkpoint` with HuggingFace-specific + functionality. + + Use ``TransformersCheckpoint.from_model`` to create this type of checkpoint. 
+ """ + + @classmethod + def from_model( + cls, + model: Union[transformers.modeling_utils.PreTrainedModel, torch.nn.Module], + tokenizer: Optional[transformers.PreTrainedTokenizer] = None, + *, + path: os.PathLike, + preprocessor: Optional["Preprocessor"] = None, + ) -> "TransformersCheckpoint": + """Create a :py:class:`~ray.air.checkpoint.Checkpoint` that stores a + HuggingFace model. + + Args: + model: The pretrained transformer or Torch model to store in the + checkpoint. + tokenizer: The Tokenizer to use in the Transformers pipeline for inference. + path: The directory where the checkpoint will be stored. + preprocessor: A fitted preprocessor to be applied before inference. + + Returns: + A :py:class:`TransformersCheckpoint` containing the specified model. + """ + if not isinstance(model, transformers.modeling_utils.PreTrainedModel): + state_dict = model.state_dict() + torch.save(state_dict, os.path.join(path, WEIGHTS_NAME)) + else: + model.save_pretrained(path) + + if tokenizer: + tokenizer.save_pretrained(path) + + if preprocessor: + save_preprocessor_to_dir(preprocessor, path) + + checkpoint = cls.from_directory(path) + + return checkpoint + + def get_model( + self, + model: Union[ + Type[transformers.modeling_utils.PreTrainedModel], torch.nn.Module + ], + **pretrained_model_kwargs, + ) -> Union[transformers.modeling_utils.PreTrainedModel, torch.nn.Module]: + """Retrieve the model stored in this checkpoint.""" + with self.as_directory() as checkpoint_path: + if isinstance(model, torch.nn.Module): + state_dict = torch.load( + os.path.join(checkpoint_path, WEIGHTS_NAME), map_location="cpu" + ) + model = load_torch_model(saved_model=state_dict, model_definition=model) + else: + model = model.from_pretrained( + checkpoint_path, **pretrained_model_kwargs + ) + return model + + def get_tokenizer( + self, + tokenizer: Type[transformers.PreTrainedTokenizer], + **kwargs, + ) -> Optional[transformers.PreTrainedTokenizer]: + """Create a tokenizer using the data stored 
in this checkpoint.""" + with self.as_directory() as checkpoint_path: + return tokenizer.from_pretrained(checkpoint_path, **kwargs) + + def get_training_arguments(self) -> transformers.training_args.TrainingArguments: + """Retrieve the training arguments stored in this checkpoint.""" + with self.as_directory() as checkpoint_path: + training_args_path = os.path.join(checkpoint_path, TRAINING_ARGS_NAME) + if os.path.exists(training_args_path): + with open(training_args_path, "rb") as f: + training_args = torch.load(f, map_location="cpu") + else: + training_args = None + return training_args diff --git a/python/ray/train/hf_transformers/transformers_predictor.py b/python/ray/train/hf_transformers/transformers_predictor.py new file mode 100644 index 000000000000..29e73273abb5 --- /dev/null +++ b/python/ray/train/hf_transformers/transformers_predictor.py @@ -0,0 +1,243 @@ +import logging +from typing import TYPE_CHECKING, List, Optional, Type, Union + +import pandas as pd +from transformers.pipelines import Pipeline +from transformers.pipelines import pipeline as pipeline_factory +from transformers.pipelines.table_question_answering import ( + TableQuestionAnsweringPipeline, +) + +from ray.air.checkpoint import Checkpoint +from ray.air.constants import TENSOR_COLUMN_NAME +from ray.air.data_batch_type import DataBatchType +from ray.train.predictor import Predictor +from ray.util import log_once +from ray.util.annotations import PublicAPI + +try: + import torch + + torch_get_gpus = torch.cuda.device_count +except ImportError: + + def torch_get_gpus(): + return 0 + + +try: + import tensorflow + + def tf_get_gpus(): + return len(tensorflow.config.list_physical_devices("GPU")) + +except ImportError: + + def tf_get_gpus(): + return 0 + + +if TYPE_CHECKING: + from ray.data.preprocessor import Preprocessor + +logger = logging.getLogger(__name__) + + +@PublicAPI(stability="alpha") +class TransformersPredictor(Predictor): + """A predictor for HuggingFace Transformers PyTorch 
models. + + This predictor uses Transformers Pipelines for inference. + + Args: + pipeline: The Transformers pipeline to use for inference. + preprocessor: A preprocessor used to transform data batches prior + to prediction. + use_gpu: If set, the model will be moved to GPU on instantiation and + prediction happens on GPU. + """ + + def __init__( + self, + pipeline: Optional[Pipeline] = None, + preprocessor: Optional["Preprocessor"] = None, + use_gpu: bool = False, + ): + self.pipeline = pipeline + self.use_gpu = use_gpu + + num_gpus = max(torch_get_gpus(), tf_get_gpus()) + if not use_gpu and num_gpus > 0 and log_once("hf_predictor_not_using_gpu"): + logger.warning( + "You have `use_gpu` as False but there are " + f"{num_gpus} GPUs detected on host where " + "prediction will only use CPU. Please consider explicitly " + "setting `TransformersPredictor(use_gpu=True)` or " + "`batch_predictor.predict(ds, num_gpus_per_worker=1)` to " + "enable GPU prediction. Ignore if you have set `device` or " + "`device_map` arguments in the `pipeline` manually." + ) + + super().__init__(preprocessor) + + def __repr__(self): + return ( + f"{self.__class__.__name__}(pipeline={self.pipeline!r}, " + f"preprocessor={self._preprocessor!r})" + ) + + @classmethod + def from_checkpoint( + cls, + checkpoint: Checkpoint, + *, + pipeline_cls: Optional[Type[Pipeline]] = None, + use_gpu: bool = False, + **pipeline_kwargs, + ) -> "TransformersPredictor": + """Instantiate the predictor from a Checkpoint. + + The checkpoint is expected to be a result of ``TransformersTrainer``. + + Note that the Transformers ``pipeline`` used internally expects to + recieve raw text. If you have any Preprocessors in Checkpoint + that tokenize the data, remove them by calling + ``Checkpoint.set_preprocessor(None)`` beforehand. + + Args: + checkpoint: The checkpoint to load the model, tokenizer and + preprocessor from. It is expected to be from the result of a + ``TransformersTrainer`` run. 
+ pipeline_cls: A ``transformers.pipelines.Pipeline`` class to use. + If not specified, will use the ``pipeline`` abstraction + wrapper. + use_gpu: If set, the model will be moved to GPU on instantiation and + prediction happens on GPU. + **pipeline_kwargs: Any kwargs to pass to the pipeline + initialization. If ``pipeline`` is None, this must contain + the 'task' argument. Cannot contain 'model'. Can be used + to override the tokenizer with 'tokenizer'. If ``use_gpu`` is + True, 'device' will be set to 0 by default, unless 'device_map' is + passed. + """ + if not pipeline_cls and "task" not in pipeline_kwargs: + raise ValueError( + "If `pipeline_cls` is not specified, 'task' must be passed as a kwarg." + ) + if use_gpu and "device_map" not in pipeline_kwargs: + # default to using the GPU with the first index + pipeline_kwargs.setdefault("device", 0) + pipeline_cls = pipeline_cls or pipeline_factory + preprocessor = checkpoint.get_preprocessor() + with checkpoint.as_directory() as checkpoint_path: + # Tokenizer will be loaded automatically (no need to specify + # `tokenizer=checkpoint_path`) + pipeline = pipeline_cls(model=checkpoint_path, **pipeline_kwargs) + return cls( + pipeline=pipeline, + preprocessor=preprocessor, + use_gpu=use_gpu, + ) + + def _predict( + self, data: Union[list, pd.DataFrame], **pipeline_call_kwargs + ) -> pd.DataFrame: + ret = self.pipeline(data, **pipeline_call_kwargs) + # Remove unnecessary lists + try: + new_ret = [x[0] if isinstance(x, list) and len(x) == 1 else x for x in ret] + df = pd.DataFrame(new_ret) + except Exception: + # if we fail for any reason, just give up + df = pd.DataFrame(ret) + df.columns = [str(col) for col in df.columns] + return df + + @staticmethod + def _convert_data_for_pipeline( + data: pd.DataFrame, pipeline: Pipeline + ) -> Union[list, pd.DataFrame]: + """Convert the data into a format accepted by the pipeline. 
+ + In most cases, this format is a list of strings.""" + # Special case where pd.DataFrame is allowed. + if isinstance(pipeline, TableQuestionAnsweringPipeline): + # TODO(team-ml): This may be a performance bottleneck. + return data + + # Otherwise, a list of columns as lists. + columns = [data[col].to_list() for col in data.columns] + # Flatten if it's only one column. + while isinstance(columns, list) and len(columns) == 1: + columns = columns[0] + return columns + + def predict( + self, + data: DataBatchType, + feature_columns: Optional[Union[List[str], List[int]]] = None, + **predict_kwargs, + ) -> DataBatchType: + """Run inference on data batch. + + The data is converted into a list (unless ``pipeline`` is a + ``TableQuestionAnsweringPipeline``) and passed to the ``pipeline`` + object. + + Args: + data: A batch of input data. Either a pandas DataFrame or numpy + array. + feature_columns: The names or indices of the columns in the + data to use as features to predict on. If None, use all + columns. + **pipeline_call_kwargs: additional kwargs to pass to the + ``pipeline`` object. + + Examples: + >>> import pandas as pd + >>> from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer + >>> from transformers.pipelines import pipeline + >>> from ray.train.hf_transformers import TransformersPredictor + >>> + >>> model_checkpoint = "gpt2" + >>> tokenizer_checkpoint = "sgugger/gpt2-like-tokenizer" + >>> tokenizer = AutoTokenizer.from_pretrained(tokenizer_checkpoint) + >>> + >>> model_config = AutoConfig.from_pretrained(model_checkpoint) + >>> model = AutoModelForCausalLM.from_config(model_config) + >>> predictor = TransformersPredictor( + ... pipeline=pipeline( + ... task="text-generation", model=model, tokenizer=tokenizer + ... ) + ... ) + >>> + >>> prompts = pd.DataFrame( + ... ["Complete me", "And me", "Please complete"], columns=["sentences"] + ... ) + >>> predictions = predictor.predict(prompts) + + + Returns: + Prediction result. 
+ """ + return Predictor.predict( + self, data, feature_columns=feature_columns, **predict_kwargs + ) + + def _predict_pandas( + self, + data: "pd.DataFrame", + feature_columns: Optional[List[str]] = None, + **pipeline_call_kwargs, + ) -> "pd.DataFrame": + if TENSOR_COLUMN_NAME in data: + arr = data[TENSOR_COLUMN_NAME].to_numpy() + if feature_columns: + data = pd.DataFrame(arr[:, feature_columns]) + elif feature_columns: + data = data[feature_columns] + + data = data[feature_columns] if feature_columns else data + + data = self._convert_data_for_pipeline(data, self.pipeline) + return self._predict(data, **pipeline_call_kwargs) diff --git a/python/ray/train/hf_transformers/transformers_trainer.py b/python/ray/train/hf_transformers/transformers_trainer.py new file mode 100644 index 000000000000..2df919d625d5 --- /dev/null +++ b/python/ray/train/hf_transformers/transformers_trainer.py @@ -0,0 +1,469 @@ +import importlib.util +import inspect +import os +import sys +import warnings +from packaging.version import Version +from typing import TYPE_CHECKING, Any, Callable, Dict, Optional, Type + +import transformers +import transformers.modeling_utils +import transformers.trainer +import transformers.training_args +from transformers.trainer_utils import IntervalStrategy +from transformers.utils import is_datasets_available +from torch.utils.data import Dataset as TorchDataset + +from ray.air import session +from ray.air.checkpoint import Checkpoint +from ray.air.config import DatasetConfig, RunConfig, ScalingConfig +from ray.train.constants import ( + EVALUATION_DATASET_KEY, + TRAIN_DATASET_KEY, +) +from ray.train.data_parallel_trainer import DataParallelTrainer +from ray.train.hf_transformers._transformers_utils import ( + TrainReportCallback, + process_datasets, + wrap_transformers_trainer, +) +from ray.train.torch import TorchConfig, TorchTrainer +from ray.train.trainer import GenDataset +from ray.util import PublicAPI + +if TYPE_CHECKING: + from ray.data.preprocessor 
import Preprocessor + +# Due to HF Dataset's dynamic module system, we need to dynamically import the +# datasets_modules module on every actor when training. +# We accomplish this by simply running the following bit of code directly +# in module you are currently viewing. This ensures that when we +# unpickle the TransformersTrainer, it will be ran before pickle tries to +# import datasets_modules and prevents an exception from being thrown. +# Same logic is present inside HF Transformers Ray integration: +# https://github.com/huggingface/transformers/blob/\ +# 7d5fde991d598370d961be8cb7add6541e2b59ce/src/transformers/integrations.py#L271 +# Also see https://github.com/ray-project/ray/issues/28084 +if "datasets_modules" not in sys.modules and is_datasets_available(): + import datasets.load + + dynamic_modules_path = os.path.join( + datasets.load.init_dynamic_modules(), "__init__.py" + ) + # load dynamic_modules from path + spec = importlib.util.spec_from_file_location( + "datasets_modules", dynamic_modules_path + ) + datasets_modules = importlib.util.module_from_spec(spec) + sys.modules[spec.name] = datasets_modules + spec.loader.exec_module(datasets_modules) + + +TRAINER_INIT_FN_KEY = "_trainer_init_per_worker" + + +@PublicAPI(stability="alpha") +class TransformersTrainer(TorchTrainer): + """A Trainer for data parallel HuggingFace Transformers on PyTorch training. + + This Trainer runs the ``transformers.Trainer.train()`` method on multiple + Ray Actors. The training is carried out in a distributed fashion through PyTorch + DDP. These actors already have the necessary torch process group already + configured for distributed PyTorch training. If you have PyTorch >= 1.12.0 + installed, you can also run FSDP training by specifying the ``fsdp`` argument + in ``TrainingArguments``. DeepSpeed is + also supported - see :doc:`/ray-air/examples/gptj_deepspeed_fine_tuning`. + For more information on configuring FSDP or DeepSpeed, refer to `Hugging Face + documentation `__. 
+ + The training function ran on every Actor will first run the + specified ``trainer_init_per_worker`` function to obtain an instantiated + ``transformers.Trainer`` object. The ``trainer_init_per_worker`` function + will have access to preprocessed train and evaluation datasets. + + If the ``datasets`` dict contains a training dataset (denoted by + the "train" key), then it will be split into multiple dataset + shards, with each Actor training on a single shard. + All the other datasets will not be split. + + Please note that if you use a custom ``transformers.Trainer`` subclass, + the ``get_train_dataloader`` method will be wrapped around to disable + sharding by ``transformers.IterableDatasetShard``, as the dataset will + already be sharded on the Ray AIR side. + + You can also provide ``datasets.Dataset`` object or other dataset objects + allowed by ``transformers.Trainer`` directly in the ``trainer_init_per_worker`` + function, without specifying the ``datasets`` dict. It is recommended to initialize + those objects inside the function, as otherwise they will be serialized and passed + to the function, which may lead to long runtime and memory issues with large + amounts of data. In this case, the training dataset will be split + automatically by Transformers. + + HuggingFace loggers will be automatically disabled, and the ``local_rank`` + argument in ``TrainingArguments`` will be automatically set. Please note + that if you want to use CPU training, you will need to set the ``no_cuda`` + argument in ``TrainingArguments`` manually - otherwise, an exception + (segfault) may be thrown. + + This Trainer requires ``transformers>=4.19.0`` package. + It is tested with ``transformers==4.19.1``. + + Example: + .. 
code-block:: python + + # Based on + # huggingface/notebooks/examples/language_modeling_from_scratch.ipynb + + # Hugging Face imports + from datasets import load_dataset + import transformers + from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer + + import ray + from ray.train.hf_transformers import TransformersTrainer + from ray.air.config import ScalingConfig + + # If using GPUs, set this to True. + use_gpu = False + + model_checkpoint = "gpt2" + tokenizer_checkpoint = "sgugger/gpt2-like-tokenizer" + block_size = 128 + + datasets = load_dataset("wikitext", "wikitext-2-raw-v1") + tokenizer = AutoTokenizer.from_pretrained(tokenizer_checkpoint) + + def tokenize_function(examples): + return tokenizer(examples["text"]) + + tokenized_datasets = datasets.map( + tokenize_function, batched=True, num_proc=1, remove_columns=["text"] + ) + + def group_texts(examples): + # Concatenate all texts. + concatenated_examples = { + k: sum(examples[k], []) for k in examples.keys() + } + total_length = len(concatenated_examples[list(examples.keys())[0]]) + # We drop the small remainder, we could add padding if the model + # supported it. + # instead of this drop, you can customize this part to your needs. + total_length = (total_length // block_size) * block_size + # Split by chunks of max_len. 
+ result = { + k: [ + t[i : i + block_size] + for i in range(0, total_length, block_size) + ] + for k, t in concatenated_examples.items() + } + result["labels"] = result["input_ids"].copy() + return result + + lm_datasets = tokenized_datasets.map( + group_texts, + batched=True, + batch_size=1000, + num_proc=1, + ) + ray_train_ds = ray.data.from_huggingface(lm_datasets["train"]) + ray_evaluation_ds = ray.data.from_huggingface( + lm_datasets["validation"] + ) + + def trainer_init_per_worker(train_dataset, eval_dataset, **config): + model_config = AutoConfig.from_pretrained(model_checkpoint) + model = AutoModelForCausalLM.from_config(model_config) + args = transformers.TrainingArguments( + output_dir=f"{model_checkpoint}-wikitext2", + evaluation_strategy="epoch", + save_strategy="epoch", + logging_strategy="epoch", + learning_rate=2e-5, + weight_decay=0.01, + no_cuda=(not use_gpu), + ) + return transformers.Trainer( + model=model, + args=args, + train_dataset=train_dataset, + eval_dataset=eval_dataset, + ) + + scaling_config = ScalingConfig(num_workers=3, use_gpu=use_gpu) + trainer = TransformersTrainer( + trainer_init_per_worker=trainer_init_per_worker, + scaling_config=scaling_config, + datasets={"train": ray_train_ds, "evaluation": ray_evaluation_ds}, + ) + result = trainer.fit() + + Args: + trainer_init_per_worker: The function that returns an instantiated + ``transformers.Trainer`` object and takes in the following arguments: + train ``Torch.Dataset``, optional evaluation ``Torch.Dataset`` + and config as kwargs. The Torch Datasets are automatically + created by converting the Ray Datasets internally before + they are passed into the function. + trainer_init_config: Configurations to pass into + ``trainer_init_per_worker`` as kwargs. + torch_config: Configuration for setting up the PyTorch backend. If set to + None, use the default configuration. This replaces the ``backend_config`` + arg of ``DataParallelTrainer``. Same as in ``TorchTrainer``. 
+ scaling_config: Configuration for how to scale data parallel training. + dataset_config: Configuration for dataset ingest. + run_config: Configuration for the execution of the training run. + datasets: Any Ray Datasets to use for training. Use + the key "train" to denote which dataset is the training + dataset and key "evaluation" to denote the evaluation + dataset. Can only contain a training dataset + and up to one extra dataset to be used for evaluation. + If a ``preprocessor`` is provided and has not already been fit, + it will be fit on the training dataset. All datasets will be + transformed by the ``preprocessor`` if one is provided. + preprocessor: A ray.data.Preprocessor to preprocess the + provided datasets. + resume_from_checkpoint: A checkpoint to resume training from. + """ + + _dataset_config = { + # training dataset should be split by us + "train": DatasetConfig(fit=True, split=True), + # do not split eval dataset, as HF has a system to parallelize + # evaluation across workers, and it requires each worker + # to have the full eval dataset + "evaluation": DatasetConfig(split=False), + } + + def __init__( + self, + trainer_init_per_worker: Callable[ + [Optional[TorchDataset], Optional[TorchDataset], Any], + transformers.trainer.Trainer, + ], + *, + trainer_init_config: Optional[Dict] = None, + torch_config: Optional[TorchConfig] = None, + scaling_config: Optional[ScalingConfig] = None, + dataset_config: Optional[Dict[str, DatasetConfig]] = None, + run_config: Optional[RunConfig] = None, + datasets: Optional[Dict[str, GenDataset]] = None, + preprocessor: Optional["Preprocessor"] = None, + resume_from_checkpoint: Optional[Checkpoint] = None, + ): + + # Functionality required for TransformersTrainer only added in this + # version + if Version(transformers.__version__) < Version("4.19.0"): + raise RuntimeError( + "TransformersTrainer requires transformers>=4.19.0, but you " + f"have {transformers.__version__} which is incompatible. 
" + "Update on all nodes with `pip install -U 'transformers>=4.19.0'`." + ) + + self._validate_trainer_init_per_worker( + trainer_init_per_worker, "trainer_init_per_worker" + ) + + super().__init__( + train_loop_per_worker=_huggingface_train_loop_per_worker, + train_loop_config=self._create_trainer_init_config( + trainer_init_per_worker, trainer_init_config + ), + torch_config=torch_config, + scaling_config=scaling_config, + dataset_config=dataset_config, + run_config=run_config, + datasets=datasets, + preprocessor=preprocessor, + resume_from_checkpoint=resume_from_checkpoint, + ) + + @classmethod + def _create_trainer_init_config( + cls, + trainer_init_per_worker: Callable[ + [TorchDataset, Optional[TorchDataset], Any], + transformers.trainer.Trainer, + ], + trainer_init_config: Optional[Dict[str, Any]], + ) -> Dict[str, Any]: + trainer_init_config = trainer_init_config.copy() if trainer_init_config else {} + if TRAINER_INIT_FN_KEY in trainer_init_config: + raise ValueError( + f"'{TRAINER_INIT_FN_KEY}' is a reserved key in `trainer_init_config`." + ) + if trainer_init_per_worker: + trainer_init_config[TRAINER_INIT_FN_KEY] = trainer_init_per_worker + return trainer_init_config + + @classmethod + def restore( + cls: Type["TransformersTrainer"], + path: str, + trainer_init_per_worker: Optional[ + Callable[ + [TorchDataset, Optional[TorchDataset], Any], + transformers.trainer.Trainer, + ] + ] = None, + trainer_init_config: Optional[Dict] = None, + datasets: Optional[Dict[str, GenDataset]] = None, + preprocessor: Optional["Preprocessor"] = None, + scaling_config: Optional[ScalingConfig] = None, + ) -> "TransformersTrainer": + """Restores a TransformersTrainer from a previously interrupted/failed run. + + Args: + trainer_init_per_worker: Optionally re-specified trainer init function. + This should be used to re-specify a function that is not + restorable in a new Ray cluster (e.g., it holds onto outdated + object references). 
This should be the same trainer init + that was passed to the original trainer constructor. + trainer_init_config: Optionally re-specified trainer init config. + This should similarly be used if the original `train_loop_config` + contained outdated object references, and it should not be modified + from what was originally passed in. + + See :meth:`BaseTrainer.restore() ` + for descriptions of the other arguments. + + Returns: + TransformersTrainer: A restored instance of `TransformersTrainer` + """ + return super(DataParallelTrainer, cls).restore( + path=path, + trainer_init_per_worker=trainer_init_per_worker, + trainer_init_config=trainer_init_config, + datasets=datasets, + preprocessor=preprocessor, + scaling_config=scaling_config, + ) + + def _validate_trainer_init_per_worker( + self, trainer_init_per_worker: Callable, fn_name: str + ) -> None: + num_params = len(inspect.signature(trainer_init_per_worker).parameters) + if num_params < 3: + raise ValueError( + f"{fn_name} should take in at least 3 arguments, " + f"but it accepts {num_params} arguments instead." + ) + + def _validate_attributes(self): + for key, conf in self._dataset_config.items(): + if conf.use_stream_api: + raise ValueError( + "TransformersTrainer does not support `use_stream_api`." + ) + gpus_per_worker = self.scaling_config.num_gpus_per_worker + if gpus_per_worker > 1: + raise ValueError( + f"You have assigned {gpus_per_worker} GPUs per worker. " + "This is not supported by HuggingFace, which expects " + "one GPU per worker in DDP mode and will fail " + "if more are assigned." + ) + if gpus_per_worker != int(gpus_per_worker): + raise ValueError( + f"You have assigned {gpus_per_worker} GPUs per worker, " + "but fractional GPUs are not supported by HuggingFace." 
+ ) + + super()._validate_attributes() + + +def _huggingface_train_loop_per_worker(config): + """Per-worker training loop for HuggingFace Transformers.""" + trainer_init_per_worker = config.pop("_trainer_init_per_worker") + + train_dataset = session.get_dataset_shard(TRAIN_DATASET_KEY) + eval_dataset = session.get_dataset_shard(EVALUATION_DATASET_KEY) + + train_torch_dataset, eval_torch_dataset = process_datasets( + train_dataset, + eval_dataset, + ) + + trainer: transformers.trainer.Trainer = trainer_init_per_worker( + train_torch_dataset, eval_torch_dataset, **config + ) + + strategies = [ + strategy + for strategy in (trainer.args.evaluation_strategy, trainer.args.save_strategy) + if strategy not in ("no", IntervalStrategy.NO) + ] + strategies = [trainer.args.logging_strategy] + strategies + if not all(strategy == strategies[0] for strategy in strategies[1:]): + raise ValueError( + "When using Ray AIR,`logging_strategy`, `evaluation_strategy` " + "and `save_strategy` must all be set to the same value. " + "`evaluation_strategy` or `save_strategy` may also be set to 'no'.\n" + f"Got `logging_strategy`={trainer.args.logging_strategy}\n" + f"`evaluation_strategy`={trainer.args.evaluation_strategy}\n" + f"`save_strategy`={trainer.args.save_strategy}" + ) + + if trainer.args.save_strategy in ("steps", IntervalStrategy.STEPS): + if ( + trainer.args.save_steps < trainer.args.logging_steps + or trainer.args.save_steps % trainer.args.logging_steps != 0 + ): + raise ValueError( + "When using 'steps' `save_strategy`, `save_steps` must be " + "equal or bigger to `logging_steps`, and must be divisible " + "by `logging_steps` (so that saving occurs at the same time " + f"logging does). Got `save_steps`={trainer.args.save_steps}, " + f"`logging_steps`={trainer.args.logging_steps}." 
+ ) + + if trainer.args.evaluation_strategy in ("steps", IntervalStrategy.STEPS): + if trainer.args.logging_steps != trainer.args.eval_steps: + raise ValueError( + "`logging_steps` must be equal to `eval_steps`. " + f"Got `logging_steps`={trainer.args.logging_steps}, " + f"`eval_steps`={trainer.args.eval_steps}" + ) + + if trainer.args.load_best_model_at_end: + raise ValueError( + "As Ray AIR replaces Transformers checkpointing, " + "`load_best_model_at_end` must be set to False.\n" + "You can obtain the AIR Checkpoint with " + "`Result.checkpoint` returned by the `fit()` method " + "of this Trainer, and the model itself by calling " + "`Checkpoint.get_model()`.\n" + "You can configure the checkpointing by setting " + "`run_config.checkpoint_config`." + ) + + if trainer.args.push_to_hub and not trainer.args.hub_token: + warnings.warn( + "You have set `push_to_hub=True` but didn't specify `hub_token`. " + "Pushing to hub will most likely fail, as the credentials will not " + "be automatically propagated from the local enviroment to the Ray Actors. " + "If that happens, specify `hub_token` in `TrainingArguments`." 
+ ) + + trainer = wrap_transformers_trainer(trainer) + + # ensure no HF logging callbacks are added + # aside from doubling functionality with our callbacks, + # the Wandb callbacks causes training to freeze + integration_callbacks = transformers.trainer.get_reporting_integration_callbacks( + trainer.args.report_to + ) + for callback in integration_callbacks: + trainer.pop_callback(callback) + + trainer.add_callback(TrainReportCallback) + + checkpoint = session.get_checkpoint() + if checkpoint: + with checkpoint.as_directory() as checkpoint_path: + trainer.train(resume_from_checkpoint=checkpoint_path) + else: + trainer.train() diff --git a/python/ray/train/huggingface/__init__.py b/python/ray/train/huggingface/__init__.py index 3c6d4932e793..5f04ff6cb8b1 100644 --- a/python/ray/train/huggingface/__init__.py +++ b/python/ray/train/huggingface/__init__.py @@ -1,5 +1,9 @@ -from ray.train.huggingface.huggingface_checkpoint import HuggingFaceCheckpoint -from ray.train.huggingface.huggingface_predictor import HuggingFacePredictor +from ray.train.huggingface.huggingface_checkpoint import ( + HuggingFaceCheckpoint, +) +from ray.train.huggingface.huggingface_predictor import ( + HuggingFacePredictor, +) from ray.train.huggingface.huggingface_trainer import ( HuggingFaceTrainer, ) diff --git a/python/ray/train/huggingface/_deprecation_msg.py b/python/ray/train/huggingface/_deprecation_msg.py new file mode 100644 index 000000000000..85622587afaf --- /dev/null +++ b/python/ray/train/huggingface/_deprecation_msg.py @@ -0,0 +1,8 @@ +deprecation_msg = ( + "`ray.train.huggingface` has been split into " + "`ray.train.hf_transformers` and `ray.train.hf_accelerate`," + " with `HuggingFaceTrainer`, `HuggingFacePredictor` and `HuggingFaceCheckpoint` " + "renamed to `TransformersTrainer`, `TransformersPredictor` and " + "`TransformersCheckpoint` respectively. Update your code to use the new import " + "paths. This will raise an exception in the future." 
+) diff --git a/python/ray/train/huggingface/accelerate.py b/python/ray/train/huggingface/accelerate.py new file mode 100644 index 000000000000..60a5d8c33d6d --- /dev/null +++ b/python/ray/train/huggingface/accelerate.py @@ -0,0 +1,10 @@ +import warnings + +deprecation_msg = ( + "`ray.train.huggingface.accelerate` has been renamed to " + "`ray.train.hf_accelerate`. This import path is left as an alias " + "but will be removed in the future." +) +warnings.warn(deprecation_msg, DeprecationWarning) + +from ray.train.hf_accelerate import * # noqa diff --git a/python/ray/train/huggingface/accelerate/__init__.py b/python/ray/train/huggingface/accelerate/__init__.py deleted file mode 100644 index 152928f54a62..000000000000 --- a/python/ray/train/huggingface/accelerate/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -from ray.train.huggingface.accelerate.accelerate_trainer import AccelerateTrainer - -__all__ = [ - "AccelerateTrainer", -] diff --git a/python/ray/train/huggingface/huggingface_checkpoint.py b/python/ray/train/huggingface/huggingface_checkpoint.py index 01e64c2f742f..35539c151323 100644 --- a/python/ray/train/huggingface/huggingface_checkpoint.py +++ b/python/ray/train/huggingface/huggingface_checkpoint.py @@ -1,104 +1,22 @@ -import os -from typing import TYPE_CHECKING, Type, Optional, Union +import warnings +from ray.util.annotations import Deprecated -import torch -import transformers -import transformers.modeling_utils -import transformers.trainer -import transformers.training_args -from transformers.trainer import TRAINING_ARGS_NAME, WEIGHTS_NAME +from ray.train.hf_transformers.transformers_checkpoint import ( + TransformersCheckpoint, +) -from ray.air._internal.checkpointing import save_preprocessor_to_dir -from ray.air._internal.torch_utils import load_torch_model -from ray.air.checkpoint import Checkpoint -from ray.util.annotations import PublicAPI +from ._deprecation_msg import deprecation_msg -if TYPE_CHECKING: - from ray.data.preprocessor import 
Preprocessor +@Deprecated(message=deprecation_msg) +class HuggingFaceCheckpoint(TransformersCheckpoint): + # Use __new__ as it is much less likely to be overriden + # than __init__ + def __new__(cls: type, *args, **kwargs): + warnings.warn(deprecation_msg, DeprecationWarning) + return super(HuggingFaceCheckpoint, cls).__new__(cls) -@PublicAPI(stability="alpha") -class HuggingFaceCheckpoint(Checkpoint): - """A :py:class:`~ray.air.checkpoint.Checkpoint` with HuggingFace-specific - functionality. - Use ``HuggingFaceCheckpoint.from_model`` to create this type of checkpoint. - """ - - @classmethod - def from_model( - cls, - model: Union[transformers.modeling_utils.PreTrainedModel, torch.nn.Module], - tokenizer: Optional[transformers.PreTrainedTokenizer] = None, - *, - path: os.PathLike, - preprocessor: Optional["Preprocessor"] = None, - ) -> "HuggingFaceCheckpoint": - """Create a :py:class:`~ray.air.checkpoint.Checkpoint` that stores a - HuggingFace model. - - Args: - model: The pretrained transformer or Torch model to store in the - checkpoint. - tokenizer: The Tokenizer to use in the Transformers pipeline for inference. - path: The directory where the checkpoint will be stored. - preprocessor: A fitted preprocessor to be applied before inference. - - Returns: - A :py:class:`HuggingFaceCheckpoint` containing the specified model. 
- """ - if not isinstance(model, transformers.modeling_utils.PreTrainedModel): - state_dict = model.state_dict() - torch.save(state_dict, os.path.join(path, WEIGHTS_NAME)) - else: - model.save_pretrained(path) - - if tokenizer: - tokenizer.save_pretrained(path) - - if preprocessor: - save_preprocessor_to_dir(preprocessor, path) - - checkpoint = cls.from_directory(path) - - return checkpoint - - def get_model( - self, - model: Union[ - Type[transformers.modeling_utils.PreTrainedModel], torch.nn.Module - ], - **pretrained_model_kwargs, - ) -> Union[transformers.modeling_utils.PreTrainedModel, torch.nn.Module]: - """Retrieve the model stored in this checkpoint.""" - with self.as_directory() as checkpoint_path: - if isinstance(model, torch.nn.Module): - state_dict = torch.load( - os.path.join(checkpoint_path, WEIGHTS_NAME), map_location="cpu" - ) - model = load_torch_model(saved_model=state_dict, model_definition=model) - else: - model = model.from_pretrained( - checkpoint_path, **pretrained_model_kwargs - ) - return model - - def get_tokenizer( - self, - tokenizer: Type[transformers.PreTrainedTokenizer], - **kwargs, - ) -> Optional[transformers.PreTrainedTokenizer]: - """Create a tokenizer using the data stored in this checkpoint.""" - with self.as_directory() as checkpoint_path: - return tokenizer.from_pretrained(checkpoint_path, **kwargs) - - def get_training_arguments(self) -> transformers.training_args.TrainingArguments: - """Retrieve the training arguments stored in this checkpoint.""" - with self.as_directory() as checkpoint_path: - training_args_path = os.path.join(checkpoint_path, TRAINING_ARGS_NAME) - if os.path.exists(training_args_path): - with open(training_args_path, "rb") as f: - training_args = torch.load(f, map_location="cpu") - else: - training_args = None - return training_args +__all__ = [ + "HuggingFaceCheckpoint", +] diff --git a/python/ray/train/huggingface/huggingface_predictor.py b/python/ray/train/huggingface/huggingface_predictor.py index 
aef519970df5..fd90557e80f5 100644 --- a/python/ray/train/huggingface/huggingface_predictor.py +++ b/python/ray/train/huggingface/huggingface_predictor.py @@ -1,243 +1,22 @@ -import logging -from typing import TYPE_CHECKING, List, Optional, Type, Union +import warnings +from ray.util.annotations import Deprecated -import pandas as pd -from transformers.pipelines import Pipeline -from transformers.pipelines import pipeline as pipeline_factory -from transformers.pipelines.table_question_answering import ( - TableQuestionAnsweringPipeline, +from ray.train.hf_transformers.transformers_predictor import ( + TransformersPredictor, ) -from ray.air.checkpoint import Checkpoint -from ray.air.constants import TENSOR_COLUMN_NAME -from ray.air.data_batch_type import DataBatchType -from ray.train.predictor import Predictor -from ray.util import log_once -from ray.util.annotations import PublicAPI +from ._deprecation_msg import deprecation_msg -try: - import torch - torch_get_gpus = torch.cuda.device_count -except ImportError: +@Deprecated(message=deprecation_msg) +class HuggingFacePredictor(TransformersPredictor): + # Use __new__ as it is much less likely to be overriden + # than __init__ + def __new__(cls: type, *args, **kwargs): + warnings.warn(deprecation_msg, DeprecationWarning) + return super(HuggingFacePredictor, cls).__new__(cls) - def torch_get_gpus(): - return 0 - -try: - import tensorflow - - def tf_get_gpus(): - return len(tensorflow.config.list_physical_devices("GPU")) - -except ImportError: - - def tf_get_gpus(): - return 0 - - -if TYPE_CHECKING: - from ray.data.preprocessor import Preprocessor - -logger = logging.getLogger(__name__) - - -@PublicAPI(stability="alpha") -class HuggingFacePredictor(Predictor): - """A predictor for HuggingFace Transformers PyTorch models. - - This predictor uses Transformers Pipelines for inference. - - Args: - pipeline: The Transformers pipeline to use for inference. 
- preprocessor: A preprocessor used to transform data batches prior - to prediction. - use_gpu: If set, the model will be moved to GPU on instantiation and - prediction happens on GPU. - """ - - def __init__( - self, - pipeline: Optional[Pipeline] = None, - preprocessor: Optional["Preprocessor"] = None, - use_gpu: bool = False, - ): - self.pipeline = pipeline - self.use_gpu = use_gpu - - num_gpus = max(torch_get_gpus(), tf_get_gpus()) - if not use_gpu and num_gpus > 0 and log_once("hf_predictor_not_using_gpu"): - logger.warning( - "You have `use_gpu` as False but there are " - f"{num_gpus} GPUs detected on host where " - "prediction will only use CPU. Please consider explicitly " - "setting `HuggingFacePredictor(use_gpu=True)` or " - "`batch_predictor.predict(ds, num_gpus_per_worker=1)` to " - "enable GPU prediction. Ignore if you have set `device` or " - "`device_map` arguments in the `pipeline` manually." - ) - - super().__init__(preprocessor) - - def __repr__(self): - return ( - f"{self.__class__.__name__}(pipeline={self.pipeline!r}, " - f"preprocessor={self._preprocessor!r})" - ) - - @classmethod - def from_checkpoint( - cls, - checkpoint: Checkpoint, - *, - pipeline_cls: Optional[Type[Pipeline]] = None, - use_gpu: bool = False, - **pipeline_kwargs, - ) -> "HuggingFacePredictor": - """Instantiate the predictor from a Checkpoint. - - The checkpoint is expected to be a result of ``HuggingFaceTrainer``. - - Note that the Transformers ``pipeline`` used internally expects to - recieve raw text. If you have any Preprocessors in Checkpoint - that tokenize the data, remove them by calling - ``Checkpoint.set_preprocessor(None)`` beforehand. - - Args: - checkpoint: The checkpoint to load the model, tokenizer and - preprocessor from. It is expected to be from the result of a - ``HuggingFaceTrainer`` run. - pipeline_cls: A ``transformers.pipelines.Pipeline`` class to use. - If not specified, will use the ``pipeline`` abstraction - wrapper. 
- use_gpu: If set, the model will be moved to GPU on instantiation and - prediction happens on GPU. - **pipeline_kwargs: Any kwargs to pass to the pipeline - initialization. If ``pipeline`` is None, this must contain - the 'task' argument. Cannot contain 'model'. Can be used - to override the tokenizer with 'tokenizer'. If ``use_gpu`` is - True, 'device' will be set to 0 by default, unless 'device_map' is - passed. - """ - if not pipeline_cls and "task" not in pipeline_kwargs: - raise ValueError( - "If `pipeline_cls` is not specified, 'task' must be passed as a kwarg." - ) - if use_gpu and "device_map" not in pipeline_kwargs: - # default to using the GPU with the first index - pipeline_kwargs.setdefault("device", 0) - pipeline_cls = pipeline_cls or pipeline_factory - preprocessor = checkpoint.get_preprocessor() - with checkpoint.as_directory() as checkpoint_path: - # Tokenizer will be loaded automatically (no need to specify - # `tokenizer=checkpoint_path`) - pipeline = pipeline_cls(model=checkpoint_path, **pipeline_kwargs) - return cls( - pipeline=pipeline, - preprocessor=preprocessor, - use_gpu=use_gpu, - ) - - def _predict( - self, data: Union[list, pd.DataFrame], **pipeline_call_kwargs - ) -> pd.DataFrame: - ret = self.pipeline(data, **pipeline_call_kwargs) - # Remove unnecessary lists - try: - new_ret = [x[0] if isinstance(x, list) and len(x) == 1 else x for x in ret] - df = pd.DataFrame(new_ret) - except Exception: - # if we fail for any reason, just give up - df = pd.DataFrame(ret) - df.columns = [str(col) for col in df.columns] - return df - - @staticmethod - def _convert_data_for_pipeline( - data: pd.DataFrame, pipeline: Pipeline - ) -> Union[list, pd.DataFrame]: - """Convert the data into a format accepted by the pipeline. - - In most cases, this format is a list of strings.""" - # Special case where pd.DataFrame is allowed. - if isinstance(pipeline, TableQuestionAnsweringPipeline): - # TODO(team-ml): This may be a performance bottleneck. 
- return data - - # Otherwise, a list of columns as lists. - columns = [data[col].to_list() for col in data.columns] - # Flatten if it's only one column. - while isinstance(columns, list) and len(columns) == 1: - columns = columns[0] - return columns - - def predict( - self, - data: DataBatchType, - feature_columns: Optional[Union[List[str], List[int]]] = None, - **predict_kwargs, - ) -> DataBatchType: - """Run inference on data batch. - - The data is converted into a list (unless ``pipeline`` is a - ``TableQuestionAnsweringPipeline``) and passed to the ``pipeline`` - object. - - Args: - data: A batch of input data. Either a pandas DataFrame or numpy - array. - feature_columns: The names or indices of the columns in the - data to use as features to predict on. If None, use all - columns. - **pipeline_call_kwargs: additional kwargs to pass to the - ``pipeline`` object. - - Examples: - >>> import pandas as pd - >>> from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer - >>> from transformers.pipelines import pipeline - >>> from ray.train.huggingface import HuggingFacePredictor - >>> - >>> model_checkpoint = "gpt2" - >>> tokenizer_checkpoint = "sgugger/gpt2-like-tokenizer" - >>> tokenizer = AutoTokenizer.from_pretrained(tokenizer_checkpoint) - >>> - >>> model_config = AutoConfig.from_pretrained(model_checkpoint) - >>> model = AutoModelForCausalLM.from_config(model_config) - >>> predictor = HuggingFacePredictor( - ... pipeline=pipeline( - ... task="text-generation", model=model, tokenizer=tokenizer - ... ) - ... ) - >>> - >>> prompts = pd.DataFrame( - ... ["Complete me", "And me", "Please complete"], columns=["sentences"] - ... ) - >>> predictions = predictor.predict(prompts) - - - Returns: - Prediction result. 
- """ - return Predictor.predict( - self, data, feature_columns=feature_columns, **predict_kwargs - ) - - def _predict_pandas( - self, - data: "pd.DataFrame", - feature_columns: Optional[List[str]] = None, - **pipeline_call_kwargs, - ) -> "pd.DataFrame": - if TENSOR_COLUMN_NAME in data: - arr = data[TENSOR_COLUMN_NAME].to_numpy() - if feature_columns: - data = pd.DataFrame(arr[:, feature_columns]) - elif feature_columns: - data = data[feature_columns] - - data = data[feature_columns] if feature_columns else data - - data = self._convert_data_for_pipeline(data, self.pipeline) - return self._predict(data, **pipeline_call_kwargs) +__all__ = [ + "HuggingFacePredictor", +] diff --git a/python/ray/train/huggingface/huggingface_trainer.py b/python/ray/train/huggingface/huggingface_trainer.py index 5c22aea79912..ed5b015b2c88 100644 --- a/python/ray/train/huggingface/huggingface_trainer.py +++ b/python/ray/train/huggingface/huggingface_trainer.py @@ -1,470 +1,22 @@ -import importlib.util -import inspect -import os -import sys import warnings -from typing import TYPE_CHECKING, Any, Callable, Dict, Optional, Type -from packaging.version import Version +from ray.util.annotations import Deprecated - -import transformers -import transformers.modeling_utils -import transformers.trainer -import transformers.training_args -from transformers.trainer_utils import IntervalStrategy -from transformers.utils import is_datasets_available -from torch.utils.data import Dataset as TorchDataset - -from ray.air import session -from ray.air.checkpoint import Checkpoint -from ray.air.config import DatasetConfig, RunConfig, ScalingConfig -from ray.train.constants import ( - EVALUATION_DATASET_KEY, - TRAIN_DATASET_KEY, +from ray.train.hf_transformers.transformers_trainer import ( + TransformersTrainer, ) -from ray.train.data_parallel_trainer import DataParallelTrainer -from ray.train.huggingface._huggingface_utils import ( - TrainReportCallback, - process_datasets, - wrap_transformers_trainer, -) 
-from ray.train.torch import TorchConfig, TorchTrainer -from ray.train.trainer import GenDataset -from ray.util import PublicAPI - -if TYPE_CHECKING: - from ray.data.preprocessor import Preprocessor - -# Due to HF Dataset's dynamic module system, we need to dynamically import the -# datasets_modules module on every actor when training. -# We accomplish this by simply running the following bit of code directly -# in module you are currently viewing. This ensures that when we -# unpickle the HuggingFaceTrainer, it will be ran before pickle tries to -# import datasets_modules and prevents an exception from being thrown. -# Same logic is present inside HF Transformers Ray integration: -# https://github.com/huggingface/transformers/blob/\ -# 7d5fde991d598370d961be8cb7add6541e2b59ce/src/transformers/integrations.py#L271 -# Also see https://github.com/ray-project/ray/issues/28084 -if "datasets_modules" not in sys.modules and is_datasets_available(): - import datasets.load - - dynamic_modules_path = os.path.join( - datasets.load.init_dynamic_modules(), "__init__.py" - ) - # load dynamic_modules from path - spec = importlib.util.spec_from_file_location( - "datasets_modules", dynamic_modules_path - ) - datasets_modules = importlib.util.module_from_spec(spec) - sys.modules[spec.name] = datasets_modules - spec.loader.exec_module(datasets_modules) - - -TRAINER_INIT_FN_KEY = "_trainer_init_per_worker" - - -@PublicAPI(stability="alpha") -class HuggingFaceTrainer(TorchTrainer): - """A Trainer for data parallel HuggingFace Transformers on PyTorch training. - - This Trainer runs the ``transformers.Trainer.train()`` method on multiple - Ray Actors. The training is carried out in a distributed fashion through PyTorch - DDP. These actors already have the necessary torch process group already - configured for distributed PyTorch training. If you have PyTorch >= 1.12.0 - installed, you can also run FSDP training by specifying the ``fsdp`` argument - in ``TrainingArguments``. 
DeepSpeed is - also supported - see :doc:`/ray-air/examples/gptj_deepspeed_fine_tuning`. - For more information on configuring FSDP or DeepSpeed, refer to `Hugging Face - documentation `__. - - The training function ran on every Actor will first run the - specified ``trainer_init_per_worker`` function to obtain an instantiated - ``transformers.Trainer`` object. The ``trainer_init_per_worker`` function - will have access to preprocessed train and evaluation datasets. - - If the ``datasets`` dict contains a training dataset (denoted by - the "train" key), then it will be split into multiple dataset - shards, with each Actor training on a single shard. - All the other datasets will not be split. - - Please note that if you use a custom ``transformers.Trainer`` subclass, - the ``get_train_dataloader`` method will be wrapped around to disable - sharding by ``transformers.IterableDatasetShard``, as the dataset will - already be sharded on the Ray AIR side. - - You can also provide ``datasets.Dataset`` object or other dataset objects - allowed by ``transformers.Trainer`` directly in the ``trainer_init_per_worker`` - function, without specifying the ``datasets`` dict. It is recommended to initialize - those objects inside the function, as otherwise they will be serialized and passed - to the function, which may lead to long runtime and memory issues with large - amounts of data. In this case, the training dataset will be split - automatically by Transformers. - - HuggingFace loggers will be automatically disabled, and the ``local_rank`` - argument in ``TrainingArguments`` will be automatically set. Please note - that if you want to use CPU training, you will need to set the ``no_cuda`` - argument in ``TrainingArguments`` manually - otherwise, an exception - (segfault) may be thrown. - - This Trainer requires ``transformers>=4.19.0`` package. - It is tested with ``transformers==4.19.1``. - - Example: - .. 
code-block:: python - - # Based on - # huggingface/notebooks/examples/language_modeling_from_scratch.ipynb - - # Hugging Face imports - from datasets import load_dataset - import transformers - from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer - - import ray - from ray.train.huggingface import HuggingFaceTrainer - from ray.air.config import ScalingConfig - - # If using GPUs, set this to True. - use_gpu = False - - model_checkpoint = "gpt2" - tokenizer_checkpoint = "sgugger/gpt2-like-tokenizer" - block_size = 128 - - datasets = load_dataset("wikitext", "wikitext-2-raw-v1") - tokenizer = AutoTokenizer.from_pretrained(tokenizer_checkpoint) - - def tokenize_function(examples): - return tokenizer(examples["text"]) - - tokenized_datasets = datasets.map( - tokenize_function, batched=True, num_proc=1, remove_columns=["text"] - ) - - def group_texts(examples): - # Concatenate all texts. - concatenated_examples = { - k: sum(examples[k], []) for k in examples.keys() - } - total_length = len(concatenated_examples[list(examples.keys())[0]]) - # We drop the small remainder, we could add padding if the model - # supported it. - # instead of this drop, you can customize this part to your needs. - total_length = (total_length // block_size) * block_size - # Split by chunks of max_len. 
- result = { - k: [ - t[i : i + block_size] - for i in range(0, total_length, block_size) - ] - for k, t in concatenated_examples.items() - } - result["labels"] = result["input_ids"].copy() - return result - - lm_datasets = tokenized_datasets.map( - group_texts, - batched=True, - batch_size=1000, - num_proc=1, - ) - ray_train_ds = ray.data.from_huggingface(lm_datasets["train"]) - ray_evaluation_ds = ray.data.from_huggingface( - lm_datasets["validation"] - ) - - def trainer_init_per_worker(train_dataset, eval_dataset, **config): - model_config = AutoConfig.from_pretrained(model_checkpoint) - model = AutoModelForCausalLM.from_config(model_config) - args = transformers.TrainingArguments( - output_dir=f"{model_checkpoint}-wikitext2", - evaluation_strategy="epoch", - save_strategy="epoch", - logging_strategy="epoch", - learning_rate=2e-5, - weight_decay=0.01, - no_cuda=(not use_gpu), - ) - return transformers.Trainer( - model=model, - args=args, - train_dataset=train_dataset, - eval_dataset=eval_dataset, - ) - - scaling_config = ScalingConfig(num_workers=3, use_gpu=use_gpu) - trainer = HuggingFaceTrainer( - trainer_init_per_worker=trainer_init_per_worker, - scaling_config=scaling_config, - datasets={"train": ray_train_ds, "evaluation": ray_evaluation_ds}, - ) - result = trainer.fit() - - Args: - trainer_init_per_worker: The function that returns an instantiated - ``transformers.Trainer`` object and takes in the following arguments: - train ``Torch.Dataset``, optional evaluation ``Torch.Dataset`` - and config as kwargs. The Torch Datasets are automatically - created by converting the Datastreams internally before - they are passed into the function. - trainer_init_config: Configurations to pass into - ``trainer_init_per_worker`` as kwargs. - torch_config: Configuration for setting up the PyTorch backend. If set to - None, use the default configuration. This replaces the ``backend_config`` - arg of ``DataParallelTrainer``. Same as in ``TorchTrainer``. 
- scaling_config: Configuration for how to scale data parallel training. - dataset_config: Configuration for dataset ingest. - run_config: Configuration for the execution of the training run. - datasets: Any datasets to use for training. Use - the key "train" to denote which dataset is the training - dataset and key "evaluation" to denote the evaluation - dataset. Can only contain a training dataset - and up to one extra dataset to be used for evaluation. - If a ``preprocessor`` is provided and has not already been fit, - it will be fit on the training dataset. All datasets will be - transformed by the ``preprocessor`` if one is provided. - preprocessor: A ray.data.Preprocessor to preprocess the - provided datasets. - resume_from_checkpoint: A checkpoint to resume training from. - """ - - _dataset_config = { - # training dataset should be split by us - "train": DatasetConfig(fit=True, split=True), - # do not split eval dataset, as HF has a system to parallelize - # evaluation across workers, and it requires each worker - # to have the full eval dataset - "evaluation": DatasetConfig(split=False), - } - - def __init__( - self, - trainer_init_per_worker: Callable[ - [Optional[TorchDataset], Optional[TorchDataset], Any], - transformers.trainer.Trainer, - ], - *, - trainer_init_config: Optional[Dict] = None, - torch_config: Optional[TorchConfig] = None, - scaling_config: Optional[ScalingConfig] = None, - dataset_config: Optional[Dict[str, DatasetConfig]] = None, - run_config: Optional[RunConfig] = None, - datasets: Optional[Dict[str, GenDataset]] = None, - preprocessor: Optional["Preprocessor"] = None, - resume_from_checkpoint: Optional[Checkpoint] = None, - ): - - # Functionality required for HuggingFaceTrainer only added in this - # version - if Version(transformers.__version__) < Version("4.19.0"): - raise RuntimeError( - "HuggingFaceTrainer requires transformers>=4.19.0, but you " - f"have {transformers.__version__} which is incompatible. 
" - "Update on all nodes with `pip install -U 'transformers>=4.19.0'`." - ) - - self._validate_trainer_init_per_worker( - trainer_init_per_worker, "trainer_init_per_worker" - ) - - super().__init__( - train_loop_per_worker=_huggingface_train_loop_per_worker, - train_loop_config=self._create_trainer_init_config( - trainer_init_per_worker, trainer_init_config - ), - torch_config=torch_config, - scaling_config=scaling_config, - dataset_config=dataset_config, - run_config=run_config, - datasets=datasets, - preprocessor=preprocessor, - resume_from_checkpoint=resume_from_checkpoint, - ) - - @classmethod - def _create_trainer_init_config( - cls, - trainer_init_per_worker: Callable[ - [TorchDataset, Optional[TorchDataset], Any], - transformers.trainer.Trainer, - ], - trainer_init_config: Optional[Dict[str, Any]], - ) -> Dict[str, Any]: - trainer_init_config = trainer_init_config.copy() if trainer_init_config else {} - if TRAINER_INIT_FN_KEY in trainer_init_config: - raise ValueError( - f"'{TRAINER_INIT_FN_KEY}' is a reserved key in `trainer_init_config`." - ) - if trainer_init_per_worker: - trainer_init_config[TRAINER_INIT_FN_KEY] = trainer_init_per_worker - return trainer_init_config - - @classmethod - def restore( - cls: Type["HuggingFaceTrainer"], - path: str, - trainer_init_per_worker: Optional[ - Callable[ - [TorchDataset, Optional[TorchDataset], Any], - transformers.trainer.Trainer, - ] - ] = None, - trainer_init_config: Optional[Dict] = None, - datasets: Optional[Dict[str, GenDataset]] = None, - preprocessor: Optional["Preprocessor"] = None, - scaling_config: Optional[ScalingConfig] = None, - ) -> "HuggingFaceTrainer": - """Restores a HuggingFaceTrainer from a previously interrupted/failed run. - - Args: - trainer_init_per_worker: Optionally re-specified trainer init function. - This should be used to re-specify a function that is not - restorable in a new Ray cluster (e.g., it holds onto outdated - object references). 
This should be the same trainer init - that was passed to the original trainer constructor. - trainer_init_config: Optionally re-specified trainer init config. - This should similarly be used if the original `train_loop_config` - contained outdated object references, and it should not be modified - from what was originally passed in. - - See :meth:`BaseTrainer.restore() ` - for descriptions of the other arguments. - - Returns: - HuggingFaceTrainer: A restored instance of `HuggingFaceTrainer` - """ - return super(DataParallelTrainer, cls).restore( - path=path, - trainer_init_per_worker=trainer_init_per_worker, - trainer_init_config=trainer_init_config, - datasets=datasets, - preprocessor=preprocessor, - scaling_config=scaling_config, - ) - - def _validate_trainer_init_per_worker( - self, trainer_init_per_worker: Callable, fn_name: str - ) -> None: - num_params = len(inspect.signature(trainer_init_per_worker).parameters) - if num_params < 3: - raise ValueError( - f"{fn_name} should take in at least 3 arguments, " - f"but it accepts {num_params} arguments instead." - ) - - def _validate_attributes(self): - for key, conf in self._dataset_config.items(): - if conf.use_stream_api: - raise ValueError( - "HuggingFaceTrainer does not support `use_stream_api`." - ) - gpus_per_worker = self.scaling_config.num_gpus_per_worker - if gpus_per_worker > 1: - raise ValueError( - f"You have assigned {gpus_per_worker} GPUs per worker. " - "This is not supported by HuggingFace, which expects " - "one GPU per worker in DDP mode and will fail " - "if more are assigned." - ) - if gpus_per_worker != int(gpus_per_worker): - raise ValueError( - f"You have assigned {gpus_per_worker} GPUs per worker, " - "but fractional GPUs are not supported by HuggingFace." 
- ) - - super()._validate_attributes() - - -def _huggingface_train_loop_per_worker(config): - """Per-worker training loop for HuggingFace Transformers.""" - trainer_init_per_worker = config.pop("_trainer_init_per_worker") - - train_dataset = session.get_dataset_shard(TRAIN_DATASET_KEY) - eval_dataset = session.get_dataset_shard(EVALUATION_DATASET_KEY) - - train_torch_dataset, eval_torch_dataset = process_datasets( - train_dataset, - eval_dataset, - ) - - trainer: transformers.trainer.Trainer = trainer_init_per_worker( - train_torch_dataset, eval_torch_dataset, **config - ) - - strategies = [ - strategy - for strategy in (trainer.args.evaluation_strategy, trainer.args.save_strategy) - if strategy not in ("no", IntervalStrategy.NO) - ] - strategies = [trainer.args.logging_strategy] + strategies - if not all(strategy == strategies[0] for strategy in strategies[1:]): - raise ValueError( - "When using Ray AIR,`logging_strategy`, `evaluation_strategy` " - "and `save_strategy` must all be set to the same value. " - "`evaluation_strategy` or `save_strategy` may also be set to 'no'.\n" - f"Got `logging_strategy`={trainer.args.logging_strategy}\n" - f"`evaluation_strategy`={trainer.args.evaluation_strategy}\n" - f"`save_strategy`={trainer.args.save_strategy}" - ) - - if trainer.args.save_strategy in ("steps", IntervalStrategy.STEPS): - if ( - trainer.args.save_steps < trainer.args.logging_steps - or trainer.args.save_steps % trainer.args.logging_steps != 0 - ): - raise ValueError( - "When using 'steps' `save_strategy`, `save_steps` must be " - "equal or bigger to `logging_steps`, and must be divisible " - "by `logging_steps` (so that saving occurs at the same time " - f"logging does). Got `save_steps`={trainer.args.save_steps}, " - f"`logging_steps`={trainer.args.logging_steps}." 
- ) - - if trainer.args.evaluation_strategy in ("steps", IntervalStrategy.STEPS): - if trainer.args.logging_steps != trainer.args.eval_steps: - raise ValueError( - "`logging_steps` must be equal to `eval_steps`. " - f"Got `logging_steps`={trainer.args.logging_steps}, " - f"`eval_steps`={trainer.args.eval_steps}" - ) - - if trainer.args.load_best_model_at_end: - raise ValueError( - "As Ray AIR replaces Hugging Face checkpointing, " - "`load_best_model_at_end` must be set to False.\n" - "You can obtain the AIR Checkpoint with " - "`Result.checkpoint` returned by the `fit()` method " - "of this Trainer, and the model itself by calling " - "`Checkpoint.get_model()`.\n" - "You can configure the checkpointing by setting " - "`run_config.checkpoint_config`." - ) - if trainer.args.push_to_hub and not trainer.args.hub_token: - warnings.warn( - "You have set `push_to_hub=True` but didn't specify `hub_token`. " - "Pushing to hub will most likely fail, as the credentials will not " - "be automatically propagated from the local enviroment to the Ray Actors. " - "If that happens, specify `hub_token` in `TrainingArguments`." 
- ) +from ._deprecation_msg import deprecation_msg - trainer = wrap_transformers_trainer(trainer) - # ensure no HF logging callbacks are added - # aside from doubling functionality with our callbacks, - # the Wandb callbacks causes training to freeze - integration_callbacks = transformers.trainer.get_reporting_integration_callbacks( - trainer.args.report_to - ) - for callback in integration_callbacks: - trainer.pop_callback(callback) +@Deprecated(message=deprecation_msg) +class HuggingFaceTrainer(TransformersTrainer): + # Use __new__ as it is much less likely to be overriden + # than __init__ + def __new__(cls: type, *args, **kwargs): + warnings.warn(deprecation_msg, DeprecationWarning) + return super(HuggingFaceTrainer, cls).__new__(cls) - trainer.add_callback(TrainReportCallback) - checkpoint = session.get_checkpoint() - if checkpoint: - with checkpoint.as_directory() as checkpoint_path: - trainer.train(resume_from_checkpoint=checkpoint_path) - else: - trainer.train() +__all__ = [ + "HuggingFaceTrainer", +] diff --git a/python/ray/train/tests/test_accelerate_trainer_gpu.py b/python/ray/train/tests/test_accelerate_trainer_gpu.py index f71f3a90a76e..a57290aea070 100644 --- a/python/ray/train/tests/test_accelerate_trainer_gpu.py +++ b/python/ray/train/tests/test_accelerate_trainer_gpu.py @@ -11,7 +11,7 @@ from ray.air import session from ray.train.tests.dummy_preprocessor import DummyPreprocessor from ray.train.torch.torch_checkpoint import TorchCheckpoint -from ray.train.huggingface.accelerate import AccelerateTrainer +from ray.train.hf_accelerate import AccelerateTrainer from accelerate import Accelerator ACCELERATE_CONFIG_CPU = """compute_environment: LOCAL_MACHINE diff --git a/python/ray/train/tests/test_checkpoints.py b/python/ray/train/tests/test_checkpoints.py index b99f42501e96..c5447c2e7a2c 100644 --- a/python/ray/train/tests/test_checkpoints.py +++ b/python/ray/train/tests/test_checkpoints.py @@ -3,7 +3,7 @@ import pytest from ray.air.constants import 
MAX_REPR_LENGTH -from ray.train.huggingface import HuggingFaceCheckpoint +from ray.train.hf_transformers import TransformersCheckpoint from ray.train.lightgbm import LightGBMCheckpoint from ray.train.rl import RLCheckpoint from ray.train.sklearn import SklearnCheckpoint @@ -15,7 +15,7 @@ @pytest.mark.parametrize( "checkpoint", [ - HuggingFaceCheckpoint(data_dict={"foo": "bar"}), + TransformersCheckpoint(data_dict={"foo": "bar"}), LightGBMCheckpoint(data_dict={"foo": "bar"}), RLCheckpoint(data_dict={"foo": "bar"}), SklearnCheckpoint(data_dict={"foo": "bar"}), diff --git a/python/ray/train/tests/test_trainer_restore.py b/python/ray/train/tests/test_trainer_restore.py index 94da6f7eae62..624ebd7b7c2a 100644 --- a/python/ray/train/tests/test_trainer_restore.py +++ b/python/ray/train/tests/test_trainer_restore.py @@ -11,7 +11,7 @@ from ray.train.torch import TorchTrainer from ray.train.xgboost import XGBoostTrainer from ray.train.lightgbm import LightGBMTrainer -from ray.train.huggingface import HuggingFaceTrainer +from ray.train.hf_transformers import TransformersTrainer from ray.train.rl import RLTrainer from ray.tune import Callback from ray.data.preprocessors.batch_mapper import BatchMapper @@ -172,14 +172,14 @@ def test_gbdt_trainer_restore(ray_start_6_cpus, tmpdir, trainer_cls): assert tmpdir / exp_name in result.log_dir.parents -@pytest.mark.parametrize("trainer_cls", [HuggingFaceTrainer]) +@pytest.mark.parametrize("trainer_cls", [TransformersTrainer]) def test_trainer_with_init_fn_restore(ray_start_4_cpus, tmpdir, trainer_cls): """Tests restore for data parallel trainers that take in a `train_init` function and config. 
Success criteria: same as for data parallel trainers.""" exp_name = f"{trainer_cls.__name__}_restore_test" - if trainer_cls == HuggingFaceTrainer: - from ray.train.tests.test_huggingface_trainer import ( + if trainer_cls == TransformersTrainer: + from ray.train.tests.test_transformers_trainer import ( train_function as hf_init, train_df, ) diff --git a/python/ray/train/tests/test_huggingface_checkpoint.py b/python/ray/train/tests/test_transformers_checkpoint.py similarity index 85% rename from python/ray/train/tests/test_huggingface_checkpoint.py rename to python/ray/train/tests/test_transformers_checkpoint.py index b272b2568425..557b237d7fa7 100644 --- a/python/ray/train/tests/test_huggingface_checkpoint.py +++ b/python/ray/train/tests/test_transformers_checkpoint.py @@ -3,11 +3,14 @@ from transformers.pipelines import pipeline import ray -from ray.train.huggingface import HuggingFaceCheckpoint, HuggingFacePredictor +from ray.train.hf_transformers import ( + TransformersCheckpoint, + TransformersPredictor, +) from ray.train.tests.dummy_preprocessor import DummyPreprocessor -from test_huggingface_predictor import ( +from test_transformers_predictor import ( model_checkpoint, tokenizer_checkpoint, test_strings, @@ -15,13 +18,13 @@ ) -def test_huggingface_checkpoint(tmpdir, ray_start_runtime_env): +def test_transformers_checkpoint(tmpdir, ray_start_runtime_env): model_config = AutoConfig.from_pretrained(model_checkpoint) model = AutoModelForCausalLM.from_config(model_config) tokenizer = AutoTokenizer.from_pretrained(tokenizer_checkpoint) preprocessor = DummyPreprocessor() - checkpoint = HuggingFaceCheckpoint.from_model( + checkpoint = TransformersCheckpoint.from_model( model, tokenizer, path=tmpdir, preprocessor=preprocessor ) checkpoint_model = checkpoint.get_model(AutoModelForCausalLM) @@ -31,7 +34,7 @@ def test_huggingface_checkpoint(tmpdir, ray_start_runtime_env): @ray.remote def test(model, tokenizer, preprocessor): os.chdir(tmpdir) - predictor = 
HuggingFacePredictor( + predictor = TransformersPredictor( pipeline=pipeline( task="text-generation", model=model, diff --git a/python/ray/train/tests/test_huggingface_gpu.py b/python/ray/train/tests/test_transformers_gpu.py similarity index 83% rename from python/ray/train/tests/test_huggingface_gpu.py rename to python/ray/train/tests/test_transformers_gpu.py index 24aff99667dd..9eb8ea1cb2b3 100644 --- a/python/ray/train/tests/test_huggingface_gpu.py +++ b/python/ray/train/tests/test_transformers_gpu.py @@ -9,7 +9,10 @@ import ray -from ray.train.huggingface import HuggingFaceCheckpoint, HuggingFacePredictor +from ray.train.hf_transformers import ( + TransformersCheckpoint, + TransformersPredictor, +) test_strings = ["Complete me", "And me", "Please complete"] prompts = pd.DataFrame(test_strings, columns=["sentences"]) @@ -25,12 +28,12 @@ def create_checkpoint(): model_config = AutoConfig.from_pretrained(model_checkpoint) model = AutoModelForCausalLM.from_config(model_config) tokenizer = AutoTokenizer.from_pretrained(tokenizer_checkpoint) - checkpoint = HuggingFaceCheckpoint.from_model(model, tokenizer, path=tmpdir) + checkpoint = TransformersCheckpoint.from_model(model, tokenizer, path=tmpdir) # Serialize to dict so we can remove the temporary directory - return HuggingFaceCheckpoint.from_dict(checkpoint.to_dict()) + return TransformersCheckpoint.from_dict(checkpoint.to_dict()) -class AssertingHuggingFacePredictor(HuggingFacePredictor): +class AssertingTransformersPredictor(TransformersPredictor): def __init__(self, pipeline=None, preprocessor=None, use_gpu: bool = False): super().__init__(pipeline, preprocessor, use_gpu) assert use_gpu @@ -48,7 +51,7 @@ def test_predict_batch(ray_start_4_cpus, caplog, batch_type, device): kwargs["device"] = device predictor = BatchPredictor.from_checkpoint( - checkpoint, AssertingHuggingFacePredictor, task="text-generation", **kwargs + checkpoint, AssertingTransformersPredictor, task="text-generation", **kwargs ) # Todo: Ray 
data does not support numpy string arrays well diff --git a/python/ray/train/tests/test_huggingface_predictor.py b/python/ray/train/tests/test_transformers_predictor.py similarity index 85% rename from python/ray/train/tests/test_huggingface_predictor.py rename to python/ray/train/tests/test_transformers_predictor.py index 4c3f7eb30825..3896c262ba01 100644 --- a/python/ray/train/tests/test_huggingface_predictor.py +++ b/python/ray/train/tests/test_transformers_predictor.py @@ -15,7 +15,10 @@ import ray -from ray.train.huggingface import HuggingFaceCheckpoint, HuggingFacePredictor +from ray.train.hf_transformers import ( + TransformersCheckpoint, + TransformersPredictor, +) from ray.train.tests.dummy_preprocessor import DummyPreprocessor @@ -29,12 +32,12 @@ def test_repr(tmpdir): - predictor = HuggingFacePredictor() + predictor = TransformersPredictor() representation = repr(predictor) assert len(representation) < MAX_REPR_LENGTH - pattern = re.compile("^HuggingFacePredictor\\((.*)\\)$") + pattern = re.compile("^TransformersPredictor\\((.*)\\)$") assert pattern.match(representation) @@ -53,7 +56,7 @@ def test(use_preprocessor): preprocessor = None model_config = AutoConfig.from_pretrained(model_checkpoint) model = AutoModelForCausalLM.from_config(model_config) - predictor = HuggingFacePredictor( + predictor = TransformersPredictor( pipeline=pipeline( task="text-generation", model=model, @@ -79,8 +82,10 @@ def test(): model_config = AutoConfig.from_pretrained(model_checkpoint) model = AutoModelForCausalLM.from_config(model_config) tokenizer = AutoTokenizer.from_pretrained(tokenizer_checkpoint) - checkpoint = HuggingFaceCheckpoint.from_model(model, tokenizer, path=tmpdir) - predictor = HuggingFacePredictor.from_checkpoint( + checkpoint = TransformersCheckpoint.from_model( + model, tokenizer, path=tmpdir + ) + predictor = TransformersPredictor.from_checkpoint( checkpoint, task="text-generation", ) @@ -97,9 +102,9 @@ def create_checkpoint(): model_config = 
AutoConfig.from_pretrained(model_checkpoint) model = AutoModelForCausalLM.from_config(model_config) tokenizer = AutoTokenizer.from_pretrained(tokenizer_checkpoint) - checkpoint = HuggingFaceCheckpoint.from_model(model, tokenizer, path=tmpdir) + checkpoint = TransformersCheckpoint.from_model(model, tokenizer, path=tmpdir) # Serialize to dict so we can remove the temporary directory - return HuggingFaceCheckpoint.from_dict(checkpoint.to_dict()) + return TransformersCheckpoint.from_dict(checkpoint.to_dict()) # TODO(ml-team): Add np.ndarray to batch_type @@ -107,7 +112,7 @@ def create_checkpoint(): def test_predict_batch(ray_start_4_cpus, batch_type): checkpoint = create_checkpoint() predictor = BatchPredictor.from_checkpoint( - checkpoint, HuggingFacePredictor, task="text-generation" + checkpoint, TransformersPredictor, task="text-generation" ) # Todo: Ray data does not support numpy string arrays well diff --git a/python/ray/train/tests/test_huggingface_trainer.py b/python/ray/train/tests/test_transformers_trainer.py similarity index 85% rename from python/ray/train/tests/test_huggingface_trainer.py rename to python/ray/train/tests/test_transformers_trainer.py index e66f68442ac0..1098dad5e0cf 100644 --- a/python/ray/train/tests/test_huggingface_trainer.py +++ b/python/ray/train/tests/test_transformers_trainer.py @@ -11,10 +11,10 @@ import ray.data from ray.train.batch_predictor import BatchPredictor -from ray.train.huggingface import ( - HuggingFacePredictor, - HuggingFaceTrainer, - HuggingFaceCheckpoint, +from ray.train.hf_transformers import ( + TransformersPredictor, + TransformersTrainer, + TransformersCheckpoint, ) from ray.train.trainer import TrainingFailedError from ray.air.config import ScalingConfig @@ -92,12 +92,39 @@ def train_function_local_dataset(train_dataset, eval_dataset=None, **config): return train_function(train_dataset, eval_dataset, **config) +def test_deprecations(ray_start_4_cpus): + """Tests that soft deprecations warn but still can be 
used""" + from ray.train.huggingface import ( + HuggingFaceCheckpoint, + HuggingFacePredictor, + HuggingFaceTrainer, + ) + + ray_train = ray.data.from_pandas(train_df) + ray_validation = ray.data.from_pandas(validation_df) + + with pytest.warns(DeprecationWarning): + obj = HuggingFaceCheckpoint.from_dict({"foo": "bar"}) + assert isinstance(obj, TransformersCheckpoint) + + with pytest.warns(DeprecationWarning): + obj = HuggingFacePredictor() + assert isinstance(obj, TransformersPredictor) + + with pytest.warns(DeprecationWarning): + obj = HuggingFaceTrainer( + train_function, + datasets={"train": ray_train, "evaluation": ray_validation}, + ) + assert isinstance(obj, TransformersTrainer) + + @pytest.mark.parametrize("save_strategy", ["no", "epoch"]) def test_e2e(ray_start_4_cpus, save_strategy): ray_train = ray.data.from_pandas(train_df) ray_validation = ray.data.from_pandas(validation_df) scaling_config = ScalingConfig(num_workers=2, use_gpu=False) - trainer = HuggingFaceTrainer( + trainer = TransformersTrainer( trainer_init_per_worker=train_function, trainer_init_config={"epochs": 4, "save_strategy": save_strategy}, scaling_config=scaling_config, @@ -108,10 +135,10 @@ def test_e2e(ray_start_4_cpus, save_strategy): assert result.metrics["epoch"] == 4 assert result.metrics["training_iteration"] == 4 assert result.checkpoint - assert isinstance(result.checkpoint, HuggingFaceCheckpoint) + assert isinstance(result.checkpoint, TransformersCheckpoint) assert "eval_loss" in result.metrics - trainer2 = HuggingFaceTrainer( + trainer2 = TransformersTrainer( trainer_init_per_worker=train_function, trainer_init_config={ "epochs": 5, @@ -126,12 +153,12 @@ def test_e2e(ray_start_4_cpus, save_strategy): assert result2.metrics["epoch"] == 5 assert result2.metrics["training_iteration"] == 1 assert result2.checkpoint - assert isinstance(result2.checkpoint, HuggingFaceCheckpoint) + assert isinstance(result2.checkpoint, TransformersCheckpoint) assert "eval_loss" in result2.metrics 
predictor = BatchPredictor.from_checkpoint( result2.checkpoint, - HuggingFacePredictor, + TransformersPredictor, task="text-generation", tokenizer=AutoTokenizer.from_pretrained(tokenizer_checkpoint), ) @@ -142,7 +169,7 @@ def test_e2e(ray_start_4_cpus, save_strategy): def test_training_local_dataset(ray_start_4_cpus): scaling_config = ScalingConfig(num_workers=2, use_gpu=False) - trainer = HuggingFaceTrainer( + trainer = TransformersTrainer( trainer_init_per_worker=train_function_local_dataset, trainer_init_config={"epochs": 1, "save_strategy": "no"}, scaling_config=scaling_config, @@ -152,7 +179,7 @@ def test_training_local_dataset(ray_start_4_cpus): assert result.metrics["epoch"] == 1 assert result.metrics["training_iteration"] == 1 assert result.checkpoint - assert isinstance(result.checkpoint, HuggingFaceCheckpoint) + assert isinstance(result.checkpoint, TransformersCheckpoint) assert "eval_loss" in result.metrics @@ -172,7 +199,7 @@ def fit_and_check_for_error(trainer, error_type=ValueError): ) # load_best_model_at_end set to True should raise an exception - trainer = HuggingFaceTrainer( + trainer = TransformersTrainer( trainer_init_config={ "epochs": 1, "load_best_model_at_end": True, @@ -183,7 +210,7 @@ def fit_and_check_for_error(trainer, error_type=ValueError): fit_and_check_for_error(trainer) # logging strategy set to no should raise an exception - trainer = HuggingFaceTrainer( + trainer = TransformersTrainer( trainer_init_config={ "epochs": 1, "logging_strategy": "no", @@ -193,7 +220,7 @@ def fit_and_check_for_error(trainer, error_type=ValueError): fit_and_check_for_error(trainer) # logging steps != eval steps should raise an exception - trainer = HuggingFaceTrainer( + trainer = TransformersTrainer( trainer_init_config={ "epochs": 1, "logging_strategy": "steps", @@ -212,7 +239,7 @@ def fit_and_check_for_error(trainer, error_type=ValueError): ("epoch", "steps", "epoch"), ("steps", "epoch", "steps"), ): - trainer = HuggingFaceTrainer( + trainer = 
TransformersTrainer( trainer_init_config={ "epochs": 1, "load_best_model_at_end": True, @@ -232,7 +259,7 @@ def test_tune(ray_start_8_cpus): scaling_config = ScalingConfig( num_workers=2, use_gpu=False, trainer_resources={"CPU": 0} ) - trainer = HuggingFaceTrainer( + trainer = TransformersTrainer( trainer_init_per_worker=train_function, scaling_config=scaling_config, datasets={"train": ray_train, "evaluation": ray_validation}, @@ -280,7 +307,7 @@ def train_function_with_metric(train_dataset, eval_dataset=None, **config): print(metric) return train_function(train_dataset, eval_dataset=eval_dataset, **config) - trainer = HuggingFaceTrainer( + trainer = TransformersTrainer( trainer_init_per_worker=train_function_with_metric, trainer_init_config={"epochs": 1}, scaling_config=scaling_config, diff --git a/python/ray/train/tests/test_huggingface_trainer_steps.py b/python/ray/train/tests/test_transformers_trainer_steps.py similarity index 95% rename from python/ray/train/tests/test_huggingface_trainer_steps.py rename to python/ray/train/tests/test_transformers_trainer_steps.py index d7c3838846f9..edae952bbbcb 100644 --- a/python/ray/train/tests/test_huggingface_trainer_steps.py +++ b/python/ray/train/tests/test_transformers_trainer_steps.py @@ -12,7 +12,10 @@ import ray.data from ray.train.batch_predictor import BatchPredictor -from ray.train.huggingface import HuggingFacePredictor, HuggingFaceTrainer +from ray.train.hf_transformers import ( + TransformersPredictor, + TransformersTrainer, +) from ray.train.trainer import TrainingFailedError from ray.air.config import ScalingConfig from ray.train.tests._huggingface_data import train_data, validation_data @@ -73,7 +76,7 @@ def test_e2e_steps(ray_start_4_cpus, save_steps, logging_steps): scaling_config = ScalingConfig(num_workers=2, use_gpu=False) epochs = 4 - trainer = HuggingFaceTrainer( + trainer = TransformersTrainer( trainer_init_per_worker=train_function, trainer_init_config={ "epochs": epochs, @@ -99,7 +102,7 @@ def 
test_e2e_steps(ray_start_4_cpus, save_steps, logging_steps): assert result.checkpoint assert "eval_loss" in result.metrics - trainer2 = HuggingFaceTrainer( + trainer2 = TransformersTrainer( trainer_init_per_worker=train_function, trainer_init_config={ "epochs": epochs + 1, @@ -122,7 +125,7 @@ def test_e2e_steps(ray_start_4_cpus, save_steps, logging_steps): predictor = BatchPredictor.from_checkpoint( result2.checkpoint, - HuggingFacePredictor, + TransformersPredictor, task="text-generation", tokenizer=AutoTokenizer.from_pretrained(tokenizer_checkpoint), ) From 9fb7f6e33718683ea7a6f882d3dc681bf0fc8643 Mon Sep 17 00:00:00 2001 From: Amog Kamsetty Date: Mon, 8 May 2023 14:23:49 -0700 Subject: [PATCH 282/424] [AIR] [Docs] Clarify Training needs to be run for Batch Prediction to work (#35095) Signed-off-by: amogkam --- doc/source/ray-air/examples/pytorch_tabular_starter.py | 4 ++-- doc/source/ray-air/examples/tf_tabular_starter.py | 4 ++-- doc/source/ray-air/examples/xgboost_starter.py | 4 ++-- doc/source/ray-air/getting-started.rst | 6 +++++- 4 files changed, 11 insertions(+), 7 deletions(-) diff --git a/doc/source/ray-air/examples/pytorch_tabular_starter.py b/doc/source/ray-air/examples/pytorch_tabular_starter.py index 4df2d99631a7..be01fba0d54d 100644 --- a/doc/source/ray-air/examples/pytorch_tabular_starter.py +++ b/doc/source/ray-air/examples/pytorch_tabular_starter.py @@ -98,8 +98,8 @@ def train_loop_per_worker(config): preprocessor=preprocessor, ) # Execute training. 
-result = trainer.fit() -print(f"Last result: {result.metrics}") +best_result = trainer.fit() +print(f"Last result: {best_result.metrics}") # Last result: {'loss': 0.6559339960416158, ...} # __air_pytorch_train_end__ diff --git a/doc/source/ray-air/examples/tf_tabular_starter.py b/doc/source/ray-air/examples/tf_tabular_starter.py index aa83346a76c0..5c7315d2c784 100644 --- a/doc/source/ray-air/examples/tf_tabular_starter.py +++ b/doc/source/ray-air/examples/tf_tabular_starter.py @@ -103,8 +103,8 @@ def train_loop_per_worker(config): preprocessor=preprocessor, ) -result = trainer.fit() -print(f"Last result: {result.metrics}") +best_result = trainer.fit() +print(f"Last result: {best_result.metrics}") # Last result: {'loss': 8.997025489807129, ...} # __air_tf_train_end__ diff --git a/doc/source/ray-air/examples/xgboost_starter.py b/doc/source/ray-air/examples/xgboost_starter.py index 7938072e5e2d..3925e1672b09 100644 --- a/doc/source/ray-air/examples/xgboost_starter.py +++ b/doc/source/ray-air/examples/xgboost_starter.py @@ -45,8 +45,8 @@ datasets={"train": train_dataset, "valid": valid_dataset}, preprocessor=preprocessor, ) -result = trainer.fit() -print(result.metrics) +best_result = trainer.fit() +print(best_result.metrics) # __air_xgb_train_end__ # __air_xgb_tuner_start__ diff --git a/doc/source/ray-air/getting-started.rst b/doc/source/ray-air/getting-started.rst index abb5fc29068a..75e94cbd2105 100644 --- a/doc/source/ray-air/getting-started.rst +++ b/doc/source/ray-air/getting-started.rst @@ -107,6 +107,8 @@ Then, we define a ``Preprocessor`` pipeline for our task: :start-after: __air_tf_preprocess_start__ :end-before: __air_tf_preprocess_end__ +.. _air-getting-started-training: + Training ~~~~~~~~ @@ -135,6 +137,8 @@ Train a model with a ``Trainer`` with common ML frameworks: :start-after: __air_tf_train_start__ :end-before: __air_tf_train_end__ +.. 
_air-getting-started-tuning: + Hyperparameter Tuning ~~~~~~~~~~~~~~~~~~~~~ @@ -173,7 +177,7 @@ Then use the ``Tuner`` to run the search: Batch Inference ~~~~~~~~~~~~~~~ -Use the trained model for scalable batch prediction with a ``BatchPredictor``. +After running the steps in :ref:`Training ` or :ref:`Tuning `, use the trained model for scalable batch prediction with a ``BatchPredictor``. .. tab-set:: From 0bf93358632fd9c8541c8ba7a970518915a74b4d Mon Sep 17 00:00:00 2001 From: Cindy Zhang Date: Mon, 8 May 2023 15:39:48 -0700 Subject: [PATCH 283/424] [serve] Add health checking for http proxy actors (#34944) Add health checking to HTTP Proxy Add no-op check_health() method to proxy actor - In controller, periodically call check_health() method - Before the first health check returns, status is STARTING - If an error occurs, set UNHEALTHY - If request times out, set UNHEALTHY --- .../modules/serve/tests/test_serve_agent.py | 75 +++++----- python/ray/serve/_private/common.py | 6 + python/ray/serve/_private/constants.py | 3 + python/ray/serve/_private/http_proxy.py | 11 +- python/ray/serve/_private/http_state.py | 130 ++++++++++++++++-- python/ray/serve/_private/utils.py | 2 +- python/ray/serve/controller.py | 3 + python/ray/serve/schema.py | 23 ++++ python/ray/serve/tests/test_http_state.py | 67 ++++++++- 9 files changed, 266 insertions(+), 54 deletions(-) diff --git a/dashboard/modules/serve/tests/test_serve_agent.py b/dashboard/modules/serve/tests/test_serve_agent.py index 7d7d43c335f1..03b1f63faf7d 100644 --- a/dashboard/modules/serve/tests/test_serve_agent.py +++ b/dashboard/modules/serve/tests/test_serve_agent.py @@ -1,4 +1,5 @@ import copy +import os import sys from typing import Dict @@ -13,7 +14,12 @@ from ray.serve._private.constants import SERVE_NAMESPACE, MULTI_APP_MIGRATION_MESSAGE from ray.serve.tests.conftest import * # noqa: F401 F403 from ray.serve.schema import ServeInstanceDetails -from ray.serve._private.common import ApplicationStatus, 
DeploymentStatus, ReplicaState +from ray.serve._private.common import ( + ApplicationStatus, + DeploymentStatus, + ReplicaState, + HTTPProxyStatus, +) from ray.serve._private.constants import ( SERVE_DEFAULT_APP_NAME, DEPLOYMENT_NAME_PREFIX_SEPARATOR, @@ -493,17 +499,29 @@ def test_get_serve_instance_details(ray_start_stop, f_deployment_options): "applications": [ { "name": "app1", - "route_prefix": "/app1", + "route_prefix": "/apple", "import_path": world_import_path, "deployments": [f_deployment_options], }, { "name": "app2", - "route_prefix": "/app2", + "route_prefix": "/banana", "import_path": fastapi_import_path, }, ], } + expected_values = { + "app1": { + "route_prefix": "/apple", + "docs_path": None, + "deployments": {"app1_f", "app1_BasicDriver"}, + }, + "app2": { + "route_prefix": "/banana", + "docs_path": "/my_docs", + "deployments": {"app2_FastAPIDeployment"}, + }, + } deploy_config_multi_app(config1) @@ -526,43 +544,28 @@ def applications_running(): assert serve_details.http_options.host == "127.0.0.1" assert serve_details.http_options.port == 8005 print("Confirmed fetched proxy location, host and port metadata correct.") + # Check HTTP Proxy statuses + for proxy in serve_details.http_proxies.values(): + assert proxy.status == HTTPProxyStatus.HEALTHY + assert os.path.exists("/tmp/ray/session_latest/logs" + proxy.log_file_path) + print("Checked HTTP Proxy details.") app_details = serve_details.applications + # CHECK: application details + for i, app in enumerate(["app1", "app2"]): + assert ( + app_details[app].deployed_app_config.dict(exclude_unset=True) + == config1["applications"][i] + ) + assert app_details[app].last_deployed_time_s > 0 + assert app_details[app].route_prefix == expected_values[app]["route_prefix"] + assert app_details[app].docs_path == expected_values[app]["docs_path"] - # CHECK: app configs are equal - assert ( - app_details["app1"].deployed_app_config.dict(exclude_unset=True) - == config1["applications"][0] - ) - assert ( - 
app_details["app2"].deployed_app_config.dict(exclude_unset=True) - == config1["applications"][1] - ) - print("Confirmed the deployed app configs from the fetched metadata is correct.") - - # CHECK: deployment timestamp - assert app_details["app1"].last_deployed_time_s > 0 - assert app_details["app2"].last_deployed_time_s > 0 - print("Confirmed deployment timestamps are nonzero.") - - # CHECK: docs path - assert app_details["app1"].docs_path is None - assert app_details["app2"].docs_path == "/my_docs" - print("Confirmed docs paths are correct.") - - # CHECK: all deployments are present - assert app_details["app1"].deployments.keys() == { - "app1_f", - "app1_BasicDriver", - } - assert app_details["app2"].deployments.keys() == { - "app2_FastAPIDeployment", - } - print("Metadata for all deployed deployments are present.") + # CHECK: all deployments are present + assert ( + app_details[app].deployments.keys() == expected_values[app]["deployments"] + ) - # CHECK: application details - for app in ["app1", "app2"]: - assert app_details[app].route_prefix == f"/{app}" for deployment in app_details[app].deployments.values(): assert deployment.status == DeploymentStatus.HEALTHY # Route prefix should be app level options eventually diff --git a/python/ray/serve/_private/common.py b/python/ray/serve/_private/common.py index 92b77468ac5f..e8d7a78c0b3c 100644 --- a/python/ray/serve/_private/common.py +++ b/python/ray/serve/_private/common.py @@ -365,3 +365,9 @@ class ServeDeployMode(str, Enum): UNSET = "UNSET" SINGLE_APP = "SINGLE_APP" MULTI_APP = "MULTI_APP" + + +class HTTPProxyStatus(str, Enum): + STARTING = "STARTING" + HEALTHY = "HEALTHY" + UNHEALTHY = "UNHEALTHY" diff --git a/python/ray/serve/_private/constants.py b/python/ray/serve/_private/constants.py index cf8e9a4fbf72..ea81d65e61ac 100644 --- a/python/ray/serve/_private/constants.py +++ b/python/ray/serve/_private/constants.py @@ -92,6 +92,9 @@ DEFAULT_HEALTH_CHECK_PERIOD_S = 10 DEFAULT_HEALTH_CHECK_TIMEOUT_S = 30 +# 
HTTP Proxy health check period +PROXY_HEALTH_CHECK_PERIOD_S = 10 + #: Number of times in a row that a replica must fail the health check before #: being marked unhealthy. REPLICA_HEALTH_CHECK_UNHEALTHY_THRESHOLD = 3 diff --git a/python/ray/serve/_private/http_proxy.py b/python/ray/serve/_private/http_proxy.py index 987357bf1105..51d00861030d 100644 --- a/python/ray/serve/_private/http_proxy.py +++ b/python/ray/serve/_private/http_proxy.py @@ -509,7 +509,10 @@ async def ready(self): return_when=asyncio.FIRST_COMPLETED, ) - # Return None, or re-throw the exception from self.running_task. + # Return log filepath, or re-throw the exception from self.running_task. + if self.setup_complete.is_set(): + return f"/serve/http_proxy_{ray.util.get_node_ip_address()}.log" + return await done_set.pop() async def block_until_endpoint_exists( @@ -549,3 +552,9 @@ async def run(self): self.setup_complete.set() await server.serve(sockets=[sock]) + + async def check_health(self): + """No-op method to check on the health of the HTTP Proxy. + Make sure the async event loop is not blocked. 
+ """ + pass diff --git a/python/ray/serve/_private/http_state.py b/python/ray/serve/_private/http_state.py index a31b0c95dd2a..da8ed7ba620a 100644 --- a/python/ray/serve/_private/http_state.py +++ b/python/ray/serve/_private/http_state.py @@ -1,7 +1,8 @@ import asyncio import logging import random -from typing import Dict, List, Tuple +import time +from typing import Dict, List, Tuple, Optional import ray from ray.actor import ActorHandle @@ -14,17 +15,98 @@ SERVE_LOGGER_NAME, SERVE_PROXY_NAME, SERVE_NAMESPACE, + PROXY_HEALTH_CHECK_PERIOD_S, ) from ray.serve._private.http_proxy import HTTPProxyActor from ray.serve._private.utils import ( format_actor_name, get_all_node_ids, ) -from ray.serve._private.common import EndpointTag, NodeId +from ray.serve._private.common import EndpointTag, NodeId, HTTPProxyStatus +from ray.serve.schema import HTTPProxyDetails logger = logging.getLogger(SERVE_LOGGER_NAME) +class HTTPProxyState: + def __init__(self, actor_handle: ActorHandle, actor_name: str, node_ip: str): + self._actor_handle = actor_handle + self._actor_name = actor_name + self._node_ip = node_ip + self._actor_id = None + self._log_file_path = None + + self._ready_obj_ref = self._actor_handle.ready.remote() + self._status = HTTPProxyStatus.STARTING + self._health_check_obj_ref = None + self._last_health_check_time: float = 0 + + @property + def node_ip(self) -> str: + return self._node_ip + + @property + def actor_handle(self) -> ActorHandle: + return self._actor_handle + + @property + def actor_name(self) -> str: + return self._actor_name + + @property + def status(self) -> HTTPProxyStatus: + return self._status + + @property + def actor_id(self) -> Optional[str]: + return self._actor_handle._actor_id.hex() + + @property + def log_file_path(self) -> Optional[str]: + return self._log_file_path + + def update(self): + if self._status == HTTPProxyStatus.STARTING: + try: + finished, _ = ray.wait([self._ready_obj_ref], timeout=0) + if finished: + self._log_file_path = 
ray.get(finished[0]) + self._status = HTTPProxyStatus.HEALTHY + except Exception: + self._status = HTTPProxyStatus.UNHEALTHY + return + + # Perform periodic health checks + if self._health_check_obj_ref: + finished, _ = ray.wait([self._health_check_obj_ref], timeout=0) + if finished: + try: + ray.get(finished[0]) + self._status = HTTPProxyStatus.HEALTHY + except Exception as e: + logger.warning( + f"Health check for HTTP proxy {self._actor_name} failed: {e}" + ) + self._status = HTTPProxyStatus.UNHEALTHY + + self._health_check_obj_ref = None + + # If there's no active in-progress health check and it has been more than 10 + # seconds since the last health check, perform another health check + randomized_period_s = PROXY_HEALTH_CHECK_PERIOD_S * random.uniform(0.9, 1.1) + if time.time() - self._last_health_check_time > randomized_period_s: + # If the HTTP Proxy is still blocked, mark unhealthy + if self._health_check_obj_ref: + self._status = HTTPProxyStatus.UNHEALTHY + logger.warning( + f"Health check for HTTP Proxy {self._actor_name} took more than " + f"{PROXY_HEALTH_CHECK_PERIOD_S} seconds." + ) + + self._health_check_obj_ref = self._actor_handle.check_health.remote() + self._last_health_check_time = time.time() + + class HTTPState: """Manages all state for HTTP proxies in the system. 
@@ -48,8 +130,7 @@ def __init__( self._config = config else: self._config = HTTPOptions() - self._proxy_actors: Dict[NodeId, ActorHandle] = dict() - self._proxy_actor_names: Dict[NodeId, str] = dict() + self._proxy_states: Dict[NodeId, HTTPProxyState] = dict() self._head_node_id: str = head_node_id self._gcs_client = gcs_client @@ -68,14 +149,33 @@ def get_config(self): return self._config def get_http_proxy_handles(self) -> Dict[NodeId, ActorHandle]: - return self._proxy_actors + return { + node_id: state.actor_handle for node_id, state in self._proxy_states.items() + } def get_http_proxy_names(self) -> Dict[NodeId, str]: - return self._proxy_actor_names + return { + node_id: state.actor_name for node_id, state in self._proxy_states.items() + } + + def get_http_proxy_details(self) -> Dict[NodeId, HTTPProxyDetails]: + return { + node_id: HTTPProxyDetails( + node_id=node_id, + node_ip=state.node_ip, + actor_id=state.actor_id, + actor_name=state.actor_name, + status=state.status, + log_file_path=state.log_file_path, + ) + for node_id, state in self._proxy_states.items() + } def update(self): self._start_proxies_if_needed() self._stop_proxies_if_needed() + for proxy_state in self._proxy_states.values(): + proxy_state.update() def _get_target_nodes(self) -> List[Tuple[str, str]]: """Return the list of (node_id, ip_address) to deploy HTTP servers on.""" @@ -119,7 +219,7 @@ def _start_proxies_if_needed(self) -> None: """Start a proxy on every node if it doesn't already exist.""" for node_id, node_ip_address in self._get_target_nodes(): - if node_id in self._proxy_actors: + if node_id in self._proxy_states: continue name = format_actor_name(SERVE_PROXY_NAME, self._controller_name, node_id) @@ -153,22 +253,20 @@ def _start_proxies_if_needed(self) -> None: http_middlewares=self._config.middlewares, ) - self._proxy_actors[node_id] = proxy - self._proxy_actor_names[node_id] = name + self._proxy_states[node_id] = HTTPProxyState(proxy, name, node_ip_address) def 
_stop_proxies_if_needed(self) -> bool: """Removes proxy actors from any nodes that no longer exist.""" all_node_ids = {node_id for node_id, _ in get_all_node_ids(self._gcs_client)} to_stop = [] - for node_id in self._proxy_actors: + for node_id in self._proxy_states: if node_id not in all_node_ids: logger.info("Removing HTTP proxy on removed node '{}'.".format(node_id)) to_stop.append(node_id) for node_id in to_stop: - proxy = self._proxy_actors.pop(node_id) - del self._proxy_actor_names[node_id] - ray.kill(proxy, no_restart=True) + proxy = self._proxy_states.pop(node_id) + ray.kill(proxy.actor_handle, no_restart=True) async def ensure_http_route_exists(self, endpoint: EndpointTag, timeout_s: float): """Block until the route has been propagated to all HTTP proxies. @@ -177,7 +275,9 @@ async def ensure_http_route_exists(self, endpoint: EndpointTag, timeout_s: float """ await asyncio.gather( *[ - proxy.block_until_endpoint_exists.remote(endpoint, timeout_s=timeout_s) - for proxy in self._proxy_actors.values() + proxy.actor_handle.block_until_endpoint_exists.remote( + endpoint, timeout_s=timeout_s + ) + for proxy in self._proxy_states.values() ] ) diff --git a/python/ray/serve/_private/utils.py b/python/ray/serve/_private/utils.py index d15d9f7f4be2..456f57e8ae28 100644 --- a/python/ray/serve/_private/utils.py +++ b/python/ray/serve/_private/utils.py @@ -174,7 +174,7 @@ def get_all_node_ids(gcs_client) -> List[Tuple[str, str]]: """ nodes = gcs_client.get_all_node_info(timeout=RAY_GCS_RPC_TIMEOUT_S) node_ids = [ - (ray.NodeID.from_binary(node_id).hex(), node["node_name"]) + (ray.NodeID.from_binary(node_id).hex(), node["node_name"].decode("utf-8")) for (node_id, node) in nodes.items() if node["state"] == ray.core.generated.gcs_pb2.GcsNodeInfo.ALIVE ] diff --git a/python/ray/serve/controller.py b/python/ray/serve/controller.py index f6ef48102085..e27159d412cd 100644 --- a/python/ray/serve/controller.py +++ b/python/ray/serve/controller.py @@ -706,6 +706,9 @@ def 
get_serve_instance_details(self) -> Dict: host=http_config.host, port=http_config.port, ), + http_proxies=self.http_state.get_http_proxy_details() + if self.http_state + else None, deploy_mode=self.deploy_mode, applications=applications, ).dict(exclude_unset=True) diff --git a/python/ray/serve/schema.py b/python/ray/serve/schema.py index a72d622d49c1..5dd2612e783b 100644 --- a/python/ray/serve/schema.py +++ b/python/ray/serve/schema.py @@ -11,6 +11,7 @@ DeploymentInfo, ReplicaState, ServeDeployMode, + HTTPProxyStatus, ) from ray.serve.config import DeploymentMode from ray.serve._private.utils import DEFAULT, dict_keys_snake_to_camel_case @@ -758,6 +759,23 @@ def get_status_dict(self) -> Dict: ) +@PublicAPI(stability="alpha") +class HTTPProxyDetails(BaseModel): + node_id: str = Field(description="ID of the node that the HTTP Proxy is running on") + node_ip: str = Field( + description="IP address of the node that the HTTP Proxy is running on." + ) + actor_id: str = Field(description="ID of the HTTP Proxy actor.") + actor_name: str = Field(description="Name of the HTTP Proxy actor.") + status: HTTPProxyStatus = Field(description="Current status of the HTTP Proxy.") + log_file_path: Optional[str] = Field( + description=( + "The relative path to the log file for the replica actor from the ray logs " + "directory." + ) + ) + + @PublicAPI(stability="alpha") class ServeInstanceDetails(BaseModel, extra=Extra.forbid): """ @@ -776,6 +794,11 @@ class ServeInstanceDetails(BaseModel, extra=Extra.forbid): ), ) http_options: Optional[HTTPOptionsSchema] = Field(description="HTTP Proxy options.") + http_proxies: Optional[Dict[str, HTTPProxyDetails]] = Field( + description=( + "Mapping from node_id to details about the HTTP Proxy running on that node." 
+ ) + ) deploy_mode: ServeDeployMode = Field( description=( "Whether a single-app config of format ServeApplicationSchema or multi-app " diff --git a/python/ray/serve/tests/test_http_state.py b/python/ray/serve/tests/test_http_state.py index 957a79f95d1b..83e2c14bb037 100644 --- a/python/ray/serve/tests/test_http_state.py +++ b/python/ray/serve/tests/test_http_state.py @@ -1,9 +1,13 @@ +from functools import partial from unittest.mock import patch import pytest +import ray +from ray._private.test_utils import SignalActor, wait_for_condition from ray.serve.config import DeploymentMode, HTTPOptions -from ray.serve._private.http_state import HTTPState +from ray.serve._private.common import HTTPProxyStatus +from ray.serve._private.http_state import HTTPState, HTTPProxyState def test_node_selection(): @@ -64,6 +68,67 @@ def _make_http_state(http_options): assert set(another_seed) != set(selected_nodes) +def test_http_proxy_healthy(): + ray.init() + signal = SignalActor.remote() + + @ray.remote(num_cpus=0) + class MockHTTPProxyActor: + async def ready(self): + await signal.wait.remote() + return "mock_actor_id", "mock_log_file_path" + + async def check_health(self): + pass + + proxy = MockHTTPProxyActor.options(lifetime="detached").remote() + state = HTTPProxyState(proxy, "alice", "mock_node_ip") + assert state.status == HTTPProxyStatus.STARTING + + state.update() + assert state.status == HTTPProxyStatus.STARTING + + signal.send.remote() + wait_for_condition( + lambda: state.update() or state.status == HTTPProxyStatus.HEALTHY, timeout=2 + ) + ray.shutdown() + + +def test_http_proxy_unhealthy(): + ray.init() + signal = SignalActor.remote() + + @ray.remote(num_cpus=0) + class MockHTTPProxyActor: + async def ready(self): + return "mock_actor_id", "mock_log_file_path" + + async def check_health(self): + await signal.wait.remote() + + with patch("ray.serve._private.http_state.PROXY_HEALTH_CHECK_PERIOD_S", 1): + proxy = MockHTTPProxyActor.options(lifetime="detached").remote() 
+ state = HTTPProxyState(proxy, "alice", "mock_node_ip") + assert state.status == HTTPProxyStatus.STARTING + + def check_proxy(status): + state.update() + return state.status == status + + # Proxy actor is ready, so status should transition STARTING -> HEALTHY + wait_for_condition(partial(check_proxy, HTTPProxyStatus.HEALTHY), timeout=2) + + # Health check is blocked, so status should transition HEALTHY -> UNHEALTHY + wait_for_condition(partial(check_proxy, HTTPProxyStatus.UNHEALTHY), timeout=2) + + # Unblock health check, so status should transition UNHEALTHY -> HEALTHY + signal.send.remote() + wait_for_condition(partial(check_proxy, HTTPProxyStatus.HEALTHY), timeout=2) + + ray.shutdown() + + if __name__ == "__main__": import sys From 22579a5e2e2bff2fb0a775ffe2f3fb036b076a21 Mon Sep 17 00:00:00 2001 From: Jonathan Carter <42900403+joncarter1@users.noreply.github.com> Date: Mon, 8 May 2023 23:51:27 +0100 Subject: [PATCH 284/424] [Jobs] HTTPs support for the job client (#33542) This PR enables HTTPS support within the Ray JobSubmissionClient. e.g. client = JobSubmissionClient(address, verify="/path/to/cert") client.submit_job(...) 
Related issue number Closes #30888 --- dashboard/modules/dashboard_sdk.py | 25 +++++++++- dashboard/modules/job/cli.py | 37 +++++++++------ dashboard/modules/job/cli_utils.py | 46 +++++++++++++++++++ dashboard/modules/job/sdk.py | 9 +++- dashboard/modules/job/tests/test_cli.py | 33 +++++++++++-- .../modules/job/tests/test_http_job_server.py | 1 + .../job/tests/test_https_connection.py | 46 +++++++++++++++++++ .../job-submission/sdk.rst | 21 ++++++++- python/requirements_test.txt | 2 + 9 files changed, 197 insertions(+), 23 deletions(-) create mode 100644 dashboard/modules/job/cli_utils.py create mode 100644 dashboard/modules/job/tests/test_https_connection.py diff --git a/dashboard/modules/dashboard_sdk.py b/dashboard/modules/dashboard_sdk.py index 946736158a51..8d7691e7ecf1 100644 --- a/dashboard/modules/dashboard_sdk.py +++ b/dashboard/modules/dashboard_sdk.py @@ -2,12 +2,14 @@ import importlib import logging import json +import os import yaml from pathlib import Path import tempfile -from typing import Any, Dict, List, Optional +from typing import Any, Dict, List, Optional, Union from pkg_resources import packaging import ray +import ssl try: import requests @@ -202,8 +204,8 @@ def __init__( cookies: Optional[Dict[str, Any]] = None, metadata: Optional[Dict[str, Any]] = None, headers: Optional[Dict[str, Any]] = None, + verify: Optional[Union[str, bool]] = True, ): - # Remove any trailing slashes if address is not None and address.endswith("/"): address = address.rstrip("/") @@ -221,6 +223,24 @@ def __init__( # Headers used for all requests sent to job server, optional and only # needed for cases like authentication to remote cluster. self._headers = cluster_info.headers + # Set SSL verify parameter for the requests library and create an ssl_context + # object when needed for the aiohttp library. 
+ self._verify = verify + if isinstance(self._verify, str): + if os.path.isdir(self._verify): + cafile, capath = None, self._verify + elif os.path.isfile(self._verify): + cafile, capath = self._verify, None + else: + raise FileNotFoundError( + f"Path to CA certificates: '{self._verify}', does not exist." + ) + self._ssl_context = ssl.create_default_context(cafile=cafile, capath=capath) + else: + if self._verify is False: + self._ssl_context = False + else: + self._ssl_context = None def _check_connection_and_version( self, min_version: str = "1.9", version_error_message: str = None @@ -287,6 +307,7 @@ def _do_request( data=data, json=json_data, headers=self._headers, + verify=self._verify, **kwargs, ) diff --git a/dashboard/modules/job/cli.py b/dashboard/modules/job/cli.py index 3b3d35423208..69584e55efe7 100644 --- a/dashboard/modules/job/cli.py +++ b/dashboard/modules/job/cli.py @@ -12,14 +12,17 @@ from ray.autoscaler._private.cli_logger import add_click_logging_options, cf, cli_logger from ray.dashboard.modules.dashboard_sdk import parse_runtime_env_args from ray.job_submission import JobStatus, JobSubmissionClient +from ray.dashboard.modules.job.cli_utils import add_common_job_options from ray.util.annotations import PublicAPI from ray._private.utils import parse_resources_json def _get_sdk_client( - address: Optional[str], create_cluster_if_needed: bool = False + address: Optional[str], + create_cluster_if_needed: bool = False, + verify: Union[bool, str] = True, ) -> JobSubmissionClient: - client = JobSubmissionClient(address, create_cluster_if_needed) + client = JobSubmissionClient(address, create_cluster_if_needed, verify=verify) client_address = client.get_address() cli_logger.labeled_value("Job submission server address", client_address) return client @@ -152,6 +155,7 @@ def job_cli_group(): default=False, help="If set, will not stream logs and wait for the job to exit.", ) +@add_common_job_options @add_click_logging_options @click.argument("entrypoint", 
nargs=-1, required=True, type=click.UNPROCESSED) @PublicAPI @@ -167,13 +171,13 @@ def submit( entrypoint_num_gpus: Optional[Union[int, float]], entrypoint_resources: Optional[str], no_wait: bool, + verify: Union[bool, str], ): """Submits a job to be run on the cluster. Example: `ray job submit -- python my_script.py --arg=val` """ - if job_id: cli_logger.warning( "--job-id option is deprecated. Please use --submission-id instead." @@ -201,7 +205,7 @@ def submit( no_wait=no_wait, ) - client = _get_sdk_client(address, create_cluster_if_needed=True) + client = _get_sdk_client(address, create_cluster_if_needed=True, verify=verify) final_runtime_env = parse_runtime_env_args( runtime_env=runtime_env, @@ -261,15 +265,16 @@ def submit( ), ) @click.argument("job-id", type=str) +@add_common_job_options @add_click_logging_options @PublicAPI(stability="stable") -def status(address: Optional[str], job_id: str): +def status(address: Optional[str], job_id: str, verify: Union[bool, str]): """Queries for the current status of a job. Example: `ray job status ` """ - client = _get_sdk_client(address) + client = _get_sdk_client(address, verify=verify) _log_job_status(client, job_id) @@ -292,15 +297,16 @@ def status(address: Optional[str], job_id: str): help="If set, will not wait for the job to exit.", ) @click.argument("job-id", type=str) +@add_common_job_options @add_click_logging_options @PublicAPI(stability="stable") -def stop(address: Optional[str], no_wait: bool, job_id: str): +def stop(address: Optional[str], no_wait: bool, job_id: str, verify: Union[bool, str]): """Attempts to stop a job. 
Example: `ray job stop ` """ - client = _get_sdk_client(address) + client = _get_sdk_client(address, verify=verify) cli_logger.print(f"Attempting to stop job '{job_id}'") client.stop_job(job_id) @@ -333,9 +339,10 @@ def stop(address: Optional[str], no_wait: bool, job_id: str): ), ) @click.argument("job-id", type=str) +@add_common_job_options @add_click_logging_options @PublicAPI(stability="alpha") -def delete(address: Optional[str], job_id: str): +def delete(address: Optional[str], job_id: str, verify: Union[bool, str]): """Deletes a stopped job and its associated data from memory. Only supported for jobs that are already in a terminal state. @@ -347,7 +354,7 @@ def delete(address: Optional[str], job_id: str): Example: ray job delete """ - client = _get_sdk_client(address) + client = _get_sdk_client(address, verify=verify) client.delete_job(job_id) cli_logger.print(f"Job '{job_id}' deleted successfully") @@ -372,15 +379,16 @@ def delete(address: Optional[str], job_id: str): default=False, help="If set, follow the logs (like `tail -f`).", ) +@add_common_job_options @add_click_logging_options @PublicAPI(stability="stable") -def logs(address: Optional[str], job_id: str, follow: bool): +def logs(address: Optional[str], job_id: str, follow: bool, verify: Union[bool, str]): """Gets the logs of a job. Example: `ray job logs ` """ - client = _get_sdk_client(address) + client = _get_sdk_client(address, verify=verify) sdk_version = client.get_version() # sdk version 0 did not have log streaming if follow: @@ -409,15 +417,16 @@ def logs(address: Optional[str], job_id: str, follow: bool): "using the RAY_ADDRESS environment variable." ), ) +@add_common_job_options @add_click_logging_options @PublicAPI(stability="stable") -def list(address: Optional[str]): +def list(address: Optional[str], verify: Union[bool, str]): """Lists all running jobs and their information. 
Example: `ray job list` """ - client = _get_sdk_client(address) + client = _get_sdk_client(address, verify=verify) # Set no_format to True because the logs may have unescaped "{" and "}" # and the CLILogger calls str.format(). cli_logger.print(pprint.pformat(client.list_jobs()), no_format=True) diff --git a/dashboard/modules/job/cli_utils.py b/dashboard/modules/job/cli_utils.py new file mode 100644 index 000000000000..0e2afe41d652 --- /dev/null +++ b/dashboard/modules/job/cli_utils.py @@ -0,0 +1,46 @@ +from typing import Union + +import click +import functools + + +def bool_cast(string: str) -> Union[bool, str]: + """Cast a string to a boolean if possible, otherwise return the string.""" + if string.lower() == "true" or string == "1": + return True + elif string.lower() == "false" or string == "0": + return False + else: + return string + + +class BoolOrStringParam(click.ParamType): + """A click parameter that can be either a boolean or a string.""" + + name = "BOOL | TEXT" + + def convert(self, value, param, ctx): + if isinstance(value, bool): + return value + else: + return bool_cast(value) + + +def add_common_job_options(func): + """Decorator for adding CLI flags shared by all `ray job` commands.""" + + @click.option( + "--verify", + default=True, + show_default=True, + type=BoolOrStringParam(), + help=( + "Boolean indication to verify the server's TLS certificate or a path to" + " a file or directory of trusted certificates." + ), + ) + @functools.wraps(func) + def wrapper(*args, **kwargs): + return func(*args, **kwargs) + + return wrapper diff --git a/dashboard/modules/job/sdk.py b/dashboard/modules/job/sdk.py index e8a8e00443da..9c7896ce3cee 100644 --- a/dashboard/modules/job/sdk.py +++ b/dashboard/modules/job/sdk.py @@ -60,6 +60,8 @@ class JobSubmissionClient(SubmissionClient): via a simple dict update. headers: Headers to use when sending requests to the HTTP job server, used for cases like authentication to a remote cluster. 
+ verify: Boolean indication to verify the server's TLS certificate or a path to + a file or directory of trusted certificates. Default: True. """ def __init__( @@ -69,6 +71,7 @@ def __init__( cookies: Optional[Dict[str, Any]] = None, metadata: Optional[Dict[str, Any]] = None, headers: Optional[Dict[str, Any]] = None, + verify: Optional[Union[str, bool]] = True, ): self._client_ray_version = ray.__version__ """Initialize a JobSubmissionClient and check the connection to the cluster.""" @@ -77,7 +80,6 @@ def __init__( "The Ray jobs CLI & SDK require the ray[default] " "installation: `pip install 'ray[default]'`" ) - # Check types of arguments if address is not None and not isinstance(address, str): raise TypeError(f"address must be a string, got {type(address)}") @@ -92,6 +94,8 @@ def __init__( raise TypeError(f"metadata must be a dict, got {type(metadata)}") if headers is not None and not isinstance(headers, dict): raise TypeError(f"headers must be a dict, got {type(headers)}") + if not (isinstance(verify, str) or isinstance(verify, bool)): + raise TypeError(f"verify must be a str or bool, got {type(verify)}") api_server_url = get_address_for_submission_client(address) @@ -101,6 +105,7 @@ def __init__( cookies=cookies, metadata=metadata, headers=headers, + verify=verify, ) self._check_connection_and_version( min_version="1.9", @@ -454,7 +459,7 @@ async def tail_job_logs(self, job_id: str) -> Iterator[str]: cookies=self._cookies, headers=self._headers ) as session: ws = await session.ws_connect( - f"{self._address}/api/jobs/{job_id}/logs/tail" + f"{self._address}/api/jobs/{job_id}/logs/tail", ssl=self._ssl_context ) while True: diff --git a/dashboard/modules/job/tests/test_cli.py b/dashboard/modules/job/tests/test_cli.py index 4d030c0997a6..c25367421086 100644 --- a/dashboard/modules/job/tests/test_cli.py +++ b/dashboard/modules/job/tests/test_cli.py @@ -89,20 +89,22 @@ def _job_cli_group_test_address(mock_sdk_client, cmd, *args): create_cluster_if_needed = True if 
cmd == "submit" else False # Test passing address via command line. result = runner.invoke(job_cli_group, [cmd, "--address=arg_addr", *args]) - mock_sdk_client.assert_called_with("arg_addr", create_cluster_if_needed) + mock_sdk_client.assert_called_with( + "arg_addr", create_cluster_if_needed, verify=True + ) with pytest.raises(AssertionError): - mock_sdk_client.assert_called_with("some_other_addr", True) + mock_sdk_client.assert_called_with("some_other_addr", True, verify=True) check_exit_code(result, 0) # Test passing address via env var. with set_env_var("RAY_ADDRESS", "env_addr"): result = runner.invoke(job_cli_group, [cmd, *args]) check_exit_code(result, 0) # RAY_ADDRESS is read inside the SDK client. - mock_sdk_client.assert_called_with(None, create_cluster_if_needed) + mock_sdk_client.assert_called_with(None, create_cluster_if_needed, verify=True) # Test passing no address. result = runner.invoke(job_cli_group, [cmd, *args]) check_exit_code(result, 0) - mock_sdk_client.assert_called_with(None, create_cluster_if_needed) + mock_sdk_client.assert_called_with(None, create_cluster_if_needed, verify=True) class TestList: @@ -390,6 +392,29 @@ def test_entrypoint_resources_invalid_json(self, mock_sdk_client): assert result.exit_code == 1 assert "not a valid JSON string" in result.output + @pytest.mark.parametrize( + "cli_val, verify_param", + [ + ("True", True), + ("true", True), + ("1", True), + ("False", False), + ("false", False), + ("0", False), + ("a/rel/path", "a/rel/path"), + ("/an/abs/path", "/an/abs/path"), + ], + ) + def test_entrypoint_verify(self, mock_sdk_client, cli_val, verify_param): + runner = CliRunner() + with set_env_var("RAY_ADDRESS", "env_addr"): + result = runner.invoke( + job_cli_group, + ["submit", f"--verify={cli_val}", "--", "echo hello"], + ) + assert result.exit_code == 0 + mock_sdk_client.assert_called_with(None, True, verify=verify_param) + class TestDelete: def test_address(self, mock_sdk_client): diff --git 
a/dashboard/modules/job/tests/test_http_job_server.py b/dashboard/modules/job/tests/test_http_job_server.py index 0351a9f7773d..eff548dd2c19 100644 --- a/dashboard/modules/job/tests/test_http_job_server.py +++ b/dashboard/modules/job/tests/test_http_job_server.py @@ -623,6 +623,7 @@ def test_request_headers(job_sdk_client): data=None, json={"entrypoint": "ls"}, headers={"Connection": "keep-alive", "Authorization": "TOK:"}, + verify=True, ) diff --git a/dashboard/modules/job/tests/test_https_connection.py b/dashboard/modules/job/tests/test_https_connection.py new file mode 100644 index 000000000000..90e1ed3e9cb1 --- /dev/null +++ b/dashboard/modules/job/tests/test_https_connection.py @@ -0,0 +1,46 @@ +import pytest +import ssl +import sys +import trustme + +import ray +from ray.job_submission import JobSubmissionClient + + +@pytest.fixture(scope="session") +def ca(): + return trustme.CA() + + +@pytest.fixture(scope="session") +def httpserver_ssl_context(ca): + context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER) + localhost_cert = ca.issue_cert("localhost") + localhost_cert.configure_cert(context) + return context + + +@pytest.fixture(scope="session") +def httpclient_ssl_context(ca): + with ca.cert_pem.tempfile() as ca_temp_path: + return ssl.create_default_context(cafile=ca_temp_path) + + +def test_mock_https_connection(httpserver, ca): + """Test connections to a mock HTTPS job submission server.""" + httpserver.expect_request("/api/version").respond_with_json( + {"ray_version": ray.__version__} + ) + mock_url = httpserver.url_for("/") + # Connection without SSL certificate should fail + with pytest.raises(ConnectionError): + JobSubmissionClient(mock_url) + # Connecton with SSL verification skipped should succeed + JobSubmissionClient(mock_url, verify=False) + # Connection with SSL verification should succeed + with ca.cert_pem.tempfile() as ca_temp_path: + JobSubmissionClient(mock_url, verify=ca_temp_path) + + +if __name__ == "__main__": + sys.exit(pytest.main(["-v", 
__file__])) diff --git a/doc/source/cluster/running-applications/job-submission/sdk.rst b/doc/source/cluster/running-applications/job-submission/sdk.rst index 681275b5c597..c59e9241a690 100644 --- a/doc/source/cluster/running-applications/job-submission/sdk.rst +++ b/doc/source/cluster/running-applications/job-submission/sdk.rst @@ -223,4 +223,23 @@ To be precise, the environment variable ``CUDA_VISIBLE_DEVICES`` will not be set .. note:: - By default, 0 CPUs and 0 GPUs are reserved for the entrypoint script. \ No newline at end of file + By default, 0 CPUs and 0 GPUs are reserved for the entrypoint script. + + +Client Configuration +-------------------------------- + +Additional client connection options, such as custom HTTP headers and cookies, can be passed to the ``JobSubmissionClient`` class. +A full list of options can be found in the :ref:`API Reference `. + +TLS Verification +~~~~~~~~~ +By default, any HTTPS client connections will be verified using system certificates found by the underlying ``requests`` and ``aiohttp`` libraries. +The ``verify`` parameter can be set to override this behavior. For example: + +.. code-block:: python + + client = JobSubmissionClient("https://", verify="/path/to/cert.pem") + +will use the certificate found at ``/path/to/cert.pem`` to verify the job server's certificate. +Certificate verification can be disabled by setting the ``verify`` parameter to ``False``. 
\ No newline at end of file diff --git a/python/requirements_test.txt b/python/requirements_test.txt index 2a6e27c02ece..96b6fc6e98c7 100644 --- a/python/requirements_test.txt +++ b/python/requirements_test.txt @@ -59,6 +59,7 @@ pymongo==4.3.2 pyspark==3.3.1 pytest==7.0.1 pytest-asyncio==0.16.0 +pytest-httpserver==1.0.6 pytest-rerunfailures==10.2 pytest-sugar==0.9.5 pytest-lazy-fixture==0.6.3 @@ -68,6 +69,7 @@ redis==4.4.2 scikit-learn==1.0.2; python_version < '3.11' smart_open[s3]==6.2.0 tqdm==4.64.1 +trustme==0.9.0 testfixtures==7.0.0 werkzeug==2.1.2 xlrd==2.0.1 From fee75582b1cef0b1d67acb00f5433a971794ec13 Mon Sep 17 00:00:00 2001 From: Eitan Adler Date: Mon, 8 May 2023 16:35:56 -0700 Subject: [PATCH 285/424] [docs] remove of some some duplicated words (#35146) ## Problem We duplicate some words in documentation. ## Solution Remove them --- doc/source/ray-air/examples/torch_incremental_learning.ipynb | 2 +- doc/source/ray-observability/ray-metrics.rst | 2 +- doc/source/rllib/rllib-replay-buffers.rst | 2 +- doc/source/tune/examples/optuna_example.ipynb | 2 +- doc/source/tune/examples/sigopt_example.ipynb | 2 +- doc/source/tune/examples/skopt_example.ipynb | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) diff --git a/doc/source/ray-air/examples/torch_incremental_learning.ipynb b/doc/source/ray-air/examples/torch_incremental_learning.ipynb index 21fdbf6b831b..7e51e56fe6d3 100644 --- a/doc/source/ray-air/examples/torch_incremental_learning.ipynb +++ b/doc/source/ray-air/examples/torch_incremental_learning.ipynb @@ -1471,7 +1471,7 @@ "\n", "We have now incrementally trained our simple multi-layer perceptron. Let's compare the incrementally trained model via fine tuning against a model that is trained on all the tasks up front.\n", "\n", - "Since we are using a naive fine-tuning strategy, we should expect that our incrementally trained model will perform worse than the the one that is fully trained! 
However, there's various other strategies that have been developed and are actively being researched to improve accuracy for incremental training. And overall, incremental/continual learning allows you to train in many real world settings where the entire dataset is not available up front, but new data is arriving at a relatively high rate." + "Since we are using a naive fine-tuning strategy, we should expect that our incrementally trained model will perform worse than the one that is fully trained! However, there's various other strategies that have been developed and are actively being researched to improve accuracy for incremental training. And overall, incremental/continual learning allows you to train in many real world settings where the entire dataset is not available up front, but new data is arriving at a relatively high rate." ] }, { diff --git a/doc/source/ray-observability/ray-metrics.rst b/doc/source/ray-observability/ray-metrics.rst index 7b22fd9ff01a..a4efcdc13989 100644 --- a/doc/source/ray-observability/ray-metrics.rst +++ b/doc/source/ray-observability/ray-metrics.rst @@ -23,7 +23,7 @@ Ray exposes its metrics in Prometheus format. This allows us to easily scrape th First, `download Prometheus `_. Make sure to download the correct binary for your operating system. (Ex: darwin for mac osx) -Then, unzip the the archive into a local directory using the following command. +Then, unzip the archive into a local directory using the following command. .. code-block:: bash diff --git a/doc/source/rllib/rllib-replay-buffers.rst b/doc/source/rllib/rllib-replay-buffers.rst index 5ce3621d27a6..b3d709ab8204 100644 --- a/doc/source/rllib/rllib-replay-buffers.rst +++ b/doc/source/rllib/rllib-replay-buffers.rst @@ -36,7 +36,7 @@ Replay Buffers in RLlib RLlib comes with a set of extendable replay buffers built in. All the of them support the two basic methods ``add()`` and ``sample()``. 
We provide a base :py:class:`~ray.rllib.utils.replay_buffers.replay_buffer.ReplayBuffer` class from which you can build your own buffer. In most algorithms, we require :py:class:`~ray.rllib.utils.replay_buffers.multi_agent_replay_buffer.MultiAgentReplayBuffer`\s. -This is because we want them to generalize to the the multi-agent case. Therefore, these buffer's ``add()`` and ``sample()`` methods require a ``policy_id`` to handle experiences per policy. +This is because we want them to generalize to the multi-agent case. Therefore, these buffer's ``add()`` and ``sample()`` methods require a ``policy_id`` to handle experiences per policy. Have a look at the :py:class:`~ray.rllib.utils.replay_buffers.multi_agent_replay_buffer.MultiAgentReplayBuffer` to get a sense of how it extends our base class. You can find buffer types and arguments to modify their behaviour as part of RLlib's default parameters. They are part of the ``replay_buffer_config``. diff --git a/doc/source/tune/examples/optuna_example.ipynb b/doc/source/tune/examples/optuna_example.ipynb index d020da9bbd5f..2b2d7f36fd53 100644 --- a/doc/source/tune/examples/optuna_example.ipynb +++ b/doc/source/tune/examples/optuna_example.ipynb @@ -264,7 +264,7 @@ "id": "4287fa79", "metadata": {}, "source": [ - "We also constrain the the number of concurrent trials to `4` with a `ConcurrencyLimiter`." + "We also constrain the number of concurrent trials to `4` with a `ConcurrencyLimiter`." ] }, { diff --git a/doc/source/tune/examples/sigopt_example.ipynb b/doc/source/tune/examples/sigopt_example.ipynb index 4d884dbb745c..ac478b0a3547 100644 --- a/doc/source/tune/examples/sigopt_example.ipynb +++ b/doc/source/tune/examples/sigopt_example.ipynb @@ -375,7 +375,7 @@ "id": "3d8441a6", "metadata": {}, "source": [ - "And here are they hyperparameters found to minimize the the objective on average." + "And here are they hyperparameters found to minimize the objective on average." 
] }, { diff --git a/doc/source/tune/examples/skopt_example.ipynb b/doc/source/tune/examples/skopt_example.ipynb index 40a5f97ffd33..84e8a7741460 100644 --- a/doc/source/tune/examples/skopt_example.ipynb +++ b/doc/source/tune/examples/skopt_example.ipynb @@ -225,7 +225,7 @@ "id": "2892b243", "metadata": {}, "source": [ - "The search algorithm is instantiated from the `SkOptSearch` class. We also constrain the the number of concurrent trials to `4` with a `ConcurrencyLimiter`." + "The search algorithm is instantiated from the `SkOptSearch` class. We also constrain the number of concurrent trials to `4` with a `ConcurrencyLimiter`." ] }, { From dd83099451b2298b287873605c3c18e557233324 Mon Sep 17 00:00:00 2001 From: Amog Kamsetty Date: Mon, 8 May 2023 17:05:23 -0700 Subject: [PATCH 286/424] [ML] Update codeowners for `requirements_ml_docker.txt` and `requirements_dl.txt` (#35147) Add @krfricke as codeowner for requirements file changes. --------- Signed-off-by: amogkam --- .github/CODEOWNERS | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 0036840885d1..0a630e281eda 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -103,8 +103,8 @@ /doc/source/ray-air/ @richardliaw @gjoliver @krfricke @xwjiang2010 @amogkam @matthewdeng @Yard1 @maxpumperla @ray-project/ray-docs # ML Docker Dependencies -/python/requirements/ml/requirements_dl.txt @amogkam @sven1977 @richardliaw @matthewdeng -/python/requirements/ml/requirements_ml_docker.txt @amogkam @sven1977 @richardliaw @matthewdeng +/python/requirements/ml/requirements_dl.txt @amogkam @krfricke @richardliaw @matthewdeng +/python/requirements/ml/requirements_ml_docker.txt @amogkam @krfricke @richardliaw @matthewdeng # Ray symbol export /src/ray/ray_version_script.lds @iycheng @ericl @scv119 From 637fc51a58ac3397ac1611cd45d9a00fea3b0c94 Mon Sep 17 00:00:00 2001 From: Chao Wang <125417081+chaowanggg@users.noreply.github.com> Date: Mon, 8 May 2023 18:00:09 
-0700 Subject: [PATCH 287/424] [Overview][Serve] Add Recent Serve Applications Card (#34642) Expected Results: Create a Recent Serve Applications card: We have a Serve page with a table containing information about the Application name, Import Path, and Status. We will create a card based on this information and the styles of the Recent Jobs Card. Different icons to show: https://files.slack.com/files-pri/TKC2KFWG3-F055XEKED98/screen_shot_2023-05-02_at_12.01.36_pm.png --- .../src/common/ServeStatus.component.test.tsx | 60 ++++++++ dashboard/client/src/common/ServeStatus.tsx | 77 ++++++++++ .../src/components/AutoscalerStatusCards.tsx | 94 ++++++++++++ .../client/src/components/ListItemCard.tsx | 136 ++++++++++++++++++ dashboard/client/src/pages/job/JobDetail.tsx | 88 ++---------- .../src/pages/overview/OverviewPage.tsx | 36 ++++- .../pages/overview/cards/RecentJobsCard.tsx | 130 ++++------------- .../cards/RecentServeCard.component.test.tsx | 83 +++++++++++ .../pages/overview/cards/RecentServeCard.tsx | 53 +++++++ .../serve/ServeApplicationDetailPage.tsx | 10 +- .../src/pages/serve/ServeApplicationRow.tsx | 14 +- .../src/pages/serve/mockServeApplication.ts | 63 ++++++++ dashboard/client/src/type/serve.ts | 2 +- 13 files changed, 658 insertions(+), 188 deletions(-) create mode 100644 dashboard/client/src/common/ServeStatus.component.test.tsx create mode 100644 dashboard/client/src/common/ServeStatus.tsx create mode 100644 dashboard/client/src/components/AutoscalerStatusCards.tsx create mode 100644 dashboard/client/src/components/ListItemCard.tsx create mode 100644 dashboard/client/src/pages/overview/cards/RecentServeCard.component.test.tsx create mode 100644 dashboard/client/src/pages/overview/cards/RecentServeCard.tsx create mode 100644 dashboard/client/src/pages/serve/mockServeApplication.ts diff --git a/dashboard/client/src/common/ServeStatus.component.test.tsx b/dashboard/client/src/common/ServeStatus.component.test.tsx new file mode 100644 index 
000000000000..5436f583e02f --- /dev/null +++ b/dashboard/client/src/common/ServeStatus.component.test.tsx @@ -0,0 +1,60 @@ +import { render, screen } from "@testing-library/react"; +import React from "react"; +import { ServeApplication, ServeApplicationStatus } from "../type/serve"; +import { ServeStatusIcon } from "./ServeStatus"; + +const APP: ServeApplication = { + name: "MyServeApp", + route_prefix: "/my-serve-app", + docs_path: null, + status: ServeApplicationStatus.RUNNING, + message: "", + last_deployed_time_s: 1682029771.0748637, + deployed_app_config: null, + deployments: {}, +}; + +describe("ServeStatusIcon", () => { + it("renders RUNNING status", async () => { + render(); + + await screen.findByTestId("serve-status-icon"); + + const icon = screen.getByTestId("serve-status-icon"); + const classList = icon.getAttribute("class"); + expect(classList).toContain("colorSuccess"); + }); + + it("renders NOT_STARTED status", async () => { + render( + , + ); + + await screen.findByTestId("serve-status-icon"); + + expect(screen.queryByTestId("serve-status-icon")).not.toHaveClass( + "colorSuccess", + ); + expect(screen.queryByTestId("serve-status-icon")).not.toHaveClass( + "colorError", + ); + }); + + it("renders DEPLOY_FAILED status", async () => { + render( + , + ); + + await screen.findByTestId("serve-status-icon"); + + const icon = screen.getByTestId("serve-status-icon"); + const classList = icon.getAttribute("class"); + expect(classList).toContain("colorError"); + }); +}); diff --git a/dashboard/client/src/common/ServeStatus.tsx b/dashboard/client/src/common/ServeStatus.tsx new file mode 100644 index 000000000000..dd4ebad48889 --- /dev/null +++ b/dashboard/client/src/common/ServeStatus.tsx @@ -0,0 +1,77 @@ +import { createStyles, makeStyles } from "@material-ui/core"; +import classNames from "classnames"; +import React from "react"; +import { + RiCloseCircleFill, + RiRecordCircleFill, + RiStopCircleFill, +} from "react-icons/ri"; +import { ServeApplication } 
from "../type/serve"; +import { JobRunningIcon } from "./JobStatus"; +import { ClassNameProps } from "./props"; + +type ServeStatusIconProps = { + app: ServeApplication; + small: boolean; +} & ClassNameProps; + +const useServeStatusIconStyles = makeStyles((theme) => + createStyles({ + icon: { + width: 20, + height: 20, + marginRight: 8, + }, + iconSmall: { + width: 16, + height: 16, + }, + colorSuccess: { + color: theme.palette.success.main, + }, + colorError: { + color: theme.palette.error.main, + }, + }), +); + +export const ServeStatusIcon = ({ + app, + small, + className, +}: ServeStatusIconProps) => { + const classes = useServeStatusIconStyles(); + + switch (app.status) { + case "RUNNING": + return ( + + ); + case "NOT_STARTED": + return ( + + ); + case "DEPLOY_FAILED": + return ( + + ); + default: + // DEPLOYING || DELETEING + return ( + + ); + } +}; diff --git a/dashboard/client/src/components/AutoscalerStatusCards.tsx b/dashboard/client/src/components/AutoscalerStatusCards.tsx new file mode 100644 index 000000000000..887c192ae36b --- /dev/null +++ b/dashboard/client/src/components/AutoscalerStatusCards.tsx @@ -0,0 +1,94 @@ +import { Box, Typography } from "@material-ui/core"; +import React from "react"; +import { RayStatusResp } from "../service/status"; + +const formatNodeStatus = (cluster_status: string) => { + // ==== auto scaling status + // Node status + // .... + // Resources + // .... + const sections = cluster_status.split("Resources"); + return formatClusterStatus( + "Node Status", + sections[0].split("Node status")[1], + ); +}; + +const formatResourcesStatus = (cluster_status: string) => { + // ==== auto scaling status + // Node status + // .... + // Resources + // .... + const sections = cluster_status.split("Resources"); + return formatClusterStatus("Resource Status", sections[1]); +}; + +const formatClusterStatus = (title: string, cluster_status: string) => { + const cluster_status_rows = cluster_status.split("\n"); + + return ( +
    + + {title} + + {cluster_status_rows.map((i, key) => { + // Format the output. + // See format_info_string in util.py + if (i.startsWith("-----") || i.startsWith("=====") || i === "") { + // Ignore separators + return null; + } else if (i.endsWith(":")) { + return ( +
    + {i} +
    + ); + } else { + return
    {i}
    ; + } + })} +
    + ); +}; + +type StatusCardProps = { + cluster_status: RayStatusResp | undefined; +}; + +export const NodeStatusCard = ({ cluster_status }: StatusCardProps) => { + return ( + + {cluster_status?.data + ? formatNodeStatus(cluster_status?.data.clusterStatus) + : "No cluster status."} + + ); +}; + +export const ResourceStatusCard = ({ cluster_status }: StatusCardProps) => { + return ( + + {cluster_status?.data + ? formatResourcesStatus(cluster_status?.data.clusterStatus) + : "No cluster status."} + + ); +}; diff --git a/dashboard/client/src/components/ListItemCard.tsx b/dashboard/client/src/components/ListItemCard.tsx new file mode 100644 index 000000000000..af2d1d33632a --- /dev/null +++ b/dashboard/client/src/components/ListItemCard.tsx @@ -0,0 +1,136 @@ +import { createStyles, makeStyles, Typography } from "@material-ui/core"; +import classNames from "classnames"; +import _ from "lodash"; +import React, { ReactNode } from "react"; +import { Link } from "react-router-dom"; +import { ClassNameProps } from "../common/props"; +import { + LinkWithArrow, + OverviewCard, +} from "../pages/overview/cards/OverviewCard"; + +type ListItemCardProps = { + headerTitle: string; + items: ListItemProps[]; + emptyListText: string; + footerText: string; + footerLink: string; +} & ClassNameProps; + +type ListItemProps = { + title: string | undefined; + subtitle: string; + link: string | undefined; + icon: ReactNode; +} & ClassNameProps; + +const useStyles = makeStyles((theme) => + createStyles({ + root: { + display: "flex", + flexDirection: "column", + padding: theme.spacing(2, 3), + }, + listContainer: { + marginTop: theme.spacing(2), + flex: 1, + overflow: "hidden", + }, + listItem: { + "&:not(:first-child)": { + marginTop: theme.spacing(1), + }, + }, + }), +); + +export const ListItemCard = ({ + className, + headerTitle, + items, + emptyListText: itemEmptyTip, + footerText, + footerLink, +}: ListItemCardProps) => { + const classes = useStyles(); + + return ( + + {headerTitle} +
    + {items.map((item: ListItemProps) => ( + + ))} + {items.length === 0 && ( + {itemEmptyTip} + )} +
    + +
    + ); +}; + +const useListItemStyles = makeStyles((theme) => + createStyles({ + root: { + display: "flex", + flexDirection: "row", + flexWrap: "nowrap", + alignItems: "center", + textDecoration: "none", + }, + + textContainer: { + flex: "1 1 auto", + width: `calc(100% - ${theme.spacing(1) + 20}px)`, + }, + title: { + color: "#036DCF", + }, + entrypoint: { + overflow: "hidden", + textOverflow: "ellipsis", + whiteSpace: "nowrap", + color: "#5F6469", + }, + }), +); + +const ListItem = ({ + icon, + title, + subtitle, + className, + link, +}: ListItemProps) => { + const classes = useListItemStyles(); + + const cardContent = ( + + {icon} +
    + + {title} + + + {subtitle} + +
    +
    + ); + return ( +
    + {link !== undefined ? ( + + {cardContent} + + ) : ( +
    {cardContent}
    + )} +
    + ); +}; diff --git a/dashboard/client/src/pages/job/JobDetail.tsx b/dashboard/client/src/pages/job/JobDetail.tsx index 1861158bd2e9..90cf98e1c1f2 100644 --- a/dashboard/client/src/pages/job/JobDetail.tsx +++ b/dashboard/client/src/pages/job/JobDetail.tsx @@ -1,9 +1,13 @@ -import { Box, makeStyles, Typography } from "@material-ui/core"; +import { Box, makeStyles } from "@material-ui/core"; import React, { useContext, useRef, useState } from "react"; import { Link } from "react-router-dom"; import { GlobalContext } from "../../App"; import { CollapsibleSection } from "../../common/CollapsibleSection"; import { Section } from "../../common/Section"; +import { + NodeStatusCard, + ResourceStatusCard, +} from "../../components/AutoscalerStatusCards"; import Loading from "../../components/Loading"; import { StatusChip } from "../../components/StatusChip"; import TitleCard from "../../components/TitleCard"; @@ -12,7 +16,6 @@ import ActorList from "../actor/ActorList"; import { NodeCountCard } from "../overview/cards/NodeCountCard"; import PlacementGroupList from "../state/PlacementGroup"; import TaskList from "../state/task"; - import { useRayStatus } from "./hook/useClusterStatus"; import { useJobDetail } from "./hook/useJobDetail"; import { JobMetadataSection } from "./JobDetailInfoPage"; @@ -53,57 +56,6 @@ export const JobDetailChartsPage = () => { const actorTableRef = useRef(null); const { cluster_status } = useRayStatus(); - const formatNodeStatus = (cluster_status: string) => { - // ==== auto scaling status - // Node status - // .... - // Resources - // .... - const sections = cluster_status.split("Resources"); - return formatClusterStatus( - "Node Status", - sections[0].split("Node status")[1], - ); - }; - - const formatResourcesStatus = (cluster_status: string) => { - // ==== auto scaling status - // Node status - // .... - // Resources - // .... 
- const sections = cluster_status.split("Resources"); - return formatClusterStatus("Resource Status", sections[1]); - }; - - const formatClusterStatus = (title: string, cluster_status: string) => { - const cluster_status_rows = cluster_status.split("\n"); - - return ( -
    - - {title} - - {cluster_status_rows.map((i, key) => { - // Format the output. - // See format_info_string in util.py - if (i.startsWith("-----") || i.startsWith("=====") || i === "") { - // Ignore separators - return null; - } else if (i.endsWith(":")) { - return ( -
    - {i} -
    - ); - } else { - return
    {i}
    ; - } - })} -
    - ); - }; - if (!job) { return (
    @@ -192,7 +144,7 @@ export const JobDetailChartsPage = () => { @@ -205,34 +157,10 @@ export const JobDetailChartsPage = () => { >
    - - {cluster_status?.data - ? formatNodeStatus(cluster_status?.data.clusterStatus) - : "No cluster status."} - +
    - - {cluster_status?.data - ? formatResourcesStatus(cluster_status?.data.clusterStatus) - : "No cluster status."} - +
    diff --git a/dashboard/client/src/pages/overview/OverviewPage.tsx b/dashboard/client/src/pages/overview/OverviewPage.tsx index 4432896349f7..bdca3469b2a8 100644 --- a/dashboard/client/src/pages/overview/OverviewPage.tsx +++ b/dashboard/client/src/pages/overview/OverviewPage.tsx @@ -1,11 +1,19 @@ -import { createStyles, makeStyles } from "@material-ui/core"; +import { createStyles, Grid, makeStyles } from "@material-ui/core"; +import classNames from "classnames"; import React from "react"; import { CollapsibleSection } from "../../common/CollapsibleSection"; +import { + NodeStatusCard, + ResourceStatusCard, +} from "../../components/AutoscalerStatusCards"; import EventTable from "../../components/EventTable"; +import { useRayStatus } from "../job/hook/useClusterStatus"; import { MainNavPageInfo } from "../layout/mainNavContext"; import { ClusterUtilizationCard } from "./cards/ClusterUtilizationCard"; import { NodeCountCard } from "./cards/NodeCountCard"; +import { OverviewCard } from "./cards/OverviewCard"; import { RecentJobsCard } from "./cards/RecentJobsCard"; +import { RecentServeCard } from "./cards/RecentServeCard"; const useStyles = makeStyles((theme) => createStyles({ @@ -40,6 +48,8 @@ const useStyles = makeStyles((theme) => export const OverviewPage = () => { const classes = useStyles(); + const { cluster_status } = useRayStatus(); + return (
    { />
    - +
    + + { +
    + + + + + + + +
    + } +
    + createStyles({ - root: { - display: "flex", - flexDirection: "column", - padding: theme.spacing(2, 3), - }, - listContainer: { - marginTop: theme.spacing(2), - flex: 1, - overflow: "hidden", - }, - listItem: { - "&:not(:first-child)": { - marginTop: theme.spacing(1), - }, + icon: { + marginRight: theme.spacing(1), }, }), ); @@ -32,6 +18,15 @@ type RecentJobsCardProps = { className?: string; }; +const getLink = (job: UnifiedJob) => { + if (job.job_id !== null && job.job_id !== "") { + return `/jobs/${job.job_id}`; + } else if (job.submission_id !== null && job.submission_id !== "") { + return `/jobs/${job.submission_id}`; + } + return undefined; +}; + export const RecentJobsCard = ({ className }: RecentJobsCardProps) => { const classes = useStyles(); @@ -39,89 +34,24 @@ export const RecentJobsCard = ({ className }: RecentJobsCardProps) => { const sortedJobs = _.orderBy(jobList, ["startTime"], ["desc"]).slice(0, 6); - return ( - - Recent jobs -
    - {sortedJobs.map((job) => ( - - ))} - {sortedJobs.length === 0 && ( - No jobs yet... - )} -
    - -
    - ); -}; - -const useRecentJobListItemStyles = makeStyles((theme) => - createStyles({ - root: { - display: "flex", - flexDirection: "row", - flexWrap: "nowrap", - alignItems: "center", - textDecoration: "none", - }, - textContainer: { - flex: "1 1 auto", - width: `calc(100% - ${theme.spacing(1) + 20}px)`, - }, - title: { - color: "#036DCF", - }, - entrypoint: { - overflow: "hidden", - textOverflow: "ellipsis", - whiteSpace: "nowrap", - color: "#5F6469", - }, - icon: { - marginRight: theme.spacing(1), - }, - }), -); - -type RecentJobListItemProps = { - job: UnifiedJob; - className?: string; -}; - -const RecentJobListItem = ({ job, className }: RecentJobListItemProps) => { - const classes = useRecentJobListItemStyles(); - - const cardContent = ( - - -
    - - {job.job_id ?? job.submission_id} - - - {job.entrypoint} - -
    -
    - ); + const sortedJobToRender = sortedJobs.map((job) => { + return { + title: job.job_id ?? job.submission_id ?? undefined, + subtitle: job.entrypoint, + link: getLink(job), + className: className, + icon: , + }; + }); return ( -
    - {job.job_id !== null && job.job_id !== "" ? ( - - {cardContent} - - ) : ( -
    {cardContent}
    - )} -
    + ); }; diff --git a/dashboard/client/src/pages/overview/cards/RecentServeCard.component.test.tsx b/dashboard/client/src/pages/overview/cards/RecentServeCard.component.test.tsx new file mode 100644 index 000000000000..ccb62851d5bc --- /dev/null +++ b/dashboard/client/src/pages/overview/cards/RecentServeCard.component.test.tsx @@ -0,0 +1,83 @@ +import { render, screen } from "@testing-library/react"; +import React from "react"; +import { getServeApplications } from "../../../service/serve"; +import { + ServeApplicationStatus, + ServeDeploymentMode, +} from "../../../type/serve"; +import { TEST_APP_WRAPPER } from "../../../util/test-utils"; +import { RecentServeCard } from "./RecentServeCard"; + +jest.mock("../../../service/serve"); + +const mockGetServeApplications = jest.mocked(getServeApplications); + +describe("RecentServeCard", () => { + beforeEach(() => { + mockGetServeApplications.mockResolvedValue({ + data: { + http_options: { host: "1.2.3.4", port: 8000 }, + proxy_location: ServeDeploymentMode.EveryNode, + applications: { + home: { + name: "home", + route_prefix: "/", + message: null, + status: ServeApplicationStatus.RUNNING, + deployed_app_config: { + import_path: "home:graph", + }, + last_deployed_time_s: new Date().getTime() / 1000, + }, + "second-app": { + name: "second-app", + route_prefix: "/second-app", + message: null, + status: ServeApplicationStatus.DEPLOYING, + deployed_app_config: null, + last_deployed_time_s: new Date().getTime() / 1000, + deployments: {}, + }, + }, + }, + } as any); + }); + + it("should display serve applications with deployed_app_config", async () => { + render(, { + wrapper: TEST_APP_WRAPPER, + }); + + await screen.findByText("View all applications"); + + expect.assertions(3); + expect(screen.getByText("home")).toBeInTheDocument(); + expect(screen.getByText("home:graph")).toBeInTheDocument(); + expect(screen.getByText("Serve Applications")).toBeInTheDocument(); + }); + + it("should display serve applications without 
deployed_app_config", async () => { + render(, { + wrapper: TEST_APP_WRAPPER, + }); + + await screen.findByText("View all applications"); + + expect.assertions(3); + expect(screen.getByText("second-app")).toBeInTheDocument(); + expect(screen.getByText("-")).toBeInTheDocument(); // default value for no deployed_app_config + expect(screen.getByText("Serve Applications")).toBeInTheDocument(); + }); + + it("should navigate to the applications page when the 'View all applications' link is clicked", async () => { + render(, { + wrapper: TEST_APP_WRAPPER, + }); + + await screen.findByText("View all applications"); + const link = screen.getByRole("link", { + name: /view all applications/i, + }); + expect(link).toHaveAttribute("href"); + }); +}); diff --git a/dashboard/client/src/pages/overview/cards/RecentServeCard.tsx b/dashboard/client/src/pages/overview/cards/RecentServeCard.tsx new file mode 100644 index 000000000000..960cd8738006 --- /dev/null +++ b/dashboard/client/src/pages/overview/cards/RecentServeCard.tsx @@ -0,0 +1,53 @@ +import { createStyles, makeStyles } from "@material-ui/core"; +import _ from "lodash"; +import React from "react"; +import { ServeStatusIcon } from "../../../common/ServeStatus"; +import { ListItemCard } from "../../../components/ListItemCard"; +import { useServeApplications } from "../../serve/hook/useServeApplications"; + +const useStyles = makeStyles((theme) => + createStyles({ + icon: { + marginRight: theme.spacing(1), + }, + }), +); + +type RecentServeCardProps = { + className?: string; +}; + +export const RecentServeCard = ({ className }: RecentServeCardProps) => { + const classes = useStyles(); + + // Use mock data by uncommenting the following line + // const applications = mockServeApplications.applications; + const { allServeApplications: applications } = useServeApplications(); + + const sortedApplications = _.orderBy( + applications, + ["last_deployed_time_s"], + ["desc"], + ).slice(0, 6); + + const sortedApplicationsToRender = 
sortedApplications.map((app) => { + return { + title: app.name, + subtitle: app?.deployed_app_config?.import_path || "-", + link: app.name ? `/serve/applications/${app.name}` : undefined, + className: className, + icon: , + }; + }); + + return ( + + ); +}; diff --git a/dashboard/client/src/pages/serve/ServeApplicationDetailPage.tsx b/dashboard/client/src/pages/serve/ServeApplicationDetailPage.tsx index efecc9939558..a3245b44b49f 100644 --- a/dashboard/client/src/pages/serve/ServeApplicationDetailPage.tsx +++ b/dashboard/client/src/pages/serve/ServeApplicationDetailPage.tsx @@ -111,7 +111,7 @@ export const ServeApplicationDetailPage = () => { }, { label: "Application config", - content: ( + content: application.deployed_app_config ? ( { } code={application.deployed_app_config} /> + ) : ( + - ), }, { @@ -138,6 +140,12 @@ export const ServeApplicationDetailPage = () => { /> ), }, + { + label: "Import path", + content: { + value: application?.deployed_app_config?.import_path || "-", + }, + }, ]} /> diff --git a/dashboard/client/src/pages/serve/ServeApplicationRow.tsx b/dashboard/client/src/pages/serve/ServeApplicationRow.tsx index d3cf37a24eec..54d06cd964ca 100644 --- a/dashboard/client/src/pages/serve/ServeApplicationRow.tsx +++ b/dashboard/client/src/pages/serve/ServeApplicationRow.tsx @@ -54,10 +54,16 @@ export const ServeApplicationRow = ({ - + {deployed_app_config ? 
( + + ) : ( + "-" + )} ); diff --git a/dashboard/client/src/pages/serve/mockServeApplication.ts b/dashboard/client/src/pages/serve/mockServeApplication.ts new file mode 100644 index 000000000000..d1acf376f405 --- /dev/null +++ b/dashboard/client/src/pages/serve/mockServeApplication.ts @@ -0,0 +1,63 @@ +import { ServeApplicationStatus, ServeDeploymentMode } from "../../type/serve"; + +export const mockServeApplications = { + applications: { + app1: { + name: "app1", + route_prefix: "/app1", + message: null, + status: ServeApplicationStatus.RUNNING, + deployed_app_config: { + import_path: "app1:graph", + }, + last_deployed_time_s: new Date().getTime() / 1000, + }, + app2: { + name: "app2", + route_prefix: "/app2", + message: null, + status: ServeApplicationStatus.RUNNING, + deployed_app_config: null, + last_deployed_time_s: new Date().getTime() / 1000, + deployments: {}, + }, + app3: { + name: "app3", + route_prefix: "/app3", + message: null, + status: ServeApplicationStatus.DEPLOYING, + deployed_app_config: null, + last_deployed_time_s: new Date().getTime() / 1000, + deployments: {}, + }, + app4: { + name: "app4", + route_prefix: "/app4", + message: null, + status: ServeApplicationStatus.RUNNING, + deployed_app_config: { + import_path: "app4:graph", + }, + last_deployed_time_s: new Date().getTime() / 1000, + }, + app5: { + name: "app5", + route_prefix: "/app5", + message: null, + status: ServeApplicationStatus.DEPLOY_FAILED, + deployed_app_config: { + import_path: "app5:graph", + }, + last_deployed_time_s: new Date().getTime() / 1000, + }, + app6: { + name: "app6", + route_prefix: "/app6", + message: null, + status: ServeApplicationStatus.DELETING, + deployed_app_config: null, + last_deployed_time_s: new Date().getTime() / 1000, + deployments: {}, + }, + }, +}; diff --git a/dashboard/client/src/type/serve.ts b/dashboard/client/src/type/serve.ts index 839613ec548f..47d978518976 100644 --- a/dashboard/client/src/type/serve.ts +++ b/dashboard/client/src/type/serve.ts 
@@ -14,7 +14,7 @@ export type ServeApplication = { status: ServeApplicationStatus; message: string; last_deployed_time_s: number; - deployed_app_config: Record; + deployed_app_config: Record | null; // It could be null if user did not provide deployed_app_config deployments: { [name: string]: ServeDeployment; }; From d8fa45656d58ebf71c2ebd80c4560e24100cd8c3 Mon Sep 17 00:00:00 2001 From: Chao Wang <125417081+chaowanggg@users.noreply.github.com> Date: Mon, 8 May 2023 18:00:19 -0700 Subject: [PATCH 288/424] Clean SWR cache between each test cases (#35097) In some cases, it's necessary to mock the useSWR function in Jest test cases. However, if we don't clear the SWR cache between different test cases, we may encounter an error where a test case unintentionally reuses data from a previous test case instead of creating new mock data. By implementing this fix, we can ensure that our test cases are isolated and independent, and that they accurately reflect the behavior of our code under different scenarios. --- dashboard/client/src/util/test-utils.tsx | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/dashboard/client/src/util/test-utils.tsx b/dashboard/client/src/util/test-utils.tsx index 009599636021..6ee6713d5d47 100644 --- a/dashboard/client/src/util/test-utils.tsx +++ b/dashboard/client/src/util/test-utils.tsx @@ -1,6 +1,7 @@ import { ThemeProvider } from "@material-ui/styles"; import React, { PropsWithChildren } from "react"; import { MemoryRouter } from "react-router-dom"; +import { SWRConfig } from "swr"; import { GlobalContext, GlobalContextType } from "../App"; import { lightTheme } from "../theme"; @@ -23,9 +24,14 @@ export const TEST_APP_WRAPPER = ({ children }: PropsWithChildren<{}>) => { return ( - - {children} - + {/* + Clear SWR cache between tests so that tests do impact each other. 
+ */} + new Map() }}> + + {children} + + ); }; From a3dd3470a4b1b17150a89fbeaeef319b8f9d1e00 Mon Sep 17 00:00:00 2001 From: Lonnie Liu <95255098+aslonnie@users.noreply.github.com> Date: Mon, 8 May 2023 21:42:15 -0700 Subject: [PATCH 289/424] Revert "[Overview][Serve] Add Recent Serve Applications Card" (#35155) Reverts ray-project/ray#34642 This seems be breaking all pipeline builds. as it is failing to build the base container. --- .../src/common/ServeStatus.component.test.tsx | 60 -------- dashboard/client/src/common/ServeStatus.tsx | 77 ---------- .../src/components/AutoscalerStatusCards.tsx | 94 ------------ .../client/src/components/ListItemCard.tsx | 136 ------------------ dashboard/client/src/pages/job/JobDetail.tsx | 88 ++++++++++-- .../src/pages/overview/OverviewPage.tsx | 36 +---- .../pages/overview/cards/RecentJobsCard.tsx | 130 +++++++++++++---- .../cards/RecentServeCard.component.test.tsx | 83 ----------- .../pages/overview/cards/RecentServeCard.tsx | 53 ------- .../serve/ServeApplicationDetailPage.tsx | 10 +- .../src/pages/serve/ServeApplicationRow.tsx | 14 +- .../src/pages/serve/mockServeApplication.ts | 63 -------- dashboard/client/src/type/serve.ts | 2 +- 13 files changed, 188 insertions(+), 658 deletions(-) delete mode 100644 dashboard/client/src/common/ServeStatus.component.test.tsx delete mode 100644 dashboard/client/src/common/ServeStatus.tsx delete mode 100644 dashboard/client/src/components/AutoscalerStatusCards.tsx delete mode 100644 dashboard/client/src/components/ListItemCard.tsx delete mode 100644 dashboard/client/src/pages/overview/cards/RecentServeCard.component.test.tsx delete mode 100644 dashboard/client/src/pages/overview/cards/RecentServeCard.tsx delete mode 100644 dashboard/client/src/pages/serve/mockServeApplication.ts diff --git a/dashboard/client/src/common/ServeStatus.component.test.tsx b/dashboard/client/src/common/ServeStatus.component.test.tsx deleted file mode 100644 index 5436f583e02f..000000000000 --- 
a/dashboard/client/src/common/ServeStatus.component.test.tsx +++ /dev/null @@ -1,60 +0,0 @@ -import { render, screen } from "@testing-library/react"; -import React from "react"; -import { ServeApplication, ServeApplicationStatus } from "../type/serve"; -import { ServeStatusIcon } from "./ServeStatus"; - -const APP: ServeApplication = { - name: "MyServeApp", - route_prefix: "/my-serve-app", - docs_path: null, - status: ServeApplicationStatus.RUNNING, - message: "", - last_deployed_time_s: 1682029771.0748637, - deployed_app_config: null, - deployments: {}, -}; - -describe("ServeStatusIcon", () => { - it("renders RUNNING status", async () => { - render(); - - await screen.findByTestId("serve-status-icon"); - - const icon = screen.getByTestId("serve-status-icon"); - const classList = icon.getAttribute("class"); - expect(classList).toContain("colorSuccess"); - }); - - it("renders NOT_STARTED status", async () => { - render( - , - ); - - await screen.findByTestId("serve-status-icon"); - - expect(screen.queryByTestId("serve-status-icon")).not.toHaveClass( - "colorSuccess", - ); - expect(screen.queryByTestId("serve-status-icon")).not.toHaveClass( - "colorError", - ); - }); - - it("renders DEPLOY_FAILED status", async () => { - render( - , - ); - - await screen.findByTestId("serve-status-icon"); - - const icon = screen.getByTestId("serve-status-icon"); - const classList = icon.getAttribute("class"); - expect(classList).toContain("colorError"); - }); -}); diff --git a/dashboard/client/src/common/ServeStatus.tsx b/dashboard/client/src/common/ServeStatus.tsx deleted file mode 100644 index dd4ebad48889..000000000000 --- a/dashboard/client/src/common/ServeStatus.tsx +++ /dev/null @@ -1,77 +0,0 @@ -import { createStyles, makeStyles } from "@material-ui/core"; -import classNames from "classnames"; -import React from "react"; -import { - RiCloseCircleFill, - RiRecordCircleFill, - RiStopCircleFill, -} from "react-icons/ri"; -import { ServeApplication } from "../type/serve"; -import 
{ JobRunningIcon } from "./JobStatus"; -import { ClassNameProps } from "./props"; - -type ServeStatusIconProps = { - app: ServeApplication; - small: boolean; -} & ClassNameProps; - -const useServeStatusIconStyles = makeStyles((theme) => - createStyles({ - icon: { - width: 20, - height: 20, - marginRight: 8, - }, - iconSmall: { - width: 16, - height: 16, - }, - colorSuccess: { - color: theme.palette.success.main, - }, - colorError: { - color: theme.palette.error.main, - }, - }), -); - -export const ServeStatusIcon = ({ - app, - small, - className, -}: ServeStatusIconProps) => { - const classes = useServeStatusIconStyles(); - - switch (app.status) { - case "RUNNING": - return ( - - ); - case "NOT_STARTED": - return ( - - ); - case "DEPLOY_FAILED": - return ( - - ); - default: - // DEPLOYING || DELETEING - return ( - - ); - } -}; diff --git a/dashboard/client/src/components/AutoscalerStatusCards.tsx b/dashboard/client/src/components/AutoscalerStatusCards.tsx deleted file mode 100644 index 887c192ae36b..000000000000 --- a/dashboard/client/src/components/AutoscalerStatusCards.tsx +++ /dev/null @@ -1,94 +0,0 @@ -import { Box, Typography } from "@material-ui/core"; -import React from "react"; -import { RayStatusResp } from "../service/status"; - -const formatNodeStatus = (cluster_status: string) => { - // ==== auto scaling status - // Node status - // .... - // Resources - // .... - const sections = cluster_status.split("Resources"); - return formatClusterStatus( - "Node Status", - sections[0].split("Node status")[1], - ); -}; - -const formatResourcesStatus = (cluster_status: string) => { - // ==== auto scaling status - // Node status - // .... - // Resources - // .... - const sections = cluster_status.split("Resources"); - return formatClusterStatus("Resource Status", sections[1]); -}; - -const formatClusterStatus = (title: string, cluster_status: string) => { - const cluster_status_rows = cluster_status.split("\n"); - - return ( -
    - - {title} - - {cluster_status_rows.map((i, key) => { - // Format the output. - // See format_info_string in util.py - if (i.startsWith("-----") || i.startsWith("=====") || i === "") { - // Ignore separators - return null; - } else if (i.endsWith(":")) { - return ( -
    - {i} -
    - ); - } else { - return
    {i}
    ; - } - })} -
    - ); -}; - -type StatusCardProps = { - cluster_status: RayStatusResp | undefined; -}; - -export const NodeStatusCard = ({ cluster_status }: StatusCardProps) => { - return ( - - {cluster_status?.data - ? formatNodeStatus(cluster_status?.data.clusterStatus) - : "No cluster status."} - - ); -}; - -export const ResourceStatusCard = ({ cluster_status }: StatusCardProps) => { - return ( - - {cluster_status?.data - ? formatResourcesStatus(cluster_status?.data.clusterStatus) - : "No cluster status."} - - ); -}; diff --git a/dashboard/client/src/components/ListItemCard.tsx b/dashboard/client/src/components/ListItemCard.tsx deleted file mode 100644 index af2d1d33632a..000000000000 --- a/dashboard/client/src/components/ListItemCard.tsx +++ /dev/null @@ -1,136 +0,0 @@ -import { createStyles, makeStyles, Typography } from "@material-ui/core"; -import classNames from "classnames"; -import _ from "lodash"; -import React, { ReactNode } from "react"; -import { Link } from "react-router-dom"; -import { ClassNameProps } from "../common/props"; -import { - LinkWithArrow, - OverviewCard, -} from "../pages/overview/cards/OverviewCard"; - -type ListItemCardProps = { - headerTitle: string; - items: ListItemProps[]; - emptyListText: string; - footerText: string; - footerLink: string; -} & ClassNameProps; - -type ListItemProps = { - title: string | undefined; - subtitle: string; - link: string | undefined; - icon: ReactNode; -} & ClassNameProps; - -const useStyles = makeStyles((theme) => - createStyles({ - root: { - display: "flex", - flexDirection: "column", - padding: theme.spacing(2, 3), - }, - listContainer: { - marginTop: theme.spacing(2), - flex: 1, - overflow: "hidden", - }, - listItem: { - "&:not(:first-child)": { - marginTop: theme.spacing(1), - }, - }, - }), -); - -export const ListItemCard = ({ - className, - headerTitle, - items, - emptyListText: itemEmptyTip, - footerText, - footerLink, -}: ListItemCardProps) => { - const classes = useStyles(); - - return ( - - 
{headerTitle} -
    - {items.map((item: ListItemProps) => ( - - ))} - {items.length === 0 && ( - {itemEmptyTip} - )} -
    - -
    - ); -}; - -const useListItemStyles = makeStyles((theme) => - createStyles({ - root: { - display: "flex", - flexDirection: "row", - flexWrap: "nowrap", - alignItems: "center", - textDecoration: "none", - }, - - textContainer: { - flex: "1 1 auto", - width: `calc(100% - ${theme.spacing(1) + 20}px)`, - }, - title: { - color: "#036DCF", - }, - entrypoint: { - overflow: "hidden", - textOverflow: "ellipsis", - whiteSpace: "nowrap", - color: "#5F6469", - }, - }), -); - -const ListItem = ({ - icon, - title, - subtitle, - className, - link, -}: ListItemProps) => { - const classes = useListItemStyles(); - - const cardContent = ( - - {icon} -
    - - {title} - - - {subtitle} - -
    -
    - ); - return ( -
    - {link !== undefined ? ( - - {cardContent} - - ) : ( -
    {cardContent}
    - )} -
    - ); -}; diff --git a/dashboard/client/src/pages/job/JobDetail.tsx b/dashboard/client/src/pages/job/JobDetail.tsx index 90cf98e1c1f2..1861158bd2e9 100644 --- a/dashboard/client/src/pages/job/JobDetail.tsx +++ b/dashboard/client/src/pages/job/JobDetail.tsx @@ -1,13 +1,9 @@ -import { Box, makeStyles } from "@material-ui/core"; +import { Box, makeStyles, Typography } from "@material-ui/core"; import React, { useContext, useRef, useState } from "react"; import { Link } from "react-router-dom"; import { GlobalContext } from "../../App"; import { CollapsibleSection } from "../../common/CollapsibleSection"; import { Section } from "../../common/Section"; -import { - NodeStatusCard, - ResourceStatusCard, -} from "../../components/AutoscalerStatusCards"; import Loading from "../../components/Loading"; import { StatusChip } from "../../components/StatusChip"; import TitleCard from "../../components/TitleCard"; @@ -16,6 +12,7 @@ import ActorList from "../actor/ActorList"; import { NodeCountCard } from "../overview/cards/NodeCountCard"; import PlacementGroupList from "../state/PlacementGroup"; import TaskList from "../state/task"; + import { useRayStatus } from "./hook/useClusterStatus"; import { useJobDetail } from "./hook/useJobDetail"; import { JobMetadataSection } from "./JobDetailInfoPage"; @@ -56,6 +53,57 @@ export const JobDetailChartsPage = () => { const actorTableRef = useRef(null); const { cluster_status } = useRayStatus(); + const formatNodeStatus = (cluster_status: string) => { + // ==== auto scaling status + // Node status + // .... + // Resources + // .... + const sections = cluster_status.split("Resources"); + return formatClusterStatus( + "Node Status", + sections[0].split("Node status")[1], + ); + }; + + const formatResourcesStatus = (cluster_status: string) => { + // ==== auto scaling status + // Node status + // .... + // Resources + // .... 
+ const sections = cluster_status.split("Resources"); + return formatClusterStatus("Resource Status", sections[1]); + }; + + const formatClusterStatus = (title: string, cluster_status: string) => { + const cluster_status_rows = cluster_status.split("\n"); + + return ( +
    + + {title} + + {cluster_status_rows.map((i, key) => { + // Format the output. + // See format_info_string in util.py + if (i.startsWith("-----") || i.startsWith("=====") || i === "") { + // Ignore separators + return null; + } else if (i.endsWith(":")) { + return ( +
    + {i} +
    + ); + } else { + return
    {i}
    ; + } + })} +
    + ); + }; + if (!job) { return (
    @@ -144,7 +192,7 @@ export const JobDetailChartsPage = () => { @@ -157,10 +205,34 @@ export const JobDetailChartsPage = () => { >
    - + + {cluster_status?.data + ? formatNodeStatus(cluster_status?.data.clusterStatus) + : "No cluster status."} +
    - + + {cluster_status?.data + ? formatResourcesStatus(cluster_status?.data.clusterStatus) + : "No cluster status."} +
    diff --git a/dashboard/client/src/pages/overview/OverviewPage.tsx b/dashboard/client/src/pages/overview/OverviewPage.tsx index bdca3469b2a8..4432896349f7 100644 --- a/dashboard/client/src/pages/overview/OverviewPage.tsx +++ b/dashboard/client/src/pages/overview/OverviewPage.tsx @@ -1,19 +1,11 @@ -import { createStyles, Grid, makeStyles } from "@material-ui/core"; -import classNames from "classnames"; +import { createStyles, makeStyles } from "@material-ui/core"; import React from "react"; import { CollapsibleSection } from "../../common/CollapsibleSection"; -import { - NodeStatusCard, - ResourceStatusCard, -} from "../../components/AutoscalerStatusCards"; import EventTable from "../../components/EventTable"; -import { useRayStatus } from "../job/hook/useClusterStatus"; import { MainNavPageInfo } from "../layout/mainNavContext"; import { ClusterUtilizationCard } from "./cards/ClusterUtilizationCard"; import { NodeCountCard } from "./cards/NodeCountCard"; -import { OverviewCard } from "./cards/OverviewCard"; import { RecentJobsCard } from "./cards/RecentJobsCard"; -import { RecentServeCard } from "./cards/RecentServeCard"; const useStyles = makeStyles((theme) => createStyles({ @@ -48,8 +40,6 @@ const useStyles = makeStyles((theme) => export const OverviewPage = () => { const classes = useStyles(); - const { cluster_status } = useRayStatus(); - return (
    { />
    + -
    - - { -
    - - - - - - - -
    - } -
    - createStyles({ - icon: { - marginRight: theme.spacing(1), + root: { + display: "flex", + flexDirection: "column", + padding: theme.spacing(2, 3), + }, + listContainer: { + marginTop: theme.spacing(2), + flex: 1, + overflow: "hidden", + }, + listItem: { + "&:not(:first-child)": { + marginTop: theme.spacing(1), + }, }, }), ); @@ -18,15 +32,6 @@ type RecentJobsCardProps = { className?: string; }; -const getLink = (job: UnifiedJob) => { - if (job.job_id !== null && job.job_id !== "") { - return `/jobs/${job.job_id}`; - } else if (job.submission_id !== null && job.submission_id !== "") { - return `/jobs/${job.submission_id}`; - } - return undefined; -}; - export const RecentJobsCard = ({ className }: RecentJobsCardProps) => { const classes = useStyles(); @@ -34,24 +39,89 @@ export const RecentJobsCard = ({ className }: RecentJobsCardProps) => { const sortedJobs = _.orderBy(jobList, ["startTime"], ["desc"]).slice(0, 6); - const sortedJobToRender = sortedJobs.map((job) => { - return { - title: job.job_id ?? job.submission_id ?? undefined, - subtitle: job.entrypoint, - link: getLink(job), - className: className, - icon: , - }; - }); + return ( + + Recent jobs +
    + {sortedJobs.map((job) => ( + + ))} + {sortedJobs.length === 0 && ( + No jobs yet... + )} +
    + +
    + ); +}; + +const useRecentJobListItemStyles = makeStyles((theme) => + createStyles({ + root: { + display: "flex", + flexDirection: "row", + flexWrap: "nowrap", + alignItems: "center", + textDecoration: "none", + }, + textContainer: { + flex: "1 1 auto", + width: `calc(100% - ${theme.spacing(1) + 20}px)`, + }, + title: { + color: "#036DCF", + }, + entrypoint: { + overflow: "hidden", + textOverflow: "ellipsis", + whiteSpace: "nowrap", + color: "#5F6469", + }, + icon: { + marginRight: theme.spacing(1), + }, + }), +); + +type RecentJobListItemProps = { + job: UnifiedJob; + className?: string; +}; + +const RecentJobListItem = ({ job, className }: RecentJobListItemProps) => { + const classes = useRecentJobListItemStyles(); + + const cardContent = ( + + +
    + + {job.job_id ?? job.submission_id} + + + {job.entrypoint} + +
    +
    + ); return ( - +
    + {job.job_id !== null && job.job_id !== "" ? ( + + {cardContent} + + ) : ( +
    {cardContent}
    + )} +
    ); }; diff --git a/dashboard/client/src/pages/overview/cards/RecentServeCard.component.test.tsx b/dashboard/client/src/pages/overview/cards/RecentServeCard.component.test.tsx deleted file mode 100644 index ccb62851d5bc..000000000000 --- a/dashboard/client/src/pages/overview/cards/RecentServeCard.component.test.tsx +++ /dev/null @@ -1,83 +0,0 @@ -import { render, screen } from "@testing-library/react"; -import React from "react"; -import { getServeApplications } from "../../../service/serve"; -import { - ServeApplicationStatus, - ServeDeploymentMode, -} from "../../../type/serve"; -import { TEST_APP_WRAPPER } from "../../../util/test-utils"; -import { RecentServeCard } from "./RecentServeCard"; - -jest.mock("../../../service/serve"); - -const mockGetServeApplications = jest.mocked(getServeApplications); - -describe("RecentServeCard", () => { - beforeEach(() => { - mockGetServeApplications.mockResolvedValue({ - data: { - http_options: { host: "1.2.3.4", port: 8000 }, - proxy_location: ServeDeploymentMode.EveryNode, - applications: { - home: { - name: "home", - route_prefix: "/", - message: null, - status: ServeApplicationStatus.RUNNING, - deployed_app_config: { - import_path: "home:graph", - }, - last_deployed_time_s: new Date().getTime() / 1000, - }, - "second-app": { - name: "second-app", - route_prefix: "/second-app", - message: null, - status: ServeApplicationStatus.DEPLOYING, - deployed_app_config: null, - last_deployed_time_s: new Date().getTime() / 1000, - deployments: {}, - }, - }, - }, - } as any); - }); - - it("should display serve applications with deployed_app_config", async () => { - render(, { - wrapper: TEST_APP_WRAPPER, - }); - - await screen.findByText("View all applications"); - - expect.assertions(3); - expect(screen.getByText("home")).toBeInTheDocument(); - expect(screen.getByText("home:graph")).toBeInTheDocument(); - expect(screen.getByText("Serve Applications")).toBeInTheDocument(); - }); - - it("should display serve applications without 
deployed_app_config", async () => { - render(, { - wrapper: TEST_APP_WRAPPER, - }); - - await screen.findByText("View all applications"); - - expect.assertions(3); - expect(screen.getByText("second-app")).toBeInTheDocument(); - expect(screen.getByText("-")).toBeInTheDocument(); // default value for no deployed_app_config - expect(screen.getByText("Serve Applications")).toBeInTheDocument(); - }); - - it("should navigate to the applications page when the 'View all applications' link is clicked", async () => { - render(, { - wrapper: TEST_APP_WRAPPER, - }); - - await screen.findByText("View all applications"); - const link = screen.getByRole("link", { - name: /view all applications/i, - }); - expect(link).toHaveAttribute("href"); - }); -}); diff --git a/dashboard/client/src/pages/overview/cards/RecentServeCard.tsx b/dashboard/client/src/pages/overview/cards/RecentServeCard.tsx deleted file mode 100644 index 960cd8738006..000000000000 --- a/dashboard/client/src/pages/overview/cards/RecentServeCard.tsx +++ /dev/null @@ -1,53 +0,0 @@ -import { createStyles, makeStyles } from "@material-ui/core"; -import _ from "lodash"; -import React from "react"; -import { ServeStatusIcon } from "../../../common/ServeStatus"; -import { ListItemCard } from "../../../components/ListItemCard"; -import { useServeApplications } from "../../serve/hook/useServeApplications"; - -const useStyles = makeStyles((theme) => - createStyles({ - icon: { - marginRight: theme.spacing(1), - }, - }), -); - -type RecentServeCardProps = { - className?: string; -}; - -export const RecentServeCard = ({ className }: RecentServeCardProps) => { - const classes = useStyles(); - - // Use mock data by uncommenting the following line - // const applications = mockServeApplications.applications; - const { allServeApplications: applications } = useServeApplications(); - - const sortedApplications = _.orderBy( - applications, - ["last_deployed_time_s"], - ["desc"], - ).slice(0, 6); - - const sortedApplicationsToRender = 
sortedApplications.map((app) => { - return { - title: app.name, - subtitle: app?.deployed_app_config?.import_path || "-", - link: app.name ? `/serve/applications/${app.name}` : undefined, - className: className, - icon: , - }; - }); - - return ( - - ); -}; diff --git a/dashboard/client/src/pages/serve/ServeApplicationDetailPage.tsx b/dashboard/client/src/pages/serve/ServeApplicationDetailPage.tsx index a3245b44b49f..efecc9939558 100644 --- a/dashboard/client/src/pages/serve/ServeApplicationDetailPage.tsx +++ b/dashboard/client/src/pages/serve/ServeApplicationDetailPage.tsx @@ -111,7 +111,7 @@ export const ServeApplicationDetailPage = () => { }, { label: "Application config", - content: application.deployed_app_config ? ( + content: ( { } code={application.deployed_app_config} /> - ) : ( - - ), }, { @@ -140,12 +138,6 @@ export const ServeApplicationDetailPage = () => { /> ), }, - { - label: "Import path", - content: { - value: application?.deployed_app_config?.import_path || "-", - }, - }, ]} /> diff --git a/dashboard/client/src/pages/serve/ServeApplicationRow.tsx b/dashboard/client/src/pages/serve/ServeApplicationRow.tsx index 54d06cd964ca..d3cf37a24eec 100644 --- a/dashboard/client/src/pages/serve/ServeApplicationRow.tsx +++ b/dashboard/client/src/pages/serve/ServeApplicationRow.tsx @@ -54,16 +54,10 @@ export const ServeApplicationRow = ({ - {deployed_app_config ? 
( - - ) : ( - "-" - )} + ); diff --git a/dashboard/client/src/pages/serve/mockServeApplication.ts b/dashboard/client/src/pages/serve/mockServeApplication.ts deleted file mode 100644 index d1acf376f405..000000000000 --- a/dashboard/client/src/pages/serve/mockServeApplication.ts +++ /dev/null @@ -1,63 +0,0 @@ -import { ServeApplicationStatus, ServeDeploymentMode } from "../../type/serve"; - -export const mockServeApplications = { - applications: { - app1: { - name: "app1", - route_prefix: "/app1", - message: null, - status: ServeApplicationStatus.RUNNING, - deployed_app_config: { - import_path: "app1:graph", - }, - last_deployed_time_s: new Date().getTime() / 1000, - }, - app2: { - name: "app2", - route_prefix: "/app2", - message: null, - status: ServeApplicationStatus.RUNNING, - deployed_app_config: null, - last_deployed_time_s: new Date().getTime() / 1000, - deployments: {}, - }, - app3: { - name: "app3", - route_prefix: "/app3", - message: null, - status: ServeApplicationStatus.DEPLOYING, - deployed_app_config: null, - last_deployed_time_s: new Date().getTime() / 1000, - deployments: {}, - }, - app4: { - name: "app4", - route_prefix: "/app4", - message: null, - status: ServeApplicationStatus.RUNNING, - deployed_app_config: { - import_path: "app4:graph", - }, - last_deployed_time_s: new Date().getTime() / 1000, - }, - app5: { - name: "app5", - route_prefix: "/app5", - message: null, - status: ServeApplicationStatus.DEPLOY_FAILED, - deployed_app_config: { - import_path: "app5:graph", - }, - last_deployed_time_s: new Date().getTime() / 1000, - }, - app6: { - name: "app6", - route_prefix: "/app6", - message: null, - status: ServeApplicationStatus.DELETING, - deployed_app_config: null, - last_deployed_time_s: new Date().getTime() / 1000, - deployments: {}, - }, - }, -}; diff --git a/dashboard/client/src/type/serve.ts b/dashboard/client/src/type/serve.ts index 47d978518976..839613ec548f 100644 --- a/dashboard/client/src/type/serve.ts +++ 
b/dashboard/client/src/type/serve.ts @@ -14,7 +14,7 @@ export type ServeApplication = { status: ServeApplicationStatus; message: string; last_deployed_time_s: number; - deployed_app_config: Record | null; // It could be null if user did not provide deployed_app_config + deployed_app_config: Record; deployments: { [name: string]: ServeDeployment; }; From 7e7aff844b8633ac358e138efb39cd001c876a4f Mon Sep 17 00:00:00 2001 From: Kai Fricke Date: Tue, 9 May 2023 09:50:52 +0100 Subject: [PATCH 290/424] [air/output] Fix trial status at end (more info + cut off) (#35128) This PR ensures that the full trial status table is printed at the end of a Ray Tune run with the new output engine. Additionally, trial status data was previously always cut off - now we enforce that when `force= True`, all trial data is reported. It also fixes a bug for showing the `more_info` field (how many more trials with a specific status are available). Signed-off-by: Kai Fricke --- python/ray/tune/experimental/output.py | 59 +++++++++++++-------- python/ray/tune/tests/output/test_output.py | 6 +-- 2 files changed, 40 insertions(+), 25 deletions(-) diff --git a/python/ray/tune/experimental/output.py b/python/ray/tune/experimental/output.py index 0cdbd152a766..3820c9549e41 100644 --- a/python/ray/tune/experimental/output.py +++ b/python/ray/tune/experimental/output.py @@ -290,7 +290,8 @@ def _get_trial_table_data_per_status( more_info = None for t in trials: if len(trial_infos) >= max_row: - more_info = f"... and {str(len(trials) - max_row)} more {status} ..." + remaining = len(trials) - max_row + more_info = f"{remaining} more {status}" break trial_infos.append(_get_trial_info(t, metric_keys)) return _PerStatusTrialTableData(trial_infos, more_info) @@ -299,6 +300,7 @@ def _get_trial_table_data_per_status( def _get_trial_table_data( trials: List[Trial], metric_keys: List[str], + all_rows: bool = False, ) -> _TrialTableData: """Generate a table showing the current progress of tuning trials. 
@@ -307,6 +309,7 @@ def _get_trial_table_data( metric_keys: Ordered list of metrics to be displayed in the table. Including both default and user defined. Will only be shown if at least one trial is having the key. + all_rows: Force to show all rows. Returns: Trial table data, including header and trial table per each status. @@ -342,7 +345,7 @@ def _get_trial_table_data( t_status, trials_by_state[t_status], metric_keys=formatted_metric_columns, - force_max_rows=len(trials) > max_trial_num_to_show, + force_max_rows=not all_rows and len(trials) > max_trial_num_to_show, ) if trial_data_per_status: trial_data.append(trial_data_per_status) @@ -503,10 +506,10 @@ def print_heartbeat(self, trials, *args, force: bool = False): if self._verbosity < self._heartbeat_threshold: return if force or time.time() - self._last_heartbeat_time > self._heartbeat_freq: - self._print_heartbeat(trials, *args) + self._print_heartbeat(trials, *args, force=force) self._last_heartbeat_time = time.time() - def _print_heartbeat(self, trials, *args): + def _print_heartbeat(self, trials, *args, force: bool = False): raise NotImplementedError @@ -559,7 +562,9 @@ def _get_overall_trial_progress_str(self, trials): return f"Trial status: {result}" # TODO: Return a more structured type to share code with Jupyter flow. 
- def _get_heartbeat(self, trials, *sys_args) -> Tuple[List[str], _TrialTableData]: + def _get_heartbeat( + self, trials, *sys_args, force_full_output: bool = False + ) -> Tuple[List[str], _TrialTableData]: result = list() # Trial status: 1 RUNNING | 7 PENDING result.append(self._get_overall_trial_progress_str(trials)) @@ -580,10 +585,12 @@ def _get_heartbeat(self, trials, *sys_args) -> Tuple[List[str], _TrialTableData] all_metrics = list(DEFAULT_COLUMNS.keys()) + self._inferred_metric - trial_table_data = _get_trial_table_data(trials, all_metrics) + trial_table_data = _get_trial_table_data( + trials, all_metrics, all_rows=force_full_output + ) return result, trial_table_data - def _print_heartbeat(self, trials, *sys_args): + def _print_heartbeat(self, trials, *sys_args, force: bool = False): raise NotImplementedError @@ -624,28 +631,34 @@ def experiment_started( **kwargs, ) - def _print_heartbeat(self, trials, *sys_args): - if self._verbosity < self._heartbeat_threshold: + def _print_heartbeat(self, trials, *sys_args, force: bool = False): + if self._verbosity < self._heartbeat_threshold and not force: return - heartbeat_strs, table_data = self._get_heartbeat(trials, *sys_args) + heartbeat_strs, table_data = self._get_heartbeat( + trials, *sys_args, force_full_output=force + ) + for s in heartbeat_strs: print(s) # now print the table using Tabulate - all_infos = [] + more_infos = [] + all_data = [] header = table_data.header - table_data_list = table_data.data - for table in table_data_list: - all_infos.extend(table.trial_infos) - if table.more_info: - all_infos.append(table.more_info) + for sub_table in table_data.data: + all_data.extend(sub_table.trial_infos) + if sub_table.more_info: + more_infos.append(sub_table.more_info) + print( tabulate( - all_infos, + all_data, headers=header, tablefmt=AIR_TABULATE_TABLEFMT, showindex=False, ) ) + if more_infos: + print(", ".join(more_infos)) print() @@ -710,7 +723,7 @@ def _render_layout(self, heartbeat_strs: List[str], 
table_data: _TrialTableData) self._live.update(table) - def _print_heartbeat(self, trials, *args): + def _print_heartbeat(self, trials, *args, force: bool = False): if not rich: return if not self._live: @@ -719,7 +732,9 @@ def _print_heartbeat(self, trials, *args): "be called without `with_live` context manager." ) return - heartbeat_strs, table_data = self._get_heartbeat(trials, *args) + heartbeat_strs, table_data = self._get_heartbeat( + trials, *args, force_full_output=force + ) self._render_layout(heartbeat_strs, table_data) @@ -727,7 +742,7 @@ class TrainReporter(ProgressReporter): # the minimal verbosity threshold at which heartbeat starts getting printed. _heartbeat_threshold = AirVerbosity.VERBOSE - def _get_heartbeat(self, trials: List[Trial]): + def _get_heartbeat(self, trials: List[Trial], force_full_output: bool = False): # Training on iteration 1. Current time: 2023-03-22 15:29:25 (running for 00:00:03.24) # noqa if len(trials) == 0: return @@ -744,8 +759,8 @@ def _get_heartbeat(self, trials: List[Trial]): [f"Training on iteration {iter_num}.", self._time_heartbeat_str] ) - def _print_heartbeat(self, trials, *args): - print(self._get_heartbeat(trials)) + def _print_heartbeat(self, trials, *args, force: bool = False): + print(self._get_heartbeat(trials, force_full_output=force)) # These keys are blacklisted for printing out training/tuning intermediate/final result! diff --git a/python/ray/tune/tests/output/test_output.py b/python/ray/tune/tests/output/test_output.py index 30bd73ed16c5..5fc41d489a6b 100644 --- a/python/ray/tune/tests/output/test_output.py +++ b/python/ray/tune/tests/output/test_output.py @@ -179,9 +179,9 @@ def test_get_trial_table_data_more_than_20(): assert len(table_data) == 3 # only the running category for i in range(3): assert len(table_data[i].trial_infos) == 5 - assert table_data[0].more_info == "... and 5 more RUNNING ..." - assert table_data[1].more_info == "... and 5 more TERMINATED ..." 
- assert table_data[2].more_info == "... and 5 more PENDING ..." + assert table_data[0].more_info == "5 more RUNNING" + assert table_data[1].more_info == "5 more TERMINATED" + assert table_data[2].more_info == "5 more PENDING" def test_result_table_no_divison(): From 9e5e2b497edef9d73d3a92440f028b1102560cda Mon Sep 17 00:00:00 2001 From: SangBin Cho Date: Wed, 10 May 2023 00:36:46 +0900 Subject: [PATCH 291/424] [Core] Fix async actor shutdown issue when exit_actor is used (#32407) There are 2 issues. When the actor exits via sys.exit, exit_actor, or max_call=1, we didn't cancel queued tasks, which means all queued tasks will still be executed although you call exit APIs. It is an unexpected/unintuitive behavior. The segfault happened when we call disconnect() on exit_actor API & there are still queued tasks. it's because the actor won't exit until the queued tasks are all executed, but since we called disconnect(), it will break the worker with a segfault (it is not expected disconnect is called when you are executing actor tasks). This happened even when a normal actor (not an async actor) was used if there are queued tasks when exit_actor is called. This API fixes the issues by doing 2 things. First, if cpp Exit API is called, we guarantee the queued tasks won't be executed. I fixed this issue by returning ExecuteTask immediately. Alternatively, we could manually clean actor_scheduling_queue, but this will require much more complicated code to have a good error message. I am open for this approach as well. Remove disconnect call from exit_actor API. It was written before 2020, and the comment there seems irrelevant (and also all tests seem to pass, so it should be okay). I assume it was a hack, and the issue from the comment was fixed at some point of time. Also, this PR adds 2 guarantees to exit_actor APIs. Once exit_actor or exit is called on an actor, there will be no additional tasks running from that actor. 
Any queued or incoming requests will fail with a clear error message. When the actor is terminated via exit_actor or exit, the atexit handler is guaranteed to be called (I will add tests). --- python/ray/_raylet.pyx | 9 +- python/ray/actor.py | 22 +-- python/ray/includes/libcoreworker.pxd | 2 - python/ray/tests/test_actor_failures.py | 141 ++++++++++++++++++ python/ray/tests/test_asyncio.py | 25 ++++ python/ray/tests/test_basic_4.py | 4 +- src/ray/core_worker/core_worker.cc | 45 +++++- src/ray/core_worker/core_worker.h | 11 +- .../transport/direct_actor_transport.cc | 2 +- .../transport/direct_actor_transport.h | 2 +- 10 files changed, 229 insertions(+), 34 deletions(-) diff --git a/python/ray/_raylet.pyx b/python/ray/_raylet.pyx index 6a09c94859e0..5b135b35d419 100644 --- a/python/ray/_raylet.pyx +++ b/python/ray/_raylet.pyx @@ -843,6 +843,7 @@ cdef void execute_task( return function(actor, *arguments, **kwarguments) with core_worker.profile_event(b"task::" + name, extra_data=extra_data): + task_exception = False try: with core_worker.profile_event(b"task:deserialize_arguments"): if c_args.empty(): @@ -893,10 +894,6 @@ cdef void execute_task( with core_worker.profile_event(b"task:execute"): task_exception = True try: - is_exiting = core_worker.is_exiting() - if is_exiting: - title = f"{title}::Exiting" - next_title = f"{next_title}::Exiting" with ray._private.worker._changeproctitle(title, next_title): if debugger_breakpoint != b"": ray.util.pdb.set_trace( @@ -1208,7 +1205,6 @@ cdef CRayStatus task_execution_handler( const c_vector[CConcurrencyGroup] &defined_concurrency_groups, const c_string name_of_concurrency_group_to_execute, c_bool is_reattempt) nogil: - with gil, disable_client_hook(): # Initialize job_config if it hasn't already. # Setup system paths configured in job_config. 
@@ -2949,9 +2945,6 @@ cdef class CoreWorker: return self.current_runtime_env - def is_exiting(self): - return CCoreWorkerProcess.GetCoreWorker().IsExiting() - cdef yield_current_fiber(self, CFiberEvent &fiber_event): with nogil: CCoreWorkerProcess.GetCoreWorker().YieldCurrentFiber(fiber_event) diff --git a/python/ray/actor.py b/python/ray/actor.py index de0d134d6210..7191031e059b 100644 --- a/python/ray/actor.py +++ b/python/ray/actor.py @@ -1370,21 +1370,19 @@ def _make_actor(cls, actor_options): def exit_actor(): """Intentionally exit the current actor. - This function is used to disconnect an actor and exit the worker. - Any ``atexit`` handlers installed in the actor will be run. + This API can be used only inside an actor. Use ray.kill + API if you'd like to kill an actor using actor handle. + + When the API is called, the actor raises an exception and exits. + Any queued methods will fail. Any ``atexit`` + handlers installed in the actor will be run. Raises: - Exception: An exception is raised if this is a driver or this + TypeError: An exception is raised if this is a driver or this worker is not an actor. """ worker = ray._private.worker.global_worker if worker.mode == ray.WORKER_MODE and not worker.actor_id.is_nil(): - # Intentionally disconnect the core worker from the raylet so the - # raylet won't push an error message to the driver. - ray._private.worker.disconnect() - # Disconnect global state from GCS. - ray._private.state.state.disconnect() - # In asyncio actor mode, we can't raise SystemExit because it will just # quit the asycnio event loop thread, not the main thread. Instead, we # raise a custom error to the main thread to tell it to exit. @@ -1399,4 +1397,8 @@ def exit_actor(): raise exit assert False, "This process should have terminated." else: - raise TypeError("exit_actor called on a non-actor worker.") + raise TypeError( + "exit_actor API is called on a non-actor worker, " + f"{worker.mode}. 
Call this API inside an actor methods" + "if you'd like to exit the actor gracefully." + ) diff --git a/python/ray/includes/libcoreworker.pxd b/python/ray/includes/libcoreworker.pxd index c0aba4ca6a45..f1763aa89b35 100644 --- a/python/ray/includes/libcoreworker.pxd +++ b/python/ray/includes/libcoreworker.pxd @@ -254,8 +254,6 @@ cdef extern from "ray/core_worker/core_worker.h" nogil: CJobConfig GetJobConfig() - c_bool IsExiting() const - int64_t GetNumTasksSubmitted() const int64_t GetNumLeasesRequested() const diff --git a/python/ray/tests/test_actor_failures.py b/python/ray/tests/test_actor_failures.py index fe499b2181d7..86e14eaee994 100644 --- a/python/ray/tests/test_actor_failures.py +++ b/python/ray/tests/test_actor_failures.py @@ -1,3 +1,4 @@ +import atexit import asyncio import collections import numpy as np @@ -8,6 +9,7 @@ import time import ray +from ray.actor import exit_actor import ray.cluster_utils from ray._private.test_utils import ( wait_for_condition, @@ -794,6 +796,145 @@ def foo(): ray.get(ref) +def test_exit_actor(shutdown_only, tmp_path): + """ + Verify TypeError is raised when exit_actor is not used + inside an actor. + """ + with pytest.raises( + TypeError, match="exit_actor API is called on a non-actor worker" + ): + exit_actor() + + @ray.remote + def f(): + exit_actor() + + with pytest.raises( + TypeError, match="exit_actor API is called on a non-actor worker" + ): + ray.get(f.remote()) + + """ + Verify the basic case. 
+ """ + + @ray.remote + class Actor: + def exit(self): + exit_actor() + + @ray.remote + class AsyncActor: + async def exit(self): + exit_actor() + + a = Actor.remote() + ray.get(a.__ray_ready__.remote()) + with pytest.raises(ray.exceptions.RayActorError) as exc_info: + ray.get(a.exit.remote()) + assert "exit_actor()" in str(exc_info.value) + + b = AsyncActor.remote() + ray.get(b.__ray_ready__.remote()) + with pytest.raises(ray.exceptions.RayActorError) as exc_info: + ray.get(b.exit.remote()) + assert "exit_actor()" in str(exc_info.value) + + """ + Verify atexit handler is called correctly. + """ + sync_temp_file = tmp_path / "actor.log" + async_temp_file = tmp_path / "async_actor.log" + sync_temp_file.touch() + async_temp_file.touch() + + @ray.remote + class Actor: + def __init__(self): + def f(): + print("atexit handler") + with open(sync_temp_file, "w") as f: + f.write("Actor\n") + + atexit.register(f) + + def exit(self): + exit_actor() + + @ray.remote + class AsyncActor: + def __init__(self): + def f(): + print("atexit handler") + with open(async_temp_file, "w") as f: + f.write("Async Actor\n") + + atexit.register(f) + + async def exit(self): + exit_actor() + + a = Actor.remote() + ray.get(a.__ray_ready__.remote()) + b = AsyncActor.remote() + ray.get(b.__ray_ready__.remote()) + with pytest.raises(ray.exceptions.RayActorError): + ray.get(a.exit.remote()) + with pytest.raises(ray.exceptions.RayActorError): + ray.get(b.exit.remote()) + + def verify(): + with open(async_temp_file) as f: + assert f.readlines() == ["Async Actor\n"] + with open(sync_temp_file) as f: + assert f.readlines() == ["Actor\n"] + return True + + wait_for_condition(verify) + + +def test_exit_actor_queued(shutdown_only): + """Verify after exit_actor is called the queued tasks won't execute.""" + + @ray.remote + class RegressionSync: + def f(self): + import time + + time.sleep(1) + exit_actor() + + def ping(self): + pass + + @ray.remote + class RegressionAsync: + async def f(self): + await 
asyncio.sleep(1) + exit_actor() + + def ping(self): + pass + + # Test async case. + # https://github.com/ray-project/ray/issues/32376 + # If we didn't fix this issue, this will segfault. + a = RegressionAsync.remote() + a.f.remote() + refs = [a.ping.remote() for _ in range(10000)] + with pytest.raises(ray.exceptions.RayActorError) as exc_info: + ray.get(refs) + assert " Worker unexpectedly exits" not in str(exc_info.value) + + # Test a sync case. + a = RegressionSync.remote() + a.f.remote() + with pytest.raises(ray.exceptions.RayActorError) as exc_info: + ray.get([a.ping.remote() for _ in range(10000)]) + assert " Worker unexpectedly exits" not in str(exc_info.value) + + if __name__ == "__main__": import pytest diff --git a/python/ray/tests/test_asyncio.py b/python/ray/tests/test_asyncio.py index 17ce2b08ca88..a7cc9890d094 100644 --- a/python/ray/tests/test_asyncio.py +++ b/python/ray/tests/test_asyncio.py @@ -348,6 +348,31 @@ async def async_thread_id(self): assert sync_id == async_id +def test_asyncio_actor_shutdown_when_non_async_method_mixed(ray_start_regular_shared): + # It is a regression test. + # https://github.com/ray-project/ray/issues/32376 + # Make sure the core worker doesn't crash when + # exit_actor is used when async & regular actor tasks + # are executed. 
+ @ray.remote + class A: + async def f(self): + await asyncio.sleep(1) + ray.actor.exit_actor() + + def ping(self): + pass + + a = A.remote() + a.f.remote() + + with pytest.raises( + ray.exceptions.RayActorError, + match=("exit_actor"), + ): + ray.get([a.ping.remote() for _ in range(10000)]) + + if __name__ == "__main__": import pytest diff --git a/python/ray/tests/test_basic_4.py b/python/ray/tests/test_basic_4.py index 02242886fa08..3918239b840f 100644 --- a/python/ray/tests/test_basic_4.py +++ b/python/ray/tests/test_basic_4.py @@ -20,7 +20,7 @@ def test_actor_scheduling(shutdown_only): - ray.init() + ray.init(num_cpus=1) @ray.remote class A: @@ -32,7 +32,7 @@ def get(self): a = A.remote() a.run_fail.remote() - with pytest.raises(Exception): + with pytest.raises(ray.exceptions.RayActorError, match="exit_actor"): ray.get([a.get.remote()]) diff --git a/src/ray/core_worker/core_worker.cc b/src/ray/core_worker/core_worker.cc index af09bb383ed7..4c27c9bf7feb 100644 --- a/src/ray/core_worker/core_worker.cc +++ b/src/ray/core_worker/core_worker.cc @@ -117,7 +117,8 @@ CoreWorker::CoreWorker(const CoreWorkerOptions &options, const WorkerID &worker_ num_executed_tasks_(0), resource_ids_(new ResourceMappingType()), grpc_service_(io_service_, *this), - task_execution_service_work_(task_execution_service_) { + task_execution_service_work_(task_execution_service_), + exiting_detail_(std::nullopt) { RAY_LOG(DEBUG) << "Constructing CoreWorker, worker_id: " << worker_id; // Initialize task receivers. @@ -764,7 +765,11 @@ void CoreWorker::Exit( "tasks have finished" << ", exit_type=" << rpc::WorkerExitType_Name(exit_type) << ", detail=" << detail; - exiting_ = true; + { + absl::MutexLock lock(&mutex_); + RAY_CHECK_NE(detail, ""); + exiting_detail_ = std::optional{detail}; + } // Release the resources early in case draining takes a long time. 
RAY_CHECK_OK( local_raylet_client_->NotifyDirectCallTaskBlocked(/*release_resources*/ true)); @@ -2535,6 +2540,15 @@ Status CoreWorker::ExecuteTask( bool *is_retryable_error, std::string *application_error) { RAY_LOG(DEBUG) << "Executing task, task info = " << task_spec.DebugString(); + + // If the worker is exitted via Exit API, we shouldn't execute + // tasks anymore. + if (IsExiting()) { + absl::MutexLock lock(&mutex_); + return Status::IntentionalSystemExit( + absl::StrCat("Worker has already exited. Detail: ", exiting_detail_.value())); + } + task_queue_length_ -= 1; num_executed_tasks_ += 1; @@ -2929,6 +2943,8 @@ Status CoreWorker::GetAndPinArgsForExecutor(const TaskSpecification &task, void CoreWorker::HandlePushTask(rpc::PushTaskRequest request, rpc::PushTaskReply *reply, rpc::SendReplyCallback send_reply_callback) { + RAY_LOG(DEBUG) << "Received Handle Push Task " + << TaskID::FromBinary(request.task_spec().task_id()); if (HandleWrongRecipient(WorkerID::FromBinary(request.intended_worker_id()), send_reply_callback)) { return; @@ -2950,10 +2966,18 @@ void CoreWorker::HandlePushTask(rpc::PushTaskRequest request, // execution service. if (request.task_spec().type() == TaskType::ACTOR_TASK) { task_execution_service_.post( - [this, request, reply, send_reply_callback = std::move(send_reply_callback)] { + [this, + request, + reply, + send_reply_callback = std::move(send_reply_callback), + func_name] { // We have posted an exit task onto the main event loop, // so shouldn't bother executing any further work. - if (exiting_) return; + if (IsExiting()) { + RAY_LOG(INFO) << "Queued task " << func_name + << " won't be executed because the worker already exited."; + return; + } direct_task_receiver_->HandleTask(request, reply, send_reply_callback); }, "CoreWorker.HandlePushTaskActor"); @@ -2962,10 +2986,14 @@ void CoreWorker::HandlePushTask(rpc::PushTaskRequest request, // the task execution service. 
direct_task_receiver_->HandleTask(request, reply, send_reply_callback); task_execution_service_.post( - [=] { + [this, func_name] { // We have posted an exit task onto the main event loop, // so shouldn't bother executing any further work. - if (exiting_) return; + if (IsExiting()) { + RAY_LOG(INFO) << "Queued task " << func_name + << " won't be executed because the worker already exited."; + return; + } direct_task_receiver_->RunNormalTasksFromQueue(); }, "CoreWorker.HandlePushTask"); @@ -3814,7 +3842,10 @@ rpc::JobConfig CoreWorker::GetJobConfig() const { return worker_context_.GetCurrentJobConfig(); } -bool CoreWorker::IsExiting() const { return exiting_; } +bool CoreWorker::IsExiting() const { + absl::MutexLock lock(&mutex_); + return exiting_detail_.has_value(); +} std::unordered_map> CoreWorker::GetActorCallStats() const { diff --git a/src/ray/core_worker/core_worker.h b/src/ray/core_worker/core_worker.h index e4e694610e1a..b87621238f4a 100644 --- a/src/ray/core_worker/core_worker.h +++ b/src/ray/core_worker/core_worker.h @@ -1125,6 +1125,9 @@ class CoreWorker : public rpc::CoreWorkerServiceHandler { /// Return true if the core worker is in the exit process. bool IsExiting() const; + /// Mark this worker is exiting. + void SetIsExiting(); + /// Retrieve the current statistics about tasks being received and executing. /// \return an unordered_map mapping function name to list of (num_received, /// num_executing, num_executed). It is a std map instead of absl due to its @@ -1198,6 +1201,8 @@ class CoreWorker : public rpc::CoreWorkerServiceHandler { /// (WORKER mode only) Gracefully exit the worker. `Graceful` means the worker will /// exit when it drains all tasks and cleans all owned objects. + /// After this method is called, all the tasks in the queue will not be + /// executed. /// /// \param exit_type The reason why this worker process is disconnected. /// \param exit_detail The detailed reason for a given exit. 
@@ -1601,9 +1606,9 @@ class CoreWorker : public rpc::CoreWorkerServiceHandler { ObjectID object_id, void *py_future); - /// we are shutting down and not running further tasks. - /// when exiting_ is set to true HandlePushTask becomes no-op. - std::atomic exiting_ = false; + /// The detail reason why the core worker has exited. + /// If this value is set, it means the exit process has begun. + std::optional exiting_detail_ GUARDED_BY(mutex_); std::atomic is_shutdown_ = false; diff --git a/src/ray/core_worker/transport/direct_actor_transport.cc b/src/ray/core_worker/transport/direct_actor_transport.cc index 7cd62a6cc4dd..c355d5f42108 100644 --- a/src/ray/core_worker/transport/direct_actor_transport.cc +++ b/src/ray/core_worker/transport/direct_actor_transport.cc @@ -65,7 +65,7 @@ void CoreWorkerDirectTaskReceiver::Init( } void CoreWorkerDirectTaskReceiver::HandleTask( - rpc::PushTaskRequest request, + const rpc::PushTaskRequest &request, rpc::PushTaskReply *reply, rpc::SendReplyCallback send_reply_callback) { RAY_CHECK(waiter_ != nullptr) << "Must call init() prior to use"; diff --git a/src/ray/core_worker/transport/direct_actor_transport.h b/src/ray/core_worker/transport/direct_actor_transport.h index d82e05637595..a81899f4127e 100644 --- a/src/ray/core_worker/transport/direct_actor_transport.h +++ b/src/ray/core_worker/transport/direct_actor_transport.h @@ -83,7 +83,7 @@ class CoreWorkerDirectTaskReceiver { /// \param[in] request The request message. /// \param[out] reply The reply message. /// \param[in] send_reply_callback The callback to be called when the request is done. 
- void HandleTask(rpc::PushTaskRequest request, + void HandleTask(const rpc::PushTaskRequest &request, rpc::PushTaskReply *reply, rpc::SendReplyCallback send_reply_callback); From 9260d6e1d391b559ef27deb7347be5266dd18283 Mon Sep 17 00:00:00 2001 From: Cindy Zhang Date: Tue, 9 May 2023 08:38:23 -0700 Subject: [PATCH 292/424] [serve] Add log file path to replica details (#33640) Add absolute file path to log files for each replica. https://github.com/ray-project/ray/pull/33503#discussion_r1142835813 Example: ``` replicas: - replica_id: foo_DAGDriver#jsrUNs state: RUNNING pid: 68276 actor_name: SERVE_REPLICA::foo_DAGDriver#jsrUNs actor_id: 7c1c702270bb634a7cf4c24f01000000 node_id: 568bf20e0658e89361a997fe57b896b15fcb97268f3b039e1513c6a5 node_ip: 192.168.1.14 start_time_s: 1679598497.387779 log_file_path_id: /serve/deployment_foo_DAGDriver_foo_DAGDriver#jsrUNs.log ``` --- .../modules/serve/tests/test_serve_agent.py | 2 ++ python/ray/serve/_private/deployment_state.py | 17 ++++++++++++++--- python/ray/serve/_private/logging_utils.py | 16 ++++++++++++++++ python/ray/serve/_private/replica.py | 10 ++++++++-- python/ray/serve/schema.py | 6 ++++++ 5 files changed, 46 insertions(+), 5 deletions(-) diff --git a/dashboard/modules/serve/tests/test_serve_agent.py b/dashboard/modules/serve/tests/test_serve_agent.py index 03b1f63faf7d..baee47ba8047 100644 --- a/dashboard/modules/serve/tests/test_serve_agent.py +++ b/dashboard/modules/serve/tests/test_serve_agent.py @@ -586,6 +586,8 @@ def applications_running(): ) assert replica.actor_id and replica.node_id and replica.node_ip assert replica.start_time_s > app_details[app].last_deployed_time_s + file_path = "/tmp/ray/session_latest/logs" + replica.log_file_path + assert os.path.exists(file_path) print("Finished checking application details.") diff --git a/python/ray/serve/_private/deployment_state.py b/python/ray/serve/_private/deployment_state.py index c4cc6090360e..0fa57a366034 100644 --- 
a/python/ray/serve/_private/deployment_state.py +++ b/python/ray/serve/_private/deployment_state.py @@ -223,6 +223,7 @@ def __init__( # Populated after replica is allocated. self._node_id: str = None self._node_ip: str = None + self._log_file_path: str = None # Populated in self.stop(). self._graceful_shutdown_ref: ObjectRef = None @@ -310,6 +311,11 @@ def node_ip(self) -> Optional[str]: """Returns the node ip of the actor, None if not placed.""" return self._node_ip + @property + def log_file_path(self) -> Optional[str]: + """Returns the relative log file path of the actor, None if not placed.""" + return self._log_file_path + def _check_obj_ref_ready(self, obj_ref: ObjectRef) -> bool: ready, _ = ray.wait([obj_ref], timeout=0) return len(ready) == 1 @@ -527,9 +533,13 @@ def check_ready(self) -> Tuple[ReplicaStartupStatus, Optional[str]]: if not self._deployment_is_cross_language: _, self._version = ray.get(self._ready_obj_ref) - self._pid, self._actor_id, self._node_id, self._node_ip = ray.get( - self._allocated_obj_ref - ) + ( + self._pid, + self._actor_id, + self._node_id, + self._node_ip, + self._log_file_path, + ) = ray.get(self._allocated_obj_ref) except RayTaskError as e: logger.exception( f"Exception in replica '{self._replica_tag}', " @@ -768,6 +778,7 @@ def get_replica_details(self, state: ReplicaState) -> ReplicaDetails: node_id=self._actor.node_id, node_ip=self._actor.node_ip, start_time_s=self._start_time, + log_file_path=self._actor._log_file_path, ) @property diff --git a/python/ray/serve/_private/logging_utils.py b/python/ray/serve/_private/logging_utils.py index 97e9e6222873..81066603f6ef 100644 --- a/python/ray/serve/_private/logging_utils.py +++ b/python/ray/serve/_private/logging_utils.py @@ -55,6 +55,22 @@ def log_to_stderr_filter(record: logging.LogRecord) -> bool: return record.log_to_stderr +def get_component_logger_file_path() -> Optional[str]: + """Returns the relative file path for the Serve logger, if it exists. 
+ + If a logger was configured through configure_component_logger() for the Serve + component that's calling this function, this returns the location of the log file + relative to the ray logs directory. + """ + logger = logging.getLogger(SERVE_LOGGER_NAME) + for handler in logger.handlers: + if isinstance(handler, logging.handlers.RotatingFileHandler): + absolute_path = handler.baseFilename + ray_logs_dir = ray._private.worker._global_node.get_logs_dir_path() + if absolute_path.startswith(ray_logs_dir): + return absolute_path[len(ray_logs_dir) :] + + def configure_component_logger( *, component_name: str, diff --git a/python/ray/serve/_private/replica.py b/python/ray/serve/_private/replica.py index 32ba7155190b..0eb4e9acdaa4 100644 --- a/python/ray/serve/_private/replica.py +++ b/python/ray/serve/_private/replica.py @@ -31,7 +31,11 @@ from ray.serve.deployment import Deployment from ray.serve.exceptions import RayServeException from ray.serve._private.http_util import ASGIHTTPSender -from ray.serve._private.logging_utils import access_log_msg, configure_component_logger +from ray.serve._private.logging_utils import ( + access_log_msg, + configure_component_logger, + get_component_logger_file_path, +) from ray.serve._private.router import Query, RequestMetadata from ray.serve._private.utils import ( parse_import_path, @@ -221,13 +225,15 @@ async def is_allocated(self) -> str: to PENDING_INITIALIZATION startup state. Returns: - The PID, actor ID, node ID, node IP of the replica. + The PID, actor ID, node ID, node IP, and log filepath id of the replica. 
""" + return ( os.getpid(), ray.get_runtime_context().get_actor_id(), ray.get_runtime_context().get_node_id(), ray.util.get_node_ip_address(), + get_component_logger_file_path(), ) async def is_initialized( diff --git a/python/ray/serve/schema.py b/python/ray/serve/schema.py index 5dd2612e783b..24047f37ca14 100644 --- a/python/ray/serve/schema.py +++ b/python/ray/serve/schema.py @@ -635,6 +635,12 @@ class ReplicaDetails(BaseModel, extra=Extra.forbid, frozen=True): "state from the running replica actor." ) ) + log_file_path: Optional[str] = Field( + description=( + "The relative path to the log file for the replica actor from the ray logs " + "directory." + ) + ) @PublicAPI(stability="alpha") From 116da4c947b4323722b9966261ac764a5cbb8819 Mon Sep 17 00:00:00 2001 From: Archit Kulkarni Date: Tue, 9 May 2023 11:04:54 -0700 Subject: [PATCH 293/424] [Docker] [runtime env] Bump boto3 version from 1.4.8 to 1.26.82, add pyOpenSSL and cryptography (#33273) runtime_env working_dir S3 urls require a recent version of boto3 to read environment variables for authentication for downloading from private buckets. We currently include an outdated boto3 version in the Ray Docker images. This PR bumps the version in the Ray Docker images to make the S3 working_dir download feature work out of the box. The reason this is important is that users might try to use S3 URLs for runtime_env with the Ray Docker image, but it's hard to debug the failure that occurs with the outdated boto3 version (see linked issue). This is worse than not having boto3 installed, since in that case the error message is clear ("You must pip install boto3 to fetch URIs"). 
Related issue number Closes #33256 Closes #34752 --- doc/source/ray-core/examples/lm/lm-cluster.yaml | 2 +- docker/ray-deps/Dockerfile | 4 +++- python/ray/autoscaler/aws/development-example.yaml | 2 +- python/ray/autoscaler/aws/example-gpu-docker.yaml | 2 +- python/ray/autoscaler/aws/example-java.yaml | 2 +- 5 files changed, 7 insertions(+), 5 deletions(-) diff --git a/doc/source/ray-core/examples/lm/lm-cluster.yaml b/doc/source/ray-core/examples/lm/lm-cluster.yaml index e53cf692f687..021b85c4eca6 100644 --- a/doc/source/ray-core/examples/lm/lm-cluster.yaml +++ b/doc/source/ray-core/examples/lm/lm-cluster.yaml @@ -91,7 +91,7 @@ setup_commands: # Custom commands that will be run on the head node after common setup. head_setup_commands: - - pip install boto3==1.4.8 # 1.4.8 adds InstanceMarketOptions + - pip install boto3>=1.4.8 # 1.4.8 adds InstanceMarketOptions # Custom commands that will be run on worker nodes after common setup. worker_setup_commands: [] diff --git a/docker/ray-deps/Dockerfile b/docker/ray-deps/Dockerfile index f9220a09b9f4..81ddd1cb505d 100644 --- a/docker/ray-deps/Dockerfile +++ b/docker/ray-deps/Dockerfile @@ -14,7 +14,9 @@ RUN $HOME/anaconda3/bin/pip --no-cache-dir install --find-links $FIND_LINKS_PATH $(if [ "$AUTOSCALER" = "autoscaler" ]; then echo \ "redis>=3.5.0,<4.0.0" \ "six==1.13.0" \ - "boto3==1.4.8" \ + "boto3==1.26.82" \ + "pyOpenSSL==22.1.0" \ + "cryptography==38.0.1" \ "google-api-python-client==1.7.8" \ "google-oauth" \ "kubernetes" \ diff --git a/python/ray/autoscaler/aws/development-example.yaml b/python/ray/autoscaler/aws/development-example.yaml index 467e755247ee..832bc1e0118d 100644 --- a/python/ray/autoscaler/aws/development-example.yaml +++ b/python/ray/autoscaler/aws/development-example.yaml @@ -59,7 +59,7 @@ setup_commands: - git clone https://github.com/ray-project/ray || true - ray/ci/env/install-bazel.sh - cd ray/python/ray/dashboard/client; npm ci; npm run build - - pip install boto3==1.4.8 cython==0.29.32 aiohttp 
grpcio psutil setproctitle + - pip install boto3>=1.4.8 cython==0.29.32 aiohttp grpcio psutil setproctitle - cd ray/python; pip install -e . --verbose # Command to start ray on the head node. You don't need to change this. diff --git a/python/ray/autoscaler/aws/example-gpu-docker.yaml b/python/ray/autoscaler/aws/example-gpu-docker.yaml index 85c65ae416dd..6daac090092c 100644 --- a/python/ray/autoscaler/aws/example-gpu-docker.yaml +++ b/python/ray/autoscaler/aws/example-gpu-docker.yaml @@ -120,7 +120,7 @@ setup_commands: [] # Custom commands that will be run on the head node after common setup. head_setup_commands: - - pip install boto3==1.4.8 # 1.4.8 adds InstanceMarketOptions + - pip install boto3>=1.4.8 # 1.4.8 adds InstanceMarketOptions # Custom commands that will be run on worker nodes after common setup. worker_setup_commands: [] diff --git a/python/ray/autoscaler/aws/example-java.yaml b/python/ray/autoscaler/aws/example-java.yaml index 6563d6bf7be4..cc3cd47ac016 100644 --- a/python/ray/autoscaler/aws/example-java.yaml +++ b/python/ray/autoscaler/aws/example-java.yaml @@ -60,7 +60,7 @@ setup_commands: - python3 -m pip install https://s3-us-west-2.amazonaws.com/ray-wheels/latest/ray-0.9.0.dev0-cp37-cp37m-manylinux2014_x86_64.whl # Custom commands that will be run on the head node after common setup. head_setup_commands: - - python3 -m pip install boto3==1.4.8 # 1.4.8 adds InstanceMarketOptions + - python3 -m pip install boto3>=1.4.8 # 1.4.8 adds InstanceMarketOptions # Custom commands that will be run on worker nodes after common setup. worker_setup_commands: [] # Command to start ray on the head node. You don't need to change this. From 69f50ce96f325f334deb88ccb8f50630e1e8d8f9 Mon Sep 17 00:00:00 2001 From: Ricky Xu Date: Wed, 10 May 2023 02:26:10 +0800 Subject: [PATCH 294/424] [core] Make ray.get(timeout=0) to throw timeout error (#35126) Why are these changes needed? 
With telemetry tracking since ray 2.3, we have not seen significant and recent usage of the timeout=0 behaviour: image Raw query behind firewall So we will update this behaviour as documented in #28465 cc vitrioil for the original PR: https://github.com/ray-project/ray/pull/30210/files Signed-off-by: Ricky Xu --------- Signed-off-by: Ricky Xu Co-authored-by: vitrioil Co-authored-by: Prem <41074533+vitrioil@users.noreply.github.com> --- python/ray/_private/worker.py | 34 ++++------------------ python/ray/tests/test_basic_2.py | 4 +++ python/ray/tests/test_object_spilling.py | 6 ++-- python/ray/tests/test_object_spilling_2.py | 6 ++-- python/ray/tests/test_object_spilling_3.py | 2 +- 5 files changed, 16 insertions(+), 36 deletions(-) diff --git a/python/ray/_private/worker.py b/python/ray/_private/worker.py index 9598ef02aa52..a9b81d672fb3 100644 --- a/python/ray/_private/worker.py +++ b/python/ray/_private/worker.py @@ -738,12 +738,12 @@ def get_objects(self, object_refs: list, timeout: Optional[float] = None): "which is not an ray.ObjectRef." ) - timeout_ms = int(timeout * 1000) if timeout else -1 + timeout_ms = int(timeout * 1000) if timeout is not None else -1 data_metadata_pairs = self.core_worker.get_objects( object_refs, self.current_task_id, timeout_ms ) debugger_breakpoint = b"" - for (data, metadata) in data_metadata_pairs: + for data, metadata in data_metadata_pairs: if metadata: metadata_fields = metadata.split(b",") if len(metadata_fields) >= 2 and metadata_fields[1].startswith( @@ -2464,12 +2464,9 @@ def get( to get. timeout (Optional[float]): The maximum amount of time in seconds to wait before returning. Set this to None will block until the - corresponding object becomes available. - WARNING: In future ray releases ``timeout=0`` will return the object - immediately if it's available, else raise GetTimeoutError in accordance with - the above docstring. 
The current behavior of blocking until objects become - available of ``timeout=0`` is considered to be a bug, see - https://github.com/ray-project/ray/issues/28465. + corresponding object becomes available. Setting ``timeout=0`` will + return the object immediately if it's available, else raise + GetTimeoutError in accordance with the above docstring. Returns: A Python object or a list of Python objects. @@ -2480,26 +2477,6 @@ def get( Exception: An exception is raised if the task that created the object or that created one of the objects raised an exception. """ - if timeout == 0: - if os.environ.get("RAY_WARN_RAY_GET_TIMEOUT_ZERO", "1") == "1": - import warnings - - warnings.warn( - ( - "Please use timeout=None if you expect ray.get() to block. " - "Setting timeout=0 in future ray releases will raise " - "GetTimeoutError if the objects references are not available. " - "You could suppress this warning by setting " - "RAY_WARN_RAY_GET_TIMEOUT_ZERO=0." - ), - UserWarning, - ) - - # Record this usage in telemetry - import ray._private.usage.usage_lib as usage_lib - - usage_lib.record_extra_usage_tag(usage_lib.TagKey.RAY_GET_TIMEOUT_ZERO, "True") - worker = global_worker worker.check_connected() @@ -2710,7 +2687,6 @@ def wait( worker.check_connected() # TODO(swang): Check main thread. with profiling.profile("ray.wait"): - # TODO(rkn): This is a temporary workaround for # https://github.com/ray-project/ray/issues/997. However, it should be # fixed in Arrow instead of here. diff --git a/python/ray/tests/test_basic_2.py b/python/ray/tests/test_basic_2.py index b83280fcecb6..7c78747148f0 100644 --- a/python/ray/tests/test_basic_2.py +++ b/python/ray/tests/test_basic_2.py @@ -384,6 +384,10 @@ def test_get_with_timeout(ray_start_regular_shared): with pytest.raises(TimeoutError): ray.get(result_id, timeout=0.1) + # timeout of 0 should raise an error + with pytest.raises(GetTimeoutError): + ray.get(result_id, timeout=0) + # Check that a subsequent get() returns early. 
ray.get(signal.send.remote()) start = time.time() diff --git a/python/ray/tests/test_object_spilling.py b/python/ray/tests/test_object_spilling.py index 50d85d4f9ef1..11fd750bfdfa 100644 --- a/python/ray/tests/test_object_spilling.py +++ b/python/ray/tests/test_object_spilling.py @@ -319,7 +319,7 @@ def test_spill_objects_automatically(fs_only_object_spilling_config, shutdown_on index = random.choice(list(range(buffer_length))) ref = replay_buffer[index] solution = solution_buffer[index] - sample = ray.get(ref, timeout=0) + sample = ray.get(ref, timeout=None) assert np.array_equal(sample, solution) assert_no_thrashing(address["address"]) @@ -359,7 +359,7 @@ def test_unstable_spill_objects_automatically(unstable_spilling_config, shutdown index = random.choice(list(range(buffer_length))) ref = replay_buffer[index] solution = solution_buffer[index] - sample = ray.get(ref, timeout=0) + sample = ray.get(ref, timeout=None) assert np.array_equal(sample, solution) assert_no_thrashing(address["address"]) @@ -397,7 +397,7 @@ def test_slow_spill_objects_automatically(slow_spilling_config, shutdown_only): index = random.choice(list(range(buffer_length))) ref = replay_buffer[index] solution = solution_buffer[index] - sample = ray.get(ref, timeout=0) + sample = ray.get(ref, timeout=None) assert np.array_equal(sample, solution) assert_no_thrashing(address["address"]) diff --git a/python/ray/tests/test_object_spilling_2.py b/python/ray/tests/test_object_spilling_2.py index 9781952574f6..29b3e00ac13c 100644 --- a/python/ray/tests/test_object_spilling_2.py +++ b/python/ray/tests/test_object_spilling_2.py @@ -77,7 +77,7 @@ def test_delete_objects_delete_while_creating(object_spilling_config, shutdown_o # Do random sampling. for _ in range(200): ref = random.choice(replay_buffer) - sample = ray.get(ref, timeout=0) + sample = ray.get(ref, timeout=None) assert np.array_equal(sample, arr) # After all, make sure all objects are killed without race condition. 
@@ -126,7 +126,7 @@ def create_objects(self): # Do random sampling. for _ in range(200): ref = random.choice(self.replay_buffer) - sample = ray.get(ref, timeout=0) + sample = ray.get(ref, timeout=None) assert np.array_equal(sample, arr) a = Actor.remote() @@ -288,7 +288,7 @@ def test_fusion_objects(fs_only_object_spilling_config, shutdown_only): index = random.choice(list(range(buffer_length))) ref = replay_buffer[index] solution = solution_buffer[index] - sample = ray.get(ref, timeout=0) + sample = ray.get(ref, timeout=None) assert np.array_equal(sample, solution) is_test_passing = False diff --git a/python/ray/tests/test_object_spilling_3.py b/python/ray/tests/test_object_spilling_3.py index 0ae983c5f420..b286df27f949 100644 --- a/python/ray/tests/test_object_spilling_3.py +++ b/python/ray/tests/test_object_spilling_3.py @@ -315,7 +315,7 @@ def test_spill_deadlock(object_spilling_config, shutdown_only): if random.randint(0, 9) < 5: for _ in range(5): ref = random.choice(replay_buffer) - sample = ray.get(ref, timeout=0) + sample = ray.get(ref, timeout=None) assert np.array_equal(sample, arr) assert_no_thrashing(address["address"]) From e74528d4e3c1793367540ad7f8d2d8cb84bf47ce Mon Sep 17 00:00:00 2001 From: Victoria Tsai Date: Tue, 9 May 2023 11:44:46 -0700 Subject: [PATCH 295/424] [core] Change worker niceness in job submission environment (#34727) The niceness of the job supervisor should be set to 0. 
Signed-off-by: vitsai --- dashboard/modules/job/job_manager.py | 3 +++ .../check_niceness.py | 22 +++++++++++++++++++ .../modules/job/tests/test_job_manager.py | 13 +++++++++++ python/ray/_private/ray_constants.py | 1 + 4 files changed, 39 insertions(+) create mode 100644 dashboard/modules/job/tests/subprocess_driver_scripts/check_niceness.py diff --git a/dashboard/modules/job/job_manager.py b/dashboard/modules/job/job_manager.py index 6e5043c12c6c..8c6f8232ea03 100644 --- a/dashboard/modules/job/job_manager.py +++ b/dashboard/modules/job/job_manager.py @@ -197,6 +197,7 @@ def _get_driver_runtime_env( # & actors. env_vars = curr_runtime_env.get("env_vars", {}) env_vars.pop(ray_constants.NOSET_CUDA_VISIBLE_DEVICES_ENV_VAR) + env_vars.pop(ray_constants.RAY_WORKER_NICENESS) curr_runtime_env["env_vars"] = env_vars return curr_runtime_env @@ -756,6 +757,8 @@ def _get_supervisor_runtime_env( if env_vars is None: env_vars = {} + env_vars[ray_constants.RAY_WORKER_NICENESS] = "0" + if not resources_specified: # Don't set CUDA_VISIBLE_DEVICES for the supervisor actor so the # driver can use GPUs if it wants to. This will be removed from diff --git a/dashboard/modules/job/tests/subprocess_driver_scripts/check_niceness.py b/dashboard/modules/job/tests/subprocess_driver_scripts/check_niceness.py new file mode 100644 index 000000000000..1fbf3d3985df --- /dev/null +++ b/dashboard/modules/job/tests/subprocess_driver_scripts/check_niceness.py @@ -0,0 +1,22 @@ +""" +A dummy ray driver script that executes in subprocess. +Checks that job manager's environment variable is different. 
+""" + +import ray +import os + + +def run(): + ray.init() + + @ray.remote + def foo(): + print("worker", os.nice(0)) + + ray.get(foo.remote()) + + +if __name__ == "__main__": + print("driver", os.nice(0)) + run() diff --git a/dashboard/modules/job/tests/test_job_manager.py b/dashboard/modules/job/tests/test_job_manager.py index 56cf0af866dc..17255f4a39cc 100644 --- a/dashboard/modules/job/tests/test_job_manager.py +++ b/dashboard/modules/job/tests/test_job_manager.py @@ -465,6 +465,19 @@ async def test_pass_env_var(self, job_manager): ) assert job_manager.get_job_logs(job_id) == "233\n" + async def test_niceness(self, job_manager): + job_id = await job_manager.submit_job( + entrypoint=f"python {_driver_script_path('check_niceness.py')}", + ) + + await async_wait_for_condition_async_predicate( + check_job_succeeded, job_manager=job_manager, job_id=job_id + ) + + logs = job_manager.get_job_logs(job_id) + assert "driver 0" in logs + assert "worker 15" in logs + async def test_multiple_runtime_envs(self, job_manager): # Test that you can run two jobs in different envs without conflict. job_id_1 = await job_manager.submit_job( diff --git a/python/ray/_private/ray_constants.py b/python/ray/_private/ray_constants.py index 769d68a2ec87..c34c50199fa7 100644 --- a/python/ray/_private/ray_constants.py +++ b/python/ray/_private/ray_constants.py @@ -378,6 +378,7 @@ def env_set_by_user(key): LANGUAGE_WORKER_TYPES = ["python", "java", "cpp"] NOSET_CUDA_VISIBLE_DEVICES_ENV_VAR = "RAY_EXPERIMENTAL_NOSET_CUDA_VISIBLE_DEVICES" +RAY_WORKER_NICENESS = "RAY_worker_niceness" # Default max_retries option in @ray.remote for non-actor # tasks. From ac65b3ba1540bd3ddcec5a8c89dbf59d00389002 Mon Sep 17 00:00:00 2001 From: Archit Kulkarni Date: Wed, 10 May 2023 12:41:00 -0700 Subject: [PATCH 296/424] Revert "Add CLI warning for arguments" This reverts commit 6d0895f6530b51a8bf3ced622adb5eeda65794a1. 
--- .../cluster/running-applications/job-submission/cli.rst | 6 ------ 1 file changed, 6 deletions(-) diff --git a/doc/source/cluster/running-applications/job-submission/cli.rst b/doc/source/cluster/running-applications/job-submission/cli.rst index 94a950ebf39d..708ca12c8d12 100644 --- a/doc/source/cluster/running-applications/job-submission/cli.rst +++ b/doc/source/cluster/running-applications/job-submission/cli.rst @@ -16,12 +16,6 @@ This section contains commands for :ref:`Ray Job Submission `. ``ray job submit --working_dir="." -- python script.py`` instead of ``ray job submit --working_dir="." -- "python script.py"``. Otherwise you may encounter the error ``/bin/sh: 1: python script.py: not found``. -.. warning:: - - The entrypoint command must be provided last, and any arguments to `ray job submit` must be provided before the entrypoint command. - For example, use ``ray job submit --working_dir="." -- python script.py`` instead of ``ray job submit -- python script.py --working_dir="."``. - This is to support the use of ``--`` to separate arguments to `ray job submit` from arguments to the entrypoint command. - .. _ray-job-status-doc: .. click:: ray.dashboard.modules.job.cli:status From 94d12f69f0ced6e4a20b58909a9a5a0bdbc95cf7 Mon Sep 17 00:00:00 2001 From: Lonnie Liu <95255098+aslonnie@users.noreply.github.com> Date: Tue, 9 May 2023 13:25:00 -0700 Subject: [PATCH 297/424] [ci/release] Resolve dependencies with python 3.9 inside conda. (#35176) Was resolved in a python 3.7 environment. Now resolving in a python 3.9 environment. Also upgraded dependencies. 
Signed-off-by: Lonnie Liu --- release/requirements_buildkite.txt | 298 ++++++++++++----------------- 1 file changed, 120 insertions(+), 178 deletions(-) diff --git a/release/requirements_buildkite.txt b/release/requirements_buildkite.txt index e4e8394185f4..a840aeb645d4 100644 --- a/release/requirements_buildkite.txt +++ b/release/requirements_buildkite.txt @@ -1,5 +1,5 @@ # -# This file is autogenerated by pip-compile with python 3.7 +# This file is autogenerated by pip-compile with python 3.9 # To update, run: # # bazel run //release:requirements_buildkite.update @@ -97,8 +97,8 @@ aiosignal==1.3.1 \ --hash=sha256:54cd96e15e1649b75d6c87526a6ff0b6c1b0dd3459f43d9ca11d48c339b68cfc \ --hash=sha256:f8376fb07dd1e86a584e4fcdec80b36b7f81aac666ebc724e2c090300dd83b17 # via aiohttp -anyscale==0.5.102 \ - --hash=sha256:0fd5999703a5ea0f8c7f53cdfab734f1d65778d52d6de1396ca0254b456182db +anyscale==0.5.106 \ + --hash=sha256:8e6a371f50ab35743521fa552b0ca2cfd75fb186023ed29049b3822ffa1fb7e2 # via -r release/requirements_buildkite.in argon2-cffi==21.3.0 \ --hash=sha256:8c976986f2c5c0e5000919e6de187906cfd81fb1c72bf9d88c01177e77da7f80 \ @@ -131,48 +131,24 @@ async-timeout==4.0.2 \ --hash=sha256:2163e1640ddb52b7a8c80d0a67a08587e5d245cc9c553a74a847056bc2976b15 \ --hash=sha256:8ca1e4fcf50d07413d66d1a5e416e42cfdf5851c981d679a09851a6853383b3c # via aiohttp -asynctest==0.13.0 \ - --hash=sha256:5da6118a7e6d6b54d83a8f7197769d046922a44d2a99c21382f0a6e4fadae676 \ - --hash=sha256:c27862842d15d83e6a34eb0b2866c323880eb3a75e4485b079ea11748fd77fac - # via aiohttp attrs==23.1.0 \ --hash=sha256:1f28b4522cdc2fb4256ac1a020c78acf9cba2c6b461ccd2c126f3aa8e8335d04 \ --hash=sha256:6279836d581513a26f1bf235f9acd333bc9115683f14f7e8fae46c98fc50e015 # via # aiohttp # jsonschema -backports-zoneinfo==0.2.1 \ - --hash=sha256:17746bd546106fa389c51dbea67c8b7c8f0d14b5526a579ca6ccf5ed72c526cf \ - --hash=sha256:1b13e654a55cd45672cb54ed12148cd33628f672548f373963b0bff67b217328 \ - 
--hash=sha256:1c5742112073a563c81f786e77514969acb58649bcdf6cdf0b4ed31a348d4546 \ - --hash=sha256:4a0f800587060bf8880f954dbef70de6c11bbe59c673c3d818921f042f9954a6 \ - --hash=sha256:5c144945a7752ca544b4b78c8c41544cdfaf9786f25fe5ffb10e838e19a27570 \ - --hash=sha256:7b0a64cda4145548fed9efc10322770f929b944ce5cee6c0dfe0c87bf4c0c8c9 \ - --hash=sha256:8439c030a11780786a2002261569bdf362264f605dfa4d65090b64b05c9f79a7 \ - --hash=sha256:8961c0f32cd0336fb8e8ead11a1f8cd99ec07145ec2931122faaac1c8f7fd987 \ - --hash=sha256:89a48c0d158a3cc3f654da4c2de1ceba85263fafb861b98b59040a5086259722 \ - --hash=sha256:a76b38c52400b762e48131494ba26be363491ac4f9a04c1b7e92483d169f6582 \ - --hash=sha256:da6013fd84a690242c310d77ddb8441a559e9cb3d3d59ebac9aca1a57b2e18bc \ - --hash=sha256:e55b384612d93be96506932a786bbcde5a2db7a9e6a4bb4bffe8b733f5b9036b \ - --hash=sha256:e81b76cace8eda1fca50e345242ba977f9be6ae3945af8d46326d776b4cf78d1 \ - --hash=sha256:e8236383a20872c0cdf5a62b554b27538db7fa1bbec52429d8d106effbaeca08 \ - --hash=sha256:f04e857b59d9d1ccc39ce2da1021d196e47234873820cbeaad210724b1ee28ac \ - --hash=sha256:fadbfe37f74051d024037f223b8e001611eac868b5c5b06144ef4d8b799862f2 - # via - # pytz-deprecation-shim - # tzlocal bazel-runfiles==0.21.0 \ --hash=sha256:3e430dd9a5aba90a90bc2493fdcfce02a3ece47fb574db0f4ac898261e6b068d # via -r release/requirements_buildkite.in -boto3==1.26.118 \ - --hash=sha256:1ff703152553f4d5fc9774071d114dbf06ec661eb1b29b6051f6b1f9d0c24873 \ - --hash=sha256:d0ed43228952b55c9f44d1c733f74656418c39c55dbe36bc37feeef6aa583ded +boto3==1.26.130 \ + --hash=sha256:3ae2b34921bb08a1d7ce52db9ec1a25159fca779648e596ede37e1049ed77de8 \ + --hash=sha256:d6f9c6ebf417260ea5fa7a227e7bce9451f1f5010be05ac4075596356f584455 # via # -r release/requirements_buildkite.in # anyscale -botocore==1.29.118 \ - --hash=sha256:44cb088a73b02dd716c5c5715143a64d5f10388957285246e11f3cc893eebf9d \ - --hash=sha256:b51fc5d50cbc43edaf58b3ec4fa933b82755801c453bf8908c8d3e70ae1142c1 +botocore==1.29.130 \ + 
--hash=sha256:3a31293b84ecfe5f5f2c4b7dc85a77d7b890b468a376b593fde15cacc76862dd \ + --hash=sha256:56d1f54c3f8e140f965e5300d1cc5b565cb758134d9213fb05e91e1bb160330e # via # anyscale # boto3 @@ -181,9 +157,9 @@ cachetools==5.3.0 \ --hash=sha256:13dfddc7b8df938c21a940dfa6557ce6e94a2f1cdfa58eb90c805721d58f2c14 \ --hash=sha256:429e1a1e845c008ea6c85aa35d4b98b65d6a9763eeef3e37e92728a12d1de9d4 # via google-auth -certifi==2022.12.7 \ - --hash=sha256:35824b4c3a97115964b408844d64aa14db1cc518f6562e8d7261699d1350a9e3 \ - --hash=sha256:4ad3232f5e926d6718ec31cfc1fcadfde020920e278684144551c91769c7bc18 +certifi==2023.5.7 \ + --hash=sha256:0f0d56dc5a6ad56fd4ba36484d6cc34451e1c6548c61daad8c320169f91eddc7 \ + --hash=sha256:c6c2e98f5c7869efca1f8916fed228dd91539f9f1b444c314c06eef02980c716 # via # anyscale # requests @@ -465,9 +441,9 @@ google-cloud-core==2.3.2 \ --hash=sha256:8417acf6466be2fa85123441696c4badda48db314c607cf1e5d543fa8bdc22fe \ --hash=sha256:b9529ee7047fd8d4bf4a2182de619154240df17fbe60ead399078c1ae152af9a # via google-cloud-storage -google-cloud-storage==2.8.0 \ - --hash=sha256:248e210c13bc109909160248af546a91cb2dabaf3d7ebbf04def9dd49f02dbb6 \ - --hash=sha256:4388da1ff5bda6d729f26dbcaf1bfa020a2a52a7b91f0a8123edbda51660802c +google-cloud-storage==2.9.0 \ + --hash=sha256:83a90447f23d5edd045e0037982c270302e3aeb45fc1288d2c2ca713d27bad94 \ + --hash=sha256:9b6ae7b509fc294bdacb84d0f3ea8e20e2c54a8b4bbe39c5707635fec214eff3 # via -r release/requirements_buildkite.in google-crc32c==1.5.0 \ --hash=sha256:024894d9d3cfbc5943f8f230e23950cd4906b2fe004c72e29b209420a1e6b05a \ @@ -539,9 +515,9 @@ google-crc32c==1.5.0 \ --hash=sha256:fd8536e902db7e365f49e7d9029283403974ccf29b13fc7028b97e2295b33556 \ --hash=sha256:fe70e325aa68fa4b5edf7d1a4b6f691eb04bbccac0ace68e34820d283b5f80d4 # via google-resumable-media -google-resumable-media==2.4.1 \ - --hash=sha256:15b8a2e75df42dc6502d1306db0bce2647ba6013f9cd03b6e17368c0886ee90a \ - 
--hash=sha256:831e86fd78d302c1a034730a0c6e5369dd11d37bad73fa69ca8998460d5bae8d +google-resumable-media==2.5.0 \ + --hash=sha256:218931e8e2b2a73a58eb354a288e03a0fd5fb1c4583261ac6e4c078666468c93 \ + --hash=sha256:da1bd943e2e114a56d85d6848497ebf9be6a14d3db23e9fc57581e7c3e8170ec # via google-cloud-storage googleapis-common-protos==1.59.0 \ --hash=sha256:4168fcb568a826a52f23510412da405abd93f4d23ba544bb68d943b14ba3cb44 \ @@ -565,20 +541,6 @@ idna==3.4 \ # via # requests # yarl -importlib-metadata==6.5.1 \ - --hash=sha256:b986d197242e4e9960a12743a6ec5a9fc8b3d7054612d90489452170785c98a5 \ - --hash=sha256:cd4687a8df60d9aefd424ed9364a8f29def203a9482ec8eb8e8070ef06075f89 - # via - # attrs - # click - # humanize - # jsonschema - # pluggy - # pytest -importlib-resources==5.12.0 \ - --hash=sha256:4be82589bf5c1d7999aedf2a45159d10cb3ca4f19b2271f8792bc8e6da7b22f6 \ - --hash=sha256:7b1deeebbf351c7578e09bf2f63fa2ce8b5ffec296e0d349139d43cca061a81a - # via jsonschema iniconfig==2.0.0 \ --hash=sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3 \ --hash=sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374 @@ -761,28 +723,24 @@ pathspec==0.11.1 \ --hash=sha256:2798de800fa92780e33acca925945e9a19a133b715067cf165b8866c15a31687 \ --hash=sha256:d8af70af76652554bd134c22b3e8a1cc46ed7d91edcdd721ef1a0c51a84a5293 # via anyscale -pkgutil-resolve-name==1.3.10 \ - --hash=sha256:357d6c9e6a755653cfd78893817c0853af365dd51ec97f3d358a819373bbd174 \ - --hash=sha256:ca27cc078d25c5ad71a9de0a7a330146c4e014c2462d9af19c6b828280649c5e - # via jsonschema pluggy==1.0.0 \ --hash=sha256:4224373bacce55f955a878bf9cfa763c1e360858e330072059e10bad68531159 \ --hash=sha256:74134bbf457f031a36d68416e1509f34bd5ccc019f0bcc952c7b909d06b37bd3 # via pytest -protobuf==4.22.3 \ - --hash=sha256:13233ee2b9d3bd9a5f216c1fa2c321cd564b93d8f2e4f521a85b585447747997 \ - --hash=sha256:23452f2fdea754a8251d0fc88c0317735ae47217e0d27bf330a30eec2848811a \ - 
--hash=sha256:52f0a78141078077cfe15fe333ac3e3a077420b9a3f5d1bf9b5fe9d286b4d881 \ - --hash=sha256:70659847ee57a5262a65954538088a1d72dfc3e9882695cab9f0c54ffe71663b \ - --hash=sha256:7760730063329d42a9d4c4573b804289b738d4931e363ffbe684716b796bde51 \ - --hash=sha256:7cf56e31907c532e460bb62010a513408e6cdf5b03fb2611e4b67ed398ad046d \ - --hash=sha256:8b54f56d13ae4a3ec140076c9d937221f887c8f64954673d46f63751209e839a \ - --hash=sha256:d14fc1a41d1a1909998e8aff7e80d2a7ae14772c4a70e4bf7db8a36690b54425 \ - --hash=sha256:d4b66266965598ff4c291416be429cef7989d8fae88b55b62095a2331511b3fa \ - --hash=sha256:e0e630d8e6a79f48c557cd1835865b593d0547dce221c66ed1b827de59c66c97 \ - --hash=sha256:ecae944c6c2ce50dda6bf76ef5496196aeb1b85acb95df5843cd812615ec4b61 \ - --hash=sha256:f08aa300b67f1c012100d8eb62d47129e53d1150f4469fd78a29fa3cb68c66f2 \ - --hash=sha256:f2f4710543abec186aee332d6852ef5ae7ce2e9e807a3da570f36de5a732d88e +protobuf==4.23.0 \ + --hash=sha256:03eee35b60317112a72d19c54d0bff7bc58ff12fea4cd7b018232bd99758ffdf \ + --hash=sha256:2b94bd6df92d71bd1234a2ffe7ce96ddf6d10cf637a18d6b55ad0a89fbb7fc21 \ + --hash=sha256:36f5370a930cb77c8ad2f4135590c672d0d2c72d4a707c7d0058dce4b4b4a598 \ + --hash=sha256:5f1eba1da2a2f3f7df469fccddef3cc060b8a16cfe3cc65961ad36b4dbcf59c5 \ + --hash=sha256:6c16657d6717a0c62d5d740cb354fbad1b0d8cb811669e06fc1caa0ff4799ddd \ + --hash=sha256:6fe180b56e1169d72ecc4acbd39186339aed20af5384531b8e8979b02bbee159 \ + --hash=sha256:7cb5b9a05ce52c6a782bb97de52679bd3438ff2b7460eff5da348db65650f227 \ + --hash=sha256:9744e934ea5855d12191040ea198eaf704ac78665d365a89d9572e3b627c2688 \ + --hash=sha256:9f5a0fbfcdcc364f3986f9ed9f8bb1328fb84114fd790423ff3d7fdb0f85c2d1 \ + --hash=sha256:baca40d067dddd62141a129f244703160d278648b569e90bb0e3753067644711 \ + --hash=sha256:d5a35ff54e3f62e8fc7be02bb0d2fbc212bba1a5a9cc2748090690093996f07b \ + --hash=sha256:e62fb869762b4ba18666370e2f8a18f17f8ab92dd4467295c6d38be6f8fef60b \ + 
--hash=sha256:ebde3a023b8e11bfa6c890ef34cd6a8b47d586f26135e86c21344fe433daf2e2 # via # -r release/requirements_buildkite.in # google-api-core @@ -943,9 +901,9 @@ pyyaml==6.0 \ # via # -r release/requirements_buildkite.in # anyscale -requests==2.28.2 \ - --hash=sha256:64299f4909223da747622c030b781c0d7811e359c37124b4bd368fb8c6518baa \ - --hash=sha256:98b1b2782e3c6c4904938b84c0eb932721069dfdb9134313beff7c83c2df24bf +requests==2.30.0 \ + --hash=sha256:10e94cc4f3121ee6da529d358cdaeaff2f1c409cd377dbc72b825852f2f7e294 \ + --hash=sha256:239d7d4458afcb28a692cdd298d87542235f4ca8d36d03a15bfc128a6559a2f4 # via # -r release/requirements_buildkite.in # anyscale @@ -955,9 +913,9 @@ retry==0.9.2 \ --hash=sha256:ccddf89761fa2c726ab29391837d4327f819ea14d244c232a1d24c67a2f98606 \ --hash=sha256:f8bfa8b99b69c4506d6f5bd3b0aabf77f98cdb17f3c9fc3f5ca820033336fba4 # via -r release/requirements_buildkite.in -rich==13.3.4 \ - --hash=sha256:22b74cae0278fd5086ff44144d3813be1cedc9115bdfabbfefd86400cb88b20a \ - --hash=sha256:b5d573e13605423ec80bdd0cd5f8541f7844a0e71a13f74cf454ccb2f490708b +rich==13.3.5 \ + --hash=sha256:2d11b9b8dd03868f09b4fffadc84a6a8cda574e40dc90821bd845720ebb8e89c \ + --hash=sha256:69cdf53799e63f38b95b9bf9c875f8c90e78dd62b2f00c13a911c7a3b9fa4704 # via anyscale rsa==4.9 \ --hash=sha256:90260d9058e514786967344d0ef75fa8727eed8a7d2e43ce9f4bcf1b536174f7 \ @@ -965,9 +923,9 @@ rsa==4.9 \ # via # google-auth # oauth2client -s3transfer==0.6.0 \ - --hash=sha256:06176b74f3a15f61f1b4f25a1fc29a4429040b7647133a463da8fa5bd28d5ecd \ - --hash=sha256:2ed07d3866f523cc561bf4a00fc5535827981b117dd7876f036b0c1aca42c947 +s3transfer==0.6.1 \ + --hash=sha256:3c0da2d074bf35d6870ef157158641178a4204a6e689e82546083e31e0311346 \ + --hash=sha256:640bb492711f4c0c0905e1f62b6aaeb771881935ad27884852411f8e9cacbca9 # via boto3 six==1.16.0 \ --hash=sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926 \ @@ -994,9 +952,9 @@ tabulate==0.9.0 \ 
--hash=sha256:0095b12bf5966de529c0feb1fa08671671b3368eec77d7ef7ab114be2c068b3c \ --hash=sha256:024ca478df22e9340661486f85298cff5f6dcdba14f3813e8830015b9ed1948f # via anyscale -termcolor==2.2.0 \ - --hash=sha256:91ddd848e7251200eac969846cbae2dacd7d71c2871e92733289e7e3666f48e7 \ - --hash=sha256:dfc8ac3f350788f23b2947b3e6cfa5a53b630b612e6cd8965a015a776020b99a +termcolor==2.3.0 \ + --hash=sha256:3afb05607b89aed0ffe25202399ee0867ad4d3cb4180d98aaf8eefa6a5f7d475 \ + --hash=sha256:b5b08f68937f138fe92f6c089b99f1e2da0ae56c52b78bf7075fd95420fd9a5a # via halo tomli==2.0.1 \ --hash=sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc \ @@ -1009,17 +967,7 @@ tqdm==4.65.0 \ typing-extensions==4.5.0 \ --hash=sha256:5cb5f4a79139d699607b3ef622a1dedafa84e115ab0024e0d9c044a9479ca7cb \ --hash=sha256:fb33085c39dd998ac16d1431ebc293a8b3eedd00fd4a32de0ff79002c19511b4 - # via - # aiohttp - # argon2-cffi - # async-timeout - # gitpython - # importlib-metadata - # jsonschema - # markdown-it-py - # pydantic - # rich - # yarl + # via pydantic tzdata==2023.3 \ --hash=sha256:11ef1e08e54acb0d4f95bdb1be05da659673de4acbd21bf9c69e94cc5e907a3a \ --hash=sha256:7e65763eef3120314099b6939b5546db7adce1e7d6f2e179e3df563c70511eda @@ -1112,85 +1060,79 @@ wrapt==1.15.0 \ --hash=sha256:fbec11614dba0424ca72f4e8ba3c420dba07b4a7c206c8c8e4e73f2e98f4c559 \ --hash=sha256:fd69666217b62fa5d7c6aa88e507493a34dec4fa20c5bd925e4bc12fce586639 # via anyscale -yarl==1.9.1 \ - --hash=sha256:01a073c9175481dfed6b40704a1b67af5a9435fc4a58a27d35fd6b303469b0c7 \ - --hash=sha256:01cf88cb80411978a14aa49980968c1aeb7c18a90ac978c778250dd234d8e0ba \ - --hash=sha256:08c8599d6aa8a24425f8635f6c06fa8726afe3be01c8e53e236f519bcfa5db5b \ - --hash=sha256:098bdc06ffb4db39c73883325b8c738610199f5f12e85339afedf07e912a39af \ - --hash=sha256:09c56a32c26e24ef98d5757c5064e252836f621f9a8b42737773aa92936b8e08 \ - --hash=sha256:13a1ad1f35839b3bb5226f59816b71e243d95d623f5b392efaf8820ddb2b3cd5 \ - 
--hash=sha256:1baf8cdaaab65d9ccedbf8748d626ad648b74b0a4d033e356a2f3024709fb82f \ - --hash=sha256:1d7a0075a55380b19aa43b9e8056e128b058460d71d75018a4f9d60ace01e78c \ - --hash=sha256:27efc2e324f72df02818cd72d7674b1f28b80ab49f33a94f37c6473c8166ce49 \ - --hash=sha256:307a782736ebf994e7600dcaeea3b3113083584da567272f2075f1540919d6b3 \ - --hash=sha256:395ea180257a3742d09dcc5071739682a95f7874270ebe3982d6696caec75be0 \ - --hash=sha256:39a7a9108e9fc633ae381562f8f0355bb4ba00355218b5fb19cf5263fcdbfa68 \ - --hash=sha256:3abe37fd89a93ebe0010417ca671f422fa6fcffec54698f623b09f46b4d4a512 \ - --hash=sha256:4295790981630c4dab9d6de7b0f555a4c8defe3ed7704a8e9e595a321e59a0f5 \ - --hash=sha256:44fa6158e6b4b8ccfa2872c3900a226b29e8ce543ce3e48aadc99816afa8874d \ - --hash=sha256:46c4010de941e2e1365c07fb4418ddca10fcff56305a6067f5ae857f8c98f3a7 \ - --hash=sha256:4764114e261fe49d5df9b316b3221493d177247825c735b2aae77bc2e340d800 \ - --hash=sha256:4d817593d345fefda2fae877accc8a0d9f47ada57086da6125fa02a62f6d1a94 \ - --hash=sha256:518a92a34c741836a315150460b5c1c71ae782d569eabd7acf53372e437709f7 \ - --hash=sha256:56956b13ec275de31fe4fb991510b735c4fb3e1b01600528c952b9ac90464430 \ - --hash=sha256:575975d28795a61e82c85f114c02333ca54cbd325fd4e4b27598c9832aa732e7 \ - --hash=sha256:5ce0bcab7ec759062c818d73837644cde567ab8aa1e0d6c45db38dfb7c284441 \ - --hash=sha256:5faf3ec98747318cb980aaf9addf769da68a66431fc203a373d95d7ee9c1fbb4 \ - --hash=sha256:65d952e464df950eed32bb5dcbc1b4443c7c2de4d7abd7265b45b1b3b27f5fa2 \ - --hash=sha256:6b09cce412386ea9b4dda965d8e78d04ac5b5792b2fa9cced3258ec69c7d1c16 \ - --hash=sha256:6cdb47cbbacae8e1d7941b0d504d0235d686090eef5212ca2450525905e9cf02 \ - --hash=sha256:6cf47fe9df9b1ededc77e492581cdb6890a975ad96b4172e1834f1b8ba0fc3ba \ - --hash=sha256:73a4b46689f2d59c8ec6b71c9a0cdced4e7863dd6eb98a8c30ea610e191f9e1c \ - --hash=sha256:74390c2318d066962500045aa145f5412169bce842e734b8c3e6e3750ad5b817 \ - --hash=sha256:75676110bce59944dd48fd18d0449bd37eaeb311b38a0c768f7670864b5f8b68 \ - 
--hash=sha256:78755ce43b6e827e65ec0c68be832f86d059fcf05d4b33562745ebcfa91b26b1 \ - --hash=sha256:791357d537a09a194f92b834f28c98d074e7297bac0a8f1d5b458a906cafa17c \ - --hash=sha256:85aa6fd779e194901386709e0eedd45710b68af2709f82a84839c44314b68c10 \ - --hash=sha256:88f6413ff5edfb9609e2769e32ce87a62353e66e75d264bf0eaad26fb9daa8f2 \ - --hash=sha256:89099c887338608da935ba8bee027564a94f852ac40e472de15d8309517ad5fe \ - --hash=sha256:89da1fd6068553e3a333011cc17ad91c414b2100c32579ddb51517edc768b49c \ - --hash=sha256:8c72a1dc7e2ea882cd3df0417c808ad3b69e559acdc43f3b096d67f2fb801ada \ - --hash=sha256:90ebaf448b5f048352ec7c76cb8d452df30c27cb6b8627dfaa9cf742a14f141a \ - --hash=sha256:92a101f6d5a9464e86092adc36cd40ef23d18a25bfb1eb32eaeb62edc22776bb \ - --hash=sha256:92e37999e36f9f3ded78e9d839face6baa2abdf9344ea8ed2735f495736159de \ - --hash=sha256:97d76a3128f48fa1c721ef8a50e2c2f549296b2402dc8a8cde12ff60ed922f53 \ - --hash=sha256:9ba5a18c4fbd408fe49dc5da85478a76bc75c1ce912d7fd7b43ed5297c4403e1 \ - --hash=sha256:9bb794882818fae20ff65348985fdf143ea6dfaf6413814db1848120db8be33e \ - --hash=sha256:a21789bdf28549d4eb1de6910cabc762c9f6ae3eef85efc1958197c1c6ef853b \ - --hash=sha256:a8b8d4b478a9862447daef4cafc89d87ea4ed958672f1d11db7732b77ead49cc \ - --hash=sha256:ac8e593df1fbea820da7676929f821a0c7c2cecb8477d010254ce8ed54328ea8 \ - --hash=sha256:b20a5ddc4e243cbaa54886bfe9af6ffc4ba4ef58f17f1bb691e973eb65bba84d \ - --hash=sha256:b2b2382d59dec0f1fdca18ea429c4c4cee280d5e0dbc841180abb82e188cf6e9 \ - --hash=sha256:b3b5f8da07a21f2e57551f88a6709c2d340866146cf7351e5207623cfe8aad16 \ - --hash=sha256:b5d5fb6c94b620a7066a3adb7c246c87970f453813979818e4707ac32ce4d7bd \ - --hash=sha256:b63d41e0eecf3e3070d44f97456cf351fff7cb960e97ecb60a936b877ff0b4f6 \ - --hash=sha256:b86e98c3021b7e2740d8719bf074301361bf2f51221ca2765b7a58afbfbd9042 \ - --hash=sha256:bab67d041c78e305ff3eef5e549304d843bd9b603c8855b68484ee663374ce15 \ - --hash=sha256:c3ca8d71b23bdf164b36d06df2298ec8a5bd3de42b17bf3e0e8e6a7489195f2c \ - 
--hash=sha256:ca14b84091700ae7c1fcd3a6000bd4ec1a3035009b8bcb94f246741ca840bb22 \ - --hash=sha256:d21887cbcf6a3cc5951662d8222bc9c04e1b1d98eebe3bb659c3a04ed49b0eec \ - --hash=sha256:d5c407e530cf2979ea383885516ae79cc4f3c3530623acf5e42daf521f5c2564 \ - --hash=sha256:d966cd59df9a4b218480562e8daab39e87e746b78a96add51a3ab01636fc4291 \ - --hash=sha256:df747104ef27ab1aa9a1145064fa9ea26ad8cf24bfcbdba7db7abf0f8b3676b9 \ - --hash=sha256:e124b283a04cc06d22443cae536f93d86cd55108fa369f22b8fe1f2288b2fe1c \ - --hash=sha256:e2f01351b7809182822b21061d2a4728b7b9e08f4585ba90ee4c5c4d3faa0812 \ - --hash=sha256:e7ddebeabf384099814353a2956ed3ab5dbaa6830cc7005f985fcb03b5338f05 \ - --hash=sha256:e9fe3a1c073ab80a28a06f41d2b623723046709ed29faf2c56bea41848597d86 \ - --hash=sha256:ecaa5755a39f6f26079bf13f336c67af589c222d76b53cd3824d3b684b84d1f1 \ - --hash=sha256:ecad20c3ef57c513dce22f58256361d10550a89e8eaa81d5082f36f8af305375 \ - --hash=sha256:eed9827033b7f67ad12cb70bd0cb59d36029144a7906694317c2dbf5c9eb5ddd \ - --hash=sha256:ef7e2f6c47c41e234600a02e1356b799761485834fe35d4706b0094cb3a587ee \ - --hash=sha256:efec77851231410125cb5be04ec96fa4a075ca637f415a1f2d2c900b09032a8a \ - --hash=sha256:f0cd87949d619157a0482c6c14e5011f8bf2bc0b91cb5087414d9331f4ef02dd \ - --hash=sha256:f206adb89424dca4a4d0b31981869700e44cd62742527e26d6b15a510dd410a2 \ - --hash=sha256:f5bcb80006efe9bf9f49ae89711253dd06df8053ff814622112a9219346566a7 \ - --hash=sha256:f76edb386178a54ea7ceffa798cb830c3c22ab50ea10dfb25dc952b04848295f \ - --hash=sha256:f878a78ed2ccfbd973cab46dd0933ecd704787724db23979e5731674d76eb36f \ - --hash=sha256:f8e73f526140c1c32f5fca4cd0bc3b511a1abcd948f45b2a38a95e4edb76ca72 +yarl==1.9.2 \ + --hash=sha256:04ab9d4b9f587c06d801c2abfe9317b77cdf996c65a90d5e84ecc45010823571 \ + --hash=sha256:066c163aec9d3d073dc9ffe5dd3ad05069bcb03fcaab8d221290ba99f9f69ee3 \ + --hash=sha256:13414591ff516e04fcdee8dc051c13fd3db13b673c7a4cb1350e6b2ad9639ad3 \ + 
--hash=sha256:149ddea5abf329752ea5051b61bd6c1d979e13fbf122d3a1f9f0c8be6cb6f63c \ + --hash=sha256:159d81f22d7a43e6eabc36d7194cb53f2f15f498dbbfa8edc8a3239350f59fe7 \ + --hash=sha256:1b1bba902cba32cdec51fca038fd53f8beee88b77efc373968d1ed021024cc04 \ + --hash=sha256:22a94666751778629f1ec4280b08eb11815783c63f52092a5953faf73be24191 \ + --hash=sha256:2a96c19c52ff442a808c105901d0bdfd2e28575b3d5f82e2f5fd67e20dc5f4ea \ + --hash=sha256:2b0738fb871812722a0ac2154be1f049c6223b9f6f22eec352996b69775b36d4 \ + --hash=sha256:2c315df3293cd521033533d242d15eab26583360b58f7ee5d9565f15fee1bef4 \ + --hash=sha256:32f1d071b3f362c80f1a7d322bfd7b2d11e33d2adf395cc1dd4df36c9c243095 \ + --hash=sha256:3458a24e4ea3fd8930e934c129b676c27452e4ebda80fbe47b56d8c6c7a63a9e \ + --hash=sha256:38a3928ae37558bc1b559f67410df446d1fbfa87318b124bf5032c31e3447b74 \ + --hash=sha256:3da8a678ca8b96c8606bbb8bfacd99a12ad5dd288bc6f7979baddd62f71c63ef \ + --hash=sha256:494053246b119b041960ddcd20fd76224149cfea8ed8777b687358727911dd33 \ + --hash=sha256:50f33040f3836e912ed16d212f6cc1efb3231a8a60526a407aeb66c1c1956dde \ + --hash=sha256:52a25809fcbecfc63ac9ba0c0fb586f90837f5425edfd1ec9f3372b119585e45 \ + --hash=sha256:53338749febd28935d55b41bf0bcc79d634881195a39f6b2f767870b72514caf \ + --hash=sha256:5415d5a4b080dc9612b1b63cba008db84e908b95848369aa1da3686ae27b6d2b \ + --hash=sha256:5610f80cf43b6202e2c33ba3ec2ee0a2884f8f423c8f4f62906731d876ef4fac \ + --hash=sha256:566185e8ebc0898b11f8026447eacd02e46226716229cea8db37496c8cdd26e0 \ + --hash=sha256:56ff08ab5df8429901ebdc5d15941b59f6253393cb5da07b4170beefcf1b2528 \ + --hash=sha256:59723a029760079b7d991a401386390c4be5bfec1e7dd83e25a6a0881859e716 \ + --hash=sha256:5fcd436ea16fee7d4207c045b1e340020e58a2597301cfbcfdbe5abd2356c2fb \ + --hash=sha256:61016e7d582bc46a5378ffdd02cd0314fb8ba52f40f9cf4d9a5e7dbef88dee18 \ + --hash=sha256:63c48f6cef34e6319a74c727376e95626f84ea091f92c0250a98e53e62c77c72 \ + --hash=sha256:646d663eb2232d7909e6601f1a9107e66f9791f290a1b3dc7057818fe44fc2b6 \ + 
--hash=sha256:662e6016409828ee910f5d9602a2729a8a57d74b163c89a837de3fea050c7582 \ + --hash=sha256:674ca19cbee4a82c9f54e0d1eee28116e63bc6fd1e96c43031d11cbab8b2afd5 \ + --hash=sha256:6a5883464143ab3ae9ba68daae8e7c5c95b969462bbe42e2464d60e7e2698368 \ + --hash=sha256:6e7221580dc1db478464cfeef9b03b95c5852cc22894e418562997df0d074ccc \ + --hash=sha256:75df5ef94c3fdc393c6b19d80e6ef1ecc9ae2f4263c09cacb178d871c02a5ba9 \ + --hash=sha256:783185c75c12a017cc345015ea359cc801c3b29a2966c2655cd12b233bf5a2be \ + --hash=sha256:822b30a0f22e588b32d3120f6d41e4ed021806418b4c9f0bc3048b8c8cb3f92a \ + --hash=sha256:8288d7cd28f8119b07dd49b7230d6b4562f9b61ee9a4ab02221060d21136be80 \ + --hash=sha256:82aa6264b36c50acfb2424ad5ca537a2060ab6de158a5bd2a72a032cc75b9eb8 \ + --hash=sha256:832b7e711027c114d79dffb92576acd1bd2decc467dec60e1cac96912602d0e6 \ + --hash=sha256:838162460b3a08987546e881a2bfa573960bb559dfa739e7800ceeec92e64417 \ + --hash=sha256:83fcc480d7549ccebe9415d96d9263e2d4226798c37ebd18c930fce43dfb9574 \ + --hash=sha256:84e0b1599334b1e1478db01b756e55937d4614f8654311eb26012091be109d59 \ + --hash=sha256:891c0e3ec5ec881541f6c5113d8df0315ce5440e244a716b95f2525b7b9f3608 \ + --hash=sha256:8c2ad583743d16ddbdf6bb14b5cd76bf43b0d0006e918809d5d4ddf7bde8dd82 \ + --hash=sha256:8c56986609b057b4839968ba901944af91b8e92f1725d1a2d77cbac6972b9ed1 \ + --hash=sha256:8ea48e0a2f931064469bdabca50c2f578b565fc446f302a79ba6cc0ee7f384d3 \ + --hash=sha256:8ec53a0ea2a80c5cd1ab397925f94bff59222aa3cf9c6da938ce05c9ec20428d \ + --hash=sha256:95d2ecefbcf4e744ea952d073c6922e72ee650ffc79028eb1e320e732898d7e8 \ + --hash=sha256:9b3152f2f5677b997ae6c804b73da05a39daa6a9e85a512e0e6823d81cdad7cc \ + --hash=sha256:9bf345c3a4f5ba7f766430f97f9cc1320786f19584acc7086491f45524a551ac \ + --hash=sha256:a60347f234c2212a9f0361955007fcf4033a75bf600a33c88a0a8e91af77c0e8 \ + --hash=sha256:a74dcbfe780e62f4b5a062714576f16c2f3493a0394e555ab141bf0d746bb955 \ + --hash=sha256:a83503934c6273806aed765035716216cc9ab4e0364f7f066227e1aaea90b8d0 \ + 
--hash=sha256:ac9bb4c5ce3975aeac288cfcb5061ce60e0d14d92209e780c93954076c7c4367 \ + --hash=sha256:aff634b15beff8902d1f918012fc2a42e0dbae6f469fce134c8a0dc51ca423bb \ + --hash=sha256:b03917871bf859a81ccb180c9a2e6c1e04d2f6a51d953e6a5cdd70c93d4e5a2a \ + --hash=sha256:b124e2a6d223b65ba8768d5706d103280914d61f5cae3afbc50fc3dfcc016623 \ + --hash=sha256:b25322201585c69abc7b0e89e72790469f7dad90d26754717f3310bfe30331c2 \ + --hash=sha256:b7232f8dfbd225d57340e441d8caf8652a6acd06b389ea2d3222b8bc89cbfca6 \ + --hash=sha256:b8cc1863402472f16c600e3e93d542b7e7542a540f95c30afd472e8e549fc3f7 \ + --hash=sha256:b9a4e67ad7b646cd6f0938c7ebfd60e481b7410f574c560e455e938d2da8e0f4 \ + --hash=sha256:be6b3fdec5c62f2a67cb3f8c6dbf56bbf3f61c0f046f84645cd1ca73532ea051 \ + --hash=sha256:bf74d08542c3a9ea97bb8f343d4fcbd4d8f91bba5ec9d5d7f792dbe727f88938 \ + --hash=sha256:c027a6e96ef77d401d8d5a5c8d6bc478e8042f1e448272e8d9752cb0aff8b5c8 \ + --hash=sha256:c0c77533b5ed4bcc38e943178ccae29b9bcf48ffd1063f5821192f23a1bd27b9 \ + --hash=sha256:c1012fa63eb6c032f3ce5d2171c267992ae0c00b9e164efe4d73db818465fac3 \ + --hash=sha256:c3a53ba34a636a256d767c086ceb111358876e1fb6b50dfc4d3f4951d40133d5 \ + --hash=sha256:d4e2c6d555e77b37288eaf45b8f60f0737c9efa3452c6c44626a5455aeb250b9 \ + --hash=sha256:de119f56f3c5f0e2fb4dee508531a32b069a5f2c6e827b272d1e0ff5ac040333 \ + --hash=sha256:e65610c5792870d45d7b68c677681376fcf9cc1c289f23e8e8b39c1485384185 \ + --hash=sha256:e9fdc7ac0d42bc3ea78818557fab03af6181e076a2944f43c38684b4b6bed8e3 \ + --hash=sha256:ee4afac41415d52d53a9833ebae7e32b344be72835bbb589018c9e938045a560 \ + --hash=sha256:f364d3480bffd3aa566e886587eaca7c8c04d74f6e8933f3f2c996b7f09bee1b \ + --hash=sha256:f3b078dbe227f79be488ffcfc7a9edb3409d018e0952cf13f15fd6512847f3f7 \ + --hash=sha256:f4e2d08f07a3d7d3e12549052eb5ad3eab1c349c53ac51c209a0e5991bbada78 \ + --hash=sha256:f7a3d8146575e08c29ed1cd287068e6d02f1c7bdff8970db96683b9591b86ee7 # via aiohttp -zipp==3.15.0 \ - 
--hash=sha256:112929ad649da941c23de50f356a2b5570c954b65150642bccdd66bf194d224b \ - --hash=sha256:48904fc76a60e542af151aded95726c1a5c34ed43ab4134b597665c86d7ad556 - # via - # importlib-metadata - # importlib-resources From c4ff068f0b95881c2f213085b7f3b18030557cd3 Mon Sep 17 00:00:00 2001 From: Balaji Veeramani Date: Tue, 9 May 2023 13:30:37 -0700 Subject: [PATCH 298/424] [Data] Remove "Scalable Batch Inference with Ray" from batch inference examples (#35151) Signed-off-by: Balaji Veeramani BatchPredictor isn't a recommended API for batch inference anymore. "Scalable Batch Inference with Ray" uses BatchPredictor, so we're removing it until it gets updated with the recommended APIs. --- doc/source/data/batch_inference.rst | 8 -------- 1 file changed, 8 deletions(-) diff --git a/doc/source/data/batch_inference.rst b/doc/source/data/batch_inference.rst index af5b0de3390e..8ca2a09f8016 100644 --- a/doc/source/data/batch_inference.rst +++ b/doc/source/data/batch_inference.rst @@ -656,14 +656,6 @@ tutorials and examples: :gutter: 1 :class-container: container pb-3 - .. grid-item-card:: - :img-top: /images/ray_logo.png - :class-img-top: pt-2 w-75 d-block mx-auto fixed-height-img - - .. button-link:: https://github.com/ray-project/ray-educational-materials/blob/main/Computer_vision_workloads/Semantic_segmentation/Scaling_batch_inference.ipynb - - Scalable Batch Inference with Ray for Semantic Segmentation - .. grid-item-card:: :img-top: /images/ray_logo.png :class-img-top: pt-2 w-75 d-block mx-auto fixed-height-img From f6032f7f68fd21f70e79c10ad26fb75921d49e59 Mon Sep 17 00:00:00 2001 From: Yi Cheng <74173148+iycheng@users.noreply.github.com> Date: Tue, 9 May 2023 13:57:35 -0700 Subject: [PATCH 299/424] [core] Start ray syncer reconnection after a delay (#35115) Signed-off-by: Yi Cheng <74173148+iycheng@users.noreply.github.com> Previously, then a connection is broken, it'll try to do reconnection immediately. 
Usually when network issues happened, it's going to take a while to recover. This PR adds a 2s delay before initializing a reconnect to make the workload more reasonable. --- src/ray/common/ray_syncer/ray_syncer.cc | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/src/ray/common/ray_syncer/ray_syncer.cc b/src/ray/common/ray_syncer/ray_syncer.cc index 8a76175e58f1..ed2c36adfa2a 100644 --- a/src/ray/common/ray_syncer/ray_syncer.cc +++ b/src/ray/common/ray_syncer/ray_syncer.cc @@ -16,7 +16,9 @@ #include +#include "ray/common/asio/asio_util.h" #include "ray/common/ray_config.h" + namespace ray { namespace syncer { @@ -222,10 +224,14 @@ void RaySyncer::Connect(const std::string &node_id, [this, channel](const std::string &node_id, bool restart) { sync_reactors_.erase(node_id); if (restart) { - RAY_LOG_EVERY_MS(INFO, 10 * 1000) - << "Connection is broken. Reconnect to node: " - << NodeID::FromBinary(node_id); - Connect(node_id, channel); + execute_after( + io_context_, + [this, node_id, channel]() { + RAY_LOG(INFO) << "Connection is broken. Reconnect to node: " + << NodeID::FromBinary(node_id); + Connect(node_id, channel); + }, + /* delay_microseconds = */ 2000); } }, /* stub */ std::move(stub)); From adf28ecbf0d62c21a0dcba47cc833399f0f32133 Mon Sep 17 00:00:00 2001 From: Kai Fricke Date: Tue, 9 May 2023 22:05:28 +0100 Subject: [PATCH 300/424] [docker] Add netbase to base deps docker image (#35174) This package is available in the ubuntu:focal base images but not in the CUDA base images, but may be required by downstream dependencies in our docker ML images. 
Signed-off-by: Kai Fricke --- docker/base-deps/Dockerfile | 1 + 1 file changed, 1 insertion(+) diff --git a/docker/base-deps/Dockerfile b/docker/base-deps/Dockerfile index 4accfacbce9a..63c9d42c47e7 100644 --- a/docker/base-deps/Dockerfile +++ b/docker/base-deps/Dockerfile @@ -45,6 +45,7 @@ RUN sudo apt-get update -y && sudo apt-get upgrade -y \ tmux \ screen \ rsync \ + netbase \ openssh-client \ gnupg; fi) \ && wget \ From 256c03500a9e0866f04ebc4cfbce26e1b2ec1e6f Mon Sep 17 00:00:00 2001 From: Balaji Veeramani Date: Tue, 9 May 2023 14:23:55 -0700 Subject: [PATCH 301/424] [Data] Update `pipelined_training_50_gb.aws` instance type (#35150) Anyscale recently stopped supporting i3.8xlarge instance types. As a result, the pipelined_training_50_gb.aws release test -- which uses i3.8xlarge -- has been failing. This PR updates the instance type to m6i.16xlarge (a supported instance type). --------- Signed-off-by: Balaji Veeramani --- .../nightly_tests/dataset/pipelined_training_compute.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/release/nightly_tests/dataset/pipelined_training_compute.yaml b/release/nightly_tests/dataset/pipelined_training_compute.yaml index 966765049ff8..d666b8274f5a 100644 --- a/release/nightly_tests/dataset/pipelined_training_compute.yaml +++ b/release/nightly_tests/dataset/pipelined_training_compute.yaml @@ -14,11 +14,11 @@ aws: head_node_type: name: head_node - instance_type: i3.8xlarge + instance_type: m6i.16xlarge worker_node_types: - - name: memory_node - instance_type: i3.8xlarge + - name: memory_node + instance_type: m6i.16xlarge min_workers: 10 max_workers: 10 use_spot: false From 6df362999fde1e582d9f7aa1e65a41318931c358 Mon Sep 17 00:00:00 2001 From: Eric Liang Date: Tue, 9 May 2023 15:34:23 -0700 Subject: [PATCH 302/424] [data] Update the strict mode message to be less confusing (#35185) --- doc/source/data/faq.rst | 7 ++---- python/ray/data/_internal/compute.py | 4 ++-- 
python/ray/data/_internal/planner/map_rows.py | 2 +- python/ray/data/block.py | 23 +++++++++---------- python/ray/data/datastream.py | 8 +++---- python/ray/data/read_api.py | 4 +--- python/ray/data/tests/test_pipeline.py | 6 ++--- 7 files changed, 23 insertions(+), 31 deletions(-) diff --git a/doc/source/data/faq.rst b/doc/source/data/faq.rst index a7e38715e381..1ce687709dd5 100644 --- a/doc/source/data/faq.rst +++ b/doc/source/data/faq.rst @@ -287,17 +287,14 @@ Ray Data doesn't perform query optimization, so some manual performance tuning may be necessary depending on your use case and data scale. Please see our :ref:`performance tuning guide ` for more information. -What is strict mode? -==================== +Migrating to strict mode +======================== In Ray 2.5, Ray Data by default always requires data schemas, dropping support for standalone Python objects. In addition to unification and simplicity benefits, this aligns the Ray Data API closer to industry-standard distributed data APIs like Apache Spark and also emerging standards for machine learning datasets like HuggingFace. -Migrating to strict mode -~~~~~~~~~~~~~~~~~~~~~~~~ - You can disable strict mode temporarily by setting the environment variable ``RAY_DATA_STRICT_MODE=0`` on all cluster processes. Strict mode will not be possible to disable in future releases. diff --git a/python/ray/data/_internal/compute.py b/python/ray/data/_internal/compute.py index 491d4d29b8cc..74d3187d45c9 100644 --- a/python/ray/data/_internal/compute.py +++ b/python/ray/data/_internal/compute.py @@ -223,7 +223,7 @@ def __init__( if legacy_min_size is not None or legacy_max_size is not None: if ctx.strict_mode: raise StrictModeError( - "In strict mode, ActorPoolStrategy requires min_size and " + "In Ray 2.5, ActorPoolStrategy requires min_size and " "max_size to be explicit kwargs." 
) else: @@ -503,7 +503,7 @@ def get_compute(compute_spec: Union[str, ComputeStrategy]) -> ComputeStrategy: compute_spec, (TaskPoolStrategy, ActorPoolStrategy) ): raise StrictModeError( - "In strict mode, the compute spec must be either " + "In Ray 2.5, the compute spec must be either " f"TaskPoolStrategy or ActorPoolStategy, was: {compute_spec}." ) elif not compute_spec or compute_spec == "tasks": diff --git a/python/ray/data/_internal/planner/map_rows.py b/python/ray/data/_internal/planner/map_rows.py index fa94373f62ec..99405ff5ebf1 100644 --- a/python/ray/data/_internal/planner/map_rows.py +++ b/python/ray/data/_internal/planner/map_rows.py @@ -30,7 +30,7 @@ def fn( raise StrictModeError( f"Error validating {_truncated_repr(item)}: " "Standalone Python objects are not " - "allowed in strict mode. To return Python objects from map(), " + "allowed in Ray 2.5. To return Python objects from map(), " "wrap them in a dict, e.g., " "return `{'item': item}` instead of just `item`." ) diff --git a/python/ray/data/block.py b/python/ray/data/block.py index db43326b462a..b29cfeb266e9 100644 --- a/python/ray/data/block.py +++ b/python/ray/data/block.py @@ -53,14 +53,13 @@ STRICT_MODE_EXPLANATION = ( colorama.Fore.YELLOW - + "[IMPORTANT]: Ray Data strict mode is on by default in Ray 2.5. When in strict " - "mode, data schemas are required, standalone Python " - "objects are no longer supported, and the default batch format changes to `numpy` " - "from `pandas`. To disable strict mode temporarily, set the environment variable " - "RAY_DATA_STRICT_MODE=0 on all cluster processes. Strict mode will not be " - "possible to disable in future releases.\n\n" - "Learn more here: https://docs.ray.io/en/master/data/faq.html#what-is-strict-mode" - + colorama.Style.RESET_ALL + + "Important: Ray Data requires schemas for all datasets in Ray 2.5. This means " + "that standalone Python objects are no longer supported. In addition, the default " + "batch format is fixed to NumPy. 
To revert to legacy behavior temporarily, " + "set the " + "environment variable RAY_DATA_STRICT_MODE=0 on all cluster processes.\n\n" + "Learn more here: https://docs.ray.io/en/master/data/faq.html#" + "migrating-to-strict-mode" + colorama.Style.RESET_ALL ) @@ -92,7 +91,7 @@ def _validate_key_fn( "schema '{}'.".format(key, schema) ) elif ctx.strict_mode: - raise StrictModeError(f"In strict mode, the key must be a string, was: {key}") + raise StrictModeError(f"In Ray 2.5, the key must be a string, was: {key}") elif key is None: if not is_simple_format: raise ValueError( @@ -161,7 +160,7 @@ def _apply_strict_mode_batch_format(given_batch_format: Optional[str]) -> str: if given_batch_format not in VALID_BATCH_FORMATS_STRICT_MODE: raise StrictModeError( f"The given batch format {given_batch_format} is not allowed " - f"in strict mode (must be one of {VALID_BATCH_FORMATS_STRICT_MODE})." + f"in Ray 2.5 (must be one of {VALID_BATCH_FORMATS_STRICT_MODE})." ) return given_batch_format @@ -424,7 +423,7 @@ def batch_to_block(batch: DataBatch) -> Block: raise StrictModeError( f"Error validating {_truncated_repr(batch)}: " "Standalone numpy arrays are not " - "allowed in strict mode. Return a dict of field -> array, " + "allowed in Ray 2.5. Return a dict of field -> array, " "e.g., `{'data': array}` instead of `array`." ) @@ -472,7 +471,7 @@ def for_block(block: Block) -> "BlockAccessor[T]": raise StrictModeError( f"Error validating {_truncated_repr(block)}: " "Standalone Python objects are not " - "allowed in strict mode. To use Python objects in a datastream, " + "allowed in Ray 2.5. To use Python objects in a datastream, " "wrap them in a dict of numpy arrays, e.g., " "return `{'item': np.array(batch)}` instead of just `batch`." 
) diff --git a/python/ray/data/datastream.py b/python/ray/data/datastream.py index 240781801942..f44315d0b26c 100644 --- a/python/ray/data/datastream.py +++ b/python/ray/data/datastream.py @@ -2609,7 +2609,7 @@ def write_numpy( context = DataContext.get_current() if context.strict_mode and not column: raise StrictModeError( - "In strict mode, the column must be specified " + "In Ray 2.5, the column must be specified " "(e.g., `write_numpy(column='data')`)." ) column = column or TENSOR_COLUMN_NAME @@ -4116,9 +4116,7 @@ def _divide(self, block_idx: int) -> ("Datastream", "Datastream"): def default_batch_format(self) -> Type: context = DataContext.get_current() if context.strict_mode: - raise StrictModeError( - "default_batch_format() is not allowed in strict mode" - ) + raise StrictModeError("default_batch_format() is not allowed in Ray 2.5") import pandas as pd import pyarrow as pa @@ -4138,7 +4136,7 @@ def default_batch_format(self) -> Type: def dataset_format(self) -> BlockFormat: context = DataContext.get_current() if context.strict_mode: - raise StrictModeError("dataset_format() is not allowed in strict mode") + raise StrictModeError("dataset_format() is not allowed in Ray 2.5") if context.use_streaming_executor: raise DeprecationWarning( diff --git a/python/ray/data/read_api.py b/python/ray/data/read_api.py index bfdd635ca61e..67335045e788 100644 --- a/python/ray/data/read_api.py +++ b/python/ray/data/read_api.py @@ -245,9 +245,7 @@ def range(n: int, *, parallelism: int = -1) -> Datastream: def range_table(n: int, *, parallelism: int = -1) -> Datastream: ctx = ray.data.DataContext.get_current() if ctx.strict_mode: - raise DeprecationWarning( - "In strict mode, use range() instead of range_table()." 
- ) + raise DeprecationWarning("In Ray 2.5, use range() instead of range_table().") return read_datasource( RangeDatasource(), parallelism=parallelism, diff --git a/python/ray/data/tests/test_pipeline.py b/python/ray/data/tests/test_pipeline.py index 5a250c729ffb..424eb787b672 100644 --- a/python/ray/data/tests/test_pipeline.py +++ b/python/ray/data/tests/test_pipeline.py @@ -22,19 +22,19 @@ def __init__(self): self.infos = [] def warning(self, msg): - if "strict mode" in msg: + if "STRICT_MODE" in msg: return self.warnings.append(msg) print("warning:", msg) def info(self, msg): - if "strict mode" in msg: + if "STRICT_MODE" in msg: return self.infos.append(msg) print("info:", msg) def debug(self, msg): - if "strict mode" in msg: + if "STRICT_MODE" in msg: return print("debug:", msg) From f2b04d4fc18113997ae1152cfb6a069d6729cd18 Mon Sep 17 00:00:00 2001 From: Artur Niederfahrenhorst Date: Wed, 10 May 2023 01:18:30 +0200 Subject: [PATCH 303/424] [RLlib] Activate RLModules and Learner together in docs (#35145) Signed-off-by: Artur Niederfahrenhorst --- doc/source/rllib/doc_code/catalog_guide.py | 1 + doc/source/rllib/doc_code/rlmodule_guide.py | 11 +++++++++-- 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/doc/source/rllib/doc_code/catalog_guide.py b/doc/source/rllib/doc_code/catalog_guide.py index ca4714163231..e72c5317e43a 100644 --- a/doc/source/rllib/doc_code/catalog_guide.py +++ b/doc/source/rllib/doc_code/catalog_guide.py @@ -125,6 +125,7 @@ def __init__(self, *args, **kwargs): .environment("CartPole-v1") .framework("torch") .rl_module(_enable_rl_module_api=True) + .training(_enable_learner_api=True) ) # Specify the catalog to use for the PPORLModule. 
diff --git a/doc/source/rllib/doc_code/rlmodule_guide.py b/doc/source/rllib/doc_code/rlmodule_guide.py index 77f5fa2084e0..2cad78805811 100644 --- a/doc/source/rllib/doc_code/rlmodule_guide.py +++ b/doc/source/rllib/doc_code/rlmodule_guide.py @@ -15,6 +15,7 @@ .framework("torch") .environment("CartPole-v1") .rl_module(_enable_rl_module_api=True) + .training(_enable_learner_api=True) ) algorithm = config.build() @@ -85,7 +86,10 @@ _enable_rl_module_api=True, rl_module_spec=SingleAgentRLModuleSpec(module_class=DiscreteBCTorchModule), ) - .training(model={"fcnet_hiddens": [32, 32]}) + .training( + model={"fcnet_hiddens": [32, 32]}, + _enable_learner_api=True, + ) ) algo = config.build() @@ -110,7 +114,10 @@ module_specs=SingleAgentRLModuleSpec(module_class=DiscreteBCTorchModule) ), ) - .training(model={"fcnet_hiddens": [32, 32]}) + .training( + model={"fcnet_hiddens": [32, 32]}, + _enable_learner_api=True, + ) ) # __pass-specs-to-configs-ma-end__ From 81476e451b2dc22ca946e669e0ef3cd0cae9db50 Mon Sep 17 00:00:00 2001 From: Avnish Narayan <38871737+avnishn@users.noreply.github.com> Date: Tue, 9 May 2023 16:32:40 -0700 Subject: [PATCH 304/424] [RLlib] Add test utils for rllib contrib (#35056) Signed-off-by: Avnish --- .buildkite/pipeline.ml.yml | 2 +- rllib/BUILD | 11 +- rllib/algorithms/ars/ars.py | 4 + rllib/algorithms/es/es.py | 4 + .../test_algorithm_checkpoint_restore.py | 147 ++------ rllib/tests/test_supported_spaces.py | 237 +++---------- rllib/utils/test_utils.py | 327 +++++++++++++++++- 7 files changed, 413 insertions(+), 319 deletions(-) diff --git a/.buildkite/pipeline.ml.yml b/.buildkite/pipeline.ml.yml index 447a50224cc2..03c5179db864 100644 --- a/.buildkite/pipeline.ml.yml +++ b/.buildkite/pipeline.ml.yml @@ -196,7 +196,7 @@ - ./ci/env/env_info.sh - ./ci/run/run_bazel_test_with_sharding.sh --config=ci $(./ci/run/bazel_export_options) --build_tests_only - 
--test_tag_filters=-learning_tests,-memory_leak_tests,-examples,-tests_dir,-documentation,-multi_gpu,-multi_gpu + --test_tag_filters=-learning_tests,-memory_leak_tests,-examples,-tests_dir,-documentation,-multi_gpu,-no_cpu --test_env=RAY_USE_MULTIPROCESSING_CPU_COUNT=1 rllib/... - label: ":brain: RLlib: RLModule tests" diff --git a/rllib/BUILD b/rllib/BUILD index a57156320112..133d14197952 100644 --- a/rllib/BUILD +++ b/rllib/BUILD @@ -2756,10 +2756,19 @@ py_test( args = ["TestSupportedSpacesPPO"] ) +py_test( + name="tests/test_supported_spaces_dqn", + main="tests/test_supported_spaces.py", + tags=["team:rllib", "tests_dir"], + size="large", + srcs=["tests/test_supported_spaces.py"], + args=["TestSupportedSpacesDQN"] +) + py_test( name = "tests/test_supported_spaces_ppo_no_preproceesor_gpu", main = "tests/test_supported_spaces.py", - tags = ["team:rllib", "tests_dir", "multi_gpu", "exclusive"], + tags = ["team:rllib", "gpu", "no_cpu"], size = "large", srcs = ["tests/test_supported_spaces.py"], args = ["TestSupportedSpacesPPONoPreprocessorGPU"] diff --git a/rllib/algorithms/ars/ars.py b/rllib/algorithms/ars/ars.py index 168d45efdb97..61547b33f25c 100644 --- a/rllib/algorithms/ars/ars.py +++ b/rllib/algorithms/ars/ars.py @@ -357,6 +357,10 @@ def do_rollouts(self, params, timestep_limit=None): eval_lengths=eval_lengths, ) + def stop(self): + """Releases all resources used by this RolloutWorker.""" + pass + def get_policy_class(config: AlgorithmConfig): if config.framework_str == "torch": diff --git a/rllib/algorithms/es/es.py b/rllib/algorithms/es/es.py index 6c378b10258c..c5dc6b51e840 100644 --- a/rllib/algorithms/es/es.py +++ b/rllib/algorithms/es/es.py @@ -357,6 +357,10 @@ def do_rollouts(self, params, timestep_limit=None): eval_lengths=eval_lengths, ) + def stop(self): + """Releases all resources used by this RolloutWorker.""" + pass + def get_policy_class(config: AlgorithmConfig): if config.framework_str == "torch": diff --git 
a/rllib/tests/test_algorithm_checkpoint_restore.py b/rllib/tests/test_algorithm_checkpoint_restore.py index aa19ac0de77a..af74dfc55dd3 100644 --- a/rllib/tests/test_algorithm_checkpoint_restore.py +++ b/rllib/tests/test_algorithm_checkpoint_restore.py @@ -1,11 +1,8 @@ #!/usr/bin/env python - -import numpy as np import unittest import ray -from ray.rllib.utils.test_utils import check, framework_iterator from ray.rllib.algorithms.apex_ddpg import ApexDDPGConfig from ray.rllib.algorithms.sac import SACConfig from ray.rllib.algorithms.simple_q import SimpleQConfig @@ -15,17 +12,10 @@ from ray.rllib.algorithms.ddpg import DDPGConfig from ray.rllib.algorithms.ars import ARSConfig from ray.rllib.algorithms.a3c import A3CConfig -from ray.tune.registry import get_trainable_cls +from ray.rllib.utils.test_utils import test_ckpt_restore import os -def get_mean_action(alg, obs): - out = [] - for _ in range(5000): - out.append(float(alg.compute_single_action(obs))) - return np.mean(out) - - # As we transition things to RLModule API the explore=False will get # deprecated. For now, we will just not set it. The reason is that the RLModule # API has forward_exploration() method that can be overriden if user needs to @@ -100,108 +90,6 @@ def get_mean_action(alg, obs): } -def ckpt_restore_test( - algo_name, - tf2=False, - object_store=False, - replay_buffer=False, - run_restored_algorithm=True, -): - config = algorithms_and_configs[algo_name].to_dict() - # If required, store replay buffer data in checkpoints as well. 
- if replay_buffer: - config["store_buffer_in_checkpoints"] = True - - frameworks = (["tf2"] if tf2 else []) + ["torch", "tf"] - for fw in framework_iterator(config, frameworks=frameworks): - for use_object_store in [False, True] if object_store else [False]: - print("use_object_store={}".format(use_object_store)) - cls = get_trainable_cls(algo_name) - if "DDPG" in algo_name or "SAC" in algo_name: - alg1 = cls(config=config, env="Pendulum-v1") - alg2 = cls(config=config, env="Pendulum-v1") - else: - alg1 = cls(config=config, env="CartPole-v1") - alg2 = cls(config=config, env="CartPole-v1") - - policy1 = alg1.get_policy() - - res = alg1.train() - print("current status: " + str(res)) - - # Check optimizer state as well. - optim_state = policy1.get_state().get("_optimizer_variables") - - if use_object_store: - checkpoint = alg1.save_to_object() - else: - checkpoint = alg1.save() - - # Test if we can restore multiple times (at least twice, assuming failure - # would mainly stem from improperly reused variables) - for num_restores in range(2): - # Sync the models - if use_object_store: - alg2.restore_from_object(checkpoint) - else: - alg2.restore(checkpoint) - - # Compare optimizer state with re-loaded one. - if optim_state: - s2 = alg2.get_policy().get_state().get("_optimizer_variables") - # Tf -> Compare states 1:1. - if fw in ["tf2", "tf"]: - check(s2, optim_state) - # For torch, optimizers have state_dicts with keys=params, - # which are different for the two models (ignore these - # different keys, but compare all values nevertheless). - else: - for i, s2_ in enumerate(s2): - check( - list(s2_["state"].values()), - list(optim_state[i]["state"].values()), - ) - - # Compare buffer content with restored one. 
- if replay_buffer: - data = alg1.local_replay_buffer.replay_buffers[ - "default_policy" - ]._storage[42 : 42 + 42] - new_data = alg2.local_replay_buffer.replay_buffers[ - "default_policy" - ]._storage[42 : 42 + 42] - check(data, new_data) - - for _ in range(1): - if "DDPG" in algo_name or "SAC" in algo_name: - obs = np.clip( - np.random.uniform(size=3), - policy1.observation_space.low, - policy1.observation_space.high, - ) - else: - obs = np.clip( - np.random.uniform(size=4), - policy1.observation_space.low, - policy1.observation_space.high, - ) - a1 = get_mean_action(alg1, obs) - a2 = get_mean_action(alg2, obs) - print("Checking computed actions", alg1, obs, a1, a2) - if abs(a1 - a2) > 0.1: - raise AssertionError( - "algo={} [a1={} a2={}]".format(algo_name, a1, a2) - ) - # Stop algo 1. - alg1.stop() - - if run_restored_algorithm: - # Check that algo 2 can still run. - print("Starting second run on Algo 2...") - alg2.train() - alg2.stop() - - class TestCheckpointRestorePG(unittest.TestCase): @classmethod def setUpClass(cls): @@ -213,10 +101,14 @@ def tearDownClass(cls): def test_a3c_checkpoint_restore(self): # TODO(Kourosh) A3C cannot run a restored algorithm for some reason. 
- ckpt_restore_test("A3C", run_restored_algorithm=False) + test_ckpt_restore( + algorithms_and_configs["A3C"], "CartPole-v1", run_restored_algorithm=False + ) def test_ppo_checkpoint_restore(self): - ckpt_restore_test("PPO", object_store=True) + test_ckpt_restore( + algorithms_and_configs["PPO"], "CartPole-v1", object_store=True + ) class TestCheckpointRestoreOffPolicy(unittest.TestCase): @@ -229,19 +121,30 @@ def tearDownClass(cls): ray.shutdown() def test_apex_ddpg_checkpoint_restore(self): - ckpt_restore_test("APEX_DDPG") + test_ckpt_restore(algorithms_and_configs["APEX_DDPG"], "Pendulum-v1") def test_ddpg_checkpoint_restore(self): - ckpt_restore_test("DDPG", replay_buffer=True) + test_ckpt_restore( + algorithms_and_configs["DDPG"], "Pendulum-v1", replay_buffer=True + ) def test_dqn_checkpoint_restore(self): - ckpt_restore_test("DQN", object_store=True, replay_buffer=True) + test_ckpt_restore( + algorithms_and_configs["DQN"], + "CartPole-v1", + object_store=True, + replay_buffer=True, + ) def test_sac_checkpoint_restore(self): - ckpt_restore_test("SAC", replay_buffer=True) + test_ckpt_restore( + algorithms_and_configs["SAC"], "Pendulum-v1", replay_buffer=True + ) def test_simpleq_checkpoint_restore(self): - ckpt_restore_test("SimpleQ", replay_buffer=True) + test_ckpt_restore( + algorithms_and_configs["SimpleQ"], "CartPole-v1", replay_buffer=True + ) class TestCheckpointRestoreEvolutionAlgos(unittest.TestCase): @@ -254,10 +157,10 @@ def tearDownClass(cls): ray.shutdown() def test_ars_checkpoint_restore(self): - ckpt_restore_test("ARS") + test_ckpt_restore(algorithms_and_configs["ARS"], "CartPole-v1") def test_es_checkpoint_restore(self): - ckpt_restore_test("ES") + test_ckpt_restore(algorithms_and_configs["ES"], "CartPole-v1") if __name__ == "__main__": diff --git a/rllib/tests/test_supported_spaces.py b/rllib/tests/test_supported_spaces.py index 7d7a975bd248..00d290a7677c 100644 --- a/rllib/tests/test_supported_spaces.py +++ 
b/rllib/tests/test_supported_spaces.py @@ -1,10 +1,6 @@ import logging -import time import unittest -import numpy as np -from gymnasium.spaces import Box, Dict, Discrete, Tuple, MultiDiscrete, MultiBinary - import ray from ray.rllib.algorithms.a3c import A3CConfig from ray.rllib.algorithms.appo import APPOConfig @@ -15,176 +11,10 @@ from ray.rllib.algorithms.impala import ImpalaConfig from ray.rllib.algorithms.ppo import PPOConfig from ray.rllib.algorithms.sac import SACConfig -from ray.rllib.examples.env.random_env import RandomEnv -from ray.rllib.models.tf.complex_input_net import ComplexInputNetwork as ComplexNet -from ray.rllib.models.tf.fcnet import FullyConnectedNetwork as FCNet -from ray.rllib.models.tf.visionnet import VisionNetwork as VisionNet -from ray.rllib.models.torch.complex_input_net import ( - ComplexInputNetwork as TorchComplexNet, -) -from ray.rllib.models.torch.fcnet import FullyConnectedNetwork as TorchFCNet -from ray.rllib.models.torch.visionnet import VisionNetwork as TorchVisionNet -from ray.rllib.utils.error import UnsupportedSpaceException -from ray.rllib.utils.test_utils import framework_iterator +from ray.rllib.utils.test_utils import check_supported_spaces -logger = logging.getLogger(__name__) -ACTION_SPACES_TO_TEST = { - # Test discrete twice here until we support multi_binary action spaces - "discrete": Discrete(5), - "continuous": Box(-1.0, 1.0, (5,), dtype=np.float32), - "int_actions": Box(0, 3, (2, 3), dtype=np.int32), - "multidiscrete": MultiDiscrete([1, 2, 3, 4]), - "tuple": Tuple([Discrete(2), Discrete(3), Box(-1.0, 1.0, (5,), dtype=np.float32)]), - "dict": Dict( - { - "action_choice": Discrete(3), - "parameters": Box(-1.0, 1.0, (1,), dtype=np.float32), - "yet_another_nested_dict": Dict({"a": Tuple([Discrete(2), Discrete(3)])}), - } - ), -} - -OBSERVATION_SPACES_TO_TEST = { - "multi_binary": MultiBinary([3, 10, 10]), - "discrete": Discrete(5), - "continuous": Box(-1.0, 1.0, (5,), dtype=np.float32), - "vector2d": Box(-1.0, 1.0, 
(5, 5), dtype=np.float32), - "image": Box(-1.0, 1.0, (84, 84, 1), dtype=np.float32), - "vizdoomgym": Box(-1.0, 1.0, (240, 320, 3), dtype=np.float32), - "tuple": Tuple([Discrete(10), Box(-1.0, 1.0, (5,), dtype=np.float32)]), - "dict": Dict( - { - "task": Discrete(10), - "position": Box(-1.0, 1.0, (5,), dtype=np.float32), - } - ), -} - -# TODO(Artur): Add back tf2 once we CNNs there -RLMODULE_SUPPORTED_FRAMEWORKS = {"torch"} - -# The action spaces that we test RLModules with -RLMODULE_SUPPORTED_ACTION_SPACES = ["discrete", "continuous"] - -# The observation spaces that we test RLModules with -RLMODULE_SUPPORTED_OBSERVATION_SPACES = [ - "multi_binary", - "discrete", - "continuous", - "image", - "vizdoomgym", - "tuple", - "dict", -] - -DEFAULT_OBSERVATION_SPACE = DEFAULT_ACTION_SPACE = "discrete" - - -def check_support(alg, config, train=True, check_bounds=False, tf2=False): - config["log_level"] = "ERROR" - config["env"] = RandomEnv - - def _do_check(alg, config, a_name, o_name): - # We need to copy here so that this validation does not affect the actual - # validation method call further down the line. - config_copy = config.copy() - config_copy.validate() - # If RLModules are enabled, we need to skip a few tests for now: - if config_copy._enable_rl_module_api: - # Skip PPO cases in which RLModules don't support the given spaces yet. 
- if o_name not in RLMODULE_SUPPORTED_OBSERVATION_SPACES: - logger.warning( - "Skipping PPO test with RLModules for obs space {}".format(o_name) - ) - return - if a_name not in RLMODULE_SUPPORTED_ACTION_SPACES: - logger.warning( - "Skipping PPO test with RLModules for action space {}".format( - a_name - ) - ) - return - - fw = config["framework"] - action_space = ACTION_SPACES_TO_TEST[a_name] - obs_space = OBSERVATION_SPACES_TO_TEST[o_name] - print( - "=== Testing {} (fw={}) action_space={} obs_space={} ===".format( - alg, fw, action_space, obs_space - ) - ) - t0 = time.time() - config.update_from_dict( - dict( - env_config=dict( - action_space=action_space, - observation_space=obs_space, - reward_space=Box(1.0, 1.0, shape=(), dtype=np.float32), - p_terminated=1.0, - check_action_bounds=check_bounds, - ) - ) - ) - stat = "ok" - - try: - algo = config.build() - except ray.exceptions.RayActorError as e: - if len(e.args) >= 2 and isinstance(e.args[2], UnsupportedSpaceException): - stat = "unsupported" - elif isinstance(e.args[0].args[2], UnsupportedSpaceException): - stat = "unsupported" - else: - raise - except UnsupportedSpaceException: - stat = "unsupported" - else: - if alg not in ["DDPG", "ES", "ARS", "SAC", "PPO"]: - # 2D (image) input: Expect VisionNet. - if o_name in ["atari", "image"]: - if fw == "torch": - assert isinstance(algo.get_policy().model, TorchVisionNet) - else: - assert isinstance(algo.get_policy().model, VisionNet) - # 1D input: Expect FCNet. - elif o_name == "continuous": - if fw == "torch": - assert isinstance(algo.get_policy().model, TorchFCNet) - else: - assert isinstance(algo.get_policy().model, FCNet) - # Could be either one: ComplexNet (if disabled Preprocessor) - # or FCNet (w/ Preprocessor). 
- elif o_name == "vector2d": - if fw == "torch": - assert isinstance( - algo.get_policy().model, (TorchComplexNet, TorchFCNet) - ) - else: - assert isinstance(algo.get_policy().model, (ComplexNet, FCNet)) - if train: - algo.train() - algo.stop() - print("Test: {}, ran in {}s".format(stat, time.time() - t0)) - - frameworks = {"tf", "torch"} - if tf2: - frameworks.add("tf2") - - if config._enable_rl_module_api: - # Only test the frameworks that are supported by RLModules. - frameworks = frameworks.intersection(RLMODULE_SUPPORTED_FRAMEWORKS) - - for _ in framework_iterator(config, frameworks=frameworks): - # Test all action spaces first. - for a_name in ACTION_SPACES_TO_TEST.keys(): - o_name = DEFAULT_OBSERVATION_SPACE - _do_check(alg, config, a_name, o_name) - - # Now test all observation spaces. - for o_name in OBSERVATION_SPACES_TO_TEST.keys(): - a_name = DEFAULT_ACTION_SPACE - _do_check(alg, config, a_name, o_name) +logger = logging.getLogger(__name__) class TestSupportedSpacesIMPALA(unittest.TestCase): @@ -197,7 +27,7 @@ def tearDownClass(cls) -> None: ray.shutdown() def test_impala(self): - check_support( + check_supported_spaces( "IMPALA", ( ImpalaConfig() @@ -222,9 +52,8 @@ def test_appo(self): .resources(num_gpus=0) .training(vtrace=False, model={"fcnet_hiddens": [10]}) ) - check_support("APPO", config, train=False) config.training(vtrace=True) - check_support("APPO", config) + check_supported_spaces("APPO", config) class TestSupportedSpacesA3C(unittest.TestCase): @@ -245,7 +74,7 @@ def test_a3c(self): model={"fcnet_hiddens": [10]}, ) ) - check_support("A3C", config, check_bounds=True) + check_supported_spaces("A3C", config, check_bounds=True) class TestSupportedSpacesPPO(unittest.TestCase): @@ -270,7 +99,7 @@ def test_ppo(self): }, ) ) - check_support("PPO", config, check_bounds=True, tf2=True) + check_supported_spaces("PPO", config, check_bounds=True) class TestSupportedSpacesPPONoPreprocessorGPU(unittest.TestCase): @@ -311,7 +140,35 @@ def 
test_ppo_no_preprocessors_gpu(self): _enable_learner_api=False ) - check_support("PPO", config, check_bounds=True, tf2=True) + check_supported_spaces( + "PPO", + config, + check_bounds=True, + frameworks=["torch", "tf"], + use_gpu=True, + ) + + +class TestSupportedSpacesDQN(unittest.TestCase): + @classmethod + def setUpClass(cls) -> None: + ray.init() + + @classmethod + def tearDownClass(cls) -> None: + ray.shutdown() + + def test_dqn(self): + config = ( + DQNConfig() + .reporting(min_sample_timesteps_per_iteration=1) + .training( + replay_buffer_config={ + "capacity": 1000, + } + ) + ) + check_supported_spaces("DQN", config, frameworks=["tf2", "torch", "tf"]) class TestSupportedSpacesOffPolicy(unittest.TestCase): @@ -324,7 +181,7 @@ def tearDownClass(cls) -> None: ray.shutdown() def test_ddpg(self): - check_support( + check_supported_spaces( "DDPG", DDPGConfig() .exploration(exploration_config={"ou_base_scale": 100.0}) @@ -336,20 +193,8 @@ def test_ddpg(self): check_bounds=True, ) - def test_dqn(self): - config = ( - DQNConfig() - .reporting(min_sample_timesteps_per_iteration=1) - .training( - replay_buffer_config={ - "capacity": 1000, - } - ) - ) - check_support("DQN", config, tf2=True) - def test_sac(self): - check_support( + check_supported_spaces( "SAC", SACConfig().training(replay_buffer_config={"capacity": 1000}), check_bounds=True, @@ -366,19 +211,23 @@ def tearDownClass(cls) -> None: ray.shutdown() def test_ars(self): - check_support( + check_supported_spaces( "ARS", ARSConfig() .rollouts(num_rollout_workers=1) .training(noise_size=1500000, num_rollouts=1, rollouts_used=1), + # framework=None corresponds to numpy since ARS uses a numpy policy + frameworks=[None], ) def test_es(self): - check_support( + check_supported_spaces( "ES", ESConfig() .rollouts(num_rollout_workers=1) .training(noise_size=1500000, episodes_per_batch=1, train_batch_size=1), + # framework=None corresponds to numpy since ES uses a numpy policy + frameworks=[None], ) diff --git 
a/rllib/utils/test_utils.py b/rllib/utils/test_utils.py index c60cbd5ae633..6d1db9114104 100644 --- a/rllib/utils/test_utils.py +++ b/rllib/utils/test_utils.py @@ -1,7 +1,9 @@ from collections import Counter import copy import gymnasium as gym -from gymnasium.spaces import Box +from gymnasium.spaces import Box, Discrete, MultiDiscrete, MultiBinary +from gymnasium.spaces import Dict as GymDict +from gymnasium.spaces import Tuple as GymTuple import logging import numpy as np import os @@ -34,6 +36,9 @@ ) from ray.rllib.utils.nested_dict import NestedDict from ray.rllib.utils.typing import PartialAlgorithmConfigDict, ResultDict +from ray.rllib.utils.error import UnsupportedSpaceException + + from ray.tune import CLIReporter, run_experiments @@ -1261,3 +1266,323 @@ def check(self, rtol=None): # same input and all nets have the same (dummy) weight values. for v in self.output_values.values(): check(v, self.output_values[main_key], rtol=rtol or 0.002) + + +def _get_mean_action_from_algorithm(alg: "Algorithm", obs: np.ndarray) -> np.ndarray: + """Returns the mean action computed by the given algorithm. + + Note: This makes calls to `Algorithm.compute_single_action` + + Args: + alg: The constructed algorithm to run inference on. + obs: The observation to compute the action for. + + Returns: + The mean action computed by the algorithm over 5000 samples. + + """ + out = [] + for _ in range(5000): + out.append(float(alg.compute_single_action(obs))) + return np.mean(out) + + +def test_ckpt_restore( + config: "AlgorithmConfig", + env_name: str, + tf2=False, + object_store=False, + replay_buffer=False, + run_restored_algorithm=True, +): + """Test that after an algorithm is trained, its checkpoint can be restored. + + Check the replay buffers of the algorithm to see if they have identical data. + Check the optimizer weights of the policy on the algorithm to see if they're + identical. + + Args: + config: The config of the algorithm to be trained. 
+ env_name: The name of the gymansium environment to be trained on. + tf2: Whether to test the algorithm with the tf2 framework or not. + object_store: Whether to test checkpointing with objects from the object store. + replay_buffer: Whether to test checkpointing with replay buffers. + run_restored_algorithm: Whether to run the restored algorithm after restoring. + + """ + # config = algorithms_and_configs[algo_name].to_dict() + # If required, store replay buffer data in checkpoints as well. + if replay_buffer: + config["store_buffer_in_checkpoints"] = True + + frameworks = (["tf2"] if tf2 else []) + ["torch", "tf"] + for fw in framework_iterator(config, frameworks=frameworks): + for use_object_store in [False, True] if object_store else [False]: + print("use_object_store={}".format(use_object_store)) + env = gym.make(env_name) + alg1 = config.environment(env_name).framework(fw).build() + alg2 = config.environment(env_name).build() + + policy1 = alg1.get_policy() + + res = alg1.train() + print("current status: " + str(res)) + + # Check optimizer state as well. + optim_state = policy1.get_state().get("_optimizer_variables") + + if use_object_store: + checkpoint = alg1.save_to_object() + else: + checkpoint = alg1.save() + + # Test if we can restore multiple times (at least twice, assuming failure + # would mainly stem from improperly reused variables) + for num_restores in range(2): + # Sync the models + if use_object_store: + alg2.restore_from_object(checkpoint) + else: + alg2.restore(checkpoint) + + # Compare optimizer state with re-loaded one. + if optim_state: + s2 = alg2.get_policy().get_state().get("_optimizer_variables") + # Tf -> Compare states 1:1. + if fw in ["tf2", "tf"]: + check(s2, optim_state) + # For torch, optimizers have state_dicts with keys=params, + # which are different for the two models (ignore these + # different keys, but compare all values nevertheless). 
+ else: + for i, s2_ in enumerate(s2): + check( + list(s2_["state"].values()), + list(optim_state[i]["state"].values()), + ) + + # Compare buffer content with restored one. + if replay_buffer: + data = alg1.local_replay_buffer.replay_buffers[ + "default_policy" + ]._storage[42 : 42 + 42] + new_data = alg2.local_replay_buffer.replay_buffers[ + "default_policy" + ]._storage[42 : 42 + 42] + check(data, new_data) + + for _ in range(1): + obs = env.observation_space.sample() + a1 = _get_mean_action_from_algorithm(alg1, obs) + a2 = _get_mean_action_from_algorithm(alg2, obs) + print("Checking computed actions", alg1, obs, a1, a2) + if abs(a1 - a2) > 0.1: + raise AssertionError( + "algo={} [a1={} a2={}]".format(str(alg1.__class__), a1, a2) + ) + # Stop algo 1. + alg1.stop() + + if run_restored_algorithm: + # Check that algo 2 can still run. + print("Starting second run on Algo 2...") + alg2.train() + alg2.stop() + + +def check_supported_spaces( + alg: str, + config: "AlgorithmConfig", + train: bool = True, + check_bounds: bool = False, + frameworks: List = None, + use_gpu: bool = False, +): + """Checks whether the given algorithm supports different action and obs spaces. + + Performs the checks by constructing an rllib algorithm from the config and + checking to see that the model inside the policy is the correct one given + the action and obs spaces. For example if the action space is discrete and + the obs space is an image, then the model should be a vision network with + a categorical action distribution. + + Args: + alg: The name of the algorithm to test. + config: The config to use for the algorithm. + train: Whether to train the algorithm for a few iterations. + check_bounds: Whether to check the bounds of the action space. + frameworks: The frameworks to test the algorithm with. + use_gpu: Whether to check support for training on a gpu. 
+ + + """ + # do these imports here because otherwise we have circular imports + from ray.rllib.examples.env.random_env import RandomEnv + from ray.rllib.models.tf.complex_input_net import ComplexInputNetwork as ComplexNet + from ray.rllib.models.tf.fcnet import FullyConnectedNetwork as FCNet + from ray.rllib.models.tf.visionnet import VisionNetwork as VisionNet + from ray.rllib.models.torch.complex_input_net import ( + ComplexInputNetwork as TorchComplexNet, + ) + from ray.rllib.models.torch.fcnet import FullyConnectedNetwork as TorchFCNet + from ray.rllib.models.torch.visionnet import VisionNetwork as TorchVisionNet + + action_spaces_to_test = { + # Test discrete twice here until we support multi_binary action spaces + "discrete": Discrete(5), + "continuous": Box(-1.0, 1.0, (5,), dtype=np.float32), + "int_actions": Box(0, 3, (2, 3), dtype=np.int32), + "multidiscrete": MultiDiscrete([1, 2, 3, 4]), + "tuple": GymTuple( + [Discrete(2), Discrete(3), Box(-1.0, 1.0, (5,), dtype=np.float32)] + ), + "dict": GymDict( + { + "action_choice": Discrete(3), + "parameters": Box(-1.0, 1.0, (1,), dtype=np.float32), + "yet_another_nested_dict": GymDict( + {"a": GymTuple([Discrete(2), Discrete(3)])} + ), + } + ), + } + + observation_spaces_to_test = { + "multi_binary": MultiBinary([3, 10, 10]), + "discrete": Discrete(5), + "continuous": Box(-1.0, 1.0, (5,), dtype=np.float32), + "vector2d": Box(-1.0, 1.0, (5, 5), dtype=np.float32), + "image": Box(-1.0, 1.0, (84, 84, 1), dtype=np.float32), + "vizdoomgym": Box(-1.0, 1.0, (240, 320, 3), dtype=np.float32), + "tuple": GymTuple([Discrete(10), Box(-1.0, 1.0, (5,), dtype=np.float32)]), + "dict": GymDict( + { + "task": Discrete(10), + "position": Box(-1.0, 1.0, (5,), dtype=np.float32), + } + ), + } + + # The observation spaces that we test RLModules with + rlmodule_supported_observation_spaces = [ + "multi_binary", + "discrete", + "continuous", + "image", + "vizdoomgym", + "tuple", + "dict", + ] + + # TODO(Artur): Add back tf2 once we CNNs 
there + rlmodule_supported_frameworks = {"torch"} + + # The action spaces that we test RLModules with + rlmodule_supported_action_spaces = ["discrete", "continuous"] + + default_observation_space = default_action_space = "discrete" + + config["log_level"] = "ERROR" + config["env"] = RandomEnv + + def _do_check(alg, config, a_name, o_name): + + # We need to copy here so that this validation does not affect the actual + # validation method call further down the line. + config_copy = config.copy() + config_copy.validate() + # If RLModules are enabled, we need to skip a few tests for now: + if config_copy._enable_rl_module_api: + # Skip PPO cases in which RLModules don't support the given spaces yet. + if o_name not in rlmodule_supported_observation_spaces: + logger.warning( + "Skipping PPO test with RLModules for obs space {}".format(o_name) + ) + return + if a_name not in rlmodule_supported_action_spaces: + logger.warning( + "Skipping PPO test with RLModules for action space {}".format( + a_name + ) + ) + return + + fw = config["framework"] + action_space = action_spaces_to_test[a_name] + obs_space = observation_spaces_to_test[o_name] + print( + "=== Testing {} (fw={}) action_space={} obs_space={} ===".format( + alg, fw, action_space, obs_space + ) + ) + t0 = time.time() + config.update_from_dict( + dict( + env_config=dict( + action_space=action_space, + observation_space=obs_space, + reward_space=Box(1.0, 1.0, shape=(), dtype=np.float32), + p_terminated=1.0, + check_action_bounds=check_bounds, + ) + ) + ) + stat = "ok" + + try: + algo = config.build() + except ray.exceptions.RayActorError as e: + if len(e.args) >= 2 and isinstance(e.args[2], UnsupportedSpaceException): + stat = "unsupported" + elif isinstance(e.args[0].args[2], UnsupportedSpaceException): + stat = "unsupported" + else: + raise + except UnsupportedSpaceException: + stat = "unsupported" + else: + if alg not in ["DDPG", "ES", "ARS", "SAC", "PPO"]: + # 2D (image) input: Expect VisionNet. 
+ if o_name in ["atari", "image"]: + if fw == "torch": + assert isinstance(algo.get_policy().model, TorchVisionNet) + else: + assert isinstance(algo.get_policy().model, VisionNet) + # 1D input: Expect FCNet. + elif o_name == "continuous": + if fw == "torch": + assert isinstance(algo.get_policy().model, TorchFCNet) + else: + assert isinstance(algo.get_policy().model, FCNet) + # Could be either one: ComplexNet (if disabled Preprocessor) + # or FCNet (w/ Preprocessor). + elif o_name == "vector2d": + if fw == "torch": + assert isinstance( + algo.get_policy().model, (TorchComplexNet, TorchFCNet) + ) + else: + assert isinstance(algo.get_policy().model, (ComplexNet, FCNet)) + if train: + algo.train() + algo.stop() + print("Test: {}, ran in {}s".format(stat, time.time() - t0)) + + if config._enable_rl_module_api: + # Only test the frameworks that are supported by RLModules. + frameworks = frameworks.intersection(rlmodule_supported_frameworks) + + if not frameworks: + frameworks = ["tf2", "torch", "tf"] + _do_check_remote = ray.remote(_do_check) + _do_check_remote = _do_check_remote.options(num_gpus=1 if use_gpu else 0) + for _ in framework_iterator(config, frameworks=frameworks): + # Test all action spaces first. + for a_name in action_spaces_to_test.keys(): + o_name = default_observation_space + ray.get(_do_check_remote.remote(alg, config, a_name, o_name)) + + # Now test all observation spaces. 
+ for o_name in observation_spaces_to_test.keys(): + a_name = default_action_space + ray.get(_do_check_remote.remote(alg, config, a_name, o_name)) From 13de01aa7dd1b4ddafb59ea5179e140def1af2e2 Mon Sep 17 00:00:00 2001 From: Scott Lee Date: Tue, 9 May 2023 16:44:24 -0700 Subject: [PATCH 305/424] [Data] Allow fusing `MapOperator` -> `Repartition` operators (#35178) As a followup to https://github.com/ray-project/ray/pull/34847, allow fusing `MapOperator` -> `Repartition` operators for the shuffle repartition case (we do not support fusing for split repartition, which only uses `ShuffleTaskSpec.reduce` and thus cannot call the upstream map function passed to `ShuffleTaskSpec.map`). Signed-off-by: Scott Lee --- .../logical/rules/operator_fusion.py | 26 +++++++++++++++--- .../ray/data/_internal/planner/repartition.py | 18 +++++++++++-- .../data/tests/test_execution_optimizer.py | 27 ++++++++++--------- 3 files changed, 53 insertions(+), 18 deletions(-) diff --git a/python/ray/data/_internal/logical/rules/operator_fusion.py b/python/ray/data/_internal/logical/rules/operator_fusion.py index eb5d0b2e0820..41b61538bb5a 100644 --- a/python/ray/data/_internal/logical/rules/operator_fusion.py +++ b/python/ray/data/_internal/logical/rules/operator_fusion.py @@ -1,4 +1,5 @@ from typing import Iterator, List, Tuple +from ray.data._internal.logical.operators.all_to_all_operator import Repartition from ray.data._internal.execution.operators.map_operator import MapOperator from ray.data._internal.logical.operators.all_to_all_operator import ( AbstractAllToAll, @@ -107,8 +108,8 @@ def _can_fuse(self, down_op: PhysicalOperator, up_op: PhysicalOperator) -> bool: # We currently only support fusing for the following cases: # - MapOperator -> MapOperator - # - MapOperator -> AllToAllOperator (only RandomShuffle - # LogicalOperator is currently supported) + # - MapOperator -> AllToAllOperator + # (only RandomShuffle and Repartition LogicalOperators are currently supported) if not 
isinstance(down_op, (MapOperator, AllToAllOperator)) or not isinstance( up_op, MapOperator ): @@ -125,11 +126,17 @@ def _can_fuse(self, down_op: PhysicalOperator, up_op: PhysicalOperator) -> bool: # We currently only support fusing for the following cases: # - AbstractMap -> AbstractMap # - AbstractMap -> RandomShuffle + # - AbstractMap -> Repartition (shuffle=True) if not isinstance( - down_logical_op, (AbstractMap, RandomShuffle) + down_logical_op, (AbstractMap, RandomShuffle, Repartition) ) or not isinstance(up_logical_op, AbstractMap): return False + # Do not fuse Repartition operator if shuffle is disabled + # (i.e. using split shuffle). + if isinstance(down_logical_op, Repartition) and not down_logical_op._shuffle: + return False + # Allow fusing tasks->actors if the resources are compatible (read->map), but # not the other way around. The latter (downstream op) will be used as the # compute if fused. @@ -306,7 +313,18 @@ def fused_all_to_all_transform_fn( # Bottom out at the source logical op (e.g. Read()). input_op = up_logical_op - logical_op = RandomShuffle(input_op, name=name, ray_remote_args=ray_remote_args) + if isinstance(down_logical_op, RandomShuffle): + logical_op = RandomShuffle( + input_op, + name=name, + ray_remote_args=ray_remote_args, + ) + elif isinstance(down_logical_op, Repartition): + logical_op = Repartition( + input_op, + num_outputs=down_logical_op._num_outputs, + shuffle=down_logical_op._shuffle, + ) self._op_map[op] = logical_op # Return the fused physical operator. 
return op diff --git a/python/ray/data/_internal/planner/repartition.py b/python/ray/data/_internal/planner/repartition.py index 544b0f83b06c..01b3a0226375 100644 --- a/python/ray/data/_internal/planner/repartition.py +++ b/python/ray/data/_internal/planner/repartition.py @@ -1,4 +1,4 @@ -from typing import List, Tuple +from typing import List, Optional, Tuple, TYPE_CHECKING from ray.data._internal.execution.interfaces import ( AllToAllTransformFn, @@ -19,6 +19,9 @@ from ray.data._internal.stats import StatsDict from ray.data.context import DataContext +if TYPE_CHECKING: + from python.ray.data._internal.execution.interfaces import MapTransformFn + def generate_repartition_fn( num_outputs: int, @@ -30,7 +33,18 @@ def shuffle_repartition_fn( refs: List[RefBundle], ctx: TaskContext, ) -> Tuple[List[RefBundle], StatsDict]: - shuffle_spec = ShuffleTaskSpec(random_shuffle=False) + # If map_transform_fn is specified (e.g. from fusing + # MapOperator->AllToAllOperator), we pass a map function which + # is applied to each block before shuffling. 
+ map_transform_fn: Optional["MapTransformFn"] = ctx.upstream_map_transform_fn + upstream_map_fn = None + if map_transform_fn: + upstream_map_fn = lambda block: map_transform_fn(block, ctx) # noqa: E731 + + shuffle_spec = ShuffleTaskSpec( + random_shuffle=False, + upstream_map_fn=upstream_map_fn, + ) if DataContext.get_current().use_push_based_shuffle: scheduler = PushBasedShuffleTaskScheduler(shuffle_spec) diff --git a/python/ray/data/tests/test_execution_optimizer.py b/python/ray/data/tests/test_execution_optimizer.py index b2b4450d70a5..2a140fdbe0cd 100644 --- a/python/ray/data/tests/test_execution_optimizer.py +++ b/python/ray/data/tests/test_execution_optimizer.py @@ -288,10 +288,11 @@ def test_repartition_e2e( def _check_repartition_usage_and_stats(ds): _check_usage_record(["ReadRange", "Repartition"]) ds_stats: DatastreamStats = ds._plan.stats() - assert ds_stats.base_name == "Repartition" if shuffle: - assert "RepartitionMap" in ds_stats.stages + assert ds_stats.base_name == "DoRead->Repartition" + assert "DoRead->RepartitionMap" in ds_stats.stages else: + assert ds_stats.base_name == "Repartition" assert "RepartitionSplit" in ds_stats.stages assert "RepartitionReduce" in ds_stats.stages @@ -630,7 +631,7 @@ def fn(batch): def test_read_map_batches_operator_fusion_with_random_shuffle_operator( - ray_start_regular_shared, enable_optimizer + ray_start_regular_shared, enable_optimizer, use_push_based_shuffle ): # Note: we currently only support fusing MapOperator->AllToAllOperator. def fn(batch): @@ -676,24 +677,26 @@ def fn(batch): _check_usage_record(["ReadRange", "RandomShuffle", "MapBatches"]) +@pytest.mark.parametrize("shuffle", (True, False)) def test_read_map_batches_operator_fusion_with_repartition_operator( - ray_start_regular_shared, enable_optimizer + ray_start_regular_shared, enable_optimizer, shuffle, use_push_based_shuffle ): - # Note: We currently do not fuse MapBatches->Repartition. 
- # This test is to ensure that we don't accidentally fuse them, until - # we implement it later. def fn(batch): return {"id": [x + 1 for x in batch["id"]]} n = 10 ds = ray.data.range(n) ds = ds.map_batches(fn, batch_size=None) - ds = ds.repartition(2) + ds = ds.repartition(2, shuffle=shuffle) assert set(extract_values("id", ds.take_all())) == set(range(1, n + 1)) - # TODO(Scott): update the below assertions after we support fusion. - assert "DoRead->MapBatches->Repartition" not in ds.stats() - assert "DoRead->MapBatches" in ds.stats() - assert "Repartition" in ds.stats() + + # Operator fusion is only supported for shuffle repartition. + if shuffle: + assert "DoRead->MapBatches->Repartition" in ds.stats() + else: + assert "DoRead->MapBatches->Repartition" not in ds.stats() + assert "DoRead->MapBatches" in ds.stats() + assert "Repartition" in ds.stats() _check_usage_record(["ReadRange", "MapBatches", "Repartition"]) From 6d6b8813f24604d2ad0ab5f7abf3f543bb2ebe55 Mon Sep 17 00:00:00 2001 From: Yi Cheng <74173148+iycheng@users.noreply.github.com> Date: Tue, 9 May 2023 17:02:26 -0700 Subject: [PATCH 306/424] [core] Add object owner and copy metrics to node stats (#35119) This PR adds the object owner and copy metrics to `GetNodeStats` RPC endpoint. The inlined small objects are not counted as one copy because it's not stored in object store and when it's used, it'll be copied inline, so no need to count it. But it's still counted as 1 as ownership for correctness because it's actually owned by worker. The local copies are retrieved from local object manager directly and owner counts needs the caller to aggregate the metrics from each core worker. 
--- python/ray/tests/test_metrics.py | 125 +++++++++++++++++++++++++ src/ray/core_worker/core_worker.cc | 1 + src/ray/core_worker/reference_count.cc | 9 ++ src/ray/core_worker/reference_count.h | 5 + src/ray/protobuf/common.proto | 2 + src/ray/protobuf/node_manager.proto | 2 + src/ray/raylet/local_object_manager.cc | 3 +- src/ray/raylet/local_object_manager.h | 6 +- src/ray/raylet/node_manager.cc | 2 +- 9 files changed, 150 insertions(+), 5 deletions(-) diff --git a/python/ray/tests/test_metrics.py b/python/ray/tests/test_metrics.py index 4e629da92441..a9357461d0aa 100644 --- a/python/ray/tests/test_metrics.py +++ b/python/ray/tests/test_metrics.py @@ -100,6 +100,131 @@ def verify(): wait_for_condition(verify) +def get_owner_info(node_ids): + node_addrs = { + n["NodeID"]: (n["NodeManagerAddress"], n["NodeManagerPort"]) + for n in ray.nodes() + } + # Force a global gc to clean up the object store. + ray._private.internal_api.global_gc() + owner_stats = {n: 0 for n in node_ids} + primary_copy_stats = {n: 0 for n in node_ids} + + for node_id in node_ids: + node_stats = ray._private.internal_api.node_stats( + node_addrs[node_id][0], node_addrs[node_id][1], False + ) + owner_stats[node_id] = sum( + [stats.num_owned_objects for stats in node_stats.core_workers_stats] + ) + primary_copy_stats[ + node_id + ] = node_stats.store_stats.num_object_store_primary_copies + + print(owner_stats) + print(node_ids) + owner_stats = [owner_stats.get(node_id, 0) for node_id in node_ids] + primary_copy_stats = [primary_copy_stats.get(node_id, 0) for node_id in node_ids] + print("owner_stats", owner_stats) + print("primary_copy_stats", primary_copy_stats) + + return owner_stats, primary_copy_stats + + +def test_node_object_metrics(ray_start_cluster, monkeypatch): + NUM_NODES = 3 + cluster = ray_start_cluster + for i in range(NUM_NODES): + cluster.add_node(True, resources={f"node_{i}": 1}) + if i == 0: + ray.init(address=cluster.address) + node_ids = [] + + for i in range(NUM_NODES): + + 
@ray.remote(resources={f"node_{i}": 1}) + def get_node_id(): + return ray.get_runtime_context().get_node_id() + + node_ids.append(ray.get(get_node_id.remote())) + + # Object store stats + # x is owned by node_0 + # x is stored at node_0 + x = ray.put([1]) # noqa: F841 + wait_for_condition(lambda: get_owner_info(node_ids) == ([1, 0, 0], [1, 0, 0])) + + # Test nested with put + @ray.remote(resources={"node_1": 1}) + def big_obj(): + # b is owned by node_1 + # b is stored at node_1 + b = ray.put([1] * 1024 * 1024 * 10) + return b + + # Object store stats + # big_obj is owned by node_0 + # big_obj is stored in memory (no primary copy) + big_obj_ref = big_obj.remote() # noqa: F841 + wait_for_condition(lambda: get_owner_info(node_ids) == ([2, 1, 0], [1, 1, 0])) + + # Test nested with task (small output) + @ray.remote(resources={"node_1": 1}) + def nest_task(s): + @ray.remote(resources={"node_2": 1}) + def task(): + return [1] * s + + # t is owned by node_1 + # if s is small, + # then it's is stored in memory of node_1 (no primary copy) + # else it's stored in object store of node_1 + t = task.remote() + return t + + # nest_ref is owned by node_0 + # nest_ref is stored in memory (no primary copy) + nest_ref = nest_task.remote(1) # noqa: F841 + wait_for_condition(lambda: get_owner_info(node_ids) == ([3, 2, 0], [1, 1, 0])) + + big_nest = nest_task.remote(1024 * 1024 * 10) # noqa: F841 + + wait_for_condition(lambda: get_owner_info(node_ids) == ([4, 3, 0], [1, 1, 1])) + + # Test with assigned owned + @ray.remote(resources={"node_2": 0.5}, num_cpus=0) + class A: + def ready(self): + return + + def gen(self): + return ray.put(10) + + # actor is owned by node_0 + # actor is not an object, so no object store copies + actor = A.remote() # noqa: F841 + ray.get(actor.ready.remote()) + # o is owned by actor (node_2) + # o is stored in object store of node_0 + o = ray.put(1, _owner=actor) # noqa: F841 + wait_for_condition(lambda: get_owner_info(node_ids) == ([5, 3, 1], [2, 1, 1])) + + 
# Test with detached owned + # detached actor is owned by GCS. So it's not counted in the owner stats + detached_actor = A.options(lifetime="detached", name="A").remote() + ray.get(detached_actor.ready.remote()) + for i in range(3): + assert get_owner_info(node_ids) == ([5, 3, 1], [2, 1, 1]) + import time + + time.sleep(1) + # gen_obj is owned by node_0 + # the inner object is owned by A (node_2) + # the inner object is stored in object store of node_2 + gen_obj = detached_actor.gen.remote() # noqa: F841 + wait_for_condition(lambda: get_owner_info(node_ids) == ([6, 3, 2], [2, 1, 2])) + + def test_multi_node_metrics_export_port_discovery(ray_start_cluster): NUM_NODES = 3 cluster = ray_start_cluster diff --git a/src/ray/core_worker/core_worker.cc b/src/ray/core_worker/core_worker.cc index 4c27c9bf7feb..ff03b5b85508 100644 --- a/src/ray/core_worker/core_worker.cc +++ b/src/ray/core_worker/core_worker.cc @@ -3515,6 +3515,7 @@ void CoreWorker::HandleGetCoreWorkerStats(rpc::GetCoreWorkerStatsRequest request stats->set_task_queue_length(task_queue_length_); stats->set_num_executed_tasks(num_executed_tasks_); stats->set_num_object_refs_in_scope(reference_counter_->NumObjectIDsInScope()); + stats->set_num_owned_objects(reference_counter_->NumObjectOwnedByUs()); stats->set_ip_address(rpc_address_.ip_address()); stats->set_port(rpc_address_.port()); stats->set_pid(getpid()); diff --git a/src/ray/core_worker/reference_count.cc b/src/ray/core_worker/reference_count.cc index 970c9c990c65..ba5321828207 100644 --- a/src/ray/core_worker/reference_count.cc +++ b/src/ray/core_worker/reference_count.cc @@ -251,6 +251,7 @@ bool ReferenceCounter::AddOwnedObjectInternal( if (object_id_refs_.count(object_id) != 0) { return false; } + num_objects_owned_by_us_++; RAY_LOG(DEBUG) << "Adding owned object " << object_id; // If the entry doesn't exist, we initialize the direct reference count to zero // because this corresponds to a submitted task whose return ObjectID will be created @@ -666,6 
+667,9 @@ void ReferenceCounter::EraseReference(ReferenceTable::iterator it) { reconstructable_owned_objects_index_.erase(index_it); } freed_objects_.erase(it->first); + if (it->second.owned_by_us) { + num_objects_owned_by_us_--; + } object_id_refs_.erase(it); ShutdownIfNeeded(); } @@ -811,6 +815,11 @@ size_t ReferenceCounter::NumObjectIDsInScope() const { return object_id_refs_.size(); } +size_t ReferenceCounter::NumObjectOwnedByUs() const { + absl::MutexLock lock(&mutex_); + return num_objects_owned_by_us_; +} + std::unordered_set ReferenceCounter::GetAllInScopeObjectIDs() const { absl::MutexLock lock(&mutex_); std::unordered_set in_scope_object_ids; diff --git a/src/ray/core_worker/reference_count.h b/src/ray/core_worker/reference_count.h index daf79082dd9a..c16ee0392119 100644 --- a/src/ray/core_worker/reference_count.h +++ b/src/ray/core_worker/reference_count.h @@ -315,6 +315,8 @@ class ReferenceCounter : public ReferenceCounterInterface, /// Returns the total number of ObjectIDs currently in scope. size_t NumObjectIDsInScope() const LOCKS_EXCLUDED(mutex_); + size_t NumObjectOwnedByUs() const LOCKS_EXCLUDED(mutex_); + /// Returns a set of all ObjectIDs currently in scope (i.e., nonzero reference count). std::unordered_set GetAllInScopeObjectIDs() const LOCKS_EXCLUDED(mutex_); @@ -1010,6 +1012,9 @@ class ReferenceCounter : public ReferenceCounterInterface, /// due to node failure. These objects are still in scope and need to be /// recovered. std::vector objects_to_recover_ GUARDED_BY(mutex_); + + /// Keep track of objects owend by this worker. + size_t num_objects_owned_by_us_ GUARDED_BY(mutex_) = 0; }; } // namespace core diff --git a/src/ray/protobuf/common.proto b/src/ray/protobuf/common.proto index 751194bc8f0d..6ac9b1411135 100644 --- a/src/ray/protobuf/common.proto +++ b/src/ray/protobuf/common.proto @@ -763,6 +763,8 @@ message CoreWorkerStats { WorkerType worker_type = 23; // Length of the number of objects without truncation. 
int64 objects_total = 24; + // Number of objects owned by the worker. + int64 num_owned_objects = 25; } // Resource usage reported by the node reporter. diff --git a/src/ray/protobuf/node_manager.proto b/src/ray/protobuf/node_manager.proto index d5861747faab..16194eea2af7 100644 --- a/src/ray/protobuf/node_manager.proto +++ b/src/ray/protobuf/node_manager.proto @@ -219,6 +219,8 @@ message ObjectStoreStats { // the node has more pull requests than available object store // memory. bool object_pulls_queued = 13; + // The number of primary copies of objects in the local node. + int64 num_object_store_primary_copies = 14; } message GetNodeStatsReply { diff --git a/src/ray/raylet/local_object_manager.cc b/src/ray/raylet/local_object_manager.cc index e745adff3083..6747e5c93564 100644 --- a/src/ray/raylet/local_object_manager.cc +++ b/src/ray/raylet/local_object_manager.cc @@ -593,7 +593,7 @@ void LocalObjectManager::DeleteSpilledObjects(std::vector urls_to_d }); } -void LocalObjectManager::FillObjectSpillingStats(rpc::GetNodeStatsReply *reply) const { +void LocalObjectManager::FillObjectStoreStats(rpc::GetNodeStatsReply *reply) const { auto stats = reply->mutable_store_stats(); stats->set_spill_time_total_s(spill_time_total_s_); stats->set_spilled_bytes_total(spilled_bytes_total_); @@ -602,6 +602,7 @@ void LocalObjectManager::FillObjectSpillingStats(rpc::GetNodeStatsReply *reply) stats->set_restored_bytes_total(restored_bytes_total_); stats->set_restored_objects_total(restored_objects_total_); stats->set_object_store_bytes_primary_copy(pinned_objects_size_); + stats->set_num_object_store_primary_copies(local_objects_.size()); } void LocalObjectManager::RecordMetrics() const { diff --git a/src/ray/raylet/local_object_manager.h b/src/ray/raylet/local_object_manager.h index 72cb4db5d400..116776bd1d16 100644 --- a/src/ray/raylet/local_object_manager.h +++ b/src/ray/raylet/local_object_manager.h @@ -147,10 +147,10 @@ class LocalObjectManager { /// \return True if spilling is 
still in progress. False otherwise. bool IsSpillingInProgress(); - /// Populate object spilling stats. + /// Populate object store stats. /// - /// \param Output parameter. - void FillObjectSpillingStats(rpc::GetNodeStatsReply *reply) const; + /// \param reply Output parameter. + void FillObjectStoreStats(rpc::GetNodeStatsReply *reply) const; /// Record object spilling stats to metrics. void RecordMetrics() const; diff --git a/src/ray/raylet/node_manager.cc b/src/ray/raylet/node_manager.cc index 4fd965c5b940..f7249347a772 100644 --- a/src/ray/raylet/node_manager.cc +++ b/src/ray/raylet/node_manager.cc @@ -2502,7 +2502,7 @@ void NodeManager::HandleGetNodeStats(rpc::GetNodeStatsRequest node_stats_request rpc::GetNodeStatsReply *reply, rpc::SendReplyCallback send_reply_callback) { // Report object spilling stats. - local_object_manager_.FillObjectSpillingStats(reply); + local_object_manager_.FillObjectStoreStats(reply); // Report object store stats. object_manager_.FillObjectStoreStats(reply); // As a result of the HandleGetNodeStats, we are collecting information from all From 3d7b2ff11d1810652d88b5353e83934e71919e2c Mon Sep 17 00:00:00 2001 From: Eric Liang Date: Tue, 9 May 2023 17:19:01 -0700 Subject: [PATCH 307/424] [data] Revert the dataset to datastream class rename (#35082) After getting further feedback about confusion from some types of users, we've decided to not proceed with the Dataset -> Datastream rename for 2.5. Instead, we will retain the data structure name and just refer to it as "streaming datasets" in the copy and emphasize its streaming nature in other ways. 
--------- Signed-off-by: Eric Liang --- doc/source/data/api/api.rst | 2 +- doc/source/data/api/data_representations.rst | 2 +- doc/source/data/api/dataset.rst | 166 + doc/source/data/api/datastream.rst | 166 - doc/source/data/api/from_other_data_libs.rst | 50 +- doc/source/data/api/grouped_data.rst | 2 +- doc/source/data/api/input_output.rst | 30 +- doc/source/data/batch_inference.rst | 44 +- doc/source/data/consuming-data.rst | 30 +- doc/source/data/custom-datasource.rst | 26 +- doc/source/data/data-internals.rst | 52 +- doc/source/data/data.rst | 16 +- doc/source/data/doc_code/consuming_data.py | 10 +- doc/source/data/doc_code/key_concepts.py | 4 +- doc/source/data/doc_code/loading_data.py | 84 +- .../data/doc_code/loading_data_untested.py | 20 +- doc/source/data/doc_code/quick_start.py | 16 +- doc/source/data/doc_code/saving_data.py | 10 +- doc/source/data/doc_code/tensor.py | 46 +- doc/source/data/doc_code/transforming_data.py | 16 +- doc/source/data/examples/batch_training.ipynb | 54 +- doc/source/data/examples/index.rst | 6 +- .../examples/nyc_taxi_basic_processing.ipynb | 52 +- doc/source/data/examples/ocr_example.ipynb | 16 +- doc/source/data/examples/random-access.rst | 6 +- doc/source/data/faq.rst | 50 +- doc/source/data/getting-started.rst | 42 +- doc/source/data/glossary.rst | 32 +- doc/source/data/key-concepts.rst | 62 +- doc/source/data/loading-data.rst | 142 +- doc/source/data/mars-on-ray.rst | 6 +- doc/source/data/performance-tips.rst | 40 +- doc/source/data/pipelining-compute.rst | 14 +- doc/source/data/transforming-data.rst | 48 +- doc/source/data/working-with-tensors.rst | 2 +- doc/source/ray-air/check-ingest.rst | 10 +- doc/source/ray-air/computer-vision.rst | 10 +- .../examples/analyze_tuning_results.ipynb | 6 +- ...ert_existing_pytorch_code_to_ray_air.ipynb | 4 +- .../convert_existing_tf_code_to_ray_air.ipynb | 8 +- .../examples/gptj_batch_prediction.ipynb | 2 +- .../huggingface_text_classification.ipynb | 6 +- 
.../ray-air/examples/lightgbm_example.ipynb | 8 +- .../opt_deepspeed_batch_inference.ipynb | 2 +- .../pytorch_resnet_batch_prediction.ipynb | 4 +- .../examples/pytorch_tabular_starter.py | 2 +- .../ray-air/examples/sklearn_example.ipynb | 4 +- .../stablediffusion_batch_prediction.ipynb | 2 +- .../ray-air/examples/tf_tabular_starter.py | 2 +- .../examples/tfx_tabular_train_to_serve.ipynb | 6 +- .../ray-air/examples/torch_detection.ipynb | 6 +- .../examples/torch_image_example.ipynb | 16 +- .../examples/torch_incremental_learning.ipynb | 32 +- .../ray-air/examples/upload_to_comet_ml.ipynb | 8 +- .../ray-air/examples/upload_to_wandb.ipynb | 8 +- .../ray-air/examples/xgboost_example.ipynb | 8 +- doc/source/ray-air/key-concepts.rst | 6 +- doc/source/ray-air/predictors.rst | 6 +- doc/source/ray-air/preprocessors.rst | 4 +- doc/source/ray-air/trainers.rst | 2 +- .../ray-contribute/writing-code-snippets.rst | 6 +- .../datasets_train/datasets_train.py | 12 +- doc/source/ray-overview/getting-started.md | 14 +- doc/source/ray-references/glossary.rst | 6 +- doc/source/rllib/rllib-offline.rst | 8 +- doc/source/train/dl_guide.rst | 2 +- .../lightning/lightning_cola_advanced.ipynb | 12 +- doc/source/train/getting-started.rst | 4 +- doc/source/train/key-concepts.rst | 4 +- .../tutorials/tune_get_data_in_and_out.md | 2 +- python/ray/air/examples/dreambooth/dataset.py | 4 +- python/ray/air/tests/test_api.py | 2 +- python/ray/air/util/check_ingest.py | 2 +- python/ray/data/__init__.py | 8 +- python/ray/data/_internal/arrow_block.py | 2 +- .../block_batching/block_batching.py | 14 +- .../_internal/block_batching/iter_batches.py | 10 +- .../ray/data/_internal/block_batching/util.py | 16 +- python/ray/data/_internal/block_list.py | 4 +- python/ray/data/_internal/compute.py | 10 +- ...datastream_logger.py => dataset_logger.py} | 30 +- .../execution/autoscaling_requester.py | 2 +- .../data/_internal/execution/bulk_executor.py | 12 +- .../data/_internal/execution/interfaces.py | 16 +- 
.../data/_internal/execution/legacy_compat.py | 32 +- .../operators/actor_pool_map_operator.py | 10 +- .../execution/operators/input_data_buffer.py | 2 +- .../execution/operators/map_operator.py | 2 +- .../execution/operators/output_splitter.py | 2 +- .../operators/task_pool_map_operator.py | 2 +- .../execution/operators/zip_operator.py | 2 +- .../_internal/execution/streaming_executor.py | 20 +- python/ray/data/_internal/fast_repartition.py | 14 +- .../data/_internal/iterator/iterator_impl.py | 24 +- .../_internal/iterator/pipelined_iterator.py | 16 +- .../iterator/stream_split_iterator.py | 36 +- python/ray/data/_internal/lazy_block_list.py | 22 +- .../ray/data/_internal/logical/interfaces.py | 2 +- .../logical/operators/all_to_all_operator.py | 2 +- .../logical/operators/map_operator.py | 4 +- python/ray/data/_internal/memory_tracing.py | 2 +- python/ray/data/_internal/null_aggregate.py | 2 +- python/ray/data/_internal/pandas_block.py | 6 +- .../ray/data/_internal/pipeline_executor.py | 16 +- python/ray/data/_internal/plan.py | 110 +- .../planner/exchange/sort_task_spec.py | 2 +- python/ray/data/_internal/planner/write.py | 2 +- python/ray/data/_internal/progress_bar.py | 4 +- python/ray/data/_internal/remote_fn.py | 2 +- python/ray/data/_internal/sort.py | 4 +- python/ray/data/_internal/stage_impl.py | 28 +- python/ray/data/_internal/stats.py | 112 +- python/ray/data/_internal/util.py | 20 +- python/ray/data/aggregate.py | 2 +- python/ray/data/block.py | 10 +- python/ray/data/context.py | 22 +- python/ray/data/dataset.py | 4516 +++++++++++++++- python/ray/data/dataset_pipeline.py | 286 +- python/ray/data/datasource/datasource.py | 10 +- .../data/datasource/file_based_datasource.py | 34 +- .../ray/data/datasource/file_meta_provider.py | 20 +- .../ray/data/datasource/parquet_datasource.py | 14 +- python/ray/data/datasource/partitioning.py | 16 +- .../data/datasource/webdataset_datasource.py | 4 +- python/ray/data/datastream.py | 4518 ----------------- 
python/ray/data/grouped_data.py | 93 +- python/ray/data/iterator.py | 94 +- python/ray/data/preprocessor.py | 48 +- python/ray/data/preprocessors/batch_mapper.py | 16 +- python/ray/data/preprocessors/chain.py | 14 +- python/ray/data/preprocessors/concatenator.py | 6 +- python/ray/data/preprocessors/discretizer.py | 6 +- python/ray/data/preprocessors/encoder.py | 36 +- python/ray/data/preprocessors/imputer.py | 12 +- python/ray/data/preprocessors/scaler.py | 28 +- python/ray/data/preprocessors/torch.py | 18 +- python/ray/data/preprocessors/vectorizer.py | 10 +- python/ray/data/random_access_dataset.py | 10 +- python/ray/data/read_api.py | 288 +- python/ray/data/row.py | 2 +- .../data/tests/block_batching/test_util.py | 6 +- python/ray/data/tests/conftest.py | 17 +- python/ray/data/tests/mock_server.py | 2 +- .../tests/preprocessors/test_preprocessors.py | 12 +- .../data/tests/preprocessors/test_torch.py | 34 +- python/ray/data/tests/test_all_to_all.py | 24 +- python/ray/data/tests/test_consumption.py | 65 +- .../data/tests/test_execution_optimizer.py | 10 +- python/ray/data/tests/test_huggingface.py | 2 +- python/ray/data/tests/test_logger.py | 8 +- python/ray/data/tests/test_mongo.py | 20 +- python/ray/data/tests/test_numpy.py | 8 +- python/ray/data/tests/test_parquet.py | 24 +- python/ray/data/tests/test_partitioning.py | 2 +- python/ray/data/tests/test_pipeline.py | 60 +- python/ray/data/tests/test_split.py | 6 +- python/ray/data/tests/test_stats.py | 68 +- python/ray/data/tests/test_tensor.py | 14 +- .../ray/train/_internal/backend_executor.py | 2 +- python/ray/train/_internal/dataset_spec.py | 22 +- python/ray/train/_internal/session.py | 4 +- python/ray/train/base_trainer.py | 24 +- python/ray/train/batch_predictor.py | 38 +- python/ray/train/data_parallel_trainer.py | 4 +- .../pytorch/torch_regression_example.py | 4 +- .../tf/tensorflow_autoencoder_example.py | 2 +- .../tf/tensorflow_regression_example.py | 4 +- python/ray/train/gbdt_trainer.py | 4 +- 
.../train/hf_accelerate/accelerate_trainer.py | 4 +- .../hf_transformers/_transformers_utils.py | 6 +- python/ray/train/horovod/horovod_trainer.py | 4 +- python/ray/train/lightgbm/lightgbm_trainer.py | 2 +- .../ray/train/lightning/_lightning_utils.py | 2 +- .../ray/train/lightning/lightning_trainer.py | 12 +- python/ray/train/mosaic/mosaic_trainer.py | 2 +- python/ray/train/rl/rl_trainer.py | 2 +- python/ray/train/session.py | 2 +- python/ray/train/sklearn/sklearn_trainer.py | 2 +- .../train/tensorflow/tensorflow_trainer.py | 4 +- .../ray/train/tensorflow/train_loop_utils.py | 2 +- python/ray/train/tests/test_base_trainer.py | 2 +- .../ray/train/tests/test_xgboost_trainer.py | 2 +- python/ray/train/torch/torch_trainer.py | 4 +- python/ray/train/xgboost/xgboost_trainer.py | 2 +- python/ray/tune/execution/experiment_state.py | 2 +- .../impl/out_of_band_serialize_dataset.py | 8 +- python/ray/tune/tests/test_trial_runner_3.py | 2 +- python/ray/tune/tuner.py | 2 +- python/ray/util/actor_group.py | 2 +- .../dataset/aggregate_benchmark.py | 2 +- release/nightly_tests/dataset/benchmark.py | 2 +- .../dataset/iter_batches_benchmark.py | 2 +- .../dataset/iter_tensor_batches_benchmark.py | 2 +- .../dataset/map_batches_benchmark.py | 4 +- .../dataset/operator_fusion_benchmark.py | 4 +- .../dataset/read_images_benchmark.py | 2 +- .../dataset/read_parquet_benchmark.py | 2 +- .../dataset/read_tfrecords_benchmark.py | 2 +- rllib/evaluation/rollout_worker.py | 2 +- rllib/offline/dataset_reader.py | 10 +- rllib/offline/feature_importance.py | 2 +- rllib/offline/tests/test_dataset_reader.py | 6 +- rllib/utils/metrics/window_stat.py | 2 +- 203 files changed, 6471 insertions(+), 6506 deletions(-) create mode 100644 doc/source/data/api/dataset.rst delete mode 100644 doc/source/data/api/datastream.rst rename python/ray/data/_internal/{datastream_logger.py => dataset_logger.py} (81%) delete mode 100644 python/ray/data/datastream.py diff --git a/doc/source/data/api/api.rst 
b/doc/source/data/api/api.rst index b1cd7d4ddf33..a5ce1410d1d9 100644 --- a/doc/source/data/api/api.rst +++ b/doc/source/data/api/api.rst @@ -7,7 +7,7 @@ Ray Data API :maxdepth: 2 input_output.rst - datastream.rst + dataset.rst data_iterator.rst execution_options.rst grouped_data.rst diff --git a/doc/source/data/api/data_representations.rst b/doc/source/data/api/data_representations.rst index 7e7ce12de71e..c6fa60beb563 100644 --- a/doc/source/data/api/data_representations.rst +++ b/doc/source/data/api/data_representations.rst @@ -34,7 +34,7 @@ Row API row.TableRow -.. _datastream-tensor-extension-api: +.. _dataset-tensor-extension-api: Tensor Column Extension API --------------------------- diff --git a/doc/source/data/api/dataset.rst b/doc/source/data/api/dataset.rst new file mode 100644 index 000000000000..b75481159c0b --- /dev/null +++ b/doc/source/data/api/dataset.rst @@ -0,0 +1,166 @@ +.. _dataset-api: + +Dataset API +============== + +.. currentmodule:: ray.data + +Constructor +----------- + +.. autosummary:: + :toctree: doc/ + + Dataset + +Basic Transformations +--------------------- + +.. autosummary:: + :toctree: doc/ + + Dataset.map + Dataset.map_batches + Dataset.flat_map + Dataset.filter + Dataset.add_column + Dataset.drop_columns + Dataset.select_columns + Dataset.random_sample + Dataset.limit + +Sorting, Shuffling, Repartitioning +---------------------------------- + +.. autosummary:: + :toctree: doc/ + + Dataset.sort + Dataset.random_shuffle + Dataset.randomize_block_order + Dataset.repartition + +Splitting and Merging Datasets +--------------------------------- + +.. autosummary:: + :toctree: doc/ + + Dataset.split + Dataset.split_at_indices + Dataset.split_proportionately + Dataset.streaming_split + Dataset.train_test_split + Dataset.union + Dataset.zip + +Grouped and Global Aggregations +------------------------------- + +.. 
autosummary:: + :toctree: doc/ + + Dataset.groupby + Dataset.aggregate + Dataset.sum + Dataset.min + Dataset.max + Dataset.mean + Dataset.std + +Converting to Pipeline +---------------------- + +.. autosummary:: + :toctree: doc/ + + Dataset.repeat + Dataset.window + +Consuming Data +--------------------- + +.. autosummary:: + :toctree: doc/ + + Dataset.show + Dataset.take + Dataset.take_batch + Dataset.take_all + Dataset.iterator + Dataset.iter_rows + Dataset.iter_batches + Dataset.iter_torch_batches + Dataset.iter_tf_batches + +I/O and Conversion +------------------ + +.. autosummary:: + :toctree: doc/ + + Dataset.write_parquet + Dataset.write_json + Dataset.write_csv + Dataset.write_numpy + Dataset.write_tfrecords + Dataset.write_webdataset + Dataset.write_mongo + Dataset.write_datasource + Dataset.to_torch + Dataset.to_tf + Dataset.to_dask + Dataset.to_mars + Dataset.to_modin + Dataset.to_spark + Dataset.to_pandas + Dataset.to_pandas_refs + Dataset.to_numpy_refs + Dataset.to_arrow_refs + Dataset.to_random_access_dataset + +Inspecting Metadata +------------------- + +.. autosummary:: + :toctree: doc/ + + Dataset.count + Dataset.schema + Dataset.default_batch_format + Dataset.num_blocks + Dataset.size_bytes + Dataset.input_files + Dataset.stats + Dataset.get_internal_block_refs + +Execution +--------- + +.. autosummary:: + :toctree: doc/ + + Dataset.materialize + ActorPoolStrategy + +Serialization +------------- + +.. autosummary:: + :toctree: doc/ + + Dataset.has_serializable_lineage + Dataset.serialize_lineage + Dataset.deserialize_lineage + +Internals +--------- + +.. autosummary:: + :toctree: doc/ + + Dataset.__init__ + Dataset.dataset_format + Dataset.fully_executed + Dataset.is_fully_executed + Dataset.lazy diff --git a/doc/source/data/api/datastream.rst b/doc/source/data/api/datastream.rst deleted file mode 100644 index 9f0404a6f327..000000000000 --- a/doc/source/data/api/datastream.rst +++ /dev/null @@ -1,166 +0,0 @@ -.. 
_datastream-api: - -Datastream API -============== - -.. currentmodule:: ray.data - -Constructor ------------ - -.. autosummary:: - :toctree: doc/ - - Datastream - -Basic Transformations ---------------------- - -.. autosummary:: - :toctree: doc/ - - Datastream.map - Datastream.map_batches - Datastream.flat_map - Datastream.filter - Datastream.add_column - Datastream.drop_columns - Datastream.select_columns - Datastream.random_sample - Datastream.limit - -Sorting, Shuffling, Repartitioning ----------------------------------- - -.. autosummary:: - :toctree: doc/ - - Datastream.sort - Datastream.random_shuffle - Datastream.randomize_block_order - Datastream.repartition - -Splitting and Merging Datastreams ---------------------------------- - -.. autosummary:: - :toctree: doc/ - - Datastream.split - Datastream.split_at_indices - Datastream.split_proportionately - Datastream.streaming_split - Datastream.train_test_split - Datastream.union - Datastream.zip - -Grouped and Global Aggregations -------------------------------- - -.. autosummary:: - :toctree: doc/ - - Datastream.groupby - Datastream.aggregate - Datastream.sum - Datastream.min - Datastream.max - Datastream.mean - Datastream.std - -Converting to Pipeline ----------------------- - -.. autosummary:: - :toctree: doc/ - - Datastream.repeat - Datastream.window - -Consuming Data ---------------------- - -.. autosummary:: - :toctree: doc/ - - Datastream.show - Datastream.take - Datastream.take_batch - Datastream.take_all - Datastream.iterator - Datastream.iter_rows - Datastream.iter_batches - Datastream.iter_torch_batches - Datastream.iter_tf_batches - -I/O and Conversion ------------------- - -.. 
autosummary:: - :toctree: doc/ - - Datastream.write_parquet - Datastream.write_json - Datastream.write_csv - Datastream.write_numpy - Datastream.write_tfrecords - Datastream.write_webdataset - Datastream.write_mongo - Datastream.write_datasource - Datastream.to_torch - Datastream.to_tf - Datastream.to_dask - Datastream.to_mars - Datastream.to_modin - Datastream.to_spark - Datastream.to_pandas - Datastream.to_pandas_refs - Datastream.to_numpy_refs - Datastream.to_arrow_refs - Datastream.to_random_access_dataset - -Inspecting Metadata -------------------- - -.. autosummary:: - :toctree: doc/ - - Datastream.count - Datastream.schema - Datastream.default_batch_format - Datastream.num_blocks - Datastream.size_bytes - Datastream.input_files - Datastream.stats - Datastream.get_internal_block_refs - -Execution ---------- - -.. autosummary:: - :toctree: doc/ - - Datastream.materialize - ActorPoolStrategy - -Serialization -------------- - -.. autosummary:: - :toctree: doc/ - - Datastream.has_serializable_lineage - Datastream.serialize_lineage - Datastream.deserialize_lineage - -Internals ---------- - -.. autosummary:: - :toctree: doc/ - - Datastream.__init__ - Datastream.dataset_format - Datastream.fully_executed - Datastream.is_fully_executed - Datastream.lazy diff --git a/doc/source/data/api/from_other_data_libs.rst b/doc/source/data/api/from_other_data_libs.rst index 054119c874b3..f3fe2e448b05 100644 --- a/doc/source/data/api/from_other_data_libs.rst +++ b/doc/source/data/api/from_other_data_libs.rst @@ -12,7 +12,7 @@ libraries, so you can quickly map what you may already know to Ray Data APIs. - This is meant to map APIs that perform comparable but not necessarily identical operations. Please check the API reference for exact semantics and usage. - - This list may not be exhaustive: Ray Data is not a traditional ETL data processing library, so not all data processing APIs can map to Datastreams. 
+ - This list may not be exhaustive: Ray Data is not a traditional ETL data processing library, so not all data processing APIs can map to Datasets. In addition, we try to focus on common APIs or APIs that are less obvious to see a connection. .. _api-guide-for-pandas-users: @@ -26,41 +26,41 @@ For Pandas Users * - Pandas DataFrame API - Ray Data API * - df.head() - - :meth:`ds.show() `, :meth:`ds.take() `, or :meth:`ds.take_batch() ` + - :meth:`ds.show() `, :meth:`ds.take() `, or :meth:`ds.take_batch() ` * - df.dtypes - - :meth:`ds.schema() ` + - :meth:`ds.schema() ` * - len(df) or df.shape[0] - - :meth:`ds.count() ` + - :meth:`ds.count() ` * - df.truncate() - - :meth:`ds.limit() ` + - :meth:`ds.limit() ` * - df.iterrows() - - :meth:`ds.iter_rows() ` + - :meth:`ds.iter_rows() ` * - df.drop() - - :meth:`ds.drop_columns() ` + - :meth:`ds.drop_columns() ` * - df.transform() - - :meth:`ds.map_batches() ` or :meth:`ds.map() ` + - :meth:`ds.map_batches() ` or :meth:`ds.map() ` * - df.groupby() - - :meth:`ds.groupby() ` + - :meth:`ds.groupby() ` * - df.groupby().apply() - :meth:`ds.groupby().map_groups() ` * - df.sample() - - :meth:`ds.random_sample() ` + - :meth:`ds.random_sample() ` * - df.sort_values() - - :meth:`ds.sort() ` + - :meth:`ds.sort() ` * - df.append() - - :meth:`ds.union() ` + - :meth:`ds.union() ` * - df.aggregate() - - :meth:`ds.aggregate() ` + - :meth:`ds.aggregate() ` * - df.min() - - :meth:`ds.min() ` + - :meth:`ds.min() ` * - df.max() - - :meth:`ds.max() ` + - :meth:`ds.max() ` * - df.sum() - - :meth:`ds.sum() ` + - :meth:`ds.sum() ` * - df.mean() - - :meth:`ds.mean() ` + - :meth:`ds.mean() ` * - df.std() - - :meth:`ds.std() ` + - :meth:`ds.std() ` .. 
_api-guide-for-pyarrow-users: @@ -73,16 +73,16 @@ For PyArrow Users * - PyArrow Table API - Ray Data API * - pa.Table.schema - - :meth:`ds.schema() ` + - :meth:`ds.schema() ` * - pa.Table.num_rows - - :meth:`ds.count() ` + - :meth:`ds.count() ` * - pa.Table.filter() - - :meth:`ds.filter() ` + - :meth:`ds.filter() ` * - pa.Table.drop() - - :meth:`ds.drop_columns() ` + - :meth:`ds.drop_columns() ` * - pa.Table.add_column() - - :meth:`ds.add_column() ` + - :meth:`ds.add_column() ` * - pa.Table.groupby() - - :meth:`ds.groupby() ` + - :meth:`ds.groupby() ` * - pa.Table.sort_by() - - :meth:`ds.sort() ` + - :meth:`ds.sort() ` diff --git a/doc/source/data/api/grouped_data.rst b/doc/source/data/api/grouped_data.rst index 2b2e74721477..fce6a8d9705e 100644 --- a/doc/source/data/api/grouped_data.rst +++ b/doc/source/data/api/grouped_data.rst @@ -5,7 +5,7 @@ GroupedData API .. currentmodule:: ray.data -GroupedData objects are returned by groupby call: Datastream.groupby(). +GroupedData objects are returned by groupby call: Dataset.groupby(). 
Constructor ----------- diff --git a/doc/source/data/api/input_output.rst b/doc/source/data/api/input_output.rst index 019ecd986939..e5cef0988e8a 100644 --- a/doc/source/data/api/input_output.rst +++ b/doc/source/data/api/input_output.rst @@ -30,7 +30,7 @@ Parquet read_parquet read_parquet_bulk - Datastream.write_parquet + Dataset.write_parquet CSV --- @@ -39,7 +39,7 @@ CSV :toctree: doc/ read_csv - Datastream.write_csv + Dataset.write_csv JSON ---- @@ -48,7 +48,7 @@ JSON :toctree: doc/ read_json - Datastream.write_json + Dataset.write_json Text ---- @@ -81,7 +81,7 @@ TFRecords :toctree: doc/ read_tfrecords - Datastream.write_tfrecords + Dataset.write_tfrecords Pandas @@ -92,8 +92,8 @@ Pandas from_pandas from_pandas_refs - Datastream.to_pandas - Datastream.to_pandas_refs + Dataset.to_pandas + Dataset.to_pandas_refs NumPy ----- @@ -104,8 +104,8 @@ NumPy read_numpy from_numpy from_numpy_refs - Datastream.write_numpy - Datastream.to_numpy_refs + Dataset.write_numpy + Dataset.to_numpy_refs Arrow ----- @@ -115,7 +115,7 @@ Arrow from_arrow from_arrow_refs - Datastream.to_arrow_refs + Dataset.to_arrow_refs MongoDB ------- @@ -124,7 +124,7 @@ MongoDB :toctree: doc/ read_mongo - Datastream.write_mongo + Dataset.write_mongo SQL Databases ------------- @@ -141,7 +141,7 @@ Dask :toctree: doc/ from_dask - Datastream.to_dask + Dataset.to_dask Spark ----- @@ -150,7 +150,7 @@ Spark :toctree: doc/ from_spark - Datastream.to_spark + Dataset.to_spark Modin ----- @@ -159,7 +159,7 @@ Modin :toctree: doc/ from_modin - Datastream.to_modin + Dataset.to_modin Mars ---- @@ -168,7 +168,7 @@ Mars :toctree: doc/ from_mars - Datastream.to_mars + Dataset.to_mars Torch ----- @@ -211,7 +211,7 @@ Datasource API :toctree: doc/ read_datasource - Datastream.write_datasource + Dataset.write_datasource Datasource ReadTask datasource.Reader diff --git a/doc/source/data/batch_inference.rst b/doc/source/data/batch_inference.rst index 8ca2a09f8016..8ab5197a18b9 100644 --- 
a/doc/source/data/batch_inference.rst +++ b/doc/source/data/batch_inference.rst @@ -76,7 +76,7 @@ Running batch inference is conceptually easy and requires three steps: 1. Load your data and optionally apply any preprocessing you need. 2. Define your model for inference. -3. Run inference on your data by using the :meth:`ds.map_batches() ` +3. Run inference on your data by using the :meth:`ds.map_batches() ` method from Ray Data. The last step also defines how your batch processing job gets distributed across your (local) cluster. @@ -96,7 +96,7 @@ For this quick start guide we use very small, in-memory datasets by leveraging common Python libraries like NumPy and Pandas. In general, once you load your data using Ray Data, you also want to apply some preprocessing steps. We skip this step here for simplicity. -In any case, the result of this step is a Ray Datastream ``ds`` that we can use to run inference on. +In any case, the result of this step is a Ray Dataset ``ds`` that we can use to run inference on. .. margin:: @@ -192,8 +192,8 @@ Below you find examples for PyTorch, TensorFlow, and HuggingFace. 3. Getting predictions with Ray Data ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Once you have your Ray Datastream ``ds`` and your predictor class, you can use -:meth:`ds.map_batches() ` to get predictions. +Once you have your Ray Dataset ``ds`` and your predictor class, you can use +:meth:`ds.map_batches() ` to get predictions. ``map_batches`` takes your predictor class as an argument and allows you to specify ``compute`` resources by defining the :class:`ActorPoolStrategy `. In the example below, we use two CPUs to run inference in parallel and then print the results. @@ -284,18 +284,18 @@ to learn more about loading data with Ray Data, but we'll cover the basics here, .. 
hint:: - With Ray Data, you can :ref:`create synthetic data in Python `, - :ref:`load data from various storage solutions ` such as S3, + With Ray Data, you can :ref:`create synthetic data in Python `, + :ref:`load data from various storage solutions ` such as S3, HDFS, or GCS, using common formats such as CSV, JSON, Text, Images, Binary, TFRecords, Parquet, and more. Ray Data also supports reading from common SQL and NoSQL databases, and allows you to define your own, custom data sources. - You can also read :ref:`common Python library formats ` + You can also read :ref:`common Python library formats ` such as Pandas, NumPy, Arrow, or plain Python objects, as well as from - :ref:`distributed data processing frameworks ` + :ref:`distributed data processing frameworks ` such as Spark, Dask, Modin, or Mars. - Of course, Ray Data also supports :ref:`reading data from common ML frameworks ` + Of course, Ray Data also supports :ref:`reading data from common ML frameworks ` like PyTorch, TensorFlow or HuggingFace. .. callout:: @@ -325,12 +325,12 @@ This may include cropping or resizing images, or tokenizing raw text. To introduce common terminology, with :ref:`Ray Data ` you can define user-defined functions that transform batches of your data. As you've seen before, applying these functions via -:meth:`ds.map_batches() ` outputs a new, transformed datastream. +:meth:`ds.map_batches() ` outputs a new, transformed dataset. .. note:: The way we do preprocessing here is conceptually close to how we do batch - inference, and we use the same :meth:`ds.map_batches() ` + inference, and we use the same :meth:`ds.map_batches() ` call from Ray Data to run this task. The main difference is that we don't use a machine learning model to transform our data, which has some practical consequences. For instance, in the example below we simply @@ -351,7 +351,7 @@ the ``torchvision`` package to define a function called ``preprocess_images``. 
<2> We then define a simple function to transform batches of raw data accordingly. Note that these batches come as dictionaries of NumPy images stored in the ``"images"`` key. - <3> Finally, we apply the function to our datastream using ``map_batches``. + <3> Finally, we apply the function to our dataset using ``map_batches``. .. tip:: @@ -368,8 +368,8 @@ for inference since the model only needs to be initialized once, instead of per .. margin:: In short, running model inference means applying - :meth:`ds.map_batches() ` - to a datastream with a trained model as a class. + :meth:`ds.map_batches() ` + to a dataset with a trained model as a class. You've already seen how to do this in the quickstart section of this guide, but now that you're equipped with more knowledge, let's have a look at how to define a @@ -395,7 +395,7 @@ stateful class with Ray for our pretrained ResNet model: Scalable inference with Ray Data ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -To get predictions, we call :meth:`ds.map_batches() `, +To get predictions, we call :meth:`ds.map_batches() `, by making sure to specify a :class:`ActorPoolStrategy ` which defines how many workers to use for inference. @@ -407,19 +407,19 @@ which defines how many workers to use for inference. :end-before: __pt_prediction_end__ .. annotations:: - <1> In this example we use a total of four Ray Actors to run inference on our datastream. + <1> In this example we use a total of four Ray Actors to run inference on our dataset. <2> Each actor should use one GPU. -To summarize, mapping a function over batches is the simplest transform for Ray Datastreams. -The function defines the logic for transforming individual batches of data of the datastream +To summarize, mapping a function over batches is the simplest transform for Ray Datasets. 
+The function defines the logic for transforming individual batches of data of the dataset Performing operations over batches of data is more performant than single element operations as it can leverage the underlying vectorization capabilities of Pandas or NumPy. .. note:: - You can use :meth:`ds.map_batches() ` on functions, too. + You can use :meth:`ds.map_batches() ` on functions, too. This is mostly useful for quick transformations of your data that doesn't require an ML model or other stateful objects. To handle state, using classes like we did above is the recommended way. @@ -513,7 +513,7 @@ You can learn more about output formats in :ref:`the transforming data guide ` + As we've discussed in this guide, using :meth:`ds.map_batches() ` on a class defining your model should be your default choice for running inference with Ray. For instance, if you're already using the Ray AIR framework for running your ML workflows, @@ -538,7 +538,7 @@ Configuration & Troubleshooting Configuring Batch Size ~~~~~~~~~~~~~~~~~~~~~~ -An important parameter to set for :meth:`ds.map_batches() ` +An important parameter to set for :meth:`ds.map_batches() ` is ``batch_size``, which controls the size of the batches provided to the function. Here's a simple example of loading the IRIS dataset (which has Pandas format by default) and processing it with a batch size of `10`: @@ -585,7 +585,7 @@ Here's a quick example for a PyTorch model: + return {"class": prediction.argmax(dim=1).detach().cpu().numpy()} -Next, specify ``num_gpus=N`` in :meth:`ds.map_batches() ` +Next, specify ``num_gpus=N`` in :meth:`ds.map_batches() ` to indicate that each inference worker should use ``N`` GPUs. .. 
code-block:: diff diff --git a/doc/source/data/consuming-data.rst b/doc/source/data/consuming-data.rst index 7933a6c1af62..83af18c68f2b 100644 --- a/doc/source/data/consuming-data.rst +++ b/doc/source/data/consuming-data.rst @@ -4,7 +4,7 @@ Consuming Data ===================== -The data underlying a ``Datastream`` can be consumed in several ways: +The data underlying a ``Dataset`` can be consumed in several ways: * Retrieving a limited prefix of rows. * Iterating over rows and batches. @@ -13,9 +13,9 @@ The data underlying a ``Datastream`` can be consumed in several ways: Retrieving a limited set of rows ================================ -A limited set of rows can be retrieved from a ``Datastream`` via the -:meth:`ds.take() ` or :meth:`ds.take_batch() ` -APIs, and :meth:`ds.show() `, for printing a limited set of rows. These +A limited set of rows can be retrieved from a ``Dataset`` via the +:meth:`ds.take() ` or :meth:`ds.take_batch() ` +APIs, and :meth:`ds.show() `, for printing a limited set of rows. These methods are convenient for quickly inspecting a subset (prefix) of rows. .. literalinclude:: ./doc_code/consuming_data.py @@ -23,11 +23,11 @@ methods are convenient for quickly inspecting a subset (prefix) of rows. :start-after: __take_begin__ :end-before: __take_end__ -Iterating over Datastreams +Iterating over Datasets ========================== -Datastreams can be consumed a row at a time using the -:meth:`ds.iter_rows() ` API +Datasets can be consumed a row at a time using the +:meth:`ds.iter_rows() ` API .. literalinclude:: ./doc_code/consuming_data.py :language: python @@ -35,7 +35,7 @@ Datastreams can be consumed a row at a time using the :end-before: __iter_rows_end__ or a batch at a time using the -:meth:`ds.iter_batches() ` API, where you can specify +:meth:`ds.iter_batches() ` API, where you can specify batch size as well as the desired batch format. By default, the batches have type ``Dict[str, np.ndarray]`` (NumPy format). 
@@ -44,8 +44,8 @@ batch size as well as the desired batch format. By default, the batches have typ :start-after: __iter_batches_begin__ :end-before: __iter_batches_end__ -Datastreams can be passed to Ray tasks or actors and accessed by these iteration methods. -This does not incur a copy, since the blocks of the Datastream are passed by reference as Ray objects: +Datasets can be passed to Ray tasks or actors and accessed by these iteration methods. +This does not incur a copy, since the blocks of the Dataset are passed by reference as Ray objects: .. literalinclude:: ./doc_code/consuming_data.py :language: python @@ -56,12 +56,12 @@ This does not incur a copy, since the blocks of the Datastream are passed by ref Splitting Into and Consuming Shards =================================== -Datastreams can be split up into disjoint iterators, or shards. +Datasets can be split up into disjoint iterators, or shards. This is a common pattern useful for loading and sharding data between distributed training actors: .. note:: - If using :ref:`Ray Train ` for distributed training, you do not need to split the datastream; Ray + If using :ref:`Ray Train ` for distributed training, you do not need to split the dataset; Ray Train will automatically do locality-aware splitting into per-trainer shards for you. .. literalinclude:: ./doc_code/consuming_data.py @@ -74,10 +74,10 @@ This is a common pattern useful for loading and sharding data between distribute Saving Data ================== -Datastreams can be written to local or remote storage in the desired data format. +Datasets can be written to local or remote storage in the desired data format. The supported formats include Parquet, CSV, JSON, NumPy. To control the number -of output files, you may use :meth:`ds.repartition() ` -to repartition the Datastream before writing out. +of output files, you may use :meth:`ds.repartition() ` +to repartition the Dataset before writing out. .. 
tab-set:: diff --git a/doc/source/data/custom-datasource.rst b/doc/source/data/custom-datasource.rst index a9fc7ca29051..584b927f39ff 100644 --- a/doc/source/data/custom-datasource.rst +++ b/doc/source/data/custom-datasource.rst @@ -7,18 +7,18 @@ Custom Datasources .. note:: This MongoDatasource guide below is for education only. For production use of MongoDB - in Ray Data, see :ref:`Creating Datastream from MongoDB `. + in Ray Data, see :ref:`Creating Dataset from MongoDB `. -Ray Data supports multiple ways to :ref:`create a datastream `, +Ray Data supports multiple ways to :ref:`create a dataset `, allowing you to easily ingest data of common formats from popular sources. However, if the datasource you want to read from is not in the built-in list, don't worry, you can implement a custom one for your use case. In this guide, we will walk you through how to build your own custom datasource, using `MongoDB `__ as an example. -By the end of the guide, you will have a ``MongoDatasource`` that you can use to create datastream as follows: +By the end of the guide, you will have a ``MongoDatasource`` that you can use to create a dataset as follows: .. code-block:: python - # Read from custom MongoDB datasource to create a datastream. + # Read from custom MongoDB datasource to create a dataset. ds = ray.data.read_datasource( MongoDatasource(), uri=MY_URI, @@ -27,7 +27,7 @@ By the end of the guide, you will have a ``MongoDatasource`` that you can use to pipelines=MY_PIPELINES ) - # Write the datastream to custom MongoDB datasource. + # Write the dataset to custom MongoDB datasource. ds.write_datasource( MongoDatasource(), uri=MY_URI, database=MY_DATABASE, collection=MY_COLLECTION ) @@ -38,7 +38,7 @@ By the end of the guide, you will have a ``MongoDatasource`` that you can use to a MongoDB instance, which hosts `Databases and Collections `__. A collection is analogous to a table in SQL databases. 
MongoDB also has a `pipeline `__ concept, which expresses document processing in a series of stages (e.g. match documents with a predicate, sort results, and then select a few fields). - The execution results of the pipelines are used to create datastream. + The execution results of the pipelines are used to create a dataset. A custom datasource is an implementation of :class:`~ray.data.Datasource`. In the example here, let's call it ``MongoDatasource``. At a high level, it will have two @@ -50,7 +50,7 @@ core parts to build out: Here are the key design choices we will make in this guide: - **MongoDB connector**: We use `PyMongo `__ to connect to MongoDB. -- **MongoDB to Arrow conversion**: We use `PyMongoArrow `__ to convert MongoDB execution results into Arrow tables, which Datastreams supports as a data format. +- **MongoDB to Arrow conversion**: We use `PyMongoArrow `__ to convert MongoDB execution results into Arrow tables, which Datasets support as a data format. - **Parallel execution**: We ask the user to provide a list of MongoDB pipelines, with each corresponding to a partition of the MongoDB collection, which will be executed in parallel with :class:`~ray.data.ReadTask`. For example, suppose you have a MongoDB collection with 4 documents, which have a ``partition_field`` with values 0, 1, 2, 3. @@ -94,7 +94,7 @@ MongoDB. This ``Reader`` creates a list of :class:`~ray.data.ReadTask` for the g list of MongoDB pipelines. Each :class:`~ray.data.ReadTask` returns a list of blocks when called, and each :class:`~ray.data.ReadTask` is executed in remote workers to parallelize the execution. -You can find documentation about Ray Data :ref:`block concept here ` and :ref:`block APIs here `. +You can find documentation about Ray Data :ref:`block concept here ` and :ref:`block APIs here `. First, let's handle a single MongoDB pipeline, which is the unit of execution in :class:`~ray.data.ReadTask`. 
We need to connect to MongoDB, execute the pipeline against it, @@ -119,7 +119,7 @@ The :class:`~ray.data.block.BlockMetadata` contains metadata like number of rows that we know about the block prior to actually executing the read task; the no-arg read function is just a wrapper of ``_read_single_partition``. A list of :class:`~ray.data.ReadTask` objects are returned by ``get_read_tasks``, and these -tasks are executed on remote workers. You can find more details about `Datastream read execution here `__. +tasks are executed on remote workers. You can find more details about `Dataset read execution here `__. .. literalinclude:: ./doc_code/custom_datasource.py :language: python @@ -169,12 +169,12 @@ a ``MongoDatasource``. :start-after: __mongo_datasource_start__ :end-before: __mongo_datasource_end__ -Now you can create a Datastream from and write back to MongoDB, just like +Now you can create a Dataset from and write back to MongoDB, just like any other datasource! .. code-block:: python - # Read from MongoDB datasource and create a datastream. + # Read from MongoDB datasource and create a dataset. # The args are passed to MongoDatasource.create_reader(). ds = ray.data.read_datasource( MongoDatasource(), @@ -184,10 +184,10 @@ any other datasource! pipelines=my_pipelines, # See the example definition of ``my_pipelines`` above ) - # Data preprocessing with Datastream APIs here + # Data preprocessing with Dataset APIs here # ... - # Write the datastream back to MongoDB datasource. + # Write the dataset back to MongoDB datasource. # The args are passed to MongoDatasource.do_write(). ds.write_datasource( MongoDatasource(), diff --git a/doc/source/data/data-internals.rst b/doc/source/data/data-internals.rst index 7bcec8ddfafd..e040a7fa08a1 100644 --- a/doc/source/data/data-internals.rst +++ b/doc/source/data/data-internals.rst @@ -1,4 +1,4 @@ -.. _datastreams_scheduling: +.. 
_datasets_scheduling: ============================================ Scheduling, Execution, and Memory Management @@ -11,9 +11,9 @@ Ray Data uses Ray core for execution, and hence is subject to the same schedulin * The ``SPREAD`` scheduling strategy is used to ensure data blocks are evenly balanced across the cluster. * Retries of application-level exceptions are enabled to handle transient errors from remote datasources. -* Datastream tasks ignore placement groups by default, see :ref:`Ray Data and Placement Groups `. +* Dataset tasks ignore placement groups by default, see :ref:`Ray Data and Placement Groups `. -.. _datastreams_tune: +.. _datasets_tune: Ray Data and Tune ~~~~~~~~~~~~~~~~~ @@ -27,7 +27,7 @@ To ensure CPU resources are always available for Ray Data execution, limit the n :start-after: __resource_allocation_1_begin__ :end-before: __resource_allocation_1_end__ -.. _datastreams_pg: +.. _datasets_pg: Ray Data and Placement Groups ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -36,27 +36,27 @@ By default, Ray Data configures its tasks and actors to use the cluster-default :class:`ray.data.DataContext.get_current().scheduling_strategy `. This scheduling strategy will schedule these tasks and actors outside any present placement group. If you want to force Ray Data to schedule tasks within the current placement group (i.e., to use current placement group resources specifically for Ray Data), you can set ``ray.data.DataContext.get_current().scheduling_strategy = None``. -This should be considered for advanced use cases to improve performance predictability only. We generally recommend letting Ray Data run outside placement groups as documented in the :ref:`Ray Data and Other Libraries ` section. +This should be considered for advanced use cases to improve performance predictability only. We generally recommend letting Ray Data run outside placement groups as documented in the :ref:`Ray Data and Other Libraries ` section. -.. _datastream_execution: +.. 
_dataset_execution: Execution ========= Ray Data execution by default is: -- **Lazy**: This means that transformations on Datastream are not executed until a - consumption operation (e.g. :meth:`ds.iter_batches() `) - or :meth:`Datastream.materialize() ` is called. This creates - opportunities for optimizing the execution plan (e.g. :ref:`stage fusion `). -- **Streaming**: This means that Datastream transformations will be executed in a +- **Lazy**: This means that transformations on Dataset are not executed until a + consumption operation (e.g. :meth:`ds.iter_batches() `) + or :meth:`Dataset.materialize() ` is called. This creates + opportunities for optimizing the execution plan (e.g. :ref:`stage fusion `). +- **Streaming**: This means that Dataset transformations will be executed in a streaming way, incrementally on the base data, instead of on all of the data at once, and overlapping the execution of operations. This can be used for streaming data loading into ML training to overlap the data preprocessing and model training, - or to execute batch transformations on large datastreams without needing to load the - entire datastream into cluster memory. + or to execute batch transformations on large datasets without needing to load the + entire dataset into cluster memory. -.. _datastreams_lazy_execution: +.. _datasets_lazy_execution: Lazy Execution ~~~~~~~~~~~~~~ @@ -64,13 +64,13 @@ Lazy Execution Lazy execution offers opportunities for improved performance and memory stability due to stage fusion optimizations and aggressive garbage collection of intermediate results. -Datastream creation and transformation APIs are lazy, with execution only triggered via "sink" -APIs, such as consuming (:meth:`ds.iter_batches() `), -writing (:meth:`ds.write_parquet() `), or manually triggering via -:meth:`ds.materialize() `. 
There are a few +Dataset creation and transformation APIs are lazy, with execution only triggered via "sink" +APIs, such as consuming (:meth:`ds.iter_batches() `), +writing (:meth:`ds.write_parquet() `), or manually triggering via +:meth:`ds.materialize() `. There are a few exceptions to this rule, where transformations such as :meth:`ds.union() -` and -:meth:`ds.limit() ` trigger execution; we plan to make these +` and +:meth:`ds.limit() ` trigger execution; we plan to make these operations lazy in the future. Check the API docs for Ray Data methods to see if they @@ -83,7 +83,7 @@ Streaming Execution ~~~~~~~~~~~~~~~~~~~ The following code is a hello world example which invokes the execution with -:meth:`ds.iter_batches() ` consumption. We will also enable verbose progress reporting, which shows per-operator progress in addition to overall progress. +:meth:`ds.iter_batches() ` consumption. We will also enable verbose progress reporting, which shows per-operator progress in addition to overall progress. .. code-block:: @@ -191,7 +191,7 @@ Scalability ----------- We expect the data streaming backend to scale to tens of thousands of files / blocks and up to hundreds of terabytes of data. Please report if you experience performance degradation at these scales, we would be very interested to investigate! -.. _datastreams_stage_fusion: +.. _datasets_stage_fusion: Stage Fusion Optimization ~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -205,10 +205,10 @@ lazy operations that are compatible: Read stages and subsequent map-like transformations will usually be fused together. All-to-all transformations such as -:meth:`ds.random_shuffle() ` can be fused with earlier +:meth:`ds.random_shuffle() ` can be fused with earlier map-like stages, but not later stages. -You can tell if stage fusion is enabled by checking the :ref:`Datastream stats ` and looking for fused stages (e.g., ``read->map_batches``). 
+You can tell if stage fusion is enabled by checking the :ref:`Dataset stats ` and looking for fused stages (e.g., ``read->map_batches``). .. code-block:: @@ -229,7 +229,7 @@ During execution, a task can read multiple input blocks, and write multiple outp Ray Data attempts to bound its heap memory usage to `num_execution_slots * max_block_size`. The number of execution slots is by default equal to the number of CPUs, unless custom resources are specified. The maximum block size is set by the configuration parameter `ray.data.DataContext.target_max_block_size` and is set to 512MiB by default. When a task's output is larger than this value, the worker will automatically split the output into multiple smaller blocks to avoid running out of heap memory. -Large block size can lead to potential out-of-memory situations. To avoid these issues, make sure no single item in your Ray Data is too large, and always call :meth:`ds.map_batches() ` with batch size small enough such that the output batch can comfortably fit into memory. +Large block size can lead to potential out-of-memory situations. To avoid these issues, make sure no single item in your Ray Data is too large, and always call :meth:`ds.map_batches() ` with batch size small enough such that the output batch can comfortably fit into memory. Object Store Memory ~~~~~~~~~~~~~~~~~~~ @@ -238,7 +238,7 @@ Ray Data uses the Ray object store to store data blocks, which means it inherits * Object Spilling: Since Ray Data uses the Ray object store to store data blocks, any blocks that can't fit into object store memory are automatically spilled to disk. The objects are automatically reloaded when needed by downstream compute tasks: * Locality Scheduling: Ray will preferentially schedule compute tasks on nodes that already have a local copy of the object, reducing the need to transfer objects between nodes in the cluster. 
-* Reference Counting: Datastream blocks are kept alive by object store reference counting as long as there is any Datastream that references them. To free memory, delete any Python references to the Datastream object. +* Reference Counting: Dataset blocks are kept alive by object store reference counting as long as there is any Dataset that references them. To free memory, delete any Python references to the Dataset object. Block Data Formats ~~~~~~~~~~~~~~~~~~ diff --git a/doc/source/data/data.rst b/doc/source/data/data.rst index 08744ed5c90e..1fd599e9b2f9 100644 --- a/doc/source/data/data.rst +++ b/doc/source/data/data.rst @@ -10,16 +10,16 @@ Ray Data: Distributed ML Preprocessing Ray Data is the standard way to load and exchange data in Ray libraries and applications. It provides streaming distributed transformations such as maps -(:meth:`map_batches `), +(:meth:`map_batches `), global and grouped aggregations (:class:`GroupedData `), and -shuffling operations (:meth:`random_shuffle `, -:meth:`sort `, -:meth:`repartition `), +shuffling operations (:meth:`random_shuffle `, +:meth:`sort `, +:meth:`repartition `), and is compatible with a variety of file formats, data sources, and distributed frameworks. Read on for an overview of the main use cases and operations supported by Ray Data. -.. image:: images/datastream.svg +.. image:: images/dataset.svg .. https://docs.google.com/drawings/d/16AwJeBNR46_TsrkOmMbGaBK7u-OPsf_V8fHjU-d2PPQ/edit @@ -29,7 +29,7 @@ Streaming Batch Inference ------------------------- Ray Data simplifies general purpose parallel GPU and CPU compute in Ray through its -powerful :ref:`Datastream ` primitive. Datastreams enable workloads such as +powerful :ref:`Dataset ` primitive. Datasets enable workloads such as :ref:`GPU batch inference ` to run efficiently on large datasets, maximizing resource utilization by keeping the working data fitting into Ray object store memory. 
@@ -56,7 +56,7 @@ Ray Data serves as a last-mile bridge from storage or ETL pipeline outputs to di applications and libraries in Ray. Don't use it as a replacement for more general data processing systems. -.. image:: images/datastream-loading-1.png +.. image:: images/dataset-loading-1.png :width: 650px :align: center @@ -98,7 +98,7 @@ Advanced users can refer directly to the Ray Data :ref:`API reference ^^^ Understand the key concepts behind Ray Data. - Learn what :ref:`Datastreams ` are and how they are executed in Ray + Learn what :ref:`Datasets ` are and how they are executed in Ray Data. +++ diff --git a/doc/source/data/doc_code/consuming_data.py b/doc/source/data/doc_code/consuming_data.py index 63a3d05514bc..3f7a41806dbb 100644 --- a/doc/source/data/doc_code/consuming_data.py +++ b/doc/source/data/doc_code/consuming_data.py @@ -33,7 +33,7 @@ ds = ray.data.range(10000) num_rows = 0 -# Consume all rows in the Datastream. +# Consume all rows in the Dataset. for row in ds.iter_rows(): assert isinstance(row, dict) num_rows += 1 @@ -51,7 +51,7 @@ ds = ray.data.range(10000) num_batches = 0 -# Consume all batches in the Datastream. +# Consume all batches in the Dataset. for batch in ds.iter_batches(batch_size=2): assert isinstance(batch, dict) num_batches += 1 @@ -63,7 +63,7 @@ cum_sum = 0 for batch in ds.iter_batches(batch_size=2, batch_format="pandas"): assert isinstance(batch, pd.DataFrame) - # Simple integer Datastream is converted to a single-column Pandas DataFrame. + # Simple integer Dataset is converted to a single-column Pandas DataFrame. cum_sum += batch["id"] print(cum_sum) # -> 49995000 @@ -76,7 +76,7 @@ import ray @ray.remote -def consume(data: ray.data.Datastream) -> int: +def consume(data: ray.data.Dataset) -> int: num_batches = 0 # Consume data in 2-record batches. for batch in data.iter_batches(batch_size=2): @@ -108,7 +108,7 @@ def train(self, shard: ray.data.DataIterator) -> int: # -> [Actor(Worker, ...), Actor(Worker, ...), ...] 
ds = ray.data.range(10000) -# -> Datastream(num_blocks=200, num_rows=10000, schema=) +# -> Dataset(num_blocks=200, num_rows=10000, schema=) shards = ds.streaming_split(n=4, equal=True) # -> [, diff --git a/doc/source/data/doc_code/key_concepts.py b/doc/source/data/doc_code/key_concepts.py index 4769bf47601f..5b6ec252643b 100644 --- a/doc/source/data/doc_code/key_concepts.py +++ b/doc/source/data/doc_code/key_concepts.py @@ -13,7 +13,7 @@ def objective(*args): ray.init(num_cpus=4) # By setting `max_concurrent_trials=3`, this ensures the cluster will always -# have a sparse CPU for Datastream. Try setting `max_concurrent_trials=4` here, +# have a sparse CPU for Dataset. Try setting `max_concurrent_trials=4` here, # and notice that the experiment will appear to hang. tuner = tune.Tuner( tune.with_resources(objective, {"cpu": 1}), @@ -41,7 +41,7 @@ def objective(*args): ray.init(num_cpus=4) # This runs smoothly since _max_cpu_fraction_per_node is set to 0.8, effectively -# reserving 1 CPU for Datastream task execution. +# reserving 1 CPU for Dataset task execution. tuner = tune.Tuner( tune.with_resources(objective, tune.PlacementGroupFactory( [{"CPU": 1}], diff --git a/doc/source/data/doc_code/loading_data.py b/doc/source/data/doc_code/loading_data.py index c161737f8484..5ddd111f0f6a 100644 --- a/doc/source/data/doc_code/loading_data.py +++ b/doc/source/data/doc_code/loading_data.py @@ -1,9 +1,9 @@ # flake8: noqa # fmt: off -# __creating_datastreams_import_begin__ +# __creating_datasets_import_begin__ import ray -# __creating_datastreams_import_end__ +# __creating_datasets_import_end__ # fmt: on # For tfrecords @@ -11,9 +11,9 @@ # fmt: off # __gen_synth_tabular_range_begin__ -# Create a Datastream of integers. +# Create a Dataset of integers. 
ds = ray.data.range(10000) -# -> Datastream(num_blocks=200, num_rows=10000, schema={id: int64}) +# -> Dataset(num_blocks=200, num_rows=10000, schema={id: int64}) ds.take_batch(5) # -> {'id': array([0, 1, 2, 3, 4])} @@ -22,9 +22,9 @@ # fmt: off # __from_items_begin__ -# Create a Datastream from python dicts. +# Create a Dataset from python dicts. ds = ray.data.from_items([{"col1": i, "col2": str(i)} for i in range(10000)]) -# -> MaterializedDatastream(num_blocks=200, num_rows=10000, schema={col1: int64, col2: string}) +# -> MaterializedDataset(num_blocks=200, num_rows=10000, schema={col1: int64, col2: string}) ds.show(3) # -> {'col1': 0, 'col2': '0'} @@ -37,10 +37,10 @@ # __from_pandas_begin__ import pandas as pd -# Create a Datastream from a Pandas DataFrame. +# Create a Dataset from a Pandas DataFrame. df = pd.DataFrame({"col1": list(range(10000)), "col2": list(map(str, range(10000)))}) ds = ray.data.from_pandas(df) -# -> MaterializedDatastream(num_blocks=1, num_rows=10000, schema={col1: int64, col2: object}) +# -> MaterializedDataset(num_blocks=1, num_rows=10000, schema={col1: int64, col2: object}) ds.show(3) # -> {'col1': 0, 'col2': '0'} @@ -61,9 +61,9 @@ pd.DataFrame({"col1": list(chunk), "col2": list(map(str, chunk))}) for chunk in chunks ] -# Create a Datastream from multiple Pandas DataFrames. +# Create a Dataset from multiple Pandas DataFrames. ds = ray.data.from_pandas(dfs) -# -> MaterializedDatastream(num_blocks=10, num_rows=10000, schema={col1: int64, col2: object}) +# -> MaterializedDataset(num_blocks=10, num_rows=10000, schema={col1: int64, col2: object}) ds.show(3) # -> {'col1': 0, 'col2': '0'} @@ -76,11 +76,11 @@ # __from_numpy_begin__ import numpy as np -# Create a Datastream from a 3D NumPy ndarray. +# Create a Dataset from a 3D NumPy ndarray. arr = np.ones((3, 4, 4)) # The outer dimension is treated as the row dimension. 
ds = ray.data.from_numpy(arr) -# -> MaterializedDatastream( +# -> MaterializedDataset( # num_blocks=1, # num_rows=3, # schema={data: numpy.ndarray(shape=(4, 4), dtype=double)} @@ -101,7 +101,7 @@ # fmt: off # __read_images_begin__ ds = ray.data.read_images("example://image-datasets/simple") -# -> Datastream(num_blocks=3, num_rows=3, +# -> Dataset(num_blocks=3, num_rows=3, # schema={image: numpy.ndarray(shape=(32, 32, 3), dtype=uint8)}) ds.take(1) @@ -113,11 +113,11 @@ # __from_numpy_mult_begin__ import numpy as np -# Create a Datastream from multiple 3D NumPy ndarray. +# Create a Dataset from multiple 3D NumPy ndarray. arrs = [np.random.rand(2, 4, 4) for _ in range(4)] # The outer dimension is treated as the row dimension. ds = ray.data.from_numpy(arrs) -# -> MaterializedDatastream( +# -> MaterializedDataset( # num_blocks=4, # num_rows=8, # schema={data: numpy.ndarray(shape=(4, 4), dtype=double)} @@ -139,10 +139,10 @@ # __from_arrow_begin__ import pyarrow as pa -# Create a Datastream from an Arrow Table. +# Create a Dataset from an Arrow Table. t = pa.table({"col1": list(range(10000)), "col2": list(map(str, range(10000)))}) ds = ray.data.from_arrow(t) -# -> MaterializedDatastream(num_blocks=1, num_rows=10000, schema={col1: int64, col2: string}) +# -> MaterializedDataset(num_blocks=1, num_rows=10000, schema={col1: int64, col2: string}) ds.show(3) # -> {'col1': 0, 'col2': '0'} @@ -163,9 +163,9 @@ pa.table({"col1": list(chunk), "col2": list(map(str, chunk))}) for chunk in chunks ] -# Create a Datastream from multiple Arrow Tables. +# Create a Dataset from multiple Arrow Tables. 
ds = ray.data.from_arrow(ts) -# -> MaterializedDatastream(num_blocks=10, num_rows=10000, schema={col1: int64, col2: string}) +# -> MaterializedDataset(num_blocks=10, num_rows=10000, schema={col1: int64, col2: string}) ds.show(3) # -> {'col1': 0, 'col2': '0'} @@ -181,9 +181,9 @@ df = pd.DataFrame({"col1": list(range(10000)), "col2": list(map(str, range(10000)))}) ddf = dd.from_pandas(df, npartitions=4) -# Create a Datastream from a Dask DataFrame. +# Create a Dataset from a Dask DataFrame. ds = ray.data.from_dask(ddf) -# -> MaterializedDatastream(num_blocks=10, num_rows=10000, schema={col1: int64, col2: object}) +# -> MaterializedDataset(num_blocks=10, num_rows=10000, schema={col1: int64, col2: object}) ds.show(3) # -> {'col1': 0, 'col2': '0'} @@ -198,9 +198,9 @@ df = pd.DataFrame({"col1": list(range(10000)), "col2": list(map(str, range(10000)))}) mdf = md.DataFrame(df) -# Create a Datastream from a Modin DataFrame. +# Create a Dataset from a Modin DataFrame. ds = ray.data.from_modin(mdf) -# -> MaterializedDatastream(num_blocks=8, num_rows=10000, schema={col1: int64, col2: object}) +# -> MaterializedDataset(num_blocks=8, num_rows=10000, schema={col1: int64, col2: object}) ds.show(3) # -> {'col1': 0, 'col2': '0'} @@ -211,9 +211,9 @@ # fmt: off # __read_parquet_begin__ -# Create a Datastream by reading a Parquet file. +# Create a Dataset by reading a Parquet file. ds = ray.data.read_parquet("example://iris.parquet") -# -> Datastream( +# -> Dataset( # num_blocks=1, # num_rows=150, # schema={ @@ -247,14 +247,14 @@ # __read_parquet_pushdown_begin__ import pyarrow as pa -# Create a Datastream by reading a Parquet file, pushing column selection and row +# Create a Dataset by reading a Parquet file, pushing column selection and row # filtering down to the file scan. ds = ray.data.read_parquet( "example://iris.parquet", columns=["sepal.length", "variety"], filter=pa.dataset.field("sepal.length") > 5.0, ).materialize() # Force a full read of the file. 
-# -> Datastream(num_blocks=1, num_rows=118, schema={sepal.length: double, variety: string}) +# -> Dataset(num_blocks=1, num_rows=118, schema={sepal.length: double, variety: string}) ds.show(2) # -> {'sepal.length': 5.1, 'variety': 'Setosa'} @@ -264,9 +264,9 @@ # fmt: off # __read_csv_begin__ -# Create a Datastream by reading a CSV file. +# Create a Dataset by reading a CSV file. ds = ray.data.read_csv("example://iris.csv") -# -> Datastream( +# -> Dataset( # num_blocks=1, # num_rows=150, # schema={ @@ -298,9 +298,9 @@ # fmt: off # __read_json_begin__ -# Create a Datastream by reading a JSON file. +# Create a Dataset by reading a JSON file. ds = ray.data.read_json("example://iris.json") -# -> Datastream( +# -> Dataset( # num_blocks=1, # num_rows=150, # schema={ @@ -332,9 +332,9 @@ # fmt: off # __read_numpy_begin__ -# Create a Datastream by reading a NumPy file. +# Create a Dataset by reading a NumPy file. ds = ray.data.read_numpy("example://mnist_subset.npy") -# -> Datastream( +# -> Dataset( # num_blocks=1, # num_rows=3, # schema={data: numpy.ndarray(shape=(28, 28), dtype=uint8)} @@ -348,9 +348,9 @@ # fmt: off # __read_text_begin__ -# Create a Datastream by reading a text file. +# Create a Dataset by reading a text file. ds = ray.data.read_text("example://sms_spam_collection_subset.txt") -# -> Datastream(num_blocks=1, num_rows=10, schema={text: string}) +# -> Dataset(num_blocks=1, num_rows=10, schema={text: string}) ds.show(2) # -> {'text': 'ham\tGo until jurong point, crazy.. Available only in bugis n great world la e buffet... Cine there got amore wat...'} @@ -363,12 +363,12 @@ from io import BytesIO import PIL.Image -# Create a Datastream by reading a binary file. +# Create a Dataset by reading a binary file. 
ds = ray.data.read_binary_files("example://mnist_subset_partitioned/0/1.png") -# -> Datastream(num_blocks=1, num_rows=1, schema={bytes: string}) +# -> Dataset(num_blocks=1, num_rows=1, schema={bytes: string}) ds = ds.map(lambda row: {"image": np.asarray(PIL.Image.open(BytesIO(row["bytes"])).convert("L"))}) -# -> Datastream( +# -> Dataset( # num_blocks=1, # num_rows=1, # schema={image: numpy.ndarray(shape=(28, 28), dtype=uint8)} @@ -381,9 +381,9 @@ # fmt: off # __read_parquet_s3_begin__ -# Create a Datastream by reading a Parquet file from S3. +# Create a Dataset by reading a Parquet file from S3. ds = ray.data.read_parquet("s3://anonymous@air-example-data/ursa-labs-taxi-data/by_year/2019/01/data.parquet") -# -> Datastream( +# -> Dataset( # num_blocks=1, # num_rows=7667792, # schema={ @@ -434,9 +434,9 @@ # fmt: off # __read_tfrecords_begin__ -# Create a Datastream by reading a TFRecord file. +# Create a Dataset by reading a TFRecord file. ds = ray.data.read_tfrecords("example://iris.tfrecords") -# Datastream( +# Dataset( # num_blocks=1, # num_rows=150, # schema={ diff --git a/doc/source/data/doc_code/loading_data_untested.py b/doc/source/data/doc_code/loading_data_untested.py index 87a30fe57d57..83d9e9f6685f 100644 --- a/doc/source/data/doc_code/loading_data_untested.py +++ b/doc/source/data/doc_code/loading_data_untested.py @@ -7,14 +7,14 @@ # __from_spark_begin__ import raydp -spark = raydp.init_spark(app_name="Spark -> Datastreams Example", +spark = raydp.init_spark(app_name="Spark -> Datasets Example", num_executors=2, executor_cores=2, executor_memory="500MB") df = spark.createDataFrame([(i, str(i)) for i in range(10000)], ["col1", "col2"]) -# Create a tabular Datastream from a Spark DataFrame. +# Create a tabular Dataset from a Spark DataFrame. 
ds = ray.data.from_spark(df) -# -> MaterializedDatastream(num_blocks=10, num_rows=10000, schema={col1: int64, col2: string}) +# -> MaterializedDataset(num_blocks=10, num_rows=10000, schema={col1: int64, col2: string}) ds.show(3) # -> {'col1': 0, 'col2': '0'} @@ -27,7 +27,7 @@ # __read_parquet_s3_with_fs_begin__ import pyarrow as pa -# Create a tabular Datastream by reading a Parquet file from a private S3 bucket. +# Create a tabular Dataset by reading a Parquet file from a private S3 bucket. # NOTE: This example is not runnable as-is; add in a path to your private bucket and the # required S3 credentials! ds = ray.data.read_parquet( @@ -43,7 +43,7 @@ # fmt: off # __read_parquet_hdfs_begin__ -# Create a tabular Datastream by reading a Parquet file from HDFS using HDFS connection +# Create a tabular Dataset by reading a Parquet file from HDFS using HDFS connection # automatically constructed based on the URI. # NOTE: This example is not runnable as-is; you'll need to point it at your HDFS # cluster/data. @@ -58,7 +58,7 @@ # __read_parquet_hdfs_with_fs_begin__ import pyarrow as pa -# Create a tabular Datastream by reading a Parquet file from HDFS, manually specifying a +# Create a tabular Dataset by reading a Parquet file from HDFS, manually specifying a # configured HDFS connection via a Pyarrow HDFSFileSystem instance. # NOTE: This example is not runnable as-is; you'll need to point it at your HDFS # cluster/data. @@ -75,7 +75,7 @@ # __read_parquet_gcs_begin__ import gcsfs -# Create a tabular Datastream by reading a Parquet file from GCS, passing the configured +# Create a tabular Dataset by reading a Parquet file from GCS, passing the configured # GCSFileSystem. # NOTE: This example is not runnable as-is; you need to point it at your GCS bucket # and configure your GCP project and credentials. 
@@ -99,7 +99,7 @@ # __read_parquet_az_begin__ import adlfs -# Create a tabular Datastream by reading a Parquet file from Azure Blob Storage, passing +# Create a tabular Dataset by reading a Parquet file from Azure Blob Storage, passing # the configured AzureBlobFileSystem. path = ( "az://nyctlc/yellow/puYear=2009/puMonth=1/" @@ -123,9 +123,9 @@ df = pd.DataFrame({"col1": list(range(10000)), "col2": list(map(str, range(10000)))}) mdf = md.DataFrame(df, num_partitions=8) -# Create a tabular Datastream from a Mars DataFrame. +# Create a tabular Dataset from a Mars DataFrame. ds = ray.data.from_mars(mdf) -# -> MaterializedDatastream(num_blocks=8, num_rows=10000, schema={col1: int64, col2: object}) +# -> MaterializedDataset(num_blocks=8, num_rows=10000, schema={col1: int64, col2: object}) ds.show(3) # -> {'col1': 0, 'col2': '0'} diff --git a/doc/source/data/doc_code/quick_start.py b/doc/source/data/doc_code/quick_start.py index 1d17cc5b0183..5d63ddbaab55 100644 --- a/doc/source/data/doc_code/quick_start.py +++ b/doc/source/data/doc_code/quick_start.py @@ -4,9 +4,9 @@ # __create_from_python_begin__ import ray -# Create a Datastream of Python objects. +# Create a Dataset of Python objects. ds = ray.data.range(10000) -# -> Datastream(num_blocks=200, num_rows=10000, schema=) +# -> Dataset(num_blocks=200, num_rows=10000, schema=) ds.take(5) # -> [0, 1, 2, 3, 4] @@ -14,7 +14,7 @@ ds.schema() # -# Create a Datastream from Python objects, which are held as Arrow records. +# Create a Dataset from Python objects, which are held as Arrow records. 
ds = ray.data.from_items([ {"sepal.length": 5.1, "sepal.width": 3.5, "petal.length": 1.4, "petal.width": 0.2, "variety": "Setosa"}, @@ -23,7 +23,7 @@ {"sepal.length": 4.7, "sepal.width": 3.2, "petal.length": 1.3, "petal.width": 0.2, "variety": "Setosa"}, ]) -# Datastream(num_blocks=3, num_rows=3, +# Dataset(num_blocks=3, num_rows=3, # schema={sepal.length: float64, sepal.width: float64, # petal.length: float64, petal.width: float64, variety: object}) @@ -48,13 +48,13 @@ # __create_from_files_begin__ # Create from CSV. ds = ray.data.read_csv("s3://anonymous@air-example-data/iris.csv") -# Datastream(num_blocks=1, num_rows=150, +# Dataset(num_blocks=1, num_rows=150, # schema={sepal length (cm): double, sepal width (cm): double, # petal length (cm): double, petal width (cm): double, target: int64}) # Create from Parquet. ds = ray.data.read_parquet("s3://anonymous@air-example-data/iris.parquet") -# Datastream(num_blocks=1, num_rows=150, +# Dataset(num_blocks=1, num_rows=150, # schema={sepal.length: double, sepal.width: double, # petal.length: double, petal.width: double, variety: string}) @@ -67,7 +67,7 @@ # Create 10 blocks for parallelism. 
ds = ds.repartition(10) -# Datastream(num_blocks=10, num_rows=150, +# Dataset(num_blocks=10, num_rows=150, # schema={sepal.length: float64, sepal.width: float64, # petal.length: float64, petal.width: float64, variety: object}) @@ -76,7 +76,7 @@ def transform_batch(df: pandas.DataFrame) -> pandas.DataFrame: return df[(df["sepal.length"] < 5.5) & (df["petal.length"] > 3.5)] transformed_ds = ds.map_batches(transform_batch, batch_format="pandas") -# Datastream(num_blocks=10, num_rows=3, +# Dataset(num_blocks=10, num_rows=3, # schema={sepal.length: float64, sepal.width: float64, # petal.length: float64, petal.width: float64, variety: object}) diff --git a/doc/source/data/doc_code/saving_data.py b/doc/source/data/doc_code/saving_data.py index 3ef318ccc963..59252f13b4e6 100644 --- a/doc/source/data/doc_code/saving_data.py +++ b/doc/source/data/doc_code/saving_data.py @@ -10,7 +10,7 @@ import ray ds = ray.data.range(1000) -# -> Datastream(num_blocks=200, num_rows=1000, schema={id: int64}) +# -> Dataset(num_blocks=200, num_rows=1000, schema={id: int64}) # Write out just one file. ds.repartition(1).write_parquet("/tmp/one_parquet") @@ -29,7 +29,7 @@ import ray ds = ray.data.range(1000) -# -> Datastream(num_blocks=200, num_rows=1000, schema={id: int64}) +# -> Dataset(num_blocks=200, num_rows=1000, schema={id: int64}) # Write out just one file. ds.repartition(1).write_csv("/tmp/one_csv") @@ -48,7 +48,7 @@ import ray ds = ray.data.range(1000) -# -> Datastream(num_blocks=200, num_rows=1000, schema={id: int64}) +# -> Dataset(num_blocks=200, num_rows=1000, schema={id: int64}) # Write out just one file. 
ds.repartition(1).write_json("/tmp/one_json") @@ -68,7 +68,7 @@ import numpy as np ds = ray.data.from_numpy(np.arange(1000)) -# -> Datastream( +# -> Dataset( # num_blocks=1, # num_rows=1000, # schema={data: }, @@ -99,7 +99,7 @@ {"some_int": 2, "some_float": 2.0, "some_bytestring": b"def"}, ] ) -# -> Datastream( +# -> Dataset( # num_blocks=2, # num_rows=2, # schema={some_int: int64, some_float: double, some_bytestring: binary} diff --git a/doc/source/data/doc_code/tensor.py b/doc/source/data/doc_code/tensor.py index d813d6a3e871..f7689b973efd 100644 --- a/doc/source/data/doc_code/tensor.py +++ b/doc/source/data/doc_code/tensor.py @@ -5,9 +5,9 @@ # fmt: off # __create_range_begin__ -# Create a Datastream of tensors. +# Create a Dataset of tensors. ds = ray.data.range_tensor(10000, shape=(64, 64)) -# -> Datastream(num_blocks=200, num_rows=10000, +# -> Dataset(num_blocks=200, num_rows=10000, # schema={data: numpy.ndarray(shape=(64, 64), dtype=int64)}) ds.take(1) @@ -40,7 +40,7 @@ def gen_image_and_embed(batch: pd.DataFrame) -> pd.DataFrame: ds.map_batches(gen_image_and_embed, batch_format="pandas") ds.materialize() -# -> Datastream(num_blocks=17, num_rows=1000, +# -> Dataset(num_blocks=17, num_rows=1000, # schema={image: numpy.ndarray(shape=(128, 128, 3), dtype=int64), # embed: numpy.ndarray(shape=(256,), dtype=uint8)}) # __create_pandas_2_end__ @@ -48,19 +48,19 @@ def gen_image_and_embed(batch: pd.DataFrame) -> pd.DataFrame: # __create_numpy_begin__ # From in-memory numpy data. ray.data.from_numpy(np.zeros((1000, 128, 128, 3), dtype=np.int64)) -# -> Datastream(num_blocks=1, num_rows=1000, +# -> Dataset(num_blocks=1, num_rows=1000, # schema={data: numpy.ndarray(shape=(128, 128, 3), dtype=int64)}) # From saved numpy files. 
ray.data.read_numpy("example://mnist_subset.npy") -# -> Datastream(num_blocks=1, num_rows=3, +# -> Dataset(num_blocks=1, num_rows=3, # schema={data: numpy.ndarray(shape=(28, 28), dtype=uint8)}) # __create_numpy_end__ # __create_parquet_1_begin__ # Reading previously saved Tensor data works out of the box. ds = ray.data.read_parquet("example://parquet_images_mini") -# -> Datastream(num_blocks=3, num_rows=3, +# -> Dataset(num_blocks=3, num_rows=3, # schema={image: numpy.ndarray(shape=(128, 128, 3), dtype=uint8), # label: string}) @@ -100,12 +100,12 @@ def gen_image_and_embed(batch: pd.DataFrame) -> pd.DataFrame: "one": [1, 2, 3], "two": [tensor.tobytes() for tensor in arr]}) -# Write the datastream to Parquet. The tensor column will be written as an +# Write the dataset to Parquet. The tensor column will be written as an # array of opaque byte blobs. ds = ray.data.from_pandas([df]) ds.write_parquet(path) -# Read the Parquet files into a new Datastream, with the serialized tensors +# Read the Parquet files into a new Dataset, with the serialized tensors # automatically cast to our tensor column extension type. ds = ray.data.read_parquet( path, tensor_column_schema={"two": (np.int_, (2, 2, 2))}) @@ -132,7 +132,7 @@ def gen_image_and_embed(batch: pd.DataFrame) -> pd.DataFrame: "one": [1, 2, 3], "two": [pickle.dumps(tensor) for tensor in arr]}) -# Write the datastream to Parquet. The tensor column will be written as an +# Write the dataset to Parquet. The tensor column will be written as an # array of opaque byte blobs. ds = ray.data.from_pandas([df]) ds.write_parquet(path) @@ -144,7 +144,7 @@ def cast_udf(block: pa.Table) -> pa.Table: block["two"] = TensorArray([pickle.loads(a) for a in block["two"]]) return pa.Table.from_pandas(block) -# Read the Parquet files into a new Datastream, applying the casting UDF +# Read the Parquet files into a new Dataset, applying the casting UDF # on-the-fly within the underlying read tasks. 
ds = ray.data.read_parquet(path, _block_udf=cast_udf) @@ -157,7 +157,7 @@ def cast_udf(block: pa.Table) -> pa.Table: # __create_images_begin__ ds = ray.data.read_images("example://image-datasets/simple") -# -> Datastream(num_blocks=3, num_rows=3, +# -> Dataset(num_blocks=3, num_rows=3, # schema={data: numpy.ndarray(shape=(32, 32, 3), dtype=uint8)}) ds.take(1) @@ -172,7 +172,7 @@ def cast_udf(block: pa.Table) -> pa.Table: # __consume_pandas_2_begin__ ds = ray.data.read_parquet("example://parquet_images_mini") -# -> Datastream(num_blocks=3, num_rows=3, +# -> Dataset(num_blocks=3, num_rows=3, # schema={image: numpy.ndarray(shape=(128, 128, 3), dtype=uint8), # label: string}) @@ -195,7 +195,7 @@ def add_one(batch: pd.DataFrame) -> pd.DataFrame: from ray.data.extensions.tensor_extension import ArrowTensorArray ds = ray.data.read_parquet("example://parquet_images_mini") -# -> Datastream(num_blocks=3, num_rows=3, +# -> Dataset(num_blocks=3, num_rows=3, # schema={image: numpy.ndarray(shape=(128, 128, 3), dtype=uint8), # label: object}) @@ -234,7 +234,7 @@ def to_numpy(buf): # __consume_numpy_2_begin__ ds = ray.data.read_parquet("example://parquet_images_mini") -# -> Datastream(num_blocks=3, num_rows=3, +# -> Dataset(num_blocks=3, num_rows=3, # schema={image: numpy.ndarray(shape=(128, 128, 3), dtype=uint8), # label: object}) @@ -269,13 +269,13 @@ def add_one(batch: Dict[str, Any]) -> Dict[str, Any]: shutil.rmtree("/tmp/some_path") # __write_1_begin__ -# Read a multi-column example datastream. +# Read a multi-column example dataset. ds = ray.data.read_parquet("example://parquet_images_mini") -# -> Datastream(num_blocks=3, num_rows=3, +# -> Dataset(num_blocks=3, num_rows=3, # schema={image: numpy.ndarray(shape=(128, 128, 3), dtype=uint8), # label: object}) -# You can write the datastream to Parquet. +# You can write the dataset to Parquet. ds.write_parquet("/tmp/some_path") # And you can read it back. 
@@ -289,12 +289,12 @@ def add_one(batch: Dict[str, Any]) -> Dict[str, Any]: shutil.rmtree("/tmp/some_path") # __write_2_begin__ -# Read a single-column example datastream. +# Read a single-column example dataset. ds = ray.data.read_numpy("example://mnist_subset.npy") -# -> Datastream(num_blocks=1, num_rows=3, +# -> Dataset(num_blocks=1, num_rows=3, # schema={data: numpy.ndarray(shape=(28, 28), dtype=uint8)}) -# You can write the datastream to Parquet. +# You can write the dataset to Parquet. ds.write_numpy("/tmp/some_path", column="data") # And you can read it back. @@ -305,11 +305,11 @@ def add_one(batch: Dict[str, Any]) -> Dict[str, Any]: # fmt: off # __create_variable_shaped_tensors_begin___ -# Create a Datastream of variable-shaped tensors. +# Create a Dataset of variable-shaped tensors. ragged_array = np.array([np.ones((2, 2)), np.ones((3, 3))], dtype=object) df = pd.DataFrame({"feature": ragged_array, "label": [1, 1]}) ds = ray.data.from_pandas([df, df]) -# -> Datastream(num_blocks=2, num_rows=4, +# -> Dataset(num_blocks=2, num_rows=4, # schema={feature: numpy.ndarray(shape=(None, None), dtype=float64), # label: int64}) @@ -325,7 +325,7 @@ def add_one(batch: Dict[str, Any]) -> Dict[str, Any]: # fmt: off # __tf_variable_shaped_tensors_begin___ -# Convert Datastream to a TensorFlow Dataset. +# Convert Dataset to a TensorFlow Dataset. tf_ds = ds.to_tf( batch_size=2, feature_columns="feature", diff --git a/doc/source/data/doc_code/transforming_data.py b/doc/source/data/doc_code/transforming_data.py index da79efe09486..184d96b81b7f 100644 --- a/doc/source/data/doc_code/transforming_data.py +++ b/doc/source/data/doc_code/transforming_data.py @@ -8,7 +8,7 @@ # Load data. ds = ray.data.from_items(["Test", "String", "Test String"]) -# -> Datastream(num_blocks=1, num_rows=3, schema={item: string}) +# -> Dataset(num_blocks=1, num_rows=3, schema={item: string}) # Define the transform function. 
def to_lowercase(batch: Dict[str, np.ndarray]) -> Dict[str, np.ndarray]: @@ -28,7 +28,7 @@ def to_lowercase(batch: Dict[str, np.ndarray]) -> Dict[str, np.ndarray]: # Load data. ds = ray.data.from_items(["Test", "String", "Test String"]) -# -> Datastream(num_blocks=1, num_rows=3, schema={item: string}) +# -> Dataset(num_blocks=1, num_rows=3, schema={item: string}) # Define the transform function. def to_lowercase(row: Dict[str, Any]) -> Dict[str, Any]: @@ -100,7 +100,7 @@ def pyarrow_transform(batch: pa.Table) -> pa.Table: # 'variety': 'Versicolor', 'normalized.sepal.length': 0.9142857142857144} # __writing_arrow_udfs_end__ -# __datastream_compute_strategy_begin__ +# __dataset_compute_strategy_begin__ import ray import pandas as pd import numpy as np @@ -133,7 +133,7 @@ def __call__(self, batch: pd.DataFrame) -> pd.DataFrame: # Batch inference processing with Ray actors (pool of size 5). predicted = ds.map_batches( IrisInferModel, compute=ActorPoolStrategy(size=5), batch_size=10) -# __datastream_compute_strategy_end__ +# __dataset_compute_strategy_end__ # __writing_generator_udfs_begin__ import ray @@ -155,19 +155,19 @@ def repeat_dataframe(df: pd.DataFrame) -> Iterator[pd.DataFrame]: # __shuffle_begin__ import ray -# The datastream starts off with 1000 blocks. +# The dataset starts off with 1000 blocks. ds = ray.data.range(10000, parallelism=1000) -# -> Datastream(num_blocks=1000, num_rows=10000, schema={id: int64}) +# -> Dataset(num_blocks=1000, num_rows=10000, schema={id: int64}) # Repartition the data into 100 blocks. Since shuffle=False, Ray Data will minimize # data movement during this operation by merging adjacent blocks. ds = ds.repartition(100, shuffle=False).materialize() -# -> MaterializedDatastream(num_blocks=100, num_rows=10000, schema={id: int64}) +# -> MaterializedDataset(num_blocks=100, num_rows=10000, schema={id: int64}) # Repartition the data into 200 blocks, and force a full data shuffle. 
# This operation will be more expensive ds = ds.repartition(200, shuffle=True).materialize() -# -> MaterializedDatastream(num_blocks=200, num_rows=10000, schema={id: int64}) +# -> MaterializedDataset(num_blocks=200, num_rows=10000, schema={id: int64}) # __shuffle_end__ # __map_groups_begin__ diff --git a/doc/source/data/examples/batch_training.ipynb b/doc/source/data/examples/batch_training.ipynb index ad56bb3a39d2..d4dcf959e536 100644 --- a/doc/source/data/examples/batch_training.ipynb +++ b/doc/source/data/examples/batch_training.ipynb @@ -37,10 +37,10 @@ "# Contents\n", "\n", "In this this tutorial, you will learn about:\n", - " 1. [Creating a Datastream](#create_ds)\n", - " 2. [Filtering a Datastream on Read](#filter_ds)\n", - " 3. [Inspecting a Datastream](#inspect_ds)\n", - " 4. [Transforming a Datastream in parallel](#transform_ds)\n", + " 1. [Creating a Dataset](#create_ds)\n", + " 2. [Filtering a Dataset on Read](#filter_ds)\n", + " 3. [Inspecting a Dataset](#inspect_ds)\n", + " 4. [Transforming a Dataset in parallel](#transform_ds)\n", " 5. [Batch training with Ray Data in parallel](#batch_train_ds)\n", " 6. [Load a saved model and perform batch prediction](#load_model)\n", "\n", @@ -80,7 +80,7 @@ "import pyarrow.dataset as pds\n", "\n", "print(f\"pyarrow: {pyarrow.__version__}\")\n", - "from ray.data import Datastream" + "from ray.data import Dataset" ] }, { @@ -201,7 +201,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Creating a Datastream " + "## Creating a Dataset " ] }, { @@ -289,7 +289,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "### Filtering a Datastream on Read \n", + "### Filtering a Dataset on Read \n", "\n", "Normally there is some last-mile data processing required before training. 
Let's just assume we know the data processing steps are:\n", "- Drop negative trip distances, 0 fares, 0 passengers.\n", @@ -300,7 +300,7 @@ "Instead of blindly reading all the data, it would be better if we only read the data we needed. This is similar concept to SQL `SELECT only rows, columns you need` vs `SELECT *`.\n", "\n", "```{tip}\n", - "Best practice is to filter as much as you can directly in the Datastream `read_parquet()`.\n", + "Best practice is to filter as much as you can directly in the Dataset `read_parquet()`.\n", "```\n", "\n", "Note that Ray Data' Parquet reader supports projection (column selection) and row filter pushdown, where we can push the above column selection and the row-based filter to the Parquet read. If we specify column selection at Parquet read time, the unselected columns won't even be read from disk. This can save a lot of memory, especially with big datasets, and allow us to avoid OOM issues.\n", @@ -314,7 +314,7 @@ "metadata": {}, "outputs": [], "source": [ - "def pushdown_read_data(files_list: list, sample_ids: list) -> Datastream:\n", + "def pushdown_read_data(files_list: list, sample_ids: list) -> Dataset:\n", " start = time.time()\n", "\n", " filter_expr = (\n", @@ -368,11 +368,11 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "### Inspecting a Datastream \n", + "### Inspecting a Dataset \n", "\n", - "Let's get some basic statistics about our newly created Datastream.\n", + "Let's get some basic statistics about our newly created Dataset.\n", "\n", - "As our Datastream is backed by Parquet, we can obtain the number of rows from the metadata without triggering a full data read.\n" + "As our Dataset is backed by Parquet, we can obtain the number of rows from the metadata without triggering a full data read.\n" ] }, { @@ -396,7 +396,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Similarly, we can obtain the Datastream size (in bytes) from the metadata.\n" + "Similarly, we can obtain the Dataset size 
(in bytes) from the metadata.\n" ] }, { @@ -456,12 +456,12 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "### Transforming a Datastream in parallel using custom functions \n", + "### Transforming a Dataset in parallel using custom functions \n", "\n", - "Ray Data allows you to specify custom data transform functions. These [user defined functions (UDFs)](transforming_data) can be called using `Datastream.map_batches(my_function)`. The transformation will be conducted in parallel for each data batch.\n", + "Ray Data allows you to specify custom data transform functions. These [user defined functions (UDFs)](transforming_data) can be called using `Dataset.map_batches(my_function)`. The transformation will be conducted in parallel for each data batch.\n", "\n", "```{tip}\n", - "You may need to call `Datastream.repartition(n)` first to split the Datastream into more blocks internally. By default, each block corresponds to one file. The upper bound of parallelism is the number of blocks.\n", + "You may need to call `Dataset.repartition(n)` first to split the Dataset into more blocks internally. By default, each block corresponds to one file. The upper bound of parallelism is the number of blocks.\n", "```\n", "\n", "You can specify the data format you are using in the `batch_format` parameter. The dataset will be divided into batches and those batches converted into the specified format. Available data formats you can specify in the `batch_format` paramater include `\"pandas\", \"pyarrow\", \"numpy\"`. Tabular data will be passed into your UDF by default as a pandas DataFrame. 
Tensor data will be passed into your UDF as a numpy array.\n", @@ -475,7 +475,7 @@ "metadata": {}, "outputs": [], "source": [ - "# A pandas DataFrame UDF for transforming the Datastream in parallel.\n", + "# A pandas DataFrame UDF for transforming the Dataset in parallel.\n", "def transform_df(input_df: pd.DataFrame) -> pd.DataFrame:\n", " df = input_df.copy()\n", "\n", @@ -685,7 +685,7 @@ "source": [ "The `train_and_evaluate` function contains the logic for train-test splitting and fitting of a model using the `fit_and_score_sklearn` function.\n", "\n", - "As an input, this function takes in a pandas DataFrame. When we call `Datastream.map_batches` or `Datastream.groupby().map_groups()`, the Datastream will be batched into multiple pandas DataFrames and this function will run for each batch in parallel. We will return the model and its error. Those results will be collected back into a Datastream." + "As an input, this function takes in a pandas DataFrame. When we call `Dataset.map_batches` or `Dataset.groupby().map_groups()`, the Dataset will be batched into multiple pandas DataFrames and this function will run for each batch in parallel. We will return the model and its error. Those results will be collected back into a Dataset." ] }, { @@ -730,10 +730,10 @@ "metadata": {}, "source": [ "Recall how we wrote a data transform `transform_batch` UDF? It was called with pattern:\n", - "- `Datastream.map_batches(transform_batch, batch_format=\"pandas\")`\n", + "- `Dataset.map_batches(transform_batch, batch_format=\"pandas\")`\n", "\n", - "Similarly, we can write a custom groupy-aggregate function `agg_func` which will run for each [Datastream *group-by*](data-groupbys) group in parallel. The usage pattern is:\n", - "- `Datastream.groupby(column).map_groups(agg_func, batch_format=\"pandas\")`.\n", + "Similarly, we can write a custom groupy-aggregate function `agg_func` which will run for each [Dataset *group-by*](data-groupbys) group in parallel. 
The usage pattern is:\n", + "- `Dataset.groupby(column).map_groups(agg_func, batch_format=\"pandas\")`.\n", "\n", "In the cell below, we define our custom `agg_func`." ] @@ -745,7 +745,7 @@ "outputs": [], "source": [ "# A Pandas DataFrame aggregation function for processing\n", - "# grouped batches of Datastream data.\n", + "# grouped batches of Dataset data.\n", "def agg_func(df: pd.DataFrame) -> pd.DataFrame:\n", " location_id = df[\"dropoff_location_id\"][0]\n", "\n", @@ -772,9 +772,9 @@ "source": [ "### Run batch training using `map_groups`\n", "\n", - "The main \"driver code\" reads each Parquet file (where each file corresponds to one month of NYC taxi data) into a Datastream `ds`. \n", + "The main \"driver code\" reads each Parquet file (where each file corresponds to one month of NYC taxi data) into a Dataset `ds`. \n", "\n", - "Then we use Datastream *group-by* to map each group into a batch of data and run `agg_func` on each grouping in parallel by calling `ds.groupby(\"dropoff_location_id\").map_groups(agg_func, batch_format=\"pandas\")`." + "Then we use Dataset *group-by* to map each group into a batch of data and run `agg_func` on each grouping in parallel by calling `ds.groupby(\"dropoff_location_id\").map_groups(agg_func, batch_format=\"pandas\")`." 
] }, { @@ -813,12 +813,12 @@ "\n", "start = time.time()\n", "\n", - "# Read data into Datastream\n", + "# Read data into Dataset\n", "# ds = pushdown_read_data(s3_files, sample_locations)\\\n", "# .repartition(14)\\\n", "# .ds.map_batches(transform_df, batch_format=\"pandas\")\n", "\n", - "# Use Datastream groupby.map_groups() to process each group in parallel and return a Datastream.\n", + "# Use Dataset groupby.map_groups() to process each group in parallel and return a Dataset.\n", "results = ds.groupby(\"dropoff_location_id\").map_groups(agg_func, batch_format=\"pandas\")\n", "\n", "total_time_taken = time.time() - start\n", @@ -841,7 +841,7 @@ { "data": { "text/plain": [ - "Datastream(num_blocks=6, num_rows=6, schema={location_id: int32, model: object, error: float64})" + "Dataset(num_blocks=6, num_rows=6, schema={location_id: int32, model: object, error: float64})" ] }, "execution_count": 20, diff --git a/doc/source/data/examples/index.rst b/doc/source/data/examples/index.rst index a1dcea2982e5..4a4fcb39feb0 100644 --- a/doc/source/data/examples/index.rst +++ b/doc/source/data/examples/index.rst @@ -4,8 +4,8 @@ Examples ======== -.. tip:: Check out the Datastreams :ref:`User Guide ` to learn more about - Datastream features in-depth. +.. tip:: Check out the Datasets :ref:`User Guide ` to learn more about + Dataset features in-depth. .. _data-recipes: @@ -56,7 +56,7 @@ Other Examples :class-container: container pb-4 .. grid-item-card:: - :img-top: ../images/datastream-arch.svg + :img-top: ../images/dataset-arch.svg :class-img-top: pt-5 w-75 d-block mx-auto .. 
button-ref:: random-access diff --git a/doc/source/data/examples/nyc_taxi_basic_processing.ipynb b/doc/source/data/examples/nyc_taxi_basic_processing.ipynb index a8cbf4348e2b..599a479327c0 100644 --- a/doc/source/data/examples/nyc_taxi_basic_processing.ipynb +++ b/doc/source/data/examples/nyc_taxi_basic_processing.ipynb @@ -33,7 +33,7 @@ "\n", "This tutorial will cover:\n", " - Reading Parquet data\n", - " - Inspecting the metadata and first few rows of a large Ray {class}`Datastream `\n", + " - Inspecting the metadata and first few rows of a large Ray {class}`Dataset `\n", " - Calculating some common global and grouped statistics on the dataset\n", " - Dropping columns and rows\n", " - Adding a derived column\n", @@ -68,7 +68,7 @@ "source": [ "### Reading and Inspecting the Data\n", "\n", - "Next, we read a few of the files from the dataset. This read is lazy, where reading and all future transformations are delayed until a downstream operation triggers execution (e.g. consuming the data with {meth}`ds.take() `)\n" + "Next, we read a few of the files from the dataset. This read is lazy, where reading and all future transformations are delayed until a downstream operation triggers execution (e.g. consuming the data with {meth}`ds.take() `)\n" ] }, { @@ -85,7 +85,7 @@ "name": "stderr", "output_type": "stream", "text": [ - "⚠️ The number of blocks in this datastream (2) limits its parallelism to 2 concurrent tasks. This is much less than the number of available CPU slots in the cluster. Use `.repartition(n)` to increase the number of datastream blocks.\n" + "⚠️ The number of blocks in this dataset (2) limits its parallelism to 2 concurrent tasks. This is much less than the number of available CPU slots in the cluster. Use `.repartition(n)` to increase the number of dataset blocks.\n" ] } ], @@ -102,7 +102,7 @@ "id": "a4d4769c", "metadata": {}, "source": [ - "We can easily inspect the schema of this datastream. 
For Parquet files, we don't even have to read the actual data to get the schema; we can read it from the lightweight Parquet metadata!" + "We can easily inspect the schema of this dataset. For Parquet files, we don't even have to read the actual data to get the schema; we can read it from the lightweight Parquet metadata!" ] }, { @@ -180,7 +180,7 @@ "id": "87fd9a17", "metadata": {}, "source": [ - "We can get a nice, cheap summary of the ``Datastream`` by leveraging it's informative repr:" + "We can get a nice, cheap summary of the ``Dataset`` by leveraging it's informative repr:" ] }, { @@ -192,7 +192,7 @@ { "data": { "text/plain": [ - "Datastream(num_blocks=2, num_rows=2749936, schema={vendor_id: string, pickup_at: timestamp[us], dropoff_at: timestamp[us], passenger_count: int8, trip_distance: float, pickup_longitude: float, pickup_latitude: float, rate_code_id: null, store_and_fwd_flag: string, dropoff_longitude: float, dropoff_latitude: float, payment_type: string, fare_amount: float, extra: float, mta_tax: float, tip_amount: float, tolls_amount: float, total_amount: float})" + "Dataset(num_blocks=2, num_rows=2749936, schema={vendor_id: string, pickup_at: timestamp[us], dropoff_at: timestamp[us], passenger_count: int8, trip_distance: float, pickup_longitude: float, pickup_latitude: float, rate_code_id: null, store_and_fwd_flag: string, dropoff_longitude: float, dropoff_latitude: float, payment_type: string, fare_amount: float, extra: float, mta_tax: float, tip_amount: float, tolls_amount: float, total_amount: float})" ] }, "execution_count": 6, @@ -201,7 +201,7 @@ } ], "source": [ - "# Display some metadata about the datastream.\n", + "# Display some metadata about the dataset.\n", "ds" ] }, @@ -256,7 +256,7 @@ "id": "a3fb551b", "metadata": {}, "source": [ - "To get a better sense of the data size, we can calculate the size in bytes of the full datastream. 
Note that for Parquet files, this size-in-bytes will be pulled from the Parquet metadata (not triggering a data read), and therefore might be significantly different than the in-memory size!" + "To get a better sense of the data size, we can calculate the size in bytes of the full dataset. Note that for Parquet files, this size-in-bytes will be pulled from the Parquet metadata (not triggering a data read), and therefore might be significantly different than the in-memory size!" ] }, { @@ -287,7 +287,7 @@ "id": "cb4515bf", "metadata": {}, "source": [ - "In order to get the in-memory size, we can trigger full reading of the datastream and inspect the size in bytes." + "In order to get the in-memory size, we can trigger full reading of the dataset and inspect the size in bytes." ] }, { @@ -330,7 +330,7 @@ "For the NYC taxi dataset, instead of reading individual per-month Parquet files, we can read the entire 2009 directory.\n", "\n", "```{warning}\n", - "This could be a lot of data (downsampled with 0.01 ratio leads to ~50.2 MB on disk, ~147 MB in memory), so be careful triggering full reads on a limited-memory machine! This is one place where Datastream's lazy reading comes in handy: Datastream will not execute any read tasks eagerly and will execute the minimum number of file reads to satisfy downstream operations, which allows us to inspect a subset of the data without having to read the entire dataset.\n", + "This could be a lot of data (downsampled with 0.01 ratio leads to ~50.2 MB on disk, ~147 MB in memory), so be careful triggering full reads on a limited-memory machine! 
This is one place where Dataset's lazy reading comes in handy: Dataset will not execute any read tasks eagerly and will execute the minimum number of file reads to satisfy downstream operations, which allows us to inspect a subset of the data without having to read the entire dataset.\n", "```" ] }, @@ -356,7 +356,7 @@ "id": "6616a15d", "metadata": {}, "source": [ - "The metadata that Datastream prints in its repr is guaranteed to not trigger reads of all files; data such as the row count and the schema is pulled directly from the Parquet metadata." + "The metadata that Dataset prints in its repr is guaranteed to not trigger reads of all files; data such as the row count and the schema is pulled directly from the Parquet metadata." ] }, { @@ -385,7 +385,7 @@ "id": "e61dd6d7", "metadata": {}, "source": [ - "That's a lot of rows! Since we're not going to use this full-year data, let's now delete this datastream to free up some memory in our Ray cluster." + "That's a lot of rows! Since we're not going to use this full-year data, let's now delete this dataset to free up some memory in our Ray cluster." ] }, { @@ -604,7 +604,7 @@ "Note that Ray Data' Parquet reader supports projection (column selection) and row filter pushdown, where we can push the above column selection and the row-based filter to the Parquet read. If we specify column selection at Parquet read time, the unselected columns won't even be read from disk!\n", "\n", "The row-based filter is specified via\n", - "[Arrow's dataset field expressions](https://arrow.apache.org/docs/6.0/python/generated/pyarrow.dataset.Expression.html#pyarrow.dataset.Expression). See the {ref}`feature guide for reading Parquet data ` for more information." + "[Arrow's dataset field expressions](https://arrow.apache.org/docs/6.0/python/generated/pyarrow.dataset.Expression.html#pyarrow.dataset.Expression). See the {ref}`feature guide for reading Parquet data ` for more information." 
] }, { @@ -619,14 +619,14 @@ "name": "stderr", "output_type": "stream", "text": [ - "⚠️ The number of blocks in this datastream (2) limits its parallelism to 2 concurrent tasks. This is much less than the number of available CPU slots in the cluster. Use `.repartition(n)` to increase the number of datastream blocks.\n", + "⚠️ The number of blocks in this dataset (2) limits its parallelism to 2 concurrent tasks. This is much less than the number of available CPU slots in the cluster. Use `.repartition(n)` to increase the number of dataset blocks.\n", "Read progress: 100%|██████████| 2/2 [00:00<00:00, 9.19it/s]\n" ] }, { "data": { "text/plain": [ - "Datastream(num_blocks=2, num_rows=2749842, schema={passenger_count: int8, trip_distance: float})" + "Dataset(num_blocks=2, num_rows=2749842, schema={passenger_count: int8, trip_distance: float})" ] }, "execution_count": 19, @@ -663,7 +663,7 @@ "metadata": {}, "outputs": [], "source": [ - "# Delete the pushdown datastream. Deleting the Datastream object\n", + "# Delete the pushdown dataset. Deleting the Dataset object\n", "# will release the underlying memory in the cluster.\n", "del pushdown_ds" ] @@ -677,7 +677,7 @@ "\n", "Now that we've learned more about our data and we have cleaned up our dataset a bit, we now look at how we can feed this dataset into some dummy model trainers.\n", "\n", - "First, let's do a full global random shuffle of the datastream to decorrelate these samples." + "First, let's do a full global random shuffle of the dataset to decorrelate these samples." 
] }, { @@ -706,7 +706,7 @@ "id": "ff05b6ea", "metadata": {}, "source": [ - "We define a dummy ``Trainer`` actor, where each trainer will consume a datastream shard in batches and simulate model training.\n", + "We define a dummy ``Trainer`` actor, where each trainer will consume a dataset shard in batches and simulate model training.\n", "\n", ":::{note}\n", "In a real training workflow, we would feed ``ds`` to {ref}`Ray Train `, which would do this sharding and creation of training actors for us, under the hood.\n" @@ -738,7 +738,7 @@ " def __init__(self, rank: int):\n", " pass\n", "\n", - " def train(self, shard: ray.data.Datastream) -> int:\n", + " def train(self, shard: ray.data.Dataset) -> int:\n", " for batch in shard.iter_batches(batch_size=256):\n", " pass\n", " return shard.count()\n", @@ -752,7 +752,7 @@ "id": "9a1afb70", "metadata": {}, "source": [ - "Next, we split the datastream into ``len(trainers)`` shards, ensuring that the shards are of equal size." + "Next, we split the dataset into ``len(trainers)`` shards, ensuring that the shards are of equal size." 
] }, { @@ -764,10 +764,10 @@ { "data": { "text/plain": [ - "[Datastream(num_blocks=1, num_rows=687460, schema={vendor_id: object, pickup_at: datetime64[ns], dropoff_at: datetime64[ns], passenger_count: int8, trip_distance: float32, pickup_longitude: float32, pickup_latitude: float32, rate_code_id: object, dropoff_longitude: float32, dropoff_latitude: float32, payment_type: object, fare_amount: float32, extra: float32, tip_amount: float32, tolls_amount: float32, total_amount: float32}),\n", - " Datastream(num_blocks=1, num_rows=687460, schema={vendor_id: object, pickup_at: datetime64[ns], dropoff_at: datetime64[ns], passenger_count: int8, trip_distance: float32, pickup_longitude: float32, pickup_latitude: float32, rate_code_id: object, dropoff_longitude: float32, dropoff_latitude: float32, payment_type: object, fare_amount: float32, extra: float32, tip_amount: float32, tolls_amount: float32, total_amount: float32}),\n", - " Datastream(num_blocks=2, num_rows=687460, schema={vendor_id: object, pickup_at: datetime64[ns], dropoff_at: datetime64[ns], passenger_count: int8, trip_distance: float32, pickup_longitude: float32, pickup_latitude: float32, rate_code_id: object, dropoff_longitude: float32, dropoff_latitude: float32, payment_type: object, fare_amount: float32, extra: float32, tip_amount: float32, tolls_amount: float32, total_amount: float32}),\n", - " Datastream(num_blocks=1, num_rows=687460, schema={vendor_id: object, pickup_at: datetime64[ns], dropoff_at: datetime64[ns], passenger_count: int8, trip_distance: float32, pickup_longitude: float32, pickup_latitude: float32, rate_code_id: object, dropoff_longitude: float32, dropoff_latitude: float32, payment_type: object, fare_amount: float32, extra: float32, tip_amount: float32, tolls_amount: float32, total_amount: float32})]" + "[Dataset(num_blocks=1, num_rows=687460, schema={vendor_id: object, pickup_at: datetime64[ns], dropoff_at: datetime64[ns], passenger_count: int8, trip_distance: float32, pickup_longitude: 
float32, pickup_latitude: float32, rate_code_id: object, dropoff_longitude: float32, dropoff_latitude: float32, payment_type: object, fare_amount: float32, extra: float32, tip_amount: float32, tolls_amount: float32, total_amount: float32}),\n", + " Dataset(num_blocks=1, num_rows=687460, schema={vendor_id: object, pickup_at: datetime64[ns], dropoff_at: datetime64[ns], passenger_count: int8, trip_distance: float32, pickup_longitude: float32, pickup_latitude: float32, rate_code_id: object, dropoff_longitude: float32, dropoff_latitude: float32, payment_type: object, fare_amount: float32, extra: float32, tip_amount: float32, tolls_amount: float32, total_amount: float32}),\n", + " Dataset(num_blocks=2, num_rows=687460, schema={vendor_id: object, pickup_at: datetime64[ns], dropoff_at: datetime64[ns], passenger_count: int8, trip_distance: float32, pickup_longitude: float32, pickup_latitude: float32, rate_code_id: object, dropoff_longitude: float32, dropoff_latitude: float32, payment_type: object, fare_amount: float32, extra: float32, tip_amount: float32, tolls_amount: float32, total_amount: float32}),\n", + " Dataset(num_blocks=1, num_rows=687460, schema={vendor_id: object, pickup_at: datetime64[ns], dropoff_at: datetime64[ns], passenger_count: int8, trip_distance: float32, pickup_longitude: float32, pickup_latitude: float32, rate_code_id: object, dropoff_longitude: float32, dropoff_latitude: float32, payment_type: object, fare_amount: float32, extra: float32, tip_amount: float32, tolls_amount: float32, total_amount: float32})]" ] }, "execution_count": 24, @@ -830,7 +830,7 @@ "```{tip}\n", "Refer to the blog on [Model Batch Inference in Ray](https://www.anyscale.com/blog/model-batch-inference-in-ray-actors-actorpool-and-datasets) for an overview of batch inference strategies in Ray and additional examples.\n", "```\n", - "After we've trained a model, we may want to perform batch (offline) inference on such a tabular dataset. 
With Ray Data, this is as easy as a {meth}`ds.map_batches() ` call!\n", + "After we've trained a model, we may want to perform batch (offline) inference on such a tabular dataset. With Ray Data, this is as easy as a {meth}`ds.map_batches() ` call!\n", "\n", "First, we define a callable class that will cache the loading of the model in its constructor." ] @@ -863,7 +863,7 @@ "id": "0c1ba955", "metadata": {}, "source": [ - "``BatchInferModel``'s constructor will only be called once per actor worker when using the actor pool compute strategy in {meth}`ds.map_batches() `." + "``BatchInferModel``'s constructor will only be called once per actor worker when using the actor pool compute strategy in {meth}`ds.map_batches() `." ] }, { diff --git a/doc/source/data/examples/ocr_example.ipynb b/doc/source/data/examples/ocr_example.ipynb index 6f12fb052583..c36825ba413d 100644 --- a/doc/source/data/examples/ocr_example.ipynb +++ b/doc/source/data/examples/ocr_example.ipynb @@ -34,9 +34,9 @@ "## Overview\n", "\n", "This tutorial will cover:\n", - " - Creating a Datastream that represents the images in the dataset\n", - " - Running the computationally expensive OCR process on each image in the datastream in parallel\n", - " - Filtering the datastream by keeping only images that contain text\n", + " - Creating a Dataset that represents the images in the dataset\n", + " - Running the computationally expensive OCR process on each image in the dataset in parallel\n", + " - Filtering the dataset by keeping only images that contain text\n", " - Performing various NLP operations on the text\n", "\n", "## Walkthrough\n", @@ -78,7 +78,7 @@ "\n", "### Running the OCR software on the data\n", "\n", - "We can now use the {meth}`ray.data.read_binary_files ` function to read all the images from S3. We set the `include_paths=True` option to create a datastream of the S3 paths and image contents. 
We then run the {meth}`ds.map ` function on this datastream to execute the actual OCR process on each file and convert the screen shots into text. This will create a tabular datastream with columns `path` and `text`, see also [](transforming_data).\n", + "We can now use the {meth}`ray.data.read_binary_files ` function to read all the images from S3. We set the `include_paths=True` option to create a dataset of the S3 paths and image contents. We then run the {meth}`ds.map ` function on this dataset to execute the actual OCR process on each file and convert the screen shots into text. This will create a tabular dataset with columns `path` and `text`, see also [](transforming_data).\n", "\n", "````{note}\n", "If you want to load the data from a private bucket, you have to run\n", @@ -113,7 +113,7 @@ "results = ds.map(perform_ocr)\n", "```\n", "\n", - "Let us have a look at some of the data points with the {meth}`take ` function." + "Let us have a look at some of the data points with the {meth}`take ` function." ] }, { @@ -134,10 +134,10 @@ "### Saving and loading the result of the OCR run\n", "\n", "````{note}\n", - "Saving the datastream is optional, you can also continue with the in-memory data without persisting it to storage.\n", + "Saving the dataset is optional, you can also continue with the in-memory data without persisting it to storage.\n", "````\n", "\n", - "We can save the result of running tesseract on the datastream on disk so we can read it out later if we want to re-run the NLP analysis without needing to re-run the OCR (which is very expensive on the whole datastream). This can be done with the {meth}`write_parquet ` function:\n", + "We can save the result of running tesseract on the dataset on disk so we can read it out later if we want to re-run the NLP analysis without needing to re-run the OCR (which is very expensive on the whole dataset). 
This can be done with the {meth}`write_parquet ` function:\n", "\n", "```python\n", "import os\n", @@ -221,7 +221,7 @@ "source": [ "It gives both the language and a confidence score for that language.\n", "\n", - "In order to run the code on the dataset, we should use Ray Data' built in support for actors since the `nlp` object is not serializable and we want to avoid having to recreate it for each individual sentence. We also batch the computation with the {meth}`map_batches ` function to ensure spaCy can use more efficient vectorized operations where available:" + "In order to run the code on the dataset, we should use Ray Data' built in support for actors since the `nlp` object is not serializable and we want to avoid having to recreate it for each individual sentence. We also batch the computation with the {meth}`map_batches ` function to ensure spaCy can use more efficient vectorized operations where available:" ] }, { diff --git a/doc/source/data/examples/random-access.rst b/doc/source/data/examples/random-access.rst index da96549d0bda..3c28fc8d1c3f 100644 --- a/doc/source/data/examples/random-access.rst +++ b/doc/source/data/examples/random-access.rst @@ -4,7 +4,7 @@ Random Data Access (Experimental) --------------------------------- -Any Arrow-format datastream can be enabled for random access by calling ``ds.to_random_access_dataset(key="col_name")``. This partitions the data across the cluster by the given sort key, providing efficient random access to records via binary search. A number of worker actors are created, each of which has zero-copy access to the underlying sorted data blocks of the Datastream. +Any Arrow-format dataset can be enabled for random access by calling ``ds.to_random_access_dataset(key="col_name")``. This partitions the data across the cluster by the given sort key, providing efficient random access to records via binary search. 
A number of worker actors are created, each of which has zero-copy access to the underlying sorted data blocks of the Dataset. .. code-block:: python @@ -13,7 +13,7 @@ Any Arrow-format datastream can be enabled for random access by calling ``ds.to_ ds = ds.add_column("embedding", lambda b: b["id"] ** 2) # -> schema={id: int64, embedding: int64} - # Enable random access on the datastream. This launches a number of actors + # Enable random access on the dataset. This launches a number of actors # spread across the cluster that serve random access queries to the data. rmap = ds.to_random_access_dataset(key="id", num_workers=4) @@ -29,7 +29,7 @@ Any Arrow-format datastream can be enabled for random access by calling ``ds.to_ rmap.multiget([4, 2]) # -> [{"id": 4, "embedding": 16}, {"id": 2, "embedding": 4}] -Similar to Datastream, a RandomAccessDataset can be passed to and used from any Ray actor or task. +Similar to Dataset, a RandomAccessDataset can be passed to and used from any Ray actor or task. Architecture ------------ diff --git a/doc/source/data/faq.rst b/doc/source/data/faq.rst index 1ce687709dd5..2a49e3075389 100644 --- a/doc/source/data/faq.rst +++ b/doc/source/data/faq.rst @@ -62,7 +62,7 @@ What should I use Ray Data for? Ray Data is the standard way to load, process, and exchange data in Ray libraries and applications, with a particular emphasis on ease-of-use, performance, and -scalability in both data size and cluster size. Within that, Datastreams is designed for +scalability in both data size and cluster size. Within that, Datasets is designed for two core uses cases: * **ML (training) ingest:** Loading, preprocessing, and ingesting data into one or more @@ -70,7 +70,7 @@ two core uses cases: * **Batch inference:** Loading, preprocessing, and performing parallel batch inference on data. 
-We have designed the Datastream APIs, data model, execution model, and +We have designed the Dataset APIs, data model, execution model, and integrations with these use cases in mind, and have captured these use cases in large-scale nightly tests to ensure that we're hitting our scalability, performance, and efficiency marks for these use cases. @@ -80,13 +80,13 @@ What should I not use Ray Data for? Ray Data is not meant to be used for generic ETL pipelines (like Spark) or scalable data science (like Dask, Modin, or Mars). However, each of these frameworks -are :ref:`runnable on Ray `, and Datastreams integrates tightly with +are :ref:`runnable on Ray `, and Datasets integrates tightly with these frameworks, allowing for efficient exchange of distributed data partitions often with zero-copy. Check out the -:ref:`datastream creation feature guide ` to learn +:ref:`dataset creation feature guide ` to learn more about these integrations. -Datastreams is specifically targeting +Datasets is specifically targeting the ML ingest and batch inference use cases, with focus on data loading and last-mile preprocessing for ML pipelines. @@ -94,19 +94,19 @@ For data loading for training, how does Ray Data compare to other solutions? ================================================================================ There are several ML framework-specific and general solutions for loading data into -model trainers. Below, we summarize some advantages Datastreams offers over these more +model trainers. Below, we summarize some advantages Datasets offers over these more specific ingest frameworks. Torch datasets (and data loaders) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -* **Framework-agnostic:** Datastreams is framework-agnostic and portable between different +* **Framework-agnostic:** Datasets is framework-agnostic and portable between different distributed training frameworks, while `Torch datasets `__ are specific to Torch. 
* **No built-in IO layer:** Torch datasets do not have an I/O layer for common file formats or in-memory exchange with other frameworks; users need to bring in other libraries and roll this integration themselves. -* **Generic distributed data processing:** Datastreams is more general: it can handle +* **Generic distributed data processing:** Datasets is more general: it can handle generic distributed operations, including global per-epoch shuffling, which would otherwise have to be implemented by stitching together two separate systems. Torch datasets would require such stitching for anything more involved @@ -114,22 +114,22 @@ Torch datasets (and data loaders) shards. See our `blog post `__ on why this shared infrastructure is important for 3rd generation ML architectures. -* **Lower overhead:** Datastreams is lower overhead: it supports zero-copy exchange between +* **Lower overhead:** Datasets is lower overhead: it supports zero-copy exchange between processes, in contrast to the multi-processing-based pipelines of Torch datasets. TensorFlow datasets ~~~~~~~~~~~~~~~~~~~ -* **Framework-agnostic:** Datastreams is framework-agnostic and portable between different +* **Framework-agnostic:** Datasets is framework-agnostic and portable between different distributed training frameworks, while `TensorFlow datasets `__ is specific to TensorFlow. -* **Unified single-node and distributed:** Datastreams unifies single and multi-node training under +* **Unified single-node and distributed:** Datasets unifies single and multi-node training under the same abstraction. TensorFlow datasets presents `separate concepts `__ for distributed data loading and prevents code from being seamlessly scaled to larger clusters. 
-* **Generic distributed data processing:** Datastreams is more general: it can handle +* **Generic distributed data processing:** Datasets is more general: it can handle generic distributed operations, including global per-epoch shuffling, which would otherwise have to be implemented by stitching together two separate systems. TensorFlow datasets would require such stitching for anything more involved @@ -137,7 +137,7 @@ TensorFlow datasets shards; only file interleaving is supported. See our `blog post `__ on why this shared infrastructure is important for 3rd generation ML architectures. -* **Lower overhead:** Datastreams is lower overhead: it supports zero-copy exchange between +* **Lower overhead:** Datasets is lower overhead: it supports zero-copy exchange between processes, in contrast to the multi-processing-based pipelines of TensorFlow datasets. Petastorm @@ -145,7 +145,7 @@ Petastorm * **Supported data types:** `Petastorm `__ only supports Parquet data, while Ray Data supports many file formats. -* **Lower overhead:** Datastreams is lower overhead: it supports zero-copy exchange between +* **Lower overhead:** Datasets is lower overhead: it supports zero-copy exchange between processes, in contrast to the multi-processing-based pipelines used by Petastorm. * **No data processing:** Petastorm does not expose any data processing APIs. @@ -154,9 +154,9 @@ NVTabular * **Supported data types:** `NVTabular `__ only supports tabular (Parquet, CSV, Avro) data, while Ray Data supports many other file formats. -* **Lower overhead:** Datastreams is lower overhead: it supports zero-copy exchange between +* **Lower overhead:** Datasets is lower overhead: it supports zero-copy exchange between processes, in contrast to the multi-processing-based pipelines used by Petastorm. -* **Heterogeneous compute:** NVTabular doesn't support mixing heterogeneous resources in datastream transforms (e.g. 
+* **Heterogeneous compute:** NVTabular doesn't support mixing heterogeneous resources in dataset transforms (e.g. both CPU and GPU transformations), while Ray Data supports this. * **ML-specific ops:** NVTabular has a bunch of great ML-specific preprocessing operations; this is currently WIP for Ray Data: @@ -168,7 +168,7 @@ For batch (offline) inference, why should I use Ray Data instead of an actor poo ====================================================================================== Ray Data provides its own autoscaling actor pool via the actor compute strategy for -:meth:`ds.map_batches() `, allowing you to perform CPU- or +:meth:`ds.map_batches() `, allowing you to perform CPU- or GPU-based batch inference on this actor pool. Using this instead of the `Ray actor pool `__ has a few advantages: @@ -193,7 +193,7 @@ Please see this `blog post on Ray Data `__ for more information on this benchmarking. -The new streaming backend for Ray Data (Datastream) supports throughputs of up to +The new streaming backend for Ray Data (Dataset) supports throughputs of up to hundreds of gigabytes per second in a large cluster. Does all of my data need to fit into memory? @@ -202,20 +202,20 @@ Does all of my data need to fit into memory? No, with Ray's support for :ref:`spilling objects to disk `, you only need to be able to fit your data into memory OR disk. However, keeping your data in distributed memory may speed up your workload, which can be done on arbitrarily large -datastreams by windowing them, creating pipelines. +datasets by windowing them, creating pipelines. How much data can Ray Data handle? ================================== Ray Data has been tested at multi-petabyte scale for I/O and multi-terabyte scale for shuffling, and we're continuously working on improving this scalability. 
If you have a -very large datastream that you'd like to process and you're running into scalability +very large dataset that you'd like to process and you're running into scalability issues, please reach out to us on our `Discourse `__. How do I get my data into Ray Data? =================================== -Ray Data supports creating a ``Datastream`` from local and distributed in-memory data +Ray Data supports creating a ``Dataset`` from local and distributed in-memory data via integrations with common data libraries, as well as from local and remote storage systems via our support for many common file formats and storage backends. @@ -227,7 +227,7 @@ When should I use global per-epoch shuffling? Background ~~~~~~~~~~ -When training a machine learning model, shuffling your training datastream is important in +When training a machine learning model, shuffling your training dataset is important in general in order to ensure that your model isn't overfitting on some unintended pattern in your data, e.g. sorting on the label column, or time-correlated samples. Per-epoch shuffling in particular can improve your model's precision gain per epoch by reducing @@ -238,8 +238,8 @@ out of such a gradient rut. In the distributed data-parallel training case, the status quo solution is typically to have a per-shard in-memory shuffle buffer that you fill up and pop random batches from, without mixing data across shards between epochs. Ray Data also offers fully global random shuffling via -:meth:`ds.random_shuffle() `, and doing so on an -epoch-repeated datastream pipeline to provide global per-epoch shuffling is as simple as +:meth:`ds.random_shuffle() `, and doing so on an +epoch-repeated dataset pipeline to provide global per-epoch shuffling is as simple as ``ray.data.read().repeat().random_shuffle_each_window()``. But when should you opt for global per-epoch shuffling instead of local shuffle buffer shuffling? 
@@ -253,7 +253,7 @@ gradient-descent-based model trainers benefiting from improved (global) shuffle and we've found that this is particular pronounced for tabular data/models in practice. However, the more global your shuffle is, the expensive the shuffling operation, and this compounds when doing distributed data-parallel training on a multi-node cluster due -to data transfer costs, and this cost can be prohibitive when using very large datastreams. +to data transfer costs, and this cost can be prohibitive when using very large datasets. The best route for determining the best tradeoff between preprocessing time + cost and per-epoch shuffle quality is to measure the precision gain per training step for your diff --git a/doc/source/data/getting-started.rst b/doc/source/data/getting-started.rst index 7be122b1eeb4..f1db53b5e81d 100644 --- a/doc/source/data/getting-started.rst +++ b/doc/source/data/getting-started.rst @@ -3,17 +3,11 @@ Getting Started =============== -Ray Data's main abstraction is a :class:`Datastream `, which -is a distributed data transformation pipeline. Datastream provides APIs for loading +Ray Data's main abstraction is a :class:`Dataset `, which +is a distributed data transformation pipeline. Dataset provides APIs for loading external data into Ray in *blocks*, and it exposes APIs for streaming processing of these data blocks in the cluster. -.. tip:: - - Ray Data is for processing of *finite* datasets for ML training and - batch inference. This is in contrast to frameworks such as Apache Flink that - process infinite data streams. - Install Ray Data ---------------- @@ -26,10 +20,10 @@ To install Ray Data, run: To learn more about installing Ray and its libraries, read :ref:`Installing Ray `. -Create a datastream +Create a dataset ------------------- -Create datastreams from on-disk files, Python objects, and cloud storage services like S3. +Create datasets from on-disk files, Python objects, and cloud storage services like S3. 
Ray Data can read from any `filesystem supported by Arrow `__. @@ -45,14 +39,14 @@ Ray Data can read from any `filesystem supported by Arrow {'sepal length (cm)': 5.1, 'sepal width (cm)': 3.5, 'petal length (cm)': 1.4, 'petal width (cm)': 0.2, 'target': 0} -To learn more about creating datastreams, read +To learn more about creating datasets, read :ref:`Loading data `. -Transform the datastream +Transform the dataset ------------------------ -Apply :ref:`user-defined functions ` (UDFs) to -transform datastreams. Ray executes transformations in parallel for performance. +Apply :ref:`user-defined functions ` (UDFs) to +transform datasets. Ray executes transformations in parallel for performance. .. testcode:: @@ -71,7 +65,7 @@ transform datastreams. Ray executes transformations in parallel for performance. .. testoutput:: - MaterializedDatastream( + MaterializedDataset( num_blocks=1, num_rows=150, schema={ @@ -84,14 +78,14 @@ transform datastreams. Ray executes transformations in parallel for performance. } ) -To learn more about transforming datastreams, read +To learn more about transforming datasets, read :ref:`Transforming data `. -Consume the datastream +Consume the dataset ---------------------- -Pass datastreams to Ray tasks or actors, and access records with methods like -:meth:`~ray.data.Datastream.take_batch` and :meth:`~ray.data.Datastream.iter_batches`. +Pass datasets to Ray tasks or actors, and access records with methods like +:meth:`~ray.data.Dataset.take_batch` and :meth:`~ray.data.Dataset.iter_batches`. .. tab-set:: @@ -116,7 +110,7 @@ Pass datastreams to Ray tasks or actors, and access records with methods like .. 
testcode:: @ray.remote - def consume(ds: ray.data.Datastream) -> int: + def consume(ds: ray.data.Dataset) -> int: num_batches = 0 for batch in ds.iter_batches(batch_size=8): num_batches += 1 @@ -140,13 +134,13 @@ Pass datastreams to Ray tasks or actors, and access records with methods like ray.get([w.train.remote(s) for w, s in zip(workers, shards)]) -To learn more about consuming datastreams, read +To learn more about consuming datasets, read :ref:`Consuming data `. -Save the datastream +Save the dataset ------------------- -Call methods like :meth:`~ray.data.Datastream.write_parquet` to save datastream contents to local +Call methods like :meth:`~ray.data.Dataset.write_parquet` to save dataset contents to local or remote filesystems. .. testcode:: @@ -163,4 +157,4 @@ or remote filesystems. ['..._000000.parquet'] -To learn more about saving datastream contents, read :ref:`Saving data `. +To learn more about saving dataset contents, read :ref:`Saving data `. diff --git a/doc/source/data/glossary.rst b/doc/source/data/glossary.rst index 35c781e8c132..084398e1566d 100644 --- a/doc/source/data/glossary.rst +++ b/doc/source/data/glossary.rst @@ -1,4 +1,4 @@ -.. _datastreams_glossary: +.. _datasets_glossary: ===================== Ray Data Glossary @@ -10,20 +10,20 @@ Ray Data Glossary The way batches of data are represented. Set ``batch_format`` in methods like - :meth:`Datastream.iter_batches() ` and - :meth:`Datastream.map_batches() ` to specify the + :meth:`Dataset.iter_batches() ` and + :meth:`Dataset.map_batches() ` to specify the batch type. .. doctest:: >>> import ray - >>> # Datastream is executed by streaming executor by default, which doesn't + >>> # Dataset is executed by streaming executor by default, which doesn't >>> # preserve the order, so we explicitly set it here. 
>>> ray.data.context.DataContext.get_current().execution_options.preserve_order = True - >>> datastream = ray.data.range(10) - >>> next(iter(datastream.iter_batches(batch_format="numpy", batch_size=5))) + >>> dataset = ray.data.range(10) + >>> next(iter(dataset.iter_batches(batch_format="numpy", batch_size=5))) {'id': array([0, 1, 2, 3, 4])} - >>> next(iter(datastream.iter_batches(batch_format="pandas", batch_size=5))) + >>> next(iter(dataset.iter_batches(batch_format="pandas", batch_size=5))) id 0 0 1 1 @@ -32,10 +32,10 @@ Ray Data Glossary 4 4 To learn more about batch formats, read - :ref:`Configuring batch formats `. + :ref:`Configuring batch formats `. Block - A processing unit of data. A :class:`~ray.data.Datastream` consists of a + A processing unit of data. A :class:`~ray.data.Dataset` consists of a collection of blocks. Under the hood, :term:`Ray Data ` partition :term:`records ` @@ -58,14 +58,14 @@ Ray Data Glossary Its utility is as the last-mile bridge from ETL pipeline outputs to distributed ML applications and libraries in Ray. - To learn more about Ray Data, read :ref:`Key Concepts `. + To learn more about Ray Data, read :ref:`Key Concepts `. - Datastream (object) + Dataset (object) A class that produces a sequence of distributed data blocks. - :class:`~ray.data.Datastream` exposes methods to read, transform, and consume data at scale. + :class:`~ray.data.Dataset` exposes methods to read, transform, and consume data at scale. - To learn more about Datastreams and the operations they support, read the :ref:`Datastreams API Reference `. + To learn more about Datasets and the operations they support, read the :ref:`Datasets API Reference `. Datasource A :class:`~ray.data.Datasource` specifies how to read and write from @@ -83,7 +83,7 @@ Ray Data Glossary A single data item, which is always a ``Dict[str, Any]``. Schema - The name and type of the datastream fields. + The name and type of the dataset fields. 
- To determine a datastream's schema, call - :meth:`Datastream.schema() `. + To determine a dataset's schema, call + :meth:`Dataset.schema() `. diff --git a/doc/source/data/key-concepts.rst b/doc/source/data/key-concepts.rst index 6469021530e9..e430842e598f 100644 --- a/doc/source/data/key-concepts.rst +++ b/doc/source/data/key-concepts.rst @@ -4,25 +4,25 @@ Key Concepts ============ -.. _datastream_concept: +.. _dataset_concept: ---------- -Datastream +Dataset ---------- -A :term:`Datastream ` operates over a sequence of Ray object references to :term:`blocks `. +A :term:`Dataset ` operates over a sequence of Ray object references to :term:`blocks `. Each block holds a set of records in an `Arrow table `_ or `pandas DataFrame `_. -Having multiple blocks in a datastream allows for parallel transformation and ingest. +Having multiple blocks in a dataset allows for parallel transformation and ingest. -For ML use cases, Datastream natively supports mixing tensors with tabular data. To +For ML use cases, Dataset natively supports mixing tensors with tabular data. To learn more, read :ref:`Working with tensor data `. -The following figure visualizes a datastream with three blocks, each holding 1000 rows. Note that certain blocks -may not be computed yet. Normally, callers iterate over datastream blocks in a streaming fashion, so that not all +The following figure visualizes a dataset with three blocks, each holding 1000 rows. Note that certain blocks +may not be computed yet. Normally, callers iterate over dataset blocks in a streaming fashion, so that not all blocks need to be materialized in the cluster memory at once. -.. image:: images/datastream-arch.svg +.. image:: images/dataset-arch.svg .. https://docs.google.com/drawings/d/1PmbDvHRfVthme9XD7EYM-LIHPXtHdOfjCbc1SCsM64k/edit @@ -30,78 +30,78 @@ blocks need to be materialized in the cluster memory at once. Reading Data ============ -Datastream uses Ray tasks to read data from remote storage in parallel. 
Each read task reads one or more files and produces an output block: +Dataset uses Ray tasks to read data from remote storage in parallel. Each read task reads one or more files and produces an output block: -.. image:: images/datastream-read.svg +.. image:: images/dataset-read.svg :align: center .. https://docs.google.com/drawings/d/15B4TB8b5xN15Q9S8-s0MjW6iIvo_PrH7JtV1fL123pU/edit -You can manually specify the number of read tasks, but the final parallelism is always capped by the number of files in the underlying datastream. +You can manually specify the number of read tasks, but the final parallelism is always capped by the number of files in the underlying dataset. -For an in-depth guide on creating datastreams, read :ref:`Loading Data `. +For an in-depth guide on creating datasets, read :ref:`Loading Data `. Transforming Data ================= -Datastream uses either Ray tasks or Ray actors to transform data blocks. By default, it uses tasks. +Dataset uses either Ray tasks or Ray actors to transform data blocks. By default, it uses tasks. To use Actors, pass an :class:`ActorPoolStrategy` to ``compute`` in methods like -:meth:`~ray.data.Datastream.map_batches`. :class:`ActorPoolStrategy` creates an autoscaling +:meth:`~ray.data.Dataset.map_batches`. :class:`ActorPoolStrategy` creates an autoscaling pool of Ray actors. This allows you to cache expensive state initialization (e.g., model loading for GPU-based tasks). -.. image:: images/datastream-map.svg +.. image:: images/dataset-map.svg :align: center .. https://docs.google.com/drawings/d/12STHGV0meGWfdWyBlJMUgw7a-JcFPu9BwSOn5BjRw9k/edit -For an in-depth guide on transforming datastreams, read :ref:`Transforming Data `. +For an in-depth guide on transforming datasets, read :ref:`Transforming Data `. Shuffling Data ============== -Operations like :meth:`~ray.data.Datastream.sort` and :meth:`~ray.data.Datastream.groupby` -require blocks to be partitioned by value or *shuffled*. 
Datastream uses tasks to shuffle blocks in a map-reduce +Operations like :meth:`~ray.data.Dataset.sort` and :meth:`~ray.data.Dataset.groupby` +require blocks to be partitioned by value or *shuffled*. Dataset uses tasks to shuffle blocks in a map-reduce style: map tasks partition blocks by value and then reduce tasks merge co-partitioned blocks. -Call :meth:`~ray.data.Datastream.repartition` to change the number of blocks in a :class:`~ray.data.Datastream`. +Call :meth:`~ray.data.Dataset.repartition` to change the number of blocks in a :class:`~ray.data.Dataset`. Repartition has two modes: * ``shuffle=False`` - performs the minimal data movement needed to equalize block sizes * ``shuffle=True`` - performs a full distributed shuffle -.. image:: images/datastream-shuffle.svg +.. image:: images/dataset-shuffle.svg :align: center .. https://docs.google.com/drawings/d/132jhE3KXZsf29ho1yUdPrCHB9uheHBWHJhDQMXqIVPA/edit -Datastream can shuffle multi-terabyte datasets, leveraging the Ray object store for disk spilling. For an in-depth guide on shuffle performance, read :ref:`Performance Tips and Tuning `. -Note that operations like shuffle materialize the entire Datastream prior to their execution (shuffle execution is not streamed through memory). +Dataset can shuffle multi-terabyte datasets, leveraging the Ray object store for disk spilling. For an in-depth guide on shuffle performance, read :ref:`Performance Tips and Tuning `. +Note that operations like shuffle materialize the entire Dataset prior to their execution (shuffle execution is not streamed through memory). Iteration and materialization ============================= -Most transformations on a datastream are lazy. They don't execute until you iterate over the datastream or call -:meth:`Datastream.materialize() `. When a Datastream is materialized, its -type becomes a `MaterializedDatastream`, which indicates that all its blocks are materialized in Ray +Most transformations on a dataset are lazy. 
They don't execute until you iterate over the dataset or call +:meth:`Dataset.materialize() `. When a Dataset is materialized, its +type becomes a `MaterializedDataset`, which indicates that all its blocks are materialized in Ray object store memory. -Datastream transformations are executed in a streaming way, incrementally on the data and +Dataset transformations are executed in a streaming way, incrementally on the data and with operators processed in parallel, see :ref:`Streaming Execution `. -Datastreams and MaterializedDatastreams can be freely passed between Ray tasks, actors, and libraries without +Datasets and MaterializedDatasets can be freely passed between Ray tasks, actors, and libraries without incurring copies of the underlying block data (pass by reference semantics). Fault tolerance =============== -Datastream performs *lineage reconstruction* to recover data. If an application error or -system failure occurs, Datastream recreates lost blocks by re-executing tasks. If ``compute=ActorPoolStrategy(size=n)`` is used, then Ray +Dataset performs *lineage reconstruction* to recover data. If an application error or +system failure occurs, Dataset recreates lost blocks by re-executing tasks. If ``compute=ActorPoolStrategy(size=n)`` is used, then Ray restarts the actor used for computing the block prior to re-executing the task. -Fault tolerance is not supported if the original worker process that created the Datastream dies. -This is because the creator stores the metadata for the :ref:`objects ` that comprise the Datastream. +Fault tolerance is not supported if the original worker process that created the Dataset dies. +This is because the creator stores the metadata for the :ref:`objects ` that comprise the Dataset. 
diff --git a/doc/source/data/loading-data.rst b/doc/source/data/loading-data.rst index e82d6301850b..8f005e7b6b7f 100644 --- a/doc/source/data/loading-data.rst +++ b/doc/source/data/loading-data.rst @@ -4,13 +4,13 @@ Loading Data ==================== -:class:`Datastreams ` can be created from: +:class:`Datasets ` can be created from: * generated synthetic data, * local and distributed in-memory data, and * local and external storage systems (local disk, cloud storage, HDFS, etc.). -.. _datastream_generate_data: +.. _dataset_generate_data: ------------------------- Generating Synthetic Data @@ -20,7 +20,7 @@ Generating Synthetic Data .. tab-item:: Int Range - Create a ``Datastream`` from a range of integers, with a single column containing this integer range. + Create a ``Dataset`` from a range of integers, with a single column containing this integer range. .. literalinclude:: ./doc_code/loading_data.py :language: python @@ -29,7 +29,7 @@ Generating Synthetic Data .. tab-item:: Tensor Range - Create a datastream from a range of integers, packing this integer range into + Create a dataset from a range of integers, packing this integer range into ndarrays of the provided shape. .. doctest:: @@ -47,7 +47,7 @@ Generating Synthetic Data [0, 0, 0, ..., 0, 0, 0], [0, 0, 0, ..., 0, 0, 0]])} -.. _datastream_reading_from_storage: +.. _dataset_reading_from_storage: -------------------------- Reading Files From Storage @@ -63,7 +63,7 @@ Each of these APIs take a path or list of paths to files or directories. Any dir provided will be walked in order to obtain concrete file paths, at which point all files will be read in parallel. -.. _datastream_supported_file_formats: +.. _dataset_supported_file_formats: Common File Formats =================== @@ -129,7 +129,7 @@ Common File Formats .. tab-item:: Text - Read text files and directories. Each line in each text file will be treated as a row in the datastream. + Read text files and directories. 
Each line in each text file will be treated as a row in the dataset. .. literalinclude:: ./doc_code/loading_data.py :language: python @@ -154,8 +154,8 @@ Common File Formats Read binary files and directories. Each binary file will be converted to a record containing opaque bytes. These bytes can be decoded into tensor, tabular, text, or any other - kind of data using :meth:`~ray.data.Datastream.map_batches` to apply a per-row decoding - :ref:`user-defined function `. + kind of data using :meth:`~ray.data.Dataset.map_batches` to apply a per-row decoding + :ref:`user-defined function `. .. literalinclude:: ./doc_code/loading_data.py :language: python @@ -167,7 +167,7 @@ Common File Formats .. tab-item:: TFRecords Call :func:`~ray.data.read_tfrecords` to read TFRecord files into a - :class:`~ray.data.Datastream`. + :class:`~ray.data.Dataset`. .. warning:: Only `tf.train.Example `_ @@ -178,7 +178,7 @@ Common File Formats :start-after: __read_tfrecords_begin__ :end-before: __read_tfrecords_end__ -.. _datastream_reading_remote_storage: +.. _dataset_reading_remote_storage: Reading from Remote Storage @@ -320,31 +320,31 @@ For example: :start-after: __read_compressed_begin__ :end-before: __read_compressed_end__ -.. _datastream_from_in_memory_data: +.. _dataset_from_in_memory_data: ------------------- From In-Memory Data ------------------- -Datastreams can be constructed from existing in-memory data. In addition to being able to -construct a ``Datastream`` from plain Python objects, Datastreams also interoperates with popular +Datasets can be constructed from existing in-memory data. In addition to being able to +construct a ``Dataset`` from plain Python objects, Datasets also interoperates with popular single-node libraries (`Pandas `__, `NumPy `__, `Arrow `__) as well as distributed frameworks (:ref:`Dask `, :ref:`Spark `, :ref:`Modin `, :ref:`Mars `). -.. _datastream_from_in_memory_data_single_node: +.. 
_dataset_from_in_memory_data_single_node: From Single-Node Data Libraries =============================== -In this section, we demonstrate creating a ``Datastream`` from single-node in-memory data. +In this section, we demonstrate creating a ``Dataset`` from single-node in-memory data. .. tab-set:: .. tab-item:: Pandas - Create a ``Datastream`` from a Pandas DataFrame. This constructs a ``Datastream`` + Create a ``Dataset`` from a Pandas DataFrame. This constructs a ``Dataset`` backed by a single block. .. literalinclude:: ./doc_code/loading_data.py @@ -352,8 +352,8 @@ In this section, we demonstrate creating a ``Datastream`` from single-node in-me :start-after: __from_pandas_begin__ :end-before: __from_pandas_end__ - We can also build a ``Datastream`` from more than one Pandas DataFrame, where each said - DataFrame will become a block in the ``Datastream``. + We can also build a ``Dataset`` from more than one Pandas DataFrame, where each said + DataFrame will become a block in the ``Dataset``. .. literalinclude:: ./doc_code/loading_data.py :language: python @@ -362,7 +362,7 @@ In this section, we demonstrate creating a ``Datastream`` from single-node in-me .. tab-item:: NumPy - Create a ``Datastream`` from a NumPy ndarray. This constructs a ``Datastream`` + Create a ``Dataset`` from a NumPy ndarray. This constructs a ``Dataset`` backed by a single block; the outer dimension of the ndarray will be treated as the row dimension, and the column will have name ``"data"``. @@ -371,8 +371,8 @@ In this section, we demonstrate creating a ``Datastream`` from single-node in-me :start-after: __from_numpy_begin__ :end-before: __from_numpy_end__ - We can also build a ``Datastream`` from more than one NumPy ndarray, where each said - ndarray will become a block in the ``Datastream``. + We can also build a ``Dataset`` from more than one NumPy ndarray, where each said + ndarray will become a block in the ``Dataset``. .. 
literalinclude:: ./doc_code/loading_data.py :language: python @@ -381,17 +381,17 @@ In this section, we demonstrate creating a ``Datastream`` from single-node in-me .. tab-item:: Arrow - Create a ``Datastream`` from an + Create a ``Dataset`` from an `Arrow Table `__. - This constructs a ``Datastream`` backed by a single block. + This constructs a ``Dataset`` backed by a single block. .. literalinclude:: ./doc_code/loading_data.py :language: python :start-after: __from_arrow_begin__ :end-before: __from_arrow_end__ - We can also build a ``Datastream`` from more than one Arrow Table, where each said - ``Table`` will become a block in the ``Datastream``. + We can also build a ``Dataset`` from more than one Arrow Table, where each said + ``Table`` will become a block in the ``Dataset``. .. literalinclude:: ./doc_code/loading_data.py :language: python @@ -400,7 +400,7 @@ In this section, we demonstrate creating a ``Datastream`` from single-node in-me .. tab-item:: Python Objects - Create a ``Datastream`` from a list of Python objects; which are interpreted as dict records. + Create a ``Dataset`` from a list of Python objects; which are interpreted as dict records. If the object is not a dict, it will be wrapped as ``{"item": item}``. .. literalinclude:: ./doc_code/loading_data.py @@ -408,12 +408,12 @@ In this section, we demonstrate creating a ``Datastream`` from single-node in-me :start-after: __from_items_begin__ :end-before: __from_items_end__ -.. _datastream_from_in_memory_data_distributed: +.. 
_dataset_from_in_memory_data_distributed: From Distributed Data Processing Frameworks =========================================== -In addition to working with single-node in-memory data, Datastreams can be constructed from +In addition to working with single-node in-memory data, Datasets can be constructed from distributed (multi-node) in-memory data, interoperating with popular distributed data processing frameworks such as :ref:`Dask `, :ref:`Spark `, :ref:`Modin `, and :ref:`Mars `. @@ -426,9 +426,9 @@ integrations to work. See how these frameworks can be run on Ray in our .. tab-item:: Dask - Create a ``MaterializedDatastream`` from a + Create a ``MaterializedDataset`` from a `Dask DataFrame `__. This constructs a - ``Datastream`` backed by the distributed Pandas DataFrame partitions that underly the + ``Dataset`` backed by the distributed Pandas DataFrame partitions that underly the Dask DataFrame. .. literalinclude:: ./doc_code/loading_data.py @@ -438,12 +438,12 @@ integrations to work. See how these frameworks can be run on Ray in our .. tab-item:: Spark - Create a ``MaterializedDatastream`` from a `Spark DataFrame + Create a ``MaterializedDataset`` from a `Spark DataFrame `__. - This constructs a ``Datastream`` backed by the distributed Spark DataFrame partitions + This constructs a ``Dataset`` backed by the distributed Spark DataFrame partitions that underly the Spark DataFrame. When this conversion happens, Spark-on-Ray (RayDP) will save the Spark DataFrame partitions to Ray's object store in the Arrow format, - which Datastreams will then interpret as its blocks. + which Datasets will then interpret as its blocks. .. literalinclude:: ./doc_code/loading_data_untested.py :language: python @@ -452,7 +452,7 @@ integrations to work. See how these frameworks can be run on Ray in our .. tab-item:: Modin - Create a ``MaterializedDatastream`` from a Modin DataFrame. This constructs a ``Datastream`` + Create a ``MaterializedDataset`` from a Modin DataFrame. 
This constructs a ``Dataset`` backed by the distributed Pandas DataFrame partitions that underly the Modin DataFrame. .. literalinclude:: ./doc_code/loading_data.py @@ -462,7 +462,7 @@ integrations to work. See how these frameworks can be run on Ray in our .. tab-item:: Mars - Create a ``MaterializedDatastream`` from a Mars DataFrame. This constructs a ``Datastream`` + Create a ``MaterializedDataset`` from a Mars DataFrame. This constructs a ``Dataset`` backed by the distributed Pandas DataFrame partitions that underly the Mars DataFrame. .. literalinclude:: ./doc_code/loading_data_untested.py @@ -470,7 +470,7 @@ integrations to work. See how these frameworks can be run on Ray in our :start-after: __from_mars_begin__ :end-before: __from_mars_end__ -.. _datastream_from_torch_tf: +.. _dataset_from_torch_tf: ------------------------- From Torch and TensorFlow @@ -480,12 +480,12 @@ From Torch and TensorFlow .. tab-item:: PyTorch - If you already have a Torch dataset available, you can create a Datastream using + If you already have a Torch dataset available, you can create a Dataset using :class:`~ray.data.from_torch`. .. warning:: :class:`~ray.data.from_torch` doesn't support parallel - reads. You should only use this datasource for small datastreams like MNIST or + reads. You should only use this datasource for small datasets like MNIST or CIFAR. .. code-block:: python @@ -494,18 +494,18 @@ From Torch and TensorFlow import torchvision torch_ds = torchvision.datasets.MNIST("data", download=True) - datastream = ray.data.from_torch(torch_ds) - datastream.take(1) + dataset = ray.data.from_torch(torch_ds) + dataset.take(1) # {"item": (, 5)} .. tab-item:: TensorFlow - If you already have a TensorFlow dataset available, you can create a Datastream + If you already have a TensorFlow dataset available, you can create a Dataset using :class:`~ray.data.from_tf`. .. warning:: :class:`~ray.data.from_tf` doesn't support parallel reads. 
You - should only use this function with small datastreams like MNIST or CIFAR. + should only use this function with small datasets like MNIST or CIFAR. .. code-block:: python @@ -513,12 +513,12 @@ From Torch and TensorFlow import tensorflow_datasets as tfds tf_ds, _ = tfds.load("cifar10", split=["train", "test"]) - datastream = ray.data.from_tf(tf_ds) + dataset = ray.data.from_tf(tf_ds) - datastream - # -> MaterializedDatastream(num_blocks=200, num_rows=50000, schema={id: binary, image: numpy.ndarray(shape=(32, 32, 3), dtype=uint8), label: int64}) + dataset + # -> MaterializedDataset(num_blocks=200, num_rows=50000, schema={id: binary, image: numpy.ndarray(shape=(32, 32, 3), dtype=uint8), label: int64}) -.. _datastream_from_huggingface: +.. _dataset_from_huggingface: ------------------------------- From 🤗 (Hugging Face) Datasets @@ -526,12 +526,12 @@ From 🤗 (Hugging Face) Datasets You can convert 🤗 Datasets into Ray Data by using :py:class:`~ray.data.from_huggingface`. This function accesses the underlying Arrow table and -converts it into a Datastream directly. +converts it into a Dataset directly. .. warning:: :py:class:`~ray.data.from_huggingface` doesn't support parallel reads. This will not usually be an issue with in-memory 🤗 Datasets, - but may fail with large memory-mapped 🤗 Datasets. 🤗 ``IterableDatastream`` + but may fail with large memory-mapped 🤗 Datasets. 🤗 ``IterableDataset`` objects are not supported. .. code-block:: python @@ -544,19 +544,19 @@ converts it into a Datastream directly. ray_ds["train"].take(2) # [{'text': ''}, {'text': ' = Valkyria Chronicles III = \n'}] -.. _datastream_mongo_db: +.. _dataset_mongo_db: ------------ From MongoDB ------------ -A Datastream can also be created from `MongoDB `__ with +A Dataset can also be created from `MongoDB `__ with :py:class:`~ray.data.read_mongo`. 
This interacts with MongoDB similar to external filesystems, except here you will need to specify the MongoDB source by its `uri `__, `database and collection `__, and specify a `pipeline `__ to run against -the collection. The execution results are then used to create a Datastream. +the collection. The execution results are then used to create a Dataset. .. note:: @@ -591,7 +591,7 @@ the collection. The execution results are then used to create a Datastream. collection="my_collection", ) -.. _datastreams_sql_databases: +.. _datasets_sql_databases: -------------------------- Reading From SQL Databases @@ -630,13 +630,13 @@ Call :func:`~ray.data.read_sql` to read data from a database that provides a ) # Get all movies - datastream = ray.data.read_sql("SELECT * FROM movie", create_connection) + dataset = ray.data.read_sql("SELECT * FROM movie", create_connection) # Get movies after the year 1980 - datastream = ray.data.read_sql( + dataset = ray.data.read_sql( "SELECT title, score FROM movie WHERE year >= 1980", create_connection ) # Get the number of movies per year - datastream = ray.data.read_sql( + dataset = ray.data.read_sql( "SELECT year, COUNT(*) FROM movie GROUP BY year", create_connection ) @@ -667,13 +667,13 @@ Call :func:`~ray.data.read_sql` to read data from a database that provides a ) # Get all movies - datastream = ray.data.read_sql("SELECT * FROM movie", create_connection) + dataset = ray.data.read_sql("SELECT * FROM movie", create_connection) # Get movies after the year 1980 - datastream = ray.data.read_sql( + dataset = ray.data.read_sql( "SELECT title, score FROM movie WHERE year >= 1980", create_connection ) # Get the number of movies per year - datastream = ray.data.read_sql( + dataset = ray.data.read_sql( "SELECT year, COUNT(*) FROM movie GROUP BY year", create_connection ) @@ -703,13 +703,13 @@ Call :func:`~ray.data.read_sql` to read data from a database that provides a ) # Get all movies - datastream = ray.data.read_sql("SELECT * FROM movie", 
create_connection) + dataset = ray.data.read_sql("SELECT * FROM movie", create_connection) # Get movies after the year 1980 - datastream = ray.data.read_sql( + dataset = ray.data.read_sql( "SELECT title, score FROM movie WHERE year >= 1980", create_connection ) # Get the number of movies per year - datastream = ray.data.read_sql( + dataset = ray.data.read_sql( "SELECT year, COUNT(*) FROM movie GROUP BY year", create_connection ) @@ -740,13 +740,13 @@ Call :func:`~ray.data.read_sql` to read data from a database that provides a # Get all movies - datastream = ray.data.read_sql("SELECT * FROM movie", create_connection) + dataset = ray.data.read_sql("SELECT * FROM movie", create_connection) # Get movies after the year 1980 - datastream = ray.data.read_sql( + dataset = ray.data.read_sql( "SELECT title, score FROM movie WHERE year >= 1980", create_connection ) # Get the number of movies per year - datastream = ray.data.read_sql( + dataset = ray.data.read_sql( "SELECT year, COUNT(*) FROM movie GROUP BY year", create_connection ) @@ -774,13 +774,13 @@ Call :func:`~ray.data.read_sql` to read data from a database that provides a return dbapi.Connection(client) # Get all movies - datastream = ray.data.read_sql("SELECT * FROM movie", create_connection) + dataset = ray.data.read_sql("SELECT * FROM movie", create_connection) # Get movies after the year 1980 - datastream = ray.data.read_sql( + dataset = ray.data.read_sql( "SELECT title, score FROM movie WHERE year >= 1980", create_connection ) # Get the number of movies per year - datastream = ray.data.read_sql( + dataset = ray.data.read_sql( "SELECT year, COUNT(*) FROM movie GROUP BY year", create_connection ) @@ -791,7 +791,7 @@ Call :func:`~ray.data.read_sql` to read data from a database that provides a Custom Datasources ------------------ -Datastreams can read and write in parallel to :ref:`custom datasources ` defined in Python. +Datasets can read and write in parallel to :ref:`custom datasources ` defined in Python. 
Once you have implemented `YourCustomDataSource`, you can use it like any other source in Ray Data: .. code-block:: python @@ -808,9 +808,9 @@ For more details, check out :ref:`guide for implementing a custom datasource 0.5).show(5) - # Convert ray datastream to mars dataframe + # Convert ray dataset to mars dataframe # df2 = md.read_ray_dataset(ds) df2 = ds.to_mars() print(df2.head(5).execute()) diff --git a/doc/source/data/performance-tips.rst b/doc/source/data/performance-tips.rst index 41bcf691a1b6..6d9ec81d9bb5 100644 --- a/doc/source/data/performance-tips.rst +++ b/doc/source/data/performance-tips.rst @@ -12,8 +12,8 @@ more about the Ray dashboard, read :ref:`Ray Dashboard `. Debugging Statistics ~~~~~~~~~~~~~~~~~~~~ -You can view debug stats for your Datastream executions via :meth:`ds.stats() `. -These stats can be used to understand the performance of your Datastream workload and can help you debug problematic bottlenecks. Note that both execution and iterator statistics are available: +You can view debug stats for your Dataset executions via :meth:`ds.stats() `. +These stats can be used to understand the performance of your Dataset workload and can help you debug problematic bottlenecks. Note that both execution and iterator statistics are available: .. code-block:: python @@ -44,7 +44,7 @@ These stats can be used to understand the performance of your Datastream workloa * Tasks per node: 16 min, 16 max, 16 mean; 1 nodes used * Extra metrics: {'obj_store_mem_alloc': 3658, 'obj_store_mem_freed': 5000, 'obj_store_mem_peak': 40000} - Datastream iterator time breakdown: + Dataset iterator time breakdown: * Total time user code is blocked: 551.67ms * Total time in user code: 144.97us * Total time overall: 1.01s @@ -59,8 +59,8 @@ These stats can be used to understand the performance of your Datastream workloa Batching Transforms ~~~~~~~~~~~~~~~~~~~ -Mapping individual records using :meth:`.map(fn) ` can be quite slow. 
-Instead, consider using :meth:`.map_batches(batch_fn, batch_format="pandas") ` and writing your ``batch_fn`` to +Mapping individual records using :meth:`.map(fn) ` can be quite slow. +Instead, consider using :meth:`.map_batches(batch_fn, batch_format="pandas") ` and writing your ``batch_fn`` to perform vectorized pandas operations. .. _data_format_overheads: @@ -101,20 +101,20 @@ may incur data copies; which conversions cause data copying is given in the belo Parquet Column Pruning ~~~~~~~~~~~~~~~~~~~~~~ -Current Datastream will read all Parquet columns into memory. +Current Dataset will read all Parquet columns into memory. If you only need a subset of the columns, make sure to specify the list of columns explicitly when calling :meth:`ray.data.read_parquet() ` to avoid loading unnecessary data (projection pushdown). For example, use ``ray.data.read_parquet("example://iris.parquet", columns=["sepal.length", "variety"])`` to read -just two of the five columns of Iris datastream. +just two of the five columns of Iris dataset. Parquet Row Pruning ~~~~~~~~~~~~~~~~~~~ -Similarly, you can pass in a filter to :meth:`ray.data.read_parquet() ` (filter pushdown) +Similarly, you can pass in a filter to :meth:`ray.data.read_parquet() ` (filter pushdown) which will be applied at the file scan so only rows that match the filter predicate will be returned. -For example, use ``ray.data.read_parquet("example://iris.parquet", filter=pyarrow.datastream.field("sepal.length") > 5.0)`` +For example, use ``ray.data.read_parquet("example://iris.parquet", filter=pyarrow.dataset.field("sepal.length") > 5.0)`` (where ``pyarrow`` has to be imported) to read rows with sepal.length greater than 5.0. This can be used in conjunction with column pruning when appropriate to get the benefits of both. 
@@ -144,16 +144,16 @@ For example, use ``ray.data.read_parquet(path, ray_remote_args={"num_cpus": 0.25 Enabling Push-Based Shuffle ~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Some Datastream operations require a *shuffle* operation, meaning that data is shuffled from all of the input partitions to all of the output partitions. -These operations include :meth:`Datastream.random_shuffle `, -:meth:`Datastream.sort ` and :meth:`Datastream.groupby `. -Shuffle can be challenging to scale to large data sizes and clusters, especially when the total datastream size cannot fit into memory. +Some Dataset operations require a *shuffle* operation, meaning that data is shuffled from all of the input partitions to all of the output partitions. +These operations include :meth:`Dataset.random_shuffle `, +:meth:`Dataset.sort ` and :meth:`Dataset.groupby `. +Shuffle can be challenging to scale to large data sizes and clusters, especially when the total dataset size cannot fit into memory. -Datastreams provides an alternative shuffle implementation known as push-based shuffle for improving large-scale performance. -We recommend trying this out if your datastream has more than 1000 blocks or is larger than 1 TB in size. +Datasets provides an alternative shuffle implementation known as push-based shuffle for improving large-scale performance. +We recommend trying this out if your dataset has more than 1000 blocks or is larger than 1 TB in size. -To try this out locally or on a cluster, you can start with the `nightly release test `_ that Ray runs for :meth:`Datastream.random_shuffle ` and :meth:`Datastream.sort `. -To get an idea of the performance you can expect, here are some run time results for :meth:`Datastream.random_shuffle ` on 1-10TB of data on 20 machines (m5.4xlarge instances on AWS EC2, each with 16 vCPUs, 64GB RAM). +To try this out locally or on a cluster, you can start with the `nightly release test `_ that Ray runs for :meth:`Dataset.random_shuffle ` and :meth:`Dataset.sort `. 
+To get an idea of the performance you can expect, here are some run time results for :meth:`Dataset.random_shuffle ` on 1-10TB of data on 20 machines (m5.4xlarge instances on AWS EC2, each with 16 vCPUs, 64GB RAM). .. image:: https://docs.google.com/spreadsheets/d/e/2PACX-1vQvBWpdxHsW0-loasJsBpdarAixb7rjoo-lTgikghfCeKPQtjQDDo2fY51Yc1B6k_S4bnYEoChmFrH2/pubchart?oid=598567373&format=image :align: center @@ -162,10 +162,10 @@ To try out push-based shuffle, set the environment variable ``RAY_DATA_PUSH_BASE .. code-block:: bash - $ wget https://raw.githubusercontent.com/ray-project/ray/master/release/nightly_tests/datastream/sort.py + $ wget https://raw.githubusercontent.com/ray-project/ray/master/release/nightly_tests/dataset/sort.py $ RAY_DATA_PUSH_BASED_SHUFFLE=1 python sort.py --num-partitions=10 --partition-size=1e7 - # Datastream size: 10 partitions, 0.01GB partition size, 0.1GB total - # [datastream]: Run `pip install tqdm` to enable progress reporting. + # Dataset size: 10 partitions, 0.01GB partition size, 0.1GB total + # [dataset]: Run `pip install tqdm` to enable progress reporting. # 2022-05-04 17:30:28,806 INFO push_based_shuffle.py:118 -- Using experimental push-based shuffle. # Finished in 9.571171760559082 # ... diff --git a/doc/source/data/pipelining-compute.rst b/doc/source/data/pipelining-compute.rst index 7a17c431825b..b201d377da2f 100644 --- a/doc/source/data/pipelining-compute.rst +++ b/doc/source/data/pipelining-compute.rst @@ -6,7 +6,7 @@ DatasetPipelines (deprecated) .. warning:: - DatasetPipelines are deprecated now that Datastream provides pipelined execution + DatasetPipelines are deprecated now that Dataset provides pipelined execution by default. For more detail, see :ref:`Streaming Execution `. Dataset pipelines allow Dataset transformations to be executed incrementally on *windows* of the base data, instead of on all of the data at once. 
This can be used for streaming data loading into ML training, or to execute batch transformations on large datasets without needing to load the entire dataset into cluster memory. @@ -16,7 +16,7 @@ Dataset pipelines can be read in a streaming fashion by one consumer, or split i Creating a DatasetPipeline ========================== -A `DatasetPipeline `__ can be constructed in two ways: either by pipelining the execution of an existing Dataset (via :meth:`~ray.data.Datastream.window`), or generating repeats of an existing Dataset (via :meth:`~ray.data.Datastream.repeat`). Similar to Datasets, you can freely pass DatasetPipelines between Ray tasks, actors, and libraries. Get started with this synthetic data example: +A `DatasetPipeline `__ can be constructed in two ways: either by pipelining the execution of an existing Dataset (via :meth:`~ray.data.Dataset.window`), or generating repeats of an existing Dataset (via :meth:`~ray.data.Dataset.repeat`). Similar to Datasets, you can freely pass DatasetPipelines between Ray tasks, actors, and libraries. Get started with this synthetic data example: .. code-block:: python @@ -58,7 +58,7 @@ A `DatasetPipeline `__ can be constructed print("Total num rows", num_rows) # -> Total num rows 1000000 -You can also create a DatasetPipeline from a custom iterator over dataset creators using :meth:`~ray.data.DatastreamPipeline.from_iterable`. For example, this is how you would implement :meth:`~ray.data.Datastream.repeat` and :meth:`~ray.data.Datastream.window` using :meth:`~ray.data.DatastreamPipeline.from_iterable`: +You can also create a DatasetPipeline from a custom iterator over dataset creators using :meth:`~ray.data.DatasetPipeline.from_iterable`. For example, this is how you would implement :meth:`~ray.data.Dataset.repeat` and :meth:`~ray.data.Dataset.window` using :meth:`~ray.data.DatasetPipeline.from_iterable`: .. 
code-block:: python @@ -103,7 +103,7 @@ While most Dataset operations are per-row (e.g., map, filter), some operations a # 0 # 3 -You can also apply arbitrary transformations to each window using :meth:`DatasetPipeline.foreach_window() `: +You can also apply arbitrary transformations to each window using :meth:`DatasetPipeline.foreach_window() `: .. code-block:: python @@ -167,7 +167,7 @@ Ignoring the output, the above script has three separate stages: loading, prepro Enabling Pipelining ~~~~~~~~~~~~~~~~~~~ -We can optimize this by *pipelining* the execution of the dataset with the :meth:`~ray.data.Datastream.window` call, which returns a DatasetPipeline instead of a Dataset object. The pipeline supports similar transformations to the original Dataset: +We can optimize this by *pipelining* the execution of the dataset with the :meth:`~ray.data.Dataset.window` call, which returns a DatasetPipeline instead of a Dataset object. The pipeline supports similar transformations to the original Dataset: .. code-block:: python @@ -226,7 +226,7 @@ Dataset pipelines can also be used for streaming data loading into distributed t Splitting pipelines for distributed ingest ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Similar to how you can split a Dataset with :meth:`Dataset.split() `, you can also split a DatasetPipeline with the same method call :meth:`DatasetPipeline.split() `. This returns a number of DatasetPipeline shards that share a common parent pipeline. Each shard can be passed to a remote task or actor. +Similar to how you can split a Dataset with :meth:`Dataset.split() `, you can also split a DatasetPipeline with the same method call :meth:`DatasetPipeline.split() `. This returns a number of DatasetPipeline shards that share a common parent pipeline. Each shard can be passed to a remote task or actor. 
**Code**: @@ -258,7 +258,7 @@ Handling Epochs ~~~~~~~~~~~~~~~ It's common in ML training to want to divide data ingest into epochs, or repetitions over the original source dataset. -DatasetPipeline provides a convenient :meth:`DatasetPipeline.iter_epochs() ` method that can be used to split up the pipeline into epoch-delimited pipeline segments. +DatasetPipeline provides a convenient :meth:`DatasetPipeline.iter_epochs() ` method that can be used to split up the pipeline into epoch-delimited pipeline segments. Epochs are defined by the last call to ``.repeat()`` in a pipeline, for example: .. code-block:: python diff --git a/doc/source/data/transforming-data.rst b/doc/source/data/transforming-data.rst index d3b3a5bfda62..0b9305dabbef 100644 --- a/doc/source/data/transforming-data.rst +++ b/doc/source/data/transforming-data.rst @@ -4,10 +4,10 @@ Transforming Data ================= -Datastream transforms take in datastreams and produce new datastreams. For example, *map_batches* +Dataset transforms take in datasets and produce new datasets. For example, *map_batches* is a transform that applies a -:ref:`user-defined function ` on each data record -and returns a new datastream as the result. Datastream transforms can be composed to +:ref:`user-defined function ` on each data record +and returns a new dataset as the result. Dataset transforms can be composed to express a chain of computations. -------- @@ -17,9 +17,9 @@ Overview There are two main types of supported transforms: * One-to-one: each input block will contribute to only one output - block, such as :meth:`ds.map_batches() `. + block, such as :meth:`ds.map_batches() `. * All-to-all: input blocks can contribute to multiple output blocks, - such as :meth:`ds.random_shuffle() `. + such as :meth:`ds.random_shuffle() `. .. list-table:: Common Ray Data transforms. 
:header-rows: 1 @@ -27,26 +27,26 @@ There are two main types of supported transforms: * - Transform - Type - Description - * - :meth:`ds.map() ` + * - :meth:`ds.map() ` - One-to-one - Apply a given function to individual data records. - * - :meth:`ds.map_batches() ` + * - :meth:`ds.map_batches() ` - One-to-one - Apply a given function to batches of records. - * - :meth:`ds.repartition() ` + * - :meth:`ds.repartition() ` - All-to-all - - | Repartition the datastream into N blocks. - * - :meth:`ds.random_shuffle() ` + - | Repartition the dataset into N blocks. + * - :meth:`ds.random_shuffle() ` - All-to-all - - | Randomly shuffle the datastream. - * - :meth:`ds.groupby().\() ` + - | Randomly shuffle the dataset. + * - :meth:`ds.groupby().\() ` - All-to-all - | Group data by column and aggregate each group. * - :meth:`ds.groupby().map_groups() ` - All-to-all - | Group data by column and transform each group. -.. _transform_datastreams_writing_udfs: +.. _transform_datasets_writing_udfs: -------------- Map transforms @@ -106,7 +106,7 @@ the resource scheduling of tasks: Configuring batch size ====================== -An important parameter to set for :meth:`ds.map_batches() ` +An important parameter to set for :meth:`ds.map_batches() ` is ``batch_size``, which controls the size of the batches provided to the your transform function. The default batch size is `4096` for CPU tasks. For GPU tasks, an explicit batch size is always required: @@ -120,12 +120,12 @@ batch size is `4096` for CPU tasks. For GPU tasks, an explicit batch size is alw Increasing ``batch_size`` can improve performance for transforms that take advantage of vectorization, but will also result in higher memory utilization, which can lead to out-of-memory (OOM) errors. If encountering OOMs, decreasing your ``batch_size`` may help. 
Note also that if the ``batch_size`` becomes larger than the number of records per block, multiple blocks will be bundled together into a single batch, potentially reducing the parallelism available. -.. _transform_datastreams_batch_formats: +.. _transform_datasets_batch_formats: Configuring batch format ======================== -Customize the format of data batches using the ``batch_format`` argument to :meth:`ds.map_batches() `. The following are examples in each available batch format. +Customize the format of data batches using the ``batch_format`` argument to :meth:`ds.map_batches() `. The following are examples in each available batch format. Transform functions do not have to return data in the same format as the input batch. For example, you could return a ``pd.DataFrame`` even if the input was in NumPy format. @@ -180,8 +180,8 @@ When using actors, you must also specify your transform as a callable class type .. literalinclude:: ./doc_code/transforming_data.py :language: python - :start-after: __datastream_compute_strategy_begin__ - :end-before: __datastream_compute_strategy_end__ + :start-after: __dataset_compute_strategy_begin__ + :end-before: __dataset_compute_strategy_end__ Reduce memory usage using generators ==================================== @@ -202,8 +202,8 @@ Shuffle transforms change the organization of the data, e.g., increasing the num Repartitioning data =================== -Call :meth:`Datastream.repartition() ` to change the -number of blocks of the datastream. This may be useful to break up your dataset into small +Call :meth:`Dataset.repartition() ` to change the +number of blocks of the dataset. This may be useful to break up your dataset into small pieces to enable more fine-grained parallelization, or to reduce the number of files produced as output of a write operation. @@ -215,14 +215,14 @@ produced as output of a write operation. 
Random shuffle ============== -Call :meth:`Datastream.random_shuffle() ` to +Call :meth:`Dataset.random_shuffle() ` to globally shuffle the order of data records. .. doctest:: >>> import ray - >>> datastream = ray.data.range(10) - >>> datastream.random_shuffle().take_batch() # doctest: +SKIP + >>> dataset = ray.data.range(10) + >>> dataset.random_shuffle().take_batch() # doctest: +SKIP {'id': array([7, 0, 9, 3, 5, 1, 4, 2, 8, 6])} For reduced overhead during training ingest, use local shuffles. Read @@ -234,7 +234,7 @@ For reduced overhead during training ingest, use local shuffles. Read Grouped transforms ------------------ -Ray Data supports grouping data by column and applying aggregations to each group. This is supported via the :meth:`ds.groupby() ` call. +Ray Data supports grouping data by column and applying aggregations to each group. This is supported via the :meth:`ds.groupby() ` call. Aggregations ============ diff --git a/doc/source/data/working-with-tensors.rst b/doc/source/data/working-with-tensors.rst index c30a62d592d8..1d136a297c1e 100644 --- a/doc/source/data/working-with-tensors.rst +++ b/doc/source/data/working-with-tensors.rst @@ -22,7 +22,7 @@ Ray Data represents tensors as .. testoutput:: :options: +ELLIPSIS - Datastream( + Dataset( num_blocks=..., num_rows=100, schema={image: numpy.ndarray(shape=(28, 28), dtype=uint8)} diff --git a/doc/source/ray-air/check-ingest.rst b/doc/source/ray-air/check-ingest.rst index 6984c2fad7f0..3c74e6aa1509 100644 --- a/doc/source/ray-air/check-ingest.rst +++ b/doc/source/ray-air/check-ingest.rst @@ -13,7 +13,7 @@ Overview .. _ingest_basics: The following figure illustrates a simple Ray AIR training job that (1) loads parquet data from S3, (2) applies a simple -:ref:`user-defined function ` to preprocess batches of data, and (3) runs an AIR Trainer with the given dataset and preprocessor. 
+:ref:`user-defined function ` to preprocess batches of data, and (3) runs an AIR Trainer with the given dataset and preprocessor. .. figure:: images/ingest.svg @@ -29,7 +29,7 @@ on the train dataset passed to the Trainer, followed by :py:meth:`prep.transform on remaining datasets. **Training**: Then, AIR passes the preprocessed dataset to Train workers (Ray actors) launched by the Trainer. Each worker calls :func:`~ray.air.session.get_dataset_shard` to get a handle to its assigned data shard. -This returns a :class:`~ray.data.DataIterator`, which can be used to loop over the data with :meth:`~ray.data.DataIterator.iter_batches`, :meth:`~ray.data.Datastream.iter_torch_batches`, or :meth:`~ray.data.Datastream.to_tf`. +This returns a :class:`~ray.data.DataIterator`, which can be used to loop over the data with :meth:`~ray.data.DataIterator.iter_batches`, :meth:`~ray.data.Dataset.iter_torch_batches`, or :meth:`~ray.data.Dataset.to_tf`. Each of these returns a batch iterator for one epoch (a full pass over the original dataset). Getting Started @@ -217,7 +217,7 @@ By default, only the `"train"` dataset is split. All the other Datasets are not However, you may want to split a large validation dataset example to also do data parallel validation. This example shows overriding the split config for the "valid" and "test" datasets. This means that -both the valid and test datasets here will be :py:meth:`.split() ` across the training workers. +both the valid and test datasets here will be :py:meth:`.split() ` across the training workers. .. literalinclude:: doc_code/air_ingest.py :language: python @@ -432,9 +432,9 @@ FAQ How do I pass in a :py:class:`~ray.data.dataset_pipeline.DatasetPipeline` to my ``Trainer``? ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -The Trainer interface only accepts a standard :py:class:`~ray.data.Datastream` and not a :py:class:`~ray.data.dataset_pipeline.DatasetPipeline`. 
+The Trainer interface only accepts a standard :py:class:`~ray.data.Dataset` and not a :py:class:`~ray.data.dataset_pipeline.DatasetPipeline`. Instead, you can configure the ingest via the ``dataset_config`` that is passed to your ``Trainer``. Internally, Ray AIR will -convert the provided :py:class:`~ray.data.Datastream` into a :py:class:`~ray.data.dataset_pipeline.DatasetPipeline` with the specified configurations. +convert the provided :py:class:`~ray.data.Dataset` into a :py:class:`~ray.data.dataset_pipeline.DatasetPipeline` with the specified configurations. See the :ref:`Enabling Streaming Ingest ` and :ref:`Shuffling Data ` sections for full examples. diff --git a/doc/source/ray-air/computer-vision.rst b/doc/source/ray-air/computer-vision.rst index 430a20de05f7..64542835e6d5 100644 --- a/doc/source/ray-air/computer-vision.rst +++ b/doc/source/ray-air/computer-vision.rst @@ -38,7 +38,7 @@ Reading image data :end-before: __read_images1_stop__ :dedent: - Then, apply a :ref:`user-defined function ` to + Then, apply a :ref:`user-defined function ` to encode the class names as integer targets. .. literalinclude:: ./doc_code/computer_vision.py @@ -52,7 +52,7 @@ Reading image data .. tab-item:: NumPy - To load NumPy arrays into a :class:`~ray.data.Datastream`, separately read the image and label arrays. + To load NumPy arrays into a :class:`~ray.data.Dataset`, separately read the image and label arrays. .. literalinclude:: ./doc_code/computer_vision.py :start-after: __read_numpy1_start__ @@ -91,14 +91,14 @@ Reading image data } } - To load examples stored in this format, read the TFRecords into a :class:`~ray.data.Datastream`. + To load examples stored in this format, read the TFRecords into a :class:`~ray.data.Dataset`. .. 
literalinclude:: ./doc_code/computer_vision.py :start-after: __read_tfrecords1_start__ :end-before: __read_tfrecords1_stop__ :dedent: - Then, apply a :ref:`user-defined function ` to + Then, apply a :ref:`user-defined function ` to decode the raw image bytes. .. literalinclude:: ./doc_code/computer_vision.py @@ -116,7 +116,7 @@ Reading image data :dedent: -For more information on creating datastreams, see :ref:`Loading Data `. +For more information on creating datasets, see :ref:`Loading Data `. Transforming images diff --git a/doc/source/ray-air/examples/analyze_tuning_results.ipynb b/doc/source/ray-air/examples/analyze_tuning_results.ipynb index 9eab291552f1..cd652f099621 100644 --- a/doc/source/ray-air/examples/analyze_tuning_results.ipynb +++ b/doc/source/ray-air/examples/analyze_tuning_results.ipynb @@ -82,7 +82,7 @@ "id": "a93b242c", "metadata": {}, "source": [ - "We'll define a utility function to create a Datastream from the Sklearn dataset. We expect the target column to be in the dataframe, so we'll add it to the dataframe manually." + "We'll define a utility function to create a Dataset from the Sklearn dataset. We expect the target column to be in the dataframe, so we'll add it to the dataframe manually." 
] }, { @@ -100,7 +100,7 @@ } ], "source": [ - "def get_training_data() -> ray.data.Datastream:\n", + "def get_training_data() -> ray.data.Dataset:\n", " data_raw = fetch_covtype()\n", " df = pd.DataFrame(data_raw[\"data\"], columns=data_raw[\"feature_names\"])\n", " df[\"target\"] = data_raw[\"target\"]\n", @@ -128,7 +128,7 @@ "name": "stdout", "output_type": "stream", "text": [ - "Datastream(num_blocks=1, num_rows=581012, schema={Elevation: float64, Aspect: float64, Slope: float64, Horizontal_Distance_To_Hydrology: float64, Vertical_Distance_To_Hydrology: float64, Horizontal_Distance_To_Roadways: float64, Hillshade_9am: float64, Hillshade_Noon: float64, Hillshade_3pm: float64, Horizontal_Distance_To_Fire_Points: float64, Wilderness_Area_0: float64, Wilderness_Area_1: float64, Wilderness_Area_2: float64, Wilderness_Area_3: float64, Soil_Type_0: float64, Soil_Type_1: float64, Soil_Type_2: float64, Soil_Type_3: float64, Soil_Type_4: float64, Soil_Type_5: float64, Soil_Type_6: float64, Soil_Type_7: float64, Soil_Type_8: float64, Soil_Type_9: float64, Soil_Type_10: float64, Soil_Type_11: float64, Soil_Type_12: float64, Soil_Type_13: float64, Soil_Type_14: float64, Soil_Type_15: float64, Soil_Type_16: float64, Soil_Type_17: float64, Soil_Type_18: float64, Soil_Type_19: float64, Soil_Type_20: float64, Soil_Type_21: float64, Soil_Type_22: float64, Soil_Type_23: float64, Soil_Type_24: float64, Soil_Type_25: float64, Soil_Type_26: float64, Soil_Type_27: float64, Soil_Type_28: float64, Soil_Type_29: float64, Soil_Type_30: float64, Soil_Type_31: float64, Soil_Type_32: float64, Soil_Type_33: float64, Soil_Type_34: float64, Soil_Type_35: float64, Soil_Type_36: float64, Soil_Type_37: float64, Soil_Type_38: float64, Soil_Type_39: float64, target: int32})\n" + "Dataset(num_blocks=1, num_rows=581012, schema={Elevation: float64, Aspect: float64, Slope: float64, Horizontal_Distance_To_Hydrology: float64, Vertical_Distance_To_Hydrology: float64, Horizontal_Distance_To_Roadways: 
float64, Hillshade_9am: float64, Hillshade_Noon: float64, Hillshade_3pm: float64, Horizontal_Distance_To_Fire_Points: float64, Wilderness_Area_0: float64, Wilderness_Area_1: float64, Wilderness_Area_2: float64, Wilderness_Area_3: float64, Soil_Type_0: float64, Soil_Type_1: float64, Soil_Type_2: float64, Soil_Type_3: float64, Soil_Type_4: float64, Soil_Type_5: float64, Soil_Type_6: float64, Soil_Type_7: float64, Soil_Type_8: float64, Soil_Type_9: float64, Soil_Type_10: float64, Soil_Type_11: float64, Soil_Type_12: float64, Soil_Type_13: float64, Soil_Type_14: float64, Soil_Type_15: float64, Soil_Type_16: float64, Soil_Type_17: float64, Soil_Type_18: float64, Soil_Type_19: float64, Soil_Type_20: float64, Soil_Type_21: float64, Soil_Type_22: float64, Soil_Type_23: float64, Soil_Type_24: float64, Soil_Type_25: float64, Soil_Type_26: float64, Soil_Type_27: float64, Soil_Type_28: float64, Soil_Type_29: float64, Soil_Type_30: float64, Soil_Type_31: float64, Soil_Type_32: float64, Soil_Type_33: float64, Soil_Type_34: float64, Soil_Type_35: float64, Soil_Type_36: float64, Soil_Type_37: float64, Soil_Type_38: float64, Soil_Type_39: float64, target: int32})\n" ] } ], diff --git a/doc/source/ray-air/examples/convert_existing_pytorch_code_to_ray_air.ipynb b/doc/source/ray-air/examples/convert_existing_pytorch_code_to_ray_air.ipynb index 76db9ed3b508..4465734badef 100644 --- a/doc/source/ray-air/examples/convert_existing_pytorch_code_to_ray_air.ipynb +++ b/doc/source/ray-air/examples/convert_existing_pytorch_code_to_ray_air.ipynb @@ -1079,7 +1079,7 @@ "id": "ad556eeb", "metadata": {}, "source": [ - "Batch predictors work with Ray Data. Here we convert our test dataset into a Datastream - note that this is not very efficient, and you can look at our {ref}`other tutorials ` to see more efficient ways to generate a Datastream." + "Batch predictors work with Ray Data. 
Here we convert our test dataset into a Dataset - note that this is not very efficient, and you can look at our {ref}`other tutorials ` to see more efficient ways to generate a Dataset." ] }, { @@ -1125,7 +1125,7 @@ "id": "41094a55", "metadata": {}, "source": [ - "`results` is another Datastream. We can use `results.show()` to see our prediction results:" + "`results` is another Dataset. We can use `results.show()` to see our prediction results:" ] }, { diff --git a/doc/source/ray-air/examples/convert_existing_tf_code_to_ray_air.ipynb b/doc/source/ray-air/examples/convert_existing_tf_code_to_ray_air.ipynb index e3e6870ac9be..d0ba3609819d 100644 --- a/doc/source/ray-air/examples/convert_existing_tf_code_to_ray_air.ipynb +++ b/doc/source/ray-air/examples/convert_existing_tf_code_to_ray_air.ipynb @@ -705,7 +705,7 @@ "id": "fd72830b", "metadata": {}, "source": [ - "Batch predictors work with [Ray Data](data). Here, we create a {class}`Datastream ` of images from our test set." + "Batch predictors work with [Ray Data](data). Here, we create a {class}`Dataset ` of images from our test set." ] }, { @@ -723,7 +723,7 @@ "id": "6ab1b08a", "metadata": {}, "source": [ - "Let's run {meth}`BatchPredictor.predict ` on our Datastream. This will distribute the prediction across a specified number of workers!" + "Let's run {meth}`BatchPredictor.predict ` on our Dataset. This will distribute the prediction across a specified number of workers!" ] }, { @@ -741,7 +741,7 @@ "id": "9ccadf89", "metadata": {}, "source": [ - "`predict_results` is also a Datastream, and we can take a look at the predictions inside:" + "`predict_results` is also a Dataset, and we can take a look at the predictions inside:" ] }, { @@ -769,7 +769,7 @@ "...\n", "```\n", "\n", - "Our model outputs logits, but we want the actual predicted labels. We can convert the logits to labels by taking the `argmax` of each model output in `predict_results` using {meth}`map_batches `. 
Then, we can compute the accuracy by comparing to the test set labels!" + "Our model outputs logits, but we want the actual predicted labels. We can convert the logits to labels by taking the `argmax` of each model output in `predict_results` using {meth}`map_batches `. Then, we can compute the accuracy by comparing to the test set labels!" ] }, { diff --git a/doc/source/ray-air/examples/gptj_batch_prediction.ipynb b/doc/source/ray-air/examples/gptj_batch_prediction.ipynb index 3b40100f5d0a..148ff843ec55 100644 --- a/doc/source/ray-air/examples/gptj_batch_prediction.ipynb +++ b/doc/source/ray-air/examples/gptj_batch_prediction.ipynb @@ -95,7 +95,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Since we will be using a pretrained model from Hugging Face hub, the simplest way is to use {meth}`map_batches ` with a [callable class UDF](transforming_data_actors). This will allow us to save time by initializing a model just once and then feed it multiple batches of data." + "Since we will be using a pretrained model from Hugging Face hub, the simplest way is to use {meth}`map_batches ` with a [callable class UDF](transforming_data_actors). This will allow us to save time by initializing a model just once and then feed it multiple batches of data." 
] }, { diff --git a/doc/source/ray-air/examples/huggingface_text_classification.ipynb b/doc/source/ray-air/examples/huggingface_text_classification.ipynb index 3f3e70612446..6823ea3d2279 100644 --- a/doc/source/ray-air/examples/huggingface_text_classification.ipynb +++ b/doc/source/ray-air/examples/huggingface_text_classification.ipynb @@ -444,9 +444,9 @@ { "data": { "text/plain": [ - "{'train': Datastream(num_blocks=1, num_rows=8551, schema={sentence: string, label: int64, idx: int32}),\n", - " 'validation': Datastream(num_blocks=1, num_rows=1043, schema={sentence: string, label: int64, idx: int32}),\n", - " 'test': Datastream(num_blocks=1, num_rows=1063, schema={sentence: string, label: int64, idx: int32})}" + "{'train': Dataset(num_blocks=1, num_rows=8551, schema={sentence: string, label: int64, idx: int32}),\n", + " 'validation': Dataset(num_blocks=1, num_rows=1043, schema={sentence: string, label: int64, idx: int32}),\n", + " 'test': Dataset(num_blocks=1, num_rows=1063, schema={sentence: string, label: int64, idx: int32})}" ] }, "execution_count": 11, diff --git a/doc/source/ray-air/examples/lightgbm_example.ipynb b/doc/source/ray-air/examples/lightgbm_example.ipynb index 13ef9da63285..420a1c895b58 100644 --- a/doc/source/ray-air/examples/lightgbm_example.ipynb +++ b/doc/source/ray-air/examples/lightgbm_example.ipynb @@ -51,7 +51,7 @@ "from ray.data.preprocessors.encoder import Categorizer\n", "from ray.train.lightgbm import LightGBMTrainer\n", "from ray.air.config import ScalingConfig\n", - "from ray.data import Datastream\n", + "from ray.data import Dataset\n", "from ray.air.result import Result\n", "from ray.data.preprocessors import StandardScaler" ] @@ -71,7 +71,7 @@ "metadata": {}, "outputs": [], "source": [ - "def prepare_data() -> Tuple[Datastream, Datastream, Datastream]:\n", + "def prepare_data() -> Tuple[Dataset, Dataset, Dataset]:\n", " dataset = ray.data.read_csv(\"s3://anonymous@air-example-data/breast_cancer_with_categorical.csv\")\n", " 
train_dataset, valid_dataset = dataset.train_test_split(test_size=0.3)\n", " test_dataset = valid_dataset.drop_columns(cols=[\"target\"])\n", @@ -210,8 +210,8 @@ "\u001b[2m\u001b[36m(pid=1491578)\u001b[0m _numeric_index_types = (pd.Int64Index, pd.Float64Index, pd.UInt64Index)\n", "\u001b[2m\u001b[36m(pid=1491578)\u001b[0m /home/ubuntu/ray/venv/lib/python3.8/site-packages/xgboost/compat.py:31: FutureWarning: pandas.Int64Index is deprecated and will be removed from pandas in a future version. Use pandas.Index with the appropriate dtype instead.\n", "\u001b[2m\u001b[36m(pid=1491578)\u001b[0m from pandas import MultiIndex, Int64Index\n", - "\u001b[2m\u001b[36m(LightGBMTrainer pid=1491578)\u001b[0m UserWarning: Datastream 'train' has 1 blocks, which is less than the `num_workers` 2. This dataset will be automatically repartitioned to 2 blocks.\n", - "\u001b[2m\u001b[36m(LightGBMTrainer pid=1491578)\u001b[0m UserWarning: Datastream 'valid' has 1 blocks, which is less than the `num_workers` 2. This dataset will be automatically repartitioned to 2 blocks.\n", + "\u001b[2m\u001b[36m(LightGBMTrainer pid=1491578)\u001b[0m UserWarning: Dataset 'train' has 1 blocks, which is less than the `num_workers` 2. This dataset will be automatically repartitioned to 2 blocks.\n", + "\u001b[2m\u001b[36m(LightGBMTrainer pid=1491578)\u001b[0m UserWarning: Dataset 'valid' has 1 blocks, which is less than the `num_workers` 2. This dataset will be automatically repartitioned to 2 blocks.\n", "\u001b[2m\u001b[36m(LightGBMTrainer pid=1491578)\u001b[0m UserWarning: cpus_per_actor is set to less than 2. Distributed LightGBM needs at least 2 CPUs per actor to train efficiently. This may lead to a degradation of performance during training.\n", "\u001b[2m\u001b[36m(pid=1491651)\u001b[0m /home/ubuntu/ray/venv/lib/python3.8/site-packages/xgboost/compat.py:31: FutureWarning: pandas.Int64Index is deprecated and will be removed from pandas in a future version. 
Use pandas.Index with the appropriate dtype instead.\n", "\u001b[2m\u001b[36m(pid=1491651)\u001b[0m from pandas import MultiIndex, Int64Index\n", diff --git a/doc/source/ray-air/examples/opt_deepspeed_batch_inference.ipynb b/doc/source/ray-air/examples/opt_deepspeed_batch_inference.ipynb index 465ff50bf2df..8c936c25f858 100644 --- a/doc/source/ray-air/examples/opt_deepspeed_batch_inference.ipynb +++ b/doc/source/ray-air/examples/opt_deepspeed_batch_inference.ipynb @@ -592,7 +592,7 @@ "name": "stderr", "output_type": "stream", "text": [ - "2023-04-22 11:14:12,074\tWARNING datastream.py:4124 -- Deprecation warning: use Datastream.materialize() instead of fully_executed().\n", + "2023-04-22 11:14:12,074\tWARNING dataset.py:4124 -- Deprecation warning: use Dataset.materialize() instead of fully_executed().\n", "2023-04-22 11:14:12,079\tINFO streaming_executor.py:87 -- Executing DAG InputDataBuffer[Input] -> AllToAllOperator[Repartition] -> AllToAllOperator[RandomShuffle]\n", "2023-04-22 11:14:12,081\tINFO streaming_executor.py:88 -- Execution config: ExecutionOptions(resource_limits=ExecutionResources(cpu=None, gpu=None, object_store_memory=None), locality_with_output=False, preserve_order=False, actor_locality_enabled=True, verbose_progress=False)\n", "2023-04-22 11:14:12,082\tINFO streaming_executor.py:90 -- Tip: To enable per-operator progress reporting, set RAY_DATA_VERBOSE_PROGRESS=1.\n" diff --git a/doc/source/ray-air/examples/pytorch_resnet_batch_prediction.ipynb b/doc/source/ray-air/examples/pytorch_resnet_batch_prediction.ipynb index 92c3152024c5..69853dad41b4 100644 --- a/doc/source/ray-air/examples/pytorch_resnet_batch_prediction.ipynb +++ b/doc/source/ray-air/examples/pytorch_resnet_batch_prediction.ipynb @@ -304,7 +304,7 @@ "source": [ "## Build a BatchPredictor\n", "\n", - "Now that we have our dataset loaded and preprocessed with [Ray Data](data), we're ready to construct our {class}`BatchPredictor `! 
A {class}`BatchPredictor ` takes a checkpoint and a predictor class (e.g., {class}`~ray.train.torch.TorchPredictor`, {class}`~ray.train.tensorflow.TensorflowPredictor`) and provides an interface to run batch prediction on Ray {class}`~ray.data.Datastream`s. It will distribute the inference workload across multiple workers when calling `predict()` and run prediction on multiple shards of data in parallel. You can find more details in [Using Predictors for Inference](air-predictors).\n", + "Now that we have our dataset loaded and preprocessed with [Ray Data](data), we're ready to construct our {class}`BatchPredictor `! A {class}`BatchPredictor ` takes a checkpoint and a predictor class (e.g., {class}`~ray.train.torch.TorchPredictor`, {class}`~ray.train.tensorflow.TensorflowPredictor`) and provides an interface to run batch prediction on Ray {class}`~ray.data.Dataset`s. It will distribute the inference workload across multiple workers when calling `predict()` and run prediction on multiple shards of data in parallel. You can find more details in [Using Predictors for Inference](air-predictors).\n", "\n", "For the demo, we'll directly load a pretrained ResNet model from `torchvision.models` and construct a {class}`~ray.train.torch.TorchCheckpoint` which includes the preprocessor. You can also load your own Ray AIR checkpoint from your previous Train/Tune experiments. You can find more details about checkpoint loading at the [AIR `Checkpoint` API reference](air-checkpoint-ref)." 
] @@ -391,7 +391,7 @@ "source": [ "## Evaluating Prediction Accuracy\n", "\n", - "`BatchPredictor.predict()` will return a Datastream with a column of model output with key `\"predictions\"`, and all columns specified in `keep_columns`.\n", + "`BatchPredictor.predict()` will return a Dataset with a column of model output with key `\"predictions\"`, and all columns specified in `keep_columns`.\n", "\n", "In this example, the output of the ResNet model is a 1000-dimensional tensor containing the logits of each class. We'll measure accuracy with Top-1 and Top-5 accuracy.\n", "(Top-N accuracy: The percentage of predictions where the true label falls in the top N predicted classes.)" diff --git a/doc/source/ray-air/examples/pytorch_tabular_starter.py b/doc/source/ray-air/examples/pytorch_tabular_starter.py index be01fba0d54d..72654ab7d593 100644 --- a/doc/source/ray-air/examples/pytorch_tabular_starter.py +++ b/doc/source/ray-air/examples/pytorch_tabular_starter.py @@ -54,7 +54,7 @@ def train_loop_per_worker(config): epochs = config["num_epochs"] num_features = config["num_features"] - # Get the Datastream shard for this data parallel worker, + # Get the Dataset shard for this data parallel worker, # and convert it to a PyTorch Dataset. train_data = session.get_dataset_shard("train") # Create model. 
diff --git a/doc/source/ray-air/examples/sklearn_example.ipynb b/doc/source/ray-air/examples/sklearn_example.ipynb index 1b64d0c6f13d..7d47f4f1390c 100644 --- a/doc/source/ray-air/examples/sklearn_example.ipynb +++ b/doc/source/ray-air/examples/sklearn_example.ipynb @@ -50,7 +50,7 @@ "\n", "\n", "import ray\n", - "from ray.data import Datastream\n", + "from ray.data import Dataset\n", "from ray.train.batch_predictor import BatchPredictor\n", "from ray.train.sklearn import SklearnPredictor\n", "from ray.data.preprocessors import Chain, OrdinalEncoder, StandardScaler\n", @@ -81,7 +81,7 @@ "metadata": {}, "outputs": [], "source": [ - "def prepare_data() -> Tuple[Datastream, Datastream, Datastream]:\n", + "def prepare_data() -> Tuple[Dataset, Dataset, Dataset]:\n", " dataset = ray.data.read_csv(\"s3://anonymous@air-example-data/breast_cancer_with_categorical.csv\")\n", " train_dataset, valid_dataset = dataset.train_test_split(test_size=0.3)\n", " test_dataset = valid_dataset.drop_columns([\"target\"])\n", diff --git a/doc/source/ray-air/examples/stablediffusion_batch_prediction.ipynb b/doc/source/ray-air/examples/stablediffusion_batch_prediction.ipynb index d86def70d50e..90735426b2a7 100644 --- a/doc/source/ray-air/examples/stablediffusion_batch_prediction.ipynb +++ b/doc/source/ray-air/examples/stablediffusion_batch_prediction.ipynb @@ -89,7 +89,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Since we will be using a pretrained model from Hugging Face hub, the simplest way is to use {meth}`map_batches ` with a [callable class UDF](transforming_data_actors). This will allow us to save time by initializing a model just once and then feed it multiple batches of data." + "Since we will be using a pretrained model from Hugging Face hub, the simplest way is to use {meth}`map_batches ` with a [callable class UDF](transforming_data_actors). This will allow us to save time by initializing a model just once and then feed it multiple batches of data." 
] }, { diff --git a/doc/source/ray-air/examples/tf_tabular_starter.py b/doc/source/ray-air/examples/tf_tabular_starter.py index 5c7315d2c784..56e66f2f60b3 100644 --- a/doc/source/ray-air/examples/tf_tabular_starter.py +++ b/doc/source/ray-air/examples/tf_tabular_starter.py @@ -55,7 +55,7 @@ def train_loop_per_worker(config): epochs = config["num_epochs"] num_features = config["num_features"] - # Get the Datastream shard for this data parallel worker, + # Get the Dataset shard for this data parallel worker, # and convert it to a Tensorflow Dataset. train_data = session.get_dataset_shard("train") diff --git a/doc/source/ray-air/examples/tfx_tabular_train_to_serve.ipynb b/doc/source/ray-air/examples/tfx_tabular_train_to_serve.ipynb index 9acf35498ab8..fc03148db989 100644 --- a/doc/source/ray-air/examples/tfx_tabular_train_to_serve.ipynb +++ b/doc/source/ray-air/examples/tfx_tabular_train_to_serve.ipynb @@ -14,7 +14,7 @@ "In this example, we showcase how to achieve the same tasks as the Keras Tutorial using [Ray AIR](https://docs.ray.io/en/latest/ray-air/getting-started.html), covering\n", "every step from data ingestion to pushing a model to serving.\n", "\n", - "1. Read a CSV into [Datastream](datastream_concept).\n", + "1. Read a CSV into [Dataset](dataset_concept).\n", "2. Process the dataset by chaining [Ray AIR preprocessors](https://docs.ray.io/en/latest/ray-air/getting-started.html#preprocessors).\n", "3. Train the model using the TensorflowTrainer from AIR.\n", "4. Serve the model using Ray Serve and the above preprocessors." 
@@ -439,13 +439,13 @@ "from typing import Tuple\n", "\n", "\n", - "def split_data(data: pd.DataFrame) -> Tuple[ray.data.Datastream, pd.DataFrame, np.array]:\n", + "def split_data(data: pd.DataFrame) -> Tuple[ray.data.Dataset, pd.DataFrame, np.array]:\n", " \"\"\"Split the data in a stratified way.\n", "\n", " Returns:\n", " A tuple containing train dataset, test data and test label.\n", " \"\"\"\n", - " # There is a native offering in Datastream for split as well.\n", + " # There is a native offering in Dataset for split as well.\n", " # However, supporting stratification is a TODO there. So use\n", " # scikit-learn equivalent here.\n", " train_data, test_data = train_test_split(\n", diff --git a/doc/source/ray-air/examples/torch_detection.ipynb b/doc/source/ray-air/examples/torch_detection.ipynb index 0b95b2a483cb..1daaa8631657 100644 --- a/doc/source/ray-air/examples/torch_detection.ipynb +++ b/doc/source/ray-air/examples/torch_detection.ipynb @@ -10,7 +10,7 @@ "This tutorial explains how to fine-tune `fasterrcnn_resnet50_fpn` using the [Ray AI Runtime](air) for parallel data ingest and training.\n", "\n", "Here's what you'll do:\n", - "1. Load raw images and [VOC-style](http://host.robots.ox.ac.uk/pascal/VOC/) annotations into a Datastream\n", + "1. Load raw images and [VOC-style](http://host.robots.ox.ac.uk/pascal/VOC/) annotations into a Dataset\n", "2. Fine-tune `fasterrcnn_resnet50_fpn` (the backbone is pre-trained on ImageNet)\n", "3. Evaluate the model's accuracy\n", "\n", @@ -71,7 +71,7 @@ "id": "65bf13b8", "metadata": {}, "source": [ - "## Create a `Datastream`\n", + "## Create a `Dataset`\n", "\n", "You'll work with a subset of [Pascal VOC](http://host.robots.ox.ac.uk/pascal/VOC/) that contains cats and dogs (the full dataset has 20 classes)." 
] @@ -326,7 +326,7 @@ "import ray\n", "\n", "\n", - "annotations: ray.data.Datastream = ray.data.read_datasource(\n", + "annotations: ray.data.Dataset = ray.data.read_datasource(\n", " VOCAnnotationDatasource(), paths=\"s3://anonymous@air-example-data/AnimalDetection/Annotations\"\n", ")" ] diff --git a/doc/source/ray-air/examples/torch_image_example.ipynb b/doc/source/ray-air/examples/torch_image_example.ipynb index d750bf14ad30..5718e54a1ca7 100644 --- a/doc/source/ray-air/examples/torch_image_example.ipynb +++ b/doc/source/ray-air/examples/torch_image_example.ipynb @@ -53,7 +53,7 @@ "\n", "We'll train our classifier on a popular image dataset called [CIFAR-10](https://www.cs.toronto.edu/~kriz/cifar.html).\n", "\n", - "First, let's load CIFAR-10 into a Datastream." + "First, let's load CIFAR-10 into a Dataset." ] }, { @@ -100,8 +100,8 @@ "train_dataset = torchvision.datasets.CIFAR10(\"data\", download=True, train=True)\n", "test_dataset = torchvision.datasets.CIFAR10(\"data\", download=True, train=False)\n", "\n", - "train_dataset: ray.data.Datastream = ray.data.from_torch(train_dataset)\n", - "test_dataset: ray.data.Datastream = ray.data.from_torch(test_dataset)" + "train_dataset: ray.data.Dataset = ray.data.from_torch(train_dataset)\n", + "test_dataset: ray.data.Dataset = ray.data.from_torch(test_dataset)" ] }, { @@ -118,7 +118,7 @@ "version_minor": 0 }, "text/plain": [ - "VBox(children=(HTML(value='

    Datastream

    '), Tab(children=(HTML(value='
    Dataset'), Tab(children=(HTML(value='
    ` doesn't parallelize reads, so you shouldn't use it with larger datasets.\n", "\n", - "Next, let's represent our data using a dictionary of ndarrays instead of tuples. This lets us call {py:meth}`Datastream.iter_torch_batches ` later in the tutorial." + "Next, let's represent our data using a dictionary of ndarrays instead of tuples. This lets us call {py:meth}`Dataset.iter_torch_batches ` later in the tutorial." ] }, { @@ -213,7 +213,7 @@ "version_minor": 0 }, "text/plain": [ - "VBox(children=(HTML(value='

    Datastream

    '), Tab(children=(HTML(value='
    Dataset'), Tab(children=(HTML(value='
    `.\n", - "* We call {py:func}`session.get_dataset_shard ` and {py:meth}`Datastream.iter_torch_batches ` to get a subset of our training data.\n", + "* We call {py:func}`session.get_dataset_shard ` and {py:meth}`Dataset.iter_torch_batches ` to get a subset of our training data.\n", "* We save model state using {py:func}`session.report `." ] }, @@ -559,7 +559,7 @@ " model=Net(),\n", ")\n", "\n", - "outputs: ray.data.Datastream = batch_predictor.predict(\n", + "outputs: ray.data.Dataset = batch_predictor.predict(\n", " data=test_dataset,\n", " dtype=torch.float,\n", " feature_columns=[\"image\"],\n", diff --git a/doc/source/ray-air/examples/torch_incremental_learning.ipynb b/doc/source/ray-air/examples/torch_incremental_learning.ipynb index 7e51e56fe6d3..0f05e07cde6d 100644 --- a/doc/source/ray-air/examples/torch_incremental_learning.ipynb +++ b/doc/source/ray-air/examples/torch_incremental_learning.ipynb @@ -47,7 +47,7 @@ "source": [ "This example will cover the following:\n", "1. Loading a PyTorch Dataset to Ray Data\n", - "2. Create an `Iterator[ray.data.Datastream]` abstraction to represent a stream of data to train on for incremental training.\n", + "2. Create an `Iterator[ray.data.Dataset]` abstraction to represent a stream of data to train on for incremental training.\n", "3. Implement a custom Ray AIR preprocessor to preprocess the dataset.\n", "4. Incrementally train a model using data parallel training.\n", "5. Use our trained model to perform batch prediction on test data.\n", @@ -242,19 +242,19 @@ "id": "3SVSrkqrDJuc" }, "source": [ - "## 3a: Load MNIST Dataset to a Datastream\n", + "## 3a: Load MNIST Dataset to a Dataset\n", "\n", - "Let's first define a simple function that will return the original MNIST Dataset as a distributed Datastream. 
Ray Data is the standard way to load and exchange data in Ray libraries and applications, read more about the library [here](data)!\n", + "Let's first define a simple function that will return the original MNIST Dataset as a distributed Dataset. Ray Data is the standard way to load and exchange data in Ray libraries and applications, read more about the library [here](data)!\n", "\n", "The function in the below code snippet does the following:\n", "1. Downloads the MNIST Dataset from torchvision in-memory\n", - "2. Loads the in-memory Torch Dataset into a Datastream\n", - "3. Converts the Datastream into Numpy format. Instead of the Datastream iterating over tuples, it will have 2 columns: \"image\" & \"label\". \n", - "This will allow us to apply built-in preprocessors to the Datastream and allow Datastreams to be used with Ray AIR Predictors.\n", + "2. Loads the in-memory Torch Dataset into a Dataset\n", + "3. Converts the Dataset into Numpy format. Instead of the Dataset iterating over tuples, it will have 2 columns: \"image\" & \"label\". \n", + "This will allow us to apply built-in preprocessors to the Dataset and allow Datasets to be used with Ray AIR Predictors.\n", "\n", "For this example, since we are just working with MNIST dataset, which is small, we use the {py:class}`~ray.data.datasource.from_torch` which just loads the full MNIST dataset into memory.\n", "\n", - "For loading larger datasets in a parallel fashion, you should use [Datastream's additional read APIs](input-output) to load data from parquet, csv, image files, and more!" + "For loading larger datasets in a parallel fashion, you should use [Dataset's additional read APIs](input-output) to load data from parquet, csv, image files, and more!" 
] }, { @@ -273,8 +273,8 @@ "import ray\n", "\n", "\n", - "def get_mnist_dataset(train: bool = True) -> ray.data.Datastream:\n", - " \"\"\"Returns MNIST Dataset as a ray.data.Datastream.\n", + "def get_mnist_dataset(train: bool = True) -> ray.data.Dataset:\n", + " \"\"\"Returns MNIST Dataset as a ray.data.Dataset.\n", " \n", " Args:\n", " train: Whether to return the train dataset or test dataset.\n", @@ -311,7 +311,7 @@ "over Ray Data. Each item in this iterator contains a unique permutation of\n", "MNIST, and is one task that we want to train on.\n", "\n", - "In this example, \"the stream of tasks\" is contrived since all the data for all tasks exist already in an offline setting. For true online continual learning, you would want to implement a custom dataset iterator that reads from some stream datasource to produce new tasks. The only abstraction that's needed is `Iterator[ray.data.Datastream]`.\n", + "In this example, \"the stream of tasks\" is contrived since all the data for all tasks exist already in an offline setting. For true online continual learning, you would want to implement a custom dataset iterator that reads from some stream datasource to produce new tasks. The only abstraction that's needed is `Iterator[ray.data.Dataset]`.\n", "\n", "Note that the test dataset stream has the same permutations that are used for the training dataset stream. In general for continual learning, it is expected that the data distribution of the test/prediction data follows what the model was trained on. If you notice that the distribution of new prediction queries is changing compared to the distribution of the training data, then you should probably trigger training of a new task." 
] @@ -357,7 +357,7 @@ " self.test_mnist_dataset = get_mnist_dataset(train=False)\n", "\n", " def random_permute_dataset(\n", - " self, dataset: ray.data.Datastream, permutation: np.ndarray\n", + " self, dataset: ray.data.Dataset, permutation: np.ndarray\n", " ):\n", " \"\"\"Randomly permutes the pixels for each image in the dataset.\"\"\"\n", "\n", @@ -368,14 +368,14 @@ "\n", " return dataset.map_batches(PixelsPermutation, compute=ActorPoolStrategy(), batch_format=\"pandas\")\n", "\n", - " def generate_train_stream(self) -> Iterator[ray.data.Datastream]:\n", + " def generate_train_stream(self) -> Iterator[ray.data.Dataset]:\n", " for permutation in self.permutations:\n", " permuted_mnist_dataset = self.random_permute_dataset(\n", " self.train_mnist_dataset, permutation\n", " )\n", " yield permuted_mnist_dataset\n", "\n", - " def generate_test_stream(self) -> Iterator[ray.data.Datastream]:\n", + " def generate_test_stream(self) -> Iterator[ray.data.Dataset]:\n", " for permutation in self.permutations:\n", " mnist_dataset = get_mnist_dataset(train=False)\n", " permuted_mnist_dataset = self.random_permute_dataset(\n", @@ -421,7 +421,7 @@ "\n", "This is just standard PyTorch training, with the difference being that we can leverage [Ray Train's utility functions](train-pytorch-integration) and [Ray AIR Sesssion](air-session-ref):\n", "- `ray.train.torch.prepare_model(...)`: This will prepare the model for distributed training by wrapping it in either PyTorch `DistributedDataParallel` or `FullyShardedDataParallel` and moving it to the correct accelerator device.\n", - "- `ray.air.session.get_dataset_shard(...)`: This will get the Datastream shard for this particular Data Parallel worker.\n", + "- `ray.air.session.get_dataset_shard(...)`: This will get the Dataset shard for this particular Data Parallel worker.\n", "- `ray.air.session.report({}, checkpoint=...)`: This will tell Ray Train to persist the provided `Checkpoint` object.\n", "- 
`ray.air.session.get_checkpoint()`: Returns a checkpoint to resume from. This is useful for either fault tolerance purposes, or for our purposes, to continue training the same model on a new incoming dataset." ] @@ -459,7 +459,7 @@ " optimizer = SGD(model.parameters(), lr=learning_rate, momentum=momentum)\n", " criterion = CrossEntropyLoss()\n", "\n", - " # Get the Datastream shard for this data parallel worker, and convert it to a PyTorch Dataset.\n", + " # Get the Dataset shard for this data parallel worker, and convert it to a PyTorch Dataset.\n", " dataset_shard = session.get_dataset_shard(\"train\").iter_torch_batches(\n", " batch_size=batch_size,\n", " )\n", @@ -550,7 +550,7 @@ "from ray.train.batch_predictor import BatchPredictor\n", "from ray.train.torch import TorchPredictor\n", "\n", - "def batch_predict(checkpoint: ray.air.Checkpoint, test_dataset: ray.data.Datastream) -> float:\n", + "def batch_predict(checkpoint: ray.air.Checkpoint, test_dataset: ray.data.Dataset) -> float:\n", " \"\"\"Perform batch prediction on the provided test dataset, and return accuracy results.\"\"\"\n", "\n", " batch_predictor = BatchPredictor.from_checkpoint(checkpoint, predictor_cls=TorchPredictor, model=SimpleMLP(num_classes=10))\n", diff --git a/doc/source/ray-air/examples/upload_to_comet_ml.ipynb b/doc/source/ray-air/examples/upload_to_comet_ml.ipynb index b2d2454abefd..192385b5d862 100644 --- a/doc/source/ray-air/examples/upload_to_comet_ml.ipynb +++ b/doc/source/ray-air/examples/upload_to_comet_ml.ipynb @@ -57,7 +57,7 @@ "id": "29fcd93b", "metadata": {}, "source": [ - "We define a simple function that returns our training dataset as a Datastream:" + "We define a simple function that returns our training dataset as a Dataset:" ] }, { @@ -67,7 +67,7 @@ "metadata": {}, "outputs": [], "source": [ - "def get_train_dataset() -> ray.data.Datastream:\n", + "def get_train_dataset() -> ray.data.Dataset:\n", " dataset = 
ray.data.read_csv(\"s3://anonymous@air-example-data/breast_cancer.csv\")\n", " return dataset" ] @@ -96,7 +96,7 @@ "metadata": {}, "outputs": [], "source": [ - "def train_model(train_dataset: ray.data.Datastream, comet_project: str) -> Result:\n", + "def train_model(train_dataset: ray.data.Dataset, comet_project: str) -> Result:\n", " \"\"\"Train a simple XGBoost model and return the result.\"\"\"\n", " trainer = XGBoostTrainer(\n", " scaling_config=ScalingConfig(num_workers=2),\n", @@ -171,7 +171,7 @@ "COMET WARNING: Failed to add tag(s) None to the experiment\n", "\n", "COMET WARNING: Empty mapping given to log_params({}); ignoring\n", - "\u001b[2m\u001b[36m(GBDTTrainable pid=19852)\u001b[0m UserWarning: Datastream 'train' has 1 blocks, which is less than the `num_workers` 2. This dataset will be automatically repartitioned to 2 blocks.\n", + "\u001b[2m\u001b[36m(GBDTTrainable pid=19852)\u001b[0m UserWarning: Dataset 'train' has 1 blocks, which is less than the `num_workers` 2. This dataset will be automatically repartitioned to 2 blocks.\n", "\u001b[2m\u001b[33m(raylet)\u001b[0m 2022-05-19 15:19:24,628\tINFO context.py:70 -- Exec'ing worker with command: exec /Users/kai/.pyenv/versions/3.7.7/bin/python3.7 /Users/kai/coding/ray/python/ray/workers/default_worker.py --node-ip-address=127.0.0.1 --node-manager-port=61222 --object-store-name=/tmp/ray/session_2022-05-19_15-19-14_632568_19778/sockets/plasma_store --raylet-name=/tmp/ray/session_2022-05-19_15-19-14_632568_19778/sockets/raylet --redis-address=None --storage=None --temp-dir=/tmp/ray --metrics-agent-port=62873 --logging-rotate-bytes=536870912 --logging-rotate-backup-count=5 --gcs-address=127.0.0.1:61938 --redis-password=5241590000000000 --startup-token=17 --runtime-env-hash=-2010331069\n", "\u001b[2m\u001b[36m(GBDTTrainable pid=19852)\u001b[0m 2022-05-19 15:19:25,961\tINFO main.py:980 -- [RayXGBoost] Created 2 new actors (2 total actors). 
Waiting until actors are ready for training.\n", "\u001b[2m\u001b[33m(raylet)\u001b[0m 2022-05-19 15:19:26,830\tINFO context.py:70 -- Exec'ing worker with command: exec /Users/kai/.pyenv/versions/3.7.7/bin/python3.7 /Users/kai/coding/ray/python/ray/workers/default_worker.py --node-ip-address=127.0.0.1 --node-manager-port=61222 --object-store-name=/tmp/ray/session_2022-05-19_15-19-14_632568_19778/sockets/plasma_store --raylet-name=/tmp/ray/session_2022-05-19_15-19-14_632568_19778/sockets/raylet --redis-address=None --storage=None --temp-dir=/tmp/ray --metrics-agent-port=62873 --logging-rotate-bytes=536870912 --logging-rotate-backup-count=5 --gcs-address=127.0.0.1:61938 --redis-password=5241590000000000 --startup-token=18 --runtime-env-hash=-2010331069\n", diff --git a/doc/source/ray-air/examples/upload_to_wandb.ipynb b/doc/source/ray-air/examples/upload_to_wandb.ipynb index 7b62ff1168eb..fb679dc4636b 100644 --- a/doc/source/ray-air/examples/upload_to_wandb.ipynb +++ b/doc/source/ray-air/examples/upload_to_wandb.ipynb @@ -63,7 +63,7 @@ "id": "2efa1564", "metadata": {}, "source": [ - "We define a simple function that returns our training dataset as a Datastream:\n" + "We define a simple function that returns our training dataset as a Dataset:\n" ] }, { @@ -73,7 +73,7 @@ "metadata": {}, "outputs": [], "source": [ - "def get_train_dataset() -> ray.data.Datastream:\n", + "def get_train_dataset() -> ray.data.Dataset:\n", " dataset = ray.data.read_csv(\"s3://anonymous@air-example-data/breast_cancer.csv\")\n", " return dataset" ] @@ -119,7 +119,7 @@ "from ray.train.xgboost import XGBoostTrainer\n", "\n", "\n", - "def train_model_xgboost(train_dataset: ray.data.Datastream, wandb_project: str) -> Result:\n", + "def train_model_xgboost(train_dataset: ray.data.Dataset, wandb_project: str) -> Result:\n", " \"\"\"Train a simple XGBoost model and return the result.\"\"\"\n", " trainer = XGBoostTrainer(\n", " scaling_config=ScalingConfig(num_workers=2),\n", @@ -285,7 +285,7 @@ 
"from ray.train.torch import TorchTrainer\n", "\n", "\n", - "def train_model_torch(train_dataset: ray.data.Datastream, wandb_project: str) -> Result:\n", + "def train_model_torch(train_dataset: ray.data.Dataset, wandb_project: str) -> Result:\n", " \"\"\"Train a simple XGBoost model and return the result.\"\"\"\n", " trainer = TorchTrainer(\n", " train_loop_per_worker=train_loop,\n", diff --git a/doc/source/ray-air/examples/xgboost_example.ipynb b/doc/source/ray-air/examples/xgboost_example.ipynb index 0705bebddd1e..671c309bbec9 100644 --- a/doc/source/ray-air/examples/xgboost_example.ipynb +++ b/doc/source/ray-air/examples/xgboost_example.ipynb @@ -77,7 +77,7 @@ "from ray.train.xgboost import XGBoostPredictor\n", "from ray.train.xgboost import XGBoostTrainer\n", "from ray.air.config import ScalingConfig\n", - "from ray.data import Datastream\n", + "from ray.data import Dataset\n", "from ray.air.result import Result\n", "from ray.data.preprocessors import StandardScaler" ] @@ -101,7 +101,7 @@ "metadata": {}, "outputs": [], "source": [ - "def prepare_data() -> Tuple[Datastream, Datastream, Datastream]:\n", + "def prepare_data() -> Tuple[Dataset, Dataset, Dataset]:\n", " dataset = ray.data.read_csv(\"s3://anonymous@air-example-data/breast_cancer.csv\")\n", " train_dataset, valid_dataset = dataset.train_test_split(test_size=0.3)\n", " test_dataset = valid_dataset.drop_columns([\"target\"])\n", @@ -252,8 +252,8 @@ "\u001b[2m\u001b[36m(pid=1493910)\u001b[0m FutureWarning: pandas.Int64Index is deprecated and will be removed from pandas in a future version. Use pandas.Index with the appropriate dtype instead.\n", "\u001b[2m\u001b[36m(pid=1493910)\u001b[0m FutureWarning: pandas.Float64Index is deprecated and will be removed from pandas in a future version. Use pandas.Index with the appropriate dtype instead.\n", "\u001b[2m\u001b[36m(pid=1493910)\u001b[0m FutureWarning: pandas.UInt64Index is deprecated and will be removed from pandas in a future version. 
Use pandas.Index with the appropriate dtype instead.\n", - "\u001b[2m\u001b[36m(XGBoostTrainer pid=1493910)\u001b[0m UserWarning: Datastream 'train' has 1 blocks, which is less than the `num_workers` 2. This dataset will be automatically repartitioned to 2 blocks.\n", - "\u001b[2m\u001b[36m(XGBoostTrainer pid=1493910)\u001b[0m UserWarning: Datastream 'valid' has 1 blocks, which is less than the `num_workers` 2. This dataset will be automatically repartitioned to 2 blocks.\n", + "\u001b[2m\u001b[36m(XGBoostTrainer pid=1493910)\u001b[0m UserWarning: Dataset 'train' has 1 blocks, which is less than the `num_workers` 2. This dataset will be automatically repartitioned to 2 blocks.\n", + "\u001b[2m\u001b[36m(XGBoostTrainer pid=1493910)\u001b[0m UserWarning: Dataset 'valid' has 1 blocks, which is less than the `num_workers` 2. This dataset will be automatically repartitioned to 2 blocks.\n", "\u001b[2m\u001b[36m(XGBoostTrainer pid=1493910)\u001b[0m 2022-06-22 17:29:04,073\tINFO main.py:980 -- [RayXGBoost] Created 2 new actors (2 total actors). Waiting until actors are ready for training.\n", "\u001b[2m\u001b[36m(pid=1494007)\u001b[0m /home/ubuntu/ray/venv/lib/python3.8/site-packages/xgboost/compat.py:31: FutureWarning: pandas.Int64Index is deprecated and will be removed from pandas in a future version. Use pandas.Index with the appropriate dtype instead.\n", "\u001b[2m\u001b[36m(pid=1494007)\u001b[0m from pandas import MultiIndex, Int64Index\n", diff --git a/doc/source/ray-air/key-concepts.rst b/doc/source/ray-air/key-concepts.rst index 5f5165342901..ea18ef28423b 100644 --- a/doc/source/ray-air/key-concepts.rst +++ b/doc/source/ray-air/key-concepts.rst @@ -9,16 +9,16 @@ Here, we cover the main concepts in AIR. :local: -Datastreams +Datasets ----------- -:ref:`Ray Data ` is the standard way to load and exchange data in Ray AIR. It provides a `Datastream ` concept which is used extensively for data loading, preprocessing, and batch inference. 
+:ref:`Ray Data ` is the standard way to load and exchange data in Ray AIR. It provides a `Dataset ` concept which is used extensively for data loading, preprocessing, and batch inference. Preprocessors ------------- -Preprocessors are primitives that can be used to transform input data into features. Preprocessors operate on :ref:`Datastreams `, which makes them scalable and compatible with a variety of datasources and dataframe libraries. +Preprocessors are primitives that can be used to transform input data into features. Preprocessors operate on :ref:`Datasets `, which makes them scalable and compatible with a variety of datasources and dataframe libraries. A Preprocessor is fitted during Training, and applied at runtime in both Training and Serving on data batches in the same way. AIR comes with a collection of built-in preprocessors, and you can also define your own with simple templates. diff --git a/doc/source/ray-air/predictors.rst b/doc/source/ray-air/predictors.rst index 16c1545d45d8..0656bcd4428a 100644 --- a/doc/source/ray-air/predictors.rst +++ b/doc/source/ray-air/predictors.rst @@ -407,7 +407,7 @@ Implement `_predict_numpy` or `_predict_pandas` batch of NumPy data. It accepts a ``np.ndarray`` or ``dict[str, np.ndarray]`` as input and returns a ``np.ndarray`` or ``dict[str, np.ndarray]`` as output. - The input type is determined by the type of :class:`~ray.data.Datastream` passed to + The input type is determined by the type of :class:`~ray.data.Dataset` passed to :meth:`BatchPredictor.predict `. If your dataset has columns, the input is a ``dict``; otherwise, the input is a ``np.ndarray``. @@ -448,7 +448,7 @@ Perform inference You can also use any of the out-of-the-box preprocessors instead of implementing your own: :ref:`air-preprocessor-ref`. 2. Create a :class:`~ray.train.batch_predictor.BatchPredictor` from your checkpoint. - 3. Read sample images into a :class:`~ray.data.Datastream`. + 3. Read sample images into a :class:`~ray.data.Dataset`. 4. 
Call :class:`~ray.train.batch_predictor.BatchPredictor.predict` to classify the images in the dataset. @@ -464,7 +464,7 @@ Perform inference 1. Create a :class:`~ray.train.batch_predictor.BatchPredictor` from your checkpoint. - 2. Read the Guerry dataset into a :class:`~ray.data.Datastream`. + 2. Read the Guerry dataset into a :class:`~ray.data.Dataset`. 3. Call :class:`~ray.train.batch_predictor.BatchPredictor.predict` to perform regression on the samples in the dataset. diff --git a/doc/source/ray-air/preprocessors.rst b/doc/source/ray-air/preprocessors.rst index bc9bcad8b37c..a589d2dbdac2 100644 --- a/doc/source/ray-air/preprocessors.rst +++ b/doc/source/ray-air/preprocessors.rst @@ -25,7 +25,7 @@ For example, the following code trains a model with a preprocessor that normaliz The ``Preprocessor`` class with four public methods that can we used separately from a trainer: -#. ``fit()``: Compute state information about a :class:`Dataset ` (e.g., the mean or standard deviation of a column) +#. ``fit()``: Compute state information about a :class:`Dataset ` (e.g., the mean or standard deviation of a column) and save it to the ``Preprocessor``. This information is used to perform ``transform()``, and the method is typically called on a training dataset. #. ``transform()``: Apply a transformation to a ``Dataset``. @@ -183,7 +183,7 @@ Ray AIR provides a handful of preprocessors out of the box. .. autosummary:: :nosignatures: - ray.data.Datastream.train_test_split + ray.data.Dataset.train_test_split Which preprocessor should you use? ---------------------------------- diff --git a/doc/source/ray-air/trainers.rst b/doc/source/ray-air/trainers.rst index d7a35637e20f..3eaa89c212b3 100644 --- a/doc/source/ray-air/trainers.rst +++ b/doc/source/ray-air/trainers.rst @@ -60,7 +60,7 @@ You can provide multiple datasets to a trainer via the ``datasets`` parameter. 
If ``datasets`` includes a training dataset (denoted by the "train" key), then it will be split into multiple dataset shards, with each worker training on a single shard. All other datasets will not be split. You can access the data shard within a worker via :func:`~ray.air.session.get_dataset_shard()`, and use -:meth:`~ray.data.Datastream.to_tf` or `iter_torch_batches` to generate batches of Tensorflow or Pytorch tensors. +:meth:`~ray.data.Dataset.to_tf` or `iter_torch_batches` to generate batches of Tensorflow or Pytorch tensors. You can read more about :ref:`data ingest ` here. Read more about :ref:`Ray Train's Deep Learning Trainers `. diff --git a/doc/source/ray-contribute/writing-code-snippets.rst b/doc/source/ray-contribute/writing-code-snippets.rst index 0e7a7f5570c7..9c91dc6a913a 100644 --- a/doc/source/ray-contribute/writing-code-snippets.rst +++ b/doc/source/ray-contribute/writing-code-snippets.rst @@ -227,7 +227,7 @@ the `doctest` directive and replace problematic sections with ellipsis. :: >>> import ray >>> ray.data.read_images("s3://anonymous@air-example-data-2/imagenet-sample-images") - Datastream( + Dataset( num_blocks=..., num_rows=..., schema={image: numpy.ndarray(shape=..., dtype=uint8)} @@ -237,7 +237,7 @@ If you omit the `doctest` directive, append `# doctest: +ELLIPSIS` to your code >>> import ray >>> ray.data.read_images("s3://anonymous@air-example-data-2/imagenet-sample-images") # doctest: +ELLIPSIS - Datastream( + Dataset( num_blocks=..., num_rows=..., schema={image: numpy.ndarray(shape=..., dtype=uint8)} @@ -260,7 +260,7 @@ the `testoutput` directive and replace problematic sections with ellipsis. :: .. 
testoutput:: :options: +ELLIPSIS - Datastream( + Dataset( num_blocks=..., num_rows=..., schema={image: numpy.ndarray(shape=..., dtype=uint8)} diff --git a/doc/source/ray-core/_examples/datasets_train/datasets_train.py b/doc/source/ray-core/_examples/datasets_train/datasets_train.py index 7ced414ae374..eabad34c03ed 100644 --- a/doc/source/ray-core/_examples/datasets_train/datasets_train.py +++ b/doc/source/ray-core/_examples/datasets_train/datasets_train.py @@ -120,7 +120,7 @@ def create_data_chunk(n, d, seed, include_label=False): # os.system("aws s3 sync ./inference s3://cuj-big-data/inference") -def read_dataset(path: str) -> ray.data.Datastream: +def read_dataset(path: str) -> ray.data.Dataset: print(f"reading data from {path}") return ray.data.read_parquet(path).random_shuffle() @@ -139,18 +139,18 @@ def __init__(self): self.standard_stats = None def preprocess_train_data( - self, ds: ray.data.Datastream - ) -> Tuple[ray.data.Datastream, ray.data.Datastream]: + self, ds: ray.data.Dataset + ) -> Tuple[ray.data.Dataset, ray.data.Dataset]: print("\n\nPreprocessing training dataset.\n") return self._preprocess(ds, False) - def preprocess_inference_data(self, df: ray.data.Datastream) -> ray.data.Datastream: + def preprocess_inference_data(self, df: ray.data.Dataset) -> ray.data.Dataset: print("\n\nPreprocessing inference dataset.\n") return self._preprocess(df, True)[0] def _preprocess( - self, ds: ray.data.Datastream, inferencing: bool - ) -> Tuple[ray.data.Datastream, ray.data.Datastream]: + self, ds: ray.data.Dataset, inferencing: bool + ) -> Tuple[ray.data.Dataset, ray.data.Dataset]: print("\nStep 1: Dropping nulls, creating new_col, updating feature_1\n") def batch_transformer(df: pd.DataFrame): diff --git a/doc/source/ray-overview/getting-started.md b/doc/source/ray-overview/getting-started.md index 9f6d879b977d..77366e7c7688 100644 --- a/doc/source/ray-overview/getting-started.md +++ b/doc/source/ray-overview/getting-started.md @@ -65,7 +65,7 @@ pip install 
"ray[air]" `````{dropdown} Efficiently process your data into features. -Load data into a ``Datastream``. +Load data into a ``Dataset``. ```{literalinclude} ../ray-air/examples/xgboost_starter.py :language: python @@ -158,8 +158,8 @@ pip install "ray[data]" dask ``` ```` -Get started by creating a Datastream from synthetic data using ``ray.data.range()`` and ``ray.data.from_items()``. -A Datastream can hold either plain Python objects (schema is a Python type), or Arrow records (schema is Arrow). +Get started by creating a Dataset from synthetic data using ``ray.data.range()`` and ``ray.data.from_items()``. +A Dataset can hold either plain Python objects (schema is a Python type), or Arrow records (schema is Arrow). ```{literalinclude} ../data/doc_code/quick_start.py :language: python @@ -167,18 +167,18 @@ A Datastream can hold either plain Python objects (schema is a Python type), or :end-before: __create_from_python_end__ ``` -Datastreams can be created from files on local disk or remote datasources such as S3. Any filesystem +Datasets can be created from files on local disk or remote datasources such as S3. Any filesystem [supported by pyarrow](http://arrow.apache.org/docs/python/generated/pyarrow.fs.FileSystem.html) can be used to specify file locations. -You can also create a ``Datastream`` from existing data in the Ray object store or Ray-compatible distributed DataFrames: +You can also create a ``Dataset`` from existing data in the Ray object store or Ray-compatible distributed DataFrames: ```{literalinclude} ../data/doc_code/quick_start.py :language: python :start-after: __create_from_files_begin__ :end-before: __create_from_files_end__ ``` -Datastreams can be transformed in parallel using ``.map()``. +Datasets can be transformed in parallel using ``.map()``. Transformations are executed *eagerly* and block until the operation is finished. -Datastreams also supports ``.filter()`` and ``.flat_map()``. 
+Datasets also supports ``.filter()`` and ``.flat_map()``. ```{literalinclude} ../data/doc_code/quick_start.py :language: python diff --git a/doc/source/ray-references/glossary.rst b/doc/source/ray-references/glossary.rst index 8366fd7b7b76..4b59a3bb897e 100644 --- a/doc/source/ray-references/glossary.rst +++ b/doc/source/ray-references/glossary.rst @@ -5,7 +5,7 @@ Ray Glossary On this page you find a list of important terminology used throughout the Ray documentation, sorted alphabetically. If you're interested in a glossary for -Ray Data specifically, please see the :ref:`Ray Data Glossary`. +Ray Data specifically, please see the :ref:`Ray Data Glossary`. .. glossary:: @@ -124,7 +124,7 @@ Ray Data specifically, please see the :ref:`Ray Data Glossary` for + :ref:`An interface used to preprocess a Dataset` for training and inference (prediction) with other AIR components. Preprocessors can be stateful, as they can be fitted on the training dataset before being used to transform the training and evaluation datasets. diff --git a/doc/source/rllib/rllib-offline.rst b/doc/source/rllib/rllib-offline.rst index e15036780686..5c06c543984e 100644 --- a/doc/source/rllib/rllib-offline.rst +++ b/doc/source/rllib/rllib-offline.rst @@ -230,7 +230,7 @@ We support JSON and Parquet files today. Other file formats supported by Ray Dat Unlike JSON input, a single dataset can be automatically sharded and replayed by multiple rollout workers by simply specifying the desired num_workers config. -To load sample data using Datastream, specify input and input_config keys like the following: +To load sample data using Dataset, specify input and input_config keys like the following: .. code-block:: python @@ -243,14 +243,14 @@ To load sample data using Datastream, specify input and input_config keys like t "path": "/path/to/json_dir/", # Num of tasks reading dataset in parallel, default is num_workers. "parallelism": 3, - # Datastream allocates 0.5 CPU for each reader by default. 
+ # Dataset allocates 0.5 CPU for each reader by default. # Adjust this value based on the size of your offline dataset. "num_cpus_per_read_task": 0.5, } ... } -To write sample data to JSON or Parquet files using Datastream, specify output and output_config keys like the following: +To write sample data to JSON or Parquet files using Dataset, specify output and output_config keys like the following: .. code-block:: python @@ -276,7 +276,7 @@ ensures that the ``infos`` dictionary, as returned by the RL environment, is inc .. note:: This setting is only relevant for the TensorFlow based agents, for PyTorch agents the ``infos`` data is always stored. -To write the ``infos`` data to JSON or Parquet files using Datastream, specify output and output_config keys like the following: +To write the ``infos`` data to JSON or Parquet files using Dataset, specify output and output_config keys like the following: .. code-block:: python diff --git a/doc/source/train/dl_guide.rst b/doc/source/train/dl_guide.rst index 9ee04e01240a..79d9d4ebdcd2 100644 --- a/doc/source/train/dl_guide.rst +++ b/doc/source/train/dl_guide.rst @@ -418,7 +418,7 @@ Distributed Data Ingest with Ray Data and Ray Train ------------------------------------------------------- :ref:`Ray Data ` is the recommended way to work with large datasets in Ray Train. Ray Data provides automatic loading, sharding, and streamed ingest of Data across multiple Train workers. -To get started, pass in one or more datastreams under the ``datasets`` keyword argument for Trainer (e.g., ``Trainer(datasets={...})``). +To get started, pass in one or more datasets under the ``datasets`` keyword argument for Trainer (e.g., ``Trainer(datasets={...})``). 
Here's a simple code overview of the Ray Data integration: diff --git a/doc/source/train/examples/lightning/lightning_cola_advanced.ipynb b/doc/source/train/examples/lightning/lightning_cola_advanced.ipynb index de527847f449..a01c1151f980 100644 --- a/doc/source/train/examples/lightning/lightning_cola_advanced.ipynb +++ b/doc/source/train/examples/lightning/lightning_cola_advanced.ipynb @@ -11,7 +11,7 @@ "\n", ":::{note}\n", "\n", - "This is an advanced example for {class}`LightningTrainer `, which demonstrates how to use LightningTrainer with {ref}`Datastream ` and {ref}`Batch Predictor `. \n", + "This is an advanced example for {class}`LightningTrainer `, which demonstrates how to use LightningTrainer with {ref}`Dataset ` and {ref}`Batch Predictor `. \n", "\n", "If you just want to quickly convert your existing PyTorch Lightning scripts into Ray AIR, you can refer to this starter example:\n", "{ref}`Train a Pytorch Lightning Image Classifier `.\n", @@ -84,7 +84,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Pre-process CoLA Datastream\n", + "## Pre-process CoLA Dataset\n", "\n", "CoLA is a binary sentence classification task with 10.6K training examples. First, we download the dataset and metrics using the HuggingFace API, and create Ray Data for each split accordingly." ] @@ -309,12 +309,12 @@ "\n", "To feed data into LightningTrainer, we need to configure the following arguments:\n", "\n", - "- `datasets`: A dictionary of the input Ray datastreams, with special keys \"train\" and \"val\".\n", - "- `datasets_iter_config`: The argument list of {meth}`iter_torch_batches() `. It defines the way we iterate dataset shards for each worker.\n", + "- `datasets`: A dictionary of the input Ray datasets, with special keys \"train\" and \"val\".\n", + "- `datasets_iter_config`: The argument list of {meth}`iter_torch_batches() `. 
It defines the way we iterate dataset shards for each worker.\n", "- `preprocessor`: The preprocessor that will be applied to the input dataset.\n", "\n", ":::{note}\n", - "Note that we are using Datastream for data ingestion for faster preprocessing here, but you can also continue to use the native `PyTorch DataLoader` or `LightningDataModule`. See {ref}`this example `. \n", + "Note that we are using Dataset for data ingestion for faster preprocessing here, but you can also continue to use the native `PyTorch DataLoader` or `LightningDataModule`. See {ref}`this example `. \n", "\n", ":::\n", "\n", @@ -1430,7 +1430,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "We obtained a Ray datastream containing predictions from `batch_predictor.predict()`. Now we can easily evaluate the results with just a few lines of code:" + "We obtained a Ray dataset containing predictions from `batch_predictor.predict()`. Now we can easily evaluate the results with just a few lines of code:" ] }, { diff --git a/doc/source/train/getting-started.rst b/doc/source/train/getting-started.rst index d1579ca5e43d..a7105abed9db 100644 --- a/doc/source/train/getting-started.rst +++ b/doc/source/train/getting-started.rst @@ -31,7 +31,7 @@ Here are examples for some of the commonly used trainers: We then instantiate our XGBoostTrainer by passing in: - The aforementioned ``ScalingConfig``. - - The ``label_column`` refers to the column name containing the labels in the Datastream + - The ``label_column`` refers to the column name containing the labels in the Dataset - The ``params`` are `XGBoost training parameters `__ .. 
literalinclude:: doc_code/gbdt_user_guide.py @@ -69,7 +69,7 @@ Here are examples for some of the commonly used trainers: We then instantiate our LightGBMTrainer by passing in: - The aforementioned ``ScalingConfig`` - - The ``label_column`` refers to the column name containing the labels in the Datastream + - The ``label_column`` refers to the column name containing the labels in the Dataset - The ``params`` are core `LightGBM training parameters `__ .. literalinclude:: doc_code/gbdt_user_guide.py diff --git a/doc/source/train/key-concepts.rst b/doc/source/train/key-concepts.rst index 509c5c46167a..61ac57ac16e5 100644 --- a/doc/source/train/key-concepts.rst +++ b/doc/source/train/key-concepts.rst @@ -20,7 +20,7 @@ Trainers Trainers are responsible for executing (distributed) training runs. The output of a Trainer run is a :ref:`Result ` that contains metrics from the training run and the latest saved :ref:`Checkpoint `. -Trainers can also be configured with :ref:`Datastreams ` and :ref:`Preprocessors ` for scalable data ingest and preprocessing. +Trainers can also be configured with :ref:`Datasets ` and :ref:`Preprocessors ` for scalable data ingest and preprocessing. Deep Learning, Tree-Based, and other Trainers @@ -115,7 +115,7 @@ Each Trainer has a respective Predictor implementation that is compatible with i A predictor can be passed into a :class:`BatchPredictor ` is used to scale up prediction over a Ray cluster. -It takes a Datastream as input. +It takes a Dataset as input. .. 
dropdown:: Example: Batch prediction with :class:`XGBoostPredictor ` diff --git a/doc/source/tune/tutorials/tune_get_data_in_and_out.md b/doc/source/tune/tutorials/tune_get_data_in_and_out.md index e9151071458c..477725e0e7b2 100644 --- a/doc/source/tune/tutorials/tune_get_data_in_and_out.md +++ b/doc/source/tune/tutorials/tune_get_data_in_and_out.md @@ -71,7 +71,7 @@ For example, passing in a large pandas DataFrame or an unserializable model obje Instead, use strings or other identifiers as your values, and initialize/load the objects inside your Trainable directly depending on those. ```{note} -[Datastreams](data_getting_started) can be used as values in the search space directly. +[Datasets](data_getting_started) can be used as values in the search space directly. ``` In our example, we want to tune the two model hyperparameters. We also want to set the number of epochs, so that we can easily tweak it later. For the hyperparameters, we will use the `tune.uniform` distribution. We will also modify the `training_function` to obtain those values from the `config` dictionary. 
diff --git a/python/ray/air/examples/dreambooth/dataset.py b/python/ray/air/examples/dreambooth/dataset.py index d837525194b3..a20a1d8698a9 100644 --- a/python/ray/air/examples/dreambooth/dataset.py +++ b/python/ray/air/examples/dreambooth/dataset.py @@ -8,8 +8,8 @@ def get_train_dataset(args, image_resolution=512): - """Build a Datastream for fine-tuning DreamBooth model.""" - # Load images into Datastream + """Build a Dataset for fine-tuning DreamBooth model.""" + # Load images into Dataset instance_dataset = read_images(args.instance_images_dir) class_dataset = read_images(args.class_images_dir) diff --git a/python/ray/air/tests/test_api.py b/python/ray/air/tests/test_api.py index 5072e45a4851..940e2e2f9435 100644 --- a/python/ray/air/tests/test_api.py +++ b/python/ray/air/tests/test_api.py @@ -13,7 +13,7 @@ def training_loop(self) -> None: pass -class DummyDataset(ray.data.Datastream): +class DummyDataset(ray.data.Dataset): def __init__(self): pass diff --git a/python/ray/air/util/check_ingest.py b/python/ray/air/util/check_ingest.py index 58e46559502b..219f5ffa22bf 100755 --- a/python/ray/air/util/check_ingest.py +++ b/python/ray/air/util/check_ingest.py @@ -31,7 +31,7 @@ class DummyTrainer(DataParallelTrainer): num_epochs: How many many times to iterate through the datasets for. prefetch_batches: The number of batches to prefetch ahead of the current block during the scan. This is the same as - :meth:`~ray.data.Datastream.iter_batches` + :meth:`~ray.data.Dataset.iter_batches` time_preprocessing_separately: Whether to time the preprocessing separately from actual iteration during training. 
If set to True, preprocessing execution is fully executed before training begins and the preprocessing diff --git a/python/ray/data/__init__.py b/python/ray/data/__init__.py index cb7b3d1c812f..f42c113f2414 100644 --- a/python/ray/data/__init__.py +++ b/python/ray/data/__init__.py @@ -1,12 +1,11 @@ # Short term workaround for https://github.com/ray-project/ray/issues/32435 -# Datastream has a hard dependency on pandas, so it doesn't need to be delayed. +# Dataset has a hard dependency on pandas, so it doesn't need to be delayed. import pandas # noqa from ray.data._internal.compute import ActorPoolStrategy from ray.data._internal.progress_bar import set_progress_bars from ray.data._internal.execution.interfaces import ExecutionOptions, ExecutionResources -from ray.data.dataset import Dataset -from ray.data.datastream import Datastream, Schema +from ray.data.dataset import Dataset, Schema from ray.data.context import DatasetContext, DataContext from ray.data.iterator import DatasetIterator, DataIterator from ray.data.dataset_pipeline import DatasetPipeline @@ -54,8 +53,7 @@ __all__ = [ "ActorPoolStrategy", - "Datastream", - "Dataset", # Backwards compatibility alias. + "Dataset", "DataContext", "DatasetContext", # Backwards compatibility alias. "DataIterator", diff --git a/python/ray/data/_internal/arrow_block.py b/python/ray/data/_internal/arrow_block.py index 839712bda053..80360c5cc2d8 100644 --- a/python/ray/data/_internal/arrow_block.py +++ b/python/ray/data/_internal/arrow_block.py @@ -68,7 +68,7 @@ def get_concat_and_sort_transform(context: DataContext) -> Callable: class ArrowRow(TableRow): """ - Row of a tabular Datastream backed by a Arrow Table block. + Row of a tabular Dataset backed by a Arrow Table block. 
""" def __getitem__(self, key: str) -> Any: diff --git a/python/ray/data/_internal/block_batching/block_batching.py b/python/ray/data/_internal/block_batching/block_batching.py index 6fc4990f3674..cf1f89aee7e9 100644 --- a/python/ray/data/_internal/block_batching/block_batching.py +++ b/python/ray/data/_internal/block_batching/block_batching.py @@ -15,7 +15,7 @@ ActorBlockPrefetcher, ) from ray.data._internal.memory_tracing import trace_deallocation -from ray.data._internal.stats import DatasetPipelineStats, DatastreamStats +from ray.data._internal.stats import DatasetPipelineStats, DatasetStats from ray.data.block import Block, DataBatch from ray.data.context import DataContext from ray.types import ObjectRef @@ -26,7 +26,7 @@ def batch_block_refs( block_refs: Iterator[ObjectRef[Block]], *, - stats: Optional[Union[DatastreamStats, DatasetPipelineStats]] = None, + stats: Optional[Union[DatasetStats, DatasetPipelineStats]] = None, prefetch_blocks: int = 0, clear_block_after_read: bool = False, batch_size: Optional[int] = None, @@ -42,8 +42,8 @@ def batch_block_refs( This takes a block iterator and creates batch_size batches, slicing, unioning, shuffling, prefetching, and formatting blocks as needed. - This is used by both Datastream.iter_batches()/DatasetPipeline.iter_batches() - and Datastream.map_batches()/DatasetPipeline.map_batches(). + This is used by both Dataset.iter_batches()/DatasetPipeline.iter_batches() + and Dataset.map_batches()/DatasetPipeline.map_batches(). Args: block_refs: An iterator over block object references. 
@@ -114,7 +114,7 @@ def batch_block_refs( def batch_blocks( blocks: Iterator[Block], *, - stats: Optional[Union[DatastreamStats, DatasetPipelineStats]] = None, + stats: Optional[Union[DatasetStats, DatasetPipelineStats]] = None, batch_size: Optional[int] = None, batch_format: str = "default", drop_last: bool = False, @@ -164,7 +164,7 @@ def _prefetch_blocks( prefetcher: BlockPrefetcher, num_blocks_to_prefetch: int, eager_free: bool = False, - stats: Optional[Union[DatastreamStats, DatasetPipelineStats]] = None, + stats: Optional[Union[DatasetStats, DatasetPipelineStats]] = None, ) -> Iterator[ObjectRef[Block]]: """Given an iterable of Block Object References, returns an iterator over these object reference while prefetching `num_block_to_prefetch` @@ -174,7 +174,7 @@ def _prefetch_blocks( block_ref_iter: An iterator over block object references. num_blocks_to_prefetch: The number of blocks to prefetch ahead of the current block during the scan. - stats: Datastream stats object used to store block wait time. + stats: Dataset stats object used to store block wait time. 
""" if num_blocks_to_prefetch == 0: for block_ref in block_ref_iter: diff --git a/python/ray/data/_internal/block_batching/iter_batches.py b/python/ray/data/_internal/block_batching/iter_batches.py index e3c2e8b56e06..e406bcd79f12 100644 --- a/python/ray/data/_internal/block_batching/iter_batches.py +++ b/python/ray/data/_internal/block_batching/iter_batches.py @@ -19,7 +19,7 @@ make_async_gen, ) from ray.data._internal.memory_tracing import trace_deallocation -from ray.data._internal.stats import DatastreamStats +from ray.data._internal.stats import DatasetStats from ray.data.context import DataContext from contextlib import nullcontext @@ -27,7 +27,7 @@ def iter_batches( block_refs: Iterator[Tuple[ObjectRef[Block], BlockMetadata]], *, - stats: Optional[DatastreamStats] = None, + stats: Optional[DatasetStats] = None, clear_block_after_read: bool = False, batch_size: Optional[int] = None, batch_format: Optional[str] = "default", @@ -74,7 +74,7 @@ def iter_batches( Args: block_refs: An iterator over block object references and their corresponding metadata. - stats: DatastreamStats object to record timing and other statistics. + stats: DatasetStats object to record timing and other statistics. clear_block_after_read: Whether to clear the block from object store manually (i.e. without waiting for Python's automatic GC) after it is read. Doing so will reclaim memory faster and hence reduce the @@ -176,7 +176,7 @@ def _async_iter_batches( def _format_in_threadpool( batch_iter: Iterator[Batch], - stats: DatastreamStats, + stats: DatasetStats, batch_format: Optional[str], collate_fn: Optional[Callable[[DataBatch], Any]], num_threadpool_workers: int, @@ -185,7 +185,7 @@ def _format_in_threadpool( Args: logical_batch_iterator: An iterator over logical batches. - stats: DatastreamStats object to record timing and other statistics. + stats: DatasetStats object to record timing and other statistics. batch_format: The format in which to return each batch. 
Specify "default" to use the current block format (promoting Arrow to pandas automatically), "pandas" to diff --git a/python/ray/data/_internal/block_batching/util.py b/python/ray/data/_internal/block_batching/util.py index 63e3f31a4341..bdf9807b5253 100644 --- a/python/ray/data/_internal/block_batching/util.py +++ b/python/ray/data/_internal/block_batching/util.py @@ -14,7 +14,7 @@ CollatedBatch, BlockPrefetcher, ) -from ray.data._internal.stats import DatasetPipelineStats, DatastreamStats +from ray.data._internal.stats import DatasetPipelineStats, DatasetStats from ray.util.scheduling_strategies import NodeAffinitySchedulingStrategy T = TypeVar("T") @@ -39,7 +39,7 @@ def _calculate_ref_hits(refs: List[ObjectRef[Any]]) -> Tuple[int, int, int]: def resolve_block_refs( block_ref_iter: Iterator[ObjectRef[Block]], - stats: Optional[Union[DatastreamStats, DatasetPipelineStats]] = None, + stats: Optional[Union[DatasetStats, DatasetPipelineStats]] = None, ) -> Iterator[Block]: """Resolves the block references for each logical batch. @@ -71,7 +71,7 @@ def resolve_block_refs( def blocks_to_batches( block_iter: Iterator[Block], - stats: Optional[Union[DatastreamStats, DatasetPipelineStats]] = None, + stats: Optional[Union[DatasetStats, DatasetPipelineStats]] = None, batch_size: Optional[int] = None, drop_last: bool = False, shuffle_buffer_min_size: Optional[int] = None, @@ -86,7 +86,7 @@ def blocks_to_batches( Args: block_iter: An iterator over blocks. - stats: Datastream stats object used to store block batching time. + stats: Dataset stats object used to store block batching time. batch_size: Record batch size, or None to let the system pick. drop_last: Whether to drop the last batch if it's incomplete. 
shuffle_buffer_min_size: If non-None, the data will be randomly shuffled @@ -143,7 +143,7 @@ def get_iter_next_batch_s_timer(): def format_batches( block_iter: Iterator[Batch], batch_format: Optional[str], - stats: Optional[Union[DatastreamStats, DatasetPipelineStats]] = None, + stats: Optional[Union[DatasetStats, DatasetPipelineStats]] = None, ) -> Iterator[Batch]: """Given an iterator of blocks, returns an iterator of formatted batches. @@ -166,7 +166,7 @@ def format_batches( def collate( batch_iter: Iterator[Batch], collate_fn: Optional[Callable[[DataBatch], Any]], - stats: Optional[DatastreamStats] = None, + stats: Optional[DatasetStats] = None, ) -> Iterator[CollatedBatch]: """Returns an iterator with the provided collate_fn applied to items of the batch iterator. @@ -278,7 +278,7 @@ def execute_computation(thread_index: int): output_queue.release(num_threads_alive) -PREFETCHER_ACTOR_NAMESPACE = "ray.datastream" +PREFETCHER_ACTOR_NAMESPACE = "ray.dataset" class WaitBlockPrefetcher(BlockPrefetcher): @@ -300,7 +300,7 @@ def __init__(self): @staticmethod def _get_or_create_actor_prefetcher() -> "ActorHandle": node_id = ray.get_runtime_context().get_node_id() - actor_name = f"datastream-block-prefetcher-{node_id}" + actor_name = f"dataset-block-prefetcher-{node_id}" return _BlockPretcher.options( scheduling_strategy=NodeAffinitySchedulingStrategy(node_id, soft=False), name=actor_name, diff --git a/python/ray/data/_internal/block_list.py b/python/ray/data/_internal/block_list.py index 06dd3cf5cad4..b046c24e4cc7 100644 --- a/python/ray/data/_internal/block_list.py +++ b/python/ray/data/_internal/block_list.py @@ -58,8 +58,8 @@ def _check_if_cleared(self) -> None: """Raise an error if this BlockList has been previously cleared.""" if self.is_cleared(): raise ValueError( - "This Datastream's blocks have been moved, which means that you " - "can no longer use this Datastream." 
+ "This Dataset's blocks have been moved, which means that you " + "can no longer use this Dataset." ) def split(self, split_size: int) -> List["BlockList"]: diff --git a/python/ray/data/_internal/compute.py b/python/ray/data/_internal/compute.py index 74d3187d45c9..a97e54127dec 100644 --- a/python/ray/data/_internal/compute.py +++ b/python/ray/data/_internal/compute.py @@ -80,7 +80,7 @@ def _apply( context = DataContext.get_current() - # Handle empty datastreams. + # Handle empty datasets. if block_list.initial_num_blocks() == 0: return block_list @@ -179,10 +179,10 @@ def __eq__(self, other: Any) -> bool: @PublicAPI class ActorPoolStrategy(ComputeStrategy): - """Specify the compute strategy for a Datastream transform. + """Specify the compute strategy for a Dataset transform. ActorPoolStrategy specifies that an autoscaling pool of actors should be used - for a given Datastream transform. This is useful for stateful setup of callable + for a given Dataset transform. This is useful for stateful setup of callable classes. For a fixed-sized pool of size ``n``, specify ``compute=ActorPoolStrategy(size=n)``. @@ -206,7 +206,7 @@ def __init__( max_size: Optional[int] = None, max_tasks_in_flight_per_actor: Optional[int] = None, ): - """Construct ActorPoolStrategy for a Datastream transform. + """Construct ActorPoolStrategy for a Dataset transform. Args: size: Specify a fixed size actor pool of this size. 
It is an error to @@ -279,7 +279,7 @@ def _apply( fn_constructor_args: Optional[Iterable[Any]] = None, fn_constructor_kwargs: Optional[Dict[str, Any]] = None, ) -> BlockList: - """Note: this is not part of the Datastream public API.""" + """Note: this is not part of the Dataset public API.""" assert not DataContext.get_current().new_execution_backend, "Legacy backend off" if fn_args is None: fn_args = tuple() diff --git a/python/ray/data/_internal/datastream_logger.py b/python/ray/data/_internal/dataset_logger.py similarity index 81% rename from python/ray/data/_internal/datastream_logger.py rename to python/ray/data/_internal/dataset_logger.py index 8ed7dde89602..47e014c77ff4 100644 --- a/python/ray/data/_internal/datastream_logger.py +++ b/python/ray/data/_internal/dataset_logger.py @@ -5,16 +5,16 @@ from ray._private.ray_constants import LOGGER_FORMAT, LOGGER_LEVEL -class DatastreamLogger: - """Logger for Ray Datastreams which writes logs to a separate log file - at `DatastreamLogger.DEFAULT_DATASET_LOG_PATH`. Can optionally turn off +class DatasetLogger: + """Logger for Ray Datasets which writes logs to a separate log file + at `DatasetLogger.DEFAULT_DATASET_LOG_PATH`. Can optionally turn off logging to stdout to reduce clutter (but always logs to the aformentioned - Datastreams-specific log file). + Datasets-specific log file). After initialization, always use the `get_logger()` method to correctly set whether to log to stdout. Example usage: ``` - logger = DatastreamLogger(__name__) + logger = DatasetLogger(__name__) logger.get_logger().info("This logs to file and stdout") logger.get_logger(log_to_stdout=False).info("This logs to file only) logger.get_logger().warning("Can call the usual Logger methods") @@ -24,7 +24,7 @@ class DatastreamLogger: DEFAULT_DATASET_LOG_PATH = "logs/ray-data.log" def __init__(self, log_name: str): - """Initialize DatastreamLogger for a given `log_name`. + """Initialize DatasetLogger for a given `log_name`. 
Args: log_name: Name of logger (usually passed into `logging.getLogger(...)`) @@ -40,7 +40,7 @@ def __init__(self, log_name: str): def _initialize_logger(self) -> logging.Logger: """Internal method to initialize the logger and the extra file handler - for writing to the Datastream log file. Not intended (nor necessary) + for writing to the Dataset log file. Not intended (nor necessary) to call explicitly. Assumes that `ray.init()` has already been called prior to calling this method; otherwise raises a `ValueError`.""" @@ -62,20 +62,20 @@ def _initialize_logger(self) -> logging.Logger: # If ray.init() is called and the global node session directory path # is valid, we can create the additional handler to write to the - # Datastream log file. If this is not the case (e.g. when used in Ray + # Dataset log file. If this is not the case (e.g. when used in Ray # Client), then we skip initializing the FileHandler. global_node = ray._private.worker._global_node if global_node is not None: - # Add a FileHandler to write to the specific Ray Datastreams log file - # at `DatastreamLogger.DEFAULT_DATASET_LOG_PATH`, using the standard + # Add a FileHandler to write to the specific Ray Datasets log file + # at `DatasetLogger.DEFAULT_DATASET_LOG_PATH`, using the standard # default logger format used by the root logger session_dir = global_node.get_session_dir_path() - datastreams_log_path = os.path.join( + datasets_log_path = os.path.join( session_dir, - DatastreamLogger.DEFAULT_DATASET_LOG_PATH, + DatasetLogger.DEFAULT_DATASET_LOG_PATH, ) file_log_formatter = logging.Formatter(fmt=LOGGER_FORMAT) - file_log_handler = logging.FileHandler(datastreams_log_path) + file_log_handler = logging.FileHandler(datasets_log_path) file_log_handler.setLevel(LOGGER_LEVEL.upper()) file_log_handler.setFormatter(file_log_formatter) logger.addHandler(file_log_handler) @@ -85,10 +85,10 @@ def get_logger(self, log_to_stdout: bool = True) -> logging.Logger: """ Returns the underlying Logger, with the 
`propagate` attribute set to the same value as `log_to_stdout`. For example, when - `log_to_stdout = False`, we do not want the `DatastreamLogger` to + `log_to_stdout = False`, we do not want the `DatasetLogger` to propagate up to the base Logger which writes to stdout. - This is a workaround needed due to the DatastreamLogger wrapper object + This is a workaround needed due to the DatasetLogger wrapper object not having access to the log caller's scope in Python <3.8. In the future, with Python 3.8 support, we can use the `stacklevel` arg, which allows the logger to fetch the correct calling file/line and diff --git a/python/ray/data/_internal/execution/autoscaling_requester.py b/python/ray/data/_internal/execution/autoscaling_requester.py index 92f6a5dd690c..7d14d6c6920d 100644 --- a/python/ray/data/_internal/execution/autoscaling_requester.py +++ b/python/ray/data/_internal/execution/autoscaling_requester.py @@ -19,7 +19,7 @@ @ray.remote(num_cpus=0, max_restarts=-1, max_task_retries=-1) class AutoscalingRequester: - """Actor to make resource requests to autoscaler for the datastreams. + """Actor to make resource requests to autoscaler for the datasets. The resource requests are set to timeout after RESOURCE_REQUEST_TIMEOUT seconds. 
For those live requests, we keep track of the last request made for each execution, diff --git a/python/ray/data/_internal/execution/bulk_executor.py b/python/ray/data/_internal/execution/bulk_executor.py index 7c3b2d9d8e73..3355a063b2bf 100644 --- a/python/ray/data/_internal/execution/bulk_executor.py +++ b/python/ray/data/_internal/execution/bulk_executor.py @@ -9,12 +9,12 @@ RefBundle, PhysicalOperator, ) -from ray.data._internal.datastream_logger import DatastreamLogger +from ray.data._internal.dataset_logger import DatasetLogger from ray.data._internal.execution.operators.input_data_buffer import InputDataBuffer from ray.data._internal.progress_bar import ProgressBar -from ray.data._internal.stats import DatastreamStats +from ray.data._internal.stats import DatasetStats -logger = DatastreamLogger(__name__) +logger = DatasetLogger(__name__) class BulkExecutor(Executor): @@ -28,11 +28,11 @@ def __init__(self, options: ExecutionOptions): # Bulk executor always preserves order. options.preserve_order = True super().__init__(options) - self._stats: Optional[DatastreamStats] = DatastreamStats(stages={}, parent=None) + self._stats: Optional[DatasetStats] = DatasetStats(stages={}, parent=None) self._executed = False def execute( - self, dag: PhysicalOperator, initial_stats: Optional[DatastreamStats] = None + self, dag: PhysicalOperator, initial_stats: Optional[DatasetStats] = None ) -> Iterator[RefBundle]: """Synchronously executes the DAG via bottom-up recursive traversal.""" @@ -84,7 +84,7 @@ def execute_recursive(op: PhysicalOperator) -> List[RefBundle]: return OutputIterator(execute_recursive(dag)) - def get_stats(self) -> DatastreamStats: + def get_stats(self) -> DatasetStats: return self._stats diff --git a/python/ray/data/_internal/execution/interfaces.py b/python/ray/data/_internal/execution/interfaces.py index 734384232c45..eee7cf1c46dd 100644 --- a/python/ray/data/_internal/execution/interfaces.py +++ b/python/ray/data/_internal/execution/interfaces.py @@ 
-8,7 +8,7 @@ from ray.data._internal.logical.interfaces import Operator from ray.data._internal.memory_tracing import trace_deallocation from ray.data._internal.progress_bar import ProgressBar -from ray.data._internal.stats import DatastreamStats, StatsDict +from ray.data._internal.stats import DatasetStats, StatsDict from ray.data.block import Block, BlockMetadata from ray.data.context import DataContext from ray.types import ObjectRef @@ -254,7 +254,7 @@ class PhysicalOperator(Operator): output stream of RefBundles. Physical operators are stateful and non-serializable; they live on the driver side - of the Datastream only. + of the Dataset only. Here's a simple example of implementing a basic "Map" operator: @@ -305,7 +305,7 @@ def completed(self) -> bool: ) or self._dependents_complete def get_stats(self) -> StatsDict: - """Return recorded execution stats for use with DatastreamStats.""" + """Return recorded execution stats for use with DatasetStats.""" raise NotImplementedError def get_metrics(self) -> Dict[str, int]: @@ -492,7 +492,7 @@ def notify_resource_usage( class OutputIterator(Iterator[RefBundle]): """Iterator used to access the output of an Executor execution. - This is a blocking iterator. Datastreams guarantees that all its iterators are + This is a blocking iterator. Datasets guarantees that all its iterators are thread-safe (i.e., multiple threads can block on them at the same time). """ @@ -507,7 +507,7 @@ def get_next(self, output_split_idx: Optional[int] = None) -> RefBundle: Args: output_split_idx: The output split index to get results for. This arg is - only allowed for iterators created by `Datastream.streaming_split()`. + only allowed for iterators created by `Dataset.streaming_split()`. Raises: StopIteration if there are no more outputs to return. 
@@ -533,13 +533,13 @@ def __init__(self, options: ExecutionOptions): self._options = options def execute( - self, dag: PhysicalOperator, initial_stats: Optional[DatastreamStats] = None + self, dag: PhysicalOperator, initial_stats: Optional[DatasetStats] = None ) -> OutputIterator: """Start execution. Args: dag: The operator graph to execute. - initial_stats: The DatastreamStats to prepend to the stats returned by the + initial_stats: The DatasetStats to prepend to the stats returned by the executor. These stats represent actions done to compute inputs. """ raise NotImplementedError @@ -551,7 +551,7 @@ def shutdown(self): """ pass - def get_stats(self) -> DatastreamStats: + def get_stats(self) -> DatasetStats: """Return stats for the execution so far. This is generally called after `execute` has completed, but may be called diff --git a/python/ray/data/_internal/execution/legacy_compat.py b/python/ray/data/_internal/execution/legacy_compat.py index 40fdec8c0c2c..4d5098f57b77 100644 --- a/python/ray/data/_internal/execution/legacy_compat.py +++ b/python/ray/data/_internal/execution/legacy_compat.py @@ -13,7 +13,7 @@ from ray.types import ObjectRef from ray.data.block import Block, BlockMetadata, List from ray.data.datasource import ReadTask -from ray.data._internal.stats import StatsDict, DatastreamStats +from ray.data._internal.stats import StatsDict, DatasetStats from ray.data._internal.stage_impl import ( RandomizeBlocksStage, LimitStage, @@ -48,11 +48,11 @@ def execute_to_legacy_block_iterator( executor: Executor, plan: ExecutionPlan, allow_clear_input_blocks: bool, - datastream_uuid: str, + dataset_uuid: str, ) -> Iterator[Tuple[ObjectRef[Block], BlockMetadata]]: """Same as execute_to_legacy_bundle_iterator but returning blocks and metadata.""" bundle_iter = execute_to_legacy_bundle_iterator( - executor, plan, allow_clear_input_blocks, datastream_uuid + executor, plan, allow_clear_input_blocks, dataset_uuid ) for bundle in bundle_iter: for block, metadata in 
bundle.blocks: @@ -63,7 +63,7 @@ def execute_to_legacy_bundle_iterator( executor: Executor, plan: ExecutionPlan, allow_clear_input_blocks: bool, - datastream_uuid: str, + dataset_uuid: str, dag_rewrite=None, ) -> Iterator[RefBundle]: """Execute a plan with the new executor and return a bundle iterator. @@ -72,10 +72,10 @@ def execute_to_legacy_bundle_iterator( executor: The executor to use. plan: The legacy plan to execute. allow_clear_input_blocks: Whether the executor may consider clearing blocks. - datastream_uuid: UUID of the datastream for this execution. + dataset_uuid: UUID of the dataset for this execution. dag_rewrite: Callback that can be used to mutate the DAG prior to execution. This is currently used as a legacy hack to inject the OutputSplit operator - for `Datastream.streaming_split()`. + for `Dataset.streaming_split()`. Returns: The output as a bundle iterator. @@ -97,7 +97,7 @@ def execute_to_legacy_block_list( executor: Executor, plan: ExecutionPlan, allow_clear_input_blocks: bool, - datastream_uuid: str, + dataset_uuid: str, preserve_order: bool, ) -> BlockList: """Execute a plan with the new executor and translate it into a legacy block list. @@ -106,7 +106,7 @@ def execute_to_legacy_block_list( executor: The executor to use. plan: The legacy plan to execute. allow_clear_input_blocks: Whether the executor may consider clearing blocks. - datastream_uuid: UUID of the datastream for this execution. + dataset_uuid: UUID of the dataset for this execution. preserve_order: Whether to preserve order in execution. Returns: @@ -121,7 +121,7 @@ def execute_to_legacy_block_list( bundles = executor.execute(dag, initial_stats=stats) block_list = _bundles_to_block_list(bundles) # Set the stats UUID after execution finishes. 
- _set_stats_uuid_recursive(executor.get_stats(), datastream_uuid) + _set_stats_uuid_recursive(executor.get_stats(), dataset_uuid) return block_list @@ -130,7 +130,7 @@ def _get_execution_dag( plan: ExecutionPlan, allow_clear_input_blocks: bool, preserve_order: bool, -) -> Tuple[PhysicalOperator, DatastreamStats]: +) -> Tuple[PhysicalOperator, DatasetStats]: """Get the physical operators DAG from a plan.""" # Record usage of logical operators if available. if hasattr(plan, "_logical_plan") and plan._logical_plan is not None: @@ -152,7 +152,7 @@ def _get_execution_dag( return dag, stats -def _get_initial_stats_from_plan(plan: ExecutionPlan) -> DatastreamStats: +def _get_initial_stats_from_plan(plan: ExecutionPlan) -> DatasetStats: assert DataContext.get_current().optimizer_enabled if plan._snapshot_blocks is not None and not plan._snapshot_blocks.is_cleared(): return plan._snapshot_stats @@ -161,7 +161,7 @@ def _get_initial_stats_from_plan(plan: ExecutionPlan) -> DatastreamStats: def _to_operator_dag( plan: ExecutionPlan, allow_clear_input_blocks: bool -) -> Tuple[PhysicalOperator, DatastreamStats]: +) -> Tuple[PhysicalOperator, DatasetStats]: """Translate a plan into an operator DAG for the new execution backend.""" blocks, stats, stages = plan._optimize() @@ -384,8 +384,8 @@ def _block_list_to_bundles(blocks: BlockList, owns_blocks: bool) -> List[RefBund return output -def _set_stats_uuid_recursive(stats: DatastreamStats, datastream_uuid: str) -> None: - if not stats.datastream_uuid: - stats.datastream_uuid = datastream_uuid +def _set_stats_uuid_recursive(stats: DatasetStats, dataset_uuid: str) -> None: + if not stats.dataset_uuid: + stats.dataset_uuid = dataset_uuid for parent in stats.parents or []: - _set_stats_uuid_recursive(parent, datastream_uuid) + _set_stats_uuid_recursive(parent, dataset_uuid) diff --git a/python/ray/data/_internal/execution/operators/actor_pool_map_operator.py b/python/ray/data/_internal/execution/operators/actor_pool_map_operator.py 
index fa727c363578..bc55b0d9503d 100644 --- a/python/ray/data/_internal/execution/operators/actor_pool_map_operator.py +++ b/python/ray/data/_internal/execution/operators/actor_pool_map_operator.py @@ -6,7 +6,7 @@ from ray.data.block import Block, BlockMetadata, _CallableClassProtocol from ray.data.context import DataContext, DEFAULT_SCHEDULING_STRATEGY from ray.data._internal.compute import ActorPoolStrategy -from ray.data._internal.datastream_logger import DatastreamLogger +from ray.data._internal.dataset_logger import DatasetLogger from ray.data._internal.execution.interfaces import ( RefBundle, ExecutionResources, @@ -24,7 +24,7 @@ from ray.types import ObjectRef from ray._raylet import ObjectRefGenerator -logger = DatastreamLogger(__name__) +logger = DatasetLogger(__name__) # Higher values here are better for prefetching and locality. It's ok for this to be # fairly high since streaming backpressure prevents us from overloading actors. @@ -67,7 +67,7 @@ def __init__( min_rows_per_bundle: The number of rows to gather per batch passed to the transform_fn, or None to use the block size. Setting the batch size is important for the performance of GPU-accelerated transform functions. - The actual rows passed may be less if the datastream is small. + The actual rows passed may be less if the dataset is small. ray_remote_args: Customize the ray remote args for this op's tasks. """ super().__init__( @@ -271,9 +271,9 @@ def shutdown(self): # The user created a stream that has too few blocks to begin with. logger.get_logger().warning( "To ensure full parallelization across an actor pool of size " - f"{min_workers}, the Datastream should consist of at least " + f"{min_workers}, the Dataset should consist of at least " f"{min_workers} distinct blocks. Consider increasing " - "the parallelism when creating the Datastream." + "the parallelism when creating the Dataset." 
) def get_work_refs(self) -> List[ray.ObjectRef]: diff --git a/python/ray/data/_internal/execution/operators/input_data_buffer.py b/python/ray/data/_internal/execution/operators/input_data_buffer.py index fb0d686333db..b81f1031deae 100644 --- a/python/ray/data/_internal/execution/operators/input_data_buffer.py +++ b/python/ray/data/_internal/execution/operators/input_data_buffer.py @@ -11,7 +11,7 @@ class InputDataBuffer(PhysicalOperator): """Defines the input data for the operator DAG. - For example, this may hold cached blocks from a previous Datastream execution, or + For example, this may hold cached blocks from a previous Dataset execution, or the arguments for read tasks. """ diff --git a/python/ray/data/_internal/execution/operators/map_operator.py b/python/ray/data/_internal/execution/operators/map_operator.py index 5c580b5c7eac..d76b6a32af93 100644 --- a/python/ray/data/_internal/execution/operators/map_operator.py +++ b/python/ray/data/_internal/execution/operators/map_operator.py @@ -95,7 +95,7 @@ def create( min_rows_per_bundle: The number of rows to gather per batch passed to the transform_fn, or None to use the block size. Setting the batch size is important for the performance of GPU-accelerated transform functions. - The actual rows passed may be less if the datastream is small. + The actual rows passed may be less if the dataset is small. ray_remote_args: Customize the ray remote args for this op's tasks. """ if compute_strategy is None: diff --git a/python/ray/data/_internal/execution/operators/output_splitter.py b/python/ray/data/_internal/execution/operators/output_splitter.py index fe649b7fafb2..a5bdaa622cbc 100644 --- a/python/ray/data/_internal/execution/operators/output_splitter.py +++ b/python/ray/data/_internal/execution/operators/output_splitter.py @@ -21,7 +21,7 @@ class OutputSplitter(PhysicalOperator): The output bundles of this operator will have a `bundle.output_split_idx` attr set to an integer from [0..n-1]. 
This operator tries to divide the rows evenly across output splits. If the `equal` option is set, the operator will furthermore - guarantee an exact split of rows across outputs, truncating the Datastream. + guarantee an exact split of rows across outputs, truncating the Dataset. Implementation wise, this operator keeps an internal buffer of bundles. The buffer has a minimum size calculated to enable a good locality hit rate, as well as ensure diff --git a/python/ray/data/_internal/execution/operators/task_pool_map_operator.py b/python/ray/data/_internal/execution/operators/task_pool_map_operator.py index d72f1afe6df0..89d51d7857ac 100644 --- a/python/ray/data/_internal/execution/operators/task_pool_map_operator.py +++ b/python/ray/data/_internal/execution/operators/task_pool_map_operator.py @@ -38,7 +38,7 @@ def __init__( min_rows_per_bundle: The number of rows to gather per batch passed to the transform_fn, or None to use the block size. Setting the batch size is important for the performance of GPU-accelerated transform functions. - The actual rows passed may be less if the datastream is small. + The actual rows passed may be less if the dataset is small. ray_remote_args: Customize the ray remote args for this op's tasks. 
""" super().__init__( diff --git a/python/ray/data/_internal/execution/operators/zip_operator.py b/python/ray/data/_internal/execution/operators/zip_operator.py index db2287042810..3238948ff3e4 100644 --- a/python/ray/data/_internal/execution/operators/zip_operator.py +++ b/python/ray/data/_internal/execution/operators/zip_operator.py @@ -121,7 +121,7 @@ def _zip( total_right_rows = sum(right_block_rows) if total_left_rows != total_right_rows: raise ValueError( - "Cannot zip datastreams of different number of rows: " + "Cannot zip datasets of different number of rows: " f"{total_left_rows}, {total_right_rows}" ) diff --git a/python/ray/data/_internal/execution/streaming_executor.py b/python/ray/data/_internal/execution/streaming_executor.py index c6485f143959..d4ced149c440 100644 --- a/python/ray/data/_internal/execution/streaming_executor.py +++ b/python/ray/data/_internal/execution/streaming_executor.py @@ -6,7 +6,7 @@ import ray from ray.data.context import DataContext -from ray.data._internal.datastream_logger import DatastreamLogger +from ray.data._internal.dataset_logger import DatasetLogger from ray.data._internal.execution.interfaces import ( Executor, ExecutionOptions, @@ -30,9 +30,9 @@ get_or_create_autoscaling_requester_actor, ) from ray.data._internal.progress_bar import ProgressBar -from ray.data._internal.stats import DatastreamStats +from ray.data._internal.stats import DatasetStats -logger = DatastreamLogger(__name__) +logger = DatasetLogger(__name__) # Set this environment variable for detailed scheduler debugging logs. DEBUG_TRACE_SCHEDULING = "RAY_DATA_TRACE_SCHEDULING" in os.environ @@ -46,17 +46,17 @@ class StreamingExecutor(Executor, threading.Thread): - """A streaming Datastream executor. + """A streaming Dataset executor. - This implementation executes Datastream DAGs in a fully streamed way. It runs + This implementation executes Dataset DAGs in a fully streamed way. 
It runs by setting up the operator topology, and then routing blocks through operators in a way that maximizes throughput under resource constraints. """ def __init__(self, options: ExecutionOptions): self._start_time: Optional[float] = None - self._initial_stats: Optional[DatastreamStats] = None - self._final_stats: Optional[DatastreamStats] = None + self._initial_stats: Optional[DatasetStats] = None + self._final_stats: Optional[DatasetStats] = None self._global_info: Optional[ProgressBar] = None self._execution_id = uuid.uuid4().hex @@ -76,7 +76,7 @@ def __init__(self, options: ExecutionOptions): threading.Thread.__init__(self, daemon=True) def execute( - self, dag: PhysicalOperator, initial_stats: Optional[DatastreamStats] = None + self, dag: PhysicalOperator, initial_stats: Optional[DatasetStats] = None ) -> Iterator[RefBundle]: """Executes the DAG using a streaming execution strategy. @@ -196,9 +196,9 @@ def get_stats(self): else: return self._generate_stats() - def _generate_stats(self) -> DatastreamStats: + def _generate_stats(self) -> DatasetStats: """Create a new stats object reflecting execution status so far.""" - stats = self._initial_stats or DatastreamStats(stages={}, parent=None) + stats = self._initial_stats or DatasetStats(stages={}, parent=None) for op in self._topology: if isinstance(op, InputDataBuffer): continue diff --git a/python/ray/data/_internal/fast_repartition.py b/python/ray/data/_internal/fast_repartition.py index fe7e8de45606..06c53877cace 100644 --- a/python/ray/data/_internal/fast_repartition.py +++ b/python/ray/data/_internal/fast_repartition.py @@ -8,16 +8,16 @@ from ray.data._internal.progress_bar import ProgressBar from ray.data._internal.remote_fn import cached_remote_fn from ray.data._internal.shuffle_and_partition import _ShufflePartitionOp -from ray.data._internal.stats import DatastreamStats +from ray.data._internal.stats import DatasetStats def fast_repartition(blocks, num_blocks, ctx: Optional[TaskContext] = None): - 
from ray.data.datastream import Datastream, Schema + from ray.data.dataset import Dataset, Schema - wrapped_ds = Datastream( + wrapped_ds = Dataset( ExecutionPlan( blocks, - DatastreamStats(stages={}, parent=None), + DatasetStats(stages={}, parent=None), run_by_consumer=blocks._owned_by_consumer, ), 0, @@ -59,7 +59,7 @@ def fast_repartition(blocks, num_blocks, ctx: Optional[TaskContext] = None): owned_by_consumer = blocks._owned_by_consumer # Schema is safe to fetch here since we have already called - # get_internal_block_refs and executed the datastream. + # get_internal_block_refs and executed the dataset. schema = wrapped_ds.schema(fetch_if_missing=True) if isinstance(schema, Schema): schema = schema.base_schema @@ -86,8 +86,8 @@ def fast_repartition(blocks, num_blocks, ctx: Optional[TaskContext] = None): if schema is None: raise ValueError( - "Datastream is empty or cleared, can't determine the format of " - "the datastream." + "Dataset is empty or cleared, can't determine the format of " + "the dataset." 
) elif isinstance(schema, type): builder = SimpleBlockBuilder() diff --git a/python/ray/data/_internal/iterator/iterator_impl.py b/python/ray/data/_internal/iterator/iterator_impl.py index c97131d920fd..9f9a6833bdcb 100644 --- a/python/ray/data/_internal/iterator/iterator_impl.py +++ b/python/ray/data/_internal/iterator/iterator_impl.py @@ -4,52 +4,52 @@ from ray.data.block import Block, BlockMetadata from ray.data.context import DataContext from ray.data.iterator import DataIterator -from ray.data._internal.stats import DatastreamStats +from ray.data._internal.stats import DatasetStats if TYPE_CHECKING: import pyarrow - from ray.data import Datastream + from ray.data import Dataset class DataIteratorImpl(DataIterator): def __init__( self, - base_datastream: "Datastream", + base_dataset: "Dataset", ): - self._base_datastream = base_datastream + self._base_dataset = base_dataset self._base_context = DataContext.get_current() def __repr__(self) -> str: - return f"DataIterator({self._base_datastream})" + return f"DataIterator({self._base_dataset})" def _to_block_iterator( self, ) -> Tuple[ Iterator[Tuple[ObjectRef[Block], BlockMetadata]], - Optional[DatastreamStats], + Optional[DatasetStats], bool, ]: - ds = self._base_datastream + ds = self._base_dataset block_iterator, stats, executor = ds._plan.execute_to_iterator() ds._current_executor = executor return block_iterator, stats, False def stats(self) -> str: - return self._base_datastream.stats() + return self._base_dataset.stats() def schema(self) -> Union[type, "pyarrow.lib.Schema"]: - return self._base_datastream.schema() + return self._base_dataset.schema() def __getattr__(self, name): - if name == "_base_datastream": + if name == "_base_dataset": raise AttributeError() - if hasattr(self._base_datastream, name) and not name.startswith("_"): + if hasattr(self._base_dataset, name) and not name.startswith("_"): # Raise error for backwards compatibility. # TODO: remove this method in 2.6. 
raise DeprecationWarning( "session.get_dataset_shard returns a ray.data.DataIterator " - "instead of a Datastream/DatasetPipeline as of Ray v2.3. " + "instead of a Dataset/DatasetPipeline as of Ray v2.3. " "Use iter_torch_batches(), to_tf(), or iter_batches() to " "iterate over one epoch. See " "https://docs.ray.io/en/latest/data/api/dataset_iterator.html " diff --git a/python/ray/data/_internal/iterator/pipelined_iterator.py b/python/ray/data/_internal/iterator/pipelined_iterator.py index 591964fc72e9..011fea818d78 100644 --- a/python/ray/data/_internal/iterator/pipelined_iterator.py +++ b/python/ray/data/_internal/iterator/pipelined_iterator.py @@ -3,7 +3,7 @@ from ray.types import ObjectRef from ray.data.block import Block, BlockMetadata, DataBatch from ray.data.iterator import DataIterator -from ray.data._internal.stats import DatastreamStats +from ray.data._internal.stats import DatasetStats if TYPE_CHECKING: import pyarrow @@ -21,7 +21,7 @@ def __init__( def __repr__(self) -> str: return f"DataIterator({self._base_dataset_pipeline})" - def _get_next_datastream(self) -> "DatasetPipeline": + def _get_next_dataset(self) -> "DatasetPipeline": if self._epoch_iterator is None: self._epoch_iterator = self._base_dataset_pipeline.iter_epochs() @@ -32,18 +32,18 @@ def _to_block_iterator( self, ) -> Tuple[ Iterator[Tuple[ObjectRef[Block], BlockMetadata]], - Optional[DatastreamStats], + Optional[DatasetStats], bool, ]: - epoch_pipeline = self._get_next_datastream() + epoch_pipeline = self._get_next_dataset() - # Peek the first datastream from the pipeline to see if blocks are owned + # Peek the first dataset from the pipeline to see if blocks are owned # by consumer. If so, the blocks are safe to be eagerly cleared after use # because memories are not shared across different consumers. This will # improve the memory efficiency. 
- if epoch_pipeline._first_datastream is not None: + if epoch_pipeline._first_dataset is not None: blocks_owned_by_consumer = ( - epoch_pipeline._first_datastream._plan.execute()._owned_by_consumer + epoch_pipeline._first_dataset._plan.execute()._owned_by_consumer ) else: blocks_owned_by_consumer = ( @@ -96,7 +96,7 @@ def __getattr__(self, name): # TODO: remove this method in 2.6. raise DeprecationWarning( "session.get_dataset_shard returns a ray.data.DataIterator " - "instead of a Datastream/DatasetPipeline as of Ray v2.3. " + "instead of a Dataset/DatasetPipeline as of Ray v2.3. " "Use iter_torch_batches(), to_tf(), or iter_batches() to " "iterate over one epoch. See " "https://docs.ray.io/en/latest/data/api/dataset_iterator.html " diff --git a/python/ray/data/_internal/iterator/stream_split_iterator.py b/python/ray/data/_internal/iterator/stream_split_iterator.py index 6fb3a23a1b93..db3dec49fe5a 100644 --- a/python/ray/data/_internal/iterator/stream_split_iterator.py +++ b/python/ray/data/_internal/iterator/stream_split_iterator.py @@ -23,14 +23,14 @@ ) from ray.data._internal.execution.operators.output_splitter import OutputSplitter from ray.data._internal.execution.interfaces import NodeIdStr, RefBundle -from ray.data._internal.stats import DatastreamStats +from ray.data._internal.stats import DatasetStats from ray.types import ObjectRef from ray.util.debug import log_once from ray.util.scheduling_strategies import NodeAffinitySchedulingStrategy if TYPE_CHECKING: import pyarrow - from ray.data import Datastream + from ray.data import Dataset logger = logging.getLogger(__name__) @@ -43,14 +43,14 @@ class StreamSplitDataIterator(DataIterator): @staticmethod def create( - base_datastream: "Datastream", + base_dataset: "Dataset", n: int, equal: bool, locality_hints: Optional[List[NodeIdStr]], ) -> List["StreamSplitDataIterator"]: - """Create a split iterator from the given base Datastream and options. 
+ """Create a split iterator from the given base Dataset and options. - See also: `Datastream.streaming_split`. + See also: `Dataset.streaming_split`. """ ctx = DataContext.get_current() @@ -60,19 +60,17 @@ def create( scheduling_strategy=NodeAffinitySchedulingStrategy( ray.get_runtime_context().get_node_id(), soft=False ), - ).remote(ctx, base_datastream, n, equal, locality_hints) + ).remote(ctx, base_dataset, n, equal, locality_hints) - return [ - StreamSplitDataIterator(base_datastream, coord_actor, i) for i in range(n) - ] + return [StreamSplitDataIterator(base_dataset, coord_actor, i) for i in range(n)] def __init__( self, - base_datastream: "Datastream", + base_dataset: "Dataset", coord_actor: ray.actor.ActorHandle, output_split_idx: int, ): - self._base_datastream = base_datastream + self._base_dataset = base_dataset self._coord_actor = coord_actor self._output_split_idx = output_split_idx @@ -80,7 +78,7 @@ def _to_block_iterator( self, ) -> Tuple[ Iterator[Tuple[ObjectRef[Block], BlockMetadata]], - Optional[DatastreamStats], + Optional[DatasetStats], bool, ]: def gen_blocks() -> Iterator[Tuple[ObjectRef[Block], BlockMetadata]]: @@ -106,11 +104,11 @@ def gen_blocks() -> Iterator[Tuple[ObjectRef[Block], BlockMetadata]]: def stats(self) -> str: """Implements DataIterator.""" - return self._base_datastream.stats() + return self._base_dataset.stats() def schema(self) -> Union[type, "pyarrow.lib.Schema"]: """Implements DataIterator.""" - return self._base_datastream.schema() + return self._base_dataset.schema() @ray.remote(num_cpus=0) @@ -124,7 +122,7 @@ class SplitCoordinator: def __init__( self, ctx: DataContext, - datastream: "Datastream", + dataset: "Dataset", n: int, equal: bool, locality_hints: Optional[List[NodeIdStr]], @@ -135,7 +133,7 @@ def __init__( logger.info(f"Auto configuring locality_with_output={locality_hints}") DataContext._set_current(ctx) - self._base_datastream = datastream + self._base_dataset = dataset self._n = n self._equal = equal 
self._locality_hints = locality_hints @@ -155,9 +153,9 @@ def add_split_op(dag): output_iterator = execute_to_legacy_bundle_iterator( executor, - datastream._plan, + dataset._plan, True, - datastream._plan._datastream_uuid, + dataset._plan._dataset_uuid, dag_rewrite=add_split_op, ) yield output_iterator @@ -186,7 +184,7 @@ def get( if epoch_id != self._cur_epoch: raise ValueError( - "Invalid iterator: the datastream has moved on to another epoch." + "Invalid iterator: the dataset has moved on to another epoch." ) try: diff --git a/python/ray/data/_internal/lazy_block_list.py b/python/ray/data/_internal/lazy_block_list.py index 23331ddd1773..bd3513dd3d90 100644 --- a/python/ray/data/_internal/lazy_block_list.py +++ b/python/ray/data/_internal/lazy_block_list.py @@ -7,7 +7,7 @@ from ray.data._internal.progress_bar import ProgressBar from ray.data._internal.remote_fn import cached_remote_fn from ray.data._internal.memory_tracing import trace_allocation -from ray.data._internal.stats import DatastreamStats, _get_or_create_stats_actor +from ray.data._internal.stats import DatasetStats, _get_or_create_stats_actor from ray.data._internal.util import _split_list from ray.data.block import ( Block, @@ -26,7 +26,7 @@ class LazyBlockList(BlockList): """A BlockList that submits tasks lazily on-demand. This BlockList is used for implementing read operations (e.g., to avoid - needing to read all files of a Datastream when the user is just wanting to + needing to read all files of a Dataset when the user is just wanting to .take() the first few rows or view the schema). """ @@ -58,7 +58,7 @@ def __init__( in cached_metadata represents the list of output blocks metadata per the read task. One task can produce multiple output blocks. ray_remote_args: Ray remote arguments for the read tasks. - stats_uuid: UUID for the datastream stats, used to group and fetch read task + stats_uuid: UUID for the dataset stats, used to group and fetch read task stats. 
If not provided, a new UUID will be created. """ self._tasks = tasks @@ -119,10 +119,10 @@ def get_metadata(self, fetch_if_missing: bool = False) -> List[BlockMetadata]: _, metadata = self._get_blocks_with_metadata() return metadata - def stats(self) -> DatastreamStats: - """Create DatastreamStats for this LazyBlockList.""" - return DatastreamStats( - # Make a copy of metadata, as the DatastreamStats may mutate it in-place. + def stats(self) -> DatasetStats: + """Create DatasetStats for this LazyBlockList.""" + return DatasetStats( + # Make a copy of metadata, as the DatasetStats may mutate it in-place. stages={"Read": self.get_metadata(fetch_if_missing=False).copy()}, parent=None, needs_stats_actor=True, @@ -315,7 +315,7 @@ def _get_blocks_with_metadata( if context.block_splitting_enabled: # If block splitting is enabled, fetch the partitions through generator. read_progress_bar = ProgressBar("Read progress", total=len(block_refs)) - # Handle duplicates (e.g. due to unioning the same datastream). + # Handle duplicates (e.g. due to unioning the same dataset). unique_refs = list(set(block_refs)) generators = read_progress_bar.fetch_until_complete(unique_refs) @@ -341,7 +341,7 @@ def _get_blocks_with_metadata( return [], [] read_progress_bar = ProgressBar("Read progress", total=len(meta_refs)) # Fetch the metadata in bulk. - # Handle duplicates (e.g. due to unioning the same datastream). + # Handle duplicates (e.g. due to unioning the same dataset). unique_meta_refs = set(meta_refs) metadata = read_progress_bar.fetch_until_complete(list(unique_meta_refs)) ref_to_data = { @@ -359,7 +359,7 @@ def compute_first_block(self): """Kick off computation for the first block in the list. This is useful if looking to support rapid lightweight interaction with a small - amount of the datastream. + amount of the dataset. 
""" if self._tasks: self._get_or_compute(0) @@ -385,7 +385,7 @@ def ensure_metadata_for_first_block(self) -> Optional[BlockMetadata]: try: block_partition_ref, metadata_ref = next(self._iter_block_partition_refs()) except (StopIteration, ValueError): - # Datastream is empty (no blocks) or was manually cleared. + # Dataset is empty (no blocks) or was manually cleared. pass else: # This blocks until the underlying read task is finished. diff --git a/python/ray/data/_internal/logical/interfaces.py b/python/ray/data/_internal/logical/interfaces.py index 9eaa7fef49ac..7fd5b4662193 100644 --- a/python/ray/data/_internal/logical/interfaces.py +++ b/python/ray/data/_internal/logical/interfaces.py @@ -7,7 +7,7 @@ class Operator: """Abstract class for operators. - Operators live on the driver side of the Datastream only. + Operators live on the driver side of the Dataset only. """ def __init__(self, name: str, input_dependencies: List["Operator"]): diff --git a/python/ray/data/_internal/logical/operators/all_to_all_operator.py b/python/ray/data/_internal/logical/operators/all_to_all_operator.py index d39a42e6e561..a22851396a84 100644 --- a/python/ray/data/_internal/logical/operators/all_to_all_operator.py +++ b/python/ray/data/_internal/logical/operators/all_to_all_operator.py @@ -19,7 +19,7 @@ def __init__( """ Args: name: Name for this operator. This is the name that will appear when - inspecting the logical plan of a Datastream. + inspecting the logical plan of a Dataset. input_op: The operator preceding this operator in the plan DAG. The outputs of `input_op` will be the inputs to this operator. 
num_outputs: The number of expected output bundles outputted by this diff --git a/python/ray/data/_internal/logical/operators/map_operator.py b/python/ray/data/_internal/logical/operators/map_operator.py index f64bd482a85c..d3cd66080119 100644 --- a/python/ray/data/_internal/logical/operators/map_operator.py +++ b/python/ray/data/_internal/logical/operators/map_operator.py @@ -20,7 +20,7 @@ def __init__( """ Args: name: Name for this operator. This is the name that will appear when - inspecting the logical plan of a Datastream. + inspecting the logical plan of a Dataset. input_op: The operator preceding this operator in the plan DAG. The outputs of `input_op` will be the inputs to this operator. ray_remote_args: Args to provide to ray.remote. @@ -50,7 +50,7 @@ def __init__( """ Args: name: Name for this operator. This is the name that will appear when - inspecting the logical plan of a Datastream. + inspecting the logical plan of a Dataset. input_op: The operator preceding this operator in the plan DAG. The outputs of `input_op` will be the inputs to this operator. fn: User-defined function to be called. diff --git a/python/ray/data/_internal/memory_tracing.py b/python/ray/data/_internal/memory_tracing.py index 7402218cbca9..f44c648452ad 100644 --- a/python/ray/data/_internal/memory_tracing.py +++ b/python/ray/data/_internal/memory_tracing.py @@ -1,4 +1,4 @@ -"""Utility for debugging object store memory eager deletion in Datastreams. +"""Utility for debugging object store memory eager deletion in Datasets. NOTE: the performance overhead of tracing object allocation is fairly substantial. This is meant to use in unit test for debugging. Please do not enable in production, diff --git a/python/ray/data/_internal/null_aggregate.py b/python/ray/data/_internal/null_aggregate.py index f5b6a5763fa8..3a202c1710ed 100644 --- a/python/ray/data/_internal/null_aggregate.py +++ b/python/ray/data/_internal/null_aggregate.py @@ -15,7 +15,7 @@ # aggregation of non-null values. # 2. 
Mix of values and nulls - ignore_nulls=False: Return None. # 3. All nulls: Return None. -# 4. Empty datastream: Return None. +# 4. Empty dataset: Return None. # # This is accomplished by checking rows for null values and by propagating nulls # if found AND if we're not ignoring them. If not ignoring nulls, in order to delineate diff --git a/python/ray/data/_internal/pandas_block.py b/python/ray/data/_internal/pandas_block.py index 015d82a9b303..1c0efae931bf 100644 --- a/python/ray/data/_internal/pandas_block.py +++ b/python/ray/data/_internal/pandas_block.py @@ -53,7 +53,7 @@ def lazy_import_pandas(): class PandasRow(TableRow): """ - Row of a tabular Datastream backed by a Pandas DataFrame block. + Row of a tabular Dataset backed by a Pandas DataFrame block. """ def __getitem__(self, key: str) -> Any: @@ -185,11 +185,11 @@ def schema(self) -> PandasBlockSchema: names=dtypes.index.tolist(), types=dtypes.values.tolist() ) # Column names with non-str types of a pandas DataFrame is not - # supported by Ray Datastream. + # supported by Ray Dataset. if any(not isinstance(name, str) for name in schema.names): raise ValueError( "A Pandas DataFrame with column names of non-str types" - " is not supported by Ray Datastream. Column names of this" + " is not supported by Ray Dataset. Column names of this" f" DataFrame: {schema.names!r}." 
) return schema diff --git a/python/ray/data/_internal/pipeline_executor.py b/python/ray/data/_internal/pipeline_executor.py index 2f5a7fbc9233..fef5c0f4a020 100644 --- a/python/ray/data/_internal/pipeline_executor.py +++ b/python/ray/data/_internal/pipeline_executor.py @@ -5,7 +5,7 @@ import ray from ray.data.context import DataContext -from ray.data.datastream import Datastream +from ray.data.dataset import Dataset from ray.data._internal.progress_bar import ProgressBar from ray.data._internal import progress_bar @@ -15,7 +15,7 @@ from ray.data.dataset_pipeline import DatasetPipeline -def pipeline_stage(fn: Callable[[], Datastream]) -> Datastream: +def pipeline_stage(fn: Callable[[], Dataset]) -> Dataset: # Force eager evaluation of all blocks in the pipeline stage. This # prevents resource deadlocks due to overlapping stage execution (e.g., # task -> actor stage). @@ -25,7 +25,7 @@ def pipeline_stage(fn: Callable[[], Datastream]) -> Datastream: class PipelineExecutor: def __init__(self, pipeline: "DatasetPipeline"): self._pipeline: "DatasetPipeline" = pipeline - self._stages: List[concurrent.futures.Future[Datastream]] = [None] * ( + self._stages: List[concurrent.futures.Future[Dataset]] = [None] * ( len(self._pipeline._optimized_stages) + 1 ) self._iter = iter(self._pipeline._base_iterable) @@ -161,7 +161,7 @@ def __init__( self, pipeline: "DatasetPipeline", n: int, - splitter: Callable[[Datastream], List["Datastream"]], + splitter: Callable[[Dataset], List["Dataset"]], context: DataContext, ): DataContext._set_current(context) @@ -171,17 +171,17 @@ def __init__( self.splitter = splitter self.cur_splits = [None] * self.n - def next_datastream_if_ready(self, split_index: int) -> Optional[Datastream]: + def next_dataset_if_ready(self, split_index: int) -> Optional[Dataset]: # TODO(swang): This will hang if one of the consumers fails and is # re-executed from the beginning. To make this fault-tolerant, we need - # to make next_datastream_if_ready idempotent. 
- # Pull the next datastream once all splits are fully consumed. + # to make next_dataset_if_ready idempotent. + # Pull the next dataset once all splits are fully consumed. if all(s is None for s in self.cur_splits): ds = next(self.executor) self.cur_splits = self.splitter(ds) assert len(self.cur_splits) == self.n, (self.cur_splits, self.n) - # Return the datastream at the split index once per split. + # Return the dataset at the split index once per split. ret = self.cur_splits[split_index] self.cur_splits[split_index] = None return ret diff --git a/python/ray/data/_internal/plan.py b/python/ray/data/_internal/plan.py index 6de3b8878835..4e5782daea77 100644 --- a/python/ray/data/_internal/plan.py +++ b/python/ray/data/_internal/plan.py @@ -31,10 +31,10 @@ get_compute, is_task_compute, ) -from ray.data._internal.datastream_logger import DatastreamLogger +from ray.data._internal.dataset_logger import DatasetLogger from ray.data._internal.execution.interfaces import TaskContext from ray.data._internal.lazy_block_list import LazyBlockList -from ray.data._internal.stats import DatastreamStats, DatastreamStatsSummary +from ray.data._internal.stats import DatasetStats, DatasetStatsSummary from ray.data.block import Block from ray.data.context import DataContext from ray.util.debug import log_once @@ -48,11 +48,11 @@ INHERITABLE_REMOTE_ARGS = ["scheduling_strategy"] -logger = DatastreamLogger(__name__) +logger = DatasetLogger(__name__) class Stage: - """Represents a Datastream transform stage (e.g., map or shuffle).""" + """Represents a Dataset transform stage (e.g., map or shuffle).""" def __init__(self, name: str, num_blocks: Optional[int]): self.name = name @@ -80,7 +80,7 @@ def __str__(self): class ExecutionPlan: - """A lazy execution plan for a Datastream.""" + """A lazy execution plan for a Dataset.""" # Implementation Notes: # @@ -101,8 +101,8 @@ class ExecutionPlan: def __init__( self, in_blocks: BlockList, - stats: DatastreamStats, - datastream_uuid=None, + 
stats: DatasetStats, + dataset_uuid=None, *, run_by_consumer: bool, ): @@ -111,7 +111,7 @@ def __init__( Args: in_blocks: Base list of blocks. stats: Stats for the base blocks. - datastream_uuid: Datastream's UUID. + dataset_uuid: Dataset's UUID. run_by_consumer: Whether this plan is invoked to run by the consumption APIs (e.g. .iter_batches()). """ @@ -126,16 +126,16 @@ def __init__( # Cache of optimized stages. self._last_optimized_stages = None - self._datastream_uuid = datastream_uuid or uuid.uuid4().hex - if not stats.datastream_uuid: - stats.datastream_uuid = self._datastream_uuid + self._dataset_uuid = dataset_uuid or uuid.uuid4().hex + if not stats.dataset_uuid: + stats.dataset_uuid = self._dataset_uuid self._run_by_consumer = run_by_consumer def __repr__(self) -> str: return ( f"ExecutionPlan(" - f"datastream_uuid={self._datastream_uuid}, " + f"dataset_uuid={self._dataset_uuid}, " f"run_by_consumer={self._run_by_consumer}, " f"in_blocks={self._in_blocks}, " f"stages_before_snapshot={self._stages_before_snapshot}, " @@ -149,7 +149,7 @@ def get_plan_as_string(self, classname: str) -> str: Returns: The string representation of this execution plan. """ - # NOTE: this is used for Datastream.__repr__ to give a user-facing string + # NOTE: this is used for Dataset.__repr__ to give a user-facing string # representation. Ideally ExecutionPlan.__repr__ should be replaced with this # method as well. @@ -157,7 +157,7 @@ def get_plan_as_string(self, classname: str) -> str: # cheap. plan_str = "" num_stages = 0 - datastream_blocks = None + dataset_blocks = None if self._stages_after_snapshot: # Get string representation of each stage in reverse order. 
for stage in self._stages_after_snapshot[::-1]: @@ -182,17 +182,17 @@ def get_plan_as_string(self, classname: str) -> str: schema = self._get_unified_blocks_schema( self._snapshot_blocks, fetch_if_missing=False ) - datastream_blocks = self._snapshot_blocks + dataset_blocks = self._snapshot_blocks else: assert self._in_blocks is not None schema = self._get_unified_blocks_schema( self._in_blocks, fetch_if_missing=False ) - datastream_blocks = self._in_blocks + dataset_blocks = self._in_blocks else: # Get schema of output blocks. schema = self.schema(fetch_if_missing=False) - datastream_blocks = self._snapshot_blocks + dataset_blocks = self._snapshot_blocks if schema is None: schema_str = "Unknown schema" @@ -206,14 +206,14 @@ def get_plan_as_string(self, classname: str) -> str: schema_str.append(f"{n}: {t}") schema_str = ", ".join(schema_str) schema_str = "{" + schema_str + "}" - count = self._get_num_rows_from_blocks_metadata(datastream_blocks) + count = self._get_num_rows_from_blocks_metadata(dataset_blocks) if count is None: count = "?" - if datastream_blocks is None: + if dataset_blocks is None: num_blocks = "?" else: - num_blocks = datastream_blocks.initial_num_blocks() - datastream_str = "{}(num_blocks={}, num_rows={}, schema={})".format( + num_blocks = dataset_blocks.initial_num_blocks() + dataset_str = "{}(num_blocks={}, num_rows={}, schema={})".format( classname, num_blocks, count, schema_str ) @@ -222,9 +222,9 @@ def get_plan_as_string(self, classname: str) -> str: MIN_FIELD_LENGTH = 10 INDENT_STR = " " * 3 trailing_space = " " * (max(num_stages, 0) * 3) - if len(datastream_str) > SCHEMA_LINE_CHAR_LIMIT: + if len(dataset_str) > SCHEMA_LINE_CHAR_LIMIT: # If the resulting string representation exceeds the line char limit, - # first try breaking up each `Datastream` parameter into its own line + # first try breaking up each `Dataset` parameter into its own line # and check if each line fits within the line limit. 
We check the # `schema` param's length, since this is likely the longest string. schema_str_on_new_line = f"{trailing_space}{INDENT_STR}schema={schema_str}" @@ -254,7 +254,7 @@ def get_plan_as_string(self, classname: str) -> str: schema_str = ( "{\n" + schema_str + f"\n{trailing_space}{INDENT_STR}" + "}" ) - datastream_str = ( + dataset_str = ( f"{classname}(" f"\n{trailing_space}{INDENT_STR}num_blocks={num_blocks}," f"\n{trailing_space}{INDENT_STR}num_rows={count}," @@ -263,10 +263,10 @@ def get_plan_as_string(self, classname: str) -> str: ) if num_stages == 0: - plan_str = datastream_str + plan_str = dataset_str else: trailing_space = " " * ((num_stages - 1) * 3) - plan_str += f"{trailing_space}+- {datastream_str}" + plan_str += f"{trailing_space}+- {dataset_str}" return plan_str def with_stage(self, stage: "Stage") -> "ExecutionPlan": @@ -321,16 +321,16 @@ def deep_copy(self, preserve_uuid: bool = False) -> "ExecutionPlan": Returns: A deep copy of this execution plan. """ - datastream_uuid = None + dataset_uuid = None if preserve_uuid: - datastream_uuid = self._datastream_uuid + dataset_uuid = self._dataset_uuid in_blocks = self._in_blocks if isinstance(in_blocks, BlockList): in_blocks = in_blocks.copy() plan_copy = ExecutionPlan( in_blocks, copy.copy(self._in_stats), - datastream_uuid=datastream_uuid, + dataset_uuid=dataset_uuid, run_by_consumer=self._run_by_consumer, ) if self._snapshot_blocks: @@ -366,7 +366,7 @@ def schema( fetch_if_missing: Whether to execute the plan to fetch the schema. Returns: - The schema of the output datastream. + The schema of the output dataset. """ from ray.data._internal.stage_impl import RandomizeBlocksStage @@ -394,8 +394,8 @@ def schema( return None elif self._in_blocks is not None and self._snapshot_blocks is None: # If the plan only has input blocks, we execute it, so snapshot has output. - # This applies to newly created datastream. For example, initial datastream - # from read, and output datastreams of Datastream.split(). 
+ # This applies to newly created dataset. For example, initial dataset + # from read, and output datasets of Dataset.split(). self.execute() # Snapshot is now guaranteed to be the output of the final stage or None. blocks = self._snapshot_blocks @@ -441,14 +441,14 @@ def meta_count(self) -> Optional[int]: This method will never trigger any computation. Returns: - The number of records of the result Datastream, or None. + The number of records of the result Dataset, or None. """ if self._stages_after_snapshot: return None elif self._in_blocks is not None and self._snapshot_blocks is None: # If the plan only has input blocks, we execute it, so snapshot has output. - # This applies to newly created datastream. For example, initial datastream - # from read, and output datastreams of Datastream.split(). + # This applies to newly created dataset. For example, initial dataset + # from read, and output datasets of Dataset.split(). self.execute() # Snapshot is now guaranteed to be the final block or None. return self._get_num_rows_from_blocks_metadata(self._snapshot_blocks) @@ -466,7 +466,7 @@ def execute_to_iterator( force_read: bool = False, ) -> Tuple[ Iterator[Tuple[ObjectRef[Block], BlockMetadata]], - DatastreamStats, + DatasetStats, Optional["Executor"], ]: """Execute this plan, returning an iterator. @@ -503,7 +503,7 @@ def execute_to_iterator( executor, self, allow_clear_input_blocks=allow_clear_input_blocks, - datastream_uuid=self._datastream_uuid, + dataset_uuid=self._dataset_uuid, ) # Since the generator doesn't run any code until we try to fetch the first # value, force execution of one bundle before we call get_stats(). @@ -530,14 +530,14 @@ def execute( preserve_order: Whether to preserve order in execution. Returns: - The blocks of the output datastream. + The blocks of the output dataset. 
""" context = DataContext.get_current() if not ray.available_resources().get("CPU"): if log_once("cpu_warning"): logger.get_logger().warning( "Warning: The Ray cluster currently does not have " - "any available CPUs. The Datastream job will hang unless more CPUs " + "any available CPUs. The Dataset job will hang unless more CPUs " "are freed up. A common reason is that cluster resources are " "used by Actors or Tune trials; see the following link " "for more details: " @@ -563,13 +563,13 @@ def execute( executor, self, allow_clear_input_blocks=allow_clear_input_blocks, - datastream_uuid=self._datastream_uuid, + dataset_uuid=self._dataset_uuid, preserve_order=preserve_order, ) # TODO(ekl) we shouldn't need to set this in the future once we move # to a fully lazy execution model, unless .materialize() is used. Th # reason we need it right now is since the user may iterate over a - # Datastream multiple times after fully executing it once. + # Dataset multiple times after fully executing it once. if not self._run_by_consumer: blocks._owned_by_consumer = False stats = executor.get_stats() @@ -598,7 +598,7 @@ def execute( stats = stats_builder.build_multistage(stage_info) else: stats = stats_builder.build(blocks) - stats.datastream_uuid = self._datastream_uuid + stats.dataset_uuid = self._dataset_uuid stats_summary_string = stats.to_summary().to_string( include_parent=False, ) @@ -609,7 +609,7 @@ def execute( # Set the snapshot to the output of the final stage. 
self._snapshot_blocks = blocks self._snapshot_stats = stats - self._snapshot_stats.datastream_uuid = self._datastream_uuid + self._snapshot_stats.dataset_uuid = self._dataset_uuid self._stages_before_snapshot += self._stages_after_snapshot self._stages_after_snapshot = [] if _is_lazy(self._snapshot_blocks) and force_read: @@ -634,16 +634,16 @@ def _clear_snapshot(self) -> None: ) self._stages_before_snapshot = [] - def stats(self) -> DatastreamStats: + def stats(self) -> DatasetStats: """Return stats for this plan. If the plan isn't executed, an empty stats object will be returned. """ if not self._snapshot_stats: - return DatastreamStats(stages={}, parent=None) + return DatasetStats(stages={}, parent=None) return self._snapshot_stats - def stats_summary(self) -> DatastreamStatsSummary: + def stats_summary(self) -> DatasetStatsSummary: return self.stats().to_summary() def _should_clear_input_blocks( @@ -668,7 +668,7 @@ def _should_clear_input_blocks( # execution plan, so we don't clear these. return False - def _optimize(self) -> Tuple[BlockList, DatastreamStats, List[Stage]]: + def _optimize(self) -> Tuple[BlockList, DatasetStats, List[Stage]]: """Apply stage fusion optimizations, returning an updated source block list and associated stats, and a set of optimized stages. """ @@ -681,7 +681,7 @@ def _optimize(self) -> Tuple[BlockList, DatastreamStats, List[Stage]]: # If using a lazy datasource, rewrite read stage into one-to-one stage # so it can be fused into downstream stages. 
blocks, stats, stages = _rewrite_read_stages( - blocks, stats, stages, self._datastream_uuid + blocks, stats, stages, self._dataset_uuid ) stages = _fuse_one_to_one_stages(stages) self._last_optimized_stages = stages @@ -689,7 +689,7 @@ def _optimize(self) -> Tuple[BlockList, DatastreamStats, List[Stage]]: def _get_source_blocks_and_stages( self, - ) -> Tuple[BlockList, DatastreamStats, List[Stage]]: + ) -> Tuple[BlockList, DatasetStats, List[Stage]]: """Get the source blocks, corresponding stats, and the stages for plan execution. @@ -1128,20 +1128,20 @@ def __call__( def _rewrite_read_stages( blocks: BlockList, - stats: DatastreamStats, + stats: DatasetStats, stages: List[Stage], - datastream_uuid: str, -) -> Tuple[BlockList, DatastreamStats, List[Stage]]: + dataset_uuid: str, +) -> Tuple[BlockList, DatasetStats, List[Stage]]: """Rewrites read stages into one-to-one stages, if needed.""" if _is_lazy(blocks) and stages: blocks, stats, stages = _rewrite_read_stage(blocks, stages) - stats.datastream_uuid = datastream_uuid + stats.dataset_uuid = dataset_uuid return blocks, stats, stages def _rewrite_read_stage( in_blocks: LazyBlockList, stages: List[Stage] -) -> Tuple[BlockList, DatastreamStats, List[Stage]]: +) -> Tuple[BlockList, DatasetStats, List[Stage]]: """Rewrite the read stage to a OneToOne stage over read tasks as input. For example, suppose the plan was [Read -> MapBatches(Fn)]. 
These stages cannot @@ -1196,7 +1196,7 @@ def block_fn( TaskPoolStrategy(), remote_args, ) - stats = DatastreamStats(stages={}, parent=None) + stats = DatasetStats(stages={}, parent=None) stages.insert(0, stage) return block_list, stats, stages diff --git a/python/ray/data/_internal/planner/exchange/sort_task_spec.py b/python/ray/data/_internal/planner/exchange/sort_task_spec.py index b87bff6f128d..c6c011fe5585 100644 --- a/python/ray/data/_internal/planner/exchange/sort_task_spec.py +++ b/python/ray/data/_internal/planner/exchange/sort_task_spec.py @@ -104,7 +104,7 @@ def sample_boundaries( sample_bar.close() del sample_results samples = [s for s in samples if len(s) > 0] - # The datastream is empty + # The dataset is empty if len(samples) == 0: return [None] * (num_reducers - 1) builder = DelegatingBlockBuilder() diff --git a/python/ray/data/_internal/planner/write.py b/python/ray/data/_internal/planner/write.py index ca3d63c92a8e..e0ae7d82fbf3 100644 --- a/python/ray/data/_internal/planner/write.py +++ b/python/ray/data/_internal/planner/write.py @@ -8,7 +8,7 @@ def generate_write_fn( datasource: Datasource, **write_args ) -> Callable[[Iterator[Block], TaskContext], Iterator[Block]]: - # If the write op succeeds, the resulting Datastream is a list of + # If the write op succeeds, the resulting Dataset is a list of # WriteResult (one element per write task). Otherwise, an error will # be raised. The Datasource can handle execution outcomes with the # on_write_complete() and on_write_failed(). diff --git a/python/ray/data/_internal/progress_bar.py b/python/ray/data/_internal/progress_bar.py index ee9b721a1a3e..dd03ed0960f7 100644 --- a/python/ray/data/_internal/progress_bar.py +++ b/python/ray/data/_internal/progress_bar.py @@ -62,9 +62,7 @@ def __init__( else: global needs_warning if needs_warning: - print( - "[datastream]: Run `pip install tqdm` to enable progress reporting." 
- ) + print("[dataset]: Run `pip install tqdm` to enable progress reporting.") needs_warning = False self._bar = None diff --git a/python/ray/data/_internal/remote_fn.py b/python/ray/data/_internal/remote_fn.py index 4a6d93fb0938..077008e0c5aa 100644 --- a/python/ray/data/_internal/remote_fn.py +++ b/python/ray/data/_internal/remote_fn.py @@ -10,7 +10,7 @@ def cached_remote_fn(fn: Any, **ray_remote_args) -> Any: """Lazily defines a ray.remote function. - This is used in Datastreams to avoid circular import issues with ray.remote. + This is used in Datasets to avoid circular import issues with ray.remote. (ray imports ray.data in order to allow ``ray.data.read_foo()`` to work, which means ray.remote cannot be used top-level in ray.data). diff --git a/python/ray/data/_internal/sort.py b/python/ray/data/_internal/sort.py index 02065b59f621..4699ecd0d0a2 100644 --- a/python/ray/data/_internal/sort.py +++ b/python/ray/data/_internal/sort.py @@ -14,7 +14,7 @@ Merging: a merge task would receive a block from every worker that consists of items in a certain range. It then merges the sorted blocks into one sorted -block and becomes part of the new, sorted datastream. +block and becomes part of the new, sorted dataset. 
""" from typing import Any, Callable, List, Optional, Tuple, TypeVar, Union @@ -113,7 +113,7 @@ def sample_boundaries( sample_bar.close() del sample_results samples = [s for s in samples if len(s) > 0] - # The datastream is empty + # The dataset is empty if len(samples) == 0: return [None] * (num_reducers - 1) builder = DelegatingBlockBuilder() diff --git a/python/ray/data/_internal/stage_impl.py b/python/ray/data/_internal/stage_impl.py index 3ea2044d2e47..73f6ee454e6f 100644 --- a/python/ray/data/_internal/stage_impl.py +++ b/python/ray/data/_internal/stage_impl.py @@ -28,11 +28,11 @@ ) if TYPE_CHECKING: - from ray.data import Datastream + from ray.data import Dataset class RepartitionStage(AllToAllStage): - """Implementation of `Datastream.repartition()`.""" + """Implementation of `Dataset.repartition()`.""" def __init__(self, num_blocks: int, shuffle: bool): if shuffle: @@ -96,7 +96,7 @@ def do_fast_repartition( class RandomizeBlocksStage(AllToAllStage): - """Implementation of `Datastream.randomize_blocks()`.""" + """Implementation of `Dataset.randomize_blocks()`.""" def __init__(self, seed: Optional[int]): self._seed = seed @@ -112,7 +112,7 @@ def do_randomize(self, block_list, *_): class RandomShuffleStage(AllToAllStage): - """Implementation of `Datastream.random_shuffle()`.""" + """Implementation of `Dataset.random_shuffle()`.""" def __init__( self, @@ -167,11 +167,11 @@ def do_shuffle( class ZipStage(AllToAllStage): - """Implementation of `Datastream.zip()`.""" + """Implementation of `Dataset.zip()`.""" - def __init__(self, other: "Datastream"): + def __init__(self, other: "Dataset"): def do_zip_all(block_list: BlockList, clear_input_blocks: bool, *_): - # Repartition other to align with the base datastream, and then zip together + # Repartition other to align with the base dataset, and then zip together # the blocks in parallel. # TODO(Clark): Port this to a streaming zip, e.g. push block pairs through # an actor that buffers and zips. 
@@ -190,7 +190,7 @@ def do_zip_all(block_list: BlockList, clear_input_blocks: bool, *_): ) inverted = False if sum(other_block_bytes) > sum(base_block_bytes): - # Make sure that other is the smaller datastream, so we minimize + # Make sure that other is the smaller dataset, so we minimize # splitting work when aligning other with base. # TODO(Clark): Improve this heuristic for minimizing splitting work, # e.g. by generating the splitting plans for each route (via @@ -207,14 +207,14 @@ def do_zip_all(block_list: BlockList, clear_input_blocks: bool, *_): indices = list(itertools.accumulate(base_block_rows)) indices.pop(-1) - # Check that each datastream has the same number of rows. + # Check that each dataset has the same number of rows. # TODO(Clark): Support different number of rows via user-directed # dropping/padding. total_base_rows = sum(base_block_rows) total_other_rows = sum(other_block_rows) if total_base_rows != total_other_rows: raise ValueError( - "Cannot zip datastreams of different number of rows: " + "Cannot zip datasets of different number of rows: " f"{total_base_rows}, {total_other_rows}" ) @@ -313,16 +313,16 @@ def _do_zip( class SortStage(AllToAllStage): - """Implementation of `Datastream.sort()`.""" + """Implementation of `Dataset.sort()`.""" - def __init__(self, ds: "Datastream", key: Optional[str], descending: bool): + def __init__(self, ds: "Dataset", key: Optional[str], descending: bool): def do_sort( block_list, ctx: TaskContext, clear_input_blocks: bool, *_, ): - # Handle empty datastream. + # Handle empty dataset. 
if block_list.initial_num_blocks() == 0: return block_list, {} if clear_input_blocks: @@ -349,7 +349,7 @@ def do_sort( class LimitStage(AllToAllStage): - """Implementation of `Datastream.limit()`.""" + """Implementation of `Dataset.limit()`.""" def __init__(self, limit: int): self._limit = limit diff --git a/python/ray/data/_internal/stats.py b/python/ray/data/_internal/stats.py index c0621814a8a6..94bcfee10f4d 100644 --- a/python/ray/data/_internal/stats.py +++ b/python/ray/data/_internal/stats.py @@ -14,8 +14,8 @@ from ray.util.annotations import DeveloperAPI from ray.util.scheduling_strategies import NodeAffinitySchedulingStrategy -STATS_ACTOR_NAME = "datastreams_stats_actor" -STATS_ACTOR_NAMESPACE = "_datastream_stats_actor" +STATS_ACTOR_NAME = "datasets_stats_actor" +STATS_ACTOR_NAMESPACE = "_dataset_stats_actor" StatsDict = Dict[str, List[BlockMetadata]] @@ -76,24 +76,24 @@ def avg(self) -> float: return self._value / self._total_count if self._total_count else float("inf") -class _DatastreamStatsBuilder: - """Helper class for building datastream stats. +class _DatasetStatsBuilder: + """Helper class for building dataset stats. When this class is created, we record the start time. 
When build() is - called with the final blocks of the new datastream, the time delta is + called with the final blocks of the new dataset, the time delta is saved as part of the stats.""" def __init__( self, stage_name: str, - parent: "DatastreamStats", + parent: "DatasetStats", override_start_time: Optional[float], ): self.stage_name = stage_name self.parent = parent self.start_time = override_start_time or time.perf_counter() - def build_multistage(self, stages: StatsDict) -> "DatastreamStats": + def build_multistage(self, stages: StatsDict) -> "DatasetStats": stage_infos = {} for i, (k, v) in enumerate(stages.items()): capped_k = capfirst(k) @@ -104,7 +104,7 @@ def build_multistage(self, stages: StatsDict) -> "DatastreamStats": stage_infos[self.stage_name.split("->")[-1] + capped_k] = v else: stage_infos[self.stage_name] = v - stats = DatastreamStats( + stats = DatasetStats( stages=stage_infos, parent=self.parent, base_name=self.stage_name, @@ -112,8 +112,8 @@ def build_multistage(self, stages: StatsDict) -> "DatastreamStats": stats.time_total_s = time.perf_counter() - self.start_time return stats - def build(self, final_blocks: BlockList) -> "DatastreamStats": - stats = DatastreamStats( + def build(self, final_blocks: BlockList) -> "DatasetStats": + stats = DatasetStats( stages={self.stage_name: final_blocks.get_metadata()}, parent=self.parent, ) @@ -125,7 +125,7 @@ def build(self, final_blocks: BlockList) -> "DatastreamStats": class _StatsActor: """Actor holding stats for blocks created by LazyBlockList. - This actor is shared across all datastreams created in the same cluster. + This actor is shared across all datasets created in the same cluster. In order to cap memory usage, we set a max number of stats to keep in the actor. When this limit is exceeded, the stats will be garbage collected in FIFO order. @@ -196,31 +196,31 @@ def _get_or_create_stats_actor(): ).remote() -class DatastreamStats: - """Holds the execution times for a given Datastream. 
+class DatasetStats: + """Holds the execution times for a given Dataset. - This object contains a reference to the parent Datastream's stats as well, - but not the Datastream object itself, to allow its blocks to be dropped from + This object contains a reference to the parent Dataset's stats as well, + but not the Dataset object itself, to allow its blocks to be dropped from memory.""" def __init__( self, *, stages: StatsDict, - parent: Union[Optional["DatastreamStats"], List["DatastreamStats"]], + parent: Union[Optional["DatasetStats"], List["DatasetStats"]], needs_stats_actor: bool = False, stats_uuid: str = None, base_name: str = None, ): - """Create datastream stats. + """Create dataset stats. Args: - stages: Dict of stages used to create this Datastream from the + stages: Dict of stages used to create this Dataset from the previous one. Typically one entry, e.g., {"map": [...]}. - parent: Reference to parent Datastream's stats, or a list of parents + parent: Reference to parent Dataset's stats, or a list of parents if there are multiple. - needs_stats_actor: Whether this Datastream's stats needs a stats actor for - stats collection. This is currently only used for Datastreams using a + needs_stats_actor: Whether this Dataset's stats needs a stats actor for + stats collection. This is currently only used for Datasets using a lazy datasource (i.e. a LazyBlockList). stats_uuid: The uuid for the stats, used to fetch the right stats from the stats actor. 
@@ -230,20 +230,20 @@ def __init__( self.stages: StatsDict = stages if parent is not None and not isinstance(parent, list): parent = [parent] - self.parents: List["DatastreamStats"] = parent or [] + self.parents: List["DatasetStats"] = parent or [] self.number: int = ( 0 if not self.parents else max(p.number for p in self.parents) + 1 ) self.base_name = base_name - # TODO(ekl) deprecate and remove the notion of datastream UUID once we move + # TODO(ekl) deprecate and remove the notion of dataset UUID once we move # fully to streaming execution. - self.datastream_uuid: str = "unknown_uuid" + self.dataset_uuid: str = "unknown_uuid" self.time_total_s: float = 0 self.needs_stats_actor = needs_stats_actor self.stats_uuid = stats_uuid self._legacy_iter_batches = False - # Iteration stats, filled out if the user iterates over the datastream. + # Iteration stats, filled out if the user iterates over the dataset. self.iter_wait_s: Timer = Timer() self.iter_get_s: Timer = Timer() self.iter_next_batch_s: Timer = Timer() @@ -270,21 +270,21 @@ def stats_actor(self): def child_builder( self, name: str, override_start_time: Optional[float] = None - ) -> _DatastreamStatsBuilder: + ) -> _DatasetStatsBuilder: """Start recording stats for an op of the given name (e.g., map).""" - return _DatastreamStatsBuilder(name, self, override_start_time) + return _DatasetStatsBuilder(name, self, override_start_time) - def child_TODO(self, name: str) -> "DatastreamStats": + def child_TODO(self, name: str) -> "DatasetStats": """Placeholder for child ops not yet instrumented.""" - return DatastreamStats(stages={name + "_TODO": []}, parent=self) + return DatasetStats(stages={name + "_TODO": []}, parent=self) @staticmethod def TODO(): """Placeholder for ops not yet instrumented.""" - return DatastreamStats(stages={"TODO": []}, parent=None) + return DatasetStats(stages={"TODO": []}, parent=None) - def to_summary(self) -> "DatastreamStatsSummary": - """Generate a `DatastreamStatsSummary` object from 
the given `DatastreamStats` + def to_summary(self) -> "DatasetStatsSummary": + """Generate a `DatasetStatsSummary` object from the given `DatasetStats` object, which can be used to generate a summary string.""" if self.needs_stats_actor: ac = self.stats_actor @@ -330,12 +330,12 @@ def to_summary(self) -> "DatastreamStatsSummary": stats_summary_parents = [] if self.parents is not None: stats_summary_parents = [p.to_summary() for p in self.parents] - return DatastreamStatsSummary( + return DatasetStatsSummary( stages_stats, iter_stats, stats_summary_parents, self.number, - self.datastream_uuid, + self.dataset_uuid, self.time_total_s, self.base_name, self.extra_metrics, @@ -344,12 +344,12 @@ def to_summary(self) -> "DatastreamStatsSummary": @DeveloperAPI @dataclass -class DatastreamStatsSummary: +class DatasetStatsSummary: stages_stats: List["StageStatsSummary"] iter_stats: "IterStatsSummary" - parents: List["DatastreamStatsSummary"] + parents: List["DatasetStatsSummary"] number: int - datastream_uuid: str + dataset_uuid: str time_total_s: float base_name: str extra_metrics: Dict[str, Any] @@ -357,7 +357,7 @@ class DatastreamStatsSummary: def to_string( self, already_printed: Optional[Set[str]] = None, include_parent: bool = True ) -> str: - """Return a human-readable summary of this Datastream's stats. + """Return a human-readable summary of this Dataset's stats. Args: already_printed: Set of stage IDs that have already had its stats printed @@ -365,7 +365,7 @@ def to_string( include_parent: If true, also include parent stats summary; otherwise, only log stats of the latest stage. Returns: - String with summary statistics for executing the Datastream. + String with summary statistics for executing the Dataset. 
""" if already_printed is None: already_printed = set() @@ -380,7 +380,7 @@ def to_string( if len(self.stages_stats) == 1: stage_stats_summary = self.stages_stats[0] stage_name = stage_stats_summary.stage_name - stage_uuid = self.datastream_uuid + stage_name + stage_uuid = self.dataset_uuid + stage_name out += "Stage {} {}: ".format(self.number, stage_name) if stage_uuid in already_printed: out += "[execution cached]\n" @@ -397,7 +397,7 @@ def to_string( ) for n, stage_stats_summary in enumerate(self.stages_stats): stage_name = stage_stats_summary.stage_name - stage_uuid = self.datastream_uuid + stage_name + stage_uuid = self.dataset_uuid + stage_name out += "\n" out += "\tSubstage {} {}: ".format(n, stage_name) if stage_uuid in already_printed: @@ -426,8 +426,8 @@ def __repr__(self, level=0) -> str: parent_stats = f"\n{parent_stats},\n{indent} " if parent_stats else "" extra_metrics = f"\n{extra_metrics}\n{indent} " if extra_metrics else "" return ( - f"{indent}DatastreamStatsSummary(\n" - f"{indent} datastream_uuid={self.datastream_uuid},\n" + f"{indent}DatasetStatsSummary(\n" + f"{indent} dataset_uuid={self.dataset_uuid},\n" f"{indent} base_name={self.base_name},\n" f"{indent} number={self.number},\n" f"{indent} extra_metrics={{{extra_metrics}}},\n" @@ -463,7 +463,7 @@ class StageStatsSummary: # Whether the stage associated with this StageStatsSummary object is a substage is_substage: bool # This is the total walltime of the entire stage, typically obtained from - # `DatastreamStats.time_total_s`. An important distinction is that this is the + # `DatasetStats.time_total_s`. An important distinction is that this is the # overall runtime of the stage, pulled from the stats actor, whereas the # computed walltimes in `self.wall_time` are calculated on a substage level. 
time_total_s: float @@ -730,7 +730,7 @@ class IterStatsSummary: block_time: Timer # Time spent in user code, in seconds user_time: Timer - # Total time taken by Datastream iterator, in seconds + # Total time taken by Dataset iterator, in seconds total_time: Timer # Num of blocks that are in local object store iter_blocks_local: int @@ -755,7 +755,7 @@ def to_string(self) -> str: or self.format_time.get() or self.collate_time.get() ): - out += "\nDatastream iterator time breakdown:\n" + out += "\nDataset iterator time breakdown:\n" if self.block_time.get(): out += "* Total time user code is blocked: {}\n".format( fmt(self.block_time.get()) @@ -822,7 +822,7 @@ def to_string_legacy(self) -> str: or self.format_time.get() or self.get_time.get() ): - out += "\nDatastream iterator time breakdown:\n" + out += "\nDataset iterator time breakdown:\n" out += "* In ray.wait(): {}\n".format(fmt(self.wait_time.get())) out += "* In ray.get(): {}\n".format(fmt(self.get_time.get())) out += "* Num blocks local: {}\n".format(self.iter_blocks_local) @@ -854,16 +854,16 @@ def __repr__(self, level=0) -> str: class DatasetPipelineStats: - """Holds the execution times for a pipeline of Datastreams.""" + """Holds the execution times for a pipeline of Datasets.""" def __init__(self, *, max_history: int = 3): - """Create a datastream pipeline stats object. + """Create a dataset pipeline stats object. Args: - max_history: The max number of datastream window stats to track. + max_history: The max number of dataset window stats to track. 
""" self.max_history: int = max_history - self.history_buffer: List[Tuple[int, DatastreamStats]] = [] + self.history_buffer: List[Tuple[int, DatasetStats]] = [] self.count = 0 self.wait_time_s = [] @@ -887,7 +887,7 @@ def __getattr__(self, name): return self._iter_stats[name] raise AttributeError - def add(self, stats: DatastreamStats) -> None: + def add(self, stats: DatasetStats) -> None: """Called to add stats for a newly computed window.""" self.history_buffer.append((self.count, stats)) if len(self.history_buffer) > self.max_history: @@ -900,8 +900,8 @@ def add_pipeline_stats(self, other_stats: "DatasetPipelineStats"): `other_stats` should cover a disjoint set of windows than the current stats. """ - for _, datastream_stats in other_stats.history_buffer: - self.add(datastream_stats) + for _, dataset_stats in other_stats.history_buffer: + self.add(dataset_stats) self.wait_time_s.extend(other_stats.wait_time_s) @@ -918,7 +918,7 @@ def _summarize_iter(self) -> str: or self.iter_get_s.get() ): out += "\nDatasetPipeline iterator time breakdown:\n" - out += "* Waiting for next datastream: {}\n".format( + out += "* Waiting for next dataset: {}\n".format( fmt(self.iter_ds_wait_s.get()) ) out += "* In ray.wait(): {}\n".format(fmt(self.iter_wait_s.get())) @@ -947,7 +947,7 @@ def summary_string(self, exclude_first_window: bool = True) -> str: wait_time_s = self.wait_time_s[1 if exclude_first_window else 0 :] if wait_time_s: out += ( - "* Time stalled waiting for next datastream: " + "* Time stalled waiting for next dataset: " "{} min, {} max, {} mean, {} total\n".format( fmt(min(wait_time_s)), fmt(max(wait_time_s)), diff --git a/python/ray/data/_internal/util.py b/python/ray/data/_internal/util.py index 85ec96db6a2d..9751fea8009d 100644 --- a/python/ray/data/_internal/util.py +++ b/python/ray/data/_internal/util.py @@ -63,7 +63,7 @@ def _check_pyarrow_version(): if parse_version(version) < parse_version(MIN_PYARROW_VERSION): raise ImportError( - f"Datastream requires 
pyarrow >= {MIN_PYARROW_VERSION}, but " + f"Dataset requires pyarrow >= {MIN_PYARROW_VERSION}, but " f"{version} is installed. Reinstall with " f'`pip install -U "pyarrow"`. ' "If you want to disable this pyarrow version check, set the " @@ -74,7 +74,7 @@ def _check_pyarrow_version(): "You are using the 'pyarrow' module, but the exact version is unknown " "(possibly carried as an internal component by another module). Please " f"make sure you are using pyarrow >= {MIN_PYARROW_VERSION} to ensure " - "compatibility with Ray Datastream. " + "compatibility with Ray Dataset. " "If you want to disable this pyarrow version check, set the " f"environment variable {RAY_DISABLE_PYARROW_VERSION_CHECK}=1." ) @@ -103,7 +103,7 @@ def _autodetect_parallelism( Args: parallelism: The user-requested parallelism, or -1 for auto-detection. cur_pg: The current placement group, to be used for avail cpu calculation. - ctx: The current Datastream context to use for configs. + ctx: The current Dataset context to use for configs. reader: The datasource reader, to be used for data size estimation. avail_cpus: Override avail cpus detection (for testing only). @@ -141,7 +141,7 @@ def _autodetect_parallelism( def _estimate_avail_cpus(cur_pg: Optional["PlacementGroup"]) -> int: - """Estimates the available CPU parallelism for this Datastream in the cluster. + """Estimates the available CPU parallelism for this Dataset in the cluster. If we aren't in a placement group, this is trivially the number of CPUs in the cluster. Otherwise, we try to calculate how large the placement group is relative @@ -155,7 +155,7 @@ def _estimate_avail_cpus(cur_pg: Optional["PlacementGroup"]) -> int: # If we're in a placement group, we shouldn't assume the entire cluster's # resources are available for us to use. Estimate an upper bound on what's - # reasonable to assume is available for datastreams to use. + # reasonable to assume is available for datasets to use. 
if cur_pg: pg_cpus = 0 for bundle in cur_pg.bundle_specs: @@ -175,7 +175,7 @@ def _estimate_avail_cpus(cur_pg: Optional["PlacementGroup"]) -> int: def _estimate_available_parallelism() -> int: - """Estimates the available CPU parallelism for this Datastream in the cluster. + """Estimates the available CPU parallelism for this Dataset in the cluster. If we are currently in a placement group, take that into account.""" cur_pg = ray.util.get_current_placement_group() return _estimate_avail_cpus(cur_pg) @@ -351,18 +351,18 @@ def _consumption_api( insert_after=False, ): """Annotate the function with an indication that it's a consumption API, and that it - will trigger Datastream execution. + will trigger Dataset execution. """ base = ( " will trigger execution of the lazy transformations performed on " - "this datastream." + "this dataset." ) if delegate: message = delegate + base elif not if_more_than_read: message = "This operation" + base else: - condition = "If this datastream consists of more than a read, " + condition = "If this dataset consists of more than a read, " if datasource_metadata is not None: condition += ( f"or if the {datasource_metadata} can't be determined from the " @@ -387,7 +387,7 @@ def wrap(obj): def ConsumptionAPI(*args, **kwargs): """Annotate the function with an indication that it's a consumption API, and that it - will trigger Datastream execution. + will trigger Dataset execution. """ if len(args) == 1 and len(kwargs) == 0 and callable(args[0]): return _consumption_api()(args[0]) diff --git a/python/ray/data/aggregate.py b/python/ray/data/aggregate.py index 6545e8463d84..8d50f09f7e85 100644 --- a/python/ray/data/aggregate.py +++ b/python/ray/data/aggregate.py @@ -58,7 +58,7 @@ def __init__( finalize: This is called once to compute the final aggregation result from the fully merged accumulator. name: The name of the aggregation. This will be used as the output - column name in the case of Arrow datastream. 
+ column name in the case of Arrow dataset. """ if (accumulate_row is None and accumulate_block is None) or ( accumulate_row is not None and accumulate_block is not None diff --git a/python/ray/data/block.py b/python/ray/data/block.py index b29cfeb266e9..b9cc776a0ee1 100644 --- a/python/ray/data/block.py +++ b/python/ray/data/block.py @@ -75,14 +75,14 @@ def _validate_key_fn( ) -> None: """Check the key function is valid on the given schema.""" if schema is None: - # Datastream is empty/cleared, validation not possible. + # Dataset is empty/cleared, validation not possible. return ctx = ray.data.DataContext.get_current() is_simple_format = isinstance(schema, type) if isinstance(key, str): if is_simple_format: raise ValueError( - "String key '{}' requires datastream format to be " + "String key '{}' requires dataset format to be " "'arrow' or 'pandas', was 'simple'.".format(key) ) if len(schema.names) > 0 and key not in schema.names: @@ -95,13 +95,13 @@ def _validate_key_fn( elif key is None: if not is_simple_format: raise ValueError( - "The `None` key '{}' requires datastream format to be " + "The `None` key '{}' requires dataset format to be " "'simple'.".format(key) ) elif callable(key): if not is_simple_format: raise ValueError( - "Callable key '{}' requires datastream format to be " + "Callable key '{}' requires dataset format to be " "'simple'".format(key) ) else: @@ -471,7 +471,7 @@ def for_block(block: Block) -> "BlockAccessor[T]": raise StrictModeError( f"Error validating {_truncated_repr(block)}: " "Standalone Python objects are not " - "allowed in Ray 2.5. To use Python objects in a datastream, " + "allowed in Ray 2.5. To use Python objects in a dataset, " "wrap them in a dict of numpy arrays, e.g., " "return `{'item': np.array(batch)}` instead of just `batch`." 
) diff --git a/python/ray/data/context.py b/python/ray/data/context.py index eaa0a61b401e..519a1c3148e2 100644 --- a/python/ray/data/context.py +++ b/python/ray/data/context.py @@ -13,7 +13,7 @@ _default_context: "Optional[DataContext]" = None _context_lock = threading.Lock() -# An estimate of what fraction of the object store a Datastream can use without too high +# An estimate of what fraction of the object store a Dataset can use without too high # a risk of triggering spilling. This is used to generate user warnings only. ESTIMATED_SAFE_MEMORY_FRACTION = 0.25 @@ -21,7 +21,7 @@ # We choose 512MiB as 8x less than the typical memory:core ratio of 4:1. DEFAULT_TARGET_MAX_BLOCK_SIZE = 512 * 1024 * 1024 -# Datastream will avoid creating blocks smaller than this size in bytes on read. +# Dataset will avoid creating blocks smaller than this size in bytes on read. # This takes precedence over DEFAULT_MIN_PARALLELISM. DEFAULT_TARGET_MIN_BLOCK_SIZE = 1 * 1024 * 1024 @@ -38,10 +38,10 @@ # TODO (kfstorm): Remove this once stable. DEFAULT_ENABLE_PANDAS_BLOCK = True -# Whether to enable stage-fusion optimizations for datastream pipelines. +# Whether to enable stage-fusion optimizations for dataset pipelines. DEFAULT_OPTIMIZE_FUSE_STAGES = True -# Whether to enable stage-reorder optimizations for datastream pipelines. +# Whether to enable stage-reorder optimizations for dataset pipelines. DEFAULT_OPTIMIZE_REORDER_STAGES = True # Whether to furthermore fuse read stages. @@ -50,7 +50,7 @@ # Whether to furthermore fuse prior map tasks with shuffle stages. DEFAULT_OPTIMIZE_FUSE_SHUFFLE_STAGES = True -# Minimum amount of parallelism to auto-detect for a datastream. Note that the min +# Minimum amount of parallelism to auto-detect for a dataset. Note that the min # block size config takes precedence over this. DEFAULT_MIN_PARALLELISM = 200 @@ -65,7 +65,7 @@ # The default global scheduling strategy. 
DEFAULT_SCHEDULING_STRATEGY = "DEFAULT" -# Whether to use Polars for tabular datastream sorts, groupbys, and aggregations. +# Whether to use Polars for tabular dataset sorts, groupbys, and aggregations. DEFAULT_USE_POLARS = False # Whether to use the new executor backend. @@ -93,8 +93,8 @@ # extension columns. DEFAULT_ENABLE_TENSOR_EXTENSION_CASTING = True -# Whether to automatically print Datastream stats after execution. -# If disabled, users can still manually print stats with Datastream.stats(). +# Whether to automatically print Dataset stats after execution. +# If disabled, users can still manually print stats with Dataset.stats(). DEFAULT_AUTO_LOG_STATS = False # Whether to enable optimizer. @@ -132,7 +132,7 @@ @DeveloperAPI class DataContext: - """Singleton for shared Datastream resources and configurations. + """Singleton for shared Dataset resources and configurations. This object is automatically propagated to workers and can be retrieved from the driver and remote workers via DataContext.get_current(). @@ -195,7 +195,7 @@ def __init__( self.enable_auto_log_stats = enable_auto_log_stats self.trace_allocations = trace_allocations self.optimizer_enabled = optimizer_enabled - # TODO: expose execution options in Datastream public APIs. + # TODO: expose execution options in Dataset public APIs. self.execution_options = execution_options self.use_ray_tqdm = use_ray_tqdm self.use_legacy_iter_batches = use_legacy_iter_batches @@ -258,7 +258,7 @@ def get_current() -> "DataContext": def _set_current(context: "DataContext") -> None: """Set the current context in a remote worker. - This is used internally by Datastream to propagate the driver context to + This is used internally by Dataset to propagate the driver context to remote workers used for parallelization. 
""" global _default_context diff --git a/python/ray/data/dataset.py b/python/ray/data/dataset.py index b922ac2611a9..1d24cff5f383 100644 --- a/python/ray/data/dataset.py +++ b/python/ray/data/dataset.py @@ -1,4 +1,4516 @@ -from ray.data.datastream import Datastream +import collections +import itertools +import logging +import sys +import time +import html +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Dict, + Generic, + Iterable, + Iterator, + List, + Type, + Optional, + Tuple, + Union, + Mapping, +) +from uuid import uuid4 + +import numpy as np + +import ray +from ray.air.util.tensor_extensions.utils import _create_possibly_ragged_ndarray +import ray.cloudpickle as pickle +from ray._private.usage import usage_lib +from ray.air.constants import TENSOR_COLUMN_NAME +from ray.air.util.data_batch_conversion import BlockFormat +from ray.data._internal.logical.operators.all_to_all_operator import ( + RandomShuffle, + RandomizeBlocks, + Repartition, + Sort, +) +from ray.data._internal.logical.operators.n_ary_operator import Zip +from ray.data._internal.logical.optimizers import LogicalPlan +from ray.data._internal.logical.operators.limit_operator import Limit +from ray.data._internal.logical.operators.map_operator import ( + Filter, + FlatMap, + MapRows, + MapBatches, +) +from ray.data._internal.logical.operators.write_operator import Write +from ray.data._internal.planner.filter import generate_filter_fn +from ray.data._internal.planner.flat_map import generate_flat_map_fn +from ray.data._internal.planner.map_batches import generate_map_batches_fn +from ray.data._internal.planner.map_rows import generate_map_rows_fn +from ray.data._internal.planner.write import generate_write_fn +from ray.data.iterator import DataIterator +from ray.data._internal.block_list import BlockList +from ray.data._internal.iterator.iterator_impl import ( + DataIteratorImpl, +) +from ray.data._internal.iterator.stream_split_iterator import ( + StreamSplitDataIterator, +) +from 
ray.data._internal.compute import ( + ActorPoolStrategy, + CallableClass, + ComputeStrategy, + TaskPoolStrategy, +) +from ray.data._internal.delegating_block_builder import DelegatingBlockBuilder +from ray.data._internal.equalize import _equalize +from ray.data._internal.lazy_block_list import LazyBlockList +from ray.data._internal.util import ( + _estimate_available_parallelism, + _is_local_scheme, + ConsumptionAPI, +) +from ray.data._internal.pandas_block import PandasBlockSchema +from ray.data._internal.plan import ( + ExecutionPlan, + OneToOneStage, +) +from ray.data._internal.stage_impl import ( + RandomizeBlocksStage, + RepartitionStage, + RandomShuffleStage, + ZipStage, + SortStage, + LimitStage, +) +from ray.data._internal.progress_bar import ProgressBar +from ray.data._internal.remote_fn import cached_remote_fn +from ray.data._internal.split import _split_at_indices, _get_num_rows +from ray.data._internal.stats import DatasetStats, DatasetStatsSummary +from ray.data.aggregate import AggregateFn, Max, Mean, Min, Std, Sum +from ray.data.block import ( + VALID_BATCH_FORMATS, + STRICT_MODE_EXPLANATION, + _apply_strict_mode_batch_format, + _apply_strict_mode_batch_size, + UserDefinedFunction, + Block, + BlockAccessor, + BlockMetadata, + BlockPartition, + DataBatch, + StrictModeError, + T, + U, + _validate_key_fn, +) +from ray.data.context import ( + DataContext, + WARN_PREFIX, + OK_PREFIX, + ESTIMATED_SAFE_MEMORY_FRACTION, +) +from ray.data.datasource import ( + BlockWritePathProvider, + CSVDatasource, + Datasource, + DefaultBlockWritePathProvider, + JSONDatasource, + NumpyDatasource, + ParquetDatasource, + ReadTask, + TFRecordDatasource, + WriteResult, +) +from ray.data.datasource.file_based_datasource import ( + _unwrap_arrow_serialization_workaround, + _wrap_arrow_serialization_workaround, +) +from ray.data.random_access_dataset import RandomAccessDataset +from ray.types import ObjectRef +from ray.util.annotations import DeveloperAPI, PublicAPI, Deprecated 
+from ray.util.scheduling_strategies import NodeAffinitySchedulingStrategy +from ray.widgets import Template +from ray.widgets.util import ensure_notebook_deps, fallback_if_colab + +if sys.version_info >= (3, 8): + from typing import Literal +else: + from typing_extensions import Literal + +if TYPE_CHECKING: + import dask + import mars + import modin + import pandas + import pyarrow + import pyspark + import tensorflow as tf + import torch + import torch.utils.data + + from ray.data.dataset_pipeline import DatasetPipeline + from ray.data.grouped_data import GroupedData + from ray.data._internal.execution.interfaces import Executor, NodeIdStr + from ray.data._internal.torch_iterable_dataset import TorchTensorBatchType + from tensorflow_metadata.proto.v0 import schema_pb2 + + +logger = logging.getLogger(__name__) + +TensorflowFeatureTypeSpec = Union[ + "tf.TypeSpec", List["tf.TypeSpec"], Dict[str, "tf.TypeSpec"] +] + +TensorFlowTensorBatchType = Union["tf.Tensor", Dict[str, "tf.Tensor"]] + + +@PublicAPI +class Dataset: + """A Dataset is a distributed data collection for data loading and processing. + + Datasets are distributed pipelines that produce ``ObjectRef[Block]`` outputs, + where each block holds data in Arrow format, representing a shard of the overall + data collection. The block also determines the unit of parallelism. + + Datasets can be created in multiple ways: from synthetic data via ``range_*()`` + APIs, from existing memory data via ``from_*()`` APIs (this creates a subclass + of Dataset called ``MaterializedDataset``), or from external storage + systems such as local disk, S3, HDFS etc. via the ``read_*()`` APIs. The + (potentially processed) Dataset can be saved back to external storage systems + via the ``write_*()`` APIs. + + Examples: + >>> import ray + >>> # Create dataset from synthetic data. + >>> ds = ray.data.range(1000) + >>> # Create dataset from in-memory data. + >>> ds = ray.data.from_items( + ... 
[{"col1": i, "col2": i * 2} for i in range(1000)]) + >>> # Create dataset from external storage system. + >>> ds = ray.data.read_parquet("s3://bucket/path") # doctest: +SKIP + >>> # Save dataset back to external storage system. + >>> ds.write_csv("s3://bucket/output") # doctest: +SKIP + + Dataset has two kinds of operations: transformation, which takes in Dataset + and outputs a new Dataset (e.g. :py:meth:`.map_batches()`); and consumption, + which produces values (not Datatream) as output (e.g. :py:meth:`.iter_batches()`). + + Dataset transformations are lazy, with execution of the transformations being + triggered by downstream consumption. + + Dataset supports parallel processing at scale: transformations such as + :py:meth:`.map_batches()`, aggregations such as + :py:meth:`.min()`/:py:meth:`.max()`/:py:meth:`.mean()`, grouping via + :py:meth:`.groupby()`, shuffling operations such as :py:meth:`.sort()`, + :py:meth:`.random_shuffle()`, and :py:meth:`.repartition()`. + + Examples: + >>> import ray + >>> ds = ray.data.range(1000) + >>> # Transform batches (Dict[str, np.ndarray]) with map_batches(). + >>> ds.map_batches(lambda batch: {"id": batch["id"] * 2}) + MapBatches() + +- Dataset(num_blocks=17, num_rows=1000, schema={id: int64}) + >>> # Compute the maximum. + >>> ds.max("id") + 999 + >>> # Shuffle this dataset randomly. + >>> ds.random_shuffle() + RandomShuffle + +- Dataset(num_blocks=..., num_rows=1000, schema={id: int64}) + >>> # Sort it back in order. + >>> ds.sort("id") + Sort + +- Dataset(num_blocks=..., num_rows=1000, schema={id: int64}) + + Both unexecuted and materialized Datasets can be passed between Ray tasks and + actors without incurring a copy. Dataset supports conversion to/from several + more featureful dataframe libraries (e.g., Spark, Dask, Modin, MARS), and are also + compatible with distributed TensorFlow / PyTorch. 
+ """ + + def __init__( + self, + plan: ExecutionPlan, + epoch: int, + lazy: bool = True, + logical_plan: Optional[LogicalPlan] = None, + ): + """Construct a Dataset (internal API). + + The constructor is not part of the Dataset API. Use the ``ray.data.*`` + read methods to construct a dataset. + """ + assert isinstance(plan, ExecutionPlan) + usage_lib.record_library_usage("dataset") # Legacy telemetry name. + + if ray.util.log_once("strict_mode_explanation"): + logger.warning(STRICT_MODE_EXPLANATION) + + self._plan = plan + self._uuid = uuid4().hex + self._epoch = epoch + self._lazy = lazy + self._logical_plan = logical_plan + if logical_plan is not None: + self._plan.link_logical_plan(logical_plan) + + if not lazy: + self._plan.execute(allow_clear_input_blocks=False) + + # Handle to currently running executor for this dataset. + self._current_executor: Optional["Executor"] = None + + @staticmethod + def copy( + ds: "Dataset", _deep_copy: bool = False, _as: Optional[type] = None + ) -> "Dataset": + if not _as: + _as = Dataset + if _deep_copy: + return _as(ds._plan.deep_copy(), ds._epoch, ds._lazy, ds._logical_plan) + else: + return _as(ds._plan.copy(), ds._epoch, ds._lazy, ds._logical_plan) + + def map( + self, + fn: UserDefinedFunction[Dict[str, Any], Dict[str, Any]], + *, + compute: Optional[ComputeStrategy] = None, + **ray_remote_args, + ) -> "Dataset": + """Apply the given function to each record of this dataset. + + Note that mapping individual records can be quite slow. Consider using + `.map_batches()` for performance. + + Examples: + >>> import ray + >>> # Transform python objects. + >>> ds = ray.data.range(1000) + >>> # The function goes from record (Dict[str, Any]) to record. + >>> ds.map(lambda record: {"id": record["id"] * 2}) + Map + +- Dataset(num_blocks=..., num_rows=1000, schema={id: int64}) + >>> # Transform Arrow records. + >>> ds = ray.data.from_items( + ... 
[{"value": i} for i in range(1000)]) + >>> ds.map(lambda record: {"v2": record["value"] * 2}) + Map + +- Dataset(num_blocks=200, num_rows=1000, schema={value: int64}) + >>> # Define a callable class that persists state across + >>> # function invocations for efficiency. + >>> init_model = ... # doctest: +SKIP + >>> class CachedModel: + ... def __init__(self): + ... self.model = init_model() + ... def __call__(self, batch): + ... return self.model(batch) + >>> # Apply the transform in parallel on GPUs. Since + >>> # compute=ActorPoolStrategy(size=8) the transform will be applied on a + >>> # pool of 8 Ray actors, each allocated 1 GPU by Ray. + >>> ds.map(CachedModel, # doctest: +SKIP + ... compute=ray.data.ActorPoolStrategy(size=8), + ... num_gpus=1) + + Time complexity: O(dataset size / parallelism) + + Args: + fn: The function to apply to each record, or a class type + that can be instantiated to create such a callable. Callable classes are + only supported for the actor compute strategy. + compute: The compute strategy, either None (default) to use Ray + tasks, ``ray.data.ActorPoolStrategy(size=n)`` to use a fixed-size actor + pool, or ``ray.data.ActorPoolStrategy(min_size=m, max_size=n)`` for an + autoscaling actor pool. + ray_remote_args: Additional resource requirements to request from + ray (e.g., num_gpus=1 to request GPUs for the map tasks). + + .. seealso:: + + :meth:`~Dataset.flat_map`: + Call this method to create new records from existing ones. Unlike + :meth:`~Dataset.map`, a function passed to + :meth:`~Dataset.flat_map` can return multiple records. + + :meth:`~Dataset.flat_map` isn't recommended because it's slow; call + :meth:`~Dataset.map_batches` instead. + + :meth:`~Dataset.map_batches` + Call this method to transform batches of data. It's faster and more + flexible than :meth:`~Dataset.map` and :meth:`~Dataset.flat_map`. 
+ """ + if isinstance(fn, CallableClass) and ( + compute is None + or compute == "tasks" + or isinstance(compute, TaskPoolStrategy) + ): + raise ValueError( + "``compute`` must be specified when using a CallableClass, and must " + f"specify the actor compute strategy, but got: {compute}. " + "For example, use ``compute=ActorPoolStrategy(size=n)``." + ) + + self._warn_slow() + + transform_fn = generate_map_rows_fn() + + plan = self._plan.with_stage( + OneToOneStage( + "Map", + transform_fn, + compute, + ray_remote_args, + fn=fn, + ) + ) + + logical_plan = self._logical_plan + if logical_plan is not None: + map_op = MapRows( + logical_plan.dag, + fn, + compute=compute, + ray_remote_args=ray_remote_args, + ) + logical_plan = LogicalPlan(map_op) + return Dataset(plan, self._epoch, self._lazy, logical_plan) + + def map_batches( + self, + fn: UserDefinedFunction[DataBatch, DataBatch], + *, + batch_size: Union[int, None, Literal["default"]] = "default", + compute: Optional[ComputeStrategy] = None, + batch_format: Optional[str] = "default", + zero_copy_batch: bool = False, + fn_args: Optional[Iterable[Any]] = None, + fn_kwargs: Optional[Dict[str, Any]] = None, + fn_constructor_args: Optional[Iterable[Any]] = None, + fn_constructor_kwargs: Optional[Dict[str, Any]] = None, + **ray_remote_args, + ) -> "Dataset": + """Apply the given function to batches of data. + + This applies the ``fn`` in parallel with map tasks, with each task handling + a batch of data (typically Dict[str, np.ndarray] or pd.DataFrame). + + To learn more about writing functions for :meth:`~Dataset.map_batches`, read + :ref:`writing user-defined functions `. + + .. tip:: + If ``fn`` does not mutate its input, set ``zero_copy_batch=True`` to elide a + batch copy, which can improve performance and decrease memory utilization. + ``fn`` will then receive zero-copy read-only batches. 
+ If ``fn`` mutates its input, you will need to ensure that the batch provided + to ``fn`` is writable by setting ``zero_copy_batch=False`` (default). This + will create an extra, mutable copy of each batch before handing it to + ``fn``. + + .. note:: + The size of the batches provided to ``fn`` may be smaller than the provided + ``batch_size`` if ``batch_size`` doesn't evenly divide the block(s) sent to + a given map task. When ``batch_size`` is specified, each map task will be + sent a single block if the block is equal to or larger than ``batch_size``, + and will be sent a bundle of blocks up to (but not exceeding) + ``batch_size`` if blocks are smaller than ``batch_size``. + + Examples: + + >>> import numpy as np + >>> import ray + >>> ds = ray.data.from_items([ + ... {"name": "Luna", "age": 4}, + ... {"name": "Rory", "age": 14}, + ... {"name": "Scout", "age": 9}, + ... ]) + >>> ds # doctest: +SKIP + MaterializedDataset( + num_blocks=3, + num_rows=3, + schema={name: string, age: int64} + ) + + Here ``fn`` returns the same batch type as the input, but your ``fn`` can + also return a different batch type (e.g., pd.DataFrame). Read more about + :ref:`Transforming Data `. + + >>> from typing import Dict + >>> def map_fn(batch: Dict[str, np.ndarray]) -> Dict[str, np.ndarray]: + ... batch["age_in_dog_years"] = 7 * batch["age"] + ... return batch + >>> ds = ds.map_batches(map_fn) + >>> ds + MapBatches(map_fn) + +- Dataset(num_blocks=3, num_rows=3, schema={name: string, age: int64}) + + :ref:`Actors ` can improve the performance of some workloads. + For example, you can use :ref:`actors ` to load a model once + per worker instead of once per inference. + + To transform batches with :ref:`actors `, pass a callable type + to ``fn`` and specify an :class:`~ray.data.ActorPoolStrategy>`. + + In the example below, ``CachedModel`` is called on an autoscaling pool of + two to eight :ref:`actors `, each allocated one GPU by Ray. + + >>> init_large_model = ... 
# doctest: +SKIP + >>> class CachedModel: + ... def __init__(self): + ... self.model = init_large_model() + ... def __call__(self, item): + ... return self.model(item) + >>> ds.map_batches( # doctest: +SKIP + ... CachedModel, # doctest: +SKIP + ... batch_size=256, # doctest: +SKIP + ... compute=ray.data.ActorPoolStrategy(size=8), # doctest: +SKIP + ... num_gpus=1, + ... ) # doctest: +SKIP + + ``fn`` can also be a generator, yielding multiple batches in a single + invocation. This is useful when returning large objects. Instead of + returning a very large output batch, ``fn`` can instead yield the + output batch in chunks. + + >>> def map_fn_with_large_output(batch): + ... for i in range(3): + ... yield {"large_output": np.ones((100, 1000))} + >>> ds = ray.data.from_items([1]) + >>> ds = ds.map_batches(map_fn_with_large_output) + >>> ds + MapBatches(map_fn_with_large_output) + +- Dataset(num_blocks=1, num_rows=1, schema={item: int64}) + + + Args: + fn: The function or generator to apply to each record batch, or a class type + that can be instantiated to create such a callable. Callable classes are + only supported for the actor compute strategy. Note ``fn`` must be + pickle-able. + batch_size: The desired number of rows in each batch, or None to use entire + blocks as batches (blocks may contain different number of rows). + The actual size of the batch provided to ``fn`` may be smaller than + ``batch_size`` if ``batch_size`` doesn't evenly divide the block(s) sent + to a given map task. Default batch_size is 4096 with "default". + compute: The compute strategy, either "tasks" (default) to use Ray + tasks, ``ray.data.ActorPoolStrategy(size=n)`` to use a fixed-size actor + pool, or ``ray.data.ActorPoolStrategy(min_size=m, max_size=n)`` for an + autoscaling actor pool. 
+ batch_format: Specify ``"default"`` to use the default block format + (NumPy), ``"pandas"`` to select ``pandas.DataFrame``, "pyarrow" to + select ``pyarrow.Table``, or ``"numpy"`` to select + ``Dict[str, numpy.ndarray]``, or None to return the underlying block + exactly as is with no additional formatting. + zero_copy_batch: Whether ``fn`` should be provided zero-copy, read-only + batches. If this is ``True`` and no copy is required for the + ``batch_format`` conversion, the batch will be a zero-copy, read-only + view on data in Ray's object store, which can decrease memory + utilization and improve performance. If this is ``False``, the batch + will be writable, which will require an extra copy to guarantee. + If ``fn`` mutates its input, this will need to be ``False`` in order to + avoid "assignment destination is read-only" or "buffer source array is + read-only" errors. Default is ``False``. + fn_args: Positional arguments to pass to ``fn`` after the first argument. + These arguments are top-level arguments to the underlying Ray task. + fn_kwargs: Keyword arguments to pass to ``fn``. These arguments are + top-level arguments to the underlying Ray task. + fn_constructor_args: Positional arguments to pass to ``fn``'s constructor. + You can only provide this if ``fn`` is a callable class. These arguments + are top-level arguments in the underlying Ray actor construction task. + fn_constructor_kwargs: Keyword arguments to pass to ``fn``'s constructor. + This can only be provided if ``fn`` is a callable class. These arguments + are top-level arguments in the underlying Ray actor construction task. + ray_remote_args: Additional resource requirements to request from + ray (e.g., ``num_gpus=1`` to request GPUs for the map tasks). + + .. seealso:: + + :meth:`~Dataset.iter_batches` + Call this function to iterate over batches of data. + + :meth:`~Dataset.flat_map`: + Call this method to create new records from existing ones. 
Unlike + :meth:`~Dataset.map`, a function passed to :meth:`~Dataset.flat_map` + can return multiple records. + + :meth:`~Dataset.flat_map` isn't recommended because it's slow; call + :meth:`~Dataset.map_batches` instead. + + :meth:`~Dataset.map` + Call this method to transform one record at time. + + This method isn't recommended because it's slow; call + :meth:`~Dataset.map_batches` instead. + """ # noqa: E501 + + batch_format = _apply_strict_mode_batch_format(batch_format) + if batch_format == "native": + logger.warning("The 'native' batch format has been renamed 'default'.") + + target_block_size = None + if batch_size is not None and batch_size != "default": + if batch_size < 1: + raise ValueError("Batch size cannot be negative or 0") + # Enable blocks bundling when batch_size is specified by caller. + target_block_size = batch_size + + batch_size = _apply_strict_mode_batch_size( + batch_size, use_gpu="num_gpus" in ray_remote_args + ) + + if batch_format not in VALID_BATCH_FORMATS: + raise ValueError( + f"The batch format must be one of {VALID_BATCH_FORMATS}, got: " + f"{batch_format}" + ) + + if isinstance(fn, CallableClass) and ( + compute is None + or compute == "tasks" + or isinstance(compute, TaskPoolStrategy) + ): + raise ValueError( + "``compute`` must be specified when using a CallableClass, and must " + f"specify the actor compute strategy, but got: {compute}. " + "For example, use ``compute=ActorPoolStrategy(size=n)``." 
+ ) + + if fn_constructor_args is not None or fn_constructor_kwargs is not None: + if compute is None or ( + compute != "actors" and not isinstance(compute, ActorPoolStrategy) + ): + raise ValueError( + "fn_constructor_args and fn_constructor_kwargs can only be " + "specified if using the actor pool compute strategy, but got: " + f"{compute}" + ) + if not isinstance(fn, CallableClass): + raise ValueError( + "fn_constructor_args and fn_constructor_kwargs can only be " + "specified if providing a CallableClass instance for fn, but got: " + f"{fn}" + ) + + transform_fn = generate_map_batches_fn( + batch_size=batch_size, + batch_format=batch_format, + zero_copy_batch=zero_copy_batch, + ) + + # TODO(chengsu): pass function name to MapBatches logical operator. + if hasattr(fn, "__self__") and isinstance( + fn.__self__, ray.data.preprocessor.Preprocessor + ): + stage_name = fn.__self__.__class__.__name__ + else: + stage_name = f'MapBatches({getattr(fn, "__name__", type(fn))})' + + stage = OneToOneStage( + stage_name, + transform_fn, + compute, + ray_remote_args, + # TODO(Clark): Add a strict cap here. 
+ target_block_size=target_block_size, + fn=fn, + fn_args=fn_args, + fn_kwargs=fn_kwargs, + fn_constructor_args=fn_constructor_args, + fn_constructor_kwargs=fn_constructor_kwargs, + ) + plan = self._plan.with_stage(stage) + + logical_plan = self._logical_plan + if logical_plan is not None: + map_batches_op = MapBatches( + logical_plan.dag, + fn, + batch_size=batch_size, + batch_format=batch_format, + zero_copy_batch=zero_copy_batch, + target_block_size=target_block_size, + fn_args=fn_args, + fn_kwargs=fn_kwargs, + fn_constructor_args=fn_constructor_args, + fn_constructor_kwargs=fn_constructor_kwargs, + compute=compute, + ray_remote_args=ray_remote_args, + ) + logical_plan = LogicalPlan(map_batches_op) + + return Dataset(plan, self._epoch, self._lazy, logical_plan) + + def add_column( + self, + col: str, + fn: Callable[["pandas.DataFrame"], "pandas.Series"], + *, + compute: Optional[str] = None, + **ray_remote_args, + ) -> "Dataset": + """Add the given column to the dataset. + + This is only supported for datasets convertible to pandas format. + A function generating the new column values given the batch in pandas + format must be specified. + + Examples: + >>> import ray + >>> ds = ray.data.range(100) + >>> # Add a new column equal to value * 2. + >>> ds = ds.add_column("new_col", lambda df: df["id"] * 2) + >>> # Overwrite the existing "value" with zeros. + >>> ds = ds.add_column("id", lambda df: 0) + + Time complexity: O(dataset size / parallelism) + + Args: + col: Name of the column to add. If the name already exists, the + column will be overwritten. + fn: Map function generating the column values given a batch of + records in pandas format. + compute: The compute strategy, either "tasks" (default) to use Ray + tasks, ``ray.data.ActorPoolStrategy(size=n)`` to use a fixed-size actor + pool, or ``ray.data.ActorPoolStrategy(min_size=m, max_size=n)`` for an + autoscaling actor pool. 
+ ray_remote_args: Additional resource requirements to request from + ray (e.g., num_gpus=1 to request GPUs for the map tasks). + """ + + def process_batch(batch: "pandas.DataFrame") -> "pandas.DataFrame": + batch.loc[:, col] = fn(batch) + return batch + + if not callable(fn): + raise ValueError("`fn` must be callable, got {}".format(fn)) + + return self.map_batches( + process_batch, + batch_format="pandas", # TODO(ekl) we should make this configurable. + compute=compute, + zero_copy_batch=False, + **ray_remote_args, + ) + + def drop_columns( + self, + cols: List[str], + *, + compute: Optional[str] = None, + **ray_remote_args, + ) -> "Dataset": + """Drop one or more columns from the dataset. + + Examples: + >>> import ray + >>> ds = ray.data.range(100) + >>> # Add a new column equal to value * 2. + >>> ds = ds.add_column("new_col", lambda df: df["id"] * 2) + >>> # Drop the existing "value" column. + >>> ds = ds.drop_columns(["id"]) + + + Time complexity: O(dataset size / parallelism) + + Args: + cols: Names of the columns to drop. If any name does not exist, + an exception will be raised. + compute: The compute strategy, either "tasks" (default) to use Ray + tasks, ``ray.data.ActorPoolStrategy(size=n)`` to use a fixed-size actor + pool, or ``ray.data.ActorPoolStrategy(min_size=m, max_size=n)`` for an + autoscaling actor pool. + ray_remote_args: Additional resource requirements to request from + ray (e.g., num_gpus=1 to request GPUs for the map tasks). + """ + + return self.map_batches( + lambda batch: batch.drop(columns=cols), + batch_format="pandas", + zero_copy_batch=True, + compute=compute, + **ray_remote_args, + ) + + def select_columns( + self, + cols: List[str], + *, + compute: Union[str, ComputeStrategy] = None, + **ray_remote_args, + ) -> "Dataset": + """Select one or more columns from the dataset. + + All input columns used to select need to be in the schema of the dataset. 
+ + Examples: + >>> import ray + >>> # Create a dataset with 3 columns + >>> ds = ray.data.from_items([{"col1": i, "col2": i+1, "col3": i+2} + ... for i in range(10)]) + >>> # Select only "col1" and "col2" columns. + >>> ds = ds.select_columns(cols=["col1", "col2"]) + >>> ds + MapBatches() + +- Dataset( + num_blocks=10, + num_rows=10, + schema={col1: int64, col2: int64, col3: int64} + ) + + + Time complexity: O(dataset size / parallelism) + + Args: + cols: Names of the columns to select. If any name is not included in the + dataset schema, an exception will be raised. + compute: The compute strategy, either "tasks" (default) to use Ray + tasks, ``ray.data.ActorPoolStrategy(size=n)`` to use a fixed-size actor + pool, or ``ray.data.ActorPoolStrategy(min_size=m, max_size=n)`` for an + autoscaling actor pool. + ray_remote_args: Additional resource requirements to request from + ray (e.g., num_gpus=1 to request GPUs for the map tasks). + """ # noqa: E501 + return self.map_batches( + lambda batch: BlockAccessor.for_block(batch).select(columns=cols), + batch_format="pandas", + zero_copy_batch=True, + compute=compute, + **ray_remote_args, + ) + + def flat_map( + self, + fn: UserDefinedFunction[Dict[str, Any], List[Dict[str, Any]]], + *, + compute: Optional[ComputeStrategy] = None, + **ray_remote_args, + ) -> "Dataset": + """Apply the given function to each record and then flatten results. + + Consider using ``.map_batches()`` for better performance (the batch size can be + altered in map_batches). + + Examples: + >>> import ray + >>> ds = ray.data.range(1000) + >>> ds.flat_map(lambda x: [{"id": 1}, {"id": 2}, {"id": 4}]) + FlatMap + +- Dataset(num_blocks=..., num_rows=1000, schema={id: int64}) + + Time complexity: O(dataset size / parallelism) + + Args: + fn: The function or generator to apply to each record, or a class type + that can be instantiated to create such a callable. Callable classes are + only supported for the actor compute strategy. 
+ compute: The compute strategy, either "tasks" (default) to use Ray + tasks, ``ray.data.ActorPoolStrategy(size=n)`` to use a fixed-size actor + pool, or ``ray.data.ActorPoolStrategy(min_size=m, max_size=n)`` for an + autoscaling actor pool. + ray_remote_args: Additional resource requirements to request from + ray (e.g., num_gpus=1 to request GPUs for the map tasks). + + .. seealso:: + + :meth:`~Dataset.map_batches` + Call this method to transform batches of data. It's faster and more + flexible than :meth:`~Dataset.map` and :meth:`~Dataset.flat_map`. + + :meth:`~Dataset.map` + Call this method to transform one record at time. + + This method isn't recommended because it's slow; call + :meth:`~Dataset.map_batches` instead. + """ + if isinstance(fn, CallableClass) and ( + compute is None + or compute == "tasks" + or isinstance(compute, TaskPoolStrategy) + ): + raise ValueError( + "``compute`` must be specified when using a CallableClass, and must " + f"specify the actor compute strategy, but got: {compute}. " + "For example, use ``compute=ActorPoolStrategy(size=n)``." + ) + + self._warn_slow() + + transform_fn = generate_flat_map_fn() + + plan = self._plan.with_stage( + OneToOneStage("FlatMap", transform_fn, compute, ray_remote_args, fn=fn) + ) + + logical_plan = self._logical_plan + if logical_plan is not None: + op = FlatMap( + input_op=logical_plan.dag, + fn=fn, + compute=compute, + ray_remote_args=ray_remote_args, + ) + logical_plan = LogicalPlan(op) + return Dataset(plan, self._epoch, self._lazy, logical_plan) + + def filter( + self, + fn: UserDefinedFunction[Dict[str, Any], bool], + *, + compute: Union[str, ComputeStrategy] = None, + **ray_remote_args, + ) -> "Dataset": + """Filter out records that do not satisfy the given predicate. + + Consider using ``.map_batches()`` for better performance (you can implement + filter by dropping records). 
+ + Examples: + >>> import ray + >>> ds = ray.data.range(100) + >>> ds.filter(lambda x: x["id"] % 2 == 0) + Filter + +- Dataset(num_blocks=..., num_rows=100, schema={id: int64}) + + Time complexity: O(dataset size / parallelism) + + Args: + fn: The predicate to apply to each record, or a class type + that can be instantiated to create such a callable. Callable classes are + only supported for the actor compute strategy. + compute: The compute strategy, either "tasks" (default) to use Ray + tasks, ``ray.data.ActorPoolStrategy(size=n)`` to use a fixed-size actor + pool, or ``ray.data.ActorPoolStrategy(min_size=m, max_size=n)`` for an + autoscaling actor pool. + ray_remote_args: Additional resource requirements to request from + ray (e.g., num_gpus=1 to request GPUs for the map tasks). + """ + if isinstance(fn, CallableClass) and ( + compute is None + or compute == "tasks" + or isinstance(compute, TaskPoolStrategy) + ): + raise ValueError( + "``compute`` must be specified when using a CallableClass, and must " + f"specify the actor compute strategy, but got: {compute}. " + "For example, use ``compute=ActorPoolStrategy(size=n)``." + ) + + self._warn_slow() + + transform_fn = generate_filter_fn() + + plan = self._plan.with_stage( + OneToOneStage("Filter", transform_fn, compute, ray_remote_args, fn=fn) + ) + + logical_plan = self._logical_plan + if logical_plan is not None: + op = Filter( + input_op=logical_plan.dag, + fn=fn, + compute=compute, + ray_remote_args=ray_remote_args, + ) + logical_plan = LogicalPlan(op) + + return Dataset(plan, self._epoch, self._lazy, logical_plan) + + def repartition(self, num_blocks: int, *, shuffle: bool = False) -> "Dataset": + """Repartition the dataset into exactly this number of blocks. + + After repartitioning, all blocks in the returned dataset will have + approximately the same number of rows. + + Examples: + >>> import ray + >>> ds = ray.data.range(100) + >>> # Set the number of output partitions to write to disk. 
+ >>> ds.repartition(10).write_parquet("/tmp/test") + + Time complexity: O(dataset size / parallelism) + + Args: + num_blocks: The number of blocks. + shuffle: Whether to perform a distributed shuffle during the + repartition. When shuffle is enabled, each output block + contains a subset of data rows from each input block, which + requires all-to-all data movement. When shuffle is disabled, + output blocks are created from adjacent input blocks, + minimizing data movement. + + Returns: + The repartitioned dataset. + """ + + plan = self._plan.with_stage(RepartitionStage(num_blocks, shuffle)) + + logical_plan = self._logical_plan + if logical_plan is not None: + op = Repartition( + logical_plan.dag, + num_outputs=num_blocks, + shuffle=shuffle, + ) + logical_plan = LogicalPlan(op) + return Dataset(plan, self._epoch, self._lazy, logical_plan) + + def random_shuffle( + self, + *, + seed: Optional[int] = None, + num_blocks: Optional[int] = None, + **ray_remote_args, + ) -> "Dataset": + """Randomly shuffle the elements of this dataset. + + Examples: + >>> import ray + >>> ds = ray.data.range(100) + >>> # Shuffle this dataset randomly. + >>> ds.random_shuffle() + RandomShuffle + +- Dataset(num_blocks=..., num_rows=100, schema={id: int64}) + >>> # Shuffle this dataset with a fixed random seed. + >>> ds.random_shuffle(seed=12345) + RandomShuffle + +- Dataset(num_blocks=..., num_rows=100, schema={id: int64}) + + Time complexity: O(dataset size / parallelism) + + Args: + seed: Fix the random seed to use, otherwise one will be chosen + based on system randomness. + num_blocks: The number of output blocks after the shuffle, or None + to retain the number of blocks. + + Returns: + The shuffled dataset. 
+ """ + + plan = self._plan.with_stage( + RandomShuffleStage(seed, num_blocks, ray_remote_args) + ) + + logical_plan = self._logical_plan + if logical_plan is not None: + op = RandomShuffle( + logical_plan.dag, + seed=seed, + num_outputs=num_blocks, + ray_remote_args=ray_remote_args, + ) + logical_plan = LogicalPlan(op) + return Dataset(plan, self._epoch, self._lazy, logical_plan) + + def randomize_block_order( + self, + *, + seed: Optional[int] = None, + ) -> "Dataset": + """Randomly shuffle the blocks of this dataset. + + Examples: + >>> import ray + >>> ds = ray.data.range(100) # doctest: +SKIP + >>> # Randomize the block order. + >>> ds.randomize_block_order() # doctest: +SKIP + >>> # Randomize the block order with a fixed random seed. + >>> ds.randomize_block_order(seed=12345) # doctest: +SKIP + + Args: + seed: Fix the random seed to use, otherwise one will be chosen + based on system randomness. + + Returns: + The block-shuffled dataset. + """ + + plan = self._plan.with_stage(RandomizeBlocksStage(seed)) + + logical_plan = self._logical_plan + if logical_plan is not None: + op = RandomizeBlocks( + logical_plan.dag, + seed=seed, + ) + logical_plan = LogicalPlan(op) + return Dataset(plan, self._epoch, self._lazy, logical_plan) + + def random_sample( + self, fraction: float, *, seed: Optional[int] = None + ) -> "Dataset": + """Randomly samples a fraction of the elements of this dataset. + + Note that the exact number of elements returned is not guaranteed, + and that the number of elements being returned is roughly fraction * total_rows. + + Examples: + >>> import ray + >>> ds = ray.data.range(100) # doctest: +SKIP + >>> ds.random_sample(0.1) # doctest: +SKIP + >>> ds.random_sample(0.2, seed=12345) # doctest: +SKIP + + Args: + fraction: The fraction of elements to sample. + seed: Seeds the python random pRNG generator. + + Returns: + Returns a Dataset containing the sampled elements. 
+ """ + import random + + import pandas as pd + import pyarrow as pa + + if self.num_blocks() == 0: + raise ValueError("Cannot sample from an empty Dataset.") + + if fraction < 0 or fraction > 1: + raise ValueError("Fraction must be between 0 and 1.") + + if seed is not None: + random.seed(seed) + + def process_batch(batch): + if isinstance(batch, list): + return [row for row in batch if random.random() <= fraction] + if isinstance(batch, pa.Table): + # Lets the item pass if weight generated for that item <= fraction + return batch.filter( + pa.array(random.random() <= fraction for _ in range(len(batch))) + ) + if isinstance(batch, pd.DataFrame): + return batch.sample(frac=fraction) + if isinstance(batch, np.ndarray): + return _create_possibly_ragged_ndarray( + [row for row in batch if random.random() <= fraction] + ) + raise ValueError(f"Unsupported batch type: {type(batch)}") + + return self.map_batches(process_batch, batch_format=None) + + @ConsumptionAPI + def streaming_split( + self, + n: int, + *, + equal: bool = False, + locality_hints: Optional[List["NodeIdStr"]] = None, + ) -> List[DataIterator]: + """Returns ``n`` :class:`DataIterators ` that can + be used to read disjoint subsets of the dataset in parallel. + + This method is the recommended way to consume Datasets from multiple + processes (e.g., for distributed training), and requires streaming execution + mode. + + Streaming split works by delegating the execution of this Dataset to a + coordinator actor. The coordinator pulls block references from the executed + stream, and divides those blocks among `n` output iterators. Iterators pull + blocks from the coordinator actor to return to their caller on `next`. + + The returned iterators are also repeatable; each iteration will trigger a + new execution of the Dataset. There is an implicit barrier at the start of + each iteration, which means that `next` must be called on all iterators before + the iteration starts. 
+ + Warning: because iterators are pulling blocks from the same Dataset + execution, if one iterator falls behind other iterators may be stalled. + + Examples: + >>> import ray + >>> ds = ray.data.range(1000000) + >>> it1, it2 = ds.streaming_split(2, equal=True) + + >>> # Can consume from both iterators in parallel. + >>> @ray.remote + ... def consume(it): + ... for batch in it.iter_batches(): + ... print(batch) + >>> ray.get([consume.remote(it1), consume.remote(it2)]) # doctest: +SKIP + + >>> # Can loop over the iterators multiple times (multiple epochs). + >>> @ray.remote + ... def train(it): + ... NUM_EPOCHS = 100 + ... for _ in range(NUM_EPOCHS): + ... for batch in it.iter_batches(): + ... print(batch) + >>> ray.get([train.remote(it1), train.remote(it2)]) # doctest: +SKIP + + >>> # ERROR: this will block waiting for a read on `it2` to start. + >>> ray.get(train.remote(it1)) # doctest: +SKIP + + Args: + n: Number of output iterators to return. + equal: If True, each output iterator will see an exactly equal number + of rows, dropping data if necessary. If False, some iterators may see + slightly more or less rows than other, but no data will be dropped. + locality_hints: Specify the node ids corresponding to each iterator + location. Dataset will try to minimize data movement based on the + iterator output locations. This list must have length ``n``. You can + get the current node id of a task or actor by calling + ``ray.get_runtime_context().get_node_id()``. + + Returns: + The output iterator splits. These iterators are Ray-serializable and can + be freely passed to any Ray task or actor. + """ + return StreamSplitDataIterator.create(self, n, equal, locality_hints) + + @ConsumptionAPI + def split( + self, n: int, *, equal: bool = False, locality_hints: Optional[List[Any]] = None + ) -> List["MaterializedDataset"]: + """Materialize and split the dataset into ``n`` disjoint pieces. 
+ + This returns a list of MaterializedDatasets that can be passed to Ray tasks + and actors and used to read the dataset records in parallel. + + Examples: + >>> import ray + >>> ds = ray.data.range(100) # doctest: +SKIP + >>> workers = ... # doctest: +SKIP + >>> # Split up a dataset to process over `n` worker actors. + >>> shards = ds.split(len(workers), locality_hints=workers) # doctest: +SKIP + >>> for shard, worker in zip(shards, workers): # doctest: +SKIP + ... worker.consume.remote(shard) # doctest: +SKIP + + Time complexity: O(1) + + See also: ``Dataset.split_at_indices``, ``Dataset.split_proportionately``, + and ``Dataset.streaming_split``. + + Args: + n: Number of child datasets to return. + equal: Whether to guarantee each split has an equal + number of records. This may drop records if they cannot be + divided equally among the splits. + locality_hints: [Experimental] A list of Ray actor handles of size ``n``. + The system will try to co-locate the blocks of the i-th dataset + with the i-th actor to maximize data locality. + + Returns: + A list of ``n`` disjoint dataset splits. + """ + if n <= 0: + raise ValueError(f"The number of splits {n} is not positive.") + + # fallback to split_at_indices for equal split without locality hints. + # simple benchmarks shows spilit_at_indices yields more stable performance. + # https://github.com/ray-project/ray/pull/26641 for more context. + if equal and locality_hints is None: + count = self.count() + split_index = count // n + # we are creating n split_indices which will generate + # n + 1 splits; the last split will at most contains (n - 1) + # rows, which could be safely dropped. + split_indices = [split_index * i for i in range(1, n + 1)] + shards = self.split_at_indices(split_indices) + return shards[:n] + + if locality_hints and len(locality_hints) != n: + raise ValueError( + f"The length of locality_hints {len(locality_hints)} " + f"doesn't equal the number of splits {n}." 
+ ) + # TODO: this is unreachable code. + if len(set(locality_hints)) != len(locality_hints): + raise ValueError( + "locality_hints must not contain duplicate actor handles" + ) + + blocks = self._plan.execute() + owned_by_consumer = blocks._owned_by_consumer + stats = self._plan.stats() + block_refs, metadata = zip(*blocks.get_blocks_with_metadata()) + + if locality_hints is None: + blocks = np.array_split(block_refs, n) + meta = np.array_split(metadata, n) + return [ + MaterializedDataset( + ExecutionPlan( + BlockList( + b.tolist(), m.tolist(), owned_by_consumer=owned_by_consumer + ), + stats, + run_by_consumer=owned_by_consumer, + ), + self._epoch, + self._lazy, + ) + for b, m in zip(blocks, meta) + ] + + metadata_mapping = {b: m for b, m in zip(block_refs, metadata)} + + # If the locality_hints is set, we use a two-round greedy algorithm + # to co-locate the blocks with the actors based on block + # and actor's location (node_id). + # + # The split algorithm tries to allocate equally-sized blocks regardless + # of locality. Thus we first calculate the expected number of blocks + # for each split. + # + # In the first round, for each actor, we look for all blocks that + # match the actor's node_id, then allocate those matched blocks to + # this actor until we reach the limit(expected number). + # + # In the second round: fill each actor's allocation with + # remaining unallocated blocks until we reach the limit. + + def build_allocation_size_map( + num_blocks: int, actors: List[Any] + ) -> Dict[Any, int]: + """Given the total number of blocks and a list of actors, calcuate + the expected number of blocks to allocate for each actor. 
+ """ + num_actors = len(actors) + num_blocks_per_actor = num_blocks // num_actors + num_blocks_left = num_blocks - num_blocks_per_actor * n + num_blocks_by_actor = {} + for i, actor in enumerate(actors): + num_blocks_by_actor[actor] = num_blocks_per_actor + if i < num_blocks_left: + num_blocks_by_actor[actor] += 1 + return num_blocks_by_actor + + def build_block_refs_by_node_id( + blocks: List[ObjectRef[Block]], + ) -> Dict[str, List[ObjectRef[Block]]]: + """Build the reverse index from node_id to block_refs. For + simplicity, if the block is stored on multiple nodes we + only pick the first one. + """ + block_ref_locations = ray.experimental.get_object_locations(blocks) + block_refs_by_node_id = collections.defaultdict(list) + for block_ref in blocks: + node_ids = block_ref_locations.get(block_ref, {}).get("node_ids", []) + node_id = node_ids[0] if node_ids else None + block_refs_by_node_id[node_id].append(block_ref) + return block_refs_by_node_id + + def build_node_id_by_actor(actors: List[Any]) -> Dict[Any, str]: + """Build a map from a actor to its node_id.""" + actors_state = ray._private.state.actors() + return { + actor: actors_state.get(actor._actor_id.hex(), {}) + .get("Address", {}) + .get("NodeID") + for actor in actors + } + + # expected number of blocks to be allocated for each actor + expected_block_count_by_actor = build_allocation_size_map( + len(block_refs), locality_hints + ) + # the reverse index from node_id to block_refs + block_refs_by_node_id = build_block_refs_by_node_id(block_refs) + # the map from actor to its node_id + node_id_by_actor = build_node_id_by_actor(locality_hints) + + allocation_per_actor = collections.defaultdict(list) + + # In the first round, for each actor, we look for all blocks that + # match the actor's node_id, then allocate those matched blocks to + # this actor until we reach the limit(expected number) + for actor in locality_hints: + node_id = node_id_by_actor[actor] + matching_blocks = 
block_refs_by_node_id[node_id] + expected_block_count = expected_block_count_by_actor[actor] + allocation = [] + while matching_blocks and len(allocation) < expected_block_count: + allocation.append(matching_blocks.pop()) + allocation_per_actor[actor] = allocation + + # In the second round: fill each actor's allocation with + # remaining unallocated blocks until we reach the limit + remaining_block_refs = list( + itertools.chain.from_iterable(block_refs_by_node_id.values()) + ) + for actor in locality_hints: + while ( + len(allocation_per_actor[actor]) < expected_block_count_by_actor[actor] + ): + allocation_per_actor[actor].append(remaining_block_refs.pop()) + + assert len(remaining_block_refs) == 0, len(remaining_block_refs) + + per_split_block_lists = [ + BlockList( + allocation_per_actor[actor], + [metadata_mapping[b] for b in allocation_per_actor[actor]], + owned_by_consumer=owned_by_consumer, + ) + for actor in locality_hints + ] + + if equal: + # equalize the splits + per_split_block_lists = _equalize(per_split_block_lists, owned_by_consumer) + + return [ + MaterializedDataset( + ExecutionPlan( + block_split, + stats, + run_by_consumer=owned_by_consumer, + ), + self._epoch, + self._lazy, + ) + for block_split in per_split_block_lists + ] + + @ConsumptionAPI + def split_at_indices(self, indices: List[int]) -> List["MaterializedDataset"]: + """Materialize and split the dataset at the given indices (like np.split). + + Examples: + >>> import ray + >>> ds = ray.data.range(10) + >>> d1, d2, d3 = ds.split_at_indices([2, 5]) + >>> d1.take_batch() + {'id': array([0, 1])} + >>> d2.take_batch() + {'id': array([2, 3, 4])} + >>> d3.take_batch() + {'id': array([5, 6, 7, 8, 9])} + + Time complexity: O(num splits) + + See also: ``Dataset.split_at_indices``, ``Dataset.split_proportionately``, + and ``Dataset.streaming_split``. + + Args: + indices: List of sorted integers which indicate where the dataset + will be split. 
If an index exceeds the length of the dataset, + an empty dataset will be returned. + + Returns: + The dataset splits. + """ + + if len(indices) < 1: + raise ValueError("indices must be at least of length 1") + if sorted(indices) != indices: + raise ValueError("indices must be sorted") + if indices[0] < 0: + raise ValueError("indices must be positive") + start_time = time.perf_counter() + block_list = self._plan.execute() + blocks, metadata = _split_at_indices( + block_list.get_blocks_with_metadata(), + indices, + block_list._owned_by_consumer, + ) + split_duration = time.perf_counter() - start_time + parent_stats = self._plan.stats() + splits = [] + for bs, ms in zip(blocks, metadata): + stats = DatasetStats(stages={"Split": ms}, parent=parent_stats) + stats.time_total_s = split_duration + splits.append( + MaterializedDataset( + ExecutionPlan( + BlockList( + bs, ms, owned_by_consumer=block_list._owned_by_consumer + ), + stats, + run_by_consumer=block_list._owned_by_consumer, + ), + self._epoch, + self._lazy, + ) + ) + return splits + + @ConsumptionAPI + def split_proportionately( + self, proportions: List[float] + ) -> List["MaterializedDataset"]: + """Materialize and split the dataset using proportions. + + A common use case for this would be splitting the dataset into train + and test sets (equivalent to eg. scikit-learn's ``train_test_split``). + See also ``Dataset.train_test_split`` for a higher level abstraction. + + The indices to split at will be calculated in such a way so that all splits + always contains at least one element. If that is not possible, + an exception will be raised. + + This is equivalent to caulculating the indices manually and calling + ``Dataset.split_at_indices``. 
+ + Examples: + >>> import ray + >>> ds = ray.data.range(10) + >>> d1, d2, d3 = ds.split_proportionately([0.2, 0.5]) + >>> d1.take_batch() + {'id': array([0, 1])} + >>> d2.take_batch() + {'id': array([2, 3, 4, 5, 6])} + >>> d3.take_batch() + {'id': array([7, 8, 9])} + + Time complexity: O(num splits) + + See also: ``Dataset.split``, ``Dataset.split_at_indices``, + ``Dataset.train_test_split`` + + Args: + proportions: List of proportions to split the dataset according to. + Must sum up to less than 1, and each proportion has to be bigger + than 0. + + Returns: + The dataset splits. + """ + + if len(proportions) < 1: + raise ValueError("proportions must be at least of length 1") + if sum(proportions) >= 1: + raise ValueError("proportions must sum to less than 1") + if any(p <= 0 for p in proportions): + raise ValueError("proportions must be bigger than 0") + + dataset_length = self.count() + cumulative_proportions = np.cumsum(proportions) + split_indices = [ + int(dataset_length * proportion) for proportion in cumulative_proportions + ] + + # Ensure each split has at least one element + subtract = 0 + for i in range(len(split_indices) - 2, -1, -1): + split_indices[i] -= subtract + if split_indices[i] == split_indices[i + 1]: + subtract += 1 + split_indices[i] -= 1 + if any(i <= 0 for i in split_indices): + raise ValueError( + "Couldn't create non-empty splits with the given proportions." + ) + + return self.split_at_indices(split_indices) + + @ConsumptionAPI + def train_test_split( + self, + test_size: Union[int, float], + *, + shuffle: bool = False, + seed: Optional[int] = None, + ) -> Tuple["MaterializedDataset", "MaterializedDataset"]: + """Materialize and split the dataset into train and test subsets. 
+ + Examples: + + >>> import ray + >>> ds = ray.data.range(8) + >>> train, test = ds.train_test_split(test_size=0.25) + >>> train.take_batch() + {'id': array([0, 1, 2, 3, 4, 5])} + >>> test.take_batch() + {'id': array([6, 7])} + + Args: + test_size: If float, should be between 0.0 and 1.0 and represent the + proportion of the dataset to include in the test split. If int, + represents the absolute number of test samples. The train split will + always be the compliment of the test split. + shuffle: Whether or not to globally shuffle the dataset before splitting. + Defaults to False. This may be a very expensive operation with large + dataset. + seed: Fix the random seed to use for shuffle, otherwise one will be chosen + based on system randomness. Ignored if ``shuffle=False``. + + Returns: + Train and test subsets as two MaterializedDatasets. + """ + ds = self + + if shuffle: + ds = ds.random_shuffle(seed=seed) + + if not isinstance(test_size, (int, float)): + raise TypeError(f"`test_size` must be int or float got {type(test_size)}.") + if isinstance(test_size, float): + if test_size <= 0 or test_size >= 1: + raise ValueError( + "If `test_size` is a float, it must be bigger than 0 and smaller " + f"than 1. Got {test_size}." + ) + return ds.split_proportionately([1 - test_size]) + else: + ds_length = ds.count() + if test_size <= 0 or test_size >= ds_length: + raise ValueError( + "If `test_size` is an int, it must be bigger than 0 and smaller " + f"than the size of the dataset ({ds_length}). " + f"Got {test_size}." + ) + return ds.split_at_indices([ds_length - test_size]) + + @ConsumptionAPI(pattern="Args:") + def union(self, *other: List["Dataset"]) -> "Dataset": + """Materialize and combine this dataset with others of the same type. + + The order of the blocks in the datasets is preserved, as is the + relative ordering between the datasets passed in the argument list. + + .. note:: + Unioned datasets are not lineage-serializable, i.e. 
they can not be + used as a tunable hyperparameter in Ray Tune. + + Args: + other: List of datasets to combine with this one. The datasets + must have the same schema as this dataset, otherwise the + behavior is undefined. + + Returns: + A new dataset holding the union of their data. + """ + + start_time = time.perf_counter() + + owned_by_consumer = self._plan.execute()._owned_by_consumer + datasets = [self] + list(other) + bls = [] + has_nonlazy = False + for ds in datasets: + bl = ds._plan.execute() + if not isinstance(bl, LazyBlockList): + has_nonlazy = True + bls.append(bl) + if has_nonlazy: + blocks = [] + metadata = [] + for bl in bls: + if isinstance(bl, LazyBlockList): + bs, ms = bl._get_blocks_with_metadata() + else: + bs, ms = bl._blocks, bl._metadata + blocks.extend(bs) + metadata.extend(ms) + blocklist = BlockList(blocks, metadata, owned_by_consumer=owned_by_consumer) + else: + tasks: List[ReadTask] = [] + block_partition_refs: List[ObjectRef[BlockPartition]] = [] + block_partition_meta_refs: List[ObjectRef[BlockMetadata]] = [] + + # Gather read task names from input blocks of unioned Datasets, + # and concat them before passing to resulting LazyBlockList + read_task_names = [] + self_read_name = self._plan._in_blocks._read_stage_name or "Read" + read_task_names.append(self_read_name) + other_read_names = [ + o._plan._in_blocks._read_stage_name or "Read" for o in other + ] + read_task_names.extend(other_read_names) + + for bl in bls: + tasks.extend(bl._tasks) + block_partition_refs.extend(bl._block_partition_refs) + block_partition_meta_refs.extend(bl._block_partition_meta_refs) + blocklist = LazyBlockList( + tasks, + f"Union({','.join(read_task_names)})", + block_partition_refs, + block_partition_meta_refs, + owned_by_consumer=owned_by_consumer, + ) + + epochs = [ds._get_epoch() for ds in datasets] + max_epoch = max(*epochs) + if len(set(epochs)) > 1: + if ray.util.log_once("dataset_epoch_warned"): + logger.warning( + "Dataset contains data from 
multiple epochs: {}, " + "likely due to a `rewindow()` call. The higher epoch " + "number {} will be used. This warning will not " + "be shown again.".format(set(epochs), max_epoch) + ) + stats = DatasetStats( + stages={"Union": []}, + parent=[d._plan.stats() for d in datasets], + ) + stats.time_total_s = time.perf_counter() - start_time + return Dataset( + ExecutionPlan(blocklist, stats, run_by_consumer=owned_by_consumer), + max_epoch, + self._lazy, + ) + + def groupby(self, key: Optional[str]) -> "GroupedData": + """Group the dataset by the key function or column name. + + Examples: + >>> import ray + >>> # Group by a table column and aggregate. + >>> ray.data.from_items([ + ... {"A": x % 3, "B": x} for x in range(100)]).groupby( + ... "A").count() + Aggregate + +- Dataset(num_blocks=100, num_rows=100, schema={A: int64, B: int64}) + + Time complexity: O(dataset size * log(dataset size / parallelism)) + + Args: + key: A column name. If this is None, the grouping is global. + + Returns: + A lazy GroupedData that can be aggregated later. + """ + from ray.data.grouped_data import GroupedData + + # Always allow None since groupby interprets that as grouping all + # records into a single global group. + if key is not None: + _validate_key_fn(self.schema(fetch_if_missing=True), key) + + return GroupedData(self, key) + + @ConsumptionAPI + def aggregate(self, *aggs: AggregateFn) -> Union[Any, Dict[str, Any]]: + """Aggregate the entire dataset as one group. + + Examples: + >>> import ray + >>> from ray.data.aggregate import Max, Mean + >>> ray.data.range(100).aggregate(Max("id"), Mean("id")) + {'max(id)': 99, 'mean(id)': 49.5} + + Time complexity: O(dataset size / parallelism) + + Args: + aggs: Aggregations to do. + + Returns: + If the input dataset is a simple dataset then the output is + a tuple of ``(agg1, agg2, ...)`` where each tuple element is + the corresponding aggregation result. 
+ If the input dataset is an Arrow dataset then the output is + an dict where each column is the corresponding aggregation result. + If the dataset is empty, return ``None``. + """ + ret = self.groupby(None).aggregate(*aggs).take(1) + return ret[0] if len(ret) > 0 else None + + @ConsumptionAPI + def sum( + self, on: Optional[Union[str, List[str]]] = None, ignore_nulls: bool = True + ) -> Union[Any, Dict[str, Any]]: + """Compute sum over entire dataset. + + Examples: + >>> import ray + >>> ray.data.range(100).sum("id") + 4950 + >>> ray.data.from_items([ + ... {"A": i, "B": i**2} + ... for i in range(100)]).sum(["A", "B"]) + {'sum(A)': 4950, 'sum(B)': 328350} + + Args: + on: a column name or a list of column names to aggregate. + ignore_nulls: Whether to ignore null values. If ``True``, null + values will be ignored when computing the sum; if ``False``, + if a null value is encountered, the output will be None. + We consider np.nan, None, and pd.NaT to be null values. + Default is ``True``. + + Returns: + The sum result. + + For different values of ``on``, the return varies: + + - ``on=None``: a dict containing the column-wise sum of all + columns, + - ``on="col"``: a scalar representing the sum of all items in + column ``"col"``, + - ``on=["col_1", ..., "col_n"]``: an n-column ``dict`` + containing the column-wise sum of the provided columns. + + If the dataset is empty, all values are null, or any value is null + AND ``ignore_nulls`` is ``False``, then the output will be None. + """ + ret = self._aggregate_on(Sum, on, ignore_nulls) + return self._aggregate_result(ret) + + @ConsumptionAPI + def min( + self, on: Optional[Union[str, List[str]]] = None, ignore_nulls: bool = True + ) -> Union[Any, Dict[str, Any]]: + """Compute minimum over entire dataset. + + Examples: + >>> import ray + >>> ray.data.range(100).min("id") + 0 + >>> ray.data.from_items([ + ... {"A": i, "B": i**2} + ... 
for i in range(100)]).min(["A", "B"]) + {'min(A)': 0, 'min(B)': 0} + + Args: + on: a column name or a list of column names to aggregate. + ignore_nulls: Whether to ignore null values. If ``True``, null + values will be ignored when computing the min; if ``False``, + if a null value is encountered, the output will be None. + We consider np.nan, None, and pd.NaT to be null values. + Default is ``True``. + + Returns: + The min result. + + For different values of ``on``, the return varies: + + - ``on=None``: an dict containing the column-wise min of + all columns, + - ``on="col"``: a scalar representing the min of all items in + column ``"col"``, + - ``on=["col_1", ..., "col_n"]``: an n-column dict + containing the column-wise min of the provided columns. + + If the dataset is empty, all values are null, or any value is null + AND ``ignore_nulls`` is ``False``, then the output will be None. + """ + ret = self._aggregate_on(Min, on, ignore_nulls) + return self._aggregate_result(ret) + + @ConsumptionAPI + def max( + self, on: Optional[Union[str, List[str]]] = None, ignore_nulls: bool = True + ) -> Union[Any, Dict[str, Any]]: + """Compute maximum over entire dataset. + + Examples: + >>> import ray + >>> ray.data.range(100).max("id") + 99 + >>> ray.data.from_items([ + ... {"A": i, "B": i**2} + ... for i in range(100)]).max(["A", "B"]) + {'max(A)': 99, 'max(B)': 9801} + + Args: + on: a column name or a list of column names to aggregate. + ignore_nulls: Whether to ignore null values. If ``True``, null + values will be ignored when computing the max; if ``False``, + if a null value is encountered, the output will be None. + We consider np.nan, None, and pd.NaT to be null values. + Default is ``True``. + + Returns: + The max result. 
+ + For different values of ``on``, the return varies: + + - ``on=None``: an dict containing the column-wise max of + all columns, + - ``on="col"``: a scalar representing the max of all items in + column ``"col"``, + - ``on=["col_1", ..., "col_n"]``: an n-column dict + containing the column-wise max of the provided columns. + + If the dataset is empty, all values are null, or any value is null + AND ``ignore_nulls`` is ``False``, then the output will be None. + """ + ret = self._aggregate_on(Max, on, ignore_nulls) + return self._aggregate_result(ret) + + @ConsumptionAPI + def mean( + self, on: Optional[Union[str, List[str]]] = None, ignore_nulls: bool = True + ) -> Union[Any, Dict[str, Any]]: + """Compute mean over entire dataset. + + Examples: + >>> import ray + >>> ray.data.range(100).mean("id") + 49.5 + >>> ray.data.from_items([ + ... {"A": i, "B": i**2} + ... for i in range(100)]).mean(["A", "B"]) + {'mean(A)': 49.5, 'mean(B)': 3283.5} + + Args: + on: a column name or a list of column names to aggregate. + ignore_nulls: Whether to ignore null values. If ``True``, null + values will be ignored when computing the mean; if ``False``, + if a null value is encountered, the output will be None. + We consider np.nan, None, and pd.NaT to be null values. + Default is ``True``. + + Returns: + The mean result. + + For different values of ``on``, the return varies: + + - ``on=None``: an dict containing the column-wise mean of + all columns, + - ``on="col"``: a scalar representing the mean of all items in + column ``"col"``, + - ``on=["col_1", ..., "col_n"]``: an n-column dict + containing the column-wise mean of the provided columns. + + If the dataset is empty, all values are null, or any value is null + AND ``ignore_nulls`` is ``False``, then the output will be None. 
+ """ + ret = self._aggregate_on(Mean, on, ignore_nulls) + return self._aggregate_result(ret) + + @ConsumptionAPI + def std( + self, + on: Optional[Union[str, List[str]]] = None, + ddof: int = 1, + ignore_nulls: bool = True, + ) -> Union[Any, Dict[str, Any]]: + """Compute standard deviation over entire dataset. + + Examples: + >>> import ray + >>> round(ray.data.range(100).std("id", ddof=0), 5) + 28.86607 + >>> ray.data.from_items([ + ... {"A": i, "B": i**2} + ... for i in range(100)]).std(["A", "B"]) + {'std(A)': 29.011491975882016, 'std(B)': 2968.1748039269296} + + .. note:: This uses Welford's online method for an accumulator-style computation + of the standard deviation. This method was chosen due to it's numerical + stability, and it being computable in a single pass. This may give different + (but more accurate) results than NumPy, Pandas, and sklearn, which use a + less numerically stable two-pass algorithm. + See + https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Welford's_online_algorithm + + Args: + on: a column name or a list of column names to aggregate. + ddof: Delta Degrees of Freedom. The divisor used in calculations + is ``N - ddof``, where ``N`` represents the number of elements. + ignore_nulls: Whether to ignore null values. If ``True``, null + values will be ignored when computing the std; if ``False``, + if a null value is encountered, the output will be None. + We consider np.nan, None, and pd.NaT to be null values. + Default is ``True``. + + Returns: + The standard deviation result. + + For different values of ``on``, the return varies: + + - ``on=None``: an dict containing the column-wise std of + all columns, + - ``on="col"``: a scalar representing the std of all items in + column ``"col"``, + - ``on=["col_1", ..., "col_n"]``: an n-column dict + containing the column-wise std of the provided columns. 
+ + If the dataset is empty, all values are null, or any value is null + AND ``ignore_nulls`` is ``False``, then the output will be None. + """ + ret = self._aggregate_on(Std, on, ignore_nulls, ddof=ddof) + return self._aggregate_result(ret) + + def sort(self, key: Optional[str] = None, descending: bool = False) -> "Dataset": + """Sort the dataset by the specified key column or key function. + + Examples: + >>> import ray + >>> # Sort by a single column in descending order. + >>> ds = ray.data.from_items( + ... [{"value": i} for i in range(1000)]) + >>> ds.sort("value", descending=True) + Sort + +- Dataset(num_blocks=200, num_rows=1000, schema={value: int64}) + + Time complexity: O(dataset size * log(dataset size / parallelism)) + + Args: + key: The column to sort by. To sort by multiple columns, use a map function + to generate the sort column beforehand. + descending: Whether to sort in descending order. + + Returns: + A new, sorted dataset. + """ + + plan = self._plan.with_stage(SortStage(self, key, descending)) + + logical_plan = self._logical_plan + if logical_plan is not None: + op = Sort( + logical_plan.dag, + key=key, + descending=descending, + ) + logical_plan = LogicalPlan(op) + return Dataset(plan, self._epoch, self._lazy, logical_plan) + + def zip(self, other: "Dataset") -> "Dataset": + """Materialize and zip this dataset with the elements of another. + + The datasets must have the same number of rows. Their column sets will be + merged, and any duplicate column names disambiguated with _1, _2, etc. suffixes. + + .. note:: + The smaller of the two datasets will be repartitioned to align the number + of rows per block with the larger dataset. + + .. note:: + Zipped datasets are not lineage-serializable, i.e. they can not be used + as a tunable hyperparameter in Ray Tune. 
+ + Examples: + >>> import ray + >>> ds1 = ray.data.range(5) + >>> ds2 = ray.data.range(5) + >>> ds1.zip(ds2).take_batch() + {'id': array([0, 1, 2, 3, 4]), 'id_1': array([0, 1, 2, 3, 4])} + + Time complexity: O(dataset size / parallelism) + + Args: + other: The dataset to zip with on the right hand side. + + Returns: + A ``Dataset`` containing the columns of the second dataset + concatenated horizontally with the columns of the first dataset, + with duplicate column names disambiguated with _1, _2, etc. suffixes. + """ + + plan = self._plan.with_stage(ZipStage(other)) + + logical_plan = self._logical_plan + other_logical_plan = other._logical_plan + if logical_plan is not None and other_logical_plan is not None: + op = Zip(logical_plan.dag, other_logical_plan.dag) + logical_plan = LogicalPlan(op) + return Dataset(plan, self._epoch, self._lazy, logical_plan) + + @ConsumptionAPI + def limit(self, limit: int) -> "Dataset": + """Materialize and truncate the dataset to the first ``limit`` records. + + Contrary to :meth`.take`, this will not move any data to the caller's + machine. Instead, it will return a new ``Dataset`` pointing to the truncated + distributed data. + + Examples: + >>> import ray + >>> ds = ray.data.range(1000) + >>> ds.limit(5).take_batch() + {'id': array([0, 1, 2, 3, 4])} + + Time complexity: O(limit specified) + + Args: + limit: The size of the dataset to truncate to. + + Returns: + The truncated dataset. + """ + plan = self._plan.with_stage(LimitStage(limit)) + logical_plan = self._logical_plan + if logical_plan is not None: + op = Limit(logical_plan.dag, limit=limit) + logical_plan = LogicalPlan(op) + return Dataset(plan, self._epoch, self._lazy, logical_plan) + + @ConsumptionAPI(pattern="Time complexity:") + def take_batch( + self, batch_size: int = 20, *, batch_format: Optional[str] = "default" + ) -> DataBatch: + """Return up to ``batch_size`` records from the dataset in a batch. 
+ + Unlike take(), the records are returned in the same format as used for + `iter_batches` and `map_batches`. + + This will move up to ``batch_size`` records to the caller's machine; if + ``batch_size`` is very large, this can result in an OutOfMemory crash on + the caller. + + Time complexity: O(batch_size specified) + + Args: + batch_size: The max number of records to return. + batch_format: Specify ``"default"`` to use the default block format + (NumPy), ``"pandas"`` to select ``pandas.DataFrame``, "pyarrow" to + select ``pyarrow.Table``, or ``"numpy"`` to select + ``Dict[str, numpy.ndarray]``, or None to return the underlying block + exactly as is with no additional formatting. + + Returns: + A batch of up to ``batch_size`` records from the dataset. + + Raises: + ValueError if the dataset is empty. + """ + batch_format = _apply_strict_mode_batch_format(batch_format) + try: + res = next( + self.iter_batches( + batch_size=batch_size, prefetch_batches=0, batch_format=batch_format + ) + ) + except StopIteration: + raise ValueError("The dataset is empty.") + self._synchronize_progress_bar() + return res + + @ConsumptionAPI(pattern="Time complexity:") + def take(self, limit: int = 20) -> List[Dict[str, Any]]: + """Return up to ``limit`` records from the dataset. + + This will move up to ``limit`` records to the caller's machine; if + ``limit`` is very large, this can result in an OutOfMemory crash on + the caller. + + Time complexity: O(limit specified) + + Args: + limit: The max number of records to return. + + Returns: + A list of up to ``limit`` records from the dataset. + """ + if ray.util.log_once("dataset_take"): + logger.info( + "Tip: Use `take_batch()` instead of `take() / show()` to return " + "records in pandas or numpy batch format." 
+ ) + output = [] + for row in self.iter_rows(): + output.append(row) + if len(output) >= limit: + break + self._synchronize_progress_bar() + return output + + @ConsumptionAPI(pattern="Time complexity:") + def take_all(self, limit: Optional[int] = None) -> List[Dict[str, Any]]: + """Return all of the records in the dataset. + + This will move the entire dataset to the caller's machine; if the + dataset is very large, this can result in an OutOfMemory crash on + the caller. + + Time complexity: O(dataset size) + + Args: + limit: Raise an error if the size exceeds the specified limit. + + Returns: + A list of all the records in the dataset. + """ + output = [] + for row in self.iter_rows(): + output.append(row) + if limit is not None and len(output) > limit: + raise ValueError( + f"The dataset has more than the given limit of {limit} records." + ) + self._synchronize_progress_bar() + return output + + @ConsumptionAPI(pattern="Time complexity:") + def show(self, limit: int = 20) -> None: + """Print up to the given number of records from the dataset. + + Time complexity: O(limit specified) + + Args: + limit: The max number of records to print. + """ + for row in self.take(limit): + print(row) + + @ConsumptionAPI( + if_more_than_read=True, + datasource_metadata="row count", + pattern="Time complexity:", + ) + def count(self) -> int: + """Count the number of records in the dataset. + + Time complexity: O(dataset size / parallelism), O(1) for parquet + + Returns: + The number of records in the dataset. + """ + # Handle empty dataset. + if self.num_blocks() == 0: + return 0 + + # For parquet, we can return the count directly from metadata. 
+ meta_count = self._meta_count() + if meta_count is not None: + return meta_count + + get_num_rows = cached_remote_fn(_get_num_rows) + + return sum( + ray.get( + [get_num_rows.remote(block) for block in self.get_internal_block_refs()] + ) + ) + + @ConsumptionAPI( + if_more_than_read=True, + datasource_metadata="schema", + extra_condition="or if ``fetch_if_missing=True`` (the default)", + pattern="Time complexity:", + ) + def schema(self, fetch_if_missing: bool = True) -> Optional["Schema"]: + """Return the schema of the dataset. + + Time complexity: O(1) + + Args: + fetch_if_missing: If True, synchronously fetch the schema if it's + not known. If False, None is returned if the schema is not known. + Default is True. + + Returns: + The ``ray.data.Schema`` class of the records, or None if the + schema is not known and fetch_if_missing is False. + """ + ctx = DataContext.get_current() + base_schema = self._plan.schema(fetch_if_missing=fetch_if_missing) + if ctx.strict_mode: + if base_schema: + return Schema(base_schema) + else: + return None + else: + return base_schema + + def num_blocks(self) -> int: + """Return the number of blocks of this dataset. + + Note that during read and transform operations, the number of blocks + may be dynamically adjusted to respect memory limits, increasing the + number of blocks at runtime. + + Time complexity: O(1) + + Returns: + The number of blocks of this dataset. + """ + return self._plan.initial_num_blocks() + + @ConsumptionAPI(if_more_than_read=True, pattern="Time complexity:") + def size_bytes(self) -> int: + """Return the in-memory size of the dataset. + + Time complexity: O(1) + + Returns: + The in-memory size of the dataset in bytes, or None if the + in-memory size is not known. 
+ """ + metadata = self._plan.execute().get_metadata() + if not metadata or metadata[0].size_bytes is None: + return None + return sum(m.size_bytes for m in metadata) + + @ConsumptionAPI(if_more_than_read=True, pattern="Time complexity:") + def input_files(self) -> List[str]: + """Return the list of input files for the dataset. + + Time complexity: O(num input files) + + Returns: + The list of input files used to create the dataset, or an empty + list if the input files is not known. + """ + metadata = self._plan.execute().get_metadata() + files = set() + for m in metadata: + for f in m.input_files: + files.add(f) + return list(files) + + @ConsumptionAPI + def write_parquet( + self, + path: str, + *, + filesystem: Optional["pyarrow.fs.FileSystem"] = None, + try_create_dir: bool = True, + arrow_open_stream_args: Optional[Dict[str, Any]] = None, + block_path_provider: BlockWritePathProvider = DefaultBlockWritePathProvider(), + arrow_parquet_args_fn: Callable[[], Dict[str, Any]] = lambda: {}, + ray_remote_args: Dict[str, Any] = None, + **arrow_parquet_args, + ) -> None: + """Write the dataset to parquet. + + This is only supported for datasets convertible to Arrow records. + To control the number of files, use ``.repartition()``. + + Unless a custom block path provider is given, the format of the output + files will be {uuid}_{block_idx}.parquet, where ``uuid`` is an unique + id for the dataset. + + Examples: + >>> import ray + >>> ds = ray.data.range(100) # doctest: +SKIP + >>> ds.write_parquet("s3://bucket/path") # doctest: +SKIP + + Time complexity: O(dataset size / parallelism) + + Args: + path: The path to the destination root directory, where Parquet + files will be written to. + filesystem: The filesystem implementation to write to. + try_create_dir: Try to create all directories in destination path + if True. Does nothing if all directories already exist. 
+ arrow_open_stream_args: kwargs passed to + pyarrow.fs.FileSystem.open_output_stream + block_path_provider: BlockWritePathProvider implementation to + write each dataset block to a custom output path. + arrow_parquet_args_fn: Callable that returns a dictionary of write + arguments to use when writing each block to a file. Overrides + any duplicate keys from arrow_parquet_args. This should be used + instead of arrow_parquet_args if any of your write arguments + cannot be pickled, or if you'd like to lazily resolve the write + arguments for each dataset block. + ray_remote_args: Kwargs passed to ray.remote in the write tasks. + arrow_parquet_args: Options to pass to + pyarrow.parquet.write_table(), which is used to write out each + block to a file. + """ + self.write_datasource( + ParquetDatasource(), + ray_remote_args=ray_remote_args, + path=path, + dataset_uuid=self._uuid, + filesystem=filesystem, + try_create_dir=try_create_dir, + open_stream_args=arrow_open_stream_args, + block_path_provider=block_path_provider, + write_args_fn=arrow_parquet_args_fn, + **arrow_parquet_args, + ) + + @ConsumptionAPI + def write_json( + self, + path: str, + *, + filesystem: Optional["pyarrow.fs.FileSystem"] = None, + try_create_dir: bool = True, + arrow_open_stream_args: Optional[Dict[str, Any]] = None, + block_path_provider: BlockWritePathProvider = DefaultBlockWritePathProvider(), + pandas_json_args_fn: Callable[[], Dict[str, Any]] = lambda: {}, + ray_remote_args: Dict[str, Any] = None, + **pandas_json_args, + ) -> None: + """Write the dataset to json. + + This is only supported for datasets convertible to Arrow records. + To control the number of files, use ``.repartition()``. + + Unless a custom block path provider is given, the format of the output + files will be {self._uuid}_{block_idx}.json, where ``uuid`` is an + unique id for the dataset. 
+ + Examples: + >>> import ray + >>> ds = ray.data.range(100) # doctest: +SKIP + >>> ds.write_json("s3://bucket/path") # doctest: +SKIP + + Time complexity: O(dataset size / parallelism) + + Args: + path: The path to the destination root directory, where json + files will be written to. + filesystem: The filesystem implementation to write to. + try_create_dir: Try to create all directories in destination path + if True. Does nothing if all directories already exist. + arrow_open_stream_args: kwargs passed to + pyarrow.fs.FileSystem.open_output_stream + block_path_provider: BlockWritePathProvider implementation to + write each dataset block to a custom output path. + pandas_json_args_fn: Callable that returns a dictionary of write + arguments to use when writing each block to a file. Overrides + any duplicate keys from pandas_json_args. This should be used + instead of pandas_json_args if any of your write arguments + cannot be pickled, or if you'd like to lazily resolve the write + arguments for each dataset block. + ray_remote_args: Kwargs passed to ray.remote in the write tasks. + pandas_json_args: These args will be passed to + pandas.DataFrame.to_json(), which we use under the hood to + write out each Dataset block. These + are dict(orient="records", lines=True) by default. 
+ """ + self.write_datasource( + JSONDatasource(), + ray_remote_args=ray_remote_args, + path=path, + dataset_uuid=self._uuid, + filesystem=filesystem, + try_create_dir=try_create_dir, + open_stream_args=arrow_open_stream_args, + block_path_provider=block_path_provider, + write_args_fn=pandas_json_args_fn, + **pandas_json_args, + ) + + @ConsumptionAPI + def write_csv( + self, + path: str, + *, + filesystem: Optional["pyarrow.fs.FileSystem"] = None, + try_create_dir: bool = True, + arrow_open_stream_args: Optional[Dict[str, Any]] = None, + block_path_provider: BlockWritePathProvider = DefaultBlockWritePathProvider(), + arrow_csv_args_fn: Callable[[], Dict[str, Any]] = lambda: {}, + ray_remote_args: Dict[str, Any] = None, + **arrow_csv_args, + ) -> None: + """Write the dataset to csv. + + This is only supported for datasets convertible to Arrow records. + To control the number of files, use ``.repartition()``. + + Unless a custom block path provider is given, the format of the output + files will be {uuid}_{block_idx}.csv, where ``uuid`` is an unique id + for the dataset. + + Examples: + >>> import ray + >>> ds = ray.data.range(100) # doctest: +SKIP + >>> ds.write_csv("s3://bucket/path") # doctest: +SKIP + + Time complexity: O(dataset size / parallelism) + + Args: + path: The path to the destination root directory, where csv + files will be written to. + filesystem: The filesystem implementation to write to. + try_create_dir: Try to create all directories in destination path + if True. Does nothing if all directories already exist. + arrow_open_stream_args: kwargs passed to + pyarrow.fs.FileSystem.open_output_stream + block_path_provider: BlockWritePathProvider implementation to + write each dataset block to a custom output path. + arrow_csv_args_fn: Callable that returns a dictionary of write + arguments to use when writing each block to a file. Overrides + any duplicate keys from arrow_csv_args. 
This should be used + instead of arrow_csv_args if any of your write arguments + cannot be pickled, or if you'd like to lazily resolve the write + arguments for each dataset block. + ray_remote_args: Kwargs passed to ray.remote in the write tasks. + arrow_csv_args: Other CSV write options to pass to pyarrow. + """ + self.write_datasource( + CSVDatasource(), + ray_remote_args=ray_remote_args, + path=path, + dataset_uuid=self._uuid, + filesystem=filesystem, + try_create_dir=try_create_dir, + open_stream_args=arrow_open_stream_args, + block_path_provider=block_path_provider, + write_args_fn=arrow_csv_args_fn, + **arrow_csv_args, + ) + + @ConsumptionAPI + def write_tfrecords( + self, + path: str, + *, + tf_schema: Optional["schema_pb2.Schema"] = None, + filesystem: Optional["pyarrow.fs.FileSystem"] = None, + try_create_dir: bool = True, + arrow_open_stream_args: Optional[Dict[str, Any]] = None, + block_path_provider: BlockWritePathProvider = DefaultBlockWritePathProvider(), + ray_remote_args: Dict[str, Any] = None, + ) -> None: + """Write the dataset to TFRecord files. + + The `TFRecord `_ + files will contain + `tf.train.Example `_ # noqa: E501 + records, with one Example record for each row in the dataset. + + .. warning:: + tf.train.Feature only natively stores ints, floats, and bytes, + so this function only supports datasets with these data types, + and will error if the dataset contains unsupported types. + + This is only supported for datasets convertible to Arrow records. + To control the number of files, use ``.repartition()``. + + Unless a custom block path provider is given, the format of the output + files will be {uuid}_{block_idx}.tfrecords, where ``uuid`` is an unique id + for the dataset. + + Examples: + >>> import ray + >>> ds = ray.data.from_items([ + ... { "name": "foo", "score": 42 }, + ... { "name": "bar", "score": 43 }, + ... 
]) + >>> ds.write_tfrecords("s3://bucket/path") # doctest: +SKIP + + Time complexity: O(dataset size / parallelism) + + Args: + path: The path to the destination root directory, where tfrecords + files will be written to. + filesystem: The filesystem implementation to write to. + try_create_dir: Try to create all directories in destination path + if True. Does nothing if all directories already exist. + arrow_open_stream_args: kwargs passed to + pyarrow.fs.FileSystem.open_output_stream + block_path_provider: BlockWritePathProvider implementation to + write each dataset block to a custom output path. + ray_remote_args: Kwargs passed to ray.remote in the write tasks. + + """ + + self.write_datasource( + TFRecordDatasource(), + ray_remote_args=ray_remote_args, + path=path, + dataset_uuid=self._uuid, + filesystem=filesystem, + try_create_dir=try_create_dir, + open_stream_args=arrow_open_stream_args, + block_path_provider=block_path_provider, + tf_schema=tf_schema, + ) + + @PublicAPI(stability="alpha") + @ConsumptionAPI + def write_webdataset( + self, + path: str, + *, + filesystem: Optional["pyarrow.fs.FileSystem"] = None, + try_create_dir: bool = True, + arrow_open_stream_args: Optional[Dict[str, Any]] = None, + block_path_provider: BlockWritePathProvider = DefaultBlockWritePathProvider(), + ray_remote_args: Dict[str, Any] = None, + encoder: Optional[Union[bool, str, callable, list]] = True, + ) -> None: + """Write the dataset to WebDataset files. + + The `TFRecord `_ + files will contain + `tf.train.Example `_ # noqa: E501 + records, with one Example record for each row in the dataset. + + .. warning:: + tf.train.Feature only natively stores ints, floats, and bytes, + so this function only supports datasets with these data types, + and will error if the dataset contains unsupported types. + + This is only supported for datasets convertible to Arrow records. + To control the number of files, use ``.repartition()``. 
+ + Unless a custom block path provider is given, the format of the output + files will be {uuid}_{block_idx}.tfrecords, where ``uuid`` is an unique id + for the dataset. + + Examples: + >>> import ray + >>> ds = ray.data.from_items([ + ... { "name": "foo", "score": 42 }, + ... { "name": "bar", "score": 43 }, + ... ]) + >>> ds.write_webdataset("s3://bucket/path") # doctest: +SKIP + + Time complexity: O(dataset size / parallelism) + + Args: + path: The path to the destination root directory, where tfrecords + files will be written to. + filesystem: The filesystem implementation to write to. + try_create_dir: Try to create all directories in destination path + if True. Does nothing if all directories already exist. + arrow_open_stream_args: kwargs passed to + pyarrow.fs.FileSystem.open_output_stream + block_path_provider: BlockWritePathProvider implementation to + write each dataset block to a custom output path. + ray_remote_args: Kwargs passed to ray.remote in the write tasks. + + """ + + from ray.data.datasource.webdataset_datasource import WebDatasetDatasource + + self.write_datasource( + WebDatasetDatasource(), + ray_remote_args=ray_remote_args, + path=path, + dataset_uuid=self._uuid, + filesystem=filesystem, + try_create_dir=try_create_dir, + open_stream_args=arrow_open_stream_args, + block_path_provider=block_path_provider, + encoder=encoder, + ) + + @ConsumptionAPI + def write_numpy( + self, + path: str, + *, + column: Optional[str] = None, + filesystem: Optional["pyarrow.fs.FileSystem"] = None, + try_create_dir: bool = True, + arrow_open_stream_args: Optional[Dict[str, Any]] = None, + block_path_provider: BlockWritePathProvider = DefaultBlockWritePathProvider(), + ray_remote_args: Dict[str, Any] = None, + ) -> None: + """Write a tensor column of the dataset to npy files. + + This is only supported for datasets convertible to Arrow records that + contain a TensorArray column. To control the number of files, use + ``.repartition()``. 
+ + Unless a custom block path provider is given, the format of the output + files will be {self._uuid}_{block_idx}.npy, where ``uuid`` is an unique + id for the dataset. + + Examples: + >>> import ray + >>> ds = ray.data.range(100) # doctest: +SKIP + >>> ds.write_numpy("s3://bucket/path") # doctest: +SKIP + + Time complexity: O(dataset size / parallelism) + + Args: + path: The path to the destination root directory, where npy + files will be written to. + column: The name of the table column that contains the tensor to + be written. + filesystem: The filesystem implementation to write to. + try_create_dir: Try to create all directories in destination path + if True. Does nothing if all directories already exist. + arrow_open_stream_args: kwargs passed to + pyarrow.fs.FileSystem.open_output_stream + block_path_provider: BlockWritePathProvider implementation to + write each dataset block to a custom output path. + ray_remote_args: Kwargs passed to ray.remote in the write tasks. + """ + context = DataContext.get_current() + if context.strict_mode and not column: + raise StrictModeError( + "In Ray 2.5, the column must be specified " + "(e.g., `write_numpy(column='data')`)." + ) + column = column or TENSOR_COLUMN_NAME + + self.write_datasource( + NumpyDatasource(), + ray_remote_args=ray_remote_args, + path=path, + dataset_uuid=self._uuid, + column=column, + filesystem=filesystem, + try_create_dir=try_create_dir, + open_stream_args=arrow_open_stream_args, + block_path_provider=block_path_provider, + ) + + @ConsumptionAPI + def write_mongo( + self, + uri: str, + database: str, + collection: str, + ray_remote_args: Dict[str, Any] = None, + ) -> None: + """Write the dataset to a MongoDB datasource. + + This is only supported for datasets convertible to Arrow records. + To control the number of parallel write tasks, use ``.repartition()`` + before calling this method. + + .. 
note:: + Currently, this supports only a subset of the pyarrow's types, due to the + limitation of pymongoarrow which is used underneath. Writing unsupported + types will fail on type checking. See all the supported types at: + https://mongo-arrow.readthedocs.io/en/latest/data_types.html. + + .. note:: + The records will be inserted into MongoDB as new documents. If a record has + the _id field, this _id must be non-existent in MongoDB, otherwise the write + will be rejected and fail (hence preexisting documents are protected from + being mutated). It's fine to not have _id field in record and MongoDB will + auto generate one at insertion. + + Examples: + >>> import ray + >>> import pandas as pd + >>> docs = [{"title": "MongoDB Datasource test"} for key in range(4)] + >>> ds = ray.data.from_pandas(pd.DataFrame(docs)) + >>> ds.write_mongo( # doctest: +SKIP + >>> MongoDatasource(), # doctest: +SKIP + >>> uri="mongodb://username:password@mongodb0.example.com:27017/?authSource=admin", # noqa: E501 # doctest: +SKIP + >>> database="my_db", # doctest: +SKIP + >>> collection="my_collection", # doctest: +SKIP + >>> ) # doctest: +SKIP + + Args: + uri: The URI to the destination MongoDB where the dataset will be + written to. For the URI format, see details in + https://www.mongodb.com/docs/manual/reference/connection-string/. + database: The name of the database. This database must exist otherwise + ValueError will be raised. + collection: The name of the collection in the database. This collection + must exist otherwise ValueError will be raised. + ray_remote_args: Kwargs passed to ray.remote in the write tasks. 
+ """ + from ray.data.datasource import MongoDatasource + + self.write_datasource( + MongoDatasource(), + ray_remote_args=ray_remote_args, + uri=uri, + database=database, + collection=collection, + ) + + @ConsumptionAPI + def write_datasource( + self, + datasource: Datasource, + *, + ray_remote_args: Dict[str, Any] = None, + **write_args, + ) -> None: + """Write the dataset to a custom datasource. + + Examples: + >>> import ray + >>> from ray.data.datasource import Datasource + >>> ds = ray.data.range(100) # doctest: +SKIP + >>> class CustomDatasource(Datasource): # doctest: +SKIP + ... # define custom data source + ... pass # doctest: +SKIP + >>> ds.write_datasource(CustomDatasource(...)) # doctest: +SKIP + + Time complexity: O(dataset size / parallelism) + + Args: + datasource: The datasource to write to. + ray_remote_args: Kwargs passed to ray.remote in the write tasks. + write_args: Additional write args to pass to the datasource. + """ + if ray_remote_args is None: + ray_remote_args = {} + path = write_args.get("path", None) + if path and _is_local_scheme(path): + if ray.util.client.ray.is_connected(): + raise ValueError( + f"The local scheme paths {path} are not supported in Ray Client." 
+ ) + ray_remote_args["scheduling_strategy"] = NodeAffinitySchedulingStrategy( + ray.get_runtime_context().get_node_id(), + soft=False, + ) + + if type(datasource).write != Datasource.write: + write_fn = generate_write_fn(datasource, **write_args) + + def write_fn_wrapper(blocks: Iterator[Block], ctx, fn) -> Iterator[Block]: + return write_fn(blocks, ctx) + + plan = self._plan.with_stage( + OneToOneStage( + "Write", + write_fn_wrapper, + TaskPoolStrategy(), + ray_remote_args, + fn=lambda x: x, + ) + ) + + logical_plan = self._logical_plan + if logical_plan is not None: + write_op = Write( + logical_plan.dag, + datasource, + ray_remote_args=ray_remote_args, + **write_args, + ) + logical_plan = LogicalPlan(write_op) + + try: + import pandas as pd + + self._write_ds = Dataset( + plan, self._epoch, self._lazy, logical_plan + ).materialize() + blocks = ray.get(self._write_ds._plan.execute().get_blocks()) + assert all( + isinstance(block, pd.DataFrame) and len(block) == 1 + for block in blocks + ) + write_results = [block["write_result"][0] for block in blocks] + datasource.on_write_complete(write_results) + except Exception as e: + datasource.on_write_failed([], e) + raise + else: + logger.warning( + "The Datasource.do_write() is deprecated in " + "Ray 2.4 and will be removed in future release. Use " + "Datasource.write() instead." + ) + + ctx = DataContext.get_current() + blocks, metadata = zip(*self._plan.execute().get_blocks_with_metadata()) + # Prepare write in a remote task so that in Ray client mode, we + # don't do metadata resolution from the client machine. 
+ do_write = cached_remote_fn(_do_write, retry_exceptions=False, num_cpus=0) + write_results: List[ObjectRef[WriteResult]] = ray.get( + do_write.remote( + datasource, + ctx, + blocks, + metadata, + ray_remote_args, + _wrap_arrow_serialization_workaround(write_args), + ) + ) + + progress = ProgressBar("Write Progress", len(write_results)) + try: + progress.block_until_complete(write_results) + datasource.on_write_complete(ray.get(write_results)) + except Exception as e: + datasource.on_write_failed(write_results, e) + raise + finally: + progress.close() + + @ConsumptionAPI( + delegate=( + "Calling any of the consumption methods on the returned ``DataIterator``" + ) + ) + def iterator(self) -> DataIterator: + """Return a :class:`~ray.data.DataIterator` that + can be used to repeatedly iterate over the dataset. + + Examples: + >>> import ray + >>> for batch in ray.data.range( + ... 1000000 + ... ).iterator().iter_batches(): # doctest: +SKIP + ... print(batch) # doctest: +SKIP + + .. note:: + It is recommended to use ``DataIterator`` methods over directly + calling methods such as ``iter_batches()``. + """ + return DataIteratorImpl(self) + + @ConsumptionAPI + def iter_rows(self, *, prefetch_blocks: int = 0) -> Iterator[Dict[str, Any]]: + """Return a local row iterator over the dataset. + + Examples: + >>> import ray + >>> for i in ray.data.range(1000000).iter_rows(): # doctest: +SKIP + ... print(i) # doctest: +SKIP + + Time complexity: O(1) + + Args: + prefetch_blocks: The number of blocks to prefetch ahead of the + current block during the scan. + + Returns: + A local iterator over the entire dataset. 
+ """ + + return self.iterator().iter_rows(prefetch_blocks=prefetch_blocks) + + @ConsumptionAPI + def iter_batches( + self, + *, + prefetch_batches: int = 1, + batch_size: Optional[int] = 256, + batch_format: Optional[str] = "default", + drop_last: bool = False, + local_shuffle_buffer_size: Optional[int] = None, + local_shuffle_seed: Optional[int] = None, + _collate_fn: Optional[Callable[[DataBatch], Any]] = None, + # Deprecated. + prefetch_blocks: int = 0, + ) -> Iterator[DataBatch]: + """Return a local batched iterator over the dataset. + + Examples: + >>> import ray + >>> for batch in ray.data.range(1000000).iter_batches(): # doctest: +SKIP + ... print(batch) # doctest: +SKIP + + Time complexity: O(1) + + Args: + prefetch_batches: The number of batches to fetch ahead of the current batch + to fetch. If set to greater than 0, a separate threadpool will be used + to fetch the objects to the local node, format the batches, and apply + the collate_fn. Defaults to 1. You can revert back to the old + prefetching behavior that uses `prefetch_blocks` by setting + `use_legacy_iter_batches` to True in the datasetContext. + batch_size: The number of rows in each batch, or None to use entire blocks + as batches (blocks may contain different number of rows). + The final batch may include fewer than ``batch_size`` rows if + ``drop_last`` is ``False``. Defaults to 256. + batch_format: Specify ``"default"`` to use the default block format + (NumPy), ``"pandas"`` to select ``pandas.DataFrame``, "pyarrow" to + select ``pyarrow.Table``, or ``"numpy"`` to select + ``Dict[str, numpy.ndarray]``, or None to return the underlying block + exactly as is with no additional formatting. + drop_last: Whether to drop the last batch if it's incomplete. 
+ local_shuffle_buffer_size: If non-None, the data will be randomly shuffled + using a local in-memory shuffle buffer, and this value will serve as the + minimum number of rows that must be in the local in-memory shuffle + buffer in order to yield a batch. When there are no more rows to add to + the buffer, the remaining rows in the buffer will be drained. + local_shuffle_seed: The seed to use for the local random shuffle. + + Returns: + An iterator over record batches. + """ + batch_format = _apply_strict_mode_batch_format(batch_format) + if batch_format == "native": + logger.warning("The 'native' batch format has been renamed 'default'.") + return self.iterator().iter_batches( + prefetch_batches=prefetch_batches, + prefetch_blocks=prefetch_blocks, + batch_size=batch_size, + batch_format=batch_format, + drop_last=drop_last, + local_shuffle_buffer_size=local_shuffle_buffer_size, + local_shuffle_seed=local_shuffle_seed, + _collate_fn=_collate_fn, + ) + + @ConsumptionAPI + def iter_torch_batches( + self, + *, + prefetch_batches: int = 1, + batch_size: Optional[int] = 256, + dtypes: Optional[Union["torch.dtype", Dict[str, "torch.dtype"]]] = None, + device: Optional[str] = None, + collate_fn: Optional[ + Callable[[Union[np.ndarray, Dict[str, np.ndarray]]], Any] + ] = None, + drop_last: bool = False, + local_shuffle_buffer_size: Optional[int] = None, + local_shuffle_seed: Optional[int] = None, + # Deprecated + prefetch_blocks: int = 0, + ) -> Iterator["TorchTensorBatchType"]: + """Return a local batched iterator of Torch Tensors over the dataset. + + This iterator will yield single-tensor batches if the underlying dataset + consists of a single column; otherwise, it will yield a dictionary of + column-tensors. If looking for more flexibility in the tensor conversion (e.g. + casting dtypes) or the batch format, try use `.iter_batches` directly, which is + a lower-level API. + + Examples: + >>> import ray + >>> for batch in ray.data.range( # doctest: +SKIP + ... 12, + ... 
).iter_torch_batches(batch_size=4): + ... print(batch.shape) # doctest: +SKIP + torch.Size([4, 1]) + torch.Size([4, 1]) + torch.Size([4, 1]) + + Time complexity: O(1) + + Args: + prefetch_batches: The number of batches to fetch ahead of the current batch + to fetch. If set to greater than 0, a separate threadpool will be used + to fetch the objects to the local node, format the batches, and apply + the collate_fn. Defaults to 1. You can revert back to the old + prefetching behavior that uses `prefetch_blocks` by setting + `use_legacy_iter_batches` to True in the datasetContext. + batch_size: The number of rows in each batch, or None to use entire blocks + as batches (blocks may contain different number of rows). + The final batch may include fewer than ``batch_size`` rows if + ``drop_last`` is ``False``. Defaults to 256. + dtypes: The Torch dtype(s) for the created tensor(s); if None, the dtype + will be inferred from the tensor data. + device: The device on which the tensor should be placed; if None, the Torch + tensor will be constructed on the CPU. + collate_fn: A function to convert a Numpy batch to a PyTorch tensor batch. + Potential use cases include collating along a dimension other than the + first, padding sequences of various lengths, or generally handling + batches of different length tensors. If not provided, the default + collate function is used which simply converts the batch of numpy + arrays to a batch of PyTorch tensors. This API is still experimental + and is subject to change. + drop_last: Whether to drop the last batch if it's incomplete. + local_shuffle_buffer_size: If non-None, the data will be randomly shuffled + using a local in-memory shuffle buffer, and this value will serve as the + minimum number of rows that must be in the local in-memory shuffle + buffer in order to yield a batch. When there are no more rows to add to + the buffer, the remaining rows in the buffer will be drained. 
This + buffer size must be greater than or equal to ``batch_size``, and + therefore ``batch_size`` must also be specified when using local + shuffling. + local_shuffle_seed: The seed to use for the local random shuffle. + + Returns: + An iterator over Torch Tensor batches. + """ + return self.iterator().iter_torch_batches( + prefetch_batches=prefetch_batches, + prefetch_blocks=prefetch_blocks, + batch_size=batch_size, + dtypes=dtypes, + device=device, + collate_fn=collate_fn, + drop_last=drop_last, + local_shuffle_buffer_size=local_shuffle_buffer_size, + local_shuffle_seed=local_shuffle_seed, + ) + + @ConsumptionAPI + def iter_tf_batches( + self, + *, + prefetch_batches: int = 1, + batch_size: Optional[int] = 256, + dtypes: Optional[Union["tf.dtypes.DType", Dict[str, "tf.dtypes.DType"]]] = None, + drop_last: bool = False, + local_shuffle_buffer_size: Optional[int] = None, + local_shuffle_seed: Optional[int] = None, + # Deprecated + prefetch_blocks: int = 0, + ) -> Iterator[TensorFlowTensorBatchType]: + """Return a local batched iterator of TensorFlow Tensors over the dataset. + + This iterator will yield single-tensor batches of the underlying dataset + consists of a single column; otherwise, it will yield a dictionary of + column-tensors. + + .. tip:: + If you don't need the additional flexibility provided by this method, + consider using :meth:`~ray.data.Dataset.to_tf` instead. It's easier + to use. + + Examples: + >>> import ray + >>> for batch in ray.data.range( # doctest: +SKIP + ... 12, + ... ).iter_tf_batches(batch_size=4): + ... print(batch.shape) # doctest: +SKIP + (4, 1) + (4, 1) + (4, 1) + + Time complexity: O(1) + + Args: + prefetch_batches: The number of batches to fetch ahead of the current batch + to fetch. If set to greater than 0, a separate threadpool will be used + to fetch the objects to the local node, format the batches, and apply + the collate_fn. Defaults to 1. 
You can revert back to the old + prefetching behavior that uses `prefetch_blocks` by setting + `use_legacy_iter_batches` to True in the datasetContext. + batch_size: The number of rows in each batch, or None to use entire blocks + as batches (blocks may contain different number of rows). + The final batch may include fewer than ``batch_size`` rows if + ``drop_last`` is ``False``. Defaults to 256. + dtypes: The TensorFlow dtype(s) for the created tensor(s); if None, the + dtype will be inferred from the tensor data. + drop_last: Whether to drop the last batch if it's incomplete. + local_shuffle_buffer_size: If non-None, the data will be randomly shuffled + using a local in-memory shuffle buffer, and this value will serve as the + minimum number of rows that must be in the local in-memory shuffle + buffer in order to yield a batch. When there are no more rows to add to + the buffer, the remaining rows in the buffer will be drained. This + buffer size must be greater than or equal to ``batch_size``, and + therefore ``batch_size`` must also be specified when using local + shuffling. + local_shuffle_seed: The seed to use for the local random shuffle. + + Returns: + An iterator over TensorFlow Tensor batches. 
+ """ + return self.iterator().iter_tf_batches( + prefetch_batches=prefetch_batches, + prefetch_blocks=prefetch_blocks, + batch_size=batch_size, + dtypes=dtypes, + drop_last=drop_last, + local_shuffle_buffer_size=local_shuffle_buffer_size, + local_shuffle_seed=local_shuffle_seed, + ) + + @ConsumptionAPI(pattern="Time complexity:") + def to_torch( + self, + *, + label_column: Optional[str] = None, + feature_columns: Optional[ + Union[List[str], List[List[str]], Dict[str, List[str]]] + ] = None, + label_column_dtype: Optional["torch.dtype"] = None, + feature_column_dtypes: Optional[ + Union["torch.dtype", List["torch.dtype"], Dict[str, "torch.dtype"]] + ] = None, + batch_size: int = 1, + prefetch_batches: int = 1, + drop_last: bool = False, + local_shuffle_buffer_size: Optional[int] = None, + local_shuffle_seed: Optional[int] = None, + unsqueeze_label_tensor: bool = True, + unsqueeze_feature_tensors: bool = True, + # Deprecated + prefetch_blocks: int = 0, + ) -> "torch.utils.data.IterableDataset": + """Return a Torch IterableDataset over this dataset. + + This is only supported for datasets convertible to Arrow records. + + It is recommended to use the returned ``IterableDataset`` directly + instead of passing it into a torch ``DataLoader``. + + Each element in IterableDataset will be a tuple consisting of 2 + elements. The first item contains the feature tensor(s), and the + second item is the label tensor. Those can take on different + forms, depending on the specified arguments. 
+ + For the features tensor (N is the ``batch_size`` and n, m, k + are the number of features per tensor): + + * If ``feature_columns`` is a ``List[str]``, the features will be + a tensor of shape (N, n), with columns corresponding to + ``feature_columns`` + + * If ``feature_columns`` is a ``List[List[str]]``, the features will be + a list of tensors of shape [(N, m),...,(N, k)], with columns of each + tensor corresponding to the elements of ``feature_columns`` + + * If ``feature_columns`` is a ``Dict[str, List[str]]``, the features + will be a dict of key-tensor pairs of shape + {key1: (N, m),..., keyN: (N, k)}, with columns of each + tensor corresponding to the value of ``feature_columns`` under the + key. + + If ``unsqueeze_label_tensor=True`` (default), the label tensor will be + of shape (N, 1). Otherwise, it will be of shape (N,). + If ``label_column`` is specified as ``None``, then no column from the + ``Dataset`` will be treated as the label, and the output label tensor + will be ``None``. + + Note that you probably want to call ``.split()`` on this dataset if + there are to be multiple Torch workers consuming the data. + + Time complexity: O(1) + + Args: + label_column: The name of the column used as the + label (second element of the output list). Can be None for + prediction, in which case the second element of returned + tuple will also be None. + feature_columns: The names of the columns + to use as the features. Can be a list of lists or + a dict of string-list pairs for multi-tensor output. + If None, then use all columns except the label column as + the features. + label_column_dtype: The torch dtype to + use for the label column. If None, then automatically infer + the dtype. + feature_column_dtypes: The dtypes to use for the feature + tensors. This should match the format of ``feature_columns``, + or be a single dtype, in which case it will be applied to + all tensors. If None, then automatically infer the dtype. 
+            batch_size: How many samples per batch to yield at a time.
+                Defaults to 1.
+            prefetch_batches: The number of batches to fetch ahead of the current
+                batch. If set to greater than 0, a separate threadpool will be used
+                to fetch the objects to the local node, format the batches, and apply
+                the collate_fn. Defaults to 1. You can revert back to the old
+                prefetching behavior that uses `prefetch_blocks` by setting
+                `use_legacy_iter_batches` to True in the DatasetContext.
+            drop_last: Set to True to drop the last incomplete batch,
+                if the dataset size is not divisible by the batch size. If
+                False and the size of the stream is not divisible by the batch
+                size, then the last batch will be smaller. Defaults to False.
+            local_shuffle_buffer_size: If non-None, the data will be randomly shuffled
+                using a local in-memory shuffle buffer, and this value will serve as the
+                minimum number of rows that must be in the local in-memory shuffle
+                buffer in order to yield a batch. When there are no more rows to add to
+                the buffer, the remaining rows in the buffer will be drained. This
+                buffer size must be greater than or equal to ``batch_size``, and
+                therefore ``batch_size`` must also be specified when using local
+                shuffling.
+            local_shuffle_seed: The seed to use for the local random shuffle.
+            unsqueeze_label_tensor: If set to True, the label tensor
+                will be unsqueezed (reshaped to (N, 1)). Otherwise, it will
+                be left as is, that is (N, ). In general, regression loss
+                functions expect an unsqueezed tensor, while classification
+                loss functions expect a squeezed one. Defaults to True.
+            unsqueeze_feature_tensors: If set to True, the features tensors
+                will be unsqueezed (reshaped to (N, 1)) before being concatenated into
+                the final features tensor. Otherwise, they will be left as is, that is
+                (N, ). Defaults to True.
+
+        Returns:
+            A torch IterableDataset.
+ """ + + return self.iterator().to_torch( + label_column=label_column, + feature_columns=feature_columns, + label_column_dtype=label_column_dtype, + feature_column_dtypes=feature_column_dtypes, + batch_size=batch_size, + prefetch_blocks=prefetch_blocks, + prefetch_batches=prefetch_batches, + drop_last=drop_last, + local_shuffle_buffer_size=local_shuffle_buffer_size, + local_shuffle_seed=local_shuffle_seed, + unsqueeze_label_tensor=unsqueeze_label_tensor, + unsqueeze_feature_tensors=unsqueeze_feature_tensors, + ) + + @ConsumptionAPI + def to_tf( + self, + feature_columns: Union[str, List[str]], + label_columns: Union[str, List[str]], + *, + prefetch_batches: int = 1, + batch_size: int = 1, + drop_last: bool = False, + local_shuffle_buffer_size: Optional[int] = None, + local_shuffle_seed: Optional[int] = None, + # Deprecated + prefetch_blocks: int = 0, + ) -> "tf.data.Dataset": + """Return a TF Dataset over this dataset. + + .. warning:: + If your dataset contains ragged tensors, this method errors. To prevent + errors, resize tensors or + :ref:`disable tensor extension casting `. + + Examples: + >>> import ray + >>> ds = ray.data.read_csv("s3://anonymous@air-example-data/iris.csv") + >>> ds + Dataset( + num_blocks=1, + num_rows=150, + schema={ + sepal length (cm): double, + sepal width (cm): double, + petal length (cm): double, + petal width (cm): double, + target: int64 + } + ) + + If your model accepts a single tensor as input, specify a single feature column. + + >>> ds.to_tf(feature_columns="sepal length (cm)", label_columns="target") # doctest: +SKIP + <_OptionsDataset element_spec=(TensorSpec(shape=(None,), dtype=tf.float64, name='sepal length (cm)'), TensorSpec(shape=(None,), dtype=tf.int64, name='target'))> + + If your model accepts a dictionary as input, specify a list of feature columns. 
+
+            >>> ds.to_tf(["sepal length (cm)", "sepal width (cm)"], "target") # doctest: +SKIP
+            <_OptionsDataset element_spec=({'sepal length (cm)': TensorSpec(shape=(None,), dtype=tf.float64, name='sepal length (cm)'), 'sepal width (cm)': TensorSpec(shape=(None,), dtype=tf.float64, name='sepal width (cm)')}, TensorSpec(shape=(None,), dtype=tf.int64, name='target'))>
+
+            If your dataset contains multiple features but your model accepts a single
+            tensor as input, combine features with
+            :class:`~ray.data.preprocessors.Concatenator`.
+
+            >>> from ray.data.preprocessors import Concatenator
+            >>> preprocessor = Concatenator(output_column_name="features", exclude="target")
+            >>> ds = preprocessor.transform(ds)
+            >>> ds
+            Concatenator
+            +- Dataset(
+                  num_blocks=1,
+                  num_rows=150,
+                  schema={
+                     sepal length (cm): double,
+                     sepal width (cm): double,
+                     petal length (cm): double,
+                     petal width (cm): double,
+                     target: int64
+                  }
+               )
+            >>> ds.to_tf("features", "target") # doctest: +SKIP
+            <_OptionsDataset element_spec=(TensorSpec(shape=(None, 4), dtype=tf.float64, name='features'), TensorSpec(shape=(None,), dtype=tf.int64, name='target'))>
+
+        Args:
+            feature_columns: Columns that correspond to model inputs. If this is a
+                string, the input data is a tensor. If this is a list, the input data
+                is a ``dict`` that maps column names to their tensor representation.
+            label_columns: Columns that correspond to model targets. If this is a
+                string, the target data is a tensor. If this is a list, the target data
+                is a ``dict`` that maps column names to their tensor representation.
+            prefetch_batches: The number of batches to fetch ahead of the current
+                batch. If set to greater than 0, a separate threadpool will be used
+                to fetch the objects to the local node, format the batches, and apply
+                the collate_fn. Defaults to 1. You can revert back to the old
+                prefetching behavior that uses `prefetch_blocks` by setting
+                `use_legacy_iter_batches` to True in the DatasetContext.
+ batch_size: Record batch size. Defaults to 1. + drop_last: Set to True to drop the last incomplete batch, + if the dataset size is not divisible by the batch size. If + False and the size of the stream is not divisible by the batch + size, then the last batch will be smaller. Defaults to False. + local_shuffle_buffer_size: If non-None, the data will be randomly shuffled + using a local in-memory shuffle buffer, and this value will serve as the + minimum number of rows that must be in the local in-memory shuffle + buffer in order to yield a batch. When there are no more rows to add to + the buffer, the remaining rows in the buffer will be drained. This + buffer size must be greater than or equal to ``batch_size``, and + therefore ``batch_size`` must also be specified when using local + shuffling. + local_shuffle_seed: The seed to use for the local random shuffle. + + Returns: + A ``tf.data.Dataset`` that yields inputs and targets. + + .. seealso:: + + :meth:`~ray.data.Dataset.iter_tf_batches` + Call this method if you need more flexibility. + + """ # noqa: E501 + + return self.iterator().to_tf( + feature_columns=feature_columns, + label_columns=label_columns, + prefetch_batches=prefetch_batches, + prefetch_blocks=prefetch_blocks, + drop_last=drop_last, + batch_size=batch_size, + local_shuffle_buffer_size=local_shuffle_buffer_size, + local_shuffle_seed=local_shuffle_seed, + ) + + @ConsumptionAPI(pattern="Time complexity:") + def to_dask( + self, + meta: Union[ + "pandas.DataFrame", + "pandas.Series", + Dict[str, Any], + Iterable[Any], + Tuple[Any], + None, + ] = None, + ) -> "dask.DataFrame": + """Convert this dataset into a Dask DataFrame. + + This is only supported for datasets convertible to Arrow records. + + Note that this function will set the Dask scheduler to Dask-on-Ray + globally, via the config. 
+ + Time complexity: O(dataset size / parallelism) + + Args: + meta: An empty pandas DataFrame or Series that matches the dtypes and column + names of the stream. This metadata is necessary for many algorithms in + dask dataframe to work. For ease of use, some alternative inputs are + also available. Instead of a DataFrame, a dict of ``{name: dtype}`` or + iterable of ``(name, dtype)`` can be provided (note that the order of + the names should match the order of the columns). Instead of a series, a + tuple of ``(name, dtype)`` can be used. + By default, this will be inferred from the underlying Dataset schema, + with this argument supplying an optional override. + + Returns: + A Dask DataFrame created from this dataset. + """ + import dask + import dask.dataframe as dd + import pandas as pd + + try: + import pyarrow as pa + except Exception: + pa = None + + from ray.data._internal.pandas_block import PandasBlockSchema + from ray.util.client.common import ClientObjectRef + from ray.util.dask import ray_dask_get + + dask.config.set(scheduler=ray_dask_get) + + @dask.delayed + def block_to_df(block: Block): + if isinstance(block, (ray.ObjectRef, ClientObjectRef)): + raise ValueError( + "Dataset.to_dask() must be used with Dask-on-Ray, please " + "set the Dask scheduler to ray_dask_get (located in " + "ray.util.dask)." + ) + return _block_to_df(block) + + if meta is None: + from ray.data.extensions import TensorDtype + + # Infer Dask metadata from Dataset schema. 
+ schema = self.schema(fetch_if_missing=True) + if isinstance(schema, PandasBlockSchema): + meta = pd.DataFrame( + { + col: pd.Series( + dtype=( + dtype + if not isinstance(dtype, TensorDtype) + else np.object_ + ) + ) + for col, dtype in zip(schema.names, schema.types) + } + ) + elif pa is not None and isinstance(schema, pa.Schema): + from ray.data.extensions import ArrowTensorType + + if any(isinstance(type_, ArrowTensorType) for type_ in schema.types): + meta = pd.DataFrame( + { + col: pd.Series( + dtype=( + dtype.to_pandas_dtype() + if not isinstance(dtype, ArrowTensorType) + else np.object_ + ) + ) + for col, dtype in zip(schema.names, schema.types) + } + ) + else: + meta = schema.empty_table().to_pandas() + + ddf = dd.from_delayed( + [block_to_df(block) for block in self.get_internal_block_refs()], + meta=meta, + ) + return ddf + + @ConsumptionAPI(pattern="Time complexity:") + def to_mars(self) -> "mars.DataFrame": + """Convert this dataset into a MARS dataframe. + + Time complexity: O(dataset size / parallelism) + + Returns: + A MARS dataframe created from this dataset. + """ + import pandas as pd + import pyarrow as pa + from mars.dataframe.datasource.read_raydataset import DataFrameReadRayDataset + from mars.dataframe.utils import parse_index + + from ray.data._internal.pandas_block import PandasBlockSchema + + refs = self.to_pandas_refs() + # remove this when https://github.com/mars-project/mars/issues/2945 got fixed + schema = self.schema() + if isinstance(schema, Schema): + schema = schema.base_schema # Backwards compat with non strict mode. 
+ if isinstance(schema, PandasBlockSchema): + dtypes = pd.Series(schema.types, index=schema.names) + elif isinstance(schema, pa.Schema): + dtypes = schema.empty_table().to_pandas().dtypes + else: + raise NotImplementedError(f"Unsupported format of schema {schema}") + index_value = parse_index(pd.RangeIndex(-1)) + columns_value = parse_index(dtypes.index, store_data=True) + op = DataFrameReadRayDataset(refs=refs) + return op(index_value=index_value, columns_value=columns_value, dtypes=dtypes) + + @ConsumptionAPI(pattern="Time complexity:") + def to_modin(self) -> "modin.DataFrame": + """Convert this dataset into a Modin dataframe. + + This works by first converting this dataset into a distributed set of + Pandas dataframes (using ``.to_pandas_refs()``). Please see caveats + there. Then the individual dataframes are used to create the modin + DataFrame using + ``modin.distributed.dataframe.pandas.partitions.from_partitions()``. + + This is only supported for datasets convertible to Arrow records. + This function induces a copy of the data. For zero-copy access to the + underlying data, consider using ``.to_arrow()`` or + ``.get_internal_block_refs()``. + + Time complexity: O(dataset size / parallelism) + + Returns: + A Modin dataframe created from this dataset. + """ + + from modin.distributed.dataframe.pandas.partitions import from_partitions + + pd_objs = self.to_pandas_refs() + return from_partitions(pd_objs, axis=0) + + @ConsumptionAPI(pattern="Time complexity:") + def to_spark(self, spark: "pyspark.sql.SparkSession") -> "pyspark.sql.DataFrame": + """Convert this dataset into a Spark dataframe. + + Time complexity: O(dataset size / parallelism) + + Returns: + A Spark dataframe created from this dataset. + """ + import raydp + + schema = self.schema() + if isinstance(schema, Schema): + schema = schema.base_schema # Backwards compat with non strict mode. 
+ return raydp.spark.ray_dataset_to_spark_dataframe( + spark, schema, self.get_internal_block_refs() + ) + + @ConsumptionAPI(pattern="Time complexity:") + def to_pandas(self, limit: int = 100000) -> "pandas.DataFrame": + """Convert this dataset into a single Pandas DataFrame. + + This is only supported for datasets convertible to Arrow or Pandas + records. An error is raised if the number of records exceeds the + provided limit. Note that you can use ``.limit()`` on the dataset + beforehand to truncate the dataset manually. + + Time complexity: O(dataset size) + + Args: + limit: The maximum number of records to return. An error will be + raised if the limit is exceeded. + + Returns: + A Pandas DataFrame created from this dataset, containing a limited + number of records. + """ + count = self.count() + if count > limit: + raise ValueError( + f"the dataset has more than the given limit of {limit} " + f"records: {count}. If you are sure that a DataFrame with " + f"{count} rows will fit in local memory, use " + f"ds.to_pandas(limit={count})." + ) + blocks = self.get_internal_block_refs() + output = DelegatingBlockBuilder() + for block in blocks: + output.add_block(ray.get(block)) + block = output.build() + return _block_to_df(block) + + @ConsumptionAPI(pattern="Time complexity:") + @DeveloperAPI + def to_pandas_refs(self) -> List[ObjectRef["pandas.DataFrame"]]: + """Convert this dataset into a distributed set of Pandas dataframes. + + This is only supported for datasets convertible to Arrow records. + This function induces a copy of the data. For zero-copy access to the + underlying data, consider using ``.to_arrow()`` or + ``.get_internal_block_refs()``. + + Time complexity: O(dataset size / parallelism) + + Returns: + A list of remote Pandas dataframes created from this dataset. 
+ """ + + block_to_df = cached_remote_fn(_block_to_df) + return [block_to_df.remote(block) for block in self.get_internal_block_refs()] + + @DeveloperAPI + def to_numpy_refs( + self, *, column: Optional[str] = None + ) -> List[ObjectRef[np.ndarray]]: + """Convert this dataset into a distributed set of NumPy ndarrays. + + This is only supported for datasets convertible to NumPy ndarrays. + This function induces a copy of the data. For zero-copy access to the + underlying data, consider using ``.to_arrow()`` or + ``.get_internal_block_refs()``. + + Time complexity: O(dataset size / parallelism) + + Args: + column: The name of the column to convert to numpy, or None to specify the + entire row. If not specified for Arrow or Pandas blocks, each returned + future will represent a dict of column ndarrays. + + Returns: + A list of remote NumPy ndarrays created from this dataset. + """ + block_to_ndarray = cached_remote_fn(_block_to_ndarray) + return [ + block_to_ndarray.remote(block, column=column) + for block in self.get_internal_block_refs() + ] + + @ConsumptionAPI(pattern="Time complexity:") + @DeveloperAPI + def to_arrow_refs(self) -> List[ObjectRef["pyarrow.Table"]]: + """Convert this dataset into a distributed set of Arrow tables. + + This is only supported for datasets convertible to Arrow records. + This function is zero-copy if the existing data is already in Arrow + format. Otherwise, the data will be converted to Arrow format. + + Time complexity: O(1) unless conversion is required. + + Returns: + A list of remote Arrow tables created from this dataset. + """ + import pyarrow as pa + + blocks: List[ObjectRef["pyarrow.Table"]] = self.get_internal_block_refs() + # Schema is safe to call since we have already triggered execution with + # get_internal_block_refs. + schema = self.schema(fetch_if_missing=True) + if isinstance(schema, Schema): + schema = schema.base_schema # Backwards compat with non strict mode. + if isinstance(schema, pa.Schema): + # Zero-copy path. 
+ return blocks + + block_to_arrow = cached_remote_fn(_block_to_arrow) + return [block_to_arrow.remote(block) for block in blocks] + + @ConsumptionAPI(pattern="Args:") + def to_random_access_dataset( + self, + key: str, + num_workers: Optional[int] = None, + ) -> RandomAccessDataset: + """Convert this dataset into a distributed RandomAccessDataset (EXPERIMENTAL). + + RandomAccessDataset partitions the dataset across the cluster by the given + sort key, providing efficient random access to records via binary search. A + number of worker actors are created, each of which has zero-copy access to the + underlying sorted data blocks of the dataset. + + Note that the key must be unique in the dataset. If there are duplicate keys, + an arbitrary value is returned. + + This is only supported for Arrow-format datasets. + + Args: + key: The key column over which records can be queried. + num_workers: The number of actors to use to serve random access queries. + By default, this is determined by multiplying the number of Ray nodes + in the cluster by four. As a rule of thumb, you can expect each worker + to provide ~3000 records / second via ``get_async()``, and + ~10000 records / second via ``multiget()``. + """ + if num_workers is None: + num_workers = 4 * len(ray.nodes()) + return RandomAccessDataset(self, key, num_workers=num_workers) + + @ConsumptionAPI + def repeat(self, times: Optional[int] = None) -> "DatasetPipeline": + """Convert this into a DatasetPipeline by looping over this dataset. + + Transformations prior to the call to ``repeat()`` are evaluated once. + Transformations done on the returned pipeline are evaluated on each + loop of the pipeline over the base dataset. + + Note that every repeat of the dataset is considered an "epoch" for + the purposes of ``DatasetPipeline.iter_epochs()``. 
+ + Examples: + >>> import ray + >>> ds = ray.data.range(5, parallelism=1) + >>> # Infinite pipeline of numbers [0, 5) + >>> ds.repeat().take_batch() + {'id': array([0, 1, 2, 3, 4, 0, 1, 2, 3, 4, ...])} + >>> # Can shuffle each epoch (dataset) in the pipeline. + >>> ds.repeat().random_shuffle().take_batch() # doctest: +SKIP + {'id': array([2, 3, 0, 4, 1, 4, 0, 2, 1, 3, ...])} + + Args: + times: The number of times to loop over this dataset, or None + to repeat indefinitely. + """ + from ray.data._internal.plan import _rewrite_read_stage + from ray.data.dataset_pipeline import DatasetPipeline + + ctx = DataContext.get_current() + if self._plan.is_read_stage_equivalent() and ctx.optimize_fuse_read_stages: + blocks, _, stages = self._plan._get_source_blocks_and_stages() + blocks.clear() + blocks, outer_stats, stages = _rewrite_read_stage(blocks, stages) + read_stage = stages[0] + else: + blocks = self._plan.execute() + outer_stats = self._plan.stats() + read_stage = None + uuid = self._get_uuid() + outer_stats.dataset_uuid = uuid + + if times is not None and times < 1: + raise ValueError("`times` must be >= 1, got {}".format(times)) + + class Iterator: + def __init__(self, blocks): + self._blocks = blocks + self._i = 0 + + def __next__(self) -> Callable[[], "Dataset"]: + if times and self._i >= times: + raise StopIteration + epoch = self._i + blocks = self._blocks + self._i += 1 + + def gen(): + ds = Dataset( + ExecutionPlan( + blocks, + outer_stats, + dataset_uuid=uuid, + run_by_consumer=True, + ), + epoch, + lazy=False, + ) + ds._set_uuid(uuid) + return ds + + return gen + + class Iterable: + def __init__(self, blocks): + self._blocks = blocks + + def __iter__(self): + return Iterator(self._blocks) + + pipe = DatasetPipeline(Iterable(blocks), False, length=times or float("inf")) + if read_stage: + pipe = pipe.foreach_window( + lambda ds, read_stage=read_stage: Dataset( + ds._plan.with_stage(read_stage), ds._epoch, True + ) + ) + return pipe + + def window( + self, + 
*, + blocks_per_window: Optional[int] = None, + bytes_per_window: Optional[int] = None, + ) -> "DatasetPipeline": + """Convert this into a DatasetPipeline by windowing over data blocks. + + Transformations prior to the call to ``window()`` are evaluated in + bulk on the entire dataset. Transformations done on the returned + pipeline are evaluated incrementally per window of blocks as data is + read from the output of the pipeline. + + Windowing execution allows for output to be read sooner without + waiting for all transformations to fully execute, and can also improve + efficiency if transforms use different resources (e.g., GPUs). + + Without windowing:: + + [preprocessing......] + [inference.......] + [write........] + Time -----------------------------------------------------------> + + With windowing:: + + [prep1] [prep2] [prep3] + [infer1] [infer2] [infer3] + [write1] [write2] [write3] + Time -----------------------------------------------------------> + + Examples: + >>> import ray + >>> # Create an inference pipeline. + >>> ds = ray.data.read_binary_files(dir) # doctest: +SKIP + >>> infer = ... # doctest: +SKIP + >>> pipe = ds.window(blocks_per_window=10).map(infer) # doctest: +SKIP + DatasetPipeline(num_windows=40, num_stages=2) + >>> # The higher the stage parallelism, the shorter the pipeline. + >>> pipe = ds.window(blocks_per_window=20).map(infer) # doctest: +SKIP + DatasetPipeline(num_windows=20, num_stages=2) + >>> # Outputs can be incrementally read from the pipeline. + >>> for item in pipe.iter_rows(): # doctest: +SKIP + ... print(item) # doctest: +SKIP + + Args: + blocks_per_window: The window size (parallelism) in blocks. + Increasing window size increases pipeline throughput, but also + increases the latency to initial output, since it decreases the + length of the pipeline. Setting this to infinity effectively + disables pipelining. + bytes_per_window: Specify the window size in bytes instead of blocks. 
+ This will be treated as an upper bound for the window size, but each + window will still include at least one block. This is mutually + exclusive with ``blocks_per_window``. + """ + from ray.data._internal.plan import _rewrite_read_stage + from ray.data.dataset_pipeline import DatasetPipeline + + if blocks_per_window is not None and bytes_per_window is not None: + raise ValueError("Only one windowing scheme can be specified.") + + if blocks_per_window is None: + blocks_per_window = 10 + + ctx = DataContext.get_current() + if self._plan.is_read_stage_equivalent() and ctx.optimize_fuse_read_stages: + blocks, _, stages = self._plan._get_source_blocks_and_stages() + blocks.clear() + blocks, outer_stats, stages = _rewrite_read_stage(blocks, stages) + read_stage = stages[0] + else: + blocks = self._plan.execute() + outer_stats = self._plan.stats() + read_stage = None + + class Iterator: + def __init__(self, splits, epoch): + self._splits = splits.copy() + self._epoch = epoch + + def __next__(self) -> "Dataset": + if not self._splits: + raise StopIteration + + blocks = self._splits.pop(0) + + def gen(): + ds = Dataset( + ExecutionPlan(blocks, outer_stats, run_by_consumer=True), + self._epoch, + lazy=True, + ) + return ds + + return gen + + class Iterable: + def __init__(self, blocks, epoch): + if bytes_per_window: + self._splits = blocks.split_by_bytes(bytes_per_window) + else: + self._splits = blocks.split(split_size=blocks_per_window) + try: + sizes = [s.size_bytes() for s in self._splits] + num_blocks = [s.initial_num_blocks() for s in self._splits] + assert [s > 0 for s in sizes], sizes + + def fmt(size_bytes): + if size_bytes > 1024 * 1024 * 1024: + return "{}GiB".format( + round(size_bytes / (1024 * 1024 * 1024), 2) + ) + elif size_bytes > 10 * 1024: + return "{}MiB".format(round(size_bytes / (1024 * 1024), 2)) + else: + return "{}b".format(size_bytes) + + mean_bytes = int(np.mean(sizes)) + logger.info( + "Created DatasetPipeline with {} windows: " + "{} min, {} 
max, {} mean".format( + len(self._splits), + fmt(min(sizes)), + fmt(max(sizes)), + fmt(mean_bytes), + ) + ) + mean_num_blocks = int(np.mean(num_blocks)) + logger.info( + "Blocks per window: " + "{} min, {} max, {} mean".format( + min(num_blocks), + max(num_blocks), + mean_num_blocks, + ) + ) + # TODO(ekl) we should try automatically choosing the default + # windowing settings to meet these best-practice constraints. + avail_parallelism = _estimate_available_parallelism() + if mean_num_blocks < avail_parallelism: + logger.warning( + f"{WARN_PREFIX} This pipeline's parallelism is limited " + f"by its blocks per window to ~{mean_num_blocks} " + "concurrent tasks per window. To maximize " + "performance, increase the blocks per window to at least " + f"{avail_parallelism}. This may require increasing the " + "base dataset's parallelism and/or adjusting the " + "windowing parameters." + ) + else: + logger.info( + f"{OK_PREFIX} This pipeline's per-window parallelism " + "is high enough to fully utilize the cluster." + ) + obj_store_mem = ray.cluster_resources().get( + "object_store_memory", 0 + ) + safe_mem_bytes = int(obj_store_mem * ESTIMATED_SAFE_MEMORY_FRACTION) + if mean_bytes > safe_mem_bytes: + logger.warning( + f"{WARN_PREFIX} This pipeline's windows are " + f"~{fmt(mean_bytes)} in size each and may not fit in " + "object store memory without spilling. To improve " + "performance, consider reducing the size of each window " + f"to {fmt(safe_mem_bytes)} or less." + ) + else: + logger.info( + f"{OK_PREFIX} This pipeline's windows likely fit in " + "object store memory without spilling." 
+ ) + except Exception as e: + logger.info( + "Created DatasetPipeline with {} windows; " + "error getting sizes: {}".format( + len(self._splits), + e, + ) + ) + self._epoch = epoch + + def __iter__(self): + return Iterator(self._splits, self._epoch) + + it = Iterable(blocks, self._epoch) + pipe = DatasetPipeline(it, False, length=len(it._splits)) + if read_stage: + pipe = pipe.foreach_window( + lambda ds, read_stage=read_stage: Dataset( + ds._plan.with_stage(read_stage), ds._epoch, True + ) + ) + return pipe + + @Deprecated(message="Use `Dataset.materialize()` instead.") + def fully_executed(self) -> "MaterializedDataset": + logger.warning( + "Deprecation warning: use Dataset.materialize() instead of " + "fully_executed()." + ) + self._plan.execute(force_read=True) + return self + + @Deprecated(message="Check `isinstance(Dataset, MaterializedDataset)` instead.") + def is_fully_executed(self) -> bool: + logger.warning( + "Deprecation warning: Check " + "`isinstance(Dataset, MaterializedDataset)` " + "instead of using is_fully_executed()." + ) + return self._plan.has_computed_output() + + @ConsumptionAPI(pattern="store memory.", insert_after=True) + def materialize(self) -> "MaterializedDataset": + """Execute and materialize this dataset into object store memory. + + This can be used to read all blocks into memory. By default, Dataset + doesn't read blocks from the datasource until the first transform. + + Note that this does not mutate the original Dataset. Only the blocks of the + returned MaterializedDataset class are pinned in memory. + + Returns: + A MaterializedDataset holding the materialized data blocks. + """ + copy = Dataset.copy(self, _deep_copy=True, _as=MaterializedDataset) + copy._plan.execute(force_read=True) + return copy + + @ConsumptionAPI(pattern="timing information.", insert_after=True) + def stats(self) -> str: + """Returns a string containing execution timing information. 
+ + Note that this does not trigger execution, so if the dataset has not yet + executed, an empty string will be returned. + """ + return self._get_stats_summary().to_string() + + def _get_stats_summary(self) -> DatasetStatsSummary: + return self._plan.stats_summary() + + @ConsumptionAPI(pattern="Time complexity:") + @DeveloperAPI + def get_internal_block_refs(self) -> List[ObjectRef[Block]]: + """Get a list of references to the underlying blocks of this dataset. + + This function can be used for zero-copy access to the data. It blocks + until the underlying blocks are computed. + + Time complexity: O(1) + + Returns: + A list of references to this dataset's blocks. + """ + blocks = self._plan.execute().get_blocks() + self._synchronize_progress_bar() + return blocks + + @Deprecated( + message="Dataset is lazy by default, so this conversion call is no longer " + "needed and this API will be removed in a future release" + ) + def lazy(self) -> "Dataset": + """Enable lazy evaluation. + + Dataset is lazy by default, so this is only useful for datasets created + from :func:`ray.data.from_items() `, which is + eager. + + The returned dataset is a lazy dataset, where all subsequent operations + on the stream won't be executed until the dataset is consumed + (e.g. ``.take()``, ``.iter_batches()``, ``.to_torch()``, ``.to_tf()``, etc.) + or execution is manually triggered via ``.materialize()``. + """ + ds = Dataset( + self._plan, self._epoch, lazy=True, logical_plan=self._logical_plan + ) + ds._set_uuid(self._get_uuid()) + return ds + + def has_serializable_lineage(self) -> bool: + """Whether this dataset's lineage is able to be serialized for storage and + later deserialized, possibly on a different cluster. + + Only datasets that are created from data that we know will still exist at + deserialization time, e.g. data external to this Ray cluster such as persistent + cloud object stores, support lineage-based serialization. 
All of the + ray.data.read_*() APIs support lineage-based serialization. + """ + return self._plan.has_lazy_input() + + @DeveloperAPI + def serialize_lineage(self) -> bytes: + """ + Serialize this dataset's lineage, not the actual data or the existing data + futures, to bytes that can be stored and later deserialized, possibly on a + different cluster. + + Note that this will drop all computed data, and that everything will be + recomputed from scratch after deserialization. + + Use :py:meth:`Dataset.deserialize_lineage` to deserialize the serialized + bytes returned from this method into a Dataset. + + .. note:: + Unioned and zipped datasets, produced by :py:meth`Dataset.union` and + :py:meth:`Dataset.zip`, are not lineage-serializable. + + Returns: + Serialized bytes containing the lineage of this dataset. + """ + if not self.has_serializable_lineage(): + raise ValueError( + "Lineage-based serialization is not supported for this stream, which " + "means that it cannot be used as a tunable hyperparameter. " + "Lineage-based serialization is explicitly NOT supported for unioned " + "or zipped datasets (see docstrings for those methods), and is only " + "supported for datasets created from data that we know will still " + "exist at deserialization time, e.g. external data in persistent cloud " + "object stores or in-memory data from long-lived clusters. Concretely, " + "all ray.data.read_*() APIs should support lineage-based " + "serialization, while all of the ray.data.from_*() APIs do not. To " + "allow this stream to be serialized to storage, write the data to an " + "external store (such as AWS S3, GCS, or Azure Blob Storage) using the " + "Dataset.write_*() APIs, and serialize a new dataset reading " + "from the external store using the ray.data.read_*() APIs." + ) + # Copy Dataset and clear the blocks from the execution plan so only the + # Dataset's lineage is serialized. 
+ plan_copy = self._plan.deep_copy(preserve_uuid=True) + ds = Dataset(plan_copy, self._get_epoch(), self._lazy) + ds._plan.clear_block_refs() + ds._set_uuid(self._get_uuid()) + + def _reduce_remote_fn(rf: ray.remote_function.RemoteFunction): + # Custom reducer for Ray remote function handles that allows for + # cross-cluster serialization. + # This manually unsets the last export session and job to force re-exporting + # of the function when the handle is deserialized on a new cluster. + # TODO(Clark): Fix this in core Ray, see issue: + # https://github.com/ray-project/ray/issues/24152. + reconstructor, args, state = rf.__reduce__() + state["_last_export_session_and_job"] = None + return reconstructor, args, state + + context = ray._private.worker.global_worker.get_serialization_context() + try: + context._register_cloudpickle_reducer( + ray.remote_function.RemoteFunction, _reduce_remote_fn + ) + serialized = pickle.dumps(ds) + finally: + context._unregister_cloudpickle_reducer(ray.remote_function.RemoteFunction) + return serialized + + @staticmethod + @DeveloperAPI + def deserialize_lineage(serialized_ds: bytes) -> "Dataset": + """ + Deserialize the provided lineage-serialized Dataset. + + This assumes that the provided serialized bytes were serialized using + :py:meth:`Dataset.serialize_lineage`. + + Args: + serialized_ds: The serialized Dataset that we wish to deserialize. + + Returns: + A deserialized ``Dataset`` instance. 
+ """ + return pickle.loads(serialized_ds) + + def _divide(self, block_idx: int) -> ("Dataset", "Dataset"): + block_list = self._plan.execute() + left, right = block_list.divide(block_idx) + l_ds = Dataset( + ExecutionPlan( + left, self._plan.stats(), run_by_consumer=block_list._owned_by_consumer + ), + self._epoch, + self._lazy, + ) + r_ds = Dataset( + ExecutionPlan( + right, self._plan.stats(), run_by_consumer=block_list._owned_by_consumer + ), + self._epoch, + self._lazy, + ) + return l_ds, r_ds + + @Deprecated(message="The batch format is no longer exposed as a public API.") + def default_batch_format(self) -> Type: + context = DataContext.get_current() + if context.strict_mode: + raise StrictModeError("default_batch_format() is not allowed in Ray 2.5") + + import pandas as pd + import pyarrow as pa + + schema = self.schema() + assert isinstance(schema, (type, PandasBlockSchema, pa.Schema)) + + if isinstance(schema, type): + return list + + if isinstance(schema, (PandasBlockSchema, pa.Schema)): + if schema.names == [TENSOR_COLUMN_NAME]: + return np.ndarray + return pd.DataFrame + + @Deprecated(message="The dataset format is no longer exposed as a public API.") + def dataset_format(self) -> BlockFormat: + context = DataContext.get_current() + if context.strict_mode: + raise StrictModeError("dataset_format() is not allowed in Ray 2.5") + + if context.use_streaming_executor: + raise DeprecationWarning( + "`dataset_format` is deprecated for streaming execution. To use " + "`dataset_format`, you must explicitly enable bulk execution by " + "setting `use_streaming_executor` to False in the `DataContext`" + ) + + # We need schema to properly validate, so synchronously + # fetch it if necessary. + schema = self.schema(fetch_if_missing=True) + if schema is None: + raise ValueError( + "Dataset is empty or cleared, can't determine the format of " + "the dataset." 
+ ) + + try: + import pyarrow as pa + + if isinstance(schema, pa.Schema): + return BlockFormat.ARROW + except ModuleNotFoundError: + pass + from ray.data._internal.pandas_block import PandasBlockSchema + + if isinstance(schema, PandasBlockSchema): + return BlockFormat.PANDAS + return BlockFormat.SIMPLE + + def _aggregate_on( + self, agg_cls: type, on: Optional[Union[str, List[str]]], *args, **kwargs + ): + """Helper for aggregating on a particular subset of the dataset. + + This validates the `on` argument, and converts a list of column names + or lambdas to a multi-aggregation. A null `on` results in a + multi-aggregation on all columns for an Arrow Dataset, and a single + aggregation on the entire row for a simple Dataset. + """ + aggs = self._build_multicolumn_aggs(agg_cls, on, *args, **kwargs) + return self.aggregate(*aggs) + + def _build_multicolumn_aggs( + self, + agg_cls: type, + on: Optional[Union[str, List[str]]], + ignore_nulls: bool, + *args, + skip_cols: Optional[List[str]] = None, + **kwargs, + ): + """Build set of aggregations for applying a single aggregation to + multiple columns. + """ + # Expand None into an aggregation for each column. + if on is None: + schema = self.schema(fetch_if_missing=True) + if schema is not None and not isinstance(schema, type): + if not skip_cols: + skip_cols = [] + if len(schema.names) > 0: + on = [col for col in schema.names if col not in skip_cols] + + if not isinstance(on, list): + on = [on] + return [agg_cls(on_, *args, ignore_nulls=ignore_nulls, **kwargs) for on_ in on] + + def _aggregate_result(self, result: Union[Tuple, Mapping]) -> U: + if result is not None and len(result) == 1: + if isinstance(result, tuple): + return result[0] + else: + # NOTE (kfstorm): We cannot call `result[0]` directly on + # `PandasRow` because indexing a column with position is not + # supported by pandas. 
+ return list(result.values())[0] + else: + return result + + @ensure_notebook_deps( + ["ipywidgets", "8"], + ) + @fallback_if_colab + def _ipython_display_(self): + from ipywidgets import HTML, VBox, Layout + from IPython.display import display + + title = HTML(f"

    {self.__class__.__name__}

    ") + tab = self._tab_repr_() + + if tab: + display(VBox([title, tab], layout=Layout(width="100%"))) + + @ensure_notebook_deps( + ["tabulate", None], + ["ipywidgets", "8"], + ) + def _tab_repr_(self): + from ray._private.thirdparty.tabulate.tabulate import tabulate + from ipywidgets import Tab, HTML + + metadata = { + "num_blocks": self._plan.initial_num_blocks(), + "num_rows": self._meta_count(), + } + # Show metadata if available, but don't trigger execution. + schema = self.schema(fetch_if_missing=False) + if schema is None: + schema_repr = Template("rendered_html_common.html.j2").render( + content="
    Unknown schema
    " + ) + elif isinstance(schema, type): + schema_repr = Template("rendered_html_common.html.j2").render( + content=f"
    Data type: {html.escape(str(schema))}
    " + ) + else: + schema_data = {} + for sname, stype in zip(schema.names, schema.types): + schema_data[sname] = getattr(stype, "__name__", str(stype)) + + schema_repr = Template("scrollableTable.html.j2").render( + table=tabulate( + tabular_data=schema_data.items(), + tablefmt="html", + showindex=False, + headers=["Name", "Type"], + ), + max_height="300px", + ) + + children = [] + children.append( + HTML( + Template("scrollableTable.html.j2").render( + table=tabulate( + tabular_data=metadata.items(), + tablefmt="html", + showindex=False, + headers=["Field", "Value"], + ), + max_height="300px", + ) + ) + ) + children.append(HTML(schema_repr)) + return Tab(children, titles=["Metadata", "Schema"]) + + def __repr__(self) -> str: + return self._plan.get_plan_as_string(self.__class__.__name__) + + def __str__(self) -> str: + return repr(self) + + def __bool__(self) -> bool: + # Prevents `__len__` from being called to check if it is None + # see: issue #25152 + return True + + def __len__(self) -> int: + raise AttributeError( + "Use `ds.count()` to compute the length of a distributed Dataset. " + "This may be an expensive operation." + ) + + def __iter__(self): + raise TypeError( + "`Dataset` objects aren't iterable. To iterate records, call " + "`ds.iter_rows()` or `ds.iter_batches()`. For more information, read " + "https://docs.ray.io/en/latest/data/consuming-datasets.html." 
+ ) + + def _block_num_rows(self) -> List[int]: + get_num_rows = cached_remote_fn(_get_num_rows) + return ray.get([get_num_rows.remote(b) for b in self.get_internal_block_refs()]) + + def _block_size_bytes(self) -> List[int]: + get_size_bytes = cached_remote_fn(_get_size_bytes) + return ray.get( + [get_size_bytes.remote(b) for b in self.get_internal_block_refs()] + ) + + def _meta_count(self) -> Optional[int]: + return self._plan.meta_count() + + def _get_uuid(self) -> str: + return self._uuid + + def _set_uuid(self, uuid: str) -> None: + self._uuid = uuid + + def _get_epoch(self) -> int: + return self._epoch + + def _set_epoch(self, epoch: int) -> None: + self._epoch = epoch + + def _warn_slow(self): + if ray.util.log_once("dataset_slow_warned"): + logger.warning( + "The `map`, `flat_map`, and `filter` operations are unvectorized and " + "can be very slow. Consider using `.map_batches()` instead." + ) + + def _synchronize_progress_bar(self): + """Flush progress bar output by shutting down the current executor. + + This should be called at the end of all blocking APIs (e.g., `take`), but not + async APIs (e.g., `iter_batches`). + + The streaming executor runs in a separate generator / thread, so it is + possible the shutdown logic runs even after a call to retrieve rows from the + stream has finished. Explicit shutdown avoids this, which can clobber console + output (https://github.com/ray-project/ray/issues/32414). + """ + if self._current_executor: + self._current_executor.shutdown() + self._current_executor = None + + def __getstate__(self): + # Note: excludes _current_executor which is not serializable. 
+ return { + "plan": self._plan, + "uuid": self._uuid, + "epoch": self._epoch, + "lazy": self._lazy, + "logical_plan": self._logical_plan, + } + + def __setstate__(self, state): + self._plan = state["plan"] + self._uuid = state["uuid"] + self._epoch = state["epoch"] + self._lazy = state["lazy"] + self._logical_plan = state["logical_plan"] + self._current_executor = None + + def __del__(self): + if sys.meta_path is None: + return + if self._current_executor and ray is not None and ray.is_initialized(): + self._current_executor.shutdown() + # Backwards compatibility alias. -Dataset = Datastream +Dataset = Dataset + + +@PublicAPI +class MaterializedDataset(Dataset, Generic[T]): + """A Dataset materialized in Ray memory, e.g., via `.materialize()`. + + The blocks of a MaterializedDataset object are materialized into Ray object store + memory, which means that this class can be shared or iterated over by multiple Ray + tasks without re-executing the underlying computations for producing the stream. + """ + + pass + + +@PublicAPI(stability="beta") +class Schema: + """Dataset schema. + + Attributes: + names: List of column names of this Dataset. + types: List of Arrow types of the Dataset. Note that the "object" type is + not Arrow compatible and hence will be returned as `object`. + base_schema: The underlying Arrow or Pandas schema. + """ + + def __init__(self, base_schema: Union["pyarrow.lib.Schema", "PandasBlockSchema"]): + self.base_schema = base_schema + + @property + def names(self) -> List[str]: + """Lists the columns of this Dataset.""" + return self.base_schema.names + + @property + def types(self) -> List[Union[Literal[object], "pyarrow.DataType"]]: + """Lists the types of this Dataset in Arrow format + + For non-Arrow compatible types, we return "object". 
+ """ + import pyarrow as pa + from ray.data.extensions import TensorDtype, ArrowTensorType + + if isinstance(self.base_schema, pa.lib.Schema): + return list(self.base_schema.types) + + arrow_types = [] + for dtype in self.base_schema.types: + if isinstance(dtype, TensorDtype): + # Manually convert our Pandas tensor extension type to Arrow. + arrow_types.append( + ArrowTensorType( + shape=dtype._shape, dtype=pa.from_numpy_dtype(dtype._dtype) + ) + ) + else: + try: + arrow_types.append(pa.from_numpy_dtype(dtype)) + except pa.ArrowNotImplementedError: + arrow_types.append(object) + except Exception: + logger.exception(f"Error converting dtype {dtype} to Arrow.") + arrow_types.append(None) + return arrow_types + + def __eq__(self, other): + return isinstance(other, Schema) and other.base_schema == self.base_schema + + def __str__(self): + return f"Schema({dict(zip(self.names, self.types))})" + + def __repr__(self): + return str(self) + + +def _get_size_bytes(block: Block) -> int: + block = BlockAccessor.for_block(block) + return block.size_bytes() + + +def _block_to_df(block: Block): + block = BlockAccessor.for_block(block) + return block.to_pandas() + + +def _block_to_ndarray(block: Block, column: Optional[str]): + block = BlockAccessor.for_block(block) + return block.to_numpy(column) + + +def _block_to_arrow(block: Block): + block = BlockAccessor.for_block(block) + return block.to_arrow() + + +def _sliding_window(iterable: Iterable, n: int): + """Creates an iterator consisting of n-width sliding windows over + iterable. The sliding windows are constructed lazily such that an + element on the base iterator (iterable) isn't consumed until the + first sliding window containing that element is reached. + + If n > len(iterable), then a single len(iterable) window is + returned. + + Args: + iterable: The iterable on which the sliding window will be + created. + n: The width of the sliding window. + + Returns: + An iterator of n-width windows over iterable. 
+ If n > len(iterable), then a single len(iterable) window is + returned. + """ + it = iter(iterable) + window = collections.deque(itertools.islice(it, n), maxlen=n) + if len(window) > 0: + yield tuple(window) + for elem in it: + window.append(elem) + yield tuple(window) + + +def _do_write( + ds: Datasource, + ctx: DataContext, + blocks: List[Block], + meta: List[BlockMetadata], + ray_remote_args: Dict[str, Any], + write_args: Dict[str, Any], +) -> List[ObjectRef[WriteResult]]: + write_args = _unwrap_arrow_serialization_workaround(write_args) + DataContext._set_current(ctx) + return ds.do_write(blocks, meta, ray_remote_args=ray_remote_args, **write_args) diff --git a/python/ray/data/dataset_pipeline.py b/python/ray/data/dataset_pipeline.py index 637e279928ad..4851b1c5792c 100644 --- a/python/ray/data/dataset_pipeline.py +++ b/python/ray/data/dataset_pipeline.py @@ -30,7 +30,7 @@ PipelinedDataIterator, ) from ray.data._internal.plan import ExecutionPlan -from ray.data._internal.stats import DatasetPipelineStats, DatastreamStats +from ray.data._internal.stats import DatasetPipelineStats, DatasetStats from ray.data.block import ( UserDefinedFunction, Block, @@ -38,7 +38,7 @@ _apply_strict_mode_batch_format, ) from ray.data.context import DataContext -from ray.data.datastream import Datastream +from ray.data.dataset import Dataset from ray.data.iterator import DataIterator from ray.data.datasource import Datasource from ray.data.datasource.file_based_datasource import ( @@ -67,26 +67,26 @@ @PublicAPI class DatasetPipeline: - """Implements a pipeline of Datastreams. + """Implements a pipeline of Datasets. DatasetPipelines implement pipelined execution. This allows for the overlapped execution of data input (e.g., reading files), computation (e.g. feature preprocessing), and output (e.g., distributed ML training). 
- A DatasetPipeline can be created by either repeating a Datastream - (``ds.repeat(times=None)``), by turning a single Datastream into a pipeline + A DatasetPipeline can be created by either repeating a Dataset + (``ds.repeat(times=None)``), by turning a single Dataset into a pipeline (``ds.window(blocks_per_window=10)``), or defined explicitly using ``DatasetPipeline.from_iterable()``. - DatasetPipeline supports the all the per-record transforms of Datastreams + DatasetPipeline supports the all the per-record transforms of Datasets (e.g., map, flat_map, filter), holistic transforms (e.g., repartition), and output methods (e.g., iter_rows, to_tf, to_torch, write_datasource). """ def __init__( self, - base_iterable: Iterable[Callable[[], Datastream]], - stages: List[Callable[[Datastream], Datastream]] = None, + base_iterable: Iterable[Callable[[], Dataset]], + stages: List[Callable[[Dataset], Dataset]] = None, length: Optional[int] = None, progress_bars: bool = DataContext.get_current().enable_progress_bars, _executed: List[bool] = None, @@ -94,7 +94,7 @@ def __init__( """Construct a DatasetPipeline (internal API). The constructor is not part of the DatasetPipeline API. Use the - ``Datastream.repeat()``, ``Datastream.window()``, or + ``Dataset.repeat()``, ``Dataset.window()``, or ``DatasetPipeline.from_iterable()`` methods to construct a pipeline. """ self._base_iterable = base_iterable @@ -106,19 +106,17 @@ def __init__( # Whether the pipeline execution has started. # This variable is shared across all pipelines descending from this. 
self._executed = _executed or [False] - self._first_datastream: Optional[Datastream] = None - self._remaining_datastreams_iter: Optional[ - Iterator[Callable[[], Datastream]] - ] = None + self._first_dataset: Optional[Dataset] = None + self._remaining_datasets_iter: Optional[Iterator[Callable[[], Dataset]]] = None self._schema = None self._stats = DatasetPipelineStats() def iterator(self) -> DataIterator: """Return a :class:`~ray.data.DataIterator` that - can be used to repeatedly iterate over the datastream. + can be used to repeatedly iterate over the dataset. - Note that each pass iterates over the entire original Datastream, even if - the datastream was windowed with ``.window()``. + Note that each pass iterates over the entire original Dataset, even if + the dataset was windowed with ``.window()``. Examples: >>> import ray @@ -225,9 +223,9 @@ def iter_batches( if self._executed[0]: raise RuntimeError("Pipeline cannot be read multiple times.") time_start = time.perf_counter() - if self._first_datastream is not None: + if self._first_dataset is not None: blocks_owned_by_consumer = ( - self._first_datastream._plan.execute()._owned_by_consumer + self._first_dataset._plan.execute()._owned_by_consumer ) else: blocks_owned_by_consumer = self._peek()._plan.execute()._owned_by_consumer @@ -298,15 +296,15 @@ def split( ) def split_at_indices(self, indices: List[int]) -> List["DatasetPipeline"]: - """Split the datastreams within the pipeline at the given indices + """Split the datasets within the pipeline at the given indices (like np.split). 
- This will split each datastream contained within this pipeline, thereby + This will split each dataset contained within this pipeline, thereby producing len(indices) + 1 pipelines with the first pipeline containing - the [0, indices[0]) slice from each datastream, the second pipeline - containing the [indices[0], indices[1]) slice from each datastream, and so + the [0, indices[0]) slice from each dataset, the second pipeline + containing the [indices[0], indices[1]) slice from each dataset, and so on, with the final pipeline will containing the - [indices[-1], self.count()) slice from each datastream. + [indices[-1], self.count()) slice from each dataset. Examples: >>> import ray @@ -342,7 +340,7 @@ def split_at_indices(self, indices: List[int]) -> List["DatasetPipeline"]: return self._split(len(indices) + 1, lambda ds: ds.split_at_indices(indices)) def _split( - self, n: int, splitter: Callable[[Datastream], List["Datastream"]] + self, n: int, splitter: Callable[[Dataset], List["Dataset"]] ) -> List["DatasetPipeline"]: ctx = DataContext.get_current() scheduling_strategy = ctx.scheduling_strategy @@ -377,9 +375,7 @@ def __next__(self): tries = 0 while ds is None: ds = ray.get( - self.coordinator.next_datastream_if_ready.remote( - self.split_index - ) + self.coordinator.next_dataset_if_ready.remote(self.split_index) ) # Wait for other shards to catch up reading. if not ds: @@ -412,13 +408,13 @@ def __next__(self): def rewindow( self, *, blocks_per_window: int, preserve_epoch: bool = True ) -> "DatasetPipeline": - """Change the windowing (blocks per datastream) of this pipeline. + """Change the windowing (blocks per dataset) of this pipeline. Changes the windowing of this pipeline to the specified size. For - example, if the current pipeline has two blocks per datastream, and - `.rewindow(blocks_per_window=4)` is requested, adjacent datastreams will - be merged until each datastream is 4 blocks. 
If - `.rewindow(blocks_per_window)` was requested the datastreams will be + example, if the current pipeline has two blocks per dataset, and + `.rewindow(blocks_per_window=4)` is requested, adjacent datasets will + be merged until each dataset is 4 blocks. If + `.rewindow(blocks_per_window)` was requested the datasets will be split into smaller windows. Args: @@ -430,9 +426,9 @@ def rewindow( class WindowIterator: def __init__(self, original_iter): self._original_iter = original_iter - self._buffer: Optional[Datastream] = None + self._buffer: Optional[Dataset] = None - def __next__(self) -> Datastream: + def __next__(self) -> Dataset: try: # Merge windows until we meet the requested window size. if self._buffer is None: @@ -477,7 +473,7 @@ def __iter__(self): length = None # The newly created DatasetPipeline will contain a PipelineExecutor (because - # this will execute the pipeline so far to iter the datastreams). In order to + # this will execute the pipeline so far to iter the datasets). In order to # make this new DatasetPipeline serializable, we need to make sure the # PipelineExecutor has not been iterated. So this uses # _iter_datasets_without_peek() instead of iter_datasets(). @@ -514,7 +510,7 @@ def __init__(self, original_iter): # This is calculated later. self._max_i = None - def __next__(self) -> Callable[[], Datastream]: + def __next__(self) -> Callable[[], Dataset]: # Still going through the original pipeline. if self._original_iter: try: @@ -572,10 +568,10 @@ def __iter__(self): def schema( self, fetch_if_missing: bool = False ) -> Union[type, "pyarrow.lib.Schema"]: - """Return the schema of the datastream pipeline. + """Return the schema of the dataset pipeline. - For datastreams of Arrow records, this will return the Arrow schema. - For datastream of Python objects, this returns their Python type. + For datasets of Arrow records, this will return the Arrow schema. + For dataset of Python objects, this returns their Python type. 
Note: This is intended to be a method for peeking schema before the execution of DatasetPipeline. If execution has already started, @@ -597,7 +593,7 @@ def schema( return self._schema def dataset_format(self) -> BlockFormat: - """The format of the datastream pipeline's underlying data blocks. Possible + """The format of the dataset pipeline's underlying data blocks. Possible values are: "arrow", "pandas" and "simple". This may block; if the schema is unknown, this will synchronously fetch @@ -608,8 +604,8 @@ def dataset_format(self) -> BlockFormat: schema = self.schema(fetch_if_missing=True) if schema is None: raise ValueError( - "Datastream is empty or cleared, can't determine the format of " - "the datastream." + "Dataset is empty or cleared, can't determine the format of " + "the dataset." ) try: @@ -626,14 +622,14 @@ def dataset_format(self) -> BlockFormat: return BlockFormat.SIMPLE def count(self) -> int: - """Count the number of records in the datastream pipeline. + """Count the number of records in the dataset pipeline. This blocks until the entire pipeline is fully executed. - Time complexity: O(datastream size / parallelism) + Time complexity: O(dataset size / parallelism) Returns: - The number of records in the datastream pipeline. + The number of records in the dataset pipeline. """ if self._length == float("inf"): raise ValueError("Cannot count a pipeline of infinite length.") @@ -649,14 +645,14 @@ def batch_len(batch): return total def sum(self) -> int: - """Sum the records in the datastream pipeline. + """Sum the records in the dataset pipeline. This blocks until the entire pipeline is fully executed. - Time complexity: O(datastream size / parallelism) + Time complexity: O(dataset size / parallelism) Returns: - The sum of the records in the datastream pipeline. + The sum of the records in the dataset pipeline. 
""" if self._length == float("inf"): raise ValueError("Cannot sum a pipeline of infinite length.") @@ -669,14 +665,14 @@ def sum(self) -> int: total += elem["sum"] return total - def show_windows(self, limit_per_datastream: int = 10) -> None: - """Print up to the given number of records from each window/datastream. + def show_windows(self, limit_per_dataset: int = 10) -> None: + """Print up to the given number of records from each window/dataset. This is helpful as a debugging tool for understanding the structure of - datastream pipelines. + dataset pipelines. Args: - limit_per_datastream: Rows to print per window/datastream. + limit_per_dataset: Rows to print per window/dataset. """ epoch = None for i, ds in enumerate(self.iter_datasets()): @@ -684,12 +680,12 @@ def show_windows(self, limit_per_datastream: int = 10) -> None: epoch = ds._get_epoch() print("------ Epoch {} ------".format(epoch)) print("=== Window {} ===".format(i)) - ds.show(limit_per_datastream) + ds.show(limit_per_dataset) def iter_epochs(self, max_epoch: int = -1) -> Iterator["DatasetPipeline"]: """Split this pipeline up by epoch. - This allows reading of data per-epoch for repeated Datastreams, which is + This allows reading of data per-epoch for repeated Datasets, which is useful for ML training. For example, ``ray.data.range(10).repeat(50)`` generates a pipeline with 500 rows total split across 50 epochs. 
This method allows iterating over the data individually per epoch @@ -712,7 +708,7 @@ def iter_epochs(self, max_epoch: int = -1) -> Iterator["DatasetPipeline"]: """ class Peekable: - def __init__(self, base_iter: Iterator[Datastream]): + def __init__(self, base_iter: Iterator[Dataset]): self._iter = base_iter self._buffer = None @@ -724,13 +720,13 @@ def _fill_buffer_if_possible(self): except StopIteration: pass - def peek(self) -> Datastream: + def peek(self) -> Dataset: self._fill_buffer_if_possible() if self._buffer is None: raise StopIteration return self._buffer - def __next__(self) -> Datastream: + def __next__(self) -> Dataset: self._fill_buffer_if_possible() if self._buffer is None: raise StopIteration @@ -739,11 +735,11 @@ def __next__(self) -> Datastream: return item class SingleEpochIterator: - def __init__(self, peekable_iter: Iterator[Datastream], epoch: int): + def __init__(self, peekable_iter: Iterator[Dataset], epoch: int): self._iter = peekable_iter self._epoch = epoch - def __next__(self) -> Datastream: + def __next__(self) -> Dataset: if self._iter.peek()._get_epoch() > self._epoch: raise StopIteration ds = next(self._iter) @@ -791,7 +787,7 @@ def map( compute: Union[str, ComputeStrategy] = None, **ray_remote_args, ) -> "DatasetPipeline": - """Apply :py:meth:`Datastream.map ` to each datastream/window + """Apply :py:meth:`Dataset.map ` to each dataset/window in this pipeline.""" return self.foreach_window( lambda ds: ds.map(fn, compute=compute, **ray_remote_args) @@ -810,8 +806,8 @@ def map_batches( fn_constructor_kwargs: Optional[Dict[str, Any]] = None, **ray_remote_args, ) -> "DatasetPipeline": - """Apply :py:meth:`Datastream.map_batches ` to each - datastream/window in this pipeline.""" + """Apply :py:meth:`Dataset.map_batches ` to each + dataset/window in this pipeline.""" batch_format = _apply_strict_mode_batch_format(batch_format) return self.foreach_window( @@ -835,8 +831,8 @@ def flat_map( compute: Union[str, ComputeStrategy] = None, 
**ray_remote_args, ) -> "DatasetPipeline": - """Apply :py:meth:`Datastream.flat_map ` to each - datastream/window in this pipeline.""" + """Apply :py:meth:`Dataset.flat_map ` to each + dataset/window in this pipeline.""" return self.foreach_window( lambda ds: ds.flat_map(fn, compute=compute, **ray_remote_args) ) @@ -848,8 +844,8 @@ def filter( compute: Union[str, ComputeStrategy] = None, **ray_remote_args, ) -> "DatasetPipeline": - """Apply :py:meth:`Datastream.filter ` to each - datastream/window in this pipeline.""" + """Apply :py:meth:`Dataset.filter ` to each + dataset/window in this pipeline.""" return self.foreach_window( lambda ds: ds.filter(fn, compute=compute, **ray_remote_args) ) @@ -862,8 +858,8 @@ def add_column( compute: Optional[str] = None, **ray_remote_args, ) -> "DatasetPipeline": - """Apply :py:meth:`Datastream.add_column ` to each - datastream/window in this pipeline.""" + """Apply :py:meth:`Dataset.add_column ` to each + dataset/window in this pipeline.""" return self.foreach_window( lambda ds: ds.add_column(col, fn, compute=compute, **ray_remote_args) ) @@ -875,8 +871,8 @@ def drop_columns( compute: Optional[str] = None, **ray_remote_args, ) -> "DatasetPipeline": - """Apply :py:meth:`Datastream.drop_columns ` to - each datastream/window in this pipeline.""" + """Apply :py:meth:`Dataset.drop_columns ` to + each dataset/window in this pipeline.""" return self.foreach_window( lambda ds: ds.drop_columns(cols, compute=compute, **ray_remote_args) ) @@ -888,8 +884,8 @@ def select_columns( compute: Optional[str] = None, **ray_remote_args, ) -> "DatasetPipeline": - """Apply :py:meth:`Datastream.select_columns ` to - each datastream/window in this pipeline.""" + """Apply :py:meth:`Dataset.select_columns ` to + each dataset/window in this pipeline.""" return self.foreach_window( lambda ds: ds.select_columns(cols, compute=compute, **ray_remote_args) ) @@ -897,8 +893,8 @@ def select_columns( def repartition_each_window( self, num_blocks: int, *, shuffle: 
bool = False ) -> "DatasetPipeline": - """Apply :py:meth:`Datastream.repartition ` to each - datastream/window in this pipeline.""" + """Apply :py:meth:`Dataset.repartition ` to each + dataset/window in this pipeline.""" return self.foreach_window( lambda ds: ds.repartition(num_blocks, shuffle=shuffle) ) @@ -910,8 +906,8 @@ def random_shuffle_each_window( num_blocks: Optional[int] = None, **ray_remote_args, ) -> "DatasetPipeline": - """Apply :py:meth:`Datastream.random_shuffle ` to - each datastream/window in this pipeline.""" + """Apply :py:meth:`Dataset.random_shuffle ` to + each dataset/window in this pipeline.""" return self.foreach_window( lambda ds: ds.random_shuffle( seed=seed, num_blocks=num_blocks, **ray_remote_args @@ -921,15 +917,15 @@ def random_shuffle_each_window( def sort_each_window( self, key: Optional[str] = None, descending: bool = False ) -> "DatasetPipeline": - """Apply :py:meth:`Datastream.sort ` to each datastream/window + """Apply :py:meth:`Dataset.sort ` to each dataset/window in this pipeline.""" return self.foreach_window(lambda ds: ds.sort(key, descending)) def randomize_block_order_each_window( self, *, seed: Optional[int] = None ) -> "DatasetPipeline": - """Apply :py:meth:`Datastream.randomize_block_order - ` to each datastream/window in this + """Apply :py:meth:`Dataset.randomize_block_order + ` to each dataset/window in this pipeline.""" return self.foreach_window(lambda ds: ds.randomize_block_order(seed=seed)) @@ -945,9 +941,9 @@ def write_json( ray_remote_args: Dict[str, Any] = None, **pandas_json_args, ) -> None: - """Call :py:meth:`Datastream.write_json ` on each - output datastream of this pipeline.""" - self._write_each_datastream( + """Call :py:meth:`Dataset.write_json ` on each + output dataset of this pipeline.""" + self._write_each_dataset( lambda ds: ds.write_json( path, filesystem=filesystem, @@ -972,9 +968,9 @@ def write_csv( ray_remote_args: Dict[str, Any] = None, **arrow_csv_args, ) -> None: - """Call 
:py:meth:`Datastream.write_csv ` on each - output datastream of this pipeline.""" - self._write_each_datastream( + """Call :py:meth:`Dataset.write_csv ` on each + output dataset of this pipeline.""" + self._write_each_dataset( lambda ds: ds.write_csv( path, filesystem=filesystem, @@ -999,9 +995,9 @@ def write_parquet( ray_remote_args: Dict[str, Any] = None, **arrow_parquet_args, ) -> None: - """Call :py:meth:`Datastream.write_parquet ` on - each output datastream of this pipeline.""" - self._write_each_datastream( + """Call :py:meth:`Dataset.write_parquet ` on + each output dataset of this pipeline.""" + self._write_each_dataset( lambda ds: ds.write_parquet( path, filesystem=filesystem, @@ -1024,9 +1020,9 @@ def write_tfrecords( block_path_provider: BlockWritePathProvider = DefaultBlockWritePathProvider(), ray_remote_args: Dict[str, Any] = None, ) -> None: - """Call :py:meth:`Datastream.write_tfrecords ` on - each output datastream of this pipeline.""" - self._write_each_datastream( + """Call :py:meth:`Dataset.write_tfrecords ` on + each output dataset of this pipeline.""" + self._write_each_dataset( lambda ds: ds.write_tfrecords( path, filesystem=filesystem, @@ -1044,9 +1040,9 @@ def write_datasource( ray_remote_args: Dict[str, Any] = None, **write_args, ) -> None: - """Call :py:meth:`Datastream.write_datasource ` - on each output datastream of this pipeline.""" - self._write_each_datastream( + """Call :py:meth:`Dataset.write_datasource ` + on each output dataset of this pipeline.""" + self._write_each_dataset( lambda ds: ds.write_datasource( datasource, ray_remote_args=ray_remote_args, @@ -1055,26 +1051,26 @@ def write_datasource( ) def take(self, limit: int = 20) -> List[Dict[str, Any]]: - """Call :py:meth:`Datastream.take ` over the stream of + """Call :py:meth:`Dataset.take ` over the stream of output batches from the pipeline""" - return Datastream.take(self, limit) + return Dataset.take(self, limit) def take_all(self, limit: Optional[int] = None) -> 
List[Dict[str, Any]]: - """Call :py:meth:`Datastream.take_all ` over the stream + """Call :py:meth:`Dataset.take_all ` over the stream of output batches from the pipeline""" - return Datastream.take_all(self, limit) + return Dataset.take_all(self, limit) def take_batch( self, batch_size: int = 20, *, batch_format: Optional[str] = "default" ) -> DataBatch: - """Call :py:meth:`Datastream.take_batch ` + """Call :py:meth:`Dataset.take_batch ` over the stream of output batches from the pipeline""" - return Datastream.take_batch(self, batch_size, batch_format=batch_format) + return Dataset.take_batch(self, batch_size, batch_format=batch_format) def show(self, limit: int = 20) -> None: - """Call :py:meth:`Datastream.show ` over the stream of + """Call :py:meth:`Dataset.show ` over the stream of output batches from the pipeline""" - return Datastream.show(self, limit) + return Dataset.show(self, limit) def iter_tf_batches( self, @@ -1087,7 +1083,7 @@ def iter_tf_batches( local_shuffle_seed: Optional[int] = None, ) -> Iterator[Union["tf.Tensor", Dict[str, "tf.Tensor"]]]: """Call - :py:meth:`Datastream.iter_tf_batches ` + :py:meth:`Dataset.iter_tf_batches ` over the stream of output batches from the pipeline.""" batch_format = _apply_strict_mode_batch_format(batch_format) return DataIterator.iter_tf_batches( @@ -1114,8 +1110,8 @@ def iter_torch_batches( local_shuffle_seed: Optional[int] = None, ) -> Iterator["TorchTensorBatchType"]: """Call - :py:meth:`Datastream.iter_torch_batches - ` over the stream of output batches + :py:meth:`Dataset.iter_torch_batches + ` over the stream of output batches from the pipeline.""" return DataIterator.iter_torch_batches( self, @@ -1140,7 +1136,7 @@ def to_tf( local_shuffle_buffer_size: Optional[int] = None, local_shuffle_seed: Optional[int] = None, ) -> "tf.data.Dataset": - """Call :py:meth:`Datastream.to_tf ` over the stream of + """Call :py:meth:`Dataset.to_tf ` over the stream of output batches from the pipeline""" return 
DataIterator.to_tf( self, @@ -1170,7 +1166,7 @@ def to_torch( unsqueeze_label_tensor: bool = True, unsqueeze_feature_tensors: bool = True, ) -> "torch.utils.data.IterableDataset": - """Call :py:meth:`Datastream.to_torch ` over the stream + """Call :py:meth:`Dataset.to_torch ` over the stream of output batches from the pipeline""" return DataIterator.to_torch( self, @@ -1190,17 +1186,17 @@ def _iter_datasets_without_peek(self): if self._executed[0]: raise RuntimeError("Pipeline cannot be read multiple times.") self._executed[0] = True - if self._first_datastream: + if self._first_dataset: raise RuntimeError("The pipeline has been peeked.") self._optimize_stages() return PipelineExecutor(self) @DeveloperAPI - def iter_datasets(self) -> Iterator[Datastream]: - """Iterate over the output datastreams of this pipeline. + def iter_datasets(self) -> Iterator[Dataset]: + """Iterate over the output datasets of this pipeline. Returns: - Iterator over the datastreams outputted from this pipeline. + Iterator over the datasets outputted from this pipeline. """ if self._executed[0]: raise RuntimeError("Pipeline cannot be read multiple times.") @@ -1208,10 +1204,10 @@ def iter_datasets(self) -> Iterator[Datastream]: self._optimize_stages() - # If the first datastream has already been executed (via a peek operation), then - # we don't re-execute the first datastream when iterating through the pipeline. - # We re-use the saved _first_datastream and _remaining_datastream_iter. - if self._first_datastream is not None: + # If the first dataset has already been executed (via a peek operation), then + # we don't re-execute the first dataset when iterating through the pipeline. + # We re-use the saved _first_dataset and _remaining_dataset_iter. 
+ if self._first_dataset is not None: class _IterableWrapper(Iterable): """Wrapper that takes an iterator and converts it to an @@ -1223,26 +1219,24 @@ def __init__(self, base_iterator): def __iter__(self): return self.base_iterator - # Update the base iterable to skip the first datastream. + # Update the base iterable to skip the first dataset. # It is ok to update the base iterable here since # the pipeline can never be executed again. - self._base_iterable = _IterableWrapper(self._remaining_datastreams_iter) + self._base_iterable = _IterableWrapper(self._remaining_datasets_iter) - iter = itertools.chain([self._first_datastream], PipelineExecutor(self)) - self._first_datastream = None - self._remaining_datastreams_iter = None + iter = itertools.chain([self._first_dataset], PipelineExecutor(self)) + self._first_dataset = None + self._remaining_datasets_iter = None return iter else: return PipelineExecutor(self) @DeveloperAPI - def foreach_window( - self, fn: Callable[[Datastream], Datastream] - ) -> "DatasetPipeline": - """Apply a transform to each datastream/window in this pipeline. + def foreach_window(self, fn: Callable[[Dataset], Dataset]) -> "DatasetPipeline": + """Apply a transform to each dataset/window in this pipeline. Args: - fn: The function to transform each datastream with. + fn: The function to transform each dataset with. Returns: The transformed DatasetPipeline. @@ -1271,13 +1265,13 @@ def stats(self, exclude_first_window: bool = True) -> str: @staticmethod def from_iterable( - iterable: Iterable[Callable[[], Datastream]], + iterable: Iterable[Callable[[], Dataset]], ) -> "DatasetPipeline": - """Create a pipeline from an sequence of Datastream producing functions. + """Create a pipeline from an sequence of Dataset producing functions. Args: iterable: A finite or infinite-length sequence of functions that - each produce a Datastream when called. + each produce a Dataset when called. 
""" if hasattr(iterable, "__len__"): length = len(iterable) @@ -1307,22 +1301,22 @@ def _optimize_stages(self): self._optimized_stages = self._stages return - # This dummy datastream will be used to get a set of optimized stages. - dummy_ds = Datastream( + # This dummy dataset will be used to get a set of optimized stages. + dummy_ds = Dataset( ExecutionPlan( BlockList([], [], owned_by_consumer=True), - DatastreamStats(stages={}, parent=None), + DatasetStats(stages={}, parent=None), run_by_consumer=True, ), 0, True, ) - # Apply all pipeline operations to the dummy datastream. + # Apply all pipeline operations to the dummy dataset. for stage in self._stages: dummy_ds = stage(dummy_ds) # Get the optimized stages. _, _, stages = dummy_ds._plan._optimize() - # Apply these optimized stages to the datastreams underlying the pipeline. + # Apply these optimized stages to the datasets underlying the pipeline. # These optimized stages will be executed by the PipelineExecutor. optimized_stages = [] for stage in stages: @@ -1332,33 +1326,31 @@ def add_stage(ds, stage): return ds._plan.with_stage(stage) optimized_stages.append( - lambda ds, stage=stage: Datastream( - add_stage(ds, stage), ds._epoch, True - ) + lambda ds, stage=stage: Dataset(add_stage(ds, stage), ds._epoch, True) ) self._optimized_stages = optimized_stages - def _peek(self) -> Datastream: - if self._first_datastream is None: - datastream_iter = iter(self._base_iterable) - first_datastream_gen = next(datastream_iter) + def _peek(self) -> Dataset: + if self._first_dataset is None: + dataset_iter = iter(self._base_iterable) + first_dataset_gen = next(dataset_iter) peek_pipe = DatasetPipeline( - base_iterable=[first_datastream_gen], + base_iterable=[first_dataset_gen], stages=self._stages.copy(), length=1, progress_bars=True, ) - # Cache the executed _first_datastream. 
- self._first_datastream = next(peek_pipe.iter_datasets()) - self._remaining_datastreams_iter = datastream_iter + # Cache the executed _first_dataset. + self._first_dataset = next(peek_pipe.iter_datasets()) + self._remaining_datasets_iter = dataset_iter # Store the stats from the peek pipeline. self._stats.add_pipeline_stats(peek_pipe._stats) - return self._first_datastream + return self._first_dataset - def _write_each_datastream(self, write_fn: Callable[[Datastream], None]) -> None: - """Write output for each datastream. + def _write_each_dataset(self, write_fn: Callable[[Dataset], None]) -> None: + """Write output for each dataset. This is utility method used for write_json, write_csv, write_parquet, write_datasource, etc. diff --git a/python/ray/data/datasource/datasource.py b/python/ray/data/datasource/datasource.py index 6c9c8e5a2a73..87761f0dbebb 100644 --- a/python/ray/data/datasource/datasource.py +++ b/python/ray/data/datasource/datasource.py @@ -22,10 +22,10 @@ @PublicAPI class Datasource: - """Interface for defining a custom ``ray.data.Datastream`` datasource. + """Interface for defining a custom ``ray.data.Dataset`` datasource. - To read a datasource into a datastream, use ``ray.data.read_datasource()``. - To write to a writable datasource, use ``Datastream.write_datasource()``. + To read a datasource into a dataset, use ``ray.data.read_datasource()``. + To write to a writable datasource, use ``Dataset.write_datasource()``. See ``RangeDatasource`` and ``DummyOutputDatasource`` for examples of how to implement readable and writable datasources. @@ -133,7 +133,7 @@ class Reader: """A bound read operation for a datasource. This is a stateful class so that reads can be prepared in multiple stages. - For example, it is useful for Datastreams to know the in-memory size of the read + For example, it is useful for Datasets to know the in-memory size of the read prior to executing it. 
""" @@ -173,7 +173,7 @@ def get_read_tasks(self, parallelism: int) -> List["ReadTask"]: @DeveloperAPI class ReadTask(Callable[[], Iterable[Block]]): - """A function used to read blocks from the datastream. + """A function used to read blocks from the dataset. Read tasks are generated by ``reader.get_read_tasks()``, and return a list of ``ray.data.Block`` when called. Initial metadata about the read diff --git a/python/ray/data/datasource/file_based_datasource.py b/python/ray/data/datasource/file_based_datasource.py index 3d436bce8cbd..c73938b1ffea 100644 --- a/python/ray/data/datasource/file_based_datasource.py +++ b/python/ray/data/datasource/file_based_datasource.py @@ -62,7 +62,7 @@ @DeveloperAPI class BlockWritePathProvider: """Abstract callable that provides concrete output paths when writing - datastream blocks. + dataset blocks. Current subclasses: DefaultBlockWritePathProvider @@ -73,32 +73,32 @@ def _get_write_path_for_block( base_path: str, *, filesystem: Optional["pyarrow.fs.FileSystem"] = None, - datastream_uuid: Optional[str] = None, + dataset_uuid: Optional[str] = None, block: Optional[Block] = None, block_index: Optional[int] = None, file_format: Optional[str] = None, ) -> str: """ - Resolves and returns the write path for the given datastream block. When + Resolves and returns the write path for the given dataset block. When implementing this method, care should be taken to ensure that a unique - path is provided for every datastream block. + path is provided for every dataset block. Args: - base_path: The base path to write the datastream block out to. This is - expected to be the same for all blocks in the datastream, and may + base_path: The base path to write the dataset block out to. This is + expected to be the same for all blocks in the dataset, and may point to either a directory or file prefix. filesystem: The filesystem implementation that will be used to write a file out to the write path returned. 
- datastream_uuid: Unique identifier for the datastream that this block + dataset_uuid: Unique identifier for the dataset that this block belongs to. block: The block to write. block_index: Ordered index of the block to write within its parent - datastream. + dataset. file_format: File format string for the block that can be used as the file extension in the write path returned. Returns: - The datastream block write path. + The dataset block write path. """ raise NotImplementedError @@ -107,7 +107,7 @@ def __call__( base_path: str, *, filesystem: Optional["pyarrow.fs.FileSystem"] = None, - datastream_uuid: Optional[str] = None, + dataset_uuid: Optional[str] = None, block: Optional[Block] = None, block_index: Optional[int] = None, file_format: Optional[str] = None, @@ -115,7 +115,7 @@ def __call__( return self._get_write_path_for_block( base_path, filesystem=filesystem, - datastream_uuid=datastream_uuid, + dataset_uuid=dataset_uuid, block=block, block_index=block_index, file_format=file_format, @@ -125,8 +125,8 @@ def __call__( @DeveloperAPI class DefaultBlockWritePathProvider(BlockWritePathProvider): """Default block write path provider implementation that writes each - datastream block out to a file of the form: - {base_path}/{datastream_uuid}_{block_index}.{file_format} + dataset block out to a file of the form: + {base_path}/{dataset_uuid}_{block_index}.{file_format} """ def _get_write_path_for_block( @@ -134,12 +134,12 @@ def _get_write_path_for_block( base_path: str, *, filesystem: Optional["pyarrow.fs.FileSystem"] = None, - datastream_uuid: Optional[str] = None, + dataset_uuid: Optional[str] = None, block: Optional[ObjectRef[Block]] = None, block_index: Optional[int] = None, file_format: Optional[str] = None, ) -> str: - suffix = f"{datastream_uuid}_{block_index:06}.{file_format}" + suffix = f"{dataset_uuid}_{block_index:06}.{file_format}" # Uses POSIX path for cross-filesystem compatibility, since PyArrow # FileSystem paths are always forward slash 
separated, see: # https://arrow.apache.org/docs/python/filesystems.html @@ -276,7 +276,7 @@ def write( blocks: Iterable[Block], ctx: TaskContext, path: str, - datastream_uuid: str, + dataset_uuid: str, filesystem: Optional["pyarrow.fs.FileSystem"] = None, try_create_dir: bool = True, open_stream_args: Optional[Dict[str, Any]] = None, @@ -331,7 +331,7 @@ def write_block(write_path: str, block: Block): write_path = block_path_provider( path, filesystem=filesystem, - datastream_uuid=datastream_uuid, + dataset_uuid=dataset_uuid, block=block, block_index=ctx.task_idx, file_format=file_format, diff --git a/python/ray/data/datasource/file_meta_provider.py b/python/ray/data/datasource/file_meta_provider.py index a904a6ebebaa..972053d77bab 100644 --- a/python/ray/data/datasource/file_meta_provider.py +++ b/python/ray/data/datasource/file_meta_provider.py @@ -25,7 +25,7 @@ @DeveloperAPI class FileMetadataProvider: - """Abstract callable that provides metadata for the files of a single datastream block. + """Abstract callable that provides metadata for the files of a single dataset block. Current subclasses: BaseFileMetadataProvider @@ -40,10 +40,10 @@ def _get_block_metadata( ) -> BlockMetadata: """Resolves and returns block metadata for files in the given paths. - All file paths provided should belong to a single datastream block. + All file paths provided should belong to a single dataset block. Args: - paths: The file paths for a single datastream block. + paths: The file paths for a single dataset block. schema: The user-provided or inferred schema for the given paths, if any. @@ -80,10 +80,10 @@ def _get_block_metadata( rows_per_file: Optional[int], file_sizes: List[Optional[int]], ) -> BlockMetadata: - """Resolves and returns block metadata for files of a single datastream block. + """Resolves and returns block metadata for files of a single dataset block. Args: - paths: The file paths for a single datastream block. 
These + paths: The file paths for a single dataset block. These paths will always be a subset of those previously returned from `expand_paths()`. schema: The user-provided or inferred schema for the given file @@ -206,7 +206,7 @@ def expand_paths( class ParquetMetadataProvider(FileMetadataProvider): """Abstract callable that provides block metadata for Arrow Parquet file fragments. - All file fragments should belong to a single datastream block. + All file fragments should belong to a single dataset block. Supports optional pre-fetching of ordered metadata for all file fragments in a single batch to help optimize metadata resolution. @@ -223,10 +223,10 @@ def _get_block_metadata( pieces: List["pyarrow.dataset.ParquetFileFragment"], prefetched_metadata: Optional[List[Any]], ) -> BlockMetadata: - """Resolves and returns block metadata for files of a single datastream block. + """Resolves and returns block metadata for files of a single dataset block. Args: - paths: The file paths for a single datastream block. + paths: The file paths for a single dataset block. schema: The user-provided or inferred schema for the given file paths, if any. pieces: The Parquet file fragments derived from the input file paths. @@ -269,7 +269,7 @@ class DefaultParquetMetadataProvider(ParquetMetadataProvider): """The default file metadata provider for ParquetDatasource. Aggregates total block bytes and number of rows using the Parquet file metadata - associated with a list of Arrow Parquet datastream file fragments. + associated with a list of Arrow Parquet dataset file fragments. """ def _get_block_metadata( @@ -362,7 +362,7 @@ def _handle_read_os_error(error: OSError, paths: Union[str, List[str]]) -> str: "You can also run AWS CLI command to get more detailed error message " "(e.g., aws s3 ls ). 
" "See https://awscli.amazonaws.com/v2/documentation/api/latest/reference/s3/index.html " # noqa - "and https://docs.ray.io/en/latest/data/creating-datastreams.html#reading-from-remote-storage " # noqa + "and https://docs.ray.io/en/latest/data/creating-datasets.html#reading-from-remote-storage " # noqa "for more information." ) ) diff --git a/python/ray/data/datasource/parquet_datasource.py b/python/ray/data/datasource/parquet_datasource.py index 668f8ea69074..c98eaeec52b2 100644 --- a/python/ray/data/datasource/parquet_datasource.py +++ b/python/ray/data/datasource/parquet_datasource.py @@ -41,7 +41,7 @@ # compared to Parquet encoded representation. Parquet file statistics only record # encoded (i.e. uncompressed) data size information. # -# To estimate real-time in-memory data size, Datastreams will try to estimate the +# To estimate real-time in-memory data size, Datasets will try to estimate the # correct inflation ratio from Parquet to Arrow, using this constant as the default # value for safety. See https://github.com/ray-project/ray/pull/26516 for more context. PARQUET_ENCODING_RATIO_ESTIMATE_DEFAULT = 5 @@ -49,11 +49,11 @@ # The lower bound size to estimate Parquet encoding ratio. PARQUET_ENCODING_RATIO_ESTIMATE_LOWER_BOUND = 2 -# The percentage of files (1% by default) to be sampled from the datastream to estimate +# The percentage of files (1% by default) to be sampled from the dataset to estimate # Parquet encoding ratio. PARQUET_ENCODING_RATIO_ESTIMATE_SAMPLING_RATIO = 0.01 -# The minimal and maximal number of file samples to take from the datastream to estimate +# The minimal and maximal number of file samples to take from the dataset to estimate # Parquet encoding ratio. # This is to restrict `PARQUET_ENCODING_RATIO_ESTIMATE_SAMPLING_RATIO` within the # proper boundary. @@ -146,8 +146,8 @@ class ParquetDatasource(ParquetBaseDatasource): """Parquet datasource, for reading and writing Parquet files. 
The primary difference from ParquetBaseDatasource is that this uses - PyArrow's `ParquetDataset` abstraction for datastream reads, and thus offers - automatic Arrow datastream schema inference and row count collection at the + PyArrow's `ParquetDataset` abstraction for dataset reads, and thus offers + automatic Arrow dataset schema inference and row count collection at the cost of some potential performance and/or compatibility penalties. Examples: @@ -217,14 +217,14 @@ def __init__( ) if _block_udf is not None: - # Try to infer datastream schema by passing dummy table through UDF. + # Try to infer dataset schema by passing dummy table through UDF. dummy_table = schema.empty_table() try: inferred_schema = _block_udf(dummy_table).schema inferred_schema = inferred_schema.with_metadata(schema.metadata) except Exception: logger.debug( - "Failed to infer schema of datastream by passing dummy table " + "Failed to infer schema of dataset by passing dummy table " "through UDF due to the following exception:", exc_info=True, ) diff --git a/python/ray/data/datasource/partitioning.py b/python/ray/data/datasource/partitioning.py index 0554a50c4a77..30462514c36a 100644 --- a/python/ray/data/datasource/partitioning.py +++ b/python/ray/data/datasource/partitioning.py @@ -17,7 +17,7 @@ @DeveloperAPI class PartitionStyle(str, Enum): - """Supported datastream partition styles. + """Supported dataset partition styles. Inherits from `str` to simplify plain text serialization/deserialization. @@ -41,7 +41,7 @@ class Partitioning: """Partition scheme used to describe path-based partitions. Path-based partition formats embed all partition keys and values directly in - their datastream file paths. + their dataset file paths. """ #: The partition style - may be either HIVE or DIRECTORY. @@ -53,7 +53,7 @@ class Partitioning: #: directories. base_dir: Optional[str] = None #: The partition key field names (i.e. column names for tabular - #: datastreams). 
When non-empty, the order and length of partition key + #: datasets). When non-empty, the order and length of partition key #: field names must match the order and length of partition values. #: Required when parsing DIRECTORY partitioned paths or generating #: HIVE partitioned paths. @@ -112,7 +112,7 @@ class PathPartitionEncoder: """Callable that generates directory path strings for path-based partition formats. Path-based partition formats embed all partition keys and values directly in - their datastream file paths. + their dataset file paths. Two path partition formats are currently supported - HIVE and DIRECTORY. @@ -140,7 +140,7 @@ def of( base_dir: "/"-delimited base directory that all partition paths will be generated under (exclusive). field_names: The partition key field names (i.e. column names for tabular - datastreams). Required for HIVE partition paths, optional for DIRECTORY + datasets). Required for HIVE partition paths, optional for DIRECTORY partition paths. When non-empty, the order and length of partition key field names must match the order and length of partition values. filesystem: Filesystem that will be used for partition path file I/O. @@ -229,7 +229,7 @@ class PathPartitionParser: """Partition parser for path-based partition formats. Path-based partition formats embed all partition keys and values directly in - their datastream file paths. + their dataset file paths. Two path partition formats are currently supported - HIVE and DIRECTORY. @@ -274,7 +274,7 @@ def of( Optional for HIVE partitioning. When non-empty, the order and length of partition key field names must match the order and length of partition directories discovered. Partition key field names are not required to - exist in the datastream schema. + exist in the dataset schema. filesystem: Filesystem that will be used for partition path file I/O. Returns: @@ -452,7 +452,7 @@ def do_assert(val, msg): Optional for HIVE partitioning. 
When non-empty, the order and length of partition key field names must match the order and length of partition directories discovered. Partition key field names are not required to - exist in the datastream schema. + exist in the dataset schema. filesystem: Filesystem that will be used for partition path file I/O. Returns: diff --git a/python/ray/data/datasource/webdataset_datasource.py b/python/ray/data/datasource/webdataset_datasource.py index 17ac6aaa240f..6020cda47717 100644 --- a/python/ray/data/datasource/webdataset_datasource.py +++ b/python/ray/data/datasource/webdataset_datasource.py @@ -295,7 +295,7 @@ def _make_iterable(block: BlockAccessor): This is a placeholder for dealing with more complex blocks. Args: - block: Ray Datastream block + block: Ray Dataset block Returns: Iterable[Dict[str,Any]]: Iterable of samples @@ -305,7 +305,7 @@ def _make_iterable(block: BlockAccessor): @PublicAPI(stability="alpha") class WebDatasetDatasource(FileBasedDatasource): - """A Datasource for WebDataset datastreams (tar format with naming conventions).""" + """A Datasource for WebDataset datasets (tar format with naming conventions).""" _FILE_EXTENSION = "tar" diff --git a/python/ray/data/datastream.py b/python/ray/data/datastream.py deleted file mode 100644 index f44315d0b26c..000000000000 --- a/python/ray/data/datastream.py +++ /dev/null @@ -1,4518 +0,0 @@ -import collections -import itertools -import logging -import sys -import time -import html -from typing import ( - TYPE_CHECKING, - Any, - Callable, - Dict, - Generic, - Iterable, - Iterator, - List, - Type, - Optional, - Tuple, - Union, - Mapping, -) -from uuid import uuid4 - -import numpy as np - -import ray -from ray.air.util.tensor_extensions.utils import _create_possibly_ragged_ndarray -import ray.cloudpickle as pickle -from ray._private.usage import usage_lib -from ray.air.constants import TENSOR_COLUMN_NAME -from ray.air.util.data_batch_conversion import BlockFormat -from 
ray.data._internal.logical.operators.all_to_all_operator import ( - RandomShuffle, - RandomizeBlocks, - Repartition, - Sort, -) -from ray.data._internal.logical.operators.n_ary_operator import Zip -from ray.data._internal.logical.optimizers import LogicalPlan -from ray.data._internal.logical.operators.limit_operator import Limit -from ray.data._internal.logical.operators.map_operator import ( - Filter, - FlatMap, - MapRows, - MapBatches, -) -from ray.data._internal.logical.operators.write_operator import Write -from ray.data._internal.planner.filter import generate_filter_fn -from ray.data._internal.planner.flat_map import generate_flat_map_fn -from ray.data._internal.planner.map_batches import generate_map_batches_fn -from ray.data._internal.planner.map_rows import generate_map_rows_fn -from ray.data._internal.planner.write import generate_write_fn -from ray.data.iterator import DataIterator -from ray.data._internal.block_list import BlockList -from ray.data._internal.iterator.iterator_impl import ( - DataIteratorImpl, -) -from ray.data._internal.iterator.stream_split_iterator import ( - StreamSplitDataIterator, -) -from ray.data._internal.compute import ( - ActorPoolStrategy, - CallableClass, - ComputeStrategy, - TaskPoolStrategy, -) -from ray.data._internal.delegating_block_builder import DelegatingBlockBuilder -from ray.data._internal.equalize import _equalize -from ray.data._internal.lazy_block_list import LazyBlockList -from ray.data._internal.util import ( - _estimate_available_parallelism, - _is_local_scheme, - ConsumptionAPI, -) -from ray.data._internal.pandas_block import PandasBlockSchema -from ray.data._internal.plan import ( - ExecutionPlan, - OneToOneStage, -) -from ray.data._internal.stage_impl import ( - RandomizeBlocksStage, - RepartitionStage, - RandomShuffleStage, - ZipStage, - SortStage, - LimitStage, -) -from ray.data._internal.progress_bar import ProgressBar -from ray.data._internal.remote_fn import cached_remote_fn -from 
ray.data._internal.split import _split_at_indices, _get_num_rows -from ray.data._internal.stats import DatastreamStats, DatastreamStatsSummary -from ray.data.aggregate import AggregateFn, Max, Mean, Min, Std, Sum -from ray.data.block import ( - VALID_BATCH_FORMATS, - STRICT_MODE_EXPLANATION, - _apply_strict_mode_batch_format, - _apply_strict_mode_batch_size, - UserDefinedFunction, - Block, - BlockAccessor, - BlockMetadata, - BlockPartition, - DataBatch, - StrictModeError, - T, - U, - _validate_key_fn, -) -from ray.data.context import ( - DataContext, - WARN_PREFIX, - OK_PREFIX, - ESTIMATED_SAFE_MEMORY_FRACTION, -) -from ray.data.datasource import ( - BlockWritePathProvider, - CSVDatasource, - Datasource, - DefaultBlockWritePathProvider, - JSONDatasource, - NumpyDatasource, - ParquetDatasource, - ReadTask, - TFRecordDatasource, - WriteResult, -) -from ray.data.datasource.file_based_datasource import ( - _unwrap_arrow_serialization_workaround, - _wrap_arrow_serialization_workaround, -) -from ray.data.random_access_dataset import RandomAccessDataset -from ray.types import ObjectRef -from ray.util.annotations import DeveloperAPI, PublicAPI, Deprecated -from ray.util.scheduling_strategies import NodeAffinitySchedulingStrategy -from ray.widgets import Template -from ray.widgets.util import ensure_notebook_deps, fallback_if_colab - -if sys.version_info >= (3, 8): - from typing import Literal -else: - from typing_extensions import Literal - -if TYPE_CHECKING: - import dask - import mars - import modin - import pandas - import pyarrow - import pyspark - import tensorflow as tf - import torch - import torch.utils.data - - from ray.data.dataset_pipeline import DatasetPipeline - from ray.data.grouped_data import GroupedData - from ray.data._internal.execution.interfaces import Executor, NodeIdStr - from ray.data._internal.torch_iterable_dataset import TorchTensorBatchType - from tensorflow_metadata.proto.v0 import schema_pb2 - - -logger = logging.getLogger(__name__) - 
-TensorflowFeatureTypeSpec = Union[ - "tf.TypeSpec", List["tf.TypeSpec"], Dict[str, "tf.TypeSpec"] -] - -TensorFlowTensorBatchType = Union["tf.Tensor", Dict[str, "tf.Tensor"]] - - -@PublicAPI -class Datastream: - """A Datastream is a distributed data collection for data loading and processing. - - Datastreams are distributed pipelines that produce ``ObjectRef[Block]`` outputs, - where each block holds data in Arrow format, representing a shard of the overall - data collection. The block also determines the unit of parallelism. - - Datastreams can be created in multiple ways: from synthetic data via ``range_*()`` - APIs, from existing memory data via ``from_*()`` APIs (this creates a subclass - of Datastream called ``MaterializedDatastream``), or from external storage - systems such as local disk, S3, HDFS etc. via the ``read_*()`` APIs. The - (potentially processed) Datastream can be saved back to external storage systems - via the ``write_*()`` APIs. - - Examples: - >>> import ray - >>> # Create datastream from synthetic data. - >>> ds = ray.data.range(1000) - >>> # Create datastream from in-memory data. - >>> ds = ray.data.from_items( - ... [{"col1": i, "col2": i * 2} for i in range(1000)]) - >>> # Create datastream from external storage system. - >>> ds = ray.data.read_parquet("s3://bucket/path") # doctest: +SKIP - >>> # Save datastream back to external storage system. - >>> ds.write_csv("s3://bucket/output") # doctest: +SKIP - - Datastream has two kinds of operations: transformation, which takes in Datastream - and outputs a new Datastream (e.g. :py:meth:`.map_batches()`); and consumption, - which produces values (not Datatream) as output (e.g. :py:meth:`.iter_batches()`). - - Datastream transformations are lazy, with execution of the transformations being - triggered by downstream consumption. 
- - Datastream supports parallel processing at scale: transformations such as - :py:meth:`.map_batches()`, aggregations such as - :py:meth:`.min()`/:py:meth:`.max()`/:py:meth:`.mean()`, grouping via - :py:meth:`.groupby()`, shuffling operations such as :py:meth:`.sort()`, - :py:meth:`.random_shuffle()`, and :py:meth:`.repartition()`. - - Examples: - >>> import ray - >>> ds = ray.data.range(1000) - >>> # Transform batches (Dict[str, np.ndarray]) with map_batches(). - >>> ds.map_batches(lambda batch: {"id": batch["id"] * 2}) - MapBatches() - +- Datastream(num_blocks=17, num_rows=1000, schema={id: int64}) - >>> # Compute the maximum. - >>> ds.max("id") - 999 - >>> # Shuffle this datastream randomly. - >>> ds.random_shuffle() - RandomShuffle - +- Datastream(num_blocks=..., num_rows=1000, schema={id: int64}) - >>> # Sort it back in order. - >>> ds.sort("id") - Sort - +- Datastream(num_blocks=..., num_rows=1000, schema={id: int64}) - - Both unexecuted and materialized Datastreams can be passed between Ray tasks and - actors without incurring a copy. Datastream supports conversion to/from several - more featureful dataframe libraries (e.g., Spark, Dask, Modin, MARS), and are also - compatible with distributed TensorFlow / PyTorch. - """ - - def __init__( - self, - plan: ExecutionPlan, - epoch: int, - lazy: bool = True, - logical_plan: Optional[LogicalPlan] = None, - ): - """Construct a Datastream (internal API). - - The constructor is not part of the Datastream API. Use the ``ray.data.*`` - read methods to construct a datastream. - """ - assert isinstance(plan, ExecutionPlan) - usage_lib.record_library_usage("dataset") # Legacy telemetry name. 
- - if ray.util.log_once("strict_mode_explanation"): - logger.warning(STRICT_MODE_EXPLANATION) - - self._plan = plan - self._uuid = uuid4().hex - self._epoch = epoch - self._lazy = lazy - self._logical_plan = logical_plan - if logical_plan is not None: - self._plan.link_logical_plan(logical_plan) - - if not lazy: - self._plan.execute(allow_clear_input_blocks=False) - - # Handle to currently running executor for this datastream. - self._current_executor: Optional["Executor"] = None - - @staticmethod - def copy( - ds: "Datastream", _deep_copy: bool = False, _as: Optional[type] = None - ) -> "Datastream": - if not _as: - _as = Datastream - if _deep_copy: - return _as(ds._plan.deep_copy(), ds._epoch, ds._lazy, ds._logical_plan) - else: - return _as(ds._plan.copy(), ds._epoch, ds._lazy, ds._logical_plan) - - def map( - self, - fn: UserDefinedFunction[Dict[str, Any], Dict[str, Any]], - *, - compute: Optional[ComputeStrategy] = None, - **ray_remote_args, - ) -> "Datastream": - """Apply the given function to each record of this datastream. - - Note that mapping individual records can be quite slow. Consider using - `.map_batches()` for performance. - - Examples: - >>> import ray - >>> # Transform python objects. - >>> ds = ray.data.range(1000) - >>> # The function goes from record (Dict[str, Any]) to record. - >>> ds.map(lambda record: {"id": record["id"] * 2}) - Map - +- Datastream(num_blocks=..., num_rows=1000, schema={id: int64}) - >>> # Transform Arrow records. - >>> ds = ray.data.from_items( - ... [{"value": i} for i in range(1000)]) - >>> ds.map(lambda record: {"v2": record["value"] * 2}) - Map - +- Datastream(num_blocks=200, num_rows=1000, schema={value: int64}) - >>> # Define a callable class that persists state across - >>> # function invocations for efficiency. - >>> init_model = ... # doctest: +SKIP - >>> class CachedModel: - ... def __init__(self): - ... self.model = init_model() - ... def __call__(self, batch): - ... 
return self.model(batch) - >>> # Apply the transform in parallel on GPUs. Since - >>> # compute=ActorPoolStrategy(size=8) the transform will be applied on a - >>> # pool of 8 Ray actors, each allocated 1 GPU by Ray. - >>> ds.map(CachedModel, # doctest: +SKIP - ... compute=ray.data.ActorPoolStrategy(size=8), - ... num_gpus=1) - - Time complexity: O(datastream size / parallelism) - - Args: - fn: The function to apply to each record, or a class type - that can be instantiated to create such a callable. Callable classes are - only supported for the actor compute strategy. - compute: The compute strategy, either None (default) to use Ray - tasks, ``ray.data.ActorPoolStrategy(size=n)`` to use a fixed-size actor - pool, or ``ray.data.ActorPoolStrategy(min_size=m, max_size=n)`` for an - autoscaling actor pool. - ray_remote_args: Additional resource requirements to request from - ray (e.g., num_gpus=1 to request GPUs for the map tasks). - - .. seealso:: - - :meth:`~Datastream.flat_map`: - Call this method to create new records from existing ones. Unlike - :meth:`~Datastream.map`, a function passed to - :meth:`~Datastream.flat_map` can return multiple records. - - :meth:`~Datastream.flat_map` isn't recommended because it's slow; call - :meth:`~Datastream.map_batches` instead. - - :meth:`~Datastream.map_batches` - Call this method to transform batches of data. It's faster and more - flexible than :meth:`~Datastream.map` and :meth:`~Datastream.flat_map`. - """ - if isinstance(fn, CallableClass) and ( - compute is None - or compute == "tasks" - or isinstance(compute, TaskPoolStrategy) - ): - raise ValueError( - "``compute`` must be specified when using a CallableClass, and must " - f"specify the actor compute strategy, but got: {compute}. " - "For example, use ``compute=ActorPoolStrategy(size=n)``." 
- ) - - self._warn_slow() - - transform_fn = generate_map_rows_fn() - - plan = self._plan.with_stage( - OneToOneStage( - "Map", - transform_fn, - compute, - ray_remote_args, - fn=fn, - ) - ) - - logical_plan = self._logical_plan - if logical_plan is not None: - map_op = MapRows( - logical_plan.dag, - fn, - compute=compute, - ray_remote_args=ray_remote_args, - ) - logical_plan = LogicalPlan(map_op) - return Datastream(plan, self._epoch, self._lazy, logical_plan) - - def map_batches( - self, - fn: UserDefinedFunction[DataBatch, DataBatch], - *, - batch_size: Union[int, None, Literal["default"]] = "default", - compute: Optional[ComputeStrategy] = None, - batch_format: Optional[str] = "default", - zero_copy_batch: bool = False, - fn_args: Optional[Iterable[Any]] = None, - fn_kwargs: Optional[Dict[str, Any]] = None, - fn_constructor_args: Optional[Iterable[Any]] = None, - fn_constructor_kwargs: Optional[Dict[str, Any]] = None, - **ray_remote_args, - ) -> "Datastream": - """Apply the given function to batches of data. - - This applies the ``fn`` in parallel with map tasks, with each task handling - a batch of data (typically Dict[str, np.ndarray] or pd.DataFrame). - - To learn more about writing functions for :meth:`~Datastream.map_batches`, read - :ref:`writing user-defined functions `. - - .. tip:: - If ``fn`` does not mutate its input, set ``zero_copy_batch=True`` to elide a - batch copy, which can improve performance and decrease memory utilization. - ``fn`` will then receive zero-copy read-only batches. - If ``fn`` mutates its input, you will need to ensure that the batch provided - to ``fn`` is writable by setting ``zero_copy_batch=False`` (default). This - will create an extra, mutable copy of each batch before handing it to - ``fn``. - - .. note:: - The size of the batches provided to ``fn`` may be smaller than the provided - ``batch_size`` if ``batch_size`` doesn't evenly divide the block(s) sent to - a given map task. 
When ``batch_size`` is specified, each map task will be - sent a single block if the block is equal to or larger than ``batch_size``, - and will be sent a bundle of blocks up to (but not exceeding) - ``batch_size`` if blocks are smaller than ``batch_size``. - - Examples: - - >>> import numpy as np - >>> import ray - >>> ds = ray.data.from_items([ - ... {"name": "Luna", "age": 4}, - ... {"name": "Rory", "age": 14}, - ... {"name": "Scout", "age": 9}, - ... ]) - >>> ds # doctest: +SKIP - MaterializedDatastream( - num_blocks=3, - num_rows=3, - schema={name: string, age: int64} - ) - - Here ``fn`` returns the same batch type as the input, but your ``fn`` can - also return a different batch type (e.g., pd.DataFrame). Read more about - :ref:`Transforming Data `. - - >>> from typing import Dict - >>> def map_fn(batch: Dict[str, np.ndarray]) -> Dict[str, np.ndarray]: - ... batch["age_in_dog_years"] = 7 * batch["age"] - ... return batch - >>> ds = ds.map_batches(map_fn) - >>> ds - MapBatches(map_fn) - +- Datastream(num_blocks=3, num_rows=3, schema={name: string, age: int64}) - - :ref:`Actors ` can improve the performance of some workloads. - For example, you can use :ref:`actors ` to load a model once - per worker instead of once per inference. - - To transform batches with :ref:`actors `, pass a callable type - to ``fn`` and specify an :class:`~ray.data.ActorPoolStrategy>`. - - In the example below, ``CachedModel`` is called on an autoscaling pool of - two to eight :ref:`actors `, each allocated one GPU by Ray. - - >>> init_large_model = ... # doctest: +SKIP - >>> class CachedModel: - ... def __init__(self): - ... self.model = init_large_model() - ... def __call__(self, item): - ... return self.model(item) - >>> ds.map_batches( # doctest: +SKIP - ... CachedModel, # doctest: +SKIP - ... batch_size=256, # doctest: +SKIP - ... compute=ray.data.ActorPoolStrategy(size=8), # doctest: +SKIP - ... num_gpus=1, - ... 
) # doctest: +SKIP - - ``fn`` can also be a generator, yielding multiple batches in a single - invocation. This is useful when returning large objects. Instead of - returning a very large output batch, ``fn`` can instead yield the - output batch in chunks. - - >>> def map_fn_with_large_output(batch): - ... for i in range(3): - ... yield {"large_output": np.ones((100, 1000))} - >>> ds = ray.data.from_items([1]) - >>> ds = ds.map_batches(map_fn_with_large_output) - >>> ds - MapBatches(map_fn_with_large_output) - +- Datastream(num_blocks=1, num_rows=1, schema={item: int64}) - - - Args: - fn: The function or generator to apply to each record batch, or a class type - that can be instantiated to create such a callable. Callable classes are - only supported for the actor compute strategy. Note ``fn`` must be - pickle-able. - batch_size: The desired number of rows in each batch, or None to use entire - blocks as batches (blocks may contain different number of rows). - The actual size of the batch provided to ``fn`` may be smaller than - ``batch_size`` if ``batch_size`` doesn't evenly divide the block(s) sent - to a given map task. Default batch_size is 4096 with "default". - compute: The compute strategy, either "tasks" (default) to use Ray - tasks, ``ray.data.ActorPoolStrategy(size=n)`` to use a fixed-size actor - pool, or ``ray.data.ActorPoolStrategy(min_size=m, max_size=n)`` for an - autoscaling actor pool. - batch_format: Specify ``"default"`` to use the default block format - (NumPy), ``"pandas"`` to select ``pandas.DataFrame``, "pyarrow" to - select ``pyarrow.Table``, or ``"numpy"`` to select - ``Dict[str, numpy.ndarray]``, or None to return the underlying block - exactly as is with no additional formatting. - zero_copy_batch: Whether ``fn`` should be provided zero-copy, read-only - batches. 
If this is ``True`` and no copy is required for the - ``batch_format`` conversion, the batch will be a zero-copy, read-only - view on data in Ray's object store, which can decrease memory - utilization and improve performance. If this is ``False``, the batch - will be writable, which will require an extra copy to guarantee. - If ``fn`` mutates its input, this will need to be ``False`` in order to - avoid "assignment destination is read-only" or "buffer source array is - read-only" errors. Default is ``False``. - fn_args: Positional arguments to pass to ``fn`` after the first argument. - These arguments are top-level arguments to the underlying Ray task. - fn_kwargs: Keyword arguments to pass to ``fn``. These arguments are - top-level arguments to the underlying Ray task. - fn_constructor_args: Positional arguments to pass to ``fn``'s constructor. - You can only provide this if ``fn`` is a callable class. These arguments - are top-level arguments in the underlying Ray actor construction task. - fn_constructor_kwargs: Keyword arguments to pass to ``fn``'s constructor. - This can only be provided if ``fn`` is a callable class. These arguments - are top-level arguments in the underlying Ray actor construction task. - ray_remote_args: Additional resource requirements to request from - ray (e.g., ``num_gpus=1`` to request GPUs for the map tasks). - - .. seealso:: - - :meth:`~Datastream.iter_batches` - Call this function to iterate over batches of data. - - :meth:`~Datastream.flat_map`: - Call this method to create new records from existing ones. Unlike - :meth:`~Datastream.map`, a function passed to :meth:`~Datastream.flat_map` - can return multiple records. - - :meth:`~Datastream.flat_map` isn't recommended because it's slow; call - :meth:`~Datastream.map_batches` instead. - - :meth:`~Datastream.map` - Call this method to transform one record at time. - - This method isn't recommended because it's slow; call - :meth:`~Datastream.map_batches` instead. 
- """ # noqa: E501 - - batch_format = _apply_strict_mode_batch_format(batch_format) - if batch_format == "native": - logger.warning("The 'native' batch format has been renamed 'default'.") - - target_block_size = None - if batch_size is not None and batch_size != "default": - if batch_size < 1: - raise ValueError("Batch size cannot be negative or 0") - # Enable blocks bundling when batch_size is specified by caller. - target_block_size = batch_size - - batch_size = _apply_strict_mode_batch_size( - batch_size, use_gpu="num_gpus" in ray_remote_args - ) - - if batch_format not in VALID_BATCH_FORMATS: - raise ValueError( - f"The batch format must be one of {VALID_BATCH_FORMATS}, got: " - f"{batch_format}" - ) - - if isinstance(fn, CallableClass) and ( - compute is None - or compute == "tasks" - or isinstance(compute, TaskPoolStrategy) - ): - raise ValueError( - "``compute`` must be specified when using a CallableClass, and must " - f"specify the actor compute strategy, but got: {compute}. " - "For example, use ``compute=ActorPoolStrategy(size=n)``." - ) - - if fn_constructor_args is not None or fn_constructor_kwargs is not None: - if compute is None or ( - compute != "actors" and not isinstance(compute, ActorPoolStrategy) - ): - raise ValueError( - "fn_constructor_args and fn_constructor_kwargs can only be " - "specified if using the actor pool compute strategy, but got: " - f"{compute}" - ) - if not isinstance(fn, CallableClass): - raise ValueError( - "fn_constructor_args and fn_constructor_kwargs can only be " - "specified if providing a CallableClass instance for fn, but got: " - f"{fn}" - ) - - transform_fn = generate_map_batches_fn( - batch_size=batch_size, - batch_format=batch_format, - zero_copy_batch=zero_copy_batch, - ) - - # TODO(chengsu): pass function name to MapBatches logical operator. 
- if hasattr(fn, "__self__") and isinstance( - fn.__self__, ray.data.preprocessor.Preprocessor - ): - stage_name = fn.__self__.__class__.__name__ - else: - stage_name = f'MapBatches({getattr(fn, "__name__", type(fn))})' - - stage = OneToOneStage( - stage_name, - transform_fn, - compute, - ray_remote_args, - # TODO(Clark): Add a strict cap here. - target_block_size=target_block_size, - fn=fn, - fn_args=fn_args, - fn_kwargs=fn_kwargs, - fn_constructor_args=fn_constructor_args, - fn_constructor_kwargs=fn_constructor_kwargs, - ) - plan = self._plan.with_stage(stage) - - logical_plan = self._logical_plan - if logical_plan is not None: - map_batches_op = MapBatches( - logical_plan.dag, - fn, - batch_size=batch_size, - batch_format=batch_format, - zero_copy_batch=zero_copy_batch, - target_block_size=target_block_size, - fn_args=fn_args, - fn_kwargs=fn_kwargs, - fn_constructor_args=fn_constructor_args, - fn_constructor_kwargs=fn_constructor_kwargs, - compute=compute, - ray_remote_args=ray_remote_args, - ) - logical_plan = LogicalPlan(map_batches_op) - - return Datastream(plan, self._epoch, self._lazy, logical_plan) - - def add_column( - self, - col: str, - fn: Callable[["pandas.DataFrame"], "pandas.Series"], - *, - compute: Optional[str] = None, - **ray_remote_args, - ) -> "Datastream": - """Add the given column to the datastream. - - This is only supported for datastreams convertible to pandas format. - A function generating the new column values given the batch in pandas - format must be specified. - - Examples: - >>> import ray - >>> ds = ray.data.range(100) - >>> # Add a new column equal to value * 2. - >>> ds = ds.add_column("new_col", lambda df: df["id"] * 2) - >>> # Overwrite the existing "value" with zeros. - >>> ds = ds.add_column("id", lambda df: 0) - - Time complexity: O(datastream size / parallelism) - - Args: - col: Name of the column to add. If the name already exists, the - column will be overwritten. 
- fn: Map function generating the column values given a batch of - records in pandas format. - compute: The compute strategy, either "tasks" (default) to use Ray - tasks, ``ray.data.ActorPoolStrategy(size=n)`` to use a fixed-size actor - pool, or ``ray.data.ActorPoolStrategy(min_size=m, max_size=n)`` for an - autoscaling actor pool. - ray_remote_args: Additional resource requirements to request from - ray (e.g., num_gpus=1 to request GPUs for the map tasks). - """ - - def process_batch(batch: "pandas.DataFrame") -> "pandas.DataFrame": - batch.loc[:, col] = fn(batch) - return batch - - if not callable(fn): - raise ValueError("`fn` must be callable, got {}".format(fn)) - - return self.map_batches( - process_batch, - batch_format="pandas", # TODO(ekl) we should make this configurable. - compute=compute, - zero_copy_batch=False, - **ray_remote_args, - ) - - def drop_columns( - self, - cols: List[str], - *, - compute: Optional[str] = None, - **ray_remote_args, - ) -> "Datastream": - """Drop one or more columns from the datastream. - - Examples: - >>> import ray - >>> ds = ray.data.range(100) - >>> # Add a new column equal to value * 2. - >>> ds = ds.add_column("new_col", lambda df: df["id"] * 2) - >>> # Drop the existing "value" column. - >>> ds = ds.drop_columns(["id"]) - - - Time complexity: O(datastream size / parallelism) - - Args: - cols: Names of the columns to drop. If any name does not exist, - an exception will be raised. - compute: The compute strategy, either "tasks" (default) to use Ray - tasks, ``ray.data.ActorPoolStrategy(size=n)`` to use a fixed-size actor - pool, or ``ray.data.ActorPoolStrategy(min_size=m, max_size=n)`` for an - autoscaling actor pool. - ray_remote_args: Additional resource requirements to request from - ray (e.g., num_gpus=1 to request GPUs for the map tasks). 
- """ - - return self.map_batches( - lambda batch: batch.drop(columns=cols), - batch_format="pandas", - zero_copy_batch=True, - compute=compute, - **ray_remote_args, - ) - - def select_columns( - self, - cols: List[str], - *, - compute: Union[str, ComputeStrategy] = None, - **ray_remote_args, - ) -> "Datastream": - """Select one or more columns from the datastream. - - All input columns used to select need to be in the schema of the datastream. - - Examples: - >>> import ray - >>> # Create a datastream with 3 columns - >>> ds = ray.data.from_items([{"col1": i, "col2": i+1, "col3": i+2} - ... for i in range(10)]) - >>> # Select only "col1" and "col2" columns. - >>> ds = ds.select_columns(cols=["col1", "col2"]) - >>> ds - MapBatches() - +- Datastream( - num_blocks=10, - num_rows=10, - schema={col1: int64, col2: int64, col3: int64} - ) - - - Time complexity: O(datastream size / parallelism) - - Args: - cols: Names of the columns to select. If any name is not included in the - datastream schema, an exception will be raised. - compute: The compute strategy, either "tasks" (default) to use Ray - tasks, ``ray.data.ActorPoolStrategy(size=n)`` to use a fixed-size actor - pool, or ``ray.data.ActorPoolStrategy(min_size=m, max_size=n)`` for an - autoscaling actor pool. - ray_remote_args: Additional resource requirements to request from - ray (e.g., num_gpus=1 to request GPUs for the map tasks). - """ # noqa: E501 - return self.map_batches( - lambda batch: BlockAccessor.for_block(batch).select(columns=cols), - batch_format="pandas", - zero_copy_batch=True, - compute=compute, - **ray_remote_args, - ) - - def flat_map( - self, - fn: UserDefinedFunction[Dict[str, Any], List[Dict[str, Any]]], - *, - compute: Optional[ComputeStrategy] = None, - **ray_remote_args, - ) -> "Datastream": - """Apply the given function to each record and then flatten results. - - Consider using ``.map_batches()`` for better performance (the batch size can be - altered in map_batches). 
- - Examples: - >>> import ray - >>> ds = ray.data.range(1000) - >>> ds.flat_map(lambda x: [{"id": 1}, {"id": 2}, {"id": 4}]) - FlatMap - +- Datastream(num_blocks=..., num_rows=1000, schema={id: int64}) - - Time complexity: O(datastream size / parallelism) - - Args: - fn: The function or generator to apply to each record, or a class type - that can be instantiated to create such a callable. Callable classes are - only supported for the actor compute strategy. - compute: The compute strategy, either "tasks" (default) to use Ray - tasks, ``ray.data.ActorPoolStrategy(size=n)`` to use a fixed-size actor - pool, or ``ray.data.ActorPoolStrategy(min_size=m, max_size=n)`` for an - autoscaling actor pool. - ray_remote_args: Additional resource requirements to request from - ray (e.g., num_gpus=1 to request GPUs for the map tasks). - - .. seealso:: - - :meth:`~Datastream.map_batches` - Call this method to transform batches of data. It's faster and more - flexible than :meth:`~Datastream.map` and :meth:`~Datastream.flat_map`. - - :meth:`~Datastream.map` - Call this method to transform one record at time. - - This method isn't recommended because it's slow; call - :meth:`~Datastream.map_batches` instead. - """ - if isinstance(fn, CallableClass) and ( - compute is None - or compute == "tasks" - or isinstance(compute, TaskPoolStrategy) - ): - raise ValueError( - "``compute`` must be specified when using a CallableClass, and must " - f"specify the actor compute strategy, but got: {compute}. " - "For example, use ``compute=ActorPoolStrategy(size=n)``." 
- ) - - self._warn_slow() - - transform_fn = generate_flat_map_fn() - - plan = self._plan.with_stage( - OneToOneStage("FlatMap", transform_fn, compute, ray_remote_args, fn=fn) - ) - - logical_plan = self._logical_plan - if logical_plan is not None: - op = FlatMap( - input_op=logical_plan.dag, - fn=fn, - compute=compute, - ray_remote_args=ray_remote_args, - ) - logical_plan = LogicalPlan(op) - return Datastream(plan, self._epoch, self._lazy, logical_plan) - - def filter( - self, - fn: UserDefinedFunction[Dict[str, Any], bool], - *, - compute: Union[str, ComputeStrategy] = None, - **ray_remote_args, - ) -> "Datastream": - """Filter out records that do not satisfy the given predicate. - - Consider using ``.map_batches()`` for better performance (you can implement - filter by dropping records). - - Examples: - >>> import ray - >>> ds = ray.data.range(100) - >>> ds.filter(lambda x: x["id"] % 2 == 0) - Filter - +- Datastream(num_blocks=..., num_rows=100, schema={id: int64}) - - Time complexity: O(datastream size / parallelism) - - Args: - fn: The predicate to apply to each record, or a class type - that can be instantiated to create such a callable. Callable classes are - only supported for the actor compute strategy. - compute: The compute strategy, either "tasks" (default) to use Ray - tasks, ``ray.data.ActorPoolStrategy(size=n)`` to use a fixed-size actor - pool, or ``ray.data.ActorPoolStrategy(min_size=m, max_size=n)`` for an - autoscaling actor pool. - ray_remote_args: Additional resource requirements to request from - ray (e.g., num_gpus=1 to request GPUs for the map tasks). - """ - if isinstance(fn, CallableClass) and ( - compute is None - or compute == "tasks" - or isinstance(compute, TaskPoolStrategy) - ): - raise ValueError( - "``compute`` must be specified when using a CallableClass, and must " - f"specify the actor compute strategy, but got: {compute}. " - "For example, use ``compute=ActorPoolStrategy(size=n)``." 
- ) - - self._warn_slow() - - transform_fn = generate_filter_fn() - - plan = self._plan.with_stage( - OneToOneStage("Filter", transform_fn, compute, ray_remote_args, fn=fn) - ) - - logical_plan = self._logical_plan - if logical_plan is not None: - op = Filter( - input_op=logical_plan.dag, - fn=fn, - compute=compute, - ray_remote_args=ray_remote_args, - ) - logical_plan = LogicalPlan(op) - - return Datastream(plan, self._epoch, self._lazy, logical_plan) - - def repartition(self, num_blocks: int, *, shuffle: bool = False) -> "Datastream": - """Repartition the datastream into exactly this number of blocks. - - After repartitioning, all blocks in the returned datastream will have - approximately the same number of rows. - - Examples: - >>> import ray - >>> ds = ray.data.range(100) - >>> # Set the number of output partitions to write to disk. - >>> ds.repartition(10).write_parquet("/tmp/test") - - Time complexity: O(datastream size / parallelism) - - Args: - num_blocks: The number of blocks. - shuffle: Whether to perform a distributed shuffle during the - repartition. When shuffle is enabled, each output block - contains a subset of data rows from each input block, which - requires all-to-all data movement. When shuffle is disabled, - output blocks are created from adjacent input blocks, - minimizing data movement. - - Returns: - The repartitioned datastream. - """ - - plan = self._plan.with_stage(RepartitionStage(num_blocks, shuffle)) - - logical_plan = self._logical_plan - if logical_plan is not None: - op = Repartition( - logical_plan.dag, - num_outputs=num_blocks, - shuffle=shuffle, - ) - logical_plan = LogicalPlan(op) - return Datastream(plan, self._epoch, self._lazy, logical_plan) - - def random_shuffle( - self, - *, - seed: Optional[int] = None, - num_blocks: Optional[int] = None, - **ray_remote_args, - ) -> "Datastream": - """Randomly shuffle the elements of this datastream. 
- - Examples: - >>> import ray - >>> ds = ray.data.range(100) - >>> # Shuffle this datastream randomly. - >>> ds.random_shuffle() - RandomShuffle - +- Datastream(num_blocks=..., num_rows=100, schema={id: int64}) - >>> # Shuffle this datastream with a fixed random seed. - >>> ds.random_shuffle(seed=12345) - RandomShuffle - +- Datastream(num_blocks=..., num_rows=100, schema={id: int64}) - - Time complexity: O(datastream size / parallelism) - - Args: - seed: Fix the random seed to use, otherwise one will be chosen - based on system randomness. - num_blocks: The number of output blocks after the shuffle, or None - to retain the number of blocks. - - Returns: - The shuffled datastream. - """ - - plan = self._plan.with_stage( - RandomShuffleStage(seed, num_blocks, ray_remote_args) - ) - - logical_plan = self._logical_plan - if logical_plan is not None: - op = RandomShuffle( - logical_plan.dag, - seed=seed, - num_outputs=num_blocks, - ray_remote_args=ray_remote_args, - ) - logical_plan = LogicalPlan(op) - return Datastream(plan, self._epoch, self._lazy, logical_plan) - - def randomize_block_order( - self, - *, - seed: Optional[int] = None, - ) -> "Datastream": - """Randomly shuffle the blocks of this datastream. - - Examples: - >>> import ray - >>> ds = ray.data.range(100) # doctest: +SKIP - >>> # Randomize the block order. - >>> ds.randomize_block_order() # doctest: +SKIP - >>> # Randomize the block order with a fixed random seed. - >>> ds.randomize_block_order(seed=12345) # doctest: +SKIP - - Args: - seed: Fix the random seed to use, otherwise one will be chosen - based on system randomness. - - Returns: - The block-shuffled datastream. 
- """ - - plan = self._plan.with_stage(RandomizeBlocksStage(seed)) - - logical_plan = self._logical_plan - if logical_plan is not None: - op = RandomizeBlocks( - logical_plan.dag, - seed=seed, - ) - logical_plan = LogicalPlan(op) - return Datastream(plan, self._epoch, self._lazy, logical_plan) - - def random_sample( - self, fraction: float, *, seed: Optional[int] = None - ) -> "Datastream": - """Randomly samples a fraction of the elements of this datastream. - - Note that the exact number of elements returned is not guaranteed, - and that the number of elements being returned is roughly fraction * total_rows. - - Examples: - >>> import ray - >>> ds = ray.data.range(100) # doctest: +SKIP - >>> ds.random_sample(0.1) # doctest: +SKIP - >>> ds.random_sample(0.2, seed=12345) # doctest: +SKIP - - Args: - fraction: The fraction of elements to sample. - seed: Seeds the python random pRNG generator. - - Returns: - Returns a Datastream containing the sampled elements. - """ - import random - - import pandas as pd - import pyarrow as pa - - if self.num_blocks() == 0: - raise ValueError("Cannot sample from an empty Datastream.") - - if fraction < 0 or fraction > 1: - raise ValueError("Fraction must be between 0 and 1.") - - if seed is not None: - random.seed(seed) - - def process_batch(batch): - if isinstance(batch, list): - return [row for row in batch if random.random() <= fraction] - if isinstance(batch, pa.Table): - # Lets the item pass if weight generated for that item <= fraction - return batch.filter( - pa.array(random.random() <= fraction for _ in range(len(batch))) - ) - if isinstance(batch, pd.DataFrame): - return batch.sample(frac=fraction) - if isinstance(batch, np.ndarray): - return _create_possibly_ragged_ndarray( - [row for row in batch if random.random() <= fraction] - ) - raise ValueError(f"Unsupported batch type: {type(batch)}") - - return self.map_batches(process_batch, batch_format=None) - - @ConsumptionAPI - def streaming_split( - self, - n: int, - *, - 
equal: bool = False, - locality_hints: Optional[List["NodeIdStr"]] = None, - ) -> List[DataIterator]: - """Returns ``n`` :class:`DataIterators ` that can - be used to read disjoint subsets of the datastream in parallel. - - This method is the recommended way to consume Datastreams from multiple - processes (e.g., for distributed training), and requires streaming execution - mode. - - Streaming split works by delegating the execution of this Datastream to a - coordinator actor. The coordinator pulls block references from the executed - stream, and divides those blocks among `n` output iterators. Iterators pull - blocks from the coordinator actor to return to their caller on `next`. - - The returned iterators are also repeatable; each iteration will trigger a - new execution of the Datastream. There is an implicit barrier at the start of - each iteration, which means that `next` must be called on all iterators before - the iteration starts. - - Warning: because iterators are pulling blocks from the same Datastream - execution, if one iterator falls behind other iterators may be stalled. - - Examples: - >>> import ray - >>> ds = ray.data.range(1000000) - >>> it1, it2 = ds.streaming_split(2, equal=True) - - >>> # Can consume from both iterators in parallel. - >>> @ray.remote - ... def consume(it): - ... for batch in it.iter_batches(): - ... print(batch) - >>> ray.get([consume.remote(it1), consume.remote(it2)]) # doctest: +SKIP - - >>> # Can loop over the iterators multiple times (multiple epochs). - >>> @ray.remote - ... def train(it): - ... NUM_EPOCHS = 100 - ... for _ in range(NUM_EPOCHS): - ... for batch in it.iter_batches(): - ... print(batch) - >>> ray.get([train.remote(it1), train.remote(it2)]) # doctest: +SKIP - - >>> # ERROR: this will block waiting for a read on `it2` to start. - >>> ray.get(train.remote(it1)) # doctest: +SKIP - - Args: - n: Number of output iterators to return. 
- equal: If True, each output iterator will see an exactly equal number - of rows, dropping data if necessary. If False, some iterators may see - slightly more or less rows than other, but no data will be dropped. - locality_hints: Specify the node ids corresponding to each iterator - location. Datastream will try to minimize data movement based on the - iterator output locations. This list must have length ``n``. You can - get the current node id of a task or actor by calling - ``ray.get_runtime_context().get_node_id()``. - - Returns: - The output iterator splits. These iterators are Ray-serializable and can - be freely passed to any Ray task or actor. - """ - return StreamSplitDataIterator.create(self, n, equal, locality_hints) - - @ConsumptionAPI - def split( - self, n: int, *, equal: bool = False, locality_hints: Optional[List[Any]] = None - ) -> List["MaterializedDatastream"]: - """Materialize and split the datastream into ``n`` disjoint pieces. - - This returns a list of MaterializedDatastreams that can be passed to Ray tasks - and actors and used to read the datastream records in parallel. - - Examples: - >>> import ray - >>> ds = ray.data.range(100) # doctest: +SKIP - >>> workers = ... # doctest: +SKIP - >>> # Split up a datastream to process over `n` worker actors. - >>> shards = ds.split(len(workers), locality_hints=workers) # doctest: +SKIP - >>> for shard, worker in zip(shards, workers): # doctest: +SKIP - ... worker.consume.remote(shard) # doctest: +SKIP - - Time complexity: O(1) - - See also: ``Datastream.split_at_indices``, ``Datastream.split_proportionately``, - and ``Datastream.streaming_split``. - - Args: - n: Number of child datastreams to return. - equal: Whether to guarantee each split has an equal - number of records. This may drop records if they cannot be - divided equally among the splits. - locality_hints: [Experimental] A list of Ray actor handles of size ``n``. 
- The system will try to co-locate the blocks of the i-th datastream - with the i-th actor to maximize data locality. - - Returns: - A list of ``n`` disjoint datastream splits. - """ - if n <= 0: - raise ValueError(f"The number of splits {n} is not positive.") - - # fallback to split_at_indices for equal split without locality hints. - # simple benchmarks shows spilit_at_indices yields more stable performance. - # https://github.com/ray-project/ray/pull/26641 for more context. - if equal and locality_hints is None: - count = self.count() - split_index = count // n - # we are creating n split_indices which will generate - # n + 1 splits; the last split will at most contains (n - 1) - # rows, which could be safely dropped. - split_indices = [split_index * i for i in range(1, n + 1)] - shards = self.split_at_indices(split_indices) - return shards[:n] - - if locality_hints and len(locality_hints) != n: - raise ValueError( - f"The length of locality_hints {len(locality_hints)} " - f"doesn't equal the number of splits {n}." - ) - # TODO: this is unreachable code. - if len(set(locality_hints)) != len(locality_hints): - raise ValueError( - "locality_hints must not contain duplicate actor handles" - ) - - blocks = self._plan.execute() - owned_by_consumer = blocks._owned_by_consumer - stats = self._plan.stats() - block_refs, metadata = zip(*blocks.get_blocks_with_metadata()) - - if locality_hints is None: - blocks = np.array_split(block_refs, n) - meta = np.array_split(metadata, n) - return [ - MaterializedDatastream( - ExecutionPlan( - BlockList( - b.tolist(), m.tolist(), owned_by_consumer=owned_by_consumer - ), - stats, - run_by_consumer=owned_by_consumer, - ), - self._epoch, - self._lazy, - ) - for b, m in zip(blocks, meta) - ] - - metadata_mapping = {b: m for b, m in zip(block_refs, metadata)} - - # If the locality_hints is set, we use a two-round greedy algorithm - # to co-locate the blocks with the actors based on block - # and actor's location (node_id). 
- # - # The split algorithm tries to allocate equally-sized blocks regardless - # of locality. Thus we first calculate the expected number of blocks - # for each split. - # - # In the first round, for each actor, we look for all blocks that - # match the actor's node_id, then allocate those matched blocks to - # this actor until we reach the limit(expected number). - # - # In the second round: fill each actor's allocation with - # remaining unallocated blocks until we reach the limit. - - def build_allocation_size_map( - num_blocks: int, actors: List[Any] - ) -> Dict[Any, int]: - """Given the total number of blocks and a list of actors, calcuate - the expected number of blocks to allocate for each actor. - """ - num_actors = len(actors) - num_blocks_per_actor = num_blocks // num_actors - num_blocks_left = num_blocks - num_blocks_per_actor * n - num_blocks_by_actor = {} - for i, actor in enumerate(actors): - num_blocks_by_actor[actor] = num_blocks_per_actor - if i < num_blocks_left: - num_blocks_by_actor[actor] += 1 - return num_blocks_by_actor - - def build_block_refs_by_node_id( - blocks: List[ObjectRef[Block]], - ) -> Dict[str, List[ObjectRef[Block]]]: - """Build the reverse index from node_id to block_refs. For - simplicity, if the block is stored on multiple nodes we - only pick the first one. 
- """ - block_ref_locations = ray.experimental.get_object_locations(blocks) - block_refs_by_node_id = collections.defaultdict(list) - for block_ref in blocks: - node_ids = block_ref_locations.get(block_ref, {}).get("node_ids", []) - node_id = node_ids[0] if node_ids else None - block_refs_by_node_id[node_id].append(block_ref) - return block_refs_by_node_id - - def build_node_id_by_actor(actors: List[Any]) -> Dict[Any, str]: - """Build a map from a actor to its node_id.""" - actors_state = ray._private.state.actors() - return { - actor: actors_state.get(actor._actor_id.hex(), {}) - .get("Address", {}) - .get("NodeID") - for actor in actors - } - - # expected number of blocks to be allocated for each actor - expected_block_count_by_actor = build_allocation_size_map( - len(block_refs), locality_hints - ) - # the reverse index from node_id to block_refs - block_refs_by_node_id = build_block_refs_by_node_id(block_refs) - # the map from actor to its node_id - node_id_by_actor = build_node_id_by_actor(locality_hints) - - allocation_per_actor = collections.defaultdict(list) - - # In the first round, for each actor, we look for all blocks that - # match the actor's node_id, then allocate those matched blocks to - # this actor until we reach the limit(expected number) - for actor in locality_hints: - node_id = node_id_by_actor[actor] - matching_blocks = block_refs_by_node_id[node_id] - expected_block_count = expected_block_count_by_actor[actor] - allocation = [] - while matching_blocks and len(allocation) < expected_block_count: - allocation.append(matching_blocks.pop()) - allocation_per_actor[actor] = allocation - - # In the second round: fill each actor's allocation with - # remaining unallocated blocks until we reach the limit - remaining_block_refs = list( - itertools.chain.from_iterable(block_refs_by_node_id.values()) - ) - for actor in locality_hints: - while ( - len(allocation_per_actor[actor]) < expected_block_count_by_actor[actor] - ): - 
allocation_per_actor[actor].append(remaining_block_refs.pop()) - - assert len(remaining_block_refs) == 0, len(remaining_block_refs) - - per_split_block_lists = [ - BlockList( - allocation_per_actor[actor], - [metadata_mapping[b] for b in allocation_per_actor[actor]], - owned_by_consumer=owned_by_consumer, - ) - for actor in locality_hints - ] - - if equal: - # equalize the splits - per_split_block_lists = _equalize(per_split_block_lists, owned_by_consumer) - - return [ - MaterializedDatastream( - ExecutionPlan( - block_split, - stats, - run_by_consumer=owned_by_consumer, - ), - self._epoch, - self._lazy, - ) - for block_split in per_split_block_lists - ] - - @ConsumptionAPI - def split_at_indices(self, indices: List[int]) -> List["MaterializedDatastream"]: - """Materialize and split the datastream at the given indices (like np.split). - - Examples: - >>> import ray - >>> ds = ray.data.range(10) - >>> d1, d2, d3 = ds.split_at_indices([2, 5]) - >>> d1.take_batch() - {'id': array([0, 1])} - >>> d2.take_batch() - {'id': array([2, 3, 4])} - >>> d3.take_batch() - {'id': array([5, 6, 7, 8, 9])} - - Time complexity: O(num splits) - - See also: ``Datastream.split_at_indices``, ``Datastream.split_proportionately``, - and ``Datastream.streaming_split``. - - Args: - indices: List of sorted integers which indicate where the datastream - will be split. If an index exceeds the length of the datastream, - an empty datastream will be returned. - - Returns: - The datastream splits. 
- """ - - if len(indices) < 1: - raise ValueError("indices must be at least of length 1") - if sorted(indices) != indices: - raise ValueError("indices must be sorted") - if indices[0] < 0: - raise ValueError("indices must be positive") - start_time = time.perf_counter() - block_list = self._plan.execute() - blocks, metadata = _split_at_indices( - block_list.get_blocks_with_metadata(), - indices, - block_list._owned_by_consumer, - ) - split_duration = time.perf_counter() - start_time - parent_stats = self._plan.stats() - splits = [] - for bs, ms in zip(blocks, metadata): - stats = DatastreamStats(stages={"Split": ms}, parent=parent_stats) - stats.time_total_s = split_duration - splits.append( - MaterializedDatastream( - ExecutionPlan( - BlockList( - bs, ms, owned_by_consumer=block_list._owned_by_consumer - ), - stats, - run_by_consumer=block_list._owned_by_consumer, - ), - self._epoch, - self._lazy, - ) - ) - return splits - - @ConsumptionAPI - def split_proportionately( - self, proportions: List[float] - ) -> List["MaterializedDatastream"]: - """Materialize and split the datastream using proportions. - - A common use case for this would be splitting the datastream into train - and test sets (equivalent to eg. scikit-learn's ``train_test_split``). - See also ``Datastream.train_test_split`` for a higher level abstraction. - - The indices to split at will be calculated in such a way so that all splits - always contains at least one element. If that is not possible, - an exception will be raised. - - This is equivalent to caulculating the indices manually and calling - ``Datastream.split_at_indices``. 
- - Examples: - >>> import ray - >>> ds = ray.data.range(10) - >>> d1, d2, d3 = ds.split_proportionately([0.2, 0.5]) - >>> d1.take_batch() - {'id': array([0, 1])} - >>> d2.take_batch() - {'id': array([2, 3, 4, 5, 6])} - >>> d3.take_batch() - {'id': array([7, 8, 9])} - - Time complexity: O(num splits) - - See also: ``Datastream.split``, ``Datastream.split_at_indices``, - ``Datastream.train_test_split`` - - Args: - proportions: List of proportions to split the datastream according to. - Must sum up to less than 1, and each proportion has to be bigger - than 0. - - Returns: - The datastream splits. - """ - - if len(proportions) < 1: - raise ValueError("proportions must be at least of length 1") - if sum(proportions) >= 1: - raise ValueError("proportions must sum to less than 1") - if any(p <= 0 for p in proportions): - raise ValueError("proportions must be bigger than 0") - - datastream_length = self.count() - cumulative_proportions = np.cumsum(proportions) - split_indices = [ - int(datastream_length * proportion) for proportion in cumulative_proportions - ] - - # Ensure each split has at least one element - subtract = 0 - for i in range(len(split_indices) - 2, -1, -1): - split_indices[i] -= subtract - if split_indices[i] == split_indices[i + 1]: - subtract += 1 - split_indices[i] -= 1 - if any(i <= 0 for i in split_indices): - raise ValueError( - "Couldn't create non-empty splits with the given proportions." - ) - - return self.split_at_indices(split_indices) - - @ConsumptionAPI - def train_test_split( - self, - test_size: Union[int, float], - *, - shuffle: bool = False, - seed: Optional[int] = None, - ) -> Tuple["MaterializedDatastream", "MaterializedDatastream"]: - """Materialize and split the datastream into train and test subsets. 
- - Examples: - - >>> import ray - >>> ds = ray.data.range(8) - >>> train, test = ds.train_test_split(test_size=0.25) - >>> train.take_batch() - {'id': array([0, 1, 2, 3, 4, 5])} - >>> test.take_batch() - {'id': array([6, 7])} - - Args: - test_size: If float, should be between 0.0 and 1.0 and represent the - proportion of the datastream to include in the test split. If int, - represents the absolute number of test samples. The train split will - always be the compliment of the test split. - shuffle: Whether or not to globally shuffle the datastream before splitting. - Defaults to False. This may be a very expensive operation with large - datastream. - seed: Fix the random seed to use for shuffle, otherwise one will be chosen - based on system randomness. Ignored if ``shuffle=False``. - - Returns: - Train and test subsets as two MaterializedDatastreams. - """ - ds = self - - if shuffle: - ds = ds.random_shuffle(seed=seed) - - if not isinstance(test_size, (int, float)): - raise TypeError(f"`test_size` must be int or float got {type(test_size)}.") - if isinstance(test_size, float): - if test_size <= 0 or test_size >= 1: - raise ValueError( - "If `test_size` is a float, it must be bigger than 0 and smaller " - f"than 1. Got {test_size}." - ) - return ds.split_proportionately([1 - test_size]) - else: - ds_length = ds.count() - if test_size <= 0 or test_size >= ds_length: - raise ValueError( - "If `test_size` is an int, it must be bigger than 0 and smaller " - f"than the size of the datastream ({ds_length}). " - f"Got {test_size}." - ) - return ds.split_at_indices([ds_length - test_size]) - - @ConsumptionAPI(pattern="Args:") - def union(self, *other: List["Datastream"]) -> "Datastream": - """Materialize and combine this datastream with others of the same type. - - The order of the blocks in the datastreams is preserved, as is the - relative ordering between the datastreams passed in the argument list. - - .. note:: - Unioned datastreams are not lineage-serializable, i.e. 
they can not be - used as a tunable hyperparameter in Ray Tune. - - Args: - other: List of datastreams to combine with this one. The datastreams - must have the same schema as this datastream, otherwise the - behavior is undefined. - - Returns: - A new datastream holding the union of their data. - """ - - start_time = time.perf_counter() - - owned_by_consumer = self._plan.execute()._owned_by_consumer - datastreams = [self] + list(other) - bls = [] - has_nonlazy = False - for ds in datastreams: - bl = ds._plan.execute() - if not isinstance(bl, LazyBlockList): - has_nonlazy = True - bls.append(bl) - if has_nonlazy: - blocks = [] - metadata = [] - for bl in bls: - if isinstance(bl, LazyBlockList): - bs, ms = bl._get_blocks_with_metadata() - else: - bs, ms = bl._blocks, bl._metadata - blocks.extend(bs) - metadata.extend(ms) - blocklist = BlockList(blocks, metadata, owned_by_consumer=owned_by_consumer) - else: - tasks: List[ReadTask] = [] - block_partition_refs: List[ObjectRef[BlockPartition]] = [] - block_partition_meta_refs: List[ObjectRef[BlockMetadata]] = [] - - # Gather read task names from input blocks of unioned Datastreams, - # and concat them before passing to resulting LazyBlockList - read_task_names = [] - self_read_name = self._plan._in_blocks._read_stage_name or "Read" - read_task_names.append(self_read_name) - other_read_names = [ - o._plan._in_blocks._read_stage_name or "Read" for o in other - ] - read_task_names.extend(other_read_names) - - for bl in bls: - tasks.extend(bl._tasks) - block_partition_refs.extend(bl._block_partition_refs) - block_partition_meta_refs.extend(bl._block_partition_meta_refs) - blocklist = LazyBlockList( - tasks, - f"Union({','.join(read_task_names)})", - block_partition_refs, - block_partition_meta_refs, - owned_by_consumer=owned_by_consumer, - ) - - epochs = [ds._get_epoch() for ds in datastreams] - max_epoch = max(*epochs) - if len(set(epochs)) > 1: - if ray.util.log_once("datastream_epoch_warned"): - logger.warning( - 
"Datastream contains data from multiple epochs: {}, " - "likely due to a `rewindow()` call. The higher epoch " - "number {} will be used. This warning will not " - "be shown again.".format(set(epochs), max_epoch) - ) - stats = DatastreamStats( - stages={"Union": []}, - parent=[d._plan.stats() for d in datastreams], - ) - stats.time_total_s = time.perf_counter() - start_time - return Datastream( - ExecutionPlan(blocklist, stats, run_by_consumer=owned_by_consumer), - max_epoch, - self._lazy, - ) - - def groupby(self, key: Optional[str]) -> "GroupedData": - """Group the datastream by the key function or column name. - - Examples: - >>> import ray - >>> # Group by a table column and aggregate. - >>> ray.data.from_items([ - ... {"A": x % 3, "B": x} for x in range(100)]).groupby( - ... "A").count() - Aggregate - +- Datastream(num_blocks=100, num_rows=100, schema={A: int64, B: int64}) - - Time complexity: O(datastream size * log(datastream size / parallelism)) - - Args: - key: A column name. If this is None, the grouping is global. - - Returns: - A lazy GroupedData that can be aggregated later. - """ - from ray.data.grouped_data import GroupedData - - # Always allow None since groupby interprets that as grouping all - # records into a single global group. - if key is not None: - _validate_key_fn(self.schema(fetch_if_missing=True), key) - - return GroupedData(self, key) - - @ConsumptionAPI - def aggregate(self, *aggs: AggregateFn) -> Union[Any, Dict[str, Any]]: - """Aggregate the entire datastream as one group. - - Examples: - >>> import ray - >>> from ray.data.aggregate import Max, Mean - >>> ray.data.range(100).aggregate(Max("id"), Mean("id")) - {'max(id)': 99, 'mean(id)': 49.5} - - Time complexity: O(datastream size / parallelism) - - Args: - aggs: Aggregations to do. - - Returns: - If the input datastream is a simple datastream then the output is - a tuple of ``(agg1, agg2, ...)`` where each tuple element is - the corresponding aggregation result. 
- If the input datastream is an Arrow datastream then the output is - an dict where each column is the corresponding aggregation result. - If the datastream is empty, return ``None``. - """ - ret = self.groupby(None).aggregate(*aggs).take(1) - return ret[0] if len(ret) > 0 else None - - @ConsumptionAPI - def sum( - self, on: Optional[Union[str, List[str]]] = None, ignore_nulls: bool = True - ) -> Union[Any, Dict[str, Any]]: - """Compute sum over entire datastream. - - Examples: - >>> import ray - >>> ray.data.range(100).sum("id") - 4950 - >>> ray.data.from_items([ - ... {"A": i, "B": i**2} - ... for i in range(100)]).sum(["A", "B"]) - {'sum(A)': 4950, 'sum(B)': 328350} - - Args: - on: a column name or a list of column names to aggregate. - ignore_nulls: Whether to ignore null values. If ``True``, null - values will be ignored when computing the sum; if ``False``, - if a null value is encountered, the output will be None. - We consider np.nan, None, and pd.NaT to be null values. - Default is ``True``. - - Returns: - The sum result. - - For different values of ``on``, the return varies: - - - ``on=None``: a dict containing the column-wise sum of all - columns, - - ``on="col"``: a scalar representing the sum of all items in - column ``"col"``, - - ``on=["col_1", ..., "col_n"]``: an n-column ``dict`` - containing the column-wise sum of the provided columns. - - If the datastream is empty, all values are null, or any value is null - AND ``ignore_nulls`` is ``False``, then the output will be None. - """ - ret = self._aggregate_on(Sum, on, ignore_nulls) - return self._aggregate_result(ret) - - @ConsumptionAPI - def min( - self, on: Optional[Union[str, List[str]]] = None, ignore_nulls: bool = True - ) -> Union[Any, Dict[str, Any]]: - """Compute minimum over entire datastream. - - Examples: - >>> import ray - >>> ray.data.range(100).min("id") - 0 - >>> ray.data.from_items([ - ... {"A": i, "B": i**2} - ... 
for i in range(100)]).min(["A", "B"]) - {'min(A)': 0, 'min(B)': 0} - - Args: - on: a column name or a list of column names to aggregate. - ignore_nulls: Whether to ignore null values. If ``True``, null - values will be ignored when computing the min; if ``False``, - if a null value is encountered, the output will be None. - We consider np.nan, None, and pd.NaT to be null values. - Default is ``True``. - - Returns: - The min result. - - For different values of ``on``, the return varies: - - - ``on=None``: an dict containing the column-wise min of - all columns, - - ``on="col"``: a scalar representing the min of all items in - column ``"col"``, - - ``on=["col_1", ..., "col_n"]``: an n-column dict - containing the column-wise min of the provided columns. - - If the datastream is empty, all values are null, or any value is null - AND ``ignore_nulls`` is ``False``, then the output will be None. - """ - ret = self._aggregate_on(Min, on, ignore_nulls) - return self._aggregate_result(ret) - - @ConsumptionAPI - def max( - self, on: Optional[Union[str, List[str]]] = None, ignore_nulls: bool = True - ) -> Union[Any, Dict[str, Any]]: - """Compute maximum over entire datastream. - - Examples: - >>> import ray - >>> ray.data.range(100).max("id") - 99 - >>> ray.data.from_items([ - ... {"A": i, "B": i**2} - ... for i in range(100)]).max(["A", "B"]) - {'max(A)': 99, 'max(B)': 9801} - - Args: - on: a column name or a list of column names to aggregate. - ignore_nulls: Whether to ignore null values. If ``True``, null - values will be ignored when computing the max; if ``False``, - if a null value is encountered, the output will be None. - We consider np.nan, None, and pd.NaT to be null values. - Default is ``True``. - - Returns: - The max result. 
- - For different values of ``on``, the return varies: - - - ``on=None``: an dict containing the column-wise max of - all columns, - - ``on="col"``: a scalar representing the max of all items in - column ``"col"``, - - ``on=["col_1", ..., "col_n"]``: an n-column dict - containing the column-wise max of the provided columns. - - If the datastream is empty, all values are null, or any value is null - AND ``ignore_nulls`` is ``False``, then the output will be None. - """ - ret = self._aggregate_on(Max, on, ignore_nulls) - return self._aggregate_result(ret) - - @ConsumptionAPI - def mean( - self, on: Optional[Union[str, List[str]]] = None, ignore_nulls: bool = True - ) -> Union[Any, Dict[str, Any]]: - """Compute mean over entire datastream. - - Examples: - >>> import ray - >>> ray.data.range(100).mean("id") - 49.5 - >>> ray.data.from_items([ - ... {"A": i, "B": i**2} - ... for i in range(100)]).mean(["A", "B"]) - {'mean(A)': 49.5, 'mean(B)': 3283.5} - - Args: - on: a column name or a list of column names to aggregate. - ignore_nulls: Whether to ignore null values. If ``True``, null - values will be ignored when computing the mean; if ``False``, - if a null value is encountered, the output will be None. - We consider np.nan, None, and pd.NaT to be null values. - Default is ``True``. - - Returns: - The mean result. - - For different values of ``on``, the return varies: - - - ``on=None``: an dict containing the column-wise mean of - all columns, - - ``on="col"``: a scalar representing the mean of all items in - column ``"col"``, - - ``on=["col_1", ..., "col_n"]``: an n-column dict - containing the column-wise mean of the provided columns. - - If the datastream is empty, all values are null, or any value is null - AND ``ignore_nulls`` is ``False``, then the output will be None. 
- """ - ret = self._aggregate_on(Mean, on, ignore_nulls) - return self._aggregate_result(ret) - - @ConsumptionAPI - def std( - self, - on: Optional[Union[str, List[str]]] = None, - ddof: int = 1, - ignore_nulls: bool = True, - ) -> Union[Any, Dict[str, Any]]: - """Compute standard deviation over entire datastream. - - Examples: - >>> import ray - >>> round(ray.data.range(100).std("id", ddof=0), 5) - 28.86607 - >>> ray.data.from_items([ - ... {"A": i, "B": i**2} - ... for i in range(100)]).std(["A", "B"]) - {'std(A)': 29.011491975882016, 'std(B)': 2968.1748039269296} - - .. note:: This uses Welford's online method for an accumulator-style computation - of the standard deviation. This method was chosen due to it's numerical - stability, and it being computable in a single pass. This may give different - (but more accurate) results than NumPy, Pandas, and sklearn, which use a - less numerically stable two-pass algorithm. - See - https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Welford's_online_algorithm - - Args: - on: a column name or a list of column names to aggregate. - ddof: Delta Degrees of Freedom. The divisor used in calculations - is ``N - ddof``, where ``N`` represents the number of elements. - ignore_nulls: Whether to ignore null values. If ``True``, null - values will be ignored when computing the std; if ``False``, - if a null value is encountered, the output will be None. - We consider np.nan, None, and pd.NaT to be null values. - Default is ``True``. - - Returns: - The standard deviation result. - - For different values of ``on``, the return varies: - - - ``on=None``: an dict containing the column-wise std of - all columns, - - ``on="col"``: a scalar representing the std of all items in - column ``"col"``, - - ``on=["col_1", ..., "col_n"]``: an n-column dict - containing the column-wise std of the provided columns. 
- - If the datastream is empty, all values are null, or any value is null - AND ``ignore_nulls`` is ``False``, then the output will be None. - """ - ret = self._aggregate_on(Std, on, ignore_nulls, ddof=ddof) - return self._aggregate_result(ret) - - def sort(self, key: Optional[str] = None, descending: bool = False) -> "Datastream": - """Sort the datastream by the specified key column or key function. - - Examples: - >>> import ray - >>> # Sort by a single column in descending order. - >>> ds = ray.data.from_items( - ... [{"value": i} for i in range(1000)]) - >>> ds.sort("value", descending=True) - Sort - +- Datastream(num_blocks=200, num_rows=1000, schema={value: int64}) - - Time complexity: O(datastream size * log(datastream size / parallelism)) - - Args: - key: The column to sort by. To sort by multiple columns, use a map function - to generate the sort column beforehand. - descending: Whether to sort in descending order. - - Returns: - A new, sorted datastream. - """ - - plan = self._plan.with_stage(SortStage(self, key, descending)) - - logical_plan = self._logical_plan - if logical_plan is not None: - op = Sort( - logical_plan.dag, - key=key, - descending=descending, - ) - logical_plan = LogicalPlan(op) - return Datastream(plan, self._epoch, self._lazy, logical_plan) - - def zip(self, other: "Datastream") -> "Datastream": - """Materialize and zip this datastream with the elements of another. - - The datastreams must have the same number of rows. Their column sets will be - merged, and any duplicate column names disambiguated with _1, _2, etc. suffixes. - - .. note:: - The smaller of the two datastreams will be repartitioned to align the number - of rows per block with the larger datastream. - - .. note:: - Zipped datastreams are not lineage-serializable, i.e. they can not be used - as a tunable hyperparameter in Ray Tune. 
- - Examples: - >>> import ray - >>> ds1 = ray.data.range(5) - >>> ds2 = ray.data.range(5) - >>> ds1.zip(ds2).take_batch() - {'id': array([0, 1, 2, 3, 4]), 'id_1': array([0, 1, 2, 3, 4])} - - Time complexity: O(datastream size / parallelism) - - Args: - other: The datastream to zip with on the right hand side. - - Returns: - A ``Datastream`` containing the columns of the second datastream - concatenated horizontally with the columns of the first datastream, - with duplicate column names disambiguated with _1, _2, etc. suffixes. - """ - - plan = self._plan.with_stage(ZipStage(other)) - - logical_plan = self._logical_plan - other_logical_plan = other._logical_plan - if logical_plan is not None and other_logical_plan is not None: - op = Zip(logical_plan.dag, other_logical_plan.dag) - logical_plan = LogicalPlan(op) - return Datastream(plan, self._epoch, self._lazy, logical_plan) - - @ConsumptionAPI - def limit(self, limit: int) -> "Datastream": - """Materialize and truncate the datastream to the first ``limit`` records. - - Contrary to :meth`.take`, this will not move any data to the caller's - machine. Instead, it will return a new ``Datastream`` pointing to the truncated - distributed data. - - Examples: - >>> import ray - >>> ds = ray.data.range(1000) - >>> ds.limit(5).take_batch() - {'id': array([0, 1, 2, 3, 4])} - - Time complexity: O(limit specified) - - Args: - limit: The size of the datastream to truncate to. - - Returns: - The truncated datastream. - """ - plan = self._plan.with_stage(LimitStage(limit)) - logical_plan = self._logical_plan - if logical_plan is not None: - op = Limit(logical_plan.dag, limit=limit) - logical_plan = LogicalPlan(op) - return Datastream(plan, self._epoch, self._lazy, logical_plan) - - @ConsumptionAPI(pattern="Time complexity:") - def take_batch( - self, batch_size: int = 20, *, batch_format: Optional[str] = "default" - ) -> DataBatch: - """Return up to ``batch_size`` records from the datastream in a batch. 
- - Unlike take(), the records are returned in the same format as used for - `iter_batches` and `map_batches`. - - This will move up to ``batch_size`` records to the caller's machine; if - ``batch_size`` is very large, this can result in an OutOfMemory crash on - the caller. - - Time complexity: O(batch_size specified) - - Args: - batch_size: The max number of records to return. - batch_format: Specify ``"default"`` to use the default block format - (NumPy), ``"pandas"`` to select ``pandas.DataFrame``, "pyarrow" to - select ``pyarrow.Table``, or ``"numpy"`` to select - ``Dict[str, numpy.ndarray]``, or None to return the underlying block - exactly as is with no additional formatting. - - Returns: - A batch of up to ``batch_size`` records from the datastream. - - Raises: - ValueError if the datastream is empty. - """ - batch_format = _apply_strict_mode_batch_format(batch_format) - try: - res = next( - self.iter_batches( - batch_size=batch_size, prefetch_batches=0, batch_format=batch_format - ) - ) - except StopIteration: - raise ValueError("The datastream is empty.") - self._synchronize_progress_bar() - return res - - @ConsumptionAPI(pattern="Time complexity:") - def take(self, limit: int = 20) -> List[Dict[str, Any]]: - """Return up to ``limit`` records from the datastream. - - This will move up to ``limit`` records to the caller's machine; if - ``limit`` is very large, this can result in an OutOfMemory crash on - the caller. - - Time complexity: O(limit specified) - - Args: - limit: The max number of records to return. - - Returns: - A list of up to ``limit`` records from the datastream. - """ - if ray.util.log_once("datastream_take"): - logger.info( - "Tip: Use `take_batch()` instead of `take() / show()` to return " - "records in pandas or numpy batch format." 
- ) - output = [] - for row in self.iter_rows(): - output.append(row) - if len(output) >= limit: - break - self._synchronize_progress_bar() - return output - - @ConsumptionAPI(pattern="Time complexity:") - def take_all(self, limit: Optional[int] = None) -> List[Dict[str, Any]]: - """Return all of the records in the datastream. - - This will move the entire datastream to the caller's machine; if the - datastream is very large, this can result in an OutOfMemory crash on - the caller. - - Time complexity: O(datastream size) - - Args: - limit: Raise an error if the size exceeds the specified limit. - - Returns: - A list of all the records in the datastream. - """ - output = [] - for row in self.iter_rows(): - output.append(row) - if limit is not None and len(output) > limit: - raise ValueError( - f"The datastream has more than the given limit of {limit} records." - ) - self._synchronize_progress_bar() - return output - - @ConsumptionAPI(pattern="Time complexity:") - def show(self, limit: int = 20) -> None: - """Print up to the given number of records from the datastream. - - Time complexity: O(limit specified) - - Args: - limit: The max number of records to print. - """ - for row in self.take(limit): - print(row) - - @ConsumptionAPI( - if_more_than_read=True, - datasource_metadata="row count", - pattern="Time complexity:", - ) - def count(self) -> int: - """Count the number of records in the datastream. - - Time complexity: O(datastream size / parallelism), O(1) for parquet - - Returns: - The number of records in the datastream. - """ - # Handle empty datastream. - if self.num_blocks() == 0: - return 0 - - # For parquet, we can return the count directly from metadata. 
- meta_count = self._meta_count() - if meta_count is not None: - return meta_count - - get_num_rows = cached_remote_fn(_get_num_rows) - - return sum( - ray.get( - [get_num_rows.remote(block) for block in self.get_internal_block_refs()] - ) - ) - - @ConsumptionAPI( - if_more_than_read=True, - datasource_metadata="schema", - extra_condition="or if ``fetch_if_missing=True`` (the default)", - pattern="Time complexity:", - ) - def schema(self, fetch_if_missing: bool = True) -> Optional["Schema"]: - """Return the schema of the datastream. - - Time complexity: O(1) - - Args: - fetch_if_missing: If True, synchronously fetch the schema if it's - not known. If False, None is returned if the schema is not known. - Default is True. - - Returns: - The ``ray.data.Schema`` class of the records, or None if the - schema is not known and fetch_if_missing is False. - """ - ctx = DataContext.get_current() - base_schema = self._plan.schema(fetch_if_missing=fetch_if_missing) - if ctx.strict_mode: - if base_schema: - return Schema(base_schema) - else: - return None - else: - return base_schema - - def num_blocks(self) -> int: - """Return the number of blocks of this datastream. - - Note that during read and transform operations, the number of blocks - may be dynamically adjusted to respect memory limits, increasing the - number of blocks at runtime. - - Time complexity: O(1) - - Returns: - The number of blocks of this datastream. - """ - return self._plan.initial_num_blocks() - - @ConsumptionAPI(if_more_than_read=True, pattern="Time complexity:") - def size_bytes(self) -> int: - """Return the in-memory size of the datastream. - - Time complexity: O(1) - - Returns: - The in-memory size of the datastream in bytes, or None if the - in-memory size is not known. 
- """ - metadata = self._plan.execute().get_metadata() - if not metadata or metadata[0].size_bytes is None: - return None - return sum(m.size_bytes for m in metadata) - - @ConsumptionAPI(if_more_than_read=True, pattern="Time complexity:") - def input_files(self) -> List[str]: - """Return the list of input files for the datastream. - - Time complexity: O(num input files) - - Returns: - The list of input files used to create the datastream, or an empty - list if the input files is not known. - """ - metadata = self._plan.execute().get_metadata() - files = set() - for m in metadata: - for f in m.input_files: - files.add(f) - return list(files) - - @ConsumptionAPI - def write_parquet( - self, - path: str, - *, - filesystem: Optional["pyarrow.fs.FileSystem"] = None, - try_create_dir: bool = True, - arrow_open_stream_args: Optional[Dict[str, Any]] = None, - block_path_provider: BlockWritePathProvider = DefaultBlockWritePathProvider(), - arrow_parquet_args_fn: Callable[[], Dict[str, Any]] = lambda: {}, - ray_remote_args: Dict[str, Any] = None, - **arrow_parquet_args, - ) -> None: - """Write the datastream to parquet. - - This is only supported for datastreams convertible to Arrow records. - To control the number of files, use ``.repartition()``. - - Unless a custom block path provider is given, the format of the output - files will be {uuid}_{block_idx}.parquet, where ``uuid`` is an unique - id for the datastream. - - Examples: - >>> import ray - >>> ds = ray.data.range(100) # doctest: +SKIP - >>> ds.write_parquet("s3://bucket/path") # doctest: +SKIP - - Time complexity: O(datastream size / parallelism) - - Args: - path: The path to the destination root directory, where Parquet - files will be written to. - filesystem: The filesystem implementation to write to. - try_create_dir: Try to create all directories in destination path - if True. Does nothing if all directories already exist. 
- arrow_open_stream_args: kwargs passed to - pyarrow.fs.FileSystem.open_output_stream - block_path_provider: BlockWritePathProvider implementation to - write each datastream block to a custom output path. - arrow_parquet_args_fn: Callable that returns a dictionary of write - arguments to use when writing each block to a file. Overrides - any duplicate keys from arrow_parquet_args. This should be used - instead of arrow_parquet_args if any of your write arguments - cannot be pickled, or if you'd like to lazily resolve the write - arguments for each datastream block. - ray_remote_args: Kwargs passed to ray.remote in the write tasks. - arrow_parquet_args: Options to pass to - pyarrow.parquet.write_table(), which is used to write out each - block to a file. - """ - self.write_datasource( - ParquetDatasource(), - ray_remote_args=ray_remote_args, - path=path, - datastream_uuid=self._uuid, - filesystem=filesystem, - try_create_dir=try_create_dir, - open_stream_args=arrow_open_stream_args, - block_path_provider=block_path_provider, - write_args_fn=arrow_parquet_args_fn, - **arrow_parquet_args, - ) - - @ConsumptionAPI - def write_json( - self, - path: str, - *, - filesystem: Optional["pyarrow.fs.FileSystem"] = None, - try_create_dir: bool = True, - arrow_open_stream_args: Optional[Dict[str, Any]] = None, - block_path_provider: BlockWritePathProvider = DefaultBlockWritePathProvider(), - pandas_json_args_fn: Callable[[], Dict[str, Any]] = lambda: {}, - ray_remote_args: Dict[str, Any] = None, - **pandas_json_args, - ) -> None: - """Write the datastream to json. - - This is only supported for datastreams convertible to Arrow records. - To control the number of files, use ``.repartition()``. - - Unless a custom block path provider is given, the format of the output - files will be {self._uuid}_{block_idx}.json, where ``uuid`` is an - unique id for the datastream. 
- - Examples: - >>> import ray - >>> ds = ray.data.range(100) # doctest: +SKIP - >>> ds.write_json("s3://bucket/path") # doctest: +SKIP - - Time complexity: O(datastream size / parallelism) - - Args: - path: The path to the destination root directory, where json - files will be written to. - filesystem: The filesystem implementation to write to. - try_create_dir: Try to create all directories in destination path - if True. Does nothing if all directories already exist. - arrow_open_stream_args: kwargs passed to - pyarrow.fs.FileSystem.open_output_stream - block_path_provider: BlockWritePathProvider implementation to - write each datastream block to a custom output path. - pandas_json_args_fn: Callable that returns a dictionary of write - arguments to use when writing each block to a file. Overrides - any duplicate keys from pandas_json_args. This should be used - instead of pandas_json_args if any of your write arguments - cannot be pickled, or if you'd like to lazily resolve the write - arguments for each datastream block. - ray_remote_args: Kwargs passed to ray.remote in the write tasks. - pandas_json_args: These args will be passed to - pandas.DataFrame.to_json(), which we use under the hood to - write out each Datastream block. These - are dict(orient="records", lines=True) by default. 
- """ - self.write_datasource( - JSONDatasource(), - ray_remote_args=ray_remote_args, - path=path, - datastream_uuid=self._uuid, - filesystem=filesystem, - try_create_dir=try_create_dir, - open_stream_args=arrow_open_stream_args, - block_path_provider=block_path_provider, - write_args_fn=pandas_json_args_fn, - **pandas_json_args, - ) - - @ConsumptionAPI - def write_csv( - self, - path: str, - *, - filesystem: Optional["pyarrow.fs.FileSystem"] = None, - try_create_dir: bool = True, - arrow_open_stream_args: Optional[Dict[str, Any]] = None, - block_path_provider: BlockWritePathProvider = DefaultBlockWritePathProvider(), - arrow_csv_args_fn: Callable[[], Dict[str, Any]] = lambda: {}, - ray_remote_args: Dict[str, Any] = None, - **arrow_csv_args, - ) -> None: - """Write the datastream to csv. - - This is only supported for datastreams convertible to Arrow records. - To control the number of files, use ``.repartition()``. - - Unless a custom block path provider is given, the format of the output - files will be {uuid}_{block_idx}.csv, where ``uuid`` is an unique id - for the datastream. - - Examples: - >>> import ray - >>> ds = ray.data.range(100) # doctest: +SKIP - >>> ds.write_csv("s3://bucket/path") # doctest: +SKIP - - Time complexity: O(datastream size / parallelism) - - Args: - path: The path to the destination root directory, where csv - files will be written to. - filesystem: The filesystem implementation to write to. - try_create_dir: Try to create all directories in destination path - if True. Does nothing if all directories already exist. - arrow_open_stream_args: kwargs passed to - pyarrow.fs.FileSystem.open_output_stream - block_path_provider: BlockWritePathProvider implementation to - write each datastream block to a custom output path. - arrow_csv_args_fn: Callable that returns a dictionary of write - arguments to use when writing each block to a file. Overrides - any duplicate keys from arrow_csv_args. 
This should be used - instead of arrow_csv_args if any of your write arguments - cannot be pickled, or if you'd like to lazily resolve the write - arguments for each datastream block. - ray_remote_args: Kwargs passed to ray.remote in the write tasks. - arrow_csv_args: Other CSV write options to pass to pyarrow. - """ - self.write_datasource( - CSVDatasource(), - ray_remote_args=ray_remote_args, - path=path, - datastream_uuid=self._uuid, - filesystem=filesystem, - try_create_dir=try_create_dir, - open_stream_args=arrow_open_stream_args, - block_path_provider=block_path_provider, - write_args_fn=arrow_csv_args_fn, - **arrow_csv_args, - ) - - @ConsumptionAPI - def write_tfrecords( - self, - path: str, - *, - tf_schema: Optional["schema_pb2.Schema"] = None, - filesystem: Optional["pyarrow.fs.FileSystem"] = None, - try_create_dir: bool = True, - arrow_open_stream_args: Optional[Dict[str, Any]] = None, - block_path_provider: BlockWritePathProvider = DefaultBlockWritePathProvider(), - ray_remote_args: Dict[str, Any] = None, - ) -> None: - """Write the datastream to TFRecord files. - - The `TFRecord `_ - files will contain - `tf.train.Example `_ # noqa: E501 - records, with one Example record for each row in the datastream. - - .. warning:: - tf.train.Feature only natively stores ints, floats, and bytes, - so this function only supports datastreams with these data types, - and will error if the datastream contains unsupported types. - - This is only supported for datastreams convertible to Arrow records. - To control the number of files, use ``.repartition()``. - - Unless a custom block path provider is given, the format of the output - files will be {uuid}_{block_idx}.tfrecords, where ``uuid`` is an unique id - for the datastream. - - Examples: - >>> import ray - >>> ds = ray.data.from_items([ - ... { "name": "foo", "score": 42 }, - ... { "name": "bar", "score": 43 }, - ... 
]) - >>> ds.write_tfrecords("s3://bucket/path") # doctest: +SKIP - - Time complexity: O(datastream size / parallelism) - - Args: - path: The path to the destination root directory, where tfrecords - files will be written to. - filesystem: The filesystem implementation to write to. - try_create_dir: Try to create all directories in destination path - if True. Does nothing if all directories already exist. - arrow_open_stream_args: kwargs passed to - pyarrow.fs.FileSystem.open_output_stream - block_path_provider: BlockWritePathProvider implementation to - write each datastream block to a custom output path. - ray_remote_args: Kwargs passed to ray.remote in the write tasks. - - """ - - self.write_datasource( - TFRecordDatasource(), - ray_remote_args=ray_remote_args, - path=path, - datastream_uuid=self._uuid, - filesystem=filesystem, - try_create_dir=try_create_dir, - open_stream_args=arrow_open_stream_args, - block_path_provider=block_path_provider, - tf_schema=tf_schema, - ) - - @PublicAPI(stability="alpha") - @ConsumptionAPI - def write_webdataset( - self, - path: str, - *, - filesystem: Optional["pyarrow.fs.FileSystem"] = None, - try_create_dir: bool = True, - arrow_open_stream_args: Optional[Dict[str, Any]] = None, - block_path_provider: BlockWritePathProvider = DefaultBlockWritePathProvider(), - ray_remote_args: Dict[str, Any] = None, - encoder: Optional[Union[bool, str, callable, list]] = True, - ) -> None: - """Write the datastream to WebDataset files. - - The `TFRecord `_ - files will contain - `tf.train.Example `_ # noqa: E501 - records, with one Example record for each row in the datastream. - - .. warning:: - tf.train.Feature only natively stores ints, floats, and bytes, - so this function only supports datastreams with these data types, - and will error if the datastream contains unsupported types. - - This is only supported for datastreams convertible to Arrow records. - To control the number of files, use ``.repartition()``. 
- - Unless a custom block path provider is given, the format of the output - files will be {uuid}_{block_idx}.tfrecords, where ``uuid`` is an unique id - for the datastream. - - Examples: - >>> import ray - >>> ds = ray.data.from_items([ - ... { "name": "foo", "score": 42 }, - ... { "name": "bar", "score": 43 }, - ... ]) - >>> ds.write_webdataset("s3://bucket/path") # doctest: +SKIP - - Time complexity: O(datastream size / parallelism) - - Args: - path: The path to the destination root directory, where tfrecords - files will be written to. - filesystem: The filesystem implementation to write to. - try_create_dir: Try to create all directories in destination path - if True. Does nothing if all directories already exist. - arrow_open_stream_args: kwargs passed to - pyarrow.fs.FileSystem.open_output_stream - block_path_provider: BlockWritePathProvider implementation to - write each datastream block to a custom output path. - ray_remote_args: Kwargs passed to ray.remote in the write tasks. - - """ - - from ray.data.datasource.webdataset_datasource import WebDatasetDatasource - - self.write_datasource( - WebDatasetDatasource(), - ray_remote_args=ray_remote_args, - path=path, - datastream_uuid=self._uuid, - filesystem=filesystem, - try_create_dir=try_create_dir, - open_stream_args=arrow_open_stream_args, - block_path_provider=block_path_provider, - encoder=encoder, - ) - - @ConsumptionAPI - def write_numpy( - self, - path: str, - *, - column: Optional[str] = None, - filesystem: Optional["pyarrow.fs.FileSystem"] = None, - try_create_dir: bool = True, - arrow_open_stream_args: Optional[Dict[str, Any]] = None, - block_path_provider: BlockWritePathProvider = DefaultBlockWritePathProvider(), - ray_remote_args: Dict[str, Any] = None, - ) -> None: - """Write a tensor column of the datastream to npy files. - - This is only supported for datastreams convertible to Arrow records that - contain a TensorArray column. To control the number of files, use - ``.repartition()``. 
- - Unless a custom block path provider is given, the format of the output - files will be {self._uuid}_{block_idx}.npy, where ``uuid`` is an unique - id for the datastream. - - Examples: - >>> import ray - >>> ds = ray.data.range(100) # doctest: +SKIP - >>> ds.write_numpy("s3://bucket/path") # doctest: +SKIP - - Time complexity: O(datastream size / parallelism) - - Args: - path: The path to the destination root directory, where npy - files will be written to. - column: The name of the table column that contains the tensor to - be written. - filesystem: The filesystem implementation to write to. - try_create_dir: Try to create all directories in destination path - if True. Does nothing if all directories already exist. - arrow_open_stream_args: kwargs passed to - pyarrow.fs.FileSystem.open_output_stream - block_path_provider: BlockWritePathProvider implementation to - write each datastream block to a custom output path. - ray_remote_args: Kwargs passed to ray.remote in the write tasks. - """ - context = DataContext.get_current() - if context.strict_mode and not column: - raise StrictModeError( - "In Ray 2.5, the column must be specified " - "(e.g., `write_numpy(column='data')`)." - ) - column = column or TENSOR_COLUMN_NAME - - self.write_datasource( - NumpyDatasource(), - ray_remote_args=ray_remote_args, - path=path, - datastream_uuid=self._uuid, - column=column, - filesystem=filesystem, - try_create_dir=try_create_dir, - open_stream_args=arrow_open_stream_args, - block_path_provider=block_path_provider, - ) - - @ConsumptionAPI - def write_mongo( - self, - uri: str, - database: str, - collection: str, - ray_remote_args: Dict[str, Any] = None, - ) -> None: - """Write the datastream to a MongoDB datasource. - - This is only supported for datastreams convertible to Arrow records. - To control the number of parallel write tasks, use ``.repartition()`` - before calling this method. - - .. 
note:: - Currently, this supports only a subset of the pyarrow's types, due to the - limitation of pymongoarrow which is used underneath. Writing unsupported - types will fail on type checking. See all the supported types at: - https://mongo-arrow.readthedocs.io/en/latest/data_types.html. - - .. note:: - The records will be inserted into MongoDB as new documents. If a record has - the _id field, this _id must be non-existent in MongoDB, otherwise the write - will be rejected and fail (hence preexisting documents are protected from - being mutated). It's fine to not have _id field in record and MongoDB will - auto generate one at insertion. - - Examples: - >>> import ray - >>> import pandas as pd - >>> docs = [{"title": "MongoDB Datasource test"} for key in range(4)] - >>> ds = ray.data.from_pandas(pd.DataFrame(docs)) - >>> ds.write_mongo( # doctest: +SKIP - >>> MongoDatasource(), # doctest: +SKIP - >>> uri="mongodb://username:password@mongodb0.example.com:27017/?authSource=admin", # noqa: E501 # doctest: +SKIP - >>> database="my_db", # doctest: +SKIP - >>> collection="my_collection", # doctest: +SKIP - >>> ) # doctest: +SKIP - - Args: - uri: The URI to the destination MongoDB where the datastream will be - written to. For the URI format, see details in - https://www.mongodb.com/docs/manual/reference/connection-string/. - database: The name of the database. This database must exist otherwise - ValueError will be raised. - collection: The name of the collection in the database. This collection - must exist otherwise ValueError will be raised. - ray_remote_args: Kwargs passed to ray.remote in the write tasks. 
- """ - from ray.data.datasource import MongoDatasource - - self.write_datasource( - MongoDatasource(), - ray_remote_args=ray_remote_args, - uri=uri, - database=database, - collection=collection, - ) - - @ConsumptionAPI - def write_datasource( - self, - datasource: Datasource, - *, - ray_remote_args: Dict[str, Any] = None, - **write_args, - ) -> None: - """Write the datastream to a custom datasource. - - Examples: - >>> import ray - >>> from ray.data.datasource import Datasource - >>> ds = ray.data.range(100) # doctest: +SKIP - >>> class CustomDatasource(Datasource): # doctest: +SKIP - ... # define custom data source - ... pass # doctest: +SKIP - >>> ds.write_datasource(CustomDatasource(...)) # doctest: +SKIP - - Time complexity: O(datastream size / parallelism) - - Args: - datasource: The datasource to write to. - ray_remote_args: Kwargs passed to ray.remote in the write tasks. - write_args: Additional write args to pass to the datasource. - """ - if ray_remote_args is None: - ray_remote_args = {} - path = write_args.get("path", None) - if path and _is_local_scheme(path): - if ray.util.client.ray.is_connected(): - raise ValueError( - f"The local scheme paths {path} are not supported in Ray Client." 
- ) - ray_remote_args["scheduling_strategy"] = NodeAffinitySchedulingStrategy( - ray.get_runtime_context().get_node_id(), - soft=False, - ) - - if type(datasource).write != Datasource.write: - write_fn = generate_write_fn(datasource, **write_args) - - def write_fn_wrapper(blocks: Iterator[Block], ctx, fn) -> Iterator[Block]: - return write_fn(blocks, ctx) - - plan = self._plan.with_stage( - OneToOneStage( - "Write", - write_fn_wrapper, - TaskPoolStrategy(), - ray_remote_args, - fn=lambda x: x, - ) - ) - - logical_plan = self._logical_plan - if logical_plan is not None: - write_op = Write( - logical_plan.dag, - datasource, - ray_remote_args=ray_remote_args, - **write_args, - ) - logical_plan = LogicalPlan(write_op) - - try: - import pandas as pd - - self._write_ds = Datastream( - plan, self._epoch, self._lazy, logical_plan - ).materialize() - blocks = ray.get(self._write_ds._plan.execute().get_blocks()) - assert all( - isinstance(block, pd.DataFrame) and len(block) == 1 - for block in blocks - ) - write_results = [block["write_result"][0] for block in blocks] - datasource.on_write_complete(write_results) - except Exception as e: - datasource.on_write_failed([], e) - raise - else: - logger.warning( - "The Datasource.do_write() is deprecated in " - "Ray 2.4 and will be removed in future release. Use " - "Datasource.write() instead." - ) - - ctx = DataContext.get_current() - blocks, metadata = zip(*self._plan.execute().get_blocks_with_metadata()) - # Prepare write in a remote task so that in Ray client mode, we - # don't do metadata resolution from the client machine. 
- do_write = cached_remote_fn(_do_write, retry_exceptions=False, num_cpus=0) - write_results: List[ObjectRef[WriteResult]] = ray.get( - do_write.remote( - datasource, - ctx, - blocks, - metadata, - ray_remote_args, - _wrap_arrow_serialization_workaround(write_args), - ) - ) - - progress = ProgressBar("Write Progress", len(write_results)) - try: - progress.block_until_complete(write_results) - datasource.on_write_complete(ray.get(write_results)) - except Exception as e: - datasource.on_write_failed(write_results, e) - raise - finally: - progress.close() - - @ConsumptionAPI( - delegate=( - "Calling any of the consumption methods on the returned ``DataIterator``" - ) - ) - def iterator(self) -> DataIterator: - """Return a :class:`~ray.data.DataIterator` that - can be used to repeatedly iterate over the datastream. - - Examples: - >>> import ray - >>> for batch in ray.data.range( - ... 1000000 - ... ).iterator().iter_batches(): # doctest: +SKIP - ... print(batch) # doctest: +SKIP - - .. note:: - It is recommended to use ``DataIterator`` methods over directly - calling methods such as ``iter_batches()``. - """ - return DataIteratorImpl(self) - - @ConsumptionAPI - def iter_rows(self, *, prefetch_blocks: int = 0) -> Iterator[Dict[str, Any]]: - """Return a local row iterator over the datastream. - - Examples: - >>> import ray - >>> for i in ray.data.range(1000000).iter_rows(): # doctest: +SKIP - ... print(i) # doctest: +SKIP - - Time complexity: O(1) - - Args: - prefetch_blocks: The number of blocks to prefetch ahead of the - current block during the scan. - - Returns: - A local iterator over the entire datastream. 
- """ - - return self.iterator().iter_rows(prefetch_blocks=prefetch_blocks) - - @ConsumptionAPI - def iter_batches( - self, - *, - prefetch_batches: int = 1, - batch_size: Optional[int] = 256, - batch_format: Optional[str] = "default", - drop_last: bool = False, - local_shuffle_buffer_size: Optional[int] = None, - local_shuffle_seed: Optional[int] = None, - _collate_fn: Optional[Callable[[DataBatch], Any]] = None, - # Deprecated. - prefetch_blocks: int = 0, - ) -> Iterator[DataBatch]: - """Return a local batched iterator over the datastream. - - Examples: - >>> import ray - >>> for batch in ray.data.range(1000000).iter_batches(): # doctest: +SKIP - ... print(batch) # doctest: +SKIP - - Time complexity: O(1) - - Args: - prefetch_batches: The number of batches to fetch ahead of the current batch - to fetch. If set to greater than 0, a separate threadpool will be used - to fetch the objects to the local node, format the batches, and apply - the collate_fn. Defaults to 1. You can revert back to the old - prefetching behavior that uses `prefetch_blocks` by setting - `use_legacy_iter_batches` to True in the datastreamContext. - batch_size: The number of rows in each batch, or None to use entire blocks - as batches (blocks may contain different number of rows). - The final batch may include fewer than ``batch_size`` rows if - ``drop_last`` is ``False``. Defaults to 256. - batch_format: Specify ``"default"`` to use the default block format - (NumPy), ``"pandas"`` to select ``pandas.DataFrame``, "pyarrow" to - select ``pyarrow.Table``, or ``"numpy"`` to select - ``Dict[str, numpy.ndarray]``, or None to return the underlying block - exactly as is with no additional formatting. - drop_last: Whether to drop the last batch if it's incomplete. 
- local_shuffle_buffer_size: If non-None, the data will be randomly shuffled - using a local in-memory shuffle buffer, and this value will serve as the - minimum number of rows that must be in the local in-memory shuffle - buffer in order to yield a batch. When there are no more rows to add to - the buffer, the remaining rows in the buffer will be drained. - local_shuffle_seed: The seed to use for the local random shuffle. - - Returns: - An iterator over record batches. - """ - batch_format = _apply_strict_mode_batch_format(batch_format) - if batch_format == "native": - logger.warning("The 'native' batch format has been renamed 'default'.") - return self.iterator().iter_batches( - prefetch_batches=prefetch_batches, - prefetch_blocks=prefetch_blocks, - batch_size=batch_size, - batch_format=batch_format, - drop_last=drop_last, - local_shuffle_buffer_size=local_shuffle_buffer_size, - local_shuffle_seed=local_shuffle_seed, - _collate_fn=_collate_fn, - ) - - @ConsumptionAPI - def iter_torch_batches( - self, - *, - prefetch_batches: int = 1, - batch_size: Optional[int] = 256, - dtypes: Optional[Union["torch.dtype", Dict[str, "torch.dtype"]]] = None, - device: Optional[str] = None, - collate_fn: Optional[ - Callable[[Union[np.ndarray, Dict[str, np.ndarray]]], Any] - ] = None, - drop_last: bool = False, - local_shuffle_buffer_size: Optional[int] = None, - local_shuffle_seed: Optional[int] = None, - # Deprecated - prefetch_blocks: int = 0, - ) -> Iterator["TorchTensorBatchType"]: - """Return a local batched iterator of Torch Tensors over the datastream. - - This iterator will yield single-tensor batches if the underlying datastream - consists of a single column; otherwise, it will yield a dictionary of - column-tensors. If looking for more flexibility in the tensor conversion (e.g. - casting dtypes) or the batch format, try use `.iter_batches` directly, which is - a lower-level API. - - Examples: - >>> import ray - >>> for batch in ray.data.range( # doctest: +SKIP - ... 
12, - ... ).iter_torch_batches(batch_size=4): - ... print(batch.shape) # doctest: +SKIP - torch.Size([4, 1]) - torch.Size([4, 1]) - torch.Size([4, 1]) - - Time complexity: O(1) - - Args: - prefetch_batches: The number of batches to fetch ahead of the current batch - to fetch. If set to greater than 0, a separate threadpool will be used - to fetch the objects to the local node, format the batches, and apply - the collate_fn. Defaults to 1. You can revert back to the old - prefetching behavior that uses `prefetch_blocks` by setting - `use_legacy_iter_batches` to True in the datastreamContext. - batch_size: The number of rows in each batch, or None to use entire blocks - as batches (blocks may contain different number of rows). - The final batch may include fewer than ``batch_size`` rows if - ``drop_last`` is ``False``. Defaults to 256. - dtypes: The Torch dtype(s) for the created tensor(s); if None, the dtype - will be inferred from the tensor data. - device: The device on which the tensor should be placed; if None, the Torch - tensor will be constructed on the CPU. - collate_fn: A function to convert a Numpy batch to a PyTorch tensor batch. - Potential use cases include collating along a dimension other than the - first, padding sequences of various lengths, or generally handling - batches of different length tensors. If not provided, the default - collate function is used which simply converts the batch of numpy - arrays to a batch of PyTorch tensors. This API is still experimental - and is subject to change. - drop_last: Whether to drop the last batch if it's incomplete. - local_shuffle_buffer_size: If non-None, the data will be randomly shuffled - using a local in-memory shuffle buffer, and this value will serve as the - minimum number of rows that must be in the local in-memory shuffle - buffer in order to yield a batch. When there are no more rows to add to - the buffer, the remaining rows in the buffer will be drained. 
This - buffer size must be greater than or equal to ``batch_size``, and - therefore ``batch_size`` must also be specified when using local - shuffling. - local_shuffle_seed: The seed to use for the local random shuffle. - - Returns: - An iterator over Torch Tensor batches. - """ - return self.iterator().iter_torch_batches( - prefetch_batches=prefetch_batches, - prefetch_blocks=prefetch_blocks, - batch_size=batch_size, - dtypes=dtypes, - device=device, - collate_fn=collate_fn, - drop_last=drop_last, - local_shuffle_buffer_size=local_shuffle_buffer_size, - local_shuffle_seed=local_shuffle_seed, - ) - - @ConsumptionAPI - def iter_tf_batches( - self, - *, - prefetch_batches: int = 1, - batch_size: Optional[int] = 256, - dtypes: Optional[Union["tf.dtypes.DType", Dict[str, "tf.dtypes.DType"]]] = None, - drop_last: bool = False, - local_shuffle_buffer_size: Optional[int] = None, - local_shuffle_seed: Optional[int] = None, - # Deprecated - prefetch_blocks: int = 0, - ) -> Iterator[TensorFlowTensorBatchType]: - """Return a local batched iterator of TensorFlow Tensors over the datastream. - - This iterator will yield single-tensor batches of the underlying datastream - consists of a single column; otherwise, it will yield a dictionary of - column-tensors. - - .. tip:: - If you don't need the additional flexibility provided by this method, - consider using :meth:`~ray.data.Datastream.to_tf` instead. It's easier - to use. - - Examples: - >>> import ray - >>> for batch in ray.data.range( # doctest: +SKIP - ... 12, - ... ).iter_tf_batches(batch_size=4): - ... print(batch.shape) # doctest: +SKIP - (4, 1) - (4, 1) - (4, 1) - - Time complexity: O(1) - - Args: - prefetch_batches: The number of batches to fetch ahead of the current batch - to fetch. If set to greater than 0, a separate threadpool will be used - to fetch the objects to the local node, format the batches, and apply - the collate_fn. Defaults to 1. 
You can revert back to the old - prefetching behavior that uses `prefetch_blocks` by setting - `use_legacy_iter_batches` to True in the datastreamContext. - batch_size: The number of rows in each batch, or None to use entire blocks - as batches (blocks may contain different number of rows). - The final batch may include fewer than ``batch_size`` rows if - ``drop_last`` is ``False``. Defaults to 256. - dtypes: The TensorFlow dtype(s) for the created tensor(s); if None, the - dtype will be inferred from the tensor data. - drop_last: Whether to drop the last batch if it's incomplete. - local_shuffle_buffer_size: If non-None, the data will be randomly shuffled - using a local in-memory shuffle buffer, and this value will serve as the - minimum number of rows that must be in the local in-memory shuffle - buffer in order to yield a batch. When there are no more rows to add to - the buffer, the remaining rows in the buffer will be drained. This - buffer size must be greater than or equal to ``batch_size``, and - therefore ``batch_size`` must also be specified when using local - shuffling. - local_shuffle_seed: The seed to use for the local random shuffle. - - Returns: - An iterator over TensorFlow Tensor batches. 
- """ - return self.iterator().iter_tf_batches( - prefetch_batches=prefetch_batches, - prefetch_blocks=prefetch_blocks, - batch_size=batch_size, - dtypes=dtypes, - drop_last=drop_last, - local_shuffle_buffer_size=local_shuffle_buffer_size, - local_shuffle_seed=local_shuffle_seed, - ) - - @ConsumptionAPI(pattern="Time complexity:") - def to_torch( - self, - *, - label_column: Optional[str] = None, - feature_columns: Optional[ - Union[List[str], List[List[str]], Dict[str, List[str]]] - ] = None, - label_column_dtype: Optional["torch.dtype"] = None, - feature_column_dtypes: Optional[ - Union["torch.dtype", List["torch.dtype"], Dict[str, "torch.dtype"]] - ] = None, - batch_size: int = 1, - prefetch_batches: int = 1, - drop_last: bool = False, - local_shuffle_buffer_size: Optional[int] = None, - local_shuffle_seed: Optional[int] = None, - unsqueeze_label_tensor: bool = True, - unsqueeze_feature_tensors: bool = True, - # Deprecated - prefetch_blocks: int = 0, - ) -> "torch.utils.data.IterableDataset": - """Return a Torch IterableDataset over this datastream. - - This is only supported for datastreams convertible to Arrow records. - - It is recommended to use the returned ``IterableDataset`` directly - instead of passing it into a torch ``DataLoader``. - - Each element in IterableDataset will be a tuple consisting of 2 - elements. The first item contains the feature tensor(s), and the - second item is the label tensor. Those can take on different - forms, depending on the specified arguments. 
- - For the features tensor (N is the ``batch_size`` and n, m, k - are the number of features per tensor): - - * If ``feature_columns`` is a ``List[str]``, the features will be - a tensor of shape (N, n), with columns corresponding to - ``feature_columns`` - - * If ``feature_columns`` is a ``List[List[str]]``, the features will be - a list of tensors of shape [(N, m),...,(N, k)], with columns of each - tensor corresponding to the elements of ``feature_columns`` - - * If ``feature_columns`` is a ``Dict[str, List[str]]``, the features - will be a dict of key-tensor pairs of shape - {key1: (N, m),..., keyN: (N, k)}, with columns of each - tensor corresponding to the value of ``feature_columns`` under the - key. - - If ``unsqueeze_label_tensor=True`` (default), the label tensor will be - of shape (N, 1). Otherwise, it will be of shape (N,). - If ``label_column`` is specified as ``None``, then no column from the - ``Datastream`` will be treated as the label, and the output label tensor - will be ``None``. - - Note that you probably want to call ``.split()`` on this datastream if - there are to be multiple Torch workers consuming the data. - - Time complexity: O(1) - - Args: - label_column: The name of the column used as the - label (second element of the output list). Can be None for - prediction, in which case the second element of returned - tuple will also be None. - feature_columns: The names of the columns - to use as the features. Can be a list of lists or - a dict of string-list pairs for multi-tensor output. - If None, then use all columns except the label column as - the features. - label_column_dtype: The torch dtype to - use for the label column. If None, then automatically infer - the dtype. - feature_column_dtypes: The dtypes to use for the feature - tensors. This should match the format of ``feature_columns``, - or be a single dtype, in which case it will be applied to - all tensors. If None, then automatically infer the dtype. 
- batch_size: How many samples per batch to yield at a time. - Defaults to 1. - prefetch_batches: The number of batches to fetch ahead of the current batch - to fetch. If set to greater than 0, a separate threadpool will be used - to fetch the objects to the local node, format the batches, and apply - the collate_fn. Defaults to 1. You can revert back to the old - prefetching behavior that uses `prefetch_blocks` by setting - `use_legacy_iter_batches` to True in the datastreamContext. - drop_last: Set to True to drop the last incomplete batch, - if the datastream size is not divisible by the batch size. If - False and the size of the stream is not divisible by the batch - size, then the last batch will be smaller. Defaults to False. - local_shuffle_buffer_size: If non-None, the data will be randomly shuffled - using a local in-memory shuffle buffer, and this value will serve as the - minimum number of rows that must be in the local in-memory shuffle - buffer in order to yield a batch. When there are no more rows to add to - the buffer, the remaining rows in the buffer will be drained. This - buffer size must be greater than or equal to ``batch_size``, and - therefore ``batch_size`` must also be specified when using local - shuffling. - local_shuffle_seed: The seed to use for the local random shuffle. - unsqueeze_label_tensor: If set to True, the label tensor - will be unsqueezed (reshaped to (N, 1)). Otherwise, it will - be left as is, that is (N, ). In general, regression loss - functions expect an unsqueezed tensor, while classification - loss functions expect a squeezed one. Defaults to True. - unsqueeze_feature_tensors: If set to True, the features tensors - will be unsqueezed (reshaped to (N, 1)) before being concatenated into - the final features tensor. Otherwise, they will be left as is, that is - (N, ). Defaults to True. - - Returns: - A torch IterableDataset. 
- """ - - return self.iterator().to_torch( - label_column=label_column, - feature_columns=feature_columns, - label_column_dtype=label_column_dtype, - feature_column_dtypes=feature_column_dtypes, - batch_size=batch_size, - prefetch_blocks=prefetch_blocks, - prefetch_batches=prefetch_batches, - drop_last=drop_last, - local_shuffle_buffer_size=local_shuffle_buffer_size, - local_shuffle_seed=local_shuffle_seed, - unsqueeze_label_tensor=unsqueeze_label_tensor, - unsqueeze_feature_tensors=unsqueeze_feature_tensors, - ) - - @ConsumptionAPI - def to_tf( - self, - feature_columns: Union[str, List[str]], - label_columns: Union[str, List[str]], - *, - prefetch_batches: int = 1, - batch_size: int = 1, - drop_last: bool = False, - local_shuffle_buffer_size: Optional[int] = None, - local_shuffle_seed: Optional[int] = None, - # Deprecated - prefetch_blocks: int = 0, - ) -> "tf.data.Dataset": - """Return a TF Dataset over this datastream. - - .. warning:: - If your datastream contains ragged tensors, this method errors. To prevent - errors, resize tensors or - :ref:`disable tensor extension casting `. - - Examples: - >>> import ray - >>> ds = ray.data.read_csv("s3://anonymous@air-example-data/iris.csv") - >>> ds - Datastream( - num_blocks=1, - num_rows=150, - schema={ - sepal length (cm): double, - sepal width (cm): double, - petal length (cm): double, - petal width (cm): double, - target: int64 - } - ) - - If your model accepts a single tensor as input, specify a single feature column. - - >>> ds.to_tf(feature_columns="sepal length (cm)", label_columns="target") # doctest: +SKIP - <_OptionsDataset element_spec=(TensorSpec(shape=(None,), dtype=tf.float64, name='sepal length (cm)'), TensorSpec(shape=(None,), dtype=tf.int64, name='target'))> - - If your model accepts a dictionary as input, specify a list of feature columns. 
- - >>> ds.to_tf(["sepal length (cm)", "sepal width (cm)"], "target") # doctest: +SKIP - <_OptionsDataset element_spec=({'sepal length (cm)': TensorSpec(shape=(None,), dtype=tf.float64, name='sepal length (cm)'), 'sepal width (cm)': TensorSpec(shape=(None,), dtype=tf.float64, name='sepal width (cm)')}, TensorSpec(shape=(None,), dtype=tf.int64, name='target'))> - - If your datastream contains multiple features but your model accepts a single - tensor as input, combine features with - :class:`~ray.data.preprocessors.Concatenator`. - - >>> from ray.data.preprocessors import Concatenator - >>> preprocessor = Concatenator(output_column_name="features", exclude="target") - >>> ds = preprocessor.transform(ds) - >>> ds - Concatenator - +- Datastream( - num_blocks=1, - num_rows=150, - schema={ - sepal length (cm): double, - sepal width (cm): double, - petal length (cm): double, - petal width (cm): double, - target: int64 - } - ) - >>> ds.to_tf("features", "target") # doctest: +SKIP - <_OptionsDataset element_spec=(TensorSpec(shape=(None, 4), dtype=tf.float64, name='features'), TensorSpec(shape=(None,), dtype=tf.int64, name='target'))> - - Args: - feature_columns: Columns that correspond to model inputs. If this is a - string, the input data is a tensor. If this is a list, the input data - is a ``dict`` that maps column names to their tensor representation. - label_column: Columns that correspond to model targets. If this is a - string, the target data is a tensor. If this is a list, the target data - is a ``dict`` that maps column names to their tensor representation. - prefetch_batches: The number of batches to fetch ahead of the current batch - to fetch. If set to greater than 0, a separate threadpool will be used - to fetch the objects to the local node, format the batches, and apply - the collate_fn. Defaults to 1. You can revert back to the old - prefetching behavior that uses `prefetch_blocks` by setting - `use_legacy_iter_batches` to True in the datastreamContext. 
- batch_size: Record batch size. Defaults to 1. - drop_last: Set to True to drop the last incomplete batch, - if the datastream size is not divisible by the batch size. If - False and the size of the stream is not divisible by the batch - size, then the last batch will be smaller. Defaults to False. - local_shuffle_buffer_size: If non-None, the data will be randomly shuffled - using a local in-memory shuffle buffer, and this value will serve as the - minimum number of rows that must be in the local in-memory shuffle - buffer in order to yield a batch. When there are no more rows to add to - the buffer, the remaining rows in the buffer will be drained. This - buffer size must be greater than or equal to ``batch_size``, and - therefore ``batch_size`` must also be specified when using local - shuffling. - local_shuffle_seed: The seed to use for the local random shuffle. - - Returns: - A ``tf.data.Dataset`` that yields inputs and targets. - - .. seealso:: - - :meth:`~ray.data.Datastream.iter_tf_batches` - Call this method if you need more flexibility. - - """ # noqa: E501 - - return self.iterator().to_tf( - feature_columns=feature_columns, - label_columns=label_columns, - prefetch_batches=prefetch_batches, - prefetch_blocks=prefetch_blocks, - drop_last=drop_last, - batch_size=batch_size, - local_shuffle_buffer_size=local_shuffle_buffer_size, - local_shuffle_seed=local_shuffle_seed, - ) - - @ConsumptionAPI(pattern="Time complexity:") - def to_dask( - self, - meta: Union[ - "pandas.DataFrame", - "pandas.Series", - Dict[str, Any], - Iterable[Any], - Tuple[Any], - None, - ] = None, - ) -> "dask.DataFrame": - """Convert this datastream into a Dask DataFrame. - - This is only supported for datastreams convertible to Arrow records. - - Note that this function will set the Dask scheduler to Dask-on-Ray - globally, via the config. 
- - Time complexity: O(datastream size / parallelism) - - Args: - meta: An empty pandas DataFrame or Series that matches the dtypes and column - names of the stream. This metadata is necessary for many algorithms in - dask dataframe to work. For ease of use, some alternative inputs are - also available. Instead of a DataFrame, a dict of ``{name: dtype}`` or - iterable of ``(name, dtype)`` can be provided (note that the order of - the names should match the order of the columns). Instead of a series, a - tuple of ``(name, dtype)`` can be used. - By default, this will be inferred from the underlying Datastream schema, - with this argument supplying an optional override. - - Returns: - A Dask DataFrame created from this datastream. - """ - import dask - import dask.dataframe as dd - import pandas as pd - - try: - import pyarrow as pa - except Exception: - pa = None - - from ray.data._internal.pandas_block import PandasBlockSchema - from ray.util.client.common import ClientObjectRef - from ray.util.dask import ray_dask_get - - dask.config.set(scheduler=ray_dask_get) - - @dask.delayed - def block_to_df(block: Block): - if isinstance(block, (ray.ObjectRef, ClientObjectRef)): - raise ValueError( - "Datastream.to_dask() must be used with Dask-on-Ray, please " - "set the Dask scheduler to ray_dask_get (located in " - "ray.util.dask)." - ) - return _block_to_df(block) - - if meta is None: - from ray.data.extensions import TensorDtype - - # Infer Dask metadata from Datastream schema. 
- schema = self.schema(fetch_if_missing=True) - if isinstance(schema, PandasBlockSchema): - meta = pd.DataFrame( - { - col: pd.Series( - dtype=( - dtype - if not isinstance(dtype, TensorDtype) - else np.object_ - ) - ) - for col, dtype in zip(schema.names, schema.types) - } - ) - elif pa is not None and isinstance(schema, pa.Schema): - from ray.data.extensions import ArrowTensorType - - if any(isinstance(type_, ArrowTensorType) for type_ in schema.types): - meta = pd.DataFrame( - { - col: pd.Series( - dtype=( - dtype.to_pandas_dtype() - if not isinstance(dtype, ArrowTensorType) - else np.object_ - ) - ) - for col, dtype in zip(schema.names, schema.types) - } - ) - else: - meta = schema.empty_table().to_pandas() - - ddf = dd.from_delayed( - [block_to_df(block) for block in self.get_internal_block_refs()], - meta=meta, - ) - return ddf - - @ConsumptionAPI(pattern="Time complexity:") - def to_mars(self) -> "mars.DataFrame": - """Convert this datastream into a MARS dataframe. - - Time complexity: O(datastream size / parallelism) - - Returns: - A MARS dataframe created from this datastream. - """ - import pandas as pd - import pyarrow as pa - from mars.dataframe.datasource.read_raydataset import DataFrameReadRayDataset - from mars.dataframe.utils import parse_index - - from ray.data._internal.pandas_block import PandasBlockSchema - - refs = self.to_pandas_refs() - # remove this when https://github.com/mars-project/mars/issues/2945 got fixed - schema = self.schema() - if isinstance(schema, Schema): - schema = schema.base_schema # Backwards compat with non strict mode. 
- if isinstance(schema, PandasBlockSchema): - dtypes = pd.Series(schema.types, index=schema.names) - elif isinstance(schema, pa.Schema): - dtypes = schema.empty_table().to_pandas().dtypes - else: - raise NotImplementedError(f"Unsupported format of schema {schema}") - index_value = parse_index(pd.RangeIndex(-1)) - columns_value = parse_index(dtypes.index, store_data=True) - op = DataFrameReadRayDataset(refs=refs) - return op(index_value=index_value, columns_value=columns_value, dtypes=dtypes) - - @ConsumptionAPI(pattern="Time complexity:") - def to_modin(self) -> "modin.DataFrame": - """Convert this datastream into a Modin dataframe. - - This works by first converting this datastream into a distributed set of - Pandas dataframes (using ``.to_pandas_refs()``). Please see caveats - there. Then the individual dataframes are used to create the modin - DataFrame using - ``modin.distributed.dataframe.pandas.partitions.from_partitions()``. - - This is only supported for datastreams convertible to Arrow records. - This function induces a copy of the data. For zero-copy access to the - underlying data, consider using ``.to_arrow()`` or - ``.get_internal_block_refs()``. - - Time complexity: O(datastream size / parallelism) - - Returns: - A Modin dataframe created from this datastream. - """ - - from modin.distributed.dataframe.pandas.partitions import from_partitions - - pd_objs = self.to_pandas_refs() - return from_partitions(pd_objs, axis=0) - - @ConsumptionAPI(pattern="Time complexity:") - def to_spark(self, spark: "pyspark.sql.SparkSession") -> "pyspark.sql.DataFrame": - """Convert this datastream into a Spark dataframe. - - Time complexity: O(datastream size / parallelism) - - Returns: - A Spark dataframe created from this datastream. - """ - import raydp - - schema = self.schema() - if isinstance(schema, Schema): - schema = schema.base_schema # Backwards compat with non strict mode. 
- return raydp.spark.ray_dataset_to_spark_dataframe( - spark, schema, self.get_internal_block_refs() - ) - - @ConsumptionAPI(pattern="Time complexity:") - def to_pandas(self, limit: int = 100000) -> "pandas.DataFrame": - """Convert this datastream into a single Pandas DataFrame. - - This is only supported for datastreams convertible to Arrow or Pandas - records. An error is raised if the number of records exceeds the - provided limit. Note that you can use ``.limit()`` on the datastream - beforehand to truncate the datastream manually. - - Time complexity: O(datastream size) - - Args: - limit: The maximum number of records to return. An error will be - raised if the limit is exceeded. - - Returns: - A Pandas DataFrame created from this datastream, containing a limited - number of records. - """ - count = self.count() - if count > limit: - raise ValueError( - f"the datastream has more than the given limit of {limit} " - f"records: {count}. If you are sure that a DataFrame with " - f"{count} rows will fit in local memory, use " - f"ds.to_pandas(limit={count})." - ) - blocks = self.get_internal_block_refs() - output = DelegatingBlockBuilder() - for block in blocks: - output.add_block(ray.get(block)) - block = output.build() - return _block_to_df(block) - - @ConsumptionAPI(pattern="Time complexity:") - @DeveloperAPI - def to_pandas_refs(self) -> List[ObjectRef["pandas.DataFrame"]]: - """Convert this datastream into a distributed set of Pandas dataframes. - - This is only supported for datastreams convertible to Arrow records. - This function induces a copy of the data. For zero-copy access to the - underlying data, consider using ``.to_arrow()`` or - ``.get_internal_block_refs()``. - - Time complexity: O(datastream size / parallelism) - - Returns: - A list of remote Pandas dataframes created from this datastream. 
- """ - - block_to_df = cached_remote_fn(_block_to_df) - return [block_to_df.remote(block) for block in self.get_internal_block_refs()] - - @DeveloperAPI - def to_numpy_refs( - self, *, column: Optional[str] = None - ) -> List[ObjectRef[np.ndarray]]: - """Convert this datastream into a distributed set of NumPy ndarrays. - - This is only supported for datastreams convertible to NumPy ndarrays. - This function induces a copy of the data. For zero-copy access to the - underlying data, consider using ``.to_arrow()`` or - ``.get_internal_block_refs()``. - - Time complexity: O(datastream size / parallelism) - - Args: - column: The name of the column to convert to numpy, or None to specify the - entire row. If not specified for Arrow or Pandas blocks, each returned - future will represent a dict of column ndarrays. - - Returns: - A list of remote NumPy ndarrays created from this datastream. - """ - block_to_ndarray = cached_remote_fn(_block_to_ndarray) - return [ - block_to_ndarray.remote(block, column=column) - for block in self.get_internal_block_refs() - ] - - @ConsumptionAPI(pattern="Time complexity:") - @DeveloperAPI - def to_arrow_refs(self) -> List[ObjectRef["pyarrow.Table"]]: - """Convert this datastream into a distributed set of Arrow tables. - - This is only supported for datastreams convertible to Arrow records. - This function is zero-copy if the existing data is already in Arrow - format. Otherwise, the data will be converted to Arrow format. - - Time complexity: O(1) unless conversion is required. - - Returns: - A list of remote Arrow tables created from this datastream. - """ - import pyarrow as pa - - blocks: List[ObjectRef["pyarrow.Table"]] = self.get_internal_block_refs() - # Schema is safe to call since we have already triggered execution with - # get_internal_block_refs. - schema = self.schema(fetch_if_missing=True) - if isinstance(schema, Schema): - schema = schema.base_schema # Backwards compat with non strict mode. 
- if isinstance(schema, pa.Schema): - # Zero-copy path. - return blocks - - block_to_arrow = cached_remote_fn(_block_to_arrow) - return [block_to_arrow.remote(block) for block in blocks] - - @ConsumptionAPI(pattern="Args:") - def to_random_access_dataset( - self, - key: str, - num_workers: Optional[int] = None, - ) -> RandomAccessDataset: - """Convert this datastream into a distributed RandomAccessDataset (EXPERIMENTAL). - - RandomAccessDataset partitions the datastream across the cluster by the given - sort key, providing efficient random access to records via binary search. A - number of worker actors are created, each of which has zero-copy access to the - underlying sorted data blocks of the datastream. - - Note that the key must be unique in the datastream. If there are duplicate keys, - an arbitrary value is returned. - - This is only supported for Arrow-format datastreams. - - Args: - key: The key column over which records can be queried. - num_workers: The number of actors to use to serve random access queries. - By default, this is determined by multiplying the number of Ray nodes - in the cluster by four. As a rule of thumb, you can expect each worker - to provide ~3000 records / second via ``get_async()``, and - ~10000 records / second via ``multiget()``. - """ - if num_workers is None: - num_workers = 4 * len(ray.nodes()) - return RandomAccessDataset(self, key, num_workers=num_workers) - - @ConsumptionAPI - def repeat(self, times: Optional[int] = None) -> "DatasetPipeline": - """Convert this into a DatasetPipeline by looping over this datastream. - - Transformations prior to the call to ``repeat()`` are evaluated once. - Transformations done on the returned pipeline are evaluated on each - loop of the pipeline over the base datastream. - - Note that every repeat of the datastream is considered an "epoch" for - the purposes of ``DatasetPipeline.iter_epochs()``. 
- - Examples: - >>> import ray - >>> ds = ray.data.range(5, parallelism=1) - >>> # Infinite pipeline of numbers [0, 5) - >>> ds.repeat().take_batch() - {'id': array([0, 1, 2, 3, 4, 0, 1, 2, 3, 4, ...])} - >>> # Can shuffle each epoch (datastream) in the pipeline. - >>> ds.repeat().random_shuffle().take_batch() # doctest: +SKIP - {'id': array([2, 3, 0, 4, 1, 4, 0, 2, 1, 3, ...])} - - Args: - times: The number of times to loop over this datastream, or None - to repeat indefinitely. - """ - from ray.data._internal.plan import _rewrite_read_stage - from ray.data.dataset_pipeline import DatasetPipeline - - ctx = DataContext.get_current() - if self._plan.is_read_stage_equivalent() and ctx.optimize_fuse_read_stages: - blocks, _, stages = self._plan._get_source_blocks_and_stages() - blocks.clear() - blocks, outer_stats, stages = _rewrite_read_stage(blocks, stages) - read_stage = stages[0] - else: - blocks = self._plan.execute() - outer_stats = self._plan.stats() - read_stage = None - uuid = self._get_uuid() - outer_stats.datastream_uuid = uuid - - if times is not None and times < 1: - raise ValueError("`times` must be >= 1, got {}".format(times)) - - class Iterator: - def __init__(self, blocks): - self._blocks = blocks - self._i = 0 - - def __next__(self) -> Callable[[], "Datastream"]: - if times and self._i >= times: - raise StopIteration - epoch = self._i - blocks = self._blocks - self._i += 1 - - def gen(): - ds = Datastream( - ExecutionPlan( - blocks, - outer_stats, - datastream_uuid=uuid, - run_by_consumer=True, - ), - epoch, - lazy=False, - ) - ds._set_uuid(uuid) - return ds - - return gen - - class Iterable: - def __init__(self, blocks): - self._blocks = blocks - - def __iter__(self): - return Iterator(self._blocks) - - pipe = DatasetPipeline(Iterable(blocks), False, length=times or float("inf")) - if read_stage: - pipe = pipe.foreach_window( - lambda ds, read_stage=read_stage: Datastream( - ds._plan.with_stage(read_stage), ds._epoch, True - ) - ) - return pipe - - 
def window( - self, - *, - blocks_per_window: Optional[int] = None, - bytes_per_window: Optional[int] = None, - ) -> "DatasetPipeline": - """Convert this into a DatasetPipeline by windowing over data blocks. - - Transformations prior to the call to ``window()`` are evaluated in - bulk on the entire datastream. Transformations done on the returned - pipeline are evaluated incrementally per window of blocks as data is - read from the output of the pipeline. - - Windowing execution allows for output to be read sooner without - waiting for all transformations to fully execute, and can also improve - efficiency if transforms use different resources (e.g., GPUs). - - Without windowing:: - - [preprocessing......] - [inference.......] - [write........] - Time -----------------------------------------------------------> - - With windowing:: - - [prep1] [prep2] [prep3] - [infer1] [infer2] [infer3] - [write1] [write2] [write3] - Time -----------------------------------------------------------> - - Examples: - >>> import ray - >>> # Create an inference pipeline. - >>> ds = ray.data.read_binary_files(dir) # doctest: +SKIP - >>> infer = ... # doctest: +SKIP - >>> pipe = ds.window(blocks_per_window=10).map(infer) # doctest: +SKIP - DatasetPipeline(num_windows=40, num_stages=2) - >>> # The higher the stage parallelism, the shorter the pipeline. - >>> pipe = ds.window(blocks_per_window=20).map(infer) # doctest: +SKIP - DatasetPipeline(num_windows=20, num_stages=2) - >>> # Outputs can be incrementally read from the pipeline. - >>> for item in pipe.iter_rows(): # doctest: +SKIP - ... print(item) # doctest: +SKIP - - Args: - blocks_per_window: The window size (parallelism) in blocks. - Increasing window size increases pipeline throughput, but also - increases the latency to initial output, since it decreases the - length of the pipeline. Setting this to infinity effectively - disables pipelining. - bytes_per_window: Specify the window size in bytes instead of blocks. 
- This will be treated as an upper bound for the window size, but each - window will still include at least one block. This is mutually - exclusive with ``blocks_per_window``. - """ - from ray.data._internal.plan import _rewrite_read_stage - from ray.data.dataset_pipeline import DatasetPipeline - - if blocks_per_window is not None and bytes_per_window is not None: - raise ValueError("Only one windowing scheme can be specified.") - - if blocks_per_window is None: - blocks_per_window = 10 - - ctx = DataContext.get_current() - if self._plan.is_read_stage_equivalent() and ctx.optimize_fuse_read_stages: - blocks, _, stages = self._plan._get_source_blocks_and_stages() - blocks.clear() - blocks, outer_stats, stages = _rewrite_read_stage(blocks, stages) - read_stage = stages[0] - else: - blocks = self._plan.execute() - outer_stats = self._plan.stats() - read_stage = None - - class Iterator: - def __init__(self, splits, epoch): - self._splits = splits.copy() - self._epoch = epoch - - def __next__(self) -> "Datastream": - if not self._splits: - raise StopIteration - - blocks = self._splits.pop(0) - - def gen(): - ds = Datastream( - ExecutionPlan(blocks, outer_stats, run_by_consumer=True), - self._epoch, - lazy=True, - ) - return ds - - return gen - - class Iterable: - def __init__(self, blocks, epoch): - if bytes_per_window: - self._splits = blocks.split_by_bytes(bytes_per_window) - else: - self._splits = blocks.split(split_size=blocks_per_window) - try: - sizes = [s.size_bytes() for s in self._splits] - num_blocks = [s.initial_num_blocks() for s in self._splits] - assert [s > 0 for s in sizes], sizes - - def fmt(size_bytes): - if size_bytes > 1024 * 1024 * 1024: - return "{}GiB".format( - round(size_bytes / (1024 * 1024 * 1024), 2) - ) - elif size_bytes > 10 * 1024: - return "{}MiB".format(round(size_bytes / (1024 * 1024), 2)) - else: - return "{}b".format(size_bytes) - - mean_bytes = int(np.mean(sizes)) - logger.info( - "Created DatasetPipeline with {} windows: " - "{} 
min, {} max, {} mean".format( - len(self._splits), - fmt(min(sizes)), - fmt(max(sizes)), - fmt(mean_bytes), - ) - ) - mean_num_blocks = int(np.mean(num_blocks)) - logger.info( - "Blocks per window: " - "{} min, {} max, {} mean".format( - min(num_blocks), - max(num_blocks), - mean_num_blocks, - ) - ) - # TODO(ekl) we should try automatically choosing the default - # windowing settings to meet these best-practice constraints. - avail_parallelism = _estimate_available_parallelism() - if mean_num_blocks < avail_parallelism: - logger.warning( - f"{WARN_PREFIX} This pipeline's parallelism is limited " - f"by its blocks per window to ~{mean_num_blocks} " - "concurrent tasks per window. To maximize " - "performance, increase the blocks per window to at least " - f"{avail_parallelism}. This may require increasing the " - "base datastream's parallelism and/or adjusting the " - "windowing parameters." - ) - else: - logger.info( - f"{OK_PREFIX} This pipeline's per-window parallelism " - "is high enough to fully utilize the cluster." - ) - obj_store_mem = ray.cluster_resources().get( - "object_store_memory", 0 - ) - safe_mem_bytes = int(obj_store_mem * ESTIMATED_SAFE_MEMORY_FRACTION) - if mean_bytes > safe_mem_bytes: - logger.warning( - f"{WARN_PREFIX} This pipeline's windows are " - f"~{fmt(mean_bytes)} in size each and may not fit in " - "object store memory without spilling. To improve " - "performance, consider reducing the size of each window " - f"to {fmt(safe_mem_bytes)} or less." - ) - else: - logger.info( - f"{OK_PREFIX} This pipeline's windows likely fit in " - "object store memory without spilling." 
- ) - except Exception as e: - logger.info( - "Created DatasetPipeline with {} windows; " - "error getting sizes: {}".format( - len(self._splits), - e, - ) - ) - self._epoch = epoch - - def __iter__(self): - return Iterator(self._splits, self._epoch) - - it = Iterable(blocks, self._epoch) - pipe = DatasetPipeline(it, False, length=len(it._splits)) - if read_stage: - pipe = pipe.foreach_window( - lambda ds, read_stage=read_stage: Datastream( - ds._plan.with_stage(read_stage), ds._epoch, True - ) - ) - return pipe - - @Deprecated(message="Use `Datastream.materialize()` instead.") - def fully_executed(self) -> "MaterializedDatastream": - logger.warning( - "Deprecation warning: use Datastream.materialize() instead of " - "fully_executed()." - ) - self._plan.execute(force_read=True) - return self - - @Deprecated( - message="Check `isinstance(Datastream, MaterializedDatastream)` instead." - ) - def is_fully_executed(self) -> bool: - logger.warning( - "Deprecation warning: Check " - "`isinstance(Datastream, MaterializedDatastream)` " - "instead of using is_fully_executed()." - ) - return self._plan.has_computed_output() - - @ConsumptionAPI(pattern="store memory.", insert_after=True) - def materialize(self) -> "MaterializedDatastream": - """Execute and materialize this datastream into object store memory. - - This can be used to read all blocks into memory. By default, Datastream - doesn't read blocks from the datasource until the first transform. - - Note that this does not mutate the original Datastream. Only the blocks of the - returned MaterializedDatastream class are pinned in memory. - - Returns: - A MaterializedDatastream holding the materialized data blocks. - """ - copy = Datastream.copy(self, _deep_copy=True, _as=MaterializedDatastream) - copy._plan.execute(force_read=True) - return copy - - @ConsumptionAPI(pattern="timing information.", insert_after=True) - def stats(self) -> str: - """Returns a string containing execution timing information. 
- - Note that this does not trigger execution, so if the datastream has not yet - executed, an empty string will be returned. - """ - return self._get_stats_summary().to_string() - - def _get_stats_summary(self) -> DatastreamStatsSummary: - return self._plan.stats_summary() - - @ConsumptionAPI(pattern="Time complexity:") - @DeveloperAPI - def get_internal_block_refs(self) -> List[ObjectRef[Block]]: - """Get a list of references to the underlying blocks of this datastream. - - This function can be used for zero-copy access to the data. It blocks - until the underlying blocks are computed. - - Time complexity: O(1) - - Returns: - A list of references to this datastream's blocks. - """ - blocks = self._plan.execute().get_blocks() - self._synchronize_progress_bar() - return blocks - - @Deprecated( - message="Datastream is lazy by default, so this conversion call is no longer " - "needed and this API will be removed in a future release" - ) - def lazy(self) -> "Datastream": - """Enable lazy evaluation. - - Datastream is lazy by default, so this is only useful for datastreams created - from :func:`ray.data.from_items() `, which is - eager. - - The returned datastream is a lazy datastream, where all subsequent operations - on the stream won't be executed until the datastream is consumed - (e.g. ``.take()``, ``.iter_batches()``, ``.to_torch()``, ``.to_tf()``, etc.) - or execution is manually triggered via ``.materialize()``. - """ - ds = Datastream( - self._plan, self._epoch, lazy=True, logical_plan=self._logical_plan - ) - ds._set_uuid(self._get_uuid()) - return ds - - def has_serializable_lineage(self) -> bool: - """Whether this datastream's lineage is able to be serialized for storage and - later deserialized, possibly on a different cluster. - - Only datastreams that are created from data that we know will still exist at - deserialization time, e.g. data external to this Ray cluster such as persistent - cloud object stores, support lineage-based serialization. 
All of the - ray.data.read_*() APIs support lineage-based serialization. - """ - return self._plan.has_lazy_input() - - @DeveloperAPI - def serialize_lineage(self) -> bytes: - """ - Serialize this datastream's lineage, not the actual data or the existing data - futures, to bytes that can be stored and later deserialized, possibly on a - different cluster. - - Note that this will drop all computed data, and that everything will be - recomputed from scratch after deserialization. - - Use :py:meth:`Datastream.deserialize_lineage` to deserialize the serialized - bytes returned from this method into a Datastream. - - .. note:: - Unioned and zipped datastreams, produced by :py:meth`Datastream.union` and - :py:meth:`Datastream.zip`, are not lineage-serializable. - - Returns: - Serialized bytes containing the lineage of this datastream. - """ - if not self.has_serializable_lineage(): - raise ValueError( - "Lineage-based serialization is not supported for this stream, which " - "means that it cannot be used as a tunable hyperparameter. " - "Lineage-based serialization is explicitly NOT supported for unioned " - "or zipped datastreams (see docstrings for those methods), and is only " - "supported for datastreams created from data that we know will still " - "exist at deserialization time, e.g. external data in persistent cloud " - "object stores or in-memory data from long-lived clusters. Concretely, " - "all ray.data.read_*() APIs should support lineage-based " - "serialization, while all of the ray.data.from_*() APIs do not. To " - "allow this stream to be serialized to storage, write the data to an " - "external store (such as AWS S3, GCS, or Azure Blob Storage) using the " - "Datastream.write_*() APIs, and serialize a new datastream reading " - "from the external store using the ray.data.read_*() APIs." - ) - # Copy Datastream and clear the blocks from the execution plan so only the - # Datastream's lineage is serialized. 
- plan_copy = self._plan.deep_copy(preserve_uuid=True) - ds = Datastream(plan_copy, self._get_epoch(), self._lazy) - ds._plan.clear_block_refs() - ds._set_uuid(self._get_uuid()) - - def _reduce_remote_fn(rf: ray.remote_function.RemoteFunction): - # Custom reducer for Ray remote function handles that allows for - # cross-cluster serialization. - # This manually unsets the last export session and job to force re-exporting - # of the function when the handle is deserialized on a new cluster. - # TODO(Clark): Fix this in core Ray, see issue: - # https://github.com/ray-project/ray/issues/24152. - reconstructor, args, state = rf.__reduce__() - state["_last_export_session_and_job"] = None - return reconstructor, args, state - - context = ray._private.worker.global_worker.get_serialization_context() - try: - context._register_cloudpickle_reducer( - ray.remote_function.RemoteFunction, _reduce_remote_fn - ) - serialized = pickle.dumps(ds) - finally: - context._unregister_cloudpickle_reducer(ray.remote_function.RemoteFunction) - return serialized - - @staticmethod - @DeveloperAPI - def deserialize_lineage(serialized_ds: bytes) -> "Datastream": - """ - Deserialize the provided lineage-serialized Datastream. - - This assumes that the provided serialized bytes were serialized using - :py:meth:`Datastream.serialize_lineage`. - - Args: - serialized_ds: The serialized Datastream that we wish to deserialize. - - Returns: - A deserialized ``Datastream`` instance. 
- """ - return pickle.loads(serialized_ds) - - def _divide(self, block_idx: int) -> ("Datastream", "Datastream"): - block_list = self._plan.execute() - left, right = block_list.divide(block_idx) - l_ds = Datastream( - ExecutionPlan( - left, self._plan.stats(), run_by_consumer=block_list._owned_by_consumer - ), - self._epoch, - self._lazy, - ) - r_ds = Datastream( - ExecutionPlan( - right, self._plan.stats(), run_by_consumer=block_list._owned_by_consumer - ), - self._epoch, - self._lazy, - ) - return l_ds, r_ds - - @Deprecated(message="The batch format is no longer exposed as a public API.") - def default_batch_format(self) -> Type: - context = DataContext.get_current() - if context.strict_mode: - raise StrictModeError("default_batch_format() is not allowed in Ray 2.5") - - import pandas as pd - import pyarrow as pa - - schema = self.schema() - assert isinstance(schema, (type, PandasBlockSchema, pa.Schema)) - - if isinstance(schema, type): - return list - - if isinstance(schema, (PandasBlockSchema, pa.Schema)): - if schema.names == [TENSOR_COLUMN_NAME]: - return np.ndarray - return pd.DataFrame - - @Deprecated(message="The dataset format is no longer exposed as a public API.") - def dataset_format(self) -> BlockFormat: - context = DataContext.get_current() - if context.strict_mode: - raise StrictModeError("dataset_format() is not allowed in Ray 2.5") - - if context.use_streaming_executor: - raise DeprecationWarning( - "`dataset_format` is deprecated for streaming execution. To use " - "`dataset_format`, you must explicitly enable bulk execution by " - "setting `use_streaming_executor` to False in the `DataContext`" - ) - - # We need schema to properly validate, so synchronously - # fetch it if necessary. - schema = self.schema(fetch_if_missing=True) - if schema is None: - raise ValueError( - "Datastream is empty or cleared, can't determine the format of " - "the datastream." 
- ) - - try: - import pyarrow as pa - - if isinstance(schema, pa.Schema): - return BlockFormat.ARROW - except ModuleNotFoundError: - pass - from ray.data._internal.pandas_block import PandasBlockSchema - - if isinstance(schema, PandasBlockSchema): - return BlockFormat.PANDAS - return BlockFormat.SIMPLE - - def _aggregate_on( - self, agg_cls: type, on: Optional[Union[str, List[str]]], *args, **kwargs - ): - """Helper for aggregating on a particular subset of the datastream. - - This validates the `on` argument, and converts a list of column names - or lambdas to a multi-aggregation. A null `on` results in a - multi-aggregation on all columns for an Arrow Datastream, and a single - aggregation on the entire row for a simple Datastream. - """ - aggs = self._build_multicolumn_aggs(agg_cls, on, *args, **kwargs) - return self.aggregate(*aggs) - - def _build_multicolumn_aggs( - self, - agg_cls: type, - on: Optional[Union[str, List[str]]], - ignore_nulls: bool, - *args, - skip_cols: Optional[List[str]] = None, - **kwargs, - ): - """Build set of aggregations for applying a single aggregation to - multiple columns. - """ - # Expand None into an aggregation for each column. - if on is None: - schema = self.schema(fetch_if_missing=True) - if schema is not None and not isinstance(schema, type): - if not skip_cols: - skip_cols = [] - if len(schema.names) > 0: - on = [col for col in schema.names if col not in skip_cols] - - if not isinstance(on, list): - on = [on] - return [agg_cls(on_, *args, ignore_nulls=ignore_nulls, **kwargs) for on_ in on] - - def _aggregate_result(self, result: Union[Tuple, Mapping]) -> U: - if result is not None and len(result) == 1: - if isinstance(result, tuple): - return result[0] - else: - # NOTE (kfstorm): We cannot call `result[0]` directly on - # `PandasRow` because indexing a column with position is not - # supported by pandas. 
- return list(result.values())[0] - else: - return result - - @ensure_notebook_deps( - ["ipywidgets", "8"], - ) - @fallback_if_colab - def _ipython_display_(self): - from ipywidgets import HTML, VBox, Layout - from IPython.display import display - - title = HTML(f"

    {self.__class__.__name__}

    ") - tab = self._tab_repr_() - - if tab: - display(VBox([title, tab], layout=Layout(width="100%"))) - - @ensure_notebook_deps( - ["tabulate", None], - ["ipywidgets", "8"], - ) - def _tab_repr_(self): - from ray._private.thirdparty.tabulate.tabulate import tabulate - from ipywidgets import Tab, HTML - - metadata = { - "num_blocks": self._plan.initial_num_blocks(), - "num_rows": self._meta_count(), - } - # Show metadata if available, but don't trigger execution. - schema = self.schema(fetch_if_missing=False) - if schema is None: - schema_repr = Template("rendered_html_common.html.j2").render( - content="
    Unknown schema
    " - ) - elif isinstance(schema, type): - schema_repr = Template("rendered_html_common.html.j2").render( - content=f"
    Data type: {html.escape(str(schema))}
    " - ) - else: - schema_data = {} - for sname, stype in zip(schema.names, schema.types): - schema_data[sname] = getattr(stype, "__name__", str(stype)) - - schema_repr = Template("scrollableTable.html.j2").render( - table=tabulate( - tabular_data=schema_data.items(), - tablefmt="html", - showindex=False, - headers=["Name", "Type"], - ), - max_height="300px", - ) - - children = [] - children.append( - HTML( - Template("scrollableTable.html.j2").render( - table=tabulate( - tabular_data=metadata.items(), - tablefmt="html", - showindex=False, - headers=["Field", "Value"], - ), - max_height="300px", - ) - ) - ) - children.append(HTML(schema_repr)) - return Tab(children, titles=["Metadata", "Schema"]) - - def __repr__(self) -> str: - return self._plan.get_plan_as_string(self.__class__.__name__) - - def __str__(self) -> str: - return repr(self) - - def __bool__(self) -> bool: - # Prevents `__len__` from being called to check if it is None - # see: issue #25152 - return True - - def __len__(self) -> int: - raise AttributeError( - "Use `ds.count()` to compute the length of a distributed Datastream. " - "This may be an expensive operation." - ) - - def __iter__(self): - raise TypeError( - "`Datastream` objects aren't iterable. To iterate records, call " - "`ds.iter_rows()` or `ds.iter_batches()`. For more information, read " - "https://docs.ray.io/en/latest/data/consuming-datastreams.html." 
- ) - - def _block_num_rows(self) -> List[int]: - get_num_rows = cached_remote_fn(_get_num_rows) - return ray.get([get_num_rows.remote(b) for b in self.get_internal_block_refs()]) - - def _block_size_bytes(self) -> List[int]: - get_size_bytes = cached_remote_fn(_get_size_bytes) - return ray.get( - [get_size_bytes.remote(b) for b in self.get_internal_block_refs()] - ) - - def _meta_count(self) -> Optional[int]: - return self._plan.meta_count() - - def _get_uuid(self) -> str: - return self._uuid - - def _set_uuid(self, uuid: str) -> None: - self._uuid = uuid - - def _get_epoch(self) -> int: - return self._epoch - - def _set_epoch(self, epoch: int) -> None: - self._epoch = epoch - - def _warn_slow(self): - if ray.util.log_once("datastream_slow_warned"): - logger.warning( - "The `map`, `flat_map`, and `filter` operations are unvectorized and " - "can be very slow. Consider using `.map_batches()` instead." - ) - - def _synchronize_progress_bar(self): - """Flush progress bar output by shutting down the current executor. - - This should be called at the end of all blocking APIs (e.g., `take`), but not - async APIs (e.g., `iter_batches`). - - The streaming executor runs in a separate generator / thread, so it is - possible the shutdown logic runs even after a call to retrieve rows from the - stream has finished. Explicit shutdown avoids this, which can clobber console - output (https://github.com/ray-project/ray/issues/32414). - """ - if self._current_executor: - self._current_executor.shutdown() - self._current_executor = None - - def __getstate__(self): - # Note: excludes _current_executor which is not serializable. 
- return { - "plan": self._plan, - "uuid": self._uuid, - "epoch": self._epoch, - "lazy": self._lazy, - "logical_plan": self._logical_plan, - } - - def __setstate__(self, state): - self._plan = state["plan"] - self._uuid = state["uuid"] - self._epoch = state["epoch"] - self._lazy = state["lazy"] - self._logical_plan = state["logical_plan"] - self._current_executor = None - - def __del__(self): - if sys.meta_path is None: - return - if self._current_executor and ray is not None and ray.is_initialized(): - self._current_executor.shutdown() - - -# Backwards compatibility alias. -Dataset = Datastream - - -@PublicAPI -class MaterializedDatastream(Datastream, Generic[T]): - """A Datastream materialized in Ray memory, e.g., via `.materialize()`. - - The blocks of a MaterializedDatastream object are materialized into Ray object store - memory, which means that this class can be shared or iterated over by multiple Ray - tasks without re-executing the underlying computations for producing the stream. - """ - - pass - - -@PublicAPI(stability="beta") -class Schema: - """Datastream schema. - - Attributes: - names: List of column names of this Datastream. - types: List of Arrow types of the Datastream. Note that the "object" type is - not Arrow compatible and hence will be returned as `object`. - base_schema: The underlying Arrow or Pandas schema. - """ - - def __init__(self, base_schema: Union["pyarrow.lib.Schema", "PandasBlockSchema"]): - self.base_schema = base_schema - - @property - def names(self) -> List[str]: - """Lists the columns of this Datastream.""" - return self.base_schema.names - - @property - def types(self) -> List[Union[Literal[object], "pyarrow.DataType"]]: - """Lists the types of this Datastream in Arrow format - - For non-Arrow compatible types, we return "object". 
- """ - import pyarrow as pa - from ray.data.extensions import TensorDtype, ArrowTensorType - - if isinstance(self.base_schema, pa.lib.Schema): - return list(self.base_schema.types) - - arrow_types = [] - for dtype in self.base_schema.types: - if isinstance(dtype, TensorDtype): - # Manually convert our Pandas tensor extension type to Arrow. - arrow_types.append( - ArrowTensorType( - shape=dtype._shape, dtype=pa.from_numpy_dtype(dtype._dtype) - ) - ) - else: - try: - arrow_types.append(pa.from_numpy_dtype(dtype)) - except pa.ArrowNotImplementedError: - arrow_types.append(object) - except Exception: - logger.exception(f"Error converting dtype {dtype} to Arrow.") - arrow_types.append(None) - return arrow_types - - def __eq__(self, other): - return isinstance(other, Schema) and other.base_schema == self.base_schema - - def __str__(self): - return f"Schema({dict(zip(self.names, self.types))})" - - def __repr__(self): - return str(self) - - -def _get_size_bytes(block: Block) -> int: - block = BlockAccessor.for_block(block) - return block.size_bytes() - - -def _block_to_df(block: Block): - block = BlockAccessor.for_block(block) - return block.to_pandas() - - -def _block_to_ndarray(block: Block, column: Optional[str]): - block = BlockAccessor.for_block(block) - return block.to_numpy(column) - - -def _block_to_arrow(block: Block): - block = BlockAccessor.for_block(block) - return block.to_arrow() - - -def _sliding_window(iterable: Iterable, n: int): - """Creates an iterator consisting of n-width sliding windows over - iterable. The sliding windows are constructed lazily such that an - element on the base iterator (iterable) isn't consumed until the - first sliding window containing that element is reached. - - If n > len(iterable), then a single len(iterable) window is - returned. - - Args: - iterable: The iterable on which the sliding window will be - created. - n: The width of the sliding window. - - Returns: - An iterator of n-width windows over iterable. 
- If n > len(iterable), then a single len(iterable) window is - returned. - """ - it = iter(iterable) - window = collections.deque(itertools.islice(it, n), maxlen=n) - if len(window) > 0: - yield tuple(window) - for elem in it: - window.append(elem) - yield tuple(window) - - -def _do_write( - ds: Datasource, - ctx: DataContext, - blocks: List[Block], - meta: List[BlockMetadata], - ray_remote_args: Dict[str, Any], - write_args: Dict[str, Any], -) -> List[ObjectRef[WriteResult]]: - write_args = _unwrap_arrow_serialization_workaround(write_args) - DataContext._set_current(ctx) - return ds.do_write(blocks, meta, ray_remote_args=ray_remote_args, **write_args) diff --git a/python/ray/data/grouped_data.py b/python/ray/data/grouped_data.py index 9bdb7d072c10..0823fd419d79 100644 --- a/python/ray/data/grouped_data.py +++ b/python/ray/data/grouped_data.py @@ -29,7 +29,7 @@ UserDefinedFunction, ) from ray.data.context import DataContext -from ray.data.datastream import DataBatch, Datastream +from ray.data.dataset import DataBatch, Dataset from ray.util.annotations import PublicAPI @@ -117,34 +117,33 @@ class PushBasedGroupbyOp(_GroupbyOp, PushBasedShufflePlan): @PublicAPI class GroupedData: - """Represents a grouped datastream created by calling ``Datastream.groupby()``. + """Represents a grouped dataset created by calling ``Dataset.groupby()``. The actual groupby is deferred until an aggregation is applied. """ - def __init__(self, datastream: Datastream, key: str): - """Construct a datastream grouped by key (internal API). + def __init__(self, dataset: Dataset, key: str): + """Construct a dataset grouped by key (internal API). The constructor is not part of the GroupedData API. - Use the ``Datastream.groupby()`` method to construct one. + Use the ``Dataset.groupby()`` method to construct one. 
""" - self._datastream = datastream + self._dataset = dataset self._key = key def __repr__(self) -> str: return ( - f"{self.__class__.__name__}(datastream={self._datastream}, " - f"key={self._key!r})" + f"{self.__class__.__name__}(dataset={self._dataset}, " f"key={self._key!r})" ) - def aggregate(self, *aggs: AggregateFn) -> Datastream: + def aggregate(self, *aggs: AggregateFn) -> Dataset: """Implements an accumulator-based aggregation. Args: aggs: Aggregations to do. Returns: - The output is an datastream of ``n + 1`` columns where the first column + The output is an dataset of ``n + 1`` columns where the first column is the groupby key and the second through ``n + 1`` columns are the results of the aggregations. If groupby key is ``None`` then the key part of return is omitted. @@ -156,8 +155,8 @@ def do_agg(blocks, task_ctx: TaskContext, clear_input_blocks: bool, *_): if len(aggs) == 0: raise ValueError("Aggregate requires at least one aggregation") for agg in aggs: - agg._validate(self._datastream.schema(fetch_if_missing=True)) - # Handle empty datastream. + agg._validate(self._dataset.schema(fetch_if_missing=True)) + # Handle empty dataset. 
if blocks.initial_num_blocks() == 0: return blocks, stage_info @@ -190,7 +189,7 @@ def do_agg(blocks, task_ctx: TaskContext, clear_input_blocks: bool, *_): ctx=task_ctx, ) - plan = self._datastream._plan.with_stage( + plan = self._dataset._plan.with_stage( AllToAllStage( "Aggregate", None, @@ -199,7 +198,7 @@ def do_agg(blocks, task_ctx: TaskContext, clear_input_blocks: bool, *_): ) ) - logical_plan = self._datastream._logical_plan + logical_plan = self._dataset._logical_plan if logical_plan is not None: op = Aggregate( logical_plan.dag, @@ -207,10 +206,10 @@ def do_agg(blocks, task_ctx: TaskContext, clear_input_blocks: bool, *_): aggs=aggs, ) logical_plan = LogicalPlan(op) - return Datastream( + return Dataset( plan, - self._datastream._epoch, - self._datastream._lazy, + self._dataset._epoch, + self._dataset._lazy, logical_plan, ) @@ -222,14 +221,14 @@ def _aggregate_on( *args, **kwargs, ): - """Helper for aggregating on a particular subset of the datastream. + """Helper for aggregating on a particular subset of the dataset. This validates the `on` argument, and converts a list of column names to a multi-aggregation. A null `on` results in a - multi-aggregation on all columns for an Arrow Datastream, and a single - aggregation on the entire row for a simple Datastream. + multi-aggregation on all columns for an Arrow Dataset, and a single + aggregation on the entire row for a simple Dataset. """ - aggs = self._datastream._build_multicolumn_aggs( + aggs = self._dataset._build_multicolumn_aggs( agg_cls, on, ignore_nulls, *args, skip_cols=self._key, **kwargs ) return self.aggregate(*aggs) @@ -241,8 +240,8 @@ def map_groups( compute: Union[str, ComputeStrategy] = None, batch_format: Optional[str] = "default", **ray_remote_args, - ) -> "Datastream": - """Apply the given function to each group of records of this datastream. + ) -> "Dataset": + """Apply the given function to each group of records of this dataset. 
While map_groups() is very flexible, note that it comes with downsides: * It may be slower than using more specific methods such as min(), max(). @@ -302,9 +301,9 @@ def map_groups( # Note that sort() will ensure that records of the same key partitioned # into the same block. if self._key is not None: - sorted_ds = self._datastream.sort(self._key) + sorted_ds = self._dataset.sort(self._key) else: - sorted_ds = self._datastream.repartition(1) + sorted_ds = self._dataset.repartition(1) # Returns the group boundaries. def get_key_boundaries(block_accessor: BlockAccessor): @@ -354,7 +353,7 @@ def group_fn(batch): **ray_remote_args, ) - def count(self) -> Datastream: + def count(self) -> Dataset: """Compute count aggregation. Examples: @@ -364,7 +363,7 @@ def count(self) -> Datastream: ... "A").count() # doctest: +SKIP Returns: - A datastream of ``[k, v]`` columns where ``k`` is the groupby key and + A dataset of ``[k, v]`` columns where ``k`` is the groupby key and ``v`` is the number of rows with that key. If groupby key is ``None`` then the key part of return is omitted. """ @@ -372,7 +371,7 @@ def count(self) -> Datastream: def sum( self, on: Union[str, List[str]] = None, ignore_nulls: bool = True - ) -> Datastream: + ) -> Dataset: r"""Compute grouped sum aggregation. Examples: @@ -402,10 +401,10 @@ def sum( For different values of ``on``, the return varies: - - ``on=None``: a datastream containing a groupby key column, + - ``on=None``: a dataset containing a groupby key column, ``"k"``, and a column-wise sum column for each original column - in the datastream. - - ``on=["col_1", ..., "col_n"]``: a datastream of ``n + 1`` + in the dataset. + - ``on=["col_1", ..., "col_n"]``: a dataset of ``n + 1`` columns where the first column is the groupby key and the second through ``n + 1`` columns are the results of the aggregations. 
@@ -415,7 +414,7 @@ def sum( def min( self, on: Union[str, List[str]] = None, ignore_nulls: bool = True - ) -> Datastream: + ) -> Dataset: """Compute grouped min aggregation. Examples: @@ -440,10 +439,10 @@ def min( For different values of ``on``, the return varies: - - ``on=None``: a datastream containing a groupby key column, + - ``on=None``: a dataset containing a groupby key column, ``"k"``, and a column-wise min column for each original column in - the datastream. - - ``on=["col_1", ..., "col_n"]``: a datastream of ``n + 1`` + the dataset. + - ``on=["col_1", ..., "col_n"]``: a dataset of ``n + 1`` columns where the first column is the groupby key and the second through ``n + 1`` columns are the results of the aggregations. @@ -453,7 +452,7 @@ def min( def max( self, on: Union[str, List[str]] = None, ignore_nulls: bool = True - ) -> Datastream: + ) -> Dataset: """Compute grouped max aggregation. Examples: @@ -478,10 +477,10 @@ def max( For different values of ``on``, the return varies: - - ``on=None``: a datastream containing a groupby key column, + - ``on=None``: a dataset containing a groupby key column, ``"k"``, and a column-wise max column for each original column in - the datastream. - - ``on=["col_1", ..., "col_n"]``: a datastream of ``n + 1`` + the dataset. + - ``on=["col_1", ..., "col_n"]``: a dataset of ``n + 1`` columns where the first column is the groupby key and the second through ``n + 1`` columns are the results of the aggregations. @@ -491,7 +490,7 @@ def max( def mean( self, on: Union[str, List[str]] = None, ignore_nulls: bool = True - ) -> Datastream: + ) -> Dataset: """Compute grouped mean aggregation. Examples: @@ -516,10 +515,10 @@ def mean( For different values of ``on``, the return varies: - - ``on=None``: a datastream containing a groupby key column, + - ``on=None``: a dataset containing a groupby key column, ``"k"``, and a column-wise mean column for each original column - in the datastream. 
- - ``on=["col_1", ..., "col_n"]``: a datastream of ``n + 1`` + in the dataset. + - ``on=["col_1", ..., "col_n"]``: a dataset of ``n + 1`` columns where the first column is the groupby key and the second through ``n + 1`` columns are the results of the aggregations. @@ -532,7 +531,7 @@ def std( on: Union[str, List[str]] = None, ddof: int = 1, ignore_nulls: bool = True, - ) -> Datastream: + ) -> Dataset: """Compute grouped standard deviation aggregation. Examples: @@ -567,10 +566,10 @@ def std( For different values of ``on``, the return varies: - - ``on=None``: a datastream containing a groupby key column, + - ``on=None``: a dataset containing a groupby key column, ``"k"``, and a column-wise std column for each original column in - the datastream. - - ``on=["col_1", ..., "col_n"]``: a datastream of ``n + 1`` + the dataset. + - ``on=["col_1", ..., "col_n"]``: a dataset of ``n + 1`` columns where the first column is the groupby key and the second through ``n + 1`` columns are the results of the aggregations. 
diff --git a/python/ray/data/iterator.py b/python/ray/data/iterator.py index 36fc86f8ee1c..79d73ac515f2 100644 --- a/python/ray/data/iterator.py +++ b/python/ray/data/iterator.py @@ -19,18 +19,18 @@ from ray.util.annotations import PublicAPI from ray.data._internal.block_batching import batch_block_refs from ray.data._internal.block_batching.iter_batches import iter_batches -from ray.data._internal.stats import DatastreamStats +from ray.data._internal.stats import DatasetStats from ray.data._internal.util import _is_tensor_schema if TYPE_CHECKING: import tensorflow as tf import torch from ray.data._internal.torch_iterable_dataset import TorchTensorBatchType - from ray.data.datastream import TensorFlowTensorBatchType, Schema + from ray.data.dataset import TensorFlowTensorBatchType, Schema -def _is_tensor_datastream(schema) -> bool: - """Return ``True`` if this is an iterator over a tensor datastream.""" +def _is_tensor_dataset(schema) -> bool: + """Return ``True`` if this is an iterator over a tensor dataset.""" if schema is None or isinstance(schema, type): return False return _is_tensor_schema(schema.names) @@ -38,13 +38,13 @@ def _is_tensor_datastream(schema) -> bool: @PublicAPI(stability="beta") class DataIterator(abc.ABC): - """An iterator for reading records from a :class:`~Datastream` or + """An iterator for reading records from a :class:`~Dataset` or :class:`~DatasetPipeline`. - For Datastreams, each iteration call represents a complete read of all items in the - Datastream. For DatasetPipelines, each iteration call represents one pass (epoch) - over the base Datastream. Note that for DatasetPipelines, each pass iterates over - the original Datastream, instead of a window (if ``.window()`` was used). + For Datasets, each iteration call represents a complete read of all items in the + Dataset. For DatasetPipelines, each iteration call represents one pass (epoch) + over the base Dataset. 
Note that for DatasetPipelines, each pass iterates over + the original Dataset, instead of a window (if ``.window()`` was used). If using Ray AIR, each trainer actor should get its own iterator by calling :meth:`session.get_dataset_shard("train") @@ -54,15 +54,15 @@ class DataIterator(abc.ABC): >>> import ray >>> ds = ray.data.range(5) >>> ds - Datastream(num_blocks=5, num_rows=5, schema={id: int64}) + Dataset(num_blocks=5, num_rows=5, schema={id: int64}) >>> ds.iterator() - DataIterator(Datastream(num_blocks=5, num_rows=5, schema={id: int64})) + DataIterator(Dataset(num_blocks=5, num_rows=5, schema={id: int64})) .. tip:: For debugging purposes, use :meth:`~ray.air.util.check_ingest.make_local_dataset_iterator` to create a - local `DataIterator` from a :class:`~ray.data.Datastream`, a - :class:`~ray.data.Preprocessor`, and a :class:`~ray.air.DatastreamConfig`. + local `DataIterator` from a :class:`~ray.data.Dataset`, a + :class:`~ray.data.Preprocessor`, and a :class:`~ray.air.DatasetConfig`. """ @abc.abstractmethod @@ -70,7 +70,7 @@ def _to_block_iterator( self, ) -> Tuple[ Iterator[Tuple[ObjectRef[Block], BlockMetadata]], - Optional[DatastreamStats], + Optional[DatasetStats], bool, ]: """Returns the iterator to use for `iter_batches`. @@ -78,7 +78,7 @@ def _to_block_iterator( Returns: A tuple. The first item of the tuple is an iterator over pairs of Block object references and their corresponding metadata. The second item of the - tuple is a DatastreamStats object used for recording stats during iteration. + tuple is a DatasetStats object used for recording stats during iteration. The third item is a boolean indicating if the blocks can be safely cleared after use. """ @@ -97,7 +97,7 @@ def iter_batches( # Deprecated. prefetch_blocks: int = 0, ) -> Iterator[DataBatch]: - """Return a local batched iterator over the datastream. + """Return a local batched iterator over the dataset. 
Examples: >>> import ray @@ -192,16 +192,16 @@ def drop_metadata(block_iterator): stats.iter_total_s.add(time.perf_counter() - time_start) def iter_rows(self, *, prefetch_blocks: int = 0) -> Iterator[Dict[str, Any]]: - """Return a local row iterator over the datastream. + """Return a local row iterator over the dataset. - If the datastream is a tabular datastream (Arrow/Pandas blocks), dicts - are yielded for each row by the iterator. If the datastream is not tabular, + If the dataset is a tabular dataset (Arrow/Pandas blocks), dicts + are yielded for each row by the iterator. If the dataset is not tabular, the raw row is yielded. Examples: >>> import ray - >>> datastream = ray.data.range(10) - >>> next(iter(datastream.iterator().iter_rows())) + >>> dataset = ray.data.range(10) + >>> next(iter(dataset.iterator().iter_rows())) 0 Time complexity: O(1) @@ -211,7 +211,7 @@ def iter_rows(self, *, prefetch_blocks: int = 0) -> Iterator[Dict[str, Any]]: current block during the scan. Returns: - An iterator over rows of the datastream. + An iterator over rows of the dataset. """ iter_batch_args = {"batch_size": None, "batch_format": None} @@ -234,7 +234,7 @@ def stats(self) -> str: @abc.abstractmethod def schema(self) -> "Schema": - """Return the schema of the datastream iterated over.""" + """Return the schema of the dataset iterated over.""" raise NotImplementedError def iter_torch_batches( @@ -253,9 +253,9 @@ def iter_torch_batches( # Deprecated. prefetch_blocks: int = 0, ) -> Iterator["TorchTensorBatchType"]: - """Return a local batched iterator of Torch Tensors over the datastream. + """Return a local batched iterator of Torch Tensors over the dataset. - This iterator will yield single-tensor batches if the underlying datastream + This iterator will yield single-tensor batches if the underlying dataset consists of a single column; otherwise, it will yield a dictionary of column-tensors. If looking for more flexibility in the tensor conversion (e.g. 
casting dtypes) or the batch format, try using `.iter_batches` directly. @@ -354,15 +354,15 @@ def iter_tf_batches( # Deprecated. prefetch_blocks: int = 0, ) -> Iterator["TensorFlowTensorBatchType"]: - """Return a local batched iterator of TensorFlow Tensors over the datastream. + """Return a local batched iterator of TensorFlow Tensors over the dataset. - This iterator will yield single-tensor batches of the underlying datastream + This iterator will yield single-tensor batches of the underlying dataset consists of a single column; otherwise, it will yield a dictionary of column-tensors. .. tip:: If you don't need the additional flexibility provided by this method, - consider using :meth:`~ray.data.Datastream.to_tf` instead. It's easier + consider using :meth:`~ray.data.Dataset.to_tf` instead. It's easier to use. Examples: @@ -440,9 +440,9 @@ def to_torch( # Deprecated. prefetch_blocks: int = 0, ) -> "torch.utils.data.IterableDataset": - """Return a Torch IterableDataset over this datastream. + """Return a Torch IterableDataset over this dataset. - This is only supported for datastreams convertible to Arrow records. + This is only supported for datasets convertible to Arrow records. It is recommended to use the returned ``IterableDataset`` directly instead of passing it into a torch ``DataLoader``. @@ -472,10 +472,10 @@ def to_torch( If ``unsqueeze_label_tensor=True`` (default), the label tensor will be of shape (N, 1). Otherwise, it will be of shape (N,). If ``label_column`` is specified as ``None``, then no column from the - ``Datastream`` will be treated as the label, and the output label tensor + ``Dataset`` will be treated as the label, and the output label tensor will be ``None``. - Note that you probably want to call ``.split()`` on this datastream if + Note that you probably want to call ``.split()`` on this dataset if there are to be multiple Torch workers consuming the data. 
Time complexity: O(1) @@ -506,8 +506,8 @@ def to_torch( prefetching behavior that uses `prefetch_blocks` by setting `use_legacy_iter_batches` to True in the DataContext. drop_last: Set to True to drop the last incomplete batch, - if the datastream size is not divisible by the batch size. If - False and the size of datastream is not divisible by the batch + if the dataset size is not divisible by the batch size. If + False and the size of dataset is not divisible by the batch size, then the last batch will be smaller. Defaults to False. local_shuffle_buffer_size: If non-None, the data will be randomly shuffled using a local in-memory shuffle buffer, and this value will serve as the @@ -628,10 +628,10 @@ def to_tf( # Deprecated. prefetch_blocks: int = 0, ) -> "tf.data.Dataset": - """Return a TF Dataset over this datastream. + """Return a TF Dataset over this dataset. .. warning:: - If your datastream contains ragged tensors, this method errors. To prevent + If your dataset contains ragged tensors, this method errors. To prevent errors, resize tensors or :ref:`disable tensor extension casting `. @@ -641,7 +641,7 @@ def to_tf( ... "s3://anonymous@air-example-data/iris.csv" ... ) >>> it = ds.iterator(); it - DataIterator(Datastream( + DataIterator(Dataset( num_blocks=1, num_rows=150, schema={ @@ -663,7 +663,7 @@ def to_tf( >>> it.to_tf(["sepal length (cm)", "sepal width (cm)"], "target") # doctest: +SKIP <_OptionsDataset element_spec=({'sepal length (cm)': TensorSpec(shape=(None,), dtype=tf.float64, name='sepal length (cm)'), 'sepal width (cm)': TensorSpec(shape=(None,), dtype=tf.float64, name='sepal width (cm)')}, TensorSpec(shape=(None,), dtype=tf.int64, name='target'))> - If your datastream contains multiple features but your model accepts a single + If your dataset contains multiple features but your model accepts a single tensor as input, combine features with :class:`~ray.data.preprocessors.Concatenator`. 
@@ -672,7 +672,7 @@ def to_tf( >>> it = preprocessor.transform(ds).iterator() >>> it DataIterator(Concatenator - +- Datastream( + +- Dataset( num_blocks=1, num_rows=150, schema={ @@ -701,8 +701,8 @@ def to_tf( `use_legacy_iter_batches` to True in the DataContext. batch_size: Record batch size. Defaults to 1. drop_last: Set to True to drop the last incomplete batch, - if the datastream size is not divisible by the batch size. If - False and the size of datastream is not divisible by the batch + if the dataset size is not divisible by the batch size. If + False and the size of dataset is not divisible by the batch size, then the last batch will be smaller. Defaults to False. local_shuffle_buffer_size: If non-None, the data will be randomly shuffled using a local in-memory shuffle buffer, and this value will serve as the @@ -730,15 +730,15 @@ def to_tf( schema = self.schema() - if _is_tensor_datastream(schema): + if _is_tensor_dataset(schema): raise NotImplementedError( - "`to_tf` doesn't support single-column tensor datastreams. Call the " + "`to_tf` doesn't support single-column tensor datasets. Call the " "more-flexible `iter_batches` instead." ) if isinstance(schema, type): raise NotImplementedError( - "`to_tf` doesn't support simple datastreams. Call `map_batches` and " + "`to_tf` doesn't support simple datasets. Call `map_batches` and " "convert your data to a tabular format. Alternatively, call the more-" "flexible `iter_batches` in place of `to_tf`." ) @@ -750,7 +750,7 @@ def validate_column(column: str) -> None: raise ValueError( f"You specified '{column}' in `feature_columns` or " f"`label_columns`, but there's no column named '{column}' in the " - f"datastream. Valid column names are: {valid_columns}." + f"dataset. Valid column names are: {valid_columns}." 
) def validate_columns(columns: Union[str, List]) -> None: @@ -801,7 +801,7 @@ def generator(): label_type_spec = get_type_spec(schema, columns=label_columns) output_signature = (feature_type_spec, label_type_spec) - datastream = tf.data.Dataset.from_generator( + dataset = tf.data.Dataset.from_generator( generator, output_signature=output_signature ) @@ -809,7 +809,7 @@ def generator(): options.experimental_distribute.auto_shard_policy = ( tf.data.experimental.AutoShardPolicy.OFF ) - return datastream.with_options(options) + return dataset.with_options(options) def iter_epochs(self, max_epoch: int = -1) -> None: raise DeprecationWarning( diff --git a/python/ray/data/preprocessor.py b/python/ray/data/preprocessor.py index 5d03155f9ff4..859898d64001 100644 --- a/python/ray/data/preprocessor.py +++ b/python/ray/data/preprocessor.py @@ -8,7 +8,7 @@ from ray.util.annotations import Deprecated, DeveloperAPI, PublicAPI if TYPE_CHECKING: - from ray.data import Datastream, DatasetPipeline + from ray.data import Dataset, DatasetPipeline import pandas as pd import numpy as np from ray.air.data_batch_type import DataBatchType @@ -25,7 +25,7 @@ class PreprocessorNotFittedException(RuntimeError): class Preprocessor(abc.ABC): """Implements an ML preprocessing operation. - Preprocessors are stateful objects that can be fitted against a Datastream and used + Preprocessors are stateful objects that can be fitted against a Dataset and used to transform both local data batches and distributed data. For example, a Normalization preprocessor may calculate the mean and stdev of a field during fitting, and uses these attributes to implement its normalization transform. 
@@ -70,20 +70,20 @@ def fit_status(self) -> "Preprocessor.FitStatus": @Deprecated def transform_stats(self) -> Optional[str]: - """Return Datastream stats for the most recent transform call, if any.""" + """Return Dataset stats for the most recent transform call, if any.""" raise DeprecationWarning( "`preprocessor.transform_stats()` is no longer supported in Ray 2.4. " - "With Datastream now lazy by default, the stats are only populated " - "after execution. Once the datastream transform is executed, the " - "stats can be accessed directly from the transformed datastream " + "With Dataset now lazy by default, the stats are only populated " + "after execution. Once the dataset transform is executed, the " + "stats can be accessed directly from the transformed dataset " "(`ds.stats()`), or can be viewed in the ray-data.log " "file saved in the Ray logs directory " "(defaults to /tmp/ray/session_{SESSION_ID}/logs/)." ) - def fit(self, ds: "Datastream") -> "Preprocessor": - """Fit this Preprocessor to the Datastream. + def fit(self, ds: "Dataset") -> "Preprocessor": + """Fit this Preprocessor to the Dataset. Fitted state attributes will be directly set in the Preprocessor. @@ -91,7 +91,7 @@ def fit(self, ds: "Datastream") -> "Preprocessor": ``preprocessor.fit(A).fit(B)`` is equivalent to ``preprocessor.fit(B)``. Args: - ds: Input datastream. + ds: Input dataset. Returns: Preprocessor: The fitted Preprocessor with state attributes. @@ -113,30 +113,30 @@ def fit(self, ds: "Datastream") -> "Preprocessor": return self._fit(ds) - def fit_transform(self, ds: "Datastream") -> "Datastream": - """Fit this Preprocessor to the Datastream and then transform the Datastream. + def fit_transform(self, ds: "Dataset") -> "Dataset": + """Fit this Preprocessor to the Dataset and then transform the Dataset. Calling it more than once will overwrite all previously fitted state: ``preprocessor.fit_transform(A).fit_transform(B)`` is equivalent to ``preprocessor.fit_transform(B)``. 
Args: - ds: Input Datastream. + ds: Input Dataset. Returns: - ray.data.Datastream: The transformed Datastream. + ray.data.Dataset: The transformed Dataset. """ self.fit(ds) return self.transform(ds) - def transform(self, ds: "Datastream") -> "Datastream": - """Transform the given datastream. + def transform(self, ds: "Dataset") -> "Dataset": + """Transform the given dataset. Args: - ds: Input Datastream. + ds: Input Dataset. Returns: - ray.data.Datastream: The transformed Datastream. + ray.data.Dataset: The transformed Dataset. Raises: PreprocessorNotFittedException: if ``fit`` is not called yet. @@ -196,8 +196,8 @@ def _transform_pipeline(self, pipeline: "DatasetPipeline") -> "DatasetPipeline": ): raise RuntimeError( "Streaming/pipelined ingest only works with " - "Preprocessors that do not need to be fit on the entire datastream. " - "It is not possible to fit on Datastreams " + "Preprocessors that do not need to be fit on the entire dataset. " + "It is not possible to fit on Datasets " "in a streaming fashion." ) @@ -213,7 +213,7 @@ def _check_is_fitted(self) -> bool: return bool(fitted_vars) @DeveloperAPI - def _fit(self, ds: "Datastream") -> "Preprocessor": + def _fit(self, ds: "Dataset") -> "Preprocessor": """Sub-classes should override this instead of fit().""" raise NotImplementedError() @@ -247,10 +247,10 @@ def _determine_transform_to_use(self) -> BatchFormat: ) def _transform( - self, ds: Union["Datastream", "DatasetPipeline"] - ) -> Union["Datastream", "DatasetPipeline"]: + self, ds: Union["Dataset", "DatasetPipeline"] + ) -> Union["Dataset", "DatasetPipeline"]: # TODO(matt): Expose `batch_size` or similar configurability. - # The default may be too small for some datastreams and too large for others. + # The default may be too small for some datasets and too large for others. 
transform_type = self._determine_transform_to_use() # Our user-facing batch format should only be pandas or NumPy, other @@ -271,7 +271,7 @@ def _transform( ) def _get_transform_config(self) -> Dict[str, Any]: - """Returns kwargs to be passed to :meth:`ray.data.Datastream.map_batches`. + """Returns kwargs to be passed to :meth:`ray.data.Dataset.map_batches`. This can be implemented by subclassing preprocessors. """ diff --git a/python/ray/data/preprocessors/batch_mapper.py b/python/ray/data/preprocessors/batch_mapper.py index 4aca660cce90..4479bf3e9478 100644 --- a/python/ray/data/preprocessors/batch_mapper.py +++ b/python/ray/data/preprocessors/batch_mapper.py @@ -18,12 +18,12 @@ @PublicAPI(stability="alpha") class BatchMapper(Preprocessor): - """Apply an arbitrary operation to a datastream. + """Apply an arbitrary operation to a dataset. - :class:`BatchMapper` applies a user-defined function to batches of a datastream. A + :class:`BatchMapper` applies a user-defined function to batches of a dataset. A batch is a Pandas ``DataFrame`` that represents a small amount of data. By modifying batches instead of individual records, this class can efficiently transform a - datastream with vectorized operations. + dataset with vectorized operations. Use this preprocessor to apply stateless operations that aren't already built-in. @@ -48,25 +48,25 @@ class BatchMapper(Preprocessor): >>> >>> preprocessor = BatchMapper(fn, batch_format="pandas") >>> preprocessor.transform(ds) # doctest: +SKIP - Datastream(num_blocks=1, num_rows=3, schema={X: int64}) + Dataset(num_blocks=1, num_rows=3, schema={X: int64}) >>> >>> def fn_numpy(batch: Dict[str, np.ndarray]) -> Dict[str, np.ndarray]: ... return {"X": batch["X"]} >>> preprocessor = BatchMapper(fn_numpy, batch_format="numpy") >>> preprocessor.transform(ds) # doctest: +SKIP - Datastream(num_blocks=1, num_rows=3, schema={X: int64}) + Dataset(num_blocks=1, num_rows=3, schema={X: int64}) Args: fn: The function to apply to data batches. 
batch_size: The desired number of rows in each data batch provided to ``fn``. - Semantics are the same as in ```datastream.map_batches()``: specifying + Semantics are the same as in ```dataset.map_batches()``: specifying ``None`` wil use the entire underlying blocks as batches (blocks may contain different number of rows) and the actual size of the batch provided to ``fn`` may be smaller than ``batch_size`` if ``batch_size`` doesn't evenly divide the block(s) sent to a given map task. Defaults to 4096, - which is the same default value as ``datastream.map_batches()``. + which is the same default value as ``dataset.map_batches()``. batch_format: The preferred batch format to use in UDF. If not given, - we will infer based on the input datastream data format. + we will infer based on the input dataset data format. """ _is_fittable = False diff --git a/python/ray/data/preprocessors/chain.py b/python/ray/data/preprocessors/chain.py index 4673b73b871e..48349813160a 100644 --- a/python/ray/data/preprocessors/chain.py +++ b/python/ray/data/preprocessors/chain.py @@ -1,6 +1,6 @@ from typing import TYPE_CHECKING, Union from ray.air.util.data_batch_conversion import BatchFormat -from ray.data import Datastream, DatasetPipeline +from ray.data import Dataset, DatasetPipeline from ray.data.preprocessor import Preprocessor from ray.util.annotations import PublicAPI @@ -12,7 +12,7 @@ class Chain(Preprocessor): """Combine multiple preprocessors into a single :py:class:`Preprocessor`. - When you call ``fit``, each preprocessor is fit on the datastream produced by the + When you call ``fit``, each preprocessor is fit on the dataset produced by the preceeding preprocessor's ``fit_transform``. 
Example: @@ -69,22 +69,22 @@ def fit_status(self): def __init__(self, *preprocessors: Preprocessor): self.preprocessors = preprocessors - def _fit(self, ds: Datastream) -> Preprocessor: + def _fit(self, ds: Dataset) -> Preprocessor: for preprocessor in self.preprocessors[:-1]: ds = preprocessor.fit_transform(ds) self.preprocessors[-1].fit(ds) return self - def fit_transform(self, ds: Datastream) -> Datastream: + def fit_transform(self, ds: Dataset) -> Dataset: for preprocessor in self.preprocessors: ds = preprocessor.fit_transform(ds) return ds def _transform( - self, ds: Union[Datastream, DatasetPipeline] - ) -> Union[Datastream, DatasetPipeline]: + self, ds: Union[Dataset, DatasetPipeline] + ) -> Union[Dataset, DatasetPipeline]: for preprocessor in self.preprocessors: - if isinstance(ds, Datastream): + if isinstance(ds, Dataset): ds = preprocessor.transform(ds) elif isinstance(ds, DatasetPipeline): ds = preprocessor._transform_pipeline(ds) diff --git a/python/ray/data/preprocessors/concatenator.py b/python/ray/data/preprocessors/concatenator.py index cc4bbdd38aa5..31d0621cf7fb 100644 --- a/python/ray/data/preprocessors/concatenator.py +++ b/python/ray/data/preprocessors/concatenator.py @@ -51,7 +51,7 @@ class Concatenator(Preprocessor): 2 [1.0, 0.9] Sometimes, you might not want to concatenate all of of the columns in your - datastream. In this case, you can exclude columns with the ``exclude`` parameter. + dataset. In this case, you can exclude columns with the ``exclude`` parameter. 
>>> df = pd.DataFrame({"X0": [0, 3, 1], "X1": [0.5, 0.2, 0.9], "Y": ["blue", "orange", "blue"]}) >>> ds = ray.data.from_pandas(df) # doctest: +SKIP @@ -88,7 +88,7 @@ class Concatenator(Preprocessor): >>> concatenator = Concatenator(include=["X0", "X1"], dtype=np.float32) >>> concatenator.fit_transform(ds) # doctest: +SKIP - Datastream(num_blocks=1, num_rows=3, schema={Y: object, concat_out: TensorDtype(shape=(2,), dtype=float32)}) + Dataset(num_blocks=1, num_rows=3, schema={Y: object, concat_out: TensorDtype(shape=(2,), dtype=float32)}) Args: output_column_name: The desired name for the new column. @@ -106,7 +106,7 @@ class Concatenator(Preprocessor): Raises: ValueError: if `raise_if_missing` is `True` and a column in `include` or - `exclude` doesn't exist in the datastream. + `exclude` doesn't exist in the dataset. """ # noqa: E501 _is_fittable = False diff --git a/python/ray/data/preprocessors/discretizer.py b/python/ray/data/preprocessors/discretizer.py index 0ef294b7a8c1..36e34f805ef1 100644 --- a/python/ray/data/preprocessors/discretizer.py +++ b/python/ray/data/preprocessors/discretizer.py @@ -3,7 +3,7 @@ import pandas as pd import numpy as np -from ray.data import Datastream +from ray.data import Dataset from ray.data.aggregate import Max, Min from ray.data.preprocessor import Preprocessor from ray.util.annotations import PublicAPI @@ -253,7 +253,7 @@ def __init__( self.duplicates = duplicates self.dtypes = dtypes - def _fit(self, datastream: Datastream) -> Preprocessor: + def _fit(self, dataset: Dataset) -> Preprocessor: self._validate_on_fit() stats = {} aggregates = [] @@ -267,7 +267,7 @@ def _fit(self, datastream: Datastream) -> Preprocessor: self._fit_uniform_covert_bin_to_aggregate_if_needed(column) ) - aggregate_stats = datastream.aggregate(*aggregates) + aggregate_stats = dataset.aggregate(*aggregates) mins = {} maxes = {} for key, value in aggregate_stats.items(): diff --git a/python/ray/data/preprocessors/encoder.py 
b/python/ray/data/preprocessors/encoder.py index fe1d646de015..0df8885aabf5 100644 --- a/python/ray/data/preprocessors/encoder.py +++ b/python/ray/data/preprocessors/encoder.py @@ -6,7 +6,7 @@ import pandas as pd import pandas.api.types -from ray.data import Datastream +from ray.data import Dataset from ray.data.preprocessor import Preprocessor from ray.util.annotations import PublicAPI @@ -43,7 +43,7 @@ class OrdinalEncoder(Preprocessor): 2 1 0 3 0 1 - If you transform a value not present in the original datastream, then the value + If you transform a value not present in the original dataset, then the value is encoded as ``float("nan")``. >>> df = pd.DataFrame({"sex": ["female"], "level": ["L6"]}) @@ -87,9 +87,9 @@ def __init__(self, columns: List[str], *, encode_lists: bool = True): self.columns = columns self.encode_lists = encode_lists - def _fit(self, datastream: Datastream) -> Preprocessor: + def _fit(self, dataset: Dataset) -> Preprocessor: self.stats_ = _get_unique_value_indices( - datastream, self.columns, encode_lists=self.encode_lists + dataset, self.columns, encode_lists=self.encode_lists ) return self @@ -135,7 +135,7 @@ class OneHotEncoder(Preprocessor): 1 if the category matches and 0 otherwise. If you encode an infrequent category (see ``max_categories``) or a category - that isn't in the fitted datastream, then the category is encoded as all 0s. + that isn't in the fitted dataset, then the category is encoded as all 0s. Columns must contain hashable objects or lists of hashable objects. @@ -160,7 +160,7 @@ class OneHotEncoder(Preprocessor): 4 1 0 0 5 0 1 0 - If you one-hot encode a value that isn't in the fitted datastream, then the + If you one-hot encode a value that isn't in the fitted dataset, then the value is encoded with zeros. 
>>> df = pd.DataFrame({"color": ["yellow"]}) @@ -206,9 +206,9 @@ def __init__( self.columns = columns self.max_categories = max_categories - def _fit(self, datastream: Datastream) -> Preprocessor: + def _fit(self, dataset: Dataset) -> Preprocessor: self.stats_ = _get_unique_value_indices( - datastream, + dataset, self.columns, max_categories=self.max_categories, encode_lists=False, @@ -315,9 +315,9 @@ def __init__( self.columns = columns self.max_categories = max_categories - def _fit(self, datastream: Datastream) -> Preprocessor: + def _fit(self, dataset: Dataset) -> Preprocessor: self.stats_ = _get_unique_value_indices( - datastream, + dataset, self.columns, max_categories=self.max_categories, encode_lists=True, @@ -377,7 +377,7 @@ class LabelEncoder(Preprocessor): 2 4.9 3.0 0 3 6.2 3.4 2 - If you transform a label not present in the original datastream, then the new + If you transform a label not present in the original dataset, then the new label is encoded as ``float("nan")``. >>> df = pd.DataFrame({ @@ -403,8 +403,8 @@ class LabelEncoder(Preprocessor): def __init__(self, label_column: str): self.label_column = label_column - def _fit(self, datastream: Datastream) -> Preprocessor: - self.stats_ = _get_unique_value_indices(datastream, [self.label_column]) + def _fit(self, dataset: Dataset) -> Preprocessor: + self.stats_ = _get_unique_value_indices(dataset, [self.label_column]) return self def _transform_pandas(self, df: pd.DataFrame): @@ -431,7 +431,7 @@ class Categorizer(Preprocessor): .. warning:: If you don't specify ``dtypes``, fit this preprocessor before splitting - your datastream into train and test splits. This ensures categories are + your dataset into train and test splits. This ensures categories are consistent across splits. 
Examples: @@ -477,13 +477,13 @@ def __init__( self.columns = columns self.dtypes = dtypes - def _fit(self, datastream: Datastream) -> Preprocessor: + def _fit(self, dataset: Dataset) -> Preprocessor: columns_to_get = [ column for column in self.columns if column not in self.dtypes ] if columns_to_get: unique_indices = _get_unique_value_indices( - datastream, columns_to_get, drop_na_values=True, key_format="{0}" + dataset, columns_to_get, drop_na_values=True, key_format="{0}" ) unique_indices = { column: pd.CategoricalDtype(values_indices.keys()) @@ -507,7 +507,7 @@ def __repr__(self): def _get_unique_value_indices( - datastream: Datastream, + dataset: Dataset, columns: List[str], drop_na_values: bool = False, key_format: str = "unique_values({0})", @@ -554,7 +554,7 @@ def get_pd_value_counts(df: pd.DataFrame) -> List[Dict[str, Counter]]: ) return result - value_counts = datastream.map_batches(get_pd_value_counts, batch_format="pandas") + value_counts = dataset.map_batches(get_pd_value_counts, batch_format="pandas") final_counters = {col: Counter() for col in columns} for batch in value_counts.iter_batches(batch_size=None): for col, counters in batch.items(): diff --git a/python/ray/data/preprocessors/imputer.py b/python/ray/data/preprocessors/imputer.py index dff233f6e6d2..50eeb8b89c1e 100644 --- a/python/ray/data/preprocessors/imputer.py +++ b/python/ray/data/preprocessors/imputer.py @@ -5,7 +5,7 @@ import pandas as pd from pandas.api.types import is_categorical_dtype -from ray.data import Datastream +from ray.data import Dataset from ray.data.aggregate import Mean from ray.data.preprocessor import Preprocessor from ray.util.annotations import PublicAPI @@ -106,12 +106,12 @@ def __init__( '`fill_value` must be set when using "constant" strategy.' 
) - def _fit(self, datastream: Datastream) -> Preprocessor: + def _fit(self, dataset: Dataset) -> Preprocessor: if self.strategy == "mean": aggregates = [Mean(col) for col in self.columns] - self.stats_ = datastream.aggregate(*aggregates) + self.stats_ = dataset.aggregate(*aggregates) elif self.strategy == "most_frequent": - self.stats_ = _get_most_frequent_values(datastream, *self.columns) + self.stats_ = _get_most_frequent_values(dataset, *self.columns) return self @@ -142,14 +142,14 @@ def __repr__(self): def _get_most_frequent_values( - datastream: Datastream, *columns: str + dataset: Dataset, *columns: str ) -> Dict[str, Union[str, Number]]: columns = list(columns) def get_pd_value_counts(df: pd.DataFrame) -> List[Dict[str, Counter]]: return {col: [Counter(df[col].value_counts().to_dict())] for col in columns} - value_counts = datastream.map_batches(get_pd_value_counts, batch_format="pandas") + value_counts = dataset.map_batches(get_pd_value_counts, batch_format="pandas") final_counters = {col: Counter() for col in columns} for batch in value_counts.iter_batches(batch_size=None): for col, counters in batch.items(): diff --git a/python/ray/data/preprocessors/scaler.py b/python/ray/data/preprocessors/scaler.py index 713542fea8e3..b53415c954cd 100644 --- a/python/ray/data/preprocessors/scaler.py +++ b/python/ray/data/preprocessors/scaler.py @@ -3,7 +3,7 @@ import numpy as np import pandas as pd -from ray.data import Datastream +from ray.data import Dataset from ray.data.aggregate import Mean, Std, Min, Max, AbsMax from ray.data.preprocessor import Preprocessor from ray.util.annotations import PublicAPI @@ -66,10 +66,10 @@ class StandardScaler(Preprocessor): def __init__(self, columns: List[str]): self.columns = columns - def _fit(self, datastream: Datastream) -> Preprocessor: + def _fit(self, dataset: Dataset) -> Preprocessor: mean_aggregates = [Mean(col) for col in self.columns] std_aggregates = [Std(col, ddof=0) for col in self.columns] - self.stats_ = 
datastream.aggregate(*mean_aggregates, *std_aggregates) + self.stats_ = dataset.aggregate(*mean_aggregates, *std_aggregates) return self def _transform_pandas(self, df: pd.DataFrame): @@ -150,9 +150,9 @@ class MinMaxScaler(Preprocessor): def __init__(self, columns: List[str]): self.columns = columns - def _fit(self, datastream: Datastream) -> Preprocessor: + def _fit(self, dataset: Dataset) -> Preprocessor: aggregates = [Agg(col) for Agg in [Min, Max] for col in self.columns] - self.stats_ = datastream.aggregate(*aggregates) + self.stats_ = dataset.aggregate(*aggregates) return self def _transform_pandas(self, df: pd.DataFrame): @@ -230,9 +230,9 @@ class MaxAbsScaler(Preprocessor): def __init__(self, columns: List[str]): self.columns = columns - def _fit(self, datastream: Datastream) -> Preprocessor: + def _fit(self, dataset: Dataset) -> Preprocessor: aggregates = [AbsMax(col) for col in self.columns] - self.stats_ = datastream.aggregate(*aggregates) + self.stats_ = dataset.aggregate(*aggregates) return self def _transform_pandas(self, df: pd.DataFrame): @@ -315,12 +315,12 @@ def __init__( self.columns = columns self.quantile_range = quantile_range - def _fit(self, datastream: Datastream) -> Preprocessor: + def _fit(self, dataset: Dataset) -> Preprocessor: low = self.quantile_range[0] med = 0.50 high = self.quantile_range[1] - num_records = datastream.count() + num_records = dataset.count() max_index = num_records - 1 split_indices = [int(percentile * max_index) for percentile in (low, med, high)] @@ -328,15 +328,15 @@ def _fit(self, datastream: Datastream) -> Preprocessor: # TODO(matt): Handle case where quantile lands between 2 numbers. # The current implementation will simply choose the closest index. - # This will affect the results of small datastreams more than large datastreams. + # This will affect the results of small datasets more than large datasets. 
for col in self.columns: - filtered_datastream = datastream.map_batches( + filtered_dataset = dataset.map_batches( lambda df: df[[col]], batch_format="pandas" ) - sorted_datastream = filtered_datastream.sort(col) - _, low, med, high = sorted_datastream.split_at_indices(split_indices) + sorted_dataset = filtered_dataset.sort(col) + _, low, med, high = sorted_dataset.split_at_indices(split_indices) - def _get_first_value(ds: Datastream, c: str): + def _get_first_value(ds: Dataset, c: str): return ds.take(1)[0][c] low_val = _get_first_value(low, col) diff --git a/python/ray/data/preprocessors/torch.py b/python/ray/data/preprocessors/torch.py index 8ffdddb2435e..642fd1483ca3 100644 --- a/python/ray/data/preprocessors/torch.py +++ b/python/ray/data/preprocessors/torch.py @@ -18,9 +18,9 @@ class TorchVisionPreprocessor(Preprocessor): Examples: >>> import ray - >>> datastream = ray.data.read_images("s3://anonymous@air-example-data-2/imagenet-sample-images") - >>> datastream # doctest: +ellipsis - Datastream(num_blocks=..., num_rows=..., schema={image: numpy.ndarray(shape=(..., 3), dtype=float)}) + >>> dataset = ray.data.read_images("s3://anonymous@air-example-data-2/imagenet-sample-images") + >>> dataset # doctest: +ellipsis + Dataset(num_blocks=..., num_rows=..., schema={image: numpy.ndarray(shape=(..., 3), dtype=float)}) Torch models expect inputs of shape :math:`(B, C, H, W)` in the range :math:`[0.0, 1.0]`. To convert images to this format, add ``ToTensor`` to your @@ -33,9 +33,9 @@ class TorchVisionPreprocessor(Preprocessor): ... transforms.Resize((224, 224)), ... 
]) >>> preprocessor = TorchVisionPreprocessor(["image"], transform=transform) - >>> datastream = preprocessor.transform(datastream) # doctest: +ellipsis - >>> datastream # doctest: +ellipsis - Datastream(num_blocks=..., num_rows=..., schema={image: numpy.ndarray(shape=(3, 224, 224), dtype=float)}) + >>> dataset = preprocessor.transform(dataset) # doctest: +ellipsis + >>> dataset # doctest: +ellipsis + Dataset(num_blocks=..., num_rows=..., schema={image: numpy.ndarray(shape=(3, 224, 224), dtype=float)}) For better performance, set ``batched`` to ``True`` and replace ``ToTensor`` with a batch-supporting ``Lambda``. @@ -54,9 +54,9 @@ class TorchVisionPreprocessor(Preprocessor): >>> preprocessor = TorchVisionPreprocessor( ... ["image"], transform=transform, batched=True ... ) - >>> datastream = preprocessor.transform(datastream) # doctest: +ellipsis - >>> datastream # doctest: +ellipsis - Datastream(num_blocks=..., num_rows=..., schema={image: numpy.ndarray(shape=(3, 224, 224), dtype=float)}) + >>> dataset = preprocessor.transform(dataset) # doctest: +ellipsis + >>> dataset # doctest: +ellipsis + Dataset(num_blocks=..., num_rows=..., schema={image: numpy.ndarray(shape=(3, 224, 224), dtype=float)}) Args: columns: The columns to apply the TorchVision transform to. diff --git a/python/ray/data/preprocessors/vectorizer.py b/python/ray/data/preprocessors/vectorizer.py index 8a435b878908..bcd8b5f14e1f 100644 --- a/python/ray/data/preprocessors/vectorizer.py +++ b/python/ray/data/preprocessors/vectorizer.py @@ -3,7 +3,7 @@ import pandas as pd -from ray.data import Datastream +from ray.data import Dataset from ray.data.preprocessor import Preprocessor from ray.data.preprocessors.utils import simple_split_tokenizer, simple_hash from ray.util.annotations import PublicAPI @@ -201,7 +201,7 @@ class CountVectorizer(Preprocessor): output. If unspecified, the tokenizer uses a function equivalent to ``lambda s: s.split(" ")``. 
max_features: The maximum number of tokens to encode in the transformed - datastream. If specified, only the most frequent tokens are encoded. + dataset. If specified, only the most frequent tokens are encoded. """ # noqa: E501 @@ -217,7 +217,7 @@ def __init__( self.tokenization_fn = tokenization_fn or simple_split_tokenizer self.max_features = max_features - def _fit(self, datastream: Datastream) -> Preprocessor: + def _fit(self, dataset: Dataset) -> Preprocessor: def get_pd_value_counts(df: pd.DataFrame) -> List[Counter]: def get_token_counts(col): token_series = df[col].apply(self.tokenization_fn) @@ -226,9 +226,7 @@ def get_token_counts(col): return {col: [get_token_counts(col)] for col in self.columns} - value_counts = datastream.map_batches( - get_pd_value_counts, batch_format="pandas" - ) + value_counts = dataset.map_batches(get_pd_value_counts, batch_format="pandas") total_counts = {col: Counter() for col in self.columns} for batch in value_counts.iter_batches(batch_size=None): for col, counters in batch.items(): diff --git a/python/ray/data/random_access_dataset.py b/python/ray/data/random_access_dataset.py index 9b3b52eba62c..1df2eebc4140 100644 --- a/python/ray/data/random_access_dataset.py +++ b/python/ray/data/random_access_dataset.py @@ -19,21 +19,21 @@ pa = None if TYPE_CHECKING: - from ray.data import Datastream + from ray.data import Dataset logger = logging.getLogger(__name__) @PublicAPI(stability="alpha") class RandomAccessDataset: - """A class that provides distributed, random access to a Datastream. + """A class that provides distributed, random access to a Dataset. - See: ``Datastream.to_random_access_dataset()``. + See: ``Dataset.to_random_access_dataset()``. 
""" def __init__( self, - ds: "Datastream", + ds: "Dataset", key: str, num_workers: int, ): @@ -47,7 +47,7 @@ def __init__( raise ValueError("RandomAccessDataset only supports Arrow-format blocks.") start = time.perf_counter() - logger.info("[setup] Indexing datastream by sort key.") + logger.info("[setup] Indexing dataset by sort key.") sorted_ds = ds.sort(key) get_bounds = cached_remote_fn(_get_bounds) blocks = sorted_ds.get_internal_block_refs() diff --git a/python/ray/data/read_api.py b/python/ray/data/read_api.py index 67335045e788..384c8998d60e 100644 --- a/python/ray/data/read_api.py +++ b/python/ray/data/read_api.py @@ -37,7 +37,7 @@ from ray.data._internal.logical.operators.read_operator import Read from ray.data._internal.plan import ExecutionPlan from ray.data._internal.remote_fn import cached_remote_fn -from ray.data._internal.stats import DatastreamStats +from ray.data._internal.stats import DatasetStats from ray.data._internal.util import ( _lazy_import_pyarrow_dataset, _autodetect_parallelism, @@ -48,7 +48,7 @@ ) from ray.data.block import Block, BlockAccessor, BlockExecStats, BlockMetadata from ray.data.context import DEFAULT_SCHEDULING_STRATEGY, WARN_PREFIX, DataContext -from ray.data.datastream import Datastream, MaterializedDatastream +from ray.data.dataset import Dataset, MaterializedDataset from ray.data.datasource import ( BaseFileMetadataProvider, BinaryDatasource, @@ -110,26 +110,26 @@ def from_items( *, parallelism: int = -1, output_arrow_format: bool = False, -) -> MaterializedDatastream: - """Create a datastream from a list of local Python objects. +) -> MaterializedDataset: + """Create a dataset from a list of local Python objects. 
Examples: >>> import ray >>> ds = ray.data.from_items([1, 2, 3, 4, 5]) # doctest: +SKIP >>> ds # doctest: +SKIP - MaterializedDatastream(num_blocks=5, num_rows=5, schema={item: int64}) + MaterializedDataset(num_blocks=5, num_rows=5, schema={item: int64}) >>> ds.take_batch(2) # doctest: +SKIP {"item": array([1, 2])} Args: items: List of local Python objects. - parallelism: The amount of parallelism to use for the datastream. + parallelism: The amount of parallelism to use for the dataset. Parallelism may be limited by the number of items. output_arrow_format: If True, always return data in Arrow format, raising an error if this is not possible. Defaults to False. Returns: - MaterializedDatastream holding the items. + MaterializedDataset holding the items. """ ctx = ray.data.DataContext.get_current() if ctx.strict_mode: @@ -195,10 +195,10 @@ def from_items( from_items_op = FromItems(items, detected_parallelism) logical_plan = LogicalPlan(from_items_op) - return MaterializedDatastream( + return MaterializedDataset( ExecutionPlan( BlockList(blocks, metadata, owned_by_consumer=False), - DatastreamStats(stages={"FromItems": metadata}, parent=None), + DatasetStats(stages={"FromItems": metadata}, parent=None), run_by_consumer=False, ), 0, @@ -208,24 +208,24 @@ def from_items( @PublicAPI -def range(n: int, *, parallelism: int = -1) -> Datastream: - """Create a datastream from a range of integers [0..n). +def range(n: int, *, parallelism: int = -1) -> Dataset: + """Create a dataset from a range of integers [0..n). Examples: >>> import ray >>> ds = ray.data.range(10000) # doctest: +SKIP >>> ds # doctest: +SKIP - Datastream(num_blocks=200, num_rows=10000, schema={id: int64}) + Dataset(num_blocks=200, num_rows=10000, schema={id: int64}) >>> ds.map(lambda x: {"id": x["id"] * 2}).take(4) # doctest: +SKIP [{"id": 0}, {"id": 2}, {"id": 4}, {"id": 6}] Args: n: The upper bound of the range of integers. - parallelism: The amount of parallelism to use for the datastream. 
+ parallelism: The amount of parallelism to use for the dataset. Parallelism may be limited by the number of items. Returns: - Datastream producing the integers. + Dataset producing the integers. """ ctx = ray.data.DataContext.get_current() if ctx.strict_mode: @@ -242,7 +242,7 @@ def range(n: int, *, parallelism: int = -1) -> Datastream: @Deprecated -def range_table(n: int, *, parallelism: int = -1) -> Datastream: +def range_table(n: int, *, parallelism: int = -1) -> Dataset: ctx = ray.data.DataContext.get_current() if ctx.strict_mode: raise DeprecationWarning("In Ray 2.5, use range() instead of range_table().") @@ -261,14 +261,14 @@ def range_arrow(*args, **kwargs): @PublicAPI -def range_tensor(n: int, *, shape: Tuple = (1,), parallelism: int = -1) -> Datastream: +def range_tensor(n: int, *, shape: Tuple = (1,), parallelism: int = -1) -> Dataset: """Create a Tensor stream from a range of integers [0..n). Examples: >>> import ray >>> ds = ray.data.range_tensor(1000, shape=(2, 2)) >>> ds # doctest: +ellipsis - Datastream( + Dataset( num_blocks=..., num_rows=1000, schema={data: numpy.ndarray(shape=(2, 2), dtype=int64)}) @@ -279,16 +279,16 @@ def range_tensor(n: int, *, shape: Tuple = (1,), parallelism: int = -1) -> Datas [2, 2]])] This is similar to range_table(), but uses the ArrowTensorArray extension - type. The datastream elements take the form {"data": array(N, shape=shape)}. + type. The dataset elements take the form {"data": array(N, shape=shape)}. Args: n: The upper bound of the range of integer records. shape: The shape of each record. - parallelism: The amount of parallelism to use for the datastream. + parallelism: The amount of parallelism to use for the dataset. Parallelism may be limited by the number of items. Returns: - Datastream producing the integers as Arrow tensor records. + Dataset producing the integers as Arrow tensor records. 
""" ctx = ray.data.DataContext.get_current() return read_datasource( @@ -309,7 +309,7 @@ def read_datasource( parallelism: int = -1, ray_remote_args: Dict[str, Any] = None, **read_args, -) -> Datastream: +) -> Dataset: """Read a stream from a custom data source. Args: @@ -322,7 +322,7 @@ def read_datasource( ray_remote_args: kwargs passed to ray.remote in the read tasks. Returns: - Datastream that reads data from the datasource. + Dataset that reads data from the datasource. """ ctx = DataContext.get_current() @@ -390,24 +390,24 @@ def read_datasource( if read_tasks and len(read_tasks) < min_safe_parallelism * 0.7: perc = 1 + round((min_safe_parallelism - len(read_tasks)) / len(read_tasks), 1) logger.warning( - f"{WARN_PREFIX} The blocks of this datastream are estimated to be {perc}x " + f"{WARN_PREFIX} The blocks of this dataset are estimated to be {perc}x " "larger than the target block size " f"of {int(ctx.target_max_block_size / 1024 / 1024)} MiB. This may lead to " "out-of-memory errors during processing. Consider reducing the size of " "input files or using `.repartition(n)` to increase the number of " - "datastream blocks." + "dataset blocks." ) elif len(read_tasks) < requested_parallelism and ( len(read_tasks) < ray.available_resources().get("CPU", 1) // 2 ): logger.warning( - f"{WARN_PREFIX} The number of blocks in this datastream " + f"{WARN_PREFIX} The number of blocks in this dataset " f"({len(read_tasks)}) " f"limits its parallelism to {len(read_tasks)} concurrent tasks. " "This is much less than the number " "of available CPU slots in the cluster. Use `.repartition(n)` to " "increase the number of " - "datastream blocks." + "dataset blocks." 
) read_stage_name = f"Read{datasource.get_name()}" @@ -440,7 +440,7 @@ def read_datasource( read_op = Read(datasource, requested_parallelism, ray_remote_args, read_args) logical_plan = LogicalPlan(read_op) - return Datastream( + return Dataset( plan=ExecutionPlan(block_list, block_list.stats(), run_by_consumer=False), epoch=0, lazy=True, @@ -459,11 +459,11 @@ def read_mongo( parallelism: int = -1, ray_remote_args: Dict[str, Any] = None, **mongo_args, -) -> Datastream: - """Create an Arrow datastream from MongoDB. +) -> Dataset: + """Create an Arrow dataset from MongoDB. The data to read from is specified via the ``uri``, ``database`` and ``collection`` - of the MongoDB. The datastream is created from the results of executing + of the MongoDB. The dataset is created from the results of executing ``pipeline`` against the ``collection``. If ``pipeline`` is None, the entire ``collection`` will be read. @@ -492,7 +492,7 @@ def read_mongo( ... ) Args: - uri: The URI of the source MongoDB where the datastream will be + uri: The URI of the source MongoDB where the dataset will be read from. For the URI format, see details in https://www.mongodb.com/docs/manual/reference/connection-string/. database: The name of the database hosted in the MongoDB. This database @@ -500,7 +500,7 @@ def read_mongo( collection: The name of the collection in the database. This collection must exist otherwise ValueError will be raised. pipeline: A MongoDB pipeline, which will be executed on the given collection - with results used to create Datastream. If None, the entire collection will + with results used to create Dataset. If None, the entire collection will be read. schema: The schema used to read the collection. If None, it'll be inferred from the results of pipeline. @@ -512,7 +512,7 @@ def read_mongo( Arrow-formatted results. 
Returns: - Datastream producing Arrow records from the results of executing the pipeline + Dataset producing Arrow records from the results of executing the pipeline on the specified MongoDB collection. """ return read_datasource( @@ -539,8 +539,8 @@ def read_parquet( tensor_column_schema: Optional[Dict[str, Tuple[np.dtype, Tuple[int, ...]]]] = None, meta_provider: ParquetMetadataProvider = DefaultParquetMetadataProvider(), **arrow_parquet_args, -) -> Datastream: - """Create an Arrow datastream from parquet files. +) -> Dataset: + """Create an Arrow dataset from parquet files. Examples: >>> import ray @@ -559,7 +559,7 @@ def read_parquet( ... ("variety", pa.string())] >>> ray.data.read_parquet("example://iris.parquet", ... schema=pa.schema(fields)) - Datastream( + Dataset( num_blocks=1, num_rows=150, schema={ @@ -581,7 +581,7 @@ def read_parquet( https://arrow.apache.org/docs/python/api/filesystems.html#filesystem-implementations. columns: A list of column names to read. parallelism: The requested parallelism of the read. Parallelism may be - limited by the number of files of the datastream. + limited by the number of files of the dataset. ray_remote_args: kwargs passed to ray.remote in the read tasks. tensor_column_schema: A dict of column name --> tensor dtype and shape mappings for converting a Parquet column containing serialized @@ -595,7 +595,7 @@ def read_parquet( https://arrow.apache.org/docs/python/generated/pyarrow.dataset.Scanner.html#pyarrow.dataset.Scanner.from_fragment Returns: - Datastream producing Arrow records read from the specified paths. + Dataset producing Arrow records read from the specified paths. """ arrow_parquet_args = _resolve_parquet_args( tensor_column_schema, @@ -630,7 +630,7 @@ def read_images( mode: Optional[str] = None, include_paths: bool = False, ignore_missing_paths: bool = False, -) -> Datastream: +) -> Dataset: """Read images from the specified paths. 
Examples: @@ -638,13 +638,13 @@ def read_images( >>> path = "s3://anonymous@air-example-data-2/movie-image-small-filesize-1GB" >>> ds = ray.data.read_images(path) # doctest: +SKIP >>> ds # doctest: +SKIP - Datastream(num_blocks=200, num_rows=41979, schema={image: numpy.ndarray(ndim=3, dtype=uint8)}) + Dataset(num_blocks=200, num_rows=41979, schema={image: numpy.ndarray(ndim=3, dtype=uint8)}) If you need image file paths, set ``include_paths=True``. >>> ds = ray.data.read_images(path, include_paths=True) # doctest: +SKIP >>> ds # doctest: +SKIP - Datastream(num_blocks=200, num_rows=41979, schema={image: numpy.ndarray(ndim=3, dtype=uint8), path: string}) + Dataset(num_blocks=200, num_rows=41979, schema={image: numpy.ndarray(ndim=3, dtype=uint8), path: string}) >>> ds.take(1)[0]["path"] # doctest: +SKIP 'air-example-data-2/movie-image-small-filesize-1GB/0.jpg' @@ -667,21 +667,21 @@ def read_images( >>> partitioning = Partitioning("dir", field_names=["class"], base_dir=root) >>> ds = ray.data.read_images(root, size=(224, 224), partitioning=partitioning) # doctest: +SKIP >>> ds # doctest: +SKIP - Datastream(num_blocks=176, num_rows=94946, schema={image: TensorDtype(shape=(224, 224, 3), dtype=uint8), class: object}) + Dataset(num_blocks=176, num_rows=94946, schema={image: TensorDtype(shape=(224, 224, 3), dtype=uint8), class: object}) Args: paths: A single file/directory path or a list of file/directory paths. A list of paths can contain both files and directories. filesystem: The filesystem implementation to read from. parallelism: The requested parallelism of the read. Parallelism may be - limited by the number of files of the datastream. + limited by the number of files of the dataset. meta_provider: File metadata provider. Custom metadata providers may be able to resolve file metadata more quickly and/or accurately. ray_remote_args: kwargs passed to ray.remote in the read tasks. arrow_open_file_args: kwargs passed to ``pyarrow.fs.FileSystem.open_input_file``. 
partition_filter: Path-based partition filter, if any. Can be used - with a custom callback to read only selected partitions of a datastream. + with a custom callback to read only selected partitions of a dataset. By default, this filters out any file paths whose file extension does not match ``*.png``, ``*.jpg``, ``*.jpeg``, ``*.tiff``, ``*.bmp``, or ``*.gif``. partitioning: A :class:`~ray.data.datasource.partitioning.Partitioning` object @@ -698,7 +698,7 @@ def read_images( that are not found. Defaults to False. Returns: - A :class:`~ray.data.Datastream` producing tensors that represent the images at + A :class:`~ray.data.Dataset` producing tensors that represent the images at the specified paths. For information on working with tensors, read the :ref:`tensor data guide `. @@ -738,8 +738,8 @@ def read_parquet_bulk( ParquetBaseDatasource.file_extension_filter() ), **arrow_parquet_args, -) -> Datastream: - """Create an Arrow datastream from a large number (such as >1K) of parquet files +) -> Dataset: + """Create an Arrow dataset from a large number (such as >1K) of parquet files quickly. By default, ONLY file paths should be provided as input (i.e. no directory paths), @@ -779,7 +779,7 @@ def read_parquet_bulk( filesystem: The filesystem implementation to read from. columns: A list of column names to read. parallelism: The requested parallelism of the read. Parallelism may be - limited by the number of files of the datastream. + limited by the number of files of the dataset. ray_remote_args: kwargs passed to ray.remote in the read tasks. arrow_open_file_args: kwargs passed to ``pyarrow.fs.FileSystem.open_input_file``. @@ -794,13 +794,13 @@ def read_parquet_bulk( files. Change to ``DefaultFileMetadataProvider`` or a custom metadata provider if directory expansion and/or file metadata resolution is required. partition_filter: Path-based partition filter, if any. Can be used - with a custom callback to read only selected partitions of a datastream. 
+ with a custom callback to read only selected partitions of a dataset. By default, this filters out any file paths whose file extension does not match "*.parquet*". arrow_parquet_args: Other parquet read options to pass to pyarrow. Returns: - Datastream producing Arrow records read from the specified paths. + Dataset producing Arrow records read from the specified paths. """ arrow_parquet_args = _resolve_parquet_args( tensor_column_schema, @@ -835,8 +835,8 @@ def read_json( partitioning: Partitioning = Partitioning("hive"), ignore_missing_paths: bool = False, **arrow_json_args, -) -> Datastream: - """Create an Arrow datastream from json files. +) -> Dataset: + """Create an Arrow dataset from json files. Examples: >>> import ray @@ -864,14 +864,14 @@ def read_json( A list of paths can contain both files and directories. filesystem: The filesystem implementation to read from. parallelism: The requested parallelism of the read. Parallelism may be - limited by the number of files of the datastream. + limited by the number of files of the dataset. ray_remote_args: kwargs passed to ray.remote in the read tasks. arrow_open_stream_args: kwargs passed to pyarrow.fs.FileSystem.open_input_stream meta_provider: File metadata provider. Custom metadata providers may be able to resolve file metadata more quickly and/or accurately. partition_filter: Path-based partition filter, if any. Can be used - with a custom callback to read only selected partitions of a datastream. + with a custom callback to read only selected partitions of a dataset. By default, this filters out any file paths whose file extension does not match "*.json*". arrow_json_args: Other json read options to pass to pyarrow. @@ -882,7 +882,7 @@ def read_json( found. Defaults to False. Returns: - Datastream producing Arrow records read from the specified paths. + Dataset producing Arrow records read from the specified paths. 
""" # noqa: E501 return read_datasource( JSONDatasource(), @@ -912,8 +912,8 @@ def read_csv( partitioning: Partitioning = Partitioning("hive"), ignore_missing_paths: bool = False, **arrow_csv_args, -) -> Datastream: - r"""Create an Arrow datastream from csv files. +) -> Dataset: + r"""Create an Arrow dataset from csv files. Examples: >>> import ray @@ -968,14 +968,14 @@ def read_csv( A list of paths can contain both files and directories. filesystem: The filesystem implementation to read from. parallelism: The requested parallelism of the read. Parallelism may be - limited by the number of files of the datastream. + limited by the number of files of the dataset. ray_remote_args: kwargs passed to ray.remote in the read tasks. arrow_open_stream_args: kwargs passed to pyarrow.fs.FileSystem.open_input_stream meta_provider: File metadata provider. Custom metadata providers may be able to resolve file metadata more quickly and/or accurately. partition_filter: Path-based partition filter, if any. Can be used - with a custom callback to read only selected partitions of a datastream. + with a custom callback to read only selected partitions of a dataset. By default, this does not filter out any files. If wishing to filter out all file paths except those whose file extension matches e.g. "*.csv*", a ``FileExtensionFilter("csv")`` can be provided. @@ -987,7 +987,7 @@ def read_csv( found. Defaults to False. Returns: - Datastream producing Arrow records read from the specified paths. + Dataset producing Arrow records read from the specified paths. """ # noqa: E501 return read_datasource( CSVDatasource(), @@ -1019,8 +1019,8 @@ def read_text( partition_filter: Optional[PathPartitionFilter] = None, partitioning: Partitioning = None, ignore_missing_paths: bool = False, -) -> Datastream: - """Create a datastream from lines stored in text files. +) -> Dataset: + """Create a dataset from lines stored in text files. Examples: >>> import ray @@ -1055,7 +1055,7 @@ def read_text( found. 
Defaults to False. Returns: - Datastream producing lines of text read from the specified paths. + Dataset producing lines of text read from the specified paths. """ return read_datasource( TextDatasource(), @@ -1087,8 +1087,8 @@ def read_numpy( partitioning: Partitioning = None, ignore_missing_paths: bool = False, **numpy_load_args, -) -> Datastream: - """Create an Arrow datastream from numpy files. +) -> Dataset: + """Create an Arrow dataset from numpy files. Examples: >>> import ray @@ -1107,14 +1107,14 @@ def read_numpy( A list of paths can contain both files and directories. filesystem: The filesystem implementation to read from. parallelism: The requested parallelism of the read. Parallelism may be - limited by the number of files of the datastream. + limited by the number of files of the dataset. arrow_open_stream_args: kwargs passed to pyarrow.fs.FileSystem.open_input_stream numpy_load_args: Other options to pass to np.load. meta_provider: File metadata provider. Custom metadata providers may be able to resolve file metadata more quickly and/or accurately. partition_filter: Path-based partition filter, if any. Can be used - with a custom callback to read only selected partitions of a datastream. + with a custom callback to read only selected partitions of a dataset. By default, this filters out any file paths whose file extension does not match "*.npy*". partitioning: A :class:`~ray.data.datasource.partitioning.Partitioning` object @@ -1123,7 +1123,7 @@ def read_numpy( found. Defaults to False. Returns: - Datastream holding Tensor records read from the specified paths. + Dataset holding Tensor records read from the specified paths. 
""" return read_datasource( NumpyDatasource(), @@ -1150,8 +1150,8 @@ def read_tfrecords( partition_filter: Optional[PathPartitionFilter] = None, ignore_missing_paths: bool = False, tf_schema: Optional["schema_pb2.Schema"] = None, -) -> Datastream: - """Create a datastream from TFRecord files that contain +) -> Dataset: + """Create a dataset from TFRecord files that contain `tf.train.Example `_ messages. @@ -1177,7 +1177,7 @@ def read_tfrecords( ... writer.write(example.SerializeToString()) This function reads ``tf.train.Example`` messages into a tabular - :class:`~ray.data.Datastream`. + :class:`~ray.data.Dataset`. >>> import ray >>> ds = ray.data.read_tfrecords(path) @@ -1206,7 +1206,7 @@ def read_tfrecords( A list of paths can contain both files and directories. filesystem: The filesystem implementation to read from. parallelism: The requested parallelism of the read. Parallelism may be - limited by the number of files in the datastream. + limited by the number of files in the dataset. arrow_open_stream_args: Key-word arguments passed to ``pyarrow.fs.FileSystem.open_input_stream``. To read a compressed TFRecord file, pass the corresponding compression type (e.g. for ``GZIP`` or ``ZLIB``, use @@ -1214,16 +1214,16 @@ def read_tfrecords( meta_provider: File metadata provider. Custom metadata providers may be able to resolve file metadata more quickly and/or accurately. partition_filter: Path-based partition filter, if any. Can be used - with a custom callback to read only selected partitions of a datastream. + with a custom callback to read only selected partitions of a dataset. By default, this filters out any file paths whose file extension does not match ``"*.tfrecords*"``. ignore_missing_paths: If True, ignores any file paths in ``paths`` that are not found. Defaults to False. tf_schema: Optional TensorFlow Schema which is used to explicitly set the schema - of the underlying Datastream. + of the underlying Dataset. 
Returns: - A :class:`~ray.data.Datastream` that contains the example features. + A :class:`~ray.data.Dataset` that contains the example features. Raises: ValueError: If a file contains a message that isn't a ``tf.train.Example``. @@ -1255,15 +1255,15 @@ def read_webdataset( filerename: Optional[Union[list, callable]] = None, suffixes: Optional[Union[list, callable]] = None, verbose_open: bool = False, -) -> Datastream: - """Create a datastream from WebDataset files. +) -> Dataset: + """Create a dataset from WebDataset files. Args: paths: A single file/directory path or a list of file/directory paths. A list of paths can contain both files and directories. filesystem: The filesystem implementation to read from. parallelism: The requested parallelism of the read. Parallelism may be - limited by the number of files in the datastream. + limited by the number of files in the dataset. arrow_open_stream_args: Key-word arguments passed to ``pyarrow.fs.FileSystem.open_input_stream``. To read a compressed TFRecord file, pass the corresponding compression type (e.g. for ``GZIP`` or ``ZLIB``, use @@ -1271,7 +1271,7 @@ def read_webdataset( meta_provider: File metadata provider. Custom metadata providers may be able to resolve file metadata more quickly and/or accurately. partition_filter: Path-based partition filter, if any. Can be used - with a custom callback to read only selected partitions of a datastream. + with a custom callback to read only selected partitions of a dataset. decoder: A function or list of functions to decode the data. fileselect: A callable or list of glob patterns to select files. filerename: A function or list of tuples to rename files prior to grouping. @@ -1279,7 +1279,7 @@ def read_webdataset( verbose_open: Whether to print the file names as they are opened. Returns: - A :class:`~ray.data.Datastream` that contains the example features. + A :class:`~ray.data.Dataset` that contains the example features. 
Raises: ValueError: If a file contains a message that isn't a ``tf.train.Example``. @@ -1314,8 +1314,8 @@ def read_binary_files( partitioning: Partitioning = None, ignore_missing_paths: bool = False, output_arrow_format: bool = False, -) -> Datastream: - """Create a datastream from binary files of arbitrary contents. +) -> Dataset: + """Create a dataset from binary files of arbitrary contents. Examples: >>> import ray @@ -1329,7 +1329,7 @@ def read_binary_files( Args: paths: A single file path or a list of file paths (or directories). include_paths: Whether to include the full path of the file in the - datastream records. When specified, the stream records will be a + dataset records. When specified, the stream records will be a tuple of the file path and the file contents. filesystem: The filesystem implementation to read from. ray_remote_args: kwargs passed to ray.remote in the read tasks. @@ -1340,7 +1340,7 @@ def read_binary_files( meta_provider: File metadata provider. Custom metadata providers may be able to resolve file metadata more quickly and/or accurately. partition_filter: Path-based partition filter, if any. Can be used - with a custom callback to read only selected partitions of a datastream. + with a custom callback to read only selected partitions of a dataset. By default, this does not filter out any files. partitioning: A :class:`~ray.data.datasource.partitioning.Partitioning` object that describes how paths are organized. Defaults to ``None``. @@ -1350,7 +1350,7 @@ def read_binary_files( list format. Defaults to False. Returns: - Datastream producing records read from the specified paths. + Dataset producing records read from the specified paths. 
""" ctx = ray.data.DataContext.get_current() if ctx.strict_mode: @@ -1358,9 +1358,9 @@ def read_binary_files( if not output_arrow_format: logger.warning( - "read_binary_files() returns Datastream in Python list format as of Ray " + "read_binary_files() returns Dataset in Python list format as of Ray " "v2.4. Use read_binary_files(output_arrow_format=True) to return " - "Datastream in Arrow format.", + "Dataset in Arrow format.", ) return read_datasource( @@ -1386,7 +1386,7 @@ def read_sql( *, parallelism: int = -1, ray_remote_args: Optional[Dict[str, Any]] = None, -) -> Datastream: +) -> Dataset: """Read from a database that provides a `Python DB API2-compliant `_ connector. @@ -1404,7 +1404,7 @@ def read_sql( Examples: For examples of reading from larger databases like MySQL and PostgreSQL, see - :ref:`Reading from SQL Databases `. + :ref:`Reading from SQL Databases `. .. testcode:: @@ -1450,7 +1450,7 @@ def create_connection(): ray_remote_args: Keyword arguments passed to :func:`ray.remote` in read tasks. Returns: - A :class:`Datastream` containing the queried data. + A :class:`Dataset` containing the queried data. """ datasource = SQLDatasource(connection_factory) return read_datasource( @@ -1462,14 +1462,14 @@ def create_connection(): @PublicAPI -def from_dask(df: "dask.DataFrame") -> MaterializedDatastream: - """Create a datastream from a Dask DataFrame. +def from_dask(df: "dask.DataFrame") -> MaterializedDataset: + """Create a dataset from a Dask DataFrame. Args: df: A Dask DataFrame. Returns: - MaterializedDatastream holding Arrow records read from the DataFrame. + MaterializedDataset holding Arrow records read from the DataFrame. """ import dask @@ -1500,18 +1500,18 @@ def to_ref(df): @PublicAPI -def from_mars(df: "mars.DataFrame") -> MaterializedDatastream: - """Create a datastream from a MARS dataframe. +def from_mars(df: "mars.DataFrame") -> MaterializedDataset: + """Create a dataset from a MARS dataframe. 
Args: df: A MARS dataframe, which must be executed by MARS-on-Ray. Returns: - MaterializedDatastream holding Arrow records read from the dataframe. + MaterializedDataset holding Arrow records read from the dataframe. """ import mars.dataframe as md - ds: Datastream = md.to_ray_dataset(df) + ds: Dataset = md.to_ray_dataset(df) logical_plan = LogicalPlan(FromMars(ds.dataframe)) ds._logical_plan = logical_plan @@ -1520,14 +1520,14 @@ def from_mars(df: "mars.DataFrame") -> MaterializedDatastream: @PublicAPI -def from_modin(df: "modin.DataFrame") -> MaterializedDatastream: - """Create a datastream from a Modin dataframe. +def from_modin(df: "modin.DataFrame") -> MaterializedDataset: + """Create a dataset from a Modin dataframe. Args: df: A Modin dataframe, which must be using the Ray backend. Returns: - MaterializedDatastream holding Arrow records read from the dataframe. + MaterializedDataset holding Arrow records read from the dataframe. """ from modin.distributed.dataframe.pandas.partitions import unwrap_partitions @@ -1543,14 +1543,14 @@ def from_modin(df: "modin.DataFrame") -> MaterializedDatastream: @PublicAPI def from_pandas( dfs: Union["pandas.DataFrame", List["pandas.DataFrame"]] -) -> MaterializedDatastream: - """Create a datastream from a list of Pandas dataframes. +) -> MaterializedDataset: + """Create a dataset from a list of Pandas dataframes. Args: dfs: A Pandas dataframe or a list of Pandas dataframes. Returns: - MaterializedDatastream holding Arrow records read from the dataframes. + MaterializedDataset holding Arrow records read from the dataframes. """ import pandas as pd @@ -1570,8 +1570,8 @@ def from_pandas( @DeveloperAPI def from_pandas_refs( dfs: Union[ObjectRef["pandas.DataFrame"], List[ObjectRef["pandas.DataFrame"]]], -) -> MaterializedDatastream: - """Create a datastream from a list of Ray object references to Pandas +) -> MaterializedDataset: + """Create a dataset from a list of Ray object references to Pandas dataframes. 
Args: @@ -1579,7 +1579,7 @@ def from_pandas_refs( Ray object references to pandas dataframes. Returns: - MaterializedDatastream holding Arrow records read from the dataframes. + MaterializedDataset holding Arrow records read from the dataframes. """ if isinstance(dfs, ray.ObjectRef): dfs = [dfs] @@ -1600,10 +1600,10 @@ def from_pandas_refs( if context.enable_pandas_block: get_metadata = cached_remote_fn(get_table_block_metadata) metadata = ray.get([get_metadata.remote(df) for df in dfs]) - return MaterializedDatastream( + return MaterializedDataset( ExecutionPlan( BlockList(dfs, metadata, owned_by_consumer=False), - DatastreamStats(stages={"FromPandasRefs": metadata}, parent=None), + DatasetStats(stages={"FromPandasRefs": metadata}, parent=None), run_by_consumer=False, ), 0, @@ -1616,10 +1616,10 @@ def from_pandas_refs( res = [df_to_block.remote(df) for df in dfs] blocks, metadata = map(list, zip(*res)) metadata = ray.get(metadata) - return MaterializedDatastream( + return MaterializedDataset( ExecutionPlan( BlockList(blocks, metadata, owned_by_consumer=False), - DatastreamStats(stages={"FromPandasRefs": metadata}, parent=None), + DatasetStats(stages={"FromPandasRefs": metadata}, parent=None), run_by_consumer=False, ), 0, @@ -1629,14 +1629,14 @@ def from_pandas_refs( @PublicAPI -def from_numpy(ndarrays: Union[np.ndarray, List[np.ndarray]]) -> MaterializedDatastream: - """Create a datastream from a list of NumPy ndarrays. +def from_numpy(ndarrays: Union[np.ndarray, List[np.ndarray]]) -> MaterializedDataset: + """Create a dataset from a list of NumPy ndarrays. Args: ndarrays: A NumPy ndarray or a list of NumPy ndarrays. Returns: - MaterializedDatastream holding the given ndarrays. + MaterializedDataset holding the given ndarrays. 
""" if isinstance(ndarrays, np.ndarray): ndarrays = [ndarrays] @@ -1647,15 +1647,15 @@ def from_numpy(ndarrays: Union[np.ndarray, List[np.ndarray]]) -> MaterializedDat @DeveloperAPI def from_numpy_refs( ndarrays: Union[ObjectRef[np.ndarray], List[ObjectRef[np.ndarray]]], -) -> MaterializedDatastream: - """Create a datastream from a list of NumPy ndarray futures. +) -> MaterializedDataset: + """Create a dataset from a list of NumPy ndarray futures. Args: ndarrays: A Ray object reference to a NumPy ndarray or a list of Ray object references to NumPy ndarrays. Returns: - MaterializedDatastream holding the given ndarrays. + MaterializedDataset holding the given ndarrays. """ if isinstance(ndarrays, ray.ObjectRef): ndarrays = [ndarrays] @@ -1681,10 +1681,10 @@ def from_numpy_refs( from_numpy_refs_op = FromNumpyRefs(ndarrays) logical_plan = LogicalPlan(from_numpy_refs_op) - return MaterializedDatastream( + return MaterializedDataset( ExecutionPlan( BlockList(blocks, metadata, owned_by_consumer=False), - DatastreamStats(stages={"FromNumpyRefs": metadata}, parent=None), + DatasetStats(stages={"FromNumpyRefs": metadata}, parent=None), run_by_consumer=False, ), 0, @@ -1696,15 +1696,15 @@ def from_numpy_refs( @PublicAPI def from_arrow( tables: Union["pyarrow.Table", bytes, List[Union["pyarrow.Table", bytes]]], -) -> MaterializedDatastream: - """Create a datastream from a list of Arrow tables. +) -> MaterializedDataset: + """Create a dataset from a list of Arrow tables. Args: tables: An Arrow table, or a list of Arrow tables, or its streaming format in bytes. Returns: - MaterializedDatastream holding Arrow records from the tables. + MaterializedDataset holding Arrow records from the tables. """ import pyarrow as pa @@ -1719,15 +1719,15 @@ def from_arrow_refs( ObjectRef[Union["pyarrow.Table", bytes]], List[ObjectRef[Union["pyarrow.Table", bytes]]], ], -) -> MaterializedDatastream: - """Create a datastream from a set of Arrow tables. 
+) -> MaterializedDataset: + """Create a dataset from a set of Arrow tables. Args: tables: A Ray object reference to Arrow table, or list of Ray object references to Arrow tables, or its streaming format in bytes. Returns: - MaterializedDatastream holding Arrow records from the tables. + MaterializedDataset holding Arrow records from the tables. """ if isinstance(tables, ray.ObjectRef): tables = [tables] @@ -1736,10 +1736,10 @@ def from_arrow_refs( metadata = ray.get([get_metadata.remote(t) for t in tables]) logical_plan = LogicalPlan(FromArrowRefs(tables)) - return MaterializedDatastream( + return MaterializedDataset( ExecutionPlan( BlockList(tables, metadata, owned_by_consumer=False), - DatastreamStats(stages={"FromArrowRefs": metadata}, parent=None), + DatasetStats(stages={"FromArrowRefs": metadata}, parent=None), run_by_consumer=False, ), 0, @@ -1751,18 +1751,18 @@ def from_arrow_refs( @PublicAPI def from_spark( df: "pyspark.sql.DataFrame", *, parallelism: Optional[int] = None -) -> MaterializedDatastream: - """Create a datastream from a Spark dataframe. +) -> MaterializedDataset: + """Create a dataset from a Spark dataframe. Args: spark: A SparkSession, which must be created by RayDP (Spark-on-Ray). df: A Spark dataframe, which must be created by RayDP (Spark-on-Ray). - parallelism: The amount of parallelism to use for the datastream. + parallelism: The amount of parallelism to use for the dataset. If not provided, it will be equal to the number of partitions of the original Spark dataframe. Returns: - MaterializedDatastream holding Arrow records read from the dataframe. + MaterializedDataset holding Arrow records read from the dataframe. """ import raydp @@ -1772,8 +1772,8 @@ def from_spark( @PublicAPI def from_huggingface( dataset: Union["datasets.Dataset", "datasets.DatasetDict"], -) -> Union[MaterializedDatastream]: - """Create a datastream from a Hugging Face Datasets Dataset. 
+) -> Union[MaterializedDataset]: + """Create a dataset from a Hugging Face Datasets Dataset. This function is not parallelized, and is intended to be used with Hugging Face Datasets that are loaded into memory (as opposed @@ -1784,12 +1784,12 @@ def from_huggingface( ``IterableDataset`` is not supported. Returns: - MaterializedDatastream holding Arrow records from the Hugging Face Dataset, or a - dict of MaterializedDatastream in case ``dataset`` is a ``DatasetDict``. + MaterializedDataset holding Arrow records from the Hugging Face Dataset, or a + dict of MaterializedDataset in case ``dataset`` is a ``DatasetDict``. """ import datasets - def convert(ds: "datasets.Dataset") -> Datastream: + def convert(ds: "datasets.Dataset") -> Dataset: ray_ds = from_arrow(ds.data.table) logical_plan = LogicalPlan(FromHuggingFace(ds)) ray_ds._logical_plan = logical_plan @@ -1810,8 +1810,8 @@ def convert(ds: "datasets.Dataset") -> Datastream: @PublicAPI def from_tf( dataset: "tf.data.Dataset", -) -> MaterializedDatastream: - """Create a datastream from a TensorFlow dataset. +) -> MaterializedDataset: + """Create a dataset from a TensorFlow dataset. This function is inefficient. Use it to read small datasets or prototype. @@ -1830,7 +1830,7 @@ def from_tf( >>> dataset, _ = tfds.load('cifar10', split=["train", "test"]) # doctest: +SKIP >>> ds = ray.data.from_tf(dataset) # doctest: +SKIP >>> ds # doctest: +SKIP - Datastream(num_blocks=200, num_rows=50000, schema={id: binary, image: numpy.ndarray(shape=(32, 32, 3), dtype=uint8), label: int64}) + Dataset(num_blocks=200, num_rows=50000, schema={id: binary, image: numpy.ndarray(shape=(32, 32, 3), dtype=uint8), label: int64}) >>> ds.take(1) # doctest: +SKIP [{'id': b'train_16399', 'image': array([[[143, 96, 70], [141, 96, 72], @@ -1854,7 +1854,7 @@ def from_tf( dataset: A TensorFlow dataset. 
Returns: - A :class:`MaterializedDatastream` that contains the samples stored in the + A :class:`MaterializedDataset` that contains the samples stored in the TensorFlow dataset. """ # noqa: E501 # FIXME: `as_numpy_iterator` errors if `dataset` contains ragged tensors. @@ -1864,8 +1864,8 @@ def from_tf( @PublicAPI def from_torch( dataset: "torch.utils.data.Dataset", -) -> MaterializedDatastream: - """Create a datastream from a Torch dataset. +) -> MaterializedDataset: + """Create a dataset from a Torch dataset. This function is inefficient. Use it to read small datasets or prototype. @@ -1884,7 +1884,7 @@ def from_torch( >>> dataset = datasets.MNIST("data", download=True) # doctest: +SKIP >>> ds = ray.data.from_torch(dataset) # doctest: +SKIP >>> ds # doctest: +SKIP - Datastream(num_blocks=200, num_rows=60000, schema={item: object}) + Dataset(num_blocks=200, num_rows=60000, schema={item: object}) >>> ds.take(1) # doctest: +SKIP {"item": (, 5)} @@ -1892,7 +1892,7 @@ def from_torch( dataset: A Torch dataset. Returns: - A :class:`MaterializedDatastream` containing the Torch dataset samples. + A :class:`MaterializedDataset` containing the Torch dataset samples. """ return from_items(list(dataset)) @@ -1909,7 +1909,7 @@ def _get_read_tasks( Args: ds: Datasource to read from. - ctx: Datastream config to use. + ctx: Dataset config to use. cur_pg: The current placement group, if any. parallelism: The user-requested parallelism, or -1 for autodetection. kwargs: Additional kwargs to pass to the reader. diff --git a/python/ray/data/row.py b/python/ray/data/row.py index 37252fd194eb..b7c382736f27 100644 --- a/python/ray/data/row.py +++ b/python/ray/data/row.py @@ -7,7 +7,7 @@ @Deprecated("TableRow is no longer part of the public Ray Data API.") class TableRow(Mapping): """ - A dict-like row of a tabular ``Datastream``. + A dict-like row of a tabular ``Dataset``. 
This implements the dictionary mapping interface, but provides more efficient access with less data copying than converting Arrow Tables diff --git a/python/ray/data/tests/block_batching/test_util.py b/python/ray/data/tests/block_batching/test_util.py index 990e06529e7f..036331b0d272 100644 --- a/python/ray/data/tests/block_batching/test_util.py +++ b/python/ray/data/tests/block_batching/test_util.py @@ -49,15 +49,15 @@ def test_blocks_to_batches(block_size, drop_last): full_batches = 0 leftover_batches = 0 - datastream_size = block_size * num_blocks + dataset_size = block_size * num_blocks for batch in batch_iter: if len(batch.data) == batch_size: full_batches += 1 - if len(batch.data) == (datastream_size % batch_size): + if len(batch.data) == (dataset_size % batch_size): leftover_batches += 1 assert leftover_batches == 1 - assert full_batches == (datastream_size // batch_size) + assert full_batches == (dataset_size // batch_size) assert [batch.batch_idx for batch in batch_iter] == list(range(len(batch_iter))) diff --git a/python/ray/data/tests/conftest.py b/python/ray/data/tests/conftest.py index 2e132c0e4ac6..2b0eebdace36 100644 --- a/python/ray/data/tests/conftest.py +++ b/python/ray/data/tests/conftest.py @@ -173,15 +173,14 @@ def _get_write_path_for_block( base_path, *, filesystem=None, - datastream_uuid=None, + dataset_uuid=None, block=None, block_index=None, file_format=None, ): num_rows = BlockAccessor.for_block(block).num_rows() suffix = ( - f"{block_index:06}_{num_rows:02}_{datastream_uuid}" - f".test.{file_format}" + f"{block_index:06}_{num_rows:02}_{dataset_uuid}" f".test.{file_format}" ) return posixpath.join(base_path, suffix) @@ -269,7 +268,7 @@ def _assert_base_partitioned_ds( actual_input_files = ds.input_files() assert len(actual_input_files) == num_input_files, actual_input_files - # For Datastreams with long string representations, the format will include + # For Datasets with long string representations, the format will include # whitespace 
and newline characters, which is difficult to generalize # without implementing the formatting logic again (from # `ExecutionPlan.get_plan_as_string()`). Therefore, we remove whitespace @@ -279,12 +278,12 @@ def _remove_whitespace(ds_str): ds_str = ds_str.replace(c, "") return ds_str - assert "Datastream(num_blocks={},num_rows={},schema={})".format( + assert "Dataset(num_blocks={},num_rows={},schema={})".format( num_input_files, num_rows, _remove_whitespace(schema), ) == _remove_whitespace(str(ds)), ds - assert "Datastream(num_blocks={},num_rows={},schema={})".format( + assert "Dataset(num_blocks={},num_rows={},schema={})".format( num_input_files, num_rows, _remove_whitespace(schema), @@ -386,7 +385,7 @@ def enable_streaming_executor(): ctx.use_streaming_executor = use_streaming_executor -# ===== Pandas datastream formats ===== +# ===== Pandas dataset formats ===== @pytest.fixture(scope="function") def ds_pandas_single_column_format(ray_start_regular_shared): in_df = pd.DataFrame({"column_1": [1, 2, 3, 4]}) @@ -405,7 +404,7 @@ def ds_pandas_list_multi_column_format(ray_start_regular_shared): yield ray.data.from_pandas([in_df] * 4) -# ===== Arrow datastream formats ===== +# ===== Arrow dataset formats ===== @pytest.fixture(scope="function") def ds_arrow_single_column_format(ray_start_regular_shared): yield ray.data.from_arrow(pa.table({"column_1": [1, 2, 3, 4]})) @@ -441,7 +440,7 @@ def ds_list_arrow_multi_column_format(ray_start_regular_shared): yield ray.data.from_arrow([pa.table({"column_1": [1], "column_2": [1]})] * 4) -# ===== Numpy datastream formats ===== +# ===== Numpy dataset formats ===== @pytest.fixture(scope="function") def ds_numpy_single_column_tensor_format(ray_start_regular_shared): yield ray.data.from_numpy(np.arange(16).reshape((4, 2, 2))) diff --git a/python/ray/data/tests/mock_server.py b/python/ray/data/tests/mock_server.py index c2a81c2b5546..42dffd7f0690 100644 --- a/python/ray/data/tests/mock_server.py +++ 
b/python/ray/data/tests/mock_server.py @@ -67,7 +67,7 @@ def stop_process(process): # being unreachable). This appears to only be an issue when using the tmp_dir # fixture as the S3 dir path. We should fix this since "session" scope should # reduce a lot of the per-test overhead (2x faster execution for IO methods in -# test_datastream.py). +# test_dataset.py). @pytest.fixture(scope="function") def s3_server(): host = "localhost" diff --git a/python/ray/data/tests/preprocessors/test_preprocessors.py b/python/ray/data/tests/preprocessors/test_preprocessors.py index 85485198dfae..3fa4c8a27d81 100644 --- a/python/ray/data/tests/preprocessors/test_preprocessors.py +++ b/python/ray/data/tests/preprocessors/test_preprocessors.py @@ -187,7 +187,7 @@ def test_pipeline_fail(): class FittablePreprocessor(Preprocessor): _is_fittable = True - def _fit(self, datastream): + def _fit(self, dataset): self.fitted_ = True return self @@ -234,7 +234,7 @@ def test_transform_all_formats(create_dummy_preprocessors, pipeline, dataset_for if pipeline: patcher = patch.object(ray.data.dataset_pipeline.DatasetPipeline, "map_batches") else: - patcher = patch.object(ray.data.datastream.Datastream, "map_batches") + patcher = patch.object(ray.data.dataset.Dataset, "map_batches") with patcher as mock_map_batches: _apply_transform(with_pandas, ds) @@ -263,7 +263,7 @@ def test_transform_all_formats(create_dummy_preprocessors, pipeline, dataset_for def test_numpy_pandas_support_transform_batch_wrong_format(create_dummy_preprocessors): - # Case 1: simple datastream. No support + # Case 1: simple dataset. 
No support ( with_nothing, with_pandas, @@ -290,7 +290,7 @@ def test_numpy_pandas_support_transform_batch_wrong_format(create_dummy_preproce def test_numpy_pandas_support_transform_batch_pandas(create_dummy_preprocessors): - # Case 2: pandas datastream + # Case 2: pandas dataset ( with_nothing, with_pandas, @@ -328,7 +328,7 @@ def test_numpy_pandas_support_transform_batch_pandas(create_dummy_preprocessors) def test_numpy_pandas_support_transform_batch_arrow(create_dummy_preprocessors): - # Case 3: arrow datastream + # Case 3: arrow dataset ( with_nothing, with_pandas, @@ -371,7 +371,7 @@ def test_numpy_pandas_support_transform_batch_arrow(create_dummy_preprocessors): def test_numpy_pandas_support_transform_batch_tensor(create_dummy_preprocessors): - # Case 4: tensor datastream created by from numpy data directly + # Case 4: tensor dataset created by from numpy data directly ( with_nothing, with_pandas, diff --git a/python/ray/data/tests/preprocessors/test_torch.py b/python/ray/data/tests/preprocessors/test_torch.py index 2f2d99a69462..cf514f5cc11e 100644 --- a/python/ray/data/tests/preprocessors/test_torch.py +++ b/python/ray/data/tests/preprocessors/test_torch.py @@ -32,7 +32,7 @@ def __repr__(self): ], ) def test_transform_images(self, transform): - datastream = ray.data.from_items( + dataset = ray.data.from_items( [ {"image": np.zeros((32, 32, 3)), "label": 0}, {"image": np.zeros((32, 32, 3)), "label": 1}, @@ -40,19 +40,19 @@ def test_transform_images(self, transform): ) preprocessor = TorchVisionPreprocessor(columns=["image"], transform=transform) - transformed_datastream = preprocessor.transform(datastream) + transformed_dataset = preprocessor.transform(dataset) - assert transformed_datastream.schema().names == ["image", "label"] + assert transformed_dataset.schema().names == ["image", "label"] transformed_images = [ - record["image"] for record in transformed_datastream.take_all() + record["image"] for record in transformed_dataset.take_all() ] assert 
all(image.shape == (3, 32, 32) for image in transformed_images) assert all(image.dtype == np.double for image in transformed_images) - labels = {record["label"] for record in transformed_datastream.take_all()} + labels = {record["label"] for record in transformed_dataset.take_all()} assert labels == {0, 1} def test_batch_transform_images(self): - datastream = ray.data.from_items( + dataset = ray.data.from_items( [ {"image": np.zeros((32, 32, 3)), "label": 0}, {"image": np.zeros((32, 32, 3)), "label": 1}, @@ -70,19 +70,19 @@ def test_batch_transform_images(self): columns=["image"], transform=transform, batched=True ) - transformed_datastream = preprocessor.transform(datastream) + transformed_dataset = preprocessor.transform(dataset) - assert transformed_datastream.schema().names == ["image", "label"] + assert transformed_dataset.schema().names == ["image", "label"] transformed_images = [ - record["image"] for record in transformed_datastream.take_all() + record["image"] for record in transformed_dataset.take_all() ] assert all(image.shape == (3, 64, 64) for image in transformed_images) assert all(image.dtype == np.double for image in transformed_images) - labels = {record["label"] for record in transformed_datastream.take_all()} + labels = {record["label"] for record in transformed_dataset.take_all()} assert labels == {0, 1} def test_transform_ragged_images(self): - datastream = ray.data.from_items( + dataset = ray.data.from_items( [ {"image": np.zeros((16, 16, 3)), "label": 0}, {"image": np.zeros((32, 32, 3)), "label": 1}, @@ -91,22 +91,22 @@ def test_transform_ragged_images(self): transform = transforms.ToTensor() preprocessor = TorchVisionPreprocessor(columns=["image"], transform=transform) - transformed_datastream = preprocessor.transform(datastream) + transformed_dataset = preprocessor.transform(dataset) - assert transformed_datastream.schema().names == ["image", "label"] + assert transformed_dataset.schema().names == ["image", "label"] transformed_images = [ - 
record["image"] for record in transformed_datastream.take_all() + record["image"] for record in transformed_dataset.take_all() ] assert sorted(image.shape for image in transformed_images) == [ (3, 16, 16), (3, 32, 32), ] assert all(image.dtype == np.double for image in transformed_images) - labels = {record["label"] for record in transformed_datastream.take_all()} + labels = {record["label"] for record in transformed_dataset.take_all()} assert labels == {0, 1} def test_invalid_transform_raises_value_error(self): - datastream = ray.data.from_items( + dataset = ray.data.from_items( [ {"image": np.zeros((32, 32, 3)), "label": 0}, {"image": np.zeros((32, 32, 3)), "label": 1}, @@ -116,7 +116,7 @@ def test_invalid_transform_raises_value_error(self): preprocessor = TorchVisionPreprocessor(columns=["image"], transform=transform) with pytest.raises(ValueError): - preprocessor.transform(datastream).materialize() + preprocessor.transform(dataset).materialize() if __name__ == "__main__": diff --git a/python/ray/data/tests/test_all_to_all.py b/python/ray/data/tests/test_all_to_all.py index e0df35390624..625e389f5c9f 100644 --- a/python/ray/data/tests/test_all_to_all.py +++ b/python/ray/data/tests/test_all_to_all.py @@ -217,13 +217,13 @@ def test_repartition_shuffle_arrow(ray_start_regular_shared): assert large._block_num_rows() == [500] * 20 -def test_grouped_datastream_repr(ray_start_regular_shared): +def test_grouped_dataset_repr(ray_start_regular_shared): ds = ray.data.from_items([{"key": "spam"}, {"key": "ham"}, {"key": "spam"}]) - assert repr(ds.groupby("key")) == f"GroupedData(datastream={ds!r}, key='key')" + assert repr(ds.groupby("key")) == f"GroupedData(dataset={ds!r}, key='key')" def test_groupby_arrow(ray_start_regular_shared, use_push_based_shuffle): - # Test empty datastream. + # Test empty dataset. 
agg_ds = ray.data.range(10).filter(lambda r: r["id"] > 10).groupby("value").count() assert agg_ds.count() == 0 @@ -405,7 +405,7 @@ def _to_pandas(ds): ds = _to_pandas(ds) assert ds.sum("A") == 4950 - # Test empty datastream + # Test empty dataset ds = ray.data.range(10) if ds_format == "pandas": ds = _to_pandas(ds) @@ -765,7 +765,7 @@ def test_groupby_agg_bad_on(ray_start_regular_shared): ray.data.from_pandas(df).groupby("A").mean("D").materialize() with pytest.raises(ValueError): ray.data.from_pandas(df).groupby("A").mean(["B", "D"]).materialize() - # Columns for simple Datastream. + # Columns for simple Dataset. with pytest.raises(ValueError): ray.data.from_items(xs).groupby(lambda x: x % 3 == 0).mean("A").materialize() @@ -783,7 +783,7 @@ def test_groupby_agg_bad_on(ray_start_regular_shared): ray.data.from_pandas(df).mean("D").materialize() with pytest.raises(ValueError): ray.data.from_pandas(df).mean(["B", "D"]).materialize() - # Columns for simple Datastream. + # Columns for simple Dataset. with pytest.raises(ValueError): ray.data.from_items(xs).mean("A").materialize() @@ -956,7 +956,7 @@ def test_groupby_simple(ray_start_regular_shared): ("None", 3), ] - # Test empty datastream. + # Test empty dataset. 
ds = ray.data.from_items([]) agg_ds = ds.groupby(lambda r: r[0]).aggregate( AggregateFn( @@ -1049,7 +1049,7 @@ def test_groupby_simple_sum(ray_start_regular_shared, num_parts): @pytest.mark.skipif(STRICT_MODE, reason="Deprecated in strict mode") -def test_groupby_map_groups_for_empty_datastream(ray_start_regular_shared): +def test_groupby_map_groups_for_empty_dataset(ray_start_regular_shared): ds = ray.data.from_items([]) mapped = ds.groupby(lambda x: x % 3).map_groups(lambda x: [min(x) * min(x)]) assert mapped.count() == 0 @@ -1368,7 +1368,7 @@ def test_groupby_simple_mean(ray_start_regular_shared, num_parts): # Test built-in global mean aggregation assert ray.data.from_items(xs).repartition(num_parts).mean() == 49.5 - # Test empty datastream + # Test empty dataset assert ray.data.range(10).filter(lambda r: r > 10).mean() is None # Test built-in global mean aggregation with nans @@ -1468,7 +1468,7 @@ def test_groupby_simple_std(ray_start_regular_shared, num_parts): pd.Series(xs).std(ddof=0), ) - # Test empty datastream + # Test empty dataset assert ray.data.from_items([]).std() is None # Test edge cases assert ray.data.from_items([3]).std() == 0 @@ -1673,7 +1673,7 @@ def range(n, parallelism=200): r2 = range(100).random_shuffle().take(999) assert r1 != r2, (r1, r2) - # Test empty datastream. + # Test empty dataset. ds = ray.data.from_items([]) r1 = ds.random_shuffle() assert r1.count() == 0 @@ -1741,7 +1741,7 @@ def test_random_shuffle_with_custom_resource(ray_start_cluster): ray.init(cluster.address) - # Run datastream in "bar" nodes. + # Run dataset in "bar" nodes. 
ds = ray.data.read_parquet( "example://parquet_images_mini", parallelism=2, diff --git a/python/ray/data/tests/test_consumption.py b/python/ray/data/tests/test_consumption.py index 2c6b879a2caa..25b979175eae 100644 --- a/python/ray/data/tests/test_consumption.py +++ b/python/ray/data/tests/test_consumption.py @@ -13,11 +13,11 @@ import ray from ray.data._internal.block_builder import BlockBuilder -from ray.data._internal.datastream_logger import DatastreamLogger +from ray.data._internal.dataset_logger import DatasetLogger from ray.data._internal.lazy_block_list import LazyBlockList from ray.data.block import BlockAccessor, BlockMetadata from ray.data.context import DataContext -from ray.data.datastream import Dataset, MaterializedDatastream, _sliding_window +from ray.data.dataset import Dataset, MaterializedDataset, _sliding_window from ray.data.datasource.datasource import Datasource, ReadTask from ray.data.datasource.csv_datasource import CSVDatasource from ray.data.tests.conftest import * # noqa @@ -67,7 +67,7 @@ def test_dataset_lineage_serialization(shutdown_only): ds = ds.random_shuffle() epoch = ds._get_epoch() uuid = ds._get_uuid() - plan_uuid = ds._plan._datastream_uuid + plan_uuid = ds._plan._dataset_uuid serialized_ds = ds.serialize_lineage() # Confirm that the original Dataset was properly copied before clearing/mutating. @@ -84,7 +84,7 @@ def test_dataset_lineage_serialization(shutdown_only): # Check Dataset state. assert ds._get_epoch() == epoch assert ds._get_uuid() == uuid - assert ds._plan._datastream_uuid == plan_uuid + assert ds._plan._dataset_uuid == plan_uuid # Check Dataset content. 
assert ds.count() == 10 assert sorted(extract_values("id", ds.take())) == list(range(2, 12)) @@ -172,7 +172,7 @@ def test_empty_dataset(ray_start_regular_shared): ds = ds.materialize() assert ( str(ds) - == "MaterializedDatastream(num_blocks=1, num_rows=0, schema=Unknown schema)" + == "MaterializedDataset(num_blocks=1, num_rows=0, schema=Unknown schema)" ) # Test map on empty dataset. @@ -208,10 +208,10 @@ def inc(x): ds = ray.data.range(1) ds = ds.map(inc) assert not ds.is_fully_executed() - assert not isinstance(ds, MaterializedDatastream) + assert not isinstance(ds, MaterializedDataset) ds2 = ds.materialize() assert ds2.is_fully_executed() - assert isinstance(ds2, MaterializedDatastream) + assert isinstance(ds2, MaterializedDataset) assert not ds.is_fully_executed() for _ in range(10): @@ -226,13 +226,12 @@ def test_schema(ray_start_regular_shared): ds3 = ds3.materialize() ds4 = ds3.map(lambda x: {"a": "hi", "b": 1.0}).limit(5).repartition(1) ds4 = ds4.materialize() - assert str(ds2) == "Datastream(num_blocks=10, num_rows=10, schema={id: int64})" + assert str(ds2) == "Dataset(num_blocks=10, num_rows=10, schema={id: int64})" assert ( - str(ds3) - == "MaterializedDatastream(num_blocks=5, num_rows=10, schema={id: int64})" + str(ds3) == "MaterializedDataset(num_blocks=5, num_rows=10, schema={id: int64})" ) assert ( - str(ds4) == "MaterializedDatastream(num_blocks=1, num_rows=5, " + str(ds4) == "MaterializedDataset(num_blocks=1, num_rows=5, " "schema={a: string, b: double})" ) @@ -286,49 +285,48 @@ def check_num_computed(expected): def test_dataset_repr(ray_start_regular_shared): ds = ray.data.range(10, parallelism=10) - assert repr(ds) == "Datastream(num_blocks=10, num_rows=10, schema={id: int64})" + assert repr(ds) == "Dataset(num_blocks=10, num_rows=10, schema={id: int64})" ds = ds.map_batches(lambda x: x) assert repr(ds) == ( "MapBatches()\n" - "+- Datastream(num_blocks=10, num_rows=10, schema={id: int64})" + "+- Dataset(num_blocks=10, num_rows=10, schema={id: 
int64})" ) ds = ds.filter(lambda x: x["id"] > 0) assert repr(ds) == ( "Filter\n" "+- MapBatches()\n" - " +- Datastream(num_blocks=10, num_rows=10, schema={id: int64})" + " +- Dataset(num_blocks=10, num_rows=10, schema={id: int64})" ) ds = ds.random_shuffle() assert repr(ds) == ( "RandomShuffle\n" "+- Filter\n" " +- MapBatches()\n" - " +- Datastream(num_blocks=10, num_rows=10, schema={id: int64})" + " +- Dataset(num_blocks=10, num_rows=10, schema={id: int64})" ) ds = ds.materialize() assert ( - repr(ds) - == "MaterializedDatastream(num_blocks=10, num_rows=9, schema={id: int64})" + repr(ds) == "MaterializedDataset(num_blocks=10, num_rows=9, schema={id: int64})" ) ds = ds.map_batches(lambda x: x) assert repr(ds) == ( "MapBatches()\n" - "+- Datastream(num_blocks=10, num_rows=9, schema={id: int64})" + "+- Dataset(num_blocks=10, num_rows=9, schema={id: int64})" ) ds1, ds2 = ds.split(2) assert ( - repr(ds1) == f"MaterializedDatastream(num_blocks=5, num_rows={ds1.count()}, " + repr(ds1) == f"MaterializedDataset(num_blocks=5, num_rows={ds1.count()}, " "schema={id: int64})" ) assert ( - repr(ds2) == f"MaterializedDatastream(num_blocks=5, num_rows={ds2.count()}, " + repr(ds2) == f"MaterializedDataset(num_blocks=5, num_rows={ds2.count()}, " "schema={id: int64})" ) ds3 = ds1.union(ds2) - assert repr(ds3) == "Datastream(num_blocks=10, num_rows=9, schema={id: int64})" + assert repr(ds3) == "Dataset(num_blocks=10, num_rows=9, schema={id: int64})" ds = ds.zip(ds3) assert repr(ds) == ( - "Zip\n" "+- Datastream(num_blocks=10, num_rows=9, schema={id: int64})" + "Zip\n" "+- Dataset(num_blocks=10, num_rows=9, schema={id: int64})" ) def my_dummy_fn(x): @@ -338,7 +336,7 @@ def my_dummy_fn(x): ds = ds.map_batches(my_dummy_fn) assert repr(ds) == ( "MapBatches(my_dummy_fn)\n" - "+- Datastream(num_blocks=10, num_rows=10, schema={id: int64})" + "+- Dataset(num_blocks=10, num_rows=10, schema={id: int64})" ) @@ -1296,11 +1294,8 @@ def test_column_name_type_check(ray_start_regular_shared): df = 
pd.DataFrame({"1": np.random.rand(10), "a": np.random.rand(10)}) ds = ray.data.from_pandas(df) expected_str = ( - "MaterializedDatastream(\n" - " num_blocks=1,\n" - " num_rows=10,\n" - " schema={1: float64, a: float64}\n" - ")" + "MaterializedDataset(num_blocks=1, num_rows=10, " + "schema={1: float64, a: float64})" ) assert str(ds) == expected_str, str(ds) df = pd.DataFrame({1: np.random.rand(10), "a": np.random.rand(10)}) @@ -1552,7 +1547,7 @@ def test_dataset_retry_exceptions(ray_start_regular, local_path): path1 = os.path.join(local_path, "test1.csv") df1.to_csv(path1, index=False, storage_options={}) ds1 = ray.data.read_datasource(FlakyCSVDatasource(), parallelism=1, paths=path1) - ds1.write_datasource(FlakyCSVDatasource(), path=local_path, datastream_uuid="data") + ds1.write_datasource(FlakyCSVDatasource(), path=local_path, dataset_uuid="data") assert df1.equals( pd.read_csv(os.path.join(local_path, "data_000000.csv"), storage_options={}) ) @@ -1669,8 +1664,8 @@ def test_dataset_schema_after_read_stats(ray_start_cluster): def test_dataset_plan_as_string(ray_start_cluster): ds = ray.data.read_parquet("example://iris.parquet") - assert ds._plan.get_plan_as_string("Datastream") == ( - "Datastream(\n" + assert ds._plan.get_plan_as_string("Dataset") == ( + "Dataset(\n" " num_blocks=1,\n" " num_rows=150,\n" " schema={\n" @@ -1684,13 +1679,13 @@ def test_dataset_plan_as_string(ray_start_cluster): ) for _ in range(5): ds = ds.map_batches(lambda x: x) - assert ds._plan.get_plan_as_string("Datastream") == ( + assert ds._plan.get_plan_as_string("Dataset") == ( "MapBatches()\n" "+- MapBatches()\n" " +- MapBatches()\n" " +- MapBatches()\n" " +- MapBatches()\n" - " +- Datastream(\n" + " +- Dataset(\n" " num_blocks=1,\n" " num_rows=150,\n" " schema={\n" @@ -1721,7 +1716,7 @@ def test_warning_execute_with_no_cpu(ray_start_cluster): cluster = ray_start_cluster cluster.add_node(num_cpus=0) - logger = DatastreamLogger("ray.data._internal.plan").get_logger() + logger = 
DatasetLogger("ray.data._internal.plan").get_logger() with patch.object( logger, "warning", @@ -1752,7 +1747,7 @@ def test_nowarning_execute_with_cpu(ray_start_cluster): # Create one node with CPUs to avoid triggering the Dataset warning ray.init(ray_start_cluster.address) - logger = DatastreamLogger("ray.data._internal.plan").get_logger() + logger = DatasetLogger("ray.data._internal.plan").get_logger() with patch.object( logger, "warning", diff --git a/python/ray/data/tests/test_execution_optimizer.py b/python/ray/data/tests/test_execution_optimizer.py index 2a140fdbe0cd..c0e575b877e5 100644 --- a/python/ray/data/tests/test_execution_optimizer.py +++ b/python/ray/data/tests/test_execution_optimizer.py @@ -49,7 +49,7 @@ _op_name_white_list, ) from ray.data._internal.planner.planner import Planner -from ray.data._internal.stats import DatastreamStats +from ray.data._internal.stats import DatasetStats from ray.data.aggregate import Count from ray.data.datasource.parquet_datasource import ParquetDatasource @@ -287,7 +287,7 @@ def test_repartition_e2e( ): def _check_repartition_usage_and_stats(ds): _check_usage_record(["ReadRange", "Repartition"]) - ds_stats: DatastreamStats = ds._plan.stats() + ds_stats: DatasetStats = ds._plan.stats() if shuffle: assert ds_stats.base_name == "DoRead->Repartition" assert "DoRead->RepartitionMap" in ds_stats.stages @@ -1261,7 +1261,7 @@ def test_from_huggingface_e2e(ray_start_regular_shared, enable_optimizer): assert isinstance(ray_datasets, dict) for ds_key, ds in ray_datasets.items(): - assert isinstance(ds, ray.data.Datastream) + assert isinstance(ds, ray.data.Dataset) # `ds.take_all()` triggers execution with new backend, which is # needed for checking operator usage below. 
assert len(ds.take_all()) > 0 @@ -1276,7 +1276,7 @@ def test_from_huggingface_e2e(ray_start_regular_shared, enable_optimizer): _check_usage_record(["FromHuggingFace"]) ray_dataset = ray.data.from_huggingface(data["train"]) - assert isinstance(ray_dataset, ray.data.Datastream) + assert isinstance(ray_dataset, ray.data.Dataset) assert len(ray_dataset.take_all()) > 0 assert "FromArrowRefs" in ray_dataset.stats() assert ray_dataset._plan._logical_plan.dag.name == "FromHuggingFace" @@ -1362,7 +1362,7 @@ def test_blocks_to_input_buffer_op_name( ray_start_regular_shared, enable_streaming_executor, ): - ds: ray.data.Datastream = ray.data.range(10) + ds: ray.data.Dataset = ray.data.range(10) blocks, _, _ = ds._plan._optimize() assert hasattr(blocks, "_tasks"), blocks physical_op = _blocks_to_input_buffer(blocks, owns_blocks=False) diff --git a/python/ray/data/tests/test_huggingface.py b/python/ray/data/tests/test_huggingface.py index 31057ac633b9..3e46c8dc10bc 100644 --- a/python/ray/data/tests/test_huggingface.py +++ b/python/ray/data/tests/test_huggingface.py @@ -17,7 +17,7 @@ def test_huggingface(ray_start_regular_shared): ) ray_dataset = ray.data.from_huggingface(data["train"]) - assert isinstance(ray_dataset, ray.data.Datastream) + assert isinstance(ray_dataset, ray.data.Dataset) assert ray.get(ray_dataset.to_arrow_refs())[0].equals(data["train"].data.table) diff --git a/python/ray/data/tests/test_logger.py b/python/ray/data/tests/test_logger.py index e077159d56fa..c15da873250d 100644 --- a/python/ray/data/tests/test_logger.py +++ b/python/ray/data/tests/test_logger.py @@ -8,18 +8,18 @@ from datetime import datetime import ray -from ray.data._internal.datastream_logger import DatastreamLogger +from ray.data._internal.dataset_logger import DatasetLogger -def test_datastream_logger(shutdown_only): +def test_dataset_logger(shutdown_only): ray.init() log_name, msg = "test_name", "test_message_1234" - logger = DatastreamLogger(log_name) + logger = DatasetLogger(log_name) 
logger.get_logger().info(msg) # Read from log file, and parse each component of emitted log row session_dir = ray._private.worker._global_node.get_session_dir_path() - log_file_path = os.path.join(session_dir, DatastreamLogger.DEFAULT_DATASET_LOG_PATH) + log_file_path = os.path.join(session_dir, DatasetLogger.DEFAULT_DATASET_LOG_PATH) with open(log_file_path, "r") as f: raw_logged_msg = f.read() ( diff --git a/python/ray/data/tests/test_mongo.py b/python/ray/data/tests/test_mongo.py index 276f6933f3c6..c52e43f4043d 100644 --- a/python/ray/data/tests/test_mongo.py +++ b/python/ray/data/tests/test_mongo.py @@ -78,7 +78,7 @@ def test_read_write_mongo(ray_start_regular_shared, start_mongo): ) assert ds._block_num_rows() == [3, 2] assert str(ds) == ( - "Datastream(\n" + "Dataset(\n" " num_blocks=2,\n" " num_rows=5,\n" " schema={float_field: double, int_field: int32}\n" @@ -96,7 +96,7 @@ def test_read_write_mongo(ray_start_regular_shared, start_mongo): ) assert ds._block_num_rows() == [3, 2] assert str(ds) == ( - "Datastream(\n" + "Dataset(\n" " num_blocks=2,\n" " num_rows=5,\n" " schema={_id: fixed_size_binary[12], float_field: double, " @@ -115,7 +115,7 @@ def test_read_write_mongo(ray_start_regular_shared, start_mongo): ) assert ds._block_num_rows() == [2, 1] assert str(ds) == ( - "Datastream(\n" + "Dataset(\n" " num_blocks=2,\n" " num_rows=3,\n" " schema={_id: fixed_size_binary[12], float_field: double, " @@ -131,7 +131,7 @@ def test_read_write_mongo(ray_start_regular_shared, start_mongo): collection=foo_collection, ) assert str(ds) == ( - "Datastream(\n" + "Dataset(\n" " num_blocks=5,\n" " num_rows=5,\n" " schema={_id: fixed_size_binary[12], float_field: double, " @@ -148,7 +148,7 @@ def test_read_write_mongo(ray_start_regular_shared, start_mongo): parallelism=1000, ) assert str(ds) == ( - "Datastream(\n" + "Dataset(\n" " num_blocks=5,\n" " num_rows=5,\n" " schema={_id: fixed_size_binary[12], float_field: double, " @@ -211,7 +211,7 @@ def 
test_mongo_datasource(ray_start_regular_shared, start_mongo): ).materialize() assert ds._block_num_rows() == [3, 2] assert str(ds) == ( - "MaterializedDatastream(\n" + "MaterializedDataset(\n" " num_blocks=2,\n" " num_rows=5,\n" " schema={float_field: double, int_field: int32}\n" @@ -230,7 +230,7 @@ def test_mongo_datasource(ray_start_regular_shared, start_mongo): ).materialize() assert ds._block_num_rows() == [3, 2] assert str(ds) == ( - "MaterializedDatastream(\n" + "MaterializedDataset(\n" " num_blocks=2,\n" " num_rows=5,\n" " schema={_id: fixed_size_binary[12], float_field: double, " @@ -247,7 +247,7 @@ def test_mongo_datasource(ray_start_regular_shared, start_mongo): collection=foo_collection, ).materialize() assert str(ds) == ( - "MaterializedDatastream(\n" + "MaterializedDataset(\n" " num_blocks=5,\n" " num_rows=5,\n" " schema={_id: fixed_size_binary[12], float_field: double, " @@ -265,7 +265,7 @@ def test_mongo_datasource(ray_start_regular_shared, start_mongo): collection=foo_collection, ) assert str(ds) == ( - "Datastream(\n" + "Dataset(\n" " num_blocks=5,\n" " num_rows=5,\n" " schema={_id: fixed_size_binary[12], float_field: double, " @@ -285,7 +285,7 @@ def test_mongo_datasource(ray_start_regular_shared, start_mongo): ) assert ds._block_num_rows() == [2, 1] assert str(ds) == ( - "Datastream(\n" + "Dataset(\n" " num_blocks=2,\n" " num_rows=3,\n" " schema={_id: fixed_size_binary[12], float_field: double, " diff --git a/python/ray/data/tests/test_numpy.py b/python/ray/data/tests/test_numpy.py index cbd1c7378b94..4c9565ba5234 100644 --- a/python/ray/data/tests/test_numpy.py +++ b/python/ray/data/tests/test_numpy.py @@ -118,7 +118,7 @@ def test_numpy_roundtrip(ray_start_regular_shared, fs, data_path): ds.write_numpy(data_path, filesystem=fs, column="data") ds = ray.data.read_numpy(data_path, filesystem=fs) assert str(ds) == ( - "Datastream(\n" + "Dataset(\n" " num_blocks=2,\n" " num_rows=?,\n" " schema={data: numpy.ndarray(shape=(1,), dtype=int64)}\n" @@ 
-135,7 +135,7 @@ def test_numpy_read(ray_start_regular_shared, tmp_path): np.save(os.path.join(path, "test.npy"), np.expand_dims(np.arange(0, 10), 1)) ds = ray.data.read_numpy(path) assert str(ds) == ( - "Datastream(\n" + "Dataset(\n" " num_blocks=1,\n" " num_rows=10,\n" " schema={data: numpy.ndarray(shape=(1,), dtype=int64)}\n" @@ -153,7 +153,7 @@ def test_numpy_read(ray_start_regular_shared, tmp_path): assert ds.num_blocks() == 1 assert ds.count() == 10 assert str(ds) == ( - "Datastream(\n" + "Dataset(\n" " num_blocks=1,\n" " num_rows=10,\n" " schema={data: numpy.ndarray(shape=(1,), dtype=int64)}\n" @@ -191,7 +191,7 @@ def test_numpy_read_meta_provider(ray_start_regular_shared, tmp_path): np.save(path, np.expand_dims(np.arange(0, 10), 1)) ds = ray.data.read_numpy(path, meta_provider=FastFileMetadataProvider()) assert str(ds) == ( - "Datastream(\n" + "Dataset(\n" " num_blocks=1,\n" " num_rows=10,\n" " schema={data: numpy.ndarray(shape=(1,), dtype=int64)}\n" diff --git a/python/ray/data/tests/test_parquet.py b/python/ray/data/tests/test_parquet.py index 5386dd22e32f..fcc0337cc5e9 100644 --- a/python/ray/data/tests/test_parquet.py +++ b/python/ray/data/tests/test_parquet.py @@ -146,11 +146,11 @@ def test_parquet_read_basic(ray_start_regular_shared, fs, data_path): assert "test1.parquet" in str(input_files) assert "test2.parquet" in str(input_files) assert ( - str(ds) == "Datastream(num_blocks=2, num_rows=6, " + str(ds) == "Dataset(num_blocks=2, num_rows=6, " "schema={one: int64, two: string})" ), ds assert ( - repr(ds) == "Datastream(num_blocks=2, num_rows=6, " + repr(ds) == "Dataset(num_blocks=2, num_rows=6, " "schema={one: int64, two: string})" ), ds check_num_computed(ds, 0, 0) @@ -224,11 +224,11 @@ def prefetch_file_metadata(self, pieces): assert "test1.parquet" in str(input_files) assert "test2.parquet" in str(input_files) assert ( - str(ds) == "Datastream(num_blocks=2, num_rows=6, " + str(ds) == "Dataset(num_blocks=2, num_rows=6, " "schema={one: int64, two: 
string})" ), ds assert ( - repr(ds) == "Datastream(num_blocks=2, num_rows=6, " + repr(ds) == "Dataset(num_blocks=2, num_rows=6, " "schema={one: int64, two: string})" ), ds check_num_computed(ds, 2, 2) @@ -301,11 +301,11 @@ def test_parquet_read_bulk(ray_start_regular_shared, fs, data_path): assert "test1.parquet" in str(input_files) assert "test2.parquet" in str(input_files) assert ( - str(ds) == "Datastream(num_blocks=2, num_rows=6, " + str(ds) == "Dataset(num_blocks=2, num_rows=6, " "schema={one: int64, two: string})" ), ds assert ( - repr(ds) == "Datastream(num_blocks=2, num_rows=6, " + repr(ds) == "Dataset(num_blocks=2, num_rows=6, " "schema={one: int64, two: string})" ), ds check_num_computed(ds, 2, 2) @@ -391,11 +391,11 @@ def test_parquet_read_bulk_meta_provider(ray_start_regular_shared, fs, data_path assert "test1.parquet" in str(input_files) assert "test2.parquet" in str(input_files) assert ( - str(ds) == "Datastream(num_blocks=2, num_rows=6, " + str(ds) == "Dataset(num_blocks=2, num_rows=6, " "schema={one: int64, two: string})" ), ds assert ( - repr(ds) == "Datastream(num_blocks=2, num_rows=6, " + repr(ds) == "Dataset(num_blocks=2, num_rows=6, " "schema={one: int64, two: string})" ), ds check_num_computed(ds, 2, 2) @@ -452,7 +452,7 @@ def test_parquet_read_partitioned(ray_start_regular_shared, fs, data_path): assert len(input_files) == 2, input_files check_num_computed(ds, 0, 0) assert str(ds) == ( - "Datastream(\n" + "Dataset(\n" " num_blocks=2,\n" " num_rows=6,\n" " schema={two: string, " @@ -460,7 +460,7 @@ def test_parquet_read_partitioned(ray_start_regular_shared, fs, data_path): ")" ), ds assert repr(ds) == ( - "Datastream(\n" + "Dataset(\n" " num_blocks=2,\n" " num_rows=6,\n" " schema={two: string, " @@ -550,11 +550,11 @@ def test_parquet_read_partitioned_explicit(ray_start_regular_shared, tmp_path): input_files = ds.input_files() assert len(input_files) == 2, input_files assert ( - str(ds) == "Datastream(num_blocks=2, num_rows=6, " + str(ds) == 
"Dataset(num_blocks=2, num_rows=6, " "schema={two: string, one: int32})" ), ds assert ( - repr(ds) == "Datastream(num_blocks=2, num_rows=6, " + repr(ds) == "Dataset(num_blocks=2, num_rows=6, " "schema={two: string, one: int32})" ), ds check_num_computed(ds, 0, 0) diff --git a/python/ray/data/tests/test_partitioning.py b/python/ray/data/tests/test_partitioning.py index 984e8e2f9d32..4cc567b38cb8 100644 --- a/python/ray/data/tests/test_partitioning.py +++ b/python/ray/data/tests/test_partitioning.py @@ -11,7 +11,7 @@ import ray from ray.data.block import Block -from ray.data.datastream import Dataset +from ray.data.dataset import Dataset from ray.data.datasource import ( FileBasedDatasource, PathPartitionParser, diff --git a/python/ray/data/tests/test_pipeline.py b/python/ray/data/tests/test_pipeline.py index 424eb787b672..fd72d1ac3b8a 100644 --- a/python/ray/data/tests/test_pipeline.py +++ b/python/ray/data/tests/test_pipeline.py @@ -7,9 +7,9 @@ import numpy as np import ray -from ray.data import datastream +from ray.data import dataset from ray.data.context import DataContext, WARN_PREFIX, OK_PREFIX -from ray.data.datastream import Dataset +from ray.data.dataset import Dataset from ray.data.dataset_pipeline import DatasetPipeline from ray.tests.conftest import * # noqa @@ -43,19 +43,19 @@ def test_warnings(shutdown_only): ray.init(num_cpus=2) # Test parallelism warning. - datastream.logger = MockLogger() + dataset.logger = MockLogger() ray.data.range(10, parallelism=10).window(blocks_per_window=1) - print(datastream.logger.warnings) - print(datastream.logger.infos) - assert datastream.logger.warnings == [ + print(dataset.logger.warnings) + print(dataset.logger.infos) + assert dataset.logger.warnings == [ f"{WARN_PREFIX} This pipeline's parallelism is limited by its blocks per " "window to " "~1 concurrent tasks per window. To maximize " "performance, increase the blocks per window to at least 2. 
This " - "may require increasing the base datastream's parallelism and/or " + "may require increasing the base dataset's parallelism and/or " "adjusting the windowing parameters." ] - assert datastream.logger.infos == [ + assert dataset.logger.infos == [ "Created DatasetPipeline with 10 windows: 8b min, 8b max, 8b mean", "Blocks per window: 1 min, 1 max, 1 mean", f"{OK_PREFIX} This pipeline's windows likely fit in object store memory " @@ -69,17 +69,17 @@ def test_warnings(shutdown_only): ray.cluster_resources = lambda: res_dict # Test window memory warning. - datastream.logger = MockLogger() + dataset.logger = MockLogger() ray.data.range(100000, parallelism=100).window(blocks_per_window=10) - print(datastream.logger.warnings) - print(datastream.logger.infos) - assert datastream.logger.warnings == [ + print(dataset.logger.warnings) + print(dataset.logger.infos) + assert dataset.logger.warnings == [ f"{WARN_PREFIX} This pipeline's windows are ~0.08MiB in size each and " "may not fit in " "object store memory without spilling. To improve performance, " "consider reducing the size of each window to 250b or less." ] - assert datastream.logger.infos == [ + assert dataset.logger.infos == [ "Created DatasetPipeline with 10 windows: 0.08MiB min, 0.08MiB max, " "0.08MiB mean", "Blocks per window: 10 min, 10 max, 10 mean", @@ -89,22 +89,22 @@ def test_warnings(shutdown_only): ] # Test warning on both. - datastream.logger = MockLogger() + dataset.logger = MockLogger() ray.data.range(100000, parallelism=1).window(bytes_per_window=100000) - print(datastream.logger.warnings) - print(datastream.logger.infos) - assert datastream.logger.warnings == [ + print(dataset.logger.warnings) + print(dataset.logger.infos) + assert dataset.logger.warnings == [ f"{WARN_PREFIX} This pipeline's parallelism is limited by its blocks " "per window " "to ~1 concurrent tasks per window. To maximize performance, increase " "the blocks per window to at least 2. 
This may require increasing the " - "base datastream's parallelism and/or adjusting the windowing parameters.", + "base dataset's parallelism and/or adjusting the windowing parameters.", f"{WARN_PREFIX} This pipeline's windows are ~0.76MiB in size each and may " "not fit " "in object store memory without spilling. To improve performance, " "consider reducing the size of each window to 250b or less.", ] - assert datastream.logger.infos == [ + assert dataset.logger.infos == [ "Created DatasetPipeline with 1 windows: 0.76MiB min, 0.76MiB max, " "0.76MiB mean", "Blocks per window: 1 min, 1 max, 1 mean", @@ -113,12 +113,12 @@ def test_warnings(shutdown_only): ray.cluster_resources = old # Test no warning. - datastream.logger = MockLogger() + dataset.logger = MockLogger() ray.data.range(10, parallelism=10).window(blocks_per_window=10) - print(datastream.logger.warnings) - print(datastream.logger.infos) - assert datastream.logger.warnings == [] - assert datastream.logger.infos == [ + print(dataset.logger.warnings) + print(dataset.logger.infos) + assert dataset.logger.warnings == [] + assert dataset.logger.infos == [ "Created DatasetPipeline with 1 windows: 80b min, 80b max, 80b mean", "Blocks per window: 10 min, 10 max, 10 mean", f"{OK_PREFIX} This pipeline's per-window parallelism is high enough to fully " @@ -474,19 +474,19 @@ def test_schema_peek(ray_start_regular_shared): # Multiple datasets pipe = ray.data.range(6, parallelism=6).window(blocks_per_window=2) assert pipe.schema().names == ["id"] - assert pipe._first_datastream is not None + assert pipe._first_dataset is not None dss = list(pipe.iter_datasets()) assert len(dss) == 3, dss - assert pipe._first_datastream is None + assert pipe._first_dataset is None assert pipe.schema().names == ["id"] # Only 1 dataset pipe = ray.data.range(1).window(blocks_per_window=2) assert pipe.schema().names == ["id"] - assert pipe._first_datastream is not None + assert pipe._first_dataset is not None dss = list(pipe.iter_datasets()) 
assert len(dss) == 1, dss - assert pipe._first_datastream is None + assert pipe._first_dataset is None assert pipe.schema().names == ["id"] # Empty datasets @@ -496,10 +496,10 @@ def test_schema_peek(ray_start_regular_shared): .window(blocks_per_window=2) ) assert pipe.schema() is None - assert pipe._first_datastream is not None + assert pipe._first_dataset is not None dss = list(pipe.iter_datasets()) assert len(dss) == 3, dss - assert pipe._first_datastream is None + assert pipe._first_dataset is None assert pipe.schema() is None diff --git a/python/ray/data/tests/test_split.py b/python/ray/data/tests/test_split.py index 0db9a8fba635..b869bf2ddd2b 100644 --- a/python/ray/data/tests/test_split.py +++ b/python/ray/data/tests/test_split.py @@ -15,7 +15,7 @@ _equalize, ) from ray.data._internal.plan import ExecutionPlan -from ray.data._internal.stats import DatastreamStats +from ray.data._internal.stats import DatasetStats from ray.data._internal.split import ( _drop_empty_block_split, _generate_valid_indices, @@ -25,7 +25,7 @@ _split_at_indices, ) from ray.data.block import BlockAccessor -from ray.data.datastream import Dataset +from ray.data.dataset import Dataset from ray.data.tests.conftest import * # noqa from ray.data.tests.util import extract_values from ray.tests.conftest import * # noqa @@ -105,7 +105,7 @@ def _test_equal_split_balanced(block_sizes, num_splits): total_rows += block_size block_list = BlockList(blocks, metadata, owned_by_consumer=True) ds = Dataset( - ExecutionPlan(block_list, DatastreamStats.TODO(), run_by_consumer=True), + ExecutionPlan(block_list, DatasetStats.TODO(), run_by_consumer=True), 0, False, ) diff --git a/python/ray/data/tests/test_stats.py b/python/ray/data/tests/test_stats.py index 5d256cae7c3f..c148a5bd545d 100644 --- a/python/ray/data/tests/test_stats.py +++ b/python/ray/data/tests/test_stats.py @@ -5,8 +5,8 @@ import pytest import ray -from ray.data._internal.stats import _StatsActor, DatastreamStats -from 
ray.data._internal.datastream_logger import DatastreamLogger +from ray.data._internal.stats import _StatsActor, DatasetStats +from ray.data._internal.dataset_logger import DatasetLogger from ray.data.block import BlockMetadata from ray.data.context import DataContext from ray.data.tests.util import column_udf @@ -40,19 +40,19 @@ def test_dataset_stats_basic(ray_start_regular_shared, enable_auto_log_stats): if context.new_execution_backend: if context.use_streaming_executor: - logger = DatastreamLogger( + logger = DatasetLogger( "ray.data._internal.execution.streaming_executor" ).get_logger( log_to_stdout=enable_auto_log_stats, ) else: - logger = DatastreamLogger( + logger = DatasetLogger( "ray.data._internal.execution.bulk_executor" ).get_logger( log_to_stdout=enable_auto_log_stats, ) else: - logger = DatastreamLogger("ray.data._internal.plan").get_logger( + logger = DatasetLogger("ray.data._internal.plan").get_logger( log_to_stdout=enable_auto_log_stats, ) with patch.object(logger, "info") as mock_logger: @@ -147,7 +147,7 @@ def test_dataset_stats_basic(ray_start_regular_shared, enable_auto_log_stats): * Extra metrics: {'obj_store_mem_alloc': N, 'obj_store_mem_freed': N, \ 'obj_store_mem_peak': N} -Datastream iterator time breakdown: +Dataset iterator time breakdown: * Total time user code is blocked: T * Total time in user code: T * Total time overall: T @@ -183,7 +183,7 @@ def test_dataset_stats_basic(ray_start_regular_shared, enable_auto_log_stats): * Extra metrics: {'obj_store_mem_alloc': N, 'obj_store_mem_freed': N, \ 'obj_store_mem_peak': N} -Datastream iterator time breakdown: +Dataset iterator time breakdown: * In ray.wait(): T * In ray.get(): T * Num blocks local: Z @@ -219,7 +219,7 @@ def test_dataset_stats_basic(ray_start_regular_shared, enable_auto_log_stats): * Extra metrics: {'obj_store_mem_alloc': N, 'obj_store_mem_freed': N, \ 'obj_store_mem_peak': N} -Datastream iterator time breakdown: +Dataset iterator time breakdown: * Total time user code is 
blocked: T * Total time in user code: T * Total time overall: T @@ -251,7 +251,7 @@ def test_dataset_stats_basic(ray_start_regular_shared, enable_auto_log_stats): * Output size bytes: N min, N max, N mean, N total * Tasks per node: N min, N max, N mean; N nodes used -Datastream iterator time breakdown: +Dataset iterator time breakdown: * In ray.wait(): T * In ray.get(): T * In next_batch(): T @@ -269,8 +269,8 @@ def test_dataset__repr__(ray_start_regular_shared): ds = ds.materialize() assert canonicalize(repr(ds._plan.stats().to_summary())) == ( - "DatastreamStatsSummary(\n" - " datastream_uuid=U,\n" + "DatasetStatsSummary(\n" + " dataset_uuid=U,\n" " base_name=None,\n" " number=N,\n" " extra_metrics={},\n" @@ -306,8 +306,8 @@ def test_dataset__repr__(ray_start_regular_shared): ds2 = ds.map_batches(lambda x: x).materialize() assert len(ds2.take_all()) == n assert canonicalize(repr(ds2._plan.stats().to_summary())) == ( - "DatastreamStatsSummary(\n" - " datastream_uuid=U,\n" + "DatasetStatsSummary(\n" + " dataset_uuid=U,\n" " base_name=MapBatches(),\n" " number=N,\n" " extra_metrics={\n" @@ -341,8 +341,8 @@ def test_dataset__repr__(ray_start_regular_shared): " total_time=T,\n" " ),\n" " parents=[\n" - " DatastreamStatsSummary(\n" - " datastream_uuid=U,\n" + " DatasetStatsSummary(\n" + " dataset_uuid=U,\n" " base_name=None,\n" " number=N,\n" " extra_metrics={},\n" @@ -571,19 +571,19 @@ def test_dataset_pipeline_stats_basic(ray_start_regular_shared, enable_auto_log_ if context.new_execution_backend: if context.use_streaming_executor: - logger = DatastreamLogger( + logger = DatasetLogger( "ray.data._internal.execution.streaming_executor" ).get_logger( log_to_stdout=enable_auto_log_stats, ) else: - logger = DatastreamLogger( + logger = DatasetLogger( "ray.data._internal.execution.bulk_executor" ).get_logger( log_to_stdout=enable_auto_log_stats, ) else: - logger = DatastreamLogger("ray.data._internal.plan").get_logger( + logger = 
DatasetLogger("ray.data._internal.plan").get_logger( log_to_stdout=enable_auto_log_stats, ) @@ -744,10 +744,10 @@ def test_dataset_pipeline_stats_basic(ray_start_regular_shared, enable_auto_log_ 'obj_store_mem_peak': N} ##### Overall Pipeline Time Breakdown ##### -* Time stalled waiting for next datastream: T min, T max, T mean, T total +* Time stalled waiting for next dataset: T min, T max, T mean, T total DatasetPipeline iterator time breakdown: -* Waiting for next datastream: T +* Waiting for next dataset: T * In ray.wait(): T * In ray.get(): T * In next_batch(): T @@ -799,10 +799,10 @@ def test_dataset_pipeline_stats_basic(ray_start_regular_shared, enable_auto_log_ * Tasks per node: N min, N max, N mean; N nodes used ##### Overall Pipeline Time Breakdown ##### -* Time stalled waiting for next datastream: T min, T max, T mean, T total +* Time stalled waiting for next dataset: T min, T max, T mean, T total DatasetPipeline iterator time breakdown: -* Waiting for next datastream: T +* Waiting for next dataset: T * In ray.wait(): T * In ray.get(): T * In next_batch(): T @@ -875,10 +875,10 @@ def consume(split): 'obj_store_mem_peak': N} ##### Overall Pipeline Time Breakdown ##### -* Time stalled waiting for next datastream: T min, T max, T mean, T total +* Time stalled waiting for next dataset: T min, T max, T mean, T total DatasetPipeline iterator time breakdown: -* Waiting for next datastream: T +* Waiting for next dataset: T * In ray.wait(): T * In ray.get(): T * In next_batch(): T @@ -909,10 +909,10 @@ def consume(split): * Tasks per node: N min, N max, N mean; N nodes used ##### Overall Pipeline Time Breakdown ##### -* Time stalled waiting for next datastream: T min, T max, T mean, T total +* Time stalled waiting for next dataset: T min, T max, T mean, T total DatasetPipeline iterator time breakdown: -* Waiting for next datastream: T +* Waiting for next dataset: T * In ray.wait(): T * In ray.get(): T * In next_batch(): T @@ -928,7 +928,7 @@ def 
test_calculate_blocks_stats(ray_start_regular_shared, stage_two_block): context.optimize_fuse_stages = True block_params, block_meta_list = stage_two_block - stats = DatastreamStats( + stats = DatasetStats( stages={"Read": block_meta_list}, parent=None, ) @@ -973,11 +973,11 @@ def test_summarize_blocks(ray_start_regular_shared, stage_two_block): context.optimize_fuse_stages = True block_params, block_meta_list = stage_two_block - stats = DatastreamStats( + stats = DatasetStats( stages={"Read": block_meta_list}, parent=None, ) - stats.datastream_uuid = "test-uuid" + stats.dataset_uuid = "test-uuid" calculated_stats = stats.to_summary() summarized_lines = calculated_stats.to_string().split("\n") @@ -1048,14 +1048,14 @@ def test_summarize_blocks(ray_start_regular_shared, stage_two_block): def test_get_total_stats(ray_start_regular_shared, stage_two_block): """Tests a set of similar getter methods which pull aggregated statistics values after calculating stage-level stats: - `DatastreamStats.get_max_wall_time()`, - `DatastreamStats.get_total_cpu_time()`, - `DatastreamStats.get_max_heap_memory()`.""" + `DatasetStats.get_max_wall_time()`, + `DatasetStats.get_total_cpu_time()`, + `DatasetStats.get_max_heap_memory()`.""" context = DataContext.get_current() context.optimize_fuse_stages = True block_params, block_meta_list = stage_two_block - stats = DatastreamStats( + stats = DatasetStats( stages={"Read": block_meta_list}, parent=None, ) @@ -1091,7 +1091,7 @@ def test_streaming_stats_full(ray_start_regular_shared, restore_data_context): * Extra metrics: \ {'obj_store_mem_alloc': N, 'obj_store_mem_freed': N, 'obj_store_mem_peak': N} -Datastream iterator time breakdown: +Dataset iterator time breakdown: * Total time user code is blocked: T * Total time in user code: T * Total time overall: T diff --git a/python/ray/data/tests/test_tensor.py b/python/ray/data/tests/test_tensor.py index 1d3fc7e5bd61..eb31502720ea 100644 --- a/python/ray/data/tests/test_tensor.py +++ 
b/python/ray/data/tests/test_tensor.py @@ -37,7 +37,7 @@ def test_tensors_basic(ray_start_regular_shared): tensor_shape = (3, 5) ds = ray.data.range_tensor(6, shape=tensor_shape, parallelism=6) assert str(ds) == ( - "Datastream(\n" + "Dataset(\n" " num_blocks=6,\n" " num_rows=6,\n" " schema={data: numpy.ndarray(shape=(3, 5), dtype=int64)}\n" @@ -214,7 +214,7 @@ def test_batch_tensors(ray_start_regular_shared): import torch ds = ray.data.from_items([torch.tensor([0, 0]) for _ in range(40)], parallelism=40) - res = "MaterializedDatastream(num_blocks=40, num_rows=40, schema={item: object})" + res = "MaterializedDataset(num_blocks=40, num_rows=40, schema={item: object})" assert str(ds) == res, str(ds) with pytest.raises(pa.lib.ArrowInvalid): next(ds.iter_batches(batch_format="pyarrow")) @@ -300,7 +300,7 @@ def test_tensors_inferred_from_map(ray_start_regular_shared): ds = ray.data.range(10, parallelism=10).map(lambda _: {"data": np.ones((4, 4))}) ds = ds.materialize() assert str(ds) == ( - "MaterializedDatastream(\n" + "MaterializedDataset(\n" " num_blocks=10,\n" " num_rows=10,\n" " schema={data: numpy.ndarray(shape=(4, 4), dtype=double)}\n" @@ -313,7 +313,7 @@ def test_tensors_inferred_from_map(ray_start_regular_shared): ) ds = ds.materialize() assert str(ds) == ( - "MaterializedDatastream(\n" + "MaterializedDataset(\n" " num_blocks=4,\n" " num_rows=24,\n" " schema={data: numpy.ndarray(shape=(4, 4), dtype=double)}\n" @@ -326,7 +326,7 @@ def test_tensors_inferred_from_map(ray_start_regular_shared): ) ds = ds.materialize() assert str(ds) == ( - "MaterializedDatastream(\n" + "MaterializedDataset(\n" " num_blocks=10,\n" " num_rows=20,\n" " schema={data: numpy.ndarray(shape=(4, 4), dtype=double)}\n" @@ -339,7 +339,7 @@ def test_tensors_inferred_from_map(ray_start_regular_shared): ) ds = ds.materialize() assert str(ds) == ( - "MaterializedDatastream(\n" + "MaterializedDataset(\n" " num_blocks=4,\n" " num_rows=24,\n" " schema={a: numpy.ndarray(shape=(4, 4), 
dtype=float64)}\n" @@ -352,7 +352,7 @@ def test_tensors_inferred_from_map(ray_start_regular_shared): ) ds = ds.materialize() assert str(ds) == ( - "MaterializedDatastream(\n" + "MaterializedDataset(\n" " num_blocks=4,\n" " num_rows=16,\n" " schema={a: numpy.ndarray(shape=(None, None), dtype=float64)}\n" diff --git a/python/ray/train/_internal/backend_executor.py b/python/ray/train/_internal/backend_executor.py index 62bd77454ecb..423a6babeaa8 100644 --- a/python/ray/train/_internal/backend_executor.py +++ b/python/ray/train/_internal/backend_executor.py @@ -341,7 +341,7 @@ def start_training( Args: train_func: The training function to run on each worker. - dataset_spec: A specification for the Datastream to be + dataset_spec: A specification for the Dataset to be passed to the training workers, and the logic on how to shard the Ray Dataset. checkpoint: The checkpoint data that diff --git a/python/ray/train/_internal/dataset_spec.py b/python/ray/train/_internal/dataset_spec.py index 1eedcb4301c7..6ad4d2bf1239 100644 --- a/python/ray/train/_internal/dataset_spec.py +++ b/python/ray/train/_internal/dataset_spec.py @@ -4,7 +4,7 @@ from ray.actor import ActorHandle from ray.air.config import DatasetConfig -from ray.data import Datastream, DatasetPipeline +from ray.data import Dataset, DatasetPipeline from ray.data.preprocessor import Preprocessor from ray.data.preprocessors import Chain from ray.air._internal.util import _estimate_avail_object_store_memory @@ -12,14 +12,14 @@ if TYPE_CHECKING: from ray.data import DataIterator -RayDataset = Union["Datastream", "DatasetPipeline"] +RayDataset = Union["Dataset", "DatasetPipeline"] @dataclass class RayDatasetSpec: - """Configuration for Datastreams to pass to the training workers. + """Configuration for Datasets to pass to the training workers. 
- dataset_or_dict: An optional Datastream (or DatasetPipeline) or a dictionary of + dataset_or_dict: An optional Dataset (or DatasetPipeline) or a dictionary of datasets to be sharded across all the training workers, which can be accessed from the training function via ``session.get_dataset_shard()``. Multiple Datasets can be passed in as a dictionary that maps each name key to a @@ -32,7 +32,7 @@ class RayDatasetSpec: training workers (to use as locality hints). The Callable is expected to return a list of RayDatasets or a list of dictionaries of RayDatasets, with the length of the list equal to the length of the list of actor handles. - If None is provided, the provided Datastream(s) will be equally split. + If None is provided, the provided Dataset(s) will be equally split. """ @@ -91,7 +91,7 @@ def get_dataset_shards( ) if not len(splits) == len(training_worker_handles): raise RuntimeError( - "The list of Datastreams returned by the " + "The list of Datasets returned by the " f"`dataset_split_fn`: {len(splits)} does not match " f"the number of training workers: {len(training_worker_handles)}" ) @@ -109,14 +109,14 @@ def __init__(self, dataset_config: Dict[str, DatasetConfig]): with all defaults filled in. """ self.dataset_config = dataset_config - self.preprocessed_datasets: Optional[Dict[str, "Datastream"]] = None + self.preprocessed_datasets: Optional[Dict[str, "Dataset"]] = None self.preprocessor: Optional["Preprocessor"] = None def preprocess_datasets( self, prep: "Preprocessor", - datasets: Dict[str, "Datastream"], - ) -> Dict[str, "Datastream"]: + datasets: Dict[str, "Dataset"], + ) -> Dict[str, "Dataset"]: """Preprocess the given datasets. This will be called prior to `get_dataset_shards()`. @@ -215,7 +215,7 @@ def get_dataset_shards( dataset = dataset.randomize_block_order_each_window() elif config.per_epoch_preprocessor is not None: # Reapply the per epoch preprocessor on each epoch. 
- if isinstance(dataset, Datastream): + if isinstance(dataset, Dataset): dataset = dataset.repeat() dataset = config.per_epoch_preprocessor._transform_pipeline(dataset) @@ -223,7 +223,7 @@ def get_dataset_shards( # If global shuffle is requested, then we should try to overlap # this with other computation, so convert to a DatasetPipeline # if not already being used. - if isinstance(dataset, Datastream): + if isinstance(dataset, Dataset): dataset = dataset.repeat() dataset = dataset.random_shuffle_each_window() diff --git a/python/ray/train/_internal/session.py b/python/ray/train/_internal/session.py index d1f1c003e0ec..369261901f46 100644 --- a/python/ray/train/_internal/session.py +++ b/python/ray/train/_internal/session.py @@ -15,7 +15,7 @@ from ray.air._internal.util import StartTraceback, RunnerThread from ray.air.checkpoint import Checkpoint from ray.air.constants import _RESULT_FETCH_TIMEOUT, _ERROR_FETCH_TIMEOUT -from ray.data import Datastream, DatasetPipeline +from ray.data import Dataset, DatasetPipeline from ray.train._internal.accelerator import Accelerator from ray.train.constants import ( DETAILED_AUTOFILLED_KEYS, @@ -73,7 +73,7 @@ def __init__( world_size: int, # TODO(xwjiang): Legacy Ray Train trainer clean up! trial_info: Optional[TrialInfo] = None, - dataset_shard: Optional[Union[Datastream, DatasetPipeline]] = None, + dataset_shard: Optional[Union[Dataset, DatasetPipeline]] = None, # TODO(xwjiang): Legacy Ray Train trainer clean up! 
checkpoint: Optional[Checkpoint] = None, # Deprecated diff --git a/python/ray/train/base_trainer.py b/python/ray/train/base_trainer.py index 7f40d0a1ac01..3f554ddb684a 100644 --- a/python/ray/train/base_trainer.py +++ b/python/ray/train/base_trainer.py @@ -27,16 +27,16 @@ from ray._private.dict import merge_dicts if TYPE_CHECKING: - from ray.data import Datastream + from ray.data import Dataset from ray.data.preprocessor import Preprocessor from ray.tune import Trainable _TRAINER_PKL = "trainer.pkl" -# A type representing either a ray.data.Datastream or a function that returns a -# ray.data.Datastream and accepts no arguments. -GenDataset = Union["Datastream", Callable[[], "Datastream"]] +# A type representing either a ray.data.Dataset or a function that returns a +# ray.data.Dataset and accepts no arguments. +GenDataset = Union["Dataset", Callable[[], "Dataset"]] logger = logging.getLogger(__name__) @@ -83,7 +83,7 @@ class BaseTrainer(abc.ABC): - ``trainer.setup()``: Any heavyweight Trainer setup should be specified here. - ``trainer.preprocess_datasets()``: The provided - ray.data.Datastream are preprocessed with the provided + ray.data.Dataset are preprocessed with the provided ray.data.Preprocessor. - ``trainer.train_loop()``: Executes the main training logic. - Calling ``trainer.fit()`` will return a ``ray.result.Result`` @@ -157,7 +157,7 @@ def training_loop(self): Args: scaling_config: Configuration for how to scale training. run_config: Configuration for the execution of the training run. - datasets: Any Datastreams to use for training. Use the key "train" + datasets: Any Datasets to use for training. Use the key "train" to denote which dataset is the training dataset. If a ``preprocessor`` is provided and has not already been fit, it will be fit on the training dataset. 
All datasets will be transformed @@ -407,7 +407,7 @@ def _validate_attributes(self): if not isinstance(self.datasets, dict): raise ValueError( f"`datasets` should be a dict mapping from a string to " - f"`ray.data.Datastream` objects, " + f"`ray.data.Dataset` objects, " f"found {type(self.datasets)} with value `{self.datasets}`." ) else: @@ -415,18 +415,18 @@ def _validate_attributes(self): if isinstance(dataset, ray.data.DatasetPipeline): raise ValueError( f"The Dataset under '{key}' key is a " - f"`ray.data.DatasetPipeline`. Only `ray.data.Datastream` are " + f"`ray.data.DatasetPipeline`. Only `ray.data.Dataset` are " f"allowed to be passed in. Pipelined/streaming ingest can be " f"configured via the `dataset_config` arg. See " "https://docs.ray.io/en/latest/ray-air/check-ingest.html#enabling-streaming-ingest" # noqa: E501 "for an example." ) - elif not isinstance(dataset, ray.data.Datastream) and not callable( + elif not isinstance(dataset, ray.data.Dataset) and not callable( dataset ): raise ValueError( - f"The Datastream under '{key}' key is not a " - "`ray.data.Datastream`. " + f"The Dataset under '{key}' key is not a " + "`ray.data.Dataset`. " f"Received {dataset} instead." ) @@ -626,7 +626,7 @@ def _save(self, experiment_path: Union[str, Path]): of parameters can be passed in again), that parameter will be loaded from the saved copy. - Datastreams should not be saved as part of the state. Instead, we save the + Datasets should not be saved as part of the state. Instead, we save the keys and replace the dataset values with dummy functions that will raise an error if invoked. 
The error only serves as a guardrail for misuse (e.g., manually unpickling and constructing the Trainer again) diff --git a/python/ray/train/batch_predictor.py b/python/ray/train/batch_predictor.py index 694103b906bf..576d14699e5f 100644 --- a/python/ray/train/batch_predictor.py +++ b/python/ray/train/batch_predictor.py @@ -8,7 +8,7 @@ from ray.air import Checkpoint from ray.air.data_batch_type import DataBatchType from ray.air.util.data_batch_conversion import BatchFormat -from ray.data import Datastream, DatasetPipeline, Preprocessor +from ray.data import Dataset, DatasetPipeline, Preprocessor from ray.data.context import DataContext from ray.train.predictor import Predictor from ray.util.annotations import PublicAPI @@ -21,7 +21,7 @@ class BatchPredictor: """Batch predictor class. Takes a predictor class and a checkpoint and provides an interface to run - batch scoring on Datastreams. + batch scoring on Datasets. This batch predictor wraps around a predictor class and executes it in a distributed way when calling ``predict()``. @@ -111,7 +111,7 @@ def set_preprocessor(self, preprocessor: Preprocessor) -> None: def predict( self, - data: Union[ray.data.Datastream, ray.data.DatasetPipeline], + data: Union[ray.data.Dataset, ray.data.DatasetPipeline], *, feature_columns: Optional[List[str]] = None, keep_columns: Optional[List[str]] = None, @@ -123,14 +123,14 @@ def predict( separate_gpu_stage: bool = True, ray_remote_args: Optional[Dict[str, Any]] = None, **predict_kwargs, - ) -> Union[ray.data.Datastream, ray.data.DatasetPipeline]: - """Run batch scoring on a Datastream. + ) -> Union[ray.data.Dataset, ray.data.DatasetPipeline]: + """Run batch scoring on a Dataset. .. note:: - In Ray 2.4, `BatchPredictor` is lazy by default. Use one of the Datastream consumption APIs, such as iterating through the output, to trigger the execution of prediction. + In Ray 2.4, `BatchPredictor` is lazy by default. 
Use one of the Dataset consumption APIs, such as iterating through the output, to trigger the execution of prediction. Args: - data: Datastream or pipeline to run batch prediction on. + data: Dataset or pipeline to run batch prediction on. feature_columns: List of columns in the preprocessed dataset to use for prediction. Columns not specified will be dropped from `data` before being passed to the predictor. @@ -157,7 +157,7 @@ def predict( ``predict()`` method. Returns: - Datastream containing scoring results. + Dataset containing scoring results. Examples: @@ -190,7 +190,7 @@ def calculate_accuracy(df): .. testoutput:: MapBatches(ScoringWrapper) - +- Datastream(num_blocks=1, num_rows=3, schema={feature_1: int64, label: int64}) + +- Dataset(num_blocks=1, num_rows=3, schema={feature_1: int64, label: int64}) Final accuracy: 1.0 """ # noqa: E501 if num_gpus_per_worker is None: @@ -297,7 +297,7 @@ def _keep_columns_from_input_batch( return prediction_output_batch def __call__(self, input_batch: DataBatchType) -> DataBatchType: - # TODO: Delegate separate_gpu_stage flag to Datastream. + # TODO: Delegate separate_gpu_stage flag to Dataset. if self.override_prep: # Apply preprocessing before selecting feature columns. input_batch = self.override_prep.transform_batch(input_batch) @@ -330,18 +330,18 @@ def __call__(self, input_batch: DataBatchType) -> DataBatchType: preprocessor = self.get_preprocessor() override_prep = None if preprocessor: - # TODO: Delegate separate_gpu_stage flag to Datastream. + # TODO: Delegate separate_gpu_stage flag to Dataset. if not separate_gpu_stage and num_gpus_per_worker > 0: override_prep = preprocessor else: # In batch prediction, preprocessing is always done in a separate stage. # We should not in-line it with prediction, unless separate_gpu_stage is # False. - # Datastream optimizer will fuse preprocessing+prediction stage as + # Dataset optimizer will fuse preprocessing+prediction stage as # necessary. 
- if isinstance(data, Datastream): - # Datastream is lazy by default so this transform + if isinstance(data, Dataset): + # Dataset is lazy by default so this transform # will not trigger execution. data = preprocessor.transform(data) elif isinstance(data, DatasetPipeline): @@ -362,7 +362,7 @@ def __call__(self, input_batch: DataBatchType) -> DataBatchType: def predict_pipelined( self, - data: ray.data.Datastream, + data: ray.data.Dataset, *, blocks_per_window: Optional[int] = None, bytes_per_window: Optional[int] = None, @@ -383,11 +383,11 @@ def predict_pipelined( Unlike `predict()`, this generates a DatasetPipeline object and does not perform execution. Execution can be triggered by pulling from the pipeline. - This is a convenience wrapper around calling `.window()` on the Datastream prior + This is a convenience wrapper around calling `.window()` on the Dataset prior to passing it `BatchPredictor.predict()`. Args: - data: Datastream to run batch prediction on. + data: Dataset to run batch prediction on. blocks_per_window: The window size (parallelism) in blocks. Increasing window size increases pipeline throughput, but also increases the latency to initial output, since it decreases the @@ -468,7 +468,7 @@ def predict_pipelined( ) def _determine_preprocessor_batch_format( - self, ds: Union[ray.data.Datastream, ray.data.DatasetPipeline] + self, ds: Union[ray.data.Dataset, ray.data.DatasetPipeline] ) -> BatchFormat: """Determine batch format we use for the first preprocessor. @@ -477,7 +477,7 @@ def _determine_preprocessor_batch_format( transform type to avoid unnecessary data conversion. Args: - ds (Union[ray.data.Datastream, ray.data.DatasetPipeline]): Input + ds (Union[ray.data.Dataset, ray.data.DatasetPipeline]): Input dataset or dataset pipeline. 
Returns: diff --git a/python/ray/train/data_parallel_trainer.py b/python/ray/train/data_parallel_trainer.py index b08b8f5600ee..eba45c82c505 100644 --- a/python/ray/train/data_parallel_trainer.py +++ b/python/ray/train/data_parallel_trainer.py @@ -99,7 +99,7 @@ def train_loop_per_worker(): # Returns dict of last saved checkpoint. session.get_checkpoint() - # Returns the Datastream shard for the given key. + # Returns the Dataset shard for the given key. session.get_dataset_shard("my_dataset") # Returns the total number of workers executing training. @@ -210,7 +210,7 @@ def __init__(self, train_loop_per_worker, my_backend_config: dataset_config: Configuration for dataset ingest. This is merged with the default dataset config for the given trainer (`cls._dataset_config`). run_config: Configuration for the execution of the training run. - datasets: Any Datastreams to use for training. Use + datasets: Any Datasets to use for training. Use the key "train" to denote which dataset is the training dataset. If a ``preprocessor`` is provided and has not already been fit, it will be fit on the training dataset. 
All datasets will be transformed diff --git a/python/ray/train/examples/pytorch/torch_regression_example.py b/python/ray/train/examples/pytorch/torch_regression_example.py index 94d628087269..663f4af5a621 100644 --- a/python/ray/train/examples/pytorch/torch_regression_example.py +++ b/python/ray/train/examples/pytorch/torch_regression_example.py @@ -12,13 +12,13 @@ import ray.train as train from ray.air import session from ray.air.result import Result -from ray.data import Datastream +from ray.data import Dataset from ray.train.batch_predictor import BatchPredictor from ray.train.torch import TorchPredictor, TorchTrainer from ray.air.config import ScalingConfig -def get_datasets(split: float = 0.7) -> Tuple[Datastream]: +def get_datasets(split: float = 0.7) -> Tuple[Dataset]: dataset = ray.data.read_csv("s3://anonymous@air-example-data/regression.csv") def combine_x(batch): diff --git a/python/ray/train/examples/tf/tensorflow_autoencoder_example.py b/python/ray/train/examples/tf/tensorflow_autoencoder_example.py index 80579d169585..c0a91307af6f 100644 --- a/python/ray/train/examples/tf/tensorflow_autoencoder_example.py +++ b/python/ray/train/examples/tf/tensorflow_autoencoder_example.py @@ -137,7 +137,7 @@ def train_tensorflow_mnist( return results -def predict_tensorflow_mnist(result: Result) -> ray.data.Datastream: +def predict_tensorflow_mnist(result: Result) -> ray.data.Dataset: test_dataset = get_dataset(split_type="test") batch_predictor = BatchPredictor.from_checkpoint( result.checkpoint, TensorflowPredictor, model_definition=build_autoencoder_model diff --git a/python/ray/train/examples/tf/tensorflow_regression_example.py b/python/ray/train/examples/tf/tensorflow_regression_example.py index b7ecf7da0dad..5b130a9947b4 100644 --- a/python/ray/train/examples/tf/tensorflow_regression_example.py +++ b/python/ray/train/examples/tf/tensorflow_regression_example.py @@ -8,7 +8,7 @@ from ray.air import session from ray.air.integrations.keras import 
ReportCheckpointCallback from ray.air.result import Result -from ray.data import Datastream +from ray.data import Dataset from ray.data.preprocessors import Concatenator from ray.train.batch_predictor import BatchPredictor from ray.train.tensorflow import ( @@ -75,7 +75,7 @@ def train_tensorflow_regression(num_workers: int = 2, use_gpu: bool = False) -> return results -def predict_regression(result: Result) -> Datastream: +def predict_regression(result: Result) -> Dataset: batch_predictor = BatchPredictor.from_checkpoint( result.checkpoint, TensorflowPredictor, model_definition=build_model ) diff --git a/python/ray/train/gbdt_trainer.py b/python/ray/train/gbdt_trainer.py index 2e56488d13ca..30d37230f236 100644 --- a/python/ray/train/gbdt_trainer.py +++ b/python/ray/train/gbdt_trainer.py @@ -108,7 +108,7 @@ class GBDTTrainer(BaseTrainer): Inherited by XGBoostTrainer and LightGBMTrainer. Args: - datasets: Datastreams to use for training and validation. Must include a + datasets: Datasets to use for training and validation. Must include a "train" key denoting the training dataset. If a ``preprocessor`` is provided and has not already been fit, it will be fit on the training dataset. All datasets will be transformed by the ``preprocessor`` if @@ -170,7 +170,7 @@ def __init__( resume_from_checkpoint=resume_from_checkpoint, ) - # Datastreams should always use distributed loading. + # Datasets should always use distributed loading. for dataset_name in self.datasets.keys(): dataset_params = self.dmatrix_params.get(dataset_name, {}) dataset_params["distributed"] = True diff --git a/python/ray/train/hf_accelerate/accelerate_trainer.py b/python/ray/train/hf_accelerate/accelerate_trainer.py index f8ddf1b9ffdd..489788fb9b84 100644 --- a/python/ray/train/hf_accelerate/accelerate_trainer.py +++ b/python/ray/train/hf_accelerate/accelerate_trainer.py @@ -68,7 +68,7 @@ def train_loop_per_worker(): # Get dict of last saved checkpoint. 
session.get_checkpoint() - # Session returns the Datastream shard for the given key. + # Session returns the Dataset shard for the given key. session.get_dataset_shard("my_dataset") # Get the total number of workers executing training. @@ -249,7 +249,7 @@ def train_loop_per_worker(): scaling_config: Configuration for how to scale data parallel training. dataset_config: Configuration for dataset ingest. run_config: Configuration for the execution of the training run. - datasets: Any Datastreams to use for training. Use + datasets: Any Datasets to use for training. Use the key "train" to denote which dataset is the training dataset. If a ``preprocessor`` is provided and has not already been fit, it will be fit on the training dataset. All datasets will be transformed diff --git a/python/ray/train/hf_transformers/_transformers_utils.py b/python/ray/train/hf_transformers/_transformers_utils.py index 289037c04a47..d33f69c1a20c 100644 --- a/python/ray/train/hf_transformers/_transformers_utils.py +++ b/python/ray/train/hf_transformers/_transformers_utils.py @@ -61,9 +61,9 @@ def get_train_dataloader(self): return trainer -# TODO(ml-team): Replace with a Datastreams-HuggingFace integration when available. +# TODO(ml-team): Replace with a Datasets-HuggingFace integration when available. 
class RayDatasetHFIterable(datasets.iterable_dataset.ExamplesIterable): - """HF ExamplesIterable backed by a Datastream.""" + """HF ExamplesIterable backed by a Dataset.""" def __init__(self, dataset: DataIterator) -> None: self.dataset = dataset @@ -88,7 +88,7 @@ def process_dataset_for_hf( ).with_format("torch") try: - dataset_length = dataset._base_datastream.count() + dataset_length = dataset._base_dataset.count() except (ValueError, AttributeError): # pipeline case dataset_length = None diff --git a/python/ray/train/horovod/horovod_trainer.py b/python/ray/train/horovod/horovod_trainer.py index ff74ee1648b9..e6601c5fd8a3 100644 --- a/python/ray/train/horovod/horovod_trainer.py +++ b/python/ray/train/horovod/horovod_trainer.py @@ -57,7 +57,7 @@ def train_loop_per_worker(): # Returns dict of last saved checkpoint. session.get_checkpoint() - # Returns the Datastream shard for the given key. + # Returns the Dataset shard for the given key. session.get_dataset_shard("my_dataset") # Returns the total number of workers executing training. @@ -162,7 +162,7 @@ def train_loop_per_worker(): scaling_config: Configuration for how to scale data parallel training. dataset_config: Configuration for dataset ingest. run_config: Configuration for the execution of the training run. - datasets: Any Datastreams to use for training. Use + datasets: Any Datasets to use for training. Use the key "train" to denote which dataset is the training dataset. If a ``preprocessor`` is provided and has not already been fit, it will be fit on the training dataset. All datasets will be transformed diff --git a/python/ray/train/lightgbm/lightgbm_trainer.py b/python/ray/train/lightgbm/lightgbm_trainer.py index 0a738c4696ef..15d600382d5b 100644 --- a/python/ray/train/lightgbm/lightgbm_trainer.py +++ b/python/ray/train/lightgbm/lightgbm_trainer.py @@ -50,7 +50,7 @@ class LightGBMTrainer(GBDTTrainer): result = trainer.fit() Args: - datasets: Datastreams to use for training and validation. 
Must include a + datasets: Datasets to use for training and validation. Must include a "train" key denoting the training dataset. If a ``preprocessor`` is provided and has not already been fit, it will be fit on the training dataset. All datasets will be transformed by the ``preprocessor`` if diff --git a/python/ray/train/lightning/_lightning_utils.py b/python/ray/train/lightning/_lightning_utils.py index 1495fdcf21dc..ddd8629bc28e 100644 --- a/python/ray/train/lightning/_lightning_utils.py +++ b/python/ray/train/lightning/_lightning_utils.py @@ -1,7 +1,7 @@ import ray from ray.air import session from ray.air.constants import MODEL_KEY -from ray.data.datastream import DataIterator +from ray.data.dataset import DataIterator from ray.train.lightning.lightning_checkpoint import LightningCheckpoint import logging diff --git a/python/ray/train/lightning/lightning_trainer.py b/python/ray/train/lightning/lightning_trainer.py index 14aea9d320da..5518dcaebe12 100644 --- a/python/ray/train/lightning/lightning_trainer.py +++ b/python/ray/train/lightning/lightning_trainer.py @@ -226,7 +226,7 @@ class LightningTrainer(TorchTrainer): ``pytorch_lightning.LightningModule`` using the arguments provided in ``LightningConfigBuilder.module()``. - For data ingestion, the LightningTrainer will then either convert the Datastream + For data ingestion, the LightningTrainer will then either convert the Dataset shards to a ``pytorch_lightning.LightningDataModule``, or directly use the datamodule or dataloaders if provided by users. @@ -348,19 +348,19 @@ def configure_optimizers(self): scaling_config: Configuration for how to scale data parallel training. dataset_config: Configuration for dataset ingest. run_config: Configuration for the execution of the training run. - datasets: A dictionary of Datastreams to use for training. + datasets: A dictionary of Datasets to use for training. 
Use the key "train" to denote which dataset is the training dataset and (optionally) key "val" to denote the validation dataset. If a ``preprocessor`` is provided and has not already been fit, it will be fit on the training dataset. All datasets will be transformed by the ``preprocessor`` if one is provided. - datasets_iter_config: Configurations for iterating over input Datastreams. + datasets_iter_config: Configurations for iterating over input Datasets. This configuration is only valid when `datasets` argument is provided to the LightningTrainer. Otherwise, LightningTrainer will use datamodule or dataloaders specified in ``LightningConfig.trainer_init_config``. For valid arguments to pass, please refer to: :py:meth:`Dataset.iter_torch_batches - ` + ` preprocessor: A ray.data.Preprocessor to preprocess the provided datasets. resume_from_checkpoint: A checkpoint to resume training from. @@ -489,13 +489,13 @@ def _lightning_train_loop_per_worker(config): if not (train_dataloaders or datamodule or train_ray_dataset): raise RuntimeError( "Please provide at least one of the following data inputs: " - "train_dataloaders, datamodule, or Datastreams with key 'train'." + "train_dataloaders, datamodule, or Datasets with key 'train'." ) if train_ray_dataset: if datamodule: logger.warning( - "Using Datastreams as primary input. The 'datamodule' defined in " + "Using Datasets as primary input. The 'datamodule' defined in " "'LightningConfig.trainer_fit_params' is ignored!" ) diff --git a/python/ray/train/mosaic/mosaic_trainer.py b/python/ray/train/mosaic/mosaic_trainer.py index 12e533ff1ccb..d9c0ce5a6eb2 100644 --- a/python/ray/train/mosaic/mosaic_trainer.py +++ b/python/ray/train/mosaic/mosaic_trainer.py @@ -108,7 +108,7 @@ class MosaicTrainer(TorchTrainer): ``composer.Trainer`` object and takes in configuration dictionary (``config``) as an argument. This dictionary is based on ``trainer_init_config`` and is modified for Ray - Composer integration. 
- datasets: Any Datastreams to use for training. At the moment, we do not support + datasets: Any Datasets to use for training. At the moment, we do not support passing datasets to the trainer and using the dataset shards in the trainer loop. Instead, configure and load the datasets inside ``trainer_init_per_worker`` function diff --git a/python/ray/train/rl/rl_trainer.py b/python/ray/train/rl/rl_trainer.py index bbf406a7b285..71b41779d5af 100644 --- a/python/ray/train/rl/rl_trainer.py +++ b/python/ray/train/rl/rl_trainer.py @@ -37,7 +37,7 @@ class RLTrainer(BaseTrainer): (e.g. ``"PPO"``) or a RLlib trainer class. scaling_config: Configuration for how to scale training. run_config: Configuration for the execution of the training run. - datasets: Any Datastreams to use for training. Use the key "train" + datasets: Any Datasets to use for training. Use the key "train" to denote which dataset is the training dataset. If a ``preprocessor`` is provided and has not already been fit, it will be fit on the training dataset. All datasets will be transformed diff --git a/python/ray/train/session.py b/python/ray/train/session.py index 70801dbe653d..7b53116e6254 100644 --- a/python/ray/train/session.py +++ b/python/ray/train/session.py @@ -81,7 +81,7 @@ def get_dataset_shard( if shard is None: warnings.warn( "No dataset passed in. Returning None. Make sure to " - "pass in a Datastream to Trainer.run to use this " + "pass in a Dataset to Trainer.run to use this " "function." ) elif isinstance(shard, dict): diff --git a/python/ray/train/sklearn/sklearn_trainer.py b/python/ray/train/sklearn/sklearn_trainer.py index d570c80feb69..60dbb9c85ac1 100644 --- a/python/ray/train/sklearn/sklearn_trainer.py +++ b/python/ray/train/sklearn/sklearn_trainer.py @@ -84,7 +84,7 @@ class SklearnTrainer(BaseTrainer): Args: estimator: A scikit-learn compatible estimator to use. - datasets: Datastreams to use for training and validation. 
Must include a + datasets: Datasets to use for training and validation. Must include a "train" key denoting the training dataset. If a ``preprocessor`` is provided and has not already been fit, it will be fit on the training dataset. All datasets will be transformed by the ``preprocessor`` if diff --git a/python/ray/train/tensorflow/tensorflow_trainer.py b/python/ray/train/tensorflow/tensorflow_trainer.py index 045b6f2e6100..28ea7f8a2c74 100644 --- a/python/ray/train/tensorflow/tensorflow_trainer.py +++ b/python/ray/train/tensorflow/tensorflow_trainer.py @@ -65,7 +65,7 @@ def train_loop_per_worker(): # Returns dict of last saved checkpoint. session.get_checkpoint() - # Returns the Datastream shard for the given key. + # Returns the Dataset shard for the given key. session.get_dataset_shard("my_dataset") # Returns the total number of workers executing training. @@ -154,7 +154,7 @@ def train_loop_per_worker(config): scaling_config: Configuration for how to scale data parallel training. dataset_config: Configuration for dataset ingest. run_config: Configuration for the execution of the training run. - datasets: Any Datastreams to use for training. Use + datasets: Any Datasets to use for training. Use the key "train" to denote which dataset is the training dataset. If a ``preprocessor`` is provided and has not already been fit, it will be fit on the training dataset. All datasets will be transformed diff --git a/python/ray/train/tensorflow/train_loop_utils.py b/python/ray/train/tensorflow/train_loop_utils.py index f2657c75ccc4..d0a74a35e6de 100644 --- a/python/ray/train/tensorflow/train_loop_utils.py +++ b/python/ray/train/tensorflow/train_loop_utils.py @@ -8,7 +8,7 @@ def prepare_dataset_shard(tf_dataset_shard: tf.data.Dataset): """A utility function that overrides default config for Tensorflow Dataset. 
This should be used on a TensorFlow ``Dataset`` created by calling - ``iter_tf_batches()`` on a ``ray.data.Datastream`` returned by + ``iter_tf_batches()`` on a ``ray.data.Dataset`` returned by ``ray.air.session.get_dataset_shard()`` since the dataset has already been sharded across the workers. diff --git a/python/ray/train/tests/test_base_trainer.py b/python/ray/train/tests/test_base_trainer.py index 59ce56e73f3b..505b3bef039a 100644 --- a/python/ray/train/tests/test_base_trainer.py +++ b/python/ray/train/tests/test_base_trainer.py @@ -118,7 +118,7 @@ def test_validate_datasets(ray_start_4_cpus): with pytest.raises(ValueError) as e: DummyTrainer(train_loop=None, datasets={"train": 1}) - assert "The Dataset under train key is not a `ray.data.Datastream`" + assert "The Dataset under train key is not a `ray.data.Dataset`" with pytest.raises(ValueError) as e: DummyTrainer( diff --git a/python/ray/train/tests/test_xgboost_trainer.py b/python/ray/train/tests/test_xgboost_trainer.py index b1eedbe1dcaf..6812a9c7ef67 100644 --- a/python/ray/train/tests/test_xgboost_trainer.py +++ b/python/ray/train/tests/test_xgboost_trainer.py @@ -246,7 +246,7 @@ def test_validation(ray_start_4_cpus): def test_distributed_data_loading(ray_start_4_cpus): - """Checks that XGBoostTrainer does distributed data loading for Datastreams.""" + """Checks that XGBoostTrainer does distributed data loading for Datasets.""" class DummyXGBoostTrainer(XGBoostTrainer): def _train(self, params, dtrain, **kwargs): diff --git a/python/ray/train/torch/torch_trainer.py b/python/ray/train/torch/torch_trainer.py index 5b0951799fe7..e40eef526bba 100644 --- a/python/ray/train/torch/torch_trainer.py +++ b/python/ray/train/torch/torch_trainer.py @@ -56,7 +56,7 @@ def train_loop_per_worker(): # Get dict of last saved checkpoint. session.get_checkpoint() - # Session returns the Datastream shard for the given key. + # Session returns the Dataset shard for the given key. 
session.get_dataset_shard("my_dataset") # Get the total number of workers executing training. @@ -247,7 +247,7 @@ def train_loop_per_worker(): scaling_config: Configuration for how to scale data parallel training. dataset_config: Configuration for dataset ingest. run_config: Configuration for the execution of the training run. - datasets: Any Datastreams to use for training. Use + datasets: Any Datasets to use for training. Use the key "train" to denote which dataset is the training dataset. If a ``preprocessor`` is provided and has not already been fit, it will be fit on the training dataset. All datasets will be transformed diff --git a/python/ray/train/xgboost/xgboost_trainer.py b/python/ray/train/xgboost/xgboost_trainer.py index e6f1156b7624..ea4262deb3a0 100644 --- a/python/ray/train/xgboost/xgboost_trainer.py +++ b/python/ray/train/xgboost/xgboost_trainer.py @@ -46,7 +46,7 @@ class XGBoostTrainer(GBDTTrainer): result = trainer.fit() Args: - datasets: Datastreams to use for training and validation. Must include a + datasets: Datasets to use for training and validation. Must include a "train" key denoting the training dataset. If a ``preprocessor`` is provided and has not already been fit, it will be fit on the training dataset. All datasets will be transformed by the ``preprocessor`` if diff --git a/python/ray/tune/execution/experiment_state.py b/python/ray/tune/execution/experiment_state.py index fa719d3f7463..f03297a32051 100644 --- a/python/ray/tune/execution/experiment_state.py +++ b/python/ray/tune/execution/experiment_state.py @@ -223,7 +223,7 @@ def checkpoint( # Checkpoint checkpoint_time_start = time.monotonic() - # NOTE: This context manager is for Datastreams captured in a trial config. + # NOTE: This context manager is for Datasets captured in a trial config. # This is the case when *tuning over datasets*. # If the datasets have already been full executed, then serializing # block refs means that this checkpoint is not usable in a new Ray cluster. 
diff --git a/python/ray/tune/impl/out_of_band_serialize_dataset.py b/python/ray/tune/impl/out_of_band_serialize_dataset.py index 25749f3b4aa6..112cee4d8032 100644 --- a/python/ray/tune/impl/out_of_band_serialize_dataset.py +++ b/python/ray/tune/impl/out_of_band_serialize_dataset.py @@ -5,11 +5,11 @@ def _deserialize_and_fully_execute_if_needed(serialized_ds: bytes): - ds = ray.data.Datastream.deserialize_lineage(serialized_ds) + ds = ray.data.Dataset.deserialize_lineage(serialized_ds) return ds -def _reduce(ds: ray.data.Datastream): +def _reduce(ds: ray.data.Dataset): tb_list = traceback.format_list(traceback.extract_stack()) _already_in_out_of_band_serialization = False for tb in tb_list: @@ -27,7 +27,7 @@ def _reduce(ds: ray.data.Datastream): def out_of_band_serialize_dataset(): context = ray._private.worker.global_worker.get_serialization_context() try: - context._register_cloudpickle_reducer(ray.data.Datastream, _reduce) + context._register_cloudpickle_reducer(ray.data.Dataset, _reduce) yield finally: - context._unregister_cloudpickle_reducer(ray.data.Datastream) + context._unregister_cloudpickle_reducer(ray.data.Dataset) diff --git a/python/ray/tune/tests/test_trial_runner_3.py b/python/ray/tune/tests/test_trial_runner_3.py index 3893104b5f62..35e8575e872b 100644 --- a/python/ray/tune/tests/test_trial_runner_3.py +++ b/python/ray/tune/tests/test_trial_runner_3.py @@ -1159,7 +1159,7 @@ def testPeriodicCloudCheckpointSyncTimeout(self): assert syncer.sync_up_counter == 2 def testExperimentCheckpointWithDatasets(self): - """Test trial runner checkpointing where trials contain Datastreams. + """Test trial runner checkpointing where trials contain Datasets. When possible, a dataset plan should be saved (for read_* APIs). See `Dataset.serialize_lineage` for more information. 
diff --git a/python/ray/tune/tuner.py b/python/ray/tune/tuner.py index edcdbe802beb..1658188b9530 100644 --- a/python/ray/tune/tuner.py +++ b/python/ray/tune/tuner.py @@ -212,7 +212,7 @@ def restore( param_space: The same `param_space` that was passed to the original Tuner. This can be optionally re-specified due to the `param_space` potentially containing Ray object - references (tuning over Datastreams or tuning over + references (tuning over Datasets or tuning over several `ray.put` object references). **Tune expects the `param_space` to be unmodified**, and the only part that will be used during restore are the updated object references. diff --git a/python/ray/util/actor_group.py b/python/ray/util/actor_group.py index a7a625cb1808..35484abfe62b 100644 --- a/python/ray/util/actor_group.py +++ b/python/ray/util/actor_group.py @@ -56,7 +56,7 @@ def remote(self, *args, **kwargs): f"in https://docs.ray.io/en/{get_ray_doc_version()}/ray-more-libs/multiprocessing.html. " # noqa: E501 "For stateful/actor processing such as batch prediction, use " "Datasets.map_batches(compute=ActorPoolStrategy, ...), see details in " - f"https://docs.ray.io/en/{get_ray_doc_version()}/data/api/datastream.html#ray.data.Datastream.map_batches.", # noqa: E501 + f"https://docs.ray.io/en/{get_ray_doc_version()}/data/api/dataset.html#ray.data.Dataset.map_batches.", # noqa: E501 warning=True, ) class ActorGroup: diff --git a/release/nightly_tests/dataset/aggregate_benchmark.py b/release/nightly_tests/dataset/aggregate_benchmark.py index a1112a4c8c0f..af4f551e614e 100644 --- a/release/nightly_tests/dataset/aggregate_benchmark.py +++ b/release/nightly_tests/dataset/aggregate_benchmark.py @@ -3,7 +3,7 @@ import ray from ray.data.aggregate import _AggregateOnKeyBase, Max, Mean, Min, Sum from ray.data.block import Block -from ray.data.datastream import Dataset +from ray.data.dataset import Dataset import pyarrow.compute as pac from benchmark import Benchmark diff --git 
a/release/nightly_tests/dataset/benchmark.py b/release/nightly_tests/dataset/benchmark.py index b40c8e599b26..0b650e72c5ec 100644 --- a/release/nightly_tests/dataset/benchmark.py +++ b/release/nightly_tests/dataset/benchmark.py @@ -4,7 +4,7 @@ import time from typing import Callable -from ray.data.datastream import Dataset +from ray.data.dataset import Dataset class Benchmark: diff --git a/release/nightly_tests/dataset/iter_batches_benchmark.py b/release/nightly_tests/dataset/iter_batches_benchmark.py index 18c288f25f49..2720a733e2f8 100644 --- a/release/nightly_tests/dataset/iter_batches_benchmark.py +++ b/release/nightly_tests/dataset/iter_batches_benchmark.py @@ -2,7 +2,7 @@ from typing import Optional import ray -from ray.data.datastream import Dataset +from ray.data.dataset import Dataset from benchmark import Benchmark diff --git a/release/nightly_tests/dataset/iter_tensor_batches_benchmark.py b/release/nightly_tests/dataset/iter_tensor_batches_benchmark.py index 3419a435c985..9541adb412a6 100644 --- a/release/nightly_tests/dataset/iter_tensor_batches_benchmark.py +++ b/release/nightly_tests/dataset/iter_tensor_batches_benchmark.py @@ -3,7 +3,7 @@ from typing import Optional, Union, List import ray -from ray.data.datastream import Dataset +from ray.data.dataset import Dataset from benchmark import Benchmark diff --git a/release/nightly_tests/dataset/map_batches_benchmark.py b/release/nightly_tests/dataset/map_batches_benchmark.py index 971810b4a09c..0a2bc8596e9d 100644 --- a/release/nightly_tests/dataset/map_batches_benchmark.py +++ b/release/nightly_tests/dataset/map_batches_benchmark.py @@ -3,7 +3,7 @@ import ray from ray.data._internal.compute import ActorPoolStrategy, ComputeStrategy -from ray.data.datastream import Dataset, MaterializedDatastream +from ray.data.dataset import Dataset, MaterializedDataset from benchmark import Benchmark @@ -22,7 +22,7 @@ def map_batches( is_eager_executed: Optional[bool] = False, ) -> Dataset: - assert 
isinstance(input_ds, MaterializedDatastream) + assert isinstance(input_ds, MaterializedDataset) ds = input_ds for _ in range(num_calls): diff --git a/release/nightly_tests/dataset/operator_fusion_benchmark.py b/release/nightly_tests/dataset/operator_fusion_benchmark.py index 1156dfa33509..c8b6ea579eb2 100644 --- a/release/nightly_tests/dataset/operator_fusion_benchmark.py +++ b/release/nightly_tests/dataset/operator_fusion_benchmark.py @@ -77,7 +77,7 @@ def make_ds( num_columns: int, ops_spec: List[Dict[str, Any]], target_max_block_size: int, -) -> ray.data.Datastream: +) -> ray.data.Dataset: ds = ray.data.read_datasource( BlockDatasource(), num_blocks_per_task=num_blocks_per_task, @@ -96,7 +96,7 @@ def make_ds( return ds -def execute_ds(ds: ray.data.Datastream): +def execute_ds(ds: ray.data.Dataset): ds = ds.fully_executed() diff --git a/release/nightly_tests/dataset/read_images_benchmark.py b/release/nightly_tests/dataset/read_images_benchmark.py index 111fb3cd8b82..7eb4c8e26a6a 100644 --- a/release/nightly_tests/dataset/read_images_benchmark.py +++ b/release/nightly_tests/dataset/read_images_benchmark.py @@ -7,7 +7,7 @@ from PIL import Image import ray -from ray.data.datastream import Dataset +from ray.data.dataset import Dataset from benchmark import Benchmark diff --git a/release/nightly_tests/dataset/read_parquet_benchmark.py b/release/nightly_tests/dataset/read_parquet_benchmark.py index 3205b9a18d55..6e6fff795627 100644 --- a/release/nightly_tests/dataset/read_parquet_benchmark.py +++ b/release/nightly_tests/dataset/read_parquet_benchmark.py @@ -1,5 +1,5 @@ import ray -from ray.data.datastream import Dataset +from ray.data.dataset import Dataset from benchmark import Benchmark from parquet_data_generator import generate_data diff --git a/release/nightly_tests/dataset/read_tfrecords_benchmark.py b/release/nightly_tests/dataset/read_tfrecords_benchmark.py index 6cfd817b4ceb..46526fad3fc1 100644 --- a/release/nightly_tests/dataset/read_tfrecords_benchmark.py 
+++ b/release/nightly_tests/dataset/read_tfrecords_benchmark.py @@ -4,7 +4,7 @@ from typing import List, Tuple import ray -from ray.data.datastream import Dataset +from ray.data.dataset import Dataset from benchmark import Benchmark from read_images_benchmark import generate_images diff --git a/rllib/evaluation/rollout_worker.py b/rllib/evaluation/rollout_worker.py index a30c07fa33fc..6f68dc01ee0b 100644 --- a/rllib/evaluation/rollout_worker.py +++ b/rllib/evaluation/rollout_worker.py @@ -265,7 +265,7 @@ def __init__( log_dir: Optional[str] = None, spaces: Optional[Dict[PolicyID, Tuple[Space, Space]]] = None, default_policy_class: Optional[Type[Policy]] = None, - dataset_shards: Optional[List[ray.data.Datastream]] = None, + dataset_shards: Optional[List[ray.data.Dataset]] = None, # Deprecated: This is all specified in `config` anyways. policy_config=DEPRECATED_VALUE, input_creator=DEPRECATED_VALUE, diff --git a/rllib/offline/dataset_reader.py b/rllib/offline/dataset_reader.py index 4dabf2050c03..d105cd59e2c4 100644 --- a/rllib/offline/dataset_reader.py +++ b/rllib/offline/dataset_reader.py @@ -69,7 +69,7 @@ def _unzip_if_needed(paths: List[str], format: str): @PublicAPI def get_dataset_and_shards( config: "AlgorithmConfig", num_workers: int = 0 -) -> Tuple[ray.data.Datastream, List[ray.data.Datastream]]: +) -> Tuple[ray.data.Dataset, List[ray.data.Dataset]]: """Returns a dataset and a list of shards. This function uses algorithm configs to create a dataset and a list of shards. @@ -77,12 +77,12 @@ def get_dataset_and_shards( input: The input type should be "dataset". input_config: A dict containing the following key and values: `format`: str, speciifies the format of the input data. This will be the - format that ray dataset supports. See ray.data.Datastream for + format that ray dataset supports. See ray.data.Dataset for supported formats. Only "parquet" or "json" are supported for now. `paths`: str, a single string or a list of strings. 
Each string is a path to a file or a directory holding the dataset. It can be either a local path or a remote path (e.g. to an s3 bucket). - `loader_fn`: Callable[None, ray.data.Datastream], Instead of + `loader_fn`: Callable[None, ray.data.Dataset], Instead of specifying paths and format, you can specify a function to load the dataset. `parallelism`: int, The number of tasks to use for loading the dataset. If not specified, it will be set to the number of workers. @@ -185,7 +185,7 @@ class DatasetReader(InputReader): "input_config": { "format": "json", # A single data file, a directory, or anything - # that ray.data.datastream recognizes. + # that ray.data.dataset recognizes. "paths": "/tmp/sample_batches/", # By default, parallelism=num_workers. "parallelism": 3, @@ -197,7 +197,7 @@ class DatasetReader(InputReader): """ @PublicAPI - def __init__(self, ds: ray.data.Datastream, ioctx: Optional[IOContext] = None): + def __init__(self, ds: ray.data.Dataset, ioctx: Optional[IOContext] = None): """Initializes a DatasetReader instance. 
Args: diff --git a/rllib/offline/feature_importance.py b/rllib/offline/feature_importance.py index 61adef81f60f..2efe17790a79 100644 --- a/rllib/offline/feature_importance.py +++ b/rllib/offline/feature_importance.py @@ -70,7 +70,7 @@ def _compute_actions( @ray.remote def get_feature_importance_on_index( - dataset: ray.data.Datastream, + dataset: ray.data.Dataset, *, index: int, perturb_fn: Callable[[pd.DataFrame, int], None], diff --git a/rllib/offline/tests/test_dataset_reader.py b/rllib/offline/tests/test_dataset_reader.py index 3887908402fa..9557f68cb4f1 100644 --- a/rllib/offline/tests/test_dataset_reader.py +++ b/rllib/offline/tests/test_dataset_reader.py @@ -60,7 +60,7 @@ def test_dataset_shard_with_only_local(self): _, shards = get_dataset_and_shards(config, num_workers=0) assert len(shards) == 1 - assert isinstance(shards[0], ray.data.Datastream) + assert isinstance(shards[0], ray.data.Dataset) def test_dataset_shard_remote_workers_with_local_worker(self): """Tests whether the dataset_shard function works correctly for the remote @@ -76,7 +76,7 @@ def test_dataset_shard_remote_workers_with_local_worker(self): assert len(shards) == NUM_WORKERS + 1 assert shards[0] is None assert all( - isinstance(remote_shard, ray.data.Datastream) for remote_shard in shards[1:] + isinstance(remote_shard, ray.data.Dataset) for remote_shard in shards[1:] ) def test_dataset_shard_with_task_parallelization(self): @@ -100,7 +100,7 @@ def test_dataset_shard_with_task_parallelization(self): assert len(shards) == NUM_WORKERS + 1 assert shards[0] is None assert all( - isinstance(remote_shard, ray.data.Datastream) for remote_shard in shards[1:] + isinstance(remote_shard, ray.data.Dataset) for remote_shard in shards[1:] ) def test_dataset_shard_with_loader_fn(self): diff --git a/rllib/utils/metrics/window_stat.py b/rllib/utils/metrics/window_stat.py index 8b270b4a2206..ff6c15569797 100644 --- a/rllib/utils/metrics/window_stat.py +++ b/rllib/utils/metrics/window_stat.py @@ -2,7 +2,7 @@ 
class WindowStat: - """Handles/stores incoming datastream and provides window-based statistics. + """Handles/stores incoming dataset and provides window-based statistics. Examples: >>> win_stats = WindowStat("level", 3) From 8492f8001b24665869e417202a694efac22972f2 Mon Sep 17 00:00:00 2001 From: Yunxuan Xiao Date: Tue, 9 May 2023 17:34:34 -0700 Subject: [PATCH 308/424] [AIR] LightningTrainer Dolly V2 FSDP Fine-tuning Example (#34990) Signed-off-by: woshiyyya --- doc/source/_toc.yml | 1 + doc/source/ray-air/examples/BUILD | 1 + .../dolly_lightning_fsdp_finetuning.ipynb | 1043 +++++++++++++++++ .../examples/gptj_deepspeed_fine_tuning.ipynb | 2 + doc/source/ray-air/examples/index.rst | 1 + doc/source/train/examples.rst | 8 + .../lightning/lightning_cola_advanced.ipynb | 11 + .../lightning/lightning_mnist_example.ipynb | 1 + .../examples/tune-pytorch-lightning.ipynb | 3 +- .../dolly_v2_fsdp_compute_aws.yaml | 20 + .../dolly_v2_fsdp_env.yaml | 21 + .../lightning-llm-finetuning-7b.ipynb | 1 + .../test_myst_doc.py | 1 + release/release_tests.yaml | 17 + 14 files changed, 1130 insertions(+), 1 deletion(-) create mode 100644 doc/source/ray-air/examples/dolly_lightning_fsdp_finetuning.ipynb create mode 100644 release/air_examples/dolly_v2_lightning_fsdp_finetuning/dolly_v2_fsdp_compute_aws.yaml create mode 100644 release/air_examples/dolly_v2_lightning_fsdp_finetuning/dolly_v2_fsdp_env.yaml create mode 120000 release/air_examples/dolly_v2_lightning_fsdp_finetuning/lightning-llm-finetuning-7b.ipynb create mode 120000 release/air_examples/dolly_v2_lightning_fsdp_finetuning/test_myst_doc.py diff --git a/doc/source/_toc.yml b/doc/source/_toc.yml index 88247c248ba6..55eaedf08b64 100644 --- a/doc/source/_toc.yml +++ b/doc/source/_toc.yml @@ -82,6 +82,7 @@ parts: - file: ray-air/examples/gptj_batch_prediction - file: ray-air/examples/gptj_serving - file: ray-air/examples/dreambooth_finetuning + - file: ray-air/examples/dolly_lightning_fsdp_finetuning - file: ray-air/api/api - file: 
ray-air/benchmarks diff --git a/doc/source/ray-air/examples/BUILD b/doc/source/ray-air/examples/BUILD index 5b6ce7351c76..1fcc19fd24b7 100644 --- a/doc/source/ray-air/examples/BUILD +++ b/doc/source/ray-air/examples/BUILD @@ -52,6 +52,7 @@ py_test_run_all_notebooks( "stablediffusion_batch_prediction.ipynb", # Requires GPUs "gptj_deepspeed_fine_tuning.ipynb", # Requires release test "opt_deepspeed_batch_inference.ipynb", # Requires release test + "dolly_lightning_fsdp_finetuning.ipynb", # Requires release test ], data = ["//doc/source/ray-air/examples:air_examples"], tags = ["exclusive", "team:ml", "ray_air"], diff --git a/doc/source/ray-air/examples/dolly_lightning_fsdp_finetuning.ipynb b/doc/source/ray-air/examples/dolly_lightning_fsdp_finetuning.ipynb new file mode 100644 index 000000000000..f4c0953a92c5 --- /dev/null +++ b/doc/source/ray-air/examples/dolly_lightning_fsdp_finetuning.ipynb @@ -0,0 +1,1043 @@ +{ + "cells": [ + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "(dolly_lightning_fsdp_finetuning)=\n", + "\n", + "# Fine-tune `dolly-v2-7b` with Ray AIR LightningTrainer and FSDP" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In this example, we demonstrate how to use Ray AIR to fine-tune a [`dolly-v2-7b`](https://huggingface.co/databricks/dolly-v2-7b) model. `dolly-v2-12b` is a 12 billion parameter causal language model created by Databricks, derived from EleutherAI’s [Pythia-12b](https://huggingface.co/EleutherAI/pythia-12b), and fine-tuned on a [~15K record instruction corpus](https://github.com/databrickslabs/dolly/tree/master/data).\n", + "\n", + "We load the pre-trained model from the HuggingFace model hub into a LightningModule and launch an FSDP fine-tuning job across 16 T4 GPUs with the help of {class}`Ray LightningTrainer `. 
It is also straightforward to fine-tune other similar large language models in a similar manner as shown in this example.\n", + "\n", + "Before starting this example, we highly recommend reading [Ray AIR Key Concepts](air-key-concepts) and [Ray Data Key Concepts](data_key_concepts)." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Set up ray cluster \n", + "In this example, we are using a ray cluster with 16 g4dn.4xlarge instances. Each instance has one Tesla T4 GPU (16GiB Memory). \n", + "\n", + "We define a `runtime_env` to install the necessary Python libraries on each node. You can skip this step if you have already installed all the required packages in your workers' base image." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "import ray\n", + "\n", + "ray.init(\n", + " runtime_env={\n", + " \"pip\": [\n", + " \"datasets\",\n", + " \"evaluate\",\n", + " \"transformers>=4.26.0\",\n", + " \"torch>=1.12.0\",\n", + " \"pytorch_lightning>=2.0\",\n", + " ]\n", + " }\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "MODEL_NAME = \"databricks/dolly-v2-7b\"" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Prepare your data \n", + "We are using tiny_shakespeare for fine-tuning, which contains 40,000 lines of Shakespeare from a variety of Shakespeare's plays. Featured in Andrej Karpathy's blog post ['The Unreasonable Effectiveness of Recurrent Neural Networks'](http://karpathy.github.io/2015/05/21/rnn-effectiveness/). \n", + "\n", + "Dataset samples:\n", + "```\n", + "BAPTISTA:\n", + "I know him well: you are welcome for his sake.\n", + "\n", + "GREMIO:\n", + "Saving your tale, Petruchio, I pray,\n", + "Let us, that are poor petitioners, speak too:\n", + "Baccare! 
you are marvellous forward.\n", + "\n", + "PETRUCHIO:\n", + "O, pardon me, Signior Gremio; I would fain be doing.\n", + "```\n", + "\n", + "Here, we have adopted similar pre-processing logic from another demo: {ref}`GPT-J-6B Fine-Tuning with Ray AIR and DeepSpeed `." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "import ray\n", + "import pandas as pd\n", + "from datasets import load_dataset\n", + "from ray.data.preprocessors import BatchMapper, Chain\n", + "from transformers import AutoTokenizer, AutoModelForCausalLM\n", + "\n", + "def split_text(batch: pd.DataFrame) -> pd.DataFrame:\n", + " text = list(batch[\"text\"])\n", + " flat_text = \"\".join(text)\n", + " split_text = [\n", + " x.strip()\n", + " for x in flat_text.split(\"\\n\")\n", + " if x.strip() and not x.strip()[-1] == \":\"\n", + " ]\n", + " return pd.DataFrame(split_text, columns=[\"text\"])\n", + "\n", + "\n", + "def tokenize(batch: pd.DataFrame) -> dict:\n", + " tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, padding_side=\"left\")\n", + " tokenizer.pad_token = tokenizer.eos_token\n", + " ret = tokenizer(\n", + " list(batch[\"text\"]),\n", + " truncation=True,\n", + " max_length=256,\n", + " padding=\"max_length\",\n", + " return_tensors=\"np\",\n", + " )\n", + " ret[\"labels\"] = ret[\"input_ids\"].copy()\n", + " return dict(ret)\n", + "\n", + "splitter = BatchMapper(split_text, batch_format=\"pandas\")\n", + "tokenizer = BatchMapper(tokenize, batch_format=\"pandas\")\n", + "preprocessor = Chain(splitter, tokenizer)\n", + "\n", + "hf_dataset = load_dataset(\"tiny_shakespeare\")\n", + "ray_datasets = ray.data.from_huggingface(hf_dataset)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We first split the original paragraphs into multiple sentences, then tokenize them. 
Here are some samples:" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": { + "tags": [] + }, + "outputs": [ + { + "data": { + "text/plain": [ + "[{'text': 'Before we proceed any further, hear me speak.'},\n", + " {'text': 'Speak, speak.'},\n", + " {'text': 'You are all resolved rather to die than to famish?'},\n", + " {'text': 'Resolved. resolved.'},\n", + " {'text': 'First, you know Caius Marcius is chief enemy to the people.'},\n", + " {'text': \"We know't, we know't.\"},\n", + " {'text': \"Let us kill him, and we'll have corn at our own price.\"},\n", + " {'text': \"Is't a verdict?\"},\n", + " {'text': \"No more talking on't; let it be done: away, away!\"},\n", + " {'text': 'One word, good citizens.'}]" + ] + }, + "execution_count": 5, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "ds = ray_datasets[\"train\"]\n", + "splitter.fit_transform(ds).take(10)" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Define your lightning model\n", + "\n", + "In this example, we use the [dolly-v2-7b](https://huggingface.co/databricks/dolly-v2-7b) model for finetuning. It is an instruction-following large language model trained on the Databricks machine learning platform that is licensed for commercial use. We load the model weights from Huggingface Model Hub and encapsulate it into a `pl.LightningModule`.\n", + "\n", + ":::{note}\n", + "Make sure you pass the FSDP wrapped model parameters `self.trainer.model.parameters()` into the optimizer, instead of `self.model.parameters()`. 
\n", + ":::\n" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "import torch\n", + "import pytorch_lightning as pl\n", + "\n", + "class DollyV2Model(pl.LightningModule):\n", + " def __init__(self, lr=2e-5, eps=1e-8):\n", + " super().__init__()\n", + " self.lr = lr\n", + " self.eps = eps\n", + " self.model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)\n", + " self.predictions = []\n", + " self.references = []\n", + "\n", + " def forward(self, batch):\n", + " outputs = self.model(\n", + " batch[\"input_ids\"], \n", + " attention_mask=batch[\"attention_mask\"], \n", + " labels=batch[\"labels\"]\n", + " )\n", + " return outputs.loss\n", + "\n", + " def training_step(self, batch, batch_idx):\n", + " loss = self.forward(batch)\n", + " self.log(\"train_loss\", loss, prog_bar=True, on_step=True)\n", + " return loss\n", + "\n", + " def configure_optimizers(self):\n", + " if self.global_rank == 0:\n", + " print(self.trainer.model)\n", + " return torch.optim.AdamW(self.trainer.model.parameters(), lr=self.lr, eps=self.eps)" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Configure your FSDP strategy\n", + "As Dolly-v2-3b is a relatively large model, it cannot be properly fit into a single commercial GPU. In this example, we use the FSDP strategy to shard model parameters across multiple workers. This allows us to avoid GPU out-of-memory issues and support a larger global batch size.\n", + "\n", + "![](https://user-images.githubusercontent.com/26745457/236892936-d4b91751-4689-421e-ac5f-edfd2eeeb635.png)\n", + "Image source: [Fully Sharded Data Parallel: faster AI training with fewer GPUs](https://engineering.fb.com/2021/07/15/open-source/fsdp/)\n", + "\n", + ":::{note}\n", + "FSDP is a type of data parallelism that shards model parameters, optimizer states and gradients across DDP ranks. This was inspired by Xu et al. 
as well as the ZeRO Stage 3 from DeepSpeed. You may refer to these blogs for more information:\n", + "\n", + "- [Fully Sharded Data Parallel: faster AI training with fewer GPUs](https://engineering.fb.com/2021/07/15/open-source/fsdp/)\n", + "- [Getting Started with Fully Sharded Data Parallel(FSDP)](https://pytorch.org/tutorials/intermediate/FSDP_tutorial.html#:~:text=FSDP%20is%20a%20type%20of,sizes%20for%20our%20training%20job.)\n", + "- [PyTorch FSDP Tutorial](https://www.youtube.com/watch?v=8_k76AHu__s&list=PL_lsbAsL_o2BT6aerEKgIoufVD_fodnuT)\n", + ":::\n", + "\n", + "To start trainig with Lightning's [FSDPStrategy](https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.strategies.FSDPStrategy.html#lightning.pytorch.strategies.FSDPStrategy), you only need to provide the initialization arguments in `LightningConfigBuilder.strategy()`. Behind the scenes, LightningTrainer handles the cluster environment settings and job launching.\n" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "import functools\n", + "from ray.train.lightning import LightningTrainer, LightningConfigBuilder\n", + "from ray.air.config import RunConfig, ScalingConfig, CheckpointConfig\n", + "from torch.distributed.fsdp.wrap import transformer_auto_wrap_policy\n", + "from torch.distributed.fsdp import ShardingStrategy, BackwardPrefetch\n", + "from transformers.models.gpt_neox.modeling_gpt_neox import GPTNeoXLayer\n", + "\n", + "# Define the model sharding policy:\n", + "# Wrap every GPTNeoXLayer as its own FSDP instance\n", + "auto_wrap_policy = functools.partial(\n", + " transformer_auto_wrap_policy,\n", + " transformer_layer_cls = {GPTNeoXLayer}\n", + ")\n", + "\n", + "# Aggregate all arguments for LightningTrainer\n", + "lightning_config = (\n", + " LightningConfigBuilder()\n", + " .module(cls=DollyV2Model, lr=2e-5, eps=1e-8)\n", + " .trainer(\n", + " max_epochs=1, \n", + " accelerator=\"gpu\", \n", + " 
precision=\"16-mixed\",\n", + " )\n", + " .strategy(\n", + " name=\"fsdp\",\n", + " sharding_strategy=ShardingStrategy.FULL_SHARD,\n", + " backward_prefetch=BackwardPrefetch.BACKWARD_PRE,\n", + " forward_prefetch=True,\n", + " auto_wrap_policy=auto_wrap_policy,\n", + " limit_all_gathers=True,\n", + " activation_checkpointing=[GPTNeoXLayer],\n", + " )\n", + " .checkpointing(save_top_k=0, save_weights_only=True, save_last=True)\n", + ")" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + ":::{tip}\n", + "\n", + "Some tips for FSDP configutarion:\n", + "- `sharding_strategy`:\n", + " - `ShardingStrategy.NO_SHARD`: Parameters, gradients, and optimizer states are not sharded. Similar to DDP.\n", + " - `ShardingStrategy.SHARD_GRAD_OP`: Gradients and optimizer states are sharded during computation, and additionally, parameters are sharded outside computation. Similar to ZeRO stage-2.\n", + " - `ShardingStrategy.FULL_SHARD`: Parameters, gradients, and optimizer states are sharded. It has minimal GRAM usage among the 3 options. Similar to ZeRO stage-3.\n", + "- `auto_wrap_policy`:\n", + " - Model layers are often wrapped with FSDP in a layered fashion. This means that only the layers in a single FSDP instance are required to aggregate all parameters to a single device during forwarding or backward calculations.\n", + " - Use `transformer_auto_wrap_policy` to automatically wrap each Transformer Block into a single FSDP instance. \n", + "- `backward_prefetch` and `forward_prefetch`:\n", + " - Overlap the upcoming all-gather while executing the current forward/backward pass. It can improve throughput but may slightly increase peak memory usage.\n", + ":::" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Fine-tune with LightningTrainer\n", + "\n", + "```{note}\n", + "Here we save the checkpoints to the local file system. 
You can also upload the checkpoints to cloud storage by setting S3 bucket URI to {class}`air.RunConfig(storage_path=S3_BUCKET_URI) `. See {ref}`train-run-config` for an example.\n", + "```" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "num_workers = 16\n", + "batch_size_per_worker = 10" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": { + "tags": [ + "remove-cell" + ] + }, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2023-05-05 00:17:19,384\tINFO streaming_executor.py:91 -- Executing DAG InputDataBuffer[Input] -> TaskPoolMapOperator[BatchMapper]\n", + "2023-05-05 00:17:19,384\tINFO streaming_executor.py:92 -- Execution config: ExecutionOptions(resource_limits=ExecutionResources(cpu=None, gpu=None, object_store_memory=None), locality_with_output=False, preserve_order=False, actor_locality_enabled=True, verbose_progress=False)\n", + "2023-05-05 00:17:19,385\tINFO streaming_executor.py:94 -- Tip: For detailed progress reporting, run `ray.data.DataContext.get_current().execution_options.verbose_progress = True`\n", + "Running: 0.0/272.0 CPU, 0.0/16.0 GPU, 1.98 MiB/73.21 GiB object_store_memory: 0%| | 0/1 [00:00.\n", + " \r" + ] + }, + { + "data": { + "text/plain": [ + "" + ] + }, + "execution_count": 8, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "from pytorch_lightning.callbacks import TQDMProgressBar\n", + "\n", + "# Create a customized progress bar for LightningTrainer\n", + "class DollyV2ProgressBar(TQDMProgressBar):\n", + " def __init__(self, num_iters_per_epoch, *args, **kwargs):\n", + " super().__init__(*args, **kwargs)\n", + " self.num_iters_per_epoch = num_iters_per_epoch\n", + " \n", + " def on_train_epoch_start(self, trainer, *_):\n", + " super().on_train_epoch_start(trainer, *_)\n", + " self.train_progress_bar.reset(self.num_iters_per_epoch)\n", + "\n", + "total_batches = 
splitter.fit_transform(ray_datasets[\"train\"]).count()\n", + "num_iters_per_epoch = total_batches // (num_workers * batch_size_per_worker)\n", + "lightning_config.trainer(callbacks=[DollyV2ProgressBar(num_iters_per_epoch)])" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": { + "tags": [] + }, + "outputs": [ + { + "data": { + "text/html": [ + "
    \n", + "
    \n", + "
    \n", + "

    Tune Status

    \n", + " \n", + "\n", + "\n", + "\n", + "\n", + "\n", + "
    Current time:2023-05-05 01:03:12
    Running for: 00:45:50.28
    Memory: 35.4/124.4 GiB
    \n", + "
    \n", + "
    \n", + "
    \n", + "

    System Info

    \n", + " Using FIFO scheduling algorithm.
    Logical resource usage: 0/272 CPUs, 0/16 GPUs (0.0/16.0 accelerator_type:T4)\n", + "
    \n", + " \n", + "
    \n", + "
    \n", + "
    \n", + "

    Trial Status

    \n", + " \n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "
    Trial name status loc iter total time (s) train_loss epoch step
    LightningTrainer_e0990_00000TERMINATED10.0.102.147:41219 1 2699.78 0.166992 0 135
    \n", + "
    \n", + "
    \n", + "\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2023-05-05 00:17:21,842\tWARNING trial_runner.py:1607 -- The maximum number of pending trials has been automatically set to the number of available cluster CPUs, which is high (299 CPUs/pending trials). If you're running an experiment with a large number of trials, this could lead to scheduling overhead. In this case, consider setting the `TUNE_MAX_PENDING_TRIALS_PG` environment variable to the desired maximum number of concurrent trials.\n", + "(LightningTrainer pid=41219) 2023-05-05 00:17:28,673\tINFO backend_executor.py:128 -- Starting distributed worker processes: ['41376 (10.0.102.147)', '8301 (10.0.67.96)', '8263 (10.0.103.36)', '27794 (10.0.105.149)', '8088 (10.0.110.210)', '8238 (10.0.106.19)', '8225 (10.0.81.63)', '8200 (10.0.106.22)', '8231 (10.0.90.160)', '8345 (10.0.98.168)', '28207 (10.0.76.146)', '8213 (10.0.115.72)', '8272 (10.0.92.209)', '8247 (10.0.74.31)', '27629 (10.0.68.102)', '8224 (10.0.88.86)']\n", + "(RayTrainWorker pid=41376) 2023-05-05 00:17:30,953\tINFO config.py:86 -- Setting up process group for: env:// [rank=0, world_size=16]\n", + "\n", + "(pid=41219) Running: 0.0/272.0 CPU, 0.0/16.0 GPU, 0.0 MiB/73.21 GiB object_store_memory: 0%| | 0/1 [00:00 TaskPoolMapOperator[BatchMapper->BatchMapper] -> AllToAllOperator[RandomizeBlockOrder]\n", + "\n", + "(pid=41219) Running: 0.0/272.0 CPU, 0.0/16.0 GPU, 0.0 MiB/73.21 GiB object_store_memory: 0%| | 0/1 [00:00\n", + "

    Trial Progress

    \n", + " \n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "
    Trial name _report_on date done epoch experiment_taghostname iterations_since_restorenode_ip pidshould_checkpoint step time_since_restore time_this_iter_s time_total_s timestamp train_loss training_iterationtrial_id
    LightningTrainer_e0990_00000train_epoch_end2023-05-05_01-02-26True 0 0ip-10-0-102-147 110.0.102.14741219True 135 2699.78 2699.78 2699.78 1683273746 0.166992 1e0990_00000
    \n", + "
    \n", + "\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "(RayTrainWorker pid=41376) `Trainer.fit` stopped: `max_epochs=1` reached.\n", + "(RayTrainWorker pid=41376) RayFSDPStrategy: tearing down strategy...\n" + ] + } + ], + "source": [ + "from ray.tune.syncer import SyncConfig\n", + "# Save AIR checkpoints according to the performance on validation set\n", + "run_config = RunConfig(\n", + " name=\"finetune_dolly-v2-7b\",\n", + " checkpoint_config=CheckpointConfig(),\n", + " sync_config=SyncConfig(sync_artifacts=False)\n", + ")\n", + "\n", + "# Scale the DDP training workload across 16 GPUs\n", + "# You can change this config based on your compute resources.\n", + "scaling_config = ScalingConfig(\n", + " num_workers=num_workers, use_gpu=True, resources_per_worker={\"CPU\": 12, \"GPU\": 1}\n", + ")\n", + "\n", + "trainer = LightningTrainer(\n", + " lightning_config=lightning_config.build(),\n", + " run_config=run_config,\n", + " scaling_config=scaling_config,\n", + " datasets={\"train\": ray_datasets[\"train\"]},\n", + " datasets_iter_config={\"batch_size\": batch_size_per_worker},\n", + " preprocessor=preprocessor,\n", + ")\n", + "result = trainer.fit()\n", + "\n", + "result\n" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We finished training in 2361s. The price for an on-demand g4dn.4xlarge instance is `$1.204/hour`, while a g4dn.4xlarge instance costs `$2.176/hour`. The total cost would be `($1.204 * 15 + $2.176) * 2699 / 3600 = $15.17`." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Text-generation with HuggingFace Pipeline\n", + "\n", + "We can use the [HuggingFace Pipeline](https://huggingface.co/docs/transformers/main_classes/pipelines) to generate predictions from our fine-tuned model. 
Let's input some prompts and see if our tuned Dolly can speak like Shakespeare:" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "from transformers import pipeline\n", + "\n", + "tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, padding_side=\"right\")\n", + "\n", + "dolly = result.checkpoint.get_model(model_class=DollyV2Model, map_location=torch.device(\"cpu\"))\n", + "\n", + "nlp_pipeline = pipeline(\n", + " task=\"text-generation\", \n", + " model=dolly.model, \n", + " tokenizer=tokenizer, \n", + " device_map=\"auto\"\n", + ")\n" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "metadata": { + "tags": [] + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[{'generated_text': 'This is the very place, my lord, where I was born.'}]\n", + "[{'generated_text': 'I am a man of a thousand lives, and I will live.'}]\n", + "[{'generated_text': 'Once more, my lord, I beseech you, hear me speak.'}]\n" + ] + } + ], + "source": [ + "for prompt in [\"This is\", \"I am\", \"Once more\"]:\n", + " print(nlp_pipeline(prompt, max_new_tokens=20, do_sample=True, pad_token_id=tokenizer.eos_token_id))" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "References:\n", + "- [PyTorch FSDP Tutorial](https://www.youtube.com/watch?v=8_k76AHu__s&list=PL_lsbAsL_o2BT6aerEKgIoufVD_fodnuT)\n", + "- [Getting Started with Fully Sharded Data Parallel(FSDP)](https://pytorch.org/tutorials/intermediate/FSDP_tutorial.html#:~:text=FSDP%20is%20a%20type%20of,sizes%20for%20our%20training%20job.)\n", + "- [Fully Sharded Data Parallel: faster AI training with fewer GPUs](https://engineering.fb.com/2021/07/15/open-source/fsdp/)\n", + "- [Hugging Face: dolly-v2-7b Model Card](https://huggingface.co/databricks/dolly-v2-7b)\n", + "- [Hugging Face: Handling big models for 
inference](https://huggingface.co/docs/accelerate/usage_guides/big_modeling)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.13" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/doc/source/ray-air/examples/gptj_deepspeed_fine_tuning.ipynb b/doc/source/ray-air/examples/gptj_deepspeed_fine_tuning.ipynb index fa941d84c640..27e223dbbc99 100644 --- a/doc/source/ray-air/examples/gptj_deepspeed_fine_tuning.ipynb +++ b/doc/source/ray-air/examples/gptj_deepspeed_fine_tuning.ipynb @@ -5,6 +5,8 @@ "cell_type": "markdown", "metadata": {}, "source": [ + "(gptj_deepspeed_finetune)=\n", + "\n", "# GPT-J-6B Fine-Tuning with Ray AIR and DeepSpeed\n", "\n", "In this example, we will showcase how to use the Ray AIR for **GPT-J fine-tuning**. GPT-J is a GPT-2-like causal language model trained on the Pile dataset. This particular model has 6 billion parameters. For more information on GPT-J, click [here](https://huggingface.co/docs/transformers/model_doc/gptj).\n", diff --git a/doc/source/ray-air/examples/index.rst b/doc/source/ray-air/examples/index.rst index 48c2227a072d..8c29a9645cde 100644 --- a/doc/source/ray-air/examples/index.rst +++ b/doc/source/ray-air/examples/index.rst @@ -30,6 +30,7 @@ Text/NLP - :doc:`/ray-air/examples/gptj_serving`: How to use Ray AIR to do online serving with the Hugging Face Transformers GPT-J model. - :doc:`/ray-air/examples/dreambooth_finetuning`: How to fine-tune a DreamBooth text-to-image model with your own images. - :doc:`/ray-air/examples/opt_deepspeed_batch_inference`: How to run batch inference on a dataset of texts with a 30B OPT model. 
+- :doc:`/ray-air/examples/dolly_lightning_fsdp_finetuning`: How to fine-tune a dolly-v2-7b model with Ray AIR LightningTrainer and FSDP. Image/CV -------- diff --git a/doc/source/train/examples.rst b/doc/source/train/examples.rst index 8639b9345211..5ce1ac51e5f5 100644 --- a/doc/source/train/examples.rst +++ b/doc/source/train/examples.rst @@ -83,6 +83,14 @@ Distributed Training Examples using Ray Train Use LightningTrainer with Ray Data and Batch Predictor + .. grid-item-card:: + :img-top: /images/pytorch_lightning_small.png + :class-img-top: pt-2 w-75 d-block mx-auto fixed-height-img + + .. button-ref:: dolly_lightning_fsdp_finetuning + + Fine-tune LLM with AIR LightningTrainer and FSDP + Ray Train Examples Using Loggers & Callbacks -------------------------------------------- diff --git a/doc/source/train/examples/lightning/lightning_cola_advanced.ipynb b/doc/source/train/examples/lightning/lightning_cola_advanced.ipynb index a01c1151f980..d95cbe9b2195 100644 --- a/doc/source/train/examples/lightning/lightning_cola_advanced.ipynb +++ b/doc/source/train/examples/lightning/lightning_cola_advanced.ipynb @@ -1483,6 +1483,17 @@ "print(results.head(10))\n", "print(matthews_corr)" ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## What's next?\n", + "\n", + "- {ref}`Fine-tune a Large Language Model with LightningTrainer and FSDP `\n", + "- {ref}`Hyperparameter searching with LightningTrainer + Ray Tune. 
`" + ] } ], "metadata": { diff --git a/doc/source/train/examples/lightning/lightning_mnist_example.ipynb b/doc/source/train/examples/lightning/lightning_mnist_example.ipynb index 8fa7372cbbd7..fed176064fdd 100644 --- a/doc/source/train/examples/lightning/lightning_mnist_example.ipynb +++ b/doc/source/train/examples/lightning/lightning_mnist_example.ipynb @@ -741,6 +741,7 @@ "## What's next?\n", "\n", "- {ref}`Use LightningTrainer with Ray Data and Batch Predictor `\n", + "- {ref}`Fine-tune a Large Language Model with LightningTrainer and FSDP `\n", "- {ref}`Hyperparameter searching with LightningTrainer + Ray Tune. `" ] } diff --git a/doc/source/tune/examples/tune-pytorch-lightning.ipynb b/doc/source/tune/examples/tune-pytorch-lightning.ipynb index c8083b5ead01..5b364a4497c8 100644 --- a/doc/source/tune/examples/tune-pytorch-lightning.ipynb +++ b/doc/source/tune/examples/tune-pytorch-lightning.ipynb @@ -582,6 +582,7 @@ "\n", "- {ref}`Use LightningTrainer for Image Classification `.\n", "- {ref}`Use LightningTrainer with Ray Data and Batch Predictor `\n", + "- {ref}`Fine-tune a Large Language Model with LightningTrainer and FSDP `\n", "- {doc}`/tune/examples/includes/mlflow_ptl_example`: Example for using [MLflow](https://github.com/mlflow/mlflow/)\n", " and [Pytorch Lightning](https://github.com/PyTorchLightning/pytorch-lightning) with Ray Tune.\n", "- {doc}`/tune/examples/includes/mnist_ptl_mini`:\n", @@ -607,7 +608,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.7.15" + "version": "3.8.16" } }, "nbformat": 4, diff --git a/release/air_examples/dolly_v2_lightning_fsdp_finetuning/dolly_v2_fsdp_compute_aws.yaml b/release/air_examples/dolly_v2_lightning_fsdp_finetuning/dolly_v2_fsdp_compute_aws.yaml new file mode 100644 index 000000000000..7966578a31b1 --- /dev/null +++ b/release/air_examples/dolly_v2_lightning_fsdp_finetuning/dolly_v2_fsdp_compute_aws.yaml @@ -0,0 +1,20 @@ +cloud_id: {{env["ANYSCALE_CLOUD_ID"]}} 
+region: us-west-2 + +head_node_type: + name: head_node + instance_type: g4dn.8xlarge + +worker_node_types: + - name: worker_node + instance_type: g4dn.4xlarge + min_workers: 15 + max_workers: 15 + use_spot: false + +aws: + TagSpecifications: + - ResourceType: "instance" + Tags: + - Key: ttl-hours + Value: '24' diff --git a/release/air_examples/dolly_v2_lightning_fsdp_finetuning/dolly_v2_fsdp_env.yaml b/release/air_examples/dolly_v2_lightning_fsdp_finetuning/dolly_v2_fsdp_env.yaml new file mode 100644 index 000000000000..26017cde6ae0 --- /dev/null +++ b/release/air_examples/dolly_v2_lightning_fsdp_finetuning/dolly_v2_fsdp_env.yaml @@ -0,0 +1,21 @@ +base_image: {{ env["RAY_IMAGE_ML_NIGHTLY_GPU"] | default("anyscale/ray:nightly-py38-cu118") }} +env_vars: {} +debian_packages: + - curl + +python: + pip_packages: + - "datasets" + - "evaluate" + - "scikit-learn" + - "boto3" + - myst-parser==0.15.2 + - myst-nb==0.13.1 + - jupytext==1.13.6 + conda_packages: [] + +post_build_cmds: + - pip uninstall -y ray || true && pip3 install -U {{ env["RAY_WHEELS"] | default("ray") }} + - {{ env["RAY_WHEELS_SANITY_CHECK"] | default("echo No Ray wheels sanity check") }} + - pip3 install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu118 + - pip3 install "pytorch_lightning>=2.0.0" "transformers>=4.28.0" "accelerate>=0.18.0" diff --git a/release/air_examples/dolly_v2_lightning_fsdp_finetuning/lightning-llm-finetuning-7b.ipynb b/release/air_examples/dolly_v2_lightning_fsdp_finetuning/lightning-llm-finetuning-7b.ipynb new file mode 120000 index 000000000000..5f90fb2ae158 --- /dev/null +++ b/release/air_examples/dolly_v2_lightning_fsdp_finetuning/lightning-llm-finetuning-7b.ipynb @@ -0,0 +1 @@ +../../../doc/source/ray-air/examples/dolly_lightning_fsdp_finetuning.ipynb \ No newline at end of file diff --git a/release/air_examples/dolly_v2_lightning_fsdp_finetuning/test_myst_doc.py b/release/air_examples/dolly_v2_lightning_fsdp_finetuning/test_myst_doc.py new file 
mode 120000 index 000000000000..c265ccc7b062 --- /dev/null +++ b/release/air_examples/dolly_v2_lightning_fsdp_finetuning/test_myst_doc.py @@ -0,0 +1 @@ +../../../doc/test_myst_doc.py \ No newline at end of file diff --git a/release/release_tests.yaml b/release/release_tests.yaml index f8c6fed0958f..40801cce1182 100644 --- a/release/release_tests.yaml +++ b/release/release_tests.yaml @@ -827,6 +827,23 @@ cluster_compute: gptj_deepspeed_compute_gce.yaml +- name: air_example_dolly_v2_lightning_fsdp_finetuning + group: AIR examples + working_dir: air_examples/dolly_v2_lightning_fsdp_finetuning + + python: "3.8" + + frequency: weekly + team: ml + cluster: + cluster_env: dolly_v2_fsdp_env.yaml + cluster_compute: dolly_v2_fsdp_compute_aws.yaml + + run: + timeout: 4700 + script: python test_myst_doc.py --path lightning-llm-finetuning-7b.ipynb + + - name: air_example_opt_deepspeed_batch_inference group: AIR examples working_dir: air_examples/opt_deepspeed_batch_inference From 0a1e4faaebc1964617bffe9b0d45effac57c863b Mon Sep 17 00:00:00 2001 From: Ricky Xu Date: Wed, 10 May 2023 08:44:53 +0800 Subject: [PATCH 309/424] [ci][core] Remove test_ray_get_timeout_zero #35196 Signed-off-by: Ricky Xu --- python/ray/tests/test_advanced_2.py | 15 --------------- 1 file changed, 15 deletions(-) diff --git a/python/ray/tests/test_advanced_2.py b/python/ray/tests/test_advanced_2.py index f1900ec46e16..8bb2838c61e6 100644 --- a/python/ray/tests/test_advanced_2.py +++ b/python/ray/tests/test_advanced_2.py @@ -520,21 +520,6 @@ def test(): assert cluster_resources == {} -def test_ray_get_timeout_zero(monkeypatch): - # Check that ray.get(timeout=0) raises warnings on change of behavior. - # Removed when https://github.com/ray-project/ray/issues/28465 is resolved. 
- with pytest.warns(UserWarning): - ray.get(ray.put(1), timeout=0) - - with monkeypatch.context() as m: - m.setenv("RAY_WARN_RAY_GET_TIMEOUT_ZERO", "0") - import warnings - - with warnings.catch_warnings(): - warnings.simplefilter("error") - ray.get(ray.put(1), timeout=0) - - if __name__ == "__main__": if os.environ.get("PARALLEL_CI"): sys.exit(pytest.main(["-n", "auto", "--boxed", "-vs", __file__])) From 5182db2e2b8be52af7cbaffccf88fce500cb7430 Mon Sep 17 00:00:00 2001 From: Larry <554538252@qq.com> Date: Wed, 10 May 2023 09:11:48 +0800 Subject: [PATCH 310/424] [Core]Fixing the flakey test cases caused by Redis startup failure due to port conflicts (#35127) The test_placement_group_3 use case occasionally fails. I have seen that the reason for the failure is that redis failed to start ("Warning: Could not create server TCP listening socket ::*:49152: bind: Address already in use"). Now for the test case with external redis, when starting redis, add the judgment of whether the redis process starts successfully, and try again if it fails to start. --- python/ray/tests/conftest.py | 76 +++++++++++++++++++++++++++++------- 1 file changed, 61 insertions(+), 15 deletions(-) diff --git a/python/ray/tests/conftest.py b/python/ray/tests/conftest.py index 475b6c8b7ede..3af9928cf784 100644 --- a/python/ray/tests/conftest.py +++ b/python/ray/tests/conftest.py @@ -146,21 +146,42 @@ def get_default_fixture_ray_kwargs(): return ray_kwargs -@contextmanager -def _setup_redis(request): - # Setup external Redis and env var for initialization. 
- redis_ports = [] - for _ in range(redis_replicas()): - # max port for redis cluster - port = 55536 - while port >= 55535: - with socket.socket() as s: - s.bind(("", 0)) - port = s.getsockname()[1] - print("Picking port", port) - redis_ports.append(port) +def is_process_listen_to_port(pid, port): + retry_num = 10 + interval_time = 0.5 + for _ in range(retry_num): + try: + proc = psutil.Process(pid) + for conn in proc.connections(): + if conn.status == "LISTEN" and conn.laddr.port == port: + return True + except Exception: + pass + finally: + time.sleep(interval_time) + print( + f"Process({pid}) has not listened to port {port} " + + f"for more than {retry_num * interval_time}s." + ) + return False + + +def start_redis(db_dir): + retry_num = 0 + while True: + is_need_restart = False + # Setup external Redis and env var for initialization. + redis_ports = [] + for _ in range(redis_replicas()): + # max port for redis cluster + port = 55536 + while port >= 55535: + with socket.socket() as s: + s.bind(("", 0)) + port = s.getsockname()[1] + print("Picking port", port) + redis_ports.append(port) - with tempfile.TemporaryDirectory() as tmpdirname: processes = [] enable_tls = "RAY_REDIS_CA_CERT" in os.environ leader_port = None @@ -174,12 +195,30 @@ def _setup_redis(request): enable_tls=enable_tls, replica_of=leader_port, leader_id=leader_id, - db_dir=tmpdirname, + db_dir=db_dir, ) if leader_port is None: leader_port = port leader_id = node_id processes.append(proc) + # Check if th redis has started successfully and is listening on the port. + if not is_process_listen_to_port(proc.process.pid, port): + is_need_restart = True + break + + if is_need_restart: + retry_num += 1 + for proc in processes: + proc.process.kill() + + if retry_num > 5: + raise RuntimeError("Failed to start redis after {retry_num} attempts.") + print( + "Retry to start redis because the process failed to " + + f"listen to the port({port}), retry num:{retry_num}." 
+ ) + continue + if redis_replicas() > 1: import redis @@ -189,6 +228,13 @@ def _setup_redis(request): scheme = "rediss://" if enable_tls else "" address_str = f"{scheme}127.0.0.1:{redis_ports[-1]}" + return address_str, processes + + +@contextmanager +def _setup_redis(request): + with tempfile.TemporaryDirectory() as tmpdirname: + address_str, processes = start_redis(tmpdirname) old_addr = os.environ.get("RAY_REDIS_ADDRESS") os.environ["RAY_REDIS_ADDRESS"] = address_str import uuid From 0ce6f71f67e19c545140c782a580c7d3f4ab54e4 Mon Sep 17 00:00:00 2001 From: Ricky Xu Date: Wed, 10 May 2023 09:21:01 +0800 Subject: [PATCH 311/424] [core][state] Push down filtering to GCS for listing/getting task from state api (#35109) Re-Revert of #34433 --- dashboard/state_aggregator.py | 8 +- .../ray/experimental/state/state_manager.py | 40 +++++++-- python/ray/tests/test_state_api.py | 12 ++- src/ray/gcs/gcs_server/gcs_task_manager.cc | 40 ++++++--- .../gcs_server/test/gcs_task_manager_test.cc | 84 +++++++++++++++++-- src/ray/protobuf/gcs_service.proto | 21 +++-- 6 files changed, 164 insertions(+), 41 deletions(-) diff --git a/dashboard/state_aggregator.py b/dashboard/state_aggregator.py index e4e38c9f323b..b7cfd20b5c9c 100644 --- a/dashboard/state_aggregator.py +++ b/dashboard/state_aggregator.py @@ -377,16 +377,10 @@ async def list_tasks(self, *, option: ListApiOptions) -> ListApiResponse: {task_id -> task_data_in_dict} task_data_in_dict's schema is in TaskState """ - job_id = None - for filter in option.filters: - if filter[0] == "job_id" and filter[1] == "=": - # Filtering by job_id == xxxx, pass it to source side filtering. 
- # tuple consists of (job_id, predicate, value) - job_id = filter[2] try: reply = await self._client.get_all_task_info( timeout=option.timeout, - job_id=job_id, + filters=option.filters, exclude_driver=option.exclude_driver, ) except DataSourceUnavailable: diff --git a/python/ray/experimental/state/state_manager.py b/python/ray/experimental/state/state_manager.py index 11ea98b89c4c..19e1fa318e38 100644 --- a/python/ray/experimental/state/state_manager.py +++ b/python/ray/experimental/state/state_manager.py @@ -12,7 +12,7 @@ from ray._private import ray_constants from ray._private.gcs_utils import GcsAioClient from ray._private.utils import hex_to_binary -from ray._raylet import ActorID, JobID +from ray._raylet import ActorID, JobID, TaskID from ray.core.generated import gcs_service_pb2_grpc from ray.core.generated.gcs_pb2 import ActorTableData from ray.core.generated.gcs_service_pb2 import ( @@ -262,16 +262,40 @@ async def get_all_task_info( self, timeout: int = None, limit: int = None, - job_id: Optional[str] = None, - exclude_driver: bool = True, + filters: Optional[List[Tuple[str, PredicateType, SupportedFilterType]]] = None, + exclude_driver: bool = False, ) -> Optional[GetTaskEventsReply]: if not limit: limit = RAY_MAX_LIMIT_FROM_DATA_SOURCE - if job_id: - job_id = JobID(hex_to_binary(job_id)).binary() - request = GetTaskEventsRequest( - limit=limit, exclude_driver=exclude_driver, job_id=job_id - ) + + if filters is None: + filters = [] + + req_filters = GetTaskEventsRequest.Filters() + for filter in filters: + key, predicate, value = filter + if predicate != "=": + # We only support EQUAL predicate for source side filtering. 
+ continue + + if key == "actor_id": + req_filters.actor_id = ActorID(hex_to_binary(value)).binary() + elif key == "job_id": + req_filters.job_id = JobID(hex_to_binary(value)).binary() + elif key == "name": + req_filters.name = value + elif key == "task_id": + req_filters.task_ids.append(TaskID(hex_to_binary(value)).binary()) + else: + continue + + # Remove the filter from the list so that we don't have to + # filter it again later. + filters.remove(filter) + + req_filters.exclude_driver = exclude_driver + + request = GetTaskEventsRequest(limit=limit, filters=req_filters) reply = await self._gcs_task_info_stub.GetTaskEvents(request, timeout=timeout) return reply diff --git a/python/ray/tests/test_state_api.py b/python/ray/tests/test_state_api.py index 64882421c856..c9b70b1d9ce0 100644 --- a/python/ray/tests/test_state_api.py +++ b/python/ray/tests/test_state_api.py @@ -915,7 +915,7 @@ async def test_api_manager_list_tasks(state_api_manager): ] result = await state_api_manager.list_tasks(option=create_api_options()) data_source_client.get_all_task_info.assert_any_await( - timeout=DEFAULT_RPC_TIMEOUT, job_id=None, exclude_driver=True + timeout=DEFAULT_RPC_TIMEOUT, filters=[], exclude_driver=True ) data = result.result data = data @@ -2282,7 +2282,7 @@ def g(dep): def impossible(): pass - out = [f.remote() for _ in range(2)] # noqa + out = [f.options(name=f"f_{i}").remote() for i in range(2)] # noqa g_out = g.remote(f.remote()) # noqa im = impossible.remote() # noqa @@ -2350,6 +2350,9 @@ def verify(): for task in tasks: assert task["job_id"] == job_id + tasks = list_tasks(filters=[("name", "=", "f_0")]) + assert len(tasks) == 1 + return True wait_for_condition(verify) @@ -2540,7 +2543,6 @@ def verify(): for task in tasks: assert task["job_id"] == job_id for task in tasks: - print(task) assert task["actor_id"] == actor_id # Actor.__init__: 1 finished # Actor.call: 1 running, 9 waiting for execution (queued). 
@@ -2590,6 +2592,10 @@ def verify(): == 1 ) + # Filters with actor id. + assert len(list_tasks(filters=[("actor_id", "=", actor_id)])) == 11 + assert len(list_tasks(filters=[("actor_id", "!=", actor_id)])) == 0 + return True wait_for_condition(verify) diff --git a/src/ray/gcs/gcs_server/gcs_task_manager.cc b/src/ray/gcs/gcs_server/gcs_task_manager.cc index 6771e042bb24..e733856b8ee5 100644 --- a/src/ray/gcs/gcs_server/gcs_task_manager.cc +++ b/src/ray/gcs/gcs_server/gcs_task_manager.cc @@ -313,16 +313,17 @@ void GcsTaskManager::HandleGetTaskEvents(rpc::GetTaskEventsRequest request, rpc::SendReplyCallback send_reply_callback) { RAY_LOG(DEBUG) << "Getting task status:" << request.ShortDebugString(); - // Select candidate events by indexing. + // Select candidate events by indexing if possible. std::vector task_events; - if (request.has_task_ids()) { + const auto &filters = request.filters(); + if (filters.task_ids_size() > 0) { absl::flat_hash_set task_ids; - for (const auto &task_id_str : request.task_ids().vals()) { + for (const auto &task_id_str : filters.task_ids()) { task_ids.insert(TaskID::FromBinary(task_id_str)); } task_events = task_event_storage_->GetTaskEvents(task_ids); - } else if (request.has_job_id()) { - task_events = task_event_storage_->GetTaskEvents(JobID::FromBinary(request.job_id())); + } else if (filters.has_job_id()) { + task_events = task_event_storage_->GetTaskEvents(JobID::FromBinary(filters.job_id())); } else { task_events = task_event_storage_->GetTaskEvents(); } @@ -334,15 +335,34 @@ void GcsTaskManager::HandleGetTaskEvents(rpc::GetTaskEventsRequest request, int32_t num_profile_event_limit = 0; int32_t num_status_event_limit = 0; - for (auto itr = task_events.rbegin(); itr != task_events.rend(); ++itr) { - auto &task_event = *itr; + // A lambda filter fn, where it returns true for task events to be included in the + // result. Task ids and job ids are already filtered by the storage with indexing above. 
+ auto filter_fn = [&filters](const rpc::TaskEvents &task_event) { if (!task_event.has_task_info()) { // Skip task events w/o task info. - continue; + return false; } - - if (request.exclude_driver() && + if (filters.exclude_driver() && task_event.task_info().type() == rpc::TaskType::DRIVER_TASK) { + return false; + } + + if (filters.has_actor_id() && task_event.task_info().has_actor_id() && + ActorID::FromBinary(task_event.task_info().actor_id()) != + ActorID::FromBinary(filters.actor_id())) { + return false; + } + + if (filters.has_name() && task_event.task_info().name() != filters.name()) { + return false; + } + + return true; + }; + + for (auto itr = task_events.rbegin(); itr != task_events.rend(); ++itr) { + auto &task_event = *itr; + if (!filter_fn(task_event)) { continue; } diff --git a/src/ray/gcs/gcs_server/test/gcs_task_manager_test.cc b/src/ray/gcs/gcs_server/test/gcs_task_manager_test.cc index 91070fe1cf35..d60ea97f100f 100644 --- a/src/ray/gcs/gcs_server/test/gcs_task_manager_test.cc +++ b/src/ray/gcs/gcs_server/test/gcs_task_manager_test.cc @@ -115,26 +115,36 @@ class GcsTaskManagerTest : public ::testing::Test { rpc::GetTaskEventsReply SyncGetTaskEvents(absl::flat_hash_set task_ids, absl::optional job_id = absl::nullopt, int64_t limit = -1, - bool exclude_driver = true) { + bool exclude_driver = true, + const std::string &name = "", + const ActorID &actor_id = ActorID::Nil()) { rpc::GetTaskEventsRequest request; rpc::GetTaskEventsReply reply; std::promise promise; if (!task_ids.empty()) { for (const auto &task_id : task_ids) { - request.mutable_task_ids()->add_vals(task_id.Binary()); + request.mutable_filters()->add_task_ids(task_id.Binary()); } } + if (!name.empty()) { + request.mutable_filters()->set_name(name); + } + + if (!actor_id.IsNil()) { + request.mutable_filters()->set_actor_id(actor_id.Binary()); + } + if (job_id) { - request.set_job_id(job_id->Binary()); + request.mutable_filters()->set_job_id(job_id->Binary()); } if (limit >= 0) { 
request.set_limit(limit); } - request.set_exclude_driver(exclude_driver); + request.mutable_filters()->set_exclude_driver(exclude_driver); task_manager->GetIoContext().dispatch( [this, &promise, &request, &reply]() { task_manager->HandleGetTaskEvents( @@ -155,11 +165,15 @@ class GcsTaskManagerTest : public ::testing::Test { static rpc::TaskInfoEntry GenTaskInfo( JobID job_id, TaskID parent_task_id = TaskID::Nil(), - rpc::TaskType task_type = rpc::TaskType::NORMAL_TASK) { + rpc::TaskType task_type = rpc::TaskType::NORMAL_TASK, + const ActorID actor_id = ActorID::Nil(), + const std::string name = "") { rpc::TaskInfoEntry task_info; task_info.set_job_id(job_id.Binary()); task_info.set_parent_task_id(parent_task_id.Binary()); task_info.set_type(task_type); + task_info.set_actor_id(actor_id.Binary()); + task_info.set_name(name); return task_info; } @@ -490,6 +504,66 @@ TEST_F(GcsTaskManagerTest, TestGetTaskEventsByJob) { reply_job2.mutable_events_by_task()); } +TEST_F(GcsTaskManagerTest, TestGetTaskEventsFilters) { + // Generate task events + + // A task event with actor id + ActorID actor_id = ActorID::Of(JobID::FromInt(1), TaskID::Nil(), 1); + { + auto task_ids = GenTaskIDs(1); + auto task_info_actor_id = + GenTaskInfo(JobID::FromInt(1), TaskID::Nil(), rpc::ACTOR_TASK, actor_id); + auto events = GenTaskEvents(task_ids, + /* attempt_number */ + 0, + /* job_id */ 1, + absl::nullopt, + absl::nullopt, + task_info_actor_id); + auto data = Mocker::GenTaskEventsData(events); + SyncAddTaskEventData(data); + } + + // A task event with name. 
+ { + auto task_ids = GenTaskIDs(1); + auto task_info_name = GenTaskInfo( + JobID::FromInt(1), TaskID::Nil(), rpc::NORMAL_TASK, ActorID::Nil(), "task_name"); + auto events = GenTaskEvents(task_ids, + /* attempt_number */ + 0, + /* job_id */ 1, + absl::nullopt, + absl::nullopt, + task_info_name); + auto data = Mocker::GenTaskEventsData(events); + SyncAddTaskEventData(data); + } + + auto reply_name = SyncGetTaskEvents({}, + /* job_id */ absl::nullopt, + /* limit */ -1, + /* exclude_driver */ false, + "task_name"); + EXPECT_EQ(reply_name.events_by_task_size(), 1); + + auto reply_actor_id = SyncGetTaskEvents({}, + /* job_id */ absl::nullopt, + /* limit */ -1, + /* exclude_driver */ false, + /* name */ "", + actor_id); + EXPECT_EQ(reply_name.events_by_task_size(), 1); + + auto reply_both_and = SyncGetTaskEvents({}, + /* job_id */ absl::nullopt, + /* limit */ -1, + /* exclude_driver */ false, + "task_name", + actor_id); + EXPECT_EQ(reply_both_and.events_by_task_size(), 0); +} + TEST_F(GcsTaskManagerTest, TestMarkTaskAttemptFailedIfNeeded) { auto tasks = GenTaskIDs(3); auto tasks_running = tasks[0]; diff --git a/src/ray/protobuf/gcs_service.proto b/src/ray/protobuf/gcs_service.proto index 38280e48d3f6..7bc382bc0842 100644 --- a/src/ray/protobuf/gcs_service.proto +++ b/src/ray/protobuf/gcs_service.proto @@ -644,22 +644,27 @@ message AddTaskEventDataReply { } message GetTaskEventsRequest { - message TaskIDs { - repeated string vals = 1; - } - oneof select_by { + // Filter object where predicates are AND together. + message Filters { // Get task events from a job. - string job_id = 1; + optional bytes job_id = 1; // Get task events from a set of tasks. - TaskIDs task_ids = 2; + repeated bytes task_ids = 2; + // Get the task events with an actor id. + optional bytes actor_id = 3; + // Get the task events of task with names. + optional string name = 4; + // True if task events from driver (only profiling events) should be excluded. 
+ optional bool exclude_driver = 5; } // Maximum number of TaskEvents to return. // If set, the exact `limit` TaskEvents returned do not have any ordering or selection // guarantee. optional int64 limit = 3; - // True if task events from driver (only profiling events) should be excluded. - bool exclude_driver = 4; + + // Filters to apply to the get query. + optional Filters filters = 4; } message GetTaskEventsReply { From deff07284a479b78ec2ab0e6368296229e896f26 Mon Sep 17 00:00:00 2001 From: Larry <554538252@qq.com> Date: Wed, 10 May 2023 09:31:32 +0800 Subject: [PATCH 312/424] [Core] Add bundles_to_node_id info in placement_group_table (#35122) Now there is node_id information corresponding to each bundles in gcs_utils.PlacementGroupTableData. But there is no node_id information corresponding to bundles in ray.util.placement_group_table() interface in python. Now add a "bundles_to_node_id" field in the returned result of the ray.util.placement_group_table() interface To ensure compatibility. A "bundles_to_node_id" field is added. 
--- python/ray/_private/state.py | 4 ++++ python/ray/tests/test_placement_group_2.py | 18 +++++++++++++++++- 2 files changed, 21 insertions(+), 1 deletion(-) diff --git a/python/ray/_private/state.py b/python/ray/_private/state.py index c2d1fc594f17..45aa57d8b601 100644 --- a/python/ray/_private/state.py +++ b/python/ray/_private/state.py @@ -329,6 +329,10 @@ def get_strategy(strategy): bundle.bundle_id.bundle_index: MessageToDict(bundle)["unitResources"] for bundle in placement_group_info.bundles }, + "bundles_to_node_id": { + bundle.bundle_id.bundle_index: binary_to_hex(bundle.node_id) + for bundle in placement_group_info.bundles + }, "strategy": get_strategy(placement_group_info.strategy), "state": get_state(placement_group_info.state), "stats": { diff --git a/python/ray/tests/test_placement_group_2.py b/python/ray/tests/test_placement_group_2.py index 4356090bc0c4..c9b367e429f9 100644 --- a/python/ray/tests/test_placement_group_2.py +++ b/python/ray/tests/test_placement_group_2.py @@ -89,6 +89,9 @@ def test_pending_placement_group_wait(ray_start_cluster, connect_to_client): assert len(ready) == 0 table = ray.util.placement_group_table(placement_group) assert table["state"] == "PENDING" + for i in range(3): + assert len(table["bundles_to_node_id"][i]) == 0 + with pytest.raises(ray.exceptions.GetTimeoutError): ray.get(placement_group.ready(), timeout=0.1) @@ -115,11 +118,24 @@ def test_placement_group_wait(ray_start_cluster, connect_to_client): assert len(ready) == 1 table = ray.util.placement_group_table(placement_group) assert table["state"] == "CREATED" - pg = ray.get(placement_group.ready()) assert pg.bundle_specs == placement_group.bundle_specs assert pg.id.binary() == placement_group.id.binary() + @ray.remote + def get_node_id(): + return ray.get_runtime_context().get_node_id() + + for i in range(2): + scheduling_strategy = PlacementGroupSchedulingStrategy( + placement_group=placement_group, + placement_group_bundle_index=i, + ) + node_id = ray.get( + 
get_node_id.options(scheduling_strategy=scheduling_strategy).remote() + ) + assert node_id == table["bundles_to_node_id"][i] + @pytest.mark.parametrize("connect_to_client", [False, True]) def test_schedule_placement_group_when_node_add(ray_start_cluster, connect_to_client): From f934d1ad8bddd62c7c1429dd92c4f60d6b97c636 Mon Sep 17 00:00:00 2001 From: Lonnie Liu <95255098+aslonnie@users.noreply.github.com> Date: Tue, 9 May 2023 18:33:50 -0700 Subject: [PATCH 313/424] Downgrade hermetic python to 3.8 (#35198) And build dependencies with 3.7. Signed-off-by: Lonnie Liu --- WORKSPACE | 6 +-- release/BUILD | 2 +- release/requirements_buildkite.txt | 74 ++++++++++++++++++++++++++---- 3 files changed, 70 insertions(+), 12 deletions(-) diff --git a/WORKSPACE b/WORKSPACE index eb6aeba907e3..06b6bd03ee0f 100644 --- a/WORKSPACE +++ b/WORKSPACE @@ -41,12 +41,12 @@ http_archive( load("@rules_python//python:repositories.bzl", "python_register_toolchains") python_register_toolchains( - name = "python3_9", - python_version = "3.9", + name = "python3_8", + python_version = "3.8", register_toolchains = False, ) -load("@python3_9//:defs.bzl", bk_python = "interpreter") +load("@python3_8//:defs.bzl", bk_python = "interpreter") load("@rules_python//python/pip_install:repositories.bzl", "pip_install_dependencies") pip_install_dependencies() diff --git a/release/BUILD b/release/BUILD index b15f22324acb..5afadec6406e 100644 --- a/release/BUILD +++ b/release/BUILD @@ -1,7 +1,7 @@ load("@rules_python//python:defs.bzl", "py_library", "py_runtime", "py_runtime_pair", "py_test") load("@rules_python//python:pip.bzl", "compile_pip_requirements") load("@py_deps_buildkite//:requirements.bzl", bk_require = "requirement") -load("@python3_9//:defs.bzl", bk_python = "interpreter") +load("@python3_8//:defs.bzl", bk_python = "interpreter") compile_pip_requirements( name = "requirements_buildkite", diff --git a/release/requirements_buildkite.txt b/release/requirements_buildkite.txt index 
a840aeb645d4..5ed50766f501 100644 --- a/release/requirements_buildkite.txt +++ b/release/requirements_buildkite.txt @@ -1,5 +1,5 @@ # -# This file is autogenerated by pip-compile with python 3.9 +# This file is autogenerated by pip-compile with python 3.7 # To update, run: # # bazel run //release:requirements_buildkite.update @@ -131,24 +131,48 @@ async-timeout==4.0.2 \ --hash=sha256:2163e1640ddb52b7a8c80d0a67a08587e5d245cc9c553a74a847056bc2976b15 \ --hash=sha256:8ca1e4fcf50d07413d66d1a5e416e42cfdf5851c981d679a09851a6853383b3c # via aiohttp +asynctest==0.13.0 \ + --hash=sha256:5da6118a7e6d6b54d83a8f7197769d046922a44d2a99c21382f0a6e4fadae676 \ + --hash=sha256:c27862842d15d83e6a34eb0b2866c323880eb3a75e4485b079ea11748fd77fac + # via aiohttp attrs==23.1.0 \ --hash=sha256:1f28b4522cdc2fb4256ac1a020c78acf9cba2c6b461ccd2c126f3aa8e8335d04 \ --hash=sha256:6279836d581513a26f1bf235f9acd333bc9115683f14f7e8fae46c98fc50e015 # via # aiohttp # jsonschema +backports-zoneinfo==0.2.1 \ + --hash=sha256:17746bd546106fa389c51dbea67c8b7c8f0d14b5526a579ca6ccf5ed72c526cf \ + --hash=sha256:1b13e654a55cd45672cb54ed12148cd33628f672548f373963b0bff67b217328 \ + --hash=sha256:1c5742112073a563c81f786e77514969acb58649bcdf6cdf0b4ed31a348d4546 \ + --hash=sha256:4a0f800587060bf8880f954dbef70de6c11bbe59c673c3d818921f042f9954a6 \ + --hash=sha256:5c144945a7752ca544b4b78c8c41544cdfaf9786f25fe5ffb10e838e19a27570 \ + --hash=sha256:7b0a64cda4145548fed9efc10322770f929b944ce5cee6c0dfe0c87bf4c0c8c9 \ + --hash=sha256:8439c030a11780786a2002261569bdf362264f605dfa4d65090b64b05c9f79a7 \ + --hash=sha256:8961c0f32cd0336fb8e8ead11a1f8cd99ec07145ec2931122faaac1c8f7fd987 \ + --hash=sha256:89a48c0d158a3cc3f654da4c2de1ceba85263fafb861b98b59040a5086259722 \ + --hash=sha256:a76b38c52400b762e48131494ba26be363491ac4f9a04c1b7e92483d169f6582 \ + --hash=sha256:da6013fd84a690242c310d77ddb8441a559e9cb3d3d59ebac9aca1a57b2e18bc \ + --hash=sha256:e55b384612d93be96506932a786bbcde5a2db7a9e6a4bb4bffe8b733f5b9036b \ + 
--hash=sha256:e81b76cace8eda1fca50e345242ba977f9be6ae3945af8d46326d776b4cf78d1 \ + --hash=sha256:e8236383a20872c0cdf5a62b554b27538db7fa1bbec52429d8d106effbaeca08 \ + --hash=sha256:f04e857b59d9d1ccc39ce2da1021d196e47234873820cbeaad210724b1ee28ac \ + --hash=sha256:fadbfe37f74051d024037f223b8e001611eac868b5c5b06144ef4d8b799862f2 + # via + # pytz-deprecation-shim + # tzlocal bazel-runfiles==0.21.0 \ --hash=sha256:3e430dd9a5aba90a90bc2493fdcfce02a3ece47fb574db0f4ac898261e6b068d # via -r release/requirements_buildkite.in -boto3==1.26.130 \ - --hash=sha256:3ae2b34921bb08a1d7ce52db9ec1a25159fca779648e596ede37e1049ed77de8 \ - --hash=sha256:d6f9c6ebf417260ea5fa7a227e7bce9451f1f5010be05ac4075596356f584455 +boto3==1.26.131 \ + --hash=sha256:061d3270472b9be09901bb08a45e9871ac8f86a9b1c9c615535ca0223acd7582 \ + --hash=sha256:5b2b13d9f3430e3d5e768bf32097d5d6d16f47a4719f2656de67da49dd3e4de1 # via # -r release/requirements_buildkite.in # anyscale -botocore==1.29.130 \ - --hash=sha256:3a31293b84ecfe5f5f2c4b7dc85a77d7b890b468a376b593fde15cacc76862dd \ - --hash=sha256:56d1f54c3f8e140f965e5300d1cc5b565cb758134d9213fb05e91e1bb160330e +botocore==1.29.131 \ + --hash=sha256:d0dea23bccdfd7c2f6d0cd3216cfbd7065bc3e9e7b1ef6fee0952b04f5d2cffd \ + --hash=sha256:ffbd85915b2624c545438a33c2624a809593720a10648f6e757fe50be4893188 # via # anyscale # boto3 @@ -541,6 +565,20 @@ idna==3.4 \ # via # requests # yarl +importlib-metadata==6.6.0 \ + --hash=sha256:43dd286a2cd8995d5eaef7fee2066340423b818ed3fd70adf0bad5f1fac53fed \ + --hash=sha256:92501cdf9cc66ebd3e612f1b4f0c0765dfa42f0fa38ffb319b6bd84dd675d705 + # via + # attrs + # click + # humanize + # jsonschema + # pluggy + # pytest +importlib-resources==5.12.0 \ + --hash=sha256:4be82589bf5c1d7999aedf2a45159d10cb3ca4f19b2271f8792bc8e6da7b22f6 \ + --hash=sha256:7b1deeebbf351c7578e09bf2f63fa2ce8b5ffec296e0d349139d43cca061a81a + # via jsonschema iniconfig==2.0.0 \ --hash=sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3 \ 
--hash=sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374 @@ -723,6 +761,10 @@ pathspec==0.11.1 \ --hash=sha256:2798de800fa92780e33acca925945e9a19a133b715067cf165b8866c15a31687 \ --hash=sha256:d8af70af76652554bd134c22b3e8a1cc46ed7d91edcdd721ef1a0c51a84a5293 # via anyscale +pkgutil-resolve-name==1.3.10 \ + --hash=sha256:357d6c9e6a755653cfd78893817c0853af365dd51ec97f3d358a819373bbd174 \ + --hash=sha256:ca27cc078d25c5ad71a9de0a7a330146c4e014c2462d9af19c6b828280649c5e + # via jsonschema pluggy==1.0.0 \ --hash=sha256:4224373bacce55f955a878bf9cfa763c1e360858e330072059e10bad68531159 \ --hash=sha256:74134bbf457f031a36d68416e1509f34bd5ccc019f0bcc952c7b909d06b37bd3 @@ -967,7 +1009,17 @@ tqdm==4.65.0 \ typing-extensions==4.5.0 \ --hash=sha256:5cb5f4a79139d699607b3ef622a1dedafa84e115ab0024e0d9c044a9479ca7cb \ --hash=sha256:fb33085c39dd998ac16d1431ebc293a8b3eedd00fd4a32de0ff79002c19511b4 - # via pydantic + # via + # aiohttp + # argon2-cffi + # async-timeout + # gitpython + # importlib-metadata + # jsonschema + # markdown-it-py + # pydantic + # rich + # yarl tzdata==2023.3 \ --hash=sha256:11ef1e08e54acb0d4f95bdb1be05da659673de4acbd21bf9c69e94cc5e907a3a \ --hash=sha256:7e65763eef3120314099b6939b5546db7adce1e7d6f2e179e3df563c70511eda @@ -1136,3 +1188,9 @@ yarl==1.9.2 \ --hash=sha256:f4e2d08f07a3d7d3e12549052eb5ad3eab1c349c53ac51c209a0e5991bbada78 \ --hash=sha256:f7a3d8146575e08c29ed1cd287068e6d02f1c7bdff8970db96683b9591b86ee7 # via aiohttp +zipp==3.15.0 \ + --hash=sha256:112929ad649da941c23de50f356a2b5570c954b65150642bccdd66bf194d224b \ + --hash=sha256:48904fc76a60e542af151aded95726c1a5c34ed43ab4134b597665c86d7ad556 + # via + # importlib-metadata + # importlib-resources From df585d8469aa97de8d251fb2afa86711d7462d62 Mon Sep 17 00:00:00 2001 From: Larry <554538252@qq.com> Date: Wed, 10 May 2023 09:51:21 +0800 Subject: [PATCH 314/424] [Core] Put pg state to kv store when pg rescheduling (resubmit) (#34948) This PR may have caused flakey failures in the test case 
'test_placement_group_3', so it was rolled back. This is a resubmitted PR If it is confirmed that the issue was caused by this PR, then I will make the necessary modifications to address the problem --- python/ray/_private/state.py | 2 + .../tests/test_placement_group_failover.py | 71 ++++++++++++++++++- .../gcs_server/gcs_placement_group_manager.cc | 11 ++- .../test/gcs_placement_group_manager_test.cc | 2 + 4 files changed, 80 insertions(+), 6 deletions(-) diff --git a/python/ray/_private/state.py b/python/ray/_private/state.py index 45aa57d8b601..bf4bb4097d4f 100644 --- a/python/ray/_private/state.py +++ b/python/ray/_private/state.py @@ -301,6 +301,8 @@ def get_state(state): return "PENDING" elif state == gcs_utils.PlacementGroupTableData.CREATED: return "CREATED" + elif state == gcs_utils.PlacementGroupTableData.RESCHEDULING: + return "RESCHEDULING" else: return "REMOVED" diff --git a/python/ray/tests/test_placement_group_failover.py b/python/ray/tests/test_placement_group_failover.py index 3bbe88536443..b8a7841eec48 100755 --- a/python/ray/tests/test_placement_group_failover.py +++ b/python/ray/tests/test_placement_group_failover.py @@ -2,9 +2,7 @@ import sys import ray import ray.cluster_utils -from ray._private.test_utils import ( - get_other_nodes, -) +from ray._private.test_utils import get_other_nodes, wait_for_condition MB = 1024 * 1024 @@ -58,5 +56,72 @@ def test_placement_group_failover_when_two_nodes_die(monkeypatch, ray_start_clus ray.get(object_ref, timeout=5) +def test_gcs_restart_when_placement_group_failover( + ray_start_cluster_head_with_external_redis, +): + @ray.remote(num_cpus=1) + class Actor(object): + def __init__(self): + self.n = 0 + + def value(self): + return self.n + + cluster = ray_start_cluster_head_with_external_redis + num_nodes = 3 + nodes = [] + for _ in range(num_nodes - 1): + nodes.append(cluster.add_node(num_cpus=1)) + + # Make sure the placement group is ready. 
+ bundles = [{"CPU": 1, "memory": 100 * MB} for _ in range(num_nodes)] + placement_group = ray.util.placement_group( + name="name", strategy="STRICT_SPREAD", bundles=bundles + ) + assert placement_group.wait(5000) + actors = [] + for i in range(num_nodes): + actor = Actor.options( + placement_group=placement_group, + placement_group_bundle_index=i, + max_restarts=-1, + ).remote() + object_ref = actor.value.remote() + ray.get(object_ref, timeout=5) + actors.append(actor) + + # Simulate a node dead. + other_nodes = get_other_nodes(cluster, exclude_head=True) + cluster.remove_node(other_nodes[0]) + + # Make sure placement group state change to rescheduling. + def _check_pg_whether_be_reschedule(): + table = ray.util.placement_group_table(placement_group) + return table["state"] == "RESCHEDULING" + + wait_for_condition( + _check_pg_whether_be_reschedule, timeout=5, retry_interval_ms=1000 + ) + + # Simulate gcs restart. + cluster.head_node.kill_gcs_server() + cluster.head_node.start_gcs_server() + + cluster.add_node(num_cpus=1) + cluster.wait_for_nodes() + + # Check placement gorup reschedule success after gcs server restart. 
+ def _check_actor_with_pg_is_ready(): + try: + for actor in actors: + object_ref = actor.value.remote() + ray.get(object_ref, timeout=5) + return True + except Exception: + return False + + wait_for_condition(_check_actor_with_pg_is_ready, timeout=5, retry_interval_ms=1000) + + if __name__ == "__main__": sys.exit(pytest.main(["-v", __file__])) diff --git a/src/ray/gcs/gcs_server/gcs_placement_group_manager.cc b/src/ray/gcs/gcs_server/gcs_placement_group_manager.cc index 2851fe41f494..6c222727ee60 100644 --- a/src/ray/gcs/gcs_server/gcs_placement_group_manager.cc +++ b/src/ray/gcs/gcs_server/gcs_placement_group_manager.cc @@ -756,11 +756,13 @@ void GcsPlacementGroupManager::OnNodeDead(const NodeID &node_id) { iter->second->GetMutableStats()->set_scheduling_state( rpc::PlacementGroupStats::QUEUED); AddToPendingQueue(iter->second, 0); + RAY_CHECK_OK(gcs_table_storage_->PlacementGroupTable().Put( + iter->second->GetPlacementGroupID(), + iter->second->GetPlacementGroupTableData(), + [this](Status status) { SchedulePendingPlacementGroups(); })); } } } - - SchedulePendingPlacementGroups(); } void GcsPlacementGroupManager::OnNodeAdd(const NodeID &node_id) { @@ -966,7 +968,10 @@ bool GcsPlacementGroupManager::RescheduleIfStillHasUnplacedBundles( << placement_group->GetPlacementGroupID(); placement_group->UpdateState(rpc::PlacementGroupTableData::RESCHEDULING); AddToPendingQueue(placement_group, 0); - SchedulePendingPlacementGroups(); + RAY_CHECK_OK(gcs_table_storage_->PlacementGroupTable().Put( + placement_group->GetPlacementGroupID(), + placement_group->GetPlacementGroupTableData(), + [this](Status status) { SchedulePendingPlacementGroups(); })); return true; } } diff --git a/src/ray/gcs/gcs_server/test/gcs_placement_group_manager_test.cc b/src/ray/gcs/gcs_server/test/gcs_placement_group_manager_test.cc index 82d46f13f145..e0cdced97ae6 100644 --- a/src/ray/gcs/gcs_server/test/gcs_placement_group_manager_test.cc +++ 
b/src/ray/gcs/gcs_server/test/gcs_placement_group_manager_test.cc @@ -462,6 +462,7 @@ TEST_F(GcsPlacementGroupManagerTest, TestReschedulingRetry) { placement_group->GetPlacementGroupID(); mock_placement_group_scheduler_->bundles_on_dead_node_.push_back(0); gcs_placement_group_manager_->OnNodeDead(NodeID::FromRandom()); + WaitUntilIoServiceDone(); const auto &bundles = mock_placement_group_scheduler_->placement_groups_[0]->GetBundles(); EXPECT_TRUE(NodeID::FromBinary(bundles[0]->GetMessage().node_id()).IsNil()); @@ -503,6 +504,7 @@ TEST_F(GcsPlacementGroupManagerTest, TestRescheduleWhenNodeDead) { placement_group->GetPlacementGroupID(); mock_placement_group_scheduler_->bundles_on_dead_node_.push_back(0); gcs_placement_group_manager_->OnNodeDead(NodeID::FromRandom()); + WaitUntilIoServiceDone(); ASSERT_EQ(mock_placement_group_scheduler_->placement_groups_[0]->GetPlacementGroupID(), placement_group->GetPlacementGroupID()); const auto &bundles = From fdb79a24d5436f808841fcb22377229a047e45fd Mon Sep 17 00:00:00 2001 From: Alan Guo Date: Tue, 9 May 2023 23:05:33 -0700 Subject: [PATCH 315/424] Add runtime env metadata to jobs detail page. (#34984) Also makes the job detail page work when accessed via the submission id in the path. This will enable future work to link to submission-only jobs. Also fixes bug where the grafana dashboard dropdowns for Deployments and Replicas don't work until after the first request was received for that replica or deployment. 
--- dashboard/client/src/common/util.ts | 14 +++ dashboard/client/src/common/util.unit.test.ts | 34 ++++++ .../client/src/components/ActorTable.tsx | 12 +- dashboard/client/src/components/TaskTable.tsx | 14 +-- dashboard/client/src/pages/job/JobDetail.tsx | 113 +++++++++--------- .../src/pages/job/JobDetailInfoPage.tsx | 23 +++- .../client/src/pages/job/JobProgressBar.tsx | 2 +- ...rve_deployment_grafana_dashboard_base.json | 8 +- 8 files changed, 147 insertions(+), 73 deletions(-) create mode 100644 dashboard/client/src/common/util.unit.test.ts diff --git a/dashboard/client/src/common/util.ts b/dashboard/client/src/common/util.ts index 42571de261a9..c25914c2a175 100644 --- a/dashboard/client/src/common/util.ts +++ b/dashboard/client/src/common/util.ts @@ -1,3 +1,5 @@ +import _ from "lodash"; + export const getWeightedAverage = ( input: { weight: number; @@ -24,3 +26,15 @@ export const filterObj = (obj: Record, filterFn: any) => export const mapObj = (obj: Record, filterFn: any) => Object.fromEntries(Object.entries(obj).map(filterFn) as any[]); + +export const filterRuntimeEnvSystemVariables = ( + runtime_env: Record, +): Record => { + const out = _.pickBy(runtime_env, (_, key) => { + if (key.startsWith("_")) { + return false; + } + return true; + }); + return out; +}; diff --git a/dashboard/client/src/common/util.unit.test.ts b/dashboard/client/src/common/util.unit.test.ts new file mode 100644 index 000000000000..86c1758d48b6 --- /dev/null +++ b/dashboard/client/src/common/util.unit.test.ts @@ -0,0 +1,34 @@ +import { filterRuntimeEnvSystemVariables } from "./util"; + +describe("filterRuntimeEnvSystemVariables", () => { + it("filters out system variables", () => { + expect( + filterRuntimeEnvSystemVariables({ + pip: { + pip_check: true, + packages: ["chess", "foo", "bar"], + pip_version: "1.2.3", + }, + env_vars: { + FOO: "foo", + BAR: "5", + }, + working_dir: ".", + _ray_release: "2.3.1", + _ray_commit: "12345abc", + _inject_current_ray: false, + }), + ).toEqual({ 
+ pip: { + pip_check: true, + packages: ["chess", "foo", "bar"], + pip_version: "1.2.3", + }, + env_vars: { + FOO: "foo", + BAR: "5", + }, + working_dir: ".", + }); + }); +}); diff --git a/dashboard/client/src/components/ActorTable.tsx b/dashboard/client/src/components/ActorTable.tsx index 2ef4d150bb60..4508888e16af 100644 --- a/dashboard/client/src/components/ActorTable.tsx +++ b/dashboard/client/src/components/ActorTable.tsx @@ -184,8 +184,8 @@ const ActorTable = ({ ), }, { label: "Uptime" }, - { label: "Job Id" }, - { label: "Pid" }, + { label: "Job ID" }, + { label: "PID" }, { label: "IP" }, { label: "Restarted", @@ -196,16 +196,16 @@ const ActorTable = ({ ), }, { - label: "Placement Group Id", + label: "Placement group ID", helpInfo: ( - The id of the placement group this actor is scheduled to. + The ID of the placement group this actor is scheduled to.
    ), }, { - label: "Required Resources", + label: "Required resources", helpInfo: ( The required Ray resources to start an actor. @@ -223,7 +223,7 @@ const ActorTable = ({ ), }, { - label: "Exit Detail", + label: "Exit detail", helpInfo: ( The detail of an actor exit. Only available when an actor is dead. diff --git a/dashboard/client/src/components/TaskTable.tsx b/dashboard/client/src/components/TaskTable.tsx index 868e79732518..a5343b1be211 100644 --- a/dashboard/client/src/components/TaskTable.tsx +++ b/dashboard/client/src/components/TaskTable.tsx @@ -60,7 +60,7 @@ const TaskTable = ({ const columns = [ { label: "ID" }, { label: "Name" }, - { label: "Job Id" }, + { label: "Job ID" }, { label: "State" }, { label: "Actions", @@ -77,13 +77,13 @@ const TaskTable = ({ ), }, { label: "Duration" }, - { label: "Function or Class Name" }, - { label: "Node Id" }, - { label: "Actor_id" }, - { label: "Worker_id" }, + { label: "Function or class name" }, + { label: "Node ID" }, + { label: "Actor ID" }, + { label: "Worker ID" }, { label: "Type" }, - { label: "Placement Group Id" }, - { label: "Required Resources" }, + { label: "Placement group ID" }, + { label: "Required resources" }, ]; return ( diff --git a/dashboard/client/src/pages/job/JobDetail.tsx b/dashboard/client/src/pages/job/JobDetail.tsx index 1861158bd2e9..8bca84de8409 100644 --- a/dashboard/client/src/pages/job/JobDetail.tsx +++ b/dashboard/client/src/pages/job/JobDetail.tsx @@ -42,7 +42,6 @@ const useStyle = makeStyles((theme) => ({ export const JobDetailChartsPage = () => { const classes = useStyle(); const { job, msg, isLoading, params } = useJobDetail(); - const jobId = params.id; const [taskListFilter, setTaskListFilter] = useState(); const [taskTableExpanded, setTaskTableExpanded] = useState(false); @@ -162,7 +161,7 @@ export const JobDetailChartsPage = () => { >
    @@ -181,15 +180,17 @@ export const JobDetailChartsPage = () => { )} - -
    - -
    -
    + {job.job_id && ( + +
    + +
    +
    + )} { - { - setTaskTableExpanded(!taskTableExpanded); - }} - className={classes.section} - > -
    - -
    -
    + {job.job_id && ( + + { + setTaskTableExpanded(!taskTableExpanded); + }} + className={classes.section} + > +
    + +
    +
    - { - setActorTableExpanded(!actorTableExpanded); - }} - className={classes.section} - > -
    - -
    -
    + { + setActorTableExpanded(!actorTableExpanded); + }} + className={classes.section} + > +
    + +
    +
    - -
    - -
    -
    + +
    + +
    +
    +
    + )}
    ); }; diff --git a/dashboard/client/src/pages/job/JobDetailInfoPage.tsx b/dashboard/client/src/pages/job/JobDetailInfoPage.tsx index a732c0664576..9cdf851c6895 100644 --- a/dashboard/client/src/pages/job/JobDetailInfoPage.tsx +++ b/dashboard/client/src/pages/job/JobDetailInfoPage.tsx @@ -1,6 +1,9 @@ import { createStyles, makeStyles, Typography } from "@material-ui/core"; import React from "react"; -import { CodeDialogButtonWithPreview } from "../../common/CodeDialogButton"; +import { + CodeDialogButton, + CodeDialogButtonWithPreview, +} from "../../common/CodeDialogButton"; import { DurationText } from "../../common/DurationText"; import { formatDateFromTimeMs } from "../../common/formatUtils"; import { JobStatusWithIcon } from "../../common/JobStatus"; @@ -8,6 +11,7 @@ import { CpuProfilingLink, CpuStackTraceLink, } from "../../common/ProfilingLink"; +import { filterRuntimeEnvSystemVariables } from "../../common/util"; import Loading from "../../components/Loading"; import { MetadataSection } from "../../components/MetadataSection"; import { StatusChip } from "../../components/StatusChip"; @@ -138,6 +142,23 @@ export const JobMetadataSection = ({ job }: JobMetadataSectionProps) => { value: job.end_time ? formatDateFromTimeMs(job.end_time) : "-", }, }, + { + label: "Runtime environemnt", + ...(job.runtime_env + ? { + content: ( + + ), + } + : { + content: { + value: "-", + }, + }), + }, ...(job.type === "SUBMISSION" ? 
[ { diff --git a/dashboard/client/src/pages/job/JobProgressBar.tsx b/dashboard/client/src/pages/job/JobProgressBar.tsx index ce175bff2111..48be5e452952 100644 --- a/dashboard/client/src/pages/job/JobProgressBar.tsx +++ b/dashboard/client/src/pages/job/JobProgressBar.tsx @@ -15,7 +15,7 @@ const useStyles = makeStyles((theme) => ({ })); type JobProgressBarProps = { - jobId: string; + jobId: string | undefined; job: Pick; } & Pick; diff --git a/dashboard/modules/metrics/dashboards/serve_deployment_grafana_dashboard_base.json b/dashboard/modules/metrics/dashboards/serve_deployment_grafana_dashboard_base.json index af9c611e8867..ed72492a58da 100644 --- a/dashboard/modules/metrics/dashboards/serve_deployment_grafana_dashboard_base.json +++ b/dashboard/modules/metrics/dashboards/serve_deployment_grafana_dashboard_base.json @@ -36,7 +36,7 @@ ] }, "datasource": "Prometheus", - "definition": "label_values(ray_serve_deployment_request_counter{{{global_filters}}}, deployment)", + "definition": "label_values(ray_serve_deployment_replica_healthy{{{global_filters}}}, deployment)", "description": null, "error": null, "hide": 0, @@ -46,7 +46,7 @@ "name": "Deployment", "options": [], "query": { - "query": "label_values(ray_serve_deployment_request_counter{{{global_filters}}}, deployment)", + "query": "label_values(ray_serve_deployment_replica_healthy{{{global_filters}}}, deployment)", "refId": "Prometheus-Instance-Variable-Query" }, "refresh": 2, @@ -71,7 +71,7 @@ ] }, "datasource": "Prometheus", - "definition": "label_values(ray_serve_deployment_request_counter{{deployment=~\"$Deployment\",{global_filters}}}, replica)", + "definition": "label_values(ray_serve_deployment_replica_healthy{{deployment=~\"$Deployment\",{global_filters}}}, replica)", "description": null, "error": null, "hide": 0, @@ -81,7 +81,7 @@ "name": "Replica", "options": [], "query": { - "query": "label_values(ray_serve_deployment_request_counter{{deployment=~\"$Deployment\",{global_filters}}}, replica)", + 
"query": "label_values(ray_serve_deployment_replica_healthy{{deployment=~\"$Deployment\",{global_filters}}}, replica)", "refId": "Prometheus-Instance-Variable-Query" }, "refresh": 2, From 63eab90f7e5f5fec99441732be67660db9212978 Mon Sep 17 00:00:00 2001 From: Sven Mika Date: Wed, 10 May 2023 10:05:51 +0200 Subject: [PATCH 316/424] [RLlib] Unity3D adapter: Disable env pre-checking (agent IDs not known before connection to Unity editor). (#35167) --- rllib/env/wrappers/unity3d_env.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/rllib/env/wrappers/unity3d_env.py b/rllib/env/wrappers/unity3d_env.py index a93dbd9191c6..28a0e6ebcdee 100644 --- a/rllib/env/wrappers/unity3d_env.py +++ b/rllib/env/wrappers/unity3d_env.py @@ -64,6 +64,9 @@ def __init__( Note: The game itself may contain its own episode length limits, which are always obeyed (on top of this value here). """ + # Skip env checking as the nature of the agent IDs depends on the game + # running in the connected Unity editor. + self._skip_env_checking = True super().__init__() From 45d9c409165798cbd1250c9c3b5b1ce11fede52e Mon Sep 17 00:00:00 2001 From: Max Pumperla Date: Wed, 10 May 2023 10:11:31 +0200 Subject: [PATCH 317/424] [docs] update batch guide link, fix tensor ref (#35171) Direct users to the new batch inference guide. Found a broken reference while doing so. Signed-off-by: Max Pumperla --- doc/source/data/data.rst | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/doc/source/data/data.rst b/doc/source/data/data.rst index 1fd599e9b2f9..adf2569f79ac 100644 --- a/doc/source/data/data.rst +++ b/doc/source/data/data.rst @@ -29,8 +29,8 @@ Streaming Batch Inference ------------------------- Ray Data simplifies general purpose parallel GPU and CPU compute in Ray through its -powerful :ref:`Dataset ` primitive. Datasets enable workloads such as -:ref:`GPU batch inference ` to run efficiently on large datasets, +powerful :ref:`Datastream ` primitive. 
Datastreams enable workloads such as +:doc:`GPU batch inference ` to run efficiently on large datasets, maximizing resource utilization by keeping the working data fitting into Ray object store memory. .. image:: images/stream-example.png @@ -117,7 +117,7 @@ Advanced users can refer directly to the Ray Data :ref:`API reference Learn how to :ref:`load data `, :ref:`save data `, :ref:`transform data `, :ref:`access and exchange data `, or - :ref:`work with tensor data `. + :ref:`work with tensor data `. +++ .. button-ref:: data_user_guide From 12576e7990e2a9349dcb80cf23052c8303563744 Mon Sep 17 00:00:00 2001 From: Max Pumperla Date: Wed, 10 May 2023 10:12:24 +0200 Subject: [PATCH 318/424] [docs] synced tabs in AIR getting started (#35170) as discussed here https://docs.google.com/document/d/1fMF-Pt0gzJDhPJpmGQVUmEoUXsPwvSnHnJR058lLm8g/edit?disco=AAAAuOuL7ME&usp_dm=true Signed-off-by: Max Pumperla --- doc/source/ray-air/getting-started.rst | 32 +++++++++++++------------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/doc/source/ray-air/getting-started.rst b/doc/source/ray-air/getting-started.rst index 75e94cbd2105..e35f1e7c1192 100644 --- a/doc/source/ray-air/getting-started.rst +++ b/doc/source/ray-air/getting-started.rst @@ -84,23 +84,23 @@ First, let's start by loading a dataset from storage: Then, we define a ``Preprocessor`` pipeline for our task: -.. tab-set:: +.. tabs:: - .. tab-item:: XGBoost + .. group-tab:: XGBoost .. literalinclude:: examples/xgboost_starter.py :language: python :start-after: __air_xgb_preprocess_start__ :end-before: __air_xgb_preprocess_end__ - .. tab-item:: Pytorch + .. group-tab:: Pytorch .. literalinclude:: examples/pytorch_tabular_starter.py :language: python :start-after: __air_pytorch_preprocess_start__ :end-before: __air_pytorch_preprocess_end__ - .. tab-item:: Tensorflow + .. group-tab:: Tensorflow .. 
literalinclude:: examples/tf_tabular_starter.py :language: python @@ -114,23 +114,23 @@ Training Train a model with a ``Trainer`` with common ML frameworks: -.. tab-set:: +.. tabs:: - .. tab-item:: XGBoost + .. group-tab:: XGBoost .. literalinclude:: examples/xgboost_starter.py :language: python :start-after: __air_xgb_train_start__ :end-before: __air_xgb_train_end__ - .. tab-item:: Pytorch + .. group-tab:: Pytorch .. literalinclude:: examples/pytorch_tabular_starter.py :language: python :start-after: __air_pytorch_train_start__ :end-before: __air_pytorch_train_end__ - .. tab-item:: Tensorflow + .. group-tab:: Tensorflow .. literalinclude:: examples/tf_tabular_starter.py :language: python @@ -144,23 +144,23 @@ Hyperparameter Tuning You can specify a hyperparameter space to search over for each trainer: -.. tab-set:: +.. tabs:: - .. tab-item:: XGBoost + .. group-tab:: XGBoost .. literalinclude:: examples/xgboost_starter.py :language: python :start-after: __air_xgb_tuner_start__ :end-before: __air_xgb_tuner_end__ - .. tab-item:: Pytorch + .. group-tab:: Pytorch .. literalinclude:: examples/pytorch_tabular_starter.py :language: python :start-after: __air_pytorch_tuner_start__ :end-before: __air_pytorch_tuner_end__ - .. tab-item:: Tensorflow + .. group-tab:: Tensorflow .. literalinclude:: examples/tf_tabular_starter.py :language: python @@ -179,23 +179,23 @@ Batch Inference After running the steps in :ref:`Training ` or :ref:`Tuning `, use the trained model for scalable batch prediction with a ``BatchPredictor``. -.. tab-set:: +.. tabs:: - .. tab-item:: XGBoost + .. group-tab:: XGBoost .. literalinclude:: examples/xgboost_starter.py :language: python :start-after: __air_xgb_batchpred_start__ :end-before: __air_xgb_batchpred_end__ - .. tab-item:: Pytorch + .. group-tab:: Pytorch .. literalinclude:: examples/pytorch_tabular_starter.py :language: python :start-after: __air_pytorch_batchpred_start__ :end-before: __air_pytorch_batchpred_end__ - .. tab-item:: Tensorflow + .. 
group-tab:: Tensorflow .. literalinclude:: examples/tf_tabular_starter.py :language: python From b4d12f79093e81a507e3236f4cc6c3f6074b69ab Mon Sep 17 00:00:00 2001 From: Max Pumperla Date: Wed, 10 May 2023 10:13:20 +0200 Subject: [PATCH 319/424] [docs] fixing missing libs in batch-x examples (#35169) currently going through some batch-processing related examples, noticed that we're still missing installation instructions in some of them. Signed-off-by: Max Pumperla --- .../ray-air/examples/batch_forecasting.ipynb | 1128 +++++++++-------- .../ray-air/examples/batch_tuning.ipynb | 23 +- 2 files changed, 595 insertions(+), 556 deletions(-) diff --git a/doc/source/ray-air/examples/batch_forecasting.ipynb b/doc/source/ray-air/examples/batch_forecasting.ipynb index 5b954a770caa..c8fbae69c9a5 100644 --- a/doc/source/ray-air/examples/batch_forecasting.ipynb +++ b/doc/source/ray-air/examples/batch_forecasting.ipynb @@ -53,9 +53,29 @@ "Prerequisite for this notebook: Read the [Key Concepts](tune-60-seconds) page for Ray Tune.\n", "```\n", "\n", - "Let us start by importing a few required libraries, including open-source [Ray](https://github.com/ray-project/ray) itself!" + "First, let's make sure we have all Python packages we need installed." ] }, + { + "cell_type": "code", + "execution_count": null, + "outputs": [], + "source": [ + "!pip install -q \"ray[air]\" scikit-learn prophet statsmodels" + ], + "metadata": { + "collapsed": false + } + }, + { + "cell_type": "markdown", + "source": [ + "Next, let's import a few required libraries, including open-source [Ray](https://github.com/ray-project/ray) itself!" + ], + "metadata": { + "collapsed": false + } + }, { "cell_type": "code", "execution_count": 1, @@ -708,82 +728,82 @@ "name": "stderr", "output_type": "stream", "text": [ - "\u001b[2m\u001b[36m(train_model pid=569, ip=172.31.136.199)\u001b[0m INFO:prophet:Disabling yearly seasonality. 
Run prophet with yearly_seasonality=True to override this.\n", - "\u001b[2m\u001b[36m(train_model pid=569, ip=172.31.136.199)\u001b[0m INFO:prophet:Disabling daily seasonality. Run prophet with daily_seasonality=True to override this.\n" + "\u001B[2m\u001B[36m(train_model pid=569, ip=172.31.136.199)\u001B[0m INFO:prophet:Disabling yearly seasonality. Run prophet with yearly_seasonality=True to override this.\n", + "\u001B[2m\u001B[36m(train_model pid=569, ip=172.31.136.199)\u001B[0m INFO:prophet:Disabling daily seasonality. Run prophet with daily_seasonality=True to override this.\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ - "\u001b[2m\u001b[36m(train_model pid=569, ip=172.31.136.199)\u001b[0m \n", - "\u001b[2m\u001b[36m(train_model pid=569, ip=172.31.136.199)\u001b[0m Initial log joint probability = -24.6903\n", - "\u001b[2m\u001b[36m(train_model pid=569, ip=172.31.136.199)\u001b[0m Iteration 1. Log joint probability = 56.7318. Improved by 81.4221.\n", - "\u001b[2m\u001b[36m(train_model pid=569, ip=172.31.136.199)\u001b[0m Iteration 2. Log joint probability = 74.9096. Improved by 18.1778.\n", - "\u001b[2m\u001b[36m(train_model pid=569, ip=172.31.136.199)\u001b[0m Iteration 3. Log joint probability = 116.738. Improved by 41.8283.\n", - "\u001b[2m\u001b[36m(train_model pid=569, ip=172.31.136.199)\u001b[0m Iteration 4. Log joint probability = 121.485. Improved by 4.74745.\n", - "\u001b[2m\u001b[36m(train_model pid=569, ip=172.31.136.199)\u001b[0m Iteration 5. Log joint probability = 123.373. Improved by 1.88806.\n", - "\u001b[2m\u001b[36m(train_model pid=569, ip=172.31.136.199)\u001b[0m Iteration 6. Log joint probability = 123.877. Improved by 0.503922.\n", - "\u001b[2m\u001b[36m(train_model pid=569, ip=172.31.136.199)\u001b[0m Iteration 7. Log joint probability = 124.063. Improved by 0.185315.\n", - "\u001b[2m\u001b[36m(train_model pid=569, ip=172.31.136.199)\u001b[0m Iteration 8. Log joint probability = 124.083. 
Improved by 0.0205245.\n", - "\u001b[2m\u001b[36m(train_model pid=569, ip=172.31.136.199)\u001b[0m Iteration 9. Log joint probability = 124.187. Improved by 0.103934.\n", - "\u001b[2m\u001b[36m(train_model pid=569, ip=172.31.136.199)\u001b[0m Iteration 10. Log joint probability = 124.3. Improved by 0.11302.\n", - "\u001b[2m\u001b[36m(train_model pid=569, ip=172.31.136.199)\u001b[0m Iteration 11. Log joint probability = 124.316. Improved by 0.0161654.\n", - "\u001b[2m\u001b[36m(train_model pid=569, ip=172.31.136.199)\u001b[0m Iteration 12. Log joint probability = 124.375. Improved by 0.0588467.\n", - "\u001b[2m\u001b[36m(train_model pid=569, ip=172.31.136.199)\u001b[0m Iteration 13. Log joint probability = 124.406. Improved by 0.0307753.\n", - "\u001b[2m\u001b[36m(train_model pid=569, ip=172.31.136.199)\u001b[0m Iteration 14. Log joint probability = 124.414. Improved by 0.00790605.\n", - "\u001b[2m\u001b[36m(train_model pid=569, ip=172.31.136.199)\u001b[0m Iteration 15. Log joint probability = 124.421. Improved by 0.00744155.\n", - "\u001b[2m\u001b[36m(train_model pid=569, ip=172.31.136.199)\u001b[0m Iteration 16. Log joint probability = 124.428. Improved by 0.00688068.\n", - "\u001b[2m\u001b[36m(train_model pid=569, ip=172.31.136.199)\u001b[0m Iteration 17. Log joint probability = 124.444. Improved by 0.0160026.\n", - "\u001b[2m\u001b[36m(train_model pid=569, ip=172.31.136.199)\u001b[0m Iteration 18. Log joint probability = 124.45. Improved by 0.00550397.\n", - "\u001b[2m\u001b[36m(train_model pid=569, ip=172.31.136.199)\u001b[0m Iteration 19. Log joint probability = 124.45. Improved by 0.000490096.\n", - "\u001b[2m\u001b[36m(train_model pid=569, ip=172.31.136.199)\u001b[0m Iteration 20. Log joint probability = 124.45. Improved by 9.73771e-05.\n", - "\u001b[2m\u001b[36m(train_model pid=569, ip=172.31.136.199)\u001b[0m Iteration 21. Log joint probability = 124.456. 
Improved by 0.00539044.\n", - "\u001b[2m\u001b[36m(train_model pid=569, ip=172.31.136.199)\u001b[0m Iteration 22. Log joint probability = 124.462. Improved by 0.00667823.\n", - "\u001b[2m\u001b[36m(train_model pid=569, ip=172.31.136.199)\u001b[0m Iteration 23. Log joint probability = 124.464. Improved by 0.00138419.\n", - "\u001b[2m\u001b[36m(train_model pid=569, ip=172.31.136.199)\u001b[0m Iteration 24. Log joint probability = 124.466. Improved by 0.00192804.\n", - "\u001b[2m\u001b[36m(train_model pid=569, ip=172.31.136.199)\u001b[0m Iteration 25. Log joint probability = 124.47. Improved by 0.00406199.\n", - "\u001b[2m\u001b[36m(train_model pid=569, ip=172.31.136.199)\u001b[0m Iteration 26. Log joint probability = 124.47. Improved by 0.000535657.\n", - "\u001b[2m\u001b[36m(train_model pid=569, ip=172.31.136.199)\u001b[0m Iteration 27. Log joint probability = 124.471. Improved by 0.000549635.\n", - "\u001b[2m\u001b[36m(train_model pid=569, ip=172.31.136.199)\u001b[0m Iteration 28. Log joint probability = 124.474. Improved by 0.00299757.\n", - "\u001b[2m\u001b[36m(train_model pid=569, ip=172.31.136.199)\u001b[0m Iteration 29. Log joint probability = 124.475. Improved by 0.000802363.\n", - "\u001b[2m\u001b[36m(train_model pid=569, ip=172.31.136.199)\u001b[0m Iteration 30. Log joint probability = 124.475. Improved by 0.000302488.\n", - "\u001b[2m\u001b[36m(train_model pid=569, ip=172.31.136.199)\u001b[0m Iteration 31. Log joint probability = 124.476. Improved by 0.000657009.\n", - "\u001b[2m\u001b[36m(train_model pid=569, ip=172.31.136.199)\u001b[0m Iteration 32. Log joint probability = 124.476. Improved by 5.99847e-05.\n", - "\u001b[2m\u001b[36m(train_model pid=569, ip=172.31.136.199)\u001b[0m Iteration 33. Log joint probability = 124.476. Improved by 9.36055e-06.\n", - "\u001b[2m\u001b[36m(train_model pid=569, ip=172.31.136.199)\u001b[0m Iteration 34. Log joint probability = 124.476. 
Improved by 0.000110802.\n", - "\u001b[2m\u001b[36m(train_model pid=569, ip=172.31.136.199)\u001b[0m Iteration 35. Log joint probability = 124.476. Improved by 0.000323327.\n", - "\u001b[2m\u001b[36m(train_model pid=569, ip=172.31.136.199)\u001b[0m Iteration 36. Log joint probability = 124.476. Improved by 0.000124956.\n", - "\u001b[2m\u001b[36m(train_model pid=569, ip=172.31.136.199)\u001b[0m Iteration 37. Log joint probability = 124.476. Improved by 1.69834e-05.\n", - "\u001b[2m\u001b[36m(train_model pid=569, ip=172.31.136.199)\u001b[0m Iteration 38. Log joint probability = 124.476. Improved by 2.1557e-05.\n", - "\u001b[2m\u001b[36m(train_model pid=569, ip=172.31.136.199)\u001b[0m Iteration 39. Log joint probability = 124.476. Improved by 2.41295e-05.\n", - "\u001b[2m\u001b[36m(train_model pid=569, ip=172.31.136.199)\u001b[0m Iteration 40. Log joint probability = 124.476. Improved by 7.22567e-05.\n", - "\u001b[2m\u001b[36m(train_model pid=569, ip=172.31.136.199)\u001b[0m Iteration 41. Log joint probability = 124.476. Improved by 4.47652e-07.\n", - "\u001b[2m\u001b[36m(train_model pid=569, ip=172.31.136.199)\u001b[0m Iteration 42. Log joint probability = 124.476. Improved by 7.65725e-06.\n", - "\u001b[2m\u001b[36m(train_model pid=569, ip=172.31.136.199)\u001b[0m Iteration 43. Log joint probability = 124.476. Improved by 3.42432e-06.\n", - "\u001b[2m\u001b[36m(train_model pid=569, ip=172.31.136.199)\u001b[0m Iteration 44. Log joint probability = 124.476. Improved by 3.72182e-06.\n", - "\u001b[2m\u001b[36m(train_model pid=569, ip=172.31.136.199)\u001b[0m Iteration 45. Log joint probability = 124.476. Improved by 3.8856e-05.\n", - "\u001b[2m\u001b[36m(train_model pid=569, ip=172.31.136.199)\u001b[0m Iteration 46. Log joint probability = 124.476. Improved by 6.05641e-06.\n", - "\u001b[2m\u001b[36m(train_model pid=569, ip=172.31.136.199)\u001b[0m Iteration 47. Log joint probability = 124.476. 
Improved by 9.84136e-07.\n", - "\u001b[2m\u001b[36m(train_model pid=569, ip=172.31.136.199)\u001b[0m Iteration 48. Log joint probability = 124.476. Improved by 6.66388e-06.\n", - "\u001b[2m\u001b[36m(train_model pid=569, ip=172.31.136.199)\u001b[0m Iteration 49. Log joint probability = 124.476. Improved by 1.34989e-05.\n", - "\u001b[2m\u001b[36m(train_model pid=569, ip=172.31.136.199)\u001b[0m Iteration 50. Log joint probability = 124.476. Improved by 7.44078e-06.\n", - "\u001b[2m\u001b[36m(train_model pid=569, ip=172.31.136.199)\u001b[0m Iteration 51. Log joint probability = 124.476. Improved by 5.28681e-06.\n", - "\u001b[2m\u001b[36m(train_model pid=569, ip=172.31.136.199)\u001b[0m Iteration 52. Log joint probability = 124.476. Improved by 6.72879e-06.\n", - "\u001b[2m\u001b[36m(train_model pid=569, ip=172.31.136.199)\u001b[0m Iteration 53. Log joint probability = 124.476. Improved by 3.58152e-06.\n", - "\u001b[2m\u001b[36m(train_model pid=569, ip=172.31.136.199)\u001b[0m Iteration 54. Log joint probability = 124.476. Improved by 1.52185e-06.\n", - "\u001b[2m\u001b[36m(train_model pid=569, ip=172.31.136.199)\u001b[0m Iteration 55. Log joint probability = 124.476. Improved by 4.81723e-07.\n", - "\u001b[2m\u001b[36m(train_model pid=569, ip=172.31.136.199)\u001b[0m Iteration 56. Log joint probability = 124.476. Improved by 6.24187e-07.\n", - "\u001b[2m\u001b[36m(train_model pid=569, ip=172.31.136.199)\u001b[0m Iteration 57. Log joint probability = 124.476. Improved by 1.10699e-06.\n", - "\u001b[2m\u001b[36m(train_model pid=569, ip=172.31.136.199)\u001b[0m Iteration 58. Log joint probability = 124.476. Improved by 3.56434e-07.\n", - "\u001b[2m\u001b[36m(train_model pid=569, ip=172.31.136.199)\u001b[0m Iteration 59. Log joint probability = 124.476. Improved by 7.01115e-07.\n", - "\u001b[2m\u001b[36m(train_model pid=569, ip=172.31.136.199)\u001b[0m Iteration 60. Log joint probability = 124.476. 
Improved by 1.28068e-07.\n", - "\u001b[2m\u001b[36m(train_model pid=569, ip=172.31.136.199)\u001b[0m Iteration 61. Log joint probability = 124.476. Improved by 1.27551e-07.\n", - "\u001b[2m\u001b[36m(train_model pid=569, ip=172.31.136.199)\u001b[0m Iteration 62. Log joint probability = 124.476. Improved by 1.5548e-07.\n", - "\u001b[2m\u001b[36m(train_model pid=569, ip=172.31.136.199)\u001b[0m Iteration 63. Log joint probability = 124.476. Improved by 5.52294e-08.\n", - "\u001b[2m\u001b[36m(train_model pid=569, ip=172.31.136.199)\u001b[0m Iteration 64. Log joint probability = 124.476. Improved by 3.71382e-08.\n", - "\u001b[2m\u001b[36m(train_model pid=569, ip=172.31.136.199)\u001b[0m Iteration 65. Log joint probability = 124.476. Improved by 2.87695e-08.\n", - "\u001b[2m\u001b[36m(train_model pid=569, ip=172.31.136.199)\u001b[0m Iteration 66. Log joint probability = 124.476. Improved by 8.95623e-09.\n" + "\u001B[2m\u001B[36m(train_model pid=569, ip=172.31.136.199)\u001B[0m \n", + "\u001B[2m\u001B[36m(train_model pid=569, ip=172.31.136.199)\u001B[0m Initial log joint probability = -24.6903\n", + "\u001B[2m\u001B[36m(train_model pid=569, ip=172.31.136.199)\u001B[0m Iteration 1. Log joint probability = 56.7318. Improved by 81.4221.\n", + "\u001B[2m\u001B[36m(train_model pid=569, ip=172.31.136.199)\u001B[0m Iteration 2. Log joint probability = 74.9096. Improved by 18.1778.\n", + "\u001B[2m\u001B[36m(train_model pid=569, ip=172.31.136.199)\u001B[0m Iteration 3. Log joint probability = 116.738. Improved by 41.8283.\n", + "\u001B[2m\u001B[36m(train_model pid=569, ip=172.31.136.199)\u001B[0m Iteration 4. Log joint probability = 121.485. Improved by 4.74745.\n", + "\u001B[2m\u001B[36m(train_model pid=569, ip=172.31.136.199)\u001B[0m Iteration 5. Log joint probability = 123.373. Improved by 1.88806.\n", + "\u001B[2m\u001B[36m(train_model pid=569, ip=172.31.136.199)\u001B[0m Iteration 6. Log joint probability = 123.877. 
Improved by 0.503922.\n", + "\u001B[2m\u001B[36m(train_model pid=569, ip=172.31.136.199)\u001B[0m Iteration 7. Log joint probability = 124.063. Improved by 0.185315.\n", + "\u001B[2m\u001B[36m(train_model pid=569, ip=172.31.136.199)\u001B[0m Iteration 8. Log joint probability = 124.083. Improved by 0.0205245.\n", + "\u001B[2m\u001B[36m(train_model pid=569, ip=172.31.136.199)\u001B[0m Iteration 9. Log joint probability = 124.187. Improved by 0.103934.\n", + "\u001B[2m\u001B[36m(train_model pid=569, ip=172.31.136.199)\u001B[0m Iteration 10. Log joint probability = 124.3. Improved by 0.11302.\n", + "\u001B[2m\u001B[36m(train_model pid=569, ip=172.31.136.199)\u001B[0m Iteration 11. Log joint probability = 124.316. Improved by 0.0161654.\n", + "\u001B[2m\u001B[36m(train_model pid=569, ip=172.31.136.199)\u001B[0m Iteration 12. Log joint probability = 124.375. Improved by 0.0588467.\n", + "\u001B[2m\u001B[36m(train_model pid=569, ip=172.31.136.199)\u001B[0m Iteration 13. Log joint probability = 124.406. Improved by 0.0307753.\n", + "\u001B[2m\u001B[36m(train_model pid=569, ip=172.31.136.199)\u001B[0m Iteration 14. Log joint probability = 124.414. Improved by 0.00790605.\n", + "\u001B[2m\u001B[36m(train_model pid=569, ip=172.31.136.199)\u001B[0m Iteration 15. Log joint probability = 124.421. Improved by 0.00744155.\n", + "\u001B[2m\u001B[36m(train_model pid=569, ip=172.31.136.199)\u001B[0m Iteration 16. Log joint probability = 124.428. Improved by 0.00688068.\n", + "\u001B[2m\u001B[36m(train_model pid=569, ip=172.31.136.199)\u001B[0m Iteration 17. Log joint probability = 124.444. Improved by 0.0160026.\n", + "\u001B[2m\u001B[36m(train_model pid=569, ip=172.31.136.199)\u001B[0m Iteration 18. Log joint probability = 124.45. Improved by 0.00550397.\n", + "\u001B[2m\u001B[36m(train_model pid=569, ip=172.31.136.199)\u001B[0m Iteration 19. Log joint probability = 124.45. 
Improved by 0.000490096.\n", + "\u001B[2m\u001B[36m(train_model pid=569, ip=172.31.136.199)\u001B[0m Iteration 20. Log joint probability = 124.45. Improved by 9.73771e-05.\n", + "\u001B[2m\u001B[36m(train_model pid=569, ip=172.31.136.199)\u001B[0m Iteration 21. Log joint probability = 124.456. Improved by 0.00539044.\n", + "\u001B[2m\u001B[36m(train_model pid=569, ip=172.31.136.199)\u001B[0m Iteration 22. Log joint probability = 124.462. Improved by 0.00667823.\n", + "\u001B[2m\u001B[36m(train_model pid=569, ip=172.31.136.199)\u001B[0m Iteration 23. Log joint probability = 124.464. Improved by 0.00138419.\n", + "\u001B[2m\u001B[36m(train_model pid=569, ip=172.31.136.199)\u001B[0m Iteration 24. Log joint probability = 124.466. Improved by 0.00192804.\n", + "\u001B[2m\u001B[36m(train_model pid=569, ip=172.31.136.199)\u001B[0m Iteration 25. Log joint probability = 124.47. Improved by 0.00406199.\n", + "\u001B[2m\u001B[36m(train_model pid=569, ip=172.31.136.199)\u001B[0m Iteration 26. Log joint probability = 124.47. Improved by 0.000535657.\n", + "\u001B[2m\u001B[36m(train_model pid=569, ip=172.31.136.199)\u001B[0m Iteration 27. Log joint probability = 124.471. Improved by 0.000549635.\n", + "\u001B[2m\u001B[36m(train_model pid=569, ip=172.31.136.199)\u001B[0m Iteration 28. Log joint probability = 124.474. Improved by 0.00299757.\n", + "\u001B[2m\u001B[36m(train_model pid=569, ip=172.31.136.199)\u001B[0m Iteration 29. Log joint probability = 124.475. Improved by 0.000802363.\n", + "\u001B[2m\u001B[36m(train_model pid=569, ip=172.31.136.199)\u001B[0m Iteration 30. Log joint probability = 124.475. Improved by 0.000302488.\n", + "\u001B[2m\u001B[36m(train_model pid=569, ip=172.31.136.199)\u001B[0m Iteration 31. Log joint probability = 124.476. Improved by 0.000657009.\n", + "\u001B[2m\u001B[36m(train_model pid=569, ip=172.31.136.199)\u001B[0m Iteration 32. Log joint probability = 124.476. 
Improved by 5.99847e-05.\n", + "\u001B[2m\u001B[36m(train_model pid=569, ip=172.31.136.199)\u001B[0m Iteration 33. Log joint probability = 124.476. Improved by 9.36055e-06.\n", + "\u001B[2m\u001B[36m(train_model pid=569, ip=172.31.136.199)\u001B[0m Iteration 34. Log joint probability = 124.476. Improved by 0.000110802.\n", + "\u001B[2m\u001B[36m(train_model pid=569, ip=172.31.136.199)\u001B[0m Iteration 35. Log joint probability = 124.476. Improved by 0.000323327.\n", + "\u001B[2m\u001B[36m(train_model pid=569, ip=172.31.136.199)\u001B[0m Iteration 36. Log joint probability = 124.476. Improved by 0.000124956.\n", + "\u001B[2m\u001B[36m(train_model pid=569, ip=172.31.136.199)\u001B[0m Iteration 37. Log joint probability = 124.476. Improved by 1.69834e-05.\n", + "\u001B[2m\u001B[36m(train_model pid=569, ip=172.31.136.199)\u001B[0m Iteration 38. Log joint probability = 124.476. Improved by 2.1557e-05.\n", + "\u001B[2m\u001B[36m(train_model pid=569, ip=172.31.136.199)\u001B[0m Iteration 39. Log joint probability = 124.476. Improved by 2.41295e-05.\n", + "\u001B[2m\u001B[36m(train_model pid=569, ip=172.31.136.199)\u001B[0m Iteration 40. Log joint probability = 124.476. Improved by 7.22567e-05.\n", + "\u001B[2m\u001B[36m(train_model pid=569, ip=172.31.136.199)\u001B[0m Iteration 41. Log joint probability = 124.476. Improved by 4.47652e-07.\n", + "\u001B[2m\u001B[36m(train_model pid=569, ip=172.31.136.199)\u001B[0m Iteration 42. Log joint probability = 124.476. Improved by 7.65725e-06.\n", + "\u001B[2m\u001B[36m(train_model pid=569, ip=172.31.136.199)\u001B[0m Iteration 43. Log joint probability = 124.476. Improved by 3.42432e-06.\n", + "\u001B[2m\u001B[36m(train_model pid=569, ip=172.31.136.199)\u001B[0m Iteration 44. Log joint probability = 124.476. Improved by 3.72182e-06.\n", + "\u001B[2m\u001B[36m(train_model pid=569, ip=172.31.136.199)\u001B[0m Iteration 45. Log joint probability = 124.476. 
Improved by 3.8856e-05.\n", + "\u001B[2m\u001B[36m(train_model pid=569, ip=172.31.136.199)\u001B[0m Iteration 46. Log joint probability = 124.476. Improved by 6.05641e-06.\n", + "\u001B[2m\u001B[36m(train_model pid=569, ip=172.31.136.199)\u001B[0m Iteration 47. Log joint probability = 124.476. Improved by 9.84136e-07.\n", + "\u001B[2m\u001B[36m(train_model pid=569, ip=172.31.136.199)\u001B[0m Iteration 48. Log joint probability = 124.476. Improved by 6.66388e-06.\n", + "\u001B[2m\u001B[36m(train_model pid=569, ip=172.31.136.199)\u001B[0m Iteration 49. Log joint probability = 124.476. Improved by 1.34989e-05.\n", + "\u001B[2m\u001B[36m(train_model pid=569, ip=172.31.136.199)\u001B[0m Iteration 50. Log joint probability = 124.476. Improved by 7.44078e-06.\n", + "\u001B[2m\u001B[36m(train_model pid=569, ip=172.31.136.199)\u001B[0m Iteration 51. Log joint probability = 124.476. Improved by 5.28681e-06.\n", + "\u001B[2m\u001B[36m(train_model pid=569, ip=172.31.136.199)\u001B[0m Iteration 52. Log joint probability = 124.476. Improved by 6.72879e-06.\n", + "\u001B[2m\u001B[36m(train_model pid=569, ip=172.31.136.199)\u001B[0m Iteration 53. Log joint probability = 124.476. Improved by 3.58152e-06.\n", + "\u001B[2m\u001B[36m(train_model pid=569, ip=172.31.136.199)\u001B[0m Iteration 54. Log joint probability = 124.476. Improved by 1.52185e-06.\n", + "\u001B[2m\u001B[36m(train_model pid=569, ip=172.31.136.199)\u001B[0m Iteration 55. Log joint probability = 124.476. Improved by 4.81723e-07.\n", + "\u001B[2m\u001B[36m(train_model pid=569, ip=172.31.136.199)\u001B[0m Iteration 56. Log joint probability = 124.476. Improved by 6.24187e-07.\n", + "\u001B[2m\u001B[36m(train_model pid=569, ip=172.31.136.199)\u001B[0m Iteration 57. Log joint probability = 124.476. Improved by 1.10699e-06.\n", + "\u001B[2m\u001B[36m(train_model pid=569, ip=172.31.136.199)\u001B[0m Iteration 58. Log joint probability = 124.476. 
Improved by 3.56434e-07.\n", + "\u001B[2m\u001B[36m(train_model pid=569, ip=172.31.136.199)\u001B[0m Iteration 59. Log joint probability = 124.476. Improved by 7.01115e-07.\n", + "\u001B[2m\u001B[36m(train_model pid=569, ip=172.31.136.199)\u001B[0m Iteration 60. Log joint probability = 124.476. Improved by 1.28068e-07.\n", + "\u001B[2m\u001B[36m(train_model pid=569, ip=172.31.136.199)\u001B[0m Iteration 61. Log joint probability = 124.476. Improved by 1.27551e-07.\n", + "\u001B[2m\u001B[36m(train_model pid=569, ip=172.31.136.199)\u001B[0m Iteration 62. Log joint probability = 124.476. Improved by 1.5548e-07.\n", + "\u001B[2m\u001B[36m(train_model pid=569, ip=172.31.136.199)\u001B[0m Iteration 63. Log joint probability = 124.476. Improved by 5.52294e-08.\n", + "\u001B[2m\u001B[36m(train_model pid=569, ip=172.31.136.199)\u001B[0m Iteration 64. Log joint probability = 124.476. Improved by 3.71382e-08.\n", + "\u001B[2m\u001B[36m(train_model pid=569, ip=172.31.136.199)\u001B[0m Iteration 65. Log joint probability = 124.476. Improved by 2.87695e-08.\n", + "\u001B[2m\u001B[36m(train_model pid=569, ip=172.31.136.199)\u001B[0m Iteration 66. Log joint probability = 124.476. Improved by 8.95623e-09.\n" ] }, { @@ -833,519 +853,519 @@ "name": "stdout", "output_type": "stream", "text": [ - "\u001b[2m\u001b[36m(train_model pid=799, ip=172.31.136.199)\u001b[0m \n", - "\u001b[2m\u001b[36m(train_model pid=799, ip=172.31.136.199)\u001b[0m Initial log joint probability = -24.6903\n", - "\u001b[2m\u001b[36m(train_model pid=799, ip=172.31.136.199)\u001b[0m Iteration 1. Log joint probability = 55.3662. Improved by 80.0565.\n", - "\u001b[2m\u001b[36m(train_model pid=799, ip=172.31.136.199)\u001b[0m Iteration 2. Log joint probability = 95.8737. Improved by 40.5075.\n", - "\u001b[2m\u001b[36m(train_model pid=799, ip=172.31.136.199)\u001b[0m Iteration 3. Log joint probability = 120.379. 
Improved by 24.5055.\n", - "\u001b[2m\u001b[36m(train_model pid=799, ip=172.31.136.199)\u001b[0m Iteration 4. Log joint probability = 122.813. Improved by 2.43399.\n", - "\u001b[2m\u001b[36m(train_model pid=799, ip=172.31.136.199)\u001b[0m Iteration 5. Log joint probability = 123.073. Improved by 0.259582.\n", - "\u001b[2m\u001b[36m(train_model pid=799, ip=172.31.136.199)\u001b[0m Iteration 6. Log joint probability = 123.074. Improved by 0.00165627.\n", - "\u001b[2m\u001b[36m(train_model pid=799, ip=172.31.136.199)\u001b[0m Iteration 7. Log joint probability = 123.112. Improved by 0.0373812.\n", - "\u001b[2m\u001b[36m(train_model pid=799, ip=172.31.136.199)\u001b[0m Iteration 8. Log joint probability = 123.133. Improved by 0.0215269.\n", - "\u001b[2m\u001b[36m(train_model pid=799, ip=172.31.136.199)\u001b[0m Iteration 9. Log joint probability = 123.216. Improved by 0.0827413.\n", - "\u001b[2m\u001b[36m(train_model pid=799, ip=172.31.136.199)\u001b[0m Iteration 10. Log joint probability = 123.274. Improved by 0.0580866.\n", - "\u001b[2m\u001b[36m(train_model pid=799, ip=172.31.136.199)\u001b[0m Iteration 11. Log joint probability = 123.275. Improved by 0.000726338.\n", - "\u001b[2m\u001b[36m(train_model pid=799, ip=172.31.136.199)\u001b[0m Iteration 12. Log joint probability = 123.287. Improved by 0.0124071.\n", - "\u001b[2m\u001b[36m(train_model pid=799, ip=172.31.136.199)\u001b[0m Iteration 13. Log joint probability = 123.354. Improved by 0.0669767.\n", - "\u001b[2m\u001b[36m(train_model pid=799, ip=172.31.136.199)\u001b[0m Iteration 14. Log joint probability = 123.532. Improved by 0.177947.\n", - "\u001b[2m\u001b[36m(train_model pid=799, ip=172.31.136.199)\u001b[0m Iteration 15. Log joint probability = 123.537. Improved by 0.00465327.\n", - "\u001b[2m\u001b[36m(train_model pid=799, ip=172.31.136.199)\u001b[0m Iteration 16. Log joint probability = 123.567. 
Improved by 0.0304046.\n", - "\u001b[2m\u001b[36m(train_model pid=799, ip=172.31.136.199)\u001b[0m Iteration 17. Log joint probability = 123.626. Improved by 0.0586984.\n", - "\u001b[2m\u001b[36m(train_model pid=799, ip=172.31.136.199)\u001b[0m Iteration 18. Log joint probability = 123.717. Improved by 0.0906553.\n", - "\u001b[2m\u001b[36m(train_model pid=799, ip=172.31.136.199)\u001b[0m Iteration 19. Log joint probability = 123.767. Improved by 0.0503912.\n", - "\u001b[2m\u001b[36m(train_model pid=799, ip=172.31.136.199)\u001b[0m Iteration 20. Log joint probability = 123.794. Improved by 0.0270009.\n", - "\u001b[2m\u001b[36m(train_model pid=799, ip=172.31.136.199)\u001b[0m Iteration 21. Log joint probability = 123.809. Improved by 0.0150776.\n", - "\u001b[2m\u001b[36m(train_model pid=799, ip=172.31.136.199)\u001b[0m Iteration 22. Log joint probability = 123.819. Improved by 0.00949975.\n", - "\u001b[2m\u001b[36m(train_model pid=799, ip=172.31.136.199)\u001b[0m Iteration 23. Log joint probability = 123.826. Improved by 0.00746779.\n", - "\u001b[2m\u001b[36m(train_model pid=799, ip=172.31.136.199)\u001b[0m Iteration 24. Log joint probability = 123.83. Improved by 0.00414592.\n", - "\u001b[2m\u001b[36m(train_model pid=799, ip=172.31.136.199)\u001b[0m Iteration 25. Log joint probability = 123.835. Improved by 0.00493402.\n", - "\u001b[2m\u001b[36m(train_model pid=799, ip=172.31.136.199)\u001b[0m Iteration 26. Log joint probability = 123.836. Improved by 0.000572895.\n", - "\u001b[2m\u001b[36m(train_model pid=799, ip=172.31.136.199)\u001b[0m Iteration 27. Log joint probability = 123.837. Improved by 0.00107582.\n", - "\u001b[2m\u001b[36m(train_model pid=799, ip=172.31.136.199)\u001b[0m Iteration 28. Log joint probability = 123.839. Improved by 0.00219839.\n", - "\u001b[2m\u001b[36m(train_model pid=799, ip=172.31.136.199)\u001b[0m Iteration 29. Log joint probability = 123.84. 
Improved by 0.000507895.\n", - "\u001b[2m\u001b[36m(train_model pid=799, ip=172.31.136.199)\u001b[0m Iteration 30. Log joint probability = 123.841. Improved by 0.00153871.\n", - "\u001b[2m\u001b[36m(train_model pid=799, ip=172.31.136.199)\u001b[0m Iteration 31. Log joint probability = 123.842. Improved by 0.000513638.\n", - "\u001b[2m\u001b[36m(train_model pid=799, ip=172.31.136.199)\u001b[0m Iteration 32. Log joint probability = 123.842. Improved by 0.000147151.\n", - "\u001b[2m\u001b[36m(train_model pid=799, ip=172.31.136.199)\u001b[0m Iteration 33. Log joint probability = 123.842. Improved by 0.000274432.\n", - "\u001b[2m\u001b[36m(train_model pid=799, ip=172.31.136.199)\u001b[0m Iteration 34. Log joint probability = 123.842. Improved by 0.000105308.\n", - "\u001b[2m\u001b[36m(train_model pid=799, ip=172.31.136.199)\u001b[0m Iteration 35. Log joint probability = 123.842. Improved by 0.000105348.\n", - "\u001b[2m\u001b[36m(train_model pid=799, ip=172.31.136.199)\u001b[0m Iteration 36. Log joint probability = 123.842. Improved by 8.63243e-05.\n", - "\u001b[2m\u001b[36m(train_model pid=799, ip=172.31.136.199)\u001b[0m Iteration 37. Log joint probability = 123.842. Improved by 5.25735e-05.\n", - "\u001b[2m\u001b[36m(train_model pid=799, ip=172.31.136.199)\u001b[0m Iteration 38. Log joint probability = 123.842. Improved by 2.12369e-05.\n", - "\u001b[2m\u001b[36m(train_model pid=799, ip=172.31.136.199)\u001b[0m Iteration 39. Log joint probability = 123.842. Improved by 9.84594e-06.\n", - "\u001b[2m\u001b[36m(train_model pid=799, ip=172.31.136.199)\u001b[0m Iteration 40. Log joint probability = 123.842. Improved by 7.66574e-06.\n", - "\u001b[2m\u001b[36m(train_model pid=799, ip=172.31.136.199)\u001b[0m Iteration 41. Log joint probability = 123.842. Improved by 1.93305e-07.\n", - "\u001b[2m\u001b[36m(train_model pid=799, ip=172.31.136.199)\u001b[0m Iteration 42. Log joint probability = 123.842. 
Improved by 6.82331e-06.\n", - "\u001b[2m\u001b[36m(train_model pid=799, ip=172.31.136.199)\u001b[0m Iteration 43. Log joint probability = 123.842. Improved by 2.44574e-05.\n", - "\u001b[2m\u001b[36m(train_model pid=799, ip=172.31.136.199)\u001b[0m Iteration 44. Log joint probability = 123.842. Improved by 3.12753e-05.\n", - "\u001b[2m\u001b[36m(train_model pid=799, ip=172.31.136.199)\u001b[0m Iteration 45. Log joint probability = 123.842. Improved by 5.82608e-06.\n", - "\u001b[2m\u001b[36m(train_model pid=799, ip=172.31.136.199)\u001b[0m Iteration 46. Log joint probability = 123.842. Improved by 4.6484e-07.\n", - "\u001b[2m\u001b[36m(train_model pid=799, ip=172.31.136.199)\u001b[0m Iteration 47. Log joint probability = 123.842. Improved by 1.3307e-05.\n", - "\u001b[2m\u001b[36m(train_model pid=799, ip=172.31.136.199)\u001b[0m Iteration 48. Log joint probability = 123.843. Improved by 2.23967e-05.\n", - "\u001b[2m\u001b[36m(train_model pid=799, ip=172.31.136.199)\u001b[0m Iteration 49. Log joint probability = 123.843. Improved by 4.8155e-06.\n", - "\u001b[2m\u001b[36m(train_model pid=799, ip=172.31.136.199)\u001b[0m Iteration 50. Log joint probability = 123.843. Improved by 3.33246e-06.\n", - "\u001b[2m\u001b[36m(train_model pid=799, ip=172.31.136.199)\u001b[0m Iteration 51. Log joint probability = 123.843. Improved by 2.56905e-07.\n", - "\u001b[2m\u001b[36m(train_model pid=799, ip=172.31.136.199)\u001b[0m Iteration 52. Log joint probability = 123.843. Improved by 2.44229e-06.\n", - "\u001b[2m\u001b[36m(train_model pid=799, ip=172.31.136.199)\u001b[0m Iteration 53. Log joint probability = 123.843. Improved by 4.22397e-06.\n", - "\u001b[2m\u001b[36m(train_model pid=799, ip=172.31.136.199)\u001b[0m Iteration 54. Log joint probability = 123.843. Improved by 9.91746e-07.\n", - "\u001b[2m\u001b[36m(train_model pid=799, ip=172.31.136.199)\u001b[0m Iteration 55. Log joint probability = 123.843. 
Improved by 1.89293e-07.\n", - "\u001b[2m\u001b[36m(train_model pid=799, ip=172.31.136.199)\u001b[0m Iteration 56. Log joint probability = 123.843. Improved by 7.36958e-08.\n", - "\u001b[2m\u001b[36m(train_model pid=799, ip=172.31.136.199)\u001b[0m Iteration 57. Log joint probability = 123.843. Improved by 1.30557e-06.\n", - "\u001b[2m\u001b[36m(train_model pid=799, ip=172.31.136.199)\u001b[0m Iteration 58. Log joint probability = 123.843. Improved by 2.02889e-06.\n", - "\u001b[2m\u001b[36m(train_model pid=799, ip=172.31.136.199)\u001b[0m Iteration 59. Log joint probability = 123.843. Improved by 8.04966e-07.\n", - "\u001b[2m\u001b[36m(train_model pid=799, ip=172.31.136.199)\u001b[0m Iteration 60. Log joint probability = 123.843. Improved by 8.67718e-08.\n", - "\u001b[2m\u001b[36m(train_model pid=799, ip=172.31.136.199)\u001b[0m Iteration 61. Log joint probability = 123.843. Improved by 1.47952e-07.\n", - "\u001b[2m\u001b[36m(train_model pid=799, ip=172.31.136.199)\u001b[0m Iteration 62. Log joint probability = 123.843. Improved by 3.63641e-07.\n", - "\u001b[2m\u001b[36m(train_model pid=799, ip=172.31.136.199)\u001b[0m Iteration 63. Log joint probability = 123.843. Improved by 2.15615e-07.\n", - "\u001b[2m\u001b[36m(train_model pid=799, ip=172.31.136.199)\u001b[0m Iteration 64. Log joint probability = 123.843. Improved by 1.3613e-06.\n", - "\u001b[2m\u001b[36m(train_model pid=799, ip=172.31.136.199)\u001b[0m Iteration 65. Log joint probability = 123.843. Improved by 2.43754e-07.\n", - "\u001b[2m\u001b[36m(train_model pid=799, ip=172.31.136.199)\u001b[0m Iteration 66. Log joint probability = 123.843. Improved by 3.49743e-08.\n", - "\u001b[2m\u001b[36m(train_model pid=799, ip=172.31.136.199)\u001b[0m Iteration 67. Log joint probability = 123.843. Improved by 6.23249e-07.\n", - "\u001b[2m\u001b[36m(train_model pid=799, ip=172.31.136.199)\u001b[0m Iteration 68. Log joint probability = 123.843. 
Improved by 1.42323e-07.\n", - "\u001b[2m\u001b[36m(train_model pid=799, ip=172.31.136.199)\u001b[0m Iteration 69. Log joint probability = 123.843. Improved by 2.71484e-08.\n", - "\u001b[2m\u001b[36m(train_model pid=799, ip=172.31.136.199)\u001b[0m Iteration 70. Log joint probability = 123.843. Improved by 1.82188e-07.\n", - "\u001b[2m\u001b[36m(train_model pid=799, ip=172.31.136.199)\u001b[0m Iteration 71. Log joint probability = 123.843. Improved by 2.51761e-07.\n", - "\u001b[2m\u001b[36m(train_model pid=799, ip=172.31.136.199)\u001b[0m Iteration 72. Log joint probability = 123.843. Improved by 1.31146e-07.\n", - "\u001b[2m\u001b[36m(train_model pid=799, ip=172.31.136.199)\u001b[0m Iteration 73. Log joint probability = 123.843. Improved by 1.40753e-07.\n", - "\u001b[2m\u001b[36m(train_model pid=799, ip=172.31.136.199)\u001b[0m Iteration 74. Log joint probability = 123.843. Improved by 2.03943e-09.\n" + "\u001B[2m\u001B[36m(train_model pid=799, ip=172.31.136.199)\u001B[0m \n", + "\u001B[2m\u001B[36m(train_model pid=799, ip=172.31.136.199)\u001B[0m Initial log joint probability = -24.6903\n", + "\u001B[2m\u001B[36m(train_model pid=799, ip=172.31.136.199)\u001B[0m Iteration 1. Log joint probability = 55.3662. Improved by 80.0565.\n", + "\u001B[2m\u001B[36m(train_model pid=799, ip=172.31.136.199)\u001B[0m Iteration 2. Log joint probability = 95.8737. Improved by 40.5075.\n", + "\u001B[2m\u001B[36m(train_model pid=799, ip=172.31.136.199)\u001B[0m Iteration 3. Log joint probability = 120.379. Improved by 24.5055.\n", + "\u001B[2m\u001B[36m(train_model pid=799, ip=172.31.136.199)\u001B[0m Iteration 4. Log joint probability = 122.813. Improved by 2.43399.\n", + "\u001B[2m\u001B[36m(train_model pid=799, ip=172.31.136.199)\u001B[0m Iteration 5. Log joint probability = 123.073. Improved by 0.259582.\n", + "\u001B[2m\u001B[36m(train_model pid=799, ip=172.31.136.199)\u001B[0m Iteration 6. Log joint probability = 123.074. 
Improved by 0.00165627.\n", + "\u001B[2m\u001B[36m(train_model pid=799, ip=172.31.136.199)\u001B[0m Iteration 7. Log joint probability = 123.112. Improved by 0.0373812.\n", + "\u001B[2m\u001B[36m(train_model pid=799, ip=172.31.136.199)\u001B[0m Iteration 8. Log joint probability = 123.133. Improved by 0.0215269.\n", + "\u001B[2m\u001B[36m(train_model pid=799, ip=172.31.136.199)\u001B[0m Iteration 9. Log joint probability = 123.216. Improved by 0.0827413.\n", + "\u001B[2m\u001B[36m(train_model pid=799, ip=172.31.136.199)\u001B[0m Iteration 10. Log joint probability = 123.274. Improved by 0.0580866.\n", + "\u001B[2m\u001B[36m(train_model pid=799, ip=172.31.136.199)\u001B[0m Iteration 11. Log joint probability = 123.275. Improved by 0.000726338.\n", + "\u001B[2m\u001B[36m(train_model pid=799, ip=172.31.136.199)\u001B[0m Iteration 12. Log joint probability = 123.287. Improved by 0.0124071.\n", + "\u001B[2m\u001B[36m(train_model pid=799, ip=172.31.136.199)\u001B[0m Iteration 13. Log joint probability = 123.354. Improved by 0.0669767.\n", + "\u001B[2m\u001B[36m(train_model pid=799, ip=172.31.136.199)\u001B[0m Iteration 14. Log joint probability = 123.532. Improved by 0.177947.\n", + "\u001B[2m\u001B[36m(train_model pid=799, ip=172.31.136.199)\u001B[0m Iteration 15. Log joint probability = 123.537. Improved by 0.00465327.\n", + "\u001B[2m\u001B[36m(train_model pid=799, ip=172.31.136.199)\u001B[0m Iteration 16. Log joint probability = 123.567. Improved by 0.0304046.\n", + "\u001B[2m\u001B[36m(train_model pid=799, ip=172.31.136.199)\u001B[0m Iteration 17. Log joint probability = 123.626. Improved by 0.0586984.\n", + "\u001B[2m\u001B[36m(train_model pid=799, ip=172.31.136.199)\u001B[0m Iteration 18. Log joint probability = 123.717. Improved by 0.0906553.\n", + "\u001B[2m\u001B[36m(train_model pid=799, ip=172.31.136.199)\u001B[0m Iteration 19. Log joint probability = 123.767. 
Improved by 0.0503912.\n", + "\u001B[2m\u001B[36m(train_model pid=799, ip=172.31.136.199)\u001B[0m Iteration 20. Log joint probability = 123.794. Improved by 0.0270009.\n", + "\u001B[2m\u001B[36m(train_model pid=799, ip=172.31.136.199)\u001B[0m Iteration 21. Log joint probability = 123.809. Improved by 0.0150776.\n", + "\u001B[2m\u001B[36m(train_model pid=799, ip=172.31.136.199)\u001B[0m Iteration 22. Log joint probability = 123.819. Improved by 0.00949975.\n", + "\u001B[2m\u001B[36m(train_model pid=799, ip=172.31.136.199)\u001B[0m Iteration 23. Log joint probability = 123.826. Improved by 0.00746779.\n", + "\u001B[2m\u001B[36m(train_model pid=799, ip=172.31.136.199)\u001B[0m Iteration 24. Log joint probability = 123.83. Improved by 0.00414592.\n", + "\u001B[2m\u001B[36m(train_model pid=799, ip=172.31.136.199)\u001B[0m Iteration 25. Log joint probability = 123.835. Improved by 0.00493402.\n", + "\u001B[2m\u001B[36m(train_model pid=799, ip=172.31.136.199)\u001B[0m Iteration 26. Log joint probability = 123.836. Improved by 0.000572895.\n", + "\u001B[2m\u001B[36m(train_model pid=799, ip=172.31.136.199)\u001B[0m Iteration 27. Log joint probability = 123.837. Improved by 0.00107582.\n", + "\u001B[2m\u001B[36m(train_model pid=799, ip=172.31.136.199)\u001B[0m Iteration 28. Log joint probability = 123.839. Improved by 0.00219839.\n", + "\u001B[2m\u001B[36m(train_model pid=799, ip=172.31.136.199)\u001B[0m Iteration 29. Log joint probability = 123.84. Improved by 0.000507895.\n", + "\u001B[2m\u001B[36m(train_model pid=799, ip=172.31.136.199)\u001B[0m Iteration 30. Log joint probability = 123.841. Improved by 0.00153871.\n", + "\u001B[2m\u001B[36m(train_model pid=799, ip=172.31.136.199)\u001B[0m Iteration 31. Log joint probability = 123.842. Improved by 0.000513638.\n", + "\u001B[2m\u001B[36m(train_model pid=799, ip=172.31.136.199)\u001B[0m Iteration 32. Log joint probability = 123.842. 
Improved by 0.000147151.\n", + "\u001B[2m\u001B[36m(train_model pid=799, ip=172.31.136.199)\u001B[0m Iteration 33. Log joint probability = 123.842. Improved by 0.000274432.\n", + "\u001B[2m\u001B[36m(train_model pid=799, ip=172.31.136.199)\u001B[0m Iteration 34. Log joint probability = 123.842. Improved by 0.000105308.\n", + "\u001B[2m\u001B[36m(train_model pid=799, ip=172.31.136.199)\u001B[0m Iteration 35. Log joint probability = 123.842. Improved by 0.000105348.\n", + "\u001B[2m\u001B[36m(train_model pid=799, ip=172.31.136.199)\u001B[0m Iteration 36. Log joint probability = 123.842. Improved by 8.63243e-05.\n", + "\u001B[2m\u001B[36m(train_model pid=799, ip=172.31.136.199)\u001B[0m Iteration 37. Log joint probability = 123.842. Improved by 5.25735e-05.\n", + "\u001B[2m\u001B[36m(train_model pid=799, ip=172.31.136.199)\u001B[0m Iteration 38. Log joint probability = 123.842. Improved by 2.12369e-05.\n", + "\u001B[2m\u001B[36m(train_model pid=799, ip=172.31.136.199)\u001B[0m Iteration 39. Log joint probability = 123.842. Improved by 9.84594e-06.\n", + "\u001B[2m\u001B[36m(train_model pid=799, ip=172.31.136.199)\u001B[0m Iteration 40. Log joint probability = 123.842. Improved by 7.66574e-06.\n", + "\u001B[2m\u001B[36m(train_model pid=799, ip=172.31.136.199)\u001B[0m Iteration 41. Log joint probability = 123.842. Improved by 1.93305e-07.\n", + "\u001B[2m\u001B[36m(train_model pid=799, ip=172.31.136.199)\u001B[0m Iteration 42. Log joint probability = 123.842. Improved by 6.82331e-06.\n", + "\u001B[2m\u001B[36m(train_model pid=799, ip=172.31.136.199)\u001B[0m Iteration 43. Log joint probability = 123.842. Improved by 2.44574e-05.\n", + "\u001B[2m\u001B[36m(train_model pid=799, ip=172.31.136.199)\u001B[0m Iteration 44. Log joint probability = 123.842. Improved by 3.12753e-05.\n", + "\u001B[2m\u001B[36m(train_model pid=799, ip=172.31.136.199)\u001B[0m Iteration 45. Log joint probability = 123.842. 
Improved by 5.82608e-06.\n", + "\u001B[2m\u001B[36m(train_model pid=799, ip=172.31.136.199)\u001B[0m Iteration 46. Log joint probability = 123.842. Improved by 4.6484e-07.\n", + "\u001B[2m\u001B[36m(train_model pid=799, ip=172.31.136.199)\u001B[0m Iteration 47. Log joint probability = 123.842. Improved by 1.3307e-05.\n", + "\u001B[2m\u001B[36m(train_model pid=799, ip=172.31.136.199)\u001B[0m Iteration 48. Log joint probability = 123.843. Improved by 2.23967e-05.\n", + "\u001B[2m\u001B[36m(train_model pid=799, ip=172.31.136.199)\u001B[0m Iteration 49. Log joint probability = 123.843. Improved by 4.8155e-06.\n", + "\u001B[2m\u001B[36m(train_model pid=799, ip=172.31.136.199)\u001B[0m Iteration 50. Log joint probability = 123.843. Improved by 3.33246e-06.\n", + "\u001B[2m\u001B[36m(train_model pid=799, ip=172.31.136.199)\u001B[0m Iteration 51. Log joint probability = 123.843. Improved by 2.56905e-07.\n", + "\u001B[2m\u001B[36m(train_model pid=799, ip=172.31.136.199)\u001B[0m Iteration 52. Log joint probability = 123.843. Improved by 2.44229e-06.\n", + "\u001B[2m\u001B[36m(train_model pid=799, ip=172.31.136.199)\u001B[0m Iteration 53. Log joint probability = 123.843. Improved by 4.22397e-06.\n", + "\u001B[2m\u001B[36m(train_model pid=799, ip=172.31.136.199)\u001B[0m Iteration 54. Log joint probability = 123.843. Improved by 9.91746e-07.\n", + "\u001B[2m\u001B[36m(train_model pid=799, ip=172.31.136.199)\u001B[0m Iteration 55. Log joint probability = 123.843. Improved by 1.89293e-07.\n", + "\u001B[2m\u001B[36m(train_model pid=799, ip=172.31.136.199)\u001B[0m Iteration 56. Log joint probability = 123.843. Improved by 7.36958e-08.\n", + "\u001B[2m\u001B[36m(train_model pid=799, ip=172.31.136.199)\u001B[0m Iteration 57. Log joint probability = 123.843. Improved by 1.30557e-06.\n", + "\u001B[2m\u001B[36m(train_model pid=799, ip=172.31.136.199)\u001B[0m Iteration 58. Log joint probability = 123.843. 
Improved by 2.02889e-06.\n", + "\u001B[2m\u001B[36m(train_model pid=799, ip=172.31.136.199)\u001B[0m Iteration 59. Log joint probability = 123.843. Improved by 8.04966e-07.\n", + "\u001B[2m\u001B[36m(train_model pid=799, ip=172.31.136.199)\u001B[0m Iteration 60. Log joint probability = 123.843. Improved by 8.67718e-08.\n", + "\u001B[2m\u001B[36m(train_model pid=799, ip=172.31.136.199)\u001B[0m Iteration 61. Log joint probability = 123.843. Improved by 1.47952e-07.\n", + "\u001B[2m\u001B[36m(train_model pid=799, ip=172.31.136.199)\u001B[0m Iteration 62. Log joint probability = 123.843. Improved by 3.63641e-07.\n", + "\u001B[2m\u001B[36m(train_model pid=799, ip=172.31.136.199)\u001B[0m Iteration 63. Log joint probability = 123.843. Improved by 2.15615e-07.\n", + "\u001B[2m\u001B[36m(train_model pid=799, ip=172.31.136.199)\u001B[0m Iteration 64. Log joint probability = 123.843. Improved by 1.3613e-06.\n", + "\u001B[2m\u001B[36m(train_model pid=799, ip=172.31.136.199)\u001B[0m Iteration 65. Log joint probability = 123.843. Improved by 2.43754e-07.\n", + "\u001B[2m\u001B[36m(train_model pid=799, ip=172.31.136.199)\u001B[0m Iteration 66. Log joint probability = 123.843. Improved by 3.49743e-08.\n", + "\u001B[2m\u001B[36m(train_model pid=799, ip=172.31.136.199)\u001B[0m Iteration 67. Log joint probability = 123.843. Improved by 6.23249e-07.\n", + "\u001B[2m\u001B[36m(train_model pid=799, ip=172.31.136.199)\u001B[0m Iteration 68. Log joint probability = 123.843. Improved by 1.42323e-07.\n", + "\u001B[2m\u001B[36m(train_model pid=799, ip=172.31.136.199)\u001B[0m Iteration 69. Log joint probability = 123.843. Improved by 2.71484e-08.\n", + "\u001B[2m\u001B[36m(train_model pid=799, ip=172.31.136.199)\u001B[0m Iteration 70. Log joint probability = 123.843. Improved by 1.82188e-07.\n", + "\u001B[2m\u001B[36m(train_model pid=799, ip=172.31.136.199)\u001B[0m Iteration 71. Log joint probability = 123.843. 
Improved by 2.51761e-07.\n", + "\u001B[2m\u001B[36m(train_model pid=799, ip=172.31.136.199)\u001B[0m Iteration 72. Log joint probability = 123.843. Improved by 1.31146e-07.\n", + "\u001B[2m\u001B[36m(train_model pid=799, ip=172.31.136.199)\u001B[0m Iteration 73. Log joint probability = 123.843. Improved by 1.40753e-07.\n", + "\u001B[2m\u001B[36m(train_model pid=799, ip=172.31.136.199)\u001B[0m Iteration 74. Log joint probability = 123.843. Improved by 2.03943e-09.\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ - "\u001b[2m\u001b[36m(train_model pid=867, ip=172.31.136.199)\u001b[0m \n", - "\u001b[2m\u001b[36m(train_model pid=867, ip=172.31.136.199)\u001b[0m Initial log joint probability = -21.7758\n", - "\u001b[2m\u001b[36m(train_model pid=867, ip=172.31.136.199)\u001b[0m Iteration 1. Log joint probability = 41.5159. Improved by 63.2917.\n", - "\u001b[2m\u001b[36m(train_model pid=867, ip=172.31.136.199)\u001b[0m Iteration 2. Log joint probability = 68.4175. Improved by 26.9016.\n", - "\u001b[2m\u001b[36m(train_model pid=867, ip=172.31.136.199)\u001b[0m Iteration 3. Log joint probability = 88.1348. Improved by 19.7173.\n", - "\u001b[2m\u001b[36m(train_model pid=867, ip=172.31.136.199)\u001b[0m Iteration 4. Log joint probability = 88.147. Improved by 0.0121786.\n", - "\u001b[2m\u001b[36m(train_model pid=867, ip=172.31.136.199)\u001b[0m Iteration 5. Log joint probability = 88.1524. Improved by 0.00537125.\n", - "\u001b[2m\u001b[36m(train_model pid=867, ip=172.31.136.199)\u001b[0m Iteration 6. Log joint probability = 88.1633. Improved by 0.0109589.\n", - "\u001b[2m\u001b[36m(train_model pid=867, ip=172.31.136.199)\u001b[0m Iteration 7. Log joint probability = 88.1753. Improved by 0.0119717.\n", - "\u001b[2m\u001b[36m(train_model pid=867, ip=172.31.136.199)\u001b[0m Iteration 8. Log joint probability = 88.1783. Improved by 0.00301597.\n", - "\u001b[2m\u001b[36m(train_model pid=867, ip=172.31.136.199)\u001b[0m Iteration 9. Log joint probability = 88.2164. 
Improved by 0.0380849.\n", - "\u001b[2m\u001b[36m(train_model pid=867, ip=172.31.136.199)\u001b[0m Iteration 10. Log joint probability = 88.2239. Improved by 0.00749222.\n", - "\u001b[2m\u001b[36m(train_model pid=867, ip=172.31.136.199)\u001b[0m Iteration 11. Log joint probability = 88.3633. Improved by 0.139416.\n", - "\u001b[2m\u001b[36m(train_model pid=867, ip=172.31.136.199)\u001b[0m Iteration 12. Log joint probability = 88.4154. Improved by 0.0520892.\n", - "\u001b[2m\u001b[36m(train_model pid=867, ip=172.31.136.199)\u001b[0m Iteration 13. Log joint probability = 88.4651. Improved by 0.0496986.\n", - "\u001b[2m\u001b[36m(train_model pid=867, ip=172.31.136.199)\u001b[0m Iteration 14. Log joint probability = 89.8472. Improved by 1.38208.\n", - "\u001b[2m\u001b[36m(train_model pid=867, ip=172.31.136.199)\u001b[0m Iteration 15. Log joint probability = 89.8657. Improved by 0.0185247.\n", - "\u001b[2m\u001b[36m(train_model pid=867, ip=172.31.136.199)\u001b[0m Iteration 16. Log joint probability = 89.8732. Improved by 0.00753048.\n", - "\u001b[2m\u001b[36m(train_model pid=867, ip=172.31.136.199)\u001b[0m Iteration 17. Log joint probability = 89.9318. Improved by 0.0585562.\n", - "\u001b[2m\u001b[36m(train_model pid=867, ip=172.31.136.199)\u001b[0m Iteration 18. Log joint probability = 89.9447. Improved by 0.0129053.\n", - "\u001b[2m\u001b[36m(train_model pid=867, ip=172.31.136.199)\u001b[0m Iteration 19. Log joint probability = 89.965. Improved by 0.0202932.\n", - "\u001b[2m\u001b[36m(train_model pid=867, ip=172.31.136.199)\u001b[0m Iteration 20. Log joint probability = 90.0397. Improved by 0.0747472.\n", - "\u001b[2m\u001b[36m(train_model pid=867, ip=172.31.136.199)\u001b[0m Iteration 21. Log joint probability = 90.0875. Improved by 0.0477876.\n", - "\u001b[2m\u001b[36m(train_model pid=867, ip=172.31.136.199)\u001b[0m Iteration 22. Log joint probability = 90.105. 
Improved by 0.0175359.\n", - "\u001b[2m\u001b[36m(train_model pid=867, ip=172.31.136.199)\u001b[0m Iteration 23. Log joint probability = 90.4892. Improved by 0.384151.\n", - "\u001b[2m\u001b[36m(train_model pid=867, ip=172.31.136.199)\u001b[0m Iteration 24. Log joint probability = 90.556. Improved by 0.0668293.\n", - "\u001b[2m\u001b[36m(train_model pid=867, ip=172.31.136.199)\u001b[0m Iteration 25. Log joint probability = 90.6581. Improved by 0.102125.\n", - "\u001b[2m\u001b[36m(train_model pid=867, ip=172.31.136.199)\u001b[0m Iteration 26. Log joint probability = 90.742. Improved by 0.0838101.\n", - "\u001b[2m\u001b[36m(train_model pid=867, ip=172.31.136.199)\u001b[0m Iteration 27. Log joint probability = 90.7738. Improved by 0.031868.\n", - "\u001b[2m\u001b[36m(train_model pid=867, ip=172.31.136.199)\u001b[0m Iteration 28. Log joint probability = 90.7856. Improved by 0.011803.\n", - "\u001b[2m\u001b[36m(train_model pid=867, ip=172.31.136.199)\u001b[0m Iteration 29. Log joint probability = 90.8302. Improved by 0.0445906.\n", - "\u001b[2m\u001b[36m(train_model pid=867, ip=172.31.136.199)\u001b[0m Iteration 30. Log joint probability = 90.8852. Improved by 0.0549923.\n", - "\u001b[2m\u001b[36m(train_model pid=867, ip=172.31.136.199)\u001b[0m Iteration 31. Log joint probability = 90.9034. Improved by 0.0181786.\n", - "\u001b[2m\u001b[36m(train_model pid=867, ip=172.31.136.199)\u001b[0m Iteration 32. Log joint probability = 90.9276. Improved by 0.0241721.\n", - "\u001b[2m\u001b[36m(train_model pid=867, ip=172.31.136.199)\u001b[0m Iteration 33. Log joint probability = 90.9412. Improved by 0.0136337.\n", - "\u001b[2m\u001b[36m(train_model pid=867, ip=172.31.136.199)\u001b[0m Iteration 34. Log joint probability = 90.9542. Improved by 0.0130142.\n", - "\u001b[2m\u001b[36m(train_model pid=867, ip=172.31.136.199)\u001b[0m Iteration 35. Log joint probability = 90.962. 
Improved by 0.00775981.\n", - "\u001b[2m\u001b[36m(train_model pid=867, ip=172.31.136.199)\u001b[0m Iteration 36. Log joint probability = 90.9638. Improved by 0.00186611.\n", - "\u001b[2m\u001b[36m(train_model pid=867, ip=172.31.136.199)\u001b[0m Iteration 37. Log joint probability = 90.9718. Improved by 0.00797594.\n", - "\u001b[2m\u001b[36m(train_model pid=867, ip=172.31.136.199)\u001b[0m Iteration 38. Log joint probability = 90.976. Improved by 0.0042081.\n", - "\u001b[2m\u001b[36m(train_model pid=867, ip=172.31.136.199)\u001b[0m Iteration 39. Log joint probability = 90.9777. Improved by 0.00165647.\n", - "\u001b[2m\u001b[36m(train_model pid=867, ip=172.31.136.199)\u001b[0m Iteration 40. Log joint probability = 90.9814. Improved by 0.00370259.\n", - "\u001b[2m\u001b[36m(train_model pid=867, ip=172.31.136.199)\u001b[0m Iteration 41. Log joint probability = 90.9839. Improved by 0.00256843.\n", - "\u001b[2m\u001b[36m(train_model pid=867, ip=172.31.136.199)\u001b[0m Iteration 42. Log joint probability = 90.9851. Improved by 0.0011523.\n", - "\u001b[2m\u001b[36m(train_model pid=867, ip=172.31.136.199)\u001b[0m Iteration 43. Log joint probability = 90.9868. Improved by 0.00170077.\n", - "\u001b[2m\u001b[36m(train_model pid=867, ip=172.31.136.199)\u001b[0m Iteration 44. Log joint probability = 90.9874. Improved by 0.000631959.\n", - "\u001b[2m\u001b[36m(train_model pid=867, ip=172.31.136.199)\u001b[0m Iteration 45. Log joint probability = 90.9885. Improved by 0.00111174.\n", - "\u001b[2m\u001b[36m(train_model pid=867, ip=172.31.136.199)\u001b[0m Iteration 46. Log joint probability = 90.9887. Improved by 0.000172812.\n", - "\u001b[2m\u001b[36m(train_model pid=867, ip=172.31.136.199)\u001b[0m Iteration 47. Log joint probability = 90.9897. Improved by 0.000951722.\n", - "\u001b[2m\u001b[36m(train_model pid=867, ip=172.31.136.199)\u001b[0m Iteration 48. Log joint probability = 90.9904. 
Improved by 0.000744776.\n", - "\u001b[2m\u001b[36m(train_model pid=867, ip=172.31.136.199)\u001b[0m Iteration 49. Log joint probability = 90.9907. Improved by 0.000334385.\n", - "\u001b[2m\u001b[36m(train_model pid=867, ip=172.31.136.199)\u001b[0m Iteration 50. Log joint probability = 90.9911. Improved by 0.000323131.\n", - "\u001b[2m\u001b[36m(train_model pid=867, ip=172.31.136.199)\u001b[0m Iteration 51. Log joint probability = 90.9913. Improved by 0.000195932.\n", - "\u001b[2m\u001b[36m(train_model pid=867, ip=172.31.136.199)\u001b[0m Iteration 52. Log joint probability = 90.9913. Improved by 7.26249e-05.\n", - "\u001b[2m\u001b[36m(train_model pid=867, ip=172.31.136.199)\u001b[0m Iteration 53. Log joint probability = 90.9914. Improved by 9.38402e-05.\n", - "\u001b[2m\u001b[36m(train_model pid=867, ip=172.31.136.199)\u001b[0m Iteration 54. Log joint probability = 90.9915. Improved by 0.000104485.\n", - "\u001b[2m\u001b[36m(train_model pid=867, ip=172.31.136.199)\u001b[0m Iteration 55. Log joint probability = 90.9915. Improved by 3.9586e-06.\n", - "\u001b[2m\u001b[36m(train_model pid=867, ip=172.31.136.199)\u001b[0m Iteration 56. Log joint probability = 90.9916. Improved by 7.77437e-05.\n", - "\u001b[2m\u001b[36m(train_model pid=867, ip=172.31.136.199)\u001b[0m Iteration 57. Log joint probability = 90.9916. Improved by 2.79958e-05.\n", - "\u001b[2m\u001b[36m(train_model pid=867, ip=172.31.136.199)\u001b[0m Iteration 58. Log joint probability = 90.9917. Improved by 5.30653e-05.\n", - "\u001b[2m\u001b[36m(train_model pid=867, ip=172.31.136.199)\u001b[0m Iteration 59. Log joint probability = 90.9918. Improved by 5.32272e-05.\n", - "\u001b[2m\u001b[36m(train_model pid=867, ip=172.31.136.199)\u001b[0m Iteration 60. Log joint probability = 90.9918. Improved by 2.72417e-05.\n", - "\u001b[2m\u001b[36m(train_model pid=867, ip=172.31.136.199)\u001b[0m Iteration 61. Log joint probability = 90.9919. 
Improved by 9.20075e-05.\n", - "\u001b[2m\u001b[36m(train_model pid=867, ip=172.31.136.199)\u001b[0m Iteration 62. Log joint probability = 90.9919. Improved by 1.97313e-05.\n", - "\u001b[2m\u001b[36m(train_model pid=867, ip=172.31.136.199)\u001b[0m Iteration 63. Log joint probability = 90.9919. Improved by 3.52389e-05.\n", - "\u001b[2m\u001b[36m(train_model pid=867, ip=172.31.136.199)\u001b[0m Iteration 64. Log joint probability = 90.992. Improved by 4.48494e-05.\n", - "\u001b[2m\u001b[36m(train_model pid=867, ip=172.31.136.199)\u001b[0m Iteration 65. Log joint probability = 90.992. Improved by 3.68675e-05.\n", - "\u001b[2m\u001b[36m(train_model pid=867, ip=172.31.136.199)\u001b[0m Iteration 66. Log joint probability = 90.992. Improved by 2.02192e-05.\n", - "\u001b[2m\u001b[36m(train_model pid=867, ip=172.31.136.199)\u001b[0m Iteration 67. Log joint probability = 90.9921. Improved by 2.05867e-05.\n", - "\u001b[2m\u001b[36m(train_model pid=867, ip=172.31.136.199)\u001b[0m Iteration 68. Log joint probability = 90.9921. Improved by 1.60531e-05.\n", - "\u001b[2m\u001b[36m(train_model pid=867, ip=172.31.136.199)\u001b[0m Iteration 69. Log joint probability = 90.9921. Improved by 1.09975e-05.\n", - "\u001b[2m\u001b[36m(train_model pid=867, ip=172.31.136.199)\u001b[0m Iteration 70. Log joint probability = 90.9921. Improved by 5.48589e-06.\n", - "\u001b[2m\u001b[36m(train_model pid=867, ip=172.31.136.199)\u001b[0m Iteration 71. Log joint probability = 90.9921. Improved by 5.17867e-07.\n", - "\u001b[2m\u001b[36m(train_model pid=867, ip=172.31.136.199)\u001b[0m Iteration 72. Log joint probability = 90.9921. Improved by 6.19947e-06.\n", - "\u001b[2m\u001b[36m(train_model pid=867, ip=172.31.136.199)\u001b[0m Iteration 73. Log joint probability = 90.9921. Improved by 1.90771e-05.\n", - "\u001b[2m\u001b[36m(train_model pid=867, ip=172.31.136.199)\u001b[0m Iteration 74. Log joint probability = 90.9921. 
Improved by 1.96755e-05.\n", - "\u001b[2m\u001b[36m(train_model pid=867, ip=172.31.136.199)\u001b[0m Iteration 75. Log joint probability = 90.9921. Improved by 3.14253e-06.\n", - "\u001b[2m\u001b[36m(train_model pid=867, ip=172.31.136.199)\u001b[0m Iteration 76. Log joint probability = 90.9922. Improved by 2.00154e-05.\n", - "\u001b[2m\u001b[36m(train_model pid=867, ip=172.31.136.199)\u001b[0m Iteration 77. Log joint probability = 90.9922. Improved by 7.38871e-06.\n", - "\u001b[2m\u001b[36m(train_model pid=867, ip=172.31.136.199)\u001b[0m Iteration 78. Log joint probability = 90.9922. Improved by 5.2899e-06.\n", - "\u001b[2m\u001b[36m(train_model pid=867, ip=172.31.136.199)\u001b[0m Iteration 79. Log joint probability = 90.9922. Improved by 3.05609e-06.\n", - "\u001b[2m\u001b[36m(train_model pid=867, ip=172.31.136.199)\u001b[0m Iteration 80. Log joint probability = 90.9922. Improved by 4.27669e-07.\n", - "\u001b[2m\u001b[36m(train_model pid=867, ip=172.31.136.199)\u001b[0m Iteration 81. Log joint probability = 90.9922. Improved by 2.5749e-06.\n", - "\u001b[2m\u001b[36m(train_model pid=867, ip=172.31.136.199)\u001b[0m Iteration 82. Log joint probability = 90.9922. Improved by 4.80204e-06.\n", - "\u001b[2m\u001b[36m(train_model pid=867, ip=172.31.136.199)\u001b[0m Iteration 83. Log joint probability = 90.9922. Improved by 2.77249e-06.\n", - "\u001b[2m\u001b[36m(train_model pid=867, ip=172.31.136.199)\u001b[0m Iteration 84. Log joint probability = 90.9922. Improved by 6.44e-07.\n", - "\u001b[2m\u001b[36m(train_model pid=867, ip=172.31.136.199)\u001b[0m Iteration 85. Log joint probability = 90.9922. Improved by 5.69327e-06.\n", - "\u001b[2m\u001b[36m(train_model pid=867, ip=172.31.136.199)\u001b[0m Iteration 86. Log joint probability = 90.9922. Improved by 6.80163e-07.\n", - "\u001b[2m\u001b[36m(train_model pid=867, ip=172.31.136.199)\u001b[0m Iteration 87. Log joint probability = 90.9922. 
Improved by 1.10273e-06.\n", - "\u001b[2m\u001b[36m(train_model pid=867, ip=172.31.136.199)\u001b[0m Iteration 88. Log joint probability = 90.9922. Improved by 3.1814e-07.\n", - "\u001b[2m\u001b[36m(train_model pid=867, ip=172.31.136.199)\u001b[0m Iteration 89. Log joint probability = 90.9922. Improved by 1.15471e-06.\n", - "\u001b[2m\u001b[36m(train_model pid=867, ip=172.31.136.199)\u001b[0m Iteration 90. Log joint probability = 90.9922. Improved by 2.80645e-07.\n", - "\u001b[2m\u001b[36m(train_model pid=867, ip=172.31.136.199)\u001b[0m Iteration 91. Log joint probability = 90.9922. Improved by 1.97469e-06.\n", - "\u001b[2m\u001b[36m(train_model pid=867, ip=172.31.136.199)\u001b[0m Iteration 92. Log joint probability = 90.9922. Improved by 3.01754e-07.\n", - "\u001b[2m\u001b[36m(train_model pid=867, ip=172.31.136.199)\u001b[0m Iteration 93. Log joint probability = 90.9922. Improved by 5.89157e-07.\n", - "\u001b[2m\u001b[36m(train_model pid=867, ip=172.31.136.199)\u001b[0m Iteration 94. Log joint probability = 90.9922. Improved by 4.37725e-07.\n", - "\u001b[2m\u001b[36m(train_model pid=867, ip=172.31.136.199)\u001b[0m Iteration 95. Log joint probability = 90.9922. Improved by 2.67717e-07.\n", - "\u001b[2m\u001b[36m(train_model pid=867, ip=172.31.136.199)\u001b[0m Iteration 96. Log joint probability = 90.9922. Improved by 3.00174e-07.\n", - "\u001b[2m\u001b[36m(train_model pid=867, ip=172.31.136.199)\u001b[0m Iteration 97. Log joint probability = 90.9922. Improved by 4.5588e-08.\n", - "\u001b[2m\u001b[36m(train_model pid=867, ip=172.31.136.199)\u001b[0m Iteration 98. Log joint probability = 90.9922. Improved by 1.30664e-07.\n", - "\u001b[2m\u001b[36m(train_model pid=867, ip=172.31.136.199)\u001b[0m Iteration 99. Log joint probability = 90.9922. Improved by 2.56521e-07.\n", - "\u001b[2m\u001b[36m(train_model pid=867, ip=172.31.136.199)\u001b[0m Iteration 100. Log joint probability = 90.9922. 
Improved by 1.77492e-07.\n", - "\u001b[2m\u001b[36m(train_model pid=867, ip=172.31.136.199)\u001b[0m Iteration 101. Log joint probability = 90.9922. Improved by 1.62366e-07.\n", - "\u001b[2m\u001b[36m(train_model pid=867, ip=172.31.136.199)\u001b[0m Iteration 102. Log joint probability = 90.9922. Improved by 1.84507e-07.\n", - "\u001b[2m\u001b[36m(train_model pid=867, ip=172.31.136.199)\u001b[0m Iteration 103. Log joint probability = 90.9922. Improved by 9.9194e-08.\n", - "\u001b[2m\u001b[36m(train_model pid=867, ip=172.31.136.199)\u001b[0m Iteration 104. Log joint probability = 90.9922. Improved by 6.85e-08.\n", - "\u001b[2m\u001b[36m(train_model pid=867, ip=172.31.136.199)\u001b[0m Iteration 105. Log joint probability = 90.9922. Improved by 2.19949e-08.\n", - "\u001b[2m\u001b[36m(train_model pid=867, ip=172.31.136.199)\u001b[0m Iteration 106. Log joint probability = 90.9922. Improved by 3.50271e-08.\n", - "\u001b[2m\u001b[36m(train_model pid=867, ip=172.31.136.199)\u001b[0m Iteration 107. Log joint probability = 90.9922. Improved by 7.81865e-08.\n", - "\u001b[2m\u001b[36m(train_model pid=867, ip=172.31.136.199)\u001b[0m Iteration 108. Log joint probability = 90.9922. Improved by 6.23645e-08.\n", - "\u001b[2m\u001b[36m(train_model pid=867, ip=172.31.136.199)\u001b[0m Iteration 109. Log joint probability = 90.9922. Improved by 6.12578e-08.\n", - "\u001b[2m\u001b[36m(train_model pid=867, ip=172.31.136.199)\u001b[0m Iteration 110. Log joint probability = 90.9922. Improved by 5.88466e-08.\n", - "\u001b[2m\u001b[36m(train_model pid=867, ip=172.31.136.199)\u001b[0m Iteration 111. Log joint probability = 90.9922. Improved by 1.63983e-07.\n", - "\u001b[2m\u001b[36m(train_model pid=867, ip=172.31.136.199)\u001b[0m Iteration 112. Log joint probability = 90.9922. Improved by 1.58961e-07.\n", - "\u001b[2m\u001b[36m(train_model pid=867, ip=172.31.136.199)\u001b[0m Iteration 113. Log joint probability = 90.9922. 
Improved by 4.68893e-08.\n", - "\u001b[2m\u001b[36m(train_model pid=867, ip=172.31.136.199)\u001b[0m Iteration 114. Log joint probability = 90.9922. Improved by 2.36556e-08.\n", - "\u001b[2m\u001b[36m(train_model pid=867, ip=172.31.136.199)\u001b[0m Iteration 115. Log joint probability = 90.9922. Improved by 4.54818e-08.\n", - "\u001b[2m\u001b[36m(train_model pid=867, ip=172.31.136.199)\u001b[0m Iteration 116. Log joint probability = 90.9922. Improved by 2.94216e-08.\n", - "\u001b[2m\u001b[36m(train_model pid=867, ip=172.31.136.199)\u001b[0m Iteration 117. Log joint probability = 90.9922. Improved by 1.2584e-08.\n", - "\u001b[2m\u001b[36m(train_model pid=867, ip=172.31.136.199)\u001b[0m Iteration 118. Log joint probability = 90.9922. Improved by 2.77487e-08.\n", - "\u001b[2m\u001b[36m(train_model pid=867, ip=172.31.136.199)\u001b[0m Iteration 119. Log joint probability = 90.9922. Improved by 2.76151e-08.\n", - "\u001b[2m\u001b[36m(train_model pid=867, ip=172.31.136.199)\u001b[0m Iteration 120. Log joint probability = 90.9922. Improved by 1.37145e-08.\n", - "\u001b[2m\u001b[36m(train_model pid=867, ip=172.31.136.199)\u001b[0m Iteration 121. Log joint probability = 90.9922. Improved by 4.27885e-08.\n", - "\u001b[2m\u001b[36m(train_model pid=867, ip=172.31.136.199)\u001b[0m Iteration 122. Log joint probability = 90.9922. Improved by 7.76434e-09.\n", - "\u001b[2m\u001b[36m(train_model pid=868, ip=172.31.136.199)\u001b[0m \n", - "\u001b[2m\u001b[36m(train_model pid=868, ip=172.31.136.199)\u001b[0m Initial log joint probability = -21.7758\n", - "\u001b[2m\u001b[36m(train_model pid=868, ip=172.31.136.199)\u001b[0m Iteration 1. Log joint probability = 20.1836. Improved by 41.9594.\n", - "\u001b[2m\u001b[36m(train_model pid=868, ip=172.31.136.199)\u001b[0m Iteration 2. Log joint probability = 59.1549. Improved by 38.9713.\n", - "\u001b[2m\u001b[36m(train_model pid=868, ip=172.31.136.199)\u001b[0m Iteration 3. Log joint probability = 79.9487. 
Improved by 20.7939.\n", - "\u001b[2m\u001b[36m(train_model pid=868, ip=172.31.136.199)\u001b[0m Iteration 4. Log joint probability = 90.4604. Improved by 10.5117.\n", - "\u001b[2m\u001b[36m(train_model pid=868, ip=172.31.136.199)\u001b[0m Iteration 5. Log joint probability = 90.7685. Improved by 0.308148.\n", - "\u001b[2m\u001b[36m(train_model pid=868, ip=172.31.136.199)\u001b[0m Iteration 6. Log joint probability = 90.8866. Improved by 0.118032.\n", - "\u001b[2m\u001b[36m(train_model pid=868, ip=172.31.136.199)\u001b[0m Iteration 7. Log joint probability = 90.9086. Improved by 0.0220841.\n", - "\u001b[2m\u001b[36m(train_model pid=868, ip=172.31.136.199)\u001b[0m Iteration 8. Log joint probability = 90.9484. Improved by 0.0397311.\n", - "\u001b[2m\u001b[36m(train_model pid=868, ip=172.31.136.199)\u001b[0m Iteration 9. Log joint probability = 90.9681. Improved by 0.0197759.\n", - "\u001b[2m\u001b[36m(train_model pid=868, ip=172.31.136.199)\u001b[0m Iteration 10. Log joint probability = 90.9738. Improved by 0.00567126.\n", - "\u001b[2m\u001b[36m(train_model pid=868, ip=172.31.136.199)\u001b[0m Iteration 11. Log joint probability = 90.9772. Improved by 0.00338425.\n", - "\u001b[2m\u001b[36m(train_model pid=868, ip=172.31.136.199)\u001b[0m Iteration 12. Log joint probability = 90.979. Improved by 0.00180031.\n", - "\u001b[2m\u001b[36m(train_model pid=868, ip=172.31.136.199)\u001b[0m Iteration 13. Log joint probability = 90.9909. Improved by 0.0118985.\n", - "\u001b[2m\u001b[36m(train_model pid=868, ip=172.31.136.199)\u001b[0m Iteration 14. Log joint probability = 90.9977. Improved by 0.00677184.\n", - "\u001b[2m\u001b[36m(train_model pid=868, ip=172.31.136.199)\u001b[0m Iteration 15. Log joint probability = 90.9994. Improved by 0.00176338.\n", - "\u001b[2m\u001b[36m(train_model pid=868, ip=172.31.136.199)\u001b[0m Iteration 16. Log joint probability = 90.9998. 
Improved by 0.000346058.\n", - "\u001b[2m\u001b[36m(train_model pid=868, ip=172.31.136.199)\u001b[0m Iteration 17. Log joint probability = 91.0026. Improved by 0.00283502.\n", - "\u001b[2m\u001b[36m(train_model pid=868, ip=172.31.136.199)\u001b[0m Iteration 18. Log joint probability = 91.0067. Improved by 0.00404095.\n", - "\u001b[2m\u001b[36m(train_model pid=868, ip=172.31.136.199)\u001b[0m Iteration 19. Log joint probability = 91.009. Improved by 0.00230573.\n", - "\u001b[2m\u001b[36m(train_model pid=868, ip=172.31.136.199)\u001b[0m Iteration 20. Log joint probability = 91.0097. Improved by 0.000728684.\n", - "\u001b[2m\u001b[36m(train_model pid=868, ip=172.31.136.199)\u001b[0m Iteration 21. Log joint probability = 91.0105. Improved by 0.000842848.\n", - "\u001b[2m\u001b[36m(train_model pid=868, ip=172.31.136.199)\u001b[0m Iteration 22. Log joint probability = 91.0137. Improved by 0.00315459.\n", - "\u001b[2m\u001b[36m(train_model pid=868, ip=172.31.136.199)\u001b[0m Iteration 23. Log joint probability = 91.0144. Improved by 0.000675261.\n", - "\u001b[2m\u001b[36m(train_model pid=868, ip=172.31.136.199)\u001b[0m Iteration 24. Log joint probability = 91.015. Improved by 0.000668053.\n", - "\u001b[2m\u001b[36m(train_model pid=868, ip=172.31.136.199)\u001b[0m Iteration 25. Log joint probability = 91.0153. Improved by 0.00022664.\n", - "\u001b[2m\u001b[36m(train_model pid=868, ip=172.31.136.199)\u001b[0m Iteration 26. Log joint probability = 91.0158. Improved by 0.000553923.\n", - "\u001b[2m\u001b[36m(train_model pid=868, ip=172.31.136.199)\u001b[0m Iteration 27. Log joint probability = 91.0169. Improved by 0.00108114.\n", - "\u001b[2m\u001b[36m(train_model pid=868, ip=172.31.136.199)\u001b[0m Iteration 28. Log joint probability = 91.0173. Improved by 0.000446418.\n", - "\u001b[2m\u001b[36m(train_model pid=868, ip=172.31.136.199)\u001b[0m Iteration 29. Log joint probability = 91.0179. 
Improved by 0.000535655.\n", - "\u001b[2m\u001b[36m(train_model pid=868, ip=172.31.136.199)\u001b[0m Iteration 30. Log joint probability = 91.0188. Improved by 0.000894825.\n", - "\u001b[2m\u001b[36m(train_model pid=868, ip=172.31.136.199)\u001b[0m Iteration 31. Log joint probability = 91.0192. Improved by 0.000463639.\n", - "\u001b[2m\u001b[36m(train_model pid=868, ip=172.31.136.199)\u001b[0m Iteration 32. Log joint probability = 91.0193. Improved by 5.37241e-05.\n", - "\u001b[2m\u001b[36m(train_model pid=868, ip=172.31.136.199)\u001b[0m Iteration 33. Log joint probability = 91.0194. Improved by 0.00012323.\n", - "\u001b[2m\u001b[36m(train_model pid=868, ip=172.31.136.199)\u001b[0m Iteration 34. Log joint probability = 91.0196. Improved by 0.000156284.\n", - "\u001b[2m\u001b[36m(train_model pid=868, ip=172.31.136.199)\u001b[0m Iteration 35. Log joint probability = 91.0197. Improved by 8.54979e-05.\n", - "\u001b[2m\u001b[36m(train_model pid=868, ip=172.31.136.199)\u001b[0m Iteration 36. Log joint probability = 91.02. Improved by 0.000353443.\n", - "\u001b[2m\u001b[36m(train_model pid=868, ip=172.31.136.199)\u001b[0m Iteration 37. Log joint probability = 91.0201. Improved by 9.12108e-05.\n", - "\u001b[2m\u001b[36m(train_model pid=868, ip=172.31.136.199)\u001b[0m Iteration 38. Log joint probability = 91.0201. Improved by 3.2033e-05.\n", - "\u001b[2m\u001b[36m(train_model pid=868, ip=172.31.136.199)\u001b[0m Iteration 39. Log joint probability = 91.0202. Improved by 5.68514e-05.\n", - "\u001b[2m\u001b[36m(train_model pid=868, ip=172.31.136.199)\u001b[0m Iteration 40. Log joint probability = 91.0203. Improved by 7.33769e-05.\n", - "\u001b[2m\u001b[36m(train_model pid=868, ip=172.31.136.199)\u001b[0m Iteration 41. Log joint probability = 91.0203. Improved by 6.37981e-05.\n", - "\u001b[2m\u001b[36m(train_model pid=868, ip=172.31.136.199)\u001b[0m Iteration 42. Log joint probability = 91.0203. 
Improved by 1.38012e-05.\n", - "\u001b[2m\u001b[36m(train_model pid=868, ip=172.31.136.199)\u001b[0m Iteration 43. Log joint probability = 91.0204. Improved by 2.29702e-05.\n", - "\u001b[2m\u001b[36m(train_model pid=868, ip=172.31.136.199)\u001b[0m Iteration 44. Log joint probability = 91.0204. Improved by 6.54176e-07.\n", - "\u001b[2m\u001b[36m(train_model pid=868, ip=172.31.136.199)\u001b[0m Iteration 45. Log joint probability = 91.0204. Improved by 1.93438e-05.\n", - "\u001b[2m\u001b[36m(train_model pid=868, ip=172.31.136.199)\u001b[0m Iteration 46. Log joint probability = 91.0204. Improved by 3.1678e-05.\n", - "\u001b[2m\u001b[36m(train_model pid=868, ip=172.31.136.199)\u001b[0m Iteration 47. Log joint probability = 91.0204. Improved by 5.27803e-06.\n", - "\u001b[2m\u001b[36m(train_model pid=868, ip=172.31.136.199)\u001b[0m Iteration 48. Log joint probability = 91.0204. Improved by 1.66328e-05.\n", - "\u001b[2m\u001b[36m(train_model pid=868, ip=172.31.136.199)\u001b[0m Iteration 49. Log joint probability = 91.0204. Improved by 1.35778e-05.\n", - "\u001b[2m\u001b[36m(train_model pid=868, ip=172.31.136.199)\u001b[0m Iteration 50. Log joint probability = 91.0205. Improved by 1.29478e-05.\n", - "\u001b[2m\u001b[36m(train_model pid=868, ip=172.31.136.199)\u001b[0m Iteration 51. Log joint probability = 91.0205. Improved by 7.81213e-06.\n", - "\u001b[2m\u001b[36m(train_model pid=868, ip=172.31.136.199)\u001b[0m Iteration 52. Log joint probability = 91.0205. Improved by 1.64481e-05.\n" + "\u001B[2m\u001B[36m(train_model pid=867, ip=172.31.136.199)\u001B[0m \n", + "\u001B[2m\u001B[36m(train_model pid=867, ip=172.31.136.199)\u001B[0m Initial log joint probability = -21.7758\n", + "\u001B[2m\u001B[36m(train_model pid=867, ip=172.31.136.199)\u001B[0m Iteration 1. Log joint probability = 41.5159. Improved by 63.2917.\n", + "\u001B[2m\u001B[36m(train_model pid=867, ip=172.31.136.199)\u001B[0m Iteration 2. Log joint probability = 68.4175. 
Improved by 26.9016.\n", + "\u001B[2m\u001B[36m(train_model pid=867, ip=172.31.136.199)\u001B[0m Iteration 3. Log joint probability = 88.1348. Improved by 19.7173.\n", + "\u001B[2m\u001B[36m(train_model pid=867, ip=172.31.136.199)\u001B[0m Iteration 4. Log joint probability = 88.147. Improved by 0.0121786.\n", + "\u001B[2m\u001B[36m(train_model pid=867, ip=172.31.136.199)\u001B[0m Iteration 5. Log joint probability = 88.1524. Improved by 0.00537125.\n", + "\u001B[2m\u001B[36m(train_model pid=867, ip=172.31.136.199)\u001B[0m Iteration 6. Log joint probability = 88.1633. Improved by 0.0109589.\n", + "\u001B[2m\u001B[36m(train_model pid=867, ip=172.31.136.199)\u001B[0m Iteration 7. Log joint probability = 88.1753. Improved by 0.0119717.\n", + "\u001B[2m\u001B[36m(train_model pid=867, ip=172.31.136.199)\u001B[0m Iteration 8. Log joint probability = 88.1783. Improved by 0.00301597.\n", + "\u001B[2m\u001B[36m(train_model pid=867, ip=172.31.136.199)\u001B[0m Iteration 9. Log joint probability = 88.2164. Improved by 0.0380849.\n", + "\u001B[2m\u001B[36m(train_model pid=867, ip=172.31.136.199)\u001B[0m Iteration 10. Log joint probability = 88.2239. Improved by 0.00749222.\n", + "\u001B[2m\u001B[36m(train_model pid=867, ip=172.31.136.199)\u001B[0m Iteration 11. Log joint probability = 88.3633. Improved by 0.139416.\n", + "\u001B[2m\u001B[36m(train_model pid=867, ip=172.31.136.199)\u001B[0m Iteration 12. Log joint probability = 88.4154. Improved by 0.0520892.\n", + "\u001B[2m\u001B[36m(train_model pid=867, ip=172.31.136.199)\u001B[0m Iteration 13. Log joint probability = 88.4651. Improved by 0.0496986.\n", + "\u001B[2m\u001B[36m(train_model pid=867, ip=172.31.136.199)\u001B[0m Iteration 14. Log joint probability = 89.8472. Improved by 1.38208.\n", + "\u001B[2m\u001B[36m(train_model pid=867, ip=172.31.136.199)\u001B[0m Iteration 15. Log joint probability = 89.8657. Improved by 0.0185247.\n", + "\u001B[2m\u001B[36m(train_model pid=867, ip=172.31.136.199)\u001B[0m Iteration 16. 
Log joint probability = 89.8732. Improved by 0.00753048.\n", + "\u001B[2m\u001B[36m(train_model pid=867, ip=172.31.136.199)\u001B[0m Iteration 17. Log joint probability = 89.9318. Improved by 0.0585562.\n", + "\u001B[2m\u001B[36m(train_model pid=867, ip=172.31.136.199)\u001B[0m Iteration 18. Log joint probability = 89.9447. Improved by 0.0129053.\n", + "\u001B[2m\u001B[36m(train_model pid=867, ip=172.31.136.199)\u001B[0m Iteration 19. Log joint probability = 89.965. Improved by 0.0202932.\n", + "\u001B[2m\u001B[36m(train_model pid=867, ip=172.31.136.199)\u001B[0m Iteration 20. Log joint probability = 90.0397. Improved by 0.0747472.\n", + "\u001B[2m\u001B[36m(train_model pid=867, ip=172.31.136.199)\u001B[0m Iteration 21. Log joint probability = 90.0875. Improved by 0.0477876.\n", + "\u001B[2m\u001B[36m(train_model pid=867, ip=172.31.136.199)\u001B[0m Iteration 22. Log joint probability = 90.105. Improved by 0.0175359.\n", + "\u001B[2m\u001B[36m(train_model pid=867, ip=172.31.136.199)\u001B[0m Iteration 23. Log joint probability = 90.4892. Improved by 0.384151.\n", + "\u001B[2m\u001B[36m(train_model pid=867, ip=172.31.136.199)\u001B[0m Iteration 24. Log joint probability = 90.556. Improved by 0.0668293.\n", + "\u001B[2m\u001B[36m(train_model pid=867, ip=172.31.136.199)\u001B[0m Iteration 25. Log joint probability = 90.6581. Improved by 0.102125.\n", + "\u001B[2m\u001B[36m(train_model pid=867, ip=172.31.136.199)\u001B[0m Iteration 26. Log joint probability = 90.742. Improved by 0.0838101.\n", + "\u001B[2m\u001B[36m(train_model pid=867, ip=172.31.136.199)\u001B[0m Iteration 27. Log joint probability = 90.7738. Improved by 0.031868.\n", + "\u001B[2m\u001B[36m(train_model pid=867, ip=172.31.136.199)\u001B[0m Iteration 28. Log joint probability = 90.7856. Improved by 0.011803.\n", + "\u001B[2m\u001B[36m(train_model pid=867, ip=172.31.136.199)\u001B[0m Iteration 29. Log joint probability = 90.8302. 
Improved by 0.0445906.\n", + "\u001B[2m\u001B[36m(train_model pid=867, ip=172.31.136.199)\u001B[0m Iteration 30. Log joint probability = 90.8852. Improved by 0.0549923.\n", + "\u001B[2m\u001B[36m(train_model pid=867, ip=172.31.136.199)\u001B[0m Iteration 31. Log joint probability = 90.9034. Improved by 0.0181786.\n", + "\u001B[2m\u001B[36m(train_model pid=867, ip=172.31.136.199)\u001B[0m Iteration 32. Log joint probability = 90.9276. Improved by 0.0241721.\n", + "\u001B[2m\u001B[36m(train_model pid=867, ip=172.31.136.199)\u001B[0m Iteration 33. Log joint probability = 90.9412. Improved by 0.0136337.\n", + "\u001B[2m\u001B[36m(train_model pid=867, ip=172.31.136.199)\u001B[0m Iteration 34. Log joint probability = 90.9542. Improved by 0.0130142.\n", + "\u001B[2m\u001B[36m(train_model pid=867, ip=172.31.136.199)\u001B[0m Iteration 35. Log joint probability = 90.962. Improved by 0.00775981.\n", + "\u001B[2m\u001B[36m(train_model pid=867, ip=172.31.136.199)\u001B[0m Iteration 36. Log joint probability = 90.9638. Improved by 0.00186611.\n", + "\u001B[2m\u001B[36m(train_model pid=867, ip=172.31.136.199)\u001B[0m Iteration 37. Log joint probability = 90.9718. Improved by 0.00797594.\n", + "\u001B[2m\u001B[36m(train_model pid=867, ip=172.31.136.199)\u001B[0m Iteration 38. Log joint probability = 90.976. Improved by 0.0042081.\n", + "\u001B[2m\u001B[36m(train_model pid=867, ip=172.31.136.199)\u001B[0m Iteration 39. Log joint probability = 90.9777. Improved by 0.00165647.\n", + "\u001B[2m\u001B[36m(train_model pid=867, ip=172.31.136.199)\u001B[0m Iteration 40. Log joint probability = 90.9814. Improved by 0.00370259.\n", + "\u001B[2m\u001B[36m(train_model pid=867, ip=172.31.136.199)\u001B[0m Iteration 41. Log joint probability = 90.9839. Improved by 0.00256843.\n", + "\u001B[2m\u001B[36m(train_model pid=867, ip=172.31.136.199)\u001B[0m Iteration 42. Log joint probability = 90.9851. 
Improved by 0.0011523.\n", + "\u001B[2m\u001B[36m(train_model pid=867, ip=172.31.136.199)\u001B[0m Iteration 43. Log joint probability = 90.9868. Improved by 0.00170077.\n", + "\u001B[2m\u001B[36m(train_model pid=867, ip=172.31.136.199)\u001B[0m Iteration 44. Log joint probability = 90.9874. Improved by 0.000631959.\n", + "\u001B[2m\u001B[36m(train_model pid=867, ip=172.31.136.199)\u001B[0m Iteration 45. Log joint probability = 90.9885. Improved by 0.00111174.\n", + "\u001B[2m\u001B[36m(train_model pid=867, ip=172.31.136.199)\u001B[0m Iteration 46. Log joint probability = 90.9887. Improved by 0.000172812.\n", + "\u001B[2m\u001B[36m(train_model pid=867, ip=172.31.136.199)\u001B[0m Iteration 47. Log joint probability = 90.9897. Improved by 0.000951722.\n", + "\u001B[2m\u001B[36m(train_model pid=867, ip=172.31.136.199)\u001B[0m Iteration 48. Log joint probability = 90.9904. Improved by 0.000744776.\n", + "\u001B[2m\u001B[36m(train_model pid=867, ip=172.31.136.199)\u001B[0m Iteration 49. Log joint probability = 90.9907. Improved by 0.000334385.\n", + "\u001B[2m\u001B[36m(train_model pid=867, ip=172.31.136.199)\u001B[0m Iteration 50. Log joint probability = 90.9911. Improved by 0.000323131.\n", + "\u001B[2m\u001B[36m(train_model pid=867, ip=172.31.136.199)\u001B[0m Iteration 51. Log joint probability = 90.9913. Improved by 0.000195932.\n", + "\u001B[2m\u001B[36m(train_model pid=867, ip=172.31.136.199)\u001B[0m Iteration 52. Log joint probability = 90.9913. Improved by 7.26249e-05.\n", + "\u001B[2m\u001B[36m(train_model pid=867, ip=172.31.136.199)\u001B[0m Iteration 53. Log joint probability = 90.9914. Improved by 9.38402e-05.\n", + "\u001B[2m\u001B[36m(train_model pid=867, ip=172.31.136.199)\u001B[0m Iteration 54. Log joint probability = 90.9915. Improved by 0.000104485.\n", + "\u001B[2m\u001B[36m(train_model pid=867, ip=172.31.136.199)\u001B[0m Iteration 55. Log joint probability = 90.9915. 
Improved by 3.9586e-06.\n", + "\u001B[2m\u001B[36m(train_model pid=867, ip=172.31.136.199)\u001B[0m Iteration 56. Log joint probability = 90.9916. Improved by 7.77437e-05.\n", + "\u001B[2m\u001B[36m(train_model pid=867, ip=172.31.136.199)\u001B[0m Iteration 57. Log joint probability = 90.9916. Improved by 2.79958e-05.\n", + "\u001B[2m\u001B[36m(train_model pid=867, ip=172.31.136.199)\u001B[0m Iteration 58. Log joint probability = 90.9917. Improved by 5.30653e-05.\n", + "\u001B[2m\u001B[36m(train_model pid=867, ip=172.31.136.199)\u001B[0m Iteration 59. Log joint probability = 90.9918. Improved by 5.32272e-05.\n", + "\u001B[2m\u001B[36m(train_model pid=867, ip=172.31.136.199)\u001B[0m Iteration 60. Log joint probability = 90.9918. Improved by 2.72417e-05.\n", + "\u001B[2m\u001B[36m(train_model pid=867, ip=172.31.136.199)\u001B[0m Iteration 61. Log joint probability = 90.9919. Improved by 9.20075e-05.\n", + "\u001B[2m\u001B[36m(train_model pid=867, ip=172.31.136.199)\u001B[0m Iteration 62. Log joint probability = 90.9919. Improved by 1.97313e-05.\n", + "\u001B[2m\u001B[36m(train_model pid=867, ip=172.31.136.199)\u001B[0m Iteration 63. Log joint probability = 90.9919. Improved by 3.52389e-05.\n", + "\u001B[2m\u001B[36m(train_model pid=867, ip=172.31.136.199)\u001B[0m Iteration 64. Log joint probability = 90.992. Improved by 4.48494e-05.\n", + "\u001B[2m\u001B[36m(train_model pid=867, ip=172.31.136.199)\u001B[0m Iteration 65. Log joint probability = 90.992. Improved by 3.68675e-05.\n", + "\u001B[2m\u001B[36m(train_model pid=867, ip=172.31.136.199)\u001B[0m Iteration 66. Log joint probability = 90.992. Improved by 2.02192e-05.\n", + "\u001B[2m\u001B[36m(train_model pid=867, ip=172.31.136.199)\u001B[0m Iteration 67. Log joint probability = 90.9921. Improved by 2.05867e-05.\n", + "\u001B[2m\u001B[36m(train_model pid=867, ip=172.31.136.199)\u001B[0m Iteration 68. Log joint probability = 90.9921. 
Improved by 1.60531e-05.\n", + "\u001B[2m\u001B[36m(train_model pid=867, ip=172.31.136.199)\u001B[0m Iteration 69. Log joint probability = 90.9921. Improved by 1.09975e-05.\n", + "\u001B[2m\u001B[36m(train_model pid=867, ip=172.31.136.199)\u001B[0m Iteration 70. Log joint probability = 90.9921. Improved by 5.48589e-06.\n", + "\u001B[2m\u001B[36m(train_model pid=867, ip=172.31.136.199)\u001B[0m Iteration 71. Log joint probability = 90.9921. Improved by 5.17867e-07.\n", + "\u001B[2m\u001B[36m(train_model pid=867, ip=172.31.136.199)\u001B[0m Iteration 72. Log joint probability = 90.9921. Improved by 6.19947e-06.\n", + "\u001B[2m\u001B[36m(train_model pid=867, ip=172.31.136.199)\u001B[0m Iteration 73. Log joint probability = 90.9921. Improved by 1.90771e-05.\n", + "\u001B[2m\u001B[36m(train_model pid=867, ip=172.31.136.199)\u001B[0m Iteration 74. Log joint probability = 90.9921. Improved by 1.96755e-05.\n", + "\u001B[2m\u001B[36m(train_model pid=867, ip=172.31.136.199)\u001B[0m Iteration 75. Log joint probability = 90.9921. Improved by 3.14253e-06.\n", + "\u001B[2m\u001B[36m(train_model pid=867, ip=172.31.136.199)\u001B[0m Iteration 76. Log joint probability = 90.9922. Improved by 2.00154e-05.\n", + "\u001B[2m\u001B[36m(train_model pid=867, ip=172.31.136.199)\u001B[0m Iteration 77. Log joint probability = 90.9922. Improved by 7.38871e-06.\n", + "\u001B[2m\u001B[36m(train_model pid=867, ip=172.31.136.199)\u001B[0m Iteration 78. Log joint probability = 90.9922. Improved by 5.2899e-06.\n", + "\u001B[2m\u001B[36m(train_model pid=867, ip=172.31.136.199)\u001B[0m Iteration 79. Log joint probability = 90.9922. Improved by 3.05609e-06.\n", + "\u001B[2m\u001B[36m(train_model pid=867, ip=172.31.136.199)\u001B[0m Iteration 80. Log joint probability = 90.9922. Improved by 4.27669e-07.\n", + "\u001B[2m\u001B[36m(train_model pid=867, ip=172.31.136.199)\u001B[0m Iteration 81. Log joint probability = 90.9922. 
Improved by 2.5749e-06.\n", + "\u001B[2m\u001B[36m(train_model pid=867, ip=172.31.136.199)\u001B[0m Iteration 82. Log joint probability = 90.9922. Improved by 4.80204e-06.\n", + "\u001B[2m\u001B[36m(train_model pid=867, ip=172.31.136.199)\u001B[0m Iteration 83. Log joint probability = 90.9922. Improved by 2.77249e-06.\n", + "\u001B[2m\u001B[36m(train_model pid=867, ip=172.31.136.199)\u001B[0m Iteration 84. Log joint probability = 90.9922. Improved by 6.44e-07.\n", + "\u001B[2m\u001B[36m(train_model pid=867, ip=172.31.136.199)\u001B[0m Iteration 85. Log joint probability = 90.9922. Improved by 5.69327e-06.\n", + "\u001B[2m\u001B[36m(train_model pid=867, ip=172.31.136.199)\u001B[0m Iteration 86. Log joint probability = 90.9922. Improved by 6.80163e-07.\n", + "\u001B[2m\u001B[36m(train_model pid=867, ip=172.31.136.199)\u001B[0m Iteration 87. Log joint probability = 90.9922. Improved by 1.10273e-06.\n", + "\u001B[2m\u001B[36m(train_model pid=867, ip=172.31.136.199)\u001B[0m Iteration 88. Log joint probability = 90.9922. Improved by 3.1814e-07.\n", + "\u001B[2m\u001B[36m(train_model pid=867, ip=172.31.136.199)\u001B[0m Iteration 89. Log joint probability = 90.9922. Improved by 1.15471e-06.\n", + "\u001B[2m\u001B[36m(train_model pid=867, ip=172.31.136.199)\u001B[0m Iteration 90. Log joint probability = 90.9922. Improved by 2.80645e-07.\n", + "\u001B[2m\u001B[36m(train_model pid=867, ip=172.31.136.199)\u001B[0m Iteration 91. Log joint probability = 90.9922. Improved by 1.97469e-06.\n", + "\u001B[2m\u001B[36m(train_model pid=867, ip=172.31.136.199)\u001B[0m Iteration 92. Log joint probability = 90.9922. Improved by 3.01754e-07.\n", + "\u001B[2m\u001B[36m(train_model pid=867, ip=172.31.136.199)\u001B[0m Iteration 93. Log joint probability = 90.9922. Improved by 5.89157e-07.\n", + "\u001B[2m\u001B[36m(train_model pid=867, ip=172.31.136.199)\u001B[0m Iteration 94. Log joint probability = 90.9922. 
Improved by 4.37725e-07.\n", + "\u001B[2m\u001B[36m(train_model pid=867, ip=172.31.136.199)\u001B[0m Iteration 95. Log joint probability = 90.9922. Improved by 2.67717e-07.\n", + "\u001B[2m\u001B[36m(train_model pid=867, ip=172.31.136.199)\u001B[0m Iteration 96. Log joint probability = 90.9922. Improved by 3.00174e-07.\n", + "\u001B[2m\u001B[36m(train_model pid=867, ip=172.31.136.199)\u001B[0m Iteration 97. Log joint probability = 90.9922. Improved by 4.5588e-08.\n", + "\u001B[2m\u001B[36m(train_model pid=867, ip=172.31.136.199)\u001B[0m Iteration 98. Log joint probability = 90.9922. Improved by 1.30664e-07.\n", + "\u001B[2m\u001B[36m(train_model pid=867, ip=172.31.136.199)\u001B[0m Iteration 99. Log joint probability = 90.9922. Improved by 2.56521e-07.\n", + "\u001B[2m\u001B[36m(train_model pid=867, ip=172.31.136.199)\u001B[0m Iteration 100. Log joint probability = 90.9922. Improved by 1.77492e-07.\n", + "\u001B[2m\u001B[36m(train_model pid=867, ip=172.31.136.199)\u001B[0m Iteration 101. Log joint probability = 90.9922. Improved by 1.62366e-07.\n", + "\u001B[2m\u001B[36m(train_model pid=867, ip=172.31.136.199)\u001B[0m Iteration 102. Log joint probability = 90.9922. Improved by 1.84507e-07.\n", + "\u001B[2m\u001B[36m(train_model pid=867, ip=172.31.136.199)\u001B[0m Iteration 103. Log joint probability = 90.9922. Improved by 9.9194e-08.\n", + "\u001B[2m\u001B[36m(train_model pid=867, ip=172.31.136.199)\u001B[0m Iteration 104. Log joint probability = 90.9922. Improved by 6.85e-08.\n", + "\u001B[2m\u001B[36m(train_model pid=867, ip=172.31.136.199)\u001B[0m Iteration 105. Log joint probability = 90.9922. Improved by 2.19949e-08.\n", + "\u001B[2m\u001B[36m(train_model pid=867, ip=172.31.136.199)\u001B[0m Iteration 106. Log joint probability = 90.9922. Improved by 3.50271e-08.\n", + "\u001B[2m\u001B[36m(train_model pid=867, ip=172.31.136.199)\u001B[0m Iteration 107. Log joint probability = 90.9922. 
Improved by 7.81865e-08.\n", + "\u001B[2m\u001B[36m(train_model pid=867, ip=172.31.136.199)\u001B[0m Iteration 108. Log joint probability = 90.9922. Improved by 6.23645e-08.\n", + "\u001B[2m\u001B[36m(train_model pid=867, ip=172.31.136.199)\u001B[0m Iteration 109. Log joint probability = 90.9922. Improved by 6.12578e-08.\n", + "\u001B[2m\u001B[36m(train_model pid=867, ip=172.31.136.199)\u001B[0m Iteration 110. Log joint probability = 90.9922. Improved by 5.88466e-08.\n", + "\u001B[2m\u001B[36m(train_model pid=867, ip=172.31.136.199)\u001B[0m Iteration 111. Log joint probability = 90.9922. Improved by 1.63983e-07.\n", + "\u001B[2m\u001B[36m(train_model pid=867, ip=172.31.136.199)\u001B[0m Iteration 112. Log joint probability = 90.9922. Improved by 1.58961e-07.\n", + "\u001B[2m\u001B[36m(train_model pid=867, ip=172.31.136.199)\u001B[0m Iteration 113. Log joint probability = 90.9922. Improved by 4.68893e-08.\n", + "\u001B[2m\u001B[36m(train_model pid=867, ip=172.31.136.199)\u001B[0m Iteration 114. Log joint probability = 90.9922. Improved by 2.36556e-08.\n", + "\u001B[2m\u001B[36m(train_model pid=867, ip=172.31.136.199)\u001B[0m Iteration 115. Log joint probability = 90.9922. Improved by 4.54818e-08.\n", + "\u001B[2m\u001B[36m(train_model pid=867, ip=172.31.136.199)\u001B[0m Iteration 116. Log joint probability = 90.9922. Improved by 2.94216e-08.\n", + "\u001B[2m\u001B[36m(train_model pid=867, ip=172.31.136.199)\u001B[0m Iteration 117. Log joint probability = 90.9922. Improved by 1.2584e-08.\n", + "\u001B[2m\u001B[36m(train_model pid=867, ip=172.31.136.199)\u001B[0m Iteration 118. Log joint probability = 90.9922. Improved by 2.77487e-08.\n", + "\u001B[2m\u001B[36m(train_model pid=867, ip=172.31.136.199)\u001B[0m Iteration 119. Log joint probability = 90.9922. Improved by 2.76151e-08.\n", + "\u001B[2m\u001B[36m(train_model pid=867, ip=172.31.136.199)\u001B[0m Iteration 120. Log joint probability = 90.9922. 
Improved by 1.37145e-08.\n", + "\u001B[2m\u001B[36m(train_model pid=867, ip=172.31.136.199)\u001B[0m Iteration 121. Log joint probability = 90.9922. Improved by 4.27885e-08.\n", + "\u001B[2m\u001B[36m(train_model pid=867, ip=172.31.136.199)\u001B[0m Iteration 122. Log joint probability = 90.9922. Improved by 7.76434e-09.\n", + "\u001B[2m\u001B[36m(train_model pid=868, ip=172.31.136.199)\u001B[0m \n", + "\u001B[2m\u001B[36m(train_model pid=868, ip=172.31.136.199)\u001B[0m Initial log joint probability = -21.7758\n", + "\u001B[2m\u001B[36m(train_model pid=868, ip=172.31.136.199)\u001B[0m Iteration 1. Log joint probability = 20.1836. Improved by 41.9594.\n", + "\u001B[2m\u001B[36m(train_model pid=868, ip=172.31.136.199)\u001B[0m Iteration 2. Log joint probability = 59.1549. Improved by 38.9713.\n", + "\u001B[2m\u001B[36m(train_model pid=868, ip=172.31.136.199)\u001B[0m Iteration 3. Log joint probability = 79.9487. Improved by 20.7939.\n", + "\u001B[2m\u001B[36m(train_model pid=868, ip=172.31.136.199)\u001B[0m Iteration 4. Log joint probability = 90.4604. Improved by 10.5117.\n", + "\u001B[2m\u001B[36m(train_model pid=868, ip=172.31.136.199)\u001B[0m Iteration 5. Log joint probability = 90.7685. Improved by 0.308148.\n", + "\u001B[2m\u001B[36m(train_model pid=868, ip=172.31.136.199)\u001B[0m Iteration 6. Log joint probability = 90.8866. Improved by 0.118032.\n", + "\u001B[2m\u001B[36m(train_model pid=868, ip=172.31.136.199)\u001B[0m Iteration 7. Log joint probability = 90.9086. Improved by 0.0220841.\n", + "\u001B[2m\u001B[36m(train_model pid=868, ip=172.31.136.199)\u001B[0m Iteration 8. Log joint probability = 90.9484. Improved by 0.0397311.\n", + "\u001B[2m\u001B[36m(train_model pid=868, ip=172.31.136.199)\u001B[0m Iteration 9. Log joint probability = 90.9681. Improved by 0.0197759.\n", + "\u001B[2m\u001B[36m(train_model pid=868, ip=172.31.136.199)\u001B[0m Iteration 10. Log joint probability = 90.9738. 
Improved by 0.00567126.\n", + "\u001B[2m\u001B[36m(train_model pid=868, ip=172.31.136.199)\u001B[0m Iteration 11. Log joint probability = 90.9772. Improved by 0.00338425.\n", + "\u001B[2m\u001B[36m(train_model pid=868, ip=172.31.136.199)\u001B[0m Iteration 12. Log joint probability = 90.979. Improved by 0.00180031.\n", + "\u001B[2m\u001B[36m(train_model pid=868, ip=172.31.136.199)\u001B[0m Iteration 13. Log joint probability = 90.9909. Improved by 0.0118985.\n", + "\u001B[2m\u001B[36m(train_model pid=868, ip=172.31.136.199)\u001B[0m Iteration 14. Log joint probability = 90.9977. Improved by 0.00677184.\n", + "\u001B[2m\u001B[36m(train_model pid=868, ip=172.31.136.199)\u001B[0m Iteration 15. Log joint probability = 90.9994. Improved by 0.00176338.\n", + "\u001B[2m\u001B[36m(train_model pid=868, ip=172.31.136.199)\u001B[0m Iteration 16. Log joint probability = 90.9998. Improved by 0.000346058.\n", + "\u001B[2m\u001B[36m(train_model pid=868, ip=172.31.136.199)\u001B[0m Iteration 17. Log joint probability = 91.0026. Improved by 0.00283502.\n", + "\u001B[2m\u001B[36m(train_model pid=868, ip=172.31.136.199)\u001B[0m Iteration 18. Log joint probability = 91.0067. Improved by 0.00404095.\n", + "\u001B[2m\u001B[36m(train_model pid=868, ip=172.31.136.199)\u001B[0m Iteration 19. Log joint probability = 91.009. Improved by 0.00230573.\n", + "\u001B[2m\u001B[36m(train_model pid=868, ip=172.31.136.199)\u001B[0m Iteration 20. Log joint probability = 91.0097. Improved by 0.000728684.\n", + "\u001B[2m\u001B[36m(train_model pid=868, ip=172.31.136.199)\u001B[0m Iteration 21. Log joint probability = 91.0105. Improved by 0.000842848.\n", + "\u001B[2m\u001B[36m(train_model pid=868, ip=172.31.136.199)\u001B[0m Iteration 22. Log joint probability = 91.0137. Improved by 0.00315459.\n", + "\u001B[2m\u001B[36m(train_model pid=868, ip=172.31.136.199)\u001B[0m Iteration 23. Log joint probability = 91.0144. 
Improved by 0.000675261.\n", + "\u001B[2m\u001B[36m(train_model pid=868, ip=172.31.136.199)\u001B[0m Iteration 24. Log joint probability = 91.015. Improved by 0.000668053.\n", + "\u001B[2m\u001B[36m(train_model pid=868, ip=172.31.136.199)\u001B[0m Iteration 25. Log joint probability = 91.0153. Improved by 0.00022664.\n", + "\u001B[2m\u001B[36m(train_model pid=868, ip=172.31.136.199)\u001B[0m Iteration 26. Log joint probability = 91.0158. Improved by 0.000553923.\n", + "\u001B[2m\u001B[36m(train_model pid=868, ip=172.31.136.199)\u001B[0m Iteration 27. Log joint probability = 91.0169. Improved by 0.00108114.\n", + "\u001B[2m\u001B[36m(train_model pid=868, ip=172.31.136.199)\u001B[0m Iteration 28. Log joint probability = 91.0173. Improved by 0.000446418.\n", + "\u001B[2m\u001B[36m(train_model pid=868, ip=172.31.136.199)\u001B[0m Iteration 29. Log joint probability = 91.0179. Improved by 0.000535655.\n", + "\u001B[2m\u001B[36m(train_model pid=868, ip=172.31.136.199)\u001B[0m Iteration 30. Log joint probability = 91.0188. Improved by 0.000894825.\n", + "\u001B[2m\u001B[36m(train_model pid=868, ip=172.31.136.199)\u001B[0m Iteration 31. Log joint probability = 91.0192. Improved by 0.000463639.\n", + "\u001B[2m\u001B[36m(train_model pid=868, ip=172.31.136.199)\u001B[0m Iteration 32. Log joint probability = 91.0193. Improved by 5.37241e-05.\n", + "\u001B[2m\u001B[36m(train_model pid=868, ip=172.31.136.199)\u001B[0m Iteration 33. Log joint probability = 91.0194. Improved by 0.00012323.\n", + "\u001B[2m\u001B[36m(train_model pid=868, ip=172.31.136.199)\u001B[0m Iteration 34. Log joint probability = 91.0196. Improved by 0.000156284.\n", + "\u001B[2m\u001B[36m(train_model pid=868, ip=172.31.136.199)\u001B[0m Iteration 35. Log joint probability = 91.0197. Improved by 8.54979e-05.\n", + "\u001B[2m\u001B[36m(train_model pid=868, ip=172.31.136.199)\u001B[0m Iteration 36. Log joint probability = 91.02. 
Improved by 0.000353443.\n", + "\u001B[2m\u001B[36m(train_model pid=868, ip=172.31.136.199)\u001B[0m Iteration 37. Log joint probability = 91.0201. Improved by 9.12108e-05.\n", + "\u001B[2m\u001B[36m(train_model pid=868, ip=172.31.136.199)\u001B[0m Iteration 38. Log joint probability = 91.0201. Improved by 3.2033e-05.\n", + "\u001B[2m\u001B[36m(train_model pid=868, ip=172.31.136.199)\u001B[0m Iteration 39. Log joint probability = 91.0202. Improved by 5.68514e-05.\n", + "\u001B[2m\u001B[36m(train_model pid=868, ip=172.31.136.199)\u001B[0m Iteration 40. Log joint probability = 91.0203. Improved by 7.33769e-05.\n", + "\u001B[2m\u001B[36m(train_model pid=868, ip=172.31.136.199)\u001B[0m Iteration 41. Log joint probability = 91.0203. Improved by 6.37981e-05.\n", + "\u001B[2m\u001B[36m(train_model pid=868, ip=172.31.136.199)\u001B[0m Iteration 42. Log joint probability = 91.0203. Improved by 1.38012e-05.\n", + "\u001B[2m\u001B[36m(train_model pid=868, ip=172.31.136.199)\u001B[0m Iteration 43. Log joint probability = 91.0204. Improved by 2.29702e-05.\n", + "\u001B[2m\u001B[36m(train_model pid=868, ip=172.31.136.199)\u001B[0m Iteration 44. Log joint probability = 91.0204. Improved by 6.54176e-07.\n", + "\u001B[2m\u001B[36m(train_model pid=868, ip=172.31.136.199)\u001B[0m Iteration 45. Log joint probability = 91.0204. Improved by 1.93438e-05.\n", + "\u001B[2m\u001B[36m(train_model pid=868, ip=172.31.136.199)\u001B[0m Iteration 46. Log joint probability = 91.0204. Improved by 3.1678e-05.\n", + "\u001B[2m\u001B[36m(train_model pid=868, ip=172.31.136.199)\u001B[0m Iteration 47. Log joint probability = 91.0204. Improved by 5.27803e-06.\n", + "\u001B[2m\u001B[36m(train_model pid=868, ip=172.31.136.199)\u001B[0m Iteration 48. Log joint probability = 91.0204. Improved by 1.66328e-05.\n", + "\u001B[2m\u001B[36m(train_model pid=868, ip=172.31.136.199)\u001B[0m Iteration 49. Log joint probability = 91.0204. 
Improved by 1.35778e-05.\n", + "\u001B[2m\u001B[36m(train_model pid=868, ip=172.31.136.199)\u001B[0m Iteration 50. Log joint probability = 91.0205. Improved by 1.29478e-05.\n", + "\u001B[2m\u001B[36m(train_model pid=868, ip=172.31.136.199)\u001B[0m Iteration 51. Log joint probability = 91.0205. Improved by 7.81213e-06.\n", + "\u001B[2m\u001B[36m(train_model pid=868, ip=172.31.136.199)\u001B[0m Iteration 52. Log joint probability = 91.0205. Improved by 1.64481e-05.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ - "\u001b[2m\u001b[36m(train_model pid=868, ip=172.31.136.199)\u001b[0m INFO:prophet:Disabling yearly seasonality. Run prophet with yearly_seasonality=True to override this.\n", - "\u001b[2m\u001b[36m(train_model pid=868, ip=172.31.136.199)\u001b[0m INFO:prophet:Disabling daily seasonality. Run prophet with daily_seasonality=True to override this.\n" + "\u001B[2m\u001B[36m(train_model pid=868, ip=172.31.136.199)\u001B[0m INFO:prophet:Disabling yearly seasonality. Run prophet with yearly_seasonality=True to override this.\n", + "\u001B[2m\u001B[36m(train_model pid=868, ip=172.31.136.199)\u001B[0m INFO:prophet:Disabling daily seasonality. Run prophet with daily_seasonality=True to override this.\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ - "\u001b[2m\u001b[36m(train_model pid=868, ip=172.31.136.199)\u001b[0m Iteration 53. Log joint probability = 91.0205. Improved by 5.89368e-06.\n", - "\u001b[2m\u001b[36m(train_model pid=868, ip=172.31.136.199)\u001b[0m Iteration 54. Log joint probability = 91.0205. Improved by 2.73371e-06.\n", - "\u001b[2m\u001b[36m(train_model pid=868, ip=172.31.136.199)\u001b[0m Iteration 55. Log joint probability = 91.0205. Improved by 3.59134e-06.\n", - "\u001b[2m\u001b[36m(train_model pid=868, ip=172.31.136.199)\u001b[0m Iteration 56. Log joint probability = 91.0205. Improved by 7.21082e-06.\n", - "\u001b[2m\u001b[36m(train_model pid=868, ip=172.31.136.199)\u001b[0m Iteration 57. 
Log joint probability = 91.0205. Improved by 1.16206e-06.\n", - "\u001b[2m\u001b[36m(train_model pid=868, ip=172.31.136.199)\u001b[0m Iteration 58. Log joint probability = 91.0205. Improved by 2.44705e-06.\n", - "\u001b[2m\u001b[36m(train_model pid=868, ip=172.31.136.199)\u001b[0m Iteration 59. Log joint probability = 91.0205. Improved by 1.59075e-06.\n", - "\u001b[2m\u001b[36m(train_model pid=868, ip=172.31.136.199)\u001b[0m Iteration 60. Log joint probability = 91.0205. Improved by 2.89546e-06.\n", - "\u001b[2m\u001b[36m(train_model pid=868, ip=172.31.136.199)\u001b[0m Iteration 61. Log joint probability = 91.0205. Improved by 1.19933e-06.\n", - "\u001b[2m\u001b[36m(train_model pid=868, ip=172.31.136.199)\u001b[0m Iteration 62. Log joint probability = 91.0205. Improved by 2.3315e-06.\n", - "\u001b[2m\u001b[36m(train_model pid=868, ip=172.31.136.199)\u001b[0m Iteration 63. Log joint probability = 91.0205. Improved by 3.0172e-06.\n", - "\u001b[2m\u001b[36m(train_model pid=868, ip=172.31.136.199)\u001b[0m Iteration 64. Log joint probability = 91.0205. Improved by 1.1254e-07.\n", - "\u001b[2m\u001b[36m(train_model pid=868, ip=172.31.136.199)\u001b[0m Iteration 65. Log joint probability = 91.0205. Improved by 1.43073e-06.\n", - "\u001b[2m\u001b[36m(train_model pid=868, ip=172.31.136.199)\u001b[0m Iteration 66. Log joint probability = 91.0205. Improved by 1.06503e-06.\n", - "\u001b[2m\u001b[36m(train_model pid=868, ip=172.31.136.199)\u001b[0m Iteration 67. Log joint probability = 91.0205. Improved by 1.94521e-06.\n", - "\u001b[2m\u001b[36m(train_model pid=868, ip=172.31.136.199)\u001b[0m Iteration 68. Log joint probability = 91.0205. Improved by 1.91264e-06.\n", - "\u001b[2m\u001b[36m(train_model pid=868, ip=172.31.136.199)\u001b[0m Iteration 69. Log joint probability = 91.0205. Improved by 1.14165e-06.\n", - "\u001b[2m\u001b[36m(train_model pid=868, ip=172.31.136.199)\u001b[0m Iteration 70. Log joint probability = 91.0205. 
Improved by 6.19488e-08.\n", - "\u001b[2m\u001b[36m(train_model pid=868, ip=172.31.136.199)\u001b[0m Iteration 71. Log joint probability = 91.0205. Improved by 1.3134e-06.\n", - "\u001b[2m\u001b[36m(train_model pid=868, ip=172.31.136.199)\u001b[0m Iteration 72. Log joint probability = 91.0205. Improved by 7.83336e-07.\n", - "\u001b[2m\u001b[36m(train_model pid=868, ip=172.31.136.199)\u001b[0m Iteration 73. Log joint probability = 91.0205. Improved by 6.66751e-07.\n", - "\u001b[2m\u001b[36m(train_model pid=868, ip=172.31.136.199)\u001b[0m Iteration 74. Log joint probability = 91.0205. Improved by 2.12689e-07.\n", - "\u001b[2m\u001b[36m(train_model pid=868, ip=172.31.136.199)\u001b[0m Iteration 75. Log joint probability = 91.0205. Improved by 1.21127e-07.\n", - "\u001b[2m\u001b[36m(train_model pid=868, ip=172.31.136.199)\u001b[0m Iteration 76. Log joint probability = 91.0205. Improved by 6.65688e-07.\n", - "\u001b[2m\u001b[36m(train_model pid=868, ip=172.31.136.199)\u001b[0m Iteration 77. Log joint probability = 91.0205. Improved by 2.69727e-07.\n", - "\u001b[2m\u001b[36m(train_model pid=868, ip=172.31.136.199)\u001b[0m Iteration 78. Log joint probability = 91.0205. Improved by 3.26115e-07.\n", - "\u001b[2m\u001b[36m(train_model pid=868, ip=172.31.136.199)\u001b[0m Iteration 79. Log joint probability = 91.0205. Improved by 6.01741e-08.\n", - "\u001b[2m\u001b[36m(train_model pid=868, ip=172.31.136.199)\u001b[0m Iteration 80. Log joint probability = 91.0205. Improved by 9.90215e-08.\n", - "\u001b[2m\u001b[36m(train_model pid=868, ip=172.31.136.199)\u001b[0m Iteration 81. Log joint probability = 91.0205. Improved by 1.34709e-07.\n", - "\u001b[2m\u001b[36m(train_model pid=868, ip=172.31.136.199)\u001b[0m Iteration 82. Log joint probability = 91.0205. Improved by 1.86905e-07.\n", - "\u001b[2m\u001b[36m(train_model pid=868, ip=172.31.136.199)\u001b[0m Iteration 83. Log joint probability = 91.0205. 
Improved by 1.13228e-07.\n", - "\u001b[2m\u001b[36m(train_model pid=868, ip=172.31.136.199)\u001b[0m Iteration 84. Log joint probability = 91.0205. Improved by 1.84163e-08.\n", - "\u001b[2m\u001b[36m(train_model pid=868, ip=172.31.136.199)\u001b[0m Iteration 85. Log joint probability = 91.0205. Improved by 9.80857e-08.\n", - "\u001b[2m\u001b[36m(train_model pid=868, ip=172.31.136.199)\u001b[0m Iteration 86. Log joint probability = 91.0205. Improved by 3.26897e-08.\n", - "\u001b[2m\u001b[36m(train_model pid=868, ip=172.31.136.199)\u001b[0m Iteration 87. Log joint probability = 91.0205. Improved by 2.67554e-08.\n", - "\u001b[2m\u001b[36m(train_model pid=868, ip=172.31.136.199)\u001b[0m Iteration 88. Log joint probability = 91.0205. Improved by 3.02441e-08.\n", - "\u001b[2m\u001b[36m(train_model pid=868, ip=172.31.136.199)\u001b[0m Iteration 89. Log joint probability = 91.0205. Improved by 6.99644e-09.\n", - "\u001b[2m\u001b[36m(train_model pid=865, ip=172.31.136.199)\u001b[0m \n", - "\u001b[2m\u001b[36m(train_model pid=865, ip=172.31.136.199)\u001b[0m Initial log joint probability = -24.7798\n", - "\u001b[2m\u001b[36m(train_model pid=865, ip=172.31.136.199)\u001b[0m Iteration 1. Log joint probability = 56.6567. Improved by 81.4365.\n", - "\u001b[2m\u001b[36m(train_model pid=865, ip=172.31.136.199)\u001b[0m Iteration 2. Log joint probability = 97.3654. Improved by 40.7088.\n", - "\u001b[2m\u001b[36m(train_model pid=865, ip=172.31.136.199)\u001b[0m Iteration 3. Log joint probability = 118.678. Improved by 21.3124.\n", - "\u001b[2m\u001b[36m(train_model pid=865, ip=172.31.136.199)\u001b[0m Iteration 4. Log joint probability = 129.821. Improved by 11.1432.\n", - "\u001b[2m\u001b[36m(train_model pid=865, ip=172.31.136.199)\u001b[0m Iteration 5. Log joint probability = 132.527. Improved by 2.70548.\n", - "\u001b[2m\u001b[36m(train_model pid=865, ip=172.31.136.199)\u001b[0m Iteration 6. Log joint probability = 132.562. 
Improved by 0.0357063.\n", - "\u001b[2m\u001b[36m(train_model pid=865, ip=172.31.136.199)\u001b[0m Iteration 7. Log joint probability = 132.959. Improved by 0.396572.\n", - "\u001b[2m\u001b[36m(train_model pid=865, ip=172.31.136.199)\u001b[0m Iteration 8. Log joint probability = 132.964. Improved by 0.00492318.\n", - "\u001b[2m\u001b[36m(train_model pid=865, ip=172.31.136.199)\u001b[0m Iteration 9. Log joint probability = 132.968. Improved by 0.00386232.\n", - "\u001b[2m\u001b[36m(train_model pid=865, ip=172.31.136.199)\u001b[0m Iteration 10. Log joint probability = 133.011. Improved by 0.0434838.\n", - "\u001b[2m\u001b[36m(train_model pid=865, ip=172.31.136.199)\u001b[0m Iteration 11. Log joint probability = 133.125. Improved by 0.113608.\n", - "\u001b[2m\u001b[36m(train_model pid=864, ip=172.31.136.199)\u001b[0m \n", - "\u001b[2m\u001b[36m(train_model pid=864, ip=172.31.136.199)\u001b[0m Initial log joint probability = -24.7798\n", - "\u001b[2m\u001b[36m(train_model pid=864, ip=172.31.136.199)\u001b[0m Iteration 1. Log joint probability = 58.4966. Improved by 83.2764.\n", - "\u001b[2m\u001b[36m(train_model pid=864, ip=172.31.136.199)\u001b[0m Iteration 2. Log joint probability = 98.0201. Improved by 39.5235.\n", - "\u001b[2m\u001b[36m(train_model pid=864, ip=172.31.136.199)\u001b[0m Iteration 3. Log joint probability = 124.762. Improved by 26.7417.\n", - "\u001b[2m\u001b[36m(train_model pid=864, ip=172.31.136.199)\u001b[0m Iteration 4. Log joint probability = 128.406. Improved by 3.64467.\n", - "\u001b[2m\u001b[36m(train_model pid=864, ip=172.31.136.199)\u001b[0m Iteration 5. Log joint probability = 131.459. Improved by 3.05241.\n", - "\u001b[2m\u001b[36m(train_model pid=864, ip=172.31.136.199)\u001b[0m Iteration 6. Log joint probability = 131.536. Improved by 0.0771233.\n", - "\u001b[2m\u001b[36m(train_model pid=864, ip=172.31.136.199)\u001b[0m Iteration 7. Log joint probability = 131.585. 
Improved by 0.0491424.\n", - "\u001b[2m\u001b[36m(train_model pid=864, ip=172.31.136.199)\u001b[0m Iteration 8. Log joint probability = 131.622. Improved by 0.0372929.\n", - "\u001b[2m\u001b[36m(train_model pid=864, ip=172.31.136.199)\u001b[0m Iteration 9. Log joint probability = 131.746. Improved by 0.123634.\n", - "\u001b[2m\u001b[36m(train_model pid=864, ip=172.31.136.199)\u001b[0m Iteration 10. Log joint probability = 131.84. Improved by 0.0940927.\n", - "\u001b[2m\u001b[36m(train_model pid=864, ip=172.31.136.199)\u001b[0m Iteration 11. Log joint probability = 131.915. Improved by 0.0752941.\n", - "\u001b[2m\u001b[36m(train_model pid=864, ip=172.31.136.199)\u001b[0m Iteration 12. Log joint probability = 131.944. Improved by 0.0284656.\n", - "\u001b[2m\u001b[36m(train_model pid=864, ip=172.31.136.199)\u001b[0m Iteration 13. Log joint probability = 132.136. Improved by 0.192139.\n", - "\u001b[2m\u001b[36m(train_model pid=864, ip=172.31.136.199)\u001b[0m Iteration 14. Log joint probability = 132.154. Improved by 0.0182919.\n", - "\u001b[2m\u001b[36m(train_model pid=865, ip=172.31.136.199)\u001b[0m Iteration 12. Log joint probability = 133.156. Improved by 0.0315004.\n", - "\u001b[2m\u001b[36m(train_model pid=864, ip=172.31.136.199)\u001b[0m Iteration 15. Log joint probability = 132.205. Improved by 0.0502591.\n", - "\u001b[2m\u001b[36m(train_model pid=865, ip=172.31.136.199)\u001b[0m Iteration 13. Log joint probability = 133.165. Improved by 0.00863589.\n", - "\u001b[2m\u001b[36m(train_model pid=864, ip=172.31.136.199)\u001b[0m Iteration 16. Log joint probability = 132.283. Improved by 0.0788813.\n", - "\u001b[2m\u001b[36m(train_model pid=865, ip=172.31.136.199)\u001b[0m Iteration 14. Log joint probability = 133.205. Improved by 0.0399492.\n", - "\u001b[2m\u001b[36m(train_model pid=864, ip=172.31.136.199)\u001b[0m Iteration 17. Log joint probability = 132.295. 
Improved by 0.0111451.\n", - "\u001b[2m\u001b[36m(train_model pid=865, ip=172.31.136.199)\u001b[0m Iteration 15. Log joint probability = 133.263. Improved by 0.0582913.\n", - "\u001b[2m\u001b[36m(train_model pid=864, ip=172.31.136.199)\u001b[0m Iteration 18. Log joint probability = 132.508. Improved by 0.213728.\n", - "\u001b[2m\u001b[36m(train_model pid=865, ip=172.31.136.199)\u001b[0m Iteration 16. Log joint probability = 133.312. Improved by 0.0488556.\n", - "\u001b[2m\u001b[36m(train_model pid=865, ip=172.31.136.199)\u001b[0m Iteration 17. Log joint probability = 133.379. Improved by 0.0673858.\n", - "\u001b[2m\u001b[36m(train_model pid=864, ip=172.31.136.199)\u001b[0m Iteration 19. Log joint probability = 132.535. Improved by 0.0269674.\n", - "\u001b[2m\u001b[36m(train_model pid=865, ip=172.31.136.199)\u001b[0m Iteration 18. Log joint probability = 133.399. Improved by 0.0201265.\n", - "\u001b[2m\u001b[36m(train_model pid=864, ip=172.31.136.199)\u001b[0m Iteration 20. Log joint probability = 132.608. Improved by 0.0723374.\n", - "\u001b[2m\u001b[36m(train_model pid=865, ip=172.31.136.199)\u001b[0m Iteration 19. Log joint probability = 133.484. Improved by 0.0845203.\n", - "\u001b[2m\u001b[36m(train_model pid=865, ip=172.31.136.199)\u001b[0m Iteration 20. Log joint probability = 133.489. Improved by 0.00529988.\n", - "\u001b[2m\u001b[36m(train_model pid=865, ip=172.31.136.199)\u001b[0m Iteration 21. Log joint probability = 133.564. Improved by 0.074616.\n", - "\u001b[2m\u001b[36m(train_model pid=865, ip=172.31.136.199)\u001b[0m Iteration 22. Log joint probability = 133.65. Improved by 0.0863769.\n", - "\u001b[2m\u001b[36m(train_model pid=865, ip=172.31.136.199)\u001b[0m Iteration 23. Log joint probability = 133.704. Improved by 0.0536392.\n", - "\u001b[2m\u001b[36m(train_model pid=865, ip=172.31.136.199)\u001b[0m Iteration 24. Log joint probability = 133.726. 
Improved by 0.0224161.\n", - "\u001b[2m\u001b[36m(train_model pid=865, ip=172.31.136.199)\u001b[0m Iteration 25. Log joint probability = 133.734. Improved by 0.00765676.\n", - "\u001b[2m\u001b[36m(train_model pid=865, ip=172.31.136.199)\u001b[0m Iteration 26. Log joint probability = 133.771. Improved by 0.0367052.\n", - "\u001b[2m\u001b[36m(train_model pid=865, ip=172.31.136.199)\u001b[0m Iteration 27. Log joint probability = 133.782. Improved by 0.0110577.\n", - "\u001b[2m\u001b[36m(train_model pid=865, ip=172.31.136.199)\u001b[0m Iteration 28. Log joint probability = 133.782. Improved by 0.000409333.\n", - "\u001b[2m\u001b[36m(train_model pid=865, ip=172.31.136.199)\u001b[0m Iteration 29. Log joint probability = 133.786. Improved by 0.00424821.\n", - "\u001b[2m\u001b[36m(train_model pid=865, ip=172.31.136.199)\u001b[0m Iteration 30. Log joint probability = 133.793. Improved by 0.00702624.\n", - "\u001b[2m\u001b[36m(train_model pid=865, ip=172.31.136.199)\u001b[0m Iteration 31. Log joint probability = 133.793. Improved by 0.000120618.\n", - "\u001b[2m\u001b[36m(train_model pid=865, ip=172.31.136.199)\u001b[0m Iteration 32. Log joint probability = 133.796. Improved by 0.00259901.\n", - "\u001b[2m\u001b[36m(train_model pid=865, ip=172.31.136.199)\u001b[0m Iteration 33. Log joint probability = 133.8. Improved by 0.00347541.\n", - "\u001b[2m\u001b[36m(train_model pid=865, ip=172.31.136.199)\u001b[0m Iteration 34. Log joint probability = 133.8. Improved by 4.34525e-06.\n", - "\u001b[2m\u001b[36m(train_model pid=865, ip=172.31.136.199)\u001b[0m Iteration 35. Log joint probability = 133.8. Improved by 0.000442336.\n", - "\u001b[2m\u001b[36m(train_model pid=865, ip=172.31.136.199)\u001b[0m Iteration 36. Log joint probability = 133.801. Improved by 0.000935713.\n", - "\u001b[2m\u001b[36m(train_model pid=865, ip=172.31.136.199)\u001b[0m Iteration 37. Log joint probability = 133.803. 
Improved by 0.00171089.\n", - "\u001b[2m\u001b[36m(train_model pid=865, ip=172.31.136.199)\u001b[0m Iteration 38. Log joint probability = 133.803. Improved by 0.000512353.\n", - "\u001b[2m\u001b[36m(train_model pid=865, ip=172.31.136.199)\u001b[0m Iteration 39. Log joint probability = 133.803. Improved by 4.16449e-06.\n", - "\u001b[2m\u001b[36m(train_model pid=865, ip=172.31.136.199)\u001b[0m Iteration 40. Log joint probability = 133.804. Improved by 0.000354666.\n", - "\u001b[2m\u001b[36m(train_model pid=865, ip=172.31.136.199)\u001b[0m Iteration 41. Log joint probability = 133.804. Improved by 5.7549e-05.\n", - "\u001b[2m\u001b[36m(train_model pid=865, ip=172.31.136.199)\u001b[0m Iteration 42. Log joint probability = 133.804. Improved by 0.000324601.\n", - "\u001b[2m\u001b[36m(train_model pid=865, ip=172.31.136.199)\u001b[0m Iteration 43. Log joint probability = 133.805. Improved by 0.00101344.\n", - "\u001b[2m\u001b[36m(train_model pid=865, ip=172.31.136.199)\u001b[0m Iteration 44. Log joint probability = 133.805. Improved by 0.000491843.\n", - "\u001b[2m\u001b[36m(train_model pid=865, ip=172.31.136.199)\u001b[0m Iteration 45. Log joint probability = 133.806. Improved by 8.67991e-05.\n", - "\u001b[2m\u001b[36m(train_model pid=865, ip=172.31.136.199)\u001b[0m Iteration 46. Log joint probability = 133.806. Improved by 0.000128382.\n", - "\u001b[2m\u001b[36m(train_model pid=865, ip=172.31.136.199)\u001b[0m Iteration 47. Log joint probability = 133.806. Improved by 3.70175e-05.\n", - "\u001b[2m\u001b[36m(train_model pid=865, ip=172.31.136.199)\u001b[0m Iteration 48. Log joint probability = 133.806. Improved by 4.50979e-05.\n", - "\u001b[2m\u001b[36m(train_model pid=864, ip=172.31.136.199)\u001b[0m Iteration 21. Log joint probability = 132.66. Improved by 0.0521015.\n", - "\u001b[2m\u001b[36m(train_model pid=864, ip=172.31.136.199)\u001b[0m Iteration 22. Log joint probability = 132.673. 
Improved by 0.0129431.\n", - "\u001b[2m\u001b[36m(train_model pid=864, ip=172.31.136.199)\u001b[0m Iteration 23. Log joint probability = 132.883. Improved by 0.210274.\n", - "\u001b[2m\u001b[36m(train_model pid=864, ip=172.31.136.199)\u001b[0m Iteration 24. Log joint probability = 133.261. Improved by 0.378255.\n", - "\u001b[2m\u001b[36m(train_model pid=864, ip=172.31.136.199)\u001b[0m Iteration 25. Log joint probability = 133.449. Improved by 0.187961.\n", - "\u001b[2m\u001b[36m(train_model pid=864, ip=172.31.136.199)\u001b[0m Iteration 26. Log joint probability = 133.654. Improved by 0.204868.\n", - "\u001b[2m\u001b[36m(train_model pid=864, ip=172.31.136.199)\u001b[0m Iteration 27. Log joint probability = 133.762. Improved by 0.10752.\n", - "\u001b[2m\u001b[36m(train_model pid=864, ip=172.31.136.199)\u001b[0m Iteration 28. Log joint probability = 133.793. Improved by 0.0309585.\n", - "\u001b[2m\u001b[36m(train_model pid=864, ip=172.31.136.199)\u001b[0m Iteration 29. Log joint probability = 133.847. Improved by 0.0542512.\n", - "\u001b[2m\u001b[36m(train_model pid=864, ip=172.31.136.199)\u001b[0m Iteration 30. Log joint probability = 133.898. Improved by 0.0509466.\n", - "\u001b[2m\u001b[36m(train_model pid=864, ip=172.31.136.199)\u001b[0m Iteration 31. Log joint probability = 134.179. Improved by 0.2808.\n", - "\u001b[2m\u001b[36m(train_model pid=864, ip=172.31.136.199)\u001b[0m Iteration 32. Log joint probability = 134.209. Improved by 0.0301489.\n", - "\u001b[2m\u001b[36m(train_model pid=864, ip=172.31.136.199)\u001b[0m Iteration 33. Log joint probability = 134.253. Improved by 0.0447352.\n", - "\u001b[2m\u001b[36m(train_model pid=864, ip=172.31.136.199)\u001b[0m Iteration 34. Log joint probability = 134.339. Improved by 0.0856853.\n", - "\u001b[2m\u001b[36m(train_model pid=865, ip=172.31.136.199)\u001b[0m Iteration 49. Log joint probability = 133.806. 
Improved by 2.93527e-05.\n", - "\u001b[2m\u001b[36m(train_model pid=865, ip=172.31.136.199)\u001b[0m Iteration 50. Log joint probability = 133.806. Improved by 4.40796e-05.\n", - "\u001b[2m\u001b[36m(train_model pid=864, ip=172.31.136.199)\u001b[0m Iteration 35. Log joint probability = 134.341. Improved by 0.00205512.\n", - "\u001b[2m\u001b[36m(train_model pid=865, ip=172.31.136.199)\u001b[0m Iteration 51. Log joint probability = 133.806. Improved by 0.000118919.\n" + "\u001B[2m\u001B[36m(train_model pid=868, ip=172.31.136.199)\u001B[0m Iteration 53. Log joint probability = 91.0205. Improved by 5.89368e-06.\n", + "\u001B[2m\u001B[36m(train_model pid=868, ip=172.31.136.199)\u001B[0m Iteration 54. Log joint probability = 91.0205. Improved by 2.73371e-06.\n", + "\u001B[2m\u001B[36m(train_model pid=868, ip=172.31.136.199)\u001B[0m Iteration 55. Log joint probability = 91.0205. Improved by 3.59134e-06.\n", + "\u001B[2m\u001B[36m(train_model pid=868, ip=172.31.136.199)\u001B[0m Iteration 56. Log joint probability = 91.0205. Improved by 7.21082e-06.\n", + "\u001B[2m\u001B[36m(train_model pid=868, ip=172.31.136.199)\u001B[0m Iteration 57. Log joint probability = 91.0205. Improved by 1.16206e-06.\n", + "\u001B[2m\u001B[36m(train_model pid=868, ip=172.31.136.199)\u001B[0m Iteration 58. Log joint probability = 91.0205. Improved by 2.44705e-06.\n", + "\u001B[2m\u001B[36m(train_model pid=868, ip=172.31.136.199)\u001B[0m Iteration 59. Log joint probability = 91.0205. Improved by 1.59075e-06.\n", + "\u001B[2m\u001B[36m(train_model pid=868, ip=172.31.136.199)\u001B[0m Iteration 60. Log joint probability = 91.0205. Improved by 2.89546e-06.\n", + "\u001B[2m\u001B[36m(train_model pid=868, ip=172.31.136.199)\u001B[0m Iteration 61. Log joint probability = 91.0205. Improved by 1.19933e-06.\n", + "\u001B[2m\u001B[36m(train_model pid=868, ip=172.31.136.199)\u001B[0m Iteration 62. Log joint probability = 91.0205. 
Improved by 2.3315e-06.\n", + "\u001B[2m\u001B[36m(train_model pid=868, ip=172.31.136.199)\u001B[0m Iteration 63. Log joint probability = 91.0205. Improved by 3.0172e-06.\n", + "\u001B[2m\u001B[36m(train_model pid=868, ip=172.31.136.199)\u001B[0m Iteration 64. Log joint probability = 91.0205. Improved by 1.1254e-07.\n", + "\u001B[2m\u001B[36m(train_model pid=868, ip=172.31.136.199)\u001B[0m Iteration 65. Log joint probability = 91.0205. Improved by 1.43073e-06.\n", + "\u001B[2m\u001B[36m(train_model pid=868, ip=172.31.136.199)\u001B[0m Iteration 66. Log joint probability = 91.0205. Improved by 1.06503e-06.\n", + "\u001B[2m\u001B[36m(train_model pid=868, ip=172.31.136.199)\u001B[0m Iteration 67. Log joint probability = 91.0205. Improved by 1.94521e-06.\n", + "\u001B[2m\u001B[36m(train_model pid=868, ip=172.31.136.199)\u001B[0m Iteration 68. Log joint probability = 91.0205. Improved by 1.91264e-06.\n", + "\u001B[2m\u001B[36m(train_model pid=868, ip=172.31.136.199)\u001B[0m Iteration 69. Log joint probability = 91.0205. Improved by 1.14165e-06.\n", + "\u001B[2m\u001B[36m(train_model pid=868, ip=172.31.136.199)\u001B[0m Iteration 70. Log joint probability = 91.0205. Improved by 6.19488e-08.\n", + "\u001B[2m\u001B[36m(train_model pid=868, ip=172.31.136.199)\u001B[0m Iteration 71. Log joint probability = 91.0205. Improved by 1.3134e-06.\n", + "\u001B[2m\u001B[36m(train_model pid=868, ip=172.31.136.199)\u001B[0m Iteration 72. Log joint probability = 91.0205. Improved by 7.83336e-07.\n", + "\u001B[2m\u001B[36m(train_model pid=868, ip=172.31.136.199)\u001B[0m Iteration 73. Log joint probability = 91.0205. Improved by 6.66751e-07.\n", + "\u001B[2m\u001B[36m(train_model pid=868, ip=172.31.136.199)\u001B[0m Iteration 74. Log joint probability = 91.0205. Improved by 2.12689e-07.\n", + "\u001B[2m\u001B[36m(train_model pid=868, ip=172.31.136.199)\u001B[0m Iteration 75. Log joint probability = 91.0205. 
Improved by 1.21127e-07.\n", + "\u001B[2m\u001B[36m(train_model pid=868, ip=172.31.136.199)\u001B[0m Iteration 76. Log joint probability = 91.0205. Improved by 6.65688e-07.\n", + "\u001B[2m\u001B[36m(train_model pid=868, ip=172.31.136.199)\u001B[0m Iteration 77. Log joint probability = 91.0205. Improved by 2.69727e-07.\n", + "\u001B[2m\u001B[36m(train_model pid=868, ip=172.31.136.199)\u001B[0m Iteration 78. Log joint probability = 91.0205. Improved by 3.26115e-07.\n", + "\u001B[2m\u001B[36m(train_model pid=868, ip=172.31.136.199)\u001B[0m Iteration 79. Log joint probability = 91.0205. Improved by 6.01741e-08.\n", + "\u001B[2m\u001B[36m(train_model pid=868, ip=172.31.136.199)\u001B[0m Iteration 80. Log joint probability = 91.0205. Improved by 9.90215e-08.\n", + "\u001B[2m\u001B[36m(train_model pid=868, ip=172.31.136.199)\u001B[0m Iteration 81. Log joint probability = 91.0205. Improved by 1.34709e-07.\n", + "\u001B[2m\u001B[36m(train_model pid=868, ip=172.31.136.199)\u001B[0m Iteration 82. Log joint probability = 91.0205. Improved by 1.86905e-07.\n", + "\u001B[2m\u001B[36m(train_model pid=868, ip=172.31.136.199)\u001B[0m Iteration 83. Log joint probability = 91.0205. Improved by 1.13228e-07.\n", + "\u001B[2m\u001B[36m(train_model pid=868, ip=172.31.136.199)\u001B[0m Iteration 84. Log joint probability = 91.0205. Improved by 1.84163e-08.\n", + "\u001B[2m\u001B[36m(train_model pid=868, ip=172.31.136.199)\u001B[0m Iteration 85. Log joint probability = 91.0205. Improved by 9.80857e-08.\n", + "\u001B[2m\u001B[36m(train_model pid=868, ip=172.31.136.199)\u001B[0m Iteration 86. Log joint probability = 91.0205. Improved by 3.26897e-08.\n", + "\u001B[2m\u001B[36m(train_model pid=868, ip=172.31.136.199)\u001B[0m Iteration 87. Log joint probability = 91.0205. Improved by 2.67554e-08.\n", + "\u001B[2m\u001B[36m(train_model pid=868, ip=172.31.136.199)\u001B[0m Iteration 88. Log joint probability = 91.0205. 
Improved by 3.02441e-08.\n", + "\u001B[2m\u001B[36m(train_model pid=868, ip=172.31.136.199)\u001B[0m Iteration 89. Log joint probability = 91.0205. Improved by 6.99644e-09.\n", + "\u001B[2m\u001B[36m(train_model pid=865, ip=172.31.136.199)\u001B[0m \n", + "\u001B[2m\u001B[36m(train_model pid=865, ip=172.31.136.199)\u001B[0m Initial log joint probability = -24.7798\n", + "\u001B[2m\u001B[36m(train_model pid=865, ip=172.31.136.199)\u001B[0m Iteration 1. Log joint probability = 56.6567. Improved by 81.4365.\n", + "\u001B[2m\u001B[36m(train_model pid=865, ip=172.31.136.199)\u001B[0m Iteration 2. Log joint probability = 97.3654. Improved by 40.7088.\n", + "\u001B[2m\u001B[36m(train_model pid=865, ip=172.31.136.199)\u001B[0m Iteration 3. Log joint probability = 118.678. Improved by 21.3124.\n", + "\u001B[2m\u001B[36m(train_model pid=865, ip=172.31.136.199)\u001B[0m Iteration 4. Log joint probability = 129.821. Improved by 11.1432.\n", + "\u001B[2m\u001B[36m(train_model pid=865, ip=172.31.136.199)\u001B[0m Iteration 5. Log joint probability = 132.527. Improved by 2.70548.\n", + "\u001B[2m\u001B[36m(train_model pid=865, ip=172.31.136.199)\u001B[0m Iteration 6. Log joint probability = 132.562. Improved by 0.0357063.\n", + "\u001B[2m\u001B[36m(train_model pid=865, ip=172.31.136.199)\u001B[0m Iteration 7. Log joint probability = 132.959. Improved by 0.396572.\n", + "\u001B[2m\u001B[36m(train_model pid=865, ip=172.31.136.199)\u001B[0m Iteration 8. Log joint probability = 132.964. Improved by 0.00492318.\n", + "\u001B[2m\u001B[36m(train_model pid=865, ip=172.31.136.199)\u001B[0m Iteration 9. Log joint probability = 132.968. Improved by 0.00386232.\n", + "\u001B[2m\u001B[36m(train_model pid=865, ip=172.31.136.199)\u001B[0m Iteration 10. Log joint probability = 133.011. Improved by 0.0434838.\n", + "\u001B[2m\u001B[36m(train_model pid=865, ip=172.31.136.199)\u001B[0m Iteration 11. Log joint probability = 133.125. 
Improved by 0.113608.\n", + "\u001B[2m\u001B[36m(train_model pid=864, ip=172.31.136.199)\u001B[0m \n", + "\u001B[2m\u001B[36m(train_model pid=864, ip=172.31.136.199)\u001B[0m Initial log joint probability = -24.7798\n", + "\u001B[2m\u001B[36m(train_model pid=864, ip=172.31.136.199)\u001B[0m Iteration 1. Log joint probability = 58.4966. Improved by 83.2764.\n", + "\u001B[2m\u001B[36m(train_model pid=864, ip=172.31.136.199)\u001B[0m Iteration 2. Log joint probability = 98.0201. Improved by 39.5235.\n", + "\u001B[2m\u001B[36m(train_model pid=864, ip=172.31.136.199)\u001B[0m Iteration 3. Log joint probability = 124.762. Improved by 26.7417.\n", + "\u001B[2m\u001B[36m(train_model pid=864, ip=172.31.136.199)\u001B[0m Iteration 4. Log joint probability = 128.406. Improved by 3.64467.\n", + "\u001B[2m\u001B[36m(train_model pid=864, ip=172.31.136.199)\u001B[0m Iteration 5. Log joint probability = 131.459. Improved by 3.05241.\n", + "\u001B[2m\u001B[36m(train_model pid=864, ip=172.31.136.199)\u001B[0m Iteration 6. Log joint probability = 131.536. Improved by 0.0771233.\n", + "\u001B[2m\u001B[36m(train_model pid=864, ip=172.31.136.199)\u001B[0m Iteration 7. Log joint probability = 131.585. Improved by 0.0491424.\n", + "\u001B[2m\u001B[36m(train_model pid=864, ip=172.31.136.199)\u001B[0m Iteration 8. Log joint probability = 131.622. Improved by 0.0372929.\n", + "\u001B[2m\u001B[36m(train_model pid=864, ip=172.31.136.199)\u001B[0m Iteration 9. Log joint probability = 131.746. Improved by 0.123634.\n", + "\u001B[2m\u001B[36m(train_model pid=864, ip=172.31.136.199)\u001B[0m Iteration 10. Log joint probability = 131.84. Improved by 0.0940927.\n", + "\u001B[2m\u001B[36m(train_model pid=864, ip=172.31.136.199)\u001B[0m Iteration 11. Log joint probability = 131.915. Improved by 0.0752941.\n", + "\u001B[2m\u001B[36m(train_model pid=864, ip=172.31.136.199)\u001B[0m Iteration 12. Log joint probability = 131.944. 
Improved by 0.0284656.\n", + "\u001B[2m\u001B[36m(train_model pid=864, ip=172.31.136.199)\u001B[0m Iteration 13. Log joint probability = 132.136. Improved by 0.192139.\n", + "\u001B[2m\u001B[36m(train_model pid=864, ip=172.31.136.199)\u001B[0m Iteration 14. Log joint probability = 132.154. Improved by 0.0182919.\n", + "\u001B[2m\u001B[36m(train_model pid=865, ip=172.31.136.199)\u001B[0m Iteration 12. Log joint probability = 133.156. Improved by 0.0315004.\n", + "\u001B[2m\u001B[36m(train_model pid=864, ip=172.31.136.199)\u001B[0m Iteration 15. Log joint probability = 132.205. Improved by 0.0502591.\n", + "\u001B[2m\u001B[36m(train_model pid=865, ip=172.31.136.199)\u001B[0m Iteration 13. Log joint probability = 133.165. Improved by 0.00863589.\n", + "\u001B[2m\u001B[36m(train_model pid=864, ip=172.31.136.199)\u001B[0m Iteration 16. Log joint probability = 132.283. Improved by 0.0788813.\n", + "\u001B[2m\u001B[36m(train_model pid=865, ip=172.31.136.199)\u001B[0m Iteration 14. Log joint probability = 133.205. Improved by 0.0399492.\n", + "\u001B[2m\u001B[36m(train_model pid=864, ip=172.31.136.199)\u001B[0m Iteration 17. Log joint probability = 132.295. Improved by 0.0111451.\n", + "\u001B[2m\u001B[36m(train_model pid=865, ip=172.31.136.199)\u001B[0m Iteration 15. Log joint probability = 133.263. Improved by 0.0582913.\n", + "\u001B[2m\u001B[36m(train_model pid=864, ip=172.31.136.199)\u001B[0m Iteration 18. Log joint probability = 132.508. Improved by 0.213728.\n", + "\u001B[2m\u001B[36m(train_model pid=865, ip=172.31.136.199)\u001B[0m Iteration 16. Log joint probability = 133.312. Improved by 0.0488556.\n", + "\u001B[2m\u001B[36m(train_model pid=865, ip=172.31.136.199)\u001B[0m Iteration 17. Log joint probability = 133.379. Improved by 0.0673858.\n", + "\u001B[2m\u001B[36m(train_model pid=864, ip=172.31.136.199)\u001B[0m Iteration 19. Log joint probability = 132.535. 
Improved by 0.0269674.\n", + "\u001B[2m\u001B[36m(train_model pid=865, ip=172.31.136.199)\u001B[0m Iteration 18. Log joint probability = 133.399. Improved by 0.0201265.\n", + "\u001B[2m\u001B[36m(train_model pid=864, ip=172.31.136.199)\u001B[0m Iteration 20. Log joint probability = 132.608. Improved by 0.0723374.\n", + "\u001B[2m\u001B[36m(train_model pid=865, ip=172.31.136.199)\u001B[0m Iteration 19. Log joint probability = 133.484. Improved by 0.0845203.\n", + "\u001B[2m\u001B[36m(train_model pid=865, ip=172.31.136.199)\u001B[0m Iteration 20. Log joint probability = 133.489. Improved by 0.00529988.\n", + "\u001B[2m\u001B[36m(train_model pid=865, ip=172.31.136.199)\u001B[0m Iteration 21. Log joint probability = 133.564. Improved by 0.074616.\n", + "\u001B[2m\u001B[36m(train_model pid=865, ip=172.31.136.199)\u001B[0m Iteration 22. Log joint probability = 133.65. Improved by 0.0863769.\n", + "\u001B[2m\u001B[36m(train_model pid=865, ip=172.31.136.199)\u001B[0m Iteration 23. Log joint probability = 133.704. Improved by 0.0536392.\n", + "\u001B[2m\u001B[36m(train_model pid=865, ip=172.31.136.199)\u001B[0m Iteration 24. Log joint probability = 133.726. Improved by 0.0224161.\n", + "\u001B[2m\u001B[36m(train_model pid=865, ip=172.31.136.199)\u001B[0m Iteration 25. Log joint probability = 133.734. Improved by 0.00765676.\n", + "\u001B[2m\u001B[36m(train_model pid=865, ip=172.31.136.199)\u001B[0m Iteration 26. Log joint probability = 133.771. Improved by 0.0367052.\n", + "\u001B[2m\u001B[36m(train_model pid=865, ip=172.31.136.199)\u001B[0m Iteration 27. Log joint probability = 133.782. Improved by 0.0110577.\n", + "\u001B[2m\u001B[36m(train_model pid=865, ip=172.31.136.199)\u001B[0m Iteration 28. Log joint probability = 133.782. Improved by 0.000409333.\n", + "\u001B[2m\u001B[36m(train_model pid=865, ip=172.31.136.199)\u001B[0m Iteration 29. Log joint probability = 133.786. 
Improved by 0.00424821.\n", + "\u001B[2m\u001B[36m(train_model pid=865, ip=172.31.136.199)\u001B[0m Iteration 30. Log joint probability = 133.793. Improved by 0.00702624.\n", + "\u001B[2m\u001B[36m(train_model pid=865, ip=172.31.136.199)\u001B[0m Iteration 31. Log joint probability = 133.793. Improved by 0.000120618.\n", + "\u001B[2m\u001B[36m(train_model pid=865, ip=172.31.136.199)\u001B[0m Iteration 32. Log joint probability = 133.796. Improved by 0.00259901.\n", + "\u001B[2m\u001B[36m(train_model pid=865, ip=172.31.136.199)\u001B[0m Iteration 33. Log joint probability = 133.8. Improved by 0.00347541.\n", + "\u001B[2m\u001B[36m(train_model pid=865, ip=172.31.136.199)\u001B[0m Iteration 34. Log joint probability = 133.8. Improved by 4.34525e-06.\n", + "\u001B[2m\u001B[36m(train_model pid=865, ip=172.31.136.199)\u001B[0m Iteration 35. Log joint probability = 133.8. Improved by 0.000442336.\n", + "\u001B[2m\u001B[36m(train_model pid=865, ip=172.31.136.199)\u001B[0m Iteration 36. Log joint probability = 133.801. Improved by 0.000935713.\n", + "\u001B[2m\u001B[36m(train_model pid=865, ip=172.31.136.199)\u001B[0m Iteration 37. Log joint probability = 133.803. Improved by 0.00171089.\n", + "\u001B[2m\u001B[36m(train_model pid=865, ip=172.31.136.199)\u001B[0m Iteration 38. Log joint probability = 133.803. Improved by 0.000512353.\n", + "\u001B[2m\u001B[36m(train_model pid=865, ip=172.31.136.199)\u001B[0m Iteration 39. Log joint probability = 133.803. Improved by 4.16449e-06.\n", + "\u001B[2m\u001B[36m(train_model pid=865, ip=172.31.136.199)\u001B[0m Iteration 40. Log joint probability = 133.804. Improved by 0.000354666.\n", + "\u001B[2m\u001B[36m(train_model pid=865, ip=172.31.136.199)\u001B[0m Iteration 41. Log joint probability = 133.804. Improved by 5.7549e-05.\n", + "\u001B[2m\u001B[36m(train_model pid=865, ip=172.31.136.199)\u001B[0m Iteration 42. Log joint probability = 133.804. 
Improved by 0.000324601.\n", + "\u001B[2m\u001B[36m(train_model pid=865, ip=172.31.136.199)\u001B[0m Iteration 43. Log joint probability = 133.805. Improved by 0.00101344.\n", + "\u001B[2m\u001B[36m(train_model pid=865, ip=172.31.136.199)\u001B[0m Iteration 44. Log joint probability = 133.805. Improved by 0.000491843.\n", + "\u001B[2m\u001B[36m(train_model pid=865, ip=172.31.136.199)\u001B[0m Iteration 45. Log joint probability = 133.806. Improved by 8.67991e-05.\n", + "\u001B[2m\u001B[36m(train_model pid=865, ip=172.31.136.199)\u001B[0m Iteration 46. Log joint probability = 133.806. Improved by 0.000128382.\n", + "\u001B[2m\u001B[36m(train_model pid=865, ip=172.31.136.199)\u001B[0m Iteration 47. Log joint probability = 133.806. Improved by 3.70175e-05.\n", + "\u001B[2m\u001B[36m(train_model pid=865, ip=172.31.136.199)\u001B[0m Iteration 48. Log joint probability = 133.806. Improved by 4.50979e-05.\n", + "\u001B[2m\u001B[36m(train_model pid=864, ip=172.31.136.199)\u001B[0m Iteration 21. Log joint probability = 132.66. Improved by 0.0521015.\n", + "\u001B[2m\u001B[36m(train_model pid=864, ip=172.31.136.199)\u001B[0m Iteration 22. Log joint probability = 132.673. Improved by 0.0129431.\n", + "\u001B[2m\u001B[36m(train_model pid=864, ip=172.31.136.199)\u001B[0m Iteration 23. Log joint probability = 132.883. Improved by 0.210274.\n", + "\u001B[2m\u001B[36m(train_model pid=864, ip=172.31.136.199)\u001B[0m Iteration 24. Log joint probability = 133.261. Improved by 0.378255.\n", + "\u001B[2m\u001B[36m(train_model pid=864, ip=172.31.136.199)\u001B[0m Iteration 25. Log joint probability = 133.449. Improved by 0.187961.\n", + "\u001B[2m\u001B[36m(train_model pid=864, ip=172.31.136.199)\u001B[0m Iteration 26. Log joint probability = 133.654. Improved by 0.204868.\n", + "\u001B[2m\u001B[36m(train_model pid=864, ip=172.31.136.199)\u001B[0m Iteration 27. Log joint probability = 133.762. 
Improved by 0.10752.\n", + "\u001B[2m\u001B[36m(train_model pid=864, ip=172.31.136.199)\u001B[0m Iteration 28. Log joint probability = 133.793. Improved by 0.0309585.\n", + "\u001B[2m\u001B[36m(train_model pid=864, ip=172.31.136.199)\u001B[0m Iteration 29. Log joint probability = 133.847. Improved by 0.0542512.\n", + "\u001B[2m\u001B[36m(train_model pid=864, ip=172.31.136.199)\u001B[0m Iteration 30. Log joint probability = 133.898. Improved by 0.0509466.\n", + "\u001B[2m\u001B[36m(train_model pid=864, ip=172.31.136.199)\u001B[0m Iteration 31. Log joint probability = 134.179. Improved by 0.2808.\n", + "\u001B[2m\u001B[36m(train_model pid=864, ip=172.31.136.199)\u001B[0m Iteration 32. Log joint probability = 134.209. Improved by 0.0301489.\n", + "\u001B[2m\u001B[36m(train_model pid=864, ip=172.31.136.199)\u001B[0m Iteration 33. Log joint probability = 134.253. Improved by 0.0447352.\n", + "\u001B[2m\u001B[36m(train_model pid=864, ip=172.31.136.199)\u001B[0m Iteration 34. Log joint probability = 134.339. Improved by 0.0856853.\n", + "\u001B[2m\u001B[36m(train_model pid=865, ip=172.31.136.199)\u001B[0m Iteration 49. Log joint probability = 133.806. Improved by 2.93527e-05.\n", + "\u001B[2m\u001B[36m(train_model pid=865, ip=172.31.136.199)\u001B[0m Iteration 50. Log joint probability = 133.806. Improved by 4.40796e-05.\n", + "\u001B[2m\u001B[36m(train_model pid=864, ip=172.31.136.199)\u001B[0m Iteration 35. Log joint probability = 134.341. Improved by 0.00205512.\n", + "\u001B[2m\u001B[36m(train_model pid=865, ip=172.31.136.199)\u001B[0m Iteration 51. Log joint probability = 133.806. Improved by 0.000118919.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ - "\u001b[2m\u001b[36m(train_model pid=864, ip=172.31.136.199)\u001b[0m INFO:prophet:Disabling yearly seasonality. Run prophet with yearly_seasonality=True to override this.\n", - "\u001b[2m\u001b[36m(train_model pid=864, ip=172.31.136.199)\u001b[0m INFO:prophet:Disabling daily seasonality. 
Run prophet with daily_seasonality=True to override this.\n", - "\u001b[2m\u001b[36m(train_model pid=865, ip=172.31.136.199)\u001b[0m INFO:prophet:Disabling yearly seasonality. Run prophet with yearly_seasonality=True to override this.\n", - "\u001b[2m\u001b[36m(train_model pid=865, ip=172.31.136.199)\u001b[0m INFO:prophet:Disabling daily seasonality. Run prophet with daily_seasonality=True to override this.\n" + "\u001B[2m\u001B[36m(train_model pid=864, ip=172.31.136.199)\u001B[0m INFO:prophet:Disabling yearly seasonality. Run prophet with yearly_seasonality=True to override this.\n", + "\u001B[2m\u001B[36m(train_model pid=864, ip=172.31.136.199)\u001B[0m INFO:prophet:Disabling daily seasonality. Run prophet with daily_seasonality=True to override this.\n", + "\u001B[2m\u001B[36m(train_model pid=865, ip=172.31.136.199)\u001B[0m INFO:prophet:Disabling yearly seasonality. Run prophet with yearly_seasonality=True to override this.\n", + "\u001B[2m\u001B[36m(train_model pid=865, ip=172.31.136.199)\u001B[0m INFO:prophet:Disabling daily seasonality. Run prophet with daily_seasonality=True to override this.\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ - "\u001b[2m\u001b[36m(train_model pid=865, ip=172.31.136.199)\u001b[0m Iteration 52. Log joint probability = 133.806. Improved by 1.19684e-06.\n", - "\u001b[2m\u001b[36m(train_model pid=865, ip=172.31.136.199)\u001b[0m Iteration 53. Log joint probability = 133.806. Improved by 5.11185e-05.\n", - "\u001b[2m\u001b[36m(train_model pid=865, ip=172.31.136.199)\u001b[0m Iteration 54. Log joint probability = 133.806. Improved by 4.74767e-05.\n", - "\u001b[2m\u001b[36m(train_model pid=865, ip=172.31.136.199)\u001b[0m Iteration 55. Log joint probability = 133.806. Improved by 1.2416e-05.\n", - "\u001b[2m\u001b[36m(train_model pid=865, ip=172.31.136.199)\u001b[0m Iteration 56. Log joint probability = 133.806. 
Improved by 2.02582e-05.\n", - "\u001b[2m\u001b[36m(train_model pid=865, ip=172.31.136.199)\u001b[0m Iteration 57. Log joint probability = 133.806. Improved by 1.71245e-05.\n", - "\u001b[2m\u001b[36m(train_model pid=865, ip=172.31.136.199)\u001b[0m Iteration 58. Log joint probability = 133.806. Improved by 8.42186e-06.\n", - "\u001b[2m\u001b[36m(train_model pid=865, ip=172.31.136.199)\u001b[0m Iteration 59. Log joint probability = 133.806. Improved by 5.25634e-06.\n", - "\u001b[2m\u001b[36m(train_model pid=865, ip=172.31.136.199)\u001b[0m Iteration 60. Log joint probability = 133.806. Improved by 1.02038e-06.\n", - "\u001b[2m\u001b[36m(train_model pid=865, ip=172.31.136.199)\u001b[0m Iteration 61. Log joint probability = 133.806. Improved by 8.6083e-06.\n", - "\u001b[2m\u001b[36m(train_model pid=865, ip=172.31.136.199)\u001b[0m Iteration 62. Log joint probability = 133.806. Improved by 1.95771e-06.\n", - "\u001b[2m\u001b[36m(train_model pid=865, ip=172.31.136.199)\u001b[0m Iteration 63. Log joint probability = 133.806. Improved by 2.81929e-06.\n", - "\u001b[2m\u001b[36m(train_model pid=865, ip=172.31.136.199)\u001b[0m Iteration 64. Log joint probability = 133.806. Improved by 9.62887e-07.\n", - "\u001b[2m\u001b[36m(train_model pid=865, ip=172.31.136.199)\u001b[0m Iteration 65. Log joint probability = 133.806. Improved by 1.02108e-06.\n", - "\u001b[2m\u001b[36m(train_model pid=865, ip=172.31.136.199)\u001b[0m Iteration 66. Log joint probability = 133.806. Improved by 8.08545e-07.\n", - "\u001b[2m\u001b[36m(train_model pid=865, ip=172.31.136.199)\u001b[0m Iteration 67. Log joint probability = 133.806. Improved by 1.06262e-08.\n", - "\u001b[2m\u001b[36m(train_model pid=865, ip=172.31.136.199)\u001b[0m Iteration 68. Log joint probability = 133.806. Improved by 1.44616e-07.\n", - "\u001b[2m\u001b[36m(train_model pid=865, ip=172.31.136.199)\u001b[0m Iteration 69. Log joint probability = 133.806. 
Improved by 2.11851e-07.\n", - "\u001b[2m\u001b[36m(train_model pid=865, ip=172.31.136.199)\u001b[0m Iteration 70. Log joint probability = 133.806. Improved by 2.4721e-07.\n", - "\u001b[2m\u001b[36m(train_model pid=865, ip=172.31.136.199)\u001b[0m Iteration 71. Log joint probability = 133.806. Improved by 3.84309e-07.\n", - "\u001b[2m\u001b[36m(train_model pid=865, ip=172.31.136.199)\u001b[0m Iteration 72. Log joint probability = 133.806. Improved by 8.01389e-08.\n", - "\u001b[2m\u001b[36m(train_model pid=865, ip=172.31.136.199)\u001b[0m Iteration 73. Log joint probability = 133.806. Improved by 6.42814e-08.\n", - "\u001b[2m\u001b[36m(train_model pid=865, ip=172.31.136.199)\u001b[0m Iteration 74. Log joint probability = 133.806. Improved by 3.08296e-08.\n", - "\u001b[2m\u001b[36m(train_model pid=865, ip=172.31.136.199)\u001b[0m Iteration 75. Log joint probability = 133.806. Improved by 7.11785e-08.\n", - "\u001b[2m\u001b[36m(train_model pid=865, ip=172.31.136.199)\u001b[0m Iteration 76. Log joint probability = 133.806. Improved by 6.76762e-08.\n", - "\u001b[2m\u001b[36m(train_model pid=865, ip=172.31.136.199)\u001b[0m Iteration 77. Log joint probability = 133.806. Improved by 2.88068e-07.\n", - "\u001b[2m\u001b[36m(train_model pid=865, ip=172.31.136.199)\u001b[0m Iteration 78. Log joint probability = 133.806. Improved by 6.82979e-08.\n", - "\u001b[2m\u001b[36m(train_model pid=864, ip=172.31.136.199)\u001b[0m Iteration 36. Log joint probability = 134.393. Improved by 0.0516495.\n", - "\u001b[2m\u001b[36m(train_model pid=864, ip=172.31.136.199)\u001b[0m Iteration 37. Log joint probability = 134.406. Improved by 0.0128166.\n", - "\u001b[2m\u001b[36m(train_model pid=864, ip=172.31.136.199)\u001b[0m Iteration 38. Log joint probability = 134.53. Improved by 0.124634.\n", - "\u001b[2m\u001b[36m(train_model pid=864, ip=172.31.136.199)\u001b[0m Iteration 39. Log joint probability = 134.593. 
Improved by 0.0626.\n", - "\u001b[2m\u001b[36m(train_model pid=864, ip=172.31.136.199)\u001b[0m Iteration 40. Log joint probability = 134.626. Improved by 0.03309.\n", - "\u001b[2m\u001b[36m(train_model pid=864, ip=172.31.136.199)\u001b[0m Iteration 41. Log joint probability = 134.631. Improved by 0.00515215.\n", - "\u001b[2m\u001b[36m(train_model pid=864, ip=172.31.136.199)\u001b[0m Iteration 42. Log joint probability = 134.664. Improved by 0.0326243.\n", - "\u001b[2m\u001b[36m(train_model pid=864, ip=172.31.136.199)\u001b[0m Iteration 43. Log joint probability = 134.675. Improved by 0.0115272.\n", - "\u001b[2m\u001b[36m(train_model pid=864, ip=172.31.136.199)\u001b[0m Iteration 44. Log joint probability = 134.678. Improved by 0.00297174.\n", - "\u001b[2m\u001b[36m(train_model pid=864, ip=172.31.136.199)\u001b[0m Iteration 45. Log joint probability = 134.687. Improved by 0.00902203.\n", - "\u001b[2m\u001b[36m(train_model pid=864, ip=172.31.136.199)\u001b[0m Iteration 46. Log joint probability = 134.695. Improved by 0.00741251.\n", - "\u001b[2m\u001b[36m(train_model pid=864, ip=172.31.136.199)\u001b[0m Iteration 47. Log joint probability = 134.698. Improved by 0.00291338.\n", - "\u001b[2m\u001b[36m(train_model pid=864, ip=172.31.136.199)\u001b[0m Iteration 48. Log joint probability = 134.698. Improved by 0.000831812.\n", - "\u001b[2m\u001b[36m(train_model pid=864, ip=172.31.136.199)\u001b[0m Iteration 49. Log joint probability = 134.699. Improved by 0.000221433.\n", - "\u001b[2m\u001b[36m(train_model pid=864, ip=172.31.136.199)\u001b[0m Iteration 50. Log joint probability = 134.7. Improved by 0.00103722.\n", - "\u001b[2m\u001b[36m(train_model pid=865, ip=172.31.136.199)\u001b[0m Iteration 79. Log joint probability = 133.806. Improved by 4.89768e-08.\n", - "\u001b[2m\u001b[36m(train_model pid=865, ip=172.31.136.199)\u001b[0m Iteration 80. Log joint probability = 133.806. 
Improved by 5.13849e-08.\n", - "\u001b[2m\u001b[36m(train_model pid=864, ip=172.31.136.199)\u001b[0m Iteration 51. Log joint probability = 134.7. Improved by 0.00033267.\n", - "\u001b[2m\u001b[36m(train_model pid=865, ip=172.31.136.199)\u001b[0m Iteration 81. Log joint probability = 133.806. Improved by 1.1728e-08.\n", - "\u001b[2m\u001b[36m(train_model pid=865, ip=172.31.136.199)\u001b[0m Iteration 82. Log joint probability = 133.806. Improved by 5.41323e-09.\n", - "\u001b[2m\u001b[36m(train_model pid=864, ip=172.31.136.199)\u001b[0m Iteration 52. Log joint probability = 134.7. Improved by 0.000370356.\n", - "\u001b[2m\u001b[36m(train_model pid=864, ip=172.31.136.199)\u001b[0m Iteration 53. Log joint probability = 134.701. Improved by 0.000590457.\n", - "\u001b[2m\u001b[36m(train_model pid=864, ip=172.31.136.199)\u001b[0m Iteration 54. Log joint probability = 134.701. Improved by 0.000308186.\n", - "\u001b[2m\u001b[36m(train_model pid=864, ip=172.31.136.199)\u001b[0m Iteration 55. Log joint probability = 134.701. Improved by 1.19587e-05.\n", - "\u001b[2m\u001b[36m(train_model pid=864, ip=172.31.136.199)\u001b[0m Iteration 56. Log joint probability = 134.703. Improved by 0.0017289.\n", - "\u001b[2m\u001b[36m(train_model pid=864, ip=172.31.136.199)\u001b[0m Iteration 57. Log joint probability = 134.705. Improved by 0.00162144.\n", - "\u001b[2m\u001b[36m(train_model pid=864, ip=172.31.136.199)\u001b[0m Iteration 58. Log joint probability = 134.706. Improved by 0.000936565.\n", - "\u001b[2m\u001b[36m(train_model pid=864, ip=172.31.136.199)\u001b[0m Iteration 59. Log joint probability = 134.706. Improved by 0.000489671.\n", - "\u001b[2m\u001b[36m(train_model pid=864, ip=172.31.136.199)\u001b[0m Iteration 60. Log joint probability = 134.706. Improved by 2.13758e-05.\n", - "\u001b[2m\u001b[36m(train_model pid=864, ip=172.31.136.199)\u001b[0m Iteration 61. Log joint probability = 134.706. 
Improved by 7.25762e-06.\n", - "\u001b[2m\u001b[36m(train_model pid=864, ip=172.31.136.199)\u001b[0m Iteration 62. Log joint probability = 134.706. Improved by 0.000109131.\n", - "\u001b[2m\u001b[36m(train_model pid=864, ip=172.31.136.199)\u001b[0m Iteration 63. Log joint probability = 134.706. Improved by 5.9817e-05.\n", - "\u001b[2m\u001b[36m(train_model pid=864, ip=172.31.136.199)\u001b[0m Iteration 64. Log joint probability = 134.706. Improved by 0.000246335.\n", - "\u001b[2m\u001b[36m(train_model pid=864, ip=172.31.136.199)\u001b[0m Iteration 65. Log joint probability = 134.707. Improved by 2.75556e-05.\n", - "\u001b[2m\u001b[36m(train_model pid=864, ip=172.31.136.199)\u001b[0m Iteration 66. Log joint probability = 134.707. Improved by 6.77305e-05.\n", - "\u001b[2m\u001b[36m(train_model pid=864, ip=172.31.136.199)\u001b[0m Iteration 67. Log joint probability = 134.707. Improved by 0.000101361.\n", - "\u001b[2m\u001b[36m(train_model pid=864, ip=172.31.136.199)\u001b[0m Iteration 68. Log joint probability = 134.707. Improved by 2.67652e-05.\n", - "\u001b[2m\u001b[36m(train_model pid=864, ip=172.31.136.199)\u001b[0m Iteration 69. Log joint probability = 134.707. Improved by 4.08686e-05.\n", - "\u001b[2m\u001b[36m(train_model pid=864, ip=172.31.136.199)\u001b[0m Iteration 70. Log joint probability = 134.707. Improved by 5.56634e-05.\n", - "\u001b[2m\u001b[36m(train_model pid=864, ip=172.31.136.199)\u001b[0m Iteration 71. Log joint probability = 134.707. Improved by 8.41062e-06.\n", - "\u001b[2m\u001b[36m(train_model pid=864, ip=172.31.136.199)\u001b[0m Iteration 72. Log joint probability = 134.707. Improved by 3.58515e-06.\n", - "\u001b[2m\u001b[36m(train_model pid=864, ip=172.31.136.199)\u001b[0m Iteration 73. Log joint probability = 134.707. Improved by 1.01022e-07.\n", - "\u001b[2m\u001b[36m(train_model pid=864, ip=172.31.136.199)\u001b[0m Iteration 74. Log joint probability = 134.707. 
Improved by 2.71279e-05.\n", - "\u001b[2m\u001b[36m(train_model pid=864, ip=172.31.136.199)\u001b[0m Iteration 75. Log joint probability = 134.707. Improved by 1.57461e-05.\n", - "\u001b[2m\u001b[36m(train_model pid=864, ip=172.31.136.199)\u001b[0m Iteration 76. Log joint probability = 134.707. Improved by 2.20976e-05.\n", - "\u001b[2m\u001b[36m(train_model pid=864, ip=172.31.136.199)\u001b[0m Iteration 77. Log joint probability = 134.707. Improved by 4.12488e-06.\n", - "\u001b[2m\u001b[36m(train_model pid=864, ip=172.31.136.199)\u001b[0m Iteration 78. Log joint probability = 134.707. Improved by 4.15849e-06.\n", - "\u001b[2m\u001b[36m(train_model pid=864, ip=172.31.136.199)\u001b[0m Iteration 79. Log joint probability = 134.707. Improved by 4.0241e-06.\n", - "\u001b[2m\u001b[36m(train_model pid=864, ip=172.31.136.199)\u001b[0m Iteration 80. Log joint probability = 134.707. Improved by 5.34552e-06.\n", - "\u001b[2m\u001b[36m(train_model pid=864, ip=172.31.136.199)\u001b[0m Iteration 81. Log joint probability = 134.707. Improved by 2.28619e-06.\n", - "\u001b[2m\u001b[36m(train_model pid=864, ip=172.31.136.199)\u001b[0m Iteration 82. Log joint probability = 134.707. Improved by 1.55421e-06.\n", - "\u001b[2m\u001b[36m(train_model pid=864, ip=172.31.136.199)\u001b[0m Iteration 83. Log joint probability = 134.707. Improved by 4.21746e-07.\n", - "\u001b[2m\u001b[36m(train_model pid=864, ip=172.31.136.199)\u001b[0m Iteration 84. Log joint probability = 134.707. Improved by 1.7876e-06.\n", - "\u001b[2m\u001b[36m(train_model pid=864, ip=172.31.136.199)\u001b[0m Iteration 85. Log joint probability = 134.707. Improved by 4.65521e-06.\n", - "\u001b[2m\u001b[36m(train_model pid=864, ip=172.31.136.199)\u001b[0m Iteration 86. Log joint probability = 134.707. Improved by 6.75201e-07.\n", - "\u001b[2m\u001b[36m(train_model pid=864, ip=172.31.136.199)\u001b[0m Iteration 87. Log joint probability = 134.707. 
Improved by 1.22495e-06.\n", - "\u001b[2m\u001b[36m(train_model pid=864, ip=172.31.136.199)\u001b[0m Iteration 88. Log joint probability = 134.707. Improved by 6.8387e-07.\n", - "\u001b[2m\u001b[36m(train_model pid=864, ip=172.31.136.199)\u001b[0m Iteration 89. Log joint probability = 134.707. Improved by 1.51393e-07.\n", - "\u001b[2m\u001b[36m(train_model pid=864, ip=172.31.136.199)\u001b[0m Iteration 90. Log joint probability = 134.707. Improved by 3.06142e-07.\n", - "\u001b[2m\u001b[36m(train_model pid=864, ip=172.31.136.199)\u001b[0m Iteration 91. Log joint probability = 134.707. Improved by 2.65367e-07.\n", - "\u001b[2m\u001b[36m(train_model pid=864, ip=172.31.136.199)\u001b[0m Iteration 92. Log joint probability = 134.707. Improved by 3.27718e-07.\n", - "\u001b[2m\u001b[36m(train_model pid=864, ip=172.31.136.199)\u001b[0m Iteration 93. Log joint probability = 134.707. Improved by 1.4017e-07.\n", - "\u001b[2m\u001b[36m(train_model pid=864, ip=172.31.136.199)\u001b[0m Iteration 94. Log joint probability = 134.707. Improved by 1.27841e-07.\n", - "\u001b[2m\u001b[36m(train_model pid=864, ip=172.31.136.199)\u001b[0m Iteration 95. Log joint probability = 134.707. Improved by 7.60193e-08.\n", - "\u001b[2m\u001b[36m(train_model pid=864, ip=172.31.136.199)\u001b[0m Iteration 96. Log joint probability = 134.707. Improved by 2.21328e-08.\n", - "\u001b[2m\u001b[36m(train_model pid=864, ip=172.31.136.199)\u001b[0m Iteration 97. Log joint probability = 134.707. Improved by 1.95887e-08.\n", - "\u001b[2m\u001b[36m(train_model pid=864, ip=172.31.136.199)\u001b[0m Iteration 98. Log joint probability = 134.707. Improved by 7.67787e-08.\n", - "\u001b[2m\u001b[36m(train_model pid=864, ip=172.31.136.199)\u001b[0m Iteration 99. Log joint probability = 134.707. Improved by 1.98719e-08.\n", - "\u001b[2m\u001b[36m(train_model pid=864, ip=172.31.136.199)\u001b[0m Iteration 100. Log joint probability = 134.707. 
Improved by 6.91463e-09.\n" + "\u001B[2m\u001B[36m(train_model pid=865, ip=172.31.136.199)\u001B[0m Iteration 52. Log joint probability = 133.806. Improved by 1.19684e-06.\n", + "\u001B[2m\u001B[36m(train_model pid=865, ip=172.31.136.199)\u001B[0m Iteration 53. Log joint probability = 133.806. Improved by 5.11185e-05.\n", + "\u001B[2m\u001B[36m(train_model pid=865, ip=172.31.136.199)\u001B[0m Iteration 54. Log joint probability = 133.806. Improved by 4.74767e-05.\n", + "\u001B[2m\u001B[36m(train_model pid=865, ip=172.31.136.199)\u001B[0m Iteration 55. Log joint probability = 133.806. Improved by 1.2416e-05.\n", + "\u001B[2m\u001B[36m(train_model pid=865, ip=172.31.136.199)\u001B[0m Iteration 56. Log joint probability = 133.806. Improved by 2.02582e-05.\n", + "\u001B[2m\u001B[36m(train_model pid=865, ip=172.31.136.199)\u001B[0m Iteration 57. Log joint probability = 133.806. Improved by 1.71245e-05.\n", + "\u001B[2m\u001B[36m(train_model pid=865, ip=172.31.136.199)\u001B[0m Iteration 58. Log joint probability = 133.806. Improved by 8.42186e-06.\n", + "\u001B[2m\u001B[36m(train_model pid=865, ip=172.31.136.199)\u001B[0m Iteration 59. Log joint probability = 133.806. Improved by 5.25634e-06.\n", + "\u001B[2m\u001B[36m(train_model pid=865, ip=172.31.136.199)\u001B[0m Iteration 60. Log joint probability = 133.806. Improved by 1.02038e-06.\n", + "\u001B[2m\u001B[36m(train_model pid=865, ip=172.31.136.199)\u001B[0m Iteration 61. Log joint probability = 133.806. Improved by 8.6083e-06.\n", + "\u001B[2m\u001B[36m(train_model pid=865, ip=172.31.136.199)\u001B[0m Iteration 62. Log joint probability = 133.806. Improved by 1.95771e-06.\n", + "\u001B[2m\u001B[36m(train_model pid=865, ip=172.31.136.199)\u001B[0m Iteration 63. Log joint probability = 133.806. Improved by 2.81929e-06.\n", + "\u001B[2m\u001B[36m(train_model pid=865, ip=172.31.136.199)\u001B[0m Iteration 64. Log joint probability = 133.806. 
Improved by 9.62887e-07.\n", + "\u001B[2m\u001B[36m(train_model pid=865, ip=172.31.136.199)\u001B[0m Iteration 65. Log joint probability = 133.806. Improved by 1.02108e-06.\n", + "\u001B[2m\u001B[36m(train_model pid=865, ip=172.31.136.199)\u001B[0m Iteration 66. Log joint probability = 133.806. Improved by 8.08545e-07.\n", + "\u001B[2m\u001B[36m(train_model pid=865, ip=172.31.136.199)\u001B[0m Iteration 67. Log joint probability = 133.806. Improved by 1.06262e-08.\n", + "\u001B[2m\u001B[36m(train_model pid=865, ip=172.31.136.199)\u001B[0m Iteration 68. Log joint probability = 133.806. Improved by 1.44616e-07.\n", + "\u001B[2m\u001B[36m(train_model pid=865, ip=172.31.136.199)\u001B[0m Iteration 69. Log joint probability = 133.806. Improved by 2.11851e-07.\n", + "\u001B[2m\u001B[36m(train_model pid=865, ip=172.31.136.199)\u001B[0m Iteration 70. Log joint probability = 133.806. Improved by 2.4721e-07.\n", + "\u001B[2m\u001B[36m(train_model pid=865, ip=172.31.136.199)\u001B[0m Iteration 71. Log joint probability = 133.806. Improved by 3.84309e-07.\n", + "\u001B[2m\u001B[36m(train_model pid=865, ip=172.31.136.199)\u001B[0m Iteration 72. Log joint probability = 133.806. Improved by 8.01389e-08.\n", + "\u001B[2m\u001B[36m(train_model pid=865, ip=172.31.136.199)\u001B[0m Iteration 73. Log joint probability = 133.806. Improved by 6.42814e-08.\n", + "\u001B[2m\u001B[36m(train_model pid=865, ip=172.31.136.199)\u001B[0m Iteration 74. Log joint probability = 133.806. Improved by 3.08296e-08.\n", + "\u001B[2m\u001B[36m(train_model pid=865, ip=172.31.136.199)\u001B[0m Iteration 75. Log joint probability = 133.806. Improved by 7.11785e-08.\n", + "\u001B[2m\u001B[36m(train_model pid=865, ip=172.31.136.199)\u001B[0m Iteration 76. Log joint probability = 133.806. Improved by 6.76762e-08.\n", + "\u001B[2m\u001B[36m(train_model pid=865, ip=172.31.136.199)\u001B[0m Iteration 77. Log joint probability = 133.806. 
Improved by 2.88068e-07.\n", + "\u001B[2m\u001B[36m(train_model pid=865, ip=172.31.136.199)\u001B[0m Iteration 78. Log joint probability = 133.806. Improved by 6.82979e-08.\n", + "\u001B[2m\u001B[36m(train_model pid=864, ip=172.31.136.199)\u001B[0m Iteration 36. Log joint probability = 134.393. Improved by 0.0516495.\n", + "\u001B[2m\u001B[36m(train_model pid=864, ip=172.31.136.199)\u001B[0m Iteration 37. Log joint probability = 134.406. Improved by 0.0128166.\n", + "\u001B[2m\u001B[36m(train_model pid=864, ip=172.31.136.199)\u001B[0m Iteration 38. Log joint probability = 134.53. Improved by 0.124634.\n", + "\u001B[2m\u001B[36m(train_model pid=864, ip=172.31.136.199)\u001B[0m Iteration 39. Log joint probability = 134.593. Improved by 0.0626.\n", + "\u001B[2m\u001B[36m(train_model pid=864, ip=172.31.136.199)\u001B[0m Iteration 40. Log joint probability = 134.626. Improved by 0.03309.\n", + "\u001B[2m\u001B[36m(train_model pid=864, ip=172.31.136.199)\u001B[0m Iteration 41. Log joint probability = 134.631. Improved by 0.00515215.\n", + "\u001B[2m\u001B[36m(train_model pid=864, ip=172.31.136.199)\u001B[0m Iteration 42. Log joint probability = 134.664. Improved by 0.0326243.\n", + "\u001B[2m\u001B[36m(train_model pid=864, ip=172.31.136.199)\u001B[0m Iteration 43. Log joint probability = 134.675. Improved by 0.0115272.\n", + "\u001B[2m\u001B[36m(train_model pid=864, ip=172.31.136.199)\u001B[0m Iteration 44. Log joint probability = 134.678. Improved by 0.00297174.\n", + "\u001B[2m\u001B[36m(train_model pid=864, ip=172.31.136.199)\u001B[0m Iteration 45. Log joint probability = 134.687. Improved by 0.00902203.\n", + "\u001B[2m\u001B[36m(train_model pid=864, ip=172.31.136.199)\u001B[0m Iteration 46. Log joint probability = 134.695. Improved by 0.00741251.\n", + "\u001B[2m\u001B[36m(train_model pid=864, ip=172.31.136.199)\u001B[0m Iteration 47. Log joint probability = 134.698. 
Improved by 0.00291338.\n", + "\u001B[2m\u001B[36m(train_model pid=864, ip=172.31.136.199)\u001B[0m Iteration 48. Log joint probability = 134.698. Improved by 0.000831812.\n", + "\u001B[2m\u001B[36m(train_model pid=864, ip=172.31.136.199)\u001B[0m Iteration 49. Log joint probability = 134.699. Improved by 0.000221433.\n", + "\u001B[2m\u001B[36m(train_model pid=864, ip=172.31.136.199)\u001B[0m Iteration 50. Log joint probability = 134.7. Improved by 0.00103722.\n", + "\u001B[2m\u001B[36m(train_model pid=865, ip=172.31.136.199)\u001B[0m Iteration 79. Log joint probability = 133.806. Improved by 4.89768e-08.\n", + "\u001B[2m\u001B[36m(train_model pid=865, ip=172.31.136.199)\u001B[0m Iteration 80. Log joint probability = 133.806. Improved by 5.13849e-08.\n", + "\u001B[2m\u001B[36m(train_model pid=864, ip=172.31.136.199)\u001B[0m Iteration 51. Log joint probability = 134.7. Improved by 0.00033267.\n", + "\u001B[2m\u001B[36m(train_model pid=865, ip=172.31.136.199)\u001B[0m Iteration 81. Log joint probability = 133.806. Improved by 1.1728e-08.\n", + "\u001B[2m\u001B[36m(train_model pid=865, ip=172.31.136.199)\u001B[0m Iteration 82. Log joint probability = 133.806. Improved by 5.41323e-09.\n", + "\u001B[2m\u001B[36m(train_model pid=864, ip=172.31.136.199)\u001B[0m Iteration 52. Log joint probability = 134.7. Improved by 0.000370356.\n", + "\u001B[2m\u001B[36m(train_model pid=864, ip=172.31.136.199)\u001B[0m Iteration 53. Log joint probability = 134.701. Improved by 0.000590457.\n", + "\u001B[2m\u001B[36m(train_model pid=864, ip=172.31.136.199)\u001B[0m Iteration 54. Log joint probability = 134.701. Improved by 0.000308186.\n", + "\u001B[2m\u001B[36m(train_model pid=864, ip=172.31.136.199)\u001B[0m Iteration 55. Log joint probability = 134.701. Improved by 1.19587e-05.\n", + "\u001B[2m\u001B[36m(train_model pid=864, ip=172.31.136.199)\u001B[0m Iteration 56. Log joint probability = 134.703. 
Improved by 0.0017289.\n", + "\u001B[2m\u001B[36m(train_model pid=864, ip=172.31.136.199)\u001B[0m Iteration 57. Log joint probability = 134.705. Improved by 0.00162144.\n", + "\u001B[2m\u001B[36m(train_model pid=864, ip=172.31.136.199)\u001B[0m Iteration 58. Log joint probability = 134.706. Improved by 0.000936565.\n", + "\u001B[2m\u001B[36m(train_model pid=864, ip=172.31.136.199)\u001B[0m Iteration 59. Log joint probability = 134.706. Improved by 0.000489671.\n", + "\u001B[2m\u001B[36m(train_model pid=864, ip=172.31.136.199)\u001B[0m Iteration 60. Log joint probability = 134.706. Improved by 2.13758e-05.\n", + "\u001B[2m\u001B[36m(train_model pid=864, ip=172.31.136.199)\u001B[0m Iteration 61. Log joint probability = 134.706. Improved by 7.25762e-06.\n", + "\u001B[2m\u001B[36m(train_model pid=864, ip=172.31.136.199)\u001B[0m Iteration 62. Log joint probability = 134.706. Improved by 0.000109131.\n", + "\u001B[2m\u001B[36m(train_model pid=864, ip=172.31.136.199)\u001B[0m Iteration 63. Log joint probability = 134.706. Improved by 5.9817e-05.\n", + "\u001B[2m\u001B[36m(train_model pid=864, ip=172.31.136.199)\u001B[0m Iteration 64. Log joint probability = 134.706. Improved by 0.000246335.\n", + "\u001B[2m\u001B[36m(train_model pid=864, ip=172.31.136.199)\u001B[0m Iteration 65. Log joint probability = 134.707. Improved by 2.75556e-05.\n", + "\u001B[2m\u001B[36m(train_model pid=864, ip=172.31.136.199)\u001B[0m Iteration 66. Log joint probability = 134.707. Improved by 6.77305e-05.\n", + "\u001B[2m\u001B[36m(train_model pid=864, ip=172.31.136.199)\u001B[0m Iteration 67. Log joint probability = 134.707. Improved by 0.000101361.\n", + "\u001B[2m\u001B[36m(train_model pid=864, ip=172.31.136.199)\u001B[0m Iteration 68. Log joint probability = 134.707. Improved by 2.67652e-05.\n", + "\u001B[2m\u001B[36m(train_model pid=864, ip=172.31.136.199)\u001B[0m Iteration 69. Log joint probability = 134.707. 
Improved by 4.08686e-05.\n", + "\u001B[2m\u001B[36m(train_model pid=864, ip=172.31.136.199)\u001B[0m Iteration 70. Log joint probability = 134.707. Improved by 5.56634e-05.\n", + "\u001B[2m\u001B[36m(train_model pid=864, ip=172.31.136.199)\u001B[0m Iteration 71. Log joint probability = 134.707. Improved by 8.41062e-06.\n", + "\u001B[2m\u001B[36m(train_model pid=864, ip=172.31.136.199)\u001B[0m Iteration 72. Log joint probability = 134.707. Improved by 3.58515e-06.\n", + "\u001B[2m\u001B[36m(train_model pid=864, ip=172.31.136.199)\u001B[0m Iteration 73. Log joint probability = 134.707. Improved by 1.01022e-07.\n", + "\u001B[2m\u001B[36m(train_model pid=864, ip=172.31.136.199)\u001B[0m Iteration 74. Log joint probability = 134.707. Improved by 2.71279e-05.\n", + "\u001B[2m\u001B[36m(train_model pid=864, ip=172.31.136.199)\u001B[0m Iteration 75. Log joint probability = 134.707. Improved by 1.57461e-05.\n", + "\u001B[2m\u001B[36m(train_model pid=864, ip=172.31.136.199)\u001B[0m Iteration 76. Log joint probability = 134.707. Improved by 2.20976e-05.\n", + "\u001B[2m\u001B[36m(train_model pid=864, ip=172.31.136.199)\u001B[0m Iteration 77. Log joint probability = 134.707. Improved by 4.12488e-06.\n", + "\u001B[2m\u001B[36m(train_model pid=864, ip=172.31.136.199)\u001B[0m Iteration 78. Log joint probability = 134.707. Improved by 4.15849e-06.\n", + "\u001B[2m\u001B[36m(train_model pid=864, ip=172.31.136.199)\u001B[0m Iteration 79. Log joint probability = 134.707. Improved by 4.0241e-06.\n", + "\u001B[2m\u001B[36m(train_model pid=864, ip=172.31.136.199)\u001B[0m Iteration 80. Log joint probability = 134.707. Improved by 5.34552e-06.\n", + "\u001B[2m\u001B[36m(train_model pid=864, ip=172.31.136.199)\u001B[0m Iteration 81. Log joint probability = 134.707. Improved by 2.28619e-06.\n", + "\u001B[2m\u001B[36m(train_model pid=864, ip=172.31.136.199)\u001B[0m Iteration 82. Log joint probability = 134.707. 
Improved by 1.55421e-06.\n", + "\u001B[2m\u001B[36m(train_model pid=864, ip=172.31.136.199)\u001B[0m Iteration 83. Log joint probability = 134.707. Improved by 4.21746e-07.\n", + "\u001B[2m\u001B[36m(train_model pid=864, ip=172.31.136.199)\u001B[0m Iteration 84. Log joint probability = 134.707. Improved by 1.7876e-06.\n", + "\u001B[2m\u001B[36m(train_model pid=864, ip=172.31.136.199)\u001B[0m Iteration 85. Log joint probability = 134.707. Improved by 4.65521e-06.\n", + "\u001B[2m\u001B[36m(train_model pid=864, ip=172.31.136.199)\u001B[0m Iteration 86. Log joint probability = 134.707. Improved by 6.75201e-07.\n", + "\u001B[2m\u001B[36m(train_model pid=864, ip=172.31.136.199)\u001B[0m Iteration 87. Log joint probability = 134.707. Improved by 1.22495e-06.\n", + "\u001B[2m\u001B[36m(train_model pid=864, ip=172.31.136.199)\u001B[0m Iteration 88. Log joint probability = 134.707. Improved by 6.8387e-07.\n", + "\u001B[2m\u001B[36m(train_model pid=864, ip=172.31.136.199)\u001B[0m Iteration 89. Log joint probability = 134.707. Improved by 1.51393e-07.\n", + "\u001B[2m\u001B[36m(train_model pid=864, ip=172.31.136.199)\u001B[0m Iteration 90. Log joint probability = 134.707. Improved by 3.06142e-07.\n", + "\u001B[2m\u001B[36m(train_model pid=864, ip=172.31.136.199)\u001B[0m Iteration 91. Log joint probability = 134.707. Improved by 2.65367e-07.\n", + "\u001B[2m\u001B[36m(train_model pid=864, ip=172.31.136.199)\u001B[0m Iteration 92. Log joint probability = 134.707. Improved by 3.27718e-07.\n", + "\u001B[2m\u001B[36m(train_model pid=864, ip=172.31.136.199)\u001B[0m Iteration 93. Log joint probability = 134.707. Improved by 1.4017e-07.\n", + "\u001B[2m\u001B[36m(train_model pid=864, ip=172.31.136.199)\u001B[0m Iteration 94. Log joint probability = 134.707. Improved by 1.27841e-07.\n", + "\u001B[2m\u001B[36m(train_model pid=864, ip=172.31.136.199)\u001B[0m Iteration 95. Log joint probability = 134.707. 
Improved by 7.60193e-08.\n", + "\u001B[2m\u001B[36m(train_model pid=864, ip=172.31.136.199)\u001B[0m Iteration 96. Log joint probability = 134.707. Improved by 2.21328e-08.\n", + "\u001B[2m\u001B[36m(train_model pid=864, ip=172.31.136.199)\u001B[0m Iteration 97. Log joint probability = 134.707. Improved by 1.95887e-08.\n", + "\u001B[2m\u001B[36m(train_model pid=864, ip=172.31.136.199)\u001B[0m Iteration 98. Log joint probability = 134.707. Improved by 7.67787e-08.\n", + "\u001B[2m\u001B[36m(train_model pid=864, ip=172.31.136.199)\u001B[0m Iteration 99. Log joint probability = 134.707. Improved by 1.98719e-08.\n", + "\u001B[2m\u001B[36m(train_model pid=864, ip=172.31.136.199)\u001B[0m Iteration 100. Log joint probability = 134.707. Improved by 6.91463e-09.\n" ] }, { diff --git a/doc/source/ray-air/examples/batch_tuning.ipynb b/doc/source/ray-air/examples/batch_tuning.ipynb index 9194adc8529e..7e3c4bc872ea 100644 --- a/doc/source/ray-air/examples/batch_tuning.ipynb +++ b/doc/source/ray-air/examples/batch_tuning.ipynb @@ -55,10 +55,29 @@ "```{tip}\n", "Prerequisite for this notebook: Read the [Key Concepts](tune-60-seconds) page for Ray Tune.\n", "```\n", - "\n", - "Let us start by importing a few required libraries, including open-source Ray itself!" + "First, let's make sure we have all Python packages we need installed." ] }, + { + "cell_type": "code", + "execution_count": null, + "outputs": [], + "source": [ + "!pip install -q \"ray[air]\" scikit-learn" + ], + "metadata": { + "collapsed": false + } + }, + { + "cell_type": "markdown", + "source": [ + "Next, let's import a few required libraries, including open-source Ray itself!" 
+ ], + "metadata": { + "collapsed": false + } + }, { "cell_type": "code", "execution_count": 1, From 0ece85a3896ed1d5a8c6256d4fc1d355befb2f75 Mon Sep 17 00:00:00 2001 From: Max Pumperla Date: Wed, 10 May 2023 10:13:48 +0200 Subject: [PATCH 320/424] [docs] replace deprecated sklearn by scikit-learn installation (#35168) some AIR notebooks use pip install sklearn, which is deprecated (see https://pypi.org/project/sklearn/), we fix this here. Signed-off-by: Max Pumperla --- .../examples/analyze_tuning_results.ipynb | 146 +++++++++--------- .../ray-air/examples/upload_to_comet_ml.ipynb | 30 ++-- .../ray-air/examples/upload_to_wandb.ipynb | 4 +- 3 files changed, 90 insertions(+), 90 deletions(-) diff --git a/doc/source/ray-air/examples/analyze_tuning_results.ipynb b/doc/source/ray-air/examples/analyze_tuning_results.ipynb index cd652f099621..a38261c0a4de 100644 --- a/doc/source/ray-air/examples/analyze_tuning_results.ipynb +++ b/doc/source/ray-air/examples/analyze_tuning_results.ipynb @@ -42,13 +42,13 @@ "name": "stdout", "output_type": "stream", "text": [ - "\u001b[33mWARNING: You are using pip version 21.3.1; however, version 22.0.4 is available.\r\n", - "You should consider upgrading via the '/Users/kai/.pyenv/versions/3.7.7/bin/python3.7 -m pip install --upgrade pip' command.\u001b[0m\r\n" + "\u001B[33mWARNING: You are using pip version 21.3.1; however, version 22.0.4 is available.\r\n", + "You should consider upgrading via the '/Users/kai/.pyenv/versions/3.7.7/bin/python3.7 -m pip install --upgrade pip' command.\u001B[0m\r\n" ] } ], "source": [ - "!pip install -q \"ray[air]\" sklearn" + "!pip install -q \"ray[air]\" scikit-learn" ] }, { @@ -95,7 +95,7 @@ "name": "stderr", "output_type": "stream", "text": [ - "2022-05-13 12:31:51,444\tINFO services.py:1484 -- View the Ray dashboard at \u001b[1m\u001b[32mhttp://127.0.0.1:8265\u001b[39m\u001b[22m\n" + "2022-05-13 12:31:51,444\tINFO services.py:1484 -- View the Ray dashboard at 
\u001B[1m\u001B[32mhttp://127.0.0.1:8265\u001B[39m\u001B[22m\n" ] } ], @@ -256,81 +256,81 @@ "name": "stderr", "output_type": "stream", "text": [ - "\u001b[2m\u001b[36m(GBDTTrainable pid=62456)\u001b[0m UserWarning: Dataset 'train' has 1 blocks, which is less than the `num_workers` 2. This dataset will be automatically repartitioned to 2 blocks.\n", - "\u001b[2m\u001b[36m(GBDTTrainable pid=62456)\u001b[0m 2022-05-13 12:32:02,793\tINFO main.py:980 -- [RayXGBoost] Created 2 new actors (2 total actors). Waiting until actors are ready for training.\n", - "\u001b[2m\u001b[36m(GBDTTrainable pid=62464)\u001b[0m UserWarning: Dataset 'train' has 1 blocks, which is less than the `num_workers` 2. This dataset will be automatically repartitioned to 2 blocks.\n", - "\u001b[2m\u001b[36m(GBDTTrainable pid=62463)\u001b[0m UserWarning: Dataset 'train' has 1 blocks, which is less than the `num_workers` 2. This dataset will be automatically repartitioned to 2 blocks.\n", - "\u001b[2m\u001b[36m(GBDTTrainable pid=62465)\u001b[0m UserWarning: Dataset 'train' has 1 blocks, which is less than the `num_workers` 2. This dataset will be automatically repartitioned to 2 blocks.\n", - "\u001b[2m\u001b[36m(GBDTTrainable pid=62466)\u001b[0m UserWarning: Dataset 'train' has 1 blocks, which is less than the `num_workers` 2. This dataset will be automatically repartitioned to 2 blocks.\n", - "\u001b[2m\u001b[36m(GBDTTrainable pid=62463)\u001b[0m 2022-05-13 12:32:05,102\tINFO main.py:980 -- [RayXGBoost] Created 2 new actors (2 total actors). Waiting until actors are ready for training.\n", - "\u001b[2m\u001b[36m(GBDTTrainable pid=62466)\u001b[0m 2022-05-13 12:32:05,204\tINFO main.py:980 -- [RayXGBoost] Created 2 new actors (2 total actors). Waiting until actors are ready for training.\n", - "\u001b[2m\u001b[36m(GBDTTrainable pid=62464)\u001b[0m 2022-05-13 12:32:05,338\tINFO main.py:980 -- [RayXGBoost] Created 2 new actors (2 total actors). 
Waiting until actors are ready for training.\n", - "\u001b[2m\u001b[36m(GBDTTrainable pid=62465)\u001b[0m 2022-05-13 12:32:07,164\tINFO main.py:980 -- [RayXGBoost] Created 2 new actors (2 total actors). Waiting until actors are ready for training.\n", - "\u001b[2m\u001b[36m(GBDTTrainable pid=62456)\u001b[0m 2022-05-13 12:32:10,549\tINFO main.py:1025 -- [RayXGBoost] Starting XGBoost training.\n", - "\u001b[2m\u001b[36m(_RemoteRayXGBoostActor pid=62495)\u001b[0m [12:32:10] task [xgboost.ray]:6975277392 got new rank 1\n", - "\u001b[2m\u001b[36m(_RemoteRayXGBoostActor pid=62494)\u001b[0m [12:32:10] task [xgboost.ray]:4560390352 got new rank 0\n", - "\u001b[2m\u001b[36m(raylet)\u001b[0m Spilled 2173 MiB, 22 objects, write throughput 402 MiB/s. Set RAY_verbose_spill_logs=0 to disable this message.\n", - "\u001b[2m\u001b[36m(GBDTTrainable pid=62463)\u001b[0m 2022-05-13 12:32:17,848\tINFO main.py:1025 -- [RayXGBoost] Starting XGBoost training.\n", - "\u001b[2m\u001b[36m(_RemoteRayXGBoostActor pid=62523)\u001b[0m [12:32:18] task [xgboost.ray]:4441524624 got new rank 0\n", - "\u001b[2m\u001b[36m(_RemoteRayXGBoostActor pid=62524)\u001b[0m [12:32:18] task [xgboost.ray]:6890641808 got new rank 1\n", - "\u001b[2m\u001b[36m(GBDTTrainable pid=62465)\u001b[0m 2022-05-13 12:32:21,253\tINFO main.py:1025 -- [RayXGBoost] Starting XGBoost training.\n", - "\u001b[2m\u001b[36m(GBDTTrainable pid=62466)\u001b[0m 2022-05-13 12:32:21,529\tINFO main.py:1025 -- [RayXGBoost] Starting XGBoost training.\n", - "\u001b[2m\u001b[36m(_RemoteRayXGBoostActor pid=62563)\u001b[0m [12:32:21] task [xgboost.ray]:4667801680 got new rank 1\n", - "\u001b[2m\u001b[36m(_RemoteRayXGBoostActor pid=62562)\u001b[0m [12:32:21] task [xgboost.ray]:6856360848 got new rank 0\n", - "\u001b[2m\u001b[36m(_RemoteRayXGBoostActor pid=62530)\u001b[0m [12:32:21] task [xgboost.ray]:6971527824 got new rank 0\n", - "\u001b[2m\u001b[36m(_RemoteRayXGBoostActor pid=62532)\u001b[0m [12:32:21] task [xgboost.ray]:4538321232 got new rank 
1\n", - "\u001b[2m\u001b[36m(GBDTTrainable pid=62464)\u001b[0m 2022-05-13 12:32:21,937\tINFO main.py:1025 -- [RayXGBoost] Starting XGBoost training.\n", - "\u001b[2m\u001b[36m(_RemoteRayXGBoostActor pid=62544)\u001b[0m [12:32:21] task [xgboost.ray]:7005661840 got new rank 1\n", - "\u001b[2m\u001b[36m(_RemoteRayXGBoostActor pid=62543)\u001b[0m [12:32:21] task [xgboost.ray]:4516088080 got new rank 0\n", - "\u001b[2m\u001b[36m(raylet)\u001b[0m Spilled 4098 MiB, 83 objects, write throughput 347 MiB/s.\n", - "\u001b[2m\u001b[36m(GBDTTrainable pid=62456)\u001b[0m 2022-05-13 12:32:41,289\tINFO main.py:1109 -- Training in progress (31 seconds since last restart).\n", - "\u001b[2m\u001b[36m(GBDTTrainable pid=62463)\u001b[0m 2022-05-13 12:32:48,617\tINFO main.py:1109 -- Training in progress (31 seconds since last restart).\n", - "\u001b[2m\u001b[36m(GBDTTrainable pid=62465)\u001b[0m 2022-05-13 12:32:52,110\tINFO main.py:1109 -- Training in progress (31 seconds since last restart).\n", - "\u001b[2m\u001b[36m(GBDTTrainable pid=62466)\u001b[0m 2022-05-13 12:32:52,448\tINFO main.py:1109 -- Training in progress (31 seconds since last restart).\n", - "\u001b[2m\u001b[36m(GBDTTrainable pid=62464)\u001b[0m 2022-05-13 12:32:52,692\tINFO main.py:1109 -- Training in progress (31 seconds since last restart).\n", - "\u001b[2m\u001b[36m(GBDTTrainable pid=62456)\u001b[0m 2022-05-13 12:33:11,960\tINFO main.py:1109 -- Training in progress (61 seconds since last restart).\n", - "\u001b[2m\u001b[36m(GBDTTrainable pid=62463)\u001b[0m 2022-05-13 12:33:19,076\tINFO main.py:1109 -- Training in progress (61 seconds since last restart).\n", - "\u001b[2m\u001b[36m(GBDTTrainable pid=62464)\u001b[0m 2022-05-13 12:33:23,409\tINFO main.py:1109 -- Training in progress (61 seconds since last restart).\n", - "\u001b[2m\u001b[36m(GBDTTrainable pid=62465)\u001b[0m 2022-05-13 12:33:23,420\tINFO main.py:1109 -- Training in progress (62 seconds since last restart).\n", - "\u001b[2m\u001b[36m(GBDTTrainable 
pid=62466)\u001b[0m 2022-05-13 12:33:23,541\tINFO main.py:1109 -- Training in progress (62 seconds since last restart).\n", - "\u001b[2m\u001b[36m(GBDTTrainable pid=62463)\u001b[0m 2022-05-13 12:33:23,693\tINFO main.py:1519 -- [RayXGBoost] Finished XGBoost training on training data with total N=581,012 in 78.74 seconds (65.79 pure XGBoost training time).\n", - "\u001b[2m\u001b[36m(GBDTTrainable pid=62464)\u001b[0m 2022-05-13 12:33:24,802\tINFO main.py:1519 -- [RayXGBoost] Finished XGBoost training on training data with total N=581,012 in 79.62 seconds (62.85 pure XGBoost training time).\n", - "\u001b[2m\u001b[36m(GBDTTrainable pid=62648)\u001b[0m UserWarning: Dataset 'train' has 1 blocks, which is less than the `num_workers` 2. This dataset will be automatically repartitioned to 2 blocks.\n", - "\u001b[2m\u001b[36m(GBDTTrainable pid=62651)\u001b[0m UserWarning: Dataset 'train' has 1 blocks, which is less than the `num_workers` 2. This dataset will be automatically repartitioned to 2 blocks.\n", - "\u001b[2m\u001b[36m(GBDTTrainable pid=62648)\u001b[0m 2022-05-13 12:33:38,788\tINFO main.py:980 -- [RayXGBoost] Created 2 new actors (2 total actors). Waiting until actors are ready for training.\n", - "\u001b[2m\u001b[36m(GBDTTrainable pid=62651)\u001b[0m 2022-05-13 12:33:38,766\tINFO main.py:980 -- [RayXGBoost] Created 2 new actors (2 total actors). 
Waiting until actors are ready for training.\n", - "\u001b[2m\u001b[36m(GBDTTrainable pid=62456)\u001b[0m 2022-05-13 12:33:42,168\tINFO main.py:1109 -- Training in progress (92 seconds since last restart).\n", - "\u001b[2m\u001b[36m(GBDTTrainable pid=62456)\u001b[0m 2022-05-13 12:33:46,177\tINFO main.py:1519 -- [RayXGBoost] Finished XGBoost training on training data with total N=581,012 in 103.54 seconds (95.60 pure XGBoost training time).\n", - "\u001b[2m\u001b[36m(GBDTTrainable pid=62651)\u001b[0m 2022-05-13 12:33:51,825\tINFO main.py:1025 -- [RayXGBoost] Starting XGBoost training.\n", - "\u001b[2m\u001b[36m(_RemoteRayXGBoostActor pid=62670)\u001b[0m [12:33:51] task [xgboost.ray]:4623186960 got new rank 1\n", - "\u001b[2m\u001b[36m(_RemoteRayXGBoostActor pid=62669)\u001b[0m [12:33:51] task [xgboost.ray]:4707639376 got new rank 0\n", - "\u001b[2m\u001b[36m(GBDTTrainable pid=62648)\u001b[0m 2022-05-13 12:33:52,036\tINFO main.py:1025 -- [RayXGBoost] Starting XGBoost training.\n", - "\u001b[2m\u001b[36m(_RemoteRayXGBoostActor pid=62672)\u001b[0m [12:33:52] task [xgboost.ray]:4530073552 got new rank 1\n", - "\u001b[2m\u001b[36m(_RemoteRayXGBoostActor pid=62671)\u001b[0m [12:33:52] task [xgboost.ray]:6824757200 got new rank 0\n", - "\u001b[2m\u001b[36m(GBDTTrainable pid=62466)\u001b[0m 2022-05-13 12:33:54,229\tINFO main.py:1109 -- Training in progress (92 seconds since last restart).\n", - "\u001b[2m\u001b[36m(GBDTTrainable pid=62465)\u001b[0m 2022-05-13 12:33:54,355\tINFO main.py:1109 -- Training in progress (93 seconds since last restart).\n", - "\u001b[2m\u001b[36m(GBDTTrainable pid=62730)\u001b[0m UserWarning: Dataset 'train' has 1 blocks, which is less than the `num_workers` 2. This dataset will be automatically repartitioned to 2 blocks.\n", - "\u001b[2m\u001b[36m(GBDTTrainable pid=62730)\u001b[0m 2022-05-13 12:34:04,708\tINFO main.py:980 -- [RayXGBoost] Created 2 new actors (2 total actors). 
Waiting until actors are ready for training.\n", - "\u001b[2m\u001b[36m(GBDTTrainable pid=62466)\u001b[0m 2022-05-13 12:34:11,126\tINFO main.py:1519 -- [RayXGBoost] Finished XGBoost training on training data with total N=581,012 in 126.08 seconds (109.48 pure XGBoost training time).\n", - "\u001b[2m\u001b[36m(GBDTTrainable pid=62730)\u001b[0m 2022-05-13 12:34:15,175\tINFO main.py:1025 -- [RayXGBoost] Starting XGBoost training.\n", - "\u001b[2m\u001b[36m(_RemoteRayXGBoostActor pid=62753)\u001b[0m [12:34:15] task [xgboost.ray]:4468564048 got new rank 1\n", - "\u001b[2m\u001b[36m(_RemoteRayXGBoostActor pid=62752)\u001b[0m [12:34:15] task [xgboost.ray]:6799468304 got new rank 0\n" + "\u001B[2m\u001B[36m(GBDTTrainable pid=62456)\u001B[0m UserWarning: Dataset 'train' has 1 blocks, which is less than the `num_workers` 2. This dataset will be automatically repartitioned to 2 blocks.\n", + "\u001B[2m\u001B[36m(GBDTTrainable pid=62456)\u001B[0m 2022-05-13 12:32:02,793\tINFO main.py:980 -- [RayXGBoost] Created 2 new actors (2 total actors). Waiting until actors are ready for training.\n", + "\u001B[2m\u001B[36m(GBDTTrainable pid=62464)\u001B[0m UserWarning: Dataset 'train' has 1 blocks, which is less than the `num_workers` 2. This dataset will be automatically repartitioned to 2 blocks.\n", + "\u001B[2m\u001B[36m(GBDTTrainable pid=62463)\u001B[0m UserWarning: Dataset 'train' has 1 blocks, which is less than the `num_workers` 2. This dataset will be automatically repartitioned to 2 blocks.\n", + "\u001B[2m\u001B[36m(GBDTTrainable pid=62465)\u001B[0m UserWarning: Dataset 'train' has 1 blocks, which is less than the `num_workers` 2. This dataset will be automatically repartitioned to 2 blocks.\n", + "\u001B[2m\u001B[36m(GBDTTrainable pid=62466)\u001B[0m UserWarning: Dataset 'train' has 1 blocks, which is less than the `num_workers` 2. 
This dataset will be automatically repartitioned to 2 blocks.\n", + "\u001B[2m\u001B[36m(GBDTTrainable pid=62463)\u001B[0m 2022-05-13 12:32:05,102\tINFO main.py:980 -- [RayXGBoost] Created 2 new actors (2 total actors). Waiting until actors are ready for training.\n", + "\u001B[2m\u001B[36m(GBDTTrainable pid=62466)\u001B[0m 2022-05-13 12:32:05,204\tINFO main.py:980 -- [RayXGBoost] Created 2 new actors (2 total actors). Waiting until actors are ready for training.\n", + "\u001B[2m\u001B[36m(GBDTTrainable pid=62464)\u001B[0m 2022-05-13 12:32:05,338\tINFO main.py:980 -- [RayXGBoost] Created 2 new actors (2 total actors). Waiting until actors are ready for training.\n", + "\u001B[2m\u001B[36m(GBDTTrainable pid=62465)\u001B[0m 2022-05-13 12:32:07,164\tINFO main.py:980 -- [RayXGBoost] Created 2 new actors (2 total actors). Waiting until actors are ready for training.\n", + "\u001B[2m\u001B[36m(GBDTTrainable pid=62456)\u001B[0m 2022-05-13 12:32:10,549\tINFO main.py:1025 -- [RayXGBoost] Starting XGBoost training.\n", + "\u001B[2m\u001B[36m(_RemoteRayXGBoostActor pid=62495)\u001B[0m [12:32:10] task [xgboost.ray]:6975277392 got new rank 1\n", + "\u001B[2m\u001B[36m(_RemoteRayXGBoostActor pid=62494)\u001B[0m [12:32:10] task [xgboost.ray]:4560390352 got new rank 0\n", + "\u001B[2m\u001B[36m(raylet)\u001B[0m Spilled 2173 MiB, 22 objects, write throughput 402 MiB/s. 
Set RAY_verbose_spill_logs=0 to disable this message.\n", + "\u001B[2m\u001B[36m(GBDTTrainable pid=62463)\u001B[0m 2022-05-13 12:32:17,848\tINFO main.py:1025 -- [RayXGBoost] Starting XGBoost training.\n", + "\u001B[2m\u001B[36m(_RemoteRayXGBoostActor pid=62523)\u001B[0m [12:32:18] task [xgboost.ray]:4441524624 got new rank 0\n", + "\u001B[2m\u001B[36m(_RemoteRayXGBoostActor pid=62524)\u001B[0m [12:32:18] task [xgboost.ray]:6890641808 got new rank 1\n", + "\u001B[2m\u001B[36m(GBDTTrainable pid=62465)\u001B[0m 2022-05-13 12:32:21,253\tINFO main.py:1025 -- [RayXGBoost] Starting XGBoost training.\n", + "\u001B[2m\u001B[36m(GBDTTrainable pid=62466)\u001B[0m 2022-05-13 12:32:21,529\tINFO main.py:1025 -- [RayXGBoost] Starting XGBoost training.\n", + "\u001B[2m\u001B[36m(_RemoteRayXGBoostActor pid=62563)\u001B[0m [12:32:21] task [xgboost.ray]:4667801680 got new rank 1\n", + "\u001B[2m\u001B[36m(_RemoteRayXGBoostActor pid=62562)\u001B[0m [12:32:21] task [xgboost.ray]:6856360848 got new rank 0\n", + "\u001B[2m\u001B[36m(_RemoteRayXGBoostActor pid=62530)\u001B[0m [12:32:21] task [xgboost.ray]:6971527824 got new rank 0\n", + "\u001B[2m\u001B[36m(_RemoteRayXGBoostActor pid=62532)\u001B[0m [12:32:21] task [xgboost.ray]:4538321232 got new rank 1\n", + "\u001B[2m\u001B[36m(GBDTTrainable pid=62464)\u001B[0m 2022-05-13 12:32:21,937\tINFO main.py:1025 -- [RayXGBoost] Starting XGBoost training.\n", + "\u001B[2m\u001B[36m(_RemoteRayXGBoostActor pid=62544)\u001B[0m [12:32:21] task [xgboost.ray]:7005661840 got new rank 1\n", + "\u001B[2m\u001B[36m(_RemoteRayXGBoostActor pid=62543)\u001B[0m [12:32:21] task [xgboost.ray]:4516088080 got new rank 0\n", + "\u001B[2m\u001B[36m(raylet)\u001B[0m Spilled 4098 MiB, 83 objects, write throughput 347 MiB/s.\n", + "\u001B[2m\u001B[36m(GBDTTrainable pid=62456)\u001B[0m 2022-05-13 12:32:41,289\tINFO main.py:1109 -- Training in progress (31 seconds since last restart).\n", + "\u001B[2m\u001B[36m(GBDTTrainable pid=62463)\u001B[0m 2022-05-13 
12:32:48,617\tINFO main.py:1109 -- Training in progress (31 seconds since last restart).\n", + "\u001B[2m\u001B[36m(GBDTTrainable pid=62465)\u001B[0m 2022-05-13 12:32:52,110\tINFO main.py:1109 -- Training in progress (31 seconds since last restart).\n", + "\u001B[2m\u001B[36m(GBDTTrainable pid=62466)\u001B[0m 2022-05-13 12:32:52,448\tINFO main.py:1109 -- Training in progress (31 seconds since last restart).\n", + "\u001B[2m\u001B[36m(GBDTTrainable pid=62464)\u001B[0m 2022-05-13 12:32:52,692\tINFO main.py:1109 -- Training in progress (31 seconds since last restart).\n", + "\u001B[2m\u001B[36m(GBDTTrainable pid=62456)\u001B[0m 2022-05-13 12:33:11,960\tINFO main.py:1109 -- Training in progress (61 seconds since last restart).\n", + "\u001B[2m\u001B[36m(GBDTTrainable pid=62463)\u001B[0m 2022-05-13 12:33:19,076\tINFO main.py:1109 -- Training in progress (61 seconds since last restart).\n", + "\u001B[2m\u001B[36m(GBDTTrainable pid=62464)\u001B[0m 2022-05-13 12:33:23,409\tINFO main.py:1109 -- Training in progress (61 seconds since last restart).\n", + "\u001B[2m\u001B[36m(GBDTTrainable pid=62465)\u001B[0m 2022-05-13 12:33:23,420\tINFO main.py:1109 -- Training in progress (62 seconds since last restart).\n", + "\u001B[2m\u001B[36m(GBDTTrainable pid=62466)\u001B[0m 2022-05-13 12:33:23,541\tINFO main.py:1109 -- Training in progress (62 seconds since last restart).\n", + "\u001B[2m\u001B[36m(GBDTTrainable pid=62463)\u001B[0m 2022-05-13 12:33:23,693\tINFO main.py:1519 -- [RayXGBoost] Finished XGBoost training on training data with total N=581,012 in 78.74 seconds (65.79 pure XGBoost training time).\n", + "\u001B[2m\u001B[36m(GBDTTrainable pid=62464)\u001B[0m 2022-05-13 12:33:24,802\tINFO main.py:1519 -- [RayXGBoost] Finished XGBoost training on training data with total N=581,012 in 79.62 seconds (62.85 pure XGBoost training time).\n", + "\u001B[2m\u001B[36m(GBDTTrainable pid=62648)\u001B[0m UserWarning: Dataset 'train' has 1 blocks, which is less than the `num_workers` 2. 
This dataset will be automatically repartitioned to 2 blocks.\n", + "\u001B[2m\u001B[36m(GBDTTrainable pid=62651)\u001B[0m UserWarning: Dataset 'train' has 1 blocks, which is less than the `num_workers` 2. This dataset will be automatically repartitioned to 2 blocks.\n", + "\u001B[2m\u001B[36m(GBDTTrainable pid=62648)\u001B[0m 2022-05-13 12:33:38,788\tINFO main.py:980 -- [RayXGBoost] Created 2 new actors (2 total actors). Waiting until actors are ready for training.\n", + "\u001B[2m\u001B[36m(GBDTTrainable pid=62651)\u001B[0m 2022-05-13 12:33:38,766\tINFO main.py:980 -- [RayXGBoost] Created 2 new actors (2 total actors). Waiting until actors are ready for training.\n", + "\u001B[2m\u001B[36m(GBDTTrainable pid=62456)\u001B[0m 2022-05-13 12:33:42,168\tINFO main.py:1109 -- Training in progress (92 seconds since last restart).\n", + "\u001B[2m\u001B[36m(GBDTTrainable pid=62456)\u001B[0m 2022-05-13 12:33:46,177\tINFO main.py:1519 -- [RayXGBoost] Finished XGBoost training on training data with total N=581,012 in 103.54 seconds (95.60 pure XGBoost training time).\n", + "\u001B[2m\u001B[36m(GBDTTrainable pid=62651)\u001B[0m 2022-05-13 12:33:51,825\tINFO main.py:1025 -- [RayXGBoost] Starting XGBoost training.\n", + "\u001B[2m\u001B[36m(_RemoteRayXGBoostActor pid=62670)\u001B[0m [12:33:51] task [xgboost.ray]:4623186960 got new rank 1\n", + "\u001B[2m\u001B[36m(_RemoteRayXGBoostActor pid=62669)\u001B[0m [12:33:51] task [xgboost.ray]:4707639376 got new rank 0\n", + "\u001B[2m\u001B[36m(GBDTTrainable pid=62648)\u001B[0m 2022-05-13 12:33:52,036\tINFO main.py:1025 -- [RayXGBoost] Starting XGBoost training.\n", + "\u001B[2m\u001B[36m(_RemoteRayXGBoostActor pid=62672)\u001B[0m [12:33:52] task [xgboost.ray]:4530073552 got new rank 1\n", + "\u001B[2m\u001B[36m(_RemoteRayXGBoostActor pid=62671)\u001B[0m [12:33:52] task [xgboost.ray]:6824757200 got new rank 0\n", + "\u001B[2m\u001B[36m(GBDTTrainable pid=62466)\u001B[0m 2022-05-13 12:33:54,229\tINFO main.py:1109 -- Training in progress 
(92 seconds since last restart).\n", + "\u001B[2m\u001B[36m(GBDTTrainable pid=62465)\u001B[0m 2022-05-13 12:33:54,355\tINFO main.py:1109 -- Training in progress (93 seconds since last restart).\n", + "\u001B[2m\u001B[36m(GBDTTrainable pid=62730)\u001B[0m UserWarning: Dataset 'train' has 1 blocks, which is less than the `num_workers` 2. This dataset will be automatically repartitioned to 2 blocks.\n", + "\u001B[2m\u001B[36m(GBDTTrainable pid=62730)\u001B[0m 2022-05-13 12:34:04,708\tINFO main.py:980 -- [RayXGBoost] Created 2 new actors (2 total actors). Waiting until actors are ready for training.\n", + "\u001B[2m\u001B[36m(GBDTTrainable pid=62466)\u001B[0m 2022-05-13 12:34:11,126\tINFO main.py:1519 -- [RayXGBoost] Finished XGBoost training on training data with total N=581,012 in 126.08 seconds (109.48 pure XGBoost training time).\n", + "\u001B[2m\u001B[36m(GBDTTrainable pid=62730)\u001B[0m 2022-05-13 12:34:15,175\tINFO main.py:1025 -- [RayXGBoost] Starting XGBoost training.\n", + "\u001B[2m\u001B[36m(_RemoteRayXGBoostActor pid=62753)\u001B[0m [12:34:15] task [xgboost.ray]:4468564048 got new rank 1\n", + "\u001B[2m\u001B[36m(_RemoteRayXGBoostActor pid=62752)\u001B[0m [12:34:15] task [xgboost.ray]:6799468304 got new rank 0\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ - "\u001b[2m\u001b[36m(GBDTTrainable pid=62648)\u001b[0m 2022-05-13 12:34:22,167\tINFO main.py:1109 -- Training in progress (30 seconds since last restart).\n", - "\u001b[2m\u001b[36m(GBDTTrainable pid=62651)\u001b[0m 2022-05-13 12:34:22,147\tINFO main.py:1109 -- Training in progress (30 seconds since last restart).\n", - "\u001b[2m\u001b[36m(GBDTTrainable pid=62465)\u001b[0m 2022-05-13 12:34:24,646\tINFO main.py:1109 -- Training in progress (123 seconds since last restart).\n", - "\u001b[2m\u001b[36m(GBDTTrainable pid=62465)\u001b[0m 2022-05-13 12:34:24,745\tINFO main.py:1519 -- [RayXGBoost] Finished XGBoost training on training data with total N=581,012 in 137.75 seconds (123.36 pure 
XGBoost training time).\n", - "\u001b[2m\u001b[36m(GBDTTrainable pid=62651)\u001b[0m 2022-05-13 12:34:40,173\tINFO main.py:1519 -- [RayXGBoost] Finished XGBoost training on training data with total N=581,012 in 61.63 seconds (48.34 pure XGBoost training time).\n", - "\u001b[2m\u001b[36m(GBDTTrainable pid=62730)\u001b[0m 2022-05-13 12:34:45,745\tINFO main.py:1109 -- Training in progress (31 seconds since last restart).\n", - "\u001b[2m\u001b[36m(GBDTTrainable pid=62648)\u001b[0m 2022-05-13 12:34:52,543\tINFO main.py:1109 -- Training in progress (60 seconds since last restart).\n", - "\u001b[2m\u001b[36m(GBDTTrainable pid=62648)\u001b[0m 2022-05-13 12:35:14,888\tINFO main.py:1519 -- [RayXGBoost] Finished XGBoost training on training data with total N=581,012 in 96.35 seconds (82.83 pure XGBoost training time).\n", - "\u001b[2m\u001b[36m(GBDTTrainable pid=62730)\u001b[0m 2022-05-13 12:35:16,197\tINFO main.py:1109 -- Training in progress (61 seconds since last restart).\n", - "\u001b[2m\u001b[36m(GBDTTrainable pid=62730)\u001b[0m 2022-05-13 12:35:33,441\tINFO main.py:1519 -- [RayXGBoost] Finished XGBoost training on training data with total N=581,012 in 88.89 seconds (78.26 pure XGBoost training time).\n", + "\u001B[2m\u001B[36m(GBDTTrainable pid=62648)\u001B[0m 2022-05-13 12:34:22,167\tINFO main.py:1109 -- Training in progress (30 seconds since last restart).\n", + "\u001B[2m\u001B[36m(GBDTTrainable pid=62651)\u001B[0m 2022-05-13 12:34:22,147\tINFO main.py:1109 -- Training in progress (30 seconds since last restart).\n", + "\u001B[2m\u001B[36m(GBDTTrainable pid=62465)\u001B[0m 2022-05-13 12:34:24,646\tINFO main.py:1109 -- Training in progress (123 seconds since last restart).\n", + "\u001B[2m\u001B[36m(GBDTTrainable pid=62465)\u001B[0m 2022-05-13 12:34:24,745\tINFO main.py:1519 -- [RayXGBoost] Finished XGBoost training on training data with total N=581,012 in 137.75 seconds (123.36 pure XGBoost training time).\n", + "\u001B[2m\u001B[36m(GBDTTrainable 
pid=62651)\u001B[0m 2022-05-13 12:34:40,173\tINFO main.py:1519 -- [RayXGBoost] Finished XGBoost training on training data with total N=581,012 in 61.63 seconds (48.34 pure XGBoost training time).\n", + "\u001B[2m\u001B[36m(GBDTTrainable pid=62730)\u001B[0m 2022-05-13 12:34:45,745\tINFO main.py:1109 -- Training in progress (31 seconds since last restart).\n", + "\u001B[2m\u001B[36m(GBDTTrainable pid=62648)\u001B[0m 2022-05-13 12:34:52,543\tINFO main.py:1109 -- Training in progress (60 seconds since last restart).\n", + "\u001B[2m\u001B[36m(GBDTTrainable pid=62648)\u001B[0m 2022-05-13 12:35:14,888\tINFO main.py:1519 -- [RayXGBoost] Finished XGBoost training on training data with total N=581,012 in 96.35 seconds (82.83 pure XGBoost training time).\n", + "\u001B[2m\u001B[36m(GBDTTrainable pid=62730)\u001B[0m 2022-05-13 12:35:16,197\tINFO main.py:1109 -- Training in progress (61 seconds since last restart).\n", + "\u001B[2m\u001B[36m(GBDTTrainable pid=62730)\u001B[0m 2022-05-13 12:35:33,441\tINFO main.py:1519 -- [RayXGBoost] Finished XGBoost training on training data with total N=581,012 in 88.89 seconds (78.26 pure XGBoost training time).\n", "2022-05-13 12:35:33,610\tINFO tune.py:753 -- Total run time: 218.52 seconds (217.48 seconds for the tuning loop).\n" ] } diff --git a/doc/source/ray-air/examples/upload_to_comet_ml.ipynb b/doc/source/ray-air/examples/upload_to_comet_ml.ipynb index 192385b5d862..d6ef4def7430 100644 --- a/doc/source/ray-air/examples/upload_to_comet_ml.ipynb +++ b/doc/source/ray-air/examples/upload_to_comet_ml.ipynb @@ -26,7 +26,7 @@ "metadata": {}, "outputs": [], "source": [ - "!pip install -qU \"ray[tune]\" sklearn xgboost_ray comet_ml" + "!pip install -qU \"ray[tune]\" scikit-learn xgboost_ray comet_ml" ] }, { @@ -138,7 +138,7 @@ "name": "stderr", "output_type": "stream", "text": [ - "2022-05-19 15:19:17,237\tINFO services.py:1483 -- View the Ray dashboard at \u001b[1m\u001b[32mhttp://127.0.0.1:8265\u001b[39m\u001b[22m\n" + "2022-05-19 
15:19:17,237\tINFO services.py:1483 -- View the Ray dashboard at \u001B[1m\u001B[32mhttp://127.0.0.1:8265\u001B[39m\u001B[22m\n" ] }, { @@ -165,23 +165,23 @@ "output_type": "stream", "text": [ "COMET WARNING: As you are running in a Jupyter environment, you will need to call `experiment.end()` when finished to ensure all metrics and code are logged before exiting.\n", - "\u001b[2m\u001b[33m(raylet)\u001b[0m 2022-05-19 15:19:21,584\tINFO context.py:70 -- Exec'ing worker with command: exec /Users/kai/.pyenv/versions/3.7.7/bin/python3.7 /Users/kai/coding/ray/python/ray/workers/default_worker.py --node-ip-address=127.0.0.1 --node-manager-port=61222 --object-store-name=/tmp/ray/session_2022-05-19_15-19-14_632568_19778/sockets/plasma_store --raylet-name=/tmp/ray/session_2022-05-19_15-19-14_632568_19778/sockets/raylet --redis-address=None --storage=None --temp-dir=/tmp/ray --metrics-agent-port=62873 --logging-rotate-bytes=536870912 --logging-rotate-backup-count=5 --gcs-address=127.0.0.1:61938 --redis-password=5241590000000000 --startup-token=16 --runtime-env-hash=-2010331134\n", + "\u001B[2m\u001B[33m(raylet)\u001B[0m 2022-05-19 15:19:21,584\tINFO context.py:70 -- Exec'ing worker with command: exec /Users/kai/.pyenv/versions/3.7.7/bin/python3.7 /Users/kai/coding/ray/python/ray/workers/default_worker.py --node-ip-address=127.0.0.1 --node-manager-port=61222 --object-store-name=/tmp/ray/session_2022-05-19_15-19-14_632568_19778/sockets/plasma_store --raylet-name=/tmp/ray/session_2022-05-19_15-19-14_632568_19778/sockets/raylet --redis-address=None --storage=None --temp-dir=/tmp/ray --metrics-agent-port=62873 --logging-rotate-bytes=536870912 --logging-rotate-backup-count=5 --gcs-address=127.0.0.1:61938 --redis-password=5241590000000000 --startup-token=16 --runtime-env-hash=-2010331134\n", "COMET INFO: Experiment is live on comet.ml https://www.comet.ml/krfricke/ray-air-example/ecd3726ca127497ba7386003a249fad6\n", "\n", "COMET WARNING: Failed to add tag(s) None to the 
experiment\n", "\n", "COMET WARNING: Empty mapping given to log_params({}); ignoring\n", - "\u001b[2m\u001b[36m(GBDTTrainable pid=19852)\u001b[0m UserWarning: Dataset 'train' has 1 blocks, which is less than the `num_workers` 2. This dataset will be automatically repartitioned to 2 blocks.\n", - "\u001b[2m\u001b[33m(raylet)\u001b[0m 2022-05-19 15:19:24,628\tINFO context.py:70 -- Exec'ing worker with command: exec /Users/kai/.pyenv/versions/3.7.7/bin/python3.7 /Users/kai/coding/ray/python/ray/workers/default_worker.py --node-ip-address=127.0.0.1 --node-manager-port=61222 --object-store-name=/tmp/ray/session_2022-05-19_15-19-14_632568_19778/sockets/plasma_store --raylet-name=/tmp/ray/session_2022-05-19_15-19-14_632568_19778/sockets/raylet --redis-address=None --storage=None --temp-dir=/tmp/ray --metrics-agent-port=62873 --logging-rotate-bytes=536870912 --logging-rotate-backup-count=5 --gcs-address=127.0.0.1:61938 --redis-password=5241590000000000 --startup-token=17 --runtime-env-hash=-2010331069\n", - "\u001b[2m\u001b[36m(GBDTTrainable pid=19852)\u001b[0m 2022-05-19 15:19:25,961\tINFO main.py:980 -- [RayXGBoost] Created 2 new actors (2 total actors). 
Waiting until actors are ready for training.\n", - "\u001b[2m\u001b[33m(raylet)\u001b[0m 2022-05-19 15:19:26,830\tINFO context.py:70 -- Exec'ing worker with command: exec /Users/kai/.pyenv/versions/3.7.7/bin/python3.7 /Users/kai/coding/ray/python/ray/workers/default_worker.py --node-ip-address=127.0.0.1 --node-manager-port=61222 --object-store-name=/tmp/ray/session_2022-05-19_15-19-14_632568_19778/sockets/plasma_store --raylet-name=/tmp/ray/session_2022-05-19_15-19-14_632568_19778/sockets/raylet --redis-address=None --storage=None --temp-dir=/tmp/ray --metrics-agent-port=62873 --logging-rotate-bytes=536870912 --logging-rotate-backup-count=5 --gcs-address=127.0.0.1:61938 --redis-password=5241590000000000 --startup-token=18 --runtime-env-hash=-2010331069\n", - "\u001b[2m\u001b[33m(raylet)\u001b[0m 2022-05-19 15:19:26,918\tINFO context.py:70 -- Exec'ing worker with command: exec /Users/kai/.pyenv/versions/3.7.7/bin/python3.7 /Users/kai/coding/ray/python/ray/workers/default_worker.py --node-ip-address=127.0.0.1 --node-manager-port=61222 --object-store-name=/tmp/ray/session_2022-05-19_15-19-14_632568_19778/sockets/plasma_store --raylet-name=/tmp/ray/session_2022-05-19_15-19-14_632568_19778/sockets/raylet --redis-address=None --storage=None --temp-dir=/tmp/ray --metrics-agent-port=62873 --logging-rotate-bytes=536870912 --logging-rotate-backup-count=5 --gcs-address=127.0.0.1:61938 --redis-password=5241590000000000 --startup-token=20 --runtime-env-hash=-2010331134\n", - "\u001b[2m\u001b[33m(raylet)\u001b[0m 2022-05-19 15:19:26,922\tINFO context.py:70 -- Exec'ing worker with command: exec /Users/kai/.pyenv/versions/3.7.7/bin/python3.7 /Users/kai/coding/ray/python/ray/workers/default_worker.py --node-ip-address=127.0.0.1 --node-manager-port=61222 --object-store-name=/tmp/ray/session_2022-05-19_15-19-14_632568_19778/sockets/plasma_store --raylet-name=/tmp/ray/session_2022-05-19_15-19-14_632568_19778/sockets/raylet --redis-address=None --storage=None --temp-dir=/tmp/ray 
--metrics-agent-port=62873 --logging-rotate-bytes=536870912 --logging-rotate-backup-count=5 --gcs-address=127.0.0.1:61938 --redis-password=5241590000000000 --startup-token=21 --runtime-env-hash=-2010331134\n", - "\u001b[2m\u001b[33m(raylet)\u001b[0m 2022-05-19 15:19:26,922\tINFO context.py:70 -- Exec'ing worker with command: exec /Users/kai/.pyenv/versions/3.7.7/bin/python3.7 /Users/kai/coding/ray/python/ray/workers/default_worker.py --node-ip-address=127.0.0.1 --node-manager-port=61222 --object-store-name=/tmp/ray/session_2022-05-19_15-19-14_632568_19778/sockets/plasma_store --raylet-name=/tmp/ray/session_2022-05-19_15-19-14_632568_19778/sockets/raylet --redis-address=None --storage=None --temp-dir=/tmp/ray --metrics-agent-port=62873 --logging-rotate-bytes=536870912 --logging-rotate-backup-count=5 --gcs-address=127.0.0.1:61938 --redis-password=5241590000000000 --startup-token=22 --runtime-env-hash=-2010331134\n", - "\u001b[2m\u001b[33m(raylet)\u001b[0m 2022-05-19 15:19:26,923\tINFO context.py:70 -- Exec'ing worker with command: exec /Users/kai/.pyenv/versions/3.7.7/bin/python3.7 /Users/kai/coding/ray/python/ray/workers/default_worker.py --node-ip-address=127.0.0.1 --node-manager-port=61222 --object-store-name=/tmp/ray/session_2022-05-19_15-19-14_632568_19778/sockets/plasma_store --raylet-name=/tmp/ray/session_2022-05-19_15-19-14_632568_19778/sockets/raylet --redis-address=None --storage=None --temp-dir=/tmp/ray --metrics-agent-port=62873 --logging-rotate-bytes=536870912 --logging-rotate-backup-count=5 --gcs-address=127.0.0.1:61938 --redis-password=5241590000000000 --startup-token=19 --runtime-env-hash=-2010331134\n", - "\u001b[2m\u001b[36m(GBDTTrainable pid=19852)\u001b[0m 2022-05-19 15:19:29,272\tINFO main.py:1025 -- [RayXGBoost] Starting XGBoost training.\n", - "\u001b[2m\u001b[36m(_RemoteRayXGBoostActor pid=19876)\u001b[0m [15:19:29] task [xgboost.ray]:4505889744 got new rank 1\n", - "\u001b[2m\u001b[36m(_RemoteRayXGBoostActor pid=19875)\u001b[0m [15:19:29] 
task [xgboost.ray]:6941849424 got new rank 0\n", + "\u001B[2m\u001B[36m(GBDTTrainable pid=19852)\u001B[0m UserWarning: Datastream 'train' has 1 blocks, which is less than the `num_workers` 2. This dataset will be automatically repartitioned to 2 blocks.\n", + "\u001B[2m\u001B[33m(raylet)\u001B[0m 2022-05-19 15:19:24,628\tINFO context.py:70 -- Exec'ing worker with command: exec /Users/kai/.pyenv/versions/3.7.7/bin/python3.7 /Users/kai/coding/ray/python/ray/workers/default_worker.py --node-ip-address=127.0.0.1 --node-manager-port=61222 --object-store-name=/tmp/ray/session_2022-05-19_15-19-14_632568_19778/sockets/plasma_store --raylet-name=/tmp/ray/session_2022-05-19_15-19-14_632568_19778/sockets/raylet --redis-address=None --storage=None --temp-dir=/tmp/ray --metrics-agent-port=62873 --logging-rotate-bytes=536870912 --logging-rotate-backup-count=5 --gcs-address=127.0.0.1:61938 --redis-password=5241590000000000 --startup-token=17 --runtime-env-hash=-2010331069\n", + "\u001B[2m\u001B[36m(GBDTTrainable pid=19852)\u001B[0m 2022-05-19 15:19:25,961\tINFO main.py:980 -- [RayXGBoost] Created 2 new actors (2 total actors). 
Waiting until actors are ready for training.\n", + "\u001B[2m\u001B[33m(raylet)\u001B[0m 2022-05-19 15:19:26,830\tINFO context.py:70 -- Exec'ing worker with command: exec /Users/kai/.pyenv/versions/3.7.7/bin/python3.7 /Users/kai/coding/ray/python/ray/workers/default_worker.py --node-ip-address=127.0.0.1 --node-manager-port=61222 --object-store-name=/tmp/ray/session_2022-05-19_15-19-14_632568_19778/sockets/plasma_store --raylet-name=/tmp/ray/session_2022-05-19_15-19-14_632568_19778/sockets/raylet --redis-address=None --storage=None --temp-dir=/tmp/ray --metrics-agent-port=62873 --logging-rotate-bytes=536870912 --logging-rotate-backup-count=5 --gcs-address=127.0.0.1:61938 --redis-password=5241590000000000 --startup-token=18 --runtime-env-hash=-2010331069\n", + "\u001B[2m\u001B[33m(raylet)\u001B[0m 2022-05-19 15:19:26,918\tINFO context.py:70 -- Exec'ing worker with command: exec /Users/kai/.pyenv/versions/3.7.7/bin/python3.7 /Users/kai/coding/ray/python/ray/workers/default_worker.py --node-ip-address=127.0.0.1 --node-manager-port=61222 --object-store-name=/tmp/ray/session_2022-05-19_15-19-14_632568_19778/sockets/plasma_store --raylet-name=/tmp/ray/session_2022-05-19_15-19-14_632568_19778/sockets/raylet --redis-address=None --storage=None --temp-dir=/tmp/ray --metrics-agent-port=62873 --logging-rotate-bytes=536870912 --logging-rotate-backup-count=5 --gcs-address=127.0.0.1:61938 --redis-password=5241590000000000 --startup-token=20 --runtime-env-hash=-2010331134\n", + "\u001B[2m\u001B[33m(raylet)\u001B[0m 2022-05-19 15:19:26,922\tINFO context.py:70 -- Exec'ing worker with command: exec /Users/kai/.pyenv/versions/3.7.7/bin/python3.7 /Users/kai/coding/ray/python/ray/workers/default_worker.py --node-ip-address=127.0.0.1 --node-manager-port=61222 --object-store-name=/tmp/ray/session_2022-05-19_15-19-14_632568_19778/sockets/plasma_store --raylet-name=/tmp/ray/session_2022-05-19_15-19-14_632568_19778/sockets/raylet --redis-address=None --storage=None --temp-dir=/tmp/ray 
--metrics-agent-port=62873 --logging-rotate-bytes=536870912 --logging-rotate-backup-count=5 --gcs-address=127.0.0.1:61938 --redis-password=5241590000000000 --startup-token=21 --runtime-env-hash=-2010331134\n", + "\u001B[2m\u001B[33m(raylet)\u001B[0m 2022-05-19 15:19:26,922\tINFO context.py:70 -- Exec'ing worker with command: exec /Users/kai/.pyenv/versions/3.7.7/bin/python3.7 /Users/kai/coding/ray/python/ray/workers/default_worker.py --node-ip-address=127.0.0.1 --node-manager-port=61222 --object-store-name=/tmp/ray/session_2022-05-19_15-19-14_632568_19778/sockets/plasma_store --raylet-name=/tmp/ray/session_2022-05-19_15-19-14_632568_19778/sockets/raylet --redis-address=None --storage=None --temp-dir=/tmp/ray --metrics-agent-port=62873 --logging-rotate-bytes=536870912 --logging-rotate-backup-count=5 --gcs-address=127.0.0.1:61938 --redis-password=5241590000000000 --startup-token=22 --runtime-env-hash=-2010331134\n", + "\u001B[2m\u001B[33m(raylet)\u001B[0m 2022-05-19 15:19:26,923\tINFO context.py:70 -- Exec'ing worker with command: exec /Users/kai/.pyenv/versions/3.7.7/bin/python3.7 /Users/kai/coding/ray/python/ray/workers/default_worker.py --node-ip-address=127.0.0.1 --node-manager-port=61222 --object-store-name=/tmp/ray/session_2022-05-19_15-19-14_632568_19778/sockets/plasma_store --raylet-name=/tmp/ray/session_2022-05-19_15-19-14_632568_19778/sockets/raylet --redis-address=None --storage=None --temp-dir=/tmp/ray --metrics-agent-port=62873 --logging-rotate-bytes=536870912 --logging-rotate-backup-count=5 --gcs-address=127.0.0.1:61938 --redis-password=5241590000000000 --startup-token=19 --runtime-env-hash=-2010331134\n", + "\u001B[2m\u001B[36m(GBDTTrainable pid=19852)\u001B[0m 2022-05-19 15:19:29,272\tINFO main.py:1025 -- [RayXGBoost] Starting XGBoost training.\n", + "\u001B[2m\u001B[36m(_RemoteRayXGBoostActor pid=19876)\u001B[0m [15:19:29] task [xgboost.ray]:4505889744 got new rank 1\n", + "\u001B[2m\u001B[36m(_RemoteRayXGBoostActor pid=19875)\u001B[0m [15:19:29] 
task [xgboost.ray]:6941849424 got new rank 0\n", "COMET WARNING: The given value of the metric episodes_total was None; ignoring\n", "COMET WARNING: The given value of the metric timesteps_total was None; ignoring\n", "COMET INFO: Artifact 'checkpoint_XGBoostTrainer_ac544_00000' version 1.0.0 created\n" @@ -271,7 +271,7 @@ "COMET INFO: Artifact 'checkpoint_XGBoostTrainer_ac544_00000' version 10.0.0 created (previous was: 9.0.0)\n", "COMET INFO: Scheduling the upload of 3 assets for a size of 16.37 KB, this can take some time\n", "COMET INFO: Artifact 'krfricke/checkpoint_XGBoostTrainer_ac544_00000:10.0.0' has started uploading asynchronously\n", - "\u001b[2m\u001b[36m(GBDTTrainable pid=19852)\u001b[0m 2022-05-19 15:19:33,890\tINFO main.py:1519 -- [RayXGBoost] Finished XGBoost training on training data with total N=569 in 7.96 seconds (4.61 pure XGBoost training time).\n", + "\u001B[2m\u001B[36m(GBDTTrainable pid=19852)\u001B[0m 2022-05-19 15:19:33,890\tINFO main.py:1519 -- [RayXGBoost] Finished XGBoost training on training data with total N=569 in 7.96 seconds (4.61 pure XGBoost training time).\n", "COMET INFO: Artifact 'krfricke/checkpoint_XGBoostTrainer_ac544_00000:9.0.0' has been fully uploaded successfully\n", "COMET INFO: Artifact 'checkpoint_XGBoostTrainer_ac544_00000' version 11.0.0 created (previous was: 10.0.0)\n", "COMET INFO: Scheduling the upload of 3 assets for a size of 16.39 KB, this can take some time\n", diff --git a/doc/source/ray-air/examples/upload_to_wandb.ipynb b/doc/source/ray-air/examples/upload_to_wandb.ipynb index fb679dc4636b..5ba2e60d5630 100644 --- a/doc/source/ray-air/examples/upload_to_wandb.ipynb +++ b/doc/source/ray-air/examples/upload_to_wandb.ipynb @@ -33,7 +33,7 @@ "metadata": {}, "outputs": [], "source": [ - "!pip install -qU \"ray[tune]\" sklearn xgboost_ray wandb" + "!pip install -qU \"ray[tune]\" scikit-learn xgboost_ray wandb" ] }, { @@ -160,7 +160,7 @@ "name": "stderr", "output_type": "stream", "text": [ - "2022-10-28 
16:28:19,325\tINFO worker.py:1524 -- Started a local Ray instance. View the dashboard at \u001b[1m\u001b[32mhttp://127.0.0.1:8265 \u001b[39m\u001b[22m\n", + "2022-10-28 16:28:19,325\tINFO worker.py:1524 -- Started a local Ray instance. View the dashboard at \u001B[1m\u001B[32mhttp://127.0.0.1:8265 \u001B[39m\u001B[22m\n", "2022-10-28 16:28:22,993\tWARNING read_api.py:297 -- ⚠️ The number of blocks in this dataset (1) limits its parallelism to 1 concurrent tasks. This is much less than the number of available CPU slots in the cluster. Use `.repartition(n)` to increase the number of dataset blocks.\n", "2022-10-28 16:28:26,033\tINFO wandb.py:267 -- Already logged into W&B.\n" ] From 3c7b1f99066f3563638b22aebfb0a93b0e575288 Mon Sep 17 00:00:00 2001 From: Artur Niederfahrenhorst Date: Wed, 10 May 2023 13:08:03 +0200 Subject: [PATCH 321/424] [RLlib] No longer return action distribution objects from RLModule's `forward_...()` methods. (#35085) --- doc/source/rllib/rllib-rlmodule.rst | 2 +- rllib/algorithms/appo/tf/appo_tf_learner.py | 13 +++-- rllib/algorithms/appo/tf/appo_tf_rl_module.py | 3 -- .../appo/torch/appo_torch_learner.py | 6 ++- .../appo/torch/appo_torch_rl_module.py | 1 - .../algorithms/impala/tf/impala_tf_learner.py | 5 +- .../impala/torch/impala_torch_learner.py | 5 +- rllib/algorithms/ppo/ppo_base_rl_module.py | 34 ++++++------ .../ppo/tests/test_ppo_rl_module.py | 28 +++++++--- rllib/algorithms/ppo/tf/ppo_tf_learner.py | 16 ++++-- rllib/algorithms/ppo/tf/ppo_tf_rl_module.py | 16 +----- .../algorithms/ppo/torch/ppo_torch_learner.py | 17 ++++-- .../ppo/torch/ppo_torch_rl_module.py | 19 ++----- rllib/core/rl_module/rl_module.py | 54 +++++++++++++++++-- .../rl_module/tf/tests/test_tf_rl_module.py | 17 +++--- rllib/core/rl_module/tf/tf_rl_module.py | 1 - .../torch/tests/test_torch_rl_module.py | 17 +++--- rllib/core/rl_module/torch/torch_rl_module.py | 14 ++++- rllib/core/testing/tf/bc_learner.py | 6 ++- rllib/core/testing/tf/bc_module.py | 38 +++++++------ 
rllib/core/testing/torch/bc_learner.py | 6 ++- rllib/core/testing/torch/bc_module.py | 30 +++++++++-- rllib/policy/eager_tf_policy_v2.py | 21 +++++--- rllib/policy/torch_policy_v2.py | 17 +++--- 24 files changed, 251 insertions(+), 135 deletions(-) diff --git a/doc/source/rllib/rllib-rlmodule.rst b/doc/source/rllib/rllib-rlmodule.rst index 1c498df3f9a3..cf7be524ea4a 100644 --- a/doc/source/rllib/rllib-rlmodule.rst +++ b/doc/source/rllib/rllib-rlmodule.rst @@ -426,7 +426,7 @@ What your customization could have looked like before: return None, None, None -All of the ``Policy.compute_***`` functions expect that `~ray.rllib.core.rl_module.rl_module.RLModule.forward_exploration` and `~ray.rllib.core.rl_module.rl_module.RLModule.forward_inference` return a dictionary that contains the key "action_dist" mapping to a ``ray.rllib.models.distributions.Distribution`` instance. Commonly used distribution implementations can be found under ``ray.rllib.models.tf.tf_distributions`` for tensorflow and ``ray.rllib.models.torch.torch_distributions`` for torch. You can choose to return determinstic actions, by creating a determinstic distribution instance. See `Writing Custom Single Agent RL Modules`_ for more details on how to implement your own custom RL Module. +All of the ``Policy.compute_***`` functions expect that `~ray.rllib.core.rl_module.rl_module.RLModule.forward_exploration` and `~ray.rllib.core.rl_module.rl_module.RLModule.forward_inference` return a dictionary that contains the key "action_dist_inputs", whose value are the parameters (inputs) of a ``ray.rllib.models.distributions.Distribution`` class. Commonly used distribution implementations can be found under ``ray.rllib.models.tf.tf_distributions`` for tensorflow and ``ray.rllib.models.torch.torch_distributions`` for torch. You can choose to return determinstic actions, by creating a determinstic distribution instance. 
See `Writing Custom Single Agent RL Modules`_ for more details on how to implement your own custom RL Module. .. tab-set:: diff --git a/rllib/algorithms/appo/tf/appo_tf_learner.py b/rllib/algorithms/appo/tf/appo_tf_learner.py index cdf8ee9f8361..9af71bbb50b5 100644 --- a/rllib/algorithms/appo/tf/appo_tf_learner.py +++ b/rllib/algorithms/appo/tf/appo_tf_learner.py @@ -5,7 +5,7 @@ AppoLearner, LEARNER_RESULTS_CURR_KL_COEFF_KEY, LEARNER_RESULTS_KL_KEY, - OLD_ACTION_DIST_KEY, + OLD_ACTION_DIST_LOGITS_KEY, ) from ray.rllib.algorithms.impala.tf.vtrace_tf_v2 import make_time_major, vtrace_tf2 from ray.rllib.core.learner.learner import POLICY_LOSS_KEY, VF_LOSS_KEY, ENTROPY_KEY @@ -26,10 +26,13 @@ def compute_loss_per_module( self, module_id: str, batch: SampleBatch, fwd_out: Mapping[str, TensorType] ) -> TensorType: values = fwd_out[SampleBatch.VF_PREDS] - - target_policy_dist = fwd_out[SampleBatch.ACTION_DIST] - old_target_policy_dist = fwd_out[OLD_ACTION_DIST_KEY] - + action_dist_cls_train = self._module[module_id].get_train_action_dist_cls() + target_policy_dist = action_dist_cls_train.from_logits( + fwd_out[SampleBatch.ACTION_DIST_INPUTS] + ) + old_target_policy_dist = action_dist_cls_train.from_logits( + fwd_out[OLD_ACTION_DIST_LOGITS_KEY] + ) old_target_policy_actions_logp = old_target_policy_dist.logp( batch[SampleBatch.ACTIONS] ) diff --git a/rllib/algorithms/appo/tf/appo_tf_rl_module.py b/rllib/algorithms/appo/tf/appo_tf_rl_module.py index 48287375a1f1..ffb8abf86502 100644 --- a/rllib/algorithms/appo/tf/appo_tf_rl_module.py +++ b/rllib/algorithms/appo/tf/appo_tf_rl_module.py @@ -2,7 +2,6 @@ from ray.rllib.algorithms.appo.appo_learner import ( OLD_ACTION_DIST_LOGITS_KEY, - OLD_ACTION_DIST_KEY, ) from ray.rllib.algorithms.ppo.tf.ppo_tf_rl_module import PPOTfRLModule from ray.rllib.core.models.base import ACTOR @@ -48,7 +47,5 @@ def _forward_train(self, batch: NestedDict): outs = super()._forward_train(batch) old_pi_inputs_encoded = 
self.old_encoder(batch)[ENCODER_OUT][ACTOR] old_action_dist_logits = tf.stop_gradient(self.old_pi(old_pi_inputs_encoded)) - old_action_dist = self.action_dist_cls.from_logits(old_action_dist_logits) - outs[OLD_ACTION_DIST_KEY] = old_action_dist outs[OLD_ACTION_DIST_LOGITS_KEY] = old_action_dist_logits return outs diff --git a/rllib/algorithms/appo/torch/appo_torch_learner.py b/rllib/algorithms/appo/torch/appo_torch_learner.py index 56f48a0fe0ed..1e604566ea8e 100644 --- a/rllib/algorithms/appo/torch/appo_torch_learner.py +++ b/rllib/algorithms/appo/torch/appo_torch_learner.py @@ -37,7 +37,11 @@ def compute_loss_per_module( ) -> TensorType: values = fwd_out[SampleBatch.VF_PREDS] - target_policy_dist = fwd_out[SampleBatch.ACTION_DIST] + action_dist_cls_train = self._module[module_id].get_train_action_dist_cls() + target_policy_dist = action_dist_cls_train.from_logits( + fwd_out[SampleBatch.ACTION_DIST_INPUTS] + ) + old_target_policy_dist = fwd_out[OLD_ACTION_DIST_KEY] old_target_policy_actions_logp = old_target_policy_dist.logp( batch[SampleBatch.ACTIONS] diff --git a/rllib/algorithms/appo/torch/appo_torch_rl_module.py b/rllib/algorithms/appo/torch/appo_torch_rl_module.py index 805ca11c3352..df2b653105ad 100644 --- a/rllib/algorithms/appo/torch/appo_torch_rl_module.py +++ b/rllib/algorithms/appo/torch/appo_torch_rl_module.py @@ -35,7 +35,6 @@ def get_target_network_pairs(self): @override(PPOTorchRLModule) def output_specs_train(self) -> List[str]: return [ - SampleBatch.ACTION_DIST, SampleBatch.VF_PREDS, OLD_ACTION_DIST_KEY, ] diff --git a/rllib/algorithms/impala/tf/impala_tf_learner.py b/rllib/algorithms/impala/tf/impala_tf_learner.py index fd04a6bf8d23..78893ac9180b 100644 --- a/rllib/algorithms/impala/tf/impala_tf_learner.py +++ b/rllib/algorithms/impala/tf/impala_tf_learner.py @@ -19,7 +19,10 @@ class ImpalaTfLearner(ImpalaLearner, TfLearner): def compute_loss_per_module( self, module_id: str, batch: SampleBatch, fwd_out: Mapping[str, TensorType] ) -> TensorType: 
- target_policy_dist = fwd_out[SampleBatch.ACTION_DIST] + action_dist_class_train = self._module[module_id].get_train_action_dist_cls() + target_policy_dist = action_dist_class_train.from_logits( + fwd_out[SampleBatch.ACTION_DIST_INPUTS] + ) values = fwd_out[SampleBatch.VF_PREDS] behaviour_actions_logp = batch[SampleBatch.ACTION_LOGP] diff --git a/rllib/algorithms/impala/torch/impala_torch_learner.py b/rllib/algorithms/impala/torch/impala_torch_learner.py index 50dd9911823f..d6f89a299739 100644 --- a/rllib/algorithms/impala/torch/impala_torch_learner.py +++ b/rllib/algorithms/impala/torch/impala_torch_learner.py @@ -23,7 +23,10 @@ class ImpalaTorchLearner(ImpalaLearner, TorchLearner): def compute_loss_per_module( self, module_id: str, batch: SampleBatch, fwd_out: Mapping[str, TensorType] ) -> TensorType: - target_policy_dist = fwd_out[SampleBatch.ACTION_DIST] + action_dist_class_train = self._module[module_id].get_train_action_dist_cls() + target_policy_dist = action_dist_class_train.from_logits( + fwd_out[SampleBatch.ACTION_DIST_INPUTS] + ) values = fwd_out[SampleBatch.VF_PREDS] behaviour_actions_logp = batch[SampleBatch.ACTION_LOGP] diff --git a/rllib/algorithms/ppo/ppo_base_rl_module.py b/rllib/algorithms/ppo/ppo_base_rl_module.py index 39a740fd9c22..620da1422e9e 100644 --- a/rllib/algorithms/ppo/ppo_base_rl_module.py +++ b/rllib/algorithms/ppo/ppo_base_rl_module.py @@ -5,7 +5,6 @@ import abc from ray.rllib.core.models.base import ActorCriticEncoder -from ray.rllib.core.models.specs.specs_base import TensorSpec from ray.rllib.core.models.specs.specs_dict import SpecDict from ray.rllib.core.rl_module.rl_module import RLModule from ray.rllib.core.rl_module.rl_module import RLModuleConfig @@ -34,42 +33,41 @@ def setup(self): assert isinstance(self.encoder, ActorCriticEncoder) + def get_train_action_dist_cls(self) -> Distribution: + return self.action_dist_cls + + def get_exploration_action_dist_cls(self) -> Distribution: + return self.action_dist_cls + + def 
get_inference_action_dist_cls(self) -> Distribution: + return self.action_dist_cls + @override(RLModule) def input_specs_inference(self) -> SpecDict: return self.input_specs_exploration() @override(RLModule) def output_specs_inference(self) -> SpecDict: - return SpecDict({SampleBatch.ACTION_DIST: Distribution}) + return [SampleBatch.ACTION_DIST_INPUTS] @override(RLModule) def input_specs_exploration(self): - return [] + return [SampleBatch.OBS] @override(RLModule) def output_specs_exploration(self) -> SpecDict: return [ SampleBatch.VF_PREDS, - SampleBatch.ACTION_DIST, SampleBatch.ACTION_DIST_INPUTS, ] @override(RLModule) def input_specs_train(self) -> SpecDict: - specs = self.input_specs_exploration() - specs.append(SampleBatch.ACTIONS) - if SampleBatch.OBS in specs: - specs.append(SampleBatch.NEXT_OBS) - return specs + return self.input_specs_exploration() @override(RLModule) def output_specs_train(self) -> SpecDict: - spec = SpecDict( - { - SampleBatch.ACTION_DIST: Distribution, - SampleBatch.ACTION_LOGP: TensorSpec("b", framework=self.framework), - SampleBatch.VF_PREDS: TensorSpec("b", framework=self.framework), - "entropy": TensorSpec("b", framework=self.framework), - } - ) - return spec + return [ + SampleBatch.VF_PREDS, + SampleBatch.ACTION_DIST_INPUTS, + ] diff --git a/rllib/algorithms/ppo/tests/test_ppo_rl_module.py b/rllib/algorithms/ppo/tests/test_ppo_rl_module.py index 678cc02b8af8..938e285b1eb3 100644 --- a/rllib/algorithms/ppo/tests/test_ppo_rl_module.py +++ b/rllib/algorithms/ppo/tests/test_ppo_rl_module.py @@ -47,7 +47,7 @@ def get_expected_module_config( return config -def dummy_torch_ppo_loss(batch, fwd_out): +def dummy_torch_ppo_loss(module, batch, fwd_out): """Dummy PPO loss function for testing purposes. Will eventually use the actual PPO loss function implemented in the PPOTfTrainer. 
@@ -64,19 +64,24 @@ def dummy_torch_ppo_loss(batch, fwd_out): # this is not exactly a ppo loss, just something to show that the # forward train works adv = batch[SampleBatch.REWARDS] - fwd_out[SampleBatch.VF_PREDS] - actor_loss = -(fwd_out[SampleBatch.ACTION_LOGP] * adv).mean() + action_dist_class = module.get_train_action_dist_cls() + action_probs = action_dist_class.from_logits( + fwd_out[SampleBatch.ACTION_DIST_INPUTS] + ).logp(batch[SampleBatch.ACTIONS]) + actor_loss = -(action_probs * adv).mean() critic_loss = (adv**2).mean() loss = actor_loss + critic_loss return loss -def dummy_tf_ppo_loss(batch, fwd_out): +def dummy_tf_ppo_loss(module, batch, fwd_out): """Dummy PPO loss function for testing purposes. Will eventually use the actual PPO loss function implemented in the PPOTfTrainer. Args: + module: PPOTfRLModule batch: SampleBatch used for training. fwd_out: Forward output of the model. @@ -84,7 +89,10 @@ def dummy_tf_ppo_loss(batch, fwd_out): Loss tensor """ adv = batch[SampleBatch.REWARDS] - fwd_out[SampleBatch.VF_PREDS] - action_probs = fwd_out[SampleBatch.ACTION_DIST].logp(batch[SampleBatch.ACTIONS]) + action_dist_class = module.get_train_action_dist_cls() + action_probs = action_dist_class.from_logits( + fwd_out[SampleBatch.ACTION_DIST_INPUTS] + ).logp(batch[SampleBatch.ACTIONS]) actor_loss = -tf.reduce_mean(action_probs * adv) critic_loss = tf.reduce_mean(tf.square(adv)) return actor_loss + critic_loss @@ -219,9 +227,13 @@ def test_forward_train(self): # input_batch[SampleBatch.SEQ_LENS] = np.array([1]) fwd_out = module.forward_exploration(input_batch) - _action = fwd_out["action_dist"].sample() + action_dist_cls = module.get_exploration_action_dist_cls() + action_dist = action_dist_cls.from_logits( + fwd_out[SampleBatch.ACTION_DIST_INPUTS] + ) + _action = action_dist.sample() action = convert_to_numpy(_action[0]) - action_logp = convert_to_numpy(fwd_out["action_dist"].logp(_action)[0]) + action_logp = convert_to_numpy(action_dist.logp(_action)[0]) 
new_obs, reward, terminated, truncated, _ = env.step(action) new_obs = preprocessor.transform(new_obs) output_batch = { @@ -259,7 +271,7 @@ def test_forward_train(self): module.to("cpu") module.train() fwd_out = module.forward_train(fwd_in) - loss = dummy_torch_ppo_loss(fwd_in, fwd_out) + loss = dummy_torch_ppo_loss(module, fwd_in, fwd_out) loss.backward() # check that all neural net parameters have gradients @@ -274,7 +286,7 @@ def test_forward_train(self): # fwd_in[SampleBatch.SEQ_LENS] = torch.Tensor([10]) with tf.GradientTape() as tape: fwd_out = module.forward_train(fwd_in) - loss = dummy_tf_ppo_loss(fwd_in, fwd_out) + loss = dummy_tf_ppo_loss(module, fwd_in, fwd_out) grads = tape.gradient(loss, module.trainable_variables) for grad in grads: self.assertIsNotNone(grad) diff --git a/rllib/algorithms/ppo/tf/ppo_tf_learner.py b/rllib/algorithms/ppo/tf/ppo_tf_learner.py index 6ab8960a326a..df22ad27b959 100644 --- a/rllib/algorithms/ppo/tf/ppo_tf_learner.py +++ b/rllib/algorithms/ppo/tf/ppo_tf_learner.py @@ -39,14 +39,20 @@ def compute_loss_per_module( # learning rate for that agent. # TODO (Kourosh): come back to RNNs later - curr_action_dist = fwd_out[SampleBatch.ACTION_DIST] - action_dist_class = type(fwd_out[SampleBatch.ACTION_DIST]) - prev_action_dist = action_dist_class.from_logits( + action_dist_class_train = self._module[module_id].get_train_action_dist_cls() + action_dist_class_exploration = self._module[ + module_id + ].get_exploration_action_dist_cls() + curr_action_dist = action_dist_class_train.from_logits( + fwd_out[SampleBatch.ACTION_DIST_INPUTS] + ) + prev_action_dist = action_dist_class_exploration.from_logits( batch[SampleBatch.ACTION_DIST_INPUTS] ) logp_ratio = tf.exp( - fwd_out[SampleBatch.ACTION_LOGP] - batch[SampleBatch.ACTION_LOGP] + curr_action_dist.logp(batch[SampleBatch.ACTIONS]) + - batch[SampleBatch.ACTION_LOGP] ) # Only calculate kl loss if necessary (kl-coeff > 0.0). 
@@ -67,7 +73,7 @@ def compute_loss_per_module( else: mean_kl_loss = tf.constant(0.0, dtype=logp_ratio.dtype) - curr_entropy = fwd_out["entropy"] + curr_entropy = curr_action_dist.entropy() mean_entropy = tf.reduce_mean(curr_entropy) surrogate_loss = tf.minimum( diff --git a/rllib/algorithms/ppo/tf/ppo_tf_rl_module.py b/rllib/algorithms/ppo/tf/ppo_tf_rl_module.py index b70aeb5a3ebb..b14e9ab176eb 100644 --- a/rllib/algorithms/ppo/tf/ppo_tf_rl_module.py +++ b/rllib/algorithms/ppo/tf/ppo_tf_rl_module.py @@ -49,8 +49,7 @@ def _forward_inference(self, batch: NestedDict) -> Mapping[str, Any]: # Actions action_logits = self.pi(encoder_outs[ENCODER_OUT][ACTOR]) - action_dist = self.action_dist_cls.from_logits(action_logits) - output[SampleBatch.ACTION_DIST] = action_dist.to_deterministic() + output[SampleBatch.ACTION_DIST_INPUTS] = action_logits return output @@ -88,9 +87,6 @@ def _forward_exploration(self, batch: NestedDict) -> Mapping[str, Any]: action_logits = self.pi(encoder_outs[ENCODER_OUT][ACTOR]) output[SampleBatch.ACTION_DIST_INPUTS] = action_logits - output[SampleBatch.ACTION_DIST] = self.action_dist_cls.from_logits( - logits=action_logits - ) return output @@ -119,15 +115,7 @@ def _forward_train(self, batch: NestedDict): output[SampleBatch.VF_PREDS] = tf.squeeze(vf_out, axis=-1) # Policy head - pi_out = self.pi(encoder_outs[ENCODER_OUT][ACTOR]) - action_logits = pi_out - action_dist = self.action_dist_cls.from_logits(logits=action_logits) - logp = action_dist.logp(batch[SampleBatch.ACTIONS]) - entropy = action_dist.entropy() - + action_logits = self.pi(encoder_outs[ENCODER_OUT][ACTOR]) output[SampleBatch.ACTION_DIST_INPUTS] = action_logits - output[SampleBatch.ACTION_DIST] = action_dist - output[SampleBatch.ACTION_LOGP] = logp - output["entropy"] = entropy return output diff --git a/rllib/algorithms/ppo/torch/ppo_torch_learner.py b/rllib/algorithms/ppo/torch/ppo_torch_learner.py index aa32a956d978..cdaae478519a 100644 --- 
a/rllib/algorithms/ppo/torch/ppo_torch_learner.py +++ b/rllib/algorithms/ppo/torch/ppo_torch_learner.py @@ -39,14 +39,21 @@ def compute_loss_per_module( # learning rate for that agent. # TODO (Kourosh): come back to RNNs later - curr_action_dist = fwd_out[SampleBatch.ACTION_DIST] - action_dist_class = type(fwd_out[SampleBatch.ACTION_DIST]) - prev_action_dist = action_dist_class.from_logits( + action_dist_class_train = self._module[module_id].get_train_action_dist_cls() + action_dist_class_exploration = self._module[ + module_id + ].get_exploration_action_dist_cls() + + curr_action_dist = action_dist_class_train.from_logits( + fwd_out[SampleBatch.ACTION_DIST_INPUTS] + ) + prev_action_dist = action_dist_class_exploration.from_logits( batch[SampleBatch.ACTION_DIST_INPUTS] ) logp_ratio = torch.exp( - fwd_out[SampleBatch.ACTION_LOGP] - batch[SampleBatch.ACTION_LOGP] + curr_action_dist.logp(batch[SampleBatch.ACTIONS]) + - batch[SampleBatch.ACTION_LOGP] ) # Only calculate kl loss if necessary (kl-coeff > 0.0). 
@@ -67,7 +74,7 @@ def compute_loss_per_module( else: mean_kl_loss = torch.tensor(0.0, device=logp_ratio.device) - curr_entropy = fwd_out["entropy"] + curr_entropy = curr_action_dist.entropy() mean_entropy = torch.mean(curr_entropy) surrogate_loss = torch.min( diff --git a/rllib/algorithms/ppo/torch/ppo_torch_rl_module.py b/rllib/algorithms/ppo/torch/ppo_torch_rl_module.py index ad1c026424ba..e908a1b03b03 100644 --- a/rllib/algorithms/ppo/torch/ppo_torch_rl_module.py +++ b/rllib/algorithms/ppo/torch/ppo_torch_rl_module.py @@ -40,8 +40,7 @@ def _forward_inference(self, batch: NestedDict) -> Mapping[str, Any]: # Actions action_logits = self.pi(encoder_outs[ENCODER_OUT][ACTOR]) - action_dist = self.action_dist_cls.from_logits(action_logits) - output[SampleBatch.ACTION_DIST] = action_dist.to_deterministic() + output[SampleBatch.ACTION_DIST_INPUTS] = action_logits return output @@ -75,13 +74,11 @@ def _forward_exploration(self, batch: NestedDict) -> Mapping[str, Any]: # Policy head action_logits = self.pi(encoder_outs[ENCODER_OUT][ACTOR]) - output[SampleBatch.ACTION_DIST_INPUTS] = action_logits - output[SampleBatch.ACTION_DIST] = self.action_dist_cls.from_logits( - logits=action_logits - ) + return output + @override(RLModule) def _forward_train(self, batch: NestedDict) -> Mapping[str, Any]: output = {} @@ -105,15 +102,7 @@ def _forward_train(self, batch: NestedDict) -> Mapping[str, Any]: output[SampleBatch.VF_PREDS] = vf_out.squeeze(-1) # Policy head - pi_out = self.pi(encoder_outs[ENCODER_OUT][ACTOR]) - action_logits = pi_out - action_dist = self.action_dist_cls.from_logits(logits=action_logits) - logp = action_dist.logp(batch[SampleBatch.ACTIONS]) - entropy = action_dist.entropy() - + action_logits = self.pi(encoder_outs[ENCODER_OUT][ACTOR]) output[SampleBatch.ACTION_DIST_INPUTS] = action_logits - output[SampleBatch.ACTION_DIST] = action_dist - output[SampleBatch.ACTION_LOGP] = logp - output["entropy"] = entropy return output diff --git 
a/rllib/core/rl_module/rl_module.py b/rllib/core/rl_module/rl_module.py index c403f54a7435..855a0151c8fd 100644 --- a/rllib/core/rl_module/rl_module.py +++ b/rllib/core/rl_module/rl_module.py @@ -329,6 +329,48 @@ def setup(self): abstraction can be used to create any component that your RLModule needs. """ + def get_train_action_dist_cls(self) -> Type[Distribution]: + """Returns the action distribution class for this RLModule used for training. + + This class is used to create action distributions from outputs of the + forward_train method. If the case that no action distribution class is needed, + this method can return None. + + Note that RLlib's distribution classes all implement the `Distribution` + interface. This requires two special methods: `Distribution.from_logits()` and + `Distribution.to_deterministic()`. See the documentation for `Distribution` + for more detail. + """ + raise NotImplementedError + + def get_exploration_action_dist_cls(self) -> Type[Distribution]: + """Returns the action distribution class for this RLModule used for exploration. + + This class is used to create action distributions from outputs of the + forward_exploration method. If the case that no action distribution class is + needed, this method can return None. + + Note that RLlib's distribution classes all implement the `Distribution` + interface. This requires two special methods: `Distribution.from_logits()` and + `Distribution.to_deterministic()`. See the documentation for `Distribution` + for more detail. + """ + raise NotImplementedError + + def get_inference_action_dist_cls(self) -> Type[Distribution]: + """Returns the action distribution class for this RLModule used for inference. + + This class is used to create action distributions from outputs of the forward + inference method. If the case that no action distribution class is needed, + this method can return None. + + Note that RLlib's distribution classes all implement the `Distribution` + interface. 
This requires two special methods: `Distribution.from_logits()` and + `Distribution.to_deterministic()`. See the documentation for `Distribution` + for more detail. + """ + raise NotImplementedError + def get_initial_state(self) -> NestedDict: """Returns the initial state of the module. @@ -381,8 +423,10 @@ def _default_input_specs(self) -> SpecType: @check_input_specs("_input_specs_inference") @check_output_specs("_output_specs_inference") def forward_inference(self, batch: SampleBatchType, **kwargs) -> Mapping[str, Any]: - """Forward-pass during evaluation, called from the sampler. This method should - not be overriden. Instead, override the _forward_inference method. + """Forward-pass during evaluation, called from the sampler. + + This method should not be overriden to implement a custom forward inference + method. Instead, override the _forward_inference method. Args: batch: The input batch. This input batch should comply with @@ -404,8 +448,10 @@ def _forward_inference(self, batch: NestedDict, **kwargs) -> Mapping[str, Any]: def forward_exploration( self, batch: SampleBatchType, **kwargs ) -> Mapping[str, Any]: - """Forward-pass during exploration, called from the sampler. This method should - not be overriden. Instead, override the _forward_exploration method. + """Forward-pass during exploration, called from the sampler. + + This method should not be overriden to implement a custom forward exploration + method. Instead, override the _forward_exploration method. Args: batch: The input batch. 
This input batch should comply with diff --git a/rllib/core/rl_module/tf/tests/test_tf_rl_module.py b/rllib/core/rl_module/tf/tests/test_tf_rl_module.py index 98fe5e71765f..c95d4cff96eb 100644 --- a/rllib/core/rl_module/tf/tests/test_tf_rl_module.py +++ b/rllib/core/rl_module/tf/tests/test_tf_rl_module.py @@ -1,13 +1,14 @@ -import gymnasium as gym -import tensorflow as tf -import tensorflow_probability as tfp import tempfile -from typing import Mapping import unittest +from typing import Mapping + +import gymnasium as gym +import tensorflow as tf from ray.rllib.core.rl_module.rl_module import RLModuleConfig from ray.rllib.core.rl_module.tf.tf_rl_module import TfRLModule from ray.rllib.core.testing.tf.bc_module import DiscreteBCTFModule +from ray.rllib.policy.sample_batch import SampleBatch from ray.rllib.utils.test_utils import check @@ -46,11 +47,13 @@ def test_forward_train(self): ) with tf.GradientTape() as tape: output = module.forward_train({"obs": obs}) - loss = -tf.math.reduce_mean(output["action_dist"].log_prob(actions)) + action_dist_class = module.get_train_action_dist_cls() + action_dist = action_dist_class.from_logits( + output[SampleBatch.ACTION_DIST_INPUTS] + ) + loss = -tf.math.reduce_mean(action_dist.logp(actions)) self.assertIsInstance(output, Mapping) - self.assertIn("action_dist", output) - self.assertIsInstance(output["action_dist"], tfp.distributions.Categorical) grads = tape.gradient(loss, module.trainable_variables) diff --git a/rllib/core/rl_module/tf/tf_rl_module.py b/rllib/core/rl_module/tf/tf_rl_module.py index 15861488dfcd..227cdd5a7990 100644 --- a/rllib/core/rl_module/tf/tf_rl_module.py +++ b/rllib/core/rl_module/tf/tf_rl_module.py @@ -5,7 +5,6 @@ from ray.rllib.utils.annotations import override from ray.rllib.utils.framework import try_import_tf - _, tf, _ = try_import_tf() diff --git a/rllib/core/rl_module/torch/tests/test_torch_rl_module.py b/rllib/core/rl_module/torch/tests/test_torch_rl_module.py index 8393add6debc..e359e5e09dd6 
100644 --- a/rllib/core/rl_module/torch/tests/test_torch_rl_module.py +++ b/rllib/core/rl_module/torch/tests/test_torch_rl_module.py @@ -1,12 +1,14 @@ -import gymnasium as gym import tempfile -import torch -from typing import Mapping import unittest +from typing import Mapping + +import gymnasium as gym +import torch from ray.rllib.core.rl_module.rl_module import RLModuleConfig from ray.rllib.core.rl_module.torch import TorchRLModule from ray.rllib.core.testing.torch.bc_module import DiscreteBCTorchModule +from ray.rllib.policy.sample_batch import SampleBatch from ray.rllib.utils.test_utils import check @@ -44,10 +46,13 @@ def test_forward_train(self): output = module.forward_train({"obs": obs}) self.assertIsInstance(output, Mapping) - self.assertIn("action_dist", output) - self.assertIsInstance(output["action_dist"], torch.distributions.Categorical) + self.assertIn(SampleBatch.ACTION_DIST_INPUTS, output) + + action_dist_inputs = output[SampleBatch.ACTION_DIST_INPUTS] + action_dist_class = module.get_train_action_dist_cls() + action_dist = action_dist_class.from_logits(action_dist_inputs) - loss = -output["action_dist"].log_prob(actions.view(-1)).mean() + loss = -action_dist.logp(actions.view(-1)).mean() loss.backward() # check that all neural net parameters have gradients diff --git a/rllib/core/rl_module/torch/torch_rl_module.py b/rllib/core/rl_module/torch/torch_rl_module.py index a31c6e758137..152a26534f1f 100644 --- a/rllib/core/rl_module/torch/torch_rl_module.py +++ b/rllib/core/rl_module/torch/torch_rl_module.py @@ -1,10 +1,11 @@ import pathlib -from typing import Any, List, Mapping, Tuple, Union +from typing import Any, List, Mapping, Tuple, Union, Type -from ray.rllib.core.rl_module.rl_module import RLModule from ray.rllib.core.rl_module.rl_module_with_target_networks_interface import ( RLModuleWithTargetNetworksInterface, ) +from ray.rllib.core.rl_module import RLModule +from ray.rllib.models.distributions import Distribution from 
ray.rllib.utils.annotations import override from ray.rllib.utils.framework import try_import_torch from ray.rllib.utils.typing import NetworkType @@ -54,6 +55,15 @@ def __init__(self, *args, **kwargs) -> None: # the interface of that base-class not the actual implementation. self.config = self.unwrapped().config + def get_train_action_dist_cls(self, *args, **kwargs) -> Type[Distribution]: + return self.unwrapped().get_train_action_dist_cls(*args, **kwargs) + + def get_exploration_action_dist_cls(self, *args, **kwargs) -> Type[Distribution]: + return self.unwrapped().get_exploration_action_dist_cls(*args, **kwargs) + + def get_inference_action_dist_cls(self, *args, **kwargs) -> Type[Distribution]: + return self.unwrapped().get_inference_action_dist_cls(*args, **kwargs) + @override(RLModule) def _forward_train(self, *args, **kwargs): return self(*args, **kwargs) diff --git a/rllib/core/testing/tf/bc_learner.py b/rllib/core/testing/tf/bc_learner.py index 0bc0d782d094..b3863a6f07e3 100644 --- a/rllib/core/testing/tf/bc_learner.py +++ b/rllib/core/testing/tf/bc_learner.py @@ -13,7 +13,9 @@ def compute_loss_per_module( self, module_id: str, batch: SampleBatch, fwd_out: Mapping[str, TensorType] ) -> Mapping[str, Any]: - action_dist = fwd_out["action_dist"] - loss = -tf.math.reduce_mean(action_dist.log_prob(batch[SampleBatch.ACTIONS])) + action_dist_inputs = fwd_out[SampleBatch.ACTION_DIST_INPUTS] + action_dist_class = self._module[module_id].get_train_action_dist_cls() + action_dist = action_dist_class.from_logits(action_dist_inputs) + loss = -tf.math.reduce_mean(action_dist.logp(batch[SampleBatch.ACTIONS])) return {self.TOTAL_LOSS_KEY: loss} diff --git a/rllib/core/testing/tf/bc_module.py b/rllib/core/testing/tf/bc_module.py index cca3a42f4eeb..0998468a4202 100644 --- a/rllib/core/testing/tf/bc_module.py +++ b/rllib/core/testing/tf/bc_module.py @@ -1,8 +1,8 @@ import tensorflow as tf -import tensorflow_probability as tfp from typing import Any, Mapping from 
ray.rllib.core.rl_module.rl_module import RLModule, RLModuleConfig +from ray.rllib.models.tf.tf_distributions import TfCategorical from ray.rllib.core.rl_module.marl_module import ( MultiAgentRLModule, MultiAgentRLModuleConfig, @@ -33,36 +33,44 @@ def setup(self): self.policy = tf.keras.Sequential(layers) self._input_dim = input_dim + def get_train_action_dist_cls(self): + return TfCategorical + + def get_exploration_action_dist_cls(self): + return TfCategorical + + def get_inference_action_dist_cls(self): + return TfCategorical + @override(RLModule) def output_specs_exploration(self) -> SpecType: - return ["action_dist"] + return [SampleBatch.ACTION_DIST_INPUTS] @override(RLModule) def output_specs_inference(self) -> SpecType: - return ["action_dist"] + return [SampleBatch.ACTION_DIST_INPUTS] @override(RLModule) def output_specs_train(self) -> SpecType: - return ["action_dist"] + return [SampleBatch.ACTION_DIST_INPUTS] + + def _forward_shared(self, batch: NestedDict) -> Mapping[str, Any]: + # We can use a shared forward method because BC does not need to distinguish + # between train, inference, and exploration. 
+ action_logits = self.policy(batch["obs"]) + return {SampleBatch.ACTION_DIST_INPUTS: action_logits} @override(RLModule) def _forward_inference(self, batch: NestedDict) -> Mapping[str, Any]: - obs = batch[SampleBatch.OBS] - action_logits = self.policy(obs) - action_logits_inference = tf.argmax(action_logits, axis=-1) - action_dist = tfp.distributions.Deterministic(action_logits_inference) - return {"action_dist": action_dist} + return self._forward_shared(batch) @override(RLModule) def _forward_exploration(self, batch: NestedDict) -> Mapping[str, Any]: - return self._forward_inference(batch) + return self._forward_shared(batch) @override(RLModule) def _forward_train(self, batch: NestedDict) -> Mapping[str, Any]: - obs = batch[SampleBatch.OBS] - action_logits = self.policy(obs) - action_dist = tfp.distributions.Categorical(logits=action_logits) - return {"action_dist": action_dist} + return self._forward_shared(batch) @override(RLModule) def get_state(self) -> Mapping[str, Any]: @@ -112,7 +120,7 @@ def _common_forward(self, batch): policy_in = tf.concat([global_enc, obs["local"]], axis=-1) action_logits = self.policy_head(policy_in) - return {"action_dist": tf.distributions.Categorical(logits=action_logits)} + return {SampleBatch.ACTION_DIST_INPUTS: action_logits} class BCTfMultiAgentModuleWithSharedEncoder(MultiAgentRLModule): diff --git a/rllib/core/testing/torch/bc_learner.py b/rllib/core/testing/torch/bc_learner.py index 123d8566c1ef..8db3a2213abd 100644 --- a/rllib/core/testing/torch/bc_learner.py +++ b/rllib/core/testing/torch/bc_learner.py @@ -12,6 +12,8 @@ def compute_loss_per_module( self, module_id: str, batch: SampleBatch, fwd_out: Mapping[str, TensorType] ) -> Mapping[str, Any]: - action_dist = fwd_out["action_dist"] - loss = -torch.mean(action_dist.log_prob(batch[SampleBatch.ACTIONS])) + action_dist_inputs = fwd_out[SampleBatch.ACTION_DIST_INPUTS] + action_dist_class = self._module[module_id].get_train_action_dist_cls() + action_dist = 
action_dist_class.from_logits(action_dist_inputs) + loss = -torch.mean(action_dist.logp(batch[SampleBatch.ACTIONS])) return {self.TOTAL_LOSS_KEY: loss} diff --git a/rllib/core/testing/torch/bc_module.py b/rllib/core/testing/torch/bc_module.py index 2ee380a2530c..06b015a47205 100644 --- a/rllib/core/testing/torch/bc_module.py +++ b/rllib/core/testing/torch/bc_module.py @@ -1,6 +1,8 @@ from typing import Any, Mapping +from ray.rllib.policy.sample_batch import SampleBatch from ray.rllib.core.rl_module.rl_module import RLModule, RLModuleConfig +from ray.rllib.models.torch.torch_distributions import TorchCategorical from ray.rllib.core.rl_module.marl_module import ( MultiAgentRLModuleConfig, MultiAgentRLModule, @@ -31,17 +33,26 @@ def setup(self): self.input_dim = input_dim + def get_train_action_dist_cls(self): + return TorchCategorical + + def get_exploration_action_dist_cls(self): + return TorchCategorical + + def get_inference_action_dist_cls(self): + return TorchCategorical + @override(RLModule) def output_specs_exploration(self) -> SpecType: - return ["action_dist"] + return [SampleBatch.ACTION_DIST_INPUTS] @override(RLModule) def output_specs_inference(self) -> SpecType: - return ["action_dist"] + return [SampleBatch.ACTION_DIST_INPUTS] @override(RLModule) def output_specs_train(self) -> SpecType: - return ["action_dist"] + return [SampleBatch.ACTION_DIST_INPUTS] @override(RLModule) def _forward_inference(self, batch: NestedDict) -> Mapping[str, Any]: @@ -56,7 +67,7 @@ def _forward_exploration(self, batch: NestedDict) -> Mapping[str, Any]: @override(RLModule) def _forward_train(self, batch: NestedDict) -> Mapping[str, Any]: action_logits = self.policy(batch["obs"]) - return {"action_dist": torch.distributions.Categorical(logits=action_logits)} + return {SampleBatch.ACTION_DIST_INPUTS: action_logits} class BCTorchRLModuleWithSharedGlobalEncoder(TorchRLModule): @@ -84,6 +95,15 @@ def __init__( nn.Linear(hidden_dim, action_dim), ) + def 
get_train_action_dist_cls(self): + return TorchCategorical + + def get_exploration_action_dist_cls(self): + return TorchCategorical + + def get_inference_action_dist_cls(self): + return TorchCategorical + @override(RLModule) def _default_input_specs(self): return [("obs", "global"), ("obs", "local")] @@ -108,7 +128,7 @@ def _common_forward(self, batch): policy_in = torch.cat([global_enc, obs["local"]], dim=-1) action_logits = self.policy_head(policy_in) - return {"action_dist": torch.distributions.Categorical(logits=action_logits)} + return {SampleBatch.ACTION_DIST_INPUTS: action_logits} class BCTorchMultiAgentModuleWithSharedEncoder(MultiAgentRLModule): diff --git a/rllib/policy/eager_tf_policy_v2.py b/rllib/policy/eager_tf_policy_v2.py index 161a4bea0a22..aa49eceb3dbf 100644 --- a/rllib/policy/eager_tf_policy_v2.py +++ b/rllib/policy/eager_tf_policy_v2.py @@ -3,13 +3,14 @@ It supports both traced and non-traced eager execution modes. """ -import gymnasium as gym import logging import os import threading -import tree # pip install dm_tree from typing import Dict, List, Optional, Tuple, Type, Union +import gymnasium as gym +import tree # pip install dm_tree + from ray.rllib.core.models.base import STATE_IN from ray.rllib.evaluation.episode import Episode from ray.rllib.models.catalog import ModelCatalog @@ -852,17 +853,23 @@ def _compute_actions_helper( input_dict[SampleBatch.SEQ_LENS] = seq_lens if explore: + action_dist_class = self.model.get_exploration_action_dist_cls() fwd_out = self.model.forward_exploration(input_dict) - else: - fwd_out = self.model.forward_inference(input_dict) - - action_dist = fwd_out[SampleBatch.ACTION_DIST] - if explore: + action_dist = action_dist_class.from_logits( + fwd_out[SampleBatch.ACTION_DIST_INPUTS] + ) actions = action_dist.sample() logp = action_dist.logp(actions) else: + action_dist_class = self.model.get_inference_action_dist_cls() + fwd_out = self.model.forward_inference(input_dict) + action_dist = 
action_dist_class.from_logits( + fwd_out[SampleBatch.ACTION_DIST_INPUTS] + ) + action_dist = action_dist.to_deterministic() actions = action_dist.sample() logp = None + state_out = fwd_out.get("state_out", {}) # anything but action_dist and state_out is an extra fetch diff --git a/rllib/policy/torch_policy_v2.py b/rllib/policy/torch_policy_v2.py index 1962a04adc60..4fb224d77f38 100644 --- a/rllib/policy/torch_policy_v2.py +++ b/rllib/policy/torch_policy_v2.py @@ -1110,18 +1110,23 @@ def _compute_action_helper( extra_fetches = None if isinstance(self.model, RLModule): if explore: + action_dist_class = self.model.get_exploration_action_dist_cls() fwd_out = self.model.forward_exploration(input_dict) - else: - fwd_out = self.model.forward_inference(input_dict) - # anything but action_dist and state_out is an extra fetch - action_dist = fwd_out.pop("action_dist") - - if explore: + action_dist = action_dist_class.from_logits( + fwd_out[SampleBatch.ACTION_DIST_INPUTS] + ) actions = action_dist.sample() logp = action_dist.logp(actions) else: + action_dist_class = self.model.get_inference_action_dist_cls() + fwd_out = self.model.forward_inference(input_dict) + action_dist = action_dist_class.from_logits( + fwd_out[SampleBatch.ACTION_DIST_INPUTS] + ) + action_dist = action_dist.to_deterministic() actions = action_dist.sample() logp = None + state_out = fwd_out.pop("state_out", {}) extra_fetches = fwd_out dist_inputs = None From f773d99dc786a23b25ce314933f17124edfd4955 Mon Sep 17 00:00:00 2001 From: angelinalg <122562471+angelinalg@users.noreply.github.com> Date: Wed, 10 May 2023 08:27:48 -0700 Subject: [PATCH 322/424] [docs][serve] add note that Ray doesn't pickle (#35194) Co-authored-by: Cindy Zhang --- doc/source/serve/tutorials/gradio-integration.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/doc/source/serve/tutorials/gradio-integration.md b/doc/source/serve/tutorials/gradio-integration.md index 3f865f185f86..3a8e852a888b 100644 --- 
a/doc/source/serve/tutorials/gradio-integration.md +++ b/doc/source/serve/tutorials/gradio-integration.md @@ -44,6 +44,10 @@ Currently, there is no support for routing requests properly to multiple replica `GradioServer` is simply `GradioIngress` but wrapped in a Serve deployment. You can use `GradioServer` for the simple wrap-and-deploy use case, but as you will see in the next section, you can use `GradioIngress` to define your own Gradio Server for more customized use cases. ::: +:::{note} +Ray can’t pickle Gradio. Instead, pass a builder function that constructs the Gradio interface. +::: + Using either Gradio app `io` constructed by the builder function above or providing your own application (of type `Interface`, `Block`, `Parallel`, etc.), wrap it in your Gradio Server. Pass the builder function as input to your Gradio Server. It will be used to construct your Gradio app on the Ray cluster. ```{literalinclude} ../doc_code/gradio-integration.py From 1b7588db4d44544f3dc71749c1c1e477206a3563 Mon Sep 17 00:00:00 2001 From: Max Pumperla Date: Wed, 10 May 2023 19:16:03 +0200 Subject: [PATCH 323/424] [docs] batch inference pass (#35041) we're focusing more on explaining how batch inference works without Ray first, what the differences are, and what to know about batches and their formats to scale out your workloads. 
--- doc/source/data/batch_inference.rst | 192 ++++++++++++++---- doc/source/data/doc_code/hf_quick_start.py | 50 ++++- .../data/doc_code/pytorch_quick_start.py | 58 +++++- doc/source/data/doc_code/tf_quick_start.py | 52 ++++- .../doc_code/torch_image_batch_trained.py | 5 +- doc/source/data/images/batch_inference.png | Bin 24719 -> 104887 bytes 6 files changed, 306 insertions(+), 51 deletions(-) diff --git a/doc/source/data/batch_inference.rst b/doc/source/data/batch_inference.rst index 8ab5197a18b9..89de506a3ec7 100644 --- a/doc/source/data/batch_inference.rst +++ b/doc/source/data/batch_inference.rst @@ -8,8 +8,9 @@ Running Batch Inference In this tutorial you'll learn what batch inference is, why you might want to use Ray for it, and how to use Ray Data effectively for this task. If you are familiar with the basics of inference tasks, jump straight to - code in the :ref:`quickstart section ` or the - :ref:`advanced guide`. + code in the :ref:`quickstart section `, our detailed + :ref:`walk-through`, + or our :ref:`in-depth guide for PyTorch models`. Batch inference refers to generating model predictions on a set of input data. The model can range from a simple Python function to a complex neural network. @@ -18,8 +19,9 @@ batch of data on demand. This is in contrast to online inference, where the model is run immediately on a data point when it becomes available. -Here's a simple schematic of batch inference, "mapping" batches to predictions -via model inference: +Here's a simple schematic of batch inference for the computer vision task classifying +images as cats or docs, by "mapping" batches of input data to predictions +via ML model inference: .. figure:: images/batch_inference.png @@ -66,21 +68,96 @@ use case does not require scaling yet: Quick Start ----------- -Install Ray with the data processing library, Ray Data: +If you're impatient and want to see a copy-paste example right away, +here are a few simple examples. 
+Just pick one of the frameworks you like and run the code in your terminal. +If you want a more detailed rundown of the same examples, skip ahead to the +:ref:`following batch inference walk-through with Ray`. -.. code-block:: bash - pip install ray[data] +.. tabs:: + + .. group-tab:: HuggingFace + + .. literalinclude:: ./doc_code/hf_quick_start.py + :language: python + :start-after: __hf_super_quick_start__ + :end-before: __hf_super_quick_end__ + + .. group-tab:: PyTorch + + .. literalinclude:: ./doc_code/pytorch_quick_start.py + :language: python + :start-after: __pt_super_quick_start__ + :end-before: __pt_super_quick_end__ + + .. group-tab:: TensorFlow + + .. literalinclude:: ./doc_code/tf_quick_start.py + :language: python + :start-after: __tf_super_quick_start__ + :end-before: __tf_super_quick_end__ + + +.. _batch_inference_walk_through: + +Walk-through: Batch Inference with Ray +-------------------------------------- Running batch inference is conceptually easy and requires three steps: -1. Load your data and optionally apply any preprocessing you need. -2. Define your model for inference. -3. Run inference on your data by using the :meth:`ds.map_batches() ` - method from Ray Data. +1. Load your data and apply any preprocessing you need. +2. Define your model and define a transformation that applies your model to your data. +3. Run the transformation on your data. + + +Let's take a look at a simple example of this process without using Ray. +In each example we load ``batches`` of data, load a ``model``, define a ``transform`` +function and apply the model to the data to get ``results``. + +.. tabs:: + + .. group-tab:: HuggingFace + + .. literalinclude:: ./doc_code/hf_quick_start.py + :language: python + :start-after: __hf_no_ray_start__ + :end-before: __hf_no_ray_end__ + + .. group-tab:: PyTorch + + .. literalinclude:: ./doc_code/pytorch_quick_start.py + :language: python + :start-after: __pt_no_ray_start__ + :end-before: __pt_no_ray_end__ + + .. 
group-tab:: TensorFlow + + .. literalinclude:: ./doc_code/tf_quick_start.py + :language: python + :start-after: __tf_no_ray_start__ + :end-before: __tf_no_ray_end__ + +.. note:: + + As a Python user, this should all look familiar to you. + The only part that you might be wondering about is that we're using + ``Dict[str, np.ndarray]`` as input type to our ``transform`` functions. + We do this to ease the transition to Ray, given that Ray Data uses + ``Dict[str, np.ndarray]`` as the default format for its batches. + + +If you can follow the above examples conceptually, you should have no trouble scaling your batch +inference workload to a compute cluster with Ray Data. +If you're using Ray, the three steps for running batch inference read as follows: -The last step also defines how your batch processing job gets distributed across your (local) cluster. -We start with very simple use cases here and build up to more complex ones in other guides and tutorials. +1. Load a Ray Data dataset and apply any preprocessing you need. This will distribute your data + across the cluster. +2. Define your model in a class and define a transformation that applies your model to + your data batches (of format ``Dict[str, np.ndarray]`` by default). +3. Run inference on your data by using the :meth:`ds.map_batches() ` + method from Ray Data. In this step you also define how your batch processing job + gets distributed across your cluster. .. note:: @@ -89,26 +166,31 @@ We start with very simple use cases here and build up to more complex ones in ot demanding model setups, additional postprocessing, or other customizations. We'll cover these advanced use cases in the next sections. +Let's scale out the above examples to a Ray cluster. +To start, install Ray with the data processing library, Ray Data: + +.. code-block:: bash + + pip install ray[data] + + 1. 
Loading and preprocessing data ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ For this quick start guide we use very small, in-memory datasets by leveraging common Python libraries like NumPy and Pandas. -In general, once you load your data using Ray Data, you also want to apply some preprocessing steps. -We skip this step here for simplicity. -In any case, the result of this step is a Ray Dataset ``ds`` that we can use to run inference on. -.. margin:: +In fact, we're using the exact same datasets as in the previous section, but load +them into Ray data. +The result of this step is a Ray Datastream ``ds`` that we can use to run inference on. - For larger data sets, you can use Ray Data to load data from cloud storage like S3 or GCS. - We'll cover this later on. .. tabs:: .. group-tab:: HuggingFace - Create a Pandas - DataFrame with text data to run a GPT-2 model on. + Create a Pandas DataFrame with text data and convert it to a Ray Datastream + with the :meth:`ray.data.from_pandas() ` method. .. literalinclude:: ./doc_code/hf_quick_start.py :language: python @@ -118,7 +200,8 @@ In any case, the result of this step is a Ray Dataset ``ds`` that we can use to .. group-tab:: PyTorch Create a NumPy array with 100 - entries, which represents the input to a feed-forward neural network. + entries and convert it to a Ray Datastream with the + :meth:`ray.data.from_numpy() ` method. .. literalinclude:: ./doc_code/pytorch_quick_start.py :language: python @@ -127,10 +210,11 @@ In any case, the result of this step is a Ray Dataset ``ds`` that we can use to .. group-tab:: TensorFlow - Create a NumPy array with 100 - entries, which represents the input to a feed-forward neural network. + Create a NumPy array with 100 + entries and convert it to a Ray Datastream with the + :meth:`ray.data.from_numpy() ` method. - .. literalinclude:: ./doc_code/tf_quick_start.py + .. 
literalinclude:: ./doc_code/tf_quick_start.py :language: python :start-after: __tf_quickstart_load_start__ :end-before: __tf_quickstart_load_end__ @@ -141,6 +225,8 @@ In any case, the result of this step is a Ray Dataset ``ds`` that we can use to Next, you want to set up your model for inference, by defining a predictor. The core idea is to define a class that loads your model in its ``__init__`` method and and implements a ``__call__`` method that takes a batch of data and returns a batch of predictions. +The ``__call__`` method is essentially the same as the ``transform`` function from the previous section. + Below you find examples for PyTorch, TensorFlow, and HuggingFace. .. tabs:: @@ -199,15 +285,6 @@ Once you have your Ray Dataset ``ds`` and your predictor class, you can use In the example below, we use two CPUs to run inference in parallel and then print the results. We cover resource allocation in more detail in :ref:`the configuration section of this guide `. -.. note:: - - Defining your :meth:`ds.map_batches() ` function requires - you to write a Python function that takes a batch of data and returns a batch of predictions. - An easy way to do this and validate it is to use :meth:`ds.take_batch(N) ` to get a batch of data - first, and then locally test your predictor function on that batch, without using Ray. - Once you are happy with the results, you can use the same function in ``map_batches`` - on the full dataset. The examples below show you how. - .. tabs:: .. group-tab:: HuggingFace @@ -231,10 +308,42 @@ We cover resource allocation in more detail in :ref:`the configuration section o :start-after: __tf_quickstart_prediction_start__ :end-before: __tf_quickstart_prediction_end__ + +Note how defining your :meth:`ds.map_batches() ` function requires +you to write a Python method that takes a batch of data and returns a batch of predictions. 
+An easy way to do this and validate it is to use :meth:`ds.take_batch(N) ` to get a batch of data +first, and then locally test your predictor method on that batch, without using Ray. +Once you are happy with the results, you can use the same function in ``map_batches`` +on the full dataset. Below you see how to do that in our running examples. + +.. tabs:: + + .. group-tab:: HuggingFace + + .. literalinclude:: ./doc_code/hf_quick_start.py + :language: python + :start-after: __hf_quickstart_prediction_test_start__ + :end-before: __hf_quickstart_prediction_test_end__ + + .. group-tab:: PyTorch + + .. literalinclude:: ./doc_code/pytorch_quick_start.py + :language: python + :start-after: __pt_quickstart_prediction_test_start__ + :end-before: __pt_quickstart_prediction_test_end__ + + .. group-tab:: TensorFlow + + .. literalinclude:: ./doc_code/tf_quick_start.py + :language: python + :start-after: __tf_quickstart_prediction_test_start__ + :end-before: __tf_quickstart_prediction_test_end__ + + .. _batch_inference_advanced_pytorch_example: -Advanced batch inference guide ------------------------------- +Advanced Guide to Batch Inference with PyTorch +---------------------------------------------- Let's use batch inference on a pre-trained PyTorch model for image classification to illustrate advanced concepts of batch processing with Ray. @@ -387,10 +496,16 @@ stateful class with Ray for our pretrained ResNet model: <2> The ``__call__`` method is used to apply the model to a batch of data. - <3> We're free to use any custom code in a stateful class, and here we prepare the data to run on GPUs. + <3> We're free to use any custom code in a stateful class. <4> Finally, we return the ``"class"`` key of the model predictions as Numpy array. +.. note:: + + Of course, you can also use GPUs for inference with Ray. + Jump ahead to the :ref:`GPU usage section ` to see how + to modify the current example to use GPUs. 
+ Scalable inference with Ray Data ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -559,6 +674,9 @@ If encountering OOMs, decreasing your ``batch_size`` may help. The default ``batch_size`` of ``4096`` may be too large for datasets with large rows (e.g. tables with many columns or a collection of large images). + +.. _batch_inference_gpu: + Using GPUs in batch inference ----------------------------- diff --git a/doc/source/data/doc_code/hf_quick_start.py b/doc/source/data/doc_code/hf_quick_start.py index 1675f38aa4db..3c8fd8612afb 100644 --- a/doc/source/data/doc_code/hf_quick_start.py +++ b/doc/source/data/doc_code/hf_quick_start.py @@ -2,15 +2,48 @@ # isort: skip_file # fmt: off -# __hf_quickstart_load_start__ +# __hf_super_quick_start__ import ray import numpy as np -import pandas as pd from typing import Dict +ds = ray.data.from_numpy(np.asarray(["Complete this", "for me"])) + +class HuggingFacePredictor: + def __init__(self): + from transformers import pipeline + self.model = pipeline("text-generation", model="gpt2") + + def __call__(self, batch: Dict[str, np.ndarray]): + model_out = self.model(list(batch["data"]), max_length=20) + return {"output": model_out} + +scale = ray.data.ActorPoolStrategy(size=2) +predictions = ds.map_batches(HuggingFacePredictor, compute=scale) +predictions.show(limit=1) +# __hf_super_quick_end__ + +# __hf_no_ray_start__ +import numpy as np +from typing import Dict +from transformers import pipeline + +batches = {"data": np.asarray(["Complete this", "for me"])} + +model = pipeline("text-generation", model="gpt2") + +def transform(batch: Dict[str, np.ndarray]): + return model(list(batch["data"]), max_length=20) -prompts = pd.DataFrame(["Complete these sentences", "for me"], columns=["text"]) -ds = ray.data.from_pandas(prompts) +results = transform(batches) +# __hf_no_ray_end__ + +# __hf_quickstart_load_start__ +import ray +import numpy as np +from typing import Dict + +ds = ray.data.from_numpy(np.asarray(["Complete this", "for me"])) # 
__hf_quickstart_load_end__ @@ -21,16 +54,19 @@ def __init__(self): # <1> self.model = pipeline("text-generation", model="gpt2") def __call__(self, batch: Dict[str, np.ndarray]): # <2> - model_out = self.model(list(batch["text"]), max_length=20) - return pd.DataFrame({"output": model_out}) + model_out = self.model(list(batch["data"]), max_length=20) + return {"output": np.asarray(model_out)} # __hf_quickstart_model_end__ -# __hf_quickstart_prediction_start__ +# __hf_quickstart_prediction_test_start__ hfp = HuggingFacePredictor() batch = ds.take_batch(10) test = hfp(batch) +# __hf_quickstart_prediction_test_end__ + +# __hf_quickstart_prediction_start__ scale = ray.data.ActorPoolStrategy(size=2) predictions = ds.map_batches(HuggingFacePredictor, compute=scale) diff --git a/doc/source/data/doc_code/pytorch_quick_start.py b/doc/source/data/doc_code/pytorch_quick_start.py index 9e2d8d2e9bd6..033e06617b8a 100644 --- a/doc/source/data/doc_code/pytorch_quick_start.py +++ b/doc/source/data/doc_code/pytorch_quick_start.py @@ -2,6 +2,59 @@ # isort: skip_file # fmt: off +# __pt_super_quick_start__ +import ray +import numpy as np +from typing import Dict +import torch +import torch.nn as nn + +ds = ray.data.from_numpy(np.ones((1, 100))) + +class TorchPredictor: + + def __init__(self): + self.model = nn.Sequential( + nn.Linear(in_features=100, out_features=1), + nn.Sigmoid(), + ) + self.model.eval() + + def __call__(self, batch: Dict[str, np.ndarray]) -> Dict: + tensor = torch.as_tensor(batch["data"], dtype=torch.float32) + with torch.inference_mode(): + return {"output": self.model(tensor).detach().numpy()} + +scale = ray.data.ActorPoolStrategy(size=2) +predictions = ds.map_batches(TorchPredictor, compute=scale) +predictions.show(limit=1) +# {'output': array([0.45092654])} +# __pt_super_quick_end__ + + +# __pt_no_ray_start__ +import torch +import torch.nn as nn +import numpy as np +from typing import Dict + +batches = {"data": np.ones((1, 100))} + +model = nn.Sequential( + 
nn.Linear(in_features=100, out_features=1), + nn.Sigmoid(), +) +model.eval() + +def transform(batch: Dict[str, np.ndarray]): + tensor = torch.as_tensor(batch["data"], dtype=torch.float32) + with torch.inference_mode(): + return {"output": model(tensor).detach().numpy()} + +results = transform(batches) +# __pt_no_ray_end__ + + # __pt_quickstart_load_start__ import ray import numpy as np @@ -32,11 +85,14 @@ def __call__(self, batch: Dict[str, np.ndarray]) -> Dict: # <2> # __pt_quickstart_model_end__ -# __pt_quickstart_prediction_start__ +# __pt_quickstart_prediction_test_start__ tp = TorchPredictor() batch = ds.take_batch(10) test = tp(batch) +# __pt_quickstart_prediction_test_end__ + +# __pt_quickstart_prediction_start__ scale = ray.data.ActorPoolStrategy(size=2) predictions = ds.map_batches(TorchPredictor, compute=scale) predictions.show(limit=1) diff --git a/doc/source/data/doc_code/tf_quick_start.py b/doc/source/data/doc_code/tf_quick_start.py index be9bb41e14cd..dd3cde9119dd 100644 --- a/doc/source/data/doc_code/tf_quick_start.py +++ b/doc/source/data/doc_code/tf_quick_start.py @@ -2,6 +2,49 @@ # isort: skip_file # fmt: off +# __tf_super_quick_start__ +import ray +import numpy as np +from typing import Dict + +ds = ray.data.from_numpy(np.ones((1, 100))) + +class TFPredictor: + def __init__(self): + from tensorflow import keras + + input_layer = keras.Input(shape=(100,)) + output_layer = keras.layers.Dense(1, activation="sigmoid") + self.model = keras.Sequential([input_layer, output_layer]) + + def __call__(self, batch: Dict[str, np.ndarray]) -> Dict: + return {"output": self.model(batch["data"]).numpy()} + +scale = ray.data.ActorPoolStrategy(size=2) +predictions = ds.map_batches(TFPredictor, compute=scale) +predictions.show(limit=1) +# {'output': array([0.45119727])} +# __tf_super_quick_end__ + + +# __tf_no_ray_start__ +from tensorflow import keras +import numpy as np +from typing import Dict + +batches = {"data": np.ones((1, 100))} + +input_layer = 
keras.Input(shape=(100,)) +output_layer = keras.layers.Dense(1, activation="sigmoid") +model = keras.Sequential([input_layer, output_layer]) + +def transform(batch: Dict[str, np.ndarray]): + return {"output": model(batch["data"]).numpy()} + +results = transform(batches) +# __tf_no_ray_end__ + + # __tf_quickstart_load_start__ import ray import numpy as np @@ -26,15 +69,18 @@ def __call__(self, batch: Dict[str, np.ndarray]) -> Dict: # <2> # __tf_quickstart_model_end__ -# __tf_quickstart_prediction_start__ +# __tf_quickstart_prediction_test_start__ tfp = TFPredictor() batch = ds.take_batch(10) test = tfp(batch) +# __tf_quickstart_prediction_test_end__ + +# __tf_quickstart_prediction_start__ scale = ray.data.ActorPoolStrategy(size=2) -predicted_probabilities = ds.map_batches(TFPredictor, compute=scale) -predicted_probabilities.show(limit=1) +predictions = ds.map_batches(TFPredictor, compute=scale) +predictions.show(limit=1) # {'output': array([0.45119727])} # __tf_quickstart_prediction_end__ # fmt: on diff --git a/doc/source/data/doc_code/torch_image_batch_trained.py b/doc/source/data/doc_code/torch_image_batch_trained.py index 5b5c42c7d0f8..205245a54208 100644 --- a/doc/source/data/doc_code/torch_image_batch_trained.py +++ b/doc/source/data/doc_code/torch_image_batch_trained.py @@ -29,7 +29,6 @@ def preprocess_images(batch: Dict[str, np.ndarray]) -> Dict[str, np.ndarray]: # # __pt_model_start__ -from typing import List import torch from torchvision.models import resnet18 @@ -40,10 +39,10 @@ def __init__(self): # <1> self.model.eval() def __call__(self, batch: Dict[str, np.ndarray]) -> Dict[str, np.ndarray]: # <2> - torch_batch = torch.stack(batch["preprocessed"]).cuda() # <3> + torch_batch = torch.stack(batch["preprocessed"]) # <3> with torch.inference_mode(): prediction = self.model(torch_batch) - return {"class": prediction.argmax(dim=1).detach().cpu().numpy()} # <4> + return {"class": prediction.argmax(dim=1).detach().numpy()} # <4> # __pt_model_end__ diff --git 
a/doc/source/data/images/batch_inference.png b/doc/source/data/images/batch_inference.png index c2aba39e55c6a1dd8ba0c9769be14fb5f9db3d45..aea38fb43b9d3b29e545b178a2f2c8813ee81ac3 100644 GIT binary patch literal 104887 zcmeFXbyJ*O_a%&L* zC_)s(%z%vj5ssNHS5 zG_=dPJSe>qGI*QZTh<`n*W+^9w0rdQ`(fo>U3q#5KQ?tKvx?g^CZzu9?i#xp@qHigh9bR0y0mjzjlsde~ z#G2$2C4RA{9QfmDh$QNdD|8=zJgjsWw5zCjZs2O@wBb|Xy3t6Lqu*Yz>7)#ljRH^H zwnO7|&kc%vGIt$4u!twB%z@rtsi{rfJ38MPiTD3$GpCqvX1fq3YULE=M`;sBd4dlM zrmAA@#Pj#FB6-ua0Pgrq9jCDT%(nEteC>BhEteso7z19B_!mIv>gwS zV`JT&_&A%g;f%Jg?F z8rp=xCny~L^YiQ-l?mJ??93>(D2ooGlI^9Wt-QEalKQ+s9K%6Uc~U?(e zG;6Qf4AyQqkjtjI1{~$wY9yuXK5eIlRG@L6>SL zg0kH{Hcgl;<$2gJW2e`z3p;1mSr{0)s6LV|{B~Z^20VH)5`DwHb$Jd!2r@ob#vs8+ zX1b~th~+v!21;=p&=-h!C?`Zs^BVz8Bg;oEn4t&2oDTLdEXM(L;rV5IaK??@D^0DE>Q72?MTi`Q{X zRC*X;0bR{VM@iT#Y7)U0@dGX-zrY;9o+x|4^iusM9h+7@?L->MS9_B5K7UD(CSrHJ z9ygY_=lW0#!_cshrDu||a7{E$opy~3qJNcWn7}BVZ30;rKWxfPQm%V06>Cq?9wskX zUT?|{zVxD>_1ZY*9Xjtt4C?t}pvDbEHmyh~Ajkj?t}}t`7e-F-0`fFfz7d|sCUTlQ zRZv%YNxa!+7_Z*9Jt4hqATChgF<1T^bfO%GO2Xj$v5Av^C7ro1C?dsyQdO*gT&g6< zxonikhr9D@cWLol!UfK@iE~JdR3G7h+=SVGQben+3R`AC=Ot(-XlpjTiGU{}RkGa# z*D&&XXF&5#!62rG1i7OVBl>x5Hrw&W^Lp|m4;BN58kd$1Sv1C6(s$?@8n`c%9cpLZOgKrwYOPEws$a4ojE7{RX z;Ryhm4XHY8q*3-bY~v>GfU4q%jhCEZ5j{7zKyWrjSkwe9^DQr|>BX4?4;%}8dp4J8 zSI%UyZK(S;;9MMIh<~#V>|-o5P^2DMvr4{h4gSMV+_G?4zUYigxrMX3Z? 
z8n0s&$)lGr7Q#t?+ykDMX1BS}y^$@#N?s~op`3Drp#}a|Wjx1;T?a3PcR~KvJ{>?| z!9VPcK)eDExYY3wq_Ib8_s6@T53zE~=n|00-?LT?f**3T^O*jU!cNb{5)tOkeqAgE z9Hz|DGJ}e1^L~BX`*7>Cvv{S~!-cOJ5M;bikpi>BlFt&<)7O}}V>dM83>OF*_5 z2Spgjvgn`i_1aC9LTOy7Dt5q{B*`um>~A6%5qm>{kq>%%6tYkkc#9cebjoE}_>D_^t`MuMFaS>ucmyY0%{r+L~@V&t9XOOB>H!?QyT+=%M0yG7Yn z=5!ytld*XlNNZ9!rAkb8TN1KA`grV!MW~4d-fkl8 z|Eb#CtMdUvJ+aCn-=N*mekoazv8esc2mgp%Ti;U(rZ74zOSW{+DpACaaV~M+4wR+4ISCn8 zCjGwy2SP|w8hwFoO88Tq*R#`Zb;El^+vy(&!(ck}Pf_PhKyqTw0zyP+3s;Oe7`v|6 zMn|@iLt3{Proo~OeF60cMUa-7uG=A>Dk~88x%X+6?u$?Atm6<{My6I$tq&7Y-x&Vt z-Ooxq88@($GvtWhbs?mpI&Dy%dMo&L{o}u3I)7{O3a`dxo6}F31N~;hpz9wfKNIn{ zox>$&TRn~SL+6*JCivME8>aaijHW#e77mv~1Q!05{jf70f=J;ogoY#C8{*Z>G6*Z1j6x^}&b2+NUPkdcF^o2`}b zc!APhlt8>`N|qBac*@*l^3)crC1W~sD!-Jb@By@J)+v#9NDD14Z$UVmyq>m;oom&NL2*l@v1) z9OR*LDwYZ7R=UTa6fbyn@V&*}4;LmW1e9^^tk60I#8XN4b7hn*JTDmLqazw8#ihC? zm|4?+PYf%!4P;7B)lFL7vo8Vgzic~0r6Ux;k8kO^&?8&?oJxiYM(`WG7A+hPYfN3{}*182k5YywaA)!zl6YjRv;p@^EWN4f%demc49=Pf+1QX z5Wqwdkov7KB`{WsgIQ#m|0Y-=cq1stEWu?VTVxNqOA(%say0xg%&8;%vr|Of7@pT* z1|Y%?W#}S)OCE?NmbH8&%N-h4)zu(GP!vIkvR3&E4v3wslJd6&mNUO!hGbDq(e|0c zI#n`jT(VoTRnI`u?Gd57aH5K_nP~C(Cdg4&fo;NOfvDcr6SgfarM@G7 z_6!if{XE`vSfwS(en`cHd$F$rbd9o6I|n&@{!eZ`FXpas#HY9FVB-IUmCygt-oDHJ zlMR`w-hDdCm#Sh~LBr0|Nb7inRolv+_y+#~_yxcJi<^J!#O=n zM>p>n)YISHj=tov60@XeJm1Egm=s7XqD`zx3?33M>8kiwxcKj+*~!tjF@^3pinxUI-f@C6RIoUMM z;i5Au(G5`)xouMsSA0@*a=N zukVH6dEiv^^VDv*zq^!&OMm=Sn9id$%Rw(UM$gD>#~&*H*XSK9oNckDQMC%XgVDM zPXH_rvE^Zk2=XYT%vv^(Aqbt;j2+6KBgJx^6DIx|csWUcKO4$ys`L`*Z04A1LL3akd`9QI*(0m|Ly@iT?_Wj-BeHWN@4JVHx^9@4>u z8`XcA=LA&rgs@bNr-dD}zUN|x6kZReTLh$+ToQ=dZybEHp6toByTD-kjOw4&_4FMq zI-bm-7O3K$(prQs-ofFl9^NIai8Fv24t&EgAI9|@2y|( zm{EY6c@}nXxMu?Ij(h5t8s8`sl*eQt2Jv}$pz?3A@{!ii>aJvcp7`+@8duq=h^el_ z_(j~-7Kbr@d4+uO{wEaA|E&=8|I3Iqq?bbF(%)zir4f-6yT%1R3AimA)=uY@0BRj_ zvr-~zA2iV$KJv#IH`DX03ijzDSgc`b@ZPFlQfNg*Dedr<4+wQE_Jq11ezgCz=g?eh zk}UA;!T=aLuXg1nCIrc-vw75OcX8lO^vD_Vr-p83v41zjyCsGXE#b14RguWb!aMkOCi= zgSUVT!GB{~URmhMP^(lrKN`tnKM)7A>O-zUWVeF6Y}y?cFt9FdPml9EC7aM 
zdq;}LxKvo{R@*KW*6yhc4{gX2nXLWJM_;N#lFQr=qyJ!FUO65b5vOFuQA_q%5-p7p zri}=T6M5Fv0Tm<0QxhH(4&EiwPj1-$_=tWhmZmoL2?HZi2v#GPM}Qg_g|=@(Un+~g z>kU=NPWd_l+nes2d%_otvqb{phYQAfh!J3Xj@SvC4zWDRRaV%c?hOD6_ccRM!l5J7GE~=LQ+#5Z@+yn z1tV9^+gTXM_Xqfev54jWgl?_pPMeu4(k$s-It{KoZQ112ktK>VjLXsIRM@)8$lPcb zHDZY48|9C_S70`UMui#3NX-;!D&o0HLL9liCn&*tVrSVJo^5!Fx_zbD|MOM7#On#` zw4uiesrZ-~?a7=s5jRvZM${@yQ>Qqx8*;0ZT10}cn6 zy->M7^aVja#u!Ny3&q2K5F7NnJ2&gf;Gm3TIabl;@sVg~lB$kBzrN zvVT##e)mC}2UWN)@^>yZQUsF+t>U4QM`rn9JrIT6D=%2+uPc%C@5cWlI%-7Nq9>Jt zBtnQQW2vW4lYe4wz{_4?`hs!QL<1~rRBc?n){_HNhzDC+K@g7s&5o*A$Q!LV(m;6_ z8kJDidiNWy{A?0{Ivr{DLaZ_^(BjMT6j|gS1dFe~U(bulC#!RdEcZ-xu~Se&GegAd zCn{z-{JJRXV^?l=mk6?w8fq>sUiVg{3G-cA z{^+b_d@X(HceFn*&CURf@^QjJOk(D&u&vha!fh}!f`vkciv5YEHqvxAKPznBK#E)7 zDD?3T?9(jFLoxB682yLXal2RbVSTeq?MV8Yxq@V-9^}w5koAOG?e_|^P*#+hzMoX|qXJ72%{4LzN( zZ!MsR3{YiI16v^xY5|8!)^p59h?8S(%Zq9jO^&K&p$kR`#zcotNRMu8thJF7f8D$H+oBxEGTHMH%rZgM1oF_@mC57?rbfN& zp9W`j!*f@KTt{Ha$of4kuB*GYLiOw0Ra1W~MgOdzZzar1|Mf*O270^EBN8PsLn#&I z%6!JQ67G@LR8uNMr8C=30Viv}`{Xz~_bbqnCk#(%s`;_2TXM|7L$2?{C|npb;jelz8W;Tay{Ubcm7 zK|aV7!jPRS20ZSD0h|UIprV}wsqMDKN~&+JqJ+YB=eQz=bRW;i8szcr+mPC5s+=HJ zwC=8c-6~c(+J@aSlcs3Fl^l>AFn!{W!cv1*e}erg zkv$}`7J9tH{y$5(EiLh~(_iwxykn}(J< zLMo(YR9U&pB+V?4c5^m8t|G6*@Jr#D;5SR!nnEs3rjSSsWcjk#qP~6FFW)Glox}RN z;ISPHx*$uNOMX$ocYs53^Q$;Jlzo?v_Hy4ckmGnEx`jYq?jWDAr5Ev8yx;@tw5eGW zU4kjMYZBM*1W7+zSwS4pi5v)&*cOggf_=$kRtD`nU0qWMueyG5G%y@S9XqUq=v$$= zF7jy%+tC%=-Bia8CZr=2w^F~ZMf2HQ04G&eT6DV>Z^~fg^$6vA6XZ*rbbUr>>rj-) zFX$ei-EID{P@-mM_r9I#qK$TbIf6FI{|&!`d%~-a_7BolDgH^+GfCfE{5IT=m0LRN zlsDkr9~kyj`(JO-6%tWHJ3^#^_c>gscf^%Plx#xz388;GE&YFOX7S+s2|jbQ*!j)|O8wL4!= zoxav``p#D^g(FR-(WZ82F>kmu()4_C4)eZ>%1`sw`gdmKvO|DJ0!5mO+LkY;u&lHw zIT>v^)GldG0M`*tyoSag9~vndv9NEuI+*o5xp^;yAdbCM5rS(g&$239$t%ym%LwZv z|F1LE0Qai+C(bM(lf1{?}!Unct&@^0eh&+eW-pfn4Ig$+UxH^0QfUE7;8@c9(3r{P+uZua9$yDla59 z=y-hbQ%HrEtqj>gEsp!(raEnIiB>0;JqHgJL|g-!Nn-{%ix=XO>y{#PMN>3miBqD< ziwJw#1`n2_Hgi2TQ4dmJuFeG6RoE;FC4lMc?4%d}dy6ps(T+c+*y&FNAOxOsC@oT^ 
zazLPbM@V`-pq6_xbfjR_)s?{U5AiH30WCX-X~vxR{8mFT8^ z5_x%onEC!C4jr~&FF$4X71#8i7$`7;9DT~XPS?5>%;{@|c?PL(!+R`+_u0Bi4JNDW zXG}#ST=Fc)!=esCqTwz${zxqk;e)osc9sE}Rr17zQhGW<3cEt-V_e2EgJL;SU7C2a z3f!)`dH8}sI8B1#Ux1pFAU4aCzMw1LH{$>#?2T zJ1g%0BNG*4D`P>JDZ^8cyVd4w-RactqoUyAnY#fFhplqDLhnc8HnvIis=^CX^9EL~ zlM)(6F>oR4Tw~0OQH8MN-G~CMZ~C;zd)CJbOI+<#^78VxM^+Ni?~Hv3)g_+zj6DAk z`l5Z`CA71%YkYZr_#`25-b>)|cs_8gYxIwIy!!NB+Z7?-s28OFfvDDpkE}jQ1XPEv zmvrRaOlQ0cFxy=|-#PKuQPbA&cm{is!w3uG4#fE?iYrMPqh~s^{?2}%G_O8_vL*4b zrv<{+9T&2s%-a>t^(PGasoQQ@TI;(WIK{%K?`r&sby_3+??gJ{GOB#x7h?a`IjnCo zb&>@iYlmQDh4mYDUYDThHB)>pX*tjCsKwvois(;ZEj{FLwF0L)$R;=A>UagbIl3ex z^2r(zAFoj_*_vxZG{{kF95KE`6Mt1J>08j(sHX^j4a7T3Lx1!>#lso7-CI&!cEO^4 z#LnVP@i9qarXxzMJChxkZ6DSiQaJQ&?c^0hns`(K&1lYiRewA48%EzWF}Lt>lw?S3 zSpU}DuLO2;#bD#1(qi}>FZf|~JRf#EmmqaDTr9TG1VN}I&Iu#}a~_9sLSB`l^ON3J zHnDe2XCdtVNX*Z2Qm$MP7|Z=d)x+j#f4|XeR<=M&@&l2~MgY9`5J*6hk77&PKdNgT zy!c((;JDTkw+Sbf z>FpRb^h`*_Ixxle#ZPt#ti6n3qwWQcaCJ;8s8F{=f(i0B=@5sk-B%EiHBA52ZYQ*( zarl4;OZzy7cKYup=bP8d)~GnO^|uLKMAYz7-O5>GSAr8*2U_ zH}P(b)N%@G9pvA&@tvLVL@zL+PJ2&rAyG2Fgsz)}RxO7_niUmKq;}|f0;$dvSmBSb zP31j!TdH-TLbCz8Bm-<2$u9>RKmRd)n*f5mM12$NoevGKc6~PeXy}8aNxLRdE{Yr%r$r+PXoyC3ZzDF$J#Y)21O=8Kz=gP-$2$~`%vJa(M2|V{Sob)t zre{J5!xx11-GrOXXih&YPIbf4VQJC&HWUc}Os|$0^_N1wp_Cf2d_AikS>!6iHZL?Xjube-Sr8w8?nwdzuh&@Lp zlmp$UeKAsh7N`O|+C{Sp7>_mSFutZNjBGuO$3XlGt_i;9+lAj~?QmM?E{>(IlU+Zb z`)=}CWB*Dl^LqM8E6|&^uN`l?>>a^$+h_bJ`}oxHa`O@*DRi^vLa4V ztw6Fc8Vs@aH-T&%9U_)RZaR@|v_fuYg1H?r1F@KKRHIn=H7dzpT0Y>r9zSYDHpC^S zYQ54{CVu2nYvs0g-)9T!lOK*o-lMNxLc@pZrtOChml2=JVH%sZb*g0xI^2WY>GPWJ z`nXOQPTt`6LOdSBpWjfuY~)N=t=Ycq-s ziuAGyI?x(9?)pBrr1x~p6@SaeZNSEvBG2wqf4=PqH-5U3kX#*ga{Uz8d${q)oEAS? 
z=ft=copSdGu5L1P=;pM8tBwD)k0P`{(`tcg?-r8;?+wALZQ|{3>H!{)nOk@+lCn(W zMU6+Q*b~vvqo$~-^YG@Wqnfr+=M{_4hK{xw;XM74Rw)VQk?DK4cTS~v92|j6$iW`< zLrMmZhFOO&`K>NAKnOaqelTH2`7D<@u)A_C>gp)E@GdX+*lz4S*9qP~(UrYDi+jl; z5JvlyGPOfW$Z&z%uyNUOj&nBbqFy=!+Sj0^lHvP@7MzOy!{8KX#6uDn~DQly~I%r$KREzdo14lbDOI~$$~P~mqwYGB$9DAj$F!4Ld-R} zp#279L~9&fSIR)%{&1=`g#{YOBPw>gBY*d$Wh{9(yBuCt#B#}lV@yG-WNS^J@!h}+ z`-d6CmT&FSFPaLh9oN%I=&OZ(>9*B0QNTp3EW_m<*#I#UAL9aPi=`m+2p*rpoe?s3b?3&z}`{sa2p&A^U-8Pd9%ynR{ z39i+O2%+1woIBE$8sc^>nQn5thK+Fl9f=3^8P zPUkF{I(~z4MwpzqF1@a>zQrhJ*%c+83V{WP#jI!Z!k$x zKEh#6IS&)wE^khmU*awjD2IoVpU?y&`s60HDHW5;4waBqG~F}q#Ipix*=n(+!NhY| zl)?0no`BUVOFu|hou5KUfSHATfRV&h*0WqJ-6f+%m_Bpxj%|ftJiLqPo*jG0se6Yl zJaUj9bDC~?;an3M|EZ32!j^IZPiPP@a)bhTNw8HKvT&q;vaWdZ#>u+1vXVksUjB`y z9eiyPUn5tHJ42!~eaq$Rmn$0e^LfYX5YXCCM8w^;|F!zr+{6C8}8#hS4Z7u(`)vzfOxkSXNW#AG}kyOcf7jwlA?TN;0e^lUg) z26}3`UnB6`ft)c645FPJtO6>2mc++|>uGHWB@2iUBGXC7QfhK7eXIT5v`}9N#NgS@ z+pJ515zs5$7`DHqri=9fbRvU_`xLc>?$_bNA}+7D;>*(lBM?!A`p3=iPx-=`#z?M-s+!_O8y zdL&BGk4tUssRSDIMogFpC74;1a_?z+cq~y4V!j$wR&7HGy)>egrshIBR#1u~$}^Wh zw>Qv%g6vD2`5s4e)D4q}!j7@63a$7n*AIb6bF!<}zqQ|qp!2UR(Q7Ou6)L&kke}=k z!$`{VXX{I$IJn4IVKoJzLpn#L8XK~g9)vrWp+s&EP-RK@Bw}PGaVapF-qDKNvASoe z-pwRdn_I1SqL9FkFt`1FYw){u*J-)=e1akoO`2F)kXc}r&U9AUNilfD%C3B=nXJDB zQE~0=c0i(X;uI*jaag0Iia&K$<^PBHQwt#HZq$m6edx95iU3~~h)%1swbMS5za+g{ zY>j}BXaVU}ZE#;YxCHYX|SBjq<_4l zrAVbJ1Q&!QOt&Us6+aOmGPFmvefD4{Dj%2`=>Clv%sy`a$x#gcM--%%n#cNt&e05j zBJwB>t(ihz!jAieykZ0@)iA_}kH{!Hal2yRO6}g9B?nPtL-^O^kBvh;@a=-dLsi2- zW11~Ip?V=Pqsq9bdGjkziTMf#AePl|y((RbP=zYp1H#(hiAf}ElyCzCT~c^*;kgG< zc+J|S#X1erz0r+mtQEPc(5xcfEm+-H#XK9V4zQAm@?Do}m2yy{F6W^sN6QL=Qu4It ze`Wkq+GTSp^OmEhOV@@yU(=_KIy>$;)%&Fu@%)dxiBv*g$Y20tjLTYN^ zkLePKa-f_V$wJ`v&=vivjWb8$FN?tFaw$1Hjlp$HkO95%7pGo}HmERp~ zYvwPS0e@G-{W%CSEx~+iBLNaI1XLwQ7=FPafj!>1aj)V#l&O~vXbCIDx0Ok-MRlk{OWpjY@xbF!=sTL*Dx~8E4PSC>Z2iJ~Bu!B5!I^ z*kHbQBJQ;ehzbt42|C@C{vZ|Aa4^r@|* zq!dgGq1KTX^77Y^2KY8&1XKrbws-!VbKA>!mHs+v9COMxsw)KF1L^E^k6_X> 
zmsGdP&35O9gAo~!(!VK{v55?8&J_d~@M0ODtpE7Q5m=7&z)^6#bUy2AU4;8pDZjLE zvQd_gjF!aS8lInzsW=x6h3^dy$~hhi`QadTti~>;D?^Y+=Pn*)Sjg$dJI!)P{YdBH z0*5Ls+n%@vlaQYJx;0)+F?G`lu7K`4R)S2!ckcV%fZOQM5`G|jU#Nbztc!pSp60t2 zm=FS6EE9r3rQpY|MrtuUar@oO!JZpWDkr7%lSc3yS)y0(ZtL&sPbuS&!E43ypT-&n zXW7GT3F~V(&DRkm@h(G>BAWGj6Ks}K(7tkECH7``tnb*0-!ag7Ja%cX(RoB5@oMta z&vyRgr*FmY8~kF)xe$Gx1)uYX#4rWE{KJYxQ2&t#Cste*4O5xNRI~CD11&E@uc-g1 zwbd#or-(w$miIWk*;@7kgSis3ZX@&$O5LOr=S%TxN2+;jxLJ4+g9c^&%~79sf<;%2 z7EBcpj8Ts4=cHNpm4S#Gx#5`ukmMd_^CTdd_zD(ZC=n={bmKu(aP1H}t35=9tF=yV zo=rEMt8a@c%ImlAO=^a?vyYOWFrvEYX7$A$!h*M;?fE^G^mh6QR4);q8aCWk1zjOV5$7bZHyU>zVVZ# z9$&Ea;>u0a)+z5ECnJFnq);oN2u^rscVpJ8vN08=hCqqRnY8)AGeu3tkc-_FL3Ys< z_hN;knkYP1xA^)h9MK}0VnDFc^cVHHs(A=G@gCGefk<9R=+_)kNddh~Z`}kGgL>bY z-Z0wZhzS4-3!3E=^6J(Tf!GB8q`PEcVVC?R?2Tu6xa9%fZ5!cZSG+H?O{kq^9&9^r zwcP6fB8UbQIw=0urK%^NEM~KkR{(#BMBbtt&XR&m#}=w>7(ot5>N!P&)L#T8yju>x zpribMkt!aXLm&LWyfZK$l)fyJqI&x}Rp+A0uCysrBBESQ6@uq}IS~Kb;B6Wao^q{$ zPN|Wr!GeA{DsG1$q0KuVTo?#iRN6 z32*%0=EoOng+NC#S06I9)}e{etc>-#ku3M+>~sSqZs(O;C0^k4jy6iSQ^OjjK}^3s zkPs+DC1G5lnF+Msdpqd*2dL--oVLfq)0z=WK-h)HE=e0#7#xN(rOe%k?EpHAjCV9- z4jKpu&#RxVZ^S*4ATNmj#r|8fE>_`5Z5Tzizej%(1pAU`^UL8kp@6d9mk%^eD;0R$ zQ{Yz-7u|;ccI+ZP#+{Zmk84@YGPK4HrOC77U~-w2DTcVSU0Z0?wpxsaAuHGpAahn1MsRh!qYkpVmOM zI2v2Q;?6TJV;Daf)C_N0@jbbGAZR$&`ryKL9uTu<*?vwW5kcC5r=wI!LwvGl!g!Ob9 z$dU4q)hgFG=knqf0%@2Kh5-2_cy`^=gO|e^{ED}qJSo=JB===kS`i$&c4aF#0%Ck{ z1J|3TH*0g@&S~aQ&#k3PxEJ;*24=)GW=#BF&<^!?neiLHQ-dP}Ij0{{K##!fiy}osB~Redw$pNOr+W z5^Kgh&#k(oqCFW7j}*eeBkTq1`BNqf%0ZlH?lgF=*sZoXzog05jutaxGx8o8`Qphx zG-ymxTHDEorww_Lt78;B{A%+=j$Wj2)y#MDjiO4U7wk7V^utIc(d}Q)p*0n#s>vOR zPr>>dhdKI&aC5NBdT+|+yzWG@`2B>Ha9nDhgu%i?alvd5Rel{7U?4*;!0_Ct2*}Tj zoKh%{zDcjJq&a?_s*PEP9pf^4uAw{W)-jkZ-k@jVKyr#ysPhdbmPi&#{jlgh00?cs zzymm|Z?8s}RM(fiV$xsypXMCdy@8d{j7wbZ7<+$2;b0Vul2J0Le|lIRXe41ed3cCq zu2_;FP&7?bTFI))KKUD|;7vtLy21YYW@LCpT!h6-y4k|V7+Ivm;e=?D5KRbBwpLX5PV^dV>LTkvs@kbH?$>doY@D_HEK8NK zUG5Knm<4O~j~aXtgTD$3s>iNlG>col-I9je*&#-ur~OzKI&fB~HWI8n*?VZi8y9<| 
zcmL}tHf<9L-ylT&BZD*W_T>7zk3=Yv(rDaY4?}H17`#`(3{XHMVqqN0#kDTKLdf}@ zbHjS9GbW#Aj7-&c+L)Vpra}HC&ul-%iz-jRAnEuchUp#g0MkOaXT&zds?-Gw z=s-_vU?iDE?-PR@?bFM~64L)D$GVv&F5&F&Sm-MByMsIa+I6_;Gx=PV_>G5j3re1h z;XIA{ml==n*)_Sp-nsr9%2!ugk_0*ZrK(g6-=KjOlz$SgRuSk-T_2Ss)81mxOrt_8yvi z_`r&#Butei3{ha-Jln17Yx^ub?8oY=BVx}?9L=Ji2BX5we$Xt8vam;sB7`NW-!~0t zSinY(bv07fMO&4i!+>&VUY_L@(Ooc6*K~g-8ii~0iBqjsL+_1X^jV^-S76D_&lFm# zw2I04rFxxF{2U1lcj@p^i1EV1zFy$Z^Q`Dm-1mgX!<}^l{?^Ia3{BRX`3z#pOq9+ykNEQSN0rkG^?}`OS1R7dTJhA$u zWrIzMwW3&21}xKhhW;`O32X>jF-sP%H_e%TQvB{r$Q#mOB|z~1XA1QO6jrH0M|Ij6h6?Kt?#B=DAW3b#H3rU&JR zBwN8Q5{TE?U~2L$SBg_(IT;#)-kM57y|(5jl%6`u_2;cgko)wAIME!C*;3O0He2eom+v3G@ss9<^ka4uu`7`Ii z$_ppQ>wvkuqT;m&=<)FWr$#6|chmys_D5VBW^#&n=mLTu;JZTMVd3DvY;(0=C5xF+ zv(=P{!t*LHRr5-3n?#d|ib#pRNGC;S0M;>A_&@a-IyOrRsi0Wzt)K8aAEh#1M zy77rI7ip~Dj7c4m(Pt^>Wi`VahFa)r70OI1*nZ(;*InVz9n`_vqD9xcT1M_06i306 z9pC1sF!CjPm-8YxxHBNMBvJejZxdd)l`%n=VFW(PwGmNn!ZSBf0et)QuqztCwx9#WiXbDSQm*r-p=Dn?Lx#8@eT?-p(U> z+n%MNb1dUB;0U@cJ}>nmIrxz+_iGAPt1iEa&ka=jdi1Ue7mpueU)@G9weQuZCyJOa zQ$-)eE*O_ddv3j23H_~J8&0bY3oeqXjHH%ZcEtBl$W^b}nGIMafgL2~MZaPEhRrUA zid5mP`>Zc(C8TLa%!tpmBj(wyh;vPOtJU zVQ2t7&G};TiM(_+-Nv~UVALEVR{Eqnp#Qyvt*r3*m70l+;FI{+o%N!8Z#D zgu)BMSr0s&nN_F9MpF)oLylo~g2uru4RKLHukUy$?w>-Hr@-p*1k^v=UB_0}oV}`1 zAoi;C%s$irJIN!I6-6+r+Y~Btw#~LRL$420|Ajc&D_Fj}=Fg zBj}y1Y86^Cel1^vZAB6`T2@9evZi+yBapZ3rJIQ=vinGdY*^Kqty6p+bfUGlwq@Gj z{jRKDSRaS|&lTHJp=Xb!>b@9ZPJ*jPYm4qI##Js~g`DCfo`teKHql-F#1NRi`d_e~ z&Sslly;fGu?=$xH3zkK)gMp1W(?7JG|FRB;myyfJMF+y4lO!Xx`Lq!^uZL6H(*rBOFm1_0N}576NgMpOHkgE$ZVwpL1jd(@_1ogTD=S?bVO#JblvlcxX8&)K)DRNL|GB+h#wcw~Y~) z=l<0cXLs1MJE8wdfZf!2uk86BQE6l5Wo*K>>nD=L zt4S;IssYe_+BrQENt^4iYsZs#nvtlu*?oRE4L9}SeQjVv#@Qr$DbLzxDY&W~mec@^ zSyj2nePwj%Cy3KKT75yiwWbJF*CZM)f@@gKUO|&{6h6e?hk6Xj5&;1zf6LlvD1*mt z=yF!f=-V-0{VY-c0qWf86?bFz*$kE~t9ebu=h&G!c!m1}iV`+5wjJgR=h&YAMbcR? 
zMA>y+Sh~AAmF{%tl9KKYX{3klAq9k?OS)SrX{1X!=Akb{qSvtXni)!nS`RamW~n|v7HNdj@;975pIC!u zl)K`dsN%l?TLgJibtU=tp-d9U+v9F0(>M;}Lww!WY+3PE*BFDVcV4d(T}HU_)}qww zFE8_YX7k@;CxdPp{J8pa9{<{k7paC>1Teg5>*EpLpU|UAY%DxE7r*XL7G{9HIAIvP zShPf$X_FKxuPXBAlsT6YawD|NuqaY~{~5J#NtWSVC}ULf32)uh;+OY#`b~6Gzk60< z-_E80_B8~xM(Ei;Ns7=OGA5YLC}^Y_RmEiX!)}$A$^^g9IZ8%B z6r~y;7{)+fD|`>6gY`@>)?N@MQv|{g%W)K>{QE!6-_lVrU%K#aA?}dfb*$gQA{OP zjvclmMHO1(6$jV+gW?$ z(M89|)l{}ocNfzFD{Y=J@cl!{2~7WqUZVOr-qIC%xL&4alQyB18bij7J}Q|)77TF` zO#Bd+%V@$SQaBX~+7NRarRE!iOyGWFn-^hLl2;>hdv}GmS37|@HKX1d}Q^cI1NULMJInCZ69wl_C!5>ehFjOimp-Jtg+SUroAtARo-q;I*dx*nUR%l<3 zDrXiE^syO}DanU0Hki;rLB0E=rj+)N>d=d#V_6wIR7{6^Fw1z?&RPm)c>}(VF2iIz zc)!()YzFH}*RkZfpl{6TK)*56>x>pRPwRJKKJKtpJe>EQQCS@V8Kn|R zu*3v_D1^)gTwYI3w-R2@yj`DD*UMs)4vnK5&v-Hs+8@bmd?WZo5MIVM;3N7CQg~14 zC^f9&Hjn|HAV_AFB&uj;9qwTqQYub|7akVUX~So?1u~2dQ?jlP&Y(u!oPB{(gD@7V zIHbJTq}u{801v8dJpnquq3?;>d8`G$o^|~aNq9nWBl8U_u~7DTg4YDFX+sB(fgg`L+Rm$0IqD>^=P3W)vh$) zUmYgCd(#QW4n9~vfw*JcBiS^a`>Ml&Fw??t5(Q|WC1Z?!#Ouu;`Xe}>r_AF#W^xvU ze{qBiWtShMHI(-Im{v^XOGpaqB!sfLed^NT3+8WKw~b>1n$8v-+ZT~u$d`8DV}R+Z z$OyYoyoVq}mlhQZaKY6BA#s24GAn4Vt!7O^QN+E~ndKk$-23uIRjJdE zX7KR`KF86-Xe7nh8MFx#NHJ(A`hd}gWBpXzQwZJ~W``9c2E8k3AGRt%4esA>|%oHzgw~T++TH z*WCVN?Q@xA^X;nc<%tPLfK7<82mombiyq5aPSVGqru|2mHd3+$UJ@4r1>ma>i|2KGDst{k(Jdx|>`sls2Iy$TY$+``WMQ866z$B0Cue%1 zwPD#q7=OoNhK_rZ<+*3tVhTAo1nl+3Kg;(e(|K!_vd@;4$jtB#>ftRcu*Mh9w6f_t zw?7~qE9e|K^h@EUmpK72tc~*$Y`u;ne<{|U-pO=UZ znpL$(()snBU$E_|4x^{q%Y*`VFjLhyHXcDzVWR^r?Uv*ko3!*A!)tB(Xfqh|MU&wzoRwq)WQm(JS{4JRe!bjKu1mJk-bWWhW2!lWpXNqrm7|%+R{05gZ zeRfAo=Qdg4_s0WQg__=wp4Mfg8BgV?qW)>i(t$zzY8gaHVUQ}%hCgp*;&z`2yA6un zEQ449mj?+mF;KxcyDFK!=MfzOJwyxO9KA+}DQ}I*I`_d!G}?M%K~J1UZWdCrr!~|3 z`^SJ5^}#S*L*%2x{$T);A3T7s7pK?R287CJkHbcUyq_nZRG6_}NW3Tz}zQz|C3%3HQgTTRZL4HRfk3UKFX zrT;iJQcHgL`E6_iaN_PA@G)5OIHESEy!72SMJXvf#eF)7Z~_<0zQx`r$Kbdst{}3a zt5K!Qtv;Vc; zLd#_?gUMX^0b_B1b}CT$LVcmOStPX;E_dDb-#`14uI*|YP9V@;a`ow5W(>Q%Og!R| zsIQ9?iY?7OE>^61*x7s>9JHCy=<j0e3%FHeOnIGv-I%KdHoOx-qCvlC@-ac$&*5FfcsiaiR;lnNafom|h 
z3WV}AhBgAl@E;{@Q20wQnPUZ%yv~@GgDTbSmUsV!>K+jvMuz^{IfQTw#3a1!2a8;t33d(vnb!lE}JH8JaUw$SK-HRfon$&uCvC#w(Lf^dRRGk zPK==in`cOeQ9KQ!Lm!gQ`S5aL^0QC=dFhNhE$>o&%))N`tURMQ-~_>MN&(sG%|0t1Ac}bEBNa z!-@IUb6Hmp0hBMJ!+)ctf@Q*d6fu$v!?vNA^2sd7gjfS#K`=}7_i;SP{HzoZ8ve>l zCfJ7TrrBsJh4QP(FoM<*Z<7I3Zi1amP*&_lt};2}hqNY>wlH;+W(YW9^3cC$kSLVRJSd8ZrDd701l zx`ATmy-khedX0kCJE@o2YJH^3_0NbM=lko2`*GgXX^}v98bG+!!N9u)Xhctij9o$n=Eq`z)Hs)6@~tD`xwX$-Di#LI8|9 z34S}5HqbuAd)H_+F#R4$g`$wX9fU%JK?do+4mHaY36_8memVXt6YXca;mZRu51G!O zmyYbT_fsn}Nof3JLH2X(mnWEVeY;JH$%c`NmC}MY)8bb~;b%Td8Y=VQjm~0xzcW}f z7yyNf6M}FXnhp7UK^d`QWLPI1CUIxDCYOynkkLk1B{pL+7a8-a+2WPNsA>lmYpz$> zE9L%JmkGu_GgHM|c%+LAM9;{$6+PDmlMUt};R8FSeUq*j*c@Ii84j_WCr!a46T~f~ z-WP-24fKR3^YHtr?ey3^IfZ_yAFc+m6}{<$>vSc|TJgPuPL7^ClP?zoBD|@Ib#mUV zCkWJ;YJL~>#I;I;^ZxZrOoy49)gpSrgc@D0K8ESCfCyXR6`YuMLF5@MZo z()dwA|KU0=<7{vFi^ufb_=DpnFEa``m9YLAVQm~OuO=*&^i5Y2FT@zshpyS>Kqwfl z!~pNf1Q!+(waO{gJbXm;rjmKTctcaBs8GMGHK2B(MwCu%*^Aj~K`jK36l0p9oWla6 zw;F`>*a(cU$=R^n!Lhuy++ZdFf9;a7a7;2398Nz>uF{9k)XTl*eDZ;?U+3plkwkFu zfA=dzY7XWXvE6sE+?YLYuac6%w)=%521HE3MXji8Og1_{*@rJHCpM2f@jCRVes}|5 zrxzj#zv^Xh#^&+Pm_;Uk`SuaprE|E6s)}vK)6~I5gmU5nB11td zU}9U$Y%sKUBNr3?E|7(jI*xQszGKaJwy{vScW}WA($)kXncTum@#_wVL4_y-3K(uR zW+M7I><~Eqixj4e=lXK{Qa?iHI|IIC zogDh#`!nquwBop9x;^Z8=r){9^f|yNKMLWjI<6d_{*PeurSxH4kk`m%nQ(bYOGce_ zMe*yIhQg7CIUvt6jmwsXa0PlhL>|&kh-RAI!?I6EJoBx*`=8a$0hm{DB=JNM|8-#% z6&WkzeYEL3p8{1_DeigpVvS=o*|5AKd_`s2hS?HW34PkNG4%9xjV8aHr;nuT3T2f& z!iFroh~hBc6HOT+4GV~YGxRb*FQOA!ux}TJ<-)!ZXeo2`xUEm4t)Kqy*?jBVEU))o`lfi7+UypvF`qwTxWpj*X44Y3z_|v&WH8jrl_>Mrb?<35l0T0&o)O8cL$-R zBvtRH$`Pj3e+HaatPi4y-=(XM>_ipzJUj@NHV{Vn#!S1$B?LZk!Aj(YBUb?4l_)?C zB!blEVB3{#L3N$ zk!S!^WLd~8D>=N;?pzyY(LvS0a{zwju6-%8P0s4R1WP2VN!&J&-zk_|iDcbU-_{qM zeUg_9dM)XaEag=G-Yewz{b5c^&0d~Ik*e)7cVtqQIiS<|3ooHYJV)@U?rPc&--Wlj zDakMMj)phOG9#C`qI5sc?TB@;`|1HDf>XgL)mg_s^qBX@!<(vI|1$K(L3_Q%TH%(8 zL^HR#KmuYQL8;xS`1TW~vmZi6O#$h#tF0+$_vXe`aAdtM$C^S1EnI-eo`vwxS;*uA z?~ohF=6lc6i}Ky?;4`w0{qyefTG@GR()i!}fl%V^HS@FQ^xz#P*TXroj=SFUrsoQv 
z9P60&Dk2_@)RfGB6k3$;MNe>LUgS>Yc&L&IMal%Hwp%0JXcUtx&wO6y*`gnpg}o2? zA*YU0X7d76q%^CdHg;qBZWdRCR(l|zyXGyud=1nvLN(S)`)+QPThO;#Sa^%}tV<^q zC{~zFca0YN$&s3|M0t1uL!UA6#+0#o3DY!2)(_^X@NCQaC132f;IrY&R^<9>$z;tH!rQ!ml1KcG6v0UECMxEBnCRUXs>i3^yXy3K8+~x?{BRLY zxM7Bv`9y57+`k{dJzwdG_v%P4+m^5hgZgvsH<78n@!%&|7D)^~Jdk0MMfdpzriBtR z2oYjX=GNd{zmF_({#Au@ort*&yCfnLXIqj=5duDig z4c?~NJXokwz%eZv09y(h&o-1?qSY~&zoXvjjqpGXBRvg1CGa^2z1>*JjBPo6LLkKv zbj>=E;rhn?@4fyJlh56XAQciyk!c7$*V&bxS??Ic8R|0QJ-e1-K#*lI*R|%Q!?k_$ zr&V_#enFa@>65ZvaVe|1d*B5InctXTgC;ZQHH~Hwt=11JJp0^I8%339%;4+^tcX%pi%BM2i>%G#2@&;YIr=@X*Q7AM^)0y*VXXNbPgPzR-;8oh{&b{!yXM< z94#8>l&hD>V(ngSl5@YGQns)>pSVyppzE*yD+$^Qz6)lEhqB9_*R!a7D;(y@Cd{Bz z0dtmS*=!F5#(Yz6r|`uLXs0^EFZfsQVcVVNJ0hnf4-$C_!+TiyoN&>#Gn<4sjF>?%#94-xyh^C@}b9dWx$;7OhG+oY<4B1K8@V!YJ+DU``` z(l5a}skse5VAD+W`eTQ|TID*$_G;9Vyz4IOQ{F4hD-7-Q7A63czV^Ky*BzwO7)U7&q+?$BaZaOyuiU9Kdm3V6+&4A?WFBR)x7&t8}Tt4cyZ+L6Dk$y3@e zU#x|*{{@j?n!bw%!mM z{L=kG>}=W{D)IC?uW#V|9nHsJv>}wo7B|r$Q}J1}VJ-BWqer3nTCnA2QKy(seAKEQ zL?Zb zic0`lhi8`Fh2^W4FV`Wvd@cvSm{R0uQ&#Q{&|X2M^WQ`xA?DaZEj_O8YGFW24#DQ; zjXz9B?Kh6_hBXhOXZ`Z$Gqc@}Ifje3eeKVEL@Ig1>AGmCYdzsyNO;{ zd1aNcA)C54DS?*>oXy4E@XNNDWBq=9@O{Tjpk6$>Xi4!-qn1xp?yq}$^L4UFB zR+NI%h?@46$Ku6TOj?ejL{EsrzScQf#Oxy4mSyMAhBp3nDL<-(H*{bY9r1VfiDQ+W z$0ijX`v<~34ZJ{s)x`j~I+LzadoeTLU0+NOl7z1=1-U2m*=laWBGL;xm}~nPvqaHP zaC;4C@b=b9a14Be=;#H_LXue|#x9XR)^8sf6j(0$aKxi=#O_UF3PM0-bA>iJ+{d1+ z`a%C~KUi|)I{Q{fTEgxANH9j}wf2n!2F2Y6{Q8l-QR$(6cJ$*`L$fJqYTpz2_iUzY z+=#0nk#$gdbFr@(E9Yl+QWu)~FU{oaR!!)qlve85(MU&eR-`Um^oY9lX5OVRM$l*9 zQez$s3;-3{CECE+WSP0j8?SaX2~u~I95~>vE7J=M9XTp7O7hxIR2+-18|V@bJEw*A zBm_`J3hviFpKo$dPO(bl{#Cc~P!Xt1XF$+BNh3xR8T%UyZPZRRj@^0mCdhqnN?b-C z^97Mt?j!0iw-=tL{`I?CH$Q)hFS!j{j}o^}G+zcMMO(TeJ&|u@grp|7{jzX#Ns);V zZrYZAdB0gZtq`#zRFBRBJgjs^AHkK$Thn6qwYL&6;wDkjF%8Sc`+^E23=k3_>%Wh@ zTWvBm`{n%_`FspWn@i-)_)=hdjD^AjWF;V{QWZVS2@L#Wv~dM~6Hp_RT>g0p^l z5p+6JJsGKU%SeEJ`t|KWS{MErIhT;%W5~#> zvq=vJ!CCK+w3iM6BG88XrEB3RGM1_agjeK zYmUMI@jcE*I`4HyBE%${q6XA7lTe`MFU)X{_QrCMrPU`m+7np~Z>p%B47#veZgj9Z 
z_=o?F;?uC%Ga_V@;f6rgG@PsJ0)Ha6Gu)$3;OZG{*(1L-c%XGAYcUg;+xuydVfZsj zrXOwmV26Hw9_Cs4>&&`jUEW3y2F}JW6q-*4`{kDr$d@&9WC}|>R`PtrH;AkU3uVm(!(lHTv8YO||p=JfX* zXortm@vZ6VirAdqchQ8MPxO`oC?YL>P`)8G-o<4S2wu+JV~m^hF5uCxNNB}m`H+>s z$pN>G2KisK+r5lfiWJmtQC)>CAK#zXSyw7!lWWCIe)rD++Hy0*^t5ohn}!scfIW_ zTE#I4#l5Wkr+f69711S5PKQYnaEeG?qC`N0hk-dYtBDdS0H9Q9zCn0ti9(d)6 zhZ%Ff_Ep22Cy&Q&q7ehkRihKg&LoI}Io#?I+!1x(V@)_~LnCF$U4c$^loFYBW4K ziOl%tQ{O^r`KlXJIRuWCoTG}}pn=4R{X$5IQOwxpq%J7DJBmJAQLi83ygwwoPOZ=E zi(qiQ5j1rPFhAY9#gX)Y7uGudU3tTBU7bx#9Rekha6iZP0rIS# z7&VADiO6@Ix(<|-C#AnJlLD`j@sK;wV1AE}-p~6&-Jk^=D5kOb)V~Uusd6PZKv|)u z=Wy1)wszj0jCi>Z4d|n&sfde;A>3!c)tSH%H2i88r_EixIy<)uv%Af1kgFd;-C9Td z%LI1EHW{|F$}~kBp88jxK5=X@8nb|1WkL?Erw#95F7XGwmoFWs=iSkp57_Z_V&9)v zvwhCmX=YY~B;r}HEz!Z^OXkI^3B~!Xi3{aW@uiClGm^u21^C3-xhy~`7xvE#N{1r| z*g3jb-(D$k9~yXZuWvOIQRzXylVNzlae|!_*?a?ZZzl8+AtVaL4Bk}G*;5`@+hi)LgdvZ@5GXRUyAVH#fr$SQ6h86N&W*tk+#|;#U=@RQVJo^#5t_!r- zRA(uk$A$89n##2;h0%@MP~mXAKHnm>vXAOXROAytq}MX7a!ef>Ck)en1)C?UhVNJQ z#bfh-N8SeQ$Os1(47h54safATN8#Lf@et8pH||S?;)&n>eU0oJVIxI?ZD=S^zRNwm zuhN(BMpJE#;*xk4$@u&MKnD1{fP15Q?u+8wfs}3u=PY~4D5FxKPS|nd&gxWjMWPCj z|Hh}j_p$ySZ$M$x@tUxf%o5YwPwa#>fr>3X^M^$_*B(1cprvbxhd7 z(@hEcDx_C%WGE&puvP3hM!5-2Xeia{kt3n9~%f*PNv7Mnf! 
zDlUGR{fcO|3&~}JY6a%(Z}^~@q~kWB!Yz?4U6R$XmZl3%M2W{K!L+t*^WP^lD2eLI;cdRMJ)34qq272eohtI{3al)S|pfRTi{+( zsx~=nk1ZGa(<&2@yd`cFUm|EP@}L9ISM!@Z5zpjSZVG~FyF-eY7v zh9zN#Q&hX{2Vz+C-hP-dVBUk!^@cVQnb`gj|K#F?wOzwe?UXy<^9AAA4P-i#Y^pTO z1o1Rqlr}8VO^s(?fVaiN(jR9izxKQqU^E%oKbH|_{uIC>YBQA+jtBcvH`*nh0<+pK z>s|&V`ciH4LMfcT>@x(D;XI236WPiKu5`CXVz`Cey3}q<+`Z5YGKoKF{Al-)Sdh4S zZaFP`5$w8oy1ZlRzL1zfD(4qCxUz?_AplJD$YzSFmFOb%lOyo&rGhHDFXc@^1smwf zHP_6VEO+9i=H-pt2K=&$JhV`w%MMMRjf!@5`&N-^dD-H98%7+b~&MaA)?WdU@64!vnn2G zh@@`sF0}U;DeVC&6d%evGPq`?z`6yP^$bjV)1oPwJCxrKK4Aje8n8LXunXk39`_n3 z?|Z6-Edc}*{ue-Mb{#QNFV`rT?Dexdt_Aw%WX z?QF^VY0u?})p>%+Zz9~%Dlky%h@jv4+)BmRds(ScN6K5)6a!jMC>q>E$d5m#*C9{9 zZXWb@I1BW;f+tYZPWd}Yr| z3??cPJw4RDJ{YGuS$93?DPN7{^Ox37Y`}t!|}H2C{HG;UrU;(fHIu>1oQYM}(=L zgKv0KVr3}Jg0gJK)SbFeu3eLdP@9mYyccq;(?2}d%dKj+F>tYH%uLJ~EPRXxN4Ngu zKz^lp$UPk>3qB6+ZGd>6JHB4cVou0l3N_!+OrYT9E%khi$(s&4l~)wnK1_c_y~i-? zdA(qJr84!|G%yxvDX9g`DI0gV;&r}c9ARHQwy`nmt5%$NM$D^3Mp{a>mAK0p>d{_P zHQcLMX?AcNmFR<$!j+Ds40CkwVh)l{G~79Ii}gII3Mg^L3Y=!pD&(eC;eCkY!k1OP zT$;Qg)PNvpdXQqC#0BInK58xLtVstZZU(Rh)#uu`twu?%Y4l& z(ep*H2I1zT#6$WySI|-3_QEFJrz46xn%DhToTQ&o+fbf~-1cEdwqHT#K`U*3NRwih z?U?_xa&FbtQHuvKiI76}2k-uTIqg}vxp9Ays^~5D1orbosFc3(>&}d({#=7bmXB0_ z8=1z7XNs*g&s>n?$SEL)@(+;s)W-QS?lgEwfQq2nZj!$=py7g4SEUCB-##7D6%QK$ zXO%?B2D9ZTk=%&G8xk_A&YWd56UARCU>OhvmrG0J{{wZt!7f$=yfQ|`cEz5|G-n=z-6s3gWNW&=F|a_^`(2evjEbOQ%7Ubn_pmEK ztUmxCpSb^ofZl{{>DMUP<%8Av6=WAcK79uN!!FFQr0$Z~P!Yb+&Q-w9tm(5u`y9=0 zw)iv?KR1eI*!V#6P|zJ?_MB=q2r>1e#u2*{zgiId)#Zz9_ORq34ERs({(iF1u91el z!*zI)M86o)x<+OahO5@~>Lz;j2f;A<_yUnLB8lG*w4gSuy$b>rZ2g9PjMr3mfo;s( z*=xOYuYIDKd&(sFy88vyvg|-lni<7W3}#$rqGEB`88H32JeB9I2B(^-S(D}bQe1x3 zWJn1enF}7iEc#6LI(qhcpk2Xbk=oXwhnhX!W109h%Uc9aF#a1nE%6zw`0b>_f?GuX zefxh|6-+C$ow6T8T@cv$)dbuPy<&?$uS_1w+7e*A9Ul3ct2eaq-m(^uURQa$SLrLW0fkH`5jDc74_(fswCwt zcp;fYssEv}S{hObqIB{B@nl_n$LM@&S$U~vr$KIR^oBKoU`_>-!z8ZNBx5&~AGPmm zU_>>Cq$W5w=)s=jE3s{lk2k&Cgy8hs4RQ3=pfomJ-KG1%XJV<-)|D}{;07qRx-n=2 
zkz(kLx|sBS_nbPJ3$Oh#CoO&&UBDdQg%lw++F9V84dAF$PZ6H5vtMqrmGSjFmPBBS3dN#zQ2nd0K@K|@A z;n{P5{(20FUiq$5`ST5#gw5YC17+thF00PX6&TXl>DboJb^5&s(dcN|=c)5_;5O(M z#R>Z5%f&B$}2M*zc z{9}Zifrnm~q1{RF4i4EJ&V5(3?a1cBN{Pf;8HH5AFJ|=w>_p6)%@mK?eM->UhgV18 zchb&NFo3+7)~UV6Idt;+hFjbdP2&8U+LnCqoxCg16+x)BJBHt++~X$?c_GOsz&p-u z!`iN!!msyZ^U9C(vgfErA|!YTejRIYD^@vlh$5(3t8clDjm{f z{pGsy)Tx`d1ht247Xc#qD*P(8Uwfr>K1I(Lxh(PRgU6lAR6vdFPj} zgeZXA-7HJ=Xz;T#$p=rYd=FAeiLFT%=0Y-1+cAKy)dYO1w;GrV_%6w$(e5`UD;!NQ zV~2&@#sfO$y;feXJeVMlHwK8Vys})A=6t7 zP`zAfg@PtOyFEYuyEfxM$f;ovXX#Qee>?>DRw(gILi{-wQ$DP8Sb>|p3GJg!2{tIRLJoU8VdlKkhDTu+I3c< zWNbr0hCU{g{NyVQEN48Rk{DUaF?%b9PuP|6)y{;KI?D>RYY{D=s>ZBUlV%sPvEU44TzC`XN7T92;@#u z1vsxKKvAm`{75e{LjP*#FN29Zu%3>b5%NIAbySJ0i=Jh(>TWrUmhp>LAP2W<4xN%d zFrm5osrdoX@|{Bya&W{Pw`bL zz@^`^U5;^FxYjnoYp_$vn}~E!RTBu;CU4Z7r9G{xZ6F*Kw;2KCB$LS{ zf3~V^&wDi0^QOfPi~gYIL>w4Bb@-V?am{dzk8EH8DD?SI%mI&@kQc6-IMJGPofGzZ zaj~pWfb|33Z+Y-`r0iu{CyjiyC(26S3GDph8ZqamT5Cw-_zvcsT-9V&+rad@(z8y7 zpdM$haMtXi?CcLcUMro!i-8Ku(T8!!D2lzSH?#`@0VBIE#YikTIy;(ns}2vd+~=O~ z^*~A`D5JukRdw~h`;Q7_J_!)D%e%lX>H>=S1v`H&#kE9_gTE+{*43|#qYFM*zk&Fd zR-8GRDF>%eR5+GWRUb8_ zE^f0pAFfFK9Tel=%kv=$WeoNWOFkwl1H8KmOI1p&$#y*R#-4i1`QPMu<=UZ#vZB-S zi7jGtpB6sO`w&!r5Hv!c>5TG}LHA9Sumcf zK(0%2PvI*!V{Z}3yEq~XyZ?ebG#eQ70+za`%lgrN_tYVS2igs{AdJ;Dvh#nEEQxleIKw?92|hq9T(T=};m zNA6s^mmAC?UGnz*DU!*T8=WR~#;us8Blw}5DZHwXUZA8E$LLu>qId`{a~{09L%kd! 
zT-*V~3~;Q48Nl`s>GsgB>lEOgW$>(~sXGcI_rW302q+r%>UQKAC!mHbKo9ZSj8-dY zSHl>y<<9-8FX6xIgq!Af=Xb z7hH>#3tQZI_q(<;QaD~GV>kds2{GsQ#@Mw&Nz-SYQVfO)b-YO)Y9*Bc;+cESm=w+t z{@Hj%-q1xiqvHTZ4!hm11pv8p8&zwTSMh41)qrym)Uam%pt9JxK-+D)6+80$m=-Gk zR&?u?$>DKas-yUXJ&0Xxw5teBBQuv8?{qE=fl?(6zuGjSD8j*m56+xaZjm1H%>%h*MNN z{6`xI<&{;UjqB#L!hxKkie*XhYGcHWuwyc3Q5Zx}3((Hf>wcR)ZUK8RKyxG^9Z0F@ zVZa?SPP*dHgW&Ree$^iuUCh_tSfx?cr-Tfxr!o_n(M*vFUd5ug1j)t*mwb3<5F_XT z-peGW@u%y16kNEqXVX-i4!wB{WdQXgI!p1s2YmUN>$v%=f2fKVm8FAo>|LtSxsT{4 zUreTD84B!sLp|Z@cu?`_jtRNWHjYm&<-pU2(fc1m=Avr%ioYrR+L8*P@_ws zrBdz2MStz=a+Dltc2@Ew?K)csKI)Vno5^WM!`mAJFwwMq;)J{p5ZRf_oh^SUG`Y4N zw^exY5mA=$$@E*}y7f*j94tvhX#U4hqjPe_CP-+L(YjgJ<&3}PIZpbLZOdO(fJvisCY zO3+cDk=kCVUFv*g*Fff99knv6cQ9zxTrlr~lrG7t+7pSF#ZbNC=f8Jz4j4tw4=|JK zN@}mI6B|0!+`_{Y=%DtdV19eD^iko?lxruIGNV>Hb9HNf5VOS=h~{i5 znCb4sA_oEjV{lt@V)H!~8kaRl;!wKQ$0J?S!uI-Wr%vRJ!4%#{saHj9QwA9H%{n#cLpR2S$gk$+w=^q%vbS-6hqL3aEO?GoW!eJQIL!k~KPNvZ zP%>iM<`0sgAT_Bijp!EVVtY-M zPIJoO@UQN%d8_hm`sSy5DxbaT7|g6heggw?PXzIvQdZp8ZlToA#+GfNx9mUKX-rFWQ-)Nf<$PC4tnFxzz zz^Dkei;mh8jOVSVBA9#k0ts|mpCU6qUKn_?;fpyNsWN+ECn}=ju7=*9QzmyFWvF?< zt~pt$DGE7KSs;Ryd>(`Ywh9 zmwW+L3pUK7@!3K#Us+Zl&x%HJBZ2uvBHilVCiwnm9wbTCA<3m;z_9hyJBCH68F|8M zJSGH{(8RXL`eCyDG(y6NnCK9kRXA@4tPTt`V2Pxm`y*7iKNM3Q<5$KWV6kImv{;jgb<2|8RU7B1+^AG4wYT|(Yfgt zj{Z))#^hvUE$#rYGe!=feYR4lHp&=~KC@2}Zo^%L8n!9CmJW5OL%z;Fy;S&HB6Vk^ zd*T@7L2ZP?;)-fR63cjP8!FJKvQ{FwsBpl*tJ#U4ZIoMe^k@Q9Lc4lY9fVTsyqONu zcVQc5-MHQk>1uEvhD^GqqhNknb}lCi+vcX^K09{IiX&WW&M$W*UEv~~(^BVmt`Wcj zP=vHYXKb1((vPLy-_7fm6aa7v)*%a3C7&%#W$hmLj%(-qLcLhr9ngu;akI!+2}he3 zBo}wj;eWH4g4>W`n^q7WNCOhGJ+Fx^{1g(NOLw$1*#FUqbx8?X!~p&BO%iRMf_TPA zzbeNhR3+!gu`9$Re7#_DE4M5dRz9v~%+wik8wDaLea5M^;9mJ83J8}NX3-yHO({48PBUM;EsNPg1mlv=u4dKNoip1!H}Obs1usN?X6>claBf_-X6P>pS9cIUS zhH$TO{*xFS%%j<3g&}Z;xfC745vfmI&J0kp#4^7-0z zZr{)0fZpK$UY_E+jS{feb%zUaq^{s-uh;^7UKQ})Rp%<5W;HCSL+Xr9cL!c=(+~wJ*?vzK0L9x(lNEkqziuM10P1 zE`F$kZ$F4TV@7p2`*3?2FR+e(ew2Zb`Yb0Dw4mPa-u%61!*jz%%TxQ!vK 
zCFNY2H7+&Mlp_EN9AYZ)@^_V;;sIrkD?mbkphhTR5GQu&?nlfXk345!Th%?+&H5r^ zd48`~ckjdoWwDK5%j};!pn?giN6e_5z|d79YZ%&DL8o6<))4WhuJ=NvhBXS z-La}AzOs%L#5UYjXhR-%ox{K@rg#^HpZqXRe+f^s1{D95CCyE5N18GO?HBrO@TYOl}YJ$eJI<`(XFozZWgDS_F-!9Si=+7y)Ue zZ>d)=rU=kl5u4OcdAR!bDf_S#k`0d%=VoS)l^v*$fX2|{x^eNEtZ0$)!TczrgZPe0 z3oQZ?(^pQ~e-kj*Vcs3P7sV7*LMsmgD3OyD|5*LvzZDM4AKK1EG^8|Xy!3PG^ymFy zpXVx2UX;8#$k6MHy2sJ1LeCmMN0eano1It}>8A(HzpNkH+JT^ssVfMC^st9h@ehKn7m;1O@QkW4fJFN==Oi2qw?t!)zi=-+IL) z=zDe-?x_B>0f5&hUNn)R@oiWtfRtP8UPF;KtvyhuG%6H9oD@2zfrDmo-F7A`uh_HPu9gkmI85ec0N zv)u@7A3*zkofwrZEX3XYZ*+=QIK!NK_e@e$7$64NpY7I{b(IsQw|#|oiqFtV`KixR zb9{mUB^ltpvS8*=t{(|WLd$^x_FXTRv3)fpUASwWpfRRI_#cSN5!j{8#kD@Ny2$f0 zP+;AVSWPsPR6&iP6)9DXW?t8>yKp;5SPUF(#@=s{RSH3Qe7mO<5$3I_n3CHEELND4 z+<~Y@4SYvdXp`qB!B$FpAetjiMQWUCS$VR8n@C~)+)UVO9gsi=e)G6kOj=}S`p`LO zma!d3;z&lJTe?uA8fUSKQ8VDUUm-z=+7kIh>v~ifq@zFr?$h>W(*TruYNCh6^ht@K z*e!7U~E@7VB{rM`4#sh?Ijtt08aUJ7Qcc1O!8H3(>~qto2ru^*Zk<}^EfBd5dC z^S)R6EZVF;F;u(6{80pC8Pm{?{R`J0ZYX{jU$K#zYqog&=s^zZWbU5$DIFhnQTY?N zzMP|nBdtF7rf&_tFBO_wIL6-A$f$xhB+bp7xMv-ylWZ_6C>yY_KZcF3Zt+hujwrzs z^rABRp7_RhoKt*{nvixq;a*wkcpwZHNRDE6?qN$o2|t|KZ(rttYug}5u`KB4boUfgq_$z8(6}s zm?xAecgfdTi?M0GU><~T(%-7+a`yn#0!v7DmZ(y74f8baKBLx;9VeCu>D5?bC*SD0 zF0OPI%3A~d$A3!qQDy^NgBK~>!q0AsR~d})9$Xoc1Y4M%lu;QI zn_FB`UC})#`{E8(xF4)Xp>9^RcZmSzL>Wr=07J^33l}IylV1O=2o6rI^Nje+h$ky= zN&gU!hUG{eDn2rzrp@m6o){l+xTeCC_3OOh5s{{@VvAioZ?{{AN`lw9%fjRdhkJBr zH59=K2bWz%6IsahgeE3uY_mK_Cm&F;W3veO{l9X6dRzZ}(VPTOjxi&%MEX0q=7?c` z{g^od0ug5Y)@$QJSm7PukV$XaSosT%gf6fEN})*{M_Jm-XqHWtopt*Ff4%Nfnadm zAC(M%xLIFCuZP-Ad6^^JO&p?of)-1@M7ByjVI?5LgI5#*wH1qVG^ZQ(iUA(J2yN1d zq97)A<~lTi-qIrQlXXZ=*NBaK_*kXndKA*w_WCCu2&vhI&Wp(Fw3r`Cm89)qQX}EA zKcZ_whY+Dg;nS_C+?tbDNP+~SQSkn*N|{=nD4R{97)b{;-B7fu$pLEgb{yT9CM+44 z-1wl*zz9_IMoAbW>ruufZ#da%&<%~D040RcJm9{-RH^wVbsWKVz_xm@i2sGcv5$!l zLJ6P}cXcN`J`3x(_s-NJ&SPXtx}|OA9A4h%cIDu7u(JN5@BJYD2Pe}UgIxYwVPb9Z zJ*1;=@3yy}L_N?7KEKGWcR|PZxSj%fwjx)1_u%JBNYlbYH4wG1i*5rP{_Zw^2&iYw 
zHnA`hcWKQ{1?|r)LLG)Cla7WNIu8)jqXy0YTkQ}w19ep5c4_hzM*-LWu&4A46p0N! z9L8RE86%4z@7nA(*)SH2o>fN>+AX@E-fhraMx|#mdIMl5Rf7zBN>x5CednB zcumk(h1<8t{vk5RVj$Q-q!TU15!BTV+cq|~aH*J1W5NLjIXxDihJWAmA<9}D8O`+W zv0+VU(nza^3%35#SOd)}!rr}$jqM}gia$AwA#Dk5q|?_wB1|n~Zp|70f<|HSF5eW; zpi_Y1Wxbk!p|}*iNu9-5XESp$%dUhyq*a2q6{Loc0J=R}bn5&6D~CZsAHO$z0Ps9& znjN5wd4PU=A_smvP5ylI3<2rur{s&sTcQm!L1Y#+x<)4|MM^pmMWJbTioVwkQd+id zDGUlQG`7}0MAd1liyYXaWy)R~emOufL$SSdKoXR3;LEZWB6c{`9}JkM8-0TWfV=22 z!d5=7I;qYUk~H9IS<3)@IR~#_c+#(*-EXLSjx? zF-q7qqLp^%TXwT%GMJzOHqOKAs=69NM2X~=!8f02MAo!`eLftU+qX4f6EXj7x>$H-D-Gc=Tle9n*vb@Tn5(055KrA1s3$x(nhB8>#BeNJ?J9nv>{B>Y+UV}0$gPW6 z2SVh$ptDj)EcuY818L?7-x$W9G!=89@a`@N&vyM0Q&hO)boPrsi18f6cC zoJlD^5Bz9H2zkBiA^x0}LwqU{S3NW9UY~h&AG!8jE+g0cLo3Q51>)UN{0jrGJ-d(% zcbr%XNZrk+b3FJ1qxVz5oaAEn>{w;}IK@pdtQ9=)G%siv19b^)Y`Za6&CS3TXZvIU zJH8mvCbJ#Y_Waa=E$xkAG9Ut4+6}=AW=|Q6q2r!3D@^DVpCUza){mmL`j|zEUYS-# zuZegTSTMgpJB!rQ)aoIxQ9z8$qG0|cD|iL4i3!D_z(BxX<5Q3Nw*k*aYay^YvRYa) zfWp?@V#{`RemQVUj!DdEzd4Ili3~je%sxhf0iZe2cdSBd5F&#{d4v4Z{*x8I%7q^P zDX#~CZ^XA}q%tzazO_m5B*TTe-XEIa=!b%W@q`A8d_*A`h%#}!_1{cG<8!e)CWiXx z#lyAZc4P95%~nD=eiv@WB1{58`*-aOsSz6vqe$O|YbaBIhWYaP<}_9@`Z^O9H%P6K zZIP41QD!!-^>)+P;Cwc|wb?UJ_+AI{6x^+eV&Q1@ug0-J<=*f+lDSt}?HOZeT-$iq zs-!*Fphah%ZfN0aoCUn)Gu`PA2CP&is8PkX;^W!!c5dW1(^6|OG7JKb#-lmmust;2 z9nb9T>zB86s6$td+EHc8*dQXpc#7P>8TVGlTROW7y}oFC&ikiXXXzl3Y}4hUH8e{8 zEQ+pC1U-eSivU*E*#1&ErOzT^0Wb`w$gOE;upk`mKaQI0rdL$xfTpa z9e3RotMW^Nqlh{?(Q8r%yZzn?8w&!6yDaE752{==@)l zp27_bXoxb}TopwoDjbwWRy?~tdjx_D=g01)z!vZ%j}Z94tAgL8w(ttt7-bYOFUW4hS z{=ZdKj9tI>_Q>VT5#5ynIun{p+3j?_aZd-(U^#1gt)2PYj~R$s-9!3*1~=UU`tR9! 
z@YYvKPDw1qJv%xuAStmLaAP%(oqO^3rR%lV#t7Xs(8q^!i|ly29r0MbGql0z!#8V~xaN>Xmw* zSnC^mjRZzAjrOs0AwyC6cV>uYhX|V16FaAc?G7iihYoYz>Fmx4`&0VtIwxt4Bi;OE zR+}TfZnx_qsHvP*!lf9M=%{~_J*05KzBuxs_$Dh%vQ?v16yb105wyU7vz4lbm$09U zKP;{3B3A!U2C4vLKO5Q(q)yWqVQKK~O08lxDaPk?IcEW*E3d%IV=6pY3`DOKAV~rk zPsZ2PB8O6Z8x=(pFM-`jkit2Eyt!uQ9l|2#5=d(*oR9~(Wbz_++oM{iqZ&15G8pPH z@i#yQ<{=c;lgq^lvggs>2UWWSL>-(UM~G$Kn;F@hayi>;WA+Y58L&idrA{=4d7=d! z@v9NvI}b6#!kQsRvfm^mKh`^4ld=2) z^_BxKVYI%)2W2DVr|5S?k!2};`RMYD40SP+oH%X(Lobw`H9860V%y-ztZxrv`jm7^}3mVHjq`1^WJY=|_ z(fel?(s}7ke54$=DzO3Oh;EWlzdtQYl6lSUA?1e;-5mk_`(I5Y!HQ8x5QGR!Dl#mj z*K_O#+>+i)Izn)eyx8!Ddu-0i>S`~atmF9$GEqhEg!U^Otw8lwYf%lh8cYq)(fN@_ zH_A3DgjLULhx_iQb*tk)w!jOBwGw&Dc2$BGp0^3yhqhOJYu$d#c9^~E)YBRkZk}x| z|9hXPi6+vUJ#V=Ow`<~$tRDCz=gxXJ#Jz@+t5rm1gKtC@am-8s1l0R3?nqDA4N$DH z*Q@uZSHIX{STDo3A{~K^ev;JroL4j^PK0EKGlqVkfoe*ZV#lUjjDTC2gx0ts@2bn# zk7oDtL6OHp5+h-A$x+Vpda%`Ft0{X_tFKWV4nSZbsw3uSR8v;(;k@BB`7E$n|BL$< z9UloW#pwHHNb9ENIy?wfu0682Uc;eK)!By@h-&sA?u&RaL_oa=VywL~5(ZLdk!fiZ zS@`HbQakRvvAVJM z!e)E;a`k#KA`6QgEFZhXQjm)PxH=^X^RS)WY|3cXS)e>ApTA#rXsgC*p?f5@k+%zPx>xJ7<|$bus?I|S_GAM?X)xkU4ywNU?k@)w;kt|)1Cx4 zb=9QNMAVyS;c;mkJ1dT37+c+!G}-S?;sVD`XtL&X*txTGk)6yE5N|O?U3!Rdcwe{F>AWL(ze+BN8}H?O zPp6cINU09!$PD2pM0vOCYc8btJkKyzx)hL@oI1;iO_*r8Uuq zO|}&_G)d5R+u68F3ja=_MfP9vFnDgzF#+{?dIbGlzNO?;R8icpxJFH1I;+;LEy z_eb@rg9o~clZ+gg(BRD5>(-dwcs8{9i0==-d&k3LeQ(@ z@vQjWi#JqV=$yG*t+N^Il-EwA)Ettz{H1}jGk%BXm#9$dapvQAL-$+@egYJ453thoRMLaa8HTRuEnDof%qWqx)4Fv)M-Z3!Aj624RmZQ1FVQZMG+x3}k7F$2Bc2semZKZty ztS3Vnyi2K~x^&z@Ul+=p7TxaS&oPy(>~({yywpu#i(yK?*Ag$n9RdShT!$8)z1bWMqvjP7uv6MIPK>*w^8TGJ-I@lT zp7@5Y0N<;&t$`rnw|Wv?3A*Sz?*Hsh~=#;f`I* zu+*-%Z&N_e`>&c#`_@46X-gyfalxfWs=v!^`+!g%nf~QQyZ_Y34slsSEe9T1jf?;S zpTjj-53zx#caS0a4#v=^1xxROHRtp?sHA_P()0SuR&437vPZgF{+AWfyRiv=*+AyE)ze~yHxgvX#;XZ`@5cX%46Lz%-HW~E(M?M&F&JB4hHp$11g3N zh46-LPA$HezmK!>(t{y~ygUW|dQeDvb|B5{(}qu`4AY{(0;pL;{%jUUWwT_~t@s08 z+od#J!4r56d##%Uau-k5ObzXHzh1dC6_vt;@i9MHRYOm*oqIHL1Q5fdTirgrZbhoB 
zWoSV$W~G3_jgp-C*YBnYc2Zhrd^me6=KB4RDMw^1EX1fGsZ$8i@vuPX!Yj>H0>)Wx zs`}T3MXmKtUkAV2!^!Q{WO^;f0i_FzohZx%MZO)zVstADRPpa^t2qxinPeFQC@ z7qfWs{9g6drKwzpW)KhbVK%ZjW-wrns_F>6d0to#X4xK~cduEqOK4bXdM%~6gpLnxgA1GY4HhJDYAc1dr>M{Tsg$T2_b7A~|f?5aj zvUt%Eh|JSTg(6hgG|RfqyYxY4?tH38vB5{d;u7dAvsI?--CE%WNpJ_pZtMGAIO8CPVt^;E zK~63Xle=(<#RXx|Sf5&7Z6u5w?JV9~yN93RqP*bDIBKphtIRkZoO#&TWcLHvMD>zd zFS}n!eh7W^F!M)-EPYjwt*oipyPHI@Bb2Z8d1-VFK7pxw)*V+}>dFeZD{%x~^b>mM zZdBNoFjFi@LHX(_r9MmP& zAM>g5Uv;=%!R&Op{0)}}k}EXoIs^xXLW%lj?%-;sxizKCDIIA}5+IE5|M>*K5_iTP z9_>9J<`g-{?~GY%(D*pLjiUO>bx;)D8tZib3_cxWl|vuY)-EmnayEgze88$z(|1Ak z*VAtIPuuJDtSs|wlOIIsHXhSnzbNYVNmE5>1$y2Waug9UM-l4bQyxAwmz2?>pF(0n zrH>pDKRZn?>hB7a`Y1tbS?s@6-1-+@_bPGoH%i>s$X9ulD?#$UgF4WA zRU$s2#ji`>KS?zTpPLJ;{HFj>@v8OH_M%G6}QT0to1bbh!h`++TP4Pcsx<`w~@8n`Ph7F*7 zaegELRXZKWHc;0fH}59|lp~$>^$LgIDMFilUq2o!k!XMk4nmwHAqsuD1GD8K)Xdh% z{cNL>4QF2S-Pu0-JXr%I3e^a#rC^CR6do3w6w-oP?|TtPaiTXkM1mME57xGCmVeY( zHt8g^OVk*a4LCp$C^SPk4>)+>eQ@KqR8(n%Iv!%%M-iv8?_VFH2W0A zTis23F3~dLw~0TlpvD~k@~Y0ri0{>Fo%g%I+fGXM(B=iB*BNX#_NB*}Qs-4ujbS{K z!^hBVtK%LFlfDn~qi$y|@1yxt_TpX^uQ!(Dh$mh+h-ZgYL_6dy=ch+J9HFpHoU=o&J+ zYYaP{;p*ulqUJKRMTX6T-FLcL-p*E$ztD~blJg)f%OD{}mo(%a{GKy|Dd|}aQWo*W z%K$0&sYM+z=nSc}XkX)jtj9CzN>8A?jyePjpFfmybr?OLTeNyQod8yiY^Z+1br_pt zB=xDW*A`-ud0WU-d#7xBU}_Qfnh>ce?Ch}L!@UFs(yCmtFl%w{9Jai%k6Hd~v*hZZ z*09D+DPkf&pu{{QWeU21NA5uqF)#T$lKUJS8Kms)rRqOA3blwRyJOw9O`1KOnfppk zF^bQluKN;A3B2;vH};pMxwha za?Sd0nfYV82D~?8@d~;{{ihF`zAdjWi|{QA3F?82?rU>NM6O@F&!7TYnQO2^Q(UU; zq7H%ghw7L^gOq=`t$xKnZDKGf)Cc8(0@AIg*2vTTWs`OSy zA4A-|YL*z+Hvif#!p<3)Tm(Cu${D$PiX)?*{g#u_p;6b(+S_EU;mAeraB#;%JKU|dq6}Hk&(3Hgl5<%nFd^3x>i!$$8PSGb9hJ%}>UPN>iblsL!&C?+ml3NB zi)O^Jr-DCjTd<~4Rw?^$y3?Mu8vms+91mQj5x(M;Ihm%HJt3o@W zNLTphLGz~gF6;!~*Vd~=HH%0rtyh}AfU!wcmB}z#*nw6O4_~Vh2p8a5%rz>h_&moa zr7zBJlYZKFgc^3nr|A3q$f@dGac>A3SLJa2q>=sCMp9g*R14%aUg|tNrG%cs;B~gg z)HEUSWLLR>)BmQJ^M;$Zg*XJpt|ky!`!aX(dDy9%o2li z*5=q#RWhTLh|cEna{WMiJMaBJ2sYcJQo5TXy``}uATw81B);jWINt2aGS8lm(%50O zid!{};Da+QT37mFpDO 
zs_ckB^|_jTpLsq7J(Ck4SB!5*?Mi`W9Blfef1D>BOkklmcZIM$Pgk3@8ol@d@@0=s zj$tPNf7A>Vv^f5wf^GH%Uo(>Tb(PdXL_jd0#nu?2#21;8a%)6mX43;H_zwIK0fGOlAqj(%>4@(>QXCJFuiF>!{+D zqK=5doUj>sM1s5JasN|xR=nZMKspn=;U^vhokH4)TZOr{h~VRW9N*(T;-R5e_jRd| zjJBgkG8^0II*yCI?$@bP#nuFHFNu=<#ZA^M)OL^(G}7d8f$bUo1~>r4Y$GKZE!#b3$d* z@$$X^G&BE!#`LHel;kSn9X&jVTxHG-ZT)(!@h`Jfq?;bcF& z)Lc~aEi0SA_!1kMZFHYU4vHTTvz+o!52l`7_jJDaydC?mcy$_Q_Nir@wsiY<6*64g z{GQaku2_DtmfX-LB1otA@X@4$O$S2pfh?z>D^HYNy_gdR2<96H<43%ZQ&K(8>mP4@ z#2=*ZSv^mNZ^4%y5G^rmI-46q|F9 z95d(hWCc=z1-}03H>^4h5Cb^8pNUt8GledluTuMj0kAoX$m=>d2POu{y_WVaM|Lg* zf+ui!Fiw|wHK(PoYghC|eHnXgcou*q&tBC-TjSsptHLtMao-lgV&vwOkyNYOVZQGj zO%3r|Si#zDTVWSg^W~ju6)C&TpO>QtkO^xo%ap4yPJWMG2iXrXRVqs`Apdr6(DZKr z;Y6RqYQMrescqhp1`?#a!00rPC5)oo{` zEw)AS!l#lLFxHwTDmE(aNkb6Q_Hcq{tYL@7wc>_~e$9{v+~Q-K0>k)_XT+Nc^#(|= z@JxDK70`=h63W#IAu0UVPoDASy8+YT+tnX1v0XhqBi5+{hfM#-QFZ%!UNmxw#!2l) zSYxQ-7X`xNENAw#jK%nTYp@kNp+KxINiG}%+oM_2uv*R?Ei7olXjYU*dqZd2KlfP6 z>#IU6Jb|;)4!6TTY2r)2VQkA{f4|H+q`C()dsQ@S;FwonZG)(_y z*IfcGhdgU8yO{ONO!5&*p@eYNo{3p-(g4?sV`J@yf{*KaD?}8|{-YsZ;$GLkGAi+6 zV7Q9lW@2vA1tpsc3kw^f3daZQ`hD+Tuu>ZpX_*rzUlQtBljNBr3E0jb`S*N<0TUUa z4sYuWQz!-;d}640XlDwal%ugx#&(BLnj*-JlOxf_NmMFbw^OliMvH<$i(NjNer*Q` zkr9e6HldNYkYvf{>*`7v^2r{m_U-PHo!Aob;|&iXVzY97jV>xOZFLP&4X`vhx-}%H zC^jGe7Q8Mehk-`gM1ljno-MW+$1YRFli;En^5pv^krwe!COJ%!B?!v9 z$KDJD#XDv#8;{R}7Ol7MAMA9&UMM+0ESig_s<3lPT#z1R-Dja}3|So#*AiD&TQNX~9$ zzzPNihMfydeJ!F6mve;rfv0NH14Un(ZcV-#{oIL<@~Vm4l65F++vCKZ4ie2HVi)|W-mc;!P_m{+d=c=4i*k`)?b$2{plQh)V^ zz()Zm<_POkmP#zUEx~XzU%&B4dQCk>a`$TnqaC=PRaiu=w@_4UxKKKY2-jGRxiM|w z1m#T;LH&OlEP?G*3CFc^Qu&CXr$9gvnIL}b>?1!{8BXOf#6|%4ucbCH_%Uw-(Z--! 
zR!cY$^g3NKrHyLFXTZ6l)TVhp8Uaj`2VLNyrGe2uI@i?p<%no(_wHBPX#K}{LL z*ETKzHiokTiMY}JWc*d86u7;^O*g4-fu2=&%pX^eziTdC>bFJ95D^d%T6i#iEJIIb z`~1jWiR`ug+T}`kZv`OLsylRX%7_3)YR86jL+d3UPTibvx12qO{+@bsDC<=iM5Zg$ zPMrucq(s&vRHm{p)zR`V?R{TNoZ6l1d4|q(-{!jTITW6^bYY2u>3%<_rs%Z!ID^`J ztnQ%P+MZ2=&XpDC+-0e!4f+BvHUO_IU;{M=_-5rYxQn-_j%|4K1*Dl0A5p@}`Aej$ zKH|_arvzKw;bn~OqWhBQ&%~50{@g8dPY`v_JK8jeWN;whyM#DMw zqI@L=G)p|LbUC4urgRo0eCc-Pl@&`Je@Y6{8KN_Y&sDufLRqJeV zk#&eW#d57phg|znHcFUO>Ohd$)j2Tit(DDxOxA93Xnmcseh7eA8AU@AWfaM9#_xPf zE{XVbd-%()wDs|~U;in&S}KOAa6B|Lbg(vI1j8A`SLau>!el!t1ZleAqSj<1wOc1J zcX!k-Gvv;0kL*m7F}4;aXouKtY_))aHJhpZKUAA~_9U?q17EeY?s1+?2k8~j6 zLI*6GRtsf-(2j9_)RWer@`h9E(K8zOvoLD#&PqkKJF3oL3~*% z$jr?Oc^8dn2V~t#c}AVFW?y5>Dq>XGNV}tY9fo|jk)$l8j69;HRfD8Pkf^AK!%fh_ zYvttRVvJqQV6d4OWq5zdmdh%lN)w6E5lQgH$G!gcz_L|lbl0)XA)>d>^2D7|RKyw5Bgz;X>mLxqX>~cecQB!Y4#tO!RnQ&2&G@|ydQcHs z=surRBCq|8S@Z!lE3&kt8W+p>9R_IKgE!bPK42KlH$3EPj>Zn%)^tvYc@2CME%Y+9 zctx}_O&r$LMq)^fl%=|MAnr_9dL#zAgB3*rg+dnbV4hgyu|w)3f{v#f&Kwad zo|u7Gs=8J^e+h)pY=i<)8KDg;0yzk#^+XF{yIUo$>}%|>=RAjJCA*B zQFq}bw1A!p?DdJ(Dq!+=$s;jmL9<1`Ydv2E{8Sj|-z}Gu-es+pZby9vqCbo1)zfQ< zVQHN=LshvP5ji)^>tDG14!BgiYEe@B{4*%^1-SYTjKs)l1MpKlD6=qOd}74&!-mONN=ceaQ_t^S z3i*I0Kb1ofO!cl0ex;H8hJq2aXv%58aj#bBZJOTHlpv|xdkc2Pp;mS{+UA>mW(ZJS z_PUyR+^gw+hO+au5QG5e$_<60Tw<#gj2oJJ!`5^0bn_?5>9S2cfUaSe8lGThx%UVayhOk{V~6V`#T~X&*ETe2wlQK z=YBj7;qy>%*|$FrbtZwBrio$B(p#o6DZ@K`y%v)jH(^ki_$GURPJftXE}3h5cp7kN8Zqm54}y zhiYnzMEN@YRgHJ{R5)5eO;K@~UNO(6Lx?O(m@ZMACqpz!{g}Lt3-etGDvE_a|IQn| z?F^^>m|?dLs;W4YvcV70*e-de$R!)B(BJb`v`|c}tW|+LC1>zwT}W_|n%vanoDceu z3V>if(EY00GnW_K>oPnM&eD9 zkhqI@G0F9Qp#ufd93BaJjEdoeNT`3qVa>EmSKwi`fI@_D@UO#To=h~dJN=I3enf?= zVgFl0TSx6u;$yh$-FDttM747_7 z;$CVx4)v~*5NS@q=~P@fx+vN3<2Q(4ZQ`D{@g4@j(4P0x9+cqIy&-4?HIKW&eHX!j zVKb^POX+@v4z{J&W%l}|TgV6q_KZoOdLhY@GBZ-wN31~U8d9wE$h5!LR$!fJ&c3N= zYcok&3R3Hbz4Qqi_6Z-;H@P)aXAg=5TFZ&LveMYO+c62i-=B!zS0$-3k`_GX933pS z_ zyzR=Ch+{6d*b^U^n>k#pLo4B4bey_+o$_u48@m7yu}ha@5yH2BF>R(D!usz^h=qeK 
ziw?MVB*yVUj}u6FCeRLFgX5i7ufI~2VIs5$2>d%%7{9Igw_5WXq@;LKqxab0$!)xI zty1A06$X(APPMXRx)-Fhp@H6=_8C(Im0u?Ejuai8T!2%Id2@@AM=r7-;h>OF{kq=>;y9d%wVr? zWU;R&VK4jUmnpC~t4R{Wa;@&A!ljJU)%R8uNa$Z!10$RWnLDq@s&+$beO-SgMTN=P zXmNLmLD58%$$`hdk3zQYjgnUC=?vcdrN>biGJO25S%?J%S&h?Ppt>A-#3whG1oGR% z`6JOLaWamRpn52ItSMKlTt-1AmcZ4~_ca7|d)mAKXh&@)Nb#{erb-TPc3b@Yd0IMp z8{5Mwfqv0cBMfp&lKvZ7Q$V-~$f0UkP4Wgyj%NL38=_aA+p8k?+K%UtpVa$h!gO5QTx0M z2!`Z5NH+7Y_{4{be> z0=;40t!pM_%wH_L?Ebu$Trr`FvDN*U@BL(4pXz3Aq`S;9HIRv} zP+2TNc)x)bBLKJaQTNf<>?gYQ5FNS~Vf;4+oUoz9qq!#Yh9B+vfWbsZr1I~L>EEyE z9Xb>{#OYHnz+fM~?*$CWfMLGaM3`YW(!j-}_Ot&|a_=lJje)?){8_CaX#rwGM2(mZFwY!DztY|U zi9CCa`8_j*gQki3E1y>AtmncmmpCL^uPz9ASvd@$Y`VPLQoE=T|5=Ea$ex0gYY zCWTHa;Us`EXU2T7_)YLHyDaPA+R zIh^!z6f&F^yBBBfYz>#eV~BsJT5ic5|II2d_2j0=i+azzh+h*+15ub)PfnG%sd1M zb#~67#c`T@1q)*NA5!_u*>FFt-1#or4v9$Ns1>pYiT}+IODlb-Z;^krdS#BMFpKUS z{C5x@Ki14NBs_XS;mgg^{bYG9rGW^@UBVd*79%nr%T~_hu?FNRI1bhP2-yt>i z+5#HH&?ogfqpZ59%#QxSQr9Z!gi275WaEkcWQ&lZ3ktIX38#B1a-@g{OG%We4iEkz zfT{LxnngG-2wZ=HLWDfZ_*Dvl%ah}885v40@<@>^)!Qg_ALbOLlUiK@nQgK!wQ*@g zVk-$`*0tsGx8id8b&+R$&EHVS9xhpGwA?(Xhn~^{D)8216~+6a{gWZ75Qw|&u?~B@ zwDk504_CZ&q9(>_avds!|Jhw_e}Sfg(|_B=ZfU~5j`;W#aP+!x&iP`@wMFie10Ng^ zfGUpV>OY<`8CjO5tsfOfS|z1Znf?nKYxrEcOjV{Hn*rW6*5gfKJ4jVTmuAnjDQ9s9SBmvc+e4S5jxpAIsLD+kLv{c^F}2Ld4D^ z$f1gvq_Gn%>VdVnxqRA#p9;mIWB^f{ND~#H9rt9O20y_3Z%K>^D4(fq+owDS=g$e$_Ig``p5eGpu){LF9-$=CWM}z=32A)Tmmk~G0^mQC%=>%A z0;I+A6myfG=oS|jrAVg%F)(Eg%_fcU2WTs)84?NafSQGL3r;JpMqh-9pW!j)`POc2 zQz7c}kWy5j0%lWk!3B&^YiOy=A^T6sQM@=0o+Mosx_Ezmra{*Ft!Ze&CX2HwWV>P44TA#>)8TBiEi zDz9DqAX&;x?+1@Rf2Is0h^p+gsVIw-;ptXIfj=PO zzst_KB+05ZY7tUMr)`UW$WcYFq~Y=`ar6WAye^-*Z<1)3-V>W8p`N-?q{JNz!t1va z`bl8S$uKhc`x@eHmaKJ+4g*S22$HZ>O$=*xe~SADgksl#@@s8=Z)Ohpqz?9}Y^L_py{rU9KyAY8CWy)scgM4J9-27g-6e4dEA0kTOssvz~F>G4WJ zbHDZq zF1_q>pk|`kV#UYcfpY%rroSF4l{N5C2a?)guB~zA(sj^gy<|yVzZD{}{50)1F5QJ^ zp+OQ_ICSY~$*w1wL=Sd}F_Zxm`yH(9e7dQHKhwiR?9Skh_q5#hpSJO*ciED>P^1^i zxCCpvneG*xKmIoSJlR;^(5CjoNyPsCKc>EdEy^xxSGr*a7&?dU4v}W)?(UH8l5U1B 
z=`QJRq&uWjKnX!YkS;+u<9ogn&rjIbv-eu-t`)>dyz}adu2pUk3zsuKmBa143Je#F zuJV$Dv%_neAFU<3k*TuSX`^0{Dt#g@q1+51jSv9K-^VZ9Z2bUM2YRG^>D5*n6cr%M zYP}Y->dPrY9VW)MQUDEA`v37p24vp^(bH}^Oz?;o@J&_%!faap*h+NWtfY-S5JM4y*2@FgO6h?Vno06SM@JR#7S z!vO`TpBjv(d6dQe$p&Wly=>Nxt82E>-z7`GNkmU522DxV7>B3PmvCm_;mGjfO7oMZS%_6$E*Fb}{?c|- zSe6xa`aa$e@29S7=)u*}uU|A;ru1I_gW! zawc10`d0SPVDzkmy!2@)xv9E#wef1`OuEVnlck34<1}B_xT7H|E?O#@oK*9hLe`5U z_P@l9-NTlpomf?>_nTntlFg0_9Z66wdIzqr=SfNuMP|cxWz!>N;QoU{-)pX^NZf%t zia(qZ!SXpYmr!7_z@ueI7w!A$ZIi`CQxZ(Q-Lm8|fEY%()jon#PPUcv@5dMPyXkBl zn~3e0R_4r+!z4L`yFfX5{q*Tjt!8b$XeIcL8L>0jda2H7TEQ<5ACWyV{3ZlgG?XFR zh#Ujmib1U#GYm8+chNfHDO%%GlEIU+9&>wYvV<<&pV=VjrM4=IIfF=l|*N;k!Cx3dhW4hfgVkt5;jpt${mz~y-_XAY%XoOsV;%YTB3 z3&(Y2;$>(IrYY+KbTOxCH1+gEc;#1tHjkd?fvZ zrquo*<}9^`CJ*-~iGh1k3MW=S_nOVSuM$d&DMWudCn%zYrZV}1WwCSm4r-{C#i zNW9@|ApuRd0M9%8J9?8iM8@H(y8oIPM`tD6@DD@k{o9nd$PK#R84yY8etIzK!`x}` z>z>MsF;BhhLn(=6x?=gj;pl+bO$wL&362yriRh9ZW49?~wM$+_<3u?qQ`Ww~Wa zBgusm6M}12yolf0%GXgqTMMpa(b#0gVPZ6gwH^ZoU?pj3d^a##f*(@4J&*~EP^?CT z%tXfyd^3!;vlIGvol(1~TYYe7k$Fr`p3|;>c0htZt-`5>(WO?Ag&^${)qAaZuJs4< zgfZxRtFV|9xBR`m_zBGW2hpULe>==1i!e@ur6lAzb5CcRDms&vLUzZ}xPgbdTNh!# z*Ymie`sQm)FYeS>opi&B$<18Q)7M{p<1dlH50riWGxzoPe@sRgFSTHA3dQO3w}CpI zt+_doyAI~>%y@n05_)=i>o-!3IYmk@9ismRCSSU5=}i?{dEAOcC`rd3Tg`vh6XF7B zH2|e0eEXoSKb+QCvUM7vHPK3TZHIPp!hB3BNP)r`sUZKB_ltgAGWG~{i}!!DAev1S z7}JFsR9isqRJjA&z`xv6D?%xuoJ!0^n>Sv(=`tR62#19tks4Lh-=mt8B!%DT5)el6 z(%K=3iL<;8+LED3(UfowSkXtrOxZ&R2e^w>^0l>EZVwM+Rd~WssLo53tBXK)nQihP z@h4mk$@q_GU?~YOrfv+8>#zAKKcpF*=-=w`6G>85pW=t3?-V=9RQ;v^Xcja+bvo&g zUqVb2mk_e+>nGdK%vW8lO0Fa<$tA^XMOyVsx2tALBD<^fvwAzv==plsX=t4p$r!JL z!35Mgj_}wiL_OtdKLlK0YOjY`3-Q~JEYDQML9$z9ovY_jhS^7tCP?f@+n8CHu77y& zBGn&CHGLB0wL5Q%CWhF}V1iB8^>I2G`5k$&qJ1Ja}yLG@_8uR}vTLhR-IbxrL5o zm{@61ScK*@;ThB~pnWiza(;PX>$fF$IU})N{VZrD&nj zJTOoy0KURhG)_%0e1DMym@fFmnKa)f9aSn{E?)W2vFQ9pt**m1y-?>R7Xr2gHGlm* zIk7|_^3FMVISwC%$}G^y7`HH6f3$>s?CLF=ouP@r*!~yZUV`x|U6nXAA?Xoninv$_ zp){4LCkGS&YZXlKeL0#bOas4fQE2Y{0D!2-hLR~viZ~Wf`N`{tNY(0T)C_u 
zoFkhW(}fgWLFP6FUxT$(fIlxKNzb~J$|Sug(vX%Tj>LL@vsuqpvz_-smWXKQ^&OJc z8LH9{DQ8V|L)O{OG-k?_X9vnhnB+ZPp`=1oe13DyV|mo~B*ALZ3#}*&r0B|iOO{tS zr20D3AGz}S*TeVjYbJo2jz%R3m7GPsI$SX_C_DBXC-4Yi-RD#g{u}t^^u9l?$q`rV z<~s^tE}oib+RX3ujI)H8ORwYD?@S`Po(?fQCJ`9naN!bAXmojSuez|hh zT1(6pZ{_-&oS`~veW`jK7-+33sr`(%mBGg&B8_2vz)IAyTW?@+Og8^4{f!Wg(=%C?gZ3NByV#X5UrU0ZaIPvkge^DYVvuLCDcJVm z7v822NRBTaFI!%6hMl6#qd*s5O3=uwTI2M&uz+`3n(<^|caTQ1?u(*US zjL(feE^c2}cdpy9yi^?P|BY88O>EoIfwf)NLhKi5^=;eGI|(_g1gaezI67 zll=aXtMXEh=xapi*YNRV%DErqX>&VQ2E>_mNTS)q(eE9)x%6yw^4pxa@-B*C9RMJP`6!oih(x319C0lKjZ z3W6+QA+kRMJp)5B)#AvQ1N+TWK1B(a@o#EX^{`o5rijZ+R!Io|*zq-%2KQ<#o#V&Y z#`Os$oXaFz3e%mxh*^$q7VUpmf3h<6^cF+IV60nPQILSIW_-XlLE0Pk-?E1BM15BS zbI*S&QO4(#FXRVpZNdw=+(p7$wm~Lcsk}{?4l|*75 z(4gEO1>-g|_Mf$U7&Mh-&+pblxu4_>ee7g7kN0Pd3EN+i9$Ay4c6unbvRX5@Qf*QfZ1_4{ObyBs_kpDSyM|0lZoc^EDV(4DWd0$4K z3Hu*LouowwpHDS+oCE_B(z3ESU84{x1g@V-C45SE?jHn|Bn4SB`tK&=hGvld!B*Zi zG1zss*RlRxxF0$BoVhIe;Yst@;-n^o`6Rx~4M$5$JIP6II;xYNw4r|qLTPEWpeTla zx;f_Q^t)wVEMxpKH>X&+=`kk~onQgX_m-6jpC!*4-(SWo(QMHxvB=I`Vqm~66aBcr z5Lvo(T)IS5qiRsg+@E6Mu zs!VDuyqrL{_QVb>iX&EnTreHwL=dJjJyql|2`s$qupv*D-gF@F{#$gYKkhV(Lm6g7 zW2?d(G7~M6fjTOQXZ#zTRRIVR5eNQ}Ah5e0YeSYa-$eEI^yv*g?m|c?bcNyp9BHD^BR~zX&gFm4j(giNmSRJURJ|BfPx8cgInuTse5jhpf z#e!3!qDD3u4=I%Oc$tCy+ro~~~^4{mXn^F0}C_*XRNvXfVK=?Xo zr8O4MfAZ@H53V|;dfuMbb~IQVu`HnmBhFJa(`>Tq7ZFQ5wZK@hOBt)JL|$*JTy+Q& z+KPwAbt(`J3>gfeXTOlCiMHqhpJ3n>c?yLiJpJSn;gc`3BG)CTH}m$E>A@JsJ|Et< zqR1p7< zS$`<)+j|ypedC;XtZLPrF1d9#F_%#DG18cdLXk=ai7Sa73mb$)5k3`!NR5$ELEE@7 zz$Tx%{KdvlIgP8hr+W6M;rG=h@@1C1*Z`yjSU~&Y5eAVgHx4HSl zmw>zT&FGjWd41`ICDU#j4|0?{54AkF&HXB+_8yZOfrXNE|2yTIqlI%X$Zew&9_g>? 
z5x`Rx5oKxmgQ!lXPGgTJ8iI=0Cy#TlZU~XV)y~a9R~1uhE}J@@pr=#0kt`z_sCnMD z708J@L3pLN$q4>H75ey9Kkvf<=F~085pLN|!@L)PoOTv%8&%;Pn)>cjI+Wq)Wg!qf z()=yE{Y+wfVnW2Qi6LPX!(aXs!av`sxbZQ5$&Etu`20j>f_|WHO{*Oa<%b zc{#|=Y(YWnenS?R0FnN%vzL{YgD@D<^B!9XjP3_993A zNSf$-#pKPct&f$&uozKZlMj!yTN0VjA@S4{&9WR?=z47ffqETpm0*&hKQJlSdRwx0 zZ8WaI2vYui@XHq2VwdnXobdAw!lK4#5K8pN2={XMJrz9zeL5r>iSdLV#nBXyWPHLt zhi(6#`q?C27XxrzV{+gqPksrja=7tRNgq})R@n$3#VlAxT;M2&tTw~m*UlV)BhNHB zqsn9sKfG(y1iITtt~(y-(XcZk$%N5?{js^|B0NNcz?r;q`ySl|Q9)nL7o=CsLJ-IO zdpVq5jIox})ol*Rcb%*3zJP)LgB3OFewe8xQfMGXEtr$~s6*LvA;qm!iz9B~Z6V7< zyRrseXJm|FzEWBRd*rCcypy^)$Jj|mZQ=Vx`3=*2o!56aCX})`TTx5~gYOT*CZ#Ku zAhZ@{waL^Vi;P%j!qG*c+5C6W?+gBlGGSn3ktot+7GEFVa&m&jvt&EGV~Scw!=SD3lOqDfX)WF%CDR*qS@ABhK2jhRBoZ+!2EK#ifh#-gXU1@HhR)gSa1l`vwa zBQ;>j|83S?$2M88KF-#OaQ_nHrbUTD?BJ5$@up?Z2!|u*cN$I$duY*_qvwXXVCs0c{2Hd2->RzY zP0CJM;qsq#Y&yh&2Q78HGGl$PBY7%|lA_83Z7l=#Eb9nS4-eQnV*(U(YRk83V0*_3ssmyZ~sl`e`(ut=w^c!Qh>$(MJS zUt9rs+WOP;BKYBi0zSQeKOljr3})^*!{F$Php@(PVpD>--QhCpjYb!SWuct3*4=p9 zHMa>^OfPSJ>lLTqJDjm9=jW7Fms+rLmjMOv+U)5msL=oh$pvtmwOx6HMq9#x1y+qV z$7POTGN+Qm-MR>apcEf9QxCeKRq<^*0|q5=DiZtM>sOa;LH&nG**o)$SZ~;=625Ls z4COz0_(q^VE3~|>CP$W9N(Qjk@_%2o8a(Ac(EtAgfK^rs8&T`~TP5ki+iKGyceNxD zP%1gNX=#?+kgEB6od=W>ihk=kmEI@f;@ z_CU#e&>T^2=Sd=O(1in_+o5Vx;|yzS%Zc}~=8(us*eXM=#aFyZlRSx`NC^|ooPkM^ zhac97rrPLJLV1BX1zN{2N#DdRf%{u6IWJqEcQBZ-vG$o~uzKah(^$e~ixu=B{I_z! 
zzIa5l@Cg2XF45dO?B&u+d0Bx@L$7;Y2`f`KPB7RGNn69xuN@=pw+IHBU&u1Ky0@pQ zF<_|Z2u7tRG5!_VEqN3M%`~9s;0KhzCPLT;14#eLs)tuQ1q$TFla)uqDL?}z1|Ahl zAlyMOX2QSu-iGiKoe%FVJ-{irlnDLf#n!m{1lS5qappUm(XnptmO{|YqnRJE311Vc zAFv%?;%Cy5N}5UL*rdn}++6rc+-s)^IfhR@3wE9tSK*fXG2@@TYu{KOYQyz84U;=}32*RWo@X^PCDo5_MoK^@P=QEW&lZen9?B3bPvTD_-tv2fP0 zlGr7H>!8hly*gg|OV)78@^8*#e5U#b@u;!un&*fxwdzyvlw^(vZQO}NC9?3N%s8o7 zwhVD}t?b#MUBO}$NpZk6Hz($vCrM1-R>sT@C0ZrFgkI!l5>@__``(nAyvrgt1uoDv zxsXkZcA8mB8RrXF`;WMW@0w<@vXdq3LV}e z`{+_;^({Lvhn2ev{;Va!7Mvi+#8gxBE6*#+mtOkGY%+ftw8k>Qrh)Rz1R>-w9h|yi z{{7bNT@^@PLzK1a`{X6pM?rCA$phZEl+Mx?+UD`U(URP-qTu(=v+}Y4k)OUhQYoAu zCmF0rnG-e9T72cQO~I`-!SK8vi5|Q+bVTgy2_fmwhxS3v1j%zU2NaIowL%Ly-jee`R@lb#s0@;+}caa+C`mzRTDH^?J!lSBA*xc^|QBh?(H zAYo^F2xH?@H3Lv#361zLT|ZoZLyx|RbTwyWF5I?Y8~sb9tg6!9T%lq2@54k)ZQz^>_Tu>APVxVWV|#KNCj1c=R^?A6!V01c~4l`>FM?;z5VUbC( zWpWO%Oj6KReb7`o(xhhDkcfl!_=1Z}G$03AfWSwZ8LcK4jK^v}rcv78 zn;O#Az_lPEC6&sm@7k^`o|Tm4mSk3u4*)3@3C*CRPYV9TaMQ~d64KEN+Z8_??L(if zyMVt$c31i?BVmp&P>Plw7fg?lq~7$`U@0L$ikI-|g}2OMSCdkNeM-DTEO{=Y%B-;b z{Cp#G_#gFG`)pd{`~Js`C4+F-=nI+-l3w=CSg(mwlf>wX} zk{F_tEM9g~{UMT+5Gp3LJk$AeeeMIs{2llR-P3SEpG$OXN+Bw|c#?!q4}jE3BG4VR z(xO`u#oLIgDl~Ng-fjYw@(iz_Km8^hENkz$xY*aGOZB_{+KQuN<~D{G(<0lT%-BeU zov)(|Rfo$nE9>zuRM^7KS&$?~M5O(n4Wdbx>hfGZ*!%86w(g6- z80hbII$~XaO?=_n;@wGqp&2guV}R6VTKW3bq4gRf|Lrim71p&l(-LCSk z7Z7~$Ez$`xxSgmS8Vot~VRL;-=|5{rJa+N#b@2ZzbpRs;7*9c!)%Jutt2s8GXbb{DP513Q_juHG{x6SS% zl&mm5K=zDwd=~8a=8t889lnOj+{Daf6rP4`1}hvXscIuDMq#n88VKF6@`BQYAa4tY zz;pdnDOC!QB{27^hvTMg#s+_CtdXdN$EZfub*j|LH1Ip%a>LSQ=K zr)R~i>^$*styJU=vSa=fWYVvWBq;ZDa+23K%bm2+*N;A*jc!mz=H{LKi7^A_1a}hR zg0MeXMwYKs$A)p)#G}Dk7Rm;3Ck61cAjbAmifpXfwF5YhC;%UT;e09Vb`wW#PZ>OU z;-h%gJ?lvM?}wNfx8QfQeYTH!JmRZe9Dm-!WRoU7lzkPu&kp+pt`CN5TMUJ_>FD&t zFYRlaV&`d_*;lPIjJ}2x2UAa7CXHK1ZM_9CPiN4+DSg=6zfSXySn)mg|1NbR3F9>q zTX#Gwv(9hXnE!Z98bBj{aKfAGmuDKmF~@i5dFR0aQKjhbE~W%j<@T6F;f84dFnyxd zye_k}K8PbeVRA~iAy0EOQHej?h?=h0-cj!-MIeRGlohR2#6qQjbg@~98Bl-MPMY+W z^cN-7;uYsurY`NOQfVZAa0*PmUu@AIDi^;TV~@bTvISFu(dWz5EJ(6u6oLDgAr>WS 
z1Ci6ZR+4GT(!QB;%A93Oed9oSX)}9wvS=Lg)aw)Vrnw-@zVD$<3Pi@`_KTOp-255x z2)RRa>j|g`+obe{0cQj$2=st}dL=SJD7$RgAZi-A`3L#0N`gk%<#qdaM-tXM^L<5{TioR-Jowfhj~qC!opdn7>864P0c6@ z&wk|*1ML}?`MI_s=1=6Eiu9Vb;3|$Ew}ue>+lB_?lLi6IYTxu#>qA!@EefnOO_51-7Nn5uJvTrN%+9MP)^ZwEdG_W>)Q zA7-8bfGGThRL|QsjnO)PJXy{^y1q+aaarP|GG?0P^$-}v9UE?k&t7l zI+zqm16A;%PNQm4ZjGJ^^|@DZKXNXq1*>!Z(5yYOMiF&M?CzN{BX(UtfGNVp5ge_@ z9BU*R*Y(v-U2ffRFoGc)ipONXaLD3W7szGWB9W~P2ny~sNd=oG%9Az~h7$u>(4M6W zXJh_i`Q=u~@u_X;$?{TwR-!Zu*$kx$elrxJurTM9&Ab3IQMyhFHaHu5-&QlHDVsC2 zPYlgeu(|@zN%JY`hX&G8I^P_67q8i~ZN#iin-Ymo40C+%N)Bc-`W0AccJcUk7E9^f z$xS**<-gI<`(INMGIqx9Ilcb;OGwDik>r!M|E4!EVGJ1emWb9^Q0uv+Dw`6EdFaD3 z<648!`n9bp^_y?(tQ~P#pmNYnJ^ra9{!g$N;AX?#*Kn44hxX8PXeSY4Na zzeR>lI9X1M5;{4A6L|4g&v1L-Sa-HqhPGIX)&ETFhA+54_g50782CF%0U69IGlE!! z$g_k85n^R!ZLUrp@^eFIx!Kr<=!^|r5iKD^-_xY1C}7}sys&aKSh%}O>ik84Gvvm} zs&##bZ>kC|Nd4g%RmMOOx6m|3M;SV^y7e+LIlU#MRktSZR#~Ed_}$id)BRvW)6&z) z7_Uu6bwaalfqGBeb9iOnq*)@T-&^D<3`MpHDe_FNV(IGiK6~5oB4Jk8U29^f5h-T8 zrjO#eq);}akKp-!dU)j zYzjI_%}Fjw53CNNGyh8Op7IH#o_ja5>mmULb{Qep`6)_n67^`zNsD%TudQ`V9xAr9 zu~&$F)T~gUk&S#kV2LYJ7%|T!kZTpsgd(_5mi%an$?7GPVHCHbi3@5_mi7yg8xl+; z+hU-$S7KgtH%WXzF99~B$6#>Y2_JbHKl}B5ryat}xT}$)RoJ%=Axf#F1(6o3L3I9p ztr@J-UvQ21Z64a^9OFK4pPG*3>=R2O#64MCNhS=c+BjOg#j(o4$D;t!K>>(c$y>VX z!!xEoc7|_$u`0FG!CGO27>plb_y&4vfGaReHoz)r)OPZQgo9 zOrOKP+T@sgBJ!v1r7V$H+4HQC$F?!$?4#2fEm0uMF{ak3UA$Q14JTp?R7a)C=I7)} zAKdp0)mgokJ4`KO=7cK(orfUlo~s|LPK7Q-$tNiUg3713PwKo_S@Xx!M_cB@5Yz#k zd@GbY-=Q9LQUnxgbB`U3rqx z)3emnl-blO#%Z)1r%>=quF2^i0jpwVhW0YEvI`*e`M6hu@{0jMTT`PnmeK;Qyy||w zL%2Lh?{{LS2J)1dU=7}G!wdhkL#O^O9>O-~vq2Yn=}t+Z@**a-`t+F^yp4O)%nt9X zhiE|9z##0ANBcgfD%MQ7CbC_8a_Mm{a#L-|{X)?DqItfa)c^PUnt*sedGRM|rhhFS zqqRBVJYSp~Tn9DrQFQiuIsj%@88U>kkqnGX&$PQu|K@MY4Pc&SKInhKg;<$=yqd&&IOER)UCDJ$Qj&Q6Pp{3K$ix>r}}fn{2bK2SYY^|;FA2AoMsn3Fz-C3+Az>paBF_TBHmgmjqv@*a((NjwLf zyeh^&kL6yb#KL0+Rm#KA&|IF5Vwj$%RJZ7uM;BfHEEr$wQ-in5o$5hf#-8@(+`NYy zUK9KUIuc@zyTnf{)P}zQ2ME|e`hQe?{-?uX+&cN!1)_HG!^#=C?w6Tx0O$_{(=5s? 
zRb$>~=%`x)jTOW8)Y%n?Wy6_5eGsY@0bAo4E0HfJ4xlqIPNH9g@MG#@C5@Hrz1vCm z<`?I&+X4CLux4?xa%@6K{7);1geu(d+fJpVXhLIoR}hg2@$fZS;>teth! zG&)jZ&<9E?3S6W}^(}OyeEgpfB0;kRjB`>x{-@2C;JyHZ30VK%bRSB2MFBbuetBN3 z(L$mms~+1t-%5-b3HI9qiw3-&dL>OqM2TPoY%Ms+c5Kgh_2d`*&vme6*VXS3Y}YB; z-YW0AXycIE;5u43yJnmht%mt3G|H`gB7Aey`57Q3>TJ}+STxF!!qFcu5esV+r+s(J zH6cVKm3+3^%rK92a!QF$fCYNXQj3<>VPkW5d|R7!M5#ocpj+N@2ra)(1Nx&YwAS_W z0xX2fT!Y%_&d+yz_#VKu)7TDH*$UDMz=)Ou7BR5au`cxKL{_?PYIGTm|GnQ*9-)p5 zO42#|&Ad!7$r0vgK2cr+Q9|9AC*4Q`iF`Q*W3}-vzoix5PUUCL=AiYC4&ia z6GTreTbxYTA42rG1{C3F$c^8L#gxf1k)>atH}Vx9>rUCTJC%DTD8?}n6%xOk78(WN zX4A(eZ;?2WLC~!?87)_2nXs-nHWAXR(d9}8z_u%rOA_d*xQbZA+Jm*eEA1Zr9&IrJ zbQhe5;DXn0E~1gW*rv*6OIM5R&+sqaMmy)LonM@k`qaC(cwM@UBJ&?igLlcVYJ46+XisB(2)Xw)}U>kq5Y%8+lAXDv9Tt!>-*T*xXhzM@n} zc(CB;PU8Kr%*x6dxSG zM4A@1E;k#@WPt40X|q>;{5^^NeT!Xc!;VN6r>$E5DaZuN1g?Hzj;;YUIwtG4^;YHf zZrw&=lP6wIRzq|jx|Jqul$84Sh{#!?n@Dosk83vNutspIn8Ie8q&ElV!bWBShI|oS zXL9fH%<^KbXW`~))2IulEh5nm)_rl6!Nh0IIBm?Ja~Yx)clnK2K|NO0SJYz(EG7+> zRXMwPsGyaG{6~HTJ!8|%l0Mbyp?%2r+-k@O4H=q3y+ko*6n+)_t)DbKZPV3){oM%S z%nxZ@omCh!3juM~g|<@HEjaWHQDR4y9gC5HxtF}u%YHOvhAJ=PSt4nW;I?|YP^%Qq zB)qrQ&?v-@qGw{#oh_8E_y#ldM9rdZ`#!tvFA{60;(DrX=y7N_DLnnKI{wi?S?d4O zT1`m>9|{?vLl~O=WESpqSt&$J)aOTP6RACR&?i~o5@En+}iV6 z^I=1LjnV`L{sk~$(_7wYmaE!{f8r*Skbe@fh0XIa3;z8C^n{TS7XWIcHQn=7I+9UO zfWGjXUl5`#2}&UI-RIT~6UP00FXs0t&@thI5Ofo4vmaXs&U7c98ycQ#HQMq{Kbwk? 
zL9a{(ZVeeyN3BB5l5;hexXv<%<|xP-jMt*;fEjy)B#sq%P7=!8k1(|vyVNL8L%08~ zbKXI+68*8D^700`)iuog#z|7fIr7*;>(xz-^sWX+Q`54sD(evb8*L;+am>m>{mZ;O zEDpFQNAT&2N%(0CgTf7fyOg;VwV8ODc_wu8_vVHOn<6XxEmgZ@Vat)1!wwxU-qXc$ z#9+vGcWY9L-jl_^)uSgOCfr^VMa*y7%y-D_f$V@1!(B&aw8TUA3e4^TXVdavyR(*9R=Y%LnO9k%8fql zeMMA+r`7;;8b{;BeV9^%j(^e!ojc^@1j?8*2Toa6*C)R$3sOhEX%Z5*&60A`P4D5Jij9Ylbvf4 zwf%Y(?IjbJ!)03J8~Bpr892qX0XV~tahY_KVKZM(dnc?s^lWY;(SuFW<8YaFJf^J1 zDra%hriRiIb5JqEI=em5;KKQklTeC|o0m?&09E}?I7D~WD_U^c8c$QL05%kqiCz$((?Pf!r=~-Y^XJk&TPD_m) z#MP)&TUSpbLuVLr1<}Mh4ZMkK~N1r-;%;^NhRnT4X9m7NuoygBav#0H_ ziPxrRF&6{_Z*c#CHz&8qH)d)ZMtXERR@T_kp93Cob#yW+J8w}X3Stjz8AEJXS?lDm z*&;m#UEB2;;afN zNb2R3$ryEersK!PSCVbPkC=oC2X2$%g7}Ha?PfCye`|e9KDjY zyYpNEI&pN{S(2UP7A5R#tuXX!3X(G6MzMjiu>MY+k;N4{rX7|UWCC5?h2M8Dq|53G zph(FMy5!SP`gv#*Nqf;Q0meDCOx6w-QmHaZzgDIHL1jX-lqpoE#0y2#YmXxWR=7V_ zD<$pGo=Eb`9)6TCXH`A4n{Pw57IxrG9Y{X_SMk(9iO_nUr#>KJF~49r&)MT#6S})Q z1)P7F(qhEL-5VWr+8P_R3N;)M1R3^_qocn|R;f7ns`_19&9{`1tswJ#`F{X7(myFS zZ3(b_d8l|#R1PVCmIRgtBAvAyKFQTkZe!Vt7_Ia8^o_r6-v?L*pO^V1EhZw%Rk=nI zsfqg2Aw^x$y&t(cfss6(2Owd5;jFdtVm4-Hpj4tu(8Saf6z&qK^Xsb>dFjfFLRW-J z1;f*O9I1xwH1$tLBjb4bOk+G9n#@G7`TxvSe4&XgV9f3~yg06ei>eqokL zGZOzOYUd*I3<{X7uZu(TcMLG1=LY$e_2}{CiTDu19P;8-24k1+uotn20E2RI-2-(> zN{Eir=~b#x5>|t&v;m2MK)631X7KNcPR0%XJ!+qAKK#ogK^EQJf`xOyz!MmH@*j6_ zM?$eN`ARIKh1n4f(fTqm?LbRCns+E%6`>osCEeab7 z-c>q`L0%`;23AW`TN9nh@&^}Bt7S`Cd$UNL*Iz5hbyxRhSge=83o@#LkP#Z$4?R_u zhI)qjqh|~h5cbScaKAz|G84eFpcb<>odp8%YgJZlPfArt#vI#CWmT&Y$U>4gpQAwc z$_s%#8?TLbOMLQj+VA8tCgRTp81-=1dt!0dMC*@Wa&v zB@cm8FuMv$>o?}!aR?Qp=6>rsZK}*HJJ1hn&)O`^GRm)ny%DEaDpK_QTV6vpFbA>z zW5cQurWrVQ4!Bb6bi5C4>a2u|0@AlpGZaLUF>yRLVdgvFBWPf6^um#Siz25g0k{|W zOH~yuap~)u9fk?KO|SdL{89P8I=Y=#U34L{`-=GurbUL~NNSxc|FP#ieY7$r>``aIS**OI+Ah{&Kn7{*P@4+HNS8u%U)fmOICRT9 zmYXWq_hM4ud`a#Z>CQ;+(x?Fb7`gcYstKSqghH;1^GR!8Gt#7PIYpU%cV2Y1#~nKc zg4m{kq^!Yav?$K10T^4SNuDZN)RTywojJiN!VU1(>gFYg8uTH*rb=Yf6|+dH5&`pN7arnRf&fv#}XBc~VI@!`m;2Aipoc zrbGG=4?^{&R13EKa5aiOB5oxCF0evAmIoi-1c@FaXcfK&gV{l1H(`JQUEv=d2a9lm 
zO%FT!mh8`eC*S4kkRHzFm6r4};|1Bg*Wk?8Cgz#?%b8)Vk+Lb(Aj|R$(tsaQ@BSfy z_MTL&X$~)x?1L{}V(cgu^TUs$1@wTH-}97-QeHli8xC1{%l7S7Y($Qy)(9TVbX^!< z9SLS^lraY!YDl36m1h0!prs#{i^%x=&^KyLCUt98Q)*R4)JUZqKMx&+kiUIH_l}c^ zK?D*v%2^))nyOZQY52Dg=gOld>E-7ajI7ke=^2?wr_Kr}9S#$tZ|RIK<*irFyn4Zq zq-newe5FKYFR*N4T+F0fJRy4J(o2S^a1!UYOocX%rCbe96|$TG&5DfGwb8?cl0X>? zbz6w=dP52oo#f#gWH4i3cQAVjozK8`qzSjq_wN z(Eqh1_P6ayTo%n{sptb$)4xbT%+SrO?EKL$eIIPKF0O7$*{hJx=yM}~U3Do$>ik5p z9;q4-s1*DO&XIr9koPOJaUa$7imPGS!7q6^oZYj)aYRJ4KHQ)!Os6K)msN@7LIRfl zLR4II^V{QT?j6MkRC4AJ*itxaqMzb2If(}tQ0Wpz7yK$Uj-|I+P$^agQtb$xi+7$_ zrxj3SFKPj!T3M*xg%z!{y2Uj=;n~uXrlfuBD7Vg<04-&xW)`d*#mO8C>Q4sSwhL)J zL|f%Kq(LRa@zGTD5*hGd%exp?PAUufV^=Jc>1nmAy|~5cz$VsUKK1VU zTMhl$UA5oIO2N369)gnSszj-c1Dd70ioXkgqz*~kEBLVe^XeGw^j-{MFq()Y4Q;8z zkVpdNWMx=C-0)vNeEP20-gy&tQtY5PZPvQln68XKxE@JSnA)8H7?ER1ZEqzfaTLQ4 zQi058=%o(2DPkH484p0te>3F9ujo?-nmoCugML9nlIyZfW;WZ|dHFDD4BK`D;vQr< zgvkWt7nXRIU>HN zY3F4Ooe})D#MC{OuHC&RK@nGt_I9VJJ4EI_FMs{L*IUhI81b>C_pmbs}0#FsW3d>4_G8IcTjM9SwXAO4vix@$> zy#~SW~wfIm?(x{yEA*P;*br#Ozt&0h$R!d^e3BSUdhf^G~nh&h4G-+PJ|?%#0cZ0;|ZHv1C7uv`K2o~-D;g3 zCzi|KIKBV+sUt@av>>Bk-J z+Y_GPu8jGm7S;0)!m>RU(}XhfMI86f<zLKwKSq_*zd0)*hbaMumHda>MgE) zQMWOOp)g+A@(~cxNO2-csv4^hFR7e-JtN-zU<(5&(qYgPi=E9R1UBu5PU-zC5)S`C zOy%6TxMOwf-<&b14KA@475aa;yUoRl1JPKRP2gvU=f#+fA01<5OEh-L`G+QO&24>@ zFELu>t#=kmZ~YL8p-j^4@}n-y#8Uj35)rW`>?3IljJQpVDIvVwt&67hYjkTNGpqGK zky7Y1&{{Xx)G>v~HHf$V7gJvy7gZZ=O~=sPH6Se^2uOEH2-4jlEg;?9CDPr3gmlBu zN+Z(U-95l}@V)nUKmHtGPCaKod#}CrS}+#H#VQNDVV)is4Rd*>GTk4Qn@cUldrTDz z(h%BbQcTG%EqFF!)L(0~an@91R$DjA_l7lU>lnO}qM`Rdv#+Orc%!RNqrCXHcV_ex zGV&0oSR`_fm18Ud!8IWhJZ1vwwaVd&NYukIUucaBx~T`4t5;_@fawlxF|;>l#@dTV zgI!ObP(*?`37Au)$DvA_k*DhiuXhIr55XOsc~svMhBf4W5nQM$P@b4R)Ml zkttSqxMTAV8-p16m|EJQexH(Zb+R`6M4AYx@BTrnK+Jboy?K){w?DOlrIvp@veDL%od9b1cB+3s~a zz9l`I?d4Xouaa?g34xcAtHsARooompsj%hnYVvr^;b7a)4|a8J8_NUb-K?r+4+euK})yBGiPjj~qc+I%L~75fuekHMY;PLkVbScsi$$B@;*=DUlVhhF*Z20B_l&~}Kcu3jdz4Vq+g!o_JR~9RvFH*8zsQmc$ z#6mh-I~q9_9*t!$BMqHt#Zv>T<&C(M!Y3Q7)g 
z=J4j-tqAc-lVzAPfmnbcAtL3dA)NDUIJ8&(gP^bm91;|SHXf=`ca-x;p)9HgrrX6^ zz2+?0or}-(gksAwz# z{G;M=@B{m?CcQbR6Q_>>Fv(Hq_J~$cSY6DR_jI6``QIM;4=q_8|6ad6kL%b_(-Hfr z>=)VzN9nBF++ul1>8_#}8tLFRT53KVui+qVlHvt~m;Vt{+*qeCj!ezyck$(%e~MVH z*nGO)NHT(HOt@qQigeLjw{&|5E)y&%MHpUklBA@#n#)D13fu1QEoS*X(0T$N?gaj0 z8EI;2T8K14an*#dbxr4=(mZKNnV+b;G(VKu_^XB-X@&99c6%vNV(`-7m&p_mTs-tk zARQ4Pg40U7NX`~jwxsp(z*o%3!7eWInG;4AA$eK{viYt-1{>GZ!dl^#ud&Ul5aVUd zaiR+yJ(>eW=^`Fw;=tIk1+tssQ?f-UQoRLYyjhhgV3g1h?dle^kXoRJ@L!>6NV&|%9{EOaLyhzvbH8+IBueNuc^6rg^G}f z`p1TVHCeN!roo>tA&P_G6gEsZiw_YH<5xlmz3#zFOU>l4j!0xyO>(T~P#BQ(mr2BJ zuGahblZ3xezAoRz;PTz55lUiWS5j1}ZM8T=IvP{|W2wW7S3q;v0c7hWtM z0Lt1pajW7%ROq6)@9l|kgye4uqbiFM67!6Wjpas}#Ahbm!h>_)|58-I&HK^uI;jkf zUkL9bXT;#UM@og3fyYWz#E^sT9FwUEK*JT(8l=qeh|~7(C53%q;OE>I8Fr6^xUDA0 z_z?mh>$+b~$ZToX(=Z(p#+;`Mmj4G7AWVlA0@+^4Dv_tHW|m@PWTUUKmsBKP_&Rkr;Xp@$gN4MkmA12 zu>zjz3oYQk@2P$#>LdYbnhioKNlj2|gffS&`1F_iIedS@9Va%7RB@l9uSLZI4 z{hv<&kioxn1BAj9=xG66&nD}yUx&2U>bnaQ87$An(4P;1127)G`tJw~YmG^)IX;*m zP*sTZ&~MET-~TmWf&o@55WH~zNg%Q$)j|%s8NUAOkpJ%`GUO?Yf_OnIi-%tNz)Ags zgkR1o=>xrG2wBJ*USL$@(ENW6!ujd5MzGkL=jq6Z0{+)#?HAl#{J*nA6EU*6JG=ny zA|W&fqv+q&|IZDxTqE+A0)Rk0&x?}tCoXc;oWqw;Iui)1KCY9H%_S2+m+sxw(bZl_uGN<;U zKB21J$NDqVIceJg#Zt<1^G)YNJCMo<-F6_p+z_n3wK~@1#fq1z0{~qrSHakS0FwqL z#PZ@jgEMqC1AYAWQWceK;v39=zf4#+7^&8bB6M$qm#;w!)OI)7VJjie`vuQLG$2^! 
z#pVx~;|Dd4#9YmSn2IAt&7P=_ltfRY1dnX2_w$8eOGS^46L#>h_ivO~bC39|$&1On z{i(&pVkr3)J`QvxGE*45Q(71w5@?9gs6vY(mR7|Y!h%pe?(Ljyx^+Z*o%k{ww{FlZ zdL7y;n7wiFZIQv=)p1?IE0B%|ylEehx%F7(UkDFuI~hDmdc0k@U7fkD#cW7|4^`Z0 zZpNBhUTTXQ#mryz6YmlAgXLUr+K?|30qC<>yrUa;aZ|JyKi*r=FroV+WTglRM~OM2*(hD+lQf zN5s(PfH|omZOM|Ays8MR_%{vEjVeFdNXLBmqjkF}uXe9^^T@wOvzdP~CR_>ho7aU-*2sWSQMtTh->jTw^zL$z421k!_H5XoP>yfEF?HjmHKz8`qE6BxtjW{(!|x8!>65ezBN!#a zjri1pZx-OSUZcUQA`gc~Xp}`&LC;d0_SU85LMrz%j`-5;R|8D3)=E|$cu%uiSK{-#0s#`1a2lW(7B^wa!nnu?HE`+VQ2TJIqUsB*bVS50?OBXK zK=5{0c?i^0>~L0LJQRiUW0vULw2;0q9`jYrvc5!IxLEC9KpiV=iAwgOgA!d7?VT*% z8F5Z_o(~3^{S1qLK?1Sweb^HrP5{R@mbczl0){{-Ta~+TLPL%Twsqg$@IQ_e@oz_6 zwD@y;sZ!P``cRj0AdVAKjVPQqrMly)Bi0Qjv~QPnD?6*nXEwHQme@e$+TKW zOj3ej@@(<_O3zR&JJ|18Aapro!bA_RD)gpkr_~5jEd@8rl4qy;*qIIPIwgQrVGKpG zpeo3rLPyGY@J!|v-$dU6a22Q1uVgEYT^E-$GEzGy8!_Ww^;h<{Bo-v>B`F$17cA)pxt=Z8=2_Z0T~3?x^z2z%@Ut{?_^9 zFC;jkCw!2_4?Yom0CcS{@f>^ox|sc6Aw}kL+J=EiXmI}~gj=mk8L3Y0rCYi1TU~6@ z-Rr%{k`%-#cAk>cre4QG>k226HJ>uk!JOoryCtWo4{S%LSr!p$$s&BZ9P>lolE#Dg z;tP-+)gV(6CAEen{f?;H_i*iXsl)QLuuSGj$8+PA6(OU$;^m7}g=n3NJ_>3LT}krY z^C)6tbpJs6@dd^de7VZ-@lTKLG>rvKH<6}(@*n8Pi@_NnUmdy=HzCtE6~<@ZW9p8^ zpC_9G>u@~;$kILEQeXfDAxzW zi523~xGWUcfiW&&J&R*4S|vV7#(xD7Nx$(o5)2k5n5FVKwRAMv|6*&)x*qp@dr|u5 z*Cl*6!+K;64e=jy@=xX3QY$|t->&jPooZrDv4-F$lGkRQe2k^U)DYWR#;G2Cy(Uf` zcrxwm6lOf#EiAocnf20+>C6xXm?D8pCCF=%$`9(qn&XAp*m+R!;lt^`-have%uD|u zUQv=$>=%SldZeX-7y(X#d&asT9p`e(LyIhtLJmUXmnN8QBH4V5`w zqxG{Bk%+OU?wzu6-kPQcqy^H3&C@S};>j9gKXM!OQ)Pn`rMw47jreoK=(D7_wfg5h z)j`%#*DIxs#^aPGWz^!PI96pGl&P&)RDPEX@o{|<+$Yl1#N7^lPb-s8iZrx~xMsaB z*2$im0aW5|2=T->?bB;ybo{E-a~%kuNB?@wieW>??CehLnel|>94p0GrAxp=u$5vw zUygNwY<*Q(vZcZs4J2{9l1|@AHkDFPhNruYs8b(`FEC z77)BcUpATtJuGG!;zTLU4)&%VVh><|AeD-WmXM%Y#FP}BDFy?;EIO%-@L6$rWbP;W zRDjW`Xy6~*foYsL`tWh9!q9QoOZ~!pPQm-xxb*}#Zy1>y3h_54w)X^r7o2oNu=9k* z=Mqz2lo-xs`!Nw=XT3+R+mVmos)c5)*`BvMXFXzc0;9vmDYL`6rPGDV?Hq3SATJ^) zSJxfr;pRls^ktb}3%-E$CTGMc5Zz59?xEQSdGUo=5H5OH8RJaRNe&LNbbifw?m%EQ 
zE!+O?n&$J3VUJ~rMSLDyc1$a~o}i3mH(^ip%+Ur(m`6*3%ThIJDrgH97#{T|E7?%6 za1KvQG!8@hsmMV}Cq*3qlaQz-7}79TxS{H#=y09x)AUIFeVQ=x4(N5k7&L#z@jEZ+ z43dw*N70bkb(T44!-^BSPii_^WnIt17Qg>nm_MQrl$WR7agF9;FZ8+1i*vrXK1uZU z=zRgLs*FhEMg0#;FXxqlsrX7)R>Y7cf87M(o8~dOhX^p57uk12fNCOUjP_Dy+4B=u)rsBT8%EM&fOU})pU$b3yuspArk)b67iT*fr|s3A%%6aN+tVs^vk z9BV`3w*Z@1X;&H5^g9n{NJ6VKWTHIikFkG8I>1;h#h<=n_KJ)Xt@-J9rsW*Tb$w8fb zl><_8gbp@!G7VuazaPxZXUVcveS*(;sp;?hAJ-UpNTs}=FXhI_hyBFiyTTEr%4A7Z z%xIm!ARZhHygf(qGIyj2M}%}d(V2ak9C8^}Gny=tAh8_R`%7&Z8ISf5rKMm~e!i>q zC_cAqbCQH#b0ERU=^|$e?5WkSJ6t85alcaxy7iuLz*sa!#7*?HrtQZC316s28nN|y z4E^mw+PT!UzBCXWkq7bLw{&8;l``w;#t?_LSiiJD0TJ_%7x((f@=EE_V|_sP0!Yk( zpJ-7aDKj#+CQ1H8aCi7^XwFb9g^>8Z#X+}@-{JSZVAWt)G_|)EJke0*w9)vd_Lw}e zlz}o;f~P;g#@Rc|n*IX`?CBqlK+vgnyC0B?f1bb*XC7k$pw;ezQ3`xw&!w(=SEb~Q zYOnLv*z+gtAs9F+P8hbV-b8zPm`nB;J4!iHjq^$9p+=%foYjK#$q{k=1#vEvU7!fY z@Nv|EU2Zapz^u9{xBRQGl@q%tFWC7{i&)p*8DAEgI(1v_X z52DN?14M_`?r=*J8&$xPzH$8{UiiQ@rzMOQduyGtX0%u zZuEEqysfRRQC2u4Em^8!CLLvh%7)u2w?!k&F8vML^f&nxvfp%NBWgx&uQjEM+4_4pk0$)6?!>O-t~h_s9+Yp&F9(PiqCgpiI4A{*uJc& zJHmV14x(<&k8Mnn#|_uC{q;I^Ur19QXTFf88w#Ngg|76y{q(tmQrqYU!mrheQ>1R8 z{m#qxorS=HHwHGc^M7g(?k}B)@ZNlbgYjyV&|GyLIm=aiRH2fjCO0YJ%n#teJj_Wtt)IRk zpn-+gS#rQcDg^5l-O%DLep93i!4KAFzK0i!-f>QSt&rQKL-Tn`{7s5vk+xXTvcI{r z4Ia-Ua^^(W3>W$Qrnxu*I$ePZ$-}RltB8P=od*qgZ?c!+kbEZ~zw_UTldcy53t1xwV)e8T0cEfNrVEJxbn+1@c0MS{}zK zY=r?ZNnjvAQx#{#{bt!|)qwm>bC^GG1pEz6i_HvK5~jk}5Ttj_gy8WV8z;b>JG1}@ zyPsE3Gd;JY+CD#V|C)Ytfnco+3JJH}g(qI4UcOlEaX%3d39o_jeczH2d>qDezdmHr zGguG}l7+!bPymMni+v9TD$plB573mIOzhh3nrS{;YH+z!Vkh zKjT#6u1ylI@Y!)RG7qiK%QCxyw5KVTjfkb~pCtEOhnI=(`BvK^$!~*u{CpPpG}7G7 z9OyY65+ZQmFHeOfMwgZvsIJ{gdFVoTgOmZgk=uI1hqrp3PZ_}C=4?C95=pY04g^rJ z-0BxeJ6N{ccF{KuE{tJSsAB$|^FlgmSQyQ;8^Ui*5V)LWu*b%7&EJ*swv^g_HF8t9 zycYtaw(Xw`f;{+!X&m1j=A`v#@kd3tWP(hQ>kY_KDl5SX2*%Me8T(>dF_??82#EKY z6j8KEBt65ud5SKYagIUFSklbk0vY!q1!xy3CDTh@~9p zzLVf{dJqc7Gp*YQ@7a6f&VL4B5wQD zVFCg@vhELqLbmGr)jrAJl(q}Wg*J#GydaU9o8ga!{a#P~>|(|zqV?1O+Q-;N`m&)n 
zn9GZEHJ&)yV5hPgc?n?lu#|?ew;WqE>439j_yyP&v`5}}#(>UM;M1C!)b`E)2)yQg zppX+(>H}M;|E=zDho!O6n0F+%ff@OWgE8Rr&>b|JGgi=6Cm!<5kFF>qm>f)i9v^A+ ze!HuS(jNiTYi($)N`AX|RKuC-e9`nUo3*RsbE%*~b&rAwCyPBH+ixl>ozLN-c+Kgl zC?JH^fK79xeiJd~$Hmp{6xP^HF{*JD-m@*_95(fp+$~A1Mmc~fbmO)eO~M>cdv0i+on;mo3MMtl{yZD=KI{`bN1 zbP@5ZX=B#~^ofiO4Nf{_T{D1H!Kb?^x=h+Y@Gj%wm3lYDV9nN*3Z(k=ts+3i2>{YJ zz?`aFqfiK;6HyN5r2r2(dWm$7Nbt;hcC*EA>EN(`cHt#x*%_H~A^!}C#uoD0w(ynJ z(jAPyPQRLFaldah3{a9ff(G=Y4#v3Ld0u z4R&Fo3X8buP#DNs^$mH%xN|(dKTrC>d*Y~KPT_Yk5fMygQy#$>JuJUIl2l!OGd(5k zbG?uN_uo=Cc5*>j~w$s`}3rR4!vMW}`bfP#ULt2~LdP@3;zgBiB?5z@xkB;k^@ z=dN1WwBp!J#Fp=a%&$3fWjZ&0TnboV;iv+6fklF{rIDjATp7%W1x1!UnnkA!V(rn@ zm5rcv@>dvZFOVD=?gL%o86Ul~>0kn>ifj?5*h2)K7KI|am$DO3{C2@fP=p?mc2TL_-XHlm@N-4xnXOI!8jWERbL9P2Z@DRAhM0x$0)1O!G=9hz z=E0s+$6`|gJ`^6veU}w+nLAczBPr_yE6Z2+Qf+lc7$d(D!#LDvQmv{rIKKmfy`U4A zif^~QMzWZ*gMx{NhYpR+LJfHM&wA2hLl5(F#r!}ZC5cv&^4uMGW&;%H@a+|EU(4p) z{{d7cU$&TkS)~DgHE*v>cwpVBXry7xAB2%j z<@L0hDKE6IGs2b|Z*JJxn4FospmGc@v=u0JFxx(uXq=Dowk0?>XvJ@oN9}erD>&MO zp$+b?Zxlpo;G#oWwt-*pO=f_6q?X3ZI?}wOYB2S(2m(mpu#E+$8)pMlGS+8ppQKu( zx_{v`Qoj%;nZp6HmDn07z(Z{f7pdt`Ni_PgjTu*22E!WL#8!FF^%xjN~zXiHO5M9*H*IRoccdr>2)A=s4 z*niiqd@26_9@I&@cfEGqFTadASOeKKW#Wc<)`(!E_W>tI!^*F&#b><<0(Ik36;5Xi z_G`s@X;|e0(M?Dde9Z~bR7Z}#h)u*$3oTs$#1j06iL5Q zj_S#3DRgPYi3QZYZ^rW>NfbCuL1c*z$Bx@X?Fs z2G#axY$^NWD$)4$hlF_W{24v*T?k(ZL2D1UxfitnwlZ82f(k0Dmn?$kZa8E-ap?VP zU;4@MR{*8**Q%oIIh6tjw1FXP`GTI|ZfpjOG~;ROHAZ2}gwkO7Y5PlA7LxgiM-*cW zGyc<~i%c6@r41HU&k2R53Q|ZTe&>C8C-c}7s1EWYsN{LphW+E*zp7~_>8e+l1-Pn^bZ^^Uc+3Pj)9?k*@c9?QxL47%fVq+}U zIJN3EEjKf;YEHQLY^3})oEJfGCWnVpsOkHx($pAQ910xD2~v~@%RtUtpDRzHcgXf3 z>x4`^V4Eic>CtDMH9M#gF1V_DPK}Pkd)eBm!Or2ZEJu?D!PtF__XPeh?^jpw@R$Ly zTr%Z%R)A+u038mQTz{9`?1*N}tt%Fz@NbU(Ulfieb@EXUNJctoKR`~kgRkVQ z2xv`-uKo4T`EeSd><#xn4+D(m0MkN@NA94&Y}=`p{JdiSX_?tXW~9>>M=1Rj$GxM^ z1{YDs<@l>MTYs4Lo~e)U^QU4}VY_H-`=$xhLz0+qYI+w3`~>IFRYh0L3mtbrAYdWN zOmz~bd6Dj#ia)S<&O;JHgX(YEm$zh}KXcS|Scil8X^h}K2Po43zPhE33QSu6 zCOrm_!PfM~Av|#_R2-rV{qatsDzNUYU1Cr-&&5xwU7k+?` 
zn6vU#HpbkN*BR#>+=f~pi-D1maNtH|hWoH(2=io#Be3l@ zt|QD4a*^#0f0Kqwzm+^<3nYy5z`*qwcUm7s=6Ls3VzXz-0&)#mxiztMNKc$gijY6_ zia)CgfVpo;dFZ0wGbuxfQVX!6i&gL?uW=8oe6_nJ+jMO4fRoc$Qarm5j)xci<6(d2 zEuTSlUp)#W+!Ow}_7o+>FAlJ~cQ0Qf%X;S=5Hm_XS?qYRetw}m$W;#dH4F5Gb5G|# z{DQD`6})%7#fz36Zc@E%&7S1orm!(E#1-u8UMdDhH_V;#saG}j0~Bcl2qn6%gW%8j z!sp+_u!Rz&fFz)5-8@wZSVXLlMo!9yh7$4=!EK?6vk6C-_$7=IV>5{UdIyrKgcwiG zjeYbL=Xlx?aH;58Toew*a8NraC50fm4Wh&lhY5YGcRM!d9{V9@Q59##9ay7>X%mHZ z+i&K5B17YQ{WAu0LlLUA-Swfx&c(V9+ zw8p`*x3o%+I2FELm`MoNuN>n#mBto*3(pSV-+RL!cLk3cVs-9Z(`rz`k@|eo*oWuo zvgl5OZmZE8`2%#pBG>EO; z1BFP+>>MW7cVZ)5zjhh&hN~HYCz}Ozq#P}y3ujsnc-$@vj9LOd)9z=_TO;+JtLX0Y zFgdA)9p~}Q8f~?ygR@2S{u6hGZ#>LaU)rDu^l&uHw-PiCAl1ehsgebNSQ%z5Ks>wX zS%?v{sM~L6FVXU|+Fzp_@Yr+8Y}(+v9#LGwdfsF|O|F8zBCN`mDAAT^ZxbslV0x`s zrDOqE7ST1?rIxMEFAR0>*1dW*IH5zId7H$-*w%MivdvgL?2BwJr60WjF#tLC562<3_>V>w`@ko zXMh&B?fF{X7loYY4E7!SiA~ko3P~*fG|;)t&gogZdA8Hjsmh<+Uu_6_u-;nfU6h1{ z_dLF8wfbY@4$WBUElW{a>v%ZsxkDq0S7bOl^>G8^#CQbQNvh$VAtYY%Ij+L|l?x%J zqLOIVgB)tX>d)WsW_zc9jo`YS%Ymu{n{a+*cHQhRj{i-7eY&`o5p3+WU(59wt3i6| z{>J@wR5oL>oQr9bE0UWL>fdF@qG$GVLOVvK{jST;LJ%%R9q~U5#nJSou3Qb&-zlc% zgt5s;Llwt{dqSB$qW$oeEjH`u9RfeWY5Fl9TBrMU>{>=9P#ezijgSW@4$hAYta@(3 zcHC13n^6ByY-5?3tNEp5DD&nc-}zH#L<1qDG24UkVf$v|AkGi(X2}(qbnC6)#iOO; z9>g4<_m|qNrdiGoJNsHu?ao>{AnuXVn>Bzb|63(t`MCy5C{#dsF+z_%%D&_^FzJ`o zfAjaRBXrgnI(|r(?SAg%e%wrSN=tQz^OZM}?{QM@P{14EVF!zvl(X$`VRzR@Jn2Em z=$_2e%;@;o8disZm z$KP_KNC44jO)8PLl3))I9%dtcotjpPfF0iNG!W)TIRxR(eptSFr3~0B8g!wJc_YX9 zZH*;_?^ZuxcDS8O)n(YtOAkOVQ?cPo940*un%rFhRblzh_BcJFWqJc5cVX{_M~)@( zK6EuZTjGHB-FI3k1FX~dymzU8HVYCGRfgZJk9Pxht5CvhzkPv0NpN^P?i#KN@#)th zV~fC9&$_nmeI?4q4#XdkdZK3Y!jB8N*B$oBQ{aSO!Wv_Ig~ z&D*IU1qS3+G-L}olMck?i1LvqZZP0+-p`BH>-6{D;+K)s@?8-5MQCRGfsQ8%P(o>~ z(g1#mrrAf+u`-3EYX1G)QsoZtKG=_a)o z=w(UF`KIg5p_0TQS)a&rNhf1u09vPLN_$Tdh9H0Lp6x}{IsG?x2=d;{t1U_jLzhY>UnDCtbpo{k+SI-)_bGO z($U-rk%gsvmRBIczA6kQODqT|xza!g- zbhdXu=v0|Mx=tPj9NYF}3oNWgq5|m8^M0595##nVD5Q}=OMO*@Hk7KOq?)(-;-mtA&B%gY 
zL^wv}gw{1@SwB!%7tDc7FN!IeeGkwH{-$rlh-^W$mdTWO)}vu%;1=4rElH}FWj+f| z)lq#2eJ$0^#rt5#uPPiQkbUd^(X9j7^df9^f$T3&6eTckGES_9zGPw8ykrO9IT@038$7GF*7;641EmCr{iGcDB{^kl!XB z4D@s$)8%L=NN|OeX4QW7xMY1G+?sGk3LMXEjiSe}YZ_i*q}TZ0L>qmp;HT&&$4|QR zcD~d`>J+hVX!D-bN7>!s`{G{jrmA5qOo!5>0PoT7Zsx#e=!?_3M(JE@*X`dEA5xHm zGNZcjiOJ52HV}t+kpQ0#v|SqZRNHce0gH)aBQyTE)+*;h3!f$NftWI#U(s zsTUO_D-0`Fi+L?$1xm=e0IBlcKPiZ(B3kdJ>!;t!)zb^I`_hLl1dQQPc^(S6Lv_cD z?CmjeRPj45i$wa6g-?Blhlfo(El4#hE7u$DNc8lpR(qlNpgrO@bFAC4eT+XUP0kjz z&v+#B<#t`NQNg64m3i9b;kZdG(NP}Wmj*FNKbwWc9NQ1kG>?m(zJH-Z3~9t|V}3m7 zd^$M4+7Z5a=kXM?a*J`7j+ZrUR@g}O^?P-r(~nNTllaimsl2CiJ8SiNsP&|S>}Jt9 z&ihBMW5G*ygF@zXM&fR&BINcn#r81n$wg+E4YL6^j?=b$j@d$c@~j<;6bd(UH3&=4 zFUIrgJ%4Sp95J*&ey!q;?ox$Pukfl@p*++Q)IQ>a?ShTAb*V@mam*mE0Z`jdL# z#PZdtPg9h}!4f=X$>|-rgYEBx%PaM&9g8&JZ$B>%9iu)TyH1B5Q zn~{7qRuf}uoW>Pja+x^tz9eN|WUBjdWb5a*2of~p?>@s+v9hi~KB4Y>`)f4$?rB(A zc~Gq=*#v`?I(RQLHF?()yT%`XIT%!Ufo_iJtq6GGtKFq zQH}_lDh|);k%TqI+;C@xZ`voKYAahNakb6B@ie|m%Qoyb1uZ)P#c+nP#iftQIt;zN zpKN%DxwcxoZqv`UXs_6uc%S-t3T?%vi(5OsU(y!y*R3mlup=5|RKAD$?we0uXRAVP z-5i19hUm1>N}X6EPAY?2VPcN(nXT@KdoJRTB?V6rTu#UH%U6I>IZ3h(JO-F?qAw%Nsi(}>Y>JE#QI1e`t zvYuOQUPO2x(en1u6ajp-rAoCqbD~*?u}xs2fS$^YbOVz4y$T;`fc5o+ePd1A&(PnR zmFoemCj=Ua1#r*HrpR4R*nr4m$P>)XiVwr`-6}kzS>ig^=}&TM8iyKLe13|D{psJ6 z_Fv_DUsW6d#x=d2_q@+&#q=fd3MHpoRf4fmHm|%7_D)QnFX=|v2%OB1bQ5c-BPKKD zVB_E_9KO~_2M2cQYO#PRHWRseuG!&R1dQQm3Os%TfYdJ8FHSX zJ7=?|acg%wIhuYl_y%|{T}lPQ!qpuIU@$ksw)cmuFNA}G|AA_Oo_5I6rK)M#Cq#To zNL93Ip|WA_XD`7QquHdfWmtz~{TuYo)A`IMt?MBhy`FmhJ_t9pj9o@4FZtJivgl)l;H7#yjcP`#xJQzoD3n!{C^k%xXWYqCj0-a7RY^JbH zRKTK_Az32Bn2?Et*cx&~R$myF!H6(`cdZ^{L0R#iey0UJw8fENopE!CXS9dg9^DtrMdOG==ot6?~V`1fDEuS{1I!g5e#jP@<+OCwg$A4CMZMXz-p?tR6~&Y!F^ zpWea$iRg9l6apK=pJ`rNKc89Y4WcxPT6@BD@S3p{%`HKf7o60dluYwduIyA+AU#c)dswupe78851*jC*LPxLnZwz+Z=zR*9- zhbcp}>su*rDFJOjT^@W6C$zzUdW^6 zO6MxkPg{-#iYnKkiNldL1Rpan?uoN#q38`4;Vxf;X#{TfejpP2L>j6Dw?M!m6xDle z>GSfNGj(klq_E`xjWc_HAWb`xXHz2Ew}zSTc{)WwE6LNw03MgY~>kN z(D7NQltmj zlHAjoxFo~Jz*11#{hdxxVR=eM)4_-5o^>!Ad%hx 
z(cXN1K4r(yF-#ccq@XQ^t=pCd2V}{GBLVk2RN8XZfJOCbReyKt{fhUwz|Sg4FlMa- zSiJncJ3gRZ9;3!;ke0~6y?fl~sEq7p$(d;R?$`~CJ?`Xcj&Y=f>_#OLEjgUK(VEJ- zX{=L>f0-u=1ItgPL6ixz!#13^bg)k3ch=b*=eId06_qKR?*x!zzJ~Z)c$liV+GQXkn9#4h z5wyurI$QfnhvC3f$s4L97SJsc293*O|G}l?d*$ZzARrR`B8C9H3Qz~MIndbx|E%bT z*ck7u%(W{JyVvJL`LS)wbQc1yx6eW*h+S0mzc&3veoF?5Ty)&DJ7n+!LcUbqejcqV z^(J5a85i|lQlC>4SfVF`Eqg2im{143@zs5w51RNKxPveAk!K6_ScG-(-nhywZm)~Iam!tfr=crQLlY(x24#iE5p{!45E6<23bS-+j1E#4AuB%l~>+Cpn{8~Cp z#-B|je3{h0zXhWoXE+ z5Aczc!F7$90Hd@Ghq3m;kp0%c`5^(Q%8ohZ3AL<^s`nj*@ZL1Ca?-4h-S=D4llU%7 zUg^N3X`McNr`X$mA*PA4rkyI(OGt@<=8Re5wnvbzbi&EKoCyR6eQw(LNBLot-+pel zmezB0KC80PMIEJdNek|ME!&IaUx`@$p5>%Cf;s{DTEM6jyGhN6p%_g|PruLbX?kW& zi_r5DP$3zY5Q~o6Iy*)_NmhS!pN;=Z&k6fc%;$RYmG>Pxl}ynx1ewZ<*Z0xzS;T33 z_?`Fa1mOFMd=i8sYgf!DAhAySTG&PLo4LIwhd2$GGLHkDUz(a!Ot3I&-SH-3qv@i7 zmXUDgEaQ2=K^$#=c2;Wu@=4_nzF*|W5=&I=)d=N9Vb|SNL2*gRrY#$;de2l%8cw&K zhq`3vnypu`p#@g;jSX0GfOc~gvOynZ^4~iu$tRhwBcsU2zU&DrOpMS~j!&O7U~NIX zfmwc*H~iAmK}ihCzbKAzK^|Hj4C<3<)s{G%)V#S;Q4iX zJK#$$ax*0oStAZBa#uqo^#K1?aFFvfqVyIu6zxhJ*L|hkAPnV~Xd6aFAiC^X^=>^3 z6Z)EaKbMujw^FPi3(a#H_w~uKCry!%ft*|ZZ-j(3sy1GB8jFNd4<}Pj-zBwLjv5%T zrnr)bcSCRSZ!cvNEv-^zc$AR2a`$<19~9yV&|eh5$kRUv*`l8M4JP}vaQWbj>Q z=pDbQAA7W!j=h9O!UHHgOp?Y`N)0MaPI?C?6)RtAXIAloCg4^KuU6)K>+poNXMf0_ zdOoPTKfDmgsFN=>7KA>YYXn}}v?TCw zUw+O=Ac@J#`PIE9o&|EEN3|#kF{q_s*~bgBm&u(Tn2>ld8s8VPWZzNc1k5xxhVOp! 
z-9eq*Z9N$ zs-~yyI(_53=nf9}okrjz?50~KW{X22Z%<4DSnpnFky5pgj4Gxfcy}i1w z<-GN_|L@{c3pYEQz6Zm1{vdO#x*U)wL#}X!G$MQfs&vi8?8K?N0C0Hh^u7ddHM~Yf zH6vOkYkGn@jX8Jl=M9;uljkn7OCC8ZOppl5k6gT>?dMwfi!#GE`r<%7oAVD?oZPqg9*h{y9e2bI6iRF;9@VieillBUOn%+z09Xm4&PCKkGyEqyyP1YpRO>MP?(L~=gbDW7=@XyR&S)#f6+wHV9`I})0!?u;6cJX zDy1NK*cna6E9@4ycVlh){`2I&eu=i^{1RcYkg>%b&-^lDzo%>XD))q}FOe~`#u76< z$7_#WO;M!E+meMi9561GFs{lX9^XIrV|vuOi9T{xuUc!0zFb50raO$Of<)dj;~lRU zXT$NA8Pm@3LKaNzrvE*W$Dhsf;WPJ*K)2b!QAvDXa@y7eGPQ0DYB~aHC4GY2 zU*?3kEGQkAA3Y|5Yday9QQC=;~LzdTG$0wFpM& z*-nt3i_<*qOP^d_YV;00Yp+q}z-;gAZ!eSR`^q5arjMmlQcH2mbw*US-Kc~X<>c{o=ojz<*O^B&$YszN*+HtQZHc+J@^BKfw7h3t?LKmU#|Ty?MS z><*&Id*M1>#*|{;$+Y(+1TVlh@%aA*kb`^p;9bBIDIMf`y;)SWGZ`YZJwt`!D*eQi zO(FGd;bQs3<%707@TYzMaKSR|(CKXn;{*XuBZ(hnl4JLQzC@K39c(oNM|!Yk zrDG+MJ!$P_bh9t}I_;17V2%PLT{%^X{C3Xs`4F?Ifd62X0Md`C_KSnj!tbRU zH8|8LZMSXr-j~qXdA}zl*_0xq-XLpwong06+~;hU=dD}H5zd9t#vE@Re6_Xph7Eo8 zPs+Io+M68!SXjy0x}cO-yyh-xerg?zMy_+KY#@{pd866-yfA|3cz%#T@l=aWF%Fnx z>(XW8#zn;wgb0xxZ!+Ib3vILImP2bR76QZ$5|Kr2BlrC?Boq{y>iROD=clornfo%+ z#N%=Sfh3MiT}G{G)pUDBq;ght@xBT@(NKpxWDiPS5o>ZhDP4=&u4t{fQAXR_J$6IK7v6AT0k| zO7xR;prQY6Bn=nzPi~;DrEX8{)vGc+9!6Sb;&lHZDoo23LOE4O1f4ml>eE!JyaSw zNj8a)f(^6y^)A%oWtZlymG+bWMM)oJ;{^(F?>UdTxp_p0ivS>45Zl2P%mTVJeYg*N z{oHFYz|GEX^7TC|#<|4#Vj^f4?3M`KnT5gcko*3Wo@TMX68-KB-&(JjyG81i2DB{O zwLN=SW5Mfq-ufy(D-bRQVc&g!FPYgT`t;0~&kDVFhtxTq%qa@tyl?K^*TUK%|M6x2 zeNo1E$tAD&;7W~*#Ett=h;9N_ODWW1IGE7}kTsB{A$vH*m+4zQKLh>G>7fl+dpWNb z-*eE>P@ey>o}w;xJFvz|)g3qDu5s2qTu%n99ZZO9E#)tlRF!K~mUDFS_;?6;l?K0` z`9GrM3Oyz@bB+v(AljG&VIo|4c0`O8y5y7kk|afn+2sC3L9qBbg`i~@xO-PAPA)Pj zoB;y^9JN(gFYxfO+P0Gg`xj(1R>l!0ml@iVz&8=6;O6rLi4e*_ubw!`77j{@Pkx6z zXp2!JYP1^GJidRSb{kltP6*32vUfGU_62J)4n|O!`BbI5oK?WvZ2bL<{ELM5%Z)~c zxza!P4D|#Laq?=JgB0+6nC%OPS@%GZ&C8Hj)a1UgtP`(tLTfH%t%#Q+0~N z%@%k`>xcOl^}5w`=dUJg$)OHBq9eUh(tNhFxbw93`6_f6kqxFV-;}S{o3sJ-r-ItW zhi+XCHQ+^Xay)TyD{&;ITwhqM$7V{oFD}#Peh*AON6dM{kH}y}O+QSqy6b2cNK*-F z9n``GcjNHcmR)V&Qmy_wq}OKd2A1Zxha6r7J_5OjHE+2V6=jc;{t?0Vy?gVhPiY5_ 
zB%AbFGkf5@xwk;2H)Prk9&mEqUcaHMS1roF0C~{A%>Zqoe5J34e!i>_ zC~7HEoBI2LQn$5rC7N`kx;90=`N9&ORM7(DJoaCpG*{%b={4uuy1z*gmH?wtk%)c& z_~{vF4~`xaL|I!Of;lBCmEc>t>#=Ydv>~wpQaBOSc#;uI@PtzFL{ba{*VxE>Lb+B; z#~gq7J*=>*(DWkiy#+W3U7HW*zj1}VLH25@(nWAQeEU(@?$8dFe7q8=de4fylO&-r z((|fkQ|}h{7d}jF9SMXa^>1exCcXn^4amruf_qcm6Q8&ELdp!2RQ@}=+ z&~U#~Fs(L~9~n6;0v1{Q=bv$_?j+Gv0%A=kCG5`jJ%n?^C_i=v zS0f0-u~iGX)F&nMdVD@br_U5@TFnFf;HR|G8zf;0Hcod(qP5K41HnU{=OOhu_^zy#er zMp^%4+i5^fa#o9uGv%UO$<@%VIVesR4o3%l-hROKGO^l6aXvevF#k%q9H@?}#YP2@ zEI2eNl7JxGe{sdAJ@Qp`&?@eobK8(wj}^Tnhz}bnf9tqV^7-E_AJ6sV zlx6F>Q=Ghn#|v$nfa8}0G5VV|btnSBm9=`c5-wYF9gBGxO#)frZg$vOS-)ER?nJXG zzqkqArIgDTq7E)D?)!=a5_Gxy7oYM<0>>t}yA@_4R9;yW`Iy83-Q-=Dj;JCRax?WZVRF z=ps)s?)Hy4+7k#4Kn^T!a4zP3AW`1Fcv+r(v&Q}#H@8QNZGei;``l%4&YlgF8Y%)y zDr^~iC7+!nd6w{rPeY?dd^;DEc(DJkCpZ;yLcjt1+f>8_U+Lc2h-M}e;*BhF--yrWT>MvyvACd;2XKm`PKO1*xU)e z)My9gAk@M<<$vk_25;=48DjJ$gZ84wh($#?%p7ccp$vov;>7t#`6$wj?hm*#qKs{B zK{%~aJ^A^;6R6S_zD$oh_iY6|ZnV7r6GfJ^ms$vlSkPK~0P6e+#wFyi+0x;{t{H=# z2N-lppR;y9^n`sSuG>OWw&iG0uPjWzJ=$CD(1-$aORtcb;*15pij+CMwW4JkJk3j7tdynYW-o=L+RZh3vfSS3Qj1w98ZOI@Y|t z1xN>qP?j*N#qbG4W8s8+~ zIH?a)-~v>@D?N|-2!l0{A3QkXKms<=#w?;}s)WfuVSf$6kzSd&(zxcRLj!yf8eDJ| zpFbIGzlq_Ib~7Zo-~(a;rlJM&Pd+rarPif8M(O4H{P0k_*EG_yJ%_n1R=^@T(i1Am6bYoG_*`tR3dZ`?Vx_rQ zbWjmIM$Zuvt64UIcd{PSDp}<$Hq`eB&^_c>i8g$Oi-ccJI-TB7?pR^MWJ7}nry=}d z;}C=ESWaCB`^5)H5+{>wmJlRAXk+hAA1;}2S-Mf71j|I2+99;1kS$uJJ^;=c6vGGT^9fNB2})E6rL%DWCy%!uQ0w1wo6j8Rm1>qvyI`)? 
zP%1D0*pZ*H=~0`{ED{Lm^Hpo?;CdcMIFsStdKF{`{htPL=fFZ`_g#l+(W2Nqp}aD~ z!D^XG-+84*{NWzO@znP4!N;bXNiPx96^;G|b051}o)M3ZjUK7=+dh|GD3D8#sUi)6 zgd>ER09fp8BZA1eZrD3Vl1Ai&MlLe@U$e=%++ptXIl@GkS4p<;lB^Z<*E^Ktl8!uL|X>Mp#!7vl!-%51*o|hUxKzgM(8Gz} z@Q1H#!C|UNE#3rTvpU#R3J=o&O#=WhcHzdsO;dF9Y{bjgBs{gac@$Pf9$Ww(0Fz9K|L(N&{ZC`$}4AM*h$pWVs;wwmwO2HG;Y%EupP#Al0-vBF`13 z+5?^3>N(p*GH@E`Nf(10>PU(C4N+Pbtk$Mj7fca;U$kk}PX-%+I#Fn8sw$mS=;+T7 zi#x*{2?8ImVgAKhvBu3AX#(aJNo_Y}vdKja<5d89m}>qmR{D70!HrQEX71Ei_D79> z*?v^yzx~mKfbm$-qN|>;co2ifrvB5g#JI1@(L)z-OQ6ieoiJ-5t%FD^YdYwRYU%PO z^W!`(dHD^`R%`sg;d$U&by`(%#N0ht4lI-MrwRrR=inl&M`fbI@Q-3=Q-An2Qkm>T z>xhm_8kvp-yAi1cZeLuVGm%wmIaTF${024f(#`DKLN0!l7nq(lnr!$G^FLa!J^RNS z{^t-2C!^)g-Iw!`pART$wK+rdv%XqCMj~Bs;L*0jVq456G!Abo9{vWjXhtJQfa&-c zZgufhRfKrVe=f&|?$yYuO-zbl((iZ$guASdLV~1rd+XbX;I}>bTmTC z8!uJyvL-q&)8Q|PlW}r~psux1$%dE(FGM49Zgz2;g*}sT^k5mM2g06})0RWf;Y+7) zNU~!laJY1jr2=ZEU%YIyKXT7~$Nnw0FHqr zAj06x>Pl!Sb&x)xsVocFhYpw@2rDPm=-3}-mcDeg+B_NQ@{@qqCKHpYknm~FV@OWfW<`mDs&&5K{g z3dBeYpxS_!In=4BIq-GNE?wgh)Dx-95oXsTA}b*Uio(lbo;!D8mCjP{%BEHRyn9XPk~IQTfq<_{oPi;(Mi(>tqiFGHPEhw|Ly|f*;ZNf_l zVwQA?Gmpn1#Hdp4h*w8sQ+j$N0|zU2E;K!id^^vH(T>O8Gif%u1L{2sM!{Ug1HyEE z3lZw2NmXByRSX2_djda(nFZ#K$<^p(;=#1U#H?qs+`K7%lqP zlJ9=~m|o72w1Z*X_Pvle_Ojf8RCZ>gqbsPD6|RJ-gM=TZDj;34?-62U%W1tY$Z~`{ zGW18ESYxrm|6c3_yH8kn(rp(SXX{$DH}A9N@3A*Uj1<*N`+jX|SgtiWIwoh*??eiZ zdK71fQ|^}tko~4;XFzjHnkG%EG${^Dmtx8uP95PSrxgIz%(RG72c`)~`N}LZ&eU8~_lnRzC zDI}u?iQo@pY9h$`{BgW^;5JEN1BnwoF!|_k6@|o~|K3p(mf8+DpvuR@@BM5L1K0ls z>HC^rd&YVHLYI?Sn{tS!W#*Dr7;2<^@)&pwLLeuEAIgY+}nS2 z_y2JWi{gW}My|IXFac`{OO}8>&6+^14d+lO@U8Fp(p3LD?B;cShU#jgnw2DnCirvy zDERXZ=tS`l>9K$m7$u@P=&g1&M1nm7U#Z~fP`0|6rm}vP1ahbbP_2_XHvS!EcZ zqwfbqA{|hrucSws{@6K8q%an(O_|Eb${8N1(i&k9$elHD+h!nFUuudUDh0ge?vt8! 
zNQ+Z!MW}Ttb#a&YMrgfuNF4U};_LDFgNaU!-RTN6p*tvWaT!ItuEd zA9%oNt~Bn+W)ShPK=|F>Ljy%I$4~GOiGMj9*@9SNQe?QcI`@~Va+3-uXyieF&%9|~ zG%xDjffjrQIg%+6A{yz)Qu#V)y`0C8VnNlfg3e0xC&D0VnB(tV2|XRU1)#rtL{O<` zsdcdC+gksR!2|q{WFYX-P_a0_n`qaf z)`MYqx1MM|z_Av$)q?rrVRf`j25yUzp-viWA^*|h%ID@M{T#mAM(pmbpBmEhQ(jYEWH@kp4-9Sb^3(+cFHpXAY^T%7 zn5d@=lgp4JBx@V!r{=e)17j%#T%LSl?w$|)@3h(-j*tXz2Bg3l|JM<@tIj3b0P$8b zBDg-@q?QDE49KX_k+ytC`qZF7Mi{2jwTG9(qWGxzKpJ>PuqrXy`@?q6H}x)u1{UX_lOc1pQ}J$ovYSJKJ`L-^a59 z%H)CH|3t<{{DqoDv*OZ@NA7%Tzm^2M$^A$;IVXg*@!N@@?0X1~a*B@&r4drc7TLr67>geq7 zh>|E|2yrF|7>*rfd3PABqVE>o7EhE~o+XdT&%H~vsFX%9Tk8lW^+YCw7e9$M1i=VYap3qm}uXdmxa#ss&*r=i8u65`su|(*kGwnCPuc zKf!F^w^oUzP!*Urb?|3!ql!&{lfwJXU-)kG4*n_@ypB;^G5R89*QWro%AwdM`y6)+ z^j+XbXXSjrT6tlt9<%=Sa3P$L;3~gzEw8Ac8(MNi#O|grp}rt4Pn!QLtcUnh;V$$p zqA27n1ntTcf;c3MSI?SKxBXZ(0I8Ck(<@9H9KY5Zv zN3Q*r2EodUO*&gp3b9g!*l2t;Mf1W>mr=jSBfd2AH$d&_Q#krkIpx?z_HbOw(e2{Y}12AY@)2&FDr3iwntK%_SLiaxC#v2xc@IE}j7i13)qHyHDGueCu(E^J+^yLU4n=IC{A5C0leOGlr!L3F zmCKFcxL8Zn=zpK0u98^+N`s*i-xRC;8r2#P4=$uhD+C+2{GfXyU!AZ;tiY zQ8T-F#2AstUYrf+gf3{dr=}88PmOnk4!B?lL}b;1e8ggK)) zFj4wvPyN4{*Zc#KZ=9hCHJ`dsJb0L)MUyWA!XXQP=S?lLFkqrPg=`@r@EY4ek%eq{ zC^J+C$NL^%r}0O#DZp4PWzuJvzCQy(uQ%%ww z!WV^<^y^Vg)Bkd>t-Uz>kYVcTG!wV4!IHw*eYpL$Y?4I0P1nbNSNU=?zCD@#_Cl7K z;inMywu!dC=e})bBs^{%QMRx$Mpgpn(KTOxLFU2K`8~lCieNhI)iyWQZo3<{ZObWI zLoTY4rl^)&dM7kuxLQPCs?u%Rk~@4gF(QU(;ZaZ0?tLzNzsG%f=xgLQ5f<>UY9-uV z5>~DW#LA33SpT%yo?B-N1*wr#xgX>usH(|ZVWF_8KH8kQBj1XKj;zSheX}6PRwen= z^fWGini6dI;JoI4;oRqJFZS&+sQY&BpvDl&2@^sBY+D;tuv-S|I>wJBcNW%G9Y>}R z?6V^K2O=PVoE-W)iFEG|WxM^3U8hTSEuX&|tFSmY2U6HHfeDwA+&@9=3cn z6Q@u)QI7$*hF}?4;7yLF2OtO=GbaUkd&ODuGs!tyQ2DAEU^?3Cv8-bZ#RDGDw(eMO zMPnwE{4W?EpZi`5a-Odi*_VS>{$FZG1#Fq5g)6|jP%DEJ00i8mq07&8ZW@H z{McgCTv;t~nQVh_D?SqkBAJX-+~*GLT$daB+~%=A3h;k$0H_ySHYsC}xxvoopRfID zy)gb~P)4vp!Fz#&qIy}K$90-L1n)8v=h+EiwLa-uI0@@E6CJepQ5S`7Q&p3}K5%%; zuK{o32#H>VK~=e7eulAj08)KUhX&Pc&C39QNA)RuumR2Pja7yUX**DSX=d_f2`MLxRN7+DGbo7UB}};;=EO zr6l!~mLd8D2aqtBKw^>Q{PV%8F|fK-p1;AT(@i>(o>!a>g*xua)d(278191SY5Cdl 
zqqXD}_`E{ zYmNl8rruEVJA=uj1f-;&NCa(SXD<@YeZ-6>Vj{yQ1mJ({1#Fv4G&A{`GF=Pzn?T^q zb{B1b1L*MAjPafjO?je2h(kJIC?f$nu))c;^%FMo2`NIbzSdk~IUzz=i6*05g*uxU zAG=^ln5`Oc-we47v4WL|YgTlwYG?6W=K%%NH?cf=8)Ix+B^aMLuvSA_l;)uRW`S-B z4dKXb+z6rOF9NAQq%R?QZAhxC2UWts*I%MM`GhVUEU>-SrK9}pz zTeL=)$i2*o%Iu}`SS94V{Twqv zfZ0F;cUKBB`Fl-vxGJoer|y&h%U{4Rg9%HXMu1S9gTJTz7y>It+vJ$@lRYZw@q1|K<>*Bb9#L5i z^#O&V1z&>(5aC1UfAiiuqibzp3ta}#{_JwEi$srt7DZSi{-|y8Kr=Y&n)xfQ&*hi# zY^4JRdQX?RoIKW5zX{?;O?{lwIjjIWIywo$v&dpWS`Y<%7wGxjDm=RyEKp>ZF?Q;N zqC!x0l$GpB0uAW%tMyLt7sY|(Km#t;kveOr`&nJ)eD`C)~mX9*Ih5eVeb+0zheU;uGl-|`x#~Pik zmSj|>^xyu(_m+#6(JzA(&ZR~PyOHuFt769)B8zmL>)O0@e66Y5-0z$DZ%E!+KEJ*T zEqE{0^M@IC0QV{!>n2qw?FOlCeziatHgy=Sk$Y7 zuAanQKD0fabQc#u^S;|6Z;@4zl|!~VyrV3DvuA@9nd5*aSq>+qwvt{N;YIH(J`i4T z;=yU)56kyrlg~NiQ` zzec8KF&)eP4FnraumLVAv0+r{5fc)eZ{BD5o}BCY03yE9Cs~=dxA1pT!?z~*KXq?= zhLiI}%vBBBNs$dXQ3TS2%xeU>$w|5NsWZ~;xD5%kdR3kpOXcCKCfn2AW>}g}csNJ3hgE!I5;id+ELj0;3WW6E7W$e_QaPsZRM+Y^^Q_a}4B$uKk-sTM4D*{4aiRKj_W#F!yO=epFWHU0x43e<)Ibsm@UBO?Tp%5QT- zII9V#OU1re?M=h!xKLwZ8R#yzDCMSF-Pdq9QPk{ z`O2vqWEdu?cGPagM|hF5VTy;JIP`bn2q*$SCq{S)N3B)=sG9e8a+HaCcUr4jDISDI zJsLb#@p#{m=Netj)67JR$nNhmoqQ3MF4z)#E^f@L+1}dJ(y?c;(83t>g+K_N(vb z;Ji3vkO1FzBh~0^9=-&vUp2lYsuTs%z?m}3^tIRA;^nN$jp)?NFia>V01GI`-{uK=|-B81GLk@AeHa)9^kh8kA0e0bdzQgZBq@TirR>o^I z5oP>*Ks|^$bT~Nb>MqrkR#g?b5Sut*UTvzJbwRE!|J-Xne`DFX4Q2vFc^w8&y4(^G zM|toSLBUdqPhrX&v$~*++w#==r=M1EGCZzhMmT@dp(_udO@E0+_;{)G-qn45VmMWyc0N@@ z^3(be4FjNvJT=AU{Z`oRdtCi;+_UHwXI>M8NBxD{&Q51cWA(DIO|*oeQ%NO>jM6rG z_(qR=AB(M4lr&jZyH=Vs21%J#=ET!iIS>h|R@EdA_QS0bVgrng^lubsVMd~3?cJYp z=VP35iR!9YQ8v9a1dzXXYAA{#C>Y)cIz}T2SssRIT&Nn!fNEO35HXV!_lxTrEs64% zn+GxAiqU-or7-=tskz8nAPp{Vr}T9vU{ znz6R(?J)*h7C?70=~pw)G23qnBxmKU{jEW}La1u*y_}JBs6TfkBPKvV#u2ZUsIvHq zBqz6mO3*{}1Oz42L9+LOs&s=~^NFM87_9@=CFnjtgh4Rvvx9;&fo^ylQ`kEPp zzFcN?ySu6Q`QCNMD4i$CmKZO`@%6xAa#lNGVYVj9{g#2hm3Dznnv(pp;v+vsznb5O5aEzsOiU^#D$)R)HFF)sU)QSOVW6fv7jkwZX}h9#?^AB zfWnk3Zm_8x^&fvi;2>m_`+HWE$j{j%MqNtHh)7_7Vv{FRMzwJc+Am3|?HB7`lBK%# 
ziYWmz?3A)Cz8ZN-AmJhNk)5=bJA~9uxC%(=rC-sovFgvGfijm?{jMk`DkB~q%7hrP z*uQ8jg(z^i+^lc;9S~Wo-jDe|cwS3xxb0{HsHl(?_`Lshf41?L-|~Cb&&<|MNVb@f zvGZAQ`a%Fy?mW!?+C$#0ID?KzjjicScA?dMR#WMUu8le+66Dx+PK7nZf=Z3Ku(a$}?NJmDg>vnOE7K}a zae~C>=eATeCvK@q`!yG_J4<*g*ozWr%1qxkuPq^e<7Wotf{t89SVzus-+{Rjh}?lG zIrZ>QI~5o6%(U)-G2Zf}wz}$C^KcR(vQ}kUbfd#tH^UG8gb<`KA~lm0yxQ3Maxb~c z)XJ!+;#+U&Rz6h~$RvePGcW$C+@R6Q0pdn0@e+_iREY%dQ&CZ8)^>`wpnIJu4K-MW zePTIzk^O3to>@-kUal%*aMHg;HA(7icROPY+DIFnWz$W|>nx37`g^*tdYx_^e2eCD zl?>)<;PYuNa;=wBp%>~;E;vYR#5Lzc%ME@qGC4S@j+KB=|K;25V!axq50^sd@B=^3 zAY-&|q=lPU@tzSRtd=SlGy(%sUwUf~SC=g9DDLw$aF<{uD7aE2B(9wK^3ubi!h5$| zJ%|W88@DwvFybmPK2K&^KMK|^-8T$& z1W+sJHiXkoSS*+a6ggkh3wi1Qts=Hvp3j_4!iC5KmBoAC2hkXYk%+S1cf{JfV;3=H zI&NE`RX1D2U{w{|{$(Vh5&xt?9!bhku@~z@WDPBK)H({!-&?AL9ZRwq_t}4i@j}j>#hN;5aEiNaH4D&)tI18ARJv{ho07FC%q#=tD`XXpoB9m=Z zJjRa5*}eP#Uf_#LlIFJ(Evzh*3NpGiC^q9yZ%QaLptXF&LPtyW64L~_^17WKT)iDF zVn$J!9^`H&&6b*aS%Kn+1%O@Z*p#;(;}nM+zmCnuWyN|}wKvf){}eCO9I)g4DSNiW@u18HcSFltPgf98AKKeO3P3OeWx} zYwGeGK@3lShu|yCtjHKCHsb>mHs8Mt!{Woq%8-RB9i_zp*M|fOKrH;AMxODU1@og9* zLsi8+FzyZIcv;7Q2{clg6IGEr*ChOrx*IV*`lr?EA6Uq??20NSWrXq4jwO^mZof4D z%uA6~LP0h>GwQcUDZw!0Q|Ohc(q&sf_`)Z!Y@WC;3D@b^iHX7vYYP_TMs*TAFnWI) z^@A`#;B0=b#NOdNq;MA%Ahcj+kS^6uOd+1dOm(FaN|TqV(QEwkySVo0U{41CUa(Sh zZ3s#;TUjOJPHW2vk!)cKO$Er52M8y*!4PBJ+J$J4ir-+YS~mzH@{LYRu*$6HHmaA8 zj|?VtgO9AUv9eRR`(CMC?F)G;&*WsLI(1auOZH%P?vkjWi8XZpU!+-U4H zC*aNpx(_JP?kIXbl5~|Voh%$m*Jw7M*i|5iY$H*sQ=HOQLRA(T!v`^zyu*pylm@Jh z#fki{z~O=m90WV|Z;1xW{=ty$USxevWoWD7G6ih$T_QP9m|33e!8E%v1A}g@C?uKe zv8(WgKvwnuA*2$FY&Cg0L=T!K4vvPy(lj)rk|c`E91v#5wOqr>t2-@}j%?!T@8%bV z30x|vMEC#$y9wzmT!MDF=#-4Z zYBIkBal&aJB8OFwJJyTBc&-3nbKJEAh&Vad`(C-q<)i{^_Bvb^rfArZ4sh4z!l!6!d?TG#7MCq4tSKCGmH$SBmXuoXbsfQ7RU~k= zS~LkMx;`qTGOns_>k|Tg2s!63Q~)DJVuBZy=r{U-b<5V0!cn1_aPeJrE|XkX=9YeX zG;cZcHiq(YIt^>p>Xtv_?tN4T7D|&Ag72`%5PVOQ@n+}X{3J70qQ*Fx5myk!LQeEp zYpR!4C{h87nG0XI*|LF_qNp&koQ#1cCkeYSy=Zc*VtK@?Yw!*WFhhr#SMDDM+z4w+ zh>E~sUoj^n2foBqA`zr1{?`Mn;Vnu2dYY<-WRBLXy;n69@Y|SlCnxSxtvH_a)snTU 
zHeT-2V*t&>^SPPdD2go*%0ss&Q zZGnL?2gz3T;Vx!^1eYi~rCvnnKo~Sger!nHShOQgdY~lmm<~bCkDuAd1Ly@bP#OU2=dZC^F@0ae4*UR7}CCQR@78(Tr&Y7%Enz|_i_LQnBbH9Xu zmYx8%3V`A893mc4wteRC;Wwl3CpQ#cLZHaP6aPOj2qrv&e-~^~+yz`BH3b3rN&oC< zJiaMyC7hTQZE=H(9T)dYFU{&8m*@6P2Ta!|bUv6NPP`jY=Xzrjq_%1T3{m?@D-;Tm zj_q%mEd5cQx?1PZkj%Kku0g{KtokEpARbiN;=APs+m$5*JJOi)Z?6E=ie~(Wz2v>d z#&%Kx0dqFi(%hI{FOE?#)>H^FX1zj)vx}|9nm|a>rWaQ4MM41l>~%O8BZzLrf3Je% zdK)MRYgrkloG;H;M1RBj?IZ)^ugPjCcOMnTYE6h0bvi5S;@suHV!)(~#~!H&9|7`I zvlry(7+YhrUJAG0Z6(2tk&MMPoBO{QY>npp?ZJ)?PZ{Bu!yZv-bwmHWA0;IPt2*jr zkSYsm@lr+0tik@k>}6}wKv0iVPe4wWwsIh#&4Ci3It>Z1(H9q0>BIInRI*?#8>xoa z8Rx%{5hi4^BLzrsOcy>YyrLn|~=>lXIYBhEN*b3X`VL zX3tJOEQR3z8!0iX`n*)W)J<)cSnmn_RR4;>R$>yC4rTirT@hPFOfU%U+cB9;2$tI? zb*;)On&P6WI}th*Fx;+QW{`(35be4-7>Iv?Lx|P}7VJf2a+4Pygj$eiDpsu)kVneP&f>EwL5jvDWj1><@O4^k|7&G< zp{pr`=s?3Wl#)Q1Y$0c;_$6AQLBu^-WxQ+%i!fPs=TePMcqzRgn_jq!h4M<}+cPhk zp{mRWPCuu$(7Ne(<+%eoe)(SkrJW|1?kRW)H{^Oea{ZpbARXp+3P@ar{pMz zZX7zlqF;$YErhf$c%#kFFJ7C9s>oe^(oXVNYON`tWc**pZTB%@3LUL#&|OWnNB)Bt z!dTH>3&#b79ms~L6C>=wS_+7#R{iU!Ob-If{MB^dcQPV(AZ8WR(!X~G2?`1@K2A6S zP{8=7_Qg!myQ_Le{)rRIDHQ~T7J)CEFfGg_x+X^9UWNJ@FMG!PPey{aP%la}ZuMs2 zZb~(LYs~bCeTLD#RT0jzgj9kN9%VQjOgJ4rOJ3wJV8Kk{%^{VQuxQ6j%v*{InX2r4@15PVTO^K6&ruq^Gfh7)ZGqr7H z&G2XFT@6>L3v~5k1a2hHdnJ^Ra8CGv0be}l>LLH48sMeFW z@^WwN>^>MCZQX6?$IQ&+>;1O9&;WSTo`$*iq0q*)b?-S0JnW(Pe=_)y&d;F!2X|4( z{&69nhYPb`1J^qtLBH%T?8hRpXqXUT`(FRoReVe`karJ6#`m49*FXK1T6HuNm1Nw`6H9O0oC{BSsf>^q-m>4&lS z&laNNtzrLfekH^yuEFJ@|LQ+}m>U#Xy4dXh?KAdHD66{^>N1i{44WHLnPcJ_-?as5F&-=DVul^(}ElB>=;Z$IYx@7nt1Fjk|=4)XcK<=!1tICFa}gdx-eTYvg~g<9E1xUKW$GxyS(sx6dR z7$rlue4pB9devvZIfIT__UeY?l4TL5?XN!wPXm9n&KH(;_}JeA9Z#7SxvWDEk{Mcg zeox;Buf@;=C`DR1 zU_Uhd5-8C+;6Mx?WBOzHqS3w>>T+gn@7Sl>c}7}l3oZpW;hy2>sUE6 zX**U!VPIa-VDv?nH_AWg`dM1C-Hqt}b2%sM;AbO#Y(DUpZzb104gMM+f`T@*qj&aK zq>c3O@2HCNZOhDetuOybXK>mVHqg)`>v6h_&CEvLMrVJYxSA09&75m%HJL~f**h5a zDVM7p4BbMm(^lO~l)}7oc;xW8TK(Zd6MCt)Eiwd43WvQe1@yUmzMn&zH2YO$ebach zA=dYY>C0Wytf2K6{ua*OJTuI;iOI8y$<-J5kL0uF=uGyvQ|0p)x6=iFp79-iAs%jH 
zCL)-zyX-Fwo2fP-WqE#`b^lj$SHjignT1(G*b+bmA&`J*!6h#2phOtxh_o}JIIJ2% zLW4;$oODIb%1foC)`+S@^{S7nk zFSzHq_dfSN=e+OpW-Ua!X{ZFvbM8BNsVpVEjc7Dtw&20a7ci%}uNSukrI!kMPnOnD zi@zQ&YbHmo+|6l@Q8PZh>I)r?xz}GFBZorcFQnfw1pabPsQ3`%5#m(v0`l{wjMf)k zZ-%24={|Vz5vdY7etfxPDx*qJZFKTaraHB7+|8Y>RmEVKtKgz2dL4YEXPqL#Ng4~X zsgojoPn$dm>5E-x4;5=Wnnwk6ZhG<)^GZLv75-t1ZB8Ed{hBO*5ZnWWAkV0I4`gn802t(Ab~c1`ez zCCPYdb+P*4CBX8rB|b`^KKRk zQp_Y4P+IdiRd@Ce`IgdhCJG@RYP%~DBvgkLGSF(_s+?4MC$Ln95ywi>Za~pnefrYN z?oX;d&lDZVHbT|U626CN_i5pnpu52O$PdZ zV!J)^!g1^e;gm{ZuJ08llpyWWpc3E|66}E~KRGE%bE$UZn#T=sPj0y{$%O4p3hYOX z^ppWhEzU2_E8LIRzdyEL=9+U?LhvWvBWz4u5p_(c{qFan7=t*f#EPz1D4l#dzOpuC zYoRSH=z1dNSi-eK5wZe`-MZaIPNL(wYkoM|!zO*B&zP_9Te-}?9JNQBKi6DZx4RhZ zU=LI$nRLhKedE*VD~-Vi+3qz(e-!rYl>5e+>;)RFA36b?09lwHN7Z!cw?GwV+=BuS zB{{q-49p{)UFtfC{q<57A#Yo!+`>$EaVBEQ zCZ_|j3FOM5=(U`V6K|jE9QJan44)gYs8+ie$eafc0>R&9MMfH{R{{TwI1W;$#L5?T zaB`JCdJl1jV+9L!gQ4~PuZXQSsv@`kNN{BevUP9!3nL-%kxxZZV5HPev|AzL(5z|Y zrXEo>#$1dD{hI3SFkVwsMgKmjI_4x}X=gF^A?3L$YoB2+jdqsbr5EEVFE)WMV6I!P zZp?ev4Lu0sXJo|gjS_H_eB&Rh4rmZG+^t)ymDd z5ahd)UM>N9Ub&xbZFhV0?3SmLm*u#d#IF=E%k7hpR|Z{b|UozY_cP|K>B<3>liFLUa&{JRt9y6~#U(%kJfy$_B^7|U#U*l>tLaa0w^to^P?#UPANdY&m^>)}x^w~a2 z0oBI3IYsw%Wuq}7$Vo*<*+$g&;&n0z*l-uA=I}IzpGfxD-^(e|W{@$mI>#sIqU=K; z^>qzXZu1=a30)m(hS=knDg4I}lu>vG>j;az<=}1HJHS2k2{fLj8ktLsjoIjtg;LrG ztY(=QJVvh&vpiP8!N44l83g4gj+PPfC-{xnh5ETMJ#A}ZVgtN)nj04#J$<=XOYB*h zsc3j`(ao-YxIna+VSoHOhCOqbl`B=2#Qw`k-R=dTY|qnkkn`!kO9vq++fmS#=y4UI z_5S{7-rR3T{OFX=z-Zl^V?Rt_4BT(x;7DfeTs?E5Q63-y)I9@S?WKV;uKA=#&{slN zwpGba&AU+xA`&UV?`uN$>{vW4I{^@LNnbHsnH}_htA2^2#0ghp`;&&n%UbHTf)n~7 z>oq)Y_7Vk0tYz@dAedvY*Z-$d;lCmLKZ}_^Bh2K0SnUFaSqIv7_R+H+=iktSWNLuti*o^g J<@(~;{{ibxWi9{! 
literal 24719 zcmY(r1zelW(mosr4H~4ld!cx7*8*+P7AY>p-6_GHQrz8(Yw_alPLV)?P~6?^AD-u& z^L}6WCHH1CJ3BLL*Uruos;nprMk7H3007|k@7{g_0DxHVbPNgz{u!hZ=mvivnMo)} z002Lu(C-ajz`s)&z5Ap90Jy&b0DJ=hfLnN$?=}G7!T|v68UO%-$p8SMeR|VJVfYV$ zCK~Te6%+tW@H7hG1;R@J5S~JS{{aw40MFduDZmE=(*LDDAu#@j1`z-Vu>c_bheiwj ze15%yzn{ze`vhhq{2#|`#Q#zQv9f{xO#@<{i`loF62l*;_U|;E004B{=Qjc%HJu2~ zP18bE!&yT?UeL(ShRx8}?u!YVn~nW*DS)t>AUtVf;%rFeW@ByZBj+Nm9V3+sosPj~Tx@oZX6$bS1O(VQIN3QlS>Y6{P7qsXLpN4iC)$6R{Fl#L6DK1_3wvh^J6oz} zzJ_1yT%1K{Xr3$j-|JsFoh?lNuO?fk|EL95kp1}=_BU)C?Em8pFDm?;E2!jXVFIuD znZM{8;eROqe|i6@N0|M&^8b%8|7!YAE?iYnG-3AtnN1XpSZ@^>01yYfe=DKthH%ho z6>sv{WuzUyYlBv~Eswj_%h?xDonvscfngUdACTT&_DdR3RN5b`(?*bRI8MF9 z`~XOR7b{I--B^RtR#*mQJ`^{YHP#AUh>8sTONUM%EuF+3bm>#+irSKzZsn|QyUVVR zWu}@(mh6mx%cZDU#hc3%t5Kdg9j8&R&1I*^jHzVGTVl@ z;CR|R1j6GF>#~U!GEz$ej!Wx3=5#5^M4`&gU8Swv5z`$e6vEc&)iz3Ua&U(H@0`xg zh)p6j^pc}ErgTbHtwj>fauiZ&9yp_FE7TyyXPeee{MNbF4)wy-7WuZG&Mds7q@@xk zB>HUHnVDbzPuy%SFe4EacC?WacDCpDqi~P9VpJNuZ;Vw`6I5aX#Jh;;DBKj*&PT!? z#}o9rLkFhzOw>A|4(~FvEk&^qB+5uw3chgT-}Y9#>gD(3hqQTN&FJTa|Hs{-=@LM&EBN zKM^-`l|90MZZRPtQZr;cJZuh>Ld`&4sDP#)=!^JIGHZ@FiRey=%%;K6Q2)e8*Z(Rc zF2Lh^CMLv18G!HtS%q3d17kRt(Ss=pgeT9TEus9=soyQVm8yzr`#2tMP?jfj6r^OH zWK;xyf8YxcOe8@TIHHU#&)As!hZDz)y*{|ZUlT*=pLXqlH}iG~2<2r#0b8&UTUu)) z5n^zVd&dM8q-Ffj(idhZi}H&9Redi+mt<+iW=z!5*iXJ1(ow$ z$8B!}G&B_Z?X$-6NZS(9?PI}Ld;fGG&}ZPAcPPW(lO?D$V85hEack1Kf= zWP*TZB+KjoWK*2KIEX@<+4Fno_7QP^h0qzo{p*9 z=|VH3YBaLDrp)zGLH7noctBiX4kr$;M^B8G1?=fjX>lUT zMDj-an??0@iv(t`QRtG(k9~8~7F_iHu|c*w$;4FN8oSOhanNo?+b6j);_o8VMMWNmgCDGiD&8K8S8?E*;wOKE8N7~VnCp1VySbkmdzJ#9bl5MGE z+MM93kx+v#PmealH810a^`VCi$@!=h$-4YbhdR7GPNG7D+3ZrgSNh#|cCtVgrv(C2 zT36h6fhnkZlzac37j7#N5QAxD$4vHhkD)=2Sy)L14wPd%Z`1qfJlOs-uzI&h7$t4Rz5rmot27R-p4JV3g~7v`{`DzU#M9JCH5 z8q2jf7Tx~HmmFk5kDh;yTX2A9qB=m6DZm|Va?E_IlUYiSqvpTe$b_eZr8`b3YcSJ& zx9rHUXkXL0v$@zWG<332_HQ5M{NnTjjtH1yzQO)}J#;_F{+c4#_E9ALvl>sw=Sqmx zbmzn8KCBrv7c#bne;X1_rcXaT+@vksWV7G}09WlK$`&JO 
zJY3L)*3IqfP515cyuh;d-W+{z9elYCulHOAc*nky|pRcqlZPBwU@5E#a!eWwB&$$=*IQ5aO=aEj+KcNo{Buh|`LI zo+^7tXwSWGD{Ad!W;eeHIEyzaR2(&4d0Zr{hC*L_4uM-~!U$Xyzmu4xw|v5^Cxk}P zNTsCT7%k$mo?>n5-PSMVcnKe^0KaZIOfrm0w_dTIH>9a=)CT zwk9j4D3f;P>(-&%pC+dt)In(t7wa~pGx^E~FNzy(Xx!zHMc$vHp&{c%*zZblm}x$BixOmexkZX^Ho){dNY_4l%%(eM0PrqYZdOZKAr*YbPQ)%k^E zLsFU#u?_DXi`t4ZwtuTXd{oy{=JqF0$~3+~WpJM<&dAtb8Eb6~ zIo}z^O(=-K`}1!yTTk(kyEqGGJ*XGZC^k2@su{ApX0LcLM)Z4-KS_h-ZBqukVGCB> zoCPOL-ziPToa(ug;cI+~t7BUaZBY5H%54NorCNP$2J7OzdtYon1C}hM^!3F#dt1l3 zPnBL<`9k5^`{CO0?)+;SlF4F<D#^=#+0~cse)>(kh#&NhvGU0WpHU@f*Spil@6S6-6qo z=}ZZlQ+c?Ehq%0Ei>l0jzc*u6Bl=rL z`F^UpH?BcA%seMgaj+hzLRaH=$Ti+im1n5#WVJ*$C^tO8jDM1EQ|OZW*{2@sv`RDru)M30wv`KLL^JB^&UsfpP)s7e_vf{9*x3&#SJC6(EQ3YuKdc_hA^wc=0x$r0(S!n7A-T z%PIPlNQ5yc!KlcTA$81M#iBC^=esMj`ED0R7BJ1g&pwG*jkO~DlhF+M8>albr|ZaB zNm?gkr4635DwXx(`Kpxlm?YYjmzL;wt@$dg+pH0zVPjpnyov4kIC}iUa*!ERv@GOa zC2W#YK}I8By&kuC+QQEGRMP9Hec4Dw514X6+~4X(kR!MKeUE(AO!2j}rALOpv)XY- z@7{XKG_<(J%uz#N2)^XQQNoylf(e6S7N-7>Wpe2c8Iha=p?TSF^)|Te%*fu-k{txe z<+!2e1_&d|UqGUH|CZjyD^Wz4tIVU?EF@MR2yNsGPNHhv%z(%s2{Y<$3$fOkkh_mATdY z2P)U@LXG6x!en0hV29^9kuyz?A%Ybf< zrpYn5#?$~a1Dyi%Dmj%+PV-jox~C!rUpRu#KLIuRJ~U>Sr0GBU%wDw{u+Inif7ctD zL|=}5g3_u$_j`NwoL7b6h|G`83ZU(Ec(bz)^COR`sSvB16GFlxq{qxO9%H{wB5yA9 zG20^94h3xzZq{%af)E#3wPS01b(-4d?>}9C5^+NWVGa!eMV?n_o<#|);)ueH>b(#3 z4_%??CWJE^PH=1<4$?yzPMY#?lsfJhz58<>Z}C_28K&%$@C&(A9J(dFR~O(DPm`4y zLSs{9-;%77H*$a@kcSChYpKiJLhpxY6A>>BG}LELXz;9*m?Y62vy5^Q|*LS4>xC=T~3!`a64S8g@qEIqGLDFTQ3Cbd@}g zzS@jq+D5zid2mMPX(EL`mNdLg5)ES0)A=siH!$$-p;JE{G67CcL2i3>@E^7WU0?w( zCS~=VU$fD8jEwfMg*+)m8O?r}<3ISdns6}-KHTzOYf}g;RSdhT>rl*z-;s3m!zdzH zow$*(9(yXiqy_V>vgJuSiZ;F_NmTqU{-{Yvf$kz9=^YW!0*AO&R#bJQ=hS33B`ijg zrqHXd+(q84u{f)2CX(bd{qI%{Ef<3Fxk`ItrOI*@v;6Ds3|J;LWUcD)CT-H&X1CVIw6p5twc$*Ag zfTvSqliOf+lE>{~R@om&lF{vSVn5P;O(8?~|7Lp1Xy7XY>62WJvoe_Hp<2YLy)`${ zPOQ&@3}UYHi>-A^iH(O`9gu9R0_Rz-jG>$dd^?DAZingZHpMEvf~{8BStK2%Z+3q- z-o1`vbvRnbr^eZQif&%bE1GR-2=w(2M?ZEG|(ALAv>KLI}u& 
zqfEb8EAe_JWT{}?S3iGk&;O`qlMxr`sG6uhX|XaO;-S(u+@G&!MgRPwm8zkY_k`}O zYh-A8gekn!#2Ji{1}YdEFHd!u;4zQmbQD#_RO0lynW}nF@N7|=?kF}WoM#$bP9EmO ztYDqBs4~&D3k3S(s(NYm{dH^v4gEma52Vw01wAz$Y!(%vY}48sPCBjK;GYwJ;_ z;`dv9BEVj4>V4)&5?gCBxtHw%@EcRcyCfGzZgH4B-Tda4Q!SIjo$8I-5vdZKSu&ra z&_ZF~clc{2bK+S%;;`t20*nQcuUWm}OWF{+yzF%q;(;7ulo=towlyGQO0_wOWq$il z!oEefDK0n2?}5^OK%?5LGdR0t*jakKuwei5v_P8YN0Zi5B=BxupGhjVhUym*J?Ta6 zYyVuAZ4x3G@BrKXe0jOHXUf^Au{xkLr05^S|1jOL-y8IYI7B#fmkSF*_?l!ub5%<9 zpH_ObFu1)BQscsm1>Mbj41PdF!=qDSDjX8m_4s6NA$;@?*GTTm-Efn4NLXIjyuyThA7*O*Rr_x#7jYu%5n+hk3kV0hn6V_KAAn9G|sV>cZU z-(M6;iHM9h0=W=9nu+$U&H()GPH^G4J1k5gA=@fuAM%!vzSFcEnmvfolN0dS)X2za z^j+&1e4uXogHZlJAe}cHdlQP5{gI2S(+a7ouXH^-3T653;Ja`?i3#8Rpr7-eYJIQn z2(9V)C}YyHvc_BwiN;-z{!D%Uv-dos^H};qgQv~Rx<$MD=8Cs|t}PfPz3opy4sTit zdkHVm>h@;w>&^21_kVSC&qFXJMVS|RFwkPS{6jiurSq%cU7{;F@+9izC(*3~lA>5p zi7oEIEm1mdW^ksk2&t%+P}0E)Uy6b?arsoSqoyt}e-_|Ds+gV{QRA6`Vwvsi4;Eibg(O8WM5pjI^whGguzy7E7_#vwPkm_vQzN4BX5DDzroHO$>Keuxr z@z*0ai}ok!kKK4m*e4GRnHVyy%!fh8`SQiVE&C+q`f%d$_G8IKxp)>c>joywMlwoW zyH6Sk4jK<0%RCe4-~j?-u3ahKJV$6w-%oXc8yX&DiJr&eBAbIlnvH1A>L~ z%PlskO?p1t3ZT$j?k^v3;S~En^Uw9WKmva$O>;^~M;oLgF1N0a6m}sfN(1Q~`Qnbf8sTsb=~D2UlAr5g2+>SAFVQ;paCe-Vgw?8oq!Tw#tlZDr5g-Wig`p zHc#r0`p;_B25Q3LUUy=nOxz38Wl#BFPM%|{6ZYv*&-K|^F#m0-N`qb~rfl{`WBkSd z`U2U`Y@xzjUWL{k`1s#%I@Tx%# zL*>_{HkYZDF}e+YGD~pPO-jVXKH;u2tuu44X1A^M;-0(l&q)8#>4sfDL3h@;v#(%P z1>k(Jf(5{EZzwgX{7SOzI(0CPE9PQ$aMIdLW2zR;4GD^_D>UwCmAzPlA)PnVD%@s+ zN|T9JKvStzw{Iu{v$V8C49M1N(wdh{p6azTNpX zi40GC5P>gg*q5gTUWb2F`#KADZ46&^UCGo6^62TifU!A_EsYY-Ts#t~XChoirklJI z@X3mNJiG?uK41Lamjt zF3ql%pkcPReJZ>Ne;7kr>;iOU`$uG0SnR4crJN|*pI z(keP3g$w~hBc__JYCe+85BPRQz8?!Dv)7&*Zz0Q24W<#0)>q9sEtb4l9)FN_wQZ`e zMTD$eM7C!gEQtPkHQ9bT+F#v%zqxi%&)&YHd>N(p^xgfJd`e+dMp>BC;r-C{-T<4~ zn8K(rvseA&_Q#~=BUB+PZqtm$_&f~yKK+ZO8;pkBZC^7@! 
zLNn_QU${w0;jfw9BMw#%=={YfC5dSh?Y(lhoaA*ooi!$Uo`YpVc`bfmQM-=!)-;X1 zo2ZGtyEW^wAS58p^7DKU3NIAjW@%$kp8u z)`A3u^#JeBDkV97CPG6h1BsDI`7x@#dzuT7M0?HOar0}BSUy>G z)r|7-I3(?IT1g-bsV0)R1{(>NEDo0zdOaV^KE(KP`a~+*@7GOluz_LT6?<*zmkORH z%!FCV^S)Q0UKXUzp8E1SBgZdt%|+I(!^4g6z) ztO2a9^u~6 zm6oQzd-v&w>&$?QY;l|Q`^iA2AtZ;{+EtQYD6d{g0t^FC^*2=uxYTtuuPbp_M)X>U z_=LuTJyQZ+SZGNE>^EY-=h$@phZ+5dFAD(wY)OQ`Rx0WSRx8f{(9y#_A>7@RDeB5} z_w`DwmW?MJEHV6R2RdN1bs5TY)EteSxWa>ch1P0oe&Z@N=7Rp)3{=c&M7a-%_PUT0 z_7Bp(tJ4o#Y9JLqmz!0*>+PH#>Dj5Nn}wU)X!ST>F~icCf9HmhmF2Gri|HBlyJXar zSDF~4x25UMB1{J(Qd}E=`&E%|Kz) zI9%t#Qypc7464IlTbYcT{i2sYiNvd1-HCA1vV6|lW;>-= zCo~}4pPHMRo`zS34jb>TxX8|0@XWX(MexkHm(L>kOew8Sm}WvKeR%Iqt{(Q(##-HQ z(;2Cnjt+g;oaNl?P;5~mzpww$j}*H(eM<7$j`WFdy~}c`|5KU|pnny8K$Uyy5AUGg zR}=x%kh|ArmA8oj1F;PBtdLIzs}e!nTtv|kS7(OM$@{K+{;=#PKxa1xo$lAa!4~+S z(E)trs`dBp$m1|02G*cies%Pb(Y84dSZii6?c7jxaE+BVaB zsMTJ_0hHm2(U+p0e{r+EZDig3$|^mYVVovv%11w0#V>b^Ew3rR6~0dDd_Bkf&gM>7 zYgLp4F;3Bx`Eh3JQTWc)ej=_eVm0**YUMAp*HNs{htLzH&(#r-P7CJzs|S|R1@>bZhV>7F630Od+koW;m(GOFt}gI&Ks3;hYmmiXAg`kml(aDz9;8}S z(qB2>uiutxS*Z7gee&qm6ie_&Fijjd&THR*sfZBxUtqK7(wuA(uzfgkpKJcGMjyP zpCDw}3B=@%Y)IdTe=zHMWicwK{PiyC!G0J1ufv7c~Vv(j8+n1ytDHi{Z4bokf+mIytV+gy1Xd@>08NU^Z^R-e*m zMM(yNVi3QedrE&>BDrU6#8+L)8Cl5aJAsZHO?)G=VFl<q#KE*{AXlC2mNEHxs_+)yaZn<9g)mN*Aha5?`tG zAmFvn`LH12AQ8S9K_m=2Ne@^6S;m3SxhH;f3RX51*%B z({`SNt0|!WHWfI^$9u+#aw3ge`yPl~O6`xrE3k6r!Ax46B2m@Z4L7N2+Ehp9Y|PX3 zoj1)$8TU}k)Q^jS6cA?p=)o%U!f!dskyzpT`I!CLG%U?(zv048#7mW6K34qKg6zKH zrh3V+=O!+8V^Hct<;ByO5b{Gt6%I8pI@{C|H``H7@*Wh<50ziIpTvENCkxRZoMK^h zs4QZ|nMzI}2NVMQTFiU84Uc#_TwdEIzsfVuy;{Pw#cC-ojBB{Swr0#0XGWsH3GN8M z+v{S!*@bX58!k%wA9Ga;=rCeL93O~_oG4jr><5I%j6Hm^KaKy4o$X$CVn8)xdNhQD zMJKS+x>wwge$;xkyx-vmgdQ|tU8Y`s1<7h)dQ8f$O+|I#zaY8{=ZG`JeW?9#gu0S; zy;K$WemE8;0TBO?;hBD|ZfCkcY)ZXmWke_$Yxqqw(@%H78m9cwM2x5AhutJwa+X446Z&RvQCcSOPk>^&U68D z&j7q-bn4$0>?+Dm$0XKP6PTB^~s=?v6yX^GF zQ&jXQZAHQoyR`1^IKYsU3VIFOZ)e*NZG4ie>j)67joF;Gu#i9^SZauKEE4@|L@t*6LuoTB)@&&IzlzS&E z`;ZSwii$wjdWc+~O*HF&zT%gW5Nc+5i5S=ueLZ^zEATRj}5s*f~H 
zFYY+<)h5l{-0{-WqqJb!C%LX_UAV$kQ4s+D zfR=roJBlHLJ@vwUhY4@w;})Ax*y@+-&B*_V`mRz4z8jk%iPSjO}bK#6zzxrCWqXl50Qs5HHvNX6ZfBE%R)n0b!P}Tld*%>{cCBzd%>NLc(-k zdwGrJbKQM1A%!5f2YI%hAW5_xb>n+N8fp9);_q_1aH)QfM2i$&d&mD8*p|$Dys55j zV*?+K2_21~P*^if6P4+-4y;tuZC_I$cNr!Pwf5I9{Q;A9w$48ZK{%vzQtHd;DJ6Xd`ju!LQ?9v`ssplX@%Cc} zFLM|5pQK7G(F;0J?o(f2d#)}VRvN@P3)URZ9=ofjU2BkXkw8L;!cl30<&#R0X^CDOGOskc(}F zU&L=fBoe9+o75)&6~s-AHe`zZVT!%?s%?u^jVVrr4&s68@B`9R*^0Isw(+67t(Zy+ zUCet+$=GL$yj@S$>%2`YbU4+6r<*tQX=^m`}f3_f^eGR1C((r6x6 z`=bIsH$JL)StMyJU9I|5>`#b8@qmfC7AsO>2@{Gi-7Uvn^`Jgo8u?62BzU=4 zNlAwx>PNJ}k!{S9!&Q5^%1gQ*ev++@0%tkc{4ICncJh-y??bXYKVLq>!R~Oxnio5< z3_P1N)gzYNasxL%kKYI~n{j3S?VXdlO3S7d$7; zXtz&gF0-BWTVR<3ekhCD7lLf4cEb3ityM?u|bRVL= zw`qnSs?8*XE``&}k4)O)IuHbcp=}-ziu49616rPJjU2;GiGc{bp1WsvXj7>l=lmPa zfuE}iAF>|L3u~GWo0w_cU_zM1wA5PJr%N**o*GX~snhLVDlMM9tbF}o;CA{HwDGyN zO^D?U(}zpu@rGqj7aYyL&&2|6sZS~F!to~;7ObzkaAetbS5lDJq%nxQ^RAwTCB3a@ zHxE880G2D#C-~s9R-z9lC*w;T=HjiuRJB_PlRn{3s)6kepbH>~cEjY_l&SgcM)`+& zYm=Xx+AUkpcC>`Mj7D6xs+HXw3=OH%($a0Lw;;ZGdRRCc_pNw3RVNT~DHOd$HSU7KcpriOWW=wS88yYD4ByI=K3Y5o zVcjkR>qqsFp^XLzVVOB_f54S#*6r`YR1l`~mI@_yaKG`KxFs^vm5}}R+oRD6gp6Yct!G*?5}2SW7KQUirVw+spl1O?K~*c9 z3suG+GRTJLyVIl_MLf_(eN&JUkKyE@ZhDT-tKg7vzdFIji(lB#u$WP-E8_FXW{^7kPz?(@ zX*uNhrR0^JK;8DIU!dDU1SrQ`?(J6!#vg=*kD=Ql&uV3ucP!YZWFAu|vGJ2cB_s&W z0Bth1$>BXk6DaNaQCd5u9P8|c(2~ez-O7fI%^^O?2LQcdY{qC_xawov%K9AS-3#9*7=_OQdJ0_IMs# zmL$=v!)&S*x!xr#9{3j-3}-n3i#6JMO_h2V19=QJwEYFUCWs+Er%^r}i$A@D|JpX% ze!e_V zKGPC*Q3+AVc>1~4U(=uEmaBIuf5l?}Zod07*Z(mx0P#ELa8p|he2Nsm;*KUX9u7X; zZJKks5=fBHOJ5^jY~UvU^XJbO*T28zRZ>gT|5~p(GoPF<7kDKp%>t2+Cyo}I>!>@b z&Pg*El7Bu27#(&7s11NN_l6UBtH%AdcTbm^YkvOxnP?;c*_7uM)32~-Pn7%`3M(MB zHHWQtN(BfuX+?95ftxV+EIYYxBiX}{Qdz-oZ|`o*mSvO5=4@oQ-zJc&#L z;o%{ehehOuUJn627tR)mhv%Ps7!=2zBX1`6XT%)Uh>pIvmH4piL$+;>CkEa4u|_D~ zNJ-jXb@+Hx$89P$D}oJ%tJiQ$kmd##)3(eF?BLty2P=KjjO)9n`|+Byt??~ocsxUu zHyl<)#`FqedWIqDFcaG}Us&}Bz=u>N!u~tI0IIx_A&fw4y($*7o{Fm^d6t<~<&yGp z%HhZDC`x?GY-$gbp{6Kb@n8|6OkO=8uYL9v{~1*P!5_DuB};2M8i_solBvll^KCZT 
z%W9fA#Mk9daE^e%?? z^;<0(YWAI%hh#qvs@^@@m=Ga1l`J1EC`}iFvZ5x{jpL2##8^D@1Q33LIEj?w(n9O55!rC_T z%MD*?Mq>Y{eida^8s6({ckffj-?VFV{pvL$Td#xP{~N-tjU9 z*4hPjb=F_hvyo%BtlS6hIbI;LHu(|C<-ogdyY{sR`ef)6JZSUV)+;5~(j!yhRcYUw z-OsveSigJpu^;b|wN7vzWF(+YtSuO=>N{U*KYrL%7i%f_U0`GSI<7a)N7KFx3KPz3 z8+^^;_hlB*ER)K=6&H!%GD2nrW2|9$-}!im*$5DO+RzDdvCdQsKqm7v?ikchNaPE} z)u8HM5xq6P%&@P8hh9ebhP<(NE2#%HW|~I@kWuM6BL$iLxV+7~wY2KU{|YYC2hRJ~ z#a%eQRCfJrC?hjG7y`A)B}tlBRgF>)7@M@fJ>(IwvM<+k%Z&AS9)7A;WS z9X!GM)G%9ig@TY+!fDz0KR_n0lpd~!tC8Ez-+HEI&R*q)Pe&LnD}FzHm`*ao5Tu0b5Vmg<%Vb;>Y@Jf&iv&x;p+t6avI`L6%*&aii{Pv?~^FlB#Q z#<*q-8!}jamY5jy6@I3#4%SGkf}F=B4wvL{>Lv+J;wZ%?XlzcgKs)?Q7__{9Bg$v7VW%~$?k~rxf9e(H@Jnjhr-FA&rANE4YsI^@580nRixYl^w51bmP*Cb zW8;Eu!3mbLDSi$A-oriK*Pb`dK9v9>*h z_{L=xunv}~Rfe%Zm<2CyC96+~moX;Na7gq~PiRUaLSi3SDUsj}#X{j$#80m7_`^~O zA9c|&Xaj|)o>==ma*=FEENhff*&i6CM{B4D#Wv+yMTwtX)uZ5HB*QMR1vI8tg0^4N zEfM*|IE!8ciOi#G1XzQ>e<1iaO)`=1&042Ca5yc#?KZ;B*a;n-sbXP!G-SydMnz1= z#YKaJ;3_{55uRJj#Ya5)|UIE&u(U@Q7XVidkRlIg%^Ol8EkYcYon^`lM4P z+L+VVfA}q08Rp=(9jMZxovSH_*5AKYc$>H_%PF_~GIUlIFR^yuV_;PVR{tuDOD}u3^n-wmgW{9Uqm#XKtom@RI^&ts_Ry;_ z+1pFT+sE0jnOF!?8m9I4y$3|%_pPO$1@01)P9eeqcKcX{S>a>oMzjVsPslG`xah6A zG`i}CJ_3&?rx>YDp=Xtsq^J-gMxlyvVS5CoJ85&;gx?WuZZuJAW781&18rqZ4lo%7 z`qM}AI(a6l@Vx3Y2XX_R&eH;v?y(vA5 z>XEskh6Aufa5J;*A|lx@_{UnE{igvD;&fm#I)+Go39Qq>oc{M)$_BJr=xHbGVjV30 zB6A^KUnQ-NIYiZ0Sh1l556HlSfv2EkwmVz@)Isb8-FYMHbvxbogMy-p9zK^Xo4#X~aR7I#pP-1=yaa zGd68Sy74fx0BX7Za1@D1Q^vo`;g#Mqv2JZap!2uleM|l=g)`~|@QH4Rh?u_c^!0wR z@7Nz6{)18l%904LS*}3VCl~2agUz`Pad{N;AORq(56vNsq zYU}E)$OX)go|CtZmJ*={Ja2}`y!nShix#@)dsg`a`Xb>B2GoCaq!1+h z%88`7gAq(`tT{WqTCSvpDQzF97-st3T6;W61=y5nKlxEKL6wkhRjp#Ezml#&YEGP; zo65ttVRS3OWAdr$tmn-p^RT<)iacfUnVCh(>vyL{gpCU6GI zcn;vuIme>;ftP#NuyYp$s`bMBoB!Gvq+Tu}HI$#vr&!ZvSTrU~)P(|t>8bMTqkYd$ z_J9sfFh{EEe8`8xE$|t`PH!<5C)hek_`Og6;xfTXY`nLj`{F5SIFYcTv(T{1TeBqM za#@H_IL5KzxCGzy&9Qo+da~6Z=RGK}Yny1o@-!B7lBVlHMuvo4> zl2ESvuns&BOxtt0oOY;POXovbSo22&G+7Xg>A_ z%h(@3d2TMg@ipf1XsQj({+pO)wNh&^Te@WDd~TVJOAS*}`Ey3qy(w91)*rc_jEq&B 
zsg>xIG#6NS(4zM3bIBv+(=lb=#XS;E%Vg1Z*l$)LP9*6&WQA7V?{F23_=Mr>BiPXpL;h9)d{|yD#6F zvmm-b)~S+0P>91OSeierzHR#{n_ZJ|rlG44^Tz$AZnrY-)|tzp>CG={ z$@=BXjC|O(5cUhXlt-zbZeB88!t`R`UTFODp0w~F3dFWjSDItAuL)XK{^*bB=x_}2 z7<{iOfy7Tvk3A<1zwV+2oDZ^og^$!?%~;N?t?9^_`EVyw&AOIkLbzpFnaY}0)*3Cl zLWpfQTh~w%C3*f@9{8foW&*R_x$^B$gIWr^?P7e}`+?aDTDx|XOxr`Y*uqOtQGzso z2Fq8bztGp2XM-;VFcDFW&1iWbFZEe{)+ZOPgakKQ_1vv5Cj1sM$twit4E}H^$Pm6J z!y%LPDQVS9;Hx(Sb-hYtvakbo2Kfg!wiJ&>ba~(wPM#idXB|2ewM!SP$SAhwkaVbH z={QK&>RTt%0auhIV#n+W-VM<7`&A!zeXQKW;-8Va`*YY%nN`8Y$|^1FGMXq3t%p4L ztN5#PcOeAfmb;o2Rf)IOIPO{rz^hdrs@O_l_KQz5WSOA&?AWt$L-k zAuA(86X{DrR+U-lg70HPIfPD9{mGQaG|)oI5j7qBL%n6%`>@{ zrqx(C^0n5Ds}T~haSlyuRim*(UJxU@kVLs7F|;6}?8XwVHf zUTv5y=0h&%J+;QV8@k)SDdC7k=w2}GJ;`fAvPTPDESVWI3F>3Vr!JYys@m|WQxK-<%Gkru1Ik);<7W26))EF8!J9L&v-L|S+ z%d2<)+q)dQf41%QHwWy-A1id&PtHbeLSyWT?JfnqrR3%=7$SlK4yTeL+aDnInEQLW zR*@cd`cWUm^PUh+?Z-|L+nqx-nC@iauk^h&gUc7J67~f^SS1}*5Hv?=H=if?#fX}k z={ltXyTIsxKge?N=V+fIrftLfcKUFV*2Yd>wV`%?Dr~7`NT!sue~S_Qe)e1<@@YvXA8X&bu^1La59&Py zM|}?V`Xvo&PMbcr1%mBJQ6|}qOu~6Zu2-^t_9iQ3825F$&Ak-IR-RNJ6L7={q)zkd zQmT3p0Vc!li!@Bg)|EU)5FbRAbMDC`x>G%r%>7`d*avrTla+Ny-R+WaLU%+u&WQEgx{qQ6%ex57~tV` z{r9w!xpJfpr)?{nkE=7PqYFeb!7FExZNCQQGbWU(qRFhc(ZiOiGBokc)Qny1DzHnN zdIJ?4|Edo(>deMETqKMjN!#ei{<+3UzqdUQDlR&RhMDq$zByA>MOM#bH)lgx)U*pr zxDy?1<#p|W4TY!$^i*A!B2bZYk$0#RypijvmGl`@h(!5GuYH#mevL=iHv^V!H=A{h zKk28WqW5H#^O1CzeN>9}X1Yo7q+ZNzgPeaFc9XTIOkDIsn0u(SVqf``MhFw->P^LG z+|L+s-T(jv()0fUz|DiQ^0Aseo&6lHzM1!kZ?)EW)7i7jv=GuY!`|ybBZ|@BB#h{PN5PpbQp%5CUOqz(k9|^sF@Thes7=a z^ZgxezuRs9?e^a7{od>Oe!iZs=kxKn%X|vQIpbbnYz10rI4fZ@p85|F`{yjdUY7r7#r^~K zAAi3Jqx@Mvt)9I05`pra0#)?B=m8*R&M9Pw8F8X?HZ(e3U9{38^>92l?i0>$!Q$N0WpJA08e4ZHXY|Ya|KJ zv>kMZnsNkYn>qt9I@lveye-~f^^)7h0q3T0!ajX!VBkYFxNVh4D0(@~3}>MlZpT%k zGHJ1^8Cfg7*ka!WDk!%aa9JfMuDECFyKxPS zSPhm#Bqt?UlT%X+>I!4A_`PmVt$?z{S31NLi&yQb@)`wguF&p3jY&+E7!2#WqppVK z$8F_oeH6zNYL3omQBFb}ptGnjeOhY9tV>%D{qC>J3Xpx$&!Luq#I-!Rr6%+CjqWml`6x!O#4HMN;J>YafBSh_x_?#Dyx=+nPS@+JjZf>Qj|ZF`8>!Qg 
z*h(EQc@X?|@_3z3S_UfO$XoE5)srcE@80>0y@l5^g!(q`$+yNUIpd%>$JYwrE7|C? z=@jbQVRUdi=bJf`ZGQWguh4@zGU;dO-#5B#TL`P+X^;kod9a+`$W7o(RaS1>2?22r z4}>|nberkcZgttFUR_<9o;Teit3hlhm=g zLJnDT9lZl9d;v7yc+R=nQsQJIz(Ur5D`M26;ytXe2>#LYQf9^w9i*D=*%`(|R2CC~ z8OxFH%g9?`NVBmOIPDwdWIMOF$Z^%6!^q&1*-q{Yn_0V#g{X*8?SP=wH&%Ld+4!EB z_KD*?Gg<^YeZW^6ru*@!b+beeortP!r4Uhs*VKq$)pRvRRQ3Ai>|Jwwhp8UpkuyH7 zKGb+$w^qQuWd95iQ<&_Is0+WFOm`q%c_JCF5r+l8ksM*D{qfc(!|0`I;mga<3dZrv zd0g{v24=P^_eLDj3w!r<2VbL-9-z7Chou@ zT)adPM=zl9!(Rwt7GirW8`J*J^4d~5CUo5x4EH9^=XaN+p-P&ojH~C7Ttd5MDquM2 zY#vTxpzja`XK4Ro?sVYC2vzD^rfqa@+}C!N&t(Pd`_e%YtM~OEW^XSZ_POcpeOx`@ zr3hY$4*N8m%sKPUsMJk9$s9uP-#;95`{(0=Sn4CB-24FQ43}x7^w=%by3aVJI59Sf z95*!SE*QxVN5kQPHtd)0Znp;;VT&+ut2{V3Q1KdYn3m@jVt>A9sGA}ph=G98=A@5N z6L0B$-snzl(g21JEfEgw*&9#&QQRp59{tH(JJXV+-8*;gcraZ*VM+>5tmC_j0fQenS}Zd{>V#&UyQmqLk70mi|^_sUy{Oaw)x7w-OhY{oV;7G-n#_ z`aTW%>Xz*v;o7U!ZhIs5krX12aHFeh5-yz6QhZ$ux2T1kVH@Y00;=8A7^oq14;lk) z@4LVBN8;Bbb#IOHh^W7_(w$*v=kzFcOH2GdvMyV^3PabPK5%-{KDPVky`zh5bB&Yy zRJi1>sHS*&g#gFx`CqDQAuq%sJS6C^)B$Dym(I_T;KO{-!(Yp_0!yGC(^~!lg<5?Z zcGZnyOT!ZBA2-~lxNz+y+hTfbQ$MDeid^tFyL;(~zY2CgeM*OEbWr~UE+GrJ-ZLv$ zz_(Mi^(tRYiDP&K_>RBu01T=#A*6VmX8rJdPIAcvowq4;rQ3MJYA3~Pi>N>uBokUc0qk))V++_~K{>t&|z3+|C1wOYlal=~TBbVa}_2h>Yd)=c=dC zezS(uH4PCZl|Qu}&F#+wPZS!=d~M@yMo>Opt=Qd+anz=1RmFFHxs_0$m?v?qUn@|t z<*}F0sXF?D@h#K$V!4~j%|f|VP9?g!K}1?>&G7uBk=~+OspC#_OH))2HuIHzjH?gh z(GJDU#oZpa<7(J4{F&u7NUzqcYFaFhNr0V9QSO*a2(+A6Ap-mTo@pV>^b|aO!6A}= z;MDzAFFDPWjH>5EM(MVLsFwFLFJ$saKDW%fxxe|ZI)AYj!)r|0Lq^ z&1GPs8|nmkt9wP%ko$ZrE$-4y&Ghq3+K^Oti!Q^B_tm?LQi`j{8mW~voxaay7K8o| zj5nYs@3ihp87BIB{$m$I3}ZhJ9oiKw-gR^ynIBl%QgPuAp`WNGoJdu7e0bxoQ#79` zu<&ZbpG_h6a#r4qeO{C>xhp&}w#eFCQBxo4KALM4WjC91FXGrrDBMRWY=m+8PHC`z z>3olQICX9qUzWHGazuH6`&fZq{+0toH zTO+*KrCpnT|Mtmcx!?Rz{J#Xiu&C7XXNDuK}W{z<|-})7NGD zs6g}g@`2xS`5ttd(O|T85p*r}!}ZhG(_CO)e@8tZXgofUr+==?^<^hU#iaywCWa1B z4@RfI4{_$*OgvX7Gj|k7?mzx zF*(s*0KvH{K!=N z(L1`5T^Vd*7_&yoh~cQ``>+b7AFP~y;Eh+ic-MNdVSONbqJ+AvroL_-hF-KYT+b=1 
z_3!#~R3cJs=Bwg9xZlt!cw2^JiYuPUDMr1X@j3E1qRZ_|yKgL+g@()CN+Op@=DbH( zu5?x+aA00pwgLIm3CqMzh@hr_8bekWdQn8F`w-3Z-@^F)a^>Sk3O=A8rSs~p5rtCaE&|-z2*nhJavsAMkUq62rd%M^a&P0 zGF_JyQ@Hx#>Oq2WZ@%peHrgqrFadN9pmz+Q4QmGrj%xYTUE#XH^z>~3?SqX>-am;N zW<^K}{I$4JVsohq5`XMGEoIAIQ714>ZkJ!_g<@ttUy$v8e<7B9)g3My%O|VNK(iGq z@TpgqeoWsE%pSb>w-VOUN~NzBUW#A1J3Bk-TB55Vo7zO2Y~?>kV5l107)iR`cUW}L zh=|&pO~ExBMwmuFu3OCgY?|HIJ$4t@P$PsI;D{0M#oIdZ{y~1WMLIl!ixw=1y%@tA3tx1j9aRom-sl59>E#6x;_PZbYb+4PF+s*L@M@{e+;i(nF`q#@r zrJqgtDHgxpm`7vBpAK^r>%Xhc(XXx|gHoLU+DM&B(4vFeYny2J*zjl9jkr8znF&YD z`shn?zNxd$f6qoC_EAOKe0O{E2>2~D+fufgzK(AGN+G=dwm#N;G@!t*(tJl2^>jkX z2y#Nfpk4>IjmCP*#BRemGVC*Y3$Dz725vyj3OT>> zrq%accg)L_vp6*WeP0l5Xvbja2YA2RLc89Nx8$WYqg2pE(;=Sr7UbCI=-o zeG(JyEPs{be(7va_xt%ke6%EHl<)2{2j|c_4}`+(^iB1L;-5E?9@Q9xZ={%?j?i{Mz1WUKJDpPJ`p~$bN}iobmrAl;UnXqmGyB@TG7Qx zYVjR3;1E1tAq2Yw;pSr)Cv=>7s~srRi^Mi#;spvk;;3}AfUJu;ynEiH|0Os}x|Oq= zab8LlzeOSv#`Qb!D!ax@Dwqo8LB5Gzb&^7XjZqSYdbK9!$${<%G3rQ zs=p2VesSu_IRp^waeh6{q3* zdlSdx8*qw^J#D1ddM;$0S&HT}ZaZxbfA!NBFF4W*L-0OM^H`%?9ZZ4U3S*8p|5y#C+ z&VPMU1H;XpzMSgPh zh`Y3E6Dd%L0&xcsUP+Vq_Tj~ol%=0_puD4^NXs@_JE49hYFVON|&8Li7MY?>;qhG&2w0~L^| z$i+ZvmuwCv38aJJZpyUNr%ua(`M|tjK5Ia(J4VYB;SxC^qIu$SW<>@j>p_TTgOC5L=vZ%`P0mZueE}tB^t_r$tNxu+qmEt9bKX)RbNwFjk@Igk6 z(1{%tMF#Q)0n_0#Hc2NuE0OYMr#`iGf{3n3NxHA#HNPVkw+^UO0AUNjP*l6`fxLi~ zS6p!I|4J5UW4F`|Rk&^sB}kbLC&=8Hc^S+dZW!eXq%Cp*6fWM|i%%QOINsN7TUpnv zMOd)T?9;GujsffKNbyM`?GI<_a#XoljKPU^Et;o`?zsM6jNj`&mRA3=ZjtI(V>-4#ofwiIX#{aO0H;yVoKo!PD=fMJ_Y z$x@IhBa=Wf6rEtX3;p>((Z1Zzk4geJ3wU+W@=ZM)<9@>)x}jxi zHwOAUPic5czrgw1qvZU_3X zVU?XEHbq6)XoLIvDMC2g19q2nUsGQoU-?rm1`EN#!Z})3tW*rqS>g1W&e40vtZ%Xu z8&b|lBJ9Zz|O8*l?O^d$@f>e`C&Ei-+Fh-vi+Xk3uK+~9C z{y}1^ZPHP+_2MWOsOvM^(%xGbE@Eb`krHNF>ikE&xIK%JtM0X(R?=5#UYE_hZ8!}E<> zoP7lzH8093p;v&w2}7M^_#sU_`rDHtn*HH*Ap>lC*aYaX*<%Qm!*-lA9v#V}hLe>T zK}7A(S^tHNa{Y*WHj0k?s`9SQJ$Xw>9M%6WQv+N#WS5jNdsju%Ku?UB(|-nw@pz6t zaM8VFA5y+vE1>}o#Zj92biMz_eIG7hJ zZpEDl<_q5BmSB-H5=TDUW_rdrOZpnrSwlu^NT<^^VBj?j(=#Bd?CkJ~gvKnL;{k!} 
z%;&rrpzQ1Y&lNnGQ#B5-x@AJ-7XM)jP03$5?!tuPCsh*ADN(S++`zoejEe+0JG8 z4eVt<|B*tvsB?Mg7>_XV70_UB=ru=FRFzP{Fis0HUP>+0-?NOudjl?RmlF8;!U@BN! zybbiC^UlP1FWSTtoE~}BRzsFGA`#2S>)AE%ky5Lx>kU26nv(OoY-mD~Y){Lr z49Q+HBxHYKF-?RMgwDGzswvlFzvV1qYj6#4YlHPvp;G}8oLo4<*7Qb}g%*!KG4+Ob@Vfp%qzTz#bxB#^R~-+ck7e(&4u-0G*w@s@*< z)}_iDl{n%6wg3x#s=~$mCay@R)?G<$cvybbe!Cgs})f2ZhxV zAJk)_>D}`=u#>eKuD~o-x2yI_s8I#}2Z~|#ZRK25oZ}0W3qT+nsEW=M}u*HL?&^^Gv34cW(hZKiq=T!$b)z^3;6<;S5QOo|_{z*6a^I66_e~8;}ot%S7vz zYX`e|g_UPoUcDm$Bf`S@RC*UAjP@Y%6&>yB*w%BWcWNf0P%3?v%R*Hgw|!olX#|l$ zi5H*w?Of}GhnmdHC$!pVGm$0Ky1_wy^N;?*_lyb&)&>J(?=_omJr*0z+v=;T>&dRh zg)f@c-w32!I924Ud1Bzyt0P4OZ2NM9nPo}5GPV3RkMLtimtAPyZA)x%ZeinrQSM;H zf&~LBp`rU#)|`{o#Fe_+O8ToRH5XrNu8(u7?YbRSIt9;lqfU>*l{$M-h^b zNCgghR6n(8eT-VwM6^;tY4+GTS}uy>Z(kp%38_UORPQy;*5b9hq&g?Ik;OXK*yYDQ zuGzfKk@9Rn*|%uLH?7;lfJ*(}dj{xLHJ~pJ%3s_U9HtE3&TTpf*L~T#b%Cj8U$5WX zMBU_!xVj7|qekYD_%L+VHn#ryCeuD?YHnZEuCGkrs&p4JU!hx`ILv7bEeB-*9U{-f zeQ|#e-}q$X^nAX(aKA(9!h9-_M}Z;qRFzO=tLnxTduWyk|AKIRtig;BMKmniNY)@F8M zeRdz)xk8xMeuJh&9_~Cmg|rf@1*7=;toAc@Cp7JaFZ>iE_~*=<%N+{!f1=A=oJfV9 zXP{7F%FXZM{TMF%`(gi9g$Ly<`LN*f9zkjS#nK}~6MBZO?o8v5t1Phdw{ddpJ{pG+ z*L%#hfgV=GDBt6t+&HE$z(bK`Gs{|c2~2?4_q!WQQ)nv)NyV@Gd4}W^9%DR{DPDQX znxSrAmM5IBDxu&|rapuJ5BR%7cBIYiM~H>z*LBHiYNDU3t2v&!aCBj`T3c0Ep;Q?=+9rf%m3l^ z{!c_7R4Z=dH2Ul3uX@L1{tNT{zrT#HIk;})>2Ci2cb&mVEKJzI=5bT#|M{=}jX8mG b=-%x*BkP1m;dZ Date: Wed, 10 May 2023 20:28:05 +0200 Subject: [PATCH 324/424] [docs] fix outdated tensor data ref (#35212) Signed-off-by: Max Pumperla --- doc/source/data/working-with-tensors.rst | 2 ++ python/ray/data/dataset.py | 3 +-- python/ray/data/iterator.py | 3 +-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/doc/source/data/working-with-tensors.rst b/doc/source/data/working-with-tensors.rst index 1d136a297c1e..cd8fe7dde27f 100644 --- a/doc/source/data/working-with-tensors.rst +++ b/doc/source/data/working-with-tensors.rst @@ -103,6 +103,8 @@ Save tensor data in Parquet or Numpy files. Other formats aren't supported. 
For more information on saving data, read :ref:`Saving data `. +.. _transforming_variable_tensors: + Transforming variable-shape tensor data --------------------------------------- diff --git a/python/ray/data/dataset.py b/python/ray/data/dataset.py index 1d24cff5f383..0e55c2c98f7d 100644 --- a/python/ray/data/dataset.py +++ b/python/ray/data/dataset.py @@ -3211,8 +3211,7 @@ def to_tf( .. warning:: If your dataset contains ragged tensors, this method errors. To prevent - errors, resize tensors or - :ref:`disable tensor extension casting `. + errors, :ref:`resize your tensors `. Examples: >>> import ray diff --git a/python/ray/data/iterator.py b/python/ray/data/iterator.py index 79d73ac515f2..2252f2dd2a7d 100644 --- a/python/ray/data/iterator.py +++ b/python/ray/data/iterator.py @@ -632,8 +632,7 @@ def to_tf( .. warning:: If your dataset contains ragged tensors, this method errors. To prevent - errors, resize tensors or - :ref:`disable tensor extension casting `. + errors, :ref:`resize your tensors `. 
Examples: >>> import ray From 62a7933f3e58a0a50f2642f94aa8632c9915c583 Mon Sep 17 00:00:00 2001 From: Eric Liang Date: Wed, 10 May 2023 11:49:11 -0700 Subject: [PATCH 325/424] [data] [doc] Fix dataset images --- doc/source/data/images/dataset-arch.svg | 1 + ...tastream-loading-1.png => dataset-loading-1.png} | Bin .../images/{datastream-map.svg => dataset-map.svg} | 0 .../{datastream-read.svg => dataset-read.svg} | 0 .../{datastream-shuffle.svg => dataset-shuffle.svg} | 0 doc/source/data/images/dataset.svg | 1 + doc/source/data/images/datastream-arch.svg | 1 - doc/source/data/images/datastream.svg | 1 - 8 files changed, 2 insertions(+), 2 deletions(-) create mode 100644 doc/source/data/images/dataset-arch.svg rename doc/source/data/images/{datastream-loading-1.png => dataset-loading-1.png} (100%) rename doc/source/data/images/{datastream-map.svg => dataset-map.svg} (100%) rename doc/source/data/images/{datastream-read.svg => dataset-read.svg} (100%) rename doc/source/data/images/{datastream-shuffle.svg => dataset-shuffle.svg} (100%) create mode 100644 doc/source/data/images/dataset.svg delete mode 100644 doc/source/data/images/datastream-arch.svg delete mode 100644 doc/source/data/images/datastream.svg diff --git a/doc/source/data/images/dataset-arch.svg b/doc/source/data/images/dataset-arch.svg new file mode 100644 index 000000000000..9f3bbea5596b --- /dev/null +++ b/doc/source/data/images/dataset-arch.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/doc/source/data/images/datastream-loading-1.png b/doc/source/data/images/dataset-loading-1.png similarity index 100% rename from doc/source/data/images/datastream-loading-1.png rename to doc/source/data/images/dataset-loading-1.png diff --git a/doc/source/data/images/datastream-map.svg b/doc/source/data/images/dataset-map.svg similarity index 100% rename from doc/source/data/images/datastream-map.svg rename to doc/source/data/images/dataset-map.svg diff --git a/doc/source/data/images/datastream-read.svg 
b/doc/source/data/images/dataset-read.svg similarity index 100% rename from doc/source/data/images/datastream-read.svg rename to doc/source/data/images/dataset-read.svg diff --git a/doc/source/data/images/datastream-shuffle.svg b/doc/source/data/images/dataset-shuffle.svg similarity index 100% rename from doc/source/data/images/datastream-shuffle.svg rename to doc/source/data/images/dataset-shuffle.svg diff --git a/doc/source/data/images/dataset.svg b/doc/source/data/images/dataset.svg new file mode 100644 index 000000000000..afd36e76bcc7 --- /dev/null +++ b/doc/source/data/images/dataset.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/doc/source/data/images/datastream-arch.svg b/doc/source/data/images/datastream-arch.svg deleted file mode 100644 index 757f93d1777c..000000000000 --- a/doc/source/data/images/datastream-arch.svg +++ /dev/null @@ -1 +0,0 @@ - \ No newline at end of file diff --git a/doc/source/data/images/datastream.svg b/doc/source/data/images/datastream.svg deleted file mode 100644 index a607ea98213e..000000000000 --- a/doc/source/data/images/datastream.svg +++ /dev/null @@ -1 +0,0 @@ - \ No newline at end of file From 410ac727197a1800aec19dd39979dea74b7e8f34 Mon Sep 17 00:00:00 2001 From: Avnish Narayan <38871737+avnishn@users.noreply.github.com> Date: Wed, 10 May 2023 12:30:04 -0700 Subject: [PATCH 326/424] [RLlib] Replace calls to socket in learner group for getting ip address with ray (#35218) Signed-off-by: Avnish --- rllib/core/learner/learner_group.py | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) diff --git a/rllib/core/learner/learner_group.py b/rllib/core/learner/learner_group.py index cbb6870a72fd..0ab6fb47f569 100644 --- a/rllib/core/learner/learner_group.py +++ b/rllib/core/learner/learner_group.py @@ -1,6 +1,5 @@ from collections import deque import pathlib -import socket from typing import ( Any, Callable, @@ -489,16 +488,15 @@ def load_state(self, path: str) -> None: self._learner.load_state(path) 
else: assert len(self._workers) == self._worker_manager.num_healthy_actors() - head_node_ip = socket.gethostbyname(socket.gethostname()) + head_node_ip = ray.util.get_node_ip_address() workers = self._worker_manager.healthy_actor_ids() def _load_state(w): # doing imports here since they might not be imported on the worker - import socket + import ray import tempfile - hostname = socket.gethostname() - worker_node_ip = socket.gethostbyname(hostname) + worker_node_ip = ray.util.get_node_ip_address() # if the worker is on the same node as the head, load the checkpoint # directly from the path otherwise sync the checkpoint from the head # to the worker and load it from there @@ -540,11 +538,9 @@ def _get_ip_address(_=None) -> str: The address of this process. """ - import socket + import ray - hostname = socket.gethostname() - - return socket.gethostbyname(hostname) + return ray.util.get_node_ip_address() def shutdown(self): """Shuts down the LearnerGroup.""" From 06b7690eba6b9f4813b59af10bb0e5cebc9148c2 Mon Sep 17 00:00:00 2001 From: Balaji Veeramani Date: Wed, 10 May 2023 12:33:36 -0700 Subject: [PATCH 327/424] [Data][CI] Mark `dataset_shuffle_sort_1tb` tests as unstable (#35203) `dataset_shuffle_sort_1tb` and its chaos variant have been flaky for some time. We don't have time to fix this right now, so I'm marking this as unstable. Signed-off-by: Balaji Veeramani --- release/release_tests.yaml | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/release/release_tests.yaml b/release/release_tests.yaml index 40801cce1182..6727a7e28ffd 100644 --- a/release/release_tests.yaml +++ b/release/release_tests.yaml @@ -4304,7 +4304,7 @@ env: gce frequency: manual cluster: - # TODO(https://github.com/ray-project/ray/issues/34591) + # TODO(https://github.com/ray-project/ray/issues/34591) # Revert to the comment below once ^ closed. 
# cluster_env: app_config.yaml cluster_env: debug_app_config.yaml @@ -5636,6 +5636,8 @@ group: data-tests working_dir: nightly_tests + stable: false + frequency: nightly team: data cluster: @@ -5907,6 +5909,8 @@ group: data-tests working_dir: nightly_tests + stable: false + frequency: nightly team: data cluster: From 71c4bb59aaa170aae8c4e28238f4a9f0d508c4f4 Mon Sep 17 00:00:00 2001 From: Eric Liang Date: Wed, 10 May 2023 13:10:52 -0700 Subject: [PATCH 328/424] [data] Update tagline to datasets for ML (#35228) --- README.rst | 2 +- doc/source/data/data.rst | 6 +++--- doc/source/ray-overview/getting-started.md | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/README.rst b/README.rst index 3e2eaab5109f..f1ea3157c08d 100644 --- a/README.rst +++ b/README.rst @@ -23,7 +23,7 @@ Ray is a unified framework for scaling AI and Python applications. Ray consists Learn more about `Ray AIR`_ and its libraries: -- `Data`_: Distributed ML Preprocessing +- `Data`_: Scalable Datasets for ML - `Train`_: Distributed Training - `Tune`_: Scalable Hyperparameter Tuning - `RLlib`_: Scalable Reinforcement Learning diff --git a/doc/source/data/data.rst b/doc/source/data/data.rst index adf2569f79ac..cae879be7d3e 100644 --- a/doc/source/data/data.rst +++ b/doc/source/data/data.rst @@ -2,9 +2,9 @@ .. _data: -====================================== -Ray Data: Distributed ML Preprocessing -====================================== +================================== +Ray Data: Scalable Datasets for ML +================================== .. _data-intro: diff --git a/doc/source/ray-overview/getting-started.md b/doc/source/ray-overview/getting-started.md index 77366e7c7688..58e510702e0e 100644 --- a/doc/source/ray-overview/getting-started.md +++ b/doc/source/ray-overview/getting-started.md @@ -143,7 +143,7 @@ Learn more about Ray AIR Ray has a rich ecosystem of libraries and frameworks built on top of it. 
Simply click on the dropdowns below to see examples of our most popular libraries. -`````{dropdown} ray Data: Distributed ML Preprocessing +`````{dropdown} ray Data: Scalable Datasets for ML :animate: fade-in-slide-down Ray Data is the standard way to load and exchange data in Ray libraries and applications. From c8eb82fb188ad911c9664eb0aa5b0178f91b878a Mon Sep 17 00:00:00 2001 From: Peyton Murray Date: Wed, 10 May 2023 13:11:27 -0700 Subject: [PATCH 329/424] [Data] Improve notebook widget display (#34359) This PR aims to fix some outstanding issues with notebook ipywidget display. Changes error messaging for the ipywidgets soft dependency to include explicit instructions to Install/upgrade ipywigets as appropriate Restart the notebook (i.e. jupyter) server, without which widgets will not be properly displayed. I've also added a number of tests to ensure these decorators are working correctly. Switch from using _ipython_display_ to _repr_mimebundle_ for displaying reprs of DataParallelTrainer and Datastream objects. This change is motivated by the fact that when using _ipython_display_ to display widgets, it is the responsibility of the author of the _ipython_display_ method to identify the right repr to display depending on the display capabilities of the frontend. This introduces additional complexity, since IPython.get_ipython() is usually the way by which people detect whether they are running in a notebook, but the result of this function depends on the kernel being used and doesn't directly tell you about the display capabilities of the frontend. Instead, a better way to do this is to provide a variety of reprs (e.g. an ipywidget, a simple text repr, etc...) and let the frontend decide which one to display. This is what the _repr_mimebundle_ function is meant to do. 
--------- Signed-off-by: pdmurray Signed-off-by: amogkam Co-authored-by: amogkam --- python/ray/data/dataset.py | 52 +++++--- python/ray/tests/BUILD | 1 + python/ray/tests/test_widgets.py | 103 ++++++++++++++++ python/ray/train/data_parallel_trainer.py | 82 ++++++++----- python/ray/widgets/util.py | 112 ++++++++++++++---- .../ml/requirements_ml_docker.txt | 3 + 6 files changed, 280 insertions(+), 73 deletions(-) create mode 100644 python/ray/tests/test_widgets.py diff --git a/python/ray/data/dataset.py b/python/ray/data/dataset.py index 0e55c2c98f7d..c38ab2c719c7 100644 --- a/python/ray/data/dataset.py +++ b/python/ray/data/dataset.py @@ -24,9 +24,10 @@ import numpy as np import ray +from ray._private.thirdparty.tabulate.tabulate import tabulate +from ray._private.usage import usage_lib from ray.air.util.tensor_extensions.utils import _create_possibly_ragged_ndarray import ray.cloudpickle as pickle -from ray._private.usage import usage_lib from ray.air.constants import TENSOR_COLUMN_NAME from ray.air.util.data_batch_conversion import BlockFormat from ray.data._internal.logical.operators.all_to_all_operator import ( @@ -133,7 +134,10 @@ from ray.util.annotations import DeveloperAPI, PublicAPI, Deprecated from ray.util.scheduling_strategies import NodeAffinitySchedulingStrategy from ray.widgets import Template -from ray.widgets.util import ensure_notebook_deps, fallback_if_colab +from ray.widgets.util import ( + ensure_ipywidgets_dep, + repr_fallback_if_colab, +) if sys.version_info >= (3, 8): from typing import Literal @@ -4214,26 +4218,38 @@ def _aggregate_result(self, result: Union[Tuple, Mapping]) -> U: else: return result - @ensure_notebook_deps( - ["ipywidgets", "8"], - ) - @fallback_if_colab - def _ipython_display_(self): - from ipywidgets import HTML, VBox, Layout - from IPython.display import display + @ensure_ipywidgets_dep("8") + @repr_fallback_if_colab + def _repr_mimebundle_(self, **kwargs): + """Return a mimebundle with an ipywidget repr and a simple 
text repr. - title = HTML(f"

    {self.__class__.__name__}

    ") - tab = self._tab_repr_() + Depending on the frontend where the data is being displayed, + different mimetypes will be used from this bundle. + See https://ipython.readthedocs.io/en/stable/config/integrating.html + for information about this method, and + https://ipywidgets.readthedocs.io/en/latest/embedding.html + for more information about the jupyter widget mimetype. + + Returns: + A mimebundle containing an ipywidget repr and a simple text repr. + """ + import ipywidgets - if tab: - display(VBox([title, tab], layout=Layout(width="100%"))) + title = ipywidgets.HTML(f"

    {self.__class__.__name__}

    ") + tab = self._tab_repr_() + widget = ipywidgets.VBox([title, tab], layout=ipywidgets.Layout(width="100%")) + + # Get the widget mime bundle, but replace the plaintext + # with the Datastream repr + bundle = widget._repr_mimebundle_(**kwargs) + bundle.update( + { + "text/plain": repr(self), + } + ) + return bundle - @ensure_notebook_deps( - ["tabulate", None], - ["ipywidgets", "8"], - ) def _tab_repr_(self): - from ray._private.thirdparty.tabulate.tabulate import tabulate from ipywidgets import Tab, HTML metadata = { diff --git a/python/ray/tests/BUILD b/python/ray/tests/BUILD index 34c239ceb9c7..7b483064b550 100644 --- a/python/ray/tests/BUILD +++ b/python/ray/tests/BUILD @@ -204,6 +204,7 @@ py_test_module_list( "test_top_level_api.py", "test_unhandled_error.py", "test_utils.py", + "test_widgets.py", ], size = "small", tags = ["exclusive", "small_size_python_tests", "team:core"], diff --git a/python/ray/tests/test_widgets.py b/python/ray/tests/test_widgets.py new file mode 100644 index 000000000000..ad95d2b3c9e4 --- /dev/null +++ b/python/ray/tests/test_widgets.py @@ -0,0 +1,103 @@ +from unittest import mock + +import pytest +from ray.widgets.util import ensure_notebook_deps, repr_fallback_if_colab + + +@mock.patch("importlib.import_module") +def test_ensure_notebook_dep_missing(mock_import_module, caplog): + """Test that missing notebook dependencies trigger a warning.""" + + class MockDep: + __version__ = "8.0.0" + + def raise_import_error(*args): + raise ImportError + + mock_import_module.return_value = MockDep() + mock_import_module.side_effect = raise_import_error + + class DummyObject: + @ensure_notebook_deps(["somedep", "8"]) + def dummy_ipython_display(self): + return + + DummyObject().dummy_ipython_display() + + assert "Missing packages:" in caplog.records[-1].msg + + +@mock.patch("importlib.import_module") +def test_ensure_notebook_dep_outdated(mock_import_module, caplog): + """Test that outdated notebook dependencies trigger a warning.""" + + 
class MockDep: + __version__ = "7.0.0" + + mock_import_module.return_value = MockDep() + + class DummyObject: + @ensure_notebook_deps(["somedep", "8"]) + def dummy_ipython_display(): + return + + DummyObject().dummy_ipython_display() + + assert "Outdated packages:" in caplog.records[-1].msg + + +@mock.patch("importlib.import_module") +def test_ensure_notebook_valid(mock_import_module, caplog): + """Test that valid notebook dependencies don't trigger a warning.""" + + class MockDep: + __version__ = "8.0.0" + + mock_import_module.return_value = MockDep() + + class DummyObject: + @ensure_notebook_deps(["somedep", "8"]) + def dummy_ipython_display(self): + return + + DummyObject().dummy_ipython_display() + + assert len(caplog.records) == 0 + + +@pytest.mark.parametrize( + "kernel", + [ + ("google.colab.kernel"), + ("normal.ipython.kernel"), + ], +) +def test_repr_fallback_if_colab(kernel): + """Test that the mimebundle is correctly stripped if run in google colab.""" + pytest.importorskip("IPython", reason="IPython is not installed.") + with mock.patch("IPython.get_ipython") as mock_get_ipython: + mock_get_ipython.return_value = kernel + + class DummyObject: + @repr_fallback_if_colab + def _repr_mimebundle_(self, **kwargs): + return { + "fancy/mimetype": "A fancy repr", + "text/plain": "A simple repr", + } + + obj = DummyObject() + result = obj._repr_mimebundle_() + + assert "text/plain" in result + if "google.colab" in kernel: + assert len(result) == 1 + else: + assert len(result) == 2 + assert "fancy/mimetype" + + +if __name__ == "__main__": + import sys + + sys.exit(pytest.main(["-v", __file__])) diff --git a/python/ray/train/data_parallel_trainer.py b/python/ray/train/data_parallel_trainer.py index eba45c82c505..f6aa6df202e9 100644 --- a/python/ray/train/data_parallel_trainer.py +++ b/python/ray/train/data_parallel_trainer.py @@ -21,7 +21,7 @@ from ray.train.trainer import BaseTrainer, GenDataset from ray.util.annotations import DeveloperAPI from ray.widgets import 
Template -from ray.widgets.util import ensure_notebook_deps, fallback_if_colab +from ray.widgets.util import ensure_ipywidgets_dep, repr_fallback_if_colab if TYPE_CHECKING: from ray.data.preprocessor import Preprocessor @@ -443,40 +443,61 @@ def get_dataset_config(self) -> Dict[str, DatasetConfig]: """ return self._dataset_config.copy() - @ensure_notebook_deps( - ["tabulate", None], - ["ipywidgets", "8"], - ) - @fallback_if_colab - def _ipython_display_(self): + @ensure_ipywidgets_dep("8") + @repr_fallback_if_colab + def _repr_mimebundle_(self, **kwargs): + """Return a mimebundle with an ipywidget repr and a simple text repr. + + Depending on the frontend where the data is being displayed, + different mimetypes will be used from this bundle. + See https://ipython.readthedocs.io/en/stable/config/integrating.html + for information about this method, and + https://ipywidgets.readthedocs.io/en/latest/embedding.html + for more information about the jupyter widget mimetype. + + Returns: + A mimebundle containing an ipywidget repr and a simple text repr. + """ from ipywidgets import HTML, VBox, Tab, Layout - from IPython.display import display title = HTML(f"

    {self.__class__.__name__}

    ") - children = [ - self._datasets_repr_() if self.datasets else None, - HTML(self._dataset_config_repr_html_()) if self._dataset_config else None, - HTML(self._train_loop_config_repr_html_()) - if self._train_loop_config - else None, - HTML(self.scaling_config._repr_html_()) if self.scaling_config else None, - HTML(self.run_config._repr_html_()) if self.run_config else None, - HTML(self._backend_config._repr_html_()) if self._backend_config else None, - ] - - tab = Tab( - children, - titles=[ - "Datasets", - "Dataset Config", - "Train Loop Config", - "Scaling Config", - "Run Config", - "Backend Config", - ], + children = [] + titles = [] + + if self.datasets: + children.append(self._datasets_repr_()) + titles.append("Datasets") + + if self._dataset_config: + children.append(HTML(self._dataset_config_repr_html_())) + titles.append("Dataset Config") + + if self._train_loop_config: + children.append(HTML(self._train_loop_config_repr_html_())) + titles.append("Train Loop Config") + + if self.scaling_config: + children.append(HTML(self.scaling_config._repr_html_())) + titles.append("Scaling Config") + + if self.run_config: + children.append(HTML(self.run_config._repr_html_())) + titles.append("Run Config") + + if self._backend_config: + children.append(HTML(self._backend_config._repr_html_())) + titles.append("Backend Config") + + tab = Tab(children, titles=titles) + widget = VBox([title, tab], layout=Layout(width="100%")) + bundle = widget._repr_mimebundle_(**kwargs) + bundle.update( + { + "text/plain": repr(self), + } ) - display(VBox([title, tab], layout=Layout(width="100%"))) + return bundle def _train_loop_config_repr_html_(self) -> str: if self._train_loop_config: @@ -514,7 +535,6 @@ def _dataset_config_repr_html_(self) -> str: return Template("rendered_html_common.html.j2").render(content=content) - @ensure_notebook_deps(["ipywidgets", "8"]) def _datasets_repr_(self) -> str: from ipywidgets import HTML, VBox, Layout diff --git a/python/ray/widgets/util.py 
b/python/ray/widgets/util.py index ebf97f66b014..6991384779f2 100644 --- a/python/ray/widgets/util.py +++ b/python/ray/widgets/util.py @@ -73,10 +73,10 @@ def ensure_notebook_deps( ) -> Callable[[F], F]: """Generate a decorator which checks for soft dependencies. - This decorator is meant to wrap _ipython_display_. If the dependency is not found, + This decorator is meant to wrap repr methods. If the dependency is not found, or a version is specified here and the version of the package is older than the - specified version, the wrapped function is not executed and None is returned. If - the dependency is missing or the version is old, a log message is displayed. + specified version, the original repr is used. + If the dependency is missing or the version is old, a log message is displayed. Args: *deps: Iterable of (dependency name, min version (optional)) @@ -90,18 +90,56 @@ def ensure_notebook_deps( def wrapper(func: F) -> F: @wraps(func) - def wrapped(*args, **kwargs): + def wrapped(self, *args, **kwargs): if _has_missing(*deps, message=missing_message) or _has_outdated( *deps, message=outdated_message ): - return None - return func(*args, **kwargs) + # Fallback to plaintext repr if dependencies are missing. + return {"text/plain": repr(self)} + return func(self, *args, **kwargs) return wrapped return wrapper +@DeveloperAPI +def ensure_ipywidgets_dep(version: str) -> Callable[[F], F]: + """Generate a decorator which checks for a soft ipywidgets dependency. + + This is a convencience function separate from `ensure_notebook_deps` because + of its custom missing and outdated messages, which suggest the user restart the + notebook server after installation/upgrade. + + Args: + version: Version of ipywidgets required. + + Returns: + Wrapped function. Guaranteed to be safe against the specified ipywidgets + version. + """ + text = ( + "Run `pip install {}ipywidgets`, then restart " + "the notebook server for rich notebook output." 
+ ) + + if in_notebook(): + return ensure_notebook_deps( + ["ipywidgets", version], + missing_message=text.format(""), + outdated_message=text.format("-U "), + ) + else: + # If not in a notebook, then immediately short-circuit. + # We do not log has_missing or has_outdated messages if not in a notebook + # setting. + def dummy_decorator(func): + # Return the original function without any changes. + return func + + return dummy_decorator + + def _has_missing( *deps: Iterable[Union[str, Optional[str]]], message: Optional[str] = None ): @@ -151,28 +189,48 @@ def _has_outdated( if not message: message = f"Run `pip install -U {install_str}` for rich notebook output." - # stacklevel=3: First level is this function, then ensure_notebook_deps, then - # the actual function affected. - logger.warning(f"Outdated packages:\n{outdated_str}\n{message}", stacklevel=3) + if sys.version_info < (3, 8): + logger.warning(f"Outdated packages:\n{outdated_str}\n{message}") + else: + # stacklevel=3: First level is this function, then ensure_notebook_deps, + # then the actual function affected. + logger.warning( + f"Outdated packages:\n{outdated_str}\n{message}", stacklevel=3 + ) return outdated @DeveloperAPI -def fallback_if_colab(func: F) -> Callable[[F], F]: +def repr_fallback_if_colab(func: F) -> Callable[[F], F]: + """Decorator which strips rich notebook output from mimebundles if run in colab. + + See https://github.com/googlecolab/colabtools/issues/60 for more information about + the status of this issue. + + Args: + func: Function to wrap; must be a _repr_mimebundle_ method. + + Returns: + A function that returns the usual _repr_mimebundle_ unless it is run in + google colab, in which case it returns a mimebundle that only contains a + single text/plain mimetype, preventing rich notebook integration in colab. 
+ """ try: - ipython = get_ipython() - except NameError: + import IPython + + ipython = IPython.get_ipython() + except (ModuleNotFoundError, ValueError): ipython = None @wraps(func) - def wrapped(self, *args, **kwargs): - if ipython and "google.colab" not in str(ipython): - return func(self, *args, **kwargs) - elif hasattr(self, "__repr__"): - return print(self.__repr__(*args, **kwargs)) - else: - return None + def wrapped(*args, **kwargs): + result = func(*args, **kwargs) + if ipython and "google.colab" in str(ipython): + if isinstance(result, dict) and "text/plain" in result: + return {"text/plain": result["text/plain"]} + + return result return wrapped @@ -181,8 +239,14 @@ def wrapped(self, *args, **kwargs): def in_notebook() -> bool: """Return whether we are in a Jupyter notebook.""" try: - class_name = get_ipython().__class__.__name__ - is_notebook = True if "Terminal" not in class_name else False - except NameError: - is_notebook = False - return is_notebook + import IPython + + shell = IPython.get_ipython().__class__.__name__ + if shell == "ZMQInteractiveShell": + return True # Jupyter notebook or qtconsole + elif shell == "TerminalInteractiveShell": + return False # Terminal running IPython + else: + return False # Other type + except (ModuleNotFoundError, NameError, ValueError): + return False diff --git a/python/requirements/ml/requirements_ml_docker.txt b/python/requirements/ml/requirements_ml_docker.txt index 6dd8d1532ed1..3a027b9b4869 100644 --- a/python/requirements/ml/requirements_ml_docker.txt +++ b/python/requirements/ml/requirements_ml_docker.txt @@ -1,5 +1,8 @@ ipython +# Needed for rich visualization for Ray Train and Ray Data. +ipywidgets>=8 + # Needed for Ray Client error message serialization/deserialization. 
tblib From b16eec4e5e48658735542d83ecf22866cd5674ab Mon Sep 17 00:00:00 2001 From: Yunxuan Xiao Date: Wed, 10 May 2023 13:56:56 -0700 Subject: [PATCH 330/424] [CI/air] Fix lightning_gpu_tune_.* release test (#35193) Temporarily fix the release tests fails described in #35187. TODO: Come up with a holistic solution for metric dict flattening. Signed-off-by: woshiyyya --- .../lightning_tests/workloads/lightning_test_utils.py | 7 +++++-- release/lightning_tests/workloads/test_trainer.py | 4 ++-- release/lightning_tests/workloads/test_tuner.py | 10 +++++----- 3 files changed, 12 insertions(+), 9 deletions(-) diff --git a/release/lightning_tests/workloads/lightning_test_utils.py b/release/lightning_tests/workloads/lightning_test_utils.py index 885954b1e5b9..150e2bc3e23a 100644 --- a/release/lightning_tests/workloads/lightning_test_utils.py +++ b/release/lightning_tests/workloads/lightning_test_utils.py @@ -39,8 +39,11 @@ def validation_step(self, val_batch, batch_idx): def validation_epoch_end(self, outputs): avg_loss = torch.stack([x["val_loss"] for x in outputs]).mean() avg_acc = torch.stack([x["val_accuracy"] for x in outputs]).mean() - self.log("ptl/val_loss", avg_loss, sync_dist=True) - self.log("ptl/val_accuracy", avg_acc, sync_dist=True) + + # TODO(yunxuanx): change this back to ptl/val_loss after + # we resolved the metric unpacking issue + self.log("val_loss", avg_loss, sync_dist=True) + self.log("val_accuracy", avg_acc, sync_dist=True) def configure_optimizers(self): optimizer = torch.optim.Adam(self.parameters(), lr=self.lr) diff --git a/release/lightning_tests/workloads/test_trainer.py b/release/lightning_tests/workloads/test_trainer.py index 845ff2b4c5e8..117f6ee85ab9 100644 --- a/release/lightning_tests/workloads/test_trainer.py +++ b/release/lightning_tests/workloads/test_trainer.py @@ -23,7 +23,7 @@ logger=CSVLogger("logs", name="my_exp_name"), ) .fit_params(datamodule=MNISTDataModule(batch_size=128)) - .checkpointing(monitor="ptl/val_accuracy", 
mode="max", save_last=True) + .checkpointing(monitor="val_accuracy", mode="max", save_last=True) .build() ) @@ -41,7 +41,7 @@ taken = time.time() - start result = { "time_taken": taken, - "ptl/val_accuracy": result.metrics["ptl/val_accuracy"], + "val_accuracy": result.metrics["val_accuracy"], } test_output_json = os.environ.get( "TEST_OUTPUT_JSON", "/tmp/lightning_trainer_test.json" diff --git a/release/lightning_tests/workloads/test_tuner.py b/release/lightning_tests/workloads/test_tuner.py index cc6c6b79d0ab..36ff2b257dc4 100644 --- a/release/lightning_tests/workloads/test_tuner.py +++ b/release/lightning_tests/workloads/test_tuner.py @@ -29,7 +29,7 @@ logger=CSVLogger("logs", name="my_exp_name"), ) .fit_params(datamodule=MNISTDataModule(batch_size=200)) - .checkpointing(monitor="ptl/val_accuracy", mode="max") + .checkpointing(monitor="val_accuracy", mode="max") .build() ) @@ -57,12 +57,12 @@ verbose=2, checkpoint_config=CheckpointConfig( num_to_keep=2, - checkpoint_score_attribute="ptl/val_accuracy", + checkpoint_score_attribute="val_accuracy", checkpoint_score_order="max", ), ), tune_config=tune.TuneConfig( - metric="ptl/val_accuracy", + metric="val_accuracy", mode="max", num_samples=2, scheduler=PopulationBasedTraining( @@ -73,7 +73,7 @@ ), ) results = tuner.fit() - best_result = results.get_best_result(metric="ptl/val_accuracy", mode="max") + best_result = results.get_best_result(metric="val_accuracy", mode="max") best_result assert len(results.errors) == 0 @@ -83,7 +83,7 @@ # Report experiment results result = { "time_taken": taken, - "ptl/val_accuracy": best_result.metrics["ptl/val_accuracy"], + "val_accuracy": best_result.metrics["val_accuracy"], } test_output_json = os.environ.get( From e5fe6539c7cf3fb5316f73fbc010ff4d27f8ae1e Mon Sep 17 00:00:00 2001 From: Yunxuan Xiao Date: Wed, 10 May 2023 14:27:48 -0700 Subject: [PATCH 331/424] [Doc] Correctly Render the Enumerate Numbers in `convert_torch_code_to_ray_air` (#35224) Signed-off-by: woshiyyya --- 
...ert_existing_pytorch_code_to_ray_air.ipynb | 70 ++++++++++--------- 1 file changed, 36 insertions(+), 34 deletions(-) diff --git a/doc/source/ray-air/examples/convert_existing_pytorch_code_to_ray_air.ipynb b/doc/source/ray-air/examples/convert_existing_pytorch_code_to_ray_air.ipynb index 4465734badef..1e8e6734b67c 100644 --- a/doc/source/ray-air/examples/convert_existing_pytorch_code_to_ray_air.ipynb +++ b/doc/source/ray-air/examples/convert_existing_pytorch_code_to_ray_air.ipynb @@ -559,6 +559,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "id": "abe8e708", "metadata": {}, @@ -576,62 +577,63 @@ "\n", "1. We import Ray Train and Ray AIR Session:\n", "\n", - "```python\n", - "import ray.train as train\n", - "from ray.air import session\n", - "```\n", + " ```python\n", + " import ray.train as train\n", + " from ray.air import session\n", + " ```\n", "\n", "\n", "2. We use a `config` dict to configure some hyperparameters (this is not strictly needed but good practice, especially if you want to o hyperparameter tuning later):\n", "\n", - "```python\n", - "def train_func(config: dict):\n", - " batch_size = config[\"batch_size\"]\n", - " lr = config[\"lr\"]\n", - " epochs = config[\"epochs\"]\n", - "```\n", + " ```python\n", + " def train_func(config: dict):\n", + " batch_size = config[\"batch_size\"]\n", + " lr = config[\"lr\"]\n", + " epochs = config[\"epochs\"]\n", + " ```\n", "\n", "3. We dynamically adjust the worker batch size according to the number of workers:\n", "\n", - "```python\n", - " batch_size_per_worker = batch_size // session.get_world_size()\n", - "```\n", + " ```python\n", + " batch_size_per_worker = batch_size // session.get_world_size()\n", + " ```\n", "\n", "4. 
We prepare the data loader for distributed data sharding:\n", "\n", - "```python\n", - " train_dataloader = train.torch.prepare_data_loader(train_dataloader)\n", - " test_dataloader = train.torch.prepare_data_loader(test_dataloader)\n", - "```\n", + " ```python\n", + " train_dataloader = train.torch.prepare_data_loader(train_dataloader)\n", + " test_dataloader = train.torch.prepare_data_loader(test_dataloader)\n", + " ```\n", "\n", "5. We prepare the model for distributed gradient updates:\n", "\n", - "```python\n", - " model = train.torch.prepare_model(model)\n", - "```\n", - "\n", - "Note that `train.torch.prepare_model()` also automatically takes care of setting up devices (e.g. GPU training) - so we can get rid of those lines in our current code!\n", - "\n", + " ```python\n", + " model = train.torch.prepare_model(model)\n", + " ```\n", + " :::{note}\n", + " Note that `train.torch.prepare_model()` also automatically takes care of setting up devices (e.g. GPU training) - so we can get rid of those lines in our current code!\n", + " :::\n", "\n", "6. We capture the validation loss and report it to Ray train:\n", "\n", - "```python\n", - " test_loss = test(test_dataloader, model, loss_fn)\n", - " session.report(dict(loss=test_loss))\n", - "```\n", + " ```python\n", + " test_loss = test(test_dataloader, model, loss_fn)\n", + " session.report(dict(loss=test_loss))\n", + " ```\n", "\n", "7. In the `train_epoch()` and `test_epoch()` functions we divide the `size` by the world size:\n", "\n", - "```python\n", - " size = len(dataloader.dataset) // session.get_world_size() # Divide by word size\n", - "```\n", + " ```python\n", + " # Divide by word size\n", + " size = len(dataloader.dataset) // session.get_world_size()\n", + " ```\n", "\n", "8. In the `train_epoch()` function we can get rid of the device mapping. Ray Train does this for us:\n", "\n", - "```python\n", - " # We don't need this anymore! 
Ray Train does this automatically:\n", - " # X, y = X.to(device), y.to(device) \n", - "```\n", + " ```python\n", + " # We don't need this anymore! Ray Train does this automatically:\n", + " # X, y = X.to(device), y.to(device) \n", + " ```\n", "\n", "That's it - you need less than 10 lines of Ray Train-specific code and can otherwise continue to use your original code.\n", "\n", From ca40a9e73f5cad22345561e0db10cfc91157e246 Mon Sep 17 00:00:00 2001 From: Amog Kamsetty Date: Wed, 10 May 2023 14:53:23 -0700 Subject: [PATCH 332/424] [Data] Clarify `map` slow warning (#35204) --- python/ray/data/dataset.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/python/ray/data/dataset.py b/python/ray/data/dataset.py index c38ab2c719c7..16d4e2f7b4ef 100644 --- a/python/ray/data/dataset.py +++ b/python/ray/data/dataset.py @@ -4351,7 +4351,8 @@ def _warn_slow(self): if ray.util.log_once("dataset_slow_warned"): logger.warning( "The `map`, `flat_map`, and `filter` operations are unvectorized and " - "can be very slow. Consider using `.map_batches()` instead." + "can be very slow. If you're using a vectorized transformation, " + "consider using `.map_batches()` instead." 
) def _synchronize_progress_bar(self): From c5b1e809900c1822b190444a50c2d8f10e46a995 Mon Sep 17 00:00:00 2001 From: Cade Daniel Date: Wed, 10 May 2023 15:54:26 -0700 Subject: [PATCH 333/424] [Release tests] Moving Ray Data bulk ingest test ownership team to Data (#35238) --- release/release_tests.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/release/release_tests.yaml b/release/release_tests.yaml index 6727a7e28ffd..5d5720b0ada7 100644 --- a/release/release_tests.yaml +++ b/release/release_tests.yaml @@ -699,7 +699,7 @@ jailed: true frequency: nightly - team: core + team: data cluster: cluster_env: app_config_oom.yaml cluster_compute: compute_cpu_16.yaml @@ -726,7 +726,7 @@ jailed: true frequency: nightly - team: core + team: data cluster: cluster_env: app_config_oom.yaml cluster_compute: compute_cpu_16.yaml @@ -753,7 +753,7 @@ jailed: true frequency: nightly - team: core + team: data cluster: cluster_env: app_config_oom.yaml cluster_compute: compute_cpu_16_worker_nodes_2.yaml From 3a81c878c1da988b002a586a534f76857be99b7d Mon Sep 17 00:00:00 2001 From: Yi Cheng <74173148+iycheng@users.noreply.github.com> Date: Wed, 10 May 2023 20:18:22 -0700 Subject: [PATCH 334/424] [core] Reduce self alive check from 60s to 5s. (#34992) This PR reduce the liveness check from 60s to 5s. It also fixed a bug in the old code where if the server is restarted locally, it'll mark the current one as unhealthy incorrectly because the set is not multi set. One example, - If the head node restart in-place - when it started, it'll mark the old raylet as dead - in the liveness check endpoint here, it'll mark the raylet as dead because they share the same ip+port. - then the head node will exit Node id should be used for this check in the future for simplicity but the correctness is ok, given: No two raylets can start at the same address (ip+port). 
--- ci/ci.sh | 2 +- python/ray/tests/test_gcs_ha_e2e.py | 12 +++++++++++- src/ray/common/ray_config_def.h | 2 +- src/ray/gcs/gcs_server/gcs_node_manager.h | 3 ++- src/ray/raylet/node_manager.cc | 2 +- 5 files changed, 16 insertions(+), 5 deletions(-) diff --git a/ci/ci.sh b/ci/ci.sh index 7b93708078b9..6c67efa25701 100755 --- a/ci/ci.sh +++ b/ci/ci.sh @@ -154,7 +154,7 @@ prepare_docker() { EXPOSE 8000 EXPOSE 10001 RUN pip install /${wheel}[serve] - RUN sudo apt update && sudo apt install curl -y + RUN (sudo apt update || true) && sudo apt install curl -y " > $tmp_dir/Dockerfile pushd $tmp_dir diff --git a/python/ray/tests/test_gcs_ha_e2e.py b/python/ray/tests/test_gcs_ha_e2e.py index bbd1b22eae72..53c57167a1d2 100644 --- a/python/ray/tests/test_gcs_ha_e2e.py +++ b/python/ray/tests/test_gcs_ha_e2e.py @@ -3,7 +3,7 @@ import threading from time import sleep from ray._private.test_utils import wait_for_condition -from pytest_docker_tools import container, fetch, network +from pytest_docker_tools import container, fetch, network, volume from pytest_docker_tools import wrappers from http.client import HTTPConnection @@ -59,6 +59,11 @@ def client(self): port = self.ports["8000/tcp"][0] return HTTPConnection(f"localhost:{port}") + def print_logs(self): + for (name, content) in self.get_files("/tmp"): + print(f"===== log start: {name} ====") + print(content.decode()) + gcs_network = network(driver="bridge") @@ -70,6 +75,9 @@ def client(self): command=("redis-server --save 60 1 --loglevel" " warning"), ) +head_node_vol = volume() +worker_node_vol = volume() + head_node = container( image="ray_ci:v1", name="gcs", @@ -86,6 +94,7 @@ def client(self): "--node-manager-port", "9379", ], + volumes={"{head_node_vol.name}": {"bind": "/tmp", "mode": "rw"}}, environment={"RAY_REDIS_ADDRESS": "{redis.ips.primary}:6379"}, wrapper_class=Container, ports={ @@ -110,6 +119,7 @@ def client(self): "--node-manager-port", "9379", ], + volumes={"{worker_node_vol.name}": {"bind": "/tmp", "mode": 
"rw"}}, environment={"RAY_REDIS_ADDRESS": "{redis.ips.primary}:6379"}, wrapper_class=Container, ports={ diff --git a/src/ray/common/ray_config_def.h b/src/ray/common/ray_config_def.h index 0188646292cc..d5202917e02b 100644 --- a/src/ray/common/ray_config_def.h +++ b/src/ray/common/ray_config_def.h @@ -800,7 +800,7 @@ RAY_CONFIG(bool, kill_idle_workers_of_terminated_job, true) RAY_CONFIG(std::vector, preload_python_modules, {}) // By default, raylet send a self liveness check to GCS every 60s -RAY_CONFIG(int64_t, raylet_liveness_self_check_interval_ms, 60000) +RAY_CONFIG(int64_t, raylet_liveness_self_check_interval_ms, 5000) // Instruct the CoreWorker to kill its child processes while // it exits. This prevents certain classes of resource leaks diff --git a/src/ray/gcs/gcs_server/gcs_node_manager.h b/src/ray/gcs/gcs_server/gcs_node_manager.h index b1b9526599c8..d76e94fbd8ea 100644 --- a/src/ray/gcs/gcs_server/gcs_node_manager.h +++ b/src/ray/gcs/gcs_server/gcs_node_manager.h @@ -17,6 +17,7 @@ #include #include +#include #include #include "absl/container/flat_hash_map.h" @@ -173,7 +174,7 @@ class GcsNodeManager : public rpc::NodeInfoHandler { /// A map of NodeId <-> ip:port of raylet using NodeIDAddrBiMap = boost::bimap>, - boost::bimaps::unordered_set_of>; + boost::bimaps::unordered_multiset_of>; NodeIDAddrBiMap node_map_; friend GcsMonitorServerTest; diff --git a/src/ray/raylet/node_manager.cc b/src/ray/raylet/node_manager.cc index f7249347a772..21bd053cbb58 100644 --- a/src/ray/raylet/node_manager.cc +++ b/src/ray/raylet/node_manager.cc @@ -550,8 +550,8 @@ ray::Status NodeManager::RegisterGcs() { << "GCS is not backed by a DB and restarted or there is data loss " << "in the DB."; } - *checking_ptr = false; } + *checking_ptr = false; }, /* timeout_ms = */ 30000)); }, From a329c251a35c565c87d2fb2ef2e0d056dce5449c Mon Sep 17 00:00:00 2001 From: Yi Cheng <74173148+iycheng@users.noreply.github.com> Date: Wed, 10 May 2023 21:07:48 -0700 Subject: [PATCH 335/424] [core] 
Turn on ray syncer again. (#35116) Signed-off-by: Yi Cheng <74173148+iycheng@users.noreply.github.com> After fixing several issues: - https://github.com/ray-project/ray/pull/34645 - https://github.com/ray-project/ray/pull/35115 - https://github.com/ray-project/ray/pull/34687 Ray syncer should be ready to be turned on again. --- src/ray/common/ray_config_def.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/ray/common/ray_config_def.h b/src/ray/common/ray_config_def.h index d5202917e02b..7920387fe452 100644 --- a/src/ray/common/ray_config_def.h +++ b/src/ray/common/ray_config_def.h @@ -441,7 +441,7 @@ RAY_CONFIG(uint64_t, gcs_grpc_max_request_queued_max_bytes, 1024UL * 1024 * 1024 RAY_CONFIG(int32_t, gcs_client_check_connection_status_interval_milliseconds, 1000) /// Feature flag to use the ray syncer for resource synchronization -RAY_CONFIG(bool, use_ray_syncer, false) +RAY_CONFIG(bool, use_ray_syncer, true) /// Due to the protocol drawback, raylet needs to refresh the message if /// no message is received for a while. /// Refer to https://tinyurl.com/n6kvsp87 for more details From 066620436bde56be7b8a0809a6c631508bed240c Mon Sep 17 00:00:00 2001 From: Balaji Veeramani Date: Wed, 10 May 2023 21:36:15 -0700 Subject: [PATCH 336/424] [Data][Docs] Fix `hf_quick_start.py` (#35240) `hf_quick_start.py` was failing with > ValueError: ArrowVariableShapedTensorArray only supports heterogeneous-shaped tensor collections, not arbitrarily nested ragged tensors. Got arrays: [('dtype=object', 'shape=(1,)'), ('dtype=object', 'shape=(1,)')] This is because we're returning an object that looks like ```python {"output": [[{'generated_text': 'Complete this page to stay up to date with our latest news in aviation related news. You can also'}], [{'generated_text': "for me. We could use those resources as time goes on. We'll get to it in the"}]] } ``` from a UDF. 
This PR updates the UDF so it returns object like ```python {"output": [ 'Complete this page to stay up to date with our latest news in aviation related news. You can also', "for me. We could use those resources as time goes on. We'll get to it in the" ]} ``` Signed-off-by: Balaji Veeramani --- doc/source/data/doc_code/hf_quick_start.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/doc/source/data/doc_code/hf_quick_start.py b/doc/source/data/doc_code/hf_quick_start.py index 3c8fd8612afb..e54a7a0131c4 100644 --- a/doc/source/data/doc_code/hf_quick_start.py +++ b/doc/source/data/doc_code/hf_quick_start.py @@ -15,8 +15,9 @@ def __init__(self): self.model = pipeline("text-generation", model="gpt2") def __call__(self, batch: Dict[str, np.ndarray]): - model_out = self.model(list(batch["data"]), max_length=20) - return {"output": model_out} + model_out = self.model(list(batch["data"]), max_length=20, num_return_sequences=1) + batch["output"] = [sequence[0]["generated_text"] for sequence in model_out] + return batch scale = ray.data.ActorPoolStrategy(size=2) predictions = ds.map_batches(HuggingFacePredictor, compute=scale) @@ -54,8 +55,9 @@ def __init__(self): # <1> self.model = pipeline("text-generation", model="gpt2") def __call__(self, batch: Dict[str, np.ndarray]): # <2> - model_out = self.model(list(batch["data"]), max_length=20) - return {"output": np.asarray(model_out)} + model_out = self.model(list(batch["data"]), max_length=20, num_return_sequences=1) + batch["output"] = [sequence[0]["generated_text"] for sequence in model_out] + return batch # __hf_quickstart_model_end__ From 9316ce7475f50cf4e695730155420c38602c99ba Mon Sep 17 00:00:00 2001 From: Yi Cheng <74173148+iycheng@users.noreply.github.com> Date: Wed, 10 May 2023 22:33:18 -0700 Subject: [PATCH 337/424] [core] Deflakey test advanced 9 (#35247) Previously a bug was fixed in #33311 where pubsub causes the leak. 
Somehow the fix has race conditions and got triggered later when code changes. The test is flakey because there is a race condition between raylet sending node failure and core worker exit itself. When disconnect is sent to Raylet, Raylet will start to report worker failure. But the worker still continue to run. GCS uses worker failure to close the connection. But if the worker is still alive, the worker might send another request the GCS which will lead to the FD leak. Compare with #34883 it's a short term fix and the goal is to make the case the same as 2.3. --- src/ray/core_worker/core_worker.cc | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/src/ray/core_worker/core_worker.cc b/src/ray/core_worker/core_worker.cc index ff03b5b85508..5f1b1c3ea9d9 100644 --- a/src/ray/core_worker/core_worker.cc +++ b/src/ray/core_worker/core_worker.cc @@ -790,8 +790,12 @@ void CoreWorker::Exit( detail = std::move(detail), creation_task_exception_pb_bytes]() { rpc::DrainServerCallExecutor(); - Disconnect(exit_type, detail, creation_task_exception_pb_bytes); KillChildProcs(); + // Disconnect should be put close to Shutdown + // https://github.com/ray-project/ray/pull/34883 + // TODO (iycheng) Improve the Process.h and make it able to monitor + // process liveness + Disconnect(exit_type, detail, creation_task_exception_pb_bytes); Shutdown(); }, "CoreWorker.Shutdown"); @@ -835,9 +839,13 @@ void CoreWorker::ForceExit(const rpc::WorkerExitType exit_type, const std::string &detail) { RAY_LOG(WARNING) << "Force exit the process. " << " Details: " << detail; - Disconnect(exit_type, detail); KillChildProcs(); + // Disconnect should be put close to Exit + // https://github.com/ray-project/ray/pull/34883 + // TODO (iycheng) Improve the Process.h and make it able to monitor + // process liveness + Disconnect(exit_type, detail); // NOTE(hchen): Use `QuickExit()` to force-exit this process without doing cleanup. 
// `exit()` will destruct static objects in an incorrect order, which will lead to From 0c727550cbdf16bde0cfe623727b13836bc6cdd7 Mon Sep 17 00:00:00 2001 From: Kai Fricke Date: Thu, 11 May 2023 09:21:44 +0200 Subject: [PATCH 338/424] [ci] Fix dask Ray client tests (#35233) The Ray client tests for dask are broken in master: ``` ModuleNotFoundError: No module named 'dask' ``` We didn't change any logic in our CI, but it seems we never explicitly installed dask in the respective job. Maybe it was previously automatically installed by some subdependency. This PR adds the data processing requirements to the job, thus explicitly installing dask. Signed-off-by: Kai Fricke --- .buildkite/pipeline.ml.yml | 2 +- python/ray/tune/tune.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.buildkite/pipeline.ml.yml b/.buildkite/pipeline.ml.yml index 03c5179db864..6c1007d3cd50 100644 --- a/.buildkite/pipeline.ml.yml +++ b/.buildkite/pipeline.ml.yml @@ -402,7 +402,7 @@ instance_size: medium commands: - cleanup() { if [ "${BUILDKITE_PULL_REQUEST}" = "false" ]; then ./ci/build/upload_build_info.sh; fi }; trap cleanup EXIT - - TUNE_TESTING=1 INSTALL_HOROVOD=1 ./ci/env/install-dependencies.sh + - TUNE_TESTING=1 DATA_PROCESSING_TESTING=1 INSTALL_HOROVOD=1 ./ci/env/install-dependencies.sh - ./ci/env/env_info.sh - bazel test --config=ci $(./ci/run/bazel_export_options) --build_tests_only --test_tag_filters=client --test_env=RAY_CLIENT_MODE=1 python/ray/util/dask/... - bazel test --config=ci $(./ci/run/bazel_export_options) --build_tests_only --test_tag_filters=client python/ray/tune/... diff --git a/python/ray/tune/tune.py b/python/ray/tune/tune.py index 1dab032bd425..2467c8eee84d 100644 --- a/python/ray/tune/tune.py +++ b/python/ray/tune/tune.py @@ -193,7 +193,7 @@ def signal_interrupt_tune_run(sig: int, frame): "to skip. 
" ) experiment_interrupted_event.set() - # Restore original signal handler to react to future SIGINT signals + # Restore original signal handler to react to future SIGINT signals. signal.signal(signal.SIGINT, original_handler) # We should only install the handler when it is safe to do so. From 6229f325d2994d74901a324f59339b45688d9b49 Mon Sep 17 00:00:00 2001 From: Max Pumperla Date: Thu, 11 May 2023 10:48:05 +0200 Subject: [PATCH 339/424] [docs] auto-remove gen apis on make clean (#35210) if you don't regularly clean API docs generated by autodoc/summary, your build output will be spammed with warnings about outdated/non-existing APIs. we make it so that ``` make clean && make develop ``` truly builds from scratch to avoid this issue. Signed-off-by: Max Pumperla --- doc/Makefile | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/doc/Makefile b/doc/Makefile index 8e819ce54b7b..98bc7e086207 100644 --- a/doc/Makefile +++ b/doc/Makefile @@ -50,6 +50,12 @@ help: clean: rm -rf $(BUILDDIR)/* + rm -rf ./source/*/api/doc/* + rm -rf ./source/ray-references/api/*/doc/* + rm -rf ./source/cluster/running_applications/doc/* + rm -rf ./source/cluster/running_applications/job-submission/doc/* + rm -rf ./source/ray-observability/api/state/doc* + rm -rf ./source/rllib/package_ref/doc* html: $(SPHINXBUILD) -W --keep-going -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html From c9d6677446edb9af44f562544cd7125202012869 Mon Sep 17 00:00:00 2001 From: Sven Mika Date: Thu, 11 May 2023 13:29:01 +0200 Subject: [PATCH 340/424] [RLlib] APPO+new-stack (Atari benchmark) - Preparatory PR 04 - LearnerAPI changes/tf-tracing fixes. 
(#34959) --- rllib/algorithms/appo/appo_catalog.py | 2 +- rllib/algorithms/appo/appo_learner.py | 10 +- rllib/algorithms/appo/tf/appo_tf_learner.py | 5 +- rllib/algorithms/appo/tf/appo_tf_rl_module.py | 14 +- .../appo/torch/appo_torch_learner.py | 16 +- .../appo/torch/appo_torch_rl_module.py | 6 +- rllib/algorithms/impala/impala.py | 53 +++-- rllib/algorithms/impala/impala_learner.py | 2 +- .../algorithms/impala/tf/impala_tf_learner.py | 5 +- .../impala/torch/impala_torch_learner.py | 7 +- rllib/algorithms/ppo/ppo_catalog.py | 13 +- ...ppo_base_rl_module.py => ppo_rl_module.py} | 13 +- .../ppo/tests/test_ppo_with_rl_module.py | 6 +- rllib/algorithms/ppo/tf/ppo_tf_learner.py | 9 +- rllib/algorithms/ppo/tf/ppo_tf_rl_module.py | 6 +- .../algorithms/ppo/torch/ppo_torch_learner.py | 14 +- .../ppo/torch/ppo_torch_rl_module.py | 6 +- rllib/core/learner/learner.py | 2 +- rllib/core/learner/learner_group.py | 138 +++++++---- rllib/core/models/catalog.py | 10 +- rllib/core/models/tf/encoder.py | 6 +- rllib/models/tf/tf_distributions.py | 10 +- rllib/policy/eager_tf_policy.py | 45 ++-- rllib/policy/eager_tf_policy_v2.py | 215 +++++++++++------- rllib/policy/torch_policy_v2.py | 6 +- 25 files changed, 383 insertions(+), 236 deletions(-) rename rllib/algorithms/ppo/{ppo_base_rl_module.py => ppo_rl_module.py} (84%) diff --git a/rllib/algorithms/appo/appo_catalog.py b/rllib/algorithms/appo/appo_catalog.py index b675cba4b9cd..4c9e14244570 100644 --- a/rllib/algorithms/appo/appo_catalog.py +++ b/rllib/algorithms/appo/appo_catalog.py @@ -10,7 +10,7 @@ class APPOCatalog(PPOCatalog): - Value Function Head: The head used to compute the value function. The ActorCriticEncoder is a wrapper around Encoders to produce separate outputs - for the policy and value function. See implementations of PPORLModuleBase for + for the policy and value function. See implementations of PPORLModule for more details. 
Any custom ActorCriticEncoder can be built by overriding the diff --git a/rllib/algorithms/appo/appo_learner.py b/rllib/algorithms/appo/appo_learner.py index d067fdb25587..8fa026b2ff61 100644 --- a/rllib/algorithms/appo/appo_learner.py +++ b/rllib/algorithms/appo/appo_learner.py @@ -1,7 +1,7 @@ import abc from collections import defaultdict from dataclasses import dataclass -from typing import Any, Dict, Mapping +from typing import Any, Mapping from ray.rllib.algorithms.impala.impala_learner import ( ImpalaLearner, @@ -115,7 +115,7 @@ def _update_module_target_networks(self, module_id: ModuleID) -> None: @abc.abstractmethod def _update_module_kl_coeff( - self, module_id: ModuleID, sampled_kls: Dict[ModuleID, float] + self, module_id: ModuleID, sampled_kl: float ) -> Mapping[str, Any]: """Dynamically update the KL loss coefficients of each module with. @@ -125,7 +125,7 @@ def _update_module_kl_coeff( Args: module_id: The module whose KL loss coefficient to update. - sampled_kls: Mapping from Module ID to this module's KL divergence between - the action distributions of the current (most recently updated) module - and the old module version. + sampled_kl: The computed KL loss for the given Module + (KL divergence between the action distributions of the current + (most recently updated) module and the old module version). 
""" diff --git a/rllib/algorithms/appo/tf/appo_tf_learner.py b/rllib/algorithms/appo/tf/appo_tf_learner.py index 9af71bbb50b5..dbf8d8d418e4 100644 --- a/rllib/algorithms/appo/tf/appo_tf_learner.py +++ b/rllib/algorithms/appo/tf/appo_tf_learner.py @@ -132,7 +132,10 @@ def compute_loss_per_module( total_loss = ( mean_pi_loss + (mean_vf_loss * self.hps.vf_loss_coeff) - + (mean_entropy_loss * self.hps.entropy_coeff) + + ( + mean_entropy_loss + * self.entropy_coeff_scheduler.get_current_value(module_id) + ) + (mean_kl_loss * self.curr_kl_coeffs_per_module[module_id]) ) diff --git a/rllib/algorithms/appo/tf/appo_tf_rl_module.py b/rllib/algorithms/appo/tf/appo_tf_rl_module.py index ffb8abf86502..46c9281283b8 100644 --- a/rllib/algorithms/appo/tf/appo_tf_rl_module.py +++ b/rllib/algorithms/appo/tf/appo_tf_rl_module.py @@ -4,7 +4,7 @@ OLD_ACTION_DIST_LOGITS_KEY, ) from ray.rllib.algorithms.ppo.tf.ppo_tf_rl_module import PPOTfRLModule -from ray.rllib.core.models.base import ACTOR +from ray.rllib.core.models.base import ACTOR, CRITIC, STATE_IN from ray.rllib.core.models.tf.encoder import ENCODER_OUT from ray.rllib.core.rl_module.rl_module_with_target_networks_interface import ( RLModuleWithTargetNetworksInterface, @@ -45,7 +45,19 @@ def output_specs_train(self) -> List[str]: @override(PPOTfRLModule) def _forward_train(self, batch: NestedDict): outs = super()._forward_train(batch) + + # TODO (Artur): Remove this once Policy supports RNN + batch = batch.copy() + if self.encoder.config.shared: + batch[STATE_IN] = None + else: + batch[STATE_IN] = { + ACTOR: None, + CRITIC: None, + } + batch[SampleBatch.SEQ_LENS] = None old_pi_inputs_encoded = self.old_encoder(batch)[ENCODER_OUT][ACTOR] + old_action_dist_logits = tf.stop_gradient(self.old_pi(old_pi_inputs_encoded)) outs[OLD_ACTION_DIST_LOGITS_KEY] = old_action_dist_logits return outs diff --git a/rllib/algorithms/appo/torch/appo_torch_learner.py b/rllib/algorithms/appo/torch/appo_torch_learner.py index 1e604566ea8e..d1e9b4ed4ec6 
100644 --- a/rllib/algorithms/appo/torch/appo_torch_learner.py +++ b/rllib/algorithms/appo/torch/appo_torch_learner.py @@ -5,7 +5,7 @@ AppoLearner, LEARNER_RESULTS_CURR_KL_COEFF_KEY, LEARNER_RESULTS_KL_KEY, - OLD_ACTION_DIST_KEY, + OLD_ACTION_DIST_LOGITS_KEY, ) from ray.rllib.algorithms.impala.torch.vtrace_torch_v2 import ( make_time_major, @@ -37,12 +37,15 @@ def compute_loss_per_module( ) -> TensorType: values = fwd_out[SampleBatch.VF_PREDS] - action_dist_cls_train = self._module[module_id].get_train_action_dist_cls() + action_dist_cls_train = ( + self.module[module_id].unwrapped().get_train_action_dist_cls() + ) target_policy_dist = action_dist_cls_train.from_logits( fwd_out[SampleBatch.ACTION_DIST_INPUTS] ) - - old_target_policy_dist = fwd_out[OLD_ACTION_DIST_KEY] + old_target_policy_dist = action_dist_cls_train.from_logits( + fwd_out[OLD_ACTION_DIST_LOGITS_KEY] + ) old_target_policy_actions_logp = old_target_policy_dist.logp( batch[SampleBatch.ACTIONS] ) @@ -133,7 +136,10 @@ def compute_loss_per_module( total_loss = ( mean_pi_loss + (mean_vf_loss * self.hps.vf_loss_coeff) - + (mean_entropy_loss * self.hps.entropy_coeff) + + ( + mean_entropy_loss + * self.entropy_coeff_scheduler.get_current_value(module_id) + ) + (mean_kl_loss * self.curr_kl_coeffs_per_module[module_id]) ) diff --git a/rllib/algorithms/appo/torch/appo_torch_rl_module.py b/rllib/algorithms/appo/torch/appo_torch_rl_module.py index df2b653105ad..83710ca35b9b 100644 --- a/rllib/algorithms/appo/torch/appo_torch_rl_module.py +++ b/rllib/algorithms/appo/torch/appo_torch_rl_module.py @@ -1,7 +1,6 @@ from typing import List from ray.rllib.algorithms.appo.appo_learner import ( - OLD_ACTION_DIST_KEY, OLD_ACTION_DIST_LOGITS_KEY, ) from ray.rllib.algorithms.ppo.torch.ppo_torch_rl_module import PPOTorchRLModule @@ -35,8 +34,9 @@ def get_target_network_pairs(self): @override(PPOTorchRLModule) def output_specs_train(self) -> List[str]: return [ + SampleBatch.ACTION_DIST_INPUTS, + OLD_ACTION_DIST_LOGITS_KEY, 
SampleBatch.VF_PREDS, - OLD_ACTION_DIST_KEY, ] @override(PPOTorchRLModule) @@ -44,7 +44,5 @@ def _forward_train(self, batch: NestedDict): outs = super()._forward_train(batch) old_pi_inputs_encoded = self.old_encoder(batch)[ENCODER_OUT][ACTOR] old_action_dist_logits = self.old_pi(old_pi_inputs_encoded) - old_action_dist = self.action_dist_cls.from_logits(old_action_dist_logits) - outs[OLD_ACTION_DIST_KEY] = old_action_dist outs[OLD_ACTION_DIST_LOGITS_KEY] = old_action_dist_logits return outs diff --git a/rllib/algorithms/impala/impala.py b/rllib/algorithms/impala/impala.py index d1ecfda9d6e4..128275c6083c 100644 --- a/rllib/algorithms/impala/impala.py +++ b/rllib/algorithms/impala/impala.py @@ -7,6 +7,9 @@ import random from typing import Callable, List, Optional, Set, Tuple, Type, Union +import numpy as np +import tree # pip install dm_tree + import ray from ray import ObjectRef from ray.rllib import SampleBatch @@ -938,35 +941,39 @@ def learn_on_processed_samples(self) -> ResultDict: Aggregated results from the learner group after an update is completed. """ - result = {} - # There are batches on the queue -> Send them to the learner group. + # There are batches on the queue -> Send them all to the learner group. if self.batches_to_place_on_learner: - batch = self.batches_to_place_on_learner.pop(0) + batches = self.batches_to_place_on_learner[:] + self.batches_to_place_on_learner.clear() # If there are no learner workers and learning is directly on the driver # Then we can't do async updates, so we need to block. blocking = self.config.num_learner_workers == 0 - lg_results = self.learner_group.update( - batch, - reduce_fn=_reduce_impala_results, - block=blocking, - num_iters=self.config.num_sgd_iter, - minibatch_size=self.config.minibatch_size, - ) - # Nothing on the queue -> Don't send requests to learner group. 
- else: - lg_results = None - - if lg_results: - self._counters[NUM_ENV_STEPS_TRAINED] += lg_results[ALL_MODULES].pop( - NUM_ENV_STEPS_TRAINED - ) - self._counters[NUM_AGENT_STEPS_TRAINED] += lg_results[ALL_MODULES].pop( - NUM_AGENT_STEPS_TRAINED - ) + results = [] + for batch in batches: + result = self.learner_group.update( + batch, + reduce_fn=_reduce_impala_results, + block=blocking, + num_iters=self.config.num_sgd_iter, + minibatch_size=self.config.minibatch_size, + ) + if result: + self._counters[NUM_ENV_STEPS_TRAINED] += result[ALL_MODULES].pop( + NUM_ENV_STEPS_TRAINED + ) + self._counters[NUM_AGENT_STEPS_TRAINED] += result[ALL_MODULES].pop( + NUM_AGENT_STEPS_TRAINED + ) + results.append(result) self._counters.update(self.learner_group.get_in_queue_stats()) - result = lg_results + # If there are results, reduce-mean over each individual value and return. + if results: + return tree.map_structure(lambda *x: np.mean(x), *results) - return result + # Nothing on the queue -> Don't send requests to learner group + # or no results ready (from previous `self.learner_group.update()` calls) for + # reducing. + return {} def place_processed_samples_on_learner_thread_queue(self) -> None: """Place processed samples on the learner queue for training. diff --git a/rllib/algorithms/impala/impala_learner.py b/rllib/algorithms/impala/impala_learner.py index 7e1153f4acc4..568f40f6c24b 100644 --- a/rllib/algorithms/impala/impala_learner.py +++ b/rllib/algorithms/impala/impala_learner.py @@ -93,7 +93,7 @@ def _reduce_impala_results(results: List[ResultDict]) -> ResultDict: """Reduce/Aggregate a list of results from Impala Learners. Average the values of the result dicts. Add keys for the number of agent and env - steps trained. + steps trained (on all modules). Args: results: result dicts to reduce. 
diff --git a/rllib/algorithms/impala/tf/impala_tf_learner.py b/rllib/algorithms/impala/tf/impala_tf_learner.py index 78893ac9180b..d397eb268fdc 100644 --- a/rllib/algorithms/impala/tf/impala_tf_learner.py +++ b/rllib/algorithms/impala/tf/impala_tf_learner.py @@ -19,7 +19,7 @@ class ImpalaTfLearner(ImpalaLearner, TfLearner): def compute_loss_per_module( self, module_id: str, batch: SampleBatch, fwd_out: Mapping[str, TensorType] ) -> TensorType: - action_dist_class_train = self._module[module_id].get_train_action_dist_cls() + action_dist_class_train = self.module[module_id].get_train_action_dist_cls() target_policy_dist = action_dist_class_train.from_logits( fwd_out[SampleBatch.ACTION_DIST_INPUTS] ) @@ -95,7 +95,8 @@ def compute_loss_per_module( total_loss = ( pi_loss + vf_loss * self.hps.vf_loss_coeff - + mean_entropy_loss * self.hps.entropy_coeff + + mean_entropy_loss + * (self.entropy_coeff_scheduler.get_current_value(module_id)) ) return { self.TOTAL_LOSS_KEY: total_loss, diff --git a/rllib/algorithms/impala/torch/impala_torch_learner.py b/rllib/algorithms/impala/torch/impala_torch_learner.py index d6f89a299739..bd1c4c37f3d4 100644 --- a/rllib/algorithms/impala/torch/impala_torch_learner.py +++ b/rllib/algorithms/impala/torch/impala_torch_learner.py @@ -23,7 +23,9 @@ class ImpalaTorchLearner(ImpalaLearner, TorchLearner): def compute_loss_per_module( self, module_id: str, batch: SampleBatch, fwd_out: Mapping[str, TensorType] ) -> TensorType: - action_dist_class_train = self._module[module_id].get_train_action_dist_cls() + action_dist_class_train = ( + self.module[module_id].unwrapped().get_train_action_dist_cls() + ) target_policy_dist = action_dist_class_train.from_logits( fwd_out[SampleBatch.ACTION_DIST_INPUTS] ) @@ -111,7 +113,8 @@ def compute_loss_per_module( total_loss = ( pi_loss + vf_loss * self.hps.vf_loss_coeff - + mean_entropy_loss * self.hps.entropy_coeff + + mean_entropy_loss + * (self.entropy_coeff_scheduler.get_current_value(module_id)) ) return { 
self.TOTAL_LOSS_KEY: total_loss, diff --git a/rllib/algorithms/ppo/ppo_catalog.py b/rllib/algorithms/ppo/ppo_catalog.py index 186953b57aba..c9c53aa514ff 100644 --- a/rllib/algorithms/ppo/ppo_catalog.py +++ b/rllib/algorithms/ppo/ppo_catalog.py @@ -38,7 +38,7 @@ class PPOCatalog(Catalog): - Value Function Head: The head used to compute the value function. The ActorCriticEncoder is a wrapper around Encoders to produce separate outputs - for the policy and value function. See implementations of PPORLModuleBase for + for the policy and value function. See implementations of PPORLModule for more details. Any custom ActorCriticEncoder can be built by overriding the @@ -89,8 +89,9 @@ def __init__( hidden_layer_dims=post_fcnet_hiddens, hidden_layer_activation=post_fcnet_activation, output_activation="linear", - output_dims=None, # We don't know the output dimension yet, because it - # depends on the action distribution input dimension + # We don't know the output dimension yet, because it depends on the + # action distribution input dimension. + output_dims=None, ) self.vf_head_config = MLPHeadConfig( @@ -106,7 +107,7 @@ def build_actor_critic_encoder(self, framework: str) -> ActorCriticEncoder: The default behavior is to build the encoder from the encoder_config. This can be overridden to build a custom ActorCriticEncoder as a means of - configuring the behavior of a PPORLModuleBase implementation. + configuring the behavior of a PPORLModule implementation. Args: framework: The framework to use. Either "torch" or "tf2". @@ -131,7 +132,7 @@ def build_pi_head(self, framework: str) -> Model: The default behavior is to build the head from the pi_head_config. This can be overridden to build a custom policy head as a means of configuring - the behavior of a PPORLModuleBase implementation. + the behavior of a PPORLModule implementation. Args: framework: The framework to use. Either "torch" or "tf2". 
@@ -156,7 +157,7 @@ def build_vf_head(self, framework: str) -> Model: The default behavior is to build the head from the vf_head_config. This can be overridden to build a custom value function head as a means of - configuring the behavior of a PPORLModuleBase implementation. + configuring the behavior of a PPORLModule implementation. Args: framework: The framework to use. Either "torch" or "tf2". diff --git a/rllib/algorithms/ppo/ppo_base_rl_module.py b/rllib/algorithms/ppo/ppo_rl_module.py similarity index 84% rename from rllib/algorithms/ppo/ppo_base_rl_module.py rename to rllib/algorithms/ppo/ppo_rl_module.py index 620da1422e9e..a7ec91fa3399 100644 --- a/rllib/algorithms/ppo/ppo_base_rl_module.py +++ b/rllib/algorithms/ppo/ppo_rl_module.py @@ -3,11 +3,11 @@ """ import abc +from typing import Type from ray.rllib.core.models.base import ActorCriticEncoder from ray.rllib.core.models.specs.specs_dict import SpecDict from ray.rllib.core.rl_module.rl_module import RLModule -from ray.rllib.core.rl_module.rl_module import RLModuleConfig from ray.rllib.models.distributions import Distribution from ray.rllib.policy.sample_batch import SampleBatch from ray.rllib.utils.annotations import ExperimentalAPI @@ -15,10 +15,7 @@ @ExperimentalAPI -class PPORLModuleBase(RLModule, abc.ABC): - def __init__(self, config: RLModuleConfig): - super().__init__(config) - +class PPORLModule(RLModule, abc.ABC): def setup(self): # __sphinx_doc_begin__ catalog = self.config.get_catalog() @@ -33,13 +30,13 @@ def setup(self): assert isinstance(self.encoder, ActorCriticEncoder) - def get_train_action_dist_cls(self) -> Distribution: + def get_train_action_dist_cls(self) -> Type[Distribution]: return self.action_dist_cls - def get_exploration_action_dist_cls(self) -> Distribution: + def get_exploration_action_dist_cls(self) -> Type[Distribution]: return self.action_dist_cls - def get_inference_action_dist_cls(self) -> Distribution: + def get_inference_action_dist_cls(self) -> Type[Distribution]: 
return self.action_dist_cls @override(RLModule) diff --git a/rllib/algorithms/ppo/tests/test_ppo_with_rl_module.py b/rllib/algorithms/ppo/tests/test_ppo_with_rl_module.py index c700ff7ab16e..b365f25a1043 100644 --- a/rllib/algorithms/ppo/tests/test_ppo_with_rl_module.py +++ b/rllib/algorithms/ppo/tests/test_ppo_with_rl_module.py @@ -61,10 +61,10 @@ def on_train_result(self, *, algorithm, result: dict, **kwargs): 0.05 if algorithm.iteration == 1 else 0.0, ) - # Learning rate should decrease by 0.0001 per iteration. + # Learning rate should decrease by 0.0001/4 per iteration. check( stats[LEARNER_RESULTS_CURR_LR_KEY], - 0.0003 if algorithm.iteration == 1 else 0.0002, + 0.0000075 if algorithm.iteration == 1 else 0.000005, ) # Compare reported curr lr vs the actual lr found in the optimizer object. optim = algorithm.learner_group._learner._named_optimizers[DEFAULT_POLICY_ID] @@ -94,7 +94,7 @@ def test_ppo_compilation_and_schedule_mixins(self): .training( num_sgd_iter=2, # Setup lr schedule for testing lr-scheduling correctness. - lr_schedule=[[0, 0.0004], [512, 0.0]], # 512=4x128 + lr_schedule=[[0, 0.00001], [512, 0.0]], # 512=4x128 # Set entropy_coeff to a faulty value to proof that it'll get # overridden by the schedule below (which is expected). entropy_coeff=100.0, diff --git a/rllib/algorithms/ppo/tf/ppo_tf_learner.py b/rllib/algorithms/ppo/tf/ppo_tf_learner.py index df22ad27b959..1794990713f6 100644 --- a/rllib/algorithms/ppo/tf/ppo_tf_learner.py +++ b/rllib/algorithms/ppo/tf/ppo_tf_learner.py @@ -39,8 +39,8 @@ def compute_loss_per_module( # learning rate for that agent. 
# TODO (Kourosh): come back to RNNs later - action_dist_class_train = self._module[module_id].get_train_action_dist_cls() - action_dist_class_exploration = self._module[ + action_dist_class_train = self.module[module_id].get_train_action_dist_cls() + action_dist_class_exploration = self.module[ module_id ].get_exploration_action_dist_cls() curr_action_dist = action_dist_class_train.from_logits( @@ -67,8 +67,8 @@ def compute_loss_per_module( "This can happen naturally in deterministic " "environments where the optimal policy has zero mass " "for a specific action. To fix this issue, consider " - "setting the coefficient for the KL loss term to " - "zero or increasing policy entropy." + "setting `kl_coeff` to 0.0 or increasing `entropy_coeff` in your " + "config." ) else: mean_kl_loss = tf.constant(0.0, dtype=logp_ratio.dtype) @@ -103,7 +103,6 @@ def compute_loss_per_module( -surrogate_loss + self.hps.vf_loss_coeff * vf_loss_clipped - self.entropy_coeff_scheduler.get_current_value(module_id) * curr_entropy - # - self.curr_entropy_coeffs_per_module[module_id] * curr_entropy ) # Add mean_kl_loss (already processed through `reduce_mean_valid`), diff --git a/rllib/algorithms/ppo/tf/ppo_tf_rl_module.py b/rllib/algorithms/ppo/tf/ppo_tf_rl_module.py index b14e9ab176eb..9c6416632a69 100644 --- a/rllib/algorithms/ppo/tf/ppo_tf_rl_module.py +++ b/rllib/algorithms/ppo/tf/ppo_tf_rl_module.py @@ -1,6 +1,6 @@ from typing import Mapping, Any -from ray.rllib.algorithms.ppo.ppo_base_rl_module import PPORLModuleBase +from ray.rllib.algorithms.ppo.ppo_rl_module import PPORLModule from ray.rllib.core.models.base import ACTOR, CRITIC, STATE_IN from ray.rllib.core.models.tf.encoder import ENCODER_OUT from ray.rllib.core.rl_module.rl_module import RLModule @@ -13,12 +13,12 @@ tf1, tf, _ = try_import_tf() -class PPOTfRLModule(PPORLModuleBase, TfRLModule): +class PPOTfRLModule(PPORLModule, TfRLModule): framework: str = "tf2" def __init__(self, *args, **kwargs): TfRLModule.__init__(self, 
*args, **kwargs) - PPORLModuleBase.__init__(self, *args, **kwargs) + PPORLModule.__init__(self, *args, **kwargs) # TODO(Artur): Comment in as soon as we support RNNs from Polciy side # @override(RLModule) diff --git a/rllib/algorithms/ppo/torch/ppo_torch_learner.py b/rllib/algorithms/ppo/torch/ppo_torch_learner.py index cdaae478519a..0b3ba822a066 100644 --- a/rllib/algorithms/ppo/torch/ppo_torch_learner.py +++ b/rllib/algorithms/ppo/torch/ppo_torch_learner.py @@ -39,10 +39,12 @@ def compute_loss_per_module( # learning rate for that agent. # TODO (Kourosh): come back to RNNs later - action_dist_class_train = self._module[module_id].get_train_action_dist_cls() - action_dist_class_exploration = self._module[ - module_id - ].get_exploration_action_dist_cls() + action_dist_class_train = ( + self.module[module_id].unwrapped().get_train_action_dist_cls() + ) + action_dist_class_exploration = ( + self.module[module_id].unwrapped().get_exploration_action_dist_cls() + ) curr_action_dist = action_dist_class_train.from_logits( fwd_out[SampleBatch.ACTION_DIST_INPUTS] @@ -68,8 +70,8 @@ def compute_loss_per_module( "This can happen naturally in deterministic " "environments where the optimal policy has zero mass " "for a specific action. To fix this issue, consider " - "setting the coefficient for the KL loss term to " - "zero or increasing policy entropy." + "setting `kl_coeff` to 0.0 or increasing `entropy_coeff` in your " + "config." 
) else: mean_kl_loss = torch.tensor(0.0, device=logp_ratio.device) diff --git a/rllib/algorithms/ppo/torch/ppo_torch_rl_module.py b/rllib/algorithms/ppo/torch/ppo_torch_rl_module.py index e908a1b03b03..58cc3ff70c85 100644 --- a/rllib/algorithms/ppo/torch/ppo_torch_rl_module.py +++ b/rllib/algorithms/ppo/torch/ppo_torch_rl_module.py @@ -1,6 +1,6 @@ from typing import Mapping, Any -from ray.rllib.algorithms.ppo.ppo_base_rl_module import PPORLModuleBase +from ray.rllib.algorithms.ppo.ppo_rl_module import PPORLModule from ray.rllib.core.models.base import ACTOR, CRITIC, ENCODER_OUT, STATE_IN from ray.rllib.core.rl_module.rl_module import RLModule @@ -13,12 +13,12 @@ torch, nn = try_import_torch() -class PPOTorchRLModule(PPORLModuleBase, TorchRLModule): +class PPOTorchRLModule(PPORLModule, TorchRLModule): framework: str = "torch" def __init__(self, *args, **kwargs): TorchRLModule.__init__(self, *args, **kwargs) - PPORLModuleBase.__init__(self, *args, **kwargs) + PPORLModule.__init__(self, *args, **kwargs) @override(RLModule) def _forward_inference(self, batch: NestedDict) -> Mapping[str, Any]: diff --git a/rllib/core/learner/learner.py b/rllib/core/learner/learner.py index ef2e3fc01217..1e00fb6a6cff 100644 --- a/rllib/core/learner/learner.py +++ b/rllib/core/learner/learner.py @@ -1091,7 +1091,7 @@ def _update( ) -> Mapping[str, Any]: """Performs a single update given a batch of data.""" # TODO (Kourosh): remove the MultiAgentBatch from the type, it should be - # NestedDict from the base class. + # NestedDict from the base class. 
tensorbatch = self._convert_batch_type(batch) fwd_out = self._module.forward_train(tensorbatch) loss = self.compute_loss(fwd_out=fwd_out, batch=tensorbatch) diff --git a/rllib/core/learner/learner_group.py b/rllib/core/learner/learner_group.py index 0ab6fb47f569..ee2213351787 100644 --- a/rllib/core/learner/learner_group.py +++ b/rllib/core/learner/learner_group.py @@ -1,4 +1,5 @@ from collections import deque +from functools import partial import pathlib from typing import ( Any, @@ -127,11 +128,14 @@ def __init__( # Run the neural network building code on remote workers. ray.get([w.build.remote() for w in self._workers]) - # Use only 1 max in flight request per worker since training workers have to - # be synchronously executed. + self._worker_manager = FaultTolerantActorManager( self._workers, - max_remote_requests_in_flight_per_actor=1, + # TODO (sven): This probably works even without any restriction + # (allowing for any arbitrary number of requests in-flight). Test with + # 3 first, then with unlimited, and if both show the same behavior on + # an async algo, remove this restriction entirely. + max_remote_requests_in_flight_per_actor=3, ) self._in_queue = deque(maxlen=max_queue_len) @@ -152,23 +156,25 @@ def update( *, minibatch_size: Optional[int] = None, num_iters: int = 1, - reduce_fn: Callable[[ResultDict], ResultDict] = _reduce_mean_results, + reduce_fn: Optional[Callable[[List[Mapping[str, Any]]], ResultDict]] = ( + _reduce_mean_results + ), block: bool = True, - ) -> List[Mapping[str, Any]]: - """Do one gradient based update to the Learner(s). + ) -> Union[Mapping[str, Any], List[Mapping[str, Any]]]: + """Do one or more gradient based updates to the Learner(s) based on given data. Args: - batch: The data to use for the update. + batch: The data batch to use for the update. minibatch_size: The minibatch size to use for the update. num_iters: The number of complete passes over all the sub-batches in the input multi-agent batch. 
- reduce_fn: A function to reduce the results from a list of Learner Actors - into a single result. This can be any arbitrary function that takes a - list of dictionaries and returns a single dictionary. For example you - can either take an average (default) or concatenate the results (for - example for metrics) or be more selective about you want to report back - to the algorithm's training_step. If None is passed, the results will - not get reduced. + reduce_fn: An optional callable to reduce the results from a list of the + Learner actors into a single result. This can be any arbitrary function + that takes a list of dictionaries and returns a single dictionary. For + example you can either take an average (default) or concatenate the + results (for example for metrics) or be more selective about you want to + report back to the algorithm's training_step. If None is passed, the + results will not get reduced. block: Whether to block until the update is complete. Returns: @@ -205,9 +211,15 @@ def update( block=block, ) - # TODO (Kourosh): Maybe we should use LearnerInfoBuilder() here? - if reduce_fn is None or not results: + # No reduce function -> Return results as is: (possibly empty) list of mappings. + if reduce_fn is None: return results + # If results are empty, don't run them through reduce_fn, but return empty dict. + elif not results: + return {} + # Run results (list of result dicts from our n learner actors) through + # reduction function and return single mapping. + # TODO (Kourosh): Maybe we should use LearnerInfoBuilder() here? 
return reduce_fn(results) def _distributed_update( @@ -216,7 +228,9 @@ def _distributed_update( *, minibatch_size: Optional[int] = None, num_iters: int = 1, - reduce_fn: Callable[[ResultDict], ResultDict] = _reduce_mean_results, + reduce_fn: Callable[[List[Mapping[str, Any]]], ResultDict] = ( + _reduce_mean_results + ), block: bool = True, ) -> List[Mapping[str, Any]]: """Do a gradient based update to the Learners using DDP training. @@ -230,43 +244,77 @@ def _distributed_update( See `.update()` docstring. Returns: - A list of dictionaries of results from the updates from the Learner(s) + A list of dictionaries of results from the updates from the individual + Learner(s) """ + # Make sure minibatch size is reduced to the correct number of shards as well + # (just like we split each batch into the number of learner workers). + if minibatch_size is not None: + minibatch_size //= len(self._workers) + + def _learner_update(learner, minibatch): + return learner.update( + minibatch, + minibatch_size=minibatch_size, + num_iters=num_iters, + reduce_fn=reduce_fn, + ) if block: - results = self._worker_manager.foreach_actor( - [ - lambda w: w.update( - b, - minibatch_size=minibatch_size, - num_iters=num_iters, - reduce_fn=reduce_fn, - ) - for b in ShardBatchIterator(batch, len(self._workers)) - ] - ) - else: - if batch is not None: - self._in_queue.append(batch) - results = self._worker_manager.fetch_ready_async_reqs() - if self._worker_manager_ready() and self._in_queue: - batch = self._in_queue.popleft() - self._worker_manager.foreach_actor_async( + results = self._get_results( + self._worker_manager.foreach_actor( [ - lambda w: w.update( - b, - minibatch_size=minibatch_size, - num_iters=num_iters, - reduce_fn=reduce_fn, - ) - for b in ShardBatchIterator(batch, len(self._workers)) + partial(_learner_update, minibatch=minibatch) + for minibatch in ShardBatchIterator(batch, len(self._workers)) ] ) + ) + else: + # Queue the new batches. 
+ # If queue is full, kick out the oldest item (and thus add its + # length to the "dropped ts" counter). + if len(self._in_queue) == self._in_queue.maxlen: + self._in_queue_ts_dropped += len(self._in_queue[0]) + + self._in_queue.append(batch) + + # Retrieve all ready results (kicked off by prior calls to this method). + results = self._worker_manager.fetch_ready_async_reqs() + # Only if there are no more requests in-flight on any of the learners, + # we can send in one new batch for sharding and parallel learning. + if self._worker_manager_ready(): + count = 0 + # TODO (sven): This probably works even without any restriction + # (allowing for any arbitrary number of requests in-flight). Test with + # 3 first, then with unlimited, and if both show the same behavior on + # an async algo, remove this restriction entirely. + while len(self._in_queue) > 0 and count < 3: + # Pull a single batch from the queue (from the left side, meaning: + # use the oldest one first). + batch = self._in_queue.popleft() + self._worker_manager.foreach_actor_async( + [ + partial(_learner_update, minibatch=minibatch) + for minibatch in ShardBatchIterator( + batch, len(self._workers) + ) + ] + ) + count += 1 + + results = self._get_results(results) - return self._get_results(results) + return results def _worker_manager_ready(self): - return self._worker_manager.num_outstanding_async_reqs() == 0 + # TODO (sven): This probably works even without any restriction (allowing for + # any arbitrary number of requests in-flight). Test with 3 first, then with + # unlimited, and if both show the same behavior on an async algo, remove + # this method entirely. 
+ return ( + self._worker_manager.num_outstanding_async_reqs() + <= self._worker_manager.num_actors() * 2 + ) def _get_results(self, results): processed_results = [] diff --git a/rllib/core/models/catalog.py b/rllib/core/models/catalog.py index 8494f6babadb..53f9e5776758 100644 --- a/rllib/core/models/catalog.py +++ b/rllib/core/models/catalog.py @@ -367,12 +367,18 @@ def get_encoder_config( encoder_config = CNNEncoderConfig( input_dims=observation_space.shape, cnn_filter_specifiers=model_config_dict["conv_filters"], - cnn_activation=activation, + cnn_activation=model_config_dict["conv_activation"], cnn_use_layernorm=model_config_dict.get( "conv_use_layernorm", False ), output_dims=[encoder_latent_dim], - output_activation=output_activation, + # TODO (sven): Setting this to None here helps with the existing + # APPO Pong benchmark (actually, leaving this at default=tanh does + # NOT learn at all!). + # We need to remove the last Dense layer from CNNEncoder in general + # AND establish proper ModelConfig objects (instead of hacking + # everything with the old default model config dict). 
+ output_activation=None, ) # input_space is a 2D Box elif ( diff --git a/rllib/core/models/tf/encoder.py b/rllib/core/models/tf/encoder.py index 71f308436b18..51ce70eb321b 100644 --- a/rllib/core/models/tf/encoder.py +++ b/rllib/core/models/tf/encoder.py @@ -134,8 +134,8 @@ def get_input_specs(self) -> Optional[Spec]: SampleBatch.OBS: TensorSpec( "b, d", d=self.config.input_dims[0], framework="tf2" ), - # STATE_IN: None, - # SampleBatch.SEQ_LENS: None, + STATE_IN: None, + SampleBatch.SEQ_LENS: None, } ) @@ -155,7 +155,7 @@ def _forward(self, inputs: NestedDict, **kwargs) -> NestedDict: return NestedDict( { ENCODER_OUT: self.net(inputs[SampleBatch.OBS]), - STATE_OUT: None, # inputs[STATE_IN], + STATE_OUT: inputs[STATE_IN], } ) diff --git a/rllib/models/tf/tf_distributions.py b/rllib/models/tf/tf_distributions.py index 8652cdfc3ec5..373ba3440fc8 100644 --- a/rllib/models/tf/tf_distributions.py +++ b/rllib/models/tf/tf_distributions.py @@ -105,8 +105,8 @@ def __init__( if logits is not None: assert temperature > 0.0, "Categorical `temperature` must be > 0.0!" - _logits = logits / temperature - probs = tf.nn.softmax(_logits, axis=-1) + logits /= temperature + probs = tf.nn.softmax(logits, axis=-1) self.probs = probs self.logits = logits @@ -118,8 +118,10 @@ def __init__( def logp(self, value: TensorType, **kwargs) -> TensorType: # This prevents an error in which float values at the boundaries of the range # of the distribution are passed to this function. 
- value = tf.cast(value, tf.int32) - return self._dist.log_prob(value, **kwargs) + return -tf.nn.sparse_softmax_cross_entropy_with_logits( + logits=self.logits if self.logits is not None else self.probs, + labels=tf.cast(value, tf.int32), + ) @override(TfDistribution) def _get_tf_distribution( diff --git a/rllib/policy/eager_tf_policy.py b/rllib/policy/eager_tf_policy.py index e736f31dd1e1..9efe552d5aa6 100644 --- a/rllib/policy/eager_tf_policy.py +++ b/rllib/policy/eager_tf_policy.py @@ -175,22 +175,39 @@ def compute_actions_from_input_dict( ) -> Tuple[TensorType, List[TensorType], Dict[str, TensorType]]: """Traced version of Policy.compute_actions_from_input_dict.""" - # NOTE: In the new RLModule stack the sampling side is not traced with this - # justification that in order to speed up sampling we need to use more - # actors. # Create a traced version of `self._compute_actions_helper`. - if ( - not self.config.get("_enable_rl_module_api", False) - and self._traced_compute_actions_helper is False - and not self._no_tracing - ): - self._compute_actions_helper = _convert_eager_inputs( - tf.function( - super(TracedEagerPolicy, self)._compute_actions_helper, - autograph=False, - reduce_retracing=True, + if self._traced_compute_actions_helper is False and not self._no_tracing: + if self.config.get("_enable_rl_module_api"): + self._compute_actions_helper_rl_module_explore = ( + _convert_eager_inputs( + tf.function( + super( + TracedEagerPolicy, self + )._compute_actions_helper_rl_module_explore, + autograph=True, + reduce_retracing=True, + ) + ) + ) + self._compute_actions_helper_rl_module_inference = ( + _convert_eager_inputs( + tf.function( + super( + TracedEagerPolicy, self + )._compute_actions_helper_rl_module_inference, + autograph=True, + reduce_retracing=True, + ) + ) + ) + else: + self._compute_actions_helper = _convert_eager_inputs( + tf.function( + super(TracedEagerPolicy, self)._compute_actions_helper, + autograph=False, + reduce_retracing=True, + ) ) - ) 
self._traced_compute_actions_helper = True # Now that the helper method is traced, call super's diff --git a/rllib/policy/eager_tf_policy_v2.py b/rllib/policy/eager_tf_policy_v2.py index aa49eceb3dbf..e9158176e2a0 100644 --- a/rllib/policy/eager_tf_policy_v2.py +++ b/rllib/policy/eager_tf_policy_v2.py @@ -484,14 +484,20 @@ def compute_actions_from_input_dict( timestep=timestep, explore=explore, tf_sess=self.get_session() ) - ret = self._compute_actions_helper( - input_dict, - state_batches, - # TODO: Passing episodes into a traced method does not work. - None if self.config["eager_tracing"] else episodes, - explore, - timestep, - ) + if self.config.get("_enable_rl_module_api"): + if explore: + ret = self._compute_actions_helper_rl_module_explore(input_dict) + else: + ret = self._compute_actions_helper_rl_module_inference(input_dict) + else: + ret = self._compute_actions_helper( + input_dict, + state_batches, + # TODO: Passing episodes into a traced method does not work. + None if self.config["eager_tracing"] else episodes, + explore, + timestep, + ) # Update our global timestep by the batch size. self.global_timestep.assign_add(tree.flatten(ret[0])[0].shape.as_list()[0]) return convert_to_numpy(ret) @@ -814,9 +820,83 @@ def loss_initialized(self): return self._loss_initialized # TODO: Figure out, why _ray_trace_ctx=None helps to prevent a crash in - # AlphaStar w/ framework=tf2; eager_tracing=True on the policy learner actors. + # eager_tracing=True. # It seems there may be a clash between the traced-by-tf function and the # traced-by-ray functions (for making the policy class a ray actor). + @with_lock + def _compute_actions_helper_rl_module_explore( + self, input_dict, _ray_trace_ctx=None + ): + # Increase the tracing counter to make sure we don't re-trace too + # often. If eager_tracing=True, this counter should only get + # incremented during the @tf.function trace operations, never when + # calling the already traced function after that. 
+ self._re_trace_counter += 1 + + # Add models `forward_explore` extra fetches. + extra_fetches = {} + + input_dict = NestedDict(input_dict) + # TODO (sven): Support RNNs when using RLModules. + input_dict[STATE_IN] = None + input_dict[SampleBatch.SEQ_LENS] = None + + action_dist_class = self.model.get_exploration_action_dist_cls() + fwd_out = self.model.forward_exploration(input_dict) + action_dist = action_dist_class.from_logits( + fwd_out[SampleBatch.ACTION_DIST_INPUTS] + ) + actions = action_dist.sample() + + # Anything but action_dist and state_out is an extra fetch + for k, v in fwd_out.items(): + if k not in [SampleBatch.ACTIONS, "state_out"]: + extra_fetches[k] = v + + # Action-logp and action-prob. + logp = action_dist.logp(actions) + extra_fetches[SampleBatch.ACTION_LOGP] = logp + extra_fetches[SampleBatch.ACTION_PROB] = tf.exp(logp) + + return actions, {}, extra_fetches + + # TODO: Figure out, why _ray_trace_ctx=None helps to prevent a crash in + # eager_tracing=True. + # It seems there may be a clash between the traced-by-tf function and the + # traced-by-ray functions (for making the policy class a ray actor). + @with_lock + def _compute_actions_helper_rl_module_inference( + self, input_dict, _ray_trace_ctx=None + ): + # Increase the tracing counter to make sure we don't re-trace too + # often. If eager_tracing=True, this counter should only get + # incremented during the @tf.function trace operations, never when + # calling the already traced function after that. + self._re_trace_counter += 1 + + # Add models `forward_explore` extra fetches. + extra_fetches = {} + + input_dict = NestedDict(input_dict) + # TODO (sven): Support RNNs when using RLModules. 
+ input_dict[STATE_IN] = None + input_dict[SampleBatch.SEQ_LENS] = None + + action_dist_class = self.model.get_inference_action_dist_cls() + fwd_out = self.model.forward_inference(input_dict) + action_dist = action_dist_class.from_logits( + fwd_out[SampleBatch.ACTION_DIST_INPUTS] + ) + action_dist = action_dist.to_deterministic() + actions = action_dist.sample() + + # Anything but action_dist and state_out is an extra fetch + for k, v in fwd_out.items(): + if k not in [SampleBatch.ACTIONS, "state_out"]: + extra_fetches[k] = v + + return actions, {}, extra_fetches + @with_lock def _compute_actions_helper( self, @@ -831,10 +911,7 @@ def _compute_actions_helper( # often. If eager_tracing=True, this counter should only get # incremented during the @tf.function trace operations, never when # calling the already traced function after that. - # NOTE: On the new RLModule API, we won't trace the sampling side, so we should - # not increment this counter to trigger excess re-tracing error. - if not self.config.get("_enable_rl_module_api", False): - self._re_trace_counter += 1 + self._re_trace_counter += 1 # Calculate RNN sequence lengths. batch_size = tree.flatten(input_dict[SampleBatch.OBS])[0].shape[0] @@ -843,87 +920,53 @@ def _compute_actions_helper( # Add default and custom fetches. 
extra_fetches = {} - if self.config.get("_enable_rl_module_api", False) is False: - scope = tf.variable_creator_scope(_disallow_var_creation) - scope.__enter__() - - if self.config.get("_enable_rl_module_api", False): - input_dict = NestedDict(input_dict) - input_dict[STATE_IN] = state_batches - input_dict[SampleBatch.SEQ_LENS] = seq_lens + with tf.variable_creator_scope(_disallow_var_creation): - if explore: - action_dist_class = self.model.get_exploration_action_dist_cls() - fwd_out = self.model.forward_exploration(input_dict) - action_dist = action_dist_class.from_logits( - fwd_out[SampleBatch.ACTION_DIST_INPUTS] - ) - actions = action_dist.sample() - logp = action_dist.logp(actions) - else: - action_dist_class = self.model.get_inference_action_dist_cls() - fwd_out = self.model.forward_inference(input_dict) - action_dist = action_dist_class.from_logits( - fwd_out[SampleBatch.ACTION_DIST_INPUTS] - ) - action_dist = action_dist.to_deterministic() - actions = action_dist.sample() - logp = None - - state_out = fwd_out.get("state_out", {}) - - # anything but action_dist and state_out is an extra fetch - for k, v in fwd_out.items(): - if k not in [SampleBatch.ACTION_DIST, "state_out"]: - extra_fetches[k] = v - dist_inputs = None - - elif is_overridden(self.action_sampler_fn): - actions, logp, dist_inputs, state_out = self.action_sampler_fn( - self.model, - input_dict[SampleBatch.OBS], - explore=explore, - timestep=timestep, - episodes=episodes, - ) - else: - if is_overridden(self.action_distribution_fn): - # Try new action_distribution_fn signature, supporting - # state_batches and seq_lens. 
- ( - dist_inputs, - self.dist_class, - state_out, - ) = self.action_distribution_fn( + if is_overridden(self.action_sampler_fn): + actions, logp, dist_inputs, state_out = self.action_sampler_fn( self.model, - obs_batch=input_dict[SampleBatch.OBS], - state_batches=state_batches, - seq_lens=seq_lens, + input_dict[SampleBatch.OBS], explore=explore, timestep=timestep, - is_training=False, + episodes=episodes, ) - elif isinstance(self.model, tf.keras.Model): - input_dict = SampleBatch(input_dict, seq_lens=seq_lens) - if state_batches and "state_in_0" not in input_dict: - for i, s in enumerate(state_batches): - input_dict[f"state_in_{i}"] = s - self._lazy_tensor_dict(input_dict) - dist_inputs, state_out, extra_fetches = self.model(input_dict) else: - dist_inputs, state_out = self.model(input_dict, state_batches, seq_lens) - - action_dist = self.dist_class(dist_inputs, self.model) + if is_overridden(self.action_distribution_fn): + # Try new action_distribution_fn signature, supporting + # state_batches and seq_lens. + ( + dist_inputs, + self.dist_class, + state_out, + ) = self.action_distribution_fn( + self.model, + obs_batch=input_dict[SampleBatch.OBS], + state_batches=state_batches, + seq_lens=seq_lens, + explore=explore, + timestep=timestep, + is_training=False, + ) + elif isinstance(self.model, tf.keras.Model): + input_dict = SampleBatch(input_dict, seq_lens=seq_lens) + if state_batches and "state_in_0" not in input_dict: + for i, s in enumerate(state_batches): + input_dict[f"state_in_{i}"] = s + self._lazy_tensor_dict(input_dict) + dist_inputs, state_out, extra_fetches = self.model(input_dict) + else: + dist_inputs, state_out = self.model( + input_dict, state_batches, seq_lens + ) - # Get the exploration action from the forward results. 
- actions, logp = self.exploration.get_exploration_action( - action_distribution=action_dist, - timestep=timestep, - explore=explore, - ) + action_dist = self.dist_class(dist_inputs, self.model) - if self.config.get("_enable_rl_module_api", False) is False: - scope.__exit__(None, None, None) + # Get the exploration action from the forward results. + actions, logp = self.exploration.get_exploration_action( + action_distribution=action_dist, + timestep=timestep, + explore=explore, + ) # Action-logp and action-prob. if logp is not None: diff --git a/rllib/policy/torch_policy_v2.py b/rllib/policy/torch_policy_v2.py index 4fb224d77f38..e4db0da342df 100644 --- a/rllib/policy/torch_policy_v2.py +++ b/rllib/policy/torch_policy_v2.py @@ -12,6 +12,7 @@ import tree # pip install dm_tree import ray +from ray.rllib.core.models.base import STATE_OUT from ray.rllib.models.catalog import ModelCatalog from ray.rllib.models.modelv2 import ModelV2 from ray.rllib.models.torch.torch_action_dist import TorchDistributionWrapper @@ -1127,9 +1128,10 @@ def _compute_action_helper( actions = action_dist.sample() logp = None - state_out = fwd_out.pop("state_out", {}) + # Anything but actions and state_out is an extra fetch. + state_out = fwd_out.pop(STATE_OUT, {}) extra_fetches = fwd_out - dist_inputs = None + dist_inputs = fwd_out[SampleBatch.ACTION_DIST_INPUTS] elif is_overridden(self.action_sampler_fn): action_dist = None actions, logp, dist_inputs, state_out = self.action_sampler_fn( From 60655319089a660114bc4e2c3833e412755e29ce Mon Sep 17 00:00:00 2001 From: Lonnie Liu <95255098+aslonnie@users.noreply.github.com> Date: Thu, 11 May 2023 09:54:32 -0700 Subject: [PATCH 341/424] [ci] Use python 3.9 in WORKSPACE (#35255) Seems that python 3.8 tool chain will break arm building.. 
Signed-off-by: Lonnie Liu --- WORKSPACE | 6 +++--- release/BUILD | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/WORKSPACE b/WORKSPACE index 06b6bd03ee0f..eb6aeba907e3 100644 --- a/WORKSPACE +++ b/WORKSPACE @@ -41,12 +41,12 @@ http_archive( load("@rules_python//python:repositories.bzl", "python_register_toolchains") python_register_toolchains( - name = "python3_8", - python_version = "3.8", + name = "python3_9", + python_version = "3.9", register_toolchains = False, ) -load("@python3_8//:defs.bzl", bk_python = "interpreter") +load("@python3_9//:defs.bzl", bk_python = "interpreter") load("@rules_python//python/pip_install:repositories.bzl", "pip_install_dependencies") pip_install_dependencies() diff --git a/release/BUILD b/release/BUILD index 5afadec6406e..b15f22324acb 100644 --- a/release/BUILD +++ b/release/BUILD @@ -1,7 +1,7 @@ load("@rules_python//python:defs.bzl", "py_library", "py_runtime", "py_runtime_pair", "py_test") load("@rules_python//python:pip.bzl", "compile_pip_requirements") load("@py_deps_buildkite//:requirements.bzl", bk_require = "requirement") -load("@python3_8//:defs.bzl", bk_python = "interpreter") +load("@python3_9//:defs.bzl", bk_python = "interpreter") compile_pip_requirements( name = "requirements_buildkite", From 3325db9a01b59e15cf454d8761be0d996f914721 Mon Sep 17 00:00:00 2001 From: "ZhengYu, Xu" Date: Fri, 12 May 2023 01:43:25 +0800 Subject: [PATCH 342/424] [Client] Optimize chunk size (#35025) * Optimize chunk size The time it takes to serialize a protobuf object is not linear by its size. 
Signed-off-by: ZhengYu, Xu * resize large objects --------- Signed-off-by: ZhengYu, Xu Co-authored-by: Chris Wong --- python/ray/tests/test_client_reconnect.py | 20 ++++++++++---------- python/ray/util/client/common.py | 4 ++-- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/python/ray/tests/test_client_reconnect.py b/python/ray/tests/test_client_reconnect.py index e49726c62ec4..58df0027da52 100644 --- a/python/ray/tests/test_client_reconnect.py +++ b/python/ray/tests/test_client_reconnect.py @@ -367,17 +367,17 @@ def fail_every_three(_): @ray.remote def large_result(): - # 1024x1024x128 float64 matrix (1024 MiB). With 64MiB chunk size, + # 1024x1024x6 float64 matrix (96 MiB). With 5MiB chunk size, # it will take at least 16 chunks to transfer this object. Since # the failure is injected every 3 chunks, this transfer can only # work if the chunked get request retries at the last received chunk # (instead of starting from the beginning each retry) - return np.random.random((1024, 1024, 128)) + return np.random.random((1024, 1024, 6)) with start_middleman_server(on_task_response=fail_every_three): started = True result = ray.get(large_result.remote()) - assert result.shape == (1024, 1024, 128) + assert result.shape == (1024, 1024, 6) def test_disconnects_during_large_async_get(): @@ -398,12 +398,12 @@ def fail_every_three(_): @ray.remote def large_result(): - # 1024x1024x128 float64 matrix (1024 MiB). With 64MiB chunk size, + # 1024x1024x6 float64 matrix (96 MiB). With 5MiB chunk size, # it will take at least 16 chunks to transfer this object. 
Since # the failure is injected every 3 chunks, this transfer can only # work if the chunked get request retries at the last received chunk # (instead of starting from the beginning each retry) - return np.random.random((1024, 1024, 128)) + return np.random.random((1024, 1024, 6)) with start_middleman_server(on_data_response=fail_every_three): started = True @@ -412,7 +412,7 @@ async def get_large_result(): return await large_result.remote() result = get_or_create_event_loop().run_until_complete(get_large_result()) - assert result.shape == (1024, 1024, 128) + assert result.shape == (1024, 1024, 6) def test_disconnect_during_large_put(): @@ -433,10 +433,10 @@ def fail_halfway(_): with start_middleman_server(on_data_request=fail_halfway): started = True - objref = ray.put(np.random.random((1024, 1024, 128))) + objref = ray.put(np.random.random((1024, 1024, 6))) assert i > 8 # Check that the failure was injected result = ray.get(objref) - assert result.shape == (1024, 1024, 128) + assert result.shape == (1024, 1024, 6) def test_disconnect_during_large_schedule(): @@ -461,10 +461,10 @@ def f(a): with start_middleman_server(on_data_request=fail_halfway): started = True - a = np.random.random((1024, 1024, 128)) + a = np.random.random((1024, 1024, 6)) result = ray.get(f.remote(a)) assert i > 8 # Check that the failure was injected - assert result == (1024, 1024, 128) + assert result == (1024, 1024, 6) def test_valid_actor_state(): diff --git a/python/ray/util/client/common.py b/python/ray/util/client/common.py index 873bc25ed68b..66825dc2fee1 100644 --- a/python/ray/util/client/common.py +++ b/python/ray/util/client/common.py @@ -79,8 +79,8 @@ CLIENT_SERVER_MAX_THREADS = float(os.getenv("RAY_CLIENT_SERVER_MAX_THREADS", 100)) -# Large objects are chunked into 64 MiB messages -OBJECT_TRANSFER_CHUNK_SIZE = 64 * 2**20 +# Large objects are chunked into 5 MiB messages, ref PR #35025 +OBJECT_TRANSFER_CHUNK_SIZE = 5 * 2**20 # Warn the user if the object being transferred is 
larger than 2 GiB OBJECT_TRANSFER_WARNING_SIZE = 2 * 2**30 From 5f70efb374bd3ef4fa5c40bf285d55663735b56f Mon Sep 17 00:00:00 2001 From: Cuong Nguyen <128072568+can-anyscale@users.noreply.github.com> Date: Thu, 11 May 2023 11:34:42 -0700 Subject: [PATCH 343/424] Run bisect with the correct python version (#35186) Currently because we do not specify the python version, bisect defaults to 3.7. Some tests want to run with a specific python version, so read the python version from the test configuration for those cases. Signed-off-by: Cuong Nguyen --- release/ray_release/scripts/ray_bisect.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/release/ray_release/scripts/ray_bisect.py b/release/ray_release/scripts/ray_bisect.py index d46dffe167c4..9985a8534416 100644 --- a/release/ray_release/scripts/ray_bisect.py +++ b/release/ray_release/scripts/ray_bisect.py @@ -9,7 +9,9 @@ from ray_release.buildkite.step import get_step from ray_release.config import ( read_and_validate_release_test_collection, + parse_python_version, DEFAULT_WHEEL_WAIT_TIMEOUT, + DEFAULT_PYTHON_VERSION, Test, ) from ray_release.wheels import find_and_wait_for_ray_wheels_url @@ -123,9 +125,12 @@ def _run_test( def _trigger_test_run(test: Test, commit: str, run_per_commit: int) -> None: + python_version = DEFAULT_PYTHON_VERSION + if "python" in test: + python_version = parse_python_version(test["python"]) + ray_wheels_url = find_and_wait_for_ray_wheels_url( - commit, - timeout=DEFAULT_WHEEL_WAIT_TIMEOUT, + commit, timeout=DEFAULT_WHEEL_WAIT_TIMEOUT, python_version=python_version ) for run in range(run_per_commit): step = get_step( From 53d5cbc39e7a44db72ef9cfe3e4f5be2ac76a2fc Mon Sep 17 00:00:00 2001 From: Cuong Nguyen <128072568+can-anyscale@users.noreply.github.com> Date: Thu, 11 May 2023 11:35:00 -0700 Subject: [PATCH 344/424] [ci/bazel][2] bazelize all other ray_release tests (#35032) Bazelize all other ray-release tests Signed-off-by: Cuong Nguyen --- release/BUILD | 45 
+++++++++++++++++++ .../tests/test_anyscale_job_manager.py | 6 +++ release/ray_release/tests/test_result.py | 6 +++ 3 files changed, 57 insertions(+) diff --git a/release/BUILD b/release/BUILD index b15f22324acb..6ddd98a94fac 100644 --- a/release/BUILD +++ b/release/BUILD @@ -435,6 +435,21 @@ py_test( ], ) +py_test( + name = "test_anyscale_job_manager", + size = "small", + srcs = ["ray_release/tests/test_anyscale_job_manager.py"], + exec_compatible_with = [":hermetic_python"], + tags = [ + "release_unit", + "team:ci", + ], + deps = [ + ":ray_release", + bk_require("pytest"), + ], +) + py_test( name = "test_anyscale_job_wrapper", size = "small", @@ -565,6 +580,21 @@ py_test( ], ) +py_test( + name = "test_result", + size = "small", + srcs = ["ray_release/tests/test_result.py"], + exec_compatible_with = [":hermetic_python"], + tags = [ + "release_unit", + "team:ci", + ], + deps = [ + ":ray_release", + bk_require("pytest"), + ], +) + py_test( name = "test_run_script", size = "small", @@ -585,6 +615,21 @@ py_test( ], ) +py_test( + name = "test_template", + size = "small", + srcs = ["ray_release/tests/test_template.py"], + exec_compatible_with = [":hermetic_python"], + tags = [ + "release_unit", + "team:ci", + ], + deps = [ + ":ray_release", + bk_require("pytest"), + ], +) + py_test( name = "test_wheels", size = "small", diff --git a/release/ray_release/tests/test_anyscale_job_manager.py b/release/ray_release/tests/test_anyscale_job_manager.py index 5985aa5105f9..bd28fba1684f 100644 --- a/release/ray_release/tests/test_anyscale_job_manager.py +++ b/release/ray_release/tests/test_anyscale_job_manager.py @@ -1,3 +1,5 @@ +import pytest +import sys import tempfile import os @@ -19,3 +21,7 @@ def test_get_ray_error_logs(): ) = AnyscaleJobManager._find_job_driver_and_ray_error_logs(tmpdir) assert ray_error_log == "".join(ERROR_LOG_PATTERNS + ["haha"]) assert job_driver_log == "w00t" + + +if __name__ == "__main__": + sys.exit(pytest.main(["-v", __file__])) diff --git 
a/release/ray_release/tests/test_result.py b/release/ray_release/tests/test_result.py index 8ae16b298503..f5e963f60df6 100644 --- a/release/ray_release/tests/test_result.py +++ b/release/ray_release/tests/test_result.py @@ -1,3 +1,5 @@ +import pytest +import sys import os from unittest import mock from ray_release.result import handle_exception, ExitCode, ResultStatus @@ -34,3 +36,7 @@ def test_handle_exception(): ResultStatus.INFRA_ERROR, None, ) + + +if __name__ == "__main__": + sys.exit(pytest.main(["-v", __file__])) From e5dc3540ef723195c7fb522166aba5c3e160a8b5 Mon Sep 17 00:00:00 2001 From: Justin Coffi Date: Thu, 11 May 2023 12:03:42 -0700 Subject: [PATCH 345/424] Update README.rst (#35267) Fixed the URL to point to the correct location Signed-off-by: Justin Coffi --- README.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.rst b/README.rst index f1ea3157c08d..e52e01da439f 100644 --- a/README.rst +++ b/README.rst @@ -44,7 +44,7 @@ Install Ray with: ``pip install ray``. For nightly wheels, see the `Installation page `__. .. _`Serve`: https://docs.ray.io/en/latest/serve/index.html -.. _`Data`: https://docs.ray.io/en/latest/data/data.html +.. _`Data`: https://docs.ray.io/en/latest/data/dataset.html .. _`Workflow`: https://docs.ray.io/en/latest/workflows/concepts.html .. _`Train`: https://docs.ray.io/en/latest/train/train.html .. _`Tune`: https://docs.ray.io/en/latest/tune/index.html From bf0192ec5ee88299fe8bab982af7bf74e130b77d Mon Sep 17 00:00:00 2001 From: Philipp Moritz Date: Thu, 11 May 2023 12:12:52 -0700 Subject: [PATCH 346/424] Bring back "[Core] Port GcsPublisher to Cython" (#34393) (#35179) I spent quite a bit of time debugging the test failure in #34393 (see also #35108) It turns out the PR slightly made the _do_importing race condition (first time call in the import thread) more likely to happen. 
There is already a plan / PR to get rid of it (#30895) but it is currently waiting for having a replacement mechanism that @rkooo567 is working on. I synced with @scv119 and for the time being, we are planning to skip the offending test on Windows and once we got rid of the import thread, we can re-activate it. --- .buildkite/pipeline.build.yml | 6 ++ dashboard/agent.py | 6 +- dashboard/dashboard.py | 3 +- python/ray/_private/gcs_pubsub.py | 45 ---------- python/ray/_private/log_monitor.py | 8 +- python/ray/_private/utils.py | 29 +------ python/ray/_private/worker.py | 3 +- python/ray/_raylet.pyx | 63 ++++++++++++++ python/ray/autoscaler/_private/monitor.py | 3 +- python/ray/includes/common.pxd | 38 +++++++++ python/ray/includes/common.pxi | 1 + python/ray/tests/test_basic_5.py | 3 + python/ray/tests/test_failure.py | 3 +- python/ray/tests/test_gcs_fault_tolerance.py | 14 ++-- python/ray/tests/test_gcs_pubsub.py | 26 +++--- src/ray/gcs/gcs_client/gcs_client.cc | 5 +- src/ray/gcs/pubsub/gcs_pub_sub.cc | 87 ++++++++++++++++++++ src/ray/gcs/pubsub/gcs_pub_sub.h | 37 +++++++++ 18 files changed, 271 insertions(+), 109 deletions(-) diff --git a/.buildkite/pipeline.build.yml b/.buildkite/pipeline.build.yml index 81debe8a17bf..8bdd31723559 100644 --- a/.buildkite/pipeline.build.yml +++ b/.buildkite/pipeline.build.yml @@ -368,6 +368,9 @@ - DL=1 ./ci/env/install-dependencies.sh - bash ./ci/ci.sh prepare_docker - ./ci/env/env_info.sh + # This is needed or else the Ray Client tests run into a gRPC forking problem + # similar to https://github.com/grpc/grpc/issues/31885 + - pip install pip install grpcio==1.50.0 - bazel test --config=ci $(./ci/run/bazel_export_options) --test_tag_filters=client_tests,small_size_python_tests -- python/ray/tests/... 
@@ -418,6 +421,9 @@ - cleanup() { if [ "${BUILDKITE_PULL_REQUEST}" = "false" ]; then ./ci/build/upload_build_info.sh; fi }; trap cleanup EXIT - DL=1 ./ci/env/install-dependencies.sh - ./ci/env/env_info.sh + # This is needed or else the Ray Client tests run into a gRPC forking problem + # similar to https://github.com/grpc/grpc/issues/31885 + - pip install pip install grpcio==1.50.0 - bazel test --config=ci $(./scripts/bazel_export_options) --test_tag_filters=client_tests,small_size_python_tests --test_env=TEST_EXTERNAL_REDIS=1 diff --git a/dashboard/agent.py b/dashboard/agent.py index 345099ff7c25..df57590ff0b6 100644 --- a/dashboard/agent.py +++ b/dashboard/agent.py @@ -15,7 +15,7 @@ import ray.dashboard.consts as dashboard_consts import ray.dashboard.utils as dashboard_utils from ray.dashboard.consts import _PARENT_DEATH_THREASHOLD -from ray._private.gcs_pubsub import GcsAioPublisher, GcsPublisher +from ray._private.gcs_pubsub import GcsAioPublisher from ray._raylet import GcsClient from ray._private.gcs_utils import GcsAioClient from ray._private.ray_logging import setup_component_logger @@ -263,7 +263,9 @@ async def _check_parent(): ray._private.utils.publish_error_to_driver( ray_constants.RAYLET_DIED_ERROR, msg, - gcs_publisher=GcsPublisher(address=self.gcs_address), + gcs_publisher=ray._raylet.GcsPublisher( + address=self.gcs_address + ), ) else: logger.info(msg) diff --git a/dashboard/dashboard.py b/dashboard/dashboard.py index 4732e96d23ee..273fbc4c904d 100644 --- a/dashboard/dashboard.py +++ b/dashboard/dashboard.py @@ -13,7 +13,6 @@ import ray.dashboard.consts as dashboard_consts import ray.dashboard.head as dashboard_head import ray.dashboard.utils as dashboard_utils -from ray._private.gcs_pubsub import GcsPublisher from ray._private.ray_logging import setup_component_logger from typing import Optional, Set @@ -261,7 +260,7 @@ def sigterm_handler(): raise e # Something went wrong, so push an error to all drivers. 
- gcs_publisher = GcsPublisher(address=args.gcs_address) + gcs_publisher = ray._raylet.GcsPublisher(address=args.gcs_address) ray._private.utils.publish_error_to_driver( ray_constants.DASHBOARD_DIED_ERROR, message, diff --git a/python/ray/_private/gcs_pubsub.py b/python/ray/_private/gcs_pubsub.py index c1d39e728b15..2168b9dfed9d 100644 --- a/python/ray/_private/gcs_pubsub.py +++ b/python/ray/_private/gcs_pubsub.py @@ -4,10 +4,8 @@ import random import threading from typing import Optional, Tuple, List -import time import grpc -from grpc._channel import _InactiveRpcError from ray._private.utils import get_or_create_event_loop try: @@ -160,49 +158,6 @@ def _pop_actors(queue, batch_size=100): return msgs -class GcsPublisher(_PublisherBase): - """Publisher to GCS.""" - - def __init__(self, address: str): - channel = gcs_utils.create_gcs_channel(address) - self._stub = gcs_service_pb2_grpc.InternalPubSubGcsServiceStub(channel) - - def publish_error( - self, key_id: bytes, error_info: ErrorTableData, num_retries=None - ) -> None: - """Publishes error info to GCS.""" - msg = pubsub_pb2.PubMessage( - channel_type=pubsub_pb2.RAY_ERROR_INFO_CHANNEL, - key_id=key_id, - error_info_message=error_info, - ) - req = gcs_service_pb2.GcsPublishRequest(pub_messages=[msg]) - self._gcs_publish(req, num_retries, timeout=1) - - def publish_logs(self, log_batch: dict) -> None: - """Publishes logs to GCS.""" - req = self._create_log_request(log_batch) - self._gcs_publish(req) - - def publish_function_key(self, key: bytes) -> None: - """Publishes function key to GCS.""" - req = self._create_function_key_request(key) - self._gcs_publish(req) - - def _gcs_publish(self, req, num_retries=None, timeout=None) -> None: - count = num_retries or MAX_GCS_PUBLISH_RETRIES - while count > 0: - try: - self._stub.GcsPublish(req, timeout=timeout) - return - except _InactiveRpcError: - pass - count -= 1 - if count > 0: - time.sleep(1) - raise TimeoutError(f"Failed to publish after retries: {req}") - - class 
_SyncSubscriber(_SubscriberBase): def __init__( self, diff --git a/python/ray/_private/log_monitor.py b/python/ray/_private/log_monitor.py index 7f06343625ae..444ac5b34bec 100644 --- a/python/ray/_private/log_monitor.py +++ b/python/ray/_private/log_monitor.py @@ -11,11 +11,9 @@ import traceback from typing import Callable, List, Set -import ray._private.gcs_pubsub as gcs_pubsub import ray._private.ray_constants as ray_constants import ray._private.services as services import ray._private.utils -from ray._private.gcs_pubsub import GcsPublisher from ray._private.ray_logging import setup_component_logger # Logger for this module. It should be configured at the entry point @@ -135,7 +133,7 @@ class LogMonitor: def __init__( self, logs_dir, - gcs_publisher: gcs_pubsub.GcsPublisher, + gcs_publisher, is_proc_alive_fn: Callable[[int], bool], max_files_open: int = ray_constants.LOG_MONITOR_MAX_OPEN_FILES, ): @@ -525,14 +523,14 @@ def is_proc_alive(pid): ) log_monitor = LogMonitor( - args.logs_dir, gcs_pubsub.GcsPublisher(address=args.gcs_address), is_proc_alive + args.logs_dir, ray._raylet.GcsPublisher(address=args.gcs_address), is_proc_alive ) try: log_monitor.run() except Exception as e: # Something went wrong, so push an error to all drivers. 
- gcs_publisher = GcsPublisher(address=args.gcs_address) + gcs_publisher = ray._raylet.GcsPublisher(address=args.gcs_address) traceback_str = ray._private.utils.format_error_message(traceback.format_exc()) message = ( f"The log monitor on node {platform.node()} " diff --git a/python/ray/_private/utils.py b/python/ray/_private/utils.py index 6174890cd8ea..8d1793114ac9 100644 --- a/python/ray/_private/utils.py +++ b/python/ray/_private/utils.py @@ -44,7 +44,6 @@ import ray import ray._private.ray_constants as ray_constants from ray._private.tls_utils import load_certs_from_env -from ray.core.generated.gcs_pb2 import ErrorTableData from ray.core.generated.runtime_env_common_pb2 import ( RuntimeEnvInfo as ProtoRuntimeEnvInfo, ) @@ -182,27 +181,6 @@ def push_error_to_driver( worker.core_worker.push_error(job_id, error_type, message, time.time()) -def construct_error_message(job_id, error_type, message, timestamp): - """Construct an ErrorTableData object. - - Args: - job_id: The ID of the job that the error should go to. If this is - nil, then the error will go to all drivers. - error_type: The type of the error. - message: The error message. - timestamp: The time of the error. - - Returns: - The ErrorTableData object. 
- """ - data = ErrorTableData() - data.job_id = job_id.binary() - data.type = error_type - data.error_message = message - data.timestamp = timestamp - return data - - def publish_error_to_driver( error_type: str, message: str, @@ -228,11 +206,12 @@ def publish_error_to_driver( if job_id is None: job_id = ray.JobID.nil() assert isinstance(job_id, ray.JobID) - error_data = construct_error_message(job_id, error_type, message, time.time()) try: - gcs_publisher.publish_error(job_id.hex().encode(), error_data, num_retries) + gcs_publisher.publish_error( + job_id.hex().encode(), error_type, message, job_id, num_retries + ) except Exception: - logger.exception(f"Failed to publish error {error_data}") + logger.exception(f"Failed to publish error: {message} [type {error_type}]") def decode(byte_str: str, allow_none: bool = False, encode_type: str = "utf-8"): diff --git a/python/ray/_private/worker.py b/python/ray/_private/worker.py index a9b81d672fb3..234f79c3be5c 100644 --- a/python/ray/_private/worker.py +++ b/python/ray/_private/worker.py @@ -68,7 +68,6 @@ GcsErrorSubscriber, GcsFunctionKeySubscriber, GcsLogSubscriber, - GcsPublisher, ) from ray._private.inspect_util import is_cython from ray._private.ray_logging import ( @@ -2074,7 +2073,7 @@ def connect( ray._private.state.state._initialize_global_state( ray._raylet.GcsClientOptions.from_gcs_address(node.gcs_address) ) - worker.gcs_publisher = GcsPublisher(address=worker.gcs_client.address) + worker.gcs_publisher = ray._raylet.GcsPublisher(address=worker.gcs_client.address) # Initialize some fields. if mode in (WORKER_MODE, RESTORE_WORKER_MODE, SPILL_WORKER_MODE): # We should not specify the job_id if it's `WORKER_MODE`. 
diff --git a/python/ray/_raylet.pyx b/python/ray/_raylet.pyx index 5b135b35d419..3c929ade46ef 100644 --- a/python/ray/_raylet.pyx +++ b/python/ray/_raylet.pyx @@ -61,14 +61,17 @@ from ray.includes.common cimport ( CObjectReference, CRayObject, CRayStatus, + CErrorTableData, CGcsClientOptions, CGcsNodeInfo, CJobTableData, + CLogBatch, CTaskArg, CTaskArgByReference, CTaskArgByValue, CTaskType, CPlacementStrategy, + CPythonFunction, CSchedulingStrategy, CPlacementGroupSchedulingStrategy, CNodeAffinitySchedulingStrategy, @@ -1742,6 +1745,66 @@ cdef class GcsClient: } return result +cdef class GcsPublisher: + """Cython wrapper class of C++ `ray::gcs::PythonGcsPublisher`.""" + cdef: + shared_ptr[CPythonGcsPublisher] inner + + def __cinit__(self, address): + self.inner.reset(new CPythonGcsPublisher(address)) + check_status(self.inner.get().Connect()) + + def publish_error(self, key_id: bytes, error_type: str, message: str, + job_id=None, num_retries=None): + cdef: + CErrorTableData error_info + int64_t c_num_retries = num_retries if num_retries else -1 + c_string c_key_id = key_id + + if job_id is None: + job_id = ray.JobID.nil() + assert isinstance(job_id, ray.JobID) + error_info.set_job_id(job_id.binary()) + error_info.set_type(error_type) + error_info.set_error_message(message) + error_info.set_timestamp(time.time()) + + with nogil: + check_status( + self.inner.get().PublishError(c_key_id, error_info, c_num_retries)) + + def publish_logs(self, log_json: dict): + cdef: + CLogBatch log_batch + c_string c_job_id + + job_id = log_json.get("job") + log_batch.set_ip(log_json.get("ip") if log_json.get("ip") else b"") + log_batch.set_pid( + str(log_json.get("pid")).encode() if log_json.get("pid") else b"") + log_batch.set_job_id(job_id.encode() if job_id else b"") + log_batch.set_is_error(bool(log_json.get("is_err"))) + for line in log_json.get("lines", []): + log_batch.add_lines(line) + actor_name = log_json.get("actor_name") + log_batch.set_actor_name(actor_name.encode() if 
actor_name else b"") + task_name = log_json.get("task_name") + log_batch.set_task_name(task_name.encode() if task_name else b"") + + c_job_id = job_id.encode() if job_id else b"" + with nogil: + check_status(self.inner.get().PublishLogs(c_job_id, log_batch)) + + def publish_function_key(self, key: bytes): + cdef: + CPythonFunction python_function + + python_function.set_key(key) + + with nogil: + check_status(self.inner.get().PublishFunctionKey(python_function)) + + cdef class CoreWorker: def __cinit__(self, worker_type, store_socket, raylet_socket, diff --git a/python/ray/autoscaler/_private/monitor.py b/python/ray/autoscaler/_private/monitor.py index 14faf14fa8e9..f15e109fc9d4 100644 --- a/python/ray/autoscaler/_private/monitor.py +++ b/python/ray/autoscaler/_private/monitor.py @@ -16,7 +16,6 @@ import ray._private.ray_constants as ray_constants import ray._private.utils from ray._private.event.event_logger import get_event_logger -from ray._private.gcs_pubsub import GcsPublisher from ray._private.ray_logging import setup_component_logger from ray._raylet import GcsClient from ray.autoscaler._private.autoscaler import StandardAutoscaler @@ -560,7 +559,7 @@ def _handle_failure(self, error): _internal_kv_put( ray_constants.DEBUG_AUTOSCALING_ERROR, message, overwrite=True ) - gcs_publisher = GcsPublisher(address=self.gcs_address) + gcs_publisher = ray._raylet.GcsPublisher(address=self.gcs_address) from ray._private.utils import publish_error_to_driver publish_error_to_driver( diff --git a/python/ray/includes/common.pxd b/python/ray/includes/common.pxd index e0f8b8ee9712..4250470f3013 100644 --- a/python/ray/includes/common.pxd +++ b/python/ray/includes/common.pxd @@ -346,6 +346,21 @@ cdef extern from "ray/gcs/gcs_client/gcs_client.h" namespace "ray::gcs" nogil: unordered_map[c_string, double] PythonGetResourcesTotal( const CGcsNodeInfo& node_info) +cdef extern from "ray/gcs/pubsub/gcs_pub_sub.h" nogil: + + cdef cppclass CPythonGcsPublisher 
"ray::gcs::PythonGcsPublisher": + + CPythonGcsPublisher(const c_string& gcs_address) + + CRayStatus Connect() + + CRayStatus PublishError( + const c_string &key_id, const CErrorTableData &data, int64_t num_retries) + + CRayStatus PublishLogs(const c_string &key_id, const CLogBatch &data) + + CRayStatus PublishFunctionKey(const CPythonFunction& python_function) + cdef extern from "src/ray/protobuf/gcs.pb.h" nogil: cdef cppclass CJobConfig "ray::rpc::JobConfig": c_string ray_namespace() const @@ -372,6 +387,29 @@ cdef extern from "src/ray/protobuf/gcs.pb.h" nogil: c_bool is_dead() const CJobConfig config() const + cdef cppclass CPythonFunction "ray::rpc::PythonFunction": + void set_key(const c_string &key) + + cdef cppclass CErrorTableData "ray::rpc::ErrorTableData": + c_string job_id() const + c_string type() const + c_string error_message() const + double timestamp() const + + void set_job_id(const c_string &job_id) + void set_type(const c_string &type) + void set_error_message(const c_string &error_message) + void set_timestamp(double timestamp) + + cdef cppclass CLogBatch "ray::rpc::LogBatch": + void set_ip(const c_string &ip) + void set_pid(const c_string &pid) + void set_job_id(const c_string &job_id) + void set_is_error(c_bool is_error) + void add_lines(const c_string &line) + void set_actor_name(const c_string &actor_name) + void set_task_name(const c_string &task_name) + cdef extern from "ray/common/task/task_spec.h" nogil: cdef cppclass CConcurrencyGroup "ray::ConcurrencyGroup": diff --git a/python/ray/includes/common.pxi b/python/ray/includes/common.pxi index 89983ff8808c..d7c3c121bc69 100644 --- a/python/ray/includes/common.pxi +++ b/python/ray/includes/common.pxi @@ -6,6 +6,7 @@ from ray.includes.common cimport ( CObjectLocation, CGcsClientOptions, CPythonGcsClient, + CPythonGcsPublisher, ) diff --git a/python/ray/tests/test_basic_5.py b/python/ray/tests/test_basic_5.py index ffdeb6cf20b5..c3847ad8b1be 100644 --- a/python/ray/tests/test_basic_5.py +++ 
b/python/ray/tests/test_basic_5.py @@ -227,6 +227,9 @@ def sys_path(): subprocess.check_call(["python", "-m", "package.module2"]) +# This will be fixed on Windows once the import thread is removed, see +# https://github.com/ray-project/ray/pull/30895 +@pytest.mark.skipif(sys.platform == "win32", reason="Currently fails on Windows.") def test_worker_kv_calls(monkeypatch, shutdown_only): monkeypatch.setenv("TEST_RAY_COLLECT_KV_FREQUENCY", "1") ray.init() diff --git a/python/ray/tests/test_failure.py b/python/ray/tests/test_failure.py index 71bb7a98dd9a..93f1c734ee0a 100644 --- a/python/ray/tests/test_failure.py +++ b/python/ray/tests/test_failure.py @@ -10,7 +10,6 @@ import ray._private.gcs_utils as gcs_utils import ray._private.ray_constants as ray_constants import ray._private.utils -from ray._private.gcs_pubsub import GcsPublisher from ray._private.test_utils import ( SignalActor, convert_actor_state, @@ -69,7 +68,7 @@ def interceptor(e): def test_publish_error_to_driver(ray_start_regular, error_pubsub): address_info = ray_start_regular - gcs_publisher = GcsPublisher(address=address_info["gcs_address"]) + gcs_publisher = ray._raylet.GcsPublisher(address=address_info["gcs_address"]) error_message = "Test error message" ray._private.utils.publish_error_to_driver( diff --git a/python/ray/tests/test_gcs_fault_tolerance.py b/python/ray/tests/test_gcs_fault_tolerance.py index fedd531d6cb8..72caad2f0f6e 100644 --- a/python/ray/tests/test_gcs_fault_tolerance.py +++ b/python/ray/tests/test_gcs_fault_tolerance.py @@ -18,10 +18,8 @@ run_string_as_driver, ) from ray._private.gcs_pubsub import ( - GcsPublisher, GcsErrorSubscriber, ) -from ray.core.generated.gcs_pb2 import ErrorTableData import psutil @@ -675,20 +673,20 @@ def test_publish_and_subscribe_error_info(ray_start_regular_with_external_redis) subscriber = GcsErrorSubscriber(address=gcs_server_addr) subscriber.subscribe() - publisher = GcsPublisher(address=gcs_server_addr) - err1 = ErrorTableData(error_message="test 
error message 1") - err2 = ErrorTableData(error_message="test error message 2") + publisher = ray._raylet.GcsPublisher(address=gcs_server_addr) print("sending error message 1") - publisher.publish_error(b"aaa_id", err1) + publisher.publish_error(b"aaa_id", "", "test error message 1") ray._private.worker._global_node.kill_gcs_server() ray._private.worker._global_node.start_gcs_server() print("sending error message 2") - publisher.publish_error(b"bbb_id", err2) + publisher.publish_error(b"bbb_id", "", "test error message 2") print("done") - assert subscriber.poll() == (b"bbb_id", err2) + (key_id, err) = subscriber.poll() + assert key_id == b"bbb_id" + assert err.error_message == "test error message 2" subscriber.close() diff --git a/python/ray/tests/test_gcs_pubsub.py b/python/ray/tests/test_gcs_pubsub.py index b9a4eddee7a4..71d4ae802f26 100644 --- a/python/ray/tests/test_gcs_pubsub.py +++ b/python/ray/tests/test_gcs_pubsub.py @@ -3,8 +3,8 @@ import threading import re +import ray from ray._private.gcs_pubsub import ( - GcsPublisher, GcsErrorSubscriber, GcsLogSubscriber, GcsFunctionKeySubscriber, @@ -24,14 +24,16 @@ def test_publish_and_subscribe_error_info(ray_start_regular): subscriber = GcsErrorSubscriber(address=gcs_server_addr) subscriber.subscribe() - publisher = GcsPublisher(address=gcs_server_addr) - err1 = ErrorTableData(error_message="test error message 1") - err2 = ErrorTableData(error_message="test error message 2") - publisher.publish_error(b"aaa_id", err1) - publisher.publish_error(b"bbb_id", err2) + publisher = ray._raylet.GcsPublisher(address=gcs_server_addr) + publisher.publish_error(b"aaa_id", "", "test error message 1") + publisher.publish_error(b"bbb_id", "", "test error message 2") - assert subscriber.poll() == (b"aaa_id", err1) - assert subscriber.poll() == (b"bbb_id", err2) + (key_id1, err1) = subscriber.poll() + assert key_id1 == b"aaa_id" + assert err1.error_message == "test error message 1" + (key_id2, err2) = subscriber.poll() + assert 
key_id2 == b"bbb_id" + assert err2.error_message == "test error message 2" subscriber.close() @@ -63,7 +65,7 @@ def test_publish_and_subscribe_logs(ray_start_regular): subscriber = GcsLogSubscriber(address=gcs_server_addr) subscriber.subscribe() - publisher = GcsPublisher(address=gcs_server_addr) + publisher = ray._raylet.GcsPublisher(address=gcs_server_addr) log_batch = { "ip": "127.0.0.1", "pid": 1234, @@ -114,7 +116,7 @@ def test_publish_and_subscribe_function_keys(ray_start_regular): subscriber = GcsFunctionKeySubscriber(address=gcs_server_addr) subscriber.subscribe() - publisher = GcsPublisher(address=gcs_server_addr) + publisher = ray._raylet.GcsPublisher(address=gcs_server_addr) publisher.publish_function_key(b"111") publisher.publish_function_key(b"222") @@ -196,9 +198,9 @@ def receive_logs(): t2 = threading.Thread(target=receive_logs) t2.start() - publisher = GcsPublisher(address=gcs_server_addr) + publisher = ray._raylet.GcsPublisher(address=gcs_server_addr) for i in range(0, num_messages): - publisher.publish_error(b"msg_id", ErrorTableData(error_message=f"error {i}")) + publisher.publish_error(b"msg_id", "", f"error {i}") publisher.publish_logs( { "ip": "127.0.0.1", diff --git a/src/ray/gcs/gcs_client/gcs_client.cc b/src/ray/gcs/gcs_client/gcs_client.cc index fb721893d7ea..ae342b05eec0 100644 --- a/src/ray/gcs/gcs_client/gcs_client.cc +++ b/src/ray/gcs/gcs_client/gcs_client.cc @@ -146,10 +146,7 @@ std::pair GcsClient::GetGcsServerAddress() const { PythonGcsClient::PythonGcsClient(const GcsClientOptions &options) : options_(options) {} Status PythonGcsClient::Connect() { - grpc::ChannelArguments arguments; - arguments.SetInt(GRPC_ARG_MAX_MESSAGE_LENGTH, 512 * 1024 * 1024); - arguments.SetInt(GRPC_ARG_KEEPALIVE_TIME_MS, 60 * 1000); - arguments.SetInt(GRPC_ARG_KEEPALIVE_TIMEOUT_MS, 60 * 1000); + auto arguments = PythonGrpcChannelArguments(); channel_ = rpc::BuildChannel(options_.gcs_address_, options_.gcs_port_, arguments); kv_stub_ = 
rpc::InternalKVGcsService::NewStub(channel_); runtime_env_stub_ = rpc::RuntimeEnvGcsService::NewStub(channel_); diff --git a/src/ray/gcs/pubsub/gcs_pub_sub.cc b/src/ray/gcs/pubsub/gcs_pub_sub.cc index 32c0e9f41367..b03a9157da46 100644 --- a/src/ray/gcs/pubsub/gcs_pub_sub.cc +++ b/src/ray/gcs/pubsub/gcs_pub_sub.cc @@ -15,6 +15,7 @@ #include "ray/gcs/pubsub/gcs_pub_sub.h" #include "absl/strings/str_cat.h" +#include "ray/rpc/grpc_client.h" namespace ray { namespace gcs { @@ -212,5 +213,91 @@ Status GcsSubscriber::SubscribeAllWorkerFailures( return Status::OK(); } +grpc::ChannelArguments PythonGrpcChannelArguments() { + grpc::ChannelArguments arguments; + arguments.SetInt(GRPC_ARG_MAX_MESSAGE_LENGTH, 512 * 1024 * 1024); + arguments.SetInt(GRPC_ARG_KEEPALIVE_TIME_MS, 60 * 1000); + arguments.SetInt(GRPC_ARG_KEEPALIVE_TIMEOUT_MS, 60 * 1000); + return arguments; +} + +PythonGcsPublisher::PythonGcsPublisher(const std::string &gcs_address) { + std::vector address = absl::StrSplit(gcs_address, ':'); + RAY_LOG(DEBUG) << "Connect to gcs server via address: " << gcs_address; + RAY_CHECK(address.size() == 2); + gcs_address_ = address[0]; + gcs_port_ = std::stoi(address[1]); +} + +Status PythonGcsPublisher::Connect() { + auto arguments = PythonGrpcChannelArguments(); + channel_ = rpc::BuildChannel(gcs_address_, gcs_port_, arguments); + pubsub_stub_ = rpc::InternalPubSubGcsService::NewStub(channel_); + return Status::OK(); +} + +constexpr int MAX_GCS_PUBLISH_RETRIES = 60; + +Status PythonGcsPublisher::DoPublishWithRetries(const rpc::GcsPublishRequest &request, + int64_t num_retries, + int64_t timeout_ms) { + int count = num_retries == -1 ? 
MAX_GCS_PUBLISH_RETRIES : num_retries; + rpc::GcsPublishReply reply; + grpc::Status status; + while (count > 0) { + grpc::ClientContext context; + if (timeout_ms != -1) { + context.set_deadline(std::chrono::system_clock::now() + + std::chrono::milliseconds(timeout_ms)); + } + status = pubsub_stub_->GcsPublish(&context, request, &reply); + if (status.error_code() == grpc::StatusCode::OK) { + if (reply.status().code() != static_cast(StatusCode::OK)) { + return Status::Invalid(reply.status().message()); + } + return Status::OK(); + } else if (status.error_code() == grpc::StatusCode::UNAVAILABLE || + status.error_code() == grpc::StatusCode::UNKNOWN) { + // This is the case in which we will retry + count -= 1; + std::this_thread::sleep_for(std::chrono::seconds(1)); + continue; + } else { + return Status::Invalid(status.error_message()); + } + } + return Status::TimedOut("Failed to publish after retries: " + status.error_message()); +} + +Status PythonGcsPublisher::PublishError(const std::string &key_id, + const rpc::ErrorTableData &error_info, + int64_t num_retries) { + rpc::GcsPublishRequest request; + auto *message = request.add_pub_messages(); + message->set_channel_type(rpc::RAY_ERROR_INFO_CHANNEL); + message->set_key_id(key_id); + message->mutable_error_info_message()->MergeFrom(error_info); + return DoPublishWithRetries(request, num_retries, 1000); +} + +Status PythonGcsPublisher::PublishLogs(const std::string &key_id, + const rpc::LogBatch &log_batch) { + rpc::GcsPublishRequest request; + auto *message = request.add_pub_messages(); + message->set_channel_type(rpc::RAY_LOG_CHANNEL); + message->set_key_id(key_id); + message->mutable_log_batch_message()->MergeFrom(log_batch); + return DoPublishWithRetries(request, -1, -1); +} + +Status PythonGcsPublisher::PublishFunctionKey( + const rpc::PythonFunction &python_function) { + rpc::GcsPublishRequest request; + auto *message = request.add_pub_messages(); + message->set_channel_type(rpc::RAY_PYTHON_FUNCTION_CHANNEL); + 
message->mutable_python_function_message()->MergeFrom(python_function); + return DoPublishWithRetries(request, -1, -1); +} + } // namespace gcs } // namespace ray diff --git a/src/ray/gcs/pubsub/gcs_pub_sub.h b/src/ray/gcs/pubsub/gcs_pub_sub.h index ffd79a6adfab..db621938dc98 100644 --- a/src/ray/gcs/pubsub/gcs_pub_sub.h +++ b/src/ray/gcs/pubsub/gcs_pub_sub.h @@ -25,6 +25,7 @@ #include "ray/pubsub/publisher.h" #include "ray/pubsub/subscriber.h" #include "src/ray/protobuf/gcs.pb.h" +#include "src/ray/protobuf/gcs_service.grpc.pb.h" #include "src/ray/protobuf/gcs_service.pb.h" namespace ray { @@ -132,5 +133,41 @@ class GcsSubscriber { const std::unique_ptr subscriber_; }; +// This client is only supposed to be used from Cython / Python +class RAY_EXPORT PythonGcsPublisher { + public: + explicit PythonGcsPublisher(const std::string &gcs_address); + + /// Connect to the publisher service of the GCS. + /// This function must be called before calling other functions. + /// + /// \return Status + Status Connect(); + + /// Publish error information to GCS. + Status PublishError(const std::string &key_id, + const rpc::ErrorTableData &data, + int64_t num_retries); + + /// Publish logs to GCS. + Status PublishLogs(const std::string &key_id, const rpc::LogBatch &log_batch); + + /// Publish a function key to GCS. 
+ Status PublishFunctionKey(const rpc::PythonFunction &python_function); + + private: + Status DoPublishWithRetries(const rpc::GcsPublishRequest &request, + int64_t num_retries, + int64_t timeout_ms); + std::unique_ptr pubsub_stub_; + std::shared_ptr channel_; + std::string gcs_address_; + int gcs_port_; +}; + +/// Construct the arguments for synchronous gRPC clients +/// (the ones wrapped in Python) +grpc::ChannelArguments PythonGrpcChannelArguments(); + } // namespace gcs } // namespace ray From 22d3d243f9d987bb5e12c468db7ca8674cab89fc Mon Sep 17 00:00:00 2001 From: Cindy Zhang Date: Thu, 11 May 2023 13:01:16 -0700 Subject: [PATCH 347/424] [serve] Add controller metadata (#35182) * Return node id, node ip, actor id, actor name, worker id, log file path for controller. (Field name `controller_info` up for discussion) Example: ``` "controller_info": { "node_id": "a2ee49da74f69cb177cfca907354ea7cf669a015b4af1e0e9224500a", "node_ip": "192.168.0.141", "actor_id": "539ac33eadf7ead5375d741c01000000", "actor_name": "SERVE_CONTROLLER_ACTOR", "worker_id": "766d2f49edcfe39b422fb7c237a2084a618302dd51e9904795d7492b", "log_file_path": "/serve/controller_5629.log" }, ``` * Add worker id to http proxy and replica details * Update http proxy to use `get_component_logger_file_path()` --- .../modules/serve/tests/test_serve_agent.py | 20 +++++- python/ray/serve/_private/deployment_state.py | 65 +++++++++++------- python/ray/serve/_private/http_proxy.py | 18 ++++- python/ray/serve/_private/http_state.py | 68 +++++++++++-------- python/ray/serve/_private/replica.py | 1 + python/ray/serve/_private/version.py | 3 + python/ray/serve/controller.py | 16 ++++- python/ray/serve/schema.py | 54 +++++++-------- .../ray/serve/tests/test_deployment_state.py | 25 ++++++- python/ray/serve/tests/test_http_state.py | 25 ++++--- 10 files changed, 192 insertions(+), 103 deletions(-) diff --git a/dashboard/modules/serve/tests/test_serve_agent.py b/dashboard/modules/serve/tests/test_serve_agent.py index 
baee47ba8047..9ef184594b88 100644 --- a/dashboard/modules/serve/tests/test_serve_agent.py +++ b/dashboard/modules/serve/tests/test_serve_agent.py @@ -472,6 +472,12 @@ def test_get_status(ray_start_stop): print("Serve app status is correct.") +@pytest.mark.skipif(sys.platform == "darwin", reason="Flaky on OSX.") +def test_get_serve_instance_details_not_started(ray_start_stop): + """Test rest api when serve isn't started yet.""" + ServeInstanceDetails(**requests.get(GET_OR_PUT_URL_V2).json()) + + @pytest.mark.skipif(sys.platform == "darwin", reason="Flaky on OSX.") @pytest.mark.parametrize( "f_deployment_options", @@ -490,7 +496,7 @@ def test_get_status(ray_start_stop): def test_get_serve_instance_details(ray_start_stop, f_deployment_options): world_import_path = "ray.serve.tests.test_config_files.world.DagNode" fastapi_import_path = "ray.serve.tests.test_config_files.fastapi_deployment.node" - config1 = { + config = { "proxy_location": "HeadOnly", "http_options": { "host": "127.0.0.1", @@ -523,7 +529,7 @@ def test_get_serve_instance_details(ray_start_stop, f_deployment_options): }, } - deploy_config_multi_app(config1) + deploy_config_multi_app(config) def applications_running(): response = requests.get(GET_OR_PUT_URL_V2, timeout=15) @@ -549,13 +555,21 @@ def applications_running(): assert proxy.status == HTTPProxyStatus.HEALTHY assert os.path.exists("/tmp/ray/session_latest/logs" + proxy.log_file_path) print("Checked HTTP Proxy details.") + # Check controller info + assert serve_details.controller_info.actor_id + assert serve_details.controller_info.actor_name + assert serve_details.controller_info.node_id + assert serve_details.controller_info.node_ip + assert os.path.exists( + "/tmp/ray/session_latest/logs" + serve_details.controller_info.log_file_path + ) app_details = serve_details.applications # CHECK: application details for i, app in enumerate(["app1", "app2"]): assert ( app_details[app].deployed_app_config.dict(exclude_unset=True) - == 
config1["applications"][i] + == config["applications"][i] ) assert app_details[app].last_deployed_time_s > 0 assert app_details[app].route_prefix == expected_values[app]["route_prefix"] diff --git a/python/ray/serve/_private/deployment_state.py b/python/ray/serve/_private/deployment_state.py index 0fa57a366034..08629fd507c5 100644 --- a/python/ray/serve/_private/deployment_state.py +++ b/python/ray/serve/_private/deployment_state.py @@ -217,6 +217,7 @@ def __init__( self._pid: int = None self._actor_id: str = None + self._worker_id: str = None if isinstance(scheduling_strategy, NodeAffinitySchedulingStrategy): self._node_id = scheduling_strategy.node_id else: @@ -301,6 +302,11 @@ def actor_id(self) -> Optional[str]: """Returns the actor id, None if not started.""" return self._actor_id + @property + def worker_id(self) -> Optional[str]: + """Returns the worker id, None if not started.""" + return self._worker_id + @property def node_id(self) -> Optional[str]: """Returns the node id of the actor, None if not placed.""" @@ -536,6 +542,7 @@ def check_ready(self) -> Tuple[ReplicaStartupStatus, Optional[str]]: ( self._pid, self._actor_id, + self._worker_id, self._node_id, self._node_ip, self._log_file_path, @@ -752,6 +759,12 @@ def __init__( self._replica_tag = replica_tag self._start_time = None self._prev_slow_startup_warning_time = None + self._actor_details = ReplicaDetails( + actor_name=self._actor._actor_name, + replica_id=self._replica_tag, + state=ReplicaState.STARTING, + start_time_s=0, + ) def get_running_replica_info(self) -> RunningReplicaInfo: return RunningReplicaInfo( @@ -762,24 +775,9 @@ def get_running_replica_info(self) -> RunningReplicaInfo: is_cross_language=self._actor.is_cross_language, ) - def get_replica_details(self, state: ReplicaState) -> ReplicaDetails: - """Get replica details. 
- - Args: - state: The state of the replica, which is not stored within a - DeploymentReplica object - """ - return ReplicaDetails( - replica_id=self.replica_tag, - state=state, - pid=self._actor.pid, - actor_name=self._actor._actor_name, - actor_id=self._actor.actor_id, - node_id=self._actor.node_id, - node_ip=self._actor.node_ip, - start_time_s=self._start_time, - log_file_path=self._actor._log_file_path, - ) + @property + def actor_details(self) -> ReplicaDetails: + return self._actor_details @property def replica_tag(self) -> ReplicaTag: @@ -809,6 +807,7 @@ def start(self, deployment_info: DeploymentInfo, version: DeploymentVersion): self._actor.start(deployment_info, version) self._start_time = time.time() self._prev_slow_startup_warning_time = time.time() + self.update_actor_details(start_time_s=self._start_time) def reconfigure(self, version: DeploymentVersion) -> bool: """ @@ -826,8 +825,7 @@ def recover(self): """ self._actor.recover() self._start_time = time.time() - # Replica version is fetched from recovered replica dynamically in - # check_started() below + self.update_actor_details(start_time_s=self._start_time) def check_started(self) -> Tuple[ReplicaStartupStatus, Optional[str]]: """Check if the replica has started. If so, transition to RUNNING. @@ -838,7 +836,16 @@ def check_started(self) -> Tuple[ReplicaStartupStatus, Optional[str]]: status: Most recent state of replica by querying actor obj ref """ - return self._actor.check_ready() + is_ready = self._actor.check_ready() + self.update_actor_details( + pid=self._actor.pid, + node_id=self._actor.node_id, + node_ip=self._actor.node_ip, + actor_id=self._actor.actor_id, + worker_id=self._actor.worker_id, + log_file_path=self._actor.log_file_path, + ) + return is_ready def stop(self, graceful: bool = True) -> None: """Stop the replica. 
@@ -879,6 +886,15 @@ def check_health(self) -> bool: """ return self._actor.check_health() + def update_state(self, state: ReplicaState) -> None: + """Updates state in actor details.""" + self.update_actor_details(state=state) + + def update_actor_details(self, **kwargs) -> None: + details_kwargs = self._actor_details.dict() + details_kwargs.update(kwargs) + self._actor_details = ReplicaDetails(**details_kwargs) + def resource_requirements(self) -> Tuple[str, str]: """Returns required and currently available resources. @@ -920,6 +936,7 @@ def add(self, state: ReplicaState, replica: VersionedReplica): """ assert isinstance(state, ReplicaState) assert isinstance(replica, VersionedReplica) + replica.update_state(state) self._replicas[state].append(replica) def get( @@ -1168,11 +1185,7 @@ def get_running_replica_infos(self) -> List[RunningReplicaInfo]: ] def list_replica_details(self) -> List[ReplicaDetails]: - return [ - replica.get_replica_details(state) - for state in ReplicaState - for replica in self._replicas.get([state]) - ] + return [replica.actor_details for replica in self._replicas.get()] def _notify_running_replicas_changed(self): self._long_poll_host.notify_changed( diff --git a/python/ray/serve/_private/http_proxy.py b/python/ray/serve/_private/http_proxy.py index 51d00861030d..c9e977755b33 100644 --- a/python/ray/serve/_private/http_proxy.py +++ b/python/ray/serve/_private/http_proxy.py @@ -1,5 +1,6 @@ import asyncio from asyncio.tasks import FIRST_COMPLETED +import json import os import logging import pickle @@ -32,7 +33,11 @@ DEFAULT_LATENCY_BUCKET_MS, ) from ray.serve._private.long_poll import LongPollClient, LongPollNamespace -from ray.serve._private.logging_utils import access_log_msg, configure_component_logger +from ray.serve._private.logging_utils import ( + access_log_msg, + configure_component_logger, + get_component_logger_file_path, +) from ray.serve._private.utils import get_random_letters @@ -509,9 +514,16 @@ async def ready(self): 
return_when=asyncio.FIRST_COMPLETED, ) - # Return log filepath, or re-throw the exception from self.running_task. + # Return metadata, or re-throw the exception from self.running_task. if self.setup_complete.is_set(): - return f"/serve/http_proxy_{ray.util.get_node_ip_address()}.log" + # NOTE(zcin): We need to convert the metadata to a json string because + # of cross-language scenarios. Java can't deserialize a Python tuple. + return json.dumps( + [ + ray._private.worker.global_worker.worker_id.hex(), + get_component_logger_file_path(), + ] + ) return await done_set.pop() diff --git a/python/ray/serve/_private/http_state.py b/python/ray/serve/_private/http_state.py index da8ed7ba620a..e3c5e64c83b6 100644 --- a/python/ray/serve/_private/http_state.py +++ b/python/ray/serve/_private/http_state.py @@ -1,8 +1,9 @@ import asyncio +import json import logging import random import time -from typing import Dict, List, Tuple, Optional +from typing import Dict, List, Tuple import ray from ray.actor import ActorHandle @@ -29,21 +30,23 @@ class HTTPProxyState: - def __init__(self, actor_handle: ActorHandle, actor_name: str, node_ip: str): + def __init__( + self, actor_handle: ActorHandle, actor_name: str, node_id: str, node_ip: str + ): self._actor_handle = actor_handle self._actor_name = actor_name - self._node_ip = node_ip - self._actor_id = None - self._log_file_path = None - self._ready_obj_ref = self._actor_handle.ready.remote() self._status = HTTPProxyStatus.STARTING self._health_check_obj_ref = None self._last_health_check_time: float = 0 - @property - def node_ip(self) -> str: - return self._node_ip + self._actor_details = HTTPProxyDetails( + node_id=node_id, + node_ip=node_ip, + actor_id=self._actor_handle._actor_id.hex(), + actor_name=self._actor_name, + status=self._status, + ) @property def actor_handle(self) -> ActorHandle: @@ -58,22 +61,34 @@ def status(self) -> HTTPProxyStatus: return self._status @property - def actor_id(self) -> Optional[str]: - return 
self._actor_handle._actor_id.hex() + def actor_details(self) -> HTTPProxyDetails: + return self._actor_details - @property - def log_file_path(self) -> Optional[str]: - return self._log_file_path + def set_status(self, status: HTTPProxyStatus) -> None: + """Sets _status and updates _actor_details with the new status.""" + self._status = status + self.update_actor_details(status=self._status) + + def update_actor_details(self, **kwargs) -> None: + """Updates _actor_details with passed in kwargs.""" + details_kwargs = self._actor_details.dict() + details_kwargs.update(kwargs) + self._actor_details = HTTPProxyDetails(**details_kwargs) def update(self): if self._status == HTTPProxyStatus.STARTING: try: finished, _ = ray.wait([self._ready_obj_ref], timeout=0) if finished: - self._log_file_path = ray.get(finished[0]) - self._status = HTTPProxyStatus.HEALTHY + worker_id, log_file_path = json.loads(ray.get(finished[0])) + self.set_status(HTTPProxyStatus.HEALTHY) + self.update_actor_details( + worker_id=worker_id, + log_file_path=log_file_path, + status=self._status, + ) except Exception: - self._status = HTTPProxyStatus.UNHEALTHY + self.set_status(HTTPProxyStatus.UNHEALTHY) return # Perform periodic health checks @@ -82,12 +97,12 @@ def update(self): if finished: try: ray.get(finished[0]) - self._status = HTTPProxyStatus.HEALTHY + self.set_status(HTTPProxyStatus.HEALTHY) except Exception as e: logger.warning( f"Health check for HTTP proxy {self._actor_name} failed: {e}" ) - self._status = HTTPProxyStatus.UNHEALTHY + self.set_status(HTTPProxyStatus.UNHEALTHY) self._health_check_obj_ref = None @@ -97,7 +112,7 @@ def update(self): if time.time() - self._last_health_check_time > randomized_period_s: # If the HTTP Proxy is still blocked, mark unhealthy if self._health_check_obj_ref: - self._status = HTTPProxyStatus.UNHEALTHY + self.set_status(HTTPProxyStatus.UNHEALTHY) logger.warning( f"Health check for HTTP Proxy {self._actor_name} took more than " 
f"{PROXY_HEALTH_CHECK_PERIOD_S} seconds." @@ -160,14 +175,7 @@ def get_http_proxy_names(self) -> Dict[NodeId, str]: def get_http_proxy_details(self) -> Dict[NodeId, HTTPProxyDetails]: return { - node_id: HTTPProxyDetails( - node_id=node_id, - node_ip=state.node_ip, - actor_id=state.actor_id, - actor_name=state.actor_name, - status=state.status, - log_file_path=state.log_file_path, - ) + node_id: state.actor_details for node_id, state in self._proxy_states.items() } @@ -253,7 +261,9 @@ def _start_proxies_if_needed(self) -> None: http_middlewares=self._config.middlewares, ) - self._proxy_states[node_id] = HTTPProxyState(proxy, name, node_ip_address) + self._proxy_states[node_id] = HTTPProxyState( + proxy, name, node_id, node_ip_address + ) def _stop_proxies_if_needed(self) -> bool: """Removes proxy actors from any nodes that no longer exist.""" diff --git a/python/ray/serve/_private/replica.py b/python/ray/serve/_private/replica.py index 0eb4e9acdaa4..e1b1c8461eed 100644 --- a/python/ray/serve/_private/replica.py +++ b/python/ray/serve/_private/replica.py @@ -231,6 +231,7 @@ async def is_allocated(self) -> str: return ( os.getpid(), ray.get_runtime_context().get_actor_id(), + ray._private.worker.global_worker.worker_id.hex(), ray.get_runtime_context().get_node_id(), ray.util.get_node_ip_address(), get_component_logger_file_path(), diff --git a/python/ray/serve/_private/version.py b/python/ray/serve/_private/version.py index c0e252ebb6dc..f6f51b532e8f 100644 --- a/python/ray/serve/_private/version.py +++ b/python/ray/serve/_private/version.py @@ -149,3 +149,6 @@ class VersionedReplica(ABC): @property def version(self) -> DeploymentVersion: pass + + def update_state(self, state): + pass diff --git a/python/ray/serve/controller.py b/python/ray/serve/controller.py index e27159d412cd..1367ace05d42 100644 --- a/python/ray/serve/controller.py +++ b/python/ray/serve/controller.py @@ -44,7 +44,10 @@ from ray.serve._private.deployment_state import DeploymentStateManager, 
ReplicaState from ray.serve._private.endpoint_state import EndpointState from ray.serve._private.http_state import HTTPState -from ray.serve._private.logging_utils import configure_component_logger +from ray.serve._private.logging_utils import ( + configure_component_logger, + get_component_logger_file_path, +) from ray.serve._private.long_poll import LongPollHost from ray.serve.exceptions import RayServeException from ray.serve.schema import ( @@ -53,6 +56,7 @@ ApplicationDetails, ServeInstanceDetails, HTTPOptionsSchema, + ServeActorDetails, ) from ray.serve._private.storage.kv_store import RayInternalKVStore from ray.serve._private.utils import ( @@ -161,6 +165,15 @@ async def __init__( # Keep track of single-app vs multi-app self.deploy_mode = ServeDeployMode.UNSET + # Controller actor details + self._actor_details = ServeActorDetails( + node_id=ray.get_runtime_context().get_node_id(), + node_ip=ray.util.get_node_ip_address(), + actor_id=ray.get_runtime_context().get_actor_id(), + actor_name=self.controller_name, + worker_id=ray._private.worker.global_worker.worker_id.hex(), + log_file_path=get_component_logger_file_path(), + ) run_background_task(self.run_control_loop()) @@ -701,6 +714,7 @@ def get_serve_instance_details(self) -> Dict: # route_prefix is set instead in each application. # Eventually we want to remove route_prefix from DeploymentSchema. 
return ServeInstanceDetails( + controller_info=self._actor_details, proxy_location=http_config.location, http_options=HTTPOptionsSchema( host=http_config.host, diff --git a/python/ray/serve/schema.py b/python/ray/serve/schema.py index 24047f37ca14..8b655a0be18c 100644 --- a/python/ray/serve/schema.py +++ b/python/ray/serve/schema.py @@ -608,7 +608,26 @@ def get_empty_schema_dict() -> Dict: @PublicAPI(stability="alpha") -class ReplicaDetails(BaseModel, extra=Extra.forbid, frozen=True): +class ServeActorDetails(BaseModel, frozen=True): + node_id: Optional[str] = Field( + description="ID of the node that the actor is running on." + ) + node_ip: Optional[str] = Field( + description="IP address of the node that the actor is running on." + ) + actor_id: Optional[str] = Field(description="Actor ID.") + actor_name: Optional[str] = Field(description="Actor name.") + worker_id: Optional[str] = Field(description="Worker ID.") + log_file_path: Optional[str] = Field( + description=( + "The relative path to the Serve actor's log file from the ray logs " + "directory." + ) + ) + + +@PublicAPI(stability="alpha") +class ReplicaDetails(ServeActorDetails, frozen=True): """Detailed info about a single deployment replica.""" replica_id: str = Field( @@ -620,14 +639,6 @@ class ReplicaDetails(BaseModel, extra=Extra.forbid, frozen=True): ) state: ReplicaState = Field(description="Current state of the replica.") pid: Optional[int] = Field(description="PID of the replica actor process.") - actor_name: str = Field(description="Name of the replica actor.") - actor_id: Optional[str] = Field(description="ID of the replica actor.") - node_id: Optional[str] = Field( - description="ID of the node that the replica actor is running on." - ) - node_ip: Optional[str] = Field( - description="IP address of the node that the replica actor is running on." - ) start_time_s: float = Field( description=( "The time at which the replica actor was started. 
If the controller dies, " @@ -635,12 +646,6 @@ class ReplicaDetails(BaseModel, extra=Extra.forbid, frozen=True): "state from the running replica actor." ) ) - log_file_path: Optional[str] = Field( - description=( - "The relative path to the log file for the replica actor from the ray logs " - "directory." - ) - ) @PublicAPI(stability="alpha") @@ -766,20 +771,8 @@ def get_status_dict(self) -> Dict: @PublicAPI(stability="alpha") -class HTTPProxyDetails(BaseModel): - node_id: str = Field(description="ID of the node that the HTTP Proxy is running on") - node_ip: str = Field( - description="IP address of the node that the HTTP Proxy is running on." - ) - actor_id: str = Field(description="ID of the HTTP Proxy actor.") - actor_name: str = Field(description="Name of the HTTP Proxy actor.") +class HTTPProxyDetails(ServeActorDetails, frozen=True): status: HTTPProxyStatus = Field(description="Current status of the HTTP Proxy.") - log_file_path: Optional[str] = Field( - description=( - "The relative path to the log file for the replica actor from the ray logs " - "directory." - ) - ) @PublicAPI(stability="alpha") @@ -791,6 +784,9 @@ class ServeInstanceDetails(BaseModel, extra=Extra.forbid): This is the response JSON schema for v2 REST API `GET /api/serve/applications`. """ + controller_info: ServeActorDetails = Field( + description="Details about the Serve controller actor." + ) proxy_location: Optional[DeploymentMode] = Field( description=( "The location of HTTP servers.\n" @@ -822,7 +818,7 @@ def get_empty_schema_dict() -> Dict: Represents no Serve instance running on the cluster. 
""" - return {"deploy_mode": "UNSET", "applications": {}} + return {"deploy_mode": "UNSET", "controller_info": {}, "applications": {}} @PublicAPI(stability="beta") diff --git a/python/ray/serve/tests/test_deployment_state.py b/python/ray/serve/tests/test_deployment_state.py index c10a405390ee..8932b1da2c80 100644 --- a/python/ray/serve/tests/test_deployment_state.py +++ b/python/ray/serve/tests/test_deployment_state.py @@ -99,6 +99,18 @@ def actor_handle(self) -> MockActorHandle: def max_concurrent_queries(self) -> int: return 100 + @property + def pid(self) -> Optional[int]: + return None + + @property + def actor_id(self) -> Optional[str]: + return None + + @property + def worker_id(self) -> Optional[str]: + return None + @property def node_id(self) -> Optional[str]: if isinstance(self._scheduling_strategy, NodeAffinitySchedulingStrategy): @@ -107,6 +119,14 @@ def node_id(self) -> Optional[str]: return "node-id" return None + @property + def node_ip(self) -> Optional[str]: + return None + + @property + def log_file_path(self) -> Optional[str]: + return None + def set_ready(self): self.ready = ReplicaStartupStatus.SUCCEEDED @@ -269,6 +289,9 @@ def __init__(self, version: DeploymentVersion): def version(self): return self._version + def update_state(self, state): + pass + return MockVersionedReplica(version) @@ -2262,7 +2285,7 @@ class FakeActor: available_resources = {} # Make a DeploymentReplica just to accesss its resource_requirement function - replica = DeploymentReplica(None, None, None, None, None) + replica = DeploymentReplica(None, None, "random_tag", None, None) replica._actor = FakeActor() # resource_requirements() should not error diff --git a/python/ray/serve/tests/test_http_state.py b/python/ray/serve/tests/test_http_state.py index 83e2c14bb037..ae3870d292d5 100644 --- a/python/ray/serve/tests/test_http_state.py +++ b/python/ray/serve/tests/test_http_state.py @@ -1,4 +1,4 @@ -from functools import partial +import json from unittest.mock import patch 
import pytest @@ -76,22 +76,25 @@ def test_http_proxy_healthy(): class MockHTTPProxyActor: async def ready(self): await signal.wait.remote() - return "mock_actor_id", "mock_log_file_path" + return json.dumps(["mock_worker_id", "mock_log_file_path"]) async def check_health(self): pass proxy = MockHTTPProxyActor.options(lifetime="detached").remote() - state = HTTPProxyState(proxy, "alice", "mock_node_ip") + state = HTTPProxyState(proxy, "alice", "mock_node_id", "mock_node_ip") assert state.status == HTTPProxyStatus.STARTING state.update() assert state.status == HTTPProxyStatus.STARTING signal.send.remote() - wait_for_condition( - lambda: state.update() or state.status == HTTPProxyStatus.HEALTHY, timeout=2 - ) + + def check_proxy(status): + state.update() + return state.status == status + + wait_for_condition(check_proxy, status=HTTPProxyStatus.HEALTHY, timeout=2) ray.shutdown() @@ -102,14 +105,14 @@ def test_http_proxy_unhealthy(): @ray.remote(num_cpus=0) class MockHTTPProxyActor: async def ready(self): - return "mock_actor_id", "mock_log_file_path" + return json.dumps(["mock_worker_id", "mock_log_file_path"]) async def check_health(self): await signal.wait.remote() with patch("ray.serve._private.http_state.PROXY_HEALTH_CHECK_PERIOD_S", 1): proxy = MockHTTPProxyActor.options(lifetime="detached").remote() - state = HTTPProxyState(proxy, "alice", "mock_node_ip") + state = HTTPProxyState(proxy, "alice", "mock_node_id", "mock_node_ip") assert state.status == HTTPProxyStatus.STARTING def check_proxy(status): @@ -117,14 +120,14 @@ def check_proxy(status): return state.status == status # Proxy actor is ready, so status should transition STARTING -> HEALTHY - wait_for_condition(partial(check_proxy, HTTPProxyStatus.HEALTHY), timeout=2) + wait_for_condition(check_proxy, status=HTTPProxyStatus.HEALTHY, timeout=2) # Health check is blocked, so status should transition HEALTHY -> UNHEALTHY - wait_for_condition(partial(check_proxy, HTTPProxyStatus.UNHEALTHY), timeout=2) + 
wait_for_condition(check_proxy, status=HTTPProxyStatus.UNHEALTHY, timeout=2) # Unblock health check, so status should transition UNHEALTHY -> HEALTHY signal.send.remote() - wait_for_condition(partial(check_proxy, HTTPProxyStatus.HEALTHY), timeout=2) + wait_for_condition(check_proxy, status=HTTPProxyStatus.HEALTHY, timeout=2) ray.shutdown() From 82f2b1f29133b09173a2f66719b4488d78815c9e Mon Sep 17 00:00:00 2001 From: Cindy Zhang Date: Thu, 11 May 2023 13:01:32 -0700 Subject: [PATCH 348/424] [serve] Stream Serve logs across different drivers (#35070) Add back `_filter_logs_by_job` to worker.py, and use it to disable filtering of streamed logs in `print_logs`. This existed in worker.py before, but was removed at some point. --- python/ray/_private/worker.py | 12 +++- python/ray/serve/_private/api.py | 2 +- python/ray/serve/context.py | 2 +- python/ray/serve/tests/test_standalone2.py | 79 +++++++++++++++------- 4 files changed, 65 insertions(+), 30 deletions(-) diff --git a/python/ray/_private/worker.py b/python/ray/_private/worker.py index 234f79c3be5c..a7f07fc5763e 100644 --- a/python/ray/_private/worker.py +++ b/python/ray/_private/worker.py @@ -461,6 +461,11 @@ def __init__(self): # Create the lock here because the serializer will use it before # initializing Ray. self.lock = threading.RLock() + # By default, don't show logs from other drivers. This is set to true by Serve + # in order to stream logs from the controller and replica actors across + # different drivers that connect to the same Serve instance. + # See https://github.com/ray-project/ray/pull/35070. + self._filter_logs_by_job = True @property def connected(self): @@ -870,8 +875,11 @@ def print_logs(self): last_polling_batch_size = 0 continue - # Don't show logs from other drivers. 
- if data["job"] and data["job"] != job_id_hex: + if ( + self._filter_logs_by_job + and data["job"] + and data["job"] != job_id_hex + ): last_polling_batch_size = 0 continue diff --git a/python/ray/serve/_private/api.py b/python/ray/serve/_private/api.py index d43ccc7fd344..132a7ac30109 100644 --- a/python/ray/serve/_private/api.py +++ b/python/ray/serve/_private/api.py @@ -138,7 +138,7 @@ def _start_controller( """ # Initialize ray if needed. - ray._private.worker.global_worker.filter_logs_by_job = False + ray._private.worker.global_worker._filter_logs_by_job = False if not ray.is_initialized(): ray.init(namespace=SERVE_NAMESPACE) diff --git a/python/ray/serve/context.py b/python/ray/serve/context.py index 2912a2e66ee4..902025b0adcc 100644 --- a/python/ray/serve/context.py +++ b/python/ray/serve/context.py @@ -100,7 +100,7 @@ def _connect() -> ServeControllerClient: """ # Initialize ray if needed. - ray._private.worker.global_worker.filter_logs_by_job = False + ray._private.worker.global_worker._filter_logs_by_job = False if not ray.is_initialized(): ray.init(namespace=SERVE_NAMESPACE) diff --git a/python/ray/serve/tests/test_standalone2.py b/python/ray/serve/tests/test_standalone2.py index 285fd0f4df3f..7d7c615b17e6 100644 --- a/python/ray/serve/tests/test_standalone2.py +++ b/python/ray/serve/tests/test_standalone2.py @@ -81,13 +81,14 @@ def ray_instance(request): @contextmanager def start_and_shutdown_ray_cli(): - subprocess.check_output( - ["ray", "start", "--head"], - ) + subprocess.check_output(["ray", "stop", "--force"]) + wait_for_condition(_check_ray_stop, timeout=15) + subprocess.check_output(["ray", "start", "--head"]) + yield - subprocess.check_output( - ["ray", "stop", "--force"], - ) + + subprocess.check_output(["ray", "stop", "--force"]) + wait_for_condition(_check_ray_stop, timeout=15) @pytest.fixture(scope="function") @@ -419,29 +420,55 @@ def f(): ray.shutdown() +def test_serve_stream_logs(start_and_shutdown_ray_cli_function): + """Test that 
serve logs show up across different drivers.""" + import tempfile + + file1 = """from ray import serve +@serve.deployment +class A: + def __call__(self): + return "Hello A" +serve.run(A.bind())""" + + file2 = """from ray import serve +@serve.deployment +class B: + def __call__(self): + return "Hello B" +serve.run(B.bind())""" + + with tempfile.NamedTemporaryFile() as f1, tempfile.NamedTemporaryFile() as f2: + f1.write(file1.encode("utf-8")) + f1.seek(0) + # Driver 1 (starts Serve controller) + output = subprocess.check_output(["python", f1.name], stderr=subprocess.STDOUT) + assert "Connecting to existing Ray cluster" in output.decode("utf-8") + assert "Adding 1 replica to deployment default_A" in output.decode("utf-8") + + f2.write(file2.encode("utf-8")) + f2.seek(0) + # Driver 2 (reconnects to the same Serve controller) + output = subprocess.check_output(["python", f2.name], stderr=subprocess.STDOUT) + assert "Connecting to existing Ray cluster" in output.decode("utf-8") + assert "Adding 1 replica to deployment default_B" in output.decode("utf-8") + + class TestDeployApp: @pytest.fixture(scope="function") def client(self): - subprocess.check_output(["ray", "stop", "--force"]) - wait_for_condition( - _check_ray_stop, - timeout=15, - ) - subprocess.check_output(["ray", "start", "--head"]) - wait_for_condition( - lambda: requests.get("http://localhost:52365/api/ray/version").status_code - == 200, - timeout=15, - ) - ray.init(address="auto", namespace=SERVE_NAMESPACE) - yield serve.start(detached=True) - serve.shutdown() - ray.shutdown() - subprocess.check_output(["ray", "stop", "--force"]) - wait_for_condition( - _check_ray_stop, - timeout=15, - ) + with start_and_shutdown_ray_cli(): + wait_for_condition( + lambda: requests.get( + "http://localhost:52365/api/ray/version" + ).status_code + == 200, + timeout=15, + ) + ray.init(address="auto", namespace=SERVE_NAMESPACE) + yield serve.start(detached=True) + serve.shutdown() + ray.shutdown() def 
check_deployment_running(self, client: ServeControllerClient, name: str): serve_status = client.get_serve_status() From 53c51885b8219e8c154265c930f65986cea1463d Mon Sep 17 00:00:00 2001 From: Chao Wang <125417081+chaowanggg@users.noreply.github.com> Date: Thu, 11 May 2023 15:05:04 -0700 Subject: [PATCH 349/424] [Overview][Serve] Add Recent Serve Applications Card #34642 (#35227) Follow up with #34642 Fixed ESLint errors and test cases --- .../src/common/ServeStatus.component.test.tsx | 60 ++++++++ dashboard/client/src/common/ServeStatus.tsx | 77 ++++++++++ .../src/components/AutoscalerStatusCards.tsx | 94 ++++++++++++ .../client/src/components/ListItemCard.tsx | 135 ++++++++++++++++++ dashboard/client/src/pages/job/JobDetail.tsx | 88 ++---------- .../src/pages/overview/OverviewPage.tsx | 34 ++++- .../cards/RecentJobsCard.component.test.tsx | 7 +- .../pages/overview/cards/RecentJobsCard.tsx | 130 ++++------------- .../cards/RecentServeCard.component.test.tsx | 83 +++++++++++ .../pages/overview/cards/RecentServeCard.tsx | 53 +++++++ .../serve/ServeApplicationDetailPage.tsx | 10 +- .../src/pages/serve/ServeApplicationRow.tsx | 14 +- .../src/pages/serve/mockServeApplication.ts | 63 ++++++++ dashboard/client/src/type/serve.ts | 2 +- 14 files changed, 661 insertions(+), 189 deletions(-) create mode 100644 dashboard/client/src/common/ServeStatus.component.test.tsx create mode 100644 dashboard/client/src/common/ServeStatus.tsx create mode 100644 dashboard/client/src/components/AutoscalerStatusCards.tsx create mode 100644 dashboard/client/src/components/ListItemCard.tsx create mode 100644 dashboard/client/src/pages/overview/cards/RecentServeCard.component.test.tsx create mode 100644 dashboard/client/src/pages/overview/cards/RecentServeCard.tsx create mode 100644 dashboard/client/src/pages/serve/mockServeApplication.ts diff --git a/dashboard/client/src/common/ServeStatus.component.test.tsx b/dashboard/client/src/common/ServeStatus.component.test.tsx new file mode 100644 
index 000000000000..5436f583e02f --- /dev/null +++ b/dashboard/client/src/common/ServeStatus.component.test.tsx @@ -0,0 +1,60 @@ +import { render, screen } from "@testing-library/react"; +import React from "react"; +import { ServeApplication, ServeApplicationStatus } from "../type/serve"; +import { ServeStatusIcon } from "./ServeStatus"; + +const APP: ServeApplication = { + name: "MyServeApp", + route_prefix: "/my-serve-app", + docs_path: null, + status: ServeApplicationStatus.RUNNING, + message: "", + last_deployed_time_s: 1682029771.0748637, + deployed_app_config: null, + deployments: {}, +}; + +describe("ServeStatusIcon", () => { + it("renders RUNNING status", async () => { + render(); + + await screen.findByTestId("serve-status-icon"); + + const icon = screen.getByTestId("serve-status-icon"); + const classList = icon.getAttribute("class"); + expect(classList).toContain("colorSuccess"); + }); + + it("renders NOT_STARTED status", async () => { + render( + , + ); + + await screen.findByTestId("serve-status-icon"); + + expect(screen.queryByTestId("serve-status-icon")).not.toHaveClass( + "colorSuccess", + ); + expect(screen.queryByTestId("serve-status-icon")).not.toHaveClass( + "colorError", + ); + }); + + it("renders DEPLOY_FAILED status", async () => { + render( + , + ); + + await screen.findByTestId("serve-status-icon"); + + const icon = screen.getByTestId("serve-status-icon"); + const classList = icon.getAttribute("class"); + expect(classList).toContain("colorError"); + }); +}); diff --git a/dashboard/client/src/common/ServeStatus.tsx b/dashboard/client/src/common/ServeStatus.tsx new file mode 100644 index 000000000000..dd4ebad48889 --- /dev/null +++ b/dashboard/client/src/common/ServeStatus.tsx @@ -0,0 +1,77 @@ +import { createStyles, makeStyles } from "@material-ui/core"; +import classNames from "classnames"; +import React from "react"; +import { + RiCloseCircleFill, + RiRecordCircleFill, + RiStopCircleFill, +} from "react-icons/ri"; +import { ServeApplication 
} from "../type/serve"; +import { JobRunningIcon } from "./JobStatus"; +import { ClassNameProps } from "./props"; + +type ServeStatusIconProps = { + app: ServeApplication; + small: boolean; +} & ClassNameProps; + +const useServeStatusIconStyles = makeStyles((theme) => + createStyles({ + icon: { + width: 20, + height: 20, + marginRight: 8, + }, + iconSmall: { + width: 16, + height: 16, + }, + colorSuccess: { + color: theme.palette.success.main, + }, + colorError: { + color: theme.palette.error.main, + }, + }), +); + +export const ServeStatusIcon = ({ + app, + small, + className, +}: ServeStatusIconProps) => { + const classes = useServeStatusIconStyles(); + + switch (app.status) { + case "RUNNING": + return ( + + ); + case "NOT_STARTED": + return ( + + ); + case "DEPLOY_FAILED": + return ( + + ); + default: + // DEPLOYING || DELETEING + return ( + + ); + } +}; diff --git a/dashboard/client/src/components/AutoscalerStatusCards.tsx b/dashboard/client/src/components/AutoscalerStatusCards.tsx new file mode 100644 index 000000000000..887c192ae36b --- /dev/null +++ b/dashboard/client/src/components/AutoscalerStatusCards.tsx @@ -0,0 +1,94 @@ +import { Box, Typography } from "@material-ui/core"; +import React from "react"; +import { RayStatusResp } from "../service/status"; + +const formatNodeStatus = (cluster_status: string) => { + // ==== auto scaling status + // Node status + // .... + // Resources + // .... + const sections = cluster_status.split("Resources"); + return formatClusterStatus( + "Node Status", + sections[0].split("Node status")[1], + ); +}; + +const formatResourcesStatus = (cluster_status: string) => { + // ==== auto scaling status + // Node status + // .... + // Resources + // .... + const sections = cluster_status.split("Resources"); + return formatClusterStatus("Resource Status", sections[1]); +}; + +const formatClusterStatus = (title: string, cluster_status: string) => { + const cluster_status_rows = cluster_status.split("\n"); + + return ( +
    + + {title} + + {cluster_status_rows.map((i, key) => { + // Format the output. + // See format_info_string in util.py + if (i.startsWith("-----") || i.startsWith("=====") || i === "") { + // Ignore separators + return null; + } else if (i.endsWith(":")) { + return ( +
    + {i} +
    + ); + } else { + return
    {i}
    ; + } + })} +
    + ); +}; + +type StatusCardProps = { + cluster_status: RayStatusResp | undefined; +}; + +export const NodeStatusCard = ({ cluster_status }: StatusCardProps) => { + return ( + + {cluster_status?.data + ? formatNodeStatus(cluster_status?.data.clusterStatus) + : "No cluster status."} + + ); +}; + +export const ResourceStatusCard = ({ cluster_status }: StatusCardProps) => { + return ( + + {cluster_status?.data + ? formatResourcesStatus(cluster_status?.data.clusterStatus) + : "No cluster status."} + + ); +}; diff --git a/dashboard/client/src/components/ListItemCard.tsx b/dashboard/client/src/components/ListItemCard.tsx new file mode 100644 index 000000000000..530cb08f13d8 --- /dev/null +++ b/dashboard/client/src/components/ListItemCard.tsx @@ -0,0 +1,135 @@ +import { createStyles, makeStyles, Typography } from "@material-ui/core"; +import classNames from "classnames"; +import React, { ReactNode } from "react"; +import { Link } from "react-router-dom"; +import { ClassNameProps } from "../common/props"; +import { + LinkWithArrow, + OverviewCard, +} from "../pages/overview/cards/OverviewCard"; + +type ListItemCardProps = { + headerTitle: string; + items: ListItemProps[]; + emptyListText: string; + footerText: string; + footerLink: string; +} & ClassNameProps; + +type ListItemProps = { + title: string | undefined; + subtitle: string; + link: string | undefined; + icon: ReactNode; +} & ClassNameProps; + +const useStyles = makeStyles((theme) => + createStyles({ + root: { + display: "flex", + flexDirection: "column", + padding: theme.spacing(2, 3), + }, + listContainer: { + marginTop: theme.spacing(2), + flex: 1, + overflow: "hidden", + }, + listItem: { + "&:not(:first-child)": { + marginTop: theme.spacing(1), + }, + }, + }), +); + +export const ListItemCard = ({ + className, + headerTitle, + items, + emptyListText: itemEmptyTip, + footerText, + footerLink, +}: ListItemCardProps) => { + const classes = useStyles(); + + return ( + + {headerTitle} +
    + {items.map((item: ListItemProps) => ( + + ))} + {items.length === 0 && ( + {itemEmptyTip} + )} +
    + +
    + ); +}; + +const useListItemStyles = makeStyles((theme) => + createStyles({ + root: { + display: "flex", + flexDirection: "row", + flexWrap: "nowrap", + alignItems: "center", + textDecoration: "none", + }, + + textContainer: { + flex: "1 1 auto", + width: `calc(100% - ${theme.spacing(1) + 20}px)`, + }, + title: { + color: "#036DCF", + }, + entrypoint: { + overflow: "hidden", + textOverflow: "ellipsis", + whiteSpace: "nowrap", + color: "#5F6469", + }, + }), +); + +const ListItem = ({ + icon, + title, + subtitle, + className, + link, +}: ListItemProps) => { + const classes = useListItemStyles(); + + const cardContent = ( + + {icon} +
    + + {title} + + + {subtitle} + +
    +
    + ); + return ( +
    + {link !== undefined ? ( + + {cardContent} + + ) : ( +
    {cardContent}
    + )} +
    + ); +}; diff --git a/dashboard/client/src/pages/job/JobDetail.tsx b/dashboard/client/src/pages/job/JobDetail.tsx index 8bca84de8409..d0b1399fb326 100644 --- a/dashboard/client/src/pages/job/JobDetail.tsx +++ b/dashboard/client/src/pages/job/JobDetail.tsx @@ -1,9 +1,13 @@ -import { Box, makeStyles, Typography } from "@material-ui/core"; +import { Box, makeStyles } from "@material-ui/core"; import React, { useContext, useRef, useState } from "react"; import { Link } from "react-router-dom"; import { GlobalContext } from "../../App"; import { CollapsibleSection } from "../../common/CollapsibleSection"; import { Section } from "../../common/Section"; +import { + NodeStatusCard, + ResourceStatusCard, +} from "../../components/AutoscalerStatusCards"; import Loading from "../../components/Loading"; import { StatusChip } from "../../components/StatusChip"; import TitleCard from "../../components/TitleCard"; @@ -12,7 +16,6 @@ import ActorList from "../actor/ActorList"; import { NodeCountCard } from "../overview/cards/NodeCountCard"; import PlacementGroupList from "../state/PlacementGroup"; import TaskList from "../state/task"; - import { useRayStatus } from "./hook/useClusterStatus"; import { useJobDetail } from "./hook/useJobDetail"; import { JobMetadataSection } from "./JobDetailInfoPage"; @@ -52,57 +55,6 @@ export const JobDetailChartsPage = () => { const actorTableRef = useRef(null); const { cluster_status } = useRayStatus(); - const formatNodeStatus = (cluster_status: string) => { - // ==== auto scaling status - // Node status - // .... - // Resources - // .... - const sections = cluster_status.split("Resources"); - return formatClusterStatus( - "Node Status", - sections[0].split("Node status")[1], - ); - }; - - const formatResourcesStatus = (cluster_status: string) => { - // ==== auto scaling status - // Node status - // .... - // Resources - // .... 
- const sections = cluster_status.split("Resources"); - return formatClusterStatus("Resource Status", sections[1]); - }; - - const formatClusterStatus = (title: string, cluster_status: string) => { - const cluster_status_rows = cluster_status.split("\n"); - - return ( -
    - - {title} - - {cluster_status_rows.map((i, key) => { - // Format the output. - // See format_info_string in util.py - if (i.startsWith("-----") || i.startsWith("=====") || i === "") { - // Ignore separators - return null; - } else if (i.endsWith(":")) { - return ( -
    - {i} -
    - ); - } else { - return
    {i}
    ; - } - })} -
    - ); - }; - if (!job) { return (
    @@ -193,7 +145,7 @@ export const JobDetailChartsPage = () => { )} @@ -206,34 +158,10 @@ export const JobDetailChartsPage = () => { >
    - - {cluster_status?.data - ? formatNodeStatus(cluster_status?.data.clusterStatus) - : "No cluster status."} - +
    - - {cluster_status?.data - ? formatResourcesStatus(cluster_status?.data.clusterStatus) - : "No cluster status."} - +
    diff --git a/dashboard/client/src/pages/overview/OverviewPage.tsx b/dashboard/client/src/pages/overview/OverviewPage.tsx index 4432896349f7..3582107d3e92 100644 --- a/dashboard/client/src/pages/overview/OverviewPage.tsx +++ b/dashboard/client/src/pages/overview/OverviewPage.tsx @@ -1,11 +1,19 @@ import { createStyles, makeStyles } from "@material-ui/core"; +import classNames from "classnames"; import React from "react"; import { CollapsibleSection } from "../../common/CollapsibleSection"; +import { + NodeStatusCard, + ResourceStatusCard, +} from "../../components/AutoscalerStatusCards"; import EventTable from "../../components/EventTable"; +import { useRayStatus } from "../job/hook/useClusterStatus"; import { MainNavPageInfo } from "../layout/mainNavContext"; import { ClusterUtilizationCard } from "./cards/ClusterUtilizationCard"; import { NodeCountCard } from "./cards/NodeCountCard"; +import { OverviewCard } from "./cards/OverviewCard"; import { RecentJobsCard } from "./cards/RecentJobsCard"; +import { RecentServeCard } from "./cards/RecentServeCard"; const useStyles = makeStyles((theme) => createStyles({ @@ -40,6 +48,8 @@ const useStyles = makeStyles((theme) => export const OverviewPage = () => { const classes = useStyles(); + const { cluster_status } = useRayStatus(); + return (
    { />
    - +
    + + { +
    + + + + + + + +
    + } +
    + { expect(link).toHaveAttribute("href"); }); - it("disables link when job_id is null", async () => { + it("link is active for driverless job(only have submission_id)", async () => { render(, { wrapper: MemoryRouter }); await screen.findByText("01000000"); - expect(screen.queryByRole("link", { name: "raysubmit_23456" })).toBeNull(); + + expect( + screen.queryByRole("link", { name: "raysubmit_23456" }), + ).toBeVisible(); }); }); diff --git a/dashboard/client/src/pages/overview/cards/RecentJobsCard.tsx b/dashboard/client/src/pages/overview/cards/RecentJobsCard.tsx index a38ebedd6729..9d43820505f1 100644 --- a/dashboard/client/src/pages/overview/cards/RecentJobsCard.tsx +++ b/dashboard/client/src/pages/overview/cards/RecentJobsCard.tsx @@ -1,29 +1,15 @@ -import { createStyles, makeStyles, Typography } from "@material-ui/core"; -import classNames from "classnames"; +import { createStyles, makeStyles } from "@material-ui/core"; import _ from "lodash"; import React from "react"; -import { Link } from "react-router-dom"; import { JobStatusIcon } from "../../../common/JobStatus"; +import { ListItemCard } from "../../../components/ListItemCard"; import { UnifiedJob } from "../../../type/job"; import { useJobList } from "../../job/hook/useJobList"; -import { LinkWithArrow, OverviewCard } from "./OverviewCard"; const useStyles = makeStyles((theme) => createStyles({ - root: { - display: "flex", - flexDirection: "column", - padding: theme.spacing(2, 3), - }, - listContainer: { - marginTop: theme.spacing(2), - flex: 1, - overflow: "hidden", - }, - listItem: { - "&:not(:first-child)": { - marginTop: theme.spacing(1), - }, + icon: { + marginRight: theme.spacing(1), }, }), ); @@ -32,6 +18,15 @@ type RecentJobsCardProps = { className?: string; }; +const getLink = (job: UnifiedJob) => { + if (job.job_id !== null && job.job_id !== "") { + return `/jobs/${job.job_id}`; + } else if (job.submission_id !== null && job.submission_id !== "") { + return `/jobs/${job.submission_id}`; + } 
+ return undefined; +}; + export const RecentJobsCard = ({ className }: RecentJobsCardProps) => { const classes = useStyles(); @@ -39,89 +34,24 @@ export const RecentJobsCard = ({ className }: RecentJobsCardProps) => { const sortedJobs = _.orderBy(jobList, ["startTime"], ["desc"]).slice(0, 6); - return ( - - Recent jobs -
    - {sortedJobs.map((job) => ( - - ))} - {sortedJobs.length === 0 && ( - No jobs yet... - )} -
    - -
    - ); -}; - -const useRecentJobListItemStyles = makeStyles((theme) => - createStyles({ - root: { - display: "flex", - flexDirection: "row", - flexWrap: "nowrap", - alignItems: "center", - textDecoration: "none", - }, - textContainer: { - flex: "1 1 auto", - width: `calc(100% - ${theme.spacing(1) + 20}px)`, - }, - title: { - color: "#036DCF", - }, - entrypoint: { - overflow: "hidden", - textOverflow: "ellipsis", - whiteSpace: "nowrap", - color: "#5F6469", - }, - icon: { - marginRight: theme.spacing(1), - }, - }), -); - -type RecentJobListItemProps = { - job: UnifiedJob; - className?: string; -}; - -const RecentJobListItem = ({ job, className }: RecentJobListItemProps) => { - const classes = useRecentJobListItemStyles(); - - const cardContent = ( - - -
    - - {job.job_id ?? job.submission_id} - - - {job.entrypoint} - -
    -
    - ); + const sortedJobToRender = sortedJobs.map((job) => { + return { + title: job.job_id ?? job.submission_id ?? undefined, + subtitle: job.entrypoint, + link: getLink(job), + className: className, + icon: , + }; + }); return ( -
    - {job.job_id !== null && job.job_id !== "" ? ( - - {cardContent} - - ) : ( -
    {cardContent}
    - )} -
    + ); }; diff --git a/dashboard/client/src/pages/overview/cards/RecentServeCard.component.test.tsx b/dashboard/client/src/pages/overview/cards/RecentServeCard.component.test.tsx new file mode 100644 index 000000000000..ccb62851d5bc --- /dev/null +++ b/dashboard/client/src/pages/overview/cards/RecentServeCard.component.test.tsx @@ -0,0 +1,83 @@ +import { render, screen } from "@testing-library/react"; +import React from "react"; +import { getServeApplications } from "../../../service/serve"; +import { + ServeApplicationStatus, + ServeDeploymentMode, +} from "../../../type/serve"; +import { TEST_APP_WRAPPER } from "../../../util/test-utils"; +import { RecentServeCard } from "./RecentServeCard"; + +jest.mock("../../../service/serve"); + +const mockGetServeApplications = jest.mocked(getServeApplications); + +describe("RecentServeCard", () => { + beforeEach(() => { + mockGetServeApplications.mockResolvedValue({ + data: { + http_options: { host: "1.2.3.4", port: 8000 }, + proxy_location: ServeDeploymentMode.EveryNode, + applications: { + home: { + name: "home", + route_prefix: "/", + message: null, + status: ServeApplicationStatus.RUNNING, + deployed_app_config: { + import_path: "home:graph", + }, + last_deployed_time_s: new Date().getTime() / 1000, + }, + "second-app": { + name: "second-app", + route_prefix: "/second-app", + message: null, + status: ServeApplicationStatus.DEPLOYING, + deployed_app_config: null, + last_deployed_time_s: new Date().getTime() / 1000, + deployments: {}, + }, + }, + }, + } as any); + }); + + it("should display serve applications with deployed_app_config", async () => { + render(, { + wrapper: TEST_APP_WRAPPER, + }); + + await screen.findByText("View all applications"); + + expect.assertions(3); + expect(screen.getByText("home")).toBeInTheDocument(); + expect(screen.getByText("home:graph")).toBeInTheDocument(); + expect(screen.getByText("Serve Applications")).toBeInTheDocument(); + }); + + it("should display serve applications without 
deployed_app_config", async () => { + render(, { + wrapper: TEST_APP_WRAPPER, + }); + + await screen.findByText("View all applications"); + + expect.assertions(3); + expect(screen.getByText("second-app")).toBeInTheDocument(); + expect(screen.getByText("-")).toBeInTheDocument(); // default value for no deployed_app_config + expect(screen.getByText("Serve Applications")).toBeInTheDocument(); + }); + + it("should navigate to the applications page when the 'View all applications' link is clicked", async () => { + render(, { + wrapper: TEST_APP_WRAPPER, + }); + + await screen.findByText("View all applications"); + const link = screen.getByRole("link", { + name: /view all applications/i, + }); + expect(link).toHaveAttribute("href"); + }); +}); diff --git a/dashboard/client/src/pages/overview/cards/RecentServeCard.tsx b/dashboard/client/src/pages/overview/cards/RecentServeCard.tsx new file mode 100644 index 000000000000..960cd8738006 --- /dev/null +++ b/dashboard/client/src/pages/overview/cards/RecentServeCard.tsx @@ -0,0 +1,53 @@ +import { createStyles, makeStyles } from "@material-ui/core"; +import _ from "lodash"; +import React from "react"; +import { ServeStatusIcon } from "../../../common/ServeStatus"; +import { ListItemCard } from "../../../components/ListItemCard"; +import { useServeApplications } from "../../serve/hook/useServeApplications"; + +const useStyles = makeStyles((theme) => + createStyles({ + icon: { + marginRight: theme.spacing(1), + }, + }), +); + +type RecentServeCardProps = { + className?: string; +}; + +export const RecentServeCard = ({ className }: RecentServeCardProps) => { + const classes = useStyles(); + + // Use mock data by uncommenting the following line + // const applications = mockServeApplications.applications; + const { allServeApplications: applications } = useServeApplications(); + + const sortedApplications = _.orderBy( + applications, + ["last_deployed_time_s"], + ["desc"], + ).slice(0, 6); + + const sortedApplicationsToRender = 
sortedApplications.map((app) => { + return { + title: app.name, + subtitle: app?.deployed_app_config?.import_path || "-", + link: app.name ? `/serve/applications/${app.name}` : undefined, + className: className, + icon: , + }; + }); + + return ( + + ); +}; diff --git a/dashboard/client/src/pages/serve/ServeApplicationDetailPage.tsx b/dashboard/client/src/pages/serve/ServeApplicationDetailPage.tsx index efecc9939558..a3245b44b49f 100644 --- a/dashboard/client/src/pages/serve/ServeApplicationDetailPage.tsx +++ b/dashboard/client/src/pages/serve/ServeApplicationDetailPage.tsx @@ -111,7 +111,7 @@ export const ServeApplicationDetailPage = () => { }, { label: "Application config", - content: ( + content: application.deployed_app_config ? ( { } code={application.deployed_app_config} /> + ) : ( + - ), }, { @@ -138,6 +140,12 @@ export const ServeApplicationDetailPage = () => { /> ), }, + { + label: "Import path", + content: { + value: application?.deployed_app_config?.import_path || "-", + }, + }, ]} /> diff --git a/dashboard/client/src/pages/serve/ServeApplicationRow.tsx b/dashboard/client/src/pages/serve/ServeApplicationRow.tsx index d3cf37a24eec..54d06cd964ca 100644 --- a/dashboard/client/src/pages/serve/ServeApplicationRow.tsx +++ b/dashboard/client/src/pages/serve/ServeApplicationRow.tsx @@ -54,10 +54,16 @@ export const ServeApplicationRow = ({ - + {deployed_app_config ? 
( + + ) : ( + "-" + )} ); diff --git a/dashboard/client/src/pages/serve/mockServeApplication.ts b/dashboard/client/src/pages/serve/mockServeApplication.ts new file mode 100644 index 000000000000..5ed996d2829f --- /dev/null +++ b/dashboard/client/src/pages/serve/mockServeApplication.ts @@ -0,0 +1,63 @@ +import { ServeApplicationStatus } from "../../type/serve"; + +export const mockServeApplications = { + applications: { + app1: { + name: "app1", + route_prefix: "/app1", + message: null, + status: ServeApplicationStatus.RUNNING, + deployed_app_config: { + import_path: "app1:graph", + }, + last_deployed_time_s: new Date().getTime() / 1000, + }, + app2: { + name: "app2", + route_prefix: "/app2", + message: null, + status: ServeApplicationStatus.RUNNING, + deployed_app_config: null, + last_deployed_time_s: new Date().getTime() / 1000, + deployments: {}, + }, + app3: { + name: "app3", + route_prefix: "/app3", + message: null, + status: ServeApplicationStatus.DEPLOYING, + deployed_app_config: null, + last_deployed_time_s: new Date().getTime() / 1000, + deployments: {}, + }, + app4: { + name: "app4", + route_prefix: "/app4", + message: null, + status: ServeApplicationStatus.RUNNING, + deployed_app_config: { + import_path: "app4:graph", + }, + last_deployed_time_s: new Date().getTime() / 1000, + }, + app5: { + name: "app5", + route_prefix: "/app5", + message: null, + status: ServeApplicationStatus.DEPLOY_FAILED, + deployed_app_config: { + import_path: "app5:graph", + }, + last_deployed_time_s: new Date().getTime() / 1000, + }, + app6: { + name: "app6", + route_prefix: "/app6", + message: null, + status: ServeApplicationStatus.DELETING, + deployed_app_config: null, + last_deployed_time_s: new Date().getTime() / 1000, + deployments: {}, + }, + }, +}; diff --git a/dashboard/client/src/type/serve.ts b/dashboard/client/src/type/serve.ts index 839613ec548f..47d978518976 100644 --- a/dashboard/client/src/type/serve.ts +++ b/dashboard/client/src/type/serve.ts @@ -14,7 +14,7 @@ 
export type ServeApplication = { status: ServeApplicationStatus; message: string; last_deployed_time_s: number; - deployed_app_config: Record; + deployed_app_config: Record | null; // It could be null if user did not provide deployed_app_config deployments: { [name: string]: ServeDeployment; }; From c03e798f5b2e1a1bab8e08adb298dfe0d4ace884 Mon Sep 17 00:00:00 2001 From: Sihan Wang Date: Thu, 11 May 2023 15:14:20 -0700 Subject: [PATCH 350/424] [Serve] Add status_code to http qps & latency (#35134) Add status_code for http qps & latency stats. This is to resolve "double counting" issue because of redirect request. Related issue number Close #33686 --- python/ray/serve/_private/http_proxy.py | 31 ++++++++---- python/ray/serve/tests/test_metrics.py | 64 +++++++++++++++++++++++++ 2 files changed, 86 insertions(+), 9 deletions(-) diff --git a/python/ray/serve/_private/http_proxy.py b/python/ray/serve/_private/http_proxy.py index c9e977755b33..e2f16d1a458b 100644 --- a/python/ray/serve/_private/http_proxy.py +++ b/python/ray/serve/_private/http_proxy.py @@ -289,7 +289,7 @@ def get_handle(name): self.request_counter = metrics.Counter( "serve_num_http_requests", description="The number of HTTP requests processed.", - tag_keys=("route", "method", "application"), + tag_keys=("route", "method", "application", "status_code"), ) self.request_error_counter = metrics.Counter( @@ -325,6 +325,7 @@ def get_handle(name): tag_keys=( "route", "application", + "status_code", ), ) @@ -376,6 +377,7 @@ async def __call__(self, scope, receive, send): "route": route_path, "method": scope["method"].upper(), "application": "", + "status_code": "200", } ) return await starlette.responses.JSONResponse(self.route_info)( @@ -388,6 +390,7 @@ async def __call__(self, scope, receive, send): "route": route_path, "method": scope["method"].upper(), "application": "", + "status_code": "200", } ) return await starlette.responses.PlainTextResponse("success")( @@ -408,16 +411,11 @@ async def __call__(self, 
scope, receive, send): "route": route_path, "method": scope["method"].upper(), "application": "", + "status_code": "404", } ) return await self._not_found(scope, receive, send) - self.request_counter.inc( - tags={ - "route": route_path, - "method": scope["method"].upper(), - "application": app_name, - } - ) + # Modify the path and root path so that reverse lookups and redirection # work as expected. We do this here instead of in replicas so it can be # changed without restarting the replicas. @@ -433,9 +431,24 @@ async def __call__(self, scope, receive, send): ) ) status_code = await _send_request_to_handle(handle, scope, receive, send) + + self.request_counter.inc( + tags={ + "route": route_path, + "method": scope["method"].upper(), + "application": app_name, + "status_code": status_code, + } + ) + latency_ms = (time.time() - start_time) * 1000.0 self.processing_latency_tracker.observe( - latency_ms, tags={"route": route_path, "application": app_name} + latency_ms, + tags={ + "route": route_path, + "application": app_name, + "status_code": status_code, + }, ) logger.info( access_log_msg( diff --git a/python/ray/serve/tests/test_metrics.py b/python/ray/serve/tests/test_metrics.py index 946f04c5b98d..794fae8aff58 100644 --- a/python/ray/serve/tests/test_metrics.py +++ b/python/ray/serve/tests/test_metrics.py @@ -12,6 +12,8 @@ from fastapi import FastAPI from ray.serve.metrics import Counter, Histogram, Gauge from ray.serve._private.constants import DEFAULT_LATENCY_BUCKET_MS +from ray.serve.drivers import DAGDriver +from ray.serve.http_adapters import json_request @pytest.fixture @@ -194,6 +196,7 @@ def f(*args): assert num_requests[0]["route"] == "/fake_route" assert num_requests[0]["method"] == "GET" assert num_requests[0]["application"] == "" + assert num_requests[0]["status_code"] == "404" print("serve_num_http_requests working as expected.") num_errors = get_metric_dictionaries("serve_num_http_error_requests") @@ -223,9 +226,70 @@ def f(*args): assert 
len(latency_metrics) == 1 assert latency_metrics[0]["route"] == "/real_route" assert latency_metrics[0]["application"] == "app" + assert latency_metrics[0]["status_code"] == "500" print("serve_http_request_latency_ms working as expected.") +def test_http_redirect_metrics(serve_start_shutdown): + """Tests the http redirect metrics' behavior.""" + + def verify_metrics_with_route(metrics, expected_metrics): + assert len(metrics) == len(expected_metrics) + for metric_dict in metrics: + match_metric = None + for expected_metric in expected_metrics: + if expected_metric["route"] == metric_dict["route"]: + match_metric = expected_metric + break + assert match_metric is not None + for key in match_metric: + assert match_metric[key] == metric_dict[key] + + @serve.deployment + class Model: + def __call__(self, *args): + return "123" + + serve.run( + DAGDriver.bind(Model.bind(), http_adapter=json_request), route_prefix="/bar" + ) + resp = requests.get("http://localhost:8000/bar", json=["123"]) + assert resp.status_code == 200 + assert resp.text == '"123"' + + wait_for_condition( + lambda: len(get_metric_dictionaries("serve_num_http_requests")) == 2, + timeout=20, + ) + num_http_requests = get_metric_dictionaries("serve_num_http_requests") + expected_output = [ + { + "route": "/bar/", + "application": "default", + "method": "GET", + "status_code": "200", + }, + { + "route": "/bar", + "application": "default", + "method": "GET", + "status_code": "307", + }, + ] + verify_metrics_with_route(num_http_requests, expected_output) + + wait_for_condition( + lambda: len(get_metric_dictionaries("serve_http_request_latency_ms_sum")) == 2, + timeout=20, + ) + http_latency = get_metric_dictionaries("serve_num_http_requests") + expected_output = [ + {"route": "/bar/", "application": "default", "status_code": "200"}, + {"route": "/bar", "application": "default", "status_code": "307"}, + ] + verify_metrics_with_route(http_latency, expected_output) + + def 
test_replica_metrics_fields(serve_start_shutdown): """Test replica metrics fields""" From cc58b8efae0a318c4ad04b359ac28ce84697e257 Mon Sep 17 00:00:00 2001 From: matthewdeng Date: Thu, 11 May 2023 15:33:44 -0700 Subject: [PATCH 351/424] [train] Fix HuggingFace -> Transformers wrapping logic (#35276) Properly pass constructor arguments through. Signed-off-by: Matthew Deng --- python/ray/train/huggingface/huggingface_checkpoint.py | 2 +- python/ray/train/huggingface/huggingface_predictor.py | 2 +- python/ray/train/huggingface/huggingface_trainer.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/python/ray/train/huggingface/huggingface_checkpoint.py b/python/ray/train/huggingface/huggingface_checkpoint.py index 35539c151323..fc96c521d4ea 100644 --- a/python/ray/train/huggingface/huggingface_checkpoint.py +++ b/python/ray/train/huggingface/huggingface_checkpoint.py @@ -14,7 +14,7 @@ class HuggingFaceCheckpoint(TransformersCheckpoint): # than __init__ def __new__(cls: type, *args, **kwargs): warnings.warn(deprecation_msg, DeprecationWarning) - return super(HuggingFaceCheckpoint, cls).__new__(cls) + return super(HuggingFaceCheckpoint, cls).__new__(cls, *args, **kwargs) __all__ = [ diff --git a/python/ray/train/huggingface/huggingface_predictor.py b/python/ray/train/huggingface/huggingface_predictor.py index fd90557e80f5..9d276f7a3649 100644 --- a/python/ray/train/huggingface/huggingface_predictor.py +++ b/python/ray/train/huggingface/huggingface_predictor.py @@ -14,7 +14,7 @@ class HuggingFacePredictor(TransformersPredictor): # than __init__ def __new__(cls: type, *args, **kwargs): warnings.warn(deprecation_msg, DeprecationWarning) - return super(HuggingFacePredictor, cls).__new__(cls) + return super(HuggingFacePredictor, cls).__new__(cls, *args, **kwargs) __all__ = [ diff --git a/python/ray/train/huggingface/huggingface_trainer.py b/python/ray/train/huggingface/huggingface_trainer.py index ed5b015b2c88..7255a13d262c 100644 --- 
a/python/ray/train/huggingface/huggingface_trainer.py +++ b/python/ray/train/huggingface/huggingface_trainer.py @@ -14,7 +14,7 @@ class HuggingFaceTrainer(TransformersTrainer): # than __init__ def __new__(cls: type, *args, **kwargs): warnings.warn(deprecation_msg, DeprecationWarning) - return super(HuggingFaceTrainer, cls).__new__(cls) + return super(HuggingFaceTrainer, cls).__new__(cls, *args, **kwargs) __all__ = [ From 3f4be857554bb42ed6ce06e82b70db956ccca54f Mon Sep 17 00:00:00 2001 From: Ricky Xu Date: Fri, 12 May 2023 06:34:42 +0800 Subject: [PATCH 352/424] [core][dashboard][state] Support task logs from state API (#35101) This PR adds support for retrieving task logs from state API with task ID directly. In the high level: On task execution: it writes magical token including task id + attempt before and after task is run in the worker file. On query: it reconstructs the worker file name from querying worker id from the task backend for a task attempt, and look for the magical tokens in the worker file to find beginning and end of the files. The end token is used to identify task log in the case of async actor: if async actors product interleaved logs, one task's log will contain all logs from the task and other interleaved logs from other tasks. 
(We could probably return some sort of warning to let users know this) --- dashboard/modules/log/log_agent.py | 138 ++++++++- dashboard/modules/log/log_manager.py | 146 ++++++++-- dashboard/modules/state/state_head.py | 1 + python/ray/_private/log_monitor.py | 7 + python/ray/_private/ray_constants.py | 4 + python/ray/_raylet.pyx | 25 +- python/ray/experimental/state/api.py | 5 +- python/ray/experimental/state/common.py | 8 +- python/ray/experimental/state/state_cli.py | 79 +++++ .../ray/experimental/state/state_manager.py | 5 +- python/ray/includes/libcoreworker.pxd | 1 + python/ray/tests/test_state_api_log.py | 275 ++++++++++++++++-- src/ray/core_worker/core_worker.h | 6 + src/ray/protobuf/reporter.proto | 4 + 14 files changed, 630 insertions(+), 74 deletions(-) diff --git a/dashboard/modules/log/log_agent.py b/dashboard/modules/log/log_agent.py index 5ae03d4cf357..572d3b2c95f8 100644 --- a/dashboard/modules/log/log_agent.py +++ b/dashboard/modules/log/log_agent.py @@ -1,9 +1,12 @@ import logging +from typing import Tuple +import concurrent.futures import ray.dashboard.modules.log.log_utils as log_utils import ray.dashboard.modules.log.log_consts as log_consts import ray.dashboard.utils as dashboard_utils import ray.dashboard.optional_utils as dashboard_optional_utils +from ray._private.ray_constants import env_integer import asyncio import grpc import io @@ -14,6 +17,10 @@ from ray.core.generated import reporter_pb2 from ray.core.generated import reporter_pb2_grpc +from ray._private.ray_constants import ( + LOG_PREFIX_TASK_ATTEMPT_START, + LOG_PREFIX_TASK_ATTEMPT_END, +) logger = logging.getLogger(__name__) routes = dashboard_optional_utils.ClassMethodRouteTable @@ -24,6 +31,41 @@ # Keep-alive interval for reading the file DEFAULT_KEEP_ALIVE_INTERVAL_SEC = 1 +RAY_DASHBOARD_LOG_TASK_LOG_SEARCH_MAX_WORKER_COUNT = env_integer( + "RAY_DASHBOARD_LOG_TASK_LOG_SEARCH_MAX_WORKER_COUNT", default=2 +) + + +def find_offset_of_content_in_file( + file: io.BufferedIOBase, 
content: bytes, start_offset: int = 0 +) -> int: + """Find the offset of the first occurrence of content in a file. + + Args: + file: File object + content: Content to find + start_offset: Start offset to read from, inclusive. + + Returns: + Offset of the first occurrence of content in a file. + """ + logger.debug(f"Finding offset of content {content} in file") + file.seek(start_offset, io.SEEK_SET) # move file pointer to start of file + offset = start_offset + while True: + # Read in block + block_data = file.read(BLOCK_SIZE) + if block_data == b"": + # Stop reading + return -1 + # Find the offset of the first occurrence of content in the block + block_offset = block_data.find(content) + if block_offset != -1: + # Found the offset in the block + return offset + block_offset + # Continue reading + offset += len(block_data) + def find_end_offset_file(file: io.BufferedIOBase) -> int: """ @@ -220,6 +262,11 @@ def is_minimal_module(): return False +_task_log_search_worker_pool = concurrent.futures.ThreadPoolExecutor( + max_workers=RAY_DASHBOARD_LOG_TASK_LOG_SEARCH_MAX_WORKER_COUNT +) + + class LogAgentV1Grpc(dashboard_utils.DashboardAgentModule): def __init__(self, dashboard_agent): super().__init__(dashboard_agent) @@ -252,6 +299,77 @@ async def ListLogs(self, request, context): log_files.append(p.name) return reporter_pb2.ListLogsReply(log_files=log_files) + @classmethod + async def _find_task_log_offsets( + cls, task_id: str, attempt_number: int, lines: int, f: io.BufferedIOBase + ) -> Tuple[int, int]: + """Find the start and end offsets in the log file for a task attempt + Current task log is in the format of below: + + :job_id:xxx + :task_name:xxx + :task_attempt_start:- + ... + actual user logs + ... + :task_attempt_end:- + ... (other tasks) + + + For async actor tasks, task logs from multiple tasks might however + be interleaved. 
+ """ + + # Find start + task_attempt_start_magic_line = ( + f"{LOG_PREFIX_TASK_ATTEMPT_START}{task_id}-{attempt_number}\n" + ) + + # Offload the heavy IO CPU work to a thread pool to avoid blocking the + # event loop for concurrent requests. + task_attempt_magic_line_offset = ( + await asyncio.get_running_loop().run_in_executor( + _task_log_search_worker_pool, + find_offset_of_content_in_file, + f, + task_attempt_start_magic_line.encode(), + ) + ) + + if task_attempt_magic_line_offset == -1: + raise FileNotFoundError( + f"Log for task attempt({task_id},{attempt_number}) not found" + ) + start_offset = task_attempt_magic_line_offset + len( + task_attempt_start_magic_line + ) + + # Find the end of the task log, which is the start of the next task log if any + # with the LOG_PREFIX_TASK_ATTEMPT_END magic line. + task_attempt_end_magic_line = ( + f"{LOG_PREFIX_TASK_ATTEMPT_END}{task_id}-{attempt_number}\n" + ) + end_offset = await asyncio.get_running_loop().run_in_executor( + _task_log_search_worker_pool, + find_offset_of_content_in_file, + f, + task_attempt_end_magic_line.encode(), + start_offset, + ) + + if end_offset == -1: + # No other tasks (might still be running), stream til the end. + end_offset = find_end_offset_file(f) + + if lines != -1: + # Tail lines specified, find end_offset - lines offsets. + start_offset = max( + find_start_offset_last_n_lines_from_offset(f, end_offset, lines), + start_offset, + ) + + return start_offset, end_offset + async def StreamLog(self, request, context): """ Streams the log in real time starting from `request.lines` number of lines from @@ -265,6 +383,7 @@ async def StreamLog(self, request, context): # NOTE: If the client side connection is closed, this handler will # be automatically terminated. 
lines = request.lines if request.lines else 1000 + task_id = request.task_id if request.HasField("task_id") else None filepath = f"{self._dashboard_agent.log_dir}/{request.log_file_name}" if not os.path.isfile(filepath): @@ -279,7 +398,20 @@ async def StreamLog(self, request, context): start_offset = 0 end_offset = find_end_offset_file(f) - if lines != -1: + if task_id is not None: # Stream from task log. + attempt_number = ( + request.attempt_number + if request.HasField("attempt_number") + else 0 + ) + start_offset, end_offset = await self._find_task_log_offsets( + task_id, attempt_number, lines, f + ) + logger.info( + f"Tailing task logs from {start_offset} to {end_offset} for" + f"task attempt({task_id}, {attempt_number}) in {f.name}" + ) + elif lines != -1: # Default tailing files # If specified tail line number, # look for the file offset with the line count start_offset = find_start_offset_last_n_lines_from_offset( @@ -299,8 +431,8 @@ async def StreamLog(self, request, context): end_offset = -1 logger.info( - f"Tailing logs from {start_offset} to {end_offset} for {lines}, " - f"with keep_alive={keep_alive_interval_sec}" + f"Tailing logs from {start_offset} to {end_offset} for " + f"lines={lines}, with keep_alive={keep_alive_interval_sec}" ) # Read and send the file data in chunk diff --git a/dashboard/modules/log/log_manager.py b/dashboard/modules/log/log_manager.py index d3971b6e780a..a8c2a11564b2 100644 --- a/dashboard/modules/log/log_manager.py +++ b/dashboard/modules/log/log_manager.py @@ -4,7 +4,11 @@ from collections import defaultdict from typing import List, Optional, Dict, AsyncIterable, Tuple, Callable -from ray.experimental.state.common import GetLogOptions +from ray.experimental.state.common import ( + GetLogOptions, + protobuf_to_task_state_dict, + DEFAULT_RPC_TIMEOUT, +) from ray.experimental.state.exception import DataSourceUnavailable from ray.experimental.state.state_manager import StateDataSourceClient @@ -76,6 +80,7 @@ async def 
stream_logs( log_filename=options.filename, actor_id=options.actor_id, task_id=options.task_id, + attempt_number=options.attempt_number, pid=options.pid, get_actor_fn=DataSource.actors.get, timeout=options.timeout, @@ -93,6 +98,8 @@ async def stream_logs( # otherwise the stream will be terminated forcefully # after the deadline is expired. timeout=options.timeout if not keep_alive else None, + task_id=options.task_id, + attempt_number=options.attempt_number, ) async for streamed_log in stream: @@ -110,16 +117,55 @@ def _verify_node_registered(self, node_id: str): ) assert node_id is not None - async def resolve_filename( + async def _resolve_worker_file( self, - *, node_id: str, - log_filename: Optional[str], - actor_id: Optional[str], - task_id: Optional[str], - pid: Optional[str], - get_actor_fn: Callable[[str], Dict], + worker_id: Optional[str], + pid: Optional[int], + suffix: str, timeout: int, + ) -> Optional[str]: + """Resolve worker log file.""" + if worker_id is not None and pid is not None: + raise ValueError( + f"Only one of worker id({worker_id}) or pid({pid}) should be provided." + ) + + if worker_id is not None: + log_files = await self.list_logs( + node_id, timeout, glob_filter=f"*{worker_id}*{suffix}" + ) + else: + log_files = await self.list_logs( + node_id, timeout, glob_filter=f"*{pid}*{suffix}" + ) + + # Find matching worker logs. 
+ for filename in [*log_files["worker_out"], *log_files["worker_err"]]: + # Worker logs look like worker-[worker_id]-[job_id]-[pid].out + if worker_id is not None: + worker_id_from_filename = WORKER_LOG_PATTERN.match(filename).group(1) + if worker_id_from_filename == worker_id: + return filename + else: + worker_pid_from_filename = int( + WORKER_LOG_PATTERN.match(filename).group(3) + ) + if worker_pid_from_filename == pid: + return filename + return None + + async def resolve_filename( + self, + *, + node_id: Optional[str] = None, + log_filename: Optional[str] = None, + actor_id: Optional[str] = None, + task_id: Optional[str] = None, + attempt_number: Optional[int] = None, + pid: Optional[str] = None, + get_actor_fn: Optional[Callable[[str], Dict]] = None, + timeout: int = DEFAULT_RPC_TIMEOUT, suffix: str = "out", ) -> Tuple[str, str]: """Return the file name given all options. @@ -137,6 +183,9 @@ async def resolve_filename( resolving by other ids'. Default to "out". """ if actor_id: + if get_actor_fn is None: + raise ValueError("get_actor_fn needs to be specified for actor_id") + actor_data = get_actor_fn(actor_id) if actor_data is None: raise ValueError(f"Actor ID {actor_id} not found.") @@ -158,34 +207,71 @@ async def resolve_filename( ) self._verify_node_registered(node_id) - # List all worker logs that match actor's worker id. - log_files = await self.list_logs( - node_id, timeout, glob_filter=f"*{worker_id}*{suffix}" + log_filename = await self._resolve_worker_file( + node_id=node_id, + worker_id=worker_id, + pid=None, + suffix=suffix, + timeout=timeout, ) - - # Find matching worker logs. 
- for filename in [*log_files["worker_out"], *log_files["worker_err"]]: - # Worker logs look like worker-[worker_id]-[job_id]-[pid].out - worker_id_from_filename = WORKER_LOG_PATTERN.match(filename).group(1) - if worker_id_from_filename == worker_id: - log_filename = filename - break elif task_id: - raise NotImplementedError("task_id is not supported yet.") - elif pid: - self._verify_node_registered(node_id) - log_files = await self.list_logs( - node_id, timeout, glob_filter=f"*{pid}*{suffix}" + reply = await self.client.get_all_task_info( + filters=[("task_id", "=", task_id)], timeout=timeout ) - for filename in [*log_files["worker_out"], *log_files["worker_err"]]: - # worker-[worker_id]-[job_id]-[pid].out - worker_pid_from_filename = int( - WORKER_LOG_PATTERN.match(filename).group(3) + # Check if the task is found. + if len(reply.events_by_task) == 0: + raise FileNotFoundError( + f"Could not find log file for task: {task_id}" + f" (attempt {attempt_number}) with suffix: {suffix}" ) - if worker_pid_from_filename == pid: - log_filename = filename + task_event = None + for t in reply.events_by_task: + if t.attempt_number == attempt_number: + task_event = t break + if task_event is None: + raise FileNotFoundError( + "Could not find log file for task attempt:" + f"{task_id}({attempt_number})" + ) + + # Get the worker id and node id. + task = protobuf_to_task_state_dict(task_event) + + worker_id = task.get("worker_id", None) + node_id = task.get("node_id", None) + + if worker_id is None or node_id is None: + raise FileNotFoundError( + "Could not find log file for task attempt:" + f"{task_id}({attempt_number})." 
+ f"Worker id = {worker_id}, node id = {node_id}" + ) + + log_filename = await self._resolve_worker_file( + node_id=node_id, + worker_id=worker_id, + pid=None, + suffix=suffix, + timeout=timeout, + ) + + elif pid: + if node_id is None: + raise ValueError( + "Node id needs to be specified for resolving" + f" filenames of pid {pid}" + ) + self._verify_node_registered(node_id) + log_filename = await self._resolve_worker_file( + node_id=node_id, + worker_id=None, + pid=pid, + suffix=suffix, + timeout=timeout, + ) + if log_filename is None: raise FileNotFoundError( "Could not find a log file. Please make sure the given " diff --git a/dashboard/modules/state/state_head.py b/dashboard/modules/state/state_head.py index 93b9592ca867..79e13a078f4a 100644 --- a/dashboard/modules/state/state_head.py +++ b/dashboard/modules/state/state_head.py @@ -410,6 +410,7 @@ async def get_logs(self, req: aiohttp.web.Request): lines=req.query.get("lines", DEFAULT_LOG_LIMIT), interval=req.query.get("interval", None), suffix=req.query.get("suffix", "out"), + attempt_number=req.query.get("attempt_number", 0), ) response = aiohttp.web.StreamResponse() diff --git a/python/ray/_private/log_monitor.py b/python/ray/_private/log_monitor.py index 444ac5b34bec..76944d42ec03 100644 --- a/python/ray/_private/log_monitor.py +++ b/python/ray/_private/log_monitor.py @@ -366,6 +366,13 @@ def flush(): file_info.job_id = next_line.split( ray_constants.LOG_PREFIX_JOB_ID, 1 )[1] + elif next_line.startswith( + ray_constants.LOG_PREFIX_TASK_ATTEMPT_START + ) or next_line.startswith( + ray_constants.LOG_PREFIX_TASK_ATTEMPT_END + ): + # Ignore these magic tokens for task logs. 
+            pass
             elif next_line.startswith(
                 "Windows fatal exception: access violation"
             ):
diff --git a/python/ray/_private/ray_constants.py b/python/ray/_private/ray_constants.py
index c34c50199fa7..7bbad5f5aa1c 100644
--- a/python/ray/_private/ray_constants.py
+++ b/python/ray/_private/ray_constants.py
@@ -284,6 +284,10 @@ def env_set_by_user(key):
 LOG_PREFIX_TASK_NAME = ":task_name:"
 # Job ids are recorded in the logs with this magic token as a prefix.
 LOG_PREFIX_JOB_ID = ":job_id:"
+# Task attempts magic token marking the beginning of the task logs
+LOG_PREFIX_TASK_ATTEMPT_START = ":task_attempt_start:"
+# Task attempts magic token marking the end of the task logs
+LOG_PREFIX_TASK_ATTEMPT_END = ":task_attempt_end:"
 
 # The object metadata field uses the following format: It is a comma
 # separated list of fields. The first field is mandatory and is the
diff --git a/python/ray/_raylet.pyx b/python/ray/_raylet.pyx
index 3c929ade46ef..25cdd67bbdb1 100644
--- a/python/ray/_raylet.pyx
+++ b/python/ray/_raylet.pyx
@@ -892,7 +892,16 @@ cdef void execute_task(
                 actor_title = f"{class_name}({args!r}, {kwargs!r})"
                 core_worker.set_actor_title(actor_title.encode("utf-8"))
 
-        worker.record_task_log_start()
+        # Record the task id via magic token in the log file.
+        # This will be used to locate the beginning of logs from a task.
+        attempt_number = core_worker.get_current_task_attempt_number()
+        task_attempt_magic_token = "{}{}-{}\n".format(
+            ray_constants.LOG_PREFIX_TASK_ATTEMPT_START, task_id.hex(),
+            attempt_number)
+        # Print on both .out and .err
+        print(task_attempt_magic_token, end="")
+        print(task_attempt_magic_token, file=sys.stderr, end="")
+
         # Execute the task.
         with core_worker.profile_event(b"task:execute"):
             task_exception = True
@@ -943,9 +952,14 @@ cdef void execute_task(
                             exc_info=True)
                     raise e
                 finally:
-                    # Record the task logs end offsets regardless of
-                    # task execution results.
-                    worker.record_task_log_end()
+                    # Record the end of task via magic token in the log file.
+ # This will be used to locate the end of logs from a task. + task_attempt_magic_token = "{}{}-{}\n".format( + ray_constants.LOG_PREFIX_TASK_ATTEMPT_END, task_id.hex(), + attempt_number) + # Print on both .out and .err + print(task_attempt_magic_token, end="") + print(task_attempt_magic_token, file=sys.stderr, end="") if returns[0].size() == 1 and not inspect.isgenerator(outputs): # If there is only one return specified, we should return @@ -1899,6 +1913,9 @@ cdef class CoreWorker: return TaskID( CCoreWorkerProcess.GetCoreWorker().GetCurrentTaskId().Binary()) + def get_current_task_attempt_number(self): + return CCoreWorkerProcess.GetCoreWorker().GetCurrentTaskAttemptNumber() + def get_task_depth(self): return CCoreWorkerProcess.GetCoreWorker().GetTaskDepth() diff --git a/python/ray/experimental/state/api.py b/python/ray/experimental/state/api.py index c20454118bba..b8873af0ec1c 100644 --- a/python/ray/experimental/state/api.py +++ b/python/ray/experimental/state/api.py @@ -1144,6 +1144,7 @@ def get_log( suffix: str = "out", encoding: Optional[str] = "utf-8", errors: Optional[str] = "strict", + attempt_number: int = 0, _interval: Optional[float] = None, ) -> Generator[str, None, None]: """Retrieve log file based on file name or some entities ids (pid, actor id, task id). @@ -1179,6 +1180,7 @@ def get_log( "utf-8". Use None to get binary data directly. errors: The error handling scheme to use for decoding errors. Default is "strict". See https://docs.python.org/3/library/codecs.html#error-handlers + attempt_number: The attempt number of the task if getting logs generated by a task. _interval: The interval in secs to print new logs when `follow=True`. 
Return: @@ -1204,11 +1206,12 @@ def get_log( media_type=media_type, timeout=timeout, suffix=suffix, + attempt_number=attempt_number, ) options_dict = {} for field in fields(options): option_val = getattr(options, field.name) - if option_val: + if option_val is not None: options_dict[field.name] = option_val with requests.get( diff --git a/python/ray/experimental/state/common.py b/python/ray/experimental/state/common.py index 7433a1040822..3471ad03a35b 100644 --- a/python/ray/experimental/state/common.py +++ b/python/ray/experimental/state/common.py @@ -278,9 +278,10 @@ class GetLogOptions: filename: Optional[str] = None # The actor id of the log. It is used only for worker logs. actor_id: Optional[str] = None - # The task id of the log. It is used only for worker logs. - # This is currently not working. TODO(sang): Support task log. + # The task id of the log. task_id: Optional[str] = None + # The attempt number of the task. + attempt_number: int = 0 # The pid of the log. It is used only for worker logs. pid: Optional[int] = None # Total log lines to return. 
@@ -299,9 +300,6 @@ def __post_init__(self): self.interval = float(self.interval) self.lines = int(self.lines) - if self.task_id: - raise NotImplementedError("task_id is not supported yet.") - if self.media_type == "file": assert self.interval is None if self.media_type not in ["file", "stream"]: diff --git a/python/ray/experimental/state/state_cli.py b/python/ray/experimental/state/state_cli.py index da88e6d7d02e..1b19db874701 100644 --- a/python/ray/experimental/state/state_cli.py +++ b/python/ray/experimental/state/state_cli.py @@ -812,6 +812,8 @@ def _print_log( suffix: str = "out", encoding: str = "utf-8", encoding_errors: str = "strict", + task_id: Optional[str] = None, + attempt_number: int = 0, ): """Wrapper around `get_log()` that prints the preamble and the log lines""" if tail > 0: @@ -838,6 +840,8 @@ def _print_log( suffix=suffix, encoding=encoding, errors=encoding_errors, + task_id=task_id, + attempt_number=attempt_number, ): print(chunk, end="", flush=True) @@ -877,6 +881,12 @@ def _print_log( ``` ray logs actor --id ABC --follow ``` + + [ray logs task] Get the std err generated by a task. + + ``` + ray logs task --id --err + ``` """ @@ -1161,3 +1171,72 @@ def log_worker( timeout=timeout, suffix="err" if err else "out", ) + + +@logs_state_cli_group.command(name="task") +@click.option( + "--id", + "task_id", + required=True, + type=str, + help="Retrieves the logs from the task with this task id.", +) +@click.option( + "--attempt-number", + "-a", + required=False, + type=int, + default=0, + help="Retrieves the logs from the attempt, default to 0", +) +@address_option +@log_follow_option +@log_interval_option +@log_tail_option +@log_timeout_option +@log_suffix_option +@click.pass_context +@PublicAPI(stability="alpha") +def log_task( + ctx, + task_id: Optional[str], + attempt_number: int, + address: Optional[str], + follow: bool, + interval: float, + tail: int, + timeout: int, + err: bool, +): + """Get/List logs associated with a task. 
+ + Example: + + Follow the log file from a task with task id. + + ``` + ray logs tasks --id --follow + ``` + + Get the log from a retry attempt 1 from a task. + + ``` + ray logs tasks --id -a 1 + ``` + + Raises: + :class:`RayStateApiException ` + if the CLI is failed to query the data. + MissingParameter if inputs are missing. + """ # noqa: E501 + + _print_log( + address=address, + task_id=task_id, + attempt_number=attempt_number, + follow=follow, + tail=tail, + interval=interval, + timeout=timeout, + suffix="err" if err else "out", + ) diff --git a/python/ray/experimental/state/state_manager.py b/python/ray/experimental/state/state_manager.py index 19e1fa318e38..89d1e4340a37 100644 --- a/python/ray/experimental/state/state_manager.py +++ b/python/ray/experimental/state/state_manager.py @@ -360,7 +360,6 @@ async def get_task_info( ) -> Optional[GetTasksInfoReply]: if not limit: limit = RAY_MAX_LIMIT_FROM_DATA_SOURCE - stub = self._raylet_stubs.get(node_id) if not stub: raise ValueError(f"Raylet for a node id, {node_id} doesn't exist.") @@ -424,6 +423,8 @@ async def stream_log( lines: int, interval: Optional[float], timeout: int, + task_id: Optional[str] = None, + attempt_number: Optional[int] = None, ) -> UnaryStreamCall: stub = self._log_agent_stub.get(node_id) if not stub: @@ -434,6 +435,8 @@ async def stream_log( log_file_name=log_file_name, lines=lines, interval=interval, + task_id=task_id, + attempt_number=attempt_number, ), timeout=timeout, ) diff --git a/python/ray/includes/libcoreworker.pxd b/python/ray/includes/libcoreworker.pxd index f1763aa89b35..c847e2938628 100644 --- a/python/ray/includes/libcoreworker.pxd +++ b/python/ray/includes/libcoreworker.pxd @@ -151,6 +151,7 @@ cdef extern from "ray/core_worker/core_worker.h" nogil: CJobID GetCurrentJobId() CTaskID GetCurrentTaskId() + int64_t GetCurrentTaskAttemptNumber() CNodeID GetCurrentNodeId() int64_t GetTaskDepth() c_bool GetCurrentTaskRetryExceptions() diff --git 
a/python/ray/tests/test_state_api_log.py b/python/ray/tests/test_state_api_log.py index a5f0ed9bdfbe..258be91f70bb 100644 --- a/python/ray/tests/test_state_api_log.py +++ b/python/ray/tests/test_state_api_log.py @@ -1,6 +1,7 @@ import json import os import sys +import asyncio from typing import List from unittest.mock import MagicMock @@ -16,18 +17,29 @@ wait_for_condition, wait_until_server_available, ) + +from ray._private.ray_constants import ( + LOG_PREFIX_TASK_ATTEMPT_START, + LOG_PREFIX_TASK_ATTEMPT_END, +) from ray._raylet import ActorID, NodeID, TaskID, WorkerID from ray.core.generated.common_pb2 import Address -from ray.core.generated.gcs_pb2 import ActorTableData +from ray.core.generated.gcs_service_pb2 import GetTaskEventsReply from ray.core.generated.reporter_pb2 import ListLogsReply, StreamLogReply +from ray.core.generated.gcs_pb2 import ( + ActorTableData, + TaskEvents, + TaskStateUpdate, +) from ray.dashboard.modules.actor.actor_head import actor_table_data_to_dict from ray.dashboard.modules.log.log_agent import ( + find_offset_of_content_in_file, find_end_offset_file, find_end_offset_next_n_lines_from_offset, find_start_offset_last_n_lines_from_offset, + LogAgentV1Grpc, ) from ray.dashboard.modules.log.log_agent import _stream_log_in_chunk - from ray.dashboard.modules.log.log_manager import LogsManager from ray.dashboard.tests.conftest import * # noqa from ray.experimental.state.api import get_log, list_logs, list_nodes, list_workers @@ -44,6 +56,19 @@ ASYNCMOCK_MIN_PYTHON_VER = (3, 8) +def generate_task_event(task_id, node_id, attempt_number, worker_id): + task_event = TaskEvents( + task_id=task_id.binary(), + attempt_number=attempt_number, + job_id=b"", + state_updates=TaskStateUpdate( + node_id=node_id.binary(), worker_id=worker_id.binary() + ), + ) + + return task_event + + def generate_actor_data(id, node_id, worker_id): if worker_id: worker_id = worker_id.binary() @@ -86,6 +111,9 @@ async def _stream_log(context, fp, start, end): return result 
+TEST_LINE_TEMPLATE = "{}-test-line" + + def _write_lines_and_get_offset_at_index( f, num_lines, start_offset=0, trailing_new_line=True ): @@ -110,9 +138,9 @@ def _write_lines_and_get_offset_at_index( offsets.append(f.tell()) if i == num_lines - 1 and not trailing_new_line: # Last line no newline - line = f"{i}-test-line" + line = TEST_LINE_TEMPLATE.format(i) else: - line = f"{i}-test-line\n" + line = TEST_LINE_TEMPLATE.format(i) + "\n" f.write(line.encode("utf-8")) f.flush() @@ -182,6 +210,40 @@ def test_find_end_offset_next_n_lines_from_offset(temp_file): assert find_end_offset_next_n_lines_from_offset(file, o[1] - 1, 1) == o[1] +def test_find_offset_of_content_in_file(temp_file): + file = temp_file + o, end_file = _write_lines_and_get_offset_at_index(file, num_lines=10) + + assert ( + find_offset_of_content_in_file( + file, TEST_LINE_TEMPLATE.format(0).encode("utf-8") + ) + == o[0] + ) + + assert ( + find_offset_of_content_in_file( + file, TEST_LINE_TEMPLATE.format(3).encode("utf-8"), o[1] + 1 + ) + == o[3] + ) + + assert ( + find_offset_of_content_in_file( + file, TEST_LINE_TEMPLATE.format(4).encode("utf-8"), o[1] - 1 + ) + == o[4] + ) + + # Not found + assert ( + find_offset_of_content_in_file( + file, TEST_LINE_TEMPLATE.format(1000).encode("utf-8"), o[1] - 1 + ) + == -1 + ) + + @pytest.mark.asyncio @pytest.mark.parametrize("random_ascii_file", [1 << 20], indirect=True) @pytest.mark.parametrize( @@ -288,6 +350,48 @@ async def test_log_tails_with_appends(lines_to_tail, total_lines, temp_file): ), "Non-matching number of lines tailed after append" +@pytest.mark.asyncio +async def test_log_agent_find_task_log_offsets(temp_file): + log_file_content = "" + task_id = "taskid1234" + attempt_number = 0 + # Previous data + for i in range(3): + log_file_content += TEST_LINE_TEMPLATE.format(i) + "\n" + # Task's logs + log_file_content += f"{LOG_PREFIX_TASK_ATTEMPT_START}{task_id}-{attempt_number}\n" + expected_start = len(log_file_content) + for i in range(10): + 
log_file_content += TEST_LINE_TEMPLATE.format(i) + "\n" + expected_end = len(log_file_content) + log_file_content += f"{LOG_PREFIX_TASK_ATTEMPT_END}{task_id}-{attempt_number}\n" + + # Next data + for i in range(3): + log_file_content += TEST_LINE_TEMPLATE.format(i) + "\n" + + # Write to files + temp_file.write(log_file_content.encode("utf-8")) + + # Test all task logs + start_offset, end_offset = await LogAgentV1Grpc._find_task_log_offsets( + task_id, attempt_number, -1, temp_file + ) + assert start_offset == expected_start + assert end_offset == expected_end + + # Test tailing last X lines + num_tail = 3 + start_offset, end_offset = await LogAgentV1Grpc._find_task_log_offsets( + task_id, attempt_number, num_tail, temp_file + ) + assert end_offset == expected_end + exclude_tail_content = "" + for i in range(10 - num_tail): + exclude_tail_content += TEST_LINE_TEMPLATE.format(i) + "\n" + assert start_offset == expected_start + len(exclude_tail_content) + + # Unit Tests (LogsManager) @@ -442,17 +546,33 @@ def get_actor_fn(id): """ Test task id is given. """ - with pytest.raises(NotImplementedError): - task_id = TaskID(b"2" * 24) - log_file_name, n = await logs_manager.resolve_filename( - node_id=node_id.hex(), - log_filename=None, - actor_id=None, - task_id=task_id, - pid=None, - get_actor_fn=lambda _: generate_actor_data(actor_id, node_id, worker_id), - timeout=10, - ) + task_id = TaskID(b"2" * 24) + logs_client = logs_manager.data_source_client + logs_client.get_all_task_info = AsyncMock() + logs_client.get_all_task_info.return_value = GetTaskEventsReply( + events_by_task=[ + generate_task_event(task_id, node_id, attempt_number=1, worker_id=worker_id) + ] + ) + logs_manager.list_logs.return_value = { + "worker_out": [f"worker-{worker_id.hex()}-123-123.out"], + "worker_err": [], + } + + # Expect resolved file. + filename, n = await logs_manager.resolve_filename(task_id=task_id, attempt_number=1) + # Default out file. 
See generate_task_event() for filename + assert filename == f"worker-{worker_id.hex()}-123-123.out" + assert n == node_id.hex() + + # Wrong task attempt + with pytest.raises(FileNotFoundError): + await logs_manager.resolve_filename(task_id=task_id, attempt_number=0) + + # No task found + logs_client.get_all_task_info.return_value = GetTaskEventsReply(events_by_task=[]) + with pytest.raises(FileNotFoundError): + await logs_manager.resolve_filename(task_id=TaskID(b"1" * 24), attempt_number=1) """ Test pid is given. @@ -587,6 +707,8 @@ async def test_logs_manager_stream_log(logs_manager): lines=10, interval=None, timeout=30, + task_id=None, + attempt_number=0, ) # Test pid, media_type = "stream", node_ip @@ -614,6 +736,8 @@ async def test_logs_manager_stream_log(logs_manager): lines=10, interval=0.5, timeout=None, + task_id=None, + attempt_number=0, ) # Currently cannot test actor_id with AsyncMock. @@ -656,6 +780,8 @@ async def test_logs_manager_keepalive_no_timeout(logs_manager): lines=10, interval=None, timeout=None, + task_id=None, + attempt_number=0, ) @@ -788,24 +914,22 @@ def getpid(self): # Test stream and fetching by actor id stream_response = requests.get( webui_url - + "/api/v0/logs/stream?&lines=2" + + "/api/v0/logs/stream?&lines=-1" + f"&actor_id={actor._ray_actor_id.hex()}", stream=True, ) if stream_response.status_code != 200: raise ValueError(stream_response.content.decode("utf-8")) stream_iterator = stream_response.iter_content(chunk_size=None) - # NOTE: Prefix 1 indicates the stream has succeeded. 
- assert ( - next(stream_iterator).decode("utf-8") - == "1:actor_name:Actor\n" + test_log_text.format("XXXXXX") + "\n" - ) + actual_output = next(stream_iterator).decode("utf-8") + assert "actor_name:Actor\n" in actual_output + assert test_log_text.format("XXXXXX") in actual_output streamed_string = "" for i in range(5): strings = [] - for j in range(100): - strings.append(test_log_text.format(f"{100*i + j:06d}")) + for j in range(3): + strings.append(test_log_text.format(f"{3*i + j:06d}")) ray.get(actor.write_log.remote(strings)) @@ -814,7 +938,7 @@ def getpid(self): string += s + "\n" streamed_string += string # NOTE: Prefix 1 indicates the stream has succeeded. - assert next(stream_iterator).decode("utf-8") == "1" + string + assert string in next(stream_iterator).decode("utf-8") del stream_response # Test tailing log by actor id @@ -826,7 +950,8 @@ def getpid(self): + actor._ray_actor_id.hex(), ).content.decode("utf-8") # NOTE: Prefix 1 indicates the stream has succeeded. - assert file_response == "1" + "\n".join(streamed_string.split("\n")[-(LINES + 1) :]) + for line in streamed_string.split("\n")[-(LINES + 1) :]: + assert line in file_response # Test query by pid & node_ip instead of actor id. node_ip = list(ray.nodes())[0]["NodeManagerAddress"] @@ -837,7 +962,8 @@ def getpid(self): + f"&pid={pid}", ).content.decode("utf-8") # NOTE: Prefix 1 indicates the stream has succeeded. 
- assert file_response == "1" + "\n".join(streamed_string.split("\n")[-(LINES + 1) :]) + for line in streamed_string.split("\n")[-(LINES + 1) :]: + assert line in file_response def test_log_list(ray_start_cluster): @@ -936,10 +1062,6 @@ def verify(): wait_for_condition(verify) - with pytest.raises(NotImplementedError): - for _ in get_log(task_id=123, tail=10): - pass - del a """ Test log suffix selection for worker/actor @@ -1053,6 +1175,99 @@ def verify(): wait_for_condition(verify) + # Test running task logs + @ray.remote + def sleep_task(out_msg): + print(out_msg, end="", file=sys.stdout) + import time + + time.sleep(10) + + expected_out = "This is a test log from stdout\n" + task = sleep_task.remote(expected_out) + + def verify(): + lines = get_log(task_id=task.task_id().hex()) + assert expected_out == "".join(lines) + + return True + + wait_for_condition(verify) + + # Test get log by multiple task id + @ray.remote + def task_log(): + out_msg = "This is a test log from stdout\n" + print(out_msg, end="", file=sys.stdout) + err_msg = "THIS IS A TEST LOG FROM STDERR\n" + print(err_msg, end="", file=sys.stderr) + + return out_msg, err_msg + + # Run some other tasks before and after to make sure task + # log only outputs the task's log. + ray.get(task_log.remote()) + task = task_log.remote() + expected_out, expected_err = ray.get(task) + ray.get(task_log.remote()) + + def verify(): + lines = get_log(task_id=task.task_id().hex()) + assert expected_out == "".join(lines) + + # Test suffix + lines = get_log(task_id=task.task_id().hex(), suffix="err") + assert expected_err == "".join(lines) + + return True + + wait_for_condition(verify) + + # Test actor task logs with interleaving logs. 
+ @ray.remote + class Actor: + async def print_log(self, x, out_msg): + for _ in range(3): + print(out_msg, end="", file=sys.stdout) + await asyncio.sleep(1) + + actor = Actor.options(max_concurrency=2).remote() + out_msg = "[{name}]: This is a test log from stdout\n" + task_a = actor.print_log.remote("a", out_msg.format(name="a")) + task_b = actor.print_log.remote("b", out_msg.format(name="b")) + ray.get([task_a, task_b]) + + def verify(): + lines = get_log(task_id=task_a.task_id().hex()) + actual_output = "".join(lines) + assert actual_output.count(out_msg.format(name="a")) == 3 + + lines = get_log(task_id=task_b.task_id().hex()) + actual_output = "".join(lines) + assert actual_output.count(out_msg.format(name="b")) == 3 + + return True + + wait_for_condition(verify) + + # Test task logs tail with lines. + expected_out = [f"task-{i}\n" for i in range(5)] + + @ray.remote + def f(): + print("".join(expected_out), end="", file=sys.stdout) + + t = f.remote() + ray.get(t) + + def verify(): + lines = get_log(task_id=t.task_id().hex(), tail=2) + actual_output = "".join(lines) + assert actual_output == "".join(expected_out[-2:]) + return True + + wait_for_condition(verify) + def test_log_cli(shutdown_only): ray.init(num_cpus=1) diff --git a/src/ray/core_worker/core_worker.h b/src/ray/core_worker/core_worker.h index b87621238f4a..55f89fae9ed6 100644 --- a/src/ray/core_worker/core_worker.h +++ b/src/ray/core_worker/core_worker.h @@ -348,6 +348,12 @@ class CoreWorker : public rpc::CoreWorkerServiceHandler { const TaskID &GetCurrentTaskId() const { return worker_context_.GetCurrentTaskID(); } + int64_t GetCurrentTaskAttemptNumber() const { + return worker_context_.GetCurrentTask() != nullptr + ? 
worker_context_.GetCurrentTask()->AttemptNumber()
+               : 0;
+  }
+
   JobID GetCurrentJobId() const { return worker_context_.GetCurrentJobID(); }
 
   const int64_t GetTaskDepth() const { return worker_context_.GetTaskDepth(); }
diff --git a/src/ray/protobuf/reporter.proto b/src/ray/protobuf/reporter.proto
index cc79e8f10875..b2425ae1eec5 100644
--- a/src/ray/protobuf/reporter.proto
+++ b/src/ray/protobuf/reporter.proto
@@ -100,6 +100,10 @@ message StreamLogRequest {
   // if keep_alive is true, this indicates how frequently to poll the
   // log file for new lines
   optional float interval = 4;
+  // Task id to start streaming from in this file.
+  optional string task_id = 5;
+  // Attempt number of the task.
+  optional int64 attempt_number = 6;
 }
 
 message StreamLogReply {

From 0c73afe634a071fe95353a33c4ed9e4b4e5b17d7 Mon Sep 17 00:00:00 2001
From: Ricky Xu
Date: Fri, 12 May 2023 06:35:10 +0800
Subject: [PATCH 353/424] [core][dashboard] Task backend GC policy - worker
 update [1/3] (#34896)

This is the series of PRs that improve GC policy for task backend.

The overall goal of the stack is to make data loss at a task attempt
granularity: if a task attempt incurred some data loss (due to number of task
events enforced at the worker/ GCS), all the status change w.r.t that task
attempt will be dropped, so there will be no partial task attempt. Right now,
individual events (e.g. task started running) could be lost for a task
attempt, which isn't great for observability.

This PR adds task attempt level data loss tracking on the worker side, by
tracking: per job profile events dropped for timeline. task attempts dropped.
Worker will send the data loss info ^ the GCS

In the subsequent PRs: GCS side will be updated Dashboard front-end will be
updated to reflect the per job profile events loss.
--- src/ray/common/ray_config_def.h | 4 + src/ray/core_worker/core_worker.cc | 4 +- src/ray/core_worker/task_event_buffer.cc | 122 ++++++++++------- src/ray/core_worker/task_event_buffer.h | 70 ++++++---- .../test/task_event_buffer_test.cc | 124 ++++++++++-------- src/ray/gcs/gcs_server/gcs_task_manager.cc | 20 ++- src/ray/gcs/gcs_server/gcs_task_manager.h | 7 + src/ray/gcs/test/gcs_test_util.h | 11 +- src/ray/protobuf/gcs.proto | 19 ++- 9 files changed, 238 insertions(+), 143 deletions(-) diff --git a/src/ray/common/ray_config_def.h b/src/ray/common/ray_config_def.h index 7920387fe452..affb9ec7c9a5 100644 --- a/src/ray/common/ray_config_def.h +++ b/src/ray/common/ray_config_def.h @@ -476,6 +476,10 @@ RAY_CONFIG(uint64_t, task_events_max_buffer_size, 100 * 1000) /// the message size, and also the processing work on GCS. RAY_CONFIG(uint64_t, task_events_send_batch_size, 10 * 1000) +/// Max number of dropped task attempt info to be sent in a single rpc call to +/// GCS for task events in rpc::TaskEventsData +RAY_CONFIG(uint64_t, task_events_drop_task_attempt_batch_size, 10 * 1000) + /// Max number of profile events allowed for a single task when sent to GCS. /// NOTE: this limit only applies to the profile events per task in a single /// report gRPC call. A task could have more profile events in GCS from multiple diff --git a/src/ray/core_worker/core_worker.cc b/src/ray/core_worker/core_worker.cc index 5f1b1c3ea9d9..5313ee0efc63 100644 --- a/src/ray/core_worker/core_worker.cc +++ b/src/ray/core_worker/core_worker.cc @@ -225,8 +225,8 @@ CoreWorker::CoreWorker(const CoreWorkerOptions &options, const WorkerID &worker_ // Initialize the task state event buffer. 
auto task_event_gcs_client = std::make_unique(options_.gcs_options); - task_event_buffer_ = - std::make_unique(std::move(task_event_gcs_client)); + task_event_buffer_ = std::make_unique( + std::move(task_event_gcs_client), worker_context_.GetCurrentJobID()); if (RayConfig::instance().task_events_report_interval_ms() > 0) { if (!task_event_buffer_->Start().ok()) { RAY_CHECK(!task_event_buffer_->Enabled()) << "TaskEventBuffer should be disabled."; diff --git a/src/ray/core_worker/task_event_buffer.cc b/src/ray/core_worker/task_event_buffer.cc index 9bb80e5a04ea..61c643cfd14e 100644 --- a/src/ray/core_worker/task_event_buffer.cc +++ b/src/ray/core_worker/task_event_buffer.cc @@ -134,8 +134,10 @@ bool TaskProfileEvent::ToRpcTaskEventsOrDrop(rpc::TaskEvents *rpc_task_events) { return false; } -TaskEventBufferImpl::TaskEventBufferImpl(std::unique_ptr gcs_client) - : work_guard_(boost::asio::make_work_guard(io_service_)), +TaskEventBufferImpl::TaskEventBufferImpl(std::unique_ptr gcs_client, + const JobID &job_id) + : job_id_(job_id), + work_guard_(boost::asio::make_work_guard(io_service_)), periodical_runner_(io_service_), gcs_client_(std::move(gcs_client)), buffer_() {} @@ -212,32 +214,37 @@ void TaskEventBufferImpl::AddTaskEvent(std::unique_ptr task_event) { if (!enabled_) { return; } - size_t num_profile_events_dropped = 0; - size_t num_status_events_dropped = 0; size_t num_add = 0; absl::MutexLock lock(&mutex_); size_t prev_size = buffer_.size(); + size_t num_profile_events_dropped = 0; { + if (task_attempts_dropped_.count(task_event->GetTaskAttempt())) { + // We are already dropping events for this task attempt. + // So don't add it to the buffer. + if (task_event->IsProfileEvent()) { + num_profile_events_dropped++; + } + return; + } + if (buffer_.full()) { const auto &to_evict = buffer_.front(); if (to_evict->IsProfileEvent()) { num_profile_events_dropped++; } else { - num_status_events_dropped++; + // Mark task attempt to be dropped. 
+ task_attempts_dropped_.insert(to_evict->GetTaskAttempt()); } } buffer_.push_back(std::move(task_event)); num_add = buffer_.size() - prev_size; } - + stats_counter_.Increment(TaskEventBufferCounter::kNumTaskEventsStored, num_add); stats_counter_.Increment( TaskEventBufferCounter::kNumTaskProfileEventDroppedSinceLastFlush, num_profile_events_dropped); - stats_counter_.Increment( - TaskEventBufferCounter::kNumTaskStatusEventDroppedSinceLastFlush, - num_status_events_dropped); - stats_counter_.Increment(TaskEventBufferCounter::kNumTaskEventsStored, num_add); } void TaskEventBufferImpl::FlushEvents(bool forced) { @@ -246,7 +253,7 @@ void TaskEventBufferImpl::FlushEvents(bool forced) { } std::vector> to_send; to_send.reserve(RayConfig::instance().task_events_send_batch_size()); - + absl::flat_hash_set task_attempts_dropped; { absl::MutexLock lock(&mutex_); @@ -260,6 +267,18 @@ void TaskEventBufferImpl::FlushEvents(bool forced) { return; } + // Get the data loss info. + size_t task_attempt_count = 0; + // iterate and erase task attempt dropped. + while (task_attempt_count < + RayConfig::instance().task_events_drop_task_attempt_batch_size() && + !task_attempts_dropped_.empty()) { + auto itr = task_attempts_dropped_.begin(); + task_attempts_dropped.insert(*itr); + task_attempts_dropped_.erase(itr); + task_attempt_count++; + } + // No data to send. if (buffer_.empty()) { return; @@ -274,9 +293,15 @@ void TaskEventBufferImpl::FlushEvents(bool forced) { buffer_.erase(buffer_.begin(), buffer_.begin() + num_to_send); } - // Aggregate + // Aggregate data to be sent. absl::flat_hash_map agg_task_events; - auto to_rpc_event_fn = [this, &agg_task_events](std::unique_ptr &event) { + auto to_rpc_event_fn = [this, &agg_task_events, &task_attempts_dropped]( + std::unique_ptr &event) { + if (task_attempts_dropped.count(event->GetTaskAttempt())) { + // We are dropping all events from the task attempt due to data loss. 
+ return; + } + if (!agg_task_events.count(event->GetTaskAttempt())) { auto inserted = agg_task_events.insert({event->GetTaskAttempt(), rpc::TaskEvents()}); @@ -284,11 +309,16 @@ void TaskEventBufferImpl::FlushEvents(bool forced) { } auto itr = agg_task_events.find(event->GetTaskAttempt()); - - if (event->ToRpcTaskEventsOrDrop(&(itr->second))) { - RAY_CHECK(event->IsProfileEvent()); - stats_counter_.Increment( - TaskEventBufferCounter::kNumTaskProfileEventDroppedSinceLastFlush); + if (event->IsProfileEvent()) { + if (event->ToRpcTaskEventsOrDrop(&(itr->second))) { + // We are dropping profile events since there are too many for a single task + // attempt. This happens frequently for driver task submitting many tasks. + stats_counter_.Increment( + TaskEventBufferCounter::kNumTaskProfileEventDroppedSinceLastFlush); + } + } else { + // We will not be dropping any status changes during conversion to rpc::TaskEvents. + RAY_CHECK(!event->ToRpcTaskEventsOrDrop(&(itr->second))); } }; std::for_each(to_send.begin(), to_send.end(), to_rpc_event_fn); @@ -296,39 +326,30 @@ void TaskEventBufferImpl::FlushEvents(bool forced) { // Convert to rpc::TaskEventsData auto data = std::make_unique(); size_t num_task_events = to_send.size(); - size_t num_profile_event_to_send = 0; - size_t num_status_event_to_send = 0; for (auto &[_task_attempt, task_event] : agg_task_events) { auto events_by_task = data->add_events_by_task(); - if (task_event.has_profile_events()) { - num_profile_event_to_send++; - } - if (task_event.has_state_updates()) { - num_status_event_to_send++; - } *events_by_task = std::move(task_event); } - // Send and reset the counters - stats_counter_.Decrement(TaskEventBufferCounter::kNumTaskEventsStored, to_send.size()); - size_t num_profile_task_events_dropped = stats_counter_.Get( + // Add the data loss info. 
+ auto num_profile_events_dropped_since_last_flush = stats_counter_.Get( TaskEventBufferCounter::kNumTaskProfileEventDroppedSinceLastFlush); + data->set_num_profile_events_dropped(num_profile_events_dropped_since_last_flush); + // Reset the counter stats_counter_.Decrement( TaskEventBufferCounter::kNumTaskProfileEventDroppedSinceLastFlush, - num_profile_task_events_dropped); - stats_counter_.Increment(TaskEventBufferCounter::kTotalNumTaskProfileEventDropped, - num_profile_task_events_dropped); - - size_t num_status_task_events_dropped = stats_counter_.Get( - TaskEventBufferCounter::kNumTaskStatusEventDroppedSinceLastFlush); - stats_counter_.Decrement( - TaskEventBufferCounter::kNumTaskStatusEventDroppedSinceLastFlush, - num_status_task_events_dropped); - stats_counter_.Increment(TaskEventBufferCounter::kTotalNumTaskStatusEventDropped, - num_status_task_events_dropped); + num_profile_events_dropped_since_last_flush); + data->set_job_id(job_id_.Binary()); + + for (auto &task_attempt : task_attempts_dropped) { + rpc::TaskAttempt rpc_task_attempt; + rpc_task_attempt.set_task_id(task_attempt.first.Binary()); + rpc_task_attempt.set_attempt_number(task_attempt.second); + *(data->add_dropped_task_attempts()) = rpc_task_attempt; + } - data->set_num_profile_task_events_dropped(num_profile_task_events_dropped); - data->set_num_status_task_events_dropped(num_status_task_events_dropped); + // Send and reset the counters + stats_counter_.Decrement(TaskEventBufferCounter::kNumTaskEventsStored, to_send.size()); gcs::TaskInfoAccessor *task_accessor; { @@ -344,6 +365,11 @@ void TaskEventBufferImpl::FlushEvents(bool forced) { RAY_LOG(WARNING) << "Failed to push " << num_task_events << " task state events to GCS. Data will be lost. 
[status=" << status.ToString() << "]"; + stats_counter_.Increment(TaskEventBufferCounter::kTotalNumTaskEventsDropped, + num_task_events); + } else { + stats_counter_.Increment(TaskEventBufferCounter::kTotalNumTaskEventsReported, + num_task_events); } grpc_in_progress_ = false; }; @@ -360,10 +386,8 @@ void TaskEventBufferImpl::FlushEvents(bool forced) { grpc_in_progress_ = false; // Fail to send, currently dropping events. - stats_counter_.Increment(TaskEventBufferCounter::kTotalNumTaskProfileEventDropped, - num_profile_event_to_send); - stats_counter_.Increment(TaskEventBufferCounter::kTotalNumTaskStatusEventDropped, - num_status_event_to_send); + stats_counter_.Increment(TaskEventBufferCounter::kTotalNumTaskEventsDropped, + num_task_events); } } } @@ -387,11 +411,7 @@ const std::string TaskEventBufferImpl::DebugString() { << 1.0 * stats[TaskEventBufferCounter::kTotalTaskEventsBytesReported] / 1024 / 1024 << " MiB" << "\n\ttotal number of task events sent: " - << stats[TaskEventBufferCounter::kTotalTaskEventsReported] - << "\n\tnum status task events dropped: " - << stats[TaskEventBufferCounter::kTotalNumTaskProfileEventDropped] - << "\n\tnum profile task events dropped: " - << stats[TaskEventBufferCounter::kTotalNumTaskStatusEventDropped] << "\n"; + << stats[TaskEventBufferCounter::kTotalNumTaskEventsReported]; return ss.str(); } diff --git a/src/ray/core_worker/task_event_buffer.h b/src/ray/core_worker/task_event_buffer.h index 1279471f86ca..7ce29cc92c9c 100644 --- a/src/ray/core_worker/task_event_buffer.h +++ b/src/ray/core_worker/task_event_buffer.h @@ -50,14 +50,16 @@ class TaskEvent { virtual ~TaskEvent() = default; - /// Convert itself a rpc::TaskEvents or drop itself due to data limit. + /// Convert itself a rpc::TaskEvents or drop it if there is data loss. /// /// NOTE: this method will modify internal states by moving fields to the /// rpc::TaskEvents. /// \param[out] rpc_task_events The rpc task event to be filled. 
- /// \return If it's dropped due to data limit. + /// \return True if data is dropped, false otherwise. virtual bool ToRpcTaskEventsOrDrop(rpc::TaskEvents *rpc_task_events) = 0; + virtual JobID GetJobId() const { return job_id_; } + /// If it is a profile event. virtual bool IsProfileEvent() const = 0; @@ -158,13 +160,17 @@ class TaskProfileEvent : public TaskEvent { /// @brief An enum class defining counters to be used in TaskEventBufferImpl. enum TaskEventBufferCounter { - kNumTaskProfileEventDroppedSinceLastFlush, - kNumTaskStatusEventDroppedSinceLastFlush, + /// Number of task events stored in the buffer. kNumTaskEventsStored, - /// Below stats are updated every flush. - kTotalNumTaskProfileEventDropped, - kTotalNumTaskStatusEventDropped, - kTotalTaskEventsReported, + /// Number of dropped task attempt stored in the buffer. + kNumTaskAttemptsDroppedStored, + /// Total number of task events dropped on the worker due to network issue. + kTotalNumTaskEventsDropped, + /// Number of profile events dropped since the last report. + kNumTaskProfileEventDroppedSinceLastFlush, + /// Total number of task events reported to GCS. + kTotalNumTaskEventsReported, + /// Total bytes of task events reported to GCS. kTotalTaskEventsBytesReported, }; @@ -173,13 +179,24 @@ enum TaskEventBufferCounter { /// /// Dropping of task events /// ======================== -/// Task events will be lost in the below cases for now: +/// Task events from task attempts will be lost in the below cases for now: /// 1. If any of the gRPC call failed, the task events will be dropped and warnings /// logged. This is probably fine since this usually indicated a much worse issue. /// /// 2. More than `RAY_task_events_max_buffer_size` tasks have been stored -/// in the buffer, any new task events will be dropped. In this case, the number of -/// dropped task events will also be included in the next flush to surface this. +/// in the buffer, oldest events in the buffer will be dropped. 
In this case, the task +/// attempts info will also be included in subsequent flush to GCS. +/// +/// For profiling events: +/// - If the number of profiling events for a task attempt exceeds the limit specified +/// by `RAY_task_events_max_num_profile_events_for_task`, any new profiling events will +/// be dropped. Dropping of profile events will not result in the entire task attempt +/// being dropped. +/// +/// For task status events: +/// - If any task status change event is dropped, the entire task attempt will be +/// dropped. The dropped task attempt info will be sent to GCS, and GCS will then drop +/// all new and existing events from the task attempt. /// /// No overloading of GCS /// ===================== @@ -244,7 +261,8 @@ class TaskEventBufferImpl : public TaskEventBuffer { /// Constructor /// /// \param gcs_client GCS client - TaskEventBufferImpl(std::unique_ptr gcs_client); + /// \param job_id Corresponding Job ID + TaskEventBufferImpl(std::unique_ptr gcs_client, const JobID &job_id); void AddTaskEvent(std::unique_ptr task_event) LOCKS_EXCLUDED(mutex_) override; @@ -266,22 +284,16 @@ class TaskEventBufferImpl : public TaskEventBuffer { } /// Test only functions. - size_t GetTotalNumStatusTaskEventsDropped() { - return stats_counter_.Get(TaskEventBufferCounter::kTotalNumTaskStatusEventDropped); - } - - /// Test only functions. - size_t GetNumStatusTaskEventsDroppedSinceLastFlush() { - return stats_counter_.Get( - TaskEventBufferCounter::kNumTaskStatusEventDroppedSinceLastFlush); + size_t GetNumTaskEventsDropped() { + return stats_counter_.Get(TaskEventBufferCounter::kTotalNumTaskEventsDropped); } - /// Test only functions. - size_t GetTotalNumProfileTaskEventsDropped() { - return stats_counter_.Get(TaskEventBufferCounter::kTotalNumTaskProfileEventDropped); + /// Test only function. + size_t GetNumTaskEventsReported() { + return stats_counter_.Get(TaskEventBufferCounter::kTotalNumTaskEventsReported); } - /// Test only functions. 
+ /// Test only function. size_t GetNumProfileTaskEventsDroppedSinceLastFlush() { return stats_counter_.Get( TaskEventBufferCounter::kNumTaskProfileEventDroppedSinceLastFlush); @@ -293,9 +305,15 @@ class TaskEventBufferImpl : public TaskEventBuffer { return gcs_client_.get(); } + /// Test only functions. + const JobID &GetJobId() const { return job_id_; } + /// Mutex guarding task_events_data_. absl::Mutex mutex_; + /// Job id. + const JobID job_id_; + /// IO service event loop owned by TaskEventBuffer. instrumented_io_context io_service_; @@ -325,6 +343,10 @@ class TaskEventBufferImpl : public TaskEventBuffer { /// process them quick enough. std::atomic grpc_in_progress_ = false; + /// Task attempts dropped on this worker that are to be reported to GCS. Reported + /// data loss will be removed. + absl::flat_hash_set task_attempts_dropped_ GUARDED_BY(mutex_); + FRIEND_TEST(TaskEventBufferTestManualStart, TestGcsClientFail); FRIEND_TEST(TaskEventBufferTestBatchSend, TestBatchedSend); FRIEND_TEST(TaskEventBufferTest, TestAddEvent); diff --git a/src/ray/core_worker/test/task_event_buffer_test.cc b/src/ray/core_worker/test/task_event_buffer_test.cc index 7621294d6efc..40a46f3cd1f3 100644 --- a/src/ray/core_worker/test/task_event_buffer_test.cc +++ b/src/ray/core_worker/test/task_event_buffer_test.cc @@ -44,7 +44,7 @@ class TaskEventBufferTest : public ::testing::Test { )"); task_event_buffer_ = std::make_unique( - std::make_unique()); + std::make_unique(), JobID::FromInt(1)); } virtual void SetUp() { RAY_CHECK_OK(task_event_buffer_->Start(/*auto_flush*/ false)); } @@ -66,13 +66,15 @@ class TaskEventBufferTest : public ::testing::Test { task_id, JobID::FromInt(0), attempt_num, rpc::TaskStatus::RUNNING, running_ts); } - std::unique_ptr GenProfileTaskEvent(TaskID task_id, int32_t attempt_num) { + std::unique_ptr GenProfileTaskEvent(TaskID task_id, + int32_t attempt_num, + JobID job_id = JobID::FromInt(0)) { return std::make_unique( - task_id, JobID::FromInt(0), 
attempt_num, "", "", "", "test_event", 1); + task_id, job_id, attempt_num, "", "", "", "test_event", 1); } - static void CompareTaskEventData(const rpc::TaskEventData &actual_data, - const rpc::TaskEventData &expect_data) { + static void CompareTaskEventData(rpc::TaskEventData &actual_data, + rpc::TaskEventData &expect_data) { // Sort and compare std::vector actual_events; std::vector expect_events; @@ -89,10 +91,22 @@ class TaskEventBufferTest : public ::testing::Test { EXPECT_EQ(actual_events[i], expect_events[i]); } - EXPECT_EQ(actual_data.num_profile_task_events_dropped(), - expect_data.num_profile_task_events_dropped()); - EXPECT_EQ(actual_data.num_status_task_events_dropped(), - expect_data.num_status_task_events_dropped()); + // sort and compare data loss + std::vector actual_attempts; + std::vector expect_attempts; + for (const auto &t : actual_data.dropped_task_attempts()) { + actual_attempts.push_back(t.DebugString()); + } + for (const auto &t : expect_data.dropped_task_attempts()) { + expect_attempts.push_back(t.DebugString()); + } + std::sort(actual_attempts.begin(), actual_attempts.end()); + std::sort(expect_attempts.begin(), expect_attempts.end()); + + EXPECT_EQ(actual_attempts.size(), expect_attempts.size()); + for (size_t i = 0; i < actual_attempts.size(); ++i) { + EXPECT_EQ(actual_attempts[i], expect_attempts[i]); + } } std::unique_ptr task_event_buffer_ = nullptr; @@ -174,8 +188,6 @@ TEST_F(TaskEventBufferTest, TestFlushEvents) { // Expect data flushed match rpc::TaskEventData expected_data; - expected_data.set_num_profile_task_events_dropped(0); - expected_data.set_num_status_task_events_dropped(0); for (const auto &task_event : task_events) { auto event = expected_data.add_events_by_task(); task_event->ToRpcTaskEventsOrDrop(event); @@ -232,9 +244,9 @@ TEST_F(TaskEventBufferTest, TestFailedFlush) { task_event_buffer_->FlushEvents(false); // Expect the number of dropped events incremented. 
- ASSERT_EQ(task_event_buffer_->GetTotalNumStatusTaskEventsDropped(), num_status_events); - ASSERT_EQ(task_event_buffer_->GetTotalNumProfileTaskEventsDropped(), - num_profile_events); + ASSERT_EQ(task_event_buffer_->GetNumTaskEventsDropped(), + num_status_events + num_profile_events); + ASSERT_EQ(task_event_buffer_->GetNumTaskEventsReported(), 0); // Adding some more events for (size_t i = 0; i < num_status_events + num_profile_events; ++i) { @@ -246,11 +258,12 @@ TEST_F(TaskEventBufferTest, TestFailedFlush) { } } - // Flush successfully will reset the num events dropped. + ASSERT_EQ(task_event_buffer_->GetNumTaskEventsStored(), + num_status_events + num_profile_events); task_event_buffer_->FlushEvents(false); - ASSERT_EQ(task_event_buffer_->GetTotalNumStatusTaskEventsDropped(), num_status_events); - ASSERT_EQ(task_event_buffer_->GetTotalNumProfileTaskEventsDropped(), - num_profile_events); + ASSERT_EQ(task_event_buffer_->GetNumTaskEventsDropped(), + num_status_events + num_profile_events); + ASSERT_EQ(task_event_buffer_->GetNumTaskEventsStored(), 0); } TEST_F(TaskEventBufferTest, TestBackPressure) { @@ -339,33 +352,18 @@ TEST_F(TaskEventBufferTestBatchSend, TestBatchedSend) { TEST_F(TaskEventBufferTest, TestBufferSizeLimit) { size_t num_limit = 100; // Synced with test setup - size_t num_profile = 50; - size_t num_status = 50; // Generate 2 batches of events each, where batch 1 will be evicted by batch 2. 
- std::vector> profile_events_1; std::vector> status_events_1; - std::vector> profile_events_2; std::vector> status_events_2; // Generate data - for (size_t i = 0; i < 50; ++i) { + for (size_t i = 0; i < num_limit; ++i) { status_events_1.push_back(GenStatusTaskEvent(RandomTaskId(), 0)); status_events_2.push_back(GenStatusTaskEvent(RandomTaskId(), 0)); - profile_events_1.push_back(GenProfileTaskEvent(RandomTaskId(), 0)); - profile_events_2.push_back(GenProfileTaskEvent(RandomTaskId(), 0)); } rpc::TaskEventData expected_data; - expected_data.set_num_profile_task_events_dropped(num_profile); - expected_data.set_num_status_task_events_dropped(num_status); - for (const auto &event_ptr : profile_events_2) { - auto expect_event = expected_data.add_events_by_task(); - // Copy the data - auto event = std::make_unique( - *static_cast(event_ptr.get())); - event->ToRpcTaskEventsOrDrop(expect_event); - } for (const auto &event_ptr : status_events_2) { auto expect_event = expected_data.add_events_by_task(); // Copy the data @@ -374,19 +372,26 @@ TEST_F(TaskEventBufferTest, TestBufferSizeLimit) { event->ToRpcTaskEventsOrDrop(expect_event); } - // Add the data - for (auto &event : profile_events_1) { - task_event_buffer_->AddTaskEvent(std::move(event)); - } + // Add the data profile_events_1 and status_events_1 will be evicted. 
for (auto &event : status_events_1) { - task_event_buffer_->AddTaskEvent(std::move(event)); + rpc::TaskAttempt rpc_attempt; + rpc_attempt.set_task_id(event->GetTaskAttempt().first.Binary()); + rpc_attempt.set_attempt_number(event->GetTaskAttempt().second); + *(expected_data.add_dropped_task_attempts()) = rpc_attempt; + + // Copy the data + auto event_copy = + std::make_unique(*static_cast(event.get())); + task_event_buffer_->AddTaskEvent(std::move(event_copy)); } - for (auto &event : profile_events_2) { + for (auto &event : status_events_2) { task_event_buffer_->AddTaskEvent(std::move(event)); } - for (auto &event : status_events_2) { + // Status events from the same task attempt that were dropped should be dropped + for (auto &event : status_events_1) { task_event_buffer_->AddTaskEvent(std::move(event)); } + // Expect only limit in buffer. ASSERT_EQ(task_event_buffer_->GetNumTaskEventsStored(), num_limit); @@ -398,30 +403,25 @@ TEST_F(TaskEventBufferTest, TestBufferSizeLimit) { EXPECT_CALL(*task_gcs_accessor, AsyncAddTaskEventData(_, _)) .WillOnce([&](std::unique_ptr actual_data, ray::gcs::StatusCallback callback) { - // Sort and compare CompareTaskEventData(*actual_data, expected_data); return Status::OK(); }); task_event_buffer_->FlushEvents(false); - // Expect data flushed. ASSERT_EQ(task_event_buffer_->GetNumTaskEventsStored(), 0); - ASSERT_EQ(task_event_buffer_->GetNumProfileTaskEventsDroppedSinceLastFlush(), 0); - ASSERT_EQ(task_event_buffer_->GetNumStatusTaskEventsDroppedSinceLastFlush(), 0); - ASSERT_EQ(task_event_buffer_->GetTotalNumProfileTaskEventsDropped(), num_profile); - ASSERT_EQ(task_event_buffer_->GetTotalNumStatusTaskEventsDropped(), num_status); } TEST_F(TaskEventBufferTestLimitProfileEvents, TestLimitProfileEventsPerTask) { - size_t num_profile_events_per_task = 10; + size_t num_profile_events_per_task = 10; // sync with class config. 
size_t num_total_profile_events = 1000; std::vector> profile_events; - auto task_id = RandomTaskId(); + auto task_id1 = RandomTaskId(); + const auto &job_id = task_event_buffer_->GetJobId(); - // Generate data for the same task attempts. + // Generate data for the same task attempts from job 1 for (size_t i = 0; i < num_total_profile_events; ++i) { - profile_events.push_back(GenProfileTaskEvent(task_id, 0)); + profile_events.push_back(GenProfileTaskEvent(task_id1, 0, job_id)); } // Add all @@ -429,11 +429,25 @@ TEST_F(TaskEventBufferTestLimitProfileEvents, TestLimitProfileEventsPerTask) { task_event_buffer_->AddTaskEvent(std::move(event)); } - // Assert dropped count + auto task_gcs_accessor = + static_cast(task_event_buffer_->GetGcsClient()) + ->mock_task_accessor; + + // With batch size = 10, there should be 10 flush calls + EXPECT_CALL(*task_gcs_accessor, AsyncAddTaskEventData) + .WillOnce([&](std::unique_ptr actual_data, + ray::gcs::StatusCallback callback) { + EXPECT_EQ(actual_data->num_profile_events_dropped(), + num_total_profile_events - num_profile_events_per_task); + EXPECT_EQ(actual_data->job_id(), job_id.Binary()); + callback(Status::OK()); + return Status::OK(); + }); + task_event_buffer_->FlushEvents(false); - ASSERT_EQ(task_event_buffer_->GetTotalNumProfileTaskEventsDropped(), - num_total_profile_events - num_profile_events_per_task); - ASSERT_EQ(task_event_buffer_->GetTotalNumStatusTaskEventsDropped(), 0); + + // Counter is reset correctly. 
+ EXPECT_EQ(task_event_buffer_->GetNumProfileTaskEventsDroppedSinceLastFlush(), 0); } } // namespace worker diff --git a/src/ray/gcs/gcs_server/gcs_task_manager.cc b/src/ray/gcs/gcs_server/gcs_task_manager.cc index e733856b8ee5..12628c82a20e 100644 --- a/src/ray/gcs/gcs_server/gcs_task_manager.cc +++ b/src/ray/gcs/gcs_server/gcs_task_manager.cc @@ -387,15 +387,25 @@ void GcsTaskManager::HandleGetTaskEvents(rpc::GetTaskEventsRequest request, return; } +void GcsTaskManager::RecordDataLossFromWorker(const rpc::TaskEventData &data) { + // TODO(rickyx): GCS side GC will be changed in another PR. This is a temporary + // routine for supporting legacy behaviour with worker side changes. + if (data.dropped_task_attempts_size() > 0) { + stats_counter_.Increment(kTotalNumStatusTaskEventsDropped, + data.dropped_task_attempts_size()); + } + + if (data.num_profile_events_dropped() > 0) { + stats_counter_.Increment(kTotalNumProfileTaskEventsDropped, + data.num_profile_events_dropped()); + } +} + void GcsTaskManager::HandleAddTaskEventData(rpc::AddTaskEventDataRequest request, rpc::AddTaskEventDataReply *reply, rpc::SendReplyCallback send_reply_callback) { auto data = std::move(request.data()); - // Update counters. - stats_counter_.Increment(kTotalNumProfileTaskEventsDropped, - data.num_profile_task_events_dropped()); - stats_counter_.Increment(kTotalNumStatusTaskEventsDropped, - data.num_status_task_events_dropped()); + RecordDataLossFromWorker(data); for (auto events_by_task : *data.mutable_events_by_task()) { stats_counter_.Increment(kTotalNumTaskEventsReported); diff --git a/src/ray/gcs/gcs_server/gcs_task_manager.h b/src/ray/gcs/gcs_server/gcs_task_manager.h index c1d1b204a2d1..41c20bcbecc6 100644 --- a/src/ray/gcs/gcs_server/gcs_task_manager.h +++ b/src/ray/gcs/gcs_server/gcs_task_manager.h @@ -277,6 +277,13 @@ class GcsTaskManager : public rpc::TaskInfoHandler { }; private: + /// Record data loss from worker. 
+ /// + /// TODO(rickyx): This will be updated to record task attempt loss properly. + /// + /// \param data The task event data. + void RecordDataLossFromWorker(const rpc::TaskEventData &data); + /// Test only size_t GetTotalNumStatusTaskEventsDropped() { return stats_counter_.Get(kTotalNumStatusTaskEventsDropped); diff --git a/src/ray/gcs/test/gcs_test_util.h b/src/ray/gcs/test/gcs_test_util.h index a0746add894c..fdef576c32e3 100644 --- a/src/ray/gcs/test/gcs_test_util.h +++ b/src/ray/gcs/test/gcs_test_util.h @@ -266,9 +266,16 @@ struct Mocker { auto new_events = data.add_events_by_task(); new_events->CopyFrom(events); } - data.set_num_profile_task_events_dropped(num_profile_task_events_dropped); - data.set_num_status_task_events_dropped(num_status_task_events_dropped); + for (int i = 0; i < num_status_task_events_dropped; ++i) { + rpc::TaskAttempt rpc_task_attempt; + rpc_task_attempt.set_task_id(RandomTaskId().Binary()); + rpc_task_attempt.set_attempt_number(0); + *(data.add_dropped_task_attempts()) = rpc_task_attempt; + } + + data.set_num_profile_events_dropped(num_profile_task_events_dropped); + data.set_job_id(JobID::FromInt(0).Binary()); return data; } }; diff --git a/src/ray/protobuf/gcs.proto b/src/ray/protobuf/gcs.proto index ff49640628e3..f5949f7f3df1 100644 --- a/src/ray/protobuf/gcs.proto +++ b/src/ray/protobuf/gcs.proto @@ -258,15 +258,26 @@ message TaskEvents { bytes job_id = 6; } +message TaskAttempt { + // The task id of the task attempt. + bytes task_id = 1; + // The attempt number of the task attempt. + int32 attempt_number = 2; +} + // Represents a compact list of task state events by different tasks, // where each task has a list of state change events. message TaskEventData { // A batch of task state change events. repeated TaskEvents events_by_task = 1; - // Number of dropped profile task events due to buffer size limit on workers. 
- int32 num_profile_task_events_dropped = 3; - // Number of dropped status task events due to buffer size limit on workers. - int32 num_status_task_events_dropped = 4; + // A list of task attempts that were dropped on the worker. + // We only drop task attempts if task state update is lost on the worker + // due to too many events being generated. + repeated TaskAttempt dropped_task_attempts = 2; + // Number of profile events dropped on the worker. + int32 num_profile_events_dropped = 3; + // Current job the worker is reporting data for. + bytes job_id = 4; } message ResourceTableData { From 43a06685a62288f7d2509075231b6e85f973ef2d Mon Sep 17 00:00:00 2001 From: Alan Guo Date: Thu, 11 May 2023 16:01:34 -0700 Subject: [PATCH 354/424] Use state-api for job driver logs (#35235) Update Actors page with new IA look and feel Fix actors page IA for job actors Follow-up PR: Use state-api for actor logs Follow-up PR 2: Use state-api for serve replica and controller logs. Actor list page: --- dashboard/client/src/App.tsx | 20 +- .../client/src/pages/actor/ActorDetail.tsx | 289 +++++++++--------- .../client/src/pages/actor/ActorLayout.tsx | 18 ++ .../client/src/pages/actor/ActorList.tsx | 10 +- .../src/pages/actor/hook/useActorDetail.ts | 4 +- dashboard/client/src/pages/actor/index.tsx | 14 +- dashboard/client/src/pages/job/JobDetail.tsx | 6 + .../src/pages/job/JobDetailActorPage.tsx | 21 +- .../job/JobDriverLogs.component.test.tsx | 12 +- .../client/src/pages/job/JobDriverLogs.tsx | 58 ++-- dashboard/client/src/service/log.ts | 12 + dashboard/client/src/type/job.d.ts | 1 + 12 files changed, 241 insertions(+), 224 deletions(-) create mode 100644 dashboard/client/src/pages/actor/ActorLayout.tsx diff --git a/dashboard/client/src/App.tsx b/dashboard/client/src/App.tsx index c183a7669d69..dc7f07c4ee90 100644 --- a/dashboard/client/src/App.tsx +++ b/dashboard/client/src/App.tsx @@ -5,10 +5,14 @@ import duration from "dayjs/plugin/duration"; import React, { Suspense, useEffect, 
useState } from "react"; import { HashRouter, Navigate, Route, Routes } from "react-router-dom"; import ActorDetailPage from "./pages/actor/ActorDetail"; +import { ActorLayout } from "./pages/actor/ActorLayout"; import Loading from "./pages/exception/Loading"; import JobList, { JobsLayout } from "./pages/job"; import { JobDetailChartsPage } from "./pages/job/JobDetail"; -import { JobDetailActorsPage } from "./pages/job/JobDetailActorPage"; +import { + JobDetailActorLayout, + JobDetailActorsPage, +} from "./pages/job/JobDetailActorPage"; import { JobDetailInfoPage } from "./pages/job/JobDetailInfoPage"; import { JobDetailLayout } from "./pages/job/JobDetailLayout"; import { MainNavLayout } from "./pages/layout/MainNavLayout"; @@ -186,16 +190,20 @@ const App = () => { - + } path="actors" - /> - } path="actors/:id" /> + > + } path="" /> + } path=":actorId" /> + - } path="actors" /> - } path="actors/:id" /> + } path="actors"> + } path="" /> + } path=":actorId" /> + } path="metrics" /> } path="serve"> } path="" /> diff --git a/dashboard/client/src/pages/actor/ActorDetail.tsx b/dashboard/client/src/pages/actor/ActorDetail.tsx index 30a407c14709..7b237dc08037 100644 --- a/dashboard/client/src/pages/actor/ActorDetail.tsx +++ b/dashboard/client/src/pages/actor/ActorDetail.tsx @@ -2,6 +2,7 @@ import { makeStyles } from "@material-ui/core"; import React, { useContext } from "react"; import { Link } from "react-router-dom"; import { GlobalContext } from "../../App"; +import { CollapsibleSection } from "../../common/CollapsibleSection"; import { DurationText } from "../../common/DurationText"; import { formatDateFromTimeMs } from "../../common/formatUtils"; import { generateNodeLink } from "../../common/links"; @@ -20,6 +21,7 @@ import { useActorDetail } from "./hook/useActorDetail"; const useStyle = makeStyles((theme) => ({ root: { padding: theme.spacing(2), + backgroundColor: "white", }, paper: { padding: theme.spacing(2), @@ -43,8 +45,8 @@ const ActorDetailPage = () => { 
return (
    - - + +
    Request Status: {msg}
    @@ -56,154 +58,153 @@ const ActorDetailPage = () => {
    - - , - }, - { - label: "ID", - content: actorDetail.actorId - ? { - value: actorDetail.actorId, - copyableValue: actorDetail.actorId, - } - : { value: "-" }, - }, - { - label: "Name", - content: actorDetail.name - ? { - value: actorDetail.name, - } - : { value: "-" }, - }, - { - label: "Class Name", - content: actorDetail.actorClass - ? { - value: actorDetail.actorClass, - } - : { value: "-" }, - }, - { - label: "Repr", - content: actorDetail.reprName - ? { - value: actorDetail.reprName, - } - : { value: "-" }, - }, - { - label: "Job ID", - content: actorDetail.jobId - ? { - value: actorDetail.jobId, - copyableValue: actorDetail.jobId, - } - : { value: "-" }, - }, - { - label: "Node ID", - content: actorDetail.address?.rayletId - ? { - value: actorDetail.address?.rayletId, - copyableValue: actorDetail.address?.rayletId, - link: actorDetail.address.rayletId - ? generateNodeLink(actorDetail.address.rayletId) - : undefined, - } - : { value: "-" }, - }, - { - label: "Worker ID", - content: actorDetail.address?.workerId - ? { - value: actorDetail.address?.workerId, - copyableValue: actorDetail.address?.workerId, - } - : { value: "-" }, - }, - { - label: "Started at", - content: { - value: actorDetail.startTime - ? formatDateFromTimeMs(actorDetail.startTime) - : "-", - }, + + , + }, + { + label: "ID", + content: actorDetail.actorId + ? { + value: actorDetail.actorId, + copyableValue: actorDetail.actorId, + } + : { value: "-" }, + }, + { + label: "Name", + content: actorDetail.name + ? { + value: actorDetail.name, + } + : { value: "-" }, + }, + { + label: "Class Name", + content: actorDetail.actorClass + ? { + value: actorDetail.actorClass, + } + : { value: "-" }, + }, + { + label: "Repr", + content: actorDetail.reprName + ? { + value: actorDetail.reprName, + } + : { value: "-" }, + }, + { + label: "Job ID", + content: actorDetail.jobId + ? 
{ + value: actorDetail.jobId, + copyableValue: actorDetail.jobId, + } + : { value: "-" }, + }, + { + label: "Node ID", + content: actorDetail.address?.rayletId + ? { + value: actorDetail.address?.rayletId, + copyableValue: actorDetail.address?.rayletId, + link: actorDetail.address.rayletId + ? generateNodeLink(actorDetail.address.rayletId) + : undefined, + } + : { value: "-" }, + }, + { + label: "Worker ID", + content: actorDetail.address?.workerId + ? { + value: actorDetail.address?.workerId, + copyableValue: actorDetail.address?.workerId, + } + : { value: "-" }, + }, + { + label: "Started at", + content: { + value: actorDetail.startTime + ? formatDateFromTimeMs(actorDetail.startTime) + : "-", }, - { - label: "Ended at", - content: { - value: actorDetail.endTime - ? formatDateFromTimeMs(actorDetail.endTime) - : "-", - }, + }, + { + label: "Ended at", + content: { + value: actorDetail.endTime + ? formatDateFromTimeMs(actorDetail.endTime) + : "-", }, - { - label: "Uptime", - content: actorDetail.startTime ? ( - + ) : ( + - + ), + }, + { + label: "Restarted", + content: { value: actorDetail.numRestarts }, + }, + { + label: "Exit Detail", + content: actorDetail.exitDetail + ? { + value: actorDetail.exitDetail, + } + : { value: "-" }, + }, + { + label: "Actions", + content: ( +
    + + Log + +
    + - ) : ( - - - ), - }, - { - label: "Restarted", - content: { value: actorDetail.numRestarts }, - }, - { - label: "Exit Detail", - content: actorDetail.exitDetail - ? { - value: actorDetail.exitDetail, - } - : { value: "-" }, - }, - { - label: "Actions", - content: ( -
    - - Log - -
    - -
    - -
    - ), - }, - ]} - /> - - - - +
    + +
    + ), + }, + ]} + /> + + +
    ); }; diff --git a/dashboard/client/src/pages/actor/ActorLayout.tsx b/dashboard/client/src/pages/actor/ActorLayout.tsx new file mode 100644 index 000000000000..7033a12a57c1 --- /dev/null +++ b/dashboard/client/src/pages/actor/ActorLayout.tsx @@ -0,0 +1,18 @@ +import React from "react"; +import { Outlet } from "react-router-dom"; +import { MainNavPageInfo } from "../layout/mainNavContext"; + +export const ActorLayout = () => { + return ( +
    + + +
    + ); +}; diff --git a/dashboard/client/src/pages/actor/ActorList.tsx b/dashboard/client/src/pages/actor/ActorList.tsx index a99ce2dae6b2..d9ab44f1914c 100644 --- a/dashboard/client/src/pages/actor/ActorList.tsx +++ b/dashboard/client/src/pages/actor/ActorList.tsx @@ -1,6 +1,4 @@ -import { Grid } from "@material-ui/core"; -import dayjs from "dayjs"; -import React, { useState } from "react"; +import React from "react"; import ActorTable, { ActorTableProps } from "../../components/ActorTable"; import { Actor } from "../../type/actor"; import { useActorList } from "./hook/useActorList"; @@ -16,17 +14,11 @@ const ActorList = ({ jobId?: string | null; detailPathPrefix?: string; } & Pick) => { - const [timeStamp] = useState(dayjs()); const data: { [actorId: string]: Actor } | undefined = useActorList(); const actors: { [actorId: string]: Actor } = data ? data : {}; return (
    - - - Last updated: {timeStamp.format("YYYY-MM-DD HH:mm:ss")} - - { - const params = useParams() as { id: string }; + const params = useParams() as { actorId: string }; const [msg, setMsg] = useState("Loading the actor infos..."); const { namespaceMap } = useContext(GlobalContext); const { data: actorDetail, isLoading } = useSWR( - ["useActorDetail", params.id], + ["useActorDetail", params.actorId], async ([_, actorId]) => { const actor_resp = await getActor(actorId); const data: ActorResp = actor_resp?.data; diff --git a/dashboard/client/src/pages/actor/index.tsx b/dashboard/client/src/pages/actor/index.tsx index d66ce9f8aae3..6c0995cb4c58 100644 --- a/dashboard/client/src/pages/actor/index.tsx +++ b/dashboard/client/src/pages/actor/index.tsx @@ -1,13 +1,12 @@ import { makeStyles } from "@material-ui/core"; import React from "react"; -import TitleCard from "../../components/TitleCard"; -import { MainNavPageInfo } from "../layout/mainNavContext"; import ActorList from "./ActorList"; const useStyles = makeStyles((theme) => ({ root: { padding: theme.spacing(2), width: "100%", + backgroundColor: "white", }, })); @@ -19,16 +18,7 @@ const Actors = () => { return (
    - - - - +
    ); }; diff --git a/dashboard/client/src/pages/job/JobDetail.tsx b/dashboard/client/src/pages/job/JobDetail.tsx index d0b1399fb326..b0194856d6fc 100644 --- a/dashboard/client/src/pages/job/JobDetail.tsx +++ b/dashboard/client/src/pages/job/JobDetail.tsx @@ -235,6 +235,12 @@ export const JobLogsLink = ({ }: JobLogsLinkProps) => { const { ipLogMap } = useContext(GlobalContext); + if (type === "SUBMISSION") { + // For submission jobs, send them to the job detail page because we have logs there already. + const link = `/jobs/${job_id ? job_id : submission_id}`; + return Log; + } + let link: string | undefined; if (driver_agent_http_address) { diff --git a/dashboard/client/src/pages/job/JobDetailActorPage.tsx b/dashboard/client/src/pages/job/JobDetailActorPage.tsx index 2a8ab3d9e1a6..af701d7c4edf 100644 --- a/dashboard/client/src/pages/job/JobDetailActorPage.tsx +++ b/dashboard/client/src/pages/job/JobDetailActorPage.tsx @@ -1,6 +1,7 @@ import { makeStyles } from "@material-ui/core"; import React from "react"; +import { Outlet } from "react-router-dom"; import { Section } from "../../common/Section"; import ActorList from "../actor/ActorList"; import { MainNavPageInfo } from "../layout/mainNavContext"; @@ -15,7 +16,19 @@ const useStyle = makeStyles((theme) => ({ export const JobDetailActorsPage = () => { const classes = useStyle(); - const { job, params } = useJobDetail(); + const { params } = useJobDetail(); + + return ( +
    +
    + +
    +
    + ); +}; + +export const JobDetailActorLayout = () => { + const { job } = useJobDetail(); const pageInfo = job ? { @@ -30,11 +43,9 @@ export const JobDetailActorsPage = () => { }; return ( -
    +
    -
    - -
    +
    ); }; diff --git a/dashboard/client/src/pages/job/JobDriverLogs.component.test.tsx b/dashboard/client/src/pages/job/JobDriverLogs.component.test.tsx index 1c526b1caccf..507e72a5c24d 100644 --- a/dashboard/client/src/pages/job/JobDriverLogs.component.test.tsx +++ b/dashboard/client/src/pages/job/JobDriverLogs.component.test.tsx @@ -15,20 +15,14 @@ describe("JobDriverLogs", () => { headers: { "content-type": "text/plain", }, - data: "log line\nthis is a line\nHi\n10\nfoo", + data: "1log line\nthis is a line\nHi\n10\nfoo", }); render( , ); @@ -41,7 +35,7 @@ describe("JobDriverLogs", () => { expect(screen.getByText(/foo/)).toBeVisible(); expect(mockedGet).toBeCalledWith( - "log_proxy?url=http%3A%2F%2F127.0.0.1%3A52365%2Flogs%2Fjob-driver-raysubmit_12345.log", + "api/v0/logs/file?node_id=node-id-0&filename=job-driver-raysubmit_12345.log&lines=-1", ); }); }); diff --git a/dashboard/client/src/pages/job/JobDriverLogs.tsx b/dashboard/client/src/pages/job/JobDriverLogs.tsx index 5d45b03bf1bb..34e42ee2bc0f 100644 --- a/dashboard/client/src/pages/job/JobDriverLogs.tsx +++ b/dashboard/client/src/pages/job/JobDriverLogs.tsx @@ -1,63 +1,47 @@ import { Typography } from "@material-ui/core"; -import React, { useContext } from "react"; +import React from "react"; import useSWR from "swr"; -import { GlobalContext } from "../../App"; -import { getLogDetail, getLogDownloadUrl } from "../../service/log"; +import { getStateApiDownloadLogUrl, getStateApiLog } from "../../service/log"; import { UnifiedJob } from "../../type/job"; import { LogViewer } from "../log/LogViewer"; const useDriverLogs = ( - job: Pick< - UnifiedJob, - "driver_agent_http_address" | "driver_info" | "submission_id" - >, + job: Pick, ) => { - const { ipLogMap } = useContext(GlobalContext); - const { driver_agent_http_address, driver_info, submission_id } = job; - const host = (() => { - if (driver_agent_http_address) { - return `${driver_agent_http_address}/logs/`; - } else if (driver_info && 
ipLogMap[driver_info.node_ip_address]) { - return `${ipLogMap[driver_info.node_ip_address]}/`; - } - })(); - const path = `job-driver-${submission_id}.log`; + const { driver_node_id, submission_id } = job; - const url = host ? `${host}${path}` : undefined; - const downloadUrl = url ? getLogDownloadUrl(url) : undefined; + const filename = submission_id + ? `job-driver-${submission_id}.log` + : undefined; + + const downloadUrl = + driver_node_id && filename + ? getStateApiDownloadLogUrl(driver_node_id, filename) + : undefined; const { data: log, isLoading, mutate, - } = useSWR(url ? ["useDriverLogs", url] : null, async ([_, url]) => - getLogDetail(url) - .then((res) => { - if (res) { - return res; - } else { - return "(This file is empty.)"; - } - }) - .catch(() => { - return "(Failed to load)"; - }), + } = useSWR( + driver_node_id && filename + ? ["useDriverLogs", driver_node_id, filename] + : null, + async ([_, node_id, filename]) => { + return getStateApiLog(node_id, filename); + }, ); return { log: isLoading ? "Loading..." 
: log, downloadUrl, refresh: mutate, - host, - path, + path: filename, }; }; type JobDriverLogsProps = { - job: Pick< - UnifiedJob, - "driver_agent_http_address" | "driver_info" | "submission_id" - >; + job: Pick; }; export const JobDriverLogs = ({ job }: JobDriverLogsProps) => { diff --git a/dashboard/client/src/service/log.ts b/dashboard/client/src/service/log.ts index d301254c1b37..0b7084f8018e 100644 --- a/dashboard/client/src/service/log.ts +++ b/dashboard/client/src/service/log.ts @@ -49,3 +49,15 @@ export const getLogDetail = async (url: string) => { return rsp.data as string; }; + +export const getStateApiDownloadLogUrl = (nodeId: string, fileName: string) => + `api/v0/logs/file?node_id=${nodeId}&filename=${fileName}&lines=-1`; + +export const getStateApiLog = async (nodeId: string, fileName: string) => { + const resp = await get(getStateApiDownloadLogUrl(nodeId, fileName)); + // TODO(aguo): get rid of this first byte check once we support state-api logs without this streaming byte. 
+ if (resp.data[0] !== "1") { + throw new Error(resp.data.substring(1)); + } + return resp.data.substring(1); +}; diff --git a/dashboard/client/src/type/job.d.ts b/dashboard/client/src/type/job.d.ts index 4fc6632c56a7..f8797851e6a8 100644 --- a/dashboard/client/src/type/job.d.ts +++ b/dashboard/client/src/type/job.d.ts @@ -72,6 +72,7 @@ export type UnifiedJob = { runtime_env: { [key: string]: string } | null; driver_info: DriverInfo | null; driver_agent_http_address: string | null; + driver_node_id: string | null; }; export type DriverInfo = { From 95aee3b5954dc60dd3e923a900a064426beea1fc Mon Sep 17 00:00:00 2001 From: Alan Guo Date: Thu, 11 May 2023 16:01:47 -0700 Subject: [PATCH 355/424] Add docs for setting up metrics for homebrew installations (#35026) fixes #35121 --- dashboard/client/src/pages/job/index.tsx | 9 +------ .../monitoring-and-observability.rst | 2 +- doc/source/ray-observability/ray-metrics.rst | 24 +++++++++++++++---- 3 files changed, 22 insertions(+), 13 deletions(-) diff --git a/dashboard/client/src/pages/job/index.tsx b/dashboard/client/src/pages/job/index.tsx index 1ff5eb6346f6..596630cc3ce9 100644 --- a/dashboard/client/src/pages/job/index.tsx +++ b/dashboard/client/src/pages/job/index.tsx @@ -45,14 +45,7 @@ const columns = [ helpInfo: ( The progress of the all submitted tasks per job. Tasks that are not yet - submitted will not show up in the progress bar. -
    -
    - Note: This column requires that prometheus is running. See{" "} -
    - here - {" "} - for instructions. + submitted do not show up in the progress bar. ), }, diff --git a/doc/source/cluster/running-applications/monitoring-and-observability.rst b/doc/source/cluster/running-applications/monitoring-and-observability.rst index df3bcd6cddd9..c9dd3e39ee9a 100644 --- a/doc/source/cluster/running-applications/monitoring-and-observability.rst +++ b/doc/source/cluster/running-applications/monitoring-and-observability.rst @@ -124,7 +124,7 @@ The service discovery file is generated on the :ref:`head node `. +You can choose to use this config or modify your own to enable this behavior. The details of the config can be seen below and full documentation can be found `here `_. With this config, Prometheus will automatically update the addresses that it scrapes based on the contents of Ray's service discovery file. diff --git a/doc/source/ray-observability/ray-metrics.rst b/doc/source/ray-observability/ray-metrics.rst index a4efcdc13989..bcedba3a3f77 100644 --- a/doc/source/ray-observability/ray-metrics.rst +++ b/doc/source/ray-observability/ray-metrics.rst @@ -278,9 +278,21 @@ to `RAY_GRAFANA_HOST=http://55.66.77.88:3000`. Troubleshooting --------------- +Getting Prometheus and Grafana to use the Ray configurations when installed via homebrew on macOS X +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +With homebrew, Prometheus and Grafana are installed as services that are automatically launched for you. +Therefore, to configure these services, you cannot simply pass in the config files as command line arguments. + +Instead, follow these instructions: +1. Change the --config-file line in `/usr/local/etc/prometheus.args` to read `--config.file /tmp/ray/session_latest/metrics/prometheus/prometheus.yml`. +2. Update `/usr/local/etc/grafana/grafana.ini` file so that it matches the contents of `/tmp/ray/session_latest/metrics/grafana/grafana.ini`. 
+ +You can then start or restart the services with `brew services start grafana` and `brew services start prometheus`. + .. _unverified-developer: -Mac does not trust the developer when installing prometheus or grafana +MacOS does not trust the developer to install Prometheus or Grafana ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ You may have received an error that looks like this: @@ -295,6 +307,10 @@ See `these instructions Date: Thu, 11 May 2023 16:01:59 -0700 Subject: [PATCH 356/424] Add HTTPProxy details to Serve Dashboard UI (#35159) Adds details about HTTPProxy to the Serve UI --- .../client/src/common/CollapsibleSection.tsx | 35 ++- .../client/src/components/StatusChip.tsx | 6 + ...rveApplicationsListPage.component.test.tsx | 28 +- .../pages/serve/ServeApplicationsListPage.tsx | 247 +++++++++--------- .../src/pages/serve/ServeSystemDetailRows.tsx | 95 +++++++ .../src/pages/serve/ServeSystemDetails.tsx | 150 +++++++++++ .../pages/serve/hook/useServeApplications.ts | 31 ++- dashboard/client/src/type/serve.ts | 30 ++- python/ray/serve/_private/common.py | 2 + 9 files changed, 475 insertions(+), 149 deletions(-) create mode 100644 dashboard/client/src/pages/serve/ServeSystemDetailRows.tsx create mode 100644 dashboard/client/src/pages/serve/ServeSystemDetails.tsx diff --git a/dashboard/client/src/common/CollapsibleSection.tsx b/dashboard/client/src/common/CollapsibleSection.tsx index 57c2d3792563..9d2135e46e2d 100644 --- a/dashboard/client/src/common/CollapsibleSection.tsx +++ b/dashboard/client/src/common/CollapsibleSection.tsx @@ -1,4 +1,4 @@ -import { createStyles, makeStyles, Typography } from "@material-ui/core"; +import { Box, createStyles, makeStyles, Typography } from "@material-ui/core"; import classNames from "classnames"; import React, { forwardRef, @@ -18,6 +18,7 @@ const useStyles = makeStyles((theme) => alignItems: "center", fontWeight: 500, cursor: "pointer", + marginRight: theme.spacing(1), }, icon: { marginRight: 
theme.spacing(1), @@ -43,6 +44,10 @@ type CollapsibleSectionProps = PropsWithChildren< onExpandButtonClick?: () => void; title: string; startExpanded?: boolean; + /** + * Icon to show to the right of the title. + */ + icon?: React.ReactNode; /** * An optimization to not avoid re-rendering the contents of the collapsible section. * When enabled, we will keep the content around when collapsing but hide it via css. @@ -64,6 +69,7 @@ export const CollapsibleSection = forwardRef< className, children, keepRendered, + icon, }, ref, ) => { @@ -85,18 +91,21 @@ export const CollapsibleSection = forwardRef< return (
    - - {finalExpanded ? ( - - ) : ( - - )} - {title} - + + + {finalExpanded ? ( + + ) : ( + + )} + {title} + + {icon} + {(finalExpanded || (keepRendered && rendered)) && (
    { it("renders list", async () => { - expect.assertions(11); + expect.assertions(14); mockGetServeApplications.mockResolvedValue({ data: { http_options: { host: "1.2.3.4", port: 8000 }, + http_proxies: { + foo: { + node_id: "node:12345", + status: ServeHTTPProxyStatus.HEALTHY, + actor_id: "actor:12345", + }, + }, proxy_location: ServeDeploymentMode.EveryNode, applications: { home: { @@ -54,11 +65,20 @@ describe("ServeApplicationsListPage", () => { const user = userEvent.setup(); - await screen.findByText("Config"); - expect(screen.getByText("Config")).toBeVisible(); + await screen.findByText("System"); + expect(screen.getByText("System")).toBeVisible(); + // System tab is hidden at first + expect(screen.queryByText("1.2.3.4")).toBeNull(); + // Expand the system tab + await user.click(screen.getByText("System")); + await screen.findByText("1.2.3.4"); expect(screen.getByText("1.2.3.4")).toBeVisible(); expect(screen.getByText("8000")).toBeVisible(); + // HTTP Proxy row + expect(screen.getByText("HTTPProxyActor:node:12345")).toBeVisible(); + expect(screen.getByText("HEALTHY")).toBeVisible(); + // First row expect(screen.getByText("home")).toBeVisible(); expect(screen.getByText("/")).toBeVisible(); diff --git a/dashboard/client/src/pages/serve/ServeApplicationsListPage.tsx b/dashboard/client/src/pages/serve/ServeApplicationsListPage.tsx index acd835b2c10b..c2523bf32cba 100644 --- a/dashboard/client/src/pages/serve/ServeApplicationsListPage.tsx +++ b/dashboard/client/src/pages/serve/ServeApplicationsListPage.tsx @@ -17,11 +17,11 @@ import { Alert, Autocomplete, Pagination } from "@material-ui/lab"; import React, { ReactElement } from "react"; import { CollapsibleSection } from "../../common/CollapsibleSection"; import Loading from "../../components/Loading"; -import { MetadataSection } from "../../components/MetadataSection"; import { HelpInfo } from "../../components/Tooltip"; import { useServeApplications } from "./hook/useServeApplications"; import { 
ServeApplicationRow } from "./ServeApplicationRow"; import { ServeMetricsSection } from "./ServeMetricsSection"; +import { ServeSystemDetails } from "./ServeSystemDetails"; const useStyles = makeStyles((theme) => createStyles({ @@ -34,6 +34,9 @@ const useStyles = makeStyles((theme) => helpInfo: { marginLeft: theme.spacing(1), }, + applicationsSection: { + marginTop: theme.spacing(4), + }, metricsSection: { marginTop: theme.spacing(4), }, @@ -56,10 +59,13 @@ export const ServeApplicationsListPage = () => { const { serveDetails, filteredServeApplications, + httpProxies, error, allServeApplications, page, setPage, + httpProxiesPage, + setHttpProxiesPage, changeFilter, } = useServeApplications(); @@ -73,132 +79,121 @@ export const ServeApplicationsListPage = () => { return (
    - - {serveDetails.host && serveDetails.port ? ( - + Serve not started. Please deploy a serve application first. + + ) : ( + + - ) : ( - - Serve not started. Please deploy a serve application first. - - )} - - - -
    - (e.name ? e.name : "-")), - ), - )} - onInputChange={(_: any, value: string) => { - changeFilter("name", value.trim() !== "-" ? value.trim() : ""); - }} - renderInput={(params: TextFieldProps) => ( - - )} - /> - e.status)), - )} - onInputChange={(_: any, value: string) => { - changeFilter("status", value.trim()); - }} - renderInput={(params: TextFieldProps) => ( - - )} - /> - { - setPage("pageSize", Math.min(Number(value), 500) || 10); - }, - endAdornment: ( - Per Page - ), - }} - /> -
    -
    - setPage("pageNo", pageNo)} - /> -
    - - - - {columns.map(({ label, helpInfo, width }) => ( - - - {label} - {helpInfo && ( - - {helpInfo} - - )} - - - ))} - - - - {filteredServeApplications - .slice( - (page.pageNo - 1) * page.pageSize, - page.pageNo * page.pageSize, - ) - .map((application) => ( - - ))} - -
    -
    -
    + + +
    + (e.name ? e.name : "-")), + ), + )} + onInputChange={(_: any, value: string) => { + changeFilter( + "name", + value.trim() !== "-" ? value.trim() : "", + ); + }} + renderInput={(params: TextFieldProps) => ( + + )} + /> + e.status)), + )} + onInputChange={(_: any, value: string) => { + changeFilter("status", value.trim()); + }} + renderInput={(params: TextFieldProps) => ( + + )} + /> + { + setPage("pageSize", Math.min(Number(value), 500) || 10); + }, + endAdornment: ( + Per Page + ), + }} + /> +
    +
    + setPage("pageNo", pageNo)} + /> +
    + + + + {columns.map(({ label, helpInfo, width }) => ( + + + {label} + {helpInfo && ( + + {helpInfo} + + )} + + + ))} + + + + {filteredServeApplications + .slice( + (page.pageNo - 1) * page.pageSize, + page.pageNo * page.pageSize, + ) + .map((application) => ( + + ))} + +
    +
    +
    + + )}
    ); diff --git a/dashboard/client/src/pages/serve/ServeSystemDetailRows.tsx b/dashboard/client/src/pages/serve/ServeSystemDetailRows.tsx new file mode 100644 index 000000000000..230916e992ec --- /dev/null +++ b/dashboard/client/src/pages/serve/ServeSystemDetailRows.tsx @@ -0,0 +1,95 @@ +import { + createStyles, + Link, + makeStyles, + TableCell, + TableRow, + Tooltip, +} from "@material-ui/core"; +import React, { useContext } from "react"; +import { Link as RouterLink } from "react-router-dom"; +import { GlobalContext } from "../../App"; +import { StatusChip } from "../../components/StatusChip"; +import { ServeHttpProxy } from "../../type/serve"; + +const useStyles = makeStyles((theme) => + createStyles({ + idCol: { + display: "inline-block", + width: "50px", + overflow: "hidden", + textOverflow: "ellipsis", + whiteSpace: "nowrap", + verticalAlign: "bottom", + }, + }), +); + +export type ServeHttpProxyRowProps = { + httpProxy: ServeHttpProxy; +}; + +export const ServeHttpProxyRow = ({ httpProxy }: ServeHttpProxyRowProps) => { + const { node_id, status, actor_id } = httpProxy; + const classes = useStyles(); + + return ( + + HTTPProxyActor:{node_id} + + + + + + + + + + {node_id} + + + + + + + {actor_id} + + + + + ); +}; + +export type ServeReplicaLogsLinkProps = { + httpProxy: ServeHttpProxy; +}; + +export const ServeHttpProxyLogLink = ({ + httpProxy: { log_file_path, node_ip }, +}: ServeReplicaLogsLinkProps) => { + const { ipLogMap } = useContext(GlobalContext); + + let link: string | undefined; + + if (node_ip && ipLogMap[node_ip]) { + // TODO(aguo): Clean up this logic after re-writing the log viewer + const logsRoot = ipLogMap[node_ip].endsWith("/logs") + ? 
ipLogMap[node_ip].substring( + 0, + ipLogMap[node_ip].length - "/logs".length, + ) + : ipLogMap[node_ip]; + const path = `/logs${log_file_path}`; + link = `/logs/${encodeURIComponent(logsRoot)}/${encodeURIComponent(path)}`; + } + + if (link) { + return ( + + Log + + ); + } + + return -; +}; diff --git a/dashboard/client/src/pages/serve/ServeSystemDetails.tsx b/dashboard/client/src/pages/serve/ServeSystemDetails.tsx new file mode 100644 index 000000000000..017134949a87 --- /dev/null +++ b/dashboard/client/src/pages/serve/ServeSystemDetails.tsx @@ -0,0 +1,150 @@ +import { + Box, + createStyles, + makeStyles, + Table, + TableBody, + TableCell, + TableContainer, + TableHead, + TableRow, +} from "@material-ui/core"; +import { Pagination } from "@material-ui/lab"; +import React, { ReactElement } from "react"; +import { RiErrorWarningFill } from "react-icons/ri"; +import { CollapsibleSection } from "../../common/CollapsibleSection"; +import { MetadataSection } from "../../components/MetadataSection"; +import { HelpInfo } from "../../components/Tooltip"; +import { ServeApplicationsRsp, ServeHttpProxy } from "../../type/serve"; +import { ServeHttpProxyRow } from "./ServeSystemDetailRows"; + +const useStyles = makeStyles((theme) => + createStyles({ + table: {}, + helpInfo: { + marginLeft: theme.spacing(1), + }, + errorIcon: { + color: theme.palette.error.main, + width: 20, + height: 20, + }, + }), +); + +export type ServeDetails = Pick< + ServeApplicationsRsp, + "http_options" | "proxy_location" +>; + +type ServeSystemDetailsProps = { + serveDetails: ServeDetails; + httpProxies: ServeHttpProxy[]; + page: { pageSize: number; pageNo: number }; + setPage: (key: string, value: number) => void; +}; + +const columns: { label: string; helpInfo?: ReactElement; width?: string }[] = [ + { label: "Name" }, + { label: "Status" }, + { label: "Actions" }, + { label: "Node ID" }, + { label: "Actor ID" }, +]; + +export const ServeSystemDetails = ({ + serveDetails, + httpProxies, + page, + 
setPage, +}: ServeSystemDetailsProps) => { + const classes = useStyles(); + + const isUnhealthy = httpProxies.some(({ status }) => status === "UNHEALTHY"); + + return ( + + ) : undefined + } + > + {serveDetails.http_options && ( + + )} + +
    + setPage("pageNo", pageNo)} + /> +
    + + + + {columns.map(({ label, helpInfo, width }) => ( + + + {label} + {helpInfo && ( + + {helpInfo} + + )} + + + ))} + + + + {httpProxies + .slice( + (page.pageNo - 1) * page.pageSize, + page.pageNo * page.pageSize, + ) + .map((httpProxy) => ( + + ))} + +
    +
    +
    + ); +}; diff --git a/dashboard/client/src/pages/serve/hook/useServeApplications.ts b/dashboard/client/src/pages/serve/hook/useServeApplications.ts index 4ab54eb23158..e54545a79e94 100644 --- a/dashboard/client/src/pages/serve/hook/useServeApplications.ts +++ b/dashboard/client/src/pages/serve/hook/useServeApplications.ts @@ -3,6 +3,15 @@ import useSWR from "swr"; import { GlobalContext } from "../../../App"; import { API_REFRESH_INTERVAL_MS } from "../../../common/constants"; import { getServeApplications } from "../../../service/serve"; +import { ServeHTTPProxyStatus } from "../../../type/serve"; +import { ServeDetails } from "../ServeSystemDetails"; + +const SERVE_HTTP_PROXY_STATUS_SORT_ORDER: Record = + { + [ServeHTTPProxyStatus.UNHEALTHY]: 0, + [ServeHTTPProxyStatus.STARTING]: 1, + [ServeHTTPProxyStatus.HEALTHY]: 2, + }; export const useServeApplications = () => { const [page, setPage] = useState({ pageSize: 10, pageNo: 1 }); @@ -23,6 +32,11 @@ export const useServeApplications = () => { setFilter([...filter]); }; + const [httpProxiesPage, setHttpProxiesPage] = useState({ + pageSize: 10, + pageNo: 1, + }); + const { data, error } = useSWR( "useServeApplications", async () => { @@ -35,8 +49,8 @@ export const useServeApplications = () => { { refreshInterval: API_REFRESH_INTERVAL_MS }, ); - const serveDetails = data - ? { ...data.http_options, proxy_location: data.proxy_location } + const serveDetails: ServeDetails | undefined = data + ? { http_options: data.http_options, proxy_location: data.proxy_location } : undefined; const serveApplicationsList = data ? Object.values(data.applications).sort( @@ -44,6 +58,15 @@ export const useServeApplications = () => { ) : []; + const httpProxies = + data && data.http_proxies + ? 
Object.values(data.http_proxies).sort( + (a, b) => + SERVE_HTTP_PROXY_STATUS_SORT_ORDER[b.status] - + SERVE_HTTP_PROXY_STATUS_SORT_ORDER[a.status], + ) + : []; + return { serveDetails, filteredServeApplications: serveApplicationsList.filter((app) => @@ -51,10 +74,14 @@ export const useServeApplications = () => { f.val ? app[f.key] && (app[f.key] ?? "").includes(f.val) : true, ), ), + httpProxies, error, changeFilter, page, setPage: (key: string, val: number) => setPage({ ...page, [key]: val }), + httpProxiesPage, + setHttpProxiesPage: (key: string, val: number) => + setHttpProxiesPage({ ...httpProxiesPage, [key]: val }), ipLogMap, allServeApplications: serveApplicationsList, }; diff --git a/dashboard/client/src/type/serve.ts b/dashboard/client/src/type/serve.ts index 47d978518976..aa8d52159cf9 100644 --- a/dashboard/client/src/type/serve.ts +++ b/dashboard/client/src/type/serve.ts @@ -74,6 +74,7 @@ export type ServeReplica = { node_id: string | null; node_ip: string | null; start_time_s: number; + log_file_path: string | null; }; // Keep in sync with DeploymentMode in python/ray/serve/config.py @@ -84,12 +85,33 @@ export enum ServeDeploymentMode { FixedNumber = "FixedNumber", } +// Keep in sync with HTTPProxyStatus in python/ray/serve/_private/common.py +export enum ServeHTTPProxyStatus { + STARTING = "STARTING", + HEALTHY = "HEALTHY", + UNHEALTHY = "UNHEALTHY", +} + +export type ServeHttpProxy = { + node_id: string; + node_ip: string; + actor_id: string; + actor_name: string; + status: ServeHTTPProxyStatus; + log_file_path: string | null; +}; + export type ServeApplicationsRsp = { - http_options: { - host: string; - port: number; - }; + http_options: + | { + host: string; + port: number; + } + | undefined; proxy_location: ServeDeploymentMode; + http_proxies: { + [name: string]: ServeHttpProxy; + } | null; applications: { [name: string]: ServeApplication; }; diff --git a/python/ray/serve/_private/common.py b/python/ray/serve/_private/common.py index 
e8d7a78c0b3c..986071739061 100644 --- a/python/ray/serve/_private/common.py +++ b/python/ray/serve/_private/common.py @@ -367,6 +367,8 @@ class ServeDeployMode(str, Enum): MULTI_APP = "MULTI_APP" +# Keep in sync with ServeHTTPProxyStatus in +# python/ray/dashboard/client/src/type/serve.ts class HTTPProxyStatus(str, Enum): STARTING = "STARTING" HEALTHY = "HEALTHY" From b590e9f6f29c22336d67a7654e9f673243c78990 Mon Sep 17 00:00:00 2001 From: Justin Yu Date: Thu, 11 May 2023 16:11:26 -0700 Subject: [PATCH 357/424] [AIR] Remove hard-deprecated and unused code (#35163) Signed-off-by: Justin Yu --- .../results_preprocessors/__init__.py | 8 --- .../aggregate/__init__.py | 9 --- .../aggregate/aggregate_fn.py | 9 --- .../aggregate/aggregate_preprocessor.py | 9 --- .../aggregate/aggregate_utils.py | 9 --- .../_internal/results_preprocessors/index.py | 8 --- .../_internal/results_preprocessors/keys.py | 8 --- .../results_preprocessors/preprocessor.py | 8 --- python/ray/tune/tests/test_trainable_util.py | 19 ----- python/ray/tune/trainable/util.py | 72 ------------------- 10 files changed, 159 deletions(-) delete mode 100644 python/ray/train/_internal/results_preprocessors/__init__.py delete mode 100644 python/ray/train/_internal/results_preprocessors/aggregate/__init__.py delete mode 100644 python/ray/train/_internal/results_preprocessors/aggregate/aggregate_fn.py delete mode 100644 python/ray/train/_internal/results_preprocessors/aggregate/aggregate_preprocessor.py delete mode 100644 python/ray/train/_internal/results_preprocessors/aggregate/aggregate_utils.py delete mode 100644 python/ray/train/_internal/results_preprocessors/index.py delete mode 100644 python/ray/train/_internal/results_preprocessors/keys.py delete mode 100644 python/ray/train/_internal/results_preprocessors/preprocessor.py diff --git a/python/ray/train/_internal/results_preprocessors/__init__.py b/python/ray/train/_internal/results_preprocessors/__init__.py deleted file mode 100644 index 
aeaa22813837..000000000000 --- a/python/ray/train/_internal/results_preprocessors/__init__.py +++ /dev/null @@ -1,8 +0,0 @@ -raise DeprecationWarning( - "`ray.train.callbacks.results_preprocessors` and the `ray.train.Trainer` API are " - "deprecated in Ray " - "2.0, and are replaced by Ray AI Runtime (Ray AIR). Ray AIR " - "(https://docs.ray.io/en/latest/ray-air/getting-started.html) " - "provides greater functionality and a unified API " - "compared to the old Ray Train API. " -) diff --git a/python/ray/train/_internal/results_preprocessors/aggregate/__init__.py b/python/ray/train/_internal/results_preprocessors/aggregate/__init__.py deleted file mode 100644 index fae5be80db31..000000000000 --- a/python/ray/train/_internal/results_preprocessors/aggregate/__init__.py +++ /dev/null @@ -1,9 +0,0 @@ -raise DeprecationWarning( - "`ray.train.callbacks.results_preprocessors.aggregate` and the `ray.train.Trainer` " - "API are " - "deprecated in Ray " - "2.0, and are replaced by Ray AI Runtime (Ray AIR). Ray AIR " - "(https://docs.ray.io/en/latest/ray-air/getting-started.html) " - "provides greater functionality and a unified API " - "compared to the old Ray Train API. " -) diff --git a/python/ray/train/_internal/results_preprocessors/aggregate/aggregate_fn.py b/python/ray/train/_internal/results_preprocessors/aggregate/aggregate_fn.py deleted file mode 100644 index fae5be80db31..000000000000 --- a/python/ray/train/_internal/results_preprocessors/aggregate/aggregate_fn.py +++ /dev/null @@ -1,9 +0,0 @@ -raise DeprecationWarning( - "`ray.train.callbacks.results_preprocessors.aggregate` and the `ray.train.Trainer` " - "API are " - "deprecated in Ray " - "2.0, and are replaced by Ray AI Runtime (Ray AIR). Ray AIR " - "(https://docs.ray.io/en/latest/ray-air/getting-started.html) " - "provides greater functionality and a unified API " - "compared to the old Ray Train API. 
" -) diff --git a/python/ray/train/_internal/results_preprocessors/aggregate/aggregate_preprocessor.py b/python/ray/train/_internal/results_preprocessors/aggregate/aggregate_preprocessor.py deleted file mode 100644 index fae5be80db31..000000000000 --- a/python/ray/train/_internal/results_preprocessors/aggregate/aggregate_preprocessor.py +++ /dev/null @@ -1,9 +0,0 @@ -raise DeprecationWarning( - "`ray.train.callbacks.results_preprocessors.aggregate` and the `ray.train.Trainer` " - "API are " - "deprecated in Ray " - "2.0, and are replaced by Ray AI Runtime (Ray AIR). Ray AIR " - "(https://docs.ray.io/en/latest/ray-air/getting-started.html) " - "provides greater functionality and a unified API " - "compared to the old Ray Train API. " -) diff --git a/python/ray/train/_internal/results_preprocessors/aggregate/aggregate_utils.py b/python/ray/train/_internal/results_preprocessors/aggregate/aggregate_utils.py deleted file mode 100644 index fae5be80db31..000000000000 --- a/python/ray/train/_internal/results_preprocessors/aggregate/aggregate_utils.py +++ /dev/null @@ -1,9 +0,0 @@ -raise DeprecationWarning( - "`ray.train.callbacks.results_preprocessors.aggregate` and the `ray.train.Trainer` " - "API are " - "deprecated in Ray " - "2.0, and are replaced by Ray AI Runtime (Ray AIR). Ray AIR " - "(https://docs.ray.io/en/latest/ray-air/getting-started.html) " - "provides greater functionality and a unified API " - "compared to the old Ray Train API. " -) diff --git a/python/ray/train/_internal/results_preprocessors/index.py b/python/ray/train/_internal/results_preprocessors/index.py deleted file mode 100644 index aeaa22813837..000000000000 --- a/python/ray/train/_internal/results_preprocessors/index.py +++ /dev/null @@ -1,8 +0,0 @@ -raise DeprecationWarning( - "`ray.train.callbacks.results_preprocessors` and the `ray.train.Trainer` API are " - "deprecated in Ray " - "2.0, and are replaced by Ray AI Runtime (Ray AIR). 
Ray AIR " - "(https://docs.ray.io/en/latest/ray-air/getting-started.html) " - "provides greater functionality and a unified API " - "compared to the old Ray Train API. " -) diff --git a/python/ray/train/_internal/results_preprocessors/keys.py b/python/ray/train/_internal/results_preprocessors/keys.py deleted file mode 100644 index aeaa22813837..000000000000 --- a/python/ray/train/_internal/results_preprocessors/keys.py +++ /dev/null @@ -1,8 +0,0 @@ -raise DeprecationWarning( - "`ray.train.callbacks.results_preprocessors` and the `ray.train.Trainer` API are " - "deprecated in Ray " - "2.0, and are replaced by Ray AI Runtime (Ray AIR). Ray AIR " - "(https://docs.ray.io/en/latest/ray-air/getting-started.html) " - "provides greater functionality and a unified API " - "compared to the old Ray Train API. " -) diff --git a/python/ray/train/_internal/results_preprocessors/preprocessor.py b/python/ray/train/_internal/results_preprocessors/preprocessor.py deleted file mode 100644 index aeaa22813837..000000000000 --- a/python/ray/train/_internal/results_preprocessors/preprocessor.py +++ /dev/null @@ -1,8 +0,0 @@ -raise DeprecationWarning( - "`ray.train.callbacks.results_preprocessors` and the `ray.train.Trainer` API are " - "deprecated in Ray " - "2.0, and are replaced by Ray AI Runtime (Ray AIR). Ray AIR " - "(https://docs.ray.io/en/latest/ray-air/getting-started.html) " - "provides greater functionality and a unified API " - "compared to the old Ray Train API. 
" -) diff --git a/python/ray/tune/tests/test_trainable_util.py b/python/ray/tune/tests/test_trainable_util.py index f077938f1d75..65cf745ff998 100644 --- a/python/ray/tune/tests/test_trainable_util.py +++ b/python/ray/tune/tests/test_trainable_util.py @@ -10,7 +10,6 @@ import ray import ray._private.utils -import ray.cloudpickle as cloudpickle from ray.tune.utils.util import wait_for_gpu from ray.tune.utils.util import flatten_dict, unflatten_dict, unflatten_list_dict from ray.tune.trainable.util import TrainableUtil @@ -74,24 +73,6 @@ def testFindCheckpointDir(self): parent = os.path.dirname(found_dir) TrainableUtil.find_checkpoint_dir(parent) - def testPickleCheckpoint(self): - for i in range(5): - path = os.path.join(self.checkpoint_dir, str(i)) - with open(path, "w") as f: - f.write(str(i)) - - checkpoint_path = os.path.join(self.checkpoint_dir, "0") - - data_dict = TrainableUtil.pickle_checkpoint(checkpoint_path) - loaded = cloudpickle.loads(data_dict) - - checkpoint_name = os.path.basename(checkpoint_path) - self.assertEqual(loaded["checkpoint_name"], checkpoint_name) - - for i in range(5): - path = os.path.join(self.checkpoint_dir, str(i)) - self.assertEqual(loaded["data"][str(i)], open(path, "rb").read()) - class FlattenDictTest(unittest.TestCase): def test_output_type(self): diff --git a/python/ray/tune/trainable/util.py b/python/ray/tune/trainable/util.py index ff26376e220f..32967bfb43a3 100644 --- a/python/ray/tune/trainable/util.py +++ b/python/ray/tune/trainable/util.py @@ -18,7 +18,6 @@ from ray.air.config import ScalingConfig from ray.tune.registry import _ParameterRegistry from ray.tune.utils import _detect_checkpoint_function -from ray.util import placement_group from ray.util.annotations import DeveloperAPI, PublicAPI if TYPE_CHECKING: @@ -42,27 +41,6 @@ def load_metadata(checkpoint_dir: str) -> Dict: with open(os.path.join(checkpoint_dir, _TUNE_METADATA_FILENAME), "rb") as f: return pickle.load(f) - @staticmethod - def 
pickle_checkpoint(checkpoint_path: str): - """Pickles checkpoint data.""" - checkpoint_dir = TrainableUtil.find_checkpoint_dir(checkpoint_path) - data = {} - for basedir, _, file_names in os.walk(checkpoint_dir): - for file_name in file_names: - path = os.path.join(basedir, file_name) - with open(path, "rb") as f: - data[os.path.relpath(path, checkpoint_dir)] = f.read() - # Use normpath so that a directory path isn't mapped to empty string. - name = os.path.relpath(os.path.normpath(checkpoint_path), checkpoint_dir) - name += os.path.sep if os.path.isdir(checkpoint_path) else "" - data_dict = pickle.dumps( - { - "checkpoint_name": name, - "data": data, - } - ) - return data_dict - @staticmethod def find_checkpoint_dir(checkpoint_path): """Returns the directory containing the checkpoint path. @@ -218,56 +196,6 @@ def get_remote_storage_path( return str(uri / rel_local_path) -@DeveloperAPI -class PlacementGroupUtil: - @staticmethod - def get_remote_worker_options( - num_workers: int, - num_cpus_per_worker: int, - num_gpus_per_worker: int, - num_workers_per_host: Optional[int], - timeout_s: Optional[int], - ) -> (Dict[str, Any], placement_group): - """Returns the option for remote workers. - - Args: - num_workers: Number of training workers to include in - world. - num_cpus_per_worker: Number of CPU resources to reserve - per training worker. - num_gpus_per_worker: Number of GPU resources to reserve - per training worker. - num_workers_per_host: Optional[int]: Number of workers to - colocate per host. - timeout_s: Seconds before the torch process group - times out. Useful when machines are unreliable. Defaults - to 60 seconds. This value is also reused for triggering - placement timeouts if forcing colocation. - - - Returns: - type: option that contains CPU/GPU count of - the remote worker and the placement group information. 
- pg: return a reference to the placement group - """ - pg = None - options = dict(num_cpus=num_cpus_per_worker, num_gpus=num_gpus_per_worker) - if num_workers_per_host: - num_hosts = int(num_workers / num_workers_per_host) - cpus_per_node = num_cpus_per_worker * num_workers_per_host - gpus_per_node = num_gpus_per_worker * num_workers_per_host - bundle = {"CPU": cpus_per_node, "GPU": gpus_per_node} - - all_bundles = [bundle] * num_hosts - pg = placement_group(all_bundles, strategy="STRICT_SPREAD") - logger.debug("Waiting for placement_group to start.") - ray.get(pg.ready(), timeout=timeout_s) - logger.debug("Placement_group started.") - options["placement_group"] = pg - - return options, pg - - @PublicAPI(stability="beta") def with_parameters(trainable: Union[Type["Trainable"], Callable], **kwargs): """Wrapper for trainables to pass arbitrary large data objects. From a5974af3e797e97a6f07a80689f4d1bf3a42c053 Mon Sep 17 00:00:00 2001 From: Jiajun Yao Date: Thu, 11 May 2023 16:20:49 -0700 Subject: [PATCH 358/424] [Doc] [no_early_kickoff] Revamp ray core api reference [1/n] (#34428) We have coding style for function docstring but not for class docstring. This PR tries to propose one by following the same Google style guild and makes sure it works well with autogenerated class page. 
Signed-off-by: Jiajun Yao --- .../ray-contribute/getting-involved.rst | 72 ++++++++-- doc/source/ray-core/api/core.rst | 1 + python/ray/_private/worker.py | 8 +- python/ray/job_config.py | 127 ++++++++++++++---- python/ray/tests/test_client_proxy.py | 6 +- python/ray/tests/test_runtime_env.py | 2 +- python/ray/util/client/server/proxier.py | 4 +- python/ray/util/client/server/server.py | 4 +- 8 files changed, 173 insertions(+), 51 deletions(-) diff --git a/doc/source/ray-contribute/getting-involved.rst b/doc/source/ray-contribute/getting-involved.rst index 54cb8e8bb2d0..9bccb133a64d 100644 --- a/doc/source/ray-contribute/getting-involved.rst +++ b/doc/source/ray-contribute/getting-involved.rst @@ -112,7 +112,7 @@ The full suite of tests is too large to run on a single machine. However, you ca This will run all of the tests in the file. To run a specific test, use the following: .. code-block:: shell - + # Directly calling `pytest -v ...` may lose import paths. python -m pytest -v -s test_file.py::name_of_the_test @@ -136,9 +136,9 @@ Code Style In general, we follow the `Google style guide `__ for C++ code and the `Black code style `__ for Python code. Python imports follow `PEP8 style `__. However, it is more important for code to be in a locally consistent style than to strictly follow guidelines. Whenever in doubt, follow the local code style of the component. -For Python documentation, we follow a subset of the `Google pydoc format `__. The following code snippet demonstrates the canonical Ray pydoc formatting: +For Python documentation, we follow a subset of the `Google pydoc format `__. The following code snippets demonstrate the canonical Ray pydoc formatting: -.. code-block:: python +.. testcode:: def ray_canonical_doc_style(param1: int, param2: str) -> bool: """First sentence MUST be inline with the quotes and fit on one line. 
@@ -147,17 +147,19 @@ For Python documentation, we follow a subset of the `Google pydoc format >> # Provide code examples as possible. - >>> ray_canonical_doc_style(41, "hello") - True + .. doctest:: - >>> # A second example. - >>> ray_canonical_doc_style(72, "goodbye") - False + >>> # Provide code examples for key use cases, as possible. + >>> ray_canonical_doc_style(41, "hello") + True + + >>> # A second example. + >>> ray_canonical_doc_style(72, "goodbye") + False Args: param1: The first parameter. Do not include the types in the - docstring (they should be defined only in the signature). + docstring. They should be defined only in the signature. Multi-line parameter docs should be indented by four spaces. param2: The second parameter. @@ -165,6 +167,56 @@ For Python documentation, we follow a subset of the `Google pydoc format str: + """Public property of the class. + + Properties created with the @property decorator + should be documented here. + """ + return "hello" + + def increment_attr1(self) -> None: + """Class methods are similar to regular functions. + + See above about how to document functions. + """ + + self.attr1 = self.attr1 + 1 + +See :ref:`this ` for more details about how to write code snippets in docstrings. + Lint and Formatting ~~~~~~~~~~~~~~~~~~~ diff --git a/doc/source/ray-core/api/core.rst b/doc/source/ray-core/api/core.rst index c78e9afc418c..5bd7776d6dd6 100644 --- a/doc/source/ray-core/api/core.rst +++ b/doc/source/ray-core/api/core.rst @@ -7,6 +7,7 @@ Core API ray.init ray.shutdown ray.is_initialized + ray.job_config.JobConfig Tasks ----- diff --git a/python/ray/_private/worker.py b/python/ray/_private/worker.py index a7f07fc5763e..1bb275a2312e 100644 --- a/python/ray/_private/worker.py +++ b/python/ray/_private/worker.py @@ -2164,7 +2164,7 @@ def connect( # If it's a driver and it's not coming from ray client, we'll prepare the # environment here. If it's ray client, the environment will be prepared # at the server side. 
- if mode == SCRIPT_MODE and not job_config.client_job and job_config.runtime_env: + if mode == SCRIPT_MODE and not job_config._client_job and job_config.runtime_env: scratch_dir: str = worker.node.get_runtime_env_dir_path() runtime_env = job_config.runtime_env or {} runtime_env = upload_py_modules_if_needed( @@ -2194,13 +2194,13 @@ def connect( code_paths.append(script_directory) # In client mode, if we use runtime envs with "working_dir", then # it'll be handled automatically. Otherwise, add the current dir. - if not job_config.client_job and not job_config.runtime_env_has_working_dir(): + if not job_config._client_job and not job_config._runtime_env_has_working_dir(): current_directory = os.path.abspath(os.path.curdir) code_paths.append(current_directory) if len(code_paths) != 0: - job_config.py_driver_sys_path.extend(code_paths) + job_config._py_driver_sys_path.extend(code_paths) - serialized_job_config = job_config.serialize() + serialized_job_config = job_config._serialize() if not node.should_redirect_logs(): # Logging to stderr, so give core worker empty logs directory. logs_dir = "" diff --git a/python/ray/job_config.py b/python/ray/job_config.py index 2772902fa098..5d5b818dc39e 100644 --- a/python/ray/job_config.py +++ b/python/ray/job_config.py @@ -1,69 +1,115 @@ import uuid -from typing import Any, Dict, List, Optional, Union +from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union import ray._private.gcs_utils as gcs_utils from ray.util.annotations import PublicAPI +if TYPE_CHECKING: + from ray.runtime_env import RuntimeEnv + @PublicAPI class JobConfig: """A class used to store the configurations of a job. - Attributes: + Examples: + .. testcode:: + :hide: + + import ray + ray.shutdown() + + .. testcode:: + + import ray + from ray.job_config import JobConfig + + ray.init(job_config=JobConfig(default_actor_lifetime="non_detached")) + + Args: jvm_options: The jvm options for java workers of the job. 
code_search_path: A list of directories or jar files that specify the search path for user code. This will be used as `CLASSPATH` in Java and `PYTHONPATH` in Python. - runtime_env: A runtime environment dictionary (see - ``runtime_env.py`` for detailed documentation). - client_job: A boolean represent the source of the job. - default_actor_lifetime: The default value of actor lifetime. - py_driver_sys_path: A list of directories that - specify the search path for python workers. + See :ref:`Ray cross-language programming ` for more details. + runtime_env: A :ref:`runtime environment ` dictionary. + metadata: An opaque metadata dictionary. + ray_namespace: A :ref:`namespace ` + is a logical grouping of jobs and named actors. + default_actor_lifetime: The default value of actor lifetime, + can be "detached" or "non_detached". + See :ref:`actor lifetimes ` for more details. """ def __init__( self, - jvm_options: List[str] = None, - code_search_path: List[str] = None, - runtime_env: dict = None, - client_job: bool = False, + jvm_options: Optional[List[str]] = None, + code_search_path: Optional[List[str]] = None, + runtime_env: Optional[dict] = None, + _client_job: bool = False, metadata: Optional[dict] = None, ray_namespace: Optional[str] = None, default_actor_lifetime: str = "non_detached", - py_driver_sys_path: List[str] = None, + _py_driver_sys_path: Optional[List[str]] = None, ): + #: The jvm options for java workers of the job. self.jvm_options = jvm_options or [] + #: A list of directories or jar files that + #: specify the search path for user code. self.code_search_path = code_search_path or [] # It's difficult to find the error that caused by the # code_search_path is a string. So we assert here. assert isinstance(self.code_search_path, (list, tuple)), ( f"The type of code search path is incorrect: " f"{type(code_search_path)}" ) - self.client_job = client_job + self._client_job = _client_job + #: An opaque metadata dictionary. 
self.metadata = metadata or {} + #: A namespace is a logical grouping of jobs and named actors. self.ray_namespace = ray_namespace self.set_runtime_env(runtime_env) self.set_default_actor_lifetime(default_actor_lifetime) - self.py_driver_sys_path = py_driver_sys_path or [] + # A list of directories that specify the search path for python workers. + self._py_driver_sys_path = _py_driver_sys_path or [] def set_metadata(self, key: str, value: str) -> None: + """Add key-value pair to the metadata dictionary. + + If the key already exists, the value is overwritten to the new value. + + Examples: + .. testcode:: + + import ray + from ray.job_config import JobConfig + + job_config = JobConfig() + job_config.set_metadata("submitter", "foo") + + Args: + key: The key of the metadata. + value: The value of the metadata. + """ self.metadata[key] = value - def serialize(self): + def _serialize(self) -> str: """Serialize the struct into protobuf string""" - return self.get_proto_job_config().SerializeToString() + return self._get_proto_job_config().SerializeToString() def set_runtime_env( self, - runtime_env: Optional[Union[Dict[str, Any], "RuntimeEnv"]], # noqa: F821 + runtime_env: Optional[Union[Dict[str, Any], "RuntimeEnv"]], validate: bool = False, ) -> None: """Modify the runtime_env of the JobConfig. We don't validate the runtime_env by default here because it may go through some translation before actually being passed to C++ (e.g., - working_dir translated from a local directory to a URI. + working_dir translated from a local directory to a URI). + + Args: + runtime_env: A :ref:`runtime environment ` dictionary. + validate: Whether to validate the runtime env. """ self.runtime_env = runtime_env if runtime_env is not None else {} if validate: @@ -71,11 +117,24 @@ def set_runtime_env( self._cached_pb = None def set_ray_namespace(self, ray_namespace: str) -> None: + """Set Ray :ref:`namespace `. + + Args: + ray_namespace: The namespace to set. 
+ """ + if ray_namespace != self.ray_namespace: self.ray_namespace = ray_namespace self._cached_pb = None def set_default_actor_lifetime(self, default_actor_lifetime: str) -> None: + """Set the default actor lifetime, which can be "detached" or "non_detached". + + See :ref:`actor lifetimes ` for more details. + + Args: + default_actor_lifetime: The default actor lifetime to set. + """ if default_actor_lifetime == "detached": self._default_actor_lifetime = gcs_utils.JobConfig.ActorLifetime.DETACHED elif default_actor_lifetime == "non_detached": @@ -97,7 +156,7 @@ def _validate_runtime_env(self): return self.runtime_env return RuntimeEnv(**self.runtime_env) - def get_proto_job_config(self): + def _get_proto_job_config(self): """Return the protobuf structure of JobConfig.""" # TODO(edoakes): this is really unfortunate, but JobConfig is imported # all over the place so this causes circular imports. We should remove @@ -112,7 +171,7 @@ def get_proto_job_config(self): pb.ray_namespace = self.ray_namespace pb.jvm_options.extend(self.jvm_options) pb.code_search_path.extend(self.code_search_path) - pb.py_driver_sys_path.extend(self.py_driver_sys_path) + pb.py_driver_sys_path.extend(self._py_driver_sys_path) for k, v in self.metadata.items(): pb.metadata[k] = v @@ -131,28 +190,38 @@ def get_proto_job_config(self): return self._cached_pb - def runtime_env_has_working_dir(self): + def _runtime_env_has_working_dir(self): return self._validate_runtime_env().has_working_dir() - def get_serialized_runtime_env(self) -> str: + def _get_serialized_runtime_env(self) -> str: """Return the JSON-serialized parsed runtime env dict""" return self._validate_runtime_env().serialize() - def get_proto_runtime_env_config(self) -> str: + def _get_proto_runtime_env_config(self) -> str: """Return the JSON-serialized parsed runtime env info""" - return self.get_proto_job_config().runtime_env_info.runtime_env_config + return self._get_proto_job_config().runtime_env_info.runtime_env_config 
@classmethod def from_json(cls, job_config_json): - """ - Generates a JobConfig object from json. + """Generates a JobConfig object from json. + + Examples: + .. testcode:: + + from ray.job_config import JobConfig + + job_config = JobConfig.from_json( + {"runtime_env": {"working_dir": "uri://abc"}}) + + Args: + job_config_json: The job config json dictionary. """ return cls( jvm_options=job_config_json.get("jvm_options", None), code_search_path=job_config_json.get("code_search_path", None), runtime_env=job_config_json.get("runtime_env", None), - client_job=job_config_json.get("client_job", False), metadata=job_config_json.get("metadata", None), ray_namespace=job_config_json.get("ray_namespace", None), - py_driver_sys_path=job_config_json.get("py_driver_sys_path", None), + _client_job=job_config_json.get("client_job", False), + _py_driver_sys_path=job_config_json.get("py_driver_sys_path", None), ) diff --git a/python/ray/tests/test_client_proxy.py b/python/ray/tests/test_client_proxy.py index f23646263964..c52106dd70b8 100644 --- a/python/ray/tests/test_client_proxy.py +++ b/python/ray/tests/test_client_proxy.py @@ -272,9 +272,9 @@ def test_prepare_runtime_init_req_no_modification(): ), ) req, new_config = proxier.prepare_runtime_init_req(init_req) - assert new_config.serialize() == job_config.serialize() + assert new_config._serialize() == job_config._serialize() assert isinstance(req, ray_client_pb2.DataRequest) - assert pickle.loads(req.init.job_config).serialize() == new_config.serialize() + assert pickle.loads(req.init.job_config)._serialize() == new_config._serialize() assert json.loads(req.init.ray_init_kwargs) == {"log_to_driver": False} @@ -301,7 +301,7 @@ def modify_namespace(job_config: JobConfig): req, new_config = proxier.prepare_runtime_init_req(init_req) assert new_config.ray_namespace == "test_value" - assert pickle.loads(req.init.job_config).serialize() == new_config.serialize() + assert pickle.loads(req.init.job_config)._serialize() == 
new_config._serialize() assert json.loads(req.init.ray_init_kwargs) == {"log_to_driver": False} diff --git a/python/ray/tests/test_runtime_env.py b/python/ray/tests/test_runtime_env.py index 74eb3d51fb9e..fdb05f6f79eb 100644 --- a/python/ray/tests/test_runtime_env.py +++ b/python/ray/tests/test_runtime_env.py @@ -219,7 +219,7 @@ def test_container_option_serialize(runtime_env_class): container={"image": "ray:latest", "run_options": ["--name=test"]} ) job_config = ray.job_config.JobConfig(runtime_env=runtime_env) - job_config_serialized = job_config.serialize() + job_config_serialized = job_config._serialize() # job_config_serialized is JobConfig protobuf serialized string, # job_config.runtime_env_info.serialized_runtime_env # has container_option info diff --git a/python/ray/util/client/server/proxier.py b/python/ray/util/client/server/proxier.py index 0b3128d090ba..c648104a9968 100644 --- a/python/ray/util/client/server/proxier.py +++ b/python/ray/util/client/server/proxier.py @@ -294,8 +294,8 @@ def start_specific_server(self, client_id: str, job_config: JobConfig) -> bool: f"ray_client_server_{specific_server.port}", unique=True ) - serialized_runtime_env = job_config.get_serialized_runtime_env() - runtime_env_config = job_config.get_proto_runtime_env_config() + serialized_runtime_env = job_config._get_serialized_runtime_env() + runtime_env_config = job_config._get_proto_runtime_env_config() if not serialized_runtime_env or serialized_runtime_env == "{}": # TODO(edoakes): can we just remove this case and always send it # to the agent? 
diff --git a/python/ray/util/client/server/server.py b/python/ray/util/client/server/server.py index e5f97251e5a0..962b8bbb1c19 100644 --- a/python/ray/util/client/server/server.py +++ b/python/ray/util/client/server/server.py @@ -118,7 +118,7 @@ def Init( ) -> ray_client_pb2.InitResponse: if request.job_config: job_config = pickle.loads(request.job_config) - job_config.client_job = True + job_config._client_job = True else: job_config = None current_job_config = None @@ -144,7 +144,7 @@ def Init( # that tests the behavior of multiple clients with the same job config # connecting to one server (test_client_init.py::test_num_clients), # so I'm leaving it here for now. - job_config = job_config.get_proto_job_config() + job_config = job_config._get_proto_job_config() # If the server has been initialized, we need to compare whether the # runtime env is compatible. if current_job_config: From c13f14c151e8a65531035929851dc0cf36d120f3 Mon Sep 17 00:00:00 2001 From: angelinalg <122562471+angelinalg@users.noreply.github.com> Date: Thu, 11 May 2023 17:20:41 -0700 Subject: [PATCH 359/424] [docs][observability] O11y refactor 1/N (#35158) --- doc/source/_toc.yml | 15 +++++++++++++++ doc/source/ray-contribute/debugging.rst | 6 ++++-- doc/source/ray-contribute/profiling.rst | 6 +++--- .../monitoring-debugging/monitoring-debugging.rst | 7 ------- doc/source/ray-observability/overview.rst | 2 ++ doc/source/ray-observability/user-guides/index.md | 9 +++++++++ .../{ => user-guides}/ray-tracing.rst | 0 .../user-guides/troubleshoot-apps/index.md | 10 ++++++++++ .../troubleshoot-apps/optimize-performance.rst} | 6 ++++-- .../troubleshoot-apps}/profiling.rst | 0 .../troubleshoot-apps}/ray-debugging.rst | 4 ++-- .../troubleshoot-apps}/troubleshoot-failures.rst | 2 ++ .../troubleshoot-apps}/troubleshoot-hangs.rst | 2 ++ 13 files changed, 53 insertions(+), 16 deletions(-) create mode 100644 doc/source/ray-observability/user-guides/index.md rename doc/source/ray-observability/{ => 
user-guides}/ray-tracing.rst (100%) create mode 100644 doc/source/ray-observability/user-guides/troubleshoot-apps/index.md rename doc/source/ray-observability/{monitoring-debugging/troubleshoot-performance.rst => user-guides/troubleshoot-apps/optimize-performance.rst} (97%) rename doc/source/ray-observability/{monitoring-debugging => user-guides/troubleshoot-apps}/profiling.rst (100%) rename doc/source/ray-observability/{ => user-guides/troubleshoot-apps}/ray-debugging.rst (99%) rename doc/source/ray-observability/{monitoring-debugging => user-guides/troubleshoot-apps}/troubleshoot-failures.rst (99%) rename doc/source/ray-observability/{monitoring-debugging => user-guides/troubleshoot-apps}/troubleshoot-hangs.rst (93%) diff --git a/doc/source/_toc.yml b/doc/source/_toc.yml index 55eaedf08b64..39c494d142cf 100644 --- a/doc/source/_toc.yml +++ b/doc/source/_toc.yml @@ -384,6 +384,19 @@ parts: - file: ray-observability/monitoring-debugging/monitoring-debugging title: "Monitoring and Debugging" + sections: + - file: ray-observability/user-guides/index + title: User Guides + sections: + - file: ray-observability/user-guides/troubleshoot-apps/index + title: Troubleshooting Applications + sections: + - file: ray-observability/user-guides/troubleshoot-apps/troubleshoot-failures + - file: ray-observability/user-guides/troubleshoot-apps/troubleshoot-hangs + - file: ray-observability/user-guides/troubleshoot-apps/optimize-performance + - file: ray-observability/user-guides/troubleshoot-apps/ray-debugging + - file: ray-observability/user-guides/troubleshoot-apps/ray-core-profiling + - file: ray-observability/user-guides/ray-tracing - file: ray-references/api title: References @@ -401,5 +414,7 @@ parts: - file: ray-contribute/writing-code-snippets - file: ray-contribute/fake-autoscaler - file: ray-core/examples/testing-tips + - file: ray-contribute/debugging.rst + - file: ray-contribute/profiling.rst - file: ray-core/configure - file: ray-contribute/whitepaper diff --git 
a/doc/source/ray-contribute/debugging.rst b/doc/source/ray-contribute/debugging.rst index b156b35e6b61..66a602e891b0 100644 --- a/doc/source/ray-contribute/debugging.rst +++ b/doc/source/ray-contribute/debugging.rst @@ -1,5 +1,7 @@ -Debugging (internal) -==================== +Debugging for Ray Developers +============================ + +This debugging guide is for contributors to the Ray project. Starting processes in a debugger -------------------------------- diff --git a/doc/source/ray-contribute/profiling.rst b/doc/source/ray-contribute/profiling.rst index 88263f9225fd..f9d74b11add3 100644 --- a/doc/source/ray-contribute/profiling.rst +++ b/doc/source/ray-contribute/profiling.rst @@ -1,9 +1,9 @@ .. _ray-core-internal-profiling: -Profiling (internal) -==================== +Profiling for Ray Developers +============================ -This document details, for Ray developers, how to analyze Ray performance. +This guide helps contributors to the Ray project analyze Ray performance. Getting a stack trace of Ray C++ processes ------------------------------------------ diff --git a/doc/source/ray-observability/monitoring-debugging/monitoring-debugging.rst b/doc/source/ray-observability/monitoring-debugging/monitoring-debugging.rst index 4dd9b646d0f7..6d882c34fe59 100644 --- a/doc/source/ray-observability/monitoring-debugging/monitoring-debugging.rst +++ b/doc/source/ray-observability/monitoring-debugging/monitoring-debugging.rst @@ -13,15 +13,8 @@ See :ref:`Getting Help ` if your problem is not s ../overview ../../ray-core/ray-dashboard ../state/state-api - ../ray-debugging ../ray-logging ../ray-metrics profiling - ../ray-tracing - troubleshoot-failures - troubleshoot-hangs - troubleshoot-performance gotchas getting-help - ../../ray-contribute/debugging.rst - ../../ray-contribute/profiling.rst diff --git a/doc/source/ray-observability/overview.rst b/doc/source/ray-observability/overview.rst index 3e073c51a43d..8919d3f29b5a 100644 --- 
a/doc/source/ray-observability/overview.rst +++ b/doc/source/ray-observability/overview.rst @@ -1,3 +1,5 @@ +.. _observability-overview: + Overview ======== diff --git a/doc/source/ray-observability/user-guides/index.md b/doc/source/ray-observability/user-guides/index.md new file mode 100644 index 000000000000..a8772bfe58a1 --- /dev/null +++ b/doc/source/ray-observability/user-guides/index.md @@ -0,0 +1,9 @@ +(observability-user-guides)= + +# User Guides + +These guides help you monitor and debug your Ray applications and clusters. + +The guides include: +* {ref}`observability-troubleshoot-user-guides` +* {ref}`ray-tracing` \ No newline at end of file diff --git a/doc/source/ray-observability/ray-tracing.rst b/doc/source/ray-observability/user-guides/ray-tracing.rst similarity index 100% rename from doc/source/ray-observability/ray-tracing.rst rename to doc/source/ray-observability/user-guides/ray-tracing.rst diff --git a/doc/source/ray-observability/user-guides/troubleshoot-apps/index.md b/doc/source/ray-observability/user-guides/troubleshoot-apps/index.md new file mode 100644 index 000000000000..cd6562375a40 --- /dev/null +++ b/doc/source/ray-observability/user-guides/troubleshoot-apps/index.md @@ -0,0 +1,10 @@ +(observability-troubleshoot-user-guides)= + +# Troubleshooting Applications + +These guides help you perform common debugging or optimization tasks for your distributed application on Ray: +* {ref}`observability-troubleshoot-failures` +* {ref}`observability-troubleshoot-hangs` +* {ref}`observability-optimize-performance` +* {ref}`ray-debugger` +* {ref}`ray-core-profiling` \ No newline at end of file diff --git a/doc/source/ray-observability/monitoring-debugging/troubleshoot-performance.rst b/doc/source/ray-observability/user-guides/troubleshoot-apps/optimize-performance.rst similarity index 97% rename from doc/source/ray-observability/monitoring-debugging/troubleshoot-performance.rst rename to 
doc/source/ray-observability/user-guides/troubleshoot-apps/optimize-performance.rst index 1127ea5d1e71..465f7b6b5c52 100644 --- a/doc/source/ray-observability/monitoring-debugging/troubleshoot-performance.rst +++ b/doc/source/ray-observability/user-guides/troubleshoot-apps/optimize-performance.rst @@ -1,5 +1,7 @@ -Troubleshooting Performance -=========================== +.. _observability-optimize-performance: + +Optimizing Performance +====================== No Speedup ---------- diff --git a/doc/source/ray-observability/monitoring-debugging/profiling.rst b/doc/source/ray-observability/user-guides/troubleshoot-apps/profiling.rst similarity index 100% rename from doc/source/ray-observability/monitoring-debugging/profiling.rst rename to doc/source/ray-observability/user-guides/troubleshoot-apps/profiling.rst diff --git a/doc/source/ray-observability/ray-debugging.rst b/doc/source/ray-observability/user-guides/troubleshoot-apps/ray-debugging.rst similarity index 99% rename from doc/source/ray-observability/ray-debugging.rst rename to doc/source/ray-observability/user-guides/troubleshoot-apps/ray-debugging.rst index 99002237c6ff..afc98df070a6 100644 --- a/doc/source/ray-observability/ray-debugging.rst +++ b/doc/source/ray-observability/user-guides/troubleshoot-apps/ray-debugging.rst @@ -1,7 +1,7 @@ .. _ray-debugger: -Ray Debugger -============= +Using the Ray Debugger +====================== Ray has a built in debugger that allows you to debug your distributed applications. 
It allows to set breakpoints in your Ray tasks and actors and when hitting the breakpoint you can diff --git a/doc/source/ray-observability/monitoring-debugging/troubleshoot-failures.rst b/doc/source/ray-observability/user-guides/troubleshoot-apps/troubleshoot-failures.rst similarity index 99% rename from doc/source/ray-observability/monitoring-debugging/troubleshoot-failures.rst rename to doc/source/ray-observability/user-guides/troubleshoot-apps/troubleshoot-failures.rst index 0659c4f7e435..046c3e1bb2d2 100644 --- a/doc/source/ray-observability/monitoring-debugging/troubleshoot-failures.rst +++ b/doc/source/ray-observability/user-guides/troubleshoot-apps/troubleshoot-failures.rst @@ -1,3 +1,5 @@ +.. _observability-troubleshoot-failures: + Troubleshooting Failures ======================== diff --git a/doc/source/ray-observability/monitoring-debugging/troubleshoot-hangs.rst b/doc/source/ray-observability/user-guides/troubleshoot-apps/troubleshoot-hangs.rst similarity index 93% rename from doc/source/ray-observability/monitoring-debugging/troubleshoot-hangs.rst rename to doc/source/ray-observability/user-guides/troubleshoot-apps/troubleshoot-hangs.rst index 3a4519eb7ec2..0725e8863bb1 100644 --- a/doc/source/ray-observability/monitoring-debugging/troubleshoot-hangs.rst +++ b/doc/source/ray-observability/user-guides/troubleshoot-apps/troubleshoot-hangs.rst @@ -1,3 +1,5 @@ +.. 
_observability-troubleshoot-hangs: + Troubleshooting Hangs ===================== From 9ee063fc4a62190b3a94dc0e29c006e14ca87d0f Mon Sep 17 00:00:00 2001 From: matthewdeng Date: Thu, 11 May 2023 23:25:53 -0700 Subject: [PATCH 360/424] [train] Fix HuggingFace -> Transformers wrapping logic 2 (#35284) Signed-off-by: Matthew Deng --- python/ray/train/huggingface/huggingface_checkpoint.py | 2 +- python/ray/train/huggingface/huggingface_predictor.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/python/ray/train/huggingface/huggingface_checkpoint.py b/python/ray/train/huggingface/huggingface_checkpoint.py index fc96c521d4ea..35539c151323 100644 --- a/python/ray/train/huggingface/huggingface_checkpoint.py +++ b/python/ray/train/huggingface/huggingface_checkpoint.py @@ -14,7 +14,7 @@ class HuggingFaceCheckpoint(TransformersCheckpoint): # than __init__ def __new__(cls: type, *args, **kwargs): warnings.warn(deprecation_msg, DeprecationWarning) - return super(HuggingFaceCheckpoint, cls).__new__(cls, *args, **kwargs) + return super(HuggingFaceCheckpoint, cls).__new__(cls) __all__ = [ diff --git a/python/ray/train/huggingface/huggingface_predictor.py b/python/ray/train/huggingface/huggingface_predictor.py index 9d276f7a3649..fd90557e80f5 100644 --- a/python/ray/train/huggingface/huggingface_predictor.py +++ b/python/ray/train/huggingface/huggingface_predictor.py @@ -14,7 +14,7 @@ class HuggingFacePredictor(TransformersPredictor): # than __init__ def __new__(cls: type, *args, **kwargs): warnings.warn(deprecation_msg, DeprecationWarning) - return super(HuggingFacePredictor, cls).__new__(cls, *args, **kwargs) + return super(HuggingFacePredictor, cls).__new__(cls) __all__ = [ From 5403e277841b09c004fc140c923dfe5e1f3bed87 Mon Sep 17 00:00:00 2001 From: Sihan Wang Date: Fri, 12 May 2023 08:49:01 -0700 Subject: [PATCH 361/424] [Serve] Add route tags with custom metrics (#35246) - Add route value automatically with custom metrics - Fix some metrics tests. 
--- python/ray/serve/metrics.py | 33 ++++++++++- python/ray/serve/tests/test_metrics.py | 78 ++++++++++++++++++-------- 2 files changed, 86 insertions(+), 25 deletions(-) diff --git a/python/ray/serve/metrics.py b/python/ray/serve/metrics.py index 1e8c8c15d106..80b671a1c9a6 100644 --- a/python/ray/serve/metrics.py +++ b/python/ray/serve/metrics.py @@ -1,10 +1,12 @@ from ray.util import metrics -from typing import Tuple, Optional, Dict, List +from typing import Tuple, Optional, Dict, List, Union from ray.serve import context +import ray DEPLOYMENT_TAG = "deployment" REPLICA_TAG = "replica" APPLICATION_TAG = "application" +ROUTE_TAG = "route" def _add_serve_metric_tags(tag_keys: Optional[Tuple[str]] = None) -> Tuple[str]: @@ -52,6 +54,14 @@ def _add_serve_metric_default_tags(default_tags: Dict[str, str]): return default_tags +def _add_serve_context_tag_values(tag_keys: Tuple, tags: Dict[str, str]): + """Add serve context tag values to the metric tags""" + + _request_context = ray.serve.context._serve_request_context.get() + if ROUTE_TAG in tag_keys and ROUTE_TAG not in tags: + tags[ROUTE_TAG] = _request_context.route + + class Counter(metrics.Counter): def __init__( self, name: str, description: str = "", tag_keys: Optional[Tuple[str]] = None @@ -67,6 +77,13 @@ def __init__( def set_default_tags(self, default_tags: Dict[str, str]): super().set_default_tags(_add_serve_metric_default_tags(default_tags)) + def inc(self, value: Union[int, float] = 1.0, tags: Dict[str, str] = None): + """Increment the counter by the given value, add serve context + tag values to the tags + """ + _add_serve_context_tag_values(self._tag_keys, tags) + super().inc(value, tags) + class Gauge(metrics.Gauge): def __init__( @@ -83,6 +100,13 @@ def __init__( def set_default_tags(self, default_tags: Dict[str, str]): super().set_default_tags(_add_serve_metric_default_tags(default_tags)) + def set(self, value: Union[int, float], tags: Dict[str, str] = None): + """Set the gauge to the given value, add 
serve context + tag values to the tags + """ + _add_serve_context_tag_values(self._tag_keys, tags) + super().set(value, tags) + class Histogram(metrics.Histogram): def __init__( @@ -102,3 +126,10 @@ def __init__( def set_default_tags(self, default_tags: Dict[str, str]): super().set_default_tags(_add_serve_metric_default_tags(default_tags)) + + def observe(self, value: Union[int, float], tags: Dict[str, str] = None): + """Observe the given value, add serve context + tag values to the tags + """ + _add_serve_context_tag_values(self._tag_keys, tags) + super().observe(value, tags) diff --git a/python/ray/serve/tests/test_metrics.py b/python/ray/serve/tests/test_metrics.py index 794fae8aff58..f9e1ecf6b91c 100644 --- a/python/ray/serve/tests/test_metrics.py +++ b/python/ray/serve/tests/test_metrics.py @@ -406,6 +406,10 @@ def _generate_metrics_summary(self, metrics): ] return metrics_summary_route, metrics_summary_app + def verify_metrics(self, metric, expected_output): + for key in expected_output: + assert metric[key] == expected_output[key] + def test_request_context_pass_for_http_proxy(self, serve_start_shutdown): """Test HTTP proxy passing request context""" @@ -475,7 +479,7 @@ def h(): "serve_deployment_processing_latency_ms_sum", ]: metrics_route, metrics_app_name = self._generate_metrics_summary( - get_metric_dictionaries("serve_handle_request_counter") + get_metric_dictionaries(metric_name) ) assert metrics_route["app1_f"] == {"/app1"} assert metrics_route["app2_g"] == {"/app2"} @@ -552,6 +556,7 @@ def __init__(self): tag_keys=( "my_static_tag", "my_runtime_tag", + "route", ), ) self.counter.set_default_tags({"my_static_tag": "static_value"}) @@ -562,6 +567,7 @@ def __init__(self): tag_keys=( "my_static_tag", "my_runtime_tag", + "route", ), ) self.histogram.set_default_tags({"my_static_tag": "static_value"}) @@ -571,6 +577,7 @@ def __init__(self): tag_keys=( "my_static_tag", "my_runtime_tag", + "route", ), ) self.gauge.set_default_tags({"my_static_tag": 
"static_value"}) @@ -591,29 +598,42 @@ def __call__(self): lambda: len(get_metric_dictionaries("my_gauge")) == 1, timeout=20, ) + counter_metrics = get_metric_dictionaries("my_counter") assert len(counter_metrics) == 1 - counter_metrics[0]["my_static_tag"] == "static_value" - counter_metrics[0]["my_runtime_tag"] == "100" - counter_metrics[0]["replica"] == replica_tag - counter_metrics[0]["deployment"] == deployment_name - counter_metrics[0]["application"] == "app" - + expected_metrics = { + "my_static_tag": "static_value", + "my_runtime_tag": "100", + "replica": replica_tag, + "deployment": deployment_name, + "application": "app", + "route": "/app", + } + self.verify_metrics(counter_metrics[0], expected_metrics) + + expected_metrics = { + "my_static_tag": "static_value", + "my_runtime_tag": "300", + "replica": replica_tag, + "deployment": deployment_name, + "application": "app", + "route": "/app", + } gauge_metrics = get_metric_dictionaries("my_gauge") assert len(counter_metrics) == 1 - gauge_metrics[0]["my_static_tag"] == "static_value" - gauge_metrics[0]["my_runtime_tag"] == "300" - gauge_metrics[0]["replica"] == replica_tag - gauge_metrics[0]["deployment"] == deployment_name - gauge_metrics[0]["application"] == "app" - + self.verify_metrics(gauge_metrics[0], expected_metrics) + + expected_metrics = { + "my_static_tag": "static_value", + "my_runtime_tag": "200", + "replica": replica_tag, + "deployment": deployment_name, + "application": "app", + "route": "/app", + } histogram_metrics = get_metric_dictionaries("my_histogram_sum") assert len(histogram_metrics) == 1 - histogram_metrics[0]["my_static_tag"] == "static_value" - histogram_metrics[0]["my_runtime_tag"] == "200" - histogram_metrics[0]["replica"] == replica_tag - histogram_metrics[0]["deployment"] == deployment_name - gauge_metrics[0]["application"] == "app" + self.verify_metrics(histogram_metrics[0], expected_metrics) @pytest.mark.parametrize("use_actor", [False, True]) def 
test_serve_metrics_outside_serve(self, use_actor, serve_start_shutdown): @@ -714,20 +734,30 @@ async def __call__(self): lambda: len(get_metric_dictionaries("my_gauge")) == 1, timeout=20, ) + counter_metrics = get_metric_dictionaries("my_counter") assert len(counter_metrics) == 1 - counter_metrics[0]["my_static_tag"] == "static_value" - counter_metrics[0]["my_runtime_tag"] == "100" + expected_metrics = { + "my_static_tag": "static_value", + "my_runtime_tag": "100", + } + self.verify_metrics(counter_metrics[0], expected_metrics) gauge_metrics = get_metric_dictionaries("my_gauge") assert len(counter_metrics) == 1 - gauge_metrics[0]["my_static_tag"] == "static_value" - gauge_metrics[0]["my_runtime_tag"] == "300" + expected_metrics = { + "my_static_tag": "static_value", + "my_runtime_tag": "300", + } + self.verify_metrics(gauge_metrics[0], expected_metrics) histogram_metrics = get_metric_dictionaries("my_histogram_sum") assert len(histogram_metrics) == 1 - histogram_metrics[0]["my_static_tag"] == "static_value" - histogram_metrics[0]["my_runtime_tag"] == "200" + expected_metrics = { + "my_static_tag": "static_value", + "my_runtime_tag": "200", + } + self.verify_metrics(histogram_metrics[0], expected_metrics) def test_actor_summary(serve_instance): From e0f6b41a84d2b4eb298142cd915d50d3bbe57a7a Mon Sep 17 00:00:00 2001 From: Sihan Wang Date: Fri, 12 May 2023 08:49:45 -0700 Subject: [PATCH 362/424] [Serve] Add more bucket size (#35242) Increase the bucket size. - Still providing current granularity with 0 - 100 ms latency. - Providing more buckets for 100ms - 1000ms latency precision out of the box. - Increase the bucket range to handle heavier use case. 
**note: we are going to increase 2x stats points for the latency, it should be trivial comparing with host network bandwidth.** --- python/ray/serve/_private/constants.py | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/python/ray/serve/_private/constants.py b/python/ray/serve/_private/constants.py index ea81d65e61ac..ce7a4cfa78e0 100644 --- a/python/ray/serve/_private/constants.py +++ b/python/ray/serve/_private/constants.py @@ -62,10 +62,23 @@ 50, 100, 200, + 300, + 400, 500, 1000, 2000, + # 5 seconds 5000, + # 10 seconds + 10000, + # 60 seconds + 60000, + # 2min + 120000, + # 5 min + 300000, + # 10 min + 600000, ] #: Name of deployment health check method implemented by user. From 53fd7eaecdbe642fadc3cefecbb079e864f43678 Mon Sep 17 00:00:00 2001 From: Archit Kulkarni Date: Fri, 12 May 2023 09:44:42 -0700 Subject: [PATCH 363/424] [release test] [Cluster launcher] Add gcp minimal and full cluster launcher release test (#34878) Adds a nightly release test for the example-minimal.yaml and example-full files in the cluster launcher docs for GCP. 
Adds optional no-config-cache argument to test script for debugging purposes --- python/ray/autoscaler/gcp/BUILD | 6 + .../ray/autoscaler/gcp/example-minimal.yaml | 17 +-- .../ray/autoscaler/gcp/tests/gce_config.yaml | 11 ++ .../gcp/tests/single_node_32_cpu_gce.yaml | 27 ++++ ...luster.py => launch_and_verify_cluster.py} | 135 +++++++++++++----- release/BUILD | 1 + release/release_tests.yaml | 58 ++++++-- 7 files changed, 195 insertions(+), 60 deletions(-) create mode 100644 python/ray/autoscaler/gcp/tests/gce_config.yaml create mode 100644 python/ray/autoscaler/gcp/tests/single_node_32_cpu_gce.yaml rename python/ray/autoscaler/{aws/tests/aws_launch_and_verify_cluster.py => launch_and_verify_cluster.py} (50%) diff --git a/python/ray/autoscaler/gcp/BUILD b/python/ray/autoscaler/gcp/BUILD index 4e733cfe26be..c587b2d2fc80 100644 --- a/python/ray/autoscaler/gcp/BUILD +++ b/python/ray/autoscaler/gcp/BUILD @@ -3,3 +3,9 @@ filegroup( data = glob(["example-*.yaml"]), visibility = ["//python/ray/tests:__pkg__"], ) + +filegroup( + name = "test_configs", + data = glob(["tests/*.yaml"]), + visibility = ["//release:__pkg__"], +) \ No newline at end of file diff --git a/python/ray/autoscaler/gcp/example-minimal.yaml b/python/ray/autoscaler/gcp/example-minimal.yaml index c8914d7800e8..365d5df80e5c 100644 --- a/python/ray/autoscaler/gcp/example-minimal.yaml +++ b/python/ray/autoscaler/gcp/example-minimal.yaml @@ -1,13 +1,8 @@ -# A unique identifier for the head node and workers of this cluster. +auth: + ssh_user: ubuntu cluster_name: minimal - -# Cloud-provider specific configuration. provider: - type: gcp - region: us-west1 - availability_zone: us-west1-a - project_id: null # Globally unique project id - -# How Ray will authenticate with newly launched nodes. 
-auth: - ssh_user: ubuntu + availability_zone: us-west1-a + project_id: null # TODO: set your GCP project ID here + region: us-west1 + type: gcp diff --git a/python/ray/autoscaler/gcp/tests/gce_config.yaml b/python/ray/autoscaler/gcp/tests/gce_config.yaml new file mode 100644 index 000000000000..028cc7ce2a32 --- /dev/null +++ b/python/ray/autoscaler/gcp/tests/gce_config.yaml @@ -0,0 +1,11 @@ +base_image: {{ env["RAY_IMAGE_NIGHTLY_CPU"] | default("anyscale/ray:nightly-py37") }} +debian_packages: [] + +python: + pip_packages: [] + conda_packages: [] + +post_build_cmds: + - pip3 uninstall -y ray && pip3 install -U {{ env["RAY_WHEELS"] | default("ray") }} + - pip3 install -U ray[default] + - {{ env["RAY_WHEELS_SANITY_CHECK"] | default("echo No Ray wheels sanity check") }} diff --git a/python/ray/autoscaler/gcp/tests/single_node_32_cpu_gce.yaml b/python/ray/autoscaler/gcp/tests/single_node_32_cpu_gce.yaml new file mode 100644 index 000000000000..c6d1a6729fa0 --- /dev/null +++ b/python/ray/autoscaler/gcp/tests/single_node_32_cpu_gce.yaml @@ -0,0 +1,27 @@ +cloud_id: {{env["ANYSCALE_CLOUD_ID"]}} +region: us-west1 +allowed_azs: + - us-west1-c + +max_workers: 0 + +head_node_type: + name: head_node + instance_type: n2-standard-32 # m5.8xlarge + +worker_node_types: [] + +gcp_advanced_configurations_json: + instance_properties: + disks: + - boot: true + auto_delete: true + initialize_params: + disk_size_gb: 500 + +#aws: +# BlockDeviceMappings: +# - DeviceName: /dev/sda1 +# Ebs: +# DeleteOnTermination: true +# VolumeSize: 500 diff --git a/python/ray/autoscaler/aws/tests/aws_launch_and_verify_cluster.py b/python/ray/autoscaler/launch_and_verify_cluster.py similarity index 50% rename from python/ray/autoscaler/aws/tests/aws_launch_and_verify_cluster.py rename to python/ray/autoscaler/launch_and_verify_cluster.py index 67fb63add3a6..6e130d47c60a 100644 --- a/python/ray/autoscaler/aws/tests/aws_launch_and_verify_cluster.py +++ b/python/ray/autoscaler/launch_and_verify_cluster.py @@ 
-1,45 +1,56 @@ """ This script automates the process of launching and verifying a Ray cluster using a given cluster configuration file. It also handles cluster cleanup before and after the -verification process. The script requires two command-line arguments: the path to the -cluster configuration file and an optional number of retries for the verification step. +verification process. The script requires one command-line argument: the path to the +cluster configuration file. Usage: - python aws_launch_and_verify_cluster.py [retries] + python launch_and_verify_cluster.py [--no-config-cache] [--retries NUM_RETRIES] + Example: - python aws_launch_and_verify_cluster.py /path/to/cluster_config.yaml 5 + python launch_and_verify_cluster.py --retries 5 --no-config-cache + /path/to/cluster_config.yaml """ +import argparse import os import subprocess import sys +import tempfile import time from pathlib import Path import boto3 +import yaml -def check_arguments(args): +def check_arguments(): """ - Check command line arguments and return the cluster configuration file path and the - number of retries. - - Args: - args: The list of command line arguments. + Check command line arguments and return the cluster configuration file path, the + number of retries, and the value of the --no-config-cache flag. Returns: - A tuple containing the cluster config file path and the number of retries. - - Raises: - SystemExit: If an incorrect number of command line arguments is provided. + A tuple containing the cluster config file path, the number of retries, and the + value of the --no-config-cache flag. """ - if len(args) < 2: - print( - "Error: Please provide a path to the cluster configuration file as a " - "command line argument." 
- ) - sys.exit(1) - return args[1], int(args[2]) if len(args) >= 3 else 3 + parser = argparse.ArgumentParser(description="Launch and verify a Ray cluster") + parser.add_argument( + "--no-config-cache", + action="store_true", + help="Pass the --no-config-cache flag to Ray CLI commands", + ) + parser.add_argument( + "--retries", + type=int, + default=3, + help="Number of retries for verifying Ray is running (default: 3)", + ) + parser.add_argument( + "cluster_config", type=str, help="Path to the cluster configuration file" + ) + args = parser.parse_args() + + return args.cluster_config, args.retries, args.no_config_cache def check_file(file_path): @@ -90,7 +101,7 @@ def cleanup_cluster(cluster_config): subprocess.run(["ray", "down", "-v", "-y", str(cluster_config)], check=True) -def run_ray_commands(cluster_config, retries): +def run_ray_commands(cluster_config, retries, no_config_cache): """ Run the necessary Ray commands to start a cluster, verify Ray is running, and clean up the cluster. @@ -98,13 +109,19 @@ def run_ray_commands(cluster_config, retries): Args: cluster_config: The path of the cluster configuration file. retries: The number of retries for the verification step. + no_config_cache: Whether to pass the --no-config-cache flag to the ray CLI + commands. 
""" print("======================================") cleanup_cluster(cluster_config) print("======================================") print("Starting new cluster...") - subprocess.run(["ray", "up", "-v", "-y", str(cluster_config)], check=True) + cmd = ["ray", "up", "-v", "-y"] + if no_config_cache: + cmd.append("--no-config-cache") + cmd.append(str(cluster_config)) + subprocess.run(cmd, check=True) print("======================================") print("Verifying Ray is running...") @@ -113,16 +130,16 @@ def run_ray_commands(cluster_config, retries): count = 0 while count < retries: try: - subprocess.run( - [ - "ray", - "exec", - "-v", - str(cluster_config), - "python -c 'import ray; ray.init(\"localhost:6379\")'", - ], - check=True, - ) + cmd = [ + "ray", + "exec", + "-v", + str(cluster_config), + "python -c 'import ray; ray.init(\"localhost:6379\")'", + ] + if no_config_cache: + cmd.append("--no-config-cache") + subprocess.run(cmd, check=True) success = True break except subprocess.CalledProcessError: @@ -147,16 +164,60 @@ def run_ray_commands(cluster_config, retries): cleanup_cluster(cluster_config) print("======================================") - print("Finished executing script.") + print("Finished executing script successfully.") if __name__ == "__main__": - cluster_config, retries = check_arguments(sys.argv) + cluster_config, retries, no_config_cache = check_arguments() cluster_config = Path(cluster_config) check_file(cluster_config) print(f"Using cluster configuration file: {cluster_config}") print(f"Number of retries for 'verify ray is running' step: {retries}") + print(f"Using --no-config-cache flag: {no_config_cache}") + + config_yaml = yaml.safe_load(cluster_config.read_text()) + provider_type = config_yaml.get("provider", {}).get("type") + if provider_type == "aws": + download_ssh_key() + run_ray_commands(cluster_config, retries, no_config_cache) + elif provider_type == "gcp": + print("======================================") + print("GCP provider 
detected. Skipping ssh key download step.") + # Get the active account email + account_email = ( + subprocess.run( + ["gcloud", "config", "get-value", "account"], + stdout=subprocess.PIPE, + check=True, + ) + .stdout.decode("utf-8") + .strip() + ) + print("Active account email:", account_email) + # Get the current project ID + project_id = ( + subprocess.run( + ["gcloud", "config", "get-value", "project"], + stdout=subprocess.PIPE, + check=True, + ) + .stdout.decode("utf-8") + .strip() + ) + print( + f"Injecting GCP project '{project_id}' into cluster configuration file..." + ) + config_yaml["provider"]["project_id"] = project_id - download_ssh_key() - run_ray_commands(cluster_config, retries) + # Create a new temporary file and dump the updated configuration into it + with tempfile.NamedTemporaryFile(suffix=".yaml") as temp: + temp.write(yaml.dump(config_yaml).encode("utf-8")) + temp.flush() + cluster_config = Path(temp.name) + run_ray_commands(cluster_config, retries, no_config_cache) + + else: + print("======================================") + print("Provider type not recognized. 
Exiting script.") + sys.exit(1) diff --git a/release/BUILD b/release/BUILD index 6ddd98a94fac..9caa3ae5879c 100644 --- a/release/BUILD +++ b/release/BUILD @@ -522,6 +522,7 @@ py_test( exclude = ["ray_release/**/*.yaml"], ) + [ "//python/ray/autoscaler/aws:test_configs", + "//python/ray/autoscaler/gcp:test_configs", ], exec_compatible_with = [":hermetic_python"], tags = [ diff --git a/release/release_tests.yaml b/release/release_tests.yaml index 5d5720b0ada7..864d874a3644 100644 --- a/release/release_tests.yaml +++ b/release/release_tests.yaml @@ -6038,48 +6038,82 @@ - name: aws_cluster_launcher group: cluster-launcher-test - working_dir: ../python/ray/autoscaler/aws/ + working_dir: ../python/ray/autoscaler/ stable: true frequency: nightly team: core cluster: - cluster_env: tests/aws_config.yaml - cluster_compute: tests/aws_compute.yaml + cluster_env: aws/tests/aws_config.yaml + cluster_compute: aws/tests/aws_compute.yaml run: timeout: 1200 - script: cd tests && python aws_launch_and_verify_cluster.py aws_cluster.yaml + script: python launch_and_verify_cluster.py aws/tests/aws_cluster.yaml - name: aws_cluster_launcher_minimal group: cluster-launcher-test - working_dir: ../python/ray/autoscaler/aws/ + working_dir: ../python/ray/autoscaler/ stable: true frequency: nightly team: core cluster: - cluster_env: tests/aws_config.yaml - cluster_compute: tests/aws_compute.yaml + cluster_env: aws/tests/aws_config.yaml + cluster_compute: aws/tests/aws_compute.yaml run: timeout: 1200 - script: cd tests && python aws_launch_and_verify_cluster.py ../example-minimal.yaml + script: python launch_and_verify_cluster.py aws/example-minimal.yaml - name: aws_cluster_launcher_full group: cluster-launcher-test - working_dir: ../python/ray/autoscaler/aws/ + working_dir: ../python/ray/autoscaler/ stable: true frequency: nightly team: core cluster: - cluster_env: tests/aws_config.yaml - cluster_compute: tests/aws_compute.yaml + cluster_env: aws/tests/aws_config.yaml + cluster_compute: 
aws/tests/aws_compute.yaml run: timeout: 1200 - script: cd tests && python aws_launch_and_verify_cluster.py ../example-full.yaml + script: python launch_and_verify_cluster.py aws/example-full.yaml + +- name: gcp_cluster_launcher_minimal + group: cluster-launcher-test + working_dir: ../python/ray/autoscaler/ + + stable: true + + env: gce + frequency: nightly + team: core + cluster: + cluster_env: gcp/tests/gce_config.yaml + cluster_compute: gcp/tests/single_node_32_cpu_gce.yaml + + run: + timeout: 1200 + script: python launch_and_verify_cluster.py gcp/example-minimal.yaml + +- name: gcp_cluster_launcher_full + group: cluster-launcher-test + working_dir: ../python/ray/autoscaler/ + + stable: true + + env: gce + frequency: nightly + team: core + cluster: + cluster_env: gcp/tests/gce_config.yaml + cluster_compute: gcp/tests/single_node_32_cpu_gce.yaml + + run: + timeout: 2400 + script: python launch_and_verify_cluster.py gcp/example-full.yaml \ No newline at end of file From bac199ec25bb46f4157ac76e9d1bbf6c503d4951 Mon Sep 17 00:00:00 2001 From: Eric Liang Date: Fri, 12 May 2023 10:39:47 -0700 Subject: [PATCH 364/424] [docs] [data] Update use case doc links and resources (#35277) --- doc/source/data/data.rst | 8 ++++++++ doc/source/ray-overview/use-cases.rst | 20 ++++++++++++++------ 2 files changed, 22 insertions(+), 6 deletions(-) diff --git a/doc/source/data/data.rst b/doc/source/data/data.rst index cae879be7d3e..a9d785755a4f 100644 --- a/doc/source/data/data.rst +++ b/doc/source/data/data.rst @@ -203,6 +203,14 @@ request on the `Ray GitHub repo `__, and che our :ref:`guide for implementing a custom datasource ` if you're interested in rolling your own integration! 
+---------- +Learn More +---------- + +- `[Blog] Streaming distributed execution across CPUs and GPUs `__ +- `[Blog] Offline Batch Inference: Comparing Ray, Apache Spark, and SageMaker `__ +- `[Blog] Using Ray Data to parallelize LangChain inference `__ + ---------- Contribute ---------- diff --git a/doc/source/ray-overview/use-cases.rst b/doc/source/ray-overview/use-cases.rst index 06e3d292b4b1..fb581fb65ef8 100644 --- a/doc/source/ray-overview/use-cases.rst +++ b/doc/source/ray-overview/use-cases.rst @@ -118,25 +118,33 @@ To learn more about running batch inference with Ray, see the :ref:`batch infere :img-top: /images/ray_logo.png :class-img-top: pt-2 w-75 d-block mx-auto fixed-height-img - .. button-link:: https://github.com/ray-project/ray-educational-materials/blob/main/Computer_vision_workloads/Semantic_segmentation/Scaling_batch_inference.ipynb + .. button-link:: https://www.anyscale.com/blog/offline-batch-inference-comparing-ray-apache-spark-and-sagemaker - [Tutorial] Architectures for Scalable Batch Inference with Ray + [Blog] Offline Batch Inference: Comparing Ray, Apache Spark, and SageMaker .. grid-item-card:: :img-top: /images/ray_logo.png :class-img-top: pt-2 w-75 d-block mx-auto fixed-height-img - .. button-link:: https://www.anyscale.com/blog/model-batch-inference-in-ray-actors-actorpool-and-datasets + .. button-link:: https://www.anyscale.com/blog/streaming-distributed-execution-across-cpus-and-gpus - [Blog] Batch Inference in Ray: Actors, ActorPool, and Datasets + [Blog] Streaming distributed execution across CPUs and GPUs .. grid-item-card:: :img-top: /images/ray_logo.png :class-img-top: pt-2 w-75 d-block mx-auto fixed-height-img - .. button-ref:: /ray-core/examples/batch_prediction + .. button-link:: https://www.anyscale.com/blog/turbocharge-langchain-now-guide-to-20x-faster-embedding - [Example] Batch Prediction using Ray Core + [Blog] Using Ray Data to parallelize LangChain inference + + .. 
grid-item-card:: + :img-top: /images/ray_logo.png + :class-img-top: pt-2 w-75 d-block mx-auto fixed-height-img + + .. button-ref:: /data/batch_inference + + [Guide] Batch Prediction using Ray Data .. grid-item-card:: :img-top: /images/ray_logo.png From f394b8c7fdaa43fc156cc805648b0ae36718de89 Mon Sep 17 00:00:00 2001 From: Cade Daniel Date: Fri, 12 May 2023 11:02:50 -0700 Subject: [PATCH 365/424] [Release test] Disabling empty-runtime-env tests in benchmark_worker_startup.aws #35232 This test uses some hackery to get a "default" (empty) runtime environment on Anyscale. This allows us to measure startup performance for non-Anyscale environments. We validate that the numbers are correct by asserting empty runtime env for these measurements. Since our infra team recently added cgroup to the default runtime env, the assertion now fails. We can fix this but not a priority right now -- this PR disables the empty-runtime-env tests in this release test, so we still have metrics for the normal path. Closes #35183 --- release/benchmark-worker-startup/benchmark_worker_startup.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/release/benchmark-worker-startup/benchmark_worker_startup.py b/release/benchmark-worker-startup/benchmark_worker_startup.py index 8c0c1d2119ef..67ae1e3c05f3 100755 --- a/release/benchmark-worker-startup/benchmark_worker_startup.py +++ b/release/benchmark-worker-startup/benchmark_worker_startup.py @@ -172,7 +172,10 @@ def generate_test_matrix( for with_tasks in [True, False]: for with_gpu in [True, False]: - for with_runtime_env in [True, False]: + # Do not run without runtime env. TODO(cade) Infra team added cgroups to + # default runtime env, need to find some way around that if we want + # "pure" (non-runtime-env) measurements. 
+ for with_runtime_env in [True]: for import_to_try in imports_to_try: for num_jobs in num_jobs_per_type.values(): From 672f45e2efb7c254f162c2a13480feb7063e8f60 Mon Sep 17 00:00:00 2001 From: Max Pumperla Date: Fri, 12 May 2023 20:16:36 +0200 Subject: [PATCH 366/424] [docs] nav fixes #34583 (#35296) Signed-off-by: Max Pumperla --- doc/source/_static/js/custom.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/_static/js/custom.js b/doc/source/_static/js/custom.js index 727459e9fea8..2ade6e53c97d 100644 --- a/doc/source/_static/js/custom.js +++ b/doc/source/_static/js/custom.js @@ -35,7 +35,7 @@ document.addEventListener("DOMContentLoaded", function() { for (let i = 0; i < navItems.length; i++) { let navItem = navItems[i]; const stringList = [ - "User Guide", "Examples", + "User Guides", "Examples", "Ray Core", "Ray Core API", "Ray Clusters", "Deploying on Kubernetes", "Deploying on VMs", "Applications Guide", "Ray Cluster Management API", From 68731cbb4861ce15d906e389a88c1324bd3580ac Mon Sep 17 00:00:00 2001 From: Jun Gong Date: Fri, 12 May 2023 12:47:33 -0700 Subject: [PATCH 367/424] [AIR] Distributed checkpointing (#34709) Signed-off-by: Jun Gong --- .../ray/air/_internal/checkpoint_manager.py | 59 +++++-- python/ray/air/_internal/util.py | 25 +++ python/ray/air/checkpoint.py | 14 +- python/ray/air/config.py | 12 +- .../ray/air/tests/test_checkpoint_manager.py | 12 +- python/ray/air/tests/test_checkpoints.py | 29 ++++ .../ray/train/_internal/backend_executor.py | 28 +++ python/ray/train/_internal/checkpoint.py | 39 ++++- python/ray/train/_internal/session.py | 105 +++++++++++- python/ray/train/constants.py | 7 + python/ray/train/data_parallel_trainer.py | 19 +- python/ray/train/trainer.py | 60 ++++++- python/ray/tune/impl/tuner_internal.py | 6 + python/ray/tune/tests/test_syncer.py | 162 +++++++++++++++++- python/ray/tune/trainable/util.py | 12 +- python/ray/tune/tune.py | 8 + 16 files changed, 537 insertions(+), 60 deletions(-) diff 
--git a/python/ray/air/_internal/checkpoint_manager.py b/python/ray/air/_internal/checkpoint_manager.py index 096326f57bce..ee1bdd973f1f 100644 --- a/python/ray/air/_internal/checkpoint_manager.py +++ b/python/ray/air/_internal/checkpoint_manager.py @@ -55,6 +55,7 @@ class _TrackedCheckpoint: into `"evaluation/episode_reward_mean"`. node_ip: IP of the node where the checkpoint was generated. Defaults to the current node. + rank: Rank of the node where the checkpoint was generated. Defaults to 0. """ def __init__( @@ -64,12 +65,14 @@ def __init__( checkpoint_id: Optional[int] = None, metrics: Optional[Dict] = None, node_ip: Optional[str] = None, + rank: Optional[int] = 0, ): from ray.tune.result import NODE_IP self.dir_or_data = dir_or_data self.id = checkpoint_id self.storage_mode = storage_mode + self.rank = rank self.metrics = flatten_dict(metrics) if metrics else {} self.node_ip = node_ip or self.metrics.get(NODE_IP, None) @@ -296,7 +299,7 @@ def __init__( # always available). self._checkpoints_to_clean_up = set() - self._delete_fn = delete_fn + self.set_delete_fn(delete_fn) def set_delete_fn( self, delete_fn: Optional[Callable[["_TrackedCheckpoint"], None]] @@ -309,7 +312,10 @@ def set_delete_fn( """ self._delete_fn = delete_fn - def register_checkpoint(self, checkpoint: _TrackedCheckpoint): + def register_checkpoints( + self, + checkpoints: Union[_TrackedCheckpoint, List[_TrackedCheckpoint]], + ): """Register new checkpoint and add to bookkeeping. This method will register a new checkpoint and add it to the internal @@ -318,23 +324,27 @@ def register_checkpoint(self, checkpoint: _TrackedCheckpoint): checkpoints should be deleted. Args: - checkpoint: Tracked checkpoint object to add to bookkeeping. + checkpoints: Tracked checkpoint object to add to bookkeeping. 
""" - checkpoint.id = checkpoint.id or self._latest_checkpoint_id + if not isinstance(checkpoints, list): + checkpoints = [checkpoints] - if checkpoint.storage_mode == CheckpointStorage.MEMORY: - self._replace_latest_memory_checkpoint(checkpoint) + for checkpoint in checkpoints: + checkpoint.id = checkpoint.id or self._latest_checkpoint_id - if self._persist_memory_checkpoints: - persisted_checkpoint = copy.copy(checkpoint) - persisted_checkpoint.storage_mode = CheckpointStorage.PERSISTENT + if checkpoint.storage_mode == CheckpointStorage.MEMORY: + self._replace_latest_memory_checkpoint(checkpoint) + + if self._persist_memory_checkpoints: + persisted_checkpoint = copy.copy(checkpoint) + persisted_checkpoint.storage_mode = CheckpointStorage.PERSISTENT + else: + persisted_checkpoint = None else: - persisted_checkpoint = None - else: - persisted_checkpoint = checkpoint + persisted_checkpoint = checkpoint - if persisted_checkpoint and self._checkpoint_strategy.num_to_keep != 0: - self._process_persistent_checkpoint(persisted_checkpoint) + if persisted_checkpoint and self._checkpoint_strategy.num_to_keep != 0: + self._process_persistent_checkpoint(persisted_checkpoint) self._latest_checkpoint_id += 1 @@ -405,8 +415,20 @@ def _get_checkpoint_score( checkpoint.id, ) - def _process_persistent_checkpoint(self, checkpoint: _TrackedCheckpoint): + def _process_persistent_checkpoint( + self, + checkpoint: _TrackedCheckpoint, + next_checkpoint_path: Optional[str] = None, + ): + # Note(jungong) : Track rank0 checkpoint as the best / worst checkpoint. + # That is because we only care about the data for checkpoints + # from non-rank0 workers. They do not represent a different Trial + # checkpoint as the rank0 one. 
+ if checkpoint.rank > 0: + return + assert checkpoint.storage_mode == CheckpointStorage.PERSISTENT + next_checkpoint_path = next_checkpoint_path or self._get_next_checkpoint_path() checkpoint_score = self._get_checkpoint_score(checkpoint) wrapped_checkpoint = _HeapCheckpointWrapper( @@ -414,20 +436,19 @@ def _process_persistent_checkpoint(self, checkpoint: _TrackedCheckpoint): ) if self._checkpoint_strategy.num_to_keep is None: - # Keep all checkpoints - checkpoint.commit(path=self._get_next_checkpoint_path()) + checkpoint.commit(path=next_checkpoint_path) self._replace_latest_persisted_checkpoint(checkpoint) self._top_persisted_checkpoints.append(wrapped_checkpoint) elif ( len(self._top_persisted_checkpoints) < self._checkpoint_strategy.num_to_keep ): + checkpoint.commit(path=next_checkpoint_path) # Heap is not full yet, so keep this checkpoint - checkpoint.commit(path=self._get_next_checkpoint_path()) heapq.heappush(self._top_persisted_checkpoints, wrapped_checkpoint) self._replace_latest_persisted_checkpoint(checkpoint) elif wrapped_checkpoint.priority >= self._top_persisted_checkpoints[0].priority: + checkpoint.commit(path=next_checkpoint_path) # Priority is higher than current worst checkpoint, so replace worst - checkpoint.commit(path=self._get_next_checkpoint_path()) worst_checkpoint = heapq.heappushpop( self._top_persisted_checkpoints, wrapped_checkpoint ).tracked_checkpoint diff --git a/python/ray/air/_internal/util.py b/python/ray/air/_internal/util.py index c1c93d8a1c23..a4eba452e6de 100644 --- a/python/ray/air/_internal/util.py +++ b/python/ray/air/_internal/util.py @@ -3,10 +3,12 @@ from contextlib import closing import logging import queue +import shutil import threading from typing import Optional import numpy as np +from pathlib import Path import ray from ray.air.constants import _ERROR_REPORT_TIMEOUT @@ -119,3 +121,26 @@ def join(self, timeout=None): def _estimate_avail_object_store_memory() -> int: """Estimates total object store memory 
available in the cluster.""" return ray.available_resources()["object_store_memory"] + + +def _copy_dir_ignore_conflicts(src_dir: Path, dst_dir: Path): + """This is a workaround for python < 3.8 where shutil.copytree does not + support dirs_exist_ok=True. + + We will go through the content of the folder and manually copy ites, + while ignoring files that conflict. + + TODO(jungong): remove this workaround when we drop support for python < 3.8. + """ + for inner in src_dir.iterdir(): + dest = dst_dir / inner.name + if inner.is_dir(): + if not dest.exists(): + dest.mkdir(parents=True) + _copy_dir_ignore_conflicts(inner, dest) + else: + if not dest.exists(): + shutil.copy2(str(inner.absolute()), str(dest.absolute())) + else: + # Ignore and don't overwrite the existing file. + pass diff --git a/python/ray/air/checkpoint.py b/python/ray/air/checkpoint.py index 9bcc0860fb02..e5f0905e424d 100644 --- a/python/ray/air/checkpoint.py +++ b/python/ray/air/checkpoint.py @@ -26,6 +26,7 @@ read_file_from_uri, upload_to_uri, ) +from ray.air._internal.util import _copy_dir_ignore_conflicts from ray.air.constants import PREPROCESSOR_KEY, CHECKPOINT_ID_ATTR from ray.util.annotations import DeveloperAPI, PublicAPI @@ -559,21 +560,22 @@ def _to_directory(self, path: str, move_instead_of_copy: bool = False) -> None: if local_path: local_path_pathlib = Path(local_path).resolve() if local_path_pathlib != path_pathlib: - if path_pathlib.exists(): - shutil.rmtree(str(path_pathlib.absolute())) # If this exists on the local path, just copy over if move_instead_of_copy: os.makedirs(str(path_pathlib.absolute()), exist_ok=True) self._local_path = str(path_pathlib.absolute()) for inner in local_path_pathlib.iterdir(): + dest = path_pathlib / inner.name + if dest.exists(): + # Ignore files that already exist. + # For example, checkpoints from every rank may all have + # a same .is_checkpoint file. 
+ continue shutil.move( str(inner.absolute()), str(path_pathlib.absolute()) ) else: - shutil.copytree( - str(local_path_pathlib.absolute()), - str(path_pathlib.absolute()), - ) + _copy_dir_ignore_conflicts(local_path_pathlib, path_pathlib) elif external_path: # If this exists on external storage (e.g. cloud), download download_from_uri(uri=external_path, local_path=path, filelock=False) diff --git a/python/ray/air/config.py b/python/ray/air/config.py index c869b90c76d7..f8c262021f43 100644 --- a/python/ray/air/config.py +++ b/python/ray/air/config.py @@ -601,7 +601,15 @@ class CheckpointConfig: This attribute is only supported by trainers that don't take in custom training loops. Defaults to True for trainers that support it and False for generic function trainables. - + _checkpoint_keep_all_ranks: If True, will save checkpoints from all ranked + training workers. If False, only checkpoint from rank 0 worker is kept. + NOTE: This API is experimental and subject to change between minor + releases. + _checkpoint_upload_from_workers: If True, distributed workers + will upload their checkpoints to cloud directly. This is to avoid the + need for transferring large checkpoint files to the training worker + group coordinator for persistence. NOTE: This API is experimental and + subject to change between minor releases. 
""" num_to_keep: Optional[int] = None @@ -609,6 +617,8 @@ class CheckpointConfig: checkpoint_score_order: str = MAX checkpoint_frequency: int = 0 checkpoint_at_end: Optional[bool] = None + _checkpoint_keep_all_ranks: bool = False + _checkpoint_upload_from_workers: bool = False def __post_init__(self): if self.num_to_keep is not None and self.num_to_keep <= 0: diff --git a/python/ray/air/tests/test_checkpoint_manager.py b/python/ray/air/tests/test_checkpoint_manager.py index fa0e20b89473..eb50aa04fb74 100644 --- a/python/ray/air/tests/test_checkpoint_manager.py +++ b/python/ray/air/tests/test_checkpoint_manager.py @@ -11,7 +11,7 @@ def test_unlimited_persistent_checkpoints(): cpm = _CheckpointManager(checkpoint_strategy=CheckpointConfig(num_to_keep=None)) for i in range(10): - cpm.register_checkpoint( + cpm.register_checkpoints( _TrackedCheckpoint({"data": i}, storage_mode=CheckpointStorage.PERSISTENT) ) @@ -22,7 +22,7 @@ def test_limited_persistent_checkpoints(): cpm = _CheckpointManager(checkpoint_strategy=CheckpointConfig(num_to_keep=2)) for i in range(10): - cpm.register_checkpoint( + cpm.register_checkpoints( _TrackedCheckpoint({"data": i}, storage_mode=CheckpointStorage.PERSISTENT) ) @@ -41,7 +41,7 @@ def __post_init__(self): cpm = _CheckpointManager(checkpoint_strategy=_CheckpointConfig(num_to_keep=0)) for i in range(10): - cpm.register_checkpoint( + cpm.register_checkpoints( _TrackedCheckpoint({"data": i}, storage_mode=CheckpointStorage.PERSISTENT) ) @@ -53,7 +53,7 @@ def test_dont_persist_memory_checkpoints(): cpm._persist_memory_checkpoints = False for i in range(10): - cpm.register_checkpoint( + cpm.register_checkpoints( _TrackedCheckpoint({"data": i}, storage_mode=CheckpointStorage.MEMORY) ) @@ -65,7 +65,7 @@ def test_persist_memory_checkpoints(): cpm._persist_memory_checkpoints = True for i in range(10): - cpm.register_checkpoint( + cpm.register_checkpoints( _TrackedCheckpoint({"data": i}, storage_mode=CheckpointStorage.MEMORY) ) @@ -83,7 +83,7 @@ def 
test_keep_best_checkpoints(): cpm._persist_memory_checkpoints = True for i in range(10): - cpm.register_checkpoint( + cpm.register_checkpoints( _TrackedCheckpoint( {"data": i}, storage_mode=CheckpointStorage.MEMORY, diff --git a/python/ray/air/tests/test_checkpoints.py b/python/ray/air/tests/test_checkpoints.py index 8338fee31ca8..6ae062e7b814 100644 --- a/python/ray/air/tests/test_checkpoints.py +++ b/python/ray/air/tests/test_checkpoints.py @@ -15,6 +15,7 @@ import ray from ray.air._internal.remote_storage import _ensure_directory, delete_at_uri from ray.air._internal.uri_utils import URI +from ray.air._internal.util import _copy_dir_ignore_conflicts from ray.air.checkpoint import _DICT_CHECKPOINT_ADDITIONAL_FILE_KEY, Checkpoint from ray.air.constants import MAX_REPR_LENGTH, PREPROCESSOR_KEY from ray.data import Preprocessor @@ -159,6 +160,34 @@ def test_directory_move_instead_of_copy(self): assert new_recovered_checkpoint.foo == "bar" assert not list(Path(path).glob("*")) + def test_copy_dir_ignore_conflicts(self): + tmpdir = Path(tempfile.mkdtemp()) + + src_dir = tmpdir / "src" + dst_dir = tmpdir / "dst" + + src_dir.mkdir() + dst_dir.mkdir() + + (src_dir / "foo.txt").touch() + (src_dir / "bar.txt").touch() + (src_dir / "a").mkdir() + (src_dir / "a" / "a.txt").touch() + (src_dir / "b").mkdir() + (src_dir / "b" / "b.txt").touch() + + # Has a file conflict. + (dst_dir / "foo.txt").touch() + # Has a directory conflict. 
+ (dst_dir / "a").mkdir() + + _copy_dir_ignore_conflicts(src_dir, dst_dir) + + assert (dst_dir / "foo.txt").exists() + assert (dst_dir / "bar.txt").exists() + assert (dst_dir / "a" / "a.txt").exists() + assert (dst_dir / "b" / "b.txt").exists() + def test_uri(self): checkpoint = StubCheckpoint.from_dict({"spam": "ham"}) assert "foo" in checkpoint._SERIALIZED_ATTRS diff --git a/python/ray/train/_internal/backend_executor.py b/python/ray/train/_internal/backend_executor.py index 423a6babeaa8..be940111f1c4 100644 --- a/python/ray/train/_internal/backend_executor.py +++ b/python/ray/train/_internal/backend_executor.py @@ -5,6 +5,7 @@ import ray from ray._private.ray_constants import env_integer +from ray.air.config import CheckpointConfig from ray.exceptions import RayActorError from ray.train._internal.dataset_spec import RayDatasetSpec from ray.air.checkpoint import Checkpoint @@ -71,6 +72,7 @@ def __init__( num_gpus_per_worker: float = 0, additional_resources_per_worker: Optional[Dict[str, float]] = None, max_retries: int = 3, + checkpoint_config: Optional[CheckpointConfig] = None, ): self._backend_config = backend_config self._backend = backend_config.backend_cls() @@ -91,6 +93,13 @@ def __init__( self.worker_group = InactiveWorkerGroup() self.dataset_shards = None + self._checkpoint_keep_all_ranks = ( + checkpoint_config and checkpoint_config._checkpoint_keep_all_ranks + ) + self._checkpoint_upload_from_workers = ( + checkpoint_config and checkpoint_config._checkpoint_upload_from_workers + ) + def start( self, initialization_hook: Optional[Callable[[], None]] = None, @@ -366,6 +375,8 @@ def initialize_session( checkpoint, dataset_shard, encode_data_fn, + checkpoint_keep_all_ranks, + checkpoint_upload_from_workers, ): try: init_session( @@ -381,6 +392,8 @@ def initialize_session( encode_data_fn=encode_data_fn, detailed_autofilled_metrics=use_detailed_autofilled_metrics, enable_lazy_checkpointing=use_lazy_checkpointing, + 
checkpoint_keep_all_ranks=checkpoint_keep_all_ranks, + checkpoint_upload_from_workers=(checkpoint_upload_from_workers), ) except ValueError: raise TrainBackendError( @@ -416,6 +429,10 @@ def initialize_session( dataset_shard=self.dataset_shards[index], checkpoint=checkpoint, encode_data_fn=self._backend._encode_data, + checkpoint_keep_all_ranks=self._checkpoint_keep_all_ranks, + checkpoint_upload_from_workers=( + self._checkpoint_upload_from_workers + ), ) ) @@ -482,8 +499,19 @@ def get_next(): "`session.report()` are called the " "same number of times on all workers." ) + return results + def _set_checkpoint_uri(self, uri: str): + """Tell remote sessions where to upload the chekcpoint.""" + + def set_uri(): + session = _get_session("_set_checkpoint_uri") + session._set_checkpoint_uri(uri) + + futures = self.worker_group.execute_async(set_uri) + self.get_with_failure_handling(futures) + def pause_reporting(self): """Disable workers from enqueuing results from ``session.report()``. diff --git a/python/ray/train/_internal/checkpoint.py b/python/ray/train/_internal/checkpoint.py index a85f05d7c915..83b92b819cb7 100644 --- a/python/ray/train/_internal/checkpoint.py +++ b/python/ray/train/_internal/checkpoint.py @@ -12,6 +12,7 @@ from ray.train._internal.session import TrainingResult from ray.train._internal.utils import construct_path from ray.train.constants import ( + CHECKPOINT_RANK_KEY, TIMESTAMP, TRAIN_CHECKPOINT_SUBDIR, TUNE_CHECKPOINT_ID, @@ -98,15 +99,12 @@ def _load_checkpoint( def _process_checkpoint( self, - checkpoint_results: List[TrainingResult], + checkpoint_result: TrainingResult, decode_checkpoint_fn: Callable, - ) -> None: - """Ray Train entrypoint. Perform all processing for a checkpoint.""" - # Get checkpoint from first worker. 
- checkpoint_result = checkpoint_results[0] - + ) -> _TrackedCheckpoint: checkpoint_data = checkpoint_result.data checkpoint_metadata = checkpoint_result.metadata or {} + checkpoint_rank = checkpoint_metadata.get(CHECKPOINT_RANK_KEY, 0) if isinstance(checkpoint_data, str): checkpoint_class: Type[Checkpoint] = checkpoint_metadata[ @@ -131,13 +129,31 @@ def _process_checkpoint( f"`session.report()`." ) - tracked_checkpoint = _TrackedCheckpoint( + return _TrackedCheckpoint( dir_or_data=checkpoint_data, checkpoint_id=self._latest_checkpoint_id, storage_mode=CheckpointStorage.MEMORY, metrics={score_attr: checkpoint_metadata.get(score_attr, 0.0)}, + rank=checkpoint_rank, ) - self.register_checkpoint(checkpoint=tracked_checkpoint) + + def _process_checkpoints( + self, + checkpoint_results: List[TrainingResult], + decode_checkpoint_fn: Callable, + ) -> None: + """Ray Train entrypoint. Perform all processing for a checkpoint.""" + if self._checkpoint_strategy._checkpoint_keep_all_ranks: + tracked_checkpoints = [ + self._process_checkpoint(checkpoint_result, decode_checkpoint_fn) + for checkpoint_result in checkpoint_results + ] + else: + # Get checkpoint from first worker. + tracked_checkpoints = [ + self._process_checkpoint(checkpoint_results[0], decode_checkpoint_fn) + ] + self.register_checkpoints(checkpoints=tracked_checkpoints) def _get_next_checkpoint_path(self) -> Optional[Path]: """Path to the next checkpoint to persist.""" @@ -249,7 +265,12 @@ def add_tune_checkpoint_id(self, checkpoint: Checkpoint): def _process_persistent_checkpoint(self, checkpoint: _TrackedCheckpoint): self.add_tune_checkpoint_id(checkpoint.dir_or_data) - # If inside a Tune Trainable, then checkpoint with Tune. + + # Train may choose not to commit a checkpoint, but make sure the + # checkpoint is always committed for Tuning purpose. 
+ # After this is committed, checkpoint.dir_or_path will become a string, + # which will prevent this checkpoint from being commtted again in the + # subsequent super()._process_persistent_checkpoint() call. with tune.checkpoint_dir(step=self._latest_checkpoint_id) as checkpoint_dir: path = Path(checkpoint_dir) checkpoint.commit(path) diff --git a/python/ray/train/_internal/session.py b/python/ray/train/_internal/session.py index 369261901f46..f13a5a4bb29a 100644 --- a/python/ray/train/_internal/session.py +++ b/python/ray/train/_internal/session.py @@ -9,6 +9,7 @@ from datetime import datetime from enum import Enum, auto from pathlib import Path +import shutil from typing import Callable, Dict, Optional, Type, Union import ray @@ -18,6 +19,9 @@ from ray.data import Dataset, DatasetPipeline from ray.train._internal.accelerator import Accelerator from ray.train.constants import ( + CHECKPOINT_DISTRIBUTED_KEY, + CHECKPOINT_METADATA_KEY, + CHECKPOINT_RANK_KEY, DETAILED_AUTOFILLED_KEYS, WORKER_HOSTNAME, WORKER_NODE_IP, @@ -25,11 +29,16 @@ TIME_THIS_ITER_S, TIME_TOTAL_S, TIMESTAMP, - CHECKPOINT_METADATA_KEY, LAZY_CHECKPOINT_MARKER_FILE, ) from ray.train.error import SessionMisuseError from ray.train.session import _TrainSessionImpl +from ray.util.annotations import DeveloperAPI +from ray.util.debug import log_once + + +_INDEX_FILE_EXTENSION = ".files" +_INDEX_FILE = ".RANK_{0}" + _INDEX_FILE_EXTENSION class TrainingResultType(Enum): @@ -60,6 +69,7 @@ class TrainingResult: # TODO(xwjiang): This needs a better name. +@DeveloperAPI class _TrainSession: """Holds information for training on each worker.""" @@ -83,6 +93,8 @@ def __init__( # will send over checkpoint path and metadata instead of # the whole checkpoint to avoid unnecessary serialization. 
enable_lazy_checkpointing: bool = True, + checkpoint_keep_all_ranks: bool = False, + checkpoint_upload_from_workers: bool = False, ): self.dataset_shard = dataset_shard @@ -96,6 +108,10 @@ def __init__( # TODO(xwjiang): Legacy Ray Train trainer clean up! self.loaded_checkpoint = checkpoint self.enable_lazy_checkpointing = enable_lazy_checkpointing + self.checkpoint_keep_all_ranks = checkpoint_keep_all_ranks + self.checkpoint_upload_from_workers = checkpoint_upload_from_workers + # Only used if checkpoint_upload_from_workers is True. + self.checkpoint_uri = None # Function to encode checkpoint dict before sending to the driver. if not encode_data_fn: @@ -281,17 +297,85 @@ def _report_thread_runner_error(self, block=False): except queue.Empty: pass + def _create_checkpoint_file_list(self, checkpoint: Checkpoint): + """Create an index of the folder contents + + So we know which files belong to which rank. + """ + root = checkpoint._local_path + ckpt_files = [] + for dir, _, files in os.walk(root): + # Strip the root path from the path though, since + # we are only interested in the part relative to + # the root of this checkpoint. + dir = dir[len(root) :] + for fn in files: + ckpt_files.append(os.path.join(dir, fn)) + # Write these files into the index file. + with open(os.path.join(root, _INDEX_FILE.format(self.world_rank)), "w") as f: + for fn in ckpt_files: + f.write(f"{fn}\n") + + def _remove_uploaded_checkpoint_files(self, checkpoint: Checkpoint): + """Get rid of already uploaded large checkpoint files. + + This is so they don't get shipped to the driver node. + """ + root = checkpoint._local_path + for f in os.listdir(root): + if f.endswith(_INDEX_FILE_EXTENSION): + # We will leave the index file in there so local + # checkpoint has knowledge about the cloud files. 
+ continue + fp = os.path.join(root, f) + if os.path.isfile(fp): + os.unlink(fp) + elif os.path.isdir(fp): + shutil.rmtree(fp) + def checkpoint(self, checkpoint: Checkpoint): """Adds kwargs to the queue to be consumed by main thread. Also stores the checkpoint in ``self.loaded_checkpoint``. """ + checkpoint_type, _ = checkpoint.get_internal_representation() + + if checkpoint_type == "data_dict" and self.checkpoint_keep_all_ranks: + if log_once("keep_all_ranks_dict_checkpoint"): + logger.warning( + "Saving checkpoints from all ranks does not work with " + "dictionary checkpoints. Set `ray.air.CheckpointConfig" + "(_checkpoint_keep_all_ranks=False)`, or write checkpoints " + "to a directory and report directory checkpoints that " + "contain unique files per worker rank. For example, " + "use filenames that contain the unique rank. You can " + "retrieve the rank with `session.get_world_rank()` within " + "your training loop per worker." + ) + + upload_from_workers = ( + checkpoint_type == "local_path" + and self.checkpoint_upload_from_workers + and self.checkpoint_uri + ) + if upload_from_workers: + self._create_checkpoint_file_list(checkpoint) + logger.info( + f"Uploading checkpoint files from worker rank {self.world_rank} " + f"to cloud URI {self.checkpoint_uri}." + ) + # We want to upload the files directly to cloud storage, + # so that they won't need to be shipped to the driver node + # via object store. + checkpoint.to_uri(self.checkpoint_uri) + logger.info("Done uploading checkpoint files.") + self._remove_uploaded_checkpoint_files(checkpoint) # Update session checkpoint to latest checkpoint. self.loaded_checkpoint = checkpoint # Only store checkpoints on worker with rank 0. 
- if self.world_rank != 0: + if self.world_rank != 0 and not self.checkpoint_keep_all_ranks: checkpoint = None elif checkpoint: checkpoint = self._encode_data_fn(checkpoint) @@ -307,11 +391,20 @@ def checkpoint(self, checkpoint: Checkpoint): metadata.update({CHECKPOINT_METADATA_KEY: checkpoint._metadata}) checkpoint = str(checkpoint._local_path) + # Save the rank of the worker that created this checkpoint. + metadata.update( + { + CHECKPOINT_RANK_KEY: self.world_rank, + CHECKPOINT_DISTRIBUTED_KEY: upload_from_workers, + } + ) + result = TrainingResult( type=TrainingResultType.CHECKPOINT, data=checkpoint, metadata=metadata, ) + # Add result to a thread-safe queue. self.result_queue.put(result, block=True) @@ -319,6 +412,14 @@ def checkpoint(self, checkpoint: Checkpoint): # checkpoint has been processed. self.continue_lock.acquire() + def _set_checkpoint_uri(self, uri: str): + """Tell session where to save the next directory checkpoint on the cloud. + + Args: + uri: URI to the location where next checkpoint should be saved. + """ + self.checkpoint_uri = uri + def report(self, metrics: Dict, checkpoint: Optional[Checkpoint] = None) -> None: # TODO(xwjiang): tons of optimizations. diff --git a/python/ray/train/constants.py b/python/ray/train/constants.py index 2d7d198c48cf..1ddba37fd238 100644 --- a/python/ray/train/constants.py +++ b/python/ray/train/constants.py @@ -84,3 +84,10 @@ # Key for AIR Checkpoint metadata in TrainingResult metadata CHECKPOINT_METADATA_KEY = "checkpoint_metadata" + +# Key for AIR Checkpoint world rank in TrainingResult metadata +CHECKPOINT_RANK_KEY = "checkpoint_rank" + + +# Key for AIR Checkpoint that gets uploaded from distributed workers. 
+CHECKPOINT_DISTRIBUTED_KEY = "distributed" diff --git a/python/ray/train/data_parallel_trainer.py b/python/ray/train/data_parallel_trainer.py index f6aa6df202e9..78d4b54d7a41 100644 --- a/python/ray/train/data_parallel_trainer.py +++ b/python/ray/train/data_parallel_trainer.py @@ -1,3 +1,4 @@ +import copy import inspect import logging from pathlib import Path @@ -39,7 +40,8 @@ def __init__( ): self.preprocessor = preprocessor super(_DataParallelCheckpointManager, self).__init__( - run_dir=run_dir, checkpoint_strategy=checkpoint_strategy + run_dir=run_dir, + checkpoint_strategy=checkpoint_strategy, ) def _process_persistent_checkpoint(self, checkpoint: _TrackedCheckpoint): @@ -411,6 +413,7 @@ def training_loop(self) -> None: num_gpus_per_worker=scaling_config.num_gpus_per_worker, additional_resources_per_worker=additional_resources_per_worker, max_retries=0, + checkpoint_config=self.run_config.checkpoint_config, ) checkpoint_manager = self._checkpoint_manager_cls( @@ -420,6 +423,17 @@ def training_loop(self) -> None: # Start the remote actors. backend_executor.start(initialization_hook=None) + # Disable TrainingIterator's CheckpointManager from handling + # checkpoints itself by setting num_to_keep to None. + # This is important because otherwise Trainer's CheckpointManager + # may delete a checkpoint prematurely, before the next checkpoint + # has been fully handled by Tune. + # TODO(jungong, justinvyu) : Trainer should not own a + # CheckpointManager. 
+ checkpoint_strategy = copy.deepcopy(self.run_config.checkpoint_config) + checkpoint_strategy.num_to_keep = None + checkpoint_strategy.checkpoint_score_attribute = None + training_iterator = self._training_iterator_cls( backend_executor=backend_executor, backend_config=self._backend_config, @@ -427,7 +441,8 @@ def training_loop(self) -> None: dataset_spec=self._ingest_spec, checkpoint_manager=checkpoint_manager, checkpoint=self.resume_from_checkpoint, - checkpoint_strategy=None, + checkpoint_strategy=checkpoint_strategy, + storage_path=self.run_config.storage_path, ) self._report(training_iterator) diff --git a/python/ray/train/trainer.py b/python/ray/train/trainer.py index 34dac3276111..52ea9d292873 100644 --- a/python/ray/train/trainer.py +++ b/python/ray/train/trainer.py @@ -4,6 +4,8 @@ from ray.air.checkpoint import Checkpoint from ray.air.config import CheckpointConfig +from ray.air import session +from ray.air._internal.uri_utils import URI from ray.air._internal.util import StartTraceback from ray.train._internal.backend_executor import ( BackendExecutor, @@ -25,6 +27,7 @@ GenDataset, TrainingFailedError, ) +from ray.tune.trainable.util import TrainableUtil from ray.util.annotations import DeveloperAPI T = TypeVar("T") @@ -47,6 +50,7 @@ def __init__( checkpoint: Optional[Union[Dict, str, Path, Checkpoint]], checkpoint_strategy: Optional[CheckpointConfig], run_dir: Optional[Path] = None, + storage_path: Optional[str] = None, ): self._backend_executor = backend_executor self._backend = backend_config.backend_cls() @@ -55,12 +59,12 @@ def __init__( self._run_dir = run_dir self._checkpoint_manager = checkpoint_manager self._checkpoint_strategy = checkpoint_strategy + self._storage_path = storage_path self._start_training( train_func=train_func, run_dir=run_dir, dataset_spec=self._dataset_spec, checkpoint=checkpoint, - checkpoint_strategy=checkpoint_strategy, ) self._final_results = None @@ -75,11 +79,10 @@ def _start_training( run_dir, dataset_spec, checkpoint, 
- checkpoint_strategy, latest_checkpoint_id=None, ): self._checkpoint_manager.on_start_training( - checkpoint_strategy=checkpoint_strategy, + checkpoint_strategy=self._checkpoint_strategy, run_dir=run_dir, latest_checkpoint_id=latest_checkpoint_id, ) @@ -92,6 +95,12 @@ def _start_training( ) ) + # Session has started. Set current cloud checkpoint dir if necessary. + if self._checkpoint_strategy._checkpoint_upload_from_workers: + self._backend_executor._set_checkpoint_uri( + self.__get_cloud_checkpoint_dir() + ) + def _run_with_error_handling(self, func: Callable): try: return func() @@ -109,8 +118,7 @@ def _run_with_error_handling(self, func: Callable): self._run_dir, self._dataset_spec, self._checkpoint_manager.latest_checkpoint, - self._checkpoint_strategy, - latest_checkpoint_id=self._checkpoint_manager.latest_checkpoint_id, + self._checkpoint_manager.latest_checkpoint_id, ) return self._run_with_error_handling(func) except InactiveWorkerGroupError: @@ -174,9 +182,22 @@ def _fetch_next_result(self) -> Optional[List[Dict]]: result_data = [r.data for r in results] return result_data elif result_type is TrainingResultType.CHECKPOINT: - self._checkpoint_manager._process_checkpoint( + self._checkpoint_manager._process_checkpoints( results, decode_checkpoint_fn=self._backend._decode_data ) + + # Note(jungong) : This is kinda funky. We update the cloud + # checkpoint dir on every distributed worker right after + # an existing checkpoint is processed. We must do this because + # Trainers do not have the concept of iterations or steps, + # which must be synced between Trainable driver and the trainers. + # TODO(jungong) : It would be nicer if we find a cleaner way + # to sync the current cloud checkpointing directory between + # Tuner, Trainable, and Trainers. + if self._checkpoint_strategy._checkpoint_upload_from_workers: + self._backend_executor._set_checkpoint_uri( + self.__get_cloud_checkpoint_dir() + ) # Iterate until next REPORT call or training has finished. 
else: raise TrainBackendError( @@ -194,9 +215,13 @@ def _finish_checkpointing(self): result_type = results[0].type # Process checkpoints and ignore other result types. if result_type is TrainingResultType.CHECKPOINT: - self._checkpoint_manager._process_checkpoint( + self._checkpoint_manager._process_checkpoints( results, decode_checkpoint_fn=self._backend._decode_data ) + if self._checkpoint_strategy._checkpoint_upload_from_workers: + self._backend_executor._set_checkpoint_uri( + self.__get_cloud_checkpoint_dir() + ) def _finish_training(self): """Finish training and return final results. Propagate any exceptions. @@ -248,3 +273,24 @@ def get_final_results(self, force: bool = False) -> List[T]: ) return self._final_results + + # This is extremely hacky and fragile. + # TODO(jungong) : We should refactor things so Tuner, Trinable, and + # Trainers have a consistent view of the current cloud checkpointing + # directory. + # We should probably also refactor things so Syncer and SyncConfig + # are available everywhere session is available. + def __get_cloud_checkpoint_dir(self): + if not self._storage_path: + # Can't run cloud upload if storage path is not set. 
+ return None + + base_dir = URI(self._storage_path) + path = Path(session.get_trial_dir()) + trial_dir_name = path.name + exp_dir_name = path.parent.name + checkpoint_dir_name = TrainableUtil._make_checkpoint_dir_name( + self._checkpoint_manager._latest_checkpoint_id + ) + + return str(base_dir / exp_dir_name / trial_dir_name / checkpoint_dir_name) diff --git a/python/ray/tune/impl/tuner_internal.py b/python/ray/tune/impl/tuner_internal.py index 3985c7ed58b0..33134812bacd 100644 --- a/python/ray/tune/impl/tuner_internal.py +++ b/python/ray/tune/impl/tuner_internal.py @@ -671,6 +671,12 @@ def _get_tune_run_arguments(self, trainable: TrainableType) -> Dict[str, Any]: ), checkpoint_freq=checkpoint_freq, checkpoint_at_end=checkpoint_at_end, + checkpoint_keep_all_ranks=( + self._run_config.checkpoint_config._checkpoint_keep_all_ranks + ), + checkpoint_upload_from_workers=( + self._run_config.checkpoint_config._checkpoint_upload_from_workers + ), _experiment_checkpoint_dir=self._experiment_checkpoint_dir, raise_on_failed_trial=False, fail_fast=(self._run_config.failure_config.fail_fast), diff --git a/python/ray/tune/tests/test_syncer.py b/python/ray/tune/tests/test_syncer.py index 86c57484d14e..e3bff59fb67e 100644 --- a/python/ray/tune/tests/test_syncer.py +++ b/python/ray/tune/tests/test_syncer.py @@ -6,25 +6,29 @@ import tempfile import time from typing import List, Optional +import unittest from unittest.mock import patch +from freezegun import freeze_time +import numpy as np import pyarrow.fs import pytest -from freezegun import freeze_time import ray import ray.cloudpickle as pickle from ray import tune from ray.air import session, Checkpoint, RunConfig -from ray.air._internal.uri_utils import URI -from ray.tune import TuneError -from ray.tune.syncer import _DefaultSyncer, Syncer, SyncConfig -from ray.tune.utils.file_transfer import _pack_dir, _unpack_dir +from ray.air.config import CheckpointConfig, ScalingConfig from ray.air._internal.remote_storage import ( 
upload_to_uri, download_from_uri, get_fs_and_path, ) +from ray.air._internal.uri_utils import URI +from ray.train.torch import TorchTrainer +from ray.tune import TuneError +from ray.tune.syncer import _DefaultSyncer, Syncer, SyncConfig +from ray.tune.utils.file_transfer import _pack_dir, _unpack_dir @pytest.fixture @@ -1006,6 +1010,154 @@ def get_remote_trial_dir(trial_id: int): assert num_checkpoints == 2 # 1 before restore + 1 after +def test_distributed_checkpointing_to_s3( + ray_start_4_cpus, mock_s3_bucket_uri, tmp_path +): + """Tests a Tune run with distributed checkpointing to a mock s3 bucket. + + This test runs a Tune run with 3 distributed DDP workers. + We run 10 steps in total and checkpoint every 3 steps. + At the end of the test, we check the ranked index files are + available both locally and on the cloud. + We also make sure the model checkpoint files are only available + on the cloud. + """ + exp_name = "test_dist_ckpt_to_s3" + local_dir = os.path.join(tmp_path, "local_dir") + + def train_fn(config): + world_rank = session.get_world_rank() + for step in range(config["num_steps"]): + time.sleep(0.1) + checkpoint = None + if step % 3 == 0: + checkpoint_dir = tempfile.mkdtemp(dir=tmp_path) + path = os.path.join(checkpoint_dir, f"optim-{world_rank}.pt") + with open(path, "wb") as f: + f.write( + pickle.dumps( + { + "optimizer": "adam", + "lr": 0.001, + "optimizer_state": np.random.random((100, 100)), + } + ) + ) + path = os.path.join(checkpoint_dir, f"model-{world_rank}.pt") + with open(path, "wb") as f: + f.write( + pickle.dumps( + { + "model": "resnet", + "weights": np.random.random((100, 100)), + } + ) + ) + checkpoint = Checkpoint.from_directory(checkpoint_dir) + session.report({"score": step}, checkpoint=checkpoint) + + def _check_dir_content(checkpoint_dir, exist=True): + # Double check local checkpoint dir. 
+ local_trial_data = os.listdir( + os.path.join(local_dir, "test_dist_ckpt_to_s3", "trial_0") + ) + if exist: + # checkpoint in local trial folder. + assert checkpoint_dir in local_trial_data + local_checkpoint_data = os.listdir( + os.path.join( + local_dir, "test_dist_ckpt_to_s3", "trial_0", checkpoint_dir + ) + ) + # Local folder has index files. + assert ".RANK_0.files" in local_checkpoint_data + assert ".RANK_1.files" in local_checkpoint_data + assert ".RANK_2.files" in local_checkpoint_data + # But no data files. + assert "model-0.pt" not in local_checkpoint_data + assert "model-1.pt" not in local_checkpoint_data + assert "model-2.pt" not in local_checkpoint_data + else: + assert checkpoint_dir not in local_trial_data + + cloud_trial_data = os.listdir( + os.path.join(download_dir, "test_dist_ckpt_to_s3", "trial_0") + ) + if exist: + # Checkpoint in cloud trial folder. + assert checkpoint_dir in cloud_trial_data + cloud_checkpoint_data = os.listdir( + os.path.join( + download_dir, "test_dist_ckpt_to_s3", "trial_0", checkpoint_dir + ) + ) + # Cloud folder has index files. + assert ".RANK_0.files" in cloud_checkpoint_data + assert ".RANK_1.files" in cloud_checkpoint_data + assert ".RANK_2.files" in cloud_checkpoint_data + # And all the data files. + assert "model-0.pt" in cloud_checkpoint_data + assert "model-1.pt" in cloud_checkpoint_data + assert "model-2.pt" in cloud_checkpoint_data + else: + assert checkpoint_dir not in cloud_trial_data + + with unittest.mock.patch.dict(os.environ, {"RAY_AIR_LOCAL_CACHE_DIR": local_dir}): + trainer = TorchTrainer( + train_fn, + train_loop_config={"num_steps": 10}, + scaling_config=ScalingConfig( + num_workers=3, + use_gpu=False, + ), + # Note(jungong) : Trainers ignore the RunConfig specified via + # Tuner below. So to specify proper cloud paths and CheckpointConfig, + # we must pass another dummy RunConfig here. + # TODO(jungong) : this is extremely awkward. Refactor and clean up. 
+ run_config=RunConfig( + storage_path=mock_s3_bucket_uri, + checkpoint_config=CheckpointConfig( + num_to_keep=3, + checkpoint_frequency=3, + _checkpoint_keep_all_ranks=True, + _checkpoint_upload_from_workers=True, + ), + ), + ) + + tuner = tune.Tuner( + trainer, + run_config=RunConfig( + name=exp_name, + storage_path=mock_s3_bucket_uri, + checkpoint_config=CheckpointConfig( + num_to_keep=3, + ), + ), + tune_config=tune.TuneConfig( + # Only running 1 trial. + trial_dirname_creator=lambda t: "trial_0" + ), + ) + result_grid = tuner.fit() + # Run was successful. + assert not result_grid.errors + # Make sure checkpoint is backed by the full s3 checkpoint uri. + assert result_grid[0].checkpoint.uri.startswith("s3://") + + # Download remote dir locally to do some sanity checks + download_dir = os.path.join(tmp_path, "download") + + shutil.rmtree(download_dir, ignore_errors=True) + download_from_uri(uri=mock_s3_bucket_uri, local_path=str(download_dir)) + + # Step 0 checkpoint is deleted. + _check_dir_content("checkpoint_000000", exist=False) + _check_dir_content("checkpoint_000001") # Step 3 + _check_dir_content("checkpoint_000002") # Step 6 + _check_dir_content("checkpoint_000003") # Step 9 + + if __name__ == "__main__": import sys diff --git a/python/ray/tune/trainable/util.py b/python/ray/tune/trainable/util.py index 32967bfb43a3..2adbb6bf3dc9 100644 --- a/python/ray/tune/trainable/util.py +++ b/python/ray/tune/trainable/util.py @@ -80,6 +80,14 @@ def find_rel_checkpoint_dir(logdir, checkpoint_path): tokens = rel_path.split(os.sep) return os.path.join(tokens[0]) + @staticmethod + def _make_checkpoint_dir_name(index: Union[int, str]): + """Get the name of the checkpoint directory suffix.""" + suffix = "checkpoint" + if index is not None: + suffix += f"_{index:06d}" if isinstance(index, int) else f"_{index}" + return suffix + @staticmethod def make_checkpoint_dir( checkpoint_dir: str, index: Union[int, str], override: bool = False @@ -93,9 +101,7 @@ def 
make_checkpoint_dir( override: Deletes checkpoint_dir before creating a new one. """ - suffix = "checkpoint" - if index is not None: - suffix += f"_{index:06d}" if isinstance(index, int) else f"_{index}" + suffix = TrainableUtil._make_checkpoint_dir_name(index) checkpoint_dir = os.path.join(checkpoint_dir, suffix) if override and os.path.exists(checkpoint_dir): diff --git a/python/ray/tune/tune.py b/python/ray/tune/tune.py index 2467c8eee84d..4c5c7dd2983a 100644 --- a/python/ray/tune/tune.py +++ b/python/ray/tune/tune.py @@ -254,6 +254,8 @@ def run( checkpoint_score_attr: Optional[str] = None, checkpoint_freq: int = 0, checkpoint_at_end: bool = False, + checkpoint_keep_all_ranks: bool = False, + checkpoint_upload_from_workers: bool = False, verbose: Union[int, Verbosity] = Verbosity.V3_TRIAL_DETAILS, progress_reporter: Optional[ProgressReporter] = None, log_to_file: bool = False, @@ -388,6 +390,10 @@ def run( checkpoint_at_end: Whether to checkpoint at the end of the experiment regardless of the checkpoint_freq. Default is False. This has no effect when using the Functional Training API. + checkpoint_keep_all_ranks: Whether to save checkpoints from all ranked + training workers. + checkpoint_upload_from_workers: Whether to upload checkpoint files + directly from distributed training workers. verbose: 0, 1, 2, or 3. Verbosity mode. 0 = silent, 1 = only status updates, 2 = status and brief trial results, 3 = status and detailed trial results. Defaults to 3. @@ -665,6 +671,8 @@ class and registered trainables. 
checkpoint_score_order=checkpoint_score_order, checkpoint_frequency=checkpoint_freq, checkpoint_at_end=checkpoint_at_end, + _checkpoint_keep_all_ranks=checkpoint_keep_all_ranks, + _checkpoint_upload_from_workers=checkpoint_upload_from_workers, ) if num_samples == -1: From 0b384973771a54b27c437aba11cdb4157b7cb892 Mon Sep 17 00:00:00 2001 From: Avnish Narayan <38871737+avnishn@users.noreply.github.com> Date: Fri, 12 May 2023 13:11:14 -0700 Subject: [PATCH 368/424] [RLlib] RLlib contrib (#35141) Signed-off-by: Avnish --- .buildkite/pipeline.ml.yml | 27 + ci/pipeline/determine_tests_to_run.py | 8 + rllib_contrib/README.md | 30 + rllib_contrib/a3c/README.rst | 21 + rllib_contrib/a3c/examples/a3c_cartpole_v1.py | 29 + rllib_contrib/a3c/pyproject.toml | 18 + rllib_contrib/a3c/requirements.txt | 2 + .../a3c/src/rllib_a3c/a3c/__init__.py | 7 + rllib_contrib/a3c/src/rllib_a3c/a3c/a3c.py | 261 +++++++++ .../a3c/src/rllib_a3c/a3c/a3c_tf_policy.py | 183 ++++++ .../a3c/src/rllib_a3c/a3c/a3c_torch_policy.py | 152 +++++ rllib_contrib/a3c/tests/test_a3c.py | 100 ++++ rllib_contrib/maml/README.rst | 27 + .../maml/examples/cartpole_mass_maml.py | 52 ++ rllib_contrib/maml/pyproject.toml | 18 + rllib_contrib/maml/requirements.txt | 2 + rllib_contrib/maml/src/rllib_maml/__init__.py | 0 .../maml/src/rllib_maml/envs/__init__.py | 11 + .../maml/src/rllib_maml/envs/ant_rand_goal.py | 86 +++ .../maml/src/rllib_maml/envs/cartpole_mass.py | 31 ++ .../maml/src/rllib_maml/envs/pendulum_mass.py | 33 ++ .../maml/src/rllib_maml/maml/__init__.py | 12 + .../maml/src/rllib_maml/maml/maml.py | 388 +++++++++++++ .../src/rllib_maml/maml/maml_tf_policy.py | 520 ++++++++++++++++++ .../src/rllib_maml/maml/maml_torch_policy.py | 449 +++++++++++++++ rllib_contrib/maml/tests/test_maml.py | 61 ++ 26 files changed, 2528 insertions(+) create mode 100644 rllib_contrib/README.md create mode 100644 rllib_contrib/a3c/README.rst create mode 100644 rllib_contrib/a3c/examples/a3c_cartpole_v1.py create mode 100644 
rllib_contrib/a3c/pyproject.toml create mode 100644 rllib_contrib/a3c/requirements.txt create mode 100644 rllib_contrib/a3c/src/rllib_a3c/a3c/__init__.py create mode 100644 rllib_contrib/a3c/src/rllib_a3c/a3c/a3c.py create mode 100644 rllib_contrib/a3c/src/rllib_a3c/a3c/a3c_tf_policy.py create mode 100644 rllib_contrib/a3c/src/rllib_a3c/a3c/a3c_torch_policy.py create mode 100644 rllib_contrib/a3c/tests/test_a3c.py create mode 100644 rllib_contrib/maml/README.rst create mode 100644 rllib_contrib/maml/examples/cartpole_mass_maml.py create mode 100644 rllib_contrib/maml/pyproject.toml create mode 100644 rllib_contrib/maml/requirements.txt create mode 100644 rllib_contrib/maml/src/rllib_maml/__init__.py create mode 100644 rllib_contrib/maml/src/rllib_maml/envs/__init__.py create mode 100644 rllib_contrib/maml/src/rllib_maml/envs/ant_rand_goal.py create mode 100644 rllib_contrib/maml/src/rllib_maml/envs/cartpole_mass.py create mode 100644 rllib_contrib/maml/src/rllib_maml/envs/pendulum_mass.py create mode 100644 rllib_contrib/maml/src/rllib_maml/maml/__init__.py create mode 100644 rllib_contrib/maml/src/rllib_maml/maml/maml.py create mode 100644 rllib_contrib/maml/src/rllib_maml/maml/maml_tf_policy.py create mode 100644 rllib_contrib/maml/src/rllib_maml/maml/maml_torch_policy.py create mode 100644 rllib_contrib/maml/tests/test_maml.py diff --git a/.buildkite/pipeline.ml.yml b/.buildkite/pipeline.ml.yml index 6c1007d3cd50..ad474cf46e67 100644 --- a/.buildkite/pipeline.ml.yml +++ b/.buildkite/pipeline.ml.yml @@ -528,3 +528,30 @@ - ./ci/env/env_info.sh - python ./ci/env/setup_credentials.py wandb comet_ml - bazel test --config=ci $(./ci/run/bazel_export_options) --build_tests_only --test_tag_filters=needs_credentials,-timeseries_libs,-gpu,-py37,-post_wheel_build doc/... 
+ + +- label: ":exploding_death_star: RLlib Contrib: A3C Tests" + conditions: ["NO_WHEELS_REQUIRED", "RAY_CI_RLLIB_CONTRIB_AFFECTED"] + commands: + - cleanup() { if [ "${BUILDKITE_PULL_REQUEST}" = "false" ]; then ./ci/build/upload_build_info.sh; fi }; trap cleanup EXIT + - (cd rllib_contrib/a3c && pip install -r requirements.txt && pip install -e .) + - ./ci/env/env_info.sh + - pytest rllib_contrib/a3c/tests/test_a3c.py + +- label: ":exploding_death_star: RLlib Contrib: MAML Tests" + conditions: ["NO_WHEELS_REQUIRED", "RAY_CI_RLLIB_CONTRIB_AFFECTED"] + commands: + - cleanup() { if [ "${BUILDKITE_PULL_REQUEST}" = "false" ]; then ./ci/build/upload_build_info.sh; fi }; trap cleanup EXIT + + # Install mujoco necessary for the testing environments + - sudo apt install libosmesa6-dev libgl1-mesa-glx libglfw3 patchelf -y + - wget https://mujoco.org/download/mujoco210-linux-x86_64.tar.gz + - mkdir /root/.mujoco + - mv mujoco210-linux-x86_64.tar.gz /root/.mujoco/. + - (cd /root/.mujoco && tar -xf /root/.mujoco/mujoco210-linux-x86_64.tar.gz) + - echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/root/.mujoco/mujoco210/bin' >> /root/.bashrc + - source /root/.bashrc + + - (cd rllib_contrib/maml && pip install -r requirements.txt && pip install -e .) + - ./ci/env/env_info.sh + - pytest rllib_contrib/maml/tests/test_maml.py diff --git a/ci/pipeline/determine_tests_to_run.py b/ci/pipeline/determine_tests_to_run.py index bed9110be938..7a3cd86d4320 100644 --- a/ci/pipeline/determine_tests_to_run.py +++ b/ci/pipeline/determine_tests_to_run.py @@ -88,6 +88,8 @@ def get_commit_range(): # Whether all RLlib tests should be run. # Set to 1 only when a source file in `ray/rllib` has been changed. 
RAY_CI_RLLIB_DIRECTLY_AFFECTED = 0 + # Whether to run all RLlib contrib tests + RAY_CI_RLLIB_CONTRIB_AFFECTED = 0 RAY_CI_SERVE_AFFECTED = 0 RAY_CI_CORE_CPP_AFFECTED = 0 RAY_CI_CPP_AFFECTED = 0 @@ -179,6 +181,9 @@ def get_commit_range(): RAY_CI_RLLIB_DIRECTLY_AFFECTED = 1 RAY_CI_LINUX_WHEELS_AFFECTED = 1 RAY_CI_MACOS_WHEELS_AFFECTED = 1 + elif re.match("rllib_contrib/", changed_file): + if not changed_file.endswith(".md"): + RAY_CI_RLLIB_CONTRIB_AFFECTED = 1 elif changed_file.startswith("python/ray/serve"): RAY_CI_DOC_AFFECTED = 1 RAY_CI_SERVE_AFFECTED = 1 @@ -307,6 +312,8 @@ def get_commit_range(): RAY_CI_TRAIN_AFFECTED = 1 RAY_CI_RLLIB_AFFECTED = 1 RAY_CI_RLLIB_DIRECTLY_AFFECTED = 1 + # the rllib contrib ci should only be run on pull requests + RAY_CI_RLLIB_CONTRIB_AFFECTED = 0 RAY_CI_SERVE_AFFECTED = 1 RAY_CI_CPP_AFFECTED = 1 RAY_CI_CORE_CPP_AFFECTED = 1 @@ -331,6 +338,7 @@ def get_commit_range(): "RAY_CI_TRAIN_AFFECTED={}".format(RAY_CI_TRAIN_AFFECTED), "RAY_CI_RLLIB_AFFECTED={}".format(RAY_CI_RLLIB_AFFECTED), "RAY_CI_RLLIB_DIRECTLY_AFFECTED={}".format(RAY_CI_RLLIB_DIRECTLY_AFFECTED), + "RAY_CI_RLLIB_CONTRIB_AFFECTED={}".format(RAY_CI_RLLIB_CONTRIB_AFFECTED), "RAY_CI_SERVE_AFFECTED={}".format(RAY_CI_SERVE_AFFECTED), "RAY_CI_DASHBOARD_AFFECTED={}".format(RAY_CI_DASHBOARD_AFFECTED), "RAY_CI_DOC_AFFECTED={}".format(RAY_CI_DOC_AFFECTED), diff --git a/rllib_contrib/README.md b/rllib_contrib/README.md new file mode 100644 index 000000000000..1cc2e0e775ea --- /dev/null +++ b/rllib_contrib/README.md @@ -0,0 +1,30 @@ +# RLlib-Contrib + +RLlib-Contrib is a directory for more experimental community contributions to RLlib including contributed algorithms. **This directory has a more relaxed bar for contributions than Ray or RLlib.** If you are interested in contributing to RLlib-Contrib, please see the [contributing guide](CONTRIBUTING.md). 
+ +## Getting Started and Installation +Navigate to the algorithm sub-directory you are interested in and see the README.md for installation instructions and example scripts to help you get started! + +## Maintenance + +**Any issues that are filed in `rllib_contrib` will be solved best-effort by the community and there is no expectation of maintenance by the RLlib team.** + +**The API surface between algorithms in `rllib_contrib` and current versions of Ray / RLlib is not guaranteed. This means that any APIs that are used in rllib_contrib could potentially become modified/removed in newer version of Ray/RLlib.** + +We will generally accept contributions to this directory that meet any of the following criteria: + +1. Updating dependencies. +2. Submitting community contributed algorithms that have been tested and are ready for use. +3. Enabling algorithms to be run in different environments (ex. adding support for a new type of gymnasium environment). +4. Updating algorithms for use with the newer RLlib APIs. +5. General bug fixes. + +We will not accept contributions that generally add a significant maintenance burden. In this case users should instead make their own repo with their contribution, using the same guidelines as this directory, and the RLlib team can help to market/promote it in the Ray docs. + +## Getting Involved + +| Platform | Purpose | Support Level | +| --- | --- | --- | +| [Discuss Forum](https://discuss.ray.io) | For discussions about development and questions about usage. | Community | +| [GitHub Issues](https://github.com/ray-project/rllib-contrib-maml/issues) | For reporting bugs and filing feature requests. | Community | +| [Slack](https://forms.gle/9TSdDYUgxYs8SA9e8) | For collaborating with other Ray users. 
| Community | diff --git a/rllib_contrib/a3c/README.rst b/rllib_contrib/a3c/README.rst new file mode 100644 index 000000000000..df3665c1408e --- /dev/null +++ b/rllib_contrib/a3c/README.rst @@ -0,0 +1,21 @@ +A3C (Asynchronous Advantage Actor-Critic) +----------------------------------------- + +`A3C ` is the asynchronous version of A2C, where gradients are computed on the workers directly after trajectory rollouts, and only then shipped to a central learner to accumulate these gradients on the central model. After the central model update, parameters are broadcast back to all workers. Similar to A2C, A3C scales to 16-32+ worker processes depending on the environment. + + +Installation +------------ + +.. code-block:: bash + + conda create -n rllib-a3c python=3.10 + conda activate rllib-a3c + pip install -r requirements.txt + pip install -e '.[development]' + + +Usage +----- + +.. literalinclude:: examples/a3c_cartpole_v1.py \ No newline at end of file diff --git a/rllib_contrib/a3c/examples/a3c_cartpole_v1.py b/rllib_contrib/a3c/examples/a3c_cartpole_v1.py new file mode 100644 index 000000000000..2f57ff71e105 --- /dev/null +++ b/rllib_contrib/a3c/examples/a3c_cartpole_v1.py @@ -0,0 +1,29 @@ +from rllib_a3c.a3c import A3C, A3CConfig + +import ray +from ray import air, tune + +if __name__ == "__main__": + ray.init() + + config = ( + A3CConfig() + .rollouts(num_rollout_workers=1) + .framework("torch") + .environment("CartPole-v1") + .training( + gamma=0.95, + ) + ) + + num_iterations = 100 + + tuner = tune.Tuner( + A3C, + param_space=config.to_dict(), + run_config=air.RunConfig( + stop={"episode_reward_mean": 150, "timesteps_total": 200000}, + failure_config=air.FailureConfig(fail_fast="raise"), + ), + ) + results = tuner.fit() diff --git a/rllib_contrib/a3c/pyproject.toml b/rllib_contrib/a3c/pyproject.toml new file mode 100644 index 000000000000..173999a039a8 --- /dev/null +++ b/rllib_contrib/a3c/pyproject.toml @@ -0,0 +1,18 @@ +[build-system] +requires = 
["setuptools>=61.0"] +build-backend = "setuptools.build_meta" + +[tool.setuptools.packages.find] +where = ["src"] + +[project] +name = "rllib-a3c" +authors = [{name = "Anyscale Inc."}] +version = "0.1.0" +description = "" +readme = "README.md" +requires-python = ">=3.7, <3.11" +dependencies = ["gym[accept-rom-license]", "gymnasium[mujoco]==0.26.3", "higher", "ray[rllib]==2.3.1"] + +[project.optional-dependencies] +development = ["pytest>=7.2.2", "pre-commit==2.21.0", "tensorflow==2.11.0", "torch==1.12.0"] diff --git a/rllib_contrib/a3c/requirements.txt b/rllib_contrib/a3c/requirements.txt new file mode 100644 index 000000000000..f1191ef52412 --- /dev/null +++ b/rllib_contrib/a3c/requirements.txt @@ -0,0 +1,2 @@ +tensorflow==2.11.0 +torch==1.12.0 diff --git a/rllib_contrib/a3c/src/rllib_a3c/a3c/__init__.py b/rllib_contrib/a3c/src/rllib_a3c/a3c/__init__.py new file mode 100644 index 000000000000..3b050de0dca5 --- /dev/null +++ b/rllib_contrib/a3c/src/rllib_a3c/a3c/__init__.py @@ -0,0 +1,7 @@ +from rllib_a3c.a3c.a3c import A3C, A3CConfig + +from ray.tune.registry import register_trainable + +__all__ = ["A3CConfig", "A3C"] + +register_trainable("rllib-contrib-a3c", A3C) diff --git a/rllib_contrib/a3c/src/rllib_a3c/a3c/a3c.py b/rllib_contrib/a3c/src/rllib_a3c/a3c/a3c.py new file mode 100644 index 000000000000..7f5a661cb94d --- /dev/null +++ b/rllib_contrib/a3c/src/rllib_a3c/a3c/a3c.py @@ -0,0 +1,261 @@ +import logging +from typing import Any, Dict, List, Optional, Type, Union + +from ray.rllib.algorithms.algorithm import Algorithm +from ray.rllib.algorithms.algorithm_config import AlgorithmConfig, NotProvided +from ray.rllib.evaluation.rollout_worker import RolloutWorker +from ray.rllib.policy.policy import Policy +from ray.rllib.utils.annotations import override +from ray.rllib.utils.metrics import ( + APPLY_GRADS_TIMER, + GRAD_WAIT_TIMER, + NUM_AGENT_STEPS_SAMPLED, + NUM_AGENT_STEPS_TRAINED, + NUM_ENV_STEPS_SAMPLED, + NUM_ENV_STEPS_TRAINED, + 
SYNCH_WORKER_WEIGHTS_TIMER, +) +from ray.rllib.utils.metrics.learner_info import LearnerInfoBuilder +from ray.rllib.utils.typing import ResultDict + +logger = logging.getLogger(__name__) + + +class A3CConfig(AlgorithmConfig): + """Defines a configuration class from which a A3C Algorithm can be built. + + Example: + >>> from ray import tune + >>> from ray.rllib.algorithms.a3c import A3CConfig + >>> config = A3CConfig() # doctest: +SKIP + >>> config = config.training(lr=0.01, grad_clip=30.0) # doctest: +SKIP + >>> config = config.resources(num_gpus=0) # doctest: +SKIP + >>> config = config.rollouts(num_rollout_workers=4) # doctest: +SKIP + >>> config = config.environment("CartPole-v1") # doctest: +SKIP + >>> print(config.to_dict()) # doctest: +SKIP + >>> # Build a Algorithm object from the config and run 1 training iteration. + >>> algo = config.build() # doctest: +SKIP + >>> algo.train() # doctest: +SKIP + + Example: + >>> from ray.rllib.algorithms.a3c import A3CConfig + >>> config = A3CConfig() + >>> # Print out some default values. + >>> print(config.sample_async) # doctest: +SKIP + >>> # Update the config object. + >>> config = config.training( # doctest: +SKIP + ... lr=tune.grid_search([0.001, 0.0001]), use_critic=False) + >>> # Set the config object's env. + >>> config = config.environment(env="CartPole-v1") # doctest: +SKIP + >>> # Use to_dict() to get the old-style python config dict + >>> # when running with tune. + >>> tune.Tuner( # doctest: +SKIP + ... "A3C", + ... stop={"episode_reward_mean": 200}, + ... param_space=config.to_dict(), + ... ).fit() + """ + + def __init__(self, algo_class=None): + """Initializes a A3CConfig instance.""" + super().__init__(algo_class=algo_class or A3C) + + # fmt: off + # __sphinx_doc_begin__ + # + # A3C specific settings. 
+ self.use_critic = True + self.use_gae = True + self.lambda_ = 1.0 + self.grad_clip = 40.0 + self.lr_schedule = None + self.vf_loss_coeff = 0.5 + self.entropy_coeff = 0.01 + self.entropy_coeff_schedule = None + self.sample_async = True + + # Override some of AlgorithmConfig's default values with PPO-specific values. + self.num_rollout_workers = 2 + self.rollout_fragment_length = 10 + self.lr = 0.0001 + # Min time (in seconds) per reporting. + # This causes not every call to `training_iteration` to be reported, + # but to wait until n seconds have passed and then to summarize the + # thus far collected results. + self.min_time_s_per_iteration = 5 + self.exploration_config = { + # The Exploration class to use. In the simplest case, this is the name + # (str) of any class present in the `rllib.utils.exploration` package. + # You can also provide the python class directly or the full location + # of your class (e.g. "ray.rllib.utils.exploration.epsilon_greedy. + # EpsilonGreedy"). + "type": "StochasticSampling", + # Add constructor kwargs here (if any). + } + # __sphinx_doc_end__ + # fmt: on + + @override(AlgorithmConfig) + def training( + self, + *, + lr_schedule: Optional[List[List[Union[int, float]]]] = NotProvided, + use_critic: Optional[bool] = NotProvided, + use_gae: Optional[bool] = NotProvided, + lambda_: Optional[float] = NotProvided, + grad_clip: Optional[float] = NotProvided, + vf_loss_coeff: Optional[float] = NotProvided, + entropy_coeff: Optional[float] = NotProvided, + entropy_coeff_schedule: Optional[List[List[Union[int, float]]]] = NotProvided, + sample_async: Optional[bool] = NotProvided, + **kwargs, + ) -> "A3CConfig": + """Sets the training related configuration. + + Args: + lr_schedule: Learning rate schedule. In the format of + [[timestep, lr-value], [timestep, lr-value], ...] + Intermediary timesteps will be assigned to interpolated learning rate + values. A schedule should normally start from timestep 0. 
+ use_critic: Should use a critic as a baseline (otherwise don't use value + baseline; required for using GAE). + use_gae: If true, use the Generalized Advantage Estimator (GAE) + with a value function, see https://arxiv.org/pdf/1506.02438.pdf. + lambda_: GAE(gamma) parameter. + grad_clip: Max global norm for each gradient calculated by worker. + vf_loss_coeff: Value Function Loss coefficient. + entropy_coeff: Coefficient of the entropy regularizer. + entropy_coeff_schedule: Decay schedule for the entropy regularizer. + sample_async: Whether workers should sample async. Note that this + increases the effective rollout_fragment_length by up to 5x due + to async buffering of batches. + + Returns: + This updated AlgorithmConfig object. + """ + # Pass kwargs onto super's `training()` method. + super().training(**kwargs) + + if lr_schedule is not NotProvided: + self.lr_schedule = lr_schedule + if use_critic is not NotProvided: + self.lr_schedule = use_critic + if use_gae is not NotProvided: + self.use_gae = use_gae + if lambda_ is not NotProvided: + self.lambda_ = lambda_ + if grad_clip is not NotProvided: + self.grad_clip = grad_clip + if vf_loss_coeff is not NotProvided: + self.vf_loss_coeff = vf_loss_coeff + if entropy_coeff is not NotProvided: + self.entropy_coeff = entropy_coeff + if entropy_coeff_schedule is not NotProvided: + self.entropy_coeff_schedule = entropy_coeff_schedule + if sample_async is not NotProvided: + self.sample_async = sample_async + + return self + + @override(AlgorithmConfig) + def validate(self) -> None: + # Call super's validation method. 
+ super().validate() + + if self.entropy_coeff < 0: + raise ValueError("`entropy_coeff` must be >= 0.0!") + if self.num_rollout_workers <= 0 and self.sample_async: + raise ValueError("`num_workers` for A3C must be >= 1!") + + +class A3C(Algorithm): + @classmethod + @override(Algorithm) + def get_default_config(cls) -> AlgorithmConfig: + return A3CConfig() + + @classmethod + @override(Algorithm) + def get_default_policy_class( + cls, config: AlgorithmConfig + ) -> Optional[Type[Policy]]: + if config["framework"] == "torch": + from ray.rllib.algorithms.a3c.a3c_torch_policy import A3CTorchPolicy + + return A3CTorchPolicy + elif config["framework"] == "tf": + from ray.rllib.algorithms.a3c.a3c_tf_policy import A3CTF1Policy + + return A3CTF1Policy + else: + from ray.rllib.algorithms.a3c.a3c_tf_policy import A3CTF2Policy + + return A3CTF2Policy + + def training_step(self) -> ResultDict: + # Shortcut. + local_worker = self.workers.local_worker() + + # Define the function executed in parallel by all RolloutWorkers to collect + # samples + compute and return gradients (and other information). + + def sample_and_compute_grads(worker: RolloutWorker) -> Dict[str, Any]: + """Call sample() and compute_gradients() remotely on workers.""" + samples = worker.sample() + grads, infos = worker.compute_gradients(samples) + return { + "grads": grads, + "infos": infos, + "agent_steps": samples.agent_steps(), + "env_steps": samples.env_steps(), + } + + # Perform rollouts and gradient calculations asynchronously. + with self._timers[GRAD_WAIT_TIMER]: + # Results are a mapping from ActorHandle (RolloutWorker) to their + # returned gradient calculation results. + self.workers.foreach_worker_async( + func=sample_and_compute_grads, + healthy_only=True, + ) + async_results = self.workers.fetch_ready_async_reqs() + + # Loop through all fetched worker-computed gradients (if any) + # and apply them - one by one - to the local worker's model. 
+ # After each apply step (one step per worker that returned some gradients), + # update that particular worker's weights. + global_vars = None + learner_info_builder = LearnerInfoBuilder(num_devices=1) + to_sync_workers = set() + for worker_id, result in async_results: + # Apply gradients to local worker. + with self._timers[APPLY_GRADS_TIMER]: + local_worker.apply_gradients(result["grads"]) + self._timers[APPLY_GRADS_TIMER].push_units_processed(result["agent_steps"]) + + # Update all step counters. + self._counters[NUM_AGENT_STEPS_SAMPLED] += result["agent_steps"] + self._counters[NUM_ENV_STEPS_SAMPLED] += result["env_steps"] + self._counters[NUM_AGENT_STEPS_TRAINED] += result["agent_steps"] + self._counters[NUM_ENV_STEPS_TRAINED] += result["env_steps"] + + learner_info_builder.add_learn_on_batch_results_multi_agent(result["infos"]) + + # Create current global vars. + global_vars = { + "timestep": self._counters[NUM_AGENT_STEPS_SAMPLED], + } + + # Add this worker to be synced. + to_sync_workers.add(worker_id) + + # Synch updated weights back to the particular worker + # (only those policies that are trainable). 
+ with self._timers[SYNCH_WORKER_WEIGHTS_TIMER]: + self.workers.sync_weights( + policies=local_worker.get_policies_to_train(), + to_worker_indices=list(to_sync_workers), + global_vars=global_vars, + ) + + return learner_info_builder.finalize() diff --git a/rllib_contrib/a3c/src/rllib_a3c/a3c/a3c_tf_policy.py b/rllib_contrib/a3c/src/rllib_a3c/a3c/a3c_tf_policy.py new file mode 100644 index 000000000000..bdc77f5790ae --- /dev/null +++ b/rllib_contrib/a3c/src/rllib_a3c/a3c/a3c_tf_policy.py @@ -0,0 +1,183 @@ +"""Note: Keep in sync with changes to VTraceTFPolicy.""" +from typing import Dict, List, Optional, Type, Union + +from ray.rllib.evaluation.episode import Episode +from ray.rllib.evaluation.postprocessing import ( + Postprocessing, + compute_gae_for_sample_batch, +) +from ray.rllib.models.modelv2 import ModelV2 +from ray.rllib.models.tf.tf_action_dist import TFActionDistribution +from ray.rllib.policy.dynamic_tf_policy_v2 import DynamicTFPolicyV2 +from ray.rllib.policy.eager_tf_policy_v2 import EagerTFPolicyV2 +from ray.rllib.policy.sample_batch import SampleBatch +from ray.rllib.policy.tf_mixins import ( + EntropyCoeffSchedule, + LearningRateSchedule, + ValueNetworkMixin, + compute_gradients, +) +from ray.rllib.utils.annotations import override +from ray.rllib.utils.framework import try_import_tf +from ray.rllib.utils.tf_utils import explained_variance +from ray.rllib.utils.typing import ( + AgentID, + LocalOptimizer, + ModelGradients, + TensorType, + TFPolicyV2Type, +) + +tf1, tf, tfv = try_import_tf() + + +# We need this builder function because we want to share the same +# custom logics between TF1 dynamic and TF2 eager policies. +def get_a3c_tf_policy(name: str, base: TFPolicyV2Type) -> TFPolicyV2Type: + """Construct a A3CTFPolicy inheriting either dynamic or eager base policies. + + Args: + base: Base class for this policy. DynamicTFPolicyV2 or EagerTFPolicyV2. + + Returns: + A TF Policy to be used with MAML. 
+ """ + + class A3CTFPolicy( + ValueNetworkMixin, LearningRateSchedule, EntropyCoeffSchedule, base + ): + def __init__( + self, + observation_space, + action_space, + config, + existing_model=None, + existing_inputs=None, + ): + # First thing first, enable eager execution if necessary. + base.enable_eager_execution_if_necessary() + + # Initialize base class. + base.__init__( + self, + observation_space, + action_space, + config, + existing_inputs=existing_inputs, + existing_model=existing_model, + ) + + ValueNetworkMixin.__init__(self, self.config) + LearningRateSchedule.__init__( + self, self.config["lr"], self.config["lr_schedule"] + ) + EntropyCoeffSchedule.__init__( + self, config["entropy_coeff"], config["entropy_coeff_schedule"] + ) + + # Note: this is a bit ugly, but loss and optimizer initialization must + # happen after all the MixIns are initialized. + self.maybe_initialize_optimizer_and_loss() + + @override(base) + def loss( + self, + model: Union[ModelV2, "tf.keras.Model"], + dist_class: Type[TFActionDistribution], + train_batch: SampleBatch, + ) -> Union[TensorType, List[TensorType]]: + model_out, _ = model(train_batch) + action_dist = dist_class(model_out, model) + if self.is_recurrent(): + max_seq_len = tf.reduce_max(train_batch[SampleBatch.SEQ_LENS]) + valid_mask = tf.sequence_mask( + train_batch[SampleBatch.SEQ_LENS], max_seq_len + ) + valid_mask = tf.reshape(valid_mask, [-1]) + else: + valid_mask = tf.ones_like(train_batch[SampleBatch.REWARDS]) + + log_prob = action_dist.logp(train_batch[SampleBatch.ACTIONS]) + vf = model.value_function() + + # The "policy gradients" loss + self.pi_loss = -tf.reduce_sum( + tf.boolean_mask( + log_prob * train_batch[Postprocessing.ADVANTAGES], valid_mask + ) + ) + + delta = tf.boolean_mask( + vf - train_batch[Postprocessing.VALUE_TARGETS], valid_mask + ) + + # Compute a value function loss. 
+ if self.config.get("use_critic", True): + self.vf_loss = 0.5 * tf.reduce_sum(tf.math.square(delta)) + # Ignore the value function. + else: + self.vf_loss = tf.constant(0.0) + + self.entropy_loss = tf.reduce_sum( + tf.boolean_mask(action_dist.entropy(), valid_mask) + ) + + self.total_loss = ( + self.pi_loss + + self.vf_loss * self.config["vf_loss_coeff"] + - self.entropy_loss * self.entropy_coeff + ) + + return self.total_loss + + @override(base) + def stats_fn(self, train_batch: SampleBatch) -> Dict[str, TensorType]: + return { + "cur_lr": tf.cast(self.cur_lr, tf.float64), + "entropy_coeff": tf.cast(self.entropy_coeff, tf.float64), + "policy_loss": self.pi_loss, + "policy_entropy": self.entropy_loss, + "var_gnorm": tf.linalg.global_norm( + list(self.model.trainable_variables()) + ), + "vf_loss": self.vf_loss, + } + + @override(base) + def grad_stats_fn( + self, train_batch: SampleBatch, grads: ModelGradients + ) -> Dict[str, TensorType]: + return { + "grad_gnorm": tf.linalg.global_norm(grads), + "vf_explained_var": explained_variance( + train_batch[Postprocessing.VALUE_TARGETS], + self.model.value_function(), + ), + } + + @override(base) + def postprocess_trajectory( + self, + sample_batch: SampleBatch, + other_agent_batches: Optional[Dict[AgentID, SampleBatch]] = None, + episode: Optional[Episode] = None, + ): + sample_batch = super().postprocess_trajectory(sample_batch) + return compute_gae_for_sample_batch( + self, sample_batch, other_agent_batches, episode + ) + + @override(base) + def compute_gradients_fn( + self, optimizer: LocalOptimizer, loss: TensorType + ) -> ModelGradients: + return compute_gradients(self, optimizer, loss) + + A3CTFPolicy.__name__ = name + A3CTFPolicy.__qualname__ = name + + return A3CTFPolicy + + +A3CTF1Policy = get_a3c_tf_policy("A3CTF1Policy", DynamicTFPolicyV2) +A3CTF2Policy = get_a3c_tf_policy("A3CTF2Policy", EagerTFPolicyV2) diff --git a/rllib_contrib/a3c/src/rllib_a3c/a3c/a3c_torch_policy.py 
b/rllib_contrib/a3c/src/rllib_a3c/a3c/a3c_torch_policy.py new file mode 100644 index 000000000000..e702254cd16c --- /dev/null +++ b/rllib_contrib/a3c/src/rllib_a3c/a3c/a3c_torch_policy.py @@ -0,0 +1,152 @@ +from typing import Dict, List, Optional, Type, Union + +from ray.rllib.evaluation.episode import Episode +from ray.rllib.evaluation.postprocessing import ( + Postprocessing, + compute_gae_for_sample_batch, +) +from ray.rllib.models.modelv2 import ModelV2 +from ray.rllib.models.torch.torch_action_dist import TorchDistributionWrapper +from ray.rllib.policy.sample_batch import SampleBatch +from ray.rllib.policy.torch_mixins import ( + EntropyCoeffSchedule, + LearningRateSchedule, + ValueNetworkMixin, +) +from ray.rllib.policy.torch_policy_v2 import TorchPolicyV2 +from ray.rllib.utils.annotations import override +from ray.rllib.utils.framework import try_import_torch +from ray.rllib.utils.numpy import convert_to_numpy +from ray.rllib.utils.torch_utils import apply_grad_clipping, sequence_mask +from ray.rllib.utils.typing import AgentID, TensorType + +torch, nn = try_import_torch() + + +class A3CTorchPolicy( + ValueNetworkMixin, LearningRateSchedule, EntropyCoeffSchedule, TorchPolicyV2 +): + """PyTorch Policy class used with A3C.""" + + def __init__(self, observation_space, action_space, config): + TorchPolicyV2.__init__( + self, + observation_space, + action_space, + config, + max_seq_len=config["model"]["max_seq_len"], + ) + ValueNetworkMixin.__init__(self, config) + LearningRateSchedule.__init__(self, config["lr"], config["lr_schedule"]) + EntropyCoeffSchedule.__init__( + self, config["entropy_coeff"], config["entropy_coeff_schedule"] + ) + + # TODO: Don't require users to call this manually. + self._initialize_loss_from_dummy_batch() + + @override(TorchPolicyV2) + def loss( + self, + model: ModelV2, + dist_class: Type[TorchDistributionWrapper], + train_batch: SampleBatch, + ) -> Union[TensorType, List[TensorType]]: + """Constructs the loss function. 
+ + Args: + model: The Model to calculate the loss for. + dist_class: The action distr. class. + train_batch: The training data. + + Returns: + The A3C loss tensor given the input batch. + """ + logits, _ = model(train_batch) + values = model.value_function() + + if self.is_recurrent(): + B = len(train_batch[SampleBatch.SEQ_LENS]) + max_seq_len = logits.shape[0] // B + mask_orig = sequence_mask(train_batch[SampleBatch.SEQ_LENS], max_seq_len) + valid_mask = torch.reshape(mask_orig, [-1]) + else: + valid_mask = torch.ones_like(values, dtype=torch.bool) + + dist = dist_class(logits, model) + log_probs = dist.logp(train_batch[SampleBatch.ACTIONS]).reshape(-1) + pi_err = -torch.sum( + torch.masked_select( + log_probs * train_batch[Postprocessing.ADVANTAGES], valid_mask + ) + ) + + # Compute a value function loss. + if self.config["use_critic"]: + value_err = 0.5 * torch.sum( + torch.pow( + torch.masked_select( + values.reshape(-1) - train_batch[Postprocessing.VALUE_TARGETS], + valid_mask, + ), + 2.0, + ) + ) + # Ignore the value function. + else: + value_err = 0.0 + + entropy = torch.sum(torch.masked_select(dist.entropy(), valid_mask)) + + total_loss = ( + pi_err + + value_err * self.config["vf_loss_coeff"] + - entropy * self.entropy_coeff + ) + + # Store values for stats function in model (tower), such that for + # multi-GPU, we do not override them during the parallel loss phase. 
+ model.tower_stats["entropy"] = entropy + model.tower_stats["pi_err"] = pi_err + model.tower_stats["value_err"] = value_err + + return total_loss + + @override(TorchPolicyV2) + def optimizer( + self, + ) -> Union[List["torch.optim.Optimizer"], "torch.optim.Optimizer"]: + """Returns a torch optimizer (Adam) for A3C.""" + return torch.optim.Adam(self.model.parameters(), lr=self.config["lr"]) + + @override(TorchPolicyV2) + def stats_fn(self, train_batch: SampleBatch) -> Dict[str, TensorType]: + return convert_to_numpy( + { + "cur_lr": self.cur_lr, + "entropy_coeff": self.entropy_coeff, + "policy_entropy": torch.mean( + torch.stack(self.get_tower_stats("entropy")) + ), + "policy_loss": torch.mean(torch.stack(self.get_tower_stats("pi_err"))), + "vf_loss": torch.mean(torch.stack(self.get_tower_stats("value_err"))), + } + ) + + @override(TorchPolicyV2) + def postprocess_trajectory( + self, + sample_batch: SampleBatch, + other_agent_batches: Optional[Dict[AgentID, SampleBatch]] = None, + episode: Optional[Episode] = None, + ): + sample_batch = super().postprocess_trajectory(sample_batch) + return compute_gae_for_sample_batch( + self, sample_batch, other_agent_batches, episode + ) + + @override(TorchPolicyV2) + def extra_grad_process( + self, optimizer: "torch.optim.Optimizer", loss: TensorType + ) -> Dict[str, TensorType]: + return apply_grad_clipping(self, optimizer, loss) diff --git a/rllib_contrib/a3c/tests/test_a3c.py b/rllib_contrib/a3c/tests/test_a3c.py new file mode 100644 index 000000000000..66984eb1e4ae --- /dev/null +++ b/rllib_contrib/a3c/tests/test_a3c.py @@ -0,0 +1,100 @@ +import unittest + +from rllib_a3c.a3c import A3CConfig + +import ray +from ray.rllib.policy.sample_batch import DEFAULT_POLICY_ID +from ray.rllib.utils.metrics.learner_info import LEARNER_INFO, LEARNER_STATS_KEY +from ray.rllib.utils.test_utils import ( + check_compute_single_action, + check_train_results, + framework_iterator, +) + + +class TestA3C(unittest.TestCase): + """Sanity tests for 
A2C exec impl.""" + + def setUp(self): + ray.init(num_cpus=4) + + def tearDown(self): + ray.shutdown() + + def test_a3c_compilation(self): + """Test whether an A3C can be built with both frameworks.""" + config = A3CConfig().rollouts(num_rollout_workers=2, num_envs_per_worker=2) + + num_iterations = 2 + + # Test against all frameworks. + for _ in framework_iterator(config, with_eager_tracing=False): + for env in ["CartPole-v1", "Pendulum-v1"]: + print("env={}".format(env)) + config.model["use_lstm"] = env == "CartPole-v1" + algo = config.build(env=env) + for i in range(num_iterations): + results = algo.train() + check_train_results(results) + print(results) + check_compute_single_action( + algo, include_state=config.model["use_lstm"] + ) + algo.stop() + + def test_a3c_entropy_coeff_schedule(self): + """Test A3C entropy coeff schedule support.""" + config = A3CConfig().rollouts( + num_rollout_workers=1, + num_envs_per_worker=1, + batch_mode="truncate_episodes", + rollout_fragment_length=10, + ) + # Initial entropy coeff, doesn't really matter because of the schedule below. + config.training( + train_batch_size=20, + entropy_coeff=0.01, + entropy_coeff_schedule=[ + [0, 0.01], + [120, 0.0001], + ], + ) + # 0 metrics reporting delay, this makes sure timestep, + # which entropy coeff depends on, is updated after each worker rollout. + config.reporting( + min_time_s_per_iteration=0, min_sample_timesteps_per_iteration=20 + ) + + def _step_n_times(trainer, n: int): + """Step trainer n times. + + Returns: + learning rate at the end of the execution. + """ + for _ in range(n): + results = trainer.train() + return results["info"][LEARNER_INFO][DEFAULT_POLICY_ID][LEARNER_STATS_KEY][ + "entropy_coeff" + ] + + # Test against all frameworks. 
+ for _ in framework_iterator(config): + algo = config.build(env="CartPole-v1") + + coeff = _step_n_times(algo, 1) # 20 timesteps + # Should be close to the starting coeff of 0.01 + self.assertGreaterEqual(coeff, 0.005) + + coeff = _step_n_times(algo, 10) # 200 timesteps + # Should have annealed to the final coeff of 0.0001. + self.assertLessEqual(coeff, 0.00011) + + algo.stop() + + +if __name__ == "__main__": + import sys + + import pytest + + sys.exit(pytest.main(["-v", __file__])) diff --git a/rllib_contrib/maml/README.rst b/rllib_contrib/maml/README.rst new file mode 100644 index 000000000000..912fca39ed35 --- /dev/null +++ b/rllib_contrib/maml/README.rst @@ -0,0 +1,27 @@ +MAML (Model-Agnostic Meta-Learning for Fast Adaptation of Deep Networks) +------------------------------------------------------------------------ + +`MAML ` is an on-policy meta RL algorithm. Unlike standard RL algorithms, which aim to maximize the sum of rewards into the future for a single task (e.g. HalfCheetah), meta RL algorithms seek to maximize the sum of rewards for *a given distribution of tasks*. + +On a high level, MAML seeks to learn quick adaptation across different tasks (e.g. different velocities for HalfCheetah). Quick adaptation is defined by the number of gradient steps it takes to adapt. MAML aims to maximize the RL objective for each task after `X` gradient steps. Doing this requires partitioning the algorithm into two steps. The first step is data collection. This involves collecting data for each task for each step of adaptation (from `1, 2, ..., X`). The second step is the meta-update step. This second step takes all the aggregated ddata from the first step and computes the meta-gradient. + +Code here is adapted from `https://github.com/jonasrothfuss`, which outperforms vanilla MAML and avoids computation of the higher order gradients during the meta-update step. MAML is evaluated on custom environments that are described in greater detail here. 
+ +MAML uses additional metrics to measure performance; episode_reward_mean measures the agent’s returns before adaptation, episode_reward_mean_adapt_N measures the agent’s returns after N gradient steps of inner adaptation, and adaptation_delta measures the difference in performance before and after adaptation. + + +Installation +------------ + +.. code-block:: bash + + conda create -n rllib-maml python=3.10 + conda activate rllib-maml + pip install -r requirements.txt + pip install -e '.[development]' + + +Usage +----- + +.. literalinclude:: examples/cartpole_mass_maml.py \ No newline at end of file diff --git a/rllib_contrib/maml/examples/cartpole_mass_maml.py b/rllib_contrib/maml/examples/cartpole_mass_maml.py new file mode 100644 index 000000000000..72c27f83056c --- /dev/null +++ b/rllib_contrib/maml/examples/cartpole_mass_maml.py @@ -0,0 +1,52 @@ +from gymnasium.wrappers import TimeLimit +from rllib_maml.maml import MAML, MAMLConfig + +import ray +from ray import air, tune +from ray.rllib.examples.env.cartpole_mass import CartPoleMassEnv +from ray.tune.registry import register_env + +if __name__ == "__main__": + ray.init() + register_env( + "cartpole", + lambda env_cfg: TimeLimit(CartPoleMassEnv(), max_episode_steps=200), + ) + + rollout_fragment_length = 32 + + config = ( + MAMLConfig() + .rollouts( + num_rollout_workers=4, rollout_fragment_length=rollout_fragment_length + ) + .framework("torch") + .environment("cartpole", clip_actions=False) + .training( + inner_adaptation_steps=1, + maml_optimizer_steps=5, + gamma=0.99, + lambda_=1.0, + lr=0.001, + vf_loss_coeff=0.5, + inner_lr=0.03, + use_meta_env=False, + clip_param=0.3, + kl_target=0.01, + kl_coeff=0.001, + model=dict(fcnet_hiddens=[64, 64]), + train_batch_size=rollout_fragment_length, + ) + ) + + num_iterations = 100 + + tuner = tune.Tuner( + MAML, + param_space=config.to_dict(), + run_config=air.RunConfig( + stop={"training_iteration": num_iterations}, + 
failure_config=air.FailureConfig(fail_fast="raise"), + ), + ) + results = tuner.fit() diff --git a/rllib_contrib/maml/pyproject.toml b/rllib_contrib/maml/pyproject.toml new file mode 100644 index 000000000000..bf6df70018fe --- /dev/null +++ b/rllib_contrib/maml/pyproject.toml @@ -0,0 +1,18 @@ +[build-system] +requires = ["setuptools>=61.0"] +build-backend = "setuptools.build_meta" + +[tool.setuptools.packages.find] +where = ["src"] + +[project] +name = "rllib-maml" +authors = [{name = "Anyscale Inc."}] +version = "0.1.0" +description = "" +readme = "README.md" +requires-python = ">=3.7, <3.11" +dependencies = ["gymnasium[mujoco]==0.26.3", "higher", "ray[rllib]==2.3.1"] + +[project.optional-dependencies] +development = ["pytest>=7.2.2", "pre-commit==2.21.0", "tensorflow==2.11.0", "torch==1.12.0"] diff --git a/rllib_contrib/maml/requirements.txt b/rllib_contrib/maml/requirements.txt new file mode 100644 index 000000000000..f1191ef52412 --- /dev/null +++ b/rllib_contrib/maml/requirements.txt @@ -0,0 +1,2 @@ +tensorflow==2.11.0 +torch==1.12.0 diff --git a/rllib_contrib/maml/src/rllib_maml/__init__.py b/rllib_contrib/maml/src/rllib_maml/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/rllib_contrib/maml/src/rllib_maml/envs/__init__.py b/rllib_contrib/maml/src/rllib_maml/envs/__init__.py new file mode 100644 index 000000000000..1796db67d13e --- /dev/null +++ b/rllib_contrib/maml/src/rllib_maml/envs/__init__.py @@ -0,0 +1,11 @@ +# Copyright 2023-onwards Anyscale, Inc. The use of this library is subject to the +# included LICENSE file. 
+from rllib_maml.envs.ant_rand_goal import AntRandGoalEnv +from rllib_maml.envs.cartpole_mass import CartPoleMassEnv +from rllib_maml.envs.pendulum_mass import PendulumMassEnv + +__all__ = [ + "AntRandGoalEnv", + "CartPoleMassEnv", + "PendulumMassEnv", +] diff --git a/rllib_contrib/maml/src/rllib_maml/envs/ant_rand_goal.py b/rllib_contrib/maml/src/rllib_maml/envs/ant_rand_goal.py new file mode 100644 index 000000000000..5dd2f3c8e026 --- /dev/null +++ b/rllib_contrib/maml/src/rllib_maml/envs/ant_rand_goal.py @@ -0,0 +1,86 @@ +import numpy as np +from gymnasium.envs.mujoco.mujoco_env import MujocoEnv +from gymnasium.utils import EzPickle + +from ray.rllib.env.apis.task_settable_env import TaskSettableEnv + + +class AntRandGoalEnv(EzPickle, MujocoEnv, TaskSettableEnv): + """Ant Environment that randomizes goals as tasks + + Goals are randomly sampled 2D positions + """ + + def __init__(self): + self.set_task(self.sample_tasks(1)[0]) + MujocoEnv.__init__(self, "ant.xml", 5) + EzPickle.__init__(self) + + def sample_tasks(self, n_tasks): + # Samples a goal position (2x1 position ector) + a = np.random.random(n_tasks) * 2 * np.pi + r = 3 * np.random.random(n_tasks) ** 0.5 + return np.stack((r * np.cos(a), r * np.sin(a)), axis=-1) + + def set_task(self, task): + """ + Args: + task: task of the meta-learning environment + """ + self.goal_pos = task + + def get_task(self): + """ + Returns: + task: task of the meta-learning environment + """ + return self.goal_pos + + def step(self, a): + self.do_simulation(a, self.frame_skip) + xposafter = self.get_body_com("torso") + goal_reward = -np.sum( + np.abs(xposafter[:2] - self.goal_pos) + ) # make it happy, not suicidal + ctrl_cost = 0.1 * np.square(a).sum() + contact_cost = ( + 0.5 * 1e-3 * np.sum(np.square(np.clip(self.sim.data.cfrc_ext, -1, 1))) + ) + # survive_reward = 1.0 + survive_reward = 0.0 + reward = goal_reward - ctrl_cost - contact_cost + survive_reward + # notdone = np.isfinite(state).all() and 1.0 >= state[2] >= 0. 
+ # done = not notdone + done = False + ob = self._get_obs() + return ( + ob, + reward, + done, + dict( + reward_forward=goal_reward, + reward_ctrl=-ctrl_cost, + reward_contact=-contact_cost, + reward_survive=survive_reward, + ), + ) + + def _get_obs(self): + return np.concatenate( + [ + self.sim.data.qpos.flat, + self.sim.data.qvel.flat, + np.clip(self.sim.data.cfrc_ext, -1, 1).flat, + ] + ) + + def reset_model(self): + qpos = self.init_qpos + self.np_random.uniform( + size=self.model.nq, low=-0.1, high=0.1 + ) + qvel = self.init_qvel + self.np_random.randn(self.model.nv) * 0.1 + self.set_state(qpos, qvel) + return self._get_obs() + + def viewer_setup(self): + self.viewer.cam.distance = self.model.stat.extent * 0.5 diff --git a/rllib_contrib/maml/src/rllib_maml/envs/cartpole_mass.py b/rllib_contrib/maml/src/rllib_maml/envs/cartpole_mass.py new file mode 100644 index 000000000000..bfd481402eb7 --- /dev/null +++ b/rllib_contrib/maml/src/rllib_maml/envs/cartpole_mass.py @@ -0,0 +1,31 @@ +import numpy as np +from gymnasium.envs.classic_control.cartpole import CartPoleEnv +from gymnasium.utils import EzPickle + +from ray.rllib.env.apis.task_settable_env import TaskSettableEnv + + +class CartPoleMassEnv(CartPoleEnv, EzPickle, TaskSettableEnv): + """CartPoleMassEnv varies the weights of the cart and the pole.""" + + def sample_tasks(self, n_tasks): + # Sample new cart- and pole masses (random floats between 0.5 and 2.0 + # (cart) and between 0.05 and 0.2 (pole)). + cart_masses = np.random.uniform(low=0.5, high=2.0, size=(n_tasks, 1)) + pole_masses = np.random.uniform(low=0.05, high=0.2, size=(n_tasks, 1)) + return np.concatenate([cart_masses, pole_masses], axis=-1) + + def set_task(self, task): + """ + Args: + task (Tuple[float]): Masses of the cart and the pole. + """ + self.masscart = task[0] + self.masspole = task[1] + + def get_task(self): + """ + Returns: + Tuple[float]: The current mass of the cart- and pole. 
+ """ + return np.array([self.masscart, self.masspole]) diff --git a/rllib_contrib/maml/src/rllib_maml/envs/pendulum_mass.py b/rllib_contrib/maml/src/rllib_maml/envs/pendulum_mass.py new file mode 100644 index 000000000000..2b4abdf20107 --- /dev/null +++ b/rllib_contrib/maml/src/rllib_maml/envs/pendulum_mass.py @@ -0,0 +1,33 @@ +import numpy as np +from gymnasium.envs.classic_control.pendulum import PendulumEnv +from gymnasium.utils import EzPickle + +from ray.rllib.env.apis.task_settable_env import TaskSettableEnv + + +class PendulumMassEnv(PendulumEnv, EzPickle, TaskSettableEnv): + """PendulumMassEnv varies the weight of the pendulum + + Tasks are defined to be weight uniformly sampled between [0.5,2] + """ + + def sample_tasks(self, n_tasks): + # Sample new pendulum masses (random floats between 0.5 and 2). + return np.random.uniform(low=0.5, high=2.0, size=(n_tasks,)) + + def set_task(self, task): + """ + Args: + task: Task of the meta-learning environment (here: mass of + the pendulum). + """ + # self.m is the mass property of the pendulum. + self.m = task + + def get_task(self): + """ + Returns: + float: The current mass of the pendulum (self.m in the PendulumEnv + object). + """ + return self.m diff --git a/rllib_contrib/maml/src/rllib_maml/maml/__init__.py b/rllib_contrib/maml/src/rllib_maml/maml/__init__.py new file mode 100644 index 000000000000..1ec07956fabd --- /dev/null +++ b/rllib_contrib/maml/src/rllib_maml/maml/__init__.py @@ -0,0 +1,12 @@ +# Copyright 2023-onwards Anyscale, Inc. The use of this library is subject to the +# included LICENSE file. 
+from rllib_maml.maml.maml import MAML, MAMLConfig + +from ray.tune.registry import register_trainable + +__all__ = [ + "MAML", + "MAMLConfig", +] + +register_trainable("rllib-contrib-maml", MAML) diff --git a/rllib_contrib/maml/src/rllib_maml/maml/maml.py b/rllib_contrib/maml/src/rllib_maml/maml/maml.py new file mode 100644 index 000000000000..e03a7ff3f6ca --- /dev/null +++ b/rllib_contrib/maml/src/rllib_maml/maml/maml.py @@ -0,0 +1,388 @@ +import logging +from typing import Optional, Type + +import numpy as np + +from ray.rllib.algorithms.algorithm import Algorithm +from ray.rllib.algorithms.algorithm_config import AlgorithmConfig, NotProvided +from ray.rllib.evaluation.metrics import collect_metrics, get_learner_stats +from ray.rllib.evaluation.worker_set import WorkerSet +from ray.rllib.execution.common import ( + STEPS_SAMPLED_COUNTER, + STEPS_TRAINED_COUNTER, + STEPS_TRAINED_THIS_ITER_COUNTER, + _get_shared_metrics, +) +from ray.rllib.execution.metric_ops import CollectMetrics +from ray.rllib.policy.policy import Policy +from ray.rllib.policy.sample_batch import ( + concat_samples, + convert_ma_batch_to_sample_batch, +) +from ray.rllib.utils.annotations import override +from ray.rllib.utils.deprecation import DEPRECATED_VALUE +from ray.rllib.utils.metrics.learner_info import LEARNER_INFO +from ray.rllib.utils.sgd import standardized +from ray.util.iter import LocalIterator, from_actors + +logger = logging.getLogger(__name__) + + +class MAMLConfig(AlgorithmConfig): + """Defines a configuration class from which a MAML Algorithm can be built. + + Example: + >>> from ray.rllib.algorithms.maml import MAMLConfig + >>> config = MAMLConfig().training(use_gae=False).resources(num_gpus=1) + >>> print(config.to_dict()) # doctest: +SKIP + >>> # Build a Algorithm object from the config and run 1 training iteration. 
+ >>> algo = config.build(env="CartPole-v1") # doctest: +SKIP + >>> algo.train() # doctest: +SKIP + + Example: + >>> from ray.rllib.algorithms.maml import MAMLConfig + >>> from ray import air + >>> from ray import tune + >>> config = MAMLConfig() + >>> # Print out some default values. + >>> print(config.lr) # doctest: +SKIP + >>> # Update the config object. + >>> config = config.training( # doctest: +SKIP + ... grad_clip=tune.grid_search([10.0, 40.0])) + >>> # Set the config object's env. + >>> config = config.environment(env="CartPole-v1") + >>> # Use to_dict() to get the old-style python config dict + >>> # when running with tune. + >>> tune.Tuner( # doctest: +SKIP + ... "MAML", + ... run_config=air.RunConfig(stop={"episode_reward_mean": 200}), + ... param_space=config.to_dict(), + ... ).fit() + """ + + def __init__(self, algo_class=None): + """Initializes a PGConfig instance.""" + super().__init__(algo_class=algo_class or MAML) + + # fmt: off + # __sphinx_doc_begin__ + # MAML-specific config settings. + self.use_gae = True + self.lambda_ = 1.0 + self.kl_coeff = 0.0005 + self.vf_loss_coeff = 0.5 + self.entropy_coeff = 0.0 + self.clip_param = 0.3 + self.vf_clip_param = 10.0 + self.grad_clip = None + self.kl_target = 0.01 + self.inner_adaptation_steps = 1 + self.maml_optimizer_steps = 5 + self.inner_lr = 0.1 + self.use_meta_env = True + + # Override some of AlgorithmConfig's default values with MAML-specific values. + self.num_rollout_workers = 2 + self.rollout_fragment_length = 200 + self.create_env_on_local_worker = True + self.lr = 1e-3 + + # Share layers for value function. + self.model.update({ + "vf_share_layers": False, + }) + + self.batch_mode = "complete_episodes" + self._disable_execution_plan_api = False + self.exploration_config = { + # The Exploration class to use. In the simplest case, this is the name + # (str) of any class present in the `rllib.utils.exploration` package. 
+ # You can also provide the python class directly or the full location + # of your class (e.g. "ray.rllib.utils.exploration.epsilon_greedy. + # EpsilonGreedy"). + "type": "StochasticSampling", + # Add constructor kwargs here (if any). + } + # __sphinx_doc_end__ + # fmt: on + + # Deprecated keys: + self.vf_share_layers = DEPRECATED_VALUE + + def training( + self, + *, + use_gae: Optional[bool] = NotProvided, + lambda_: Optional[float] = NotProvided, + kl_coeff: Optional[float] = NotProvided, + vf_loss_coeff: Optional[float] = NotProvided, + entropy_coeff: Optional[float] = NotProvided, + clip_param: Optional[float] = NotProvided, + vf_clip_param: Optional[float] = NotProvided, + grad_clip: Optional[float] = NotProvided, + kl_target: Optional[float] = NotProvided, + inner_adaptation_steps: Optional[int] = NotProvided, + maml_optimizer_steps: Optional[int] = NotProvided, + inner_lr: Optional[float] = NotProvided, + use_meta_env: Optional[bool] = NotProvided, + **kwargs, + ) -> "MAMLConfig": + """Sets the training related configuration. + + Args: + use_gae: If true, use the Generalized Advantage Estimator (GAE) + with a value function, see https://arxiv.org/pdf/1506.02438.pdf. + lambda_: The GAE (lambda) parameter. + kl_coeff: Initial coefficient for KL divergence. + vf_loss_coeff: Coefficient of the value function loss. + entropy_coeff: Coefficient of the entropy regularizer. + clip_param: PPO clip parameter. + vf_clip_param: Clip param for the value function. Note that this is + sensitive to the scale of the rewards. If your expected V is large, + increase this. + grad_clip: If specified, clip the global norm of gradients by this amount. + kl_target: Target value for KL divergence. + inner_adaptation_steps: Number of Inner adaptation steps for the MAML + algorithm. + maml_optimizer_steps: Number of MAML steps per meta-update iteration + (PPO steps). + inner_lr: Inner Adaptation Step size. + use_meta_env: Use Meta Env Template. 
+ + Returns: + This updated AlgorithmConfig object. + """ + # Pass kwargs onto super's `training()` method. + super().training(**kwargs) + + if use_gae is not NotProvided: + self.use_gae = use_gae + if lambda_ is not NotProvided: + self.lambda_ = lambda_ + if kl_coeff is not NotProvided: + self.kl_coeff = kl_coeff + if vf_loss_coeff is not NotProvided: + self.vf_loss_coeff = vf_loss_coeff + if entropy_coeff is not NotProvided: + self.entropy_coeff = entropy_coeff + if clip_param is not NotProvided: + self.clip_param = clip_param + if vf_clip_param is not NotProvided: + self.vf_clip_param = vf_clip_param + if grad_clip is not NotProvided: + self.grad_clip = grad_clip + if kl_target is not NotProvided: + self.kl_target = kl_target + if inner_adaptation_steps is not NotProvided: + self.inner_adaptation_steps = inner_adaptation_steps + if maml_optimizer_steps is not NotProvided: + self.maml_optimizer_steps = maml_optimizer_steps + if inner_lr is not NotProvided: + self.inner_lr = inner_lr + if use_meta_env is not NotProvided: + self.use_meta_env = use_meta_env + + return self + + @override(AlgorithmConfig) + def validate(self) -> None: + # Call super's validation method. + super().validate() + + if self.num_gpus > 1: + raise ValueError("`num_gpus` > 1 not yet supported for MAML!") + if self.inner_adaptation_steps <= 0: + raise ValueError("Inner Adaptation Steps must be >=1!") + if self.maml_optimizer_steps <= 0: + raise ValueError("PPO steps for meta-update needs to be >=0!") + if self.entropy_coeff < 0: + raise ValueError("`entropy_coeff` must be >=0.0!") + if self.batch_mode != "complete_episodes": + raise ValueError("`batch_mode`=truncate_episodes not supported!") + if self.num_rollout_workers <= 0: + raise ValueError("Must have at least 1 worker/task!") + if self.create_env_on_local_worker is False: + raise ValueError( + "Must have an actual Env created on the driver " + "(local) worker! Try setting `config.environment(" + "create_env_on_local_worker=True)`." 
+ ) + + +# @mluo: TODO +def set_worker_tasks(workers, use_meta_env): + if use_meta_env: + n_tasks = len(workers.remote_workers()) + tasks = workers.local_worker().foreach_env(lambda x: x)[0].sample_tasks(n_tasks) + for i, worker in enumerate(workers.remote_workers()): + worker.foreach_env.remote(lambda env: env.set_task(tasks[i])) + + +class MetaUpdate: + def __init__(self, workers, maml_steps, metric_gen, use_meta_env): + self.workers = workers + self.maml_optimizer_steps = maml_steps + self.metric_gen = metric_gen + self.use_meta_env = use_meta_env + + def __call__(self, data_tuple): + # Metaupdate Step + samples = data_tuple[0] + adapt_metrics_dict = data_tuple[1] + + # Metric Updating + metrics = _get_shared_metrics() + metrics.counters[STEPS_SAMPLED_COUNTER] += samples.count + fetches = None + for i in range(self.maml_optimizer_steps): + fetches = self.workers.local_worker().learn_on_batch(samples) + learner_stats = get_learner_stats(fetches) + + # Sync workers with meta policy + self.workers.sync_weights() + + # Set worker tasks + set_worker_tasks(self.workers, self.use_meta_env) + + # Update KLS + def update(pi, pi_id): + assert "inner_kl" not in learner_stats, ( + "inner_kl should be nested under policy id key", + learner_stats, + ) + if pi_id in learner_stats: + assert "inner_kl" in learner_stats[pi_id], (learner_stats, pi_id) + pi.update_kls(learner_stats[pi_id]["inner_kl"]) + else: + logger.warning("No data for {}, not updating kl".format(pi_id)) + + self.workers.local_worker().foreach_policy_to_train(update) + + # Modify Reporting Metrics + metrics = _get_shared_metrics() + metrics.info[LEARNER_INFO] = fetches + metrics.counters[STEPS_TRAINED_THIS_ITER_COUNTER] = samples.count + metrics.counters[STEPS_TRAINED_COUNTER] += samples.count + + res = self.metric_gen.__call__(None) + res.update(adapt_metrics_dict) + + return res + + +def post_process_metrics(adapt_iter, workers, metrics): + # Obtain Current Dataset Metrics and filter out + name = "_adapt_" + 
str(adapt_iter) if adapt_iter > 0 else "" + + # Only workers are collecting data + res = collect_metrics(workers=workers) + + metrics["episode_reward_max" + str(name)] = res["episode_reward_max"] + metrics["episode_reward_mean" + str(name)] = res["episode_reward_mean"] + metrics["episode_reward_min" + str(name)] = res["episode_reward_min"] + + return metrics + + +def inner_adaptation(workers, samples): + # Each worker performs one gradient descent + for i, e in enumerate(workers.remote_workers()): + e.learn_on_batch.remote(samples[i]) + + +class MAML(Algorithm): + @classmethod + @override(Algorithm) + def get_default_config(cls) -> AlgorithmConfig: + return MAMLConfig() + + @classmethod + @override(Algorithm) + def get_default_policy_class( + cls, config: AlgorithmConfig + ) -> Optional[Type[Policy]]: + if config["framework"] == "torch": + from ray.rllib.algorithms.maml.maml_torch_policy import MAMLTorchPolicy + + return MAMLTorchPolicy + elif config["framework"] == "tf": + from ray.rllib.algorithms.maml.maml_tf_policy import MAMLTF1Policy + + return MAMLTF1Policy + else: + from ray.rllib.algorithms.maml.maml_tf_policy import MAMLTF2Policy + + return MAMLTF2Policy + + @staticmethod + @override(Algorithm) + def execution_plan( + workers: WorkerSet, config: AlgorithmConfig, **kwargs + ) -> LocalIterator[dict]: + assert ( + len(kwargs) == 0 + ), "MAML execution_plan does NOT take any additional parameters" + + # Sync workers with meta policy + workers.sync_weights() + + # Samples and sets worker tasks + use_meta_env = config.use_meta_env + set_worker_tasks(workers, use_meta_env) + + # Metric Collector + metric_collect = CollectMetrics( + workers, + min_history=config.metrics_num_episodes_for_smoothing, + timeout_seconds=config.metrics_episode_collection_timeout_s, + ) + + # Iterator for Inner Adaptation Data gathering (from pre->post + # adaptation) + inner_steps = config.inner_adaptation_steps + + def inner_adaptation_steps(itr): + buf = [] + split = [] + metrics = 
{} + for samples in itr: + # Processing Samples (Standardize Advantages) + split_lst = [] + for sample in samples: + sample = convert_ma_batch_to_sample_batch(sample) + sample["advantages"] = standardized(sample["advantages"]) + split_lst.append(sample.count) + buf.append(sample) + + split.append(split_lst) + + adapt_iter = len(split) - 1 + metrics = post_process_metrics(adapt_iter, workers, metrics) + if len(split) > inner_steps: + out = concat_samples(buf) + out["split"] = np.array(split) + buf = [] + split = [] + + # Reporting Adaptation Rew Diff + ep_rew_pre = metrics["episode_reward_mean"] + ep_rew_post = metrics[ + "episode_reward_mean_adapt_" + str(inner_steps) + ] + metrics["adaptation_delta"] = ep_rew_post - ep_rew_pre + yield out, metrics + metrics = {} + else: + inner_adaptation(workers, samples) + + rollouts = from_actors(workers.remote_workers()) + rollouts = rollouts.batch_across_shards() + rollouts = rollouts.transform(inner_adaptation_steps) + + # Metaupdate Step + train_op = rollouts.for_each( + MetaUpdate( + workers, config.maml_optimizer_steps, metric_collect, use_meta_env + ) + ) + return train_op diff --git a/rllib_contrib/maml/src/rllib_maml/maml/maml_tf_policy.py b/rllib_contrib/maml/src/rllib_maml/maml/maml_tf_policy.py new file mode 100644 index 000000000000..d81bf8d834ec --- /dev/null +++ b/rllib_contrib/maml/src/rllib_maml/maml/maml_tf_policy.py @@ -0,0 +1,520 @@ +import logging +from typing import Dict, List, Type, Union + +from ray.rllib.algorithms.ppo.ppo_tf_policy import validate_config +from ray.rllib.evaluation.postprocessing import ( + Postprocessing, + compute_gae_for_sample_batch, +) +from ray.rllib.models.modelv2 import ModelV2 +from ray.rllib.models.tf.tf_action_dist import TFActionDistribution +from ray.rllib.models.utils import get_activation_fn +from ray.rllib.policy.dynamic_tf_policy_v2 import DynamicTFPolicyV2 +from ray.rllib.policy.eager_tf_policy_v2 import EagerTFPolicyV2 +from ray.rllib.policy.sample_batch import 
SampleBatch +from ray.rllib.policy.tf_mixins import ( + LocalOptimizer, + ModelGradients, + ValueNetworkMixin, + compute_gradients, +) +from ray.rllib.utils import try_import_tf +from ray.rllib.utils.annotations import override +from ray.rllib.utils.typing import TensorType + +tf1, tf, tfv = try_import_tf() + +logger = logging.getLogger(__name__) + + +def PPOLoss( + dist_class, + actions, + curr_logits, + behaviour_logits, + advantages, + value_fn, + value_targets, + vf_preds, + cur_kl_coeff, + entropy_coeff, + clip_param, + vf_clip_param, + vf_loss_coeff, + clip_loss=False, +): + def surrogate_loss( + actions, curr_dist, prev_dist, advantages, clip_param, clip_loss + ): + pi_new_logp = curr_dist.logp(actions) + pi_old_logp = prev_dist.logp(actions) + + logp_ratio = tf.math.exp(pi_new_logp - pi_old_logp) + if clip_loss: + return tf.minimum( + advantages * logp_ratio, + advantages + * tf.clip_by_value(logp_ratio, 1 - clip_param, 1 + clip_param), + ) + return advantages * logp_ratio + + def kl_loss(curr_dist, prev_dist): + return prev_dist.kl(curr_dist) + + def entropy_loss(dist): + return dist.entropy() + + def vf_loss(value_fn, value_targets, vf_preds, vf_clip_param=0.1): + # GAE Value Function Loss + vf_loss1 = tf.math.square(value_fn - value_targets) + vf_clipped = vf_preds + tf.clip_by_value( + value_fn - vf_preds, -vf_clip_param, vf_clip_param + ) + vf_loss2 = tf.math.square(vf_clipped - value_targets) + vf_loss = tf.maximum(vf_loss1, vf_loss2) + return vf_loss + + pi_new_dist = dist_class(curr_logits, None) + pi_old_dist = dist_class(behaviour_logits, None) + + surr_loss = tf.reduce_mean( + surrogate_loss( + actions, pi_new_dist, pi_old_dist, advantages, clip_param, clip_loss + ) + ) + kl_loss = tf.reduce_mean(kl_loss(pi_new_dist, pi_old_dist)) + vf_loss = tf.reduce_mean(vf_loss(value_fn, value_targets, vf_preds, vf_clip_param)) + entropy_loss = tf.reduce_mean(entropy_loss(pi_new_dist)) + + total_loss = -surr_loss + cur_kl_coeff * kl_loss + total_loss += 
vf_loss_coeff * vf_loss - entropy_coeff * entropy_loss + return total_loss, surr_loss, kl_loss, vf_loss, entropy_loss + + +# This is the computation graph for workers (inner adaptation steps) +class WorkerLoss(object): + def __init__( + self, + dist_class, + actions, + curr_logits, + behaviour_logits, + advantages, + value_fn, + value_targets, + vf_preds, + cur_kl_coeff, + entropy_coeff, + clip_param, + vf_clip_param, + vf_loss_coeff, + clip_loss=False, + ): + self.loss, surr_loss, kl_loss, vf_loss, ent_loss = PPOLoss( + dist_class=dist_class, + actions=actions, + curr_logits=curr_logits, + behaviour_logits=behaviour_logits, + advantages=advantages, + value_fn=value_fn, + value_targets=value_targets, + vf_preds=vf_preds, + cur_kl_coeff=cur_kl_coeff, + entropy_coeff=entropy_coeff, + clip_param=clip_param, + vf_clip_param=vf_clip_param, + vf_loss_coeff=vf_loss_coeff, + clip_loss=clip_loss, + ) + self.loss = tf1.Print(self.loss, ["Worker Adapt Loss", self.loss]) + + +# This is the Meta-Update computation graph for main (meta-update step) +class MAMLLoss(object): + def __init__( + self, + model, + config, + dist_class, + value_targets, + advantages, + actions, + behaviour_logits, + vf_preds, + cur_kl_coeff, + policy_vars, + obs, + num_tasks, + split, + inner_adaptation_steps=1, + entropy_coeff=0, + clip_param=0.3, + vf_clip_param=0.1, + vf_loss_coeff=1.0, + use_gae=True, + ): + self.config = config + self.num_tasks = num_tasks + self.inner_adaptation_steps = inner_adaptation_steps + self.clip_param = clip_param + self.dist_class = dist_class + self.cur_kl_coeff = cur_kl_coeff + + # Split episode tensors into [inner_adaptation_steps+1, num_tasks, -1] + self.obs = self.split_placeholders(obs, split) + self.actions = self.split_placeholders(actions, split) + self.behaviour_logits = self.split_placeholders(behaviour_logits, split) + self.advantages = self.split_placeholders(advantages, split) + self.value_targets = self.split_placeholders(value_targets, split) + 
self.vf_preds = self.split_placeholders(vf_preds, split) + + # Construct name to tensor dictionary for easier indexing + self.policy_vars = {} + for var in policy_vars: + self.policy_vars[var.name] = var + + # Calculate pi_new for PPO + pi_new_logits, current_policy_vars, value_fns = [], [], [] + for i in range(self.num_tasks): + pi_new, value_fn = self.feed_forward( + self.obs[0][i], self.policy_vars, policy_config=config["model"] + ) + pi_new_logits.append(pi_new) + value_fns.append(value_fn) + current_policy_vars.append(self.policy_vars) + + inner_kls = [] + inner_ppo_loss = [] + + # Recompute weights for inner-adaptation (same weights as workers) + for step in range(self.inner_adaptation_steps): + kls = [] + for i in range(self.num_tasks): + # PPO Loss Function (only Surrogate) + ppo_loss, _, kl_loss, _, _ = PPOLoss( + dist_class=dist_class, + actions=self.actions[step][i], + curr_logits=pi_new_logits[i], + behaviour_logits=self.behaviour_logits[step][i], + advantages=self.advantages[step][i], + value_fn=value_fns[i], + value_targets=self.value_targets[step][i], + vf_preds=self.vf_preds[step][i], + cur_kl_coeff=0.0, + entropy_coeff=entropy_coeff, + clip_param=clip_param, + vf_clip_param=vf_clip_param, + vf_loss_coeff=vf_loss_coeff, + clip_loss=False, + ) + adapted_policy_vars = self.compute_updated_variables( + ppo_loss, current_policy_vars[i] + ) + pi_new_logits[i], value_fns[i] = self.feed_forward( + self.obs[step + 1][i], + adapted_policy_vars, + policy_config=config["model"], + ) + current_policy_vars[i] = adapted_policy_vars + kls.append(kl_loss) + inner_ppo_loss.append(ppo_loss) + + self.kls = kls + inner_kls.append(kls) + + mean_inner_kl = tf.stack( + [tf.reduce_mean(tf.stack(inner_kl)) for inner_kl in inner_kls] + ) + self.mean_inner_kl = mean_inner_kl + + ppo_obj = [] + for i in range(self.num_tasks): + ppo_loss, surr_loss, kl_loss, val_loss, entropy_loss = PPOLoss( + dist_class=dist_class, + actions=self.actions[self.inner_adaptation_steps][i], + 
curr_logits=pi_new_logits[i], + behaviour_logits=self.behaviour_logits[self.inner_adaptation_steps][i], + advantages=self.advantages[self.inner_adaptation_steps][i], + value_fn=value_fns[i], + value_targets=self.value_targets[self.inner_adaptation_steps][i], + vf_preds=self.vf_preds[self.inner_adaptation_steps][i], + cur_kl_coeff=0.0, + entropy_coeff=entropy_coeff, + clip_param=clip_param, + vf_clip_param=vf_clip_param, + vf_loss_coeff=vf_loss_coeff, + clip_loss=True, + ) + ppo_obj.append(ppo_loss) + self.mean_policy_loss = surr_loss + self.mean_kl = kl_loss + self.mean_vf_loss = val_loss + self.mean_entropy = entropy_loss + self.inner_kl_loss = tf.reduce_mean( + tf.multiply(self.cur_kl_coeff, mean_inner_kl) + ) + self.loss = tf.reduce_mean(tf.stack(ppo_obj, axis=0)) + self.inner_kl_loss + self.loss = tf1.Print( + self.loss, ["Meta-Loss", self.loss, "Inner KL", self.mean_inner_kl] + ) + + def feed_forward(self, obs, policy_vars, policy_config): + # Hacky for now, reconstruct FC network with adapted weights + # @mluo: TODO for any network + def fc_network( + inp, network_vars, hidden_nonlinearity, output_nonlinearity, policy_config + ): + bias_added = False + x = inp + for name, param in network_vars.items(): + if "kernel" in name: + x = tf.matmul(x, param) + elif "bias" in name: + x = tf.add(x, param) + bias_added = True + else: + raise NameError + + if bias_added: + if "out" not in name: + x = hidden_nonlinearity(x) + elif "out" in name: + x = output_nonlinearity(x) + else: + raise NameError + bias_added = False + return x + + policyn_vars = {} + valuen_vars = {} + log_std = None + for name, param in policy_vars.items(): + if "value" in name: + valuen_vars[name] = param + elif "log_std" in name: + log_std = param + else: + policyn_vars[name] = param + + output_nonlinearity = tf.identity + hidden_nonlinearity = get_activation_fn(policy_config["fcnet_activation"]) + + pi_new_logits = fc_network( + obs, policyn_vars, hidden_nonlinearity, output_nonlinearity, 
policy_config + ) + if log_std is not None: + pi_new_logits = tf.concat([pi_new_logits, 0.0 * pi_new_logits + log_std], 1) + value_fn = fc_network( + obs, valuen_vars, hidden_nonlinearity, output_nonlinearity, policy_config + ) + + return pi_new_logits, tf.reshape(value_fn, [-1]) + + def compute_updated_variables(self, loss, network_vars): + grad = tf.gradients(loss, list(network_vars.values())) + adapted_vars = {} + for i, tup in enumerate(network_vars.items()): + name, var = tup + if grad[i] is None: + adapted_vars[name] = var + else: + adapted_vars[name] = var - self.config["inner_lr"] * grad[i] + return adapted_vars + + def split_placeholders(self, placeholder, split): + inner_placeholder_list = tf.split( + placeholder, tf.math.reduce_sum(split, axis=1), axis=0 + ) + placeholder_list = [] + for index, split_placeholder in enumerate(inner_placeholder_list): + placeholder_list.append(tf.split(split_placeholder, split[index], axis=0)) + return placeholder_list + + +class KLCoeffMixin: + def __init__(self, config): + self.kl_coeff_val = [config["kl_coeff"]] * config["inner_adaptation_steps"] + self.kl_target = self.config["kl_target"] + self.kl_coeff = tf1.get_variable( + initializer=tf.keras.initializers.Constant(self.kl_coeff_val), + name="kl_coeff", + shape=(config["inner_adaptation_steps"]), + trainable=False, + dtype=tf.float32, + ) + + def update_kls(self, sampled_kls): + for i, kl in enumerate(sampled_kls): + if kl < self.kl_target / 1.5: + self.kl_coeff_val[i] *= 0.5 + elif kl > 1.5 * self.kl_target: + self.kl_coeff_val[i] *= 2.0 + print(self.kl_coeff_val) + self.kl_coeff.load(self.kl_coeff_val, session=self.get_session()) + return self.kl_coeff_val + + +# We need this builder function because we want to share the same +# custom logics between TF1 dynamic and TF2 eager policies. +def get_maml_tf_policy(name: str, base: type) -> type: + """Construct a MAMLTFPolicy inheriting either dynamic or eager base policies. + + Args: + base: Base class for this policy. 
DynamicTFPolicyV2 or EagerTFPolicyV2. + + Returns: + A TF Policy to be used with MAML. + """ + + class MAMLTFPolicy(KLCoeffMixin, ValueNetworkMixin, base): + def __init__( + self, + observation_space, + action_space, + config, + existing_model=None, + existing_inputs=None, + ): + # First thing first, enable eager execution if necessary. + base.enable_eager_execution_if_necessary() + + validate_config(config) + + # Initialize base class. + base.__init__( + self, + observation_space, + action_space, + config, + existing_inputs=existing_inputs, + existing_model=existing_model, + ) + + KLCoeffMixin.__init__(self, config) + ValueNetworkMixin.__init__(self, config) + + # Create the `split` placeholder before initialize loss. + if self.framework == "tf": + self._loss_input_dict["split"] = tf1.placeholder( + tf.int32, + name="Meta-Update-Splitting", + shape=( + self.config["inner_adaptation_steps"] + 1, + self.config["num_workers"], + ), + ) + + # Note: this is a bit ugly, but loss and optimizer initialization must + # happen after all the MixIns are initialized. 
+ self.maybe_initialize_optimizer_and_loss() + + @override(base) + def loss( + self, + model: Union[ModelV2, "tf.keras.Model"], + dist_class: Type[TFActionDistribution], + train_batch: SampleBatch, + ) -> Union[TensorType, List[TensorType]]: + logits, state = model(train_batch) + self.cur_lr = self.config["lr"] + + if self.config["worker_index"]: + self.loss_obj = WorkerLoss( + dist_class=dist_class, + actions=train_batch[SampleBatch.ACTIONS], + curr_logits=logits, + behaviour_logits=train_batch[SampleBatch.ACTION_DIST_INPUTS], + advantages=train_batch[Postprocessing.ADVANTAGES], + value_fn=model.value_function(), + value_targets=train_batch[Postprocessing.VALUE_TARGETS], + vf_preds=train_batch[SampleBatch.VF_PREDS], + cur_kl_coeff=0.0, + entropy_coeff=self.config["entropy_coeff"], + clip_param=self.config["clip_param"], + vf_clip_param=self.config["vf_clip_param"], + vf_loss_coeff=self.config["vf_loss_coeff"], + clip_loss=False, + ) + else: + self.var_list = tf1.get_collection( + tf1.GraphKeys.TRAINABLE_VARIABLES, tf1.get_variable_scope().name + ) + self.loss_obj = MAMLLoss( + model=model, + dist_class=dist_class, + value_targets=train_batch[Postprocessing.VALUE_TARGETS], + advantages=train_batch[Postprocessing.ADVANTAGES], + actions=train_batch[SampleBatch.ACTIONS], + behaviour_logits=train_batch[SampleBatch.ACTION_DIST_INPUTS], + vf_preds=train_batch[SampleBatch.VF_PREDS], + cur_kl_coeff=self.kl_coeff, + policy_vars=self.var_list, + obs=train_batch[SampleBatch.CUR_OBS], + num_tasks=self.config["num_workers"], + split=train_batch["split"], + config=self.config, + inner_adaptation_steps=self.config["inner_adaptation_steps"], + entropy_coeff=self.config["entropy_coeff"], + clip_param=self.config["clip_param"], + vf_clip_param=self.config["vf_clip_param"], + vf_loss_coeff=self.config["vf_loss_coeff"], + use_gae=self.config["use_gae"], + ) + + return self.loss_obj.loss + + @override(base) + def optimizer( + self, + ) -> Union[ + "tf.keras.optimizers.Optimizer", 
List["tf.keras.optimizers.Optimizer"] + ]: + """ + Workers use simple SGD for inner adaptation + Meta-Policy uses Adam optimizer for meta-update + """ + if not self.config["worker_index"]: + return tf1.train.AdamOptimizer(learning_rate=self.config["lr"]) + return tf1.train.GradientDescentOptimizer( + learning_rate=self.config["inner_lr"] + ) + + @override(base) + def stats_fn(self, train_batch: SampleBatch) -> Dict[str, TensorType]: + if self.config["worker_index"]: + return {"worker_loss": self.loss_obj.loss} + else: + return { + "cur_kl_coeff": tf.cast(self.kl_coeff, tf.float64), + "cur_lr": tf.cast(self.cur_lr, tf.float64), + "total_loss": self.loss_obj.loss, + "policy_loss": self.loss_obj.mean_policy_loss, + "vf_loss": self.loss_obj.mean_vf_loss, + "kl": self.loss_obj.mean_kl, + "inner_kl": self.loss_obj.mean_inner_kl, + "entropy": self.loss_obj.mean_entropy, + } + + @override(base) + def postprocess_trajectory( + self, sample_batch, other_agent_batches=None, episode=None + ): + sample_batch = super().postprocess_trajectory(sample_batch) + return compute_gae_for_sample_batch( + self, sample_batch, other_agent_batches, episode + ) + + @override(base) + def compute_gradients_fn( + self, optimizer: LocalOptimizer, loss: TensorType + ) -> ModelGradients: + return compute_gradients(self, optimizer, loss) + + MAMLTFPolicy.__name__ = name + MAMLTFPolicy.__qualname__ = name + + return MAMLTFPolicy + + +MAMLTF1Policy = get_maml_tf_policy("MAMLTF1Policy", DynamicTFPolicyV2) +MAMLTF2Policy = get_maml_tf_policy("MAMLTF2Policy", EagerTFPolicyV2) diff --git a/rllib_contrib/maml/src/rllib_maml/maml/maml_torch_policy.py b/rllib_contrib/maml/src/rllib_maml/maml/maml_torch_policy.py new file mode 100644 index 000000000000..4a16f5eb950a --- /dev/null +++ b/rllib_contrib/maml/src/rllib_maml/maml/maml_torch_policy.py @@ -0,0 +1,449 @@ +import logging +from typing import Dict, List, Type, Union + +import ray +from ray.rllib.algorithms.ppo.ppo_tf_policy import validate_config +from 
ray.rllib.evaluation.postprocessing import ( + Postprocessing, + compute_gae_for_sample_batch, +) +from ray.rllib.models.modelv2 import ModelV2 +from ray.rllib.models.torch.torch_action_dist import TorchDistributionWrapper +from ray.rllib.policy.sample_batch import SampleBatch +from ray.rllib.policy.torch_mixins import ValueNetworkMixin +from ray.rllib.policy.torch_policy_v2 import TorchPolicyV2 +from ray.rllib.utils.annotations import override +from ray.rllib.utils.framework import try_import_torch +from ray.rllib.utils.numpy import convert_to_numpy +from ray.rllib.utils.torch_utils import apply_grad_clipping +from ray.rllib.utils.typing import TensorType + +torch, nn = try_import_torch() +logger = logging.getLogger(__name__) + +try: + import higher +except (ImportError, ModuleNotFoundError): + raise ImportError( + ( + "The MAML and MB-MPO algorithms require the `higher` module to be " + "installed! However, there was no installation found. You can install it " + "via `pip install higher`." 
+ ) + ) + + +def PPOLoss( + dist_class, + actions, + curr_logits, + behaviour_logits, + advantages, + value_fn, + value_targets, + vf_preds, + cur_kl_coeff, + entropy_coeff, + clip_param, + vf_clip_param, + vf_loss_coeff, + clip_loss=False, +): + def surrogate_loss( + actions, curr_dist, prev_dist, advantages, clip_param, clip_loss + ): + pi_new_logp = curr_dist.logp(actions) + pi_old_logp = prev_dist.logp(actions) + + logp_ratio = torch.exp(pi_new_logp - pi_old_logp) + if clip_loss: + return torch.min( + advantages * logp_ratio, + advantages * torch.clamp(logp_ratio, 1 - clip_param, 1 + clip_param), + ) + return advantages * logp_ratio + + def kl_loss(curr_dist, prev_dist): + return prev_dist.kl(curr_dist) + + def entropy_loss(dist): + return dist.entropy() + + def vf_loss(value_fn, value_targets, vf_preds, vf_clip_param=0.1): + # GAE Value Function Loss + vf_loss1 = torch.pow(value_fn - value_targets, 2.0) + vf_clipped = vf_preds + torch.clamp( + value_fn - vf_preds, -vf_clip_param, vf_clip_param + ) + vf_loss2 = torch.pow(vf_clipped - value_targets, 2.0) + vf_loss = torch.max(vf_loss1, vf_loss2) + return vf_loss + + pi_new_dist = dist_class(curr_logits, None) + pi_old_dist = dist_class(behaviour_logits, None) + + surr_loss = torch.mean( + surrogate_loss( + actions, pi_new_dist, pi_old_dist, advantages, clip_param, clip_loss + ) + ) + kl_loss = torch.mean(kl_loss(pi_new_dist, pi_old_dist)) + vf_loss = torch.mean(vf_loss(value_fn, value_targets, vf_preds, vf_clip_param)) + entropy_loss = torch.mean(entropy_loss(pi_new_dist)) + + total_loss = -surr_loss + cur_kl_coeff * kl_loss + total_loss += vf_loss_coeff * vf_loss + total_loss -= entropy_coeff * entropy_loss + return total_loss, surr_loss, kl_loss, vf_loss, entropy_loss + + +# This is the computation graph for workers (inner adaptation steps) +class WorkerLoss(object): + def __init__( + self, + model, + dist_class, + actions, + curr_logits, + behaviour_logits, + advantages, + value_fn, + value_targets, + 
vf_preds, + cur_kl_coeff, + entropy_coeff, + clip_param, + vf_clip_param, + vf_loss_coeff, + clip_loss=False, + ): + self.loss, surr_loss, kl_loss, vf_loss, ent_loss = PPOLoss( + dist_class=dist_class, + actions=actions, + curr_logits=curr_logits, + behaviour_logits=behaviour_logits, + advantages=advantages, + value_fn=value_fn, + value_targets=value_targets, + vf_preds=vf_preds, + cur_kl_coeff=cur_kl_coeff, + entropy_coeff=entropy_coeff, + clip_param=clip_param, + vf_clip_param=vf_clip_param, + vf_loss_coeff=vf_loss_coeff, + clip_loss=clip_loss, + ) + + +# This is the Meta-Update computation graph for main (meta-update step) +class MAMLLoss(object): + def __init__( + self, + model, + config, + dist_class, + value_targets, + advantages, + actions, + behaviour_logits, + vf_preds, + cur_kl_coeff, + policy_vars, + obs, + num_tasks, + split, + meta_opt, + inner_adaptation_steps=1, + entropy_coeff=0, + clip_param=0.3, + vf_clip_param=0.1, + vf_loss_coeff=1.0, + use_gae=True, + ): + self.config = config + self.num_tasks = num_tasks + self.inner_adaptation_steps = inner_adaptation_steps + self.clip_param = clip_param + self.dist_class = dist_class + self.cur_kl_coeff = cur_kl_coeff + self.model = model + self.vf_clip_param = vf_clip_param + self.vf_loss_coeff = vf_loss_coeff + self.entropy_coeff = entropy_coeff + + # Split episode tensors into [inner_adaptation_steps+1, num_tasks, -1] + self.obs = self.split_placeholders(obs, split) + self.actions = self.split_placeholders(actions, split) + self.behaviour_logits = self.split_placeholders(behaviour_logits, split) + self.advantages = self.split_placeholders(advantages, split) + self.value_targets = self.split_placeholders(value_targets, split) + self.vf_preds = self.split_placeholders(vf_preds, split) + + inner_opt = torch.optim.SGD(model.parameters(), lr=config["inner_lr"]) + surr_losses = [] + val_losses = [] + kl_losses = [] + entropy_losses = [] + meta_losses = [] + kls = [] + + meta_opt.zero_grad() + for i in 
range(self.num_tasks): + with higher.innerloop_ctx(model, inner_opt, copy_initial_weights=False) as ( + fnet, + diffopt, + ): + inner_kls = [] + for step in range(self.inner_adaptation_steps): + ppo_loss, _, inner_kl_loss, _, _ = self.compute_losses( + fnet, step, i + ) + diffopt.step(ppo_loss) + inner_kls.append(inner_kl_loss) + kls.append(inner_kl_loss.detach()) + + # Meta Update + ppo_loss, s_loss, kl_loss, v_loss, ent = self.compute_losses( + fnet, self.inner_adaptation_steps - 1, i, clip_loss=True + ) + + inner_loss = torch.mean( + torch.stack( + [ + a * b + for a, b in zip( + self.cur_kl_coeff[ + i + * self.inner_adaptation_steps : (i + 1) + * self.inner_adaptation_steps + ], + inner_kls, + ) + ] + ) + ) + meta_loss = (ppo_loss + inner_loss) / self.num_tasks + meta_loss.backward() + + surr_losses.append(s_loss.detach()) + kl_losses.append(kl_loss.detach()) + val_losses.append(v_loss.detach()) + entropy_losses.append(ent.detach()) + meta_losses.append(meta_loss.detach()) + + meta_opt.step() + + # Stats Logging + self.mean_policy_loss = torch.mean(torch.stack(surr_losses)) + self.mean_kl_loss = torch.mean(torch.stack(kl_losses)) + self.mean_vf_loss = torch.mean(torch.stack(val_losses)) + self.mean_entropy = torch.mean(torch.stack(entropy_losses)) + self.mean_inner_kl = kls + self.loss = torch.sum(torch.stack(meta_losses)) + # Hacky, needed to bypass RLlib backend + self.loss.requires_grad = True + + def compute_losses(self, model, inner_adapt_iter, task_iter, clip_loss=False): + obs = self.obs[inner_adapt_iter][task_iter] + obs_dict = {"obs": obs, "obs_flat": obs} + curr_logits, _ = model.forward(obs_dict, None, None) + value_fns = model.value_function() + ppo_loss, surr_loss, kl_loss, val_loss, ent_loss = PPOLoss( + dist_class=self.dist_class, + actions=self.actions[inner_adapt_iter][task_iter], + curr_logits=curr_logits, + behaviour_logits=self.behaviour_logits[inner_adapt_iter][task_iter], + advantages=self.advantages[inner_adapt_iter][task_iter], + 
value_fn=value_fns, + value_targets=self.value_targets[inner_adapt_iter][task_iter], + vf_preds=self.vf_preds[inner_adapt_iter][task_iter], + cur_kl_coeff=0.0, + entropy_coeff=self.entropy_coeff, + clip_param=self.clip_param, + vf_clip_param=self.vf_clip_param, + vf_loss_coeff=self.vf_loss_coeff, + clip_loss=clip_loss, + ) + return ppo_loss, surr_loss, kl_loss, val_loss, ent_loss + + def split_placeholders(self, placeholder, split): + inner_placeholder_list = torch.split( + placeholder, torch.sum(split, dim=1).tolist(), dim=0 + ) + placeholder_list = [] + for index, split_placeholder in enumerate(inner_placeholder_list): + placeholder_list.append( + torch.split(split_placeholder, split[index].tolist(), dim=0) + ) + return placeholder_list + + +class KLCoeffMixin: + def __init__(self, config): + self.kl_coeff_val = ( + [config["kl_coeff"]] + * config["inner_adaptation_steps"] + * config["num_workers"] + ) + self.kl_target = self.config["kl_target"] + + def update_kls(self, sampled_kls): + for i, kl in enumerate(sampled_kls): + if kl < self.kl_target / 1.5: + self.kl_coeff_val[i] *= 0.5 + elif kl > 1.5 * self.kl_target: + self.kl_coeff_val[i] *= 2.0 + return self.kl_coeff_val + + +class MAMLTorchPolicy(ValueNetworkMixin, KLCoeffMixin, TorchPolicyV2): + """PyTorch policy class used with MAML.""" + + def __init__(self, observation_space, action_space, config): + config = dict(ray.rllib.algorithms.maml.maml.MAMLConfig(), **config) + validate_config(config) + + TorchPolicyV2.__init__( + self, + observation_space, + action_space, + config, + max_seq_len=config["model"]["max_seq_len"], + ) + + KLCoeffMixin.__init__(self, config) + ValueNetworkMixin.__init__(self, config) + + # TODO: Don't require users to call this manually. 
+ self._initialize_loss_from_dummy_batch() + + @override(TorchPolicyV2) + def loss( + self, + model: ModelV2, + dist_class: Type[TorchDistributionWrapper], + train_batch: SampleBatch, + ) -> Union[TensorType, List[TensorType]]: + """Constructs the loss function. + + Args: + model: The Model to calculate the loss for. + dist_class: The action distr. class. + train_batch: The training data. + + Returns: + The PPO loss tensor given the input batch. + """ + logits, state = model(train_batch) + self.cur_lr = self.config["lr"] + + if self.config["worker_index"]: + self.loss_obj = WorkerLoss( + model=model, + dist_class=dist_class, + actions=train_batch[SampleBatch.ACTIONS], + curr_logits=logits, + behaviour_logits=train_batch[SampleBatch.ACTION_DIST_INPUTS], + advantages=train_batch[Postprocessing.ADVANTAGES], + value_fn=model.value_function(), + value_targets=train_batch[Postprocessing.VALUE_TARGETS], + vf_preds=train_batch[SampleBatch.VF_PREDS], + cur_kl_coeff=0.0, + entropy_coeff=self.config["entropy_coeff"], + clip_param=self.config["clip_param"], + vf_clip_param=self.config["vf_clip_param"], + vf_loss_coeff=self.config["vf_loss_coeff"], + clip_loss=False, + ) + else: + self.var_list = model.named_parameters() + + # `split` may not exist yet (during test-loss call), use a dummy value. + # Cannot use get here due to train_batch being a TrackingDict. 
+ if "split" in train_batch: + split = train_batch["split"] + else: + split_shape = ( + self.config["inner_adaptation_steps"], + self.config["num_workers"], + ) + split_const = int( + train_batch["obs"].shape[0] // (split_shape[0] * split_shape[1]) + ) + split = torch.ones(split_shape, dtype=int) * split_const + self.loss_obj = MAMLLoss( + model=model, + dist_class=dist_class, + value_targets=train_batch[Postprocessing.VALUE_TARGETS], + advantages=train_batch[Postprocessing.ADVANTAGES], + actions=train_batch[SampleBatch.ACTIONS], + behaviour_logits=train_batch[SampleBatch.ACTION_DIST_INPUTS], + vf_preds=train_batch[SampleBatch.VF_PREDS], + cur_kl_coeff=self.kl_coeff_val, + policy_vars=self.var_list, + obs=train_batch[SampleBatch.CUR_OBS], + num_tasks=self.config["num_workers"], + split=split, + config=self.config, + inner_adaptation_steps=self.config["inner_adaptation_steps"], + entropy_coeff=self.config["entropy_coeff"], + clip_param=self.config["clip_param"], + vf_clip_param=self.config["vf_clip_param"], + vf_loss_coeff=self.config["vf_loss_coeff"], + use_gae=self.config["use_gae"], + meta_opt=self.meta_opt, + ) + + return self.loss_obj.loss + + @override(TorchPolicyV2) + def optimizer( + self, + ) -> Union[List["torch.optim.Optimizer"], "torch.optim.Optimizer"]: + """ + Workers use simple SGD for inner adaptation + Meta-Policy uses Adam optimizer for meta-update + """ + if not self.config["worker_index"]: + self.meta_opt = torch.optim.Adam( + self.model.parameters(), lr=self.config["lr"] + ) + return self.meta_opt + return torch.optim.SGD(self.model.parameters(), lr=self.config["inner_lr"]) + + @override(TorchPolicyV2) + def stats_fn(self, train_batch: SampleBatch) -> Dict[str, TensorType]: + if self.config["worker_index"]: + return convert_to_numpy({"worker_loss": self.loss_obj.loss}) + else: + return convert_to_numpy( + { + "cur_kl_coeff": self.kl_coeff_val, + "cur_lr": self.cur_lr, + "total_loss": self.loss_obj.loss, + "policy_loss": 
self.loss_obj.mean_policy_loss, + "vf_loss": self.loss_obj.mean_vf_loss, + "kl_loss": self.loss_obj.mean_kl_loss, + "inner_kl": self.loss_obj.mean_inner_kl, + "entropy": self.loss_obj.mean_entropy, + } + ) + + @override(TorchPolicyV2) + def extra_grad_process( + self, optimizer: "torch.optim.Optimizer", loss: TensorType + ) -> Dict[str, TensorType]: + return apply_grad_clipping(self, optimizer, loss) + + @override(TorchPolicyV2) + def postprocess_trajectory( + self, sample_batch, other_agent_batches=None, episode=None + ): + # Do all post-processing always with no_grad(). + # Not using this here will introduce a memory leak + # in torch (issue #6962). + # TODO: no_grad still necessary? + with torch.no_grad(): + return compute_gae_for_sample_batch( + self, sample_batch, other_agent_batches, episode + ) diff --git a/rllib_contrib/maml/tests/test_maml.py b/rllib_contrib/maml/tests/test_maml.py new file mode 100644 index 000000000000..774be4ecde41 --- /dev/null +++ b/rllib_contrib/maml/tests/test_maml.py @@ -0,0 +1,61 @@ +import unittest + +from gymnasium.wrappers import TimeLimit +from rllib_maml.envs.cartpole_mass import CartPoleMassEnv +from rllib_maml.envs.pendulum_mass import PendulumMassEnv +from rllib_maml.maml import MAMLConfig + +import ray +from ray.rllib.utils.test_utils import ( + check_compute_single_action, + check_train_results, + framework_iterator, +) +from ray.tune.registry import register_env + + +class TestMAML(unittest.TestCase): + @classmethod + def setUpClass(cls): + ray.init() + register_env( + "cartpole", + lambda env_cfg: TimeLimit(CartPoleMassEnv(), max_episode_steps=200), + ) + register_env( + "pendulum", + lambda env_cfg: TimeLimit(PendulumMassEnv(), max_episode_steps=200), + ) + + @classmethod + def tearDownClass(cls): + ray.shutdown() + + def test_maml_compilation(self): + """Test whether MAML can be built with all frameworks.""" + config = MAMLConfig().rollouts(num_rollout_workers=1) + + num_iterations = 1 + + # Test for tf framework 
(torch not implemented yet). + for fw in framework_iterator(config, frameworks=("tf", "torch")): + for env in ["cartpole", "pendulum"]: + if fw == "tf" and env.startswith("cartpole"): + continue + print("env={}".format(env)) + config.environment(env) + algo = config.build() + for i in range(num_iterations): + results = algo.train() + check_train_results(results) + print(results) + check_compute_single_action(algo, include_prev_action_reward=True) + algo.stop() + + +if __name__ == "__main__": + import sys + + import pytest + + sys.exit(pytest.main(["-v", __file__])) From c3e9a7a60b031faa549593ae736c910eeef49b9e Mon Sep 17 00:00:00 2001 From: Yi Cheng <74173148+iycheng@users.noreply.github.com> Date: Fri, 12 May 2023 13:44:59 -0700 Subject: [PATCH 369/424] [telemetry] Add libc version to ray telemetry. (#33444) This PR added GLIBC version to Ray telemetry. Linux distribution is not added because it needs distro pkg. --- python/ray/_private/usage/usage_lib.py | 11 +++++++++++ python/ray/tests/test_usage_stats.py | 11 +++++++++++ 2 files changed, 22 insertions(+) diff --git a/python/ray/_private/usage/usage_lib.py b/python/ray/_private/usage/usage_lib.py index 7eda1e3d3c2f..d40effe571b6 100644 --- a/python/ray/_private/usage/usage_lib.py +++ b/python/ray/_private/usage/usage_lib.py @@ -45,6 +45,7 @@ import logging import threading import os +import platform import sys import time import uuid @@ -144,6 +145,8 @@ class UsageStatsToReport: #: The total number of running jobs excluding internal ones # when the report is generated. total_num_running_jobs: Optional[int] + #: The libc version in the OS. 
+ libc_version: Optional[str] @dataclass(init=True) @@ -356,6 +359,13 @@ def _generate_cluster_metadata(): "session_start_timestamp_ms": int(time.time() * 1000), } ) + if sys.platform == "linux": + # Record llibc version + (lib, ver) = platform.libc_ver() + if not lib: + metadata.update({"libc_version": "NA"}) + else: + metadata.update({"libc_version": f"{lib}:{ver}"}) return metadata @@ -759,6 +769,7 @@ def generate_report_data( extra_usage_tags=get_extra_usage_tags_to_report(gcs_client), total_num_nodes=get_total_num_nodes_to_report(gcs_client), total_num_running_jobs=get_total_num_running_jobs_to_report(gcs_client), + libc_version=cluster_metadata.get("libc_version"), ) return data diff --git a/python/ray/tests/test_usage_stats.py b/python/ray/tests/test_usage_stats.py index 2493c19c11cb..f145299ad8ac 100644 --- a/python/ray/tests/test_usage_stats.py +++ b/python/ray/tests/test_usage_stats.py @@ -43,6 +43,7 @@ "min_workers": {"type": ["null", "integer"]}, "max_workers": {"type": ["null", "integer"]}, "head_node_instance_type": {"type": ["null", "string"]}, + "libc_version": {"type": ["null", "string"]}, "worker_node_instance_types": { "type": ["null", "array"], "items": {"type": "string"}, @@ -1153,6 +1154,16 @@ def run_usage_stats_server(reporter): assert payload["python_version"] == python_version assert payload["schema_version"] == "0.1" assert payload["os"] == sys.platform + if sys.platform != "linux": + payload["libc_version"] is None + else: + import platform + + assert ( + payload["libc_version"] + == f"{platform.libc_ver()[0]}:{platform.libc_ver()[1]}" + ) + assert payload["source"] == "OSS" assert payload["cloud_provider"] == "aws" assert payload["min_workers"] is None From cc3bc67887fd01e958ec3fac637a70d68f962361 Mon Sep 17 00:00:00 2001 From: Eric Liang Date: Fri, 12 May 2023 15:22:51 -0700 Subject: [PATCH 370/424] [data] Capture the context when the dataset is first created (#35239) --- .../data/_internal/iterator/iterator_impl.py | 2 -- 
python/ray/data/_internal/plan.py | 19 ++++++++---- python/ray/data/dataset.py | 6 ++++ .../data/tests/test_context_propagation.py | 30 +++++++++++++++++++ 4 files changed, 50 insertions(+), 7 deletions(-) diff --git a/python/ray/data/_internal/iterator/iterator_impl.py b/python/ray/data/_internal/iterator/iterator_impl.py index 9f9a6833bdcb..2dc69a34f649 100644 --- a/python/ray/data/_internal/iterator/iterator_impl.py +++ b/python/ray/data/_internal/iterator/iterator_impl.py @@ -2,7 +2,6 @@ from ray.types import ObjectRef from ray.data.block import Block, BlockMetadata -from ray.data.context import DataContext from ray.data.iterator import DataIterator from ray.data._internal.stats import DatasetStats @@ -17,7 +16,6 @@ def __init__( base_dataset: "Dataset", ): self._base_dataset = base_dataset - self._base_context = DataContext.get_current() def __repr__(self) -> str: return f"DataIterator({self._base_dataset})" diff --git a/python/ray/data/_internal/plan.py b/python/ray/data/_internal/plan.py index 4e5782daea77..235247636973 100644 --- a/python/ray/data/_internal/plan.py +++ b/python/ray/data/_internal/plan.py @@ -132,6 +132,10 @@ def __init__( self._run_by_consumer = run_by_consumer + # Snapshot the current context, so that the config of Datasets is always + # determined by the config at the time it was created. + self._context = copy.deepcopy(DataContext.get_current()) + def __repr__(self) -> str: return ( f"ExecutionPlan(" @@ -483,7 +487,9 @@ def execute_to_iterator( Tuple of iterator over output blocks and the executor. """ - ctx = DataContext.get_current() + # Always used the saved context for execution. + ctx = self._context + if not ctx.use_streaming_executor or self.has_computed_output(): return ( self.execute( @@ -532,7 +538,10 @@ def execute( Returns: The blocks of the output dataset. """ - context = DataContext.get_current() + + # Always used the saved context for execution. 
+ context = self._context + if not ray.available_resources().get("CPU"): if log_once("cpu_warning"): logger.get_logger().warning( @@ -672,7 +681,7 @@ def _optimize(self) -> Tuple[BlockList, DatasetStats, List[Stage]]: """Apply stage fusion optimizations, returning an updated source block list and associated stats, and a set of optimized stages. """ - context = DataContext.get_current() + context = self._context blocks, stats, stages = self._get_source_blocks_and_stages() if context.optimize_reorder_stages: stages = _reorder_stages(stages) @@ -728,7 +737,7 @@ def is_read_stage_equivalent(self) -> bool: """Return whether this plan can be executed as only a read stage.""" from ray.data._internal.stage_impl import RandomizeBlocksStage - context = DataContext.get_current() + context = self._context remaining_stages = self._stages_after_snapshot if ( context.optimize_fuse_stages @@ -764,7 +773,7 @@ def _run_with_new_execution_backend(self) -> bool: # - Read only: handle with legacy backend # - Read->randomize_block_order: handle with new backend # Note that both are considered read equivalent, hence this extra check. 
- context = DataContext.get_current() + context = self._context trailing_randomize_block_order_stage = ( self._stages_after_snapshot and len(self._stages_after_snapshot) == 1 diff --git a/python/ray/data/dataset.py b/python/ray/data/dataset.py index 16d4e2f7b4ef..d030f131fd0c 100644 --- a/python/ray/data/dataset.py +++ b/python/ray/data/dataset.py @@ -4094,6 +4094,12 @@ def deserialize_lineage(serialized_ds: bytes) -> "Dataset": """ return pickle.loads(serialized_ds) + @property + @DeveloperAPI + def context(self) -> DataContext: + """Return the DataContext used to create this Dataset.""" + return self._plan._context + def _divide(self, block_idx: int) -> ("Dataset", "Dataset"): block_list = self._plan.execute() left, right = block_list.divide(block_idx) diff --git a/python/ray/data/tests/test_context_propagation.py b/python/ray/data/tests/test_context_propagation.py index c7f50e1d7ec9..e532e97b94d8 100644 --- a/python/ray/data/tests/test_context_propagation.py +++ b/python/ray/data/tests/test_context_propagation.py @@ -10,6 +10,36 @@ from ray._private.test_utils import run_string_as_driver +def test_context_saved_when_dataset_created(ray_start_regular_shared): + ctx = DataContext.get_current() + d1 = ray.data.range(10) + d2 = ray.data.range(10) + assert ctx.eager_free + assert d1.context.eager_free + assert d2.context.eager_free + + d1.context.eager_free = False + assert not d1.context.eager_free + assert d2.context.eager_free + assert ctx.eager_free + + @ray.remote(num_cpus=0) + def check(d1, d2): + assert not d1.context.eager_free + assert d2.context.eager_free + + ray.get(check.remote(d1, d2)) + + @ray.remote(num_cpus=0) + def check2(d): + d.take() + + d1.context.execution_options.resource_limits.cpu = 0.1 + with pytest.raises(ValueError): + ray.get(check2.remote(d1)) + ray.get(check2.remote(d2)) + + def test_read(ray_start_regular_shared): class CustomDatasource(Datasource): def prepare_read(self, parallelism: int): From 
af16d3c6dc5c9a8af31cc60cda1713590289e12b Mon Sep 17 00:00:00 2001 From: Yi Cheng <74173148+iycheng@users.noreply.github.com> Date: Fri, 12 May 2023 15:57:44 -0700 Subject: [PATCH 371/424] [core] Make execute_after accept chrono (#35099) The current execute_after is verbose and the unit waiting is not stated clearly in the signature. This PR fixed this by passing chrono there. This PR doesn't update the existing code base for this but make it compatible backward. --- src/ray/common/asio/asio_util.h | 17 +++++++++-------- src/ray/common/asio/instrumented_io_context.cc | 4 ++-- src/ray/common/ray_syncer/ray_syncer.cc | 2 +- src/ray/gcs/gcs_server/gcs_actor_scheduler.cc | 6 ++++-- .../gcs_server/gcs_placement_group_manager.cc | 4 +++- src/ray/gcs/gcs_server/gcs_server.cc | 5 +++-- src/ray/object_manager/plasma/store.cc | 6 +++--- src/ray/raylet/node_manager.cc | 5 +++-- src/ray/raylet/worker_pool_test.cc | 2 +- 9 files changed, 29 insertions(+), 22 deletions(-) diff --git a/src/ray/common/asio/asio_util.h b/src/ray/common/asio/asio_util.h index 0fa69d0f8e2e..1bd513c2fe4f 100644 --- a/src/ray/common/asio/asio_util.h +++ b/src/ray/common/asio/asio_util.h @@ -15,22 +15,23 @@ #pragma once #include +#include -inline std::shared_ptr execute_after_us( +template +std::shared_ptr execute_after( instrumented_io_context &io_context, std::function fn, - int64_t delay_microseconds) { + Duration delay_duration) { auto timer = std::make_shared(io_context); - timer->expires_from_now(boost::posix_time::microseconds(delay_microseconds)); + auto delay = boost::posix_time::microseconds( + std::chrono::duration_cast(delay_duration).count()); + timer->expires_from_now(delay); + timer->async_wait([timer, fn = std::move(fn)](const boost::system::error_code &error) { if (error != boost::asio::error::operation_aborted && fn) { fn(); } }); - return timer; -} -inline std::shared_ptr execute_after( - instrumented_io_context &io_context, std::function fn, int64_t milliseconds) { - return 
execute_after_us(io_context, fn, milliseconds * 1000); + return timer; } diff --git a/src/ray/common/asio/instrumented_io_context.cc b/src/ray/common/asio/instrumented_io_context.cc index e0fadbda15fb..3e577826f519 100644 --- a/src/ray/common/asio/instrumented_io_context.cc +++ b/src/ray/common/asio/instrumented_io_context.cc @@ -41,7 +41,7 @@ void instrumented_io_context::post(std::function handler, boost::asio::io_context::post(std::move(handler)); } else { RAY_LOG(DEBUG) << "Deferring " << name << " by " << defer_us << "us"; - execute_after_us(*this, std::move(handler), defer_us); + execute_after(*this, std::move(handler), std::chrono::microseconds(defer_us)); } } @@ -65,7 +65,7 @@ void instrumented_io_context::post(std::function handler, } else { RAY_LOG(DEBUG) << "Deferring " << stats_handle->event_name << " by " << defer_us << "us"; - execute_after_us(*this, std::move(handler), defer_us); + execute_after(*this, std::move(handler), std::chrono::microseconds(defer_us)); } } diff --git a/src/ray/common/ray_syncer/ray_syncer.cc b/src/ray/common/ray_syncer/ray_syncer.cc index ed2c36adfa2a..3f4810da2985 100644 --- a/src/ray/common/ray_syncer/ray_syncer.cc +++ b/src/ray/common/ray_syncer/ray_syncer.cc @@ -231,7 +231,7 @@ void RaySyncer::Connect(const std::string &node_id, << NodeID::FromBinary(node_id); Connect(node_id, channel); }, - /* delay_microseconds = */ 2000); + /* delay_duration = */ std::chrono::milliseconds(2000)); } }, /* stub */ std::move(stub)); diff --git a/src/ray/gcs/gcs_server/gcs_actor_scheduler.cc b/src/ray/gcs/gcs_server/gcs_actor_scheduler.cc index 85f1f14b5240..4b3ce52e3b4b 100644 --- a/src/ray/gcs/gcs_server/gcs_actor_scheduler.cc +++ b/src/ray/gcs/gcs_server/gcs_actor_scheduler.cc @@ -341,7 +341,8 @@ void GcsActorScheduler::RetryLeasingWorkerFromNode( RAY_UNUSED(execute_after( io_context_, [this, node, actor] { DoRetryLeasingWorkerFromNode(actor, node); }, - RayConfig::instance().gcs_lease_worker_retry_interval_ms())); + 
std::chrono::milliseconds( + RayConfig::instance().gcs_lease_worker_retry_interval_ms()))); } void GcsActorScheduler::DoRetryLeasingWorkerFromNode( @@ -504,7 +505,8 @@ void GcsActorScheduler::RetryCreatingActorOnWorker( RAY_UNUSED(execute_after( io_context_, [this, actor, worker] { DoRetryCreatingActorOnWorker(actor, worker); }, - RayConfig::instance().gcs_create_actor_retry_interval_ms())); + std::chrono::milliseconds( + RayConfig::instance().gcs_create_actor_retry_interval_ms()))); } void GcsActorScheduler::DoRetryCreatingActorOnWorker( diff --git a/src/ray/gcs/gcs_server/gcs_placement_group_manager.cc b/src/ray/gcs/gcs_server/gcs_placement_group_manager.cc index 6c222727ee60..fb19bea10756 100644 --- a/src/ray/gcs/gcs_server/gcs_placement_group_manager.cc +++ b/src/ray/gcs/gcs_server/gcs_placement_group_manager.cc @@ -828,7 +828,9 @@ void GcsPlacementGroupManager::Tick() { // added as a safety check. https://github.com/ray-project/ray/pull/18419 SchedulePendingPlacementGroups(); execute_after( - io_context_, [this] { Tick(); }, 1000 /* milliseconds */); + io_context_, + [this] { Tick(); }, + std::chrono::milliseconds(1000) /* milliseconds */); } void GcsPlacementGroupManager::UpdatePlacementGroupLoad() { diff --git a/src/ray/gcs/gcs_server/gcs_server.cc b/src/ray/gcs/gcs_server/gcs_server.cc index 2def12549a7a..1c5b3c40df8b 100644 --- a/src/ray/gcs/gcs_server/gcs_server.cc +++ b/src/ray/gcs/gcs_server/gcs_server.cc @@ -558,7 +558,7 @@ void GcsServer::InitRuntimeEnvManager() { main_service_, *runtime_env_manager_, /*delay_executor=*/ [this](std::function task, uint32_t delay_ms) { - return execute_after(main_service_, task, delay_ms); + return execute_after(main_service_, task, std::chrono::milliseconds(delay_ms)); }); runtime_env_service_ = std::make_unique(main_service_, *runtime_env_handler_); @@ -702,7 +702,8 @@ void GcsServer::RecordMetrics() const { execute_after( main_service_, [this] { RecordMetrics(); }, - 
(RayConfig::instance().metrics_report_interval_ms() / 2) /* milliseconds */); + std::chrono::milliseconds(RayConfig::instance().metrics_report_interval_ms() / + 2) /* milliseconds */); } void GcsServer::DumpDebugStateToFile() const { diff --git a/src/ray/object_manager/plasma/store.cc b/src/ray/object_manager/plasma/store.cc index a4a8ffa2ea9f..a616a652f684 100644 --- a/src/ray/object_manager/plasma/store.cc +++ b/src/ray/object_manager/plasma/store.cc @@ -516,7 +516,7 @@ void PlasmaStore::ProcessCreateRequests() { create_timer_ = nullptr; ProcessCreateRequests(); }, - retry_after_ms); + std::chrono::milliseconds(retry_after_ms)); } } @@ -555,7 +555,7 @@ void PlasmaStore::PrintAndRecordDebugDump() const { stats_timer_ = execute_after( io_context_, [this]() { PrintAndRecordDebugDump(); }, - RayConfig::instance().event_stats_print_interval_ms()); + std::chrono::milliseconds(RayConfig::instance().event_stats_print_interval_ms())); } void PlasmaStore::ScheduleRecordMetrics() const { @@ -567,7 +567,7 @@ void PlasmaStore::ScheduleRecordMetrics() const { [this]() { ScheduleRecordMetrics(); }, // divide by 2 to make sure record happens before reporting // this also matches with NodeManager::RecordMetrics interval - RayConfig::instance().metrics_report_interval_ms() / 2); + std::chrono::milliseconds(RayConfig::instance().metrics_report_interval_ms() / 2)); } std::string PlasmaStore::GetDebugDump() const { diff --git a/src/ray/raylet/node_manager.cc b/src/ray/raylet/node_manager.cc index 21bd053cbb58..7865f35f4eb4 100644 --- a/src/ray/raylet/node_manager.cc +++ b/src/ray/raylet/node_manager.cc @@ -240,7 +240,8 @@ NodeManager::NodeManager(instrumented_io_context &io_service, }, /*delay_executor*/ [this](std::function fn, int64_t delay_ms) { - RAY_UNUSED(execute_after(io_service_, fn, delay_ms)); + RAY_UNUSED(execute_after( + io_service_, fn, std::chrono::milliseconds(delay_ms))); }), node_manager_server_("NodeManager", config.node_manager_port, @@ -398,7 +399,7 @@ 
NodeManager::NodeManager(instrumented_io_context &io_service, std::move(options), /*delay_executor=*/ [this](std::function task, uint32_t delay_ms) { - return execute_after(io_service_, task, delay_ms); + return execute_after(io_service_, task, std::chrono::milliseconds(delay_ms)); }, /*runtime_env_agent_factory=*/ [this](const std::string &ip_address, int port) { diff --git a/src/ray/raylet/worker_pool_test.cc b/src/ray/raylet/worker_pool_test.cc index c8bc23a3736b..f626247d20e4 100644 --- a/src/ray/raylet/worker_pool_test.cc +++ b/src/ray/raylet/worker_pool_test.cc @@ -488,7 +488,7 @@ class WorkerPoolTest : public ::testing::Test { std::move(options), /*delay_executor=*/ [this](std::function task, uint32_t delay_ms) { - return execute_after(io_service_, task, delay_ms); + return execute_after(io_service_, task, std::chrono::milliseconds(delay_ms)); }, /*runtime_env_agent_factory=*/ [](const std::string &ip_address, int port) { From 469afaf38aac8651d62c01c21e42a1b1ff5900fb Mon Sep 17 00:00:00 2001 From: Eric Liang Date: Fri, 12 May 2023 23:56:19 -0700 Subject: [PATCH 372/424] [no_early_kickoff] [data] Improve our handling of tensor returns in strict mode (#35272) --- doc/source/data/faq.rst | 1 + doc/source/data/working-with-tensors.rst | 67 +++++---- python/ray/data/BUILD | 8 ++ python/ray/data/_internal/arrow_block.py | 20 +-- python/ray/data/_internal/numpy_support.py | 85 ++++++++++++ .../ray/data/_internal/planner/map_batches.py | 3 +- python/ray/data/_internal/table_block.py | 8 ++ python/ray/data/dataset.py | 2 +- python/ray/data/iterator.py | 2 +- python/ray/data/tests/test_numpy_support.py | 129 ++++++++++++++++++ python/ray/data/tests/test_tensor.py | 10 +- .../train/torch/torch_detection_predictor.py | 5 +- 12 files changed, 289 insertions(+), 51 deletions(-) create mode 100644 python/ray/data/_internal/numpy_support.py create mode 100644 python/ray/data/tests/test_numpy_support.py diff --git a/doc/source/data/faq.rst b/doc/source/data/faq.rst index 
2a49e3075389..7f1af6ec80a7 100644 --- a/doc/source/data/faq.rst +++ b/doc/source/data/faq.rst @@ -327,6 +327,7 @@ just need to be aware of ``Dict[str, Any]`` (non-batched data records) and * There is no more special interpretation of single-column schema containing just ``__value__`` as a column. * The default batch format is ``numpy`` instead of ``default`` (pandas). * ``schema()`` returns a unified Schema class instead of ``Union[pyarrow.lib.Schema, type]``. +* When lists of array-like objects are returned from map batches, they will be converted into a contiguous numpy array, rather than treated as a list of objects. **Datasource behavior changes**: diff --git a/doc/source/data/working-with-tensors.rst b/doc/source/data/working-with-tensors.rst index cd8fe7dde27f..6c064f941073 100644 --- a/doc/source/data/working-with-tensors.rst +++ b/doc/source/data/working-with-tensors.rst @@ -46,7 +46,7 @@ If your tensors have a fixed shape, Ray Data represents batches as regular ndarr Batches of variable-shape tensors ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -If your tensors vary in shape, Ray Data represents batches as ragged arrays. +If your tensors vary in shape, Ray Data represents batches as arrays of object dtype. .. doctest:: @@ -58,7 +58,7 @@ If your tensors vary in shape, Ray Data represents batches as ragged arrays. >>> batch["image"].dtype dtype('O') -Elements of ragged arrays are regular ndarrays. +The individual elements of these object arrays are regular ndarrays. .. doctest:: @@ -69,6 +69,43 @@ Elements of ragged arrays are regular ndarrays. >>> batch["image"][3].shape # doctest: +SKIP (333, 465, 3) +.. _transforming_tensors: + +Transforming tensor data +------------------------ + +Call :meth:`~ray.data.Dataset.map` or :meth:`~ray.data.Dataset.map_batches` to transform tensor data. + +.. 
testcode:: + + from typing import Any, Dict + + import ray + import numpy as np + + ds = ray.data.read_images("s3://anonymous@air-example-data/AnimalDetection") + + def increase_brightness(row: Dict[str, Any]) -> Dict[str, Any]: + row["image"] = np.clip(row["image"] + 4, 0, 255) + return row + + # Increase the brightness, record at a time. + ds.map(increase_brightness) + + def batch_increase_brightness(batch: Dict[str, np.ndarray]) -> Dict: + batch["image"] = np.clip(batch["image"] + 4, 0, 255) + return batch + + # Increase the brightness, batch at a time. + ds.map_batches(batch_increase_brightness) + +In this example, we return ``np.ndarray`` directly as the output. Ray Data will also treat +returned lists of ``np.ndarray`` and objects implementing ``__array__`` (e.g., ``torch.Tensor``) +as tensor data. + +For more information on transforming data, read +:ref:`Transforming data `. + Saving tensor data ------------------ @@ -102,29 +139,3 @@ Save tensor data in Parquet or Numpy files. Other formats aren't supported. ds.write_numpy("/tmp/simple.npy", column="image") For more information on saving data, read :ref:`Saving data `. - -.. _transforming_variable_tensors: - -Transforming variable-shape tensor data ---------------------------------------- - -Call :meth:`~ray.data.Dataset.map` to transform variable-shape tensor data. Don't use -:meth:`~ray.data.Dataset.map_batches`. - -.. testcode:: - - from typing import Any, Dict - - import ray - import numpy as np - - ds = ray.data.read_images("s3://anonymous@air-example-data/AnimalDetection") - - def increase_brightness(row: Dict[str, Any]) -> Dict[str, Any]: - row["image"] = np.clip(row["image"] + 4, 0, 255) - return row - - ds.map(increase_brightness) - -For more information on transforming data, read -:ref:`Transforming data `. 
diff --git a/python/ray/data/BUILD b/python/ray/data/BUILD index 5be74db39003..1007bc1333a0 100644 --- a/python/ray/data/BUILD +++ b/python/ray/data/BUILD @@ -42,6 +42,14 @@ py_test( deps = ["//:ray_lib", ":conftest"], ) +py_test( + name = "test_numpy_support", + size = "small", + srcs = ["tests/test_numpy_support.py"], + tags = ["team:data", "exclusive"], + deps = ["//:ray_lib", ":conftest"], +) + py_test( name = "test_nonstrict_mode", size = "small", diff --git a/python/ray/data/_internal/arrow_block.py b/python/ray/data/_internal/arrow_block.py index 80360c5cc2d8..9faf3fc8371e 100644 --- a/python/ray/data/_internal/arrow_block.py +++ b/python/ray/data/_internal/arrow_block.py @@ -19,6 +19,10 @@ from ray.air.constants import TENSOR_COLUMN_NAME from ray._private.utils import _get_pyarrow_version from ray.data._internal.arrow_ops import transform_polars, transform_pyarrow +from ray.data._internal.numpy_support import ( + convert_udf_returns_to_numpy, + is_valid_udf_return, +) from ray.data._internal.table_block import ( TableBlockAccessor, TableBlockBuilder, @@ -163,7 +167,7 @@ def numpy_to_block( if isinstance(batch, np.ndarray): batch = {TENSOR_COLUMN_NAME: batch} elif not isinstance(batch, collections.abc.Mapping) or any( - not isinstance(col, (list, np.ndarray)) for col in batch.values() + not is_valid_udf_return(col) for col in batch.values() ): raise ValueError( "Batch must be an ndarray or dictionary of ndarrays when converting " @@ -172,18 +176,8 @@ def numpy_to_block( ) new_batch = {} for col_name, col in batch.items(): - if isinstance(col, list): - # Try to convert list values into an numpy array via - # np.array(), so users don't need to manually cast. - # NOTE: we don't cast generic iterables, since types like - # `str` are also Iterable. - try: - col = np.array(col) - except Exception: - raise ValueError( - "Failed to convert column values to numpy array: " - f"({_truncated_repr(col)})." - ) + # Coerce to np.ndarray format if possible. 
+ col = convert_udf_returns_to_numpy(col) # Use Arrow's native *List types for 1-dimensional ndarrays. if col.dtype.type is np.object_ or col.ndim > 1: try: diff --git a/python/ray/data/_internal/numpy_support.py b/python/ray/data/_internal/numpy_support.py new file mode 100644 index 000000000000..5c9c24f682d8 --- /dev/null +++ b/python/ray/data/_internal/numpy_support.py @@ -0,0 +1,85 @@ +from typing import Any + +import numpy as np + +import ray +from ray.data._internal.dataset_logger import DatasetLogger +from ray.data._internal.util import _truncated_repr +from ray.air.util.tensor_extensions.utils import create_ragged_ndarray + +logger = DatasetLogger(__name__) + + +def is_array_like(value: Any) -> bool: + """Checks whether objects are array-like, excluding numpy scalars.""" + + return hasattr(value, "__array__") and hasattr(value, "__len__") + + +def is_valid_udf_return(udf_return_col: Any) -> bool: + """Check whether a UDF column is valid. + + Valid columns must either be a list of elements, or an array-like object. + """ + + return isinstance(udf_return_col, list) or is_array_like(udf_return_col) + + +def convert_udf_returns_to_numpy(udf_return_col: Any) -> Any: + """Convert UDF columns (output of map_batches) to numpy, if possible. + + This includes lists of scalars, objects supporting the array protocol, and lists + of objects supporting the array protocol, such as `[1, 2, 3]`, `Tensor([1, 2, 3])`, + and `[array(1), array(2), array(3)]`. + + Returns: + The input as an np.ndarray if possible, otherwise the original input. + + Raises: + ValueError if an input was array-like but we failed to convert it to an array. + """ + + if isinstance(udf_return_col, np.ndarray): + # No copy/conversion needed, just keep it verbatim. + return udf_return_col + + ctx = ray.data.DataContext.get_current() + if not ctx.strict_mode: + # Legacy compat. 
+ return np.array(udf_return_col) + + if isinstance(udf_return_col, list): + # Try to convert list values into an numpy array via + # np.array(), so users don't need to manually cast. + # NOTE: we don't cast generic iterables, since types like + # `str` are also Iterable. + try: + # Try to cast the inner scalars to numpy as well, to avoid unnecessarily + # creating an inefficient array of array of object dtype. + if all(is_valid_udf_return(e) for e in udf_return_col): + udf_return_col = [np.array(e) for e in udf_return_col] + shapes = set() + if all(isinstance(e, np.ndarray) for e in udf_return_col): + for e in udf_return_col: + shapes.add(e.shape) + if len(shapes) > 1: + # This util works around some limitations of np.array(dtype=object). + udf_return_col = create_ragged_ndarray(udf_return_col) + else: + udf_return_col = np.array(udf_return_col) + except Exception as e: + raise ValueError( + "Failed to convert column values to numpy array: " + f"({_truncated_repr(udf_return_col)}): {e}." + ) + elif hasattr(udf_return_col, "__array__"): + # Converts other array-like objects such as torch.Tensor. + try: + udf_return_col = np.array(udf_return_col) + except Exception as e: + raise ValueError( + "Failed to convert column values to numpy array: " + f"({_truncated_repr(udf_return_col)}): {e}." 
+ ) + + return udf_return_col diff --git a/python/ray/data/_internal/planner/map_batches.py b/python/ray/data/_internal/planner/map_batches.py index d1a8c09896bf..e661c07d89bf 100644 --- a/python/ray/data/_internal/planner/map_batches.py +++ b/python/ray/data/_internal/planner/map_batches.py @@ -5,6 +5,7 @@ from ray.data._internal.block_batching import batch_blocks from ray.data._internal.execution.interfaces import TaskContext from ray.data._internal.output_buffer import BlockOutputBuffer +from ray.data._internal.numpy_support import is_valid_udf_return from ray.data._internal.util import _truncated_repr from ray.data.block import UserDefinedFunction, Block, DataBatch from ray.data.context import DEFAULT_BATCH_SIZE, DataContext @@ -52,7 +53,7 @@ def validate_batch(batch: Block) -> None: if isinstance(batch, collections.abc.Mapping): for key, value in list(batch.items()): - if not isinstance(value, (np.ndarray, list)): + if not is_valid_udf_return(value): raise ValueError( f"Error validating {_truncated_repr(batch)}: " "The `fn` you passed to `map_batches` returned a " diff --git a/python/ray/data/_internal/table_block.py b/python/ray/data/_internal/table_block.py index 0b842edc612f..63efe612e49d 100644 --- a/python/ray/data/_internal/table_block.py +++ b/python/ray/data/_internal/table_block.py @@ -8,6 +8,7 @@ from ray.data.block import Block, BlockAccessor from ray.data.row import TableRow from ray.data._internal.block_builder import BlockBuilder +from ray.data._internal.numpy_support import is_array_like from ray.data._internal.size_estimator import SizeEstimator from ray.data._internal.util import _is_tensor_schema @@ -46,6 +47,7 @@ def __init__(self, block_type): self._block_type = block_type def add(self, item: Union[dict, TableRow, np.ndarray]) -> None: + ctx = ray.data.DataContext.get_current() if isinstance(item, TableRow): item = item.as_pydict() elif isinstance(item, np.ndarray): @@ -70,6 +72,12 @@ def add(self, item: Union[dict, TableRow, np.ndarray]) 
-> None: self._column_names = item_column_names for key, value in item.items(): + if ( + ctx.strict_mode + and is_array_like(value) + and not isinstance(value, np.ndarray) + ): + value = np.array(value) self._columns[key].append(value) self._num_rows += 1 self._compact_if_needed() diff --git a/python/ray/data/dataset.py b/python/ray/data/dataset.py index d030f131fd0c..7c38f3c7999a 100644 --- a/python/ray/data/dataset.py +++ b/python/ray/data/dataset.py @@ -3215,7 +3215,7 @@ def to_tf( .. warning:: If your dataset contains ragged tensors, this method errors. To prevent - errors, :ref:`resize your tensors `. + errors, :ref:`resize your tensors `. Examples: >>> import ray diff --git a/python/ray/data/iterator.py b/python/ray/data/iterator.py index 2252f2dd2a7d..41e18f5d8347 100644 --- a/python/ray/data/iterator.py +++ b/python/ray/data/iterator.py @@ -632,7 +632,7 @@ def to_tf( .. warning:: If your dataset contains ragged tensors, this method errors. To prevent - errors, :ref:`resize your tensors `. + errors, :ref:`resize your tensors `. 
Examples: >>> import ray diff --git a/python/ray/data/tests/test_numpy_support.py b/python/ray/data/tests/test_numpy_support.py new file mode 100644 index 000000000000..23dad677cf52 --- /dev/null +++ b/python/ray/data/tests/test_numpy_support.py @@ -0,0 +1,129 @@ +import numpy as np +import torch +import pytest + +import ray +from ray.data.tests.conftest import * # noqa +from ray.tests.conftest import * # noqa + + +class UserObj: + def __eq__(self, other): + return isinstance(other, UserObj) + + +def do_map_batches(data): + ds = ray.data.range(1) + ds = ds.map_batches(lambda x: {"output": data}) + return ds.take_batch()["output"] + + +def assert_structure_equals(a, b): + assert type(a) == type(b), (type(a), type(b)) + assert type(a[0]) == type(b[0]), (type(a[0]), type(b[0])) # noqa: E721 + assert a.dtype == b.dtype + assert a.shape == b.shape + for i in range(len(a)): + assert np.array_equiv(a[i], b[i]) + + +def test_list_of_scalars(ray_start_regular_shared): + data = [1, 2, 3] + output = do_map_batches(data) + assert_structure_equals(output, np.array([1, 2, 3], dtype=np.int64)) + + +def test_list_of_numpy_scalars(ray_start_regular_shared): + data = [np.int64(1), np.int64(2), np.int64(3)] + output = do_map_batches(data) + assert_structure_equals(output, np.array([1, 2, 3], dtype=np.int64)) + + +def test_list_of_objects(ray_start_regular_shared): + data = [1, 2, 3, UserObj()] + output = do_map_batches(data) + assert_structure_equals(output, np.array([1, 2, 3, UserObj()])) + + +def test_array_like(ray_start_regular_shared): + data = torch.Tensor([1, 2, 3]) + output = do_map_batches(data) + assert_structure_equals(output, np.array([1.0, 2.0, 3.0], dtype=np.float32)) + + +def test_list_of_arrays(ray_start_regular_shared): + data = [np.array([1, 2, 3]), np.array([4, 5, 6])] + output = do_map_batches(data) + assert_structure_equals(output, np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int64)) + + +def test_list_of_array_like(ray_start_regular_shared): + data = 
[torch.Tensor([1, 2, 3]), torch.Tensor([4, 5, 6])] + output = do_map_batches(data) + assert_structure_equals(output, np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32)) + + +def test_ragged_array_like(ray_start_regular_shared): + data = [torch.Tensor([1, 2, 3]), torch.Tensor([1, 2])] + output = do_map_batches(data) + assert_structure_equals( + output, np.array([np.array([1, 2, 3]), np.array([1, 2])], dtype=object) + ) + + +def test_ragged_lists(ray_start_regular_shared): + data = [[1, 2, 3], [1, 2]] + output = do_map_batches(data) + assert_structure_equals( + output, np.array([np.array([1, 2, 3]), np.array([1, 2])], dtype=object) + ) + + +def test_scalar_numpy(ray_start_regular_shared): + data = np.int64(1) + ds = ray.data.range(2) + ds = ds.map(lambda x: {"output": data}) + output = ds.take_batch()["output"] + assert_structure_equals(output, np.array([1, 1], dtype=np.int64)) + + +def test_scalar_arrays(ray_start_regular_shared): + data = np.array([1, 2, 3]) + ds = ray.data.range(2) + ds = ds.map(lambda x: {"output": data}) + output = ds.take_batch()["output"] + assert_structure_equals(output, np.array([[1, 2, 3], [1, 2, 3]], dtype=np.int64)) + + +def test_scalar_array_like(ray_start_regular_shared): + data = torch.Tensor([1, 2, 3]) + ds = ray.data.range(2) + ds = ds.map(lambda x: {"output": data}) + output = ds.take_batch()["output"] + assert_structure_equals(output, np.array([[1, 2, 3], [1, 2, 3]], dtype=np.float32)) + + +def test_scalar_ragged_arrays(ray_start_regular_shared): + data = [np.array([1, 2, 3]), np.array([1, 2])] + ds = ray.data.range(2) + ds = ds.map(lambda x: {"output": data[x["id"]]}) + output = ds.take_batch()["output"] + assert_structure_equals( + output, np.array([np.array([1, 2, 3]), np.array([1, 2])], dtype=object) + ) + + +def test_scalar_ragged_array_like(ray_start_regular_shared): + data = [torch.Tensor([1, 2, 3]), torch.Tensor([1, 2])] + ds = ray.data.range(2) + ds = ds.map(lambda x: {"output": data[x["id"]]}) + output = 
ds.take_batch()["output"] + assert_structure_equals( + output, np.array([np.array([1, 2, 3]), np.array([1, 2])], dtype=object) + ) + + +if __name__ == "__main__": + import sys + + sys.exit(pytest.main(["-v", __file__])) diff --git a/python/ray/data/tests/test_tensor.py b/python/ray/data/tests/test_tensor.py index eb31502720ea..cf0967ea1adf 100644 --- a/python/ray/data/tests/test_tensor.py +++ b/python/ray/data/tests/test_tensor.py @@ -214,10 +214,14 @@ def test_batch_tensors(ray_start_regular_shared): import torch ds = ray.data.from_items([torch.tensor([0, 0]) for _ in range(40)], parallelism=40) - res = "MaterializedDataset(num_blocks=40, num_rows=40, schema={item: object})" + res = ( + "MaterializedDataset(\n" + " num_blocks=40,\n" + " num_rows=40,\n" + " schema={item: numpy.ndarray(shape=(2,), dtype=int64)}\n" + ")" + ) assert str(ds) == res, str(ds) - with pytest.raises(pa.lib.ArrowInvalid): - next(ds.iter_batches(batch_format="pyarrow")) df = next(ds.iter_batches(batch_format="pandas")) assert df.to_dict().keys() == {"item"} diff --git a/python/ray/train/torch/torch_detection_predictor.py b/python/ray/train/torch/torch_detection_predictor.py index 7de5eee0934a..4e6ff2dd9ca9 100644 --- a/python/ray/train/torch/torch_detection_predictor.py +++ b/python/ray/train/torch/torch_detection_predictor.py @@ -4,7 +4,6 @@ import numpy as np import torch -from ray.air.util.tensor_extensions.utils import create_ragged_ndarray from ray.train._internal.dl_predictor import TensorDtype from ray.train.torch.torch_predictor import TorchPredictor from ray.util.annotations import PublicAPI @@ -134,7 +133,5 @@ def _convert_outputs_to_ndarray_batch( batch = collections.defaultdict(list) for output in outputs: for key, value in output.items(): - batch[key].append(value.cpu().detach().numpy()) - for key, value in batch.items(): - batch[key] = create_ragged_ndarray(value) + batch[key].append(value.cpu().detach()) return batch From 37f7f2ef51cfa37c345dadcee8d7e35636eb1fc7 Mon Sep 17 
00:00:00 2001 From: Sihan Wang Date: Sat, 13 May 2023 05:21:34 -0700 Subject: [PATCH 373/424] [Serve] Add multiplex support (#34941) Introduce multiplex API. @serve.multiplexed(num_models_per_replica=0) @serve.get_model_id router & controller & deployment_state change will be in followed up pr. Usage: --- python/ray/serve/BUILD | 8 ++ python/ray/serve/__init__.py | 4 + python/ray/serve/_private/client.py | 1 - python/ray/serve/api.py | 97 ++++++++++++++++++++++++ python/ray/serve/tests/test_multiplex.py | 28 +++++++ 5 files changed, 137 insertions(+), 1 deletion(-) create mode 100644 python/ray/serve/tests/test_multiplex.py diff --git a/python/ray/serve/BUILD b/python/ray/serve/BUILD index 6ac79f95de11..c4495b9cd4f7 100644 --- a/python/ray/serve/BUILD +++ b/python/ray/serve/BUILD @@ -574,3 +574,11 @@ py_test( tags = ["exclusive", "team:serve"], deps = [":serve_lib"], ) + +py_test( + name = "test_multiplex", + size = "medium", + srcs = serve_tests_srcs, + tags = ["exclusive", "team:serve"], + deps = [":serve_lib"], +) \ No newline at end of file diff --git a/python/ray/serve/__init__.py b/python/ray/serve/__init__.py index 0d188e673355..061374e29671 100644 --- a/python/ray/serve/__init__.py +++ b/python/ray/serve/__init__.py @@ -15,6 +15,8 @@ Application, BuiltApplication, Deployment, + multiplexed, + get_multiplexed_model_id, ) from ray.serve.air_integrations import PredictorDeployment from ray.serve.batching import batch @@ -48,4 +50,6 @@ "Application", "BuiltApplication", "Deployment", + "multiplexed", + "get_multiplexed_model_id", ] diff --git a/python/ray/serve/_private/client.py b/python/ray/serve/_private/client.py index 19faa09455a3..d8f04cb3fa2d 100644 --- a/python/ray/serve/_private/client.py +++ b/python/ray/serve/_private/client.py @@ -102,7 +102,6 @@ def shutdown(self) -> None: # Shut down handles for k in list(self.handle_cache): - self.handle_cache[k].stop_metrics_pusher() del self.handle_cache[k] if ray.is_initialized() and not self._shutdown: diff 
--git a/python/ray/serve/api.py b/python/ray/serve/api.py index 1785dda9ed2d..737a62b26b75 100644 --- a/python/ray/serve/api.py +++ b/python/ray/serve/api.py @@ -49,6 +49,7 @@ from ray.serve._private import api as _private_api + logger = logging.getLogger(__file__) @@ -571,3 +572,99 @@ def delete(name: str, _blocking: bool = True): """ client = get_global_client() client.delete_apps([name], blocking=_blocking) + + +@PublicAPI(stability="alpha") +def multiplexed( + func: Optional[Callable[..., Any]] = None, max_num_models_per_replica: int = 3 +): + """[EXPERIMENTAL] Defines a function or method used to load multiplexed + models in a replica. + + The function can be standalone function or a method of a class. The + function must have exactly one argument, the model id of type `str` for the + model to be loaded. + + It is required to define the function with `async def` and the function must be + an async function. It is recommended to define coroutines for long running + IO tasks in the function to avoid blocking the event loop. + + The multiplexed function is called to load a model with the given model ID when + necessary. + + When the number of models in one replica is larger than max_num_models_per_replica, + the models will be unloaded using an LRU policy. + + If you want to release resources after the model is loaded, you can define + a `__del__` method in your model class. The `__del__` method will be called when + the model is unloaded. + + Example: + + .. code-block:: python + from ray import serve + + @serve.deployment + class MultiplexedDeployment: + + def __init__(self): + # Define s3 base path to load models. + self.s3_base_path = "s3://my_bucket/my_models" + + @serve.multiplexed(max_num_models_per_replica=5) + async def load_model(self, model_id: str) -> Any: + # Load model with the given tag + # You can use any model loading library here + # and return the loaded model. load_from_s3 is + # a placeholder function. 
+ return load_from_s3(model_id) + + async def __call__(self, request): + # Get the model_id from the request context. + model_id = serve.get_multiplexed_model_id() + # Load the model for the requested model_id. + # If the model is already cached locally, + # this will just be a dictionary lookup. + model = await self.load_model(model_id) + return model(request) + + + Args: + max_num_models_per_replica: the maximum number of models + to be loaded on each replica. By default, it is 3, which + means that each replica can cache up to 3 models. You can + set it to a larger number if you have enough memory on + the node resource, in opposite, you can set it to a smaller + number if you want to save memory on the node resource. + """ + + raise NotImplementedError("Multiplexed deployment is not supported yet.") + + +@PublicAPI(stability="alpha") +def get_multiplexed_model_id() -> str: + """[EXPERIMENTAL] Get the multiplexed model ID for the current request. + + This is used with a function decorated with `@serve.multiplexed` + to retrieve the model ID for the current request. + + .. code-block:: python + import ray + from ray import serve + import requests + + # Set the multiplexed model id with the key + # "ray_serve_multiplexed_model_id" in the request + # headers when sending requests to the http proxy. + requests.get("http://localhost:8000", + headers={"ray_serve_multiplexed_model_id": "model_1"}) + # This can also be set when using `RayServeHandle`. + handle.options(multiplexed_model_id="model_1").remote("blablabla") + + # In your deployment code, you can retrieve the model id from + # `get_multiplexed_model_id()`. 
+ @serve.deployment + def my_deployment_function(request): + assert serve.get_multiplexed_model_id() == "model_1" + """ + raise NotImplementedError("get_multiplexed_model_id API is not supported yet.") diff --git a/python/ray/serve/tests/test_multiplex.py b/python/ray/serve/tests/test_multiplex.py new file mode 100644 index 000000000000..c1eb44d2c066 --- /dev/null +++ b/python/ray/serve/tests/test_multiplex.py @@ -0,0 +1,28 @@ +import pytest + +from ray import serve + + +def test_multiplexed(): + """Test multiplexed API.""" + + with pytest.raises(NotImplementedError): + + @serve.deployment + class Model: + @serve.multiplexed + def get_model(self, model_id: str): + pass + + +def test_get_multiplexed_model_id(): + """Test get_multiplexed_model_id API.""" + + with pytest.raises(NotImplementedError): + serve.get_multiplexed_model_id() + + +if __name__ == "__main__": + import sys + + sys.exit(pytest.main(["-v", "-s", __file__])) From a2448fae0721e3e1e6e4afb11977347f6ec04938 Mon Sep 17 00:00:00 2001 From: Alan Guo Date: Sun, 14 May 2023 04:36:59 -0700 Subject: [PATCH 374/424] Add support for multi-tab log viewer (#35280) Adds log views to Actor detail page, serve replica page, http proxy page. Adds links to job detail pages for driverless jobs Multi-tab log viewer is expandable Fixes side tab layouts for Job -> Actors page and Cluster -> node page. 
--- dashboard/client/src/App.tsx | 66 ++++--- .../client/src/common/CollapsibleSection.tsx | 84 +++++--- .../client/src/common/MultiTabLogViewer.tsx | 147 ++++++++++++++ dashboard/client/src/common/ProfilingLink.tsx | 9 +- dashboard/client/src/common/Section.tsx | 13 +- .../client/src/components/ActorTable.tsx | 15 +- .../client/src/pages/actor/ActorDetail.tsx | 33 ++-- .../client/src/pages/actor/ActorLogs.tsx | 40 ++++ .../src/pages/actor/hook/useActorDetail.ts | 15 ++ dashboard/client/src/pages/job/JobDetail.tsx | 70 ++----- .../src/pages/job/JobDetailActorPage.tsx | 24 ++- .../src/pages/job/JobDetailInfoPage.tsx | 3 - .../client/src/pages/job/JobDetailLayout.tsx | 19 +- .../job/JobDriverLogs.component.test.tsx | 10 + .../client/src/pages/job/JobDriverLogs.tsx | 93 +++++---- dashboard/client/src/pages/job/JobRow.tsx | 31 ++- dashboard/client/src/pages/log/hooks.ts | 32 ++++ .../src/pages/serve/ServeDeploymentRow.tsx | 13 +- .../pages/serve/ServeReplicaDetailPage.tsx | 106 +++++++---- .../serve/ServeSystemActorDetailPage.tsx | 179 ++++++++++++++++++ .../src/pages/serve/ServeSystemDetailRows.tsx | 47 +---- .../pages/serve/hook/useServeApplications.ts | 24 +++ dashboard/client/src/service/log.ts | 4 +- dashboard/client/src/theme.ts | 2 +- dashboard/client/src/type/serve.ts | 1 + 25 files changed, 792 insertions(+), 288 deletions(-) create mode 100644 dashboard/client/src/common/MultiTabLogViewer.tsx create mode 100644 dashboard/client/src/pages/actor/ActorLogs.tsx create mode 100644 dashboard/client/src/pages/log/hooks.ts create mode 100644 dashboard/client/src/pages/serve/ServeSystemActorDetailPage.tsx diff --git a/dashboard/client/src/App.tsx b/dashboard/client/src/App.tsx index dc7f07c4ee90..a08a26200f2b 100644 --- a/dashboard/client/src/App.tsx +++ b/dashboard/client/src/App.tsx @@ -10,11 +10,11 @@ import Loading from "./pages/exception/Loading"; import JobList, { JobsLayout } from "./pages/job"; import { JobDetailChartsPage } from "./pages/job/JobDetail"; 
import { - JobDetailActorLayout, + JobDetailActorDetailWrapper, JobDetailActorsPage, } from "./pages/job/JobDetailActorPage"; import { JobDetailInfoPage } from "./pages/job/JobDetailInfoPage"; -import { JobDetailLayout } from "./pages/job/JobDetailLayout"; +import { JobDetailLayout, JobPage } from "./pages/job/JobDetailLayout"; import { MainNavLayout } from "./pages/layout/MainNavLayout"; import { SideTabPage } from "./pages/layout/SideTabLayout"; import { LogsLayout } from "./pages/log/Logs"; @@ -32,6 +32,7 @@ import { import { ServeApplicationsListPage } from "./pages/serve/ServeApplicationsListPage"; import { ServeLayout } from "./pages/serve/ServeLayout"; import { ServeReplicaDetailPage } from "./pages/serve/ServeReplicaDetailPage"; +import { ServeHttpProxyDetailPage } from "./pages/serve/ServeSystemActorDetailPage"; import { getNodeList } from "./service/node"; import { lightTheme } from "./theme"; @@ -165,39 +166,46 @@ const App = () => { } path="" /> - } path="nodes/:id" /> + } path="nodes/:id" /> } path="jobs"> } path="" /> - } path=":id"> - - - - } - path="info" - /> + } path=":id"> + } path=""> + + + + } + path="info" + /> + + + + } + path="" + /> + + + + } + path="actors" + /> + - - + + + } - path="" + path="actors/:actorId" /> - - - - } - path="actors" - > - } path="" /> - } path=":actorId" /> - } path="actors"> @@ -207,6 +215,10 @@ const App = () => { } path="metrics" /> } path="serve"> } path="" /> + } + path="httpProxies/:httpProxyId" + /> } path="applications/:applicationName" diff --git a/dashboard/client/src/common/CollapsibleSection.tsx b/dashboard/client/src/common/CollapsibleSection.tsx index 9d2135e46e2d..0dac753422f8 100644 --- a/dashboard/client/src/common/CollapsibleSection.tsx +++ b/dashboard/client/src/common/CollapsibleSection.tsx @@ -25,12 +25,6 @@ const useStyles = makeStyles((theme) => width: 24, height: 24, }, - body: { - marginTop: theme.spacing(1), - }, - bodyHidden: { - display: "none", - }, }), ); @@ -76,13 +70,6 @@ export const 
CollapsibleSection = forwardRef< const classes = useStyles(); const [internalExpanded, setInternalExpanded] = useState(startExpanded); const finalExpanded = expanded !== undefined ? expanded : internalExpanded; - const [rendered, setRendered] = useState(finalExpanded); - - useEffect(() => { - if (finalExpanded) { - setRendered(true); - } - }, [finalExpanded]); const handleExpandClick = () => { onExpandButtonClick?.(); @@ -106,16 +93,69 @@ export const CollapsibleSection = forwardRef< {icon} - {(finalExpanded || (keepRendered && rendered)) && ( -
    - {children} -
    - )} + + {children} +
    ); }, ); + +const useHideableBlockStyles = makeStyles((theme) => + createStyles({ + body: { + marginTop: theme.spacing(1), + }, + bodyHidden: { + display: "none", + }, + }), +); + +type HideableBlockProps = PropsWithChildren< + { + visible: boolean; + /** + * An optimization to not avoid re-rendering the contents of the collapsible section. + * When enabled, we will keep the content around when collapsing but hide it via css. + */ + keepRendered?: boolean; + } & ClassNameProps +>; + +/** + * Component that can be hidden depending on a passed in prop. Supports an optimization + * to keep the component rendered (but not visible) when hidden to avoid re-rendering + * when component is shown again. + */ +export const HideableBlock = ({ + visible, + keepRendered, + children, +}: HideableBlockProps) => { + const classes = useHideableBlockStyles(); + + // visible represents whether the component is viewable in the browser. + // Rendered represents whether the DOM elements exist in the DOM tree. + // If !visible && rendered, then the elements are in the DOM but are + // not drawn via CSS visibility rules. + const [rendered, setRendered] = useState(visible); + + useEffect(() => { + if (visible) { + setRendered(true); + } + }, [visible]); + + // Optimization to keep the component rendered (but not visible) when hidden + // to avoid re-rendering when component is shown again. + return visible || (keepRendered && rendered) ? ( +
    + {children} +
    + ) : null; +}; diff --git a/dashboard/client/src/common/MultiTabLogViewer.tsx b/dashboard/client/src/common/MultiTabLogViewer.tsx new file mode 100644 index 000000000000..19b26b7a2226 --- /dev/null +++ b/dashboard/client/src/common/MultiTabLogViewer.tsx @@ -0,0 +1,147 @@ +import { + Box, + createStyles, + IconButton, + makeStyles, + Tab, + Tabs, + Typography, +} from "@material-ui/core"; +import React, { useState } from "react"; +import { RiExternalLinkLine, RiSortAsc, RiSortDesc } from "react-icons/ri"; +import { Link } from "react-router-dom"; +import { useStateApiLogs } from "../pages/log/hooks"; +import { LogViewer } from "../pages/log/LogViewer"; +import { HideableBlock } from "./CollapsibleSection"; +import { ClassNameProps } from "./props"; + +const useStyles = makeStyles((theme) => + createStyles({ + tabs: { + borderBottom: `1px solid ${theme.palette.divider}`, + }, + }), +); + +export type MultiTabLogViewerTabDetails = { + title: string; + nodeId: string | null; + filename?: string; +}; + +export type MultiTabLogViewerProps = { + tabs: MultiTabLogViewerTabDetails[]; + otherLogsLink?: string; +} & ClassNameProps; + +export const MultiTabLogViewer = ({ + tabs, + otherLogsLink, + className, +}: MultiTabLogViewerProps) => { + const classes = useStyles(); + const [value, setValue] = useState(tabs[0]?.title); + const [expanded, setExpanded] = useState(false); + + const currentTab = tabs.find((tab) => tab.title === value); + + return ( +
    + + + { + setValue(newValue); + }} + indicatorColor="primary" + > + {tabs.map(({ title }) => ( + + ))} + {otherLogsLink && ( + + Other logs   + + } + onClick={(event) => { + // Prevent the tab from changing + setValue(value); + }} + component={Link} + to={otherLogsLink} + target="_blank" + rel="noopener noreferrer" + /> + )} + + + {!currentTab ? ( + Please select a tab. + ) : ( + tabs.map(({ title, nodeId, filename }) => ( + + + + )) + )} + + { + setExpanded(!expanded); + }} + > + {expanded ? : } + + +
    + ); +}; + +export type StateApiLogViewerProps = { + nodeId?: string | null; + filename?: string; + height?: number; +}; + +export const StateApiLogViewer = ({ + nodeId, + filename, + height = 300, +}: StateApiLogViewerProps) => { + const { downloadUrl, log, path, refresh } = useStateApiLogs(nodeId, filename); + return typeof log === "string" ? ( + { + refresh(); + }} + /> + ) : ( + Failed to load + ); +}; diff --git a/dashboard/client/src/common/ProfilingLink.tsx b/dashboard/client/src/common/ProfilingLink.tsx index bf432a712e87..323d4a063b18 100644 --- a/dashboard/client/src/common/ProfilingLink.tsx +++ b/dashboard/client/src/common/ProfilingLink.tsx @@ -1,3 +1,4 @@ +import { Link } from "@material-ui/core"; import React, { PropsWithChildren } from "react"; import { ClassNameProps } from "./props"; @@ -19,14 +20,14 @@ export const CpuProfilingLink = ({ } return ( - Stack Trace{type ? ` (${type})` : ""} - + ); }; @@ -40,13 +41,13 @@ export const CpuStackTraceLink = ({ } return ( - CPU Flame Graph{type ? 
` (${type})` : ""} - + ); }; diff --git a/dashboard/client/src/common/Section.tsx b/dashboard/client/src/common/Section.tsx index 63b1332af88a..bcbdada9ad36 100644 --- a/dashboard/client/src/common/Section.tsx +++ b/dashboard/client/src/common/Section.tsx @@ -6,6 +6,7 @@ import { Paper, Typography, } from "@material-ui/core"; +import classNames from "classnames"; import React, { PropsWithChildren } from "react"; import { ClassNameProps } from "./props"; @@ -15,11 +16,15 @@ const useStyles = makeStyles((theme) => padding: theme.spacing(2), height: "100%", }, + contentContainerNoTopPadding: { + paddingTop: 0, + }, }), ); type SectionProps = { title?: string; + noTopPadding?: boolean; } & ClassNameProps & BoxProps; @@ -27,6 +32,7 @@ export const Section = ({ title, children, className, + noTopPadding = false, ...props }: PropsWithChildren) => { const classes = useStyles(); @@ -38,7 +44,12 @@ export const Section = ({ {title} )} - + {children} diff --git a/dashboard/client/src/components/ActorTable.tsx b/dashboard/client/src/components/ActorTable.tsx index 4508888e16af..339e223f92cf 100644 --- a/dashboard/client/src/components/ActorTable.tsx +++ b/dashboard/client/src/components/ActorTable.tsx @@ -16,7 +16,6 @@ import { SearchOutlined } from "@material-ui/icons"; import Autocomplete from "@material-ui/lab/Autocomplete"; import Pagination from "@material-ui/lab/Pagination"; import React, { useContext, useMemo, useState } from "react"; -import { Link } from "react-router-dom"; import { GlobalContext } from "../App"; import { DurationText } from "../common/DurationText"; import { ActorLink } from "../common/links"; @@ -436,14 +435,16 @@ const ActorTable = ({ {ipLogMap[address?.ipAddress] && ( - Log - +
    ({ @@ -34,11 +34,13 @@ const useStyle = makeStyles((theme) => ({ tab: { marginBottom: theme.spacing(2), }, + tasksSection: { + marginTop: theme.spacing(4), + }, })); const ActorDetailPage = () => { const classes = useStyle(); - const { ipLogMap } = useContext(GlobalContext); const { params, actorDetail, msg, isLoading } = useActorDetail(); if (!actorDetail) { @@ -177,15 +179,6 @@ const ActorDetailPage = () => { label: "Actions", content: (
    - - Log - -
    { }, ]} /> - - + +
    + +
    +
    + +
    + +
    ); diff --git a/dashboard/client/src/pages/actor/ActorLogs.tsx b/dashboard/client/src/pages/actor/ActorLogs.tsx new file mode 100644 index 000000000000..cea3bfe5bf97 --- /dev/null +++ b/dashboard/client/src/pages/actor/ActorLogs.tsx @@ -0,0 +1,40 @@ +import React from "react"; +import { + MultiTabLogViewer, + MultiTabLogViewerTabDetails, +} from "../../common/MultiTabLogViewer"; +import { ActorDetail } from "../../type/actor"; + +export type ActorLogsProps = { + actor: Pick; +}; + +export const ActorLogs = ({ + actor: { + jobId, + pid, + address: { workerId, rayletId }, + }, +}: ActorLogsProps) => { + const tabs: MultiTabLogViewerTabDetails[] = [ + { + title: "stderr", + nodeId: rayletId, + // TODO(aguo): Have API return the log file name. + filename: `worker-${workerId}-${jobId}-${pid}.err`, + }, + { + title: "stdout", + nodeId: rayletId, + // TODO(aguo): Have API return the log file name. + filename: `worker-${workerId}-${jobId}-${pid}.out`, + }, + { + title: "system", + nodeId: rayletId, + // TODO(aguo): Have API return the log file name. + filename: `python-core-worker-${workerId}_${pid}.log`, + }, + ]; + return ; +}; diff --git a/dashboard/client/src/pages/actor/hook/useActorDetail.ts b/dashboard/client/src/pages/actor/hook/useActorDetail.ts index df9b590175d0..ec51d6a53eff 100644 --- a/dashboard/client/src/pages/actor/hook/useActorDetail.ts +++ b/dashboard/client/src/pages/actor/hook/useActorDetail.ts @@ -5,6 +5,21 @@ import { GlobalContext } from "../../../App"; import { API_REFRESH_INTERVAL_MS } from "../../../common/constants"; import { ActorResp, getActor } from "../../../service/actor"; +export const useFetchActor = (actorId: string | null) => { + return useSWR( + actorId ? 
["useActorDetail", actorId] : null, + async ([_, actorId]) => { + const actor_resp = await getActor(actorId); + const data: ActorResp = actor_resp?.data; + const { data: rspData } = data; + + if (rspData.detail) { + return rspData.detail; + } + }, + ); +}; + export const useActorDetail = () => { const params = useParams() as { actorId: string }; const [msg, setMsg] = useState("Loading the actor infos..."); diff --git a/dashboard/client/src/pages/job/JobDetail.tsx b/dashboard/client/src/pages/job/JobDetail.tsx index b0194856d6fc..460cf0eacb83 100644 --- a/dashboard/client/src/pages/job/JobDetail.tsx +++ b/dashboard/client/src/pages/job/JobDetail.tsx @@ -1,7 +1,5 @@ import { Box, makeStyles } from "@material-ui/core"; -import React, { useContext, useRef, useState } from "react"; -import { Link } from "react-router-dom"; -import { GlobalContext } from "../../App"; +import React, { useRef, useState } from "react"; import { CollapsibleSection } from "../../common/CollapsibleSection"; import { Section } from "../../common/Section"; import { @@ -11,7 +9,7 @@ import { import Loading from "../../components/Loading"; import { StatusChip } from "../../components/StatusChip"; import TitleCard from "../../components/TitleCard"; -import { NestedJobProgressLink, UnifiedJob } from "../../type/job"; +import { NestedJobProgressLink } from "../../type/job"; import ActorList from "../actor/ActorList"; import { NodeCountCard } from "../overview/cards/NodeCountCard"; import PlacementGroupList from "../state/PlacementGroup"; @@ -120,17 +118,15 @@ export const JobDetailChartsPage = () => { - {job.type === "SUBMISSION" && ( - -
    - -
    -
    - )} + +
    + +
    +
    {job.job_id && ( {
    ); }; - -type JobLogsLinkProps = { - job: Pick< - UnifiedJob, - | "driver_agent_http_address" - | "driver_info" - | "job_id" - | "submission_id" - | "type" - >; -}; - -export const JobLogsLink = ({ - job: { driver_agent_http_address, driver_info, job_id, submission_id, type }, -}: JobLogsLinkProps) => { - const { ipLogMap } = useContext(GlobalContext); - - if (type === "SUBMISSION") { - // For submission jobs, send them to the job detail page because we have logs there already. - const link = `/jobs/${job_id ? job_id : submission_id}`; - return Log; - } - - let link: string | undefined; - - if (driver_agent_http_address) { - link = `/logs/${encodeURIComponent(`${driver_agent_http_address}/logs`)}`; - } else if (driver_info && ipLogMap[driver_info.node_ip_address]) { - link = `/logs/${encodeURIComponent(ipLogMap[driver_info.node_ip_address])}`; - } - - if (link) { - link += `?fileName=${ - type === "DRIVER" ? job_id : `driver-${submission_id}` - }`; - return ( - - Log - - ); - } - - return -; -}; diff --git a/dashboard/client/src/pages/job/JobDetailActorPage.tsx b/dashboard/client/src/pages/job/JobDetailActorPage.tsx index af701d7c4edf..91e23dbce2ff 100644 --- a/dashboard/client/src/pages/job/JobDetailActorPage.tsx +++ b/dashboard/client/src/pages/job/JobDetailActorPage.tsx @@ -1,7 +1,6 @@ import { makeStyles } from "@material-ui/core"; -import React from "react"; +import React, { PropsWithChildren } from "react"; -import { Outlet } from "react-router-dom"; import { Section } from "../../common/Section"; import ActorList from "../actor/ActorList"; import { MainNavPageInfo } from "../layout/mainNavContext"; @@ -16,10 +15,23 @@ const useStyle = makeStyles((theme) => ({ export const JobDetailActorsPage = () => { const classes = useStyle(); - const { params } = useJobDetail(); + const { job, params } = useJobDetail(); + + const pageInfo = job + ? { + title: "Actors", + id: "actors", + path: job.job_id ? 
`/jobs/${job.job_id}/actors` : undefined, + } + : { + title: "Actors", + id: "actors", + path: undefined, + }; return (
    +
    @@ -27,7 +39,9 @@ export const JobDetailActorsPage = () => { ); }; -export const JobDetailActorLayout = () => { +export const JobDetailActorDetailWrapper = ({ + children, +}: PropsWithChildren<{}>) => { const { job } = useJobDetail(); const pageInfo = job @@ -45,7 +59,7 @@ export const JobDetailActorLayout = () => { return (
    - + {children}
    ); }; diff --git a/dashboard/client/src/pages/job/JobDetailInfoPage.tsx b/dashboard/client/src/pages/job/JobDetailInfoPage.tsx index 9cdf851c6895..bd6c746dd139 100644 --- a/dashboard/client/src/pages/job/JobDetailInfoPage.tsx +++ b/dashboard/client/src/pages/job/JobDetailInfoPage.tsx @@ -20,7 +20,6 @@ import { UnifiedJob } from "../../type/job"; import { MainNavPageInfo } from "../layout/mainNavContext"; import { useJobDetail } from "./hook/useJobDetail"; -import { JobLogsLink } from "./JobDetail"; const useStyle = makeStyles((theme) => ({ root: { @@ -178,8 +177,6 @@ export const JobMetadataSection = ({ job }: JobMetadataSectionProps) => { label: "Actions", content: (
    - -
    { +export const JobPage = () => { const { job } = useJobDetail(); + const jobId = job?.job_id ?? job?.submission_id; const pageInfo = job ? { - title: job.job_id ?? "Job", - pageTitle: job.job_id ? `${job.job_id} | Job` : undefined, + title: jobId ?? "Job", + pageTitle: jobId ? `${jobId} | Job` : undefined, id: "job-detail", - path: job.job_id ? `/jobs/${job.job_id}` : undefined, + path: jobId ? `/jobs/${jobId}` : undefined, } : { title: "Job", id: "job-detail", path: undefined, }; + return ( +
    + + +
    + ); +}; +export const JobDetailLayout = () => { return ( - { render( , + { wrapper: TEST_APP_WRAPPER }, ); await screen.findByText(/log line/); diff --git a/dashboard/client/src/pages/job/JobDriverLogs.tsx b/dashboard/client/src/pages/job/JobDriverLogs.tsx index 34e42ee2bc0f..5603000d785d 100644 --- a/dashboard/client/src/pages/job/JobDriverLogs.tsx +++ b/dashboard/client/src/pages/job/JobDriverLogs.tsx @@ -1,62 +1,59 @@ -import { Typography } from "@material-ui/core"; -import React from "react"; -import useSWR from "swr"; -import { getStateApiDownloadLogUrl, getStateApiLog } from "../../service/log"; +import React, { useContext } from "react"; +import { GlobalContext } from "../../App"; +import { MultiTabLogViewer } from "../../common/MultiTabLogViewer"; import { UnifiedJob } from "../../type/job"; -import { LogViewer } from "../log/LogViewer"; -const useDriverLogs = ( - job: Pick, -) => { - const { driver_node_id, submission_id } = job; +type JobDriverLogsProps = { + job: Pick< + UnifiedJob, + | "job_id" + | "driver_node_id" + | "submission_id" + | "driver_agent_http_address" + | "driver_info" + >; +}; +export const JobDriverLogs = ({ job }: JobDriverLogsProps) => { + const { driver_node_id, submission_id } = job; const filename = submission_id ? `job-driver-${submission_id}.log` : undefined; - const downloadUrl = - driver_node_id && filename - ? getStateApiDownloadLogUrl(driver_node_id, filename) - : undefined; + const { ipLogMap } = useContext(GlobalContext); - const { - data: log, - isLoading, - mutate, - } = useSWR( - driver_node_id && filename - ? ["useDriverLogs", driver_node_id, filename] - : null, - async ([_, node_id, filename]) => { - return getStateApiLog(node_id, filename); - }, - ); + let link: string | undefined; - return { - log: isLoading ? "Loading..." 
: log, - downloadUrl, - refresh: mutate, - path: filename, - }; -}; + if (job.driver_agent_http_address) { + link = `/logs/${encodeURIComponent( + `${job.driver_agent_http_address}/logs`, + )}`; + } else if (job.driver_info && ipLogMap[job.driver_info.node_ip_address]) { + link = `/logs/${encodeURIComponent( + ipLogMap[job.driver_info.node_ip_address], + )}`; + } -type JobDriverLogsProps = { - job: Pick; -}; + if (link && job.job_id) { + link += `?fileName=${job.job_id}`; + } else { + // Don't show "other logs" link if link is not available + // or job_id does not exist. + link = undefined; + } -export const JobDriverLogs = ({ job }: JobDriverLogsProps) => { - const { downloadUrl, log, path, refresh } = useDriverLogs(job); - return typeof log === "string" ? ( - { - refresh(); - }} + // TODO(aguo): Support showing message for jobs not created via ray job submit + // instead of hiding the driver logs + return ( + - ) : ( - Failed to load ); }; diff --git a/dashboard/client/src/pages/job/JobRow.tsx b/dashboard/client/src/pages/job/JobRow.tsx index b26272c52f8b..fb5d1f856c34 100644 --- a/dashboard/client/src/pages/job/JobRow.tsx +++ b/dashboard/client/src/pages/job/JobRow.tsx @@ -1,7 +1,7 @@ -import { TableCell, TableRow, Tooltip } from "@material-ui/core"; +import { Link, TableCell, TableRow, Tooltip } from "@material-ui/core"; import { makeStyles } from "@material-ui/core/styles"; import React from "react"; -import { Link } from "react-router-dom"; +import { Link as RouterLink } from "react-router-dom"; import { DurationText } from "../../common/DurationText"; import { formatDateFromTimeMs } from "../../common/formatUtils"; import { JobStatusWithIcon } from "../../common/JobStatus"; @@ -11,7 +11,6 @@ import { } from "../../common/ProfilingLink"; import { UnifiedJob } from "../../type/job"; import { useJobProgress } from "./hook/useJobProgress"; -import { JobLogsLink } from "./JobDetail"; import { MiniTaskProgressBar } from "./TaskProgressBar"; const useStyles = 
makeStyles((theme) => ({ @@ -57,10 +56,22 @@ export const JobRow = ({ job }: JobRowProps) => { } })(); + const jobId = job_id ? job_id : submission_id; + return ( - {job_id ? {job_id} : "-"} + {job_id ? ( + + {job_id} + + ) : submission_id ? ( + + (no ray driver) + + ) : ( + "(no ray driver)" + )} {submission_id ?? "-"} @@ -85,10 +96,14 @@ export const JobRow = ({ job }: JobRowProps) => { {progressBar} - {/* TODO(aguo): Also show logs for the job id instead - of just the submission's logs */} - -
    + {jobId && ( + + + Log + +
    +
    + )} { + const downloadUrl = + driver_node_id && filename + ? getStateApiDownloadLogUrl(driver_node_id, filename) + : undefined; + + const { + data: log, + isLoading, + mutate, + } = useSWR( + driver_node_id && filename + ? ["useDriverLogs", driver_node_id, filename] + : null, + async ([_, node_id, filename]) => { + return getStateApiLog(node_id, filename); + }, + ); + + return { + log: isLoading ? "Loading..." : log, + downloadUrl, + refresh: mutate, + path: filename, + }; +}; diff --git a/dashboard/client/src/pages/serve/ServeDeploymentRow.tsx b/dashboard/client/src/pages/serve/ServeDeploymentRow.tsx index 24e9386b406b..9c749b8bf7d7 100644 --- a/dashboard/client/src/pages/serve/ServeDeploymentRow.tsx +++ b/dashboard/client/src/pages/serve/ServeDeploymentRow.tsx @@ -22,7 +22,6 @@ import { ServeReplica, } from "../../type/serve"; import { useViewServeDeploymentMetricsButtonUrl } from "./ServeDeploymentMetricsSection"; -import { ServeReplicaLogsLink } from "./ServeReplicaDetailPage"; const useStyles = makeStyles((theme) => createStyles({ @@ -144,18 +143,24 @@ export const ServeReplicaRow = ({ - {replica_id} - + - - + + Log + {metricsUrl && (
    diff --git a/dashboard/client/src/pages/serve/ServeReplicaDetailPage.tsx b/dashboard/client/src/pages/serve/ServeReplicaDetailPage.tsx index be40900f8625..a4c4086b6758 100644 --- a/dashboard/client/src/pages/serve/ServeReplicaDetailPage.tsx +++ b/dashboard/client/src/pages/serve/ServeReplicaDetailPage.tsx @@ -1,16 +1,26 @@ -import { createStyles, Link, makeStyles, Typography } from "@material-ui/core"; -import React, { useContext } from "react"; -import { Link as RouterLink, useParams } from "react-router-dom"; -import { GlobalContext } from "../../App"; +import { + CircularProgress, + createStyles, + makeStyles, + Typography, +} from "@material-ui/core"; +import React from "react"; +import { useParams } from "react-router-dom"; import { CodeDialogButton } from "../../common/CodeDialogButton"; import { CollapsibleSection } from "../../common/CollapsibleSection"; import { DurationText } from "../../common/DurationText"; import { formatDateFromTimeMs } from "../../common/formatUtils"; import { generateActorLink, generateNodeLink } from "../../common/links"; +import { + MultiTabLogViewer, + MultiTabLogViewerTabDetails, +} from "../../common/MultiTabLogViewer"; +import { Section } from "../../common/Section"; import Loading from "../../components/Loading"; import { MetadataSection } from "../../components/MetadataSection"; import { StatusChip } from "../../components/StatusChip"; -import { ServeDeployment, ServeReplica } from "../../type/serve"; +import { ServeReplica } from "../../type/serve"; +import { useFetchActor } from "../actor/hook/useActorDetail"; import { MainNavPageInfo } from "../layout/mainNavContext"; import TaskList from "../state/task"; import { useServeReplicaDetails } from "./hook/useServeApplications"; @@ -83,12 +93,6 @@ export const ServeReplicaDetailPage = () => { label: "State", content: , }, - { - label: "Logs", - content: ( - - ), - }, { label: "Actor ID", content: { @@ -147,6 +151,11 @@ export const ServeReplicaDetailPage = () => { }, ]} /> 
+ +
    + +
    +
    { ); }; -export type ServeReplicaLogsLinkProps = { - replica: ServeReplica; - deployment: ServeDeployment; +type ServeReplicaLogsProps = { + replica: Pick; }; -export const ServeReplicaLogsLink = ({ - replica: { replica_id, node_ip }, - deployment: { name: deploymentName }, -}: ServeReplicaLogsLinkProps) => { - const { ipLogMap } = useContext(GlobalContext); - - let link: string | undefined; +const ServeReplicaLogs = ({ + replica: { log_file_path, node_id, actor_id }, +}: ServeReplicaLogsProps) => { + const { data: actor } = useFetchActor(actor_id); - if (node_ip && ipLogMap[node_ip]) { - // TODO(aguo): Clean up this logic after re-writing the log viewer - const logsRoot = ipLogMap[node_ip].endsWith("/logs") - ? ipLogMap[node_ip].substring( - 0, - ipLogMap[node_ip].length - "/logs".length, - ) - : ipLogMap[node_ip]; - // TODO(aguo): Have API return the location of the logs. - const path = `/logs/serve/deployment_${deploymentName}_${replica_id}.log`; - link = `/logs/${encodeURIComponent(logsRoot)}/${encodeURIComponent(path)}`; + if (!actor) { + return ; } - if (link) { - return ( - - Log - - ); - } + const { + address: { workerId }, + pid, + jobId, + } = actor; - return -; + const tabs: MultiTabLogViewerTabDetails[] = [ + { + title: "stderr", + nodeId: node_id, + // TODO(aguo): Have API return the log file name. + filename: `worker-${workerId}-${jobId}-${pid}.err`, + }, + { + title: "stdout", + nodeId: node_id, + // TODO(aguo): Have API return the log file name. + filename: `worker-${workerId}-${jobId}-${pid}.out`, + }, + { + title: "system", + nodeId: node_id, + // TODO(aguo): Have API return the log file name. + filename: `python-core-worker-${workerId}_${pid}.log`, + }, + // TODO(aguo): enable this once state-api logs supports files with # in the name. + // ...(log_file_path + // ? [ + // { + // title: "replica", + // nodeId: node_id, + // filename: log_file_path.startsWith("/") + // ? 
log_file_path.substring(1) + // : log_file_path, + // }, + // ] + // : []), + ]; + return ; }; diff --git a/dashboard/client/src/pages/serve/ServeSystemActorDetailPage.tsx b/dashboard/client/src/pages/serve/ServeSystemActorDetailPage.tsx new file mode 100644 index 000000000000..d702345e69e6 --- /dev/null +++ b/dashboard/client/src/pages/serve/ServeSystemActorDetailPage.tsx @@ -0,0 +1,179 @@ +import { Typography } from "@material-ui/core"; +import React from "react"; +import { useParams } from "react-router-dom"; +import { CollapsibleSection } from "../../common/CollapsibleSection"; +import { generateActorLink, generateNodeLink } from "../../common/links"; +import { + MultiTabLogViewer, + MultiTabLogViewerTabDetails, +} from "../../common/MultiTabLogViewer"; +import { Section } from "../../common/Section"; +import { MetadataSection } from "../../components/MetadataSection"; +import { StatusChip } from "../../components/StatusChip"; +import { ActorDetail } from "../../type/actor"; +import { ServeHttpProxy } from "../../type/serve"; +import { useFetchActor } from "../actor/hook/useActorDetail"; +import { MainNavPageInfo } from "../layout/mainNavContext"; +import { useServeHTTPProxyDetails } from "./hook/useServeApplications"; + +type ActorInfo = { + type: "httpProxy"; + detail: ServeHttpProxy; +}; + +type ServeSystemActorDetailProps = { + actor: ActorInfo; +}; + +export const ServeHttpProxyDetailPage = () => { + const { httpProxyId } = useParams(); + + const { httpProxy } = useServeHTTPProxyDetails(httpProxyId); + + if (!httpProxy) { + return ( + + HTTPProxyActor with id "{httpProxyId}" not found. + + ); + } + + return ( +
    + + +
    + ); +}; + +export const ServeSystemActorDetail = ({ + actor, +}: ServeSystemActorDetailProps) => { + const name = `HTTPProxyActor:${actor.detail.actor_id}`; + + const { data: fetchedActor } = useFetchActor(actor.detail.actor_id); + + return ( +
    + + ), + }, + { + label: "Actor ID", + content: { + value: actor.detail.actor_id, + copyableValue: actor.detail.actor_id, + link: actor.detail.actor_id + ? generateActorLink(actor.detail.actor_id) + : undefined, + }, + }, + { + label: "Actor name", + content: { + value: actor.detail.actor_name, + }, + }, + { + label: "Worker ID", + content: { + value: actor.detail.worker_id, + }, + }, + { + label: "Node ID", + content: { + value: actor.detail.node_id, + copyableValue: actor.detail.node_id, + link: actor.detail.node_id + ? generateNodeLink(actor.detail.node_id) + : undefined, + }, + }, + { + label: "Node IP", + content: { + value: actor.detail.node_ip, + }, + }, + ]} + /> + {fetchedActor && actor.detail.log_file_path && ( + +
    + +
    +
    + )} +
    + ); +}; + +type ServeSystemActorLogsProps = { + type: "controller" | "httpProxy"; + actor: Pick; + systemLogFilePath: string; +}; + +const ServeSystemActorLogs = ({ + type, + actor: { + jobId, + pid, + address: { workerId, rayletId }, + }, + systemLogFilePath, +}: ServeSystemActorLogsProps) => { + const tabs: MultiTabLogViewerTabDetails[] = [ + { + title: type === "controller" ? "Controller" : "HTTP Proxy", + nodeId: rayletId, + filename: systemLogFilePath.startsWith("/") + ? systemLogFilePath.substring(1) + : systemLogFilePath, + }, + { + title: "Actor Logs (stderr)", + nodeId: rayletId, + // TODO(aguo): Have API return the log file name. + filename: `worker-${workerId}-${jobId}-${pid}.err`, + }, + { + title: "Actor Logs (stdout)", + nodeId: rayletId, + // TODO(aguo): Have API return the log file name. + filename: `worker-${workerId}-${jobId}-${pid}.out`, + }, + { + title: "Actor Logs (system)", + nodeId: rayletId, + // TODO(aguo): Have API return the log file name. + filename: `python-core-worker-${workerId}_${pid}.log`, + }, + ]; + return ; +}; diff --git a/dashboard/client/src/pages/serve/ServeSystemDetailRows.tsx b/dashboard/client/src/pages/serve/ServeSystemDetailRows.tsx index 230916e992ec..3ede9a318d2b 100644 --- a/dashboard/client/src/pages/serve/ServeSystemDetailRows.tsx +++ b/dashboard/client/src/pages/serve/ServeSystemDetailRows.tsx @@ -6,9 +6,8 @@ import { TableRow, Tooltip, } from "@material-ui/core"; -import React, { useContext } from "react"; +import React from "react"; import { Link as RouterLink } from "react-router-dom"; -import { GlobalContext } from "../../App"; import { StatusChip } from "../../components/StatusChip"; import { ServeHttpProxy } from "../../type/serve"; @@ -35,12 +34,18 @@ export const ServeHttpProxyRow = ({ httpProxy }: ServeHttpProxyRowProps) => { return ( - HTTPProxyActor:{node_id} + + + HTTPProxyActor:{node_id} + + - + + Log + @@ -59,37 +64,3 @@ export const ServeHttpProxyRow = ({ httpProxy }: ServeHttpProxyRowProps) => 
{ ); }; - -export type ServeReplicaLogsLinkProps = { - httpProxy: ServeHttpProxy; -}; - -export const ServeHttpProxyLogLink = ({ - httpProxy: { log_file_path, node_ip }, -}: ServeReplicaLogsLinkProps) => { - const { ipLogMap } = useContext(GlobalContext); - - let link: string | undefined; - - if (node_ip && ipLogMap[node_ip]) { - // TODO(aguo): Clean up this logic after re-writing the log viewer - const logsRoot = ipLogMap[node_ip].endsWith("/logs") - ? ipLogMap[node_ip].substring( - 0, - ipLogMap[node_ip].length - "/logs".length, - ) - : ipLogMap[node_ip]; - const path = `/logs${log_file_path}`; - link = `/logs/${encodeURIComponent(logsRoot)}/${encodeURIComponent(path)}`; - } - - if (link) { - return ( - - Log - - ); - } - - return -; -}; diff --git a/dashboard/client/src/pages/serve/hook/useServeApplications.ts b/dashboard/client/src/pages/serve/hook/useServeApplications.ts index e54545a79e94..e04488fe22c1 100644 --- a/dashboard/client/src/pages/serve/hook/useServeApplications.ts +++ b/dashboard/client/src/pages/serve/hook/useServeApplications.ts @@ -189,3 +189,27 @@ export const useServeReplicaDetails = ( error, }; }; + +export const useServeHTTPProxyDetails = (httpProxyId: string | undefined) => { + const { data, error, isLoading } = useSWR( + "useServeHTTPProxyDetails", + async () => { + const rsp = await getServeApplications(); + + if (rsp) { + return rsp.data; + } + }, + { refreshInterval: API_REFRESH_INTERVAL_MS }, + ); + + const httpProxy = httpProxyId ? data?.http_proxies?.[httpProxyId] : undefined; + + // Need to expose loading because it's not clear if undefined values + // for application, deployment, or replica means loading or missing data. 
+ return { + loading: isLoading, + httpProxy, + error, + }; +}; diff --git a/dashboard/client/src/service/log.ts b/dashboard/client/src/service/log.ts index 0b7084f8018e..f4deeff96286 100644 --- a/dashboard/client/src/service/log.ts +++ b/dashboard/client/src/service/log.ts @@ -51,7 +51,9 @@ export const getLogDetail = async (url: string) => { }; export const getStateApiDownloadLogUrl = (nodeId: string, fileName: string) => - `api/v0/logs/file?node_id=${nodeId}&filename=${fileName}&lines=-1`; + `api/v0/logs/file?node_id=${encodeURIComponent( + nodeId, + )}&filename=${encodeURIComponent(fileName)}&lines=-1`; export const getStateApiLog = async (nodeId: string, fileName: string) => { const resp = await get(getStateApiDownloadLogUrl(nodeId, fileName)); diff --git a/dashboard/client/src/theme.ts b/dashboard/client/src/theme.ts index a3ec06564f61..37e935117a6f 100644 --- a/dashboard/client/src/theme.ts +++ b/dashboard/client/src/theme.ts @@ -83,7 +83,7 @@ export const lightTheme = createTheme({ ...basicTheme, palette: { primary: { - main: "#538DF9", + main: "#036DCF", }, secondary: lightBlue, success: { diff --git a/dashboard/client/src/type/serve.ts b/dashboard/client/src/type/serve.ts index aa8d52159cf9..ea96988e76f4 100644 --- a/dashboard/client/src/type/serve.ts +++ b/dashboard/client/src/type/serve.ts @@ -97,6 +97,7 @@ export type ServeHttpProxy = { node_ip: string; actor_id: string; actor_name: string; + worker_id: string; status: ServeHTTPProxyStatus; log_file_path: string | null; }; From 61cdb561aac9694b64f785293c36ea3c92ba5d40 Mon Sep 17 00:00:00 2001 From: Hao Chen Date: Sun, 14 May 2023 14:54:33 -0700 Subject: [PATCH 375/424] Fix "ImportError: sys.meta_path is None, Python is likely shutting down" (#35304) Signed-off-by: Hao Chen --- python/ray/_private/client_mode_hook.py | 8 ++++++-- python/ray/data/dataset.py | 2 -- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/python/ray/_private/client_mode_hook.py 
b/python/ray/_private/client_mode_hook.py index cbc0944514ea..052aa01b0b75 100644 --- a/python/ray/_private/client_mode_hook.py +++ b/python/ray/_private/client_mode_hook.py @@ -87,10 +87,14 @@ def client_mode_hook(func: callable): as a decorator. """ + from ray.util.client import ray + @wraps(func) def wrapper(*args, **kwargs): - from ray.util.client import ray - + # NOTE(hchen): DO NOT use "import" inside this function. + # Because when it's called within a `__del__` method, this error + # will be raised (see #35114): + # ImportError: sys.meta_path is None, Python is likely shutting down. if client_mode_should_convert(): # Legacy code # we only convert init function if RAY_CLIENT_MODE=1 diff --git a/python/ray/data/dataset.py b/python/ray/data/dataset.py index 7c38f3c7999a..88ba111615ea 100644 --- a/python/ray/data/dataset.py +++ b/python/ray/data/dataset.py @@ -4395,8 +4395,6 @@ def __setstate__(self, state): self._current_executor = None def __del__(self): - if sys.meta_path is None: - return if self._current_executor and ray is not None and ray.is_initialized(): self._current_executor.shutdown() From 5f02650b222dae12f6afb46bbf87d856decff153 Mon Sep 17 00:00:00 2001 From: Hao Chen Date: Sun, 14 May 2023 14:56:47 -0700 Subject: [PATCH 376/424] [data] Add GPU data ingestion nightly test (#34986) Signed-off-by: Hao Chen --- .../dataset/data_ingest_benchmark.py | 87 +++++++++++++++---- .../data_ingest_benchmark_compute_gpu.yaml | 15 ++++ ...data_ingest_benchmark_compute_gpu_gce.yaml | 17 ++++ release/release_tests.yaml | 59 ++++++++++++- 4 files changed, 160 insertions(+), 18 deletions(-) create mode 100644 release/nightly_tests/dataset/data_ingest_benchmark_compute_gpu.yaml create mode 100644 release/nightly_tests/dataset/data_ingest_benchmark_compute_gpu_gce.yaml diff --git a/release/nightly_tests/dataset/data_ingest_benchmark.py b/release/nightly_tests/dataset/data_ingest_benchmark.py index aa2bf213259b..089dff4998ac 100644 --- 
a/release/nightly_tests/dataset/data_ingest_benchmark.py +++ b/release/nightly_tests/dataset/data_ingest_benchmark.py @@ -10,23 +10,39 @@ from ray.data import DatasetPipeline import pandas as pd +import torch GiB = 1024 * 1024 * 1024 -@ray.remote(num_cpus=0.5) +@ray.remote class ConsumingActor: def __init__(self, rank): self._rank = rank - def consume(self, split): - DoConsume(split, self._rank) + def consume( + self, + split, + use_gpu=False, + max_bytes_to_read=None, + ): + do_consume( + split, + self._rank, + use_gpu, + max_bytes_to_read, + ) def get_location(self): return ray.get_runtime_context().get_node_id() -def DoConsume(split, rank): +def do_consume( + split, + rank, + use_gpu=False, + max_bytes_to_read=None, +): prefetch_batches = 1 batch_size = 4096 num_epochs = 1 @@ -56,9 +72,16 @@ def generate_epochs(data, epochs: int): prefetch_blocks=prefetch_batches, batch_size=batch_size ) else: - batch_iterator = epoch_data.iter_batches( - prefetch_batches=prefetch_batches, batch_size=batch_size - ) + if not use_gpu: + batch_iterator = epoch_data.iter_batches( + prefetch_batches=prefetch_batches, batch_size=batch_size + ) + else: + batch_iterator = epoch_data.iter_torch_batches( + prefetch_batches=prefetch_batches, + batch_size=batch_size, + device="cuda", + ) for batch in batch_iterator: batch_delay = time.perf_counter() - batch_start @@ -68,11 +91,19 @@ def generate_epochs(data, epochs: int): bytes_read += int(batch.memory_usage(index=True, deep=True).sum()) elif isinstance(batch, np.ndarray): bytes_read += batch.nbytes + elif isinstance(batch, dict) and isinstance( + batch.get("data"), torch.Tensor + ): + tensor = batch["data"] + bytes_read += tensor.element_size() * tensor.nelement() else: # NOTE: This isn't recursive and will just return the size of # the object pointers if list of non-primitive types. 
bytes_read += sys.getsizeof(batch) batch_start = time.perf_counter() + if max_bytes_to_read is not None: + if bytes_read >= max_bytes_to_read: + break delta = time.perf_counter() - start print("Time to read all data", delta, "seconds") @@ -94,7 +125,7 @@ def generate_epochs(data, epochs: int): def make_ds(size_gb: int, parallelism: int = -1): # Dataset of 10KiB tensor records. total_size = 1024 * 1024 * 1024 * size_gb - record_dim = 1280 + record_dim = 1024 record_size = record_dim * 8 num_records = int(total_size / record_size) dataset = ray.data.range_tensor( @@ -104,28 +135,45 @@ def make_ds(size_gb: int, parallelism: int = -1): return dataset -def run_ingest_streaming(dataset_size_gb, num_workers): +def run_ingest_streaming(dataset_size_gb, num_workers, use_gpu, early_stop): ds = make_ds(dataset_size_gb) + resources = {"num_cpus": 0.5} + if use_gpu: + resources["num_gpus"] = 0.5 consumers = [ - ConsumingActor.options(scheduling_strategy="SPREAD").remote(i) + ConsumingActor.options(scheduling_strategy="SPREAD", **resources).remote(i) for i in range(num_workers) ] locality_hints = ray.get([actor.get_location.remote() for actor in consumers]) ds = ds.map_batches(lambda df: df * 2, batch_format="pandas") splits = ds.streaming_split(num_workers, equal=True, locality_hints=locality_hints) - future = [consumers[i].consume.remote(s) for i, s in enumerate(splits)] + max_bytes_to_read = None + if early_stop: + max_bytes_to_read = dataset_size_gb * GiB // num_workers // 2 + # Early stop when we've read half the dataset. 
+ future = [ + consumers[i].consume.remote( + s, + use_gpu, + max_bytes_to_read, + ) + for i, s in enumerate(splits) + ] ray.get(future) def run_ingest_bulk(dataset_size_gb, num_workers): ds = make_ds(dataset_size_gb, parallelism=200) consumers = [ - ConsumingActor.options(scheduling_strategy="SPREAD").remote(i) + ConsumingActor.options(scheduling_strategy="SPREAD", num_cpus=0.5).remote(i) for i in range(num_workers) ] ds = ds.map_batches(lambda df: df * 2, batch_format="pandas") splits = ds.split(num_workers, equal=True, locality_hints=consumers) - future = [consumers[i].consume.remote(s) for i, s in enumerate(splits)] + future = [ + consumers[i].consume.remote(s) + for i, s in enumerate(splits) + ] ray.get(future) # Example ballpark number for transformation (5s): @@ -146,7 +194,7 @@ def run_ingest_bulk(dataset_size_gb, num_workers): def run_ingest_dataset_pipeline(dataset_size_gb, num_workers): ds = make_ds(dataset_size_gb) consumers = [ - ConsumingActor.options(scheduling_strategy="SPREAD").remote(i) + ConsumingActor.options(scheduling_strategy="SPREAD", num_cpus=0.5).remote(i) for i in range(num_workers) ] p = ( @@ -155,7 +203,10 @@ def run_ingest_dataset_pipeline(dataset_size_gb, num_workers): .map_batches(lambda df: df * 2, batch_format="pandas") ) splits = p.split(num_workers, equal=True, locality_hints=consumers) - future = [consumers[i].consume.remote(s) for i, s in enumerate(splits)] + future = [ + consumers[i].consume.remote(s) + for i, s in enumerate(splits) + ] ray.get(future) # Example ballpark numbers: @@ -186,11 +237,15 @@ def run_ingest_dataset_pipeline(dataset_size_gb, num_workers): parser.add_argument("--dataset-size-gb", type=int, default=200) parser.add_argument("--streaming", action="store_true", default=False) parser.add_argument("--new_streaming", action="store_true", default=False) + parser.add_argument("--use-gpu", action="store_true", default=False) + parser.add_argument("--early-stop", action="store_true", default=False) args = 
parser.parse_args() start = time.time() if args.new_streaming: - run_ingest_streaming(args.dataset_size_gb, args.num_workers) + run_ingest_streaming( + args.dataset_size_gb, args.num_workers, args.use_gpu, args.early_stop + ) elif args.streaming: run_ingest_dataset_pipeline(args.dataset_size_gb, args.num_workers) else: diff --git a/release/nightly_tests/dataset/data_ingest_benchmark_compute_gpu.yaml b/release/nightly_tests/dataset/data_ingest_benchmark_compute_gpu.yaml new file mode 100644 index 000000000000..5ab624706d30 --- /dev/null +++ b/release/nightly_tests/dataset/data_ingest_benchmark_compute_gpu.yaml @@ -0,0 +1,15 @@ +cloud_id: {{env["ANYSCALE_CLOUD_ID"]}} +region: us-west-2 + +max_workers: 3 + +head_node_type: + name: head_node + instance_type: m5.2xlarge + +worker_node_types: + - name: worker_node + instance_type: g4dn.4xlarge + max_workers: 2 + min_workers: 2 + use_spot: false diff --git a/release/nightly_tests/dataset/data_ingest_benchmark_compute_gpu_gce.yaml b/release/nightly_tests/dataset/data_ingest_benchmark_compute_gpu_gce.yaml new file mode 100644 index 000000000000..58b28b6980ea --- /dev/null +++ b/release/nightly_tests/dataset/data_ingest_benchmark_compute_gpu_gce.yaml @@ -0,0 +1,17 @@ +cloud_id: {{env["ANYSCALE_CLOUD_ID"]}} +region: us-west1 +allowed_azs: + - us-west1-b + +max_workers: 3 + +head_node_type: + name: head_node + instance_type: n2-standard-8 + +worker_node_types: + - name: worker_node + instance_type: n1-standard-4-nvidia-tesla-t4-1 + max_workers: 2 + min_workers: 2 + use_spot: false diff --git a/release/release_tests.yaml b/release/release_tests.yaml index 864d874a3644..dc33f3ffb051 100644 --- a/release/release_tests.yaml +++ b/release/release_tests.yaml @@ -842,8 +842,8 @@ run: timeout: 4700 script: python test_myst_doc.py --path lightning-llm-finetuning-7b.ipynb - - + + - name: air_example_opt_deepspeed_batch_inference group: AIR examples working_dir: air_examples/opt_deepspeed_batch_inference @@ -5365,6 +5365,61 @@ 
cluster_env: app_config.yaml cluster_compute: data_ingest_benchmark_compute_gce.yaml +- name: streaming_data_ingest_benchmark_100gb_gpu + group: data-tests + working_dir: nightly_tests/dataset + + frequency: nightly + team: data + cluster: + cluster_env: app_config.yaml + cluster_compute: data_ingest_benchmark_compute_gpu.yaml + + run: + timeout: 300 + script: python data_ingest_benchmark.py --dataset-size-gb=100 --num-workers=4 --new_streaming --use-gpu + wait_for_nodes: + num_nodes: 3 + + variations: + - __suffix__: aws + - __suffix__: gce + env: gce + frequency: manual + cluster: + cluster_env: app_config.yaml + cluster_compute: data_ingest_benchmark_compute_gpu_gce.yaml + +# This test case will early stop the data ingestion iteration on the GPU actors. +# This is a common usage in PyTorch Lightning +# (https://lightning.ai/docs/pytorch/stable/common/trainer.html#limit-train-batches). +# There was a bug in Ray Data that caused GPU memoy leak (see #34819). +# We add this test case to cover this scenario. 
+- name: streaming_data_ingest_benchmark_100gb_gpu_early_stop + group: data-tests + working_dir: nightly_tests/dataset + + frequency: nightly + team: data + cluster: + cluster_env: app_config.yaml + cluster_compute: data_ingest_benchmark_compute_gpu.yaml + + run: + timeout: 300 + script: python data_ingest_benchmark.py --dataset-size-gb=100 --num-workers=4 --new_streaming --use-gpu --early-stop + wait_for_nodes: + num_nodes: 3 + + variations: + - __suffix__: aws + - __suffix__: gce + env: gce + frequency: manual + cluster: + cluster_env: app_config.yaml + cluster_compute: data_ingest_benchmark_compute_gpu_gce.yaml + - name: aggregate_benchmark group: data-tests working_dir: nightly_tests/dataset From 66012c30f9a5d0f6f84c800a5b88560f997ab0da Mon Sep 17 00:00:00 2001 From: Ricky Xu Date: Mon, 15 May 2023 13:09:57 +0800 Subject: [PATCH 377/424] [core][state][dashboard][log] Fix subdirectory log getting (#35283) We are not able to get logs from a subdirectory from state API. This PR fixed it. 
--- dashboard/modules/log/log_agent.py | 6 +-- dashboard/modules/log/log_manager.py | 2 +- dashboard/modules/state/state_head.py | 2 + .../ray/experimental/state/state_manager.py | 8 +--- python/ray/tests/test_state_api_log.py | 42 +++++++++++++++++++ 5 files changed, 50 insertions(+), 10 deletions(-) diff --git a/dashboard/modules/log/log_agent.py b/dashboard/modules/log/log_agent.py index 572d3b2c95f8..8df5417f062f 100644 --- a/dashboard/modules/log/log_agent.py +++ b/dashboard/modules/log/log_agent.py @@ -296,7 +296,7 @@ async def ListLogs(self, request, context): ) log_files = [] for p in path.glob(request.glob_filter): - log_files.append(p.name) + log_files.append(str(p.relative_to(path))) return reporter_pb2.ListLogsReply(log_files=log_files) @classmethod @@ -385,8 +385,8 @@ async def StreamLog(self, request, context): lines = request.lines if request.lines else 1000 task_id = request.task_id if request.HasField("task_id") else None - filepath = f"{self._dashboard_agent.log_dir}/{request.log_file_name}" - if not os.path.isfile(filepath): + filepath = Path(self._dashboard_agent.log_dir) / request.log_file_name + if not filepath.is_file(): await context.send_initial_metadata( [[log_consts.LOG_GRPC_ERROR, log_consts.FILE_NOT_FOUND]] ) diff --git a/dashboard/modules/log/log_manager.py b/dashboard/modules/log/log_manager.py index a8c2a11564b2..3f4915ee371d 100644 --- a/dashboard/modules/log/log_manager.py +++ b/dashboard/modules/log/log_manager.py @@ -283,7 +283,7 @@ async def resolve_filename( f"\tpid: {pid}\n" f"\tsuffix: {suffix}\n" ) - + logger.info(f"Resolved log file: {log_filename} on node {node_id}") return log_filename, node_id def _categorize_log_files(self, log_files: List[str]) -> Dict[str, List[str]]: diff --git a/dashboard/modules/state/state_head.py b/dashboard/modules/state/state_head.py index 79e13a078f4a..e132a4f3da8a 100644 --- a/dashboard/modules/state/state_head.py +++ b/dashboard/modules/state/state_head.py @@ -417,6 +417,8 @@ async def 
get_logs(self, req: aiohttp.web.Request): response.content_type = "text/plain" await response.prepare(req) + logger.info(f"Streaming logs with options: {options}") + # NOTE: The first byte indicates the success / failure of individual # stream. If the first byte is b"1", it means the stream was successful. # If it is b"0", it means it is failed. diff --git a/python/ray/experimental/state/state_manager.py b/python/ray/experimental/state/state_manager.py index 89d1e4340a37..9173c893b33f 100644 --- a/python/ray/experimental/state/state_manager.py +++ b/python/ray/experimental/state/state_manager.py @@ -440,11 +440,7 @@ async def stream_log( ), timeout=timeout, ) - await self._validate_stream(stream) - return stream - - @staticmethod - async def _validate_stream(stream): metadata = await stream.initial_metadata() if metadata.get(log_consts.LOG_GRPC_ERROR) == log_consts.FILE_NOT_FOUND: - raise ValueError('File "{log_file_name}" not found on node {node_id}') + raise ValueError(f'File "{log_file_name}" not found on node {node_id}') + return stream diff --git a/python/ray/tests/test_state_api_log.py b/python/ray/tests/test_state_api_log.py index 258be91f70bb..a4e6da1775cc 100644 --- a/python/ray/tests/test_state_api_log.py +++ b/python/ray/tests/test_state_api_log.py @@ -3,6 +3,7 @@ import sys import asyncio from typing import List +import urllib from unittest.mock import MagicMock import pytest @@ -11,6 +12,8 @@ from click.testing import CliRunner import grpc +from pathlib import Path + import ray from ray._private.test_utils import ( format_web_url, @@ -999,6 +1002,45 @@ def verify(): e.match(f"Given node id {node_id} is not available") +def test_log_get_subdir(ray_start_with_dashboard): + assert ( + wait_until_server_available(ray_start_with_dashboard.address_info["webui_url"]) + is True + ) + webui_url = ray_start_with_dashboard.address_info["webui_url"] + webui_url = format_web_url(webui_url) + node_id = list_nodes()[0]["node_id"] + + log_dir = 
ray._private.worker.global_worker.node.get_logs_dir_path() + subdir = "test_subdir" + file = "test_#file.log" + path = Path(log_dir) / subdir / file + path.parent.mkdir(parents=True, exist_ok=True) + path.write_text("test log") + + # HTTP endpoint + def verify(): + # Direct logs stream + response = requests.get( + webui_url + + f"/api/v0/logs/file?node_id={node_id}" + + f"&filename={urllib.parse.quote('test_subdir/test_#file.log')}" + ) + assert response.status_code == 200, response.reason + assert "test log" in response.text + return True + + wait_for_condition(verify) + + # get log SDK + def verify(): + logs = "".join(get_log(node_id=node_id, filename="test_subdir/test_#file.log")) + assert "test log" in logs + return True + + wait_for_condition(verify) + + def test_log_get(ray_start_cluster): cluster = ray_start_cluster cluster.add_node(num_cpus=0) From 362a74d2bc1d5e725fcb9d9d8c0d76abd39f10fb Mon Sep 17 00:00:00 2001 From: "Jack He (Github)" Date: Sun, 14 May 2023 22:18:32 -0700 Subject: [PATCH 378/424] [core][state][no_early_kickoff] Add "humanify" feature to StateSchema (#35059) This PR introduces a framework to address this issue: #31876. Essentially, we add a humanify() method to the base stateSchema class, and any subclasses would provide relevant format_fn as a metadata argument to any of its fields, and the humanify() method would aggregate the output from the lambdas. This PR is meant to introduce the general framework, and any additions (new format_fn) can be added by request. 
--- python/ray/experimental/state/common.py | 123 ++++++++++++++++++--- python/ray/experimental/state/state_cli.py | 5 + python/ray/tests/test_state_api.py | 13 +++ 3 files changed, 125 insertions(+), 16 deletions(-) diff --git a/python/ray/experimental/state/common.py b/python/ray/experimental/state/common.py index 3471ad03a35b..21ae132c44c2 100644 --- a/python/ray/experimental/state/common.py +++ b/python/ray/experimental/state/common.py @@ -1,3 +1,4 @@ +import datetime import json import logging import sys @@ -75,6 +76,43 @@ class SummaryResource(Enum): PredicateType = str # Literal["=", "!="] +class Humanify: + """A class containing default methods to + convert units into a human readable string.""" + + def timestamp(x: float): + """Converts miliseconds to a datetime object.""" + return str(datetime.datetime.fromtimestamp(x / 1000)) + + def memory(x: int): + """Converts raw bytes to a human readable memory size.""" + if x >= 2**30: + return str(format(x / (2**30), ".3f")) + " GiB" + elif x >= 2**20: + return str(format(x / (2**20), ".3f")) + " MiB" + elif x >= 2**10: + return str(format(x / (2**10), ".3f")) + " KiB" + return str(format(x, ".3f")) + " B" + + def duration(x: int): + """Converts miliseconds to a human readable duration.""" + return str(datetime.timedelta(milliseconds=x)) + + def events(events: List[dict]): + """Converts a list of task events into a human readable format.""" + for event in events: + if "created_ms" in event: + event["created_ms"] = Humanify.timestamp(event["created_ms"]) + return events + + def node_resources(resources: dict): + """Converts a node's resources into a human readable format.""" + for resource in resources: + if "memory" in resource: + resources[resource] = Humanify.memory(resources[resource]) + return resources + + @dataclass(init=True) class ListApiOptions: # Maximum number of entries to return @@ -144,7 +182,7 @@ class SummaryApiOptions: summary_by: Optional[str] = None -def state_column(*, filterable: bool, detail: 
bool = False, **kwargs): +def state_column(*, filterable: bool, detail: bool = False, format_fn=None, **kwargs): """A wrapper around dataclass.field to add additional metadata. The metadata is used to define detail / filterable option of @@ -155,15 +193,16 @@ def state_column(*, filterable: bool, detail: bool = False, **kwargs): filterable: If True, the column can be used for filtering. kwargs: The same kwargs for the `dataclasses.field` function. """ - m = {"detail": detail, "filterable": filterable} - + m = {"detail": detail, "filterable": filterable, "format_fn": format_fn} # Default for detail field is None since it could be missing. if detail and "default" not in kwargs: kwargs["default"] = None if "metadata" in kwargs: + # Metadata explicitly specified, so add detail and filterable if missing. kwargs["metadata"].update(m) else: + # Metadata not explicitly specified, so add it. kwargs["metadata"] = m return field(**kwargs) @@ -193,8 +232,32 @@ class State(StateSchema): # Returns {"column_a", "column_b"} s.columns() ``` + + In addition, the schema also provides a humanify abstract method to + convert the state object into something human readable, ready for printing. + + Subclasses should override this method, providing logic to convert its own fields + to something human readable, packaged and returned in a dict. + + Each field that wants to be humanified should include a 'format_fn' key in its + metadata dictionary. 
""" + @classmethod + def humanify(cls, state: dict) -> dict: + """Convert the given state object into something human readable.""" + for f in fields(cls): + if ( + f.metadata.get("format_fn") is not None + and f.name in state + and state[f.name] is not None + ): + try: + state[f.name] = f.metadata["format_fn"](state[f.name]) + except Exception as e: + logger.error(f"Failed to format {f.name}:{state[f.name]} with {e}") + return state + @classmethod def list_columns(cls, detail: bool = True) -> List[str]: """Return a list of columns.""" @@ -426,13 +489,19 @@ class NodeState(StateSchema): #: The name of the node if it is given by the name argument. node_name: str = state_column(filterable=True) #: The total resources of the node. - resources_total: dict = state_column(filterable=False) + resources_total: dict = state_column( + filterable=False, format_fn=Humanify.node_resources + ) #: The time when the node (raylet) starts. - start_time_ms: Optional[int] = state_column(filterable=False, detail=True) + start_time_ms: Optional[int] = state_column( + filterable=False, detail=True, format_fn=Humanify.timestamp + ) #: The time when the node exits. The timestamp could be delayed #: if the node is dead unexpectedly (could be delayed # up to 30 seconds). - end_time_ms: Optional[int] = state_column(filterable=False, detail=True) + end_time_ms: Optional[int] = state_column( + filterable=False, detail=True, format_fn=Humanify.timestamp + ) @dataclass(init=True) @@ -491,17 +560,25 @@ class WorkerState(StateSchema): #: -> worker_launched_time_ms (process started). #: -> start_time_ms (worker is ready to be used). #: -> end_time_ms (worker is destroyed). - worker_launch_time_ms: Optional[int] = state_column(filterable=False, detail=True) + worker_launch_time_ms: Optional[int] = state_column( + filterable=False, detail=True, format_fn=Humanify.timestamp + ) #: The time worker is succesfully launched #: -1 if the value doesn't exist. 
- worker_launched_time_ms: Optional[int] = state_column(filterable=False, detail=True) + worker_launched_time_ms: Optional[int] = state_column( + filterable=False, detail=True, format_fn=Humanify.timestamp + ) #: The time when the worker is started and initialized. #: 0 if the value doesn't exist. - start_time_ms: Optional[int] = state_column(filterable=False, detail=True) + start_time_ms: Optional[int] = state_column( + filterable=False, detail=True, format_fn=Humanify.timestamp + ) #: The time when the worker exits. The timestamp could be delayed #: if the worker is dead unexpectedly. #: 0 if the value doesn't exist. - end_time_ms: Optional[int] = state_column(filterable=False, detail=True) + end_time_ms: Optional[int] = state_column( + filterable=False, detail=True, format_fn=Humanify.timestamp + ) @dataclass(init=True) @@ -569,15 +646,27 @@ class TaskState(StateSchema): #: The list of events of the given task. #: Refer to src/ray/protobuf/common.proto for a detailed explanation of the state #: breakdowns and typical state transition flow. - events: Optional[List[dict]] = state_column(detail=True, filterable=False) + events: Optional[List[dict]] = state_column( + detail=True, filterable=False, format_fn=Humanify.events + ) #: The list of profile events of the given task. profiling_data: Optional[dict] = state_column(detail=True, filterable=False) #: The time when the task is created. A Unix timestamp in ms. - creation_time_ms: Optional[int] = state_column(detail=True, filterable=False) + creation_time_ms: Optional[int] = state_column( + detail=True, + filterable=False, + format_fn=Humanify.timestamp, + ) #: The time when the task starts to run. A Unix timestamp in ms. - start_time_ms: Optional[int] = state_column(detail=True, filterable=False) + start_time_ms: Optional[int] = state_column( + detail=True, + filterable=False, + format_fn=Humanify.timestamp, + ) #: The time when the task is finished or failed. A Unix timestamp in ms. 
- end_time_ms: Optional[int] = state_column(detail=True, filterable=False) + end_time_ms: Optional[int] = state_column( + detail=True, filterable=False, format_fn=Humanify.timestamp + ) #: The task logs info, e.g. offset into the worker log file when the task #: starts/finishes. task_log_info: Optional[dict] = state_column(detail=True, filterable=False) @@ -592,7 +681,7 @@ class ObjectState(StateSchema): #: The id of the object. object_id: str = state_column(filterable=True) #: The size of the object in mb. - object_size: int = state_column(filterable=True) + object_size: int = state_column(filterable=True, format_fn=Humanify.memory) #: The status of the task that creates the object. #: #: - NIL: We don't have a status for this task because we are not the owner or the @@ -651,7 +740,9 @@ class RuntimeEnvState(StateSchema): success: bool = state_column(filterable=True) #: The latency of creating the runtime environment. #: Available if the runtime env is successfully created. - creation_time_ms: Optional[float] = state_column(filterable=False) + creation_time_ms: Optional[float] = state_column( + filterable=False, format_fn=Humanify.timestamp + ) #: The node id of this runtime environment. node_id: str = state_column(filterable=True) #: The number of actors and tasks that use this runtime environment. 
diff --git a/python/ray/experimental/state/state_cli.py b/python/ray/experimental/state/state_cli.py index 1b19db874701..ee7449f902ba 100644 --- a/python/ray/experimental/state/state_cli.py +++ b/python/ray/experimental/state/state_cli.py @@ -172,6 +172,9 @@ def output_with_format( format: AvailableFormat = AvailableFormat.DEFAULT, detail: bool = False, ) -> str: + # humanify all input state data + if schema: + state_data = [schema.humanify(state) for state in state_data] if format == AvailableFormat.DEFAULT: return get_table_output(state_data, schema, detail) if format == AvailableFormat.YAML: @@ -292,8 +295,10 @@ def format_get_api_output( ) -> str: if not state_data or isinstance(state_data, list) and len(state_data) == 0: return f"Resource with id={id} not found in the cluster." + if not isinstance(state_data, list): state_data = [state_data] + state_data = [dataclasses.asdict(state) for state in state_data] return output_with_format(state_data, schema=schema, format=format, detail=True) diff --git a/python/ray/tests/test_state_api.py b/python/ray/tests/test_state_api.py index c9b70b1d9ce0..aa68bf05fd09 100644 --- a/python/ray/tests/test_state_api.py +++ b/python/ray/tests/test_state_api.py @@ -8,6 +8,7 @@ from unittest.mock import MagicMock import pytest +from ray.experimental.state.common import Humanify from ray._private.gcs_utils import GcsAioClient import yaml from click.testing import CliRunner @@ -1696,6 +1697,18 @@ def ready(self): assert result.total == 6 +def test_humanify(): + raw_bytes = 1024 + assert Humanify.memory(raw_bytes) == "1.000 KiB" + raw_bytes *= 1024 + assert Humanify.memory(raw_bytes) == "1.000 MiB" + raw_bytes *= 1024 + assert Humanify.memory(raw_bytes) == "1.000 GiB" + timestamp = 1610000000 + assert "1970-01" in Humanify.timestamp(timestamp) + assert Humanify.duration(timestamp) == "18 days, 15:13:20" + + @pytest.mark.asyncio async def test_state_data_source_client_limit_distributed_sources(ray_start_cluster): cluster = 
ray_start_cluster From eb173d351d750f747b5cf9d6cf8cfbabb6ba0c14 Mon Sep 17 00:00:00 2001 From: Max Pumperla Date: Mon, 15 May 2023 10:33:10 +0200 Subject: [PATCH 379/424] [docs] clarify FAST build option, fixes #35293 (#35297) nit changes (sub-project --> subproject) Signed-off-by: Max Pumperla --- doc/README.md | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/doc/README.md b/doc/README.md index bd131b119273..f707e059c63e 100644 --- a/doc/README.md +++ b/doc/README.md @@ -21,17 +21,20 @@ make develop && open _build/html/index.html > **_NOTE:_** The above command is for development. To reproduce build failures from the > CI, you should use `make html` which is the same as `make develop` but treats warnings as errors. +> Additionally, note that `make develop` uses the `FAST` environment variable to skip some +> expensive parts of the build process. In particular, it will aggressively prune the +> left-hand side navigation, but leave the documents themselves intact. -## Building just one sub-project +## Building just one subproject -Often your changes in documentation just concern one sub-project, such as Tune or Train. -To build just this one sub-project, and ignore the rest +Often your changes in documentation just concern one subproject, such as Tune or Train. +To build just this one subproject, and ignore the rest (leading to build warnings due to broken references etc.), run the following command: ```shell DOC_LIB= sphinx-build -b html -d _build/doctrees source _build/html ``` -where `` is the name of the sub-project and can be any of the docs projects in the `source/` +where `` is the name of the subproject and can be any of the docs projects in the `source/` directory either called `tune`, `rllib`, `train`, `cluster`, `serve`, `data` or the ones starting with `ray-`, e.g. `ray-observability`. 
From 5ebdc2520e0842a1e09070d3de16bd421d624de6 Mon Sep 17 00:00:00 2001 From: Justin Yu Date: Mon, 15 May 2023 04:49:25 -0700 Subject: [PATCH 380/424] [AIR] Deprecate `ray.tune.logger.Logger` interface (#35162) This PR deprecates: 1. Soft-deprecated, for removal in 2.7. - `ray.tune.logger.Logger`, in favor of `ray.tune.logger.LoggerCallback` - Also, deprecated any built-in `Logger` subclasses, including `CSVLogger`, `JsonLogger`, `TBXLogger`, etc. Signed-off-by: Justin Yu --- python/ray/air/integrations/mlflow.py | 2 +- python/ray/tune/integration/comet.py | 4 ++-- python/ray/tune/logger/csv.py | 10 ++++++++-- python/ray/tune/logger/json.py | 10 ++++++++-- python/ray/tune/logger/logger.py | 12 +++++++++++- python/ray/tune/logger/noop.py | 3 ++- python/ray/tune/logger/tensorboardx.py | 10 ++++++++-- python/ray/tune/logger/unified.py | 4 +++- python/ray/tune/trainable/trainable.py | 11 ++++++----- 9 files changed, 49 insertions(+), 17 deletions(-) diff --git a/python/ray/air/integrations/mlflow.py b/python/ray/air/integrations/mlflow.py index fc22e28fdf4a..f31f6ffdeaf3 100644 --- a/python/ray/air/integrations/mlflow.py +++ b/python/ray/air/integrations/mlflow.py @@ -1,7 +1,7 @@ import logging -import warnings from types import ModuleType from typing import Dict, Optional, Union +import warnings import ray from ray.air import session diff --git a/python/ray/tune/integration/comet.py b/python/ray/tune/integration/comet.py index 4e61bdd7af28..1a741968a4f9 100644 --- a/python/ray/tune/integration/comet.py +++ b/python/ray/tune/integration/comet.py @@ -24,5 +24,5 @@ def __init__( save_checkpoints: bool = False, **experiment_kwargs ): - logging.warning(callback_deprecation_message) - super().__init__(online, tags, save_checkpoints, **experiment_kwargs) + # TODO(ml-team): Remove in 2.6. 
+ raise DeprecationWarning(callback_deprecation_message) diff --git a/python/ray/tune/logger/csv.py b/python/ray/tune/logger/csv.py index f8509990b3a7..b9357c7dd872 100644 --- a/python/ray/tune/logger/csv.py +++ b/python/ray/tune/logger/csv.py @@ -4,10 +4,10 @@ from typing import TYPE_CHECKING, Dict, TextIO -from ray.tune.logger.logger import Logger, LoggerCallback +from ray.tune.logger.logger import _LOGGER_DEPRECATION_WARNING, Logger, LoggerCallback from ray.tune.result import EXPR_PROGRESS_FILE from ray.tune.utils import flatten_dict -from ray.util.annotations import PublicAPI +from ray.util.annotations import Deprecated, PublicAPI if TYPE_CHECKING: from ray.tune.experiment.trial import Trial # noqa: F401 @@ -15,6 +15,12 @@ logger = logging.getLogger(__name__) +@Deprecated( + message=_LOGGER_DEPRECATION_WARNING.format( + old="CSVLogger", new="ray.tune.csv.CSVLoggerCallback" + ), + warning=True, +) @PublicAPI class CSVLogger(Logger): """Logs results to progress.csv under the trial directory. 
diff --git a/python/ray/tune/logger/json.py b/python/ray/tune/logger/json.py index ef59a455a3ba..efd04d431cd1 100644 --- a/python/ray/tune/logger/json.py +++ b/python/ray/tune/logger/json.py @@ -7,14 +7,14 @@ import ray.cloudpickle as cloudpickle -from ray.tune.logger.logger import Logger, LoggerCallback +from ray.tune.logger.logger import _LOGGER_DEPRECATION_WARNING, Logger, LoggerCallback from ray.tune.utils.util import SafeFallbackEncoder from ray.tune.result import ( EXPR_PARAM_FILE, EXPR_PARAM_PICKLE_FILE, EXPR_RESULT_FILE, ) -from ray.util.annotations import PublicAPI +from ray.util.annotations import Deprecated, PublicAPI if TYPE_CHECKING: from ray.tune.experiment.trial import Trial # noqa: F401 @@ -25,6 +25,12 @@ VALID_SUMMARY_TYPES = [int, float, np.float32, np.float64, np.int32, np.int64] +@Deprecated( + message=_LOGGER_DEPRECATION_WARNING.format( + old="JsonLogger", new="ray.tune.json.JsonLoggerCallback" + ), + warning=True, +) @PublicAPI class JsonLogger(Logger): """Logs trial results in json format. diff --git a/python/ray/tune/logger/logger.py b/python/ray/tune/logger/logger.py index 64dbfe7d909a..7540d3f02d24 100644 --- a/python/ray/tune/logger/logger.py +++ b/python/ray/tune/logger/logger.py @@ -7,7 +7,7 @@ import yaml from ray.air._internal.json import SafeFallbackEncoder from ray.tune.callback import Callback -from ray.util.annotations import PublicAPI, DeveloperAPI +from ray.util.annotations import Deprecated, DeveloperAPI, PublicAPI if TYPE_CHECKING: from ray.tune.experiment.trial import Trial # noqa: F401 @@ -18,7 +18,17 @@ # Apply flow style for sequences of this length _SEQUENCE_LEN_FLOW_STYLE = 3 +_LOGGER_DEPRECATION_WARNING = ( + "The `{old} interface is deprecated in favor of the " + "`{new}` interface and will be removed in Ray 2.7." +) + +@Deprecated( + message=_LOGGER_DEPRECATION_WARNING.format( + old="Logger", new="ray.tune.logger.LoggerCallback" + ), +) @DeveloperAPI class Logger(abc.ABC): """Logging interface for ray.tune. 
diff --git a/python/ray/tune/logger/noop.py b/python/ray/tune/logger/noop.py index 00a3b8f28fbd..a9bae96b7cd7 100644 --- a/python/ray/tune/logger/noop.py +++ b/python/ray/tune/logger/noop.py @@ -1,7 +1,8 @@ from ray.tune.logger.logger import Logger -from ray.util.annotations import PublicAPI +from ray.util.annotations import Deprecated, PublicAPI +@Deprecated(message="`NoopLogger` will be removed in Ray 2.7.") @PublicAPI class NoopLogger(Logger): def on_result(self, result): diff --git a/python/ray/tune/logger/tensorboardx.py b/python/ray/tune/logger/tensorboardx.py index 9e083319c630..e4e3e25e8872 100644 --- a/python/ray/tune/logger/tensorboardx.py +++ b/python/ray/tune/logger/tensorboardx.py @@ -3,7 +3,7 @@ from typing import TYPE_CHECKING, Dict -from ray.tune.logger.logger import Logger, LoggerCallback +from ray.tune.logger.logger import _LOGGER_DEPRECATION_WARNING, Logger, LoggerCallback from ray.util.debug import log_once from ray.tune.result import ( TRAINING_ITERATION, @@ -11,7 +11,7 @@ TIMESTEPS_TOTAL, ) from ray.tune.utils import flatten_dict -from ray.util.annotations import PublicAPI +from ray.util.annotations import Deprecated, PublicAPI if TYPE_CHECKING: from ray.tune.experiment.trial import Trial # noqa: F401 @@ -21,6 +21,12 @@ VALID_SUMMARY_TYPES = [int, float, np.float32, np.float64, np.int32, np.int64] +@Deprecated( + message=_LOGGER_DEPRECATION_WARNING.format( + old="TBXLogger", new="ray.tune.tensorboardx.TBXLoggerCallback" + ), + warning=True, +) @PublicAPI class TBXLogger(Logger): """TensorBoardX Logger. 
diff --git a/python/ray/tune/logger/unified.py b/python/ray/tune/logger/unified.py index 73aaf00ce081..ede689829c40 100644 --- a/python/ray/tune/logger/unified.py +++ b/python/ray/tune/logger/unified.py @@ -4,7 +4,8 @@ from ray.tune.logger import DEFAULT_LOGGERS from ray.tune.logger.json import JsonLogger from ray.tune.logger.logger import Logger -from ray.util import log_once, PublicAPI +from ray.util import log_once +from ray.util.annotations import Deprecated, PublicAPI logger = logging.getLogger(__name__) @@ -13,6 +14,7 @@ from ray.tune.experiment.trial import Trial # noqa: F401 +@Deprecated(message="`UnifiedLogger` will be removed in Ray 2.7.", warning=True) @PublicAPI class UnifiedLogger(Logger): """Unified result logger for TensorBoard, rllab/viskit, plain json. diff --git a/python/ray/tune/trainable/trainable.py b/python/ray/tune/trainable/trainable.py index bd51ac89fffe..63a5d997f4f4 100644 --- a/python/ray/tune/trainable/trainable.py +++ b/python/ray/tune/trainable/trainable.py @@ -9,7 +9,7 @@ import tempfile import time from contextlib import redirect_stderr, redirect_stdout -from typing import Any, Callable, Dict, List, Optional, Union, Type, TYPE_CHECKING +from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Union, Type import warnings import ray @@ -108,10 +108,10 @@ class Trainable: def __init__( self, config: Dict[str, Any] = None, - logger_creator: Callable[[Dict[str, Any]], "Logger"] = None, + logger_creator: Callable[[Dict[str, Any]], "Logger"] = None, # Deprecated (2.7) remote_checkpoint_dir: Optional[str] = None, - custom_syncer: Optional[Syncer] = None, # Deprecated - sync_timeout: Optional[int] = None, # Deprecated + custom_syncer: Optional[Syncer] = None, # Deprecated (2.6) + sync_timeout: Optional[int] = None, # Deprecated (2.6) sync_config: Optional[SyncConfig] = None, ): """Initialize a Trainable. @@ -125,7 +125,7 @@ def __init__( Args: config: Trainable-specific configuration data. 
By default will be saved as ``self.config``. - logger_creator: Function that creates a ray.tune.Logger + logger_creator: (Deprecated) Function that creates a ray.tune.Logger object. If unspecified, a default logger is created. remote_checkpoint_dir: Upload directory (S3 or GS path). This is **per trial** directory, @@ -140,6 +140,7 @@ def __init__( if self.is_actor(): disable_ipython() + # TODO(ml-team): Remove `logger_creator` in 2.7. self._result_logger = self._logdir = None self._create_logger(self.config, logger_creator) From 92e7fbd8a0a7f997f2e1acdebda1b43ad9d4324d Mon Sep 17 00:00:00 2001 From: SangBin Cho Date: Mon, 15 May 2023 22:23:16 +0900 Subject: [PATCH 381/424] [Core/Logging] Worker startup hook (#34738) This PR supports the basic worker setup hook API to runtime env according to the design; https://docs.google.com/document/d/1ngiuAZAMnl9c4LozoTpWh37KPviDRIpmEjEI6BsNL7w/edit This PR also exposes exit API to the Python so that we can easily fail the worker with an exception we want It is the first PR to support this feature. The PR allows users to add a setup method using runtime env. There will be 2 more PRs that will be coming as a follow-up Merge the runtime env when the job + driver specifies the runtime env.
Allow to specify setup hook for individual task and actor --- python/ray/_private/function_manager.py | 92 +++++++++-- python/ray/_private/ray_constants.py | 3 + python/ray/_private/runtime_env/plugin.py | 2 + python/ray/_private/runtime_env/setup_hook.py | 131 +++++++++++++++ python/ray/_private/worker.py | 5 + python/ray/_private/workers/default_worker.py | 17 +- python/ray/_raylet.pyx | 47 ++++-- python/ray/includes/common.pxd | 8 + python/ray/includes/common.pxi | 6 + python/ray/includes/libcoreworker.pxd | 5 + python/ray/runtime_env/runtime_env.py | 11 +- python/ray/tests/BUILD | 1 + .../ray/tests/test_runtime_env_setup_func.py | 152 ++++++++++++++++++ src/ray/common/constants.h | 2 + src/ray/core_worker/core_worker.h | 28 ++-- src/ray/gcs/gcs_server/gcs_function_manager.h | 6 +- 16 files changed, 470 insertions(+), 46 deletions(-) create mode 100644 python/ray/_private/runtime_env/setup_hook.py create mode 100644 python/ray/tests/test_runtime_env_setup_func.py diff --git a/python/ray/_private/function_manager.py b/python/ray/_private/function_manager.py index 9dd9173cf265..419731ad8315 100644 --- a/python/ray/_private/function_manager.py +++ b/python/ray/_private/function_manager.py @@ -9,7 +9,7 @@ import time import traceback from collections import defaultdict, namedtuple -from typing import Optional +from typing import Optional, Callable import ray import ray._private.profiling as profiling @@ -27,11 +27,16 @@ format_error_message, ) from ray._private.serialization import pickle_dumps -from ray._raylet import JobID, PythonFunctionDescriptor +from ray._raylet import JobID, PythonFunctionDescriptor, WORKER_SETUP_HOOK_KEY_NAME_GCS FunctionExecutionInfo = namedtuple( "FunctionExecutionInfo", ["function", "function_name", "max_calls"] ) +ImportedFunctionInfo = namedtuple( + "ImportedFunctionInfo", + ["job_id", "function_id", "function_name", "function", "module", "max_calls"], +) + """FunctionExecutionInfo: A named tuple storing remote function information.""" 
logger = logging.getLogger(__name__) @@ -175,6 +180,53 @@ def export_key(self, key): # TODO(mwtian) implement per-job notification here. self._worker.gcs_publisher.publish_function_key(key) + def export_setup_func( + self, setup_func: Callable, timeout: Optional[int] = None + ) -> bytes: + """Export the setup hook function and return the key.""" + pickled_function = pickle_dumps( + setup_func, f"Cannot serialize the worker_setup_hook {setup_func.__name__}" + ) + + function_to_run_id = hashlib.shake_128(pickled_function).digest( + ray_constants.ID_SIZE + ) + key = make_function_table_key( + # This value should match with gcs_function_manager.h. + # Otherwise, it won't be GC'ed. + WORKER_SETUP_HOOK_KEY_NAME_GCS.encode(), + # b"FunctionsToRun", + self._worker.current_job_id.binary(), + function_to_run_id, + ) + + check_oversized_function( + pickled_function, setup_func.__name__, "function", self._worker + ) + + try: + self._worker.gcs_client.internal_kv_put( + key, + pickle.dumps( + { + "job_id": self._worker.current_job_id.binary(), + "function_id": function_to_run_id, + "function": pickled_function, + } + ), + # overwrite + True, + ray_constants.KV_NAMESPACE_FUNCTION_TABLE, + timeout=timeout, + ) + except Exception as e: + logger.exception( + "Failed to export the setup hook " f"{setup_func.__name__}." + ) + raise e + + return key + def export(self, remote_function): """Pickle a remote function and export it to redis. 
Args: @@ -224,21 +276,31 @@ def export(self, remote_function): key, val, True, KV_NAMESPACE_FUNCTION_TABLE ) - def fetch_and_register_remote_function(self, key): - """Import a remote function.""" - vals = self._worker.gcs_client.internal_kv_get(key, KV_NAMESPACE_FUNCTION_TABLE) + def fetch_registered_method( + self, key: str, timeout: Optional[int] = None + ) -> Optional[ImportedFunctionInfo]: + vals = self._worker.gcs_client.internal_kv_get( + key, KV_NAMESPACE_FUNCTION_TABLE, timeout=timeout + ) if vals is None: - return False + return None else: vals = pickle.loads(vals) - fields = [ - "job_id", - "function_id", - "function_name", - "function", - "module", - "max_calls", - ] + fields = [ + "job_id", + "function_id", + "function_name", + "function", + "module", + "max_calls", + ] + return ImportedFunctionInfo._make(vals.get(field) for field in fields) + + def fetch_and_register_remote_function(self, key): + """Import a remote function.""" + remote_function_info = self.fetch_registered_method(key) + if not remote_function_info: + return False ( job_id_str, function_id_str, @@ -246,7 +308,7 @@ def fetch_and_register_remote_function(self, key): serialized_function, module, max_calls, - ) = (vals.get(field) for field in fields) + ) = remote_function_info function_id = ray.FunctionID(function_id_str) job_id = ray.JobID(job_id_str) diff --git a/python/ray/_private/ray_constants.py b/python/ray/_private/ray_constants.py index 7bbad5f5aa1c..8acf81575ab2 100644 --- a/python/ray/_private/ray_constants.py +++ b/python/ray/_private/ray_constants.py @@ -430,3 +430,6 @@ def gcs_actor_scheduling_enabled(): } RAY_ENABLE_RECORD_TASK_LOGGING = env_bool("RAY_ENABLE_RECORD_TASK_LOGGING", False) + +WORKER_SETUP_HOOK_ENV_VAR = "__RAY_WORKER_SETUP_HOOK_ENV_VAR" +RAY_WORKER_SETUP_HOOK_LOAD_TIMEOUT_ENV_VAR = "RAY_WORKER_SETUP_HOOK_LOAD_TIMEOUT" diff --git a/python/ray/_private/runtime_env/plugin.py b/python/ray/_private/runtime_env/plugin.py index 21b5fa8c49a2..b36d59858c0a 100644 --- 
a/python/ray/_private/runtime_env/plugin.py +++ b/python/ray/_private/runtime_env/plugin.py @@ -31,6 +31,8 @@ class RuntimeEnvPlugin(ABC): def validate(runtime_env_dict: dict) -> None: """Validate user entry for this plugin. + The method is invoked upon installation of runtime env. + Args: runtime_env_dict: the user-supplied runtime environment dict. diff --git a/python/ray/_private/runtime_env/setup_hook.py b/python/ray/_private/runtime_env/setup_hook.py new file mode 100644 index 000000000000..135252dd4611 --- /dev/null +++ b/python/ray/_private/runtime_env/setup_hook.py @@ -0,0 +1,131 @@ +import traceback +import logging +import base64 +import os + +from typing import Dict, Any, Callable, Union, Optional + +import ray +import ray._private.ray_constants as ray_constants +import ray.cloudpickle as pickle +from ray.runtime_env import RuntimeEnv + +logger = logging.getLogger(__name__) + + +def get_import_export_timeout(): + return int( + os.environ.get(ray_constants.RAY_WORKER_SETUP_HOOK_LOAD_TIMEOUT_ENV_VAR, "60") + ) + + +def _decode_function_key(key: bytes) -> str: + return base64.b64encode(key).decode() + + +def _encode_function_key(key: str) -> bytes: + return base64.b64decode(key) + + +def upload_worker_setup_hook_if_needed( + runtime_env: Union[Dict[str, Any], RuntimeEnv], + worker: "ray.Worker", +) -> Union[Dict[str, Any], RuntimeEnv]: + """Uploads the worker_setup_hook to GCS with a key. + + runtime_env["worker_setup_hook"] is converted to a decoded key + that can load the worker setup hook function from GCS. + I.e., you can use internalKV.Get(runtime_env["worker_setup_hook]) + to access the worker setup hook from GCS. + + Args: + runtime_env: The runtime_env. The value will be modified + when returned. + worker: ray.worker instance. + decoder: GCS requires the function key to be bytes. However, + we cannot json serialize (which is required to serialize + runtime env) the bytes. So the key should be decoded to + a string. 
The given decoder is used to decode the function + key. + """ + setup_func = runtime_env.get("worker_setup_hook") + if setup_func is None: + return runtime_env + + if not isinstance(setup_func, Callable): + raise TypeError( + "worker_setup_hook must be a function, " f"got {type(setup_func)}." + ) + # TODO(sang): Support modules. + + try: + key = worker.function_actor_manager.export_setup_func( + setup_func, timeout=get_import_export_timeout() + ) + except Exception as e: + raise ray.exceptions.RuntimeEnvSetupError( + "Failed to export the setup function." + ) from e + env_vars = runtime_env.get("env_vars", {}) + assert ray_constants.WORKER_SETUP_HOOK_ENV_VAR not in env_vars, ( + f"The env var, {ray_constants.WORKER_SETUP_HOOK_ENV_VAR}, " + "is not permitted because it is reserved for the internal use." + ) + env_vars[ray_constants.WORKER_SETUP_HOOK_ENV_VAR] = _decode_function_key(key) + runtime_env["env_vars"] = env_vars + # Note: This field is no-op. We don't have a plugin for the setup hook + # because we can implement it simply using an env var. + # This field is just for the observability purpose, so we store + # the name of the method. + runtime_env["worker_setup_hook"] = setup_func.__name__ + return runtime_env + + +def load_and_execute_setup_hook( + worker_setup_hook_key: str, +) -> Optional[str]: + """Load the setup hook from a given key and execute. + + Args: + worker_setup_hook_key: The key to import the setup hook + from GCS. + Returns: + An error message if it fails. None if it succeeds. 
+ """ + assert worker_setup_hook_key is not None + worker = ray._private.worker.global_worker + assert worker.connected + + func_manager = worker.function_actor_manager + try: + worker_setup_func_info = func_manager.fetch_registered_method( + _encode_function_key(worker_setup_hook_key), + timeout=get_import_export_timeout(), + ) + except Exception: + error_message = ( + "Failed to import setup hook within " + f"{get_import_export_timeout()} seconds.\n" + f"{traceback.format_exc()}" + ) + return error_message + + try: + setup_func = pickle.loads(worker_setup_func_info.function) + except Exception: + error_message = ( + "Failed to deserialize the setup hook method.\n" f"{traceback.format_exc()}" + ) + return error_message + + try: + setup_func() + except Exception: + error_message = ( + f"Failed to execute the setup hook method. Function name:" + f"{worker_setup_func_info.function_name}\n" + f"{traceback.format_exc()}" + ) + return error_message + + return None diff --git a/python/ray/_private/worker.py b/python/ray/_private/worker.py index 1bb275a2312e..69a8327173c9 100644 --- a/python/ray/_private/worker.py +++ b/python/ray/_private/worker.py @@ -79,6 +79,7 @@ from ray._private.runtime_env.constants import RAY_JOB_CONFIG_JSON_ENV_VAR from ray._private.runtime_env.py_modules import upload_py_modules_if_needed from ray._private.runtime_env.working_dir import upload_working_dir_if_needed +from ray._private.runtime_env.setup_hook import upload_worker_setup_hook_if_needed from ray._private.storage import _load_class from ray._private.utils import check_oversized_function, get_ray_doc_version from ray.exceptions import ObjectStoreFullError, RayError, RaySystemError, RayTaskError @@ -2173,6 +2174,10 @@ def connect( runtime_env = upload_working_dir_if_needed( runtime_env, scratch_dir, logger=logger ) + runtime_env = upload_worker_setup_hook_if_needed( + runtime_env, + worker, + ) # Remove excludes, it isn't relevant after the upload step. 
runtime_env.pop("excludes", None) job_config.set_runtime_env(runtime_env) diff --git a/python/ray/_private/workers/default_worker.py b/python/ray/_private/workers/default_worker.py index 937f45a8b85d..19fd801532c8 100644 --- a/python/ray/_private/workers/default_worker.py +++ b/python/ray/_private/workers/default_worker.py @@ -1,3 +1,4 @@ +import os import argparse import base64 import json @@ -10,6 +11,7 @@ import ray.actor from ray._private.parameter import RayParams from ray._private.ray_logging import configure_log_file, get_worker_log_file_name +from ray._private.runtime_env.setup_hook import load_and_execute_setup_hook parser = argparse.ArgumentParser( @@ -236,20 +238,29 @@ worker_launched_time_ms=worker_launched_time_ms, ) + worker = ray._private.worker.global_worker + # Setup log file. out_file, err_file = node.get_log_file_handles( get_worker_log_file_name(args.worker_type) ) configure_log_file(out_file, err_file) - ray._private.worker.global_worker.set_out_file(out_file) - ray._private.worker.global_worker.set_err_file(err_file) + worker.set_out_file(out_file) + worker.set_err_file(err_file) if mode == ray.WORKER_MODE and args.worker_preload_modules: module_names_to_import = args.worker_preload_modules.split(",") ray._private.utils.try_import_each_module(module_names_to_import) + # If the worker setup function is configured, run it. + worker_setup_hook_key = os.getenv(ray_constants.WORKER_SETUP_HOOK_ENV_VAR) + if worker_setup_hook_key: + error = load_and_execute_setup_hook(worker_setup_hook_key) + if error is not None: + worker.core_worker.exit_worker("system", error) + if mode == ray.WORKER_MODE: - ray._private.worker.global_worker.main_loop() + worker.main_loop() elif mode in [ray.RESTORE_WORKER_MODE, ray.SPILL_WORKER_MODE]: # It is handled by another thread in the C++ core worker. # We just need to keep the worker alive. 
diff --git a/python/ray/_raylet.pyx b/python/ray/_raylet.pyx index 25cdd67bbdb1..9be8234a0084 100644 --- a/python/ray/_raylet.pyx +++ b/python/ray/_raylet.pyx @@ -59,6 +59,7 @@ from ray.includes.common cimport ( CObjectReference, CLanguage, CObjectReference, + CWorkerExitType, CRayObject, CRayStatus, CErrorTableData, @@ -95,6 +96,10 @@ from ray.includes.common cimport ( PLACEMENT_STRATEGY_SPREAD, PLACEMENT_STRATEGY_STRICT_PACK, PLACEMENT_STRATEGY_STRICT_SPREAD, + WORKER_EXIT_TYPE_USER_ERROR, + WORKER_EXIT_TYPE_SYSTEM_ERROR, + kResourceUnitScaling, + kWorkerSetupHookKeyName, ) from ray.includes.unique_ids cimport ( CActorID, @@ -177,11 +182,6 @@ current_task_id_lock = threading.Lock() job_config_initialized = False job_config_initialization_lock = threading.Lock() -cdef extern from "ray/common/constants.h" nogil: - cdef int kResourceUnitScaling - -RESOURCE_UNIT_SCALING = kResourceUnitScaling - class ObjectRefGenerator: def __init__(self, refs): @@ -1886,15 +1886,16 @@ cdef class CoreWorker: self.cgname_to_eventloop_dict = None self.fd_to_cgname_dict = None self.eventloop_for_default_cg = None + self.current_runtime_env = None def shutdown(self): - with nogil: - # If it's a worker, the core worker process should have been - # shutdown. So we can't call - # `CCoreWorkerProcess.GetCoreWorker().GetWorkerType()` here. - # Instead, we use the cached `is_driver` flag to test if it's a - # driver. - if self.is_driver: + # If it's a worker, the core worker process should have been + # shutdown. So we can't call + # `CCoreWorkerProcess.GetCoreWorker().GetWorkerType()` here. + # Instead, we use the cached `is_driver` flag to test if it's a + # driver. + if self.is_driver: + with nogil: CCoreWorkerProcess.Shutdown() def notify_raylet(self): @@ -1905,6 +1906,28 @@ cdef class CoreWorker: with nogil: CCoreWorkerProcess.RunTaskExecutionLoop() + def exit_worker(self, exit_type: str, c_string detail): + """ + Exit the current worker process. 
This API should only be used by + a worker. If this API is called, the worker will finish currently + executing task, initiate the shutdown, and stop itself gracefully. + The given exit_type and detail will be reported to GCS, and any + worker failure error will contain them. + """ + cdef: + CWorkerExitType c_exit_type + cdef const shared_ptr[LocalMemoryBuffer] null_ptr + + if exit_type == "user": + c_exit_type = WORKER_EXIT_TYPE_USER_ERROR + if exit_type == "system": + c_exit_type = WORKER_EXIT_TYPE_SYSTEM_ERROR + else: + raise ValueError(f"Invalid exit type: {exit_type}") + assert not self.is_driver + with nogil: + CCoreWorkerProcess.GetCoreWorker().Exit(c_exit_type, detail, null_ptr) + def get_current_task_retry_exceptions(self): return CCoreWorkerProcess.GetCoreWorker( ).GetCurrentTaskRetryExceptions() diff --git a/python/ray/includes/common.pxd b/python/ray/includes/common.pxd index 4250470f3013..a06630fd2132 100644 --- a/python/ray/includes/common.pxd +++ b/python/ray/includes/common.pxd @@ -154,6 +154,8 @@ cdef extern from "src/ray/protobuf/common.pb.h" nogil: pass cdef cppclass CWorkerType "ray::core::WorkerType": pass + cdef cppclass CWorkerExitType "ray::rpc::WorkerExitType": + pass cdef cppclass CTaskType "ray::TaskType": pass cdef cppclass CPlacementStrategy "ray::core::PlacementStrategy": @@ -204,6 +206,8 @@ cdef extern from "src/ray/protobuf/common.pb.h" nogil: cdef CWorkerType WORKER_TYPE_SPILL_WORKER "ray::core::WorkerType::SPILL_WORKER" # noqa: E501 cdef CWorkerType WORKER_TYPE_RESTORE_WORKER "ray::core::WorkerType::RESTORE_WORKER" # noqa: E501 cdef CWorkerType WORKER_TYPE_UTIL_WORKER "ray::core::WorkerType::UTIL_WORKER" # noqa: E501 + cdef CWorkerExitType WORKER_EXIT_TYPE_USER_ERROR "ray::rpc::WorkerExitType::USER_ERROR" # noqa: E501 + cdef CWorkerExitType WORKER_EXIT_TYPE_SYSTEM_ERROR "ray::rpc::WorkerExitType::SYSTEM_ERROR" # noqa: E501 cdef extern from "src/ray/protobuf/common.pb.h" nogil: cdef CTaskType TASK_TYPE_NORMAL_TASK 
"ray::TaskType::NORMAL_TASK" @@ -421,3 +425,7 @@ cdef extern from "ray/common/task/task_spec.h" nogil: c_string GetName() const uint32_t GetMaxConcurrency() const c_vector[CFunctionDescriptor] GetFunctionDescriptors() const + +cdef extern from "ray/common/constants.h" nogil: + cdef const char[] kWorkerSetupHookKeyName + cdef int kResourceUnitScaling diff --git a/python/ray/includes/common.pxi b/python/ray/includes/common.pxi index d7c3c121bc69..ea402ded009e 100644 --- a/python/ray/includes/common.pxi +++ b/python/ray/includes/common.pxi @@ -7,6 +7,8 @@ from ray.includes.common cimport ( CGcsClientOptions, CPythonGcsClient, CPythonGcsPublisher, + kWorkerSetupHookKeyName, + kResourceUnitScaling, ) @@ -24,3 +26,7 @@ cdef class GcsClientOptions: cdef CGcsClientOptions* native(self): return (self.inner.get()) + + +WORKER_SETUP_HOOK_KEY_NAME_GCS = str(kWorkerSetupHookKeyName) +RESOURCE_UNIT_SCALING = kResourceUnitScaling diff --git a/python/ray/includes/libcoreworker.pxd b/python/ray/includes/libcoreworker.pxd index c847e2938628..42c17b8572ca 100644 --- a/python/ray/includes/libcoreworker.pxd +++ b/python/ray/includes/libcoreworker.pxd @@ -42,6 +42,7 @@ from ray.includes.common cimport ( CJobConfig, CConcurrencyGroup, CSchedulingStrategy, + CWorkerExitType, ) from ray.includes.function_descriptor cimport ( CFunctionDescriptor, @@ -270,6 +271,10 @@ cdef extern from "ray/core_worker/core_worker.h" nogil: void RecordTaskLogEnd(int64_t stdout_end_offset, int64_t stderr_end_offset) const + void Exit(const CWorkerExitType exit_type, + const c_string &detail, + const shared_ptr[LocalMemoryBuffer] &creation_task_exception_pb_bytes) + cdef cppclass CCoreWorkerOptions "ray::core::CoreWorkerOptions": CWorkerType worker_type CLanguage language diff --git a/python/ray/runtime_env/runtime_env.py b/python/ray/runtime_env/runtime_env.py index 32378b910840..e99072585688 100644 --- a/python/ray/runtime_env/runtime_env.py +++ b/python/ray/runtime_env/runtime_env.py @@ -3,7 +3,7 @@ import 
os from copy import deepcopy from dataclasses import asdict, is_dataclass -from typing import Any, Dict, List, Optional, Set, Tuple, Union +from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Union import ray from ray._private.ray_constants import DEFAULT_RUNTIME_ENV_TIMEOUT_SECONDS @@ -235,6 +235,11 @@ class MyClass: The `run_options` list spec is here: https://docs.docker.com/engine/reference/run/ env_vars: Environment variables to set. + worker_setup_hook: The setup hook that's called after workers + start and before tasks and actors are scheduled. + The value has to be a callable when passed to the job/task/actor. + The callable is then exported and this value is converted to + the setup hook's function name for the observability purpose. config: config for runtime environment. Either a dict or a RuntimeEnvConfig. Field: (1) setup_timeout_seconds, the timeout of runtime environment creation, timeout is in seconds. @@ -258,6 +263,7 @@ class MyClass: # field which is not supported. We should remove it # with the test. 
"docker", + "worker_setup_hook", } extensions_fields: Set[str] = { @@ -275,6 +281,7 @@ def __init__( conda: Optional[Union[Dict[str, str], str]] = None, container: Optional[Dict[str, str]] = None, env_vars: Optional[Dict[str, str]] = None, + worker_setup_hook: Optional[Union[Callable, str]] = None, config: Optional[Union[Dict, RuntimeEnvConfig]] = None, _validate: bool = True, **kwargs, @@ -296,6 +303,8 @@ def __init__( runtime_env["env_vars"] = env_vars if config is not None: runtime_env["config"] = config + if worker_setup_hook is not None: + runtime_env["worker_setup_hook"] = worker_setup_hook if runtime_env.get("java_jars"): runtime_env["java_jars"] = runtime_env.get("java_jars") diff --git a/python/ray/tests/BUILD b/python/ray/tests/BUILD index 7b483064b550..8cb7a0beed60 100644 --- a/python/ray/tests/BUILD +++ b/python/ray/tests/BUILD @@ -151,6 +151,7 @@ py_test_module_list( "test_runtime_env_env_vars.py", "test_runtime_env_packaging.py", "test_runtime_env_plugin.py", + "test_runtime_env_setup_func.py", "test_runtime_env_strong_type.py", "test_runtime_env_fork_process.py", "test_serialization.py", diff --git a/python/ray/tests/test_runtime_env_setup_func.py b/python/ray/tests/test_runtime_env_setup_func.py new file mode 100644 index 000000000000..32c0cf88bb07 --- /dev/null +++ b/python/ray/tests/test_runtime_env_setup_func.py @@ -0,0 +1,152 @@ +import threading +import os +import sys +import logging + +import pytest + +import ray + + +def test_setup_func_basic(shutdown_only): + def configure_logging(level: int): + logger = logging.getLogger("") + logger.setLevel(level) + + ray.init( + num_cpus=1, + runtime_env={ + "worker_setup_hook": lambda: configure_logging(logging.DEBUG), + "env_vars": {"ABC": "123"}, + }, + ) + + @ray.remote + def f(level): + logger = logging.getLogger("") + assert logging.getLevelName(logger.getEffectiveLevel()) == level + return True + + @ray.remote + class Actor: + def __init__(self, level): + logger = logging.getLogger("") + assert 
logging.getLevelName(logger.getEffectiveLevel()) == level + + def ready(self): + return True + + def get_env_var(self, key): + return os.getenv(key) + + # Test basic. + for _ in range(10): + assert ray.get(f.remote("DEBUG")) + a = Actor.remote("DEBUG") + assert ray.get(a.__ray_ready__.remote()) + + # Make sure env var is not overwritten. + assert ray.get(a.get_env_var.remote("ABC")) == "123" + + # Test override. + # TODO(sang) + # ray.get( + # f.options( + # runtime_env={ + # "worker_setup_hook": lambda: configure_logging(logging.INFO)} + # ).remote("INFO")) + # a = Actor.optinos( + # runtime_env={"worker_setup_hook": lambda: configure_logging(logging.INFO)} + # ).remote("INFO") + # assert ray.get(a.__ray_ready__.remote()) + + +def test_setup_func_failure(shutdown_only): + """ + Verify when deserilization failed, it raises an exception. + """ + + class CustomClass: + """ + Custom class that can serialize but canont deserialize. + It is used to test deserialization failure. + """ + + def __getstate__(self): + # This method is called during serialization + return self.__dict__ + + def __setstate__(self, state): + # This method is called during deserialization + raise RuntimeError("Deserialization not allowed") + + c = CustomClass() + + def setup(): + print(c) + + ray.init( + num_cpus=1, + runtime_env={ + "worker_setup_hook": setup, + }, + ) + + @ray.remote + class A: + pass + + a = A.remote() + # TODO(sang): Maybe we should raise RuntimeEnvSetupError? + # It is pretty difficult now. See + # https://github.com/ray-project/ray/pull/34738#discussion_r1189553716 + with pytest.raises(ray.exceptions.RayActorError) as e: + ray.get(a.__ray_ready__.remote()) + assert "Deserialization not allowed" in str(e.value) + + """ + Verify when the serialization fails, ray.init fails. 
+ """ + ray.shutdown() + lock = threading.Lock() + + with pytest.raises(ray.exceptions.RuntimeEnvSetupError) as e: + ray.init( + num_cpus=0, + runtime_env={ + "worker_setup_hook": lambda: print(lock), + }, + ) + assert "Failed to export the setup function." in str(e.value) + + """ + Verify when the setup hook failed, it raises an exception. + """ + ray.shutdown() + + def setup_func(): + raise ValueError("Setup Failed") + + ray.init( + num_cpus=1, + runtime_env={ + "worker_setup_hook": setup_func, + }, + ) + + @ray.remote + class A: + pass + + a = A.remote() + with pytest.raises(ray.exceptions.RayActorError) as e: + ray.get(a.__ray_ready__.remote()) + assert "Setup Failed" in str(e.value) + assert "Failed to execute the setup hook method." in str(e.value) + + +if __name__ == "__main__": + if os.environ.get("PARALLEL_CI"): + sys.exit(pytest.main(["-n", "auto", "--boxed", "-vs", __file__])) + else: + sys.exit(pytest.main(["-sv", __file__])) diff --git a/src/ray/common/constants.h b/src/ray/common/constants.h index aa05020fd509..bf83ecc5189c 100644 --- a/src/ray/common/constants.h +++ b/src/ray/common/constants.h @@ -20,6 +20,8 @@ /// The precision of fractional resource quantity. constexpr int kResourceUnitScaling = 10000; +constexpr char kWorkerSetupHookKeyName[] = "FunctionsToRun"; + /// Length of Ray full-length IDs in bytes. constexpr size_t kUniqueIDSize = 28; diff --git a/src/ray/core_worker/core_worker.h b/src/ray/core_worker/core_worker.h index 55f89fae9ed6..7340e9ed4901 100644 --- a/src/ray/core_worker/core_worker.h +++ b/src/ray/core_worker/core_worker.h @@ -1161,6 +1161,20 @@ class CoreWorker : public rpc::CoreWorkerServiceHandler { /// \param stderr_end_offset End offset of the stderr for this task. void RecordTaskLogEnd(int64_t stdout_end_offset, int64_t stderr_end_offset) const; + /// (WORKER mode only) Gracefully exit the worker. `Graceful` means the worker will + /// exit when it drains all tasks and cleans all owned objects. 
+ /// After this method is called, all the tasks in the queue will not be + /// executed. + /// + /// \param exit_type The reason why this worker process is disconnected. + /// \param exit_detail The detailed reason for a given exit. + /// \param creation_task_exception_pb_bytes It is given when the worker is + /// disconnected because the actor is failed due to its exception in its init method. + void Exit(const rpc::WorkerExitType exit_type, + const std::string &detail, + const std::shared_ptr &creation_task_exception_pb_bytes = + nullptr); + private: static json OverrideRuntimeEnv(json &child, const std::shared_ptr parent); @@ -1205,20 +1219,6 @@ class CoreWorker : public rpc::CoreWorkerServiceHandler { /// Run the io_service_ event loop. This should be called in a background thread. void RunIOService(); - /// (WORKER mode only) Gracefully exit the worker. `Graceful` means the worker will - /// exit when it drains all tasks and cleans all owned objects. - /// After this method is called, all the tasks in the queue will not be - /// executed. - /// - /// \param exit_type The reason why this worker process is disconnected. - /// \param exit_detail The detailed reason for a given exit. - /// \param creation_task_exception_pb_bytes It is given when the worker is - /// disconnected because the actor is failed due to its exception in its init method. - void Exit(const rpc::WorkerExitType exit_type, - const std::string &detail, - const std::shared_ptr &creation_task_exception_pb_bytes = - nullptr); - /// Forcefully exit the worker. `Force` means it will exit actor without draining /// or cleaning any resources. /// \param exit_type The reason why this worker process is disconnected. 
diff --git a/src/ray/gcs/gcs_server/gcs_function_manager.h b/src/ray/gcs/gcs_server/gcs_function_manager.h index 705958cf4a50..530b2dbb8ad9 100644 --- a/src/ray/gcs/gcs_server/gcs_function_manager.h +++ b/src/ray/gcs/gcs_server/gcs_function_manager.h @@ -14,6 +14,7 @@ #pragma once #include "absl/container/flat_hash_map.h" +#include "ray/common/constants.h" #include "ray/gcs/gcs_server/gcs_kv_manager.h" namespace ray { @@ -48,7 +49,10 @@ class GcsFunctionManager { kv_.Del("fun", "IsolatedExports:" + job_id_hex + ":", true, nullptr); kv_.Del("fun", "RemoteFunction:" + job_id_hex + ":", true, nullptr); kv_.Del("fun", "ActorClass:" + job_id_hex + ":", true, nullptr); - kv_.Del("fun", "FunctionsToRun:" + job_id_hex + ":", true, nullptr); + kv_.Del("fun", + std::string(kWorkerSetupHookKeyName) + ":" + job_id_hex + ":", + true, + nullptr); } // Handler for internal KV From db7d2f0298120f59be825a1131e51aa5cd9196f5 Mon Sep 17 00:00:00 2001 From: Sihan Wang Date: Mon, 15 May 2023 08:45:10 -0700 Subject: [PATCH 382/424] [serve] Log to files in JSON format by default (#35118) Update the logging format to json format. Used for better parsing and log search. - User can set `SERVE_JSONIFY_LOG_MESSAGE` to jsonify the log message. - Stream log doesn't have effect with this change. 
controller log file ``` {"levelname": "INFO", "asctime": "2023-05-07 17:22:58,360", "component_name": "controller", "component_id": "3525674", "message": "http_state.py:129 - Starting HTTP proxy with name 'SERVE_CONTROLLER_ACTOR:SERVE_PROXY_ACTOR-9e1688af72409c6ffaf805b05c397632cfb4eb7acf0703468e8e3535' on node '9e1688af72409c6ffaf805b05c397632cfb4eb7acf0703468e8e3535' listening on '127.0.0.1:8000'"} {"levelname": "INFO", "asctime": "2023-05-07 17:22:59,252", "component_name": "controller", "component_id": "3525674", "message": "deployment_state.py:1220 - Deploying new version of deployment app_testv2."} {"levelname": "INFO", "asctime": "2023-05-07 17:22:59,282", "component_name": "controller", "component_id": "3525674", "message": "deployment_state.py:1459 - Adding 2 replicas to deployment app_testv2."} {"levelname": "INFO", "asctime": "2023-05-07 17:22:59,283", "component_name": "controller", "component_id": "3525674", "message": "deployment_state.py:330 - Starting replica app_testv2#QZdGDm for deployment app_testv2."} {"levelname": "INFO", "asctime": "2023-05-07 17:22:59,297", "component_name": "controller", "component_id": "3525674", "message": "deployment_state.py:330 - Starting replica app_testv2#jorUqs for deployment app_testv2."} {"levelname": "INFO", "asctime": "2023-05-07 17:23:00,217", "component_name": "controller", "component_id": "3525674", "message": "deployment_state.py:1615 - Replica app_testv2#QZdGDm started successfully."} {"levelname": "INFO", "asctime": "2023-05-07 17:23:00,217", "component_name": "controller", "component_id": "3525674", "message": "deployment_state.py:1615 - Replica app_testv2#jorUqs started successfully."} {"levelname": "INFO", "asctime": "2023-05-07 17:23:00,270", "component_name": "controller", "component_id": "3525674", "message": "deployment_state.py:1220 - Deploying new version of deployment app2_testv2."} {"levelname": "INFO", "asctime": "2023-05-07 17:23:00,319", "component_name": "controller", "component_id": 
"3525674", "message": "deployment_state.py:1459 - Adding 2 replicas to deployment app2_testv2."} {"levelname": "INFO", "asctime": "2023-05-07 17:23:00,319", "component_name": "controller", "component_id": "3525674", "message": "deployment_state.py:330 - Starting replica app2_testv2#lIYdIP for deployment app2_testv2."} {"levelname": "INFO", "asctime": "2023-05-07 17:23:00,332", "component_name": "controller", "component_id": "3525674", "message": "deployment_state.py:330 - Starting replica app2_testv2#FxSRtl for deployment app2_testv2."} {"levelname": "INFO", "asctime": "2023-05-07 17:23:01,254", "component_name": "controller", "component_id": "3525674", "message": "deployment_state.py:1615 - Replica app2_testv2#lIYdIP started successfully."} {"levelname": "INFO", "asctime": "2023-05-07 17:23:01,255", "component_name": "controller", "component_id": "3525674", "message": "deployment_state.py:1615 - Replica app2_testv2#FxSRtl started successfully."} ``` http proxy ``` {"levelname": "INFO", "asctime": "2023-05-07 17:22:59,242", "component_name": "http_proxy", "component_id": "b'172.31.5.229'", "message": "http_proxy.py:185 - Got updated endpoints: {}."} {"levelname": "INFO", "asctime": "2023-05-07 17:22:59,253", "component_name": "http_proxy", "component_id": "b'172.31.5.229'", "message": "http_proxy.py:185 - Got updated endpoints: {'app_testv2': EndpointInfo(route='/app1', app_name='app')}."} {"levelname": "INFO", "asctime": "2023-05-07 17:23:00,272", "component_name": "http_proxy", "component_id": "b'172.31.5.229'", "message": "http_proxy.py:185 - Got updated endpoints: {'app_testv2': EndpointInfo(route='/app1', app_name='app'), 'app2_testv2': EndpointInfo(route='/app2', app_name='app2')}."} {"levelname": "INFO", "asctime": "2023-05-07 17:23:06,895", "component_name": "http_proxy", "component_id": "b'172.31.5.229'", "request_id": "rzqadzRWFP", "route": "/app1", "app_name": "app", "message": "http_proxy.py:435 - GET 200 4.8ms"} {"levelname": "INFO", "asctime": 
"2023-05-07 17:23:08,168", "component_name": "http_proxy", "component_id": "b'172.31.5.229'", "request_id": "hYjyoiHTPJ", "route": "/app2", "app_name": "app2", "message": "http_proxy.py:435 - GET 200 4.6ms"} {"levelname": "INFO", "asctime": "2023-05-07 17:23:32,596", "component_name": "http_proxy", "component_id": "b'172.31.5.229'", "message": "http_proxy.py:185 - Got updated endpoints: {'app_testv2': EndpointInfo(route='/app1', app_name='app'), 'app2_testv2': EndpointInfo(route='/app2', app_name='app2')}."} {"levelname": "INFO", "asctime": "2023-05-07 17:24:02,716", "component_name": "http_proxy", "component_id": "b'172.31.5.229'", "message": "http_proxy.py:185 - Got updated endpoints: {'app_testv2': EndpointInfo(route='/app1', app_name='app'), 'app2_testv2': EndpointInfo(route='/app2', app_name='app2')}."} {"levelname": "INFO", "asctime": "2023-05-07 17:24:35,044", "component_name": "http_proxy", "component_id": "b'172.31.5.229'", "message": "http_proxy.py:185 - Got updated endpoints: {'app_testv2': EndpointInfo(route='/app1', app_name='app'), 'app2_testv2': EndpointInfo(route='/app2', app_name='app2')}."} ``` deployment: ``` {"levelname": "INFO", "asctime": "2023-05-12 17:38:00,698", "deployment": "app2_Model", "replica": "app2_Model#XlFWYc", "request_id": "OfbLbdKgjT", "route": "/class_method", "application": "app2", "message": "replica.py:440 - Started executing request OfbLbdKgjT"} {"levelname": "INFO", "asctime": "2023-05-12 17:38:00,699", "deployment": "app2_Model", "replica": "app2_Model#XlFWYc", "request_id": "OfbLbdKgjT", "route": "/class_method", "application": "app2", "message": "test_logging.py:200 - user log message from class method"} {"levelname": "INFO", "asctime": "2023-05-12 17:38:00,699", "deployment": "app2_Model", "replica": "app2_Model#XlFWYc", "request_id": "OfbLbdKgjT", "route": "/class_method", "application": "app2", "message": "replica.py:537 - __CALL__ OK 0.6ms"} ``` --- python/ray/serve/_private/common.py | 4 + 
python/ray/serve/_private/constants.py | 25 ++++ python/ray/serve/_private/logging_utils.py | 144 +++++++++++++++++---- python/ray/serve/_private/replica.py | 10 +- python/ray/serve/tests/test_logging.py | 124 +++++++++++++++++- 5 files changed, 274 insertions(+), 33 deletions(-) diff --git a/python/ray/serve/_private/common.py b/python/ray/serve/_private/common.py index 986071739061..e2e0954eba62 100644 --- a/python/ray/serve/_private/common.py +++ b/python/ray/serve/_private/common.py @@ -373,3 +373,7 @@ class HTTPProxyStatus(str, Enum): STARTING = "STARTING" HEALTHY = "HEALTHY" UNHEALTHY = "UNHEALTHY" + + +class ServeComponentType(str, Enum): + DEPLOYMENT = "deployment" diff --git a/python/ray/serve/_private/constants.py b/python/ray/serve/_private/constants.py index ce7a4cfa78e0..c92ae0937947 100644 --- a/python/ray/serve/_private/constants.py +++ b/python/ray/serve/_private/constants.py @@ -160,3 +160,28 @@ class ServeHandleType(str, Enum): "Please see the documentation for ServeDeploySchema for more details on multi-app " "config files." ) + +# Jsonify the log messages +RAY_SERVE_ENABLE_JSON_LOGGING = os.environ.get("RAY_SERVE_ENABLE_JSON_LOGGING") == "1" +# Logging format attributes +SERVE_LOG_REQUEST_ID = "request_id" +SERVE_LOG_ROUTE = "route" +SERVE_LOG_APPLICATION = "application" +SERVE_LOG_DEPLOYMENT = "deployment" +SERVE_LOG_REPLICA = "replica" +SERVE_LOG_COMPONENT = "component_name" +SERVE_LOG_COMPONENT_ID = "component_id" +SERVE_LOG_MESSAGE = "message" +# This is a reserved for python logging module attribute, it should not be changed. 
+SERVE_LOG_LEVEL_NAME = "levelname" +SERVE_LOG_TIME = "asctime" + +# Logging format with record key to format string dict +SERVE_LOG_RECORD_FORMAT = { + SERVE_LOG_REQUEST_ID: "%(request_id)s", + SERVE_LOG_ROUTE: "%(route)s", + SERVE_LOG_APPLICATION: "%(application)s", + SERVE_LOG_MESSAGE: "%(filename)s:%(lineno)d - %(message)s", + SERVE_LOG_LEVEL_NAME: "%(levelname)s", + SERVE_LOG_TIME: "%(asctime)s", +} diff --git a/python/ray/serve/_private/logging_utils.py b/python/ray/serve/_private/logging_utils.py index 81066603f6ef..9a96fcbf47e1 100644 --- a/python/ray/serve/_private/logging_utils.py +++ b/python/ray/serve/_private/logging_utils.py @@ -1,18 +1,84 @@ import logging import os from typing import Optional +import json +import copy import ray -from ray.serve._private.constants import DEBUG_LOG_ENV_VAR, SERVE_LOGGER_NAME +from ray.serve._private.constants import ( + DEBUG_LOG_ENV_VAR, + SERVE_LOGGER_NAME, + RAY_SERVE_ENABLE_JSON_LOGGING, + SERVE_LOG_RECORD_FORMAT, + SERVE_LOG_REQUEST_ID, + SERVE_LOG_ROUTE, + SERVE_LOG_APPLICATION, + SERVE_LOG_MESSAGE, + SERVE_LOG_DEPLOYMENT, + SERVE_LOG_COMPONENT, + SERVE_LOG_COMPONENT_ID, + SERVE_LOG_TIME, + SERVE_LOG_LEVEL_NAME, + SERVE_LOG_REPLICA, +) +from ray.serve._private.common import ServeComponentType LOG_FILE_FMT = "{component_name}_{component_id}.log" -COMPONENT_LOG_FMT = ( - "%(levelname)s %(asctime)s {component_name} {component_id} " # noqa:E501 -) -MESSAGE_FMT = "%(filename)s:%(lineno)d - %(message)s" -REQUEST_ID_FMT = "%(request_id)s " -ROUTE_FMT = "%(route)s " + + +class ServeJSONFormatter(logging.Formatter): + """Serve Logging Json Formatter + + The formatter will generate the json log format on the fly + based on the field of record. 
+ """ + + def __init__( + self, + component_name: str, + component_id: str, + component_type: Optional[ServeComponentType] = None, + ): + self.component_log_fmt = { + SERVE_LOG_LEVEL_NAME: SERVE_LOG_RECORD_FORMAT[SERVE_LOG_LEVEL_NAME], + SERVE_LOG_TIME: SERVE_LOG_RECORD_FORMAT[SERVE_LOG_TIME], + } + if component_type and component_type == ServeComponentType.DEPLOYMENT: + self.component_log_fmt[SERVE_LOG_DEPLOYMENT] = component_name + self.component_log_fmt[SERVE_LOG_REPLICA] = component_id + else: + self.component_log_fmt[SERVE_LOG_COMPONENT] = component_name + self.component_log_fmt[SERVE_LOG_COMPONENT_ID] = component_id + + def format(self, record: logging.LogRecord) -> str: + """Format the log record into json format. + + Args: + record: The log record to be formatted. + + Returns: + The formatted log record in json format. + """ + record_format = copy.deepcopy(self.component_log_fmt) + if SERVE_LOG_REQUEST_ID in record.__dict__: + record_format[SERVE_LOG_REQUEST_ID] = SERVE_LOG_RECORD_FORMAT[ + SERVE_LOG_REQUEST_ID + ] + if SERVE_LOG_ROUTE in record.__dict__: + record_format[SERVE_LOG_ROUTE] = SERVE_LOG_RECORD_FORMAT[SERVE_LOG_ROUTE] + if SERVE_LOG_APPLICATION in record.__dict__: + record_format[SERVE_LOG_APPLICATION] = SERVE_LOG_RECORD_FORMAT[ + SERVE_LOG_APPLICATION + ] + + record_format[SERVE_LOG_MESSAGE] = SERVE_LOG_RECORD_FORMAT[SERVE_LOG_MESSAGE] + + # create a formatter using the format string + formatter = logging.Formatter(json.dumps(record_format)) + + # format the log record using the formatter + return formatter.format(record) class ServeFormatter(logging.Formatter): @@ -21,22 +87,39 @@ class ServeFormatter(logging.Formatter): The formatter will generate the log format on the fly based on the field of record. 
""" - def __init__(self, component_name: str, component_id: str): - self.component_log_fmt = COMPONENT_LOG_FMT.format( + COMPONENT_LOG_FMT = f"%({SERVE_LOG_LEVEL_NAME})s %({SERVE_LOG_TIME})s {{{SERVE_LOG_COMPONENT}}} {{{SERVE_LOG_COMPONENT_ID}}} " # noqa:E501 + + def __init__( + self, + component_name: str, + component_id: str, + ): + self.component_log_fmt = ServeFormatter.COMPONENT_LOG_FMT.format( component_name=component_name, component_id=component_id ) - def format(self, record): - # generate a format string based on the record field. - cur_format = self.component_log_fmt - if "request_id" in record.__dict__: - cur_format += REQUEST_ID_FMT - if "route" in record.__dict__: - cur_format += ROUTE_FMT - cur_format += MESSAGE_FMT + def format(self, record: logging.LogRecord) -> str: + """Format the log record into the format string. + + Args: + record: The log record to be formatted. + + Returns: + The formatted log record in string format. + """ + record_format = self.component_log_fmt + record_formats_attrs = [] + if SERVE_LOG_REQUEST_ID in record.__dict__: + record_formats_attrs.append(SERVE_LOG_RECORD_FORMAT[SERVE_LOG_REQUEST_ID]) + if SERVE_LOG_ROUTE in record.__dict__: + record_formats_attrs.append(SERVE_LOG_RECORD_FORMAT[SERVE_LOG_ROUTE]) + if SERVE_LOG_APPLICATION in record.__dict__: + record_formats_attrs.append(SERVE_LOG_RECORD_FORMAT[SERVE_LOG_APPLICATION]) + record_formats_attrs.append(SERVE_LOG_RECORD_FORMAT[SERVE_LOG_MESSAGE]) + record_format += " ".join(record_formats_attrs) # create a formatter using the format string - formatter = logging.Formatter(cur_format) + formatter = logging.Formatter(record_format) # format the log record using the formatter return formatter.format(record) @@ -75,7 +158,7 @@ def configure_component_logger( *, component_name: str, component_id: str, - component_type: Optional[str] = None, + component_type: Optional[ServeComponentType] = None, log_level: int = logging.INFO, max_bytes: Optional[int] = None, backup_count: 
Optional[int] = None, @@ -99,9 +182,11 @@ def record_factory(*args, **kwargs): request_context = ray.serve.context._serve_request_context.get() record = factory(*args, **kwargs) if request_context.route: - record.route = request_context.route + setattr(record, SERVE_LOG_ROUTE, request_context.route) if request_context.request_id: - record.request_id = request_context.request_id + setattr(record, SERVE_LOG_REQUEST_ID, request_context.request_id) + if request_context.app_name: + setattr(record, SERVE_LOG_APPLICATION, request_context.app_name) return record logging.setLogRecordFactory(record_factory) @@ -119,17 +204,28 @@ def record_factory(*args, **kwargs): max_bytes = ray._private.worker._global_node.max_bytes if backup_count is None: backup_count = ray._private.worker._global_node.backup_count + + # For DEPLOYMENT component type, we want to log the deployment name + # instead of adding the component type to the component name. + component_log_file_name = component_name if component_type is not None: - component_name = f"{component_type}_{component_name}" + component_log_file_name = f"{component_type}_{component_name}" + if component_type != ServeComponentType.DEPLOYMENT: + component_name = f"{component_type}_{component_name}" log_file_name = LOG_FILE_FMT.format( - component_name=component_name, component_id=component_id + component_name=component_log_file_name, component_id=component_id ) file_handler = logging.handlers.RotatingFileHandler( os.path.join(logs_dir, log_file_name), maxBytes=max_bytes, backupCount=backup_count, ) - file_handler.setFormatter(ServeFormatter(component_name, component_id)) + if RAY_SERVE_ENABLE_JSON_LOGGING: + file_handler.setFormatter( + ServeJSONFormatter(component_name, component_id, component_type) + ) + else: + file_handler.setFormatter(ServeFormatter(component_name, component_id)) logger.addHandler(file_handler) diff --git a/python/ray/serve/_private/replica.py b/python/ray/serve/_private/replica.py index e1b1c8461eed..6717b0ee75ae 
100644 --- a/python/ray/serve/_private/replica.py +++ b/python/ray/serve/_private/replica.py @@ -19,7 +19,11 @@ from ray._private.async_compat import sync_to_async from ray.serve._private.autoscaling_metrics import start_metrics_pusher -from ray.serve._private.common import HEALTH_CHECK_CONCURRENCY_GROUP, ReplicaTag +from ray.serve._private.common import ( + HEALTH_CHECK_CONCURRENCY_GROUP, + ReplicaTag, + ServeComponentType, +) from ray.serve.config import DeploymentConfig from ray.serve._private.constants import ( HEALTH_CHECK_METHOD, @@ -76,7 +80,7 @@ async def __init__( app_name: str = None, ): configure_component_logger( - component_type="deployment", + component_type=ServeComponentType.DEPLOYMENT, component_name=deployment_name, component_id=replica_tag, ) @@ -521,7 +525,7 @@ async def handle_request(self, request: Query) -> asyncio.Future: # handle can pass the correct request context to subsequent replicas. ray.serve.context._serve_request_context.set( ray.serve.context.RequestContext( - request.metadata.route, request.metadata.request_id + request.metadata.route, request.metadata.request_id, self.app_name ) ) diff --git a/python/ray/serve/tests/test_logging.py b/python/ray/serve/tests/test_logging.py index 561dceaa28b2..44e4f79ba9d7 100644 --- a/python/ray/serve/tests/test_logging.py +++ b/python/ray/serve/tests/test_logging.py @@ -7,11 +7,21 @@ import requests import starlette import pytest +import json import ray from ray import serve -from ray._private.test_utils import wait_for_condition import re +from ray.serve._private.logging_utils import ServeJSONFormatter +from ray.serve._private.common import ServeComponentType +from ray._private.test_utils import wait_for_condition + + +@pytest.fixture +def serve_and_ray_shutdown(): + serve.shutdown() + ray.shutdown() + yield def set_logging_config(monkeypatch, max_bytes, backup_count): @@ -156,9 +166,20 @@ def __call__(self, *args): assert replica_tag not in f.getvalue() -def 
test_context_information_in_logging(serve_instance): +@pytest.mark.parametrize("json_log_format", [False, True]) +def test_context_information_in_logging(serve_and_ray_shutdown, json_log_format): """Make sure all context information exist in the log message""" + if json_log_format: + serve_json_log_format = "1" + else: + serve_json_log_format = "0" + ray.init( + runtime_env={ + "env_vars": {"RAY_SERVE_ENABLE_JSON_LOGGING": serve_json_log_format} + } + ) + logger = logging.getLogger("ray.serve") @serve.deployment @@ -168,6 +189,9 @@ def fn(*args): return { "request_id": request_context.request_id, "route": request_context.route, + "app_name": request_context.app_name, + "log_file": logger.handlers[1].baseFilename, + "replica": serve.get_replica_context().replica_tag, } @serve.deployment @@ -178,6 +202,9 @@ def __call__(self, req: starlette.requests.Request): return { "request_id": request_context.request_id, "route": request_context.route, + "app_name": request_context.app_name, + "log_file": logger.handlers[1].baseFilename, + "replica": serve.get_replica_context().replica_tag, } serve.run(fn.bind(), name="app1", route_prefix="/fn") @@ -190,14 +217,14 @@ def __call__(self, req: starlette.requests.Request): # Check the component log expected_log_infos = [ - f"{resp['request_id']} {resp['route']} replica.py", - f"{resp2['request_id']} {resp2['route']} replica.py", + f"{resp['request_id']} {resp['route']} {resp['app_name']} replica.py", + f"{resp2['request_id']} {resp2['route']} {resp2['app_name']} replica.py", ] # Check User log user_log_regexes = [ - f".*{resp['request_id']} {resp['route']}.* user func.*", - f".*{resp2['request_id']} {resp2['route']}.* user log " + f".*{resp['request_id']} {resp['route']} {resp['app_name']}.* user func.*", + f".*{resp2['request_id']} {resp2['route']} {resp2['app_name']}.* user log " "message from class method.*", ] @@ -213,8 +240,93 @@ def check_log(): for regex in user_log_regexes: assert re.findall(regex, logs_content) != [] + # 
Check stream log check_log() + # Check user log file + if json_log_format: + user_method_log_regexes = [ + f'.*"deployment": "app1_fn", ' + f'"replica": "{resp["replica"]}", ' + f'"request_id": "{resp["request_id"]}", ' + f'"route": "{resp["route"]}", ' + f'"application": "{resp["app_name"]}", "message":.* user func.*', + ] + user_class_method_log_regexes = [ + f'.*"deployment": "app2_Model", ' + f'"replica": "{resp2["replica"]}", ' + f'"request_id": "{resp2["request_id"]}", ' + f'"route": "{resp2["route"]}", ' + f'"application": "{resp2["app_name"]}", "message":.* user log ' + "message from class method.*", + ] + else: + user_method_log_regexes = [ + f".*{resp['request_id']} {resp['route']} {resp['app_name']}.* " + f"user func.*", + ] + user_class_method_log_regexes = [ + f".*{resp2['request_id']} {resp2['route']} {resp2['app_name']}.* " + f"user log message from class method.*", + ] + + def check_log_file(log_file: str, expected_regex: list): + with open(log_file, "r") as f: + s = f.read() + for regex in expected_regex: + assert re.findall(regex, s) != [] + + check_log_file(resp["log_file"], user_method_log_regexes) + check_log_file(resp2["log_file"], user_class_method_log_regexes) + + +@pytest.mark.parametrize("is_deployment_type_component", [False, True]) +def test_json_log_formatter(is_deployment_type_component): + """Test the json log formatter""" + + if is_deployment_type_component: + component_type = ServeComponentType.DEPLOYMENT + formatter = ServeJSONFormatter("component", "component_id", component_type) + else: + formatter = ServeJSONFormatter("component", "component_id") + init_kwargs = { + "name": "test_log", + "level": logging.DEBUG, + "pathname": "my_path", + "lineno": 1, + "msg": "my_message", + "args": (), + "exc_info": None, + } + record = logging.LogRecord(**init_kwargs) + + def format_and_verify_json_output(record, expected_record: dict): + formatted_record = formatter.format(record) + formatted_record_dict = json.loads(formatted_record) + for 
key in expected_record: + assert key in formatted_record_dict + assert formatted_record_dict[key] == expected_record[key] + + expected_json = {} + if is_deployment_type_component: + expected_json["deployment"] = "component" + expected_json["replica"] = "component_id" + + # Set request id + record.request_id = "request_id" + expected_json["request_id"] = "request_id" + format_and_verify_json_output(record, expected_json) + + # Set route + record.route = "route" + expected_json["route"] = "route" + format_and_verify_json_output(record, expected_json) + + # set application + record.application = "application" + expected_json["application"] = "application" + format_and_verify_json_output(record, expected_json) + if __name__ == "__main__": sys.exit(pytest.main(["-v", "-s", __file__])) From 0554594be76a2f8a095120e45c819985ec9fe268 Mon Sep 17 00:00:00 2001 From: Chen Shen Date: Mon, 15 May 2023 10:21:20 -0700 Subject: [PATCH 383/424] [autoscaler v2][4/n] introducing node-provider and node-provider-config (#34983) Why are these changes needed? this is the stack of PRs to introduce new node_provider for autoscaler v2. Stack of PRs #34976 #34977 #34979 #34983 <- this PR #34985 This PR introduces node provider where instance manager can allocates instances from. 
Implementation-wise, it's a wrapper around the v1 node provider, node launcher and node updater --- .../autoscaler/v2/instance_manager/config.py | 102 +++++++++++++ .../v2/instance_manager/node_provider.py | 136 ++++++++++++++++++ .../v2/instance_manager/ray_installer.py | 66 +++++++++ 3 files changed, 304 insertions(+) create mode 100644 python/ray/autoscaler/v2/instance_manager/config.py create mode 100644 python/ray/autoscaler/v2/instance_manager/node_provider.py create mode 100644 python/ray/autoscaler/v2/instance_manager/ray_installer.py diff --git a/python/ray/autoscaler/v2/instance_manager/config.py b/python/ray/autoscaler/v2/instance_manager/config.py new file mode 100644 index 000000000000..48f81237e206 --- /dev/null +++ b/python/ray/autoscaler/v2/instance_manager/config.py @@ -0,0 +1,102 @@ +import copy +from typing import Any, Dict, List + +from ray.autoscaler._private.util import hash_runtime_conf +from ray.core.generated.instance_manager_pb2 import Instance + + +class NodeProviderConfig(object): + """ + NodeProviderConfig is the helper class to provide instance + related configs.
+ """ + + def __init__(self, node_configs: Dict[str, Any]) -> None: + self._sync_continuously = False + self.update_configs(node_configs) + + def update_configs(self, node_configs: Dict[str, Any]) -> None: + self._node_configs = node_configs + self._calculate_hashes() + self._sync_continuously = self._node_configs.get( + "generate_file_mounts_contents_hash", True + ) + + def _calculate_hashes(self) -> None: + self._runtime_hash, self._file_mounts_contents_hash = hash_runtime_conf( + self._node_configs["file_mounts"], + self._node_configs["cluster_synced_files"], + [ + self._node_configs["worker_setup_commands"], + self._node_configs["worker_start_ray_commands"], + ], + generate_file_mounts_contents_hash=self._node_configs.get( + "generate_file_mounts_contents_hash", True + ), + ) + + def get_node_config(self, instance_type_name: str) -> Dict[str, Any]: + return copy.deepcopy( + self._node_configs["available_node_types"][instance_type_name][ + "node_config" + ] + ) + + def get_docker_config(self, instance_type_name: str) -> Dict[str, Any]: + if "docker" not in self._node_configs: + return {} + docker_config = copy.deepcopy(self._node_configs.get("docker", {})) + node_specific_docker_config = self._node_configs["available_node_types"][ + instance_type_name + ].get("docker", {}) + docker_config.update(node_specific_docker_config) + return docker_config + + def get_worker_start_ray_commands(self, instance: Instance) -> List[str]: + if ( + instance.num_successful_updates > 0 + and not self._node_config_provider.restart_only + ): + return [] + return self._node_configs["worker_start_ray_commands"] + + def get_worker_setup_commands(self, instance: Instance) -> List[str]: + if ( + instance.num_successful_updates > 0 + and self._node_config_provider.restart_only + ): + return [] + + return self._node_configs["available_node_types"][instance.name][ + "worker_setup_commands" + ] + + def get_node_type_specific_config( + self, instance_type_name: str, config_name: str + ) -> 
Any: + config = self._node_config_provider.get_config(config_name) + node_specific_config = self._node_configs["available_node_types"][ + instance_type_name + ] + if config_name in node_specific_config: + config = node_specific_config[config_name] + return config + + def get_config(self, config_name, default=None) -> Any: + return self._node_configs.get(config_name, default) + + @property + def restart_only(self) -> bool: + return self._node_configs.get("restart_only", False) + + @property + def no_restart(self) -> bool: + return self._node_configs.get("no_restart", False) + + @property + def runtime_hash(self) -> str: + return self._runtime_hash + + @property + def file_mounts_contents_hash(self) -> str: + return self._file_mounts_contents_hash diff --git a/python/ray/autoscaler/v2/instance_manager/node_provider.py b/python/ray/autoscaler/v2/instance_manager/node_provider.py new file mode 100644 index 000000000000..6d0a3c92c9c0 --- /dev/null +++ b/python/ray/autoscaler/v2/instance_manager/node_provider.py @@ -0,0 +1,136 @@ +import logging +from abc import ABCMeta, abstractmethod +from typing import Dict, List, Set, override + +from ray.autoscaler._private.node_launcher import BaseNodeLauncher +from ray.autoscaler.node_provider import NodeProvider as NodeProviderV1 +from ray.autoscaler.tags import TAG_RAY_USER_NODE_TYPE +from ray.autoscaler.v2.instance_manager.config import NodeProviderConfig +from ray.core.generated.instance_manager_pb2 import Instance, InstanceType + +logger = logging.getLogger(__name__) + + +class NodeProvider(metaclass=ABCMeta): + """NodeProvider defines the interface for + interacting with cloud provider, such as AWS, GCP, Azure, etc. + """ + + @abstractmethod + def create_nodes(self, instance_type: InstanceType, count: int) -> List[str]: + """Create new nodes synchronously, returns all non-terminated nodes in the cluster. + Note that create_nodes could fail partially. 
+ """ + pass + + @abstractmethod + def async_terminate_nodes(self, cloud_instance_ids: List[str]) -> None: + """ + Terminate nodes asynchronously, returns immediately.""" + pass + + @abstractmethod + def get_non_terminated_nodes( + self, + ) -> Dict[str, Instance]: + """Get all non-terminated nodes in the cluster""" + pass + + @abstractmethod + def get_nodes_by_cloud_id( + self, + cloud_instance_ids: List[str], + ) -> Dict[str, Instance]: + """Get nodes by node ids, including terminated nodes""" + pass + + @abstractmethod + def is_readonly(self) -> bool: + return False + + +class NodeProviderAdapter(NodeProvider): + """ + Warps a NodeProviderV1 to a NodeProvider. + """ + + def __init__( + self, + provider: NodeProviderV1, + node_launcher: BaseNodeLauncher, + instance_config_provider: NodeProviderConfig, + ) -> None: + super().__init__() + self._provider = provider + self._node_launcher = node_launcher + self._config = instance_config_provider + + def _filter_instances( + self, + instances: Dict[str, Instance], + instance_ids_filter: Set[str], + instance_states_filter: Set[int], + ) -> Dict[str, Instance]: + filtered = {} + for instance_id, instance in instances.items(): + if instance_ids_filter and instance_id not in instance_ids_filter: + continue + if instance_states_filter and instance.state not in instance_states_filter: + continue + filtered[instance_id] = instance + return filtered + + @override + def create_nodes(self, instance_type: InstanceType, count: int) -> List[Instance]: + result = self._node_launcher.launch_node( + self._config.get_node_config(instance_type.name), + count, + instance_type.name, + ) + # TODO: we should handle failures where the instance type is + # not available + if result: + return [ + self._get_instance(cloud_instance_id) + for cloud_instance_id in result.keys() + ] + return [] + + @override + def async_terminate_nodes(self, clould_instance_ids: List[str]) -> None: + self._provider.terminate_node(clould_instance_ids) + + @override 
+ def is_readonly(self) -> bool: + return self._provider.is_readonly() + + @override + def get_non_terminated_nodes(self): + clould_instance_ids = self._provider.non_terminated_nodes() + return self.get_nodes_by_id(clould_instance_ids) + + @override + def get_nodes_by_cloud_id( + self, + cloud_instance_ids: List[str], + ) -> Dict[str, Instance]: + instances = {} + for cloud_instance_id in cloud_instance_ids: + instances[cloud_instance_id] = self._get_instance(cloud_instance_id) + return instances + + def _get_instance(self, cloud_instance_id: str) -> Instance: + instance = Instance() + instance.cloud_instance_id = cloud_instance_id + if self._provider.is_running(cloud_instance_id): + instance.state = Instance.STARTING + elif self._provider.is_terminated(cloud_instance_id): + instance.state = Instance.STOPPED + else: + instance.state = Instance.INSTANCE_STATUS_UNSPECIFIED + instance.interal_ip = self._provider.internal_ip(cloud_instance_id) + instance.external_ip = self._provider.external_ip(cloud_instance_id) + instance.instance_type = self._provider.node_tags(cloud_instance_id)[ + TAG_RAY_USER_NODE_TYPE + ] + return instance diff --git a/python/ray/autoscaler/v2/instance_manager/ray_installer.py b/python/ray/autoscaler/v2/instance_manager/ray_installer.py new file mode 100644 index 000000000000..f4f936cbe2b8 --- /dev/null +++ b/python/ray/autoscaler/v2/instance_manager/ray_installer.py @@ -0,0 +1,66 @@ +import logging + +from ray.autoscaler._private.updater import NodeUpdater +from ray.autoscaler._private.util import with_head_node_ip +from ray.autoscaler.node_provider import NodeProvider as NodeProviderV1 +from ray.autoscaler.v2.instance_manager.config import NodeProviderConfig +from ray.core.generated.instance_manager_pb2 import Instance + +logger = logging.getLogger(__name__) + + +class RayInstaller(object): + """ + RayInstaller is responsible for installing ray on the target instance. 
+ """ + + def __init__( + self, + provider: NodeProviderV1, + config: NodeProviderConfig, + ) -> None: + self._provider = provider + self._config = config + + def install_ray(self, instance: Instance, head_node_ip: str) -> bool: + """ + Install ray on the target instance synchronously. + """ + + setup_commands = self._config.get_worker_setup_commands(instance) + ray_start_commands = self._config.get_worker_start_ray_commands(instance) + docker_config = self._config.get_docker_config(instance) + + logger.info( + f"Creating new (spawn_updater) updater thread for node" + f" {instance.cloud_instance_id}." + ) + updater = NodeUpdater( + node_id=instance.instance_id, + provider_config=self._config.get_config("provider"), + provider=self._provider, + auth_config=self._config.get_config("auth"), + cluster_name=self._config.get_config("cluster_name"), + file_mounts=self._config.get_config("file_mounts"), + initialization_commands=with_head_node_ip( + self.get_node_type_specific_config( + instance.instance_id, "initialization_commands" + ), + head_node_ip, + ), + setup_commands=with_head_node_ip(setup_commands, head_node_ip), + ray_start_commands=with_head_node_ip(ray_start_commands, head_node_ip), + runtime_hash=self._config.runtime_hash, + file_mounts_contents_hash=self._config.file_mounts_contents_hash, + is_head_node=False, + cluster_synced_files=self._config.get_config("cluster_synced_files"), + rsync_options={ + "rsync_exclude": self._config.get_config("rsync_exclude"), + "rsync_filter": self._config.get_config("rsync_filter"), + }, + use_internal_ip=True, + docker_config=docker_config, + node_resources=instance.node_resources, + ) + updater.run() + # TODO: handle failures From 779e5aa34f3127382d20de12411d6fd511b9735a Mon Sep 17 00:00:00 2001 From: Max Pumperla Date: Mon, 15 May 2023 19:37:38 +0200 Subject: [PATCH 384/424] [docs] fix map_batches ActorPoolStrategy ref (#35331) Signed-off-by: Max Pumperla --- python/ray/data/dataset.py | 2 +- 1 file changed, 1 
insertion(+), 1 deletion(-) diff --git a/python/ray/data/dataset.py b/python/ray/data/dataset.py index 88ba111615ea..ab690f398003 100644 --- a/python/ray/data/dataset.py +++ b/python/ray/data/dataset.py @@ -456,7 +456,7 @@ def map_batches( per worker instead of once per inference. To transform batches with :ref:`actors `, pass a callable type - to ``fn`` and specify an :class:`~ray.data.ActorPoolStrategy>`. + to ``fn`` and specify an :class:`~ray.data.ActorPoolStrategy`. In the example below, ``CachedModel`` is called on an autoscaling pool of two to eight :ref:`actors `, each allocated one GPU by Ray. From 3f48f737cfad1284d9f18b80736a6585dd210778 Mon Sep 17 00:00:00 2001 From: Kai Fricke Date: Mon, 15 May 2023 21:56:25 +0200 Subject: [PATCH 385/424] [ci/github] Track external code changes (blogs, tutorials) (#35261) This adds a GitHub workflow to track changes to code we're using in external sources (e.g. blog posts or other repositories). If a change to a tracked file is detected in a PR, a comment is added calling out the changed file and the URI of the external resource where it's being used. Also, the label `external-code-affected` is added. In subsequent pushes to the PR, if the changes to tracked files change, the comment is updated. This will enable us to update external sources better. It is very easy to miss subtle changes, especially when a lot of files are changed at the same time. In result some of our external blog posts or tutorials are outdated and use stale APIs. With a comment and label, we can proactively update external sources or filter for them after a new ray release and update in batch. 
Example PR + bot interaction: https://github.com/ray-project/ray/pull/35263 Signed-off-by: Kai Fricke --- .github/workflows/external-code-affected.yml | 127 +++++++++++++++++++ ci/lint/format.sh | 4 +- doc/external/external_code.txt | 6 + 3 files changed, 136 insertions(+), 1 deletion(-) create mode 100644 .github/workflows/external-code-affected.yml create mode 100644 doc/external/external_code.txt diff --git a/.github/workflows/external-code-affected.yml b/.github/workflows/external-code-affected.yml new file mode 100644 index 000000000000..268e88d452f8 --- /dev/null +++ b/.github/workflows/external-code-affected.yml @@ -0,0 +1,127 @@ +# Check if code checked into external resources (blogs, tutorials) +# that we also track in our CI is affected by a PR. +# In that case, we add a label to the PR (`external-code-affected`) and +# add a comment to make sure that the external code still works and is +# eventually updated. +name: External code check + +on: pull_request_target + +jobs: + check-changes: + permissions: write-all + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v2 + with: + ref: ${{ github.event.pull_request.head.sha }} + fetch-depth: 0 + + - name: Check for changes in tracked files + run: | + set -xe + git clone https://github.com/ray-project/buildkite-ci-pipelines.git ./pipelines + + # Find changed files + GIT_DIFF=$(git diff --name-only ${{ github.event.pull_request.base.sha }}..${{ github.event.pull_request.head.sha }}) + + echo "All changed files:" + echo "$GIT_DIFF" + + GIT_DIFF_SERIALIZED=$(echo "$GIT_DIFF" | tr '\n' '|') + echo "GIT_DIFF_SERIALIZED=$GIT_DIFF_SERIALIZED" >> $GITHUB_ENV + + - name: Add label and comment if a tracked file changed + uses: actions/github-script@v5 + with: + github-token: ${{secrets.GITHUB_TOKEN}} + script: | + const { + deserializeIntoArray, + filterFilesByNames, + getCommentContentChanged, + getCommentContentNotChanged, + parseTrackedFilesToURIs, + readFileContent + } = 
require('./pipelines/external_code_tracker/track_code'); + + const fs = require("fs"); + + const commentHeader = `## Attention: External code changed` + const externalCodeFile = "doc/external/external_code.txt" + + // Get existing comments + const existingComments = await github.rest.issues.listComments({ + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: context.issue.number + }); + + // Find comment by the bot that starts with the header + let commentToUpdate = existingComments.data.find(comment => + comment.user.login === 'github-actions[bot]' && comment.body.startsWith(commentHeader) + ); + + // Read and parse external_code.txt file + let externCodeFileContent = fs.readFileSync(externalCodeFile, "utf8"); + let trackedFilesToURIs = parseTrackedFilesToURIs(externCodeFileContent); + + console.log("trackedFileToURIs"); + console.log(trackedFilesToURIs); + + // Get changed files from environment variable + let changedFiles = await deserializeIntoArray(process.env.GIT_DIFF_SERIALIZED) + + console.log("changedFiles"); + console.log(changedFiles); + + // Filter associative array + let changedFileToURIs = filterFilesByNames(trackedFilesToURIs, changedFiles); + + console.log("changedFileToURIs"); + console.log(changedFileToURIs); + console.log(changedFileToURIs.length); + + if (Object.keys(changedFileToURIs).length === 0) { + console.log("No changes to tracked files detected"); + commentBody = getCommentContentNotChanged(commentHeader); + if (commentToUpdate && commentBody !== commentToUpdate.body) { + await github.rest.issues.updateComment({ + owner: context.repo.owner, + repo: context.repo.repo, + comment_id: commentToUpdate.id, + body: commentBody + }); + } + } else { + console.log("Changes to tracked files detected"); + commentBody = getCommentContentChanged(commentHeader, changedFileToURIs); + + if (commentToUpdate) { + // Only update if content changed + if (commentBody !== commentToUpdate.body) { + await github.rest.issues.updateComment({ + 
owner: context.repo.owner, + repo: context.repo.repo, + comment_id: commentToUpdate.id, + body: commentBody + }); + } + } else { + await github.rest.issues.createComment({ + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: context.issue.number, + body: commentBody + }); + } + await github.rest.issues.addLabels({ + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: context.issue.number, + labels: ['external-code-affected'] + }); + + } + diff --git a/ci/lint/format.sh b/ci/lint/format.sh index 7dfc6be0dafe..309cdfa52c42 100755 --- a/ci/lint/format.sh +++ b/ci/lint/format.sh @@ -155,12 +155,14 @@ BLACK_EXCLUDES=( `'python/ray/core/src/ray/gcs/*|'` `'python/ray/thirdparty_files/*|'` `'python/ray/_private/thirdparty/*|'` - `'python/ray/serve/tests/test_config_files/syntax_error\.py' + `'python/ray/serve/tests/test_config_files/syntax_error\.py|'` + `'doc/external/*' ) GIT_LS_EXCLUDES=( ':(exclude)python/ray/cloudpickle/' ':(exclude)python/ray/_private/runtime_env/_clonevirtualenv.py' + ':(exclude)doc/external/' ) JAVA_EXCLUDES=( diff --git a/doc/external/external_code.txt b/doc/external/external_code.txt new file mode 100644 index 000000000000..7eec59ed3a7a --- /dev/null +++ b/doc/external/external_code.txt @@ -0,0 +1,6 @@ +# Mapping from file to external URI. +# If the file is touched in a PR, a comment is posted +# by a bot to remind the user to update the contents at +# the external URI + +# Please keep this as the last line. From 957f51cbba5576137a73544697caa497c1abcbbf Mon Sep 17 00:00:00 2001 From: Balaji Veeramani Date: Mon, 15 May 2023 12:57:53 -0700 Subject: [PATCH 386/424] [Data] Improve compute validation error (#35234) We repeat the validation code six times. I've abstracted the validation into a function to avoid code duplication. I've also fully qualified ActorPoolStrategy in the error message, so users don't need to search how to import it. 
--------- Signed-off-by: Balaji Veeramani --- .../data/_internal/execution/legacy_compat.py | 12 ++--- .../data/_internal/planner/plan_udf_map_op.py | 9 +--- python/ray/data/_internal/util.py | 20 +++++++- python/ray/data/dataset.py | 48 ++----------------- 4 files changed, 29 insertions(+), 60 deletions(-) diff --git a/python/ray/data/_internal/execution/legacy_compat.py b/python/ray/data/_internal/execution/legacy_compat.py index 4d5098f57b77..907c368a2ffa 100644 --- a/python/ray/data/_internal/execution/legacy_compat.py +++ b/python/ray/data/_internal/execution/legacy_compat.py @@ -11,7 +11,7 @@ from ray.data._internal.logical.util import record_operators_usage from ray.data.context import DataContext from ray.types import ObjectRef -from ray.data.block import Block, BlockMetadata, List +from ray.data.block import Block, BlockMetadata, CallableClass, List from ray.data.datasource import ReadTask from ray.data._internal.stats import StatsDict, DatasetStats from ray.data._internal.stage_impl import ( @@ -22,8 +22,6 @@ from ray.data._internal.lazy_block_list import LazyBlockList from ray.data._internal.compute import ( get_compute, - CallableClass, - TaskPoolStrategy, ActorPoolStrategy, ) from ray.data._internal.memory_tracing import trace_allocation @@ -38,6 +36,7 @@ RefBundle, TaskContext, ) +from ray.data._internal.util import validate_compute from ray.data._internal.execution.util import make_callable_class_concurrent # Warn about tasks larger than this. @@ -267,16 +266,11 @@ def _stage_to_operator(stage: Stage, input_op: PhysicalOperator) -> PhysicalOper if isinstance(stage, OneToOneStage): compute = get_compute(stage.compute) + validate_compute(stage.fn, compute) block_fn = stage.block_fn if stage.fn: if isinstance(stage.fn, CallableClass): - if isinstance(compute, TaskPoolStrategy): - raise ValueError( - "``compute`` must be specified when using a callable class, " - "and must specify the actor compute strategy. 
" - "For example, use ``compute=ActorPoolStrategy(size=n)``." - ) assert isinstance(compute, ActorPoolStrategy) fn_constructor_args = stage.fn_constructor_args or () diff --git a/python/ray/data/_internal/planner/plan_udf_map_op.py b/python/ray/data/_internal/planner/plan_udf_map_op.py index 31f931d251f2..e3427443b571 100644 --- a/python/ray/data/_internal/planner/plan_udf_map_op.py +++ b/python/ray/data/_internal/planner/plan_udf_map_op.py @@ -3,7 +3,6 @@ import ray from ray.data._internal.compute import ( ActorPoolStrategy, - TaskPoolStrategy, get_compute, ) from ray.data._internal.execution.interfaces import PhysicalOperator, TaskContext @@ -20,6 +19,7 @@ from ray.data._internal.planner.flat_map import generate_flat_map_fn from ray.data._internal.planner.map_batches import generate_map_batches_fn from ray.data._internal.planner.map_rows import generate_map_rows_fn +from ray.data._internal.util import validate_compute from ray.data.block import Block, CallableClass @@ -47,14 +47,9 @@ def _plan_udf_map_op( raise ValueError(f"Found unknown logical operator during planning: {op}") compute = get_compute(op._compute) + validate_compute(op._fn, compute) if isinstance(op._fn, CallableClass): - if isinstance(compute, TaskPoolStrategy): - raise ValueError( - "``compute`` must be specified when using a callable class, " - "and must specify the actor compute strategy. " - "For example, use ``compute=ActorPoolStrategy(size=n)``." 
- ) assert isinstance(compute, ActorPoolStrategy) fn_constructor_args = op._fn_constructor_args or () diff --git a/python/ray/data/_internal/util.py b/python/ray/data/_internal/util.py index 9751fea8009d..d6a7efce4ff6 100644 --- a/python/ray/data/_internal/util.py +++ b/python/ray/data/_internal/util.py @@ -18,7 +18,8 @@ from ray.util.placement_group import PlacementGroup import pyarrow import pandas - from ray.data.block import Block, BlockMetadata + from ray.data._internal.compute import ComputeStrategy + from ray.data.block import Block, BlockMetadata, UserDefinedFunction logger = logging.getLogger(__name__) @@ -411,6 +412,23 @@ def _split_list(arr: List[Any], num_splits: int) -> List[List[Any]]: return splits +def validate_compute( + fn: "UserDefinedFunction", compute: Optional[Union[str, "ComputeStrategy"]] +) -> None: + # Lazily import these objects to avoid circular imports. + from ray.data._internal.compute import TaskPoolStrategy + from ray.data.block import CallableClass + + if isinstance(fn, CallableClass) and ( + compute is None or compute == "tasks" or isinstance(compute, TaskPoolStrategy) + ): + raise ValueError( + "``compute`` must be specified when using a CallableClass, and must " + f"specify the actor compute strategy, but got: {compute}. " + "For example, use ``compute=ray.data.ActorPoolStrategy(size=n)``." + ) + + def capfirst(s: str): """Capitalize the first letter of a string diff --git a/python/ray/data/dataset.py b/python/ray/data/dataset.py index ab690f398003..6cda9efc8be8 100644 --- a/python/ray/data/dataset.py +++ b/python/ray/data/dataset.py @@ -71,6 +71,7 @@ from ray.data._internal.util import ( _estimate_available_parallelism, _is_local_scheme, + validate_compute, ConsumptionAPI, ) from ray.data._internal.pandas_block import PandasBlockSchema @@ -347,17 +348,7 @@ def map( Call this method to transform batches of data. It's faster and more flexible than :meth:`~Dataset.map` and :meth:`~Dataset.flat_map`. 
""" - if isinstance(fn, CallableClass) and ( - compute is None - or compute == "tasks" - or isinstance(compute, TaskPoolStrategy) - ): - raise ValueError( - "``compute`` must be specified when using a CallableClass, and must " - f"specify the actor compute strategy, but got: {compute}. " - "For example, use ``compute=ActorPoolStrategy(size=n)``." - ) - + validate_compute(fn, compute) self._warn_slow() transform_fn = generate_map_rows_fn() @@ -571,16 +562,7 @@ def map_batches( f"{batch_format}" ) - if isinstance(fn, CallableClass) and ( - compute is None - or compute == "tasks" - or isinstance(compute, TaskPoolStrategy) - ): - raise ValueError( - "``compute`` must be specified when using a CallableClass, and must " - f"specify the actor compute strategy, but got: {compute}. " - "For example, use ``compute=ActorPoolStrategy(size=n)``." - ) + validate_compute(fn, compute) if fn_constructor_args is not None or fn_constructor_kwargs is not None: if compute is None or ( @@ -829,17 +811,7 @@ def flat_map( This method isn't recommended because it's slow; call :meth:`~Dataset.map_batches` instead. """ - if isinstance(fn, CallableClass) and ( - compute is None - or compute == "tasks" - or isinstance(compute, TaskPoolStrategy) - ): - raise ValueError( - "``compute`` must be specified when using a CallableClass, and must " - f"specify the actor compute strategy, but got: {compute}. " - "For example, use ``compute=ActorPoolStrategy(size=n)``." - ) - + validate_compute(fn, compute) self._warn_slow() transform_fn = generate_flat_map_fn() @@ -891,17 +863,7 @@ def filter( ray_remote_args: Additional resource requirements to request from ray (e.g., num_gpus=1 to request GPUs for the map tasks). """ - if isinstance(fn, CallableClass) and ( - compute is None - or compute == "tasks" - or isinstance(compute, TaskPoolStrategy) - ): - raise ValueError( - "``compute`` must be specified when using a CallableClass, and must " - f"specify the actor compute strategy, but got: {compute}. 
" - "For example, use ``compute=ActorPoolStrategy(size=n)``." - ) - + validate_compute(fn, compute) self._warn_slow() transform_fn = generate_filter_fn() From b438c7a6b377090749694a8b4cb924daca88bf08 Mon Sep 17 00:00:00 2001 From: Balaji Veeramani Date: Mon, 15 May 2023 12:58:57 -0700 Subject: [PATCH 387/424] [Data] Fix inference release test (#35339) 0785e97 broke the inference release test. Since the release test is two years old, I've decided to rewrite the test altogether. --------- Signed-off-by: Balaji Veeramani --- release/nightly_tests/dataset/inference.py | 138 ++++++--------------- 1 file changed, 41 insertions(+), 97 deletions(-) diff --git a/release/nightly_tests/dataset/inference.py b/release/nightly_tests/dataset/inference.py index fc4b49b68264..4534c86a854e 100644 --- a/release/nightly_tests/dataset/inference.py +++ b/release/nightly_tests/dataset/inference.py @@ -1,126 +1,70 @@ -from io import BytesIO -from PIL import Image +import json +import os +import time +from typing import Any, Dict +import numpy as np import torch from torchvision import transforms from torchvision.models import resnet50 import ray -import boto3 -import json -import time -import os -from tqdm import tqdm -import numpy as np - - -class Preprocessor: - def __init__(self): - self.torch_transform = transforms.Compose( - [ - transforms.Resize(224), - transforms.CenterCrop(224), - transforms.ToTensor(), - transforms.Lambda(lambda t: t[:3, ...]), # remove alpha channel - transforms.Normalize( - mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225] - ), - ] - ) - - def __call__(self, img_bytes): - try: - img = Image.open(BytesIO(img_bytes)).convert("RGB") - tensor = self.torch_transform(img) - return tensor - except Exception as e: - raise e -class ImageModel: +class ImageClassifier: def __init__(self): self.model = resnet50(pretrained=True).eval().half().cuda() - def __call__(self, input_tensor_np): - input_tensor = torch.from_numpy(input_tensor_np).half().cuda() + def 
__call__(self, batch: Dict[str, np.ndarray]) -> Dict[str, np.ndarray]: + inputs = torch.from_numpy(batch["image"]).half().cuda() with torch.no_grad(): - output_tensor = self.model(input_tensor) - result = torch.argmax(output_tensor, dim=1).cpu() - return result.numpy() - - -def get_paths(bucket, path, max_files=100 * 1000): - s3 = boto3.resource("s3") - s3_objects = s3.Bucket(bucket).objects.filter(Prefix=path).limit(max_files).all() - materialized = [(obj.bucket_name, obj.key) for obj in tqdm(s3_objects)] - return materialized - - -def preprocess(batch): - preprocessor = Preprocessor() - return {"bytes": preprocessor(batch["bytes"])} - - -infer_initialized = False -model_fn = None - + outputs = self.model(inputs) + predictions = torch.argmax(outputs, dim=1).cpu() + batch["predictions"] = predictions + return batch + + +transform = transforms.Compose( + [ + transforms.ToTensor(), + transforms.Resize(224), + transforms.CenterCrop(224), + transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), + ] +) -def infer(batch): - global infer_initialized, model_fn - if not infer_initialized: - infer_initialized = True - model_fn = ImageModel() - ndarr_obj = batch.values - input_tensor_np = np.array([img.numpy() for img in ndarr_obj.reshape(-1)]) - return {"out": list(model_fn(input_tensor_np))} +def preprocess(record: Dict[str, Any]) -> Dict[str, Any]: + record["image"] = transform(record["image"]) + return record -ray.init() start_time = time.time() -print("Downloading...") -ds = ray.data.read_binary_files( - "s3://anyscale-data/small-images/", - parallelism=1000, - ray_remote_args={"num_cpus": 0.5}, +ds = ( + ray.data.read_images( + "s3://anyscale-data/small-images/", + parallelism=1000, + ray_remote_args={"num_cpus": 0.5}, + mode="RGB", + ) + .map(preprocess) + .map_batches( + ImageClassifier, + num_gpus=0.25, + batch_size=128, + compute=ray.data.ActorPoolStrategy(), + ) + .materialize() ) -# Do a blocking map so that we can measure the download time. 
-ds = ds.map(lambda x: x).materialize() - -end_download_time = time.time() -print("Preprocessing...") -ds = ds.map(preprocess).materialize() -end_preprocess_time = time.time() -print("Inferring...") -# NOTE: set a small batch size to avoid OOM on GRAM when doing inference. -ds = ds.map_batches( - infer, - num_gpus=0.25, - batch_size=128, - batch_format="pandas", - compute=ray.data.ActorPoolStrategy(), -).materialize() - -end_time = time.time() - -download_time = end_download_time - start_time -preprocess_time = end_preprocess_time - end_download_time -infer_time = end_time - end_preprocess_time -total_time = end_time - start_time -print("Download time", download_time) -print("Preprocess time", preprocess_time) -print("Infer time", infer_time) +total_time = time.time() - start_time print("total time", total_time) if "TEST_OUTPUT_JSON" in os.environ: out_file = open(os.environ["TEST_OUTPUT_JSON"], "w") results = { - "download_time": download_time, - "preprocess_time": preprocess_time, - "inference_time": infer_time, "total_time": total_time, } json.dump(results, out_file) From 64c94d1c2fecd801e7b8ec91007555aae62f5ff2 Mon Sep 17 00:00:00 2001 From: Balaji Veeramani Date: Mon, 15 May 2023 12:59:53 -0700 Subject: [PATCH 388/424] [Data] Improve `Schema` representation (#35278) The current representation doesn't make it clear that the keys represent column names. 
This can be especially confusing when your dataset contains one column (e.g., Schema({'text': DataType(string)})) --------- Signed-off-by: Balaji Veeramani --- doc/source/data/loading-data.rst | 4 ++- python/ray/data/dataset.py | 22 +++++++++++++--- python/ray/data/tests/test_consumption.py | 23 +++++++++++++++++ python/ray/data/tests/test_strict_mode.py | 31 +++++++++++------------ 4 files changed, 59 insertions(+), 21 deletions(-) diff --git a/doc/source/data/loading-data.rst b/doc/source/data/loading-data.rst index 8f005e7b6b7f..0c6684f6a3e6 100644 --- a/doc/source/data/loading-data.rst +++ b/doc/source/data/loading-data.rst @@ -37,7 +37,9 @@ Generating Synthetic Data >>> import ray >>> ds = ray.data.range_tensor(100 * 64 * 64, shape=(64, 64)) >>> ds.schema() - Schema({'data': numpy.ndarray(shape=(64, 64), dtype=int64)}) + Column Type + ------ ---- + data numpy.ndarray(shape=(64, 64), dtype=int64) >>> ds.show(1) {'data': array([[0, 0, 0, ..., 0, 0, 0], [0, 0, 0, ..., 0, 0, 0], diff --git a/python/ray/data/dataset.py b/python/ray/data/dataset.py index 6cda9efc8be8..b84fc2ad703e 100644 --- a/python/ray/data/dataset.py +++ b/python/ray/data/dataset.py @@ -4430,11 +4430,25 @@ def types(self) -> List[Union[Literal[object], "pyarrow.DataType"]]: def __eq__(self, other): return isinstance(other, Schema) and other.base_schema == self.base_schema - def __str__(self): - return f"Schema({dict(zip(self.names, self.types))})" - def __repr__(self): - return str(self) + column_width = max([len(name) for name in self.names] + [len("Column")]) + padding = 2 + + output = "Column" + output += " " * ((column_width + padding) - len("Column")) + output += "Type\n" + + output += "-" * len("Column") + output += " " * ((column_width + padding) - len("Column")) + output += "-" * len("Type") + "\n" + + for name, type in zip(self.names, self.types): + output += name + output += " " * ((column_width + padding) - len(name)) + output += f"{type}\n" + + output = output.rstrip() + return output 
def _get_size_bytes(block: Block) -> int: diff --git a/python/ray/data/tests/test_consumption.py b/python/ray/data/tests/test_consumption.py index 25b979175eae..18d5605e9d28 100644 --- a/python/ray/data/tests/test_consumption.py +++ b/python/ray/data/tests/test_consumption.py @@ -249,6 +249,29 @@ def test_schema_lazy(ray_start_regular_shared): assert ds._plan.execute()._num_computed() == 0 +def test_schema_repr(ray_start_regular_shared): + ds = ray.data.from_items([{"text": "spam", "number": 0}]) + # fmt: off + expected_repr = ( + "Column Type\n" + "------ ----\n" + "text string\n" + "number int64" + ) + # fmt:on + assert repr(ds.schema()) == expected_repr + + ds = ray.data.from_items([{"long_column_name": "spam"}]) + # fmt: off + expected_repr = ( + "Column Type\n" + "------ ----\n" + "long_column_name string" + ) + # fmt: on + assert repr(ds.schema()) == expected_repr + + def test_count_lazy(ray_start_regular_shared): ds = ray.data.range(100, parallelism=10) # We do not kick off the read task by default. 
diff --git a/python/ray/data/tests/test_strict_mode.py b/python/ray/data/tests/test_strict_mode.py index 30cc5966ee30..c81a25cd9162 100644 --- a/python/ray/data/tests/test_strict_mode.py +++ b/python/ray/data/tests/test_strict_mode.py @@ -183,38 +183,37 @@ def test_strict_compute(ray_start_regular_shared, enable_strict_mode): def test_strict_schema(ray_start_regular_shared, enable_strict_mode): - import pyarrow + import pyarrow as pa + from ray.data.extensions.tensor_extension import ArrowTensorType from ray.data._internal.pandas_block import PandasBlockSchema ds = ray.data.from_items([{"x": 2}]) schema = ds.schema() - assert isinstance(schema.base_schema, pyarrow.lib.Schema) - assert str(schema) == "Schema({'x': DataType(int64)})" + assert isinstance(schema.base_schema, pa.lib.Schema) + assert schema.names == ["x"] + assert schema.types == [pa.int64()] ds = ray.data.from_items([{"x": 2, "y": [1, 2]}]) schema = ds.schema() - assert isinstance(schema.base_schema, pyarrow.lib.Schema) - assert ( - str(schema) - == "Schema({'x': DataType(int64), 'y': ListType(list)})" - ) + assert isinstance(schema.base_schema, pa.lib.Schema) + assert schema.names == ["x", "y"] + assert schema.types == [pa.int64(), pa.list_(pa.int64())] ds = ray.data.from_items([{"x": 2, "y": object(), "z": [1, 2]}]) schema = ds.schema() - assert isinstance(schema.base_schema, PandasBlockSchema) - assert str(schema) == ( - "Schema({'x': DataType(int64), 'y': " - ", 'z': })" - ) + assert schema.names == ["x", "y", "z"] + assert schema.types == [pa.int64(), object, object] ds = ray.data.from_numpy(np.ones((100, 10))) schema = ds.schema() - assert isinstance(schema.base_schema, pyarrow.lib.Schema) - assert str(schema) == "Schema({'data': numpy.ndarray(shape=(10,), dtype=double)})" + assert isinstance(schema.base_schema, pa.lib.Schema) + assert schema.names == ["data"] + assert schema.types == [ArrowTensorType(shape=(10,), dtype=pa.float64())] schema = ds.map_batches(lambda x: x, 
batch_format="pandas").schema() - assert str(schema) == "Schema({'data': numpy.ndarray(shape=(10,), dtype=double)})" assert isinstance(schema.base_schema, PandasBlockSchema) + assert schema.names == ["data"] + assert schema.types == [ArrowTensorType(shape=(10,), dtype=pa.float64())] def test_use_raw_dicts(ray_start_regular_shared, enable_strict_mode): From bf97ca80914207d37eb3dac3609f44947af49cf3 Mon Sep 17 00:00:00 2001 From: Jiajun Yao Date: Mon, 15 May 2023 13:56:51 -0700 Subject: [PATCH 389/424] During GCS restarts, grpc based resource broadcaster should only add ALIVE nodes during initialization (#35349) During GCS restarts, grpc based resource broadcaster should only add ALIVE nodes during initialization. Otherwise it will keep broadcasting messages to dead nodes after restart. Signed-off-by: Jiajun Yao --- src/ray/gcs/gcs_server/gcs_resource_report_poller.cc | 4 +++- src/ray/gcs/gcs_server/grpc_based_resource_broadcaster.cc | 4 +++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/src/ray/gcs/gcs_server/gcs_resource_report_poller.cc b/src/ray/gcs/gcs_server/gcs_resource_report_poller.cc index 9fa2a3efe14d..a1a7fcbd4a48 100644 --- a/src/ray/gcs/gcs_server/gcs_resource_report_poller.cc +++ b/src/ray/gcs/gcs_server/gcs_resource_report_poller.cc @@ -39,7 +39,9 @@ GcsResourceReportPoller::~GcsResourceReportPoller() { Stop(); } void GcsResourceReportPoller::Initialize(const GcsInitData &gcs_init_data) { for (const auto &pair : gcs_init_data.Nodes()) { - HandleNodeAdded(pair.second); + if (pair.second.state() == rpc::GcsNodeInfo::ALIVE) { + HandleNodeAdded(pair.second); + } } } diff --git a/src/ray/gcs/gcs_server/grpc_based_resource_broadcaster.cc b/src/ray/gcs/gcs_server/grpc_based_resource_broadcaster.cc index 5dc8bf3cefcd..e62c1a0825c5 100644 --- a/src/ray/gcs/gcs_server/grpc_based_resource_broadcaster.cc +++ b/src/ray/gcs/gcs_server/grpc_based_resource_broadcaster.cc @@ -36,7 +36,9 @@ GrpcBasedResourceBroadcaster::~GrpcBasedResourceBroadcaster() {} 
void GrpcBasedResourceBroadcaster::Initialize(const GcsInitData &gcs_init_data) { for (const auto &pair : gcs_init_data.Nodes()) { - HandleNodeAdded(pair.second); + if (pair.second.state() == rpc::GcsNodeInfo::ALIVE) { + HandleNodeAdded(pair.second); + } } } From d7daaf7774fa66d2fbf63bc0df4671da98e6452a Mon Sep 17 00:00:00 2001 From: Amog Kamsetty Date: Mon, 15 May 2023 15:05:12 -0700 Subject: [PATCH 390/424] [Data] Improve docstring and warning message for `from_huggingface` (#35206) Corrects return type hint, add docstring example, and log warning message for from_huggingface, per confusions from user feedback. --------- Signed-off-by: amogkam --- doc/requirements-doc.txt | 1 + python/ray/data/read_api.py | 71 +++++++++++++++++++++++++++++-------- 2 files changed, 58 insertions(+), 14 deletions(-) diff --git a/doc/requirements-doc.txt b/doc/requirements-doc.txt index 05156ea26c2a..8ea8d767da4e 100644 --- a/doc/requirements-doc.txt +++ b/doc/requirements-doc.txt @@ -5,6 +5,7 @@ accelerate>=0.17.0 click colorama colorful +datasets # Newer versions of fairscale do not support Python 3.6 even though they still have wheels for it. 
# Have to manually pin it: https://github.com/facebookresearch/fairscale/issues/962 fairscale; python_version >= '3.7' diff --git a/python/ray/data/read_api.py b/python/ray/data/read_api.py index 384c8998d60e..3cf4649c61fe 100644 --- a/python/ray/data/read_api.py +++ b/python/ray/data/read_api.py @@ -267,11 +267,12 @@ def range_tensor(n: int, *, shape: Tuple = (1,), parallelism: int = -1) -> Datas Examples: >>> import ray >>> ds = ray.data.range_tensor(1000, shape=(2, 2)) - >>> ds # doctest: +ellipsis + >>> ds # doctest: +ELLIPSIS Dataset( - num_blocks=..., - num_rows=1000, - schema={data: numpy.ndarray(shape=(2, 2), dtype=int64)}) + num_blocks=..., + num_rows=1000, + schema={data: numpy.ndarray(shape=(2, 2), dtype=int64)} + ) >>> ds.map_batches(lambda arr: arr * 2).take(2) # doctest: +SKIP [array([[0, 0], [0, 0]]), @@ -855,8 +856,8 @@ def read_json( from file paths. If your data adheres to a different partitioning scheme, set the ``partitioning`` parameter. - >>> ds = ray.data.read_json("example://year=2022/month=09/sales.json") # doctest: + SKIP - >>> ds.take(1) # doctest: + SKIP + >>> ds = ray.data.read_json("example://year=2022/month=09/sales.json") # doctest: +SKIP + >>> ds.take(1) # doctest: +SKIP [{'order_number': 10107, 'quantity': 30, 'year': '2022', 'month': '09'} Args: @@ -950,8 +951,8 @@ def read_csv( from file paths. If your data adheres to a different partitioning scheme, set the ``partitioning`` parameter. - >>> ds = ray.data.read_csv("example://year=2022/month=09/sales.csv") # doctest: + SKIP - >>> ds.take(1) # doctest: + SKIP + >>> ds = ray.data.read_csv("example://year=2022/month=09/sales.csv") # doctest: +SKIP + >>> ds.take(1) # doctest: +SKIP [{'order_number': 10107, 'quantity': 30, 'year': '2022', 'month': '09'}] By default, ``read_csv`` reads all files from file paths. 
If you want to filter @@ -1772,20 +1773,52 @@ def from_spark( @PublicAPI def from_huggingface( dataset: Union["datasets.Dataset", "datasets.DatasetDict"], -) -> Union[MaterializedDataset]: +) -> Union[MaterializedDataset, Dict[str, MaterializedDataset]]: """Create a dataset from a Hugging Face Datasets Dataset. This function is not parallelized, and is intended to be used with Hugging Face Datasets that are loaded into memory (as opposed to memory-mapped). + Example: + + .. doctest:: + :options: +ELLIPSIS + + >>> import ray + >>> import datasets + >>> hf_dataset = datasets.load_dataset("tweet_eval", "emotion") + Downloading ... + >>> ray_ds = ray.data.from_huggingface(hf_dataset) + >>> ray_ds + {'train': MaterializedDataset( + num_blocks=1, + num_rows=3257, + schema={text: string, label: int64} + ), 'test': MaterializedDataset( + num_blocks=1, + num_rows=1421, + schema={text: string, label: int64} + ), 'validation': MaterializedDataset( + num_blocks=1, + num_rows=374, + schema={text: string, label: int64} + )} + >>> ray_ds = ray.data.from_huggingface(hf_dataset["train"]) + >>> ray_ds + MaterializedDataset( + num_blocks=1, + num_rows=3257, + schema={text: string, label: int64} + ) + Args: - dataset: A Hugging Face ``Dataset``, or ``DatasetDict``. - ``IterableDataset`` is not supported. + dataset: A Hugging Face Dataset, or DatasetDict. IterableDataset is not + supported. ``IterableDataset`` is not supported. Returns: - MaterializedDataset holding Arrow records from the Hugging Face Dataset, or a - dict of MaterializedDataset in case ``dataset`` is a ``DatasetDict``. + Dataset holding Arrow records from the Hugging Face Dataset, or a dict of + datasets in case dataset is a DatasetDict. 
""" import datasets @@ -1797,12 +1830,22 @@ def convert(ds: "datasets.Dataset") -> Dataset: return ray_ds if isinstance(dataset, datasets.DatasetDict): + available_keys = list(dataset.keys()) + logger.warning( + "You provided a Huggingface DatasetDict which contains multiple " + "datasets. The output of `from_huggingface` is a dictionary of Ray " + "Datasets. To convert just a single Huggingface Dataset to a " + "Ray Dataset, specify a split. For example, " + "`ray.data.from_huggingface(my_dataset_dictionary" + f"['{available_keys[0]}'])`. " + f"Available splits are {available_keys}." + ) return {k: convert(ds) for k, ds in dataset.items()} elif isinstance(dataset, datasets.Dataset): return convert(dataset) else: raise TypeError( - "`dataset` must be a `datasets.Dataset` or `datasets.DatasetDict`, " + "`dataset` must be a `datasets.Dataset` or `datasets.DatasetDict`." f"got {type(dataset)}" ) From 0a51e95391f113bef301398457bef7865723b3c7 Mon Sep 17 00:00:00 2001 From: Amog Kamsetty Date: Mon, 15 May 2023 15:06:48 -0700 Subject: [PATCH 391/424] [Data] Add `column` API to Dataset (#35241) Adds columns API to Dataset to be able to see the columns of the Dataset. 
--------- Signed-off-by: amogkam --- doc/source/data/api/dataset.rst | 1 + python/ray/data/dataset.py | 37 ++++++++++++++++++++--- python/ray/data/tests/test_consumption.py | 9 ++++++ 3 files changed, 43 insertions(+), 4 deletions(-) diff --git a/doc/source/data/api/dataset.rst b/doc/source/data/api/dataset.rst index b75481159c0b..27755db4c03a 100644 --- a/doc/source/data/api/dataset.rst +++ b/doc/source/data/api/dataset.rst @@ -126,6 +126,7 @@ Inspecting Metadata :toctree: doc/ Dataset.count + Dataset.columns Dataset.schema Dataset.default_batch_format Dataset.num_blocks diff --git a/python/ray/data/dataset.py b/python/ray/data/dataset.py index b84fc2ad703e..e1fccf0ac89a 100644 --- a/python/ray/data/dataset.py +++ b/python/ray/data/dataset.py @@ -2158,6 +2158,39 @@ def schema(self, fetch_if_missing: bool = True) -> Optional["Schema"]: else: return base_schema + @ConsumptionAPI( + if_more_than_read=True, + datasource_metadata="schema", + extra_condition="or if ``fetch_if_missing=True`` (the default)", + pattern="Time complexity:", + ) + def columns(self, fetch_if_missing: bool = True) -> Optional[List[str]]: + """Returns the columns of this Dataset. + + Time complexity: O(1) + + Example: + >>> import ray + >>> # Create dataset from synthetic data. + >>> ds = ray.data.range(1000) + >>> ds.columns() + ['id'] + + Args: + fetch_if_missing: If True, synchronously fetch the column names from the + schema if it's not known. If False, None is returned if the schema is + not known. Default is True. + + Returns: + A list of the column names for this Dataset or None if schema is not known + and `fetch_if_missing` is False. + + """ + schema = self.schema(fetch_if_missing=fetch_if_missing) + if schema is not None: + return schema.names + return None + def num_blocks(self) -> int: """Return the number of blocks of this dataset. @@ -4361,10 +4394,6 @@ def __del__(self): self._current_executor.shutdown() -# Backwards compatibility alias. 
-Dataset = Dataset - - @PublicAPI class MaterializedDataset(Dataset, Generic[T]): """A Dataset materialized in Ray memory, e.g., via `.materialize()`. diff --git a/python/ray/data/tests/test_consumption.py b/python/ray/data/tests/test_consumption.py index 18d5605e9d28..a2e373ab85f3 100644 --- a/python/ray/data/tests/test_consumption.py +++ b/python/ray/data/tests/test_consumption.py @@ -249,6 +249,15 @@ def test_schema_lazy(ray_start_regular_shared): assert ds._plan.execute()._num_computed() == 0 +def test_columns(ray_start_regular_shared): + ds = ray.data.range(1) + assert ds.columns() == ds.schema().names + assert ds.columns() == ["id"] + + ds = ds.map(lambda x: x) + assert ds.columns(fetch_if_missing=False) is None + + def test_schema_repr(ray_start_regular_shared): ds = ray.data.from_items([{"text": "spam", "number": 0}]) # fmt: off From 92e911ae27d9efbe10ed89d900fc90ee011639e9 Mon Sep 17 00:00:00 2001 From: Jiajun Yao Date: Mon, 15 May 2023 15:29:37 -0700 Subject: [PATCH 392/424] [Doc] Make doc code snippet testable [2/n] (#35274) Change code snippet from ..code-block:: to ..testcode:: Signed-off-by: Jiajun Yao --- doc/source/ray-core/actors/async_api.rst | 128 +++++++++++---- doc/source/ray-core/configure.rst | 47 ++++-- doc/source/ray-core/handling-dependencies.rst | 147 ++++++++++++------ doc/source/ray-core/miscellaneous.rst | 31 ++-- doc/source/ray-core/objects.rst | 27 +++- .../ray-core/objects/object-spilling.rst | 46 +++++- doc/source/ray-core/objects/serialization.rst | 59 ++++--- doc/source/ray-core/ray-dashboard.rst | 65 +++++--- doc/source/ray-core/starting-ray.rst | 34 ++-- 9 files changed, 424 insertions(+), 160 deletions(-) diff --git a/doc/source/ray-core/actors/async_api.rst b/doc/source/ray-core/actors/async_api.rst index dba3fca9805c..94eb0d1cb79a 100644 --- a/doc/source/ray-core/actors/async_api.rst +++ b/doc/source/ray-core/actors/async_api.rst @@ -25,14 +25,10 @@ Since Python 3.5, it is possible to write concurrent code using the Ray 
natively integrates with asyncio. You can use ray alongside with popular async frameworks like aiohttp, aioredis, etc. -You can try it about by running the following snippet in ``ipython`` or a shell -that supports top level ``await``: - -.. code-block:: python +.. testcode:: import ray import asyncio - ray.init() @ray.remote class AsyncActor: @@ -49,8 +45,21 @@ that supports top level ``await``: ray.get([actor.run_concurrent.remote() for _ in range(4)]) # async ray.get - await actor.run_concurrent.remote() - + async def async_get(): + await actor.run_concurrent.remote() + asyncio.run(async_get()) + +.. testoutput:: + :options: +SKIP + + (AsyncActor pid=40293) started + (AsyncActor pid=40293) started + (AsyncActor pid=40293) started + (AsyncActor pid=40293) started + (AsyncActor pid=40293) finished + (AsyncActor pid=40293) finished + (AsyncActor pid=40293) finished + (AsyncActor pid=40293) finished ObjectRefs as asyncio.Futures ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -60,7 +69,9 @@ applications. Instead of: -.. code-block:: python +.. testcode:: + + import ray @ray.remote def some_task(): @@ -71,23 +82,34 @@ Instead of: you can do: -.. code-block:: python +.. testcode:: + + import ray + import asyncio @ray.remote def some_task(): return 1 - await some_task.remote() - await asyncio.wait([some_task.remote()]) + async def await_obj_ref(): + await some_task.remote() + await asyncio.wait([some_task.remote()]) + + asyncio.run(await_obj_ref()) Please refer to `asyncio doc `__ for more `asyncio` patterns including timeouts and ``asyncio.gather``. If you need to directly access the future object, you can call: -.. code-block:: python +.. testcode:: - fut: asyncio.Future = asyncio.wrap_future(ref.future()) + import asyncio + + async def convert_to_asyncio_future(): + ref = some_task.remote() + fut: asyncio.Future = asyncio.wrap_future(ref.future()) + asyncio.run(convert_to_asyncio_future()) .. 
_async-ref-to-futures: @@ -96,21 +118,29 @@ ObjectRefs as concurrent.futures.Futures ObjectRefs can also be wrapped into ``concurrent.futures.Future`` objects. This is useful for interfacing with existing ``concurrent.futures`` APIs: -.. code-block:: python +.. testcode:: - refs = [fun.remote() for _ in range(4)] + import concurrent + + refs = [some_task.remote() for _ in range(4)] futs = [ref.future() for ref in refs] for fut in concurrent.futures.as_completed(futs): assert fut.done() print(fut.result()) +.. testoutput:: + + 1 + 1 + 1 + 1 Defining an Async Actor ~~~~~~~~~~~~~~~~~~~~~~~ By using `async` method definitions, Ray will automatically detect whether an actor support `async` calls or not. -.. code-block:: python +.. testcode:: import asyncio @@ -118,13 +148,27 @@ By using `async` method definitions, Ray will automatically detect whether an ac class AsyncActor: async def run_task(self): print("started") - await asyncio.sleep(1) # Network, I/O task here + await asyncio.sleep(2) # Network, I/O task here print("ended") actor = AsyncActor.remote() - # All 50 tasks should start at once. After 1 second they should all finish. + # All 5 tasks should start at once. After 2 second they should all finish. # they should finish at the same time - ray.get([actor.run_task.remote() for _ in range(50)]) + ray.get([actor.run_task.remote() for _ in range(5)]) + +.. testoutput:: + :options: +SKIP + + (AsyncActor pid=3456) started + (AsyncActor pid=3456) started + (AsyncActor pid=3456) started + (AsyncActor pid=3456) started + (AsyncActor pid=3456) started + (AsyncActor pid=3456) ended + (AsyncActor pid=3456) ended + (AsyncActor pid=3456) ended + (AsyncActor pid=3456) ended + (AsyncActor pid=3456) ended Under the hood, Ray runs all of the methods inside a single python event loop. 
Please note that running blocking ``ray.get`` or ``ray.wait`` inside async @@ -139,7 +183,7 @@ Setting concurrency in Async Actors You can set the number of "concurrent" task running at once using the ``max_concurrency`` flag. By default, 1000 tasks can be running concurrently. -.. code-block:: python +.. testcode:: import asyncio @@ -150,10 +194,30 @@ You can set the number of "concurrent" task running at once using the await asyncio.sleep(1) # Network, I/O task here print("ended") - actor = AsyncActor.options(max_concurrency=10).remote() - - # Only 10 tasks will be running concurrently. Once 10 finish, the next 10 should run. - ray.get([actor.run_task.remote() for _ in range(50)]) + actor = AsyncActor.options(max_concurrency=2).remote() + + # Only 2 tasks will be running concurrently. Once 2 finish, the next 2 should run. + ray.get([actor.run_task.remote() for _ in range(8)]) + +.. testoutput:: + :options: +SKIP + + (AsyncActor pid=5859) started + (AsyncActor pid=5859) started + (AsyncActor pid=5859) ended + (AsyncActor pid=5859) ended + (AsyncActor pid=5859) started + (AsyncActor pid=5859) started + (AsyncActor pid=5859) ended + (AsyncActor pid=5859) ended + (AsyncActor pid=5859) started + (AsyncActor pid=5859) started + (AsyncActor pid=5859) ended + (AsyncActor pid=5859) ended + (AsyncActor pid=5859) started + (AsyncActor pid=5859) started + (AsyncActor pid=5859) ended + (AsyncActor pid=5859) ended .. _threaded-actors: @@ -172,7 +236,7 @@ Instead, you can use the ``max_concurrency`` Actor options without any async met will recognize the actor as AsyncActor instead of ThreadedActor. -.. code-block:: python +.. testcode:: @ray.remote class ThreadedActor: @@ -182,6 +246,11 @@ Instead, you can use the ``max_concurrency`` Actor options without any async met a = ThreadedActor.options(max_concurrency=2).remote() ray.get([a.task_1.remote(), a.task_2.remote()]) +.. testoutput:: + :options: +SKIP + + (ThreadedActor pid=4822) I'm running in a thread! 
+ (ThreadedActor pid=4822) I'm running in another thread! Each invocation of the threaded actor will be running in a thread pool. The size of the threadpool is limited by the ``max_concurrency`` value. @@ -190,7 +259,8 @@ AsyncIO for Remote Tasks We don't support asyncio for remote tasks. The following snippet will fail: -.. code-block:: python +.. testcode:: + :skipif: True @ray.remote async def f(): @@ -198,7 +268,7 @@ We don't support asyncio for remote tasks. The following snippet will fail: Instead, you can wrap the ``async`` function with a wrapper to run the task synchronously: -.. code-block:: python +.. testcode:: async def f(): pass @@ -207,7 +277,3 @@ Instead, you can wrap the ``async`` function with a wrapper to run the task sync def wrapper(): import asyncio asyncio.run(f()) - # For python < 3.7: - # asyncio.get_event_loop().run_until_complete(f()) - - diff --git a/doc/source/ray-core/configure.rst b/doc/source/ray-core/configure.rst index 1d113a1a454f..5707b75013e2 100644 --- a/doc/source/ray-core/configure.rst +++ b/doc/source/ray-core/configure.rst @@ -19,18 +19,38 @@ Cluster Resources Ray by default detects available resources. -.. code-block:: python +.. testcode:: + :hide: + + import ray + ray.shutdown() + +.. testcode:: + + import ray # This automatically detects available resources in the single machine. ray.init() If not running cluster mode, you can specify cluster resources overrides through ``ray.init`` as follows. -.. code-block:: python +.. testcode:: + :hide: + + ray.shutdown() + +.. testcode:: # If not connecting to an existing cluster, you can specify resources overrides: ray.init(num_cpus=8, num_gpus=1) +.. testcode:: + :hide: + + ray.shutdown() + +.. testcode:: + # Specifying custom resources ray.init(num_gpus=1, resources={'Resource1': 4, 'Resource2': 16}) @@ -49,7 +69,8 @@ When starting Ray from the command line, pass the ``--num-cpus`` and ``--num-gpu If using the command line, connect to the Ray cluster as follow: -.. 
code-block:: python +.. testcode:: + :skipif: True # Connect to ray. Notice if connected to existing cluster, you don't specify resources. ray.init(address=
    ) @@ -96,7 +117,7 @@ Look :ref:`Logging Directory Structure ` for more d Ports configurations -------------------- -Ray requires bi-directional communication among its nodes in a cluster. Each node opens specific ports to receive incoming network requests. +Ray requires bi-directional communication among its nodes in a cluster. Each node opens specific ports to receive incoming network requests. All Nodes ~~~~~~~~~ @@ -182,14 +203,14 @@ former is kept secret by the owner and the latter is shared with the other party This pattern ensures that only the intended recipient can read the message. A Certificate Authority (CA) is a trusted third party that certifies the identity of the -public key owner. The digital certificate issued by the CA contains the public key itself, +public key owner. The digital certificate issued by the CA contains the public key itself, the identity of the public key owner, and the expiration date of the certificate. Note that -if the owner of the public key does not want to obtain a digital certificate from a CA, -they can generate a self-signed certificate with some tools like OpenSSL. +if the owner of the public key does not want to obtain a digital certificate from a CA, +they can generate a self-signed certificate with some tools like OpenSSL. To obtain a digital certificate, the owner of the public key must generate a Certificate Signing -Request (CSR). The CSR contains information about the owner of the public -key and the public key itself. For Ray, some additional steps are required for achieving +Request (CSR). The CSR contains information about the owner of the public +key and the public key itself. For Ray, some additional steps are required for achieving a successful TLS encryption. Here is a step-by-step guide for adding TLS Authentication to a static Kubernetes Ray cluster using @@ -225,9 +246,9 @@ The `YAML file `__, has a ConfigMap named `tls` that includes two shell scripts: `gencert_head.sh` and `gencert_worker.sh`. 
These scripts produce the private key and self-signed certificate files (`tls.key` and `tls.crt`) for both head and worker Pods in the initContainer -of each deployment. By using the initContainer, we can dynamically retrieve the `POD_IP` to the `[alt_names]` section. +of each deployment. By using the initContainer, we can dynamically retrieve the `POD_IP` to the `[alt_names]` section. -The scripts perform the following steps: first, a 2048-bit RSA private key is generated and saved as +The scripts perform the following steps: first, a 2048-bit RSA private key is generated and saved as `/etc/ray/tls/tls.key`. Then, a Certificate Signing Request (CSR) is generated using the `tls.key` file and the `csr.conf` configuration file. Finally, a self-signed certificate (`tls.crt`) is created using the Certificate Authority's (`ca.key and ca.crt`) keypair and the CSR (`ca.csr`). @@ -249,8 +270,8 @@ Step 4: Verify TLS authentication # Log in to the worker Pod kubectl exec -it ${WORKER_POD} -- bash - - # Since the head Pod has the certificate of the full qualified DNS resolution for the Ray head service, the connection to the worker Pods + + # Since the head Pod has the certificate of the full qualified DNS resolution for the Ray head service, the connection to the worker Pods # is established successfully ray health-check --address service-ray-head.default.svc.cluster.local:6379 diff --git a/doc/source/ray-core/handling-dependencies.rst b/doc/source/ray-core/handling-dependencies.rst index 0d42ef2faabc..940209475219 100644 --- a/doc/source/ray-core/handling-dependencies.rst +++ b/doc/source/ray-core/handling-dependencies.rst @@ -66,22 +66,30 @@ In contrast with the base cluster environment, a runtime environment will only b Runtime environments also allow you to set dependencies per-task, per-actor, and per-job on a long-running Ray cluster. -.. - TODO(architkulkarni): run working_dir doc example in CI +.. testcode:: + :hide: -.. 
code-block:: python + import ray + ray.shutdown() + +.. testcode:: import ray - import requests - runtime_env = {"working_dir": "/data/my_files", "pip": ["requests", "pendulum==2.1.2"]} + runtime_env = {"pip": ["emoji"]} ray.init(runtime_env=runtime_env) @ray.remote def f(): - open("my_datafile.txt").read() - return requests.get("https://www.ray.io") + import emoji + return emoji.emojize('Python is :thumbs_up:') + + print(ray.get(f.remote())) + +.. testoutput:: + + Python is 👍 A runtime environment can be described by a Python `dict`: @@ -117,7 +125,8 @@ You can specify a runtime environment for your whole job, whether running a scri :start-after: __ray_init_start__ :end-before: __ray_init_end__ -.. code-block:: python +.. testcode:: + :skipif: True # Option 2: Using Ray Jobs API (Python SDK) from ray.job_submission import JobSubmissionClient @@ -129,14 +138,14 @@ You can specify a runtime environment for your whole job, whether running a scri ) .. code-block:: bash - + # Option 3: Using Ray Jobs API (CLI). (Note: can use --runtime-env to pass a YAML file instead of an inline JSON string.) $ ray job submit --address="http://:8265" --runtime-env-json='{"working_dir": "/data/my_files", "pip": ["emoji"]}' -- python my_ray_script.py .. warning:: - If using the Ray Jobs API (either the Python SDK or the CLI), specify the ``runtime_env`` argument in the ``submit_job`` call or the ``ray job submit``, not in the ``ray.init()`` call in the entrypoint script (in this example, ``my_ray_script.py``). - + If using the Ray Jobs API (either the Python SDK or the CLI), specify the ``runtime_env`` argument in the ``submit_job`` call or the ``ray job submit``, not in the ``ray.init()`` call in the entrypoint script (in this example, ``my_ray_script.py``). + This ensures the runtime environment is installed on the cluster before the entrypoint script is run. .. 
note:: @@ -182,24 +191,36 @@ For a development workflow, these might live on your local machine, but when it The following simple example explains how to get your local files on the cluster. -.. code-block:: python +.. testcode:: + :hide: - # /path/to/files is a directory on the local machine. - # /path/to/files/hello.txt contains the string "Hello World!" + import ray + ray.shutdown() + +.. testcode:: + import os import ray + os.makedirs("/tmp/runtime_env_working_dir", exist_ok=True) + with open("/tmp/runtime_env_working_dir/hello.txt", "w") as hello_file: + hello_file.write("Hello World!") + # Specify a runtime environment for the entire Ray job - ray.init(runtime_env={"working_dir": "/path/to/files"}) + ray.init(runtime_env={"working_dir": "/tmp/runtime_env_working_dir"}) # Create a Ray task, which inherits the above runtime env. @ray.remote def f(): # The function will have its working directory changed to its node's - # local copy of /path/to/files. + # local copy of /tmp/runtime_env_working_dir. return open("hello.txt").read() - print(ray.get(f.remote())) # Hello World! + print(ray.get(f.remote())) + +.. testoutput:: + + Hello World! .. note:: The example above is written to run on a local machine, but as for all of these examples, it also works when specifying a Ray cluster to connect to @@ -218,7 +239,13 @@ Ray ordinarily expects all imported packages to be preinstalled on every node of However, using runtime environments you can dynamically specify packages to be automatically downloaded and installed in a virtual environment for your Ray job, or for specific Ray tasks or actors. -.. code-block:: python +.. testcode:: + :hide: + + import ray + ray.shutdown() + +.. 
testcode:: import ray import requests @@ -229,9 +256,13 @@ However, using runtime environments you can dynamically specify packages to be a @ray.remote def reqs(): - return requests.get("https://www.ray.io/") + return requests.get("https://www.ray.io/").status_code + + print(ray.get(reqs.remote())) - print(ray.get(reqs.remote())) # +.. testoutput:: + + 200 You may also specify your ``pip`` dependencies either via a Python list or a local ``requirements.txt`` file. @@ -242,7 +273,7 @@ For details, head to the :ref:`API Reference `. Since the packages in the ``runtime_env`` are installed at runtime, be cautious when specifying ``conda`` or ``pip`` packages whose installations involve building from source, as this can be slow. -.. note:: +.. note:: When using the ``"pip"`` field, the specified packages will be installed "on top of" the base environment using ``virtualenv``, so existing packages on your cluster will still be importable. By contrast, when using the ``conda`` field, your Ray tasks and actors will run in an isolated environment. The ``conda`` and ``pip`` fields cannot both be used in a single ``runtime_env``. @@ -250,7 +281,7 @@ For details, head to the :ref:`API Reference `. The ``ray[default]`` package itself will automatically be installed in the environment. For the ``conda`` field only, if you are using any other Ray libraries (for example, Ray Serve), then you will need to specify the library in the runtime environment (e.g. ``runtime_env = {"conda": {"dependencies": ["pytorch", "pip", {"pip": ["requests", "ray[serve]"]}]}}``.) -.. note:: +.. note:: ``conda`` environments must have the same Python version as the Ray cluster. Do not list ``ray`` in the ``conda`` dependencies, as it will be automatically installed. @@ -266,7 +297,8 @@ A typical iteration cycle will involve To ensure your local changes show up across all Ray workers and can be imported properly, use the ``py_modules`` field. -.. code-block:: python +.. 
testcode:: + :skipif: True import ray import my_module @@ -371,8 +403,8 @@ The ``runtime_env`` is a Python dictionary or a Python class :class:`ray.runtime Furthermore, referencing local files `within` a `environment.yml` file is not supported. - ``env_vars`` (Dict[str, str]): Environment variables to set. Environment variables already set on the cluster will still be visible to the Ray workers; so there is - no need to include ``os.environ`` or similar in the ``env_vars`` field. - By default, these environment variables override the same name environment variables on the cluster. + no need to include ``os.environ`` or similar in the ``env_vars`` field. + By default, these environment variables override the same name environment variables on the cluster. You can also reference existing environment variables using ${ENV_VAR} to achieve the appending behavior. Only PATH, LD_LIBRARY_PATH, DYLD_LIBRARY_PATH, and LD_PRELOAD are supported. See below for an example: @@ -425,7 +457,7 @@ If an actor or task specifies a new ``runtime_env``, it will override the parent Example: -.. code-block:: python +.. testcode:: # Parent's `runtime_env` {"pip": ["requests", "chess"], @@ -433,11 +465,11 @@ Example: # Child's specified `runtime_env` {"pip": ["torch", "ray[serve]"], - "env_vars": {"B": "new", "C", "c"}} + "env_vars": {"B": "new", "C": "c"}} # Child's actual `runtime_env` (merged with parent's) {"pip": ["torch", "ray[serve]"], - "env_vars": {"A": "a", "B": "new", "C", "c"}} + "env_vars": {"A": "a", "B": "new", "C": "c"}} .. _runtime-env-faq: @@ -465,8 +497,8 @@ Any local files downloaded by the environments are cached at ``/tmp/ray/session_ How long does it take to install or to load from cache? """"""""""""""""""""""""""""""""""""""""""""""""""""""" -The install time usually mostly consists of the time it takes to run ``pip install`` or ``conda create`` / ``conda activate``, or to upload/download a ``working_dir``, depending on which ``runtime_env`` options you're using. 
-This could take seconds or minutes. +The install time usually mostly consists of the time it takes to run ``pip install`` or ``conda create`` / ``conda activate``, or to upload/download a ``working_dir``, depending on which ``runtime_env`` options you're using. +This could take seconds or minutes. On the other hand, loading a runtime environment from the cache should be nearly as fast as the ordinary Ray worker startup time, which is on the order of a few seconds. A new Ray worker is started for every Ray actor or task that requires a new runtime environment. (Note that loading a cached ``conda`` environment could still be slow, since the ``conda activate`` command sometimes takes a few seconds.) @@ -476,7 +508,7 @@ You can set ``setup_timeout_seconds`` config to avoid the installation hanging f What is the relationship between runtime environments and Docker? """"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""" -They can be used independently or together. +They can be used independently or together. A container image can be specified in the :ref:`Cluster Launcher ` for large or static dependencies, and runtime environments can be specified per-job or per-task/actor for more dynamic use cases. The runtime environment will inherit packages, files, and environment variables from the container image. @@ -499,9 +531,10 @@ The contents of this directory will be directly accessed as the ``working_dir`` For example, suppose you want to use the contents in your local ``/some_path/example_dir`` directory as your ``working_dir``. If you want to specify this directory as a local path, your ``runtime_env`` dictionary should contain: -.. code-block:: python +.. testcode:: + :skipif: True - runtime_env = {..., "working_dir": "/some_path/example_dir", ...} + runtime_env = {..., "working_dir": "/some_path/example_dir", ...} Suppose instead you want to host your files in your ``/some_path/example_dir`` directory remotely and provide a remote URI. 
You would need to first compress the ``example_dir`` directory into a zip file. @@ -530,9 +563,10 @@ You can check that the zip file contains a single top-level directory by running Suppose you upload the compressed ``example_dir`` directory to AWS S3 at the S3 URI ``s3://example_bucket/example.zip``. Your ``runtime_env`` dictionary should contain: -.. code-block:: python +.. testcode:: + :skipif: True - runtime_env = {..., "working_dir": "s3://example_bucket/example.zip", ...} + runtime_env = {..., "working_dir": "s3://example_bucket/example.zip", ...} .. warning:: @@ -635,7 +669,7 @@ To create the URL, pick a URL template below that fits your use case, and fill i For instance, suppose your GitHub username is ``example_user``, the repository's name is ``example_repository``, and the desired commit hash is ``abcdefg``. If ``example_repository`` is public and you want to retrieve the ``abcdefg`` commit (which matches the first example use case), the URL would be: -.. code-block:: python +.. testcode:: runtime_env = {"working_dir": ("https://github.com" "/example_user/example_repository/archive/abcdefg.zip")} @@ -644,28 +678,28 @@ Here is a list of different use cases and corresponding URLs: - Example: Retrieve package from a specific commit hash on a public GitHub repository -.. code-block:: python +.. testcode:: runtime_env = {"working_dir": ("https://github.com" "/[username]/[repository]/archive/[commit hash].zip")} - Example: Retrieve package from a private GitHub repository using a Personal Access Token -.. code-block:: python +.. testcode:: runtime_env = {"working_dir": ("https://[username]:[personal access token]@github.com" "/[username]/[private repository]/archive/[commit hash].zip")} - Example: Retrieve package from a public GitHub repository's latest commit -.. code-block:: python +.. 
testcode:: runtime_env = {"working_dir": ("https://github.com" "/[username]/[repository]/archive/HEAD.zip")} - Example: Retrieve package from a specific commit hash on a public Bitbucket repository -.. code-block:: python +.. testcode:: runtime_env = {"working_dir": ("https://bitbucket.org" "/[owner]/[repository]/get/[commit hash].tar.gz")} @@ -687,7 +721,10 @@ If runtime_env cannot be set up (e.g., network issues, download failures, etc.), that require the runtime_env. If you call ``ray.get``, it will raise ``RuntimeEnvSetupError`` with the error message in detail. -.. code-block:: python +.. testcode:: + + import ray + import time @ray.remote def f(): @@ -702,11 +739,23 @@ the error message in detail. bad_env = {"conda": {"dependencies": ["this_doesnt_exist"]}} # [Tasks] will raise `RuntimeEnvSetupError`. - ray.get(f.options(runtime_env=bad_env).remote()) + try: + ray.get(f.options(runtime_env=bad_env).remote()) + except ray.exceptions.RuntimeEnvSetupError: + print("Task fails with RuntimeEnvSetupError") # [Actors] will raise `RuntimeEnvSetupError`. a = A.options(runtime_env=bad_env).remote() - ray.get(a.f.remote()) + try: + ray.get(a.f.remote()) + except ray.exceptions.RuntimeEnvSetupError: + print("Actor fails with RuntimeEnvSetupError") + +.. testoutput:: + + Task fails with RuntimeEnvSetupError + Actor fails with RuntimeEnvSetupError + Full logs can always be found in the file ``runtime_env_setup-[job_id].log`` for per-actor, per-task and per-job environments, or in ``runtime_env_setup-ray_client_server_[port].log`` for per-job environments when using Ray Client. @@ -716,9 +765,17 @@ This will print the full ``runtime_env`` setup log messages to the driver (the s Example log output: -.. code-block:: text +.. testcode:: + :hide: + + ray.shutdown() + +.. testcode:: + + ray.init(runtime_env={"pip": ["requests"]}) - >>> ray.init(runtime_env={"pip" ["requests"]}) +.. 
testoutput:: + :options: +SKIP (pid=runtime_env) 2022-02-28 14:12:33,653 INFO pip.py:188 -- Creating virtualenv at /tmp/ray/session_2022-02-28_14-12-29_909064_87908/runtime_resources/pip/0cc818a054853c3841171109300436cad4dcf594/virtualenv, current python dir /Users/user/anaconda3/envs/ray-py38 (pid=runtime_env) 2022-02-28 14:12:33,653 INFO utils.py:76 -- Run cmd[1] ['/Users/user/anaconda3/envs/ray-py38/bin/python', '-m', 'virtualenv', '--app-data', '/tmp/ray/session_2022-02-28_14-12-29_909064_87908/runtime_resources/pip/0cc818a054853c3841171109300436cad4dcf594/virtualenv_app_data', '--reset-app-data', '--no-periodic-update', '--system-site-packages', '--no-download', '/tmp/ray/session_2022-02-28_14-12-29_909064_87908/runtime_resources/pip/0cc818a054853c3841171109300436cad4dcf594/virtualenv'] diff --git a/doc/source/ray-core/miscellaneous.rst b/doc/source/ray-core/miscellaneous.rst index 7eebbdf76e61..0629fcf961c5 100644 --- a/doc/source/ray-core/miscellaneous.rst +++ b/doc/source/ray-core/miscellaneous.rst @@ -13,7 +13,9 @@ You can dynamically adjust resource requirements or return values of ``ray.remot For example, here we instantiate many copies of the same actor with varying resource requirements. Note that to create these actors successfully, Ray will need to be started with sufficient CPU resources and the relevant custom resources: -.. code-block:: python +.. testcode:: + + import ray @ray.remote(num_cpus=4) class Counter(object): @@ -30,21 +32,28 @@ For example, here we instantiate many copies of the same actor with varying reso You can specify different resource requirements for tasks (but not for actor methods): -.. code-block:: python +.. testcode:: + :hide: + + ray.shutdown() + +.. 
testcode:: + + ray.init(num_cpus=1, num_gpus=1) @ray.remote def g(): return ray.get_gpu_ids() object_gpu_ids = g.remote() - assert ray.get(object_gpu_ids) == [0] + assert ray.get(object_gpu_ids) == [] dynamic_object_gpu_ids = g.options(num_cpus=1, num_gpus=1).remote() assert ray.get(dynamic_object_gpu_ids) == [0] And vary the number of return values for tasks (and actor methods too): -.. code-block:: python +.. testcode:: @ray.remote def f(n): @@ -56,7 +65,7 @@ And vary the number of return values for tasks (and actor methods too): And specify a name for tasks (and actor methods too) at task submission time: -.. code-block:: python +.. testcode:: import setproctitle @@ -154,16 +163,21 @@ To get information about the current nodes in your cluster, you can use ``ray.no .. autofunction:: ray.nodes :noindex: +.. testcode:: + :hide: -.. code-block:: python + ray.shutdown() + +.. testcode:: import ray ray.init() - print(ray.nodes()) - """ +.. testoutput:: + :options: +SKIP + [{'NodeID': '2691a0c1aed6f45e262b2372baf58871734332d7', 'Alive': True, 'NodeManagerAddress': '192.168.1.82', @@ -175,7 +189,6 @@ To get information about the current nodes in your cluster, you can use ``ray.no 'MetricsExportPort': 64860, 'alive': True, 'Resources': {'CPU': 16.0, 'memory': 100.0, 'object_store_memory': 34.0, 'node:192.168.1.82': 1.0}}] - """ The above information includes: diff --git a/doc/source/ray-core/objects.rst b/doc/source/ray-core/objects.rst index e168c6ebd220..78294535d420 100644 --- a/doc/source/ray-core/objects.rst +++ b/doc/source/ray-core/objects.rst @@ -18,7 +18,9 @@ Object refs can be created in two ways. .. tab-item:: Python - .. code-block:: python + .. testcode:: + + import ray # Put an object in Ray's object store. y = 1 @@ -61,7 +63,10 @@ If the current node's object store does not contain the object, the object is do or a collection of numpy arrays, the ``get`` call is zero-copy and returns arrays backed by shared object store memory. 
Otherwise, we deserialize the object data into a Python object. - .. code-block:: python + .. testcode:: + + import ray + import time # Get the value of one object ref. obj_ref = ray.put(1) @@ -85,6 +90,10 @@ If the current node's object store does not contain the object, the object is do except GetTimeoutError: # You can capture the standard "TimeoutError" instead print("`get` timed out.") + .. testoutput:: + + `get` timed out. + .. tab-item:: Java .. code-block:: java @@ -149,7 +158,17 @@ There are two different ways one can pass an object to a Ray task or method. Dep The top-level vs not top-level passing convention also applies to actor constructors and actor method calls: -.. code-block:: python +.. testcode:: + + @ray.remote + class Actor: + def __init__(self, arg): + pass + + def method(self, arg): + pass + + obj = ray.put(2) # Examples of passing objects to actor constructors. actor_handle = Actor.remote(obj) # by-value @@ -171,7 +190,7 @@ Nested Objects Ray also supports nested object references. This allows you to build composite objects that themselves hold references to further sub-objects. -.. code-block:: python +.. testcode:: # Objects can be nested within each other. Ray will keep the inner object # alive via reference counting until all outer object references are deleted. diff --git a/doc/source/ray-core/objects/object-spilling.rst b/doc/source/ray-core/objects/object-spilling.rst index b8eba3be732c..845215aa2a44 100644 --- a/doc/source/ray-core/objects/object-spilling.rst +++ b/doc/source/ray-core/objects/object-spilling.rst @@ -11,7 +11,13 @@ Ray uses object spilling by default. Without any setting, objects are spilled to To configure the directory where objects are spilled to, use: -.. code-block:: python +.. testcode:: + :hide: + + import ray + ray.shutdown() + +.. 
testcode:: import json import ray @@ -27,7 +33,12 @@ To configure the directory where objects are spilled to, use: You can also specify multiple directories for spilling to spread the IO load and disk space usage across multiple physical devices if needed (e.g., SSD devices): -.. code-block:: python +.. testcode:: + :hide: + + ray.shutdown() + +.. testcode:: import json import ray @@ -59,7 +70,12 @@ usage across multiple physical devices if needed (e.g., SSD devices): If you are using an HDD, it is recommended that you specify a large buffer size (> 1MB) to reduce IO requests during spilling. -.. code-block:: python +.. testcode:: + :hide: + + ray.shutdown() + +.. testcode:: import json import ray @@ -82,7 +98,12 @@ To prevent running out of disk space, local object spilling will throw ``OutOfDi If multiple physical devices are used, any physical device's over-usage will trigger the ``OutOfDiskError``. The default threshold is 0.95 (95%). You can adjust the threshold by setting ``local_fs_capacity_threshold``, or set it to 1 to disable the protection. -.. code-block:: python +.. testcode:: + :hide: + + ray.shutdown() + +.. testcode:: import json import ray @@ -97,6 +118,7 @@ The default threshold is 0.95 (95%). You can adjust the threshold by setting ``l "type": "filesystem", "params": { "directory_path": "/tmp/spill", + } }, ) }, @@ -105,7 +127,13 @@ The default threshold is 0.95 (95%). You can adjust the threshold by setting ``l To enable object spilling to remote storage (any URI supported by `smart_open `__): -.. code-block:: python +.. testcode:: + :hide: + + ray.shutdown() + +.. testcode:: + :skipif: True import json import ray @@ -130,7 +158,13 @@ It is recommended that you specify a large buffer size (> 1MB) to reduce IO requ Spilling to multiple remote storages is also supported. -.. code-block:: python +.. testcode:: + :hide: + + ray.shutdown() + +.. 
testcode:: + :skipif: True import json import ray diff --git a/doc/source/ray-core/objects/serialization.rst b/doc/source/ray-core/objects/serialization.rst index af5a236267db..f7007adce862 100644 --- a/doc/source/ray-core/objects/serialization.rst +++ b/doc/source/ray-core/objects/serialization.rst @@ -47,9 +47,11 @@ Serialization notes - For non-native objects, Ray will always keep a single copy even it is referred multiple times in an object: - .. code-block:: python + .. testcode:: + import ray import numpy as np + obj = [np.zeros(42)] * 99 l = ray.get(ray.put(obj)) assert l[0] is l[1] # no problem! @@ -72,13 +74,11 @@ There are at least 3 ways to define your custom serialization process: function inside the corresponding class. This is commonly done by most Python libraries. Example code: - .. code-block:: python + .. testcode:: import ray import sqlite3 - ray.init() - class DBConnection: def __init__(self, path): self.path = path @@ -96,11 +96,18 @@ There are at least 3 ways to define your custom serialization process: copied = ray.get(ray.put(original)) print(copied.conn) + .. testoutput:: + :options: +ELLIPSIS + + + + + 2. If you want to customize the serialization of a type of objects, but you cannot access or modify the corresponding class, you can register the class with the serializer you use: - .. code-block:: python + .. testcode:: import ray import threading @@ -110,7 +117,10 @@ There are at least 3 ways to define your custom serialization process: self.x = x self.lock = threading.Lock() # could not be serialized! - ray.get(ray.put(A(1))) # fail! + try: + ray.get(ray.put(A(1))) # fail! + except TypeError: + pass def custom_serializer(a): return a.x @@ -125,7 +135,10 @@ There are at least 3 ways to define your custom serialization process: # You can deregister the serializer at any time. ray.util.deregister_serializer(A) - ray.get(ray.put(A(1))) # fail! + try: + ray.get(ray.put(A(1))) # fail! 
+ except TypeError: + pass # Nothing happens when deregister an unavailable serializer. ray.util.deregister_serializer(A) @@ -141,7 +154,7 @@ There are at least 3 ways to define your custom serialization process: 3. We also provide you an example, if you want to customize the serialization of a specific object: - .. code-block:: python + .. testcode:: import threading @@ -150,7 +163,10 @@ There are at least 3 ways to define your custom serialization process: self.x = x self.lock = threading.Lock() # could not serialize! - ray.get(ray.put(A(1))) # fail! + try: + ray.get(ray.put(A(1))) # fail! + except TypeError: + pass class SerializationHelperForA: """A helper class for serialization.""" @@ -163,7 +179,10 @@ There are at least 3 ways to define your custom serialization process: ray.get(ray.put(SerializationHelperForA(A(1)))) # success! # the serializer only works for a specific object, not all A # instances, so we still expect failure here. - ray.get(ray.put(A(1))) # still fail! + try: + ray.get(ray.put(A(1))) # still fail! + except TypeError: + pass Troubleshooting @@ -173,7 +192,7 @@ Use ``ray.util.inspect_serializability`` to identify tricky pickling issues. Thi Below, we demonstrate this behavior on a function with a non-serializable object (threading lock): -.. code-block:: python +.. testcode:: from ray.util import inspect_serializability import threading @@ -187,24 +206,26 @@ Below, we demonstrate this behavior on a function with a non-serializable object The resulting output is: - -.. code-block:: bash +.. testoutput:: + :options: +SKIP ============================================================= - Checking Serializability of + Checking Serializability of ============================================================= - !!! FAIL serialization: can't pickle _thread.lock objects + !!! FAIL serialization: cannot pickle '_thread.lock' object Detected 1 global variables. Checking serializability... - Serializing 'lock' ... - !!! 
FAIL serialization: can't pickle _thread.lock objects - WARNING: Did not find non-serializable object in . This may be an oversight. + Serializing 'lock' ... + !!! FAIL serialization: cannot pickle '_thread.lock' object + WARNING: Did not find non-serializable object in . This may be an oversight. ============================================================= Variable: - lock [obj=, parent=] + FailTuple(lock [obj=, parent=]) was found to be non-serializable. There may be multiple other undetected variables that were non-serializable. Consider either removing the instantiation/imports of these variables or moving the instantiation into the scope of the function/class. + ============================================================= + Check https://docs.ray.io/en/master/ray-core/objects/serialization.html#troubleshooting for more information. If you have any suggestions on how to improve this error message, please reach out to the Ray developers on github.com/ray-project/ray/issues/ ============================================================= diff --git a/doc/source/ray-core/ray-dashboard.rst b/doc/source/ray-core/ray-dashboard.rst index f7e729eec058..ef9e0553a17b 100644 --- a/doc/source/ray-core/ray-dashboard.rst +++ b/doc/source/ray-core/ray-dashboard.rst @@ -3,7 +3,7 @@ Ray Dashboard ============= Ray provides a web-based dashboard for monitoring and debugging Ray applications. -The dashboard provides a visual representation of the system state, allowing users to track the performance +The dashboard provides a visual representation of the system state, allowing users to track the performance of their applications and troubleshoot issues. .. raw:: html @@ -36,20 +36,32 @@ To use the dashboard, you should use the `ray[default]` installation: You can access the dashboard through a URL printed when Ray is initialized (the default URL is **http://localhost:8265**) or via the context object returned from `ray.init`. -.. code-block:: python +.. 
testcode:: + :hide: + + import ray + ray.shutdown() + +.. testcode:: + + import ray context = ray.init() print(context.dashboard_url) +.. testoutput:: + + 127.0.0.1:8265 + .. code-block:: text INFO worker.py:1487 -- Connected to Ray cluster. View the dashboard at 127.0.0.1:8265. Ray cluster comes with the dashboard. See :ref:`Cluster Monitoring ` for more details. -.. note:: +.. note:: - When using the Ray dashboard, it is highly recommended to also set up Prometheus and Grafana. + When using the Ray dashboard, it is highly recommended to also set up Prometheus and Grafana. They are necessary for critical features such as :ref:`Metrics View `. See :ref:`Ray Metrics ` to learn how to set up Prometheus and Grafana. @@ -68,7 +80,7 @@ View the application logs and errors If the Ray job is submitted by :ref:`Ray job API `, the job logs are available from the dashboard. The log file follows the following format; ``job-driver-.log``. -.. note:: +.. note:: If the driver is executed directly on the head node of the Ray cluster (without the job API) or run via :ref:`Ray client `, the driver logs are not accessible from the dashboard. In this case, see the terminal output to view the driver logs. @@ -81,7 +93,7 @@ If the Ray job is submitted by :ref:`Ray job API `, the job log :align: center Task and actor logs are accessible from the :ref:`task and actor table view `. Click the log button. -You can see the worker logs (``worker-[worker_id]-[job_id]-[pid].[out|err]``) that execute the task and actor. ``.out`` (stdout) and ``.err`` (stderr) logs contain the logs emitted from the tasks and actors. +You can see the worker logs (``worker-[worker_id]-[job_id]-[pid].[out|err]``) that execute the task and actor. ``.out`` (stdout) and ``.err`` (stderr) logs contain the logs emitted from the tasks and actors. The core worker logs (``python-core-worker-[worker_id]_[pid].log``) contain the system-level logs for the corresponding worker. 
**Task and Actor Errors** @@ -89,7 +101,7 @@ The core worker logs (``python-core-worker-[worker_id]_[pid].log``) contain the .. image:: https://raw.githubusercontent.com/ray-project/Images/master/docs/new-dashboard-v2/dashboard-pics/failed_task_progress-bar.png :align: center -You can easily identify failed tasks or actors by looking at the job progress bar, which links to the table. +You can easily identify failed tasks or actors by looking at the job progress bar, which links to the table. .. image:: https://raw.githubusercontent.com/ray-project/Images/master/docs/new-dashboard-v2/dashboard-pics/task_error_button.png :align: center @@ -104,8 +116,8 @@ The table displays the name of the failed tasks or actors and provides access to Analyze the CPU and memory usage of tasks and actors ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -The :ref:`Metrics View ` in the Ray dashboard provides a "per-component CPU/memory usage graph" that displays CPU and memory usage over time for each task and actor in the application (as well as system components). -This allows users to identify tasks and actors that may be consuming more resources than expected and optimize the performance of the application. +The :ref:`Metrics View ` in the Ray dashboard provides a "per-component CPU/memory usage graph" that displays CPU and memory usage over time for each task and actor in the application (as well as system components). +This allows users to identify tasks and actors that may be consuming more resources than expected and optimize the performance of the application. .. 
image:: https://raw.githubusercontent.com/ray-project/Images/master/docs/new-dashboard-v2/dashboard-pics/node_cpu_by_comp.png :align: center @@ -128,7 +140,7 @@ Additionally, users can see a snapshot of hardware utilization from the :ref:`cl View the Resource Utilization ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Ray requires users to specify the number of :ref:`resources ` their tasks and actors will use through arguments such as ``num_cpus``, ``num_gpus``, ``memory``, and ``resource``. +Ray requires users to specify the number of :ref:`resources ` their tasks and actors will use through arguments such as ``num_cpus``, ``num_gpus``, ``memory``, and ``resource``. These values are used for scheduling, but may not always match the actual resource utilization (physical resource utilization). - You can see the logical and physical resource utilization over time from the :ref:`Metrics View `. @@ -191,7 +203,7 @@ A job is a ray workload that uses Ray APIs (e.g., ``ray.init``). It can be submi .. image:: https://raw.githubusercontent.com/ray-project/Images/master/docs/new-dashboard-v2/dashboard-pics/job_list.png :align: center -The job page displays a list of active, finished, and failed jobs, and clicking on an ID allows users to view detailed information about that job. +The job page displays a list of active, finished, and failed jobs, and clicking on an ID allows users to view detailed information about that job. For more information on Ray jobs, see the Ray Job Overview section. Job Profiling @@ -210,7 +222,7 @@ Advanced Task and Actor Breakdown .. image:: https://raw.githubusercontent.com/ray-project/Images/master/docs/new-dashboard-v2/dashboard-pics/advanced-progress.png :align: left -The job page allows you to see tasks and actors broken down by their states. +The job page allows you to see tasks and actors broken down by their states. Tasks and actors are grouped and nested by default. You can see the nested entries by clicking the expand button. 
Tasks and actors are grouped and nested by the following criteria. @@ -222,7 +234,7 @@ Tasks and actors are grouped and nested by the following criteria. - Child actors (actors created within an actor) are nested under their parent actor's row. - Actor tasks (remote methods within an actor) are nested under the actor for the corresponding actor method. -.. note:: +.. note:: Ray dashboard can only display or retrieve up to 10K tasks at a time. If there are more than 10K tasks from your job, they are unaccounted. The number of unaccounted tasks is available from the task breakdown. @@ -230,7 +242,7 @@ Tasks and actors are grouped and nested by the following criteria. Task Timeline ~~~~~~~~~~~~~ -The :ref:`timeline API ` is available from the dashboard. +The :ref:`timeline API ` is available from the dashboard. .. image:: https://raw.githubusercontent.com/ray-project/Images/master/docs/new-dashboard-v2/dashboard-pics/profile-button.png :align: center @@ -245,7 +257,7 @@ Second, you can use tools like ``chrome://tracing`` or the `Perfetto UI `. .. image:: https://raw.githubusercontent.com/ray-project/Images/master/docs/new-dashboard-v2/dashboard-pics/task-table.png @@ -383,12 +395,12 @@ Actor Detail Page .. image:: https://raw.githubusercontent.com/ray-project/Images/master/docs/new-dashboard-v2/dashboard-pics/actor-list-id.png :align: center -By clicking the ID, you can also see the detail view of the actor. +By clicking the ID, you can also see the detail view of the actor. .. image:: https://raw.githubusercontent.com/ray-project/Images/master/docs/new-dashboard-v2/dashboard-pics/actor-detail.png :align: center -From the actor detail page, you can see the metadata, state, and the all tasks that have run from this actor. +From the actor detail page, you can see the metadata, state, and the all tasks that have run from this actor. .. 
_dash-metrics-view: @@ -407,7 +419,7 @@ Ray exports default metrics which are available from the :ref:`Metrics View ` for available metrics. -.. note:: +.. note:: The metrics view required the Prometheus and Grafana setup. See :ref:`Ray Metrics ` to learn how to set up Prometheus and Grafana. @@ -535,9 +547,14 @@ To disable the dashboard, use the following arguments `--include-dashboard`. **ray.init** - .. code-block:: python + .. testcode:: + :hide: + + ray.shutdown() + + .. testcode:: - ray.init(include_dashboard=False) + ray.init(include_dashboard=False) .. tab-item:: VM Cluster Launcher diff --git a/doc/source/ray-core/starting-ray.rst b/doc/source/ray-core/starting-ray.rst index 60c4860525df..711049d940f5 100644 --- a/doc/source/ray-core/starting-ray.rst +++ b/doc/source/ray-core/starting-ray.rst @@ -39,11 +39,17 @@ Calling ``ray.init()`` starts a local Ray instance on your laptop/machine. This .. tab-item:: Python - .. code-block:: python + .. testcode:: + :hide: - import ray - # Other Ray APIs will not work until `ray.init()` is called. - ray.init() + import ray + ray.shutdown() + + .. testcode:: + + import ray + # Other Ray APIs will not work until `ray.init()` is called. + ray.init() .. tab-item:: Java @@ -74,7 +80,12 @@ When the process calling ``ray.init()`` terminates, the Ray runtime will also te .. tab-item:: Python - .. code-block:: python + .. testcode:: + :hide: + + ray.shutdown() + + .. testcode:: import ray ray.init() @@ -111,7 +122,7 @@ To check if Ray is initialized, use the ``is_initialized`` API. .. tab-item:: Python - .. code-block:: python + .. testcode:: import ray ray.init() @@ -180,7 +191,7 @@ You can connect to this Ray instance by starting a driver process on the same no .. tab-item:: Python - .. code-block:: python + .. 
testcode:: import ray ray.init() @@ -235,9 +246,14 @@ Your code **only** needs to execute on one machine in the cluster (usually the h To connect to the Ray cluster, call ``ray.init`` from one of the machines in the cluster. This will connect to the latest Ray cluster: -.. code-block:: python +.. testcode:: + :hide: + + ray.shutdown() + +.. testcode:: - ray.init() + ray.init() Note that the machine calling ``ray up`` will not be considered as part of the Ray cluster, and therefore calling ``ray.init`` on that same machine will not attach to the cluster. From 0f9964c9977aca58fa98139f4e0c81995668c33d Mon Sep 17 00:00:00 2001 From: Artur Niederfahrenhorst Date: Tue, 16 May 2023 01:19:30 +0200 Subject: [PATCH 393/424] [RLlib] Remove some specs from encoders to smoothen dev experience (#34911) Signed-off-by: Artur Niederfahrenhorst --- rllib/algorithms/ppo/ppo_rl_module.py | 7 + rllib/algorithms/ppo/tf/ppo_tf_rl_module.py | 57 +------ .../ppo/torch/ppo_torch_rl_module.py | 44 +----- rllib/core/models/base.py | 149 +++++++++++------- rllib/core/models/tests/test_cnn_encoders.py | 5 +- rllib/core/models/tests/test_mlp_encoders.py | 3 +- rllib/core/models/tf/encoder.py | 47 +++--- rllib/core/models/torch/encoder.py | 40 ++--- 8 files changed, 151 insertions(+), 201 deletions(-) diff --git a/rllib/algorithms/ppo/ppo_rl_module.py b/rllib/algorithms/ppo/ppo_rl_module.py index a7ec91fa3399..cc14de1bf2d4 100644 --- a/rllib/algorithms/ppo/ppo_rl_module.py +++ b/rllib/algorithms/ppo/ppo_rl_module.py @@ -39,6 +39,13 @@ def get_exploration_action_dist_cls(self) -> Type[Distribution]: def get_inference_action_dist_cls(self) -> Type[Distribution]: return self.action_dist_cls + @override(RLModule) + def get_initial_state(self) -> dict: + if hasattr(self.encoder, "get_initial_state"): + return self.encoder.get_initial_state() + else: + return {} + @override(RLModule) def input_specs_inference(self) -> SpecDict: return self.input_specs_exploration() diff --git 
a/rllib/algorithms/ppo/tf/ppo_tf_rl_module.py b/rllib/algorithms/ppo/tf/ppo_tf_rl_module.py index 9c6416632a69..63b51892f01d 100644 --- a/rllib/algorithms/ppo/tf/ppo_tf_rl_module.py +++ b/rllib/algorithms/ppo/tf/ppo_tf_rl_module.py @@ -1,8 +1,8 @@ from typing import Mapping, Any from ray.rllib.algorithms.ppo.ppo_rl_module import PPORLModule -from ray.rllib.core.models.base import ACTOR, CRITIC, STATE_IN -from ray.rllib.core.models.tf.encoder import ENCODER_OUT +from ray.rllib.core.models.base import ACTOR, CRITIC +from ray.rllib.core.models.tf.encoder import ENCODER_OUT, STATE_OUT from ray.rllib.core.rl_module.rl_module import RLModule from ray.rllib.core.rl_module.tf.tf_rl_module import TfRLModule from ray.rllib.policy.sample_batch import SampleBatch @@ -20,32 +20,13 @@ def __init__(self, *args, **kwargs): TfRLModule.__init__(self, *args, **kwargs) PPORLModule.__init__(self, *args, **kwargs) - # TODO(Artur): Comment in as soon as we support RNNs from Polciy side - # @override(RLModule) - # def get_initial_state(self) -> NestedDict: - # if hasattr(self.encoder, "get_initial_state"): - # return self.encoder.get_initial_state() - # else: - # return NestedDict({}) - @override(RLModule) def _forward_inference(self, batch: NestedDict) -> Mapping[str, Any]: output = {} - # TODO (Artur): Remove this once Policy supports RNN - batch = batch.copy() - if self.encoder.config.shared: - batch[STATE_IN] = None - else: - batch[STATE_IN] = { - ACTOR: None, - CRITIC: None, - } - batch[SampleBatch.SEQ_LENS] = None - encoder_outs = self.encoder(batch) - # TODO (Artur): Un-uncomment once Policy supports RNN - # output[STATE_OUT] = encoder_outs[STATE_OUT] + if STATE_OUT in encoder_outs: + output[STATE_OUT] = encoder_outs[STATE_OUT] # Actions action_logits = self.pi(encoder_outs[ENCODER_OUT][ACTOR]) @@ -63,21 +44,10 @@ def _forward_exploration(self, batch: NestedDict) -> Mapping[str, Any]: """ output = {} - # TODO (Artur): Remove this once Policy supports RNN - batch = batch.copy() - if 
self.encoder.config.shared: - batch[STATE_IN] = None - else: - batch[STATE_IN] = { - ACTOR: None, - CRITIC: None, - } - batch[SampleBatch.SEQ_LENS] = None - # Shared encoder encoder_outs = self.encoder(batch) - # TODO (Artur): Un-uncomment once Policy supports RNN - # output[STATE_OUT] = encoder_outs[STATE_OUT] + if STATE_OUT in encoder_outs: + output[STATE_OUT] = encoder_outs[STATE_OUT] # Value head vf_out = self.vf(encoder_outs[ENCODER_OUT][CRITIC]) @@ -94,21 +64,10 @@ def _forward_exploration(self, batch: NestedDict) -> Mapping[str, Any]: def _forward_train(self, batch: NestedDict): output = {} - # TODO (Artur): Remove this once Policy supports RNN - batch = batch.copy() - if self.encoder.config.shared: - batch[STATE_IN] = None - else: - batch[STATE_IN] = { - ACTOR: None, - CRITIC: None, - } - batch[SampleBatch.SEQ_LENS] = None - # Shared encoder encoder_outs = self.encoder(batch) - # TODO (Artur): Un-uncomment once Policy supports RNN - # output[STATE_OUT] = encoder_outs[STATE_OUT] + if STATE_OUT in encoder_outs: + output[STATE_OUT] = encoder_outs[STATE_OUT] # Value head vf_out = self.vf(encoder_outs[ENCODER_OUT][CRITIC]) diff --git a/rllib/algorithms/ppo/torch/ppo_torch_rl_module.py b/rllib/algorithms/ppo/torch/ppo_torch_rl_module.py index 58cc3ff70c85..33461f58e3ea 100644 --- a/rllib/algorithms/ppo/torch/ppo_torch_rl_module.py +++ b/rllib/algorithms/ppo/torch/ppo_torch_rl_module.py @@ -2,7 +2,7 @@ from ray.rllib.algorithms.ppo.ppo_rl_module import PPORLModule -from ray.rllib.core.models.base import ACTOR, CRITIC, ENCODER_OUT, STATE_IN +from ray.rllib.core.models.base import ACTOR, CRITIC, ENCODER_OUT, STATE_OUT from ray.rllib.core.rl_module.rl_module import RLModule from ray.rllib.core.rl_module.torch import TorchRLModule from ray.rllib.policy.sample_batch import SampleBatch @@ -24,19 +24,9 @@ def __init__(self, *args, **kwargs): def _forward_inference(self, batch: NestedDict) -> Mapping[str, Any]: output = {} - # TODO (Artur): Remove this once Policy 
supports RNN - if self.encoder.config.shared: - batch[STATE_IN] = None - else: - batch[STATE_IN] = { - ACTOR: None, - CRITIC: None, - } - batch[SampleBatch.SEQ_LENS] = None - encoder_outs = self.encoder(batch) - # TODO (Artur): Un-uncomment once Policy supports RNN - # output[STATE_OUT] = encoder_outs[STATE_OUT] + if STATE_OUT in encoder_outs: + output[STATE_OUT] = encoder_outs[STATE_OUT] # Actions action_logits = self.pi(encoder_outs[ENCODER_OUT][ACTOR]) @@ -53,20 +43,10 @@ def _forward_exploration(self, batch: NestedDict) -> Mapping[str, Any]: """ output = {} - # TODO (Artur): Remove this once Policy supports RNN - if self.encoder.config.shared: - batch[STATE_IN] = None - else: - batch[STATE_IN] = { - ACTOR: None, - CRITIC: None, - } - batch[SampleBatch.SEQ_LENS] = None - # Shared encoder encoder_outs = self.encoder(batch) - # TODO (Artur): Un-uncomment once Policy supports RNN - # output[STATE_OUT] = encoder_outs[STATE_OUT] + if STATE_OUT in encoder_outs: + output[STATE_OUT] = encoder_outs[STATE_OUT] # Value head vf_out = self.vf(encoder_outs[ENCODER_OUT][CRITIC]) @@ -82,20 +62,10 @@ def _forward_exploration(self, batch: NestedDict) -> Mapping[str, Any]: def _forward_train(self, batch: NestedDict) -> Mapping[str, Any]: output = {} - # TODO (Artur): Remove this once Policy supports RNN - if self.encoder.config.shared: - batch[STATE_IN] = None - else: - batch[STATE_IN] = { - ACTOR: None, - CRITIC: None, - } - batch[SampleBatch.SEQ_LENS] = None - # Shared encoder encoder_outs = self.encoder(batch) - # TODO (Artur): Un-uncomment once Policy supports RNN - # output[STATE_OUT] = encoder_outs[STATE_OUT] + if STATE_OUT in encoder_outs: + output[STATE_OUT] = encoder_outs[STATE_OUT] # Value head vf_out = self.vf(encoder_outs[ENCODER_OUT][CRITIC]) diff --git a/rllib/core/models/base.py b/rllib/core/models/base.py index da8b199e229f..dca2d030deb6 100644 --- a/rllib/core/models/base.py +++ b/rllib/core/models/base.py @@ -2,9 +2,7 @@ from dataclasses import dataclass from 
typing import List, Optional, Tuple, Union -from ray.rllib.core.models.specs.checker import convert_to_canonical_format from ray.rllib.core.models.specs.specs_base import Spec -from ray.rllib.core.models.specs.specs_dict import SpecDict from ray.rllib.policy.sample_batch import SampleBatch from ray.rllib.utils.annotations import ExperimentalAPI from ray.rllib.utils.annotations import override @@ -301,11 +299,11 @@ def build(self, framework: str): @override(Model) def get_input_specs(self) -> Optional[Spec]: - return convert_to_canonical_format([SampleBatch.OBS, STATE_IN]) + return [SampleBatch.OBS] @override(Model) def get_output_specs(self) -> Optional[Spec]: - return convert_to_canonical_format([ENCODER_OUT, STATE_OUT]) + return [] @abc.abstractmethod def _forward(self, input_dict: dict, **kwargs) -> dict: @@ -319,8 +317,8 @@ def _forward(self, input_dict: dict, **kwargs) -> dict: The output dict contains at minimum the latent and the state of the encoder (None for stateless encoders). To establish an agreement between the encoder and RLModules, these values - have the fixed keys `SampleBatch.OBS` and `STATE_IN` for the `input_dict`, - and `STATE_OUT` and `ENCODER_OUT` for the returned dict. + have the fixed keys `SampleBatch.OBS` for the `input_dict`, + and `ACTOR` and `CRITIC` for the returned dict. Args: input_dict: The input tensors. Must contain at a minimum the keys @@ -329,25 +327,25 @@ def _forward(self, input_dict: dict, **kwargs) -> dict: **kwargs: Forward compatibility kwargs. Returns: - The output tensors. Must contain at a minimum the keys ENCODER_OUT and - STATE_OUT (which might be None for stateless encoders). + The output tensors. Must contain at a minimum the key ENCODER_OUT. """ - raise NotImplementedError @ExperimentalAPI class ActorCriticEncoder(Encoder): - """An encoder that potentially holds two encoders. + """An encoder that potentially holds two stateless encoders. 
- This is a special case of encoder that can either enclose a single, + This is a special case of Encoder that can either enclose a single, shared encoder or two separate encoders: One for the actor and one for the - critic. The two encoders are of the same type and we can therefore make the + critic. The two encoders are of the same type, and we can therefore make the assumption that they have the same input and output specs. """ framework = None def __init__(self, config: ModelConfig) -> None: + super().__init__(config) + if config.shared: self.encoder = config.base_encoder_config.build(framework=self.framework) else: @@ -358,47 +356,75 @@ def __init__(self, config: ModelConfig) -> None: framework=self.framework ) - # We need to call Encoder.__init__() after initializing the encoder(s) in - # order to build on their specs. - super().__init__(config) - @override(Model) def get_input_specs(self) -> Optional[Spec]: - # if self.config.shared: - # state_in_spec = self.encoder.input_specs[STATE_IN] - # else: - # state_in_spec = { - # ACTOR: self.actor_encoder.input_specs[STATE_IN], - # CRITIC: self.critic_encoder.input_specs[STATE_IN], - # } - - return SpecDict( - { - SampleBatch.OBS: None, - # STATE_IN: state_in_spec, - # SampleBatch.SEQ_LENS: None, - } - ) + return [SampleBatch.OBS] @override(Model) def get_output_specs(self) -> Optional[Spec]: + return [(ENCODER_OUT, ACTOR), (ENCODER_OUT, CRITIC)] + + @override(Model) + def _forward(self, inputs: dict, **kwargs) -> dict: if self.config.shared: - state_out_spec = self.encoder.output_specs[STATE_OUT] - else: - state_out_spec = { - ACTOR: self.actor_encoder.output_specs[STATE_OUT], - CRITIC: self.critic_encoder.output_specs[STATE_OUT], + encoder_outs = self.encoder(inputs, **kwargs) + return { + ENCODER_OUT: { + ACTOR: encoder_outs[ENCODER_OUT], + CRITIC: encoder_outs[ENCODER_OUT], + } } + else: + # Encoders should not modify inputs, so we can pass the same inputs + actor_out = self.actor_encoder(inputs, **kwargs) + 
critic_out = self.critic_encoder(inputs, **kwargs) - return SpecDict( - { + return { ENCODER_OUT: { - ACTOR: None, - CRITIC: None, - }, - STATE_OUT: state_out_spec, + ACTOR: actor_out[ENCODER_OUT], + CRITIC: critic_out[ENCODER_OUT], + } } - ) + + +@ExperimentalAPI +class StatefulActorCriticEncoder(Encoder): + """An encoder that potentially holds two potentially stateful encoders. + + This is a special case of Encoder that can either enclose a single, + shared encoder or two separate encoders: One for the actor and one for the + critic. The two encoders are of the same type, and we can therefore make the + assumption that they have the same input and output specs. + + If this encoder wraps a single encoder, state in input- and output dicts + is simply stored under the key `STATE_IN` and `STATE_OUT`, respectively. + If this encoder wraps two encoders, state in input- and output dicts is + stored under the keys `(STATE_IN, ACTOR)` and `(STATE_IN, CRITIC)` and + `(STATE_OUT, ACTOR)` and `(STATE_OUT, CRITIC)`, respectively. 
+ """ + + framework = None + + def __init__(self, config: ModelConfig) -> None: + super().__init__(config) + + if config.shared: + self.encoder = config.base_encoder_config.build(framework=self.framework) + else: + self.actor_encoder = config.base_encoder_config.build( + framework=self.framework + ) + self.critic_encoder = config.base_encoder_config.build( + framework=self.framework + ) + + @override(Model) + def get_input_specs(self) -> Optional[Spec]: + return [SampleBatch.OBS, STATE_IN] + + @override(Model) + def get_output_specs(self) -> Optional[Spec]: + return [(ENCODER_OUT, ACTOR), (ENCODER_OUT, CRITIC), (STATE_OUT,)] @override(Model) def get_initial_state(self): @@ -412,25 +438,32 @@ def get_initial_state(self): @override(Model) def _forward(self, inputs: dict, **kwargs) -> dict: + outputs = {} + if self.config.shared: outs = self.encoder(inputs, **kwargs) - return { - ENCODER_OUT: {ACTOR: outs[ENCODER_OUT], CRITIC: outs[ENCODER_OUT]}, - STATE_OUT: outs[STATE_OUT], - } + encoder_out = outs.pop(ENCODER_OUT) + outputs[ENCODER_OUT] = {ACTOR: encoder_out, CRITIC: encoder_out} + outputs[STATE_OUT] = outs[STATE_OUT] else: - actor_inputs = inputs # , **{STATE_IN: inputs[STATE_IN][ACTOR]}}) - critic_inputs = inputs # , **{STATE_IN: inputs[STATE_IN][CRITIC]}} + # Shallow copy inputs so that we can add states without modifying + # original dict. 
+ actor_inputs = inputs.copy() + critic_inputs = inputs.copy() + actor_inputs[STATE_IN] = inputs[STATE_IN][ACTOR] + critic_inputs[STATE_IN] = inputs[STATE_IN][CRITIC] actor_out = self.actor_encoder(actor_inputs, **kwargs) critic_out = self.critic_encoder(critic_inputs, **kwargs) - return { - ENCODER_OUT: { - ACTOR: actor_out[ENCODER_OUT], - CRITIC: critic_out[ENCODER_OUT], - }, - STATE_OUT: { - ACTOR: actor_out[STATE_OUT], - CRITIC: critic_out[STATE_OUT], - }, + + outputs[ENCODER_OUT] = { + ACTOR: actor_out[ENCODER_OUT], + CRITIC: critic_out[ENCODER_OUT], } + + outputs[STATE_OUT] = { + ACTOR: actor_out[STATE_OUT], + CRITIC: critic_out[STATE_OUT], + } + + return outputs diff --git a/rllib/core/models/tests/test_cnn_encoders.py b/rllib/core/models/tests/test_cnn_encoders.py index fabff92ecfd2..3276de3ec51d 100644 --- a/rllib/core/models/tests/test_cnn_encoders.py +++ b/rllib/core/models/tests/test_cnn_encoders.py @@ -1,7 +1,7 @@ import itertools import unittest -from ray.rllib.core.models.base import ENCODER_OUT, STATE_OUT +from ray.rllib.core.models.base import ENCODER_OUT from ray.rllib.core.models.configs import CNNEncoderConfig from ray.rllib.models.utils import get_filter_config from ray.rllib.utils.framework import try_import_tf, try_import_torch @@ -15,7 +15,7 @@ class TestCNNEncoders(unittest.TestCase): def test_cnn_encoders(self): """Tests building CNN encoders properly and checks for correct architecture.""" - # Loop through different combinations of hyperparameters. + # Loop through permutations of hyperparameters. inputs_dimss = [ [480, 640, 3], [480, 640, 1], @@ -85,7 +85,6 @@ def test_cnn_encoders(self): # Add this framework version of the model to our checker. outputs = model_checker.add(framework=fw) self.assertEqual(outputs[ENCODER_OUT].shape, (1, output_dims[0])) - self.assertEqual(outputs[STATE_OUT], None) # Check all added models against each other. 
model_checker.check() diff --git a/rllib/core/models/tests/test_mlp_encoders.py b/rllib/core/models/tests/test_mlp_encoders.py index 26fd1430a56d..41a3207aa0bb 100644 --- a/rllib/core/models/tests/test_mlp_encoders.py +++ b/rllib/core/models/tests/test_mlp_encoders.py @@ -2,7 +2,7 @@ import unittest from ray.rllib.core.models.configs import MLPEncoderConfig -from ray.rllib.core.models.base import STATE_OUT, ENCODER_OUT +from ray.rllib.core.models.base import ENCODER_OUT from ray.rllib.utils.framework import try_import_tf, try_import_torch from ray.rllib.utils.test_utils import framework_iterator, ModelChecker @@ -71,7 +71,6 @@ def test_mlp_encoders(self): # Add this framework version of the model to our checker. outputs = model_checker.add(framework=fw) self.assertEqual(outputs[ENCODER_OUT].shape, (1, output_dims[0])) - self.assertEqual(outputs[STATE_OUT], None) # Check all added models against each other. model_checker.check() diff --git a/rllib/core/models/tf/encoder.py b/rllib/core/models/tf/encoder.py index 51ce70eb321b..232b59c2acb0 100644 --- a/rllib/core/models/tf/encoder.py +++ b/rllib/core/models/tf/encoder.py @@ -85,8 +85,6 @@ def get_input_specs(self) -> Optional[Spec]: c=self.config.input_dims[2], framework="tf2", ), - STATE_IN: None, - SampleBatch.SEQ_LENS: None, } ) @@ -97,18 +95,12 @@ def get_output_specs(self) -> Optional[Spec]: ENCODER_OUT: TensorSpec( "b, d", d=self.config.output_dims[0], framework="tf2" ), - STATE_OUT: None, } ) @override(Model) - def _forward(self, inputs: NestedDict, **kwargs) -> NestedDict: - return NestedDict( - { - ENCODER_OUT: self.net(inputs[SampleBatch.OBS]), - STATE_OUT: inputs[STATE_IN], - } - ) + def _forward(self, inputs: dict, **kwargs) -> dict: + return {ENCODER_OUT: self.net(inputs[SampleBatch.OBS])} class TfMLPEncoder(Encoder, TfModel): @@ -134,8 +126,6 @@ def get_input_specs(self) -> Optional[Spec]: SampleBatch.OBS: TensorSpec( "b, d", d=self.config.input_dims[0], framework="tf2" ), - STATE_IN: None, - 
SampleBatch.SEQ_LENS: None, } ) @@ -146,18 +136,12 @@ def get_output_specs(self) -> Optional[Spec]: ENCODER_OUT: TensorSpec( "b, d", d=self.config.output_dims[0], framework="tf2" ), - STATE_OUT: None, } ) @override(Model) def _forward(self, inputs: NestedDict, **kwargs) -> NestedDict: - return NestedDict( - { - ENCODER_OUT: self.net(inputs[SampleBatch.OBS]), - STATE_OUT: inputs[STATE_IN], - } - ) + return {ENCODER_OUT: self.net(inputs[SampleBatch.OBS])} class TfGRUEncoder(TfModel, Encoder): @@ -230,6 +214,9 @@ def get_initial_state(self): @override(Model) def _forward(self, inputs: NestedDict, **kwargs) -> NestedDict: + outputs = {} + + # Calculate the output and state of the GRU. out = tf.cast(inputs[SampleBatch.OBS], tf.float32) # States are batch-first when coming in. Make them layers-first. @@ -245,11 +232,10 @@ def _forward(self, inputs: NestedDict, **kwargs) -> NestedDict: out = self.linear(out) - return { - ENCODER_OUT: out, - # Make state_out batch-first. - STATE_OUT: {"h": tf.stack(states_out, 1)}, - } + # Insert them into the output dict. + outputs[ENCODER_OUT] = out + outputs[STATE_OUT] = {"h": tf.stack(states_out, 1)} + return outputs class TfLSTMEncoder(TfModel, Encoder): @@ -335,6 +321,9 @@ def get_initial_state(self): @override(Model) def _forward(self, inputs: NestedDict, **kwargs) -> NestedDict: + outputs = {} + + # Calculate the output and state of the LSTM. out = tf.cast(inputs[SampleBatch.OBS], tf.float32) # States are batch-first when coming in. Make them layers-first. @@ -352,8 +341,10 @@ def _forward(self, inputs: NestedDict, **kwargs) -> NestedDict: out = self.linear(out) - return { - ENCODER_OUT: out, - # Make state_out batch-first. - STATE_OUT: {"h": tf.stack(states_out_h, 1), "c": tf.stack(states_out_c, 1)}, + # Insert them into the output dict. 
+ outputs[ENCODER_OUT] = out + outputs[STATE_OUT] = { + "h": tf.stack(states_out_h, 1), + "c": tf.stack(states_out_c, 1), } + return outputs diff --git a/rllib/core/models/torch/encoder.py b/rllib/core/models/torch/encoder.py index 05ff2785cd80..61aa6fb3d51e 100644 --- a/rllib/core/models/torch/encoder.py +++ b/rllib/core/models/torch/encoder.py @@ -62,8 +62,6 @@ def get_input_specs(self) -> Optional[Spec]: SampleBatch.OBS: TensorSpec( "b, d", d=self.config.input_dims[0], framework="torch" ), - STATE_IN: None, - SampleBatch.SEQ_LENS: None, } ) @@ -74,16 +72,12 @@ def get_output_specs(self) -> Optional[Spec]: ENCODER_OUT: TensorSpec( "b, d", d=self.config.output_dims[0], framework="torch" ), - STATE_OUT: None, } ) @override(Model) def _forward(self, inputs: dict, **kwargs) -> dict: - return { - ENCODER_OUT: self.net(inputs[SampleBatch.OBS]), - STATE_OUT: inputs[STATE_IN], - } + return {ENCODER_OUT: self.net(inputs[SampleBatch.OBS])} class TorchCNNEncoder(TorchModel, Encoder): @@ -133,8 +127,6 @@ def get_input_specs(self) -> Optional[Spec]: c=self.config.input_dims[2], framework="torch", ), - STATE_IN: None, - SampleBatch.SEQ_LENS: None, } ) @@ -145,16 +137,12 @@ def get_output_specs(self) -> Optional[Spec]: ENCODER_OUT: TensorSpec( "b, d", d=self.config.output_dims[0], framework="torch" ), - STATE_OUT: None, } ) @override(Model) def _forward(self, inputs: dict, **kwargs) -> dict: - return { - ENCODER_OUT: self.net(inputs[SampleBatch.OBS]), - STATE_OUT: inputs[STATE_IN], - } + return {ENCODER_OUT: self.net(inputs[SampleBatch.OBS])} class TorchGRUEncoder(TorchModel, Encoder): @@ -225,6 +213,9 @@ def get_initial_state(self): @override(Model) def _forward(self, inputs: dict, **kwargs) -> dict: + outputs = {} + + # Calculate the output and state of the GRU. out = inputs[SampleBatch.OBS].float() # States are batch-first when coming in. Make them layers-first. 
@@ -235,11 +226,10 @@ def _forward(self, inputs: dict, **kwargs) -> dict: out = self.linear(out) - return { - ENCODER_OUT: out, - # Make states layer-first again. - STATE_OUT: tree.map_structure(lambda s: s.transpose(0, 1), states_out), - } + # Insert them into the output dict. + outputs[ENCODER_OUT] = out + outputs[STATE_OUT] = tree.map_structure(lambda s: s.transpose(0, 1), states_out) + return outputs class TorchLSTMEncoder(TorchModel, Encoder): @@ -322,6 +312,9 @@ def get_initial_state(self): @override(Model) def _forward(self, inputs: dict, **kwargs) -> dict: + outputs = {} + + # Calculate the output and state of the LSTM cell. out = inputs[SampleBatch.OBS].float() # States are batch-first when coming in. Make them layers-first. @@ -332,8 +325,7 @@ def _forward(self, inputs: dict, **kwargs) -> dict: out = self.linear(out) - return { - ENCODER_OUT: out, - # Make states layer-first again. - STATE_OUT: tree.map_structure(lambda s: s.transpose(0, 1), states_out), - } + # Insert them into the output dict. + outputs[ENCODER_OUT] = out + outputs[STATE_OUT] = tree.map_structure(lambda s: s.transpose(0, 1), states_out) + return outputs From 3e5111e061e4bb94f26f9436ed805480dcd27b96 Mon Sep 17 00:00:00 2001 From: Antoni Baum Date: Mon, 15 May 2023 16:50:45 -0700 Subject: [PATCH 394/424] [Train] Don't repartition if xgboost-ray>=0.1.16 (#32960) * [Train] Don't repartition if xgboost-ray>=0.1.14 Repartitioning is not necessary anymore with xgboost-ray>=0.1.16. 
--------- Signed-off-by: Antoni Baum --- python/ray/train/gbdt_trainer.py | 8 ++++---- python/ray/train/lightgbm/lightgbm_trainer.py | 15 +++++++++++++++ python/ray/train/xgboost/xgboost_trainer.py | 14 ++++++++++++++ 3 files changed, 33 insertions(+), 4 deletions(-) diff --git a/python/ray/train/gbdt_trainer.py b/python/ray/train/gbdt_trainer.py index 30d37230f236..114020682cd0 100644 --- a/python/ray/train/gbdt_trainer.py +++ b/python/ray/train/gbdt_trainer.py @@ -226,12 +226,12 @@ def _ray_params(self) -> "xgboost_ray.RayParams": scaling_config_dataclass, self._ray_params_cls, self._default_ray_params ) - def preprocess_datasets(self) -> None: - super().preprocess_datasets() - + def _repartition_datasets_to_match_num_actors(self): # XGBoost/LightGBM-Ray requires each dataset to have at least as many # blocks as there are workers. - # TODO: Move this logic to the respective libraries + # This is only applicable for xgboost-ray<0.1.16. The version check + # is done in subclasses to ensure that xgboost-ray doesn't need to be + # imported here. 
for dataset_key, dataset in self.datasets.items(): if dataset.num_blocks() < self._ray_params.num_actors: if dataset.size_bytes() > _WARN_REPARTITION_THRESHOLD: diff --git a/python/ray/train/lightgbm/lightgbm_trainer.py b/python/ray/train/lightgbm/lightgbm_trainer.py index 15d600382d5b..9735bedb1d9d 100644 --- a/python/ray/train/lightgbm/lightgbm_trainer.py +++ b/python/ray/train/lightgbm/lightgbm_trainer.py @@ -1,5 +1,10 @@ from typing import Dict, Any, Optional, Tuple, TYPE_CHECKING +try: + from packaging.version import Version +except ImportError: + from distutils.version import LooseVersion as Version + from ray.air.checkpoint import Checkpoint from ray.train.gbdt_trainer import GBDTTrainer from ray.util.annotations import PublicAPI @@ -7,6 +12,7 @@ import lightgbm import lightgbm_ray +import xgboost_ray from lightgbm_ray.tune import TuneReportCheckpointCallback, TuneReportCallback if TYPE_CHECKING: @@ -102,3 +108,12 @@ def _save_model(self, model: lightgbm.LGBMModel, path: str): def _model_iteration(self, model: lightgbm.LGBMModel) -> int: return model.booster_.current_iteration() + + def preprocess_datasets(self) -> None: + super().preprocess_datasets() + + # XGBoost/LightGBM-Ray requires each dataset to have at least as many + # blocks as there are workers. 
+ # This is only applicable for xgboost-ray<0.1.16 + if Version(xgboost_ray.__version__) < Version("0.1.16"): + self._repartition_datasets_to_match_num_actors() diff --git a/python/ray/train/xgboost/xgboost_trainer.py b/python/ray/train/xgboost/xgboost_trainer.py index ea4262deb3a0..5cc92a86c079 100644 --- a/python/ray/train/xgboost/xgboost_trainer.py +++ b/python/ray/train/xgboost/xgboost_trainer.py @@ -1,5 +1,10 @@ from typing import Any, Dict, Optional, Tuple, TYPE_CHECKING +try: + from packaging.version import Version +except ImportError: + from distutils.version import LooseVersion as Version + from ray.air.checkpoint import Checkpoint from ray.train.gbdt_trainer import GBDTTrainer from ray.train.xgboost.xgboost_checkpoint import XGBoostCheckpoint @@ -97,3 +102,12 @@ def _model_iteration(self, model: xgboost.Booster) -> int: # Compatibility with XGBoost < 1.4 return len(model.get_dump()) return model.num_boosted_rounds() + + def preprocess_datasets(self) -> None: + super().preprocess_datasets() + + # XGBoost/LightGBM-Ray requires each dataset to have at least as many + # blocks as there are workers. + # This is only applicable for xgboost-ray<0.1.16 + if Version(xgboost_ray.__version__) < Version("0.1.16"): + self._repartition_datasets_to_match_num_actors() From 4bc18501674b006278af84544efc16bf1460dfda Mon Sep 17 00:00:00 2001 From: Sihan Wang Date: Mon, 15 May 2023 16:56:32 -0700 Subject: [PATCH 395/424] [Serve] Multiplex API Impl (#35326) Adds @serve.multiplexed and @serve.get_multiplexed_model_id implementation. 
--- python/ray/serve/_private/utils.py | 36 +++++- python/ray/serve/api.py | 80 +++++++++++- python/ray/serve/batching.py | 27 +--- python/ray/serve/context.py | 1 + python/ray/serve/multiplex.py | 96 ++++++++++++++ python/ray/serve/tests/test_multiplex.py | 153 +++++++++++++++++++++-- 6 files changed, 356 insertions(+), 37 deletions(-) create mode 100644 python/ray/serve/multiplex.py diff --git a/python/ray/serve/_private/utils.py b/python/ray/serve/_private/utils.py index 456f57e8ae28..c6d4a6797092 100644 --- a/python/ray/serve/_private/utils.py +++ b/python/ray/serve/_private/utils.py @@ -9,7 +9,17 @@ import traceback from enum import Enum from functools import wraps -from typing import Dict, Iterable, List, Tuple, TypeVar, Union +from typing import ( + Any, + Callable, + Dict, + Iterable, + List, + Tuple, + TypeVar, + Union, + Optional, +) import fastapi.encoders import numpy as np @@ -535,3 +545,27 @@ def record_serve_tag(key: str, value: str): ) record_extra_usage_tag(serve_telemetry_tag_map[key], value) + + +def extract_self_if_method_call(args: List[Any], func: Callable) -> Optional[object]: + """Check if this is a method rather than a function. + + Does this by checking to see if `func` is the attribute of the first + (`self`) argument under `func.__name__`. Unfortunately, this is the most + robust solution to this I was able to find. It would also be preferable + to do this check when the decorator runs, rather than when the method is. + + Returns the `self` object if it's a method call, else None. + + Arguments: + args: arguments to the function/method call. + func: the unbound function that was called. 
+ """ + if len(args) > 0: + method = getattr(args[0], func.__name__, False) + if method: + wrapped = getattr(method, "__wrapped__", False) + if wrapped and wrapped == func: + return args[0] + + return None diff --git a/python/ray/serve/api.py b/python/ray/serve/api.py index 737a62b26b75..c86d8b749f36 100644 --- a/python/ray/serve/api.py +++ b/python/ray/serve/api.py @@ -2,6 +2,7 @@ import inspect import logging from typing import Any, Callable, Dict, Optional, Tuple, Union +from functools import wraps from fastapi import APIRouter, FastAPI from ray._private.usage.usage_lib import TagKey, record_extra_usage_tag @@ -9,6 +10,7 @@ from uvicorn.config import Config from uvicorn.lifespan.on import LifespanOn +import ray from ray import cloudpickle from ray.dag import DAGNode from ray.util.annotations import Deprecated, PublicAPI @@ -29,6 +31,7 @@ _set_global_client, ) from ray.serve.deployment import Application, Deployment +from ray.serve.multiplex import _ModelMultiplexWrapper from ray.serve._private.deployment_graph_build import build as pipeline_build from ray.serve._private.deployment_graph_build import ( get_and_validate_ingress_deployment, @@ -45,6 +48,7 @@ install_serve_encoders_to_fastapi, guarded_deprecation_warning, record_serve_tag, + extract_self_if_method_call, ) from ray.serve._private import api as _private_api @@ -638,7 +642,78 @@ async def __call__(self, request): number if you want to save memory on the node resource. """ - raise NotImplementedError("Multiplexed deployment is not supported yet.") + if func is not None: + if not callable(func): + raise TypeError( + "The `multiplexed` decorator must be used with a function or method." + ) + + # TODO(Sihan): Make the API accept the sync function as well. + # https://github.com/ray-project/ray/issues/35356 + if not inspect.iscoroutinefunction(func): + raise TypeError( + "@serve.multiplexed can only be used to decorate async " + "functions or methods." 
+ ) + signature = inspect.signature(func) + if len(signature.parameters) == 0 or len(signature.parameters) > 2: + raise TypeError( + "@serve.multiplexed can only be used to decorate functions or methods " + "with at least one 'model_id: str' argument." + ) + + if type(max_num_models_per_replica) is not int: + raise TypeError("max_num_models_per_replica must be an integer.") + + if max_num_models_per_replica != -1 and max_num_models_per_replica <= 0: + raise ValueError("max_num_models_per_replica must be positive.") + + def _multiplex_decorator(func: Callable): + @wraps(func) + async def _multiplex_wrapper(*args): + args_check_error_msg = ( + "Functions decorated with `@serve.multiplexed` must take exactly one" + "the multiplexed model ID (str), but got {}" + ) + if not args: + raise TypeError( + args_check_error_msg.format("no arguments are provided.") + ) + self = extract_self_if_method_call(args, func) + + # User defined multiplexed function can be a standalone function or a + # method of a class. If it is a method of a class, the first argument + # is self. + if self is None: + if len(args) != 1: + raise TypeError( + args_check_error_msg.format("more than one arguments.") + ) + multiplex_object = func + model_id = args[0] + else: + # count self as an argument + if len(args) != 2: + raise TypeError( + args_check_error_msg.format("more than one arguments.") + ) + multiplex_object = self + model_id = args[1] + multiplex_attr = f"__serve_multiplex_{func.__name__}" + # If the multiplexed function is called for the first time, + # create a model multiplex wrapper and cache it in the multiplex object. 
+ if not hasattr(multiplex_object, multiplex_attr): + model_multiplex_wrapper = _ModelMultiplexWrapper( + func, self, max_num_models_per_replica + ) + setattr(multiplex_object, multiplex_attr, model_multiplex_wrapper) + else: + model_multiplex_wrapper = getattr(multiplex_object, multiplex_attr) + return await model_multiplex_wrapper.load_model(model_id) + + return _multiplex_wrapper + + return _multiplex_decorator(func) if callable(func) else _multiplex_decorator @PublicAPI(stability="alpha") @@ -667,4 +742,5 @@ def get_multiplexed_model_id() -> str: def my_deployment_function(request): assert serve.get_multiplexed_model_id() == "model_1" """ - raise NotImplementedError("get_multiplexed_model_id API is not supported yet.") + _request_context = ray.serve.context._serve_request_context.get() + return _request_context.multiplexed_model_id diff --git a/python/ray/serve/batching.py b/python/ray/serve/batching.py index a44f918bc079..219e954a6831 100644 --- a/python/ray/serve/batching.py +++ b/python/ray/serve/batching.py @@ -9,6 +9,7 @@ from ray._private.signature import extract_signature, flatten_args, recover_args from ray._private.utils import get_or_create_event_loop from ray.serve.exceptions import RayServeException +from ray.serve._private.utils import extract_self_if_method_call from ray.util.annotations import PublicAPI @@ -171,30 +172,6 @@ def __del__(self): self._handle_batch_task.cancel() -def _extract_self_if_method_call(args: List[Any], func: Callable) -> Optional[object]: - """Check if this is a method rather than a function. - - Does this by checking to see if `func` is the attribute of the first - (`self`) argument under `func.__name__`. Unfortunately, this is the most - robust solution to this I was able to find. It would also be preferable - to do this check when the decorator runs, rather than when the method is. - - Returns the `self` object if it's a method call, else None. - - Arguments: - args (List[Any]): arguments to the function/method call. 
- func: the unbound function that was called. - """ - if len(args) > 0: - method = getattr(args[0], func.__name__, False) - if method: - wrapped = getattr(method, "__wrapped__", False) - if wrapped and wrapped == func: - return args[0] - - return None - - T = TypeVar("T") R = TypeVar("R") F = TypeVar("F", bound=Callable[[List[T]], List[R]]) @@ -289,7 +266,7 @@ async def __call__(self, request: Request): def _batch_decorator(_func): @wraps(_func) async def batch_wrapper(*args, **kwargs): - self = _extract_self_if_method_call(args, _func) + self = extract_self_if_method_call(args, _func) flattened_args: List = flatten_args(extract_signature(_func), args, kwargs) if self is None: diff --git a/python/ray/serve/context.py b/python/ray/serve/context.py index 902025b0adcc..0823ceac2d88 100644 --- a/python/ray/serve/context.py +++ b/python/ray/serve/context.py @@ -149,6 +149,7 @@ class RequestContext: route: str = "" request_id: str = "" app_name: str = "" + multiplexed_model_id: str = "" _serve_request_context = contextvars.ContextVar( diff --git a/python/ray/serve/multiplex.py b/python/ray/serve/multiplex.py new file mode 100644 index 000000000000..0cfed15bf62d --- /dev/null +++ b/python/ray/serve/multiplex.py @@ -0,0 +1,96 @@ +from ray._private.async_compat import sync_to_async +from collections import OrderedDict +from typing import Any, Callable +import logging +from ray.serve._private.constants import SERVE_LOGGER_NAME +import inspect +import asyncio + + +logger = logging.getLogger(SERVE_LOGGER_NAME) + + +class _ModelMultiplexWrapper: + """A wrapper class that wraps the model load function and + provides the LRU caching functionality. + + The model multiplexer is a wrapper class that wraps the model load function + and provides the LRU caching functionality, and the model load function should + be a coroutine function that takes the model ID as the first argument and + returns the user-constructed model object. 
+ The model multiplexer will also ensure that the number of models on the current + replica does not exceed the specified limit. + The model will be unloaded in the LRU order, the model multiplexer will call the + model's __del__ attribute if it exists to clean up the model resources eagerly. + + """ + + def __init__( + self, + model_load_func: Callable[[str], Any], + self_arg: Any, + max_num_models_per_replica: int, + ): + """Initialize the model multiplexer. + Args: + model_load_func: the model load async function. + self_arg: self argument when model_load_func is class method. + max_num_models_per_replica: the maximum number of models to be loaded on the + current replica. If it is -1, there is no limit for the number of models + per replica. + """ + self.models = OrderedDict() + self._func = model_load_func + self.self_arg = self_arg + self.max_num_models_per_replica = max_num_models_per_replica + + async def load_model(self, model_id: str) -> Any: + """Load the model if it is not loaded yet, and return the user-constructed model object. + + Args: + model_id: the model ID. + + Returns: + The user-constructed model object. + """ + + if type(model_id) != str: + raise TypeError("The model ID must be a string.") + + if not model_id: + raise ValueError("The model ID cannot be empty.") + + if model_id in self.models: + # Move the model to the end of the OrderedDict to ensure LRU caching. + model = self.models.pop(model_id) + self.models[model_id] = model + else: + # If the number of models per replica is specified, check if the number of + # models on the current replica has reached the limit. + if ( + self.max_num_models_per_replica > 0 + and len(self.models) >= self.max_num_models_per_replica + ): + # Unload the least recently used model. + await self.unload_model() + # Load the model. 
+ logger.info(f"Loading model '{model_id}'.") + if self.self_arg is None: + self.models[model_id] = await self._func(model_id) + else: + self.models[model_id] = await self._func(self.self_arg, model_id) + return self.models[model_id] + + async def unload_model(self) -> None: + """Unload the least recently used model.""" + model_id, model = self.models.popitem(last=False) + logger.info(f"Unloading model '{model_id}'.") + + # If the model has __del__ attribute, call it. + # This is to clean up the model resources eagerly. + if hasattr(model, "__del__"): + if not inspect.iscoroutinefunction(model.__del__): + await asyncio.get_running_loop().run_in_executor(None, model.__del__) + else: + await sync_to_async(model.__del__)() + setattr(model, "__del__", lambda _: None) diff --git a/python/ray/serve/tests/test_multiplex.py b/python/ray/serve/tests/test_multiplex.py index c1eb44d2c066..c435c1523925 100644 --- a/python/ray/serve/tests/test_multiplex.py +++ b/python/ray/serve/tests/test_multiplex.py @@ -1,25 +1,160 @@ import pytest +import ray from ray import serve +from ray.serve.multiplex import _ModelMultiplexWrapper -def test_multiplexed(): - """Test multiplexed API.""" +class TestMultiplexWrapper: + @pytest.mark.asyncio + async def test_multiplex_wrapper(self): + """Test multiplex wrapper with LRU caching.""" - with pytest.raises(NotImplementedError): + async def model_load_func(model_id: str): + return model_id + + multiplexer = _ModelMultiplexWrapper( + model_load_func, None, max_num_models_per_replica=2 + ) + # Load model1 + await multiplexer.load_model("1") + assert multiplexer.models == {"1": "1"} + # Load model2 + await multiplexer.load_model("2") + assert multiplexer.models == {"1": "1", "2": "2"} + + # Load model3, model1 should be unloaded + await multiplexer.load_model("3") + assert multiplexer.models == {"2": "2", "3": "3"} + + # reload model2, model2 should be moved to the end of the LRU cache + await multiplexer.load_model("2") + assert multiplexer.models 
== {"3": "3", "2": "2"} + + # Load model4, model3 should be unloaded + await multiplexer.load_model("4") + assert multiplexer.models == {"2": "2", "4": "4"} + + @pytest.mark.asyncio + async def test_bad_call_multiplexed_func(self): + """Test bad call to multiplexed function""" + + async def model_load_func(model_id: str): + return model_id + + multiplexer = _ModelMultiplexWrapper( + model_load_func, None, max_num_models_per_replica=2 + ) + with pytest.raises(TypeError): + await multiplexer.load_model(1) + with pytest.raises(TypeError): + await multiplexer.load_model() + + @pytest.mark.asyncio + async def test_unload_model_call_del(self): + class MyModel: + def __init__(self, model_id): + self.model_id = model_id + + def __del__(self): + raise Exception(f"{self.model_id} is dead") + + def __eq__(self, model): + return model.model_id == self.model_id + + async def model_load_func(model_id: str) -> MyModel: + return MyModel(model_id) + + multiplexer = _ModelMultiplexWrapper( + model_load_func, None, max_num_models_per_replica=1 + ) + await multiplexer.load_model("1") + assert multiplexer.models == {"1": MyModel("1")} + with pytest.raises(Exception, match="1 is dead"): + await multiplexer.load_model("2") + + +class TestBasicAPI: + def test_decorator_validation(self): + @serve.multiplexed + async def get_model(model: str): + return + + @serve.multiplexed(max_num_models_per_replica=1) + async def get_model2(model: str): + return + + @serve.deployment + class MyModel: + @serve.multiplexed + async def get_model(model: str): + return @serve.deployment - class Model: + class MyModel2: + @serve.multiplexed(max_num_models_per_replica=1) + async def get_model(self, model: str): + return + + # multiplex can only be used with func or method. 
+ with pytest.raises(TypeError): + + @serve.deployment + @serve.multiplexed + class BadDecorator: + pass + + # max_num_models_per_replica must be an integer + with pytest.raises(TypeError): + + @serve.multiplexed(max_num_models_per_replica="1") + async def get_model3(model: str): + pass + + # max_num_models_per_replica must be positive + with pytest.raises(ValueError): + + @serve.multiplexed(max_num_models_per_replica=0) + async def get_model4(model: str): + pass + + # multiplexed function must be async def + with pytest.raises(TypeError): + + @serve.multiplexed + def get_model5(model: str): + pass + + with pytest.raises(TypeError): + + @serve.deployment + class MyModel3: + @serve.multiplexed + def get_model(self, model: str): + return + + # no model_id argument in multiplexed function + with pytest.raises(TypeError): + @serve.multiplexed - def get_model(self, model_id: str): + def get_model6(): pass + with pytest.raises(TypeError): -def test_get_multiplexed_model_id(): - """Test get_multiplexed_model_id API.""" + @serve.deployment + class MyModel4: + @serve.multiplexed + def get_model(self): + return - with pytest.raises(NotImplementedError): - serve.get_multiplexed_model_id() + def test_get_multiplexed_model_id(self): + """Test get_multiplexed_model_id() API""" + assert serve.get_multiplexed_model_id() == "" + ray.serve.context._serve_request_context.set( + ray.serve.context.RequestContext(multiplexed_model_id="1") + ) + assert serve.get_multiplexed_model_id() == "1" if __name__ == "__main__": From b8be99830bc5f3dfcc8e62954233bc0fe01555fe Mon Sep 17 00:00:00 2001 From: Chao Wang <125417081+chaowanggg@users.noreply.github.com> Date: Mon, 15 May 2023 17:09:32 -0700 Subject: [PATCH 396/424] [UI] Unify colors of different status for Jobs, Services, Actors (#35138) Unify the colors of different status for Ray entities(Jobs, Actors, Serve....) 
Change icon for Recent Job with status STOPPED Remove icon for job status in job detail page and job list page In this pull request, we will be removing the jobs status icon from the detail page. The reason is that it's not compatible with the background color, for example, the PENDING status with an orange background and a blue loading icon. We will only keep the blue loading icon for the RUNNING status.cc @alanwguo --- dashboard/client/src/common/JobStatus.tsx | 27 ++++- .../client/src/components/ActorTable.tsx | 40 +++---- .../client/src/components/StatusChip.tsx | 74 +++++------- .../pages/actor/hook/mockedUseActorList.ts | 113 ++++++++++++++++++ .../src/pages/job/hook/mockedUseJobList.ts | 29 +++++ .../src/pages/job/hook/useJobProgress.ts | 52 +++++--- .../pages/state/hook/mockedPlacementGroup.ts | 61 ++++++++++ dashboard/client/src/type/actor.ts | 5 +- .../client/src/type/{job.d.ts => job.ts} | 10 +- 9 files changed, 323 insertions(+), 88 deletions(-) create mode 100644 dashboard/client/src/pages/actor/hook/mockedUseActorList.ts create mode 100644 dashboard/client/src/pages/job/hook/mockedUseJobList.ts create mode 100644 dashboard/client/src/pages/state/hook/mockedPlacementGroup.ts rename dashboard/client/src/type/{job.d.ts => job.ts} (95%) diff --git a/dashboard/client/src/common/JobStatus.tsx b/dashboard/client/src/common/JobStatus.tsx index f096132367b1..09473674e031 100644 --- a/dashboard/client/src/common/JobStatus.tsx +++ b/dashboard/client/src/common/JobStatus.tsx @@ -5,9 +5,10 @@ import { RiCheckboxCircleFill, RiCloseCircleFill, RiLoader4Line, + RiStopCircleFill, } from "react-icons/ri"; import { StatusChip } from "../components/StatusChip"; -import { UnifiedJob } from "../type/job"; +import { JobStatus, UnifiedJob } from "../type/job"; import { ClassNameProps } from "./props"; const useJobRunningIconStyles = makeStyles((theme) => @@ -75,6 +76,9 @@ const useJobStatusIconStyles = makeStyles((theme) => colorError: { color: theme.palette.error.main, }, + 
colorStopped: { + color: "#757575", + }, }), ); @@ -89,9 +93,8 @@ export const JobStatusIcon = ({ className, }: JobStatusIconProps) => { const classes = useJobStatusIconStyles(); - switch (job.status) { - case "SUCCEEDED": + case JobStatus.SUCCEEDED: return ( ); - case "FAILED": - case "STOPPED": + case JobStatus.FAILED: return ( ); + case JobStatus.STOPPED: + return ( + + ); default: return ; } @@ -133,7 +148,7 @@ export const JobStatusWithIcon = ({ job }: JobStatusWithIconProps) => { } + icon={job.status === JobStatus.RUNNING && } /> ); diff --git a/dashboard/client/src/components/ActorTable.tsx b/dashboard/client/src/components/ActorTable.tsx index 339e223f92cf..885a501b522f 100644 --- a/dashboard/client/src/components/ActorTable.tsx +++ b/dashboard/client/src/components/ActorTable.tsx @@ -15,6 +15,7 @@ import { orange } from "@material-ui/core/colors"; import { SearchOutlined } from "@material-ui/icons"; import Autocomplete from "@material-ui/lab/Autocomplete"; import Pagination from "@material-ui/lab/Pagination"; +import _ from "lodash"; import React, { useContext, useMemo, useState } from "react"; import { GlobalContext } from "../App"; import { DurationText } from "../common/DurationText"; @@ -38,14 +39,22 @@ export type ActorTableProps = { detailPathPrefix?: string; }; +const SEQUENCE = { + FIRST: 1, + MIDDLE: 2, + LAST: 3, +}; + type StateOrder = { [key in ActorEnum]: number; }; + const stateOrder: StateOrder = { - [ActorEnum.ALIVE]: 0, - [ActorEnum.PENDING]: 1, - [ActorEnum.RECONSTRUCTING]: 2, - [ActorEnum.DEAD]: 3, + [ActorEnum.ALIVE]: SEQUENCE.FIRST, + [ActorEnum.DEPENDENCIES_UNREADY]: SEQUENCE.MIDDLE, + [ActorEnum.PENDING_CREATION]: SEQUENCE.MIDDLE, + [ActorEnum.RESTARTING]: SEQUENCE.MIDDLE, + [ActorEnum.DEAD]: SEQUENCE.LAST, }; //type predicate for ActorEnum const isActorEnum = (state: unknown): state is ActorEnum => { @@ -55,26 +64,11 @@ const isActorEnum = (state: unknown): state is ActorEnum => { // We sort the actorsList so that the "Alive" actors 
appear at first and "Dead" actors appear in the end. export const sortActors = (actorList: Actor[]) => { const sortedActors = [...actorList]; - sortedActors.sort((actor1, actor2) => { - const actorOrder1 = isActorEnum(actor1.state) - ? stateOrder[actor1.state] - : 0; - const actorOrder2 = isActorEnum(actor2.state) - ? stateOrder[actor2.state] - : 0; - - const actorTime1 = actor1.startTime || 0; - const actorTime2 = actor2.startTime || 0; - - if (actorOrder1 !== actorOrder2) { - return actorOrder1 - actorOrder2; - } else { - // When the state is equal, we sort by startTime - // in order to provide a determined order for users no matter the backend API changes - return actorTime1 - actorTime2; - } + return _.sortBy(sortedActors, (actor) => { + const actorOrder = isActorEnum(actor.state) ? stateOrder[actor.state] : 0; + const actorTime = actor.startTime || 0; + return [actorOrder, actorTime]; }); - return sortedActors; }; const ActorTable = ({ diff --git a/dashboard/client/src/components/StatusChip.tsx b/dashboard/client/src/components/StatusChip.tsx index 4b9717e3453a..090bd86a1713 100644 --- a/dashboard/client/src/components/StatusChip.tsx +++ b/dashboard/client/src/components/StatusChip.tsx @@ -1,19 +1,11 @@ import { Color, createStyles, makeStyles } from "@material-ui/core"; -import { - blue, - blueGrey, - cyan, - green, - grey, - lightBlue, - orange, - red, - yellow, -} from "@material-ui/core/colors"; +import { blue, blueGrey, cyan, green, red } from "@material-ui/core/colors"; import { CSSProperties } from "@material-ui/core/styles/withStyles"; import classNames from "classnames"; import React, { ReactNode } from "react"; +import { TaskStatus } from "../pages/job/hook/useJobProgress"; import { ActorEnum } from "../type/actor"; +import { JobStatus } from "../type/job"; import { PlacementGroupState } from "../type/placementGroup"; import { ServeApplicationStatus, @@ -21,7 +13,9 @@ import { ServeHTTPProxyStatus, ServeReplicaState, } from "../type/serve"; -import { 
TypeTaskStatus } from "../type/task"; + +const orange = "#DB6D00"; +const grey = "#5F6469"; const colorMap = { node: { @@ -30,56 +24,52 @@ const colorMap = { }, worker: { ALIVE: green, + DEAD: red, }, actor: { [ActorEnum.ALIVE]: green, [ActorEnum.DEAD]: red, - [ActorEnum.PENDING]: blue, - [ActorEnum.RECONSTRUCTING]: lightBlue, + [ActorEnum.DEPENDENCIES_UNREADY]: orange, + [ActorEnum.PENDING_CREATION]: orange, + [ActorEnum.RESTARTING]: orange, }, task: { - [TypeTaskStatus.FAILED]: red, - [TypeTaskStatus.FINISHED]: green, - [TypeTaskStatus.RUNNING]: blue, - [TypeTaskStatus.RUNNING_IN_RAY_GET]: blue, - [TypeTaskStatus.RUNNING_IN_RAY_WAIT]: blue, - [TypeTaskStatus.SUBMITTED_TO_WORKER]: "#cfcf08", - [TypeTaskStatus.PENDING_ARGS_FETCH]: blue, - [TypeTaskStatus.PENDING_OBJ_STORE_MEM_AVAIL]: blue, - [TypeTaskStatus.PENDING_NODE_ASSIGNMENT]: "#cfcf08", - [TypeTaskStatus.PENDING_ARGS_AVAIL]: "#f79e02", + [TaskStatus.FAILED]: red, + [TaskStatus.FINISHED]: green, + [TaskStatus.RUNNING]: blue, + [TaskStatus.SUBMITTED_TO_WORKER]: orange, + [TaskStatus.PENDING_NODE_ASSIGNMENT]: orange, + [TaskStatus.PENDING_ARGS_AVAIL]: orange, + [TaskStatus.UNKNOWN]: grey, }, job: { - INIT: grey, - SUBMITTED: "#cfcf08", - DISPATCHED: lightBlue, - RUNNING: blue, - COMPLETED: green, - SUCCEEDED: green, - FINISHED: green, - FAILED: red, + [JobStatus.PENDING]: orange, + [JobStatus.RUNNING]: blue, + [JobStatus.STOPPED]: grey, + [JobStatus.SUCCEEDED]: green, + [JobStatus.FAILED]: red, }, placementGroup: { - [PlacementGroupState.PENDING]: "#f79e02", - [PlacementGroupState.CREATED]: blue, - [PlacementGroupState.REMOVED]: red, - [PlacementGroupState.RESCHEDULING]: "#cfcf08", + [PlacementGroupState.PENDING]: orange, + [PlacementGroupState.CREATED]: green, + [PlacementGroupState.REMOVED]: grey, + [PlacementGroupState.RESCHEDULING]: orange, }, serveApplication: { [ServeApplicationStatus.NOT_STARTED]: grey, - [ServeApplicationStatus.DEPLOYING]: yellow, + [ServeApplicationStatus.DEPLOYING]: orange, 
[ServeApplicationStatus.RUNNING]: green, [ServeApplicationStatus.DEPLOY_FAILED]: red, - [ServeApplicationStatus.DELETING]: yellow, + [ServeApplicationStatus.DELETING]: orange, }, serveDeployment: { - [ServeDeploymentStatus.UPDATING]: yellow, + [ServeDeploymentStatus.UPDATING]: orange, [ServeDeploymentStatus.HEALTHY]: green, [ServeDeploymentStatus.UNHEALTHY]: red, }, serveReplica: { - [ServeReplicaState.STARTING]: yellow, - [ServeReplicaState.UPDATING]: yellow, + [ServeReplicaState.STARTING]: orange, + [ServeReplicaState.UPDATING]: orange, [ServeReplicaState.RECOVERING]: orange, [ServeReplicaState.RUNNING]: green, [ServeReplicaState.STOPPING]: red, @@ -87,7 +77,7 @@ const colorMap = { serveHttpProxy: { [ServeHTTPProxyStatus.HEALTHY]: green, [ServeHTTPProxyStatus.UNHEALTHY]: red, - [ServeHTTPProxyStatus.STARTING]: yellow, + [ServeHTTPProxyStatus.STARTING]: orange, }, } as { [key: string]: { diff --git a/dashboard/client/src/pages/actor/hook/mockedUseActorList.ts b/dashboard/client/src/pages/actor/hook/mockedUseActorList.ts new file mode 100644 index 000000000000..2063b68fc776 --- /dev/null +++ b/dashboard/client/src/pages/actor/hook/mockedUseActorList.ts @@ -0,0 +1,113 @@ +import { Actor } from "../../../type/actor"; + +const MOCK_ACTORS: { [actorId: string]: Actor } = { + ACTOR_1: { + actorId: "ACTOR_1", + jobId: "01000000", + address: { + rayletId: "426854e68e4225b3941deaf03c8dcfcb1daacc69a92711d370dbb0e1", + ipAddress: "172.31.11.178", + port: 10003, + workerId: "b8b276a03612644098ed7a929c3b0e50f5bde894eb0d8cab288fbb6d", + }, + state: "ALIVE", + numRestarts: "0", + name: "", + pid: 25321, + startTime: 1679010689148, + endTime: 0, + actorClass: "Counter", + exitDetail: "-", + requiredResources: {}, + placementGroupId: "123", + reprName: ",", + }, + ACTOR_2: { + actorId: "ACTOR_2", + jobId: "01000000", + address: { + rayletId: "426854e68e4225b3941deaf03c8dcfcb1daacc69a92711d370dbb0e1", + ipAddress: "172.31.11.178", + port: 10003, + workerId: 
"b8b276a03612644098ed7a929c3b0e50f5bde894eb0d8cab288fbb6d", + }, + state: "DEAD", + numRestarts: "0", + name: "", + pid: 25322, + startTime: 1679010689150, + endTime: 0, + actorClass: "Counter", + exitDetail: "-", + requiredResources: {}, + placementGroupId: "123", + reprName: ",", + }, + ACTOR_3: { + actorId: "ACTOR_3", + jobId: "01000000", + address: { + rayletId: "426854e68e4225b3941deaf03c8dcfcb1daacc69a92711d370dbb0e1", + ipAddress: "172.31.11.178", + port: 10003, + workerId: "b8b276a03612644098ed7a929c3b0e50f5bde894eb0d8cab288fbb6d", + }, + state: "DEPENDENCIES_UNREADY", + numRestarts: "0", + name: "", + pid: 25323, + startTime: 1679010689152, + endTime: 0, + actorClass: "Counter", + exitDetail: "-", + requiredResources: {}, + placementGroupId: "123", + reprName: ",", + }, + ACTOR_4: { + actorId: "ACTOR_4", + jobId: "01000000", + address: { + rayletId: "426854e68e4225b3941deaf03c8dcfcb1daacc69a92711d370dbb0e1", + ipAddress: "172.31.11.178", + port: 10003, + workerId: "b8b276a03612644098ed7a929c3b0e50f5bde894eb0d8cab288fbb6d", + }, + state: "PENDING_CREATION", + numRestarts: "0", + name: "", + pid: 25324, + startTime: 1679010689154, + endTime: 0, + actorClass: "Counter", + exitDetail: "-", + requiredResources: {}, + placementGroupId: "123", + reprName: ",", + }, + ACTOR_5: { + actorId: "ACTOR_5", + jobId: "01000000", + address: { + rayletId: "426854e68e4225b3941deaf03c8dcfcb1daacc69a92711d370dbb0e1", + ipAddress: "172.31.11.178", + port: 10003, + workerId: "b8b276a03612644098ed7a929c3b0e50f5bde894eb0d8cab288fbb6d", + }, + state: "RESTARTING", + numRestarts: "1", + name: "", + pid: 25325, + startTime: 1679010689156, + endTime: 0, + actorClass: "Counter", + exitDetail: "-", + requiredResources: {}, + placementGroupId: "123", + reprName: ",", + }, +}; + +export const useActorList = (): { [actorId: string]: Actor } => { + return MOCK_ACTORS; +}; diff --git a/dashboard/client/src/pages/job/hook/mockedUseJobList.ts 
b/dashboard/client/src/pages/job/hook/mockedUseJobList.ts new file mode 100644 index 000000000000..e343379a015d --- /dev/null +++ b/dashboard/client/src/pages/job/hook/mockedUseJobList.ts @@ -0,0 +1,29 @@ +import { JobStatus } from "../../../type/job"; + +export const JOB_LIST = [ + { + job_id: "01000000", + submission_id: "raysubmit_12345", + status: JobStatus.PENDING, + }, + { + job_id: "02000000", + submission_id: null, + status: JobStatus.FAILED, + }, + { + job_id: null, + submission_id: "raysubmit_23456", + status: JobStatus.RUNNING, + }, + { + job_id: "04000000", + submission_id: "raysubmit_34567", + status: JobStatus.STOPPED, + }, + { + job_id: "05000000", + submission_id: "raysubmit_45678", + status: JobStatus.SUCCEEDED, + }, +] as any; diff --git a/dashboard/client/src/pages/job/hook/useJobProgress.ts b/dashboard/client/src/pages/job/hook/useJobProgress.ts index 1a55ba5ff1cc..a3cc6fc01e87 100644 --- a/dashboard/client/src/pages/job/hook/useJobProgress.ts +++ b/dashboard/client/src/pages/job/hook/useJobProgress.ts @@ -15,21 +15,42 @@ import { } from "../../../type/job"; import { TypeTaskStatus } from "../../../type/task"; -const TASK_STATE_NAME_TO_PROGRESS_KEY: Record< - TypeTaskStatus, +export enum TaskStatus { + PENDING_ARGS_AVAIL = "PENDING_ARGS_AVAIL", + PENDING_NODE_ASSIGNMENT = "PENDING_NODE_ASSIGNMENT", + SUBMITTED_TO_WORKER = "SUBMITTED_TO_WORKER", + RUNNING = "RUNNING", + FINISHED = "FINISHED", + FAILED = "FAILED", + UNKNOWN = "UNKNOWN", +} + +const TASK_STATE_NAME_TO_PROGRESS_KEY: Record = { + [TypeTaskStatus.PENDING_ARGS_AVAIL]: TaskStatus.PENDING_ARGS_AVAIL, + [TypeTaskStatus.PENDING_NODE_ASSIGNMENT]: TaskStatus.PENDING_NODE_ASSIGNMENT, + [TypeTaskStatus.PENDING_OBJ_STORE_MEM_AVAIL]: + TaskStatus.PENDING_NODE_ASSIGNMENT, + [TypeTaskStatus.PENDING_ARGS_FETCH]: TaskStatus.PENDING_NODE_ASSIGNMENT, + [TypeTaskStatus.SUBMITTED_TO_WORKER]: TaskStatus.SUBMITTED_TO_WORKER, + [TypeTaskStatus.RUNNING]: TaskStatus.RUNNING, + 
[TypeTaskStatus.RUNNING_IN_RAY_GET]: TaskStatus.RUNNING, + [TypeTaskStatus.RUNNING_IN_RAY_WAIT]: TaskStatus.RUNNING, + [TypeTaskStatus.FINISHED]: TaskStatus.FINISHED, + [TypeTaskStatus.FAILED]: TaskStatus.FAILED, + [TypeTaskStatus.NIL]: TaskStatus.UNKNOWN, +}; + +export const TaskStatusToTaskProgressMapping: Record< + TaskStatus, keyof TaskProgress > = { - [TypeTaskStatus.PENDING_ARGS_AVAIL]: "numPendingArgsAvail", - [TypeTaskStatus.PENDING_NODE_ASSIGNMENT]: "numPendingNodeAssignment", - [TypeTaskStatus.PENDING_OBJ_STORE_MEM_AVAIL]: "numPendingNodeAssignment", - [TypeTaskStatus.PENDING_ARGS_FETCH]: "numPendingNodeAssignment", - [TypeTaskStatus.SUBMITTED_TO_WORKER]: "numSubmittedToWorker", - [TypeTaskStatus.RUNNING]: "numRunning", - [TypeTaskStatus.RUNNING_IN_RAY_GET]: "numRunning", - [TypeTaskStatus.RUNNING_IN_RAY_WAIT]: "numRunning", - [TypeTaskStatus.FINISHED]: "numFinished", - [TypeTaskStatus.FAILED]: "numFailed", - [TypeTaskStatus.NIL]: "numUnknown", + [TaskStatus.PENDING_ARGS_AVAIL]: "numPendingArgsAvail", + [TaskStatus.PENDING_NODE_ASSIGNMENT]: "numPendingNodeAssignment", + [TaskStatus.SUBMITTED_TO_WORKER]: "numSubmittedToWorker", + [TaskStatus.RUNNING]: "numRunning", + [TaskStatus.FINISHED]: "numFinished", + [TaskStatus.FAILED]: "numFailed", + [TaskStatus.UNKNOWN]: "numUnknown", }; const useFetchStateApiProgressByTaskName = ( @@ -181,8 +202,11 @@ const formatStateCountsToProgress = (stateCounts: { }) => { const formattedProgress: TaskProgress = {}; Object.entries(stateCounts).forEach(([state, count]) => { + const taskStatus: TaskStatus = + TASK_STATE_NAME_TO_PROGRESS_KEY[state as TypeTaskStatus]; + const key: keyof TaskProgress = - TASK_STATE_NAME_TO_PROGRESS_KEY[state as TypeTaskStatus] ?? "numUnknown"; + TaskStatusToTaskProgressMapping[taskStatus] ?? "numUnknown"; formattedProgress[key] = (formattedProgress[key] ?? 
0) + count; }); diff --git a/dashboard/client/src/pages/state/hook/mockedPlacementGroup.ts b/dashboard/client/src/pages/state/hook/mockedPlacementGroup.ts new file mode 100644 index 000000000000..68723a806902 --- /dev/null +++ b/dashboard/client/src/pages/state/hook/mockedPlacementGroup.ts @@ -0,0 +1,61 @@ +export const bundles = [ + { + bundle_id: "bundle-1", + node_id: "node-1", + unit_resources: { + cpu: 4, + memory: 8192, + }, + }, + { + bundle_id: "bundle-2", + node_id: null, + unit_resources: { + cpu: 2, + memory: 4096, + }, + }, + { + bundle_id: "bundle-3", + node_id: "node-2", + unit_resources: { + cpu: 8, + memory: 16384, + }, + }, +]; + +export const mockData = [ + { + placement_group_id: "pg-123456789", + name: "MyPlacementGroup", + creator_job_id: "job-987654321", + state: "CREATED", + stats: null, + bundles, + }, + { + placement_group_id: "pg-123456789", + name: "MyPlacementGroup", + creator_job_id: "job-987654321", + state: "REMOVED", + stats: null, + bundles, + }, + { + placement_group_id: "pg-123456789", + name: "MyPlacementGroup", + creator_job_id: "job-987654321", + state: "RESCHEDULING", + stats: null, + bundles, + }, + { + placement_group_id: "pg-123456789", + name: "MyPlacementGroup", + creator_job_id: "job-987654321", + state: "PENDING", + stats: null, + bundles, + }, +]; diff --git a/dashboard/client/src/type/actor.ts b/dashboard/client/src/type/actor.ts index 7ac5274307ee..aedb5e4053df 100644 --- a/dashboard/client/src/type/actor.ts +++ b/dashboard/client/src/type/actor.ts @@ -1,9 +1,10 @@ import { GPUStats } from "./node"; export enum ActorEnum { + DEPENDENCIES_UNREADY = "DEPENDENCIES_UNREADY", + PENDING_CREATION = "PENDING_CREATION", ALIVE = "ALIVE", - PENDING = "PENDING", - RECONSTRUCTING = "RECONSTRUCTING", + RESTARTING = "RESTARTING", DEAD = "DEAD", } diff --git a/dashboard/client/src/type/job.d.ts b/dashboard/client/src/type/job.ts similarity index 95% rename from dashboard/client/src/type/job.d.ts rename to 
dashboard/client/src/type/job.ts index f8797851e6a8..80f697ac855d 100644 --- a/dashboard/client/src/type/job.d.ts +++ b/dashboard/client/src/type/job.ts @@ -58,11 +58,19 @@ export type JobDetail = { export type JobListRsp = UnifiedJob[]; +export enum JobStatus { + PENDING = "PENDING", + RUNNING = "RUNNING", + STOPPED = "STOPPED", + SUCCEEDED = "SUCCEEDED", + FAILED = "FAILED", +} + export type UnifiedJob = { job_id: string | null; submission_id: string | null; type: string; - status: string; + status: JobStatus; entrypoint: string; message: string | null; error_type: string | null; From 7a63a8ef61033b7daa6654dd4462cd247a538f3e Mon Sep 17 00:00:00 2001 From: Yi Cheng <74173148+iycheng@users.noreply.github.com> Date: Mon, 15 May 2023 17:44:23 -0700 Subject: [PATCH 397/424] [core] Delete disconnected node view in ray syncer when connection is broken. (#35312) The current ray syncer doesn't take care of disconnection very well. If one raylet is disconnected due to some reason, the ray syncer won't clear its local view and will send the dead node info to any newly joined node. This won't introduce any correctness bugs because the raylet will just reject the offending message. And this won't introduce too big performance impact since only the newly added node will receive these mesages. This PR cleaned up its local view table when the node is disconnect. It'll get the new snapshot when it rejoin if the disconnection is due to network. 
--- src/ray/common/ray_syncer/ray_syncer-inl.h | 3 + src/ray/common/ray_syncer/ray_syncer.cc | 87 +++++++++++----------- src/ray/common/test/ray_syncer_test.cc | 14 ++++ 3 files changed, 59 insertions(+), 45 deletions(-) diff --git a/src/ray/common/ray_syncer/ray_syncer-inl.h b/src/ray/common/ray_syncer/ray_syncer-inl.h index 89e6758f18b0..8ec216a43f1c 100644 --- a/src/ray/common/ray_syncer/ray_syncer-inl.h +++ b/src/ray/common/ray_syncer/ray_syncer-inl.h @@ -63,6 +63,9 @@ class NodeState { return cluster_view_; } + /// Remove a node from the cluster view. + bool RemoveNode(const std::string &node_id); + private: /// For local nodes std::array reporters_ = {nullptr}; diff --git a/src/ray/common/ray_syncer/ray_syncer.cc b/src/ray/common/ray_syncer/ray_syncer.cc index 3f4810da2985..b050fc06aad0 100644 --- a/src/ray/common/ray_syncer/ray_syncer.cc +++ b/src/ray/common/ray_syncer/ray_syncer.cc @@ -54,6 +54,10 @@ std::optional NodeState::CreateSyncMessage(MessageType message_t return message; } +bool NodeState::RemoveNode(const std::string &node_id) { + return cluster_view_.erase(node_id) != 0; +} + bool NodeState::ConsumeSyncMessage(std::shared_ptr message) { auto ¤t = cluster_view_[message->node_id()][message->message_type()]; @@ -173,13 +177,11 @@ RaySyncer::RaySyncer(instrumented_io_context &io_context, RaySyncer::~RaySyncer() { *stopped_ = true; - io_context_.dispatch( - [reactors = sync_reactors_]() { - for (auto [_, reactor] : reactors) { - reactor->Disconnect(); - } - }, - ""); + boost::asio::dispatch(io_context_.get_executor(), [reactors = sync_reactors_]() { + for (auto [_, reactor] : reactors) { + reactor->Disconnect(); + } + }); } std::shared_ptr RaySyncer::GetSyncMessage( @@ -197,23 +199,20 @@ std::shared_ptr RaySyncer::GetSyncMessage( } std::vector RaySyncer::GetAllConnectedNodeIDs() const { - std::promise> promise; - io_context_.dispatch( - [&]() { - std::vector nodes; - for (auto [node_id, _] : sync_reactors_) { - nodes.push_back(node_id); - } - 
promise.set_value(std::move(nodes)); - }, - ""); - return promise.get_future().get(); + auto task = std::packaged_task()>([&]() { + std::vector nodes; + for (auto [node_id, _] : sync_reactors_) { + nodes.push_back(node_id); + } + return nodes; + }); + return boost::asio::dispatch(io_context_.get_executor(), std::move(task)).get(); } void RaySyncer::Connect(const std::string &node_id, std::shared_ptr channel) { - io_context_.dispatch( - [=]() { + boost::asio::dispatch( + io_context_.get_executor(), std::packaged_task([=]() { auto stub = ray::rpc::syncer::RaySyncer::NewStub(channel); auto reactor = new RayClientBidiReactor( /* remote_node_id */ node_id, @@ -231,19 +230,21 @@ void RaySyncer::Connect(const std::string &node_id, << NodeID::FromBinary(node_id); Connect(node_id, channel); }, - /* delay_duration = */ std::chrono::milliseconds(2000)); + /* delay_microseconds = */ std::chrono::milliseconds(2000)); + } else { + node_state_->RemoveNode(node_id); } }, /* stub */ std::move(stub)); Connect(reactor); reactor->StartCall(); - }, - ""); + })) + .get(); } void RaySyncer::Connect(RaySyncerBidiReactor *reactor) { - io_context_.dispatch( - [this, reactor]() { + boost::asio::dispatch( + io_context_.get_executor(), std::packaged_task([this, reactor]() { RAY_CHECK(sync_reactors_.find(reactor->GetRemoteNodeID()) == sync_reactors_.end()); sync_reactors_[reactor->GetRemoteNodeID()] = reactor; @@ -260,29 +261,24 @@ void RaySyncer::Connect(RaySyncerBidiReactor *reactor) { reactor->PushToSendingQueue(message); } } - }, - "RaySyncerConnect"); + })) + .get(); } void RaySyncer::Disconnect(const std::string &node_id) { - std::promise promise; - io_context_.dispatch( - [&]() { - auto iter = sync_reactors_.find(node_id); - if (iter == sync_reactors_.end()) { - promise.set_value(); - return; - } - - auto reactor = iter->second; - if (iter != sync_reactors_.end()) { - sync_reactors_.erase(iter); - } - reactor->Disconnect(); - promise.set_value(); - }, - "RaySyncerDisconnect"); - 
promise.get_future().get(); + auto task = std::packaged_task([&]() { + auto iter = sync_reactors_.find(node_id); + if (iter == sync_reactors_.end()) { + return; + } + + auto reactor = iter->second; + if (iter != sync_reactors_.end()) { + sync_reactors_.erase(iter); + } + reactor->Disconnect(); + }); + boost::asio::dispatch(io_context_.get_executor(), std::move(task)).get(); } void RaySyncer::Register(MessageType message_type, @@ -356,6 +352,7 @@ ServerBidiReactor *RaySyncerService::StartSync(grpc::CallbackServerContext *cont // No need to reconnect for server side. RAY_CHECK(!reconnect); syncer_.sync_reactors_.erase(node_id); + syncer_.node_state_->RemoveNode(node_id); }); RAY_LOG(DEBUG) << "Get connection from " << NodeID::FromBinary(reactor->GetRemoteNodeID()) << " to " diff --git a/src/ray/common/test/ray_syncer_test.cc b/src/ray/common/test/ray_syncer_test.cc index 0f9dc4643a36..de5e0617d824 100644 --- a/src/ray/common/test/ray_syncer_test.cc +++ b/src/ray/common/test/ray_syncer_test.cc @@ -611,6 +611,20 @@ TEST_F(SyncerTest, Broadcast) { ASSERT_EQ(nullptr, s1.syncer->GetSyncMessage(NodeID::FromRandom().Binary(), MessageType::RESOURCE_VIEW)); + s1.syncer->Disconnect(s3.syncer->GetLocalNodeID()); + RAY_LOG(INFO) << "s1.id=" << NodeID::FromBinary(s1.syncer->GetLocalNodeID()); + RAY_LOG(INFO) << "s3.id=" << NodeID::FromBinary(s3.syncer->GetLocalNodeID()); + + EXPECT_TRUE(s3.WaitUntil( + [&s3, node_id = s1.syncer->GetLocalNodeID()]() mutable { + return s3.syncer->node_state_->GetClusterView().count(node_id) == 0; + }, + 5)); + EXPECT_TRUE(s1.WaitUntil( + [&s1, node_id = s3.syncer->GetLocalNodeID()]() mutable { + return s1.syncer->node_state_->GetClusterView().count(node_id) == 0; + }, + 5)); } bool CompareViews(const std::vector &servers, From d552c8adb9181134dbb74840f26b840c9d3c91e7 Mon Sep 17 00:00:00 2001 From: Eric Liang Date: Mon, 15 May 2023 18:06:25 -0700 Subject: [PATCH 398/424] [doc] [data] Update dataset intro page and fix some typos (#35361) --- 
doc/source/data/batch_inference.rst | 20 +++++++++---------- doc/source/data/data.rst | 12 ++++++----- .../opt_deepspeed_batch_inference.ipynb | 4 ++-- .../ray-air/examples/upload_to_comet_ml.ipynb | 2 +- 4 files changed, 20 insertions(+), 18 deletions(-) diff --git a/doc/source/data/batch_inference.rst b/doc/source/data/batch_inference.rst index 89de506a3ec7..cc15baf7fcde 100644 --- a/doc/source/data/batch_inference.rst +++ b/doc/source/data/batch_inference.rst @@ -155,7 +155,7 @@ If you're using Ray, the three steps for running batch inference read as follows across the cluster. 2. Define your model in a class and define a transformation that applies your model to your data batches (of format ``Dict[str, np.ndarray]`` by default). -3. Run inference on your data by using the :meth:`ds.map_batches() ` +3. Run inference on your data by using the :meth:`ds.map_batches() ` method from Ray Data. In this step you also define how your batch processing job gets distributed across your cluster. @@ -182,15 +182,15 @@ leveraging common Python libraries like NumPy and Pandas. In fact, we're using the exact same datasets as in the previous section, but load them into Ray data. -The result of this step is a Ray Datastream ``ds`` that we can use to run inference on. +The result of this step is a Dataset ``ds`` that we can use to run inference on. .. tabs:: .. group-tab:: HuggingFace - Create a Pandas DataFrame with text data and convert it to a Ray Datastream - with the :meth:`ray.data.from_pandas() ` method. + Create a Pandas DataFrame with text data and convert it to a Dataset + with the :meth:`ray.data.from_pandas() ` method. .. literalinclude:: ./doc_code/hf_quick_start.py :language: python @@ -200,8 +200,8 @@ The result of this step is a Ray Datastream ``ds`` that we can use to run infere .. group-tab:: PyTorch Create a NumPy array with 100 - entries and convert it to a Ray Datastream with the - :meth:`ray.data.from_numpy() ` method. 
+ entries and convert it to a Dataset with the + :meth:`ray.data.from_numpy() ` method. .. literalinclude:: ./doc_code/pytorch_quick_start.py :language: python @@ -211,8 +211,8 @@ The result of this step is a Ray Datastream ``ds`` that we can use to run infere .. group-tab:: TensorFlow Create a NumPy array with 100 - entries and convert it to a Ray Datastream with the - :meth:`ray.data.from_numpy() ` method. + entries and convert it to a Dataset with the + :meth:`ray.data.from_numpy() ` method. .. literalinclude:: ./doc_code/tf_quick_start.py :language: python @@ -278,7 +278,7 @@ Below you find examples for PyTorch, TensorFlow, and HuggingFace. 3. Getting predictions with Ray Data ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Once you have your Ray Dataset ``ds`` and your predictor class, you can use +Once you have your Dataset ``ds`` and your predictor class, you can use :meth:`ds.map_batches() ` to get predictions. ``map_batches`` takes your predictor class as an argument and allows you to specify ``compute`` resources by defining the :class:`ActorPoolStrategy `. @@ -526,7 +526,7 @@ which defines how many workers to use for inference. <2> Each actor should use one GPU. -To summarize, mapping a function over batches is the simplest transform for Ray Datasets. +To summarize, mapping a function over batches is the simplest transform for Datasets. The function defines the logic for transforming individual batches of data of the dataset Performing operations over batches of data is more performant than single element operations as it can leverage the underlying vectorization capabilities of Pandas or NumPy. diff --git a/doc/source/data/data.rst b/doc/source/data/data.rst index a9d785755a4f..e93cdaae90aa 100644 --- a/doc/source/data/data.rst +++ b/doc/source/data/data.rst @@ -8,9 +8,11 @@ Ray Data: Scalable Datasets for ML .. _data-intro: -Ray Data is the standard way to load and exchange data in Ray libraries and applications. 
-It provides streaming distributed transformations such as maps -(:meth:`map_batches `), +Ray Data scales common ML data processing patterns that arise in batch inference +and distributed training applications. These problems occur when it becomes necessary to +combine data preprocessing and model computations in the same job. Ray Data does this by providing +streaming distributed transformations +such as maps (:meth:`map_batches `), global and grouped aggregations (:class:`GroupedData `), and shuffling operations (:meth:`random_shuffle `, :meth:`sort `, @@ -29,9 +31,9 @@ Streaming Batch Inference ------------------------- Ray Data simplifies general purpose parallel GPU and CPU compute in Ray through its -powerful :ref:`Datastream ` primitive. Datastreams enable workloads such as +powerful streaming :ref:`Dataset ` primitive. Datasets enable workloads such as :doc:`GPU batch inference ` to run efficiently on large datasets, -maximizing resource utilization by keeping the working data fitting into Ray object store memory. +maximizing resource utilization by streaming the working data through Ray object store memory. .. 
image:: images/stream-example.png :width: 650px diff --git a/doc/source/ray-air/examples/opt_deepspeed_batch_inference.ipynb b/doc/source/ray-air/examples/opt_deepspeed_batch_inference.ipynb index 8c936c25f858..e5945910fe0c 100644 --- a/doc/source/ray-air/examples/opt_deepspeed_batch_inference.ipynb +++ b/doc/source/ray-air/examples/opt_deepspeed_batch_inference.ipynb @@ -6,7 +6,7 @@ "id": "dfdf1047", "metadata": {}, "source": [ - "# Batch Inference with OPT 30B and Ray Dataset\n", + "# Batch Inference with OPT 30B and Ray Data\n", "\n", "This notebook was tested on a single p3.16xlarge instance with 8 V100 GPUs.\n", "\n", @@ -573,7 +573,7 @@ "id": "ca57e150", "metadata": {}, "source": [ - "## Create a Ray Dataset Pipeline\n", + "## Create a Dataset Pipeline\n", "\n", "Finally, we connect all these pieces together, and use a BatchPredictor to run multiple copies of the DeepSpeedPredictor actors.\n", "\n", diff --git a/doc/source/ray-air/examples/upload_to_comet_ml.ipynb b/doc/source/ray-air/examples/upload_to_comet_ml.ipynb index d6ef4def7430..04c812c61a1d 100644 --- a/doc/source/ray-air/examples/upload_to_comet_ml.ipynb +++ b/doc/source/ray-air/examples/upload_to_comet_ml.ipynb @@ -171,7 +171,7 @@ "COMET WARNING: Failed to add tag(s) None to the experiment\n", "\n", "COMET WARNING: Empty mapping given to log_params({}); ignoring\n", - "\u001B[2m\u001B[36m(GBDTTrainable pid=19852)\u001B[0m UserWarning: Datastream 'train' has 1 blocks, which is less than the `num_workers` 2. This dataset will be automatically repartitioned to 2 blocks.\n", + "\u001B[2m\u001B[36m(GBDTTrainable pid=19852)\u001B[0m UserWarning: Dataset 'train' has 1 blocks, which is less than the `num_workers` 2. 
This dataset will be automatically repartitioned to 2 blocks.\n", "\u001B[2m\u001B[33m(raylet)\u001B[0m 2022-05-19 15:19:24,628\tINFO context.py:70 -- Exec'ing worker with command: exec /Users/kai/.pyenv/versions/3.7.7/bin/python3.7 /Users/kai/coding/ray/python/ray/workers/default_worker.py --node-ip-address=127.0.0.1 --node-manager-port=61222 --object-store-name=/tmp/ray/session_2022-05-19_15-19-14_632568_19778/sockets/plasma_store --raylet-name=/tmp/ray/session_2022-05-19_15-19-14_632568_19778/sockets/raylet --redis-address=None --storage=None --temp-dir=/tmp/ray --metrics-agent-port=62873 --logging-rotate-bytes=536870912 --logging-rotate-backup-count=5 --gcs-address=127.0.0.1:61938 --redis-password=5241590000000000 --startup-token=17 --runtime-env-hash=-2010331069\n", "\u001B[2m\u001B[36m(GBDTTrainable pid=19852)\u001B[0m 2022-05-19 15:19:25,961\tINFO main.py:980 -- [RayXGBoost] Created 2 new actors (2 total actors). Waiting until actors are ready for training.\n", "\u001B[2m\u001B[33m(raylet)\u001B[0m 2022-05-19 15:19:26,830\tINFO context.py:70 -- Exec'ing worker with command: exec /Users/kai/.pyenv/versions/3.7.7/bin/python3.7 /Users/kai/coding/ray/python/ray/workers/default_worker.py --node-ip-address=127.0.0.1 --node-manager-port=61222 --object-store-name=/tmp/ray/session_2022-05-19_15-19-14_632568_19778/sockets/plasma_store --raylet-name=/tmp/ray/session_2022-05-19_15-19-14_632568_19778/sockets/raylet --redis-address=None --storage=None --temp-dir=/tmp/ray --metrics-agent-port=62873 --logging-rotate-bytes=536870912 --logging-rotate-backup-count=5 --gcs-address=127.0.0.1:61938 --redis-password=5241590000000000 --startup-token=18 --runtime-env-hash=-2010331069\n", From d5b37b51761a3dca80d41208f22e56ecf29d0b98 Mon Sep 17 00:00:00 2001 From: Eric Liang Date: Mon, 15 May 2023 18:07:17 -0700 Subject: [PATCH 399/424] [data] Fix bugs in handling of nested ndarrays (and other complex object types) (#35359) There were a couple bugs in our handling of complex 
ndarrays: We weren't consistently falling back to PandasBlock for object dtypes. This was due to raising different exception types, some of which were not caught at the upper layer. This PR simplifies our exception handling path, removing legacy code. We weren't calling create_ragged_ndarray for certain return types due to a bug in the shape mismatch detection code. --- python/ray/air/util/tensor_extensions/arrow.py | 11 +---------- python/ray/data/_internal/arrow_block.py | 13 +------------ python/ray/data/_internal/numpy_support.py | 8 +++++--- python/ray/data/block.py | 6 ++---- python/ray/data/tests/test_numpy_support.py | 14 +++++++++++++- 5 files changed, 22 insertions(+), 30 deletions(-) diff --git a/python/ray/air/util/tensor_extensions/arrow.py b/python/ray/air/util/tensor_extensions/arrow.py index cb34a85fb8ac..41bff8c40ffb 100644 --- a/python/ray/air/util/tensor_extensions/arrow.py +++ b/python/ray/air/util/tensor_extensions/arrow.py @@ -673,9 +673,7 @@ def from_numpy( # underlying scalar data type. # - shape: a variable-sized list array containing the shapes of each tensor # element. - if isinstance(arr, Iterable): - arr = list(arr) - elif not isinstance(arr, (list, tuple)): + if not isinstance(arr, (list, tuple, np.ndarray)): raise ValueError( "ArrowVariableShapedTensorArray can only be constructed from an " f"ndarray or a list/tuple of ndarrays, but got: {type(arr)}" @@ -716,13 +714,6 @@ def from_numpy( else: np_data_buffer = np.concatenate(raveled) dtype = np_data_buffer.dtype - if dtype.type is np.object_: - types_and_shapes = [(f"dtype={a.dtype}", f"shape={a.shape}") for a in arr] - raise ValueError( - "ArrowVariableShapedTensorArray only supports heterogeneous-shaped " - "tensor collections, not arbitrarily nested ragged tensors. 
Got " - f"arrays: {types_and_shapes}" - ) pa_dtype = pa.from_numpy_dtype(dtype) if pa.types.is_string(pa_dtype): if dtype.byteorder == ">" or ( diff --git a/python/ray/data/_internal/arrow_block.py b/python/ray/data/_internal/arrow_block.py index 9faf3fc8371e..1627da13e3bd 100644 --- a/python/ray/data/_internal/arrow_block.py +++ b/python/ray/data/_internal/arrow_block.py @@ -158,7 +158,6 @@ def from_bytes(cls, data: bytes) -> "ArrowBlockAccessor": @staticmethod def numpy_to_block( batch: Union[np.ndarray, Dict[str, np.ndarray], Dict[str, list]], - passthrough_arrow_not_implemented_errors: bool = False, ) -> "pyarrow.Table": import pyarrow as pa @@ -180,17 +179,7 @@ def numpy_to_block( col = convert_udf_returns_to_numpy(col) # Use Arrow's native *List types for 1-dimensional ndarrays. if col.dtype.type is np.object_ or col.ndim > 1: - try: - col = ArrowTensorArray.from_numpy(col) - except pa.ArrowNotImplementedError as e: - if passthrough_arrow_not_implemented_errors: - raise e - raise ValueError( - "Failed to convert multi-dimensional ndarray of dtype " - f"{col.dtype} to our tensor extension since this dtype is not " - "supported by Arrow. If encountering this due to string data, " - 'cast the ndarray to a string dtype, e.g. a.astype("U").' 
- ) from e + col = ArrowTensorArray.from_numpy(col) new_batch[col_name] = col return pa.Table.from_pydict(new_batch) diff --git a/python/ray/data/_internal/numpy_support.py b/python/ray/data/_internal/numpy_support.py index 5c9c24f682d8..69cb09aa6be9 100644 --- a/python/ray/data/_internal/numpy_support.py +++ b/python/ray/data/_internal/numpy_support.py @@ -59,9 +59,11 @@ def convert_udf_returns_to_numpy(udf_return_col: Any) -> Any: if all(is_valid_udf_return(e) for e in udf_return_col): udf_return_col = [np.array(e) for e in udf_return_col] shapes = set() - if all(isinstance(e, np.ndarray) for e in udf_return_col): - for e in udf_return_col: - shapes.add(e.shape) + for e in udf_return_col: + if isinstance(e, np.ndarray): + shapes.add((e.dtype, e.shape)) + else: + shapes.add(type(e)) if len(shapes) > 1: # This util works around some limitations of np.array(dtype=object). udf_return_col = create_ragged_ndarray(udf_return_col) diff --git a/python/ray/data/block.py b/python/ray/data/block.py index b9cc776a0ee1..b75144b75c02 100644 --- a/python/ray/data/block.py +++ b/python/ray/data/block.py @@ -433,10 +433,8 @@ def batch_to_block(batch: DataBatch) -> Block: import pyarrow as pa try: - return ArrowBlockAccessor.numpy_to_block( - batch, passthrough_arrow_not_implemented_errors=True - ) - except (pa.ArrowNotImplementedError, pa.ArrowInvalid): + return ArrowBlockAccessor.numpy_to_block(batch) + except (pa.ArrowNotImplementedError, pa.ArrowInvalid, pa.ArrowTypeError): import pandas as pd # TODO(ekl) once we support Python objects within Arrow blocks, we diff --git a/python/ray/data/tests/test_numpy_support.py b/python/ray/data/tests/test_numpy_support.py index 23dad677cf52..fcbfd8388de6 100644 --- a/python/ray/data/tests/test_numpy_support.py +++ b/python/ray/data/tests/test_numpy_support.py @@ -3,6 +3,7 @@ import pytest import ray +from ray.air.util.tensor_extensions.utils import create_ragged_ndarray from ray.data.tests.conftest import * # noqa from ray.tests.conftest 
import * # noqa @@ -24,7 +25,7 @@ def assert_structure_equals(a, b): assert a.dtype == b.dtype assert a.shape == b.shape for i in range(len(a)): - assert np.array_equiv(a[i], b[i]) + assert np.array_equiv(a[i], b[i]), (i, a, b) def test_list_of_scalars(ray_start_regular_shared): @@ -123,6 +124,17 @@ def test_scalar_ragged_array_like(ray_start_regular_shared): ) +# https://github.com/ray-project/ray/issues/35340 +def test_complex_ragged_arrays(ray_start_regular_shared): + data = [[{"a": 1}, {"a": 2}, {"a": 3}], [{"b": 1}]] + output = do_map_batches(data) + assert_structure_equals(output, create_ragged_ndarray(data)) + + data = ["hi", 1, None, [[[[]]]], {"a": [[{"b": 2, "c": UserObj()}]]}, UserObj()] + output = do_map_batches(data) + assert_structure_equals(output, create_ragged_ndarray(data)) + + if __name__ == "__main__": import sys From 02aca85c2f37aecf51bd7f5d199f6eef77bb3c5c Mon Sep 17 00:00:00 2001 From: Yunxuan Xiao Date: Mon, 15 May 2023 19:18:11 -0700 Subject: [PATCH 400/424] [Train] LightningTrainer: Enable prog bar (#35350) Signed-off-by: woshiyyya --- python/ray/train/lightning/lightning_trainer.py | 17 ++--------------- 1 file changed, 2 insertions(+), 15 deletions(-) diff --git a/python/ray/train/lightning/lightning_trainer.py b/python/ray/train/lightning/lightning_trainer.py index 5518dcaebe12..dd6c4a9969d9 100644 --- a/python/ray/train/lightning/lightning_trainer.py +++ b/python/ray/train/lightning/lightning_trainer.py @@ -1,16 +1,10 @@ import os +import pytorch_lightning as pl + from inspect import isclass from typing import Any, Dict, Optional, Type -import pytorch_lightning as pl from pytorch_lightning.plugins.environments import ClusterEnvironment -from packaging.version import Version - -if Version(pl.__version__) >= Version("2.0.0"): - from pytorch_lightning.callbacks.progress import ProgressBar as ProgressBarBase -else: - from pytorch_lightning.callbacks.progress.base import ProgressBarBase - from ray.air import session from ray.air.config 
import CheckpointConfig, DatasetConfig, RunConfig, ScalingConfig from ray.air.constants import MODEL_KEY @@ -509,13 +503,6 @@ def _lightning_train_loop_per_worker(config): lightning_module = module_class(**module_init_config) # Prepare Lightning Trainer - # Disable the Lightning progress bar to avoid corrupted AIR outputs, - # unless users provide a customized progress bar callback. - trainer_config["enable_progress_bar"] = any( - isinstance(callback, ProgressBarBase) - for callback in trainer_config.get("callbacks", []) - ) - # Setup trainer's parallel devices if trainer_config.get("accelerator", None) == "gpu": current_device = get_worker_root_device() From 19ad176f5746baaa304d509380cd83d19a1f8708 Mon Sep 17 00:00:00 2001 From: Alan Guo Date: Mon, 15 May 2023 20:48:47 -0700 Subject: [PATCH 401/424] Add "all" option for SessionName (#35303) This opens as default in the Grafana page but for the Dashboard UI, the default is still scoped to the latest session. --- .../modules/metrics/dashboards/default_dashboard_panels.py | 2 +- .../metrics/dashboards/default_grafana_dashboard_base.json | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/dashboard/modules/metrics/dashboards/default_dashboard_panels.py b/dashboard/modules/metrics/dashboards/default_dashboard_panels.py index e8fc7482911c..fb85030481d7 100644 --- a/dashboard/modules/metrics/dashboards/default_dashboard_panels.py +++ b/dashboard/modules/metrics/dashboards/default_dashboard_panels.py @@ -393,6 +393,6 @@ def max_plus_pending(max_resource, pending_resource): name="DEFAULT", default_uid="rayDefaultDashboard", panels=DEFAULT_GRAFANA_PANELS, - standard_global_filters=['SessionName="$SessionName"'], + standard_global_filters=['SessionName=~"$SessionName"'], base_json_file_name="default_grafana_dashboard_base.json", ) diff --git a/dashboard/modules/metrics/dashboards/default_grafana_dashboard_base.json b/dashboard/modules/metrics/dashboards/default_grafana_dashboard_base.json index 
47d40a198de4..72b0c3cc04da 100644 --- a/dashboard/modules/metrics/dashboards/default_grafana_dashboard_base.json +++ b/dashboard/modules/metrics/dashboards/default_grafana_dashboard_base.json @@ -25,7 +25,7 @@ "templating": { "list": [ { - "allValue": null, + "allValue": ".+", "current": { "selected": false }, @@ -34,7 +34,7 @@ "description": "Filter queries to specific ray sessions.", "error": null, "hide": 0, - "includeAll": false, + "includeAll": true, "label": null, "multi": false, "name": "SessionName", From d6d74f3c7b219746f691958e5bd6897b847a1eee Mon Sep 17 00:00:00 2001 From: Alan Guo Date: Mon, 15 May 2023 20:49:07 -0700 Subject: [PATCH 402/424] [Dashboard] Add task detail page with logs (#35328) Add task detail page with logs Update actor logs to use state-api actor_id filter instead of filename to fetch logs Hide log tabs if there is only 1 tab available. Update the driver logs in the jobs page to show explanation why logs are not available. --- dashboard/client/src/App.tsx | 16 +- .../client/src/common/MultiTabLogViewer.tsx | 197 +++++++++---- .../src/components/AutoscalerStatusCards.tsx | 8 +- .../MetadataSection/MetadataSection.tsx | 86 +++--- dashboard/client/src/components/TaskTable.tsx | 107 ++----- .../client/src/pages/actor/ActorDetail.tsx | 38 ++- .../client/src/pages/actor/ActorLogs.tsx | 14 +- .../AdvancedProgressBar.tsx | 25 +- .../src/pages/job/JobDetailActorPage.tsx | 44 +-- .../src/pages/job/JobDetailInfoPage.tsx | 2 +- .../client/src/pages/job/JobDetailLayout.tsx | 8 +- .../job/JobDriverLogs.component.test.tsx | 1 + .../client/src/pages/job/JobDriverLogs.tsx | 24 +- .../client/src/pages/layout/MainNavLayout.tsx | 11 +- .../client/src/pages/layout/mainNavContext.ts | 5 +- dashboard/client/src/pages/log/hooks.ts | 25 +- .../src/pages/node/ClusterDetailInfoPage.tsx | 2 +- .../src/pages/overview/OverviewPage.tsx | 15 +- .../pages/serve/ServeReplicaDetailPage.tsx | 12 +- .../serve/ServeSystemActorDetailPage.tsx | 14 +- 
.../src/pages/state/hook/useStateApi.ts | 21 ++ dashboard/client/src/pages/task/TaskPage.tsx | 270 ++++++++++++++++++ dashboard/client/src/service/log.ts | 64 ++++- dashboard/client/src/service/log.unit.test.ts | 65 +++++ dashboard/client/src/service/task.ts | 7 + dashboard/client/src/type/task.ts | 2 +- 26 files changed, 796 insertions(+), 287 deletions(-) create mode 100644 dashboard/client/src/pages/task/TaskPage.tsx create mode 100644 dashboard/client/src/service/log.unit.test.ts diff --git a/dashboard/client/src/App.tsx b/dashboard/client/src/App.tsx index a08a26200f2b..a60571201bcc 100644 --- a/dashboard/client/src/App.tsx +++ b/dashboard/client/src/App.tsx @@ -4,7 +4,7 @@ import dayjs from "dayjs"; import duration from "dayjs/plugin/duration"; import React, { Suspense, useEffect, useState } from "react"; import { HashRouter, Navigate, Route, Routes } from "react-router-dom"; -import ActorDetailPage from "./pages/actor/ActorDetail"; +import ActorDetailPage, { ActorDetailLayout } from "./pages/actor/ActorDetail"; import { ActorLayout } from "./pages/actor/ActorLayout"; import Loading from "./pages/exception/Loading"; import JobList, { JobsLayout } from "./pages/job"; @@ -33,6 +33,7 @@ import { ServeApplicationsListPage } from "./pages/serve/ServeApplicationsListPa import { ServeLayout } from "./pages/serve/ServeLayout"; import { ServeReplicaDetailPage } from "./pages/serve/ServeReplicaDetailPage"; import { ServeHttpProxyDetailPage } from "./pages/serve/ServeSystemActorDetailPage"; +import { TaskPage } from "./pages/task/TaskPage"; import { getNodeList } from "./service/node"; import { lightTheme } from "./theme"; @@ -201,16 +202,23 @@ const App = () => { - + } path="actors/:actorId" - /> + > + } path="" /> + } path="tasks/:taskId" /> + + } path="tasks/:taskId" /> } path="actors"> } path="" /> - } path=":actorId" /> + } path=":actorId"> + } path="" /> + } path="tasks/:taskId" /> + } path="metrics" /> } path="serve"> diff --git 
a/dashboard/client/src/common/MultiTabLogViewer.tsx b/dashboard/client/src/common/MultiTabLogViewer.tsx index 19b26b7a2226..e9a9d617cc60 100644 --- a/dashboard/client/src/common/MultiTabLogViewer.tsx +++ b/dashboard/client/src/common/MultiTabLogViewer.tsx @@ -25,9 +25,7 @@ const useStyles = makeStyles((theme) => export type MultiTabLogViewerTabDetails = { title: string; - nodeId: string | null; - filename?: string; -}; +} & LogViewerData; export type MultiTabLogViewerProps = { tabs: MultiTabLogViewerTabDetails[]; @@ -45,6 +43,10 @@ export const MultiTabLogViewer = ({ const currentTab = tabs.find((tab) => tab.title === value); + if (tabs.length === 0) { + return No logs to display.; + } + return (
    - { - setValue(newValue); - }} - indicatorColor="primary" - > - {tabs.map(({ title }) => ( - - ))} - {otherLogsLink && ( - - Other logs   - - } - onClick={(event) => { - // Prevent the tab from changing - setValue(value); - }} - component={Link} - to={otherLogsLink} - target="_blank" - rel="noopener noreferrer" - /> - )} - + {(tabs.length > 1 || otherLogsLink) && ( + { + setValue(newValue); + }} + indicatorColor="primary" + > + {tabs.map(({ title }) => ( + + ))} + {otherLogsLink && ( + + Other logs   + + } + onClick={(event) => { + // Prevent the tab from changing + setValue(value); + }} + component={Link} + to={otherLogsLink} + target="_blank" + rel="noopener noreferrer" + /> + )} + + )} {!currentTab ? ( Please select a tab. ) : ( - tabs.map(({ title, nodeId, filename }) => ( - - - - )) + tabs.map((tab) => { + const { title, ...data } = tab; + return ( + + + + ); + }) )} + "contents" in data; + +const isLogViewerDataActor = (data: LogViewerData): data is ActorData => + "actorId" in data; + +const isLogViewerDataTask = (data: LogViewerData): data is TaskData => + "taskId" in data; + +export type StateApiLogViewerProps = { height?: number; + data: LogViewerData; }; export const StateApiLogViewer = ({ + height = 300, + data, +}: StateApiLogViewerProps) => { + if (isLogViewerDataText(data)) { + return ; + } else if (isLogViewerDataActor(data)) { + return ; + } else if (isLogViewerDataTask(data)) { + return ; + } else { + return ; + } +}; + +const TextLogViewer = ({ + height = 300, + contents, +}: { + height: number; + contents: string; +}) => { + return ; +}; + +const FileLogViewer = ({ + height = 300, nodeId, filename, +}: { + height: number; +} & FileData) => { + const apiData = useStateApiLogs({ nodeId, filename }, filename); + return ; +}; + +const ActorLogViewer = ({ height = 300, -}: StateApiLogViewerProps) => { - const { downloadUrl, log, path, refresh } = useStateApiLogs(nodeId, filename); + actorId, + suffix, +}: { + height: number; +} & ActorData) => { + 
const apiData = useStateApiLogs( + { actorId, suffix }, + `actor-log-${actorId}.${suffix}`, + ); + return ; +}; + +const TaskLogViewer = ({ + height = 300, + taskId, + suffix, +}: { + height: number; +} & TaskData) => { + const apiData = useStateApiLogs( + { taskId, suffix }, + `task-log-${taskId}.${suffix}`, + ); + return ; +}; + +const ApiLogViewer = ({ + apiData: { downloadUrl, log, path, refresh }, + height = 300, +}: { + apiData: ReturnType; + height: number; +}) => { return typeof log === "string" ? ( { refresh(); diff --git a/dashboard/client/src/components/AutoscalerStatusCards.tsx b/dashboard/client/src/components/AutoscalerStatusCards.tsx index 887c192ae36b..eb00be02621d 100644 --- a/dashboard/client/src/components/AutoscalerStatusCards.tsx +++ b/dashboard/client/src/components/AutoscalerStatusCards.tsx @@ -31,7 +31,7 @@ const formatClusterStatus = (title: string, cluster_status: string) => { return (
    - {title} + {title} {cluster_status_rows.map((i, key) => { // Format the output. @@ -64,9 +64,6 @@ export const NodeStatusCard = ({ cluster_status }: StatusCardProps) => { overflow: "hidden", overflowY: "scroll", }} - sx={{ borderRadius: "16px" }} - marginLeft={1} - marginRight={1} > {cluster_status?.data ? formatNodeStatus(cluster_status?.data.clusterStatus) @@ -82,9 +79,6 @@ export const ResourceStatusCard = ({ cluster_status }: StatusCardProps) => { overflow: "hidden", overflowY: "scroll", }} - sx={{ border: 1, borderRadius: "1", borderColor: "primary.main" }} - marginLeft={1} - marginRight={1} > {cluster_status?.data ? formatResourcesStatus(cluster_status?.data.clusterStatus) diff --git a/dashboard/client/src/components/MetadataSection/MetadataSection.tsx b/dashboard/client/src/components/MetadataSection/MetadataSection.tsx index 91c73dc7723d..f9091a78a62e 100644 --- a/dashboard/client/src/components/MetadataSection/MetadataSection.tsx +++ b/dashboard/client/src/components/MetadataSection/MetadataSection.tsx @@ -30,6 +30,9 @@ type CopyableMetadataContent = StringOnlyMetadataContent & { readonly copyableValue: string; }; +type CopyAndLinkableMetadataContent = LinkableMetadataContent & + CopyableMetadataContent; + export type Metadata = { readonly label: string; readonly labelTooltip?: string | JSX.Element; @@ -39,6 +42,7 @@ export type Metadata = { | StringOnlyMetadataContent | LinkableMetadataContent | CopyableMetadataContent + | CopyAndLinkableMetadataContent | JSX.Element; /** @@ -92,6 +96,28 @@ export const MetadataContentField: React.FC<{ const classes = useStyles(); const [copyIconClicked, setCopyIconClicked] = useState(false); + const copyElement = content && "copyableValue" in content && ( + + { + setCopyIconClicked(true); + copy(content.copyableValue); + }} + // Set up mouse events to avoid text changing while tooltip is visible + onMouseEnter={() => setCopyIconClicked(false)} + onMouseLeave={() => setTimeout(() => setCopyIconClicked(false), 333)} + 
size="small" + className={classes.button} + > + + + + ); + if (content === undefined || "value" in content) { return content === undefined || !("link" in content) ? (
    @@ -103,47 +129,31 @@ export const MetadataContentField: React.FC<{ > {content?.value ?? "-"} - {content && "copyableValue" in content && ( - - { - setCopyIconClicked(true); - copy(content.copyableValue); - }} - // Set up mouse events to avoid text changing while tooltip is visible - onMouseEnter={() => setCopyIconClicked(false)} - onMouseLeave={() => - setTimeout(() => setCopyIconClicked(false), 333) - } - size="small" - className={classes.button} - > - - - - )} + {copyElement}
    ) : content.link.startsWith("http") ? ( - - {content.value} - +
    + + {content.value} + + {copyElement} +
    ) : ( - - {content.value} - +
    + + {content.value} + + {copyElement} +
    ); } return
    {content}
    ; diff --git a/dashboard/client/src/components/TaskTable.tsx b/dashboard/client/src/components/TaskTable.tsx index a5343b1be211..fbce5c5a2683 100644 --- a/dashboard/client/src/components/TaskTable.tsx +++ b/dashboard/client/src/components/TaskTable.tsx @@ -1,8 +1,7 @@ import { Box, - createStyles, InputAdornment, - makeStyles, + Link, Table, TableBody, TableCell, @@ -15,10 +14,9 @@ import { } from "@material-ui/core"; import Autocomplete from "@material-ui/lab/Autocomplete"; import Pagination from "@material-ui/lab/Pagination"; -import React, { useContext, useState } from "react"; -import { Link } from "react-router-dom"; -import { GlobalContext } from "../App"; -import DialogWithTitle from "../common/DialogWithTitle"; +import React, { useState } from "react"; +import { Link as RouterLink } from "react-router-dom"; +import { CodeDialogButton } from "../common/CodeDialogButton"; import { DurationText } from "../common/DurationText"; import { ActorLink, NodeLink } from "../common/links"; import rowStyles from "../common/RowStyles"; @@ -231,7 +229,9 @@ const TaskTable = ({ arrow interactive > -
    {task_id}
    + + {task_id} + {name ? name : "-"} @@ -299,24 +299,14 @@ const TaskTable = ({ - ( -
    - {key}: {val} -
    - ), - )} - arrow - interactive - > -
    - {Object.entries(required_resources || {}) - .map(([key, val]) => `${key}: ${val}`) - .join(", ")} -
    -
    + {Object.entries(required_resources || {}).length > 0 ? ( + + ) : ( + "{}" + )}
    ); @@ -330,70 +320,29 @@ const TaskTable = ({ export default TaskTable; -const useTaskTableActionsStyles = makeStyles(() => - createStyles({ - errorDetails: { - whiteSpace: "pre", - }, - link: { - border: "none", - cursor: "pointer", - color: "#036DCF", - textDecoration: "underline", - background: "none", - }, - }), -); - type TaskTableActionsProps = { task: Task; }; const TaskTableActions = ({ task }: TaskTableActionsProps) => { - const classes = useTaskTableActionsStyles(); - const { ipLogMap } = useContext(GlobalContext); - const [showErrorDetailsDialog, setShowErrorDetailsDialog] = useState(false); - - const handleErrorClick = () => { - setShowErrorDetailsDialog(true); - }; - - const errorDetails = task.error_type - ? `Error Type: ${task.error_type}\n\n${task.error_message}` - : undefined; + const errorDetails = + task.error_type !== null && task.error_message !== null + ? `Error Type: ${task.error_type}\n\n${task.error_message}` + : undefined; return ( - {task?.profiling_data?.node_ip_address && - ipLogMap[task?.profiling_data?.node_ip_address] && - task.worker_id && - task.job_id && ( - - - Log - -
    -
    - )} + + Log + +
    + {errorDetails && ( - - )} - {showErrorDetailsDialog && errorDetails && ( - { - setShowErrorDetailsDialog(false); - }} - > -
    {errorDetails}
    -
    + code={errorDetails} + buttonText="Error" + /> )}
    ); diff --git a/dashboard/client/src/pages/actor/ActorDetail.tsx b/dashboard/client/src/pages/actor/ActorDetail.tsx index 8feb0a10f0e1..c134a1482e3e 100644 --- a/dashboard/client/src/pages/actor/ActorDetail.tsx +++ b/dashboard/client/src/pages/actor/ActorDetail.tsx @@ -1,5 +1,6 @@ import { makeStyles } from "@material-ui/core"; import React from "react"; +import { Outlet } from "react-router-dom"; import { CollapsibleSection } from "../../common/CollapsibleSection"; import { DurationText } from "../../common/DurationText"; import { formatDateFromTimeMs } from "../../common/formatUtils"; @@ -39,11 +40,37 @@ const useStyle = makeStyles((theme) => ({ }, })); +export const ActorDetailLayout = () => { + const { params, actorDetail } = useActorDetail(); + + return ( +
    + + +
    + ); +}; + const ActorDetailPage = () => { const classes = useStyle(); const { params, actorDetail, msg, isLoading } = useActorDetail(); - if (!actorDetail) { + if (isLoading || actorDetail === undefined) { return (
    @@ -58,15 +85,6 @@ const ActorDetailPage = () => { return (
    - - ; + actor: Pick; }; export const ActorLogs = ({ actor: { - jobId, + actorId, pid, address: { workerId, rayletId }, }, @@ -19,15 +19,13 @@ export const ActorLogs = ({ const tabs: MultiTabLogViewerTabDetails[] = [ { title: "stderr", - nodeId: rayletId, - // TODO(aguo): Have API return the log file name. - filename: `worker-${workerId}-${jobId}-${pid}.err`, + actorId, + suffix: "err", }, { title: "stdout", - nodeId: rayletId, - // TODO(aguo): Have API return the log file name. - filename: `worker-${workerId}-${jobId}-${pid}.out`, + actorId, + suffix: "out", }, { title: "system", diff --git a/dashboard/client/src/pages/job/AdvancedProgressBar/AdvancedProgressBar.tsx b/dashboard/client/src/pages/job/AdvancedProgressBar/AdvancedProgressBar.tsx index 6ba92e3ffcbf..d9fbb29d86d9 100644 --- a/dashboard/client/src/pages/job/AdvancedProgressBar/AdvancedProgressBar.tsx +++ b/dashboard/client/src/pages/job/AdvancedProgressBar/AdvancedProgressBar.tsx @@ -15,6 +15,7 @@ import { RiCloseLine, RiSubtractLine, } from "react-icons/ri"; +import { Link } from "react-router-dom"; import { ClassNameProps } from "../../../common/props"; import { JobProgressGroup, NestedJobProgressLink } from "../../../type/job"; import { MiniTaskProgressBar } from "../TaskProgressBar"; @@ -166,15 +167,21 @@ export const AdvancedProgressBarSegment = ({ }} /> {link ? ( - + link.type === "actor" ? ( + + ) : ( + + {name} + + ) ) : ( name )} diff --git a/dashboard/client/src/pages/job/JobDetailActorPage.tsx b/dashboard/client/src/pages/job/JobDetailActorPage.tsx index 91e23dbce2ff..0e0d09437bf5 100644 --- a/dashboard/client/src/pages/job/JobDetailActorPage.tsx +++ b/dashboard/client/src/pages/job/JobDetailActorPage.tsx @@ -15,23 +15,17 @@ const useStyle = makeStyles((theme) => ({ export const JobDetailActorsPage = () => { const classes = useStyle(); - const { job, params } = useJobDetail(); - - const pageInfo = job - ? { - title: "Actors", - id: "actors", - path: job.job_id ? 
`/jobs/${job.job_id}/actors` : undefined, - } - : { - title: "Actors", - id: "actors", - path: undefined, - }; + const { params } = useJobDetail(); return (
    - +
    @@ -42,23 +36,15 @@ export const JobDetailActorsPage = () => { export const JobDetailActorDetailWrapper = ({ children, }: PropsWithChildren<{}>) => { - const { job } = useJobDetail(); - - const pageInfo = job - ? { - title: "Actors", - id: "actors", - path: job.job_id ? `/jobs/${job.job_id}/actors` : undefined, - } - : { - title: "Actors", - id: "actors", - path: undefined, - }; - return (
    - + {children}
    ); diff --git a/dashboard/client/src/pages/job/JobDetailInfoPage.tsx b/dashboard/client/src/pages/job/JobDetailInfoPage.tsx index bd6c746dd139..3f1a19ae661b 100644 --- a/dashboard/client/src/pages/job/JobDetailInfoPage.tsx +++ b/dashboard/client/src/pages/job/JobDetailInfoPage.tsx @@ -41,7 +41,7 @@ export const JobDetailInfoPage = () => { pageInfo={{ title: "Info", id: "job-info", - path: undefined, + path: "info", }} /> diff --git a/dashboard/client/src/pages/job/JobDetailLayout.tsx b/dashboard/client/src/pages/job/JobDetailLayout.tsx index 88685472e572..0d4119eda364 100644 --- a/dashboard/client/src/pages/job/JobDetailLayout.tsx +++ b/dashboard/client/src/pages/job/JobDetailLayout.tsx @@ -10,20 +10,20 @@ import { SideTabLayout, SideTabRouteLink } from "../layout/SideTabLayout"; import { useJobDetail } from "./hook/useJobDetail"; export const JobPage = () => { - const { job } = useJobDetail(); + const { job, params } = useJobDetail(); const jobId = job?.job_id ?? job?.submission_id; - const pageInfo = job + const pageInfo = jobId ? { title: jobId ?? "Job", pageTitle: jobId ? `${jobId} | Job` : undefined, id: "job-detail", - path: jobId ? `/jobs/${jobId}` : undefined, + path: jobId, } : { title: "Job", id: "job-detail", - path: undefined, + path: params.id, }; return (
    diff --git a/dashboard/client/src/pages/job/JobDriverLogs.component.test.tsx b/dashboard/client/src/pages/job/JobDriverLogs.component.test.tsx index 7e6fbe21fb46..8dc5aab5fc0b 100644 --- a/dashboard/client/src/pages/job/JobDriverLogs.component.test.tsx +++ b/dashboard/client/src/pages/job/JobDriverLogs.component.test.tsx @@ -22,6 +22,7 @@ describe("JobDriverLogs", () => { render( ; }; export const JobDriverLogs = ({ job }: JobDriverLogsProps) => { - const { driver_node_id, submission_id } = job; + const { driver_node_id, submission_id, type } = job; const filename = submission_id ? `job-driver-${submission_id}.log` : undefined; @@ -42,16 +43,23 @@ export const JobDriverLogs = ({ job }: JobDriverLogsProps) => { link = undefined; } - // TODO(aguo): Support showing message for jobs not created via ray job submit - // instead of hiding the driver logs return ( diff --git a/dashboard/client/src/pages/layout/MainNavLayout.tsx b/dashboard/client/src/pages/layout/MainNavLayout.tsx index cb68b0dd7d00..2e317fcc3547 100644 --- a/dashboard/client/src/pages/layout/MainNavLayout.tsx +++ b/dashboard/client/src/pages/layout/MainNavLayout.tsx @@ -264,15 +264,24 @@ const MainNavBreadcrumbs = () => { return null; } + let currentPath = ""; + return (
    {mainNavPageHierarchy.map(({ title, id, path }, index) => { + if (path) { + if (path.startsWith("/")) { + currentPath = path; + } else { + currentPath = `${currentPath}/${path}`; + } + } const linkOrText = path ? ( {title} diff --git a/dashboard/client/src/pages/layout/mainNavContext.ts b/dashboard/client/src/pages/layout/mainNavContext.ts index 0937fd082d2a..ff030d803670 100644 --- a/dashboard/client/src/pages/layout/mainNavContext.ts +++ b/dashboard/client/src/pages/layout/mainNavContext.ts @@ -11,13 +11,16 @@ export type MainNavPage = { pageTitle?: string; /** * This helps identifies the current page a user is on and highlights the nav bar correctly. - * This should be unique per page. + * This should be unique per page within an hiearchy. i.e. you should NOT put two pages with the same ID + * as parents or children of each other. * DO NOT change the pageId of a page. The behavior of the main nav and * breadcrumbs is undefined in that case. */ id: string; /** * URL to link to access this route. + * If this begins with a `/`, it is treated as an absolute path. + * If not, this is treated as a relative path and the path is appended to the parent breadcrumb's path. */ path?: string; }; diff --git a/dashboard/client/src/pages/log/hooks.ts b/dashboard/client/src/pages/log/hooks.ts index 0ee04401f771..fe333ba96a2e 100644 --- a/dashboard/client/src/pages/log/hooks.ts +++ b/dashboard/client/src/pages/log/hooks.ts @@ -1,25 +1,24 @@ import useSWR from "swr"; -import { getStateApiDownloadLogUrl, getStateApiLog } from "../../service/log"; +import { + getStateApiDownloadLogUrl, + getStateApiLog, + StateApiLogInput, +} from "../../service/log"; export const useStateApiLogs = ( - driver_node_id?: string | null, - filename?: string, + props: StateApiLogInput, + path: string | undefined, ) => { - const downloadUrl = - driver_node_id && filename - ? 
getStateApiDownloadLogUrl(driver_node_id, filename) - : undefined; + const downloadUrl = getStateApiDownloadLogUrl(props); const { data: log, isLoading, mutate, } = useSWR( - driver_node_id && filename - ? ["useDriverLogs", driver_node_id, filename] - : null, - async ([_, node_id, filename]) => { - return getStateApiLog(node_id, filename); + downloadUrl ? ["useDriverLogs", downloadUrl] : null, + async ([_]) => { + return getStateApiLog(props); }, ); @@ -27,6 +26,6 @@ export const useStateApiLogs = ( log: isLoading ? "Loading..." : log, downloadUrl, refresh: mutate, - path: filename, + path, }; }; diff --git a/dashboard/client/src/pages/node/ClusterDetailInfoPage.tsx b/dashboard/client/src/pages/node/ClusterDetailInfoPage.tsx index 8c8ecb211afc..a44636abebaf 100644 --- a/dashboard/client/src/pages/node/ClusterDetailInfoPage.tsx +++ b/dashboard/client/src/pages/node/ClusterDetailInfoPage.tsx @@ -27,7 +27,7 @@ export const ClusterDetailInfoPage = () => { pageInfo={{ title: "Cluster Info", id: "cluster-info", - path: undefined, + path: "info", }} /> diff --git a/dashboard/client/src/pages/overview/OverviewPage.tsx b/dashboard/client/src/pages/overview/OverviewPage.tsx index 3582107d3e92..2b9ed841ca72 100644 --- a/dashboard/client/src/pages/overview/OverviewPage.tsx +++ b/dashboard/client/src/pages/overview/OverviewPage.tsx @@ -39,6 +39,9 @@ const useStyles = makeStyles((theme) => maxWidth: `calc((100% - ${theme.spacing(3)}px * 2) / 3)`, }, }, + autoscalerCard: { + padding: theme.spacing(2, 3), + }, section: { marginTop: theme.spacing(4), }, @@ -70,12 +73,20 @@ export const OverviewPage = () => {
    diff --git a/dashboard/client/src/pages/serve/ServeReplicaDetailPage.tsx b/dashboard/client/src/pages/serve/ServeReplicaDetailPage.tsx index a4c4086b6758..a9bfc0cb1b23 100644 --- a/dashboard/client/src/pages/serve/ServeReplicaDetailPage.tsx +++ b/dashboard/client/src/pages/serve/ServeReplicaDetailPage.tsx @@ -188,21 +188,19 @@ const ServeReplicaLogs = ({ const { address: { workerId }, pid, - jobId, + actorId, } = actor; const tabs: MultiTabLogViewerTabDetails[] = [ { title: "stderr", - nodeId: node_id, - // TODO(aguo): Have API return the log file name. - filename: `worker-${workerId}-${jobId}-${pid}.err`, + actorId, + suffix: "err", }, { title: "stdout", - nodeId: node_id, - // TODO(aguo): Have API return the log file name. - filename: `worker-${workerId}-${jobId}-${pid}.out`, + actorId, + suffix: "out", }, { title: "system", diff --git a/dashboard/client/src/pages/serve/ServeSystemActorDetailPage.tsx b/dashboard/client/src/pages/serve/ServeSystemActorDetailPage.tsx index d702345e69e6..3aaf68a32500 100644 --- a/dashboard/client/src/pages/serve/ServeSystemActorDetailPage.tsx +++ b/dashboard/client/src/pages/serve/ServeSystemActorDetailPage.tsx @@ -135,14 +135,14 @@ export const ServeSystemActorDetail = ({ type ServeSystemActorLogsProps = { type: "controller" | "httpProxy"; - actor: Pick; + actor: Pick; systemLogFilePath: string; }; const ServeSystemActorLogs = ({ type, actor: { - jobId, + actorId, pid, address: { workerId, rayletId }, }, @@ -158,15 +158,13 @@ const ServeSystemActorLogs = ({ }, { title: "Actor Logs (stderr)", - nodeId: rayletId, - // TODO(aguo): Have API return the log file name. - filename: `worker-${workerId}-${jobId}-${pid}.err`, + actorId, + suffix: "err", }, { title: "Actor Logs (stdout)", - nodeId: rayletId, - // TODO(aguo): Have API return the log file name. 
- filename: `worker-${workerId}-${jobId}-${pid}.out`, + actorId, + suffix: "out", }, { title: "Actor Logs (system)", diff --git a/dashboard/client/src/pages/state/hook/useStateApi.ts b/dashboard/client/src/pages/state/hook/useStateApi.ts index d8ec1187784b..3ee375e4785a 100644 --- a/dashboard/client/src/pages/state/hook/useStateApi.ts +++ b/dashboard/client/src/pages/state/hook/useStateApi.ts @@ -1,6 +1,7 @@ import { AxiosResponse } from "axios"; import useSWR, { Key } from "swr"; import { PER_JOB_PAGE_REFRESH_INTERVAL_MS } from "../../../common/constants"; +import { getTask } from "../../../service/task"; import { AsyncFunction, StateApiResponse, @@ -29,3 +30,23 @@ export const useStateApiList = ( return data; }; + +export const useStateApiTask = (taskId: string | undefined) => { + const { data, isLoading } = useSWR( + taskId ? ["useStateApiTask", taskId] : null, + async ([_, taskId]) => { + const rsp = await getTask(taskId); + if (rsp?.data?.data?.result?.result) { + return rsp.data.data.result.result[0]; + } else { + return undefined; + } + }, + { refreshInterval: PER_JOB_PAGE_REFRESH_INTERVAL_MS }, + ); + + return { + task: data, + isLoading, + }; +}; diff --git a/dashboard/client/src/pages/task/TaskPage.tsx b/dashboard/client/src/pages/task/TaskPage.tsx new file mode 100644 index 000000000000..619dc5a2ead6 --- /dev/null +++ b/dashboard/client/src/pages/task/TaskPage.tsx @@ -0,0 +1,270 @@ +import { Box, createStyles, makeStyles, Typography } from "@material-ui/core"; +import React from "react"; +import { useParams } from "react-router-dom"; +import { CodeDialogButtonWithPreview } from "../../common/CodeDialogButton"; +import { CollapsibleSection } from "../../common/CollapsibleSection"; +import { DurationText } from "../../common/DurationText"; +import { formatDateFromTimeMs } from "../../common/formatUtils"; +import { generateActorLink, generateNodeLink } from "../../common/links"; +import { + MultiTabLogViewer, + MultiTabLogViewerTabDetails, +} from 
"../../common/MultiTabLogViewer"; +import { Section } from "../../common/Section"; +import Loading from "../../components/Loading"; +import { MetadataSection } from "../../components/MetadataSection"; +import { StatusChip } from "../../components/StatusChip"; +import { Task } from "../../type/task"; +import { MainNavPageInfo } from "../layout/mainNavContext"; +import { useStateApiTask } from "../state/hook/useStateApi"; + +const useStyles = makeStyles((theme) => + createStyles({ + root: { + padding: theme.spacing(2), + backgroundColor: "white", + }, + }), +); + +export const TaskPage = () => { + const { taskId } = useParams(); + const { task, isLoading } = useStateApiTask(taskId); + + const classes = useStyles(); + + return ( +
    + + +
    + ); +}; + +type TaskPageContentsProps = { + taskId?: string; + task?: Task; + isLoading: boolean; +}; + +const TaskPageContents = ({ + taskId, + task, + isLoading, +}: TaskPageContentsProps) => { + if (isLoading) { + return ; + } + + if (!task) { + return ( + Task with ID "{taskId}" not found. + ); + } + + const { + task_id, + actor_id, + end_time_ms, + start_time_ms, + node_id, + placement_group_id, + required_resources, + state, + type, + worker_id, + job_id, + func_or_class_name, + name, + } = task; + + return ( +
    + , + }, + { + label: "Job ID", + content: { + value: job_id, + copyableValue: job_id, + }, + }, + { + label: "Function or class name", + content: { + value: func_or_class_name, + }, + }, + { + label: "Actor ID", + content: actor_id + ? { + value: actor_id, + copyableValue: actor_id, + link: generateActorLink(actor_id), + } + : { + value: "-", + }, + }, + { + label: "Node ID", + content: node_id + ? { + value: node_id, + copyableValue: node_id, + link: generateNodeLink(node_id), + } + : { + value: "-", + }, + }, + { + label: "Worker ID", + content: worker_id + ? { + value: worker_id, + copyableValue: worker_id, + } + : { + value: "-", + }, + }, + { + label: "Type", + content: { + value: type, + }, + }, + { + label: "Placement group ID", + content: placement_group_id + ? { + value: placement_group_id, + copyableValue: placement_group_id, + } + : { + value: "-", + }, + }, + { + label: "Required resources", + content: + Object.entries(required_resources).length > 0 ? ( + + + + ) : ( + { + value: "{}", + } + ), + }, + { + label: "Started at", + content: { + value: start_time_ms ? formatDateFromTimeMs(start_time_ms) : "-", + }, + }, + { + label: "Ended at", + content: { + value: end_time_ms ? formatDateFromTimeMs(end_time_ms) : "-", + }, + }, + { + label: "Duration", + content: start_time_ms ? ( + + ) : ( + { + value: "-", + } + ), + }, + ]} + /> + +
    + +
    +
    +
    + ); +}; + +type TaskLogsProps = { + task: Task; +}; + +const TaskLogs = ({ + task: { task_id, error_message, error_type, worker_id, node_id }, +}: TaskLogsProps) => { + const errorDetails = + error_type !== null && error_message !== null + ? `Error Type: ${error_type}\n\n${error_message}` + : undefined; + + const tabs: MultiTabLogViewerTabDetails[] = [ + ...(worker_id !== null && node_id !== null + ? ([ + { + title: "stderr", + taskId: task_id, + suffix: "err", + }, + { + title: "stdout", + taskId: task_id, + suffix: "out", + }, + ] as const) + : []), + // TODO(aguo): uncomment once PID is available in the API. + // { + // title: "system", + // nodeId: node_id, + // // TODO(aguo): Have API return the log file name. + // filename: `python-core-worker-${worker_id}_${pid}.log`, + // }, + ...(errorDetails + ? [{ title: "Error stack trace", contents: errorDetails }] + : []), + ]; + return ; +}; diff --git a/dashboard/client/src/service/log.ts b/dashboard/client/src/service/log.ts index f4deeff96286..51ca4902b59b 100644 --- a/dashboard/client/src/service/log.ts +++ b/dashboard/client/src/service/log.ts @@ -50,13 +50,65 @@ export const getLogDetail = async (url: string) => { return rsp.data as string; }; -export const getStateApiDownloadLogUrl = (nodeId: string, fileName: string) => - `api/v0/logs/file?node_id=${encodeURIComponent( - nodeId, - )}&filename=${encodeURIComponent(fileName)}&lines=-1`; +export type StateApiLogInput = { + nodeId?: string | null; + /** + * If actorId is provided, nodeId is not necessary + */ + actorId?: string | null; + /** + * If taskId is provided, nodeId is not necessary + */ + taskId?: string | null; + suffix?: string; + /** + * If filename is provided, suffix is not necessary + */ + filename?: string | null; +}; + +export const getStateApiDownloadLogUrl = ({ + nodeId, + filename, + taskId, + actorId, + suffix, +}: StateApiLogInput) => { + if ( + nodeId === null || + actorId === null || + taskId === null || + filename === null + ) { + 
// Null means data is not ready yet. + return null; + } + const variables = [ + ...(nodeId !== undefined ? [`node_id=${encodeURIComponent(nodeId)}`] : []), + ...(filename !== undefined + ? [`filename=${encodeURIComponent(filename)}`] + : []), + ...(taskId !== undefined ? [`task_id=${encodeURIComponent(taskId)}`] : []), + ...(actorId !== undefined + ? [`actor_id=${encodeURIComponent(actorId)}`] + : []), + ...(suffix !== undefined ? [`suffix=${encodeURIComponent(suffix)}`] : []), + "lines=-1", + ]; + + return `api/v0/logs/file?${variables.join("&")}`; +}; -export const getStateApiLog = async (nodeId: string, fileName: string) => { - const resp = await get(getStateApiDownloadLogUrl(nodeId, fileName)); +export const getStateApiLog = async (props: StateApiLogInput) => { + const url = getStateApiDownloadLogUrl(props); + if (url === null) { + return undefined; + } + const resp = await get(url); + // Handle case where log file is empty. + if (resp.status === 200 && resp.data.length === 0) { + return ""; + } // TODO(aguo): get rid of this first byte check once we support state-api logs without this streaming byte. 
if (resp.data[0] !== "1") { throw new Error(resp.data.substring(1)); diff --git a/dashboard/client/src/service/log.unit.test.ts b/dashboard/client/src/service/log.unit.test.ts new file mode 100644 index 000000000000..c7e437df9779 --- /dev/null +++ b/dashboard/client/src/service/log.unit.test.ts @@ -0,0 +1,65 @@ +import { getStateApiDownloadLogUrl } from "./log"; + +describe("getStateApiDownloadLogUrl", () => { + it("only uses parameters provided but doesn't fetch when parameters are null", () => { + expect.assertions(8); + + expect( + getStateApiDownloadLogUrl({ + nodeId: "node-id", + filename: "file.log", + }), + ).toStrictEqual( + "api/v0/logs/file?node_id=node-id&filename=file.log&lines=-1", + ); + + expect( + getStateApiDownloadLogUrl({ + taskId: "task-id", + suffix: "err", + }), + ).toStrictEqual("api/v0/logs/file?task_id=task-id&suffix=err&lines=-1"); + + expect( + getStateApiDownloadLogUrl({ + taskId: "task-id", + suffix: "out", + }), + ).toStrictEqual("api/v0/logs/file?task_id=task-id&suffix=out&lines=-1"); + + expect( + getStateApiDownloadLogUrl({ + actorId: "actor-id", + suffix: "err", + }), + ).toStrictEqual("api/v0/logs/file?actor_id=actor-id&suffix=err&lines=-1"); + + expect( + getStateApiDownloadLogUrl({ + nodeId: null, + filename: "file.log", + }), + ).toBeNull(); + + expect( + getStateApiDownloadLogUrl({ + nodeId: null, + filename: null, + }), + ).toBeNull(); + + expect( + getStateApiDownloadLogUrl({ + taskId: null, + suffix: "err", + }), + ).toBeNull(); + + expect( + getStateApiDownloadLogUrl({ + actorId: null, + suffix: "err", + }), + ).toBeNull(); + }); +}); diff --git a/dashboard/client/src/service/task.ts b/dashboard/client/src/service/task.ts index 41a9355585ba..17441651de07 100644 --- a/dashboard/client/src/service/task.ts +++ b/dashboard/client/src/service/task.ts @@ -10,6 +10,13 @@ export const getTasks = (jobId: string | undefined) => { return get>(url); }; +export const getTask = (taskId: string) => { + const url = 
`api/v0/tasks?detail=1&limit=1&filter_keys=task_id&filter_predicates=%3D&filter_values=${encodeURIComponent( + taskId, + )}`; + return get>(url); +}; + export const downloadTaskTimelineHref = (jobId: string | undefined) => { let url = "/api/v0/tasks/timeline?download=1"; if (jobId) { diff --git a/dashboard/client/src/type/task.ts b/dashboard/client/src/type/task.ts index 168d3154f9bc..ddb65b47580e 100644 --- a/dashboard/client/src/type/task.ts +++ b/dashboard/client/src/type/task.ts @@ -27,7 +27,7 @@ export type Task = { state: TypeTaskStatus; job_id: string; node_id: string; - actor_id: string; + actor_id: string | null; placement_group_id: string | null; type: TypeTaskType; func_or_class_name: string; From 9dc95b9e6fb21b5e44069995938e85b845cf9efe Mon Sep 17 00:00:00 2001 From: Ricky Xu Date: Tue, 16 May 2023 11:56:02 +0800 Subject: [PATCH 403/424] [core][state][ci] Fix stress_test_state_api_scale (#35332) We changed the logging content with per task magic token. Updating test logic. --- .../stress_tests/test_state_api_scale.py | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/release/nightly_tests/stress_tests/test_state_api_scale.py b/release/nightly_tests/stress_tests/test_state_api_scale.py index 0c43c19a0e81..780d9f19fa01 100644 --- a/release/nightly_tests/stress_tests/test_state_api_scale.py +++ b/release/nightly_tests/stress_tests/test_state_api_scale.py @@ -1,7 +1,6 @@ import click import json import ray -from ray._private.ray_constants import LOG_PREFIX_ACTOR_NAME, LOG_PREFIX_JOB_ID from ray._private.state_api_test_utils import ( STATE_LIST_LIMIT, StateAPIMetric, @@ -251,9 +250,6 @@ def test_large_log_file(log_file_size_byte: int): class LogActor: def write_log(self, log_file_size_byte: int): ctx = hashlib.md5() - job_id = ray.get_runtime_context().get_job_id() - prefix = f"{LOG_PREFIX_JOB_ID}{job_id}\n{LOG_PREFIX_ACTOR_NAME}LogActor\n" - ctx.update(prefix.encode()) while log_file_size_byte > 0: n = min(log_file_size_byte, 4 * 
MiB) chunk = "".join(random.choices(string.ascii_letters, k=n)) @@ -265,9 +261,9 @@ def write_log(self, log_file_size_byte: int): return ctx.hexdigest(), ray.get_runtime_context().get_node_id() actor = LogActor.remote() - expected_hash, node_id = ray.get( - actor.write_log.remote(log_file_size_byte=log_file_size_byte) - ) + + task = actor.write_log.remote(log_file_size_byte=log_file_size_byte) + expected_hash, node_id = ray.get(task) assert expected_hash is not None, "Empty checksum from the log actor" assert node_id is not None, "Empty node id from the log actor" @@ -276,7 +272,7 @@ def write_log(self, log_file_size_byte: int): time_taken = 0 t_start = time.perf_counter() - for s in get_log(actor_id=actor._actor_id.hex(), tail=1000000000): + for s in get_log(task_id=task.task_id().hex(), tail=1000000000): t_end = time.perf_counter() time_taken += t_end - t_start # Not including this time From 331e6da905096122e922674f701a224bdeeed744 Mon Sep 17 00:00:00 2001 From: Ricky Xu Date: Tue, 16 May 2023 12:02:35 +0800 Subject: [PATCH 404/424] [core][state][job] Supporting job listing(getting) and logs from state API (#35124) This PR adds better support for listing jobs (ray list jobs) and getting submission job logs (ray logs job --id) The state API client mirrors the implementation of job endpoint and depends on job's implementations for retrieving job related info. 
--- dashboard/modules/job/common.py | 1 + dashboard/modules/job/job_head.py | 8 +- dashboard/modules/job/job_manager.py | 4 +- dashboard/modules/job/pydantic_models.py | 9 ++- dashboard/modules/job/utils.py | 4 +- dashboard/modules/log/log_manager.py | 42 +++++++++- dashboard/modules/state/state_head.py | 1 + dashboard/state_aggregator.py | 42 +++++----- python/ray/experimental/state/api.py | 35 ++++++++- python/ray/experimental/state/common.py | 78 +++++++++++++++---- python/ray/experimental/state/state_cli.py | 71 +++++++++++++++-- .../ray/experimental/state/state_manager.py | 47 ++++++----- python/ray/scripts/scripts.py | 26 ++++--- python/ray/tests/test_state_api.py | 62 +++++++++++++-- python/ray/tests/test_state_api_log.py | 35 +++++++++ 15 files changed, 376 insertions(+), 89 deletions(-) diff --git a/dashboard/modules/job/common.py b/dashboard/modules/job/common.py index 0bc1d0c8f26f..e77dfb659e45 100644 --- a/dashboard/modules/job/common.py +++ b/dashboard/modules/job/common.py @@ -25,6 +25,7 @@ # In order to get information about SupervisorActors launched by different jobs, # they must be set to the same namespace. 
SUPERVISOR_ACTOR_RAY_NAMESPACE = "SUPERVISOR_ACTOR_RAY_NAMESPACE" +JOB_LOGS_PATH_TEMPLATE = "job-driver-{submission_id}.log" @PublicAPI(stability="stable") diff --git a/dashboard/modules/job/job_head.py b/dashboard/modules/job/job_head.py index 801fa1079923..acedb61fde4d 100644 --- a/dashboard/modules/job/job_head.py +++ b/dashboard/modules/job/job_head.py @@ -68,13 +68,11 @@ async def _raise_error(self, resp: ClientResponse): raise RuntimeError(f"Request failed with status code {status}: {error_text}.") async def submit_job_internal(self, req: JobSubmitRequest) -> JobSubmitResponse: - logger.debug(f"Submitting job with submission_id={req.submission_id}.") async with self._session.post( f"{self._agent_address}/api/job_agent/jobs/", json=dataclasses.asdict(req) ) as resp: - if resp.status == 200: result_json = await resp.json() return JobSubmitResponse(**result_json) @@ -82,13 +80,11 @@ async def submit_job_internal(self, req: JobSubmitRequest) -> JobSubmitResponse: await self._raise_error(resp) async def stop_job_internal(self, job_id: str) -> JobStopResponse: - logger.debug(f"Stopping job with job_id={job_id}.") async with self._session.post( f"{self._agent_address}/api/job_agent/jobs/{job_id}/stop" ) as resp: - if resp.status == 200: result_json = await resp.json() return JobStopResponse(**result_json) @@ -96,7 +92,6 @@ async def stop_job_internal(self, job_id: str) -> JobStopResponse: await self._raise_error(resp) async def delete_job_internal(self, job_id: str) -> JobDeleteResponse: - logger.debug(f"Deleting job with job_id={job_id}.") async with self._session.delete( @@ -401,6 +396,9 @@ async def get_job_info(self, req: Request) -> Response: content_type="application/json", ) + # TODO(rickyx): This endpoint's logic is also mirrored in state API's endpoint. + # We should eventually unify the backend logic (and keep the logic in sync before + # that). 
@routes.get("/api/jobs/") async def list_jobs(self, req: Request) -> Response: driver_jobs, submission_job_drivers = await get_driver_jobs( diff --git a/dashboard/modules/job/job_manager.py b/dashboard/modules/job/job_manager.py index 8c6f8232ea03..eb1e2a2f0dfa 100644 --- a/dashboard/modules/job/job_manager.py +++ b/dashboard/modules/job/job_manager.py @@ -33,6 +33,7 @@ JOB_ID_METADATA_KEY, JOB_NAME_METADATA_KEY, JOB_ACTOR_NAME_TEMPLATE, + JOB_LOGS_PATH_TEMPLATE, SUPERVISOR_ACTOR_RAY_NAMESPACE, JobInfo, JobInfoStorageClient, @@ -86,7 +87,6 @@ class JobLogStorageClient: Disk storage for stdout / stderr of driver script logs. """ - JOB_LOGS_PATH = "job-driver-{job_id}.log" # Number of last N lines to put in job message upon failure. NUM_LOG_LINES_ON_ERROR = 10 # Maximum number of characters to print out of the logs to avoid @@ -133,7 +133,7 @@ def get_log_file_path(self, job_id: str) -> Tuple[str, str]: """ return os.path.join( ray._private.worker._global_node.get_logs_dir_path(), - self.JOB_LOGS_PATH.format(job_id=job_id), + JOB_LOGS_PATH_TEMPLATE.format(submission_id=job_id), ) diff --git a/dashboard/modules/job/pydantic_models.py b/dashboard/modules/job/pydantic_models.py index b7c4404a6c00..5b7edd9f6f23 100644 --- a/dashboard/modules/job/pydantic_models.py +++ b/dashboard/modules/job/pydantic_models.py @@ -22,7 +22,11 @@ class DriverInfo(BaseModel): @PublicAPI(stability="beta") class JobType(str, Enum): - """An enumeration for describing the different job types.""" + """An enumeration for describing the different job types. + + NOTE: + This field is still experimental and may change in the future. + """ #: A job that was initiated by the Ray Jobs API. SUBMISSION = "SUBMISSION" @@ -37,9 +41,6 @@ class JobDetails(BaseModel): """ type: JobType = Field(..., description="The type of job.") - entrypoint: Optional[str] = Field( - None, description="The entrypoint command for this job." - ) job_id: Optional[str] = Field( None, description="The job ID. 
An ID that is created for every job that is " diff --git a/dashboard/modules/job/utils.py b/dashboard/modules/job/utils.py index b232c7372791..572e5f67ae60 100644 --- a/dashboard/modules/job/utils.py +++ b/dashboard/modules/job/utils.py @@ -142,7 +142,7 @@ async def parse_and_validate_request( async def get_driver_jobs( - gcs_aio_client: GcsAioClient, + gcs_aio_client: GcsAioClient, timeout: Optional[int] = None ) -> Tuple[Dict[str, JobDetails], Dict[str, DriverInfo]]: """Returns a tuple of dictionaries related to drivers. @@ -151,7 +151,7 @@ async def get_driver_jobs( It's keyed by the submission job's submission id. Only the last driver of a submission job is returned. """ - reply = await gcs_aio_client.get_all_job_info() + reply = await gcs_aio_client.get_all_job_info(timeout=timeout) jobs = {} submission_job_drivers = {} diff --git a/dashboard/modules/log/log_manager.py b/dashboard/modules/log/log_manager.py index 3f4915ee371d..b5533903eb9b 100644 --- a/dashboard/modules/log/log_manager.py +++ b/dashboard/modules/log/log_manager.py @@ -4,6 +4,7 @@ from collections import defaultdict from typing import List, Optional, Dict, AsyncIterable, Tuple, Callable +from ray.dashboard.modules.job.common import JOB_LOGS_PATH_TEMPLATE from ray.experimental.state.common import ( GetLogOptions, protobuf_to_task_state_dict, @@ -85,6 +86,7 @@ async def stream_logs( get_actor_fn=DataSource.actors.get, timeout=options.timeout, suffix=options.suffix, + submission_id=options.submission_id, ) keep_alive = options.media_type == "stream" @@ -117,6 +119,35 @@ def _verify_node_registered(self, node_id: str): ) assert node_id is not None + async def _resolve_job_filename(self, sub_job_id: str) -> Tuple[str, str]: + """Return the log file name and node id for a given job submission id. + + Args: + sub_job_id: The job submission id. + + Returns: + The log file name and node id. 
+ """ + job_infos = await self.client.get_job_info(timeout=DEFAULT_RPC_TIMEOUT) + target_job = None + for job_info in job_infos: + if job_info.submission_id == sub_job_id: + target_job = job_info + break + if target_job is None: + logger.info(f"Submission job ID {sub_job_id} not found.") + return None, None + + node_id = job_info.driver_node_id + if node_id is None: + raise ValueError( + f"Job {sub_job_id} has no driver node id info. " + "This is likely a bug. Please file an issue." + ) + + log_filename = JOB_LOGS_PATH_TEMPLATE.format(submission_id=sub_job_id) + return node_id, log_filename + async def _resolve_worker_file( self, node_id: str, @@ -167,6 +198,7 @@ async def resolve_filename( get_actor_fn: Optional[Callable[[str], Dict]] = None, timeout: int = DEFAULT_RPC_TIMEOUT, suffix: str = "out", + submission_id: Optional[str] = None, ) -> Tuple[str, str]: """Return the file name given all options. @@ -181,6 +213,7 @@ async def resolve_filename( specified by `node_id`. suffix: Log suffix if no `log_filename` is provided, when resolving by other ids'. Default to "out". + submission_id: The submission id for a submission job. """ if actor_id: if get_actor_fn is None: @@ -235,7 +268,6 @@ async def resolve_filename( "Could not find log file for task attempt:" f"{task_id}({attempt_number})" ) - # Get the worker id and node id. 
task = protobuf_to_task_state_dict(task_event) @@ -256,6 +288,13 @@ async def resolve_filename( suffix=suffix, timeout=timeout, ) + elif submission_id: + node_id, log_filename = await self._resolve_job_filename(submission_id) + + logger.info( + f"Resolving job {submission_id} on node {node_id} with " + f"filename {log_filename}" + ) elif pid: if node_id is None: @@ -282,6 +321,7 @@ async def resolve_filename( f"\task_id: {task_id}\n" f"\tpid: {pid}\n" f"\tsuffix: {suffix}\n" + f"\tsubmission_id: {submission_id}\n" ) logger.info(f"Resolved log file: {log_filename} on node {node_id}") return log_filename, node_id diff --git a/dashboard/modules/state/state_head.py b/dashboard/modules/state/state_head.py index e132a4f3da8a..df042e9c0ad0 100644 --- a/dashboard/modules/state/state_head.py +++ b/dashboard/modules/state/state_head.py @@ -406,6 +406,7 @@ async def get_logs(self, req: aiohttp.web.Request): filename=req.query.get("filename", None), actor_id=req.query.get("actor_id", None), task_id=req.query.get("task_id", None), + submission_id=req.query.get("submission_id", None), pid=req.query.get("pid", None), lines=req.query.get("lines", DEFAULT_LOG_LIMIT), interval=req.query.get("interval", None), diff --git a/dashboard/state_aggregator.py b/dashboard/state_aggregator.py index b7cfd20b5c9c..46b8987cfd68 100644 --- a/dashboard/state_aggregator.py +++ b/dashboard/state_aggregator.py @@ -1,7 +1,8 @@ import asyncio import logging -from dataclasses import asdict, fields +from dataclasses import fields +import dataclasses from itertools import islice from typing import List, Tuple, Optional from datetime import datetime @@ -14,6 +15,7 @@ from ray.experimental.state.common import ( protobuf_message_to_dict, ActorState, + JobState, ListApiOptions, ListApiResponse, NodeState, @@ -80,7 +82,10 @@ def _convert_filters_type( A new list of filters with correct types that match the schema. 
""" new_filter = [] - schema = {field.name: field.type for field in fields(schema)} + if dataclasses.is_dataclass(schema): + schema = {field.name: field.type for field in fields(schema)} + else: + schema = schema.schema_dict() for col, predicate, val in filter: if col in schema: @@ -95,7 +100,7 @@ def _convert_filters_type( if isinstance(val, column_type): # Do nothing. pass - elif column_type is int: + elif column_type is int or column_type == "integer": try: val = convert_string_to_type(val, int) except ValueError: @@ -104,16 +109,19 @@ def _convert_filters_type( "column. Please provide an integer filter " f"`--filter {col} [int]`" ) - elif column_type is float: + elif column_type is float or column_type == "number": try: - val = convert_string_to_type(val, float) + val = convert_string_to_type( + val, + float, + ) except ValueError: raise ValueError( f"Invalid filter `--filter {col} {val}` for a float " "type column. Please provide an integer filter " f"`--filter {col} [float]`" ) - elif column_type is bool: + elif column_type is bool or column_type == "boolean": try: val = convert_string_to_type(val, bool) except ValueError: @@ -251,7 +259,6 @@ async def list_placement_groups(self, *, option: ListApiOptions) -> ListApiRespo result = [] for message in reply.placement_group_table_data: - data = protobuf_message_to_dict( message=message, fields_to_decode=["placement_group_id", "creator_job_id", "node_id"], @@ -352,22 +359,21 @@ async def list_workers(self, *, option: ListApiOptions) -> ListApiResponse: ) async def list_jobs(self, *, option: ListApiOptions) -> ListApiResponse: - # TODO(sang): Support limit & timeout & async calls. 
try: - result = [] - job_info = await self._client.get_job_info() - for job_id, data in job_info.items(): - data = asdict(data) - data["job_id"] = job_id - result.append(data) + result = await self._client.get_job_info(timeout=option.timeout) + result = [job.dict() for job in result] + total = len(result) + result = self._filter(result, option.filters, JobState, option.detail) + num_filtered = len(result) + result.sort(key=lambda entry: entry["job_id"] or "") + result = list(islice(result, option.limit)) except DataSourceUnavailable: raise DataSourceUnavailable(GCS_QUERY_FAILURE_WARNING) return ListApiResponse( result=result, - # TODO(sang): Support this. - total=len(result), - num_after_truncation=len(result), - num_filtered=len(result), + total=total, + num_after_truncation=total, + num_filtered=num_filtered, ) async def list_tasks(self, *, option: ListApiOptions) -> ListApiResponse: diff --git a/python/ray/experimental/state/api.py b/python/ray/experimental/state/api.py index b8873af0ec1c..bf54aede53d5 100644 --- a/python/ray/experimental/state/api.py +++ b/python/ray/experimental/state/api.py @@ -252,6 +252,7 @@ def get( WorkerState, TaskState, List[ObjectState], + JobState, ] ]: """Get resources states by id @@ -266,12 +267,13 @@ def get( latency or failed query information. Returns: - None if not found, and if found, a dictionarified: + None if not found, and if found: - ActorState for actors - PlacementGroupState for placement groups - NodeState for nodes - WorkerState for workers - TaskState for tasks + - JobState for jobs Empty list for objects if not found, or list of ObjectState for objects @@ -294,6 +296,7 @@ def get( StateResource.WORKERS: "worker_id", StateResource.TASKS: "task_id", StateResource.OBJECTS: "object_id", + StateResource.JOBS: "submission_id", } if resource not in RESOURCE_ID_KEY_NAME: raise ValueError(f"Can't get {resource.name} by id.") @@ -489,6 +492,7 @@ def list( when timeout occurs. 
""" + endpoint = f"/api/v0/{resource.value}" params = self._make_param(options) list_api_response = self._make_http_get_request( @@ -577,14 +581,36 @@ def get_actor( ) -# TODO(rickyyx:alpha-obs) def get_job( id: str, address: Optional[str] = None, timeout: int = DEFAULT_RPC_TIMEOUT, _explain: bool = False, ) -> Optional[JobState]: - raise NotImplementedError("Get Job by id is currently not supported") + """Get a submission job detail by id. + + Args: + id: Submission ID obtained from job API. + address: Ray bootstrap address, could be `auto`, `localhost:6379`. + If None, it will be resolved automatically from an initialized ray. + timeout: Max timeout value for the state API requests made. + _explain: Print the API information such as API latency or + failed query information. + + Returns: + None if job not found, or + :class:`JobState `. + + Raises: + Exceptions: :class:`RayStateApiException ` if the CLI + failed to query the data. + """ # noqa: E501 + return StateApiClient(address=address).get( + StateResource.JOBS, + id, + GetApiOptions(timeout=timeout), + _explain=_explain, + ) def get_placement_group( @@ -1144,6 +1170,7 @@ def get_log( suffix: str = "out", encoding: Optional[str] = "utf-8", errors: Optional[str] = "strict", + submission_id: Optional[str] = None, attempt_number: int = 0, _interval: Optional[float] = None, ) -> Generator[str, None, None]: @@ -1180,6 +1207,7 @@ def get_log( "utf-8". Use None to get binary data directly. errors: The error handling scheme to use for decoding errors. Default is "strict". See https://docs.python.org/3/library/codecs.html#error-handlers + submission_id: Job submission ID if getting log from a submission job. attempt_number: The attempt number of the task if getting logs generated by a task. _interval: The interval in secs to print new logs when `follow=True`. 
@@ -1206,6 +1234,7 @@ def get_log( media_type=media_type, timeout=timeout, suffix=suffix, + submission_id=submission_id, attempt_number=attempt_number, ) options_dict = {} diff --git a/python/ray/experimental/state/common.py b/python/ray/experimental/state/common.py index 21ae132c44c2..cb332d44bf62 100644 --- a/python/ray/experimental/state/common.py +++ b/python/ray/experimental/state/common.py @@ -3,15 +3,14 @@ import logging import sys from abc import ABC -from dataclasses import field, fields +from dataclasses import asdict, field, fields from enum import Enum, unique -from typing import Dict, List, Optional, Set, Tuple, Union +from typing import Any, Dict, List, Optional, Set, Tuple, Union import ray.dashboard.utils as dashboard_utils from ray._private.ray_constants import env_integer from ray.core.generated.common_pb2 import TaskStatus, TaskType from ray.core.generated.gcs_pb2 import TaskEvents -from ray.dashboard.modules.job.common import JobInfo from ray.experimental.state.custom_types import ( TypeActorStatus, TypeNodeStatus, @@ -26,11 +25,16 @@ try: from pydantic.dataclasses import dataclass + + from ray.dashboard.modules.job.pydantic_models import JobDetails + except ImportError: # pydantic is not available in the dashboard. # We will use the dataclass from the standard library. from dataclasses import dataclass + JobDetails = object + logger = logging.getLogger(__name__) @@ -300,6 +304,9 @@ def detail_columns(cls) -> Set[str]: """ return set(cls.list_columns(detail=True)) + def asdict(self): + return asdict(self) + # Allow dict like access on the class directly for backward compatibility. def __getitem__(self, key): return getattr(self, key) @@ -355,6 +362,9 @@ class GetLogOptions: # The suffix of the log file if file resolution not through filename directly. # Default to "out". suffix: str = "out" + # The job submission id for submission job. This doesn't work for driver job + # since Ray doesn't log driver logs to file in the ray logs directory. 
+ submission_id: Optional[str] = None def __post_init__(self): if self.pid: @@ -377,10 +387,16 @@ def __post_init__(self): "Both node_id and node_ip are given. Only one of them can be provided. " f"Given node id: {self.node_id}, given node ip: {self.node_ip}" ) - if not (self.actor_id or self.task_id or self.pid or self.filename): + if not ( + self.actor_id + or self.task_id + or self.pid + or self.filename + or self.submission_id + ): raise ValueError( - "None of actor_id, task_id, pid, or filename is provided. " - "At least one of them is required to fetch logs." + "None of actor_id, task_id, pid, submission_id or filename " + "is provided. At least one of them is required to fetch logs." ) if self.suffix not in ["out", "err"]: @@ -504,19 +520,55 @@ class NodeState(StateSchema): ) -@dataclass(init=True) -class JobState(JobInfo, StateSchema): - """The state of the job that's submitted by Ray's Job APIs""" +# NOTE: +# Declaring this as dataclass would make __init__ not being called properly. +class JobState(StateSchema, JobDetails): + """The state of the job that's submitted by Ray's Job APIs or driver jobs""" - job_id: Optional[str] = state_column(filterable=False, default=None) + def __init__(self, **kwargs): + JobDetails.__init__(self, **kwargs) @classmethod def filterable_columns(cls) -> Set[str]: - return {"status", "entrypoint", "error_type"} + # We are not doing any filtering since filtering is currently done + # at the backend. + return {"job_id", "type", "status", "submission_id"} + + @classmethod + def humanify(cls, state: dict) -> dict: + return state + + @classmethod + def list_columns(cls, detail: bool = False) -> List[str]: + if not detail: + return [ + "job_id", + "submission_id", + "entrypoint", + "type", + "status", + "message", + "error_type", + "driver_info", + ] + if isinstance(JobDetails, object): + # We don't have pydantic in the dashboard. 
This is because + # we call this method at module import time, so we need to + # check if the class is a pydantic model. + return [] + + return JobDetails.__fields__ + + def asdict(self): + return JobDetails.dict(self) @classmethod - def list_columns(cls, detail: bool) -> List[str]: - return ["job_id"] + [f.name for f in fields(JobInfo)] + def schema_dict(cls) -> Dict[str, Any]: + schema_types = cls.schema()["properties"] + # Get type name to actual type mapping. + return { + k: v["type"] for k, v in schema_types.items() if v.get("type") is not None + } @dataclass(init=True) diff --git a/python/ray/experimental/state/state_cli.py b/python/ray/experimental/state/state_cli.py index ee7449f902ba..e21337b1a83d 100644 --- a/python/ray/experimental/state/state_cli.py +++ b/python/ray/experimental/state/state_cli.py @@ -1,4 +1,3 @@ -import dataclasses import json import logging from datetime import datetime @@ -298,8 +297,8 @@ def format_get_api_output( if not isinstance(state_data, list): state_data = [state_data] + state_data = [state.asdict() for state in state_data] - state_data = [dataclasses.asdict(state) for state in state_data] return output_with_format(state_data, schema=schema, format=format, detail=True) @@ -312,7 +311,7 @@ def format_list_api_output( ) -> str: if len(state_data) == 0: return "No resource in the cluster" - state_data = [dataclasses.asdict(state) for state in state_data] + state_data = [state.asdict() for state in state_data] return output_with_format(state_data, schema=schema, format=format, detail=detail) @@ -819,6 +818,7 @@ def _print_log( encoding_errors: str = "strict", task_id: Optional[str] = None, attempt_number: int = 0, + submission_id: Optional[str] = None, ): """Wrapper around `get_log()` that prints the preamble and the log lines""" if tail > 0: @@ -847,6 +847,7 @@ def _print_log( errors=encoding_errors, task_id=task_id, attempt_number=attempt_number, + submission_id=submission_id, ): print(chunk, end="", flush=True) @@ -1143,7 
+1144,7 @@ def log_worker( timeout: int, err: bool, ): - """Get/List logs associated with a worker process. + """Get logs associated with a worker process. Example: @@ -1178,6 +1179,66 @@ def log_worker( ) +@logs_state_cli_group.command(name="job") +@click.option( + "--id", + "submission_id", + required=True, + type=str, + help=( + "Retrieves the logs from a submission job with submission id," + "i.e. raysubmit_XXX" + ), +) +@address_option +@log_follow_option +@log_tail_option +@log_interval_option +@log_timeout_option +@click.pass_context +@PublicAPI(stability="alpha") +def log_job( + ctx, + submission_id: Optional[str], + address: Optional[str], + follow: bool, + tail: int, + interval: float, + timeout: int, +): + """Get logs associated with a submission job. + + Example: + + Follow the log file from a submission job with submission id raysumbit_xxx. + + ``` + ray logs job --id raysubmit_xxx + ``` + + Follow the submission job log. + + ``` + ray logs jobs --id raysubmit_xxx --follow + + ``` + + Raises: + :class:`RayStateApiException ` + if the CLI is failed to query the data. + MissingParameter if inputs are missing. + """ # noqa: E501 + + _print_log( + address=address, + tail=tail, + follow=follow, + interval=interval, + timeout=timeout, + submission_id=submission_id, + ) + + @logs_state_cli_group.command(name="task") @click.option( "--id", @@ -1213,7 +1274,7 @@ def log_task( timeout: int, err: bool, ): - """Get/List logs associated with a task. + """Get logs associated with a task. 
Example: diff --git a/python/ray/experimental/state/state_manager.py b/python/ray/experimental/state/state_manager.py index 9173c893b33f..9afb3f75c360 100644 --- a/python/ray/experimental/state/state_manager.py +++ b/python/ray/experimental/state/state_manager.py @@ -1,8 +1,9 @@ +import dataclasses import inspect import logging from collections import defaultdict from functools import wraps -from typing import Dict, List, Optional, Tuple +from typing import List, Optional, Tuple import grpc from grpc.aio._call import UnaryStreamCall @@ -46,7 +47,9 @@ ) from ray.core.generated.runtime_env_agent_pb2_grpc import RuntimeEnvServiceStub from ray.dashboard.datacenter import DataSource -from ray.dashboard.modules.job.common import JobInfo, JobInfoStorageClient +from ray.dashboard.modules.job.common import JobInfoStorageClient +from ray.dashboard.modules.job.pydantic_models import JobDetails, JobType +from ray.dashboard.modules.job.utils import get_driver_jobs from ray.dashboard.utils import Dict as Dictionary from ray.experimental.state.common import ( RAY_MAX_LIMIT_FROM_DATA_SOURCE, @@ -157,6 +160,7 @@ def __init__(self, gcs_channel: grpc.aio.Channel, gcs_aio_client: GcsAioClient): self._log_agent_stub = {} self._job_client = JobInfoStorageClient(gcs_aio_client) self._id_id_map = IdToIpMap() + self._gcs_aio_client = gcs_aio_client def register_gcs_client(self, gcs_channel: grpc.aio.Channel): self._gcs_actor_info_stub = gcs_service_pb2_grpc.ActorInfoGcsServiceStub( @@ -333,23 +337,30 @@ async def get_all_worker_info( ) return reply - async def get_job_info(self) -> Optional[Dict[str, JobInfo]]: + # TODO(rickyx): + # This is currently mirroring dashboard/modules/job/job_head.py::list_jobs + # We should eventually unify the logic. + async def get_job_info(self, timeout: int = None) -> List[JobDetails]: # Cannot use @handle_grpc_network_errors because async def is not supported yet. 
- # TODO(sang): Support timeout & make it async - try: - return await self._job_client.get_all_jobs() - except grpc.aio.AioRpcError as e: - if ( - e.code == grpc.StatusCode.DEADLINE_EXCEEDED - or e.code == grpc.StatusCode.UNAVAILABLE - ): - raise DataSourceUnavailable( - "Failed to query the data source. " - "It is either there's a network issue, or the source is down." - ) - else: - logger.exception(e) - raise e + + driver_jobs, submission_job_drivers = await get_driver_jobs( + self._gcs_aio_client, timeout=timeout + ) + submission_jobs = await self._job_client.get_all_jobs(timeout=timeout) + submission_jobs = [ + JobDetails( + **dataclasses.asdict(job), + submission_id=submission_id, + job_id=submission_job_drivers.get(submission_id).id + if submission_id in submission_job_drivers + else None, + driver_info=submission_job_drivers.get(submission_id), + type=JobType.SUBMISSION, + ) + for submission_id, job in submission_jobs.items() + ] + + return list(driver_jobs.values()) + submission_jobs async def get_all_cluster_events(self) -> Dictionary: return DataSource.events diff --git a/python/ray/scripts/scripts.py b/python/ray/scripts/scripts.py index 85bfea133c2b..6c8d33c0347e 100644 --- a/python/ray/scripts/scripts.py +++ b/python/ray/scripts/scripts.py @@ -45,12 +45,6 @@ from ray.autoscaler._private.fake_multi_node.node_provider import FAKE_HEAD_NODE_ID from ray.util.annotations import PublicAPI -from ray.experimental.state.state_cli import ( - ray_get, - ray_list, - logs_state_cli_group, - summary_state_cli_group, -) logger = logging.getLogger(__name__) @@ -2431,10 +2425,22 @@ def add_command_alias(command, name, hidden): cli.add_command(cpp) cli.add_command(disable_usage_stats) cli.add_command(enable_usage_stats) -cli.add_command(ray_list, name="list") -cli.add_command(ray_get, name="get") -add_command_alias(summary_state_cli_group, name="summary", hidden=False) -add_command_alias(logs_state_cli_group, name="logs", hidden=False) + +try: + from 
ray.experimental.state.state_cli import ( + ray_get, + ray_list, + logs_state_cli_group, + summary_state_cli_group, + ) + + cli.add_command(ray_list, name="list") + cli.add_command(ray_get, name="get") + add_command_alias(summary_state_cli_group, name="summary", hidden=False) + add_command_alias(logs_state_cli_group, name="logs", hidden=False) +except ImportError as e: + logger.debug(f"Integrating ray state command line tool failed: {e}") + try: from ray.dashboard.modules.job.cli import job_cli_group diff --git a/python/ray/tests/test_state_api.py b/python/ray/tests/test_state_api.py index aa68bf05fd09..bc997971970f 100644 --- a/python/ray/tests/test_state_api.py +++ b/python/ray/tests/test_state_api.py @@ -8,6 +8,8 @@ from unittest.mock import MagicMock import pytest +from ray.experimental.state.api import get_job +from ray.dashboard.modules.job.pydantic_models import JobDetails from ray.experimental.state.common import Humanify from ray._private.gcs_utils import GcsAioClient import yaml @@ -1534,8 +1536,14 @@ async def test_state_data_source_client(ray_start_cluster): entrypoint="ls", ) result = await client.get_job_info() - assert list(result.keys())[0] == job_id - assert isinstance(result, dict) + assert isinstance(result[0], JobDetails) + found_job = False + for job in result: + if job.type != "DRIVER": + assert job.submission_id == job_id + found_job = True + assert found_job, result + assert isinstance(result, list) """ Test tasks @@ -2185,8 +2193,9 @@ def verify(): sys.platform == "win32", reason="Failed on Windows", ) -def test_list_jobs(shutdown_only): +def test_list_get_jobs(shutdown_only): ray.init() + # Test submission job client = JobSubmissionClient( f"http://{ray._private.worker.global_worker.node.address_info['webui_url']}" ) @@ -2198,13 +2207,50 @@ def test_list_jobs(shutdown_only): def verify(): job_data = list_jobs()[0] print(job_data) - job_id_from_api = job_data["job_id"] - correct_state = job_data["status"] == "SUCCEEDED" - correct_id = 
job_id == job_id_from_api - return correct_state and correct_id + job_id_from_api = job_data["submission_id"] + assert job_data["status"] == "SUCCEEDED" + assert job_id == job_id_from_api + return True + + wait_for_condition(verify) + + # Test driver jobs + script = """ + +import ray + +ray.init("auto") + +@ray.remote +def f(): + pass + +ray.get(f.remote()) +""" + run_string_as_driver(script) + + def verify(): + jobs = list_jobs(filters=[("type", "=", "DRIVER")]) + assert len(jobs) == 2, "1 test driver + 1 script run above" + for driver_job in jobs: + assert driver_job["driver_info"] is not None + + sub_jobs = list_jobs(filters=[("type", "=", "SUBMISSION")]) + assert len(sub_jobs) == 1 + assert sub_jobs[0]["submission_id"] is not None + return True + + wait_for_condition(verify) + + # Test GET api + def verify(): + job = get_job(id=job_id) + assert job["submission_id"] == job_id + assert job["entrypoint"] == "ls" + assert job["status"] == "SUCCEEDED" + return True wait_for_condition(verify) - print(list_jobs()) @pytest.mark.skipif( diff --git a/python/ray/tests/test_state_api_log.py b/python/ray/tests/test_state_api_log.py index a4e6da1775cc..11b8a15b9676 100644 --- a/python/ray/tests/test_state_api_log.py +++ b/python/ray/tests/test_state_api_log.py @@ -7,6 +7,7 @@ from unittest.mock import MagicMock import pytest +from ray.experimental.state.api import list_jobs from ray.experimental.state.state_cli import logs_state_cli_group import requests from click.testing import CliRunner @@ -1002,6 +1003,40 @@ def verify(): e.match(f"Given node id {node_id} is not available") +@pytest.mark.skipif( + sys.platform == "win32", reason="Job submission is failing on windows." 
+) +def test_log_job(ray_start_with_dashboard): + assert wait_until_server_available(ray_start_with_dashboard["webui_url"]) is True + webui_url = ray_start_with_dashboard["webui_url"] + webui_url = format_web_url(webui_url) + node_id = list_nodes()[0]["node_id"] + + # Submit a job + from ray.job_submission import JobSubmissionClient + + JOB_LOG = "test-job-log" + client = JobSubmissionClient(webui_url) + entrypoint = f"python -c \"print('{JOB_LOG}')\"" + job_id = client.submit_job(entrypoint=entrypoint) + + def job_done(): + jobs = list_jobs(filters=[("submission_id", "=", job_id)]) + assert len(jobs) == 1 + assert jobs[0].status == "SUCCEEDED" + return True + + wait_for_condition(job_done) + + def verify(): + logs = "".join(get_log(submission_id=job_id, node_id=node_id)) + assert JOB_LOG + "\n" == logs + + return True + + wait_for_condition(verify) + + def test_log_get_subdir(ray_start_with_dashboard): assert ( wait_until_server_available(ray_start_with_dashboard.address_info["webui_url"]) From 98ae72a03231993f52c9dea07b8537b66dac94d3 Mon Sep 17 00:00:00 2001 From: matthewdeng Date: Mon, 15 May 2023 22:17:17 -0700 Subject: [PATCH 405/424] [tests] fix lint and dependency issues in tests (#35373) Signed-off-by: Matthew Deng --- ci/env/install-dependencies.sh | 1 + release/nightly_tests/dataset/data_ingest_benchmark.py | 10 ++-------- 2 files changed, 3 insertions(+), 8 deletions(-) diff --git a/ci/env/install-dependencies.sh b/ci/env/install-dependencies.sh index a6a19977e0e8..81fcd3f72f0f 100755 --- a/ci/env/install-dependencies.sh +++ b/ci/env/install-dependencies.sh @@ -392,6 +392,7 @@ install_pip_packages() { if [ "${INSTALL_TIMESERIES_LIBS-}" = 1 ]; then requirements_packages+=("statsforecast==1.5.0") requirements_packages+=("prophet==1.1.1") + requirements_packages+=("holidays==0.24") # holidays 0.25 causes `import prophet` to fail. fi # Data processing test dependencies. 
diff --git a/release/nightly_tests/dataset/data_ingest_benchmark.py b/release/nightly_tests/dataset/data_ingest_benchmark.py index 089dff4998ac..4cf3e6bc91bf 100644 --- a/release/nightly_tests/dataset/data_ingest_benchmark.py +++ b/release/nightly_tests/dataset/data_ingest_benchmark.py @@ -170,10 +170,7 @@ def run_ingest_bulk(dataset_size_gb, num_workers): ] ds = ds.map_batches(lambda df: df * 2, batch_format="pandas") splits = ds.split(num_workers, equal=True, locality_hints=consumers) - future = [ - consumers[i].consume.remote(s) - for i, s in enumerate(splits) - ] + future = [consumers[i].consume.remote(s) for i, s in enumerate(splits)] ray.get(future) # Example ballpark number for transformation (5s): @@ -203,10 +200,7 @@ def run_ingest_dataset_pipeline(dataset_size_gb, num_workers): .map_batches(lambda df: df * 2, batch_format="pandas") ) splits = p.split(num_workers, equal=True, locality_hints=consumers) - future = [ - consumers[i].consume.remote(s) - for i, s in enumerate(splits) - ] + future = [consumers[i].consume.remote(s) for i, s in enumerate(splits)] ray.get(future) # Example ballpark numbers: From fad7e19b73167a73f43cfff29c757bde59d865c3 Mon Sep 17 00:00:00 2001 From: Kai Fricke Date: Tue, 16 May 2023 09:22:42 +0200 Subject: [PATCH 406/424] [ci] External code tracker: Ignore if file is not found (#35376) When master is not merged, a file not found error can come up. This update to the script will prevent that from happening by catching errors and setting default values for the variables. 
Signed-off-by: Kai Fricke --- .github/workflows/external-code-affected.yml | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/.github/workflows/external-code-affected.yml b/.github/workflows/external-code-affected.yml index 268e88d452f8..2917e32a939b 100644 --- a/.github/workflows/external-code-affected.yml +++ b/.github/workflows/external-code-affected.yml @@ -63,9 +63,17 @@ jobs: comment.user.login === 'github-actions[bot]' && comment.body.startsWith(commentHeader) ); + let externCodeFileContent; + let trackedFilesToURIs; + // Read and parse external_code.txt file - let externCodeFileContent = fs.readFileSync(externalCodeFile, "utf8"); - let trackedFilesToURIs = parseTrackedFilesToURIs(externCodeFileContent); + try { + externCodeFileContent = fs.readFileSync(externalCodeFile, "utf8"); + trackedFilesToURIs = parseTrackedFilesToURIs(externCodeFileContent); + } catch (error) { + console.error("An error occurred reading the external code file:", error); + trackedFilesToURIs = {}; + } console.log("trackedFileToURIs"); console.log(trackedFilesToURIs); From 72a089581c40c02041d50990ddc8c059c446b351 Mon Sep 17 00:00:00 2001 From: Alan Guo Date: Tue, 16 May 2023 00:59:12 -0700 Subject: [PATCH 407/424] [Dashboard] Add serve controller info to the Serve page (#35327) --- dashboard/client/src/App.tsx | 9 +- .../client/src/components/StatusChip.tsx | 13 +- ...rveApplicationsListPage.component.test.tsx | 33 ++- .../pages/serve/ServeReplicaDetailPage.tsx | 59 ++---- .../serve/ServeSystemActorDetailPage.tsx | 196 ++++++++++++------ .../src/pages/serve/ServeSystemDetailRows.tsx | 111 ++++++++-- .../src/pages/serve/ServeSystemDetails.tsx | 6 +- .../pages/serve/hook/useServeApplications.ts | 46 +++- dashboard/client/src/type/serve.ts | 20 +- 9 files changed, 334 insertions(+), 159 deletions(-) diff --git a/dashboard/client/src/App.tsx b/dashboard/client/src/App.tsx index a60571201bcc..6647b4129cf6 100644 --- a/dashboard/client/src/App.tsx +++ 
b/dashboard/client/src/App.tsx @@ -32,7 +32,10 @@ import { import { ServeApplicationsListPage } from "./pages/serve/ServeApplicationsListPage"; import { ServeLayout } from "./pages/serve/ServeLayout"; import { ServeReplicaDetailPage } from "./pages/serve/ServeReplicaDetailPage"; -import { ServeHttpProxyDetailPage } from "./pages/serve/ServeSystemActorDetailPage"; +import { + ServeControllerDetailPage, + ServeHttpProxyDetailPage, +} from "./pages/serve/ServeSystemActorDetailPage"; import { TaskPage } from "./pages/task/TaskPage"; import { getNodeList } from "./service/node"; import { lightTheme } from "./theme"; @@ -223,6 +226,10 @@ const App = () => { } path="metrics" /> } path="serve"> } path="" /> + } + path="controller" + /> } path="httpProxies/:httpProxyId" diff --git a/dashboard/client/src/components/StatusChip.tsx b/dashboard/client/src/components/StatusChip.tsx index 090bd86a1713..4437321ac7e2 100644 --- a/dashboard/client/src/components/StatusChip.tsx +++ b/dashboard/client/src/components/StatusChip.tsx @@ -10,8 +10,8 @@ import { PlacementGroupState } from "../type/placementGroup"; import { ServeApplicationStatus, ServeDeploymentStatus, - ServeHTTPProxyStatus, ServeReplicaState, + ServeSystemActorStatus, } from "../type/serve"; const orange = "#DB6D00"; @@ -75,9 +75,14 @@ const colorMap = { [ServeReplicaState.STOPPING]: red, }, serveHttpProxy: { - [ServeHTTPProxyStatus.HEALTHY]: green, - [ServeHTTPProxyStatus.UNHEALTHY]: red, - [ServeHTTPProxyStatus.STARTING]: orange, + [ServeSystemActorStatus.HEALTHY]: green, + [ServeSystemActorStatus.UNHEALTHY]: red, + [ServeSystemActorStatus.STARTING]: orange, + }, + serveController: { + [ServeSystemActorStatus.HEALTHY]: green, + [ServeSystemActorStatus.UNHEALTHY]: red, + [ServeSystemActorStatus.STARTING]: orange, }, } as { [key: string]: { diff --git a/dashboard/client/src/pages/serve/ServeApplicationsListPage.component.test.tsx b/dashboard/client/src/pages/serve/ServeApplicationsListPage.component.test.tsx index 
acbba562eb08..57684b20e55e 100644 --- a/dashboard/client/src/pages/serve/ServeApplicationsListPage.component.test.tsx +++ b/dashboard/client/src/pages/serve/ServeApplicationsListPage.component.test.tsx @@ -1,22 +1,36 @@ import { render, screen } from "@testing-library/react"; import userEvent from "@testing-library/user-event"; import React from "react"; +import { getActor } from "../../service/actor"; import { getServeApplications } from "../../service/serve"; import { ServeApplicationStatus, ServeDeploymentMode, - ServeHTTPProxyStatus, + ServeSystemActorStatus, } from "../../type/serve"; import { TEST_APP_WRAPPER } from "../../util/test-utils"; import { ServeApplicationsListPage } from "./ServeApplicationsListPage"; +jest.mock("../../service/actor"); jest.mock("../../service/serve"); const mockGetServeApplications = jest.mocked(getServeApplications); +const mockGetActor = jest.mocked(getActor); describe("ServeApplicationsListPage", () => { it("renders list", async () => { - expect.assertions(14); + expect.assertions(15); + + // Mock ServeController actor fetch + mockGetActor.mockResolvedValue({ + data: { + data: { + detail: { + state: "ALIVE", + }, + }, + }, + } as any); mockGetServeApplications.mockResolvedValue({ data: { @@ -24,10 +38,14 @@ describe("ServeApplicationsListPage", () => { http_proxies: { foo: { node_id: "node:12345", - status: ServeHTTPProxyStatus.HEALTHY, + status: ServeSystemActorStatus.STARTING, actor_id: "actor:12345", }, }, + controller_info: { + node_id: "node:12345", + actor_id: "actor:12345", + }, proxy_location: ServeDeploymentMode.EveryNode, applications: { home: { @@ -67,16 +85,15 @@ describe("ServeApplicationsListPage", () => { await screen.findByText("System"); expect(screen.getByText("System")).toBeVisible(); - // System tab is hidden at first - expect(screen.queryByText("1.2.3.4")).toBeNull(); - // Expand the system tab - await user.click(screen.getByText("System")); - await screen.findByText("1.2.3.4"); 
expect(screen.getByText("1.2.3.4")).toBeVisible(); expect(screen.getByText("8000")).toBeVisible(); // HTTP Proxy row expect(screen.getByText("HTTPProxyActor:node:12345")).toBeVisible(); + expect(screen.getByText("STARTING")).toBeVisible(); + + // Serve Controller row + expect(screen.getByText("Serve Controller")).toBeVisible(); expect(screen.getByText("HEALTHY")).toBeVisible(); // First row diff --git a/dashboard/client/src/pages/serve/ServeReplicaDetailPage.tsx b/dashboard/client/src/pages/serve/ServeReplicaDetailPage.tsx index a9bfc0cb1b23..d1572729f26e 100644 --- a/dashboard/client/src/pages/serve/ServeReplicaDetailPage.tsx +++ b/dashboard/client/src/pages/serve/ServeReplicaDetailPage.tsx @@ -1,9 +1,4 @@ -import { - CircularProgress, - createStyles, - makeStyles, - Typography, -} from "@material-ui/core"; +import { createStyles, makeStyles, Typography } from "@material-ui/core"; import React from "react"; import { useParams } from "react-router-dom"; import { CodeDialogButton } from "../../common/CodeDialogButton"; @@ -20,7 +15,6 @@ import Loading from "../../components/Loading"; import { MetadataSection } from "../../components/MetadataSection"; import { StatusChip } from "../../components/StatusChip"; import { ServeReplica } from "../../type/serve"; -import { useFetchActor } from "../actor/hook/useActorDetail"; import { MainNavPageInfo } from "../layout/mainNavContext"; import TaskList from "../state/task"; import { useServeReplicaDetails } from "./hook/useServeApplications"; @@ -179,47 +173,18 @@ type ServeReplicaLogsProps = { const ServeReplicaLogs = ({ replica: { log_file_path, node_id, actor_id }, }: ServeReplicaLogsProps) => { - const { data: actor } = useFetchActor(actor_id); - - if (!actor) { - return ; - } - - const { - address: { workerId }, - pid, - actorId, - } = actor; - const tabs: MultiTabLogViewerTabDetails[] = [ - { - title: "stderr", - actorId, - suffix: "err", - }, - { - title: "stdout", - actorId, - suffix: "out", - }, - { - title: "system", 
- nodeId: node_id, - // TODO(aguo): Have API return the log file name. - filename: `python-core-worker-${workerId}_${pid}.log`, - }, - // TODO(aguo): enable this once state-api logs supports files with # in the name. - // ...(log_file_path - // ? [ - // { - // title: "replica", - // nodeId: node_id, - // filename: log_file_path.startsWith("/") - // ? log_file_path.substring(1) - // : log_file_path, - // }, - // ] - // : []), + ...(log_file_path + ? [ + { + title: "replica", + nodeId: node_id, + filename: log_file_path.startsWith("/") + ? log_file_path.substring(1) + : log_file_path, + }, + ] + : []), ]; return ; }; diff --git a/dashboard/client/src/pages/serve/ServeSystemActorDetailPage.tsx b/dashboard/client/src/pages/serve/ServeSystemActorDetailPage.tsx index 3aaf68a32500..228ac278a7d2 100644 --- a/dashboard/client/src/pages/serve/ServeSystemActorDetailPage.tsx +++ b/dashboard/client/src/pages/serve/ServeSystemActorDetailPage.tsx @@ -8,27 +8,30 @@ import { MultiTabLogViewerTabDetails, } from "../../common/MultiTabLogViewer"; import { Section } from "../../common/Section"; +import Loading from "../../components/Loading"; import { MetadataSection } from "../../components/MetadataSection"; import { StatusChip } from "../../components/StatusChip"; -import { ActorDetail } from "../../type/actor"; -import { ServeHttpProxy } from "../../type/serve"; +import { ActorDetail, ActorEnum } from "../../type/actor"; +import { + ServeHttpProxy, + ServeSystemActor, + ServeSystemActorStatus, +} from "../../type/serve"; import { useFetchActor } from "../actor/hook/useActorDetail"; import { MainNavPageInfo } from "../layout/mainNavContext"; -import { useServeHTTPProxyDetails } from "./hook/useServeApplications"; - -type ActorInfo = { - type: "httpProxy"; - detail: ServeHttpProxy; -}; - -type ServeSystemActorDetailProps = { - actor: ActorInfo; -}; +import { + useServeControllerDetails, + useServeHTTPProxyDetails, +} from "./hook/useServeApplications"; export const 
ServeHttpProxyDetailPage = () => { const { httpProxyId } = useParams(); - const { httpProxy } = useServeHTTPProxyDetails(httpProxyId); + const { httpProxy, loading } = useServeHTTPProxyDetails(httpProxyId); + + if (loading) { + return ; + } if (!httpProxy) { return ( @@ -38,27 +41,93 @@ export const ServeHttpProxyDetailPage = () => { ); } + return ( +
    + + +
    + ); +}; + +export const ServeControllerDetailPage = () => { + const { controller, loading } = useServeControllerDetails(); + + if (loading) { + return ; + } + + if (!controller) { + return Serve controller not found.; + } + return (
    ); }; +type ActorInfo = + | { + type: "httpProxy"; + detail: ServeHttpProxy; + } + | { + type: "controller"; + detail: ServeSystemActor; + }; + +type ServeSystemActorDetailProps = { + actor: ActorInfo; +}; + +export const convertActorStateForServeController = ( + actorState: ActorEnum | string, +) => { + if (actorState === ActorEnum.ALIVE) { + return ServeSystemActorStatus.HEALTHY; + } else if (actorState === ActorEnum.DEAD) { + return ServeSystemActorStatus.UNHEALTHY; + } else { + return ServeSystemActorStatus.STARTING; + } +}; + export const ServeSystemActorDetail = ({ actor, }: ServeSystemActorDetailProps) => { - const name = `HTTPProxyActor:${actor.detail.actor_id}`; + const name = + actor.type === "httpProxy" + ? `HTTPProxyActor:${actor.detail.actor_id}` + : "Serve Controller"; const { data: fetchedActor } = useFetchActor(actor.detail.actor_id); @@ -74,46 +143,71 @@ export const ServeSystemActorDetail = ({ }, { label: "Status", - content: ( - - ), + content: + actor.type === "httpProxy" ? ( + + ) : fetchedActor ? ( + + ) : ( + { + value: "-", + } + ), }, { label: "Actor ID", - content: { - value: actor.detail.actor_id, - copyableValue: actor.detail.actor_id, - link: actor.detail.actor_id - ? generateActorLink(actor.detail.actor_id) - : undefined, - }, + content: actor.detail.actor_id + ? { + value: actor.detail.actor_id, + copyableValue: actor.detail.actor_id, + link: actor.detail.actor_id + ? generateActorLink(actor.detail.actor_id) + : undefined, + } + : { + value: "-", + }, }, { label: "Actor name", content: { - value: actor.detail.actor_name, + value: actor.detail.actor_name ? actor.detail.actor_name : "-", }, }, { label: "Worker ID", - content: { - value: actor.detail.worker_id, - }, + content: actor.detail.worker_id + ? 
{ + value: actor.detail.worker_id, + copyableValue: actor.detail.worker_id, + } + : { + value: "-", + }, }, { label: "Node ID", - content: { - value: actor.detail.node_id, - copyableValue: actor.detail.node_id, - link: actor.detail.node_id - ? generateNodeLink(actor.detail.node_id) - : undefined, - }, + content: actor.detail.node_id + ? { + value: actor.detail.node_id, + copyableValue: actor.detail.node_id, + link: actor.detail.node_id + ? generateNodeLink(actor.detail.node_id) + : undefined, + } + : { + value: "-", + }, }, { label: "Node IP", content: { - value: actor.detail.node_ip, + value: actor.detail.node_ip ? actor.detail.node_ip : "-", }, }, ]} @@ -122,7 +216,7 @@ export const ServeSystemActorDetail = ({
    @@ -150,28 +244,12 @@ const ServeSystemActorLogs = ({ }: ServeSystemActorLogsProps) => { const tabs: MultiTabLogViewerTabDetails[] = [ { - title: type === "controller" ? "Controller" : "HTTP Proxy", + title: type === "controller" ? "Controller logs" : "HTTP proxy logs", nodeId: rayletId, filename: systemLogFilePath.startsWith("/") ? systemLogFilePath.substring(1) : systemLogFilePath, }, - { - title: "Actor Logs (stderr)", - actorId, - suffix: "err", - }, - { - title: "Actor Logs (stdout)", - actorId, - suffix: "out", - }, - { - title: "Actor Logs (system)", - nodeId: rayletId, - // TODO(aguo): Have API return the log file name. - filename: `python-core-worker-${workerId}_${pid}.log`, - }, ]; return ; }; diff --git a/dashboard/client/src/pages/serve/ServeSystemDetailRows.tsx b/dashboard/client/src/pages/serve/ServeSystemDetailRows.tsx index 3ede9a318d2b..f641499adb9a 100644 --- a/dashboard/client/src/pages/serve/ServeSystemDetailRows.tsx +++ b/dashboard/client/src/pages/serve/ServeSystemDetailRows.tsx @@ -9,7 +9,9 @@ import { import React from "react"; import { Link as RouterLink } from "react-router-dom"; import { StatusChip } from "../../components/StatusChip"; -import { ServeHttpProxy } from "../../type/serve"; +import { ServeHttpProxy, ServeSystemActor } from "../../type/serve"; +import { useFetchActor } from "../actor/hook/useActorDetail"; +import { convertActorStateForServeController } from "./ServeSystemActorDetailPage"; const useStyles = makeStyles((theme) => createStyles({ @@ -29,37 +31,104 @@ export type ServeHttpProxyRowProps = { }; export const ServeHttpProxyRow = ({ httpProxy }: ServeHttpProxyRowProps) => { - const { node_id, status, actor_id } = httpProxy; + const { status } = httpProxy; + + return ( + } + /> + ); +}; + +export type ServeControllerRowProps = { + controller: ServeSystemActor; +}; + +export const ServeControllerRow = ({ controller }: ServeControllerRowProps) => { + const { data: actor } = useFetchActor(controller.actor_id); + + const 
status = actor?.state; + + return ( + + ) : ( + "-" + ) + } + /> + ); +}; + +type ServeSystemActorRowProps = { + actor: ServeSystemActor; + type: "controller" | "httpProxy"; + status: React.ReactNode; +}; + +const ServeSystemActorRow = ({ + actor, + type, + status, +}: ServeSystemActorRowProps) => { + const { node_id, actor_id } = actor; const classes = useStyles(); return ( - - HTTPProxyActor:{node_id} - - - - + {type === "httpProxy" ? ( + + HTTPProxyActor:{node_id} + + ) : ( + + Serve Controller + + )} + {status} - - Log - + {type === "httpProxy" ? ( + + Log + + ) : ( + + Log + + )} - - - {node_id} - - + {node_id ? ( + + + {node_id} + + + ) : ( + "-" + )} - - - {actor_id} - - + {actor_id ? ( + + + {actor_id} + + + ) : ( + "-" + )} ); diff --git a/dashboard/client/src/pages/serve/ServeSystemDetails.tsx b/dashboard/client/src/pages/serve/ServeSystemDetails.tsx index 017134949a87..d709b73945da 100644 --- a/dashboard/client/src/pages/serve/ServeSystemDetails.tsx +++ b/dashboard/client/src/pages/serve/ServeSystemDetails.tsx @@ -16,7 +16,7 @@ import { CollapsibleSection } from "../../common/CollapsibleSection"; import { MetadataSection } from "../../components/MetadataSection"; import { HelpInfo } from "../../components/Tooltip"; import { ServeApplicationsRsp, ServeHttpProxy } from "../../type/serve"; -import { ServeHttpProxyRow } from "./ServeSystemDetailRows"; +import { ServeControllerRow, ServeHttpProxyRow } from "./ServeSystemDetailRows"; const useStyles = makeStyles((theme) => createStyles({ @@ -34,7 +34,7 @@ const useStyles = makeStyles((theme) => export type ServeDetails = Pick< ServeApplicationsRsp, - "http_options" | "proxy_location" + "http_options" | "proxy_location" | "controller_info" >; type ServeSystemDetailsProps = { @@ -65,6 +65,7 @@ export const ServeSystemDetails = ({ return ( @@ -131,6 +132,7 @@ export const ServeSystemDetails = ({ + {httpProxies .slice( (page.pageNo - 1) * page.pageSize, diff --git 
a/dashboard/client/src/pages/serve/hook/useServeApplications.ts b/dashboard/client/src/pages/serve/hook/useServeApplications.ts index e04488fe22c1..1f7fdfb759ca 100644 --- a/dashboard/client/src/pages/serve/hook/useServeApplications.ts +++ b/dashboard/client/src/pages/serve/hook/useServeApplications.ts @@ -3,15 +3,17 @@ import useSWR from "swr"; import { GlobalContext } from "../../../App"; import { API_REFRESH_INTERVAL_MS } from "../../../common/constants"; import { getServeApplications } from "../../../service/serve"; -import { ServeHTTPProxyStatus } from "../../../type/serve"; +import { ServeSystemActorStatus } from "../../../type/serve"; import { ServeDetails } from "../ServeSystemDetails"; -const SERVE_HTTP_PROXY_STATUS_SORT_ORDER: Record = - { - [ServeHTTPProxyStatus.UNHEALTHY]: 0, - [ServeHTTPProxyStatus.STARTING]: 1, - [ServeHTTPProxyStatus.HEALTHY]: 2, - }; +const SERVE_HTTP_PROXY_STATUS_SORT_ORDER: Record< + ServeSystemActorStatus, + number +> = { + [ServeSystemActorStatus.UNHEALTHY]: 0, + [ServeSystemActorStatus.STARTING]: 1, + [ServeSystemActorStatus.HEALTHY]: 2, +}; export const useServeApplications = () => { const [page, setPage] = useState({ pageSize: 10, pageNo: 1 }); @@ -50,7 +52,11 @@ export const useServeApplications = () => { ); const serveDetails: ServeDetails | undefined = data - ? { http_options: data.http_options, proxy_location: data.proxy_location } + ? { + http_options: data.http_options, + proxy_location: data.proxy_location, + controller_info: data.controller_info, + } : undefined; const serveApplicationsList = data ? Object.values(data.applications).sort( @@ -206,10 +212,32 @@ export const useServeHTTPProxyDetails = (httpProxyId: string | undefined) => { const httpProxy = httpProxyId ? data?.http_proxies?.[httpProxyId] : undefined; // Need to expose loading because it's not clear if undefined values - // for application, deployment, or replica means loading or missing data. + // for http proxies means loading or missing data. 
return { loading: isLoading, httpProxy, error, }; }; + +export const useServeControllerDetails = () => { + const { data, error, isLoading } = useSWR( + "useServeControllerDetails", + async () => { + const rsp = await getServeApplications(); + + if (rsp) { + return rsp.data; + } + }, + { refreshInterval: API_REFRESH_INTERVAL_MS }, + ); + + // Need to expose loading because it's not clear if undefined values + // for serve controller means loading or missing data. + return { + loading: isLoading, + controller: data?.controller_info, + error, + }; +}; diff --git a/dashboard/client/src/type/serve.ts b/dashboard/client/src/type/serve.ts index ea96988e76f4..ea396b329919 100644 --- a/dashboard/client/src/type/serve.ts +++ b/dashboard/client/src/type/serve.ts @@ -86,22 +86,25 @@ export enum ServeDeploymentMode { } // Keep in sync with HTTPProxyStatus in python/ray/serve/_private/common.py -export enum ServeHTTPProxyStatus { +export enum ServeSystemActorStatus { STARTING = "STARTING", HEALTHY = "HEALTHY", UNHEALTHY = "UNHEALTHY", } -export type ServeHttpProxy = { - node_id: string; - node_ip: string; - actor_id: string; - actor_name: string; - worker_id: string; - status: ServeHTTPProxyStatus; +export type ServeSystemActor = { + node_id: string | null; + node_ip: string | null; + actor_id: string | null; + actor_name: string | null; + worker_id: string | null; log_file_path: string | null; }; +export type ServeHttpProxy = { + status: ServeSystemActorStatus; +} & ServeSystemActor; + export type ServeApplicationsRsp = { http_options: | { @@ -110,6 +113,7 @@ export type ServeApplicationsRsp = { } | undefined; proxy_location: ServeDeploymentMode; + controller_info: ServeSystemActor; http_proxies: { [name: string]: ServeHttpProxy; } | null; From 7683c17de4626f8693f23c4cf435f9b43e11f46a Mon Sep 17 00:00:00 2001 From: Ricky Xu Date: Tue, 16 May 2023 17:03:56 +0800 Subject: [PATCH 408/424] [core][dashboard] Make actor tasks'name default to . 
(#35371) We updated the dashboard to show actor's name with actor repr, and we should do the same for actor tasks. Caveat: Actor.__init__ is currently not showing up as repr_name.__init__ because during creation task initialization, we haven't initialized the actor states yet, so the repr func should not yet be called. There's workaround (i.e we could modify the init task name later at rendering, but chose not implement in this PR), but would be rather hacky. The major issue is that repr info for an actor is only available on the executor, and on the submitter (or encoded in actor handle). Given the feature is only for dashboard, I feel this the intrusive change in this PR is better. --- python/ray/experimental/state/common.py | 15 +++++- python/ray/tests/test_state_api_2.py | 61 ++++++++++++++++++++++++ src/ray/core_worker/core_worker.cc | 21 +++++++- src/ray/core_worker/core_worker.h | 3 ++ src/ray/core_worker/task_event_buffer.cc | 4 ++ src/ray/core_worker/task_event_buffer.h | 5 ++ src/ray/protobuf/gcs.proto | 2 + 7 files changed, 108 insertions(+), 3 deletions(-) diff --git a/python/ray/experimental/state/common.py b/python/ray/experimental/state/common.py index cb332d44bf62..edebfbb64906 100644 --- a/python/ray/experimental/state/common.py +++ b/python/ray/experimental/state/common.py @@ -1516,7 +1516,7 @@ def protobuf_to_task_state_dict(message: TaskEvents) -> dict: (task_attempt, ["task_id", "attempt_number", "job_id"]), ( state_updates, - ["node_id", "worker_id", "task_log_info"], + ["node_id", "worker_id", "task_log_info", "actor_repr_name"], ), ] for src, keys in mappings: @@ -1566,6 +1566,19 @@ def protobuf_to_task_state_dict(message: TaskEvents) -> dict: ) task_state["error_type"] = error_info.get("error_type", "") + # Parse actor task name for actor with repr name. + if ( + state_updates.get("actor_repr_name") + and task_state["type"] == "ACTOR_TASK" + and task_state["name"] + == task_state["func_or_class_name"] # no name option provided. 
+ ): + # If it's an actor task with no name override, and has repr name defined + # for the actor, we override the name. + method_name = task_state["name"].split(".")[-1] + actor_repr_task_name = f"{state_updates['actor_repr_name']}.{method_name}" + task_state["name"] = actor_repr_task_name + return task_state diff --git a/python/ray/tests/test_state_api_2.py b/python/ray/tests/test_state_api_2.py index 9cf126870fae..6419bd9c132d 100644 --- a/python/ray/tests/test_state_api_2.py +++ b/python/ray/tests/test_state_api_2.py @@ -252,6 +252,67 @@ def get_actor(self, name): wait_for_condition(_verify_repr_name, id=a._actor_id.hex(), name="inner") +def test_actor_task_with_repr_name(): + @ray.remote + class ReprActor: + def __init__(self, x) -> None: + self.x = x + + def __repr__(self) -> str: + return self.x + + def f(self): + pass + + a = ReprActor.remote(x="repr-name-a") + ray.get(a.f.remote()) + + def verify(): + tasks = list_tasks(detail=True, filters=[("type", "=", "ACTOR_TASK")]) + assert len(tasks) == 1, tasks + assert tasks[0].name == "repr-name-a.f" + assert tasks[0].func_or_class_name == "ReprActor.f" + return True + + wait_for_condition(verify) + + b = ReprActor.remote(x="repr-name-b") + ray.get(b.f.options(name="custom-name").remote()) + + def verify(): + tasks = list_tasks( + detail=True, + filters=[("actor_id", "=", b._actor_id.hex()), ("type", "=", "ACTOR_TASK")], + ) + assert len(tasks) == 1, tasks + assert tasks[0].name == "custom-name" + assert tasks[0].func_or_class_name == "ReprActor.f" + return True + + wait_for_condition(verify) + + @ray.remote + class Actor: + def f(self): + pass + + c = Actor.remote() + ray.get(c.f.remote()) + + def verify(): + tasks = list_tasks( + detail=True, + filters=[("actor_id", "=", c._actor_id.hex()), ("type", "=", "ACTOR_TASK")], + ) + + assert len(tasks) == 1, tasks + assert tasks[0].name == "Actor.f" + assert tasks[0].func_or_class_name == "Actor.f" + return True + + wait_for_condition(verify) + + if __name__ == 
"__main__": import sys diff --git a/src/ray/core_worker/core_worker.cc b/src/ray/core_worker/core_worker.cc index 5313ee0efc63..1d0f313c527e 100644 --- a/src/ray/core_worker/core_worker.cc +++ b/src/ray/core_worker/core_worker.cc @@ -2562,11 +2562,25 @@ Status CoreWorker::ExecuteTask( // Modify the worker's per function counters. std::string func_name = task_spec.FunctionDescriptor()->CallString(); + std::string actor_repr_name = ""; + { + absl::MutexLock lock(&mutex_); + actor_repr_name = actor_repr_name_; + } if (!options_.is_local_mode) { task_counter_.MovePendingToRunning(func_name, task_spec.IsRetry()); - task_manager_->RecordTaskStatusEvent( - task_spec.AttemptNumber(), task_spec, rpc::TaskStatus::RUNNING); + if (task_spec.IsActorTask() && !actor_repr_name.empty()) { + task_manager_->RecordTaskStatusEvent( + task_spec.AttemptNumber(), + task_spec, + rpc::TaskStatus::RUNNING, + /* include_task_info */ false, + worker::TaskStatusEvent::TaskStateUpdate(actor_repr_name)); + } else { + task_manager_->RecordTaskStatusEvent( + task_spec.AttemptNumber(), task_spec, rpc::TaskStatus::RUNNING); + } worker_context_.SetCurrentTask(task_spec); SetCurrentTaskId(task_spec.TaskId(), task_spec.AttemptNumber(), task_spec.GetName()); @@ -3845,6 +3859,9 @@ void CoreWorker::SetActorTitle(const std::string &title) { void CoreWorker::SetActorReprName(const std::string &repr_name) { RAY_CHECK(direct_task_receiver_ != nullptr); direct_task_receiver_->SetActorReprName(repr_name); + + absl::MutexLock lock(&mutex_); + actor_repr_name_ = repr_name; } rpc::JobConfig CoreWorker::GetJobConfig() const { diff --git a/src/ray/core_worker/core_worker.h b/src/ray/core_worker/core_worker.h index 7340e9ed4901..3ca65a09594e 100644 --- a/src/ray/core_worker/core_worker.h +++ b/src/ray/core_worker/core_worker.h @@ -1562,6 +1562,9 @@ class CoreWorker : public rpc::CoreWorkerServiceHandler { /// Actor title that consists of class name, args, kwargs for actor construction. 
std::string actor_title_ GUARDED_BY(mutex_); + /// Actor repr name if overrides by the user, empty string if not. + std::string actor_repr_name_ GUARDED_BY(mutex_) = ""; + /// Number of tasks that have been pushed to the actor but not executed. std::atomic task_queue_length_; diff --git a/src/ray/core_worker/task_event_buffer.cc b/src/ray/core_worker/task_event_buffer.cc index 61c643cfd14e..086ecf314411 100644 --- a/src/ray/core_worker/task_event_buffer.cc +++ b/src/ray/core_worker/task_event_buffer.cc @@ -95,6 +95,10 @@ bool TaskStatusEvent::ToRpcTaskEventsOrDrop(rpc::TaskEvents *rpc_task_events) { state_update_->task_log_info_.value()); } + if (!state_update_->actor_repr_name_.empty()) { + dst_state_update->set_actor_repr_name(state_update_->actor_repr_name_); + } + return false; } diff --git a/src/ray/core_worker/task_event_buffer.h b/src/ray/core_worker/task_event_buffer.h index 7ce29cc92c9c..fddc873bf601 100644 --- a/src/ray/core_worker/task_event_buffer.h +++ b/src/ray/core_worker/task_event_buffer.h @@ -90,6 +90,9 @@ class TaskStatusEvent : public TaskEvent { TaskStateUpdate(const rpc::TaskLogInfo &task_log_info) : task_log_info_(task_log_info) {} + TaskStateUpdate(const std::string &actor_repr_name) + : actor_repr_name_(actor_repr_name) {} + private: friend class TaskStatusEvent; @@ -101,6 +104,8 @@ class TaskStatusEvent : public TaskEvent { const absl::optional error_info_ = absl::nullopt; /// Task log info. const absl::optional task_log_info_ = absl::nullopt; + /// Actor task repr name. + const std::string actor_repr_name_ = ""; }; explicit TaskStatusEvent( diff --git a/src/ray/protobuf/gcs.proto b/src/ray/protobuf/gcs.proto index f5949f7f3df1..50fbe259286b 100644 --- a/src/ray/protobuf/gcs.proto +++ b/src/ray/protobuf/gcs.proto @@ -240,6 +240,8 @@ message TaskStateUpdate { optional RayErrorInfo error_info = 9; // Task logs info. optional TaskLogInfo task_log_info = 10; + // Actor task repr name. 
+ optional string actor_repr_name = 11; } // Represents events and state changes from a single task run. From c8e007b6f952aab7c3e4ff40b374445c7cf05f2d Mon Sep 17 00:00:00 2001 From: Kai Fricke Date: Tue, 16 May 2023 16:50:37 +0200 Subject: [PATCH 409/424] [tune/execution] 1/n Add more unittests for TuneController (#34833) For full test coverage, we should migrate all tests in `test_ray_trial_executor` and `test_trial_runner_*` to use the new execution backend as well. This is a WIP PR to migrate the first batch of these unittests. Signed-off-by: Kai Fricke --- python/ray/tune/BUILD | 48 +++ python/ray/tune/execution/tune_controller.py | 33 +- .../test_controller_callback_integration.py | 67 ++++ ...st_controller_checkpointing_integration.py | 303 +++++++++++++++ .../test_controller_control_integration.py | 91 +++++ .../test_controller_errors_integration.py | 196 ++++++++++ .../test_controller_resources_integration.py | 259 +++++++++++++ .../test_controller_search_alg_integration.py | 366 ++++++++++++++++++ python/ray/tune/tests/execution/utils.py | 33 +- .../tune/tests/test_trial_relative_logdir.py | 12 + python/ray/tune/tests/test_trial_runner.py | 32 +- python/ray/tune/utils/log.py | 15 +- rllib/algorithms/mock.py | 5 + 13 files changed, 1399 insertions(+), 61 deletions(-) create mode 100644 python/ray/tune/tests/execution/test_controller_callback_integration.py create mode 100644 python/ray/tune/tests/execution/test_controller_checkpointing_integration.py create mode 100644 python/ray/tune/tests/execution/test_controller_control_integration.py create mode 100644 python/ray/tune/tests/execution/test_controller_errors_integration.py create mode 100644 python/ray/tune/tests/execution/test_controller_resources_integration.py create mode 100644 python/ray/tune/tests/execution/test_controller_search_alg_integration.py diff --git a/python/ray/tune/BUILD b/python/ray/tune/BUILD index dc2bc678e646..78ecaabdeed4 100644 --- a/python/ray/tune/BUILD +++ 
b/python/ray/tune/BUILD @@ -518,6 +518,54 @@ py_test( tags = ["team:ml", "exclusive"] ) +py_test( + name = "test_controller_callback_integration", + size = "large", + srcs = ["tests/execution/test_controller_callback_integration.py"], + deps = [":tune_lib"], + tags = ["team:ml", "exclusive"] +) + +py_test( + name = "test_controller_checkpointing_integration", + size = "large", + srcs = ["tests/execution/test_controller_checkpointing_integration.py"], + deps = [":tune_lib"], + tags = ["team:ml", "exclusive"] +) + +py_test( + name = "test_controller_control_integration", + size = "large", + srcs = ["tests/execution/test_controller_control_integration.py"], + deps = [":tune_lib"], + tags = ["team:ml", "exclusive"] +) + +py_test( + name = "test_controller_errors_integration", + size = "large", + srcs = ["tests/execution/test_controller_errors_integration.py"], + deps = [":tune_lib"], + tags = ["team:ml", "exclusive"] +) + +py_test( + name = "test_controller_resources_integration", + size = "large", + srcs = ["tests/execution/test_controller_resources_integration.py"], + deps = [":tune_lib"], + tags = ["team:ml", "exclusive"] +) + +py_test( + name = "test_controller_search_alg_integration", + size = "large", + srcs = ["tests/execution/test_controller_search_alg_integration.py"], + deps = [":tune_lib"], + tags = ["team:ml", "exclusive"] +) + # -------------------------------------------------------------------- # Examples from the python/ray/tune/examples directory. # Please keep these sorted alphabetically. 
diff --git a/python/ray/tune/execution/tune_controller.py b/python/ray/tune/execution/tune_controller.py index de77eeecf80b..95d2fa11121c 100644 --- a/python/ray/tune/execution/tune_controller.py +++ b/python/ray/tune/execution/tune_controller.py @@ -35,6 +35,7 @@ from ray.tune.syncer import SyncConfig from ray.tune.experiment import Trial from ray.tune.utils import warn_if_slow +from ray.tune.utils.log import _dedup_logs from ray.tune.utils.object_cache import _ObjectCache from ray.tune.utils.resource_updater import _ResourceUpdater from ray.util.annotations import DeveloperAPI @@ -69,11 +70,11 @@ def __init__( _trainer_api: bool = False, ): if resource_manager_factory: - self._resource_manager = resource_manager_factory() + resource_manager = resource_manager_factory() else: - self._resource_manager = PlacementGroupResourceManager() + resource_manager = PlacementGroupResourceManager() - self._actor_manager = RayActorManager(resource_manager=self._resource_manager) + self._actor_manager = RayActorManager(resource_manager=resource_manager) self._class_cache = _class_cache @@ -397,7 +398,10 @@ def _cleanup_trials(self): start = time.monotonic() while time.monotonic() - start < 5 and self._actor_manager.num_total_actors: - logger.debug("Waiting for actor manager to clean up final state") + if _dedup_logs("actor_manager_cleanup", str(start)): + logger.debug( + "Waiting for actor manager to clean up final state [dedup]" + ) self._actor_manager.next(timeout=1) logger.debug("Force cleanup of remaining actors") @@ -440,7 +444,10 @@ def _maybe_add_actors(self) -> None: trial_to_run = self._scheduler_alg.choose_trial_to_run(self._wrapped()) if trial_to_run: - logger.debug(f"Chose trial to run from scheduler: {trial_to_run}") + if _dedup_logs("trial_to_run_chosen", trial_to_run.trial_id): + logger.debug( + f"Chose trial to run from scheduler: {trial_to_run} [dedup]" + ) if ( trial_to_run not in self._staged_trials and trial_to_run not in self._trial_to_actor @@ -453,7 
+460,11 @@ def _maybe_add_actors(self) -> None: self._schedule_trial_actor(trial_to_run) else: # Otherwise, only try to use the cached actor - logger.debug(f"Trying to re-use actor for trial to run: {trial_to_run}") + if _dedup_logs("trial_to_run_reuse", trial_to_run.trial_id): + logger.debug( + f"Trying to re-use actor for trial to run: {trial_to_run} " + f"[dedup]" + ) self._maybe_reuse_cached_actor(trial_to_run) ### @@ -462,7 +473,7 @@ def _maybe_add_actors(candidates: List[Trial]): new_candidates = [] while candidates: - if len(self._staged_trials) >= self._max_pending_trials: + if self._actor_manager.num_pending_actors >= self._max_pending_trials: break trial = candidates.pop(0) @@ -574,10 +585,13 @@ def _schedule_trial_actor(self, trial: Trial): trainable_cls = trial.get_trainable_cls() if not trainable_cls: - raise _AbortTrialExecution( + exception = _AbortTrialExecution( f"Invalid trainable: {trial.trainable_name}. If you passed " f"a string, make sure the trainable was registered before." 
) + self._schedule_trial_stop(trial, exception=exception) + return + _actor_cls = self._class_cache.get(trainable_cls) trial.set_location(_Location()) @@ -856,7 +870,7 @@ def _trial_task_failure(self, trial: Trial, exception: Exception): raise exception else: if self._print_trial_errors: - logger.error("Trial task failed", exc_info=exception) + logger.error(f"Trial task failed for trial {trial}", exc_info=exception) self._process_trial_failure(trial, exception=exception) def _schedule_trial_stop(self, trial: Trial, exception: Optional[Exception] = None): @@ -1137,7 +1151,6 @@ def _on_trial_reset(self, trial: Trial, success: bool): def __getstate__(self): state = super().__getstate__() for exclude in [ - "_resource_manager", "_actor_manager", "_class_cache", "_resource_updater", diff --git a/python/ray/tune/tests/execution/test_controller_callback_integration.py b/python/ray/tune/tests/execution/test_controller_callback_integration.py new file mode 100644 index 000000000000..84befbaa1d8e --- /dev/null +++ b/python/ray/tune/tests/execution/test_controller_callback_integration.py @@ -0,0 +1,67 @@ +from typing import Dict, Optional + +import pytest +import sys + +import ray +from ray.air.execution import FixedResourceManager, PlacementGroupResourceManager +from ray.tune import Callback +from ray.tune.execution.tune_controller import TuneController +from ray.tune.experiment import Trial + + +@pytest.fixture(scope="function") +def ray_start_4_cpus_2_gpus_extra(): + address_info = ray.init(num_cpus=4, num_gpus=2, resources={"a": 2}) + yield address_info + ray.shutdown() + + +class StatefulCallback(Callback): + CKPT_FILE_TMPL = "test-callback-state-{}.json" + + def __init__(self): + self.counter = 0 + + def on_trial_result(self, iteration, trials, trial, result, **info): + self.counter += 1 + + def get_state(self) -> Optional[Dict]: + return {"counter": self.counter} + + def set_state(self, state: Dict): + self.counter = state["counter"] + + +@pytest.mark.parametrize( + 
"resource_manager_cls", [FixedResourceManager, PlacementGroupResourceManager] +) +def test_callback_save_restore( + ray_start_4_cpus_2_gpus_extra, resource_manager_cls, tmpdir +): + """Check that callback state is restored correctly. + + Legacy test: test_trial_runner_3.py::TrialRunnerTest::testCallbackSaveRestore + """ + runner = TuneController( + callbacks=[StatefulCallback()], + experiment_path=str(tmpdir), + ) + runner.add_trial(Trial("__fake", stub=True)) + for i in range(3): + runner._callbacks.on_trial_result( + iteration=i, trials=None, trial=None, result=None + ) + runner.checkpoint(force=True) + callback = StatefulCallback() + runner2 = TuneController( + callbacks=[callback], + experiment_path=str(tmpdir), + ) + assert callback.counter == 0 + runner2.resume() + assert callback.counter == 3 + + +if __name__ == "__main__": + sys.exit(pytest.main(["-v", __file__])) diff --git a/python/ray/tune/tests/execution/test_controller_checkpointing_integration.py b/python/ray/tune/tests/execution/test_controller_checkpointing_integration.py new file mode 100644 index 000000000000..2e13f5d84574 --- /dev/null +++ b/python/ray/tune/tests/execution/test_controller_checkpointing_integration.py @@ -0,0 +1,303 @@ +import json +import os +import shutil + +import pytest +import sys + +import ray +from ray.air import CheckpointConfig +from ray.air._internal.checkpoint_manager import _TrackedCheckpoint, CheckpointStorage +from ray.air.execution import FixedResourceManager, PlacementGroupResourceManager +from ray.tune import PlacementGroupFactory +from ray.tune.execution.tune_controller import TuneController +from ray.tune.experiment import Trial +from ray.tune.result import TRAINING_ITERATION, DONE +from ray.tune.schedulers import FIFOScheduler +from ray.tune.search import BasicVariantGenerator +from ray.tune.trainable import TrainableUtil + + +@pytest.fixture(scope="function") +def ray_start_4_cpus_2_gpus_extra(): + address_info = ray.init(num_cpus=4, num_gpus=2, 
resources={"a": 2}) + yield address_info + ray.shutdown() + + +def create_mock_components(): + class _MockScheduler(FIFOScheduler): + errored_trials = [] + + def on_trial_error(self, trial_runner, trial): + self.errored_trials += [trial] + + class _MockSearchAlg(BasicVariantGenerator): + errored_trials = [] + + def on_trial_complete(self, trial_id, error=False, **kwargs): + if error: + self.errored_trials += [trial_id] + + searchalg = _MockSearchAlg() + scheduler = _MockScheduler() + return searchalg, scheduler + + +@pytest.mark.parametrize( + "resource_manager_cls", [FixedResourceManager, PlacementGroupResourceManager] +) +def test_checkpoint_save_restore( + ray_start_4_cpus_2_gpus_extra, resource_manager_cls, tmpdir +): + """Test that a checkpoint is saved and can be used to restore a trainable. + + The trainable saves a checkpoint and terminates. We then start another trial + that should restore from the saved checkpoint and assert that it picks up + the state and continues to run to termination. 
+ + Legacy test: test_trial_runner_2.py::TrialRunnerTest::testCheckpointing + Legacy test: test_trial_runner_2.py::TrialRunnerTest::testRestoreMetricsAfterCheckpointing # noqa + """ + runner = TuneController( + resource_manager_factory=lambda: resource_manager_cls(), + experiment_path=str(tmpdir), + ) + kwargs = { + "stopping_criterion": {"training_iteration": 1}, + "placement_group_factory": PlacementGroupFactory([{"CPU": 1, "GPU": 1}]), + "checkpoint_config": CheckpointConfig(checkpoint_frequency=1), + } + runner.add_trial(Trial("__fake", **kwargs)) + trials = runner.get_trials() + + runner.step() # Start trial + + while trials[0].status != Trial.RUNNING: + runner.step() + + # Set some state that will be saved in the checkpoint + assert ray.get(trials[0].runner.set_info.remote(1)) == 1 + + while trials[0].status != Trial.TERMINATED: + runner.step() + + assert trials[0].checkpoint.metrics[TRAINING_ITERATION] == 1 + assert trials[0].last_result[TRAINING_ITERATION] == 1 + assert trials[0].last_result["iterations_since_restore"] == 1 + + # Prepare new trial + kwargs["restore_path"] = trials[0].checkpoint.dir_or_data + runner.add_trial(Trial("__fake", **kwargs)) + trials = runner.get_trials() + + assert trials[1].status == Trial.PENDING + + # Start trial, restore, run to termination + while trials[1].status != Trial.RUNNING: + runner.step() + + # Restore + runner.step() + + assert ray.get(trials[1].runner.get_info.remote()) == 1 + + # Run to termination + while trials[1].status != Trial.TERMINATED: + runner.step() + + assert trials[1].checkpoint.metrics[TRAINING_ITERATION] == 2 + assert trials[1].last_result[TRAINING_ITERATION] == 2 + assert trials[1].last_result["iterations_since_restore"] == 1 + assert trials[1].last_result["time_since_restore"] > 0 + + +@pytest.mark.parametrize( + "resource_manager_cls", [FixedResourceManager, PlacementGroupResourceManager] +) +def test_checkpoint_at_end(ray_start_4_cpus_2_gpus_extra, resource_manager_cls, tmpdir): + """Test that a 
checkpoint is saved at end for class trainables with that config. + + Legacy test: test_trial_runner_2.py::TrialRunnerTest::testCheckpointingAtEnd + Legacy test: test_trial_runner_2.py::TrialRunnerTest::testResultDone + """ + runner = TuneController( + resource_manager_factory=lambda: resource_manager_cls(), + experiment_path=str(tmpdir), + ) + kwargs = { + "stopping_criterion": {"training_iteration": 2}, + "checkpoint_config": CheckpointConfig(checkpoint_at_end=True), + "placement_group_factory": PlacementGroupFactory([{"CPU": 1, "GPU": 1}]), + } + runner.add_trial(Trial("__fake", **kwargs)) + trials = runner.get_trials() + + while not runner.is_finished(): + runner.step() + + assert trials[0].has_checkpoint() + assert trials[0].last_result[DONE] + + +@pytest.mark.parametrize( + "resource_manager_cls", [FixedResourceManager, PlacementGroupResourceManager] +) +def test_pause_resume_trial( + ray_start_4_cpus_2_gpus_extra, resource_manager_cls, tmpdir +): + """Test that trial that is paused and resumed picks up its last checkpoint. 
+ + Legacy test: test_trial_runner_2.py::TrialRunnerTest::testPauseThenResume + """ + runner = TuneController( + resource_manager_factory=lambda: resource_manager_cls(), + experiment_path=str(tmpdir), + ) + kwargs = { + "stopping_criterion": {"training_iteration": 2}, + "placement_group_factory": PlacementGroupFactory([{"CPU": 1, "GPU": 1}]), + "checkpoint_config": CheckpointConfig(checkpoint_frequency=1), + } + runner.add_trial(Trial("__fake", **kwargs)) + trials = runner.get_trials() + + while trials[0].status != Trial.RUNNING: + runner.step() + + assert ray.get(trials[0].runner.get_info.remote()) is None + assert ray.get(trials[0].runner.set_info.remote(1)) == 1 + + runner._schedule_trial_pause(trials[0], should_checkpoint=True) + + while trials[0].status != Trial.PAUSED: + runner.step() + + assert trials[0].has_checkpoint() + assert DONE not in trials[0].last_result + + # Start again + runner._set_trial_status(trials[0], Trial.PENDING) + + while trials[0].status != Trial.RUNNING: + runner.step() + + assert ray.get(trials[0].runner.get_info.remote()) == 1 + + while trials[0].status != Trial.TERMINATED: + runner.step() + + assert trials[0].checkpoint.metrics[TRAINING_ITERATION] == 2 + assert trials[0].last_result[TRAINING_ITERATION] == 2 + assert trials[0].last_result["iterations_since_restore"] == 1 + assert trials[0].last_result["time_since_restore"] > 0 + + +@pytest.mark.parametrize( + "resource_manager_cls", [FixedResourceManager, PlacementGroupResourceManager] +) +def test_checkpoint_num_to_keep( + ray_start_4_cpus_2_gpus_extra, resource_manager_cls, tmpdir +): + """Test that only num_to_keep checkpoints are kept. + + This should also hold true when the experiment is resumed. 
+ + Legacy test: test_trial_runner_2.py::TrialRunnerTest::testPauseResumeCheckpointCount + """ + trial = Trial( + "__fake", + experiment_path=str(tmpdir), + checkpoint_config=CheckpointConfig(num_to_keep=2), + ) + trial.init_local_path() + trial.checkpoint_manager.set_delete_fn(lambda cp: shutil.rmtree(cp.dir_or_data)) + + def write_checkpoint(trial: Trial, index: int): + checkpoint_dir = TrainableUtil.make_checkpoint_dir( + trial.local_path, index=index + ) + result = {"training_iteration": index} + with open(os.path.join(checkpoint_dir, "cp.json"), "w") as f: + json.dump(result, f) + + tune_cp = _TrackedCheckpoint( + dir_or_data=checkpoint_dir, + storage_mode=CheckpointStorage.PERSISTENT, + metrics=result, + ) + trial.saving_to = tune_cp + + return checkpoint_dir + + def get_checkpoint_dirs(trial: Trial): + return [d for d in os.listdir(trial.local_path) if d.startswith("checkpoint_")] + + runner = TuneController( + resource_manager_factory=lambda: resource_manager_cls(), + experiment_path=str(tmpdir), + ) + + runner.add_trial(trial) + + # Write 1 checkpoint + result = write_checkpoint(trial, 1) + runner._on_saving_result(trial, result) + + # Expect 1 checkpoint + cp_dirs = get_checkpoint_dirs(trial) + assert len(cp_dirs) == 1, f"Checkpoint dirs: {cp_dirs}" + + # Write second checkpoint + result = write_checkpoint(trial, 2) + runner._on_saving_result(trial, result) + + # Expect 2 checkpoints + cp_dirs = get_checkpoint_dirs(trial) + assert len(cp_dirs) == 2, f"Checkpoint dirs: {cp_dirs}" + + # Write third checkpoint + result = write_checkpoint(trial, 3) + runner._on_saving_result(trial, result) + + # Expect 2 checkpoints because num_to_keep = 2 + cp_dirs = get_checkpoint_dirs(trial) + assert len(cp_dirs) == 2, f"Checkpoint dirs: {cp_dirs}" + + # Re-instantiate trial runner and resume + runner.checkpoint(force=True) + runner = TuneController( + resource_manager_factory=lambda: resource_manager_cls(), + experiment_path=str(tmpdir), + ) + runner.resume() + + trial = 
runner.get_trials()[0] + trial.checkpoint_manager.set_delete_fn(lambda cp: shutil.rmtree(cp.dir_or_data)) + + # Write fourth checkpoint + result = write_checkpoint(trial, 4) + runner._on_saving_result(trial, result) + + # Expect 2 checkpoints because num_to_keep = 2 + cp_dirs = get_checkpoint_dirs(trial) + assert len(cp_dirs) == 2, f"Checkpoint dirs: {cp_dirs}" + + # Write fifth checkpoint + result = write_checkpoint(trial, 5) + runner._on_saving_result(trial, result) + + # Expect 2 checkpoints because num_to_keep = 2 + cp_dirs = get_checkpoint_dirs(trial) + assert len(cp_dirs) == 2, f"Checkpoint dirs: {cp_dirs}" + + # Checkpoints before restore should be deleted + assert "checkpoint_000004" in cp_dirs + assert "checkpoint_000005" in cp_dirs + + assert "checkpoint_000002" not in cp_dirs + assert "checkpoint_000003" not in cp_dirs + + +if __name__ == "__main__": + sys.exit(pytest.main(["-v", __file__])) diff --git a/python/ray/tune/tests/execution/test_controller_control_integration.py b/python/ray/tune/tests/execution/test_controller_control_integration.py new file mode 100644 index 000000000000..dc677ed94eb8 --- /dev/null +++ b/python/ray/tune/tests/execution/test_controller_control_integration.py @@ -0,0 +1,91 @@ +from collections import Counter + +import pytest +import sys + +import ray +from ray.air.execution import FixedResourceManager, PlacementGroupResourceManager +from ray.tune import PlacementGroupFactory +from ray.tune.execution.tune_controller import TuneController +from ray.tune.experiment import Trial + + +@pytest.fixture(scope="function") +def ray_start_4_cpus_2_gpus_extra(): + address_info = ray.init(num_cpus=4, num_gpus=2, resources={"a": 2}) + yield address_info + ray.shutdown() + + +@pytest.mark.parametrize( + "resource_manager_cls", [FixedResourceManager, PlacementGroupResourceManager] +) +def test_stop_trial(ray_start_4_cpus_2_gpus_extra, resource_manager_cls): + """Stopping a trial while RUNNING or PENDING should work. 
+ + Legacy test: test_trial_runner_3.py::TrialRunnerTest::testStopTrial + """ + runner = TuneController( + resource_manager_factory=lambda: resource_manager_cls(), + ) + kwargs = { + "stopping_criterion": {"training_iteration": 10}, + "placement_group_factory": PlacementGroupFactory([{"CPU": 2, "GPU": 1}]), + "config": {"sleep": 1}, + } + trials = [ + Trial("__fake", **kwargs), + Trial("__fake", **kwargs), + Trial("__fake", **kwargs), + Trial("__fake", **kwargs), + ] + for t in trials: + runner.add_trial(t) + + counter = Counter(t.status for t in trials) + + # Wait until 2 trials started + while counter.get("RUNNING", 0) != 2: + runner.step() + counter = Counter(t.status for t in trials) + + assert counter.get("RUNNING", 0) == 2 + assert counter.get("PENDING", 0) == 2 + + # Stop trial that is running + for trial in trials: + if trial.status == Trial.RUNNING: + runner._schedule_trial_stop(trial) + break + + counter = Counter(t.status for t in trials) + + # Wait until the next trial started + while counter.get("RUNNING", 0) < 2: + runner.step() + counter = Counter(t.status for t in trials) + + assert counter.get("RUNNING", 0) == 2 + assert counter.get("TERMINATED", 0) == 1 + assert counter.get("PENDING", 0) == 1 + + # Stop trial that is pending + for trial in trials: + if trial.status == Trial.PENDING: + runner._schedule_trial_stop(trial) + break + + counter = Counter(t.status for t in trials) + + # Wait until 2 trials are running again + while counter.get("RUNNING", 0) < 2: + runner.step() + counter = Counter(t.status for t in trials) + + assert counter.get("RUNNING", 0) == 2 + assert counter.get("TERMINATED", 0) == 2 + assert counter.get("PENDING", 0) == 0 + + +if __name__ == "__main__": + sys.exit(pytest.main(["-v", __file__])) diff --git a/python/ray/tune/tests/execution/test_controller_errors_integration.py b/python/ray/tune/tests/execution/test_controller_errors_integration.py new file mode 100644 index 000000000000..6b69fd272343 --- /dev/null +++ 
b/python/ray/tune/tests/execution/test_controller_errors_integration.py @@ -0,0 +1,196 @@ +import os +from collections import Counter + +import pytest +import sys + +import ray +from ray.air import CheckpointConfig +from ray.air.execution import FixedResourceManager, PlacementGroupResourceManager +from ray.tune import PlacementGroupFactory, TuneError +from ray.tune.execution.tune_controller import TuneController +from ray.tune.experiment import Trial +from ray.tune.registry import TRAINABLE_CLASS, _global_registry +from ray.tune.schedulers import FIFOScheduler +from ray.tune.search import BasicVariantGenerator +from ray.tune.tests.execution.utils import BudgetResourceManager + + +@pytest.fixture(scope="function") +def ray_start_4_cpus_2_gpus_extra(): + address_info = ray.init(num_cpus=4, num_gpus=2, resources={"a": 2}) + yield address_info + ray.shutdown() + + +def create_mock_components(): + class _MockScheduler(FIFOScheduler): + errored_trials = [] + + def on_trial_error(self, trial_runner, trial): + self.errored_trials += [trial] + + class _MockSearchAlg(BasicVariantGenerator): + errored_trials = [] + + def on_trial_complete(self, trial_id, error=False, **kwargs): + if error: + self.errored_trials += [trial_id] + + searchalg = _MockSearchAlg() + scheduler = _MockScheduler() + return searchalg, scheduler + + +@pytest.mark.parametrize( + "resource_manager_cls", [FixedResourceManager, PlacementGroupResourceManager] +) +def test_invalid_trainable(ray_start_4_cpus_2_gpus_extra, resource_manager_cls): + """An invalid trainable should make the trial fail on startup. + + The controller itself should continue. Other trials should run. 
+ + Legacy test: test_trial_runner_2.py::TrialRunnerTest::testErrorHandling + """ + runner = TuneController( + resource_manager_factory=lambda: resource_manager_cls(), + ) + kwargs = { + "stopping_criterion": {"training_iteration": 1}, + "placement_group_factory": PlacementGroupFactory([{"CPU": 1, "GPU": 1}]), + } + _global_registry.register(TRAINABLE_CLASS, "asdf", None) + trials = [Trial("asdf", **kwargs), Trial("__fake", **kwargs)] + for t in trials: + runner.add_trial(t) + + while not trials[1].status == Trial.RUNNING: + runner.step() + assert trials[0].status == Trial.ERROR + assert trials[1].status == Trial.RUNNING + + +def test_overstep(ray_start_4_cpus_2_gpus_extra): + """Stepping when trials are finished should raise a TuneError. + + Legacy test: test_trial_runner_2.py::TrialRunnerTest::testThrowOnOverstep + """ + os.environ["TUNE_MAX_PENDING_TRIALS_PG"] = "1" + runner = TuneController( + resource_manager_factory=lambda: BudgetResourceManager({"CPU": 4}), + ) + runner.step() + with pytest.raises(TuneError): + runner.step() + + +@pytest.mark.parametrize( + "resource_manager_cls", [FixedResourceManager, PlacementGroupResourceManager] +) +@pytest.mark.parametrize("max_failures_persistent", [(0, False), (1, False), (2, True)]) +def test_failure_recovery( + ray_start_4_cpus_2_gpus_extra, resource_manager_cls, max_failures_persistent +): + """Test failure recovery with `max_failures`. + + Trials should be retried up to `max_failures` times.
+ + Legacy test: test_trial_runner_2.py::TrialRunnerTest::testFailureRecoveryDisabled + Legacy test: test_trial_runner_2.py::TrialRunnerTest::testFailureRecoveryEnabled + Legacy test: test_trial_runner_2.py::TrialRunnerTest::testFailureRecoveryMaxFailures + """ + max_failures, persistent_error = max_failures_persistent + searchalg, scheduler = create_mock_components() + + runner = TuneController( + search_alg=searchalg, + scheduler=scheduler, + resource_manager_factory=lambda: resource_manager_cls(), + ) + kwargs = { + "placement_group_factory": PlacementGroupFactory([{"CPU": 1, "GPU": 1}]), + "stopping_criterion": {"training_iteration": 2}, + "checkpoint_config": CheckpointConfig(checkpoint_frequency=1), + "max_failures": max_failures, + "config": {"mock_error": True, "persistent_error": persistent_error}, + } + runner.add_trial(Trial("__fake", **kwargs)) + trials = runner.get_trials() + + while not runner.is_finished(): + runner.step() + + if persistent_error or not max_failures: + assert trials[0].status == Trial.ERROR + + num_failures = max_failures + 1 + assert trials[0].num_failures == num_failures + # search alg receives on_complete, so only after the max failures + # have been exhausted. Thus, it only has errored_trials if the + # trial fails even in the last try. + assert len(searchalg.errored_trials) == 1 + # search alg receives on_error, so every failure is registered. + assert len(scheduler.errored_trials) == num_failures + else: + assert trials[0].status == Trial.TERMINATED + assert trials[0].num_failures == 1 + assert len(searchalg.errored_trials) == 0 + assert len(scheduler.errored_trials) == 1 + + +@pytest.mark.parametrize( + "resource_manager_cls", [FixedResourceManager, PlacementGroupResourceManager] +) +@pytest.mark.parametrize("fail_fast", [True, TuneController.RAISE]) +def test_fail_fast(ray_start_4_cpus_2_gpus_extra, resource_manager_cls, fail_fast): + """Test fail_fast feature. 
+ + If fail_fast=True, after the first failure, all other trials should be terminated + (because we end the experiment). + + If fail_fast=RAISE, after the first failure, we should raise an error. + + Legacy test: test_trial_runner_2.py::TrialRunnerTest::testFailFast + Legacy test: test_trial_runner_2.py::TrialRunnerTest::testFailFastRaise + """ + + runner = TuneController( + resource_manager_factory=lambda: resource_manager_cls(), fail_fast=fail_fast + ) + kwargs = { + "placement_group_factory": PlacementGroupFactory([{"CPU": 1, "GPU": 1}]), + "checkpoint_config": CheckpointConfig(checkpoint_frequency=1), + "max_failures": 0, + "config": { + "mock_error": True, + "persistent_error": True, + }, + } + runner.add_trial(Trial("__fake", **kwargs)) + runner.add_trial(Trial("__fake", **kwargs)) + trials = runner.get_trials() + + if fail_fast == TuneController.RAISE: + with pytest.raises(Exception): + while not runner.is_finished(): + runner.step() + runner.cleanup() + return + else: + while not runner.is_finished(): + runner.step() + + status_count = Counter(t.status for t in trials) + + # One trial failed + assert status_count.get(Trial.ERROR) == 1 + # The other one was pre-empted + assert status_count.get(Trial.TERMINATED) == 1 + + # Controller finished + with pytest.raises(TuneError): + runner.step() + + +if __name__ == "__main__": + sys.exit(pytest.main(["-v", __file__])) diff --git a/python/ray/tune/tests/execution/test_controller_resources_integration.py b/python/ray/tune/tests/execution/test_controller_resources_integration.py new file mode 100644 index 000000000000..40f775c066c0 --- /dev/null +++ b/python/ray/tune/tests/execution/test_controller_resources_integration.py @@ -0,0 +1,259 @@ +import os +import time +from collections import Counter + +import pytest +import sys + +import ray +from ray import tune +from ray.air.execution import FixedResourceManager, PlacementGroupResourceManager +from ray.tune import PlacementGroupFactory, TuneError +from 
ray.tune.execution.tune_controller import TuneController +from ray.tune.experiment import Trial +from ray.tune.schedulers import FIFOScheduler, TrialScheduler +from ray.tune.search import BasicVariantGenerator +from ray.tune.utils.mock import TrialStatusSnapshot, TrialStatusSnapshotTaker + + +@pytest.fixture(scope="function") +def ray_start_4_cpus_2_gpus_extra(): + address_info = ray.init(num_cpus=4, num_gpus=2, resources={"a": 2}) + yield address_info + ray.shutdown() + + +@pytest.mark.parametrize( + "resource_manager_cls", [FixedResourceManager, PlacementGroupResourceManager] +) +@pytest.mark.parametrize( + "bundles", + [ + [{"CPU": 1}, {"CPU": 3, "GPU": 1}], + [{"CPU": 1, "a": 2}], + [{"CPU": 1}, {"a": 2}], + [{"CPU": 1, "GPU": 1}], + ], +) +def test_resource_parallelism_single( + ray_start_4_cpus_2_gpus_extra, resource_manager_cls, bundles +): + """Test that extra and custom resources are respected for parallelism. + + We schedule two trials with resources according to the bundle. If only + the head bundle or only CPU/GPU resources were considered, both trials + could run in parallel. + + However, we assert that the resources in child bundles and extra resources + are respected and only one trial runs in parallel. 
+ + Legacy test: test_trial_runner.py::TrialRunnerTest::testExtraResources + Legacy test: test_trial_runner.py::TrialRunnerTest::testCustomResources + Legacy test: test_trial_runner.py::TrialRunnerTest::testExtraCustomResources + Legacy test: test_trial_runner.py::TrialRunnerTest::testResourceScheduler + """ + snapshot = TrialStatusSnapshot() + runner = TuneController( + resource_manager_factory=lambda: resource_manager_cls(), + callbacks=[TrialStatusSnapshotTaker(snapshot)], + ) + kwargs = { + "stopping_criterion": {"training_iteration": 1}, + "placement_group_factory": PlacementGroupFactory(bundles), + } + trials = [Trial("__fake", **kwargs), Trial("__fake", **kwargs)] + for t in trials: + runner.add_trial(t) + + while not runner.is_finished(): + runner.step() + + assert snapshot.max_running_trials() == 1 + assert snapshot.all_trials_are_terminated() + + +@pytest.mark.parametrize( + "resource_manager_cls", [FixedResourceManager, PlacementGroupResourceManager] +) +def test_fractional_gpus(ray_start_4_cpus_2_gpus_extra, resource_manager_cls): + """Test that fractional GPUs lead to more parallelism. + + We schedule four trials with 0.75 GPUs each. Since our cluster has 2 GPUs, + we should be able to run 2 trials in parallel. 
+ + Legacy test: test_trial_runner.py::TrialRunnerTest::testFractionalGpus + """ + snapshot = TrialStatusSnapshot() + runner = TuneController( + resource_manager_factory=lambda: resource_manager_cls(), + callbacks=[TrialStatusSnapshotTaker(snapshot)], + ) + kwargs = { + "stopping_criterion": {"training_iteration": 1}, + "placement_group_factory": PlacementGroupFactory([{"GPU": 0.75}]), + "config": { + "sleep": 1, + }, + } + trials = [Trial("__fake", **kwargs) for i in range(4)] + for t in trials: + runner.add_trial(t) + + while not runner.is_finished(): + runner.step() + + assert snapshot.max_running_trials() == 2 + assert snapshot.all_trials_are_terminated() + + +@pytest.mark.parametrize( + "resource_manager_cls", [FixedResourceManager, PlacementGroupResourceManager] +) +def test_multi_step(ray_start_4_cpus_2_gpus_extra, resource_manager_cls): + """Test that trials can run for more than one iteration. + + Todo (krfricke): This is not a resource test, so it should be moved. + + Legacy test: test_trial_runner.py::TrialRunnerTest::testMultiStepRun + Legacy test: test_trial_runner.py::TrialRunnerTest::testMultiStepRun2 + """ + snapshot = TrialStatusSnapshot() + runner = TuneController( + resource_manager_factory=lambda: resource_manager_cls(), + callbacks=[TrialStatusSnapshotTaker(snapshot)], + ) + kwargs = { + "stopping_criterion": {"training_iteration": 5}, + "placement_group_factory": PlacementGroupFactory([{"CPU": 1, "GPU": 1}]), + } + trials = [Trial("__fake", **kwargs) for i in range(2)] + for t in trials: + runner.add_trial(t) + + while not runner.is_finished(): + runner.step() + + # Overstepping should throw error + # test_trial_runner.py::TrialRunnerTest::testMultiStepRun2 + with pytest.raises(TuneError): + runner.step() + + assert snapshot.all_trials_are_terminated() + assert all(t.last_result["training_iteration"] == 5 for t in runner.get_trials()) + + +@pytest.mark.parametrize( + "resource_manager_cls", [FixedResourceManager, PlacementGroupResourceManager] 
+) +def test_resources_changing(ray_start_4_cpus_2_gpus_extra, resource_manager_cls): + """Checks that resource requirements can be changed on the fly. + + Legacy test: test_trial_runner.py::TrialRunnerTest::testChangeResources + """ + + class ChangingScheduler(FIFOScheduler): + def __init__(self): + self._has_received_one_trial_result = False + + # For figuring out how many runner.step there are. + def has_received_one_trial_result(self): + return self._has_received_one_trial_result + + def on_trial_result(self, trial_runner, trial, result): + if result["training_iteration"] == 1: + self._has_received_one_trial_result = True + executor = trial_runner.trial_executor + executor.pause_trial(trial) + trial.update_resources(dict(cpu=4, gpu=0)) + return TrialScheduler.NOOP + + scheduler = ChangingScheduler() + runner = TuneController( + resource_manager_factory=lambda: resource_manager_cls(), scheduler=scheduler + ) + kwargs = { + "stopping_criterion": {"training_iteration": 2}, + "placement_group_factory": PlacementGroupFactory([{"CPU": 2, "GPU": 0}]), + } + trials = [Trial("__fake", **kwargs)] + for t in trials: + runner.add_trial(t) + + while not trials[0].status == Trial.RUNNING: + runner.step() + + assert trials[0].status == Trial.RUNNING + assert runner._actor_manager.get_live_actors_resources().get("CPU") == 2 + + with pytest.raises(ValueError): + trials[0].update_resources(dict(cpu=4, gpu=0)) + + while not scheduler.has_received_one_trial_result(): + runner.step() + + assert trials[0].status == Trial.PAUSED + + while not trials[0].status == Trial.RUNNING: + runner.step() + + assert runner._actor_manager.get_live_actors_resources().get("CPU") == 4 + + runner.step() + + +@pytest.mark.parametrize( + "resource_manager_cls", [FixedResourceManager, PlacementGroupResourceManager] +) +def test_queue_filling(ray_start_4_cpus_2_gpus_extra, resource_manager_cls): + """Checks that the trial queue is filled even if only 1 pending trial is allowed.
+ + Legacy test: test_trial_runner.py::TrialRunnerTest::testQueueFilling + """ + os.environ["TUNE_MAX_PENDING_TRIALS_PG"] = "1" + + def f1(config): + for i in range(10): + yield i + time.sleep(1) + + tune.register_trainable("f1", f1) + + search_alg = BasicVariantGenerator() + search_alg.add_configurations( + { + "foo": { + "run": "f1", + "num_samples": 100, + "config": { + "a": tune.sample_from(lambda spec: 5.0 / 7), + "b": tune.sample_from(lambda spec: "long" * 40), + }, + "resources_per_trial": {"cpu": 2}, + } + } + ) + + runner = TuneController( + resource_manager_factory=lambda: resource_manager_cls(), search_alg=search_alg + ) + + while len(runner.get_trials()) < 3: + runner.step() + + # All trials are enqueued + assert len(runner.get_trials()) == 3 + + status_count = Counter(t.status for t in runner.get_trials()) + while status_count.get(Trial.RUNNING, 0) < 2 and not runner.is_finished(): + runner.step() + status_count = Counter(t.status for t in runner.get_trials()) + + assert len(runner.get_trials()) == 3 + + status_count = Counter(t.status for t in runner.get_trials()) + assert status_count.get(Trial.RUNNING) == 2 + assert status_count.get(Trial.PENDING) == 1 + + +if __name__ == "__main__": + sys.exit(pytest.main(["-v", __file__])) diff --git a/python/ray/tune/tests/execution/test_controller_search_alg_integration.py b/python/ray/tune/tests/execution/test_controller_search_alg_integration.py new file mode 100644 index 000000000000..fc3e3e2cd0ed --- /dev/null +++ b/python/ray/tune/tests/execution/test_controller_search_alg_integration.py @@ -0,0 +1,366 @@ +import os +import pickle +from collections import Counter + +import pytest +import sys + +import ray +from ray.air.execution import FixedResourceManager, PlacementGroupResourceManager +from ray.tune import Experiment, PlacementGroupFactory +from ray.tune.execution.tune_controller import TuneController +from ray.tune.experiment import Trial +from ray.tune.result import TRAINING_ITERATION +from 
ray.tune.schedulers import FIFOScheduler, TrialScheduler +from ray.tune.search import Searcher, ConcurrencyLimiter, Repeater, SearchGenerator +from ray.tune.search._mock import _MockSuggestionAlgorithm + + +@pytest.fixture(scope="function") +def ray_start_8_cpus(): + address_info = ray.init(num_cpus=8, num_gpus=0) + yield address_info + ray.shutdown() + + +@pytest.fixture(scope="function") +def ray_start_4_cpus_2_gpus_extra(): + address_info = ray.init(num_cpus=4, num_gpus=2, resources={"a": 2}) + yield address_info + ray.shutdown() + + +@pytest.mark.parametrize( + "resource_manager_cls", [FixedResourceManager, PlacementGroupResourceManager] +) +def test_search_alg_notification(ray_start_4_cpus_2_gpus_extra, resource_manager_cls): + """Check that the searchers gets notified of trial results + completions. + + Also check that the searcher is "finished" before the runner, i.e. the runner + continues processing trials when the searcher finished. + + Legacy test: test_trial_runner_3.py::TrialRunnerTest::testSearchAlgNotification + Legacy test: test_trial_runner_3.py::TrialRunnerTest::testSearchAlgFinished + """ + + experiment_spec = {"run": "__fake", "stop": {"training_iteration": 2}} + experiments = [Experiment.from_json("test", experiment_spec)] + search_alg = _MockSuggestionAlgorithm() + searcher = search_alg.searcher + search_alg.add_configurations(experiments) + + runner = TuneController( + resource_manager_factory=lambda: resource_manager_cls(), search_alg=search_alg + ) + + # Run until trial is running + while not search_alg.is_finished(): + runner.step() + + trials = runner.get_trials() + + # Make sure trial started + while trials[0].status != Trial.RUNNING: + runner.step() + + assert trials[0].status == Trial.RUNNING + assert search_alg.is_finished() + assert not runner.is_finished() + + # Run until everything finished + while not runner.is_finished(): + runner.step() + + assert trials[0].status == Trial.TERMINATED + assert search_alg.is_finished() + assert 
runner.is_finished() + + assert searcher.counter["result"] == 1 + assert searcher.counter["complete"] == 1 + + +@pytest.mark.parametrize( + "resource_manager_cls", [FixedResourceManager, PlacementGroupResourceManager] +) +def test_search_alg_scheduler_stop(ray_start_4_cpus_2_gpus_extra, resource_manager_cls): + """Check that a scheduler-issued stop also notifies the search algorithm. + + Legacy test: test_trial_runner_3.py::TrialRunnerTest::testSearchAlgSchedulerInteraction # noqa + """ + + class _MockScheduler(FIFOScheduler): + def on_trial_result(self, *args, **kwargs): + return TrialScheduler.STOP + + experiment_spec = {"run": "__fake", "stop": {"training_iteration": 5}} + experiments = [Experiment.from_json("test", experiment_spec)] + search_alg = _MockSuggestionAlgorithm() + searcher = search_alg.searcher + search_alg.add_configurations(experiments) + + runner = TuneController( + resource_manager_factory=lambda: resource_manager_cls(), + search_alg=search_alg, + scheduler=_MockScheduler(), + ) + + trials = runner.get_trials() + + while not runner.is_finished(): + runner.step() + + # Result is not processed because trial stop takes precedence + assert searcher.counter["result"] == 0 + # But on_trial_complete is triggered... + assert searcher.counter["complete"] == 1 + # ... and still updates the last result. + assert trials[0].last_result[TRAINING_ITERATION] == 1 + + +@pytest.mark.parametrize( + "resource_manager_cls", [FixedResourceManager, PlacementGroupResourceManager] +) +def test_search_alg_stalled(ray_start_4_cpus_2_gpus_extra, resource_manager_cls): + """Checks that runner and searcher state is maintained when stalled. + + We use a concurrency limit of 1, meaning each trial is added one-by-one + from the searchers. + + We then run three samples. During the second trial, we stall the searcher, + which means we don't suggest new trials after it finished. + + In this case, the runner should still be considered "running". 
Once we unstall, + the experiment finishes regularly. + + Legacy test: test_trial_runner_3.py::TrialRunnerTest::testSearchAlgStalled + """ + experiment_spec = { + "run": "__fake", + "num_samples": 3, + "stop": {"training_iteration": 1}, + } + experiments = [Experiment.from_json("test", experiment_spec)] + search_alg = _MockSuggestionAlgorithm(max_concurrent=1) + search_alg.add_configurations(experiments) + searcher = search_alg.searcher + runner = TuneController( + resource_manager_factory=lambda: resource_manager_cls(), + search_alg=search_alg, + ) + runner.step() + trials = runner.get_trials() + while trials[0].status != Trial.TERMINATED: + runner.step() + + # On next step, trials[1] is created + runner.step() + + trials = runner.get_trials() + + while trials[1].status != Trial.RUNNING: + runner.step() + + assert trials[1].status == Trial.RUNNING + assert len(searcher.live_trials) == 1 + + # Stall: We don't suggest new algorithms + searcher.stall = True + + while trials[1].status != Trial.TERMINATED: + runner.step() + + assert trials[1].status == Trial.TERMINATED + assert len(searcher.live_trials) == 0 + + assert all(trial.is_finished() for trial in trials) + assert not search_alg.is_finished() + assert not runner.is_finished() + + # Unstall + searcher.stall = False + + # Create trials[2] + runner.step() + + trials = runner.get_trials() + + while trials[2].status != Trial.RUNNING: + runner.step() + + assert trials[2].status == Trial.RUNNING + assert len(searcher.live_trials) == 1 + + while trials[2].status != Trial.TERMINATED: + runner.step() + + assert len(searcher.live_trials) == 0 + assert search_alg.is_finished() + assert runner.is_finished() + + +@pytest.mark.parametrize( + "resource_manager_cls", [FixedResourceManager, PlacementGroupResourceManager] +) +def test_search_alg_finishes(ray_start_4_cpus_2_gpus_extra, resource_manager_cls): + """Empty SearchAlg changing state in `next_trials` does not crash. 
+ + The search algorithm changes to ``finished`` mid-run. This should not + affect processing of the experiment. + + Legacy test: test_trial_runner_3.py::TrialRunnerTest::testSearchAlgFinishes + """ + os.environ["TUNE_MAX_PENDING_TRIALS_PG"] = "1" + + class FinishFastAlg(_MockSuggestionAlgorithm): + _index = 0 + + def next_trial(self): + spec = self._experiment.spec + trial = None + if self._index < spec["num_samples"]: + trial = Trial(spec.get("run"), stopping_criterion=spec.get("stop")) + self._index += 1 + + if self._index > 4: + self.set_finished() + + return trial + + def suggest(self, trial_id): + return {} + + experiment_spec = { + "run": "__fake", + "num_samples": 2, + "stop": {"training_iteration": 1}, + } + searcher = FinishFastAlg() + experiments = [Experiment.from_json("test", experiment_spec)] + searcher.add_configurations(experiments) + + runner = TuneController( + resource_manager_factory=lambda: resource_manager_cls(), + search_alg=searcher, + ) + + assert not runner.is_finished() + + while len(runner.get_trials()) < 2: + runner.step() # Launch 2 runs + + assert not searcher.is_finished() + assert not runner.is_finished() + + searcher_finished_before = False + while not runner.is_finished(): + runner.step() + searcher_finished_before = searcher.is_finished() + + # searcher_finished_before will be True if the searcher was finished before + # the controller. + assert searcher_finished_before + + +# Todo (krfricke): Fix in next batch +@pytest.mark.skip("This test is currently flaky as it can fail due to timing issues.") +@pytest.mark.parametrize( + "resource_manager_cls", [FixedResourceManager, PlacementGroupResourceManager] +) +def test_searcher_save_restore(ray_start_8_cpus, resource_manager_cls, tmpdir): + """Searchers state should be saved and restored in the experiment checkpoint. 
+ + Legacy test: test_trial_runner_3.py::TrialRunnerTest::testSearcherSaveRestore + """ + + def create_searcher(): + class TestSuggestion(Searcher): + def __init__(self, index): + self.index = index + self.returned_result = [] + super().__init__(metric="episode_reward_mean", mode="max") + + def suggest(self, trial_id): + self.index += 1 + return {"test_variable": self.index} + + def on_trial_complete(self, trial_id, result=None, **kwargs): + self.returned_result.append(result) + + def save(self, checkpoint_path): + with open(checkpoint_path, "wb") as f: + pickle.dump(self.__dict__, f) + + def restore(self, checkpoint_path): + with open(checkpoint_path, "rb") as f: + self.__dict__.update(pickle.load(f)) + + searcher = TestSuggestion(0) + searcher = ConcurrencyLimiter(searcher, max_concurrent=2) + searcher = Repeater(searcher, repeat=3, set_index=False) + search_alg = SearchGenerator(searcher) + experiment_spec = { + "run": "__fake", + "num_samples": 20, + "config": {"sleep": 10}, + "stop": {"training_iteration": 2}, + "resources_per_trial": PlacementGroupFactory([{"CPU": 1}]), + } + experiments = [Experiment.from_json("test", experiment_spec)] + search_alg.add_configurations(experiments) + return search_alg + + searcher = create_searcher() + + runner = TuneController( + resource_manager_factory=lambda: resource_manager_cls(), + search_alg=searcher, + checkpoint_period=-1, + experiment_path=str(tmpdir), + ) + + while len(runner.get_trials()) < 6: + runner.step() + + assert len(runner.get_trials()) == 6, [t.config for t in runner.get_trials()] + runner.checkpoint() + trials = runner.get_trials() + [runner._schedule_trial_stop(t) for t in trials if t.status is not Trial.ERROR] + + runner.cleanup() + + del runner + + searcher = create_searcher() + + runner2 = TuneController( + resource_manager_factory=lambda: resource_manager_cls(), + search_alg=searcher, + experiment_path=str(tmpdir), + resume="LOCAL", + ) + + assert len(runner2.get_trials()) == 6, [t.config for t in 
runner2.get_trials()] + + def trial_statuses(): + return [t.status for t in runner2.get_trials()] + + def num_running_trials(): + return sum(t.status == Trial.RUNNING for t in runner2.get_trials()) + + while num_running_trials() < 6: + runner2.step() + + assert len(set(trial_statuses())) == 1 + assert Trial.RUNNING in trial_statuses() + + for i in range(20): + runner2.step() + assert 1 <= num_running_trials() <= 6 + + evaluated = [t.evaluated_params["test_variable"] for t in runner2.get_trials()] + count = Counter(evaluated) + assert all(v <= 3 for v in count.values()) + + +if __name__ == "__main__": + sys.exit(pytest.main(["-v", __file__])) diff --git a/python/ray/tune/tests/execution/utils.py b/python/ray/tune/tests/execution/utils.py index e284d1a80e19..62eaaa232d06 100644 --- a/python/ray/tune/tests/execution/utils.py +++ b/python/ray/tune/tests/execution/utils.py @@ -1,9 +1,9 @@ import os import uuid -from collections import Counter from typing import Any, Callable, Dict, Optional, Tuple, Type, Union import ray +from ray.air.execution import FixedResourceManager from ray.air.execution._internal import RayActorManager from ray.air.execution.resources import ( ResourceManager, @@ -20,22 +20,12 @@ def get(self, trainable_name: str): return trainable_name -class NoopResourceManager(ResourceManager): - def __init__(self): - self.requested_resources = [] - self.canceled_resource_requests = [] - self.currently_requested_resources = Counter() - - def request_resources(self, resource_request: ResourceRequest): - self.requested_resources.append(resource_request) - self.currently_requested_resources[resource_request] += 1 - - def cancel_resource_request(self, resource_request: ResourceRequest): - self.canceled_resource_requests.append(resource_request) - self.currently_requested_resources[resource_request] -= 1 - - def has_resources_ready(self, resource_request: ResourceRequest) -> bool: - return True +class BudgetResourceManager(FixedResourceManager): + def 
__init__(self, total_resources: Dict[str, float]): + self._allow_strict_pack = True + self._total_resources = total_resources + self._requested_resources = [] + self._used_resources = [] class NoopActorManager(RayActorManager): @@ -68,6 +58,7 @@ def remove_actor( self, tracked_actor: TrackedActor, kill: bool = False, + stop_future: Optional[ray.ObjectRef] = None, ) -> None: self.removed_actors.append(tracked_actor) @@ -106,14 +97,18 @@ def create_placement_group_factory(self): pass -def create_execution_test_objects(tmpdir, max_pending_trials: int = 8): +def create_execution_test_objects( + tmpdir, max_pending_trials: int = 8, resources: Optional[Dict[str, float]] = None +): os.environ["TUNE_MAX_PENDING_TRIALS_PG"] = str(max_pending_trials) + resources = resources or {"CPU": 4} + tune_controller = TuneController( experiment_path=str(tmpdir), reuse_actors=True, ) - resource_manager = NoopResourceManager() + resource_manager = BudgetResourceManager(total_resources=resources) actor_manger = NoopActorManager(resource_manager) tune_controller._actor_manager = actor_manger tune_controller._class_cache = NoopClassCache() diff --git a/python/ray/tune/tests/test_trial_relative_logdir.py b/python/ray/tune/tests/test_trial_relative_logdir.py index d6bf194ddb02..dc1425548ddd 100644 --- a/python/ray/tune/tests/test_trial_relative_logdir.py +++ b/python/ray/tune/tests/test_trial_relative_logdir.py @@ -310,5 +310,17 @@ def test_change_trial_local_dir(tmpdir): assert trial.get_trial_checkpoints()[0].dir_or_data.startswith(new_local_dir) +def test_trial_logdir_length(tmpdir): + """Test that trial local paths with a long logdir are truncated""" + trial = Trial( + trainable_name="none", + experiment_path=str(tmpdir), + stub=True, + config={"a" * 50: 5.0 / 7, "b" * 50: "long" * 40}, + ) + trial.init_local_path() + assert len(os.path.basename(trial.local_path)) < 200 + + if __name__ == "__main__": sys.exit(pytest.main(["-v", __file__])) diff --git 
a/python/ray/tune/tests/test_trial_runner.py b/python/ray/tune/tests/test_trial_runner.py index 02ccad99f58e..92a9e35ba593 100644 --- a/python/ray/tune/tests/test_trial_runner.py +++ b/python/ray/tune/tests/test_trial_runner.py @@ -7,7 +7,7 @@ from ray.rllib import _register_all from ray import tune -from ray.tune import TuneError, register_trainable +from ray.tune import TuneError from ray.tune.execution.ray_trial_executor import RayTrialExecutor from ray.tune.schedulers import TrialScheduler, FIFOScheduler from ray.tune.search import BasicVariantGenerator @@ -27,36 +27,6 @@ def setUp(self): def tearDown(self): ray.shutdown() - def testExperimentTagTruncation(self): - ray.init(num_cpus=2) - - def train(config, reporter): - reporter(timesteps_total=1) - - trial_executor = RayTrialExecutor(resource_manager=self._resourceManager()) - register_trainable("f1", train) - - experiments = { - "foo": { - "run": "f1", - "config": { - "a" * 50: tune.sample_from(lambda spec: 5.0 / 7), - "b" * 50: tune.sample_from(lambda spec: "long" * 40), - }, - } - } - - for name, spec in experiments.items(): - trial_generator = BasicVariantGenerator() - trial_generator.add_configurations({name: spec}) - while not trial_generator.is_finished(): - trial = trial_generator.next_trial() - if not trial: - break - trial_executor.start_trial(trial) - self.assertLessEqual(len(os.path.basename(trial.local_path)), 200) - trial_executor.stop_trial(trial) - def testExtraResources(self): ray.init(num_cpus=4, num_gpus=2) snapshot = TrialStatusSnapshot() diff --git a/python/ray/tune/utils/log.py b/python/ray/tune/utils/log.py index 44489f591f1d..a4b57e2d8da8 100644 --- a/python/ray/tune/utils/log.py +++ b/python/ray/tune/utils/log.py @@ -1,5 +1,6 @@ +import time from enum import Enum -from typing import Union +from typing import Dict, Tuple, Union from ray.util import PublicAPI from ray.util.annotations import DeveloperAPI @@ -49,3 +50,15 @@ def disable_ipython(): InteractiveShell.clear_instance() except 
Exception: pass + + +_log_cache_count: Dict[str, Tuple[str, float]] = {} + + +def _dedup_logs(domain: str, value: str, repeat_after_s: int = 5) -> bool: + cur_val, ts = _log_cache_count.get(domain, (None, None)) + if value == cur_val and time.monotonic() - repeat_after_s < ts: + return False + else: + _log_cache_count[domain] = value, time.monotonic() + return True diff --git a/rllib/algorithms/mock.py b/rllib/algorithms/mock.py index abc4de8b3f13..ae885d96679b 100644 --- a/rllib/algorithms/mock.py +++ b/rllib/algorithms/mock.py @@ -1,5 +1,7 @@ import os import pickle +import time + import numpy as np from ray.tune import result as tune_result @@ -22,6 +24,7 @@ def get_default_config(cls) -> AlgorithmConfig: "persistent_error": False, "test_variable": 1, "user_checkpoint_freq": 0, + "sleep": 0, } ) ) @@ -46,6 +49,8 @@ def step(self): and (self.config.persistent_error or not self.restored) ): raise Exception("mock error") + if self.config.sleep: + time.sleep(self.config.sleep) result = dict( episode_reward_mean=10, episode_len_mean=10, timesteps_this_iter=10, info={} ) From 30ed5f3d91fd0447505a50c8901535eb2dd84e02 Mon Sep 17 00:00:00 2001 From: Kai Fricke Date: Tue, 16 May 2023 16:52:24 +0200 Subject: [PATCH 410/424] [air/output] Context-aware output engine: Add docs, experimental feature docs, prepare default on (#35129) This prepare to enable the new output engine per default. When activated, a hint is displayed how to disable the new output engine. This PR also adds documentation around experimental features in Ray AIR. 
Signed-off-by: Kai Fricke Signed-off-by: Kai Fricke Co-authored-by: matthewdeng Co-authored-by: angelinalg <122562471+angelinalg@users.noreply.github.com> --- doc/source/_toc.yml | 1 + doc/source/ray-air/experimental-features.rst | 133 ++++++++++++++++++ .../ray-air/images/rich-sticky-status.png | Bin 0 -> 419968 bytes doc/source/ray-air/user-guides.rst | 22 +++ doc/source/tune/api/env.rst | 1 + python/ray/air/config.py | 18 ++- python/ray/air/constants.py | 3 + python/ray/tune/constants.py | 2 +- python/ray/tune/experimental/output.py | 37 ++++- python/ray/tune/tests/test_client.py | 33 +++-- .../ray/tune/tests/test_progress_reporter.py | 15 +- python/ray/tune/tune.py | 66 ++++++--- python/ray/tune/tuner.py | 14 +- 13 files changed, 292 insertions(+), 53 deletions(-) create mode 100644 doc/source/ray-air/experimental-features.rst create mode 100644 doc/source/ray-air/images/rich-sticky-status.png diff --git a/doc/source/_toc.yml b/doc/source/_toc.yml index 39c494d142cf..c3555bf6228e 100644 --- a/doc/source/_toc.yml +++ b/doc/source/_toc.yml @@ -53,6 +53,7 @@ parts: - file: ray-air/computer-vision - file: ray-air/examples/serving_guide - file: ray-air/deployment + - file: ray-air/experimental-features - file: ray-air/examples/index sections: - file: ray-air/examples/opt_deepspeed_batch_inference diff --git a/doc/source/ray-air/experimental-features.rst b/doc/source/ray-air/experimental-features.rst new file mode 100644 index 000000000000..b8c338fee290 --- /dev/null +++ b/doc/source/ray-air/experimental-features.rst @@ -0,0 +1,133 @@ +.. _air-experimental-features: + +================================ +Experimental features in Ray AIR +================================ + +The Ray Team is testing a number of experimental features in Ray AIR. + +During development, the features +are disabled per default. You can opt-in by setting a +feature-specific environment variable. 
+ +After some time, the Ray Team enables the feature by default to gather +more feedback from the community. In that case, you can still +disable the feature using the same environment variable to +fully revert to the old behavior. + +If you run into issues with experimental features, +`open an issue `_ +on GitHub. The Ray Team considers feedback before removing +the old implementation and making the new implementation the +default. + +.. note:: + + Experimental features can undergo frequent changes, + especially on the master branch and the nightly wheels. + +.. _air-experimental-new-output: + +Context-aware progress reporting +-------------------------------- + +.. note:: + + This feature is *disabled by default* in Ray 2.5. + + To enable, set the environment variable ``RAY_AIR_NEW_OUTPUT=1``. + +A context-aware output engine is available for Ray Train and Ray Tune runs. + +This output engine affects how the training progress +is printed in the console. The output changes depending on the execution +context: Ray Tune runs will be displayed differently to Ray Train runs. + +The features include: + +- Ray Train runs report status relevant to the single training run. + It does not use the default Ray Tune table layout from previous versions. +- The table format has been updated. +- The format of reporting configurations and observed metrics is different from previous versions. +- Significant reduction in the default metrics displayed in the console output for runs (e.g., RLlib runs). +- Decluttered the output to improve readability. + + +This output feature only works for the regular console. +It is automatically disabled when you use Jupyter Notebooks +or Ray client. + + +.. _air-experimental-rich: + +Rich layout (sticky status) +--------------------------- + +.. note:: + + This feature is *disabled by default*. + + To enable, set the environment variable ``RAY_AIR_RICH_LAYOUT=1``. 
+ +The :ref:`context-aware output engine ` +exposes an advanced layout using the +`rich `_ library. + +The *rich* layout provides a sticky +status table: The regular console logs are still printed +as before, but the trial overview table (in Ray Tune) is stuck to the bottom of the +screen and periodically updated. + +This feature is still in development. You can opt-in to try +it out. + +To opt-in, set the ``RAY_AIR_RICH_LAYOUT=1`` environment variable +and install rich (``pip install rich``). + +.. figure:: images/rich-sticky-status.png + + +.. _air-experimental-execution: + +Event-based trial execution engine +---------------------------------- + +.. note:: + + This feature is *enabled by default* starting Ray 2.5. + + To disable, set the environment variable ``TUNE_NEW_EXECUTION=0``. + + +Ray Tune has an updated trial execution engine. +Since Ray Tune is also the execution backend for +Ray Train, the updated engine affects both tuning and training runs. + +The update is a refactor of the :ref:`TrialRunner ` +which uses a generic Ray actor and future manager instead of +the previous ``RayTrialExecutor``. This manager exposes an +interface to react to scheduling and task execution events, which makes +it easier to maintain and develop. + +This is a drop-in replacement of an internal class, and you shouldn't see +any change to the previous behavior. + +However, if you notice any odd behavior, you can opt out of +the event-based execution engine and see if it resolves your problem. + +In that case, please `open an issue `_ +on GitHub, ideally with a reproducible script. 
+ +Things to look out for: + +- Less trials are running in parallel than before +- It takes longer to start new trials (or goes much faster) +- The tuning run finishes, but the script does not exit +- The end-to-end runtime is much slower than before +- The CPU load on the head node is high, + even though the training jobs don't + require many resources or don't run on the head node +- Any exceptions are raised that indicate an error in starting or + stopping trials or the experiment + +Note that some edge cases may not be captured in the regression tests. Your feedback is welcome. diff --git a/doc/source/ray-air/images/rich-sticky-status.png b/doc/source/ray-air/images/rich-sticky-status.png new file mode 100644 index 0000000000000000000000000000000000000000..e054d2ceeb235bdd8bcb2b13b221c7e366914b3b GIT binary patch literal 419968 zcmeFYbyQT}*FO#>snXI&NHcVYN=Y{e${;ax=M1TI2}(+rG&0f%Lw5^EHv^1_#0=fP z%lrMQXMLaF`mJZJ-}k>q7YpXzbN4-GpS}0l`?X&ap{cG&h)0Eog@r|^^iob63+pxm z7S^peoO{3(j=L?nz^^lJ-B<3~rd|xru1=OV_7Dblm@|X{0=2Qk!h+6vXuT%xV8FRJ zCY8GNP+m+ubvk7+y9dMw^C+;m^QN!i?eDB(PN~pO+Wq;$T1319sfpw&RH{A=ioH7v zIPY#Ztti=~!|rc<@M znfyr6Ne&af2Qr*sul`>B@{5t>3dCL= zj$W>(-`$bgS;T%(+AmlI!nSUJSZFKpC7Y0)n-H}!1mZC92vkY4D%n6|;65iO-V{i~ zMbs6ItZkdcI}K*_Zw;nIPLD?HjXZ|WJBjp@^j6(gm4bL)Ui5l&t&0q^gx$@gRR?`fZvYW;pXByf7lKq(GL?k;+Gg7te2# z>6$)KC{>S)AlPP=y2y($VN8Bej`vuMTr=q23_9q#ZP#&VRd-$ukL7)9ltV!D?aRhz zjW%;jJ+%wc*cT(MiHnC*My-EXH;2l}rEay)wRU+J=p4Nppnbbm%tKwtbZUdw#kl9M zu#|8=)am_up}M+ZX?6N{Tpo$d*bg4Ok06G?eGn4Ub8{_L;+L0Lu;;GDyxd#XSXd9N zYyhi#rKT!w?&QE@YT;xC;ek3h1Llf_B`E`SHZ`|{xHFhRtZW>m7R|8aCJvQi{7bGl@b}GS z5F^81L)`787+c${3VKzw3iVjy0A5I;XRFoN3+=ICw;<#u#qx)JfW3^|CKxvPz{yN#10 z!;MT+GbaysDMm)%KEprf=iscS_Al{{ZvW5%pbrq#)EUIb!wYh70R86^Ztn7)fRKMo z=)Zl!O&4%(kT%54$-~tgBJT-tbZ7cc5sFG`n*Wk;vm`4U2j{;Q1t{;oyv)MDwZ`2NfIn}G7KC;rRaZtncGod2ANyN%`l%kY~!|Hp7(cK^Nb zf2!&)*}y0@HE}s7bB~)jD#=MP-Yiet!pYpmLj12k%}qsk%}x2lxFP)fX550j=4RZc zqI@FUqN2j4rVvXZeqJGw{}83*=;m(fXb!m%1qkP{0pyqoi3*ts3X5^`iShDr3z~|V 
za*Ohd@pAL>@>}o%e~ADC{zHU@s|{epruP3is~b@kfGCKdpt-Q9DId20udoQWprD`_ zw<({w0Jk8-g3r>@!a~SWOzbaFH`^xuTvJJkk)Mb6-&Zv4P2DY>Tpgqs)lAJ9G<5!b zN7u#yqT_CQqcuKZQ2`M#elamVAwCfSQQ?0N(ucUZ0S0;_l#iE3K=7|S7Utp#fJjro z&}ulC=!4gd4D0N>wYZ+2B2&;_tB zrZ-*z%nb5y(s{zhyVd`!wXX*|C=E}=2AkHf! z&c~;<+uzCf4QKzsrlbaa5MFUSo}3lK-S;4%&kowtsp?G`iFD;^SaG{V@bm10sF#Cf3VCh?=n>ErVBLS| zih=EK17QEJlku{K-0Uu(F>YQV?tf`afbSm~1N~#gpqmi&&m~KO{!gVy{l zCx;u~`VUn-xBthdf2!&xzKPodY5;tO+X7-~>S6EBD6M1e3IUA9+R0t=uh-ez{O8O6 zDMS+V-@WPYC;lU*0+0TE4T#l1=m-5P_Wvyt(AWRffB!Is|Eup{_}`cOkM#S$>iS=G z{f{*8KN|eM*7d*Y`X6cFe>C`it?U0cb>aQHdW1LvX_yyK8y5KRZvu7YJu?+WIjrlO z|JhB2@xT>a=a+_VSXksNH$T`)+RS^v#ry6`YV!9{ckyu^(OH%pn*x`J-Q{1o|0DCk z`gbC8PE-mYFAP{pa?f?4vwt$YKI_U)hAoXt<-87j&lbn`{@twxHYK^h+}yY1Bpgrm zweIk}1(UX=w|E%H6B&1ui+v3YEGi8&eWrBl9w{@fsM~Jv&e_$k=41VZ2jf!M_qJh~ z#=)C)rN2Y*STACNUp%=R82*AC8(RzS?sTo1iwhsUpI~fWjCkrZ2Ap@_z$HDt*zg+nXkviDm8#_ z94`e}W%(UFA;rUPS3i7lae%ZX=Qfht1vlZ`3F2w7pW(+~0;}z&GBw%KE3ds&8{@T5 z(PkGXXvw2#^NNbdY)u;1y(fsHS-Y|~-{W=`8i;xaZs*q@#HrL@g;KvdyyWn9-+F)N z_H?!<-@-d$@y!p@L|Z^yNHY5rRhGyVvgtG zw^%CX3Kjwa&a)pq$+;F9lD!ZRlg5`mI+c^Y8k`gYcXcPF$Hw|dpgFEkG5sU~yPWID zH@;@Fd;5!SsH>M38tXM_$V&M@cK1TN}(Z%zC+R- z8+)mb9OxgoSw5wxGXqws{?}Zu-NotJq0R`B6zF>5;Q246P+AGDtt0GVQ6#}m^SPtC z^r^+7&u&xM>F;QI)RJ97?1GBc&d`D>%;{)e9Z`G#D-Zp7cov1%L&JDP=hPn6`e^3< z_Mp$wDjGY&sWanwceCsxucQuW%6w=B^&emDPr(#;`1ppEP(H2v$6lxF)3s#RMY+}0 zq;s`I%Ncb`Pl!KMqB4EVl9L*?V`(Ll?h#Ud^FAC=Fi9W%zO8uJ?7h8pz0v%N{R}Rb zlTalV86bZ3P#&Lxkx|fLHlY&Z%1A~=AZ5#Dpq~CX>mWm(a#&(l^6Yqbgu=wP4S%HN z(Rqezv%q9?Bu9hi_V1X0opdn*dcJhK@@A@h(k7qdh^A2Vk;qjo?z^|VFXiR$kjmxZ z&DCP#ynH9JMU+b-u%0&H1+c^{Dk-5_tMc28O|Ov3Lvy539j_ zGZJFRmVdHzExq1JqQ2aY4`PnY)qpSVOvIlYi%~ppxmLeU#4O++r~Z7PkqAW$y%t93 zW)#*!i}C1X9fZ^2s>n(h%kcA0m{UXGF2VW4?nZkS|+aQ^TN&(Di{_|%xW%~~$?N@A^# zHKX$fQ_M2TWYT#vKbe8}?PY_9@8#hX;CdllQ502XU3g~)JF*cD-y3C!zS&1{vc6FQ zf>;kBz526TJ1axm&KG+^4zq%@)ixcdtbp$JkR?rh{ce{Y+NhWBMx@Vw))cY5?ebcx z@Z2^^6WBP1r)btD>xP!{W` z$R5tAz+#xrYM4~#e(nh@@h!Fo)YCds7h!W>wXd`U6W3cxwI;DMM}+N(7X~}cPZsnE 
z_#@%eaQc*->s2d02+{gw>**-8PT`H)c8#Q%pE!lFI}!KmS~DQd`yNgtqpeCJFAY*Z z@!Em)q_0j=+zFWm0@AXi+FWeRjiCdRq* z@tH4EeweHEebSEo-&+BK#LsVGdmmlk12XcuLyY>FrySw=0KWZJQLmx;* z0tY5ue(=jLfnshl%h3YE%%phVsWUW1JH=3$9ZU45GWQ-(^=3a0(o{yTARaEb45dgU zkD@+oY`k_fYkh}Jxd5mgy;}l5&6A;m8z1+Ms9EnL zWo0hulVv}w7c1Wel9Q83BIHC*mV?-NcnXS&*pg60^w~JClI}her5Q6DHe0;kI)wj8 zblFeZ{uS!t(hGXYO7ozH$86pX+xb_{C9nb#dTmu{)Y-hCawPd0c#-PlJa&htS(D9w(>b9$On0X&n-WHt-t?_$@0yYR791=A{~NU zV0X_^T9zC^uIPkW%&|SktUAbzqtVCn0}I8*iH+8opUPd;FqQ`|$ceKpnN? zv$L@lufEZAYzZ0S)UBDFg7J!c^rw^J@9b%$vvPQtNUPCKS2m1UJ?7CJ-nx$~LO*o+ zMh~{YmJysBe9P;9orN|n5m&qpV#JCN*Lg6ncf(so$n|*tk4Iv{na+()>GxQQ|hUmOb=z= z*lkOQaVQLG8_{`H{(#L@n&1CONd2qf`YItY(Xz{+?ow{Cn*TRq!)NHD*DW7a@UF(! zJx3fya4%lU{?0u<7I++;f5~7Yu5KcF(L-Yd2%xE}mzk3^CR0$5`dn4^7L3U;y*W*l zJv4bpYV{@UhS0;_+GP*Z&_5?~reTB7p-0IqPWBkvbj1M`<>BMqrK}3`9@hnrsS4+n zb|4e0acY*e7_?h4n^?aLMHsE>Ro0lLp$i^=pmZ#Jy<_*uOUiu)eMUDqB0+sb1OMnO z`XynE$2yMSQT0$tak*))6Ei9!)YG|H1vNbAf9)68hNYFy#?F%iZt})LTnP*%_}W|-g&UZ_}e5_ITW zWygL4I%gPTbM)<5CjZaUy*2TgHOKW(8)la#QC#_AC~NyGgea2s({)2p_(Iz5@V1Uq zp={WLJGc10FND#c^#WYFLlzW435U6q~YGrM{r3x9!9}EJP zsh;N<(kKZ_JcB7#Yy+VPFOv(VmcgZy`3GvnsNSQ4yDR@Re|fY$E-{BLo&{dgdM@L1 zW!xKW;BPog0L#Z+hx!O}yF*^@4PT_*h1b;$LG?Ni7nIQXPRw?D)Chj=I^t>ilNUK3 zT`e{*em{0R%USVG4%TE;`q*(ae>j85W&LH@OHJ2gi%B_|N%FsTVqG9o{8ozXAgh|eyl?6}n97qk4x55A%qiY!G3YbOqETl<)!6)D0? 
zK?EiQlv|81PDds<14aa0)}lPZ42CfMgZ)G3O{n|2R1;Ws2py^jSAZ_WrE$YK!yN;_ z2AL)4J<0g{R;c(1jS|B}ff&o-EpQj@dy3$9{^}oW4s0=oFJ9!ZYoKPiR99^-Y{QZ{ z_H^GTQ>_VXw!CEUJSk!{SvcBy0B13q5_F;g$Dx@~e)wu?n7AiST>YC+rLOq&sXvZ8 ztFpF$g5%J*>%qW*fV9g@-X4zsLX(&rkc|-at~&}rReIs+egEXxPvA}adnpp4$iFfQCg89+BwiT0bA$x7T+b^BqoL>wx%f=fDyhB#148ONilwK{J}$ucAtJ{L$pQjfafx; zm(y3Yk;7-IwFP+>#g%-_dDyQU;w$B4@4+S$v$^;0l%J;O=Uu<0AnZfz!l9;{JKgkX=YwP!CAX5w9xS}wTuE`WE z(@AQV8NW@FXK{TY+fi`8#9#wu(;4z4h(QE!3fz+r8BDF?<^;P)!LId)>C9Z4E81f@ zwomjzFExfr6A;pu5nmdv=yosoKN&pLa0QD=rtCdN=(aO0wka#w0(#r&)?gOXcq$nn znaPaM3__T178_>Le(-wCr#DWcQSd9%J+G4$eJHYtt)&(8GN84>BN%OgjjSI&|`-JBU#bLhvCGy6YDW83o3 z*hDQXbAJ654pF7>hRAVsiJ^sOKDKnJ=e(4=%4$%aZw@`?d$kS%=#tWtm2>(Nk8N>% zuImQ_9H!HJJ@vlK+HFj+lIy20)*n>*jNw{f!}H zVzSA`r!e{_Exb#94O|13v$W&Tn zKC-S*R6tSz$*+=(XnP*2rrxvne=e*u44p5b51B4auD zxI9(#Q(PReWs6K2@GDi=q2DP@q|vu5wLwjnol4mL^N(g}mU?u#;lWa+efjqdDk>q{ z&XwEusaghtW66lBwY20rLJ55(*-sG&Un&ttd$#XvL`HSCL*p7ULuZFulze7#w;ui| zQzH2-RHVWCSa3qd7pcfg_8{)S1Tq*O%z7IP$|?}7drK_+M1RWd6Nlun_YkJU6%3P1 z+&h6;ZL#?;l722CS|r;q?m+w$+~wX3FaophYUVVhg(toJg*4tR)0b4;l=hK2=-+uNkjj}&jS|y2R1%QRrmNg`{b?KnX*!=?ohBdBa z#*I58Ibn3lc?(o7efh~26aF?f>;c#3OJaU&FBz55{2b;13NnbznLm~%W zCv^!;fjdq(yLDc@>eVPU5OA2SPC(WRd$vYa4u>q1jgylJ^w?r&{=56`;%xwwtF~cUXrO)8;(sZ6nh>8N zVddC=^dUTp87cECI|zWDM5V9RNC0#%L&)iKRq2mr3guOu!{QD}myw7uX0*<}?R^v{ zWlJfhIs-Pem&)LD#kzdEd|%@ftSS;C6B)_TVU*od$UX`ig)Ve~GQavz?buWyZ$G^9QsGnWWEC~;i0 zi3X|ag8%QWLJtoP5)HnR#&9icE#>jRyg5Q|O2w@U~ zb{!9}=UecEAe;lma#w>JZGWGx=XS2$XWLNHrOWX*)TwomgRoj`&{L2!Sg&2gW-LKI zirl4(3~BC>C`XBX|AEt-aGt`4+%iP8j=FTn?FQP`UIB$$KX!lkJ3`~YKRJ!h!wz=3 zHklN)&1MKh|L}@?{^?yu@~|?r{~FC(IPwDE5TGo$8(*FxC(mrAe4mw4rWLqDDF}Ue zLG+=)jTZ+3$)9(=B&>E?in+erCGnhr7(5VN#xkn5Zkw(!XXm-1CDV&;T*Yhf*aF}h zc|qHs;Zn!*QC8Q7Ak6+gY%NEzcVIvTcG+Z??t7uko-60q^@AVffqNc|K8F>jYZ$&} zqs~-?j_rXRSe4qTJ-)utNdRy9zTW@I2-<|{DW24ij*e7upE8GqI&^T}0u$nBII6oX z+Tcpg-ZcOdgE+``H^s#58XKN?Pg*DQdy~JW+6~9G~|;;LTV3Uu^4LndTw2*UBc0KL~R$- 
zu+=OKb@kPkWQW>DSW%z`}d!QUpI*1epdFG%<+UJ_5$a8@*^9rTaltk^F`l%wNL`e)kOoT%`v{=YTJp(NQ~>E_c?d_3NO4F!_m1VAfKmI9rE^OZ0BSKN$&D zkxOnPQs38;UbBHXRYYG#1WNmz@w@-}WIuaAtw;l7&e80>Tq=blUN95qS63(KXh`3n z^Xj5kcV6W1=4ghyB%7!UQsrwQ7k~)kH+FPL)1r$MY4;jO$3+@so+ZR;6cN5ilBC4+ z_iPy%DZyB$_{rd5a+YmXO;FoU20|k$wqX9{;!?!ZBZeY}?z~lg1W%DFuBPA?g%Lq+ z+AEfg(jwv$DB>#{m7sNNz|tv*C8-OTa*rK=clPbJ>|+K%I!5|2kKD{P0x(L-kx{23 zDj;-Q`u8&9#-ssIOSH8hRV;hWtIKiQBrfCraH0;wW?wM?j^uzNF#PiJ^3zFo@p4Dm zWbZxP_)|4f0NdXH(A=LzTItdlcXgRd_gOFN*q48JD|}B~xk~SpGS|AaEMMTK9}Jun zEJ{q!LEk=H47m1F=86i5BCmA3e5$Ohe6o=yFPGO3buu&6As1Ql6=-|=v9!E7wY~iX z*TCxeZi`h{BpF-&suG$72*`d24dEyX(8in-g}EX5lK;6wSwK<<#4ws#glGM5_h$vu zE|4W8BqV6+R)@2wBr{@<v^X;KXr z@TCxV%T?rc^F9xNtv=<{FUz*6vIKPIbg_82pTVeeV8Xv4gnF(G*2ctzPRVA;*&v8E zKE+UcFreJeod1&5v!-R4svC11w7=-vxB$&+Ppl&N?85^2Ssp=at@{EeXN;IcPYa3 zRdYr9fvx)eVTods7U|;!udJkw4Ts9}sDn*e+~&5>bX0~}CZ z50e5}-zRfHPu(yaLU8_Ny$n?13b;QQxO9CeOA5=)KWxM!_ok^$u`^?yXy~e~CTVHw zp?EM?dwQ`yVzX2(6-CaomKQ6%S~rxo3AC{8-VMnnh2*bG?hgtOPzy&-v|P0BkH{!i z6%2`bPFK9~TJX3yLB=9Z+M^sBB}D9dZ$FfLX=BJ`Ulnjfih?-J_9ycbYYVN;Ckw zwCtuik&ia{Jk-^7nXl*}j*tUdkwiegVTxYux`{^sD!??^^4-nwyDLlMP6MZLqvwb9 zRkm@p*gyb_TRuf)6#^c`b?qoNki^4EgEtBsdOLKv0oPaa*R8j)9Mc|3?qxS(jiiah z=1E`tp!G0)jxAs{%>0;u-)orWrV#)v(_$lAuVO;7&=0j@*kU=FGh(kdduPcBPb8Y~7y+AAO4k+Cv}-UeE+hKn$E8l(`q8wcCRZ z&Z9M|{z%-kSHPN{Lsl|288wSc9KE(cq@Y!)7NY=+`&O{*lON1AmuVKyaYEYXh1Z3F zgHz%9>hflpYH7kql!^aFI8zLR?&JP?zjwk8bJ}m-v`3OHX&D)HueGS%d(W;@;5?yc zF#7F%GUww5A%4e`Y3}oqO+~Af13Y;eS&P1x9z+kN4juszkE_& zY_Bcs*K7^`it}gs=Ii$@YRw^_mXpZJG5*|7Y#|{bq@<*p+HV4H-K2z~G%u96_^}dK z&r17yb`#w{mcbVA*{&jJ(cfbQP)%=)dW!5m7a_h$bL&2R@5Qp3I5M}o*FSi>m=9b_ z0(bRQGYK$%XySt?d76cKN{?^EmG)UL@q=2@5vU);ib5i38?WErzXAI8Q6q&;OU*!Q z`_78fCp;i|#SME=?KmC^G+eqTxnIxNjY#2!ykK~tr9}i3VV3Q|xPzbhI<|F!&JJfP zJvfaU+zU>Q&CU+C2Y{HYdhWUXq=b!&%VLlvApc5T0^NDA8QwZ$70U<$J;aVA=aG8{ z;QU<_Cc(t@$QX&udM8Ct^Uw!9lKy^`?>h}!Hb3k6=!Y`~J_1`ngGbVfkJ*~7F;Y(x z@t>)Y)j?|DGI0Uiaqk3dKE?&aBGp-X%>5D>c-suRSkJO9ZA00T+d1(N<6YZ<^* 
zi5`U1ABhVI(Ayjn+@NQTf5;o;-4adTSjEw_=9k&PDXu-+UMMs+r7 zV4zp}g>n1Rt2~R7-bPx>@j7usZE7nU(!-e-`abOo3YF;P?t*s+Td|oy%Igd&kDTKZ zMn1h?hW_R8IBw6Iud@ug&e>3(hrUpPqTA=S6n0+LBKG}tlMR?L)(ETo!!I@F)OBkJ zjMQ~QVx1+7AIb6>TuQndOZBMgI?L~jau>1xfJrgti&FekEJ^3;jjL~DH9DBC0s>- z-~elrG`1rrVgrE+{mzfr^6P=5;-;5=)4fC>0h^%& zQg25Us=Yrek-}TX1=Z;Hc#U2=DZywTZfS62y8EtDW|#P4abHcTc8(E$N+jZoEa;`U zwp8j3^%}ITb*LHv`!M0eN*z63(VSinf;|TknQ&f<-bZr5kbz_(JhFxhAT{pTeH~EP zMaLc8SCz}yUK9UR>&=^6j9f<5Y@S-TU+BtzeD)4R&b8dK+pMCbgbTER5x}W5m?@NI zSY=hQGvzTG3@W&j%1ka1sgO%R>&@&Lu}&wQz;=tVAX~xrV$?ce&ao+R(qVQFqXIHb zoJ9+~V|+~T&HvK-kvvd;J?4FjRT^-Hd-p)%K(oQ~1MN%8k<5JgpT)uQa=QUvasx>; zBtMN+YO9N_>%LfVHY*PcErT&UJACC%~YjG`H%yd@9G>&l<`)ycH~RJ z{+~8=I@RBBBIq?~@N3fsQ#Y=*F1@-zanskSD}2Hl5r}#Ov!6eDzFKeXGU9_0torx( z15kfYUf)KlqOk9RdT@lLCMyi{8dCH$J;tl+=zxw`g8N6LQ`a0%&4f;cv-e zSh3n-@*I4BeVyp3M&=+;*r_YI;OvOV)|!~%^XP`=4wzSrB5q8 zw)Ag$Z+Lj-rurNu(S%{tX13u`o%>V{Q2^rcDJyGayH3p%sQAk+J-+&=0cX4UVIf0i zDdWNKi73DV6C4*FWcwUDN&;;tQd080<=YQGCR14SC$Mw9ZitqBAoe+S!>_X?^PB>B zqsK&@|J5lfcq`fUf@9*Ov^&c4?O>=EWnYY!yuUAED!?|>fJy^E;6G?|HJ-AG`AcBp zh<1y~&Y;EVBJKh1;Cy6_E$zh0`*#L>W2(QDaazXmepN^4SAWD~V%9!S-QZ3QYDJD0 z7A}*W?=*rQL4ZIHc7>ZKn(X5r{+)Ea37Bi*dj!;TV=Xb`kDO0(Mb0GMjVyo{K+@^EJ&j2vg z;2NA1TspQFm+3`ub_X1v!$d;&p=WuNR1>lHmB!>42C}c2Mf@bt2t_+EiYsa6zUcN18J0A|T>0e-xbA8S_JxuOIb0YhOV+?Xz z-d~}5MtYija?uvujY@mNi=91Nm&1_;Df@A)c0_jFB`L@)at#H z%-`r${5E3lP`wfC(GG=$E)`f$rkpfg2R%yox|WhO-ET7GVk;$pWj&e8Fp%L-!*-Rin|IoI8esNn;i#6tXc-F_6$E|0@`dR$$n5GG zl9_N5g6x_44ySgtYY}wk+xYB!b};LTCPpQsn-pD@A6iL;Wu?i8tq+`V2~UeSSg$#BlTOH>#-NbC)0Q@4EthN`AKu)ei5^C4TP! 
z&dli6M=$}6=tq_)3IUH?lSTF$Pdzs>ebvR0YGQUF*_CoS}P(bqhPZdt2wR{3e3_Uj9 z#)ye#-=T(#fApKyKTuYh0i8wlqYLXEA#|jJWk-D66mwo;z(YN;SmqL|O&{J-V*C&D z^k0k8t7ZgXO|PtCmX3TqcC%tBv_}eUptVHw;eO8sgQXy|6 zYH(*)mZy|)^i@?CZf57?4QalfZ1o!hwPwD1dL@CtUq)S3X8+XpWTLDiaX%ZCwE< zDN~Zi^KOpqsn%3GrOhTG4u(JB2C;|K!g~P7iUv3$d79O>GjWxb{JOf|x6k8AhORmy zcl85Xhb2U!xgU(;+^6^D1t1B#^l++erzsci)$1rOr z4P7c5k~N(=xUsPYF}nH9CpKixwDHN|*3AkZ4D^;4j74wHL>n_@EKxOF z&n7%9QK4@nRu896g$txeY({tYZK3xeu`s>y!|5!E-TEM7&&3&Q85%qRpCkKcTKeGb zJtC$@w-S<)0AxfHP?gAnyX=c=l3eQ5OOtAAqM&<(mOomb6Hp0$e17{rzwe1NY2?ju z4CEA{v~C1R5cCd!%w7_1bYbLjwYmUS*CSkjqG!uUwq7CGJEjey5yE3qh|YFP=CPmt z_Gtt)0$)3e-#D}L026>A-91^5(K*&ng_BTUd4U=!-FpbTX+}!8_MQt9xuUbai{Cje|go-lfU z(e2qfWSUd6>D9%l{Y+)zmzfQPAz0DbbD3<|n`w!!~6=T5x|Ub*V$$ zNA;*hADl2*N1Bq87$)p>x1QgICk?ls$WN$oGrl+Nh{-Wo@-H)N42>XRWh8#P4R65= zi7b8FZKh>6s{hOXtbaUHRvBozz>etK0gItIl*@=iAIc;>YaRU<15)l!6Ky&B&FOt4 z`5CCQp0uWmxdEgPsSjy_rV}N4zc$6o*9;}?w_0yMT&q({tM{E3P0$9Ao={NBMcnNN zqVGm1eAPz{KJYFya`(0cJv^_x9OU+UMMN>qPPK@+h z4X-dOc>ddht*t;k9q9T}akoX-KVYRG!S`Y%1}4Cd>ZtNCrK`~!3~g7wjRcninsaO2ME`1NJ1R-OW%^N+U$>S^5) zyNDqo*_gMT;Y6>Jy6{J2P`Gz-00gzg5Z3lr?efOd^zX3>;9E#ArQ%_N&A!J%ROwPDQuP?5&18jU?yz75d!PPBAli z;AuIkwWzn)=kCEB63Lo-Ezud&8zeHtVI5CmXuO%nvp%b%hBIvDYPlJK-28O;@)(Oq#7q?|DkGD1!=nJUxT{RH zJcPMhF)#8N!?Uc#rsu&XlNT@EA|6F_zgf7!3?EAGVSRmFQ{YiLdBfGdu?ip@-MX_% zqEU9~EIK%or+|)Cyl&G{BwR1bi+RD`6?nQobiUo-k31G6e)8xM(Cnlv{fxOP{esl+ zysTPf`IHol5W_IS z$E~f;gXVW@wzVyXeN%g~pfotlO;-xTc`lbj^P0rbotsby#_VZb0csTCtG8T?r{TR= z!&6$a-z@b;7@;lZ-?QE&(E4S$>%&EFx}uJzyNc(RmAgO*`Ee`}?vAVdPv#tq#_wSC z4gr2d=6k2^ZK|dG&9<3Z`Jm9l$IjEwfQSc_bMo@AU}w=knDzB_fYSS;&dJ1potfr= z4Ib&Lk-ED&`)XpR3yW_C#nAPs?qyw#?XTh3yr=mIH_Y#M0F+;0io8cYXwsTumEPA8 zD>=B@6}352#cJLiopZV~^!==))O?%e*9z>~p{Ggg{o9X$7rt_ciU^z>O}3RYmIDwe ze|ZbUkjC>>K%@$n^yShuPRNU$dSD?bKcAN|`fN=)oT!Whz2d#b*DpI6zEm#+<d~*jaSP)5^t4ditU7S?a z_CoEWG-{Z7rr6+yA4}Rf^C@OaaHX$oh_n8s*S*slM3R^h7)-vWTqfIezVN1h zJKBR8;I)sdABt{%A+edQ#$EdQs!u?`iWn~p!O@l)Rt(#>B%t=>0{E+-6-~Arze3U7 
z5DM^1pwD&!kgrns%tIURiG$%|%4ZaKVI)1VB2!^Vx2DBr2g8bwC70q=?x*~jejAi8 za=vMSROOin=v^df#jDN7yVWEwdYN1j2c9KO(dS)`kBA2O?0ip)R~Jt)+#QPCkD#m7 z^~IdS-j<_CKLEYW`Mifp8J;k7I3%d|TrC^Ul(DH!I5#|Ey=c?;;i0uP(iQ z_izsR@3ox+F2%P-LHg6^T!JHu%bte}{(A8OQBDQv%(`ZWLW<(`5)Gsuva&-vgfo#| zl8aU!N({j)9JJylOsn`S9$e0;W#t2QckL5FJCFUULfY(H%xXr?<;q=s7go_Bdo1gO zE8;^@>CexN4$!shHM~8kt5Orf`S{^f9yFvjP$>!mK8*i|M|6GYnm(Uno7ekBzZ@qK zpLc_KY>6L)C1S`x=!AC{jI|9^NuZnx?Zu*&SGaIrcB{m_(5|nZ(EMCM`+9-8CFzi+ zqj?hCd*77pfhyJ??0rhs9+D%zaJ}8WMPg+H4zLzjSHo|S`OeP0D?xh62hCUGRU%(h zg-KP5^6d)rOmToR(^0i+LKVT)h&+ZOl&m%EZut^3B`A|Be385%M+HfOiSYo#FOWzw z4pX;OYITgNmz*6>s}!rrQz`SC}MR^lZCWPqG;N) zKZFX(cUau}^Ao|CeefBCtj=vY$TiHAcZ=$dQ<4M^j>mMX&~6xay(2h%_6+z^lq0`p z?G2AF4WZob!|Gbt3l^vQAQJhYU|p%rPFc?*E_n^%lRa6|+EW_QfY42*s55GGDVa2r zkfJl_GDHEJJz%%^oK?C>8(g_od@EM}{_U^QpPcp+vl&t3Z6vCc*~(C_2g)6+627|e z8uKTXBk{=6&UkEq3jk>~-+Me_G6A)XO`b*RanR9&`VD@G-QC6a8$?g6J`FVeLw8w_M{Xns|{$1 zgNb1sksyVO?>c?o9WnMy;Hb|(VZMoLHH5C-i{EwPkXzyaQGpTtNX3{>MTB%2kBd3< z!Ec0qF9&-T*MUwzOg!2Wp*DoEXk)!sx^Ys1_SY~yLVQ_nLN#iGuTBsa{dxO~hp|I< z`+)DsOswhYBiI)+(kalcm$qS3`Y8m^^wq}=s>wM64hCG@XL+qWw1i=oHPZ`d^$o#@K_B$Z&r zB#8cS*7XyC^&yT%9*`wp&LE6`wGZv_xsVD0X6xK9V&^No!$hj%R&g$iwICxDl(b1W zAr;bpkejUs^@1Vui%m1RfG#@=!eQ=JEpWEVvmmUW&~45d^ub17$R!MyRwI5%ZbAVT z=VIna2ICkyE_KT8oEfRK%+g~JWI(y6JRu3X2PZgIeR1t4nTr#Cl-rP_ik9!G1 z9l-6$vQ|!_k}vHn8ZO>=#&1G?3ZiB*x>b()MfKf#i%r=h7q1InpUfz^Ucw2~ZQoYe=64r;H39l)omX z%Vs2g&JM6>8mCgOrg(Pe9u*?xr_>EH4di8hd|76}1sU%DsYhHjCLomkE^9gp;xE>N z{aN-J^q46%Vn#9z=E9Oo_A>P*xI-cK%83Psd75qZn&R z@WANazP88WIlyft@hzSFnj#Y*ZO%g}3xM#}i`8O&X9tjzJ>;}aVFtf7y~%mS2)%Fb zlQ(WdLhiqRvJc0T?H-wYM-|Di!wE7;5&a1iR&)%68kUk`{%L!<8OikqOcIN8vKuL9 zf4muW{>WDLyop}`MsRl=*+n`{XFmZtX-{+?!#OeoAKnRiBW8NbK>hPG-bc(Wye;YnEtaRu zNLE@<;-1kuS`fj$Z=-)WjP|~IDGv1bwJCQy1Ae={$CVUoE=O~GN{)u^hIo`2IC53$ z2Wzs!vy>CIh|m@VI{OxFTb!?zGfU7GU8OWxp>Q@3>P#vA<|~{hX9I9i;QMh?PrbV- zt-b+}J-10?+)*OM#*_XsuP;&ckZvEhOhZ4ZoDQOO z`P!-%6L%0(`{a9}!0pP0AXMaGIQ`MKdyjF_aZ9BmS>CzgpaX12V;pVD&2O(Y5dbK!e11Q=175_6q*s(Pm$#V+N`5N*QnJlI883p+D 
z@e`BxyXFqFZ|$_Ai-*(R|9*j;m`*KEa!D)^Tfw2R@i~M@=<7G|o!jjJ3=v>@S&$%C92!N}i?eEL?EN zUqH!Vw>f?v{cObG+}a!Mvyw+t^8Yp7nk1+s{dqrnv~Ka%>?zB=ssu>D;PXTfndP{X zseJAt%j&Vu?0NqR?HH;;^<~(LQ-D4!714}-M8P->BLNDne6;S?qqZ1RBh@kn;g?t# z=)eYSGM;7aH}UVr%&eFxx^ZYXS{3F~lZ=o)d8=y1ZWdT1lhYpI39+H+n86?4?cMvW zd-~qbbX8#?i@RvmquVbpoKsGQxGJ>83HOv*W76t2$MO5^>vuvju6lRid;qQ|!C%?p zB>;KD40+KbjoIx`5^sGC`owZB9x|Jb8%fm83X@kGGd~=OQ8f055F-SeaP1cRu%z-^ zEO4S_<_F$pf;yfFIR2cO6=+CJ$FDVZF8BO3h{PSHRcY-C}<~T16t_5x}0LSP&^+_|0V@tpkV~4}1Nt z4w@x4H(oEIZsC`9-b*g}z|VekZta7^j;&%JKsYeCbdYOT6ic63OHth)t_HNlb6!BN}T-7tzr84~vMzF&eCcmm9Dr(x_ z%c-17N8&_f+XY@-20B49kLNkmHqUv0Qo#4G^p{e3#0xQS9$g&=q&Mqt%Td#-^sDih z=?{`|V*hNkhjDUEHk5Sid}x$=O2LfFuS!Z3A%Tb$enF8S`;P_)eNY;|*4f!)Q0G>D zw-7P7jwp1#pHa=Y*ueom2Q<1xq{s6g^nGw>sLOIShXfu_Tt*BSd( z_L>#~jbyQD+pBIp?$tM+*!yHa;HVO{DMFvtH?qd zEx?Kxn@m+Bhz@X-M=br1RhW>ga>VYZQ~IdCu_cTD|(tcO@`Yglt zo`S{Wb|!ha41dNdV0UQJm(cO?9Tz5s%_b$_&0t=ANxE&GkTZD z`%o^#)-p-s7!Y@msuzg9UMAqx=xr4|ekNnJ>_44C5+!fHQXu^i#5Ld&&sAynP;OsnJ2CV&?0DV z4XdUNeB)elIEH$Do2|^3m3@nrb*#Z)_Yc?Y`~9If!b#J=sts>p!QIPU^3MuPsxPDX zQ-&*)`~OT;T!0Pwfnfq4U3FL$ws+7hO5End`e&(?S`|q07l9KnfF|gz86%ohSdTmp z56%|$5+7J4QDzZ;St;-=7R2r@#yT#5E%npepr!164IreQ`{qs#p029%4uoV#YxEM${L!jpH^6Or zv#f?{7ep#jG;8J#P4ud10d&@2jR#+1$pj9?<+``38rHs*0kD1y(6o?}sMxOr-Uzn@ zO7cg+*&vZE?N0G4GJeO~nl%##GKWLd6*c-J8hv}c>ZUzvbj!F8b%qvcw$EDzrTTDW z0iDzmThy$pVzQ+C6XL8aWOi2h9sZS{-*)*m2gCtte$#~sIz}5a2g4VtU4YPmUhdN7 z1l~SG1OAS<7`~RstE0f69%81?(g|B@6D}(%Ix{?C*_4T_cdW5&6u-g+bWO@5vKkGBnEQZF6YebF)t1kJ|tN z-(HXDC&5v*JxkC%8?*aS87fXBo{BOeQZ-$*kvb6-4@k}P;a$(dtmrV%dcS;?+x0~F zo8tDvHs)L($C)cI>1Sw`>f{pU!-pEPVQg7SZ1J>sj;vWTy?!YKjDmK0c0?LOPFZhL z!E8jgp@#*xiX;En#k_Kde1P^y+;zRYqMCyquYlwpLF4*NI(J}rf{=fGV!iREYq}ezu&vUS-Jr$+ zVa2U@3&1VL8jXYrlf6t-d+{p-Wi&(yf7abF`-8*>g3PZes|MdphST4t3cV7gIM6Cc zv3Q>h9#d~Q?0s@wAW)7y_Ma1ucO^lmt{*ne^O4miU8Utk33Jfvy>T9 z&r74HA@`yMdXuwADXuHWZ^`w+z(#0!P+IHoYA5Vum5$v6EOIOO#LJ>ZKNfWxkd_eP zS?sn_+7!=L=bW7tr}#EOwaZGEcM!FZs?Slw#M4L~%gP}J{3mpnk@7sX2Rh9+0CygH 
zT|DUdVQuZwn?|m8?1|?dK9BpCv~FtCs4FMp(}faNwx$M z5|YQpfyJL^piT)oCra4(7tcv#60X~Mew;t>z^#YjEGfMFmceg>Qw+`aQRf(c`Zm(o zt1s?CCtRHkrGzk*{t$Y)XPJF?4Zr@5$yaI*@=}%v7ePm>5pqfRhL6c!0g3rbfIC}S#UuoR=XYA{?~5WM6cy}; z+Q&CLsPsM2oET4B^QM%fKZ@x~MmWWaNBc*6-{~L~$F)lSg&ea3%bQYmME~)2RGHd z30Gn`xj+s3(Xm)9SNB%59j+e0xLnDJd-TkdUZORSGwAIc+ zGx18`-Q(A;`7fDQ+h~CEzs_dw4PY2YdWdDa!DE>9S@EB%(tfNVb*c-b9ryfR0^<^` zxGQ~?QKRK-uk07VMH12#={c{Fib?^0HIOs5nADo z9)uZ-KVz+$%VGPT>wJ^TFtBo&r|}tnG&LGRuJw?TMn_XQ7Px;^Qe>y3iPV2HMgA~l zcdbl_U$i$y+x=v$-)(rWhG(r!i%>J2-G{0yMqT_BpSWiu0 zrl;<2p@A8@wN2>JN5fbx;(S58bX=95z}!@|0hCvm4@ZK$S;pBo1jLR!Pk7*qoOh}b z^!g(r>9s$QN?yZeL~8N(V%xWFM+GY9IwM*W-9-CZ5$C<+@-NuQsqExGfDYeXK7u#c z?!bj$Hp+jWynf!^mTto{pU6tR9|0jK!jUIpVVP?tbf>c#;756Ac7fLcz%x1{3h>z)X9Nv z@%!@&w0p6=iO3CJz3utRkP89 z6@QDK${GlUTCl7mmaLkLWuLG7=g5jHWAk(=UXQZ90vsiRC$j$V4*wF85}$L_^v_pNLFo8PgZF;_X3Pg3C|!tkO>_nzu#87wUHr9~Kj*T@CgLKBGb*eXicatA>}>MN@*3SAz-H z0kD80jD^SM&SiKDNV=_nn8OdWiJ?6fl~BFocM_GRoixCaYuf5*(ZQE{@wY+q!gbCk zblqiVhJOxv`tR{LmyL}Lz4xqzJgfHB$VJdQ32WYCSJ|Xk*?+k#T6K%5pf>+o_I10D zl5#QjJ09e825l*D+=vT`%$=W_P|DdF@@thwk#nhkpf#_KK?x)w?v&wY*Y-lcXi-Cr zU!V@0L!2Tk;W>8kV@cFc^EI~{-`>z|HU{~896^^nQ+Ea})M9p_q@xZG) z6A`FEMAjJQZBGoaO#d3>%zrh*XKL8wM|&S2R-k1dTz&qJFsbx8!tE}0a%}bgm*Nrg z=B$thJjy>OL+P`w14A)sZcnm?@}QzsWPpV`yd8mmKmP8OuCL5Rp%Z^tV?V84Y$+;q zZ^3N8{ZnygcLYa#DIb-&^*tOp9OdP0qjrc6Lf59CWSu3`{%`hq=d7i3>Dw5#H-&Zm zrFB0I3fl`gqS?o3tmNzTwVoHNCAN4rjf{+r_%F-a9zGj;=3|>3ZG70Cf2Mb~bd)1y zcev;=`Z_@NRi==FsC<(4o8E!`7}nPaAM`uK@v-$VR*lqU##0o!`7^A__ql(d!iBWR zxWwk-A15eQRUX2P%B_hYH+guhrRf#2oWvXIJYIG&q0~iXJBPjDglF#ws$2f}eBVe?6ZAP;G?fYH3{+|lzv`c@ z*+5wQqr>#OxyZqbpwcWnLR_uVraTLxk(|#%X9%^XdX;M~7ciD|RS)Y?OIHZxXCgLy zY&*yN(2oI17T0~cN;quc|6ii^(NRP?c23aBsUL$(htZ;}HTOY~R>jqrT0f?r+=kaX zIn|hC3${|V_ai!dcN{&lvsiv1TbTnXJ;77ttZJg6^K5d8N-oO+qvf)0*#CK-o}=GM zpXi7V95RW)+=zJ^&a7K3VyfIbk5uzEX*-Ehj3L0QH~yr%ylZZHQH+XymYDa)_^%FzzhUIyHo0tlru=OlW zjIhLL&qN*Hl|bvE{~&`D#Df(=jMI$VjT$y@27kp{2E|+`vC^bYDQngq73gF#S%} z;)L=gPxhZoPUP8pZJrAAubv>Sv7Q{GlFG=(FqM`JUft^O*JVH*1V;mQ`pn*ZdHWwn 
zrlG=ohdCp<`D0CDLJ4UE(PengE zt{3l*G@4(`q9^rlC04VU#-@-d5&GzaC|)Z$x@D2hN`{1>nkfCuF+WIVgxUhvH;%lZ z+BYxA8h@0d6V4arR`o`BRegV>$lQjEbUi!D?XTYU82m#;sqkFmL2H0swA!D~hQj=R zq^7#$kC7)h=RsqpOnKQev{H}vRgL&kl~tRQxK!-e7t&*tI%lG`KY(W}G*b7)>di79beyO%PmF{BCopfA#`G;e0S`*30IsBK9L6Q>etGMd^9SG;S^k6@>rL!N^^d966z zOhvl3xua}e?Q2h>lV!n|Zo!w0OXCj$n~7yvW! zl7+9@Wa{NYkIa6riT*I4ji5`7nlTjg=ifwF@P!h_+6nwyag(JF_(?ij8z6mq6vgT1 z$IIBiF_Pg`^#I zXy(g+Zi#EeFMvZ;O?)~t{a4Z{t?a4~x_Qq{jd;JnRCegVn>MKLHU)BpiGPOVBWF)c z4rd0GH+*PF6%Jrs8mWKwp)cddIW4E57clmVL9>v@bWc}m%*}yg?7>K^n9F>jTKcGj z?Y@B@axH@J`0^BY+E>~%+0@reMjk_EHBiEg=T9w5d-vrn@i|j|bKypSnbL0pb`B1? z<+(%Q|HKoyKYV_0%|UD{$FaQCMu#%(?MJxZ_etL`|5T82N54fS_0kAhhFbj5PmM$@ zUmf;j+T}g;o}7i--~;P@q^@LGMRf~Zg3RTB58d5;yFXjhZ~1Fplt$9CG^=B|?P?W> zpNH`@Klv3!_FC?&bjb4X@Pr-$lOfs|XPkV@uWS-Ct zxO-X)gKz>i0iYvx=6(9Gb7#3qD(}M|ZUROKO_?s@0uU<~yt7AZF0ro z4%gB8&q>L^uK$Zou@(%9&J)gUx;xQC9Tm0tJjM1#pZBF7eL?9ylH(_Jc#eR zSq3#n<{AE}FnC<}sD}~cp_#`4dhEpg&|`h^eOJXs7}MXJ)(1_Bx+Z9QbcdV zyB10nm}YolWbrvQ7d5}%SKUOc$Jk@xVG1(K54<0)GSKmlqp`-_aLvbx^~1u-_0ER$u>~^2a9``EKgxPQ@@hQ{J41KQT<~ zmNPnn!0iXthWvhKZtC##I{foXWXn7YC;xQ%A1HD;~2ib>zUWL%wPM2 z%Tk2VD5$6`&!qo77a{BV_|UwtoF_g}mLUq*4=lFhbFr9WdIg+k1%QwH?Z5pwAbfn* zpU^K?jcGnjqvKOFhOL?hD<-X9$OgYrJjG*jL4@!`pouV`PyqG6|wWF6&tm@3h$r`w>4T^{$1Hc2iI@4(1v z2_~&yA?x=e74l5unUE`-{7eA@2i_H-LiIN?tQ1=c5_&DUHsm~+;Z3YAM=qWoC*6~6 zSd6WaN52amfMODoAbUwN8E|Dhk|%AqaG*FhuE_f zR$rBs}on^1>v^2@g%>0&~v4_vO2aR6IFm-6241o@OP znz9#|M<9g;;5Lbd_V zb^lq)fz*foY`3;3YBMN0Q{c5=`;9jt{Y1%6C?pu0=^A|TFml?s0l1a;R}l%aKeHO0 zgd>CnWprE2GGd}^EmpSe#C>rUzWs06{5X`|*ksAtgqI2$LQ*CrVOXWSs#J69q{t7X zoThYoN@f>9TxO4G%RQxIY`w^^4H~OI4d59(a-vG=_yHC13mBXzzE|eQGfGjdX(?x) z>{b)$>gkmcbpCzhiY92a^V|R3xB|7E*_6$zc4W6?0@zW=e%a-yH|$l&nTXqRUnjPj zlu4l%KMWx8yo@~ufc+J#GDeImF^Z~MJG)s0X&V|w89>72AqBe4aF=yb zC3^IJcO=-AAmi#eFQXEKQBF^YU8th+>++UM9NWJLC1%-{7UYW8b-?Kh}-%*P9!Qr5&#G#5?cM4+!9MjE}p+ z?8hS>_x6kx{W!k-rY*HBCe7|X|0yn_@9TeD?Lw*gkzVp<$V~`i!hJ-stG^@g_-HuE-t@`aZKJFL1d7T5K*cMZNrX)adKwXby(g?gm 
z%b+EC_N>PI$_Z!et(^F%hym(6K38&8zeDm-R5{Q7c!%sn85>$RXrh9p?#`7!l*K6& z5b$W0e|N?oV0Jsz+)#t|^jFGjQkGQJGG)5fSP<UBP+gBMxb`%m`tY(+;xs}BNZjwe>}6y2gY<*H!+P>PqiG~vtCQNgjm> z3?X6fBx04>qTBti3IM4i@h`*b{45CT|Kg!aO?qW|*rF+UrijQF&#mC6oxypFg=6>S5L4 zB|+Op$bo;1S&$Uwq}y8|dT5Ny;IELH?uA#dq_pM#vhH#kgc+H^5^29W(!XKY6R+$) z3OJMcNsfznA@qJj`HLy_NUYT1_Xj?U?${vn9S@^O!5#mL4!7t2EG|e4_}NsoAYjqW z9Z5ks$Kdd3+Sp;HS6%!D9b8XVURhw!o;7?E{_kLcC3ECDU@(atjRyFmdqA0qbkH}8vKFdJRi$}Hj}5S0baV;S z7jhI33|auU?>kfdl#bdTQLmLukIg$OyQxme(V38~d1LN(TdMdy%a{#^FdcY$BAdk) zu0=HsQuL$}Ru8*b6A=CkXZ<#ZU-7qPPHH1{0Vb;EJn(fV*r0sF0HVT3m~Pi2)jN{o zo7o1I=K9c+8hJz5Ga~7~TA}#elAZd)CT^!|wLbDXyUG9QeccqFivdz9uPzAeV97lt%rLAK(R_BH%M+%=7+}ygkmc5)p#4R; zq!$f5adi32o}ur_ntf56Ig#WV5Ym1|U*0B{mGE+TN}VzaBI$jSpy0;6=|X1uH=M>Z zI8(bQz)1&zC>S;I-hRvS18>wTxQO0aakU^(pDb*>)r&80d=Vo!U)9SmeT2 zhgM9Y2p@^9$4j{?4XN*ieEJ?xtcx&}-Mw_9$!7v{;v=H5E!*Eu6j;c{n35hD(acdU z5kHi3c*37x+gX$b|7~I21Y7e}Yx2xD2(3ivO*NBolZduW{MNA@vkn^OA+NwE-v6*X zoR{oLH8#3F?Xm!fSKRw69zYAfx)#Bt2%^uy#Tze1cuIA%Q3=-O|0P4%PJ^QYL-EB690dhZU<2hfZKA-w`&(kr;7VvjL5&D{K-66j zLH`l-X*A4lC-^z<6L#)HdLrpPcV-!5(s}zpe;J?l)EEPkA`~AS0HOA5A1R-_u>e)} z-lYkm*YD?xP&o@QPSBxQEc~ki)bB6}cQh+m+&W-KBKBHltIk}3F>ZwZSLGGw%vH1F zP$-GU5==-+8Kb=nReu)C=7#4;1Fr-r!H}u=?GMLuCs7a9%2mC0RIAkAkkk9tXMM?j z#f7bjyfg5!Ts{J`Isw}3j|Kveul@XtacLY#CPA1W!&2*6XX$tmP_+E6lT$*A^&AQ# z$3q`^H_4|VxyvTd+Af!@i?CR^kT;h-HkEZ^BK*~0A;o{8SMeqU{hQ0m^ai@;!8Bo1 zIp(yRL4FjG6bl5XJ7|6fYUT)8uu}0EMZV*T^%#?)ZeIKl^PZ98J@O}cf>mcHI*oV$ zJ@{uF(9P|c=ZAodjbHYw$8-I4+_?k$3IdJapLb&W^=vmzBt_RkxlKcwUviqg4Ld>F zV?i7T9}{be5fAW~lO_7jrMTKjT4yG0ard)gRc(CHEpuF7Lxc&hS0frccnBeF+0|Yeka=H*~$aTI+`?o<9P5DN|3bYyrfDtHi|kDx!$A+?4Oi% zqbAVil171$1K3x`(9v3t+Og^TyTaoh1v{6$<4q&^)g50vWINh1;=I$&escNiKtUS1 zcln?l_v90NAgiwTk!j4YTstP>p|RJ}P=PL#jJ%5kyM*34JLP9((Z1c)4*hhctt(q0 zrk{$m)C-PH8E3taZ>~#p`X=rQ94%yVtXZkUAU6^pt-G){2A%br+X_*XJO)54K$rr? 
zsO%kNN;x0##e&MtP@3nWIzI)gwF5W7Lj^}@Em>3cVQVbtlUNyJp4^6#Ydr6dXQ=gG z@CFK$pf$_00RQ`sh9YV`?yfC9@v-81%kB(1A0oplK`*-w;BKr`F-0KX!mV8p|Niiq znlT|10RI|Kiomb(r*x+}sSL228N>4H6enPW!C26o4pP+3Qe0u<&ZNO>5C~`!YN-4Y z=g3UMY{g>aznYZ%HMC!aM$n1T{A|od)&;`Zh=USeeRZE0 zM3CS`XzQikCoQ7e-XGTOC~N|nAdp2;U^7hBcb@<2g5c^%mcEC0mI(+1`vJe}zGD%A z&g{(dv=_aHxYHtnG1_xpp}rgs1$`AI6NDRrbr2>;EJZL93nFYOPTJ2gdWh-c!C2Ud z-gDLuV3B@MR?uzxrmGg-tp;Pi{kx+L;&haFGGFpijVWde9|fLCd-sn82C!uWKl9i^ zlZT$=`gHOYm2xGS_S5pU-Usl%kmp+ZhO0NoBj|5!J^P<+l1-5ieyWU*G-7gm%YDS71+6Iw^0 z%h1f-Ybi?5Fvm@fL|@T@n${Z-x?zeB{B9HYFWuU6K0aN{DNjDi)dk8jqI~&Zf&6gW zG84am6;#>KlA+th)HK+;>Z8b7Mcy`6PqU-m|CEn!Ki8I$plMCiV>T~+ctX<&Y6Fl? zCLllpK-aVFoDd+KyuRFTK>ewxu^j4s{ky`)t@7ITAy9=Ryc^I)^sl=E&8phYY~6zZ z_kDL7!S|tp?c!q_*3~QSf6>QDTWi4`A|&7-khwj{pQ&};B?E<$5(uE>Ewowg>j&&) zizm&Yr|}v_14Z*&a3d5KPc)E6q-IF@5OsC} zqj|_f{4J}awzEDE*cW+^tK6LJC+yW^6U}(J-@>+c-3_L1lVM+zZ8Dhy-Q}dAbBjz! z^~9Mj9y$6jXPHOVwvoJ5MA3#Ud{Yeo`Xy$vH81azS3lwI30EC_x9i)t?oaq>tFXs` zdV^%b74KCiFs*)cHEmtr2Zh>WR7(HU)OoIj@~GoZCK3lU%J(R?$wl^xwd0$v`@}$y zgL!}N&NtU7xR1^3=;Ot0Bky#q)K1Nn>&KXfsD)^n>3vB&9V+m za;+vS?CQ&kU#;56ko|V8~as>XQXKG7iSluyxxeXvXhrc zBMj#mRIa3V_mAy7eEn1e2Dp2o2f#@9B*^8~qcv1NQ$J{yK?xFH2MeeBey~;JZ=*m1 zqCBb`=tLAwSgteWN7vO@`Nzx>?_SP%Rc7sJwmL{`;5?SI4l`fh8E=pRN;t^rg`jX6 zuTgCN&Goi&vCfYbS>XYeB5S<4&jvHqOYgvArk!g5%KX)gx_&UjR5;~J+VJGTwrKa8 zS88J~9}AV6oUPoUwVNPQ4Ve->ZY@-y(av;r#j>yUWm?V4&d$+S?4i3AMo)=fJ zje;Jz1e*L~ZMN4cVCrChvWEgkja7mSU1A~5{n>x77T!C5s?o+we->l@_CkDR_2h?# zwibyZZ^h->;M`ChJcR6dWUp`sKMzm0IU^JeNWcG5MwO*;vgQcgQowKf_WlG@*Qf@W zCPykh1|YI68yRjatuR)ZC!X8&QC#dB_|8-JIR)0lPMNFy5MZ`0C@jSA*dFZ#p#Y?K z5ro_uP}cYYU*cX+;=+bNM(Jp7jMv@_%SPz03vmh~-V&>SDX#VSnOZHl=Rs~fezpxJ zg#5wR9zazF535zq&f8({F?T~Pwrwn=EtryX*OmGip&J%z{GPiOXpBz|Rh&a>@{+LMq) z@mz!MrLQF4Kl&Zles)KBIZw`qKV>xaXUiaJ1~>2Ksv+~1;sk5(S$iG0C4)Z`IXoE< z1zM0t2!LQ`-}4Y>M_)18cTXxC*e!p3{(!p7oLhtq(!j4x`SNfn;^Y;6_lhJ&_gmgK zFQ|*{lXoC_i?+;bmJIHksnzwts!3M)!CC|XV{Aa{5g!$`dy(DUcAQ~DZG-bRvCoa! 
ztwMZFKuJ*-lH32{wK16Y)U!An27j++61K7>Q)e1YE0q87U5JHJD%1#T+HT>Bx!DY= z{CQZYn5XXZBdF#f@tQ_{%o+Y4al(LoK`ZwIubK5ieEx2`{DArW<-pFFL_1oVI#{Lz z>ZE={Tj|X3ucVgn<|d;-9?KUzt5ZL*2duBkbj6Guvyx%YS*k4M6tUa9gW2`+inDe0 z2|siukG+_EqK0CH>ts|+!@XVuqE;N&EbUx*c_G6Po-io|E#fwFskKN;#DZ_x{HMx3 z{!sH7n|2!sZOqsZ$kD;|NX)d@)&6LNB=8_~!h*br?5lNP>aHb9`2iKWYyy)^Hj^7( zHn?T#C~7c9=*`F2*oS^Hd4Jc&%oe%`qU9eGh?)P6T=Na&x;1{t+VxD81EXCAe*JRb zAVZh52tczzXD5nQwYRr}&U_gan>OXe&rp&DY*0g6JqEYayIqeszPPtf7Vuw(X=Aon zBOGMRJQcCwM#csysh@7&^(xUJ!AIV>sSu_ew<4Qn2us{d=cz|O+HNakR;nc8QDg)W4*>K5 zAbn^btlOU7K4`y%;4?mhj?HK^MsKXET*lj*HJ@X_6ZBQwt2DM~B8VdGrG+i)Xh70J z9QNHYbuB&vrB!PRM&HX)yr_W`MSSS_pEWkkXglqqenl7X?tlnj+Yrg-71)-f_z8;E zEucU8>vgAEo(D2$49L2v9FM4N+Fcd6r(V1suyfpnW^Wsz>~;M9JwLR;^@r>x@j4ZK zm86YQC~`n-6U(mS`28L42bh3;gWnyxd0-yq*y%>|GiN! z|1!wbKuHd=mw9wBdu7gc^pTCLK-jEGY2GKv;hjq-wUZ@JLu$BCkqB%SBKK1yo7L?i zi3YA;{Py^ZDy0XqwtwbyAujjmH)u4DliPj+&yDTHEx_B5X3xrRZuD-crzaa>NR!87 zN$F~d%4NcW^fg-X*3nDIpUeZHlu^V)b33t+9H_CIP}YmVQiQ7pgdp!4KJLs#&**`q zxL-0;Cv!a!YrZcmC$Yvhjk$tqU6ZhPOB)ilZ3lc0yLsqpUKx8b=Y_?_a*a*w;n5Zo z8Q^x!t*(v!(Q=|R$8}Xcmc`d ztinMN?*D|Y(>|spD%fo@6h6LmrqK;<6s3II5?_&2Z9A~{oigU?nG!D5LjKFi?M!YZ zS#^Oxpb0{%(q${)wvBZ6oZX0?{SL_cnO3WPQu=^6SCYjri+x=$UdSM+YWL;%Pq7c< z96B#E`|Q~oEoDa5Yv)2?WcUp{>ejur8&N+KHK;ArE{5(mbPdy(Or9Uy@8OX{x!6&P zub&9DnL;hE%mTAZq}#yb_~PnfvE}I@Icr_--RH!<==5TYgEEZ2f;o!@r;XF5qOHCM z&4ibH?adaF`gL@JeNEh-oPRbH(lxi!vB+fXyJG?=xNvf;N;IW(@=#moulP>2K+z?WF4SZBNnH;W(;D`8wF!pRmD1 zr1ShBS6c}Ld7fh$RZtN@v%J+T#1-*5R*we}qqF~AHt|@?S8;6qFEDh8O5HOHK7tQF zgc4ZU{$4c1h3lZ$I5odqslG?*po&n(&d7Pv38&fd(-{FMDNh3oa1r29qjDms3A>kH ze^SUKXmI~1Xzhsm8(n1Lk=yS6A!7U-3LttJ*Gt* zlS|@kiLu7jVr$w=1`^v{6OYxZ51(w$ZXet#t*gHvN}gf=sd)5gDoN?}df5jVUm0Ax z|3ts>pfyy@XxJWPkCv3G2YL54o;ypU*-;B@eH~Mq<9!47juxTve;gwJa!AMVC#Xi) z*FM_~ch)o-fA$eoCip!9r^ACD0WKlPa%e09BKIv7jCj!R^(6TRa{_A)PheI zicJzK5aXfHk6O3`qj9c-e<8)6TsC#{l|9zgZ?@mwVgyj5@-Q6#wK^k7+DuMY$DuHx z@}PY>V;$7f!4O=FnhQk+8Rgg~stiJ$5T)W0lNr4ge|<5xNA)q7w$y#?!Nt>5DGmg) 
zr>MegV{uZOVQjXVKI$@IP=pXt@^91q;cnL>o11L`H@CP!_`@iyAOF;=(~|(mQ@-8i zgvlKazo;Oru}bR8*y^|v_r2&-+G3cwyKH-AkvMNv9ZW6#Os4%1Q8C{?kNkH%<>YrH zRC!T>UDNN=aAy8=I|0qx1GRN-rUmaq`L8Ve*{W&Tt2w&X8}l8bq+MG#S_$MeOF9`b zKO7tL^}dAqUOR4UA{7H)++L+ec2;NKUG{be&%C>f=@|C9dGFW5g!^?OR*(bz-JB49 zh)t7S2eI`u`NSh1z19O7!@xb;VRt5)nSFlbWHIJHfBX0vrmX?k&FztP-X74zo<D z-rJ)_1Rwx2Dzfyn>g<^^KR0?g|Q&-Z=!!M;=S!P2o3}S zrSAS7x{ktnvZv883NCpDXqZ$ny(Dpp{_v!*ky|gorLvK|SkC8V(qz-pHNbgKAJW`k zqspeGWl)-}e-fKtTK-bP1dUT3%k2(y{GmA{FIhOZPkj-b$tn`@5BlBTn{|i>@Z@5l zzk6AbGjnwFWzqpHR!hR<;JSE;V%8?KXU%b&|DfgFqyq?jeIpfEy~moWT2NbCJAdJk z3Gf?v?JwYuRs!W@dXe(c<+-xnrhx0XYkRwLcf3pnuPgg?3#j(O?+WgD7n92jl$(_H zbA@3POu;Nxd7H!DX+<^_WTa?8ZhoxYqYP5o6XhV_^T>P)R_}l14HB>=EAVW}o$H_n zz;|8%G#A~UHP4fA^r``)U+B`>gjIuYWa#p>$$2^U-@&|bxo$ve4Y1c1$n_Ipxg}^4 z6?j^419(5<ZfExLVLfg0>;@p2k(G}aF}`^7{G zNY;D*+-9z^zr7^~KJBi8*<2FZ1@4X+!qkHKm&A`n4_fKPX7pSE7QTq3{ zL?8RzUMgM#zVtVU^LGBHWW5Kz229tN{|@XbUy!t8E@y#@WrgtLh%Gswq!?TLQ|IMUm6 z!ezB7*ht@mj_2-wbN$1|j=&oCnp@KQ&kiCkj3^eQ#1_h3dBbW}5Y28&;UF6Rz7T;8 zSLC%Pmbt1Ozjt=dAmsxtt7VU*Po;g#VzV7*-qb+f`xM-L_P>Rnp03YoL?0}t(RiXy z8^fZbg~>@@U%f&v%a{NzYK_$@{XBo6#zqFfV4Mj2|LqRFLOl73uUagO9<+}oyK-G@N*!eYe!7va zRN($sZEeG-w5MwC&192Z-|zwKQAMS~ahsUWs(&;$GtM`DPan}5>jH9xO&7$bMwwo7 z%z|4#A{@69G6599;W>iEe*B9mqt<;=5mX=Uv(p%y*-NiSK#!0PUW0S5M*>0mhspLS zXT*siZZnmNAXtEKIet-6-r&dm%5BU(x($g|waFV^sWb>)Zu8xio+2Y3VBs}xdgTJO zFzpa;mm>oAc(97qx5z7hm1B@F_y%PG__;9HC7%-I49=%!Xawv(cO_E`4#yu!go68I z-`2GK8-m^B$@j&bWf8Rp+|2b^vfcgpez7-aOs+`gy_1J)Jqa>DgR$vx910!U$uXXFVjuYj zg(xywo6#C`zQefJY+ZY9D1%DJNwN$(V~D-n?Fz?B2Q>sPeB}}=X$@?{oNE#{)5dOV zAQXKNc0)q?yLs7y(WK$0&ECL)ygXY4f_nFT`kv8h4kxJPr| z`O4m%T)zgxlt%%)9;2VZmltGc6Z(5RE>Z|Rt{3_SZjTRSmX^{5oTSk==QYGZOI-92 z-Zw5+@JG}W9vIteOT4X46K&{axkh%CyFc(5(3(qF>2N9@q-A-|n3aeaN8Q^_0+dZzJ@Cum>*fDx0Tekn0d7*xBae@-eITCk z>0(r&2&{Zkpw#oQeZO&z`O?PbVRTXN)M)<5hOT%Z=4W^z1~>{Ua%EX_4=P=6BZWxK zPhb6#3JlHNw6UM6w`n|4lAo=6oZVPuaeJ>!dsAgJB@wvL>?W5z?c$izaj?*AC#*a* zwgSY9;9y>G6pmsRx~*=xSOD>r^Nmh+2booV;=Mi_pc?Z6gafgL;i|3)v%{9x34=~-7 
zx!+FL7$@*8wdhF&q9N8}N>&FR_m>7K@KW{vYF>=RKI~Z8u}j+Y2+v}d$Ku|0guU{L+_V`-2?{J*lrqElkMoI`hE$0-;W?$egf~j+kmP zu`zls)C%_DT>Fm?`n6X7YzC(awjy6BO|2bWOP|RC01xjnYNS%9RvcF6DDYHL3HoNx z1BKKiQazM3C4a;mO_On9&zJ_oTOsvHC}EAe_8I5T>u&=(=P(21Hv;0BI9X9a7(FlW zcQ%H!7XRx#qPkP|sd*uqC7a#Fc^PEQG_ znwMq|&fzM*CH>#O93YaL>KM!Z2zo!YMu!AirxTdUd!>~F%F37vTYcC>SRWbBRx({s zy4>fH9re&p3d19WQ&i&d@>!`A`Od}QjKI6>?`xP5HlUukwsf-D!#OHv9t@C||6tJh z^D(1Zo-y_ThP8p1v0}hn+1`buh-UYw7_X2)-u__E05&n&@2oCFZ|C}2e0L`;Gw)Z{ z7Zgo}09n~>QBt5H=w8#_PE0dxF(l2|mWU*W&OIM3%ftjt-|5u89R?cK@ri?Z-_-rPBA^RA>LqPk1l<9 zo@=ff0Zc#H#-@l;0x#E9=cDwHD$&}1pJ88|k{#DOBv`;JPwx2Zzm%ryl7ExGsI*J5(+JN_p$MJg+dc-gyA;|EtKXGJgt=`0rg z{q*o?iQxaf330&vO5MC#mw2O&uG9X7B}BC9?sig~f^p)oshEkQc)ZswzQBAPu8z+} zLSOhHD|M2zoHf6xIiM%mLBvr_oYP!n#wNrSuF#Ngpnw>1e;1a_JB3%oG3w?G$-Ip4 zP7zCup}4vnJ*K2ftddIBihmSmjvwWsL%MCOaGb-A_iIwn&a$w8v!!x<@&|-$+c8Tx z?%P*PI!<9PLh<+GFF{(#HABxyLMoJ*Lt*y>Q{|c zV;+i8;*shWH0DULq6AsDdHdcsN&L4K4lLa}Y`?e`GMEq|XhG`K=QsGzch3 zZn8T+ct7Vq_~Wsz5cl%;{}y@q=}+PJ>g2C}Hs1fdn%Q+!7li+0yj5IF5PH1+$!6|- z(mzbN$_J&?ul4iRAc$S3lf4JE$9R$R;~PKN#GNpDO(o9fY%#=3Nv?S|N2936%(h2l zfo{RXp-7u2rd8iyET%~4*nGKi*5S8pNg2VJ^NEfNpYCGmL8;}578s#}ofdOi@N#~H zt^s4W7C*Sj)}^6~_WX88A^C~DFKr@ix_y`ZNNo{mvZQ53G~Tywq#eYvN9bb>ayVX{;GZ;{YUA_WJ1ObcP&=EV+12hr#$s~)lGy`CbwjY;ZTWv zB$8SIV)#A8S2EWF%<%ECM5TE`&Nrs^o+b|!X?u{3S_DB}9rf&3?f`}~%ayZ6f=A8c zQ6k#||FMmwCpDB)L&JHLv22`&)P0t$r0X>l46={g*UwNba!Ip`Lsv|)1N#4a=e%dB z>9Z4f70(l+CJY0j@Qa(wYl{6Fl^DBQ9~(8lq@Vg)ChzDS)wMWE`KL<;eLgO`*sOo9 zPO(rmz1kSZ|M{nG_8CpY;1%>OyPNt`P~KoW{v;o>rJ&hmg!v9n(6!f+?`pf?8M{uY zDl39Xznd4tRrt9NNi@PjBP*gWE?v60s*IT-nLtwWWq&r2s3f_0(#%OFiWcirnU^mu zCHx*P;eJo(23}ffPjWTi(?P!U zbdR%Y#AN5-=vnZ71OFp-Oi#sygcQ+(%dDP-`tD?uf8tr13S00}GyJ_AgWo^e3HnU6 zjcMf+Bzp0?zo5DJ>2#Z-NfW)XVTFiAK#-Pum5B&EWc>E`N|Q8-r*EXLwo*V1$pjc| z#(D<*fUVR4hD`W%^HXU2z;on#$o&~Dpb;n&vCuumT?a1E3VdmL#}B5WkL|f%uWVhfAV2qwaiWA@{J0aZYc(XhzZtX_w@&lgV;=kO zhKPk>z3<^vuBYy!WSBU8Lgy!2)*~h>S4f~gS4={joML}t>o&e$Yl_fofpJ(INE&kp95VG;-Yg$-$6BsU+D!|0ji*@1O5w+J&_s8^R=U;?SyZ 
zIL$#{r$AMvo-hj}19hX3<|G2#q7bCEWeA5zA|bRf(IES&?7-q%peQKka3wEG=f1tA z+?jwT0_s9e@bw-@26(7|r$2ucyRkt+CgCptf}?pu?vcUwa=UNX+09XN`|56agDGyZ zqDT->DDgpP5}EI+H>rl$oq+e-)0?`r5i|r-WwJv5O<>FGM<$=Xf)9}bU-IOixp@FF zfuN-fpmOh>sD0fP_znY%#d|v~f{26sFBA&cLyc^oJb(4+9R`_z13o~t7TlyBqQ>J! zE3lFp#$*=`O_;Hy(6MI?T!!?USJs&q#w#F@MaMn1?^~zkspU6%nbB??d2;Gx z*N^mrnBh(~sezW-1TJky27rBnhJ<*_rcyyKetj_x2mV-SA5!Y*uB`s-ieByab7Mp! zGDtT8M{i&_jpLxVvKs{%G028KxyLm1aX*G0_m!t;=u=$HoHBkeR(|PeOd*Le55=s?`tCYZo-F!Wwtm<-pe-aB%+OAI`N9A%an( z0TX8(fFC{KA&}1+n?wXZT_{SSJ})-lsJMUz?*6Z4q92GJLOH6Nk!MYV8o7?Wlt20S z|3Z2;pr#uogX_|MZU7`ZV!Fsm_Y9&0bWc6pfTxTlz(~N-IuR%KicNN(8WUcx8@P{h zvwUQpaRrFv1Zr6V1lhu_{QxBc{GIcIl<0JP_!3w= z)dpVi#nVdBfaEN|*vBc+gnj_}z`6lvKWIq!t|&TpPWKG*=6QDA`R(vOjimMJ$sfAI8wts;gui_{yaZ$zzD$ar>bMjkB}Ble~Ekv~Q4xELpKBp!`JK zT`gy)qF4r?NoQo`_bkPzrd$8_0&mk{LsHsx{m6^ zy-MdYgWv2;)dlTphy#ZhaN@NJJ$ZKkK8gs^Gs2i>X;s}n@^7Bys%#!aTJ#K=J8;rW z-1A2PZUR=MygY_z>cw>>A!hSof&E0@T&peUo;moew|To1PRD&dxA2hak=@9#%f;#`WOI`oXO138(Uv{NaqBHN1 zKFiT1!zaQ5eXEoTt6S-rc(6G!2Y1o;!+F=uzf+n6fDFoe^JU&|ciEQ*m=k_fE5?r- zsI?g)OifG62kFKDRL&5vv|2%^cPvH&Et!F^3Vz;QQ`g~zf)Ho{g3lWiwr@v|y%$tD zx{_Kmr`djNv@`z#RE(v-sj<0ccZQXa;~?0-)mel^U0}loC8I9eyHEh#Tm3E5Mkr9p zo(y0DDzG-0gp+_mB2B`5z@cTl)a!{yAQ6G3uA%CQMaZ4tuP+vfz$b&#q#5JMXZ3^8 zgQNLQDI}RecOHMQokoQOn#f0Y=#ZJ{`9 zC6f#k0Y=Jk4(`0cLF1x812<9w(_uKixvjTAW}a#!8wC{U4+P|fYGRO|VYMEU^6oKU(Y z_w)7WKG*_`RdcXwQ*p~%TxR0)ayJ{qTa19x$HX}h`M+jB9Ae#+n_@`m59hQK%4R5% z9!wmI8+-2_^~^||pnJdTaOlaff{{L9m6_{#sQj$T?v6L)(^gQ}br?4%E2=hgec*+e zuO--u88=s^AOu#;Rtjlt(RKD&rdqzOvZ~kNk4I>cXrcqFL5a%Mr%O_w)Kxyv3#{_8Rk6sD8_JQhe zvf3;F!Q<8#eDsJcVAN;{ls2))KU=W#Lhd2nD6pG90V4O87B>5TWGvstrvyFZyJ5)T z<|Gv^acWWs;{vshJYQySClD1@cHWWP9Em+?_rDnaG!b(#5g&?z-i0apJ|eF6xYp7L zW~02koA?fL*6|$(3_mnXNKyjbH7jtgO8^R+4e;llSFRmQ8Z|8teST#mQ-@8#aT~{l zM%QS{k_Vb6YqSrS(br*vcE(p%yVvEhEVYmW1Fz#>#0WXbD$PxHdGO|)nLaV7Q6>u& z@zgSS9)Z-=T0+?*TU}>1L4wQ+=qy#XMhU2pDE|IgrUddXW*}Xn*&H#~G6KGSkKbhi z_YqTGmPTlZXvq21Z5Ab7{z=gjob5XXm6Z2mg+NzmB1StAI==)2gKY&ZAmLc*|vA*}R4yZxz)|nCN-v;}M 
zezww_FfaB51eX^i12KiK-RF_C{D~zpyT7QRAuR@UJXRCdiJsEzyA%`-mk}VS}yyzgF}rt zKLB%YBEai3|J8}=clM>pZHOuTGl&Rn24ccG$0_C|uQT=iJ8(+qK(&-FpOqz5UqxE- zfUrL_531XnG()%Ry;25>7-UpnB~+yAIQcUh^6+puAjPv&#`|vMa#rfWxaJB41DBw> z^WGm^Cj^rlB*0>f8-&#I2H&zNwK?XhkMxX8OpJY?^`Oe}S#Tr+sTK$!!q?*;k6b5$ zDP7yoKg~#U(6Bo{C_pdGc(~ea*zZB+Ho_3$a<=mxE$m<j#EiO_C|Z7v;AN-tG2RNATwEE#+$Bu;SK4a81+e?8cg2Bdu#;pi}!j zW8HdZ)*k>^=FO&+SCtdHvxk0Gx&$L@=dz~7D?za3SC6z6-~5{Kuh3Z~O*DlyIq+ms zgoOzkFu=t=i68c1-A4{;4To9Yzt8f!4u#YS0bFrM|N94Z#U%de5w62ef{G2?})8kn&*!dj*i!l zSVI+Zgn(9_uhGBK_|16|nI+GUkbRRk2RJx^Vw1kLY}fg2p)mtK{RBZ)tZw;d0;a^H zejZUqa(6>A4Y2n(D2nvmz#$gft{;;nFQN&^uRJFtqf7IGvC@dsGONPQ?}c6G<2OG@ zj00-I{QS%C~+4*-tcqI9! z=N4em|4!r|7nfZ=-hu9DG$z%pkH9w4u{(N!dYi{#P5SnFc9*O%XTMBF+R~EX{X6q^ z|MmGyerz6V1ogQ#grWAuLEnXn%(NNx-Ep$i)r%UxhuSns;X*HBm3wYDDUz3Y+tYpNIZEF#DFgys{{e2isNnl~O)tkl zK;^qsm+|5}*_kbYli>ACrd49dbO!G;dXT@U zEKETt${!tCU7nzvq(EmFrDqQO*fDIn%(bO-AqM?)#ZXb zluZmakW>xzx1~zPw#2T)emmp_)pR?6DM#r%CyV(C4H45I{La4{X@c4#|t~ zpj`}IdECumMoI`17qfZTBda%23AX)=(_HhKkOfF#uq+s~HN;egF*cyd_5YXi3ikx4n#6%^XU2k_P z@xB8>ALK1ELPxppU=xJ_2S8v42x0CpYoGv?1aJ~Z0~>t-cKpyb3LS~ZX3>1y#x*W^ zV+b5t_(96%JI@ITbRatgbJhefATtnqxa!v|PWA&`HoWil3K)BWe843HV1bVjW266# zdFiF4W&Q?#J8vdLosqDgfEY>g_O~o&#sd=;gS^8`YonBI^4-LQWOg{@Y&ZXNEfPRu z&Rh1}?o)HTKM&o47)-;<+p`uhZwHM&dfJ_8@zkaBxV_u^lLqMB6USM3ahd_ChbzM5 zA)7CMR*(H3gemg?{fwG@U|r{FQ2AqqA@WrBlg>Y4QHEhr(8nd#m|iy#~zM=q44~MdYmS|1;qqTa3341>xjoZS!X4C z+;YDQ-GPUn=*$fmsVMmg67*>2bN)o5VstuG{#96(P(FL2uEIO?D#w_HMz5rM^LZm1 zt*c@2989!ByI-cH;}q!9q)~yC@!3bE@7_~ng8olS?8ff@QdkRP?+|L{&_P1coxZ1;RCT8;M4;$ z4i7)TJIG2jJB6mWE#((ShJEccK5dbS&B@VMf10!5i*XYvqTdCFs+3FHolufBqb%^_ z7Vfoj2jm=(6fi<2daLBIWugVsF^)3q zx+U|p>og%X;Q&k66s2scF@%VTdN5zTGRgYm$5(b;H`eVM=bQGchhXFKY;Uk3I*YvE zeYSTP4*{DYtQ;Hz0M3faoR~mdQErowq=oFpY4goubUbY*w5WtRDn&TOvz^Ft481MiA1qu@Rlw23L$ybaz zBPN7t%j(@9-%Eh=O*YtT#*^_PcZ#2<13d7e&icm3%VCvPN zKKq%lC$(xir@Xu^+6&)55tY>)6xci5(!djz*J+v+p;kb8hhTbiTHdVhymIMk6@0^U zo?JfM6p`kyi+r&hKnu#rNFZH)0t7xXK-U4vW#q1|Vd6?Xwy2AHO&VW5P%H-{eXkp! 
ziA$8aXS6rg_C7NNHX&EU%S;{rp9^rdYYmbJ{kGN>-Il#)5uy-8lq%p9%zbs_7fSn- zRZJ6hF>Li!pp4B69k~PKT`alZ(7J=L^UWwq*Y6-n_=ESG+4b{>d(r`@8!`=K%kp+7 zcJ(tK%>c2XX{eGmT^{5fg0OY~6fuQzbcRBq^ZT2Ji519zqJHtiLr@7Lzj7I!7X~$9 z#aFE&ve%+r zC+7ib+MfX3jKDXldXzW<*3OBac#eoWy^(z{3=?^-&6k_Q@Jmt1L$J!=dmF$qI2;@< zQ9lq0yzkpp%M-{v67z{5{?IlDZ~MASx;ggq+fK3KQ1;KXN5x%4pJ3GE(c+vT0yWGQ z;o4%d8F5c3TB2gvKT)idn1YmW;(Um#+$QgA`{e4>sh|ATmWLnL_c%9mbwUEmn8R&& zzTYt!?WT2Ld?%wh`xQMI4Z)_=`IV+RDk)#G;r`$~M0{NZnjx1If%?jCl3>$ERbi=0 zMfXRo&P!6kkGiZxttM35_-$PD$q*d#>$?o@h%wmuFU&c-qpsuD11=0|{yU?6r3FXfFyZJh6F=gUp@Rf+%4deu2)^*=tyoNIk3}V4%qp>S4%YFNg%=C?E4+wNmn%O@3)Qbr9)u=M?hq=Fc|A& zczAlY?1uc|y@0-J8ln*I)%&6&co^sFzG>eue;J^)T&`dFE3n1Q^7W=2L5_syCtwOe z4V|#y;(pqP#)e$q7D}I@@FyNWEChCicke$TF%4h}Hr*080Q;PD$8rB`5AfZeE*}9{ zAe9I*_DhX%cV^8@tKs&KPA`Sl%1iUC=Id?yFO=B5Nd#oo_hMM`UT?>yVN`z9_1;Vn zMp#iyU4SeEIFj>fM=lV|j{d;pc^iWUI{QM!iM-zVBc<}a7PeV4Pn^}jo9EA`rZRqfi93N(%I3B2MinB9UuND5(=VpFSQN!9Hw{07r2kTgvMir6dC)nr&>%{r8aEqw>g7Pic&=;C< zY^5*I*u^7e?qfYv*E*vz!zKjyog*1^ZFpo@WfU`<#(rRE4C>z;afnXKMap!I1&DgvuXTIE*7((X-bC?{8UIU>QPIO9AEPbWiUcjxUI zbiSBif!Lg4U?{EuuBt1 z!GlkB0s(`Fpl&xgw?`|$i?Gy7HbgZ{4(r!=g@ci$i zpJ_FqtpO~A`R088@%{Z}owX7MV<^gihlko-4R`?E^ddfoWI9ty6-J>I`W0ERi;)Y* zK5Zho>7^lg_>VM>LULQZ>x?)R9%P<38aXUfVS?pGJ@ZD-{^9-(yulAuRaL4KogT=CIV3 z`RF5;<@tAuc2JQuTz*}B5;<1K>VHe@!4Phle*2&;dHCe@2_a~0Y?VTledQEcVPs52 z#ach@p0@CuPla%=V3c;KnVbFA$4iJYw0_kM@eJ+yzY|ehk}qDljO#5?jVVSX{Q8z< z=0}CpCp^e=p0TEGss*OPi8dW&yXgZN`I}KzScP-}ZA7r4q+j#xm%z(b7CrZmzhPQl z58!MCYu+#lx@ie=%|imL*QsHix$;~?Yx?DMgfjt2fSp4gSol)iwDRn3j$z zck-_0R$mQ#{{#aN!IPqvKDE62oF8ADHyGa|eTkEP|ImVOagQ@!bK071vCX@nhwB5@1^e@HFC z*eYGr#Oek*G*U~y#rjZ;o`DX@R;ClybBi66Yj=o=**RejikV>v;dfiS?AkKYdMI)z7R=!WPYAqs?nJ{riFmDPt@kOd(T2Vg`KAfjr+|sXyu#e$k{T~ZxNW6oQKV-f z=k_d;0{e@B@&l2dWv>;b{A?P!DK7WZnEoF)dDRwFh+8=k9>Pe=E*)CuPD&OR-2wk~ zCfL0rWo9{cB7Iv26BQZ?WW_jk<{1|jmr^K0=RV7SwuBuXBP`CA-(ku(Q3E%9C#kjY8YyL z74&|o``g>68G3qL(X+N|`sQg+gZuH(%KGGtGqa~Fx7e1jR05}1I>|$y{bavXSpy@K 
zgz%ZZ6M{;3%}MN%`~Te#FKVWp34cYR&rDO;Q$&(>%e2XP*l-g;iF5Lrwg8NUdVRY=%og znSKTW4@sUCNWfAiM&7&$X?{S;DgpoIJUjfb7KAN#G_}z32E0eYNGZUvuN) zDtHz?C3AfHfPTDMe1iD#tG{B=t4rmqpt-L0a|xZtzuhi?z_2L=u21)al8Q!`a!DeU|qBSk7>o(I-1{|j8f)Eu|R2FS&cmlG1^P`&WV z03#xR!)lWDrMs?H)e@S(aPpM$;?}Ql#QlQIIlIGV?C#7C0|LQ+#?CmgrKc+)iakxW zoIzm21X;f#y_db~3^YdqU`Hu{Nv5mKvQbM@BmyqrXA^JcU{|D@l`N)sgTV}oip^Z!oh5X+8({yxnp}--k-e=iF=d{geG^8 zb|`bo{>r^Ck(sz4?pq>bgD>-pP6cpI=@j+r@C3MQDh z2j~_m;C#D1#lFF#dcSrj$<1&KO96xf0$SP$FrWco$AqZ+Mz3wV6irPu0Bbz@hz-}= zUWodwApvy<rDDpfChJmQ;B;eEw}kLfPpEe0k#YtLAFp384{u~sw?@?90gitBUQ@-Cxcik!9GoaxPRyCL0>&39DT z`OGFImw~mfuqkiEW#uKwnIYzPdG7A^KnG?34kK6uYzyT0#%<9q|TJ;DE4j&)4R5nZYlbTFI36A&g37zvmxdzi+H{~pdz2INSD33sCgFLw|q*!|lp!(!gh+~t-y-dbcJE>PRHyrFLJAO! zmcmn$laqCvg*d=621DZN_J1aTU=xePZuq5pLq5)7vH(pIir2ZS85k%oDk=(oSM;st z6@rVUjBxB#`uz1PYFwX!Oc+t(K#w{ID+doBU*=>Zj@tv|b*%feo%jbaGBaA$nIN#> z@W(g>qh>zJcbY0HHj@Q?Umd4j{E&pwM)T(_Ev5WDUX`kye~e%>0cuD|L^c^}!HyU{^Sdmq))`eHT2JAXjG*}m%_&AO z@_Pn6AbRZ@;Tx*ld~X*7h!XJ;pq}Y`yI621ZQ`tK*wj#q=M^yccz<{E&n!8JjmaXo zB?*A?g;#>&d3;pks33h2!8vO4J;uGwW4^kLn;04$?X7Y28y96G^Fw6Usj8DcC5XLFLH$50rV&Rn<}N~QXO7dL z6L(8@at%YU?F8N~q*A)V2ugeMqc`?dt&Q}(Q^x`w=seq&0_#o?Zlb7NGF90^Z<+be z*f%x~RR|$oG!Tv@ZxyBmIH1FOB+BSVUtx)p}@V6i!5DS)63l+6&# zTi3=AI`eBm(C1m~EtcY4+#f$skTPjsZaED(FMh*`n~}NUSV##t-3o!-rFYMk8HZa0 zU!eu$EaRu(-uLXD^S)k_{^qiLZLxR{%8WudpOrEzR5+FvRU)oL#1-KE1v;9_R{>PW9U!X@chDsDZv6l>Hfb zS`-OQCeLA)KLtphn~#F_{^cpnsYW_}yGD@agq}Y;osk0U#2~(vLYi=Tb zZ&t+l`1pYBI3p16ps)>%kMFGWS{vPOVwqO!n0$5UDuo6s9Q=CQd^$|SY4PoE^0gFj za|4!#vcbWUvQbY;%F5mYi0em{u_q0=*e6ArAiY%>HY%9?ZhQ|#Damfm$+0&2Vqjh0 zf^oC={MG4pH@C8eMoHCZ_?R7kxnV>zxRpAtPdO@z^?Gy-q6Am$2CL2c&))nj!bC7b z1f8b8Z?d5*S7&zG{dnf@b*!Q6$OaCyo#}F2*v)vG*YD|2Fq#W{<LuvqeovL+aiwR|g`<1k_PXEmUeF?L zt)UbaPvBaNj)zP-8v>mj$r6-0n=VoYuoBff1cET~you)|>!tX`?HRyTO#kq$K#t8o zH#vkS#kmd0GNoU?rh=SW5BWBZ+IeRJw6f81w@$u$zT3wZwH`AnGy%7DYx(At7!0I3iVu%alvGp-YCnHAy&s)#Kp3J{&ZxM!REzrlmYTj- 
zU6=KA&1{Qr@!Vx!<&K%CaR)!~u>Wu1@<9qBkzOqhDwG7P=2ICMjLKYLH>vZNoO*lfVFpYbx+{Jh}CpD73>c-9LLXQ$d`@Zm2iWwcCn;*-MOJjN+k%Fc!SD2R{W5#0KUxU@n+56(iA zjc;ame&ovk!zS#QB=4Q z_&E{*pAHY5o&QOfk!W{53|~e9{f-{>xWb6P>0mZo^xPv{uz^b;KO)Qg2vl8}4f>fP8aKRAf% z=olD8$<=!9YDC}z7~20vc2%`$B%#lWiECWL-=88Hm%5wSbn{hF<1-EpY)gXHAa#_s zWq?Ji1Jc48@FWc7P52qCu3}QsWOMW3?Oc`5)TMG?kWbe=dPtxoEqBKWvN?41cN<#) z!}oxoU?;sAjW1b(@T;J$oxF|17XEZssr^d;4LlscwuDcA4i;lWJ45iySs^s^&7vRo z=jkq8;~>OcKg96*7_bRGF<9r=y&pUwCGg*)Nd%0*7Xae}r#;$6RN)C%Q(Rb>OtAS4 zKUWinQ5)%knLC<%WF)xl)Q(O4{a?t-%U2oIg=M?6#^kp7)Y+Xofw1N0BO$j0HLG#b zKMLeMV`F14_euqq{Ce;~v(lF(SP5ufP~#R4+h>jfb7AA;UUOJs1kudO8Egl{F8_3Z>&q_vwo$G3|UWI{hZCz+i3^*s_ycN`zjW zc*gwey!j*c!rlu!g7r(8xE2#P<##=Mp8=WuQZQjc@)Z)Q(1V2kzbE5#B#-gPo$Fnj z7U#iiJzL0iC-0$UKtTCgP>PLhW-@6sz`Ihwg5AQzfrFHyjje56;FSjgzaRN4fCR>(s@mEFh8M>*HT+z@C5H_h&G^%Ubtq8EFkRyKh3aChHDYX>Vaii!FOQ z*~Hw%y~k&sz?85SSQ~txk{tbG);2UUQds*r2@r=O5}*Rz8R3Ct4_Og^{|bO+P{qoM zUdaLEY?G7c?t!gC{le#o3khlu$P>Q>`v!YlS4(SfT{hbN1yQ`75dl882s9)QOhQk0 zvd<8Nvd-K3OdytaU;7?xP-Qv<%%H0l6^(!wC@+<$M+Pttt^>ZTLe6v2PEL34|GmlT z57U&g-5w?ekGZoJ!*ltO<%?X4&kw^*-J56Y@YH-q6TJ?XW&hi=5$<`Amy5WfOQFAN zU5{y2f^Tx)@A^*uOM?UW`(DTIX)EevgPKxlF$lw^VZ%Ji|m0Y7Kc5i}gos%On_>Klrzq ze6p!4$4Zu_F`a$H3@Rf*O!3){)Jjp&*9f4;0qPizDv~&^28X73(9cd+>8A*<<|vj| zrh>mj51w$EBs$C%eAAC;G5W0N-VEU4(ux8o=`Is^2Y@>(!hHI3-j`xS8pA zLt#@8y(+CxK$zhJl-56O50X)!P36~xP71#{KPayE8%lnS=Gz6;Or3qTtBpwi2!eQ5JlNjB2p$Or=)TZ~A6_>4|+9XeislQVS2 ziGgdpzsk75txZC&;x}*{Qx?h505wsWPB9jcAlQLaRxn>m1k|uc(LG&YxD0l_*sQ51 z4wxKr&d<*Ss?>OD)pUjTn~g)CjYD@JvB?C4Q8aTqraD6i5qHG{a#sW<(eL?676=4_ z2$jO1Q`RVt>a*Hj>q2Q(QFZ+J6;o5<{ApibA1EIaaj6$|>#QPc#Y^7ZDbfZ-jE+`{ z1pMPX+wHWkU(F9f2aQ>oaRW2(7iI+boZp~nkn&NDX$zr2cG_Lrg|3#7(veA{-+H7= z{Ijb6&jla_gb8cV%6T0vL<0Ih#V`pWIM(~8*>vY?-BDE2pKli8N%d}fn4Mm>s4O>| zxin-&R_G9HnQ=9B%V!~{PSywlE4NXf_d=g(2laSBvsDpb@l9hQnP zK?8_N=djp{V_uUk5@7oG9tF|;)%ZRjf;B;%8QZh|efOM@mNpSTOn7H>dp&yKN1;8Q zKqnL$Y%&JqZ>q}fG1a3_GhmTJ`fvR*BG-eGqSAZabmVu(Ae=ur_vxP1$q<4l4r1KW 
z@MSUIly?5?;JX_He}h8EdGLG|PhsjduzroGf{BHJlcZ>M0vOMnPl$#{LV@chx<{ip z4G^JCBp`A|HrGK{3KB(iYRn}F2?@O>lJsO^BlU2tnKj$ibw61)O_h9S$JB9}a~7%+ z;uuN4mu}b@8xr%zs+rJqatd0`ShnkNYQ!fcCB4o0zB`|Ku88zcM9d&A4RE2soYM;n zYg|`PPj0ytz=+%FR{#tJWB1;^`8wmOCoS$6dyacK+^nCQYm-{f_u;4UUN7n-D)wF}M)QZgGHsJKf)m1%Ek$mRS;K7oT($H+*!-Tyq}-%r)R*4K}OvT@W^ zX#Cb5A->+!ZutHOxft&EwOluyopdGr63Zs6MBB^W|310Hdal$g8M$ zgPtt1)~F5%k)Dg-$8W+T`bPa^?kh) z*}{o{Se2ZfJ^;vk%EiNJdnpiNVh+9ce!-+~ET|t4lcw}iQ*(U^Lk5!61(oaD=nhie z`I%h0Ve;?2x{@`|{N2q{MgYiPOs;=kc~ZX@+oX8ZMt9)wdm;K#Yt5~VV<9XRkP;O1 zl@it%{)QCIPtm8kC2CbV%~TI{$)fMAHc7Rxc(OfzF1W>)X8mf6LQurxk_S{=o0l;} zp{v*wK5fftIU)pY%j3eTpLjgBa+Mf2`|olANhk;6I<~oge|PSqU!{lHIWh1fJNgtH zcnFydw&Ya=E;TamBv8YEAxnymM5N+?4BA{jq%z3QKRxXqe14@dx@$J)hB3kdCVXBGjYdTQCncrT{}5Q07Mbo<`a zv}+7}I8r~dXg)pE-NVBtr=k1UFaRVF4t z#-$N|S8L*T-?AIiu!vX?hF?3w{!uUJ7TS%yzkfJ#edqyGbT)Q&ySX3vIU`c`^-^wY z(lmi5FF8rDpPl$iaGJyBL|9Q5kMSUE-LYSczZf8~>nU|x)p`6rv8*D&=5GH>4?a~q zlqZy8rgExL)i60F<)hN@@DkV-uNzJ3=HDVpDLcsO;>PZ@XU$6ra z{{4NKZ$BwT$eCxd_N8h_@bz@@^s6$HMs}g>=WE)?=6P3qf=+t&;NTk^8!NV0?a)(F zk(m{Y=A2(Ly3h&_w{A5F;bw2_WWyTx%5O+dKC=NP222*s>pWe%82JxU}wps76)t zz`JvV!#1dnPi;r`7TU9U6SIq`z`akNby0Gmeeqt2$#N-&gAAYYhr=(J=v1ku{qq;*;hkHTXG%yHe1CyIlnaHBgUbS*63E&8suV=!y z^S%D-2wEU9|9^)s8=YoYL7w#8h1A;J#p=M354y|)BLRN{*CYoMfGX?x16XFwLY|ju zY}fh0t@!wFw3;-iHFk38i%qaJ)%~&9m_WQb4-}*?t?q6DX4EDqsr6ecf}uayYdVAX}nfOoo7c>06xi4}B;>g<|~|1#b=R-}0yR4Pbep6V82gM=38a z2O;J0hgHt)cUK=a3bcDm9|iPg=mI6(4GM6BrpM61XOnHZu(jbRcUdS!z;6@An#i+0PXb4Q!;0T zRmZa8m}h$RV53qv>C0HCc0-DBzZg%S$iP0nc1d27A0lobr)anIFxH`{w~)z@HBO!B zNyRQWp_>UTkF5TddU$bq=@y-|Um%=Ti+9^z+1sZNCV!?nSl=4U(*4?wcFN^( zm3%b?pFe9owCChdfgP=*0t$zB%uE~x)mznV$5_2dq$DVSaK!ZgX!^>qDA%rSr4^(b z=@5{T5Re9Gqy(i?S^;U0kd~5e1d#?65Rg=m?hurc?(UBH*6io~e(il62F}bq*R|G} zm%bJjznL5w_ApAGBhavr9@C&EE1NhN6`W?9Zo+ujkrOSd1c)YVW+J_TObJDLoYZuE zvIaqm07Wz${hh3|{S${2VydNvf@wqKB_pw~IVbT+y4GQK)Sh9P$xX1VeX6NRIj!-a z@!F+_wIRQ{XJOCrg4+Ep%Nb~jYY+NG0*%;3G~=}jQ-bAPf~`^)V3S9>&~h~Ml>WW$pvL)eW@+TT$DE;h4}4S@ 
zXh-XRm&R8zwI4ROXL|x#4_wF-KeZ`)GPr39{17F=PN!HqKQ3sZ#g45%;!5q(!5vuu zj`$Q0dC{<2VK@}LK^-(c2?@4;Qbeq9p#f8jG~%P46EEO5c!l7-=xu!P<{`K%qdfdZ zTHZAbo`&YFhMe~J_)72PE684oj99_S2v;qV?RH+*qJb)aS?GEZq%|VGaE~On>E#T+ z8oU$Nv5>93@fQiwc@`Xatp38RMK+bxP7)TK#q#14PB3sr=_8!(TlHr^E!6gk!^w0B znWp9nZN#5uF?WPOIOf=LY1aRgMwjW- z9Q4QfJ0{J3+TS_apx}3;tDZV01Fon$K#gqHt+w_kN#AC$>?XvxJyI=@87K{}nB23Y z#1q}UaRWI|!dl4u$<}+!Q>QB@VVV2|gXXLYcZ`hz&u!X}2Fn>ggc$eFLk5&zpsgGS z$+OkK2Las#Q_L#sY+D7X{_?w!eHhA|;!G~IsRVMxmbn)B05XdJqK@}NjeZjRY#{%X zN)(ZTtCvmKk#4MmHPmaQIjX(RV}C^PD~+Q7oC7sALhmue_PYKQzuq5V+Xn3@OmA>q zoUP_WHwT1AmNgB!%gIql{J@B8YyKn~0i<1}-RS%N504Jo$c$IX&~3%;@^K&XY^h&F zIcI(>D|-i(p0Ob3L@l$Q(sQ%4da9=UDv z(bMtNAHyTp&<>k?GW}+NL)P{iw@Ufu(7E5a3k~%nmnQpapy_R5(z*fPehgsSfanzt z!l*G5Sk-yAdSRDU9?su-yM2N)ux%q|VV*bN8j#2VdB&t*UmxhV5Tr(wMnpvl73m(? zyZJ660@aLmtWot)REl8C-Nd-*$d&+hLx`)#@2oGw5_^n7FXXaHhrWX96o4BV z2)Y3j!iYBMKkV9N7~0m*L|_xYAJMZU7q18Xe5cNE$soIMhu5%Lag*x~|4|TIyxryH za4NVm~f5yPxR0-UtBD4{x_k0%^U`-&X)5Lx5Hzsa|Ilr^6=l= zj*X?v#_6chjXaGO3moQf7@EUS0-zLlZ()B4 zTGFCl`_XnZcpiXqSgY6|74VW(SDIX>iDHc$3>3Ibq=%XM(gZB}sVZVM_nTHRVaDRO z`HglH!1Raek$8%)6>qgMw;@%zqn3SU&=l-_Jig-6y{ZCyt&rcfsG_2x%UT?eF%7Y- zG?j=W2?RjOPo`V2BOA}(G#;I$9)<*+4V;~wd3YPGkE$)TM2k1sSM0SCshLzv+Wc*x zxX~utb`vk*lROurTh7gk+IvoW9-<>f`pIhIXDKzO9{J*wSbnzPOqOJ*&P{-C6(A}a z1*+8IhGlPoRROW3PzEk$Upo3`Qc@De(tGZo2zWsYKhAdM93Od@%|D*@+Ti4nJ1T5i z!ElK6GpYzd!o&&RV^q>EiLu3X@nd3fJ&g{dDtU?%LOpLEvX%oB7bG%b&0K-0yhn$& zvNus>3+$s3m=SRcp1PNU6D1yEMt}2E-L!r@KoA#RS&GAyP@q*3QicT~@UIMF@R{$) zvKon2Wt)D|MyaTl@8+SYU=6?z$d(CR?^=Wt7h6dxl(t+}$B=|!f{mkWHXhyo#pUcx zk;*j{M;6K@#$Y(Rv(``Cq_Hl2IcL!wg`$*&3Eax!+UtoQHDROa%vKCYv@G3TlDW;_ zh#`$3ec|o9{`X5nQ!^CXSwG9grdk<14c^>HzTGUuUwGUiNU^k!ufg49EkCQfw6cP^ zI2Z`4ri8=Y)__2bax+={0H8b{xOQtjeGydOOO5ZsIv~#Q=}MEV|Lo{GIVl0-Gyp|) zd70Ow=?0QNoAPqC0Yx=k7tJqFUvvV{0c)oVWC8TS)HML7cM8vI28mK#=kep5C_kh+ zkDWVb_wl{~q1t2BtjL;#q_L4L|AaXK^vM2O6uRN275fR5BSb&NTM)a$BP;|Ys4sh+8uF6&i?OZ`jx#&?BpZQ@M1lAi~^ z$Tgb9dBUDNt{K+H*Vaqt@BE9B64cR$8#B=mF*IBQT&q%e1b5w4@^1tr6!C2Q8Qry` 
zgw`D@h*HE;RtX-qQf0iNga-5*OvCvDCM*3@@I-nYuZPX-6qJ?WhvkB3pkc3_3d;n@ zz?T!1Yo9v&TH2!$JVim;Q~bQNAE+TDO&@-|)nb45R>-G>xA+-Q8JaaC4@(_tSgG@J zneWu2`T6R__<)+w{(6zDhoHZY;D{|A|<>Pc2 zMph`=y_a&8v{I1dov4|3l<4up$RIBdm?5mU!N%V3jQJ>K^r>d`{**$ms_4GnZ$9RH zhRw{}#;@=M9TKXK3mJ^G65EKS(ausk{vg?rwkMy#b-lOh&23xBIj3(kkBO`~M+b zx*&?Pc^3EZaHq6&lU+SRA~msDMgO6g1Jf>|KTARl_|PX#{hHEWCaR>Q;T&!LWMm*K zJjlMI54&ni*{jKi!`>5sQZ0$(U|^ZY9rErE9f|xfp_Xi!G{4P*`({S=@x;%OKVL^m zU((Gm@=X7Ui?^Am)YVdZXM9eQo`3Nm4^J~$^r5SU#I5dk_&o7<9TVF04@!cYdc7`u zv!ay}@83kun`;e|Tez$jip*qBxBON$B{hHgWHaSlQ*!rVIz2^LkkcBdJ^7OzXga9S z1v8|}PU<=Gwm3Mwvc2Hmz4Qy_=H#?Ei|!#8-dEMDR2ZJjN_G#wzB(VSQ8Y*HwO{Lx zAL)I!?Fvv6&xS3*fBs8j;1v?J$m-x&`D&~byy%x0P_9YBfOlY$lJMFObDG~T-4mN- z)V1|b{JbL1;q!oR54?CIGj9+DM*~CL5)!5Ggx7Q)?H^Zi*ytrlw`O*R$vD`UVNNvP z9Fu>NP2i7f%ToQrR6?|nN%R}Fd8|6=!PeyWr{$W=FroCo-VV~PZupO|3#$S7emF5A7^kHy;eKO*7q|rD`MI3_tHw44B&_<}t?~CFuhy1)q@NDLf~TCf zpP&OmwweKo8>*b0|GL}w@;Z3lWFMC6737tQj{+q7yFUAZ+)5_=Z_MbBb3&^c8VFBk z1*A|<{nR?fcqjQY^pOG~F!ybi6>}$Nup582Z_KR;ii;zJ99lLuuKXIN-4*;sKg6|3 zZzW7YJcX>FMtI1~B~DXa9|?CS#>Cv{V6IkXRxV7$LPQy-K6*S3^n-|)J~$ELeTZmD z4uhUGT%dxJ^H-h zzzs!>9#saQAT0q|Wkj$rTBp4s`Gp~!^AiI3;-jbpETJP1#4x%=<^(trcYG0K+(tpN zI)#Sy4=wE7d6-o8lNMA$bX3llbe>Mm96|MDTF314b;3GBVD~z zp+)ic)EN9Ur29 zue^R3{k&SH($p*t^;`;4to+B{kw!_Y@L4cex2PduSvk?sudW}n9oA%&IIzdj2WoEZ z{6L7`(cB;sV;-6f8}uPyafnoZI4xK6ZCqSwppWcr_=$nZvwSU?YNoM;ml?v;G0Ke) z;&Ajj6A;Z8FI)uLLgXtP402Pq^f^eClnoVb-l60l^n4PZzxUjezBe5|;SpDOI@6{u z)zwUu-MzH@p<;{sN=o6vUi_gs-Cj?Z_a0YD)avodgyQA_G99~6&?#kHh9DF*$_fKK zo;MJ4<}I4r)-;8>Y2{>*Mh*)gDt-?r3$YgbcV@UY{76Hp;xK?0es>2J8*& z1*`1-%x7cLGTawQhmC8v+QC@veFKqy{^~BKujWR9)5-@%HrK@cGpnp*qIC|H%)}T) z_}7RyG|HT8PeCkD7#96lqk{y1x?o~`Yd=+otN~~+%9_5Wm$63%c+d%8fV-;ka$osz zr|juz=*i3uq-&c<&Q6u<07@9Cr%3_Zr{ASH7fn_u1)mj<#Xd3UN2w>OJC@j&pd_%( zvV0%%K`?(jqy+TxNmHD_sb@Fip~sKj4;Og+eGqB&2Du{vr>_m#uZ+q=7fWb)?Xz5h zJ)~vHjjEQ@;PmY5$~V*V+{Vari3HGmXrx33(%;iul`i!Re%m7Gv)|5M(E84T3=SzD zX#ONkDQMeEq$VCFxqE$a01U#6dg@u?9%|w90xqBPV|1)e{)o3^td;baASDw{b520M 
zdk}JT<@V^XKk_cc=?~~@c3SM5i}3XYTI2=%%?fO;c=8`6TwUtz{(hcoy1r8JHe<;0 zzPv>m5e$wxr_Hekmi-xun!(|h2PLJQ!d?k5B41gksHm6?jUrlOW14!Cjh8{IAn4=Q z9-Y+$sJOqZ&g#D+BCrCQo<~w>ebotb_<0o~gNZlq@(4Mp8T%b+587Md(u((jHWn!p zfmN0o6PKoG&T;)ek{hZx6_iw7OgB_hNSJy{hVzdhs2pnOSBw5Y4{COjcZsPw_m#Y4BeS_?Zg9N|An9}a>hxPgEUc>L{-BZ=ns3u8)`5X$KPEA>tT*f94w>BfD@WkjMvPCP^Fx<#xG`eF6pS^KCg5bh=W2Pzdkg)t+f@n6Oh@WZh+XO5r?id`UzaHd zPu06IGDlJhgdeO7@PRO(&SgznHx;vB*yi?7xKLNHXZV9Za<@EFdRJ&E_6C|7U%648 z{5yEgLHcXHO*w!*HNWZJfu;0NYNgJXyP_WWVK2tYvNtEH;$c{JnyMO|b4rYhNqNU& zK}=(DnS1tA9c}p^9K-=Zwa&{t_LrbA!b?p}h0V*#Us&wrB_}BeGVaKKDnAU(^V*J} zJ8#3#)O`^A0)}!hv~xouei#7J)O<#nzu;nop@$8bRD~Dm`*x`(i7J-BHwsDxhbncS zzF)eV145(Iz^WpgSfPVVIz7Y%450u+^}xyq3J9J=_UBOPyItUvr$n5=J-Z?x5l9Tx zh~dz+(Apeq48j)M{tqJZF!L#VO^P%=(9qJ#4N5a;xUyaV!|Y|-E|!fqW*+YjdBAr$ zjh%guX3wG^F=KHxa))Qjyjk=w88+)w6kKH-j22x&!&?*3D1L>GROXg<8j6|R06KXN z!0WZ~isWtTvG1<;!Bg_ieHTDkp0y%HQBgMz6r`3ql13~Is@8>853r`@ZQFmfeC93b zkP0m@QbHpw`pPWVdXXXQTxi@V4oBO3Fqx5LJYfWWLZWA9_k?MkrdFPk%o%ZWB_sBj zyU;v^gzsI@MTo+iFL$5}A0HeXf%gm}*!Wp&FA`B@8(MyuM~ioZB;9kq>)udh6UzeZ z-oI)V9nl%PI%64ccCGOhobC6N#uz!g$d6KVWiLFVZD=%X@E}lA^GWMKTnojsJo(!( zwQ=MKUuFI$X%ohPTEA^5FVoDijC%K2!x=a3-LzlPoXaBuh4%M8k3Hj2y)|AFz;joS zo15T1?VCJle}&DNCTImx;7*gN#1{GYu#!A?W#$a!yBRi{J%q zN1=JhGbk6m+$FzyIJk3CVb-PuxV5l(sdM)6{ehi>0r3?m4b6Amcn0*M6+RBdquBHw zc!h~EM89BILyf2zDbx7^6!5cB-Ij!+r-xKQ5tI^4phY3rtx+)V*_S{Z2wq)W%m(7^ z?{#Die$jV_!!?^phzQ>l%if6AbCF1QAka*q3^g(`dMfHdX}%* zQnxl3KO#;fJZL#jX%@3YIu`u=?uIhPuwEUc30&5gp##!xd3|{js|gGRlHQf4^&}2$ zvGMX%R19NcW-%?tV%IJv-~Fri#S1~*FZt4M=BE!x&K-8YwJ01K?e3vCsV&WeqI#|^ zIRz*h!#&QoK{@GAFtSad?AAT zG6t7{$Zo%i7Y~r=?(`c(@^d2}Sv*W3-+7_xX*avU`>#IR!((aor#NeMFx!0g3|%mI zK>PxQ^d|f=6(;W8kCuPeH>Ur;79c7Ogx{#8qb&g2Ni8WQuKBt8VHo%b0LJ=tT>c)q zjm;YT25Nc^aEND()G1cwLX4{c?*NWzS<6>ejj zW`nD~-GE!bN0m^?hr_)1H}QaSlQKT17IxokFvDt$<_`=$%ZA7{22gOJDf{UZKc+N^ z^CVT8w&+9^G;hcGN-LTI5tHr5$rP>LURRVDfAt!0 z@a}Gb(Iuz-PAb4sX{0)V5z66g&h<|gtg7^i3`-tLV}URsETUq;%_b#oeuU3P_$pa5 
zE-kGaoBQY6!=8`w0{Fh7X02F&VlX_G3>aHrTR0Z6UwJRGjrH}1Kjq_+XIHK5QB(V-@ZWL3eEe zM=){=3WjoRp1N&Mbp7SjM~!RM*bJ?fm^d9R%~Aq&CqQQAFswmNRWKjmm8ad>>>K6V zH}j~t!G|KPqzGtVk$yE7Si8Zhp{GaJ?|QOrD3o!F-)sGY8P|cf>DXa*S?SHlY;cFVxS^MoEORqevG`SPsNef;dH@ENC zD>bJ=U)Oy|Py^?LXP|-_#4UGTt_N&6#3s>X-?iAha|tRn)Ho1B4E?ZkuXEiA$+y}3 zR~6xE=f`s2t{_&~g<$J>R@Rhd4uqi^_)WP&f&n~;{dhBe@-{pL3JgUWKQK7;^M@f1 zB(8)h-hCMdiiXVOpOK~wxOXii$D~NY>CC#b(&T>ye)hN+$GI61k6pSpSy{4IGcXO! zVGa4hob*^qO2fpgQU_T{EjaDM_}&B4ZuGi0&f#bFuD9vP2_i{9WEdHxnZlQRu+lF( zMkXCI#0+`8l;JIFCNOhz=za@2I6KQf#-{IF{g;{^dWZSf+~N%4S}-D+gI)q*(0UNx zG??zdz{&Xm`nOkUO4Jcl;0aRr$hl&5@qX(g#f~3L`4kUd3JKVanDqSRysWvD$E6m3 zaXXbv^JDgd7rXdK(W~=HFDO_V-t8r`)V9Q|$3|vI1VqTffm5iSjZ*KvmEZf+Qg!yu zS3}S$GJTPyWc%}Utk(G+NC@*jmNltsXe=X?64{3uJe?o8Y)W3&dP4(9>ow{{Aenj` z*w~Sg9hy0Eu8_3W0vBcB(*j&rV(*H1C0v>W@5a3F0&Aj*x%+(UYt_*9Nv#BR5(?5~ zY;^h}<5F?$Vl7ipHTipyntEHuegVN2OgOo@xj;lb`tC5J3Ls;VULHF{0LaoQfWHK` zhPz@;K?msQbr92L7aJER{p=a3Z(~-mb_vLHewZ}&Zy2S5v`yoOm#fd$JL99da$rLW z(40YAnVEHt4S**)R!<@^BO^nS`N-s=E7&TOKI;J{vc_2lYA7 zC0Wkkh#IbYVyBwX88v~}Z|ANgb8Hn+P*%>7xj6Rl5l6lk;N=;-{GITexUk6L52C?g zMqJ4-k1iX)1n?Rh?@Y>T2Hys_O~K&}yy=dQGG6RV6s^R9yt=Y}{x-09Q6(d7ki7um zCbTOD2aFRlAm;>)#Y6G4Q${#ONr!x^>?J*~swcO=2GpMT+^xW(=h^9{(X%fGZQa`U zyCjkw=I=i*l7sUrF&`Wa7|r zN7Ifs4hbf0L%O%YZ1_N2D|BK0%#k2=_cEG?CeP~}-NT$(3s2DY$bDgl;jzjgxvBOT znhyctT(IQhT&!@#%*W)l@1zymwOUF?<`Wx5N`r8233SX!v|e0_x2us4qz)J8HhA2s zy{AgYK`QtDZr6Q2D<_bMf@^Ia&&%d|a^k-X7DR=ob|fQ*!{Wf~pBKlo|Lpk4oSu_K zb$(lC@%*AL${R9?o;YeJg%$s-I?nG?YnY;s)NeIa79hH-vpdPwx9=?=ioO~DmlQyb z@Qd&0?7(gejtkLD(sH8VN)(Kv_>)F&DjHjro?G-Vg5S_GSd{0tLD*w99xBo_;wZr{ z8mpNnj@<7vD_Hxw1`OWbQ|g+Em}jiU@MGUW_-%f0_tw%rN6f6KKDjAw0rX;SZ~t z`x7(9*o$vj1$3@euA=uf#Jdp$qNc%CJ#*UH@*JN6m1wLy2pqB(y^L`jegyH|iE}hE zsSw#zqm+x{3G9}hCMNjIOrOgv&`*f4*q0U-3VQwYB)tJkSH5{Z^MR*t`g2?Z^WTqI zvqie)&sb@f5E%`1L-O-|5ve?h59kxbL<$2)uXWiSv{V|^jr5eJ(==n2mtydhP(C?f zV991ue^!i>%WQYp(@gqs-{uL=^Y?eFroa4(*!N&A-SlqA^U0wFbRUje>i z3>aRm|J6Let*3_cjlZw-DMZ{j!0We^RR@6;@K2BkAS3YDqiRrJzruQ&vMA`uhls~s 
zlCe^yprY$Ucaj{5@h`jJbY&#_8$OYQqDtV&XRgkQpbmR3#<~6>rHzVcDaSh@dLL2X zQB)Y70F0GByDLI~s9KgHI$SpOap42?W~XL>u-fae-#lyaf~LE*LKgXW+j-gK_@( z*UAvbt}^|H0=aCW(!xx%uO2Bf8QgH7e8!*^aepjPp@EWyh2x>pY%Y(?R4+rMg(sVA zY9R76M?Q8gqRIUw1WNmwo;CvP&7OZS@bRC09=R;AR5ZV(ZHUulWvZvS&PlFBn9clE zScl`jOFheH3SKE0W?qNIn4pckP?AJ3T8B#RvRzFj_SO#l->hF0MORiBzx&Zl7hDWo zJp!k0<94I<^~rbSt7weJVv@^>%VEmCwUB`!sfHbdo zpP~6u!}r2=EK%zYZ7^Q2;w`({bgTyT@Ks&U>}Tvap3FfXI4h^8M$v@kR9d zbOC9|Bx^H%dEvUjvbT46CWjDsvha7(p7}oCM%+2#O$YM*p=~QD$cxX+RCRJ9ghsJ4 zv$orAON$1Y7n;p5(%Q^?7275!6LEV{VZFz&AH9rV;pYDpNx|*(weTG%Qi!X2EGm)L z9-$o*2(+v|as}%ZX-UHzwmMP-z$Nq3DqB~#%}%q6V;tY>Gvkl16%PtF9>EZm1Gg+P zy9m_OWl)hx%4L8BTY%l*PeT!N%p=GSd*m7pu{n}I(q`q-riDLr-@0{K`I3Sry2H8= z$fJ>B-H61JXHIoxgO2yeO|`O^j(35Py*SO4o^O+%vAe8Nd&00=#E1b*6MDn*L=h=G^dn;HeISgvvfg`uAnMzJq=cYu_?p)%l zyy6cCss`f|jW2ovn-8f*aVT3s&GAM9#}=n0mR zDD&G^YO28P(NumLLWiGc`D^qwO=lG%cac9MH!m+R0Zf_5ptCJE&8PY?$?gfj)I#>; zaKie~&7B^YM{4a)d7x4?U|_=>rZte{W%#3(nzV!*$@M(VOp{)zUr;hXV2h+;l^ZVNl35S>Z5vcr^?;)xTY=+By9E-*D*9XEL%uQEpOPG9T6Jg@0! 
zc}JZT6m96*m;9uaI3MpZ+J+|jDml0{K#!gro$gshDhJL}IXA<>wMh z^R?eu{(vXmi8(>!3jJX0KApkp|Z zIAm}jz+-DMSES+hxLL%h$74GZrC{i^5S6p~_X`!Qtt?zWQIL7pPYqtyddevJgKFlV z^NtU3Ci{!s*GH#0>;!+F&O6U9EO>bS!e8DM=1Gd_56APS`|U!($%j4x-h&t?-;L4}p?B478hO z=FnU-B+oRej3GIU1|Mx%erg|-kYa+H6UW>4L->~byv zmb^kn??fpszuPWN?h4~hVXDsXxhE+#iH)~hVs~O#SeO*Ukn2>#3HJ0$5@KTH1=OD@ zW;e&Vg^74J%+FQ-nH~tRdt48>gwFmR%lLfO%;UAM79qEK`7IK+Q6aYLNiprqO^C^1zL+%cxpiN}Ynr81y=WDJP*Ljs+v+%hSy-^dZCtr;e z8K&nyNmBGL?ip&KK_2hx*X5QX1b?%>ZfL10(EXG~mziSyeGZ}#c1Y!8VPR=^ zQpBJ31qB9Z))9sqrDnmmTRYm~s~9638n2Q-+yDuUM0B&?dMM|PJpG)niaqh47KjJl z$V@%Qktj9j>d*LaSH$-&{cM`l=-@}AA5Ldy2P;HGWG#FHDgy1(-z=2D1%%lZtG(=B8t2i=xsOy+QWP7ygKBHPr0X^~0)q z1_3s0b1k9cKVMM+OGIU*npG7f`9a*U^SLqSY}KeSv>E^194ik@NztM4p1rA35?FRr zQfI=H)U1UzDoFdFYV?Z@Q&-I73_F1XaT5tj;|S_B#jWapUf9Y>9CLt|Z+1@n z0uN)SgcrX~3iEC%((+LrZvMf}^muO}5QI z3vTnf<`yVyurp{0Jf42~$-&*+Gf1`Y-zo?D4iW8nD&anJuXK%VAK{-6p33iN)^eAh z*SgBPWhig0DP^Sc+&@!_AkIzwXx6?G!d!waODnNhcWd9bv47^;jSN1I7f0&?AVCTP zcU)Z3^mxMt_V?%2_Jn*O18oV7j){%^1&~IUSX{?9i=N#L-*BxjqSCu`*?L~bLTo!k&wJwli))qI-t#+@G3n_&=dK&8e@e)x{yLa|^5?VekF=`B z%K#SsKnL*HMUZkXk481_gK90l;(q8Hqnyq}vL?j+_yKBZc=^qjx+j@?Wi*NkT&Bpt z1{Hb~CPukPAXNWLPlWmi5K+^7ty^bjB+aX7ZShCF7>})D^q9ps&obanl?lbG;52_T zOR9l|ROV*4gm!lKGKl@)Yy523!wrF6koEE1eq1_IqIWS zAAI2B)ecC66?a+{^KIkx5r&hK+c)0@j3jR3evn!Ke9H}1fTR^Nh|4kfIq z6LBA{=I#hqdg(KD_KBfLeo3opUd9WqzP?3G4E4JJpSmIq@Jl_(2R##2UCDUR z#%7$n+Y}7atmrrYNI%uoo41q6jEm`l|C0s_8i6#A>x45_wnnKGs{LI7c9X%?M|k3g z9|^rI_KUvgIrV`75}y5B#dt|H>6|n{I}GXFUcQ@w{A_G&_@Ag$>}dtJ36^kf$8T!p z5e2@PSRkV5u&)M_b{BBtWoYNc-l>UnkPvfM$Pu57xvNW&;BO3b-_J-^<(?5O-_tov zq{=*;q(`^H?GAvVX3aqzjK{`7RmKlpqV_NT8?U1GlUvjE3prZuJ8dVOi~=f>{ZHXm z?BC}m2|ROHTXwA5PIk{4C`vg=rc4=em2WdrwDYTcZs3n&PW<#tb54f0QOY7gm-|QS z)2EpjHzhdrl|Y&Y+@jg90Pfm`v!!pd2yli%eeIbi;&}Gj`Cz@J!G?a$!ZbqS(9d z4*$;U{UDMY8Hp2gqkhV?(r@xr_1=0(>AXvg4%EJHj5>fL<~_&(qM=oX{3<#utZmK( z!)P$@s(Sn-m?!`J62=UhB7N8PRlOVqGUh2<om*WMv$L5qO}v82f(MFMrP#UKQ<=x_p=eGB)yy3qZ&V5T>0yt&0e;(w(lRs zIe4sQhvGNI{xi6(Mq=hJw{In~FSyiMPk#TBed$uZ 
zQCe0O1Md&nTcm2vWli%dxll}CDDGZ&N$ICxF4FF~KWFYjcWcbe;BCPN*~l+bcQj{i zle2vbEc!x&S|ME;aKUxCAM zbE<+Wx&SU(Z=(7Zg2FmZH28(;nlTHWL)+yD?tmajzV2;!al@A7?;_MdZbG2|wIoHkBy&f=a zYA=NUZx-h6>Dl+x;BvTK^p00FcJdyITF|k^x>4|wchZ{Qu1aMI;L$^R#1=tRiT4HQ zpUm+Td5AFVF}PT#{QjB!RFyUFwyQCl-`U2UQ`ZkPQ*{+@PVAX@T-Pi9d9F}5JiOR( z*`5H2EAngc`=cr9H(oxVipU|ud%@{ZXI~Fr|3*p*Idl%8Dp>Y;rBca9eXA_Z)Ox?%eAY2-^D$w97Cxz&Yc{mGJTEmF=9fkr(dXjU3 zyP}HZ3h^!epyn=xJV}-4B{MV6Mnec>J6XJ6B6HlD{RPaTFu5k_>B7l{2k02X!lEqt z(*EF3h>~Z`3_#7puaTmG-M{G$anpI&t1wFw2eF)94iB77&TQjOhbzr5+(#3C8Zf-} zV!pNM)sivdoD9luLLT#29$JIi;1`4dSWndnw88Y99fU!7mE8SJBUj(t%nZ)87m$Gd za;ywwdj}{7WQ)LV^?`1^x3*%XX1(}6gzMdCvUFB&^cB0UhMw>i3Wt>Qlx{&5coeuo zX7>_!SJR{#PWnX}PUj>1$sT9xyQuf1`*CtK$@QW0Fy2AkxF;-30eUxh`v#x`b?`{C z`I&)%D1^+&tq!CNnOnPgKHWb!fZh`N&50ioz?+q=jXoKpcFVbfUd#X*u{!7DP2muT z*8vL)B>cUw*rH?k`|8jI)Z&nI9zXTwc(CHRT6jdb&HEU`)&H*r82u?0LB_ef*Kuzb zly3`;LXl@PWu~Yl&#qNKF?|Y0ENW(*FS{s-{a)CjSwUG*X11z zMnZVBX$VLefj^AY`rH73>E45K7BDQb0CSuFymY#?qa$bI(ZlqBTNYq$;(7M#?3{M~hq&M1DH+(*j+)zPamq`; zLks_s9FUe~2){{M$?_BDd3F?{yhOsCC<)}fGOXjU@w==J-mIRygJSmYfqxHSktRp0dB!?*yu{jS{cb`nfZbw7ElezF`^X7f_5P-bC0pczWa#O&CrTgqh#f8__AviBFkjgMyqKv z<1~>eIxpSoKf!QiTo^?0E~b3uUBD&EBUhyy2&-g6;|gVT^rU068woG-x{>lGUqeMx z_vwEm1}k?Wj-xqVWNS~vQgD^^m$@O|CrMsw0JYwr^*JO!KSRgZ*-I#M-!Y2u zCky>GYNQ-nkOIh6+CqmKnuNR_UamTc0&}Gl#+|BzC}Z{{QxAj;4YhGma`xyQW|23T zH-K|x;N!y%SUBx5Mg$~)4WQ2)fGa2=jD*NadjIf5!ZI65tj?Z3StST5L_>x*J~wh@ zE-!x#GOdT;w@0)2@cUg<)LiQx(Pzjvjl6C=TTVwlGmw6PVSf^1S|r7*;>5|1QSqp9 zX2%KwdtUxKy-zpZV8aLS^X$Qyq|zg|&3AqIdu_?aFx8pvf|?)bP;`fn%uuI7rl|x$ zWCI^dFdw`ebA@$yZrGLmS+T-Pl}O&u#s>(~2iL!KRy1R?u`l^0BxoCrkw~uhMPD_% zdnrnZgvdaTy047EppM*yw>f*`5uOK(X^n`?*0^nd#_InG~v$C=DEGR@>l6(4cTZG4 zcNJ;~#${y|P5t+RmQPsPTjXx!J@IPO=VRPmW#_oeR78^3o|(#-_~s!#^7KwTydb79BqFPXtB z-_J9-AFa9|3h-?+EhnC|XC-VmvsiK|? 
z_U@6k`e^+^rj#LZJQQ9ZXRU97jKym5_{%@v3+h~c+n|D6Cc2mS5Yeu<@<7b$*_85< z)&IVfP*70q!IgyfLqz}8jX%03i_Ta@yUop^llb|6^FLXa?7C4riyew&mWf>=vGh)n z6>Ix!U$PubZj-Jg#X+BIszp9UWJ&++;j2lyyv4;ZqOn&Gl{qG1C$r9oLy_zmNzy7* z_Xj`w>(xQ}e-G>{^y(?Ip$=W1U0bPl;CRCqprLp80Z>q>P-JRGjez>Put7A>T2EpU z5_AWSKza&W@^smcW1n~J`m?Ga`AueTKL2k1xbG^d_>MJAYO1arhYuj;QDs54+9mUj zj|K-lUN5UGpa#WwDElZBnO9HlUASKF&4n`kRY7WB^WH>nyKMZ8a<&@SKja(O{dGb6 zQTanivVbJkFR;t)Ubyxg4D@$@GB0>9R~X4$F8uPc{WGg^WQA2fNI=(30=~?K-^ZX7 z{8Be!GxHNw;q*A&0TyV}gXh_1vKgvm($LqD$-jRy2#2ZhqCtF~z4fV<3l{^eI705_ zPhwl^ytv*HD&rbB41(opsXK_&RI;Iiy5Z`tzK4<$5|Kq(!69S^QXoOKynE&YI*n;! zYbE8>3TRa*}l7vJHU=EvV{UFP z6SA1R#m4IQqT*_us?vfS-CS>Kg+EUXorK@DH@HGPF8B8QCpQlcCA{~7OS9ynC?D*! zM869QGo4=`^4!>}R%!c;z3OLuz3C=mg*Aem~ukYU!aw`+?6aw27M=bL!&@KS|!q z?2eI-t6P&Y5wCXut&Yp~-9&b6vtP-^aWOGXB^AbP8%Mv(q-2-~ORM>3hn_Bk{cg6rv_3$&y(gtb1kW@tlGGSH*DV(suneOWvNvuNR$?ym|P9>G^X`wL*qENF49Q z_CH}bSSYKVc{vQ3T)qmubW^TLo7}P-+coXI>M&Rz`PwsLRWN^Wrc-Vv94#RpJ5}$d zd#Z3H_(QlErxdG1mg0X@Jz1}_O#@E%w~_{}DB>l<=OnGXFR_THH{W|=uyt*@9W0Hd z4O&f4=e(#fS5+xoXz%;Z-+*VU@;(ipJAj6$$aV}<-fhvbsmi!qQ&zS>d@wpl=~hBQ zk~GhyEDDWU*jhgskN4@j*)1{B>2q6-$m8Jsw=(NGAQQq13SlLwlU^v*t$0Qk)Vzb; za2Nez28v#Lo+jyV4h}XbF^kXq_8p5)G$mxR&Ih;ae~Ru42D71 z59?t9yBExmzr!cJ~^0*P!_-F3p*0r4A2g@!yM zP|qm(O8MLPGXjWrd;g$ksi(J5w(kkwa--sKJ_Lt0h>M*P5~nl@)AlZv1Y@sFmEa)> zt*G@s$dGGToUEV_2VC~qHk6vl8Nxp8){Rmv7-JuBUkql3MTH4CiK=Fb+VmQORl!kc zGbH)JEHz5eMeRlA8<+rj6GH+7Zv)aAR^!WMeYv8G6srNFf=v1Vr4pUVZk3GA`LHI$ ze#^qIfKt%7wqfGu=*Ch;!8^Gq%^O`%>ihs|FCB$G9BvJhQDlVT&r};rXg+;&vHbYA2b$Pu{@AtT(7;=y6`TcV%4VMDl8) zikXu$3IhBB0C?xwKab@voOug1ILlr#(G6Sspm5HRz9>tbVmn zE>8;>_E44G^IWD~&vib29DBRF&UB)~?e9NxeEqp@JMrBnqGN%+eQUW-aC&jV?rWZg z65~P$H~%*ExX9{MMe=JS@>>aLi!nY%+?vvPK}t#u74OT?MIG;{ec;7C0k{M?V8F=i z@!}sNn3%$T`kenR)G*iMcc3BeS3l?Oos?#Nr1}7dM2$S3{{Mg9TB*nuV?8`?dCwZQ z%#sY$1cTy6K>qQZz{Xrqo=oxj?c%DOAV4H{Ui-rh(7VAFYyS9~v4Onzr7!JwyQ%;Wpq${<-i>-CTQYE*<>3#7g6k@L3=h8-OQ( zZ(1HnEFWrKK936Jg_1}?aMn?zi8MybpOjhE)7l^0U73u8l 
z#`#Pp+qF`?Y}_sB5yrxAW{%430FuexGZR&w2Q!;~;G-CNNqJ8#V=-btq5!%Lpx2Fh zRdc1C%Hi=aziO6v)b?n(&36;Sn^H%Vf6hR1?>Ir-?!K{ETjB4@QTg^Bt`y|CrSQ3P zl|Lw#%StG<1QT?5008Ia=f_P8Q>R^5*SqM_ zw+Uq+VKl$mnywE8r#&4$)KApo%xvykLi(J5<3?$(A6S6zy{8uZ+t(TvZSF4gE+T@F zp8lrcD3~75=}T|J>Uv zDP-X-J3qg$OH5W_HtqzuKZ@1%QxI>ETab7E!r`Nj;rij~bl^saAz6a^L+x#qNqyF^ zZRyvgnC&|*wVhq#HkT70{OXC^Bh%x46LQsW^4>7=JtMgR8zPbg#SaK0qQ&!)P!YBv zxNLwiw*}jI1=%Xl0X(5JoK^(ju(D*gD9V*>WUovvVfdq__91*`r9d&B`QNW>V-w<4&rU^SZ)tP&!mfBzu-8seBm2 z5NuIS&(RY&R#Q$rXQVvc9gPKZ%r8SJo_WqlVJO?@b8%JC_wt zy(p!zo^crWgVempupU?9x+VciF9r)7j3L0_j>7d>mju*_EN*tsK}r^qCqOs$X?r3# z)4QLQgziwJa@O;0OGa~w2|x>5rUFK^v?n}1=PpW0$_k1$y?DxgFJP=aq#ewv->!>* zYwqKQZxZ~7`A=r+f4}zp6Lzk>I-WcONrPkEUH`!8e75nE-}BqH)0a*mxD?G`=X-T$ zTX0!Tr!w}=eXS3E%;Wb6ciz9|bKkz}Vz`3LWy(*t|72JF73n4fxGx@toR2n?%$bDK zK=6SywhIs}MP)Tc0f9uXUlZ8!PSJF+NLub*majCBp~ikcQlRW^7<$O2{SZFco;jgtO^5h8g{Z|{lj&IbPDS;qhvdh#tLg>B^bKh%ZSbl#WP!0LlFMoX!%6;NQ< zUxltNj?9q)nNz2m5aoqM@3D0>3iGK7xdgI-I38rn!=-)_V72H1OLO9`X!q9{tb<0# z56$2L*2YH=zKkzG9{>xlw$TX6fCY>JFxzIvG9sh_Sb-3u@b)KZbc4X3$&R|--p6JY zb*8Lu=?A>)bi??%x%f|s;^$cFLiV=yR!uFJ_mK7mxf+lr_?%qa4DJ5&uerLzJ%L7y z2l;00krWADN;M1bMLai$1K<{B|Yh zJZGQJ`OcOF1upnHd>e^5y^3xMctbU0Gx=K|Gk1S9oE8=JiNbgny`csO0S17vmx6-{ zT9LW22#`(_C?SsPS`$F4rm}L^w&i{jblfVRZySK7KSss{Uh~gBha`U+4!I#cR0HNT zcF<(gNQ>4DD;fyExB#aXF~p3%SnyuCgMx}QE$DFiMyRZr%ogO(CwK2<#KA!D2^xdNv z3~HcuJA}*f7|_-DtNEe(E-}k%$6BmJd{VZ_zdlLx?AQ^5>;0;pTtI zlOnjxFGGi9)W;1Q|B}Y@{%goZ#W}+G@Bl2YV@1xiZgH!hea3CCVc^jWfS4L3sRD0Q zA@q7{p3gF~sw`El_b9EkYwC;RIGBGdq2d2Ov0?v4m;8Qi4o-ALbZ<280>v8+eOdO& z$ArZ%yN>Acg^GyT2>z1Q={>+mmZ69aO(?N&4sbI3qDn?9)|r#T7gA-_gHDw2Amc2E zGzTs_FW_Q+FZ8`P_j0@wG)7&oYUMO0hX;e8f%a@~bK0kc~At9cqNLa|4B zoswRQgU}P2&Q6e>8hSVC=xOx_WuxlW79k5a)EIL5S?BPSl|UH_P#EAV5rt~?7S)c< z2duWCEK}$s(qs;HTg6FsLxSw9L6V4GDY%T7JAV!8iY9#lr@xMl&J7=|T;8IMr@Wqj z%DO-00OLl?V;g;Cy6o^NNzh#qKb;$~Iuuo&gZ8+zVshKUWeh{D_(6LgBZ*kthUdKi za|BvZIigel09=|mO=Dv*0Fi?MHd*TB#d0NFu&C*`lXhoNLZ@lxmt^8~G13k^lKYX^$g0S+!fW~wGV@1vY 
zY#*X8*Key+inKZ3CFT@IkQ5jD?{{r5GU{P*a+xZQJ7!6EBp&aGqq|Xut{%_sJ{imJ z?d<%zcXk;xJnHd=S{lXf-cL?uJ#N-grB2Rhg`f_^*>E5s(d{(WeQ$3tG~N)I*C_(! z^=88mYykt6OC6w&Ydgc~JjnsJJ^-I9{f%V!J}JiiMSY9TPIRYNaHwUcU!oJSsJBzE z6ao+t6{1PPWnAgK|KZj3>)xuWKF@w|B0Q%SUFzpe0(^AA#T+K%GGA#ikR^%|2j-Vk zijaU`hfb)LNnXCI`~2BdIoBdIXym>@`gRY_aOzGI6{JhQI+;;>D?!es_m4Flvkqu~z}0?LehXVy zEDKkIlw>SVQ4^P1LTS*r*6^fX3}g<1xFlGdyJttci#c)9*>UBn4hxEvFv6C82gtEK z&>f6_npx{cW0F0c#V^Nf1K+cX}V^LS9y@FCWe(Wn>KE?FuZw=IWu)?DD zZOt7n+ z@g7ry`Nodd0;mLK-jU4HYTe*CzWbuj#e*6fubAuI|6XmB_XPeS|&896nHa+W3%T6Y0wB3(|QX4vLV2^#~ZU6;sjkPB8W! zZ+`5>&oA&5;$JhwPiUdVpF%U=NJ*!kTUk94!gK6>3Y*y07!$QAUd)tj9n6~RufEJ0 zULIM-C$>Gn5b!%*bgehJPJm55UFtqR4Bu{O@o&rH-b1W@KAi2=N~)i^iiAFw|5x=O z6%Y9(K9xvN$`n%)_P*xb9q8W(o83t@xFWPLU>Culi;*){2x70@i)8BV|3pc+oRpXt zB9df-gi-t}*x|@;S%jTp*F2j|O;(5niKGQ8b=bW1dM+ClLI*AglwjpR0#wC)J>E~RK4ew5#7iMYuu0%=y8M&B?+jfgnC zNn8=38Pj4dfcOk&MH^W>jXr(9a@lUUMQxxhA24{kF-+xwx9Bu!AygsfSug#I{fB%Lc zHtMt6Bs?At1_Cna$Zy>PF z2@H>~i#n|`q+#2iuBwYqHw=L|+ubY)*JY6&+GkhFSo$Rf7QbFy2URwCUH{-ZSEdm_ zMZwW%HW7=TyfahUq-GD;7D3-N?bO|Ew# zQp^(3x#rXzhpwU)3&hV#ln_{=Qj806fQ%U(VzJKkReO7^Q9)!5MfL;l$;};q2<}e)6 z&!AnZ_D(7sg~9-iEwIBeOG#zF^gBsN$ROAIE^v4r8h?5z))9`kQae*o^>ml!C&V~t zE$AFxO=z$_jl_?7W$AJWBMKX+%c0V{#~~1X)0_+Q<4tU`>A-s^@fv6bE#V>(wOYX& z6Q*@!Lf*u=IabT@ApCYN1d{w27+UYYi8I~&6ptjuhn!H8S8`9s@PUnf+)MEAZB13s z!jHVVwpQi3qL9jI6g;)f1R9JlJ3rkT_5?FUoVj0wGv8(sikw|5^x9p3vi(|G{~&0v zjJ~?i0Ypm#@5hrCW6%Ylf~1TmOW?jA2l+OR^9@}(!(GWNJhMopoyTtP{?81v@Uj17 zacS^FnBj2c;xfYwzEj=f*X|)z_pRG&eidM{5p6ox&h;l)d9S09 z2HaxTVM(vUa|h@6ly_lu6N*>9y-mmG+n@yBhv))6pE1U5SyE3P_LPSiF14Hj-S?pm z3Y0ddYqrB z5x-%;XtpHd8D9d6>G#-<20kuVt}NZps>$ zP;9r5U)=QD=zoGg)sz)JaFpl!wJKkT44{b!xiGs$xa)a*{tcw2eaJU~7u(q{F! 
zk$(T_@#9uY^~<_J_#7GOx^j39y2WIQ$Vjj+Mhy|W?)-89u%NCx;zLt$D~+wIVetB0 zu9JAmjW@HqtHnh*%pxbe&yDNk(|ir9j5#VybTf8vsw*K|`-q`EpE9iEq1|`J(k@)I zCN1{Xw0_Du2W*~vdV0=JONL8{9jxJ%`-iGagMF?4+v+!#LM|Z2sQV&-(Tq=v_xS0y zR7_VUEwfR@7h*lLyM0oQ)w9%^#wN20H!N9O4ASeo+9fb!JFq8(X!Kh6Eejsp)KWW% zt=wyW`-#vprGb3O-^`50D8oolMLE|^&ISf8Fl$TSzr0QJ(wa&t2ibJF`y6Ggkxo4E zmT;W=n$SrRIZD?PCN-TZlqLym3iqW}|WC|87U^w^v26V3X8cP=B6T#QY2=D}pgAk+TX+jKIT z6UrYV0^w*577qy4w?w=h-15TkqK}AG-n=3Ais4rbEjIg~#S7_SX6RNEyYlLA@1qZu z$~(f^_a+Yb}V_1)mXGUHBJ2Sv`7i6{r)_Isl0E4E4+jaNtV&m zN0f1LVIw1M?mBDEt7&)vPXd|dV>-2e=HIRQ1QwpZQT_CR_cP{`lpct^tQFRY5V7>p zwihpHa>ERhAZs7fuAX?E5UBH7q*>hlvHd(r|Eb#IsLQVq+!I`}3r2 zto5^o7;#_ReRj8>yz}Ovf|NSS$oHRZ@l{&tPd^g1|c^*B>5EV>0FA zFjS4|5mr;+*>YATej;2B`RiZHUm2E`-a?aiu-vuBe?v1FDc$}kB3fYiXWk*{8nWo{ zuc4PraS2tnlEl(Qg^}wIPJ51lc;?AU?Ie;g`fcukXhzxTL4(Tog&9c%BqSt`svaLo zyYOP)gi;4A5$`;B8nk4E#j_a^L6%y0ki2B$a+%g0y*PtfFQ}iQ-rHf*!P?A211Ahm zb5hI8PT*e@?}n$$O&m@i+Sfl-PGv3X(RFuchB`RpTLB)v32g;Xtio9RroGB1b4>+> z*W}JrW30F<9ZZX{XhZQgqdPNSB6IruqM{*AiHSus8zs*n!J9;e?VDo;4hl{2U(~i9 zu2VYi{SnaRDZWy{!a^x(73g|pt~Zi%8XfH9*k5`!zC-n|bFFTHm>PA#4&QAp+8ZDb zsYup}Oy$xYyz^j291Iz7UFc`3TjZ~0!b%~6#3E*CX=Gv#Pw0C%hG5$#AR_AcvNLOR zzJf<3?b+Ur^ZrqV8B{MzZ(^BV_+dz2o}xjy$T5zDc2Qz#s)nNpC;6eO2qYCqBfgIY2lHP6hA$63AGObhcPQH$kj#jCRzeB&N_KD87oqdn zVb=*fMeQJxi=5qQ!8D^7iUDa_3`fP+;9}jPgATmKOl)Hnev4mI#QN;IS^ex?AN%$d zmOB_~^}G~$$@xqI=0B3_uRCJlAqhz`y!6+$MSS|nF{E1&d-$FFsoDd3d;t^@s9-UuVye7$rN9Db*y2bYU@CHMUZ6spx1iU2 ze)OND+0C;G${YUx>F+XEnM0;xx1t`uJ<23_1%eHj(VjM$1*=WLzy2}+lBe-on z;vwx|P{H?;RE_~AH9)7k?hj3F>wB`R1T6fluK^?MiGng+7*kPE!Ze?>WP z3lNgq7#|Y@umKK*h?APekVe3z1)JZ=E|=Gpo$)WxpN_%?Tj`+1Fsway7~W zIlS1#$<7LBEa|NL6zaD%?I@nT6SJtBHLAB~(n%wj!lTIdP{Fxe=+keb?+I+#)f}XV z^uO$7K?#jW&AkZ-e$pat`6C%xJHq!r#{^!8I9}UEaCCTH;H(sUSm3C%+Nz`|vcc_< zP<>#A|BQmhV$8(m^ob(yJ-bOk*jdZ2zklAO#O5kS4GR7YxR@-tOb;3}>eQxwn*0&l zz}S~e`PV0f6lvN5rH;hWzj26Aj3~-}>$bInR`$4V*=|8Q+Hxq+{t^n|rI}~CqCU%a z<6>!%jT;qiayQ1aGrJ+-*?ToX5fP7=D%Q<3tji?mP~<(zC{7au>`<3Yno#?i+Xt*5 
z4(8X!LJqf_&5%^v!%OZc@Ei2h-Xs&6P65Ok6@ndbb*`i3a{CnrhFM#GMaze#s<$NH zhyFNqmSNg8810Fg; zy!*=84*{$4qaIrjEGZ+2F?I-o%;2}d22118dZsh@YN%I3UT~tv>b{MKR|V_}!{aSR zb|X4C>|?ta9c_B@cRQxr?On+Izt>w)d5p#Hq~xAqt(5U67~;-oX-l&W9Ctiu=u?tr z56@rRU@3MYOcSz4SF*IUs+y(VYbdL85nK}?!^^j!wu6j_Gh3`Y#W)N`FfpxN?6$IWQ=?K1D=uOeFQ9q!b*nHlescmu z+C@43!ShZ$)fFkJsbkPGck^7o7u*$>#`9PqOeKYti>qtPh@wDHE-jfnus@XxM%&!*2-oN8Uzq&ZWXTiJ`KEC$kM?a5%`=*wlMc%~YsCnB5Qywtukv|@CL?;aIK$!?SD=j*19Gd>_ zDx8VeDe_uufkyQ`{JB8nmBjI;72xBWqGKv<$zJkXO9pd!}=HZU~NFWf~TV`Kj*`oJ*PAF;jryR>}yo!5fwl*e1s08#Lu zMtuz^C-Y{aZ9+;BzU$948S2$7LL@Kdz)_W7_{AE&>ZVQ)$FpN}jqx-l32~^n=@tWD zadENPjDCD@7<@i{R5kbp+1Rhy~*KSbZO!Akzu|DLfO_BNp^2Wnm(!}P~w(8}S zV#&}D<+bTcUYu&dlq3p5(9#5g4}gr{nw8~E&J&pHxB4OAOZUaMJ-Gj3oik42;G^Yt zGhd`MM84xv-N*lA@BeH?`mI;;!~_W@9V&}DFmPu#wX##X27{4E zGvEZ{A@l_WLt5O`wf}bMiGslUpX}5%oggmYIgr(fzb?x5je<$kAp|y{B5|xt;l^_C zuLCa$a&RCLn2guunx?wX+rRS-x}v%KpoRk37i_}7jZ^$S@Ezr21wnXR@vsC6==ssC znF8{SG3mhXf>LKm?=tjXL=JD@c#DTU{Oy9g!qbBLv031i$NJDaRoHcXwhGc9&-#`R zGUPK(RIoM{bm=n{&cipeHmsOYiqBD<=@WK@!smA$^` z--w<$X#eW#yb}~1;plLCk^8neY6Y3Y_n~XN2I4h3aq&!``;51m2STyND|OMG>9l&j zt*E7^=lK2eqjHlB#MQZWd=8!ryu03iMtdL@Vb1g7ISP}rzrr%oYHkipK~vPr-B9y` zwB)f!B&?n5nS&$-Xi7SeJ6)1enhrc}5RS`_3!>DFukmPyP|az;W4nfNR|aoaD8xQA z^L<9**?6bbJO#I|JO^}l`+fZtPpW4|3T#9FA<0{%<(e<~&Hqfo@96lH|O7zMU zV8H5AU!R=p%kX8_U91>rx_o?2u%-7@t@OymO$3}qPlTPc{yE1Ab7ti`AUOTjLG)fV z)E0UqzrhJ~aEQaowf|m4HcQ+u)w#ten7gCp3Bk`PK}urw&Xprya5)DZAIX?Q#~zby z@0KaT{*#r@Fpn^IDsp(iuSZe8@Cvyrz$Ji{&w$m;EVGH8tnYj4I>8@9(wl8J$|LUF zynA;lnBMbYWzZd$EmM_Oq4dplq3CkHg^WpTC%bXrb`ni`_4UHVi_a*B|noX3I6iQb2?BJqsrLN7>w2;2SYp@ye0Tbb_H=chXpr5F5<%2)aP zjvX1X7T!u`3Y9)WgXQp`@yiaGup=uNNmoGKKM>#`Ot?2BzFTR(#?C8q+~p`%v->Tc zF67zBd%8ylZYjgckZec=6?a%@XV;%-6pjM);$vVYC|_M4g{?R7EsN{k$o0Q1+uTi3 z(uwG>Euu4?T{f7Vu^1*jplOwbJk1xW?6<1HaYY0j>7}0sMf<$lrYmj7Lg<=GiBwXA z)YR&pp27jGJRcu^v@ns=4arZm4VF#B58$P^Gg0<94m{=XW)lz)Jd~GDc=wLF<}cey zT*OCNSWO`N{H#V77J7l6M9M%zAm0OvI32_vcHML{n+Tw|C&r%o@uLzuJ4#)>#LiJVAw}5P 
z*@;g~99Go+0AQQkk7hhh%Y>EA;j;dZyi2S+pg?*a5G3FCZKe|S4TZKk0LUjE4f*Z1^h~4`0-ZJ@CMF;J zS3q4I1?Y)?lo~yjRF*h!Av09cgp;-rq3LcJ5F{>C~g@RS%7V>{Q_SI*cp`A$0OWpTLME=#~O{rBr3 z?%dj%#MRt5{KyxwNx|JgX&!vh;5F4YJ&g(N%ye#w#1B}1c^@xlN>R`KE>9EWG*^UQ z{B6`ULaRq^H402fnhLN-rJ4C+0&inDjlaf!rx3E~Vtk0$!4=1oI+ckRQpN-r=ukY} zgNQd#g$8AmP)cgA4L=b)IGQpiq9zJktl%>a{~kB`8%xMb;~H~bBpic)UV;6-{x_n^Z~zKGHQN_Vgl&PgNM+<>Z-h_N zUHc~??C#f;x1Vb1GS|r4c#K+KZ|cUiiLW7jlUVst_=9m%a8pa7En^V&R!RE@dy%%z zhhFkGp`oFa7-Zz!6f|S4!y>#nlmMBWEQjqkAzn+<$S5PqdA6Jj1__r9{`K~`s!pT9 zpFVLya;!3N>87in;z2MM$X`*c+$J+TCBojpB#?qx9=)AJg$;u;kAG}jdH6fno;_3? zi{}DBb5?3lZi$2>>ABjZJXk5EPc=O*A(H|!M$;ty_&+|bpn)P&cho&G@9Qb zVm}uF-kyv_JgR$ehX&2+i|w)p6Q?`hQue{#;JUnub^Ld};|=w*OT+~Xx+NZ3>oLk< zwtq56%oJgfk}{iQU*ls;NX}A|+X>zN9ry$>KqAnq+&ibGO4Eu@I0*leC|gGUsx^3g z^!?@JZaf`&w=&WeQscasUG_N8YIvdf&q8e`TI+5P2?5;1o`NgP+Uz?cIGI6w#Vjp; zRYy;v!Wnqv80`0Kq6V$(h_q~7J19mll z`&&(3T8zfN#k&dwjl zzlRCozuhTK(dd{ts3t&^O-N*8bP1+Cncy#Fv;0qf$>=XZ1mkFOTAD6P74AI<2UKNb zG%l#I{{E}2Z$*1KH=IWHp7)F6%zMh0D^r2Lj?T&*uF8cGfLVR}DcY#Ud5VVa$qqbF zTbt3`mwxDbaC=Sk*r&HyTO07%l?0JVz01;#3|P69-jSa|Kh&C?KZA%?Xclxo5q$Lu zUf(46VPQSSQi$lxPSG5Lbgu_w^N?c90>;o>h+~ezF&o?4z*sf^D3p7HR2@QcNUI@_ zDk;ve&SxD`&PuND=N_Cbi@KK+N;Cw zB%pF_aoIsc$0Jk|%dZ?_k-)|8v=glM+6{oGi4;Wm{%oyMC1qs9#>TNmdlP(e<}Y?x zR<{r}W7)psqNSzve)-CtHRelUh1CDK5F0K$9UAC<;^nj0|;%8t;8!+EniEOc~ z#yXYLxkxZ@sUk-03O~&iycXEl*l>k={OA{T%CT`Tho6cf2#A6I+w2T3_2`+iT(Ff~cI1(afqN3I_cFPmJy8eedBJLVH13-+JuJt2@ew!5t;!ZE6=Ff$k ze`Lu=w=9DJ5p}x*;smS0?vwjt($z@BGkoDtn}%6BDVAvT>PB`iGv2Cw0kz4x5dW-TLfc4UUz;wK5Bh z>-6>y@tvw~O7f{#rVZM5kkI5V5Rj}b{{B(ik+_@o>+s@xUxN~;K`URgRgK6!!L7!p zq0xBP`7rge61{biC$LbW_?Cc~Zb~hD3`(8m(JtaN*Ljtv z_=uu4Yl2Tbgmsmn}V@l0%N4HNN2Urp)EbVeSC%w zhy~uDM$0XymQv6rh%dEA>u{zV`ZME97n)Pn`kYCeB@<)}S`}v01GWP9_`FB%p=yKP zdltN8AdG$rQo$15!lI%V;JUwZMG{MvKpS`fxs1aYay_X{Nl))CjC&2%oEBTIZc~e) zhlcjr(a+g;SqkpQ5p(cn44Aa|M}o)--?qB=P-mUJ=);E_A&lAr{E2%mnd=r*j?(n> z!TKDJMnBl#%>Bj2hM4J(Mj_w7v(8)uWgBQLT*@BP=_7>76>>W-6%^$e@ZGoE7{t8_ 
zO+8?%N6W#&!+YAFx~Qb4#>m4H2l}S-ligv^plF%s44f?T6hdD#os)SeTU^n}iF<#4 zAM(-gT8D?ToBdGjgVE7Z*$jgJdhm5m5#4WUs5#ABVt@t1EiOh z`Zhj)t~4&=JpIiPgMrri{46%R*_V)%hZ4>H;g5S42Tc3|0#$~b{zsE?F&ISfntp)M z1K4&!b+el9ET4;t@ZDMryhcnTQu?S26%`Y;@#p+_7t&`?&Zl>c?zS7{?-+lSMhgve zzdVVv-9^02_f&zV%Z*>2kA9WAFkA+2ZrZ`xY6x`-dfzlR2sX@!H*dNfh2fuojIe}M zBOu`VVA5L-t9YBG1Vx%$n^FTfv6k3*SfpA=%(+aIR7wq|KM7Bw$tvA1`*Ndx)ViI* zslc!9Z)eZc4Rlmg1Z=9`tobdz7?O#)F@nZc=Jcj&fMNTq>uBJI{$$jc3hcRzrMf#k zgtiv{n)BN3`B7{%ok-zHxJU;q71eaU>0itD+(G9gskUdvsSC5JHL3@|!81s$UbAtc z(zsRSS_Mxx9z0JL)ic2*9GxqEFUH*j6c-%4(!m^Y}PnW?XPns zK88e>4A`6!)8m!uoa$J8!~#Dc#gOp}2E|<${$}u&%O)xdVc<~=ROWU>L`7QmuXA03 zZuBn6T@&KK+T$`|(NC=G{86YI7(|5t1jDL3rk=e;w)q^D|L`g(quSQeYr6DLAk0xF z8<%56Y_)ZaKkVOEd8izL&g^j+MLf;iWFeBW-u}$mGu$7*9NvP)Z>Zy z`Vtjg%J6(cBLK0*0%xSq0#0Q*7y7~rcu4sQgB16$-U>gDqLo(YRf6&@gKtG>j@SuE zew}?OrE-p!a*=m=c_9P(!-o%FAwvxuMveR4l1-3vVc5=Daw%jt@(NH>+DrSq&z2bj z5Nl86mIJRDB?PVvitKqy0JW(M6|S&jhvi5|3_ z3=LNfRh<6#s8yHK1NJwN@??zlz#RqFU>D@bZ zIm_Ta#9eEl%NG~B=JTyn(WHSS6*e5V>F$g!>FW^X<_1nBG5>Cr7A(>vk}B0zv!-sp z`#oX`A-5f?-;spwOLCXsGEBp2Oe2~R70>djT_Nusw`6w7+=I7THFJVKaN1iW+06f2+JB)e=M&I5TYO2;nj&WcjDLv^TtsWe-k z1VY5{uhaKws4vLs(_Yy0e>!+^dCK?PMF0EcO*D|A+}aS}7RIfc9ejW%^&RF@>AXu= zAf@j9o=_+7@?3C^m{={q6N0iQ?tz}(;z79f($QXLC>Dy5$s>i#dfecFcD=WkiC~ou_c>c;@8-rBW4eK0jAihq&EShK3&| zBYIsNS#MEJU`eH-o>L&m_G|1$gZtCCp;%!BjhXB_u}JGbGkKm`Q-NGIbCavIZ7?04 zsSBjl*4Boz;C4A5qGT_~d=4bL&>M66dntN;yrrtFj0INS4FDLfPChbQn{{q2u(ei? 
z!C)}j;CaGJ<_&ZlXh5v()FI!rJ$bK6YIL38J89|cjzS`%y}}RhO5~z=t0oQpk!F&< zy}6!!E#6L1e}sI&H*VDES>{NP%BW*p2XP-b`Z4A!=18J#+HXCqcpK3(2TT{|15+Pi zMVJpE6LB&~fek2*dhGD)RCmcitLK_+%P|Esf~-c{~?zXS_rpEI|`z zSrFc(nmSVT;1;_CG1(o3*LajeAi;w(rP9CGwC8ln*>HF~DH~RWupnsl8n8U}aHPwO z3x08X!SXlbK0f0WG>qYWT&b|?j!%#O2Hov&`~Y-9Pd~Oq z-+(IZ@eDM`dRn*l<-0Sa$myKWE9tx(@(m%Un z9i*)HfT+emF86iNP0nDs?>jGH?SdF$|Dp{IWN-L6QxE+8xxPGK)ab#buHWj;R2u$% z?Y+Dk|D%J0&5D=ndIJ z+rhKT>R~=|_22Wz`IE)O%*;~dh&w=qTrv9MFPe~$fC>`^xTsSqfA!vhhD|Fys0CTz+vSJ$5Xov!#{XObqb)7*W7a^vD-NYuMh%M^|kS@cdy zEi(>|KbeB2eMlUWDQ?*afs~2IWy2r0JIb2hFCUi~fJpq|!-wc7!|MR|y`_ zl9Tt&;jI%A6Eihp-uz@z9Sn%RBWhd0w}wsLeDEUu0yEOuY2?cJ+ws0j@>rC(QBKdA z5yDWItFy6{(>nZx&dO8}w-wOLU)IL&?O{TVkO-5L6?d_(D?^Q^t9fxF`szBE@$7TN zw`qm5;NuH{VGCWuXYUf*E!V20xz7@&zmgf08}^FXs+w`4(Dtk zk$255$(!=KqVfGJ1I#P0bI~;G&|y(Grk1~q-F`rLUV|R``#P4B3s;qr78Zdo?zxOy z9wu{!T{KzAb8IFfoM1f%oypUYoNH3|M#Y*AF0j2s;cb6JQvu;}7SNaH=wWxD@^^y2 z8-8a4E>{N|Z>V6eIV5A;JH5Q!;(;l5idr(85EOE5O6X>%_N{;_J&R`}u=k)q^}5n5 zTch}*@Qwz^P%$%f|GK#nKRMaJbH>lrTAU;R8A z0y~qBDh=DAiRsOrHRrY1{JU$W)QgA7R*I4iov*6OZ8>dGb0LM}lwi(d#^Msp_~9uJlN2$cJTmE%+$Xnh(*QNI$tV)YrE>@3l~%JPv*m z-s^Ew{SZb+olJg5YHtT%AU)ku-2lMEfXq}y@M?KWn3-OF7-^ua<>&bZ73NsJ@~J<{ ziSiAc9`3jDN1odiAmtpkWORS2kM%ms1^|~)$3?P27|@0l&OETS2tfY z!!gjm+mf`oe{*jPTx|#w$;hK|3qz55t_{R!O@8$eRph}rW zjkdq%55T%!OfAlT(t&pd-D!XuEM-=-=E7V|?d`wYw1NLdJ$4ph!-t-CWK7I*aZ&aS zEV%rPi3;f#FiS(`5lOT>12T7D7#M1B1z6CHXa7FHAFJ3_~n*tyo&V(4yRRz&npKp-Zt6xA2 zHn^0ky%wbwvV@`+zc{VQt>0u#+ii%7!iKHoQ||>HdvK7d)P<$+&TMVZ%F$jQN5#lT zZ3a_ZNkWtRf!QJp{tqm^Maf`#!?`kZK5M`%EkaI*4h@z3z}5hN7_v(2eG7RT`jTCe zMs8ylsAzMpJ=7Bs5%0BEy45oH!>Zz9^T4Dt2S!P6Ng85W5Wsz2>wl)zdL5+rHtnHU zrzuS(GM`%T2}egi(-lG3ILeK+&u6~|FCd^0~UiH2$= z^5+@l;bvY9(D5YwPCoxCt#Cj6$T|~!0Hvzy=?Go7W;!aHZ!40>2Lcv-w#(|sB<%1< zM-Mi#y$`V7(1`ZI7X-x-IK_1({*81%+bHw;)rG)|bv5W3^*`@$wiuenOLLm zLwuz~$Sz<|P!jo5@%3xD+jLbY-=t*dg}J%yOiQt8f5+fNvf;4Px9yd~pXv5@&WKU@ z4cG2MUgPB)FPTtS1vpv-vZVYdLfk}8Hb+g>v!kNkbXy9S?*FuFcSTiqF&GfmdAzw%h8EP9;e2kv<=( 
za$mi&eS`K>PPh7r;tF2ZExFrFzt+;saiuI%jt+fac6RmX8XHFq@8$s}z`s@om(O=S zJAr8^=jg}@_8D+k;siZB z|Lbma_Vo?Iq1OtTam2&Q`fd9NB|!|h+`q+{%8NpcLqi+}kSSC1f8&+un`lsYSxw;D z3JvsuvyGL5aWIqaI>gGWhiWLPS#NzVfDWWth8S_(Y?*(~w=MU+)tlMa_AHEhacjtL zwn-U&uukB@^&0<|nT4+TKX&iz{wq<}Wk%srb^V6y+umAec!5mGB+7efnMk>* zOs6q@;}hQ|7(Bdz|2iQ){uQG#YF85LFCJkYE>+_K)g4#6>5{>1-S2ll%EU%s9(*6? z;7t6YXJB(8O=qBU!$2nXU|gbDl=v?;xGrNR?RAH?COKpHo$U4{$a!^s#FzZI+cr#G@$bigMN7ZeH>r_by7oEIcOoY)O{%^o}96w(#K z0R)L@Z0-jP$Xa(f@W7N=2qfLb4Q&Avj^YhDU6ms ze{8QH?|QJR%=m5Ph#vt*mZ&c}Xbv2XAET-+lx^-BF3|5H9VYM5|ACB)}!FhqJjP*i@EQa&(m%9~H{i|TuKY4kvr&{^q z>0A3y#95m8G|;{we-#L337iOoAEJRn>^=$pI^=`OIY@xQoKRx-rE`qG@?|ILKZZK{w&!|L2+r+j`3_WL1 zc*374=)`Db$VDQswkB0GapO0Gp&tJu1DlT9sh{wknLDsy?gWnfq}Ro}IiXdsV{vJ<IAnZHXCJD!E<66E zqx#V_NBp-Y;eA)gM?JJrC%(;?)>G$1ni6fIU71C%safUUQwZ9<>d}>*6@D&D2s}2h zkD12y+9eVT(1F0ZOb{V+v9(1GBw=WBqx6BWPErrW@Il|vi|u3ga-99VB3252c{pxy zWnXLEt$`yp*bIxE&Ziv;RbFyn8|i@?FZyQkcJwyZ!SJM)*&G! zhLMD|2BJ`178B;NJTwdT0Fu1BtYM6&}QguHbb$3zD&Mv(p32vvGb1CSe&@>zOc@okvwrWHE%~ zTjVJ;-EteQC7eh)artn(8fPu^+3o$3S`qhkp69NQwdI@d>@acO?vq@?%Wljaou%Kb zznj=ux9Hyw?W)q&uT09gZP!F^sU{xw279Qzcg<6v2yx#tTH5zL3Zv$zy8SxqksQ&l zQj@Q3mJWJL&&Do1aImrMKvB{6lk{6pcb_OR9J@z$0BA>HYe4ZgG_ENrZ@)2gn?f)G zq?IVlkzxAHpw5=&i_JNv6j1!3ntr1YTmj-q8Q710);h;AD!XsZ$>-hvpnRI#!3?c| z>CZN(iY|e{8Lzv^A)s%5&23*cPg$8FwK??q8XDHY!J)pqPU5%SsW>FwhNdp(sg4FN z&<#0G7Ny^2Hh8YVrU$cYGv6&6r{6zQt}c%G)+++_Q?R+rPH*x^ASFzZZUVP(FNYYS z4qPR7NWu9C%?ead&d)Cn!zGgrn~SZquv|@A+%xJB2oiA7v*h?DLbD#MyzS=j2TT+v zH6R|fY?aUirreYYEyTFNyFSVnYEdeqLjJ|dmfYLKgu%>)=JA1*b^+NcC+h?DvBcmq7Ij2Wp>(xms!z&nzF(?ye zM`8E%_u0W%&N3zSZX!|S`rzB8LmbOItC};!@6-N%lYt!R90(;J$yX*~x=|49<|PUl z5s#d4^DMvHkLi^s5K@vV%7@*mYm7O+^ShLWIdA#7^UqIM?L5HBDXl@4e?i1j_R~l= z&X?bPnY`hw^-7Cz)%_nEU3eQSZ7YRSD7rd@hygtXHg9IILIyMwO%Khm2>tY!kUx)* zPmhONsfRt5(F7amPU8)WRJ?_fYgPRBxCOSHwXmKWojkb{rPUs|V?#19rS&|IqN&T$ z0+bAZ*$sv7KVGefr)I0lB=Kr^x5u)o1P|<|V*ATDJfViv|57=W$6JJVAM{+i>rF*A 
z!qkBt0HXwjegiILHMQgiS%R+fkh$e&cR?TeUL9TC0}~y$(G+WNOespwWNvF7TKUSb}Xk5A>mXR=i{o`)zFfbVEy=E=W`A>>(Cj^6Cgvuh-GB{N#?R68MHKu&SZu zv7~}!q#r9?g9PC&^{f}K%ZLo%-iLtOkl&&B@FCzszk2V}dxnQp-sf0bBOb^SB2rF7 zrAIF<5qSHXWejO!?`N|GYCfBcil88Xbq^jGd`yE1<1VYXH)}xmT1nEBraN)_OM7fy zM`quayKRpA{Fxs+D(WMF5m8Q0&qQ&>lPbf?u-OQxvNyuvT69Y)Ck_Vr^b8I_z(a{; zD?^QVGt@deyO&7=;3tY2wS#eH&c{twe<_r5RtdJb@^&i9TbKctONyTifby~m0=rmq z%tAFEn$kg)u*Tq1?~IJ+`^!vFyQ4P8)Ad0B{Rpo))me%XK}^g%CLJ7xdfX$@sevGN z@$nJ0urPp6A}9A-jp`w-wi_?JgK6z{i;b~UU2lijK++l`gph)rfRr-4dVoCtX39UT z@RTn)G8qXE3lKO#3t<(Xa%8c~p3<|2b==aJo8Q{{_j^1u*?c+tNT)x+&<<_%lLx$p8WjR z3ikWgiZtI9x+ZlGr0-KWH}MfEWA^p+Z%tP*+=I)tvyu|@stw!QU6NBjTU=oXI~r<6 zuaZDCgR#an0|_AuTLB9L4(K9N3OiYMbl<5_(y*N@nH01iGpsLNy_Azn|5qz5Dmv42 zu|Rb@Q7gI!3(LdbR}}c1o&uhKpmQjokVKH9$;~~L=jSD3@attjE@hy*yC90e3JuS% zCN?VAu>lB1Hdg(B_cqa>nRkV3Z~S05Lk>4l)H3d+^2Wc39iL0S)j5s7t%{55u;2k+ z;;un*+{n@R^b?`ehzN??G=gMw%Ju;^)_QjdUG{!mhw64BemEx(5t*F)qsDIXvko#W z|B>zfqtO7Rn%DUS%?1YjPXys`@>)LfHvHmf#MC6S42)GhJ#StzM(Bk1HpNHE;BxV2 za^26;0P{ME_z!Jvj9!^}Uk_2QW2zgGpzyK!VhE`D)s2luY)I&f*uq@NyV>lE2H}3b23paD+{W81}7C9&1-tyh`p{V@BGZZgAS?Lh`GUPUY`LSg8zF zczk@L`JTV?{d1yYDTVuJrmt;hwaNG@VC=iPg17@kia;1}(b9vcmw(lxGiHLYbaV1Rt=vO!ymW*Xf;sx1!NdWRX@(J&@Z$|72m%a(ama`x%UW)-8|a zVHPKiY)Xa`{xyjTy!;|xxXPkz6 zOfXc33mk#m9zf0r$%(su>z6_^65XIQ<&pWz6hJyGANfYc#r5o-9Jbw}#fRtgQ^?^y z2;zh@W4Ff5*0!Bny0(*~H;#OLkXND)dmdK1c3{ALFKW`>=4Z_ol-nmIO`vgJ@H>$N zD-L+{i9nCnW?Kz`PIpmD?*Gwr7C=?CUl&J3QYi`P5GiRy5Co)4>5v9NT0lujB}BSQ zKw7$9M7m2#8l=0soBQqi&HQKH8Ap79d+#~td7i!ZTE7L2k`AOp0jdHyXb)Ge?zoxs zfXU(XCWbx$rV!BOL~p!0KX6?(F;gwJ5u(6B2gAvm_oLxU=34CNkZv|$ZUWbmY_pW6 z=WWxD9nFwtQ)-~!90vdv?P*G$dX$Y zEBnzKA&x5~(V;+j4u&!$kUlpjCl!z^NRYn6&fs0h#LJS8!}Ap3G3{y8w=^5%iK>K= z7&I+>Nqe5!41enmRH1OEN5guY?tF<(Kshkt#7JLj;Dh`1KHLCkN7T?vof=WyUl$NDUsh27ZZaVLW8G>-|2`+ z$n|*e=O&NM7C$7(Ar}fM4Gjij`G+9!NK;)^!#czszA6n(7I^#g``)?xJqrGkNbXuV zEFT;mz+8or!3VQ=wwQQ}$6Tbs=nmwM+@pFC4?iUq!lmTvGpt7EVseWuvY0mhrv-oq zTwGQ*;FZM_=1*asA|UuOftlvkY(omHxd&TiQMa06dv%5@0(3FPKaY_8{EzBnV}u+P 
zvIz#Accr)=g4$aU%?Ce!B%OsBw6andz~Z9pMyhul8{F%j0N|>i?F{{=Jkl18zR%s z(SpW%j>gN>ZJQs3z374JhMAR>9xCfT;mcq$UdxiBI0KMB(4hG+SnE_Nwlp^nk44u^ z72ecK2F2E%x3jL)B7`9}ObP?(RD;82`(myk=o;miR1^MoQRSd;pj1qT46RGt)~f2S zH4A2l(mqYAt)<8w%1W{tn`me{pM)_lEDejlAy5_Sr3aKmZLP8ofZz2nannnD=A5X0UDHc;RWQPU@JdAE zKwx@;uc=PL#-_-QpCUaq^(EobY7jUQOFcTAjJOjd-o&dTLjb?)(l-oTToN=NQC7WbHHd1&g6|Ft43bb*PJ{QpprR1E_AfWX z^SVG|AEqmRQc49Sls}Y~#7)5KGLuv@S)x7>;aq*zn*_V7HX#u)R?Rd!e82CsJ`1~U zB@Lt$`i)o;LI3pum`LaZ8R390v9_mPIE$QvC1$|E`5VZb%CCW1C=cQjtn3zM3WaPY zRwhDFACtheCOx&}CD43S#t@Kw_Jn(l5GPwaIy(9rl-BB&h~JYoQIn1WFadw@NpG<@ z5AiJVbuc-f0%*B%HZ|a*1h!Wj+$R#cx=AHkg>(!IQd{FSn)(&#bjsldG!zQ{mGQ}J zP=5UMT!luZZ_7MhR+f{pt}cCdNS&JXY^gXXIHkutp$2OMGJx;24T*w3{QlUy*;zn$ zf|jk-<8+!aNx@&hOTqsNy5t!CL$pXy3_d;;t#*YCn|AguVnFkxJIVzf%#NlzTAl9# zbhO#DEMRZXuJrxpGskOetb^ri&-4FeWMv1EM4y!|SFml|0Jazp$6s}6WhD?i4(O+0 zBP=0yj5V9oOm-=2K>lnz4q>SO3Y ze`jJV$A+r{$4}q7Uuw~Wao-?4OvTqv4{=jIZv7U-FW@N;3=S5$ji-^X4wJ@Y_Ap)By}nRCnf>`=^VWGeR+%!vyCUxWmPl$}c+nsaN@lXJx*bup z07TEXhp}w8WeMOVIPA{o=~g?)Mzp9Puf&#ZmnW5ZEeYE4y^lAt9@o%#;XJ^~B7y-2Vg~ zOJ*`1i~&|>I{7Y!W`ewsV3I=i*;$n0@F&j1-nSD+78|w1B;BXt3JRgHriK5PUAO?6 zVWKw41K}s|xPrfP<9h!%U2>z&?o`h?C_Vk{vAraG@%(C=Z+2tlg&q3jQ*N6=hgGy4 zi~Q<(&6Zi=1*h_Ds4JK~PJCGPRVKY$7bMPKX^f7|+p%~pRZ%@Za55^>YJ)YONGq-{uJM|hUuU;= zQ^Bz>I(A~jE&taqH2}(efWRm&mp#LB1lYP3VAM?LA``!~jZ_Acogu z29DRD`VX4wj`M5DEs$1@fGDxI>;Jt&K4p3h(60XmT2AIo4W_`rB}bi`&5r>4cx3%7 zKPOp9XGy=FHO1rZ-HmIny~bP04W4!RU5aXI(ICbpMy?dJw4%ukpm9gux$q22z{vj( z#~;nUCO{zSs^3@+7chgCI}jGk<$_{uPjIERf{oUkCsVYLbzvC??5S`@rHsO3rW_u8-s>VD*%2`z#0tKTZ4 zh`}uRx6rX@UUmq8{)Ae5?~0cN;yVBGR0NH>|CX3n_6SLV`rP~9EPKyr$E}~?n-mOa zH~N14s@c8!Nv-<6)c_Wq^9kqH7Hunq&+)2S|q z=zU~Xa074xkU`x>Mn>?V_T&y_cmDZ@1PQF34j34{n5Z=J0jcm>zYdIPAp%Nrte#pl z?ysKtNFLEK`S2>t5Vj`j5+O!H4FW_EcRr+pMWD@)6V&vf(4#V{D6<-44-AseYt05& zHLw<3B=|Ky7BdADuUnp}Nl5T-PgGxm_65bNgUz)J`$_!$yHUG229RzWpJ>A7MUDq-8mCq1CxWa0-T~= zNe3QZB%abIT%9v`ANdB zj0j15w!mJTGhE+mngQnqFn<}9P5k-WxdL0qmcnev>}uTVB8MLD3H|E?jU@p>zA3vq 
z`I^N=x-K#gIvUIe#*q2hAo~cGdIYi{V@S;O_4A7;HN)+WV_Sr*OByhf;CDE~18k4c?u>`ro8{we178YDIOM&AW*QjgKyU2}PB1vC z!a^JnEaYagB7AojqB?}#DYPc1z97dZ8xYw8JB05+aD-FU&38^C$mKuWF{}aX+);=Fpv+OA|*b5Tx#t z!zZmm@AJje<1+84J z2F?q>?a9NZ_Z$kr)xH$XpvoO2!7k`IizwXAlsTHkRKVARf*2g>^!|c(84QfSM9~Kr zcZ3aX$J+R6^~hP*yhpXq&kgu@u%Pdf|3mC>aUY4!& znkPq!!F&D|67U#|1E7?le=n`wZ_h4T_Oll<(8{l>{=H#0)mwrs(^^p;o59-hqPXw5 zg!nzxga(8A<9WAn!w0K*K?#dg1M<7=uv%CMYE^h9v*bjQh0;FcH^hUtvJC6>9As(_ zl~a?Gi$e<;$%_U_i^;0)LgVhGR+($atNp@X2H7@rl!6Y!f@Qp4hd^Gfs;m17g1q46 z4La*!CWw1)PdY2X*IXn}r)wND!1YT1n70FPr1q=qNb_W@iXj)LXIUpm{jZUo3s~ae z%+V$3qgl|pnXANNOujB+h$7`9`d)F;(?XUD6y>VB87Mpk{hwKPBL)d-$U+%O>Ctc= zeh#OHpBJP^Tas&eO~z;0`bRkfnwV@5DF)9~P)xo?*Xb|QYsl64lcVem`XKU;>?v0)kDNxPRne-s2Dmb)M4u3&K?p-`;1>-rP8NBzv?wOC8nO3bgGej^aA# zMcX!b9&L<7xjJq}K-vDu<~kGShPUzX&jv4kJ}#KBjZLUhQW^L*MxEh{XX5U9F^P2mNKrDRlJ|) z^`-sB5_4z@f6<9of8!U2wSN)NIY(+D;9&KT-!4^mZEuY$lkx+wo)+wWCwz%^8 zA-xY3b+hWt?!N|0mjUXK2CVcw#J}SQ7bSOi|7!Z47(^}INT2oZTRB)Tr>0urcpbY& z{wlf<`GQ;JBQ_?cR-LHSNdXN0$F00eyK*+{2JCNAds3(j0>%|46?=nq3$HERd2hVP zsNHon`V4q)WMm41CHaevag44cC7G`@XmBM5_f|9)^Y=&2EUyrkXNGdp;Uk=c z)Hy^<>)$vWR*Qsr1^@7CU~+JNfA!NMvgqV%<`cX-DvxIzWaB7;iv%3|1=HL72N^cl z3@vY~yc{1`@Kd|Hy>Psc^}koS+icHPOXwZvP%cfM_c6vi$o}h`_}TAFmZ+J>F%x=& zCMDfi3hG4vBv1--Zewp+!l=9Tn#aY$F6KCw|v{+6466EL>Q^!X3 zwy(_(PeE>f6L3zd3BTkQTZf%1d00VOuHC(~&A-z#Td@*c75 zHKCe$&+BSSS8GG`q0M`)ZbO$ZnJFBysxo}2#mN*N{MzqcXg0}z&-*oDSNXq>v%!yq zB_gb!U*y-jXSdTd1!{M`V{YIzB?-OWewwAAK8M4jp763L<%R!ZeY}R_NU2}glgC6QwzePn(^EyJ+^xL7 z;F+WWMly+HouL>z3!8pEKW2!!QpXQEb#|8=hC-{4uhb@E=pD8=#PTRY9NAvL?TEa~ z5ngxuQra?%aEHk&>C?Q5Gxk%1!e2z>?TNXeTNDvSDHx$p9;g3MQBCFP!Avte>hkNS zf1=TjuBEAV8X4};c7;0DX!TITn_e4Hu?J(1{EzUFsY8tGfuc z7cu*m%FJo6MaNudGYtt+*(qM4>Md)wq1)2M6p@q%R`+`-ILEAs;nI%~FxX>^FMLsdvrA{ z`WSj`(=>%7>Wc#lRXDXyZ*R)>;iP8_E#F_3<5={6`uei%EzPu0Vgh%JJA~W+C_jn_ zcY#Lyn?kjjn+8r0_Ck8N;nm`zs5i$-pIPTZ8zN(y{9b>mr?|>QX87=nfSf-}+$Sb> z0Mz9!`_I88qq7wz)kvN(oTUbNxc}9AkT1X6LF}Jzx~ghILg}sBo>~{!$c#lapZOFj 
zFKEg_*wtP6IQHcP**-%~G6Y^2ed*cdF6`^?TE9(3TULyX-E8rh4V4;8gOKbBQR+PN zMOl6Q5|Jbr)%l7na{x2#5!=f(UK&YFQ42Z@xl$6Pt+|anChr35bUG%MK~$o3bv$pm zmll|KGGY%hvqWVQI-?IS7ynLGITH{nomSKs94J2R;n{t@D|DK4_T!9> zPjO-uOHE;&1|=`_(eX6%iL0JJr+DEIp3E#*rKV)q=obFVdcZrgNLE6^*Pclz8Ki4~ z`^X5A%W-oM3ql3shMa_7AjOI24hTWs4OSqLlB0#>5Sc(PB$Nyy2XHJ-&0|%`nC!B;=4Dg$~)=_ZTv!jN8JuNJ& zK?kHu$ofp7AWXMRD22Ms%uL@FX`Bc6 zRHAj&j@?=GXi)b;^Nn0Uijjd~u*k#ht?^qKLA%e3(qYIh!lMYLKryjH-;eLAr)|J# z&UxZRBH*Jm7RS`6$=sbg&x%X^7Tf5YuNyybPDS)q%U7H%sIgm4*FNxo%i*?sgKjfS z&#$sOH3|Kwov&oxm_LMak5+_Zx4L~+pp+Mxo(JyFg}T$(2GgsUcDO{#`5X=O|16u7 zH%yj1P9vfc53@HBXg6pxyHwRuwV`ZJN~pg*9X^s}JD|PY$LV1}D70xvTDy-^eBSNb z$m&;MmZVu zEGVA4``VlJkq+|s`hvTNK!?Hmb7bU#2)&2^OQfcOTgX%`#SMf|BO)%a@88dQmmiBk zyVsNOV6Xc6{2;|^=Ps4k)dz1MA0TzXNNnneAFd&^$irbmO8SN7&)a!WmZrmaVWPVL zo`C}6qn+s>Z*T7gH7^*nL3_;`uRy2augsV$zE2ytp}AeDivAw(Ut^ z>L;<=;6n5L>Zh!x68!S`mBRO@m_QQ;mzAF}CeN8=5plft)kA?IgKYy3ng;D=gPXJmN|uoU*}% zf1Rq;MgFV($~~DT-~+U(DhdZkUoe06W9QrGpcGt#Alqy7slQ3%Wg|f)-GzU}mHENS z7edCXvPz4@u$!zCxE^n1C36JQ$ta4v>IkLeN_=H0uF3off=WJZmBJI)JK@fuUoEk# zF9d7{ewy6b)LZ1w3OW|K2sym*kfmH@B|i-1Y{L8@GvN#L0RvNM$}~9`*og`4HrXm5 ztMv<>w*YuaJ*O|Y(c)0CI1GM5km8%dXK1sIBvG?2;+k9R_YTTJDYK0IL%~n=8Dgd$ zs+9Zf*aOug)uYDgGa!{pU>yLczxP`eyPB!^S6JiY&jDGA5!DH8Shmq(1Kap)Mb8hi zTtLWgS#~1lc%wU%xa-|ueB+wcL>;~3nt?B7mBxB+p6Cwx2O9mjNA8X;X)WaT!Pg|d`E7>=Um)ln zLU}Bqd;%EV?UTs2^kUng1ubUpOlkdE7h@qEU3x5aYhqb5PZyN!U+w0b@9KX#A3?}m zCkuO!g?0Ggc|+41v3$^L`T!!ySRv2{%&!E*#O*q^uhT4s7{Hd2G%d1=b_-@KnI8T) z(S#(2|BShfA?#;YVwBm(phrnn!w*(wanCy=%L>9jL7#zP3v(bAIsH1D^%KA80cWVvZb zDSLs&0%$0}!2fx!lQD&!hbL0lp;FN6(5f}(%4iRka;3Lb^hgx0y9|#==$tih zMNVwsXrZv!&bQyT&^EQ0Y+P=s4z5{7PG+pW7niJHkz6Y6pjtecv|Rob%DK!6%TLU^OKaEAXS5uwe~PCvM5! zUt({PkXJ0II>>^zGU%!zoPy9qFT04@0=AJ$Oo=%-zDt%o?bbyd8(lvFK3cfL1ZAY;#j}P*zq(%5_C0 zCMGT`;?nOHS5?1%DNE={J^RyCYHlMLBn*EsAG4(a=gq97dFSAIhD{LX1{*? 
z|IcBAel4qy^jKf#w=r^RsalcOZ_xgTl5v|Bb9#Omr{3>hwj}*7`DyzRZMjm=O!vHE z{veT{Wk|8RkZ5gkSl!&ee-EKe3~o%K1TKIu;hJ zuxQK7HkYuv% zyV|l2EAe(2G^KAot+i7#8HC|(2_!^CZ6Bw4w-}+PrAGVAf=0AEHWx8Xma~mu)$fpMeP~THBex{ z+@0*D6rKPUMsFXF#TvDun^0;&xG|fSKf(bmB!xgA)rt|u?D&CV(D&lqoXu_kGt$sp zr&M!d)T|-ciybzmWDi{-a>Gm7^~DRIWdLy%tIcE~?m>eR#B?Cf(e zo8%_l0x{je?-?AT0kDq_%ks#m#6^$ZJInImeuJ;90{MAjwreP*&62S@@B1J?% zKw(wx{%bgY@z~C+zVdL(`K%D&HI{b($7z84QY$OJCpCok!rCdSOi0YG{Rdc)%DyzPAV8$zK$aXatm5=y2ao3wyng4n6(L^8gWvr^*;o@%?B+aRjUM3fvAsT;hX z_QiaF?0C=ACOo$n7E=a>hm#}*^kQ#eV)j0djL0qQn@Z9>zI>{fe(_-Iy8b=9(&L)l zBT{Ub3u_d-L3S#k+H$_tbh5N`)Qa_ej85-8;t)vz z1#EIuLlP=8l7X-}Ry_8~dREY(YK|L;4TvC>T061Vi|$QXPR&>w{@8FFDqRSg|`)gOqK4r!S0e6sNTEv zmkKfS`)mLByQ#gDl<-zkY2+wz!0hfE7WdVej9zqwCOti)v#&d2c%4GD6kz?<-Xfq6Nv(AXc^$~C{xO@5@=eq6@xbK`4@BoOQfqc#SS;&6^uR0uiWe{QG9 zx*(o=nT=PsXwwl;6z`BLKfvXo!Mq?Ng$C!b<;3M~-YO6yueJt6AvsGh04cE=QveQc z@62?KR*fmIO@(~EaX?7Oa>~%m3aC3M_+7$-ACXcBx*|C@pu%&3oX*ako>W-zAs+NU z=RT@xhn?2wT-ASIfzqj;3~W)pt@7FeJf=@!MgiC>eLz;qT3WKysHngIPQOl8WIogM z{M^3#SSn1IrrfQM3dkKL;{}?nyOR~T%#TkZ33S=xL=iKsrjQ=lHGkybv^(7m&&0n5 zFA+B<#H(*?gqUxL4owI5TYP!M^w zJ5yJ_9~T=N84=MmQ|Feeoy7tNZ?jpz%1u|qons4w(D~tt2#j?g@2j=mvuSaE&Orc9J$30fNU@%_HW{F&GGMSYR^;{CtG=R9=T$xiw3#s) zH%6?&b7ocnf&dytIs_vL# zj0es*Yl;_X4uiOq3rZKX>Beszet3w6Z1Cm)%{mYTSEnUinw&#wW6Sp{0j@>mVySS|AAp^i8XQ7g0925b8wZi13sOe)wVfVz$N^_YV5K zRA{X)nHco1S%gF|d{nzU7H$z%d~2Is6bl`{`{P7yHlu z`YL+ufoo&{Na-p=gYgZ9v?yUBCl6tX6{ia$g4k{lxJC@DI^RJU1h2B}FF5c0 z$#2YPi7O;!#^va0Q62rt`Jhq3d;YW3BJQ&T1~PKl^KoU)IS&ycI=XMLAYD?|cbJxg zgb5Rio6Gd*8gjcL!6pZ_i|KtMaOe7mV4#cHU%JPC!idGwTVQW}91p0A7YR z%X*2H#suf)@83U^m-zWwKL6O%)C3dK0J4$%MTcsBulW}P1Ilj-X?^|tX(Rqw)2iht zMZl(pnm-~12cJ`sv(K;|EPQY?S85Q3C3hYm(|{#l0$zi6$DPUF9@6?YRBSrs>0)g% zAsJM6j9(ojQzBOs7mN9AfE&F7uO-p zr6?~?cZA}3H}Cjzmh!wq&sIqJ3C85swh>UE#SElGM>4C<(9xyeZj+SM95%>rm0*$3 zKxIY_^U-lBYZB2RnbL z*3HZY6JXkNA{MnX)g=Q=4xv3LIaC2U$ez^jS?TI*kx?PMKm9S#|C{FRK<@flMg{|l zk#|krcOi=8M|1jHT^kar3h7N#N@DyCj}G^$RI!djws@gS@trL%Myd+`=4P=e8o8N$TpB&$y=9!8dQV0PpiS(jNt6@Z&JjwXY 
z^{Ldge9N&$%ehLKyTB%VFmNECkNkR{Eb@CbqU+$!=5p<>E{r?X^VH@+#+&|OF_Hgp zlk|t%&E4%AQc^%3E8vilO5=&`wUNk|xSUqQl~V?Y6bcOeL6JQV6nYHWH&<@w`W@h>gp%$4fl#c-X9qLq52}6oY3BjQP1Nhs^=$ z*x4xRQ-Z4XYlade7!c0KbDWhBOqu7nR4azi6>;W6NMH1S7R|`3cTb0oKN*fi*&kS z%d2$ZGO!9&&yaeJ#cp6aR+MZ>Z!zO8)-0w~<^K43XH6QW?Dx)x?r)4SLe4nA;r_Fk zcGX}-e4V;--&VT66be?NrSsJvjC_o7H`lvIMfmGxo=2|KI=^4dwUID}$0c?oUtb!l zpt4u!)Hz_gSRLp%?EYv44qU)D3^QPB7wA?+HaB;FW%WVs!wCtJ8#mX;PYz_x6XaYc zL@`*llxfO0G3~{Q(pN%P(5=fM)JdTBoKnWozqT06WJcERDy~eGx-t$X^!AYOb+SG2 z0cQL3kL--&)X&;>O zKA%#0TP({K?4m!OY>`^36^B0mu|eR-`_*mRG~0eQ#&dO!99 zINl#%G;B49p!V?O+e2Wxe0oPqQ*R)tl@;5t{}ysE4Z@ zsoLdvTjO}F%cA?|Yx4aL^Q-&67A!{>uBzX2besA#Q^ueqMJp3rOholo&4 z9T?CvW4kj+KkB#iy)ET@Kk-y=CNF{}6(ZQ7--k`Yx)4yRXnw$=p4XZs4?zXMMm0X! z!bQ>{kJh*dby?=hKBZVXXBPClVw*Tzs{8d7J_Q!3_=@@8DXVBcQWH$H+J4 zwLhwE_BWnO)y#HNZ{+6OtKuED{}8S*=I;{I-iaq0yK-kQf1+p3ozz!wz1vWO4gx8! zCE(&xR6+{(FkUe<1sGet?y@Sbm-55HfA!-<0z83(bZO5A2DrXDByUC_+!i60H}pb* zN$O`H9D2A68)+;@^8C7&>FDWEeN(OC*xjKw3)@p5%~8&HQ{%)Im=4w>+PB6dRm4O@`(WF{w=U&exk1OcU(V(B*P6}$ z>1Fd>=&IM=P#o*o^rzw*xR=HQCWnr&2_$N-@`>4$U#)OoTwWc1LV4p;2pfx3J3jOq zEdG_?8?fpJk6Vwf66wd?luv2vO=O4F>|LE>J8boTnSUe&ABrVQo5n)MbNf9PwLT=b zw@Ayg_=iveuiJ0pJPqHuZHuA3%Bcw>qPJ!H-2o3!ULdCIAt60-_qTh;0Z}&+PS!MP zcx+Egn*@IM&)F%S-=7suh64;caCz2FK4k_KT?Yl@{{8u$1cO_=eg%4$CvdHC-Rrij zY^silS&6a1YfNJwuEDzQ5X}vbDB&##N8yARifTG3SINiRMTe1^KGyoBl@Jed$zt|KcF|2Lh3 z7-{q}JRq(H*GE#80^92n!0v#mFd6d!BfK?X$W2)mnX6mJQ1JJug2xF<1#>}<4 z--L^nb>97jr}SElH@rjWd8l_#oLcQruA~q8EN?veP6Ks?v(Pjot0OTg&=SG)Z}}2E z_c7LzrcMyQ_d&B)ccH#`~Q_Y~){c!_&?uxD|pDmxvw;h}hE zPv6!G4yb?r*27%o&hzlt_CbyYa4`J<<_)mBpTiRIw-xij2l>u#_$}i{+YHU(EwZxX z$PpPt5!?a;i|J7N>*2tlmUV~7xI`pY`r7s4=>z@mpVQyPMvtVl7_AmE+wNc4_mvvnxc^OQnA=5nh`B&y z^zEYEAed&HgJNB^kDe?ZFEOiZ{88;{vdYyFU~v*r4*hX1Zz#2?@$k>x;3oN0(#O?R zR&`OMw;la+nPZfOr+A?Zbc66%f$9}KtJc9_x(8axU0>w`N@2DO|5 zR|eDwn~=B(1*hK=ZsKxiIApDrjP1D==nQuyF*w??Ou*3BD$%jG)003Uob=Z($=a9D zCH%$tQWUr=x*Byjj$$+E9&)+!LvLlZs0NM8^2nTw*ILHK1(jLkDu*&yg~WhQ%^G!S 
z^g{TjjHrSQj!JWQjlUAte{{QWaHSA%fd`CY`-_>02`AufY^rndVQ$c_Z@GFb8`Y&G zBS3*86+#svaWJZ53yMqmm*EL9FzEuIe{3oZPnLqesarjlMdXU?1(4?wUVP$Sq2ZaI zH-eZ|WSdf2ZfDJz5xloZtlY0V7A9`T*(r(j9l>eJAx$&tt6m#N%mR*EnMC}E*qg4F z%gREJxW90Yf|h+*go<7nhztkn#o17ZbnA6xz!mY3(;NjPdd7dZo(EH2q7rZ>I&N+W zEE#N{l{35)xM~vjuk|ZbUmLD^04P4-0KBi5Z5Pq^h@lDcYw-`uIo!+2!$@PCA5z}W z^-6*6AA=-pKY}dBWqA6vUIl+u7(qkb1bfof4>t_VQmzjR5+G22<}gCDM^uV^vNPTb zqG@Kw*z{CG4)(8sL;<0p3m-*A$IsD;`|qwPGI z@1Waj_4?G5`~8NRH!CsxhrF$!i71zxHOA@%+Vx;6-pG;?I~ZGadYsO53+`S;fx>lf zuO-aVt#o=`DX2e%Li)o8F5DMqD6-SlxmIc<9!}AD#t#w0s?aCqH0`;Du4U&X+f8(; z%Is=;SceUV5p0&0SP9M{GGWsBW)A#vp!&OCT3YIUv7Vi|^{Q#+cZ%V+<3Z`_)eC&3fWr4a>C>xI6@SW|EsSWs7;{+eo<3?;4eBpF0gPAPs{p zg3`3~3cF|e?jLSvW#R19o$4d85+e3*z`KP8YCsU9Lfc0c)DoXA*7qf(G>~ABDTkrg zXk>byp*I~ELOyDRmjC1D;IhL3(x*qFi9|i-q1E9{d?F<#{wgg8J?mSrcODMP zKfo8858<21l#lxh)QC_iQ|hmNZ6ziq77!emsTE}!?6qRS($;>^Mq+@31H7cb-vQzhno}7&;De_xg;ki9I{fve|SDjV?w4eKw*GSMTK)4`I$Wa z*L;3R!oSOclAYaasjGUBedi$gVv)y*=QpIqXKF{nB3^+iQb>oC||p|!AVbTWrx;z$ji_R zJb=;)HqRDQ$ohVUB;qxK{o-GRb20Jx0M@zRtUm(gV7K~9j2>zKGXIjELzr;5;#Gug zkZU|_fa)O?EL@>+$`pUO-_bgj*9K>z%57UONDUN*+1&mX>-9P3dzd@AgY^J;m`$GZSzfGo4(AFyNl z$loPN;5VX!1dpK{+hVhuz|MFbFQJPlmBD%sH{lx>_CUNBSASZdgWLSM!MzG4>}Wl&LtFD0 z#r!CLjK?eLG=1M}eYP(-4c8yr*1-lXAt3=y{@x6b3K1LI4zHyZAoGDQ!#(+@Iagu4 z3PM<5HsdumOJbjYTvvfWq26#>XK3DClPOC&I1qwOC{@f4>y?`u890T8i&W7w;hxR+ zcue6$_vjHcd)Ppev1Blm4Wl9X%goC+y(j^ua6XP!V@Q@0 zo7d&hxuAl~UMS zChfaw;$~w-DZhf^EQ9k~c~$tl@Rh(cs#i~Sos0N+rmA)$aXs5O+Zg9RcDljIKjDDW zd$2#)_gNN2pK(?b_w5$r!~UhI%eFvU|7cehx%q4YLQl>7`u`*-ON#ysG4_O!+~^;u zES&5|xFu4aY);~a6B6s9(Xba0-)3?Y7^!`~%P>%`qnUiFLuS)N5O!+0xg37#Bu(@Z zR4Z!5(cOOII!74&0QjYp{k9jTXFGv9Tyln`l*~>7^ZhKhu$|h&K4&J!Sd}alB4T#k zKmA7HqB)<&_RFmvP#YvplrQA!3COwWU1dVrtgM~w>`M9 zDpplfkx^@t5sp8;6((|1DSC6dawxZB6;S-GlR-eAUhHu^5KVNT- zLbz3l%RUJ`Bcm~Q0y3eQidh}40D-$G{SqwhK(qU`+y5gx9B;HhtEDG_2VacSdTlz- z?9^NXWu}t;=zxtF+ce5GSCiQ*i|_H{SMu}t>01{ABtfF0*fMHYh>V7p zweLR#$~;54xVnq;~WQg}?N)sn_01PK36S`uao`&*jPXVphO|Te7EA)1qSf 
zKQ5}0rmXWc@}(fnH81J{d1y#`cA6+&U@=@JzEbYNys@C zq!<^+b;&&i_Z*8+VAvNN9Ud2#?^+o#!SC_6%*P)mVuRPj)nRwXku#Z=n>!F77WNCR zm{hK7v?#^5(*M&>Gfa&?VVeYBPZ><|P`xv4rptL+Z7PrrgH$F4(>f7s`|w;=eK~cn z(=buxo}TZ#-8#d&jFK2wSSmw7zL_OCGQ7gIX9kMNgS@}@vve^isla;Fbvlbc5gRO3 z?chF8x6Rxg#Q(;T-yq^mGElX+iqmepW%%N^!T>zd}4a|j~{;%ZR=&r7+%No*;0w&Bb5d+VVN+A3?>(h zEnRHqKI#0hv0G|P%ICzo*dA&;Rrvu)cEqgIWBV)S{;+Vb=!_Dh1zL2D-Xiz03qR0=JWID6+DU zr?$B{L+C}zeEi7WIr%%cAu4LT%sK3PY+6)tM_85DJ%+M?y2}&$suDcf;dXDB0Ktio z)42WK_otRUi2x1W1N9Vb?LEOU0k1Eq8oCtai&j=uf5tKzznWnlnzLE}wDRrUS>Q4}Pe>kL*qT7}%od2#M~t=D%J;SE?M zn6t_~prq(Qx>G8=thxx%{-*_?2I(ZUkZ(oaqi!=tnK8(ii!%TuW;b~Pz&-^uI_M9Ezc84 zHf84AL*ky&;o|*&beWcP_n%_5Tmqzua7g~nvB3{_nshN#8dwKf-eRsCabyqo>L|N_ zq+(c1rzZJ3?{Ls1V~U4Z_+y#LJAShBAfIe9Enl$XM~+T0nn2?k+(wY8^4x8`C&g1c z3&)9{qIE13oi_e(fFh*)UaHm*0}=@FDW&f)dqPGpTaR)wYWVIVERO07WgeG~tQ-Wi z!%}{MoF5K*`G#JkJx|a|fK0V88hJc0i(#Zi+Px6Ev9L8N^+_a~_Ql(SSWxnWw%c?L zm~7f8ksxq3#>8Q8^kTXFrLzEe5(VNW2z#Gejow4;akn*XEE|13@_DrI!L3?m!J7u> z^}(2+ICgH2b_buUm+mSmd{|iQ`+d$cTuUmK>8FCuxnJWRGrh?QIbLv^a_6}%3f^{v z-u=eJR}7$EyhnhONr@4T3p!_rR+6TpYwm7~%NAKY^Ar9sU$O71m>2%_m&i3~qT^F+ zfV5;Jl9j^J^82x^F~AS3Cu3h7oNTj5zW?lws!3n}d~rKu7=Ft8E9YrP5+5|2BZ9SW z-1yp$c#>J2Z`vf=Xw>B{^1mb0*ZK&o(_mgoOH1oje`7mmJLmPFxOjOl+c@U6b+~|W zhI?&;uQ*wIw%MB&xanhUdIMqXTH*jYWFJ->TN#|ooUb0_1sr?o&>_3no;n44aCOHfk zU~R1i${@5^Y4Da-mOnkem}SUybB?TLjEEi%*_p16iXKje92fWkoE{=4e#u=Znw#ZD z!Ab5;WI<2C=ks$u$&{Nc)7KZQI&gWBNN($ik^o3IK(bvS!KLD6(FG#JexjROo7_bBA}+pL`NnZzOR^;x<kI zL37Az=9%rxE2u<nEZezH0yJLS{FWtG)XO4kpwH2A-f9~JDxc(;Qdzq!-_3#`nykiypOifM)N zJauHyEOhG)f#Tv4j_gpdlqD*722B42EKo=6?(e;e&Em@;!P-NEi)9rCg?JgzRRW|O zIoB@Gs|n3h&pUw1Fe*7JBt0jWznNUmp+-nTTJmt+ECmt??nCRQhNC)4*=mpJ;AT_~z~umc2q&NrBn}6Lk+2peoe)R+OZ)PGe;$@(g)F%k z^YQ=g29dDcMosY&rUqM1rwKecH(64u(>8LHSx_7816BnJTXAllu>U@leGfx42XXx8 z(5XV8^H`-qLGL@SZ952LN++yM-ccHZqch$&o$R4lNLSpyZFpR`&kHM%!9Xy)F}bcj z))u`%HWfFY6@sP$;@^0eK@ZXn%~;r@A#8HM-CsRl9TXdcy47mWCRVo#ABkq=YP^Cj zTk0r18lf-N9N=#rK``^iNIuu%G^rY-FhhHY8#;uU(%jGo5|xn(BWnwc$u}QUnBE0a zgxtYzljdeR*R1 
zw-^x#yW(K6tq2nHA56Ntus^MCGWYWVr)S}_E`Ef8!u4$#c)b$fw?)RQWJrbm)Tt;M zD{O#E1b8Uuxl~vw1d7NbU>3LNqJ^U$9re@YE{I4OQFb>&@;bVoEJQ5-L-~F`EsEnh zLCv&qPh#z%LFPbnM72Jx*eToBW+H<>|J|c1Y#IVD>BvDV#MF>JKM=OPDUp+`L5`WG z1ik&K+%ZtWZ_l;$BaKz>Ki|C)PPt?SYlf(_eJp(i$~5)W{1)urZ_Z417ynl@#*i;`Y}#chIwy2q(%dy1`>p`zbwg&X63|Op~oeMVG|Z zkbp7`ZHxw_h865>Srg9&}VYAzxOlgU;*BNm(hmDzSZt`9Df@!JDl@}-b!Ft4- z_f5bsKuW&v%n(@5XT>4lT=r@O$SNJ{4u;Qkh82baF~g)GJ&6D$RbAo`Nr(g<KU*Gb3u1l4 ziF|sM0jLZ*z^@&P3{UBOudaeDhpIpCYvvK=uROqRDluw_=iB9(zS!#?E1umXZNO~+ zv0~-1t(UP`mje6Bsxb~X4-A8D_BZmO?K!}daJ;D~I=CAuxaQnJDHE31`s_Xbagghr zupQzKQWNQVya_U9mMJP+7z!=L>RbSZX^}(cunPhbIzZ}$40%IVdp~zq%-mXCZuW_q~)xt-iAuWjV1! z74f+MUO_Q<#TnqoI2{kb7kebuK1yF&6xu+r3o*EO*G+5k{zOpBg z+%_j~_ zu{-k^L#JDSRq4-9cDOvFgSoC$0ycl%HW@S0vEfKkX zQn%)I1c*m=VejI8@%z%VLWKY1KWaWctc0!(KO9bLxf>%hFKp{hEUJ!H)Zl(=u~A^c z6&4o8JR>aE_+kKqRboH;5F{0K%a^U~5uUV-{5f^-sjgI;~a{6;*5`B6E21`FK;s7JNFOuYO@Lx><|@8N^8U%9PQkOD zv+iMWULm26cRU}Fot#=UzMCKYbS1+^v0ti&yU#%J)132_m#KOjbG8bDf4hsc*f_U9;dKw2+07c|w$z8dPX`Fhkn=N5tN198w)lj|InJWomwL}Z zzKhr&`5bpn7|`gFo*i|(fCSm+#zZA%W`I$Khz-1NF_}%wO*4jCa`dZY*%)Ra1lIs8 zMzPADDZ;27P-g_hh6$+b#W_n{mZWa+^!cc)iTb zVFG^8jMOl)&&aL3Wg|oFbhY2k=ABpl>9mU{P76J~nT6SIzv3Hv&k0b-2ZK)KsjlwJ zU)6@Tr}!&Rx}Fub*V(^=uR5;GNJHEy9sHrLIz)hpM#-2B9;uM}*<-R3gigu$;!=d@ zKWbpH!9Ysj_2~y#;VVFp;2@-Y0=xgq(Vy(j>%*p?Il43f3nTy|N^Q?Y=72(2`Nk4@ zAF@6d@()>`eFkp}C=)cH>9FAgP(IOS6{s@ zy0UfV2{}fXN)0X_=+v$$=Es)Rry4_6CUCm)dEA)kF@^(7Dg=mHA;5cYK7wj2Ey+4+ zI$MPV1QxO{m6(LxD?x=D2YJc!j+G7~DxfmX&C3dcB&ECM9IEVaAyb#$Rtw$omPq4zy zg+OFw#D0!j-nMD}1qb*V=`iylD;vWED;at1_zN2T61u>|s`;E+QF`uk3%s6co|OGJ za!Q5u@nn=AE#>w2F&>0A(D1W|e;?8c_ka-Ry894va(;G1u?_KA7gtZ$Nt@}6d=BCh ze6`|7VG4qoX!KZ^0<_WVY}4ZU{#N9$h52d>eZ_=r({ZxkE66>42r7?x85aRhm#w9< zt$Ib}M@RoAVZ9<>A1!1Q?eSwi%xMR19EjryfDNP!Tuhij>NkG^4OUKCPQShWOY5n-7#maAEjI9uQ zWfAu!8bNd5+&6-*tPZ@L{bXy(&(9AmYz@1MU1Q~TX%WM6;W2oXp4gZR#>N*%OO^=g zC0|&4NIzjvp~FkxcTccv};%)@rch- z8wz)*VlrVrgQw6RpoiO&OFdr=e&EJ-Z1lZ893i+(E)?|Kw$>l!fFKO(h0_q^H0M6s 
z*UeOLhD~1q!neV2^9!NT7!gMVJ>Cv5L-GA}n=MqN06q{f1X*WiXEE;@R@4^gE#{@X zgM}vDQv*MVk&h9A)b4oVh-zwp_B_6vvNKKX1y3TG;}HQa$GsBkVHWhGFPZrXO5F^+ zvH^Ira1pEBKgR7V4N4*L}o;9-0diT-t^T&fg z=;iVCgZ05GRSG!4C?b4P10rE4x3yWWS?%(ABSOPRe2}V8dXeI37AOA?6xFc zqxfI7_QTT)oZPu^;O}sK=?*VLWp*(G?o43QkqX$5g6?UqD_aGv z^}m+5(LF>T8$Au|mXqv&~mw3OHv=S2{srg}*Rmb6Y5Kaq_! zZ`+EdCVKFO@zf-DDNJS;dfP%gQ(JdhSGM+#rE{(CdV!=kA>$u`y)Xg&&8!x^yK6>Y&rr zbAq~dzb3;=OlB4asdF%$TOPAaTPy=f(BJ~PddJ!hw+JkTnZr^jQ5Y>7;T_rar|o+> z`9CZ&InS^6=4-$Bj-OVWkrQ)L_k?wX{Yf2GIcoDDP%Y>5f~9qtgxaVodxYpIQ%C^5 zO$7b^hNQ@yn>6P)!XgoqI^*{AH3;OD7OH!{Eg!lqszj!7$?tqqBony99OV{8xpiUr zr`QYq&-LfYKBHeTv07X-r?|)5C2{^5CY?IMt?{d!320BZ)lIr$rjUR3f^$E<%UllO zI?q9Yh8;GN?qbW{s6XH0i_$lZcrVr)RK(Uqo)z7|hevT9tk}@_)3C23Xc_N%34>{n znn6MkE$w_{xRDXp*^Om@77Znppb6kJM@P=wdWxKWaJ%UNg34ywQab*<;O}5G@f1~X zzeZVhy@Lt$)P`hzZBX z^H?%rYr4SOlnY=OM**g{dPl~9VoLGF&K7z7vyH>8fwc0DDLZkrlvv32jS6hide^jj zA#;-zNR>Ld+0OO8NFB~MAzC$j;f;M-{-wdO>V6}!GLfS-eIl<^U-K<)IWMRb?0EP3 z{Z^7O6P~(Gg?Lb3%Fwr^`|GXF)@Vpke|$Z67U!@cA+APr0K?&0Gz2l7)cP|NHv|=3 z5>~JKJ;LQvBbAvt#N$G@IFlU_r(nQ4Ri1DYwZZewjRuQJXCTeUN8{+9m%tV;Z0DH@ z5&PeiE`H3{Q`Tf;($_t!iHm6!D;Ix|3gvp2S`t=a0n>`8NO27f~_=oiP@SJz-V3#82bl*g#`;zl4 zWB9P*HdUk|wgiMwVvJIdOkO&s^^CZFc}vn21wJv$Bg&22H#icykm_iXRdjJH6|ARZ zgO(Ue7zYV4IS$bUemD5|Pq%%RAG^D)Mt*$tVBY)8)!mJDGJQzX{>-!0otDd^_?cc! 
z)PNG(*K2-Pn$A3WJq&dWA0XC3NxB*+@OYbcn2O&fEY9&aVW(vFsNf`Hqbg4@UkQ;Ux8FO_w8 z$s!kL%-_0Rw4PSJ6Eyhk-1||Iq&Sp7iMH}Ie&<5Vz-rqiezL&ZDd*H~7?0dg`Elj2 zzlg@&@Mb!KVnLp5vX?r+qq%AE+yTGW6#U-pR?`an($>|326q9Q9l|^mY$-V;^z6=SE+GkmVJ@GRLhu_iw+r?jImjbDQ&m z%5(VvgM57J`jOUh2XA*2AsucWb`agY%N=*H9x>1{F-U3s<~NZETMuEnP9)7mNLN|F z`Wf$YGtJFB>VEpBtbeQO%NPBV+PNMcFPsbyrpCqaBdcZWz131gfQr6<=10~jC>p6G zTkvPVS!>b1({xez1}9P9N9qQ~W303p^njO*lEGqTs&`qdGK>8Djysd&6TKR(laD8v z+aD;mSnNFhJVsEnD?GnXS(a~bJ9QfkodlY4*Guj+4o7u3uYwQ{jTb#+nHG8U}@&%`N$IU?i!GL+q^V@2H zgZszmYs*_DsHZy~Xhm!LS=1t&Sa5ieH zNyIR`S~SJo_PIy({zF5kn^^X1Y|+v+x(`>0WhHf)U+IwNGx;wlKTXizHX&6ae*f@x zq#6uf?5hpT1#nJ5${VerQEDO&*08zn4+uYrxX0XOe4_ie?s+_m$?j*`o6(8!4e}(0 zkD=!WEJP-dr&vNEZT&!_frQu||VB1lq~JohM0oX~k1OF#YDyph{j;d`q484PA)7s&?J_HnQFvmX;E$ zs^D=Qgsl8)d?v-{nxy4fCm}%lM#=|2rUunEZ^*I9;u}c85pvweCiur0O>)*p!;Q10K)+eOTYdC(XhLbOot#9m|DyxESxi42WdE}ETg5B|r4hFCm5*n&f zXjG{G=O`1;*2~}>= z!g=@Ntq$Tu6k@e_d@9WMh$-D>F54s(YKiD*iB3haX|Jn3oGN>%s^zGR3t(CtjnNaU zElPzcfjbFKmN%wx~W%oErD(PG%>+k$yCBN~_ z=>DkS+5DKs?mlfkegA<6CjN&79fF-9@r zL#o7y29^9h2tN<2IlsMrpy*== zSuv{9KBF|*z0H=XvpLCj5IKQkI7*1kmpt>QbRJ_KKfiJNZJf~k+r%8YN}S80pj(jqP!zo2a&ekN(NqMqupmT$ku;>4$FKQm)o^zTpf-L^=Ib-{%t8nA21P1rJ4 zQ9e5nfly^gD&PlGD(epnwXT)-&t&MQXFPka5hDXMO71;Dt4Z%D_+)KBr3E2KoIy=;vb@CH!Ua#r}2!JZ|i!u5Hu(MGU zV1vzwZ1ot}X=$b?U1w5b()cU#eqAgLH?z-P$h)U|F#m6(!MQ+RL0Cz>8o9+1vfoac z8{fftrFMDApBeosHCI>b+xNVpnk4SeYZ^>~ zWVJtbXc_QWo^?O1L0EKul7^?kuKs`+M)4M%am+&Z)u2-t*tg8y;#mvHvm)_>k<>2# zrv-r7;Kpsy#vK2H4O#{5aTg3)ds`~ET?)#UCVm8@LL z8T)|xCn-owN3W}EK0WXvU~sW{)}Bm!LEvC7v<{AKF1(idlH1@tg2+R{ zh~3b_t9Ns!#6%t@G2t2T=o%oK4mQTB6E-&P!#T|&%zONGdH>uuVlZ4eo^87F*c{ud zrAWv_a|Hx-72I{uyBYps2g@2C%>7KaKTB_%tSWxvDZ?xWt*66<&y(92#UhHrWb*fG*I8DfD zW`2$<43yg${j`~s0UzATWVP$WIcUr)i_#4ujw{E~$>cPb@%8qP$(Gl?2 z)bR3^e^CjsqJeh7<1--yQ`_6+`L%Nd=rfMxSaD*TcKV*w>)hfvqTc z@%A88K}IcA=-nl?R1yD4Tl76K1?xC-uk2Q}Jnn>#7HWzS59G0hoKJ}W|i{yS{1mMMn%hz>ot$7O(?Um+ef4gWIa?(9FAJ1{7$NSi0FT8!DGU_e6|Mh? 
zOSsP5*g9ewDIW2gtLh=_5J@1;8~A3<3R_3}U+49>eF9R4@z=nWzrpP6H^f$Z&B-v= zwHUFW)jbQbJTs4dB*1bXREZog5t2CCcw;jWXDM9Pl=A5su=G5)s`1GN`fZXLpx=~2 zf^d5W90xAr#=A;%fNI81kLvwBD|@oEZui@{Bd>akftNMZvo&II(sjB8;$+I}#r=u` zOU7QeO1SSV*6*FZ(uD3?tpgecv5|wbW_tg4K!;2`ZtvXA&{h{U0dN^$ZQN4K0rXh#Xdy1G72<(Lh{Bqr5kHtTZ<^+f6Tot-G<1S|XM$qI2>AmZX&lg(}&#p;-%bH7y8b6#a)C6%>73+%ZlnONq-&xN>;`^CyMV__^dPmk0d~>S>UNY9N8uz^t(Z*Dj$nhTVc}3m%i*OKbc=78q zz`}bgcIG?!IA2UnxOW~Ta}s(UqWt6rzoqzI>ANRM;2IrBD29p*MZbm-Y%*s|)c2-{yn9Qp(tRKb&;t)9IXE2r z&lFM}4mJ$xw~I!cg2(f15t)HbkeZ*ZSvjhX zO>B}Y&CF7VT3mecqb5{N z?Y+GWjEsTb_nk~ptAo>~M@5Dke}9C+*22A7x%{xzR~XHh>gEgo?*NfdtS7m~68!xB zz^{W_Ft~4-A1~f#^uT=mL!r%hUHF4(WM9!_^&gmaRp2oC;_BUlA1@%r4c7{kM6oF; zs^C%PzM!zH>4o1m_Cr*xZPY0FP6S@ z%FY5OAyYIFa48Te0^eUKHJ%jtR>m4vg;dMs6|=lNU4$@PIzKw`VtZZ}*Yo_RJ&YUE zOwpgWzr-5pXWW@3XE?TK5#+9EHoEg-3gwO}@f4IOjs&=02_W_bY$n=!x9W2NZbz6& zV^b1F$(sbIaj>+~VsykUB#2+c;i<~>j_HX9w$?|7D~eMdd+a{Pvp4jA*jULSz3T2* zAh|HtZ2sUB&zzO!8kl*ZiRDKCsWU?{3=Bleq*5TzVq#-4>H1K3y*R?|(Hq`Q-G{B; z;+|#J5pk;Q1SIC~hVUf(U#mi-Cag<^e%FR&T)*Y-{C0Hr+c$gPY$RH*bGZi_vJ)hP z%oSx<#yVgpSzT68d_Ek0kDo2b>fipW<91_R4UI;q7L>!2_}$*DC4V(4ryO>mh!HtE z+yLO5DO{hhqbcu5m=Yz$gbsLU&V^I?xQ{$0>uf)k7MFWwK^$an#z2Y(lE`h3+Q;5+ zSZotQb5p&1p^DJ6HppOwtY0dJ^+MXpuZ!kiFI@No@gFS3FsMSgWxDzIN2~Gbo<|~& zIW7PdfQqMHsL!t=^Hvy{r$ryW5CTE6}d^1Es70T1oCp)AVqsXc8Cb=WwkfO!ef z19D1AqF`*)R*_Q^PH{5V#ko3_DT+B%kC$(K)?WXBi5&oP0jaeT=4fn$J|z(^t_*Hn~5X*j>3`~77!HyV;YfzpM}da{LJ(=B-m)6)JbRo2^-BF&B0ZhjRRS-S|Y zyx9F$uZzHMz}Wq)wF?~KxTOI_&qql+O4tIW6GUJl2YtxcPjPZwq;jDf=6)|Az2>pk zp&p#279hc_cFTSD?X{PBOU0~1Y=W^iB>lrq@rO=tq46Fx6L1;5A&aL zy&7Q?G4$pai2+hPpDIkgn*M|cRENWfVeZjx)TcPSU;hlN!{{a4RtR8_+_vdp8ktA; zh`M8*Mx2(`AT+Ce68G1pW0}VR3v_{CNJ#}Wmz)-Jb~gwe1+pPWq{Jzy4E8rT_OSAX z-k@}g3gxH^Qs;vAn+07QFOaVo__#lxpe$d!sNFjp$G~$z9XPw=HaCR^%*vY`d6LX^ z5gmjBeeuOSmC$5S*#=dZ7Mn9V&-+zwy<|#?sOvZHae)cjNdHmi6qu6nu$o~kRY?Y3 z+}Hg5XVWarP0GfjuE@}&jFnmR7d{t+{O3y6H&*U?Dz;=Ic#(|3n5ZT~S%R0u&IIOl zgvB}*3JPwK^tm4iuQPYFD=~_SK9AQ8-Vh9rl(_f^D|)Hzq)1CY(V!neo<=1P@FBId 
zOu9W}G-q#`_|yBV-+XNr_E;y3NAK~q`g|pe&Xizi>%n;rt+Eq0r!C)-7n9M^#)rq; zQt;!g<@|7Q^fQVKB2M*Ox@zLAW0a}P5_RW_)PAL@{VU61%Dv$Bq^}LP= z`rW-6=bhH4QeHnVe*?{xhkSe8zYhpt_kk!((8w(JivBZO{G#o(-S9Z{u^yjU_L6+T z{7qF_PCHtqii4lyn9%w~r~ce0RN!g5DW&YVK#y3%7nQ zB~B?QXdwAfidss|MIMCfjo@cmft{i>KqC;OJwdER6~0^b=u{nqd&~D{?7i~RkAl+_ zDZS(TLy38I#)8evEF5>6aazNvsHWVa{VU->hXE`}UhLDt*oTKWT)k5l=}gh4+gvq- zbIo~Ra1e7q1`x6N*tcxdo)H=^w@Q91&EY&o`u%lYi8*M$A~Ev@2b;a`ToIAAV^WWYWFmCrt5ndC>+r%ws5 zeB}2Ofa*IL-=fFl-zVYOpj^N7ay)vd6hefBm9u_V`DiFFc?(4Df5(HiWEXUGiSjwg zA46Vxb7A?@Qj>_phkgrl464U#(?$;c@+26AYs7tVg+UdL(u+p9BJ?>KFR8c%UTPj8 z!SclefW(=1eQ*HNoW$udt%0p;{xp^J;~RF&Y)|i)ZpnQF{cl^#U>Ulu{sHlzaf9epw3F75)=*Jp^bX&shQ==E(maTAQxmrUFP4$mSiN{lgI=io=2Qo9UL9EkdVlqLR z&N`p26oDfKIE0dPUnIw2X>bn%VNa->SXegYBJQFkHV+~r)tnjMYTUUF^fZk z)LYO@e6t2}>zjNla%^dz3o?-D$-=o9!))BD;SJAC^|&Lgo2XKI0jKc2zj33m5V}q| zlXTT1Y*|L-GDKn-#7UlY;a@T!ISp`VY#x6SivAv_B}4OoFu%;r4)eXqK$Bv@f2WVM&)Iv=MaM5#TW%5- zv4+g@_q0MZDD_+5{P=-LYR09S)>IaM{jO1Fx}PHbNNe$~asrTWKbeed`|S+V1p_h8!! zHI}tA-d)hyaTCJ?#C-Z>15aQZ@D1%nQlP=g3+QrSvYddSf8NovWK8+U-p!;D%kS-! zw+FOF8$muB+O^6@zAl_%+`?Eb&ZaSYaB5N`xO94Q?gOo+yI5ipvksq%?4JaicO~p$ zcwzwT9jHf3VX%KqnsuJb98e3w`I??+po6nL*D*UJD=Uit?;87PF;4K1jRO*yi3Dk? 
zx5p85Q7a6J+AZfcs7%}kWwXh<`n3dTY#iL@Pp`4*e-DBJ;1VF$V7VIu_~SMIYi}2z zx@^tC0cWp210P4twv3Y@7(fmAUQK;cj{g#cT#8G#c^R9;Eb!3emWEZ9IPQzo?b3^`Z+vH5p$0L$bRrv-SZ~k%&m^?kuUWo z2j<`uRp#8FG1R}QYkItbYPyy~382X4vOYDFtv*lY_CCwQjj`{qTO;?{`S){M97Sut&pLfmACi)xDaGTyYvv>lTgqfBxC3nU(7wJh zQ8D_oCk zL7y2T-172~to1mAA%>u)hK5X;MGv5PttZa!RwIWoO(|B+H0|^&%#WC{WNDyfWg*9) z4eT%L>C&sz9X*Ec*dG>9*m=?L&CBzj8YVFi2qf?HzDh)P+Xg>Cj(uCaFX|EoqVB=m zz8^1>TH%@V<)EkXJ|$8tv4hiqB7g~`+M;}*(^lJH(h;M5+=fXm;rk@PWypPk`L}Gkp zC9vON6=s3*&v(d~nDvG9oF5 z+xyfBa;2ml9dStn>Z0J{_zv|P5Lu*~vDpb-mP73X&-Ubb`H?(*U(ClD)G?{H{lBj6 zbmtovMC)SHgVy0nBuhuFUD&%QW zF8ZLY_~#)r&If2HGsz^PK-c$?*D|6pu%3oawqk;#3?Zis`-W=4NFmIC@gct4gs%L| zW>YX-R#SNOFmOay!6ppG=>Ryl_wU5TjOod?doNL(FgsWOz1o* zD6XnA$yw{;CK$TtH9x$?s>1*B_ocUB+HLE1AO`!W#3%94*`~ngxDDw+EVbO&C`vkd zVgD|D1o1(a@7vRZH5#n@;x0pu=Z$m0k?mFr>?1-)(_r!iKJ=TukK97{1pw6{9E%J9 zOaj+tiYs(!CnR=VFd+^}yQzd1e@bars?@4obkePSU8g-0pvKP}<14Zmy=^$!w5pDc z0Uz!k@z?^w|W`?>0d3kY1`!{UkGlfl#GUk zM)RAZcwwKRyT-%LA(F_@iF-?*GWU*dCDt0VuT?BhMR(AoQ{ z@fz|Q+EdGWfI~}*icRT(YP}CwQo=qcA(_Fr@4lp|8XpQZWHU*vJ5R}0Z#9bOINVPC z?;I)X&Ze4}ogJ8(;hZml&mO@`*pwNR`LS)gcu|{HfoauPL*KnRsxij&Rdi>}z<~7V zhCK!I=kV_co$#y9zE7u?4|ewI(r&T#41C7wX*}8-$ImhwE-!FU#8okEQ`~f-(0dEE z`G?NIqs!q7H*SJGMl0X9Z+U*GBX;Rn`G|1{^gwxz(H>ih4oO zN6aeFS*%z6;RRPv*XqJhBCpu_(4f+1=K_HW1BiJJ!4RXXhAQEp_^7M>MXTrF34j)c zDN|Yx7%OAjyF2OB@4Pts`MT8`sx3~O>sPU>Q@(byUAc_Z4Q3;N?`*anAb&V9zmvM8 zI*0+S(n}Z}?=@T+|7j<eK#qy1(YX8#0({RWR`|A&$%#~6a2wN*;(ZR@zB#Rfz6g09aC;Gx!vI~> zTHCqrrBU0_F5TttURY;cx7BGw<;&6heMp zOIMGIk$S7e42)S}7=X4}lylo%cxh;A1Y1#1c1Ygp;}&MVW{Cfx)~C6~o@LKf=spCw z0;r|a`TVrhtYdlh%7#=BY#LZEK-rV?(4+feblAiqJMoqKmXXuB=AZ^F7V}{JTp{f9 z)~n$_|(qJ(q412m*?&Cnm<^0VB?uC#o!ll_iXJlg3r?L zJ)oDJEI8%X^hYxzj{6xgC)=8SzeV+@c%6Ug{rx&jArW7EXY|~x$iMwJNi^E~VqzjT z6_lFbot6Y6u9w>{pf*teqX!(}_+WN$R|bQC^_~2uE;5PxuVT*y zxTkG@onP%JAWE`gGW&nNer`QlB&)B_GIsA<;{L>&{k6$tfM3CM4vUTe`#LiV3lCZW zflcT;mtg|4jc5(UQD>i%MG_uf9!$Jgdv`Y-yLdSCT}XqoK38CasstO0nS&*xqT*vl zJ5n~iq$YcGJu@#4J6{DlU#vQXm^cA53-h^0${A0a6yUhSZA0CY5?Ztl`+i8RJkUWj 
zwTLcy)#;v0F0*pi8C@Y;B^xT#Nw|J%jY74aMXNmr+`~hK&q-+bybzqB32d_w<94~N z?f?+aW&!<#<Hvb}$~RC~u#sJXA%$fDU1+s&V8T{;XAsHanwRya;n$b zBNj8u*bhmq?B#f11iyn9a14X;?fAG7yJ?C0w;7*2{wU?*Zv*{IQpY_4csUI(f*@dp z`HTc!Z#MvK^?q328$H-6-ax^0LKXFDfG`PGtR1s_KO1_c3# ziTTs3?=C@fH#JtDT9O>pA;(};>Y1~pW@YWFxOlm61?!2#IbA%9PL4(R-tn0v0;lhZ zUj0kTuama_#U4v;%-*nZ5rt3{IoyUl&KjlK(*yWyaRIwDQ$x-V!yZQXPxS@!%zWLo zgXGn``ooKNzt;D@Jrqm;+8NJmFpl!xsIH!@@v5FiRk1&L_*2n^Rx%?8?zomn3c(1_ z2s$pt>+Jr0Rs6KpdR3F7Nc7l-98_mnsp&6BDA$2qafzAC zv5NqDdo7gpL^Hv$Rwh(<)IcO#riKuU6vC_~oxE~#{q_f8@I)=*sKGGCF=bza0 zH3wVYG;n#wUyEPzKiOmb{4k}La_8G0OSj8*(n^*Xlf7&*WuM+oHEOuol+VcCYx@qy zni@YoKDLq@md*CK8~5f(j4ocp>%)CRSE(((SIwMP(Mz*4_yS-TGh- zpALn?$|v@0vwjH9fp%MTTwG9mt_mV~mmJ!A3e+nT*}Utc#e%K)$0{EW8rZ6ww-k(x z30Nhcc0HZsI7m=H$S*P~qhez$W9 zy_bsm@F8GyJ==9_$|i`9RkK(!E70wta=G$g~{q^y_0Wno=7EiB6EQLtQo*&+)QOVTa9AHnNprl_386d!%=)B_1nJnMeu$a+nL!={&)*rrorRJ*0^J3 zOq>8X!$P24DVxp0&hHO1dsc{XbIb^pM*ig22EQWe;F{&+{N>+Snm~01Z>Nl zM+F}WKZ?3iawNoNG(Pn82-^S7mxSg$hywY?-QFisJfSI1DN&TjK&V)@1!dp%U3M@g!c?+dwSC zroqO#@@LaUCY=B@oVZ7k!H2r56aKC_weQ;cqS#(2MNH7@4rk4_Z%ZiBDpUoufS1)0SnnJDc9!@k9DDnL zXh3!E(Q40Hd_nknJu0LNqESHD$9(KnXJ1TRof67v+lah)^G8?GXvjXH|C63{7jeSQ zU}~C_3>WDPw1#uuByeE8m-bC3piBq_UOJaqI|1z`nENWf*RQdF-me#RlIBkvN0YKO z;-&ch^j4l``E6(!pAH}OZE@1nf4;;_4+go}01CTbt0kEV>veDPX zDW5_uqoY`DX$LxIQD#||C60?MEIRpNLrtotI)w(1HIt>?^~^0s3@Sv~Avs+6B5v!m zmM*2^CaV7(C{O%8|Ah{EFHo2$0Fxi&2HiNn;Zda_{yUvgpD`wZjy=SWCqPZs2LoieDKE~SlG z${s3`lTi}rX@zI36Iy!5t8G;AL|6^oN0!@rQ)z&xx_OJ-K#GaT>@4CzH&EvRNI_aI zRqLZ##*Mu2f)i&S{Hd@L(G(7#VyQtLhT+~`u1`xl{6}xsbY0?fh4#LhW%v2rfWN;W zYY1f6q?l?H5d~btH19*Q8~FFB)h0PW0u(&o=1@5y#^rNALBOuz+AU6MW@ZQ!k0^!n zzr@EAX`!B5!GnvCbU}4%bfWp>?!P)iUpdF)ZDVj3w!v1K4FO~?lU{88b*?#BdcHui zpqTXZFGe@IEqAKce6e$OPB_fcAP5MEWi`W!!@m*+ZpNm5zmGwGqxa1W2OdmrpGS>^ zS!#eZPG}_&P`E4L@(&LQ$AvoH!x5NaDq^M?MMje3g0^8jw)1COuDd6a2_+uGYxZfk z1?);3lbDvWCzO;yfHzlQh4%QgDTvyN3lE-2D!mwzVccUM242BCG}dvSg`_)S~I zdgN;dkofd~ZLJtd`6CRu@rRqzXWzJ~xJacV8jZ>wx)61nxr8rqObtzDpr0u|ZNN4{ 
zgI|)2&z@jY^gF~rVMdf6*iJsd4fHf!<{EsCc9j>9BXoMWsW{#>t^smU5aM-!%m`|e zrp~JdGftIm$Fm?_O>BR&0&1<+HyyUFzmdN#*;dS6UmMDo1z;JBUM>3P;>UDqyGbcVIf!MflK%+gzp%b!!6#Lvm2&hL6V zd>C*%w+;`&0%2Bh?IAfRMl?c9=Jqg+#5Stp99TOS78W3Yy&J?jC#XH7!b5e2VjNMR zT9>qFmM>Lrg?;}sXsO1#@PP)yl3vI%1NCAfe287nqlRq|!KyNPz_fbJBG-{D^UK51 z@y+{^4)IqOg|4l60if*RvtRm$?wD%x+^T0(^^KPIxWv)2r~7_!NW|9_gM zdWzx^YtpALj1}s%hnwBDwwD)9Hofx6yLsH-K_ilz{gK~nG|xia(DnD$4OLZK=DVb8 zbTX}9m+hAPqh09CbuET%%gE3cL04I!l9#*NsNNsUUzHVSY}FI=H8&^tgr{y9glUxO&4Pv)pmF-u&Weu{Rf~jI#KE^VDJT(vvU| zS!#ly8lu0d^jXtFNM`zm7X-o0#;$ThvC4ZI_(%?x6S5EIY~o1hzof;!aQ)NN!_q~H z29mM)TyyY*=Ty=o&9=*hU<29I49FC!5 zY_`JQ1ULF548`92DiP6@`rt@>h-9ZLC#B-fAjtWnJLbQF#HI4Ky?fU%DA(PN`%;`1 zccuV-!BS07-wl+{5~r?I;NfQY!NpO1eU{y{Pr$(Jn49s!!!`;@0c(w<$dN-jjML^W zimLY}zwv7MZl~z}R&#-Zyx7#n8fBzl-THLPEcwzhk4&SK`Q!>)1w$T~y3c>s z0P)1g1HCe?! zT%gTZKZAGU_I)!bnuH8{cX!MqiididXNMJvJ|tlKljN|fcQ|l_3W}2a&+K%!MqOrO zKwX@T(JrgxL(lUe*{&vvzH+0sf`HaBciw8|&Oln~e*37HDl@UDd*Mxg)XG|a4RjVT z->6S%10KcB0Glu2Gg%f1yPhV^AMx;(BuS`n{Tpu$_$hrg@|C<{ry5!&Fr~SXw9=Q! 
z5pUFTGdpP8bs?K@g{ujztc1`udT&}Z_q`giiK8UG~J{raW({f0Z-Rz><+`h#&!%^3y0*=vSL>2Iv!_wL+L zQFQKVykI~;jPS;7o9HD!A&-t6QvL55M#JAQHYS6b94l;9RQ6I@>fA5eyTNBiIy3&A zibg)!{yKv{*lHe{zTDn55Uan*Y2r@PqZCAcz>@P*T)=};+}NM5sxo; zvuHOff1fg5aM`^5RQY#?$d2{eJz`<)A6>Q^v$WlPZQC-5$7ny2#pO&vfxn#6*u!55 zKZa{%Wit)Wn8O{9WS_FV8%E_!sPm&!;P~(Gz?jIU3F7=Y$-McX?R&vgJH^R8&AaPI zihCdZ4+mlzX}h)Z^4!%!NV46ZCMDEUaGGk^X{Tz44g3w`shKo;j9aLc!5MNTVQpIj z|8bKW4}Nt$kKKq7E^i54S3~Sf*W9(3t|W`ywPxbho8d*T*iJn?1?nc_K7BQNzcqR8 zb=~kem8uHneIf&MA0zlpzScx*Q*?#8)dV%&d{0vEzic2ja=7x8vB=6h-=B7XOEXNy zzC}OOrI`QuPZfHRof|uGX(Y;}*?+5AI!){3-o}?>kASZYy}Wy`%$~aE zm^&CZ?j~iID_9}o8j>SbjZ)nB(GcXEvAnE!TSA;~9d9FhmkQ{*5e$ z?*X^^+-i~{U?VATX1V^vH?oa(rqn@IcU#)CX!Fw}mz)Q0SvkccyF`Cx{*6E4&O3@X z&XmKYyg%tHh^X*&-}+8V-|;~gjfF;IanA4ZFgO+n6vbPtJ!7L1zWTK$tf>qAfNJkH z=eVauxW~pW^kzld7Yau=#cESV)%c&Kt*Zbz!l{ps|ZfW-Njb?qf4S~f6&EgfW z%&gVNrohRRuBn25k2eL&nZmoc$WO`EFVliXyr_FVUOaHcv&a9>m6HmTyn%-_?$-8t zTVHoK#@%3*2)(;INE~^8dF1J-@Fz7M9v-1_s`}|ZXuUt3??f(~(*_7e5crpm+eF_! z_}0A+9otW0-Xa`SNUyN#;Skq0a|Q{`&`Yfl|3`{=%E;Ujh!!Wp+eYniVu)X+HSFU* zciNlqGvyb)YWuUl@($9Iv-(1!oVohi!h#{ez!3+maf|KIv^IZAQsHI0$W;5tyj;bOlnm2laNow z#613ez9=Y#V3^%k;xbuPF&`*HqcG(oI$7;}l^r#&$*9Kd)x7<2VzaCdU0>;w(4|(U zkpJguTM3SI0fR7`fy+mPAxG=8Le@X5j(rDddm}bC4y?1E7JmtUapAD0ea9?Im}z$n zeQ^3egV~&aBZYT5o`eGHl>M)QCpWF`dz$a9jiMxK)aSVRO?kh3&XbVY{P0&sJ4xl% zlLGxCjeNYP*~H;8_21m-E%xdchrNWX_!T-A!V~;3ALd_F&g-^SF8jO1VUlS%x9;=R z{!)Fm$dEN^i{E&indHrKkE+{z=E45!`On7*SH%R(B?l!$zvhw0BT<;L5@c=^TLz%= zg^w@6$fp5Hvj2~!tB#7Q``Q*tDGh?OGy)fPi!kHA+Z#w{(Y63Ij+B zNDm<0okQo$cX)s6`(N?2_U%@%?tN4GLNV=HuW-hRsoz}7YQiP%55oUK&K+HxP$| zEY=PM+!+HuVP1qo<)7h0f_uLFHj{C{CPQvB^PX#9`upepN+bAmNs?Szb!lxxl&_gX zCFOT1od`e+z%0Uyb@XR}iQgP9oT?6jvb+RIhbehY?g3v7Q{YlZjsp#SFo-{!1k+J0 zmgE;M-x8I!`xSskCCJ}2AUw5rAQim(p5u2Rt-NK(x5v?pU)1+24&qvjU>)&ZM<_Y+ zV2hDo&A|K{iL5SJ8tg`9U(NNhr3 zG@p`V=wr-jdYbT#?yS1c<$o(85cT2_9a#sVxp!84UH(WX)oq=OGjWXPtb1Uj+qI(q zj4gSodR#)N^5zbEfE%q^fiYP-8c_uFs94cH|7-|>i zxPM8oZE+G<^-%g?m^^X#tQAkc#*rEL_&vP^tq`Ji;TSmM{Z3z0_^xMJSDc9@4)VFCAD3P%kn!^ 
zMvG?eoBB7Tx>2zO`ui0%)E8<#zq%5m2d#H|-mUg0E3rM!)y$ zDljU0-B?0cHhJ8xZpuNJ8sO0W70mrt0)1acHrn@2NkW`&)j`vqH$oCnZ37ixwE#wh zfg~|*fS$Ld6Nhg3--bO^Q^0KaEJ^Y><3exN*6F6XSqgN{!$|-PVP2pGsx;2F%!4sJ z+p4e>ymb^+jts!|^??Aeg73UQo0;%%K3b9Vbu>y=fhS5)RCBKB8d#s0n9jbw$$17a zyp((baDifG#xcX_%@y<+NU>|{8xsW&{|oyl#~(v0P^&11R65kj(F9@3W45<3c_O=? zlAoL>Mg%3HYJI(oe(U2xDwi%f!k?xJ#&S5sGxNt$5#w)_gDct(X4L)oXRiYC0j&qf zgYNmYXsneJ#%*0}jVc!yrs5SwCBJ@c@WQglRzs7z+ESm4tBjoxA3l~W9azHCxq-4@ z%uUQsyf3WDB;qpJwQ2+T?mQYX$KPc9@w)T;=AfE896Dm!n#f&ml%sO*630BVeD^q; z9xebi5X}jMH5la4#?x0L{r(J#wv#I;Dy_?-jee(I?6KWf;5RamH&!SD%nEcxtB^P( zxvtA1R>SSq3DWBQ;pdsVwM>--cW-OPcPG5+Gk;nHQy}_t%irsdBrlllhpIj#7;&0* zr502#M+FdnBA04gu&-q2iHs_D+MoAfPL<2f8d^%Rc3%v}0bQP?+~&7lLU0fifL;r6 z)_Mp82Wep1xM(jLhN5}&07y_5IZ+N1W{C?Y?mgdq-d9;JP^c8~w~hQZuX~%q#|6mq zqAr`mr<2wb*KND@hO%sbbdAudR-+}Fz#CWGmOwKfq1sSV{sE+<@LT;00uv7!K%4_6 zq8tCRo(2S9^+?M*?)~$m1R_&#jzvM*J@=Zs4hJB1Cv3hy)_8JW-}|?n@*T}~XIU_{ zFZ&lQ_8IWCzYPNGvVpl8z^YQgL<*~~JnMnWq}!(ffCd@2`gZ|JC0v+|g$c-dvW=Pn zF>@p_jOuLPsm3K$2XVDHr!XLo_k>;}8>i5);Sp_Qm%Qn)b6lc(hdMLqUVRD)wPxG< zTn^yX4st|3?Y|~q&tn57#2^rsYqk^MCx6^FNGT{%Ep0Uh!MGZY>fzM-Q%;E1muIZr zcL6tb+Ez#~)$pZ)ysGMpVfv894*G@ef{g4rm+Kq28(7cT-NHV*MxZXXoDmmR0RABn z#Qt)0#6O#KaUZ0zwyjn&xTL>l`-2q9KLGP1@`t2VVMzWS1RMDP6GsMS)b-s$kuG5*nmok|Huj><}W26jCY~0FO}vX zSwWnV)8{u<)PU0YD6z|rc6b889GPlI(qR7r029?XRwwx>Y?OsmA(E|72IZuTyAJC<3i(PDWMjFC%HVkgIN_sm5 zBMrI|D|(t5;g_4#apZC!Pk?_C;-`yM;h&O0);1MDn&4$vME*~~a^oxQ@oP#$x~C%V ziygi_+Ala58psHzklM&#?CVn}r@J&N)S}}Se zfODrMEO~DPuyP4~TYc{UKo!7!ozhkye8Qd?_E`8m{a({4_Y(Dk#vV*JeZn*1ms(g!kh@C?CM%tLFG!r9J$7IB9hoK;tIet@hhI! 
zjO~vq0=W#-7>ZdG%n;ivv)aB-@FSo+%svpxjg8m*?T?M*omFrel(P8W#PzJjc^T(r zsP)}W!%SF!y38LKA6KpwWUfzD(22S2@>l5kj<1=54MM9}D|mUC)R}S-3F^ylG^W4*d3FM+1{iQ;6&41SZa)KzAW<&~9!}j=4WR9d(Ts_%?+09|Fl<$0 zb%uvj{J(cxW0R8=fRV9+k_z9hj66Yfe7Xv_Z(-8Nt;6rm8yYvN1yD=rfq5<9TvY!& z(g14O!k|T&Cr7xlL+TFs3Z;x3yfwn+enun0Pb9pAfx8$#;0(d(0Zv*_l%~l`oNUkj zh4vLNjo27%phy|K^(p{Y-wRa%cj#tLoF};V_ZhY(^~e>C#%iR$m~FEkYlhbyrc(Vw z8ej{#v>A~Ov_eE+s->M$)a?Xq9$`t|KQYdiFr+YKT5_=XvMTxUB*?AiUat8@^5-2) zrQ0e>U3(1!&lFQKmy#lvzhg{9C?nWHmdHW&p@^PK&!&viz^1vmeoAL+II~P=N4HGt z(&?4;+cFosP;0!9pr4=b#W6iIo=d!k{UPTW)^Kk5?BB-`+R6U4iw8}c`V#I!v~yt@d3}T)IM7GmItcz8s?ru@nMSO=hZIRMgw|MI|XBL^?+W>z9H5t!_cV#n`u0Av3mw}1H{Q;`IN=5v8 z8ZbGs*GmIONW}kh0n}kXyb|UryqK&UcV1SxuSa5xPk%HM+3G!5eH64K)$@}6^wvZA zlh4x+9fhRENFLhdSYA69uZs7>wKV@G&&~c4m zJg1x90$EeXKi;K}Ha2EGWV(aEH~{2d7L}D%ElGxlhhLqhyYb@tfqbEqk4(OC#BI-i zuBMbkYaB!-LR(EAU(Q2})qKi=8#oB8$9;l7K#OAngPX-Ra$}SD)^27BQu>M9$lbz~ z_E^zGvA`rySy6L119uU8gS&S8>FZ`5@3zg807 zvVm8@v(6R}r{y2EPCeyC;?tG!=Ge7=l28p_{kxivm#e-~U|oGbg1AA!Vt z01z+kopN5D?q_1I0^z_d{$$dI{+?aaIk_DIb$O$yb(&Z%x`KI-;n*|dhPhwS)pkuF zE(ruDK#RA!q6<}-b&&#`UnbDvbx~TRwamY|JJHmbF#Ll%jP@mvQ0J3Ig?zX)Gc%iZ zSffcAUhW!y*SF4VD!fxYa>GNAj(oBiM5EV801|iY2B3Xd;{(QhYXc)ccbq5sbHQ*lzg9ock!1T@jaC2Ey|)iml=HY zOK+??v@y=HC*wuNN3rd$LoKZ4`W-R)m($Z`=#uShf(tzkLNDXrC|DOKmIJ)ZT zv5rG^s}g&h=!=!B04?0wbc;elUvLaKcC2WA{A4AQ^rpEaW4J#LkbO;3Senmw?7)ix zNK}3^o~}+>135RT0Z#-=3vN;($;pSPa4o~Pssn-s+i^+Ii76K`u8%*~Ji%@O6DP|O zVPJoh{P4-x2of12Ip2#~Nc1s~;!=eWx!Efy?q1I{o*^Wlm^H|GZ`A-X_C4$XDOZmY zokBKQuLn%*@++j}Q$>hL+uF*fJ}38|_$Kq217VdgpmqZvm?vF79hM5q%gw={+-t;&iXix|Z+zJ-aX=x|#I>x1^6uG0xX|sug7=OQn1R zOvuTQxR`OETFSg0z&yV~YQiql>uUOhJ$XN^sSBU50yB8PXkY47`I{*enG+$3%g#Jf zptjb-Spl+VW`Xawh(3yIy(s9F}i3Bm8Qgl%YO0u=sv} z$`{?!Y*L_Ac{tQp(r_{PD2V2#R7HV9$b)p&xY0DG{txpC9ie`4iInbUz6s#@;eGS#)Sxk}pg*`lrLr=*Qd=e;dX-I5jZ7wu$nE=PjD9rm z`aWNjEp!z%5!-&soNsy1FgMd{=r)6e9U)QAvtrmV=|xZY?c0r9;p)-|<4n3%`Z17r zWo~5fH=z9lbv+02HR~W5#;c9*azs)$({*Cy<*%z?VYB^?DAVDXZ#SVY-PQbqdv|X86TSmkuQ!a;i}VI!G>M{M33Azu>oV(2(2pN4 
z{0YTR)=eMN^3>l-q9Ti&5_&#;H*7DB)x-MVuckr|hW>0N?1jSITS|%CE0=Q^2>|P1 z0k_s%5Yjjfw7?1NjDPkY_bYKasKZ`D5l{l!Hu*F;&X*oXlGrU^D*a&`%aW+W-&>85 zMk#&KQ(Qv)<|Cr;A_26lf(o+NfJx*Fb$R27!RFSMNWLB9MyNxs%Wb|%hk0zo#vcAN zt<|IE_Y=8cjoucW)(Gw<0i+_vG~yMuJ5Xm423K=5oM)H3H^Oc~%H~=SQyh` zii(ZRnW}>YCrqcG4S+LAaxh2Et+=GP5USA!$jQksRurRN%yPcoVIAx|@5)rT6Up$e z-kTtC_C+jc*wWU=5~H#r38Sq(ICII2(YN+~TQObkA1^e#c(iuqPUJ-Xsq6W@*NasF zf_Nr#|J!iemEz?#*tp{Zn%~5C=F!+Yq~x2=i$sY}dj9LnIa)g`J|;hr))yu=IEe_< zj$_gO#U+5^d{{v?R*04SAj1t;EPga4eyf~pjT6yocK)mx0;`d(AszhDKbmbC$)pwW7u%f4U*AaX&DUtD@XFA}dURRYG7wes0i^GCR{woN!3HPLD|*7; zdxs0B9hw%v&zMXtCmP7(aPIr{h;#VN0&^2Sz|CB4#p7A-X3fmCYc?$`D_a4^v?_%R zJAN2s90@}V-eoB%W7`|5)egLrnoh@7hC60gq4JDx{wnhkY?~e~$81EHBjXNdboQpfyW(o@n--Ceib&c70UenI~ zA;6o`Z5n$ly_6oz3u4HPO>~*iZs5e%>Nj91pk(d-z<~vVLYDP7pc;TDEEG zH49s#DgU)f<>Ndgi%-oTq{RKpKdXxd9Mu`Y16v~(acqW4+nBMtaH&{~OG1Y#gSEas zCGjgW?YurYQ}v6}Jv$@J=F9KEr+x~`PxW0hkAss_;8G_&N~?m*>!T|F z7l~*-_^j7k1=S&GB~SEM9}(h+PBwlwlik~Ds+_}X(ztf5LPSBb!8@}7Le;7AZ2k=I z4iWyCNozfKr>dom}Z*z!9=0*gDI^!Le@q@o31BAgc zK{87lbYLuE4(oFS9Zm(}02g+CF0ZMJzA0M{*9I)wgNOjv50foz%MB{Rr%|yv{9{vZ zG5YE*$)$UoSlg;BXY z%JdRXOtL(fJ{%YV#%g)H?TVH)&eQSwnfBJRe#+Vb-!_gp)TK5Meq-f^(kgw9pp+(2 z)KxW#{m752p`rCgl5M&eb?f-}_|}bJl9JEDM6o+3QbG}^Mc*AiLCv(wGu@E{p+Xal zo{55J_b!xsy;?y(Gd5GSUKntf2w{-^`f*s$*Zm%ZJoZL_uAD9!57NvIzy#>I`yWD+ zAvT{(A@oIpM{(ul)r@SKYtxrEqDWSshQ?ZQ|K+fef%#EgqZa-JkWlk}z0&wZ>aEf= ziK<5jiZ$&wpfG;4v@{0bcnWC^a#zIo0e5MA_h^ak(=QU<7eMdn%ottx(&sJVYvG#g z*79aWZ*=}WyT#$>Dr~q^X=7seTo3=eEK8y#$mfr4{cS3b&lfU5&G7!Ua51tgV$@iC z^J_Ne0D|jsh$PX~<1-meHJ)6Ib9>yf zJ@p1Om4ZV}5lSncfkY@Hl=D(3c8hsq%0EWMQq3d6@&@a^gZ>C3>drUx{U86*m75yA zq;=fC+L!zlv#Lu6HvDCUIp57W`>d^S>}G>Me*9L|9MCeeMAV*SX`*;C+vFS)I9~R4 z0F?a}{-1%yJQVm1N-gN5q^1(J&I6WJfm~$9O+6H=D%zc~PVUAr){mLoD$lDCP25Q!Ao~$4b5N04Q?)A!Gv(ioje-0oZL{;)SW!-1d|DIjYLfoRZIFi13Krcpz54PBU7GsV;F znOd9C$2?+K|MpHt{{8!GshHu3-n)0_m7|l>dB*@;LEn-cO$`C!hYo(`o8&_fmwB~Q z0XG94cdod$nG%gQm*5+}etjvgc`)9574ZIf;-g=$Z0pZo_w+U}|bpWGC$>*@)59C_UmVNg!)xb>wU;C^=KjWWekmy{D)&K-O?20=RunMs$ 
zQQDCDgaSz-rG{1aEnDq;4xiM!?X@6_BKLcZX3dQ%^O%_@?gH%{G}T3Yh9whvUr7w$=1 z?MthQo|D!*O&*gKSvgk<>i)T1b?@uR(R^gBls8}5mX&4k#OskCHvC+3uSa?eq@K9` z?)L~A)9DwHm|M+@OEN%vi|rH~-xQTb(TSGsMF%gK_i{HvPEaNo6q`7^dvc82(0Vv# zY7hh;JiA-SBshU9(mFab-ZwR**iiKT1;{_V zX1Y#xYL?erf+%q6zW&;+w+@!gNfR)+XT)vL4%8s-8y$&@R?@$tTBNHF=jzQkl@he> z#=tR$KqS<#%6T!M&mTm3wYP^cqI&WqnctF_Q@3OVgY($78rc>~R~& z{}f`h=)R$b^OR$PJSsp&FVKkFPe(0D;)>JyrP?CCyivlten8#38P5p}(Mg@wyxOND zA@^_MShS@ChIOh%kC7vKjgHI$0s_+xmpiDBiDQ>-if+UP4E4;MZ#6V};yAf2fEAq< zcX{rQ;utyfbF$32E|N%ys;X*4-;=K`01s#f0uvEWq`Z|?f~KZs+fE%OBxYDcytVBn z07KLCsoe2qcTWmcf2we_EFQ%{=M%eI_WdF`@iWYRMVgpD`F*LvU@EWQ_qi$6G;5djB3+Xk{?AH7a`Kpw>W>{1?u>6+yjs^NZ+E!W6twB@1S_+I$>6- zr``s{9|5=esd!tXyYGPk-)(RWAUB;y9faBeR(GqmE_7oXJvC>P#>UQN44UR=6-75N zxlCiP&&5dQ@V~*|K|~hcKW)57c&6V9x9ERA|NGS$g7y+kWZAhV0YCPT$kM$<88TS? z>r1{0kzi@uyG!{bC;05|ZIPIK7vmH`x!{_lG&k|bR?SRYBKg-Rn&*&Si*vg$-IP;> z5(S%(OQVk*L2P1u?C$=W&9!u}VN`3MQeO4bei)abcMWCMERkV_K^@-5caHBiQ)E`K z>OJ~e`tZcm^f?G*zHs7Nuazj~G2{+X*@5B}qjJ#k%a{7;>z~>`YPtvZ=BySn@t)`( zZ90fwoQn3>J8{;*c`^^xf)@@I?>nEG_P944%}#>+C%GFv#7)+U4TLz{9?V&ILC`(p z5Zj%WG!Z(7iIqe?E2O;t!ot!oCm7gO&$`uoq`)pZQEQXxiU!JnP~6*RU7;BIuFS*u z>(lmFH4QDT_Ttf!*fU@UfP$mf;KH8E^5S0uvG6$EPX-3;XI#cXyu*7r+YI2Fv|tfIAMWNHq&M%=~Vj$ESFYS&4I|P9ZVO(EIWy@niI6L zYP5I4=7{th$qk_ufgr{U>{pHn+=dD5m)9cVmunX6Ta(Ppktx3MXRvwngESLc2E7K? 
zG8=dwz&9Z0UQL&c!ON?z??Hf#a;jiB9rQ>VRFayYS}FS%3DJdPE-|CaENfn8SqZaw zc`%&o%WB{k|MKLqi;K$<`9vE3Q*dkqskfl$$Kgnw7}e} zfEn33%&b(T`pl%xcBThLGO*-}#{;_6+kh3ndV@Cr%}uU3>>&t89^B@6TOS8Jad}|} z=QqUHu`wN>eC}N2p?FHdZm5~Byx@!1q~-&aOW-yFg725@0I2%|lH?a_7=HIwAX))w zLwKvS3dkTBc_5KAE@FC3pF{?(&MEJ-eE#v+nKQ3&Q&7rt+ykG$uJM?Bs@8S|2nUrG z-H@ZauG^2f8;=~pCAANmDZv1H@`ju7@Zm!(ZJpa7ag-0IuGYkTk!J8|G&bl+K~$a* z<70d_rJ|qSeB?`TI{T7l$<02W4`aw1k&`BZIg+z)z^sYq$Ol?}b8>i5Rq%&Xv`D_OY${Y891RG5 zLv@d%Pd<)+NjjoSozcEL2Z<<8WOVLGi$iE3@7~l%7w29OaUU3ik!{dEa@+i*T0UYo zq0M68VXmV$(Nk_X9s7IaEl8q|SY#df*R#{B)$12%y=UZV3;P}~4#o}LXxy$ZxIsj6 z=$_AMLv6~4&<NMGjN!#-bAY<##psPZDeE z>%AMBVSMD$iphm$mZ^2w?}}y03_r2C%&G>fkam{r?b{3cCa;I=o?b5DG3PzCCyrzy z4mZJmIwbIpBv4KO!q&r;b>UxAB&H*P+S3r8V1CgubygvQ8PBqgm#T(lb~8Uw#hno7?&Mon5p zNoUkG2;?u3UHhc!YCQF?H_JN^rZBy!4C3vAaUI1I{7dIkzNi0OlcI~RZ22>{n``d$ zuA@kR0|nF4{(V;nedCz|dVoRD4b>+kl7zND=5xgduV3KAX#weTN47%`k5sMt47`Rx zJ)@sl%6~Ocn^G)f6{^{q2O9c8w4V?Mk2+?ihslsV2Sw*e?7<5A%vS4>+=qOZLg_f} zDn5KOy_vJqALlZM^cE))j*Su^g|H&?EYUoCjR`XQzLuS(LAOgyRBlom9luwINOi}P zl3R{b2%)TKIAK^b!df?iPB3hD#`8I7X<1Y4`$Y zosnR{&4W8O`v*ab5AS->2GR}BNaqSDn>vNzNE`4Hb3yTMquU+yF=pp}(RJ3pw&^2# zN@_^i8uO2Oc-~eraaHs$5IlkwB96rtHhJy@Nz&76PpA}qQqMu8#l&^dvs*F6c@=7j zE7ZgX-r`C35@X+`<7&oMp5mB`re0u_CLnS)!)C>(#QGalzTS5XvP0B4*jB*I>gzXUzX=cKI;S@5(g6y z>+U93>rje23V)`SG4~Dri$D|iq`F4&V#VhkhaJ@8d{HC5+UF5#7N$-h$Xr<`eTC%gQP4gkkRa9fUdxe6a_^0PjSWIWw2 z6pmQ|+Z9B{opqN{z)bJ~8T;xt)$8wvD2=%QkwTc^8$mL=2N!W7)w&Av+J$ty36ygJ8pTB zZ$BDB(|upna0ZRm{k3|ux{k@87r6&Zj-X(BIYq4W(h@Uadka9k;F_1+a|bm)n>|4g zFV({Gin7WRT%rzs*vwAIQq;Z)d}F|xwSThvd5lCR7)v2jUc0}(&;YV)cNmvzb&Q*H8XNq zljSMN`HpqF#sp&R{o%-;5BQO4a;{aVQbgUYm{lBD(V18 zW)-E;0SYN?k?)&o)8gRm*e?9`r(LPY<(=wkK!47`hyR|eNd_q*Hh)8Yte9Z7?ThZn zU+URl3mkRE0 z4gdF5Y!!kA@doJVj#y+>_k?k@VvYvCK$~AGg=hEHf$xh9-}pf%-P-V-oxJvnfFF}Y zR)RH^j_(6>a z7x7?wJt4}YBixycqu%{X3Wn1K${}`7a3HHCloT;ps?KOax*Q(Rec~ng3q96VaAx8Ske zlfOhlVvj>C3#RI{hQ*HWs|JemXdb>mL9!4nW*UmolS8_edd#IJvIRonV(bb^mZGKZ 
zFAj$lI)7-Um5HX1Ho3ZM?Em-Zn$>4h?uxqn7Olu<{RK<@svG-(KqaI0ELY&(fI3JqQl(?ANyJN`{jw-zSHq{N%DgImdc5&krFDPNgMUNB>wfCQ|$#T$+ zQ%U)bpWm|NkwQ`+uCsMnmKEiS&w6{}@P$wG#G#onMi04ExRzc2)VM6?@o|`4aDTM5 z%$K$;&tKW&7k6-8PnGUlzVBo>;}*ZF9}EeU;sVmsvX@>Qmt0wdE$`oKnWwQc;B@~# z7of+7jg*lPn~hU4df3NWIBYI6*7Yjk+TPlqx|wI^j7x$zV*d1U>d1(RxMl5dldp*M z8XykZ}mtrjdj`uGRmR)uek)7G@;?Sc8kzd9t{8VjAsP!ND+?Vvh9m@D^l`GMs z&i9$WP>sa7d(+}Gq^(9PXwrSLOi42Tn<4dW^ORGQX(SFuU;XLIS>B9LtMa{whcQ?e zahDA0xtl~8aZ7Fye9^@CZA%B48U4m7sDUH*uv}Z@Pb%gPlkaD4i;52%G_2*R3w+#i zYv{sd_uajM>B~n-N4V!}@%EIxB#c5FiU>Ru*pMOPYf6h`6-?zD&wVoI5(HH&=6%UzurGMIG8$` z1Ian-Nttu`7^+k%_I16_20dIyp}3z78#YNY6DQyR#3dyquia=iJZ@m)V6z&5H@J>! z>ggf#F|NDQvJMW$spu(Cr^(88*j(7@UUP#+qZERdTUBB`R26NZ!_EM;!+dm%x!syo`LF8K+3}}>&k7#fZq;?Bl%P@qpG05?s3`m z;j(6`PSZeRaWRiP=w4yj&EF|gK83%zM@@WnGPx5MAIF3$wzyYWR+}(99K$qg1HU}6 z0SFfh%Jy=~+L~l7)#GYx%GI;Xv5CFY4%L6UV1?ND(VS!Z01yXkpzs=(8o;(INRnPZ z*>Xjy5C3us5urb6H=-|`QkPq6ceqGX!0gckFKzSYCAV$=`xOf`>SjrIJ9hx~=(o72 z%D(!!{yM#3lbxIU$KAnQ)Z3FSw^0DC6*`ls*}zZBXGqD(%)#d)=fLCy5E8*%a%;22 z6(t7`;iac5XqE@Ty{CHeXJ9Ys2zGqj@DE{GrJ2V40g)@cGsMiqNQkev#zRbJa-A37 zY-T66q2aH?^b9#H<>XYA{fuM{v|@6Atzolc^fqJYf{FyzX+2h+@0OXf#9D;^FZpK& z1hg~9z#9;cT_wD(Dw#yY9oQY`uBpF#u2KZ<5#TW9d)DnH4Qr*z#9RZXV!(|VU+!b8 z`NiOCGgUW~SNiuP+Ve`MNB$~HC#Jn&XMA+$8-Qd~A7z&`RCVl4*Te$r`kgP)J?PyT zRH$NM5jp$z-t35^k57gvPx$rMudhXyT_qn&E#+DNm;%f5Ibjfwlt$RxkGbh#nctay z3n(kZl#h}U2#2JI>`HV5FZ!E8vdFF_W%rS@3W4bm!aJFB|NR+E4L;B9^{zL|^M_b= zZU;MvVq_V4O@^>VzDT~tbSTH#TU&cACbz)zDW8HHWE+S;qLtp^nLm5&wC+#H7w9^Mt1odbmwe5dkH32tqO)xAl~~v$Zc5I z9%MU7e^h+Y{WA??!xY*92L=9&ueTUebStJQp}z3Y(MH!dw{J{KFw z*R1z|+3Zr>LL1)&0mB(E+!wKK4+=xyO}cLR^xDhwo>b~u{Z8A1s@b*eDmklAY87_Y z?}MJeD*my+L0TAyui6KJQoL43HgBnnR6hy&{L0b}Dy<(Sp-~90Q;X zk+n3Z$&nkdE_cst^Y zq1~VNfkio;8gHcY!uj8uTubvKBbQ?hkoxMcoL12Re{L-Z@@@dsMyEu2nh)h;jeCHCN37_B5e!l-{;z#@sZbN_4St0b7c+nG$i@9;-hrF&%y3gwjhTx9bqtVi zV_4t85nAcWd?h3vfaj+0f9fwIP1@_^_AB7msV^UTk~RoD)7MCEZ>YfJDJA>>`YKR2 zRDS3t9xFA>Ib?5Y*xa-NzSx#s 
zHWw9+C=-C^CV`I*08@#HYM7=uMnUi0uOuJ}1bIP9(UlD_%~W*g)J)m|Q7-z2qXSv0 zpsiIPhGU}A!88?JQe0}X)cG)(`!>A~O{r1yEZRD$u`vs{6jZVtu-9wQ1jXJ+NcYVxF9_la}3C9msxx;y| zk_%NR%Zs_`V~YWC{2u<;4iOt6u9%|7Ixg)#T9+g|x<9zav;d!3&;#9pm@X|jR$ZL2sYh(12{DMh= zmvuN^<5DeOo=~x-Qpv(7e}8!Pj7K{r>d$*f{GP@ng)0}SRJ$8?t<8)gKUE%}>H#}1 zYGd4J$7l84#%qmrI}n%NiF8XQ0b`Xa70gD%UC*s%Eu-Q=#qJlVo{rJZ8YQuvk{Ys6 z?NMj0Jw<)}fze*T$b;v9JdYDRT)ba?xsl9~)o|?I}Z-(hg zr8TaovdM6pEdd4u;+mB89f+`yS$#@8Nb=7S+~Q*^MU;Q8gC> zlGU+7O)YyoXt9d%5=7ynN{o!ywS@v!cD{RlrQKu7fo7FiGo$K6p)l8TBW&rV8p()W z_(`jPpLYe}Nj8*&{g>)|kHSv$N!8>raTVm^90(qT1E$c2} z24MGDE(eI^q7?;AJ)rf&2Ns{{&1){e&cvIbysk)rbX z%iRa;FwXtBs-BI;yPAJJu0do8X^(L%QhRTo!slru(CNWRQ) z=>;u!`}gG>4wGT2#b~Sk5f+WLtGmWWaGbkTq9(m_4pcwP+7hOjJdtDyP>3(`i`h*K zQA_cBTAYkP_DDEYrnb=q@Kldj}qisJZ(=mMqrYh1RTb*6=j-aha*Xu)RT<-k%)UMgs=sPDCS99#L3Dtqt7PwfbjbLu_PmP*S#C*iR{1}_(QmyP z?!$s7@g=r&?seV{KT)cx{+(O0Q6cZLerTF(O-vwOP`=SriTF4Ryupv>Zm(&(kTN`) zKQoNwdzS={*(@h1^P`5TI*!C_+4{lI zp?Cs86;019OYvk_R%w*RB!m>Uu&A(F)A+O|HCNu)$R<5$CH743nYmJ<*|ppJx!$w; zjOExYE9fe_Q@$80zuflBvN(z&H&^XFBTq)#oeNqVgnVC#%a=RGa?QxjQfyYMAT3wp zD1#w{CKLPidLq}yn0Yg?vn0yqfwruu9Oh<j5IS|VqKHN)M~@(ItR`EngVmf!*?W~6KXkjPj+p+5_Cv9 zRMdU`m zu11ocP`-DHEC383vjvkKgizJ^?8UlPek3n@5-!$)mPdi_7j)u9_44YAgW>KvE#2%J zR8=2Q`6MB0y`*7lBrT}1ZAx}WUxI6opE-{En9Z0XT<>+C9w&kC*PSpI zW9KB4!KL8}OHaEP*1<3|*XrEx=9|7Fh^t$)RpSA)8fn$y=lLjoWd?!t)spIT5l;UT z_r|FRvlbU8kGT6g={L#iYaRz~dAE5tuOl2c~|FQ$48VDn7 z%}L7`Uq}siRv|V{L0o?pHuMpZVau6|w3d%k_{=N-Zy-B=;Kn0<0sXMMQt{U0!5z+O7$UeYLz@g!H%AmL>vJ;x~;`$o%- zkYAL24NW|a#m2YnHpMB@u9$L!k9kTeK!}SEzP?$$8$XQA8nU`-*byDb%ySw|p@4AI zyTR}!up&E^0unEb=dF&44j1JYJ7)J)p*3T43NQlU0Z5VVb|LW!yZZtY73;w-GN_`Q zpCr*^`GMYNJh>WCoZ2)-S{p{j1GsSq@62LusOiRCz5EDmIg`VXluQkIE{%gl>{W$D zvOK8lPbMPr*x}JBSpxz3ehTxF2W;mfuFFcoSmMPmB#Wtyimt?Z;VOv6(NQRL#OLN( z_vd$llaMJ}X6F^b6vY=x_a(lE@2VdD7)^+!y(<;llQ6}FTMRRjO0 z6`Uj+uT}jd{zInyLhEXe2{uu9vQqsa_C?|v8%5vPvU69!I!5m=sn^eCBYCh{^*dCP zDXcgmwL;4CVrY|B1fcn=Bt2&#%lWMh8X=z)lsp}|s5cxfb)SDU`E8&$?6cT+_Y=l? 
zAul$2<;Xxip!;}lazUX*EcSjlYwueQHB^Ad7<`W3PVxNVzeLxkn!%cZC5KiX>Ygxx zZkIXt-1GRe3M&j}B;=u8$p=fPTb2ThNP&56eYu9MXAubvViI;LF*IbQpPTw-S=4+9 zhQnFNgO*Z#&py0wxw&zG*RO|)ZmHNkBYQGt z_mLuV<7y`;UTaH{6h}^oM>E%Z;>TYPtr6Ad5`SS^7dZFt$St4KeL`G#Z>M^<%ym{jsyM$vpOP9Q!!@ z?(@B_-ydC<>&kqO*L~lw*Yo*&%)>gvV{h9Cz({hlm@U`()_Vtv3vKWr^+4Jg1$lD( z@tOJqMp^h>9;QGJpy21UP2ux4=;mnwRyd-izhAopjj%AD!0pmj>+Nq3K{%@Jio~X7 z;Q`vK%*WB za27-flMbIc-B819F#Gffd+##Q1#J2vcC(|;S2p}RMQp3&6dgM z&ESTK0cA>Q9A%a3vA;ZPe!34o?)=#1BKWq$88BW0|C^UPXh`KTwSuWrL{gD|u7WNJ zOYN;!kkf@`9}n=~!tUb*U;M=HuO%4h<_?cEH(ng{w@PARAxX~rIZNhB4}qm9ISJ}R z>Ck5l-Ei-PHvLs5o_rrkb=#;qB2lMwNb>eOLEao?=;zblxl17X-QVse(}LMQIN7H& z46?d7fKEw?joun0v+XQ2r%qQZsL-QD1Ph@-MN%aW^TjJB)-8wtg<)9?84y{?UY_3b z9W-BoA>blF*Z1A_%d7W80g~nB*at^Wn0gs>4L+ebj%FKUAq|mpPUSJgiWi zOc@AqZ4wgFdxOL$F}NS?ugY34ZxITE{D|Q8>6XmEo{MM;FI??ua{$PNuri-H)5Beo zkjRVz<_TGcp1WJT_vk)Vd|MBB+iQ9L<1>H*-XRdpp8y@j zkI8>!O?%7rCv(aHP$D3X3!TA~J(Qn6%}q!X^P!f420mZ~6nh{u8$GD4^_=rS$-Uzf zumLAmjjdl(cG%CPs3V_EQ@yVn*iMrw*6^ndfZ9U;$c-%!JG;n69@xj1kAEj$r*LQb zpBH*u+0wUG8x(86NRek;cn=foIz=?tCC4c{q#O^RuIINRH&44OOHJ#3lB*1le@gPwAepdoj z>u`R~yFxx{^swY>?x%i6q4zf2f7k$FRb4lp3q&nOaHsENQ zzw^P0*aR0b&KX?GBA~wFwUe5s4hE{*=wtJk&!Kw}8{yAR_9Hed zuI!=Zkr>BxK+_Owj(FnZsly8(LoUme%e&wDPAnDi=MFe< z5+Y}N(18;e;CPVR)NhxcC29ky9Wm#QTD`Bi<;unU17p>w*~5k@OS4i&yDXKC;&~2|tCMw#C1zJJI}OZfqhG_6z(fu}F6j!(pnbsl&`(YsVg92PbGv^cWO#5=O!=?A zCCcD-_Ohm9AAG7U0`FE4(e4Fu@9*Mgeq}A|jzUd``a(@RqL-KLq=eU{E&K;{>1E-< zla{{Bz>jSW7M5W16;g?@FX;as1g;KLw}cFDzt&a00aggU?a5LPZg!<#+!Q*l*DSTj zAHHg7_1=kkBIoJ@+hLu(GR=S=+ z)Ofm;ZTA-jv-8(3E-aAdQx40%rhg}A;ZJ`CE0xZ9Bhvv-Q*buGMfg7js89Ib+S=(w z_==hN*^yP`A|ihCP5 zhFiL$`^Qdmb5bmP4O@KzXXpu&>7>nGtW)#(by(6c3PHN`k1n+s?7q|32{;P73 zUitd$s60?xSW1nPY3*kt>)RD_sVP*gy?7s}pZyJ$uOBUS-oxAY@u2|$ffP$>-;2u- zaeaM#l0BJyf#=Hxb&)1F$f?vDYH7yAcMEJe?`Nw@FFp~rM6TWr?H0nS@Mv=TUc-h|{Vz{WyRYLb2%YA|T&Q{;V z7}W=$Y-<_JOg8&iuC1=J1zY8ZuY3K6%uRv;GMJE>L6rl>wnfZ^DWLV zFo6M2qZNoc?t(H?R9uXox)BowsUO#-yK?Cj)TOyWZly^CyAg^fd}=N_4>dT6eWa#c 
z`TIqwnf}Li|B>4^N7YgYH(jMatdc@4Sy>-mM{mXmlgVeuBn7o!f4?`S{`RfN;NT$I z{+1g^0N_PIquI%TQ8_m^_qeXkQGuES4GgfNZ@_pnVSI9O3|Kr*lXBk=7}{w@aD4E+ z*gatV(|RQWmxIb*U3lODmg4u@wvR@E*U-59*Moo&9$JBFyWw>}Q@Hi=WN#jX!LXBk zLQpV2(E!ixhQdG7JUN^=K)5~A=*`SL69XA4ZO9`IU)HWHf*YoXU38tVxJhaKhDM&9 z4&_9DYyRwO)2fmi$Lau&yfmmdgj zd>46r3ltLplQL^yzdw9bMD&S>JjX-b?r+J9j<4*Wyf0CKJxB~UOG1VFga%eD31U73 zkXx>h2+yq!Uvb-`Ez>_R+>L1`PvlZ+|f`{@S+esw*^j`7RJo#kK^Zn}DI(mGOd4K)PlDUV1 z{LJ^V1qon!+4D5o5{U*%s?502V zXeFW_lHX7eCXlZcj?(IR1&$x-0XQr^GN=dK6J2pC8{HL_hzOsrsj0JNXwBrE)>V+5 z4}Mk{u%-UV_hJQ60AIfkT)uoS%Xm%E$?1Fbvy8z#xh4mP+ah*D&(hv4-wrl2FDxpv z{qJ8JAZ}=<`D_*9U9EafVSp8$6o8A)%gVaTi8$+K?95@v?EL)T3u~Z|dHtKH$@DJ_ z{bRbi4K9# z`RT4KC^pAIe2!1g04=xLT3w=%*&XOa?u0J2ZATGFUH|yuJ3cOe$!r-i?cmJS`$ zk%^LVX}uBnTX(n4?6DV03uNY~jj0gd(f^JYzLc7K5$e_d6beWu*_e@lDj$AUxcCU3 z*`%~56nT7j|J_P=bS*M0<`VrpPuutY2M~T4n>k4t)AL)H3_LIi*u~qUau&odHb{>*A1Qxk=>c~Xh*(D6ZU2qJ?_xMmE>T`Saq3e+s zrJ3!^N}yc+SL2}jTu<*exzqwNF}H5Um%DexUgOi@)BHwz-z?Te-M+`I2ZauW^Q7+} z3U)U$6t6CE$--9G^)-ABH->;Yv2php_K})tqL@x!!{utCe8b~#q{OT?4$yMno;&1= zd2ShAmnO&v03VAa*J5m&o6VwAHUo)S)d9%i#R4CqkjSbEPxg!_HIhJJDRco zH^B{Qu;Dm!A4wv932cwR<@<7^UG;kxt#l7zSkn zO)-E7=K@kZphg*;8@DbdkdTzDDtN_CN+-lg8)cQkO+!kj|NOaPSNlyvBO`V)Wd?3t;eZ3}m)CZ8l}-P~Y&~R=K`I*Gdjd>vbo(OA9(j6CriWw1z@dCN&=o!+ zltG3ty#9c}+r(Oj|LD!}yUDtcd1q5%|KU*`1MN1=Y#v*#UQr%vrdW*{ShcPXOPgG4 z^XH(T>i}N!{9fbS#c~&{>ZRgMuoYJ#E-+1KfC%t|w6IwJ%%p?_H?XGwx~BlYqZnym zzdSn*qZF5r07gDuGbMI8gWuxPV4my^2=JSxe_}zRh_<@2gYSi-o)%+kp4PVrI-O5e zhw~ufs=&-bT~+nwY`rVdKM-A)^8;LTety%2iXgQ|WW;!vqKV-)y2-$I73oYvnC z4jy-10!#|Njs>+VNxIJ1>q6%*4%Ye$3rv4+fkO+>gVg$48W|c^c=j7(tw65g6=;JI zW%%w%hf*+hSK?x9089fYEF&3R5$eOUltFJ-x+}f=!Paq41`yjc)o*-ZJzw~8f(I{U z2Ato%af>3$ZaBl@Z5NH%rzsi%lZvcY1Yi|bB5nhnLesDG=HNX65}L4UL2YbuzfOQj zlI=2suD2Z$1h+alft<>-#C+~AZwSKIR2I2k29<8?pi^=YcswMdQ?zx_G2CeIp7uRo zkT?yY_z!AdI@Qw`!a?&>665w3jC#LCkiq>rPgW6)J3&1uHZE-d2yll{YpKwd z3sDen6tDZZH#%9<=Va^Vb(WngHqcC!oo_nHQtkhCxHmsbQc~nCXu`1=h#Q|V4=k-c zus(%Pe7@}%5%As%vErb)w)B*5O#gAmJ07)kztoSDRSa3=VQ9x&W4TOY-rHCC1JqI$ 
zfj3yS(};fqtPr1N{Q^$yMU&Ffg39PsQ!pE)E}lyrr^bobQoLw*$sQ5{tXCCvg`-Zz zo_y6cM|RSIKoTBEC4aVh&!q|X9=vF0)}uXaYteqn_0O#?7>Xp(SP zRt*jI9EO5`k)4GS28E0L8%uiML5pP3pW)_p#h|tilPC_HYTfw~F(Ig~ARP_lH`{-IAPSzn;|vo)J{P{!awG!@ALs$ese5dQ5(Jxa`p9ZHLrD8R>y|KIX4iiN*$aLR~x>x~0 zK^2XLrE|tGYFg2g9@f(T1onZOcz<#DvY(;Z*W)(M)M9%*#vJxV3F6HBjZXD8`cwO}CSLN8}2&Uqr!=T1)o2GY(Xz~&GH=fnX}bp}Y1 zeGAI*FM|6fdH34zh18{Kw;QA;;lFR`!5s$<3Dzg4=NAcmgn{3BaJPI#N4C&1@u{fR zEBUx^&MTi&*x~kMj9rFr^sqc`+@CJE<0jj0V!|DX+#YzYZiir^mOD!BL~lHcKtDwg z){>U39O`zSsrwVKvWVFqZDf722N-DJ3a-`oyCeTq@zX8zzZ-WdCNq3Dhg3t4nv|%v zaOP1fycV^bJXzWNFUaAx*LqdrTKuu@`yRt$fWXZH|3`4*Lh+gO z?k#Y|rdkrD329F7#suJrlR)`g?J&mO zG$5psB$Ph=%ToYsfY`xKnTv|A=Ej?a=Lqo)E?^t?7J77b9m;Y{vIu4<0Os24q`*XB zE@!82iWt2LHREbtn^tr>@WYO zqu)Qg#t}mKH0pjY41_!C@va^lI|~Wi;A=ddaZ=CK$EVAHgE{DuM^u>iG+&ynT?j0a z@Ts12tDH%OX~20tSU-|dJin{FWc-&@>V96on8vPiL9VXoK>>L~3TKWTjDIHHv0HI1 z{YoB{@2&NHKfCn0z8dy;SGpEiXzUUF3Woc4zMBao)YP={^(F!6cZVcG2D|N|Jxdx84xdjFC0M)mGeNjCIpA`A+NQ~HA zPb~?Q)Rs*6R`qJl-6n?De!L(yQIxO+du$F`%WG@T*N1SF4_*R1=uW5-&ocj!7PY;N z$R2vu=DvR6+1y*i{ZB=WI6eb;ySY;|&;ec^B(+v#*CdqZ1*+D&@R%OuNwV_svfU+a z@KfYe;?#fgh^f-rj;qFgB&eI5F;=bf3;@k)u|2Jdcj#2+xJ81GuwW_(UIIPdEb&jb z1Kl*P=x2~9@Zj4HY>3de{{yK#)}Xri$zd3Mj`l(vQe;-o3!}>daXz$qYl%y$L&HC$ zI!JQ5L5&B&t!!q{p*dFMF9m1fPtw)M1_pMANK-POw|Da#+<^{V43(dG8gMKq)O6uq zOw?1y8cr>|AM){gAvI6>J1Sr-u>q%a3-7hWvZ)Xj8(Nor?y>Xv47z8fSg+slvAPOZ zMY<(@+h%>dqy8}(iAfsNFg4hFP2Or%S%v>#v>#|02Gb{Zx3gswSnIfQCQh7lvOcIO z2D>Rv8GCC~g2v#d1aP29winl^L8KknCy4o+-1-NQxg&pFi9eBE9?&^G;N(163{(J; z)Yp4+nMJh$yQgCs+0|(Z2(6^6ix4IDpti4!g=B=EsHs(WiDd|+)%&#G?z0ocYvi7~ z&m{GoJm23gZ{^@(g8%tQOs3S9g?&gsOn(bZP^fv45Su*w>kO>TV}}b;N`sC~<`$cG zD4ix*AI|>#d%=hB&m;;WAmO;VcTUF6%UiL~v=pRv9}+zNDX#C#<6EW8tZ@r&Pe!Uj z^yTksalO=|hXDoN)$7DodUILd59}Lp;Ah5(5m%Mki#T00HkV3Eg zaHnbdu|uMl&+^mJ`3*wy;)`ivg#N$l*i;6gFg7x$&&^=vyh}lImn@VQa+1++)e-st zWM`ztsc|Qe6yxRHJp?@&aHEK+4JkF>Af6F4rqx>{KYw%o7MSA=Q3(egAy&F z@UpG_=A6&0bFeadF!h_6nrZ(_Eg1v9qX`qt%3R~l!W%CTIW`P97l4fG2>Lh 
zy~LCkzTi*hi_K=AYYuh6{QARr`6=j~x>n&}m5KhA*>83O{&6}Qz3u88{-pwp(kxD0 z?s~qel94Jw|B@t`?&FbZZQClygpX=h0GLx>bKqkk{Io|s0F?9&49t8XuH|I~G^c+v zXVJd=GBzqleLkq#%-o%E0Hqmv2Az+#4XD zgz5595BcMY472mXs#f6786ctt7l<34!!6T^SY1^eOSPN-VIVCpN1w5Jqu#QM1piBQZ5fgL_`&F# zb$M$A$fP_it_?t))_U5mBwiMaL*)V;;VOJFZbNEkb-bZg4T(1?0RF1f7Na8iaIE~B zeleD7OzVX1HbVc4@1P1qUz(l#`QDLh(%4JQ>XigR3%S|aK5mwJyq18 zJgP-e!XU5slxsIY`y-SVN&H>VwBjq}gQpwr7Q`)==i76W5J&F|&+e7@$f1y7_bc@! zQXTk7IisxFy#@Wm#S$#AT}9v3t11dcW&{rL+ZbPfT+NGDq}}in1OCME;E=SB`TE4* zJyQq{9!#w_U9`ZS$sPRnpq5JT=qSi+;%7mk<8WrYRh<~$<>VL;_bURD(gFuLjiBwW&9tIy!jfy#pmhy5bb5hHhXXRMQ*76lL?m2R|kFnA+{AhYy zo`22yex?hn(U@v=t8J7Kgq|Wqjr-oWWkYfN@GM(h#PN9kzpQ}{!Y}OLFE{ zgF(dW%iSI4&&5}t?(h%hBwrE-vZv)9(?nGPh6xFY(m;mX=QibAV%Gb2Ln#;r{(;_r zv|q4*YN|I5N^L>Bc%Im%BomiJqWf?sUte7*TNo@JYbj1;+3NA!2+D_!Pr3#l_ zv&*yLz=}zzFB-V9;?+1eK$nou96FoucR2Bz%fHO}%Xn~W@$i;dHO_{O_Q*HVHsZ;~ zipNLWs5^^j6kA3gd=r`5-e2=2-2nYS{MnOV2x^}Wt5D)ZnQJ4FJnB-VFb34z9b59V znyjdlp`qWT9dQ9PYAI5SmMbC55->pLRI^_|FnoeCb^c@jRC==Ym7VPVAr-J_=*kd4 zdtVJsmSZ#u%_X5ez3K`m)t}_`X+NK@JawS)YJ2&0y1L~Vaa#zT#lHi-u^f*ioewY1 z2R2hu;%E-zfK{ncVq>VO@9wQu+%e-d=Ev_Us@>uug?@d#rz_j$tY_>`nUf-OB$Pcq zlxawK=N;^6Es8f)y(#dgJM`2#CdBJ1vYb`$uC&LNHj}JpDV}pa$cV*MY|j}9_q_f zXU#6wITxoojHx`SS(%)`>G|^PUg0hOZKsqMw08)s(7Cohyk7r|ERNGdbEI9AV{QmFH#oF*VT;!V?XFG61ex(9(H=~(4 z=V#)y5AH|3#b`@mmOAe=+Xh^nTYB5nUKrb_kL;mQPoNv=(-L(GU}w_Ze8##pS&Xuc z%hSzt5qMx^@AqPwR?7hZWu%);AU{UvEA78r-Z|o7qNonQ`7EOev-Q6FqMDLGFfBOX z8&JrtzRXE?qK764QeYO(YnZ zf<44DFoW@%R_ev40xo!8$(hyKZ$7>kpRc8u5*HEieW9$jxeS(voGhiIh2AKocH~s5 zhWHJY=dQ8v0F6i(&yHQ+zm7XRAMVPcQ9NPYKU97`&+xIm20XR4o1<%A=tZuFuhyBn zz6SHXcT=sOQg8c#(j}}tCZ|i)tKf1*6fb}IJsQjU4>yNJ#cSk&9>h$g&q`-?IgD|2 zOX1Qoj!vHC;d#k{i-E^X>pipPmVCdp{dP|veKG(B1qJf1k^cX%=QjkBdEN808i?RrlVp6lpy{FL^Sj2$?vS18frc+U3oNTrF{aJF@a9CGWHmIm+YqZ zN%P6qJfpo?^oJW^U9Zv>wudb|*Fejx^Yo-O^W83E=u410_~GBuGZrq#%vQ1CNAVO` z)U)5j`~4TNYrS~6Pe&+u!|vZ%_~jXLSD!{8_08GCzV!k5?lw!?hX2MegIZJ&pjt($#XDJPXT5YZ~#nax!Q zhtD1B{%~=Pzyy3h!N^_%fugctcdhbMgyQiATk+v@GtBThXuX6Ae}DA$$5B-Di$HQ# 
z`84y(x1?$Y{e8rNTtAnU1Av(G+Tq?rq}Y(`rv0lYg4Q&#?v0d}=E~-X zWGfq5Ina69xgSb#yFT0q!bM{yR{M_m=8~pP{!!=pKoj4k*0KNK+)SBivTaJDmq82n zja#yVbqDSsSyEXxmXOoD?i&ae0|8k7_I&r&t9#=ermJq^XBa?orNYs(sgR0i|E!s!GlAa+7x9hRtuO-z9MZX7w(E9>elP`ucY% z1aQRX9gqtfrDFxTt2qy?b9*YVef7f*N`V=u)>KA)7XPrEB4jZrOho|K& zD#&Cc?LEy|KTZ{QP&(O@0}UGnkiGV@cbN+Tw2s79`}-YN?KO-KV)ZCvFjGHcX0+`- znKr(<3GDFywu;O009XXn0U&Ysl(3(KfxO(T%6%tYKzemHocdZUTWS*_#Rj4pS) zEZ9Y<-5!?5GT~>$%A27cRqE~JQUnip%{!n2cUn5t}>J#&!j1-kfMRNG@z0z31q+PVd z$O#D$Jn%sv$OwUQw=+i9ogOf<=x6~u^3hRpK;m-ivPl9F_r^e|0&NmE_>*OIBl^(+ zam&SlfQE5m1=tMb`AbK+JsJSr``vd4kbHTI>FwzfK2tZ2Y8#n1{9Q-Uis0w7m^u!Z z3DZF5n7|&)DSR>2bgA||Yg3EKW1AYK(<*U!`BFWW0(Q?=W;!veB|3aOt@=r(knAPt7o#RE2F%4q2+bEwgwSm_Di$+H- zJ)cG~sS4A-I<2r0gx084D_FG?eiJh9WB<$q zGW?uO8{N4&Lhrm@RM;g_+Mn4juY5bkog!YDGvwO3Lh-j8Hr&b*K>u;IqT^0AlS$^e z;iu53xsx^`RBI;g)LBBRS!LdXfJZmEy&V>4b9IB_U%fe>|DA_Gq=5zs!I+i<`*8>56cCoS^}MXBXO98jQ%AU{hJ zc31(w{NVo51K>&pN?A{!L;<=Jfv+~LX~u~O`)5c4JX8n_->lCf*8HT8NPl^vboi6? zMO8A0e*=hvs<*c^UOG#wm`20;&BvN zWLh{Jo8`IPd^#&JSp~XW-+)>I-1bLj3GveihkLqr=)a;*k8T+|5a}moxqe#^jF~jx zq*DAd@xkPNSa7^NrGDx$U+sJ5LKkzf!#1PtpXjac?+FvWmEomH_RTQUg3VvI{I zdcTYO1hn|V`k(sn9G5E$pTXQ?`(W$Atwli_c6Q>BAPbK{3H+CUTVxci$x7MUIPocy73RJ=FXa?oY z(5P=($&~*9n>)9rCPDd*bb@_J+FRG?mFAPh{D9~FP4Au^3{EZz`yBFuhXOD&L|*rp z*5*;IUtNg3ei~y7gt*^Q)D;PkX}%Pbut4S>CJ(`Ex3B}}wSmklF;Ghza;vh)lAb%s z8V#l%Zu1~s@W{k@QH#x4zz260-8zm2(xl^s(q#NaS$Tf@n9JDz4CQ8C}%Nxh8ETn zjwHjE0KdfdaQ;aQ%e=!(4PR=1->F|8H3;~oq~Z>E6idVk{{RFep$W;${e`dtK%>Cx zx2VLg8+YpcR&Z$y0S}!A;0XN5Y9MI8iSPf152(t@Po` z+#nyAut6S-%qE)fggSovA{oow)ykB6b8SnZhV6J#liwDS5q2M z3%(#n;S9`vMZodk9@#BEf!P6&ux|^5AI8m)IQ;K-la$tdr*dmJQN-qe`f2q2P1RI) z3w&Mk-B&uL@#9}%?z*RYGQSjpWgo?o2FKZYBzlUX9fxE0JIM%tDg6+t{&`4KC}*Jb z{M|si%+bV9z1wNn(%!AMCl%J4uW}Kc%dOZwnISnW6Brutr%1W#$R@vB3c5LvA~#zL zVC^esKzRc4hPUnK8brcX(vG&Jgh9%xdGr_?J&EX9_3+d$&tjuY7y4SW*70bVmwWNB z?=f;XQx}LvH@#M(#9c>%4Eei&F+KTs%}_vqn!*E2e8rq+lG5O>ynxh!S3qC|Y!{XQ z;6#yFIeCN-ysoQ(DM(5|%QSBnSscM+RD>SEUQdI=Er5e@=^nZ*}MMSvQ7Wj)n3 
zZ~JYJNS?Nnu^=ourLHur2k6rUKfe6E8<{F*SGRCCjW+HGzPX)h*7%AmKh0ef-%5ID zR;NPThOgK5%g@Y2TAqkHuJSs$gfU;F$vM@@4I+ZTq&cc|w$7Biz5g5jlofzGGV#6D zhrPJK_yO70S2bEe3$=V5X77T)K(m@RA>cd)UOYTRK>qUdwNkd;(6(2<*Elx+eh-Y! zk6STZE8_Flk93>IFi>O7YV4Pci?mXYb0^-FP3h)p0guTLK2jbu-GE^D0<<~si@b4Q zTtbe191fgEcrI~-&c`k{MoG3VPvUjqyGN4`J;m71yf6L0`#X5JIW|^Bq_e2}TlmpB zpmppIT$`zJQFhq$$?#r(0m4prq8}gdeuKbt*j_fw7P;|uI^?H!E8me*0W*29$0;#y zini*2RkpN6fB#q^$>L!<{B|)4G=3xXgjZ)+0d5dXFCGGzW-b^8gHfTd>w+vx;Kf@o zjF>EE0oU8~7RM1srW)DSXS}gx8zU{dbLKfUOIB zK6YkfJElkAuF~DQRPo{(h&G6vnOj7{0!w~ezoE#>RCIWuKE|1$#+Ix8h>80p;rpp0 z>?L-(#-*9ANy;_UL8`Y=w(lChpR%4uCG58ApuRdK$!8jJ*>ovwbq>lNacy&|k^k&j z9nuw$$T>_s^CXN1(QGxfhtOj(v*}nSD;_19-C}UCp-kKluz5;9TI9DC!3nieO@DY)w(AN5^UUE8Hfjp<6&feR*R)!{PgL8hf1op=3RByNCEbmgxj%(4_+h`HE|l;VL=hk0)khi#`#AKzx2InATbSW+sI(x6~bD{Fu=?lIo6a z6J!Wiw-KS=GX9VLT8}L*o?i(gEVp;YztPe@MYwCSW>&fxl01BAD{(6iQ&#mfV>o%T zPsy>56!I%0{Y0CaPDvoy)0WPtJAe4J5{{CN84oKj#aSkL4QVPWe*D{E!i+6TNluwJ z%td=Fk5utzY(?#NayxeUHj8=(OfKdAsMwTGon1Q@Smav#nkw6h@zy)OevIHsI18E? 
zP$cA_;!A)=%&UiZQ-scRE+^JKa<|BEjOcE1xU_&vv~wQ#rSJdx8P_dP_1|JXBwOY0}DLT<~!8n=H&k_Jw!h z8#hDfxi_Jxfmm$!%1TIgJo0KEv5y|T)MPf@at`xH?B~@tos=|(%EFQmB|$u55F_Zu znwiUBot~t^!p`kyikkd02}*pb6L9TN{Wa(CXHj$+70;r)5BzGqK_2cBdHEq6cV1Kr z+oE<#Px8LJGZ5Ja;tlB-;I5B7xex;w&w+k^coq5w$wK{S;ha%yc0H$hX-SWEL&IZq z(4mq+m*P`?IZmZmFAQx4vR(nj`khxC8aNH-m%gt3&)j+?1k(QlQ)Mjc9gv@KH3vVr zG~n2eu8Gd%xrfSeD_-rz$fSS|dwH62AGs7kc|YA7S$Oe_&4U|iy*{>PLJgg^SZfzh z$V3+RI~|A=xwMYnTOGtwYAjfqSRHaKc@D-Sz4%eB0irih)*8=4C%t#olD%JWLBun_ zqXDm7cxt^o0`G9N8h8%+V!ONq<_V)d22jjLGy4;^$YMUW{kYw(_JnG+mD(LMeM8Qi(ew z1$-Bjp?uOdlFxS9F;eL^|7rS!Afg~B$-AXn*0Z;GnMr3eaeQt3M)u1M0=tcPsW-YC zadk?YQcvuCFk8dOGs}c$$mXY_AtvjEyj=fM2F=i0#%fUo>t`uwcOndj7>dHvV0{{H z^HD$ERF{=01z9hnd*uDUds}}vWK=OSh8l(RZyrV#a0nlFltQEE`;3;yl6 z;nfcQhc~WUk#oEo`{%Ow`CTeeu>Py19k5S-;uXfya8Jjny-B9v{>%ydz>j+Y23sn) zUx<(#r;MVD2#V&(U8#@hiT`+_`dCgiI5B%ZN*}=ySI>YRMX6<5x9-I0dB)Z5_f;UGVqWpw zV2xBQ%`TT%Z6?!upRvfEsm2*Lqhac4qmZmtiOM{-+4z%f{M^Da}f@Q|Z$4VQ5z zB{`o8dSZp~^MofS4zGSP9yGs){JCkfFuMJK26{)$M`NOi0TK1F!t2qHjw%0|0w~5b z{U-aMi}XsPdPep#;5cQUH?{#heNa~8ynWusLjh0+NhQPh{PzfPuGWF8kTXmmI}-)U z4SVH;Fx>iGjRp)jKwhDtC+H|6FJnPTYLg}-Ly{!=2WDS<{ytGdmrvdqBE+$6v|O_U(3x`hy!$|f_yV9ZvpyxIZtaTk9uI^La~ z4EMhQqTp~z3}bcx_J9P{Rueo^C5G_#-XX7|V~- z&q7>l48Qod^5MRixycWsNc>VIeXGF<`SS9F@#swx6_YlSW@7Nm@d*6+?eZa`Ud#*l zbIS#xp2S8BxGZY|-!L=9<#D104Bnp9UzQda%MR=JyEJy6-cv#a7A=N7lu z`yw~n;tt(HIKm8H!Hhwj+>X}0dh|#~(-;ab;+;7a<*WI1_ytkI9Fwr~x8`;fqr`!z zyv(0V*3U`OXlP}HUdoG+PRjRAuPkFeQ2wbBY&?9R6rX9N9>}0dVQjmDrAbf5*4B(babxy|fN0ljy|454rShQ7N)OG21eS8k zbtg(&zCx2tJb{dE5W@gZCWSb5-4LHHz1>9~QGMVT+Ffz1rW6BJlkH+e=JKk=Pa5W0 z_-{t`d4JR$HGsc*oVdSw#@s{7ih3iO;Nuqc?AHs)m=g`T|9<=W9?l%hK>c{8u9){l z>7Y3nGcy)Q1v4-3idzCi_W zL#rOLSF7DJ@#U5i93MGqc@lVi))2IdB?Ks2N0p57IJ!i1d+LoP7hw=%ArZT4sh|Ln zEmSqxGEa`)R+*gtXgF#xdJsCZEL}k53NMZ6=~Kl5?n9qGP1BO29^nw1kX8fNH2_QSq~;`NZeNN^Ds{1=#@w#$iM3P){=4tNoV z7+w-}B@Xw0HW`a92+U3q;$5FR*9*8ftCmU8e?yFBk_(r5VKtTAP#A%Rip43{OvvQF zF#a625RTdJL%!*l(a1keN@Pm${sS19_>&(S4hT`xg2%GS@MHmq1a|xb*dwO*1GT1v 
z?A1{9mV~NFTX9)Trpc#riKUP#^?uB>H-20xK)zS>=oOc2@@Wm(PtgU{aJ_8=>SqGG z@@6!n`x=zysDun*@Z(KUDcQuo`Nikv8J1BdeXktt6>PZi7W=PC`{10e&UtSW$qL%O zm@TLBgLu~De&Dj%;!4@r<07z3F zkywKM!zdmemO?V)3)WQ|XKzuA!%Bc3LeF0 z&(;#}d*lifawAgD4Tv^&pI~J&=sY_$L+Bh3dQkB#!8cJ}Jr3AYveK}LWeKczi;ryW za=I_a$`RdTlq6R^PA0qXns1*p2?X)sGo4j=n%4sCmc)uP7X`NC#qB$Zm>n~O@v)vV zHXI&=8InmdDG^xDSqcZuD$3gz&!X&`1=>2#TYbg|#TR|M;1u3wCuZV7(E_8$n< z406yAYB_!N<%7v+`q62we)v*>!mpDOL%92L^XvEU%Y3-#!H>U)RubVTqIy|1`wr)3 zU%ewmSt6-n)C|JXHg}ND06S&GqAT6MPCj9E&(Z(8{z#*=$_Yk3V`NqUmp+E?KQTN9C zmp_k;0H?y`=?#4ipxC|{ODsD0`YwZa;cqJUu-l{Og-TmKy|mbrfS z@x+XhZb#!T-zgDuge*)MY8Xl9`8)rPj!qLz@Z^`h@Smo>1j;0GYs-VIXvaF)X1M3- z7cA?4f4s~dLWyG~jihACDG-GWx*6p`C%3|lV(1)XJ-g!*y)K1l!dYB}6es-NV{6ye zbpF{GLnDnFS-*pLfH&nax=mUWt4>Z=cLpP8&>RnRX%{f3>GD*%2G?#G zZ%9Zk-e|Qc5SoWDT{+kR8OFtTP+oO-EwwDzjeAL9 zrP1SZs6{gSAi~ikn|S&mm%nErF881A8hL0)CooVc@r|Ibp4(Cy4QeKc>_Yt=*AN9X z03@MDbo==QK_yjA2{XqAxPPb(UL(Qy8`c90xWW=;{^4C83 z&z)jv0-qu3)C;n4V7~k@SnrV*FMUZuAhr~*SmuI|G5r^Glhk3hj2Up~B*gM`>#g5Q z_A^&uAY0pjHn=_~~ zyCpdgam+tGW`Zw9tFN9dD(KQ?@WZnY4BO*Bnp{0+KqQJ~_Fl4n1Y4!8YaAwjCzim` zMxRlXu^bwgvR_+!MYL*DhA1J>jg}>ei80ae36?anY(ZbW3+h2?|~W1`v?$2Bo`e=zO2~ zt@ZxpTDtCqIdjhUJhAs@ANd%kA&|6g(JkwLOB0e>DJLP%yfqrMF z{&G>F;%K*(+zn4n^eScW%3TPOg}fn`Rax`D8)`{pFPR^U0#(#3T@;7LdH;KBoI*K& ztJLJsTIo^YXnK%SAwx=7#gOx*w2SRB)c+{{VFdT$bPb$-T-fk7s7AL;T^;MmEUU)< zQZh$6zP$WvC;rizgJ;rF3mb84{v}uWW@OtwBXd+(>pHc%^QXQFJrRW2d-&WG8PQ=( zz1I9`_K06d(b!k!ciL5}MEpleeL7-rOXEc?7Jbc2+%=FEx~7LtSnRe7M)=5&z^yRN zMfSOvU4_3w?x1L&5v;Pl9RA&(r~r>`NT0}Y%0Ukd;TCD^O14&>s3Lq^$9 zOQg>8;*{LIuOajyZ}jxYc}tcq$#=(19T=oGAk)3_a&qXq>ta8G9JD`Io&+5@<|TG9 zWTLBXbN=h9ho$LTs8&8}&6yFIo1iO4oWNYJv5xjwa%*U zl-D(GnU{PDV-}E{iB1i^Lpf*eo_1=QpVYi{R`MAoD`No3Vu$=Yl&kcfCCL<9d{Ojh z5^kLBCePG5MSBp5DtklWQ#hPlMA?)y5i3wHDHV>kYs)cNthKqj32lgK@N6%a?=`mO zx0f;}n}m@F?iG3zwzB223-u~45B~`KJEr?x|3-xHIY+~LZ1A0YNdHf2D7EO=#?zm<}R?-Mie#lVb0=cKrcl-Io7Eir;ZAba~9^j{XvQb=**yEm9-%K+{N!-2Hmu^3dT^w54 z<(W1ui){NHV(ET*O}~QJKmN7KNe10=uI1ZFp%*4vTRaIeMr+^Nl+Ub96^lFA&ge_{ 
zh_w`7O~$MUi=Oe$z$D3cRGz~fNt7r#@1gLz<7%zRHdaU*BO%(GgCjM<{2^SS)SaYB zPl}zboE3`SY~fx;NG5?yd5%2Pz#Y+rKgr;J<4`m1hPvP?duQ!uPZWv9&T*i|u=gmr zND0+sr)N!+aOQOH*Q3mOFJ)C0_S`=lnDj(8>AnlO$0BKXhq;x{M>b5B3!;zKbQ~YB z=hDB&S$8S&_aGETZIo9`n^1wsrt{clKH;Q$6uEo)2zk0zl%vYxNBHX0nJ7l|^z%zD zS=KnnStG)`5%lu*q+2%Jyznjfr+8iJ3M@pTA*46)-P0g-R6Ihrqa%F!O;O$D-<>)l z#yIIC2S;9nIkK3n^b5a56RkfbM^9r>`R}&~IXs#Ht|>tjx;I<*)2d`gTzwgusvmb@ zpJB$+2ofKEAuvrMy_~_J$}G~%Hrx;!j8~wXRKJ0fq!iAngTNQp)6@IuVvUX9K6CMG z-3xzg@(o$j6lq?|Ewt@~$k1_rY~C*RKJ@%PeYtZzUuBYLoJ9RnQimer>R~am zI;1_!$-Id!3f=(dlFOmfZTnj0jm(^AMG{q(+3vQO|6HQ1F5>OEQL9JD)@Qvg^hEtG zynpl0d%6zq|Ev0PDKuk=Hr($vT-Sh_7`<`ea9PWei4@|zG7XH%_S7eL0k|cT8 zhRcq;_8Z=$qyjrPKsXnshtrKc>Z(-ac3yWIx74YCh|)oP{aYhjOjh~cfChVKV(Zl% z60+=hX3_Tw&MfE!g(Vfo9R8EyAdkv)>fYr)MRX)Qu^xUn5n0`&3#<*g~0m ziz6yMlvPNBWV-w9hwO*$1gIy;QB*x)SlBdDN3suIF_!KRFQ2UK%B}n{YkM^x_t1nF zkI|o!0%h{*lHbMQp>$d)E@Jc3*s$~HlY(0jSJzc;u~%4Oe)#77vheI}E{n+Z($MW4 zOcK>eCNtGq)>5k+M@fW37WbanC%<>`5zSc}U{gZ-+R+O(Q_06BbsjZ`K--8R5Iyz= z^Xr?2&zrTLB_0w_D8A@8SUXOkKCK0emcJUFul>=w*~GRuBDka4>9{6L=H4c(EA4)h zij^Occ%t5lTx%*=FWI7$czK(yuGBZ*hhZp@VrKnhN3z&+8yWqy1NAIf3lZ>FxrXBR znQlwfj;U@pt4nEs2>(>Fa?-Ms0>b)X2}@}_%Sf~q-H(4^B#4`SkCEyx6;tqJ?<1FA zvvBf>mA)MC92MrW!+M8$a5d^;l#nYvzH;h3?9$;+B9S=r_8lTBuk_|N-sZob&J{}r zA4WO3v#wOBmck}8rz7%xkr&#|#j!n||Cml+xLRaz9O;eRuhWa_JgHVSjHwc=Ng zl{_Vng%(q3m|ZR-Izt}r9Xo5_jzl>Ts{cAHf} zXa@}cmz~WUpZ(ohMLv7}l4LhQ3gU!jDf6Fs&8`kCa-)`OKtH`8JGRrEi+&N!P+Iu=*A!$if?r*cmT2W0s z!jJ8-ShRDgQ*_q&n4br`K3O(hdl!YWKd9NC-yf2>aN(q_xKk73iDu;W469;pP<;vh z=H*%}aT=S!y`XR2t@aEN(%mkGl`fY^+Bmj2k}k(^SL!{9sMm%v;sXz=HQ3>(|I9QO zS6j=ML=+LEzHpM-z-A;f?mTSN?b=e1xa?6my@t@h4}Pq9|@UkDNJYSWog zV7r}KeJ;dR7FY`n(ar}rGk-u&%-o-=;d65q1eOY+G4xjqR!2#I>d zc?A++$K@;c zBT5lcA!i)eJL@<2-1ygC_mK`M!Lr@AUB8BsaISB<(&8XhU)!xs$Gz!GcJiJh;n2=p z#4A?}LN-v_*FRkgpaEql+34~&ZsM0?VcB32^u9Ptop*9xK4K{hf_azXiX-Nv(#G@W zIo{AfRIRH~G*qCL*^&tzPlvrihZp07FkmDG)icMH)4-LA>W?H)mJ#0ew}l;Kw>qR_ z-+)Cee(E!)Ebw6ha=&W1cUCPgcf)pM+{EVna!)a$9Q|M1y?tYFq;%?8rZSGhkv>}B 
zsy(v);I@Co0e~&>1v2RJ(_C3W8uuQWf7tL^UmRA4i(NWbJFH7}K6nDNU)s5#+diKn zOZ}AUO-f8&>ei#qsvSkb0U=P{N?D>^ZArG+&kXsHf?lSh?1?gj5v`NctL@L-4h{Oh z9+I1lik0LMz4K@#N!5&K5-}NGNX=^x4!#{OdTLsoK9*X0I1%HpJ_5ctDW3-AI3(hm zrpK8>V>ask{rmJWw|qWj@T!z0A@3Q4HM0tAlr^nP)js?1?B1j3DaROfhsSsG7Z(RO!szasOhw~@CB8q4gJpbQM#`gF3hhPAi zGcqjtdD*!jfj~+}QlLF#AoOVJ|B9L5lr9u<)kys=3+!O+eMCE7De=rjZb{-waJz;T zG+uY{0|nN*%1sAyZs9(7Tvo4=M)38E-l>ZSJ=4TtOK>o0L98qXaWUVvdv>(!`s4)E zude611W6ksekb+kK7>Brdg|??_7|`t_$rTZc2DwByTN{&&Y$SRu~LAJn=~J;HGeif zCCGmtzy9F|3LP6z+ygkF!`XT#>?pN3{9;;+Dx_uXzmk;bXbrT#t>f}5(kw|B<~`?| z)?Nn99dYQ&`k%wa!TM;n^)wp`8=JwPY zOi3;V?5mXC+2KOLFKIIcbqhC=@7+)R#ij9@i?(#OiH$lt50$~toWZcH8%7jZ3cD+W2DdSda zXY1B=c{bTCDSuJwBgL0tiMqW|;I!%XT;Wc89+jNU^rndVGAbm0{A-r~{rxQFsn}S$ zJp1N6K9L$xTvzI|__X{qxr;1XgG8LaCoww4>gES&YrN-RjvP0L5P`I-dj+wt zKTNSWS=2f`LN|LLfnOJ566-z-0wia-&0q_w%WBPKzjkcpv`g|9(;)-`REb>zD=)rtFUfK*g0q-`GXn<{tZ$i|rVXEA_2^%2lUHpU&wH0{7x_-}798c;0Y+PwM!-&Hl$A+UY=VS+wV`X=J#z**=N zlE;(-{PIp01?s_9mq*+^0@?ggGMTkxkz4fZ0C|aqhcQGp{ncDjJtCV|8Gc|sr(qi* z`0$r6L^fPW4uizrXA$TH($vZFcuzv*>w?eC375n6hgLQG zgLw+`&|uhD5T>rP=O`&i&ie1)DUaP5Qd1g$i(75KDp5?U`GJyV_3uwM1L=s|U(dg< zXD8>iLp%`rQaojwx6`Rtmu9r`s)Hnpw{Dvb=xT^T{>w_Rs^H)4fn4tS)_Tbp#+s@v zNjW*s1d%{6&jcBAX@nif3sLq(RZe>Z!t(D%(+}$oVzCQ>c(HU(p*R0owYQ~vjEtl4 zU6$}Oz)n*+wTyE(VMl-VL$u}H^RP6jA;CaDe&z+;pPhRaY=K_AEwQ&WFS zg^|Drodr0Q8bNuCE74bBrY03mE(=jTEUc`JluDAccbDVhb!WfTY~Q3Y8!F=*%=ePa z;h5{0wgF&cJ45{2uKiW+ighMJa@Vm;hL)I`eCi>r+4h^Kv+$`g8$*3d3UzzWo9h4%!kuFRuycpb7tFs zB`8jC)$s|NvQ14%q!|)^UQrW5!MTe+$Eif)p z><6y;>%F{yaHSIYM)J zefoAF@LrA6GK{3>FVCok))M?<$vwI__p26&;K4jtBQa;F*>%2gJ7Q@JQeCfANP^UtDuq3{995*8G`+lz! z&&?k&A7WwFgL%A4!Uy!tcOFJ^Dd07&H?sjGXC4C^MBM4z1}%8#6<^hIYNzbgk=I+7 z;W#A6M`|eOG62N@MxVXrp#o4)`vQf|7S{Y92ilozhU2vM0GjZ+(u(4fxOdi^z8~Q^ zpbn?bA5zE3>7ekihS&?0S`{f0`8MU@ec28$By=pjz`>AORhG5x&Dwn;$Zs4f(u#n? 
zjdK$=kFHCv08QMHXp+`YAmV0wazCEibVnY-m{P=lpYr)N-hdK=+`62(frsHuW!(BG z0niNY(n*Kqz?JyJMudeK@bvzN`gzdF|6ciSfd0oI!^YdUnWL6P;}0|c1u^Z-6NquR zk9`amUXuY`g)o}R!>E`d0~tkPT+8dP|F{L#gcs5i-|n|R${Q-x)!qKu6BSQ{`RUrb zYWrE>DjaY9vrgocjM)9_%{Dshc!(bn{^qF=`- zJ7IzhYnF;1A$UwHSaNk<`~GmlGHLtbd5K*4W{9R73@j7*=~2HlKHCiz$m3yQX8uT_ zGg)V!{DPt;Q@Kqpxqv9F%L{JMeiXZto3g6tAfnrEiAsZ*uJ_|_zd zzx6LzlhfD!Ddk3=pY66La?V>sw_^cc5iKa5O{%|)15{j=7=7>tJ6E=Ld~Ec+>eSzy z(o{{MtAlZqRRb+nfjgjz?YPB6-aAyFR{QzWz?i7*f{YX%m10g=B)C-*T^mj(?2bgv zf4u*+C{BuDgkl$fCQGoyL<#L>_!&QaF<9r}=6WDX;muaM(fKEcN{`eBWbvumkC&zb z8<{j7?+OYq+cq`Ej19$!6!rq;09+j{6X{E~lRrmcIk5;oCG?>}aaLmwhYVU|e1qiNpVz_@?Gjr61GG}_UzP(T&`qfXbYIlwMO>+g?4Ki?9;^q%y5^RLC~ z*mpW(R#|Gog@pO=*&m%xO*~dz;hO1v^J+>(()uv$iX+>6;j)wd^VomZZ9~nxD=E1j_aEajF4$>ruL{(BKR2hy$iHT&tV* z7g9)ks9cHRkZoDT+2OZ?gUe3QCymidr)iH?u+_|o9^t?VAnyW#e8H99*E#X75AxU{ zt_Z}sY`oI#O27->w$h&s|Ky8vQz9o?HXgLX!rbq#g+1hx@rsvg=tV8{SFCPfy`Yh3 zxJJ}4_T7Nx$rA>mpgwRs^I1JMx;#G_VdN#He;?0U3$&KvcV~}gKVrCTe8*lNEgkG& zf?F=>^6lU8KB#90Esu<`d`FgaG3Q&t44Q-EhS);;@)RD)$!UJK9Or_2MT!dEmJ zjphQPD%7shHUE8hUFBZoCdN2-tC})?{M$}}7~GA6ARB(0{%nRvpvI#yIzQbrSo6u7 zu}6}#B6f~{PJW2S($GpNRpbOpg+8{(S&(m)^2q-MS&RPTPgj-VwDe3 zoq6@zPLG4>U|!fdI|lNi=O4Mle*ebLWdym)pSxX&BG1^i0~jpIF01Dz>x8Saoxf8! 
z3r{^M(!dN9+*vcGW{apOluS((BU;RHuGLW`)AWy-(hSfxI-OT&Jw)Z~S}i=+S^aI2 z?$Q!W>wdvIKmAaN=i$#}DiPQi?N(ZRn*}@S9)wkLu4c==g4c~nnun&#ssvha1kOAF zoeH5<>bmZ`H=sbsr+q*dC-ZUnozw2DAHfOAET4wquir5Gy-pyS@|!L~c7q{Vr^UpkS~LrB--;QrNdt5}7C5TOWH1UW5rfyG&qPz0FH~}XK+S`XbnmQvUxo@`a^g}$5(w2h?c_j+( zK11*v0=l&oO=ZNw^Nj^adkgS=ws?c3vLP!H5*-~LE5;u9lT8;fGbRAl=O=Lkou7(4y2N(;uSuvwCZ|t?cKuwo zM6cySoOOBnN2!tyEV*RVR8{USdtVUDlr`aBr}=f3=60=Npjm#R+m>&tJwV_vuy*e@ zP)qrzjtS)BA0KS^YN#5anxsQPUC!w_ApoO`jXep{FELj2>RRyPH;A}MhKAzb);v+& zS#MqRO$ot3;z&*sBS}JjyeCEl*@Z`;ZSS&DKz-d}f zmz9kHq@zD*CH8i6WHkw~-tn5i3<pqgkax;iEJ zzKl!K(h6(Mbb3Mw42RsR5L)hZ4dR;arikP z$Pqe5a@W2G-#)vDJ3GD8v=cz^7w!JU#DVEPM|W)Fi)psfob}ySZm&!BguK-wh}N-8g(;1)2S>$*qTuD9^eRmBX7zrW;YoS=$mj|FPRwLeHu3`) zy%XgJip7ILgR?x&HQkEewM7b%Vn$vK!gE=zUV_Jclsau*xa?1(wFVPj$V<|k>`X>9 zb_ye@JA`=(E^ribiRIrou{6=U(H)sa$_Z&@o@6k2j`pMu{o0f3NfGTnDMq0y$EHAh zwOT0$5fVY?<74o&{#wmTzSh>!LHqQnBEbO(yW+L7Te4?|6bal;PgVGyov9tp+g5#5 z$-c>JF*1ar$<78?Oo+;T6qS{oEf{AL%$?u+v72}F^(_|A>vW4RleECY1t zUIl00AvbMdA5z0oB003 zkG^1Gd6mS|zZ0vI!XA9l_o4cGdl=eAq5mfv1Uf#gPC(0BN zyB<~)PELM4kQUo!kZr>LJyK5|jNnux4fy7A|Hs?x+xiJI=?m zon>i z+32;xHoK&%;9U*=WQ|k)mG>p}^-xM)Ik7&htG6Bp!>>{}Fcbz?#><&tB&Oo|;d0&! 
z;0~X|f1rj*ie#0PT8FANg#O_=!vXj?IM{f&P=oxI6pe%zIX*Vq7j~2FG!dA;hZ}Xq z&xeNnl!^>>lGG>RGlqA0KRz~4Lurk!E;(9#DesrxYUm>4whOiLZS0OZB|b2L3z5?PhciB%oO}xeAt)k*NmMiq4W_PMen1!*-JdO`R_f_-DQy4w_I=jAGQAhZUK}ArHVg@z zc6ppz@N04{<#GbR9n=cJ<{CvdFxa_ALedJER{pK?jH9_WbAN8ZFd#FZ@#H*3z%ymO zHNx)vj}097*Ja4@nG}+EXy6B$q`B}V5HX@Au6pC2G%X*f);NAJS1+})lqgb{%>jD; zg5fec2!UH+Mxs|q`!v0}Ai_|W_zzn|S$Ezt2cKzQq$JgoEQ8i?^q~ktD%Jtl5yFy0 z4Ti?kaBsY0GBF!WK0Sl;rh$WSVN%H%r;vS%aO!v24Gg`)RD=U6zE#4#{2 zoEPKb?_$EW^PgU>q~_$r_h_QPDO-*Z35Zc0d~2G-mT*=4l6mMPX5e&IC8yDUKu_}4 zr?*(8){fN^ZtT*k5g#Q_*wDn*j{@XqZ0GcU*6hSo59_CwTW3Fd`=J>Tu$AMs9Q%k4 zv%seiZi8qr8w)E4P*4o?_4b2~BMEKTZrqtg^0@NA3=MGeECT#%$C7?HWj|Z}x{b44 z!`3FIHXH*)Q9Qsg)f?uY#>&oGdglSB0zDIxTA?A!smMN|jST}S*Mgbn@ho=dxx>1n zSkfDVrUG$qdrGSCyHsXSu!n}fOa|1bmct?9PgsvrV5v*c#y8>TZ=6<#ew6*Ypq8 z9fM?@>uPx)HG`BBScOJ*-H-@T#QELPcU~0FoHq6QW*Y96POqcBf1IVY`jvUN#_6gD z7#NS$jLKJ4+R<@$!pF*D51#6D!KOoEGXv-4@%fE&4D_2QCfUjm&NDnpp0>5GcRWAA z(5T$Rt4%?{T7SCkqWYaiT(2>mkWdo*PmE9ZSDOGXdjjVc8pyf$p3~tkk6avUGaPcz z*~8=48FCj!*q?e*g>1Tq9nkVCGCp@w zTIU}so1J~%wPq(~5+*D` z!7A?@@RlMu+<(n(j@6i?BeZ-db>98@NP|QA%!(Y59&s%?KM=3MDnftruLD%4E-mc- zI&$WY=O18F3N%=zC!09g(qXiGBSG9F?cNyIcW#^y6}%xS?KQde<_S!4X?z1`Lc@&C zf9)D?hH!Y5Qf4`@(jbcDV*hRC_4;~Xtn}?Zov*LGJSB#&=FS>_pPjRjH#8nZ_dl3i zoE|(zAU}_3UpPH8Fh~^j5W*mv0j5S4Y?blIkBMQx39)r%!_~ZS;O99Mjhtez{7NO* z?eFgo#?~pYzEyb9mG~?^A%QfYe#ht1Qsx(I96pB!8;=LyH#9WFN+(GSUJ znZn?qym(()#}L7c8=VSUdthWw_t(91!zULIhYC{A>8YA*x~grO|Le!;%i@&ey#oyL z?Ok09g+=BAnJ=)1wZ6Kq?7PpFFZZaF8!K$pEpN3U4sd=l(ws?{4YK(yl=q6#(b4(B zchX;@WsPf4{x5Vhb97zu_E+Uh%z9AR%F4=~|2NMs*BsAP`dVR=!@R6`lwCrFCU;vY zXO|>^z~7h3>zuRW3)`c;xNAEw)1AMa$Nk_>ksEh8QweaVGX0C=pF95k_gD^z}3IWdQfp znH&!WDI2Hbgf=WGu+KfID&Yxx{N_LM?nSPaX+WM7YL+}Kb{nj%BRKRRR2Y1WIy(FI zt}DIXTcb$xJ9MP<(N(A=AWVPEmMl}Z_ZmR4VL4fSKhmAUf{`7D z&D6W6?(X~j`m7p-UC~OSdW$4%-Re^Fgv?>gKQHunYI(J{GqPTp z_x{~yCIYG-Ui&L5Xs8m*CEEk2yu{_&fpwz|vkCvXrpz3mOp(T6c z`RQam2{bQm4t}*Uh>mvTI7ls!`;e9>Q4~Hsf57E+>8|s$E+*E4rNUzHOUeZK3r{h_ z>1EU|7~S4@yUrlPI~fznppnU!LC569BN 
z#yGdV>GUV`(u*XZz71kA1ndqU|Fjrg%8n4rGwx2J2Hv1ko(t-qU}SWopX|LS?>cg4 zaqgjG7yfPk@&$k%m(_+q^W8@}=YBMogd`-=5nGeppTmhKY&)M!xOSF&IWx)emouB~ zHjU-jz@>d4_8>@WnP4VPt7RbHiCr;GNHTrOL*V0#c;-n6+129GqF8fC<1Vp(J`PgV zj)|I`$}ynKUszU?An_+fQrSx>Kbs_#zn%|?-WA5PAyakAa+MGWImTU4+dASalEZrQ(yN|i>-IM_c=qpES#mguj0jjVr^yVz zF(clRH#l!()UGzX1?Lusmp{UwdB$FdjLVP%-q@8>?k{AA@47gb@3jbB4LD!=MWM{e zIdZDrOD}P@QtcJr#02_)1@v0LZ(Hn)=U27zqy6+=WO&7KL*)=>TJO1|BOh#CB;Fgq zh|Yx%k4A6^+7&6J048Cm$Kw6@7X~8c+Pe1h3k)bmgI|+Y5K|Bk%b~x3(}3x%i=?SS1$Dp+}vwjNj@4c5pr_G>@B}+=2BX#v$*AzV;tqU;v-o2$m*BQJStGnU^Jx0d;$1RM|kUWf+=MMHg z+WOa;k$)4dl>o|1PuX%H&8G(6o9ih3f||R2B{y7_#Ngh_zj-H5t2YhJ$IgV9KXJL! zBP#DaGal=y6h$Y8mgS=?mx)&%Caf^{mnO=9lHgUcP*_y9^lBiByv)?!&=G-inarF4 zne5pTNy!eG%DZ}XrrfOB_K2$!2&3f!e5vc42I3hN06T!e<=r9r9Xda$PO?{M=MB;M^f*H&1Je(8uSd>ctX zb(J`1LMpUR1~m&hcFJt9nm6fRcdFBrEX?T&UY9e%g5)O43FMn3a4kBtgi%E(;OLX% zBis%&(47d(OT>M8mlvVbS=y?3R&Kp53R>$&PrEu~zNa^nLXbhO*FsE3(LE5*WsA$Io6!JX%6;^JI-b1$LY?tu3p{6=^3 zxfPSEHlMd9$6|m$Nc!lk#^$)Uslwu4_&p{?odq{c40KrqV=IG}E;8Z$z85^TyNq1i z+$?YM9-~ofZ>wz4>X|W1pM}(#1PH&m$D}~Z$oMe1yXV(;gG8>yJLbcM%|lLk`jz8j zRd!7G7(-147TW*(`9r>}x)3;Y|oNL;_sEaH!&$GW9Y4d5%jZ4j5?z_4MEo z4+s4sigBD}@5KPCC>`qMOfbAz9GrG$XdrvZAVt_A7?fv`#IG)%60<+;zw~-m+Ef4e zH}2l-qS0)V!Kh2C_2LJqt?DEpe|6icTv{cj684YUo4&k`x%)VE*Vz5j+*s)sAXy$z zW-vMET4;{}RU%Po?I|l&PnyE3S9n^80yk`7tB8U|+6>23p)ag%^oGj|A0f6C^~WHh z+-zQIZhjundg-fOBGTs7>UYsR5T5HKT(5Z=SNb?er7om*vEYw=h8Rdacl zR;t_7Cl}S4)n((6dQ&XP3hcy(Cmt=rE=WX{fj%B$I~^nG-wW#ZX7>!n2;O|yKfzrI zDf_p+IIP9Q&1rd=rs5h1w z;q&y!F%!0EFmQ~9SB%YZU;2r#SI0po=8`UpkaQG7#mifU$H%^_LNp|f;@e(ju%HVd zpi*pE#ha6FLBYo1(a$u``f2CPtgNj4MP2VZdNPft7oNqoL}j)ox8cPiDnxQqS>k+3U zb{6{ldV1sf0c83~fBTj7_7hTP$E@<71B37_vCE3*O0Bp5>VCInz|-%GR9($dTa2Bo~an4UAZeK=GYjX)2W;>MURw z?}W6Tw!c3uT=t`+cYaOs)NJy7W7b7@wE6m1&4fB|efZ0Bxcx5>YU+~8$p;OdImD?r zsE93R*!M8jIBmDgend0h3K<#oBYxbYxc{O)@U$P21gjltBfMkPix8=h_OK^@MESr$ zjJI1QpyZD02gQX`|Bnc)`&&8mz0c>eoSW+itJ_+m8RW-~QBqk3S*t@;cB06(rsc#= z%uU=7iB5l)V&NORBRDVnofBI{8TFt#*6xvqV9 
zvOKowm^3vM3!xbIC@8`vt@5?D5&N5G)U~dMFUK~BL&9>c(?tQ=Bp+`l)mORgi(bn> zub;96gHJaSS7qDL;ghKp!#_pZ4 zFzaIgj&{#Sh&-2)x(P?#^8WbX15q9lh-fBJ+V}DqH0edJ^AuBs3z&Is%jZ0$7p3*1 zp0~dVVw4*ho)F1n zwePlat*5b92W$8hL5b3xclCB!bf{Rx7iM#SfxS0j#ZMr1&LNr}k5BQ}Zl#YVn1oFV z6op~SFLlnPqFN&=deW=;Sy-u(>Tlk8gbrSLnS;QduL7lXZlNKkA}w&<2v$)e*UIc-O`P6xu8-sGE~lTwwX#i z#Rt}){=kAPOhp;_D)^sBqSU_$yB z7WP|~4onAY?>gc292DljW!30BSVtI2&glakjB2GKg-E)(TmZo%^YJGUV)T-@ zKG5>a9k_(Py@YdDEnDgi7kBx`#pMVb1S!-miGICBr)qR^%g46D~A((UY>-jgBRM3dCXI$v7f_=~Z+XN*6RLP9gxn%t~Plte< z_yDsZJ3?6hoVAuuPWyhbgyQYHNn*V5YQ(0~%9*UJRO(4ls@6m+pB;Q6AHS(fnP&>8 z%Uu9yTTj!Y+aGAG?LI;sI96^#AR+#JO#58Qn&^3cjSx+gVcN66rdgcJ6l%Va0xALD z`E7eGrX0WGu2=7Zq{+Nqk_|)TurkfasX(A*Ibv%n?x8`#lE?;Qfy(jFRD&qFVKjeX$Xr}P3`)tbJ zRd2AWfBjuj$=k$e34!H_sz#l*-NU1#ckcWD3`>i?d>Sj|7VPwRXsqMMSb#cF6EU$r z(sXKIca$iT-|bh*GrPPtB#$cbZIg5#XAO+vNF>9J1KQ*Bjk60+C^Mdm8*3UR_!d8`(i8eKHXY6=q@QZUnaFsA6fs@nLD@Vt0y=m?8$N2m)P`XUm z&ntM(KxH9=u~^F78_QxngIYJ6&gbUZ+rfEW^~1;oRZ@v_e6hbW_3ldX*7`wFKXV)b z*8Vn`3>+8F&DZW0YL&`KNJz43ewAtV3x4aOp3&7P!JFZ=^Ne8KB%sgIYe zs+^g^yhWqbY-hso{9CLK(?Nj&(uRDPmc5jgEH^?KztbV#fqR$h@O}>A+tD(bOK;uD zr6q39%zZAp-ui;TNC}h#A;taX6hSH?3W{#H)qTmH?)e~b_9R$&vn9)}tmkq&iY}6# zKI+x&SblQC?ZUFhnm4W-%-3CgN6kb>TU9-_>>Ey+XHIAt6)P)yP*9NG@MuxwZ)kIE zsw&>IerZAViu6MBc*k-7f{Z0oaZ^Y5a4dW1+q+~6Sy?#9=n{1E_=)VQc$VA+;$>re zB(s0dHhJAdB}*zVDLp)-z{jRyzr@iiS1FgegMW3xRBw5f+c!^eUlStNzK9*3KhN!m zOXPD9O3ItHtxDjW_tFs@xw1F9E8vZcLuz^$^H6!JXZ#fGNu zm6wx{V*b5|f88Jy2?`H~$cuDIyQ;poPt)A@Wc3XU#%rwE^Oe&7OSybeX%6aFa#;ml zRmsO4;xxU!v1paMn3@`DCv)fz#Na+O5VczhJd{UJ3N^$R)GC4X`H@8uxRxoA5bff$|)3_axEF?4TpFNqG72+k0YZkIoJmH6=R z90ft9%JSxb)Of_Kl7+=`rY57PC^cqjXqX};2Z?*QPvd(=;a*yM-X9Pqc7-R?5|%Cw zf8o2Gb$Nv5a>ERQT*y5kwZ>7ROhtc1e96%bFOKeF+WtER4TC`o^3Qb?V6 zt+#4d4DG+=CVJFyPm}JAL}8T5zvf*DuAPGCKzYt?;g=hHi&if1SWP5ATje=XmMK0Z zuwQWKzsie@^e7NeqTTzk;%<27oOVaz~;QZB>KoJ>jb^HI{2vpt$6Z7@j7zaz$S z_tmF=^Lh=)8z~(E&4G5JT`@l6nI6(;MFpzLcJ;Ov)FChIs>UnK?(tzIwT6vuN}-&0 zOeuRzOddJ%#6L>!4T+70ElZB$KhsDsR1d 
zCLe0tD~i@;P`P^ZGx&SmJEw~S0A39{N4!mHc>}|9ji28d*HJ%!z8UFbQCj=u*DnF2 z6~QuBuL=Wa)So{uCq4BwpsXeopDbEe<$6k4Ix63?8k*6~8gOCi#)BifDddnK`avjZ ziEpYIl&PwfR_`z`uO(Mn-V(YxH%-EhX!UK~l#AcOyKLT>+n#~VOEK~n(j7~7 zq7C`_y1wx%ZSz+WnQvZgT4(y1VW^Z&ZAZZr8{4>9CIZptEuECnJa}K5ZOIqJL&-p+-eyMSB zz{i8L%QqSi>tjm`mrdhK#dJ}H>AG4$+kKqY@Z7v^l$&Sylk#qTw6~M zgd9TP|tMmuvkI=>t$1p0* z1xWFePNk=NUIu}XQ?L9MD7)np74exAQ>X<66YB^>Ozk@!NJOu97##-MeH2QC2}sg) zY*hifB_0637hJvd+wKLwtvIA^wuV#C{pZZtdA!N@%)0I%?`Z=wLw0K@zSUG0W(}^0 z7+1yT!rpXS%*vhT%_*2J1!>6|G&h_fuHB|aWaz7=FI+A>8Y~cJLBLqwcMojG?Wt}E zOLATsH-St=2rr%pz0K-CD1n2dB!J-r!wl+mZIj%fyk8n6uk+6PbBuKVbRmbr%dW*G zBHbVKDvr<24!1MUzM}qL3(&M^i{{F@Bhr5|;$ED3TwYVcC-v@KE=Lwt306%^*FG8_ z7lbjw7bwei;gnYeW0$;#oayBd=A1AppSUJb^1n>tO59yq%+gLpb1Qzhq=wT|wU)p# zw$?1ka<+VoT4rBZiK2T@Cw1`fe{qViHHJ;-+|k+Nc`=7Q;^}X10_lX3ltfSbKw;qj zqQFZPHga&3HAB;Ou%fjjs0l-Q=-;WHTz-ZaQ-AO_uxM2Cpp|jc?x;INrc>vR7jBX3 zb3nM2xJh4ow%-z$9MZWs_j6SD#qIW=2Wy<>3o<=vf-mIcZof`U1S`hp;^N0rVPqvo z52B(mF3#*k&xFI0d1hxCe5D97;n}G>-D6Y5&!P6qkezSoO4t>}CZJq^b;uH|08wM} z5q6j7E-=_Hkfy=nF&n_@EQicbf?0y(6Rb8dlNos3*1}#)0Gw^ z*Rk>GpFK;4ljrWEM@7tCiMZhkyC41cmyOEs6$g22-#rEC$P2h#4~@G+T~17QrHzyF zlwY7|N;7ea5;(HIG~!$oMRn8e;H>GoJt?gdN~9|&u$=f6m6KzL-oWOWg;G4@8;j)h zni5dZ&G8uG8Sea(>>x6b)$xU0iIkl|>z&K}Z0Yr%(I6+$KRcWTG*9L3nLAnpU}$8- z;zJP)Iux|}LfeEhX>ueK);aQPy70y&<(!j*`>q8Fow3XHQndIG3ln3?+2A%)I2C_3 zL&w1z5$dJm`T6;~5)r~o44c#}F4zFzj!cPtH4sjL7BkCGKOLJC7OkUy=1BjY?zp^N zR+fHK1{@3DymMelisrjR=_eyAJG&4RBfLN3TWz_$?{so;H0h1dH7g%yMr4sb^OUD$ z5e!<$p?9QTudg)e#om8#tvMJLJj^g#ps0r*%aRBeqscQJ`;3|}_UXH%ZbQWtnW0Q=C`pjjj3fAI!ONuRBKYA@q`>^_@x?G?f0P8x19Vh z4sQE_l~XC`7h3bvJsvD&{P&kXyqva}hIckRzj0^eHU|AmN$FLUx5+a{@9JY>iBp#8 zFGM5A1%1TDmpYU2yEx8NSy>b3e(anlJKNksB9ZQh*%TyFnd^D><6zf-4xhsJw2x2M z2lP24s0hg}#n`(7H(Fa-26Ajby0^&-mW_<`t;vU=E-~6*GYE1J`YuQ=yz~1yn#pMK zt?FicXEPkbuL0Q4YoX4>wvJ4bo3r18jJ>@DZ;YX@uVbJ*o<*I=OeLZxV&heI@mY=; z|C|KuQ!hdX$)J;E`RbV~EiV;8**)nbt<0#(_xVKto*pW%+Z~A1Q1x(HO$dlMX~=;J zt`Q!v;K5a%C4=F$;js^n7veMw%jM-{r~y*YTM*e?N(6o(YM?#bH(33$%+AO6<;#<8 
z7$U2n*Bh?jDl{0~u7^!NN4D;#ZsU0vR2xBz2?V|PL^I6OXnUas1JZY#CjvZbM1u1J zg}cA+8^EweGdTXU!O@nOcAaHalp3s+W`1M9RLA1{YGcC^Nok!R^=ly{PPcEZN7K0$ zQTOhsWKWT5k@A+8ol-G?!%$q8jj<9BU{_ zTbrELN9RAbJVdzoYy`)#u^^Cso*oTH>~QBM4Ht1+99lx?o>UMKgN&Tq(#E)*tGcAh zc{E9an4Tkrs;yO5gnE?o`{_E53hRGqQQ1<_k&)hms|%oCpM_1o5iBGc?5Sdu6fR<5 zL#XmyUsAIa5eDxP<=4Nrp-u&neLt8n&_(s-x+B{20x8UQhZ06B!T027ur~C6Je_wq z)&KwhONpqAWENRvpR$z^*&}-W3*qpPKJoY(95dOq)u`~7y$`~Bjfm8I2t$H}_)FmwhpelgQa3y>BHPi;ib5>1Eo zE1Mf`J&N&qcKDg={(SN02bhArveNLDPsAtD|>kebc3VQsKjY+7#%*!J!BD zQ<;?#HEeliTIrC&bvJ}2GGuL{)v2kexwNt8fzeqaBmvF`Z~6>`dG{oQ*NkiJMb0iq zYvEZ(PscqTY%~@c+X^I|9;?O^HXW-+C!QB1H_un6+RWv5mHg{wc=o8=W7p)Z=UR(X zB~yAS=rk+aZ0BO@;^7{?mc1O$>17|LW+WzO|0M1gv)t}Yny{p|AVmfqyyeW(s|LqY z(oNp`;l=gHR-wL2umD|}N0+Y5@;-6rpY5AG<=hGs6S#&Jx{@`Y#6X5rShQG66(RVJ z0PwDhHM&?<-dAqZW738dPL=ZBpFgBM#`#Wo?@Ogt+BU-Evt@SEs0|O7`wvJf=IiCD zm(H|yNM|&Ed+7kpn}(wm4oL!7whp(G>gs9&DBaXB3Ry{ieGeL>2@YdU{oyy%Bns^3 z{lV;6?$PtB-01iloZXHf4Ura0RN(~uIIHO6fD zkyLjGDib|0F-e_YZ1N;K-d}_58hmj=q)bI!J9g%o z<#3*W>PSr+n;h<|2pR_cmlV&mb9K)L2kvfg@e!HkUe(soFT07w?s;mzQfZ7C@#= zi(E;QtbN7LLlHfI?OELan|7D=MN5l$c^)GCw6tHC4$NMQz%{h$U8O+(t6q%QRJ*7t zB@*-B+AnV6<$ThFQ=%wFkubALXR$SQ7w_Ms=YQP2o0EjpCnXbYC42cskMrikJs%`X zlTEWt<)>;>@Ro}G_85Qu)p_zYY>nu-`B!$mC7E%r&)jV!s>dyuU1(Svj=1ic_9wmF zcPYf@tPC>RFk?GrCXR+`tzf(O(Mzr!T4_A-b`uwEbPEZQ@4&!747C*Ip9fti;+v08 z=}wiDVmmUeM^QbZc`oSNmeK?w{EE*dwdHQPdS=OY&H+UB|Ywv&I?wTEw7Bq#$ie78ul@gM}ty0*y0U=MjRE}7< z(%`A|(@B;MlE343>U=zEYWuO529pK)>v-2ax29_F%Nn#CG+>suiQ`^%rD6D zOFmd87mO(KW1pIiI+HO3AI{!VpKahK$y4ci1k16`GQ)$c&5*JBr_xo?q$j20YjAAi zB@Qx}7%eyb?o00+_C9b=QqccTGAWk2izmw`P`JiIX{xzc;?^z2$sHza#e02@<7tPb z1Sk&$ClZ60({(uHD7HK_lEuEnDOBP;H-hm~mWM?94~RE9rBxToL%KaF%t$AG2bkSk zcdh>3(=!iAhgpc*H9D2keGiB4wCQPtQ^xcJO{StxGv>^1H>6%CDSphh&>zV0|~ro}1ybw_=-*n4PEA5LFC zGP_X0t^3(?tpYqZG0DlwpZ*r#p6zB>dE&;8%-C-lan@kwI>MbqVxFE|B;OY3$^ISi zaXZQ0jOWBXYerv>JQ$DIG}-WNx%WOv+)r-#+>1?YLu#tcv_4bN)KIfrCi>IG2|pp{ 
zm0C475yPx&+%c8E?$nMbOh070Ju8JzTf;v-?3W#bM)3E4@q3$HcT#%0+84)e7~7~u#@Ht2#PE%eUx2VwA>@I6~K=jIdx1Uv*p^(~N>$#3E zAP_B&ndYuV<-WjH{l->|d1|9_2aRphiPpNhnsg}b+}{Hc421DX`qZB zUHfCbgj4(UkDF}R3L|<=`H8{~L9|7zM&x&<{g0&BZm9*NG-{HCm}lf@ZwzrVrzqKn1T08+UWPU-vkQ}(|)1kO$DSsrZ;78(IZ1% zFtfg>Hidy!f(%7&u)^8H&sr|&t zvGV=%_2(y0il>YdF`|k1%UFhQy-BxMnUU1aIZY?C*ld^8`d1Vx zDZymld$4!rOKdy8sjAgxRKGnL*|6AEjYa>(om2u($5>&p=W7`b^;0jCd@D?dkFQpB zLu@mU8{?1D~d&T1M^V^;3W@OIkp#lx$4kp&xXx2izY~oEe_3CZ> z1?M^<<-A<;ps)vlY})oe!^kUM2so!kY7JB#$n;rp#~k6~qD!jrNgFvw@6nzA4cnfY zO=~xu&@~q1X(WAroM3z!giU4Jk~wX{l5Q)8=&12w-}=7(YSSBN-5+?#$U6WRU2lq& zHzBraI@~x4!kb9b`)F~J2#9GK2`i4-@;a*{r6p6uh*-Ea^tmdO!10)S(acx+VBL?z z*@d3=Jzc&oD~0pRxoZiAsopqZY|6rsSqD>X5m_Vqo3~A=dCn6*V@3u{F%hrx#I*jj zoJRLOwq>Y0tbdvw&Wz9AHX`%dzHUj@5#mRKNXUJF#Ielu4DC*~gx9|k(@PN`L69|X z%cPf>T68nY>OMA`4oD*|-^{8$P?}3fmub^E)!foCPQoYUiGFH^SN@aACBct`g2G19pMMaTz)ufLB)^p;Ygp0|t4`^C+xzg*SD16rCIgmf(x|_rC_a;UaXurE1qE6+~ z{le&mn~N!JVmKoxZXo6BBo*ek*;pJecpIa3`>lP&$ok03ean?4El65$Xc4Ku#fu$D zbaE0gr#G;;{S84P=jf;~#yByvKa%B4Z$0c~D@bb*-$)eCJ8)y9S$=5$XlygH*<1@k zi9E0xt#`!o(X8)!xwOa+!ff0AXJH4Z_u&5jFQR#VV1t-bmi=Vt{zv3HAB{o(diHyx zsA5~p($R^uj8k+7!G#7fm(gm6+uGXNXjFz-Qs(OK>Z=v@Lzat=o^(yd5M5E_J~}5Y z=|u7Vp65r8FTcRT{FfRlr!syHmpM6-CsbiSVzb!$bk*bjH@0#KU%!Uv_O%=^4?xy9 z1RNxv0={eor|Cq=nH+5Vr8u$5I^-qmNwRL@|~m z%g_f7FYR2X_fu~64@c#Z$mBlRCFB14MdW|`s)UQ`z6rp_X3o}xF(qZO+w;7|;f=$S z$fzhm(Z16U?sg$p*76LCnU$imVqZ#!FDdWI+Uey9yXxrLYh5`@)rL3-L8Dob`V`Fv zIYyF}ieA=Fk(bvCFiR8iN=gjWO$yWcRM*tJp2ecnrA>zdvC9|f=BnMLg%KX!ded!) 
zLLYs;J?k?)YdK&e6@Km|BvCB^fg)2sC7Ox(0b8i#5}QD=MbyV1Pg4eA%5gonqH> z(ErBD?uo26iorn0&XMF^4XX0JCy`L5;#~C-oYFaN{@S7@umWi`;&edh1huQs#9~rXI(F;^`4~cCL6KdZ; z7%jE#q6N3F@M=96`ye?K2L=`s6Gpq`qx@tXvjCD!V)RP`ds(J`xOO$v0+#KG+SWsZ3PI!M`9Rc?gbSLTlJe zfQIYCy)|uZOu3f`+-HKG|4e;t7I$eV|?G3vmF-uY8y7_FWhKwk%lJyZU;n3WV?bMO>*+M+S-4PQ%|#upJYD7_D$~Di^|nU!RW*k7n-u-M=H{ zz1y+~If+I&V0%FLW$`sLML+mptZ=p7`#InbIKZ5R8|gblzBdjr?)CNe!MPSCu|949(gwuibAx}17d89a_ahy^;O*9gg z`<3C^)Pn;Z8U#{ZrG01(wt{syS5A1G*mT@hqrd0Q1#&?4M8z(>5KPS{qiOuX$=dr) zqB&(SSHf-Y(zCvI(*SIdZzsVbp-}3Q^3_{OdSofsjaD$qJraOX^3qaJe zz44duTw&_;2X$m_82`H|k^g_qx|2o6iD1tvgZfu?tD8^FNL3Pe63}pV=hiRd$ex*{ z5`CQ6p5b{Ks5ACq3-<+UJfek4_dACm>uYam(Yl+-=8}2tbBw}FWYb#AAGr;=d24BG z{NYBF4m$f*(E}uHa1!2Z@J>8S2oAf9x2CoZG)?~F5kYUSw(3- z9jTSI+I`D!pzylj9Al4ju1T5ok^3bTlfnV_SFBQhNkxRXiHd3OgrwJNSqax_$DxUa zg~k5VOh&`?*KzioQSA{EtndZJb*V59xa+iv0*YF|FZv~L#5Aq#8A+;(rbW_+ut%S~ zimwvM9<1uQqKuQ43AiAo5WdiJ=(E56?p}&)`9hC+=)*8kzto(~rBBh68^;WAoq(H5 zVNkfJ&^XSfa1?xIw1zBYd0il)^4+BDSpwLld^r=}U$p8BKyjcwqIo(xU$1$}XBIRduAQZ3b z+#MuLKBYc?scB)o344Siwc{sC3RBPer7B8Y?D6YAcOEf&oIvcN>o(3Bo2^?wsW8K7 zUOv7&T*1p6okYUo6_a(N;}r)lZ#bCn-a#>IG~{AuBVG>tr{UnWx7rbU^)_M93BW=v z$=C(NZk3c?k!et^_vr`C$vTLX!*IkKe5hJr?E1}2QQB(+x?nJYbQCS)3$nVf?dU6w zG%?Hau{Zq4cOa5Z>9wi+TOW=D>5w&R;>%+<2MGVfJ`Hc0u#UL%@UvQ>-jg?YVQ}t0 zccc$92)Yu@R#+Ta;{Hc;P(<#qQDI)+#fVC_w4`J&Qku*n%)8AGPlEpBaEB(fGB^V) zwhe#xw;?As<>9ciKA8dhBDO&DQ!LsxRT0AJXW$JfWCyAeqP2N$x3g$gM@s{;)nu|2 zD^>_uP{_Q_Udj2j@Xd7@BCfFhS+_+nJhe%0o_?YAO^^H0``$aNHB!tuSm_|0Y_+^E zMtqS;WcU4aRK(&t_aFZmG^Isqu<*R1GYIj0p+a}Km8e-v85mA=DunG$qA;7(+J6-^ z^{^FZaLk&k$ckp$nBI#-@(UtZZW*y%yX{ZvKx536PQH=9v(LFXW7f=Dqq`OF!R4Lb zj~k1qHALk)aYux`%%jO8D`fKii9=NA#xz~+wVKbKx#()g2wk<75NmQAEqM){z#pnj zgJzrC5j!zk5Fj+JKYc9fINC7N8oulFO}5Tq#EFB0BPKPK(6Jm#4e|A?kT*de06S%y zYQxiV1~QUO%MlQIU-1KMfaP`COuefgm?9)`5Vd~ zT(dhtXwL$@80IYj5Y>Wli3Ii1Xdb)gfA+)=7}?AmAT+Fy4r*`0BpE~L3}F$3|ekHtG6!{`J$y4y^21c&~LnU1V@J7GTdlt|CGZsjNC zzrbpJ6I-0-&%eEJBFq^Zq_cX1=SBM}yJbsuAqhKsQw&A1fCF8#ny!K7GD-2qRulFh 
z94+vcuJeSC2l4u~rnE|mptS^S(98;PO>;pF%kFGc;LCuSbU39yFet}g{EK9kVobZd zs_g;Ov*+S}AEk?{lv1&XuKQCVR^w zYg}ODBraXmZn%xq+CxsEB)`1>cUx}_q`Onq$^^=-(JI3&mlEI=KpyCT;~%+cB9SfFrx>tRi=!v zd!GH}P&^M?{oZHq7lWCcrXW(PSB$)oE0pN)_epqsjMp3~2$NwF?J_Fl&H2WXw407DG>+cMY)*7^r-Q*Orz2n0Q58WtlheOJI&R)Q=6^mJPfmxNM*f# z-8B`UD{T$GYcazC%k$V`pYDIA5;L=EIu#}&fhDoNtMpJVoQv9=!QMO6$1qc*z~3lx zG*)eEk4sMejIXn5|K8o11%IxdB1oJuqGV%pYrZq#lU|-qPLsSXFK#O|sPg;`0SIQIl!=`VPlbvOJk7VCk=(5T0OlhYeFLwGJ-U2F!{x2g&mih3xR)wvB zR0`+$gh%yU+YTt&-Thvht03f>mycblgCxx8n*W2F4MT=z506S`K@^B1oaRG6zsU-w zU+t}fPWxZ~*)_v1<^Wf24f|XO3iZ5`aP!Y&&wTeJ;%~8!KK}&@26e~;5Yo=2rKJZa zz7PeBp!82Ex6$D&B?H78L|X|lJ2>c-j6DyVL;ol;tMEgkIQLk|`k1t>_0RSx`_ZYc zB#iwYf_z%Sb#syD)Y*#xjI}X2N-`|c@%b*LAZ02oM{YUKXeZ=YCx3m;it@V?sZUDC zlnC^+#=(6BEfj>o55bRmMd=b)lp%UEyX$p$R8V|TxdUjf8UewV33-~&k1GWT<|5M9 z&C})+xI>HlYG5{(b(8Ie#oammv5$e0m;#ptjJFK|!JT`EOVd z1@r0i70oo`BAYF5TUkN3iSB=;a^ICj0^k%!jo2i?5e}61Ub<(eslx0Ja`yypid-uh zoKR~YAcsDagif-U!xWX^Uxlcn6A!1}M+o#|He2aTT@=u;_!29F+4W4rsn2o0DdXUz zPqgcN=Uq6md8{AZU_B2EL+xnhNDY8fC_ z4g(HB0A&z%Nwp5e**f#*r4P&v?zHPL)W=&8ce1VtS`7OKVxtFE6|*~+5^6qPmm}R< z9S;~;w7?x~>J=t8C}m(;2-EGQ_dmhuPc8?JW@d~u0#ekvuBU@{&7;0Za@xOC)?Fp`gm2y_jLJo z;49!sfCGaujdFFohHg3O5|d~IPiQElTO%M{OT~Kr8jk^K`qyl#feKTZLo+{An99;3 zSu5}1WR6b|2{{WH>q4K^bSp7R#Ltvx;4>O!ERIAzLiQZSE6~@kTjB z!)z{(?68|WH~rcCX}HNy+H*X$#H2BlM0wtC`L~a)Jo{40)2{rtu4z_mDqt~r+}$nl zthZaTZySw4vZS^*!xLZmrf||vmBLSfPv!HesQ&nVfJOfLJ=egMq5RE**=R8NbO9e7 z+UCnUe5>%OO0fG?XlXMr-y#3>y}Os_!-o%ND#wYb0VF3&sL*bUxX}!RA_<5s0QW&@ zdybcJNfEzGZ};;wTy70-jN-OlB4)pq8zLN{$hAo|eeSA|W;}I5=*YftSe7FPi!`c9 zNBR`AR)FQYf$()ees4-h?rb>5Ank|qi63N#U6Xk4o7xeVOW^Gd7n}7x3r|{-yC7me z%wg7*`{BbY#mh{0;g4KhUFq*Ra_OiR8t@tvTq6y3?s+G2HLa&DfI_l?GYQ!g!qU4~ zVQ4nE=5WZUo1@Np>sIh`aV>3UZhRP@^HN=W@|C0RlO*jRVfJI^jV&AseHKmC_;81D zDtab#L4gXhv-6mCg72|5)<<`rMnaJifnhLK>C6IWjoU)JT$lUM8}X4NX3ylbxw)fmry@4c zV^wzS0+ryb@$1=|?;6|vTP*y1An+H(U>^ADO1uJ7AoVo6Yl_ZE`T zPcBMOYhN7wj3zp241;*VbzJDd3-JgOMs<3u9<`GGkF$ezE(cjMZz<4z>_s0^Mx+Gi 
z=hPg`j0xdnpZ~I;Lmz^k?%zh;AsjO-2%e6b)(}9=nGr=VO z;wIRQXGhF@EjoYZQ9TP&bo{1uL_g*`TVlo~@k|x3A(_3v9YJR=ABbLao4~fTFl9zt zxCCu&n%+!jL%n>Yy*@dx*~VC;x5phNwwRQVkQ*;q&x?5J5-lU8$?8x-id^awF=~n{ zFm~;#Vu&OPYO|wb`}iSAwsR-{NJwurpRLEO z({a27W&y!DZ(RON$hQ9`fes3;^OCVIU_l>zl?AH-Jd|I*0(`vuzFF*}2xb{K<%)Rw z#)gmRvvc$FXSKeV=-*POkA`4tX2_|6Y+X_~HT4;B_k$N3Zq)huU)ow!;x-O(JAPF% zsX4J)6+CA;l%ir5I~3UR3@Yv+P8St$(k#)JmNa223MZn6HzmAwxT?|B-)=+cuGy`r z{q50D(QgwHuHoL%T$k~P|NUE%1O87DBzoQ&=oR4I5nt?0{{%1c)lJU%Dgm2dY8&c* z_0m7MGws_>*gsx(v7Y_$kh~`nR9P=fk~$W@I#>;#p=_g?aa+_i_-vx zo{iI#Y5as%-vh9w;t10>7Y>4j$%iJSIrD2q{~~d_Zq#7uL#4V?FV4Z7M+D2EGKZAY z%Aw0V2U(Mv9(rl`U$6PEl+7jrMr=Oja&OSG(v~E-)H#*pqgWRX$ z162TO3v6v~cl7nO+f=v!BHLxH;{CzP5doXtRF#DOHmIwd+CBV^_`V424d@pL1{-N8 zt)bZB_t<8_pa@1f5LZ%I8P01Sb(xLPDO-b5)H}XJpqt6Vc5K=Fchx3ru@gr}$-I92 z`MjrAzOE#{D}ymIRAkPU{?MrwAGWg4&rU_A5E`+DkV246N3wOy&r47shaa4wI`z&3X+SFXfRb}q#!-X`Ld+)#= zcTEPjaZcxg?VtQ>u&MnD0xEiiMn~0a|6k zC|{-5dYM;_|4)%{`V}#`_zAW$=ZW4zegeEcq~|%rUam*=I6a$fyG*!YAKZMPqeBgF z7O)1s1DD;a_}`;nm_%#dgQf&tEXJ%rV9MP**Vq@7fcSa(H}ho}wlLL=@{Z_q=W`>k z2`!SlTWP9YZkedV$J^5-OhU!CuOrQ+P31f1hWX*F2f( z2HPm0#D!THs2H8<16$P+LD5%8zUBGR+c7S<}K2Rn45{!bf=#n&xpwQFw;N5)r~}H)YHwO;4epzzZ4EYB}9M-7M_%^9_J) zGv~dlX7|SBB#a{|tt&c?W7FWI9*%2bZ9}iBXoggi2@CB(5$tXftBgTx! 
z#8*A|XvyK(Trx2aA4cK*L?veQd5x}TKwbseFZow-AG_-G(dx(NDPF z8;7Z7Jy z7%p2EZ8G+ZYYh>*?_(o$4)4xc^gQd)E)rvV>&kr@K}twl-vl05>1;bsP>p~1_4>Z( z9($2%z4~Yd)@M|t=f&M_z30(CZH(uR>*(pyT~))Jo-rs$5Shd@SC!7bsD6?KNRfKq zlNU9=p%fxul&tifvP5S6dA4yx;0ND;BTADJxU&GV2jGN1_enUYLz-0#JVf{R zYgkw_AGSwjB^zOYBm;X2+whua96j8QD*W*5+I5Dl?&uL$gkN13&qBO0qlbEW+m6qhX@y&FHv^$#qDYSP^~hqYP5I`@ zLJ#4huj+K^ekT&}^QRtA8NZBwl_hQbs1P2bY(HEWT0#n@>hQir3a@F387jBDZl#Yl%EqJK#6$MPb^@0*~m-Ub%vkU?5$64hT#?d&ZbY_PYUvrtY=&CbXbXq zcFMclVU@|&f%8kHw4{2e`zJHJ<4L6?+js#)W zZKFZ9&ghi*jk&)sOsJWAnjw@@YQ*pO>uYRxx7&4T4A*Bm#B@XB$vbfuqUfBv5c4Ud zWr9El8#n+yDa9F=33DBLgGnAjV?Tr`9$uihSo^b%T|{Jvv?o-uK9*+8&u5zFsW9Tx zVTo$$JzqA}#7cKb&aEx~ZTYQ_Pt#{m0+O16f|@i4)&E$Sn8JIU$a~K*Q=GoaXw49i zBW(oyQL&-To>%P=W}1J4?QH#Iv6}_M+K$k(80w>)YKfgywC(RIs*CysmQ?;XWKOfj zork|N!0nyk`H!#c#6ADLliq)7 zy?CC2LVwxG7%yMz@cAR^@b)x!M3QP`f*e{~7{N}PJ^hQSb*s$w zSQ{(uhfIu4(#G1&6A{^{%cqQUR)`z@qKjc93UJy5|{zZ0O8 z?ZLBD;*~)~gHZo%&36QhPD@K$*|U8om_UTIT4ov+rvqa9c=Wj_!9ieYWm!Eppaw4f zuL2_lcFb39+>_s38B&pPy8vOv$d#d3YLB&}1V{_O^FCH*X8VX<#ET~20>v=G9)fX@ z7G?vo4{w^ON(0^hqIT)t>zC`avgI#u(5k)I;@Y6qf(3J`scRkaUwz|D*#Bab#N-RJ z?%xR6eA@7Y3zt{oz%&BdrhWhO5XEWQr?Pv8h8GA$36w?dP+uo{M7>=Uq<22 znzFvHRg4v7@_78knsTh&f4mdih_!dk@nZMi+U&fd61^`FC`Q+MG<$eMX8i+5d@woV zJ?Ksxs(Uq2>m}s9=jibd$H2{LC|pNW^QY~|xqdz;&55kz9~lr9$ultDNEt|^k0WhUL!@fRE z{6VMB<|DPFsPySLOopj+sUGV`c3@?BQZq!{Zi(%$J$&CHUshhO))CGnF*~ysBGv16 z!*}j`Ebe#`p92f!>O^^s=^p%d@YrFH@xAWr_X@xETV>)3K7L`&|grmqYsE*RMz)&RH@0;VyDe{*AI2T+rYce$?^_4NqG!z{$A& zP@s!_`&JGPSKqsO!dLu2V<&u`V`F2&bsz6eV4OSF3@Ow)g-&C!Gz+fv+Vn8b{s zX3=2_A{y)7N<s~{t!)}i@pqnl;?t9To%Bkk0m_7|^*m^UGB~0jiUlwz z!1P^ks;fqT*_@M=^(+S=Vd(wuTbe1qX7raytld>GxY$6@NbfRdy7R=>*yDeHl?)92 zV`d|890xx=qd;Od6{z!X-K2zvRwz_2Y8ehw`C~FFN4s|l}?uNHO&5r%>_`m^5Skkbow^4E|aN#dNghLKm#MB|5LcSE_^03 z)G+x`HQ6F}x|rQvvN$%4YLwfr=cSTnd=5K1^uiTl3Tu-)jIIUh-LH9(hw{eBnwb`~P z@`cEFdt3yOADbnv=j`)(Aw)M;`{Kx`DR7u*kldf2^**}OC7_lmIg(T5!nyim&NJ3t zK{2~mO$A$O%RAv4xVVmJNqbtH5D-;1N=FJIQpMLJU4K0PsLWT14-DvxmCbt-z=S70 
z%P+*MfP9n;#7>p9IAvw!sfN=N$I+@-i-#c=6~?lvu;&1`<7Y#!ZzU@*_Djn~iM1?ic zH=I}afnAnl<-1?B#nx1(Q%~25s=x%H=WfnA~iqQ-`IhUDU zP8d$L(ru-RuV(tv2%ZunSx&XgRy}G2RQTBR*nUcnuX#8cJkDI(Kf<&vI(ig}7A&`s zkME;TP(Bm=`(q`)Ru%E&r7RsxvvlwNoxDE7-5w1#EFf;>-ju0V$JX6Q%r0js3efhD z#^`J;E+*uCG{r^Tka2Ude4c+>ul4W#wPs#FTFc3Gp??We+Y3CwVwLZ>oJ}_Tu*i&M z29N4LLEBCJP&%beUQvve%#lZTe&@aShI5qeD0o`4@$Y7Bo2kRXU2oIQ26Ah5nuTY* zsU7^4zg6M!y=iNba=2aLmx=mG{Ev9bj9%g`oMyo1m}dSZd;9BpD5WoDCo6FDuGyN* z`9v>wkk?l>uRj>FGPA^MI0w2$m|0*=7H@`$h1w}D@g%-B4E*kU70G?Y{<9@*Z`T{E zHv^mCQnOUS)OsatHU9_lkOh(ML!Hjn%@v~mjE?<3i;S}Lsp@E0!#pD<%xQ@zikpR~$fgPCyz1oNW%y;4#&ccSOUtIc@Xbgqgl3v{NqaM(AzZI$K! z=K}CIQjpfpMBVv9tk1v~cSYf|z31qw&MxxKa5o-u>qgu5u9%ruIyLH3dY@UwV$;@9Cg8gw}3}%KUDm z(t(~aZD%<3!HYU`VT5YmHoSgR=A0a!Wty7;p^jy(X6}F$=kaLdM``+RYC4Rq?{~7~ zGxe-RTT?9GI`O_U3~QxnQ?qCpO>79qH%t1xOyAlrqesebb^6yAV(Zil$(s~5mtK;P z%_r=sNkt%+Is$Ewrj+THC~X=<57Xy?&FLn0>$#T{qs|>}XHkbFn~Fs$9kbDF#E*TM z2UX(pjr0wWjMI;F6mwP;ZLw=qCo8FV>!(@jqS&;8 z9hxfTSev>yeIXw*G2Zf!3zm2xzdmcWm zv1@?fK!h)}I`{BSS5M#NNug`qPTr>!b-VVA&9e5=6GxFf?T%aePhW*EXoTzRN`4VX zzML%x&zD16FK{Kcudd+I@PnH+LBc|P#BzeGXf2*1{69l^7b~d80}f-Z&YlD{lPfTsh*%+z1DjQ#A^Gm zcoZxX>}#@T4|TU_I^g3$Vh?&yo8B8??ODo5n&z&7EqZJj6;G;Ri=6tggf@q>$=j12 zybJbD(*Cs^T*+j(p%~|4ht&fAQMlpIqP}MFjxJ($&RD}nk35=zTwO>Ljmka^=+&h` zsC8_oZQ-J@NTxZ8GBX+$y~(35YvgXPn`Tg|&AJ>20a8tFG#t>lIiqz7b)Gnq>8D(X zbN>$=y(MNQisWHsP}mM$&) z9Iq6e5r0c__HZ*kHi<6K5F$=(_LXDp^sX-yBZ{>QG~@z13(3pH0<>0>NIO7Am0lI~ zPC?hku0%aC)?>t0O-a(l=V4HmSd^0j|{up5Rwaq7uEcQ1BpoA&@+o%wDZMYUPo)8}|4}~l$QZK`RQDsp+t`XE2 zNNLL)Nr=K&&+>wZ&S7k9Ji5BaHlC4@QEWzD>+#0*MtHr4heE&~%oq0cjEyf}>=+^6 za9N|sQBJYVxYy?Cl1Grv4U<7(=!sB{<1L;``co6Yv=XCtPwwWqvs>FDK&X9GlV$!S z#OHW#W}9gQQT7BjB0xJAeQIM9>wDz>!c;C}JMiD4<*`FKFRPG+ih)SSpLdt859d%s zTOTm+@=`RO+8-VYKmQeEy87A|McT3@_bw`b z#Y8GbGX2Q(@g{&&!BGn73e$}`9;@T*OCRhQq2=x4!;n2wjLY*f!E(@b?>v#=@^vaV z0ZV1@a2)`00jUJg!?=W$o@MvhQ{6Rhaxxg#xvvl32pI(Uj#7Zf(3>l*zTny-6zLrv zGUh%?zcehQT~t4^NPJf8&*+V$@zx)oGS|bG>KA?xqA_1gD%nJDCBjIDYnIN1@%oW3 
zzsy#hDD|pHbenb36UE#|@B4)#Avtu-1+ONVs@RqI1$>Q7i~Dde^KZ3mv@BiX`FIH5 z16!>OcxhK>ul(0%S!zzj_HpBx#lD~XaYDUAYOu^yyHiDC3Pt54-G**VMjzoTgUt3> zFOM320*SJfM|G}i{Is41af-!$t1BIU9`+*U&XuDeDmI%Rl|ng`-0Tv%T&=F|`&LFb zICp{|l`d7|S-vqFeewH)P)ShG*&j1H#d0B2(W;HQl8CUmLyf~%mp0}GH8-cuQKXWo zB(wlp9nRXkPn@e>1A^&fVa>DV$?wTx7N6rC{=_7mf!!mfs?%VmWYOIr!Erk6z3ir| zt>i(F55-ma+?P4sxhi^+V~7e{Z1xQS8EX!RcSn8y{?v?Bxv*16Zkz!@_jQ(KHreWU zeFhLiv?}cN_hh7q)! z!r{BnWLE@k)9HY+kq_y=u*o&8FyQ`e zXYanV8*bEP0%j^7LIj`s!Ql~7TG%%Zo)I9uMZcOXUJO6LBI{1}9!CY$shP>IbYS|| zPmckAwh%sVMzEv-F{&MW2myi>nMom)4eek>?D#I;3=nogbZSws_YQh%u78wxe6qs9 z5C&%lP%)b#Vq*Zo%xyc14Hu9#Qe?`#d=;Lf{oTjx;; zu#*;g)0+pNaX^GW0sd9+(PK%?xn)EJpCn za~v;AsAlQ520#3DNmLB5I=G9+1}f_<0)h#@{vzXO_3w8zpUDnn+}9z^z2lz3=UjzKId6naX0bu8V+B^2mniqOl=l$w&(bipw3_&Un_nsT@f-C=G z?%+TQ{s%_%`S0(c@mIgvjSt7)rrID`nyi;min?yGo3G!UE)q@<4UbaX?afRdv&a+- zbt-Kyy6vf7V1X1%u(Oyej8kAa{k+feVXw7{K5tzZjzZ- zs6WQJ6zesi$9(K}1&PZ>N4GbpOL-?td5gY&#g|I)@vyQ#b=Z+y(6~#ZC zq(2|2XW7g7JAD>US4Nt}CHB_!@`jJIp|z3ei zBk68kNtR(>O|X0bu7+?2Y{WLBYKaMibV5^L&H6Vc?navmDi%5=Q;>ktH1cH6T%_Zq zWML_$dl^wott2;ep3v*-qsbZg!CsEYWz(h6GKrRa(?!3EYnlz5NYp%m437H>7wM>2 zSfZP!AKX-yiEGRZ2oZPRxCx5UO#y)@STtTNO<6?cW|_;HmzsozM=!GA4@h8f%V}5~ zI`;da(6|mqup@fSbiouTk9r5QL&DVx1HkK0kjY6rK~!iEj>f6K#VE>EOaIpiBe`F6 zx&;P3Q{0vk2QQ442Ktf!3sf27wL-JK;P)L4`VB`f{{o)pX~~3aDfaMa1nTX;-vM~C zTZ$3aJCc7`iF@JblZI>K0TUTlJRd+w=!AlGlbVI4PYg0~K1c44a^L5SNB%+0?uu<3 z<2t8Z?_}lrgk1UUa(ttuwHb@Xfd^>qdMOi-{NS%&bZAf z3xe~3qfYfYh}wb~sTW`O{k8r*3J@EP5|5L6aCUA31L~6NpO+Z0ZlQdTS+59{r|?0_ zJ#u1(VC2&Y9K|LrY8h@UU>gmJ;b7J?r?DhwT;=Sxot+9r~9bR>qcHxvYLUSbvuBrLDsBp{TNLn_FG4UzDF zG@S)hm220v)q{$nA`OCogp@RhAdN^$BMs6L(wzpK($dlr(#-~>TcnY0q}g=uf9>}l z-x93e7&<Od=TaYgORM$AX*UqJSaF*03TCI4+GH%v~B6Dmq! 
zIe%DkA6wU75`OwPEY=Y@LCfyAu97$Q<2O_XNscZQKm~xFOg=ldV!-VDr$*WI0fk^M zpqaX&vY+T-^-=!nab%0{U2n$lN5wYHZLA)Yv&V!Mdz)`H|c)r zCX=#(Tx9^P$t90vwZ7W0c?3Ls#Vd&bEGeRr4QOKuBK?J*m7e6gu`X$=CX-tO*5 zSZ)-z!_&`q9QY0!iAJt)JNi{C&pXy^Iu8yU;@uckVV_g;Y9?#fs_hDHHKUap`%K_D?%e)V-HjdBa`+^ zojCEXEXtMd2dFyvbOz^bI>8atgGZ-iWU|8h7Z~Jg4lh10ZaZ}`B2|r~rKNYzOUsO_ ztfmjHC|(rnnHU=bAZAiJu7{bF>pf+EW@-dLiS{7CaIi?{i-Z!Ytf*S|8r-CoMcU;^bjf zgRn8Klg%1!yZL5c4jvvg$j?FiS)I1){WpOAYj$?_dL!24?WL>B+eTJah3Jn5i1a9@ zI-M%(67>U9*-MQ^ulnAe{mz^eH|bh!mcNkqn`@T9@4A-%kxtHiW#w{PU;R(o~Xc(E6Ze)l}H7yW;TYnCBY6kaZ63No&LG+JHi zI{SAFEk{P>$i@QJYA=|z@z3hpx@qzR!d*UEyV5%3`Uy+;tz!+J%`f6+U|`kQ<}f+1ZWF8-ZNc&TE7k)v}J!`1^Mca+^v; zeeVu#+4c0W)XAEdn5_4|TMF;lihSU&qTT@J_k`q}2nEqBXlZBWPN| zkR{=G`BLj>ZK#9G3mygD?|boh2|VD`rOsE)CroOYVmZA!-|0z8BI7mg#)qyz2=B|q z5PoP@TaM(#IMhvXREic%l^FV?uNk~So}W#Y8k>>#uP4wLssa4dJ?f;xJ%9*qho9$~O`L zlaTFo9N~RJ=5NqP@~F3?g^i)%-_m|CZQZ?j^Yp|MG8V}?!JoV~mCI=^V)E%bELRXw z<_{Fl!ssoYlUm1hHuq7>w_f#;tc}{gH;Nj=B!*-WUz?D_CSFSJ-b)lES-Nv&- zu5Zb5bg;M9|2}D4)sX+# zAKwD3UIWm$K2tCL3YM!|@-;V_EYj5U<3x@Vvdz+j1Sn~}8dH&YPuYOCVA7w`GgU+J zr~drS>+V;bqagl>hv;5t1gmf~e_kB>j(*CNzScn2grMrXWt$0uUYx8gIBMTi(|Zwr zT+?=Im+BhC?mw!U+^n8vdFxRDmZ67tKd_G(|EZ5wylZPB;*vPh4V$g+sj-O!f%kqJ zRG?jU7oR>NcSIFE%N_>#R}wqQ_z0^rr>%|^d^4y}cY%l~q#Ks(qv?4k0lqm!Z7U*3 zQCwLJC!+EZrwFzYcTEdP)h!eryZ4|5b?AkuELzXTAQ2NvggN8ABS7}xC46rXV|vMF zjYHTHtQ>v$=TL3Zx@yuEh8dBpWD_*d{!;>0LfUsMjiL8{({EC`>`Wiq)=+?f*%~+&NPBPHni8jz`f;T$CRmM{ki`P3g_SwTnWfPhcE{C!03-kXzV^Mu!Mba^PcJDO+>ttq4n7rEzw;th_EFWdWI zR=5t~!4JUHV!Srkf0+Im+Mr^$Za|?Y!zlK;maraz&74Vs&3)*N%ELM_Ug6GT*dB5xROn2q zoTZR5+W9M2-ab8F$Lo`DE;TbXS49j~pI;_rH<@yKih1;uo;?2-!$7}F^(E_KZjOh=gO#&X&#HA;S_C5j-ug^bVU8Kl z!4EZ>pFMqgs|voOz4rz=uIGh;-REf_0=sXUrA}2GeQxd4GKUO#8o{%Gb38o%7@0cp7S|w`$R{= zyj4Jmb$yMFn)*MCDX;S_^$%}NdK0?fcQEh3Z1&5_naXDl*+DUu!GIA75K}+c9)%7o z(;Z45wY&dr4xO$AVkulLoVURQPiQ2<$!U4$h`@ltJ*NHT)nI`cCrix|E|_F{fL$5Z zW->5lV-+XUKSUt9;8X{=y4syQaAbi_Gc!7g-&IM?Z+Pru_Hiy(#cbv%G3|lcYrzBe zxE|l3tzlQJ8#&bfMr7m!HL3Krk5J+#yN~##nfK+JB}SSQEhHH{TuLmlP^sVf0@7@) 
zTl)-s*gGYYEz|yKNU+oyk(_)7gwJ@bBNIB8m5C{83c)`?SAkBe9d1MjTE%I}J842Q zU#HgLIweR8;DTgE1B`$*KtxBxLdp zED<%Edp(fuxL;7NYfRavmgXTyTge`qzfJHXqzZ zvpyy)3bX#I_NAwcS~r~fIRPBz*y*DMpVN}VnOQ80PS7<2gDFUD7K^wWjTszcC=p)f z(uF&8@>8=S2o_k7!pcAq71Hy1Dv}Gc%%ZZFmRHvFCBRF)2n6VMNdDfQ;w>#LtF9hx zLvQ}UB=D#54XG}%z$5?ZFi;m6EK7Sp&RA_w6DaT4-v4B>^t#Q-iqG!3c?v>g_EuzMb+(wq zbE^rm8k8_4^3>A>3jyypmSxOQfBMYPPlj0h=s{HGYnAP{ijy(>2a@evY+h-rY^ zBLQ|fG)EThEY6w4Pi-c5SbT!Z_3X}RBLM(#=LroIx`70gX7(rs6so=a9e$yt01$`d zk`P8XT{CoqDeo_6HYxc2={(+^4h9999h^vEwFYxExwMPM{1GdNG@-K}0re|z7PV|S zQD+(L%+!Pb_Mt$?Yt^rW((RU_qM~f!#)piMT1(-1fWvJ*jOLcZUHORH90P+W8ulZK zh_**l!Vvr%yF}g9RLs(d4MHz?9ajkAtzM4R`m?w1`r*tX$mxqhkj%}fzMOi#`yUCI z8t$^JADRvlqq=QvK?@v)nBw0?>^n_asr~yUOl;*9Vms)nDx>>@)GK#b`;Pw|%u7M6 z{a5A#GK*iIJ;Vfo+Apa7w%0>UAWGtF@zB+EO^7wuOiz)eixK&93k1#IUf8B93FRJa`G2<&-kJ!UvXjxfg-s3(S0cIw z;t(9Q2V~k-po9NyG<)S)poe0H*zo()Ot`Q*w5_HCQpD=lm2BXCi^AIT%&p!|z4+4S*xIGk7|)PJ`B(){q2OpZ=T8oD4kjs2F- znVkIpyAI4*%)m^bKaLkXpYhz$0ex1@&WwAmMn2)JH%fr?x#h1cpJm?`y=LngSp1Yh zs9$M~3Iwy}EpZ>&{#2zhnxOC(u6T;pa~!*(YCo_hu_e1;@)d`Yk=N-V5vcn4t8YD{ z^=>#qsUX<$y;3OT<4@joey1m}R=QH$D8g^ja^RI9(byOJw*NbmYJk4kE^^?e@F!f? z+&%qv3Q#=Hu5>dM89^uov_!tR#f>f|a?je>_655hj+jZ(eEOKG_EbK*$8ii;vm|KI z8ZdzWIef^OTg$LZ^4$1PrC;{<)ZefSlcP#3k?9R7#bnMR~u&Y7SSbegfX?f!GUj<8H z+d`X+ES)Os(Z0j+!K1{Imz{lkE?W!}3NQIRT5xEOg=#W*zR+njzy9dAeEL{3XeZ3! 
z@J}H!a>5?)xGc5wpUw_9_UA87TUCP$&kn{8j`TpxhS=Ndxdt%)B(PBp7K@1Gz0L0~ z&?rN?)vFi0wLCL(FHU`%NRZg)H)7IaRrUm9%A--VBx*VagqE9m+Y=iQ!yzX2SL`Qqu_sVmjR$YuIdX zNO(rb1X%=c?_!c9vhVMk>S6{2MBOI&inti;!9>Jb9Q+>_;0dvy2ZnCZ*g*&%SyQMn z@ae~<>l|kmk)WBrd zoh;JN|D}JGIYF(+`^gdP>cPYHDuxl+tG9~-K9O46yQHKt-AMdvZikyvITkPXdwwVP ze&gn>RGhO#rpqfgSI_592Uu^l;F7w_> z3b*ASVTwjUp4pJtA|!avk*#?05ApC?z>eMrhaw7YFOd;SZ1i2`h__K}e#q!|pVVC2 zTK~tCD?d3b5I#OW7`onz_kl8>W_{16HYhaj7fbJL?d&&?F)+mU(%A$TK32lGhq?bH zbddh8reqlS28Fok<1;m0X$rAFAQ}nP)(Y%A3YI(vEyr6{md48!H6A)pNWtLd^qP9H zz}NwTcdqFTh0!A(7$?!SA1QfQ!=v%&T+X+uNFexiwm_?;bh!rtmD2^i^nPl09jp}@mZ-a& z%=?=SmNUR14s%8F&ZjmiNm*I&1A$9J3XVYWSM3fLSJ%GWzTdujXi%aa5Ey!$!S!@m z4UDgMDLi(s3tgUMtnEtr?EQ$03J2~1s78r2-^(=|Lzr}Epvc|#(ngYLn3sz)F2!7+W-NOen6TMS@-}okzYh0drRxUkv zx49DIZ#VjvMMWZ64YwYHmbq%$=_Mo#tspzdJC^O@63}8D{_yy?W_n8Z8N@TZqTrX& zpVg$x*Y@RY5wv9OI2ttyuYc(L7>vcG-OTzbs!EG&Pig(}ClM5BuyS9Q0{_5a|C^*2 zI2h$tL$?@c!K^vuIi_=R;k8}6fsT3bpWJ%TJt&p+&Hv-)3y(}6Xd?4pofuwwku2x5 zIi7YrvTVl*ZLiFSh`ny6lMBIXDUbJ_In^bDK6AmYM#j?H!us=O6f@uky^e3oi^BFH6f=2KJvxw|tO25=hp5Fs$A#txC|$M+5Pz@mQ794h5H}Qvy;P` zQxjNunV=KH&CQ)aL^~_w=Rf;nRDn=ewYyZ48ARQ5&^d{L7#Dx^$O9?xV1H&Jf}O+R zk!2~N#%msYLb!4Ljf_ogmkHDcD<%k`xF6l4btRF~is+|! 
zy2TN-gURr&>EaYH8h?uQ|AXC%oN}Gzb1iyp%suY=523EV3v1RMoIy)$hqB)d8o5MK z8mai|Wh4}9j>|EoP;r0$VF~7%rg@udgn%{ghxHh}el&8yjRI5V|xVaN$XE> z{jn@r<|F343A|B74HwdZTzEK%n$&CCUux{pihPao_w4A{7)3MQyvr55WSjFZb}64X zH_5I&l}`b12Q)vvM%vxLc(M85Vewvm45+Gs3-r4q)uXra=VbM~-v)0A?i7Z~4|NfW z#{bX>E_st z%3pA)-;WlMfCkwRmOwv~7~R_<-)8adTF2f}yL*T6*i9dy^571YtEg{HWH`C;DN)a< z{v`J1L?hxk3QtfrHrKR2gNg)-5>^}D|( zccSwz&2r?+JWlN@EhpO{XbZM=IVoNV8l0x<7hTbKXm;Nf_VFrExL3}1D(AZP;n<-C z?z}fJ-xp6lMjXJx75r`@e+aBVs|A|%K{zcEUpfLo=PE+83lnK{qKSTX6s5 z^!A@Id{JXh4cbjX1bRqN>5#qjO8O{7PyWfrEe^C+KV!gYgidZt^CverF4QTPkv4$v zZ{Jr9BmRo30tP4Z-l)c|%V!}-*OQR8;nM*ft$oXV9_TQ3e7d|nUTzlkw=Gk)^cwmd z@6L;%n-)c~DQFpjb>sen?XSnwmfqkLfY20~me=>qMoMWDvIovnQILVQ-H7s1KKGsR zlHJ0#76nT~UaH`P<@0(FOFcy^A{bQ*Wo7>vGiT9&o5}0)Ouqz!wHvBtqP&=u0gONV zJe5!0bb{>uDLXsPLr^hepPgKuZLq386Y2NuGilM9TMlA+ME&t^nTbS&kjLds5dysY zk-SFTka2Ayr$pv-l=K%WJ4TGAKLxP;Aoivdo=rSgfaMctx&8e6_fAi5A`Vhf#EyJE zT+HRZj|!DLPpC)m4omlyx{M>0_O|PGiXq$WOUXQe0f>EqU0O(S|GYL|NCJ-vgPT<|ZWe5> zp0rYcQ3F0S@V?>&ES__ls~AGR4D|h<0FFrc3667Ch!RpBZEpe1hS#8)Fl-Mq{`K*C z(?wCOBMIr5JDWe_`$n;jqp7JJY_zXPukqZ5S4Z)Z2(!!-ugtw;<8a~l^oPQeSoGlo zSW|?|nypa%%#=%hB@o`G4_VJ**l0W{@P+=3mr%znGICn1$R5E_S9o`$sLtWM&KcD$ za6Byog|hbp={KQJcIyc`BMvu}{9X=hNJwC@^8kv61K0q}E_{@j&(QV@$U9tuSP&>_ zP~)d(E!_?*_W7Y=E=$ZW`pSq$vVzLs31aWQ`0o6UpvS+5Xa>l%)#ht(B%$NSzAEi? 
zQvdf^2|>H-Th(}Q#QK85_L{oxIjE=LdCBM6x2L7dQ(GT9Pl1!vsRTcG1>b@I@U|h!{v}voLt`07|UpL37eoaIHU!>Ln6GKHZ zla}Vmhclz?(@R^^Z2@N!+{lmHg^f%5U8T+Z`zh{LFPA+VX91A>g}mOa6q0M)4*p5W z{nq7}tknNoYMhyq^#o&c)>&AOSeOG`hPs|xs(?$t+iuvfVr(XuvRS;JnVCr$fq-D0 zB-c?A%h}3MWjWsXdUvlh$Blaprz={TDK1n^?>-8%1-u#mH=N4}@_`kw!J^TmfY>8; zQ9x2@?ZwA2GLM<%M_5CrL%k3fLHJEr3;aJ@17iI|^4k*wsnSTc6a~?x@C*xQEsf5%xXxXj zGcYkN12xa&Fkz(KnLM=U?Kx_8pX|2o)0L92rbSn?Jxog__JKfaAF7g?7`#i--tgi(O~J}Jws7zp zfDl@z?WsV{HZ!xvH76g(LBhYdrITBfw3_O{4AH~;VUA*t>jt)kDZl5)zB&Q^76}8; z2h!8p4pXxpiU(4}`275Vi9Fk|Zatj6z2hxhO;tdYR}1Wq$icztBkG^NDJ6nTs^pE4 zRzzC#j5Gz>C6TZPwGsV-R}Hny8u$Z=6w5gQMX%FO?u0i+o~)dz>9&t7WyD#%C}21J zofASLkgyPmoABJM8)Rbl5`L3DI1rr`7mf(5{JSX`IB)yiX&N>p42+D~I+K-2f4+Y* zj?CQd^*CC~!6(H38vi#IPwxHut+Bi?y~o7#QH3?B-XgkQw0l3zJ99)!7DsI!vK*F}@X%uEKzHtsl2^6G2v& z7_N=}Bwbrub66Ynb$=a7Rbi(55Hx2`sk7uYid&W)_L{Mj$4l(#4mUxHU(Zz69co-Qepaais0~QZ+D?CsM^?kRuWT2X&5}D9yriW$f(coS z=9_QAo^zThXLQRydda9k??}LY)$3E=%3Q%r{TtnH2nbs{OyKr7jA_Up&ylC_8Ae@w zITp%<^9Wkc1i&tU4yp~7DAQs5d-+U~rT#$a_onC5<*emNUpvQ&jk*G0%YUAjjBZ4f815Lt zQj$rN11if_=m*v8G_Zq(>iTsLUT;CuB->len&p(7oKdhXu>(%pm*Nr&wdXe+K0Tl$ zdr)TAc2_L6wz&A?I|X^=55qrjZoDWv*5E%IH$3t1Dy%=!EUmQWN$H4veDHq4LxqcG z-4IU^5fLj9RUB}CBq+Rf2b^&7SC>0JjG_QcgO1|cv4A4Sb5w^*SYTDQIhp*7M?~%s zZp~~pPB96qJ zox#E@Y^K3rRi)L1P2}h439r)Pcd0rXE9mf$E4~;NvZO)l+1_c2&AFR5V-bk ztq~d^pNsEBB_;J1Zt76N)9(dnP^aZ_-r@A6#m`fa%YU(s)cP$ny1ygov|Hp|9|Uhf z@QmJ&DnASsq!hWdS)-Lgj@AqMJ~ve{7|`E%<=~60(zMui)W*w%Tcd4H+ajPd74>j z*+jN=G4d_$QB4hR#3zNIk{$+}dt@&jGNff@j(^*gaBZ9o@899EcprV@d3G?rAae`U zX*~pSdWHC=35b@6)(Gl0%)~;<{evNS(zZ1c5)w*`o;cRU!!g|q<&H3i^(kMu$1ew< zqlYeq_n8=ia-w|aPs;w;A*WJQ^SsT{73-oek?xPBllx5$GAJ2GRVmE9y}gH`F#ic; z%XtpG*8|x&j%e{xqV==j*K#>S3R&jB9|V2KBhYsj=+up>aE1h9hXqV%vVGrKaM&6R zCAF@NXz|B-aXclsLc`dcXr_5~`ZB|m6`{+DEx^?@t`oyv7aN0I4{Fe7$>le+Y~57} zkL!N=v-?Zoty>R+7*Qi1t6C*4wh@P)>@OB95(uSyeH7U6J8p|T0W#ElkWIxzkreY@ z$ftMoKa#wkdC=phPfAS-W8S@cxAzDuBw9bNM+y$F#J1;K7-CP*?22okj`f{3F;>HO)1&*-!P`1b62o+SA0;AdH(yxziK40nA`!$n9CMS4m#OJ*I 
zDo5_|+ITKrPKwhbfJi}2q$ovTZ6b>)U#;jl-^3cnjvm~Gpcx-1XM?`8DCT&4@*8m@ z{ods~(~$WDF0-JPdCCGaIAty7sfAaW^!~Cca!se%_zl1VkR}(*COZ^o~*$fvCpT9>awv5zIu}tGzw!>k5q` zp0y5Z29mooPUl`;sZpWH8+-6d1 zLJT8U!P{hQ$EhLl7$ZSMY)+eifn01SwOT44l51JrkBUs{1+jd@uf1g<`hGqsn1=y= zWdc=7V3dy7u6i#CP=lq}<9G{~L$m5>y#^n6r!%wA5g9=t!|fg~ES^43oKx@;+-W#x z0inuma+ix(=gFX|7N&9X2> zE7MbAjQg_Qh6!noZ||Rmh!kg=VNk7(t?n;6WV3-h6bM7UzT&c}R~+yHP}CZP$_3bp zI?gFPK0ch2l+K32RQn-X6tLIF1JDXV)P;Q#&f!lqoV|w?Rkmyos}QkHo#P_hzoS33 z{2xXFff~YRd&3I0jLNOvXu9#iwM7Si#HRQS_fF)k^d-zY)Qpo#VY5%$WHES$Dm;x{ z`7F(qB~ir5Mq825-|+rciEo_5YDS55%`&0igU3Prt32DYiW%GUn`OVVtE<0R$3DxA zlppuv7Tj}Q9jPsyXm50wqlGrmz0_G#?b~BI&H{0|c7Z7m>Nnl7DDL^8E!t2xg5Xx} z!b>7;o9k7xf;Srb5ru`190=5OZEt@1o=U)^_Jo={6_4&s^UnU_{`u~yCpr_`VS6|p z(7(`?z8^okO_!Y6*>sM!cYs0N5DIhuE4F>l%*1~qeRjAd6Es8;xMTW9^~*K=dsA^N zI?@_7iD!rFwyVj`>tkPLJAjL6LqiK&bF$`nHdaXwUU%en(`B^p7ymLI1XUt?p>cvDrpbve^!01S7adCVC0!4N8S1^ zY;c7J(r1&lX60m<>5rmLGQ3c)Q#{wq%wB9tt(fiKRYphVg^??Ypp-_;J}22mQJ-xVv)p-lRHGOV=OXmv?P~m#!MxUWUz%X zJb)Odo!GZp<-d#{O>sS37j~Mx(z4XeI}KWo*nj3FIJfPxwEbaYvhY7}6DTbx&_=8L z2=VbHQ{{Pn2R)+o1bp@2B2uzwjJjU8=ou?(Tu~#^@4&6z4i=L3Y$%OeWR7jbz_)4# zbPn`C4sINFgJYlfB%#R`Ny=lz7#tJ?zY+nsgXj06wOMk?|KcWEDml?oxPJc#A4>@q3@!gc&PQ z0DfQ*?@~%&~k@(yfUKnU&>bsnmYDj`6k2tvTL$U+v-Zy{rif3mzn@lnQLVh_>gp=}{#{{Zb>p=U_p%NSbuyg) zyCVm20py|gMF{d&jBRa8o;k<$xJ#_Pmi*#>FLry+4xWA)nykyvi_NT?V(#hgUTP!K zdzQ=+XZ1?mMxC5jiRORrv+*K`$wfwUT+8bLbS3Mg!kS5-9GAxj8ytttS`2Ba(=wS+9NG< zpStWsZB`&>V1_SOEiNI;ZCE^qHi zlA8aP5ITl#f^>KXHP+p`OMtp(#lpIq1s59>ltBhRvQThR|66@z+Up|27fBX-RHWB9 zQ!VHj;T3#!<{>!U`OF}@+owh_SL49bst!b)hhQqO0c-V-%8mC;){rhsP)vVD#H>rx z)6<>Ru^0iJv^91?Af!BZ(Bdhh@(BBuz-GYAhgxkG~K8H z&;Ht>o3Wx|1Y)aFD|DY5YIv9N^M{f}{S)3^jd~DEKh5>@<+9P*;pr00Qun?{s2%9q z*A$pkwp;fezWXLyd(UgMS7UHvzx%*s?e0#&bI^Y;LkEct?jpz*)LT;1V~&QB9LEeE zRF(51<)^MP+MKUeE`Ux#D?nbf3Bv84i$Yf?i*WaKPfh79r?&+!LkBX{85tEtm)I}u z>kPW3st#s*dv8@)iNo1*ifTQ#dh*ka2G6|>_}$~4SSk4>&*R>*{zpjY2b-)pZFo0& zo;$h1P+mj++c^Y-&cJ(ba6KA9l+{$ZZN`O37cN@&DnNdD{?d8P@<311t+sb2o_U%) 
z9L7TA>@y82IC#>*H*6$l71j%`%^4ghwdNyP&aTdl?Lgx-xOLQ7u%%H7RsxXsb_2;f zzQVKaz=eMgxN3GVDcT*&*MXoo0%CZgpq|Uqumd9~!m8@32J9L%Sa%=Dz-S_RrcS9- z=;qc~Ba3GQ%6DaD<>mR7n|?U@<*9JyV!*<}T7uo;Qlxc5gb-@4?7(FTEuB8v80+V9 zu;&G}yB;mvNSW|Dz^ME()!7-$CyF z%s|_6hAFQmzumU<^#8N~GBp%Q?AarP)Xkc^a&a;sRhD+HdAS#I^k|0<;Jp)OFxo&wPkO^t!_ zB2-*`VS_LLCfIIj$NMZfQFTD80m9=zn%He6B_&J?{n0{=o<;9-QlYEr=PX_t8h5UH zUrX)p2iX8sTg+J{QVd)0S1M~`Qn#1SL;iz57|$)$`{~L|%gh#^>Lxvpd3~SsM&zMT z;c7zD*gro#oRtp8jgps}<39HpXtqo3$P?I_=y87*pdNAkc~b@wI-@P2oZuPLBa`lE zhJ%esqQq%AMMa#+O6$enFSm!^WK*@7!yU;FtApuYRFv7D@7Hn1!QO&a7c{;56R;yG zy>Ns>-PzCj-5a%U%Lf3P`ozCwLIXlMtwJ_7LW$3H z8XvzbEBR~bBl@J$@~>adT8QM%dPk$v7N3_o^1s84RADLGzz8w;2lHCzo!@mk^%D89 z%)n4$s;N2Ot?miEVLoI*`&#qunh>Y)Q11>8XDiIQ#!a_@7X&dZy;5|JtF3ri#il#t z4X|&LG>`6S-kGk814C65BnB|Oeb%8bU)nkgEv)S)U%#Useuj5%)EfRBAeQJ255znV zR({X|T@|{)3u7$)b(o{&IV{=ZVD%M3U6irtJy!J(93DgS_%=5;CF|evduRR#fSs6@ zd3>gU<{>m>h@VE}Bzy6JL^cc5UJ%qCrX#eq0ClgqiZNZWS58l5WV*)@uAE@{rfFL< zLS1=YRBubXp0bsoxs(kTvKV~^85M65pJy57$U&w(TH647Fll}Lv;l7ojY+eK^4tnk zy0px5QaBO#{d=XTkCXV^lG38%fnimAfS!Lb{dbMXdq2}j^Iqh#RS5b<;Jq8s4Z_=! zY?WyuvR_4)j1#i62TH`~5Q3~0qXqLRipRyy)^Z^=g)uPku6Lk*x^VC3Rc`5ldiC=B zm=1bzVDMf%K7+dZf7k*CUrQ{$@1-dOfMKs*XPY2zBn-A)!7dkM=s}k+cQ+nKCNNj}Mn=mq(L%L|x*xdwAWQ>6V zQ^<7d%bnWbSZ4Xw#r;x)Hf(L%Hi9$HpkIqlL#tEvr*mJh-w!>F0xz70u8yDEKfSBZ ztaF@$Z$XYB2R5E+8)5H>DvO>p1ej#Ue?4}d$;IlE)IT)2E}DiO+s#&0i`W<;`$YHr z;L2gWN#sg%e0}S>_eYSEaafMC-KCnL9!P~LyEWkDYc{V77XS=|^{*O2Q2d4QKfPVo zdPrY>!Ixo3-O0_IiS(TIz3%06)8q}Q+4PEhd8MEtGS_J2g%e)rd zHlNqU`{L>76BRr6lk4#0;?9)c#bx(?%X)?4U}ah)XbMo*Rd)%)vi4G;cYCtJ{(82i z-KuhEIeL_+v3w@t7AVAVTWi754yZdTe^@ASiJvNs%01x z?p`J7$jHcp^etk+gq`H*XrGqt0Cr>C*b{wbS$g7D$YG9-Hu@Kxj|8Xz?00YR%(_rh zc<k=l7ZrPo|+}nwN>9jr=b3@zvL)NFSZ? 
zg$0TJ)NJ27x4uZH(g}LUAV&mHNDreM`n;%>7G*!~BZ?ORub>zvhw+~XlQnZh$YS`T zMqEMSdyj6eI=Qz}=H0p|X%|XC_=R*L4KfeJAEz0IE^MG`U3*Nxm-uFXg_+9hG56xn zF+Z+B`VY;7$QsFB|IZ>&J2OIl)T|O(|N3+6;$8KDCV_w6?3JOwl@B8NfQ`>Js^GSDxx+UM%ycY9Zz$7dAuhKw^v<3)Y)aSzsnWaMxu$Z< zzjsr|wDM0Bqsso(j98>bmi*lsS;;j%_h#~1XblV@6@mqsFHxti)Z#HkpWBDTyIBcnt0Ie-s>|P zehSQ!P*ary7sKU*{1A%A@83FIw(dpHDFFsR<9Lzp>S|b7))OfnOdL7DI>=QiM&IyI zg`hDEWxaTRZsRQgjt6lG-<>2FT<1&+V+BSQS-iR0vo2nP0yJ`31pLAL_olb5*$NeBVJvM@+D(n$#y@ss_hnM4sSfVq>J|O8u>zf zonmn``5O^??O*QI%NQN+M&%3CTV2VGgN(Nb4#}`WKN1gzRTbo%;%&UqK;gqv!%)4B zME<;B-q9wp5q^`%whX2=hkuPWnG&CLrEMAKF-y2cGHFQaA@f*^2`cZ2hSfb-{F{!7h*aYU$?q4#u z9<8uJ!^v<-;P~Oax9T*+;np5`OZOk+t2Qt*)@1Jyx>n7Gsok!9tE+D5Y)+06>Kprx zQdlZ7^iWsPC5wrBD_E;Ui1e!3B|KG$a<0){ar0z$;r4p$y~$6gtH`ZbK5_pL&+w(* zBSZiLW4Ww(=&)4o?#ctxP0m(EuC#&%nOxag-2JmMjW-$mcl+7f;oCZ#W;JW5Lt%EX z;^_zhrea{YqVv17dQrz>cW}2$USF``l6T?}#eWo@E3Z`k8`&s&iojndj^?|nn&Xv2 zuql@CfKd4G zj&a_w{i`Zc#qgeHM?QjY7uCNv5-*0C(~~=K?;BX&y?N6L_}Wwqe(WJD<_#@uy_j_K zjH4VckV_*!xqD~=_^yPSwUlxVPX7Hz6|1JUkk{4m(`O9X5m*(Ct=7lnNX5O-H<)KQ zN1)?ujGMpR*{b593c&}XKOb02CXRhmqZ>s(mv7cXT>_%q1c``iKkpRDg>9Umu7z`d zX|Ii9?R6lpXy}W)uiG(}Apf=LV761jS(9ppRmvML&sDZu7{fyM+nFeC-j^c$;jr*W zukYli*(ZsM!k5{rh07NmGrErKmQ~kWx67Xsf2?#)B<2g(`A z5lf`*;k202)l9{sN=s4y`=0JY&i#_zZl#C?2Z}?EQn{{P4+9Hx5&k0nJTJ}^zAG2x z`XOOY53#P|RnYRt$-)nWr~xYWQHDt#UXzKD5@+WWYq>EnP2BRHQ_~iU$KP#ehlTr>XCsgHTR+Tq%u<)|J6ac8q>y5rBJf2ADYZ5qHcq#PM*0o8!8%NEY)Fzp4?`;vZJQ zRqV+(#PlUHH!jN?%^9Z3tFYl-6u(!b9p~u5?4qALiX;74BL*Scnux`W>*z1@tu7C` zTYOjH?RXtuex)qgT=bPn)8h4(sc1PJricA*_PL+hF9Ee*sW6%!(L{G#pSAs=3R`ua z6C-$n@frMpU*D3DY|hk^LV&_NSZfFf2x6|*Q;Wnsmg_RuE!;2WZN3S!b$$%Kg>J}i z>eLm#B#Vg8)$k#EV1e=FI`BG$CwnW6Om~{Cod4l^OJDrhod0;8Xn=)N`Iw!};j-_@ zpF8v&<(}u(L_~6O=(N=sfrd{_?W540OE_qTNd`q06&miMZd%U44B`6qrC}!QC6E zZm*|#rS0)1AAE0vK$k|Ngs-ObK&<(KOe%I~u=&xacOS9Nj&lqJ^u;<&*r5p2bcWOWnk}^oW26T6Pu3h0_;5^@HCvE4mbn#k9;TIDN`nfItVC ziQ-2n>|YQNA)2O;sTU^T#rDHHHLECw;?Pb0fLvt}egci<%X92sCV0SgVcWjADMol(x~qjrHbjNl_YxF&e>T_C@x!%E2+^zAtbnxqQ;+i;iYq9+^P(w9LFcJ^L^~^ 
zN01vQhI594E9R}Fj$iWB6b)uLVpT7vad)3b-(V9HWp|=v&EQN4sC9bvmyXX0d5Sfah4Z&9# zda*)}5{iRH`p>sWv9yO!#l8cT+o-5Si*GkZS0FY{2#xuG?W_m#@2UsbL!l15*Vz)T z?xIKe3ryl~%*+^|BhQ=ajl3xl6t+9wDj^c_;XcL}{=?x}>eTa)e&7TFVT27*B4jQU z*g9uZlVstD50CjDnz^m54Njf%|2PeD-5Dim=5218CVQfr%TK{YHzX(2=XPZzivnUG zQiii(m}~T>PEYuTcgQyh)o{9jL**|g9o@kwnxZ5jlAewg_t$EZ_fuTRnHs@r4#KmA z3km~Q#`%n?fJq6PxnClL&rTwN2w;C=bA1P~|G=xUzxlsmYlTz@=!}9kELZ^w%V$G& zj?Z1pCS;=Vi8!sdwFS=)lphcg0GEhgr<)6RcITKA;1EH<;cVz1UOGGgr?koZXJ2Uf z%bef*44soOzk4Pf>JM=x!`hxb{n;KT|V-{>tyf4#?f|o8Erc`;8q&e#hJn@Q*?39&Dp- z2sUF6KGO%yR)z=l8Eca}+SFGd5UxJoj({YkIIIREaNc7+sa~CN=WRsYe0>2<)QE`V zkQ;&Q3K+z^c3^+eHY#s~;w<85!lE}(C>5Yw5ec#Afc0#qiVBpE;*P>0Qif)W&w(<- zkrWi^p|IsdHU4Hw4a*#*&m|ss%$`;_94zf}uL0ajAi};S5!td7xLVTLZ4fw{E{^q7 z;OYeku?$YJdt5ErwV=|%;nMjq#ciDdx;g`^V|Y=iloncq^=^k$%-k{HR?&Q|t4J~l z^8s>fzJ-B>VScDz$aHUsPNU3?;HIy*$=%OB*n$@y=Ue=hH8tCb>>JzFUuh1|(GU${o=0?YL_{Mer_9=HUPu%-G zsAPwGiTy?GMLr!5JA&;bmM%pyvA42z*f0xG5M88uff5j87D$HZ1rX@fYj(sXpyO!` zTKo?0L9}=IFc+X>?pp`YrzS8lIh7&PVNct7G``p2{Z(CL? 
zHZ*+>2neIgGYTmgnZ^Bn!J$x!Pvr+(EPx?6+U}c=@vaBm3*LC;2U18vy@M?z+k*Ef zAvY-YhzW}=*5RQsw>U$H01JOd_^J>Nif3IYXGrS*L#Ucc?GiqQXKOw}sBEZ4i*V$v zRnXFAhN1dg6`9DtgVneE{R~xmIhl-=mjgo&jzxym?_TzeTOo^qEC01Gxwl(w1$Dbt z_%%6L#KxW|Dw8tMGJw*kE&mmLp~?zj1Qm7s_e;rxogtdv_&IHFnkaVdr_m-M{q`^l z7@l6xbzB5d(lYcMS58+`7on6^RX~K{OKA5I^ID?T+5>_&3Hjf47ev*VafPi&u7geb>{b(PO)4+{hC%=i)=n1bw&KCr!hAqb=6L;T*zzY`Q5 zB49qI!d4-5tb0YA%zEiYhk2z`zQ4ahipA@*BFSGJ_;DoSzq!#$I&UWHz7wK~pxHvt zm%46iw2X|+XzxXm^OY+h$)@>v;2s4X;zpS-w&KHvKlOONH%B=hOD2=e#)_%p^7$ell^}@op(6Z{~P~H zWL3y6BcyEElqj;ZPBtAM8CjXhc1lK8X7tlgBHLoX+FWyWw%Kx$s;y7U@`jPBlBT!%nQ ztEGqRB`Z1Z-MDz^^4|yeuO|9o><6wM_1vG&IXHPOmUzzG2(Nw%#`(E9*Kg)yWLlB#l%qWh>gGEU zx_}d?PJI)>`%zNcC}=hC1mFlgKmXavv9q6_QaGP#&7T&m$W+b3qre;{MiK@^!SF#^ zd2um~NaqtY=Jc1eP4&3Me5lYPqJLVIe??pUIaHX_JpcL?^N@)QhKan44=R^I0aP{FOoP;S z32ne8^WEjk?T+_)lBA75_ztctG;!SvBYl?p95 zGSdGe2W*x8>AiWwdfAv53K8mGiegkQr}n*UMcs`32k%&u*+Pp{x%qjk{u^jgFmK|X zPybkQwNC&OoAsR3zCkgo{4J80|GSA2`crJ}`W9Fq6&yTw1Ccn};@8)iSQjmg#0CRGh z{Hv7Ws=B4($u&j@IZKNR2k1V#gg-8DR6~wHsw6Gos`_VGsw-_quf4k^EgcT?5CkF_ zy>+^y<;M+eYhmlL4`3w0td;m4NP977-C}RG(pQo-SQ-iTFz9)!rYI++_1q{QF%0=F`dYFuUI23c?c3%?EeeP&9rneVZ;@ z5@x6&e22TqfM2U`(d<|n+o|4o+C$c@TWUS5Y@`vnNPFFwM9lj8miu5x2iLpX-P$)Y*h{} zbvWA}X6~YTh=#gItsI-bZqc-+cKGRW&D}+aAhf+uJP$meo8ad7RFe2;Yzk%4xfD#_ z%vwe~&%(wlQ4EzKVbozYe>tAXmd z>!Gi=*vD#f!a3)%Sjvkli;!r-eok zmoU>ipK4lJW|EA=TdAn{c!wQAL5}qk^Hw+SA#poB%gx!KLx(P#vm#{xg#tSg#AV=t zQCjy`wXyM6yYYN=Gym_}B}@k541L`5OO_|i26{8~5%^eGHb+{S2Ll$Zw~c zNo=!UyqKi=t5nmpxDg|NM>QEdS^O4o!ii**v8pj{1oHj$XJ%8KI>(ORuHSVdCv z$>kZ1Rn9HX!dvcBRcy-;XdyWUDXgC8SYOe!rqD)?V6gsPrw{ZvGH3<7gbBKZNhoL> zb+Zq@Bks^neWNa0^ODiK+#j$3O*Iz$caI{v`RcE{74kR`lsX!ErvCnIgZBxmND*S% zwsD#m>deRQi}C-Qo}cw@dLCO{0R3ACP4RcT(d_$Vi;IqX+PXV&>9XT>%d{Je4-=!J zUOXBo*XD*{SHgA^l^v)+?x_1j)jBUb{r!G%U|FgXR0$wxjTgw|ltQ$`pR`59xe~p*&CAp(ncc!OC)Ni)}lx)fqeoocTl ziGcdt_bYuArvL^mw=}rYNPqknael_XP?xQ;CtRp_dX4eHK(>u8yNCwv57@c{ObH&} zqxu7k&lZ@rcmFseto@r2;!xN9X&z-jYzK$0<}2ts!&?FEz*aXlq?5 
z+rsrF?%K_9_xMv6w!LvVEyK*!U|LS=pMF=ka|ba;-nJjl5@Fe6kS*Ms9y;Z&LXlBk zo^Lm<(;EFY+)FNm((sl@*tTGJ?MGhr#F(rsabo=UHHhRO_jEJ^+sr6P6@^vjzc(0v ziu+n@`6)T-shqzBRb6h5sQKJ{G|}bvp>*jgog&tgbmadgYh`EH$Lm9;>fX@L!mJb^ z)_wH#KmW`sFqu!IkMMmTL#rfE^*eMOKvk#HiD}QdKAeH|l48EDsG>UGc{l9|X&}A( z!4&lBATRtvN9Q<5b?zW!tUb6;*pr!l;V|&n6IM^CdYENIBqgc9rVxopGlTvdD82<( zr#(4&d9(FBgf76;UIFB1kCZ{e=zg348GrDQ%3>m4B#PJ&7Q@)w#DACz3e zUkB!O-Qtq)p|C-+*|f1qf<%6`~d&;9)Jb1=8n86Q$;W zFt^S61KgJeJ+Z}N@TX+x0V)!M4v^tmk|u}E0$Y1lnIb2MZ9rjtVS9&c5JEGP)o5`E zZeV(vFKG@GfCUu9)P6I}_F@nvwfLE=w9NP1a;D)S za6WER5#gAWPV#+7Tox9rBU_mEv|riIv^WIzOoKNtVlHU;$w(fxm<3FEa1&l*_ycuK zW?^B^(=pA75~IP@FqcjMK;Eg3nI<pZxnGuN*5Trx zcUwK=a?_{h&u-N+Sd_yCE4r5_d;YoJrpv!lK|HJ5pfNq2G!j$@|nmEH9K(*x#Qjhw;Jy794dk<2>2oFA5 z>e+i2pa3Sk7`Q{GyK@*`j|G_&KEi2lSQjYF`n4V+Acm416!u8zCsiHn=f=}`=_IFM^ZNV^s zk-<8BL1&0*C^m5ZYICjicFPVZm=Y8jxKqXM5vK=}NFvEVHh1H0+GGl-Vz++DlF41^ z<+~6hoy1(#;@Z>IQLao^A<~C=1c$-Jq^0cwYJ_ z-n!1X#{t@kQTO3pH~~dP`Bk0K^zw>a{JPX`EW#iDuKIp;yw{L%4hfUUy0RWVNk%|L z|LokjwXM)8*uldf&f<7<18>!xuJUsFlSmCKD|d^P7#M~U%%FxU9o24 z_Zpq|j5if658SQ1rSi_1hr{j9Ju?mwm|}`ZQk_pi zI_{tdW5c!c~p_3GKPo6Ya6c5BU5Y zmV95zGD^(M6+>9SvTqt(c`n@s~kKW3yDA8J4&RSuBf9jH0R|R?z?dmi^@*b z1Nmp@nPk%qQI^v2^@V>^Dgf9^*gc$|bU52nkoJ+adICsNeeA2r8qGLzm;0QqaYDQN zvXPI{q`WPEXK{k?7kqEH{OTujufJ`%O&X!jt}6cdSJ8K;%8JW;aWN(~rWEo+Zbv{m z#^@R@;!!3#3cM)Mkhd$zL@KQB?)x5*m7Xbk8PU zFlu(j)8=Y~Gben;YQJarqpS~9QMBUp9_XmSN!gpjiuH&;_kT{jMt z1zjNAyyVa8ZxM)0P?}ySJi)Y!OBEkWZojE8F$H~DWv{IYsO}T*luhgruC>yVJz)iB zGaILb|JeyD+dFXownP-f>H@AZ;@(!&yIbt$iyI$qep_{X?A2dmeR{X3;2j*J<~vEv zUAwwe{Am@Iv&$1XsnajY3OCTsv>mi>F5I*nP?V*EipP z={;(8=%qn;Ur#_+Zk1JWe8T8)CBxusAiX^yuwDHD2+U>tiRxGL z5b6oF1N?h-L3H8FcW@y7JGK%7HYS)PRC8k@iHv9Y@6 z;DvX6%W$n`4^AQwA-1E$t%GcdV);ys2nRmD^sU>UtXx&y(ywUQP-4Top9wMfhfkU4 zx`}8iRaHM*ta!W)o8v9NU+m>Cds{XSaOWh0;;*mg+$R^RT z{>a-WDEk?(}UU#A8zNVea>2X5lyL1B;^K7luF>ty%j zV4uSE76s3VPhRmG+L7It@A?!tsPJwZ+_tNHh~Nmh-T&L)nxQe0&xOHFpHk}nVd!ix zf_v>}P(HE!ma@q=t?FgDe?LQ95Y|^T^8XBHj9X(EOi!`Y$Q0D5V8hS{qiU(=$D-Rx 
z#tTOb-gi?R+vq{crPkQUirqim;r~_Zd>6hV>~xn{cB!~HSHNWWuHzwa4Of`+IwxTh zy{^>Q;B5u2VJLt|NJ$l64fx&3Qw5?Hv1+2I=4rPX2v%6+{!j1#$16KKem%=`a#mEgmj^0U=k%vuiI*v1y>>4F?F%MB zY>P#8#hYG(8={i$;Df>YF0&S@BmOKIo-=@u$m04E-Q8>;+1sOP{CAY_OkG0ch1nfq ze;I%*kxsg|rySdb(Bu?WhUTaymSVqORFhbwg5nED1A?_y^rLY|hYBk`91li{goqBk z{kXXDrKV%3Y-JoU-`_z~t?sr21IUmsCQUu=OU<3%cl$eBK`STcuSiV${51v8$D}@H znSuE8g3PZ1U4BU4n7lateSvXYpWVW~&x=lA6U=V|w|u?klph%&h!Qf#39_{`@=+!$k}&C#J) z@;yK6?CL^XiSTPB>i%~9rP@!J+Yyp6e3lvfJ5Ve1?!Qubv{jF}-K$}Y!ZNSL6M@r_ zUPJLRa}|SXYo%LMlK1(w>O($s`1Z<=?5_*kOozhs+~2$Lyz7Y8=qiv006sE+(V+r>Jp>Hd^nHPgLGIQqKIFb{>H7*%%xXGP1_ z^@1Pzcs^~0_Rf1Ekj)brH1XUbk%Ftr_!EZ1iZS*Ob-`!Ol#Au|5ZkqgQZ-?UE>0y6qr5(;tSCd`toZlB;_XCE08in;%d$2BSOVJvpU7#zlnyM-O3;|eF zR)8U-mOIf#zcQhmL}Aa&%*|(=BvwYMxPi$eTJNzB(d}^NI)YlR7TyIH>^07#Lx|8i zJd-@_V5_v5xt3>T`x_n9&41$jWZpQu5>kebQzzn%udYUd7ayW+$Iy<(7e9V z2!3I(>wkxE;{^EG#UGlTrqd`+x$<8pe5XiD*U2#77Kyl@$+xSD( zM}d#V-=m|`%z&rjRqrMsHtBE)bST0_LW1OY;9&V#HTqmZ(IMSsIxjXk<2U!nkih6QkRP9-cGN-!B7d+ zu5vmOB{JW8u3L<~fBpqMo59en;D72@+0el{(HJnBbHg~Lh3>sY7~k`{?cOMR%i(hB zyDpnQryqK{f$asb4XCwfT~27$^z1Y06R8@|YO-$Dz9&>=eg_k$Etp*m)O)%@vtIFh z)@#~l`4to7EErKpGbg>3e(ojUY|*#0@Jub;bXko-$}9e08kH8vM_QsaQ`D^H5b4nFc^Yfryb`I(|cV^d;vs0~%TB@dF zQUu(miH|I=)vD&359DP3>OXvf!CsJr38qNc=8ij@2m;N=H5_mLH>zHD;m61;L;c2C zg#pB%)z}vbsggLb>}^9BjDY>Q7aKuIWtJaw-NO&2Po(#MK4{2i-NbIzve$}HvCzp- zuG}ISh`AG-QObI&^fR}j{zW;>$8wJ@YH9?NuzvpU;!U{~-!HGTl{7t6a>Y1_S!%_q zdpE9k4<$9tg;Fs z_0CexNlQj$A(8$8Ojli!MnKGrhgt1rx4pV^a!HZXQJf)B8)idcjA9=}$*M;9jZifP zYGbgnpH$2rFe;x!J$iH^RX1g_&!2Jj-@Pw^2uq~@*5k+Ps9SVR%wPWoJ80HVp6Qy# zdP_;me1`pNbT4qPhVUvwN3{C>2aW9SH9qhD8~c@x12Bl61^7Ppid`#RlsHeNrrJ~T@-5?j)VD8YBx@Qh$TFcln5`&C`iX3F?GC_RQ8m>(mQ2@qwHK@nUIf zSuR(Rm)YO__lPt2j#ElyMv5l(>pphSi{p0H)#MIYbA?9ec_=+8>y-T6j#$y+y?&Q% z*HF$3F@VLJF1RZZFgk<%dX6=oe|Q|wyzWH(mdrl5@H$9@XwtW3&iS`<@JqT(znCU) zIoDGD;@$9^*U#a||AJX0Dp+pQs4VvorZUGTwVzz(8@-A_Rh?mt-5dK9?$O=;o$Wur z)N1JThzc2>y`tLJ?BXW-QGfgrhg6)*d1GiCLTJ_R>JzN#S@>kL{1!sfw_@W*aXBI1 
z0%=RZ%Zt_B-p`Sc7r;6ETmiY9MMSSxFt2Oi${Xp!m_#^j3oXK6$krwwsx9o{^`4_MiXo`Ic8kV1A%sL|j zSG0AeH(rZRYh~8;Tz%gmi$G|$^)tVbabzfnvhqJ|Oo$^X`EqTsS3&B+>x3$qh6pSf zhLx2^@p+?Q6stIAJ@#hxaro{PVv-B}yCs#CmGhZl1D!-gq^ z1p)Gzh8$hK*npx_sPK=!!!LQz9JlyM-9N>R&J__pHHjSmxKP&9ST6a$ui9)lQc}g> z_bm{i{k5rc+v2^&Z!hslOupnu*lU(FX{2H#VM%b@UzbO$-Ct~$sO9+ezTBTTk-NG0 z<1Pm||CLlDG|vJrT0cr-;eDL@mY^}(qLpWX^ax9j3~;A##}X~J_qcb3C@5y$`Bmk9 z{=m)M6wOj%$)Lu1SW5C~TPAp+S)hS~+HH6r_)&X!en;ikgGEb3ZdFC}@$+y02&_8F z&VP~Rcd=l2LypC`1B>nHCIWTsy|q7dGnsd)Lu|IFwrkz(B6P1MA0xl#=pcK~*mD>X zHP&3|u70r>sMrdv{ou4C&l-7*B;9OqVZ_RSV_GqwxRdh7v+w)%F=Pbqv6<_%+t?&G zN7ZSg10Cd2>&?qiHkcfOb?lu!<0W%^d+lF>>WpnBnlpcuyN$RdZXSnpN$wIw>xy>K zgnc@NFQB{l=>`U#U*fQcky43g$5aK&^%3xBR%3#V_z@@?Wo~;08~oU24;B>f^o_{p z4%3sD#u(L>lN7a;9>oh7<@*LFnq|o6uI4g?>lVoTa}Y1;Qak#iJub$R&VY42dOOqi zot9AZWdHZS>8iDSNeW4X!|3xcy8Hh`%1g`~!Qry#Pfb6VBvx`m-=7>S*Z|;V99CR~ zmfvY^AqZ=EGW}uCmF4kKadz<6LzE{*CN z&Mtqum#p>U^R~BIE`1I=zJ$bY*OA)HYc89G-HSckMrb#Rz(K1m8HpCZqJ;c*xvMjz zC8Xi)rn^}A!&3X7YkPK zV^;i6}Skc3|fY*;R>ixX(DUzqkB& zm`mFd$|eX{F#7hIqOLi$0Q>IIg}PG5zIxsdPV$Zrd$lvIHTV1Babljc0wcIQ@2Vx= zOC_+zf@%hG8a{*39w5#;&dP4x!_y*dXF1%49+F6|NheasXvy;OCjmPsANW+458cs7 zE*&(aK{nh3j7F_9%Ak#`-qIeRoe(FMW0HBfCZlQ%FwaCur9El%PFCBxN!YClQa}sQ z#*>8`yKR8$UlGpfoX}-zhvY$v3Lx*3RGtqp$g1FpHo2vJ=~JXgpfz{XAURS*BExjk z`qgs%`>FzIt{c|4C9URM)vXl~oRAyaN%uRUV~l|V4X6WJ#b&(T$FMvb;7&uoXn2ji?f%6hom2)op42>kLa080_E&_nE6US3{SR?g~~Fe;YUFsIW& zUZKL~zzzU6HJNJ9yZ7wJO5ef3$gV(tu;v_O?cX0?0s(^gU=Aa z8?bB--}T>g1)<4&+mi7@s&oZYknkG^#4?~#+CaOgSL-CA(rDb3Ae4LPcKlirsV?QV zcIN6j-^QS%Qt{C-aeu~G%=VYr<()t6;mqpR);B-}$wEc4xPemKo>!NSOiawzCcR9G z0)kR0MA?D5VjK+25chM7o&6J_MSfN44;JiW!!$EN8*aYTqmM89Sng=VmYIcK>IFWq zasYW12VkxD%%`gbS#rn{OmtJSLuw`M#a8cN|yhBT7cL#xGgHT=nuz%L?;tC7FV*IL_7cgdOv6`9_jM+4>RA3(JkzlVq;9~R zw`7?0+eIc$tFclRpaYu2X@^Iz^zA3+%MLzxI;`#dN;@edWw&|~dec?MNn z(0s;$mVs9l5873*w^GIO=?8P)Kh8hBRAdmD9M$vl`2$|Ud{_?3P*|V+jq%>7))2k2 z7X(V3o#2NL1&o#cZ~Oi^CeRX$)!NX)rTqiR=5X1>5{17EpkOEgiVpFEk^zbTie6!UvUrRym!lm`)zZI0%ikr{*(Mv$U|A4U#9Jz&{ 
z$*}~Hbs}vg7DWcYE00KwN?d;Ediaaw#iZQ}QxyzQG@Ko7xl|fx z(^M5dnE^r6>Ym|?rk!{!d?p;Yn2vW>oMq9Xv~NJnsSYEz6S99V;loak*Dee4y}V~T zW@8Zc6|Ydb!kayA4r?2BMaDj_7eZ-Es6_(`ayKlW~7)! zLLk!hZs_Q5T_YPAqa?AWx-8*q38Qn|y7@iO%<3AldKPx$f;&cL45ujz3L*wl_tO7e zKGNmG619pE1WCPbOh>~%dmU}JeEP@+!A;x=`c&M=&{*D0^R`fR9GV!fh#a{x;MfxP zaj>vK84KNHbuTZgjLgik2MjSE!!u?4op1<0moxy!?Mvvs76y{-fMYhN?pMi6#tfDW zuWxIBcYfWOkM^_vD)6{#o!Fxh*~*&t`95;%mc!Q40vn2c+CNd{_9^INnA3}I$@#u} z>v*=<^@&YNiYad5m91GoINgsAvuq#2cw2!hgj18@dN7)r{qo)kHxn8miA%G}nwlY) zFze9{|4zKLwvSe)QV6ep6g2-d^R+mAu-=5msGX~E{4a6KEkH?CIHf&k* zreP_5Ey-6M^54BjaE_oOBEN!Plo&t)FvN$ct2)#-FLh8HN9{U%yU? zL*H{U8Yf1Tl_@`s8Sw$OkQJCtf|QhSK3vxs&v1YqXcQTUAl;A5dz5Aybas*P=O?~V=EHkMd=hQ)q!`>Xjp*i4X zy|SYPKyrxkQ#3MSC@U-Lh)8gNsz}?Q$|C~iYB+xf42s%2qwm&cH}L{7^JxhM<*Wov zzKyYaD#>69nXe(&q|B#$Z3kLT#D(d^K?Gcvte8UM>W>1M3b=#?2(rTQB{tD#KV96d z;%)2;&rl-#?+y>$^0o40B3b2j51t{t#O;uJr>3)$MM;2n|NGbStL;&k>$FcGf5WM7 zXy|KPALov$2lg8}vF>)(XFX7kOxfX7Ut?vPIKUa`H(O+jnCI$)HkUhRyqr06)KZkk z56fddau?~>@14`~8pN>51(&*4AU)|B_Z6`Jd?u8le!ngBwS~>eIUZ@gfAa{PPleDS9wD=$onVaW(D1)2VN+VK<4dC3rK@^ zdG0rte>Ke@8}n9(oAIVnA4h9355HM)Z#1EOe4&49Hp7+6RlRX}W3eHDQ9d=R#mEfS(fPCAieo)ucYdrU~=?)a}G-M z;4s1Q&Dv;DuF_zRh1y3EGrG5Lr61j*`_-<#s!ZS>mL@D(0=&(!@7%3QyvANdYcr5i z0dem_qDcr&wVZwI>O8F`>W(k1?(Ef&Qf*1L8T5^3v?TcM`{T=FW!{8Rjo~X0zn1Cg zdik1H5OlHip9|L>+Y|p33T=H}_j-0vUD|x6pm+o235xUhg6^$7_ZtIKU`cLO4&prNPDHMm7+v$w==EGnr&Zekr758#a zSNIXRYN=-MnY;hSqXBNk7}x!3cDR(Ur)hdt_?2ITSECtc-&MN*jtW)B9BLlaJs(3k|=1RB$J6Zn&zxT!E?tg{ALe{BRp6(rxt#%StkZ7qG#1TCm0 zEMbcRHaaP1b0Pc=kTdBUy0Ia;-BH!(DamBfd+HlcCpPP4p0}`De{k-J7TQlSS~G=J zpwezC5v0XX319s+kJ$#pjZM;XPG`BXv)!G(m>~YJsafIFWnkB_Ja`ZS%P;rnf5Ucy z?0#%>A;Shjv#V=j9V(nqyRkzJju`PpJ z_nUb)4cHHb45Wu{)J+xNBovk+C%OldQa}xvwndpLdvDG(hC_(~Z%^pn39J-=Oq|*J zZ*4@JQO(Iq3h9QCvwd@OP&)NzhnPI;TXvOLC=g{}f9Z=CFctSc7Dgxo2Ods%xj-p* zf->VygLEfR!Z%2TQ&dvT#!Q9Po_&1KQIo6&yajRRr7Li5`2*g(*;tb5Up0=VxM?!X z32_;k2?=qKi*^Z!T3K+hVD|QogqrJDq5d-vKQ+(3C(P5SGAKSN@~pOdid~=tHk#6# zH=^U?OmK7U=o_JEEA`mBv0?8n4bhZv@WHucCiwth)4K@7FgVFzIz#D!s1ixY?C}8t 
z3>=`j&u1jA{;5lw82{4oG0oF7srP;@%`rThQQ%v0Uihmi@>g_bt^He&cIqsF9t!f@ zh6=Sq-p<(j37}3f?f<4K0-YASok5Wn86W?8eJ@T$pR~f8e!SuMkD&isEl&4nh1u+K zvE$7}|E2;NEqNA$MILfHw$qAY%n?h5GogDV_EB<7wI7-k1zDO(Es2~-oGE{Z zxTV|bfCvP-={c}{JyB3_-u?T;q5v6Ov{($U;m9FY zr%>+)coLtA<;Co}ucqN{1H?#1#Kz`yRLj5y;C{UF@^I%=LMmRo^F{z=nR*-rzlBxM zL?jIv(#PE-uQETpMD>r8T*ZIqc;6mk@CB*Llj*mYI_JZ`Z#dk8$hL2;YZj}U+=O^} z%*gd?4`c(b%rb{#FWJfyFz+Na@%4S!_v?)v9Vdi3ox9{p&a>Q)!FQV`H^2uP2a7`^ z7DHHJ(aZXq`jsD*HGjSDM`jDf7j8V+r3_bgTDuGSj28|AWuh!plYIU9q|UBXcdM#> z496*xv$LKAML|+cICzSem9ylvR}j}&+JrQTGBPsu+T++;`xmkoU{%gWv>NIkbbBya zx;NJH?J>H`%irIj?O9&&QQuLVP_Trp($BJJ#7FT_rBK&y7)hrrS=D9ymwl;KQewAp zldl^h`4xb8->~utFY{BS;0d|{F&JLoUsVsk0QmR}SRvP+Wz?m<3>_tphXnp;{`Xg^KT3#~l=aun2d zn0LL>8t;u?)3b|*4&rYo)<0SrpJ;S2SL)#ZeBX|UFZ;8OeQ?;(O3_IgLW|*AXA33I z-qQWO>*^}rvVoI@Cs*w^2bwnj%(zMA`{OeU0mBlAO`eVxhvPaTbMy1PbweEqWh*;( zdRxrunbRM=)ligC9LRC80h+YXOK-c%1Bhpiik@f7QM>|^YI7V+l(W`LzuUgHK$Gnn zlXwSjbxgYjB|hF`KI5N{6qRy+rhsI$=Xg#9Cg1O4qQKyn!ts&{p_uBqA>pInmoUo6*CeR5DJa6ezV?tHc)W&_pMrh2mC_hmrykA}VcVtz_a zWO6Al%=poPUA6s8Dje~X$DK>puqbO~65aA*azn0II72Ld|M9+!vd6B1J^%rcyUiOH zI3-Z$r`n@4E~DXEX?;1G)sNQHt@}`cQ+CrRnLi%}+BSDro)I8X)+dNB2jO2!wIo2v zir>0b=roXVtk<#JYWS~0IXEKmLD7v z&YFu$jLw^^e{&IjH5p%MOC}-35ANYbdz4KxqsN@C_^kId+8tQ-kJa7wt}`-*#Y6$# zOUR{5nA}(1RpiBZ`DRdX@KTdM>ER_GWh(y1&`ttWA`L7)%AdFgbM2WEMQxPf#i;Z% z7Pfgh<8$_RU~v^yuPI74tSJ>g5A<|X5`GT?!i_a08k0}bAuz_`xvm*yZl zYz?7>x$Mo%Yu%aQQjjW7t6%Zv>Mc%07Lj+FhqC))Vn5Z0=y}uLRqZy?uDE%e4I7u( z%8(Y$osdvzwIc@0{F5_}y^eji&^1UDp#iquWXJ$`tyeY|Xta4`sHLEZpP(dD2X;t7JS z5WC)=nOF+3WGykKV`&+dpwYG2hS-AvxJZCCcv(g@X6tJ=mHG&8#`{4UoI6bCS?pe) zvbwow*!lITpQQg6@uD>(+VM!R@%)$+Zj)RH4I+Q2*QmxJbqrAkN!dbiB5L_e4^6lf z=?Mw(MQn!tz^Ke&>lcg8`Ki$I&$FOryXq}o!)e?F`MDyL>+b&R)|<_7*{rLcn+@K( zsBev5-8GsCs0zw{`y86D@43Yb?#Vvd2H()lK54(t(74$B*Z&@1hU$IDdPP+w&5u+$ zE~gtgt~-6x^%B@+_td}p_|VX)^5Z62yugqXLe1UfN4yC=Gg-J9y}eOX!q!6(aNtP< zaoM(F-F`B5r?C7Xf~qUaD&@cf{WR%5HSuY=Pdx>mW#=&Bd~}zUJGQ5zITO*I@d%xY zPfrrifmw90$u?a1)o|%7ZAJYa-9j_j-S@q3o}UMtVELE*%}<*NUsC9w>4^+{m_%FL 
zi}90zhb-IZXd6_MuW)XCi1>{^yqNB_^}FGRt??=zE$l$^=_5t-du=yw;YUj!zSZC` zqZ5NJK(0S20iGm;L&8i}wC`L+X2H#w<5><5*~6h=F@<_!nJsWH`N>>_kaWK4n`vT! zJw4o)fpuCo!T2@{^iw?gXgU^rNPC^zsJpqd;YSQvrOsgc15o>Sl-)=aegGT1ZFC5@641hNN%Imo| z?U0NjjyhL2JzsAMXf9stn|x!)+xzy8ZS4}+MyGS!FiDDv;yr*m_if&&N#8SdbEX#f zds82^dK`(^-hW_EY5Wa(qgK#OBZ`~Q{=64wbEfw@t}T_j+`2kqHJVN(pz@QhvnGjq zcblR>;+H)F{}-W^^!m(*RrcTh>}4&b27k-h2j-Tp*G}5CUDBZP8wF7%9PJZD?VdU* zlQ=qxJxli@08{`3hHAS*1Z9;27qNXfJvbu6frZg2jXk1*<^bGI)SwnaOs_upG;s@A zAhV7}*$cUR?IQWSB$a-UV!KB~5O zW?@P|prvLCLh`P>g_HhNSBCu@kmc7B>7 z_z4vGg6`X94^clD3g8g+qI#T8!0EPTv89dO2dA3IZ??5sgX+ZtQg~=2S>vlIIk9~n3lkJvC3EOFUz=w}+!o5z3rFXj))`AI zb`M>NE@anx?n?N~% zzM~Q(62Ml=+VEO3ttUR6=#wHIP-8+FRWNg67+_30LZ+ilYSsXD!g@;HV#yqVh)NKc zr=07KHGz~MFL84c#g2V5=Zzlv7r`yR;9DM^Nx?Zr{ox*!S}u*F&&hgK*f~E;r1&j* zn`U1N+Wytpu=B!+1^*kfY2J0D)ymfjf(E0!{AIDYZwXePhK9TDq(K=gDJ{=`EsDIAJwVVn?y>r(b{8o2kkK^P*O}g&$;g4IB)XNS znHj)-SGD3q33&c82X{RkR^|x6F<9Wv#Ja@axTX+HM7DReSUS`Dc3-RE zok(XgN|Dr@Z;v*xTAFTIl&Jx=DTa-6fm&*S-bAW#SLo5$+b^5iwmPH!rFy?K<`=W< zOd>4`eILm`Ng=W+^uJN!p^6N_f-jl(y5(S7Xe84EJDv*%(hkysRIGQ~Rl-vrMhYsH zOxNpw{pSfg$Gf-25?2F@`Yt|lg6oTmfI=uk>v_Bq>WBw{EsIV9FEn%sd|kXws~|0LhM&ZOWJ|HzcOAyYZpfv*DcrBpiW8RZwcXf_@@_zHZ^dX zMVwQauR3X#4XJ+C(Wkk-+}nvhLIm$gbv>6M4_>!csj} zV*=fSG*Ol7a$5oZ>ReUCO*9&LLrManodgSefjJ;9#$?F-`+YNMGyY8uTK=-X-%UiK!-wJml-fe&W{M-cgs<%u?*f3rjO# z5jmv^p4?bY8kIiSnmLybRA650zZSl28F)?dclss9`;yAgz$90Mjtn6l-}7HGp17Vh zx$kVV@m=2a4_~EBC;fa`rYg8U;lIlEHd!jLo%$Na^w8s;Q;Q-*=-q@9f3rn(f1R#< zj(QppWfi)}K`EW-Rl5y}b4Q}=z09c1?Lp`IjB(mzUQCVrJzo^-2<9FTp6 z!)i?)H<5jrG9ge!*1P{j@L+?nJ7N0r%2??XT}k*bJd)$U_=RKuh6j!euSLpqk@U&@ zxH>J)&5>HFX)C==X<@Ea+il&m&se-Kr#DGSS#1Dqemxa6>kjjaB!1+R7AuejM5-o< z!PKs$pG_fB5tqvgT;P|Iryl#ON-*h_h>Fc|S7enH|8#Dw+}P3<_WOWQ!U_MjF%Xe}QLi?7dgz zc(+m1mQIlB1$Y^I&OJnpz0X8|ob?9+?EPdAltCIx0741jM|W`ZH#U9hdHPNI=r$o4 zbC>h^cAbw?(zKoJnSUl->gr(NY_r0j&h1~2@eJhr@S6sxQCnkTim=^EV{bBBgPkyM zSC%xD*5Pu(j*!uVUtb-rq#SP(wU6WthxwBSXC(^nQy?&5q}dVaCVAI$a(1=d84ENT 
z&<*;kceo#fz+2&Sin*H=QjpZouaQ*G=p@0M=qhM40{7Q3C(dqj%<2mT*jY@0 z=ga32QK8*&j`!Ew_U_)fgUfw5#-VMVF+HDb@KxkeIUT^YOb?@FQxN9qd z?AJpP%!h3~np%J0IxXolah$Z&O6mJ7SN*1!nUtT$8KlnYdc{&XVCLiiXYmk*C}2tn z>Ol$^MZ=zl^Cf^m2aGo6LCrm$=inrfKHDxYFW&n3z?4E}3-CyWKY69n$mp-I`u&QM zB6uQ^`Q`OmkuW$zEP9hD=ie-&PFY9Q^+<&Oaqx5Sf1fQCnvbUU!nD%X`lSBrXM79N z2s1@QrJklmPyAz;G#FT_pkuE%Zw=yXG z;+M+%FS?Bvg{0GhZsW`|U{Dsen~VpQ#Vp1{-0Scje}i}g9}4^60yG!5nm!kZ{{M^s zQvNM?*k;M_pYE|%c}KdQK(7(%*8W0wp|AE6ZD(YXZ@bq;4oStUH~UBbaqOfUa_e=fT~JAHu@S|Ag=l6nVNr&_%yb zuR+ycum!-?gkFJ;%Kq?jBYe%>znI%#(g5S9AQ*%__BQ;VATpj1UvAi9Re!VjuCK)X zwu|(; z2p^bZmvSF1z9VYh(e?Z2DlCD$bfY8}^T@C2t$YP;Ry-MH8>R$Sg0zs)w3i_A z`3#GTu+?O|2mI2|Mv-}})aY}@L1}yp6T{7*m}DuJU^T`EgIP+Hu=l`m`5_VhrPG}} z6e8PN8yaLVq^WCjhMrTy?&?(cy{KK!Z_Is?XO%2DP8-oH%*Y$YZAEIvCv@om9f%X8ci2ZwE zxk%|dlZ_tH29If=aP2$ieg#ivTjSAeHz3_V8bfK{h7kfOYJ@}xVGWSwbu~KOBS{Sy}=4`A|BymE>brUu99{Jp2j^DF0o|9`# zEFxcn(*)u>wIY}8j}x9-7b~IM>IRyWG;*%)UR8V=fsYHOIk420ys{X$rWH?{!e;P{@v;O-2{V7o0fzS`*YTQRP zfAF8#PAOzZ-~ZZ=W8LU*zM}Yo``|V6q4CE{!|`&9%hqe7jX4Bw>8&EWH$Fjg8FGEs zlvL}chZ?;dYk0Y?4wHS*9{Ueo5efp90<}zYRK7~bEN!>4{|!E?(X{$ptDZOAY*sAH zLID}ygR?h{UxuuHPia@?ZIppL>%B9t@v|V zUbvo=3>h;g8!rp+5Kbk>qCdYW$gcdPe+EtuL17OZ80>$3Rl`$lS3CfKkJ}<|?m&O+ zY`c+AdVbH86Wg4B@3GuqvtvkNSY`OE`6_r#v%MNzL7bie;x0{?Akq>%bWM9zMX;d` z;e)ydD+0}{^qS9}oq(a=UL>8E+`BZm+y0D^9Ru4!fVK%1oq==0netWT57^_dRm|Iq ze3{yepgW6lkiHctE%0VCCMc5H0X^w(5BU%)xZPI*U1y)|p@4ZWc@lIb^MBJ+2mhua<|h?umYY;~4_s$N zz**7*2W1F|2rbhyy~kaI;?>&2&bwO4E?BJ7p2&=*Svato+x~K}u+;1PW-bGcjKim& zf+xRNnlzm>|GKC~Q1!-BmcNh}PB0j)!^^*_GYa9IcdZ(%J&VSr+ zym(Pczohg+2sHbPqlM4Je^uOto=djPlp|&*T;}u!Tzo7nuKW(e%r-);{O4!!v9XFd za?1{rbb7sgZ5xMiF$oIoH*P@?gyN))>y5RHNHLe4js@o!_Y=H~U49)rG1osbAisid zv9~W#%Fqqn(&hUE`@{L6&8ft>00;tM_{H!TrwL%5bq6bpN7y%sVk+-N_S@Cu^dPzq)YIiD$4 ze9kFKF2_w*>~V5k2=A6sB@c5Iu`T-$@;aKKbqN{kw0?|Ad|;X`z8oMPKh#Q;Y6~VA z5y;W0*5EjL$?-Wj@m0ytTn}OmiNHlvHtN+m27dT(8~WG&uVUvC-$jMY8nJRdd{xsm z4Y>&-ABAAM^EY(wtluN`d-NlEdf9k?i7-XLlH?YR;9?a_q#X15JVjjaqQAbie(vdc 
zerO~cmtZ&>9$};8I9Ocp^HVr>@w$EDs$*LoD9Ex5Pt3vyL zZ` zgk~`?u|UqRv1~Y%=hLSV--uN-g*+;IPc)TtQxZd0AziSVt^GVz1J*^2JH?oKLooyw%q2zWG`) z*f6fPX1zcww?xr3o4ha@+iuvEd!&^YpVa>KzQFhPt!^^*t5;#y{SM>y-cy$i?h0p& z*b+?JE6_m4#3glX?6J++1-+GfxBdM5ukCET!$q4rCW>TwOY_$R;b$OfE!ULxO=H=X zI&;nqo+UxTT%vaH&DPaJ$A6o;uG{E#$N55?J&L9?TS<)A7FNYJSgHxBf3-Hi%VDPQ zbbK<;=me0VezM_7Uz}EX`iI%H%f!PexJ^MveDn7GD-dVfznrZ%RVfaMwWhgXDt&zI zZs_a>sm@z=$f$tlBa;I^c*1k5v?~6{9T!)Z4~EXJv)u>OM^R7dCx}mB|MrparP2#PGbgJnD!4yj5J_;a4jVnQ~&(gir?+*rXuFOZ2<*BWbXD z8;`y{n!H}DT_%ImdRo2l2DGbtW_`Q;^@cjlwh~us%a^?-10q3XoQlFmLIYSc=!1x5 zwx-MZFBI$Jev2o6DmPZpqO_`;^`52eD?g;Ga!u!cH{x9XS@y=<>p?hMLHq@;c>j3m zG?%CMujW6ey>#w+^M@8yQy(NegtkB2HlX=OUHqc_qJN{N#Z$*i8Lxj@(1tB8jN~ID z4Wmq%#9tpyhC&_7LLQP<|CS&|kHYO6ZHYvQD!xQ@?dl0HsoZ6*WGitS#wTA59%(Jo zO3mE_N%bzG5h4CkdJ`4xX0|)8&3GLYXIGfVinKADh6MI!L0X*J8qJTdJ)wWmM?_y9 zT=Sb+1sEB7?7HhqpK zIK6dR3r7kHbt;&6325Ii|MiB2N43F!^s4}^YH+6}gDQG!q=0bf79t9$^AK|Jv^)W4 z!jf+~`Snl#=&-28;_@cA=0|frYR{i1Jn()yEeQ~36TqyPS5=uGS~16YK(zo#D+mb# z=wh)bC1rWkYzMuJPPkpiynUNei$l8$wKu=_4l}>RG2%b0ZT}>FN&$oY%+zAu+}C?? 
zEV?7e{;Jfks7IE*u=t zAp^2Oh0)8j7{u-OnX!W4o21FEw>a=G;rVM z&C6}iiQeCB_i0BR#-RFJH}-y1S*XFN8+wGvC+pprWxcO#LWO@^MDA-Ya0?3NG1r&~ z#3fT?;20s z-DO$B&f<=K!<(TV+AS{(Kj>%0GgkBkEoPF$M#l7j@v-3$F7rL+xb$>C8Im|g01zuE zsorCLGNW5Vj|p4<=Vegp!@b=Iw^ThiKPQ2SCOt%0q*={DUoJlzx;U_>37X~8L$B1= zkNSJ#NRqe>Ra*+kxz1njV81}>lcV08p*BL zt66dC^yNB%{HWkb&mQNSv;4M^F?V9iwdZZz7aoYRzSY4e{7-&6ChuQ)mAf(VbuBzZ z{kdgQ?L_>|z@CP`t&zL1*05%J#(}b3v%&Q(lr59s8?hKnUqD3{jDRrTk59o^`l>Zh z^2Ip90IpjFCIGl0Fbey_Ut~XnTuywk>0h@skk;BSTm}6eCWaRYO*_C(K=)+4zkTE6 zmJkibdM}wt(S6ds&v0|&4hhU&=*zLoFVqkI_#6dYZtpSWLCgC9I1TY1KMthj{9fE$ zc#_Lpu&Xms29?rCoe67rswN0mdwY83K_iBg+Viq$8w9IGKhNwhw#W`$Cv*T0Iql8= z1}^Xc2KOKgQWez8^h8xNM0%k%74b%k%y}N%(mxa@jY03CqCLgmQWFG!oG_s2TK@ht zie5qqID>Xk@8UnNv6)=(=plQ;U}XALK|{#)SI18C&uqeTNFum#o$BW|3keZ)OPFnNPPl{ zp(+7y({c8_FIh|0fn&0cp58F#>4zHOgMb6cPm1zKSY5nrq}IqZ+xNUZ=*9&)$O0)$%i6+BE$534o0zGJ(h?$w2NfAqdvktPr*_E=L8J7dN5tG zea8nq2r3eczXdiMNRjwf)@4j#z~?1Xx6!J=Zuu)0H)GQVJ~Af6n~`ds zl=#t@l@_kz+U{BoGb?L+Li$rpV;8+KeBxh34r}Gl3rRGUK;a#c+rb%k{8D&Gw%im#*Xc|V`<(> z86wpYpNe==i*(yQ*ccIC$MHA}45X|>a3OPn%X-M z5R7qj5d|Zy_`JwRp>ZthV-3kKDDToj%nZnPh57S`?yv|#iEy3-Yirf()~hxc@osUU zKKmAa%5+xYxJlt$mLzy`9zdkCr?8JCCKOXSnM-!E+Rfb9FNw_Ho1QLExH?w2(=hy! 
z(9lpcia{+V#XZpbJVS`$C#V$Kzz#?9>W?`Ay?763nILYMfqVPyW0>VZQ7EH?iQ0tJ z!&W1=N_coz22=X;2VOyM-*~m`r}rZcMTrTgvP=C^mu9Q%ujG^rh$}- z3Po8~Y5Uu0)!ePa*07Bb?%E+Zva4TPFDDX znALH99t#lqmN0vUCP-KrK@m$Ikb~1zv4crT6jX;ZxN#MGf#1G2+~OM`z27w+9kKgs zw&gV3RgQSek#D~Jc$Iwo?k+vW=q8j~r{HZ3GG(POa&A1FQnomzdz9qlvh8xWQuY_fFubC# z{hiDGDH!r1oS3F=KKL^2ZZgW|JzMKru@f<}Smk@yQeLOQmD@XW?u`^kU+E=0007Id z+j35*jzl;NW=ah~19fa)FX11|xF5ZIS@ggh%rvPKE_s(gamvvyH#m0m1e#PGdk`zr zVfI1O#mok;DRX~YH0@Zz!rxp4a83%nHQvc+ z#Mqp^bEb41mg`nUnS)!ydYlqWP6cEBr3%dm+~|oX_lfQwqa!lKeB5#U$OB+?oU#$Z zbf*B*@)Y>1wV+rLb^k*KW3=y-hTfI|H-etLfDjhUL*?M$0Ds_)$VY-af*{kwKZ4H- zH3PF{!$>F##`aG@G6sN5cj)vKJh!Mx(ot$oP<{E5x?m{1)x3&oC=7a*n#02}tub{6 z+M{0vKT6=^3~LUDaFQo;Cfwhg8c=Aq(c&DPZvNmMn($Qsi2iwRg>tF-a{8_q);q_3 zg&$>zl~R@qocU<%ob920iNz_IIpw;bbG1D`WR+1NK$&a>`hPn;k?@w87wD7^%x&DKTX; zT@Pbxk2irNhwW7mS^`f$49vvoqnA(HQEq?NhzV%L6!bO+1Z;<|gZy%jwA&q=O)0{5 z84e-0Zd{|4c$bi7%h9SIxy{7=M+e(^SB)dSEkI`*XO7vGffpb)uX@LH3^%*-b{9RP#!W z0PynAdz5f)jF&er?BUx<`F+Z=bRk_VI*1rS3;&2jUnI}YB zlo{?{PCne(Wh*)-Y_?FYRD2&m;Hxf;-^j_~=XWP$m^*|mD_eoM;*N0vJ7uePX^0eO zQnA2)SdZ0PPjqhoQ7jj|TxkkHy89 zib<^n7q2hrfdtvf+|a-f!sABcyQ6$7CGx=xiC1q7z^GbIZbbUYpb^H|_q9smmA4Sp+1!m{8j68g74ba@?a92ELPiFSKTA+3G@< zYmX1wFTHt=;n5$lHhOVMM#lrb1i`UuzF9E=?XlJu1CONF3?}?HaMNbZ@eE`l$~Ul_ z^^NhpEWEoJ(%WMu9xDh|-Z{veI631!u;8yLB}SHQ^!&KjtzN{nv|~8BQSMs$5`BzP z15;$E}wQqTqBJ(ZN2YQXBhV9_768>j@pOcr~ z*S@o}*IqG*x-L}j4SrtiaKzFFL)wi9#;gqxWus&b?InlH7ut4HR~BHa>R7i|2Y8=t z?Zpjf-Ka)VkA*sQ>Izd6$F;i)>f53jLKQ40ywS|+{DQ}@*-{;Y3$I#Yx)@PkRT5VTupX)J-<&HQ1dT&@yg_?sW-{PuL*c|HM6GaI{6ruig z$`8q!e8>N$6$@XIv$KG;0To1J%gn>WL+DhOwVAn&ZEQP&b z;zn^#?5U?D*loJCv|aeJP~EEbJ3X|?S2 z6sNCh{}O|_(v8Qv4?8|*QnqF9Q}h;dD2~V7sInYI{rrAUlaXtnWfj$~x?#@nEFpS7 z5G6scqTzXFk>0F(qmV-s&-#O0FI&i$g^ZucX7wH6PP=G-MCL?tafF~S!yl6fr#e#^ zGk;u;aMzqRHMxX`R4Jr~vYm5553mB8!qr z7ask*kGea2njh1=yywpIo1^<&h&|F{4{D+6^TNxx2JkcC&wle!97sx&0~R&{SGQ2o_iuYaqv)aX7#HY>woTM%T`j_L(8T_ihDSJ(2Ap-(56+2`YWmE6zD%QUrUl>fd!08))##rab=OLXLF7D0F0%%KR_+O5dr=mtwhDvnN z`9Hy0QS=PZ1}_~zE=TyWVQS=O5UBbJaC)*Ae94s 
zTtB*p5ErnSjFG!VqYln$s9_o6et)IP95efZCER>Ayzb1Ad615BA{c9+R~f-Crure?=BfNnwz>6J{SKNt=Z$r5$)$Nl6@gpNf&N=-H##|!#XJ4&~urT6q1u`95;Irhrb^zB^+RoI0ogyh-PwM?QXzO zGBYP+xfH*+v#|CvM*Mh^BcZ zfxH3rWjb-sf5HO>B9=3`4*=WHvgI1@!hv7VuDgax?i9adGZc_4X}KpI_2)9M9+ zZry7_pPFhs#aT34<-4f}t#>^<5^sKG>qH0e-(l<|We-?p zi6nYsD=c-1CWV$t6cfIV{qIc)T!y8Mp z?i{)UDuNMnocAklUP%kY7j;}>d$LVyywE7gODK7Kg%upk3lsR5zJq);-ST$B&No`X zFgG!8LnpT^)!?OHy`zeaR@Yil{-m$=V3r!7-SkU)PRM!?nJc8 z;-J60`9|bkn27ffy@cqyzie32z z3SNVV=`A_D-p;qbuG4kh-JXN9M&;79okN4537USkp~~aPHp#mo0n_WWy9Ar?LSPoU z*bxoRIXbwA#_N@N5bhS;c=ivI2b67TdlzqXy-3xRw(s@WpD~Qyj7$Gp0-WK155hn_ z@^mPm`x4C8fJR{*l9JNbNN=;L6&4$pp+_z{+G><=Q1j+#44Cb{z%{LM9_x_~yx<7@ zn!7r1JLeBu9(38CT=FA&A+b@X#+JLyoRSV2w#7$kISUe6F^U-KRcmg^IKq$OG1o`p#=E+fdGu1C)_gL1IZ_7c)dJ4*Mh1R zB)dNax?}#aKivOD0Yt#JNr%Y^(Q4RoVL@g#APR3t%_m`jlI6aTk;zFIIhO$vNBb$4 zn)nGbBrx%HHLrK2A(mq0zf8SF@17rV8GH?al8xR6?Z8u)8~MOH^LL{=CW%pm*0XsZ zyGi%&E!E(7c?PeDZ^<>XSL;XX{uC9z`H_W1BV;o_#Xba02`+(}F2M7TeL!+WYJ(;qoqC!kR%12w^`M#$Y_hy7X)@mVK&{slsE2wA- z$h988fP+CV@Z(x*QwWDv@%+K5P9@?2fOg>dF;iftdSJ|4GbcHzll4c_vHPRj7fX~J zM!dJI)SJMm{rhIFLf(+9`$ZwX=U5>v`{;#z{OXLj`~L9!*gfJy7M{p~;He)}Ny~{h zXx6XL|Dp|)e{q1WLwq)9yy3Eu_un-^+4=B?-k%W5Dx7)M=_)d&F_AlwB~Gwqi=Qm@ z^^O+5N+w}ZA`1=9(%q?bK-Ge!f3n6$>M}(v5oT8Lpa4u4b@JO&@|!w%Kp43&j&)50 z@mlIxfvVZ+ARVTcLf{yru6)rW&uQzG}t>?LT2@Bb-1RQ#ft^8Gg zR?uNJ$Vzs$qbI&Pe{g$l^$2y~{)3$wdZKrTT7XSA`nrwfvGClC)bTFe8IJI*Gbvy( zj9|%v?>{3=Xuz@cmyHll(;CJxssIYpHCy3{6Ee=+W*o3dU}hrTc=|VAK6=}; z>dk|}+pJK8b+d6|ewCyZRGx6&V})p5^VYV&4iQ@LFOglJ|Mv!L*t2)$A@V8DBH94< zvEcINQ@r5zR!EWXC%Ou~{_ll${-A8SZ{?MeKU|4Mgh44|uD26P=6bJr+rT3Vip{KNtE#n&i(IIo zRKajqMF1M^|3hLXrdUj0HogBp>HCLL&lI0NHC<@?PtSR;bB5pM_DF#WHkR;F16BD1 zK1oLAg*berV1@h;&QlNDVbBAYKTbH;g=Y}g49c0&>08p5P|p>$mxdX2-cpIlt1HXy2umC0l7S%U zqGjiWt#pB_kIhBnZgT%hwE0SWpgF`#_#3BzJQgkkKu&e)WR}~h-r<*XMChK~(lX$V z?T?S%9A)R?1-9XEjy$2`V13b#{q+Af=|p|Lo`4`bAH#5O*oZA>H>^$Mytk z(pqZ>SD^d2{4s@}>TFj>pr+xycwN+7j{9xI^ji#o7b8W7*hGo})PrN|vURyxv1(Do z{GIZ~3o!-7<}|yN%6^_FN{XPN`zbKs{Dg2UZsXm3h*(w{#is+{;)l`HC>Yf5tQlT 
z4t_PiJxQ6J8}n)O%bKW}M~n2sVH7<4?d>ZlKc!!o-@^>kuReyN4P%wK^I3~C5qy~T z5Ys!HYxztyQ*7XsU1Ok(<8mThZT@sM4)~qNbKyXN;kv90q{exj-n45x7g7L^Nb<;e z{k8(GVDUt*k|Q5U=tEX)pF6K(7gDk^1ieZ@>+38aH z8KwL^zuJY)oHUtwu59C*kUG$^fxe3GWWyghaf$e&098D!9)raKgVS4*tgP7D>ER%c zWn=;G6x(f`gEN2I$xnONux9z1=62+R$~@Qy_6{GlI9_-+OC5XpzCTgrZP}wdLg#q? zMDz>k?C*S(Ql_}A{6n5YW#dM4PqibZxiESzyoTCEckXE6N>)PB`_v~PIPSWg^1bzy z@){9*BY&+(qnHT|GBOE$+O}LVBSRiL57AWl_SFN;N;md*|27{3k-)yQZm?ImdgjjP zO-wFu8L|?ymVn@)P&020q(q=YZd-QzSt;w38}Rq9GoUvmn=`u^yl$$DTw#0ljcYtg zrT08%XZ1J~W%C;ec_XW+FehC+R4z|l)6OAscM%^p4A5NfzH4N@$1;q14V3Z>=nq>g zc7%34c}ZOR^`=F}^IBhQt-kKh)nza=xFR1#-@0*VOzH>Xih080-IJ@pG>d~B7%(S6 zM6F$IkgBoiiVfAlJtCZ}69Ez>X7wUdMHqnjtKdY@NdWzEs>MrAry-)1aE*Xj5Uncfvy6{^2_!WSgO7o)mdrEI>c3tW)$CF{9zicbojEk*G?K6D z-7NjFxk9zN%LtF(rw*?n$8+y^S58`f{fG^H?E4<%KJtzAhikJngxdNpd@J>54}nCLPx$7T~m?7 z&o6Q_zb)P%0oppKQ^0HRP~zRc7Kk2i#onPIJcdojapP@G(V1@uYrVjdiy6HJYrBYg z;%x%ujlLPA2!ov|X`tpm7BYj^6?SB8C80@ShB!(-*YgFw#7H~f3NM$FO| z{qpTW!SQP0*7U+7fQ1wX{VJLYw}6lO;<#bg{qo{A)$2rT-vLh|j#Z@8}N(x4a z)z9xr6JLJz{c^C+ckGuT^p+a)9v(+qd0;DeUX<{?NUQksvl<>X@x5bF_h+We2y}hI z(+u$x2w}6oe}1QWV>I6?k%P;yB7HPfkV?wv>XlxtqAOou3Iba6a^yZ9Xc$fYZSv|i z8%bP!4A@xiFh0l#0T)Nc>+uq`$qEzJ8iHE#XFR&yWWQ}r>&kgXv4FCSom$%cdSL+9Jq9x?E#9VYh z(_H+bR0c<(FjX4*4#X`GH2cE^qA&?HYHanJ&i}n?y1ekX?zhCD74dSgbo$#T<&Qs) z=F|WVqWf>wtwg7Z_BzIU>}b}{6!3cb-&?4@!2)dpF*H(J0y5L7LzIf@gU`uz7#& zE-EB6((K3lQwv^7PaPM3_wqld)7-kU=06iT|t@1m}7Vo#eo^eFAyi z(>7Dc#68lk;Hw0!)}LjhaY2@h^C(T3XW^H1`Et$Nzg-X(-?b6SDbEGv9PLf7?d))# z>@$~%ildrFB*Y0$Uhn9t!&wu%Q@58-(<_?R4mB9!biE){P2qJ%NGzP*^h7OEJTAC| zNczz1Q=v%rLWPiiad&qEgO`-?d;F;@4Ib_gEJ`w{hj}- z&6FSLUog?Asl@77Lu#3W$yFAl43tCs@vJ{9&8(aB>l0Wr5HozG ztvmJ`>vQOyHVfO~G8Z9Z+;!aF55$SR=p*Ag%d~$fN^3!3B-+)(_EF4x0RdY;jqU7} z$Kb@l(8}%rInRO!a=W#2lqC!e^nLn%938Ixy!L+M?BDWIX#92Oj~1EkH`ueRlX)Jm z>szP`vfq{XNI>`S{+-eH!ie3ibP?C90T?7O@YD?4m&t<&X$JqUW?Ja7wY|(!;)7B= z->czYtLzhakB2+VT~zI1_D?4 zAyuyH{rkWE*qF$ryYdlB#}z0{RDF9e!8Oda0IPi0$+tH@)1gd@hIes<6edTA!MIaf 
z0V5Tx>+9?50@lL;*xSvS@wzEDsrUl?si<(60hqzuMNV_tbQ1^@9Up?}SHM0oBjL=p zjy>{N0id&QL@%tyiVrY8V5nNnz0eL{)MP@#DF2~v5qnpS)!8H8os*fN*)qdxkZEBL zE`ED?=+@Xuz^{vXFN$2@H%AU=(LJO_|IsSG4WH?0`u$jt3Isd}CR}h9qXL&4oF6-P z4jA(=Fy5uXhug2)I&eKtds%745`0J2Ur&Bn08SfS^PC#scw(hv5>s$nd@j!n!QT#< zN+QsEx_XCakeq`qproYab=|mixlzru`(kMKRHFwa&SO2B5)Gv|@A)gB)nk!3zXJy| zY<72OM`3gp6v|wd=3)MM>3@(4Rn6D(Mgw;8F!jA63P| zpad~%Z~Z6#}@L+%B+53Z(45wE|0ogx1i!RWVc&5S93_fXx~RTMcptPdZifh5`Q zy_d()Crf(fmDSvUGlp7Bco4=e-{O0YE$x`2UhhoRrS<_wdUqbMNsAr0z@ZFI@B`LI zcAen$xZL5ql>6RSg=oYD%x^;1IuFuJ6xbEiq~6f_fb;(NZ`F{3s;W6SZX#~Gmw%ec zx$q7Yz1+>lyG!xvI(*6UCwfPQWq8?45tpzc!5WU8DrBbwE%{@}KMr;2+=J|N>}3~X z(~pv!7MYjP>qBa*+YQ0PC5rr#r^k1%1l_FTti>g=U-?cYd5Ly{DC7(%@AE82@;!H3 zgYU4^%sF6#6vON6=<(-u*FU4HpI?-ULBVSL4=MV*_{FW0SV*h7md5W-B;<-Rau8ryx?ddSKUHl zRa(K6Q~XW()lAPApKhS`^gXTZ8N+$?s1y$%wsUUR>kt=bvMG2Jr^Ntk17Yt~o8QKsfJWr82J>XyN{D zjU>YaV5Cx3IjLO7mw#y;wRehu2hI}k7MPqI?Hz-3F_$pdQ!Aji^E!r8V7T?KzjH%A z&A>~60suzMICRskLAVp0QsgLg*Ksf(frCnoHD+j{k|tm_-{QOGn}xK8lJsI1bNoeK zTzCJPeHp&0mJuv`ctfR}+3XfgVJ$1S@@6xSoz_YCQr==3*%DolM5C6ujMk#y$h`&U z#+R2#9+yVFRZBy~UORlu47Y8>(uCcj-0>JC&(&`b1WOqH?x78gl44{Yxs7w2^3Z}{ zGC*URE0$Kmkt}Vt>*5-A42kXfTxLfHGyIu@VdVY(j>eXjYnm1ii`Oa{9Rki!y{8{f z88Csr($eCRJ}on~SHSqC^>^Q_X;sZMI#2!C%S9wI!5$a@%y9;d9?zHO|-qb9RKca_@LX{Ef#>&7)`8z~evi(U4?4pi;ao~=|DjaD%bLr^p ztZ+ZLxAHy3AK09bY;}hPbVDc@+^p3v{j!}PqNE&x8tHgH{1O5M-^>;X-Vt?JynY41 z)HT1~g@bT!w|Pj?q9cSj%sBJ;QzV|-#oCH7RyBJa(Sde4OycRqWARiw8Rm8l9FAt6 zSo(C5KW8({_FbUmoLu;+g)oQ4cd%Vn1K z|GF&%GAU#yJP8lEh_F9N)MD)BXy6LmH&DN6=yX6l;HAlAqcopS1Ve0I0ba9?{FkvG zDcskvdw1m;Ly)KYyv$Pe^N)D!CZbT?YQrag25V=jYar|o^vFuH?+!nM5|u{Sc7DbC zyf)j*1B3Cd?;lI=m3#u`?PIjhWe%v&Fk?aJ^ej=sOcE9V{`#Y}$I%;QkUchz{x@w` zH)zO&5m`B{9_DJ7X9(E+^aCV^-Uqm|WtR?qd7|OI4j5y|BSywbt4xd z#w!0yl4^ZC^U`afy7mc&ilF8L(+4*a`rdafsxEme7Lo7ksks;Z-2Z^p6z z%A%e_5`%$KX#4i}WNpC}ED+7~Po-CN)GgMUwY<;j$meU~aoHqz2zB*Utk66qP-XgI zJOrzbKY`KLm%!Z*G>uTUKP`)MH(JAFEK`MwzL2jgt!L+0#`+aqRgWx5`USmf3s5%ybT?fe 
zrm*s}Ym_qM;u6rOh+rl62g)DByu3?}g+`AI;V_E8RqN1XVb`d<4&H4xo@9I#r})%V zJREPmbCZs};E|C9$?Df$S~ew|69@d-4%XLs$)1TjjhHWUPbFO4)7fSaq>(n=`KV5I zM@D8vgS#KEV1xUXZ{G^v%#t^OflSi^(Wg32Jj}qWCAeSkxsyug#tk!43k6DiK_`~) zD*BI4#7_(Gx;FsshtGH&v?QL-L#4fYTcgDCkQdDPeA}F(WHZzl3uoeRby$;ddne^gDx;kH*DNqWzU3o7w5=;>n2n-DX?GM>6|zA6-6UcZB@=VDvya$57;Lt`LKgb)#D@ zTKy9O6N~0~Fd&R`9d5L?(z>4lw`x{a>Nmy4Ds*w)aL4@Ix+ z{eTq_qT~aIHq1`k`XA#!ucxDWwyeDrL@S;Mfyi%fc?;C$n&6uQ_d~Fh=S8-sLwU2f zpVH5hXB~V@3CxKRMR|z!xIiK~-K!RjclvcHO<^Xr3-Ej=d)*9xB8L#@Ms-hw=RheYe*_liaQG;2qvt*^Rf72%jquwn znAs`dLco#eW2!0a(EepUQv{%Dl%ZP!p-ULk0Y`5ybCRm@**`ix$6l7n!1-^!*MI01v8YrmO|;VazVo{(EsUJwI^`BUuZz`$9K?nGb%wTk05|gq>d@VN ztNC(yCGG6mW0iyNB~PI~$FR{LEb1}g8uIV3^2~6v&u7N&`lNsEjhM2wdFs~+gpr%Lt3e9dpOX|e6Yi)rpmYzFE;^zjr3}3vXzj(6OiDH{KElooyIKE z)1x`m5q9l<-#>)7B;XKQV95VdKp0xFkgB0tNY@>5CqvRJ40JpTuGaaLPYoub^GaBc z>)t2i7BDc+6RMXRP}7$es3X71yqEp-KgGqZjd2Z8(G(7?xV;m!EVa+TFPG;kg?*>h z)>B9q{KJtyBiUy7;;F$#?Lm9Xn^QRKCz}UDmUB%K1(m7};Q9D7S{lxI@5TUXx#>)9 z4>CZn#_0kfH<*CVFY}@Odw34#!y*BN=m?xi41bbXV8i)fIzu9%Ts;;v1w?qSoS}oePXEEI^7@br(pyU7s4j$@xsb~4SkVo(LekX}1#|R|{8*V7H zPSuNFaR2&h=|8cGS_o!XnRT=93jnhRU~+}mG;98v-C>!${y(F0wAUoHyJqePH{rz( z^P&AS-T%7725H7#p6rc(MXE=nkkQ0B?Y|H;kN5HDy7}&KQ}K{`OvG!&<`WztGAVfz z@|CMy#f78ugylH~(mR(Ky#79ri>!}^X9Z)AlcjEkqZvjPo4}>(7oWmsCm+20W&;V!1fV6FFJRDqR%yx(1()tO8!7;QWB~}CR?0PEoU(caYD-MdC0| zPvi&Tcuk-AvY0}u>>?EAZhTvHmaRCCoi7H@(J_BsJ~SpS7Jr047%#8Y`rpzM>tpZ; z?r#m}{av4>NgJ@;R^CT=iEdVtGYz_~A>c)dVrc+$Rw!(-5QhSYWc5_f0%YseYeFq4`$-6uxEMR6*;gEB|(+06#w4cj4s`$u$C2P0VCb{Ak8W;O#gW;{woo zpwRtX2 zQ8K7iCy>FQXA+FXP%4?Iu;FvWz_`11DNgyrLm$cBL{3Z~GQe68TVoT5g-%N83`PyA--5p|=*xfQTT@P}>iIUhnnf=J{qVprY60eSo9S<} zh$f7wV@*BCGV2Y#cJ~eCeANtb%eS=g#Bv;cT|fVB0qjjD&)w;O&rh4~?SM&zh?ZQE zk>b)*>`lt}gdA5G^SkM-Za{|_lUd#`Mgy=8>R2oWJ7**kkAm&~l}y_3C3 zHp$8s3E5lr3>nw&yza;E`|the5`r(zZtLm;KBq6BWaSZ@K>+80+UFFAk}M55L0dRT z`L3HkQ#_D+FZz<;Uwok3zqquUZG?YO0Zbn|4F40FUyz2ChBlwB@r?V6r#F>`mDXdb zqV{2~rI9vD6LBz`zrKpUs~@YLaew*rTrA%A{Ue4q@emq(ff{?dmJ|ZdOM|^$yt9#uzxISvsJ>`fyd#y? 
z^WBm1byQ>&Ks>j1y>Z%U+R}u`%4pr2esC14N|_PoA6*{*asBgKxLSM0li;_#h(nY8 z9hq_?d#ZT{mpd52^3F!f)AsJUVSTHDQ z;keE+2pDU9HVqs|ns~=gApJeNJ)o@CMNiZa1fo~q>gz^Y)=WEfby7NaE}jc^Fa)D^BtzI56yv}ua)?VG6&Ek4z0rsA0YF}}p)CiX& z;7DQ21L#M#YRbhK5v1fnhs06}9p1v8i}Q+L5V0fy)Gfn1zj>zqyJR;S^>puvm~?EN z0K8_v!S6{+>7WI>$gj|^!7>Xy>qq*jb!t_&ZExKjFc2XmN(;js;^O6EjQ@beInX6; z(@-WveSgs7F@3Y}o+(ce+4WA{T)yt!oOapspc>2Bpf#`_88vHrD;zEbRsT58J?o)= z8*#Bq1ECBfR45}{w3QG%R+Bq_vu`vGTPBWjHWRkL>9wdM+Q;;taNXTyePwD|HN6vD zR9KwN7O3w*PeT(vXz^&`288K55UX+nsB_!6gnIXXcbZD<$6sjJ#1Or zmxiW49;eHp|D5W8R#&Iw8mD=>EQX}d?|2t=Zd(i{>xnSl0hSV}O{Z2&H)(?it5#%{ zUixrcTpZZ)1vJttIn1btYFze%a~&pcUDGyKPF048u%BPSM;KSMmHMFH_R^4w)>)Y9 zT2u0yg>%Rpq5{gOXHk4c^@`gtQ-)J~qsPS{xv1ZadtL zTch1O@BmlAo{&{O`xohzuxGTkvN~JO4x6ChSYuI4zD?QUmtE(EPg8%QOyZf!9GS!M zC=w|Ii4IY`%iUCQcCTO7celSm4zc|epif^v)d z#1E(GdUHpt*AC2%-Z^^Xdr-F!RMp1U(nf2Oo2&i!X+$@1(IkV7;FgFHx2xjv zk{Q`QAvj`!d;+p6e#A@cu!p+yDy zX3Yo^9Lgdo_CD{)g`j_4I*>2 zU<=)zeX1Hngdld*Cd4Y4jQ<9`~34x;<){6OGS^oed(8@vVK{i;Fi44FzsL~?ceVOSy2UPq)M6bcJO zzkC`-OEtbTzWDzBm;3kv-{l>06>Qy1$p)_&21ko^q>>Sic}@P|!@~w{27FUv6P1{- z_xXeSZU1{?bDLRba*j2WYU|{@2(s(*)2{l%qQgF>m@KX`d^gytW;%;*YK&LWV4ciR zUyn1vy*X~B^48bDT`I=Aow4Vk+SrI2T~mj{>gxHq%Ln6!ndk=(HzcycDR3%$BzZHB zj@~`+OS?E>78Xv}x&MVCOryj&i&1`e#m-$MB#M@2^sGzjy7>&Q8UoN#12;%Z1)l23!Ugv92MN}bE3z{!%6jqjSk1o6<>CJg~22Lo}+@uI&U$JdW zX+|7K(Q7CLYrm|SCY`gRb!`$xnfp~#gn>2V)HM4(8Z8bB_HhjVv48_42RH|2mRYOX z&&Z*tj1nyE3*tR{C8|M|G@XB8qu=;F6Mp5mz z73;Lin-aI1ZUjmW*QEonUNkkqGHHEa`L=VjNyyOUQlz zG3Rzc?)T*oHL((%3Pn(cfY2U5EFlk?nL;s5!M9iIX5U!7)!dRfKFd&NhY__a zV|fjhy8}r{jsF|VU|9%41_1!rujSV@R`UYEO$B59AKzIwS^XuZbmWpkIM8_vIo>mh zLwm#Z+Y8UVtJ8D$USbUYKw@mLk7JqCjg@T))5qsS76Eu0ygvZ{8Y1 zz^DHPf#d_^wJ6t=%8`$V{oG>^^4~{ASwi^%0EtkXWN0C?PDjp&yA-7xrl86mGn(8P zSvEHCHe~!T>3#g*ouMSid3*8q2M821jLKc#u6_4fkkXK>8(r+<`mq|+f`kT#M?k7X z^_?aIjEAXI!RY(J3myvijYLqy(>VQ_WFfEWq7bsB`$pq2ARq!Rn#`2oXf4fA0FHy` zkoR}s`1q0gXbhCxSFcPXyL#Yff_oeT+F)f>df%Q#zFPPy8;nE?j#A#gOuDE44V=S| za)a^DH;dW+ 
z+HJkqjlR(|szX7II4Rb_YuMsJVcOS`7lwZd_y)f~Y(*BZ+R&uD3@Z)Gv-d>pc7^Zn zdBv6LHzmHS!v_Nx*W4482g<^qtLr0FQGfqL`3vj$FLY>TIbME1*4+oj&C>?0>L0He zM%?D0ZECmWbS^qclO^lElDQ^z$4qvzv@pd71~Dr_5FS4oxB2EjbYkq$P%G;Np&C|2 z!A^%-1pAP85cD~FJ;q+_*JE7f?FLIm1yC9vGoW#vgT_h8?}I^K!K-l%5tUWAcP@b% zY3Qc&$Va-3*f7y`>sKVKo}Y5(H4<@4!c=L`(~1I5!73Bi2(gU6Es~}m%RR8gd$FxA zVK{|WF#!clrXZw22RnLGM{x9OXpjS0wI2!*iO`8j;?CgxlJk8y%GZ{Fg^nguH|tH4 zR%e&vdnJS-J;OZ0`8PYF!kSvw$cfbKxMz3f>l zwG&3Tx8kq;Pv?V{0=D$elTeu|SmU3;rq^@f9bk5U9?mGmaJ`dPlQK978xMYbFWj3m zaMX_S(h-ma3S*xa`o}gJI4CPP9^5sE^*?}3=jZ&^p`LzVVlk$PewZ!X%4 zw1+YHW?NQ3PyIhzA{8O%Q7OpJKLnZ61rUv}CM3M8@qKVQ@Fmdck6W(2jeHJ*Kv2~- zdNf*UEqIA0Bhp0o=aj>bBpY*Ho!pJ#gK1y>HFd|q#~nZftVWv<~bLz`f25Q;~SRifwmBKd?5_m ztCbbA?c11*6Q?29*LUX&O_*UP9>(=cp}~1o4&SkQWElk>lDypS&@Oy}<_G908jU1A z-n^AIBaT!5#N-08vayvuGpwr5CAiZ5uzmRK7bNC2* zcNAXP5OHf%EW;^fqEYI1qqB;0Qx=VIF( z{?Yry23{p-6c~h$ht+taaMEY%4%7hzCmgCLKnXjKm;^kYLjSg~6WF@CyB8utb9}mR z?&O#i#lmMU+uFg|dB%nsyJEspn*pT1*wIksXEHdP;GWZLHhH2lIyGPRKj0X4uuk>6}6Yvctu2HdeXMQbS{)EL?4LKRXVCPeUEVXWr(0{{yC~pSB_E zinA$&dHq_USRoZfM~C0;&2rrWY>E!Gh}Fa1*gQg}T&3kCUgJ!t%c=6}Cr{*l0fTj~ z;cNpgL=0|#Y6iiHERLTCN!$jBP*GXwHvJ6hz*T}&$RM>+jtXLsu&WC|DYv0duNJG< zd4jG|(enhbsRUN)e`o`8^+5XhZ7iBMr`O%hUPsnb#jN8cI)daHRXh|BbeNPMmSX;B zTNCqyDRW!UQZ7Za*AMLmAh$nsHH=gBD2DG@npOMTSh4NDVgIP(jk`W6~* z!D^9$ojw*f=S|vp2}V9ylfd(aUf@hPZ8wrwz`8Ynu2{;)v!^Kk?>)94 z75uW#d9q?CNm%%7S(d-1eYkUOJ^-advqfWJR(JFAYQQs8 zY!6s^^D<}FZSy^E_S!=FSF2|%gXr2i66lhp-%6zBA9VC3d~VsU9trqaOmvu4^c1X~ z_f(U(SARRqz7KcdcioVBoXqzw&0(_#dSC*<*0DWRbMA2NK`ZTw!Rdx&ZVhjq;?}b= z`O}t{ho6+2_b+iR2o?N!0%9g(X)6EE3vlkfGhJi9^<|`n@qheGN{M6oK>THePngFG z=gjBZW5db!J?Ozepg7+J_UVVH4I?CGq0EH@_Nt%PGnY>$jA|Yym2Uwb$JTH*57(g1 zu4}E53OQdqE+epzXb^8i9g3#}m!CUc=c`Fx;Fi4i;(g;!uf=nUjk%ip@SwhJAEw4{ zoWnt>^4;8(@~@2m1)}kUO6LJRk>L!g>~st|9Ee(+mg&up-=6*`vE&6Um%cAmS@q}8 zz`tag%?dc=})iOO%U z4~W^{wXkpWf2v;)ap2ha=a!N8IT@@O2Xmu7$P*+VQI6b*pb`BLA77l0<>!6bxB~G@ zIV_r|`HxLtvg;NmUk_vM$p|3yPbhgd3R@@n+d_sjKWZKk+qUUNI1~bH9l)}}fv0hUsiuHih!gx7i^#bPnJ@OULq&oGfaZRT 
z@Ri`vrGQ3-oLbf7Dg}1F2z!DJR{B2zOEA6*9sP@*RstzUzlU-QZH1# z%1)9?Rqy|PU8j>9kdGbuyN=UWIYz!r|qflWo8D(r}wxlk4B;D;snrFgd) z?n`~Q>8xfOg~6R?l^Q&;wfcRLdEnoQDRX*C>htiq7VoD;&aA}5OXBXAVoe@97Twc= zgd*(_Ud;nwG9+b_2MxaH_3>|t7HU9F47QoP#zWVKv^@;Pj)K(7tYm$<3;AjPG+Vrd z0blUu@AhJXt`VC^cizfpe^v}=u%L0Qk5T1qB5jlT3XO4KjIXK+brz$ASY5J09sSFb z;_=JlBaOrZw-*KAO*aISCqU$?;Al5O?6}R)*R7thwioKdd{juU(!e0)9SAw+qZ9v| zcOabdamzv<{v_eGaTHu_I$Em$Ie3+T#edKEnhw5hg8j=Zu6uJCu--X@#icU`bB6!| z9K~x;OR~PwOVW&N59z*4yFHYHE8VuG(W7^-aMcsmU`LkmblpGzQBOvxMh}8S zb4q-@&9%piUC0`bIZZ86TL#}^OA8MR5^*jp^jI@&VpfjVXk^OXXquYZe0RjLbfZ7T zlJ;i5|M=<_zh79GpC1~Y*^}OHcPqsQw*$-fqB8#!WzH&)HycXL?~1=q&#)|RY-}vq z#BWRTp?Mp~(nr+wK8D%z_5Mo?YL#aJuWAzjLjgGN_8UOxXOUM{bxw;{{Mcr_*fr^z zdUG)zbJSuCH9mOEx`~RI-=MNVX1=*aNW;(r)2ZHPE5;9nRxa21d3pM`i6{hoH!pQR z6jAgTcU~FPzedwF(oZNeX!{7G@UuE6qTju=0LE^4n;>J3bo!z#$5@ zDfj)OAzVqC)E_#%W3nGW@KH4bz@A57-#I$$OYoeb_dUI&A2Lsjh3G63;ipge$w!@A zN6Tu%SkGuh|IOx@V*WOq56&W>1A06AYw?VHmALrmxHx-nE%}gz5#jSIG#_#CzIJ8m z5F*aVb&2oiKGFgLquiIUoI!p3#N1b~pufERk8>)HrdAT{o+zv47?WDCqTD#trQ*35 z^RmrL$iYoEh>GKx$cHRCOT@@Ul4{CZ$%m+$6t#cc5M%Pspg* zMCcu6soA9;uIt_d=ayKE8*JWVYNAf_UoSv9$$9)oD{Q@Q135VBkn?QtdHQ{i59dc~ zH%}=PFB*0YPZH!U6cx`12@SWPy+kE*x}%VYd2njsU~TdBWZ{bvkvHypIWX&D386R6 zCohkBJMGRG`bZ<%CwLhJnMriW3V@YAkSRrzHQZym+>^4Ll(Y_;6iwKZ!#Y;c&@kPJ zBPljw;9lIYk}3hsqnMS|RYtd2%C?uY*k2`uf+IvA0Sd<)fmAjsYrZW)0|r2*Die8U z7nc+fw_@|lnf8OHF6m(+2pkC?(zn<2GVU`1NV#qUKX}2B*GES(3*3#RjMDLYM8(`6 z4+C57Xcdti)-5n@lVP)t;#x~}5)MV;S+?B5y!#ogCit%s$rF{WHPOF)=y#)xu^5^m z3OpFX;HwmJThS+feves%Ez#HGc)2luV$r$L-^s#D68Y`ucn^UoXLWWdO)IO>3;R9K ztuj#}x#d(_o1?Do$oh01D99Lb3+3uoc5vqbC!XN)CkC$-BFNJz12!q3T;DXt8$$bk zCh8b89%p{C3MHFEjzH|Mo`_4g74FXoPV0?7?5KbI_%ZW#VitAd^pkm6)mIK3Y158P zbU9R_doTv)=nfnC9PNl=!SRU=`EO0l86qsTO0z%2?%OjaUl_2?P$9Cbf5+_%Bfb4D zBos8mD2)aT(h>W3j4=|f|8&_=SATd@Vyw1Kh-oO`K}K!NCW2~H#be7S^sOI-}4Qom^W_C+)#o<@+;0ed24+MJWo?+(hYyT{d~JX zFcpQm;Hs#PEgisHS|YKUDJ0_Jz7lPO#Ht=RDu{b2vJmvgw1oVakv_RWxa3Pm7f?`2 z$WtI#%EOdxcwQ~}ZzGrXzIy~PfHMUV>9SVeR^)d!8`a?sfO_=8>BdZmwg0<#B0{aX 
zYr{-v_w~an&x2PGZhj{l5OK|Exu}(=da#izFAfM|nX6-FsPXI051hpjUYDcBTx1O= zXJ*BtKfa@0z~xgf6i(hJ=M}?CIHmNgMNE2o{MQW$GO&UC6S4P%3&jt&42nb<$H0$; zcZD{sDRY zn12kCKNA344(!l*$o}VqyLYM2&H; z-yB+jiwC~y+cK~HOdmx(XTtos73KV}3Z@v?RZ)-F9T37(_A~z*q?gdFO7%XVe>l+pEKhpk?L|)>u6np|&fT-4&$5g)OClnAk z^XphUGZ>JmdCQWk7?qFmHAAUPyKVI8bXCJm$= zFvie7#$|B#wqnGM)jl3mKw}pSeNy}<^;WIiNAO3SjE~?RO~Cr7n-fid&(`p}Cruwwx(TDw#)h8$uWg__KS{u@^8 z5QNCpKOz^h{gvJsj4w-l@JGt~I2+nMAEyfasLs)>J(_4mO1Jt&ZB2j9d9|vtcE-xW z!Uw{qvy_98QG-^D?hw1W$J{|(DJexeR}?0yMzu&Q1tctiSDjFNRE-({ziH)@c8B{I z1^dZ-1-l-s)?wCXr1j3@;G|kYM>F#NV{RvMA4QZW;_+;~E_wG-i_DEL)AYrJe(U8; zyVOX2({4hnjlwW5%?})qQ*Pv$YVxS)z3}mcC5d#_>BnwfSLC6-d)Rl1)$2d+{_8jP zM1$E3pglNBKJaA>>S`+afPm{XTsuTu7Eoi3V_SyVkEJdw#`o?L*c)i!&53XcTJDb2 zt}x1|_+n?WUGHR45;NLr5^?SaWcX@$N}y0%pC?1!jDtuE5BqMeie~V>H6=@$SLZzV zc-(eTHmj_rW?V{)@yv>#AaL(|(`D-`lxvaKDQi?-{RpNuA66^H6 z@MZBQ&eZdyfI3o!`oO;=Bs4KTJSp-r$N}iDP(ihEw)%*J8MC4AdZNNOt7d%&L~NNF zn{gWLhL1R05T;1BG~Gu-_;JP+JZXGP^{m1dJkZzT-wN$k40Ot#k#VxHYh*Lx!bHIdYs*}ikc%n5vmfy4oRV5F-E}0WXh`~{0-JEY4#6f5*U4>lM269Y8 zK3pCzYd~21IhD-v7MwP5naQ1SAq63Vq@uYx7Ke6E@ z9oO6pnKT~yguJSF>S^N5E}U~_^y#qf{Ddj~2H_XIkd@DJJZy*e=Shu>2(yyb7ZXuA zQDoEZ3W)0{wmD#k9*neHlqt>X-Pvn$%r;SbvmyJem>uPuVd>un@HN=`*#zOlS`p-Y)h^UXCQjR38(V@1D;0n>&xKt z{aWZ9Pb_;5|238ONn<1+zM=1dQgQ>$wP+05YtYe*#WYp107+#4oS*^NUI0}?QtJW8 zfO1x{`pWG^PumU*_M>tR&Ftrw4_;5qdff02q4#|cy}hVJnuUxYO_3^G>4+rtHc{>I z^{-6N4+UNCGTa{@3?n+tw^o?ZMZxseh}4fycf_t+MS|A@1Qw7bl! 
z{8_>yz|e>gY04P4{Z+YDkOkCm9PtKmS@V?ndnPW`ccedd=ixmsh9(n52Z!6xoRMok zEn-4%Qj}Eb`bPkk(bi*MxalNj-m9N2>Rj+LnbId*r@fPtt2XP^8Wj|Th!fdt?C-kgKRbNjn@J>>oMY2(>_Y@c8kF9g!~8_lUT=f= z>iA{P{mq%xtOVS=`rssjYS1+NJ-LUgk}p(X9i{y06?5y%F)Il}R-xD*7RcbQ{>YJs zTdBxiOI_aN4Hm)5@jVykttwBKeSZTl$Ei0~5)@UQp>c zkA^B5(a?~eU+|HJa` z5oAoDc%TjGtN4M|P4jB_`+ym8?rr-0q{q)~?kd$=e*KT0Ms%nd@%GqH5fWu~xYAqdZBGUdn}-uO0LT|``m0q# zPR^%w%Mpqu;UU8;C>Rc?D!&JJUQ)^EMubmJ*1y)v#b}ZW3k<|iMb0{iUi{r%-kx%S zLK7x8Vc*p)>V!v9H%S~j zoCpK3nQ`ov7P&1|kK{bOlnsvLh*ix*ws`+tNfEC(@V-LDvUiLfkp?md1RXE#LPA3C zN7AsFusUuq#Xl`gwXxnHS5?qe!LE#s z{eF~Ajd18aLy}0FRW)xGZz>hlKsFHEB)tQDD;XYhiYm(5+CtJ3VRFDYtT=K*eQ+M z=q@?E*>LYK1N882!#cg~lTdBLR{|`(6e{jSKO7;v-fXbef;Rp|7*o|KAlwMA~j#L!uKssB zi=5DH`9MEHCA5Jm9wz*{PsEIcfeu`YsnOPZFKga1E?2N&Q>0+BNHA0wTRu;pb*MW) zCT4$pDVu?1M8afjmGHoMcSfK5p5A-48mbA-vXgpw9~&E-BZ`5Jd~C27l71n6bewf* z&DM9-q12b;4|v1OwEGk}*5qV7|7A^t!yorFD2z-N2lh4Ab05Vsnq2c$*UN*SKO^cv zYVBq9P49>pAxz1j7Q*Btb-Yx+1>OYR8t*}yTXP-^Ov$RKSrI<+9isC-YRLQlfR&*7 zNl4rC-`}17NIHq^t;3~%yoVth*ch-zQsNACYVbVlWp8LyRGUK}xWUyeqHE}NQ;XVZ zJSUaU%gd|O_w-Ze*a~vO(Tj8reFFb)Qf4M|4_T zKUBsH!FWt1c_vFO^4P2^ED5cb?&^YM{~|zLwihZchC%MvXW_#BYU zYzGF)Vvya9w_GWVS!tX--k76H8Z&?P8(;q`;U?qr6G^P__wWC#;sr^*U|}dW^4VWN zLruvio8!5Vtbm&YZqw`#?2&-P+Flu;B&}`x@MY}{VyE|HNm+HBQAR!&JyBWN@;x7Z zR_JCytBionF-IU_1kL?m=hc3)24sQym12*225`mbfoaJNWuvk1UP? 
z&Q|ROw@cMcTM=wd;E1-5nyV7Z@j0UD z*zYYz5yBEA4ra?p7t3K3o(#_I!l&!hsK6f#zk>(;0|T)BaLAFp`X^NX1q}%5-?mO& z_P)Mz-5Ko?xK_6ms%ZTSvW+@h1j8_I8~v9V;ovvn5_8iQ!4n*NX6ko$%ikQpw zVwheOZ224>6aW+L9e$R>!kC8WKHQdS+_s(x4R#CH+A`;{Ozf~pNJ!M1@RioZ5Uqee zf|0+92$B=bQc?jW#V@*kT8~Wv`4x&b`KRk_sZs4qa7e83HY))E#LCnDhVS`#(n~rF zDxO_~CrEj;QLS6tl;4!X;(it1g++)}Dv1Bh4Tklz;)?A}cMyZZ6Mm$gEnXV}rF#`$ zG|+aEIP||w5^lxf(@6J=`a&}J;e?y3)PocO#jfiCaXaqi)ZgjkBDgb{eMqBH@9z>-h6jlQ;c@ciVvPHdB zD`x9Jr@48HfZp!Vn-7iFgtl0&c?53{JJBX*k*CbCA-1&iF}&sP`^ikb^l8X%EYHKV z5EK#dnA&LCk8)f<_a(B*)0~t}$A$T-kEKz@b^3oXbaWcnVemZv`ms>jW1}l^p`1Br(2}vMJYKqi}apYr&;qgof6x|({7$&|KPHE9h&$6ypwizLwZ#@c!iuyJ`Bdd@4*p@16=C@7`mkueW&bkyoe#`}3s;ldi;oU@14=~}{ zVeu~)-ysXd-9-TKgp0IYYK~p4=O1FI=L@u$;D{j{js?=|=a*oAbG?yzRbz%t343yaJHvfLbwgStTbIintl}4g7AuF$?&}FE_8U~ z!u(?)eZ8KAxW$Qeb_Hj}qqQ?S87JDh>BxK(!vmpt^hl91o7esN1z>c5T0{%ng^?V2 zFXR=$HP!IEaUWM#ts4E)5$4a9&v3U>-+1Ek#-AF?N+P5jTaAOx>xc)bS2}D9i=4Rf z6q2Pj6m?yHGuzk)a_gPzRDO$ng%}EnwG$W<>^<^)Ek+BN5_Y}L@cnP>thbvonk5O) z-?0gyd;ga+1rL-6YH=J+H`c12`tPNsc5nk90^+7Tyf`UsFQp-Z@d`{NaHX6b`SWEe z`LL7pA22;4saXX)M_#hk@=wGcVx3MOXIr4rUbQyct6;R!i-&F>hUD4et)DRXaL+*qxU;ok~_xFw`i6 zZbs2cKQ2k+M!^s~2bpfJqnA_H`gZ^O| z8XA3U^ugC-m|nV{af$9V#wDbr?B&2RW)V167GV3RH&nmOS$dwoknZ$ELql~HY-DC| zd+4lx5~eQd}3*cGG{y)__OmD->G)x_&}l z_}yRI{r|iGTuwPh^LV|aVR)JMBpAGLTk6)NN)u#DYqn1&bPpic9(ZL$Z1bzM7ar07 z>)LiYcZRF(`w#BVf1hHagIy+_XWeVP&B-|MZn?$tY7k~Z+wNwHJ(3*DSMRd$h+aQ{ z!o8A6v%h;DOsoJ&4tjS3Gd43(cAe*izd%FZNm_{T@=LvfAz|3_@cVxAG_?5U{^^v< zAJdROxQB^dwC8w%@Zg%S*;vv1H1JF;?)2i4MZ_j>cpa5!Zy%}j_cYObj}CvXFy6hX zhP4ZnGzlREo9p;koy|$n{o4=QooRYD79J{UB460Ijp9 zOFu6wh)ITVXbfQ;`meRL+1&SgTjg8E@4bWkdoFMv`nnt8ezzj~s4j~{n>IW=(tN(P z<#>CFt5lEeCQnivplCDFXAE~+*6BfXaD4m-C(9hn}6=IMo=Q z8#y7-jYFT<*@6g~Rgfh*CBYVT2jlC;PL~_}EdDKT{vOV_3?G~=M~(e4&Y0Yv3P$fC z$Oc76X)tacY72-^P^nlredPaV03J%E^4->XX(UMpY%-Uj4S7&j=@U z&|)7+lJue(67FUw_)PoV#yds&C2WIfb=f{s&R{43 zF~}#ZtROZr7FicTUMlkbZaqmmBWpLF@6hzn(MjuEynb1jAmq z%Cx7r{oylgBWrx+qz5q1RkR-7#cBulCw$bp=q~5r*Rq(bd?bqn8%Se^f5b0TB-}PT 
zTTW->Gbh4(mh-hkV<81M{*yoH!C`~hf9bL>Tpf+*wNxbBH(uUDulD1-^NFgQYzhf3 zzNqbVTCg-e1eHWtLv=aLv8|RTee<#b?k1o0C=x_xXnx%|G;nO|I#>>7yA*U;%bwbS zZ%a&iY(&Rmwn3f2{+qY(MRaI9XM^TfWp(AXZTPa%)ZCN`%cbda2tZQ;k$uCz|SHz(c>MC4|M&o&O!oZHG*{IDK&Rzn)S!ZW=rN^F8B?o^{nF-x} z*H2FiJ_H}vvlG33<44OCh4A@l3& z*O<3*rtHAj`hzbSTQ)S1SiAHI5wNe}k%iNIJ!8nMlA|1#-^tEGID()1M2YDF4!@h( zuY$rrL&o&Ct-YV@2p3aaP>ZM^1V+Q-sSx6M;GQ)ej98?LJ0u;GYpzIaR?sj$bkHruT?|$Kg7GgGdALvm<$C z<69L^8?~)f5`^aBSXC47CK~?HNFxwZ8q#yIBMysYAvd^qnL4w7qAzHTpPboxmag_Q zbv*3Y3~A@H6`odI<4SPQ>CzItX%p-H3A6KY1SdMf>g(5gZ%<#ZBrywF&m_Z0_!<<# zVlbq{=BZRW&P&7YyuG#MVzSYm2Hy4#h|SuVjoMPD0kgiF#_^8CT^xAT<74OQPV#+Q zZlQK4DF6MI#9%5-P#f6OR$o4A{c#f(n5E)1D2^`l0qjynArR_-EPO zWowT~C-Dr)`yM=T>}szTKL$lm^7|KZVCs}SV-=lH%ewzh|C<54c8@__dFSV#XXDBa&Gx&F2H?AJDA)=!^aQ5NOp! z?&CVZ6RKRSHQ{`ax7LyT!<16WM(9sU3aD61PV1>x!~Ols4eObvqo3ISYj<&3UmSAF zC7;z4HN5$vt(pvfScj3jx?W)^0aLYXZ}I5d3m&8(fYZUc_LC*hbiDSY)+7Z(&X36X z9A-Sr+gkDU_4QqMXQ*;eLXzIXq8yI0#WQje*nX-e@>EeK2E}6CLlye*Emrk-`kp)E z%D@JR-g5!%sT?uJyw3&xe=^Hf~5wa*eLG*uTbmkVZORn>e;(q0MNH(H~s0qX^m47yasR~-*yWATK`Kuuvbn^b|yRwa49 zzh4}YzQ>zC!eJ8pKr1B&(#HGn@Q%GxK}0*cVAU0H5puk0EVifXqQF?Fm8*#FP``NV z5{&`odO<0E^g?F^bLPkWake^N$QNrDzf3!>4_#lOI*5~u5#SB_&BD?NdX?V4%Oi^> zIhEpPE#C?+2uqoBy04#;Ty;DX>A>LV82u8q&i*+#P*FDNCIsmT4DvhV$be-~08f*<(uWYYs4dR6%5UjN-DQ&JsPvYc}{N`@5=G~TWzra9zW>^v7L zocD9-?S(G6URuzt8K)QyN3d$>W$Td!ZNF8#6(z{}l9mf*o6gLxdcdCZzH#CwBNuxb z*y#U@1>w3HfZ1zuE{qqqQ)=LR##S5Kn5tp_@k7(+@6@`_ z^_1n>S&*}+t1GN-aMRnoj0m2xX;`%T2f^wubZszIvs})r+;%h`t{R|D1`j$keLp8* z{OU|Jtl_FH1RV*fq_mQrd|4^Gu%)+L>g*wEe^rVoSl`?0a50k38VS>G?!39rL7#-X zsFRUYN&oKiVV~}FP@ccJnMp>pfwoftN31QmTe4C~3SQRib%f|nX5%NX{xy^Z8A=Um zwM(Akzt}{zR)8<}RgJY+hB``Uh5Hnk@5v#y2^Js}Kjm<1vBUFiYj%)8YEfK)fcuPI zdAXM`K%l@;!qA3n2d%&6Z;SBar_St?X37F&IuEPXUpKC0mjv2m>1~=dC0kG2yrAzfA*d8x{ zjac_pdY*#2jtX-x{z8<@kRHyoXN&an&knsVE?K$uX(BMh+~joo;!_*M&L;KlCqC=# zGz-(2dMCx3zpTeXofV0eU1Jjyhrs3I@Qm@M?B|a543F8~k3qP9UYlYNI58W`pW|4x({^qd_mhkp3by?g+L_IOcYv+rcG({+ju&ae&D*&U+pQ>rJBX}`g3;*%dXP<-xPnOX@Y*YU60OBYmEk9eimHx@hKP7uB2 
z4*wxg!y#LQooW0f0xoG_twlA1O^J(Rpf~m!e?Kca)fnumnnB{O@UC44ox#Zz`&0{< zCRh%JWtUaCJ!0P5Smr88=E&l@n% zt)0GNL!B~eaH@lXk*+Uj$rKnI9V=*g%@<~+e5eyYE+qTRJ&|fJEhVMB!AxFU!|J&h zj!JM=mSfv>@GouyqUEr~)Zew>aROq-82C5${7-Ni*96rFo)A}H&yqY zBR=l$1K;n6i+9!ECUPGSK~D`Tj>~7c#x3C-x@?IkjQNP&WvfT2A@+H6;G8hFZ2Ai- z+K3tkvZUYcpI=>O#-n0RkT9#si`Mfs!e7%ON5prXBEH3G1UDD%>_IbjjpdM5v8%Pi z)>|65Hm$hyN{TGhW8jxa`AEBBzEQizi zp?dWQM2fDoq!6HF?Ej048~#!2LSbZ38wPfHu@JB->hBtBu0?TtA~exl%NpKWpP6!* zw0mC9VY6|b7@d+}?G5|Y;d%2FdGTK#|C^g3R}!q1!*U~__f8~j^;5cWa-~lyEwGvT za!4tsdF)R-hro~)c6yyfsVV2*PuGFp0UT&hOvESI*8f9 z7xc~80!v9XaFugzKeFBJMa)%TtL1La$;{k=~)u(SMM zU;Yi}GWw*EC4Ia%p9N2+K;iv_ttfXz(=j{Gw?>!O4|0|vD>?M?-J^=0aASfMTNziR zojVOr0j@@7kJu-UWr`_ZSs#;Aq?T8as(OZWQ27?>{qsC94*r{=UXU`Gk&+NP5qB!n zaNXwrC;*^P5blGdx1mS5v*$hU3n&-=sFJ`{2pfOtHl(A+)sG>o^dIWW(axbewyBD} zT6vY$)UXCnw0dOH{Qi9!w+%7?3R;1?N<>B%NskpB1CSOx8BGdEps0ruvqpGWXNIJw z&t$WUete6gKY$gkqJ(3uJCSfB_&tYoC@Z3;*>`N3hx;FqiM|$jz@Z8ht*_qZW_r@{qp*4v=98pQVS2k+fYW zWd<%A>sh>wZwD=MABJMkx?>tuB&BcL`M?X;Y%#{6`4Iz1`}qZOJ8k(?Z60F&^GR{0 zrF2Q2S5;M2@D5X+^5~hJz3R~*`IbDUsR_e6JfY0RV6vAx3?Fv0!D{t4X+G4yWmX8FTID}?heZBc%hT)J!tmhVvZsnlAdxj=xi=iFkiAG=NL_T@EnsnT_ zBeI=pF&Lt0W*CIA=-N0i7mlt{g3CyHljek#em5f?ZF9o$fx5d06z|;&)IEH_bhdvQ zOwzy;k+g>0FIjoT$vZHAaV3ocLIN)wc7@A1A@odA+>tip<===_EBD=-A~!ayhEA8= z*HfLdY^rTcc0!7EiKh@j6K)x(A-vJmTI;D+k!~A(046>g*ZpwtBgF!_oflEGgM51S zyS9RVGZLdr3^`wp-0Rk@(7ch<-vv+aupV9p-a#KM#F-#1Sb0GNbV@o#daLhvDU6{Gq*%Lrpj|#>?N4lTmA6`@1?U$qeOBmy_k~10duF7f3^rW|1g^ln6X6^jjg{2TIfF7E+w=UH5x0Q5kAVA}jN3OJk(KOG%HBjaWsfovPWC#PMP)TOW@Kb# zXOkVuNM#+4y@imncOiSv^SjUcd*A=cIL>*V@ADn^eO=cl$AtF!ty>`jHn%IF#f$Yk zZuJ&H_F$zv9LN(xiM8vV%Rfoti>UOFC7oAsZJbewNH7Yz{l+6PWRN4zM9;|dQv{@#Em(ISxQ&6<+rbTD*ls;p zSgEYVi_M=W$WwxyYmKX?sF)eEa$yBlx12UuKb!Jt`4sGCf43Tsv^y zoDJ}(XnLxIt~w{LsRFazjFSYTE!Mpys-&<5LG3sfO`w3(}{Nft{dpG%*= zhcuR*Rg%&88XT%v-S*(c%eacIA3ZbCRILPLbDtb{gSG>Yg+Hy_pKsLT7j>Gk?F;Z& z!PEY*)%c(B#IWrrAY?|zr_ZwnMG7Vdk zqIUx2_K54dRI+&_eAg4mwV4zC2v08%ZaOBJ%3I12q$egGdYIrYfVYU>Wgsw*9Gww- 
zEsASg@bjY<+DH^8SudSV$P>$5cV;f4wKTb?a^_yVZW=5Q6OzVAg*Iu>(k#yYi9!`_ z&~}95d#I$!+uD8c^yM&n=TZ>6 z;#a$@Djb6E)xZ}nEv#UKxKb`iI1X{j%fAJkAV`QJCa7L??hA90J*gZMMxBHmYW+Fy zf3Btkj0;EIqlt*wT@-lw9!cu43I|T7JYIhP`^i-W--wS9f=^zW`LAm6Z7!n9Z(aQr z|LJqd$^j|ryUMd3`$1OYifPEB$OprY?Zb!9P7Yj8>gPJ)GIYD>lD*2zl0-2Z|cbJa)M>B6Z>$T*}f;qRd00#5$mz~6%bZIpk1M)p)s33 z3)y-Az6_|k&UFmrK-gbsDHQ*p4!pBG+}%X<`NFZQ}96zvl0&9lv*;gZ^ z4V;ku9FC(RS^B`m!+6U{5Rpg^nKQD|2TE|{`EF!ZZw)NKtG7HOaQrh~K@-K}oI8>B zfOU}-aX^$M8JDHEw^cwf9vZN{V6oViTZa>s8ihJw*7~58f>VZM`s`hX?8QHD+`Pyn zc`>u^@g4?jME}N0eRU473TN{)vQF;C*?Y1hEb_9HLX@;uFk}k}S0hi*5z|2nig8g= z64QAy^-Z%9p|t!|_8p&yKXES3ymyhOeG){8vqeK;@i~oEj|5i*IkO@iJq&JIL5kO}ClU zr1^Jz5FdgW9l~;WsYsLW zMOLiK`gL}Mb+8u6U;sh?PE9_j{c?w5^5sO*E=SD@hRxf_-;oR2R!+#gDD2D7MboIo zhXYxHExB=hzryCvz3lV>am~B)K@67fA>|S385j7@L)AW}xCp6s@7~*TJZdX4u-q+# zjr&6WaMgYWuw?fcU)GZQv+{Q-l^i!6m(o-I9jg?EkQ!z7-xZH>aJH2c37gGqwW~*m z(Rx+}>Q0zXQV_+8$M?I!OA2Kv+?yh0GZPR5;6mYlfG)#(zCI8q^DF}oB)4l^Bdhrk zsoI0QkqOe3?ZFZY;Bbt)k@d+QU&L>O<`DE%v+Tm@ds3yMIPOGkHSbSx*u8pOQm6t# zP}WFH z-Py)=pH`6;+m5&2UQvq})CeM77|468%{VzGy3mEl@cnlxcE;1_ zg%T z;k3%F`MJoqV0TEp9U2PS1UQZQ%*ZZNvvHK@7>pw7xsa}9_#XbrfrprRXeQsbwq}PT zgUe06rR>qDsHg(i?KVmvQ$j3K^8p7G>e_k^5?1fP7I>?Z;c-*&;1Z{#(3JC{%>T3i zm)LGl>|z7|GsdGrBeV2!+Mw2Dg5sf#nzEaTq0;pKX|4P2yw3bB;V&UyJ*pD24Ht{$ z57wJJ76=BuINTQ%6^oRWvyxLW_8@q9Iho3tnU*-@chV+W)6wHmtlEd#4~g&FBB+v6 zoVJ<}x%|0wABj}>#kY;Qd;BFWmSfPwT6ayyC@j5n%hg_V*2*!j)f_wFQfj})wuZZU z$$B3xT^RlLv>6n%;EOCx?#U*JeED=VHZJvXx$xrrJiiU-@IN^5H@%Z$+c@@?6>(*E zpL`)@lAL^10Tlp>Dtb}hAalRJ7X>3%Jf_cx`o$a59%fkm>@k7!*reymbte$bWcciv zI8`6ogG%f51&Z4=0oBj`Z0$rOADqhL${K*UOn8`q3WhacwzJ!2L*?~=I7oWQaMDl` zf)hY_YC>LgW130Dp;6*@4@w}=s}NzxW2F4IxsP0f>@+fS|2 zzPp$yk<}`K2S=Q~CzHjHo5aZ%>AA!ORl~NJ<`5`MNLl)&?~jylad8dHs^Uc55O$FO z9~TD|d>>lYZar~lC21p*WbeLC2Y8h6<}xPca_$y}^mTZ0(p#3-B(vR_MSEAh3rnEj(h6p9qhiYw z3~S3VWyFS{?LypKS4*)Ta?rQzNt7PN`1#>9lZ`4wF^JDr%?}~G`p1L-bQ&hk$A5CZ7Dv9jKhxJZULV>e+4Qp_371gxnRiCcnuN zC4sF`l=o&W4=HJg4HRlNuvG~a{XiBQ@u90Y<$Jtpd~{1r-iarHu;4opCB;OL%|=3; 
z7Ci9Ha0tZ#1n(Z4c&nzjKX`M$(y6i^9sz)|{lZ|+fvcgY@WFPe-zhM9mtBFcfg4vq z48mE>o~EwEAYzk}-i*zG@j3Mc;>QoMj;@|E`eXk0r;zhAPbnh9dSrG=1}))~?%lm%8?);u4C^jldm8-(zdABg+O`{UjOxCnh{ zVdJYq0?i$BbJ@_)$f~uP74R55JPRNsW^;Jv!-N~|(Xls2%yF2`s_ZU0K=O zM8xwvdORqW{}>z$_-W$sNkKPoB~jJbRoME3G*zMXq;+$qH~VGhDlDq00wqMtv79Z zFo5rLd-nNy5wp~!3kS3taS7M5ayA!)3TR+E-adD!SXM(_;~t)PvEWnQO4Vhs6fxbV z%){DeBkNZf!d@VG?{TaI1jHKV1gi%%f6YZ0M-z9NT^9QBP=?r)L9)A6DE5v)AL;Kb z<(ZnF=aqlWSZnlE&Zny8)|pW-W*J*UYOHiXS2+GbzpSpEm$v^oy&J-3Bxvh%`hg>1 zL%t0xtV{b-*QZQD^LP6q%RT5mf{XFJ>#O}^h_@2`ldFo@?JA16A{X{AO^_LNV70Ju zL~$8oeI}ghr`hsw^T6VgC9iBziB*+sarGtjn5j8yL3%5RQL@Ehw59`Uoizv%ZMgYH zqgb!WiTkSN$4xYq)4Mmk{$&eVs`HW5^uKE;EraLzUX!GQ5=hdb=6NFvF(t7n5jd$> zA=15N;koIqF1v@AIeQiFp--?K24sp*s~sGbe$>xe^4Rkl)aPJMxS)tYP~S@=iyG^N zz~7A{D7irS#9Y<#Bf#X20b1S@lhY2uQC-owL~)GVcvEx z%?~Zzhc<8a?|;yVA|W`atH*CEZBVfbWWHCP4p#vSws_+ieAT z`3%ShgX31XqRX1Oo6EBcV)pXn)F7{dI7kQ!bkR2Tci%%fCrNZ(lrejm!(HZSFLtl=k&O&SD6zIv|;SmDCNb*fPekS>`2jxTR@4+J* zjq9`^z()vgH3}qY`)d~Si=Kj~&O;xl_Q}zS;KOK(VrqX^`i-EDFT4NH2;Sa1L6fj4 zhUpo|q`$nA7C2ftwiR@$VV{dKjj;34K z!fD>(Vk+~aVqcs3$m(ixBUC6N3;lpOZ~DQ2hd~xs2ayhva1*oyt<9zMw=Xi^Gnn)) z;oy3na6)UqrWKs$i;ZB&e!thOmC2=MG1EGgRymG59QjiETO4w!G`|dXcCbakMMbbW z4?Y1xIAh{qD`-Lw=k}+tuDFSnUHB85vK58n=E1432eClErdniwEJth<1ASEMA!_b? 
z(bcke!(=LnY(C6#4)8Ta&9c;pxqh1)Tc{})j>S0Z#rEj&OxTW9MSHx)rb|0ePt;Cz{T6#z4WWIH;O8JE zCKoI;vU|U5&H>&t;T(>j0$y9#8x$H;aA!LGY5;rKfOtf2)z$+!8K0Y=Us#yiW!WB# zQW*I9*huTxph$lvFA<4T9J|cmkA8i62NEcj2KT3)jMel~2Ux9QxN>;1qu<>G-8!g= z*mI{LZXXC_CpL2qfMOr}=DYwx;@iYINPs4&5=+xQFNnG-5%!pgd#iT@3GqsXlmYO2 zFgQ?w_THI8FM==m#JAiI^*&y$d~&!f5XcRH0(>yLsSsa3km;NbVFt@~+rMAcalNTh z=NU5_H*WU={uat)bG7UBHjvQcA&VOSc`W%oUF&U?pIWW2)$eKgbO^bIQUEa29U0mm zOEL*qn&My;CkLDA&$ojfo&BW_;UFa$Dl&=7^c`(omMA`)eoZ0m@%LfPi`v#N-{6r; z5XMAI%n);Dgm`i{Su;|NtnVM=N{oESlB<2V*0r3M;OYAIx_sI_P3w`h2|>V#{;HC0 zi@4Se@?z!~GoGq)6(TsR%1om6EncpV_IBuTYr}EG(IDM5$Pvx6iIc9mG&MEVEg@|M`lv@N?3o@80OLw@H*%GRW3g z9uEG#ki^?oBr<0_TPLlnD|qQV1qvE)WCdWst>P(*`>ev}Z$gVdAL1HtR~Nm#cMB`r zj>X<8{f;S1dI^)MC36tbF6LbmMEt(#7$uu6C-yWRtGwyHwW;|gIXE`P$@F6&!c$d` zv2jQn5=W>HWmyr#T4!%Aw{fZ7@Rf>{`uxXv)mH$&u>lV9u+pSSy9WI3>ro>9>tiN- zzZV$x%NC9Ud~?SacX=X-F32@x)Okq*01j-v7w{@4d~e}d@bGu}Q*MkE-Zyu*paE-# z?>!za*E<;BG&wJNK0=Ao`YWl;Z2|&DTbZFz+p}SGCmW~Oa`(v@J-VkX^$*?Sp72NO z4!+_<4fPpwOZ$G6OLO`nod@YSk;o>B@gDf|0Zyw@6o@e!)}s(fMk36siX*X3$c zC&Z>$YQ4UvY5!dgpGq`O3sE-vh3-O z?)S{T_mDVp_13#ZxhGtl4=6}3u;FrHL5MVW_$Py7`vm2wFqE3(;J^y1vrAs;#P~|1 z1L6=C0IZ{WMMHaf3Wcp3fQ!Ln6_Wh9FXLR>$=8V%{bvWwlBMkq4glFRQns`i_8u=_3_1QBS z#U;Mu=HbAY)Poj`mHHDx;87lrAAELGr)XFG!j>|x#u`Gn>n}y$&PxCBdSbreqydAP zA9EOU{`(f)@P_jgx7!9V=|*w)umO8x?vDVH#7(cX+4AalS|5 z@Ff+MW30>>w(n4}aBX(;*m(mn)4H&UNQvz^kGkBzKy4b^mQu*`iD%#{BOzDlMxByh{(K1$*KQeirIC$|w#uAXF=OYpu=UXT;0HIj*X%N{tnJyk`UpWyrxRi@n=pttG0B#SN88KJTQ7QM* z-#sk@T1E4;#8=%*!_H9=VY6RHZd;cddu#?V$hfavg|wV=fS3&IN&xE92Wl>;)lnG> z8d_eFj1Vi(p`oEUkN=igJIB0wOPxfID;u&!2@H!N5X=`s{_i1f=8Ln+ycCQChCFK? 
zCqIAK3`eldR}<9ku#t@KKSrRmD-jnKe>Nt#>$z{iU3V-5zgt!1@&75g5+*nNJ9u?*PR)|;G?ua17*=I_yTH^+)XHoxtI+Xb1-7Jla78f@b|_XG!JJhlviqja@7#0Os;jnQY39 zaRrW@B$4rvat^5C<-zVhaC8;)DS*NuIwHHp&K`h^HpqMS@JM!x80yz;_7#?%CwVhd zx>FcOwh^)|>bQmuLKMnsa$b^Z`7|MnT#LL<)Pt}01u`%ti;Wf&cX4Cz_zZDjNV=R@ z?IMDA=8?ZEKKCT?!yA>QSBr!inXEpKFtIR(`|YlV1ciwCo}@lX0f3|B{L!VK{@%tO zj1cBbe`RAsrLOZkm^((#p5k^VWJu}$&mJcwM3(f8$E^kzV`9(c#5U}l(J4$5B)+W+}b)R3~|OFH?k}T4{FEL)vZr281JD>;|e8!i{>y z%F6OZd2y9*;UbGUopS~ZUx_Vs71n9~14sydB1twwH6u?&YGVO=mgB!eg*1L=n>>+D zj;j8}=d>FN!WrT~E7JO>FkRXe|LN#AHvS?4+;POQg4&7v;m?o1x#w;})^&A(Hq+kf zgcb~VNKSc#u-W@(LJ&eAW;J`uvwjGkb&W_6z4U|0>dN|W6?5;aVC;pD@H(hO0a9w} zeU4m?pH@<_u;@5$FP@-Xn-?N6Q=jRieSH^V$u`Xc5*J&0TDsXi{Iq!;-i_}%-+tZ~ zT0>kI7VhZJkB^#bxS!NM?%U94NWl^vW~KcnkNs^n@9u#|VPGZU*6sJlyPZU5=1-5Q z89^^MFd%|6O+v{%Pu-Fo$cL19-SJl2`H@e?tCo;*zPQWdH*g}rjD7q^o3KSh896-r z6~U`!pKyx4kOyk+O~o(oH4(sO}WVNTy%PJ zX!8uKG_N{VUIo=w`^My{q2$vuLN-?Jn<xgjBB2#-Yi^AXu-(qR9)JI`W^k2FSG6o@ANJq&(Z--dm z1SMCm5z@C4;$}f^*1&B2$GZ&$^%`%Y^K+*^s$6LTX<>baI$kEuRWRt!BT~*j*H-_1 zL;z#|^G-!aPY-7c1LChs4TxAo?-hj4G$RcrSrY$ZbWW7jmB4x*zh7{8gy;N=bJ|5l zTmI>b6SBxm#GDHMDnoDa&6*#XZwpeZ!)Av$y-j$3-=pm;9-~kRRP3B(Xj3-j6wWQY zeBbWlxk+j1BJWp(lsWii7oV_fXP!qHFdI|oZFoHzs<(7U%Ooy4j=A5w3hla9Ce4%i zxZ7%r=O&k2e$?G@&&7c)DJw!&kTNjrzHS8n3hX<1%Eyes(hJ)@$Ovw{PphMc_7vP; zx)!fH^{QyezjIpI!c%qna+7p~@kx1guwADIHdU^+Xf{5Ug&jZM&p_sH1HMs^ho84J zj59;OiYr#-WqV^^iP`$aDb@n;QzYwxz^Bgelg<#WA12D7zP|5M=Pjd5jEgn-Xei*M z-8|wC1sBZ;lKOaR*m8jzhe&u#YC?yJ;uZYp#_Oypb} z`IE^nRFqE+4j+QEB4BJT3qm}J*?!eMl={<`{wJ{)Q|2&tTk*4NGxy%Q!Yi{uoYc%P z9M|1*15X_oJ-cuQV*-E~%-^bd^n7)cF0~i@FaO#5Bb3{Z*K(dD|9Heq&c{V>l?B{a zxQuV}^J0>d9qw7y1gs=3pVXSlD_$;5!d#fdI`x;lRe;1c5P91UcO`r@|C(YTV1&z- zP{jQ;oS6WmyOi{e;E#OWvT_+(8?q_!fx*FBe{+0d5H@4qR$;*igLvA{kQ)xTwbr79 z;={v&Or?hEiialU)`U3Mv@!Y`K+vyPSy`PNbqh26^DC}3)ys3F1Ir0@=-4M5zLwvx zsH1xO&jG6WLUb(bUvN2LkY&1_zGx2{H3(!0-u}~vI`(HzEsntOx}8K0E4^?}MP;^> z>N5yq-T6(|@z5F~nkErD*}>fM(1YBTJ)~f~ZTjxUMKxs-7F`d_=Iy=(1hnTp^`^r& zeoVEqd&~^}UWVS6y*E<0%``g^@Q7jl%B)|!~ 
z5(RV&4e8;Q9*;yJMf@VSb*|Iu%lcyFEQ{DGgHW`V2DN#VrTLA=&(7za$p|&1#-6uR z46BN+KXJ)feAD|T(%|L!niVsRe$^L;4@v^P{8{*VGbQ-w4=vQ|_g)9QW|V%wI_Bp9YF+hYu6Cv5<%~4_;Lm*;F>`iFs3?LnNq=XY8oy9ZH zZNdqzugcve#!J_v)$0c}KuJ|T`Y}yD{oX$F#4eM>>gm1%4zv6vmgOd?q zLHd7LFXdAXsmPb5Sih?Av-BRlVKt-t1mxHyx2+bMD306)CyIsF$zl%uAEh=4_o3aSDvWf7<}0Cen4Al;NLg1WB_d=;=VXNqX182i+Tf{x)W8cW{C5E%9)GgZ)5(?#`Z(WXBoEdY**sa(5d{NiVgYgE_@WB2+ zef|TgW=ht(qdy#f6yEexr84}{y%;3JvlKQ48lHKHPS7Ol{17C#3|{?<=JDgI~sHA7=Dc+}l(quEzT zdeCg80@|{RClplg>+@)l=&P0~`i(b+H8xpvg+|X_bP?r0uzY$vke96l6VhH7hxsdE zz+E4NdDi%#;<1a+ox?UA61756Znib=@Z7BgJBW$QR$91e~jwixdUPl=rN zh}*`5Q2;OQjbsuCkI_HxOxe@~!Fe(s)09w+7q8tlga(M}M9uC8#8*;;X8oztbr1ry zS$};Me>Yi_(0}^(pNzb(rL4#!zV$420Rb&c3HV+5p@3>VH&=e{Qo(BrvzI?gk$$j8 z*a@c~y_D~VIXn4s#&foviG4u!Sq~u6Zrg4q`^^;c`zg2+ zZO&hoa=O}^T7VxLv7Ia{xgi2(A5srF#N&F4=IfQ?%G*PWxsuLr?`)U(97|r1m5i>d zbx++6*)eA^F6x7^1jr?d_xEQeUevrWyr8 zPw1QXR%ZbxmqETy$estOtoO-gyX;nr)g2Fd++XYjKJ!j`T6LC zWVN=#yA#rs9ex$|65x9SIT7r#R;D*Sa77!aQkkp^Nb={|4(3NfqB>)(T)gSm$NoEu zCgh-LNf0o6)tb$dg#3FX(%N?cf-?v;qBO~;6OybGjP@@j&i1m8w6P(z@i(y;PhP(G z%=f|X2=zA;xFbDj(@|Ck+Zcks3Psg|j#pnOqQ8_7sZf?(he{rR8pPC>o zZY_G(!5_e&miUmN4WF8d%D>gIH6}4?aAC(Xo;z9AF0Ay5`@q7?b$X@Bk_qGextkWT z)aWiTIH9A_-sXftH0vx4Z#vYowG!}2cy!Wp8w?&8zw*vHpOuvbtdg#M3l!)oes+J4sUT~wUIV0Ot_t?y1SX$X`V!U-rZR@VF$CK%E0J^#4EDI|sn(v}XN)fl08u+lIN$G}d85Yezo{I3(xTc9 zUNqNQGH498uMS@d>iM4*;AQG4h5m&dfuJNQe>IH4sSKSzT71FXJYb|bIZ&hQ{9xAX z4|HKei7!gIe41}vc;ToQ-M_%QYpX#brf~4;Qu@tUN){a_r;^RLM&yr_Eu%-SOaBlw zyV^nawSP5XL%G@=%Zde3(B{y69h_hP>|i^}&jK5M=qHs4gjeX+Gs=02JJ;>LTRRkF z#$fytv!nkVNoeDc77H9JM+bZiEh$Yf(*8YN!F=!NWiHUXZZ{1&zJasf=SR}u!t`}%tkX$*YQXVo| z{Y%H*{z3{j_T3)VL0(!qX=7tU%iNh8;W%Q4#q!ky35nRs3e0hy2OZdg70e_^lvgO} z>D&64BtavQuz^Sr5Duf*5Jv`%6u;6kc)4#a>-n)PIWerVllYSXOh12=nY2L%zYFh~U`g;x@R=NBNg&V=)r&}JhT2p?n zg^?w);A#N(S7+k=gY{)U5gdimkO};)*03_A$@|oJoSa?veiddFgKmg-b`zPL8+bJO zk?+s<3<&9z3yaiY%rZl0s8NuUCm;rr+V!_3Zw&N)Ov?7Uephx`)Vw1#JL7%stQE<; zouWe6|7(B4QKhcx1Y2EMf0|_T1Ks3=b!dB6raqe03VqM%T3*heYPN8m)msiCeuk(C 
zC5mgMRHNH$Lf^kKK6Bbq-@5y>*jFmM4 z#5bwy0z&*YX+1@hI4EwH?i34@zj@W-zFpJO>QT61p7g)h#&m>hZOkHM{bi446zSf^ z)?=lp`x7SWIyo%GOH2HZuCQ@QX8k?Bf;Ctc3~Qj#Rjg8aC0~}3`@w$i7=t9O$jto^ zz|Jri>9&T4cFQyBNwu~3?*%PYv7}$zO_ROv+7iB<%{$2{`Mzd2QkVBWZ(>mZFEOP6 zZ`UW`@Mxv#8#;PcczDCV@-CiyB-TYXUi~8|Qfh^W2|+Uo3pIuRSM0hHWVAOBjY7>?}k?`Ocq6ddi2nehS_QN3%N5e<#{#GB4|kW)=GHYcg!) zXOE3^F&MIQvPKN|6k8r-qf{2`_6eo)Dt@ zjcQhb9(?E+mo;HkJN;og`Qo%0zL8?0B0U=d$1-AvFRR!)3O&RU&KFvz^8^tFFMHYkHof#7JmY7s3?O-k~6v(jNbCw)C1EzqDZFe&xB3lBuf( zeimwX6Wk5#J}T=W^8-wjiF5PY7D?4IEEqqB;mNSoBz#9+vUrn?I$Pg_?eU}E0RTAy zzTitWZ)Iit{Ruw>u&<@v@>{E^K7+m*^GPOl1_~43AKISetZMZh8@+q{7n703AIUCT zgfem0Ey4%~R30!ZuHWu`gF;iT3kUNE(hu&wPL`64`q5E`aNH#h?`LreHHsaLXL@hi zKxUx;>Y&)@=vTdt90Ip$DgC(ib;OL=PTMmuU>4TpSFiE;KZo)K^HfI<9juk{7rGPgIL4lwc*h0ZeQ!9i`?y(PrlE-#gLrm7H{RI0 zFD!+%RQMepTHhlpFck~~Zvz88*g4o;8LHr9f;Z#Fuo#4){AchDYqWiK{J6U9jDrh+ zU2Fhh`T+ZOAXQGK)ufzpZ2BN&xisTd2W6$NzSY`F$wy6DWxHVW;sZlM?pM2Ufu}|0 zw@kgelj1SN4+`r%c_P@@*l7FkPZH_l3vZDisv&XRR4;d3gJOb>fXCFnGVzw%vr^QVqhI66_L(UMH+L{Z)+w*Pghm+M>(1T7lT!f?)Mal) zD~;2-2wmT(#?-DC2N^orVrjEY)7mo+>g7=?WjlkK@44=}c6KN}^|XpFh%DJWnj!yST5CHvw#zTPudk0sKmfAU-omEa zVla5o8XX_2$8qO}UxUrRiP{je6!Al|W9P5NI0LUxLEIGfU@ibXByI`0G#MVVBztSW zwd2fEPHpB&@iGNXBYN!@{E3(w<~GfTcZJQ4=tT0gIHDgBLsnn+q>op!nAasU--&>h z+6_1`T!flW$fZ>ozqPdQeuoRxFi_P1xCZ~1?XI^B?sc34HQId3CXMGYQqV*hhAoJl zdZ37D^37WY?h;rPlOeeD^hg0aE7tXnPYbGmGHwUNI9?vZPdNDtJm z=1K?j*0I#nwu?|ibN~b-wqx7->%YIFM>W685D`ybad=ke|1eKCcjt=KRHU^ zSZ3bdg389x)R6xJL(C2?u<_Gx;al0!=!Jx@SoaO6DCa@f2c+^a3Uw?Ej_GJm+CCj`TcEkd_7u<;$0gI*E}{M*=`xD9qBY1x#{ep(nU@@Batz`Fz+_DS_y$qBw+0R4hekaqff$) zQpIhz3Cytjx!~-Yd^xT zL8flH=~vWt@8-Di%I#B6GIJr*N~c~>)VLaKmwF$%0g>`f&&l+8dY{k6`}UTdCl$s7 z7PT*Jz|RZNI1Lt>#O}_)K3QDrh(pjkv2O}XZpj<;s5huhxF_|Q_SQBuwN|fub2^M< zu=FMgS4%wBbt4Wsvo(&e2@@axs~q~>Y4t|!A>zLT|gx<93?X!W+JDOLfek6I4FCWu~7T2CFOE|iyXu_h} zLT#3XkE14gRWgI#p4zEy6*M^0(+^_r$7=d06RHd+V)@qtyw)AceE6W0m+4yH zu8t)i4ut$W+tyK5S&g_XH)wh)3oQp>@nb9Vbv zjj9|wEnE%1=H-&Ina3Dv(>r0S+A>bkl42=U{K2F-;_-WRpEHx3z)WiB(y<4#iG6}3A#kb=}2 
zb+*k%%<@T_kd(SRdk{JYbps^!(^^{5)J#3=G}= z=R=YtydydB= zHdiYjml+fI9PQYFM$l*+yS3026C2yKNyC#=`9OZkyW$EP0fpR9>cb45e06zF@xa3S z`7SEM{O}T1Urn`7*MApFHHTmVnwMapN3@)fs4&2 zQ}4|~YNw9|;U9YhA)=&j5A=uqWxt^JzM7zWJIX6nv*q%)K79&6E7jJP%3 znvp~zm_dD6j~3Es1OpODg@b%0Ze(_2RZ=k1bi7v>PZA}c`>}d936tfrH(>=DUTz9z%v7uM)MCI#Ye0`uT zz^#+fv#6a~sF!o$QpkC)59vW<3OG3fFp2JBzC+k$|8mJKhq01t-HCpYYOk7?X4{w^ zyUiYe&izyPgwvnZN**YOzAQ5Q6bv2cA*Vl!+}g%fE^N%&Hpkzd4i%d9$){ zeT9<-xqY0A%dBqrchG~O4>8bBPGoZa7_|7TkfF*P7b6Dsx?tQK=vXPX^>swrov-Eh?=yb| zJDT3}#wNj5_6=b|@5-0gp?f(u?SFpy#cdI{7Gh@LOoGl-4CEYx*-nngr1P^SUrsSm zQg&-dgJa-@9w84|=$aK}hPRc}BPpzVCOux0;)u|JfNUpS$=J z1ISVwgaN8Y#hn>k~$wWH+PnQzYf5cg8TX4&)aLNmsq~9 z$$7nrcmo-q1-PUS7d#+LOI8dO%cWnT4Nzi-DcT##*k%KZUYP^oi`t zCsX&jhvgA5DlI3Y2~LRTUW zkQJi!tF!n)5hrba59bKP`S0`#*%i(Xa?Wa@UzyT|@!AJYtyz1=K!T&yJu+vEL`KIO zb$UVbj_AhMFcGm*0R{RUsI-u~Cp1BZ7=cVbIC6w|DzrcjWaDAP_Nxa68&pqzA2C57 z6C?<%&*wzR*WAm*ti&a%ZT`^Z77R<&WNT8iB<2zvXSvRWKU8@)bkF}G@m;jG&YPjf zwtQ=JKAZbXhy0?J!LLe&a6%Nh4EuW^NR4+Z=v$?B@3)x2dk>8Lh7Mp{ed;f zkDTPNyBgYv+LupZ8?4kB%hlQ0+ZeMx4gCjiAk5Y<6)Hawgij65v{3joxDc5&1?>C^ zoq!_o6^SS6;Man{eq@38RknH152K;}K%IO()Y zXD4$y?}U*~VX50@@tP&Did*P2}%U z1whZnWxH4EKIAPxBwL#i@;X$<*kJ7>Ec$qGGc>9B= z=hms-p!izV+$E?(7h)Y(eompSm^^*)aeUTazl_06cMJ4PkFQ|kef#Pz2E=B7R+gJcMD<|(8u-I(P$Lah{CB^WVsrz$FkeU-3f22>}^+5X-7hg7#iQo3^;-#Z7^jr_Y? 
zVJ~-C-dcJ-Ql=x8JF+6I4WBi(k=Q^4PcFo4Lq2z0W}=Qc9HX*B zmSwJ90(;!~hU39W4qjwlI*jc#qoI^%Gg-m)GjnS?XJ)!%CmD&or z%VIQ8-@SmVICG!vB#X7xP9crAM30fWx()SpUUY1Dov_DEKyzBj#!#meP^+Xbw@v)ZW`L5Y8z*dc;dV<<+qCoZTN*QLC@UNeX_D2;{tWTIlD=e=m=e zvYZm_{P}6-JlAIWwex!qZXvL2PA&zWK*o?ZpHpR9z0dJo<{zL00h5h?_v+$OK}Y~W zWqb44d7=mjVsKi<@7GQ^Wq9qGoCAoj%7o{O&hB0alBKGeAUw)gf^d!_JNLi8Vxp%X zEM7NmfF*?rcsfSH0E!|0`?HP+S!y2Ix_>gRn@7qKi~-rHvSumKod*#}H#chVDKyPI z-}+IO$jj@p5<*lrQJju}ip^m3$;oyfx8IDO&@N@HoZYsYDtgUyr(MO)gnlI1b!bS?9?Y75G88cL zJxu@?%gkmKxfKIJX_Qxbi06LW@|e2pE+``$hr=LakUj9dK2U-n^fq6Wjg8=b{=J!n zL`%c1mz>Z>UW7bCC1U@vRc`n+V1LiZuN)5;oavW(j$5g#f$wLLP3xos5jG*y(UkWx z<+b6|@WC{X6h;Y;p!n4puK~>dr2c8f+GusSMjQ|DI2k1zIr%#IJgJwfT%9+Lsz!Fp zf8^%cb>Ct<8H)2RpVtB*tZFs3|C5)LrOyIgzINt!us!Ma$tR{LOopg$x6<(&4hT%| z9v66eczeU2D>ME0(YRWf@0*Rsk-nCqot%{hbbV#yb1xT9 zS=oc>*SPaHtfzqru1Fb_>IO5~NszEC_@Ui>4;La+RSD6$9 z5JA0;Vw*-1zNF5^$Ovl}qb8)&af%ttZM28gD5wEl1Sv^v91jfx!=GLKz+Z)Cl|taI z|7};vLVdLd^0_{sY_ttaj9beRk+Y}!lPcTlzy}DE8!ACp5-71Lsj4Nsw|afy`SK7+ z@0`_=k=E{b92}DYkEnR+xoc8kPY&z1EI7=S2F2g`?k0@f;(!5xbZ~dR6&+h^`ju3+ z%X3OA1&0IV2z+ovB=!cPc>%A0$HxWR87=_pIo$N^;-xxMR|i>o7N~P(|ibo{%3zt{B%e9Yow;BDL-7 zoD;vOqNeLYNi{>IU#f?T9D1+OnTC2+H%Vtjd^5=-PdJu+bRVN~<~k|3j=H`|L`X@q@OZgtSNw1?A+01Xg~X`Nu<`aAknz%Vru4lw}3H36|B=(yPP<_C2Y)bOF& z`TkBH^Zddh?oWr)Usogrs7RT~$Ggcl>7ZN$8_v)-Z>E7!GI?MwW$8e8dnp_> zUZ~tqbaNABV{5#dDj5l42*3tLLOmVgAr|SHs)G-6oNu2#PF*NX#!|?g+<;x!0)$ZG zA{QyWHeMW^SSX+~Ff&Lgx--oTBuAeLl?c`6Xr+%XCn4gA6?9#fmF0|BeKMZ-Tbqb3 ze53!om?HkK7sY?n+0~*|@wjcrJuFcJUl_ARx;j<~8_IN;s0U@g%0Qk(0aPw!&kp8l z9{+_1O(8n}Jk|aCf2hdbdn9}Bkr^R-7fNR;I*xK=)SuizQZ=QEK!hnvK7N zk=7CXGq9(hf*zpXz~CTY(&yrIq}lqON)%&W{k9+%Kg5%-)Pt_RK8Fq7cC1?flr!K4 zAG_@$2v+YeO|V`@&*gP9c|aL<9r$IhGoZrixRA%5!mwB?cZ2hyC zEFYm_VuM2omnrWlf#$pm(q$ zjOMM5IFoQ(LrwzX^(1To4P-ZGZP0sZ5JR1Cd zsgHD1?poj(=^r&GHBH}ep(3w34*T4N3)0ZCy1e2P{|CyMp+#zrfRcNP=5zNnOEI&u-#1pDMq4qWdNwY zcP9%n(I?-O=K~0v{F!$6@TW#RXDgf@=*iD<$Q({M5b}@@#mBJT@uLdk$L=4jU=gZg z_IKb}F4C%J)|PUb9k;2ciaDY&jL8N^4v^Z=0G4Jqoa_EU=uJ?O{?}f-K&t$BpS(AZ 
zCpR*eZg}&VJ~^v(7@MDM#X7${+x9+KGBW&R9D2KYW%t){UD+oTI1tT(bL!^r18mUS zv2k%F2jWrfEZ4iLi~4tk3pA_3RKWPlD=vr}ar`D?!FlX0Xnmch`7$LW1jlwDMGHQN zlQ-l15Tz60{s19yeC^sbpgjRWrxj8jV^YwT|8hCD#I3Cg&?C`=E)K|F+AIWVK?0Nn z+A-yPcSAr1b-s6X(l3m@1>7}0z`oyGVe*B(a}>|aw>(4EemtK5%vx-Bb9vfA)1z%2P-k!W7&4b&Z0KpBG(+vAIK zG7K^5OTUwuPxpBsg)g-DTfcRbY|s>=_mrIPIEz3%uGwuY93|#yro{472~CHO7|GOa zH5q4n?U5Vu#wm_=Ud=K0gG-)D5BS_z z#A!v5;xKy?XMC&&re)EnmdvgT6HJWF`7Xjv59kKLnO~IaHu#YftdAG?TwF&RB*O); zKJxO<;Z}(`ip3<^#FMnr)#~tl|KZjj$Q^RVi3*tov~aY$9Lzoe+pUK7osdDwrVRh% zP7wN!(;|vZg*K zuZ%YL;cdP)zxY^iSRe+KTW79X+y|-G+)!q*vv&9>FHjjzD#RfcvdUwVNI#7iDu4j2 z2{rmj3n;mT_Zunm)IAakz!>)GHz3+#e2{Qvgm|qabB{n!Vqo|IY~lR8kD>;~nQB1Q zK4P=4z8NrHPy9Vo+Wq|kK)AZPZ~F>t4~FpsPD?7f&LM=EoUu-#`R{zf^`Z7een}q; z%ZtksDhe7PoOOoFDd23M1D;r35ga7E=bt=!u=Kt#@G`?=ndbSk-w}Y;v;n`c(zw)C zfYFStkY>crW&E+b>+ClS7&fiQZmQn3WRqe~!A$Ct=BM0tUtC^FQ55A88yZG|~f7D&b zl_>v&f_y$m8Ho48q|z=pIzuOQ+$EAN*#q^#5?tBK(8j`rD>zpgwus;=lG)nn6}QG`Fu7R%9Yrxwkyv796EHVFA)hF?+jmk z+8r9`T93m?yPm#*BU)OqBD#pYTb+;LMvIOG)b1UfzP>Sxxyjg@B3Wh>z*x8`ShA;f zK)&qF<&5W_XPTKpSkd?l!trA>-Y*}^`=aa+uC5tZSd!aCe#$B{Kj z+vchGe!%^cyIG=}(w(X%0SVe-yeUg$k$6UkYJEB`iQJF&w0QED9~QdbyYg;Pj}#1C z(IO?K_bdLU4E|wis+xrm|+DHX-#D7e0U#h$-UQ)ey?6pm7QIyZXF4DAvO>g(pAEMsJ6I29QcUH3Ro zv+Zu9*-}0%+}NRDcX82%>A0^=tVm5j4_?pzGV|fdKPx zi=F7!eEOuF5SJiAny`_jY|v{p5Asyk4K5%Bk?Eo6*M;Yg1`T2=kZ0S!v9`{Ax0?nK zvCzaH+!cg8H_`n4sm7yO|70cvkuk!$!QbBxov_iLB5u7Wkm>>EN{}Dp=lrh$2NzeL z60b9RdA#sQgG1xM!j=+WFp%)9{l>oEL)`LTB*>{fX!{3&B_GSp0SL<7eC=e-LfHsN z`o;@xcU3dnuAYC;7<-8FoR<6u)Njb%E`X4GLv8hJ?XBNUQr?n4Sc&j{#wsL~S6+__ zCS;gT%&&_!yd4T>>E3*}04Y6rM{&D3n~>1Eia{6l<~_nI{60ByUZ=cBXy}XZS738BU;W|w^m{3b~l5zZHJ`YW0YfOCvzpUdmT^fAedPvQ^nvTbf>u%JA1m z?tqpB;U9Q@%j?D>wX(9JKAaca-QD##Nl5y7T`&CTTLPS{o3~%$5E9PyyE^MvS}D^? 
zJTDoyFP!;^Q8(?zGiN!6mcm|DQc)7zTuNF49LTz>NgEPHIXO9ZXxVuai&N!?oz5E* zPi87C@4{1QMUVADd)?l2ZLh-gXkex#xqw(eVCRxA9#Iw$$k|qR%GlAd5lmjR!Z?{r z9)w#?oUi0+faHnm)aCBs2)tjx-*3Gfm}}Uhe~MW>w?ZJtcH=HK=hmuT2_?_{;`?_o zZ#OgqTn%2u(|cMjOsJ52x8XFhN$Y{WONwrmIQ2MNk$MrO9vKe+biE z8=y42TKgL~`O;RzZPOSGmcgdPOY-%4yt}nW<>lEx2By05Jr-h{C7U(nnsZICM1UcQ zQc~D|@zl)H9B9+&uirJ*)p0TBcs$v)o(tWryvLnN>U;(o`DWzEM2%~li37RX1KWIw zX3(=Kp6p%$aO}}ap8ld*DdkQ6#VPLjQ?5_H-;(~Wv+7hIexDy>EEEGj2!k~tE>=xP zz6Z)Cy#AIumu(B>G{HIs{ULozk(XXF&(TC6@K|q#A{b0~&|RqgNP!1Mop+WdX@xPL zdY)alK#9OjlbNHh@Hs5Z1vBQu*Dn|DILol>^AE17f&rE{PbVsv-+Kph8TQTiniy6! zqArWd^voO4W#z&8IERF^5r-*%Qr%994$=W|_uJ6z!Ipe7MYItT5jI*HktFTI9*BoZ zuOR=2TG}Bk-`tp}w0;xpstIw5+3cSo@mA>TH(G?`=Kw@^-#e*ybgQ?ljYxyraK* z9g$EOy#tZVi{tMk)8G>ae3Hr2KWO1?2p?yNcd-9Trq}!FuT1kWfarE0661;}%u@TZ zp0e+$v-@H%`Wx)y+phEEpKtOg8dIZ|5x`YbR6<-W!=z$vWk!e+!2!;}dn+PJF0C_k zfn&cvyalDN4m=OV0Y=%|;GY4Y2^G!CKu)4vEM`!P6u2j^SJ-Wvp^djeW$EE@65+ii z1|$MiB6p&4iS>Pxd9ryA@H}v7z(W)oIRm0gE9&$%p0uF&+sd`ru#|AY{>O&)ZY{2A zwAadaf|DtbKg=WUan-N*J{U;5-7hjArvb!0eMp#xS?Dqz))!F$%m{)@L7XhgFXz~Z z`>ng>PnmAO6YVV@OEg>@{!x1t)2k&RHork(Zhjvn#tfPRo89;x^gIK2U*X_S?YjSz zRxd`+nAcBjySbRouY?j0xR(|~*~<0w+9~3W>gq3Wi7p5H7Z;a@%AP%fHF(eS)E>g2 z?;JRc70rBWc%osGl959DM+B|$$+hwptv}+1nwmD?lEHhqAE&s` z0+~%aV|B)#UnT!^p8Ar2X@mhqHi(3(;1||^6@%54b!Bj%@T>2{@9A$Vf(#0$Y7&kh zG-Yd5KaUvGjP@{=@u~BH2L~0u@!RDg2(=MubVse>;T={yMOt0FK3&Q5jZI3rhd^9~ z)_esRFGg}G%uz$FylZ6^muNC8q@>L|*=si+fAnQanxf>jfA4KvQEs zzq1G9NUehip=?%uiK=65V=NhSTLiZs>AI4-3&+FVWrC)fdA77(S>>Cv_b?fpw^V_k zdEooyRleg2e+2by%!2lJl(>Oo`Om&!8{PTFWU=_2#CUS)KpvZradGHg*B}ssf~Mpn zK)f+reBb)Acn|*!qBP-VB#EWfN_`BIk1vevP+3(>ZY>9{<8MU?`dl+gXP`@pU9P@PB*V51Ck)q_OEvl>J-~0Ql z0i6nQ$tpNCfx`fsxnZNM5$F1a%Az*GP)6Ue@_>16%u0BYzA)rAE&ohL+t>hmjPT{k zk0<}qZlBjYqq_oT`=DjAvQB4BR9l?n8h}+6oNo`9I`+FoSafn;XLmLplwL93&&WvO z3Yab8$OHuMjii$soJKEHycfMbBqmb+njDnh_CK28=vwQzt_eF(SXj7X@*4DM_zQtg zE)w@Qqe4TOB_zgmHO3PX4qpGteu8%eH$2Qmiow!SNj{QR{1!F7u+xODlj551ga;-5 zbsDjC1+Z;&5zSl)6Td%Uz{8^HsF>(-Owo;} 
znT>_Vm@bBoBf^_bXQm}D$#g~)R?yLoYTNn~*l)q7siDC`BQVf>A;<3aWT9=DOc1nD5fKp#X!#qX2C<1*7uymrR6URr3D4P)QhJ|JhNIGX1mD-Bl~uKCkULU#h{mTxkB*R zRES*LKCI&IZzsR#{6LDfgF|`A7p$s1aXNBpoYXQ~$B~|^%)~oCMB;F+`E_>rf>vj) zu;ONJAilSQ1Gji;20NIicYkt`25Y$fpXNT6b8Ue*AMf`iN-_}sx~zClmdt?;Uq-)) z3*sCmC_x2-cW+WJ0^i!=`yjK^)R+72l0lBUqInmPQ6t%w!U6ecD-DQliaF(F;yoK3nS&r1*_wb(SB|2~PUjfyA) zY;h_EEC(t?{-hiGr`}d3^Xy*&r5Fxw`Ukc-qe2Q{mEKYKQT|i5)|2Dr8(9N_&8~4> ze_hWQR4rgejdPW1yg4LXpiB!&nLVl>D?uCO*hOpp*=m@VSKOywQ}I2jxq<;t$@{P- z1ePyjz(>nlNcShO z$L`gyGDwC3UtU{8FGb3gtLX6To!Rdss#?O&|JIL0WyW`Ic1xyvzALE!QsZ{ahv=uX z8;>7EnaO&t{h-&Do_SM;8>#{t^R>Y&fBtEXgTK3g&H1%)Cd9@0E0Hcl3>DoC1cM8J z0CMIp>R2fgWl-pZCA0DU_%L$w`oJd-y)ifN^V$Ce4h(lyZG~5yoY!d_J#C98;FV1GJKPz5}Eu!2} z>-mGalz>@6=ME>G{{h{z!Uy7;rk;c7((v;>>$Q`I5h` zp4*VLJD;4an{+CHATr|3nZ~3wk$jf7pT}$SfLvl@x5QcdSjudK)gU9i4QAu@O$z>n z2RZkGZR+2OE27d96Z1D5aUh`jCeG8TFSUC9q^<57(~&8_r%g@mCITxQELk1Xk(U*( z3j%K5X6r*U4K>-DEa1>x#9V7~}o)$Ri+KX5Ol zeJ8UJ+}9j#j{qB4E4f3sbv z3ddGUFB*)$>^lZj#H6S9M`zsy1Rt8Qho)nGnC=)la>#%{Ga3?#+ev!YN=e+hzWWik zz51t0E;YR>V`EQwkNcqk0Qn!MpL}`x+(!Cq+16{*HwRytBrUNs4a|ew}FQvZz*$RUj@tqs#|g z&JLK#hg^jcoz15_>R2+JhG+ii2E@b>2FF3WxOPgfNDMlZ+V!!WvkMy=n>4q}`S}Ia zax;b&c?db4VtbD|C7^ThlIiG>T659BX3_r(b`VyNE76lxxy&dC*u3KVO-B_E*%7Bh zihosAUEOx{E>V_R51n)KdrExdnq8(z*ET%F;QftvY#NC>@RD&!seJ&p za1y3<%(p8yV`XIrUk#NU{;dIHU-)iEzvl9*MjDyDNr;apkBx2j($m+Me6}ZW3Vvlf zrksp%e3h(C>M|5XW8q!Jhmli~Uw+Y%2_#4aw2z(<2Ms3twy?LfjOh*J3_BZd!?&Su zhyTsVBIZ^iwGl7Td-|R(tyKHTPZTW|hdlDAT@k9WrjV+o-l~@?+Hw!iNr0+nd2!0r z00Tyo#zSmTEmFD-n~NX<)3K)8+68eIoJy*At^-rb>`xH+7$tZ!tJ2Tw-JGe5(&bbFp%C1HPx_L;P;HpBTgQ$C#~{}}|K6aj zyC6(@zP#R6NdJ;lVZB1=wmG5DiAzyr*g(pmlVLS<7Cxw0@pQ=Rugk^3tF2#=<7Qsz z+1ZL!!XjXIDtvP78gRF+mXC*Dft&3oI8{6y`viG$C6)lb!PC$IIcK3OvOTcw0e@l) zi;6G+?qIm{)UA_ZPD{(VrSV%oghqf;>k@>Yn!v zQ(;7S7}^>MeH6k_m|@HL`9PAnPiM(wHNvfz98G5|d-jsS!=nLBKmaBKKtWnV>6~XE zytbL|;DGG(>y*sI_DF*0q|AYU8)P43y*^(lO#=06Z=7uB4+HfM|DG& z%ZIv(ztg%G&?5dH>C)r+g`S|s=+gP{9qgFVzS@usY|}H$)*+eBhN$^zd!CK=C48I7 zE>b@D#A_2$=MR28g2ZXcz9a#=jl|NwZ?Bz 
zCi}RIs<{`;AX+HCM-hg1ADi;(XiEtOY0ucRGF#9=C3;j(I+>2ma}zR3cI+pQG&D~g zpc%6z=WcApMy2^MT1UrZgQHxvRVVKJ{5;y*d=17z_qmO1+k*(u2B5&r^V6zuniK|o zV1IB?42ljJe`5oSSJTgm``3nio#lO93E5YQMR=`-M-;YN6BkEBK+1e>*Zq?Lusz&Q zMmlJY<{cd4`W*&SlHF@;?;U0%MFn^w7worC!8?g}*JQRJ9yDE#=)Dz4j(9ITxr*%V zfrLcR*bYo|Z^Z`>@~>^XOe6z<0%sd=(^GWhFXHdqw3ujFIFyD23;MIeZR@qzJh(qb za_rpc2{{0{y9Uc#paI3k_5rsQsC}IKl=?7qhtR)o^PS=LlaMq^Cl0QR8+Q4`aBrso zIxqxX3gy?jkUG?bI4A&^L0=ycF?D^zT>GaQFx#&IwBFz3u?L=#BJnvu=9; zu+be_KX!e4ll3k+?IKC%ijMi8(+ddQHvY|Yvh8>74S6B00muapNZWi0*Uf0j{kR8w zkMs?VzaJx|?jzxpgminX(uZ}+OG&8CROe=8AmoFe^)f)Ur~W(m8{H*I!&_7yUo|s? z1G(ZjMC^BM%IdRe61a`#0Jdpe+1rI9dNt{M@1gIIcI``Z-^wy$()k6Fwyw{mf5J-r zjm+O9Aj!IdH%=}e&a}wmIjdWxab(0IDP@TlNUZ)1t$RGHIKj&u=rF2Zyea}_0gT8R%U%9UI%>vMBWR-pTkIbEgR6qhcOq{6yRxVU2Own`MK`xfB#q0i4AX)i4UsEL0OoeVvJ zRcVp`GB+qw?jZ{aXV4 z9x+trDeW2p{b-}j#k{0tLA!q%QeQKOf24_p0|&@u-H?%Lyi5zm5)gU%may)XMRyrP zmod;7j+>MxHr>*Mw6)V^I(O!dMjCZ8N8U3j#zud@QH1z~C-x}NVKw~w?Ra9_`67UL zw0!36i>1CwAO?V6YW}zYPG~1T~RA9TY&yP^)HHl1NKQ7{|2L9K zWsblIAmCz!G)O)o8A1BG#<%QRse&$pcY0YlDo@Y08&)zJ+hZuc%4y))2h>DY`rJBI zPRU%u$ z`ue)j<$rchZUolsG&@`xEOIH~^qew{i_A7-K>6UhmX{>#GJB^tkq-{?R7kU60xBj~ z>(UFf_rK4d>fmtx<`0&4gRScRoXmbbq5(5BW4pVSF=XmH9HdBLeXlU8MLgvtiEOfL zRGn_vK`G)OtHpKXdXtZQlIg=J5b$LFdES`d=Z6Ud{vUUVqQt|5pBy{Y%(8XXR{gX= z$p=WhY#f-m1ot*BUhZ6#ep;v>L!Liw)L%T<-MLF7@^sKbwpF3k3paeC-V)2FCF
    yyt3wPepjnD~Z~5)bQ8HJT-o+6%COIXgzOivgesWZ7 zdobZp-bgnf0I?+jeTwGL>tM47#*CZ>?4j_&u^^2D=?VH$ zzHuG1>%s{Fh2T~XhN38nQPzI6P>4fVXK?MO^hBe3xsXqmZgh5=BCc%U)$_}dpXP$p z9R{nd2CG81ir6AieFCU(lWIFM{2;Jxq#2{IA#$dbPFdLU42isf zwrR5vRE4U$)j~IPHpE4)wre2@6$dPukTJ&xFT1zgZ#l8Y4S;5KZLHC#5)oCD{INLR zXRrT7dvia#s+tNq?8WhYxukoizkRI{*qZ1)7hm0+1sH zGHHbO5D=8(agGr@qUPZ6Nx^P)k23w2kEtm=7Z=y!(vrcd>!Yr1%?01zCn*0N{Sis; zML}k*1@KEl2IdH(W)2Mu42p~^Nrg*Ty|^WgbkC2Y0?Fd)>Vq1buH*ByQT1@+Kn@^67EpRcH^^i#t@$fA4 z32BjOkuW&LKy3r&do(_pDTfPWWFQsBb!!Bez`LqS7>Vu2_E{cG0zhjHX*mf4ah+Sm z*WR{_p!R9S0NG!Fm=^HOqG?#-p000GR<6$zBQhiry&sB2o@sJ?9ulSV@x_vQM~7rj ze?Naw zRQ?=m9wE8w>G>M2O1M2rN;G9M+D7KBt*wgy8i#U+B{wjvc4qCBE)+Vj)H(MN-+%PB z85koJF5D&QA>Ie*KnPqsyxVT)_5bk0+gJjoljcmJ@PK1G4%0HYN1o(nuFsN1 zLmxDqh}e6Ke!%C+U#~YbHd1eU#CX%8-!c?opZDBo-jt*%#lR^{Aa&hR>v7L#3+Q5AlKyRHrD0Wxe)HG{xx=IDT&w~s~fMh@PREL%bVHZz}T8+(D( z4FpZ60e+|XU2eRj`NwCd_HdV{Ml9l*Y`YUPTnN zYK+VU^xiIyeqUI_=YTu0w$t{pz6$quN?@#dfU12s1rviBS7*|^1`_=;J%}u&_B=N7 zH`Oi&Q=pHm72V22_ z1Bc-u@`HM#`uoac#*du&#fUx{H>sv!zyCg2^mk-VZ5CR152z0ThGO>&6`sB8Z*$2L ze5w>Spd4V&dh3uqA;7ZOSN-;Su3lw%wUb&PlX1$l_`+N6%h8J2`#w1#IDcq%^ox$= z88ZI=&qO)5;!y~MEl{LWSR$(8+wT~w>O>j}Y6!b16)1c3LCA-_WOmp>UPm_amf=dg zF4A~%$R^iCSRZ9IMz8BgqKAwRprVQ>`0w*inn$rCOi0blsO5GMg6ekyl=Edb>B7^b z*)JMM(vxi26Q<@|G?n^M_Ys{e0=Xf&84*@*tIkVdtL^9;bCK{p6?4k{-bDAeZRH)+ z{}$9tc%5PJIpZ`ZKUL@Y@hwF54(_863W2e6E5{+pwg?;J@O8Z%^+jc_03&4Ei=Sy2 z<;2NKKF{fMl`2D&mA%ye`!cyMTL=Vvi3U#Q#yCGGm)cB^_aBDOBP=rv*1aG zI{R&|8#=~J*{o+jsnP$=45kK(I$4S)r$@9c`sXubOk{L-8>*Jj1_l_uS6bv<6b>PR z8hua$X`h1_pLpj?qeFvPOy=5{MNc_`LL0W}Ell{QUJ6sJMJ{G@;2t*rbYLhCUz>HGca!tZ^6 z+fLMaNmzHuSQX}lb0#<#Xm{Wb;Ng`Eu}w(eG`4Yv`CgqqJ!96&F`sD$;<^Y--QNcM z0Bh{8M`oX$rmy0NYFVEL-1z&q`Du$Pj%Z2D z6KuGp@l78Wv#*Udkcs5?4|Wc}ZTf3^osjl>ikLh#qDbvu{T`vMW~Ks|mFp`i_{d>l z6G1zCKe4Z)w9XAChbA4J-7FaH3K)=tz5HjpxjNVA)#gMDfYoGFOi@?5%A45_x`(Xx|XC*&#kX+{nReHsKM!a{RAx z8v(ADBol3~T|S+b^cqvs$csEvqowls(U)1HQX4*(5dFoF5ue1CFOFG5)-ZnmakbUJ z!*8lL5G0)=Pv)v6kVNmPdm_Oz0p#u$alOo(yytw&G*^2~n8&hd3=_!k%;IM;&cheH 
z_BYj9d@kyN!GGIl{@7#E86*>s(@}$bN~wOpen(qdJYDKFJ0D+UCzC)I`&C*Ck+hGb znFIHd_#cCQkVRZPvH9W@Jslk$?zO7JQ%qSL1=;QblY8Zpy$9_+rQr?`AL6WA{eTZ# zq+0JpCyUtb%xeg&VSXV6cIP*k=RiDo5_14STejCWzv=gSV+4W~+Y-kKO5|yF)!)`2F!ru3Oy%mF=tM!om;;o~U@;C*Gw) z9o5eswvZs*bob#gU8xhqRpBJ4v2oL1HGSDH25bAKSkboXb+aCd@Ggyaoy}EiG>9yE z)`Ir%mz^3Se46T)uH#d{1tnlb$9jTim~P!aHt(IeGca!m!K_P-ro06;B!8)q&QXB6 zMklVA!>wiN5xt|Mr$vmihgM}!%`k8Mo%Kfhowg775348`LjqYSP z*X?y%Fz~3FHNf{_qQzOBbakQ<3oYt#ON|6?4{vB*&8OeGPy3`ovQoy3&jrO>xSv6Q zv{xa$iN1{y;Z3H~J1?ev&Sq*Fkt%Lme*@!V>7GCBZaUd>nP7l5jUCl}7iN?md!-T$ zOUsWYF82mHf6qV%dYyneq~Zn){R((bUuMd4W%bvN6XOTD{7ye^BZ_{>(6UzxSKxc+ zu`gl?#fHq{;@30fU0M%EAXos|eW_-uCs=RMIwlF036C>##RlY$=-0#?Znj~X8)p;w!HbB1dkeHN$LHY!;Ir}9bEDLB z=J0xk=QS(5^ zCNJ6uyV(&Bi_(if4*d!M-e_|@q5jB^qiA9nU^$6H&Z^gI?&pm(lNFpj8XjFIal^f= zg}QOn%I*)EY7iaxO}9#lW8W0D5V3_Ys_(rO)uy3{x)l+~tG&TFw;M!YKU8(C=Sgq0 z`K&mhL6ORlbCd)cCP<3%p^r<>=c}&5KK&nIuo}T2&v;el=*qiK!jW{h#{I;WZGD0w>bd`+5%pB*}T6J}^zjHG;g?|ScFhKEK;(vWA6Z&-7l zJA64^cf>iyI=u}EjEIWWW9C~V#55=GME72_Yek23^HvEJX>xRmq?Fdg=<5xN`IH5O z2God0aQ;@SMWUDx&M`@i|JI`(Pk6ZfuGUPKq9`Bl8`nr0{&Fw|JmFvvUeV;(lAU%o z31%RnU%Q-YQRucuZsZVvVh{dz<%cn{W38$U!aZq(6wb z6OwgB!cDyMuMw}|WSJ0@8gbIwn*jqWOU>`FTz_w!Z!9IzNW1a2>W28X98d!N)Mhy% z`LA2|)>Utx>xA?n?Q^p*WLyIDje{H)2|MTI8AF|j_310AX-^(Fk3i5uvfzSdlQ1&u zaWeCDoO|pwRF9;?nmk8!nbKzCAIL%@fqOU zQ#%-|d_cqhdeY1NVZ49i4bB1!V||r-^nKs-wLEbA)i)@XdStefZRn^W&d_l z6v#2=zbc1^-AI>1c921SVGtHJA=usWEx3Q4VG^sc$+C|p4>$Z!(^z#pr;I$&Uv!DY z&O7DQyL(3s%CWYN{Tt*HJJ)Ch9^es*N=_iJRbS$8OLab$WeV<5AkvM-rq^O3BKpAB@vJ^n(;nzvcc^farhZN2VG# zTiwpGz{y?wEj|v38rDFP>jjQWXx~ycc#eVhCFu=ON4INc0I#>7B2567VYU?-*>qRl#VUsK>T$qC#9 z(FgV6-Bx1x^Bq}{{DF@U-VND20!_(}C~Tt`WPc9jbng#u?YMvWUT}|Xim`4G(*dGc3Owas+a^B9Bj!7(@%rXXdM7a9+Hq0-gWo{g!`huk9zZ*?@;< z0rT5L>_{n}WjR_|A7O88)$qcP5rPgB?f29OKOLPvZMq*PYpP+z_Soo^Q$l7@!&h8J zlk==>-Re|7BR|!Q6hZeWPH=&yD^cxT(sXk-BDPfNWlpN8(mQ-Mci z^dP-zf4pk>J(~FeE-lw&qh0yDP0i<)!is1%jU?mEvkN&^KV3m37~pOny@c?dg#%BI z-69ScCWP8to}YlXeBffd4Y0z0tcyABoMAcr`K*<7-yhbEO)E%ge(JowvNp 
zQd|rzEv#lIeaSn!yjvjuGAW@f@92&aMG{(?Ub3vjyqLeqck6*lh&e$~2x(y@`Oyb% ztJhNA9X^88i(?*ptK>ZezeG0)rBfxI&%@mk8XhT&B}b}48bl{Zjk`OXmb5xl8f;da zWk0b+`1ztH5?DpEucUxmz;>lOilK8UtR1E#a6PGV4b>1+a3@MMQpvq6cm9(Ioduxo zWnO`z_LTy8J}17ARj8(=p^WDckgF2W-U^--_P=Uw5H_X@MahQUnLRp7Xw!Om{=XB~b(`&DUeW$&1M~wjltG7DP zI0Ka0_^YSbO|asKqY=MMzx?CmX6C!~I+}Kd7j+#*p|?zPeRg&f&=Oksf5FcIa#?xr zfH48@32su~lL!mwVGu5hs!f4rpayA6xnIop?uLO^`}t*XZdY3%UZwPA1icW1V7&f~ ztmf3Ac!XmA`X$-d(bb1b(@>9MDY0I>mj(Iim~)C#V_!pMF2V3IbLkLWQDNylSO5aCDsXZldW(eEbE^mKE4GBeA)=f%!E(#_mbExvhLG>n#zy;IW6*}MxMYutx03aa0sJCDu z3w3vNhM7M{rYC8t*3Q2+_Z!#!gGSz>#^6xjVS{5A+JVrO%^i}J+Q zZKXN8Pz)5gnWLlR(})WKDfhJ>d=fK1D$cg;{`}&@^*0)gP`#0olIDK6U<9`4cdA}? zYv;0iJRRkP-`>?lKzA}1yiKC03xg2QLI&Kaa2(f;jz!~LJgMeA(5xCmjD;c&G+C;^ z@F!8MaD?9$pkI^JtBsCtv7Dpzl2W5sE;DDkLfISpItXboszQGZ>0Z1#Zv3 zk~&#gK*8CS>hYk=#CwUJ_!;dYuh%`S)4yEikR5CqyB5RFeWAd%~@CEq$KJC|4 z$R~DVY38LpR}!0BC5P%ihphE_v+@AZ#a`$s?bw3 zxgry6%{~x2geCYHrAu}0JRthyI;ebQ&dN8AU*CB6XV{YTqs`!SC3WuPz#)9c=Dp;d z^MoB^t`zX8$yd8P8Ss5E^?JP0euR-h2;+ZT02MQ@EwOM4F0dY}YHD<0Hh*(K0b~}N zzxA#i9bNHH&DDFb+Lr%;$+Q<(86KX)Bfi@wXXPfoFh$So*kfvYq=XX~;`x~~A|htW zSYL*ltNr!IUb;5htD}K!DV)3}pYx5>j5{?sTaU7C^v;+cpF`O;m6B%5*UK^&dB#WRH@`H#$kX+XZzBv`WGuUG8 zE2<@8Tu#U!qXbPE(1rz*IfU0<*>SvQ8V~=s{;M>+eQxoBXxi>~%^I(#GYOb)b+9{) z7Q52`0f-s;Kx(q1WBG1Keg778K!;e(3N{9tl}B~Y7p9cAUkrRb3benQ%|GM7uV7V((nDw zF;9GB&qw;FMsJOadxzYT}0LY#nct&Mld0LF0@)` zYaW1W(dA=>^S0a-@azjC7Ppm^z)R}ujJGSO02W4HN@zyQ%e9u?n*8Cqbi7N&Ep$mU zboA?GoxI~OO$d8GfxKY!iv7`7Et(UC$LEq;~LL#NpERJ_=$_ zv+wsk*!=nZBQ&-hlGdeLRG66q1qQf50-9iiKH((3fD{9c8~VHl8Y(*J-nPOZ@vkpf zY|4cFDt%Z9ez=U(msiA7FMcd=lpe<}3&c9uHd))eBDqC*+BrWqZ;Z^ZGDK|auhA)! 
z7pakFh|CGuv$BwPTu;jN`S6=u7)#{1XfU7WSXDHj@a0;`P8!%d;*|fjy(B&ZUp^KV zm57_t(!q?fp0p%fp{ybzaXoSD$nu7lCk{QNVPRo_1zLlaf%oA)zqn=f@1$6-e+|LD z?-{}y2er1%WN*J(CB6wpui8uH#dGTZoeV3EVm7QSmwIB|@rs$y*&5!cIJ5 z3}?T~{Syompx~Aj@z?a2@rn5KDXxq=HKk%%!rc&qxht`l<0fT!wJR+(a#9+}29#Zh)p$a1|7juYwvM z#A#OZ9ieF6WKGqR7)&&+aSC;eZB*Bs(jH7y4Pj6zqR-S7t!`neIXnbii8!;gbSe-J z(qE!T+NzERu**HoG?2dZ)4XBQ66b75riw@ZWVv?gO)4xf`pX@jE1k@nR*E*xgM?86 zGE}eoT%KS1JMmA&ZS%KUl6Ly{{(k9aXHOOtc?_FPcn>I0o=f+iuesp0*xI? z(@O!@MasHU2k!Q%lGF&BwTRTf{@W$w1SWUyY znN=q7dsvD%vD#fZKC?6oflFG1NL?zY!1BpN@Z>;MjD#DI=! z$O;pxJ{1Jz=y^x(xT+mQZ*>iTvZfzEawH|iP@|5sc*%@~+KCV3;uM+Y34irGTu!!Q zXU^;Dflf{CnG|G-@=E(mT29En+9B>Q|ANbW^~uQa>QFT{Opg|e3NoxfMX@=Zj;u>- zX^~~@yg{nh#t$@Q41t{NkKAS%G)xH^Y~f3qYMsovQaR)@>;@0ww|hN<0*JVS_0(^b zDmaBUR4TWzU{)Ld{eDsPHzV+XeX!T>5tVfS&$PT0t%yS)~ z3#zk+($6{g#1g)q9q#^i#2Zxf)T<9#{65E$_8sky^{j4M#lZphjEEi@|C0beX8xA* z7p$??_o8C&f0g>XZ+&*8LrR2mFWkjIzDO z<<07XnA!;yC^s2kj>5ec8F*_O?DN^3eKuxV>+8|LSXH4naG=VN|iNc~V=mC0*U2#FA51V}!;0lL+m@uZ-Ux3<*K)`@bICrS| zjo0tm#+VE2!=ZXTYg1QOV9MrnGLbSReS?AUZN$#oj9m&!EM8ZXZkk}s;iIt2?BUda zrH`R)8m}))2xI00Ia>TB_>XM=RS0%|RPVTVYxBDFLvpjANvgaj1Mzyb;RWq@;iOHj z(QPLSC*iaIkEZhur@HUsc*!^!nGv!hD}*8$Wv{aLmJta_M#({BuaHglDqFT=WF@;o zvJ*0oy^iyI&huQ)<-hy7?{jm``F+2i@qWMF6;UfkzY-ZFh5u6W8hreg-=At!@7B2) zk=d$a*aP7Tbujd=|C{pU@$>Z>!o`ypV>MPl5I=k^{c`_bFNm#a=0q^ho6QBurFMnD zy2S|#e%a_?C^Ce-dME<@fCQFwK+XAD;O_Nckp6~3g5&7f-&6-sf*zjV7K7&ia~vM# zsl+`7I{}T>=Y&niU7A-FWTkj=Rgwiis)s4|+p6r}7;q0#G+R$}d#iFSs)s~z&pJ64 zU4GwcPui%EA}H@TaJi>}LiOMKkaiLg`@=uIIxuf@9eW^FBQ|dS-FDqog3M7!=ZArD z;Ti|(70fO1+Rov%0f*YgC*!!=*|XAi#||HW019v(nl-q$*;S2#M3&c}^rM9hjnY$h zEYm|JT^j_%X{y5v4>u3bVPR$(Xrtwv4=VqoeVI`vM{`--i@wNem^(G)?buR|LD_Vm zEW@5)i3Z46eiJam3j@$cQ59fiyyltF(yxyxhzW~4jig4N=oSpEXMOnKXCdON5Cfoj;G>An37l0M<E{wnf#>mFzvSEfqD?%cVKti^Oz zDkrcnfB7O0EYP>LJ!-%c0z9ZdRN}iQr4Db-!9!zHC)*Xp)cusnCC4K@eC1~iA0*Fp zi@D|6QVSZC@`D_Iv75!$W;iV_3|Fih%7efny6hv?d+$}-JQNH|AIPXKs zbjd11>-_~D2Eq8?HzDnPRA_B))1<4Lv46~0FWlG2)FH2?+R7G}d7naC;F6qjYt=NR 
zcr9RbLGQDO+OK@kC*f!By*mA0s4?pr$!MEo!X5&ev`9XfDn@>aUf-u zpoxJE*dH4?*YcNN#9%{Ys_h&gaXPoKMRgb$fNURiam_NL#-N3bI zPKK!S4^&~KQlwo|e1-^dmeE?hZ@Bs0u!=uy&fxU{6OLA=)$!H`Wk-fuKO&3SU6~}J zz`-GDXlTk|?+WZ8#kj5qhd`uhf9ekqXsfD;@hVS2aCYE;AvnQ5bhs7HQ^vkWwnW;p zwa!!J7a8H&y)LqY9~^zv8ai&%XPpTjEl^RJK!UxN-ss-uTw8_2o`+#X`&@_8c)4%$ z;z(lJy9j*WQ?!!w$=dV2IW{$4-NbR^_alSR@h=n67Q4qhAO*=NY*>EYX>Vh*wOU3` zZ2RGZ(P(}IGX{a&m`vm1k=fX$D{pwW$JfO^RA}0j{q#ym zIPjW=%-%If0^m2TivcQ397U$(1QtSg5U>^nXC(eZM+p%fZEN<5ikr$8;cu|QwqT%| z8>BKT)mvo;F&E^#a+NioNjwau4xYM*SFB&E?xszD+xeHSq?`_ujKb{TFW)D2CZZY7 zTj>&w4IUE&7Nt|M@$y_``SiK;m&^@w6%yL7R*%2;WEG^R;1dyVzIXGb_ww(&@c8O` zRsWXOe-kH-b2oPQ24{S=lZA1~4ZnX&LYVrKJK^ky0D#9i1aLXRvc`yOxM{qpx9xLT zf7TL%vS9+*yB@j5HN_w}?1itSN{B}JZx(p`hleN-+S)qeFL=jTg|9-4oo!zN&c1=e z!k){`OUU!7-?p(?yS)#az|seU8B)WtY(IUm9oV%~?1~qC4XW__IIZ+Rrr&bvv&MXxS--&&Ib$Hw$~_& z@vLd2okV^v&c`ZsZFT(Gbbtjnl&`1NnCk+?FDNg6HL&#Ta&9r*=Dd`ezWk1c=Obg; zJu1cp>ou9~k&R=<$HUp2u(RD6-OnsEs^YrLr)xQLb^-W-fL9lwz40;{>hKZt`zYw4 z(}@@PhR)ubY#sZMHX^w{>?8^rmis~RfzvTVNsFsnAR$LQA+--ea_q;EKqhdQ{Cs2G z*{S`u-weKm@}ctFnsI$8ezGqUXaS9j5*{f_p##jC8NYobl!EGc!mL^Kjn0bX^d2 zDhw?{o^79glc@crwtIeHGN&LVpvUxdgx&W4?Y(CA${tdJ12R&=PZHYc^a>8IqBEqw` zG8zo89}u{dk=hUC#N*$&H>q%J!`dm2)S$qQx3gh?4cz9vxO560EX=qxE1=FWMK82& zK&5>T5C%Ad7jA?FET~TQdRa0xrD5uTT*tkC%I7!mYAX{n77rC}R-`tTi%T&-QEsVI z&@&v`?NWI%`&Ik3?GMs68?)Hq-I#(7zTnjRj#TL46&a;C+LuZkiv}rPPqx!lUgZ_T zs`RL~D<@adA_P-iUD=@NEM${QqqN{szet<^AYIYF!0%IPt#*$7gOL)0ZaZrJ6FUOH z-N>;P>Nhjow&H+lcU=bKr2Q>;YyrTAhp?C|GOl5OX9ISzRJGL;8I(+*T^3-7!aL9IyhoY{&prbb9tQ7ijODXN@$$Vz|SL_sOAea2b8E_FAu3r%y` zL*zC*n24!XrCVQZyyLHm*JZ+*ACxSl0-&>ef5Vua6K8=O1S~Ev3Z!4341Juqti?w6 z-0Nw37r?{BeU>SpOPK#_;*2Z#a3Tdj==G8 zLZt#CAXxG(!fm2x^K7ittlDuZ1uQ8+CD>bV-q^#&zZV{|2y}R8xl~_I0AbGoD zgxrk#)AqObW<6eQo%p=X4{^H;f*t8H0oB7rI*aX|q?$v)&6&j$=QnV zy|p|KmdPvpf{^gplGO~$*22L{^l{VU6ubO>oq))4Or3C{r&aCy?DhE7Y>9w z({l6Kf?AjMV7wAbb@gH6HsQa>F}IgTtVAagz`Kz zloeGkl)vw|tz&AM7U8;E9CT-#;2r`$?fNiY=37A2E0;@I?pgDL($I{k36&98&xIJH9GB 
zJf^xOwC7$Otg#8AW~+C~_Gjt>6NG$G!E~||#6hsVLEQhJTj2<^6e#F`NthQT1uKGn zvF;~TIK8%rPLWBk&31JRw_?NJ)mQQS`$rXMX1Mh}<(5^K$*%RLGHtOPZ4Zfql}_ca z;M`mrn43P6NQbQR=Z9NX7%Gpq=mai`8>7PiA7ObGJ#>17nFJY|@?5_bghk$3DB zs~0&QL!{40ZZ-d__Trad(!LR6z@AIfLGpJnQwb(SDrFf_lV+A}Bzfn$Tt}1_W8|~W z3ACL}%m&%=VCA;fBXO>{SBU_<*w|pFcRkCG-IX!l%|zkH*|~F+|_y%e(0hEluTK z^Xd=*Ee!biw3Wf(9BlWrw~~t1nhRD(>e+MlH#Ye1)A0Y`g&Ew_%?>kAVb8`7B@V+6 zr6TC%zW?seV|`sWq1gMv2-RO^mR1wV>B*%B(HSc%8AdUS)2q?1%=goz*nt+Mh_XPBN49M@-smkX9aM zx?Seb(t%g+nwZnH^BdrOTj=^3R*?3Bk{$h4_ zrwCo7uLpR6HnWvh9J`e1G}(d?++s}|K+rebNxd_I$?{a`VY5c0Gj^=8bw;u@&N31Oiu=UaJRFrvtEVm_}ZNt)C} z^2_e7|HxbBiB^9 z+qZ*kmG!dj{C;FP8q>BF!!sjp(sXmi=qz~aFA~Y}zaQ+u{36rr8^vH}p(|Tf*YrFi z$OFIilj{AwFyJ#rJ|BctkNGS6JmB|+)r$Ff1vfLAn~Y6av|NVeQ7U|d7o@9?nr z6kTZ3)sOJ%cr`rg8R>N)HIY&<(BE-W`s{x{%w3EQ=PEl$sHg9FXA`LhT+`k5@S@C3 z>HYauXm-!~A`*K;zhuvhlnK%QUG($?Vz5ytaOaH|+c)nFI4il%_{pTq2 zoy=m>>1Dk*LlN85Gqf64%zx8r3~Ukps`&p)U@uF0Mu;^gM0heU@0Q!b z6X}nPnV`S%mfb@qH((Y^RQ)oyUZ6uajy=Su3OvOtE9_Nm;jT?8M+g7^tFRnR zq#daT7Dsd^F6H_r%UYM78;{-CN-`>>;UP1r7oy~4xI`5YU}l-?HhiPN35|H(E%*4! 
z;tlk?;8qgaCYr7*#Gl=BowF_3d+!6F!S;QReh4j{> z{g0?}gr8L7lgn^Cu(Pk-tif?Ij$wd8=$;XUZSH#38$wN|^Im#=I z^=^}>RZttZun=7iH|@K3RH2cfZL2%;d*Q~EK=q&MPzJ2K85*-~G2|!B9;12&mx}-+djH}fy5j2>x1+Wq*E330Y8qSY` z`MsCePy0F9IV$R^*WjNNsWveWenQK}P-Nbd^^q)&_mZ0aU@$i0>#i#r8v1KTw`d;y z@~rEK`b&3bY*Lb6{4Cl_cKrVHpT4^4@t>EB`E6`%lapu9!Z&A}t77B%=x(Qy)lNPg zp=_Z0>7uK9Ei_bR{G-09k6=%R|Y63+XEi|Mv(=Caxu2k z=u@OnNhq3o$fXB`-X4xwvXypig`RTdcn|vZc+YA2LY+-(e$gf}=_GEh4sF5+x6QV| zTaV_quw2uN1AiJfQ46P;OVGR&`*&~ed+8PLZcL>*Pc^u0&9cGj5^J%JVi#2~iSF(Q z+M#m}O)UPpI@JhbVi(vT-)laOJ_WLg|Iv+MJLg&s*%0g;Igdv_lfnHq)QxhTV<)ER8SG1!cy_$YgSi7DX}d7) zEWZD10Y<*4Ku{HpLItZHOLr8@5^QkV*WG4*oTN&63lorB&eaRo=VA(O^u~Gwr$1Zh z(#n!mpuJ(*10=H!ffc}~SlNF|ssKM?tj2r(xegAhn<<6Q0?x`6`2*lboP=@TK5 zLHN~x6Mk4!nQjpkY#i9ZBY8g80`(9kVdHhr;!w^UO;9Um7W6$SLr)#d;EW1^*?#fy zyn64Tv|eRkSHznLrxO@yQh3hJo^~c>oac*CN`l`IeG!cil}ZDyb1011=G_&SqoVGe zK=M|u{b0u0c>Nn+%t;nPL4)SK_9DxRv$psm9Tipe?_Fy<6`q^S3}U``1*5&;Pk(Kz z7Cby#QW1F}_)ZG7kp-+~8XnU?g>bs8Vn;;P`&m`kL1s8n>4qe>xu28TeoE>YV)-oR zT!ZK4fDHCX9EJ^hj92d4-O$mp_2V3#eMXX`z^O_DGMXjyy}5S7z<@B1;dIewRB;9B zij=D5%Om%i*h$|fl`c>%qYxP{+6}9Z#O84LAc)0P0I%~m_$qA{V*-vyggmDuRTBk1 zh3?{+mj#1*c5AxuK>gw$&k?N8(YCP5hUC|4iEYJ(ntAejM~7Z#KCpbr`b2)A#VNT` zJyjxsic^g>Frc{DwE1O_60>*w6N9pM*FBjbm{9GAcu|XnAXI+u*s#v)CdltQm(Xmn zx*s)T!PjrC6)$9W$Dehpee*-Yy1Z%rzr`Ta4)#e=BM>>*&hOs^eq z5p2`lt1zs#pX&Jl9V|{otb8#xmhz)<%?S1eBTw!9y`$w5MT`B7Y3kcp@0~xkpeZ!G zL7U-xEGm#VxqHTV+b0bKb<~vf=Zmj#-bw&T>+_AZ`bKl4_x2w<;K@Q1pEEUN4XEiz9GYKqqjz%?OI)^z!V*efsj0hV&alj&vJdq2xYP#-Xh=m1`;k`)81MaamwVQ1?o~)vLPVeyC~7cAa0DGQF)&cT7IRKo?ji0ua|74maAO_Y@mJJ1KpWuK^Sg^eR~84)1zn%b=hF} z4@XWly-ml{AI19MZ@E-9Q<9Ko`o5b%QZW@GMKGpMd{0-DVCcX2HK98j!OqSudf|&- zbXXL-ghUDiB4}s%c(ZeGIEyHQjAx}TM)&9j(DJ#x&VR^#K_0IlW_mZ<;y{ni!CJw1}NXx#I)Rg zQ&wY=uRPIbm)Q!n^LVSI)ABCb5qo{uy1*t``wOdIq#cx8C7lbbF9f~D;>TMdr*v;)UIxLP`Wa zoJk8y7RWi47ym$7QF4HCjV0nX5imlj6Xa`Z(a^BfWn@If8(Z+{7KRKiDeRF96SQv` zl;$SOo;93&LoyIW63I1`AzrqY=oYC0#WW;5ayU?DlfuO&lm~KNY*iEI*6OPu^@9WO z-UXnTBlY$a;wefrf{P_-B(~c}5Cg7RW^M-2ICVZN+h#Qub;__F 
zE&aBrmN@bG5#!PJ-#m}?U|NYI9ZJwvTcHVC&1{A3~%+-W`61?Q|G z?e0;DW)|M+7*bVTU33hDO#dcbcv92W=vZq`Y7JG|@Cg1Z!~>8_1vjyPL4Jk~iI#_}`05QKTJ=BHx^Uv?NOlcYD1#{4w)(Y?LEcpzB4!AzUvw zMP-vTOIQW#3Tg>ogGz;DqKFlzshC|X8JL17#AXFD4)p3lVBP8(j4YsJP&?i*fsm?04~w%X=c^RFI* z=e1r_>a*QT1p%CtX)_<@884#I>Y37H!J}=}MWaB@6bDBF{`V4-L_UMkD^U4uVCYA$ zyM-awGNY@!(CqlG%R_ANd(Z_8pPep0imq_)z3wN3xhu6P8jL}Dj%&g>(;6)UdNeX@ zC_4=dfcxP+_A2U7i|}<34P#H*Z6h-Lqz|^G9@w{2$du&YWaQSBSA_*8&JL=@-y|~2ghS7ETHi<92jp_i z(#77jXuBPZ=S=YW{!~NrLoiOc{ylhZ$ghuaove96hdDkFdriw6-7Rwo_3zcaNCWGJ zUHWL+W>qbF&5O*xJ)_7U{)H$W?`#!n5c>vYb06xmY-)JH(A*;b`I5oWpEyoC46jue zkv8RS&h-xnpOSqnVUu$jp~1_j$(cq^K@j)g0-t^z>H!b>^%Y51*0ScGL69SoRDJ%e z7z^6PT-eU6{jML)RA5^fso?`v-UqeBu_R&Ju=jS0d$=TYxPy|#Tm&%3dvbbu=a_xA z?vIR!=Ir&jC_VmBdHJq~6HyV*XHPKap%=Q*j!Oo~hiCFW(h1;1fq2awA;nsFW_PAv zZ9dyrGy3gJ2S7V3sJOp$b-Epm*iuo)kwzh^>L$S~tK_6En#pe_3sD_tFUdkG)kNQz zD*HiJPJoM5JX_$RqMC=}96q~gAE|9DoLXu)+7MiW{%VPWIS7E^QQ(%e!op#YQeW<{ z!^QM&aUff3q3f*2cWVhgy*O_h@mf(FAq=DcfuMo~vatp8B*dC@DXtyPmEaYj*-E#-Dn?`cf{V$=l&~ z9ox>5VBK{T>J^b-q%Ve|iL{MGWSUi*#O|5V&IK~}jq_|BnmOI$w*D1-XN}qyX>)08`c6BpGCWb;Z}3VX{)r;=?Q}mLMyq&UMaO`9!nW zhQ|@0U(=`F6BVzCK&{ojuot)|!^M@dy~wAXqxj;#NJOa2?z}jzo~z{g#++4-maG3` zlbw^j7r6uvu4#+UM6QWuwv7t(%o7mM4X~N~qi~2`h5{Ot%Wz8Ee|`3%sp1>&lEy;WJpC zYK+d%_Mdx&+^Dw?jne@{8S@yip1pS-wi zU`UPw&-u;g#}OZP+WnMNJurvVqxlcncqLNEWlo3UdlKUIn=|;qUF9Dy1fFg{wlha3 zE@firv=eA}bwlXPz5H=@JN6)!SwJ{~0zw`}h!k79qZ-e5>7>t2-lb#)vGt%*v@>Cr^>GY&`)ZFBZqj`RHc%>mQc;Il#n zOWu(s4ZM1{#q9fUleO#$bu3 z-R0^V_g(C!L7!>H9j0*diX`pMi?5qQv=R&b>H9Jwqn*&(*Kj9Ou+l(U={o8z_Zf`@ z_@o~c!mbu%h@@8w*JHzmKoAqnGOAMu$|v|>@R5HW#=aXZ(8wt^w$b9o^O<$4KYBJ6 zo!{r}JP>H9@>*$8$XK~OIqaj-XXQu2pC73~iOQ=vQM?y?k_=>nCD|03} z4S|9xsA8ET{v2^c0}MKX7|bb@Z5Qp3Kwh+D|`%N9{yzTzQDM`VsA#+9|w~9qOfb+ZhX6PuEYH%=0xe z2Sf%XpbI4*{_qK8=A!h%AOktQ3j{D!iHr1 z){$=R_uIv=Z{BXbKn_LP%Fj=Xpy09W9!QsjOY3cxGOiFY|2O0hovX0V7v<)2N(_R6 zgqIim{1fBj0p{=oWI(t7+$RC8PP@pbjA5C=Ry=xfS5~ve7o8jAi{>mzZ5+|xmv$Z2 zw=|U1XTt|&#LS*vW=oQIEg?rpPE4Fq^sW{R?qe34{vFP82~>+@ZLv8@-T(kbdsXh9 
zCEhT34_KdfGDve>y?;f|=>$zbKoJwqZ60xn$%36%_$v$Z3t@O(UdJ*^`y|1gHRoY3 z>^X2%HJ-O9mfj#2e zlP-TZzav+4^T;#1m4f$~469u!;h7@kOcU8-(Rr83)@wxg{BPB7qo%F)aDKzO5UIuk z3hi~3vC=6@Sz*sV6||`PHffLt7d5`>UXa8^@g9F=;3GtX^=*Pxl7|nr#lFS}2QEE=T*Fvp>+DBf;7@REtXlqA!w9;q=80R5;wTPw_Cmm9fqmVy z$tyNBS!}OacdrPi?4|gIJ$wX(cMljZsp{P4@C0fnO@C%{lX{&zl{8{D_g%ET{q%Qk z*|X61-R6z@5AcONH%uYgrYBL52H4mt@0%CyMJDPUxIN?Ya7+V|EPLT&0vw4wc*NTJHk=uU3}#>E8BINs9caz5fnfP&s(`r{p!mWk-sCkHvj(qH3ms7>B}>ah1s z#E%u`KX7P{e13WtT(xR&kDU*lnzJ05ZFArJnGR!zL%JlP&~XHtVWS6`d08|rNDKx8 zD?9czhB*4LvjHv0!o*gu2LB|S@i-$>m+}mv!~eZU8y-}+{FoVe`V0uH5A*vvuZPp6 z(tLD@q_ZOIfoS83$@SfT6MSB6!uu+IgR76$8(xe*z5Q~3E!0ta^WqAM;%|gt$1%w% z+*WWez=&ZOK%=@p>>IXzIeEa4_)s?i5`g~2mLi{=k$MI4XvRHiGB=^)jwj4jP1@v+ zu_#7_GZ&49A~v(J=PGUvLy)S`IS_$9p8U)N;KOf8(mFfL66t9&NVq=LULbdcEO!AB zp%h&m&uye_|J4Y)QXsFOc+>f}VX2v#m=eM1I??CZu1E$n5F9FOSM`B_DYv?mF&cWW zva&M7Rq^|H#*SWafgUKS!ji*z&4xzm6 ztrd&F2eUWsIklYxZuI2mNGQV5Jsi=2g{e=P_m6#`p-!vp?fq!p{Qk$2pGh;QK!#gS z92dqbgQi~X50#p|%EfZ3J~}RAO1qogy39w_q`Y+1U1Y; zYug2<>84~{W1n`XDtY(p%j7Hs#R_rPGBPpOx?oYkVg&cjBKHCM-wl-uE=~aa5m~!$ z$SbWERiUi&Hoyq#G>D>@?zFeb5rO%b#5S(4(qr8bW*nt=sPj}IWBY~D0x*on>mL!} zsO^vw3ILX2O$}CVqe}c>a;AAuT7qmT0Z>_J$kat|3}u0bWS>c5=%cqz6!YD#TovjE z7z6$=2Y()63Ldig@XLIJc)plD1Gpf+l%^YF_ctcwpf|5+3`Cy(%FN@b8Tm#*BaH;7 z5{=vplj{4s;g{AY7d+|%C}q#z4o-iuI)?7(io{WKpx>vdsHn(4av3YV(Q6xbbkv<` zB!q0ino{PAnp5~dMk5IO-a>9R?SO@Hfq}(b`w@xz>a^KG)69?Ki|A&|^`FQ+hFeLJ zM;CN?R)!Z|$bHzEJ~`WX-9-C?x=nV|(jiE+oSyKEH+5fuTh8S6;m|iSD`hdUyf;VK zl4Vkkw}>bq>2}vtglij-PKhjjne3kh+YsB2e6K=WbGjD4=7RYS!3Ifpf)+k#0vIXT zwg+DHn_Y}z(!v~lHEVRgU%^d!W$V%=q_F;afeY6PUio*O6LLmkj5qecW@~j&)_CNO zDF8=4ed^SudoPb~>3=lzpK`W8s=VA_^`p^~1zb(y?#q5M1-~>`AE#+LTnhbJXcTvy zL5vIZe=noIF93oRSB1{i2$Zn}<$90Wz3uzZ>R93SN9w4a^p@*|LQHcke#aBs~U*B0S!U2?o0dL!V5< zU>GUxb10N<@j5}U<@6l@QiNTmO@RdwIkqqd1`@C+8=svO=jP>Wm+0TX8~~|iTadL) zY{s{5B(G20b0dxCMPCeQ=O7GqV9pU58kO%IPJ4WM3lgm$jR7}wqrM)MDr^vWh)O|P zHCX<=O76!Cd8lCDjmMlER6Rzs(;cdH{AeT>INA&C!FVl_1urBdY{YFO`YNn(Lm`)ZsCs1-Kr`X?nclQz*b{*m 
zK^9i}JJ#1g`a2sO$w)8tvO0q+*!#1%@2NOgv}}g2XO=%b@t~0*+XA!-l)xXH# z+4T|tq7GAOT!J$0`Be$Mh&x{1<*DKYw8;_f49SsKKCGVG?3ejXjtXsq|eNKjHz!m{Wb z!!0M}(Sp=zJuJl0!i4={Acrg;SK>RuXsL&Ao!z44%T$HmK2}@Ue&A}@Wx=i`3*X-2 zgp!bm2%EL#VNsW#rkhA$zpjkA%({!0|N6w51>48R@Fsm;Hz?9hrsPm`72${jN;PHT zmXH z&WqR&$m;3AGBH0d>gWWyph&1wpl&;$RCTn!>e34eeO}(MM2nSgDL+aoCLn_+Pc0Dw znXSAejds_5P?!BHi2IrD)^@PDBXX6UlQxyWzx`-uNkvWVWbl*BVeJ?aMq5VpJ_NT@ z)9-JR+Dsj5yeVA=+2u)QE8|~{!1=r;0`r-@s%|xo>Dg<FVXZjQq!q5H=IY@cpNf8wg2@_@h0iPPAQ5GDM97@QU?{h-F>(zFy|1Ey+kk zp@UgK4dp!_M-}k;hQJRuw_ayVly-lI_N{m}LX zV*SLyxNmtDK)@Z=>R4r6V~qA{ju+5jV4KZ}wo7e_LiE3RL*doT{4nzo(Ec&pH=)uCo}|MdiQqz1`M--QuU^TN=!?hozi^G(WdhD~V3J!WL7yl;?ZcIa}5I z@_N-e$yF9)uE9PFg*NUzgZkOT1!b#UGVmG8^~NIQDF+V>FRkESHRLCD*?-lcOYs-& zRg?CJttb619jl76@&~iYeRrQC4knA2q2uFqR|i5#1f{CDv zeh!9%JB_}-JEPX`Qpmc3@67s<_44m}k>pZ!C<`7!p$_}mXql(&im)W+K5f0Ad<_OY z*5g}qyIzy?km3(N=}5ti%QMw+o9}O0jCg5whLen$!}76Q?2I7baBg=2v32ees;0)4!qfjDxQ_B@4@- zQk6`5Lh?#?A4QYsm^1w(-9!ymZ2p1dP0yLLiiK_nrwrf;W z3(tRJfug0IWv#INr(X`f6}SMDn>V#^8fb8%vYpy<`rb${ooax2_(0Qi6_HER5f|dh zO>2Bw1+_7VdGqAeD^=c*^e)uRV$V^E=91ujck@xVcBKuE&Iwf7MpbrJwUhKhvriwU zOM9OjjgGOj9Z^3XK(MKNP3SSfOlQOT9c)dB26s_m@x0)(-e)vuLlc?Q@12gp5HEUZ z5SGVKe7F6&^+X+W;8l6$t_LY>8w&PN#ybDnw0GAgNXo8ui^@i*N zB1cS}fjOilY+x-?q(Ae$9S4Z= zU$(4*T&Ux?tOTJZ!WYX^@b~?~^+M~bh9Ymzkl>Gj4khDDpgcs+KYXLalINaWnVX;Q z=v_ZijXc?V^w@%G`|Nqhm`;9E)5!*fxz`-QZvim{V!FRZsQtIBhc{*mbjfBfvz7mH z3Z#W>(?(FJaEWldX}}f&8iRE7=0%^NI4)#sWCSS)hu_HAiwGZBC|9xB{9V4C?|czx z++S*T-Qnx3;BJ%aAIVLUIT*~25BzKSb4zE3Z=jKT4!StGzo)@lX96(%$K8CPma$T{4JMFG#6d0d@1wsPC z7mz>-4-Oy;e0!T8&e2{HoF?(m%CSJ*g~R0(c!r& zxy6-0l?QOUVXf~CfTI0)QjfRCbUsHwD5{Sq5g-nUcV5h2gOE}{moYXyzt4Q?m#Lb< z$JAfsezs{!IWhY}CYh4uPjmM@h*TPP(uW~_!Nbfs92~IK{|(?7w5#+VbYwYxA<>y` zDS9pY3pNWFptb9>o&f@Co>?0rw(tLzAO+B#q5Zz*NNnn%)6g*RWPKlSw{4Ov!j_l4uD_X&$* zY}~1zCJO0(OWC@DUoX3@h?fq+6NNp82qH4(*WW4f5cy`nMG4d-{r-VrdI3GOus0Ta zHt)>FsXZ*2`D%Y-5k5}aZ_C4yd%L4e;QWz1oP7g|f*2%b)k2x4!el*yMJx&zFH~ap z0J4MeiNB@Xxc~!nOHQFM+vF3L@~d#2g~YrI{DOaPFWO@DN$5Myde|-W@$g)EpddB) 
zP!*mVf_sW~Z}oStBc8Cl(SH2_6aQV;$DBN@Q?>E)(>usH*$@`<4~8_5M-1PuzgfY| z|Gf#8hdaJwTjvBZv3z>{ii3-^3u1YbD-nu+&yZVfHeC_){U_91<;WRd8U=`9VU^h- zvRV(GcbW3-IqthTJ&tV4>IhB}bzsNL{2RpeEtL)=_JiqrBs&f}6qJ-%3r~Fm@K>T8 z=~tHVx0dQx$Lf}x$DxHmv!d~GaQ-=nAY_`)`)l2OzM{|`31BNL#lj${6527 z*%YZ>R+YwgT@aK(FFwm;ZT;xhOSK}(J29lj_>{R2?04BW* z>A-z;_#)m09?RL8Y*d}etQ>h;+P=k3k9p3$EIe_Jt4%UbJK zIXAF=*L|?4dm#(-ST=Tcl`r~eOA87_9?!DIjdDs$`?t94E*4}WtznZ5`j^Y$Ki)wi z=p#LTyx@+~GQqOJRk47YGlL>il}Gcv?zrf=NUa{$Z*;)%hJc-kDo=90_hqaA243VUl zCl>nGgVQ{b7o_<&j#(UMto1;1Cv(0hQwF)!{M#0LSz!+Be9EjA(>{o-ttFPFy`|z~ z`b*GG{Fu(+#n9u;VTjt2N9|Ad;=Olwl=hC7A)*D*3$lqVFG((~zVajLzgPJmVIhw? zXY)F9v{Lu=Yu6laIZBg*i!~q3*j(P8B)@6zpDJ}C2V-}72`BCn{ZfQ*{B+yNSc}Gk zEg??ux^=u=84&cp#~pBnIeFwcPQN$JU0ErKb6kTWG%O?p;d{1X4`+nh$FKx5nX{fl zAo#-K^cHB3rn^~k5V1RZJ1mAsL|Z!nNl)r^K@t0Y8;I{<H@ zv$`+4^dh?*lh^cUg?QyDSjHKDA{(Nc4KjNA(Vl_4A3`4zzt0f62d9$u^*#_#R_hOvOJ zeFi_dLO5-n!%n^_e2Z@~(I@2w1Zn=UJd2$6Z)^(|sqas_6Bn<>Bd??&;`7CohRwBp z3Q%MI{dPG}Hb}I*toa8-MhGlcuJ@UcSGrQ?(|`2%Hq->mcM{%S<&Q&)zS?&xEwC8l zdG+EZo2=Iq*)Pe7ehbUjgUM1RYI!0{y28jaA5h+_xVafgH2>y_SGidy{k;4va`*EH z>*IKxCNzdXwz*xm$AiRYJc?#ox)t6uqgr34&M;b$Ii^dvapZkg5GxrGk}*e65ChqS zhR-+xmOnS2{t{j94QsXXOxj|+4b^FR6%5;2GWQE)@oYg~miEx9 z1p~@F6G%&z>kl(*;B{g6Ea!rp3n1`gU34SX$yc|6TrcspOVClh4fp`sQ@BtAk=Qlx z7%Pw0x{%X`+bxTy$6n*&e74(j)49Q!@QFC-jD~*z0gq{2$eeKgP2%_5rSx`CA_bwA zrTl7duM-lI7T(n7d%G2WzSrK^JM{r7A*P$_}3c+?XESfzE(^`OMC{a!~dU>&9A(fr_d5o=jABKqW56qXHSj4dK_dPO5mMk1e| z&VVq2to@jUg+&8od=wIL4IjMF^VLp2h%?8!^KYb5wk~BN-;S*U#U6*@SUrU2BioQ4 znBjwmKIX9f+pGR`4;xM+puUQfiY_j8nrV@9k&)nf3w3jB^2&T|M=;jyRF;8B@$b=O z!wWv=T9V)n6>w7H*^Ed|?K-U8W<+gFjt0zggN;=nu%1aJsuT=d^A{U|6?E=gyS z=h{H$4Z(D8K*9L$HNR;!qGGrr#qn-TJYVAbtXQ`@J;F&Hd(`d}^x0x}Op!2L*8QkU zsaLEH@oX?gUb{r}uwcIg(I^fZJ*Q7NJ=i>E5|!!(`?)vY14@KqPhdgA$@ zwzmjhMtv*d#H8n30dH=!5t4e20~V><(w#jlaO4UdNm|8(d5y3 zvcywfSWYYgw8$C?Vt8Y}CJF`{>UA|L}Jx6qDNv#h-}Im790 z{0;5WfCH85qe*QPJfqKG@Jvj@AHpXR_wPsXoLeI$-r3;-Id1#%qxi>o!&5%6hUP)% zCVurH3~!6Q1HWx}f!?5@qj^to`+3#4>nWPpVWsiI4#oV3>0bf3l`Oh)f%)kc 
zw?UcdA}sK_t+jxBn)~^PTGf-YF5WE^oZ-Sw%jFbZ6cL3Y0iR3LFWwo|#?`sltDBEUU3^f zmMXyO-N>XDL1NNNk)-t}vN`oPznlEQCx?O9qm8tB=S8I=qNR^R zV4$BwKtju&7iK-Y+&Ee7Ah-JS6J=}Q!OWO$8fJZI(7?~U`Q`Zlv6E&MFskldb;fT( zD_W#T?p+Hak%DLUb=N-A-D>45g>v@^?f&I+|9%$r{f=J3&Hhc31&pOe-^r3o-cwcO zeM3mzZmmViyPA~X_<%8cA4drQhWgGnCbFI`5w}5=D`8Ro-9D5+pop(^bKsKzjy?H273-j)PcJ zfn*LP-$$)}{=VtdDn(SBl(O%@SfCwE{l{YyIN!L=^(z-A}b^7vO_i@k(o`h_xc^z{ri0XxF7v-dt6TEd0y}L z`#6r*>-oA#BOxR0zPG`1!)54>pPf7&KUYF*d4oq`VnArO;XY@eairRnXJNWllHRX!ok; z<-X6mp$Vu(eT$gl?wWy|~gEJC!Q zqL;M_ET6267WcrGv-11!M$9YyflBM6E#h|)uJ1s14ePc%Onu*mMOZ@z9MUv3vzoqt zSMF2R)`PaI^!X>`8JMyh$LrcPzGjOpbLn%!j)#qwiwQZ6yaA$s)Y%rJ_nucq^fK?` z3kbxX4I=z8J9MpPl4sg~ojaHau%}<#f9t=y$%>I(x(i*PD=sH{cIatu+;e7&D&sN> zI63KC=%Lkh>n#X)TsGk=w3bh?VF+=YxA7m5~+D%e- z;qtRm#a^(4<;=P%4SqJ@wHe4%k7ZEQ)T0LJX+B8lb4?r7bgfAdS%m3*9U!Dcr;96w z-OjUUd;PK_had2QAdv~}v)rdPGd>0M&@zk~nV4CAK)W&C78-edUQh(f?#Uma*1l4g+2Y9=*?cn6sdeMH|~P>itLLmyC>@NEPYf_=W}Tj>mgIR>+VT z&uV%=QC*IebH*jXo~)yGE)ABlNGi#eU;I{3uw5e@D+tcpWn za?2Y9N7{E6q{P`cn4Gacq1B+jJ>g)sW&MQzX}@WN%0a6OX2a-F2)(qy8ivMF_=Bje zi;J0>*ubfOgCou7M<9pz;;}&mLi2t&>H~s6{KayAHZ&B`u+B=yA??q_T6*YK6iA(f zsZ6sp1pB$P$6m?e-R4E8%}Du`iP3mTf-tLneg7_L;K0XR+)s@o@Y6Wpd;nK=0 zAEzF=sYP_~^6KS^mfdEG)rvJ#~^zI z5fDGDVqoj*WZb8dRUyj?xcR;1ETmCdFK*+sNe7i<2u&)*{q$t1Xu_uo!T;R9{Qe7N zy`iH*r-Fm94mw2%Qb+(vMJl9`Ger6Pq}Q&*v{ug4kp|Y<(Fj6f>-`tNBRyIBjK-+f z?XPK7`%C6>K;wkixoKY{?9Z5LnK5{E784(3R0f$Oo}KKLLJv38E#Qv2$NJ_{9{~zc z%Fc~N4`|c7pfAjU8%DZAo32t@5Pc@kv_Gy3d6fc0PyRCnGv!K1R|T$ci1gJ zo{G9SSy#B1A?~13=GPUu9_VpClNYf0T%6GhQczPwooRvZFuOnIb2o1R9l!}Q={l0S zgjZ??EFd>6;_{@Z_Liw$1y#4EUZU^uuGV`Qf8FMd=lmW=hG+n2pHrLl?=;jv>-$}X z{Wg`?lh1PB22DpK3sC$cY*($c^#s*Ofz~{|y+rQle$6EG{AnN=JPi-^P`Ss@$6A8$ z2F+2Rc*<<*!Qr1aCkM}=3y}V4lET@61C6;0V37q$QRJT>LMfd5`IBv!)S?Nm=&9&g z2>!*2QQ76kM;X`Xl^^kFfaEk*`_`j7f;L=qYH7I{f*4$3tz{N%;Kzj*D#>kq;s!f& zK><5zvfvgx8qC?r@fXT^d1~Y3Ov&m>iL?926lmASv-r)D{~nz;IUVr(yTXRoz z;pp7EWzqJ%*X>EYziw8c-?G$(=+;H4X>DsbFRB^z3*WPN=^A|Yl^9&>CsBefccmTA 
zWL2ZjJjduft1rOB+qLtaZ30Ch!RC)SWY1yWnB3UpbFK8gd#-F|@24WCW_-reTw>>v zT!J>Mj`U|Za(ROUbx&Hx3g~=@i62oC;m79tx(WV{5EErITwu+G9zFw1GGRER2z|pK zmBPu^d$=vs0u{vQv;?SNL9_Gxc;^Z5H1^xcn?Fm@F{>oZs!WjmC_%I^V5K^V-p+nY zi(8C=Svn@KZAk)}$(9Wd*NZbGN-E2|oIXaQU`YI6Z8%&O^ZNFDiJ8QezsjqKuj&jZ|r9y1&Pr z7sutt5A^ilsSJQSNDVS9jGCM>GCiNYYkGx^59!vRVw;DH3y7709>%DDT<(t#fW9H<2K`;)Vv5=cG{+pRtDIIrfw z(@) znDJ%=^L^CR+~7#dIr*eXFgA*sv-*2N1!EU0?m$?X!(X3!%l?f8n15 z#vX@hQTYIfFlC2J9U2+}-F+%dl3=&LuspZx)zME^KYfic6PPG zi>vxK-C;uhwhszXs9PWhV(Eah?dt%~8zBpBJ2c;L1|_CQx_VFXPaT`8!`W?bG%?H7 z)fKIfI|&hTlYU-nX2`8yq@Ny@{KGUkpN(o*9{?vnAAy5SOhiQbRbmGnaoH=S0}b?{ zW185(SYQlQhsgjN6vs`sS=fr)S-sA>n~D2zpm*{TS~jqzzq{x93ha?-cA0rYjCQ1} z0zajvWlsYz{b!p4_x_Ehi(h)B%=q7bZm_}FVp*jL%=?_2oLY|!4!CZnb8qo5a{(?= zI$~n<9X~$)m~}(x!wmIj71D6IMUKR`Z^M@Cy~SYxVmhA?E>056^(#)KXp7LPFP9**}l<**rZ7dH82 z>9T8~?!jq(0SXx!akuYG5l<(5D$)bmb8I`R3I4T@Hw1iaD-grU%@0GsS$b!AAeuuD zoxB>dyR1dp^K2~o_r}kG^1(x1J@qqKv0|wyDQWI|C-nCA1~{B2vLRl91Bd_li1Kv6 zrQc;{-0ptX@#U}ko6T&6`{x1uj)XD6M0KRA%39($IB!&Ha}2KLu`3d375yS6yUNb+ zOHtgaO`^{;W*b5tvN648& zBq|m8b<;(B&~ajB=hU+n+4g{!5b=66_S0t5BvMaLl|pOeY+E)+;qGwyLvL^Kri&9! 
zxW*gl(!FlmgK6?LQ?!gd621&FDM?#Dr&8TT_Pqs>TP^f7V$T!6NKKj|`bfI%x50qC z9^%v24dgDAF=6}_~pBfFGEc)XJVQvOblR3bhI!Z{b4>?0}+X#xYH`pC(i4<5gI}`9BY6iF6w$y zbJHTNeW^YCE-Pb+{6-RqnKYsEf&5}fY~v&}J?wuLb%CaY;g~P>TE%^q(%Ug#?Z6_+((er#p!TIuPy2Zpx)9h2E7bYt zYgi(^c6w`3fur=$`RZ}m3x<*5atUOD*j0xF`8ndkej|!f(l_L324r`3yQtH@V(b*I z1R1T6E`IQZPceu}zAcMor`OvTA}Tq#(`7P!{&@QWe>ub`%;Nhhbk^){jD7kSYVS2A z!L3a=_ZbM@uO2Nb}Io1i@&YDUk4sEHJ-ES)sa4Zl!b23L2F8K=B)i| z79H$a&bI2%gkm?YWLsTbb)2Yt3!pVB4#VW~b2hrg4`=2Bs`QLZDR~jTyRP(7UUiAA zSv{ws0Vsj4hXr9Du?fQ%kgB9DIj`B&+y%3{ zMyk*s=9|z&f+Cizx#FCLtF)su!Wez{bPnkt)c!~Bw(040?XqCwvK;+_&?^Z#_l94{ za@6OY?HE=D=^+d+A1%gO6kJ;hcCN)G7IP(E!qpE?5&MZf-?p2@%gj4OEW5bCIrP69 zzonA>Byc()Y&kSXCq+Zb^eaVp?WeSLhpjo{7#AtsyX*;qLg8AE-*l43^3n9Ew<~0^4)yTQ*E|2qx#nyeB)j zn*om*M!D?qvFEf{E>-&;*pDEl5B_`9E7-JM={pQba!nZ93*n-Y z8o`QnQw$@P6pz`{sh;TL;}T&J@aj!Z5a+4n&YmGBI^#qhtJC!)8blW3SOupEkpJ)P zc|-n_%0Wj=(~OfdWJ`Nof;5R-pRvnS&y{5O4F{*f1_6@}cgV?_a`C<*zx}Y{e@~DJMdDtzQ%GQC7#8dj2HPj)H;fsgkT{ z$0XwK8yUv@(6?1Fi`rWZYF7_2P+BNuTGSdHeX~=APLUzM=_A(0zm9oI?Yg02bOG51 zyyVUm<8Qe236gurgz$PZ#vDb$+Hc?R=lLyA@OcAYU9-rR3;MCvF} z@qBN#kAEO7&*_oge(&%^g zbJtgGKZ2x{K#AfHBmH#T&vfeO@kUZ&rI0mRnKqmZ!zr%OsawP^-dXB1VA^YE4II6T zWdw%9Ta}9=8I6$D>38J3qe9&6822N{RTfrID2s87%e6CJ08lFbsB~JM?vM}g1f+q0#&r())_P0=b15i5>r*Z#=XqnVeYVQ6+w0;~aK*hZ#Sf+|J@O%M? 
z!}PPrOdgoa3=>U_$NNR*S>H}_nSQbsgATuHZ?YEv!%zSlR#2`mIz+ACbC*5T_=Pi-BQF2dxwZxoH_h$9#1h zZw1>>^zfp<(a9L12aqQz(g9Mt%L7ka&X9r2ju*aQhq%3kiSyO`G|fTTI2o?w2KvT; zNnOZ(N0pwsN&|3Jrt{241}BsI0$ZE zLhrYR1f8#>2?;2B!R;7nUhIUs{A3!HcEBRnxZYim6;r##;A=(k?tP+i7{86do2K2U z`^O+GXHQM>8rnGgHyRF<_%a^$x9aJl+r#g~V4Df5wxfd6iWQVVj+2$KRqH>?k;5!W z+-53ycw~$doBjr#BF^Kzb?$*Pq9B3}Bju(Vl9EX_aegvASqqBa9#g?LgNL0R8`U!% zKOiz0)GpAywVKb5EHbVQgI>7V(DvoWU6}bTgd3R;F8(z6Fz;F5g`}0THofCLu-u9- z>|I(^#ppGEt`5qvU4%1B*1*ZTr&)ft&AOYZ5Ms3gKe}GE|MmFLDtUyjrf%MzG^L$l z`R|+BvL(=M89DWDKS(A?Ct`6{D^rQJq$quzz%W60KSL+6k}I96efy`eFZ-^h2mke| zQ!~$PnB7_d6}!>zKF`N^a4GEk-fYrpFy3!wkD1YcCuuuU#$Wf(Ao2T^mRR`*c$1B7 zbMGPG3U&O`1iZ-D5&tG1p{9E=_NDh;x~;h2XZh~nLTe9{Z*lSSA3){+P?4S|XKv5tM zdY*CUv&qRpmPZP_vpmV+aL$QOjg^f|6Fy%3NM{#d<@42f<|YY{GSoi2zJbXzt({UE zf#Yk`C^sYBlz6foRZtyyAfWcui7^S3WV8;2|7K~FQ0 zX^@?J`}_+I;0z)+CeS%c5MqIZxQ#M9Dxm2=Lr^JjGf$DkKs!eP_rs-ny71!ZW{7$XH@-i>9>vng281=yzRvZ0N5?ZZP;SK-R4B%kiei zze$k$qcx7emhnW#*cPc8KQ(m+wIRl%dH40e+b$VVH5X+$u68Kw_T$cIWyC_V!M@ba ztU9-+o)KpbM<0!7`@Q}&5wd)#5KB!m@ml<6!Mb`1@<_$O%7;1=Gj6xG6~kja{y8>! 
zWp5#?cwfwodZnZia&xoWvhbF&nSXS;<5X852?Z#`UF2wP1X*uk67qTX+6oK{9zU6r zgEdICB3~Wg0JNalxgw|i(a9|Tkso2x%m7So?!n&%v9mc!a;_JrGnYR;J+wM&e2ID3 z;J3NB|Mu3lcg4|@Io?XU7nnDwg>B|uncKSh;eP=4y#m7fSn=PQzi8T2M?rjnMI!?2 z`0fz!gUM88vu}@%JbA*)j|5LO_-$i zbES~C{z_RGZJfz!`!w|7crRXq{m*up&moVFtu&p4#n|S)jeyIbjOinCL7U$9h=w{- z&nb7b0|r0<38K^9jI%PC8GX&C*XN$s9Rc>dz@(Z7T>6iJYX^IC0wgL05gqxC!{7-g zxu@^%xVa`gR4O6IZv!s^@|A}e)O^p~Wq|xkDRJZvWjoONncde$vR&g@&i3Y`zkMT} zs~TfTJ13N)CY0a{j=FO{WqvFhlkkS$Cz*M2|MT0p%W_*b{S9w+0?K_!z=ue+T6sUcLU%;g8ue$iM#*nl|024rsz4q)EnTz%d=jO8iSa{Fsf!^w$j-b=&ZWIp?n0Cp4H#J#l7NLRb9q(h*#uTV zR5VyJb+Y9`E~icprTbM|17J^utP_{HPhB<>C-H-AKzdb~|K zWT)rqt^Z^qv;+GUq|(~&~6bZX;jC5%XF;e2H^0%3@9 z>+leLzVw$8+=@|fDN2!4mCt_PCK7TSiLw0hC4omT>|n$B&*Zoef`T6-xkoW_Z4{td zca&{^m0iWfZ3jL6>{7Zj3uMP6BfKd;4nG8vu`S@=XJ6bX`C#3pO(W?s;>C0Gjuh#= zJB`gpi-#xf1!*WUr?b|qEVckwRQXcnyBPA^^Mr(Fj;aq^{<%;%C*NtYHTLlMdfb8g z751o>x+(|RVYP>wvpqw}8}o(F7tzEFK*V%xxCViXqW;C~>{S3gi=2u1=T14o;F>nu zB4}NC{D2jnI{`wcjKX9MNPcg@O)BVr<|W}Is*MAi3+S(}ffGw%%w1v2nvIz`)pI68 z#dw0!rZ4TuPwJPW-}Mri$mL?cv};?lanC;$j^-xivK%!BX4+;&|DPej-;@ zl0xh&T^-`!vZa^;C4-jD+WZCTm)66jG>t3giW++ zIs_MuDGU&&4w|B}-!$*;`|$AX)O{;m5Bx&l@KC zQho!ek=XRjCu*9+YPGbNC8w04PBb8%XkUCz>1X!EQVfpW9ZuC~@VSi3$`<{klPL6D zAhqkRBh`DILNz-FqrzbEG1Ine`A~FMdT&*y_N#e?K6m5si9>e+=XGsSr;(D3dfOkB zxS%lZqz#ammCIS*+xRV7W+?kmQHT3GQ_{$rt##*LWyEPj(CLhGEG2i=zYSlL&bj9d zc}zSa=)uQxNQ$EuiX4eA$o=>+9_VT{+w(3p@7}MIhDS0>@kDv=uUS;!YHMWu>0K2( z@)L`=bN^;_Ph>(|j$)WLLpD}bX+dQE_(Ms*;CR+Yvt#ZMmlNJIf6Yxo(NdYq2%U&U ziT_$4ugArG@$v%#5U<>PU52@lCGL*5ClmV@68d`oFssU_ zScZ|#0osy*^M1HRv~yG_e!cb#x)%n>rva*!Jm)Iu4;qO_y8N#4@_oZipbfw-tr5zrg4bWt3WrXc}<`S)iC>S_jOXl_ilR z^euos9e$VPwKvpx1dB_}`OM`~frUnn9Tj8|F>`Zsmm$U6-k@)dM?El4?N^Q_xgLva zT!u;<5#0<8lD|$^?zxFjL|pDfT}o#@Fx}fFzjNuk6ZZD=+A0&S=z;!bWaZLZQ-`2OiLQuh3s3q*coojYlg`0BR9m{)~Fed?pOi^vGX^ zk#Sxv<9ShO5=)P-1is}(o)(2?FYLd3$Su+m49>+DWLPHcrt90#AO!7 zf(7z~92m>Kyvr10rEC3!9ceEF>Xj+m&&PVZY7wEJaj0{7y<( zAoV{zClt!YmfKMY>#QXF_ytS?=sFeZw*o))XY1#j-}dp@dF;=4pto4M^W)RTiNnN$ 
z=75V`_4g}28^#DqF}!TtpfRSshH73ojy&E-6D~4I{{}f(u1@xj*Vxsd&@LG5sU$nKn%`2;UX zaCe=V)5n6JPahg+W>^%DIVV!^i&AB8$G1-UF8AqpA%b`1#J02vi$_v?4LOVqy)$&5 z3l)VT3`K5a`@h!y7F?$E+g-AkfHMn~cg`os{b4@)h6_cs@fgV;X5w((58rnMc6S(% z>cf!1;ZS5$4;-iN3g2-7r}_;K01Vhb)_CoXau>jH20&2t@V-xy(^UP3({m!-QlItW zYQx&+VtG9YHbZpbD-*?>z_WV%9h2~yNK=cD0zieuGDuBpE+xGWX3?t!qhLI(q|cw- zlXYVu`3}|h-?dbj z;~Z{tsRXv?N%%&EDF<*|+1&SrlrL&I9LSufY&_dh1#VyGdZ`8m7QAXNqbhe)5}+r+ zwCCM0={IJEgVmNr*H3QVYXdq#l$0w21_hYkR_fsFfc1DY6TF6IY1xE?i{HB9-D<1w zLka2d*p^2MBf*dj6HZ?Uc>>r#sHD%KcBQQrm9Wz{$hB1(vTT18YnfYoz%KPZgN}6R6R!4XI!xqw72OwV%S9=CY;I&jS$mE-YcGzs)R3f93qoZ*mN&iz6#|58M0i z{9c`DYNBi>D%%*4PCaS4ezyGkvdm-37<^+8&GZDUSCi7VklHG4_IIKpJ&sv*`?qt` z&_(_M>hPbB+xpKNQ>Bctf$G&S4G@%yu&W+HejF0sx-h+9p$kXwGVTs+FtMvY+8wy@)+3*^ zyr3SuiHe4&WH=%1dB7ELQH#k`V|tSal$c+EtQPBVYN#8q^Dh95l_`5COyz##5`12K zCh_s5@YW9_Kffw`6E$Pr!pZPpPJ$T4a9r{ZFV$%sxlJK)8*w{~^=}OKzXv z5W61b?QE5D!=0)~JiqqKhYO3o6jJnvH-uk}po(%cdkXTQTqf2EcLxDbBg!$j)@CeN zFE8|k+O=RpTaeCd4F%OC(?>^Uq=UeGN3^`ko#dX7lJ$j!BTTV8CvxY8InY zl%k|V85F}_zQnwK@WGC1#`l~q(|6~g_rXd_6`004?_4f@k8MLc%cneNuEs3({aNEM zV8lXQwp<5a(uX6hyDsURHx^OBfwC3(*HL_;v#Ejl_voTxj}uZdm(ohSnY zX=ze^-op?l0baFlC1&+ti|9?^XFO`>>fPt`zZSCa!pA@7tbN;CvvSVzcb34SnN!2Q zK5UbHW}Y*BbMm2FXaCH6yZ|^s!T0#e#>BKT@Uqe9B)MBCK1(bBUZz9F)&Sz~Oj9&8 zeRda<&3`nQcmj*G9j4Y0oE|VE1s$N;fWf;0f`Ul!2iAgsDDv+(K~yjaE}(+Y zq)WIq+rjtr6;1Ed;c#l0RhbE{at0j79mWpc9rnKEqtNNBq|`{4^WWj~Pbi$JOpUzS zedM`!aP#6R(`kkCIIXQI&MIPA9j*U9RHRpAFC#Vddhv`AuYLKalm#kP!0~7Fcg-|~ z70-~*yd3PWZ8jDb=ik05Aw&6@M@2&);l-GlXKb2x+!$*^RLT0}!Dhhj=$JX>Dr5GH zOJ|1X7GzA#?;h7cKA+LidTG+d>A8PL6m<{O!*j4Qq7Eh$&ith>PsW;#AY>`RGsycq zzx?U+dDCK&Ns9Qsq??(5+lo9I*z&d2g2HIB^P{oKiN6JR*VPGo_TwqYkieUsg&&W*CPQ9P-7+B=+tSp_R1 zeELb-gnIVTS zYkoAEJ`m2XT7|mofN%b`AUfTgHNTuDK6aNGz7d04GONF`Z@p!{CHDLX2gr5|tep=s zyxoblpF^sR3KI)M;LU2c_X?J{Q4mF&`!Ig{)E)Gsp$gCUR};E5E)lf++Qam#-@{;l3qto4~j3`q)VA>;8;O#oF>G7V zE!YhjY0#Ox7n8L`rd4=EV0h7^Y}%5Ov`G9HV}EYuw5ef%3@8*v+T1uW|HdisH8yJA zaDlv7jwC;~zVLx_Y@BWmTcNDg-8&B`+!>UNO(>jk^dAcw_VQ+#oGPuEBT#inQth{E 
zXOUiN(h03!1J;|I@}MCF(p=Ca*TtZOQ=!D;NA6fV)y!Sv&@cKv?dL<>3>(rwx^v^( zl6AC}8zfF?tmQ2S&ULktU@BvU#OZ*`(;3HKWwB6`@Alt!9!4qRgKob4XK`23dNq$V zCI+MZCEA(-Bl(v^ujmPH&cQH6G^^=y5gIlFoK9o(%-t(TWNFFdvw0+nnw>L-ZJ+01 z+N@zIoRUc1t{OxbZKW(cYM5qUo_+B{orOqpts_1=#S;2*bc^3n8E7 zH~b~}1pH`41${PAlQi}l$FtSXPH!>6xrxgBYqZb5hU*2Y_PV9Ai5v#;nXXZAuE9WD zlPS;+9DsAtz!pB#I~1&bb@_OlOAkVP&Pz!QY7+ zb&o8&;$WAmac=grMmFdcn!dFk{XD;KA8!@>OjsR%_wM)2{q=lDTl!9o z94_meGsWE{Rrz;^r!N{0=I7^;3qudO`gev(1Ee98iHKz|S(G;z=hFd>%db01r#v6S zLH9B z6-r(Ds}{n^g1Mu8m00Yp8&`J+LT`T09l>wUs`e}5X~LlFGGQ@qeV&#VdyQVLHl?SP z(>pO_snlWcin{IDMJL0>*{+78NTi0;c`$q(pN~y>0y&+qLxVyiUo^bS85P*G_@|3C z%Np9l#ODv>8tV3k9H~^3xGaX3`fSrWKyzs8dcF_jwDkM`Q1J`JR$ODwW)BTddEGOr zXOQVB;KC)Oqy6O@+_YOdh(4=)YnKcd+Kyg$fy=WRvh^I+52h1})JRHc{l z-Ni6gmi;!Vi7UK4mS#njHaQxoBY!LxEAtX#XRDJg;IZeuqXE$wD0pE)QX}Eioy1N3 zOJSj%v+AzR(0!T0&0!5*d|;SyBz? zy>=-0cFa`OZ)Ig=zR{A_;icd-ui~&|-1Vc7T@wFuclzI7^Ij5h8Px?xri)~&X4Lyk zMzX*wINmm1Ddc~7?x`IYPmBZo>POnzB+4rQx|ACRj~87*0jp8DHEx$l^~)F-@Wp{o z50tl2_y5TZ3*G!XGXOM7!!OUV)2kh2ak^k!V)VtH8RsD!3CqTXZqiITLt84D7p-3& z3aFpzXN;D5l|O#ZeU0hqj~`rzrFX}8i#VfR?aC=hNNZhKdg)PBxokw!h{dH)5M~8P z#REVX030{#@Az!ZWJf2no4r0dG7Jd?$!%PW=DQb92Y>^HH-E>fdOYGFgy^jN@l_a| zjaM`_uez1P9E(MbVgL|7K(&y##iI5Y&L24F5I2j-nlD!?G(s`yG2}?HFV;!?Gw+jw z-we%_9A52pKO1S%DW#*N#ZCO##Wk|&VjjM`Ef64q+`8SRn0JlWE$pdQyLO-DC;@Mt zn(?CI7DDy;oZ>%UTN+{Yp6k4LKc(z1M=^!$VmI`V?76AzGARm+PZqeVoI@7&A7eFX z=Y=Ufs($@?co7R1=;Vr5%r;T+90ulnKrBJeZlS&W>#qp7{Y28IC@x{DCkH4FG8oR? 
z9TI{UU+(Tb|G$O{`_%0@3VTg3$sPHmp8>0hRF>GS(6*>P3&Dw|xcN(sW!zHi@TeKZ zT?SrSPG|M8@|_MUQ;v;`L{zpDbDKSG@oKfcm7-4fWXWq6z8}S9Y(JF?UduA6*q80+ z5lbY-Y3qIKzRrt9TODeEK!zL_8@sB8-gKNSg8{mzW|h4jJk|m9VGfdFMbeWvOU37u zn>P;lmPYw2>$jCArhY|5Qj3(kwkef>O=4%U^bb_kLeA4PaG%G_1Nw$FB>VbAtCts( z5jk!J+LPl|oKjiQth7E1k0qA+{=ihJW7Ta3{)q#IGvw&1%E{sOXWs7tVYRJVRH??h z@6cpx-5;xd3?is^6394!OldZnBKvbQvH_W^%uXq3PdZiSdHpKC6QTB)(N{MyjEDMP zqoILLgURxVyjd@w(p-+i(RgLEwk`pl_%j~6=&4@(?e z?l9j{joav10vfF_i%d^fA(Q!-vp&D7>T}NvB;|q3gX8e|JYg#1HwWn=4)%^wP@*5a zVY2};nHW+a(K8<<|KWAyJ-4m9P)6kVpL-{8n$x}%o%R*HyULvnwVN8!6Az7Z8y_Ec zYx;aYkyCF8+5r*NPnuG~ldYSZPaa?srTExQgWyeeQ=E z8q4s)DJi4d#O48~Wc4NCr28?wz_z3RL82AuYt@{c7MCv41R7ntj9EFL57SpvK>u+Nc0Q$OO8H3pQOqCZ zGO#+ssLHiDLWimr7a9= zNQpcef-YVh%++4N5A*M3XWzXG=REj(Go`C9?M(mtldMP6IDtnbnqMdl{-2P!w(&1k zANh%Rs?!mYK3iw_ZMCJBehGja1}k@tYR0I8{`DJ4Zg3|cW%uFyiMtsqM0{Pfe)5z) zc(&s}^hdrr^}bYSF}9G@>B23Ke-rI+MntPhAn?QrYZTLlw%ENiA$#0BQHmod<*h@cUS18*j^ z-wBk*pLAsS8Tg0h04--bpLLHlcZ;1dNO%j*U zEB8_2Y%V9HKbP=w(o-#pb6yi$*=6!YYk<85A=7-jk$xwOh<&w}HD{YX*BmHzy!Cx3srqE7osp_6~tr7ieQeS^$Km1g%jb5K=bLj zxMDadx|@JQ&t_ctDOQDpluXuPTYcfkD2xKT+xo#bn;aKZ)wyO^QeCtV>kR@U^KdK| zwsSR{4>A0`8QNNyx?ZvOp<=TojjH}yn&y|0^#Dh2p0!t$ITs@yzw{{U(<*y_uydaT zo9XHL80$9l&IK%5vCweja7LvoG2R@Ba|49S;J#C4YJ5kNTVi#K3KtibnBi{V_qfNv zS0SK1yK>7sOBqZ)6x)ArnBoLKYV5Xdk>7K99N^Ms@xj{C(&Yv1)(~-;(pwT{B8)%t z(XWV(&M#h$KQ8Ogkz68@%NZ9^yt9cO17tw)FdN`7v|VqF(h6V=3ZJ{8W7;;Fsgmd3==~F6b@dq z$!~J6hE!-{VZ!hZCRX?S)<|iqshdWQ7d}MSZp>73Qd3vl#G?GPQM21$!gVt5IltME ziYPQFh2jCt1O=MWwI79YEF5yP16|qBbQ~!%!!JFQg#W5sJ<_~^h!T9#u=3#~Nj1)+*=LlZEuPCo*dvsnb&6AuCt-&sN zLMi2=I+=zRptCJpHeO1{1gfh+VO?NQDs=_uvVVYyk#vZKh1dZ#Rj&Ve^D?v^+h7t& zB&XB+1gmaON8k(77{Z5J4hQ`|V2*qv_T}KwDijrR7-Pb=P?=RZ{Cp0PGZ;-geE!o# zToC?OVDgcNHaZj#G!>4=LYi50O$kt-UkQlH0+0L(w(sVCTOMeEk>Mu`Mu=?h`u_bf6c&N{3CkK| znC{FlX=V@+Ndh?)xacPEr2_75+1OlL>o9#(A)`O`Hat8}e`5kvR<2O`!}VM3srvCE z2VQW4iYc_s>8*Ck$yWED-y}4eof=bIrYZ=@25nLzY>P(5pK+%yQVRz@QqxrK0>{x( z7jN!goI3-GOu=|!9Ao#`X@()BU@PpeE!z(|j#de2x}-<>xe}of-+5jK%!Lq39Q>NE 
zv>%1CYoWE8ms#!>$obwts}AyBx=!{V+NSZ9m>-^Y-9$Pqh&$I};2Cs3L&M#m*YHp> zwYx6Y-8SvdErrKXQZvz=^Rn=hc%!CEJM(U}uU7k$17H*IO)S(isAt z`l%n~+(bXvNVKT&Lgb6ur>}Oe@9*>q4!CU1%Az;CT`ch|R6FYRZgp0QA$nzBJ|-b) z?l-Id-qj`Me9E@Qw|4g7t26BqNBvMwZ)az_4oqcs8{ZxN9lsSbc20e8`F!gxB>+t1 zaKJ`RrTOUiy4mGS?B-ZCFE|cR@Eb1QuM+wH-Km`x zx%QLui8bYia#t<&Pq~FB)DTrr8!VGx--$X(cdiYAc8dV=hqa(t;T6!QM_c}&7TBoT zn@6F_XqnDKe1|*K?MCaE>3p~7m;3X~4-P(@NvHEdgkR9Ln+u217kAn1{M^p^J5AIo z7?JxF%)HY?RK$e|4V5)9K}IHaTA%8IQKG{=6$4T|Fn$Qab6;ppf9K*esVzy2SACC| z-W`*@HJ_8&Q~$Lp;iZ{Z`#Z(LTe;~PteL3I@e|PMA%}mdL%gtnjMnqMOIib!a4~Y}aM$EEDLUm18n)_S)bqyy85dS_n4xlMj zxLSlV#(EckxVYh!N;n-urioIJYY*3#Qfrd>%W8l~_2@emey&YU-UXMZ>BcGwQc8ui zP^)YqEI}RPxkTl(?}^n3KS(o0yJbo=PyhSa)@=;cT)je@207vf!r>fT82n31^ncGk zGhM-T*RCItu&+x*t}v1!R0bpPJeglHafSOb|D~ZJqA!ZGuvcD1NhF6STcD~A6mXu8 zkFec}4v8L7yat_|M1?JKCZocU?t-#fTJ1ybp`+6pwnvC4e7pozr z;+r<7Uo04;x=`8eo{M_&6UlD0kp45;M<#5SRvTGj7U}f4+QbH`XAZtT)Tyl*=ml->7v?0MF&j!Qk9=@UYrOM zp1s1}`tRnlncqb$_2Jv$jxBe8Rw_<@Cz==1H&V=pcza+gXUIiG#?DRPj6ED>_WG-9 zcet6M&|B`49c@WHlmO};FC9l+LgC%A)F4e|gwlT(IE0E3;3~{n=vG2C$tnI(G5)o< z6w)28jNq`!88J+uu6o>9n$W$#sJq$xp4{?i^?gAQ-C8=SeeRrdbLjJuJfrt?E!2$+ z!L{=oPJ7ykAOCws4~}=YmrgrcO?v&loZMUN=;3SwfuY(9Q~EFC8wbge5`v!_-_L!% z%7E$^;v?gi6Iz~c<5+LWA@O~PY-4)xT8zlVKUvf9T7qp%^BYv~?isePudl>Ski>i~ zl|ex{rS+i$%>NAU58Oje==$&fy_qKR3{7nPK6IYkaLyL+<#2#jXd`|_`!WmVpKVw3 z?xv&1eF0m2gOr01oFmVBrjLZOv)AkXkT^uP+RH(k#YLQEMVrBWDy@Rdcn>eI&7yP8 z?V5yat~W-0Zn{b;r-oNI5k_DB+;eRItc3Dr-@xAn>*}w7$xFMa=|$hX>H?<9L3c^6 zrh9SZvIqs#a^lJl^KZX5Jl;KG6;YR0(b;U2*}%8ppT^5O#D(`a<@tFAGQAH@1q2i? 
zR0PHKe;sUy+<%lj%t!n)XeK&*HDF~LJCjvXLuyNmTxK@IgR(=TVjN@hZaTNi-Rxya zf00FTH?yXdgN=h`!w;%$f{Otg$CzaffloDY0!J1vO)v4k^?YAJA|wp6nDJUa2cR%e z?wS17UXpQq&)oFpr`+Ve6{_W(Rga~2k(>Lr^k>{hm$nSK+Zh_uj?(2v+i^SDI`4{%@TlH;{ZJ(3P*jheO+5%XE9dd?0XyNa67q0 zr?fvE;Qt7S?hGnfhYloir;Y~=>VsHLtBtg($scn`s^b0}yhBLO?s_ndza~K5U7iH9CuS-8j6X+bw zCj8<&<3E4Wlf>B*O6MyBJ2PN57mj?-YW?nex^F)EI$-eWRMcbgUNIn&;rS>yvmshja@AP_843dz2+un6zDY@$DzV!u z2qd`XmG~(qtHJlO=(ajVH1LI$z7BK(7NHgx2?QE<()0NNn)45WtzZuLxMFPkDvZQ|#XyFXX*cA>$MP?FO`lgWLYeB#VRX746(dBkRc*zI+le{w>NB zCclcXcdCqVdD$Cpu=r}XZN~riGYE^w0Bu-Ae5VF!$^#Ynb$#f}v<>5`vg@79_wT1e zn$%0^#ML*SL)&lxrm%UBG91e8(xt{G8 z@Q$JJvW*R=S7bpWkizGWAyPj@5R1DT4A%Xi%m(k+_<6y90lT;R&>GF}$^-8WN;A7Z z+=a2z6X@ZBiUne|t}2&T`1ziX8br_vQZ<=VHL4Ul*94LyDEjplOI!cB#LVxy9!wgY zO#e=P#?F2h3t|wL(f*0mvUK*q?pk=kK;=LSh4Z|J*zr-kM$N&5gX_c{>r-FU;j|Cv zecHX!lhnA{M+!FQTxyF-lV>G&PhZrmm(+LlcXMp*#v3vO!aP%_Gv)a z3|zeGOaIEZz5h1WVT;dQ(PyPLDJh=!$!_+i6y$fTS;T&ex#MIQProJ@j_XbAhKyjAKk~-W1uL$)n+JeDGYw!-;C;`^F zy}3@hO<(#o@;-Z!{9NTBOOVn`&AkVdIt2f@&b+(|sN}1`*KkUl$KybG6&n*n^y{Uv z?N1aXXwb>P&P;R9>m|s6VX~+I6LmKIA_{P7bB&y@1Jq%$JGpb!<(hgA&(wKxa~q*_ zFFr5oWuJ}JK&Is1iH7joed$nl)ygR3T-m6$9eOoUWshdzgQVq?`K2)o^MKPg4D}+u zgFz`a2L2AeRR_R5f;vJ6^Pf@wtC_!vLh#DJCTMZ07%KMhM7J+RdwO>AdSbPIzz?7$ zx%rLKiJ=8#xr`;|QhBVoE$c7LT^AiVzjAJ)Su)G@kxz zgT5@#8>tmXzkG28J4-w?o!;BIob*!aTvtPicC>f!`*paLI3p!NFy9|q(wj)q$EU2LZm|k zL_$*OkZwUh>F#D|B<9=meCu8QbJo(MGtAuAwf8R%j<-ye*VHoZ^IBswsiiaPTtM?! 
z-Z9QMaK2j&O!Ct+CPJL#?FOMm*KL;`bK@0mHiq4W_yIHlHwYnvck@xO5AN0mke~``7_#{RppV zfZEf;sJ%m?`c)X!d~q-w*qf61v7kr@)EmNH`~L}6y-Hsxd&LlS5Wy7*HCW$66%^)L>w**npXs8dW)(U0wg2(DZe0pn1hNvAp&`KZK zd-P5HRD*Ai#igeDC^kvzgFV%R1r2{Sh}##DDH=ojfvv|1CxFa5kh6M5?)T@8p(_!A zdg~cqW+gM4GD8)G)^y8(=yW@cyv<_rgGVJ&AfR2fJQ*~cD`IQ z=f+tCxjtCghfsb)zYgu-Ko@~5Z_FAEFM_6P@ug-Jy1 zj{VM4i^&v6l~5uqIM%N|v~1z+`f+Ytjk%+4S!~F9(pHz~d99uQAPL-X$y{)rg($v7U-v!h>TZG)m#*g2~QpKb23E3gp+Zcf%QN$Sl zCE(AXZ&M68?OJ2R3Bf>0bQc<4Bsa`uF92n9Z=xV0^@bB#a{IN3$t66kEK* zE&pulPWzqCpc>)jn!&2&=X1K-wiF}&3vUiNwy)k3jW-pXe9&1)6TvE7pm%17&8kYq z^Dy32fvS~q2W$2kNR#!~>ZVOsyN1t=(5QcK08}$HxBr+!4uaT*#NNp7rIa!y{Lrk6V(?^Zy0! zQTIbL{L8382>wpy+i-si6+h6Y4k@Lm7#9Q)#vtA>e)-fyPt^kF?4eIO>W)@;5L%YyNR&}G?u4fU2pBng<*>6VnEMj7QZ!Q-v ze^riht8Gu!nE$ zca^GPFDyIoKm&UdW|eb)v)Q^*yTsSPv7uaCsuk4a;|*3=5Q`i!p)zhyM)v9oz&<%D zwx1g#`7ljZ9W~oJus@>0gFEtP>DE1y5k5lY1e&R}B`8#h5VJlz{BUs==9h-m9|m)p z@sN847&X{U8j_(`69L2eEmEs5{a+bB(pd(R#{IaZ5YgVw4NaSnH~Bm+;K%BKw2BHg zkUOz}f7?aOE9MOuV7L{0$l2;76=Py@-gF=m=sUb~BW7w~z9rau_k8WfhvLcHglX@! 
z4EjWc<)H;>vGew_pSL@y)(=nTuBdsW%mk2w%`2a}wJ;L3K6eZynNM%!os0Y_S~!y-DpKy z1KTP5F`M6kg9kn-BJQXhpXC->={PNv*-o>-#R{78I^gWeoyLRDjrUnuO&6a6w!~bv zu)iIgdw$di$a74XE+%e}0Y6ocZ zWGbhAhx{x}^M#~WRQU&tFC}I(Zzqu>`Gfm^r<`d21N0yv9`Xop;rZlhc75;=h_0T+ z_WiZv5gZ66oM$6_p)}%G6s`f8HWHE;>6}b``s`hMr-#dBOgH`h@NK6C3(k54Lxo_O z@41WYXv=m$qF^Mo|78o0KSZ$pEWH3t#2X#90Nj)mGSJqh$=wUVM~y_8IS(m$o<4lo z{j1k{wxKrhV$FHnpu+2s)=x-V#Fxr3MP4fu@H$R=`ASd&=TX{p8KEy9^W|yzQ^&du zMv1I*VF^>*L(+cUEK`>9iS5hsb-WP#1|An;+_1*j!I(vlZ~b<+ysvYqzo?{G~4f^mkSEL>%gWUf0=5te>8 zFBNGOVi6L5@MU|K{VvO9mHyLZoWsF8>MHITua>%8_rxJvs^(_CQQa>tEYjg`hWhEa z`7L;UXXaF|k2E7l)yvpL1Yb@nfJZ#8#rOO2w;o9J)$Hja0@Yg{&{7a_C>KN31_IU9 z>N!#bDjI43YvH$OxI^lKNTmYH4FGAW&81;G8rQ z(%(zt9}3*jf)1j_4tS(cK|*gH|8xjgXqSPeP-Vm-!{{Ur>c#-lN|U~v`{iU6*mv%S zaNk>n5}&ja&z;E*K+WEatvw$&ozsRg?!!r8i*^jSo9*7-yNQ<31kwEk+Dz~Px3V?9 zp6c9#7rQ)jIt$^o!fsHuYHT~4`2FL;*_L%hg?R0W9HLhYwilsh>@bFJ+OLcEPTX$ z^|t56Y948|LID!cLo#v?;%-ba2#KDAf*9*yWuWUh(jJEp3g1a(xa|z`76NHLU7Wl8 zWsfbh=(ziEPeyn@gfkx0x)4G|=l=A^CMnbwC7j;@x7oC}%Tn!TOWLNcfXPxlA9%FF za9@E^x*QU99|+p@C-tq-+&ZMn-a$QyBX&{xw#~YnqfcZHw5yIxHxR~ir@a|t+ekvV<%!{WwG6fNyuyc6Jf2E zGS<(LjLepmi22ct51@ZH6@EG~CF#VBO?>bq;7KK&3`b=@?4Z|p|z1^KY>K%f$d;WKq*#f(k%pue*VDjRSx_{wUFp6>-~y^_APwM7CrV3kw;jew~A!e%Xb{58qKEfn_vWY&E&b-Kyr8E>*n6d z43Ss!^G|kP42S&v_AKoTu8pB)CsUw)&rgpAH%6+tAaRl5(oz5lry(~)N=fN1ssuAe z{N%nzv9hcRkuR-d zG4ALVX$FhG_82*?EkPx&ri;724NDcU8v6Mr(LOL1g2uz3yXV#U0RueEE#Mte?4}&t z=CVoiU5#u;{`%kO?Ry^uWP|WeL3}+=dm9o>(%%H%UU0TYLB;PWv*;k2rck%=U{ss3 z|6~hIGP22720_RG&Igv#hJ(91qGCOOPIk3OhsE+dhVHM8zdN#-{;PI;*#xAd^3eLZ zKKo4+s(*5>KJ4JAJIJ8JC1`b4mJK*4pP`Ay*DGArr6O-rg8e(TGll$^2t;oygZD9j z70Sm5sG2rBRBthicN)8o#9lQ`+;3G9<4#cFE7U9Moq9M6{g=^XBR1olzoe`KNpqE{ z)DH8@!Ldg7@FB`zp(EI|L-zn9x7>0=i!Lau61Qf!(~kJ=nslB~$g1hOEZ(}vT)-(M zR20GM<2}496^oG+IX*7rMrHGIj~Sjd?=&~ zR7i^C%&14zxY^GcN5ZnCU2B2T=SU&ZuDJ;zwF_yt=EmuMr2J?GEDADo?_>=sr&YaFP*&}8 zzDyPW$9(hODHH!6`TDAz{c~Tks%8LVzP}uM56}az*S=ZWO@3BZ*0T~b{Y%WP00`@Z z)~B1O&xnB~+{b1!FDb5HQlQBDVt!Vm19^V<)_stvj=E{7ArgYci~ 
z$o|EM!m&5x0zSb6Vvf%ZC{O&@Ahg8~k zuVE#Z@&wk>R;3|bZxgvMpG%xOiq$NmsugGE5ng9tFA2pD^7p&oeZHy^5)0Q%XAF54 z7FSa@2uGvGwz6NJ?g|;J!9v;zW1e%Jkxq$D`0h+yO0G9%`|>+9IE#6Gkyoc@BbS#) zBQfYVM0UR_1Eu^+`?Z7LIX??7;8Pi?(A?TbWVIh~beE?#e)gtf1 zZd00OgID9!`-iiy_fjT%pdgpw^4=Q4Ykepi(9kG~b6hwfd0Xd`xcNOst`ZX~N z16T9(hoDf$=ck!7Vt@ZVqTpnbQfe|j;{Zu4b9I{2Jx=;s6LE;n90Wy>u;;ZHN2;J; zV*m3RKMK6?Y8~j!WPiWc1Mwo;Bfj@%J-xR z;)z!8nE|{!&PPphdE8~XK-s%JAN*065*MZ`aI(0pslhA(tyeNGE)}Z1#2nlAI7Ze2 zL&VUe!5j28`oV7y^}(A2jSpd1fw&dhZ`m z4`o5>!BC+t8^}xS>rZRw=!PgdYceEUZ_~@j%OkX6lALSP*IQznBnPrW5tXlhy!<`H zzJy~L()aQGxvZe2&t0ip+in%Ov!-*!0K-4lnEu$e}(?jJEz*5ZXFwQOKxBo^_P-P;s6+~7& zkY}viMxcQpgwJB| zZyd@YvfUL6&z%gaA-;`(lN5T~X>}eJLt6o#!+0#vPgb>8T1tS)LQE{b&H_EG6e)lT zk^N@Ggw&TDFc29vxuxW1c~55QvldsrorJrpIR>g7@f8k7!1ovbp?V@m{K#f z?)b)I8^3ez7{*sUwa-$z=u2Q3Sl7&nR#vL_fHYoVoi{%P{1?bdH~w)kaOMa5o*$6k z@i8~rd@wha3~IJq@@9M8BC_9@s{8hP`UV93qF4d`AbGbKURG2v{-5FrG`)wu+!o6i za!rT0&*fsWO@H}=#Cs?pFwTJ!RrI*d6G)VAC+RfXpmNc#;kz}Nhbk89b-r};y$;7d zIIWOGI@N!p0kfpIV|5w?y6^%EovyZ)b$Wf7wEtE1F3B_duaApAqL*k$k}!3f;y7VO z#bUa)X&Dov7i`n%py`=?Ak@aeAYvYMR(#4n3@Hr69b~t-R2!619zA;vqjL6^NNMBs z14Ttcg~|AKaxkf&PscfGjc65U)$)MHaTU^au_;A}llne3Nz~!n-T)uB6YH<->Dowd zNW%RrqZmB+;QO9St(VQRC=LCO7jtmbL2~>7q%z!;TXeEzxckEtrz=eRH@Gao_X$z- zgn7>0&B)!?YUv_Rd`rBBgT!9a8#Vfb%2WFhXOk&@1xL&ix_41-v!N+%upmhmcjc}vgG8@kAfwQ&pO6uJwu`fT@4$EJxsev%g zd~&?pl}Xfd+hVQ67$xXvyH@V_R!l{nfy+cBnG%TIdn{K&hjDfjPSi-6g#ih_pa$>#*(&S6-07BOxZuJ?SJELnVI9*0)oau^hV$|HvT<&*IDd9kX=?bD0kf15$mn3xLK z6aBm5bt8@Pcu;8k1ZcC0NB*seHJp{vNl-`rx75(dXGmL1O94|((C28~yhQa)Angbl z0a?#Yyy?NpjLbYBe~L{SQ^&`jn)o6**r!a(=3fwEF~vWW$4wPKzyKX}E&$H`XBLc9 zrCoKlSob9ygdlBo2{8Q}T&G&%E|vJ6Y(C%W>N} zp@R;TYVIqXPKOf{$U6ZK zI6PlNG6q~Azk&_6c{EO@2;6wt>6)>gkTtXc@r=iIkAHpKMA^AX$$~`T{bQYUmwSj{et8R8PVirKjh8RIQ-23(W^*}8 zf9$IeMH<;RpbUeNhb|trejfKp9p1ay5Oa~ArN)TrgLuFj0WGVO+hBdW7GmP4=U0o7+*)<^P=zk4?ZfT%riA#Sa z42b2a3e#cMIcgpnE*Z9r7<^l5YQNCbj5$lS^fR&9zVI!J%~K8?tcUa;t+2Xih`+pR zuX4pS*MHm{8_AkLA#7Jz5VDjxnF{F?DQfc zslu*n0AQ0qGzp9uiz!o|_@Rh52kQvlwwQLF^|l 
z^}!yzhV8RgL|Mtn7*dPs%CeioAf5_yoJqWz_C8F;L$>ab1}&Y6Ld`T2@P*(Xr3J~i zf5eOlG5*_B^`{p*cLnL9)_kw7!a2eIa0(c$EA$coS0Ov;6bPTap}-H46OwtPMh&9vqs5> za_`%>5H539rwb8o22$v>lluF*KCj!SjE;@+6qh>XID>1cLd20}zPN{{<~)YdVR$RO zd3_)O{wcytr>LwdokXb0yDUfm&;5Jny~gO;{fj-l5}A8lTr+1M{CpoLvHx=) zk!V?P9{#a)=+!vuc7h4Z-vxC^rlq~gM3!mU>Msxv^kfL)VKp1KL$Q;s+z;uk{XNnE zc?D2GZy7XrJ&KEqlaZ}OsllW$zz;6_>{+TVt$vF#uLTQ7=wpTjG4{ua?hV|w;;CO2 zJjlbQc0{WW+lkc6T*o*MR{PW;alSMXkCKn)hZCdh^BmQ;bUzC56sPesUo3JsdYtTU z9=d{8GG`iO=Yg;wf>-Re#q;NIT`DC`smC@ElaO@8waKHD0?@a`0PSnQcxmOiSluPz z3>o4>1u1&$%nqz_q`k7mMZLV3d|=OAN#|is%GXKre*3_$o#qAOXT)T%o6kQSGu_q{EWW-~6ck=&HJN4??c6DTHFF z6OoT)WYB@u$+v5UTj1@bw+go3zlI*T)d+0KB#?FPn_9IlBx;#L=8R)SJL<}Zh)XOk z`2MJ(qzNcHWuEfKMOZa#Q-gt{LJy^mlF<3~ECs?7TsM!%X2HKhf%|IYaIpJFrM7q_ zFA`$?pVqmtfHiv+mbZm()o-8P5DZwnIP6+j;onbO4A-!HA$_Ki+tMDEATHbKb*<%P zuKoLC)g-NeMA5*-N`kq`a;#oyIYJ@Qgn_+4J3EAXgfb)n-(7Ft2B_RkLepogu(J1R z{1s&Fjb`I%wapJwS5`)~he_jU@BF3+`pBkNW{DsKg_jIqVqS&MbT=80(S{^BpB$x_p+8w`xkJrIQr+4$xZsQm z2C&yDQme0esi-t*|DstVhFXf1C$ELx`0BxxKCYKXM>5S$3$+>Wqz(K>9J2wZ;_+3C z*>)gRBnukcs_YcXSd3~!ZtSqd3F|uTyhWUxi1OV@t9D%dpqRDadN_!fz1XYt8F&Dt z1aAh{ZrX8|MOm8O+>3a}y<^(sE35dPe-geM<`T$l*UX(u_HtnhME;ivlJX#XiLMw* zE1<~Tb|ycL*F3$EZJRQAU8X}cbx3rQaj>1j>nmb)7fgr`C2Z7gv@m}txswc?BVx!) 
zEC}&SjFHTIoGTg+ISw?(ff=2yIZI3q9|;~aXKu1D;vB*>c~a<7P&qyL-V8U8I5n=G z0q4Qzw(OKFcK0qy40?TWD!u`8lY^2Ml1Am|(giyp>{i;lNh1MW* z=-lxb$>~yt4>ya)s(1ekOm2`GFg3}w^JG(HXR1O;m(|f($IpZsf;Mkan(Q7E!g}>K zCMpVp_4eK2%j@~P=`vj?e2A3`pis+?Z?sR+(yvsL;-ku@xJw|FLS)c36@ug7CFFl8 z1aE%y9xMd+_?rYwEZ(065FcXGqAMqSW5&3An)*3x)eU1wk z8!9^IHeBMqyJNv*xB6Ptd7eg^+xnE#5P1jpe#ojbj6?T1O`D>;b7~9DN$>6H*@_vY z1ZV?xreHw;al*f1X{FyqMb#9Xzpgx#6!pr^{4ui7)+dc&*c(*qq1cCD{^L>5QGwL* z;`w40=CbVI#WrD#^|DTCypiul-$S|q@=QW4OeOaTE1_I*9>3tTp@+s`*xr4I0mWJ2 zqCAAvQ~T)vn`A@juL35^R26L?u+=5b>EVXMWPQcFqc5ejdh|%VQ0@+Vp@IgJ zz$Zutiya@=bMe%Lbfx@7Lx0nEp@c{2kVU)c((q28!dj7eNXHAl&45W ze5(V?Ov)a(=q@aJiqXD-iAGf6_f&uE(ruNs;g*q@IDCmjLADY|~k4NF@an z5=s?w$M8#2k1Mvfs#$+>IqYB^{f2LyJC#K8cFRQQ3*}9@eU5B7jT;gik?DNigeH5s ztons;Rf;(dHWPR>T|`G`9bd95&TKD8w}C$Wl@KM=MuH$+E$n7k8-dYJ>`QP@6Fd;x z{~5e`ivYgUgP)bh>pe?bB=xsX*MA2GsAoNCYVaPJ5ais%HJp`(QG+G@X)?f4pjv4D zZy$ixcwWx;PE7_sLDKdzE#V7BN6dZUpmgCSgQFQbRIhR5Dl1>VYf|j;yB_m~fNb|C zQjih~J+3rVFZ4K=FMLc}(vY2Lw5>kXn-3#Z(!zJ7%H zQ6KwKUONc--GpDB-KZv6jtip#W~b~jjJkE-Sv^ON-Jl`_!2R48&28BWZgmbPhbw*R zk*gdS=r@Xu%H!cG;st2}3_J1wYHo~`Qa_Z7dffH3Rl=(q|1CCE{|cMFt(&?u$b7r< z=D1Ma$fb;X%&h`%uYjb}s`3?v=eE6zNJ5UNRcQXj!t1XzFjfy+DZySd0e`~1D+`;y zET|Pj>LFdLRa;lbO77JM_0P&Zpm~iW?ET20+;Nu#aNQ_M!F#H97<48M)3~lzBBSD|fM)4Q#9CHk?sf$s7KXu$7iE3knJ&WoiOt z1y)U=#f{&7^s!O~t@PXZ_fhEIAr$$;T$Uuh6AK0_t~CMcI_t+tnpTh7BueN{zMh$l zKE7Mejx;6VB35)}A!+bA!MonTrhJ?Go$0xfp)EtYV-Yc199d(GLHiw7ytjA(X%Ni1 z-~A8|1uvq+;t0YH7Oh+id0gsj;9^iJQIrDbiyIjv%7%cfS))Y?7ULv9i5J|2FwbtD z-Smf_cVBML2VsvRW&let(Qm-F`YLd~z9aIbuR*nFM2y7cyE}UU|6vvzSO1E=k+_h# z%6+aol4I|--@C%*E4J{k&U5P*%+dypJ~(l4oc5z%LrauFsP$60aNq+I-j-l7)BG5! 
z%kfotd9i78Cdh9-+qJ4~#3gbEHjXDiHpC;xZJ}pBY!$zy@=uhRKLl!hscOnG2L`*d zE41CZN-?woUiV&|RLRL8c(n-7kGxu3R)pbBF}pQ1w(iyV!MQI6T38H)IV^mTZXryl}_xzvX!0#4R@&93#4U;iM6cqe`9}Cl7-?x zF!2GCNWx;Z|6h8u^Bde9(RqN5Ab@#4|D5;T-PZ6q|L4;+ujHuSNqvmB7W(TIs-7j% zkzBsyb@gM+;Y{CTLlvevNGSdMe26ca%O)T^AwJhY|#Itd%EgW!`SE@1gv?&W%g69$@Xf|ij_?f2Fw$OHPd%3+9qzt#<$yf-kd`jYVE&N&SSS&|#>_wJa&b!#Z# zlx@)w;V@U80r>Q9(B=bff)e4OR)gKIU(L|g@BT>4IWvHHIF7lCpkMb6frjtyp&OO z%V5(lxxIJ)1ksnmMhbxCqXSiboTNVIw+@pv1T6G-@F;SvBJaTwaWjfq5*3p1Xs@XB z-MgqN4*>>j$Hy>TPn-DjqFB*sI_+v9(%>pe{G$TEAp#?ue7~wXCdc+(X7Y z7*CrV4C>O;1|^Zx38%=MxvaF#{O6UESh#n2WjpWEVvw7RBM$b$)v3|Sbz?M^%cBQH z2e40RIC_U)>)-tydRz3X9fgwIch;D&yzp4e*(0rxefu|rPpcg{c0}FG_J2Mh=F$v; zml&Kt|8fRooJhG5zR%N|4dp(3Hccb$|L44umhJb4_}0ia%X}K=IXf_0sadha^zVPVMS}fI{+5k$zv`eMZ$= z?^v`YR@Td(A9*jS!dxO#s_*k^P(1p-hb(?S1JT$G>qZ*_(cLmI^wFzH>t{Y08#nfz zA(l|gwey`nl{~|S@5A3eieSqSe=%Y^3$$;HfYYz3wz)4-d3WWmK76|41qxI-t4*_a z7s%^U%A0p^K1*LPv4Fc5JQ~tU8KwbO{LB;fVv?Sg^vKfyWLJB5mc}<9T}Lht(Yn!d zI4ZFzJv;n=Qa`={)Wt1x3SY`p>le+8=#@4Biur1Tv8kz@@OnPF+|w!`=Hl6DR%*c? 
z*0@hxb<_Uue)h>lgT!YzZ&VfOP%wsa62}T`HkLb$}s0g%^a zSwY*K?a!XP!n?tbk9|4oU1Avd0OF@(?qClzLezGXWc5|~*rr-XXl+kb#)MJ%zH{+R z%(={187N$%Za_+a((i7GKDXaLH>Ct-wNolC#J)7%;l6YXO`fJ(xq6m_H31i_0`0A# zIQG{2vFW@v6vD2ZwD|PPYL6A9=X0Db%!|A}$3h=7=p=0DLmBDOPbuF>CYw2ui0On& zc4E}$d|rnJVyJ`s0^`on-W)YOs!w}&#zg0?AnUD?{;x4w=*kn4Qs%zAE+sa2c=3%F zC&LxG${6#N8na!1_w zL&3kbqord?leb*zmfqJrpBk#2 z?K>ybj7@Vj`6h+NUj&?8xQ=+GkeN)*YF1vy4tQr1KfS77Qmy*fjZYncnX+-yvDz^8 zW+AaEffnn5|J^w(wrPsG6>o79U{_iv-CuamU*8#`z$GU&BH;M`Aa7pv*^{BBuB~hk zHq{srpqx+2q+GzwPVJk-K=VN%0LPC&LLhA^6Y|6fb2bQ#l7h6h>|TZE$%Y= zg=Nc>ds6RyWi~{Lo2Y9vG6jv&=+g4gwTj{Q9DR;iulofK%Js<=VG(zqx23Ct)lwF{ zXCEQvXFd5o+=QJIyf*9%1RXn67-x#g$~i#)E11nSH5%2qO%yK|PM&%-fSemu1rN7g z7jVMC5eE0Zbx`k5H6qO%R?P=Zm1ernF;+F7c1%i<8o_zQ;edblLSx2)p2t6Sh>2@BVpDEgzK7+U||th6d3DM-@Q`^Ewumo#Q4w~eXXj?b!Skv zcBaLsiVtLJpq`-d+->j@(>JJcNFl%-<1@TC4pb{iul;J0Bl7#VCMciZxo?e4%^DXd z`!;P2k}aKIlpt&8TNsWq0WB8@`$$qDc!FEnB@O3JcN!PhofB#^P)c;Js2+=aX?tFj zP~pu9Jnbxq|ud8hx`xKZo ziiF-JW&D%W`+2X4froC`QuFKJ++~x3plv|^C7VeF{np=N^Yt=wJ@HcZUy|-Gd(L8I zH4Kho0s^knym-5Wc*X2rtuzbFF7fZ)ki6c%$zk}Tbz!LF4e7hDUzzQcF-VgYXRC;- zxqq{{n~785IW->NlYdWvvI0efDb!AU5WcQELCQ2t?J)9lM9Zh~KZk=@hyvGstSK!= z9@mikeV~~4!R_%Dp zJ2>5Gu3%=HCf^c$nf=1O@tk~7wQie#pD;}${71_(%<)c7o4}-#c8_CXu8*nWmS+^~ zmpL&4sAn7dffz>7a%c3C(_jC;h~&Cc!3ew_QK*%xwbd!!C_3K~CLbLwmgA{<`8bELv4mn?Y(kk!SnieXi zZ8+fPf4t`#gB1Z-0a#fa4dS8*t_gp`g>TsViCcytvTxcD9_mAqXijX$xr=R|KMsF! 
z`|RTT$C{bppChq)4MmNV%|E6E)fN8Ghc9y;TDazhR^D=4aDrdRJ}(xM2bRvN-RPy9 z*an?pui}u;yv+5_wFl=no7?tVcizW;xe-ye7W`EBy9Za=kFJorWr{xz{!s^3%d=jy zHw>;V9FNU?di>@Rj?}uC%1C09=!CcafcGd-oDo z_+bTLA?I4BGW0zf+}~Ca=kauf3GX1wz4(Vqd^w@^{0JvCQ@VgTREYsiR-v-E;i{8J z&*#c1M38XEjW7F3@nnaeQK}<0H-5;v5R`pi4w61~qwiU#6r;zp%U9k<;KWw{NR+l~arS`rv@Lu8F z3hoyQHNC{q!<|c6+OZjK=8(t{4e>eW-kK#t?l_BN-27ID)>_P2yeg3V5$~nQnpC(O zMx;FfW5$Vo7df(E27ea2*2X?dWj|yg_|hzaPHC(Ii;qv)leX_H?Z^LrnLm}cu5<;H zw-$9v`K^U!zaTHwJBZ$r-jyhR@C}2i>JPJ0vP6!SqIQUONbp@250;latj+s)t%jl8 z->^)+Xe|mpUE6CwiZhvLx%%;9gl9+6=#t>Pva?^hvu{cuJu* z|E?ng@J)=!vmB8aa#H8_T1LdTUHxdgn)V>DYCw+UfB&`BE3o57RMsH@oO1Q@cyY1*DG` zOR%d(FE#=r=+I;~){j4m;=n9Lm{CDna85xaVFO`A9$>92?Ulq)_83#1vN$Mj^2;cdizywJMvORi5wF(<~LcU zv=!85J3+0yF@0{JJj~H|mE^nMashQff4^qC5G^H%UsUfeoV5hs=6y9Rb4c@=_k;~V z_d-*>xcKz`lY||VE*L6p;u90C7g4>R{msimzSk*)49Z58S1486VNi1WtMwG4O+wIwR zw#Xx|Sh3>-O=qo5pQ?Sq7fAK4q9qQUV*&Pm=|M@Y$u_Fg_PwYrQA$dxYCpRcM&#!J z56Wo`-vi@i3;gF9==I)V{vGMTFohCF>%o)(pq_(u2S%ov^EIWxIn&mTNi=Y7Gno{B z8;2ZK9ur?hU+&k18y{a)=5X!;3U9}ntA$!YgB>J??+F5diBL)93x{E4kNR&w3NBaK zlBV(PyCz?4YzxM(?kvHW00FzXAdxxYtufGkn!4shi=J9$!J7Q+r+K;wTaUIe_YUp9 zu4j^RpvMoIjU;ag(@rf`)tclRLHb@k9H3h0GpJYqH=vowsY-2Q+w8xUG}cjKQC%Hbi(g_)vUZd$MH;NW7rei z_gRDTP%D1s{)aW<9*ael=@`Ra!v!u8lS-~_xkb99XXbPH7|mQ;f(UxX+g)nbwaB`V zE~?j2kPmr3X(VL_(N}V1cy@FKoWRIuZ>*f|In{A!+E_dDM_4E&v8blb4MJi&pxKh3 z{l?Mr34;XlGKin(O=MBEFT_(FVK%*1gQ$0<9zq75@3792@!Rv^Z1R5smF0Tf%5kB7 zHPuH}UAlR^*ai+VK4#rQdfw)XGse2B%W`?VX^D_}~$fD=aNn@Kdd0MWwQ;&Atly~vOYVW{3apddbnMx5T> z+0wFUe@6hl_9NqhzcR(8;rzLEK>7(}PVnryfZNIO>yn||iTs75J zA=MTtsVrP<>^O581@7i@*&$?f0u~#E3St6IDLpKmtvT~~;rfE#WBu3&E+s=xy~Nyh zM^oN)D}dd}71bjNDMk~J^a-ci^Ml-;Y~yz9NA951gD>oGg%ulo=M{_YeY?+X`zg(tx{5r3LQmI#`VQO-92n)UJ!>8Q6R~2 zR=aL{kJCs2ULlKq356s(^38aS5K5C0JAZnyap^$%*|ojQ_BS003xdxKb{YP$I@%FAb;OW95NN5|F>wQOjA07grl=v@k6T`hrw3!@vtRc>De-jlK(Zde^#;@ zgOc6(kNPGp3vT=WUX*G>!z3yZ2}Ps-d=iXun6adYdGk1Wq6p|5_#P8;nWX3DazIoh zs&nKr{k=8X|7iixng^QRFNdx4g~tq9WghCJ4#(&9+5RmigKU)?aIG-wRuFHEInmJZ 
z#KY=7B`$I!x8(53a}IE)OgDa|(M82N?0tyhi5GM-*{ZNQG0jsfKjAO7v|#|r6sf_&&zFi*mIvntw%Km#Mv$m$MP;Pb8+ zl=feX`B5A&fQasg1JHJYrlkZ=t&EXTIu2JJs*FJAn-;%U?8wWL!!@p-Abnata$w?w z9%-eENIWS}jw(ucA#XZzX{Oh2jb;PiGTLyPfH63 zsJ&L!jR5*!4Yv}XeU)r-ml!76FbO9%@_1aQ8QOH~Thlaqq^);=(mtN6u6wLwr~%5uwa+n$;B0?jx+ zR+JQv-!C~(GMYnC(Ls3@^17sMl$V#Y8&-r2N+8n#3`LZJO-EleV^Px7J5WYNk4(fU4VSkDb#cX#X_M)`i{oFXqPemvsG z!bh7pTxt*JK^|DRkeFVt#2Ya7nj1B8xdr{}6P*4n+FM2>5rR~z0}wT|C{NYazb~g^ z>#*yG9-Q@lEIP2xlJIKO507%ouiSGJc_ow{MJotTdw3G~Q$xeMqrff(6zz0-wWJC(PgEv(|Vr*|5Tfug1QkJn_99Gt$PmMPH+MI4+Di2=p|tIe2V}@ zFrfD3jyoc;^KL?35b|OByMh7h9y_G@|ILAuRbSA3i<5E1Nd#p*SLFsioE;^u&E`|lh^O#2i4R(by8t~)OuJ~W z4k{(xAe~A#N+>1WDBay5N~6-`?0va%>6yh;c7PRA9icU@W{8JoZnbS?8Ab`E3U57>&8XBI@|> zxQPmSW-yOey9yqit><-bdbt0a(T}5jm=$_F?}Tz)tO>=>jtrVd zMw{iC$?uxJJABg?@w&2Kd2d#CFUoeEE{4sCrTWIL&3&?2RVKa3r$~Tw3uAaue@F88|sm0Vl)NayZ2VBwPl+_ul=Rio9kLbmawsu+Ay3ZHRJG zbMvQEk!ESzgABKXH6i)ppFz<}64~n2MW5iNt z(5b4Pl^s8QS1x5~><(S)FPWO*X$>ZcHvjJs9KiJrd&$W1eCv0l->$QYQr`+aywj$AF0nyWUGyk1gm!Q6gFc^CKB!iBp+l;G?+gBaH0BJn)YI z<_w~r=e{CmQiQDA-wDF!ly42j-fvvLk8+v`UshGJud?V&7}4kfFjRizAYp&cOL>oj z-}cgS)FB%7{(CSS)_{Xq*m<%oPvUED@3D=>q;Bfh*d%^iEh(SF-K}Gi78I#s0 zC0A84hESG9a}yKJa;wqpPkZI<`)j%!#QqAh-;;g~P@#`((me)->un`zs`{G)dn4H1 zq02HBH(Aw23S|6%bl3?mJDw{f6oKoh_^wutpIq zps}&im=O@Tc+{W9wITABuH1x2U3~afkb^Car4KhU{u z2?s$_f4=0Gb3x<;hkqw=jPap)=ka$yYeh!ItwE>A(g|YSr*boxM(PDFy31_YO;qpO z`LmWRseC{edcYM6&hRHiVRt60Ja3!C4;8bZ57!&@vt3`Dr_uLE;4$%e+6+N&yh#y! z+b|3Cn|K`})EDQbQ+lnMF151#D>1;MO(2b?af9iB68a3DRTvehW_M4v21(I#m|VVC z_QVA0%J$t>!M4(8{ZSTN@? 
zJa|Awd2SR(1J#)~V8FaMR<8z@PzV7tD_H^D05LI!hlf>icoQ2Mav5(v*rru|}ZoCew9uhSbD?kvS!BHo30pVYqr?R@xxW#L8-dC!y!s~$W85(GnMP!pU zG7?Dss?@%Njaj2sfsm1LRsjHdVSuJ;vuh=2KHs{pQ=yp)TWLPC@2`00T7?23-ck7U zQxtr2hNg1aMB9ugdP}05^LSCS?`?O7IQc#52uy$5Us`qW2ZkKmz z-KTd$6It}qc|xFVz(hIUiK8voZ$%RF&6Jf(zY_!#!Z49mVeiKdbA@6M0bzz64crPn z7rtL7o`3uJKR}Fic1E|3eldQSz4WW@Z?=ni}^?>U+P0Ut&&9&Zqx( ziifG!OJ2^N(;W9eE<(rj z9()1l7wd`Hz`WI)uU0B^ag6rJfVU~D?7{PeznPvO_@vXN@rWeym}# zwe+nmSCDoUK_M^a|ISn;!+2So0{0%W{Gr=77?~Iwyf+z1@69Me4`2DS zGVbnpx-xXq_nq4_y?{5=+pQ@Mv9FyLe&pf$_|rbof!9$Sp|K5P)0R-tJDeA_IO|Kec@E&Tl7k{Rh_lo2Q4P#l^*yFRmV7>`m`4gW-8G)P-m1 z$k(UQR$br?p+W1&h}OQ(x3!YZ4;5?Lm1gizTiA`gCpD~R29~US3-~_3Q?H;HPs_82 zIQQNxNsrmxE6WUDf$k-76Q1&N6O+I*YpsCtwe=gyTt@(8_=61kk4HtE z_2!*&rRHedeTPhAVs&^Bq{nLQwYy^PhX3~RJ^I@Twk+o*?Epzh3L$I;jBkD?YcrEI z-p!Jp_$?l1*t8WQxd9_A$kO`xO7s7q4?e`r4oxBOKU?KgzQ%^L(>e)ivh7`0C7YqdEtnBNqn zcXcsjbqF*xua}b%$dyDevKwneVKYWWDQG6(Ucbbh(O=&6j>(5LGomOYB>MgmsfXw*ac<=$ zw)wL2g}5=!8`ipM&N@faQ(#m5mt;YIv)AqGRyt-6L`MwHFkCg;Z0S_ZwfR5p^j!ENykGGLJx zUf#Dc%FD}(OG@Pa<(r*G5L#JzyEiWFoj3andftM$stj#1yz==VW;5=4CH?HNwzaV* zbTDxkq%0qMo;i7YS<1%wa0p$#rl&vsTQfA-!Ta&>1vxJ){1kj@jR`kNnrcGkW9|Wme&Z5b!j16u*Q_k@&;Ke+zt|Cl zaSN;f9n1S>FDI%Vieo}RUUvrBj*ouRfPXU~L+?#E-9Lr7(`Gw`G#%>!-8q4irG3PmH8`jr!$1U95m32A*DBI6Z9V3mF{;dHn?oep01%xqBx``-ZgJPXl3&UvVzWs!O0N2ICo%S`iG-knatxo6^w}iBHZiM%Zg0C!b@I|%+-1~| zp#6!l+i)!M42S%epd#-=8N`bx1M{bMY#^t7P3OG^VgArivTQ^R^#dH+!saqck=;9M{XFZ`WKRK#dPgLMegVHS z@amR$p7=nXP(LGOMuG?(zU1k(*4>$2*v2cUstT3#q^jnSoZs3WDN!c5rxO7x-T~Nf z`5lix_S{mOztash(cp`#vfVX7n>Qe4I00TrNP<@X9*Kw_YHzIXc_F$9LSd9 zvFH#uBj(^3$h)dy>G<`}ZEvC|J}EQt$D2@`{o9vTHZ}%RB09fQFeB|QkY)*Bh)j?Q zf|}#I{bl+77ABT>=dQ9C?B=wMmneLm>Ygts+rxYcTAcW5*7u6f`GkTzd>k)xHHscv zs&GpW)+dT4{27hq;Bu_9(kCfPp?BB)qTI1DWe7!${^hKKTQwF}O-E?m9)!&OLjiS5 za^Yfv(`J*c-{tM#Oa9!GeI@ejLiheNw{_!}`)4^rn^Pqp2+tRxF4eGt_Up?X3uvZ~ zp+r20R8;(Uy2)ldr>&Idvqs6BwDR8tP-8K%UYikQ50;W!C7qu<-}yuOdSNCMNA&Lt 
znimfZgOHiPZvtE4R{@A`Kt~iIW*fiCce-De)2YZ0*)|1n`Mj<<`or3KOWZ}{tt0Z843z1=f#Mk3plW&xVZS)YTy7Q<&I@EikAbc1ZC%Oba zF&yRK72wDLt&<|Ydk`oQA)~7n;BA6vRhN_Lqfb777B$C9Aajuh?zRtvgM&ZHt8w2{{u>?Dff-A<+kq}WI6 z#;&Lqzugj>K?8W7@|iEm%6Ta;Wxe<1x=L5_F4KG@bbCC%-&Bh3e#Bn>NKnev=dCLt z+>12b(WcwN%~y@$X?I(;4!@jw>zV{1?}iy!lJ!Rt2@)pG5h?x7DY+FURP;lSxuS=9 zSCkSLDbC5KVC6MtfuBg% zw+n8l>?rX1@DyFwFduXg%WvJURp5W~XRcKDr}{Z1qnvDzPKGN?P5;?FzYThEv3J#y zII?3oM4G{b=73Rp<0cONUv*XjTxcLOfkO~RixQo3EvQAq-I$->7|-Cdw3|E#bZIIV zhNo<4Vt*N0qvMhk6*1mCc4@tO(U#Q(v$oy%m%EsXNdL3M?;~`9*ACm&pb|5-?_eY; zhkHQvk*wj|UGnaMOuGFk#|n4E;1R|7hz2-ahs3XNvr6|zex*NB0zg0NwEOv-_wt>s zKcD(57uyM?0R*Wos0wB2yjClo+ESMZFXsZYABfC$RGOdf<_CNs*Gh(ooJQ3F@XhFL z$Q-p)-jv86n~r`a%$sixw`QqT9bzbu{EL;qLaAsukAkDb!!g(}Q^b|lFmO&fJp7>s ztTNJ&l`$*O`5j8(^|*2B$ni}^RGZjxWUf@>rRlmR&I4o93)7$3Lyi@(r9ce%82@gk z)_ycBOUU8i;%BZ(jv!(Dv`liTDkwCZ`9VmrA7&AFcWFyuV*y{8eOUCE*H02ME|riZ z?!;jHUE{YWzmctKTK@%N=>Buf9|0!<0gI7^FNT|kJxmQbYA{y48C@O0*L_i`m|c}g z)MnF%KkalLgcEs(x{Y;E);0Ucb0UO@DnD1SuuzLdr6RN{GTK+*b2^*f+`BiQn|TMu zLo~d{M(F(VOA53oR*Tq5IsHFAW?=oI=QZT0#eRFrah+=iC9=<6Co#tCB_wQk?$BcJP7Fshu3v0KMG0pUs$0vP2vaf4NN9Meli0$t=t9DA~{WJY;snSI(Vc|!X^^ZwPERk;@y)EV31DGqAoKOcr9 z)jy6|Rl@Wf)(#27ebyYKX(G_hQ++^|nnEGj<{IovY(SyF z%#KYvsefy8l+{)6hgF5;KMoAx>vV%TotZVZ6R>7A{_3?11?>e8vw?r{P6B|l;^D3) zCyez5=s$@mDIMsSGc01l(pLaJZ3d=}uCC*iavj)UKAGgV#x?v2vjv3rhcC~!E4nBL zmS2pv1U#IhJi)`S4SEH??Ph_r-A9wUJ`H_#lgnGsK4;f>LVEw%4XoFXt}$T1MR?_S z+J#LxeyAak8d^ERA*&$}c1e}GnU*fH_w6}){beUGS^gi%+$yV?NcXnMZc z_ew$9g`UD{sW|eZ-dXbSZ>dt%BHy~L=i7eR90p$fXmcxd0xaj%7mpDzovs3+>h-pjtfbKx(!O2Epw-QsmwvmU!2Ymchf zS89lfsj_;1`<9C17k6(>#KUTBwHS70%I1S#O3D=Bn}6%?v1R!mzO^EzdXt0Td*BLA zw9Zq;-vl}qNg+4i4`k{%CfhX?u>V{xH3A%}w<|-0XQN8FRw#bh8^Zn?-UI2-kOon5 z@FYah=pB|WJGq1WrI`NVOwkw(ah zy)ABy%Zo!(qs(XH_%iHooQ}U^_CFT)GpX7~;UoB3BBWLy(*tzhr`y7azw}19&X#XR zf$lJI>Cie8)UQHja1=1qvm+V*&5cMdAbiU8Yi>b1q{PMX!eCFw&Wj@ZB>fu6LckL0 z#~XBo&B$N8*wJfrNF8n&-!1`F3*dOiY;2xl62|x+_@#R=kgwwVQs2A5ISj&3CwMJI zNwm%@g4EHilBTVa?(Xhz7kQ)m;o0)B_W*^Bd6>q{;Kyx?y!C<&p>7K5X?H^3c+a>n 
z)&3m{Tv}S90B6Xz{B7sq!+P)O(6@&5pZ)9X2o*4I$}p#@6q*->(ce@~zj}1OdKi(~ z=kI(|njwjZ*#7qhUdSuRztS(?@rJP<#()w;N0LI_&i_>Dya8XBRBd6SmRm_c; ztT$F&ncF*#St~2OT9awH|KAHhB3-}>tVNcElhuf4_uWm^FB<)y26k7pYBBO9YOy5m z-F_2P9#Ao^y#4f(Vo!d2E1fMpm!Mkj67F(8Tv17TE+CrkX$vQeAw~Ty& zv-|Nue~`B0NYriDKl(cNiVT@UON1Cq`aU|=nI~4K;EF_Xc9jaq20>GnA{=S#+NHSZ zJZ0+{s1G}+uLlrBert*fgm$3_3BO~j)RDNwSfvxCF~9uz>~A)1E8VLeo4er@Cb=h& z&R)71jYKL*xx{C`m3pK%`uUWyY_vRF!n6d1NtvvjR}oNvZRve7 z`x5o-B471ihNgn)G2#RXuQb?FN_j?t7K$kGqO&L_mF^a=q zxH?9&1%8p4mBnD`UF6wJ%Zk;dWJTKtiN2Nq^GNb|XOvXTZGg{(5(t|Ftdh1FoCa;5 zqwbtRjtS6Igtzc%bN(~Y8!z}!%TqHDJi)7(IVN3jpwzPwbeAV$@)RXBmk=$~Oz%)< z$U;8E?ht(>@N6ZN=-kVz#j+dkcZF#_^e`2BXZ4t0*kE=CamL$=6W>4|hX1}+h*@?u zs{z#}wLYZPdI)6?Ul{HnQc2iG0jVtoAcK`J1`S4g&v>?4br6rL7Y8e!n5+7IZKD-E zKI>nVQ+b#D(=XtE&Pvd;Gk|-ft8ueKqtt_3SgWNdEoIQ0=UB3fX$U0+Z2ic;3*mp^E9~?L|Gm_AURKBw)@5CCzxKaN5Tl^~D z+TfTEQw1iH3iWIn^>X!&a;8DwK1*EcSIW@qnhdaIau!v@T3mC zcTossGt4uROh=(sxnBRHK+MMw4+bKONMr{KLkbiwZHAKjshgW`@O&P)|CEQa-LQ7C z#N!ds(?+iXow2(2Q2bq*)_!UKsT?~KykNedJQDyS)1pfCK1bG+Dh47jRSA`~4FUoONaK1QZI$#NoxZonnGw>o%~tWb_Ii*`ln@S` z<>j+f%^0KiMzp$hTe}NIv^=jn9UTuk^wx@eSVtysm8%!W8WZ|WQ^~~^ix@Z^T+$Ns zeFT=;l1O5Rux&8))(#pkTSLhI>>W`o5(!u<>2F(v&|9?6k4aR<9Py%_A@ql?JzH%g zGl_i>Atpor96zFokoUPCQ?IkMdkqeYyo!?jr#P2AhuC-{q>{MS62f<1-Wt22Cpi@V z)*EOnk9##_>)`T4E%lzO8VN!uWTd4)JSB9!=~RPV7BE$qw}MegppUk8-IihSmvohV z{Lgd(W#|5YVXUZrZ1uxln21I~DtS=sZP=2W`UeVfpgb5>xDi-v)3s(ZE~#(>dCeprU)6z+Kj!=>yv-a2F(BAe5Z6f51zhD_ zTm?3p&>6_){yZ-v;al_?Ze-+aqp`W%o9_-s|0s0VgXpU%Q7C8B?vk~dHzC#!jpG|t zjp2)K6#3)f^K`#H6eySfz8L|3gdhHs@2KS&JeDaSFsUGKoiC%y^5_A{vm){d0q)Jc zv*wWYEWYrX<^RyX9`7dlF=9Y(8b7=Z5_D|^zjUYZF@n!+>k=}BPSjhF8pS~cX1tF7 zwpuMNExoylH?MGRqC(<1n(h_zoz#4Bl}}f&|MRiepjMZ?F0Sxm5p@$B>^wD#qy_Z# z-CUuyU70qC!Ga$&^{pQ;Y?Pf462oLRm2 zsq~BHFsq&fav%2|vp=7Wfs^cv$~&%jovwATHc{^;MuY3ScvDc-Wc)}yPcRdL)RaN+ zI;BAy9kzbsU!#^*STb?}I^mmHo1zoXq2#>4-UT+b3$aM!4=c?C3zA|NRZq<(VI$F3T&^R z59kJCc$N@}+}WJeSJcUjA06Bs7rXNVt-#A-Q|%wZWB8e7R5C{%eo}(Au%UJ9tMfZR 
zq(U?-FI69+{yp4XDyNmu`}%?lV!(kbk4oxy5$Af9X4m0n)#*|XsvL#D+8tnXJMGnP`=yK`&Vj3DmK`fdNyx@W&Y@)BTuL+VoR{I5z zvnL8F&DYcK$HBuVFV6}Vp%wU-ge%Yx6__Rq)o3>b$<&%mw=ECkCv z8>@qXnjyw^bAmUp2j+L0*i*>mv!{LQPAA!9v)pt#kz^KMutA+`$EwF7;zChf zeLb?9ZXH(2X&28*o%Eq_MD;t}+Y9Yk)bmRO3o|chtDv^d`W@15oKT;bjzUSMt=vulHKn(v6Wxn^Yr{s`$XscY)ahVDk@OYH*C-M+ok1{ z)I`loe5ewCZSNhk#@+qS=>abu-TRc}fh1~&mSs|$uS@FzmsLN}B^B<=UFOjwjDki2 z`fa+F-X#ORI&}-Jxwj#P{91fJok_i^LD&QndTfV_nO9HwER#Qd8A^@nQK-YL^AM@o zTeD7u*9oY>o{MdCFt~K697HXKhWyqp`y~ifyBtA=7vpWpSm^J+o9F;+p}qQNy=sF{ zS6sd6Jgr2xD}s=dwtnEoxt(;f)1KVjE;0D;hfPf)4+Bs7C=aE7|Ak?ah`KnKshBBn zM?UHc9IW($gnAclmubU+P=^S=4P7V>Y1{e{`d6WQVe|Kbv6 z*sRqq$FAEGd%;rx5Z#%V^8q$hS|nKV*Wtl>t9a+@V4T6wZz5X%(2L!pGw+}SW^6L< z4j>!;Y9spEZnKyBQ0gc2yM*?yGJ?sF^vKl5!t2SEcR9`Tyt&O6u7$&Z%41Tc-_ zF`ek6OqjPJs9R$@SPE=lNE&rcG}qcqCY5WK6422N0di!bo+pz^=zWnQ0!e+aONgak zE+0UBGTK!B4QgHO=UXJuo}q&hMxXw zX4kJK`SB|M({#OdlMV;$+2!V%eZ>6MaJ7`WKth5UUFOEapn1iytugOXTXhg8l_BjA zyRWRIv^344%&yxc_`@s#P(PYL{D8fqjI{I{LfZBJ>))UdwWFu4za2zx#y(*!g1kn| zrdI5={S+gZ;lNwG_~xa2&)_@F3II^^gjAw?SMNRhtTN7RjE+d`OnvD#ufM-o9X0UY zJfjzh1QdS;M0!HtQQe1582ys*ir3^Dq3P*?sQ)0Xy#xmVwDVZY!jbfOMk+X6@9y(Iy%xg}X&D#VaG-T>KF@km=H&AO-Vd3= z&Q#KvOG`6B%5a=HhNY|D<1ZBZ78rmGlV*S~US}dxc;jl?E#TZ{Y>hecLZcE$drm)( zHo_k7w5A1YHWM-6C1Rr} zo+@Y?3JA%0jGN;w%Is)*IW;#X5e^hZ7I!nx7j?jVN^B_?4EHn5IuFvHITFFf6V(l` zKCvAAZVO!Vf=G}Spk!%^rpkiawh{dPr)~hm2d>fs%dVIzr?y#_dKMP7JQ0z@u##g~> zu?RIEaB?>`Td=@2UlKw-l!hz5Pnr+-;Zsn7%j^%Cfu+RQ-G*SeWre&NG!zE%ZoGi& zocQvtDUX3{jo&}>h3}8WFZX0#-KiMr|4`8^4Uf*5+g3RAGR&X)CwHkvcy?QJG*iX0 zupYE%Mn_QIqAM4dm=Zb|KC{_N0zyw{S>D!9+x!5VLq{}y=;!}x7~u9ML+Y|ee7t`~7A-W4dW!<>S<@8*R+T_u(r z5tULZ_u$*t!J0q1>ARV>%9ah+rl$fhMR+ZpO zYJG^vvl)a8M@kiE42@V+adFZ|gKtR}hI6j|pJ5qIBb7%a=BE1=u^dl~8;;)4pj$(J zx1QOgaha;se9hLZS2O-{Regs6qvD%zR8om@Rp9J{f8TZ`moK2NffC9&<>}$tz>_WF zTnfG$D=RA^+)$PNs8Z{P$XKe`$I)qAnk7+%`5drtg?)rv@}AC#i~ku^_q^AGg^$R{ zX$^RQ~OYN)W?0yu!l_37j#!oo?q2;V$7SwrYlGn(B}sr0eJ8K6FdpH$cf=1 
zFd}3?XpH|Py78%`#oL4tPFd%j*4hF058|%eC?GJC{YtgDo6gz%y?_k=hnC~$n*C*^DHv&Bo1jGW!pia^_ZMteGKpc+4U_zMFnT7mnmvX_;^=nn7}d{xegTp> zc;rVepJkN#u%L1Jnwy`v{KadtSFj(P@tyue0v9;gb?+}9taf$HZX<3qQUBTW$QqAl zCcq`c3uT<&{5jI1)6HUYS?d<@h+YA%FuemRw&1gJ_Y?nf=Y5jGD#F*Np}qa|TOe6b z4J@RcDW#a6b+ko0fuRB3m2){ANi1g$i_ffVO1oq7ksIOC4AKP&Rr@ph+juW_r|wcp zm5elN5VeOirH)pNtsfaQSx5i5t#Ltj7Sq4lW>c^4t%^gucRS|4RQq?@Yw91@RTo2l zF)o|MDaUzxWpf>cZ$hBi|6=wf`VDF~{0vs3HkzB|-&*NLE1yyw52X~gWwe2wD?H()M4tpscg{n@tc0`$C(3Dtw& z4M9Fj%Y>M5WyR22cy^v$aYQ@iB;N{7=N@>vxvu6L?1|y}@MF_sE89C9yua5|?i;45 zQj73=_n3(fdnvUI8P;3WY%+Oo?3shxqPQY7Q6mpXKj<+QCY4`;k%M?cQYSRv?zg%) zc82rHH2+!Rx_kdnHrF3v``EQOlI3<@U%B}Z&D{$34ls7%EO;wn!)0^B+$>S-tNW-t zZQuq2l;~(VnAMmjrl-mH{G5igdzx7Sp-_2GO8nn*5|WYk9a` zL40uAO*B`Dzq;i~<6ig8wSlV~J5s0DwC7)~5oFQ^8Q1^2MV|IEwU>mMnwi^Vb%7Y!tF1p>R6@J`@_w0IB$^GB=Q2MC|ns>0EQ{h zHRMvS{SF@PK)!VFC00W)i3YEmw7V511t^mKkNV9ccDv7C68FFEwp>J&&T;jqIrEuG042kb zgwgL&m=_*>a56gMX(6+=7 zm)zZvI-H9Wx`m~Z=#P3;Nh>Q>P&7T+Y+sZ3dr(zKxHjJY^ac*i3Kow*?>ooJT;4I3^+?hKJm#P?{B+_)7#TycKk8^ z)MHM3WMt%Y;i^|bm8t7c71S8f#H6;r`ZbvsokmH?$w#r?R+rdhoU-yZteKRVRyJEG zgs*^)<6pzH^c$U3HK(J>@`-+>8G~^4;0OvTdaG>)srdN?GNJxapj6#< zd&uw%6b}gZ!4n;ZygDC-fBwiPmaA`g(G6fj~_pb1O z9a|RrL#RTNF!4XMv%WkGl$twp=NI5V1^llnPgvXi%{0DcSCjhksdlfuk)>XlMTls| z5ePs1WMEAzWhrt?OEv9F9-(2Yh0{>o_wkDS zJ?Zz6AVB*)Wg{a$~< z{+?#d>w^j3D>wF@sQkdT<>S4jXr3j+nP0oU1@esRP?8n#i51^&X<2^%{yNOa;gF364+d9v=>h_n{I6?C!#GxM zin3{U^mj$9O&&`%1p=`+9xT##TjyPI*Y;DEnOBq-wkuk@wvVa~ztUYp`vlxw>h}*K z417T~m8?kbrrp876n35o8FZ0xNP@TQjXkIjUGiuInE@_s?PBbz0t<4w-S%rv$`5y&J{>^Mh#pXDF69n9G&0>4^?po^ZDUU8MBzB?!U(xV66cC20_kA zzq2IQEO{k{AMz#*IsQ(QIJ_OCFr2gHHo9}fB7zX}7T5TEp&ZUHX6VYIlz!BylX-_tWf=-83$^cZh?) 
zHQyVFz^JBFWKisDj=ToW=Bx=uU+zm-#D~2UY+FZA{+N7iuKQY>M+-fBtz{JQD z^JINF=S2>=u+`22p8a#jqDKESejq`%K{1l9HYtb&7Jm(03I7P9YhmMPB&A_&#v$YW zWjrPA1}d`6&x{{2@ix4cUAQe*FCz2oxGHPs3w=Dnw)14})SFc;Po*_5Kv7MtRB@D5 zd}N}*mb6d+H!sczGdsl>Qcj&$P)Q0Ll)O~Z%l?HabAsXT0v59AcHR%2FD_~_OrqR-y2TC4)2t9TJ-i2o@z zkP#Fj|BTggA7DL|z?WXEq;+98o2)MRs9&E37)H;%1qL`-<-qqZj}lU~Jgc@Qxu=l; z%8@6iRii6KJ$Ph2`T5CghhIRc24$3uNg%kKPHjwP@nB_mtoS#Q>CF4s2@2cEOnJ_; z07dhQy=(Qw*UjlNeCWnO6PR-z_#_#+IDdywD22FpW#o1{-wv0pf zJtXALSYUW%z43Z}w|jZ`TO6I*MFKkg2E!}hxjW9iw%@_4v+&-3cU)U@p$V$Sx}OY$ zPY%=1;yC1-Xb^wXV@3k{cs6`Y&>_$$pJdvhfDLrmxy+#MeWJ#E4>hdtxGWts6t}%C z3=UOpzPpe9oLEH08Y7RWg)8k5WNO7ZZ+&C8{4cIv;!+82WS#!K7!s;S4K1ircO2E5 zR%uzo=B9s5HCxPM1@jKoOdF(887+uLCRC{W1wTL54d(w#jNThmy%>A|vN<~3X+Kd( zi2fPW=Ou5&|K6Ccr>k>qiUgHsf6_MJklcWS^B`CAY!T*1P&uDz#nD;zMDt`VvtiKI z8>*i_J#f;EkCN4TufxIo1}JN<&rE-^L4_w=5UK;VUrc?mkNz#loHSC>f2|VQR$DHY zaNsSErb$oNJC_+>+2a+q~S zrTRwqx%OT8}ty+=(3}NML*-Zce-3W^=8&4V?h@-1c2|$N+~f=rsoE$kvDWc zJtb8g74_OmK{l&jB{cgW4$ilWn9Q?X4p^D>{>_tiOC2M-p9b`CU*J+dBlSV3PKR8f$;gI>!)ErtYDG_UYlq=e2BD_l4Bywo-%lfc^qBWZ zbTLPC&@vX{q_X50bUk>bI_rIA+no4>DsqIZNhv0;n8gP%Sf!HtzBNaPYhZe}x&1A< zi5VO?umQb=M-d_LvllqluzQP!s(6?80mSj*Bg43=Nl7SlvletzbfknW&;JGk2D&4d1*UeeYZ!5->!R*wBcJR(iQO98{8jKD z@Rte=j<%XSNZuIx9q&!D<^TF*DAE-d&`-hjGABDFnM0@C=w^JkD!A@59@@HC4UbOa zhDtZp36IRx3?2Vo7Eiqq3O5H>lUWe+T5sw6&uQ`o*#C%4+kAf59`>*rKy>XWqvIimGEovg@-c0xJH-0&e9ZJXcj z>kyaOucNTMiNK{!!&ruAtQzz}m*#zWSiW?oFX)%i> z?;qJPb8XLEPHiKF^&&eKEPU|Y;xKL!``l6gxeX&w3L18XmJX1r|sf)tY915IwY^%bt zw7IymL|91k^h^w_so|jMGFHt#fF4N1jOWm%xh-F_5B*Rd6EhDkg~GS&?J77t;Iozc zb}o|rlCOI6PN;OpdT;P=W=L%_0!9Qr6fIy;Im~-A0rI=hW%dC$!PN2t0~&XZrzem5 zd2p$PW6-M2uo#ctTizF6b3NCGuA|v3TJOvlx>|(N0xpgj^L9eJlf%t;6^wHFH&DU& z5-QWjJ7R7N1i25q_(?Yym+Re^S+&%PH5xR`i4N9f6!cEVq$Or<3jAgGN%=%eixCED zfxKQY_-eFB-vo?B0bK$%$s-4J3y)ThKmQ(R9S|A+Fa&v#@|T_xKl8+F=cMHH z9v!h}&H0pfcx40z2#i0$x}$Z6NItMgha$j0G*?ZDIqmJEmK*2e4oOj~_tEcUSPgx> zhc?Je`&c*xo7i69h3BFrf3qLbm<4oG%B(C}@J5F*(!$rpS|#pX=PE}muU`1<<4Ec# 
z?RReG7&lavT-Y_=_(6C>p90vaLxeUx_nR<#DIs5NB@w&W{qJaU`OTnh#Y3kXZpK~8(O85!GccpXn zxd!;h)N;kkdyxH3-BSHm^~K~$1YdIY`4HVZz3okPju02M9Vw1f$>+P9-v7y4X`s-X z|EaU=K9aIgQPUC3U~(px#{M6pvdsT#2$*=yv8H(YaeVT?me-2fQq@Lse_~j*nX(cV zH_EL5cWf4`Ppvi^$j)fg{l=fVY!|EuPh#9Kblm{j@xKz8+$_2xH|tBTyQIllMet@- zihF!4D64(}E_ws!-ojW|K2_6yd9Q2xkCJVY(P%u=|NTpdcb#r0AD-AWddmjQKSdSB%&-*@z=Hj&#xOF5mQV9Et zF1k&4NRZusw`5)I%@86ctS^ij?@`8K!VGUG;n1R}e;5bv1OBbLaooI#CNjQpS1iSt zS}$mr4>UqD;iU&dVv~TnoP`O0A6CVBE&h~vWiskBy34n#cUmK2U+CORly-2qhwgM0 z&PJmQ45{GIvCBf9!HG*h%%uh{O0$msc1p;r!w1uRywCPUsb*kgT+h);@HH3fZR1Ar z^BZg-XcpDu8*l*pX(#sl{$EF@4>%a40$cMTv$8Q4q2d}ciYe=0ea&}+5WN&d+ftzq z`w5_mCqU`~r1uIu1RZpT0(RdYT_UdqnbwlEEa2;{*WXeavwcBR>_3bIo z?z^#PNA-6>jfR&b_H_c>bFaDt1=Kf5x{7$p;8`2X4P~CKM$5j_Gk?jJtK)}oS`y#J zzq?XK7bII49`hb+{+e<+@;f@crk>|L^~JUO*m)YgXoUM(2YKV1M)A|f(7CV)r4q32 zD*%YC|H;)$W@hHSp7fquxX!n9o{9~H@^AzMVL zDs!3wv${X+zi;cbPb-Weo1Lm(>GnF$^(CoE2j<zeu$(i=C9U$8#o|j^XCTGV@HmhJ-Cq@0W=F2&Lr_~;>fh+$4|V~P z`%iP=dJiEVNCpD+<}nZ@!Ez%^ma3f2y?Atv+iUl_MN%Dzg(1y8KCW$kV8F2*ZE=Vc zl6n0b=_l|_ff)*z*1nyvx_0u}BE}2dgcO(Tce&kcxL;>y!sh&Gd0$GoHu?}m1NniH zub{eIH(Vh%cQxrZHg>`0^d??1OA+|RyD`+=VRyEHH7@XH2BLvrbpca#Sor*^2cPp4 zbTm_lTVdjb&+^{0wz4vEzbstNpS`M=pT`jhIid2Y)B4iBxw6h>$gWk)4r!ca`2Mx6 zdKh|J`143-v?;F9KF#vlv6vcKtvNVwU9dcnWTn%uFY;fpwoM*xFfnIMv+*gw8BHw> zGmo5@sF)ayZ==ShoZQ#a0IqP6hGNciEWn+A?sNel3nX2!)c4BGb2?>s3cV0BeLf`n zJAhe8IfnB4r4ItVPzS1=1dCG2+p()pqQI#E>`Lg+y3o{ovo($s=hf?w5DdwyL+$s^ zDt_j^ob3gK4HSgz2pvhClcYs;oS_O78023*E>p|n*C{DH*??J#M$)5WftZ+>u&;DM zX_+ncGI(O&lX{u_iIEWv>=7WTWQl3c1WD5>3%P5;A5G~-gSKx?H9w_9Je0E-`t8@T z6|8|{w23l+@&|wu06q?+cn7!bPj}ETHgX;ltL3hv$@+>t>8QWc>1j_G)|)jWP2wka zfdU3&w=^$Bp+nt!Sl3H04yNj3W*6FWYcXtsGN8i(L=KFRip+E~Pv2L3#dIXA#lS%hS6p%Q_DC428GPYUo2*ZyEHUhwYym#g*w-o4YRsTY}-r4e=K1q$H&q=dAeGVSuxJ%q%vx^8> zpQ&o{|4{XnQBn0@xB`j@C?F^(B@NOgDM(0nN(~~RwDizON;fEJ&p_b z3|$F03LakyI6r#c_(o#lNxVX=4{&qXFmzI4s;kzvi-Zb@mm~KbNY66u=8|5oqhr^g zm(;1|YE9QbHN?97{%{3ib{LA~Z^F|Ub)Pr&_|?Sd`P?AqP`kP9oXxj+mj+Dj3^Uu- 
z!u$=AT`+*byq60{y-8^e=h2mdbfX>#8AgZYlmFmxjSV@1eJ&v(p&9H$N@tnQSPJl$ zgC}+dhNn_-p{8ZcRFC=ROQ39o_6d0s|CoPGmJv6`%5=dB6pyu|b}W=tJCDl?>1ySZ zm3Tpp%*@M##1`18Tzsd2lerB72|g<3z*nbmfWL$&lS=?`zZK=*5v$1r*oB-JzdM~* z8soj|3X^D-CmU!FSU|cCuC+vXuIfN&!0T5sOb;K*N%P@IHO0dw5iRLV*LboiaVjN! zbaMwAa^aR`x&LI%18ZyeU$_9cJ=E8V_2A;_$xMLU?LWXeWr?~3D|iW^LB{bblt*0q z(+ll_p~I`tNd)nLGJzBK#Jo1MJWfLeo{wSLd@jb_l2~#DBxbuqDhKHkL}_`j`~CGu zmN(FaiHA@8x<~>MOBt@s)T#3V<>q=6DCO+oUv%_efj~ihgeO%LGe6IlqL|#2H94-b zyXUe`_Hhj6%k5WKUhe-D%Nrx2A1^nPn8Jet>Sr6%WhM3KtULxP!ksbQhd=UcdWkS?QhaX7VmGLFOs|%MqPn6R}s){G_yJ==oG9X zP_7pr7pejS1$163$w;iDwPC|Ty?vXPBWC$qgfPr`yd+h-NNd#UZ?{oO`k2b_@u6~X z3#rH2PX-FY=Mn+1uq<$JVEqRBBvSAC0`6-obvtb4#u%M{5U}f%)c){>IqjLu+8N1Z zT(vCGL?BGUu$D*jz!MNi4T7sYR^(Z%!BY6QLHbgBsS?vH3jTmv`<0Jp@pS!AZwW16 z)|-@WtEW$%vueEQ-l!27v!*)>5NgJ$&w8=gkbNU!o}>45DBhc^G-gSzEv;9u7ig&8 zwNPA!(o$B}(s;Uuy{A`*{)6}Ltn_KJQwWh{q}`?A{~6GCCKx!FB{h(4?xzgfHizEl z1dEw;i`a@rg#1DI%<7LXOmORV!LwlYyDx#gaX&hF;8b_3iIJ8z6)l|7pT{wj8CHWV z`Q+z*{@dXJWQ-|`azTJ+QjTagp48+8i1!2o&n3r-Vh&N5N{8WK|DGO*<1>kmjg$1f z*WKOS45!4LQGuFmTT~qc$1HQdp)cm5i<}rtjyTzJ?s*k*6-$a-SKSPg=Ft);rnq+P zn%_=O7qn8`Wq7YKBYufS^kwsBy%RsgU7;86=#9<&Z#B$V+>wsVGdtN&%){`CdTru@ z=oSKjbSn`4V8H7RA(3hZR@d3zh;wYsTIc7DaA9jXMPA#@ukAZWemWes2Ed=iP(3 zx-SOY;3NE0DpkxnXusN|Gt{x<3XI(irxM)=!dt!Hyte3WbD0KR|K|B-z^2e~%JcH6 z6lDJ@@V%q_4^M`jCU522P_X(z)bSZk+lLItliNhxw>gedQWiR?+cH;$4`Jo%?CIYk zfp{~1%Tpkph^3^YW}k%O1xUm7Rj8BS3yQ`R(7$4bpG)wCM2^l|ftpN72zXkD{w2^W zQS;_VCYo}E=7f}#nAoK6Du!Oz4I8$LE%d)My$oo5*yubUp-bRF#)=2iasvx~##X%m zT@M_HyFX2wEy+EhDB_xzDNWPaxo>*|a_QuGf9`*M0^%2W0v9o+1=92UWq;OAk6upd zVN+Ws%z178ypsv9^!t07YJ{{ViT)b#z4a8_Ev$$q-X8J!jSgihWj3_jnR-cbA9&RC z4aO2ig;)!2KD%}Hfy#D7dS!f3biEUv__5%XJLlh^2Sf3U>&j2LP$&+(f$O2F?>&4Vu8oQ(7;=bp zx@X>aIcM#h0;G1$-H8r@c@o*Ep3yBDWwgmyAi_ zDM5%K!dt&sjGwbK4Mo%L&awQgdRGyqBK{Vhfn7%y=nr)Gh2w z(C}XVOCrR?>Gp^~Eoa!>)1Kw{mPW4?{I)_eNuT4`~2IyhR9DyoW zjV#HQEHU~}d;4j+8^b6QvEIZA`Kc>I{F?t%d|D5$VS^FATKDZhaU|Z815+RIo*w0~ 
zPe!B5_T%O1t||g|R_{Ow_>VjHQx1ouPPkq@sxL6ehfYxG-3?<&`P~H7oQ`5{YF3;hGabTs5*FGKW0*ZLmjboZj2?;Q*}X*(1@PZlAb2mcz@p9sPFAK)8IPO~{1 zJhwf&Lm6Ox3Tmg?-Es+Ry0%nqx_+qv6Y;MmXK66%Gjede?SGm8U>6wyb;09Fa}@F# z1nvxFm`{B6nELtOmbv)Z3-zEZTIKZ=T-?<=-JcT_tQg7XOZ7_?K=O!XreS3xuz(gx ztI+xP+R2^O!$t}U_+HZi5iCs&E`mACeLd4U6D~P#;1(JwvXB!H5IC|rXQOH@c`raL zGvl)d$L^z0YnhWh@L~T9?n^>@+71vMbcQ+EEN8u})0`z=N&Itg)!}Hbo1`y=TNC)f zYc}|Ah3yT}P|L$9(T7)B)+H9|qPIw%tZCdg|6rvI^1m3{@WY*GUK*+C3&?0(gfqL) zX~xIkBy6_vZTN=m$m&le+@}&4T=LtSwpzzFV}t zkqce?l5`Nl7arLI@oAS1w*oIz?~&dU6EphlaLd%ymKs%zc;Gxqfp{sFe8B!{_~k{e zsAraM__W+brO+`vqH^9JBqb)kvdx&jXvQO^;S0JB#!~8vCJnw(0YU$N^AXq>-f&XM z!#@`ciExChGqdFXcSAp_D#)j%=v*&uC((UeM&ZFx=mV*Sqwa8vFX)ci5B^uxY_a!C za#ogFH?fhwT{s09&Nx#i%W7%&S&5q#cK}1Ks+5j-aXF4Vf^s1yj$YsG_EJWf^5?n% z8)n&3XG0=QAxk{p_ct@+Jsb9qP^j%zqDs4&3`EVpJaFzp<_E+a*y}VQ{P#EK`j*no zIz{IE1|ZqB*ytlG4CkQsCl_+~kpM&P%h{vxZDwzRaIV1U!rbbVuMeDlb@SQ>z~D0s zVEm;<<7ygHOkv~keM9&Ef>KyZ4JxYF1oW8FxF+e34gke@^T7W;T=&n~oY$WmcdX>G z7LLsK-XORfAAiU4)@ZK&zgYGYr4X6YkZVoEHrmV=|` z^knX08A2xV7v?Ig0J;EQ{#}qO$)EY`DXS=d01eNZn+5PYJapbv-i*myTkBbQ@?wS!asYzdTkW) zk+WqnCcPvqLO^-8#+kk?oWv4Zlm2avW@1M@I0AMj*DLs+!Skj6O$&sji25B8oSn?6 zLr(pWW|oqILcN>Ar^@wsS_vdv-c+J5-<7^I!1R&^n>BdxFH}uX2_QdrlH8FD&2*~2 z7Mb^YB4VEZjkx3ek(5s+HF1?Vc<#bQ+l&X0_M(<89(xi#SJ;#8G>du*gy9H80RSS= z3!1k#g3oWz#sAQFlyxq*CB2j)M^M}({jyOpo6UY!_9U zJQ+#11s#69l%$KNowKBx)OglDhl@iO|LuwQNp{VhgLYAhlKC61LQ@%((g) zAVQKrn56Z_b!id$x6#qe$5O@5FY1%fOT}meUPwSb`@-G?VqurJ@##NHkmmRRZ0q@p zt-1ajDtBL{(1^K<+FVe#stnuh)ujE~m`Vqm)b6;UIX3X*uCP-(5IS!zde}g^lg4Ev z3+DYppCi}X+=%`#O4CP80W}cY?0t~!g<^9LhQuqSTQ+V z+Ex>TX44Y?5MWe-3XnozZDH>g;WDmG=v2>m2?5*-oM_AalW%S1HQPWge({_Uzx$wQ z2rf~qW(R)2Vo7QFv$@%a#8iBjawpciA5L=E^sW{%62Es+k$19dz%=d@wCTEHUL>MkoPIZk@N+buaz_1&U;VFCql9iWrfwou203MmKyB9 zvq!hl&l^0;JSyot1A&33!{XPpkh&@mL!u05A4Hz;P@2Ac#ga~c{vC^XcK>Msp=$;< zDN-O`)eY?4*o^%E<1_Lm=hK-a^gpWo@W;$<3_D#+ms_^ocoQx7uSDJvz~qYO_q)fb z>z#OBfqZ0L8TC&ZO=Ux`%Um%Jt+1RZpZQmxZ>V>XU7KU_f3*O`!sX$luU}p`LXJ)>sBR$- 
zbd#DtcyW=!XJ#^5+{4c8^q0%>(1pD8%va5fy_GKii&~M*%Pz!%Gi#oNyssD^M`?X} z<542yyMGeL<&7j0a{H+UX^JuLW5zdyBsj8to~jcoiyugL%R;9J=ad9~#T11+wSt1i ze~f-fZdoh$iEjI?axd0)Nz`|QeO?C}rSj)4%;gB92{{w;`j4NN< z(Ak~}pF4DarLVTW>rEx$#5E+f%Y}vIReD+G};4dPYPm42@| z>!I22(}5Rwp|Y&DXD4oQ@ze=Qfc5u=UL!wznkJ0rqv=nN&|b{ljmY_m%5 z9b?Wpb=s5aB>fIQ|2&6t%|*2W5tGaJ3a&^OnQt~hPp9@B`!f&Tv+Uh5X#dYejs(Q< zYGB%_{nJX+%1e-ZaqgvG`)=?!*k6jiX0I`Jb+o{(OJY^JZ;2KdKMVkhJUqfKLL#Wc%Ma(DLImytv=7hEXq#DW82{Lu6wD$)HhjG z4fQuQ=&Kx2YzZe^6dv#jeN@24gbxmg5NA z1W4N*Rf|Z2^#(z{*5f0C(;nXM?X)#>GCHm|3v7*ft=sOWeD-7glzy_PoLsvbC$U}N zQ-=1twqhueTRx?t}=Fj&blj0QADkWg#f z`%~zg+lckG0l^UZP`ST$h&Ll6vflT-)Tz#p(O}$!l}KuZu+=jv{I_@RP_@1sb9lCW zm4uFw>0vfwBO}d2pZ>Rbowk8K3ixyu@4jy*zzvG+mjx(N77Su9hSAR+eO;aqCZ>Nd zEx;d3CjIastsIr-Lrtp$DbK&6nsM|)OeE6%1K)i{z8Ui7rEr-yW`PmPsK8`C`bL;6 zRk#cvMby*<1Vdvx-KPV(Ho%!%Htjj(sKoKR+Jo9Mm&%?x-w{IXRC($`0a^V~=xlCc z`V9Pu;4zbQV1H4Zd;P9ZsI4xvX(V?_9!yIoj2OS0g@wDC;}>uXSa;KS>BQ$}jLHXv zf|ir+o3!FW4x4s+b6|Sn^O)4T`@o^wjXd|;D3M|el|{|apxEtRjv&^lVxl|(Yq!3X zf+T?~aysZbl1a^UTI{qFaUG2nc@+eaf3xtLZlk;T_hZnh10t^EP0h$E*66I=R^vs( zBCS{U<#plq<*PQOtpkwABe7N6X7na(s|VhAbZv|%ibb1il(v<9eEcgo|F0^EJ*7~`OksEyn>NDW!*AX zUJi~(xEG9qDG<(w2X`fRx<6uT;ypK}Fk_*GpWL~ca)Fz{^bJWfU8ZXNGptCT54INf z+7EaOY-$A0aSpv5#$FhdnO+P2%s4TCIvJEOb+>SX=XYF665uH=ecUxK|84XM7CVyh)){sDuCTl*n03mgBK%}*P#DleEIG`hS?=0m9+b}0_N^c?q$yZ zb8oZ!6{x5JL@oTh=bO{s*v;RturVqn3Aj@iIEP)IUdTNELiSsaVz3}t%c-RM%<7!S;iN&lsRL#*0LG@`-LGon%-$& ztlA4o`br&7DOI5TOq?-qT9nW)~R@eS>rjV@we9}mhm%O2W z(oCG@!M63yd>RC`LQs&Z#n%p!Ylo}z-OYcz#9k}<2`9CBuVfr+)n4RrU>(U+0#*dJ zws>y@S3=&LO?SqT%nDz+rDH)r-6>?8p9lOHRwQS=0dtMI(C?oFA8uYSIZKCrUs;>||7K-RMe?%j>e z3qTZMPJ{9EI$$X;Sr-x)T*yrnMtyEX{yu+d!wPR)>W!miH>jkde&7JOk~N`aRg+%v zi0Nn<=VQOezZxwia{Z5u#t!IRRQ6BKOx*^AK}r}|wf=KwLuAfw$QjPsC&M31Cq7<< z`@kBYET9hyA;oavi*5;xhg8!X_6{%7sUmDDJBeK~=dC#*Db~=?%2Nm{=Oo4CT_a%} zEgbq&zh3@PVQ-B_!bP$RmGWz79P;-RB5!v5hO|%B+q|qV!j*O)7nM`|Z(Wo*>8WLW zEV}U|9GE$ipA{y#rH61)fveDGf9)~&LLn2I1jG>w|9$zG$6UzXdN;1m6$k^fdiN#h 
z!u!Iq!n8qRZIg6|*wt+)lf$Hd3pxsx0AN7FSPvU+1BGi|n$+E+I0r9RlV9?I6_diB zTn>?eTn|nI4o+;{Y^m=G+x8DipF>XkmHjP)q2aRt>2y=qYheGd0CVL2$|tXC^0}>p z(Y=gvy-!*Yw@4xgXwEtX-jdM$=}s*73mMbkuo+<6m3En}oA-*5&RqXIOTW3^6)@rZ z9ic9cc&5DX1-(;%E?}WVo1q-q^UY_|fxUYRc1`|-jPIeN=hG9+!-X!*KSz;6Aq);} z2E}=?D(Yu9Oj<=;akDlKmUlZ>#t&-ltza%xP|I&}0{xolv1EfE=7`cXux$@1sgQ<( z&>r14&Ip$9G{`vSI87-5dVn2>$;5t8i3 zUWPsLXzro_m^ahmbHWD>hw2N^{70d2HL}>YOVinM<=9qSF17$mp)y<8;u;qyc0rB_!Ko~h`r zPE5D25bqY&T-;pFZy02ilJ@L>0ub-eABK?ZY<@>K!=+)(a?^t*(lAt~>NwwhHdNzi zk&nhA39pd(-QsaFG1SY5pq1OdsTUj1m0kNlIq+;Au7gP z&lzjp)R!2<_2m0*yHx!gT#8>jSqhy%Q%cekTz*pHM0@ML9;k=Oh{N9eLleP@clj~i zQGfN^7711|H&X5|fqNcgmcFxZ;tV?@pQ6Hp2~3#kfWBSlQ$`#_g;-omX{%rPzlPI= zy_CpN`U}nWQgY=bK@2j5)~7*fU{C6Y`J>2fel+FIwcVes_oOPMFF;Nl@WYP6wSV2a ztzP`yphVr@xX9GrzW;`YJBZE-84KYt^B1{lP}6h0VoZp2kZ7hxx#aPdtm?)6MNKB& z8Ixo?PrFdcO+1~GMSYdClZll^jq9)O=U6{irr3E-TX8qEW0{txN4dt8lUm8~o|}`} zJcWXStDz&M$pr5GB#q=tykUZmYzC5@C7S^cRLNIK1$8C`HVq#eZV^5&#*HDL*VWZ| z1KE>M;lRYg^0q%lEzQ3wMg$QUvN%TZ_$QYaCd~f%_`t{QYTNw!!yne9rYM~vz3$v9 zR7^)hRIhb4${kZrT{qAteQ4$hdEM0c`*UCKX?Zy@J&?E0vA(HH^zitd%$ zJ1c~|Hy6TO4GG_?cB-t5DfkabNRw*}dU1JcOrw72JlJXHw3QC?YrzPx@>cq=abz=k z>d%fh_L-tyci7*?2|K;BlE2VLo|#^K0JSK2(BXQTo16Z{JBYbh<@@~tK~G}?t6|2x zKGOStUEgg66-$;dech=;?JVZ7MH!ZtCJf`qoqOMr8Vf_(1nJkR#_knzae)g)zb(s_6 z`lKaIdOSzn&xwwL7a$CHP3(fvU@xv@nS3DHZ-F*Mc<-9hYr}knp~q4c6S=pxj{O`E zHa(>}n_;JK=N}W#@|1ORR+KTgODS`*yXRj~ARHKL>GD1Xv2$3uwk*|JIg%08uD_b% zj1t%CLd!7-a@{HEZDSo=5e^8z6mays?v5@fV7yM2oL-Rs(8=66>#)SsR^G;do9A0> zv6}KdE`~LA^{~%uvgi#%9h|(ReeLcQCfR#REI#ggjE$oaP<%BYOAXi6hYwR^qpZs7!D-KblNa?$rkH=`yB z1&%y084dQHs8XrLe$0#HI%$dAw{I;@#^C{*yohQ4^Ce#rZ*}z^*h-Xqhy8r{k|sqC6pJJ8iI?Du^BZ(yKfSxGh+; zMqO`7lM-x?q@=U!))NETT1hEWS9cD&UH;V$<&^ungbWt2>zjIv>!;<^yXGMp47d?K zt1JL1U2lepQAph;Qtsi?K)n4U+00v*_h6Dk!o4zWk<~T#B|ti*2;38&bM4u)B(T|z!_Pw81*d0C#=z$ zQb*Xp`HsvrmWNE)V5Gw|<1raE>>HZ={01RP;BRlssFh#z@!Xgl0K&of#E9VXO161w z;KhkGHmcTw3Y-$K4IO1foNrwyX{hHL4&|7RAKmB6pADR!I~w0v>PI@_<13_X45?l$ zrPe&YbaVbn+}Ff3T=?~g5er`>l7gU%k$cr%@SWKv}nV}7QCb# 
zy<0y@#f7Y>4%j)kz4wrXdl+W(@%5qmCLa=pa^i2QyM48WMXv*f&}9&-qx*Q9#>~yH zZEx0FCbk_0ddwD2aY47|SK4T-5+O(O?e!+dr!e{a-f5+Nz^GUCU0)b)fQz6@^2fO| z1Hs{0mbeGxPz3xbDLs$tO3t$lecr=azgyH5TNq6l9A}8=S+DpWUZJE75dn`~bs(cU zbs-+`Ci4n9soeOO7-DSiSb5XA53$7y#R*xZ+gyh!v)`RZAH;o)rYE#^oqs&=R?6{y zmHmVafeg?_qCWqWWKk!_Ce=4YV>Yp>N9*G;)SH@Ba z0)gj#HJ7Uj=qnI`?PtQwa(`#*z_lblG2fPQxv|h1sSj@kEn%$eV7w2R#clRJ;`tMU1(8tP5BEWp| z5`B2By!I;|d=6Qq7X54q%%H?pecw9HuP^>r_o&BeVyc;^UDwM1UUADS@PS?_UdR#s z0I(+oq{9Bz-G%6CyW_o3`^}Q>o5(k+S!!?zgey%%8=Q?0d_@Ztl zOpkUkvSnS&RJIsTo3>lqw28@8LTst9=|KqHgqdQM-MCo5>6_%AnYXq^$_%Y9gb-7r z5;cdzAL%R){o5cO7(9?=eIBQe?llVzNw&ei!Cc7v&G*0w#9>hjc(`RIRgmF-1UjOu z>(l|*W*_*Bl%vG_Pyakx4&dvGoBJ#yH|rb7np&#I@-Sf#zzY@lN4I_aP2GpXXvF+N zOUoOhYG(b2+W1UWDeWCZT^IR4M*)D?sInM?=Si54&okBU=rPh;J3NTr?!9>Cg`SQE zpPsaYV#wbdp1KiO)GjB8S2*YVhp`-z!A^_g<|}HL^G89tg}r z*ZyNNOO~!QRhv#SnWxrQA>TtiV30a>D|Y-XziZd%8?b}Zlq(Z*b)HivRLo84ohE(X zhF?e2Er3r#(^)l_UnP>*C0PQA*$={vs`RC_G8193(ZA@-f*!ad!BuyXaC3iiMit&t zCG;)J6pP}4Dk=}Y6fVF9p+_u?5ENSS=IjROKdU&U=^t5m$SB#yQOr7Q;>oMk)8qL0y}&A zf6~tdp0AMBFNeK(%E+G|d93(?Gn?zM=tOa8+$VN=+dJ0$CF&93MR&ctdW>odAnqZ%fG4 zZDfqYRPMKPUD|OEjv3QkjRM4N)Mj&8_frlIeo-R*EOH~$I8>r8W2AkXy0)52RI*!_ zW$*S2YlZ8&>jXdn~JWR)NN#f{38r5tdxHjjEZyLtd+{!hkr)F&aVq z43nbN>-Vp-?=g=LR&>JKv4uB1N<;2y+F|1O3IVJ_d&2LFzZN&zg!u_o5L{_@wf?GheMZ)dq6$T$<6a*9salRsdUICvTKvYk8Bg*$WG zy;;oVgQfozx~&YRK%zBtl0eDTM)veyB-wa?GNiVR>SwbX7SLc@$`Q_XwePfl)M{H! 
zn$5<60{#bxIY99OYy%K88E*;}x3a|(rQF%+;w`+6PmhR1eg3}DJzdw^vcsGOl+vM> zGktFoTJJ27Tv(sPZGb~sjidO<7hFz*)9%38Q<9CTss(t7-D6R^3o)_dFTjasPnwQa zadyS${;8gFYFppa$+x?;Hc=5T@4A#qyPbb5dV$)lt3Ij967>$2aB6}Qc-8#gRBZ9P z4xzYvZ4!h+>6EzJ)|-3LvT0^7Zk~J1Lu&SuFE_7K3*#~ynhw8$^%v{pajHGXKRws| zA1^ng2rB&-(?w)DDwF0eS75MN-kz5O#<{(ig+TLuz59kNV82hA0vg~o z{E0}}9-s@yp zX2*`Q?jWmOS&1jT;pvmOMk)^aseiqNq*gFPw6pnfIhM2V^?pTOvjO68NIk2YMEcW@ zFrwB|a7w(DvcyMJ3^i`SanKorg9nnJPCnv(t=Id8m4$7ohOUR(7C!r^-^HaAKRcha zKevPWRv1)>ebwYt1VUs24(G2zGR;9OmBpO2=l#T;wUK4M-mLb-q_kt6=pO@f7p|zC z<1XQdVG)*p#8=+3kYv0xi#z(|_Cxl(e<$vGL@$vUMQho7d1I;BLAG9u4Vfod{f8Q! zux|^YdQLXaB?dS&^xBUrfAu=W#nfl9OMS)M7LdcbCg5H-sk(d<;lHPE17ZJwv~X9% zaUpHDO2UZL^LxG@+_3)j!y<*l&1Y|bdfQhNqiKi0B5p>Gg26o(H z_LhVTiu6)ETO{XXRgI5Yuj$@getKmKA2Ey&Pj7AFk9ga)>Ou!1?yGlTa-e_?Z}^HC zi9^~VZY*e$BqS!nGiiat=mWFtBZx?2H!C3K&JgLcqymX>D4k|UeOANI{8aT|DLf-+zg`JB)`D?pDsiRZq^bkw5!6bvV z2i>`Ir+U%EbAp$G+wS?4<}EVytO<=YmKDs`2@O9 zcVTQ*upzb0Fr}3GZ31nm^DFmtgq)S_uHwFWsqFyCrSHuyK<-SS1A$F6Nl8iDS5M~| z|LIAFzoy16A8>~2ppY(f{{OVW8@=Rbj+x~BJe8Aq&Pj#vmhPH&e5o0WN$ z%9C3ecU)FZF$#s>u3aJr$_(J$gXVfG8jN>hMzWIInU+iy9b#d3ssDOLn*=|WiVd)v<&@KY8&(@LI z+jQ|T492!EzC(0Q^{cGnX5-iL@_ef7g0K}aZG6PxH)vIWP&fMSbF1lm%MiZyBGde@ zXN`jD$&ji8Zf=?LgQg39hCxi)Gi&Cg@w6PEP ztv#i3Vx2ybC;3{_c465A!k-o9B+ZWS+fCS)&mYW*?6i?c|4|lz(gR62mJ|qDj^}i zEE=*2J$yCvFAw6-M+T}-lw)Tk=cbc1ysIiE6A}Mc3qYa;b+@yAyXrQm{ku*fsLEG( zq5*$JsRQS_A=ScGtErK8R7gFkwO3+S`Y0t|HT{2v*Qw(=3}c2OyE=rhQYt$;DC{n`(}O_sJRg;A$+IiNkE|MkZ@0LuHu#A}#B;KaqW$Vc*CC z@6*g2W$lzCJaa;SbRf-g^viphDcRly(vb*0&}-}2Dbh454BUl^rZ*L7{vBIQ&UrIS z8&5E8guw7xaWkqVN8P#~6lAnDfSxfDG-~kVh6xA_;R;JTp6Okv0MdW8jaNa~P8s<3 zG-UqZdi+d);d0napzMEDCbF8svGU0}-^TPW-x+*>5JkNn=o3ZtbMt;!3A)Q`CM)4d z7$;t`7&9TyBhO&2g?hOwz~*PgFmH^OT~B7EM0r4pvEhnrs|rp7%q(HVN?| z?$Ou9urzk^BQLOp?oXY(B31JXV6;?-w*Z$J3UXek2B>2B76HDLfP>)a~6?zUnYjAEsrKa8)60@epHaI-buN`1cSmGS6t# z21!g}A7IvI*q8PNv|Vv@VhXVt!gZVVn-0^piI46*tsu&w5{bJjZ2vXjOnkD&dFYU> zOVDFdXtVarvmEsmFWl9YnIclR&~5yR!h^g0%?>)qXS_ 
zIG$Ep;3>)M-5uvJ{zO2~^0O>cSy^NLci?GA|5JQWEG*URttTp#QOjZ_c4Ha|hm_A{e;I;mHu1h|bO8eKrd$pU;9z&Et>4JQQ5x_yPHj}4& zeNuDlYZx?l$-lDnx&}?jshtI>lEa^jbnRV3^FZSc1X$0q*VgYgpxg%IrB9oGI+_%l z<_iSc|4y}~(z!t{(q7Sx?b$IX8!R6PqkQnuZu~wtp};wY zEm*z`L7kHxYe6YkcSMud-|km1OwCHeeZG0(=5^;}-DVy4< zmh6Hf5O*V8BP#c}k&M5bK0Ez>d?uy$^5dKZPPa>UGTCnMqPH1EPEQH>~Em@Lo{Ypq17r;f0>M=2v)B;Vsb@e$c(xfUUp= z^F+^Q-v|h`)rPU8zmQ1k3fE9@{oWb_saocP>$yYE&6_NcK#O5cG{t$fSA;meoRfs9 zofKfF4lMKeduzxBq<4p{hKTVgVX9)j8gQtqOOMo zEl+>c%MgcjbxJBMfqw4kAPrb`?71~AV~3Icrv#Dhr1&J#JAC@3de*bkBv=wY>Na<9 zLNJqJ{ELmPmrJR{RFVl`7f4P^0}Agkk5uM*P5O>(dMsJVZO=F)ixIye#&VZY0x>=? z$iQp4!!GH9B!uolsgcx>49?l(|1vMUld`qsz7YM(&15I zZe78;aHaEH4rEE}95i`hy96O=76V<8e@G6=o^UYfJc4eUx;jHmar5!F(L8p8CHkD8 zcETlzqbcqHh)eQpOqU@3&-)1a&Xv8T+B2hr+?cj#+@;4fh$7UO_eM6waz3r<3v!aK zyAnEh%#~rtoWt!iXBAyH^nlB0cbs_~GXKv7*HV6e;bHG(dKro)z&9arSOtbhEcBt8 z#*+`V*Zn?#M5S_tkp{=axRd^t+!WVC`Pxh+T6?j-D4%H5n_FYer3qf(*setTWBTGD&*G0wJth$P0_!dZEJFFkU5 zA77tmfpVvAUOB-_WA@w1-j=xs%}|u)S1-*lOO<@P7KzD4GDoNHQD&nzcX zDgmAEJkuWj4kRYV^({c`V{Mu;f=WqhaXqR_NM!i z>xfB#;%_`0F4J1A;h*t=XOrdMe^mV;pAUnLh~yrNJAhgqb`ut`QpD6=NI`s;$J!Xq zWs@4me9vrSFd=+6yEXPnKN?=_3VEDhb1Q}(NF^-SuU}&DMcVMYg2xSd@%HXSrfc{i zW2J^9c~$_MrEu%sW;UHIryVabTT_qJcWd%h!sfDrU~d zK^lkY;Qj@a|59)2N5~#O1##XyjXTyZ@fkV%;d5=+pD$es5Ov@O&L4SpD#AiL<>kR_ zRhW#9h62yqK7>UptL!h|4MfX(|D`CN-XZwjh%0QSC%-e2Urxd5I#{~% zaZ&g@H!^BacbKZi zW2O=JdmSisp=rOXZhYU3i5?jQt?*zDQMvb&TMyg)Zd~oqvrikiJ>4Ez-|&@@DEdR% zt+ytxxsHbhh)ggXI0$9UiIMA7YVoE!0@Mh8%qa3sW_sNpYbgxR9v)ZCul7maUD6BQ zzUbhE{MM;e)bAY~RYCd8+4hy>Pxyq7)n)W~wXzlSc}D*$SNzfQOEPLT(FVzcZ4*`k zo|PFSn{mY@)*Fjm%j5g1d1f&iB$r=|vF}4Kf!`;SrW--<6otJ?20|bZx#_OfecFXi zW^r^91!l<&XHfO<#lV#=-a06PrI<5Q#b>WRY~|v}uX>___2V_}2m#Fb6v|dmsYthh z=e<@!O9;MYh884?-xaZonF~yUaG%fCeFn@)#vXR$g!p#^toR_SaosJJdV%D!XaHIi zMp-!(-&1>16l zu%k_8*W<}R{S^(kTS7iU_z)8FEdPem#kpRD)%*Tm3wybGaj(9iEoYnRXPq7BSoH4^ zA$ac$n$NnF424q)^u7=Txwd)rlKF14gbCi!U1<&$8200IKOyXJKBdB6eMS7p6%-pu$w?bwc5vu_qW$$xE!5&(Td#NEiHT}n%IO0AP*T?4 
zo``*k9O}Zb^xxY$b}0A0i{+s7$@28E=zmrEag_rmquatULH&s|0a;Am(KYADdh5dzqZ(<3}GXp zv&Iu)<^6zs%d!>oD!APny{BF;%QN|`&aQ~M&8wI5>1fN)PZN&xH>V@2P!tbUY}|PK zKxHLjyc^C2dzwMs4(;%$aWE=|0Q7I@p>X5x}2yU~@2_)l%Z(S7bs`Zh-LsfL%B} zRTGG4(qNF!yMSa5)bOow(w_GcP9a8?%eayX$9;9=3Ij2K&V4yE4M(oO1;EJwDq{#j z+yO~@Fa^k9ug#OervTr9=dVIhfZhQNx*f3pc3gyAfv5MNkh7e2al%*YIydnN`5njFd?|;!N^z=ucElBLd;z9paHhDkHS*uA3 zNIaH@|I3Z3ky;^nvihXg>S_Dg0YpED35<*a|K&_3%V#?hO~CMCs+#-Nze}Lp?1*zZBfpmvwYNw+sA|ZQ&abo07Hh{rYMhooc{ccDFN{$Dy-Wm(A!4 zI)ZD8H+5@n_KSMug}ghV_A}Ho3bxQ>t()=b5VR4NF7~HQy5{#GYA?VIY5iPoA-}g_ z#Od3jH8FxSf-+Dh$1OSC38CD8?|F3B0idE zI)4mIb^(MUop3?ZrvHP z8WZ3k1e6Xs$9I!~L+mYCTLe>n&zs?R%Z!`OB~|x)pG}hsz4(#vUT*6#K?^Xw@hM4N zU6OIO8DIcGoFDY5K`~wU-nyW}e|%t^YCax>$CR5ntK38xPKXhCHio8Q&IFaQa1{rFl<5U`a zDd0M5%~MWR($J70=mA$cw%@VotojwVNzDkr?qC6oBn>XsEeu~mT0n_aFi(-DeSYu;k)2H=dnTg%rq7#e3=`?yh%v7HVc0-clDr z$RlliyW}zL%hnY1OaWGgV!ee*%A(pj$gx#j4R4Fgtj82hU(r35r*5ykvOlb_2D}}y zMy_wv@P)qmTq4Jz{ed}``;g)qfH^sEs>Mgp`Md@MKAS$^H1R0cPxkFs>(#jnr(Wo2 z8Irze2d9`!v!?V}`^b=VArLuOyaijaOvHbM@$EYPujE;sd{qWAEBuhO6we16 ziee0`lxJ)%>XZ3nVIJt6HSg-i2I_&XhKnm}YN{fhx_%!_QSx4i1_O!TR+QfuYCd$- zm6>Pf?L>zq7;|Tk)*_#OQ`_TJl?-4oPe5s1n5foK>?28&Ye=&5_s=6=8(MV`h3ey> z7hS~k*Ec7P6>U|VUwra&j!iH3<9oI}e@Jw@f93py{^?@()!NF9{7Ty?{2+*`-hrZ6 zcsK%i5a+W;ZI{f+MgjD4ZtcygENL-r|p#^dDza9ZrTf3D|Zb%Cwr5I(!Z~(zBc{#d5$;hUvx$b==KSqzE}->6$o^+c!9d1u!}{k#bUB%#iIA~8 zTkenX_IKZyOa;NUN(85P=mKG$&=I&vTOX$|tc{gX8{oQlet~8D+9Mwud^)+8Eswct z51lC+)ab*$)`vnM1e8^aEkE~cWZYkrEm8C>7t0nuEb>N?nN!Rgct$5C z(MQL}6UVm5!r$wDR8aSQNqiX>IFpczlctMrl()b`>l-oK2e)!f=d?E8Daogt(s==`H^lKOaMM-|N#O9(7B<`0^>KS_8(RGuX&(DlUq)4zBCw}R8 znX}7SS*ZCX5QeqA8EpKB+OHh>G3ct}W_E;CTc02L zu^OW4`Rlm=^v>@tUeFY=(eY-b{F6T{YJ1Iv>E2Omcs7Dxyni#59vh!R6`EJA#S(bmMxOob`rQ#2cA2QS<_G*sJd; zyAl{m+m6~c{q_$1+&jOGdFMwl6{23=AzGHM$Y`${$Qz`&*yWYh&4*5rQsdNrG5w|; z2UkxaEj0v4k3<-J0joIt`)z33M;E7!^=#gE4eV=q&%y|4#5`ka=6s?TQ>>DRHT6!; zUS0a`v-d&m?N?|DPr@Dyo*vZ$Kh$iC4@2!cgv18Em0X0akL3j+^{*)}RW(`qD#*(V z>RFIxYwM`EHV?hw&tCWW_wM*;999t|sK>ZF-M#kYy*Fr6bZ1lb%>I8RdYL&W(SM$Z 
zJ?!J}S>FSF`AtH6NU$P-$;=WkLBGa-Za3(}AE~(7!EY8I4YOb#n4=tKzNW@YAN}Ir z)<-LL;#UmW4^dxUe1Bo8f9t4aDy&h**Q5*Wa%-pd zv=W`E@te?1d^lCZU{eS{==>Rl9BkQNA>tj^3;pXoKdZ-aX;#XsPndTj*v-UWt7KJD z_HC(zFqfTpboDw-$3k8}#~&zU8m@LUIo{u}FAZD@9kM_Dmbw)%;{l@f>(kQcfE$13 z4%og2IMNpFCX!!IB-koqKKDRQy#BO38L7o{;OvnL~xAeR<;58FFl1 zDjibDpFuuq4t=J1*G88+g-}!W)Oo$;A`4zfJjqE(kK1{U;hR~tR}TwRE4#R8X+}aR zh>puk;g*!w^1)Od*UPj(cwYf@xpZqmUmv=Rfxr_ z53$=sEg@d!j3>W&&T#y5m=`(Wh0cqd#?J|_)~hILhpJ?$<^koDCFEAqO^V1^7!imUZrX8}(ZyvRpB zEQg#ePa&T@FB9U)GcOY46)AWWKdQLAa}p_S1y7+@0m|4sE~n{OJPL2m;bQP23!c9u zuY8NS;N>~M%d?PAawdD~&Ye__Dur&Wh;xJ@U5Dalgct52uQb%7-0tBgKg-9~R2cloMUQTd4NuXPW0U@#KJ~ zkk6hcAztKxcT!*-p18bIpNU-;CACenh+DL96^&`a5DDlT9-S@8Tt zc?$U~c=DA8fhQ>`>2Wm2XjhVo5Yf(!>JEXXADMSvxL5ND^JLA7LcWk!b~*$Fz!Qf@ z%aK=k^1`cmGJknqo;$oyd7^oFmhtF3D(_x!;iRBw64$u&*mbjo0_J6O=4QftjNH#c z{@mv6%-$HhbkAQB<()bvH!n{!uRYoEcnbN`!4vQPIh%Rm=A#qn?M1uLjV!a`Ra0eq z4rdjiyOgTzoxA84w_2g^XaT#FqT=!Hrz#$=m$+oZOLS|TLjEtuM|lz?$h(VfbHJeQ5|z9~|#Jt^eR0^UjCDdcm>D`sp_ys|Kg$2-CSv3d6#s^Iyo=YWNL0rASa z_Uz}SX3&`*yx~m#rCmL55B@?pN2W^Am3_PtR;UOB{EU0z3DWwfWz9#n-dwz9$Y$sK zXV0VL@0~uL{N;(klSf{hjl9T)7qNMf052sEKl&(R#*DGdB_lN(+3`4htoT{+QsHGn zJTVOIbUt#78O@{fxcJC*2&waA&r?AB&|&v7VZu}T=EZ4@9~qEOH%}gUktI(Io@gG& zvm-t)bH_(g!Zi09r4}l3J!&q!Dl!p+CoV6e`0IOI&(lvoO~?%8iqCm`@{L!Y{ru+J zwQI*yPd!D+5+&pFcKb!H`oExfnV-DKAKqR(#N%ZayvT-^F4I1H=J5Kn<^zEMID64;bk_w zjLpmJdDAo*GI$Vo-gOtr$;r;dDnZ_fuiHNt3{PYkAH_TBQEp!7ymavrO^HRDSe_yN zuDkk^me!w?l#~LDFU~sNUKogi<2eJoi113GytP1CTQ^KoX3HDG_FIo=8A~naP2CZQ6w*p%>1RLZrlMe>cD9Qr79T5q zw!F}D2&6N}o*O&`%WJZ7x#Pv@;AL)jq4VMU&-#br^^payc^jxYq{{tYFg)4vWWy7S z$F)V8W1f8Fr8MoL>Vk_eS^PZcTfU+ zQe^i_+Q%a}Dhis*Vpv7OwpCX(tn|OSwdCDAF5Y(3O&~shYTj`o*1ZH&L!yG^_5Xhw zc#&J)eFPOg+Ki;FWWz_?cO&0bbbNJ!=BNP0;!%0GELJXgksIETqfjQ3S@Fc>$rqmd ziI21SkvK2%&b!Qq3XUh=cyWe!gkGP3mhKqItCzacnMjZq4zJ2*L(X}((%PSf5AVVY z^+-B9Ugq)p%Z|5ei1L|ty<4*5)w&L?u1fiwwFh*%c#$u>OoR`g%xA^RobamppTaAH zmi2QZ5nkpoeja!cpOgfygP(xxlMO{%3|q)kn`yUKR`=E2rJz6*0xg1MQ9De{vjH}wShyvBF&@+{#|`d^X*UdD)z)Yk4GnIvA))SF5VSi 
zogr#`RD7ZpDdNO;7H%Sbe4%eJW)| zkBTn@GL%67xkMD=#WxH=$V{aF3?!zDj|}c2$Ngs*0@E}VUdgf7`Y-(MjoE%@a$*<; z(hOzWe}*tHLo$ljWEsP!lvcfM#C(d*`;^#F6ds|LWG!vL#uzF)>a8 zh6!=ZAz-jU;%ep*_B`31_sHyT_Qjq@h)p2FAq+_%I3eI6V9Szqa!cyk7k#Kz)dv|P z**JkazE3~>mzGRCu|Ym`HiDoU;2_4vNeMIUOLa8^ZiwMy>plMc|+|7+*CWj zFb(v2c7WGEKXLytUe|YGF+1LWfPAhZ?2q+dhIGYJxRUJU%M4ehssRfN3;5^%{nLMs zmz$e7H8%(Tu3%q0@AT>Ai>|N8Z7%*@UJwmJ=yPkARHu(h>?sp;uR`#t^b?QKk#O1`dczsFa1s+cTJ zM%wS`@6>B3tCaW>jSNJ8<}hd2A}2}qTlIskV>Utnxyc${$6Jfsbmslm`a}#UEi|ykVqt87>3WM z(<#_}8~^k7zr%JnUsksZ!!VFYn4$K2`dzCF7=(nGQ0>?B`~5D24J49@SpW6$41K)M`}}i$$&3Aj@TS7rpK6DoUlIzoyB~YeMNnv-ReU`&}3Q_mZIL6Y%==YfMki zD11O4gjcU#VRrU}X1}UWMA+Qi#O%qBg7!0g!vJ2qc!{}>=9KV3X05qF4jK6M`E#5) zbt>F`|G>b&ug`zM{Kxa^L5ikt5U{cF3qJkyI3WRx3RKvP2t1z*Vosvy0W73f;ZZ6 z__g<gNg?+VAmSudn0A%1VIO_1C^$!|IJ2;r28AwXfH)y1F{he`{-N&gY1*-{Wtu zui^UYiX4v+p})3%8>_3=BklKIdwYEyD`EcQDc%6?+`WV4kY|{qkIpMfRYC&tKOlfVFIi-y?RyX?buJ8n8oag6Ml-v&l?*!bLLF& zq(&R}`)p$aXBQUSlMAg#naynY`R5IsIei+o)~$vYgcAgwKYxyoKmEkHec{92H2oJZ zf5rT%d2RA)P=E8~OU%v9X{Rnh{a3%e!pRe}Y#Jv*^A~L-cocswfl+ATnb8SF6`CF)`tG>4^5}7iG-=zp8(3`vVZAibu1XBfkYw^?LPvnJqtvH zWHQB{Z0J)vy&v{eDy4T(P(FXR+eP2Dkxpl{dKq=DL$B9Guh&B+n^kzVH6zgP^&|9bv;SuKrqmZUYV|q_g+kDNrY`_&ZEs<^G#zR`*MI%y4Q8fiLhbiIKM23QdW91w zPDK09HDPme6Ca&C8S6hH!iyI#FgG_B>pz1G{PO%4%+Jrq`j5!K&l?-~I7vDi*Q0xESL zHOhYkpxtSsQoa@IKY-9`w@@xu;{7LpcC&>_xy;f`h4NG5@y%8fw<_h>=Z}ccXf{#4 zRSEDysVj62V08b5={Fip=09jDjZ(kSXrO%Smb~2&B2>TGYT{P;mU4ik=({_=X1j$- zxxx+y!}U9@w);8(?YLjjx4IT8m5OX|;qr^0f5Bn^Cw?ebDv|bkv1ZrmI`1c-@|u3P z+jIN}mDgjKey@jer6N01M(EqH-On2q12TPsP%bn3C|ut(Oq6flfGy(vmq;dYV|5j_ z+c+mY1|+1?X3PmKS@#&WoN^-I{cHmgk2pBNv<EtO8U%DYU8GZK*-DSOnCIbImgNSWDlhe&4c4AzA)QJBFamkJ{9mtYA(2c% zK;?D)UbhR=Oa!MZYx=#O1=CEh6e?aN%5-}@n5GHCE&kN)_a3W$--c$B^-|Yz` z48xy)ARkLrzu&iE7;>gTl>hp69|j0yn22;90Acq*20_3KwcpeCQnTd7C)|HR^qt4s zpiujHN}3P?ra`g(6K)qMU02ud+XUD)43lF0Cxo-PFikVoe?kb@eH&&X5$ivKK(F72 zVJ2h!2jM(3&IS&#&wsCH!%P^F{`0oenbkjg)(!VhtA$K98*0CNf5Umdv8=-T>9<0zN6l6fV`F0q 
zAJhk0%_g$BafJ`)6VPllFqWGLwcpbxH(<-<#zX8^PxBiM=VlE&eON#}#V5jUy@rX2 zyxPb@-xw17*C~9+H)*$4!$cvke>(#D^?D8Yd>#>Hn*RT4J2m8sMSmk2s!u?@wu5}W z81?Js^R-$Hg+ejHeyLxr?qG7V7~pmN?dmp4lch-eJ+fBaMsad7)PDXwsBTv=S(?;% z*>Q?kc0tK~oqCL<>ThptV{)n#;C21#))q=rQxWz{{q3zSl%}R5?dQ6ssZyl<{=tf` z8~5ufP<7vLkpHCako>p3jmgqr|JABh6x~D5IRDjlYA6(o0iLvCfogTzKSbr>wXR>? zsiIh%4DkF@2?*48YA6&7k@oBM>AbFAb8qAoo*n+d|Gw3cFXSWb_g>kk*D#SUgxJqp zQ48$Wcb#=8%zmcd*sWuHVj{rnF?6HRK+erT;4!$a-)b~5KA!V44Z_wBB7sJ;jq$OZ zHdhB|>^4xZ*I4Z!+e3hr3}iAH;pKDjN$1|RuWW`QuZF;L$Nb89!Mfex1F!JvlMj_YaPaB@ds`>yVBEFH_A%#}l+> z6Gz3f@wiV3AR>X054+qo0;$byjtlQ!@Es-3(k>OAVDMrT zwc=e?t_!7Kqgu(%^SP9*CFl9x=)L7JLZ0@`3lanMY3Mxm#S_@6=m4G$#EX&gU_8a} zvRRvOJIxH@S&=Xp@1`v7pT`mNEIZe)RD3^qgz#brJPqLWbl3y&Gz1<;#gp5vR}1XC z0#3i#p-@}I`g1Crn^01Uby)$Z-+f0YX#peW!FU?LL*r@KJiXt%2=ao>k2xq$A-p&& zUWDL-PUOs=S=od9*ywEezw80b}pP^EBRf;Q8V? zfQQ}jN*e4t1m8wcF}p@NRk=LOLw!yxO$t1OI#Bp>%e93*Kvj&ed(;CIG<@GpWIJ{w)raK@|J2_%TJ8#P8s1hpzBgT{3DspH%KJXq-&Iz92 zwK^J42j;PV9@^$f;nj@LC?0RYBbI0V5HubGc;Y<8@lbf|irp4 z?_@e0o?>_$gclJ!4dA_lvIFok{X|`VRh|?cY&O-g;c*1Kh=}21c$%G^6}4L3&*9aB zTHZ!WXp=Af;%D#~0l#;i7d8!_SG(%`add0Ef%-mxl9!l;jce#0EKzmX{41M!_pF_(6C- lEoumScn{lm;MExX{{i55i@rz5cUk}d002ovPDHLkV1mHjWvl=I literal 0 HcmV?d00001 diff --git a/doc/source/ray-air/user-guides.rst b/doc/source/ray-air/user-guides.rst index 2c5056689160..ee92e2068367 100644 --- a/doc/source/ray-air/user-guides.rst +++ b/doc/source/ray-air/user-guides.rst @@ -110,6 +110,11 @@ Please also see the :ref:`Ray Tune environment variables `. - **RAY_AIR_FULL_TRACEBACKS**: If set to 1, will print full tracebacks for training functions, including internal code paths. Otherwise, abbreviated tracebacks that only show user code are printed. Defaults to 0 (disabled). +- **RAY_AIR_NEW_OUTPUT**: If set to 0, this disables + the :ref:`experimental new console output `. +- **RAY_AIR_RICH_LAYOUT**: If set to 1, this enables + the :ref:`stick table layout ` + (only available for Ray Tune). .. 
_air-multi-tenancy: @@ -125,3 +130,20 @@ If you still want to do this, refer to the :ref:`Ray Tune multi-tenancy docs ` for potential pitfalls. + +.. _air-experimental-overview: + +Experimental features in Ray 2.5+ +--------------------------------- +Starting in Ray 2.5, some experimental +features are enabled by default. + +Experimental features are enabled to allow for feedback +from users. Every experimental feature can be disabled +by setting an environment variable. Some features are +not ready for general testing and can only be *enabled* using an +environment variable. + +Please see the :ref:`experimental features ` +page for more details on the current features and how to enable +or disable them. diff --git a/doc/source/tune/api/env.rst b/doc/source/tune/api/env.rst index df8b9580f5ae..c6846107484e 100644 --- a/doc/source/tune/api/env.rst +++ b/doc/source/tune/api/env.rst @@ -21,6 +21,7 @@ These are the environment variables Ray Tune currently considers: * **TUNE_DISABLE_DATED_SUBDIR**: Ray Tune automatically adds a date string to experiment directories when the name is not specified explicitly or the trainable isn't passed as a string. Setting this environment variable to ``1`` disables adding these date strings. +* **TUNE_NEW_EXECUTION**: Disable :ref:`Ray Tune's new execution engine `. 
* **TUNE_DISABLE_STRICT_METRIC_CHECKING**: When you report metrics to Tune via ``session.report()`` and passed a ``metric`` parameter to ``Tuner()``, a scheduler, or a search algorithm, Tune will error diff --git a/python/ray/air/config.py b/python/ray/air/config.py index f8c262021f43..964f5f9afcb8 100644 --- a/python/ray/air/config.py +++ b/python/ray/air/config.py @@ -31,6 +31,7 @@ from ray.tune.search.sample import Domain from ray.tune.stopper import Stopper from ray.tune.syncer import SyncConfig + from ray.tune.experimental.output import AirVerbosity from ray.tune.utils.log import Verbosity from ray.tune.execution.placement_groups import PlacementGroupFactory @@ -726,9 +727,12 @@ class RunConfig: intermediate experiment progress. Defaults to CLIReporter if running in command-line, or JupyterNotebookReporter if running in a Jupyter notebook. - verbose: 0, 1, 2, or 3. Verbosity mode. + verbose: 0, 1, or 2. Verbosity mode. + 0 = silent, 1 = default, 2 = verbose. Defaults to 1. + If the ``RAY_AIR_NEW_OUTPUT=0`` environment variable is set, + uses the old verbosity settings: 0 = silent, 1 = only status updates, 2 = status and brief - results, 3 = status and detailed results. Defaults to 2. + results, 3 = status and detailed results. log_to_file: Log stdout and stderr to files in trial directories. If this is `False` (default), no files are written. 
If `true`, outputs are written to `trialdir/stdout` @@ -748,7 +752,7 @@ class RunConfig: sync_config: Optional["SyncConfig"] = None checkpoint_config: Optional[CheckpointConfig] = None progress_reporter: Optional["ProgressReporter"] = None - verbose: Union[int, "Verbosity"] = 3 + verbose: Optional[Union[int, "AirVerbosity", "Verbosity"]] = None log_to_file: Union[bool, str, Tuple[str, str]] = False # Deprecated @@ -757,6 +761,7 @@ class RunConfig: def __post_init__(self): from ray.tune.syncer import SyncConfig, Syncer from ray.tune.utils.util import _resolve_storage_path + from ray.tune.experimental.output import AirVerbosity, get_air_verbosity if not self.failure_config: self.failure_config = FailureConfig() @@ -822,6 +827,13 @@ def __post_init__(self): "Must specify a remote `storage_path` to use a custom `syncer`." ) + if self.verbose is None: + # Default `verbose` value. For new output engine, + # this is AirVerbosity.DEFAULT. + # For old output engine, this is Verbosity.V3_TRIAL_DETAILS + # Todo (krfricke): Currently uses number to pass test_configs::test_repr + self.verbose = get_air_verbosity(AirVerbosity.DEFAULT) or 3 + def __repr__(self): from ray.tune.syncer import SyncConfig diff --git a/python/ray/air/constants.py b/python/ray/air/constants.py index ddde5372d4bf..f4a85ec59d66 100644 --- a/python/ray/air/constants.py +++ b/python/ray/air/constants.py @@ -66,4 +66,7 @@ AIR_ENV_VARS = { COPY_DIRECTORY_CHECKPOINTS_INSTEAD_OF_MOVING_ENV, DISABLE_LAZY_CHECKPOINTING_ENV, + "RAY_AIR_FULL_TRACEBACKS", + "RAY_AIR_NEW_OUTPUT", + "RAY_AIR_RICH_LAYOUT", } diff --git a/python/ray/tune/constants.py b/python/ray/tune/constants.py index 6d3c84dc4c7f..0b6698aecb26 100644 --- a/python/ray/tune/constants.py +++ b/python/ray/tune/constants.py @@ -5,11 +5,11 @@ # NOTE: When adding a new environment variable, please track it in this list. 
TUNE_ENV_VARS = { "RAY_AIR_LOCAL_CACHE_DIR", - "RAY_AIR_FULL_TRACEBACKS", "TUNE_DISABLE_AUTO_CALLBACK_LOGGERS", "TUNE_DISABLE_AUTO_CALLBACK_SYNCER", "TUNE_DISABLE_AUTO_INIT", "TUNE_DISABLE_DATED_SUBDIR", + "TUNE_NEW_EXECUTION", "TUNE_DISABLE_STRICT_METRIC_CHECKING", "TUNE_DISABLE_SIGINT_HANDLER", "TUNE_FALLBACK_TO_LATEST_CHECKPOINT", diff --git a/python/ray/tune/experimental/output.py b/python/ray/tune/experimental/output.py index 3820c9549e41..bd7dcf851e4a 100644 --- a/python/ray/tune/experimental/output.py +++ b/python/ray/tune/experimental/output.py @@ -1,5 +1,15 @@ import sys -from typing import Any, Collection, Dict, Iterable, List, Optional, Tuple, TYPE_CHECKING +from typing import ( + Any, + Collection, + Dict, + Iterable, + List, + Optional, + Tuple, + Union, + TYPE_CHECKING, +) import contextlib import collections @@ -15,6 +25,8 @@ import textwrap import time +from ray.tune.utils.log import Verbosity + try: import rich import rich.layout @@ -90,10 +102,21 @@ class AirVerbosity(IntEnum): IS_NOTEBOOK = ray.widgets.util.in_notebook() -def get_air_verbosity() -> Optional[AirVerbosity]: - verbosity = os.environ.get("AIR_VERBOSITY", None) - if verbosity: - return AirVerbosity(int(verbosity)) if verbosity else None +def get_air_verbosity( + verbose: Union[int, AirVerbosity, Verbosity] +) -> Optional[AirVerbosity]: + if os.environ.get("RAY_AIR_NEW_OUTPUT", "0") == "0": + return None + + if isinstance(verbose, AirVerbosity): + return verbose + + verbose_int = verbose if isinstance(verbose, int) else verbose.value + + # Verbosity 2 and 3 both map to AirVerbosity 2 + verbose_int = min(2, verbose_int) + + return AirVerbosity(verbose_int) def _get_time_str(start_time: float, current_time: float) -> Tuple[str, str]: @@ -520,7 +543,7 @@ def _detect_reporter( mode: Optional[str] = None, ): # TODO: Add JupyterNotebook and Ray Client case later. 
- rich_enabled = "ENABLE_RICH" in os.environ + rich_enabled = bool(int(os.environ.get("RAY_AIR_RICH_LAYOUT", "0"))) if num_samples and num_samples > 1: if rich_enabled: if not rich: @@ -530,7 +553,7 @@ def _detect_reporter( reporter = TuneTerminalReporter(verbosity, num_samples, metric, mode) else: if rich_enabled: - logger.warning("`ENABLE_RICH` is only effective with Tune usecase.") + logger.warning("`RAY_AIR_RICH_LAYOUT` is only effective with Tune usecase.") reporter = TrainReporter(verbosity) return reporter diff --git a/python/ray/tune/tests/test_client.py b/python/ray/tune/tests/test_client.py index 6252574fead1..b8eaf0e26136 100644 --- a/python/ray/tune/tests/test_client.py +++ b/python/ray/tune/tests/test_client.py @@ -29,6 +29,17 @@ def start_client_server_2_cpus(): ray.shutdown() +@pytest.fixture +def legacy_progress_reporter(): + old_val = os.environ.get("RAY_AIR_NEW_OUTPUT") + os.environ["RAY_AIR_NEW_OUTPUT"] = "0" + yield + if old_val is None: + os.environ.pop("RAY_AIR_NEW_OUTPUT") + else: + os.environ["RAY_AIR_NEW_OUTPUT"] = old_val + + @pytest.fixture def start_client_server_4_cpus(): ray.init(num_cpus=4) @@ -37,49 +48,51 @@ def start_client_server_4_cpus(): ray.shutdown() -def test_pbt_function(start_client_server_2_cpus): +def test_pbt_function(legacy_progress_reporter, start_client_server_2_cpus): assert ray.util.client.ray.is_connected() from ray.tune.examples.pbt_function import run_tune_pbt run_tune_pbt() -def test_optuna_example(start_client_server): +def test_optuna_example(legacy_progress_reporter, start_client_server): assert ray.util.client.ray.is_connected() from ray.tune.examples.optuna_example import run_optuna_tune run_optuna_tune(smoke_test=True) -def test_cifar10_pytorch(start_client_server_2_cpus): +def test_cifar10_pytorch(legacy_progress_reporter, start_client_server_2_cpus): assert ray.util.client.ray.is_connected() from ray.tune.examples.cifar10_pytorch import main main(num_samples=1, max_num_epochs=1, gpus_per_trial=0) -def 
test_tune_mnist_keras(start_client_server_4_cpus): +def test_tune_mnist_keras(legacy_progress_reporter, start_client_server_4_cpus): assert ray.util.client.ray.is_connected() from ray.tune.examples.tune_mnist_keras import tune_mnist tune_mnist(num_training_iterations=5) -def test_mnist_ptl_mini(start_client_server): +def test_mnist_ptl_mini(legacy_progress_reporter, start_client_server): assert ray.util.client.ray.is_connected() from ray.tune.examples.mnist_ptl_mini import tune_mnist tune_mnist(num_samples=1, num_epochs=1, gpus_per_trial=0) -def test_xgboost_example(start_client_server): +def test_xgboost_example(legacy_progress_reporter, start_client_server): assert ray.util.client.ray.is_connected() from ray.tune.examples.xgboost_example import tune_xgboost tune_xgboost() -def test_xgboost_dynamic_resources_example(start_client_server): +def test_xgboost_dynamic_resources_example( + legacy_progress_reporter, start_client_server +): assert ray.util.client.ray.is_connected() from ray.tune.examples.xgboost_dynamic_resources_example import tune_xgboost @@ -87,7 +100,7 @@ def test_xgboost_dynamic_resources_example(start_client_server): tune_xgboost(use_class_trainable=False) -def test_mlflow_example(start_client_server): +def test_mlflow_example(legacy_progress_reporter, start_client_server): assert ray.util.client.ray.is_connected() from ray.tune.examples.mlflow_example import tune_with_callback, tune_with_setup @@ -96,14 +109,14 @@ def test_mlflow_example(start_client_server): tune_with_setup(mlflow_tracking_uri, finish_fast=True) -def test_pbt_transformers(start_client_server): +def test_pbt_transformers(legacy_progress_reporter, start_client_server): assert ray.util.client.ray.is_connected() from ray.tune.examples.pbt_transformers.pbt_transformers import tune_transformer tune_transformer(num_samples=1, gpus_per_trial=0, smoke_test=True) -def test_jupyter_rich_output(start_client_server_4_cpus): +def test_jupyter_rich_output(legacy_progress_reporter, 
start_client_server_4_cpus): assert ray.util.client.ray.is_connected() def dummy_objective(config): diff --git a/python/ray/tune/tests/test_progress_reporter.py b/python/ray/tune/tests/test_progress_reporter.py index 2691a227e9ca..d1e8a0c93119 100644 --- a/python/ray/tune/tests/test_progress_reporter.py +++ b/python/ray/tune/tests/test_progress_reporter.py @@ -325,12 +325,10 @@ def train(config): # Add "verbose=3)" etc -@pytest.mark.skipif( - "AIR_VERBOSITY" in os.environ, reason="console v2 doesn't work with this v1 test." -) class ProgressReporterTest(unittest.TestCase): def setUp(self) -> None: os.environ["TUNE_MAX_PENDING_TRIALS_PG"] = "auto" + os.environ["RAY_AIR_NEW_OUTPUT"] = "0" def mock_trial(self, status, i): mock = MagicMock() @@ -402,7 +400,7 @@ def test(config): for i in range(3): tune.report(**test_result) - analysis = tune.run(test, num_samples=3) + analysis = tune.run(test, num_samples=3, verbose=3) all_trials = analysis.trials inferred_results = reporter._infer_user_metrics(all_trials) for metric in inferred_results: @@ -421,7 +419,7 @@ def report(self, *args, **kwargs): self._output.append(progress_str) reporter = TestReporter() - analysis = tune.run(test, num_samples=3, progress_reporter=reporter) + analysis = tune.run(test, num_samples=3, progress_reporter=reporter, verbose=3) found = {k: False for k in test_result} for output in reporter._output: for key in test_result: @@ -799,7 +797,12 @@ def should_report(self, trials, done=False): def report(self, trials, done, *sys_info): pass - tune.run(lambda config: 2, num_samples=1, progress_reporter=CustomReporter()) + tune.run( + lambda config: 2, + num_samples=1, + progress_reporter=CustomReporter(), + verbose=3, + ) def testMaxLen(self): trials = [] diff --git a/python/ray/tune/tune.py b/python/ray/tune/tune.py index 4c5c7dd2983a..04398faa9ac8 100644 --- a/python/ray/tune/tune.py +++ b/python/ray/tune/tune.py @@ -35,6 +35,7 @@ get_air_verbosity, _detect_reporter as _detect_air_reporter, 
IS_NOTEBOOK, + AirVerbosity, ) from ray.tune.impl.placeholder import create_resolvers_map, inject_placeholders @@ -256,7 +257,7 @@ def run( checkpoint_at_end: bool = False, checkpoint_keep_all_ranks: bool = False, checkpoint_upload_from_workers: bool = False, - verbose: Union[int, Verbosity] = Verbosity.V3_TRIAL_DETAILS, + verbose: Optional[Union[int, AirVerbosity, Verbosity]] = None, progress_reporter: Optional[ProgressReporter] = None, log_to_file: bool = False, trial_name_creator: Optional[Callable[[Trial], str]] = None, @@ -394,9 +395,11 @@ def run( training workers. checkpoint_upload_from_workers: Whether to upload checkpoint files directly from distributed training workers. - verbose: 0, 1, 2, or 3. Verbosity mode. - 0 = silent, 1 = only status updates, 2 = status and brief trial - results, 3 = status and detailed trial results. Defaults to 3. + verbose: 0, 1, or 2. Verbosity mode. + 0 = silent, 1 = default, 2 = verbose. Defaults to 1. + If ``RAY_AIR_NEW_OUTPUT=0``, uses the old verbosity settings: + 0 = silent, 1 = only status updates, 2 = status and brief + results, 3 = status and detailed results. progress_reporter: Progress reporter for reporting intermediate experiment progress. Defaults to CLIReporter if running in command-line, or JupyterNotebookReporter if running in @@ -529,11 +532,18 @@ class and registered trainables. DeprecationWarning, ) + if verbose is None: + # Default `verbose` value. For new output engine, this is AirVerbosity.VERBOSE. + # For old output engine, this is Verbosity.V3_TRIAL_DETAILS + verbose = get_air_verbosity(AirVerbosity.VERBOSE) or Verbosity.V3_TRIAL_DETAILS + if _remote: - if get_air_verbosity() is not None: - logger.warning( - "Ignoring AIR_VERBOSITY setting, " - "as it doesn't support ray client mode yet." + if get_air_verbosity(verbose) is not None: + logger.info( + "[output] This uses the legacy output and progress reporter, " + "as Ray client is not supported by the new engine. 
" + "For more information, see " + "https://docs.ray.io/en/master/ray-air/experimental-features.html" ) remote_run = ray.remote(num_cpus=0)(run) @@ -586,21 +596,28 @@ class and registered trainables. "must be one of ['min', 'max']" ) - air_verbosity = get_air_verbosity() + air_verbosity = get_air_verbosity(verbose) if air_verbosity is not None and IS_NOTEBOOK: - logger.warning( - "Ignoring AIR_VERBOSITY setting, " - "as it doesn't support JupyterNotebook mode yet." + logger.info( + "[output] This uses the legacy output and progress reporter, " + "as Jupyter notebooks are not supported by the new engine, yet. " + "For more information, please see " + "https://docs.ray.io/en/master/ray-air/experimental-features.html" ) air_verbosity = None if air_verbosity is not None: - logger.warning( - f"Testing new AIR console output flow with verbosity={air_verbosity}. " - f"This will also disable the old flow - setting it to 0 now." + logger.info( + f"[output] This will use the new output engine with verbosity " + f"{air_verbosity}. To disable the new output and use the legacy " + f"output engine, set the environment variable RAY_AIR_NEW_OUTPUT=0. " + f"For more information, please see " + f"https://docs.ray.io/en/master/ray-air/experimental-features.html" ) + # Disable old output engine set_verbosity(0) else: + # Use old output engine set_verbosity(verbose) config = config or {} @@ -979,7 +996,7 @@ class and registered trainables. air_verbosity, search_alg.total_samples, metric=metric, mode=mode ) - # rich live context manager has to be called encapsulting + # rich live context manager has to be called encapsulating # the while loop. For other kind of reporters, no op. # `ExitStack` allows us to *conditionally* apply context manager. 
with contextlib.ExitStack() as stack: @@ -1085,7 +1102,7 @@ def run_experiments( experiments: Union[Experiment, Mapping, Sequence[Union[Experiment, Mapping]]], scheduler: Optional[TrialScheduler] = None, server_port: Optional[int] = None, - verbose: Union[int, Verbosity] = Verbosity.V3_TRIAL_DETAILS, + verbose: Optional[Union[int, AirVerbosity, Verbosity]] = None, progress_reporter: Optional[ProgressReporter] = None, resume: Union[bool, str] = False, reuse_actors: Optional[bool] = None, @@ -1119,11 +1136,18 @@ def run_experiments( if not trial_executor or isinstance(trial_executor, RayTrialExecutor): _ray_auto_init(entrypoint="tune.run_experiments(...)") + if verbose is None: + # Default `verbose` value. For new output engine, this is AirVerbosity.VERBOSE. + # For old output engine, this is Verbosity.V3_TRIAL_DETAILS + verbose = get_air_verbosity(AirVerbosity.VERBOSE) or Verbosity.V3_TRIAL_DETAILS + if _remote: - if get_air_verbosity() is not None: - logger.warning( - "Ignoring AIR_VERBOSITY setting, " - "as it doesn't support ray client mode yet." + if get_air_verbosity(verbose) is not None: + logger.info( + "[output] This uses the legacy output and progress reporter, " + "as Ray client is not supported by the new engine. " + "For more information, see " + "https://docs.ray.io/en/master/ray-air/experimental-features.html" ) remote_run = ray.remote(num_cpus=0)(run_experiments) diff --git a/python/ray/tune/tuner.py b/python/ray/tune/tuner.py index 1658188b9530..08a1cd2de0a9 100644 --- a/python/ray/tune/tuner.py +++ b/python/ray/tune/tuner.py @@ -150,11 +150,15 @@ def __init__( """Configure and construct a tune run.""" kwargs = locals().copy() self._is_ray_client = ray.util.client.ray.is_connected() - if self._is_ray_client and get_air_verbosity() is not None: - logger.warning( - "Ignoring AIR_VERBOSITY setting, " - "as it doesn't support ray client mode yet." 
- ) + if self._is_ray_client: + _run_config = run_config or RunConfig() + if get_air_verbosity(_run_config.verbose) is not None: + logger.info( + "[output] This uses the legacy output and progress reporter, " + "as Ray client is not supported by the new engine. " + "For more information, see " + "https://docs.ray.io/en/master/ray-air/experimental-features.html" + ) if _tuner_internal: if not self._is_ray_client: From 959f8dc1363700c18a27bfd565c762a8b02e9e2a Mon Sep 17 00:00:00 2001 From: Cuong Nguyen <128072568+can-anyscale@users.noreply.github.com> Date: Tue, 16 May 2023 08:05:38 -0700 Subject: [PATCH 411/424] [ci][byod/1] clean up local environment setup for release tests (#35355) All release tests are using remote execution via anyscale at this point. Remove code path for local environment setup. In particular, the driver_setup is used to install packages on buildkite host, which is no longer neccessary/used. Signed-off-by: Cuong Nguyen --- release/ray_release/glue.py | 36 -------- release/ray_release/schema.json | 6 -- release/ray_release/tests/test_glue.py | 118 +------------------------ release/release_tests.yaml | 11 --- 4 files changed, 1 insertion(+), 170 deletions(-) diff --git a/release/ray_release/glue.py b/release/ray_release/glue.py index 521bcc73d38b..428a01671d6b 100644 --- a/release/ray_release/glue.py +++ b/release/ray_release/glue.py @@ -20,7 +20,6 @@ DEFAULT_WAIT_FOR_NODES_TIMEOUT, RELEASE_PACKAGE_DIR, DEFAULT_AUTOSUSPEND_MINS, - validate_test, ) from ray_release.template import load_test_cluster_env, load_test_cluster_compute from ray_release.exception import ( @@ -32,7 +31,6 @@ PrepareCommandTimeout, TestCommandError, TestCommandTimeout, - LocalEnvSetupError, ClusterEnvCreateError, ) from ray_release.file_manager.job_file_manager import JobFileManager @@ -44,11 +42,6 @@ reset_signal_handling, register_handler, ) -from ray_release.util import ( - run_bash_script, - get_pip_packages, - reinstall_anyscale_dependencies, -) 
type_str_to_command_runner = { "job": JobRunner, @@ -83,7 +76,6 @@ def _load_test_configuration( smoke_test: bool = False, no_terminate: bool = False, ) -> Tuple[ClusterManager, CommandRunner, str]: - validate_test(test) logger.info(f"Test config: {test}") # Populate result paramaters @@ -235,26 +227,6 @@ def _setup_cluster_environment( return prepare_cmd, prepare_timeout, build_timeout, cluster_timeout, command_timeout -def _setup_local_environment( - test: Test, - command_runner: CommandRunner, - ray_wheels_url: str, -) -> None: - driver_setup_script = test.get("driver_setup", None) - if driver_setup_script: - try: - run_bash_script(driver_setup_script) - except Exception as e: - raise LocalEnvSetupError(f"Driver setup script failed: {e}") from e - - # Install local dependencies - command_runner.prepare_local_env(ray_wheels_url) - - # Re-install anyscale package as local dependencies might have changed - # from local env setup - reinstall_anyscale_dependencies() - - def _local_environment_information( result: Result, cluster_manager: ClusterManager, @@ -265,10 +237,6 @@ def _local_environment_information( cluster_id: Optional[str], cluster_env_id: Optional[str], ) -> None: - pip_packages = get_pip_packages() - pip_package_string = "\n".join(pip_packages) - logger.info(f"Installed python packages:\n{pip_package_string}") - if isinstance(cluster_manager, FullClusterManager): if not no_terminate: register_handler( @@ -456,10 +424,6 @@ def run_release_test( cluster_env_id, ) - buildkite_group(":nut_and_bolt: Setting up local environment") - _setup_local_environment(test, command_runner, ray_wheels_url) - - # Print installed pip packages buildkite_group(":bulb: Local environment information") _local_environment_information( result, diff --git a/release/ray_release/schema.json b/release/ray_release/schema.json index 032f46db3f0b..4c8caeb75295 100644 --- a/release/ray_release/schema.json +++ b/release/ray_release/schema.json @@ -45,9 +45,6 @@ "team": { "type": "string" 
}, - "driver_setup": { - "type": "string" - }, "cluster": { "$ref": "#/definitions/Cluster" }, @@ -167,9 +164,6 @@ "env": { "type": "string" }, - "driver_setup": { - "type": "string" - }, "cluster": { "type": "object" }, diff --git a/release/ray_release/tests/test_glue.py b/release/ray_release/tests/test_glue.py index ae038d5cb79b..11239ac3db3f 100644 --- a/release/ray_release/tests/test_glue.py +++ b/release/ray_release/tests/test_glue.py @@ -12,15 +12,9 @@ from ray_release.cluster_manager.cluster_manager import ClusterManager from ray_release.cluster_manager.full import FullClusterManager from ray_release.command_runner.command_runner import CommandRunner -from ray_release.config import ( - Test, - DEFAULT_COMMAND_TIMEOUT, - DEFAULT_WAIT_FOR_NODES_TIMEOUT, -) +from ray_release.config import Test from ray_release.exception import ( ReleaseTestConfigError, - LocalEnvSetupError, - ClusterComputeCreateError, ClusterEnvBuildError, ClusterEnvBuildTimeout, ClusterEnvCreateError, @@ -44,7 +38,6 @@ run_release_test, type_str_to_command_runner, command_runner_to_cluster_manager, - TIMEOUT_BUFFER_MINUTES, ) from ray_release.logger import logger from ray_release.reporter.reporter import Reporter @@ -73,8 +66,6 @@ def __getattribute__(self, item): return object.__getattribute__(self, item) -@patch("ray_release.glue.reinstall_anyscale_dependencies", lambda: None) -@patch("ray_release.glue.get_pip_packages", lambda: ["pip-packages"]) class GlueTest(unittest.TestCase): def writeClusterEnv(self, content: str): with open(os.path.join(self.tempdir, "cluster_env.yaml"), "wt") as fp: @@ -174,7 +165,6 @@ def mock_alerter(test: Test, result: Result): cluster_env="cluster_env.yaml", cluster_compute="cluster_compute.yaml" ), alert="unit_test_alerter", - driver_setup="driver_fail.sh", ) self.anyscale_project = "prj_unit12345678" self.ray_wheels_url = "http://mock.wheels/" @@ -184,16 +174,6 @@ def tearDown(self) -> None: def _succeed_until(self, until: str): # These commands should succeed 
- self.command_runner_return["prepare_local_env"] = None - - if until == "local_env": - return - - self.test["driver_setup"] = "driver_succeed.sh" - - if until == "driver_setup": - return - self.cluster_manager_return["cluster_compute_id"] = "valid" self.cluster_manager_return["create_cluster_compute"] = None @@ -316,102 +296,6 @@ def testInvalidClusterCompute(self): self.assertEqual(result.return_code, ExitCode.CONFIG_ERROR.value) - def testAutomaticClusterEnvVariables(self): - result = Result() - - self._succeed_until("local_env") - - with self.assertRaises(LocalEnvSetupError): - self._run(result) - - cluster_manager = self.instances["cluster_manager"] - - command_timeout = self.test["run"].get("timeout", DEFAULT_COMMAND_TIMEOUT) - prepare_cmd = self.test["run"].get("prepare", None) - if prepare_cmd: - prepare_timeout = self.test["run"].get("prepare_timeout", command_timeout) - else: - prepare_timeout = 0 - command_and_prepare_timeout = command_timeout + prepare_timeout - - wait_timeout = self.test["run"]["wait_for_nodes"].get( - "timeout", DEFAULT_WAIT_FOR_NODES_TIMEOUT - ) - - expected_idle_termination_minutes = int( - command_and_prepare_timeout / 60 + TIMEOUT_BUFFER_MINUTES - ) - expected_maximum_uptime_minutes = int( - expected_idle_termination_minutes + wait_timeout + TIMEOUT_BUFFER_MINUTES - ) - - self.assertEqual( - cluster_manager.cluster_compute["idle_termination_minutes"], - expected_idle_termination_minutes, - ) - self.assertEqual( - cluster_manager.cluster_compute["maximum_uptime_minutes"], - expected_maximum_uptime_minutes, - ) - - def testInvalidPrepareLocalEnv(self): - result = Result() - - self.command_runner_return["prepare_local_env"] = _fail_on_call( - LocalEnvSetupError - ) - with self.assertRaises(LocalEnvSetupError): - self._run(result) - self.assertEqual(result.return_code, ExitCode.LOCAL_ENV_SETUP_ERROR.value) - - def testDriverSetupFails(self): - result = Result() - - self._succeed_until("local_env") - - with 
self.assertRaises(LocalEnvSetupError): - self._run(result) - self.assertEqual(result.return_code, ExitCode.LOCAL_ENV_SETUP_ERROR.value) - - def testInvalidClusterIdOverride(self): - result = Result() - - self._succeed_until("driver_setup") - - self.sdk.returns["get_cluster_environment"] = None - - with self.assertRaises(ClusterEnvCreateError): - self._run(result, cluster_env_id="existing") - - self.sdk.returns["get_cluster_environment"] = APIDict( - result=APIDict(config_json={"overridden": True}) - ) - - with self.assertRaises(Exception) as cm: # Fail somewhere else - self._run(result, cluster_env_id="existing") - self.assertNotIsInstance(cm.exception, ClusterEnvCreateError) - - def testBuildConfigFailsClusterCompute(self): - result = Result() - - self._succeed_until("driver_setup") - - # These commands should succeed - self.command_runner_return["prepare_local_env"] = None - - # Fails because API response faulty - with self.assertRaisesRegex(ClusterComputeCreateError, "Unexpected"): - self._run(result) - self.assertEqual(result.return_code, ExitCode.CLUSTER_RESOURCE_ERROR.value) - - # Fails for random cluster compute reason - self.cluster_manager_return["create_cluster_compute"] = _fail_on_call( - ClusterComputeCreateError, "Known" - ) - with self.assertRaisesRegex(ClusterComputeCreateError, "Known"): - self._run(result) - self.assertEqual(result.return_code, ExitCode.CLUSTER_RESOURCE_ERROR.value) - def testBuildConfigFailsClusterEnv(self): result = Result() diff --git a/release/release_tests.yaml b/release/release_tests.yaml index dc33f3ffb051..c8984316c172 100644 --- a/release/release_tests.yaml +++ b/release/release_tests.yaml @@ -22,10 +22,6 @@ # # on. This must be a string! # python: "3.7" # -# # Optional location of a bash setup script to run on the driver -# # when setting up the local environment. 
Relative to working_dir -# driver_setup: setup_driver.sh -# # # Cluster information # cluster: # # Location of cluster env, relative to working_dir @@ -1474,7 +1470,6 @@ cluster_env: horovod/app_config.yaml cluster_compute: horovod/compute_tpl_aws.yaml - driver_setup: horovod/driver_setup_latest.sh run: timeout: 1200 script: python horovod/horovod_user_test.py @@ -1503,7 +1498,6 @@ cluster_env: horovod/app_config_master.yaml cluster_compute: horovod/compute_tpl_aws.yaml - driver_setup: horovod/driver_setup_master.sh run: timeout: 1200 script: python horovod/horovod_user_test.py @@ -1532,7 +1526,6 @@ cluster_env: train/app_config.yaml cluster_compute: train/compute_tpl_aws.yaml - driver_setup: train/driver_setup.sh run: timeout: 36000 script: python train/train_tensorflow_mnist_test.py @@ -1561,7 +1554,6 @@ cluster_env: train/app_config.yaml cluster_compute: train/compute_tpl_aws.yaml - driver_setup: train/driver_setup.sh run: timeout: 36000 script: python train/train_torch_linear_test.py @@ -1646,7 +1638,6 @@ cluster_env: ray-lightning/app_config.yaml cluster_compute: ray-lightning/compute_tpl_aws.yaml - driver_setup: ray-lightning/driver_setup.sh run: timeout: 1200 script: python ray-lightning/ray_lightning_user_test.py @@ -1675,7 +1666,6 @@ cluster_env: ray-lightning/app_config_master.yaml cluster_compute: ray-lightning/compute_tpl_aws.yaml - driver_setup: ray-lightning/driver_setup.sh run: timeout: 1200 script: python ray-lightning/ray_lightning_user_test.py @@ -1704,7 +1694,6 @@ cluster_env: ../rllib_tests/app_config.yaml cluster_compute: tune_rllib/compute_tpl_aws.yaml - driver_setup: tune_rllib/driver_setup.sh run: timeout: 2000 script: python tune_rllib/run_connect_tests.py From 40076f24f439ebe0039f09a38647732d40b20a58 Mon Sep 17 00:00:00 2001 From: Justin Yu Date: Tue, 16 May 2023 08:35:24 -0700 Subject: [PATCH 412/424] [AIR][Telemetry] Cluster storage configuration (#34905) This PR adds telemetry for the storage / syncing configuration by adding 1 usage 
tag: `AIR_STORAGE_CONFIGURATION` The storage configuration is set by `RunConfig(storage_path, sync_config)`. The possible configurations are: - 'driver' = Default head node syncing if no remote path is specified - 'local' = No synchronization at all. - 'nfs' = Using a mounted shared network filesystem. - ('s3', 'gs', 'hdfs', 'custom_remote_storage'): Various remote storage schemes. - ('local_uri', 'memory'): Mostly used by internal testing by setting `storage_path` to `file://` or `memory://`. Signed-off-by: Justin Yu --- python/ray/air/_internal/remote_storage.py | 23 +++++ python/ray/air/_internal/usage.py | 58 ++++++++++- python/ray/air/tests/test_air_usage.py | 71 +++++++++++-- python/ray/air/tests/test_remote_storage.py | 20 ++++ python/ray/tests/test_usage_stats.py | 1 + python/ray/tune/tune.py | 106 ++++++++++++-------- src/ray/protobuf/usage.proto | 3 +- 7 files changed, 231 insertions(+), 51 deletions(-) diff --git a/python/ray/air/_internal/remote_storage.py b/python/ray/air/_internal/remote_storage.py index c899b1d20727..05180651ccf8 100644 --- a/python/ray/air/_internal/remote_storage.py +++ b/python/ray/air/_internal/remote_storage.py @@ -5,6 +5,7 @@ import urllib.parse from pathlib import Path from pkg_resources import packaging +import psutil import shutil from typing import Any, Dict, List, Optional, Tuple @@ -136,6 +137,28 @@ def is_non_local_path_uri(uri: str) -> bool: _cached_fs = {} +def _get_network_mounts() -> List[str]: + """Get mounted network filesystems on the current node. + + Network file system (NFS), server message block (SMB) and + common internet file system (CIFS) are all file access storage protocols, + used to access files on remote servers and storage servers (such as NAS storage) + as if they were local files. 
+ """ + partitions = psutil.disk_partitions(all=True) + network_fstypes = ("nfs", "smbfs", "cifs") + return [p.mountpoint for p in partitions if p.fstype in network_fstypes] + + +def _is_network_mount(path: str) -> bool: + """Checks if a path is within a mounted network filesystem.""" + resolved_path = Path(path).expanduser().resolve() + network_mounts = set(Path(mount) for mount in _get_network_mounts()) + + # Check if any of the network mounts are one of the path's parents. + return bool(set(resolved_path.parents).intersection(network_mounts)) + + def is_local_path(path: str) -> bool: """Check if a given path is a local path or a remote URI.""" if sys.platform == "win32": diff --git a/python/ray/air/_internal/usage.py b/python/ray/air/_internal/usage.py index 1d5fac1689cb..68a2765f6a10 100644 --- a/python/ray/air/_internal/usage.py +++ b/python/ray/air/_internal/usage.py @@ -1,13 +1,16 @@ import json import os -from typing import TYPE_CHECKING, Set, Union +from typing import TYPE_CHECKING, Optional, Set, Union +import urllib.parse +from ray.air._internal.remote_storage import _is_network_mount from ray._private.usage.usage_lib import TagKey, record_extra_usage_tag if TYPE_CHECKING: from ray.train.trainer import BaseTrainer from ray.tune.schedulers import TrialScheduler from ray.tune.search import BasicVariantGenerator, Searcher + from ray.tune import SyncConfig AIR_TRAINERS = { "AccelerateTrainer", @@ -119,6 +122,59 @@ def tag_scheduler(scheduler: "TrialScheduler"): record_extra_usage_tag(TagKey.TUNE_SCHEDULER, scheduler_name) +def _get_tag_for_remote_path(remote_path: str) -> str: + scheme = urllib.parse.urlparse(remote_path).scheme + if scheme == "file": + # NOTE: We treat a file:// storage_path as a "remote" path, so this case + # differs from the local path only case. + # In particular, default syncing to head node is not enabled here. + tag = "local_uri" + elif scheme == "memory": + # NOTE: This is used in tests and does not make sense to actually use. 
+ # This condition filters the tag out of the `custom` catch-all. + tag = "memory" + elif scheme == "hdfs": + tag = "hdfs" + elif scheme in {"s3", "s3a"}: + tag = "s3" + elif scheme in {"gs", "gcs"}: + tag = "gs" + else: + tag = "custom_remote_storage" + return tag + + +def tag_ray_air_storage_config( + local_path: str, remote_path: Optional[str], sync_config: "SyncConfig" +) -> None: + """Records the storage storage configuration of an experiment. + + The storage configuration is set by `RunConfig(storage_path, sync_config)`. + + The possible configurations are: + - 'driver' = Default syncing to Tune driver node if no remote path is specified. + - 'local' = No synchronization at all. + - 'nfs' = Using a mounted shared network filesystem. + - ('s3', 'gs', 'hdfs', 'custom_remote_storage'): Various remote storage schemes. + - ('local_uri', 'memory'): Mostly used by internal testing by setting `storage_path` + to `file://` or `memory://`. + """ + if remote_path: + # HDFS or cloud storage + storage_config_tag = _get_tag_for_remote_path(remote_path) + elif _is_network_mount(local_path): + # NFS + storage_config_tag = "nfs" + elif sync_config.syncer is None: + # Syncing is disabled - results are only available on node-local storage + storage_config_tag = "local" + else: + # The driver node's local storage is the synchronization point. + storage_config_tag = "driver" + + record_extra_usage_tag(TagKey.AIR_STORAGE_CONFIGURATION, storage_config_tag) + + def tag_ray_air_env_vars() -> bool: """Records usage of environment variables exposed by the Ray AIR libraries. 
diff --git a/python/ray/air/tests/test_air_usage.py b/python/ray/air/tests/test_air_usage.py index 8fa29c833a31..013f041d9d36 100644 --- a/python/ray/air/tests/test_air_usage.py +++ b/python/ray/air/tests/test_air_usage.py @@ -1,5 +1,6 @@ """Unit tests for AIR telemetry.""" +from collections import namedtuple import json import os @@ -8,8 +9,9 @@ import ray from ray import air, tune -from ray._private.usage.usage_lib import TagKey from ray.air import session +from ray._private.usage.usage_lib import TagKey +from ray.air._internal import usage as air_usage @pytest.fixture @@ -29,6 +31,15 @@ def mock_record_extra_usage_tag(key: TagKey, value: str): yield recorded +def train_fn(config): + session.report({"score": 1}) + + +@pytest.fixture +def tuner(tmp_path): + yield tune.Tuner(train_fn, run_config=air.RunConfig(storage_path=str(tmp_path))) + + @pytest.fixture(scope="module") def ray_start_2_cpus(): address_info = ray.init(num_cpus=2) @@ -36,13 +47,59 @@ def ray_start_2_cpus(): ray.shutdown() -def train_fn(config): - session.report({"score": 1}) - +# (nfs: bool, remote_path: str | None, syncing_disabled: bool, expected: str) +_StorageTestConfig = namedtuple( + "StorageTestConfig", ["nfs", "remote_path", "syncing_disabled", "expected"] +) + +_storage_test_configs = [ + # Local + _StorageTestConfig(False, None, False, "driver"), + _StorageTestConfig(False, None, True, "local"), + # Remote + _StorageTestConfig(False, "s3://mock/bucket?param=1", False, "s3"), + _StorageTestConfig(False, "gs://mock/bucket?param=1", False, "gs"), + _StorageTestConfig(False, "hdfs://mock/bucket?param=1", False, "hdfs"), + _StorageTestConfig(False, "file://mock/bucket?param=1", False, "local_uri"), + _StorageTestConfig(False, "memory://mock/bucket?param=1", False, "memory"), + _StorageTestConfig( + False, "custom://mock/bucket?param=1", False, "custom_remote_storage" + ), + # NFS + _StorageTestConfig(True, None, True, "nfs"), +] + + +@pytest.mark.parametrize( + "storage_test_config", + 
_storage_test_configs, + ids=[str(config) for config in _storage_test_configs], +) +def test_tag_ray_air_storage_config( + tmp_path, storage_test_config, mock_record, monkeypatch +): + if storage_test_config.nfs: + import ray.air._internal.remote_storage + + monkeypatch.setattr( + ray.air._internal.remote_storage, + "_get_network_mounts", + lambda: [str(tmp_path)], + ) + + local_path = str(tmp_path / "local_path") + sync_config = ( + tune.SyncConfig(syncer=None) + if storage_test_config.syncing_disabled + else tune.SyncConfig() + ) -@pytest.fixture -def tuner(tmp_path): - yield tune.Tuner(train_fn, run_config=air.RunConfig(storage_path=str(tmp_path))) + air_usage.tag_ray_air_storage_config( + local_path=local_path, + remote_path=storage_test_config.remote_path, + sync_config=sync_config, + ) + assert storage_test_config.expected == mock_record[TagKey.AIR_STORAGE_CONFIGURATION] def test_tag_env_vars(ray_start_2_cpus, mock_record, tuner): diff --git a/python/ray/air/tests/test_remote_storage.py b/python/ray/air/tests/test_remote_storage.py index 32ed694045f9..99adcd8939f8 100644 --- a/python/ray/air/tests/test_remote_storage.py +++ b/python/ray/air/tests/test_remote_storage.py @@ -10,6 +10,7 @@ upload_to_uri, download_from_uri, get_fs_and_path, + _is_network_mount, ) from ray.tune.utils.file_transfer import _get_recursive_files_and_stats @@ -215,6 +216,25 @@ def test_get_fs_and_path(): assert find_error +def test_is_network_mount(tmp_path, monkeypatch): + """Test `_is_network_mount` storage utility.""" + + with monkeypatch.context() as m: + import ray.air._internal.remote_storage + + m.setattr( + ray.air._internal.remote_storage, + "_get_network_mounts", + lambda: [str(tmp_path)], + ) + assert _is_network_mount(str(tmp_path / "a/b/c")) + + # Local paths should return False + assert not _is_network_mount(str(tmp_path / "ray_results")) + assert not _is_network_mount("~/ray_results") + assert not _is_network_mount("") # cwd + + if __name__ == "__main__": import sys diff 
--git a/python/ray/tests/test_usage_stats.py b/python/ray/tests/test_usage_stats.py index f145299ad8ac..3786519c1aff 100644 --- a/python/ray/tests/test_usage_stats.py +++ b/python/ray/tests/test_usage_stats.py @@ -1204,6 +1204,7 @@ def run_usage_stats_server(reporter): if os.environ.get("RAY_MINIMAL") != "1": expected_payload["tune_scheduler"] = "FIFOScheduler" expected_payload["tune_searcher"] = "BasicVariantGenerator" + expected_payload["air_storage_configuration"] = "driver" assert payload["extra_usage_tags"] == expected_payload assert payload["total_num_nodes"] == 1 assert payload["total_num_running_jobs"] == 1 diff --git a/python/ray/tune/tune.py b/python/ray/tune/tune.py index 04398faa9ac8..e09b46994ec6 100644 --- a/python/ray/tune/tune.py +++ b/python/ray/tune/tune.py @@ -16,6 +16,7 @@ Mapping, Optional, Sequence, + Tuple, Type, Union, TYPE_CHECKING, @@ -228,6 +229,59 @@ def _ray_auto_init(entrypoint: str): ) +def _resolve_and_validate_storage_path( + storage_path: str, local_dir: Optional[str], sync_config: Optional[SyncConfig] +) -> Tuple[str, str, Optional[str], SyncConfig]: + # TODO(ml-team): Simplify/remove this in 2.6 when `local_dir` + # and `SyncConfig(upload_dir)` are hard-deprecated. + sync_config = sync_config or SyncConfig() + + # Resolve storage_path + local_path, remote_path = _resolve_storage_path( + storage_path, local_dir, sync_config.upload_dir, error_location="tune.run" + ) + + if sync_config.upload_dir: + assert remote_path == sync_config.upload_dir + warnings.warn( + "Setting a `SyncConfig.upload_dir` is deprecated and will be removed " + "in the future. Pass `RunConfig.storage_path` instead." + ) + # Set upload_dir to None to avoid further downstream resolution. + # Copy object first to not alter user input. + sync_config = copy.copy(sync_config) + sync_config.upload_dir = None + + if local_dir: + assert local_path == local_dir + warnings.warn( + "Passing a `local_dir` is deprecated and will be removed " + "in the future. 
Pass `storage_path` instead or set the" + "`RAY_AIR_LOCAL_CACHE_DIR` environment variable instead." + ) + local_path = local_dir + + if not remote_path: + # If no remote path is set, try to get Ray Storage URI + remote_path = _get_storage_uri() + if remote_path: + logger.info( + "Using configured Ray storage URI as storage path: " f"{remote_path}" + ) + + sync_config.validate_upload_dir(remote_path) + + if not local_path: + local_path = _get_defaults_results_dir() + + storage_path = storage_path or remote_path or local_path + + if storage_path != local_path and local_path: + os.environ["RAY_AIR_LOCAL_CACHE_DIR"] = local_path + + return storage_path, local_path, remote_path, sync_config + + class _Config(abc.ABC): def to_dict(self) -> dict: """Converts this configuration to a dict format.""" @@ -630,50 +684,18 @@ class and registered trainables. f"Got '{type(config)}' instead." ) - sync_config = sync_config or SyncConfig() - - # Resolve storage_path - local_path, remote_path = _resolve_storage_path( - storage_path, local_dir, sync_config.upload_dir, error_location="tune.run" + ( + storage_path, + local_path, + remote_path, + sync_config, + ) = _resolve_and_validate_storage_path( + storage_path=storage_path, local_dir=local_dir, sync_config=sync_config ) - if sync_config.upload_dir: - assert remote_path == sync_config.upload_dir - warnings.warn( - "Setting a `SyncConfig.upload_dir` is deprecated and will be removed " - "in the future. Pass `RunConfig.storage_path` instead." - ) - # Set upload_dir to None to avoid further downstream resolution. - # Copy object first to not alter user input. - sync_config = copy.copy(sync_config) - sync_config.upload_dir = None - - if local_dir: - assert local_path == local_dir - warnings.warn( - "Passing a `local_dir` is deprecated and will be removed " - "in the future. Pass `storage_path` instead or set the" - "`RAY_AIR_LOCAL_CACHE_DIR` environment variable instead." 
- ) - local_path = local_dir - - if not remote_path: - # If no remote path is set, try to get Ray Storage URI - remote_path = _get_storage_uri() - if remote_path: - logger.info( - "Using configured Ray storage URI as storage path: " f"{remote_path}" - ) - - sync_config.validate_upload_dir(remote_path) - - if not local_path: - local_path = _get_defaults_results_dir() - - storage_path = storage_path or remote_path or local_path - - if storage_path != local_path and local_path: - os.environ["RAY_AIR_LOCAL_CACHE_DIR"] = local_path + air_usage.tag_ray_air_storage_config( + local_path=local_path, remote_path=remote_path, sync_config=sync_config + ) checkpoint_score_attr = checkpoint_score_attr or "" if checkpoint_score_attr.startswith("min-"): diff --git a/src/ray/protobuf/usage.proto b/src/ray/protobuf/usage.proto index 6773166ec92e..83167a0c4e65 100644 --- a/src/ray/protobuf/usage.proto +++ b/src/ray/protobuf/usage.proto @@ -130,10 +130,11 @@ enum TagKey { // Name of Tune scheduler algorithm or "Custom" if user-defined. // Example: "FIFOScheduler" TUNE_SCHEDULER = 502; - // Ray AIR environment variable usage stored in JSON list format // This lists which of the environment variables exposed by the AIR libraries // are provided by the user. 
// Ex: ["RAY_AIR_LOCAL_CACHE_DIR", "TUNE_FALLBACK_TO_LATEST_CHECKPOINT"] AIR_ENV_VARS = 503; + // Storage configuration for AIR experiment + AIR_STORAGE_CONFIGURATION = 507; } From 2e6bea10b613b7c95898ba3ce126dd1c95ae6cb8 Mon Sep 17 00:00:00 2001 From: Jiajun Yao Date: Tue, 16 May 2023 08:58:34 -0700 Subject: [PATCH 413/424] Remove previously added debug logs (#35360) Revert some debug logs added in #35062 Signed-off-by: Jiajun Yao --- src/ray/rpc/node_manager/node_manager_client_pool.cc | 4 ++-- src/ray/rpc/server_call.h | 8 -------- 2 files changed, 2 insertions(+), 10 deletions(-) diff --git a/src/ray/rpc/node_manager/node_manager_client_pool.cc b/src/ray/rpc/node_manager/node_manager_client_pool.cc index 34911a465e67..097ad168d631 100644 --- a/src/ray/rpc/node_manager/node_manager_client_pool.cc +++ b/src/ray/rpc/node_manager/node_manager_client_pool.cc @@ -30,8 +30,8 @@ shared_ptr NodeManagerClientPool::GetOrConnectByAddr auto connection = client_factory_(address); client_map_[raylet_id] = connection; - RAY_LOG(INFO) << "Connected to raylet " << raylet_id << " at " << address.ip_address() - << ":" << address.port(); + RAY_LOG(DEBUG) << "Connected to raylet " << raylet_id << " at " << address.ip_address() + << ":" << address.port(); RAY_CHECK(connection != nullptr); return connection; } diff --git a/src/ray/rpc/server_call.h b/src/ray/rpc/server_call.h index efab149087ba..8242c6b69fe8 100644 --- a/src/ray/rpc/server_call.h +++ b/src/ray/rpc/server_call.h @@ -204,14 +204,6 @@ class ServerCallImpl : public ServerCall { // a new request comes in. factory.CreateCall(); } - // TODO(jjyao) Remove after debugging is done. 
- if (call_name_ == "NodeManagerService.grpc_server.UpdateResourceUsage") { - static std::string gcs_address = ""; - if (gcs_address == "" || gcs_address != context_.peer()) { - gcs_address = context_.peer(); - RAY_LOG(INFO) << "Handle " << call_name_ << " request from " << context_.peer(); - } - } (service_handler_.*handle_request_function_)( std::move(request_), reply_, From 32021f344af21dfa21f8f65a79e4ba7193716464 Mon Sep 17 00:00:00 2001 From: Cindy Zhang Date: Tue, 16 May 2023 13:16:59 -0700 Subject: [PATCH 414/424] [serve] Catch all exceptions during deploy (#35307) Be extra careful with fetching task references in the controller update loop. - The `deploy_obj_ref` task reference should only be fetched once (for both application state and http state). - Also, `RayTaskError` and `RuntimeEnvSetupError` may not be the only two types of possible exceptions thrown by `ray.get()` (e.g. saw a case of `RaySystemError`) so we should catch all exceptions. --- python/ray/serve/_private/application_state.py | 11 +++++++++-- python/ray/serve/_private/http_state.py | 17 ++++++++++++----- 2 files changed, 21 insertions(+), 7 deletions(-) diff --git a/python/ray/serve/_private/application_state.py b/python/ray/serve/_private/application_state.py index 084780885ad5..23958d1b87bc 100644 --- a/python/ray/serve/_private/application_state.py +++ b/python/ray/serve/_private/application_state.py @@ -197,6 +197,7 @@ def update(self): finished, pending = ray.wait([self._deploy_obj_ref], timeout=0) if pending: return + self._deploy_obj_ref = None try: ray.get(finished[0]) logger.info(f"Deploy task for app '{self._name}' ran successfully.") @@ -207,7 +208,6 @@ def update(self): # properly with traceback.format_exc(). RayTaskError has its own # custom __str__ function. 
self._app_msg = f"Deploying app '{self._name}' failed:\n{str(e)}" - self._deploy_obj_ref = None logger.warning(self._app_msg) return except RuntimeEnvSetupError: @@ -216,7 +216,14 @@ def update(self): f"Runtime env setup for app '{self._name}' " f"failed:\n{traceback.format_exc()}" ) - self._deploy_obj_ref = None + logger.warning(self._app_msg) + return + except Exception: + self._status = ApplicationStatus.DEPLOY_FAILED + self._app_msg = ( + "Unexpected error occured while deploying application " + f"'{self._name}':\n{traceback.format_exc()}" + ) logger.warning(self._app_msg) return deployments_statuses = ( diff --git a/python/ray/serve/_private/http_state.py b/python/ray/serve/_private/http_state.py index e3c5e64c83b6..3b52a931b017 100644 --- a/python/ray/serve/_private/http_state.py +++ b/python/ray/serve/_private/http_state.py @@ -3,6 +3,7 @@ import logging import random import time +import traceback from typing import Dict, List, Tuple import ray @@ -35,6 +36,7 @@ def __init__( ): self._actor_handle = actor_handle self._actor_name = actor_name + self._node_id = node_id self._ready_obj_ref = self._actor_handle.ready.remote() self._status = HTTPProxyStatus.STARTING self._health_check_obj_ref = None @@ -77,9 +79,10 @@ def update_actor_details(self, **kwargs) -> None: def update(self): if self._status == HTTPProxyStatus.STARTING: - try: - finished, _ = ray.wait([self._ready_obj_ref], timeout=0) - if finished: + finished, _ = ray.wait([self._ready_obj_ref], timeout=0) + if finished: + self._ready_obj_ref = None + try: worker_id, log_file_path = json.loads(ray.get(finished[0])) self.set_status(HTTPProxyStatus.HEALTHY) self.update_actor_details( @@ -87,8 +90,12 @@ def update(self): log_file_path=log_file_path, status=self._status, ) - except Exception: - self.set_status(HTTPProxyStatus.UNHEALTHY) + except Exception: + self.set_status(HTTPProxyStatus.UNHEALTHY) + logger.warning( + "Unexpected error occured when checking readiness of HTTP " + f"Proxy on node 
{self._node_id}:\n{traceback.format_exc()}" + ) return # Perform periodic health checks From bebd20cfe6e1be21d20cfe19e8ceee4ff4aedf5e Mon Sep 17 00:00:00 2001 From: Justin Yu Date: Tue, 16 May 2023 13:29:54 -0700 Subject: [PATCH 415/424] [AIR][Telemetry] Experiment tracking integrations + callbacks (#34904) This PR adds telemetry for built-in experiment tracking integrations by adding 3 usage tags: 1. `AIR_SETUP_WANDB_INTEGRATION_USED` ("1" if used) 2. `AIR_SETUP_MLFLOW_INTEGRATION_USED` ("1" if used) 3. `AIR_CALLBACKS` (a JSON string representing a dict of callback name -> count) - The key `CustomCallback` gets a tally if the user passed in just a subclass of `Callback` - The key `CustomLoggerCallback` gets a tally if the user passed in a custom `LoggerCallback` (not including above) The need for 1 and 2 is because wandb and mlflow allow the `setup_x` path, where the user calls this in their training function and logs whatever they want themselves. These 3 can be used together to extract the total wandb/mlflow integration usage. (Ex: `setup_wandb` usage + `WandbLoggerCallback` usage. There may be some overlap, as it's technically possible to use both.) 
Signed-off-by: Justin Yu --- python/ray/air/_internal/remote_storage.py | 2 +- python/ray/air/_internal/usage.py | 77 +++++++++++++++- python/ray/air/integrations/mlflow.py | 6 +- python/ray/air/integrations/wandb.py | 5 + python/ray/air/tests/test_air_usage.py | 101 +++++++++++++++++++-- python/ray/tune/tune.py | 6 +- python/ray/tune/utils/callback.py | 2 +- src/ray/protobuf/usage.proto | 8 ++ 8 files changed, 193 insertions(+), 14 deletions(-) diff --git a/python/ray/air/_internal/remote_storage.py b/python/ray/air/_internal/remote_storage.py index 05180651ccf8..13aaf91ab919 100644 --- a/python/ray/air/_internal/remote_storage.py +++ b/python/ray/air/_internal/remote_storage.py @@ -153,7 +153,7 @@ def _get_network_mounts() -> List[str]: def _is_network_mount(path: str) -> bool: """Checks if a path is within a mounted network filesystem.""" resolved_path = Path(path).expanduser().resolve() - network_mounts = set(Path(mount) for mount in _get_network_mounts()) + network_mounts = {Path(mount) for mount in _get_network_mounts()} # Check if any of the network mounts are one of the path's parents. 
return bool(set(resolved_path.parents).intersection(network_mounts)) diff --git a/python/ray/air/_internal/usage.py b/python/ray/air/_internal/usage.py index 68a2765f6a10..96efac9ff3ef 100644 --- a/python/ray/air/_internal/usage.py +++ b/python/ray/air/_internal/usage.py @@ -1,6 +1,7 @@ +import collections import json import os -from typing import TYPE_CHECKING, Optional, Set, Union +from typing import TYPE_CHECKING, Dict, List, Optional, Set, Union import urllib.parse from ray.air._internal.remote_storage import _is_network_mount @@ -10,8 +11,10 @@ from ray.train.trainer import BaseTrainer from ray.tune.schedulers import TrialScheduler from ray.tune.search import BasicVariantGenerator, Searcher + from ray.tune import Callback from ray.tune import SyncConfig + AIR_TRAINERS = { "AccelerateTrainer", "HorovodTrainer", @@ -122,6 +125,78 @@ def tag_scheduler(scheduler: "TrialScheduler"): record_extra_usage_tag(TagKey.TUNE_SCHEDULER, scheduler_name) +def tag_setup_wandb(): + record_extra_usage_tag(TagKey.AIR_SETUP_WANDB_INTEGRATION_USED, "1") + + +def tag_setup_mlflow(): + record_extra_usage_tag(TagKey.AIR_SETUP_MLFLOW_INTEGRATION_USED, "1") + + +def _count_callbacks(callbacks: Optional[List["Callback"]]) -> Dict[str, int]: + """Creates a map of callback class name -> count given a list of callbacks.""" + from ray.tune import Callback + from ray.tune.logger import LoggerCallback + from ray.tune.utils.callback import DEFAULT_CALLBACK_CLASSES + + from ray.air.integrations.wandb import WandbLoggerCallback + from ray.air.integrations.mlflow import MLflowLoggerCallback + from ray.air.integrations.comet import CometLoggerCallback + from ray.tune.logger.aim import AimLoggerCallback + + built_in_callbacks = ( + WandbLoggerCallback, + MLflowLoggerCallback, + CometLoggerCallback, + AimLoggerCallback, + ) + DEFAULT_CALLBACK_CLASSES + + callback_names = [callback_cls.__name__ for callback_cls in built_in_callbacks] + callback_counts = collections.defaultdict(int) + + callbacks = 
callbacks or [] + for callback in callbacks: + if not isinstance(callback, Callback): + # This will error later, but don't include this as custom usage. + continue + + callback_name = callback.__class__.__name__ + + if callback_name in callback_names: + callback_counts[callback_name] += 1 + elif isinstance(callback, LoggerCallback): + callback_counts["CustomLoggerCallback"] += 1 + else: + callback_counts["CustomCallback"] += 1 + + return callback_counts + + +def tag_callbacks(callbacks: Optional[List["Callback"]]) -> bool: + """Records built-in callback usage via a JSON str representing a + dictionary mapping callback class name -> counts. + + User-defined callbacks will increment the count under the `CustomLoggerCallback` + or `CustomCallback` key depending on which of the provided interfaces they subclass. + NOTE: This will NOT track the name of the user-defined callback, + nor its implementation. + + This will NOT report telemetry if no callbacks are provided by the user. + + Returns: + bool: True if usage was recorded, False otherwise. + """ + if not callbacks: + # User didn't pass in any callbacks -> no usage recorded. 
+ return False + + callback_counts = _count_callbacks(callbacks) + + if callback_counts: + callback_counts_str = json.dumps(callback_counts) + record_extra_usage_tag(TagKey.AIR_CALLBACKS, callback_counts_str) + + def _get_tag_for_remote_path(remote_path: str) -> str: scheme = urllib.parse.urlparse(remote_path).scheme if scheme == "file": diff --git a/python/ray/air/integrations/mlflow.py b/python/ray/air/integrations/mlflow.py index f31f6ffdeaf3..aa671b24786a 100644 --- a/python/ray/air/integrations/mlflow.py +++ b/python/ray/air/integrations/mlflow.py @@ -5,8 +5,8 @@ import ray from ray.air import session - from ray.air._internal.mlflow import _MLflowLoggerUtil +from ray.air._internal import usage as air_usage from ray.tune.logger import LoggerCallback from ray.tune.result import TIMESTEPS_TOTAL, TRAINING_ITERATION from ray.tune.experiment import Trial @@ -194,6 +194,10 @@ def train_fn(config): set_active=True, ) mlflow_util.log_params(_config) + + # Record `setup_mlflow` usage when everything has setup successfully. + air_usage.tag_setup_mlflow() + return mlflow_util._mlflow diff --git a/python/ray/air/integrations/wandb.py b/python/ray/air/integrations/wandb.py index e998a264785a..d0c40ee1d47f 100644 --- a/python/ray/air/integrations/wandb.py +++ b/python/ray/air/integrations/wandb.py @@ -13,6 +13,7 @@ import ray from ray import logger from ray.air import session +from ray.air._internal import usage as air_usage from ray.air.util.node import _force_on_current_node from ray.tune.logger import LoggerCallback @@ -207,6 +208,10 @@ def _setup_wandb( run = _wandb.init(**wandb_init_kwargs) _run_wandb_process_run_info_hook(run) + + # Record `setup_wandb` usage when everything has setup successfully. 
+ air_usage.tag_setup_wandb() + return run diff --git a/python/ray/air/tests/test_air_usage.py b/python/ray/air/tests/test_air_usage.py index 013f041d9d36..040b1a615d4c 100644 --- a/python/ray/air/tests/test_air_usage.py +++ b/python/ray/air/tests/test_air_usage.py @@ -5,30 +5,39 @@ import os import pytest -from unittest import mock +from unittest.mock import MagicMock, patch import ray from ray import air, tune from ray.air import session -from ray._private.usage.usage_lib import TagKey from ray.air._internal import usage as air_usage +from ray.air.integrations import wandb, mlflow, comet +from ray.tune.callback import Callback +from ray.tune.logger import LoggerCallback +from ray.tune.logger.aim import AimLoggerCallback +from ray.tune.utils.callback import DEFAULT_CALLBACK_CLASSES +from ray._private.usage.usage_lib import TagKey -@pytest.fixture -def mock_record(monkeypatch): - import ray.air._internal.usage - +def _mock_record_from_module(module, monkeypatch): recorded = {} def mock_record_extra_usage_tag(key: TagKey, value: str): recorded[key] = value monkeypatch.setattr( - ray.air._internal.usage, + module, "record_extra_usage_tag", mock_record_extra_usage_tag, ) - yield recorded + return recorded + + +@pytest.fixture +def mock_record(monkeypatch): + import ray.air._internal.usage + + yield _mock_record_from_module(ray.air._internal.usage, monkeypatch=monkeypatch) def train_fn(config): @@ -102,6 +111,80 @@ def test_tag_ray_air_storage_config( assert storage_test_config.expected == mock_record[TagKey.AIR_STORAGE_CONFIGURATION] +class _CustomLoggerCallback(LoggerCallback): + pass + + +class _CustomCallback(Callback): + pass + + +_TEST_CALLBACKS = [ + wandb.WandbLoggerCallback, + mlflow.MLflowLoggerCallback, + comet.CometLoggerCallback, + AimLoggerCallback, + _CustomLoggerCallback, + _CustomLoggerCallback, + _CustomCallback, +] + + +def test_tag_setup_wandb(mock_record): + from ray.air.integrations.wandb import _setup_wandb + + with patch.dict(os.environ, 
{wandb.WANDB_MODE_ENV_VAR: "disabled"}): + _setup_wandb(trial_id="a", trial_name="b", config={}, _wandb=MagicMock()) + assert mock_record[TagKey.AIR_SETUP_WANDB_INTEGRATION_USED] == "1" + + +def test_tag_setup_mlflow(mock_record, monkeypatch): + from ray.air.integrations.mlflow import setup_mlflow + + monkeypatch.setattr(ray.air.integrations.mlflow, "_MLflowLoggerUtil", MagicMock()) + setup_mlflow() + assert mock_record[TagKey.AIR_SETUP_MLFLOW_INTEGRATION_USED] == "1" + + +@pytest.mark.parametrize( + "callback_classes_expected", + [ + (None, None), + ([], None), + ([lambda: None], None), + ( + DEFAULT_CALLBACK_CLASSES, + {cls.__name__: 1 for cls in DEFAULT_CALLBACK_CLASSES}, + ), + ( + _TEST_CALLBACKS, + { + "WandbLoggerCallback": 1, + "MLflowLoggerCallback": 1, + "CometLoggerCallback": 1, + "AimLoggerCallback": 1, + "CustomLoggerCallback": 2, + "CustomCallback": 1, + }, + ), + ], +) +def test_tag_callbacks(mock_record, callback_classes_expected): + callback_classes, expected = callback_classes_expected + + callbacks = ( + [callback_cls() for callback_cls in callback_classes] + if callback_classes + else None + ) + + air_usage.tag_callbacks(callbacks) + + callback_usage_str = mock_record.pop(TagKey.AIR_CALLBACKS, None) + callback_counts = json.loads(callback_usage_str) if callback_usage_str else None + assert callback_counts == expected + + def test_tag_env_vars(ray_start_2_cpus, mock_record, tuner): """Test that env vars are recorded properly, and arbitrary user environment variables are ignored.""" @@ -111,7 +194,7 @@ def test_tag_env_vars(ray_start_2_cpus, mock_record, tuner): } untracked_env_vars = {"RANDOM_USER_ENV_VAR": "asdf"} - with mock.patch.dict(os.environ, {**env_vars_to_record, **untracked_env_vars}): + with patch.dict(os.environ, {**env_vars_to_record, **untracked_env_vars}): tuner.fit() recorded_env_vars = json.loads(mock_record[TagKey.AIR_ENV_VARS]) diff --git a/python/ray/tune/tune.py b/python/ray/tune/tune.py index e09b46994ec6..d6da633d7fd7 
100644 --- a/python/ray/tune/tune.py +++ b/python/ray/tune/tune.py @@ -909,7 +909,11 @@ class and registered trainables. progress_metrics = _detect_progress_metrics(_get_trainable(run_or_experiment)) - # Create syncer callbacks + # NOTE: Report callback telemetry before populating the list with default callbacks. + # This tracks user-specified callback usage. + air_usage.tag_callbacks(callbacks) + + # Create default logging + syncer callbacks callbacks = _create_default_callbacks( callbacks, sync_config=sync_config, diff --git a/python/ray/tune/utils/callback.py b/python/ray/tune/utils/callback.py index 00a049c048f3..478f559f5ed1 100644 --- a/python/ray/tune/utils/callback.py +++ b/python/ray/tune/utils/callback.py @@ -46,7 +46,7 @@ def _create_default_callbacks( air_verbosity: Optional["AirVerbosity"] = None, metric: Optional[str] = None, progress_metrics: Optional[Collection[str]] = None, -): +) -> List[Callback]: """Create default callbacks for `Tuner.fit()`. This function takes a list of existing callbacks and adds default diff --git a/src/ray/protobuf/usage.proto b/src/ray/protobuf/usage.proto index 83167a0c4e65..0f212efdcad6 100644 --- a/src/ray/protobuf/usage.proto +++ b/src/ray/protobuf/usage.proto @@ -135,6 +135,14 @@ enum TagKey { // are provided by the user. // Ex: ["RAY_AIR_LOCAL_CACHE_DIR", "TUNE_FALLBACK_TO_LATEST_CHECKPOINT"] AIR_ENV_VARS = 503; + // Fully user-controlled experiment tracking integrations ("1" if used) + // NOTE: These tags + the callback metrics can be aggregated to extract + // total experiment tracking integration usage. + AIR_SETUP_WANDB_INTEGRATION_USED = 504; + AIR_SETUP_MLFLOW_INTEGRATION_USED = 505; + // Built-in callbacks, stored in JSON format with callback name -> count. 
+ // Ex: {"WandbLoggerCallback": 1, "MLflowLoggerCallback": 1} + AIR_CALLBACKS = 506; // Storage configuration for AIR experiment AIR_STORAGE_CONFIGURATION = 507; } From 90e908fed63770651fbb86c3c85d8d620e026bc3 Mon Sep 17 00:00:00 2001 From: Antoni Baum Date: Tue, 16 May 2023 13:38:30 -0700 Subject: [PATCH 416/424] [Train] Change `num_boost_round` to target iterations (#33602) Previously, we have been following the xgboost/lightgbm conventions of fitting for num_boost_round, regardless of how many iterations the model already has been fitted on. This, however, causes issues when resuming from checkpoints, especially during training/tuning, as you may end up with more trees than desired: Trial has num_boost_round=100 Trial fits for 50 and dies Trial is restored from checkpoint, model starts with 50 iterations already complete Because num_boost_round=100, model is fitted for 100 iterations, giving a total of 150 iterations instead of desired 100 Now, we will subtract the already completed iterations when resuming. num_boost_round was already a part of **train_kwargs, we just promote it here for docstring purposes. 
--------- Signed-off-by: Antoni Baum --- python/ray/train/gbdt_trainer.py | 26 +++++++++++++++++++ python/ray/train/lightgbm/lightgbm_trainer.py | 13 ++++++++-- .../ray/train/tests/test_lightgbm_trainer.py | 2 +- .../ray/train/tests/test_trainer_restore.py | 9 +++---- .../ray/train/tests/test_xgboost_trainer.py | 2 +- python/ray/train/xgboost/xgboost_trainer.py | 5 ++++ 6 files changed, 48 insertions(+), 9 deletions(-) diff --git a/python/ray/train/gbdt_trainer.py b/python/ray/train/gbdt_trainer.py index 114020682cd0..997be4230008 100644 --- a/python/ray/train/gbdt_trainer.py +++ b/python/ray/train/gbdt_trainer.py @@ -1,4 +1,5 @@ import os +import logging import warnings from dataclasses import dataclass from typing import TYPE_CHECKING, Any, Dict, Optional, Tuple, Type @@ -21,6 +22,9 @@ from ray.data.preprocessor import Preprocessor _WARN_REPARTITION_THRESHOLD = 10 * 1024**3 +_DEFAULT_NUM_ITERATIONS = 10 + +logger = logging.getLogger(__name__) def _convert_scaling_config_to_ray_params( @@ -119,6 +123,7 @@ class GBDTTrainer(BaseTrainer): params: Framework specific training parameters. dmatrix_params: Dict of ``dataset name:dict of kwargs`` passed to respective :class:`xgboost_ray.RayDMatrix` initializations. + num_boost_round: Target number of boosting iterations (trees in the model). scaling_config: Configuration for how to scale data parallel training. run_config: Configuration for the execution of the training run. 
preprocessor: A ray.data.Preprocessor to preprocess the @@ -142,6 +147,8 @@ class GBDTTrainer(BaseTrainer): _tune_callback_checkpoint_cls: type _default_ray_params: Dict[str, Any] = {"checkpoint_frequency": 1} _init_model_arg_name: str + _num_iterations_argument: str = "num_boost_round" + _default_num_iterations: int = _DEFAULT_NUM_ITERATIONS def __init__( self, @@ -150,6 +157,7 @@ def __init__( label_column: str, params: Dict[str, Any], dmatrix_params: Optional[Dict[str, Dict[str, Any]]] = None, + num_boost_round: int = _DEFAULT_NUM_ITERATIONS, scaling_config: Optional[ScalingConfig] = None, run_config: Optional[RunConfig] = None, preprocessor: Optional["Preprocessor"] = None, @@ -159,6 +167,7 @@ def __init__( self.label_column = label_column self.params = params + self.num_boost_round = num_boost_round self.train_kwargs = train_kwargs self.dmatrix_params = dmatrix_params or {} @@ -262,6 +271,7 @@ def _checkpoint_at_end(self, model, evals_result: dict) -> None: def training_loop(self) -> None: config = self.train_kwargs.copy() + config[self._num_iterations_argument] = self.num_boost_round dmatrices = self._get_dmatrices( dmatrix_params=self.dmatrix_params, @@ -297,6 +307,22 @@ def training_loop(self) -> None: config[self._init_model_arg_name] = init_model + if init_model: + # If restoring, make sure that we only create num_boosting_round trees, + # and not init_model_trees + num_boosting_round trees + last_iteration = self._model_iteration(init_model) + num_iterations = config.get( + self._num_iterations_argument, self._default_num_iterations + ) + new_iterations = num_iterations - last_iteration + config[self._num_iterations_argument] = new_iterations + logger.warning( + f"Model loaded from checkpoint will train for " + f"additional {new_iterations} iterations (trees) in order " + "to achieve the target number of iterations " + f"({self._num_iterations_argument}={num_iterations})." 
+ ) + model = self._train( params=self.params, dtrain=train_dmatrix, diff --git a/python/ray/train/lightgbm/lightgbm_trainer.py b/python/ray/train/lightgbm/lightgbm_trainer.py index 9735bedb1d9d..38ebe60f9cd8 100644 --- a/python/ray/train/lightgbm/lightgbm_trainer.py +++ b/python/ray/train/lightgbm/lightgbm_trainer.py @@ -1,4 +1,4 @@ -from typing import Dict, Any, Optional, Tuple, TYPE_CHECKING +from typing import Dict, Any, Optional, Tuple, Union, TYPE_CHECKING try: from packaging.version import Version @@ -71,6 +71,11 @@ class LightGBMTrainer(GBDTTrainer): :class:`xgboost_ray.RayDMatrix` initializations, which in turn are passed to ``lightgbm.Dataset`` objects created on each worker. For example, this can be used to add sample weights with the ``weights`` parameter. + num_boost_round: Target number of boosting iterations (trees in the model). + Note that unlike in ``lightgbm.train``, this is the target number + of trees, meaning that if you set ``num_boost_round=10`` and pass a model + that has already been trained for 5 iterations, it will be trained for 5 + iterations more, instead of 10 more. scaling_config: Configuration for how to scale data parallel training. run_config: Configuration for the execution of the training run. 
preprocessor: A ray.data.Preprocessor to preprocess the @@ -106,7 +111,11 @@ def _load_checkpoint( def _save_model(self, model: lightgbm.LGBMModel, path: str): model.booster_.save_model(path) - def _model_iteration(self, model: lightgbm.LGBMModel) -> int: + def _model_iteration( + self, model: Union[lightgbm.LGBMModel, lightgbm.Booster] + ) -> int: + if isinstance(model, lightgbm.Booster): + return model.current_iteration() return model.booster_.current_iteration() def preprocess_datasets(self) -> None: diff --git a/python/ray/train/tests/test_lightgbm_trainer.py b/python/ray/train/tests/test_lightgbm_trainer.py index 430a74ecc48e..37e9ce84b9c1 100644 --- a/python/ray/train/tests/test_lightgbm_trainer.py +++ b/python/ray/train/tests/test_lightgbm_trainer.py @@ -100,7 +100,7 @@ def test_resume_from_checkpoint(ray_start_6_cpus, tmpdir): scaling_config=scale_config, label_column="target", params=params, - num_boost_round=5, + num_boost_round=10, datasets={TRAIN_DATASET_KEY: train_dataset, "valid": valid_dataset}, resume_from_checkpoint=resume_from, ) diff --git a/python/ray/train/tests/test_trainer_restore.py b/python/ray/train/tests/test_trainer_restore.py index 624ebd7b7c2a..f90f6a60b2ea 100644 --- a/python/ray/train/tests/test_trainer_restore.py +++ b/python/ray/train/tests/test_trainer_restore.py @@ -153,11 +153,10 @@ def test_gbdt_trainer_restore(ray_start_6_cpus, tmpdir, trainer_cls): run_config=RunConfig( local_dir=str(tmpdir), name=exp_name, - checkpoint_config=CheckpointConfig(num_to_keep=1, checkpoint_frequency=1), + checkpoint_config=CheckpointConfig( + num_to_keep=1, checkpoint_frequency=1, checkpoint_at_end=False + ), callbacks=[FailureInjectionCallback(num_iters=2)], - # We also use a stopper, since the restored run will go for - # another 5 boosting rounds otherwise. 
- stop={"training_iteration": 5}, ), num_boost_round=5, ) @@ -398,7 +397,7 @@ def check_for_raise(): trainer_cls.restore(str(tmpdir)) if should_warn: - with pytest.warns() as warn_record: + with pytest.warns(Warning) as warn_record: check_for_raise() assert any( "Invalid trainer type" in str(record.message) diff --git a/python/ray/train/tests/test_xgboost_trainer.py b/python/ray/train/tests/test_xgboost_trainer.py index 6812a9c7ef67..8ec1c0a56d9d 100644 --- a/python/ray/train/tests/test_xgboost_trainer.py +++ b/python/ray/train/tests/test_xgboost_trainer.py @@ -115,7 +115,7 @@ def test_resume_from_checkpoint(ray_start_4_cpus, tmpdir): scaling_config=scale_config, label_column="target", params=params, - num_boost_round=5, + num_boost_round=10, datasets={TRAIN_DATASET_KEY: train_dataset, "valid": valid_dataset}, resume_from_checkpoint=resume_from, ) diff --git a/python/ray/train/xgboost/xgboost_trainer.py b/python/ray/train/xgboost/xgboost_trainer.py index 5cc92a86c079..154a001129b5 100644 --- a/python/ray/train/xgboost/xgboost_trainer.py +++ b/python/ray/train/xgboost/xgboost_trainer.py @@ -66,6 +66,11 @@ class XGBoostTrainer(GBDTTrainer): :class:`xgboost_ray.RayDMatrix` initializations, which in turn are passed to ``xgboost.DMatrix`` objects created on each worker. For example, this can be used to add sample weights with the ``weights`` parameter. + num_boost_round: Target number of boosting iterations (trees in the model). + Note that unlike in ``xgboost.train``, this is the target number + of trees, meaning that if you set ``num_boost_round=10`` and pass a model + that has already been trained for 5 iterations, it will be trained for 5 + iterations more, instead of 10 more. scaling_config: Configuration for how to scale data parallel training. run_config: Configuration for the execution of the training run. 
preprocessor: A ray.data.Preprocessor to preprocess the From c8d12fe24a1a7af4f7f99ddeb9d0b98a794aa6d1 Mon Sep 17 00:00:00 2001 From: Edward Oakes Date: Tue, 16 May 2023 15:45:53 -0500 Subject: [PATCH 417/424] [serve][docs] Add user guide for application builders (#35392) Docs follow-up for: https://github.com/ray-project/ray/pull/34584 --- doc/source/_toc.yml | 1 + doc/source/serve/app-builder-guide.md | 132 +++++++++++++++++++++++ doc/source/serve/doc_code/app_builder.py | 79 ++++++++++++++ doc/source/serve/user-guide.md | 1 + 4 files changed, 213 insertions(+) create mode 100644 doc/source/serve/app-builder-guide.md create mode 100644 doc/source/serve/doc_code/app_builder.py diff --git a/doc/source/_toc.yml b/doc/source/_toc.yml index c3555bf6228e..3fe175ec0204 100644 --- a/doc/source/_toc.yml +++ b/doc/source/_toc.yml @@ -267,6 +267,7 @@ parts: - file: serve/scaling-and-resource-allocation - file: serve/model_composition - file: serve/dev-workflow + - file: serve/app-builder-guide - file: serve/multi-app - file: serve/production-guide/index sections: diff --git a/doc/source/serve/app-builder-guide.md b/doc/source/serve/app-builder-guide.md new file mode 100644 index 000000000000..a35dd6c115e2 --- /dev/null +++ b/doc/source/serve/app-builder-guide.md @@ -0,0 +1,132 @@ +# Passing Arguments to Applications + +This section describes how to pass arguments to your applications using an application builder function. + +## Defining an application builder + +When writing an application, there are often parameters that you want to be able to easily change in development or production. +For example, you might have a path to trained model weights and want to test out a newly trained model. +In Ray Serve, these parameters are typically passed to the constructor of your deployments using `.bind()`. +This pattern allows you to configure deployments using ordinary Python code but it requires modifying the code anytime one of the parameters needs to change.
+ +To pass arguments without changing the code, define an "application builder" function that takes an arguments dictionary (or [Pydantic object](typed-app-builders)) and returns the built application to be run. + +```{literalinclude} ../serve/doc_code/app_builder.py +:start-after: __begin_untyped_builder__ +:end-before: __end_untyped_builder__ +:language: python +``` + +You can use this application builder function as the import path in the `serve run` CLI command or the config file (as shown below). +To avoid writing code to handle type conversions and missing arguments, use a [Pydantic object](typed-app-builders) instead. + +### Passing arguments via `serve run` + +Pass arguments to the application builder from `serve run` using the following syntax: + +```bash +$ serve run hello:app_builder key1=val1 key2=val2 +``` + +The arguments are passed to the application builder as a dictionary, in this case `{"key1": "val1", "key2": "val2"}`. +For example, to pass a new message to the `HelloWorld` app defined above (with the code saved in `hello.py`): + +```bash +% serve run hello:app_builder message="Hello from CLI" +2023-05-16 10:47:31,641 INFO scripts.py:404 -- Running import path: 'hello:app_builder'. +2023-05-16 10:47:33,344 INFO worker.py:1615 -- Started a local Ray instance. View the dashboard at http://127.0.0.1:8265 +(ServeController pid=56826) INFO 2023-05-16 10:47:35,115 controller 56826 deployment_state.py:1244 - Deploying new version of deployment default_HelloWorld. +(ServeController pid=56826) INFO 2023-05-16 10:47:35,141 controller 56826 deployment_state.py:1483 - Adding 1 replica to deployment default_HelloWorld. +(HTTPProxyActor pid=56828) INFO: Started server process [56828] +(ServeReplica:default_HelloWorld pid=56830) Message: Hello from CLI +2023-05-16 10:47:36,131 SUCC scripts.py:424 -- Deployed Serve app successfully. +``` + +Notice that the "Hello from CLI" message is printed from within the deployment constructor.
+ +### Passing arguments via config file + +Pass arguments to the application builder in the config file's `args` field: + +```yaml +applications: + - name: MyApp + import_path: hello:app_builder + args: + message: "Hello from config" +``` + +For example, to pass a new message to the `HelloWorld` app defined above (with the code saved in `hello.py` and the config saved in `config.yaml`): + +```bash +% serve run config.yaml +2023-05-16 10:49:25,247 INFO scripts.py:351 -- Running config file: 'config.yaml'. +2023-05-16 10:49:26,949 INFO worker.py:1615 -- Started a local Ray instance. View the dashboard at http://127.0.0.1:8265 +2023-05-16 10:49:28,678 SUCC scripts.py:419 -- Submitted deploy config successfully. +(ServeController pid=57109) INFO 2023-05-16 10:49:28,676 controller 57109 controller.py:559 - Starting deploy_serve_application task for application MyApp. +(HTTPProxyActor pid=57111) INFO: Started server process [57111] +(ServeController pid=57109) INFO 2023-05-16 10:49:28,942 controller 57109 deployment_state.py:1244 - Deploying new version of deployment MyApp_HelloWorld. +(ServeController pid=57109) INFO 2023-05-16 10:49:29,016 controller 57109 deployment_state.py:1483 - Adding 1 replica to deployment MyApp_HelloWorld. +(ServeReplica:MyApp_HelloWorld pid=57113) Message: Hello from config +(ServeController pid=57109) INFO 2023-05-16 10:49:30,046 controller 57109 application_state.py:202 - Deploy task for app 'MyApp' ran successfully. +``` + +Notice that the "Hello from config" message is printed from within the deployment constructor. + +(typed-app-builders)= +### Typing arguments with Pydantic + +To avoid writing logic to parse and validate the arguments by hand, define a [Pydantic model](https://pydantic-docs.helpmanual.io/usage/models/) as the single input parameter's type to your application builder function (the parameter must be type annotated). 
+Arguments are passed the same way, but the resulting dictionary is used to construct the Pydantic model using `model.parse_obj(args_dict)`. + +```{literalinclude} ../serve/doc_code/app_builder.py +:start-after: __begin_typed_builder__ +:end-before: __end_typed_builder__ +:language: python +``` + +```bash +% serve run hello:app_builder message="Hello from CLI" +2023-05-16 10:47:31,641 INFO scripts.py:404 -- Running import path: 'hello:app_builder'. +2023-05-16 10:47:33,344 INFO worker.py:1615 -- Started a local Ray instance. View the dashboard at http://127.0.0.1:8265 +(ServeController pid=56826) INFO 2023-05-16 10:47:35,115 controller 56826 deployment_state.py:1244 - Deploying new version of deployment default_HelloWorld. +(ServeController pid=56826) INFO 2023-05-16 10:47:35,141 controller 56826 deployment_state.py:1483 - Adding 1 replica to deployment default_HelloWorld. +(HTTPProxyActor pid=56828) INFO: Started server process [56828] +(ServeReplica:default_HelloWorld pid=56830) Message: Hello from CLI +2023-05-16 10:47:36,131 SUCC scripts.py:424 -- Deployed Serve app successfully. +``` + +## Common patterns + +### Multiple parametrized applications using the same builder + +You can use application builders to run multiple applications with the same code but different parameters. +For example, multiple applications may share preprocessing and HTTP handling logic but use many different trained model weights. 
+The same application builder `import_path` can take different arguments to define multiple applications as follows: + +```yaml +applications: + - name: Model1 + import_path: my_module:my_model_code + args: + model_uri: s3://my_bucket/model_1 + - name: Model2 + import_path: my_module:my_model_code + args: + model_uri: s3://my_bucket/model_2 + - name: Model3 + import_path: my_module:my_model_code + args: + model_uri: s3://my_bucket/model_3 +``` + +### Configuring multiple composed deployments + +You can use the arguments passed to an application builder to configure multiple deployments in a single application. +For example a model composition application might take weights to two different models as follows: + +```{literalinclude} ../serve/doc_code/app_builder.py +:start-after: __begin_composed_builder__ +:end-before: __end_composed_builder__ +:language: python +``` diff --git a/doc/source/serve/doc_code/app_builder.py b/doc/source/serve/doc_code/app_builder.py new file mode 100644 index 000000000000..99fc28c66508 --- /dev/null +++ b/doc/source/serve/doc_code/app_builder.py @@ -0,0 +1,79 @@ +# flake8: noqa + +# __begin_untyped_builder__ +from typing import Dict + +from ray import serve +from ray.serve import Application + + +@serve.deployment +class HelloWorld: + def __init__(self, message: str): + self._message = message + print("Message:", self._message) + + def __call__(self, request): + return self._message + + +def app_builder(args: Dict[str, str]) -> Application: + return HelloWorld.bind(args["message"]) + + +# __end_untyped_builder__ + +serve.run(app_builder({"message": "Hello bar"})) +resp = requests.get("http://localhost:8000") +assert resp.text == "Hello bar" + +# __begin_typed_builder__ +from pydantic import BaseModel + +from ray import serve +from ray.serve import Application + + +class HelloWorldArgs(BaseModel): + message: str + + +@serve.deployment +class HelloWorld: + def __init__(self, message: str): + self._message = message + print("Message:", 
self._message) + + def __call__(self, request): + return self._message + + +def typed_app_builder(args: HelloWorldArgs) -> Application: + return HelloWorld.bind(args.message) + + +# __end_typed_builder__ + +serve.run(typed_app_builder(HelloWorldArgs(message="Hello baz"))) +resp = requests.get("http://localhost:8000") +assert resp.text == "Hello baz" + +# __begin_composed_builder__ +from pydantic import BaseModel + +from ray.serve import Application + + +class ComposedArgs(BaseModel): + model1_uri: str + model2_uri: str + + +def composed_app_builder(args: ComposedArgs) -> Application: + return IngressDeployment.bind( + Model1.bind(args.model1_uri), + Model2.bind(args.model2_uri), + ) + + +# __end_composed_builder__ diff --git a/doc/source/serve/user-guide.md b/doc/source/serve/user-guide.md index 3ab3ae86889f..cfb3e26a1904 100644 --- a/doc/source/serve/user-guide.md +++ b/doc/source/serve/user-guide.md @@ -8,6 +8,7 @@ This user guide will help you navigate the Ray Serve project and show you how to - [Scaling and Resource Allocation](scaling-and-resource-allocation) - [Model Composition](serve-model-composition) - [Development Workflow](dev-workflow) +- [Passing Arguments to Applications](app-builder-guide) - [Ray Serve Dashboard](dash-serve-view) - [Production Guide](serve-in-production) - [Performance Tuning](performance) From 569a62af588025f879deaab86ef1c0b2820b54f0 Mon Sep 17 00:00:00 2001 From: Kai Fricke Date: Tue, 16 May 2023 21:58:23 +0100 Subject: [PATCH 418/424] [air/output] Add parameter columns to status table (#35388) Signed-off-by: Kai Fricke --- python/ray/air/config.py | 10 +-- python/ray/tune/experimental/output.py | 81 ++++++++++++++++++--- python/ray/tune/tests/output/test_output.py | 33 +++++++-- python/ray/tune/tune.py | 15 ++-- 4 files changed, 114 insertions(+), 25 deletions(-) diff --git a/python/ray/air/config.py b/python/ray/air/config.py index 964f5f9afcb8..c1fa930ea67d 100644 --- a/python/ray/air/config.py +++ b/python/ray/air/config.py @@ 
-727,12 +727,12 @@ class RunConfig: intermediate experiment progress. Defaults to CLIReporter if running in command-line, or JupyterNotebookReporter if running in a Jupyter notebook. - verbose: 0, 1, or 2. Verbosity mode. - 0 = silent, 1 = default, 2 = verbose. Defaults to 1. - If the ``RAY_AIR_NEW_OUTPUT=0`` environment variable is set, - uses the old verbosity settings: + verbose: 0, 1, 2, or 3. Verbosity mode. 0 = silent, 1 = only status updates, 2 = status and brief - results, 3 = status and detailed results. + results, 3 = status and detailed results. Defaults to 3. + If the ``RAY_AIR_NEW_OUTPUT=1`` environment variable is set, + uses the new context-aware verbosity settings: + 0 = silent, 1 = default, 2 = verbose. log_to_file: Log stdout and stderr to files in trial directories. If this is `False` (default), no files are written. If `true`, outputs are written to `trialdir/stdout` diff --git a/python/ray/tune/experimental/output.py b/python/ray/tune/experimental/output.py index bd7dcf851e4a..cbd75aa45dfe 100644 --- a/python/ray/tune/experimental/output.py +++ b/python/ray/tune/experimental/output.py @@ -25,6 +25,7 @@ import textwrap import time +from ray.tune.search.sample import Domain from ray.tune.utils.log import Verbosity try: @@ -119,6 +120,20 @@ def get_air_verbosity( return AirVerbosity(verbose_int) +def _infer_params(config: Dict[str, Any]) -> List[str]: + params = [] + flat_config = flatten_dict(config) + for key, val in flat_config.items(): + if isinstance(val, Domain): + params.append(key) + # Grid search is a special named field. Because we flattened + # the whole config, we look it up per string + if key.endswith("/grid_search"): + # Truncate `/grid_search` + params.append(key[:-12]) + return params + + def _get_time_str(start_time: float, current_time: float) -> Tuple[str, str]: """Get strings representing the current and elapsed time. 
@@ -263,17 +278,31 @@ def _max_len(value: Any, max_len: int = 20, wrap: bool = False) -> Any: return result -def _get_trial_info(trial: Trial, metric_keys: List[str]) -> List[str]: +def _get_trial_info( + trial: Trial, param_keys: List[str], metric_keys: List[str] +) -> List[str]: """Returns the following information about a trial: name | status | metrics... Args: trial: Trial to get information for. + param_keys: Names of parameters to include. metric_keys: Names of metrics to include. """ result = trial.last_result trial_info = [str(trial), trial.status] + + # params + trial_info.extend( + [ + _max_len( + unflattened_lookup(param, trial.config, default=None), + ) + for param in param_keys + ] + ) + # metrics trial_info.extend( [ _max_len( @@ -288,6 +317,7 @@ def _get_trial_info(trial: Trial, metric_keys: List[str]) -> List[str]: def _get_trial_table_data_per_status( status: str, trials: List[Trial], + param_keys: List[str], metric_keys: List[str], force_max_rows: bool = False, ) -> Optional[_PerStatusTrialTableData]: @@ -296,6 +326,7 @@ def _get_trial_table_data_per_status( Args: status: The trial status of interest. trials: all the trials of that status. + param_keys: *Ordered* list of parameters to be displayed in the table. metric_keys: *Ordered* list of metrics to be displayed in the table. Including both default and user defined. force_max_rows: Whether or not to enforce a max row number for this status. 
@@ -316,12 +347,13 @@ def _get_trial_table_data_per_status( remaining = len(trials) - max_row more_info = f"{remaining} more {status}" break - trial_infos.append(_get_trial_info(t, metric_keys)) + trial_infos.append(_get_trial_info(t, param_keys, metric_keys)) return _PerStatusTrialTableData(trial_infos, more_info) def _get_trial_table_data( trials: List[Trial], + param_keys: List[str], metric_keys: List[str], all_rows: bool = False, ) -> _TrialTableData: @@ -329,6 +361,7 @@ def _get_trial_table_data( Args: trials: List of trials for which progress is to be shown. + param_keys: Ordered list of parameters to be displayed in the table. metric_keys: Ordered list of metrics to be displayed in the table. Including both default and user defined. Will only be shown if at least one trial is having the key. @@ -356,18 +389,28 @@ def _get_trial_table_data( formatted_metric_columns = [ _max_len(k, max_len=max_column_length, wrap=True) for k in metric_keys ] - # Map to the abbreviated version if necessary. - header = ["Trial name", "status"] + [ - DEFAULT_COLUMNS[key] if key in DEFAULT_COLUMNS else key - for key in formatted_metric_columns + + formatted_param_columns = [ + _max_len(k, max_len=max_column_length, wrap=True) for k in param_keys ] + metric_header = [ + DEFAULT_COLUMNS[metric] if metric in DEFAULT_COLUMNS else formatted + for metric, formatted in zip(metric_keys, formatted_metric_columns) + ] + + param_header = formatted_param_columns + + # Map to the abbreviated version if necessary. 
+ header = ["Trial name", "status"] + param_header + metric_header + trial_data = list() for t_status in ORDER: trial_data_per_status = _get_trial_table_data_per_status( t_status, trials_by_state[t_status], - metric_keys=formatted_metric_columns, + param_keys=param_keys, + metric_keys=metric_keys, force_max_rows=not all_rows and len(trials) > max_trial_num_to_show, ) if trial_data_per_status: @@ -541,6 +584,7 @@ def _detect_reporter( num_samples: int, metric: Optional[str] = None, mode: Optional[str] = None, + config: Optional[Dict] = None, ): # TODO: Add JupyterNotebook and Ray Client case later. rich_enabled = bool(int(os.environ.get("RAY_AIR_RICH_LAYOUT", "0"))) @@ -548,9 +592,21 @@ def _detect_reporter( if rich_enabled: if not rich: raise ImportError("Please run `pip install rich`. ") - reporter = TuneRichReporter(verbosity, num_samples, metric, mode) + reporter = TuneRichReporter( + verbosity, + num_samples=num_samples, + metric=metric, + mode=mode, + config=config, + ) else: - reporter = TuneTerminalReporter(verbosity, num_samples, metric, mode) + reporter = TuneTerminalReporter( + verbosity, + num_samples=num_samples, + metric=metric, + mode=mode, + config=config, + ) else: if rich_enabled: logger.warning("`RAY_AIR_RICH_LAYOUT` is only effective with Tune usecase.") @@ -567,12 +623,14 @@ def __init__( num_samples: int, metric: Optional[str] = None, mode: Optional[str] = None, + config: Optional[Dict] = None, ): self._num_samples = num_samples self._metric = metric self._mode = mode # will be populated when first result comes in. 
self._inferred_metric = None + self._inferred_params = _infer_params(config) super(TuneReporterBase, self).__init__(verbosity=verbosity) def _get_overall_trial_progress_str(self, trials): @@ -609,7 +667,10 @@ def _get_heartbeat( all_metrics = list(DEFAULT_COLUMNS.keys()) + self._inferred_metric trial_table_data = _get_trial_table_data( - trials, all_metrics, all_rows=force_full_output + trials, + param_keys=self._inferred_params, + metric_keys=all_metrics, + all_rows=force_full_output, ) return result, trial_table_data diff --git a/python/ray/tune/tests/output/test_output.py b/python/ray/tune/tests/output/test_output.py index 5fc41d489a6b..4aa225d05205 100644 --- a/python/ray/tune/tests/output/test_output.py +++ b/python/ray/tune/tests/output/test_output.py @@ -3,6 +3,7 @@ from freezegun import freeze_time +from ray import tune from ray.tune.experimental.output import ( _get_time_str, _get_trials_by_state, @@ -13,6 +14,7 @@ _best_trial_str, _get_trial_table_data, _get_dict_as_table_data, + _infer_params, ) from ray.tune.experiment.trial import Trial @@ -135,7 +137,8 @@ def test_get_trial_info(): t.last_result = LAST_RESULT assert _get_trial_info( t, - [ + param_keys=[], + metric_keys=[ "episode_reward_mean", "episode_reward_max", "episode_reward_min", @@ -152,10 +155,11 @@ def test_get_trial_table_data_less_than_20(): t.trial_id = str(i) t.set_status(Trial.RUNNING) t.last_result = {"episode_reward_mean": 100 + i} + t.config = {"param": i} trials.append(t) - table_data = _get_trial_table_data(trials, ["episode_reward_mean"]) + table_data = _get_trial_table_data(trials, ["param"], ["episode_reward_mean"]) header = table_data.header - assert header == ["Trial name", "status", "reward"] + assert header == ["Trial name", "status", "param", "reward"] table_data = table_data.data assert len(table_data) == 1 # only the running category assert len(table_data[0].trial_infos) == 20 @@ -171,10 +175,11 @@ def test_get_trial_table_data_more_than_20(): t.trial_id = str(i) 
t.set_status(status) t.last_result = {"episode_reward_mean": 100 + i} + t.config = {"param": i} trials.append(t) - table_data = _get_trial_table_data(trials, ["episode_reward_mean"]) + table_data = _get_trial_table_data(trials, ["param"], ["episode_reward_mean"]) header = table_data.header - assert header == ["Trial name", "status", "reward"] + assert header == ["Trial name", "status", "param", "reward"] table_data = table_data.data assert len(table_data) == 3 # only the running category for i in range(3): @@ -184,6 +189,24 @@ def test_get_trial_table_data_more_than_20(): assert table_data[2].more_info == "5 more PENDING" +def test_infer_params(): + assert _infer_params({}) == [] + assert _infer_params({"some": "val"}) == [] + assert _infer_params({"some": "val", "param": tune.uniform(0, 1)}) == ["param"] + assert _infer_params({"some": "val", "param": tune.grid_search([0, 1])}) == [ + "param" + ] + assert sorted( + _infer_params( + { + "some": "val", + "param": tune.grid_search([0, 1]), + "other": tune.choice([0, 1]), + } + ) + ) == ["other", "param"] + + def test_result_table_no_divison(): data = _get_dict_as_table_data( { diff --git a/python/ray/tune/tune.py b/python/ray/tune/tune.py index d6da633d7fd7..877d954439ac 100644 --- a/python/ray/tune/tune.py +++ b/python/ray/tune/tune.py @@ -449,11 +449,12 @@ def run( training workers. checkpoint_upload_from_workers: Whether to upload checkpoint files directly from distributed training workers. - verbose: 0, 1, or 2. Verbosity mode. - 0 = silent, 1 = default, 2 = verbose. Defaults to 1. - If ``RAY_AIR_NEW_OUTPUT=0``, uses the old verbosity settings: + verbose: 0, 1, 2, or 3. Verbosity mode. 0 = silent, 1 = only status updates, 2 = status and brief - results, 3 = status and detailed results. + results, 3 = status and detailed results. Defaults to 3. + If the ``RAY_AIR_NEW_OUTPUT=1`` environment variable is set, + uses the new context-aware verbosity settings: + 0 = silent, 1 = default, 2 = verbose. 
progress_reporter: Progress reporter for reporting intermediate experiment progress. Defaults to CLIReporter if running in command-line, or JupyterNotebookReporter if running in @@ -1019,7 +1020,11 @@ class and registered trainables. ) else: air_progress_reporter = _detect_air_reporter( - air_verbosity, search_alg.total_samples, metric=metric, mode=mode + air_verbosity, + search_alg.total_samples, + metric=metric, + mode=mode, + config=config, ) # rich live context manager has to be called encapsulating From 58a6a42f4d725af0ee2782ecccf7a55c98d27c46 Mon Sep 17 00:00:00 2001 From: Cade Daniel Date: Tue, 16 May 2023 14:59:56 -0700 Subject: [PATCH 419/424] Revert "Add "all" option for SessionName (#35303)" (#35403) This reverts commit 496024df480c74e83e31363155341605aa28b0bf. --- .../modules/metrics/dashboards/default_dashboard_panels.py | 2 +- .../metrics/dashboards/default_grafana_dashboard_base.json | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/dashboard/modules/metrics/dashboards/default_dashboard_panels.py b/dashboard/modules/metrics/dashboards/default_dashboard_panels.py index fb85030481d7..e8fc7482911c 100644 --- a/dashboard/modules/metrics/dashboards/default_dashboard_panels.py +++ b/dashboard/modules/metrics/dashboards/default_dashboard_panels.py @@ -393,6 +393,6 @@ def max_plus_pending(max_resource, pending_resource): name="DEFAULT", default_uid="rayDefaultDashboard", panels=DEFAULT_GRAFANA_PANELS, - standard_global_filters=['SessionName=~"$SessionName"'], + standard_global_filters=['SessionName="$SessionName"'], base_json_file_name="default_grafana_dashboard_base.json", ) diff --git a/dashboard/modules/metrics/dashboards/default_grafana_dashboard_base.json b/dashboard/modules/metrics/dashboards/default_grafana_dashboard_base.json index 72b0c3cc04da..47d40a198de4 100644 --- a/dashboard/modules/metrics/dashboards/default_grafana_dashboard_base.json +++ b/dashboard/modules/metrics/dashboards/default_grafana_dashboard_base.json @@ -25,7 
+25,7 @@ "templating": { "list": [ { - "allValue": ".+", + "allValue": null, "current": { "selected": false }, @@ -34,7 +34,7 @@ "description": "Filter queries to specific ray sessions.", "error": null, "hide": 0, - "includeAll": true, + "includeAll": false, "label": null, "multi": false, "name": "SessionName", From b4c00de6efb55063799a6fe762a38c9efc6ef193 Mon Sep 17 00:00:00 2001 From: Sihan Wang Date: Tue, 16 May 2023 15:05:56 -0700 Subject: [PATCH 420/424] [Serve] Mutliplexed information report impl (#35372) - Multiplexed metrics. - Pass multiplexed information into controller and make it available at `RunningReplicaInfo`. --- python/ray/serve/_private/client.py | 11 ++ python/ray/serve/_private/common.py | 9 ++ python/ray/serve/_private/constants.py | 3 + python/ray/serve/_private/deployment_state.py | 53 +++++++++ python/ray/serve/controller.py | 9 ++ python/ray/serve/multiplex.py | 93 +++++++++++++-- python/ray/serve/tests/test_metrics.py | 43 +++++++ python/ray/serve/tests/test_multiplex.py | 109 +++++++++++++++++- 8 files changed, 319 insertions(+), 11 deletions(-) diff --git a/python/ray/serve/_private/client.py b/python/ray/serve/_private/client.py index d8f04cb3fa2d..fd4b9630e9aa 100644 --- a/python/ray/serve/_private/client.py +++ b/python/ray/serve/_private/client.py @@ -13,6 +13,7 @@ StatusOverview, ApplicationStatus, DeploymentStatusInfo, + MultiplexedReplicaInfo, ) from ray.serve.config import DeploymentConfig, HTTPOptions from ray.serve._private.constants import ( @@ -526,3 +527,13 @@ def log_deployment_ready(self, name: str, version: str, url: str, tag: str) -> N f"Deployment '{name}{':'+version if version else ''}' is ready" f"{url_part}. {tag}" ) + + @_ensure_connected + def record_multiplexed_replica_info(self, info: MultiplexedReplicaInfo): + """Record multiplexed replica information for replica. + + Args: + info: MultiplexedReplicaInfo including deployment name, replica tag and + model ids. 
+ """ + self._controller.record_multiplexed_replica_info.remote(info) diff --git a/python/ray/serve/_private/common.py b/python/ray/serve/_private/common.py index e2e0954eba62..b4eefd485528 100644 --- a/python/ray/serve/_private/common.py +++ b/python/ray/serve/_private/common.py @@ -325,6 +325,7 @@ class RunningReplicaInfo: actor_handle: ActorHandle max_concurrent_queries: int is_cross_language: bool = False + multiplexed_model_ids: List[str] = field(default_factory=list) def __post_init__(self): # Set hash value when object is constructed. @@ -341,6 +342,7 @@ def __post_init__(self): str(self.actor_handle._actor_id), str(self.max_concurrent_queries), str(self.is_cross_language), + str(self.multiplexed_model_ids), ] ) ) @@ -377,3 +379,10 @@ class HTTPProxyStatus(str, Enum): class ServeComponentType(str, Enum): DEPLOYMENT = "deployment" + + +@dataclass +class MultiplexedReplicaInfo: + deployment_name: str + replica_tag: str + model_ids: List[str] diff --git a/python/ray/serve/_private/constants.py b/python/ray/serve/_private/constants.py index c92ae0937947..5c21943b0e3b 100644 --- a/python/ray/serve/_private/constants.py +++ b/python/ray/serve/_private/constants.py @@ -140,6 +140,9 @@ # still replicas in the RECOVERING state. RECOVERING_LONG_POLL_BROADCAST_TIMEOUT_S = 10.0 +# Minimum duration to wait until broadcasting model IDs. 
+PUSH_MULTIPLEXED_MODEL_IDS_INTERVAL_S = 1.0 + class ServeHandleType(str, Enum): SYNC = "SYNC" diff --git a/python/ray/serve/_private/deployment_state.py b/python/ray/serve/_private/deployment_state.py index 08629fd507c5..f6eea3b4992c 100644 --- a/python/ray/serve/_private/deployment_state.py +++ b/python/ray/serve/_private/deployment_state.py @@ -31,6 +31,7 @@ ReplicaTag, RunningReplicaInfo, ReplicaState, + MultiplexedReplicaInfo, ) from ray.serve.schema import ( DeploymentDetails, @@ -765,6 +766,7 @@ def __init__( state=ReplicaState.STARTING, start_time_s=0, ) + self._multiplexed_model_ids: List = [] def get_running_replica_info(self) -> RunningReplicaInfo: return RunningReplicaInfo( @@ -773,8 +775,17 @@ def get_running_replica_info(self) -> RunningReplicaInfo: actor_handle=self._actor.actor_handle, max_concurrent_queries=self._actor.max_concurrent_queries, is_cross_language=self._actor.is_cross_language, + multiplexed_model_ids=self.multiplexed_model_ids, ) + def record_multiplexed_model_ids(self, multiplexed_model_ids: List[str]): + """Record the multiplexed model ids for this replica.""" + self._multiplexed_model_ids = multiplexed_model_ids + + @property + def multiplexed_model_ids(self) -> List[str]: + return self._multiplexed_model_ids + @property def actor_details(self) -> ReplicaDetails: return self._actor_details @@ -1103,6 +1114,10 @@ def __init__( tag_keys=("deployment", "replica", "application"), ) + # Whether the multiplexed model ids have been updated since the last + # time we checked. + self._multiplexed_model_ids_updated = False + def should_autoscale(self) -> bool: """ Check if the deployment is under autoscaling @@ -1853,8 +1868,12 @@ def update(self) -> Tuple[bool, bool]: # Check the state of existing replicas and transition if necessary. running_replicas_changed |= self._check_and_update_replicas() + # Check if the model_id has changed. 
+ running_replicas_changed |= self._multiplexed_model_ids_updated + if running_replicas_changed: self._notify_running_replicas_changed() + self._multiplexed_model_ids_updated = False deleted, any_replicas_recovering = self._check_curr_status() except Exception: @@ -1866,6 +1885,23 @@ def update(self) -> Tuple[bool, bool]: return deleted, any_replicas_recovering + def record_multiplexed_model_ids( + self, replica_name: str, multiplexed_model_ids: List[str] + ) -> None: + """Records the multiplexed model IDs of a replica. + + Args: + replica_name: Name of the replica. + multiplexed_model_ids: List of model IDs that replica is serving. + """ + # Find the replica + for replica in self._replicas.get(): + if replica.replica_tag == replica_name: + replica.record_multiplexed_model_ids(multiplexed_model_ids) + self._multiplexed_model_ids_updated = True + break + logger.warn(f"Replia {replica_name} not found in deployment {self._name}") + def _stop_one_running_replica_for_testing(self): running_replicas = self._replicas.pop(states=[ReplicaState.RUNNING]) replica_to_stop = running_replicas.pop() @@ -2390,3 +2426,20 @@ def _record_deployment_usage(self): record_extra_usage_tag( TagKey.SERVE_NUM_GPU_DEPLOYMENTS, str(num_gpu_deployments) ) + + def record_multiplexed_replica_info(self, info: MultiplexedReplicaInfo): + """ + Record multiplexed model ids for a multiplexed replica. + + Args: + info: Multiplexed replica info including deployment name, + replica tag and model ids. + """ + if info.deployment_name not in self._deployment_states: + logger.error( + f"Deployment {info.deployment_name} not found in state manager." 
+ ) + return + self._deployment_states[info.deployment_name].record_multiplexed_model_ids( + info.replica_tag, info.model_ids + ) diff --git a/python/ray/serve/controller.py b/python/ray/serve/controller.py index 1367ace05d42..360a2ae57a97 100644 --- a/python/ray/serve/controller.py +++ b/python/ray/serve/controller.py @@ -23,6 +23,7 @@ RunningReplicaInfo, StatusOverview, ServeDeployMode, + MultiplexedReplicaInfo, ) from ray.serve.config import HTTPOptions from ray.serve._private.constants import ( @@ -796,6 +797,14 @@ def delete_apps(self, names: Iterable[str]): self.application_state_manager.delete_application(name) self.delete_deployments(deployments_to_delete) + def record_multiplexed_replica_info(self, info: MultiplexedReplicaInfo): + """Record multiplexed model ids for a replica of deployment + Args: + info: MultiplexedReplicaInfo including deployment name, replica tag and + model ids. + """ + self.deployment_state_manager.record_multiplexed_replica_info(info) + @ray.remote(num_cpus=0, max_calls=1) def deploy_serve_application( diff --git a/python/ray/serve/multiplex.py b/python/ray/serve/multiplex.py index 0cfed15bf62d..a45887d2d473 100644 --- a/python/ray/serve/multiplex.py +++ b/python/ray/serve/multiplex.py @@ -1,10 +1,22 @@ -from ray._private.async_compat import sync_to_async +import asyncio from collections import OrderedDict -from typing import Any, Callable -import logging -from ray.serve._private.constants import SERVE_LOGGER_NAME import inspect -import asyncio +import logging +import time +from typing import Any, Callable + +from ray._private.async_compat import sync_to_async +from ray.serve._private.constants import ( + SERVE_LOGGER_NAME, + PUSH_MULTIPLEXED_MODEL_IDS_INTERVAL_S, +) +from ray.serve.context import ( + get_global_client, + get_internal_replica_context, +) +from ray.serve._private.common import MultiplexedReplicaInfo +from ray._private.utils import run_background_task +from ray.serve import metrics logger = 
logging.getLogger(SERVE_LOGGER_NAME) @@ -40,9 +52,46 @@ def __init__( per replica. """ self.models = OrderedDict() - self._func = model_load_func - self.self_arg = self_arg - self.max_num_models_per_replica = max_num_models_per_replica + self._func: Callable = model_load_func + self.self_arg: Any = self_arg + self.max_num_models_per_replica: int = max_num_models_per_replica + + self.model_load_latency_s = metrics.Gauge( + "serve_multiplexed_model_load_latency_s", + description="The time it takes to load a model.", + ) + self.model_unload_latency_s = metrics.Gauge( + "serve_multiplexed_model_unload_latency_s", + description="The time it takes to unload a model.", + ) + self.num_models = metrics.Gauge( + "serve_num_multiplexed_models", + description="The number of models loaded on the current replica.", + ) + + self.models_unload_counter = metrics.Counter( + "serve_multiplexed_models_unload_counter", + description="The counter for unloaded models on the current replica.", + ) + self.models_load_counter = metrics.Counter( + "serve_multiplexed_models_load_counter", + description="The counter for loaded models on the current replica.", + ) + + context = get_internal_replica_context() + if context is None: + raise RuntimeError( + "Fail to retrieve serve replica context, the model multiplexer ", + "can only be used within `Deployment`.", + ) + self._deployment_name: str = context.deployment + self._replica_tag: str = context.replica_tag + + # Whether to push the multiplexed replica info to the controller. + self._push_multiplexed_replica_info: bool = False + + # Push the model IDs to the controller periodically. + run_background_task(self._push_model_ids()) async def load_model(self, model_id: str) -> Any: """Load the model if it is not loaded yet, and return the user-constructed model object. 
@@ -60,6 +109,8 @@ async def load_model(self, model_id: str) -> Any: if not model_id: raise ValueError("The model ID cannot be empty.") + self.num_models.set(len(self.models)) + if model_id in self.models: # Move the model to the end of the OrderedDict to ensure LRU caching. model = self.models.pop(model_id) @@ -72,13 +123,20 @@ async def load_model(self, model_id: str) -> Any: and len(self.models) >= self.max_num_models_per_replica ): # Unload the least recently used model. + self.models_unload_counter.inc() + unload_start_time = time.time() await self.unload_model() + self.model_unload_latency_s.set(time.time() - unload_start_time) # Load the model. logger.info(f"Loading model '{model_id}'.") + self.models_load_counter.inc() + load_start_time = time.time() if self.self_arg is None: self.models[model_id] = await self._func(model_id) else: self.models[model_id] = await self._func(self.self_arg, model_id) + self._push_multiplexed_replica_info = True + self.model_load_latency_s.set(time.time() - load_start_time) return self.models[model_id] async def unload_model(self) -> None: @@ -94,3 +152,22 @@ async def unload_model(self) -> None: else: await sync_to_async(model.__del__)() setattr(model, "__del__", lambda _: None) + + async def _push_model_ids(self): + """Push the multiplexed replica info to the controller.""" + + while True: + try: + if self._push_multiplexed_replica_info: + get_global_client().record_multiplexed_replica_info( + MultiplexedReplicaInfo( + self._deployment_name, self._replica_tag, self.models.keys() + ) + ) + self._push_multiplexed_replica_info = False + except Exception as e: + logger.warning( + "Failed to push the multiplexed replica info " + f"to the controller. 
Error: {e}" + ) + await asyncio.sleep(PUSH_MULTIPLEXED_MODEL_IDS_INTERVAL_S) diff --git a/python/ray/serve/tests/test_metrics.py b/python/ray/serve/tests/test_metrics.py index f9e1ecf6b91c..15f393ecd30d 100644 --- a/python/ray/serve/tests/test_metrics.py +++ b/python/ray/serve/tests/test_metrics.py @@ -760,6 +760,49 @@ async def __call__(self): self.verify_metrics(histogram_metrics[0], expected_metrics) +def test_multiplexed_metrics(serve_start_shutdown): + """Tests multiplexed API corresponding metrics.""" + + @serve.deployment + class Model: + @serve.multiplexed(max_num_models_per_replica=2) + async def get_model(self, model_id: str): + return model_id + + async def __call__(self, model_id: str): + await self.get_model(model_id) + return + + handle = serve.run(Model.bind(), name="app", route_prefix="/app") + handle.remote("model1") + handle.remote("model2") + # Trigger model eviction. + handle.remote("model3") + expected_metrics = [ + "serve_multiplexed_model_load_latency_s", + "serve_multiplexed_model_unload_latency_s", + "serve_num_multiplexed_models", + "serve_multiplexed_models_load_counter", + "serve_multiplexed_models_unload_counter", + ] + + def verify_metrics(): + try: + resp = requests.get("http://127.0.0.1:9999").text + # Requests will fail if we are crashing the controller + except requests.ConnectionError: + return False + for metric in expected_metrics: + assert metric in resp + return True + + wait_for_condition( + verify_metrics, + timeout=20, + retry_interval_ms=1000, + ) + + def test_actor_summary(serve_instance): @serve.deployment def f(): diff --git a/python/ray/serve/tests/test_multiplex.py b/python/ray/serve/tests/test_multiplex.py index c435c1523925..405f7e448c40 100644 --- a/python/ray/serve/tests/test_multiplex.py +++ b/python/ray/serve/tests/test_multiplex.py @@ -1,13 +1,36 @@ import pytest +from typing import List import ray from ray import serve from ray.serve.multiplex import _ModelMultiplexWrapper +from ray.serve.context import 
get_internal_replica_context +from ray._private.test_utils import async_wait_for_condition, wait_for_condition +from ray.serve._private.common import RunningReplicaInfo + + +@pytest.fixture() +def start_serve_with_context(): + serve.start() + ray.serve.context._set_internal_replica_context( + "fake_deployment", "fake_replica_tag", None, None, None + ) + yield + serve.shutdown() class TestMultiplexWrapper: + def test_failed_to_get_replica_context(self): + async def model_load_func(model_id: str): + return model_id + + with pytest.raises( + RuntimeError, match="Fail to retrieve serve replica context" + ): + _ModelMultiplexWrapper(model_load_func, None, max_num_models_per_replica=2) + @pytest.mark.asyncio - async def test_multiplex_wrapper(self): + async def test_multiplex_wrapper(self, start_serve_with_context): """Test multiplex wrapper with LRU caching.""" async def model_load_func(model_id: str): @@ -16,27 +39,42 @@ async def model_load_func(model_id: str): multiplexer = _ModelMultiplexWrapper( model_load_func, None, max_num_models_per_replica=2 ) + + # Check the replica info pushed + def check_info_pushed(): + return multiplexer._push_multiplexed_replica_info is False + # Load model1 await multiplexer.load_model("1") assert multiplexer.models == {"1": "1"} + assert multiplexer._push_multiplexed_replica_info + await async_wait_for_condition(check_info_pushed) + # Load model2 await multiplexer.load_model("2") assert multiplexer.models == {"1": "1", "2": "2"} + assert multiplexer._push_multiplexed_replica_info + await async_wait_for_condition(check_info_pushed) # Load model3, model1 should be unloaded await multiplexer.load_model("3") assert multiplexer.models == {"2": "2", "3": "3"} + assert multiplexer._push_multiplexed_replica_info + await async_wait_for_condition(check_info_pushed) # reload model2, model2 should be moved to the end of the LRU cache + # _push_multiplexed_replica_info should be False. 
await multiplexer.load_model("2") assert multiplexer.models == {"3": "3", "2": "2"} + assert multiplexer._push_multiplexed_replica_info is False # Load model4, model3 should be unloaded await multiplexer.load_model("4") + assert multiplexer._push_multiplexed_replica_info assert multiplexer.models == {"2": "2", "4": "4"} @pytest.mark.asyncio - async def test_bad_call_multiplexed_func(self): + async def test_bad_call_multiplexed_func(self, start_serve_with_context): """Test bad call to multiplexed function""" async def model_load_func(model_id: str): @@ -51,7 +89,7 @@ async def model_load_func(model_id: str): await multiplexer.load_model() @pytest.mark.asyncio - async def test_unload_model_call_del(self): + async def test_unload_model_call_del(self, start_serve_with_context): class MyModel: def __init__(self, model_id): self.model_id = model_id @@ -157,6 +195,71 @@ def test_get_multiplexed_model_id(self): assert serve.get_multiplexed_model_id() == "1" +def test_multiplexed_replica_info(): + """Test MultiplexedReplicaInfo is passed to the controller & router""" + + @serve.deployment + class MyModel: + @serve.multiplexed(max_num_models_per_replica=2) + async def get_model(self, model_id: str): + return + + async def __call__(self, model_id: str): + _ = await self.get_model(model_id) + context = get_internal_replica_context() + return (context.deployment, context.replica_tag) + + handle = serve.run(MyModel.bind()) + deployment, replica_tag = ray.get(handle.remote("model1")) + + def check_replica_information( + replicas: List[RunningReplicaInfo], + deployment: str, + replica_tag: str, + model_ids: List[str], + ): + for replica in replicas: + assert replica.deployment_name == deployment + assert replica.replica_tag == replica_tag + assert list(replica.multiplexed_model_ids) == model_ids + return True + + wait_for_condition( + check_replica_information, + replicas=handle.router._replica_set.in_flight_queries.keys(), + deployment=deployment, + replica_tag=replica_tag, + 
model_ids=[ + "model1", + ], + ) + + ray.get(handle.remote("model2")) + wait_for_condition( + check_replica_information, + replicas=handle.router._replica_set.in_flight_queries.keys(), + deployment=deployment, + replica_tag=replica_tag, + model_ids=[ + "model1", + "model2", + ], + ) + + # LRU remove the model1 + ray.get(handle.remote("model3")) + wait_for_condition( + check_replica_information, + replicas=handle.router._replica_set.in_flight_queries.keys(), + deployment=deployment, + replica_tag=replica_tag, + model_ids=[ + "model2", + "model3", + ], + ) + + if __name__ == "__main__": import sys From 5911cbf5e6654046c077f65838d2dac03bd2ebdf Mon Sep 17 00:00:00 2001 From: Ricky Xu Date: Wed, 17 May 2023 06:12:25 +0800 Subject: [PATCH 421/424] [core][state] Move state API out of experimental (#35318) This is part of effort to make state API no longer experimental: Move everything under ray/experimental/state into ray/util/state Declare state API's python SDK to be DeveloperAPI, CLIs commands to be Stable. Make all imports from ray.experimental.state.api to ray.util.state: from ray.util.state import list_tasks # works ... Forward importing from ray.experimental.state to ray.utils.state such that existing users will work. 
from ray.experimental.state.api import list_tasks # old way works, with warning Add warning and telemetry for the deprecating import path (ray.experimental.state) --- dashboard/modules/event/tests/test_event.py | 2 +- dashboard/modules/job/tests/test_job_agent.py | 2 +- dashboard/modules/job/tests/test_sdk.py | 2 +- dashboard/modules/log/log_manager.py | 6 +- .../modules/serve/tests/test_serve_agent.py | 2 +- dashboard/modules/state/state_head.py | 8 +- dashboard/state_aggregator.py | 6 +- dashboard/tests/test_dashboard.py | 6 +- doc/source/ray-contribute/stability.rst | 2 + .../ray-observability/api/state/api.rst | 70 +- .../ray-observability/api/state/cli.rst | 12 +- .../ray-observability/state/state-api.rst | 156 +- python/ray/_private/state_api_test_utils.py | 4 +- python/ray/data/tests/test_formats.py | 2 +- python/ray/experimental/state/api.py | 1431 +-------------- python/ray/experimental/state/common.py | 1607 +---------------- python/ray/experimental/state/custom_types.py | 102 +- python/ray/experimental/state/exception.py | 33 +- python/ray/experimental/state/state_cli.py | 1310 +------------- .../ray/experimental/state/state_manager.py | 459 +---- python/ray/experimental/state/util.py | 49 +- python/ray/scripts/scripts.py | 2 +- .../serve/tests/test_autoscaling_policy.py | 2 +- python/ray/serve/tests/test_cli.py | 4 +- .../serve/tests/test_controller_recovery.py | 3 +- python/ray/serve/tests/test_metrics.py | 3 +- python/ray/serve/tests/test_standalone.py | 2 +- python/ray/serve/tests/test_standalone2.py | 5 +- python/ray/tests/test_actor_advanced.py | 4 +- python/ray/tests/test_actor_state_metrics.py | 2 +- python/ray/tests/test_cancel.py | 3 +- python/ray/tests/test_cli.py | 3 +- python/ray/tests/test_client_builder.py | 2 +- python/ray/tests/test_exit_observability.py | 2 +- python/ray/tests/test_failure_4.py | 2 +- python/ray/tests/test_memory_pressure.py | 2 +- python/ray/tests/test_metrics_agent.py | 2 +- python/ray/tests/test_node_manager.py | 4 
+- python/ray/tests/test_out_of_disk_space.py | 2 +- python/ray/tests/test_state_api.py | 18 +- python/ray/tests/test_state_api_2.py | 27 +- python/ray/tests/test_state_api_log.py | 12 +- python/ray/tests/test_state_api_summary.py | 8 +- python/ray/tests/test_task_events.py | 4 +- python/ray/tests/test_task_events_2.py | 5 +- python/ray/util/state/__init__.py | 50 + python/ray/util/state/api.py | 1443 +++++++++++++++ python/ray/util/state/common.py | 1605 ++++++++++++++++ python/ray/util/state/custom_types.py | 100 + python/ray/util/state/exception.py | 18 + python/ray/util/state/state_cli.py | 1308 ++++++++++++++ python/ray/util/state/state_manager.py | 457 +++++ python/ray/util/state/util.py | 61 + .../benchmarks/distributed/dashboard_test.py | 2 +- .../benchmarks/distributed/test_many_tasks.py | 2 +- .../stress_tests/test_state_api_scale.py | 2 +- .../test_state_api_with_other_tests.py | 5 +- .../algorithms/tests/test_worker_failures.py | 2 +- rllib/tests/test_node_failure.py | 2 +- rllib/utils/tests/test_actor_manager.py | 2 +- src/ray/protobuf/usage.proto | 2 + 61 files changed, 5285 insertions(+), 5170 deletions(-) create mode 100644 python/ray/util/state/__init__.py create mode 100644 python/ray/util/state/api.py create mode 100644 python/ray/util/state/common.py create mode 100644 python/ray/util/state/custom_types.py create mode 100644 python/ray/util/state/exception.py create mode 100644 python/ray/util/state/state_cli.py create mode 100644 python/ray/util/state/state_manager.py create mode 100644 python/ray/util/state/util.py diff --git a/dashboard/modules/event/tests/test_event.py b/dashboard/modules/event/tests/test_event.py index c0d7e79fd230..07bae50ddea3 100644 --- a/dashboard/modules/event/tests/test_event.py +++ b/dashboard/modules/event/tests/test_event.py @@ -17,7 +17,7 @@ import numpy as np import ray -from ray.experimental.state.api import list_cluster_events +from ray.util.state import list_cluster_events from ray._private.utils import 
binary_to_hex from ray.cluster_utils import AutoscalingCluster from ray._private.event.event_logger import get_event_logger diff --git a/dashboard/modules/job/tests/test_job_agent.py b/dashboard/modules/job/tests/test_job_agent.py index 159577d02544..910e740923a5 100644 --- a/dashboard/modules/job/tests/test_job_agent.py +++ b/dashboard/modules/job/tests/test_job_agent.py @@ -33,7 +33,7 @@ ) from ray.dashboard.tests.conftest import * # noqa from ray.runtime_env.runtime_env import RuntimeEnv, RuntimeEnvConfig -from ray.experimental.state.api import list_nodes +from ray.util.state import list_nodes from ray.job_submission import JobStatus, JobSubmissionClient from ray.tests.conftest import _ray_start from ray.dashboard.modules.job.job_head import JobAgentSubmissionClient diff --git a/dashboard/modules/job/tests/test_sdk.py b/dashboard/modules/job/tests/test_sdk.py index 516b87468edd..ae30c2be7f42 100644 --- a/dashboard/modules/job/tests/test_sdk.py +++ b/dashboard/modules/job/tests/test_sdk.py @@ -27,7 +27,7 @@ from ray.tests.conftest import _ray_start import ray import ray.experimental.internal_kv as kv -from ray.experimental.state.api import list_nodes +from ray.util.state import list_nodes def _check_job_succeeded(client: JobSubmissionClient, job_id: str) -> bool: diff --git a/dashboard/modules/log/log_manager.py b/dashboard/modules/log/log_manager.py index b5533903eb9b..cfdea8f25840 100644 --- a/dashboard/modules/log/log_manager.py +++ b/dashboard/modules/log/log_manager.py @@ -5,13 +5,13 @@ from typing import List, Optional, Dict, AsyncIterable, Tuple, Callable from ray.dashboard.modules.job.common import JOB_LOGS_PATH_TEMPLATE -from ray.experimental.state.common import ( +from ray.util.state.common import ( GetLogOptions, protobuf_to_task_state_dict, DEFAULT_RPC_TIMEOUT, ) -from ray.experimental.state.exception import DataSourceUnavailable -from ray.experimental.state.state_manager import StateDataSourceClient +from ray.util.state.exception import 
DataSourceUnavailable +from ray.util.state.state_manager import StateDataSourceClient # TODO(sang): Remove the usage of this class. from ray.dashboard.datacenter import DataSource diff --git a/dashboard/modules/serve/tests/test_serve_agent.py b/dashboard/modules/serve/tests/test_serve_agent.py index 9ef184594b88..6f2409e71b57 100644 --- a/dashboard/modules/serve/tests/test_serve_agent.py +++ b/dashboard/modules/serve/tests/test_serve_agent.py @@ -10,7 +10,7 @@ from ray import serve from ray._private.test_utils import wait_for_condition import ray._private.ray_constants as ray_constants -from ray.experimental.state.api import list_actors +from ray.util.state import list_actors from ray.serve._private.constants import SERVE_NAMESPACE, MULTI_APP_MIGRATION_MESSAGE from ray.serve.tests.conftest import * # noqa: F401 F403 from ray.serve.schema import ServeInstanceDetails diff --git a/dashboard/modules/state/state_head.py b/dashboard/modules/state/state_head.py index df042e9c0ad0..8ac6bd8c9da5 100644 --- a/dashboard/modules/state/state_head.py +++ b/dashboard/modules/state/state_head.py @@ -20,7 +20,7 @@ from ray.dashboard.optional_utils import rest_response from ray.dashboard.state_aggregator import StateAPIManager from ray.dashboard.utils import Change -from ray.experimental.state.common import ( +from ray.util.state.common import ( RAY_MAX_LIMIT_FROM_API_SERVER, ListApiOptions, GetLogOptions, @@ -32,9 +32,9 @@ DEFAULT_LIMIT, DEFAULT_LOG_LIMIT, ) -from ray.experimental.state.exception import DataSourceUnavailable -from ray.experimental.state.state_manager import StateDataSourceClient -from ray.experimental.state.util import convert_string_to_type +from ray.util.state.exception import DataSourceUnavailable +from ray.util.state.state_manager import StateDataSourceClient +from ray.util.state.util import convert_string_to_type logger = logging.getLogger(__name__) diff --git a/dashboard/state_aggregator.py b/dashboard/state_aggregator.py index 46b8987cfd68..d3e978f87b62 
100644 --- a/dashboard/state_aggregator.py +++ b/dashboard/state_aggregator.py @@ -12,7 +12,7 @@ import ray.dashboard.memory_utils as memory_utils -from ray.experimental.state.common import ( +from ray.util.state.common import ( protobuf_message_to_dict, ActorState, JobState, @@ -38,12 +38,12 @@ PredicateType, protobuf_to_task_state_dict, ) -from ray.experimental.state.state_manager import ( +from ray.util.state.state_manager import ( DataSourceUnavailable, StateDataSourceClient, ) from ray.runtime_env import RuntimeEnv -from ray.experimental.state.util import convert_string_to_type +from ray.util.state.util import convert_string_to_type logger = logging.getLogger(__name__) diff --git a/dashboard/tests/test_dashboard.py b/dashboard/tests/test_dashboard.py index 3b8937fd7956..50496c32ab43 100644 --- a/dashboard/tests/test_dashboard.py +++ b/dashboard/tests/test_dashboard.py @@ -39,9 +39,9 @@ import ray.scripts.scripts as scripts from ray.dashboard import dashboard from ray.dashboard.head import DashboardHead -from ray.experimental.state.api import StateApiClient -from ray.experimental.state.common import ListApiOptions, StateResource -from ray.experimental.state.exception import ServerUnavailable +from ray.util.state import StateApiClient +from ray.util.state.common import ListApiOptions, StateResource +from ray.util.state.exception import ServerUnavailable from ray.experimental.internal_kv import _initialize_internal_kv from unittest.mock import MagicMock from ray.dashboard.utils import DashboardHeadModule diff --git a/doc/source/ray-contribute/stability.rst b/doc/source/ray-contribute/stability.rst index 136ac4aa44d6..a4676b93e1f9 100644 --- a/doc/source/ray-contribute/stability.rst +++ b/doc/source/ray-contribute/stability.rst @@ -42,6 +42,8 @@ but **may** include backwards-incompatible changes to beta components. Backwards-incompatible changes **must** be made only after a reasonable deprecation period to provide users with an opportunity to migrate their code. 
+.. _api-stability-stable: + Stable ~~~~~~ diff --git a/doc/source/ray-observability/api/state/api.rst b/doc/source/ray-observability/api/state/api.rst index eda123718a72..5056422e5bd7 100644 --- a/doc/source/ray-observability/api/state/api.rst +++ b/doc/source/ray-observability/api/state/api.rst @@ -22,9 +22,9 @@ Summary APIs .. autosummary:: :toctree: doc/ - ray.experimental.state.api.summarize_actors - ray.experimental.state.api.summarize_objects - ray.experimental.state.api.summarize_tasks + ray.util.state.summarize_actors + ray.util.state.summarize_objects + ray.util.state.summarize_tasks List APIs ~~~~~~~~~~ @@ -32,14 +32,14 @@ List APIs .. autosummary:: :toctree: doc/ - ray.experimental.state.api.list_actors - ray.experimental.state.api.list_placement_groups - ray.experimental.state.api.list_nodes - ray.experimental.state.api.list_jobs - ray.experimental.state.api.list_workers - ray.experimental.state.api.list_tasks - ray.experimental.state.api.list_objects - ray.experimental.state.api.list_runtime_envs + ray.util.state.list_actors + ray.util.state.list_placement_groups + ray.util.state.list_nodes + ray.util.state.list_jobs + ray.util.state.list_workers + ray.util.state.list_tasks + ray.util.state.list_objects + ray.util.state.list_runtime_envs Get APIs ~~~~~~~~~ @@ -47,12 +47,12 @@ Get APIs .. autosummary:: :toctree: doc/ - ray.experimental.state.api.get_actor - ray.experimental.state.api.get_placement_group - ray.experimental.state.api.get_node - ray.experimental.state.api.get_worker - ray.experimental.state.api.get_task - ray.experimental.state.api.get_objects + ray.util.state.get_actor + ray.util.state.get_placement_group + ray.util.state.get_node + ray.util.state.get_worker + ray.util.state.get_task + ray.util.state.get_objects Log APIs ~~~~~~~~ @@ -60,8 +60,8 @@ Log APIs .. autosummary:: :toctree: doc/ - ray.experimental.state.api.list_logs - ray.experimental.state.api.get_log + ray.util.state.list_logs + ray.util.state.get_log .. 
_state-api-schema: @@ -72,21 +72,21 @@ State APIs Schema :toctree: doc/ :template: autosummary/class_without_autosummary.rst - ray.experimental.state.common.ActorState - ray.experimental.state.common.TaskState - ray.experimental.state.common.NodeState - ray.experimental.state.common.PlacementGroupState - ray.experimental.state.common.WorkerState - ray.experimental.state.common.ObjectState - ray.experimental.state.common.RuntimeEnvState - ray.experimental.state.common.JobState - ray.experimental.state.common.StateSummary - ray.experimental.state.common.TaskSummaries - ray.experimental.state.common.TaskSummaryPerFuncOrClassName - ray.experimental.state.common.ActorSummaries - ray.experimental.state.common.ActorSummaryPerClass - ray.experimental.state.common.ObjectSummaries - ray.experimental.state.common.ObjectSummaryPerKey + ray.util.state.common.ActorState + ray.util.state.common.TaskState + ray.util.state.common.NodeState + ray.util.state.common.PlacementGroupState + ray.util.state.common.WorkerState + ray.util.state.common.ObjectState + ray.util.state.common.RuntimeEnvState + ray.util.state.common.JobState + ray.util.state.common.StateSummary + ray.util.state.common.TaskSummaries + ray.util.state.common.TaskSummaryPerFuncOrClassName + ray.util.state.common.ActorSummaries + ray.util.state.common.ActorSummaryPerClass + ray.util.state.common.ObjectSummaries + ray.util.state.common.ObjectSummaryPerKey State APIs Exceptions --------------------- @@ -94,4 +94,4 @@ State APIs Exceptions .. 
autosummary:: :toctree: doc/ - ray.experimental.state.exception.RayStateApiException + ray.util.state.exception.RayStateApiException diff --git a/doc/source/ray-observability/api/state/cli.rst b/doc/source/ray-observability/api/state/cli.rst index 7b38592eb795..e12dfc45fdb5 100644 --- a/doc/source/ray-observability/api/state/cli.rst +++ b/doc/source/ray-observability/api/state/cli.rst @@ -13,19 +13,19 @@ This section contains commands to access the :ref:`live state of Ray resources ( State CLI allows users to access the state of various resources (e.g., actor, task, object). -.. click:: ray.experimental.state.state_cli:task_summary +.. click:: ray.util.state.state_cli:task_summary :prog: ray summary tasks -.. click:: ray.experimental.state.state_cli:actor_summary +.. click:: ray.util.state.state_cli:actor_summary :prog: ray summary actors -.. click:: ray.experimental.state.state_cli:object_summary +.. click:: ray.util.state.state_cli:object_summary :prog: ray summary objects -.. click:: ray.experimental.state.state_cli:ray_list +.. click:: ray.util.state.state_cli:ray_list :prog: ray list -.. click:: ray.experimental.state.state_cli:ray_get +.. click:: ray.util.state.state_cli:ray_get :prog: ray get .. _ray-logs-api-cli-ref: @@ -41,5 +41,5 @@ This section contains commands to :ref:`access logs ` from Ra Log CLI allows users to access the log from the cluster. Note that only the logs from alive nodes are available through this API. -.. click:: ray.experimental.state.state_cli:logs_state_cli_group +.. click:: ray.util.state.state_cli:logs_state_cli_group :prog: ray logs \ No newline at end of file diff --git a/doc/source/ray-observability/state/state-api.rst b/doc/source/ray-observability/state/state-api.rst index 72096718a8f2..33f5bfa4f2d9 100644 --- a/doc/source/ray-observability/state/state-api.rst +++ b/doc/source/ray-observability/state/state-api.rst @@ -5,11 +5,15 @@ Monitoring Ray States .. 
tip:: We'd love to hear your feedback on using Ray state APIs - `feedback form `_! -Ray state APIs allow users to conveniently access the current state (snapshot) of Ray through CLI or Python SDK. +Ray state APIs allow users to conveniently access the current state (snapshot) of Ray through CLI or Python SDK (developer APIs). .. note:: - APIs are :ref:`alpha `. This feature requires a full installation of Ray using ``pip install "ray[default]"``. This feature also requires the dashboard component to be available. The dashboard component needs to be included when starting the ray cluster, which is the default behavior for ``ray start`` and ``ray.init()``. For more in-depth debugging, you could check the dashboard log at ``/dashboard.log``, which is usually ``/tmp/ray/session_latest/logs/dashboard.log``. + This feature requires a full installation of Ray using ``pip install "ray[default]"``. This feature also requires the dashboard component to be available. The dashboard component needs to be included when starting the Ray cluster, which is the default behavior for ``ray start`` and ``ray.init()``. For more in-depth debugging, check the dashboard log at ``/dashboard.log``, which is usually ``/tmp/ray/session_latest/logs/dashboard.log``. + +.. note:: + + State API CLI commands are :ref:`stable `, while python SDKs are :ref:`DeveloperAPI `. CLI usage is recommended over Python SDKs. Getting Started --------------- @@ -45,17 +49,17 @@ Now, let's see the summarized states of tasks. If it doesn't return the output i .. tabs:: - .. group-tab:: CLI + .. group-tab:: CLI (Recommended) .. code-block:: bash ray summary tasks - .. group-tab:: Python SDK + .. group-tab:: Python SDK (Internal Developer API) .. code-block:: python - from ray.experimental.state.api import summarize_tasks + from ray.util.state import summarize_tasks print(summarize_tasks()) .. code-block:: text @@ -78,17 +82,17 @@ Let's list all actors. .. tabs:: - .. group-tab:: CLI + .. 
group-tab:: CLI (Recommended) .. code-block:: bash ray list actors - .. group-tab:: Python SDK + .. group-tab:: Python SDK (Internal Developer API) .. code-block:: python - from ray.experimental.state.api import list_actors + from ray.util.state import list_actors print(list_actors()) .. code-block:: text @@ -108,18 +112,18 @@ You can get the state of a single task using the get API. .. tabs:: - .. group-tab:: CLI + .. group-tab:: CLI (Recommended) .. code-block:: bash # In this case, 31405554844820381c2f0f8501000000 ray get actors - .. group-tab:: Python SDK + .. group-tab:: Python SDK (Internal Developer API) .. code-block:: python - from ray.experimental.state.api import get_actor + from ray.util.state import get_actor # In this case, 31405554844820381c2f0f8501000000 print(get_actor(id=)) @@ -141,7 +145,7 @@ You can also access logs through ``ray logs`` API. .. tabs:: - .. group-tab:: CLI + .. group-tab:: CLI (Recommended) .. code-block:: bash @@ -149,11 +153,11 @@ You can also access logs through ``ray logs`` API. # In this case, ACTOR_ID is 31405554844820381c2f0f8501000000 ray logs actor --id - .. group-tab:: Python SDK + .. group-tab:: Python SDK (Internal Developer API) .. code-block:: python - from ray.experimental.state.api import get_log + from ray.util.state import get_log # In this case, ACTOR_ID is 31405554844820381c2f0f8501000000 for line in get_log(actor_id=): @@ -190,17 +194,17 @@ E.g., Summarize all actors .. tabs:: - .. group-tab:: CLI + .. group-tab:: CLI (Recommended) .. code-block:: bash ray summary actors - .. group-tab:: Python SDK + .. group-tab:: Python SDK (Internal Developer API) .. code-block:: python - from ray.experimental.state.api import summarize_actors + from ray.util.state import summarize_actors print(summarize_actors()) E.g., Summarize all tasks @@ -208,17 +212,17 @@ E.g., Summarize all tasks .. tabs:: - .. group-tab:: CLI + .. group-tab:: CLI (Recommended) .. code-block:: bash ray summary tasks - .. group-tab:: Python SDK + .. 
group-tab:: Python SDK (Internal Developer API) .. code-block:: python - from ray.experimental.state.api import summarize_tasks + from ray.util.state import summarize_tasks print(summarize_tasks()) E.g., Summarize all objects @@ -232,17 +236,17 @@ E.g., Summarize all objects .. tabs:: - .. group-tab:: CLI + .. group-tab:: CLI (Recommended) .. code-block:: bash ray summary objects - .. group-tab:: Python SDK + .. group-tab:: Python SDK (Internal Developer API) .. code-block:: python - from ray.experimental.state.api import summarize_objects + from ray.util.state import summarize_objects print(summarize_objects()) List @@ -250,31 +254,31 @@ List Get a list of resources, possible resources include: -- :ref:`Actors `, e.g., actor id, state, pid, death_cause. (:class:`output schema `) -- :ref:`Tasks `, e.g., name, scheduling state, type, runtime env info (:class:`output schema `) -- :ref:`Objects `, e.g., object id, callsites, reference types. (:class:`output schema `) -- :ref:`Jobs `, e.g., start/end time, entrypoint, status. (:class:`output schema `) -- :ref:`Placement Groups `, e.g., name, bundles, stats. (:class:`output schema `) -- Nodes (Ray worker nodes), e.g., node id, node ip, node state. (:class:`output schema `) -- Workers (Ray worker processes), e.g., worker id, type, exit type and details. (:class:`output schema `) -- :ref:`Runtime environments `, e.g., runtime envs, creation time, nodes (:class:`output schema `) +- :ref:`Actors `, e.g., actor id, state, pid, death_cause. (:class:`output schema `) +- :ref:`Tasks `, e.g., name, scheduling state, type, runtime env info (:class:`output schema `) +- :ref:`Objects `, e.g., object id, callsites, reference types. (:class:`output schema `) +- :ref:`Jobs `, e.g., start/end time, entrypoint, status. (:class:`output schema `) +- :ref:`Placement Groups `, e.g., name, bundles, stats. (:class:`output schema `) +- Nodes (Ray worker nodes), e.g., node id, node ip, node state. 
(:class:`output schema `) +- Workers (Ray worker processes), e.g., worker id, type, exit type and details. (:class:`output schema `) +- :ref:`Runtime environments `, e.g., runtime envs, creation time, nodes (:class:`output schema `) E.g., List all nodes ~~~~~~~~~~~~~~~~~~~~~ .. tabs:: - .. group-tab:: CLI + .. group-tab:: CLI (Recommended) .. code-block:: bash ray list nodes - .. group-tab:: Python SDK + .. group-tab:: Python SDK (Internal Developer API) .. code-block:: python - from ray.experimental.state.api import list_nodes() + from ray.util.state import list_nodes() list_nodes() E.g., List all placement groups @@ -282,17 +286,17 @@ E.g., List all placement groups .. tabs:: - .. group-tab:: CLI + .. group-tab:: CLI (Recommended) .. code-block:: bash ray list placement-groups - .. group-tab:: Python SDK + .. group-tab:: Python SDK (Internal Developer API) .. code-block:: python - from ray.experimental.state.api import list_placement_groups + from ray.util.state import list_placement_groups list_placement_groups() @@ -303,17 +307,17 @@ E.g., List local referenced objects created by a process .. tabs:: - .. group-tab:: CLI + .. group-tab:: CLI (Recommended) .. code-block:: bash ray list objects -f pid= -f reference_type=LOCAL_REFERENCE - .. group-tab:: Python SDK + .. group-tab:: Python SDK (Internal Developer API) .. code-block:: python - from ray.experimental.state.api import list_objects + from ray.util.state import list_objects list_objects(filters=[("pid", "=", ), ("reference_type", "=", "LOCAL_REFERENCE")]) E.g., List alive actors @@ -321,17 +325,17 @@ E.g., List alive actors .. tabs:: - .. group-tab:: CLI + .. group-tab:: CLI (Recommended) .. code-block:: bash ray list actors -f state=ALIVE - .. group-tab:: Python SDK + .. group-tab:: Python SDK (Internal Developer API) .. 
code-block:: python - from ray.experimental.state.api import list_actors + from ray.util.state import list_actors list_actors(filters=[("state", "=", "ALIVE")]) E.g., List running tasks @@ -339,17 +343,17 @@ E.g., List running tasks .. tabs:: - .. group-tab:: CLI + .. group-tab:: CLI (Recommended) .. code-block:: bash ray list tasks -f state=RUNNING - .. group-tab:: Python SDK + .. group-tab:: Python SDK (Internal Developer API) .. code-block:: python - from ray.experimental.state.api import list_tasks + from ray.util.state import list_tasks list_tasks(filters=[("state", "=", "RUNNING")]) E.g., List non-running tasks @@ -357,17 +361,17 @@ E.g., List non-running tasks .. tabs:: - .. group-tab:: CLI + .. group-tab:: CLI (Recommended) .. code-block:: bash ray list tasks -f state!=RUNNING - .. group-tab:: Python SDK + .. group-tab:: Python SDK (Internal Developer API) .. code-block:: python - from ray.experimental.state.api import list_tasks + from ray.util.state import list_tasks list_tasks(filters=[("state", "!=", "RUNNING")]) E.g., List running tasks that have a name func @@ -375,17 +379,17 @@ E.g., List running tasks that have a name func .. tabs:: - .. group-tab:: CLI + .. group-tab:: CLI (Recommended) .. code-block:: bash ray list tasks -f state=RUNNING -f name="task_running_300_seconds()" - .. group-tab:: Python SDK + .. group-tab:: Python SDK (Internal Developer API) .. code-block:: python - from ray.experimental.state.api import list_tasks + from ray.util.state import list_tasks list_tasks(filters=[("state", "=", "RUNNING"), ("name", "=", "task_running_300_seconds()")]) E.g., List tasks with more details @@ -395,17 +399,17 @@ E.g., List tasks with more details .. tabs:: - .. group-tab:: CLI + .. group-tab:: CLI (Recommended) .. code-block:: bash ray list tasks --detail - .. group-tab:: Python SDK + .. group-tab:: Python SDK (Internal Developer API) .. 
code-block:: python - from ray.experimental.state.api import list_tasks + from ray.util.state import list_tasks list_tasks(detail=True) Get @@ -416,17 +420,17 @@ E.g., Get a task info .. tabs:: - .. group-tab:: CLI + .. group-tab:: CLI (Recommended) .. code-block:: bash ray get tasks - .. group-tab:: Python SDK + .. group-tab:: Python SDK (Internal Developer API) .. code-block:: python - from ray.experimental.state.api import get_task + from ray.util.state import get_task get_task(id=) E.g., Get a node info @@ -434,17 +438,17 @@ E.g., Get a node info .. tabs:: - .. group-tab:: CLI + .. group-tab:: CLI (Recommended) .. code-block:: bash ray get nodes - .. group-tab:: Python SDK + .. group-tab:: Python SDK (Internal Developer API) .. code-block:: python - from ray.experimental.state.api import get_node + from ray.util.state import get_node get_node(id=) Logs @@ -460,18 +464,18 @@ E.g., Get all retrievable log file names from a head node in a cluster .. tabs:: - .. group-tab:: CLI + .. group-tab:: CLI (Recommended) .. code-block:: bash ray logs cluster - .. group-tab:: Python SDK + .. group-tab:: Python SDK (Internal Developer API) .. code-block:: python # You could get the node id / node ip from `ray list nodes` - from ray.experimental.state.api import list_logs + from ray.util.state import list_logs # `ray logs` by default print logs from a head node. # So in order to list the same logs, you should provide the head node id. # You could get the node id / node ip from `ray list nodes` @@ -482,7 +486,7 @@ E.g., Get a particular log file from a node .. tabs:: - .. group-tab:: CLI + .. group-tab:: CLI (Recommended) .. code-block:: bash @@ -491,11 +495,11 @@ E.g., Get a particular log file from a node # `ray logs cluster` is alias to `ray logs` when querying with globs. ray logs gcs_server.out --node-id - .. group-tab:: Python SDK + .. group-tab:: Python SDK (Internal Developer API) .. 
code-block:: python - from ray.experimental.state.api import get_log + from ray.util.state import get_log # Node IP could be retrieved from list_nodes() or ray.nodes() for line in get_log(filename="gcs_server.out", node_id=): @@ -506,7 +510,7 @@ E.g., Stream a log file from a node .. tabs:: - .. group-tab:: CLI + .. group-tab:: CLI (Recommended) .. code-block:: bash @@ -516,11 +520,11 @@ E.g., Stream a log file from a node ray logs cluster raylet.out --node-ip --follow - .. group-tab:: Python SDK + .. group-tab:: Python SDK (Internal Developer API) .. code-block:: python - from ray.experimental.state.api import get_log + from ray.util.state import get_log # Node IP could be retrieved from list_nodes() or ray.nodes() # The loop will block with `follow=True` @@ -532,17 +536,17 @@ E.g., Stream log from an actor with actor id .. tabs:: - .. group-tab:: CLI + .. group-tab:: CLI (Recommended) .. code-block:: bash ray logs actor --id= --follow - .. group-tab:: Python SDK + .. group-tab:: Python SDK (Internal Developer API) .. code-block:: python - from ray.experimental.state.api import get_log + from ray.util.state import get_log # You could get the actor's ID from the output of `ray list actors`. # The loop will block with `follow=True` @@ -554,17 +558,17 @@ E.g., Stream log from a pid .. tabs:: - .. group-tab:: CLI + .. group-tab:: CLI (Recommended) .. code-block:: bash ray logs worker --pid= --follow - .. group-tab:: Python SDK + .. group-tab:: Python SDK (Internal Developer API) .. 
code-block:: python - from ray.experimental.state.api import get_log + from ray.util.state import get_log # Node IP could be retrieved from list_nodes() or ray.nodes() # You could get the pid of the worker running the actor easily when output diff --git a/python/ray/_private/state_api_test_utils.py b/python/ray/_private/state_api_test_utils.py index 090b025b8c84..9ce3d8a0e542 100644 --- a/python/ray/_private/state_api_test_utils.py +++ b/python/ray/_private/state_api_test_utils.py @@ -10,10 +10,10 @@ import time import traceback from typing import Callable, Dict, List, Optional, Tuple, Union -from ray.experimental.state.api import list_tasks +from ray.util.state import list_tasks import ray from ray.actor import ActorHandle -from ray.experimental.state.api import list_workers +from ray.util.state import list_workers @dataclass diff --git a/python/ray/data/tests/test_formats.py b/python/ray/data/tests/test_formats.py index 73e89fe84668..5790eff6a0a4 100644 --- a/python/ray/data/tests/test_formats.py +++ b/python/ray/data/tests/test_formats.py @@ -329,7 +329,7 @@ def test_get_read_tasks(shutdown_only): # Verify `_get_read_tasks` being executed on same node (head node). def verify_get_read_tasks(): - from ray.experimental.state.api import list_tasks + from ray.util.state import list_tasks task_states = list_tasks(filters=[("name", "=", "_get_read_tasks")]) # Verify only one task being executed on same node. 
diff --git a/python/ray/experimental/state/api.py b/python/ray/experimental/state/api.py index bf54aede53d5..f62f5f2ef7cd 100644 --- a/python/ray/experimental/state/api.py +++ b/python/ray/experimental/state/api.py @@ -1,1429 +1,4 @@ -import logging -import threading -import urllib -import warnings -from contextlib import contextmanager -from dataclasses import fields -from typing import Any, Dict, Generator, List, Optional, Tuple, Union +from ray.util.state import * # noqa: F401 F403 +from ray.util.state.util import record_deprecated_state_api_import -import requests - -from ray.dashboard.modules.dashboard_sdk import SubmissionClient -from ray.dashboard.utils import ( - get_address_for_submission_client, - ray_address_to_api_server_url, -) -from ray.experimental.state.common import ( - DEFAULT_LIMIT, - DEFAULT_RPC_TIMEOUT, - ActorState, - ClusterEventState, - GetApiOptions, - GetLogOptions, - JobState, - ListApiOptions, - NodeState, - ObjectState, - PlacementGroupState, - PredicateType, - RuntimeEnvState, - StateResource, - SummaryApiOptions, - SummaryResource, - SupportedFilterType, - TaskState, - WorkerState, - dict_to_state, -) -from ray.experimental.state.exception import RayStateApiException, ServerUnavailable - -logger = logging.getLogger(__name__) - - -@contextmanager -def warnings_on_slow_request( - *, address: str, endpoint: str, timeout: float, explain: bool -): - """A context manager to print warnings if the request is replied slowly. - - Warnings are printed 3 times - - Args: - address: The address of the endpoint. - endpoint: The name of the endpoint. - timeout: Request timeout in seconds. - explain: Whether ot not it will print the warning. - """ - # Do nothing if explain is not specified. - if not explain: - yield - return - - # Prepare timers to print warning. - # Print 3 times with exponential backoff. 
timeout / 2, timeout / 4, timeout / 8 - def print_warning(elapsed: float): - logger.info( - f"({round(elapsed, 2)} / {timeout} seconds) " - "Waiting for the response from the API server " - f"address {address}{endpoint}.", - ) - - warning_timers = [ - threading.Timer(timeout / i, print_warning, args=[timeout / i]) - for i in [2, 4, 8] - ] - - try: - for timer in warning_timers: - timer.start() - yield - finally: - # Make sure all timers are cancelled once request is terminated. - for timer in warning_timers: - timer.cancel() - - -""" -This file contains API client and methods for querying ray state. - -NOTE(rickyyx): This is still a work-in-progress API, and subject to changes. - -If you have any feedback, you could do so at either way as below: - 1. Report bugs/issues with details: https://forms.gle/gh77mwjEskjhN8G46 , - 2. Follow up in #ray-state-observability-dogfooding slack channel of Ray: - https://tinyurl.com/2pm26m4a" - - -Usage: - 1. [Recommended] With StateApiClient: - ``` - client = StateApiClient(address="auto") - data = client.list(StateResource.NODES) - ... - ``` - - 2. With SDK APIs: - The API creates a `StateApiClient` for each invocation. So if multiple - invocations of listing are used, it is better to reuse the `StateApiClient` - as suggested above. - ``` - data = list_nodes(address="auto") - ``` -""" - - -class StateApiClient(SubmissionClient): - """State API Client issues REST GET requests to the server for resource states.""" - - def __init__( - self, - address: Optional[str] = None, - cookies: Optional[Dict[str, Any]] = None, - headers: Optional[Dict[str, Any]] = None, - ): - """Initialize a StateApiClient and check the connection to the cluster. - - Args: - address: Ray bootstrap address (e.g. `127.0.0.0:6379`, `auto`), or Ray - Client adress (e.g. `ray://:10001`), or Ray dashboard - address (e.g. `http://:8265`). - If not provided, it will be detected automatically from any running - local Ray cluster. 
- cookies: Cookies to use when sending requests to the HTTP job server. - headers: Headers to use when sending requests to the HTTP job server, used - for cases like authentication to a remote cluster. - """ - if requests is None: - raise RuntimeError( - "The Ray state CLI & SDK require the ray[default] " - "installation: `pip install 'ray[default']``" - ) - if not headers: - headers = {"Content-Type": "application/json"} - - # Resolve API server URL - api_server_url = get_address_for_submission_client(address) - - super().__init__( - address=api_server_url, - create_cluster_if_needed=False, - headers=headers, - cookies=cookies, - ) - - @classmethod - def _make_param(cls, options: Union[ListApiOptions, GetApiOptions]) -> Dict: - options_dict = {} - for field in fields(options): - # TODO(rickyyx): We will need to find a way to pass server side timeout - # TODO(rickyyx): We will have to convert filter option - # slightly differently for now. But could we do k,v pair rather than this? - # I see we are also converting dict to XXXApiOptions later on, we could - # probably organize the marshaling a bit better. - if field.name == "filters": - options_dict["filter_keys"] = [] - options_dict["filter_predicates"] = [] - options_dict["filter_values"] = [] - for filter in options.filters: - if len(filter) != 3: - raise ValueError( - f"The given filter has incorrect input type, {filter}. " - "Provide (key, predicate, value) tuples." 
- ) - filter_k, filter_predicate, filter_val = filter - options_dict["filter_keys"].append(filter_k) - options_dict["filter_predicates"].append(filter_predicate) - options_dict["filter_values"].append(filter_val) - continue - - option_val = getattr(options, field.name) - if option_val is not None: - options_dict[field.name] = option_val - - return options_dict - - def _make_http_get_request( - self, - endpoint: str, - params: Dict, - timeout: float, - _explain: bool = False, - ) -> Dict: - with warnings_on_slow_request( - address=self._address, endpoint=endpoint, timeout=timeout, explain=_explain - ): - # Send a request. - response = None - try: - response = self._do_request( - "GET", - endpoint, - timeout=timeout, - params=params, - ) - # If we have a valid JSON error, don't raise a generic exception but - # instead let the caller parse it to raise a more precise exception. - if ( - response.status_code == 500 - and "application/json" - not in response.headers.get("Content-Type", "") - ): - response.raise_for_status() - except requests.exceptions.RequestException as e: - err_str = f"Failed to make request to {self._address}{endpoint}. " - - # Best-effort to give hints to users on potential reasons of connection - # failure. - err_str += ( - "Failed to connect to API server. Please check the API server " - "log for details. Make sure dependencies are installed with " - "`pip install ray[default]`. Please also check dashboard is " - "available, and included when starting ray cluster, " - "i.e. `ray start --include-dashboard=True --head`. " - ) - if response is None: - raise ServerUnavailable(err_str) - - err_str += f"Response(url={response.url},status={response.status_code})" - raise RayStateApiException(err_str) from e - - # Process the response. - response = response.json() - if response["result"] is False: - raise RayStateApiException( - "API server internal error. See dashboard.log file for more details. 
" - f"Error: {response['msg']}" - ) - - # Dictionary of `ListApiResponse` or `SummaryApiResponse` - return response["data"]["result"] - - def get( - self, - resource: StateResource, - id: str, - options: Optional[GetApiOptions], - _explain: bool = False, - ) -> Optional[ - Union[ - ActorState, - PlacementGroupState, - NodeState, - WorkerState, - TaskState, - List[ObjectState], - JobState, - ] - ]: - """Get resources states by id - - Args: - resource_name: Resource names, i.e. 'workers', 'actors', 'nodes', - 'placement_groups', 'tasks', 'objects'. - 'jobs' and 'runtime-envs' are not supported yet. - id: ID for the resource, i.e. 'node_id' for nodes. - options: Get options. See `GetApiOptions` for details. - _explain: Print the API information such as API - latency or failed query information. - - Returns: - None if not found, and if found: - - ActorState for actors - - PlacementGroupState for placement groups - - NodeState for nodes - - WorkerState for workers - - TaskState for tasks - - JobState for jobs - - Empty list for objects if not found, or list of ObjectState for objects - - Raises: - This doesn't catch any exceptions raised when the underlying request - call raises exceptions. For example, it could raise `requests.Timeout` - when timeout occurs. - - ValueError: - if the resource could not be GET by id, i.e. jobs and runtime-envs. 
- - """ - # TODO(rickyyx): Make GET not using filters on list operation - params = self._make_param(options) - - RESOURCE_ID_KEY_NAME = { - StateResource.NODES: "node_id", - StateResource.ACTORS: "actor_id", - StateResource.PLACEMENT_GROUPS: "placement_group_id", - StateResource.WORKERS: "worker_id", - StateResource.TASKS: "task_id", - StateResource.OBJECTS: "object_id", - StateResource.JOBS: "submission_id", - } - if resource not in RESOURCE_ID_KEY_NAME: - raise ValueError(f"Can't get {resource.name} by id.") - - params["filter_keys"] = [RESOURCE_ID_KEY_NAME[resource]] - params["filter_predicates"] = ["="] - params["filter_values"] = [id] - params["detail"] = True - endpoint = f"/api/v0/{resource.value}" - - list_api_response = self._make_http_get_request( - endpoint=endpoint, - params=params, - timeout=options.timeout, - _explain=_explain, - ) - result = list_api_response["result"] - - # Empty result - if len(result) == 0: - return None - - result = [dict_to_state(d, resource) for d in result] - if resource == StateResource.OBJECTS: - # NOTE(rickyyx): - # There might be multiple object entries for a single object id - # because a single object could be referenced at different places - # e.g. pinned as local variable, used as parameter - return result - - if resource == StateResource.TASKS: - # There might be multiple task attempts given a task id due to - # task retries. - if len(result) == 1: - return result[0] - return result - - # For the rest of the resources, there should only be a single entry - # for a particular id. - assert len(result) == 1 - return result[0] - - def _print_api_warning( - self, - resource: StateResource, - api_response: dict, - warn_data_source_not_available: bool = True, - warn_data_truncation: bool = True, - warn_limit: bool = True, - warn_server_side_warnings: bool = True, - ): - """Print the API warnings. - - Args: - resource: Resource names, i.e. 'jobs', 'actors', 'nodes', - see `StateResource` for details. 
- api_response: The dictionarified `ListApiResponse` or `SummaryApiResponse`. - warn_data_source_not_available: Warn when some data sources - are not available. - warn_data_truncation: Warn when results were truncated at - the data source. - warn_limit: Warn when results were limited. - warn_server_side_warnings: Warn when the server side generates warnings - (E.g., when callsites not enabled for listing objects) - """ - # Print warnings if anything was given. - if warn_data_source_not_available: - warning_msgs = api_response.get("partial_failure_warning", None) - if warning_msgs: - warnings.warn(warning_msgs) - - if warn_data_truncation: - # Print warnings if data is truncated at the data source. - num_after_truncation = api_response["num_after_truncation"] - total = api_response["total"] - if total > num_after_truncation: - # NOTE(rickyyx): For now, there's not much users - # could do (neither can we), with hard truncation. - # Unless we allow users to set a higher - # `RAY_MAX_LIMIT_FROM_DATA_SOURCE`, the data will - # always be truncated at the data source. - warnings.warn( - ( - "The returned data may contain incomplete result. " - f"{num_after_truncation} ({total} total from the cluster) " - f"{resource.value} are retrieved from the data source. " - f"{total - num_after_truncation} entries have been truncated. " - f"Max of {num_after_truncation} entries are retrieved " - "from data source to prevent over-sized payloads." - ), - ) - - if warn_limit: - # Print warnings if return data is limited at the API server due to - # limit enforced at the server side - num_filtered = api_response["num_filtered"] - data = api_response["result"] - if num_filtered > len(data): - warnings.warn( - ( - f"Limit last {len(data)} entries " - f"(Total {num_filtered}). Use `--filter` to reduce " - "the amount of data to return or " - "setting a higher limit with `--limit` to see all data. " - ), - ) - - if warn_server_side_warnings: - # Print the additional warnings. 
- warnings_to_print = api_response.get("warnings", []) - if warnings_to_print: - for warning_to_print in warnings_to_print: - warnings.warn(warning_to_print) - - def _raise_on_missing_output(self, resource: StateResource, api_response: dict): - """Raise an exception when the API resopnse contains a missing output. - - Output can be missing if (1) Failures on some of data source queries (e.g., - `ray list tasks` queries all raylets, and if some of queries fail, it will - contain missing output. If all queries fail, it will just fail). (2) Data - is truncated because the output is too large. - - Args: - resource: Resource names, i.e. 'jobs', 'actors', 'nodes', - see `StateResource` for details. - api_response: The dictionarified `ListApiResponse` or `SummaryApiResponse`. - """ - # Raise an exception if there are partial failures that cause missing output. - warning_msgs = api_response.get("partial_failure_warning", None) - if warning_msgs: - raise RayStateApiException( - f"Failed to retrieve all {resource.value} from the cluster because" - "they are not reachable due to query failures to the data sources. " - "To avoid raising an exception and allow having missing output, " - "set `raise_on_missing_output=False`. " - ) - # Raise an exception is there is data truncation that cause missing output. - total = api_response["total"] - num_after_truncation = api_response["num_after_truncation"] - - if total != num_after_truncation: - raise RayStateApiException( - f"Failed to retrieve all {resource.value} from the cluster because " - "they are not reachable due to data truncation. It happens " - "when the returned data is too large " - # When the data is truncated, the truncation - # threshold == num_after_truncation. We cannot set this to env - # var because the CLI side might not have the correct env var. - f"(> {num_after_truncation}) " - "To avoid raising an exception and allow having missing output, " - "set `raise_on_missing_output=False`. 
" - ) - - def list( - self, - resource: StateResource, - options: ListApiOptions, - raise_on_missing_output: bool, - _explain: bool = False, - ) -> List[ - Union[ - ActorState, - JobState, - NodeState, - TaskState, - ObjectState, - PlacementGroupState, - RuntimeEnvState, - WorkerState, - ClusterEventState, - ] - ]: - """List resources states - - Args: - resource: Resource names, i.e. 'jobs', 'actors', 'nodes', - see `StateResource` for details. - options: List options. See `ListApiOptions` for details. - raise_on_missing_output: When True, raise an exception if the output - is incomplete. Output can be incomplete if - (1) there's a partial network failure when the source is distributed. - (2) data is truncated because it is too large. - Set it to False to avoid throwing an exception on missing data. - _explain: Print the API information such as API - latency or failed query information. - - Returns: - A list of queried result from `ListApiResponse`, - - Raises: - This doesn't catch any exceptions raised when the underlying request - call raises exceptions. For example, it could raise `requests.Timeout` - when timeout occurs. - - """ - - endpoint = f"/api/v0/{resource.value}" - params = self._make_param(options) - list_api_response = self._make_http_get_request( - endpoint=endpoint, - params=params, - timeout=options.timeout, - _explain=_explain, - ) - if raise_on_missing_output: - self._raise_on_missing_output(resource, list_api_response) - if _explain: - self._print_api_warning(resource, list_api_response) - return [dict_to_state(d, resource) for d in list_api_response["result"]] - - def summary( - self, - resource: SummaryResource, - *, - options: SummaryApiOptions, - raise_on_missing_output: bool, - _explain: bool = False, - ) -> Dict: - """Summarize resources states - - Args: - resource_name: Resource names, - see `SummaryResource` for details. - options: summary options. See `SummaryApiOptions` for details. 
- raise_on_missing_output: Raise an exception if the output has missing data. - Output can have missing data if (1) there's a partial network failure - when the source is distributed. (2) data is truncated - because it is too large. - _explain: Print the API information such as API - latency or failed query information. - - Returns: - A dictionary of queried result from `SummaryApiResponse`. - - Raises: - This doesn't catch any exceptions raised when the underlying request - call raises exceptions. For example, it could raise `requests.Timeout` - when timeout occurs. - """ - params = {"timeout": options.timeout} - endpoint = f"/api/v0/{resource.value}/summarize" - summary_api_response = self._make_http_get_request( - endpoint=endpoint, - params=params, - timeout=options.timeout, - _explain=_explain, - ) - if raise_on_missing_output: - self._raise_on_missing_output(resource, summary_api_response) - if _explain: - # There's no limit applied to summary, so we shouldn't warn. - self._print_api_warning(resource, summary_api_response, warn_limit=False) - return summary_api_response["result"]["node_id_to_summary"] - - -def get_actor( - id: str, - address: Optional[str] = None, - timeout: int = DEFAULT_RPC_TIMEOUT, - _explain: bool = False, -) -> Optional[Dict]: - """Get an actor by id. - - Args: - id: Id of the actor - address: Ray bootstrap address, could be `auto`, `localhost:6379`. - If None, it will be resolved automatically from an initialized ray. - timeout: Max timeout value for the state API requests made. - _explain: Print the API information such as API latency or - failed query information. - - Returns: - None if actor not found, or - :class:`ActorState `. - - Raises: - Exceptions: :class:`RayStateApiException ` if the CLI - failed to query the data. 
- """ # noqa: E501 - return StateApiClient(address=address).get( - StateResource.ACTORS, id, GetApiOptions(timeout=timeout), _explain=_explain - ) - - -def get_job( - id: str, - address: Optional[str] = None, - timeout: int = DEFAULT_RPC_TIMEOUT, - _explain: bool = False, -) -> Optional[JobState]: - """Get a submission job detail by id. - - Args: - id: Submission ID obtained from job API. - address: Ray bootstrap address, could be `auto`, `localhost:6379`. - If None, it will be resolved automatically from an initialized ray. - timeout: Max timeout value for the state API requests made. - _explain: Print the API information such as API latency or - failed query information. - - Returns: - None if job not found, or - :class:`JobState `. - - Raises: - Exceptions: :class:`RayStateApiException ` if the CLI - failed to query the data. - """ # noqa: E501 - return StateApiClient(address=address).get( - StateResource.JOBS, - id, - GetApiOptions(timeout=timeout), - _explain=_explain, - ) - - -def get_placement_group( - id: str, - address: Optional[str] = None, - timeout: int = DEFAULT_RPC_TIMEOUT, - _explain: bool = False, -) -> Optional[PlacementGroupState]: - """Get a placement group by id. - - Args: - id: Id of the placement group - address: Ray bootstrap address, could be `auto`, `localhost:6379`. - If None, it will be resolved automatically from an initialized ray. - timeout: Max timeout value for the state APIs requests made. - _explain: Print the API information such as API latency or - failed query information. - - Returns: - None if actor not found, or - :class:`~ray.experimental.state.common.PlacementGroupState`. - - Raises: - Exceptions: :class:`RayStateApiException ` if the CLI - failed to query the data. 
- """ # noqa: E501 - return StateApiClient(address=address).get( - StateResource.PLACEMENT_GROUPS, - id, - GetApiOptions(timeout=timeout), - _explain=_explain, - ) - - -def get_node( - id: str, - address: Optional[str] = None, - timeout: int = DEFAULT_RPC_TIMEOUT, - _explain: bool = False, -) -> Optional[NodeState]: - """Get a node by id. - - Args: - id: Id of the node. - address: Ray bootstrap address, could be `auto`, `localhost:6379`. - If None, it will be resolved automatically from an initialized ray. - timeout: Max timeout value for the state APIs requests made. - _explain: Print the API information such as API latency or - failed query information. - - Returns: - None if actor not found, or - :class:`NodeState `. - - Raises: - Exceptions: :class:`RayStateApiException ` - if the CLI is failed to query the data. - """ # noqa: E501 - return StateApiClient(address=address).get( - StateResource.NODES, - id, - GetApiOptions(timeout=timeout), - _explain=_explain, - ) - - -def get_worker( - id: str, - address: Optional[str] = None, - timeout: int = DEFAULT_RPC_TIMEOUT, - _explain: bool = False, -) -> Optional[WorkerState]: - """Get a worker by id. - - Args: - id: Id of the worker - address: Ray bootstrap address, could be `auto`, `localhost:6379`. - If None, it will be resolved automatically from an initialized ray. - timeout: Max timeout value for the state APIs requests made. - _explain: Print the API information such as API latency or - failed query information. - - Returns: - None if actor not found, or - :class:`WorkerState `. - - Raises: - Exceptions: :class:`RayStateApiException ` if the CLI - failed to query the data. 
- """ # noqa: E501 - return StateApiClient(address=address).get( - StateResource.WORKERS, - id, - GetApiOptions(timeout=timeout), - _explain=_explain, - ) - - -def get_task( - id: str, - address: Optional[str] = None, - timeout: int = DEFAULT_RPC_TIMEOUT, - _explain: bool = False, -) -> Optional[TaskState]: - """Get task attempts of a task by id. - - Args: - id: Id of the task - address: Ray bootstrap address, could be `auto`, `localhost:6379`. - If None, it will be resolved automatically from an initialized ray. - timeout: Max timeout value for the state APIs requests made. - _explain: Print the API information such as API latency or - failed query information. - - Returns: - None if task not found, or a list of - :class:`~ray.experimental.state.common.TaskState` - from the task attempts. - - Raises: - Exceptions: :class:`RayStateApiException ` if the CLI - failed to query the data. - """ # noqa: E501 - return StateApiClient(address=address).get( - StateResource.TASKS, - id, - GetApiOptions(timeout=timeout), - _explain=_explain, - ) - - -def get_objects( - id: str, - address: Optional[str] = None, - timeout: int = DEFAULT_RPC_TIMEOUT, - _explain: bool = False, -) -> List[ObjectState]: - """Get objects by id. - - There could be more than 1 entry returned since an object could be - referenced at different places. - - Args: - id: Id of the object - address: Ray bootstrap address, could be `auto`, `localhost:6379`. - If None, it will be resolved automatically from an initialized ray. - timeout: Max timeout value for the state APIs requests made. - _explain: Print the API information such as API latency or - failed query information. - - Returns: - List of - :class:`~ray.experimental.state.common.ObjectState`. - - Raises: - Exceptions: :class:`RayStateApiException ` if the CLI - failed to query the data. 
- """ # noqa: E501 - return StateApiClient(address=address).get( - StateResource.OBJECTS, - id, - GetApiOptions(timeout=timeout), - _explain=_explain, - ) - - -def list_actors( - address: Optional[str] = None, - filters: Optional[List[Tuple[str, PredicateType, SupportedFilterType]]] = None, - limit: int = DEFAULT_LIMIT, - timeout: int = DEFAULT_RPC_TIMEOUT, - detail: bool = False, - raise_on_missing_output: bool = True, - _explain: bool = False, -) -> List[ActorState]: - """List actors in the cluster. - - Args: - address: Ray bootstrap address, could be `auto`, `localhost:6379`. - If None, it will be resolved automatically from an initialized ray. - filters: List of tuples of filter key, predicate (=, or !=), and - the filter value. E.g., `("id", "=", "abcd")` - limit: Max number of entries returned by the state backend. - timeout: Max timeout value for the state APIs requests made. - detail: When True, more details info (specified in `ActorState`) - will be queried and returned. See - :class:`ActorState `. - raise_on_missing_output: When True, exceptions will be raised if - there is missing data due to truncation/data source unavailable. - _explain: Print the API information such as API latency or - failed query information. - - Returns: - List of - :class:`ActorState `. - - Raises: - Exceptions: :class:`RayStateApiException ` if the CLI - failed to query the data. 
- """ # noqa: E501 - return StateApiClient(address=address).list( - StateResource.ACTORS, - options=ListApiOptions( - limit=limit, - timeout=timeout, - filters=filters, - detail=detail, - ), - raise_on_missing_output=raise_on_missing_output, - _explain=_explain, - ) - - -def list_placement_groups( - address: Optional[str] = None, - filters: Optional[List[Tuple[str, PredicateType, SupportedFilterType]]] = None, - limit: int = DEFAULT_LIMIT, - timeout: int = DEFAULT_RPC_TIMEOUT, - detail: bool = False, - raise_on_missing_output: bool = True, - _explain: bool = False, -) -> List[PlacementGroupState]: - """List placement groups in the cluster. - - Args: - address: Ray bootstrap address, could be `auto`, `localhost:6379`. - If None, it will be resolved automatically from an initialized ray. - filters: List of tuples of filter key, predicate (=, or !=), and - the filter value. E.g., `("state", "=", "abcd")` - limit: Max number of entries returned by the state backend. - timeout: Max timeout value for the state APIs requests made. - detail: When True, more details info (specified in `PlacementGroupState`) - will be queried and returned. See - :class:`~ray.experimental.state.common.PlacementGroupState`. - raise_on_missing_output: When True, exceptions will be raised if - there is missing data due to truncation/data source unavailable. - _explain: Print the API information such as API latency or - failed query information. - - Returns: - List of dictionarified - :class:`~ray.experimental.state.common.PlacementGroupState`. - - Raises: - Exceptions: :class:`RayStateApiException ` if the CLI - failed to query the data. 
- """ # noqa: E501 - return StateApiClient(address=address).list( - StateResource.PLACEMENT_GROUPS, - options=ListApiOptions( - limit=limit, timeout=timeout, filters=filters, detail=detail - ), - raise_on_missing_output=raise_on_missing_output, - _explain=_explain, - ) - - -def list_nodes( - address: Optional[str] = None, - filters: Optional[List[Tuple[str, PredicateType, SupportedFilterType]]] = None, - limit: int = DEFAULT_LIMIT, - timeout: int = DEFAULT_RPC_TIMEOUT, - detail: bool = False, - raise_on_missing_output: bool = True, - _explain: bool = False, -) -> List[NodeState]: - """List nodes in the cluster. - - Args: - address: Ray bootstrap address, could be `auto`, `localhost:6379`. - If None, it will be resolved automatically from an initialized ray. - filters: List of tuples of filter key, predicate (=, or !=), and - the filter value. E.g., `("node_name", "=", "abcd")` - limit: Max number of entries returned by the state backend. - timeout: Max timeout value for the state APIs requests made. - detail: When True, more details info (specified in `NodeState`) - will be queried and returned. See - :class:`NodeState `. - raise_on_missing_output: When True, exceptions will be raised if - there is missing data due to truncation/data source unavailable. - _explain: Print the API information such as API latency or - failed query information. - - Returns: - List of dictionarified - :class:`NodeState `. - - Raises: - Exceptions: :class:`RayStateApiException ` - if the CLI failed to query the data. 
- """ # noqa: E501 - return StateApiClient(address=address).list( - StateResource.NODES, - options=ListApiOptions( - limit=limit, timeout=timeout, filters=filters, detail=detail - ), - raise_on_missing_output=raise_on_missing_output, - _explain=_explain, - ) - - -def list_jobs( - address: Optional[str] = None, - filters: Optional[List[Tuple[str, PredicateType, SupportedFilterType]]] = None, - limit: int = DEFAULT_LIMIT, - timeout: int = DEFAULT_RPC_TIMEOUT, - detail: bool = False, - raise_on_missing_output: bool = True, - _explain: bool = False, -) -> List[JobState]: - """List jobs submitted to the cluster by :ref: `ray job submission `. - - Args: - address: Ray bootstrap address, could be `auto`, `localhost:6379`. - If None, it will be resolved automatically from an initialized ray. - filters: List of tuples of filter key, predicate (=, or !=), and - the filter value. E.g., `("status", "=", "abcd")` - limit: Max number of entries returned by the state backend. - timeout: Max timeout value for the state APIs requests made. - detail: When True, more details info (specified in `JobState`) - will be queried and returned. See - :class:`JobState `. - raise_on_missing_output: When True, exceptions will be raised if - there is missing data due to truncation/data source unavailable. - _explain: Print the API information such as API latency or - failed query information. - - Returns: - List of dictionarified - :class:`JobState `. - - Raises: - Exceptions: :class:`RayStateApiException ` if the CLI - failed to query the data. 
- """ # noqa: E501 - return StateApiClient(address=address).list( - StateResource.JOBS, - options=ListApiOptions( - limit=limit, timeout=timeout, filters=filters, detail=detail - ), - raise_on_missing_output=raise_on_missing_output, - _explain=_explain, - ) - - -def list_workers( - address: Optional[str] = None, - filters: Optional[List[Tuple[str, PredicateType, SupportedFilterType]]] = None, - limit: int = DEFAULT_LIMIT, - timeout: int = DEFAULT_RPC_TIMEOUT, - detail: bool = False, - raise_on_missing_output: bool = True, - _explain: bool = False, -) -> List[WorkerState]: - """List workers in the cluster. - - Args: - address: Ray bootstrap address, could be `auto`, `localhost:6379`. - If None, it will be resolved automatically from an initialized ray. - filters: List of tuples of filter key, predicate (=, or !=), and - the filter value. E.g., `("is_alive", "=", "True")` - limit: Max number of entries returned by the state backend. - timeout: Max timeout value for the state APIs requests made. - detail: When True, more details info (specified in `WorkerState`) - will be queried and returned. See - :class:`WorkerState `. - raise_on_missing_output: When True, exceptions will be raised if - there is missing data due to truncation/data source unavailable. - _explain: Print the API information such as API latency or - failed query information. - - Returns: - List of - :class:`WorkerState `. - - Raises: - Exceptions: :class:`RayStateApiException ` if the CLI - failed to query the data. 
- """ # noqa: E501 - return StateApiClient(address=address).list( - StateResource.WORKERS, - options=ListApiOptions( - limit=limit, timeout=timeout, filters=filters, detail=detail - ), - raise_on_missing_output=raise_on_missing_output, - _explain=_explain, - ) - - -def list_tasks( - address: Optional[str] = None, - filters: Optional[List[Tuple[str, PredicateType, SupportedFilterType]]] = None, - limit: int = DEFAULT_LIMIT, - timeout: int = DEFAULT_RPC_TIMEOUT, - detail: bool = False, - raise_on_missing_output: bool = True, - _explain: bool = False, -) -> List[TaskState]: - """List tasks in the cluster. - - Args: - address: Ray bootstrap address, could be `auto`, `localhost:6379`. - If None, it will be resolved automatically from an initialized ray. - filters: List of tuples of filter key, predicate (=, or !=), and - the filter value. E.g., `("is_alive", "=", "True")` - limit: Max number of entries returned by the state backend. - timeout: Max timeout value for the state APIs requests made. - detail: When True, more details info (specified in `WorkerState`) - will be queried and returned. See - :class:`WorkerState `. - raise_on_missing_output: When True, exceptions will be raised if - there is missing data due to truncation/data source unavailable. - _explain: Print the API information such as API latency or - failed query information. - - Returns: - List of - :class:`TaskState `. - - Raises: - Exceptions: :class:`RayStateApiException ` if the CLI - failed to query the data. 
- """ # noqa: E501 - return StateApiClient(address=address).list( - StateResource.TASKS, - options=ListApiOptions( - limit=limit, timeout=timeout, filters=filters, detail=detail - ), - raise_on_missing_output=raise_on_missing_output, - _explain=_explain, - ) - - -def list_objects( - address: Optional[str] = None, - filters: Optional[List[Tuple[str, PredicateType, SupportedFilterType]]] = None, - limit: int = DEFAULT_LIMIT, - timeout: int = DEFAULT_RPC_TIMEOUT, - detail: bool = False, - raise_on_missing_output: bool = True, - _explain: bool = False, -) -> List[ObjectState]: - """List objects in the cluster. - - Args: - address: Ray bootstrap address, could be `auto`, `localhost:6379`. - If None, it will be resolved automatically from an initialized ray. - filters: List of tuples of filter key, predicate (=, or !=), and - the filter value. E.g., `("ip", "=", "0.0.0.0")` - limit: Max number of entries returned by the state backend. - timeout: Max timeout value for the state APIs requests made. - detail: When True, more details info (specified in `ObjectState`) - will be queried and returned. See - :class:`ObjectState `. - raise_on_missing_output: When True, exceptions will be raised if - there is missing data due to truncation/data source unavailable. - _explain: Print the API information such as API latency or - failed query information. - - Returns: - List of - :class:`ObjectState `. - - Raises: - Exceptions: :class:`RayStateApiException ` if the CLI - failed to query the data. 
- """ # noqa: E501 - return StateApiClient(address=address).list( - StateResource.OBJECTS, - options=ListApiOptions( - limit=limit, timeout=timeout, filters=filters, detail=detail - ), - raise_on_missing_output=raise_on_missing_output, - _explain=_explain, - ) - - -def list_runtime_envs( - address: Optional[str] = None, - filters: Optional[List[Tuple[str, PredicateType, SupportedFilterType]]] = None, - limit: int = DEFAULT_LIMIT, - timeout: int = DEFAULT_RPC_TIMEOUT, - detail: bool = False, - raise_on_missing_output: bool = True, - _explain: bool = False, -) -> List[RuntimeEnvState]: - """List runtime environments in the cluster. - - Args: - address: Ray bootstrap address, could be `auto`, `localhost:6379`. - If None, it will be resolved automatically from an initialized ray. - filters: List of tuples of filter key, predicate (=, or !=), and - the filter value. E.g., `("node_id", "=", "abcdef")` - limit: Max number of entries returned by the state backend. - timeout: Max timeout value for the state APIs requests made. - detail: When True, more details info (specified in `RuntimeEnvState`) - will be queried and returned. See - :class:`RuntimeEnvState `. - raise_on_missing_output: When True, exceptions will be raised if - there is missing data due to truncation/data source unavailable. - _explain: Print the API information such as API latency or - failed query information. - - Returns: - List of - :class:`RuntimeEnvState `. - - Raises: - Exceptions: :class:`RayStateApiException ` - if the CLI failed to query the data. 
- """ # noqa: E501 - return StateApiClient(address=address).list( - StateResource.RUNTIME_ENVS, - options=ListApiOptions( - limit=limit, timeout=timeout, filters=filters, detail=detail - ), - raise_on_missing_output=raise_on_missing_output, - _explain=_explain, - ) - - -def list_cluster_events( - address: Optional[str] = None, - filters: Optional[List[Tuple[str, PredicateType, SupportedFilterType]]] = None, - limit: int = DEFAULT_LIMIT, - timeout: int = DEFAULT_RPC_TIMEOUT, - detail: bool = False, - raise_on_missing_output: bool = True, - _explain: bool = False, -) -> List[Dict]: - return StateApiClient(address=address).list( - StateResource.CLUSTER_EVENTS, - options=ListApiOptions( - limit=limit, timeout=timeout, filters=filters, detail=detail - ), - raise_on_missing_output=raise_on_missing_output, - _explain=_explain, - ) - - -""" -Log APIs -""" - - -def get_log( - address: Optional[str] = None, - node_id: Optional[str] = None, - node_ip: Optional[str] = None, - filename: Optional[str] = None, - actor_id: Optional[str] = None, - task_id: Optional[str] = None, - pid: Optional[int] = None, - follow: bool = False, - tail: int = -1, - timeout: int = DEFAULT_RPC_TIMEOUT, - suffix: str = "out", - encoding: Optional[str] = "utf-8", - errors: Optional[str] = "strict", - submission_id: Optional[str] = None, - attempt_number: int = 0, - _interval: Optional[float] = None, -) -> Generator[str, None, None]: - """Retrieve log file based on file name or some entities ids (pid, actor id, task id). 
- - Examples: - >>> import ray - >>> from ray.experimental.state.api import get_log # doctest: +SKIP - # To connect to an existing ray instance if there is - >>> ray.init("auto") # doctest: +SKIP - # Node IP could be retrieved from list_nodes() or ray.nodes() - >>> node_ip = "172.31.47.143" # doctest: +SKIP - >>> filename = "gcs_server.out" # doctest: +SKIP - >>> for l in get_log(filename=filename, node_ip=node_ip): # doctest: +SKIP - >>> print(l) # doctest: +SKIP - - Args: - address: Ray bootstrap address, could be `auto`, `localhost:6379`. - If not specified, it will be retrieved from the initialized ray cluster. - node_id: Id of the node containing the logs . - node_ip: Ip of the node containing the logs. (At least one of the node_id and - node_ip have to be supplied when identifying a node). - filename: Name of the file (relative to the ray log directory) to be retrieved. - actor_id: Id of the actor if getting logs from an actor. - task_id: Id of the task if getting logs generated by a task. - pid: PID of the worker if getting logs generated by a worker. When querying - with pid, either node_id or node_ip must be supplied. - follow: When set to True, logs will be streamed and followed. - tail: Number of lines to get from the end of the log file. Set to -1 for getting - the entire log. - timeout: Max timeout for requests made when getting the logs. - suffix: The suffix of the log file if query by id of tasks/workers/actors. Default to "out". - encoding: The encoding used to decode the content of the log file. Default is - "utf-8". Use None to get binary data directly. - errors: The error handling scheme to use for decoding errors. Default is - "strict". See https://docs.python.org/3/library/codecs.html#error-handlers - submission_id: Job submission ID if getting log from a submission job. - attempt_number: The attempt number of the task if getting logs generated by a task. - _interval: The interval in secs to print new logs when `follow=True`. 
- - Return: - A Generator of log line, None for SendType and ReturnType. - - Raises: - Exceptions: :class:`RayStateApiException ` if the CLI - failed to query the data. - """ # noqa: E501 - - api_server_url = ray_address_to_api_server_url(address) - media_type = "stream" if follow else "file" - - options = GetLogOptions( - node_id=node_id, - node_ip=node_ip, - filename=filename, - actor_id=actor_id, - task_id=task_id, - pid=pid, - lines=tail, - interval=_interval, - media_type=media_type, - timeout=timeout, - suffix=suffix, - submission_id=submission_id, - attempt_number=attempt_number, - ) - options_dict = {} - for field in fields(options): - option_val = getattr(options, field.name) - if option_val is not None: - options_dict[field.name] = option_val - - with requests.get( - f"{api_server_url}/api/v0/logs/{media_type}?" - f"{urllib.parse.urlencode(options_dict)}", - stream=True, - ) as r: - if r.status_code != 200: - raise RayStateApiException(r.text) - for bytes in r.iter_content(chunk_size=None): - bytes = bytearray(bytes) - # First byte 1 means success. - if bytes.startswith(b"1"): - bytes.pop(0) - logs = bytes - if encoding is not None: - logs = bytes.decode(encoding=encoding, errors=errors) - else: - assert bytes.startswith(b"0") - error_msg = bytes.decode("utf-8") - raise RayStateApiException(error_msg) - yield logs - - -def list_logs( - address: Optional[str] = None, - node_id: Optional[str] = None, - node_ip: Optional[str] = None, - glob_filter: Optional[str] = None, - timeout: int = DEFAULT_RPC_TIMEOUT, -) -> Dict[str, List[str]]: - """Listing log files available. - - Args: - address: Ray bootstrap address, could be `auto`, `localhost:6379`. - If not specified, it will be retrieved from the initialized ray cluster. - node_id: Id of the node containing the logs. - node_ip: Ip of the node containing the logs. - glob_filter: Name of the file (relative to the ray log directory) to be - retrieved. E.g. `glob_filter="*worker*"` for all worker logs. 
- actor_id: Id of the actor if getting logs from an actor. - timeout: Max timeout for requests made when getting the logs. - _interval: The interval in secs to print new logs when `follow=True`. - - Return: - A dictionary where the keys are log groups (e.g. gcs, raylet, worker), and - values are list of log filenames. - - Raises: - Exceptions: :class:`RayStateApiException ` if the CLI - failed to query the data, or ConnectionError if failed to resolve the - ray address. - """ # noqa: E501 - assert ( - node_ip is not None or node_id is not None - ), "At least one of node ip and node id is required" - - api_server_url = ray_address_to_api_server_url(address) - - if not glob_filter: - glob_filter = "*" - - options_dict = {} - if node_ip: - options_dict["node_ip"] = node_ip - if node_id: - options_dict["node_id"] = node_id - if glob_filter: - options_dict["glob"] = glob_filter - options_dict["timeout"] = timeout - - r = requests.get( - f"{api_server_url}/api/v0/logs?{urllib.parse.urlencode(options_dict)}" - ) - r.raise_for_status() - - response = r.json() - if response["result"] is False: - raise RayStateApiException( - "API server internal error. See dashboard.log file for more details. " - f"Error: {response['msg']}" - ) - return response["data"]["result"] - - -""" -Summary APIs -""" - - -def summarize_tasks( - address: Optional[str] = None, - timeout: int = DEFAULT_RPC_TIMEOUT, - raise_on_missing_output: bool = True, - _explain: bool = False, -) -> Dict: - """Summarize the tasks in cluster. - - Args: - address: Ray bootstrap address, could be `auto`, `localhost:6379`. - If None, it will be resolved automatically from an initialized ray. - timeout: Max timeout for requests made when getting the states. - raise_on_missing_output: When True, exceptions will be raised if - there is missing data due to truncation/data source unavailable. - _explain: Print the API information such as API latency or - failed query information. 
- - Return: - Dictionarified - :class:`~ray.experimental.state.common.TaskSummaries` - - Raises: - Exceptions: :class:`RayStateApiException ` - if the CLI is failed to query the data. - """ # noqa: E501 - return StateApiClient(address=address).summary( - SummaryResource.TASKS, - options=SummaryApiOptions(timeout=timeout), - raise_on_missing_output=raise_on_missing_output, - _explain=_explain, - ) - - -def summarize_actors( - address: Optional[str] = None, - timeout: int = DEFAULT_RPC_TIMEOUT, - raise_on_missing_output: bool = True, - _explain: bool = False, -) -> Dict: - """Summarize the actors in cluster. - - Args: - address: Ray bootstrap address, could be `auto`, `localhost:6379`. - If None, it will be resolved automatically from an initialized ray. - timeout: Max timeout for requests made when getting the states. - raise_on_missing_output: When True, exceptions will be raised if - there is missing data due to truncation/data source unavailable. - _explain: Print the API information such as API latency or - failed query information. - - Return: - Dictionarified - :class:`~ray.experimental.state.common.ActorSummaries` - - Raises: - Exceptions: :class:`RayStateApiException ` if the CLI - failed to query the data. - """ # noqa: E501 - return StateApiClient(address=address).summary( - SummaryResource.ACTORS, - options=SummaryApiOptions(timeout=timeout), - raise_on_missing_output=raise_on_missing_output, - _explain=_explain, - ) - - -def summarize_objects( - address: Optional[str] = None, - timeout: int = DEFAULT_RPC_TIMEOUT, - raise_on_missing_output: bool = True, - _explain: bool = False, -) -> Dict: - """Summarize the objects in cluster. - - Args: - address: Ray bootstrap address, could be `auto`, `localhost:6379`. - If None, it will be resolved automatically from an initialized ray. - timeout: Max timeout for requests made when getting the states. 
- raise_on_missing_output: When True, exceptions will be raised if - there is missing data due to truncation/data source unavailable. - _explain: Print the API information such as API latency or - failed query information. - - Return: - Dictionarified :class:`~ray.experimental.state.common.ObjectSummaries` - - Raises: - Exceptions: :class:`RayStateApiException ` if the CLI - failed to query the data. - """ # noqa: E501 - return StateApiClient(address=address).summary( - SummaryResource.OBJECTS, - options=SummaryApiOptions(timeout=timeout), - raise_on_missing_output=raise_on_missing_output, - _explain=_explain, - ) +record_deprecated_state_api_import() diff --git a/python/ray/experimental/state/common.py b/python/ray/experimental/state/common.py index edebfbb64906..6fbd488cd0fe 100644 --- a/python/ray/experimental/state/common.py +++ b/python/ray/experimental/state/common.py @@ -1,1605 +1,4 @@ -import datetime -import json -import logging -import sys -from abc import ABC -from dataclasses import asdict, field, fields -from enum import Enum, unique -from typing import Any, Dict, List, Optional, Set, Tuple, Union +from ray.util.state.common import * # noqa: F401 F403 +from ray.util.state.util import record_deprecated_state_api_import -import ray.dashboard.utils as dashboard_utils -from ray._private.ray_constants import env_integer -from ray.core.generated.common_pb2 import TaskStatus, TaskType -from ray.core.generated.gcs_pb2 import TaskEvents -from ray.experimental.state.custom_types import ( - TypeActorStatus, - TypeNodeStatus, - TypePlacementGroupStatus, - TypeReferenceType, - TypeTaskStatus, - TypeTaskType, - TypeWorkerExitType, - TypeWorkerType, -) -from ray.experimental.state.exception import RayStateApiException - -try: - from pydantic.dataclasses import dataclass - - from ray.dashboard.modules.job.pydantic_models import JobDetails - -except ImportError: - # pydantic is not available in the dashboard. - # We will use the dataclass from the standard library. 
- from dataclasses import dataclass - - JobDetails = object - - -logger = logging.getLogger(__name__) - -DEFAULT_RPC_TIMEOUT = 30 -DEFAULT_LIMIT = 100 -DEFAULT_LOG_LIMIT = 1000 - -# Max number of entries from API server to the client -RAY_MAX_LIMIT_FROM_API_SERVER = env_integer( - "RAY_MAX_LIMIT_FROM_API_SERVER", 10 * 1000 -) # 10k - -# Max number of entries from data sources (rest will be truncated at the -# data source, e.g. raylet) -RAY_MAX_LIMIT_FROM_DATA_SOURCE = env_integer( - "RAY_MAX_LIMIT_FROM_DATA_SOURCE", 10 * 1000 -) # 10k - - -@unique -class StateResource(Enum): - ACTORS = "actors" - JOBS = "jobs" - PLACEMENT_GROUPS = "placement_groups" - NODES = "nodes" - WORKERS = "workers" - TASKS = "tasks" - OBJECTS = "objects" - RUNTIME_ENVS = "runtime_envs" - CLUSTER_EVENTS = "cluster_events" - - -@unique -class SummaryResource(Enum): - ACTORS = "actors" - TASKS = "tasks" - OBJECTS = "objects" - - -SupportedFilterType = Union[str, bool, int, float] - - -PredicateType = str # Literal["=", "!="] - - -class Humanify: - """A class containing default methods to - convert units into a human readable string.""" - - def timestamp(x: float): - """Converts miliseconds to a datetime object.""" - return str(datetime.datetime.fromtimestamp(x / 1000)) - - def memory(x: int): - """Converts raw bytes to a human readable memory size.""" - if x >= 2**30: - return str(format(x / (2**30), ".3f")) + " GiB" - elif x >= 2**20: - return str(format(x / (2**20), ".3f")) + " MiB" - elif x >= 2**10: - return str(format(x / (2**10), ".3f")) + " KiB" - return str(format(x, ".3f")) + " B" - - def duration(x: int): - """Converts miliseconds to a human readable duration.""" - return str(datetime.timedelta(milliseconds=x)) - - def events(events: List[dict]): - """Converts a list of task events into a human readable format.""" - for event in events: - if "created_ms" in event: - event["created_ms"] = Humanify.timestamp(event["created_ms"]) - return events - - def node_resources(resources: dict): - 
"""Converts a node's resources into a human readable format.""" - for resource in resources: - if "memory" in resource: - resources[resource] = Humanify.memory(resources[resource]) - return resources - - -@dataclass(init=True) -class ListApiOptions: - # Maximum number of entries to return - limit: int = DEFAULT_LIMIT - # The timeout for the API call. - timeout: int = DEFAULT_RPC_TIMEOUT - # If True, more detailed output will be printed. - # The API could query more sources than detail == False - # to get more data in detail. - detail: bool = False - # Filters. Each tuple pair (key, predicate, value) means key predicate value. - # If there's more than 1 filter, it means AND. - # E.g., [(key, "=", val), (key2, "!=" val2)] means (key=val) AND (key2!=val2) - filters: Optional[List[Tuple[str, PredicateType, SupportedFilterType]]] = field( - default_factory=list - ) - # [only tasks] If driver tasks should be excluded. - exclude_driver: bool = True - # When the request is processed on the server side, - # we should apply multiplier so that server side can finish - # processing a request within timeout. Otherwise, - # timeout will always lead Http timeout. - server_timeout_multiplier: float = 0.8 - - def __post_init__(self): - # To return the data to users, when there's a partial failure - # we need to have a timeout that's smaller than the users' timeout. - # 80% is configured arbitrarily. - self.timeout = int(self.timeout * self.server_timeout_multiplier) - assert self.timeout != 0, "0 second timeout is not supported." - if self.filters is None: - self.filters = [] - - for filter in self.filters: - _, filter_predicate, _ = filter - if filter_predicate != "=" and filter_predicate != "!=": - raise ValueError( - f"Unsupported filter predicate {filter_predicate} is given. " - "Available predicates: =, !=." 
- ) - - -@dataclass(init=True) -class GetApiOptions: - # Timeout for the HTTP request - timeout: int = DEFAULT_RPC_TIMEOUT - - -@dataclass(init=True) -class SummaryApiOptions: - # Timeout for the HTTP request - timeout: int = DEFAULT_RPC_TIMEOUT - - # Filters. Each tuple pair (key, predicate, value) means key predicate value. - # If there's more than 1 filter, it means AND. - # E.g., [(key, "=", val), (key2, "!=" val2)] means (key=val) AND (key2!=val2) - # For summary endpoints that call list under the hood, we'll pass - # these filters directly into the list call. - filters: Optional[List[Tuple[str, PredicateType, SupportedFilterType]]] = field( - default_factory=list - ) - - # Change out to summarize the output. There is a summary_by value for each entity. - # Tasks: by func_name - # Actors: by class - # Objects: by callsite - summary_by: Optional[str] = None - - -def state_column(*, filterable: bool, detail: bool = False, format_fn=None, **kwargs): - """A wrapper around dataclass.field to add additional metadata. - - The metadata is used to define detail / filterable option of - each column. - - Args: - detail: If True, the column is used when detail == True - filterable: If True, the column can be used for filtering. - kwargs: The same kwargs for the `dataclasses.field` function. - """ - m = {"detail": detail, "filterable": filterable, "format_fn": format_fn} - # Default for detail field is None since it could be missing. - if detail and "default" not in kwargs: - kwargs["default"] = None - - if "metadata" in kwargs: - # Metadata explicitly specified, so add detail and filterable if missing. - kwargs["metadata"].update(m) - else: - # Metadata not explicitly specified, so add it. - kwargs["metadata"] = m - return field(**kwargs) - - -class StateSchema(ABC): - """Schema class for Ray resource abstraction. - - The child class must be dataclass. All child classes - - perform runtime type checking upon initialization. 
- - are supposed to use `state_column` instead of `field`. - It will allow the class to return filterable/detail columns. - If `state_column` is not specified, that column is not filterable - and for non-detail output. - - For example, - ``` - @dataclass - class State(StateSchema): - column_a: str - column_b: int = state_column(detail=True, filterable=True) - - s = State(column_a="abc", b=1) - # Returns {"column_b"} - s.filterable_columns() - # Returns {"column_a"} - s.base_columns() - # Returns {"column_a", "column_b"} - s.columns() - ``` - - In addition, the schema also provides a humanify abstract method to - convert the state object into something human readable, ready for printing. - - Subclasses should override this method, providing logic to convert its own fields - to something human readable, packaged and returned in a dict. - - Each field that wants to be humanified should include a 'format_fn' key in its - metadata dictionary. - """ - - @classmethod - def humanify(cls, state: dict) -> dict: - """Convert the given state object into something human readable.""" - for f in fields(cls): - if ( - f.metadata.get("format_fn") is not None - and f.name in state - and state[f.name] is not None - ): - try: - state[f.name] = f.metadata["format_fn"](state[f.name]) - except Exception as e: - logger.error(f"Failed to format {f.name}:{state[f.name]} with {e}") - return state - - @classmethod - def list_columns(cls, detail: bool = True) -> List[str]: - """Return a list of columns.""" - cols = [] - for f in fields(cls): - if detail: - cols.append(f.name) - elif not f.metadata.get("detail", False): - cols.append(f.name) - - return cols - - @classmethod - def columns(cls) -> Set[str]: - """Return a set of all columns.""" - return set(cls.list_columns()) - - @classmethod - def filterable_columns(cls) -> Set[str]: - """Return a list of filterable columns""" - filterable = set() - for f in fields(cls): - if f.metadata.get("filterable", False): - filterable.add(f.name) - return 
filterable - - @classmethod - def base_columns(cls) -> Set[str]: - """Return a list of base columns. - - Base columns mean columns to return when detail == False. - """ - return set(cls.list_columns(detail=False)) - - @classmethod - def detail_columns(cls) -> Set[str]: - """Return a list of detail columns. - - Detail columns mean columns to return when detail == True. - """ - return set(cls.list_columns(detail=True)) - - def asdict(self): - return asdict(self) - - # Allow dict like access on the class directly for backward compatibility. - def __getitem__(self, key): - return getattr(self, key) - - def __setitem__(self, key, value): - setattr(self, key, value) - - def get(self, key, default=None): - return getattr(self, key, default) - - -def filter_fields(data: dict, state_dataclass: StateSchema, detail: bool) -> dict: - """Filter the given data's columns based on the given schema. - - Args: - data: A single data entry to filter columns. - state_dataclass: The schema to filter data. - detail: Whether or not it should include columns for detail output. - """ - filtered_data = {} - columns = state_dataclass.columns() if detail else state_dataclass.base_columns() - for col in columns: - if col in data: - filtered_data[col] = data[col] - else: - filtered_data[col] = None - return filtered_data - - -@dataclass(init=True) -class GetLogOptions: - timeout: int - node_id: Optional[str] = None - node_ip: Optional[str] = None - # One of {file, stream}. File means it will return the whole log. - # stream means it will keep the connection and streaming the log. - media_type: str = "file" - # The file name of the log. - filename: Optional[str] = None - # The actor id of the log. It is used only for worker logs. - actor_id: Optional[str] = None - # The task id of the log. - task_id: Optional[str] = None - # The attempt number of the task. - attempt_number: int = 0 - # The pid of the log. It is used only for worker logs. - pid: Optional[int] = None - # Total log lines to return. 
- lines: int = 1000 - # The interval where new logs are streamed to. - # Should be used only when media_type == stream. - interval: Optional[float] = None - # The suffix of the log file if file resolution not through filename directly. - # Default to "out". - suffix: str = "out" - # The job submission id for submission job. This doesn't work for driver job - # since Ray doesn't log driver logs to file in the ray logs directory. - submission_id: Optional[str] = None - - def __post_init__(self): - if self.pid: - self.pid = int(self.pid) - if self.interval: - self.interval = float(self.interval) - self.lines = int(self.lines) - - if self.media_type == "file": - assert self.interval is None - if self.media_type not in ["file", "stream"]: - raise ValueError(f"Invalid media type: {self.media_type}") - if not (self.node_id or self.node_ip) and not (self.actor_id or self.task_id): - raise ValueError( - "node_id or node_ip must be provided as constructor arguments when no " - "actor or task_id is supplied as arguments." - ) - if self.node_id and self.node_ip: - raise ValueError( - "Both node_id and node_ip are given. Only one of them can be provided. " - f"Given node id: {self.node_id}, given node ip: {self.node_ip}" - ) - if not ( - self.actor_id - or self.task_id - or self.pid - or self.filename - or self.submission_id - ): - raise ValueError( - "None of actor_id, task_id, pid, submission_id or filename " - "is provided. At least one of them is required to fetch logs." - ) - - if self.suffix not in ["out", "err"]: - raise ValueError( - f"Invalid suffix: {self.suffix}. Must be one of 'out' or 'err'." - ) - - -# See the ActorTableData message in gcs.proto for all potential options that -# can be included in this class. -@dataclass(init=True) -class ActorState(StateSchema): - """Actor State""" - - #: The id of the actor. - actor_id: str = state_column(filterable=True) - #: The class name of the actor. 
- class_name: str = state_column(filterable=True) - #: The state of the actor. - #: - #: - DEPENDENCIES_UNREADY: Actor is waiting for dependency to be ready. - #: E.g., a new actor is waiting for object ref that's created from - #: other remote task. - #: - PENDING_CREATION: Actor's dependency is ready, but it is not created yet. - #: It could be because there are not enough resources, too many actor - #: entries in the scheduler queue, or the actor creation is slow - #: (e.g., slow runtime environment creation, - #: slow worker startup, or etc.). - #: - ALIVE: The actor is created, and it is alive. - #: - RESTARTING: The actor is dead, and it is restarting. - #: It is equivalent to `PENDING_CREATION`, - #: but means the actor was dead more than once. - #: - DEAD: The actor is permanatly dead. - state: TypeActorStatus = state_column(filterable=True) - #: The job id of this actor. - job_id: str = state_column(filterable=True) - #: The name of the actor given by the `name` argument. - name: Optional[str] = state_column(filterable=True) - #: The node id of this actor. - #: If the actor is restarting, it could be the node id - #: of the dead actor (and it will be re-updated when - #: the actor is successfully restarted). - node_id: Optional[str] = state_column(filterable=True) - #: The pid of the actor. 0 if it is not created yet. - pid: Optional[int] = state_column(filterable=True) - #: The namespace of the actor. - ray_namespace: Optional[str] = state_column(filterable=True) - #: The runtime environment information of the actor. - serialized_runtime_env: Optional[str] = state_column(filterable=False, detail=True) - #: The resource requirement of the actor. - required_resources: Optional[dict] = state_column(filterable=False, detail=True) - #: Actor's death information in detail. None if the actor is not dead yet. - death_cause: Optional[dict] = state_column(filterable=False, detail=True) - #: True if the actor is detached. False otherwise. 
- is_detached: Optional[bool] = state_column(filterable=False, detail=True) - #: The placement group id that's associated with this actor. - placement_group_id: Optional[str] = state_column(detail=True, filterable=True) - #: Actor's repr name if a customized __repr__ method exists, else empty string. - repr_name: Optional[str] = state_column(detail=True, filterable=True) - - -@dataclass(init=True) -class PlacementGroupState(StateSchema): - """PlacementGroup State""" - - #: The id of the placement group. - placement_group_id: str = state_column(filterable=True) - #: The name of the placement group if it is given by the name argument. - name: str = state_column(filterable=True) - #: The job id of the placement group. - creator_job_id: str = state_column(filterable=True) - #: The state of the placement group. - #: - #: - PENDING: The placement group creation is pending scheduling. - #: It could be because there's not enough resources, some of creation - #: stage has failed (e.g., failed to commit placement gropus because - #: the node is dead). - #: - CREATED: The placement group is created. - #: - REMOVED: The placement group is removed. - #: - RESCHEDULING: The placement group is rescheduling because some of - #: bundles are dead because they were on dead nodes. - state: TypePlacementGroupStatus = state_column(filterable=True) - #: The bundle specification of the placement group. - bundles: Optional[List[dict]] = state_column(filterable=False, detail=True) - #: True if the placement group is detached. False otherwise. - is_detached: Optional[bool] = state_column(filterable=True, detail=True) - #: The scheduling stats of the placement group. - stats: Optional[dict] = state_column(filterable=False, detail=True) - - -@dataclass(init=True) -class NodeState(StateSchema): - """Node State""" - - #: The id of the node. - node_id: str = state_column(filterable=True) - #: The ip address of the node. - node_ip: str = state_column(filterable=True) - #: If this is a head node. 
- is_head_node: bool = state_column(filterable=True) - #: The state of the node. - #: - #: ALIVE: The node is alive. - #: DEAD: The node is dead. - state: TypeNodeStatus = state_column(filterable=True) - #: The name of the node if it is given by the name argument. - node_name: str = state_column(filterable=True) - #: The total resources of the node. - resources_total: dict = state_column( - filterable=False, format_fn=Humanify.node_resources - ) - #: The time when the node (raylet) starts. - start_time_ms: Optional[int] = state_column( - filterable=False, detail=True, format_fn=Humanify.timestamp - ) - #: The time when the node exits. The timestamp could be delayed - #: if the node is dead unexpectedly (could be delayed - # up to 30 seconds). - end_time_ms: Optional[int] = state_column( - filterable=False, detail=True, format_fn=Humanify.timestamp - ) - - -# NOTE: -# Declaring this as dataclass would make __init__ not being called properly. -class JobState(StateSchema, JobDetails): - """The state of the job that's submitted by Ray's Job APIs or driver jobs""" - - def __init__(self, **kwargs): - JobDetails.__init__(self, **kwargs) - - @classmethod - def filterable_columns(cls) -> Set[str]: - # We are not doing any filtering since filtering is currently done - # at the backend. - return {"job_id", "type", "status", "submission_id"} - - @classmethod - def humanify(cls, state: dict) -> dict: - return state - - @classmethod - def list_columns(cls, detail: bool = False) -> List[str]: - if not detail: - return [ - "job_id", - "submission_id", - "entrypoint", - "type", - "status", - "message", - "error_type", - "driver_info", - ] - if isinstance(JobDetails, object): - # We don't have pydantic in the dashboard. This is because - # we call this method at module import time, so we need to - # check if the class is a pydantic model. 
- return [] - - return JobDetails.__fields__ - - def asdict(self): - return JobDetails.dict(self) - - @classmethod - def schema_dict(cls) -> Dict[str, Any]: - schema_types = cls.schema()["properties"] - # Get type name to actual type mapping. - return { - k: v["type"] for k, v in schema_types.items() if v.get("type") is not None - } - - -@dataclass(init=True) -class WorkerState(StateSchema): - """Worker State""" - - #: The id of the worker. - worker_id: str = state_column(filterable=True) - #: Whether or not if the worker is alive. - is_alive: bool = state_column(filterable=True) - #: The type of the worker. - #: - #: - WORKER: The regular Ray worker process that executes tasks or - # instantiates an actor. - #: - DRIVER: The driver (Python script that calls `ray.init`). - #: - SPILL_WORKER: The worker that spills objects. - #: - RESTORE_WORKER: The worker that restores objects. - worker_type: TypeWorkerType = state_column(filterable=True) - #: The exit type of the worker if the worker is dead. - #: - #: - SYSTEM_ERROR: Worker exit due to system level failures (i.e. worker crash). - #: - INTENDED_SYSTEM_EXIT: System-level exit that is intended. E.g., - #: Workers are killed because they are idle for a long time. - #: - USER_ERROR: Worker exits because of user error. - #: E.g., execptions from the actor initialization. - #: - INTENDED_USER_EXIT: Intended exit from users (e.g., users exit - #: workers with exit code 0 or exit initated by Ray API such as ray.kill). - exit_type: Optional[TypeWorkerExitType] = state_column(filterable=True) - #: The node id of the worker. - node_id: str = state_column(filterable=True) - #: The ip address of the worker. - ip: str = state_column(filterable=True) - #: The pid of the worker. - pid: int = state_column(filterable=True) - #: The exit detail of the worker if the worker is dead. - exit_detail: Optional[str] = state_column(detail=True, filterable=False) - #: The time worker is first launched. - #: -1 if the value doesn't exist. 
- #: The lifecycle of worker is as follow. - #: worker_launch_time_ms (process startup requested). - #: -> worker_launched_time_ms (process started). - #: -> start_time_ms (worker is ready to be used). - #: -> end_time_ms (worker is destroyed). - worker_launch_time_ms: Optional[int] = state_column( - filterable=False, detail=True, format_fn=Humanify.timestamp - ) - #: The time worker is succesfully launched - #: -1 if the value doesn't exist. - worker_launched_time_ms: Optional[int] = state_column( - filterable=False, detail=True, format_fn=Humanify.timestamp - ) - #: The time when the worker is started and initialized. - #: 0 if the value doesn't exist. - start_time_ms: Optional[int] = state_column( - filterable=False, detail=True, format_fn=Humanify.timestamp - ) - #: The time when the worker exits. The timestamp could be delayed - #: if the worker is dead unexpectedly. - #: 0 if the value doesn't exist. - end_time_ms: Optional[int] = state_column( - filterable=False, detail=True, format_fn=Humanify.timestamp - ) - - -@dataclass(init=True) -class ClusterEventState(StateSchema): - severity: str = state_column(filterable=True) - time: str = state_column(filterable=False) - source_type: str = state_column(filterable=True) - message: str = state_column(filterable=False) - event_id: str = state_column(filterable=True) - custom_fields: Optional[dict] = state_column(filterable=False, detail=True) - - -@dataclass(init=True) -class TaskState(StateSchema): - """Task State""" - - #: The id of the task. - task_id: str = state_column(filterable=True) - #: The attempt (retry) number of the task. - attempt_number: int = state_column(filterable=True) - #: The name of the task if it is given by the name argument. - name: str = state_column(filterable=True) - #: The state of the task. - #: - #: Refer to src/ray/protobuf/common.proto for a detailed explanation of the state - #: breakdowns and typical state transition flow. 
- #: - state: TypeTaskStatus = state_column(filterable=True) - #: The job id of this task. - job_id: str = state_column(filterable=True) - #: The actor id that's associated with this task. - #: It is empty if there's no relevant actors. - actor_id: Optional[str] = state_column(filterable=True) - #: The type of the task. - #: - #: - NORMAL_TASK: Tasks created by `func.remote()`` - #: - ACTOR_CREATION_TASK: Actors created by `class.remote()` - #: - ACTOR_TASK: Actor tasks submitted by `actor.method.remote()` - #: - DRIVER_TASK: Driver (A script that calls `ray.init`). - type: TypeTaskType = state_column(filterable=True) - #: The name of the task. If is the name of the function - #: if the type is a task or an actor task. - #: It is the name of the class if it is a actor scheduling task. - func_or_class_name: str = state_column(filterable=True) - #: The parent task id. If the parent is a normal task, it will be the task's id. - #: If the parent runs in a concurrent actor (async actor or threaded actor), - #: it will be the actor's creation task id. - parent_task_id: str = state_column(filterable=True) - #: Id of the node that runs the task. If the task is retried, it could - #: contain the node id of the previous executed task. - #: If empty, it means the task hasn't been scheduled yet. - node_id: Optional[str] = state_column(filterable=True) - #: The worker id that's associated with this task. - worker_id: Optional[str] = state_column(filterable=True) - #: Task error type. - error_type: Optional[str] = state_column(filterable=True) - #: The language of the task. E.g., Python, Java, or Cpp. - language: Optional[str] = state_column(detail=True, filterable=True) - #: The required resources to execute the task. - required_resources: Optional[dict] = state_column(detail=True, filterable=False) - #: The runtime environment information for the task. 
- runtime_env_info: Optional[dict] = state_column(detail=True, filterable=False) - #: The placement group id that's associated with this task. - placement_group_id: Optional[str] = state_column(detail=True, filterable=True) - #: The list of events of the given task. - #: Refer to src/ray/protobuf/common.proto for a detailed explanation of the state - #: breakdowns and typical state transition flow. - events: Optional[List[dict]] = state_column( - detail=True, filterable=False, format_fn=Humanify.events - ) - #: The list of profile events of the given task. - profiling_data: Optional[dict] = state_column(detail=True, filterable=False) - #: The time when the task is created. A Unix timestamp in ms. - creation_time_ms: Optional[int] = state_column( - detail=True, - filterable=False, - format_fn=Humanify.timestamp, - ) - #: The time when the task starts to run. A Unix timestamp in ms. - start_time_ms: Optional[int] = state_column( - detail=True, - filterable=False, - format_fn=Humanify.timestamp, - ) - #: The time when the task is finished or failed. A Unix timestamp in ms. - end_time_ms: Optional[int] = state_column( - detail=True, filterable=False, format_fn=Humanify.timestamp - ) - #: The task logs info, e.g. offset into the worker log file when the task - #: starts/finishes. - task_log_info: Optional[dict] = state_column(detail=True, filterable=False) - #: Task error detail info. - error_message: Optional[str] = state_column(detail=True, filterable=False) - - -@dataclass(init=True) -class ObjectState(StateSchema): - """Object State""" - - #: The id of the object. - object_id: str = state_column(filterable=True) - #: The size of the object in mb. - object_size: int = state_column(filterable=True, format_fn=Humanify.memory) - #: The status of the task that creates the object. - #: - #: - NIL: We don't have a status for this task because we are not the owner or the - #: task metadata has already been deleted. 
- #: - WAITING_FOR_DEPENDENCIES: The task is waiting for its dependencies - #: to be created. - #: - SCHEDULED: All dependencies have been created and the task is - #: scheduled to execute. - #: It could be because the task is waiting for resources, - #: runtime environmenet creation, fetching dependencies to the - #: local node, and etc.. - #: - FINISHED: The task finished successfully. - #: - WAITING_FOR_EXECUTION: The task is scheduled properly and - #: waiting for execution. It includes time to deliver the task - #: to the remote worker + queueing time from the execution side. - #: - RUNNING: The task that is running. - task_status: TypeTaskStatus = state_column(filterable=True) - #: The reference type of the object. - #: See :ref:`Debugging with Ray Memory ` for more details. - #: - #: - ACTOR_HANDLE: The reference is an actor handle. - #: - PINNED_IN_MEMORY: The object is pinned in memory, meaning there's - #: in-flight `ray.get` on this reference. - #: - LOCAL_REFERENCE: There's a local reference (e.g., Python reference) - #: to this object reference. The object won't be GC'ed until all of them is gone. - #: - USED_BY_PENDING_TASK: The object reference is passed to other tasks. E.g., - #: `a = ray.put()` -> `task.remote(a)`. In this case, a is used by a - #: pending task `task`. - #: - CAPTURED_IN_OBJECT: The object is serialized by other objects. E.g., - #: `a = ray.put(1)` -> `b = ray.put([a])`. a is serialized within a list. - #: - UNKNOWN_STATUS: The object ref status is unkonwn. - reference_type: TypeReferenceType = state_column(filterable=True) - #: The callsite of the object. - call_site: str = state_column(filterable=True) - #: The worker type that creates the object. - #: - #: - WORKER: The regular Ray worker process that executes tasks or - #: instantiates an actor. - #: - DRIVER: The driver (Python script that calls `ray.init`). - #: - SPILL_WORKER: The worker that spills objects. - #: - RESTORE_WORKER: The worker that restores objects. 
- type: TypeWorkerType = state_column(filterable=True) - #: The pid of the owner. - pid: int = state_column(filterable=True) - #: The ip address of the owner. - ip: str = state_column(filterable=True) - - -@dataclass(init=True) -class RuntimeEnvState(StateSchema): - """Runtime Environment State""" - - #: The runtime environment spec. - runtime_env: dict = state_column(filterable=True) - #: Whether or not the runtime env creation has succeeded. - success: bool = state_column(filterable=True) - #: The latency of creating the runtime environment. - #: Available if the runtime env is successfully created. - creation_time_ms: Optional[float] = state_column( - filterable=False, format_fn=Humanify.timestamp - ) - #: The node id of this runtime environment. - node_id: str = state_column(filterable=True) - #: The number of actors and tasks that use this runtime environment. - ref_cnt: Optional[int] = state_column(detail=True, filterable=False) - #: The error message if the runtime environment creation has failed. - #: Available if the runtime env is failed to be created. - error: Optional[str] = state_column(detail=True, filterable=True) - - -AVAILABLE_STATES = [ - ActorState, - PlacementGroupState, - NodeState, - WorkerState, - JobState, - TaskState, - ObjectState, - RuntimeEnvState, -] - - -for state in AVAILABLE_STATES: - if len(state.filterable_columns()) > 0: - filterable_cols = "\n\n ".join(state.filterable_columns()) - state.__doc__ += f""" -\nBelow columns can be used for the `--filter` option. -\n - {filterable_cols} -\n -""" - - if len(state.detail_columns()) > 0: - detail_cols = "\n\n ".join(state.detail_columns()) - state.__doc__ += f""" -\nBelow columns are available only when `get` API is used, -\n`--detail` is specified through CLI, or `detail=True` is given to Python APIs. 
-\n -\n - {detail_cols} -\n -""" - - -@dataclass(init=True) -class ListApiResponse: - # NOTE(rickyyx): We currently perform hard truncation when querying - # resources which could have a large number (e.g. asking raylets for - # the number of all objects). - # The returned of resources seen by the user will go through from the - # below funnel: - # - total - # | With truncation at the data source if the number of returned - # | resource exceeds `RAY_MAX_LIMIT_FROM_DATA_SOURCE` - # v - # - num_after_truncation - # | With filtering at the state API server - # v - # - num_filtered - # | With limiting, - # | set by min(`RAY_MAX_LIMIT_FROM_API_SERER`, ) - # v - # - len(result) - - # Total number of the available resource from the cluster. - total: int - # Number of resources returned by data sources after truncation - num_after_truncation: int - # Number of resources after filtering - num_filtered: int - # Returned data. None if no data is returned. - result: List[Dict] - # List API can have a partial failure if queries to - # all sources fail. For example, getting object states - # require to ping all raylets, and it is possible some of - # them fails. Note that it is impossible to guarantee high - # availability of data because ray's state information is - # not replicated. - partial_failure_warning: Optional[str] = "" - # A list of warnings to print. - warnings: Optional[List[str]] = None - - -""" -Summary API schema -""" - -DRIVER_TASK_ID_PREFIX = "ffffffffffffffffffffffffffffffffffffffff" - - -@dataclass(init=True) -class TaskSummaryPerFuncOrClassName: - #: The function or class name of this task. - func_or_class_name: str - #: The type of the class. Equivalent to protobuf TaskType. - type: str - #: State name to the count dict. State name is equivalent to - #: the protobuf TaskStatus. 
- state_counts: Dict[TypeTaskStatus, int] = field(default_factory=dict) - - -@dataclass -class Link: - #: The type of entity to link to - type: str - #: The id of the entity to link to - id: str - - -@dataclass(init=True) -class NestedTaskSummary: - #: The name of this task group - name: str - #: A unique identifier for this group - key: str - #: The type of the class. Equivalent to protobuf TaskType, - #: "ACTOR" if it represents an Actor, or "GROUP" if it's a grouping of tasks. - type: str - #: Unix timestamp to use to sort the task group. - timestamp: Optional[int] = None - #: State name to the count dict. State name is equivalent to - #: the protobuf TaskStatus. - state_counts: Dict[TypeTaskStatus, int] = field(default_factory=dict) - #: The child - children: List["NestedTaskSummary"] = field(default_factory=list) - #: A link to more details about this summary. - link: Optional[Link] = None - - -@dataclass -class TaskSummaries: - #: Group key -> summary. - #: Right now, we only have func_class_name as a key. - # TODO(sang): Support the task group abstraction. - summary: Union[Dict[str, TaskSummaryPerFuncOrClassName], List[NestedTaskSummary]] - #: Total Ray tasks. - total_tasks: int - #: Total actor tasks. - total_actor_tasks: int - #: Total scheduled actors. - total_actor_scheduled: int - summary_by: str = "func_name" - - @classmethod - def to_summary_by_func_name(cls, *, tasks: List[Dict]) -> "TaskSummaries": - # NOTE: The argument tasks contains a list of dictionary - # that have the same k/v as TaskState. 
- summary = {} - total_tasks = 0 - total_actor_tasks = 0 - total_actor_scheduled = 0 - - for task in tasks: - key = task["func_or_class_name"] - if key not in summary: - summary[key] = TaskSummaryPerFuncOrClassName( - func_or_class_name=task["func_or_class_name"], - type=task["type"], - ) - task_summary = summary[key] - - state = task["state"] - if state not in task_summary.state_counts: - task_summary.state_counts[state] = 0 - task_summary.state_counts[state] += 1 - - type_enum = TaskType.DESCRIPTOR.values_by_name[task["type"]].number - if type_enum == TaskType.NORMAL_TASK: - total_tasks += 1 - elif type_enum == TaskType.ACTOR_CREATION_TASK: - total_actor_scheduled += 1 - elif type_enum == TaskType.ACTOR_TASK: - total_actor_tasks += 1 - - return TaskSummaries( - summary=summary, - total_tasks=total_tasks, - total_actor_tasks=total_actor_tasks, - total_actor_scheduled=total_actor_scheduled, - summary_by="func_name", - ) - - @classmethod - def to_summary_by_lineage( - cls, *, tasks: List[Dict], actors: List[Dict] - ) -> "TaskSummaries": - """ - This summarizes tasks by lineage. - i.e. A task will be grouped with another task if they have the - same parent. - - This does things in 4 steps. - Step 1: Iterate through all tasks and keep track of them by id and ownership - Step 2: Put the tasks in a tree structure based on ownership - Step 3: Merge together siblings in the tree if there are more - than one with the same name. - Step 4: Total the children - - This can probably be more efficient if we merge together some steps to - reduce the amount of iterations but this algorithm produces very easy to - understand code. We can optimize in the future. - """ - # NOTE: The argument tasks contains a list of dictionary - # that have the same k/v as TaskState. 
- - tasks_by_id = {} - task_group_by_id = {} - actor_creation_task_id_for_actor_id = {} - summary = [] - total_tasks = 0 - total_actor_tasks = 0 - total_actor_scheduled = 0 - - # Step 1 - # We cannot assume that a parent task always comes before the child task - # So we need to keep track of all tasks by ids so we can quickly find the - # parent. - # We also track the actor creation tasks so we can quickly figure out the - # ownership of actors. - for task in tasks: - tasks_by_id[task["task_id"]] = task - type_enum = TaskType.DESCRIPTOR.values_by_name[task["type"]].number - if type_enum == TaskType.ACTOR_CREATION_TASK: - actor_creation_task_id_for_actor_id[task["actor_id"]] = task["task_id"] - - actor_dict = {actor["actor_id"]: actor for actor in actors} - - def get_or_create_task_group(task_id: str) -> Optional[NestedTaskSummary]: - """ - Gets an already created task_group - OR - Creates a task group and puts it in the right place under its parent. - For actor tasks, the parent is the Actor that owns it. For all other - tasks, the owner is the driver or task that created it. - - Returns None if there is missing data about the task or one of its parents. - - For task groups that represents actors, the id is in the - format actor:{actor_id} - """ - if task_id in task_group_by_id: - return task_group_by_id[task_id] - - task = tasks_by_id.get(task_id) - if not task: - logger.debug(f"We're missing data about {task_id}") - # We're missing data about this parent. So we're dropping the whole - # tree at that node. - return None - - # Use name first which allows users to customize the name of - # their remote function call using the name option. 
- func_name = task["name"] or task["func_or_class_name"] - task_id = task["task_id"] - type_enum = TaskType.DESCRIPTOR.values_by_name[task["type"]].number - - task_group_by_id[task_id] = NestedTaskSummary( - name=func_name, - key=task_id, - type=task["type"], - timestamp=task["creation_time_ms"], - link=Link(type="task", id=task_id), - ) - - # Set summary in right place under parent - if ( - type_enum == TaskType.ACTOR_TASK - or type_enum == TaskType.ACTOR_CREATION_TASK - ): - # For actor tasks, the parent is the actor and not the parent task. - parent_task_group = get_or_create_actor_task_group(task["actor_id"]) - if parent_task_group: - parent_task_group.children.append(task_group_by_id[task_id]) - else: - parent_task_id = task["parent_task_id"] - if not parent_task_id or parent_task_id.startswith( - DRIVER_TASK_ID_PREFIX - ): - summary.append(task_group_by_id[task_id]) - else: - parent_task_group = get_or_create_task_group(parent_task_id) - if parent_task_group: - parent_task_group.children.append(task_group_by_id[task_id]) - - return task_group_by_id[task_id] - - def get_or_create_actor_task_group( - actor_id: str, - ) -> Optional[NestedTaskSummary]: - """ - Gets an existing task group that represents an actor. - OR - Creates a task group that represents an actor. The owner of the actor is - the parent of the creation_task that created that actor. - - Returns None if there is missing data about the actor or one of its parents. - """ - key = f"actor:{actor_id}" - actor = actor_dict.get(actor_id) - if key not in task_group_by_id: - creation_task_id = actor_creation_task_id_for_actor_id.get(actor_id) - creation_task = tasks_by_id.get(creation_task_id) - - if not creation_task: - logger.debug(f"We're missing data about actor {actor_id}") - # We're missing data about the parent. So we're dropping the whole - # tree at that node. - return None - - # TODO(rickyx) - # We are using repr name for grouping actors if exists, - # else use class name. 
We should be using some group_name in the future. - if actor is None: - logger.debug( - f"We are missing actor info for actor {actor_id}, " - f"even though creation task exists: {creation_task}" - ) - [actor_name, *rest] = creation_task["func_or_class_name"].split(".") - else: - actor_name = ( - actor["repr_name"] - if actor["repr_name"] - else actor["class_name"] - ) - - task_group_by_id[key] = NestedTaskSummary( - name=actor_name, - key=key, - type="ACTOR", - timestamp=task["creation_time_ms"], - link=Link(type="actor", id=actor_id), - ) - - parent_task_id = creation_task["parent_task_id"] - if not parent_task_id or parent_task_id.startswith( - DRIVER_TASK_ID_PREFIX - ): - summary.append(task_group_by_id[key]) - else: - parent_task_group = get_or_create_task_group(parent_task_id) - if parent_task_group: - parent_task_group.children.append(task_group_by_id[key]) - - return task_group_by_id[key] - - # Step 2: Create the tree structure based on ownership - for task in tasks: - task_id = task["task_id"] - - task_group = get_or_create_task_group(task_id) - - if not task_group: - # We are probably missing data about this task or one of its parents. - continue - - state = task["state"] - if state not in task_group.state_counts: - task_group.state_counts[state] = 0 - task_group.state_counts[state] += 1 - - type_enum = TaskType.DESCRIPTOR.values_by_name[task["type"]].number - if type_enum == TaskType.NORMAL_TASK: - total_tasks += 1 - elif type_enum == TaskType.ACTOR_CREATION_TASK: - total_actor_scheduled += 1 - elif type_enum == TaskType.ACTOR_TASK: - total_actor_tasks += 1 - - def merge_sibings_for_task_group( - siblings: List[NestedTaskSummary], - ) -> Tuple[List[NestedTaskSummary], Optional[int]]: - """ - Merges task summaries with the same name into a group if there are more than - one child with that name. 
- - Args: - siblings: A list of NestedTaskSummary's to merge together - - Returns - Index 0: A list of NestedTaskSummary's which have been merged - Index 1: The smallest timestamp amongst the siblings - """ - if not len(siblings): - return siblings, None - - # Group by name - groups = {} - min_timestamp = None - - for child in siblings: - child.children, child_min_timestamp = merge_sibings_for_task_group( - child.children - ) - if child_min_timestamp and child_min_timestamp < ( - child.timestamp or sys.maxsize - ): - child.timestamp = child_min_timestamp - - if child.name not in groups: - groups[child.name] = NestedTaskSummary( - name=child.name, - key=child.name, - type="GROUP", - ) - groups[child.name].children.append(child) - if child.timestamp and child.timestamp < ( - groups[child.name].timestamp or sys.maxsize - ): - groups[child.name].timestamp = child.timestamp - if child.timestamp < (min_timestamp or sys.maxsize): - min_timestamp = child.timestamp - - # Take the groups that have more than one children and return it. - # For groups with just one child, return the child itself instead of - # creating a group. - return [ - group if len(group.children) > 1 else group.children[0] - for group in groups.values() - ], min_timestamp - - # Step 3 - summary, _ = merge_sibings_for_task_group(summary) - - def sort_task_groups(task_groups: List[NestedTaskSummary]) -> None: - # Sort by timestamp - # Put actor creation tasks above other tasks with the same timestamp - task_groups.sort(key=lambda x: 0 if x.type == "ACTOR_CREATION_TASK" else 1) - task_groups.sort(key=lambda x: x.timestamp or sys.maxsize) - - def calc_total_for_task_group( - task_group: NestedTaskSummary, - ) -> NestedTaskSummary: - """ - Calculates the total of a group as the sum of all children. 
- Sorts children by timestamp - """ - if not len(task_group.children): - return task_group - - for child in task_group.children: - totaled = calc_total_for_task_group(child) - - for state, count in totaled.state_counts.items(): - task_group.state_counts[state] = ( - task_group.state_counts.get(state, 0) + count - ) - - sort_task_groups(task_group.children) - - return task_group - - # Step 4 - summary = [calc_total_for_task_group(task_group) for task_group in summary] - sort_task_groups(summary) - - return TaskSummaries( - summary=summary, - total_tasks=total_tasks, - total_actor_tasks=total_actor_tasks, - total_actor_scheduled=total_actor_scheduled, - summary_by="lineage", - ) - - -@dataclass(init=True) -class ActorSummaryPerClass: - #: The class name of the actor. - class_name: str - #: State name to the count dict. State name is equivalent to - #: the protobuf ActorState. - state_counts: Dict[TypeActorStatus, int] = field(default_factory=dict) - - -@dataclass -class ActorSummaries: - #: Group key (actor class name) -> summary - summary: Dict[str, ActorSummaryPerClass] - #: Total number of actors - total_actors: int - summary_by: str = "class" - - @classmethod - def to_summary(cls, *, actors: List[Dict]): - # NOTE: The argument tasks contains a list of dictionary - # that have the same k/v as ActorState. - summary = {} - total_actors = 0 - - for actor in actors: - key = actor["class_name"] - if key not in summary: - summary[key] = ActorSummaryPerClass( - class_name=actor["class_name"], - ) - actor_summary = summary[key] - - state = actor["state"] - if state not in actor_summary.state_counts: - actor_summary.state_counts[state] = 0 - actor_summary.state_counts[state] += 1 - - total_actors += 1 - - return ActorSummaries( - summary=summary, - total_actors=total_actors, - ) - - -@dataclass(init=True) -class ObjectSummaryPerKey: - #: Total number of objects of the type. - total_objects: int - #: Total size in mb. 
- total_size_mb: float - #: Total number of workers that reference the type of objects. - total_num_workers: int - #: Total number of nodes that reference the type of objects. - total_num_nodes: int - #: State name to the count dict. State name is equivalent to - #: ObjectState. - task_state_counts: Dict[TypeTaskStatus, int] = field(default_factory=dict) - #: Ref count type to the count dict. State name is equivalent to - #: ObjectState. - ref_type_counts: Dict[TypeReferenceType, int] = field(default_factory=dict) - - -@dataclass -class ObjectSummaries: - #: Group key (actor class name) -> summary - summary: Dict[str, ObjectSummaryPerKey] - #: Total number of referenced objects in the cluster. - total_objects: int - #: Total size of referenced objects in the cluster in MB. - total_size_mb: float - #: Whether or not the callsite collection is enabled. - callsite_enabled: bool - summary_by: str = "callsite" - - @classmethod - def to_summary(cls, *, objects: List[Dict]): - # NOTE: The argument tasks contains a list of dictionary - # that have the same k/v as ObjectState. 
- summary = {} - total_objects = 0 - total_size_mb = 0 - key_to_workers = {} - key_to_nodes = {} - callsite_enabled = True - - for object in objects: - key = object["call_site"] - if key == "disabled": - callsite_enabled = False - if key not in summary: - summary[key] = ObjectSummaryPerKey( - total_objects=0, - total_size_mb=0, - total_num_workers=0, - total_num_nodes=0, - ) - key_to_workers[key] = set() - key_to_nodes[key] = set() - - object_summary = summary[key] - - task_state = object["task_status"] - if task_state not in object_summary.task_state_counts: - object_summary.task_state_counts[task_state] = 0 - object_summary.task_state_counts[task_state] += 1 - - ref_type = object["reference_type"] - if ref_type not in object_summary.ref_type_counts: - object_summary.ref_type_counts[ref_type] = 0 - object_summary.ref_type_counts[ref_type] += 1 - object_summary.total_objects += 1 - total_objects += 1 - - size_bytes = object["object_size"] - # object_size's unit is byte by default. It is -1, if the size is - # unknown. - if size_bytes != -1: - object_summary.total_size_mb += size_bytes / 1024**2 - total_size_mb += size_bytes / 1024**2 - - key_to_workers[key].add(object["pid"]) - key_to_nodes[key].add(object["ip"]) - - # Convert set of pid & node ips to length. - for key, workers in key_to_workers.items(): - summary[key].total_num_workers = len(workers) - for key, nodes in key_to_nodes.items(): - summary[key].total_num_nodes = len(nodes) - - return ObjectSummaries( - summary=summary, - total_objects=total_objects, - total_size_mb=total_size_mb, - callsite_enabled=callsite_enabled, - ) - - -@dataclass(init=True) -class StateSummary: - #: Node ID -> summary per node - #: If the data is not required to be orgnized per node, it will contain - #: a single key, "cluster". 
- node_id_to_summary: Dict[str, Union[TaskSummaries, ActorSummaries, ObjectSummaries]] - - -@dataclass(init=True) -class SummaryApiResponse: - # Carried over from ListApiResponse - # We currently use list API for listing the resources - total: int - # Carried over from ListApiResponse - # Number of resources returned by data sources after truncation - num_after_truncation: int - # Number of resources after filtering - num_filtered: int - result: StateSummary = None - partial_failure_warning: Optional[str] = "" - # A list of warnings to print. - warnings: Optional[List[str]] = None - - -def resource_to_schema(resource: StateResource) -> StateSchema: - if resource == StateResource.ACTORS: - return ActorState - elif resource == StateResource.JOBS: - return JobState - elif resource == StateResource.NODES: - return NodeState - elif resource == StateResource.OBJECTS: - return ObjectState - elif resource == StateResource.PLACEMENT_GROUPS: - return PlacementGroupState - elif resource == StateResource.RUNTIME_ENVS: - return RuntimeEnvState - elif resource == StateResource.TASKS: - return TaskState - elif resource == StateResource.WORKERS: - return WorkerState - elif resource == StateResource.CLUSTER_EVENTS: - return ClusterEventState - else: - assert False, "Unreachable" - - -def protobuf_message_to_dict( - message, - fields_to_decode: List[str], - preserving_proto_field_name: bool = True, -) -> dict: - """Convert a protobuf message to dict - - Args: - fields_to_decode: field names which will be decoded from binary to hex. - preserving_proto_field_name: a pass-through option for protobuf message - method. See google.protobuf MessageToDict - - Return: - Dictionary of the converted rpc protobuf. 
- """ - return dashboard_utils.message_to_dict( - message, - fields_to_decode, - including_default_value_fields=True, - preserving_proto_field_name=preserving_proto_field_name, - ) - - -def protobuf_to_task_state_dict(message: TaskEvents) -> dict: - """ - Convert a TaskEvents to a dic repr of `TaskState` - """ - task_attempt = protobuf_message_to_dict( - message=message, - fields_to_decode=[ - "task_id", - "job_id", - "node_id", - "actor_id", - "parent_task_id", - "worker_id", - "placement_group_id", - "component_id", - ], - ) - - task_state = {} - task_info = task_attempt.get("task_info", {}) - state_updates = task_attempt.get("state_updates", {}) - profiling_data = task_attempt.get("profile_events", {}) - if profiling_data: - for event in profiling_data["events"]: - # End/start times are recorded in ns. We convert them to ms. - event["end_time"] = int(event["end_time"]) / 1e6 - event["start_time"] = int(event["start_time"]) / 1e6 - event["extra_data"] = json.loads(event["extra_data"]) - task_state["profiling_data"] = profiling_data - - # Convert those settable fields - mappings = [ - ( - task_info, - [ - "task_id", - "name", - "actor_id", - "type", - "func_or_class_name", - "language", - "required_resources", - "runtime_env_info", - "parent_task_id", - "placement_group_id", - ], - ), - (task_attempt, ["task_id", "attempt_number", "job_id"]), - ( - state_updates, - ["node_id", "worker_id", "task_log_info", "actor_repr_name"], - ), - ] - for src, keys in mappings: - for key in keys: - task_state[key] = src.get(key) - - task_state["creation_time_ms"] = None - task_state["start_time_ms"] = None - task_state["end_time_ms"] = None - events = [] - - for state in TaskStatus.keys(): - key = f"{state.lower()}_ts" - if key in state_updates: - # timestamp is recorded as nanosecond from the backend. - # We need to convert it to the second. 
- ts_ms = int(state_updates[key]) // 1e6 - events.append( - { - "state": state, - "created_ms": ts_ms, - } - ) - if state == "PENDING_ARGS_AVAIL": - task_state["creation_time_ms"] = ts_ms - if state == "RUNNING": - task_state["start_time_ms"] = ts_ms - if state == "FINISHED" or state == "FAILED": - task_state["end_time_ms"] = ts_ms - - task_state["events"] = events - if len(events) > 0: - latest_state = events[-1]["state"] - else: - latest_state = "NIL" - task_state["state"] = latest_state - - # Parse error info - if latest_state == "FAILED": - error_info = state_updates.get("error_info", None) - if error_info: - # We captured colored error message printed to console, e.g. - # "\x1b[31mTraceback (most recent call last):\x1b[0m", - # this is to remove the ANSI escape codes. - task_state["error_message"] = remove_ansi_escape_codes( - error_info.get("error_message", "") - ) - task_state["error_type"] = error_info.get("error_type", "") - - # Parse actor task name for actor with repr name. - if ( - state_updates.get("actor_repr_name") - and task_state["type"] == "ACTOR_TASK" - and task_state["name"] - == task_state["func_or_class_name"] # no name option provided. - ): - # If it's an actor task with no name override, and has repr name defined - # for the actor, we override the name. - method_name = task_state["name"].split(".")[-1] - actor_repr_task_name = f"{state_updates['actor_repr_name']}.{method_name}" - task_state["name"] = actor_repr_task_name - - return task_state - - -def remove_ansi_escape_codes(text: str) -> str: - """Remove ANSI escape codes from a string.""" - import re - - return re.sub(r"\x1b[^m]*m", "", text) - - -def dict_to_state(d: Dict, state_schema: StateSchema) -> StateSchema: - """Convert a dict to a state schema. - - Args: - d: a dict to convert. - state_schema: a schema to convert to. - - Returns: - A state schema. 
- """ - try: - return resource_to_schema(state_schema)(**d) - except Exception as e: - raise RayStateApiException(f"Failed to convert {d} to StateSchema: {e}") from e +record_deprecated_state_api_import() diff --git a/python/ray/experimental/state/custom_types.py b/python/ray/experimental/state/custom_types.py index 5f3535a27446..f5576beaeaa1 100644 --- a/python/ray/experimental/state/custom_types.py +++ b/python/ray/experimental/state/custom_types.py @@ -1,100 +1,4 @@ -import sys +from ray.util.state.custom_types import * # noqa: F401 F403 +from ray.util.state.util import record_deprecated_state_api_import -from ray.core.generated.common_pb2 import ( - TaskStatus, - TaskType, - WorkerExitType, - WorkerType, -) -from ray.core.generated.gcs_pb2 import ( - ActorTableData, - GcsNodeInfo, - PlacementGroupTableData, -) -from ray.dashboard.memory_utils import ReferenceType - -if sys.version_info >= (3, 8): - from typing import Literal -else: - from typing_extensions import Literal - - -ACTOR_STATUS = [ - "DEPENDENCIES_UNREADY", - "PENDING_CREATION", - "ALIVE", - "RESTARTING", - "DEAD", -] -TypeActorStatus = Literal[tuple(ACTOR_STATUS)] -PLACEMENT_GROUP_STATUS = [ - "PENDING", - "CREATED", - "REMOVED", - "RESCHEDULING", -] -TypePlacementGroupStatus = Literal[tuple(PLACEMENT_GROUP_STATUS)] -TASK_STATUS = [ - "NIL", - "PENDING_ARGS_AVAIL", - "PENDING_NODE_ASSIGNMENT", - "PENDING_OBJ_STORE_MEM_AVAIL", - "PENDING_ARGS_FETCH", - "SUBMITTED_TO_WORKER", - "RUNNING", - "RUNNING_IN_RAY_GET", - "RUNNING_IN_RAY_WAIT", - "FINISHED", - "FAILED", -] -TypeTaskStatus = Literal[tuple(TASK_STATUS)] -NODE_STATUS = ["ALIVE", "DEAD"] -TypeNodeStatus = Literal[tuple(NODE_STATUS)] -WORKER_TYPE = [ - "WORKER", - "DRIVER", - "SPILL_WORKER", - "RESTORE_WORKER", -] -TypeWorkerType = Literal[tuple(WORKER_TYPE)] -WORKER_EXIT_TYPE = [ - "SYSTEM_ERROR", - "INTENDED_SYSTEM_EXIT", - "USER_ERROR", - "INTENDED_USER_EXIT", - "NODE_OUT_OF_MEMORY", -] -TypeWorkerExitType = Literal[tuple(WORKER_EXIT_TYPE)] 
-TASK_TYPE = [ - "NORMAL_TASK", - "ACTOR_CREATION_TASK", - "ACTOR_TASK", - "DRIVER_TASK", -] -TypeTaskType = Literal[tuple(TASK_TYPE)] -TypeReferenceType = Literal[ - tuple(reference_type.value for reference_type in ReferenceType) -] - - -def validate_protobuf_enum(grpc_enum, custom_enum): - """Validate the literal contains the correct enum values from protobuf""" - enum_vals = set(grpc_enum.DESCRIPTOR.values_by_name) - # Sometimes, the grpc enum is mocked, and it - # doesn't include any values in that case. - if len(enum_vals) > 0: - assert enum_vals == set(custom_enum) - - -# Do the enum validation here. -# It is necessary to avoid regression. Alternatively, we can auto generate this -# directly by protobuf. -validate_protobuf_enum(ActorTableData.ActorState, ACTOR_STATUS) -validate_protobuf_enum( - PlacementGroupTableData.PlacementGroupState, PLACEMENT_GROUP_STATUS -) -validate_protobuf_enum(TaskStatus, TASK_STATUS) -validate_protobuf_enum(GcsNodeInfo.GcsNodeState, NODE_STATUS) -validate_protobuf_enum(WorkerType, WORKER_TYPE) -validate_protobuf_enum(WorkerExitType, WORKER_EXIT_TYPE) -validate_protobuf_enum(TaskType, TASK_TYPE) +record_deprecated_state_api_import() diff --git a/python/ray/experimental/state/exception.py b/python/ray/experimental/state/exception.py index 43156d28b5de..49e7099cf325 100644 --- a/python/ray/experimental/state/exception.py +++ b/python/ray/experimental/state/exception.py @@ -1,31 +1,4 @@ -"""Internal Error""" +from ray.util.state.exception import * # noqa: F401 F403 +from ray.util.state.util import record_deprecated_state_api_import - -STATE_OBS_ALPHA_FEEDBACK_MSG = [ - "\n==========ALPHA, FEEDBACK NEEDED ===============", - "State Observability APIs is currently in Alpha. ", - "If you have any feedback, you could do so at either way as below:", - " 1. Report bugs/issues with details: https://forms.gle/gh77mwjEskjhN8G46", - " 2. 
Follow up in #ray-state-observability-dogfooding slack channel of Ray: " - "https://tinyurl.com/2pm26m4a", - "==========================================================", -] - - -class DataSourceUnavailable(Exception): - pass - - -"""User-facing Error""" - - -class RayStateApiException(Exception): - def __init__(self, err_msg, *args): - err_msg += "\n".join(STATE_OBS_ALPHA_FEEDBACK_MSG) - super().__init__(err_msg, *args) - - -class ServerUnavailable(RayStateApiException): - """Thrown when failing to connect to dashboard server""" - - pass +record_deprecated_state_api_import() diff --git a/python/ray/experimental/state/state_cli.py b/python/ray/experimental/state/state_cli.py index e21337b1a83d..58bc5d31ebdc 100644 --- a/python/ray/experimental/state/state_cli.py +++ b/python/ray/experimental/state/state_cli.py @@ -1,1308 +1,4 @@ -import json -import logging -from datetime import datetime -from enum import Enum, unique -from typing import Dict, List, Optional, Tuple +from ray.util.state.state_cli import * # noqa: F401 F403 +from ray.util.state.util import record_deprecated_state_api_import -import click -import yaml - -import ray._private.services as services -from ray._private.thirdparty.tabulate.tabulate import tabulate -from ray.experimental.state.api import ( - StateApiClient, - get_log, - list_logs, - summarize_actors, - summarize_objects, - summarize_tasks, -) -from ray.experimental.state.common import ( - DEFAULT_LIMIT, - DEFAULT_LOG_LIMIT, - DEFAULT_RPC_TIMEOUT, - GetApiOptions, - ListApiOptions, - PredicateType, - StateResource, - StateSchema, - SupportedFilterType, - resource_to_schema, -) -from ray.experimental.state.exception import RayStateApiException -from ray.util.annotations import PublicAPI - -logger = logging.getLogger(__name__) - - -@unique -class AvailableFormat(Enum): - DEFAULT = "default" - JSON = "json" - YAML = "yaml" - TABLE = "table" - - -def _parse_filter(filter: str) -> Tuple[str, PredicateType, SupportedFilterType]: - """Parse the 
filter string to a tuple of key, preciate, and value.""" - # The function assumes there's going to be no key that includes "="" or "!=". - # Since key is controlled by us, it should be trivial to keep the invariant. - predicate = None - # Tuple of [predicate_start, predicate_end). - predicate_index = None - - # Find the first predicate match. This logic works because we assume the - # key doesn't contain = or !=. - for i in range(len(filter)): - char = filter[i] - if char == "=": - predicate = "=" - predicate_index = (i, i + 1) - break - elif char == "!": - if len(filter) <= i + 1: - continue - - next_char = filter[i + 1] - if next_char == "=": - predicate = "!=" - predicate_index = (i, i + 2) - break - - if not predicate or not predicate_index: - raise ValueError( - f"The format of a given filter {filter} is invalid: " - "Cannot find the predicate. " - "Please provide key=val or key!=val format string." - ) - - key, predicate, value = ( - filter[: predicate_index[0]], - filter[predicate_index[0] : predicate_index[1]], - filter[predicate_index[1] :], - ) - - assert predicate == "=" or predicate == "!=" - if len(key) == 0 or len(value) == 0: - raise ValueError( - f"The format of a given filter {filter} is invalid: " - f"Cannot identify key {key} or value, {value}. " - "Please provide key=val or key!=val format string." - ) - - return (key, predicate, value) - - -def _get_available_formats() -> List[str]: - """Return the available formats in a list of string""" - return [format_enum.value for format_enum in AvailableFormat] - - -def _get_available_resources( - excluded: Optional[List[StateResource]] = None, -) -> List[str]: - """Return the available resources in a list of string - - Args: - excluded: List of resources that should be excluded - """ - # All resource names use '_' rather than '-'. 
But users options have '-' - return [ - e.value.replace("_", "-") - for e in StateResource - if excluded is None or e not in excluded - ] - - -def get_table_output(state_data: List, schema: StateSchema, detail: bool) -> str: - """Display the table output. - - The table headers are ordered as the order defined in the dataclass of - `StateSchema`. For example, - - @dataclass - class A(StateSchema): - a: str - b: str - c: str - - will create headers - A B C - ----- - - Args: - state_data: A list of state data. - schema: The schema for the corresponding resource. - - Returns: - The table formatted string. - """ - time = datetime.now() - header = "=" * 8 + f" List: {time} " + "=" * 8 - headers = [] - table = [] - cols = schema.list_columns(detail=detail) - for data in state_data: - for key, val in data.items(): - if isinstance(val, dict): - data[key] = yaml.dump(val, indent=2) - keys = set(data.keys()) - headers = [] - for col in cols: - if col in keys: - headers.append(col.upper()) - table.append([data[header.lower()] for header in headers]) - return f""" -{header} -Stats: ------------------------------- -Total: {len(state_data)} - -Table: ------------------------------- -{tabulate(table, headers=headers, showindex=True, tablefmt="plain", floatfmt=".3f")} -""" - - -def output_with_format( - state_data: List[Dict], - *, - schema: Optional[StateSchema], - format: AvailableFormat = AvailableFormat.DEFAULT, - detail: bool = False, -) -> str: - # humanify all input state data - if schema: - state_data = [schema.humanify(state) for state in state_data] - if format == AvailableFormat.DEFAULT: - return get_table_output(state_data, schema, detail) - if format == AvailableFormat.YAML: - return yaml.dump( - state_data, - indent=4, - explicit_start=True, - # We want to keep the defined ordering of the states, thus sort_keys=False - sort_keys=False, - ) - elif format == AvailableFormat.JSON: - return json.dumps(state_data) - elif format == AvailableFormat.TABLE: - return 
get_table_output(state_data, schema, detail) - else: - raise ValueError( - f"Unexpected format: {format}. " - f"Supported formatting: {_get_available_formats()}" - ) - - -def format_summary_output(state_data: Dict, *, resource: StateResource) -> str: - if len(state_data) == 0: - return "No resource in the cluster" - - # Parse the data. - cluster_data = state_data["cluster"] - summaries = cluster_data["summary"] - summary_by = cluster_data["summary_by"] - del cluster_data["summary_by"] - del cluster_data["summary"] - - cluster_info_table = yaml.dump(cluster_data, indent=2) - - # Create a table. - table = [] - headers = [] - for summary in summaries.values(): - # Convert dict to yaml for better formatting. - for key, val in summary.items(): - if isinstance(val, dict): - summary[key] = yaml.dump(val, indent=2) - - headers = sorted([key.upper() for key in summary.keys()]) - table.append([summary[header.lower()] for header in headers]) - - summary_table = tabulate( - table, headers=headers, showindex=True, tablefmt="plain", numalign="left" - ) - - time = datetime.now() - header = "=" * 8 + f" {resource.value.capitalize()} Summary: {time} " + "=" * 8 - return f""" -{header} -Stats: ------------------------------------- -{cluster_info_table} - -Table (group by {summary_by}): ------------------------------------- -{summary_table} -""" - - -def format_object_summary_output(state_data: Dict) -> str: - if len(state_data) == 0: - return "No resource in the cluster" - - # Parse the data. - cluster_data = state_data["cluster"] - summaries = cluster_data["summary"] - summary_by = cluster_data["summary_by"] - del cluster_data["summary_by"] - del cluster_data["summary"] - - cluster_info_table = yaml.dump(cluster_data, indent=2) - - # Create a table per callsite. - tables = [] - for callsite, summary in summaries.items(): - # Convert dict to yaml for better formatting. 
- for key, val in summary.items(): - if isinstance(val, dict): - summary[key] = yaml.dump(val, indent=2) - - table = [] - headers = sorted([key.upper() for key in summary.keys()]) - table.append([summary[header.lower()] for header in headers]) - table_for_callsite = tabulate( - table, headers=headers, showindex=True, numalign="left" - ) - - # Format callsite. | is a separator for ray callsite. - formatted_callsite = callsite.replace("|", "\n|") - tables.append(f"{formatted_callsite}\n{table_for_callsite}") - - time = datetime.now() - header = "=" * 8 + f" Object Summary: {time} " + "=" * 8 - table_string = "\n\n\n\n".join(tables) - return f""" -{header} -Stats: ------------------------------------- -{cluster_info_table} - -Table (group by {summary_by}) ------------------------------------- -{table_string} -""" - - -def format_get_api_output( - state_data: Optional[StateSchema], - id: str, - *, - schema: StateSchema, - format: AvailableFormat = AvailableFormat.YAML, -) -> str: - if not state_data or isinstance(state_data, list) and len(state_data) == 0: - return f"Resource with id={id} not found in the cluster." - - if not isinstance(state_data, list): - state_data = [state_data] - state_data = [state.asdict() for state in state_data] - - return output_with_format(state_data, schema=schema, format=format, detail=True) - - -def format_list_api_output( - state_data: List[StateSchema], - *, - schema: StateSchema, - format: AvailableFormat = AvailableFormat.DEFAULT, - detail: bool = False, -) -> str: - if len(state_data) == 0: - return "No resource in the cluster" - state_data = [state.asdict() for state in state_data] - return output_with_format(state_data, schema=schema, format=format, detail=detail) - - -def _should_explain(format: AvailableFormat) -> bool: - # If the format is json or yaml, it should not print stats because - # users don't want additional strings. 
- return format == AvailableFormat.DEFAULT or format == AvailableFormat.TABLE - - -""" -Common Options for State API commands -""" -timeout_option = click.option( - "--timeout", - default=DEFAULT_RPC_TIMEOUT, - help=f"Timeout in seconds for the API requests. Default is {DEFAULT_RPC_TIMEOUT}", -) -address_option = click.option( - "--address", - default=None, - help=( - "The address of Ray API server. If not provided, it will be configured " - "automatically from querying the GCS server." - ), -) - - -@click.command() -@click.argument( - "resource", - # NOTE(rickyyx): We are not allowing query job with id, and runtime envs - type=click.Choice( - _get_available_resources( - excluded=[StateResource.JOBS, StateResource.RUNTIME_ENVS] - ) - ), -) -@click.argument( - "id", - type=str, -) -@address_option -@timeout_option -@PublicAPI(stability="alpha") -def ray_get( - resource: str, - id: str, - address: Optional[str], - timeout: float, -): - """Get a state of a given resource by ID. - - We currently DO NOT support get by id for jobs and runtime-envs - - The output schema is defined at :ref:`State API Schema section. ` - - For example, the output schema of `ray get tasks ` is - :class:`~ray.experimental.state.common.TaskState`. - - Usage: - - Get an actor with actor id - - ``` - ray get actors - ``` - - Get a placement group information with - - ``` - ray get placement-groups - ``` - - The API queries one or more components from the cluster to obtain the data. - The returned state snapshot could be stale, and it is not guaranteed to return - the live data. - - Args: - resource: The type of the resource to query. - id: The id of the resource. - - Raises: - :class:`RayStateApiException ` - if the CLI is failed to query the data. - """ # noqa: E501 - # All resource names use '_' rather than '-'. 
But users options have '-' - resource = StateResource(resource.replace("-", "_")) - - # Create the State API server and put it into context - logger.debug(f"Create StateApiClient to ray instance at: {address}...") - client = StateApiClient(address=address) - options = GetApiOptions(timeout=timeout) - - # If errors occur, exceptions will be thrown. - try: - data = client.get( - resource=resource, - id=id, - options=options, - _explain=_should_explain(AvailableFormat.YAML), - ) - except RayStateApiException as e: - raise click.UsageError(str(e)) - - # Print data to console. - print( - format_get_api_output( - state_data=data, - id=id, - schema=resource_to_schema(resource), - format=AvailableFormat.YAML, - ) - ) - - -@click.command() -@click.argument( - "resource", - type=click.Choice(_get_available_resources()), -) -@click.option( - "--format", default="default", type=click.Choice(_get_available_formats()) -) -@click.option( - "-f", - "--filter", - help=( - "A key, predicate, and value to filter the result. " - "E.g., --filter 'key=value' or --filter 'key!=value'. " - "You can specify multiple --filter options. In this case all predicates " - "are concatenated as AND. For example, --filter key=value --filter key2=value " - "means (key==val) AND (key2==val2)" - ), - multiple=True, -) -@click.option( - "--limit", - default=DEFAULT_LIMIT, - type=int, - help=("Maximum number of entries to return. 100 by default."), -) -@click.option( - "--detail", - help=( - "If the flag is set, the output will contain data in more details. " - "Note that the API could query more sources " - "to obtain information in a greater detail." - ), - is_flag=True, - default=False, -) -@timeout_option -@address_option -@PublicAPI(stability="alpha") -def ray_list( - resource: str, - format: str, - filter: List[str], - limit: int, - detail: bool, - timeout: float, - address: str, -): - """List all states of a given resource. - - Normally, summary APIs are recommended before listing all resources. 
- - The output schema is defined at :ref:`State API Schema section. ` - - For example, the output schema of `ray list tasks` is - :class:`~ray.experimental.state.common.TaskState`. - - Usage: - - List all actor information from the cluster. - - ``` - ray list actors - ``` - - List 50 actors from the cluster. The sorting order cannot be controlled. - - ``` - ray list actors --limit 50 - ``` - - List 10 actors with state PENDING. - - ``` - ray list actors --limit 10 --filter "state=PENDING" - ``` - - List actors with yaml format. - - ``` - ray list actors --format yaml - ``` - - List actors with details. When --detail is specified, it might query - more data sources to obtain data in details. - - ``` - ray list actors --detail - ``` - - The API queries one or more components from the cluster to obtain the data. - The returned state snapshot could be stale, and it is not guaranteed to return - the live data. - - The API can return partial or missing output upon the following scenarios. - - - When the API queries more than 1 component, if some of them fail, - the API will return the partial result (with a suppressible warning). - - When the API returns too many entries, the API - will truncate the output. Currently, truncated data cannot be - selected by users. - - Args: - resource: The type of the resource to query. - - Raises: - :class:`RayStateApiException ` - if the CLI is failed to query the data. - """ # noqa: E501 - # All resource names use '_' rather than '-'. But users options have '-' - resource = StateResource(resource.replace("-", "_")) - format = AvailableFormat(format) - - # Create the State API server and put it into context - client = StateApiClient(address=address) - - filter = [_parse_filter(f) for f in filter] - - options = ListApiOptions( - limit=limit, - timeout=timeout, - filters=filter, - detail=detail, - ) - - # If errors occur, exceptions will be thrown. Empty data indicate successful query. 
- try: - data = client.list( - resource, - options=options, - raise_on_missing_output=False, - _explain=_should_explain(format), - ) - except RayStateApiException as e: - raise click.UsageError(str(e)) - - # If --detail is given, the default formatting is yaml. - if detail and format == AvailableFormat.DEFAULT: - format = AvailableFormat.YAML - - # Print data to console. - print( - format_list_api_output( - state_data=data, - schema=resource_to_schema(resource), - format=format, - detail=detail, - ) - ) - - -@click.group("summary") -@click.pass_context -@PublicAPI(stability="alpha") -def summary_state_cli_group(ctx): - """Return the summarized information of a given resource.""" - pass - - -@summary_state_cli_group.command(name="tasks") -@timeout_option -@address_option -@click.pass_context -@PublicAPI(stability="alpha") -def task_summary(ctx, timeout: float, address: str): - """Summarize the task state of the cluster. - - By default, the output contains the information grouped by - task function names. - - The output schema is - :class:`~ray.experimental.state.common.TaskSummaries`. - - Raises: - :class:`RayStateApiException ` - if the CLI is failed to query the data. - """ # noqa: E501 - print( - format_summary_output( - summarize_tasks( - address=address, - timeout=timeout, - raise_on_missing_output=False, - _explain=True, - ), - resource=StateResource.TASKS, - ) - ) - - -@summary_state_cli_group.command(name="actors") -@timeout_option -@address_option -@click.pass_context -@PublicAPI(stability="alpha") -def actor_summary(ctx, timeout: float, address: str): - """Summarize the actor state of the cluster. - - By default, the output contains the information grouped by - actor class names. - - The output schema is - :class:`ray.experimental.state.common.ActorSummaries - `. - - Raises: - :class:`RayStateApiException ` - if the CLI is failed to query the data. 
- """ # noqa: E501 - print( - format_summary_output( - summarize_actors( - address=address, - timeout=timeout, - raise_on_missing_output=False, - _explain=True, - ), - resource=StateResource.ACTORS, - ) - ) - - -@summary_state_cli_group.command(name="objects") -@timeout_option -@address_option -@click.pass_context -@PublicAPI(stability="alpha") -def object_summary(ctx, timeout: float, address: str): - """Summarize the object state of the cluster. - - The API is recommended when debugging memory leaks. - See :ref:`Debugging with Ray Memory ` for more details. - (Note that this command is almost equivalent to `ray memory`, but it returns - easier-to-understand output). - - By default, the output contains the information grouped by - object callsite. Note that the callsite is not collected and - all data will be aggregated as "disable" callsite if the env var - `RAY_record_ref_creation_sites` is not configured. To enable the - callsite collection, set the following environment variable when - starting Ray. - - Example: - - ``` - RAY_record_ref_creation_sites=1 ray start --head - ``` - - ``` - RAY_record_ref_creation_sites=1 ray_script.py - ``` - - The output schema is - :class:`ray.experimental.state.common.ObjectSummaries - `. - - Raises: - :class:`RayStateApiException ` - if the CLI is failed to query the data. - """ # noqa: E501 - print( - format_object_summary_output( - summarize_objects( - address=address, - timeout=timeout, - raise_on_missing_output=False, - _explain=True, - ), - ) - ) - - -log_follow_option = click.option( - "--follow", - "-f", - required=False, - type=bool, - is_flag=True, - help="Streams the log file as it is updated instead of just tailing.", -) - -log_tail_option = click.option( - "--tail", - required=False, - type=int, - default=DEFAULT_LOG_LIMIT, - help="Number of lines to tail from log. 
Use -1 to fetch the whole file.", -) - -log_interval_option = click.option( - "--interval", - required=False, - type=float, - default=None, - help="The interval in secs to print new logs when `--follow` is specified.", - hidden=True, -) - -log_timeout_option = click.option( - "--timeout", - default=DEFAULT_RPC_TIMEOUT, - help=( - "Timeout in seconds for the API requests. " - f"Default is {DEFAULT_RPC_TIMEOUT}. If --follow is specified, " - "this option will be ignored." - ), -) - -log_node_ip_option = click.option( - "-ip", - "--node-ip", - required=False, - type=str, - default=None, - help="Filters the logs by this ip address", -) - -log_node_id_option = click.option( - "--node-id", - "-id", - required=False, - type=str, - default=None, - help="Filters the logs by this NodeID", -) - -log_suffix_option = click.option( - "--err", - is_flag=True, - default=False, - help=( - "If supplied, querying stderr files for workers/actors, " - "else defaults to stdout files." - ), -) - -log_encoding_option = click.option( - "--encoding", - required=False, - default="utf-8", - help=( - "The encoding use to decode the log file. Accepts any encoding " - "supported by Python's `codecs` module. Defaults to utf-8." - ), -) - -log_encoding_errors_option = click.option( - "--encoding-errors", - required=False, - default="strict", - help=( - "The error handling scheme to use for decoding errors. " - "Accepts any error handling scheme supported by Python's `codecs`" - "module. Defaults to strict." - ), -) - - -def _get_head_node_ip(address: Optional[str] = None): - """Get the head node ip from the ray address if possible - - Args: - address: ray cluster address, e.g. 
"auto", "localhost:6379" - - Raises: - click.UsageError if node ip could not be resolved - """ - try: - address = services.canonicalize_bootstrap_address_or_die(address) - return address.split(":")[0] - except (ConnectionError, ValueError) as e: - # Hide all the stack trace - raise click.UsageError(str(e)) - - -def _print_log( - address: Optional[str] = None, - node_id: Optional[str] = None, - node_ip: Optional[str] = None, - filename: Optional[str] = None, - actor_id: Optional[str] = None, - pid: Optional[int] = None, - follow: bool = False, - tail: int = DEFAULT_LOG_LIMIT, - timeout: int = DEFAULT_RPC_TIMEOUT, - interval: Optional[float] = None, - suffix: str = "out", - encoding: str = "utf-8", - encoding_errors: str = "strict", - task_id: Optional[str] = None, - attempt_number: int = 0, - submission_id: Optional[str] = None, -): - """Wrapper around `get_log()` that prints the preamble and the log lines""" - if tail > 0: - print( - f"--- Log has been truncated to last {tail} lines." - " Use `--tail` flag to toggle. Set to -1 for getting the entire file. ---\n" - ) - - if node_id is None and node_ip is None: - # Auto detect node ip from the ray address when address neither is given - node_ip = _get_head_node_ip(address) - - for chunk in get_log( - address=address, - node_id=node_id, - node_ip=node_ip, - filename=filename, - actor_id=actor_id, - tail=tail, - pid=pid, - follow=follow, - _interval=interval, - timeout=timeout, - suffix=suffix, - encoding=encoding, - errors=encoding_errors, - task_id=task_id, - attempt_number=attempt_number, - submission_id=submission_id, - ): - print(chunk, end="", flush=True) - - -LOG_CLI_HELP_MSG = """ -Get logs based on filename (cluster) or resource identifiers (actor) - -Example: - - Get all the log files available on a node (ray address could be - obtained from `ray start --head` or `ray.init()`). - - ``` - ray logs cluster - ``` - - [ray logs cluster] Print the last 500 lines of raylet.out on a head node. 
- - ``` - ray logs cluster raylet.out --tail 500 - ``` - - Or simply, using `ray logs` as an alias for `ray logs cluster`: - - ``` - ray logs raylet.out --tail 500 - ``` - - Print the last 500 lines of raylet.out on a worker node id A. - - ``` - ray logs raylet.out --tail 500 —-node-id A - ``` - - [ray logs actor] Follow the log file with an actor id ABC. - - ``` - ray logs actor --id ABC --follow - ``` - - [ray logs task] Get the std err generated by a task. - - ``` - ray logs task --id --err - ``` -""" - - -class LogCommandGroup(click.Group): - def resolve_command(self, ctx, args): - """Try resolve the command line args assuming users omitted the subcommand. - - This overrides the default `resolve_command` for the parent class. - This will allow command alias of `ray ` to `ray cluster `. - """ - ctx.resilient_parsing = True - res = super().resolve_command(ctx, args) - cmd_name, cmd, parsed_args = res - if cmd is None: - # It could have been `ray logs ...`, forward to `ray logs cluster ...` - return super().resolve_command(ctx, ["cluster"] + args) - return cmd_name, cmd, parsed_args - - -logs_state_cli_group = LogCommandGroup(help=LOG_CLI_HELP_MSG) - - -@logs_state_cli_group.command(name="cluster") -@click.argument( - "glob_filter", - required=False, - default="*", -) -@address_option -@log_node_id_option -@log_node_ip_option -@log_follow_option -@log_tail_option -@log_interval_option -@log_timeout_option -@log_encoding_option -@log_encoding_errors_option -@click.pass_context -@PublicAPI(stability="alpha") -def log_cluster( - ctx, - glob_filter: str, - address: Optional[str], - node_id: Optional[str], - node_ip: Optional[str], - follow: bool, - tail: int, - interval: float, - timeout: int, - encoding: str, - encoding_errors: str, -): - """Get/List logs that matches the GLOB_FILTER in the cluster. - By default, it prints a list of log files that match the filter. - By default, it prints the head node logs. - If there's only 1 match, it will print the log file. 
- - Example: - - Print the last 500 lines of raylet.out on a head node. - - ``` - ray logs [cluster] raylet.out --tail 500 - ``` - - Print the last 500 lines of raylet.out on a worker node id A. - - ``` - ray logs [cluster] raylet.out --tail 500 —-node-id A - ``` - - Download the gcs_server.txt file to the local machine. - - ``` - ray logs [cluster] gcs_server.out --tail -1 > gcs_server.txt - ``` - - Follow the log files from the last 100 lines. - - ``` - ray logs [cluster] raylet.out --tail 100 -f - ``` - - Raises: - :class:`RayStateApiException ` if the CLI - is failed to query the data. - """ # noqa: E501 - - if node_id is None and node_ip is None: - node_ip = _get_head_node_ip(address) - - logs = list_logs( - address=address, - node_id=node_id, - node_ip=node_ip, - glob_filter=glob_filter, - timeout=timeout, - ) - - log_files_found = [] - for _, log_files in logs.items(): - for log_file in log_files: - log_files_found.append(log_file) - - if len(log_files_found) != 1: - # Print the list of log files found if no unique log found - if node_id: - print(f"Node ID: {node_id}") - elif node_ip: - print(f"Node IP: {node_ip}") - print(output_with_format(logs, schema=None, format=AvailableFormat.YAML)) - return - - # If there's only 1 file, that means there's a unique match. 
- filename = log_files_found[0] - - _print_log( - address=address, - node_id=node_id, - node_ip=node_ip, - filename=filename, - tail=tail, - follow=follow, - interval=interval, - timeout=timeout, - encoding=encoding, - encoding_errors=encoding_errors, - ) - - -@logs_state_cli_group.command(name="actor") -@click.option( - "--id", - "-a", - required=False, - type=str, - default=None, - help="Retrieves the logs corresponding to this ActorID.", -) -@click.option( - "--pid", - "-pid", - required=False, - type=str, - default=None, - help="Retrieves the logs from the actor with this pid.", -) -@address_option -@log_node_id_option -@log_node_ip_option -@log_follow_option -@log_tail_option -@log_interval_option -@log_timeout_option -@log_suffix_option -@click.pass_context -@PublicAPI(stability="alpha") -def log_actor( - ctx, - id: Optional[str], - pid: Optional[str], - address: Optional[str], - node_id: Optional[str], - node_ip: Optional[str], - follow: bool, - tail: int, - interval: float, - timeout: int, - err: bool, -): - """Get/List logs associated with an actor. - - Example: - - Follow the log file with an actor id ABC. - - ``` - ray logs actor --id ABC --follow - ``` - - Get the actor log from pid 123, ip ABC. - Note that this goes well with the driver log of Ray which prints - (ip=ABC, pid=123, class_name) logs. - - ``` - ray logs actor --pid=123 —ip=ABC - ``` - - Get the actor err log file. - - ``` - ray logs actor --id ABC --err - ``` - - Raises: - :class:`RayStateApiException ` - if the CLI is failed to query the data. - MissingParameter if inputs are missing. 
- """ # noqa: E501 - - if pid is None and id is None: - raise click.MissingParameter( - message="At least one of `--pid` and `--id` has to be set", - param_type="option", - ) - - _print_log( - address=address, - node_id=node_id, - node_ip=node_ip, - pid=pid, - actor_id=id, - tail=tail, - follow=follow, - interval=interval, - timeout=timeout, - suffix="err" if err else "out", - ) - - -@logs_state_cli_group.command(name="worker") -@click.option( - "--pid", - "-pid", - # The only identifier supported for now, TODO(rickyx): add worker id support - required=True, - type=str, - help="Retrieves the logs from the worker with this pid.", -) -@address_option -@log_node_id_option -@log_node_ip_option -@log_follow_option -@log_tail_option -@log_interval_option -@log_timeout_option -@log_suffix_option -@click.pass_context -@PublicAPI(stability="alpha") -def log_worker( - ctx, - pid: Optional[str], - address: Optional[str], - node_id: Optional[str], - node_ip: Optional[str], - follow: bool, - tail: int, - interval: float, - timeout: int, - err: bool, -): - """Get logs associated with a worker process. - - Example: - - Follow the log file from a worker process with pid=ABC. - - ``` - ray logs worker --pid ABC --follow - ``` - - Get the stderr logs from a worker process. - - ``` - ray logs worker --pid ABC --err - ``` - - Raises: - :class:`RayStateApiException ` - if the CLI is failed to query the data. - MissingParameter if inputs are missing. - """ # noqa: E501 - - _print_log( - address=address, - node_id=node_id, - node_ip=node_ip, - pid=pid, - tail=tail, - follow=follow, - interval=interval, - timeout=timeout, - suffix="err" if err else "out", - ) - - -@logs_state_cli_group.command(name="job") -@click.option( - "--id", - "submission_id", - required=True, - type=str, - help=( - "Retrieves the logs from a submission job with submission id," - "i.e. 
raysubmit_XXX" - ), -) -@address_option -@log_follow_option -@log_tail_option -@log_interval_option -@log_timeout_option -@click.pass_context -@PublicAPI(stability="alpha") -def log_job( - ctx, - submission_id: Optional[str], - address: Optional[str], - follow: bool, - tail: int, - interval: float, - timeout: int, -): - """Get logs associated with a submission job. - - Example: - - Follow the log file from a submission job with submission id raysumbit_xxx. - - ``` - ray logs job --id raysubmit_xxx - ``` - - Follow the submission job log. - - ``` - ray logs jobs --id raysubmit_xxx --follow - - ``` - - Raises: - :class:`RayStateApiException ` - if the CLI is failed to query the data. - MissingParameter if inputs are missing. - """ # noqa: E501 - - _print_log( - address=address, - tail=tail, - follow=follow, - interval=interval, - timeout=timeout, - submission_id=submission_id, - ) - - -@logs_state_cli_group.command(name="task") -@click.option( - "--id", - "task_id", - required=True, - type=str, - help="Retrieves the logs from the task with this task id.", -) -@click.option( - "--attempt-number", - "-a", - required=False, - type=int, - default=0, - help="Retrieves the logs from the attempt, default to 0", -) -@address_option -@log_follow_option -@log_interval_option -@log_tail_option -@log_timeout_option -@log_suffix_option -@click.pass_context -@PublicAPI(stability="alpha") -def log_task( - ctx, - task_id: Optional[str], - attempt_number: int, - address: Optional[str], - follow: bool, - interval: float, - tail: int, - timeout: int, - err: bool, -): - """Get logs associated with a task. - - Example: - - Follow the log file from a task with task id. - - ``` - ray logs tasks --id --follow - ``` - - Get the log from a retry attempt 1 from a task. - - ``` - ray logs tasks --id -a 1 - ``` - - Raises: - :class:`RayStateApiException ` - if the CLI is failed to query the data. - MissingParameter if inputs are missing. 
- """ # noqa: E501 - - _print_log( - address=address, - task_id=task_id, - attempt_number=attempt_number, - follow=follow, - tail=tail, - interval=interval, - timeout=timeout, - suffix="err" if err else "out", - ) +record_deprecated_state_api_import() diff --git a/python/ray/experimental/state/state_manager.py b/python/ray/experimental/state/state_manager.py index 9afb3f75c360..7720606a68b9 100644 --- a/python/ray/experimental/state/state_manager.py +++ b/python/ray/experimental/state/state_manager.py @@ -1,457 +1,4 @@ -import dataclasses -import inspect -import logging -from collections import defaultdict -from functools import wraps -from typing import List, Optional, Tuple +from ray.util.state.state_manager import * # noqa: F401 F403 +from ray.util.state.util import record_deprecated_state_api_import -import grpc -from grpc.aio._call import UnaryStreamCall - -import ray -import ray.dashboard.modules.log.log_consts as log_consts -from ray._private import ray_constants -from ray._private.gcs_utils import GcsAioClient -from ray._private.utils import hex_to_binary -from ray._raylet import ActorID, JobID, TaskID -from ray.core.generated import gcs_service_pb2_grpc -from ray.core.generated.gcs_pb2 import ActorTableData -from ray.core.generated.gcs_service_pb2 import ( - GetAllActorInfoReply, - GetAllActorInfoRequest, - GetAllNodeInfoReply, - GetAllNodeInfoRequest, - GetAllPlacementGroupReply, - GetAllPlacementGroupRequest, - GetAllWorkerInfoReply, - GetAllWorkerInfoRequest, - GetTaskEventsReply, - GetTaskEventsRequest, -) -from ray.core.generated.node_manager_pb2 import ( - GetObjectsInfoReply, - GetObjectsInfoRequest, - GetTasksInfoReply, - GetTasksInfoRequest, -) -from ray.core.generated.node_manager_pb2_grpc import NodeManagerServiceStub -from ray.core.generated.reporter_pb2 import ( - ListLogsReply, - ListLogsRequest, - StreamLogRequest, -) -from ray.core.generated.reporter_pb2_grpc import LogServiceStub -from ray.core.generated.runtime_env_agent_pb2 import ( - 
GetRuntimeEnvsInfoReply, - GetRuntimeEnvsInfoRequest, -) -from ray.core.generated.runtime_env_agent_pb2_grpc import RuntimeEnvServiceStub -from ray.dashboard.datacenter import DataSource -from ray.dashboard.modules.job.common import JobInfoStorageClient -from ray.dashboard.modules.job.pydantic_models import JobDetails, JobType -from ray.dashboard.modules.job.utils import get_driver_jobs -from ray.dashboard.utils import Dict as Dictionary -from ray.experimental.state.common import ( - RAY_MAX_LIMIT_FROM_DATA_SOURCE, - PredicateType, - SupportedFilterType, -) -from ray.experimental.state.exception import DataSourceUnavailable - -logger = logging.getLogger(__name__) - -_STATE_MANAGER_GRPC_OPTIONS = [ - *ray_constants.GLOBAL_GRPC_OPTIONS, - ("grpc.max_send_message_length", ray_constants.GRPC_CPP_MAX_MESSAGE_SIZE), - ("grpc.max_receive_message_length", ray_constants.GRPC_CPP_MAX_MESSAGE_SIZE), -] - - -def handle_grpc_network_errors(func): - """Decorator to add a network handling logic. - - It is a helper method for `StateDataSourceClient`. - The method can only be used for async methods. - """ - assert inspect.iscoroutinefunction(func) - - @wraps(func) - async def api_with_network_error_handler(*args, **kwargs): - """Apply the network error handling logic to each APIs, - such as retry or exception policies. - - Returns: - If RPC succeeds, it returns what the original function returns. - If RPC fails, it raises exceptions. - Exceptions: - DataSourceUnavailable: if the source is unavailable because it is down - or there's a slow network issue causing timeout. - Otherwise, the raw network exceptions (e.g., gRPC) will be raised. - """ - try: - return await func(*args, **kwargs) - except grpc.aio.AioRpcError as e: - if ( - e.code() == grpc.StatusCode.DEADLINE_EXCEEDED - or e.code() == grpc.StatusCode.UNAVAILABLE - ): - raise DataSourceUnavailable( - "Failed to query the data source. " - "It is either there's a network issue, or the source is down." 
- ) - else: - logger.exception(e) - raise e - - return api_with_network_error_handler - - -class IdToIpMap: - def __init__(self): - # Node IP to node ID mapping. - self._ip_to_node_id = defaultdict(str) - # Node ID to node IP mapping. - self._node_id_to_ip = defaultdict(str) - - def put(self, node_id: str, address: str): - self._ip_to_node_id[address] = node_id - self._node_id_to_ip[node_id] = address - - def get_ip(self, node_id: str): - return self._node_id_to_ip.get(node_id) - - def get_node_id(self, address: str): - return self._ip_to_node_id.get(address) - - def pop(self, node_id: str): - """Pop the given node id. - - Returns: - False if the corresponding node id doesn't exist. - True if it pops correctly. - """ - ip = self._node_id_to_ip.get(node_id) - if not ip: - return None - assert ip in self._ip_to_node_id - self._node_id_to_ip.pop(node_id) - self._ip_to_node_id.pop(ip) - return True - - -class StateDataSourceClient: - """The client to query states from various data sources such as Raylet, GCS, Agents. - - Note that it doesn't directly query core workers. They are proxied through raylets. - - The module is not in charge of service discovery. The caller is responsible for - finding services and register stubs through `register*` APIs. - - Non `register*` APIs - - Return the protobuf directly if it succeeds to query the source. - - Raises an exception if there's any network issue. - - throw a ValueError if it cannot find the source. 
- """ - - def __init__(self, gcs_channel: grpc.aio.Channel, gcs_aio_client: GcsAioClient): - self.register_gcs_client(gcs_channel) - self._raylet_stubs = {} - self._runtime_env_agent_stub = {} - self._log_agent_stub = {} - self._job_client = JobInfoStorageClient(gcs_aio_client) - self._id_id_map = IdToIpMap() - self._gcs_aio_client = gcs_aio_client - - def register_gcs_client(self, gcs_channel: grpc.aio.Channel): - self._gcs_actor_info_stub = gcs_service_pb2_grpc.ActorInfoGcsServiceStub( - gcs_channel - ) - self._gcs_pg_info_stub = gcs_service_pb2_grpc.PlacementGroupInfoGcsServiceStub( - gcs_channel - ) - self._gcs_node_info_stub = gcs_service_pb2_grpc.NodeInfoGcsServiceStub( - gcs_channel - ) - self._gcs_worker_info_stub = gcs_service_pb2_grpc.WorkerInfoGcsServiceStub( - gcs_channel - ) - self._gcs_task_info_stub = gcs_service_pb2_grpc.TaskInfoGcsServiceStub( - gcs_channel - ) - - def register_raylet_client(self, node_id: str, address: str, port: int): - full_addr = f"{address}:{port}" - options = _STATE_MANAGER_GRPC_OPTIONS - channel = ray._private.utils.init_grpc_channel( - full_addr, options, asynchronous=True - ) - self._raylet_stubs[node_id] = NodeManagerServiceStub(channel) - self._id_id_map.put(node_id, address) - - def unregister_raylet_client(self, node_id: str): - self._raylet_stubs.pop(node_id) - self._id_id_map.pop(node_id) - - def register_agent_client(self, node_id, address: str, port: int): - options = _STATE_MANAGER_GRPC_OPTIONS - channel = ray._private.utils.init_grpc_channel( - f"{address}:{port}", options=options, asynchronous=True - ) - self._runtime_env_agent_stub[node_id] = RuntimeEnvServiceStub(channel) - self._log_agent_stub[node_id] = LogServiceStub(channel) - self._id_id_map.put(node_id, address) - - def unregister_agent_client(self, node_id: str): - self._runtime_env_agent_stub.pop(node_id) - self._log_agent_stub.pop(node_id) - self._id_id_map.pop(node_id) - - def get_all_registered_raylet_ids(self) -> List[str]: - return 
self._raylet_stubs.keys() - - def get_all_registered_agent_ids(self) -> List[str]: - assert len(self._log_agent_stub) == len(self._runtime_env_agent_stub) - return self._runtime_env_agent_stub.keys() - - def ip_to_node_id(self, ip: Optional[str]) -> Optional[str]: - """Return the node id that corresponds to the given ip. - - Args: - ip: The ip address. - - Returns: - None if the corresponding id doesn't exist. - Node id otherwise. If None node_ip is given, - it will also return None. - """ - if not ip: - return None - return self._id_id_map.get_node_id(ip) - - @handle_grpc_network_errors - async def get_all_actor_info( - self, - timeout: int = None, - limit: int = None, - filters: Optional[List[Tuple[str, PredicateType, SupportedFilterType]]] = None, - ) -> Optional[GetAllActorInfoReply]: - if not limit: - limit = RAY_MAX_LIMIT_FROM_DATA_SOURCE - if filters is None: - filters = [] - - req_filters = GetAllActorInfoRequest.Filters() - for filter in filters: - key, predicate, value = filter - if predicate != "=": - # We only support EQUAL predicate for source side filtering. 
- continue - if key == "actor_id": - req_filters.actor_id = ActorID(hex_to_binary(value)).binary() - elif key == "state": - if value not in ActorTableData.ActorState.keys(): - raise ValueError(f"Invalid actor state for filtering: {value}") - req_filters.state = ActorTableData.ActorState.Value(value) - elif key == "job_id": - req_filters.job_id = JobID(hex_to_binary(value)).binary() - - request = GetAllActorInfoRequest(limit=limit, filters=req_filters) - reply = await self._gcs_actor_info_stub.GetAllActorInfo( - request, timeout=timeout - ) - return reply - - @handle_grpc_network_errors - async def get_all_task_info( - self, - timeout: int = None, - limit: int = None, - filters: Optional[List[Tuple[str, PredicateType, SupportedFilterType]]] = None, - exclude_driver: bool = False, - ) -> Optional[GetTaskEventsReply]: - if not limit: - limit = RAY_MAX_LIMIT_FROM_DATA_SOURCE - - if filters is None: - filters = [] - - req_filters = GetTaskEventsRequest.Filters() - for filter in filters: - key, predicate, value = filter - if predicate != "=": - # We only support EQUAL predicate for source side filtering. - continue - - if key == "actor_id": - req_filters.actor_id = ActorID(hex_to_binary(value)).binary() - elif key == "job_id": - req_filters.job_id = JobID(hex_to_binary(value)).binary() - elif key == "name": - req_filters.name = value - elif key == "task_id": - req_filters.task_ids.append(TaskID(hex_to_binary(value)).binary()) - else: - continue - - # Remove the filter from the list so that we don't have to - # filter it again later. 
- filters.remove(filter) - - req_filters.exclude_driver = exclude_driver - - request = GetTaskEventsRequest(limit=limit, filters=req_filters) - reply = await self._gcs_task_info_stub.GetTaskEvents(request, timeout=timeout) - return reply - - @handle_grpc_network_errors - async def get_all_placement_group_info( - self, timeout: int = None, limit: int = None - ) -> Optional[GetAllPlacementGroupReply]: - if not limit: - limit = RAY_MAX_LIMIT_FROM_DATA_SOURCE - - request = GetAllPlacementGroupRequest(limit=limit) - reply = await self._gcs_pg_info_stub.GetAllPlacementGroup( - request, timeout=timeout - ) - return reply - - @handle_grpc_network_errors - async def get_all_node_info( - self, timeout: int = None - ) -> Optional[GetAllNodeInfoReply]: - request = GetAllNodeInfoRequest() - reply = await self._gcs_node_info_stub.GetAllNodeInfo(request, timeout=timeout) - return reply - - @handle_grpc_network_errors - async def get_all_worker_info( - self, timeout: int = None, limit: int = None - ) -> Optional[GetAllWorkerInfoReply]: - if not limit: - limit = RAY_MAX_LIMIT_FROM_DATA_SOURCE - - request = GetAllWorkerInfoRequest(limit=limit) - reply = await self._gcs_worker_info_stub.GetAllWorkerInfo( - request, timeout=timeout - ) - return reply - - # TODO(rickyx): - # This is currently mirroring dashboard/modules/job/job_head.py::list_jobs - # We should eventually unify the logic. - async def get_job_info(self, timeout: int = None) -> List[JobDetails]: - # Cannot use @handle_grpc_network_errors because async def is not supported yet. 
- - driver_jobs, submission_job_drivers = await get_driver_jobs( - self._gcs_aio_client, timeout=timeout - ) - submission_jobs = await self._job_client.get_all_jobs(timeout=timeout) - submission_jobs = [ - JobDetails( - **dataclasses.asdict(job), - submission_id=submission_id, - job_id=submission_job_drivers.get(submission_id).id - if submission_id in submission_job_drivers - else None, - driver_info=submission_job_drivers.get(submission_id), - type=JobType.SUBMISSION, - ) - for submission_id, job in submission_jobs.items() - ] - - return list(driver_jobs.values()) + submission_jobs - - async def get_all_cluster_events(self) -> Dictionary: - return DataSource.events - - @handle_grpc_network_errors - async def get_task_info( - self, node_id: str, timeout: int = None, limit: int = None - ) -> Optional[GetTasksInfoReply]: - if not limit: - limit = RAY_MAX_LIMIT_FROM_DATA_SOURCE - stub = self._raylet_stubs.get(node_id) - if not stub: - raise ValueError(f"Raylet for a node id, {node_id} doesn't exist.") - - reply = await stub.GetTasksInfo( - GetTasksInfoRequest(limit=limit), timeout=timeout - ) - return reply - - @handle_grpc_network_errors - async def get_object_info( - self, node_id: str, timeout: int = None, limit: int = None - ) -> Optional[GetObjectsInfoReply]: - if not limit: - limit = RAY_MAX_LIMIT_FROM_DATA_SOURCE - - stub = self._raylet_stubs.get(node_id) - if not stub: - raise ValueError(f"Raylet for a node id, {node_id} doesn't exist.") - - reply = await stub.GetObjectsInfo( - GetObjectsInfoRequest(limit=limit), - timeout=timeout, - ) - return reply - - @handle_grpc_network_errors - async def get_runtime_envs_info( - self, node_id: str, timeout: int = None, limit: int = None - ) -> Optional[GetRuntimeEnvsInfoReply]: - if not limit: - limit = RAY_MAX_LIMIT_FROM_DATA_SOURCE - - stub = self._runtime_env_agent_stub.get(node_id) - if not stub: - raise ValueError(f"Agent for a node id, {node_id} doesn't exist.") - - reply = await stub.GetRuntimeEnvsInfo( - 
GetRuntimeEnvsInfoRequest(limit=limit), - timeout=timeout, - ) - return reply - - @handle_grpc_network_errors - async def list_logs( - self, node_id: str, glob_filter: str, timeout: int = None - ) -> ListLogsReply: - stub = self._log_agent_stub.get(node_id) - if not stub: - raise ValueError(f"Agent for node id: {node_id} doesn't exist.") - return await stub.ListLogs( - ListLogsRequest(glob_filter=glob_filter), timeout=timeout - ) - - @handle_grpc_network_errors - async def stream_log( - self, - node_id: str, - log_file_name: str, - keep_alive: bool, - lines: int, - interval: Optional[float], - timeout: int, - task_id: Optional[str] = None, - attempt_number: Optional[int] = None, - ) -> UnaryStreamCall: - stub = self._log_agent_stub.get(node_id) - if not stub: - raise ValueError(f"Agent for node id: {node_id} doesn't exist.") - stream = stub.StreamLog( - StreamLogRequest( - keep_alive=keep_alive, - log_file_name=log_file_name, - lines=lines, - interval=interval, - task_id=task_id, - attempt_number=attempt_number, - ), - timeout=timeout, - ) - metadata = await stream.initial_metadata() - if metadata.get(log_consts.LOG_GRPC_ERROR) == log_consts.FILE_NOT_FOUND: - raise ValueError(f'File "{log_file_name}" not found on node {node_id}') - return stream +record_deprecated_state_api_import() diff --git a/python/ray/experimental/state/util.py b/python/ray/experimental/state/util.py index f7ba1d599342..24a26dd72982 100644 --- a/python/ray/experimental/state/util.py +++ b/python/ray/experimental/state/util.py @@ -1,47 +1,4 @@ -from typing import Optional, Union +from ray.util.state.util import * # noqa: F401 F403 +from ray.util.state.util import record_deprecated_state_api_import - -def convert_string_to_type( - val: Optional[Union[str, int, float, bool]], convert_type: Union[int, float, bool] -) -> Union[int, float, bool]: - """Convert the given value to a convert type. - - If the given val is None, it will just return None without the conversion. 
- - It supports, - str -> int/float/bool - int -> int - bool -> bool - float -> float - """ - if val is None: - return None - elif type(val) is convert_type: - return val - elif convert_type is int: - try: - val = int(val) - except ValueError: - raise ValueError( - f"Failed to convert a value {val} of type {type(val)} to {convert_type}" - ) - elif convert_type is float: - try: - val = float(val) - except ValueError: - raise ValueError( - f"Failed to convert a value {val} of type {type(val)} to {convert_type}" - ) - elif convert_type is bool: - # Without this, "False" will become True. - if val == "False" or val == "false" or val == "0": - val = False - elif val == "True" or val == "true" or val == "1": - val = True - else: - raise ValueError( - f"Failed to convert a value {val} of type {type(val)} to {convert_type}" - ) - else: - assert False, f"Unsupported convert type {convert_type}" - return val +record_deprecated_state_api_import() diff --git a/python/ray/scripts/scripts.py b/python/ray/scripts/scripts.py index 6c8d33c0347e..8dc704ea84a8 100644 --- a/python/ray/scripts/scripts.py +++ b/python/ray/scripts/scripts.py @@ -2427,7 +2427,7 @@ def add_command_alias(command, name, hidden): cli.add_command(enable_usage_stats) try: - from ray.experimental.state.state_cli import ( + from ray.util.state.state_cli import ( ray_get, ray_list, logs_state_cli_group, diff --git a/python/ray/serve/tests/test_autoscaling_policy.py b/python/ray/serve/tests/test_autoscaling_policy.py index 1805c84d718f..efe330e8769d 100644 --- a/python/ray/serve/tests/test_autoscaling_policy.py +++ b/python/ray/serve/tests/test_autoscaling_policy.py @@ -24,7 +24,7 @@ ) from ray.serve.controller import ServeController from ray.serve.deployment import Deployment -import ray.experimental.state.api as state_api +import ray.util.state as state_api from ray.dashboard.modules.serve.sdk import ServeSubmissionClient import ray diff --git a/python/ray/serve/tests/test_cli.py 
b/python/ray/serve/tests/test_cli.py index 14ecb989eadf..976ed497e9cf 100644 --- a/python/ray/serve/tests/test_cli.py +++ b/python/ray/serve/tests/test_cli.py @@ -15,7 +15,7 @@ import ray from ray import serve -from ray.experimental.state.api import list_actors +from ray.util.state import list_actors from ray._private.test_utils import wait_for_condition from ray.serve.schema import ServeApplicationSchema from ray.serve._private.constants import SERVE_NAMESPACE, MULTI_APP_MIGRATION_MESSAGE @@ -1084,7 +1084,6 @@ async def __call__(self): @pytest.mark.parametrize("node", ["TestBuildFNode", "TestBuildDagNode"]) def test_build(ray_start_stop, node): with NamedTemporaryFile(mode="w+", suffix=".yaml") as tmp: - print(f'Building node "{node}".') # Build an app subprocess.check_output( @@ -1116,7 +1115,6 @@ def test_build(ray_start_stop, node): @pytest.mark.skipif(sys.platform == "win32", reason="File path incorrect on Windows.") def test_build_multi_app(ray_start_stop): with NamedTemporaryFile(mode="w+", suffix=".yaml") as tmp: - print('Building nodes "TestApp1Node" and "TestApp2Node".') # Build an app subprocess.check_output( diff --git a/python/ray/serve/tests/test_controller_recovery.py b/python/ray/serve/tests/test_controller_recovery.py index 77d262c26ee2..34765cf1ccd6 100644 --- a/python/ray/serve/tests/test_controller_recovery.py +++ b/python/ray/serve/tests/test_controller_recovery.py @@ -7,7 +7,7 @@ import ray from ray._private.test_utils import SignalActor -from ray.experimental.state.api import list_actors +from ray.util.state import list_actors from ray import serve from ray.serve._private.constants import ( @@ -23,6 +23,7 @@ def test_recover_start_from_replica_actor_names(serve_instance): """Test controller is able to recover starting -> running replicas from actor names. """ + # Test failed to deploy with total of 2 replicas, # but first constructor call fails. 
@serve.deployment(name="recover_start_from_replica_actor_names", num_replicas=2) diff --git a/python/ray/serve/tests/test_metrics.py b/python/ray/serve/tests/test_metrics.py index 15f393ecd30d..ff199aa4ed32 100644 --- a/python/ray/serve/tests/test_metrics.py +++ b/python/ray/serve/tests/test_metrics.py @@ -8,7 +8,7 @@ from ray import serve from ray._private.test_utils import wait_for_condition from ray.serve._private.utils import block_until_http_ready -import ray.experimental.state.api as state_api +import ray.util.state as state_api from fastapi import FastAPI from ray.serve.metrics import Counter, Histogram, Gauge from ray.serve._private.constants import DEFAULT_LATENCY_BUCKET_MS @@ -89,7 +89,6 @@ def verify_metrics(do_assert=False): def test_http_metrics(serve_start_shutdown): - # NOTE: These metrics should be documented at # https://docs.ray.io/en/latest/serve/monitoring.html#metrics # Any updates here should be reflected there too. diff --git a/python/ray/serve/tests/test_standalone.py b/python/ray/serve/tests/test_standalone.py index 4d2f10616d66..65ed21cd61db 100644 --- a/python/ray/serve/tests/test_standalone.py +++ b/python/ray/serve/tests/test_standalone.py @@ -40,7 +40,7 @@ ) from ray.serve.schema import ServeApplicationSchema -from ray.experimental.state.api import list_actors +from ray.util.state import list_actors # Explicitly importing it here because it is a ray core tests utility ( # not in the tree) diff --git a/python/ray/serve/tests/test_standalone2.py b/python/ray/serve/tests/test_standalone2.py index 7d7c615b17e6..0c1b9b91b753 100644 --- a/python/ray/serve/tests/test_standalone2.py +++ b/python/ray/serve/tests/test_standalone2.py @@ -13,7 +13,7 @@ import ray import ray.actor import ray._private.state -from ray.experimental.state.api import list_actors +from ray.util.state import list_actors from ray import serve from ray._private.test_utils import ( @@ -246,7 +246,6 @@ def controller_died(handle): def test_get_serve_status(shutdown_ray): - 
ray.init() @serve.deployment @@ -331,7 +330,6 @@ def generate_pid_based_deserializer(pid, raw_deserializer): """Cannot be deserialized by the process with specified pid.""" def deserializer(*args): - import os if os.getpid() == pid: @@ -576,7 +574,6 @@ def test_deploy_multi_app(self, client: ServeControllerClient): self.check_multi_app() def test_deploy_app_with_overriden_config(self, client: ServeControllerClient): - config = self.get_test_config() config["deployments"] = [ { diff --git a/python/ray/tests/test_actor_advanced.py b/python/ray/tests/test_actor_advanced.py index d90008242b82..3f692e444dfa 100644 --- a/python/ray/tests/test_actor_advanced.py +++ b/python/ray/tests/test_actor_advanced.py @@ -7,7 +7,7 @@ import ray import ray._private.gcs_utils as gcs_utils -from ray.experimental.state.api import list_actors +from ray.util.state import list_actors import ray.cluster_utils from ray._private.test_utils import ( SignalActor, @@ -1303,7 +1303,7 @@ def verify_cached_dead_actor_cleaned(): driver = """ import ray -from ray.experimental.state.api import list_actors +from ray.util.state import list_actors ray.init("auto") @ray.remote diff --git a/python/ray/tests/test_actor_state_metrics.py b/python/ray/tests/test_actor_state_metrics.py index a0069c161f74..e53b06c9df99 100644 --- a/python/ray/tests/test_actor_state_metrics.py +++ b/python/ray/tests/test_actor_state_metrics.py @@ -6,7 +6,7 @@ import ray -from ray.experimental.state.api import list_actors +from ray.util.state import list_actors from ray._private.test_utils import ( raw_metrics, wait_for_condition, diff --git a/python/ray/tests/test_cancel.py b/python/ray/tests/test_cancel.py index 00d8299e1a07..1c6d33d4d61f 100644 --- a/python/ray/tests/test_cancel.py +++ b/python/ray/tests/test_cancel.py @@ -18,7 +18,7 @@ ) from ray._private.utils import DeferSigint from ray._private.test_utils import SignalActor, wait_for_condition -from ray.experimental.state.api import list_tasks +from ray.util.state import 
list_tasks def valid_exceptions(use_force): @@ -508,7 +508,6 @@ def inner(): @ray.remote(num_cpus=1) def outer(): - x = [inner.remote()] print(x) while True: diff --git a/python/ray/tests/test_cli.py b/python/ray/tests/test_cli.py index 1a9c813a8674..865099a5ab1f 100644 --- a/python/ray/tests/test_cli.py +++ b/python/ray/tests/test_cli.py @@ -44,7 +44,7 @@ import ray.scripts.scripts as scripts from ray._private.test_utils import wait_for_condition from ray.cluster_utils import cluster_not_supported -from ray.experimental.state.api import list_nodes +from ray.util.state import list_nodes import psutil @@ -193,7 +193,6 @@ def _debug_check_line_by_line(result, expected_lines): if i < len(expected_lines): print("!!! ERROR: Expected extra lines (regex):") for line in expected_lines[i:]: - print(repr(line)) assert False diff --git a/python/ray/tests/test_client_builder.py b/python/ray/tests/test_client_builder.py index 318c337132bf..64edd7cb9365 100644 --- a/python/ray/tests/test_client_builder.py +++ b/python/ray/tests/test_client_builder.py @@ -14,7 +14,7 @@ run_string_as_driver_nonblocking, wait_for_condition, ) -from ray.experimental.state.api import list_workers +from ray.util.state import list_workers @pytest.mark.parametrize( diff --git a/python/ray/tests/test_exit_observability.py b/python/ray/tests/test_exit_observability.py index 114ec1d504e5..eea3b9470016 100644 --- a/python/ray/tests/test_exit_observability.py +++ b/python/ray/tests/test_exit_observability.py @@ -8,7 +8,7 @@ import ray from ray._private.test_utils import run_string_as_driver, wait_for_condition -from ray.experimental.state.api import list_workers, list_nodes +from ray.util.state import list_workers, list_nodes from ray.util.scheduling_strategies import PlacementGroupSchedulingStrategy diff --git a/python/ray/tests/test_failure_4.py b/python/ray/tests/test_failure_4.py index 17d5a9940d49..d040953b9493 100644 --- a/python/ray/tests/test_failure_4.py +++ b/python/ray/tests/test_failure_4.py @@ 
-7,7 +7,7 @@ import psutil import pytest from grpc._channel import _InactiveRpcError -from ray.experimental.state.api import list_tasks +from ray.util.state import list_tasks from ray._private.state_api_test_utils import verify_failed_task import ray diff --git a/python/ray/tests/test_memory_pressure.py b/python/ray/tests/test_memory_pressure.py index d19aa502aac1..421141e87b77 100644 --- a/python/ray/tests/test_memory_pressure.py +++ b/python/ray/tests/test_memory_pressure.py @@ -16,7 +16,7 @@ from ray._private.utils import get_used_memory from ray._private.state_api_test_utils import verify_failed_task -from ray.experimental.state.state_manager import StateDataSourceClient +from ray.util.state.state_manager import StateDataSourceClient memory_usage_threshold = 0.65 diff --git a/python/ray/tests/test_metrics_agent.py b/python/ray/tests/test_metrics_agent.py index 1a3fc1e62677..06166b5c3941 100644 --- a/python/ray/tests/test_metrics_agent.py +++ b/python/ray/tests/test_metrics_agent.py @@ -12,7 +12,7 @@ import pytest import ray -from ray.experimental.state.api import list_nodes +from ray.util.state import list_nodes from ray._private.metrics_agent import PrometheusServiceDiscoveryWriter from ray._private.ray_constants import PROMETHEUS_SERVICE_DISCOVERY_FILE from ray._private.test_utils import ( diff --git a/python/ray/tests/test_node_manager.py b/python/ray/tests/test_node_manager.py index 88fcef336742..7e3b1c5b8427 100644 --- a/python/ray/tests/test_node_manager.py +++ b/python/ray/tests/test_node_manager.py @@ -1,5 +1,5 @@ import ray -from ray.experimental.state.api import list_workers +from ray.util.state import list_workers from ray._private.test_utils import ( get_load_metrics_report, run_string_as_driver, @@ -9,7 +9,7 @@ ) import pytest import os -from ray.experimental.state.api import list_objects +from ray.util.state import list_objects import subprocess from ray._private.utils import get_num_cpus import time diff --git 
a/python/ray/tests/test_out_of_disk_space.py b/python/ray/tests/test_out_of_disk_space.py index 8ef155e75766..b92d4c80fceb 100644 --- a/python/ray/tests/test_out_of_disk_space.py +++ b/python/ray/tests/test_out_of_disk_space.py @@ -10,7 +10,7 @@ import pytest import ray -from ray.experimental.state.api import list_cluster_events +from ray.util.state import list_cluster_events def calculate_capacity_threshold(disk_capacity_in_bytes): diff --git a/python/ray/tests/test_state_api.py b/python/ray/tests/test_state_api.py index bc997971970f..c154ef9f52cf 100644 --- a/python/ray/tests/test_state_api.py +++ b/python/ray/tests/test_state_api.py @@ -8,9 +8,9 @@ from unittest.mock import MagicMock import pytest -from ray.experimental.state.api import get_job +from ray.util.state import get_job from ray.dashboard.modules.job.pydantic_models import JobDetails -from ray.experimental.state.common import Humanify +from ray.util.state.common import Humanify from ray._private.gcs_utils import GcsAioClient import yaml from click.testing import CliRunner @@ -65,7 +65,7 @@ StateAPIManager, _convert_filters_type, ) -from ray.experimental.state.api import ( +from ray.util.state import ( get_actor, get_node, get_objects, @@ -87,7 +87,7 @@ StateApiClient, ) from ray._private.event.event_logger import get_event_id -from ray.experimental.state.common import ( +from ray.util.state.common import ( DEFAULT_LIMIT, DEFAULT_RPC_TIMEOUT, ActorState, @@ -105,16 +105,16 @@ state_column, ) from ray.dashboard.utils import ray_address_to_api_server_url -from ray.experimental.state.exception import DataSourceUnavailable, RayStateApiException -from ray.experimental.state.state_cli import ( +from ray.util.state.exception import DataSourceUnavailable, RayStateApiException +from ray.util.state.state_cli import ( AvailableFormat, format_list_api_output, _parse_filter, summary_state_cli_group, ) -from ray.experimental.state.state_cli import ray_get -from ray.experimental.state.state_cli import ray_list -from 
ray.experimental.state.state_manager import IdToIpMap, StateDataSourceClient +from ray.util.state.state_cli import ray_get +from ray.util.state.state_cli import ray_list +from ray.util.state.state_manager import IdToIpMap, StateDataSourceClient from ray.job_submission import JobSubmissionClient from ray.runtime_env import RuntimeEnv diff --git a/python/ray/tests/test_state_api_2.py b/python/ray/tests/test_state_api_2.py index 6419bd9c132d..cbb654565aac 100644 --- a/python/ray/tests/test_state_api_2.py +++ b/python/ray/tests/test_state_api_2.py @@ -9,7 +9,7 @@ import pytest from ray._private.profiling import chrome_tracing_dump -from ray.experimental.state.api import ( +from ray.util.state import ( get_actor, list_tasks, list_actors, @@ -252,6 +252,31 @@ def get_actor(self, name): wait_for_condition(_verify_repr_name, id=a._actor_id.hex(), name="inner") +def test_experimental_import_deprecation(): + with pytest.warns(DeprecationWarning): + from ray.experimental.state.api import list_tasks # noqa: F401 + + with pytest.warns(DeprecationWarning): + from ray.experimental.state.common import DEFAULT_RPC_TIMEOUT # noqa: F401 + + with pytest.warns(DeprecationWarning): + from ray.experimental.state.custom_types import ACTOR_STATUS # noqa: F401 + + with pytest.warns(DeprecationWarning): + from ray.experimental.state.exception import RayStateApiException # noqa: F401 + + with pytest.warns(DeprecationWarning): + from ray.experimental.state.state_cli import ray_get # noqa: F401 + + with pytest.warns(DeprecationWarning): + from ray.experimental.state.state_manager import ( # noqa: F401 + StateDataSourceClient, + ) + + with pytest.warns(DeprecationWarning): + from ray.experimental.state.util import convert_string_to_type # noqa: F401 + + def test_actor_task_with_repr_name(): @ray.remote class ReprActor: diff --git a/python/ray/tests/test_state_api_log.py b/python/ray/tests/test_state_api_log.py index 11b8a15b9676..f484f943e6b9 100644 --- a/python/ray/tests/test_state_api_log.py 
+++ b/python/ray/tests/test_state_api_log.py @@ -7,8 +7,8 @@ from unittest.mock import MagicMock import pytest -from ray.experimental.state.api import list_jobs -from ray.experimental.state.state_cli import logs_state_cli_group +from ray.util.state.state_cli import logs_state_cli_group +from ray.util.state import list_jobs import requests from click.testing import CliRunner import grpc @@ -46,10 +46,10 @@ from ray.dashboard.modules.log.log_agent import _stream_log_in_chunk from ray.dashboard.modules.log.log_manager import LogsManager from ray.dashboard.tests.conftest import * # noqa -from ray.experimental.state.api import get_log, list_logs, list_nodes, list_workers -from ray.experimental.state.common import GetLogOptions -from ray.experimental.state.exception import DataSourceUnavailable -from ray.experimental.state.state_manager import StateDataSourceClient +from ray.util.state import get_log, list_logs, list_nodes, list_workers +from ray.util.state.common import GetLogOptions +from ray.util.state.exception import DataSourceUnavailable +from ray.util.state.state_manager import StateDataSourceClient if sys.version_info >= (3, 8, 0): from unittest.mock import AsyncMock diff --git a/python/ray/tests/test_state_api_summary.py b/python/ray/tests/test_state_api_summary.py index 3cab23083134..a90b8ab7f8e9 100644 --- a/python/ray/tests/test_state_api_summary.py +++ b/python/ray/tests/test_state_api_summary.py @@ -7,7 +7,7 @@ import sys from dataclasses import asdict -from ray.experimental.state.api import ( +from ray.util.state import ( summarize_tasks, summarize_actors, summarize_objects, @@ -28,7 +28,7 @@ generate_actor_data, generate_object_info, ) -from ray.experimental.state.common import ( +from ray.util.state.common import ( DEFAULT_RPC_TIMEOUT, SummaryApiOptions, Link, @@ -39,9 +39,9 @@ from ray.core.generated.gcs_service_pb2 import GetAllActorInfoReply from ray.core.generated.gcs_pb2 import ActorTableData from click.testing import CliRunner -from 
ray.experimental.state.state_cli import summary_state_cli_group +from ray.util.state.state_cli import summary_state_cli_group from ray.dashboard.state_aggregator import StateAPIManager -from ray.experimental.state.state_manager import StateDataSourceClient +from ray.util.state.state_manager import StateDataSourceClient @pytest.fixture diff --git a/python/ray/tests/test_task_events.py b/python/ray/tests/test_task_events.py index 99784d780be9..9a64b61cea62 100644 --- a/python/ray/tests/test_task_events.py +++ b/python/ray/tests/test_task_events.py @@ -13,14 +13,14 @@ from ray.runtime_env import RuntimeEnv import ray -from ray.experimental.state.common import ListApiOptions, StateResource +from ray.util.state.common import ListApiOptions, StateResource from ray._private.test_utils import ( raw_metrics, run_string_as_driver, run_string_as_driver_nonblocking, wait_for_condition, ) -from ray.experimental.state.api import StateApiClient, list_tasks +from ray.util.state import StateApiClient, list_tasks from ray._private.worker import RayContext diff --git a/python/ray/tests/test_task_events_2.py b/python/ray/tests/test_task_events_2.py index ec5e0c36e149..2374abe3550e 100644 --- a/python/ray/tests/test_task_events_2.py +++ b/python/ray/tests/test_task_events_2.py @@ -11,12 +11,12 @@ verify_tasks_running_or_terminated, verify_failed_task, ) -from ray.experimental.state.common import ListApiOptions, StateResource +from ray.util.state.common import ListApiOptions, StateResource from ray._private.test_utils import ( run_string_as_driver_nonblocking, wait_for_condition, ) -from ray.experimental.state.api import ( +from ray.util.state import ( StateApiClient, list_actors, list_tasks, @@ -485,7 +485,6 @@ def test_fault_tolerance_nested_actors_failed(shutdown_only): def test_fault_tolerance_chained_task_fail( shutdown_only, exit_type, actor_or_normal_tasks ): - ray.init(_system_config=_SYSTEM_CONFIG) def sleep_or_fail(pid_actor=None, exit_type=None): diff --git 
a/python/ray/util/state/__init__.py b/python/ray/util/state/__init__.py new file mode 100644 index 000000000000..d74f9b650df3 --- /dev/null +++ b/python/ray/util/state/__init__.py @@ -0,0 +1,50 @@ +from ray.util.state.api import ( + get_actor, + get_log, + get_node, + get_objects, + get_placement_group, + get_task, + get_worker, + get_job, + list_actors, + list_jobs, + list_nodes, + list_placement_groups, + list_tasks, + list_workers, + list_objects, + list_runtime_envs, + list_logs, + list_cluster_events, + summarize_actors, + summarize_objects, + summarize_tasks, + StateApiClient, +) + + +__all__ = [ + "get_actor", + "get_log", + "get_node", + "get_objects", + "get_placement_group", + "get_task", + "get_worker", + "get_job", + "list_actors", + "list_jobs", + "list_nodes", + "list_placement_groups", + "list_tasks", + "list_workers", + "list_objects", + "list_runtime_envs", + "list_logs", + "list_cluster_events", + "summarize_actors", + "summarize_objects", + "summarize_tasks", + "StateApiClient", +] diff --git a/python/ray/util/state/api.py b/python/ray/util/state/api.py new file mode 100644 index 000000000000..0cef8d4cf58e --- /dev/null +++ b/python/ray/util/state/api.py @@ -0,0 +1,1443 @@ +import logging +import threading +import urllib +import warnings +from contextlib import contextmanager +from dataclasses import fields +from typing import Any, Dict, Generator, List, Optional, Tuple, Union + +import requests + +from ray.dashboard.modules.dashboard_sdk import SubmissionClient +from ray.dashboard.utils import ( + get_address_for_submission_client, + ray_address_to_api_server_url, +) +from ray.util.annotations import DeveloperAPI +from ray.util.state.common import ( + DEFAULT_LIMIT, + DEFAULT_RPC_TIMEOUT, + ActorState, + ClusterEventState, + GetApiOptions, + GetLogOptions, + JobState, + ListApiOptions, + NodeState, + ObjectState, + PlacementGroupState, + PredicateType, + RuntimeEnvState, + StateResource, + SummaryApiOptions, + SummaryResource, + 
SupportedFilterType, + TaskState, + WorkerState, + dict_to_state, +) +from ray.util.state.exception import RayStateApiException, ServerUnavailable + +logger = logging.getLogger(__name__) + + +@contextmanager +def warnings_on_slow_request( + *, address: str, endpoint: str, timeout: float, explain: bool +): + """A context manager to print warnings if the request is replied slowly. + + Warnings are printed 3 times + + Args: + address: The address of the endpoint. + endpoint: The name of the endpoint. + timeout: Request timeout in seconds. + explain: Whether ot not it will print the warning. + """ + # Do nothing if explain is not specified. + if not explain: + yield + return + + # Prepare timers to print warning. + # Print 3 times with exponential backoff. timeout / 2, timeout / 4, timeout / 8 + def print_warning(elapsed: float): + logger.info( + f"({round(elapsed, 2)} / {timeout} seconds) " + "Waiting for the response from the API server " + f"address {address}{endpoint}.", + ) + + warning_timers = [ + threading.Timer(timeout / i, print_warning, args=[timeout / i]) + for i in [2, 4, 8] + ] + + try: + for timer in warning_timers: + timer.start() + yield + finally: + # Make sure all timers are cancelled once request is terminated. + for timer in warning_timers: + timer.cancel() + + +""" +This file contains API client and methods for querying ray state. + +Usage: + 1. [Recommended] With StateApiClient: + ``` + client = StateApiClient(address="auto") + data = client.list(StateResource.NODES) + ... + ``` + + 2. With SDK APIs: + The API creates a `StateApiClient` for each invocation. So if multiple + invocations of listing are used, it is better to reuse the `StateApiClient` + as suggested above. 
+ ``` + data = list_nodes(address="auto") + ``` +""" + + +@DeveloperAPI +class StateApiClient(SubmissionClient): + """State API Client issues REST GET requests to the server for resource states.""" + + def __init__( + self, + address: Optional[str] = None, + cookies: Optional[Dict[str, Any]] = None, + headers: Optional[Dict[str, Any]] = None, + ): + """Initialize a StateApiClient and check the connection to the cluster. + + Args: + address: Ray bootstrap address (e.g. `127.0.0.0:6379`, `auto`), or Ray + Client adress (e.g. `ray://:10001`), or Ray dashboard + address (e.g. `http://:8265`). + If not provided, it will be detected automatically from any running + local Ray cluster. + cookies: Cookies to use when sending requests to the HTTP job server. + headers: Headers to use when sending requests to the HTTP job server, used + for cases like authentication to a remote cluster. + """ + if requests is None: + raise RuntimeError( + "The Ray state CLI & SDK require the ray[default] " + "installation: `pip install 'ray[default']``" + ) + if not headers: + headers = {"Content-Type": "application/json"} + + # Resolve API server URL + api_server_url = get_address_for_submission_client(address) + + super().__init__( + address=api_server_url, + create_cluster_if_needed=False, + headers=headers, + cookies=cookies, + ) + + @classmethod + def _make_param(cls, options: Union[ListApiOptions, GetApiOptions]) -> Dict: + options_dict = {} + for field in fields(options): + # TODO(rickyyx): We will need to find a way to pass server side timeout + # TODO(rickyyx): We will have to convert filter option + # slightly differently for now. But could we do k,v pair rather than this? + # I see we are also converting dict to XXXApiOptions later on, we could + # probably organize the marshaling a bit better. 
+ if field.name == "filters": + options_dict["filter_keys"] = [] + options_dict["filter_predicates"] = [] + options_dict["filter_values"] = [] + for filter in options.filters: + if len(filter) != 3: + raise ValueError( + f"The given filter has incorrect input type, {filter}. " + "Provide (key, predicate, value) tuples." + ) + filter_k, filter_predicate, filter_val = filter + options_dict["filter_keys"].append(filter_k) + options_dict["filter_predicates"].append(filter_predicate) + options_dict["filter_values"].append(filter_val) + continue + + option_val = getattr(options, field.name) + if option_val is not None: + options_dict[field.name] = option_val + + return options_dict + + def _make_http_get_request( + self, + endpoint: str, + params: Dict, + timeout: float, + _explain: bool = False, + ) -> Dict: + with warnings_on_slow_request( + address=self._address, endpoint=endpoint, timeout=timeout, explain=_explain + ): + # Send a request. + response = None + try: + response = self._do_request( + "GET", + endpoint, + timeout=timeout, + params=params, + ) + # If we have a valid JSON error, don't raise a generic exception but + # instead let the caller parse it to raise a more precise exception. + if ( + response.status_code == 500 + and "application/json" + not in response.headers.get("Content-Type", "") + ): + response.raise_for_status() + except requests.exceptions.RequestException as e: + err_str = f"Failed to make request to {self._address}{endpoint}. " + + # Best-effort to give hints to users on potential reasons of connection + # failure. + err_str += ( + "Failed to connect to API server. Please check the API server " + "log for details. Make sure dependencies are installed with " + "`pip install ray[default]`. Please also check dashboard is " + "available, and included when starting ray cluster, " + "i.e. `ray start --include-dashboard=True --head`. 
" + ) + if response is None: + raise ServerUnavailable(err_str) + + err_str += f"Response(url={response.url},status={response.status_code})" + raise RayStateApiException(err_str) from e + + # Process the response. + response = response.json() + if response["result"] is False: + raise RayStateApiException( + "API server internal error. See dashboard.log file for more details. " + f"Error: {response['msg']}" + ) + + # Dictionary of `ListApiResponse` or `SummaryApiResponse` + return response["data"]["result"] + + def get( + self, + resource: StateResource, + id: str, + options: Optional[GetApiOptions], + _explain: bool = False, + ) -> Optional[ + Union[ + ActorState, + PlacementGroupState, + NodeState, + WorkerState, + TaskState, + List[ObjectState], + JobState, + ] + ]: + """Get resources states by id + + Args: + resource_name: Resource names, i.e. 'workers', 'actors', 'nodes', + 'placement_groups', 'tasks', 'objects'. + 'jobs' and 'runtime-envs' are not supported yet. + id: ID for the resource, i.e. 'node_id' for nodes. + options: Get options. See `GetApiOptions` for details. + _explain: Print the API information such as API + latency or failed query information. + + Returns: + None if not found, and if found: + - ActorState for actors + - PlacementGroupState for placement groups + - NodeState for nodes + - WorkerState for workers + - TaskState for tasks + - JobState for jobs + + Empty list for objects if not found, or list of ObjectState for objects + + Raises: + This doesn't catch any exceptions raised when the underlying request + call raises exceptions. For example, it could raise `requests.Timeout` + when timeout occurs. + + ValueError: + if the resource could not be GET by id, i.e. jobs and runtime-envs. 
+ + """ + # TODO(rickyyx): Make GET not using filters on list operation + params = self._make_param(options) + + RESOURCE_ID_KEY_NAME = { + StateResource.NODES: "node_id", + StateResource.ACTORS: "actor_id", + StateResource.PLACEMENT_GROUPS: "placement_group_id", + StateResource.WORKERS: "worker_id", + StateResource.TASKS: "task_id", + StateResource.OBJECTS: "object_id", + StateResource.JOBS: "submission_id", + } + if resource not in RESOURCE_ID_KEY_NAME: + raise ValueError(f"Can't get {resource.name} by id.") + + params["filter_keys"] = [RESOURCE_ID_KEY_NAME[resource]] + params["filter_predicates"] = ["="] + params["filter_values"] = [id] + params["detail"] = True + endpoint = f"/api/v0/{resource.value}" + + list_api_response = self._make_http_get_request( + endpoint=endpoint, + params=params, + timeout=options.timeout, + _explain=_explain, + ) + result = list_api_response["result"] + + # Empty result + if len(result) == 0: + return None + + result = [dict_to_state(d, resource) for d in result] + if resource == StateResource.OBJECTS: + # NOTE(rickyyx): + # There might be multiple object entries for a single object id + # because a single object could be referenced at different places + # e.g. pinned as local variable, used as parameter + return result + + if resource == StateResource.TASKS: + # There might be multiple task attempts given a task id due to + # task retries. + if len(result) == 1: + return result[0] + return result + + # For the rest of the resources, there should only be a single entry + # for a particular id. + assert len(result) == 1 + return result[0] + + def _print_api_warning( + self, + resource: StateResource, + api_response: dict, + warn_data_source_not_available: bool = True, + warn_data_truncation: bool = True, + warn_limit: bool = True, + warn_server_side_warnings: bool = True, + ): + """Print the API warnings. + + Args: + resource: Resource names, i.e. 'jobs', 'actors', 'nodes', + see `StateResource` for details. 
+ api_response: The dictionarified `ListApiResponse` or `SummaryApiResponse`. + warn_data_source_not_available: Warn when some data sources + are not available. + warn_data_truncation: Warn when results were truncated at + the data source. + warn_limit: Warn when results were limited. + warn_server_side_warnings: Warn when the server side generates warnings + (E.g., when callsites not enabled for listing objects) + """ + # Print warnings if anything was given. + if warn_data_source_not_available: + warning_msgs = api_response.get("partial_failure_warning", None) + if warning_msgs: + warnings.warn(warning_msgs) + + if warn_data_truncation: + # Print warnings if data is truncated at the data source. + num_after_truncation = api_response["num_after_truncation"] + total = api_response["total"] + if total > num_after_truncation: + # NOTE(rickyyx): For now, there's not much users + # could do (neither can we), with hard truncation. + # Unless we allow users to set a higher + # `RAY_MAX_LIMIT_FROM_DATA_SOURCE`, the data will + # always be truncated at the data source. + warnings.warn( + ( + "The returned data may contain incomplete result. " + f"{num_after_truncation} ({total} total from the cluster) " + f"{resource.value} are retrieved from the data source. " + f"{total - num_after_truncation} entries have been truncated. " + f"Max of {num_after_truncation} entries are retrieved " + "from data source to prevent over-sized payloads." + ), + ) + + if warn_limit: + # Print warnings if return data is limited at the API server due to + # limit enforced at the server side + num_filtered = api_response["num_filtered"] + data = api_response["result"] + if num_filtered > len(data): + warnings.warn( + ( + f"Limit last {len(data)} entries " + f"(Total {num_filtered}). Use `--filter` to reduce " + "the amount of data to return or " + "setting a higher limit with `--limit` to see all data. " + ), + ) + + if warn_server_side_warnings: + # Print the additional warnings. 
+ warnings_to_print = api_response.get("warnings", []) + if warnings_to_print: + for warning_to_print in warnings_to_print: + warnings.warn(warning_to_print) + + def _raise_on_missing_output(self, resource: StateResource, api_response: dict): + """Raise an exception when the API resopnse contains a missing output. + + Output can be missing if (1) Failures on some of data source queries (e.g., + `ray list tasks` queries all raylets, and if some of queries fail, it will + contain missing output. If all queries fail, it will just fail). (2) Data + is truncated because the output is too large. + + Args: + resource: Resource names, i.e. 'jobs', 'actors', 'nodes', + see `StateResource` for details. + api_response: The dictionarified `ListApiResponse` or `SummaryApiResponse`. + """ + # Raise an exception if there are partial failures that cause missing output. + warning_msgs = api_response.get("partial_failure_warning", None) + if warning_msgs: + raise RayStateApiException( + f"Failed to retrieve all {resource.value} from the cluster because" + "they are not reachable due to query failures to the data sources. " + "To avoid raising an exception and allow having missing output, " + "set `raise_on_missing_output=False`. " + ) + # Raise an exception is there is data truncation that cause missing output. + total = api_response["total"] + num_after_truncation = api_response["num_after_truncation"] + + if total != num_after_truncation: + raise RayStateApiException( + f"Failed to retrieve all {resource.value} from the cluster because " + "they are not reachable due to data truncation. It happens " + "when the returned data is too large " + # When the data is truncated, the truncation + # threshold == num_after_truncation. We cannot set this to env + # var because the CLI side might not have the correct env var. + f"(> {num_after_truncation}) " + "To avoid raising an exception and allow having missing output, " + "set `raise_on_missing_output=False`. 
" + ) + + def list( + self, + resource: StateResource, + options: ListApiOptions, + raise_on_missing_output: bool, + _explain: bool = False, + ) -> List[ + Union[ + ActorState, + JobState, + NodeState, + TaskState, + ObjectState, + PlacementGroupState, + RuntimeEnvState, + WorkerState, + ClusterEventState, + ] + ]: + """List resources states + + Args: + resource: Resource names, i.e. 'jobs', 'actors', 'nodes', + see `StateResource` for details. + options: List options. See `ListApiOptions` for details. + raise_on_missing_output: When True, raise an exception if the output + is incomplete. Output can be incomplete if + (1) there's a partial network failure when the source is distributed. + (2) data is truncated because it is too large. + Set it to False to avoid throwing an exception on missing data. + _explain: Print the API information such as API + latency or failed query information. + + Returns: + A list of queried result from `ListApiResponse`, + + Raises: + This doesn't catch any exceptions raised when the underlying request + call raises exceptions. For example, it could raise `requests.Timeout` + when timeout occurs. + + """ + + endpoint = f"/api/v0/{resource.value}" + params = self._make_param(options) + list_api_response = self._make_http_get_request( + endpoint=endpoint, + params=params, + timeout=options.timeout, + _explain=_explain, + ) + if raise_on_missing_output: + self._raise_on_missing_output(resource, list_api_response) + if _explain: + self._print_api_warning(resource, list_api_response) + return [dict_to_state(d, resource) for d in list_api_response["result"]] + + def summary( + self, + resource: SummaryResource, + *, + options: SummaryApiOptions, + raise_on_missing_output: bool, + _explain: bool = False, + ) -> Dict: + """Summarize resources states + + Args: + resource_name: Resource names, + see `SummaryResource` for details. + options: summary options. See `SummaryApiOptions` for details. 
+ raise_on_missing_output: Raise an exception if the output has missing data. + Output can have missing data if (1) there's a partial network failure + when the source is distributed. (2) data is truncated + because it is too large. + _explain: Print the API information such as API + latency or failed query information. + + Returns: + A dictionary of queried result from `SummaryApiResponse`. + + Raises: + This doesn't catch any exceptions raised when the underlying request + call raises exceptions. For example, it could raise `requests.Timeout` + when timeout occurs. + """ + params = {"timeout": options.timeout} + endpoint = f"/api/v0/{resource.value}/summarize" + summary_api_response = self._make_http_get_request( + endpoint=endpoint, + params=params, + timeout=options.timeout, + _explain=_explain, + ) + if raise_on_missing_output: + self._raise_on_missing_output(resource, summary_api_response) + if _explain: + # There's no limit applied to summary, so we shouldn't warn. + self._print_api_warning(resource, summary_api_response, warn_limit=False) + return summary_api_response["result"]["node_id_to_summary"] + + +@DeveloperAPI +def get_actor( + id: str, + address: Optional[str] = None, + timeout: int = DEFAULT_RPC_TIMEOUT, + _explain: bool = False, +) -> Optional[Dict]: + """Get an actor by id. + + Args: + id: Id of the actor + address: Ray bootstrap address, could be `auto`, `localhost:6379`. + If None, it will be resolved automatically from an initialized ray. + timeout: Max timeout value for the state API requests made. + _explain: Print the API information such as API latency or + failed query information. + + Returns: + None if actor not found, or + :class:`ActorState `. + + Raises: + Exceptions: :class:`RayStateApiException ` if the CLI + failed to query the data. 
+ """ # noqa: E501 + return StateApiClient(address=address).get( + StateResource.ACTORS, id, GetApiOptions(timeout=timeout), _explain=_explain + ) + + +@DeveloperAPI +def get_job( + id: str, + address: Optional[str] = None, + timeout: int = DEFAULT_RPC_TIMEOUT, + _explain: bool = False, +) -> Optional[JobState]: + """Get a submission job detail by id. + + Args: + id: Submission ID obtained from job API. + address: Ray bootstrap address, could be `auto`, `localhost:6379`. + If None, it will be resolved automatically from an initialized ray. + timeout: Max timeout value for the state API requests made. + _explain: Print the API information such as API latency or + failed query information. + + Returns: + None if job not found, or + :class:`JobState `. + + Raises: + Exceptions: :class:`RayStateApiException ` if the CLI + failed to query the data. + """ # noqa: E501 + return StateApiClient(address=address).get( + StateResource.JOBS, + id, + GetApiOptions(timeout=timeout), + _explain=_explain, + ) + + +@DeveloperAPI +def get_placement_group( + id: str, + address: Optional[str] = None, + timeout: int = DEFAULT_RPC_TIMEOUT, + _explain: bool = False, +) -> Optional[PlacementGroupState]: + """Get a placement group by id. + + Args: + id: Id of the placement group + address: Ray bootstrap address, could be `auto`, `localhost:6379`. + If None, it will be resolved automatically from an initialized ray. + timeout: Max timeout value for the state APIs requests made. + _explain: Print the API information such as API latency or + failed query information. + + Returns: + None if actor not found, or + :class:`~ray.util.state.common.PlacementGroupState`. + + Raises: + Exceptions: :class:`RayStateApiException ` if the CLI + failed to query the data. 
+ """ # noqa: E501 + return StateApiClient(address=address).get( + StateResource.PLACEMENT_GROUPS, + id, + GetApiOptions(timeout=timeout), + _explain=_explain, + ) + + +@DeveloperAPI +def get_node( + id: str, + address: Optional[str] = None, + timeout: int = DEFAULT_RPC_TIMEOUT, + _explain: bool = False, +) -> Optional[NodeState]: + """Get a node by id. + + Args: + id: Id of the node. + address: Ray bootstrap address, could be `auto`, `localhost:6379`. + If None, it will be resolved automatically from an initialized ray. + timeout: Max timeout value for the state APIs requests made. + _explain: Print the API information such as API latency or + failed query information. + + Returns: + None if actor not found, or + :class:`NodeState `. + + Raises: + Exceptions: :class:`RayStateApiException ` + if the CLI is failed to query the data. + """ # noqa: E501 + return StateApiClient(address=address).get( + StateResource.NODES, + id, + GetApiOptions(timeout=timeout), + _explain=_explain, + ) + + +@DeveloperAPI +def get_worker( + id: str, + address: Optional[str] = None, + timeout: int = DEFAULT_RPC_TIMEOUT, + _explain: bool = False, +) -> Optional[WorkerState]: + """Get a worker by id. + + Args: + id: Id of the worker + address: Ray bootstrap address, could be `auto`, `localhost:6379`. + If None, it will be resolved automatically from an initialized ray. + timeout: Max timeout value for the state APIs requests made. + _explain: Print the API information such as API latency or + failed query information. + + Returns: + None if actor not found, or + :class:`WorkerState `. + + Raises: + Exceptions: :class:`RayStateApiException ` if the CLI + failed to query the data. 
+ """ # noqa: E501 + return StateApiClient(address=address).get( + StateResource.WORKERS, + id, + GetApiOptions(timeout=timeout), + _explain=_explain, + ) + + +@DeveloperAPI +def get_task( + id: str, + address: Optional[str] = None, + timeout: int = DEFAULT_RPC_TIMEOUT, + _explain: bool = False, +) -> Optional[TaskState]: + """Get task attempts of a task by id. + + Args: + id: Id of the task + address: Ray bootstrap address, could be `auto`, `localhost:6379`. + If None, it will be resolved automatically from an initialized ray. + timeout: Max timeout value for the state APIs requests made. + _explain: Print the API information such as API latency or + failed query information. + + Returns: + None if task not found, or a list of + :class:`~ray.util.state.common.TaskState` + from the task attempts. + + Raises: + Exceptions: :class:`RayStateApiException ` if the CLI + failed to query the data. + """ # noqa: E501 + return StateApiClient(address=address).get( + StateResource.TASKS, + id, + GetApiOptions(timeout=timeout), + _explain=_explain, + ) + + +@DeveloperAPI +def get_objects( + id: str, + address: Optional[str] = None, + timeout: int = DEFAULT_RPC_TIMEOUT, + _explain: bool = False, +) -> List[ObjectState]: + """Get objects by id. + + There could be more than 1 entry returned since an object could be + referenced at different places. + + Args: + id: Id of the object + address: Ray bootstrap address, could be `auto`, `localhost:6379`. + If None, it will be resolved automatically from an initialized ray. + timeout: Max timeout value for the state APIs requests made. + _explain: Print the API information such as API latency or + failed query information. + + Returns: + List of + :class:`~ray.util.state.common.ObjectState`. + + Raises: + Exceptions: :class:`RayStateApiException ` if the CLI + failed to query the data. 
+ """ # noqa: E501 + return StateApiClient(address=address).get( + StateResource.OBJECTS, + id, + GetApiOptions(timeout=timeout), + _explain=_explain, + ) + + +@DeveloperAPI +def list_actors( + address: Optional[str] = None, + filters: Optional[List[Tuple[str, PredicateType, SupportedFilterType]]] = None, + limit: int = DEFAULT_LIMIT, + timeout: int = DEFAULT_RPC_TIMEOUT, + detail: bool = False, + raise_on_missing_output: bool = True, + _explain: bool = False, +) -> List[ActorState]: + """List actors in the cluster. + + Args: + address: Ray bootstrap address, could be `auto`, `localhost:6379`. + If None, it will be resolved automatically from an initialized ray. + filters: List of tuples of filter key, predicate (=, or !=), and + the filter value. E.g., `("id", "=", "abcd")` + limit: Max number of entries returned by the state backend. + timeout: Max timeout value for the state APIs requests made. + detail: When True, more details info (specified in `ActorState`) + will be queried and returned. See + :class:`ActorState `. + raise_on_missing_output: When True, exceptions will be raised if + there is missing data due to truncation/data source unavailable. + _explain: Print the API information such as API latency or + failed query information. + + Returns: + List of + :class:`ActorState `. + + Raises: + Exceptions: :class:`RayStateApiException ` if the CLI + failed to query the data. 
+ """ # noqa: E501 + return StateApiClient(address=address).list( + StateResource.ACTORS, + options=ListApiOptions( + limit=limit, + timeout=timeout, + filters=filters, + detail=detail, + ), + raise_on_missing_output=raise_on_missing_output, + _explain=_explain, + ) + + +@DeveloperAPI +def list_placement_groups( + address: Optional[str] = None, + filters: Optional[List[Tuple[str, PredicateType, SupportedFilterType]]] = None, + limit: int = DEFAULT_LIMIT, + timeout: int = DEFAULT_RPC_TIMEOUT, + detail: bool = False, + raise_on_missing_output: bool = True, + _explain: bool = False, +) -> List[PlacementGroupState]: + """List placement groups in the cluster. + + Args: + address: Ray bootstrap address, could be `auto`, `localhost:6379`. + If None, it will be resolved automatically from an initialized ray. + filters: List of tuples of filter key, predicate (=, or !=), and + the filter value. E.g., `("state", "=", "abcd")` + limit: Max number of entries returned by the state backend. + timeout: Max timeout value for the state APIs requests made. + detail: When True, more details info (specified in `PlacementGroupState`) + will be queried and returned. See + :class:`~ray.util.state.common.PlacementGroupState`. + raise_on_missing_output: When True, exceptions will be raised if + there is missing data due to truncation/data source unavailable. + _explain: Print the API information such as API latency or + failed query information. + + Returns: + List of :class:`~ray.util.state.common.PlacementGroupState`. + + Raises: + Exceptions: :class:`RayStateApiException ` if the CLI + failed to query the data. 
+ """ # noqa: E501 + return StateApiClient(address=address).list( + StateResource.PLACEMENT_GROUPS, + options=ListApiOptions( + limit=limit, timeout=timeout, filters=filters, detail=detail + ), + raise_on_missing_output=raise_on_missing_output, + _explain=_explain, + ) + + +@DeveloperAPI +def list_nodes( + address: Optional[str] = None, + filters: Optional[List[Tuple[str, PredicateType, SupportedFilterType]]] = None, + limit: int = DEFAULT_LIMIT, + timeout: int = DEFAULT_RPC_TIMEOUT, + detail: bool = False, + raise_on_missing_output: bool = True, + _explain: bool = False, +) -> List[NodeState]: + """List nodes in the cluster. + + Args: + address: Ray bootstrap address, could be `auto`, `localhost:6379`. + If None, it will be resolved automatically from an initialized ray. + filters: List of tuples of filter key, predicate (=, or !=), and + the filter value. E.g., `("node_name", "=", "abcd")` + limit: Max number of entries returned by the state backend. + timeout: Max timeout value for the state APIs requests made. + detail: When True, more details info (specified in `NodeState`) + will be queried and returned. See + :class:`NodeState `. + raise_on_missing_output: When True, exceptions will be raised if + there is missing data due to truncation/data source unavailable. + _explain: Print the API information such as API latency or + failed query information. + + Returns: + List of dictionarified + :class:`NodeState `. + + Raises: + Exceptions: :class:`RayStateApiException ` + if the CLI failed to query the data. 
+ """ # noqa: E501 + return StateApiClient(address=address).list( + StateResource.NODES, + options=ListApiOptions( + limit=limit, timeout=timeout, filters=filters, detail=detail + ), + raise_on_missing_output=raise_on_missing_output, + _explain=_explain, + ) + + +@DeveloperAPI +def list_jobs( + address: Optional[str] = None, + filters: Optional[List[Tuple[str, PredicateType, SupportedFilterType]]] = None, + limit: int = DEFAULT_LIMIT, + timeout: int = DEFAULT_RPC_TIMEOUT, + detail: bool = False, + raise_on_missing_output: bool = True, + _explain: bool = False, +) -> List[JobState]: + """List jobs submitted to the cluster by :ref: `ray job submission `. + + Args: + address: Ray bootstrap address, could be `auto`, `localhost:6379`. + If None, it will be resolved automatically from an initialized ray. + filters: List of tuples of filter key, predicate (=, or !=), and + the filter value. E.g., `("status", "=", "abcd")` + limit: Max number of entries returned by the state backend. + timeout: Max timeout value for the state APIs requests made. + detail: When True, more details info (specified in `JobState`) + will be queried and returned. See + :class:`JobState `. + raise_on_missing_output: When True, exceptions will be raised if + there is missing data due to truncation/data source unavailable. + _explain: Print the API information such as API latency or + failed query information. + + Returns: + List of dictionarified + :class:`JobState `. + + Raises: + Exceptions: :class:`RayStateApiException ` if the CLI + failed to query the data. 
+ """ # noqa: E501 + return StateApiClient(address=address).list( + StateResource.JOBS, + options=ListApiOptions( + limit=limit, timeout=timeout, filters=filters, detail=detail + ), + raise_on_missing_output=raise_on_missing_output, + _explain=_explain, + ) + + +@DeveloperAPI +def list_workers( + address: Optional[str] = None, + filters: Optional[List[Tuple[str, PredicateType, SupportedFilterType]]] = None, + limit: int = DEFAULT_LIMIT, + timeout: int = DEFAULT_RPC_TIMEOUT, + detail: bool = False, + raise_on_missing_output: bool = True, + _explain: bool = False, +) -> List[WorkerState]: + """List workers in the cluster. + + Args: + address: Ray bootstrap address, could be `auto`, `localhost:6379`. + If None, it will be resolved automatically from an initialized ray. + filters: List of tuples of filter key, predicate (=, or !=), and + the filter value. E.g., `("is_alive", "=", "True")` + limit: Max number of entries returned by the state backend. + timeout: Max timeout value for the state APIs requests made. + detail: When True, more details info (specified in `WorkerState`) + will be queried and returned. See + :class:`WorkerState `. + raise_on_missing_output: When True, exceptions will be raised if + there is missing data due to truncation/data source unavailable. + _explain: Print the API information such as API latency or + failed query information. + + Returns: + List of + :class:`WorkerState `. + + Raises: + Exceptions: :class:`RayStateApiException ` if the CLI + failed to query the data. 
+ """ # noqa: E501 + return StateApiClient(address=address).list( + StateResource.WORKERS, + options=ListApiOptions( + limit=limit, timeout=timeout, filters=filters, detail=detail + ), + raise_on_missing_output=raise_on_missing_output, + _explain=_explain, + ) + + +@DeveloperAPI +def list_tasks( + address: Optional[str] = None, + filters: Optional[List[Tuple[str, PredicateType, SupportedFilterType]]] = None, + limit: int = DEFAULT_LIMIT, + timeout: int = DEFAULT_RPC_TIMEOUT, + detail: bool = False, + raise_on_missing_output: bool = True, + _explain: bool = False, +) -> List[TaskState]: + """List tasks in the cluster. + + Args: + address: Ray bootstrap address, could be `auto`, `localhost:6379`. + If None, it will be resolved automatically from an initialized ray. + filters: List of tuples of filter key, predicate (=, or !=), and + the filter value. E.g., `("is_alive", "=", "True")` + limit: Max number of entries returned by the state backend. + timeout: Max timeout value for the state APIs requests made. + detail: When True, more details info (specified in `WorkerState`) + will be queried and returned. See + :class:`WorkerState `. + raise_on_missing_output: When True, exceptions will be raised if + there is missing data due to truncation/data source unavailable. + _explain: Print the API information such as API latency or + failed query information. + + Returns: + List of + :class:`TaskState `. + + Raises: + Exceptions: :class:`RayStateApiException ` if the CLI + failed to query the data. 
+ """ # noqa: E501 + return StateApiClient(address=address).list( + StateResource.TASKS, + options=ListApiOptions( + limit=limit, timeout=timeout, filters=filters, detail=detail + ), + raise_on_missing_output=raise_on_missing_output, + _explain=_explain, + ) + + +@DeveloperAPI +def list_objects( + address: Optional[str] = None, + filters: Optional[List[Tuple[str, PredicateType, SupportedFilterType]]] = None, + limit: int = DEFAULT_LIMIT, + timeout: int = DEFAULT_RPC_TIMEOUT, + detail: bool = False, + raise_on_missing_output: bool = True, + _explain: bool = False, +) -> List[ObjectState]: + """List objects in the cluster. + + Args: + address: Ray bootstrap address, could be `auto`, `localhost:6379`. + If None, it will be resolved automatically from an initialized ray. + filters: List of tuples of filter key, predicate (=, or !=), and + the filter value. E.g., `("ip", "=", "0.0.0.0")` + limit: Max number of entries returned by the state backend. + timeout: Max timeout value for the state APIs requests made. + detail: When True, more details info (specified in `ObjectState`) + will be queried and returned. See + :class:`ObjectState `. + raise_on_missing_output: When True, exceptions will be raised if + there is missing data due to truncation/data source unavailable. + _explain: Print the API information such as API latency or + failed query information. + + Returns: + List of + :class:`ObjectState `. + + Raises: + Exceptions: :class:`RayStateApiException ` if the CLI + failed to query the data. 
+ """ # noqa: E501 + return StateApiClient(address=address).list( + StateResource.OBJECTS, + options=ListApiOptions( + limit=limit, timeout=timeout, filters=filters, detail=detail + ), + raise_on_missing_output=raise_on_missing_output, + _explain=_explain, + ) + + +@DeveloperAPI +def list_runtime_envs( + address: Optional[str] = None, + filters: Optional[List[Tuple[str, PredicateType, SupportedFilterType]]] = None, + limit: int = DEFAULT_LIMIT, + timeout: int = DEFAULT_RPC_TIMEOUT, + detail: bool = False, + raise_on_missing_output: bool = True, + _explain: bool = False, +) -> List[RuntimeEnvState]: + """List runtime environments in the cluster. + + Args: + address: Ray bootstrap address, could be `auto`, `localhost:6379`. + If None, it will be resolved automatically from an initialized ray. + filters: List of tuples of filter key, predicate (=, or !=), and + the filter value. E.g., `("node_id", "=", "abcdef")` + limit: Max number of entries returned by the state backend. + timeout: Max timeout value for the state APIs requests made. + detail: When True, more details info (specified in `RuntimeEnvState`) + will be queried and returned. See + :class:`RuntimeEnvState `. + raise_on_missing_output: When True, exceptions will be raised if + there is missing data due to truncation/data source unavailable. + _explain: Print the API information such as API latency or + failed query information. + + Returns: + List of + :class:`RuntimeEnvState `. + + Raises: + Exceptions: :class:`RayStateApiException ` + if the CLI failed to query the data. 
+ """ # noqa: E501 + return StateApiClient(address=address).list( + StateResource.RUNTIME_ENVS, + options=ListApiOptions( + limit=limit, timeout=timeout, filters=filters, detail=detail + ), + raise_on_missing_output=raise_on_missing_output, + _explain=_explain, + ) + + +@DeveloperAPI +def list_cluster_events( + address: Optional[str] = None, + filters: Optional[List[Tuple[str, PredicateType, SupportedFilterType]]] = None, + limit: int = DEFAULT_LIMIT, + timeout: int = DEFAULT_RPC_TIMEOUT, + detail: bool = False, + raise_on_missing_output: bool = True, + _explain: bool = False, +) -> List[Dict]: + return StateApiClient(address=address).list( + StateResource.CLUSTER_EVENTS, + options=ListApiOptions( + limit=limit, timeout=timeout, filters=filters, detail=detail + ), + raise_on_missing_output=raise_on_missing_output, + _explain=_explain, + ) + + +""" +Log APIs +""" + + +@DeveloperAPI +def get_log( + address: Optional[str] = None, + node_id: Optional[str] = None, + node_ip: Optional[str] = None, + filename: Optional[str] = None, + actor_id: Optional[str] = None, + task_id: Optional[str] = None, + pid: Optional[int] = None, + follow: bool = False, + tail: int = -1, + timeout: int = DEFAULT_RPC_TIMEOUT, + suffix: str = "out", + encoding: Optional[str] = "utf-8", + errors: Optional[str] = "strict", + submission_id: Optional[str] = None, + attempt_number: int = 0, + _interval: Optional[float] = None, +) -> Generator[str, None, None]: + """Retrieve log file based on file name or some entities ids (pid, actor id, task id). 
+ + Examples: + >>> import ray + >>> from ray.util.state import get_log # doctest: +SKIP + # To connect to an existing ray instance if there is + >>> ray.init("auto") # doctest: +SKIP + # Node IP could be retrieved from list_nodes() or ray.nodes() + >>> node_ip = "172.31.47.143" # doctest: +SKIP + >>> filename = "gcs_server.out" # doctest: +SKIP + >>> for l in get_log(filename=filename, node_ip=node_ip): # doctest: +SKIP + >>> print(l) # doctest: +SKIP + + Args: + address: Ray bootstrap address, could be `auto`, `localhost:6379`. + If not specified, it will be retrieved from the initialized ray cluster. + node_id: Id of the node containing the logs . + node_ip: Ip of the node containing the logs. (At least one of the node_id and + node_ip have to be supplied when identifying a node). + filename: Name of the file (relative to the ray log directory) to be retrieved. + actor_id: Id of the actor if getting logs from an actor. + task_id: Id of the task if getting logs generated by a task. + pid: PID of the worker if getting logs generated by a worker. When querying + with pid, either node_id or node_ip must be supplied. + follow: When set to True, logs will be streamed and followed. + tail: Number of lines to get from the end of the log file. Set to -1 for getting + the entire log. + timeout: Max timeout for requests made when getting the logs. + suffix: The suffix of the log file if query by id of tasks/workers/actors. Default to "out". + encoding: The encoding used to decode the content of the log file. Default is + "utf-8". Use None to get binary data directly. + errors: The error handling scheme to use for decoding errors. Default is + "strict". See https://docs.python.org/3/library/codecs.html#error-handlers + submission_id: Job submission ID if getting log from a submission job. + attempt_number: The attempt number of the task if getting logs generated by a task. + _interval: The interval in secs to print new logs when `follow=True`. 
+ + Return: + A Generator of log line, None for SendType and ReturnType. + + Raises: + Exceptions: :class:`RayStateApiException ` if the CLI + failed to query the data. + """ # noqa: E501 + + api_server_url = ray_address_to_api_server_url(address) + media_type = "stream" if follow else "file" + + options = GetLogOptions( + node_id=node_id, + node_ip=node_ip, + filename=filename, + actor_id=actor_id, + task_id=task_id, + pid=pid, + lines=tail, + interval=_interval, + media_type=media_type, + timeout=timeout, + suffix=suffix, + submission_id=submission_id, + attempt_number=attempt_number, + ) + options_dict = {} + for field in fields(options): + option_val = getattr(options, field.name) + if option_val is not None: + options_dict[field.name] = option_val + + with requests.get( + f"{api_server_url}/api/v0/logs/{media_type}?" + f"{urllib.parse.urlencode(options_dict)}", + stream=True, + ) as r: + if r.status_code != 200: + raise RayStateApiException(r.text) + for bytes in r.iter_content(chunk_size=None): + bytes = bytearray(bytes) + # First byte 1 means success. + if bytes.startswith(b"1"): + bytes.pop(0) + logs = bytes + if encoding is not None: + logs = bytes.decode(encoding=encoding, errors=errors) + else: + assert bytes.startswith(b"0") + error_msg = bytes.decode("utf-8") + raise RayStateApiException(error_msg) + yield logs + + +@DeveloperAPI +def list_logs( + address: Optional[str] = None, + node_id: Optional[str] = None, + node_ip: Optional[str] = None, + glob_filter: Optional[str] = None, + timeout: int = DEFAULT_RPC_TIMEOUT, +) -> Dict[str, List[str]]: + """Listing log files available. + + Args: + address: Ray bootstrap address, could be `auto`, `localhost:6379`. + If not specified, it will be retrieved from the initialized ray cluster. + node_id: Id of the node containing the logs. + node_ip: Ip of the node containing the logs. + glob_filter: Name of the file (relative to the ray log directory) to be + retrieved. E.g. 
`glob_filter="*worker*"` for all worker logs. + actor_id: Id of the actor if getting logs from an actor. + timeout: Max timeout for requests made when getting the logs. + _interval: The interval in secs to print new logs when `follow=True`. + + Return: + A dictionary where the keys are log groups (e.g. gcs, raylet, worker), and + values are list of log filenames. + + Raises: + Exceptions: :class:`RayStateApiException ` if the CLI + failed to query the data, or ConnectionError if failed to resolve the + ray address. + """ # noqa: E501 + assert ( + node_ip is not None or node_id is not None + ), "At least one of node ip and node id is required" + + api_server_url = ray_address_to_api_server_url(address) + + if not glob_filter: + glob_filter = "*" + + options_dict = {} + if node_ip: + options_dict["node_ip"] = node_ip + if node_id: + options_dict["node_id"] = node_id + if glob_filter: + options_dict["glob"] = glob_filter + options_dict["timeout"] = timeout + + r = requests.get( + f"{api_server_url}/api/v0/logs?{urllib.parse.urlencode(options_dict)}" + ) + r.raise_for_status() + + response = r.json() + if response["result"] is False: + raise RayStateApiException( + "API server internal error. See dashboard.log file for more details. " + f"Error: {response['msg']}" + ) + return response["data"]["result"] + + +""" +Summary APIs +""" + + +@DeveloperAPI +def summarize_tasks( + address: Optional[str] = None, + timeout: int = DEFAULT_RPC_TIMEOUT, + raise_on_missing_output: bool = True, + _explain: bool = False, +) -> Dict: + """Summarize the tasks in cluster. + + Args: + address: Ray bootstrap address, could be `auto`, `localhost:6379`. + If None, it will be resolved automatically from an initialized ray. + timeout: Max timeout for requests made when getting the states. + raise_on_missing_output: When True, exceptions will be raised if + there is missing data due to truncation/data source unavailable. 
+ _explain: Print the API information such as API latency or
+ failed query information.
+
+ Return:
+ Dictionarified
+ :class:`~ray.util.state.common.TaskSummaries`
+
+ Raises:
+ Exceptions: :class:`RayStateApiException `
+ if the CLI failed to query the data.
+ """ # noqa: E501
+ return StateApiClient(address=address).summary(
+ SummaryResource.TASKS,
+ options=SummaryApiOptions(timeout=timeout),
+ raise_on_missing_output=raise_on_missing_output,
+ _explain=_explain,
+ )
+
+
+@DeveloperAPI
+def summarize_actors(
+ address: Optional[str] = None,
+ timeout: int = DEFAULT_RPC_TIMEOUT,
+ raise_on_missing_output: bool = True,
+ _explain: bool = False,
+) -> Dict:
+ """Summarize the actors in cluster.
+
+ Args:
+ address: Ray bootstrap address, could be `auto`, `localhost:6379`.
+ If None, it will be resolved automatically from an initialized ray.
+ timeout: Max timeout for requests made when getting the states.
+ raise_on_missing_output: When True, exceptions will be raised if
+ there is missing data due to truncation/data source unavailable.
+ _explain: Print the API information such as API latency or
+ failed query information.
+
+ Return:
+ Dictionarified
+ :class:`~ray.util.state.common.ActorSummaries`
+
+ Raises:
+ Exceptions: :class:`RayStateApiException ` if the CLI
+ failed to query the data.
+ """ # noqa: E501
+ return StateApiClient(address=address).summary(
+ SummaryResource.ACTORS,
+ options=SummaryApiOptions(timeout=timeout),
+ raise_on_missing_output=raise_on_missing_output,
+ _explain=_explain,
+ )
+
+
+@DeveloperAPI
+def summarize_objects(
+ address: Optional[str] = None,
+ timeout: int = DEFAULT_RPC_TIMEOUT,
+ raise_on_missing_output: bool = True,
+ _explain: bool = False,
+) -> Dict:
+ """Summarize the objects in cluster.
+
+ Args:
+ address: Ray bootstrap address, could be `auto`, `localhost:6379`.
+ If None, it will be resolved automatically from an initialized ray.
+ timeout: Max timeout for requests made when getting the states.
+ raise_on_missing_output: When True, exceptions will be raised if + there is missing data due to truncation/data source unavailable. + _explain: Print the API information such as API latency or + failed query information. + + Return: + Dictionarified :class:`~ray.util.state.common.ObjectSummaries` + + Raises: + Exceptions: :class:`RayStateApiException ` if the CLI + failed to query the data. + """ # noqa: E501 + return StateApiClient(address=address).summary( + SummaryResource.OBJECTS, + options=SummaryApiOptions(timeout=timeout), + raise_on_missing_output=raise_on_missing_output, + _explain=_explain, + ) diff --git a/python/ray/util/state/common.py b/python/ray/util/state/common.py new file mode 100644 index 000000000000..6a00d0cafb75 --- /dev/null +++ b/python/ray/util/state/common.py @@ -0,0 +1,1605 @@ +import datetime +import json +import logging +import sys +from abc import ABC +from dataclasses import asdict, field, fields +from enum import Enum, unique +from typing import Any, Dict, List, Optional, Set, Tuple, Union + +import ray.dashboard.utils as dashboard_utils +from ray._private.ray_constants import env_integer +from ray.core.generated.common_pb2 import TaskStatus, TaskType +from ray.core.generated.gcs_pb2 import TaskEvents +from ray.util.state.custom_types import ( + TypeActorStatus, + TypeNodeStatus, + TypePlacementGroupStatus, + TypeReferenceType, + TypeTaskStatus, + TypeTaskType, + TypeWorkerExitType, + TypeWorkerType, +) +from ray.util.state.exception import RayStateApiException + +try: + from pydantic.dataclasses import dataclass + + from ray.dashboard.modules.job.pydantic_models import JobDetails + +except ImportError: + # pydantic is not available in the dashboard. + # We will use the dataclass from the standard library. 
+ from dataclasses import dataclass + + JobDetails = object + + +logger = logging.getLogger(__name__) + +DEFAULT_RPC_TIMEOUT = 30 +DEFAULT_LIMIT = 100 +DEFAULT_LOG_LIMIT = 1000 + +# Max number of entries from API server to the client +RAY_MAX_LIMIT_FROM_API_SERVER = env_integer( + "RAY_MAX_LIMIT_FROM_API_SERVER", 10 * 1000 +) # 10k + +# Max number of entries from data sources (rest will be truncated at the +# data source, e.g. raylet) +RAY_MAX_LIMIT_FROM_DATA_SOURCE = env_integer( + "RAY_MAX_LIMIT_FROM_DATA_SOURCE", 10 * 1000 +) # 10k + + +@unique +class StateResource(Enum): + ACTORS = "actors" + JOBS = "jobs" + PLACEMENT_GROUPS = "placement_groups" + NODES = "nodes" + WORKERS = "workers" + TASKS = "tasks" + OBJECTS = "objects" + RUNTIME_ENVS = "runtime_envs" + CLUSTER_EVENTS = "cluster_events" + + +@unique +class SummaryResource(Enum): + ACTORS = "actors" + TASKS = "tasks" + OBJECTS = "objects" + + +SupportedFilterType = Union[str, bool, int, float] + + +PredicateType = str # Literal["=", "!="] + + +class Humanify: + """A class containing default methods to + convert units into a human readable string.""" + + def timestamp(x: float): + """Converts miliseconds to a datetime object.""" + return str(datetime.datetime.fromtimestamp(x / 1000)) + + def memory(x: int): + """Converts raw bytes to a human readable memory size.""" + if x >= 2**30: + return str(format(x / (2**30), ".3f")) + " GiB" + elif x >= 2**20: + return str(format(x / (2**20), ".3f")) + " MiB" + elif x >= 2**10: + return str(format(x / (2**10), ".3f")) + " KiB" + return str(format(x, ".3f")) + " B" + + def duration(x: int): + """Converts miliseconds to a human readable duration.""" + return str(datetime.timedelta(milliseconds=x)) + + def events(events: List[dict]): + """Converts a list of task events into a human readable format.""" + for event in events: + if "created_ms" in event: + event["created_ms"] = Humanify.timestamp(event["created_ms"]) + return events + + def node_resources(resources: dict): + 
"""Converts a node's resources into a human readable format.""" + for resource in resources: + if "memory" in resource: + resources[resource] = Humanify.memory(resources[resource]) + return resources + + +@dataclass(init=True) +class ListApiOptions: + # Maximum number of entries to return + limit: int = DEFAULT_LIMIT + # The timeout for the API call. + timeout: int = DEFAULT_RPC_TIMEOUT + # If True, more detailed output will be printed. + # The API could query more sources than detail == False + # to get more data in detail. + detail: bool = False + # Filters. Each tuple pair (key, predicate, value) means key predicate value. + # If there's more than 1 filter, it means AND. + # E.g., [(key, "=", val), (key2, "!=" val2)] means (key=val) AND (key2!=val2) + filters: Optional[List[Tuple[str, PredicateType, SupportedFilterType]]] = field( + default_factory=list + ) + # [only tasks] If driver tasks should be excluded. + exclude_driver: bool = True + # When the request is processed on the server side, + # we should apply multiplier so that server side can finish + # processing a request within timeout. Otherwise, + # timeout will always lead Http timeout. + server_timeout_multiplier: float = 0.8 + + def __post_init__(self): + # To return the data to users, when there's a partial failure + # we need to have a timeout that's smaller than the users' timeout. + # 80% is configured arbitrarily. + self.timeout = int(self.timeout * self.server_timeout_multiplier) + assert self.timeout != 0, "0 second timeout is not supported." + if self.filters is None: + self.filters = [] + + for filter in self.filters: + _, filter_predicate, _ = filter + if filter_predicate != "=" and filter_predicate != "!=": + raise ValueError( + f"Unsupported filter predicate {filter_predicate} is given. " + "Available predicates: =, !=." 
+ ) + + +@dataclass(init=True) +class GetApiOptions: + # Timeout for the HTTP request + timeout: int = DEFAULT_RPC_TIMEOUT + + +@dataclass(init=True) +class SummaryApiOptions: + # Timeout for the HTTP request + timeout: int = DEFAULT_RPC_TIMEOUT + + # Filters. Each tuple pair (key, predicate, value) means key predicate value. + # If there's more than 1 filter, it means AND. + # E.g., [(key, "=", val), (key2, "!=" val2)] means (key=val) AND (key2!=val2) + # For summary endpoints that call list under the hood, we'll pass + # these filters directly into the list call. + filters: Optional[List[Tuple[str, PredicateType, SupportedFilterType]]] = field( + default_factory=list + ) + + # Change out to summarize the output. There is a summary_by value for each entity. + # Tasks: by func_name + # Actors: by class + # Objects: by callsite + summary_by: Optional[str] = None + + +def state_column(*, filterable: bool, detail: bool = False, format_fn=None, **kwargs): + """A wrapper around dataclass.field to add additional metadata. + + The metadata is used to define detail / filterable option of + each column. + + Args: + detail: If True, the column is used when detail == True + filterable: If True, the column can be used for filtering. + kwargs: The same kwargs for the `dataclasses.field` function. + """ + m = {"detail": detail, "filterable": filterable, "format_fn": format_fn} + # Default for detail field is None since it could be missing. + if detail and "default" not in kwargs: + kwargs["default"] = None + + if "metadata" in kwargs: + # Metadata explicitly specified, so add detail and filterable if missing. + kwargs["metadata"].update(m) + else: + # Metadata not explicitly specified, so add it. + kwargs["metadata"] = m + return field(**kwargs) + + +class StateSchema(ABC): + """Schema class for Ray resource abstraction. + + The child class must be dataclass. All child classes + - perform runtime type checking upon initialization. 
+ - are supposed to use `state_column` instead of `field`. + It will allow the class to return filterable/detail columns. + If `state_column` is not specified, that column is not filterable + and for non-detail output. + + For example, + ``` + @dataclass + class State(StateSchema): + column_a: str + column_b: int = state_column(detail=True, filterable=True) + + s = State(column_a="abc", b=1) + # Returns {"column_b"} + s.filterable_columns() + # Returns {"column_a"} + s.base_columns() + # Returns {"column_a", "column_b"} + s.columns() + ``` + + In addition, the schema also provides a humanify abstract method to + convert the state object into something human readable, ready for printing. + + Subclasses should override this method, providing logic to convert its own fields + to something human readable, packaged and returned in a dict. + + Each field that wants to be humanified should include a 'format_fn' key in its + metadata dictionary. + """ + + @classmethod + def humanify(cls, state: dict) -> dict: + """Convert the given state object into something human readable.""" + for f in fields(cls): + if ( + f.metadata.get("format_fn") is not None + and f.name in state + and state[f.name] is not None + ): + try: + state[f.name] = f.metadata["format_fn"](state[f.name]) + except Exception as e: + logger.error(f"Failed to format {f.name}:{state[f.name]} with {e}") + return state + + @classmethod + def list_columns(cls, detail: bool = True) -> List[str]: + """Return a list of columns.""" + cols = [] + for f in fields(cls): + if detail: + cols.append(f.name) + elif not f.metadata.get("detail", False): + cols.append(f.name) + + return cols + + @classmethod + def columns(cls) -> Set[str]: + """Return a set of all columns.""" + return set(cls.list_columns()) + + @classmethod + def filterable_columns(cls) -> Set[str]: + """Return a list of filterable columns""" + filterable = set() + for f in fields(cls): + if f.metadata.get("filterable", False): + filterable.add(f.name) + return 
filterable + + @classmethod + def base_columns(cls) -> Set[str]: + """Return a list of base columns. + + Base columns mean columns to return when detail == False. + """ + return set(cls.list_columns(detail=False)) + + @classmethod + def detail_columns(cls) -> Set[str]: + """Return a list of detail columns. + + Detail columns mean columns to return when detail == True. + """ + return set(cls.list_columns(detail=True)) + + def asdict(self): + return asdict(self) + + # Allow dict like access on the class directly for backward compatibility. + def __getitem__(self, key): + return getattr(self, key) + + def __setitem__(self, key, value): + setattr(self, key, value) + + def get(self, key, default=None): + return getattr(self, key, default) + + +def filter_fields(data: dict, state_dataclass: StateSchema, detail: bool) -> dict: + """Filter the given data's columns based on the given schema. + + Args: + data: A single data entry to filter columns. + state_dataclass: The schema to filter data. + detail: Whether or not it should include columns for detail output. + """ + filtered_data = {} + columns = state_dataclass.columns() if detail else state_dataclass.base_columns() + for col in columns: + if col in data: + filtered_data[col] = data[col] + else: + filtered_data[col] = None + return filtered_data + + +@dataclass(init=True) +class GetLogOptions: + timeout: int + node_id: Optional[str] = None + node_ip: Optional[str] = None + # One of {file, stream}. File means it will return the whole log. + # stream means it will keep the connection and streaming the log. + media_type: str = "file" + # The file name of the log. + filename: Optional[str] = None + # The actor id of the log. It is used only for worker logs. + actor_id: Optional[str] = None + # The task id of the log. + task_id: Optional[str] = None + # The attempt number of the task. + attempt_number: int = 0 + # The pid of the log. It is used only for worker logs. + pid: Optional[int] = None + # Total log lines to return. 
+ lines: int = 1000 + # The interval where new logs are streamed to. + # Should be used only when media_type == stream. + interval: Optional[float] = None + # The suffix of the log file if file resolution not through filename directly. + # Default to "out". + suffix: str = "out" + # The job submission id for submission job. This doesn't work for driver job + # since Ray doesn't log driver logs to file in the ray logs directory. + submission_id: Optional[str] = None + + def __post_init__(self): + if self.pid: + self.pid = int(self.pid) + if self.interval: + self.interval = float(self.interval) + self.lines = int(self.lines) + + if self.media_type == "file": + assert self.interval is None + if self.media_type not in ["file", "stream"]: + raise ValueError(f"Invalid media type: {self.media_type}") + if not (self.node_id or self.node_ip) and not (self.actor_id or self.task_id): + raise ValueError( + "node_id or node_ip must be provided as constructor arguments when no " + "actor or task_id is supplied as arguments." + ) + if self.node_id and self.node_ip: + raise ValueError( + "Both node_id and node_ip are given. Only one of them can be provided. " + f"Given node id: {self.node_id}, given node ip: {self.node_ip}" + ) + if not ( + self.actor_id + or self.task_id + or self.pid + or self.filename + or self.submission_id + ): + raise ValueError( + "None of actor_id, task_id, pid, submission_id or filename " + "is provided. At least one of them is required to fetch logs." + ) + + if self.suffix not in ["out", "err"]: + raise ValueError( + f"Invalid suffix: {self.suffix}. Must be one of 'out' or 'err'." + ) + + +# See the ActorTableData message in gcs.proto for all potential options that +# can be included in this class. +@dataclass(init=True) +class ActorState(StateSchema): + """Actor State""" + + #: The id of the actor. + actor_id: str = state_column(filterable=True) + #: The class name of the actor. 
+ class_name: str = state_column(filterable=True)
+ #: The state of the actor.
+ #:
+ #: - DEPENDENCIES_UNREADY: Actor is waiting for dependency to be ready.
+ #: E.g., a new actor is waiting for object ref that's created from
+ #: other remote task.
+ #: - PENDING_CREATION: Actor's dependency is ready, but it is not created yet.
+ #: It could be because there are not enough resources, too many actor
+ #: entries in the scheduler queue, or the actor creation is slow
+ #: (e.g., slow runtime environment creation,
+ #: slow worker startup, or etc.).
+ #: - ALIVE: The actor is created, and it is alive.
+ #: - RESTARTING: The actor is dead, and it is restarting.
+ #: It is equivalent to `PENDING_CREATION`,
+ #: but means the actor was dead more than once.
+ #: - DEAD: The actor is permanently dead.
+ state: TypeActorStatus = state_column(filterable=True)
+ #: The job id of this actor.
+ job_id: str = state_column(filterable=True)
+ #: The name of the actor given by the `name` argument.
+ name: Optional[str] = state_column(filterable=True)
+ #: The node id of this actor.
+ #: If the actor is restarting, it could be the node id
+ #: of the dead actor (and it will be re-updated when
+ #: the actor is successfully restarted).
+ node_id: Optional[str] = state_column(filterable=True)
+ #: The pid of the actor. 0 if it is not created yet.
+ pid: Optional[int] = state_column(filterable=True)
+ #: The namespace of the actor.
+ ray_namespace: Optional[str] = state_column(filterable=True)
+ #: The runtime environment information of the actor.
+ serialized_runtime_env: Optional[str] = state_column(filterable=False, detail=True)
+ #: The resource requirement of the actor.
+ required_resources: Optional[dict] = state_column(filterable=False, detail=True)
+ #: Actor's death information in detail. None if the actor is not dead yet.
+ death_cause: Optional[dict] = state_column(filterable=False, detail=True)
+ #: True if the actor is detached. False otherwise.
+ is_detached: Optional[bool] = state_column(filterable=False, detail=True)
+ #: The placement group id that's associated with this actor.
+ placement_group_id: Optional[str] = state_column(detail=True, filterable=True)
+ #: Actor's repr name if a customized __repr__ method exists, else empty string.
+ repr_name: Optional[str] = state_column(detail=True, filterable=True)
+
+
+@dataclass(init=True)
+class PlacementGroupState(StateSchema):
+ """PlacementGroup State"""
+
+ #: The id of the placement group.
+ placement_group_id: str = state_column(filterable=True)
+ #: The name of the placement group if it is given by the name argument.
+ name: str = state_column(filterable=True)
+ #: The job id of the placement group.
+ creator_job_id: str = state_column(filterable=True)
+ #: The state of the placement group.
+ #:
+ #: - PENDING: The placement group creation is pending scheduling.
+ #: It could be because there are not enough resources, some of creation
+ #: stage has failed (e.g., failed to commit placement groups because
+ #: the node is dead).
+ #: - CREATED: The placement group is created.
+ #: - REMOVED: The placement group is removed.
+ #: - RESCHEDULING: The placement group is rescheduling because some of
+ #: bundles are dead because they were on dead nodes.
+ state: TypePlacementGroupStatus = state_column(filterable=True)
+ #: The bundle specification of the placement group.
+ bundles: Optional[List[dict]] = state_column(filterable=False, detail=True)
+ #: True if the placement group is detached. False otherwise.
+ is_detached: Optional[bool] = state_column(filterable=True, detail=True)
+ #: The scheduling stats of the placement group.
+ stats: Optional[dict] = state_column(filterable=False, detail=True)
+
+
+@dataclass(init=True)
+class NodeState(StateSchema):
+ """Node State"""
+
+ #: The id of the node.
+ node_id: str = state_column(filterable=True)
+ #: The ip address of the node.
+ node_ip: str = state_column(filterable=True)
+ #: If this is a head node.
+ is_head_node: bool = state_column(filterable=True) + #: The state of the node. + #: + #: ALIVE: The node is alive. + #: DEAD: The node is dead. + state: TypeNodeStatus = state_column(filterable=True) + #: The name of the node if it is given by the name argument. + node_name: str = state_column(filterable=True) + #: The total resources of the node. + resources_total: dict = state_column( + filterable=False, format_fn=Humanify.node_resources + ) + #: The time when the node (raylet) starts. + start_time_ms: Optional[int] = state_column( + filterable=False, detail=True, format_fn=Humanify.timestamp + ) + #: The time when the node exits. The timestamp could be delayed + #: if the node is dead unexpectedly (could be delayed + # up to 30 seconds). + end_time_ms: Optional[int] = state_column( + filterable=False, detail=True, format_fn=Humanify.timestamp + ) + + +# NOTE: +# Declaring this as dataclass would make __init__ not being called properly. +class JobState(StateSchema, JobDetails): + """The state of the job that's submitted by Ray's Job APIs or driver jobs""" + + def __init__(self, **kwargs): + JobDetails.__init__(self, **kwargs) + + @classmethod + def filterable_columns(cls) -> Set[str]: + # We are not doing any filtering since filtering is currently done + # at the backend. + return {"job_id", "type", "status", "submission_id"} + + @classmethod + def humanify(cls, state: dict) -> dict: + return state + + @classmethod + def list_columns(cls, detail: bool = False) -> List[str]: + if not detail: + return [ + "job_id", + "submission_id", + "entrypoint", + "type", + "status", + "message", + "error_type", + "driver_info", + ] + if isinstance(JobDetails, object): + # We don't have pydantic in the dashboard. This is because + # we call this method at module import time, so we need to + # check if the class is a pydantic model. 
+ return [] + + return JobDetails.__fields__ + + def asdict(self): + return JobDetails.dict(self) + + @classmethod + def schema_dict(cls) -> Dict[str, Any]: + schema_types = cls.schema()["properties"] + # Get type name to actual type mapping. + return { + k: v["type"] for k, v in schema_types.items() if v.get("type") is not None + } + + +@dataclass(init=True) +class WorkerState(StateSchema): + """Worker State""" + + #: The id of the worker. + worker_id: str = state_column(filterable=True) + #: Whether or not if the worker is alive. + is_alive: bool = state_column(filterable=True) + #: The type of the worker. + #: + #: - WORKER: The regular Ray worker process that executes tasks or + # instantiates an actor. + #: - DRIVER: The driver (Python script that calls `ray.init`). + #: - SPILL_WORKER: The worker that spills objects. + #: - RESTORE_WORKER: The worker that restores objects. + worker_type: TypeWorkerType = state_column(filterable=True) + #: The exit type of the worker if the worker is dead. + #: + #: - SYSTEM_ERROR: Worker exit due to system level failures (i.e. worker crash). + #: - INTENDED_SYSTEM_EXIT: System-level exit that is intended. E.g., + #: Workers are killed because they are idle for a long time. + #: - USER_ERROR: Worker exits because of user error. + #: E.g., execptions from the actor initialization. + #: - INTENDED_USER_EXIT: Intended exit from users (e.g., users exit + #: workers with exit code 0 or exit initated by Ray API such as ray.kill). + exit_type: Optional[TypeWorkerExitType] = state_column(filterable=True) + #: The node id of the worker. + node_id: str = state_column(filterable=True) + #: The ip address of the worker. + ip: str = state_column(filterable=True) + #: The pid of the worker. + pid: int = state_column(filterable=True) + #: The exit detail of the worker if the worker is dead. + exit_detail: Optional[str] = state_column(detail=True, filterable=False) + #: The time worker is first launched. + #: -1 if the value doesn't exist. 
+ #: The lifecycle of worker is as follows:
+ #: worker_launch_time_ms (process startup requested).
+ #: -> worker_launched_time_ms (process started).
+ #: -> start_time_ms (worker is ready to be used).
+ #: -> end_time_ms (worker is destroyed).
+ worker_launch_time_ms: Optional[int] = state_column(
+ filterable=False, detail=True, format_fn=Humanify.timestamp
+ )
+ #: The time worker is successfully launched
+ #: -1 if the value doesn't exist.
+ worker_launched_time_ms: Optional[int] = state_column(
+ filterable=False, detail=True, format_fn=Humanify.timestamp
+ )
+ #: The time when the worker is started and initialized.
+ #: 0 if the value doesn't exist.
+ start_time_ms: Optional[int] = state_column(
+ filterable=False, detail=True, format_fn=Humanify.timestamp
+ )
+ #: The time when the worker exits. The timestamp could be delayed
+ #: if the worker is dead unexpectedly.
+ #: 0 if the value doesn't exist.
+ end_time_ms: Optional[int] = state_column(
+ filterable=False, detail=True, format_fn=Humanify.timestamp
+ )
+
+
+@dataclass(init=True)
+class ClusterEventState(StateSchema):
+ severity: str = state_column(filterable=True)
+ time: str = state_column(filterable=False)
+ source_type: str = state_column(filterable=True)
+ message: str = state_column(filterable=False)
+ event_id: str = state_column(filterable=True)
+ custom_fields: Optional[dict] = state_column(filterable=False, detail=True)
+
+
+@dataclass(init=True)
+class TaskState(StateSchema):
+ """Task State"""
+
+ #: The id of the task.
+ task_id: str = state_column(filterable=True)
+ #: The attempt (retry) number of the task.
+ attempt_number: int = state_column(filterable=True)
+ #: The name of the task if it is given by the name argument.
+ name: str = state_column(filterable=True)
+ #: The state of the task.
+ #:
+ #: Refer to src/ray/protobuf/common.proto for a detailed explanation of the state
+ #: breakdowns and typical state transition flow.
+ #:
+ state: TypeTaskStatus = state_column(filterable=True)
+ #: The job id of this task.
+ job_id: str = state_column(filterable=True)
+ #: The actor id that's associated with this task.
+ #: It is empty if there's no relevant actors.
+ actor_id: Optional[str] = state_column(filterable=True)
+ #: The type of the task.
+ #:
+ #: - NORMAL_TASK: Tasks created by `func.remote()`
+ #: - ACTOR_CREATION_TASK: Actors created by `class.remote()`
+ #: - ACTOR_TASK: Actor tasks submitted by `actor.method.remote()`
+ #: - DRIVER_TASK: Driver (A script that calls `ray.init`).
+ type: TypeTaskType = state_column(filterable=True)
+ #: The name of the task. It is the name of the function
+ #: if the type is a task or an actor task.
+ #: It is the name of the class if it is an actor scheduling task.
+ func_or_class_name: str = state_column(filterable=True)
+ #: The parent task id. If the parent is a normal task, it will be the task's id.
+ #: If the parent runs in a concurrent actor (async actor or threaded actor),
+ #: it will be the actor's creation task id.
+ parent_task_id: str = state_column(filterable=True)
+ #: Id of the node that runs the task. If the task is retried, it could
+ #: contain the node id of the previous executed task.
+ #: If empty, it means the task hasn't been scheduled yet.
+ node_id: Optional[str] = state_column(filterable=True)
+ #: The worker id that's associated with this task.
+ worker_id: Optional[str] = state_column(filterable=True)
+ #: Task error type.
+ error_type: Optional[str] = state_column(filterable=True)
+ #: The language of the task. E.g., Python, Java, or Cpp.
+ language: Optional[str] = state_column(detail=True, filterable=True)
+ #: The required resources to execute the task.
+ required_resources: Optional[dict] = state_column(detail=True, filterable=False)
+ #: The runtime environment information for the task.
+ runtime_env_info: Optional[dict] = state_column(detail=True, filterable=False) + #: The placement group id that's associated with this task. + placement_group_id: Optional[str] = state_column(detail=True, filterable=True) + #: The list of events of the given task. + #: Refer to src/ray/protobuf/common.proto for a detailed explanation of the state + #: breakdowns and typical state transition flow. + events: Optional[List[dict]] = state_column( + detail=True, filterable=False, format_fn=Humanify.events + ) + #: The list of profile events of the given task. + profiling_data: Optional[dict] = state_column(detail=True, filterable=False) + #: The time when the task is created. A Unix timestamp in ms. + creation_time_ms: Optional[int] = state_column( + detail=True, + filterable=False, + format_fn=Humanify.timestamp, + ) + #: The time when the task starts to run. A Unix timestamp in ms. + start_time_ms: Optional[int] = state_column( + detail=True, + filterable=False, + format_fn=Humanify.timestamp, + ) + #: The time when the task is finished or failed. A Unix timestamp in ms. + end_time_ms: Optional[int] = state_column( + detail=True, filterable=False, format_fn=Humanify.timestamp + ) + #: The task logs info, e.g. offset into the worker log file when the task + #: starts/finishes. + task_log_info: Optional[dict] = state_column(detail=True, filterable=False) + #: Task error detail info. + error_message: Optional[str] = state_column(detail=True, filterable=False) + + +@dataclass(init=True) +class ObjectState(StateSchema): + """Object State""" + + #: The id of the object. + object_id: str = state_column(filterable=True) + #: The size of the object in mb. + object_size: int = state_column(filterable=True, format_fn=Humanify.memory) + #: The status of the task that creates the object. + #: + #: - NIL: We don't have a status for this task because we are not the owner or the + #: task metadata has already been deleted. 
+ #: - WAITING_FOR_DEPENDENCIES: The task is waiting for its dependencies
+ #: to be created.
+ #: - SCHEDULED: All dependencies have been created and the task is
+ #: scheduled to execute.
+ #: It could be because the task is waiting for resources,
+ #: runtime environment creation, fetching dependencies to the
+ #: local node, etc.
+ #: - FINISHED: The task finished successfully.
+ #: - WAITING_FOR_EXECUTION: The task is scheduled properly and
+ #: waiting for execution. It includes time to deliver the task
+ #: to the remote worker + queueing time from the execution side.
+ #: - RUNNING: The task that is running.
+ task_status: TypeTaskStatus = state_column(filterable=True)
+ #: The reference type of the object.
+ #: See :ref:`Debugging with Ray Memory ` for more details.
+ #:
+ #: - ACTOR_HANDLE: The reference is an actor handle.
+ #: - PINNED_IN_MEMORY: The object is pinned in memory, meaning there's
+ #: in-flight `ray.get` on this reference.
+ #: - LOCAL_REFERENCE: There's a local reference (e.g., Python reference)
+ #: to this object reference. The object won't be GC'ed until all of them are gone.
+ #: - USED_BY_PENDING_TASK: The object reference is passed to other tasks. E.g.,
+ #: `a = ray.put()` -> `task.remote(a)`. In this case, a is used by a
+ #: pending task `task`.
+ #: - CAPTURED_IN_OBJECT: The object is serialized by other objects. E.g.,
+ #: `a = ray.put(1)` -> `b = ray.put([a])`. a is serialized within a list.
+ #: - UNKNOWN_STATUS: The object ref status is unknown.
+ reference_type: TypeReferenceType = state_column(filterable=True)
+ #: The callsite of the object.
+ call_site: str = state_column(filterable=True)
+ #: The worker type that creates the object.
+ #:
+ #: - WORKER: The regular Ray worker process that executes tasks or
+ #: instantiates an actor.
+ #: - DRIVER: The driver (Python script that calls `ray.init`).
+ #: - SPILL_WORKER: The worker that spills objects.
+ #: - RESTORE_WORKER: The worker that restores objects.
+ type: TypeWorkerType = state_column(filterable=True) + #: The pid of the owner. + pid: int = state_column(filterable=True) + #: The ip address of the owner. + ip: str = state_column(filterable=True) + + +@dataclass(init=True) +class RuntimeEnvState(StateSchema): + """Runtime Environment State""" + + #: The runtime environment spec. + runtime_env: dict = state_column(filterable=True) + #: Whether or not the runtime env creation has succeeded. + success: bool = state_column(filterable=True) + #: The latency of creating the runtime environment. + #: Available if the runtime env is successfully created. + creation_time_ms: Optional[float] = state_column( + filterable=False, format_fn=Humanify.timestamp + ) + #: The node id of this runtime environment. + node_id: str = state_column(filterable=True) + #: The number of actors and tasks that use this runtime environment. + ref_cnt: Optional[int] = state_column(detail=True, filterable=False) + #: The error message if the runtime environment creation has failed. + #: Available if the runtime env is failed to be created. + error: Optional[str] = state_column(detail=True, filterable=True) + + +AVAILABLE_STATES = [ + ActorState, + PlacementGroupState, + NodeState, + WorkerState, + JobState, + TaskState, + ObjectState, + RuntimeEnvState, +] + + +for state in AVAILABLE_STATES: + if len(state.filterable_columns()) > 0: + filterable_cols = "\n\n ".join(state.filterable_columns()) + state.__doc__ += f""" +\nBelow columns can be used for the `--filter` option. +\n + {filterable_cols} +\n +""" + + if len(state.detail_columns()) > 0: + detail_cols = "\n\n ".join(state.detail_columns()) + state.__doc__ += f""" +\nBelow columns are available only when `get` API is used, +\n`--detail` is specified through CLI, or `detail=True` is given to Python APIs. 
+\n +\n + {detail_cols} +\n +""" + + +@dataclass(init=True) +class ListApiResponse: + # NOTE(rickyyx): We currently perform hard truncation when querying + # resources which could have a large number (e.g. asking raylets for + # the number of all objects). + # The returned of resources seen by the user will go through from the + # below funnel: + # - total + # | With truncation at the data source if the number of returned + # | resource exceeds `RAY_MAX_LIMIT_FROM_DATA_SOURCE` + # v + # - num_after_truncation + # | With filtering at the state API server + # v + # - num_filtered + # | With limiting, + # | set by min(`RAY_MAX_LIMIT_FROM_API_SERER`, ) + # v + # - len(result) + + # Total number of the available resource from the cluster. + total: int + # Number of resources returned by data sources after truncation + num_after_truncation: int + # Number of resources after filtering + num_filtered: int + # Returned data. None if no data is returned. + result: List[Dict] + # List API can have a partial failure if queries to + # all sources fail. For example, getting object states + # require to ping all raylets, and it is possible some of + # them fails. Note that it is impossible to guarantee high + # availability of data because ray's state information is + # not replicated. + partial_failure_warning: Optional[str] = "" + # A list of warnings to print. + warnings: Optional[List[str]] = None + + +""" +Summary API schema +""" + +DRIVER_TASK_ID_PREFIX = "ffffffffffffffffffffffffffffffffffffffff" + + +@dataclass(init=True) +class TaskSummaryPerFuncOrClassName: + #: The function or class name of this task. + func_or_class_name: str + #: The type of the class. Equivalent to protobuf TaskType. + type: str + #: State name to the count dict. State name is equivalent to + #: the protobuf TaskStatus. 
+ state_counts: Dict[TypeTaskStatus, int] = field(default_factory=dict) + + +@dataclass +class Link: + #: The type of entity to link to + type: str + #: The id of the entity to link to + id: str + + +@dataclass(init=True) +class NestedTaskSummary: + #: The name of this task group + name: str + #: A unique identifier for this group + key: str + #: The type of the class. Equivalent to protobuf TaskType, + #: "ACTOR" if it represents an Actor, or "GROUP" if it's a grouping of tasks. + type: str + #: Unix timestamp to use to sort the task group. + timestamp: Optional[int] = None + #: State name to the count dict. State name is equivalent to + #: the protobuf TaskStatus. + state_counts: Dict[TypeTaskStatus, int] = field(default_factory=dict) + #: The child + children: List["NestedTaskSummary"] = field(default_factory=list) + #: A link to more details about this summary. + link: Optional[Link] = None + + +@dataclass +class TaskSummaries: + #: Group key -> summary. + #: Right now, we only have func_class_name as a key. + # TODO(sang): Support the task group abstraction. + summary: Union[Dict[str, TaskSummaryPerFuncOrClassName], List[NestedTaskSummary]] + #: Total Ray tasks. + total_tasks: int + #: Total actor tasks. + total_actor_tasks: int + #: Total scheduled actors. + total_actor_scheduled: int + summary_by: str = "func_name" + + @classmethod + def to_summary_by_func_name(cls, *, tasks: List[Dict]) -> "TaskSummaries": + # NOTE: The argument tasks contains a list of dictionary + # that have the same k/v as TaskState. 
+ summary = {} + total_tasks = 0 + total_actor_tasks = 0 + total_actor_scheduled = 0 + + for task in tasks: + key = task["func_or_class_name"] + if key not in summary: + summary[key] = TaskSummaryPerFuncOrClassName( + func_or_class_name=task["func_or_class_name"], + type=task["type"], + ) + task_summary = summary[key] + + state = task["state"] + if state not in task_summary.state_counts: + task_summary.state_counts[state] = 0 + task_summary.state_counts[state] += 1 + + type_enum = TaskType.DESCRIPTOR.values_by_name[task["type"]].number + if type_enum == TaskType.NORMAL_TASK: + total_tasks += 1 + elif type_enum == TaskType.ACTOR_CREATION_TASK: + total_actor_scheduled += 1 + elif type_enum == TaskType.ACTOR_TASK: + total_actor_tasks += 1 + + return TaskSummaries( + summary=summary, + total_tasks=total_tasks, + total_actor_tasks=total_actor_tasks, + total_actor_scheduled=total_actor_scheduled, + summary_by="func_name", + ) + + @classmethod + def to_summary_by_lineage( + cls, *, tasks: List[Dict], actors: List[Dict] + ) -> "TaskSummaries": + """ + This summarizes tasks by lineage. + i.e. A task will be grouped with another task if they have the + same parent. + + This does things in 4 steps. + Step 1: Iterate through all tasks and keep track of them by id and ownership + Step 2: Put the tasks in a tree structure based on ownership + Step 3: Merge together siblings in the tree if there are more + than one with the same name. + Step 4: Total the children + + This can probably be more efficient if we merge together some steps to + reduce the amount of iterations but this algorithm produces very easy to + understand code. We can optimize in the future. + """ + # NOTE: The argument tasks contains a list of dictionary + # that have the same k/v as TaskState. 
+ + tasks_by_id = {} + task_group_by_id = {} + actor_creation_task_id_for_actor_id = {} + summary = [] + total_tasks = 0 + total_actor_tasks = 0 + total_actor_scheduled = 0 + + # Step 1 + # We cannot assume that a parent task always comes before the child task + # So we need to keep track of all tasks by ids so we can quickly find the + # parent. + # We also track the actor creation tasks so we can quickly figure out the + # ownership of actors. + for task in tasks: + tasks_by_id[task["task_id"]] = task + type_enum = TaskType.DESCRIPTOR.values_by_name[task["type"]].number + if type_enum == TaskType.ACTOR_CREATION_TASK: + actor_creation_task_id_for_actor_id[task["actor_id"]] = task["task_id"] + + actor_dict = {actor["actor_id"]: actor for actor in actors} + + def get_or_create_task_group(task_id: str) -> Optional[NestedTaskSummary]: + """ + Gets an already created task_group + OR + Creates a task group and puts it in the right place under its parent. + For actor tasks, the parent is the Actor that owns it. For all other + tasks, the owner is the driver or task that created it. + + Returns None if there is missing data about the task or one of its parents. + + For task groups that represents actors, the id is in the + format actor:{actor_id} + """ + if task_id in task_group_by_id: + return task_group_by_id[task_id] + + task = tasks_by_id.get(task_id) + if not task: + logger.debug(f"We're missing data about {task_id}") + # We're missing data about this parent. So we're dropping the whole + # tree at that node. + return None + + # Use name first which allows users to customize the name of + # their remote function call using the name option. 
+ func_name = task["name"] or task["func_or_class_name"] + task_id = task["task_id"] + type_enum = TaskType.DESCRIPTOR.values_by_name[task["type"]].number + + task_group_by_id[task_id] = NestedTaskSummary( + name=func_name, + key=task_id, + type=task["type"], + timestamp=task["creation_time_ms"], + link=Link(type="task", id=task_id), + ) + + # Set summary in right place under parent + if ( + type_enum == TaskType.ACTOR_TASK + or type_enum == TaskType.ACTOR_CREATION_TASK + ): + # For actor tasks, the parent is the actor and not the parent task. + parent_task_group = get_or_create_actor_task_group(task["actor_id"]) + if parent_task_group: + parent_task_group.children.append(task_group_by_id[task_id]) + else: + parent_task_id = task["parent_task_id"] + if not parent_task_id or parent_task_id.startswith( + DRIVER_TASK_ID_PREFIX + ): + summary.append(task_group_by_id[task_id]) + else: + parent_task_group = get_or_create_task_group(parent_task_id) + if parent_task_group: + parent_task_group.children.append(task_group_by_id[task_id]) + + return task_group_by_id[task_id] + + def get_or_create_actor_task_group( + actor_id: str, + ) -> Optional[NestedTaskSummary]: + """ + Gets an existing task group that represents an actor. + OR + Creates a task group that represents an actor. The owner of the actor is + the parent of the creation_task that created that actor. + + Returns None if there is missing data about the actor or one of its parents. + """ + key = f"actor:{actor_id}" + actor = actor_dict.get(actor_id) + if key not in task_group_by_id: + creation_task_id = actor_creation_task_id_for_actor_id.get(actor_id) + creation_task = tasks_by_id.get(creation_task_id) + + if not creation_task: + logger.debug(f"We're missing data about actor {actor_id}") + # We're missing data about the parent. So we're dropping the whole + # tree at that node. + return None + + # TODO(rickyx) + # We are using repr name for grouping actors if exists, + # else use class name. 
We should be using some group_name in the future. + if actor is None: + logger.debug( + f"We are missing actor info for actor {actor_id}, " + f"even though creation task exists: {creation_task}" + ) + [actor_name, *rest] = creation_task["func_or_class_name"].split(".") + else: + actor_name = ( + actor["repr_name"] + if actor["repr_name"] + else actor["class_name"] + ) + + task_group_by_id[key] = NestedTaskSummary( + name=actor_name, + key=key, + type="ACTOR", + timestamp=task["creation_time_ms"], + link=Link(type="actor", id=actor_id), + ) + + parent_task_id = creation_task["parent_task_id"] + if not parent_task_id or parent_task_id.startswith( + DRIVER_TASK_ID_PREFIX + ): + summary.append(task_group_by_id[key]) + else: + parent_task_group = get_or_create_task_group(parent_task_id) + if parent_task_group: + parent_task_group.children.append(task_group_by_id[key]) + + return task_group_by_id[key] + + # Step 2: Create the tree structure based on ownership + for task in tasks: + task_id = task["task_id"] + + task_group = get_or_create_task_group(task_id) + + if not task_group: + # We are probably missing data about this task or one of its parents. + continue + + state = task["state"] + if state not in task_group.state_counts: + task_group.state_counts[state] = 0 + task_group.state_counts[state] += 1 + + type_enum = TaskType.DESCRIPTOR.values_by_name[task["type"]].number + if type_enum == TaskType.NORMAL_TASK: + total_tasks += 1 + elif type_enum == TaskType.ACTOR_CREATION_TASK: + total_actor_scheduled += 1 + elif type_enum == TaskType.ACTOR_TASK: + total_actor_tasks += 1 + + def merge_sibings_for_task_group( + siblings: List[NestedTaskSummary], + ) -> Tuple[List[NestedTaskSummary], Optional[int]]: + """ + Merges task summaries with the same name into a group if there are more than + one child with that name. 
+ + Args: + siblings: A list of NestedTaskSummary's to merge together + + Returns + Index 0: A list of NestedTaskSummary's which have been merged + Index 1: The smallest timestamp amongst the siblings + """ + if not len(siblings): + return siblings, None + + # Group by name + groups = {} + min_timestamp = None + + for child in siblings: + child.children, child_min_timestamp = merge_sibings_for_task_group( + child.children + ) + if child_min_timestamp and child_min_timestamp < ( + child.timestamp or sys.maxsize + ): + child.timestamp = child_min_timestamp + + if child.name not in groups: + groups[child.name] = NestedTaskSummary( + name=child.name, + key=child.name, + type="GROUP", + ) + groups[child.name].children.append(child) + if child.timestamp and child.timestamp < ( + groups[child.name].timestamp or sys.maxsize + ): + groups[child.name].timestamp = child.timestamp + if child.timestamp < (min_timestamp or sys.maxsize): + min_timestamp = child.timestamp + + # Take the groups that have more than one children and return it. + # For groups with just one child, return the child itself instead of + # creating a group. + return [ + group if len(group.children) > 1 else group.children[0] + for group in groups.values() + ], min_timestamp + + # Step 3 + summary, _ = merge_sibings_for_task_group(summary) + + def sort_task_groups(task_groups: List[NestedTaskSummary]) -> None: + # Sort by timestamp + # Put actor creation tasks above other tasks with the same timestamp + task_groups.sort(key=lambda x: 0 if x.type == "ACTOR_CREATION_TASK" else 1) + task_groups.sort(key=lambda x: x.timestamp or sys.maxsize) + + def calc_total_for_task_group( + task_group: NestedTaskSummary, + ) -> NestedTaskSummary: + """ + Calculates the total of a group as the sum of all children. 
+ Sorts children by timestamp + """ + if not len(task_group.children): + return task_group + + for child in task_group.children: + totaled = calc_total_for_task_group(child) + + for state, count in totaled.state_counts.items(): + task_group.state_counts[state] = ( + task_group.state_counts.get(state, 0) + count + ) + + sort_task_groups(task_group.children) + + return task_group + + # Step 4 + summary = [calc_total_for_task_group(task_group) for task_group in summary] + sort_task_groups(summary) + + return TaskSummaries( + summary=summary, + total_tasks=total_tasks, + total_actor_tasks=total_actor_tasks, + total_actor_scheduled=total_actor_scheduled, + summary_by="lineage", + ) + + +@dataclass(init=True) +class ActorSummaryPerClass: + #: The class name of the actor. + class_name: str + #: State name to the count dict. State name is equivalent to + #: the protobuf ActorState. + state_counts: Dict[TypeActorStatus, int] = field(default_factory=dict) + + +@dataclass +class ActorSummaries: + #: Group key (actor class name) -> summary + summary: Dict[str, ActorSummaryPerClass] + #: Total number of actors + total_actors: int + summary_by: str = "class" + + @classmethod + def to_summary(cls, *, actors: List[Dict]): + # NOTE: The argument tasks contains a list of dictionary + # that have the same k/v as ActorState. + summary = {} + total_actors = 0 + + for actor in actors: + key = actor["class_name"] + if key not in summary: + summary[key] = ActorSummaryPerClass( + class_name=actor["class_name"], + ) + actor_summary = summary[key] + + state = actor["state"] + if state not in actor_summary.state_counts: + actor_summary.state_counts[state] = 0 + actor_summary.state_counts[state] += 1 + + total_actors += 1 + + return ActorSummaries( + summary=summary, + total_actors=total_actors, + ) + + +@dataclass(init=True) +class ObjectSummaryPerKey: + #: Total number of objects of the type. + total_objects: int + #: Total size in mb. 
+ total_size_mb: float + #: Total number of workers that reference the type of objects. + total_num_workers: int + #: Total number of nodes that reference the type of objects. + total_num_nodes: int + #: State name to the count dict. State name is equivalent to + #: ObjectState. + task_state_counts: Dict[TypeTaskStatus, int] = field(default_factory=dict) + #: Ref count type to the count dict. State name is equivalent to + #: ObjectState. + ref_type_counts: Dict[TypeReferenceType, int] = field(default_factory=dict) + + +@dataclass +class ObjectSummaries: + #: Group key (actor class name) -> summary + summary: Dict[str, ObjectSummaryPerKey] + #: Total number of referenced objects in the cluster. + total_objects: int + #: Total size of referenced objects in the cluster in MB. + total_size_mb: float + #: Whether or not the callsite collection is enabled. + callsite_enabled: bool + summary_by: str = "callsite" + + @classmethod + def to_summary(cls, *, objects: List[Dict]): + # NOTE: The argument tasks contains a list of dictionary + # that have the same k/v as ObjectState. 
+ summary = {} + total_objects = 0 + total_size_mb = 0 + key_to_workers = {} + key_to_nodes = {} + callsite_enabled = True + + for object in objects: + key = object["call_site"] + if key == "disabled": + callsite_enabled = False + if key not in summary: + summary[key] = ObjectSummaryPerKey( + total_objects=0, + total_size_mb=0, + total_num_workers=0, + total_num_nodes=0, + ) + key_to_workers[key] = set() + key_to_nodes[key] = set() + + object_summary = summary[key] + + task_state = object["task_status"] + if task_state not in object_summary.task_state_counts: + object_summary.task_state_counts[task_state] = 0 + object_summary.task_state_counts[task_state] += 1 + + ref_type = object["reference_type"] + if ref_type not in object_summary.ref_type_counts: + object_summary.ref_type_counts[ref_type] = 0 + object_summary.ref_type_counts[ref_type] += 1 + object_summary.total_objects += 1 + total_objects += 1 + + size_bytes = object["object_size"] + # object_size's unit is byte by default. It is -1, if the size is + # unknown. + if size_bytes != -1: + object_summary.total_size_mb += size_bytes / 1024**2 + total_size_mb += size_bytes / 1024**2 + + key_to_workers[key].add(object["pid"]) + key_to_nodes[key].add(object["ip"]) + + # Convert set of pid & node ips to length. + for key, workers in key_to_workers.items(): + summary[key].total_num_workers = len(workers) + for key, nodes in key_to_nodes.items(): + summary[key].total_num_nodes = len(nodes) + + return ObjectSummaries( + summary=summary, + total_objects=total_objects, + total_size_mb=total_size_mb, + callsite_enabled=callsite_enabled, + ) + + +@dataclass(init=True) +class StateSummary: + #: Node ID -> summary per node + #: If the data is not required to be orgnized per node, it will contain + #: a single key, "cluster". 
+ node_id_to_summary: Dict[str, Union[TaskSummaries, ActorSummaries, ObjectSummaries]] + + +@dataclass(init=True) +class SummaryApiResponse: + # Carried over from ListApiResponse + # We currently use list API for listing the resources + total: int + # Carried over from ListApiResponse + # Number of resources returned by data sources after truncation + num_after_truncation: int + # Number of resources after filtering + num_filtered: int + result: StateSummary = None + partial_failure_warning: Optional[str] = "" + # A list of warnings to print. + warnings: Optional[List[str]] = None + + +def resource_to_schema(resource: StateResource) -> StateSchema: + if resource == StateResource.ACTORS: + return ActorState + elif resource == StateResource.JOBS: + return JobState + elif resource == StateResource.NODES: + return NodeState + elif resource == StateResource.OBJECTS: + return ObjectState + elif resource == StateResource.PLACEMENT_GROUPS: + return PlacementGroupState + elif resource == StateResource.RUNTIME_ENVS: + return RuntimeEnvState + elif resource == StateResource.TASKS: + return TaskState + elif resource == StateResource.WORKERS: + return WorkerState + elif resource == StateResource.CLUSTER_EVENTS: + return ClusterEventState + else: + assert False, "Unreachable" + + +def protobuf_message_to_dict( + message, + fields_to_decode: List[str], + preserving_proto_field_name: bool = True, +) -> dict: + """Convert a protobuf message to dict + + Args: + fields_to_decode: field names which will be decoded from binary to hex. + preserving_proto_field_name: a pass-through option for protobuf message + method. See google.protobuf MessageToDict + + Return: + Dictionary of the converted rpc protobuf. 
+ """
+ return dashboard_utils.message_to_dict(
+ message,
+ fields_to_decode,
+ including_default_value_fields=True,
+ preserving_proto_field_name=preserving_proto_field_name,
+ )
+
+
+def protobuf_to_task_state_dict(message: TaskEvents) -> dict:
+ """
+ Convert a TaskEvents to a dict repr of `TaskState`
+ """
+ task_attempt = protobuf_message_to_dict(
+ message=message,
+ fields_to_decode=[
+ "task_id",
+ "job_id",
+ "node_id",
+ "actor_id",
+ "parent_task_id",
+ "worker_id",
+ "placement_group_id",
+ "component_id",
+ ],
+ )
+
+ task_state = {}
+ task_info = task_attempt.get("task_info", {})
+ state_updates = task_attempt.get("state_updates", {})
+ profiling_data = task_attempt.get("profile_events", {})
+ if profiling_data:
+ for event in profiling_data["events"]:
+ # End/start times are recorded in ns. We convert them to ms.
+ event["end_time"] = int(event["end_time"]) / 1e6
+ event["start_time"] = int(event["start_time"]) / 1e6
+ event["extra_data"] = json.loads(event["extra_data"])
+ task_state["profiling_data"] = profiling_data
+
+ # Convert those settable fields
+ mappings = [
+ (
+ task_info,
+ [
+ "task_id",
+ "name",
+ "actor_id",
+ "type",
+ "func_or_class_name",
+ "language",
+ "required_resources",
+ "runtime_env_info",
+ "parent_task_id",
+ "placement_group_id",
+ ],
+ ),
+ (task_attempt, ["task_id", "attempt_number", "job_id"]),
+ (
+ state_updates,
+ ["node_id", "worker_id", "task_log_info", "actor_repr_name"],
+ ),
+ ]
+ for src, keys in mappings:
+ for key in keys:
+ task_state[key] = src.get(key)
+
+ task_state["creation_time_ms"] = None
+ task_state["start_time_ms"] = None
+ task_state["end_time_ms"] = None
+ events = []
+
+ for state in TaskStatus.keys():
+ key = f"{state.lower()}_ts"
+ if key in state_updates:
+ # timestamp is recorded as nanosecond from the backend.
+ # We need to convert it to milliseconds.
+ ts_ms = int(state_updates[key]) // 1e6 + events.append( + { + "state": state, + "created_ms": ts_ms, + } + ) + if state == "PENDING_ARGS_AVAIL": + task_state["creation_time_ms"] = ts_ms + if state == "RUNNING": + task_state["start_time_ms"] = ts_ms + if state == "FINISHED" or state == "FAILED": + task_state["end_time_ms"] = ts_ms + + task_state["events"] = events + if len(events) > 0: + latest_state = events[-1]["state"] + else: + latest_state = "NIL" + task_state["state"] = latest_state + + # Parse error info + if latest_state == "FAILED": + error_info = state_updates.get("error_info", None) + if error_info: + # We captured colored error message printed to console, e.g. + # "\x1b[31mTraceback (most recent call last):\x1b[0m", + # this is to remove the ANSI escape codes. + task_state["error_message"] = remove_ansi_escape_codes( + error_info.get("error_message", "") + ) + task_state["error_type"] = error_info.get("error_type", "") + + # Parse actor task name for actor with repr name. + if ( + state_updates.get("actor_repr_name") + and task_state["type"] == "ACTOR_TASK" + and task_state["name"] + == task_state["func_or_class_name"] # no name option provided. + ): + # If it's an actor task with no name override, and has repr name defined + # for the actor, we override the name. + method_name = task_state["name"].split(".")[-1] + actor_repr_task_name = f"{state_updates['actor_repr_name']}.{method_name}" + task_state["name"] = actor_repr_task_name + + return task_state + + +def remove_ansi_escape_codes(text: str) -> str: + """Remove ANSI escape codes from a string.""" + import re + + return re.sub(r"\x1b[^m]*m", "", text) + + +def dict_to_state(d: Dict, state_schema: StateSchema) -> StateSchema: + """Convert a dict to a state schema. + + Args: + d: a dict to convert. + state_schema: a schema to convert to. + + Returns: + A state schema. 
+ """ + try: + return resource_to_schema(state_schema)(**d) + except Exception as e: + raise RayStateApiException(f"Failed to convert {d} to StateSchema: {e}") from e diff --git a/python/ray/util/state/custom_types.py b/python/ray/util/state/custom_types.py new file mode 100644 index 000000000000..5f3535a27446 --- /dev/null +++ b/python/ray/util/state/custom_types.py @@ -0,0 +1,100 @@ +import sys + +from ray.core.generated.common_pb2 import ( + TaskStatus, + TaskType, + WorkerExitType, + WorkerType, +) +from ray.core.generated.gcs_pb2 import ( + ActorTableData, + GcsNodeInfo, + PlacementGroupTableData, +) +from ray.dashboard.memory_utils import ReferenceType + +if sys.version_info >= (3, 8): + from typing import Literal +else: + from typing_extensions import Literal + + +ACTOR_STATUS = [ + "DEPENDENCIES_UNREADY", + "PENDING_CREATION", + "ALIVE", + "RESTARTING", + "DEAD", +] +TypeActorStatus = Literal[tuple(ACTOR_STATUS)] +PLACEMENT_GROUP_STATUS = [ + "PENDING", + "CREATED", + "REMOVED", + "RESCHEDULING", +] +TypePlacementGroupStatus = Literal[tuple(PLACEMENT_GROUP_STATUS)] +TASK_STATUS = [ + "NIL", + "PENDING_ARGS_AVAIL", + "PENDING_NODE_ASSIGNMENT", + "PENDING_OBJ_STORE_MEM_AVAIL", + "PENDING_ARGS_FETCH", + "SUBMITTED_TO_WORKER", + "RUNNING", + "RUNNING_IN_RAY_GET", + "RUNNING_IN_RAY_WAIT", + "FINISHED", + "FAILED", +] +TypeTaskStatus = Literal[tuple(TASK_STATUS)] +NODE_STATUS = ["ALIVE", "DEAD"] +TypeNodeStatus = Literal[tuple(NODE_STATUS)] +WORKER_TYPE = [ + "WORKER", + "DRIVER", + "SPILL_WORKER", + "RESTORE_WORKER", +] +TypeWorkerType = Literal[tuple(WORKER_TYPE)] +WORKER_EXIT_TYPE = [ + "SYSTEM_ERROR", + "INTENDED_SYSTEM_EXIT", + "USER_ERROR", + "INTENDED_USER_EXIT", + "NODE_OUT_OF_MEMORY", +] +TypeWorkerExitType = Literal[tuple(WORKER_EXIT_TYPE)] +TASK_TYPE = [ + "NORMAL_TASK", + "ACTOR_CREATION_TASK", + "ACTOR_TASK", + "DRIVER_TASK", +] +TypeTaskType = Literal[tuple(TASK_TYPE)] +TypeReferenceType = Literal[ + tuple(reference_type.value for reference_type in 
ReferenceType) +] + + +def validate_protobuf_enum(grpc_enum, custom_enum): + """Validate the literal contains the correct enum values from protobuf""" + enum_vals = set(grpc_enum.DESCRIPTOR.values_by_name) + # Sometimes, the grpc enum is mocked, and it + # doesn't include any values in that case. + if len(enum_vals) > 0: + assert enum_vals == set(custom_enum) + + +# Do the enum validation here. +# It is necessary to avoid regression. Alternatively, we can auto generate this +# directly by protobuf. +validate_protobuf_enum(ActorTableData.ActorState, ACTOR_STATUS) +validate_protobuf_enum( + PlacementGroupTableData.PlacementGroupState, PLACEMENT_GROUP_STATUS +) +validate_protobuf_enum(TaskStatus, TASK_STATUS) +validate_protobuf_enum(GcsNodeInfo.GcsNodeState, NODE_STATUS) +validate_protobuf_enum(WorkerType, WORKER_TYPE) +validate_protobuf_enum(WorkerExitType, WORKER_EXIT_TYPE) +validate_protobuf_enum(TaskType, TASK_TYPE) diff --git a/python/ray/util/state/exception.py b/python/ray/util/state/exception.py new file mode 100644 index 000000000000..8d8a180c2c32 --- /dev/null +++ b/python/ray/util/state/exception.py @@ -0,0 +1,18 @@ +"""Internal Error""" + + +class DataSourceUnavailable(Exception): + pass + + +"""User-facing Error""" + + +class RayStateApiException(Exception): + pass + + +class ServerUnavailable(RayStateApiException): + """Thrown when failing to connect to dashboard server""" + + pass diff --git a/python/ray/util/state/state_cli.py b/python/ray/util/state/state_cli.py new file mode 100644 index 000000000000..f8c992d58b2d --- /dev/null +++ b/python/ray/util/state/state_cli.py @@ -0,0 +1,1308 @@ +import json +import logging +from datetime import datetime +from enum import Enum, unique +from typing import Dict, List, Optional, Tuple + +import click +import yaml + +import ray._private.services as services +from ray._private.thirdparty.tabulate.tabulate import tabulate +from ray.util.state import ( + StateApiClient, + get_log, + list_logs, + summarize_actors, + 
summarize_objects, + summarize_tasks, +) +from ray.util.state.common import ( + DEFAULT_LIMIT, + DEFAULT_LOG_LIMIT, + DEFAULT_RPC_TIMEOUT, + GetApiOptions, + ListApiOptions, + PredicateType, + StateResource, + StateSchema, + SupportedFilterType, + resource_to_schema, +) +from ray.util.state.exception import RayStateApiException +from ray.util.annotations import PublicAPI + +logger = logging.getLogger(__name__) + + +@unique +class AvailableFormat(Enum): + DEFAULT = "default" + JSON = "json" + YAML = "yaml" + TABLE = "table" + + +def _parse_filter(filter: str) -> Tuple[str, PredicateType, SupportedFilterType]: + """Parse the filter string to a tuple of key, preciate, and value.""" + # The function assumes there's going to be no key that includes "="" or "!=". + # Since key is controlled by us, it should be trivial to keep the invariant. + predicate = None + # Tuple of [predicate_start, predicate_end). + predicate_index = None + + # Find the first predicate match. This logic works because we assume the + # key doesn't contain = or !=. + for i in range(len(filter)): + char = filter[i] + if char == "=": + predicate = "=" + predicate_index = (i, i + 1) + break + elif char == "!": + if len(filter) <= i + 1: + continue + + next_char = filter[i + 1] + if next_char == "=": + predicate = "!=" + predicate_index = (i, i + 2) + break + + if not predicate or not predicate_index: + raise ValueError( + f"The format of a given filter {filter} is invalid: " + "Cannot find the predicate. " + "Please provide key=val or key!=val format string." + ) + + key, predicate, value = ( + filter[: predicate_index[0]], + filter[predicate_index[0] : predicate_index[1]], + filter[predicate_index[1] :], + ) + + assert predicate == "=" or predicate == "!=" + if len(key) == 0 or len(value) == 0: + raise ValueError( + f"The format of a given filter {filter} is invalid: " + f"Cannot identify key {key} or value, {value}. " + "Please provide key=val or key!=val format string." 
+ ) + + return (key, predicate, value) + + +def _get_available_formats() -> List[str]: + """Return the available formats in a list of string""" + return [format_enum.value for format_enum in AvailableFormat] + + +def _get_available_resources( + excluded: Optional[List[StateResource]] = None, +) -> List[str]: + """Return the available resources in a list of string + + Args: + excluded: List of resources that should be excluded + """ + # All resource names use '_' rather than '-'. But users options have '-' + return [ + e.value.replace("_", "-") + for e in StateResource + if excluded is None or e not in excluded + ] + + +def get_table_output(state_data: List, schema: StateSchema, detail: bool) -> str: + """Display the table output. + + The table headers are ordered as the order defined in the dataclass of + `StateSchema`. For example, + + @dataclass + class A(StateSchema): + a: str + b: str + c: str + + will create headers + A B C + ----- + + Args: + state_data: A list of state data. + schema: The schema for the corresponding resource. + + Returns: + The table formatted string. 
+ """ + time = datetime.now() + header = "=" * 8 + f" List: {time} " + "=" * 8 + headers = [] + table = [] + cols = schema.list_columns(detail=detail) + for data in state_data: + for key, val in data.items(): + if isinstance(val, dict): + data[key] = yaml.dump(val, indent=2) + keys = set(data.keys()) + headers = [] + for col in cols: + if col in keys: + headers.append(col.upper()) + table.append([data[header.lower()] for header in headers]) + return f""" +{header} +Stats: +------------------------------ +Total: {len(state_data)} + +Table: +------------------------------ +{tabulate(table, headers=headers, showindex=True, tablefmt="plain", floatfmt=".3f")} +""" + + +def output_with_format( + state_data: List[Dict], + *, + schema: Optional[StateSchema], + format: AvailableFormat = AvailableFormat.DEFAULT, + detail: bool = False, +) -> str: + # humanify all input state data + if schema: + state_data = [schema.humanify(state) for state in state_data] + if format == AvailableFormat.DEFAULT: + return get_table_output(state_data, schema, detail) + if format == AvailableFormat.YAML: + return yaml.dump( + state_data, + indent=4, + explicit_start=True, + # We want to keep the defined ordering of the states, thus sort_keys=False + sort_keys=False, + ) + elif format == AvailableFormat.JSON: + return json.dumps(state_data) + elif format == AvailableFormat.TABLE: + return get_table_output(state_data, schema, detail) + else: + raise ValueError( + f"Unexpected format: {format}. " + f"Supported formatting: {_get_available_formats()}" + ) + + +def format_summary_output(state_data: Dict, *, resource: StateResource) -> str: + if len(state_data) == 0: + return "No resource in the cluster" + + # Parse the data. + cluster_data = state_data["cluster"] + summaries = cluster_data["summary"] + summary_by = cluster_data["summary_by"] + del cluster_data["summary_by"] + del cluster_data["summary"] + + cluster_info_table = yaml.dump(cluster_data, indent=2) + + # Create a table. 
+ table = [] + headers = [] + for summary in summaries.values(): + # Convert dict to yaml for better formatting. + for key, val in summary.items(): + if isinstance(val, dict): + summary[key] = yaml.dump(val, indent=2) + + headers = sorted([key.upper() for key in summary.keys()]) + table.append([summary[header.lower()] for header in headers]) + + summary_table = tabulate( + table, headers=headers, showindex=True, tablefmt="plain", numalign="left" + ) + + time = datetime.now() + header = "=" * 8 + f" {resource.value.capitalize()} Summary: {time} " + "=" * 8 + return f""" +{header} +Stats: +------------------------------------ +{cluster_info_table} + +Table (group by {summary_by}): +------------------------------------ +{summary_table} +""" + + +def format_object_summary_output(state_data: Dict) -> str: + if len(state_data) == 0: + return "No resource in the cluster" + + # Parse the data. + cluster_data = state_data["cluster"] + summaries = cluster_data["summary"] + summary_by = cluster_data["summary_by"] + del cluster_data["summary_by"] + del cluster_data["summary"] + + cluster_info_table = yaml.dump(cluster_data, indent=2) + + # Create a table per callsite. + tables = [] + for callsite, summary in summaries.items(): + # Convert dict to yaml for better formatting. + for key, val in summary.items(): + if isinstance(val, dict): + summary[key] = yaml.dump(val, indent=2) + + table = [] + headers = sorted([key.upper() for key in summary.keys()]) + table.append([summary[header.lower()] for header in headers]) + table_for_callsite = tabulate( + table, headers=headers, showindex=True, numalign="left" + ) + + # Format callsite. | is a separator for ray callsite. 
+ formatted_callsite = callsite.replace("|", "\n|") + tables.append(f"{formatted_callsite}\n{table_for_callsite}") + + time = datetime.now() + header = "=" * 8 + f" Object Summary: {time} " + "=" * 8 + table_string = "\n\n\n\n".join(tables) + return f""" +{header} +Stats: +------------------------------------ +{cluster_info_table} + +Table (group by {summary_by}) +------------------------------------ +{table_string} +""" + + +def format_get_api_output( + state_data: Optional[StateSchema], + id: str, + *, + schema: StateSchema, + format: AvailableFormat = AvailableFormat.YAML, +) -> str: + if not state_data or isinstance(state_data, list) and len(state_data) == 0: + return f"Resource with id={id} not found in the cluster." + + if not isinstance(state_data, list): + state_data = [state_data] + state_data = [state.asdict() for state in state_data] + + return output_with_format(state_data, schema=schema, format=format, detail=True) + + +def format_list_api_output( + state_data: List[StateSchema], + *, + schema: StateSchema, + format: AvailableFormat = AvailableFormat.DEFAULT, + detail: bool = False, +) -> str: + if len(state_data) == 0: + return "No resource in the cluster" + state_data = [state.asdict() for state in state_data] + return output_with_format(state_data, schema=schema, format=format, detail=detail) + + +def _should_explain(format: AvailableFormat) -> bool: + # If the format is json or yaml, it should not print stats because + # users don't want additional strings. + return format == AvailableFormat.DEFAULT or format == AvailableFormat.TABLE + + +""" +Common Options for State API commands +""" +timeout_option = click.option( + "--timeout", + default=DEFAULT_RPC_TIMEOUT, + help=f"Timeout in seconds for the API requests. Default is {DEFAULT_RPC_TIMEOUT}", +) +address_option = click.option( + "--address", + default=None, + help=( + "The address of Ray API server. If not provided, it will be configured " + "automatically from querying the GCS server." 
+ ), +) + + +@click.command() +@click.argument( + "resource", + # NOTE(rickyyx): We are not allowing query job with id, and runtime envs + type=click.Choice( + _get_available_resources( + excluded=[StateResource.JOBS, StateResource.RUNTIME_ENVS] + ) + ), +) +@click.argument( + "id", + type=str, +) +@address_option +@timeout_option +@PublicAPI(stability="stable") +def ray_get( + resource: str, + id: str, + address: Optional[str], + timeout: float, +): + """Get a state of a given resource by ID. + + We currently DO NOT support get by id for jobs and runtime-envs + + The output schema is defined at :ref:`State API Schema section. ` + + For example, the output schema of `ray get tasks ` is + :class:`~ray.util.state.common.TaskState`. + + Usage: + + Get an actor with actor id + + ``` + ray get actors + ``` + + Get a placement group information with + + ``` + ray get placement-groups + ``` + + The API queries one or more components from the cluster to obtain the data. + The returned state snapshot could be stale, and it is not guaranteed to return + the live data. + + Args: + resource: The type of the resource to query. + id: The id of the resource. + + Raises: + :class:`RayStateApiException ` + if the CLI is failed to query the data. + """ # noqa: E501 + # All resource names use '_' rather than '-'. But users options have '-' + resource = StateResource(resource.replace("-", "_")) + + # Create the State API server and put it into context + logger.debug(f"Create StateApiClient to ray instance at: {address}...") + client = StateApiClient(address=address) + options = GetApiOptions(timeout=timeout) + + # If errors occur, exceptions will be thrown. + try: + data = client.get( + resource=resource, + id=id, + options=options, + _explain=_should_explain(AvailableFormat.YAML), + ) + except RayStateApiException as e: + raise click.UsageError(str(e)) + + # Print data to console. 
+ print( + format_get_api_output( + state_data=data, + id=id, + schema=resource_to_schema(resource), + format=AvailableFormat.YAML, + ) + ) + + +@click.command() +@click.argument( + "resource", + type=click.Choice(_get_available_resources()), +) +@click.option( + "--format", default="default", type=click.Choice(_get_available_formats()) +) +@click.option( + "-f", + "--filter", + help=( + "A key, predicate, and value to filter the result. " + "E.g., --filter 'key=value' or --filter 'key!=value'. " + "You can specify multiple --filter options. In this case all predicates " + "are concatenated as AND. For example, --filter key=value --filter key2=value " + "means (key==val) AND (key2==val2)" + ), + multiple=True, +) +@click.option( + "--limit", + default=DEFAULT_LIMIT, + type=int, + help=("Maximum number of entries to return. 100 by default."), +) +@click.option( + "--detail", + help=( + "If the flag is set, the output will contain data in more details. " + "Note that the API could query more sources " + "to obtain information in a greater detail." + ), + is_flag=True, + default=False, +) +@timeout_option +@address_option +@PublicAPI(stability="stable") +def ray_list( + resource: str, + format: str, + filter: List[str], + limit: int, + detail: bool, + timeout: float, + address: str, +): + """List all states of a given resource. + + Normally, summary APIs are recommended before listing all resources. + + The output schema is defined at :ref:`State API Schema section. ` + + For example, the output schema of `ray list tasks` is + :class:`~ray.util.state.common.TaskState`. + + Usage: + + List all actor information from the cluster. + + ``` + ray list actors + ``` + + List 50 actors from the cluster. The sorting order cannot be controlled. + + ``` + ray list actors --limit 50 + ``` + + List 10 actors with state PENDING. + + ``` + ray list actors --limit 10 --filter "state=PENDING" + ``` + + List actors with yaml format. 
+ + ``` + ray list actors --format yaml + ``` + + List actors with details. When --detail is specified, it might query + more data sources to obtain data in details. + + ``` + ray list actors --detail + ``` + + The API queries one or more components from the cluster to obtain the data. + The returned state snapshot could be stale, and it is not guaranteed to return + the live data. + + The API can return partial or missing output upon the following scenarios. + + - When the API queries more than 1 component, if some of them fail, + the API will return the partial result (with a suppressible warning). + - When the API returns too many entries, the API + will truncate the output. Currently, truncated data cannot be + selected by users. + + Args: + resource: The type of the resource to query. + + Raises: + :class:`RayStateApiException ` + if the CLI is failed to query the data. + """ # noqa: E501 + # All resource names use '_' rather than '-'. But users options have '-' + resource = StateResource(resource.replace("-", "_")) + format = AvailableFormat(format) + + # Create the State API server and put it into context + client = StateApiClient(address=address) + + filter = [_parse_filter(f) for f in filter] + + options = ListApiOptions( + limit=limit, + timeout=timeout, + filters=filter, + detail=detail, + ) + + # If errors occur, exceptions will be thrown. Empty data indicate successful query. + try: + data = client.list( + resource, + options=options, + raise_on_missing_output=False, + _explain=_should_explain(format), + ) + except RayStateApiException as e: + raise click.UsageError(str(e)) + + # If --detail is given, the default formatting is yaml. + if detail and format == AvailableFormat.DEFAULT: + format = AvailableFormat.YAML + + # Print data to console. 
+ print( + format_list_api_output( + state_data=data, + schema=resource_to_schema(resource), + format=format, + detail=detail, + ) + ) + + +@click.group("summary") +@click.pass_context +@PublicAPI(stability="stable") +def summary_state_cli_group(ctx): + """Return the summarized information of a given resource.""" + pass + + +@summary_state_cli_group.command(name="tasks") +@timeout_option +@address_option +@click.pass_context +@PublicAPI(stability="stable") +def task_summary(ctx, timeout: float, address: str): + """Summarize the task state of the cluster. + + By default, the output contains the information grouped by + task function names. + + The output schema is + :class:`~ray.util.state.common.TaskSummaries`. + + Raises: + :class:`RayStateApiException ` + if the CLI is failed to query the data. + """ # noqa: E501 + print( + format_summary_output( + summarize_tasks( + address=address, + timeout=timeout, + raise_on_missing_output=False, + _explain=True, + ), + resource=StateResource.TASKS, + ) + ) + + +@summary_state_cli_group.command(name="actors") +@timeout_option +@address_option +@click.pass_context +@PublicAPI(stability="stable") +def actor_summary(ctx, timeout: float, address: str): + """Summarize the actor state of the cluster. + + By default, the output contains the information grouped by + actor class names. + + The output schema is + :class:`ray.util.state.common.ActorSummaries + `. + + Raises: + :class:`RayStateApiException ` + if the CLI is failed to query the data. + """ # noqa: E501 + print( + format_summary_output( + summarize_actors( + address=address, + timeout=timeout, + raise_on_missing_output=False, + _explain=True, + ), + resource=StateResource.ACTORS, + ) + ) + + +@summary_state_cli_group.command(name="objects") +@timeout_option +@address_option +@click.pass_context +@PublicAPI(stability="stable") +def object_summary(ctx, timeout: float, address: str): + """Summarize the object state of the cluster. 
+ + The API is recommended when debugging memory leaks. + See :ref:`Debugging with Ray Memory ` for more details. + (Note that this command is almost equivalent to `ray memory`, but it returns + easier-to-understand output). + + By default, the output contains the information grouped by + object callsite. Note that the callsite is not collected and + all data will be aggregated as "disable" callsite if the env var + `RAY_record_ref_creation_sites` is not configured. To enable the + callsite collection, set the following environment variable when + starting Ray. + + Example: + + ``` + RAY_record_ref_creation_sites=1 ray start --head + ``` + + ``` + RAY_record_ref_creation_sites=1 ray_script.py + ``` + + The output schema is + :class:`ray.util.state.common.ObjectSummaries + `. + + Raises: + :class:`RayStateApiException ` + if the CLI is failed to query the data. + """ # noqa: E501 + print( + format_object_summary_output( + summarize_objects( + address=address, + timeout=timeout, + raise_on_missing_output=False, + _explain=True, + ), + ) + ) + + +log_follow_option = click.option( + "--follow", + "-f", + required=False, + type=bool, + is_flag=True, + help="Streams the log file as it is updated instead of just tailing.", +) + +log_tail_option = click.option( + "--tail", + required=False, + type=int, + default=DEFAULT_LOG_LIMIT, + help="Number of lines to tail from log. Use -1 to fetch the whole file.", +) + +log_interval_option = click.option( + "--interval", + required=False, + type=float, + default=None, + help="The interval in secs to print new logs when `--follow` is specified.", + hidden=True, +) + +log_timeout_option = click.option( + "--timeout", + default=DEFAULT_RPC_TIMEOUT, + help=( + "Timeout in seconds for the API requests. " + f"Default is {DEFAULT_RPC_TIMEOUT}. If --follow is specified, " + "this option will be ignored." 
+ ), +) + +log_node_ip_option = click.option( + "-ip", + "--node-ip", + required=False, + type=str, + default=None, + help="Filters the logs by this ip address", +) + +log_node_id_option = click.option( + "--node-id", + "-id", + required=False, + type=str, + default=None, + help="Filters the logs by this NodeID", +) + +log_suffix_option = click.option( + "--err", + is_flag=True, + default=False, + help=( + "If supplied, querying stderr files for workers/actors, " + "else defaults to stdout files." + ), +) + +log_encoding_option = click.option( + "--encoding", + required=False, + default="utf-8", + help=( + "The encoding use to decode the log file. Accepts any encoding " + "supported by Python's `codecs` module. Defaults to utf-8." + ), +) + +log_encoding_errors_option = click.option( + "--encoding-errors", + required=False, + default="strict", + help=( + "The error handling scheme to use for decoding errors. " + "Accepts any error handling scheme supported by Python's `codecs`" + "module. Defaults to strict." + ), +) + + +def _get_head_node_ip(address: Optional[str] = None): + """Get the head node ip from the ray address if possible + + Args: + address: ray cluster address, e.g. 
"auto", "localhost:6379" + + Raises: + click.UsageError if node ip could not be resolved + """ + try: + address = services.canonicalize_bootstrap_address_or_die(address) + return address.split(":")[0] + except (ConnectionError, ValueError) as e: + # Hide all the stack trace + raise click.UsageError(str(e)) + + +def _print_log( + address: Optional[str] = None, + node_id: Optional[str] = None, + node_ip: Optional[str] = None, + filename: Optional[str] = None, + actor_id: Optional[str] = None, + pid: Optional[int] = None, + follow: bool = False, + tail: int = DEFAULT_LOG_LIMIT, + timeout: int = DEFAULT_RPC_TIMEOUT, + interval: Optional[float] = None, + suffix: str = "out", + encoding: str = "utf-8", + encoding_errors: str = "strict", + task_id: Optional[str] = None, + attempt_number: int = 0, + submission_id: Optional[str] = None, +): + """Wrapper around `get_log()` that prints the preamble and the log lines""" + if tail > 0: + print( + f"--- Log has been truncated to last {tail} lines." + " Use `--tail` flag to toggle. Set to -1 for getting the entire file. ---\n" + ) + + if node_id is None and node_ip is None: + # Auto detect node ip from the ray address when address neither is given + node_ip = _get_head_node_ip(address) + + for chunk in get_log( + address=address, + node_id=node_id, + node_ip=node_ip, + filename=filename, + actor_id=actor_id, + tail=tail, + pid=pid, + follow=follow, + _interval=interval, + timeout=timeout, + suffix=suffix, + encoding=encoding, + errors=encoding_errors, + task_id=task_id, + attempt_number=attempt_number, + submission_id=submission_id, + ): + print(chunk, end="", flush=True) + + +LOG_CLI_HELP_MSG = """ +Get logs based on filename (cluster) or resource identifiers (actor) + +Example: + + Get all the log files available on a node (ray address could be + obtained from `ray start --head` or `ray.init()`). + + ``` + ray logs cluster + ``` + + [ray logs cluster] Print the last 500 lines of raylet.out on a head node. 
+ + ``` + ray logs cluster raylet.out --tail 500 + ``` + + Or simply, using `ray logs` as an alias for `ray logs cluster`: + + ``` + ray logs raylet.out --tail 500 + ``` + + Print the last 500 lines of raylet.out on a worker node id A. + + ``` + ray logs raylet.out --tail 500 —-node-id A + ``` + + [ray logs actor] Follow the log file with an actor id ABC. + + ``` + ray logs actor --id ABC --follow + ``` + + [ray logs task] Get the std err generated by a task. + + ``` + ray logs task --id --err + ``` +""" + + +class LogCommandGroup(click.Group): + def resolve_command(self, ctx, args): + """Try resolve the command line args assuming users omitted the subcommand. + + This overrides the default `resolve_command` for the parent class. + This will allow command alias of `ray ` to `ray cluster `. + """ + ctx.resilient_parsing = True + res = super().resolve_command(ctx, args) + cmd_name, cmd, parsed_args = res + if cmd is None: + # It could have been `ray logs ...`, forward to `ray logs cluster ...` + return super().resolve_command(ctx, ["cluster"] + args) + return cmd_name, cmd, parsed_args + + +logs_state_cli_group = LogCommandGroup(help=LOG_CLI_HELP_MSG) + + +@logs_state_cli_group.command(name="cluster") +@click.argument( + "glob_filter", + required=False, + default="*", +) +@address_option +@log_node_id_option +@log_node_ip_option +@log_follow_option +@log_tail_option +@log_interval_option +@log_timeout_option +@log_encoding_option +@log_encoding_errors_option +@click.pass_context +@PublicAPI(stability="stable") +def log_cluster( + ctx, + glob_filter: str, + address: Optional[str], + node_id: Optional[str], + node_ip: Optional[str], + follow: bool, + tail: int, + interval: float, + timeout: int, + encoding: str, + encoding_errors: str, +): + """Get/List logs that matches the GLOB_FILTER in the cluster. + By default, it prints a list of log files that match the filter. + By default, it prints the head node logs. + If there's only 1 match, it will print the log file. 
+ + Example: + + Print the last 500 lines of raylet.out on a head node. + + ``` + ray logs [cluster] raylet.out --tail 500 + ``` + + Print the last 500 lines of raylet.out on a worker node id A. + + ``` + ray logs [cluster] raylet.out --tail 500 —-node-id A + ``` + + Download the gcs_server.txt file to the local machine. + + ``` + ray logs [cluster] gcs_server.out --tail -1 > gcs_server.txt + ``` + + Follow the log files from the last 100 lines. + + ``` + ray logs [cluster] raylet.out --tail 100 -f + ``` + + Raises: + :class:`RayStateApiException ` if the CLI + is failed to query the data. + """ # noqa: E501 + + if node_id is None and node_ip is None: + node_ip = _get_head_node_ip(address) + + logs = list_logs( + address=address, + node_id=node_id, + node_ip=node_ip, + glob_filter=glob_filter, + timeout=timeout, + ) + + log_files_found = [] + for _, log_files in logs.items(): + for log_file in log_files: + log_files_found.append(log_file) + + if len(log_files_found) != 1: + # Print the list of log files found if no unique log found + if node_id: + print(f"Node ID: {node_id}") + elif node_ip: + print(f"Node IP: {node_ip}") + print(output_with_format(logs, schema=None, format=AvailableFormat.YAML)) + return + + # If there's only 1 file, that means there's a unique match. 
+ filename = log_files_found[0] + + _print_log( + address=address, + node_id=node_id, + node_ip=node_ip, + filename=filename, + tail=tail, + follow=follow, + interval=interval, + timeout=timeout, + encoding=encoding, + encoding_errors=encoding_errors, + ) + + +@logs_state_cli_group.command(name="actor") +@click.option( + "--id", + "-a", + required=False, + type=str, + default=None, + help="Retrieves the logs corresponding to this ActorID.", +) +@click.option( + "--pid", + "-pid", + required=False, + type=str, + default=None, + help="Retrieves the logs from the actor with this pid.", +) +@address_option +@log_node_id_option +@log_node_ip_option +@log_follow_option +@log_tail_option +@log_interval_option +@log_timeout_option +@log_suffix_option +@click.pass_context +@PublicAPI(stability="stable") +def log_actor( + ctx, + id: Optional[str], + pid: Optional[str], + address: Optional[str], + node_id: Optional[str], + node_ip: Optional[str], + follow: bool, + tail: int, + interval: float, + timeout: int, + err: bool, +): + """Get/List logs associated with an actor. + + Example: + + Follow the log file with an actor id ABCDEFG. + + ``` + ray logs actor --id ABCDEFG --follow + ``` + + Get the actor log from pid 123, ip x.x.x.x + Note that this goes well with the driver log of Ray which prints + (ip=x.x.x.x, pid=123, class_name) logs. + + ``` + ray logs actor --pid=123 —ip=x.x.x.x + ``` + + Get the actor err log file. + + ``` + ray logs actor --id ABCDEFG --err + ``` + + Raises: + :class:`RayStateApiException ` + if the CLI is failed to query the data. + MissingParameter if inputs are missing. 
+ """ # noqa: E501 + + if pid is None and id is None: + raise click.MissingParameter( + message="At least one of `--pid` and `--id` has to be set", + param_type="option", + ) + + _print_log( + address=address, + node_id=node_id, + node_ip=node_ip, + pid=pid, + actor_id=id, + tail=tail, + follow=follow, + interval=interval, + timeout=timeout, + suffix="err" if err else "out", + ) + + +@logs_state_cli_group.command(name="worker") +@click.option( + "--pid", + "-pid", + # The only identifier supported for now, TODO(rickyx): add worker id support + required=True, + type=str, + help="Retrieves the logs from the worker with this pid.", +) +@address_option +@log_node_id_option +@log_node_ip_option +@log_follow_option +@log_tail_option +@log_interval_option +@log_timeout_option +@log_suffix_option +@click.pass_context +@PublicAPI(stability="stable") +def log_worker( + ctx, + pid: Optional[str], + address: Optional[str], + node_id: Optional[str], + node_ip: Optional[str], + follow: bool, + tail: int, + interval: float, + timeout: int, + err: bool, +): + """Get logs associated with a worker process. + + Example: + + Follow the log file from a worker process with pid=123 + + ``` + ray logs worker --pid 123 --follow + ``` + + Get the stderr logs from a worker process. + + ``` + ray logs worker --pid 123 --err + ``` + + Raises: + :class:`RayStateApiException ` + if the CLI is failed to query the data. + MissingParameter if inputs are missing. + """ # noqa: E501 + + _print_log( + address=address, + node_id=node_id, + node_ip=node_ip, + pid=pid, + tail=tail, + follow=follow, + interval=interval, + timeout=timeout, + suffix="err" if err else "out", + ) + + +@logs_state_cli_group.command(name="job") +@click.option( + "--id", + "submission_id", + required=True, + type=str, + help=( + "Retrieves the logs from a submission job with submission id," + "i.e. 
raysubmit_XXX" + ), +) +@address_option +@log_follow_option +@log_tail_option +@log_interval_option +@log_timeout_option +@click.pass_context +@PublicAPI(stability="stable") +def log_job( + ctx, + submission_id: Optional[str], + address: Optional[str], + follow: bool, + tail: int, + interval: float, + timeout: int, +): + """Get logs associated with a submission job. + + Example: + + Follow the log file from a submission job with submission id raysumbit_xxx. + + ``` + ray logs job --id raysubmit_xxx + ``` + + Follow the submission job log. + + ``` + ray logs jobs --id raysubmit_xxx --follow + + ``` + + Raises: + :class:`RayStateApiException ` + if the CLI is failed to query the data. + MissingParameter if inputs are missing. + """ # noqa: E501 + + _print_log( + address=address, + tail=tail, + follow=follow, + interval=interval, + timeout=timeout, + submission_id=submission_id, + ) + + +@logs_state_cli_group.command(name="task") +@click.option( + "--id", + "task_id", + required=True, + type=str, + help="Retrieves the logs from the task with this task id.", +) +@click.option( + "--attempt-number", + "-a", + required=False, + type=int, + default=0, + help="Retrieves the logs from the attempt, default to 0", +) +@address_option +@log_follow_option +@log_interval_option +@log_tail_option +@log_timeout_option +@log_suffix_option +@click.pass_context +@PublicAPI(stability="stable") +def log_task( + ctx, + task_id: Optional[str], + attempt_number: int, + address: Optional[str], + follow: bool, + interval: float, + tail: int, + timeout: int, + err: bool, +): + """Get logs associated with a task. + + Example: + + Follow the log file from a task with task id = ABCDEFG + + ``` + ray logs tasks --id ABCDEFG --follow + ``` + + Get the log from a retry attempt 1 from a task. + + ``` + ray logs tasks --id ABCDEFG -a 1 + ``` + + Raises: + :class:`RayStateApiException ` + if the CLI is failed to query the data. + MissingParameter if inputs are missing. 
+ """ # noqa: E501 + + _print_log( + address=address, + task_id=task_id, + attempt_number=attempt_number, + follow=follow, + tail=tail, + interval=interval, + timeout=timeout, + suffix="err" if err else "out", + ) diff --git a/python/ray/util/state/state_manager.py b/python/ray/util/state/state_manager.py new file mode 100644 index 000000000000..5617e8ea9e14 --- /dev/null +++ b/python/ray/util/state/state_manager.py @@ -0,0 +1,457 @@ +import dataclasses +import inspect +import logging +from collections import defaultdict +from functools import wraps +from typing import List, Optional, Tuple + +import grpc +from grpc.aio._call import UnaryStreamCall + +import ray +import ray.dashboard.modules.log.log_consts as log_consts +from ray._private import ray_constants +from ray._private.gcs_utils import GcsAioClient +from ray._private.utils import hex_to_binary +from ray._raylet import ActorID, JobID, TaskID +from ray.core.generated import gcs_service_pb2_grpc +from ray.core.generated.gcs_pb2 import ActorTableData +from ray.core.generated.gcs_service_pb2 import ( + GetAllActorInfoReply, + GetAllActorInfoRequest, + GetAllNodeInfoReply, + GetAllNodeInfoRequest, + GetAllPlacementGroupReply, + GetAllPlacementGroupRequest, + GetAllWorkerInfoReply, + GetAllWorkerInfoRequest, + GetTaskEventsReply, + GetTaskEventsRequest, +) +from ray.core.generated.node_manager_pb2 import ( + GetObjectsInfoReply, + GetObjectsInfoRequest, + GetTasksInfoReply, + GetTasksInfoRequest, +) +from ray.core.generated.node_manager_pb2_grpc import NodeManagerServiceStub +from ray.core.generated.reporter_pb2 import ( + ListLogsReply, + ListLogsRequest, + StreamLogRequest, +) +from ray.core.generated.reporter_pb2_grpc import LogServiceStub +from ray.core.generated.runtime_env_agent_pb2 import ( + GetRuntimeEnvsInfoReply, + GetRuntimeEnvsInfoRequest, +) +from ray.core.generated.runtime_env_agent_pb2_grpc import RuntimeEnvServiceStub +from ray.dashboard.datacenter import DataSource +from 
ray.dashboard.modules.job.common import JobInfoStorageClient +from ray.dashboard.modules.job.pydantic_models import JobDetails, JobType +from ray.dashboard.modules.job.utils import get_driver_jobs +from ray.dashboard.utils import Dict as Dictionary +from ray.util.state.common import ( + RAY_MAX_LIMIT_FROM_DATA_SOURCE, + PredicateType, + SupportedFilterType, +) +from ray.util.state.exception import DataSourceUnavailable + +logger = logging.getLogger(__name__) + +_STATE_MANAGER_GRPC_OPTIONS = [ + *ray_constants.GLOBAL_GRPC_OPTIONS, + ("grpc.max_send_message_length", ray_constants.GRPC_CPP_MAX_MESSAGE_SIZE), + ("grpc.max_receive_message_length", ray_constants.GRPC_CPP_MAX_MESSAGE_SIZE), +] + + +def handle_grpc_network_errors(func): + """Decorator to add a network handling logic. + + It is a helper method for `StateDataSourceClient`. + The method can only be used for async methods. + """ + assert inspect.iscoroutinefunction(func) + + @wraps(func) + async def api_with_network_error_handler(*args, **kwargs): + """Apply the network error handling logic to each APIs, + such as retry or exception policies. + + Returns: + If RPC succeeds, it returns what the original function returns. + If RPC fails, it raises exceptions. + Exceptions: + DataSourceUnavailable: if the source is unavailable because it is down + or there's a slow network issue causing timeout. + Otherwise, the raw network exceptions (e.g., gRPC) will be raised. + """ + try: + return await func(*args, **kwargs) + except grpc.aio.AioRpcError as e: + if ( + e.code() == grpc.StatusCode.DEADLINE_EXCEEDED + or e.code() == grpc.StatusCode.UNAVAILABLE + ): + raise DataSourceUnavailable( + "Failed to query the data source. " + "It is either there's a network issue, or the source is down." + ) + else: + logger.exception(e) + raise e + + return api_with_network_error_handler + + +class IdToIpMap: + def __init__(self): + # Node IP to node ID mapping. + self._ip_to_node_id = defaultdict(str) + # Node ID to node IP mapping. 
+ self._node_id_to_ip = defaultdict(str) + + def put(self, node_id: str, address: str): + self._ip_to_node_id[address] = node_id + self._node_id_to_ip[node_id] = address + + def get_ip(self, node_id: str): + return self._node_id_to_ip.get(node_id) + + def get_node_id(self, address: str): + return self._ip_to_node_id.get(address) + + def pop(self, node_id: str): + """Pop the given node id. + + Returns: + False if the corresponding node id doesn't exist. + True if it pops correctly. + """ + ip = self._node_id_to_ip.get(node_id) + if not ip: + return None + assert ip in self._ip_to_node_id + self._node_id_to_ip.pop(node_id) + self._ip_to_node_id.pop(ip) + return True + + +class StateDataSourceClient: + """The client to query states from various data sources such as Raylet, GCS, Agents. + + Note that it doesn't directly query core workers. They are proxied through raylets. + + The module is not in charge of service discovery. The caller is responsible for + finding services and register stubs through `register*` APIs. + + Non `register*` APIs + - Return the protobuf directly if it succeeds to query the source. + - Raises an exception if there's any network issue. + - throw a ValueError if it cannot find the source. 
+ """ + + def __init__(self, gcs_channel: grpc.aio.Channel, gcs_aio_client: GcsAioClient): + self.register_gcs_client(gcs_channel) + self._raylet_stubs = {} + self._runtime_env_agent_stub = {} + self._log_agent_stub = {} + self._job_client = JobInfoStorageClient(gcs_aio_client) + self._id_id_map = IdToIpMap() + self._gcs_aio_client = gcs_aio_client + + def register_gcs_client(self, gcs_channel: grpc.aio.Channel): + self._gcs_actor_info_stub = gcs_service_pb2_grpc.ActorInfoGcsServiceStub( + gcs_channel + ) + self._gcs_pg_info_stub = gcs_service_pb2_grpc.PlacementGroupInfoGcsServiceStub( + gcs_channel + ) + self._gcs_node_info_stub = gcs_service_pb2_grpc.NodeInfoGcsServiceStub( + gcs_channel + ) + self._gcs_worker_info_stub = gcs_service_pb2_grpc.WorkerInfoGcsServiceStub( + gcs_channel + ) + self._gcs_task_info_stub = gcs_service_pb2_grpc.TaskInfoGcsServiceStub( + gcs_channel + ) + + def register_raylet_client(self, node_id: str, address: str, port: int): + full_addr = f"{address}:{port}" + options = _STATE_MANAGER_GRPC_OPTIONS + channel = ray._private.utils.init_grpc_channel( + full_addr, options, asynchronous=True + ) + self._raylet_stubs[node_id] = NodeManagerServiceStub(channel) + self._id_id_map.put(node_id, address) + + def unregister_raylet_client(self, node_id: str): + self._raylet_stubs.pop(node_id) + self._id_id_map.pop(node_id) + + def register_agent_client(self, node_id, address: str, port: int): + options = _STATE_MANAGER_GRPC_OPTIONS + channel = ray._private.utils.init_grpc_channel( + f"{address}:{port}", options=options, asynchronous=True + ) + self._runtime_env_agent_stub[node_id] = RuntimeEnvServiceStub(channel) + self._log_agent_stub[node_id] = LogServiceStub(channel) + self._id_id_map.put(node_id, address) + + def unregister_agent_client(self, node_id: str): + self._runtime_env_agent_stub.pop(node_id) + self._log_agent_stub.pop(node_id) + self._id_id_map.pop(node_id) + + def get_all_registered_raylet_ids(self) -> List[str]: + return 
self._raylet_stubs.keys() + + def get_all_registered_agent_ids(self) -> List[str]: + assert len(self._log_agent_stub) == len(self._runtime_env_agent_stub) + return self._runtime_env_agent_stub.keys() + + def ip_to_node_id(self, ip: Optional[str]) -> Optional[str]: + """Return the node id that corresponds to the given ip. + + Args: + ip: The ip address. + + Returns: + None if the corresponding id doesn't exist. + Node id otherwise. If None node_ip is given, + it will also return None. + """ + if not ip: + return None + return self._id_id_map.get_node_id(ip) + + @handle_grpc_network_errors + async def get_all_actor_info( + self, + timeout: int = None, + limit: int = None, + filters: Optional[List[Tuple[str, PredicateType, SupportedFilterType]]] = None, + ) -> Optional[GetAllActorInfoReply]: + if not limit: + limit = RAY_MAX_LIMIT_FROM_DATA_SOURCE + if filters is None: + filters = [] + + req_filters = GetAllActorInfoRequest.Filters() + for filter in filters: + key, predicate, value = filter + if predicate != "=": + # We only support EQUAL predicate for source side filtering. 
+ continue + if key == "actor_id": + req_filters.actor_id = ActorID(hex_to_binary(value)).binary() + elif key == "state": + if value not in ActorTableData.ActorState.keys(): + raise ValueError(f"Invalid actor state for filtering: {value}") + req_filters.state = ActorTableData.ActorState.Value(value) + elif key == "job_id": + req_filters.job_id = JobID(hex_to_binary(value)).binary() + + request = GetAllActorInfoRequest(limit=limit, filters=req_filters) + reply = await self._gcs_actor_info_stub.GetAllActorInfo( + request, timeout=timeout + ) + return reply + + @handle_grpc_network_errors + async def get_all_task_info( + self, + timeout: int = None, + limit: int = None, + filters: Optional[List[Tuple[str, PredicateType, SupportedFilterType]]] = None, + exclude_driver: bool = False, + ) -> Optional[GetTaskEventsReply]: + if not limit: + limit = RAY_MAX_LIMIT_FROM_DATA_SOURCE + + if filters is None: + filters = [] + + req_filters = GetTaskEventsRequest.Filters() + for filter in filters: + key, predicate, value = filter + if predicate != "=": + # We only support EQUAL predicate for source side filtering. + continue + + if key == "actor_id": + req_filters.actor_id = ActorID(hex_to_binary(value)).binary() + elif key == "job_id": + req_filters.job_id = JobID(hex_to_binary(value)).binary() + elif key == "name": + req_filters.name = value + elif key == "task_id": + req_filters.task_ids.append(TaskID(hex_to_binary(value)).binary()) + else: + continue + + # Remove the filter from the list so that we don't have to + # filter it again later. 
+ filters.remove(filter) + + req_filters.exclude_driver = exclude_driver + + request = GetTaskEventsRequest(limit=limit, filters=req_filters) + reply = await self._gcs_task_info_stub.GetTaskEvents(request, timeout=timeout) + return reply + + @handle_grpc_network_errors + async def get_all_placement_group_info( + self, timeout: int = None, limit: int = None + ) -> Optional[GetAllPlacementGroupReply]: + if not limit: + limit = RAY_MAX_LIMIT_FROM_DATA_SOURCE + + request = GetAllPlacementGroupRequest(limit=limit) + reply = await self._gcs_pg_info_stub.GetAllPlacementGroup( + request, timeout=timeout + ) + return reply + + @handle_grpc_network_errors + async def get_all_node_info( + self, timeout: int = None + ) -> Optional[GetAllNodeInfoReply]: + request = GetAllNodeInfoRequest() + reply = await self._gcs_node_info_stub.GetAllNodeInfo(request, timeout=timeout) + return reply + + @handle_grpc_network_errors + async def get_all_worker_info( + self, timeout: int = None, limit: int = None + ) -> Optional[GetAllWorkerInfoReply]: + if not limit: + limit = RAY_MAX_LIMIT_FROM_DATA_SOURCE + + request = GetAllWorkerInfoRequest(limit=limit) + reply = await self._gcs_worker_info_stub.GetAllWorkerInfo( + request, timeout=timeout + ) + return reply + + # TODO(rickyx): + # This is currently mirroring dashboard/modules/job/job_head.py::list_jobs + # We should eventually unify the logic. + async def get_job_info(self, timeout: int = None) -> List[JobDetails]: + # Cannot use @handle_grpc_network_errors because async def is not supported yet. 
+ + driver_jobs, submission_job_drivers = await get_driver_jobs( + self._gcs_aio_client, timeout=timeout + ) + submission_jobs = await self._job_client.get_all_jobs(timeout=timeout) + submission_jobs = [ + JobDetails( + **dataclasses.asdict(job), + submission_id=submission_id, + job_id=submission_job_drivers.get(submission_id).id + if submission_id in submission_job_drivers + else None, + driver_info=submission_job_drivers.get(submission_id), + type=JobType.SUBMISSION, + ) + for submission_id, job in submission_jobs.items() + ] + + return list(driver_jobs.values()) + submission_jobs + + async def get_all_cluster_events(self) -> Dictionary: + return DataSource.events + + @handle_grpc_network_errors + async def get_task_info( + self, node_id: str, timeout: int = None, limit: int = None + ) -> Optional[GetTasksInfoReply]: + if not limit: + limit = RAY_MAX_LIMIT_FROM_DATA_SOURCE + stub = self._raylet_stubs.get(node_id) + if not stub: + raise ValueError(f"Raylet for a node id, {node_id} doesn't exist.") + + reply = await stub.GetTasksInfo( + GetTasksInfoRequest(limit=limit), timeout=timeout + ) + return reply + + @handle_grpc_network_errors + async def get_object_info( + self, node_id: str, timeout: int = None, limit: int = None + ) -> Optional[GetObjectsInfoReply]: + if not limit: + limit = RAY_MAX_LIMIT_FROM_DATA_SOURCE + + stub = self._raylet_stubs.get(node_id) + if not stub: + raise ValueError(f"Raylet for a node id, {node_id} doesn't exist.") + + reply = await stub.GetObjectsInfo( + GetObjectsInfoRequest(limit=limit), + timeout=timeout, + ) + return reply + + @handle_grpc_network_errors + async def get_runtime_envs_info( + self, node_id: str, timeout: int = None, limit: int = None + ) -> Optional[GetRuntimeEnvsInfoReply]: + if not limit: + limit = RAY_MAX_LIMIT_FROM_DATA_SOURCE + + stub = self._runtime_env_agent_stub.get(node_id) + if not stub: + raise ValueError(f"Agent for a node id, {node_id} doesn't exist.") + + reply = await stub.GetRuntimeEnvsInfo( + 
GetRuntimeEnvsInfoRequest(limit=limit), + timeout=timeout, + ) + return reply + + @handle_grpc_network_errors + async def list_logs( + self, node_id: str, glob_filter: str, timeout: int = None + ) -> ListLogsReply: + stub = self._log_agent_stub.get(node_id) + if not stub: + raise ValueError(f"Agent for node id: {node_id} doesn't exist.") + return await stub.ListLogs( + ListLogsRequest(glob_filter=glob_filter), timeout=timeout + ) + + @handle_grpc_network_errors + async def stream_log( + self, + node_id: str, + log_file_name: str, + keep_alive: bool, + lines: int, + interval: Optional[float], + timeout: int, + task_id: Optional[str] = None, + attempt_number: Optional[int] = None, + ) -> UnaryStreamCall: + stub = self._log_agent_stub.get(node_id) + if not stub: + raise ValueError(f"Agent for node id: {node_id} doesn't exist.") + stream = stub.StreamLog( + StreamLogRequest( + keep_alive=keep_alive, + log_file_name=log_file_name, + lines=lines, + interval=interval, + task_id=task_id, + attempt_number=attempt_number, + ), + timeout=timeout, + ) + metadata = await stream.initial_metadata() + if metadata.get(log_consts.LOG_GRPC_ERROR) == log_consts.FILE_NOT_FOUND: + raise ValueError(f'File "{log_file_name}" not found on node {node_id}') + return stream diff --git a/python/ray/util/state/util.py b/python/ray/util/state/util.py new file mode 100644 index 000000000000..16a5221e458f --- /dev/null +++ b/python/ray/util/state/util.py @@ -0,0 +1,61 @@ +from typing import Optional, Union + + +def convert_string_to_type( + val: Optional[Union[str, int, float, bool]], convert_type: Union[int, float, bool] +) -> Union[int, float, bool]: + """Convert the given value to a convert type. + + If the given val is None, it will just return None without the conversion. 
+ + It supports, + str -> int/float/bool + int -> int + bool -> bool + float -> float + """ + if val is None: + return None + elif type(val) is convert_type: + return val + elif convert_type is int: + try: + val = int(val) + except ValueError: + raise ValueError( + f"Failed to convert a value {val} of type {type(val)} to {convert_type}" + ) + elif convert_type is float: + try: + val = float(val) + except ValueError: + raise ValueError( + f"Failed to convert a value {val} of type {type(val)} to {convert_type}" + ) + elif convert_type is bool: + # Without this, "False" will become True. + if val == "False" or val == "false" or val == "0": + val = False + elif val == "True" or val == "true" or val == "1": + val = True + else: + raise ValueError( + f"Failed to convert a value {val} of type {type(val)} to {convert_type}" + ) + else: + assert False, f"Unsupported convert type {convert_type}" + return val + + +def record_deprecated_state_api_import(): + import warnings + from ray._private.usage.usage_lib import TagKey, record_extra_usage_tag + + warnings.warn( + "Ray state API is no longer experimental. Please import from `ray.util.state`. " + "instead. Importing from `ray.experimental` will be deprecated in " + "future releases. 
", + DeprecationWarning, + ) + + record_extra_usage_tag(TagKey.EXPERIMENTAL_STATE_API_IMPORT, "1") diff --git a/release/benchmarks/distributed/dashboard_test.py b/release/benchmarks/distributed/dashboard_test.py index 36fb815b9acb..4fdde6973829 100644 --- a/release/benchmarks/distributed/dashboard_test.py +++ b/release/benchmarks/distributed/dashboard_test.py @@ -8,7 +8,7 @@ import logging from collections import defaultdict -from ray.experimental.state.api import list_nodes +from ray.util.state import list_nodes from ray._private.test_utils import fetch_prometheus_metrics from ray.util.scheduling_strategies import NodeAffinitySchedulingStrategy from pydantic import BaseModel diff --git a/release/benchmarks/distributed/test_many_tasks.py b/release/benchmarks/distributed/test_many_tasks.py index bb7639700534..8a6864a28a08 100644 --- a/release/benchmarks/distributed/test_many_tasks.py +++ b/release/benchmarks/distributed/test_many_tasks.py @@ -4,7 +4,7 @@ import time import tqdm -from ray.experimental.state.api import summarize_tasks +from ray.util.state import summarize_tasks from dashboard_test import DashboardTestAtScale from ray._private.state_api_test_utils import ( StateAPICallSpec, diff --git a/release/nightly_tests/stress_tests/test_state_api_scale.py b/release/nightly_tests/stress_tests/test_state_api_scale.py index 780d9f19fa01..b50c008c8611 100644 --- a/release/nightly_tests/stress_tests/test_state_api_scale.py +++ b/release/nightly_tests/stress_tests/test_state_api_scale.py @@ -15,7 +15,7 @@ import time import os -from ray.experimental.state.api import ( +from ray.util.state import ( get_log, list_actors, list_objects, diff --git a/release/nightly_tests/stress_tests/test_state_api_with_other_tests.py b/release/nightly_tests/stress_tests/test_state_api_with_other_tests.py index be8c396a7df3..b5367d7712f8 100644 --- a/release/nightly_tests/stress_tests/test_state_api_with_other_tests.py +++ 
b/release/nightly_tests/stress_tests/test_state_api_with_other_tests.py @@ -6,7 +6,7 @@ import ray -from ray.experimental.state.api import ( +from ray.util.state import ( list_actors, list_nodes, list_objects, @@ -85,7 +85,6 @@ def run_release_test_in_subprocess(test_file: str, args: List[str]) -> bool: def run_test(test_name: str, test_args: List[str]): - monitor_actor = test_utils.monitor_memory_usage() start = time.perf_counter() @@ -110,7 +109,6 @@ def run_test_with_state_api( call_interval_s: int = 3, print_interval_s: int = 15, ) -> Dict: - start_time = time.perf_counter() # Stage 1: Run with state APIs @@ -175,7 +173,6 @@ def test( test_args, call_interval_s, ): - # Set up state API calling methods def not_none(res): return res is not None diff --git a/rllib/algorithms/tests/test_worker_failures.py b/rllib/algorithms/tests/test_worker_failures.py index e522b5b11439..bb6b2f9d4c26 100644 --- a/rllib/algorithms/tests/test_worker_failures.py +++ b/rllib/algorithms/tests/test_worker_failures.py @@ -5,7 +5,7 @@ import unittest import ray -from ray.experimental.state.api import list_actors +from ray.util.state import list_actors from ray.rllib.algorithms.algorithm_config import AlgorithmConfig from ray.rllib.algorithms.a3c import A3CConfig from ray.rllib.algorithms.apex_dqn import ApexDQNConfig diff --git a/rllib/tests/test_node_failure.py b/rllib/tests/test_node_failure.py index ab1671b66704..3787eaa62a52 100644 --- a/rllib/tests/test_node_failure.py +++ b/rllib/tests/test_node_failure.py @@ -5,7 +5,7 @@ import ray from ray._private.test_utils import get_other_nodes from ray.cluster_utils import Cluster -from ray.experimental.state.api import list_actors +from ray.util.state import list_actors from ray.rllib.algorithms.ppo import PPO, PPOConfig diff --git a/rllib/utils/tests/test_actor_manager.py b/rllib/utils/tests/test_actor_manager.py index f65f925d0508..cf11efde0d00 100644 --- a/rllib/utils/tests/test_actor_manager.py +++ 
b/rllib/utils/tests/test_actor_manager.py @@ -7,7 +7,7 @@ import unittest import ray -from ray.experimental.state.api import list_actors +from ray.util.state import list_actors from ray.rllib.utils.actor_manager import FaultAwareApply, FaultTolerantActorManager diff --git a/src/ray/protobuf/usage.proto b/src/ray/protobuf/usage.proto index 0f212efdcad6..fbbe4bf0afab 100644 --- a/src/ray/protobuf/usage.proto +++ b/src/ray/protobuf/usage.proto @@ -114,6 +114,8 @@ enum TagKey { NUM_ACTOR_TASKS = 306; NUM_NORMAL_TASKS = 307; NUM_DRIVERS = 308; + // State api import usage. + EXPERIMENTAL_STATE_API_IMPORT = 309; // Data // Logical operators, stored in JSON format with operator name and count. From 335c145bec942680dcf7463350cb9502a499689c Mon Sep 17 00:00:00 2001 From: Eric Liang Date: Tue, 16 May 2023 15:34:36 -0700 Subject: [PATCH 422/424] [data] Improve map batches error message for strict mode migration (#35368) --- doc/source/data/data.rst | 8 +++----- python/ray/data/_internal/planner/map_batches.py | 13 +++++++++++-- python/ray/data/block.py | 2 +- 3 files changed, 15 insertions(+), 8 deletions(-) diff --git a/doc/source/data/data.rst b/doc/source/data/data.rst index e93cdaae90aa..95624773d7d2 100644 --- a/doc/source/data/data.rst +++ b/doc/source/data/data.rst @@ -8,16 +8,14 @@ Ray Data: Scalable Datasets for ML .. _data-intro: -Ray Data scales common ML data processing patterns that arise in batch inference -and distributed training applications. These problems occur when it becomes necessary to -combine data preprocessing and model computations in the same job. Ray Data does this by providing +Ray Data scales common ML data processing patterns in batch inference +and distributed training applications. 
Ray Data does this by providing streaming distributed transformations such as maps (:meth:`map_batches `), global and grouped aggregations (:class:`GroupedData `), and shuffling operations (:meth:`random_shuffle `, :meth:`sort `, -:meth:`repartition `), -and is compatible with a variety of file formats, data sources, and distributed frameworks. +:meth:`repartition `). Read on for an overview of the main use cases and operations supported by Ray Data. diff --git a/python/ray/data/_internal/planner/map_batches.py b/python/ray/data/_internal/planner/map_batches.py index e661c07d89bf..d404b5a59a57 100644 --- a/python/ray/data/_internal/planner/map_batches.py +++ b/python/ray/data/_internal/planner/map_batches.py @@ -7,7 +7,7 @@ from ray.data._internal.output_buffer import BlockOutputBuffer from ray.data._internal.numpy_support import is_valid_udf_return from ray.data._internal.util import _truncated_repr -from ray.data.block import UserDefinedFunction, Block, DataBatch +from ray.data.block import UserDefinedFunction, Block, DataBatch, StrictModeError from ray.data.context import DEFAULT_BATCH_SIZE, DataContext @@ -25,7 +25,7 @@ def generate_map_batches_fn( def fn( blocks: Iterator[Block], - ctx: TaskContext, + task_context: TaskContext, batch_fn: UserDefinedFunction, *fn_args, **fn_kwargs, @@ -51,6 +51,15 @@ def validate_batch(batch: Block) -> None: "`numpy.ndarray`, `list`, or `dict[str, numpy.ndarray]`." ) + if context.strict_mode and isinstance(batch, list): + raise StrictModeError( + f"Error validating {_truncated_repr(batch)}: " + "Returning a list of objects from `map_batches` is not " + "allowed in Ray 2.5. To return Python objects, " + "wrap them in a named dict field, e.g., " + "return `{'results': objects}` instead of just `objects`." 
+ ) + if isinstance(batch, collections.abc.Mapping): for key, value in list(batch.items()): if not is_valid_udf_return(value): diff --git a/python/ray/data/block.py b/python/ray/data/block.py index b75144b75c02..7037c8a4bf43 100644 --- a/python/ray/data/block.py +++ b/python/ray/data/block.py @@ -471,7 +471,7 @@ def for_block(block: Block) -> "BlockAccessor[T]": "Standalone Python objects are not " "allowed in Ray 2.5. To use Python objects in a dataset, " "wrap them in a dict of numpy arrays, e.g., " - "return `{'item': np.array(batch)}` instead of just `batch`." + "return `{'item': batch}` instead of just `batch`." ) return SimpleBlockAccessor(block) else: From 21a265c63f0899e1b7a188da80ffe31d862ca4bc Mon Sep 17 00:00:00 2001 From: Artur Niederfahrenhorst Date: Wed, 17 May 2023 00:47:02 +0200 Subject: [PATCH 423/424] [RLlib] Add missing `sampler_results` key to fetch min desired reward in RLlib release tests (#35354) Signed-off-by: Artur Niederfahrenhorst --- rllib/utils/test_utils.py | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/rllib/utils/test_utils.py b/rllib/utils/test_utils.py index 6d1db9114104..fd57836fb47b 100644 --- a/rllib/utils/test_utils.py +++ b/rllib/utils/test_utils.py @@ -786,9 +786,9 @@ def should_check_eval(experiment): check_eval = should_check_eval(e) episode_reward_key = ( - "episode_reward_mean" + "sampler_results/episode_reward_mean" if not check_eval - else "evaluation/episode_reward_mean" + else "evaluation/sampler_results/episode_reward_mean" ) # For smoke-tests, we just run for n min. 
@@ -904,14 +904,18 @@ def should_check_eval(experiment): if check_eval: episode_reward_mean = np.mean( [ - t.metric_analysis["evaluation/episode_reward_mean"]["max"] + t.metric_analysis[ + "evaluation/sampler_results/episode_reward_mean" + ]["max"] for t in trials_for_experiment ] ) else: episode_reward_mean = np.mean( [ - t.metric_analysis["episode_reward_mean"]["max"] + t.metric_analysis["sampler_results/episode_reward_mean"][ + "max" + ] for t in trials_for_experiment ] ) From 03c362361f3c84d2cd505d2c4dc843753fc9cc4e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 16 May 2023 23:19:59 +0000 Subject: [PATCH 424/424] [data](deps): Bump aioboto3 in /python/requirements/data_processing Bumps [aioboto3](https://github.com/terrycain/aioboto3) from 8.3.0 to 11.2.0. - [Changelog](https://github.com/terrycain/aioboto3/blob/main/CHANGELOG.rst) - [Commits](https://github.com/terrycain/aioboto3/compare/v8.3.0...v11.2.0) --- updated-dependencies: - dependency-name: aioboto3 dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] --- python/requirements/data_processing/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/requirements/data_processing/requirements.txt b/python/requirements/data_processing/requirements.txt index 4ec222774ace..325a383c2cc1 100644 --- a/python/requirements/data_processing/requirements.txt +++ b/python/requirements/data_processing/requirements.txt @@ -3,7 +3,7 @@ dask[complete]==2022.2.0; python_version < '3.8' dask[complete]==2022.10.1; python_version >= '3.8' -aioboto3==11.0.1 +aioboto3==11.2.0 crc32c==2.3 flask_cors s3fs==2023.1.0